Diffstat (all entries are regular files, mode -rw-r--r--; the trailing number is the count of lines changed; renames are noted inline):

Documentation/ABI/stable/sysfs-firmware-zynqmp | 103
Documentation/ABI/testing/sysfs-bus-iio | 2
Documentation/ABI/testing/sysfs-class-fpga-bridge | 9
Documentation/ABI/testing/sysfs-class-net-queues | 22
Documentation/ABI/testing/sysfs-devices-system-cpu | 11
Documentation/ABI/testing/sysfs-driver-cortexa53-edac | 10
Documentation/ABI/testing/sysfs-kernel-oops_count | 6
Documentation/ABI/testing/sysfs-kernel-warn_count | 6
Documentation/admin-guide/cgroup-v1/memory.rst | 13
Documentation/admin-guide/hw-vuln/gather_data_sampling.rst | 109
Documentation/admin-guide/hw-vuln/index.rst | 1
Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst | 14
Documentation/admin-guide/hw-vuln/spectre.rst | 29
Documentation/admin-guide/kernel-parameters.txt | 107
Documentation/admin-guide/pm/cpuidle.rst | 15
Documentation/admin-guide/security-bugs.rst | 39
Documentation/admin-guide/sysctl/kernel.rst | 19
Documentation/admin-guide/sysctl/net.rst | 19
Documentation/admin-guide/sysctl/vm.rst | 9
Documentation/arm64/silicon-errata.rst | 7
Documentation/atomic_bitops.txt | 2
Documentation/dev-tools/gdb-kernel-debugging.rst | 4
Documentation/devicetree/bindings/arm/qcom.yaml | 2
Documentation/devicetree/bindings/arm/xilinx.yaml | 12
Documentation/devicetree/bindings/arm/zynq/zynq-efuse.txt | 15
Documentation/devicetree/bindings/arm/zynq/zynq-ocmc.txt | 17
Documentation/devicetree/bindings/ata/ahci-ceva.txt | 3
Documentation/devicetree/bindings/clock/idt,idt8t49n24x.txt | 156
Documentation/devicetree/bindings/clock/silabs,si5324.txt | 78
Documentation/devicetree/bindings/clock/xlnx,versal-clk.txt | 48
Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.txt | 12
Documentation/devicetree/bindings/crypto/zynqmp-rsa.txt | 12
Documentation/devicetree/bindings/crypto/zynqmp-sha.txt | 12
Documentation/devicetree/bindings/display/xlnx/bridge.txt | 29
Documentation/devicetree/bindings/display/xlnx/xlnx,dsi.txt | 74
Documentation/devicetree/bindings/display/xlnx/xlnx,mixer.txt | 163
Documentation/devicetree/bindings/display/xlnx/xlnx,pl-disp.txt | 41
Documentation/devicetree/bindings/display/xlnx/xlnx,sdi-tx.txt | 54
Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-csc.txt | 35
Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-scaler.txt | 51
Documentation/devicetree/bindings/display/xlnx/xlnx,vtc.txt | 32
Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.txt | 82
Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt | 4
Documentation/devicetree/bindings/dma/xilinx/axi-dma.txt | 38
Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt | 67
Documentation/devicetree/bindings/dma/xilinx/vdmatest.txt | 39
Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt | 18
Documentation/devicetree/bindings/dma/xilinx/xilinx_dpdma.txt | 91
Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt | 123
Documentation/devicetree/bindings/drm/xilinx/cresample.txt | 22
Documentation/devicetree/bindings/drm/xilinx/dp.txt | 65
Documentation/devicetree/bindings/drm/xilinx/dp_sub.txt | 65
Documentation/devicetree/bindings/drm/xilinx/dsi.txt | 61
Documentation/devicetree/bindings/drm/xilinx/osd.txt | 19
Documentation/devicetree/bindings/drm/xilinx/rgb2ycrcb.txt | 14
Documentation/devicetree/bindings/drm/xilinx/sdi.txt | 34
Documentation/devicetree/bindings/drm/xilinx/vtc.txt | 18
Documentation/devicetree/bindings/drm/xilinx/xilinx_drm.txt | 162
Documentation/devicetree/bindings/drm/zocl/zocl_drm.txt | 13
Documentation/devicetree/bindings/edac/cortex-arm64-edac.txt | 15
Documentation/devicetree/bindings/edac/pl310_edac_l2.txt | 19
Documentation/devicetree/bindings/edac/zynqmp_ocm_edac.txt | 18
Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt | 16
Documentation/devicetree/bindings/fpga/fpga-region.txt | 1
Documentation/devicetree/bindings/fpga/xlnx,afi-fpga.txt | 61
Documentation/devicetree/bindings/fpga/xlnx,versal-fpga.txt | 10
Documentation/devicetree/bindings/fpga/xlnx,zynq-afi-fpga.txt | 19
Documentation/devicetree/bindings/fpga/xlnx,zynqmp-pcap-fpga.txt | 4
Documentation/devicetree/bindings/gpio/gpio-xilinx.txt | 21
Documentation/devicetree/bindings/gpio/gpio-zynq.txt | 4
Documentation/devicetree/bindings/hwmon/tps544.txt | 14
Documentation/devicetree/bindings/iio/adc/xilinx-ams.txt | 159
Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt | 19
Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt | 2
Documentation/devicetree/bindings/interrupt-controller/xilinx,intc.txt | 56
Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.txt | 124
Documentation/devicetree/bindings/media/xilinx/xlnx,mem2mem.txt | 25
Documentation/devicetree/bindings/media/xilinx/xlnx,sdirxss.txt | 66
Documentation/devicetree/bindings/media/xilinx/xlnx,v-axi4s-switch.txt | 141
Documentation/devicetree/bindings/media/xilinx/xlnx,v-cfa.txt | 58
Documentation/devicetree/bindings/media/xilinx/xlnx,v-cresample.txt | 54
Documentation/devicetree/bindings/media/xilinx/xlnx,v-demosaic.txt | 62
Documentation/devicetree/bindings/media/xilinx/xlnx,v-gamma-lut.txt | 63
Documentation/devicetree/bindings/media/xilinx/xlnx,v-hls.txt | 64
Documentation/devicetree/bindings/media/xilinx/xlnx,v-multi-scaler.txt | 95
Documentation/devicetree/bindings/media/xilinx/xlnx,v-remapper.txt | 61
Documentation/devicetree/bindings/media/xilinx/xlnx,v-rgb2yuv.txt | 54
Documentation/devicetree/bindings/media/xilinx/xlnx,v-scaler.txt | 75
Documentation/devicetree/bindings/media/xilinx/xlnx,v-scd.txt | 164
Documentation/devicetree/bindings/media/xilinx/xlnx,v-switch.txt | 55
Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt | 17
Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-csc.txt | 66
Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-scaler.txt | 93
Documentation/devicetree/bindings/misc/jesd-phy.txt | 24
Documentation/devicetree/bindings/misc/jesd204b.txt | 28
Documentation/devicetree/bindings/misc/xilinx-axitrafgen.txt | 25
Documentation/devicetree/bindings/misc/xlnx,fclk.txt | 12
Documentation/devicetree/bindings/misc/xlnx,flexnoc-pm.txt | 24
Documentation/devicetree/bindings/mmc/arasan,sdhci.txt | 30
Documentation/devicetree/bindings/mtd/arasan_nand.txt | 33
Documentation/devicetree/bindings/mtd/cadence-quadspi.txt | 2
Documentation/devicetree/bindings/mtd/mtd-physmap.txt | 2
Documentation/devicetree/bindings/net/macb.txt | 8
Documentation/devicetree/bindings/net/ti,dp83867.txt | 5
Documentation/devicetree/bindings/net/xilinx-phy.txt | 15
Documentation/devicetree/bindings/net/xilinx-tsn-ethernet.txt | 54
Documentation/devicetree/bindings/net/xilinx_axienet.txt | 133
Documentation/devicetree/bindings/net/xilinx_emaclite.txt | 34
Documentation/devicetree/bindings/net/xilinx_tsn.txt | 14
Documentation/devicetree/bindings/net/xilinx_tsn_ep.txt | 35
Documentation/devicetree/bindings/net/xilinx_tsn_switch.txt | 23
Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt | 90
Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt | 2
Documentation/devicetree/bindings/pci/xilinx-xdma-pl-pcie.txt | 133
Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml (renamed from Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml) | 6
Documentation/devicetree/bindings/phy/phy-zynqmp.txt | 119
Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.txt | 275
Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt | 41
Documentation/devicetree/bindings/remoteproc/xilinx,zynqmp-r5-remoteproc.txt | 135
Documentation/devicetree/bindings/remoteproc/zynq_remoteproc.txt | 47
Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml | 1
Documentation/devicetree/bindings/serial/uartlite.c | 26
Documentation/devicetree/bindings/soc/xilinx/xlnx,ai-engine-npi.txt | 23
Documentation/devicetree/bindings/soc/xilinx/xlnx,ai_engine.txt | 28
Documentation/devicetree/bindings/soc/xilinx/xlnx,vcu.txt | 54
Documentation/devicetree/bindings/sound/qcom,wcd9335.txt | 2
Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt | 6
Documentation/devicetree/bindings/sound/xlnx,dp-snd-card.txt | 17
Documentation/devicetree/bindings/sound/xlnx,dp-snd-codec.txt | 18
Documentation/devicetree/bindings/sound/xlnx,dp-snd-pcm.txt | 18
Documentation/devicetree/bindings/sound/xlnx,i2s.txt | 14
Documentation/devicetree/bindings/sound/xlnx,spdif.txt | 9
Documentation/devicetree/bindings/sound/xlnx,v-uhdsdi-audio.txt | 60
Documentation/devicetree/bindings/spi/spi-xilinx.txt | 15
Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt | 11
Documentation/devicetree/bindings/staging/xroeframer/xroeframer.txt | 17
Documentation/devicetree/bindings/staging/xroetrafficgen/xroetrafficgen.txt | 15
Documentation/devicetree/bindings/uio/xilinx_apm.txt | 44
Documentation/devicetree/bindings/usb/dwc3-xilinx.txt | 21
Documentation/devicetree/bindings/usb/dwc3.txt | 15
Documentation/devicetree/bindings/usb/ehci-xilinx.txt | 21
Documentation/devicetree/bindings/usb/udc-xilinx.txt | 19
Documentation/devicetree/bindings/video/xilinx-fb.txt | 35
Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt | 21
Documentation/devicetree/bindings/xilinx.txt | 1
Documentation/devicetree/bindings/xlnx,ctrl-fb.txt | 22
Documentation/devicetree/bindings/xlnx,ctrl-vpss.txt | 21
Documentation/devicetree/configfs-overlays.txt | 31
Documentation/driver-api/dmaengine/provider.rst | 7
Documentation/driver-api/spi.rst | 4
Documentation/fault-injection/fault-injection.rst | 16
Documentation/filesystems/directory-locking.rst | 37
Documentation/filesystems/locking.rst | 5
Documentation/filesystems/porting.rst | 18
Documentation/filesystems/vfs.rst | 2
Documentation/firmware-guide/acpi/apei/einj.rst | 2
Documentation/input/joydev/joystick.rst | 1
Documentation/ioctl/ioctl-number.rst | 1
Documentation/media/kapi/v4l2-dev.rst | 4
Documentation/media/uapi/v4l/subdev-formats.rst | 899
Documentation/misc-devices/xilinx_sdfec.rst | 291
Documentation/misc-devices/xilinx_trafgen.txt | 87
Documentation/networking/af_xdp.rst | 268
Documentation/networking/decnet.txt | 230
Documentation/power/runtime_pm.rst | 6
Documentation/process/code-of-conduct-interpretation.rst | 2
Documentation/sound/hd-audio/models.rst | 2
Documentation/sound/soc/dapm.rst | 2
Documentation/sphinx/load_config.py | 6
Documentation/trace/histogram.rst | 2
Documentation/translations/zh_CN/video4linux/v4l2-framework.txt | 4
Documentation/virt/kvm/api.txt | 18
Documentation/virt/kvm/devices/vm.txt | 4
MAINTAINERS | 317
Makefile | 24
arch/Kconfig | 3
arch/alpha/include/asm/bugs.h | 20
arch/alpha/kernel/entry.S | 4
arch/alpha/kernel/module.c | 4
arch/alpha/kernel/osf_sys.c | 2
arch/alpha/kernel/setup.c | 3
arch/alpha/kernel/traps.c | 36
arch/alpha/mm/fault.c | 2
arch/arc/include/asm/io.h | 2
arch/arc/include/asm/linkage.h | 8
arch/arc/kernel/signal.c | 6
arch/arc/mm/dma.c | 8
arch/arc/mm/ioremap.c | 2
arch/arm/Kconfig | 2
arch/arm/boot/dts/am335x-pcm-953.dtsi | 28
arch/arm/boot/dts/am33xx-l4.dtsi | 3
arch/arm/boot/dts/am57xx-cl-som-am57x.dts | 2
arch/arm/boot/dts/arm-realview-pb1176.dts | 2
arch/arm/boot/dts/armada-370.dtsi | 2
arch/arm/boot/dts/armada-375.dtsi | 2
arch/arm/boot/dts/armada-380.dtsi | 4
arch/arm/boot/dts/armada-385-turris-omnia.dts | 22
arch/arm/boot/dts/armada-385.dtsi | 6
arch/arm/boot/dts/armada-39x.dtsi | 6
arch/arm/boot/dts/armada-xp-mv78230.dtsi | 8
arch/arm/boot/dts/armada-xp-mv78260.dtsi | 16
arch/arm/boot/dts/aspeed-ast2500-evb.dts | 2
arch/arm/boot/dts/aspeed-ast2600-evb.dts | 2
arch/arm/boot/dts/at91sam9261ek.dts | 2
arch/arm/boot/dts/at91sam9g20ek_common.dtsi | 9
arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts | 3
arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts | 14
arch/arm/boot/dts/bcm47189-luxul-xap-810.dts | 15
arch/arm/boot/dts/bcm5301x.dtsi | 1
arch/arm/boot/dts/bcm53573.dtsi | 3
arch/arm/boot/dts/bcm947189acdbmr.dts | 6
arch/arm/boot/dts/dove.dtsi | 2
arch/arm/boot/dts/exynos3250-rinato.dts | 2
arch/arm/boot/dts/exynos4-cpu-thermal.dtsi | 2
arch/arm/boot/dts/exynos4.dtsi | 2
arch/arm/boot/dts/exynos4412-itop-elite.dts | 2
arch/arm/boot/dts/exynos4412-midas.dtsi | 2
arch/arm/boot/dts/exynos4412-origen.dts | 2
arch/arm/boot/dts/exynos5410-odroidxu.dts | 1
arch/arm/boot/dts/exynos5420.dtsi | 2
arch/arm/boot/dts/imx1-ads.dts | 2
arch/arm/boot/dts/imx1-apf9328.dts | 2
arch/arm/boot/dts/imx1.dtsi | 5
arch/arm/boot/dts/imx23-sansa.dts | 12
arch/arm/boot/dts/imx23.dtsi | 4
arch/arm/boot/dts/imx25-eukrea-cpuimx25.dtsi | 2
arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-cmo-qvga.dts | 2
arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-svga.dts | 2
arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dts | 2
arch/arm/boot/dts/imx25-pdk.dts | 2
arch/arm/boot/dts/imx27-apf27dev.dts | 4
arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi | 4
arch/arm/boot/dts/imx27-eukrea-mbimxsd27-baseboard.dts | 2
arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts | 2
arch/arm/boot/dts/imx27-phytec-phycore-rdk.dts | 2
arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi | 2
arch/arm/boot/dts/imx27.dtsi | 3
arch/arm/boot/dts/imx28.dtsi | 4
arch/arm/boot/dts/imx35.dtsi | 2
arch/arm/boot/dts/imx53-ppd.dts | 2
arch/arm/boot/dts/imx6dl-yapp4-common.dtsi | 28
arch/arm/boot/dts/imx6dl.dtsi | 3
arch/arm/boot/dts/imx6q.dtsi | 3
arch/arm/boot/dts/imx6qdl-gw560x.dtsi | 1
arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi | 10
arch/arm/boot/dts/imx6qdl.dtsi | 10
arch/arm/boot/dts/imx6qp.dtsi | 6
arch/arm/boot/dts/imx6sl.dtsi | 10
arch/arm/boot/dts/imx6sll.dtsi | 37
arch/arm/boot/dts/imx6sx.dtsi | 21
arch/arm/boot/dts/imx6ul.dtsi | 39
arch/arm/boot/dts/imx7d-pico-hobbit.dts | 2
arch/arm/boot/dts/imx7d-sdb.dts | 9
arch/arm/boot/dts/imx7d.dtsi | 9
arch/arm/boot/dts/imx7s.dtsi | 39
arch/arm/boot/dts/integratorap.dts | 1
arch/arm/boot/dts/kirkwood-lsxl.dtsi | 16
arch/arm/boot/dts/moxart-uc7112lx.dts | 2
arch/arm/boot/dts/moxart.dtsi | 4
arch/arm/boot/dts/omap3-cm-t3x.dtsi | 2
arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi | 2
arch/arm/boot/dts/omap3-gta04a5one.dts | 4
arch/arm/boot/dts/omap3-lilly-a83x.dtsi | 2
arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi | 2
arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi | 2
arch/arm/boot/dts/omap3-pandora-common.dtsi | 2
arch/arm/boot/dts/omap4-droid4-xt894.dts | 5
arch/arm/boot/dts/omap5-cm-t54.dts | 2
arch/arm/boot/dts/qcom-apq8064.dtsi | 4
arch/arm/boot/dts/qcom-ipq4019.dtsi | 4
arch/arm/boot/dts/qcom-ipq8064.dtsi | 12
arch/arm/boot/dts/qcom-mdm9615.dtsi | 15
arch/arm/boot/dts/qcom-pm8841.dtsi | 1
arch/arm/boot/dts/rk3036-evb.dts | 2
arch/arm/boot/dts/rk3036.dtsi | 14
arch/arm/boot/dts/rk3188-radxarock.dts | 2
arch/arm/boot/dts/rk3188.dtsi | 3
arch/arm/boot/dts/rk3288-evb-act8846.dts | 2
arch/arm/boot/dts/rk3288-firefly.dtsi | 2
arch/arm/boot/dts/rk3288-miqi.dts | 2
arch/arm/boot/dts/rk3288-rock2-square.dts | 2
arch/arm/boot/dts/rk3288.dtsi | 3
arch/arm/boot/dts/rk3xxx.dtsi | 7
arch/arm/boot/dts/s3c6410-mini6410.dts | 38
arch/arm/boot/dts/s3c64xx-pinctrl.dtsi | 210
arch/arm/boot/dts/s5pv210-smdkv210.dts | 24
arch/arm/boot/dts/s5pv210.dtsi | 2
arch/arm/boot/dts/spear320-hmi.dts | 2
arch/arm/boot/dts/spear600.dtsi | 2
arch/arm/boot/dts/stm32f7-pinctrl.dtsi | 82
arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts | 24
arch/arm/boot/dts/uniphier-pxs2.dtsi | 8
arch/arm/boot/dts/vexpress-v2p-ca5s.dts | 1
arch/arm/boot/dts/zynq-7000.dtsi | 226
arch/arm/boot/dts/zynq-cc108.dts | 41
arch/arm/boot/dts/zynq-zc702.dts | 71
arch/arm/boot/dts/zynq-zc706.dts | 54
arch/arm/boot/dts/zynq-zc770-xm010.dts | 36
arch/arm/boot/dts/zynq-zc770-xm011.dts | 42
arch/arm/boot/dts/zynq-zc770-xm012.dts | 42
arch/arm/boot/dts/zynq-zc770-xm013.dts | 37
arch/arm/boot/dts/zynq-zed.dts | 51
arch/arm/boot/dts/zynq-zturn.dts | 2
arch/arm/boot/dts/zynq-zybo.dts | 12
arch/arm/configs/xilinx_zynq_defconfig | 240
arch/arm/crypto/sha256_glue.c | 13
arch/arm/crypto/sha512-glue.c | 12
arch/arm/include/asm/bugs.h | 4
arch/arm/include/asm/exception.h | 4
arch/arm/include/asm/hardirq.h | 2
arch/arm/include/asm/perf_event.h | 2
arch/arm/include/asm/pgtable-nommu.h | 6
arch/arm/include/asm/pgtable.h | 16
arch/arm/include/asm/smp.h | 3
arch/arm/kernel/bugs.c | 3
arch/arm/kernel/hw_breakpoint.c | 8
arch/arm/kernel/smp.c | 168
arch/arm/kernel/traps.c | 2
arch/arm/kernel/unwind.c | 25
arch/arm/lib/findbit.S | 16
arch/arm/lib/memset.S | 1
arch/arm/mach-bcm/bcm_kona_smc.c | 1
arch/arm/mach-davinci/Kconfig | 1
arch/arm/mach-ep93xx/core.c | 1
arch/arm/mach-ep93xx/timer-ep93xx.c | 3
arch/arm/mach-imx/cpu-imx25.c | 1
arch/arm/mach-imx/cpu-imx27.c | 11
arch/arm/mach-imx/cpu-imx31.c | 10
arch/arm/mach-imx/cpu-imx35.c | 10
arch/arm/mach-imx/cpu-imx5.c | 1
arch/arm/mach-imx/mmdc.c | 29
arch/arm/mach-mmp/time.c | 11
arch/arm/mach-mxs/mach-mxs.c | 4
arch/arm/mach-omap1/timer.c | 2
arch/arm/mach-omap2/display.c | 3
arch/arm/mach-omap2/id.c | 5
arch/arm/mach-omap2/powerdomain.c | 2
arch/arm/mach-omap2/prm3xxx.c | 1
arch/arm/mach-omap2/timer.c | 1
arch/arm/mach-orion5x/board-dt.c | 3
arch/arm/mach-orion5x/common.h | 6
arch/arm/mach-pxa/sharpsl_pm.c | 2
arch/arm/mach-pxa/spitz.c | 14
arch/arm/mach-sa1100/assabet.c | 2
arch/arm/mach-sa1100/jornada720_ssp.c | 5
arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c | 5
arch/arm/mach-sunxi/mc_smp.c | 8
arch/arm/mach-zynq/Kconfig | 16
arch/arm/mach-zynq/Makefile | 6
arch/arm/mach-zynq/common.c | 10
arch/arm/mach-zynq/common.h | 26
arch/arm/mach-zynq/efuse.c | 75
arch/arm/mach-zynq/platsmp.c | 10
arch/arm/mach-zynq/pm.c | 170
arch/arm/mach-zynq/slcr.c | 48
arch/arm/mach-zynq/suspend.S | 185
arch/arm/mach-zynq/zynq_ocm.c | 245
arch/arm/mm/alignment.c | 2
arch/arm/mm/dma-mapping.c | 8
arch/arm/mm/fault.c | 2
arch/arm/mm/mmu.c | 4
arch/arm/mm/nommu.c | 19
arch/arm/nwfpe/Makefile | 6
arch/arm/probes/kprobes/checkers-common.c | 2
arch/arm/probes/kprobes/core.c | 2
arch/arm/probes/kprobes/opt-arm.c | 2
arch/arm/probes/kprobes/test-core.c | 2
arch/arm/probes/kprobes/test-core.h | 4
arch/arm/xen/enlighten.c | 3
arch/arm/xen/mm.c | 12
arch/arm64/Kconfig | 16
arch/arm64/Kconfig.platforms | 2
arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts | 2
arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts | 2
arch/arm64/boot/dts/amlogic/meson-axg.dtsi | 8
arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi | 11
arch/arm64/boot/dts/amlogic/meson-g12a.dtsi | 20
arch/arm64/boot/dts/amlogic/meson-gx.dtsi | 12
arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts | 2
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi | 2
arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h | 2
arch/arm64/boot/dts/freescale/imx8mm.dtsi | 4
arch/arm64/boot/dts/freescale/imx8mn.dtsi | 2
arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts | 3
arch/arm64/boot/dts/mediatek/mt2712-evb.dts | 12
arch/arm64/boot/dts/mediatek/mt2712e.dtsi | 22
arch/arm64/boot/dts/mediatek/mt6797.dtsi | 2
arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts | 5
arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts | 3
arch/arm64/boot/dts/mediatek/mt7622.dtsi | 1
arch/arm64/boot/dts/mediatek/mt8173-evb.dts | 4
arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi | 783
arch/arm64/boot/dts/qcom/ipq8074.dtsi | 2
arch/arm64/boot/dts/qcom/msm8916.dtsi | 6
arch/arm64/boot/dts/qcom/msm8996.dtsi | 213
arch/arm64/boot/dts/qcom/msm8998.dtsi | 32
arch/arm64/boot/dts/qcom/qcs404.dtsi | 12
arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi | 4
arch/arm64/boot/dts/qcom/sdm845-db845c.dts | 2
arch/arm64/boot/dts/qcom/sdm845-mtp.dts | 2
arch/arm64/boot/dts/qcom/sdm845.dtsi | 26
arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts | 6
arch/arm64/boot/dts/renesas/r8a774c0.dtsi | 3
arch/arm64/boot/dts/renesas/r8a77990.dtsi | 3
arch/arm64/boot/dts/renesas/ulcb-kf.dtsi | 3
arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts | 2
arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts | 5
arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi | 9
arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts | 2
arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi | 1
arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts | 1
arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi | 8
arch/arm64/boot/dts/ti/k3-am654-base-board.dts | 22
arch/arm64/boot/dts/xilinx/Makefile | 5
arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi | 313
arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi | 46
arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts | 35
arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts | 32
arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts | 42
arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts | 311
arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts | 384
arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts | 64
arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts | 47
arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts | 333
arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts | 332
arch/arm64/boot/dts/xilinx/zynqmp-zcu102-rev1.0.dts | 4
arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts | 446
arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts | 3
arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts | 320
arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts | 547
arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts | 382
arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts | 325
arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revA.dts | 72
arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revB.dts | 106
arch/arm64/boot/dts/xilinx/zynqmp-zcu1285-revA.dts | 246
arch/arm64/boot/dts/xilinx/zynqmp.dtsi | 626
arch/arm64/configs/xilinx_versal_defconfig | 228
arch/arm64/configs/xilinx_zynqmp_defconfig | 396
arch/arm64/crypto/Kconfig | 1
arch/arm64/include/asm/atomic_ll_sc.h | 114
arch/arm64/include/asm/atomic_lse.h | 16
arch/arm64/include/asm/cpucaps.h | 3
arch/arm64/include/asm/cpufeature.h | 23
arch/arm64/include/asm/cputype.h | 6
arch/arm64/include/asm/debug-monitors.h | 1
arch/arm64/include/asm/kvm_emulate.h | 22
arch/arm64/include/asm/pgtable.h | 6
arch/arm64/include/asm/processor.h | 3
arch/arm64/include/asm/syscall_wrapper.h | 2
arch/arm64/include/asm/sysreg.h | 12
arch/arm64/kernel/armv8_deprecated.c | 13
arch/arm64/kernel/cacheinfo.c | 6
arch/arm64/kernel/cpu_errata.c | 46
arch/arm64/kernel/cpufeature.c | 13
arch/arm64/kernel/debug-monitors.c | 5
arch/arm64/kernel/efi.c | 52
arch/arm64/kernel/hw_breakpoint.c | 4
arch/arm64/kernel/kgdb.c | 2
arch/arm64/kernel/topology.c | 40
arch/arm64/kernel/traps.c | 2
arch/arm64/kvm/sys_regs.c | 10
arch/arm64/mm/dma-mapping.c | 8
arch/arm64/mm/fault.c | 6
arch/arm64/mm/mmu.c | 4
arch/c6x/mm/dma-coherent.c | 14
arch/csky/abiv1/alignment.c | 2
arch/csky/kernel/traps.c | 2
arch/csky/mm/dma-mapping.c | 8
arch/h8300/kernel/traps.c | 3
arch/h8300/mm/fault.c | 2
arch/hexagon/kernel/dma.c | 4
arch/hexagon/kernel/traps.c | 2
arch/ia64/Kconfig | 3
arch/ia64/include/asm/bugs.h | 20
arch/ia64/include/asm/processor.h | 2
arch/ia64/kernel/mca_drv.c | 3
arch/ia64/kernel/process.c | 2
arch/ia64/kernel/salinfo.c | 2
arch/ia64/kernel/setup.c | 3
arch/ia64/kernel/topology.c | 5
arch/ia64/kernel/traps.c | 2
arch/ia64/mm/contig.c | 2
arch/ia64/mm/discontig.c | 2
arch/ia64/mm/fault.c | 2
arch/ia64/mm/init.c | 4
arch/ia64/mm/numa.c | 5
arch/ia64/mm/tlb.c | 2
arch/m68k/68000/entry.S | 2
arch/m68k/Kconfig | 1
arch/m68k/Kconfig.devices | 1
arch/m68k/coldfire/entry.S | 2
arch/m68k/fpsp040/skeleton.S | 4
arch/m68k/ifpsp060/os.S | 4
arch/m68k/include/asm/bugs.h | 21
arch/m68k/kernel/dma.c | 4
arch/m68k/kernel/entry.S | 3
arch/m68k/kernel/relocate_kernel.S | 4
arch/m68k/kernel/setup_mm.c | 3
arch/m68k/kernel/signal.c | 14
arch/m68k/kernel/traps.c | 6
arch/m68k/mm/fault.c | 2
arch/microblaze/Kconfig | 1
arch/microblaze/configs/mmu_defconfig | 5
arch/microblaze/configs/nommu_defconfig | 2
arch/microblaze/include/asm/Kbuild | 1
arch/microblaze/include/asm/irq.h | 1
arch/microblaze/kernel/dma.c | 14
arch/microblaze/kernel/exceptions.c | 4
arch/microblaze/kernel/head.S | 10
arch/microblaze/kernel/setup.c | 2
arch/microblaze/kernel/vmlinux.lds.S | 2
arch/microblaze/pci/pci-common.c | 32
arch/mips/Kconfig | 1
arch/mips/alchemy/devboards/db1000.c | 12
arch/mips/alchemy/devboards/db1200.c | 27
arch/mips/alchemy/devboards/db1300.c | 14
arch/mips/alchemy/devboards/db1550.c | 2
arch/mips/bcm47xx/prom.c | 4
arch/mips/bcm63xx/clk.c | 2
arch/mips/bmips/dma.c | 7
arch/mips/bmips/setup.c | 8
arch/mips/cavium-octeon/executive/cvmx-helper-board.c | 2
arch/mips/cavium-octeon/executive/cvmx-helper.c | 2
arch/mips/cavium-octeon/octeon-irq.c | 10
arch/mips/cavium-octeon/octeon-platform.c | 3
arch/mips/configs/decstation_64_defconfig | 2
arch/mips/configs/decstation_defconfig | 2
arch/mips/configs/decstation_r4k_defconfig | 2
arch/mips/configs/gpr_defconfig | 2
arch/mips/configs/jazz_defconfig | 2
arch/mips/configs/mtx1_defconfig | 2
arch/mips/configs/nlm_xlp_defconfig | 2
arch/mips/configs/nlm_xlr_defconfig | 2
arch/mips/configs/rm200_defconfig | 2
arch/mips/fw/lib/cmdline.c | 2
arch/mips/include/asm/bugs.h | 17
arch/mips/include/asm/checksum.h | 3
arch/mips/include/asm/cpu-features.h | 21
arch/mips/include/asm/dec/prom.h | 2
arch/mips/include/asm/fw/fw.h | 2
arch/mips/include/asm/mach-rc32434/pci.h | 2
arch/mips/include/asm/mmzone.h | 6
arch/mips/include/asm/ptrace.h | 1
arch/mips/include/asm/syscall.h | 2
arch/mips/include/asm/vpe.h | 1
arch/mips/jazz/jazzdma.c | 17
arch/mips/kernel/elf.c | 6
arch/mips/kernel/jump_label.c | 2
arch/mips/kernel/proc.c | 2
arch/mips/kernel/setup.c | 26
arch/mips/kernel/smp-cps.c | 8
arch/mips/kernel/traps.c | 2
arch/mips/kernel/vmlinux.lds.S | 2
arch/mips/kernel/vpe-cmp.c | 4
arch/mips/kernel/vpe-mt.c | 11
arch/mips/kvm/mmu.c | 3
arch/mips/lantiq/clk.c | 1
arch/mips/lantiq/prom.c | 6
arch/mips/lasat/picvue_proc.c | 2
arch/mips/lib/dump_tlb.c | 2
arch/mips/loongson32/common/platform.c | 16
arch/mips/loongson32/ls1c/board.c | 1
arch/mips/mm/dma-noncoherent.c | 12
arch/mips/mm/init.c | 17
arch/mips/mm/tlb-r4k.c | 6
arch/mips/mm/tlbex.c | 4
arch/mips/pic32/pic32mzda/early_console.c | 13
arch/mips/pic32/pic32mzda/init.c | 2
arch/nds32/include/asm/memory.h | 6
arch/nds32/kernel/dma.c | 8
arch/nds32/kernel/fpu.c | 2
arch/nds32/kernel/traps.c | 8
arch/nios2/boot/Makefile | 2
arch/nios2/boot/dts/10m50_devboard.dts | 2
arch/nios2/boot/dts/3c120_devboard.dts | 2
arch/nios2/include/asm/entry.h | 3
arch/nios2/include/asm/ptrace.h | 2
arch/nios2/kernel/entry.S | 22
arch/nios2/kernel/signal.c | 3
arch/nios2/kernel/syscall_table.c | 1
arch/nios2/kernel/traps.c | 4
arch/nios2/mm/dma-mapping.c | 8
arch/openrisc/kernel/dma.c | 2
arch/openrisc/kernel/entry.S | 6
arch/openrisc/kernel/traps.c | 2
arch/parisc/include/asm/bugs.h | 20
arch/parisc/include/asm/cacheflush.h | 5
arch/parisc/include/asm/hardware.h | 12
arch/parisc/include/asm/ldcw.h | 36
arch/parisc/include/asm/led.h | 4
arch/parisc/include/asm/processor.h | 1
arch/parisc/include/asm/ropes.h | 3
arch/parisc/include/asm/spinlock_types.h | 5
arch/parisc/include/uapi/asm/mman.h | 23
arch/parisc/include/uapi/asm/pdc.h | 1
arch/parisc/kernel/cache.c | 5
arch/parisc/kernel/drivers.c | 25
arch/parisc/kernel/entry.S | 7
arch/parisc/kernel/firmware.c | 9
arch/parisc/kernel/ftrace.c | 2
arch/parisc/kernel/head.S | 42
arch/parisc/kernel/irq.c | 2
arch/parisc/kernel/pci-dma.c | 26
arch/parisc/kernel/process.c | 11
arch/parisc/kernel/processor.c | 18
arch/parisc/kernel/ptrace.c | 15
arch/parisc/kernel/real2.S | 5
arch/parisc/kernel/sys_parisc.c | 27
arch/parisc/kernel/syscalls/syscall.tbl | 4
arch/parisc/kernel/traps.c | 6
arch/parisc/kernel/unaligned.c | 2
arch/powerpc/Kconfig.debug | 2
arch/powerpc/Makefile | 38
arch/powerpc/boot/Makefile | 1
arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi | 51
arch/powerpc/boot/dts/fsl/mpc8540ads.dts | 2
arch/powerpc/boot/dts/fsl/mpc8541cds.dts | 2
arch/powerpc/boot/dts/fsl/mpc8555cds.dts | 2
arch/powerpc/boot/dts/fsl/mpc8560ads.dts | 2
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi | 44
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi | 44
arch/powerpc/boot/dts/fsl/t2081si-post.dtsi | 20
arch/powerpc/configs/ppc6xx_defconfig | 2
arch/powerpc/include/asm/bugs.h | 15
arch/powerpc/include/asm/imc-pmu.h | 2
arch/powerpc/include/asm/mmu.h | 4
arch/powerpc/include/asm/mmzone.h | 3
arch/powerpc/include/asm/nohash/32/pte-8xx.h | 7
arch/powerpc/include/asm/nohash/64/pgtable.h | 2
arch/powerpc/include/asm/nohash/pgtable.h | 2
arch/powerpc/include/asm/word-at-a-time.h | 2
arch/powerpc/kernel/eeh_driver.c | 71
arch/powerpc/kernel/fadump.c | 1
arch/powerpc/kernel/fpu.S | 13
arch/powerpc/kernel/head_32.S | 2
arch/powerpc/kernel/hw_breakpoint.c | 9
arch/powerpc/kernel/iommu.c | 17
arch/powerpc/kernel/pci-common.c | 45
arch/powerpc/kernel/pci_dn.c | 1
arch/powerpc/kernel/prom.c | 7
arch/powerpc/kernel/rtas.c | 46
arch/powerpc/kernel/rtas_flash.c | 6
arch/powerpc/kernel/systbl.S | 1
arch/powerpc/kernel/trace/ftrace_64_mprofile.S | 17
arch/powerpc/kernel/traps.c | 4
arch/powerpc/kernel/vector.S | 2
arch/powerpc/kernel/vmlinux.lds.S | 6
arch/powerpc/kvm/book3s_64_mmu_radix.c | 2
arch/powerpc/lib/Makefile | 4
arch/powerpc/lib/sstep.c | 10
arch/powerpc/math-emu/math_efp.c | 1
arch/powerpc/mm/book3s64/pgtable.c | 2
arch/powerpc/mm/book3s64/radix_pgtable.c | 4
arch/powerpc/mm/dma-noncoherent.c | 8
arch/powerpc/mm/init-common.c | 5
arch/powerpc/mm/init_64.c | 5
arch/powerpc/mm/kasan/Makefile | 1
arch/powerpc/mm/ptdump/shared.c | 6
arch/powerpc/perf/callchain.c | 1
arch/powerpc/perf/hv-24x7.c | 2
arch/powerpc/perf/hv-gpci-requests.h | 4
arch/powerpc/perf/hv-gpci.c | 62
arch/powerpc/perf/hv-gpci.h | 1
arch/powerpc/perf/imc-pmu.c | 128
arch/powerpc/perf/req-gen/perf.h | 20
arch/powerpc/platforms/44x/Kconfig | 1
arch/powerpc/platforms/512x/clock-commonclk.c | 2
arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c | 1
arch/powerpc/platforms/52xx/mpc52xx_pic.c | 2
arch/powerpc/platforms/83xx/mpc832x_rdb.c | 2
arch/powerpc/platforms/Kconfig.cputype | 25
arch/powerpc/platforms/cell/axon_msi.c | 1
arch/powerpc/platforms/cell/spufs/inode.c | 1
arch/powerpc/platforms/embedded6xx/flipper-pic.c | 2
arch/powerpc/platforms/embedded6xx/hlwd-pic.c | 2
arch/powerpc/platforms/embedded6xx/linkstation.c | 3
arch/powerpc/platforms/embedded6xx/mpc10x.h | 3
arch/powerpc/platforms/embedded6xx/wii.c | 4
arch/powerpc/platforms/powermac/smp.c | 8
arch/powerpc/platforms/powernv/opal-irqchip.c | 2
arch/powerpc/platforms/powernv/opal-powercap.c | 6
arch/powerpc/platforms/powernv/opal-xscom.c | 5
arch/powerpc/platforms/powernv/opal.c | 1
arch/powerpc/platforms/powernv/pci-ioda.c | 3
arch/powerpc/platforms/powernv/rng.c | 2
arch/powerpc/platforms/pseries/hotplug-memory.c | 21
arch/powerpc/platforms/pseries/ibmebus.c | 1
arch/powerpc/platforms/pseries/lpar.c | 24
arch/powerpc/platforms/pseries/lparcfg.c | 20
arch/powerpc/purgatory/Makefile | 5
arch/powerpc/sysdev/fsl_msi.c | 2
arch/powerpc/sysdev/fsl_pci.c | 8
arch/powerpc/sysdev/fsl_pci.h | 1
arch/powerpc/sysdev/tsi108_pci.c | 5
arch/powerpc/sysdev/xive/native.c | 2
arch/powerpc/sysdev/xive/spapr.c | 2
arch/riscv/Kconfig | 2
arch/riscv/Makefile | 5
arch/riscv/include/asm/io.h | 16
arch/riscv/include/asm/uaccess.h | 2
arch/riscv/include/uapi/asm/setup.h | 8
arch/riscv/kernel/process.c | 2
arch/riscv/kernel/smpboot.c | 4
arch/riscv/kernel/stacktrace.c | 2
arch/riscv/kernel/sys_riscv.c | 4
arch/riscv/kernel/time.c | 3
arch/riscv/kernel/traps.c | 6
arch/riscv/kernel/vdso/Makefile | 3
arch/riscv/kernel/vdso/vdso.lds.S | 2
arch/riscv/mm/cacheflush.c | 4
arch/riscv/mm/fault.c | 2
arch/s390/boot/compressed/decompressor.c | 2
arch/s390/boot/ipl_report.c | 8
arch/s390/hypfs/hypfs_diag.c | 2
arch/s390/hypfs/inode.c | 2
arch/s390/include/asm/debug.h | 6
arch/s390/include/asm/fpu/api.h | 2
arch/s390/include/asm/futex.h | 3
arch/s390/include/asm/hugetlb.h | 6
arch/s390/include/asm/pci_io.h | 32
arch/s390/include/asm/percpu.h | 2
arch/s390/kernel/crash_dump.c | 2
arch/s390/kernel/dumpstack.c | 2
arch/s390/kernel/ipl.c | 2
arch/s390/kernel/kprobes.c | 4
arch/s390/kernel/machine_kexec_file.c | 23
arch/s390/kernel/nmi.c | 2
arch/s390/kernel/process.c | 22
arch/s390/kernel/ptrace.c | 14
arch/s390/kernel/smp.c | 2
arch/s390/kernel/sthyi.c | 6
arch/s390/kernel/vmlinux.lds.S | 4
arch/s390/kernel/vtime.c | 4
arch/s390/kvm/intercept.c | 9
arch/s390/kvm/interrupt.c | 12
arch/s390/kvm/kvm-s390.c | 25
arch/s390/kvm/vsie.c | 10
arch/s390/lib/uaccess.c | 2
arch/s390/mm/fault.c | 4
arch/s390/mm/page-states.c | 14
arch/s390/mm/pgtable.c | 2
arch/s390/pci/pci.c | 2
arch/s390/pci/pci_dma.c | 15
arch/s390/pci/pci_mmio.c | 20
arch/sh/Kconfig | 1
arch/sh/Kconfig.debug | 11
arch/sh/boards/mach-ap325rxa/setup.c | 2
arch/sh/boards/mach-ecovec24/setup.c | 6
arch/sh/boards/mach-kfr2r09/setup.c | 2
arch/sh/boards/mach-migor/setup.c | 2
arch/sh/boards/mach-se/7724/setup.c | 6
arch/sh/drivers/dma/dma-sh.c | 37
arch/sh/include/asm/bugs.h | 78
arch/sh/include/asm/processor.h | 2
arch/sh/include/asm/processor_32.h | 1
arch/sh/include/asm/sections.h | 2
arch/sh/kernel/cpu/sh2/probe.c | 2
arch/sh/kernel/cpu/sh4/sq.c | 2
arch/sh/kernel/dma-coherent.c | 6
arch/sh/kernel/head_32.S | 6
arch/sh/kernel/idle.c | 1
arch/sh/kernel/machvec.c | 10
arch/sh/kernel/nmi_debug.c | 4
arch/sh/kernel/setup.c | 59
arch/sh/kernel/signal_32.c | 3
arch/sh/kernel/traps.c | 2
arch/sh/kernel/vmlinux.lds.S | 1
arch/sh/math-emu/sfp-util.h | 4
arch/sparc/Kconfig | 3
arch/sparc/include/asm/bugs.h | 18
arch/sparc/kernel/ioport.c | 4
arch/sparc/kernel/leon_pci_grpci1.c | 2
arch/sparc/kernel/leon_pci_grpci2.c | 2
arch/sparc/kernel/setup_32.c | 7
arch/sparc/kernel/traps_32.c | 4
arch/sparc/kernel/traps_64.c | 4
arch/um/Kconfig | 14
arch/um/Makefile | 11
arch/um/configs/i386_defconfig | 1
arch/um/configs/x86_64_defconfig | 1
arch/um/drivers/Kconfig | 16
arch/um/drivers/Makefile | 2
arch/um/drivers/net_kern.c | 2
arch/um/drivers/vector_kern.c | 1
arch/um/include/asm/bugs.h | 7
arch/um/include/shared/kern_util.h | 2
arch/um/kernel/process.c | 2
arch/um/kernel/um_arch.c | 5
arch/um/kernel/vmlinux.lds.S | 2
arch/um/os-Linux/helper.c | 6
arch/um/os-Linux/skas/process.c | 17
arch/um/os-Linux/util.c | 19
arch/x86/Kconfig | 20
arch/x86/Kconfig.cpu | 2
arch/x86/Makefile.um | 2
arch/x86/boot/Makefile | 2
arch/x86/boot/bioscall.S | 4
arch/x86/boot/boot.h | 36
arch/x86/boot/compressed/Makefile | 4
arch/x86/boot/compressed/head_32.S | 3
arch/x86/boot/compressed/head_64.S | 39
arch/x86/boot/main.c | 2
arch/x86/boot/pmjump.S | 6
arch/x86/crypto/ghash-clmulni-intel_glue.c | 6
arch/x86/entry/calling.h | 68
arch/x86/entry/entry_32.S | 8
arch/x86/entry/entry_64.S | 44
arch/x86/entry/entry_64_compat.S | 11
arch/x86/entry/vdso/Makefile | 2
arch/x86/entry/vdso/vma.c | 4
arch/x86/events/amd/core.c | 2
arch/x86/events/intel/core.c | 1
arch/x86/events/intel/ds.c | 9
arch/x86/events/intel/uncore_snb.c | 18
arch/x86/events/intel/uncore_snbep.c | 1
arch/x86/ia32/ia32_aout.c | 3
arch/x86/include/asm/bugs.h | 2
arch/x86/include/asm/cpu_device_id.h | 132
arch/x86/include/asm/cpu_entry_area.h | 2
arch/x86/include/asm/cpufeature.h | 10
arch/x86/include/asm/cpufeatures.h | 31
arch/x86/include/asm/disabled-features.h | 4
arch/x86/include/asm/fpu/internal.h | 2
arch/x86/include/asm/hyperv-tlfs.h | 4
arch/x86/include/asm/i8259.h | 2
arch/x86/include/asm/intel-family.h | 11
arch/x86/include/asm/kvm_host.h | 3
arch/x86/include/asm/mem_encrypt.h | 2
arch/x86/include/asm/microcode.h | 6
arch/x86/include/asm/microcode_amd.h | 6
arch/x86/include/asm/msr-index.h | 44
arch/x86/include/asm/nospec-branch.h | 57
arch/x86/include/asm/numa.h | 7
arch/x86/include/asm/processor.h | 2
arch/x86/include/asm/reboot.h | 2
arch/x86/include/asm/required-features.h | 4
arch/x86/include/asm/resctrl_sched.h | 19
arch/x86/include/asm/setup.h | 46
arch/x86/include/asm/syscall_wrapper.h | 25
arch/x86/include/asm/virtext.h | 22
arch/x86/include/asm/vmx.h | 6
arch/x86/include/uapi/asm/vmx.h | 4
arch/x86/kernel/acpi/boot.c | 3
arch/x86/kernel/alternative.c | 15
arch/x86/kernel/apic/apic.c | 5
arch/x86/kernel/apic/io_apic.c | 14
arch/x86/kernel/apic/x2apic_phys.c | 5
arch/x86/kernel/apm_32.c | 6
arch/x86/kernel/cpu/amd.c | 248
arch/x86/kernel/cpu/bugs.c | 661
arch/x86/kernel/cpu/common.c | 222
arch/x86/kernel/cpu/cpu.h | 2
arch/x86/kernel/cpu/hygon.c | 12
arch/x86/kernel/cpu/intel.c | 178
arch/x86/kernel/cpu/match.c | 13
arch/x86/kernel/cpu/mce/core.c | 16
arch/x86/kernel/cpu/microcode/amd.c | 72
arch/x86/kernel/cpu/microcode/core.c | 6
arch/x86/kernel/cpu/microcode/intel.c | 8
arch/x86/kernel/cpu/mtrr/mtrr.c | 2
arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 12
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 76
arch/x86/kernel/cpu/scattered.c | 4
arch/x86/kernel/cpu/topology.c | 5
arch/x86/kernel/cpu/tsx.c | 33
arch/x86/kernel/crash.c | 17
arch/x86/kernel/dumpstack.c | 11
arch/x86/kernel/fpu/init.c | 15
arch/x86/kernel/fpu/xstate.c | 8
arch/x86/kernel/i8259.c | 39
arch/x86/kernel/irqinit.c | 4
arch/x86/kernel/kprobes/opt.c | 6
arch/x86/kernel/kvmclock.c | 12
arch/x86/kernel/pmem.c | 7
arch/x86/kernel/process.c | 11
arch/x86/kernel/process_32.c | 2
arch/x86/kernel/process_64.c | 2
arch/x86/kernel/quirks.c | 10
arch/x86/kernel/reboot.c | 88
arch/x86/kernel/smp.c | 6
arch/x86/kernel/smpboot.c | 26
arch/x86/kernel/sysfb_efi.c | 16
arch/x86/kernel/unwind_orc.c | 17
arch/x86/kernel/uprobes.c | 4
arch/x86/kernel/vmlinux.lds.S | 4
arch/x86/kvm/cpuid.c | 4
arch/x86/kvm/cpuid.h | 1
arch/x86/kvm/emulate.c | 127
arch/x86/kvm/hyperv.c | 13
arch/x86/kvm/lapic.c | 12
arch/x86/kvm/svm.c | 16
arch/x86/kvm/vmx/nested.c | 190
arch/x86/kvm/vmx/run_flags.h | 8
arch/x86/kvm/vmx/vmenter.S | 160
arch/x86/kvm/vmx/vmx.c | 193
arch/x86/kvm/vmx/vmx.h | 5
arch/x86/kvm/x86.c | 40
arch/x86/lib/iomap_copy_64.S | 2
arch/x86/lib/misc.c | 2
arch/x86/mm/ident_map.c | 23
arch/x86/mm/init.c | 32
arch/x86/mm/ioremap.c | 8
arch/x86/mm/kaslr.c | 8
arch/x86/mm/mem_encrypt_identity.c | 3
arch/x86/mm/numa.c | 11
arch/x86/mm/pkeys.c | 6
arch/x86/pci/fixup.c | 21
arch/x86/platform/olpc/olpc-xo1-sci.c | 2
arch/x86/power/cpu.c | 22
arch/x86/purgatory/Makefile | 10
arch/x86/realmode/rm/wakeup_asm.S | 6
arch/x86/tools/relocs.c | 8
arch/x86/um/shared/sysdep/syscalls_32.h | 5
arch/x86/um/tls_32.c | 6
arch/x86/um/vdso/Makefile | 2
arch/x86/um/vdso/um_vdso.c | 12
arch/x86/xen/smp.c | 53
arch/x86/xen/smp_pv.c | 14
arch/x86/xen/spinlock.c | 6
arch/x86/xen/xen-ops.h | 2
arch/xtensa/boot/Makefile | 3
arch/xtensa/boot/lib/zmem.c | 5
arch/xtensa/include/asm/bugs.h | 18
arch/xtensa/include/asm/core.h | 13
arch/xtensa/include/asm/page.h | 4
arch/xtensa/kernel/pci-dma.c | 8
arch/xtensa/kernel/perf_event.c | 17
arch/xtensa/kernel/traps.c | 2
arch/xtensa/platforms/iss/network.c | 6
block/bfq-cgroup.c | 4
block/bfq-iosched.c | 4
block/bio-integrity.c | 1
block/bio.c | 9
block/blk-cgroup.c | 4
block/blk-core.c | 5
block/blk-iocost.c | 21
block/blk-merge.c | 2
block/blk-mq-debugfs.c | 3
block/blk-mq-sched.c | 71
block/blk-mq-sysfs.c | 11
block/blk-mq.c | 22
block/blk-throttle.c | 2
block/opal_proto.h | 1
block/partition-generic.c | 7
block/partitions/amiga.c | 104
block/sed-opal.c | 38
crypto/af_alg.c | 21
crypto/akcipher.c | 8
crypto/algapi.c | 1
crypto/algif_skcipher.c | 7
crypto/asymmetric_keys/pkcs7_verify.c | 10
crypto/asymmetric_keys/public_key.c | 36
crypto/asymmetric_keys/verify_pefile.c | 32
crypto/asymmetric_keys/x509_public_key.c | 5
crypto/blkcipher.c | 9
crypto/drbg.c | 16
crypto/essiv.c | 10
crypto/pcrypt.c | 4
crypto/rsa-pkcs1pad.c | 34
crypto/scompress.c | 6
crypto/seqiv.c | 2
crypto/skcipher.c | 18
crypto/tcrypt.c | 9
drivers/acpi/acpi_extlog.c | 38
drivers/acpi/acpi_lpit.c | 2
drivers/acpi/acpi_lpss.c | 3
drivers/acpi/acpi_pad.c | 2
drivers/acpi/acpi_video.c | 37
drivers/acpi/acpica/Makefile | 2
drivers/acpi/acpica/dbnames.c | 3
drivers/acpi/acpica/dsmethod.c | 10
drivers/acpi/acpica/dswstate.c | 11
drivers/acpi/acpica/hwvalid.c | 7
drivers/acpi/acpica/nsrepair.c | 12
drivers/acpi/acpica/psopcode.c | 2
drivers/acpi/acpica/utcopy.c | 7
drivers/acpi/apei/bert.c | 31
drivers/acpi/arm64/iort.c | 31
drivers/acpi/battery.c | 2
drivers/acpi/cppc_acpi.c | 54
drivers/acpi/device_sysfs.c | 10
drivers/acpi/ec.c | 8
drivers/acpi/irq.c | 7
drivers/acpi/nfit/core.c | 2
drivers/acpi/pci_mcfg.c | 3
drivers/acpi/processor_idle.c | 2
drivers/acpi/processor_perflib.c | 38
drivers/acpi/processor_thermal.c | 2
drivers/acpi/property.c | 12
drivers/acpi/resource.c | 147
drivers/acpi/scan.c | 8
drivers/acpi/sleep.c | 8
drivers/acpi/thermal.c | 2
drivers/acpi/video_detect.c | 126
drivers/amba/bus.c | 1
drivers/android/binder.c | 489
drivers/android/binder_alloc.c | 41
drivers/android/binder_alloc.h | 1
drivers/ata/ahci.c | 75
drivers/ata/ahci.h | 248
drivers/ata/ahci_imx.c | 2
drivers/ata/libahci.c | 35
drivers/ata/libahci_platform.c | 14
drivers/ata/libata-core.c | 66
drivers/ata/libata-eh.c | 16
drivers/ata/libata-scsi.c | 38
drivers/ata/libata-transport.c | 10
drivers/ata/libata.h | 2
drivers/ata/pata_ftide010.c | 1
drivers/ata/pata_isapnp.c | 3
drivers/ata/pata_ixp4xx_cf.c | 2
drivers/ata/pata_legacy.c | 5
drivers/ata/pata_ns87415.c | 2
drivers/ata/sata_gemini.c | 1
drivers/atm/idt77252.c | 14
drivers/atm/iphase.c | 20
drivers/atm/solos-pci.c | 8
drivers/atm/zatm.c | 2
drivers/base/arch_topology.c | 19
drivers/base/class.c | 5
drivers/base/core.c | 51
drivers/base/cpu.c | 19
drivers/base/dd.c | 27
drivers/base/devcoredump.c | 86
drivers/base/driver.c | 69
drivers/base/platform.c | 28
drivers/base/power/domain.c | 206
drivers/base/power/domain_governor.c | 12
drivers/base/power/power.h | 8
drivers/base/power/runtime.c | 95
drivers/base/power/wakeirq.c | 111
drivers/base/regmap/internal.h | 4
drivers/base/regmap/regcache-rbtree.c | 13
drivers/base/regmap/regcache.c | 6
drivers/base/regmap/regmap-debugfs.c | 2
drivers/base/regmap/regmap-i2c.c | 4
drivers/base/regmap/regmap.c | 103
drivers/base/swnode.c | 3
drivers/base/test/test_async_driver_probe.c | 4
drivers/block/Kconfig | 9
drivers/block/Makefile | 2
drivers/block/aoe/aoecmd.c | 12
drivers/block/aoe/aoenet.c | 1
drivers/block/drbd/drbd_main.c | 4
drivers/block/drbd/drbd_nl.c | 6
drivers/block/drbd/drbd_receiver.c | 2
drivers/block/loop.c | 7
drivers/block/nbd.c | 19
drivers/block/null_blk_main.c | 14
drivers/block/rbd.c | 537
drivers/block/sunvdc.c | 2
drivers/block/sx8.c | 1586
drivers/block/virtio_blk.c | 7
drivers/block/xen-blkfront.c | 3
drivers/bluetooth/btmtkuart.c | 11
drivers/bluetooth/btqcomsmd.c | 17
drivers/bluetooth/btsdio.c | 1
drivers/bluetooth/btusb.c | 41
drivers/bluetooth/hci_bcsp.c | 2
drivers/bluetooth/hci_h5.c | 2
drivers/bluetooth/hci_intel.c | 6
drivers/bluetooth/hci_ll.c | 3
drivers/bluetooth/hci_nokia.c | 6
drivers/bluetooth/hci_qca.c | 2
drivers/bluetooth/hci_vhci.c | 3
drivers/bus/Kconfig | 5
drivers/bus/hisi_lpc.c | 10
drivers/bus/imx-weim.c | 2
drivers/bus/moxtet.c | 7
drivers/bus/sunxi-rsb.c | 37
drivers/bus/ti-sysc.c | 34
drivers/char/agp/parisc-agp.c | 17
drivers/char/hw_random/amd-rng.c | 18
drivers/char/hw_random/core.c | 34
drivers/char/hw_random/geode-rng.c | 42
drivers/char/hw_random/imx-rngc.c | 6
drivers/char/hw_random/iproc-rng200.c | 33
drivers/char/hw_random/st-rng.c | 24
drivers/char/hw_random/virtio-rng.c | 88
drivers/char/ipmi/ipmi_msghandler.c | 12
drivers/char/ipmi/ipmi_si_intf.c | 32
drivers/char/ipmi/ipmi_ssif.c | 205
drivers/char/ipmi/ipmi_watchdog.c | 8
drivers/char/mem.c | 4
drivers/char/random.c | 25
drivers/char/tpm/tpm-interface.c | 5
drivers/char/tpm/tpm_crb.c | 31
drivers/char/tpm/tpm_tis.c | 25
drivers/char/tpm/tpm_tis_core.c | 54
drivers/char/tpm/tpm_vtpm_proxy.c | 30
drivers/clk/Kconfig | 36
drivers/clk/Makefile | 4
drivers/clk/bcm/clk-bcm2835.c | 8
drivers/clk/bcm/clk-iproc-pll.c | 12
drivers/clk/bcm/clk-raspberrypi.c | 2
drivers/clk/berlin/bg2.c | 5
drivers/clk/berlin/bg2q.c | 6
drivers/clk/clk-ast2600.c | 2
drivers/clk/clk-cdce925.c | 12
drivers/clk/clk-conf.c | 12
drivers/clk/clk-devres.c | 104
drivers/clk/clk-fixed-factor.c | 5
drivers/clk/clk-gate.c | 2
drivers/clk/clk-npcm7xx.c | 2
drivers/clk/clk-oxnas.c | 6
drivers/clk/clk-scmi.c | 1
drivers/clk/clk-si5324.c | 1227
drivers/clk/clk-si5324.h | 140
drivers/clk/clk-si5341.c | 4
drivers/clk/clk.c | 66
drivers/clk/hisilicon/clk-hi3519.c | 2
drivers/clk/hisilicon/clk-hi3620.c | 4
drivers/clk/idt/Makefile | 3
drivers/clk/idt/clk-idt8t49n24x-core.c | 933
drivers/clk/idt/clk-idt8t49n24x-core.h | 272
drivers/clk/idt/clk-idt8t49n24x-debugfs.c | 382
drivers/clk/idt/clk-idt8t49n24x-debugfs.h | 21
drivers/clk/idt/clk-idt8t49n24x.c | 641
drivers/clk/imx/Kconfig | 1
drivers/clk/imx/clk-composite-8m.c | 12
drivers/clk/imx/clk-imx6sx.c | 4
drivers/clk/imx/clk-imx8mm.c | 87
drivers/clk/imx/clk-imx8mn.c | 14
drivers/clk/imx/clk-pll14xx.c | 30
drivers/clk/imx/clk.h | 3
drivers/clk/ingenic/tcu.c | 15
drivers/clk/keystone/pll.c | 17
drivers/clk/keystone/sci-clk.c | 2
drivers/clk/mediatek/clk-mt2701.c | 8
drivers/clk/mediatek/clk-mt6779.c | 4
drivers/clk/mediatek/clk-mt6797.c | 6
drivers/clk/mediatek/clk-mt7629-eth.c | 4
drivers/clk/mediatek/clk-mt7629.c | 6
drivers/clk/mediatek/clk-mt8183-mfgcfg.c | 6
drivers/clk/mediatek/reset.c | 4
drivers/clk/meson/meson-aoclk.c | 5
drivers/clk/meson/meson-eeclk.c | 5
drivers/clk/meson/meson8b.c | 5
drivers/clk/mmp/clk-of-pxa168.c | 3
drivers/clk/qcom/camcc-sdm845.c | 4
drivers/clk/qcom/clk-krait.c | 9
drivers/clk/qcom/clk-rcg2.c | 14
drivers/clk/qcom/dispcc-sdm845.c | 2
drivers/clk/qcom/gcc-ipq8074.c | 25
drivers/clk/qcom/gcc-mdm9615.c | 2
drivers/clk/qcom/gcc-qcs404.c | 24
drivers/clk/qcom/gcc-sm8150.c | 98
drivers/clk/qcom/gpucc-sdm845.c | 7
drivers/clk/qcom/reset.c | 33
drivers/clk/qcom/reset.h | 2
drivers/clk/renesas/r7s9210-cpg-mssr.c | 2
drivers/clk/renesas/r9a06g032-clocks.c | 11
drivers/clk/renesas/renesas-cpg-mssr.c | 29
drivers/clk/renesas/renesas-cpg-mssr.h | 12
drivers/clk/rockchip/clk-pll.c | 1
drivers/clk/rockchip/clk-rk3128.c | 2
drivers/clk/rockchip/clk-rk3188.c | 1
drivers/clk/rockchip/clk-rk3399.c | 2
drivers/clk/samsung/clk-pll.c | 1
drivers/clk/si5324.h | 68
drivers/clk/si5324drv.c | 382
drivers/clk/si5324drv.h | 100
drivers/clk/socfpga/clk-gate.c | 16
drivers/clk/socfpga/clk-periph.c | 8
drivers/clk/socfpga/clk-pll.c | 17
drivers/clk/st/clkgen-fsyn.c | 5
drivers/clk/sunxi-ng/ccu_mmc_timing.c | 2
drivers/clk/tegra/clk-bpmp.c | 2
drivers/clk/tegra/clk-emc.c | 2
drivers/clk/tegra/clk-tegra114.c | 1
drivers/clk/tegra/clk-tegra20.c | 29
drivers/clk/tegra/clk-tegra210.c | 1
drivers/clk/ti/clk-dra7-atl.c | 9
drivers/clk/zynq/clkc.c | 43
drivers/clk/zynqmp/clk-mux-zynqmp.c | 2
drivers/clk/zynqmp/clkc.c | 13
drivers/clk/zynqmp/divider.c | 99
drivers/clk/zynqmp/pll.c | 40
drivers/clocksource/sh_cmt.c | 16
drivers/clocksource/timer-atmel-tcb.c | 1
drivers/clocksource/timer-cadence-ttc.c | 45
drivers/clocksource/timer-davinci.c | 48
drivers/clocksource/timer-imx-gpt.c | 18
drivers/clocksource/timer-riscv.c | 2
drivers/counter/104-quad-8.c | 30
drivers/counter/stm32-lptimer-cnt.c | 2
drivers/cpufreq/acpi-cpufreq.c | 1
drivers/cpufreq/amd_freq_sensitivity.c | 3
drivers/cpufreq/armada-37xx-cpufreq.c | 2
drivers/cpufreq/brcmstb-avs-cpufreq.c | 6
drivers/cpufreq/cpufreq-dt-platdev.c | 1
drivers/cpufreq/cpufreq.c | 4
drivers/cpufreq/imx6q-cpufreq.c | 32
drivers/cpufreq/intel_pstate.c | 14
drivers/cpufreq/powernow-k8.c | 3
drivers/cpuidle/dt_idle_states.c | 2
drivers/crypto/Kconfig | 30
drivers/crypto/Makefile | 3
drivers/crypto/amcc/crypto4xx_core.c | 10
drivers/crypto/caam/caamalg.c | 3
drivers/crypto/caam/caamalg_qi2.c | 3
drivers/crypto/caam/caampkc.c | 4
drivers/crypto/cavium/cpt/cptpf_main.c | 6
drivers/crypto/ccp/ccp-dmaengine.c | 17
drivers/crypto/ccp/ccp-ops.c | 5
drivers/crypto/ccree/cc_debugfs.c | 2
drivers/crypto/ccree/cc_driver.c | 11
drivers/crypto/ccree/cc_hash.c | 49
drivers/crypto/ccree/cc_hash.h | 2
drivers/crypto/chelsio/chtls/chtls_cm.c | 2
drivers/crypto/hisilicon/sec/sec_algs.c | 14
drivers/crypto/hisilicon/sec/sec_drv.h | 2
drivers/crypto/img-hash.c | 8
drivers/crypto/inside-secure/safexcel.c | 52
drivers/crypto/inside-secure/safexcel.h | 3
drivers/crypto/marvell/cipher.c | 2
drivers/crypto/n2_core.c | 6
drivers/crypto/nx/Makefile | 2
drivers/crypto/nx/nx.h | 4
drivers/crypto/omap-sham.c | 2
drivers/crypto/sahara.c | 127
drivers/crypto/stm32/stm32-crc32.c | 2
drivers/crypto/stm32/stm32-hash.c | 9
drivers/crypto/virtio/Kconfig | 1
drivers/crypto/virtio/virtio_crypto_common.h | 2
drivers/crypto/virtio/virtio_crypto_core.c | 26
drivers/crypto/zynqmp-aes.c | 336
drivers/crypto/zynqmp-rsa.c | 238
drivers/crypto/zynqmp-sha.c | 301
drivers/devfreq/devfreq.c | 7
drivers/devfreq/governor_userspace.c | 12
drivers/dio/dio.c | 8
drivers/dma-buf/sw_sync.c | 18
drivers/dma-buf/udmabuf.c | 18
drivers/dma/Kconfig | 30
drivers/dma/at_hdmac.c | 34
drivers/dma/at_hdmac_regs.h | 10
drivers/dma/at_xdmac.c | 5
drivers/dma/dmaengine.c | 14
drivers/dma/dmatest.c | 38
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 6
drivers/dma/dw-edma/dw-edma-core.c | 27
drivers/dma/fsl-qdma.c | 50
drivers/dma/imx-sdma.c | 4
drivers/dma/ioat/dma.c | 6
drivers/dma/mcf-edma.c | 13
drivers/dma/mediatek/mtk-uart-apdma.c | 3
drivers/dma/mv_xor_v2.c | 3
drivers/dma/pl330.c | 26
drivers/dma/pxa_dma.c | 5
drivers/dma/sh/rcar-dmac.c | 5
drivers/dma/sh/shdma.h | 2
drivers/dma/sprd-dma.c | 5
drivers/dma/ste_dma40.c | 5
drivers/dma/stm32-mdma.c | 8
drivers/dma/tegra210-adma.c | 2
drivers/dma/ti/edma.c | 4
drivers/dma/xilinx/Kconfig | 60
drivers/dma/xilinx/Makefile | 7
drivers/dma/xilinx/axidmatest.c | 701
drivers/dma/xilinx/vdmatest.c | 662
drivers/dma/xilinx/xilinx_dma.c | 445
drivers/dma/xilinx/xilinx_dpdma.c | 2323
drivers/dma/xilinx/xilinx_frmbuf.c | 1709
drivers/dma/xilinx/xilinx_ps_pcie.h | 44
drivers/dma/xilinx/xilinx_ps_pcie_dma_client.c | 1402
drivers/dma/xilinx/xilinx_ps_pcie_main.c | 200
drivers/dma/xilinx/xilinx_ps_pcie_platform.c | 3170
drivers/dma/xilinx/zynqmp_dma.c | 94
drivers/edac/Kconfig | 22
drivers/edac/Makefile | 3
drivers/edac/cortex_arm64_edac.c | 470
drivers/edac/edac_device.c | 30
drivers/edac/edac_module.h | 2
drivers/edac/highbank_mc_edac.c | 7
drivers/edac/i10nm_base.c | 3
drivers/edac/pl310_edac_l2.c | 233
drivers/edac/qcom_edac.c | 5
drivers/edac/skx_base.c | 4
drivers/edac/thunderx_edac.c | 10
drivers/edac/zynqmp_ocm_edac.c | 651
drivers/extcon/extcon.c | 8
drivers/firewire/core-card.c | 32
drivers/firewire/core-cdev.c | 4
drivers/firewire/core-device.c | 18
drivers/firewire/ohci.c | 65
drivers/firmware/arm_scmi/driver.c | 37
drivers/firmware/arm_scmi/scmi_pm_domain.c | 20
drivers/firmware/arm_scpi.c | 61
drivers/firmware/arm_sdei.c | 37
drivers/firmware/efi/capsule-loader.c | 33
drivers/firmware/efi/efi.c | 13
drivers/firmware/efi/libstub/fdt.c | 8
drivers/firmware/efi/libstub/secureboot.c | 8
drivers/firmware/efi/memattr.c | 2
drivers/firmware/efi/runtime-wrappers.c | 1
drivers/firmware/google/coreboot_table.c | 44
drivers/firmware/google/framebuffer-coreboot.c | 4
drivers/firmware/google/gsmi.c | 16
drivers/firmware/qcom_scm.c | 3
drivers/firmware/raspberrypi.c | 70
drivers/firmware/stratix10-svc.c | 8
drivers/firmware/ti_sci.c | 58
drivers/firmware/ti_sci.h | 2
drivers/firmware/xilinx/Kconfig | 6
drivers/firmware/xilinx/Makefile | 3
drivers/firmware/xilinx/zynqmp-debug.c | 261
drivers/firmware/xilinx/zynqmp-ggs.c | 289
drivers/firmware/xilinx/zynqmp-secure.c | 197
drivers/firmware/xilinx/zynqmp.c | 951
drivers/fpga/Kconfig | 34
drivers/fpga/Makefile | 3
drivers/fpga/altera-pr-ip-core.c | 2
drivers/fpga/fpga-bridge.c | 32
drivers/fpga/fpga-mgr.c | 258
-rw-r--r--drivers/fpga/fpga-region.c10
-rw-r--r--drivers/fpga/stratix10-soc.c4
-rw-r--r--drivers/fpga/versal-fpga.c153
-rw-r--r--drivers/fpga/xilinx-afi.c92
-rw-r--r--drivers/fpga/zynq-afi.c81
-rw-r--r--drivers/fpga/zynqmp-fpga.c316
-rw-r--r--drivers/fsi/fsi-core.c3
-rw-r--r--drivers/fsi/fsi-master-ast-cf.c1
-rw-r--r--drivers/fsi/fsi-sbefifo.c6
-rw-r--r--drivers/gpio/gpio-74x164.c4
-rw-r--r--drivers/gpio/gpio-amd8111.c4
-rw-r--r--drivers/gpio/gpio-aspeed.c2
-rw-r--r--drivers/gpio/gpio-davinci.c2
-rw-r--r--drivers/gpio/gpio-eic-sprd.c32
-rw-r--r--drivers/gpio/gpio-mockup.c2
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c1
-rw-r--r--drivers/gpio/gpio-mxc.c2
-rw-r--r--drivers/gpio/gpio-pca953x.c8
-rw-r--r--drivers/gpio/gpio-pmic-eic-sprd.c1
-rw-r--r--drivers/gpio/gpio-pxa.c1
-rw-r--r--drivers/gpio/gpio-tb10x.c6
-rw-r--r--drivers/gpio/gpio-timberdale.c5
-rw-r--r--drivers/gpio/gpio-tps68470.c6
-rw-r--r--drivers/gpio/gpio-vf610.c6
-rw-r--r--drivers/gpio/gpio-xilinx.c661
-rw-r--r--drivers/gpio/gpio-zynq.c63
-rw-r--r--drivers/gpio/gpiolib-acpi.c14
-rw-r--r--drivers/gpio/gpiolib-of.c4
-rw-r--r--drivers/gpio/gpiolib-sysfs.c15
-rw-r--r--drivers/gpu/drm/Kconfig7
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c99
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c13
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c6
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c4
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c69
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h1
-rw-r--r--drivers/gpu/drm/amd/include/pptable.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h16
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c3
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c9
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c6
-rw-r--r--drivers/gpu/drm/ast/ast_post.c2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h5
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c4
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c33
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c13
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c10
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c6
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c6
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c2
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c2
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c4
-rw-r--r--drivers/gpu/drm/drm_atomic.c12
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c40
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c14
-rw-r--r--drivers/gpu/drm/drm_bridge.c125
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c6
-rw-r--r--drivers/gpu/drm/drm_connector.c3
-rw-r--r--drivers/gpu/drm/drm_crtc.c8
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c6
-rw-r--r--drivers/gpu/drm/drm_drv.c10
-rw-r--r--drivers/gpu/drm/drm_edid.c4
-rw-r--r--drivers/gpu/drm/drm_encoder.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c3
-rw-r--r--drivers/gpu/drm/drm_file.c2
-rw-r--r--drivers/gpu/drm/drm_fourcc.c47
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c5
-rw-r--r--drivers/gpu/drm/drm_gem.c21
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c4
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c24
-rw-r--r--drivers/gpu/drm/drm_internal.h4
-rw-r--r--drivers/gpu/drm/drm_ioctl.c8
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c7
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c70
-rw-r--r--drivers/gpu/drm/drm_panel.c14
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c53
-rw-r--r--drivers/gpu/drm/drm_plane.c1
-rw-r--r--drivers/gpu/drm/drm_prime.c20
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c7
-rw-r--r--drivers/gpu/drm/drm_syncobj.c19
-rw-r--r--drivers/gpu/drm/drm_vblank.c28
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c14
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c12
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c10
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c11
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c19
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c17
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dma.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c21
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c58
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c34
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ringbuffer.c9
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c17
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c1
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c3
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c3
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c5
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c6
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c45
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c8
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c5
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c2
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c24
-rw-r--r--drivers/gpu/drm/meson/meson_vpp.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c7
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c29
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c9
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c5
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c5
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c15
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c10
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c9
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c2
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c3
-rw-r--r--drivers/gpu/drm/mxsfb/Kconfig1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c65
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c26
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c3
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c5
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c5
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c5
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c5
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c5
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c5
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c5
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c5
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c5
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c5
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c5
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c5
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c5
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c7
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c5
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c5
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c5
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c5
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c5
-rw-r--r--drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c5
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c5
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c5
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c81
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c15
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c5
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c5
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c5
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c5
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c5
-rw-r--r--drivers/gpu/drm/panfrost/Kconfig3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c5
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c28
-rw-r--r--drivers/gpu/drm/radeon/cik.c96
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c8
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c7
-rw-r--r--drivers/gpu/drm/radeon/ni.c2
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c14
-rw-r--r--drivers/gpu/drm/radeon/r100.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c8
-rw-r--r--drivers/gpu/drm/radeon/rv740_dpm.c8
-rw-r--r--drivers/gpu/drm/radeon/si.c102
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c4
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c10
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c22
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c10
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c6
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c33
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c1
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c7
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c7
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c19
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c3
-rw-r--r--drivers/gpu/drm/tegra/dc.c4
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c6
-rw-r--r--drivers/gpu/drm/tegra/dsi.c49
-rw-r--r--drivers/gpu/drm/tegra/fb.c1
-rw-r--r--drivers/gpu/drm/tegra/output.c16
-rw-r--r--drivers/gpu/drm/tegra/sor.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c66
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c7
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c30
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c4
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c3
-rw-r--r--drivers/gpu/drm/xilinx/Kconfig59
-rw-r--r--drivers/gpu/drm/xilinx/Makefile14
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_cresample.c154
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_cresample.h40
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_connector.c204
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_connector.h29
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_crtc.c595
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_crtc.h39
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_dp.c2186
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.c2265
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.h69
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_drv.c614
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_drv.h65
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_dsi.c808
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_encoder.c240
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_encoder.h28
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_fb.c511
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_fb.h38
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_gem.c45
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_gem.h25
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_plane.c1098
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_plane.h61
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_sdi.c1452
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_sdi.h29
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_osd.c382
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_osd.h62
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_rgb2yuv.c119
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_rgb2yuv.h35
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_vtc.c645
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_vtc.h44
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig106
-rw-r--r--drivers/gpu/drm/xlnx/Makefile21
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_bridge.c561
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_bridge.h178
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_crtc.c206
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_crtc.h76
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_csc.c571
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_drv.c539
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_drv.h33
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_dsi.c1011
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_fb.c356
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_fb.h33
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_gem.c47
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_gem.h26
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_mixer.c2830
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_pl_disp.c618
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_scaler.c1789
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_sdi.c1227
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_sdi_modes.h356
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_sdi_timing.c425
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_sdi_timing.h20
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_vtc.c447
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c3333
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.h36
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c1917
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.h38
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c194
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.h28
-rw-r--r--drivers/gpu/drm/zocl/Kconfig8
-rw-r--r--drivers/gpu/drm/zocl/Makefile4
-rw-r--r--drivers/gpu/drm/zocl/zocl_bo.c271
-rw-r--r--drivers/gpu/drm/zocl/zocl_drv.c216
-rw-r--r--drivers/gpu/drm/zocl/zocl_drv.h59
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c3
-rw-r--r--drivers/gpu/host1x/mipi.c4
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c1
-rw-r--r--drivers/hid/hid-alps.c2
-rw-r--r--drivers/hid/hid-apple.c63
-rw-r--r--drivers/hid/hid-asus.c161
-rw-r--r--drivers/hid/hid-betopff.c17
-rw-r--r--drivers/hid/hid-bigbenff.c78
-rw-r--r--drivers/hid/hid-core.c44
-rw-r--r--drivers/hid/hid-cp2112.c28
-rw-r--r--drivers/hid/hid-debug.c4
-rw-r--r--drivers/hid/hid-google-hammer.c2
-rw-r--r--drivers/hid/hid-holtek-kbd.c4
-rw-r--r--drivers/hid/hid-hyperv.c2
-rw-r--r--drivers/hid/hid-ids.h13
-rw-r--r--drivers/hid/hid-input.c8
-rw-r--r--drivers/hid/hid-ite.c26
-rw-r--r--drivers/hid/hid-lg4ff.c6
-rw-r--r--drivers/hid/hid-logitech-dj.c5
-rw-r--r--drivers/hid/hid-logitech-hidpp.c56
-rw-r--r--drivers/hid/hid-magicmouse.c2
-rw-r--r--drivers/hid/hid-multitouch.c38
-rw-r--r--drivers/hid/hid-plantronics.c9
-rw-r--r--drivers/hid/hid-quirks.c7
-rw-r--r--drivers/hid/hid-roccat.c4
-rw-r--r--drivers/hid/hid-saitek.c2
-rw-r--r--drivers/hid/hid-sensor-custom.c2
-rw-r--r--drivers/hid/hid-steam.c10
-rw-r--r--drivers/hid/hid-uclogic-core.c1
-rw-r--r--drivers/hid/hidraw.c3
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.h2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c68
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/dma-if.c10
-rw-r--r--drivers/hid/uhid.c1
-rw-r--r--drivers/hid/wacom.h4
-rw-r--r--drivers/hid/wacom_sys.c107
-rw-r--r--drivers/hid/wacom_wac.c238
-rw-r--r--drivers/hid/wacom_wac.h3
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c15
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c8
-rw-r--r--drivers/hv/channel_mgmt.c24
-rw-r--r--drivers/hv/vmbus_drv.c11
-rw-r--r--drivers/hwmon/acpi_power_meter.c4
-rw-r--r--drivers/hwmon/adt7475.c8
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c7
-rw-r--r--drivers/hwmon/coretemp.c211
-rw-r--r--drivers/hwmon/gpio-fan.c3
-rw-r--r--drivers/hwmon/i5500_temp.c2
-rw-r--r--drivers/hwmon/ibmpex.c1
-rw-r--r--drivers/hwmon/ina3221.c2
-rw-r--r--drivers/hwmon/it87.c4
-rw-r--r--drivers/hwmon/ltc2945.c2
-rw-r--r--drivers/hwmon/mlxreg-fan.c6
-rw-r--r--drivers/hwmon/nct7802.c2
-rw-r--r--drivers/hwmon/pmbus/Kconfig20
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/pmbus.c7
-rw-r--r--drivers/hwmon/pmbus/tps544.c364
-rw-r--r--drivers/hwmon/xgene-hwmon.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h2
-rw-r--r--drivers/hwtracing/coresight/coresight.c1
-rw-r--r--drivers/hwtracing/intel_th/msu-sink.c3
-rw-r--r--drivers/hwtracing/intel_th/msu.c14
-rw-r--r--drivers/hwtracing/intel_th/pci.c25
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c58
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c11
-rw-r--r--drivers/i2c/busses/i2c-cadence.c559
-rw-r--r--drivers/i2c/busses/i2c-i801.c40
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c10
-rw-r--r--drivers/i2c/busses/i2c-ismt.c3
-rw-r--r--drivers/i2c/busses/i2c-ocores.c35
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c10
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c46
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c40
-rw-r--r--drivers/i2c/busses/i2c-sprd.c8
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c9
-rw-r--r--drivers/i2c/busses/i2c-sun6i-p2wi.c5
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c3
-rw-r--r--drivers/i2c/busses/i2c-xiic.c109
-rw-r--r--drivers/i2c/i2c-core-base.c3
-rw-r--r--drivers/i2c/i2c-core.h4
-rw-r--r--drivers/i2c/i2c-mux.c2
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c6
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpmux.c3
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c2
-rw-r--r--drivers/i3c/master.c4
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c13
-rw-r--r--drivers/ide/ide-acpi.c2
-rw-r--r--drivers/ide/ide-atapi.c2
-rw-r--r--drivers/ide/ide-io-std.c4
-rw-r--r--drivers/ide/ide-io.c8
-rw-r--r--drivers/ide/ide-sysfs.c2
-rw-r--r--drivers/ide/umc8672.c2
-rw-r--r--drivers/idle/intel_idle.c43
-rw-r--r--drivers/iio/Kconfig1
-rw-r--r--drivers/iio/Makefile1
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c1
-rw-r--r--drivers/iio/accel/mma9551_core.c10
-rw-r--r--drivers/iio/adc/Kconfig26
-rw-r--r--drivers/iio/adc/Makefile2
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c8
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c16
-rw-r--r--drivers/iio/adc/at91_adc.c4
-rw-r--r--drivers/iio/adc/berlin2-adc.c4
-rw-r--r--drivers/iio/adc/exynos_adc.c24
-rw-r--r--drivers/iio/adc/mcp3911.c19
-rw-r--r--drivers/iio/adc/meson_saradc.c2
-rw-r--r--drivers/iio/adc/mxs-lradc-adc.c10
-rw-r--r--drivers/iio/adc/palmas_gpadc.c2
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c1
-rw-r--r--drivers/iio/adc/ti-adc128s052.c14
-rw-r--r--drivers/iio/adc/ti-ads7950.c1
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c4
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c32
-rw-r--r--drivers/iio/adc/xilinx-ams.c1109
-rw-r--r--drivers/iio/adc/xilinx-ams.h278
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c122
-rw-r--r--drivers/iio/addac/Kconfig24
-rw-r--r--drivers/iio/addac/Makefile7
-rw-r--r--drivers/iio/addac/stx104.c (renamed from drivers/iio/adc/stx104.c)96
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c2
-rw-r--r--drivers/iio/common/ms_sensors/ms_sensors_i2c.c4
-rw-r--r--drivers/iio/dac/Makefile2
-rw-r--r--drivers/iio/dac/ad5593r.c46
-rw-r--r--drivers/iio/dac/cio-dac.c4
-rw-r--r--drivers/iio/dac/mcp4725.c16
-rw-r--r--drivers/iio/health/afe4403.c5
-rw-r--r--drivers/iio/health/afe4404.c12
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c4
-rw-r--r--drivers/iio/industrialio-sw-trigger.c6
-rw-r--r--drivers/iio/inkern.c6
-rw-r--r--drivers/iio/light/Kconfig2
-rw-r--r--drivers/iio/light/apds9960.c12
-rw-r--r--drivers/iio/light/isl29028.c2
-rw-r--r--drivers/iio/light/max44009.c13
-rw-r--r--drivers/iio/light/tsl2583.c2
-rw-r--r--drivers/iio/light/tsl2772.c1
-rw-r--r--drivers/iio/light/vcnl4035.c3
-rw-r--r--drivers/iio/magnetometer/rm3100-core.c10
-rw-r--r--drivers/iio/pressure/bmp280-core.c2
-rw-r--r--drivers/iio/pressure/dps310.c266
-rw-r--r--drivers/iio/pressure/ms5611.h18
-rw-r--r--drivers/iio/pressure/ms5611_core.c58
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c11
-rw-r--r--drivers/iio/pressure/ms5611_spi.c19
-rw-r--r--drivers/iio/trigger/iio-trig-sysfs.c6
-rw-r--r--drivers/infiniband/core/cma.c6
-rw-r--r--drivers/infiniband/core/cma_configfs.c2
-rw-r--r--drivers/infiniband/core/device.c49
-rw-r--r--drivers/infiniband/core/nldev.c7
-rw-r--r--drivers/infiniband/core/user_mad.c23
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c13
-rw-r--r--drivers/infiniband/core/uverbs_main.c14
-rw-r--r--drivers/infiniband/core/uverbs_std_types_counters.c2
-rw-r--r--drivers/infiniband/core/verbs.c9
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h14
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c634
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c138
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c5
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c2
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c2
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c1
-rw-r--r--drivers/infiniband/hw/hfi1/efivar.c2
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c11
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c6
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c13
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c9
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c9
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c2
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h15
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c110
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c6
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_type.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c10
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c32
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c4
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c6
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c51
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c10
-rw-r--r--drivers/infiniband/hw/qedr/main.c9
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c8
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c44
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.h5
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c24
-rw-r--r--drivers/infiniband/sw/siw/siw_cq.c24
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c3
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c27
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c20
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c42
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c7
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c14
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h8
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c18
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c9
-rw-r--r--drivers/input/joystick/iforce/iforce-serio.c6
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c8
-rw-r--r--drivers/input/joystick/iforce/iforce.h6
-rw-r--r--drivers/input/joystick/xpad.c31
-rw-r--r--drivers/input/keyboard/atkbd.c49
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c10
-rw-r--r--drivers/input/keyboard/ipaq-micro-keys.c3
-rw-r--r--drivers/input/keyboard/omap-keypad.c2
-rw-r--r--drivers/input/misc/adxl34x.c3
-rw-r--r--drivers/input/misc/drv260x.c1
-rw-r--r--drivers/input/misc/powermate.c1
-rw-r--r--drivers/input/misc/rk805-pwrkey.c1
-rw-r--r--drivers/input/mouse/alps.c16
-rw-r--r--drivers/input/mouse/elantech.c10
-rw-r--r--drivers/input/mouse/focaltech.c8
-rw-r--r--drivers/input/mouse/synaptics.c2
-rw-r--r--drivers/input/rmi4/rmi_bus.c2
-rw-r--r--drivers/input/rmi4/rmi_smbus.c50
-rw-r--r--drivers/input/serio/hil_mlc.c2
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h1217
-rw-r--r--drivers/input/serio/i8042.c4
-rw-r--r--drivers/input/serio/serio_raw.c2
-rw-r--r--drivers/input/touchscreen/ads7846.c13
-rw-r--r--drivers/input/touchscreen/elants_i2c.c9
-rw-r--r--drivers/input/touchscreen/goodix.c14
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c2
-rw-r--r--drivers/input/touchscreen/raspberrypi-ts.c3
-rw-r--r--drivers/interconnect/core.c84
-rw-r--r--drivers/interconnect/internal.h42
-rw-r--r--drivers/iommu/amd_iommu.c8
-rw-r--r--drivers/iommu/amd_iommu_init.c115
-rw-r--r--drivers/iommu/amd_iommu_types.h4
-rw-r--r--drivers/iommu/amd_iommu_v2.c1
-rw-r--r--drivers/iommu/arm-smmu-v3.c21
-rw-r--r--drivers/iommu/arm-smmu.c2
-rw-r--r--drivers/iommu/dma-iommu.c16
-rw-r--r--drivers/iommu/dmar.c3
-rw-r--r--drivers/iommu/exynos-iommu.c8
-rw-r--r--drivers/iommu/fsl_pamu.c2
-rw-r--r--drivers/iommu/intel-iommu.c7
-rw-r--r--drivers/iommu/intel-pasid.c7
-rw-r--r--drivers/iommu/iommu.c43
-rw-r--r--drivers/iommu/ipmmu-vmsa.c2
-rw-r--r--drivers/iommu/msm_iommu.c2
-rw-r--r--drivers/iommu/mtk_iommu.c2
-rw-r--r--drivers/iommu/mtk_iommu_v1.c28
-rw-r--r--drivers/iommu/of_iommu.c4
-rw-r--r--drivers/iommu/omap-iommu-debug.c6
-rw-r--r--drivers/iommu/omap-iommu.c2
-rw-r--r--drivers/iommu/qcom_iommu.c9
-rw-r--r--drivers/iommu/rockchip-iommu.c16
-rw-r--r--drivers/iommu/s390-iommu.c2
-rw-r--r--drivers/iommu/tegra-gart.c2
-rw-r--r--drivers/iommu/tegra-smmu.c2
-rw-r--r--drivers/iommu/virtio-iommu.c2
-rw-r--r--drivers/irqchip/Kconfig6
-rw-r--r--drivers/irqchip/irq-alpine-msi.c1
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c14
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c3
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c11
-rw-r--r--drivers/irqchip/irq-gic-pm.c2
-rw-r--r--drivers/irqchip/irq-gic.c17
-rw-r--r--drivers/irqchip/irq-jcore-aic.c11
-rw-r--r--drivers/irqchip/irq-meson-gpio.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c65
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c1
-rw-r--r--drivers/irqchip/irq-stm32-exti.c1
-rw-r--r--drivers/irqchip/irq-tegra.c10
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c2
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c3
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c143
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c19
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c23
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c12
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c2
-rw-r--r--drivers/isdn/mISDN/core.c7
-rw-r--r--drivers/isdn/mISDN/dsp.h2
-rw-r--r--drivers/isdn/mISDN/dsp_cmx.c2
-rw-r--r--drivers/isdn/mISDN/dsp_core.c2
-rw-r--r--drivers/isdn/mISDN/dsp_pipeline.c3
-rw-r--r--drivers/isdn/mISDN/l1oip.h1
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c13
-rw-r--r--drivers/leds/Kconfig2
-rw-r--r--drivers/leds/leds-pwm.c41
-rw-r--r--drivers/leds/trigger/ledtrig-cpu.c17
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c3
-rw-r--r--drivers/leds/trigger/ledtrig-panic.c5
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/macintosh/adb.c2
-rw-r--r--drivers/macintosh/macio-adb.c4
-rw-r--r--drivers/macintosh/macio_asic.c2
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c1
-rw-r--r--drivers/macintosh/windfarm_smu_sensors.c4
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c8
-rw-r--r--drivers/mailbox/mailbox-test.c13
-rw-r--r--drivers/mailbox/ti-msgmgr.c12
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c24
-rw-r--r--drivers/mcb/mcb-core.c15
-rw-r--r--drivers/mcb/mcb-lpc.c35
-rw-r--r--drivers/mcb/mcb-parse.c17
-rw-r--r--drivers/mcb/mcb-pci.c27
-rw-r--r--drivers/md/bcache/alloc.c35
-rw-r--r--drivers/md/bcache/bcache.h5
-rw-r--r--drivers/md/bcache/btree.c21
-rw-r--r--drivers/md/bcache/super.c10
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/bcache/writeback.c73
-rw-r--r--drivers/md/dm-cache-metadata.c54
-rw-r--r--drivers/md/dm-cache-policy-smq.c28
-rw-r--r--drivers/md/dm-cache-target.c15
-rw-r--r--drivers/md/dm-clone-target.c2
-rw-r--r--drivers/md/dm-crypt.c11
-rw-r--r--drivers/md/dm-delay.c17
-rw-r--r--drivers/md/dm-flakey.c34
-rw-r--r--drivers/md/dm-integrity.c32
-rw-r--r--drivers/md/dm-io.c2
-rw-r--r--drivers/md/dm-ioctl.c13
-rw-r--r--drivers/md/dm-raid.c17
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-stats.c7
-rw-r--r--drivers/md/dm-stats.h2
-rw-r--r--drivers/md/dm-table.c2
-rw-r--r--drivers/md/dm-thin-metadata.c67
-rw-r--r--drivers/md/dm-thin.c26
-rw-r--r--drivers/md/dm-verity-fec.c3
-rw-r--r--drivers/md/dm-verity-target.c21
-rw-r--r--drivers/md/dm-verity.h10
-rw-r--r--drivers/md/dm-writecache.c2
-rw-r--r--drivers/md/dm.c36
-rw-r--r--drivers/md/md-bitmap.c90
-rw-r--r--drivers/md/md.c90
-rw-r--r--drivers/md/raid0.c66
-rw-r--r--drivers/md/raid0.h1
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/md/raid10.c139
-rw-r--r--drivers/md/raid5.c16
-rw-r--r--drivers/media/cec/cec-adap.c3
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c52
-rw-r--r--drivers/media/dvb-core/dmxdev.c8
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c51
-rw-r--r--drivers/media/dvb-core/dvb_demux.c4
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c26
-rw-r--r--drivers/media/dvb-core/dvb_net.c38
-rw-r--r--drivers/media/dvb-core/dvb_vb2.c11
-rw-r--r--drivers/media/dvb-core/dvbdev.c128
-rw-r--r--drivers/media/dvb-frontends/ascot2e.c2
-rw-r--r--drivers/media/dvb-frontends/atbm8830.c2
-rw-r--r--drivers/media/dvb-frontends/au8522_dig.c2
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c3
-rw-r--r--drivers/media/dvb-frontends/cx22700.c2
-rw-r--r--drivers/media/dvb-frontends/cx22702.c2
-rw-r--r--drivers/media/dvb-frontends/cx24110.c2
-rw-r--r--drivers/media/dvb-frontends/cx24113.c2
-rw-r--r--drivers/media/dvb-frontends/cx24116.c2
-rw-r--r--drivers/media/dvb-frontends/cx24120.c6
-rw-r--r--drivers/media/dvb-frontends/cx24123.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2880/cxd2880_top.c2
-rw-r--r--drivers/media/dvb-frontends/dib0070.c2
-rw-r--r--drivers/media/dvb-frontends/dib0090.c4
-rw-r--r--drivers/media/dvb-frontends/dib3000mb.c2
-rw-r--r--drivers/media/dvb-frontends/dib3000mc.c2
-rw-r--r--drivers/media/dvb-frontends/dib7000m.c2
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c4
-rw-r--r--drivers/media/dvb-frontends/dib8000.c2
-rw-r--r--drivers/media/dvb-frontends/dib9000.c2
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c2
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c4
-rw-r--r--drivers/media/dvb-frontends/ds3000.c2
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c2
-rw-r--r--drivers/media/dvb-frontends/ec100.c2
-rw-r--r--drivers/media/dvb-frontends/helene.c4
-rw-r--r--drivers/media/dvb-frontends/horus3a.c2
-rw-r--r--drivers/media/dvb-frontends/isl6405.c2
-rw-r--r--drivers/media/dvb-frontends/isl6421.c2
-rw-r--r--drivers/media/dvb-frontends/isl6423.c2
-rw-r--r--drivers/media/dvb-frontends/itd1000.c2
-rw-r--r--drivers/media/dvb-frontends/ix2505v.c2
-rw-r--r--drivers/media/dvb-frontends/l64781.c2
-rw-r--r--drivers/media/dvb-frontends/lg2160.c2
-rw-r--r--drivers/media/dvb-frontends/lgdt3305.c2
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c2
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c2
-rw-r--r--drivers/media/dvb-frontends/lgs8gxx.c2
-rw-r--r--drivers/media/dvb-frontends/lnbh25.c2
-rw-r--r--drivers/media/dvb-frontends/lnbp21.c4
-rw-r--r--drivers/media/dvb-frontends/lnbp22.c2
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c2
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c2
-rw-r--r--drivers/media/dvb-frontends/mb86a16.c2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c2
-rw-r--r--drivers/media/dvb-frontends/mn88443x.c2
-rw-r--r--drivers/media/dvb-frontends/mt312.c2
-rw-r--r--drivers/media/dvb-frontends/mt352.c2
-rw-r--r--drivers/media/dvb-frontends/nxt200x.c2
-rw-r--r--drivers/media/dvb-frontends/nxt6000.c2
-rw-r--r--drivers/media/dvb-frontends/or51132.c2
-rw-r--r--drivers/media/dvb-frontends/or51211.c2
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1409.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1411.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1420.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1432.c2
-rw-r--r--drivers/media/dvb-frontends/s921.c2
-rw-r--r--drivers/media/dvb-frontends/si21xx.c2
-rw-r--r--drivers/media/dvb-frontends/sp887x.c2
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c2
-rw-r--r--drivers/media/dvb-frontends/stb6000.c2
-rw-r--r--drivers/media/dvb-frontends/stb6100.c2
-rw-r--r--drivers/media/dvb-frontends/stv0288.c7
-rw-r--r--drivers/media/dvb-frontends/stv0297.c2
-rw-r--r--drivers/media/dvb-frontends/stv0299.c2
-rw-r--r--drivers/media/dvb-frontends/stv0367.c40
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c2
-rw-r--r--drivers/media/dvb-frontends/stv090x.c2
-rw-r--r--drivers/media/dvb-frontends/stv6110.c2
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c2
-rw-r--r--drivers/media/dvb-frontends/tda10021.c2
-rw-r--r--drivers/media/dvb-frontends/tda10023.c2
-rw-r--r--drivers/media/dvb-frontends/tda10048.c2
-rw-r--r--drivers/media/dvb-frontends/tda1004x.c4
-rw-r--r--drivers/media/dvb-frontends/tda10086.c2
-rw-r--r--drivers/media/dvb-frontends/tda665x.c2
-rw-r--r--drivers/media/dvb-frontends/tda8083.c2
-rw-r--r--drivers/media/dvb-frontends/tda8261.c2
-rw-r--r--drivers/media/dvb-frontends/tda826x.c2
-rw-r--r--drivers/media/dvb-frontends/ts2020.c2
-rw-r--r--drivers/media/dvb-frontends/tua6100.c2
-rw-r--r--drivers/media/dvb-frontends/ves1820.c2
-rw-r--r--drivers/media/dvb-frontends/ves1x93.c2
-rw-r--r--drivers/media/dvb-frontends/zl10036.c2
-rw-r--r--drivers/media/dvb-frontends/zl10039.c2
-rw-r--r--drivers/media/dvb-frontends/zl10353.c2
-rw-r--r--drivers/media/i2c/ad5820.c10
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c531
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c2
-rw-r--r--drivers/media/i2c/ov2680.c246
-rw-r--r--drivers/media/i2c/ov5640.c376
-rw-r--r--drivers/media/i2c/ov5675.c4
-rw-r--r--drivers/media/i2c/ov7670.c2
-rw-r--r--drivers/media/i2c/ov772x.c3
-rw-r--r--drivers/media/i2c/tc358743.c7
-rw-r--r--drivers/media/mc/mc-entity.c31
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c5
-rw-r--r--drivers/media/pci/bt8xx/dst.c2
-rw-r--r--drivers/media/pci/bt8xx/dst_ca.c2
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c11
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c12
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c2
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/pci/cx88/cx88-vbi.c9
-rw-r--r--drivers/media/pci/cx88/cx88-video.c45
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-main.c2
-rw-r--r--drivers/media/pci/dm1105/dm1105.c1
-rw-r--r--drivers/media/pci/dt3155/dt3155.c2
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c7
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.c12
-rw-r--r--drivers/media/pci/meye/meye.c2
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c19
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-ts.c1
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c1
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c1
-rw-r--r--drivers/media/pci/saa7146/hexium_gemini.c2
-rw-r--r--drivers/media/pci/saa7146/hexium_orion.c2
-rw-r--r--drivers/media/pci/saa7146/mxb.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-core.c1
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c2
-rw-r--r--drivers/media/pci/ttpci/av7110_av.c4
-rw-r--r--drivers/media/pci/ttpci/av7110_v4l.c2
-rw-r--r--drivers/media/pci/ttpci/budget-av.c10
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c2
-rw-r--r--drivers/media/pci/tw68/tw68-video.c2
-rw-r--r--drivers/media/pci/tw686x/tw686x-core.c18
-rw-r--r--drivers/media/pci/tw686x/tw686x-video.c2
-rw-r--r--drivers/media/platform/coda/coda-bit.c14
-rw-r--r--drivers/media/platform/cros-ec-cec/cros-ec-cec.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c1
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c6
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c5
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c6
-rw-r--r--drivers/media/platform/omap3isp/isp.c9
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c3
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c15
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c10
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c2
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c1
-rw-r--r--drivers/media/platform/rcar_fdp1.c50
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c15
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c6
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c17
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c12
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c14
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c2
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c1
-rw-r--r--drivers/media/platform/vivid/vivid-core.c22
-rw-r--r--drivers/media/platform/vivid/vivid-core.h2
-rw-r--r--drivers/media/platform/vivid/vivid-rds-gen.c2
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c28
-rw-r--r--drivers/media/platform/xilinx/Kconfig135
-rw-r--r--drivers/media/platform/xilinx/Makefile19
-rw-r--r--drivers/media/platform/xilinx/xilinx-axis-switch.c588
-rw-r--r--drivers/media/platform/xilinx/xilinx-cfa.c394
-rw-r--r--drivers/media/platform/xilinx/xilinx-cresample.c447
-rw-r--r--drivers/media/platform/xilinx/xilinx-csi2rxss.c2060
-rw-r--r--drivers/media/platform/xilinx/xilinx-demosaic.c418
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c1020
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.h23
-rw-r--r--drivers/media/platform/xilinx/xilinx-gamma-coeff.h5385
-rw-r--r--drivers/media/platform/xilinx/xilinx-gamma.c543
-rw-r--r--drivers/media/platform/xilinx/xilinx-hls-common.h36
-rw-r--r--drivers/media/platform/xilinx/xilinx-hls.c481
-rw-r--r--drivers/media/platform/xilinx/xilinx-m2m.c2116
-rw-r--r--drivers/media/platform/xilinx/xilinx-multi-scaler-coeff.h574
-rw-r--r--drivers/media/platform/xilinx/xilinx-multi-scaler.c2449
-rw-r--r--drivers/media/platform/xilinx/xilinx-remapper.c546
-rw-r--r--drivers/media/platform/xilinx/xilinx-rgb2yuv.c566
-rw-r--r--drivers/media/platform/xilinx/xilinx-scaler.c708
-rw-r--r--drivers/media/platform/xilinx/xilinx-scenechange-channel.c452
-rw-r--r--drivers/media/platform/xilinx/xilinx-scenechange-dma.c554
-rw-r--r--drivers/media/platform/xilinx/xilinx-scenechange.c195
-rw-r--r--drivers/media/platform/xilinx/xilinx-scenechange.h245
-rw-r--r--drivers/media/platform/xilinx/xilinx-sdirxss.c2048
-rw-r--r--drivers/media/platform/xilinx/xilinx-switch.c460
-rw-r--r--drivers/media/platform/xilinx/xilinx-tpg.c627
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.c177
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.h14
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c71
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.h5
-rw-r--r--drivers/media/platform/xilinx/xilinx-vpss-csc.c1170
-rw-r--r--drivers/media/platform/xilinx/xilinx-vpss-scaler.c1936
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.c18
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.h1
-rw-r--r--drivers/media/radio/radio-shark.c10
-rw-r--r--drivers/media/radio/radio-shark2.c10
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c4
-rw-r--r--drivers/media/rc/ene_ir.c3
-rw-r--r--drivers/media/rc/gpio-ir-recv.c2
-rw-r--r--drivers/media/rc/imon.c6
-rw-r--r--drivers/media/rc/ir-sharp-decoder.c8
-rw-r--r--drivers/media/rc/lirc_dev.c6
-rw-r--r--drivers/media/tuners/fc0011.c2
-rw-r--r--drivers/media/tuners/fc0012.c2
-rw-r--r--drivers/media/tuners/fc0013.c2
-rw-r--r--drivers/media/tuners/max2165.c2
-rw-r--r--drivers/media/tuners/mc44s803.c2
-rw-r--r--drivers/media/tuners/mt2060.c2
-rw-r--r--drivers/media/tuners/mt2131.c2
-rw-r--r--drivers/media/tuners/mt2266.c2
-rw-r--r--drivers/media/tuners/mxl5005s.c2
-rw-r--r--drivers/media/tuners/qt1010.c17
-rw-r--r--drivers/media/tuners/tda18218.c2
-rw-r--r--drivers/media/tuners/xc4000.c2
-rw-r--r--drivers/media/tuners/xc5000.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c19
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c11
-rw-r--r--drivers/media/usb/dvb-usb-v2/ce6230.c8
-rw-r--r--drivers/media/usb/dvb-usb-v2/ec168.c12
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c20
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c5
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c16
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c4
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-init.c4
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c26
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c5
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c4
-rw-r--r--drivers/media/usb/go7007/go7007-driver.c8
-rw-r--r--drivers/media/usb/go7007/go7007-i2c.c2
-rw-r--r--drivers/media/usb/go7007/go7007-usb.c4
-rw-r--r--drivers/media/usb/gspca/cpia1.c3
-rw-r--r--drivers/media/usb/gspca/vicam.c2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-context.c11
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-dvb.c6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c1
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c11
-rw-r--r--drivers/media/usb/siano/smsusb.c23
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c5
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c3
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c30
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c66
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c2
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c13
-rw-r--r--drivers/media/usb/uvc/uvc_status.c40
-rw-r--r--drivers/media/usb/uvc/uvc_video.c87
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h11
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c68
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c22
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c43
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c45
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c70
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c28
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c22
-rw-r--r--drivers/memory/atmel-sdramc.c6
-rw-r--r--drivers/memory/brcmstb_dpfe.c4
-rw-r--r--drivers/memory/mvebu-devbus.c3
-rw-r--r--drivers/memory/of_memory.c1
-rw-r--r--drivers/memory/pl353-smc.c1
-rw-r--r--drivers/memstick/core/memstick.c5
-rw-r--r--drivers/memstick/core/ms_block.c11
-rw-r--r--drivers/memstick/host/jmb38x_ms.c2
-rw-r--r--drivers/memstick/host/r592.c6
-rw-r--r--drivers/memstick/host/tifm_ms.c2
-rw-r--r--drivers/message/fusion/mptlan.c2
-rw-r--r--drivers/mfd/Kconfig1
-rw-r--r--drivers/mfd/altera-sysmgr.c4
-rw-r--r--drivers/mfd/arizona-core.c2
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c34
-rw-r--r--drivers/mfd/intel-lpss-acpi.c3
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c1
-rw-r--r--drivers/mfd/lp8788-irq.c3
-rw-r--r--drivers/mfd/lp8788.c12
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/pcf50633-adc.c7
-rw-r--r--drivers/mfd/rt5033.c3
-rw-r--r--drivers/mfd/sm501.c7
-rw-r--r--drivers/mfd/stmfx.c2
-rw-r--r--drivers/mfd/stmpe.c4
-rw-r--r--drivers/mfd/syscon.c4
-rw-r--r--drivers/mfd/t7l66xb.c6
-rw-r--r--drivers/misc/Kconfig22
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c6
-rw-r--r--drivers/misc/cxl/guest.c24
-rw-r--r--drivers/misc/cxl/irq.c1
-rw-r--r--drivers/misc/cxl/pci.c21
-rw-r--r--drivers/misc/eeprom/Kconfig1
-rw-r--r--drivers/misc/fastrpc.c49
-rw-r--r--drivers/misc/jesd204b/Kconfig28
-rw-r--r--drivers/misc/jesd204b/Makefile5
-rw-r--r--drivers/misc/jesd204b/gtx7s_cpll_bands.c88
-rw-r--r--drivers/misc/jesd204b/gtx7s_cpll_bands.h31
-rw-r--r--drivers/misc/jesd204b/gtx7s_qpll_bands.c96
-rw-r--r--drivers/misc/jesd204b/gtx7s_qpll_bands.h30
-rw-r--r--drivers/misc/jesd204b/jesd_phy.c384
-rw-r--r--drivers/misc/jesd204b/jesd_phy.h42
-rw-r--r--drivers/misc/jesd204b/s7_gtxe2_drp.h123
-rw-r--r--drivers/misc/jesd204b/xilinx_jesd204b.c399
-rw-r--r--drivers/misc/jesd204b/xilinx_jesd204b.h135
-rw-r--r--drivers/misc/mei/bus-fixup.c8
-rw-r--r--drivers/misc/ocxl/file.c9
-rw-r--r--drivers/misc/pci_endpoint_test.c10
-rw-r--r--drivers/misc/sgi-gru/grufault.c13
-rw-r--r--drivers/misc/sgi-gru/grumain.c22
-rw-r--r--drivers/misc/sgi-gru/grutables.h2
-rw-r--r--drivers/misc/ti-st/st_core.c7
-rw-r--r--drivers/misc/tifm_7xx1.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c8
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c2
-rw-r--r--drivers/misc/xilinx_flex_pm.c646
-rw-r--r--drivers/misc/xilinx_trafgen.c1648
-rw-r--r--drivers/mmc/core/block.c67
-rw-r--r--drivers/mmc/core/core.c34
-rw-r--r--drivers/mmc/core/host.c1
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/mmc_ops.c5
-rw-r--r--drivers/mmc/core/mmc_ops.h2
-rw-r--r--drivers/mmc/core/mmc_test.c3
-rw-r--r--drivers/mmc/core/quirks.h14
-rw-r--r--drivers/mmc/core/regulator.c41
-rw-r--r--drivers/mmc/core/sd.c20
-rw-r--r--drivers/mmc/core/sdio.c8
-rw-r--r--drivers/mmc/core/sdio_bus.c20
-rw-r--r--drivers/mmc/core/sdio_cis.c12
-rw-r--r--drivers/mmc/core/slot-gpio.c6
-rw-r--r--drivers/mmc/host/Kconfig10
-rw-r--r--drivers/mmc/host/alcor.c5
-rw-r--r--drivers/mmc/host/atmel-mci.c12
-rw-r--r--drivers/mmc/host/au1xmmc.c3
-rw-r--r--drivers/mmc/host/bcm2835.c4
-rw-r--r--drivers/mmc/host/cavium-octeon.c1
-rw-r--r--drivers/mmc/host/cavium-thunderx.c4
-rw-r--r--drivers/mmc/host/cqhci.c44
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c15
-rw-r--r--drivers/mmc/host/mmc_spi.c8
-rw-r--r--drivers/mmc/host/mmci.c4
-rw-r--r--drivers/mmc/host/moxart-mmc.c29
-rw-r--r--drivers/mmc/host/mtk-sd.c2
-rw-r--r--drivers/mmc/host/mvsdio.c8
-rw-r--r--drivers/mmc/host/mxcmmc.c4
-rw-r--r--drivers/mmc/host/omap.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c10
-rw-r--r--drivers/mmc/host/pxamci.c11
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c11
-rw-r--r--drivers/mmc/host/sdhci-acpi.c2
-rw-r--r--drivers/mmc/host/sdhci-cqhci.h24
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c62
-rw-r--r--drivers/mmc/host/sdhci-msm.c1
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c458
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c9
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c25
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c2
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c7
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c3
-rw-r--r--drivers/mmc/host/sdhci-sprd.c49
-rw-r--r--drivers/mmc/host/sdhci-tegra.c3
-rw-r--r--drivers/mmc/host/sdhci.c80
-rw-r--r--drivers/mmc/host/sdhci.h14
-rw-r--r--drivers/mmc/host/sdhci_am654.c2
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c71
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/sunxi-mmc.c12
-rw-r--r--drivers/mmc/host/toshsd.c6
-rw-r--r--drivers/mmc/host/usdhi6rol0.c6
-rw-r--r--drivers/mmc/host/via-sdmmc.c4
-rw-r--r--drivers/mmc/host/vub300.c17
-rw-r--r--drivers/mmc/host/wbsd.c10
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c15
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c29
-rw-r--r--drivers/mtd/chips/cfi_probe.c45
-rw-r--r--drivers/mtd/devices/docg3.c7
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c8
-rw-r--r--drivers/mtd/lpddr/lpddr2_nvm.c2
-rw-r--r--drivers/mtd/maps/physmap-core.c13
-rw-r--r--drivers/mtd/maps/physmap-versatile.c2
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c4
-rw-r--r--drivers/mtd/mtdblock.c12
-rw-r--r--drivers/mtd/mtdcore.c4
-rw-r--r--drivers/mtd/nand/raw/Kconfig14
-rw-r--r--drivers/mtd/nand/raw/Makefile2
-rw-r--r--drivers/mtd/nand/raw/arasan_nand.c1465
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c1
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c112
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c7
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c8
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.h8
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c5
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c35
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c26
-rw-r--r--drivers/mtd/nand/raw/nand_base.c15
-rw-r--r--drivers/mtd/nand/raw/nand_ecc.c2
-rw-r--r--drivers/mtd/nand/raw/nand_onfi.c8
-rw-r--r--drivers/mtd/nand/raw/nand_timings.c6
-rw-r--r--drivers/mtd/nand/raw/omap_elm.c24
-rw-r--r--drivers/mtd/nand/raw/pl353_nand.c1398
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c2
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c2
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c3
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c2
-rw-r--r--drivers/mtd/nand/spi/macronix.c16
-rw-r--r--drivers/mtd/nand/spi/micron.c2
-rw-r--r--drivers/mtd/nand/spi/toshiba.c4
-rw-r--r--drivers/mtd/parsers/afs.c4
-rw-r--r--drivers/mtd/parsers/redboot.c1
-rw-r--r--drivers/mtd/sm_ftl.c2
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c734
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c2
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c889
-rw-r--r--drivers/mtd/ubi/build.c23
-rw-r--r--drivers/mtd/ubi/eba.c21
-rw-r--r--drivers/mtd/ubi/vmt.c18
-rw-r--r--drivers/mtd/ubi/wl.c30
-rw-r--r--drivers/net/arcnet/arc-rimi.c4
-rw-r--r--drivers/net/arcnet/arcdevice.h8
-rw-r--r--drivers/net/arcnet/arcnet.c68
-rw-r--r--drivers/net/arcnet/com20020-isa.c4
-rw-r--r--drivers/net/arcnet/com20020-pci.c119
-rw-r--r--drivers/net/arcnet/com20020_cs.c2
-rw-r--r--drivers/net/arcnet/com90io.c4
-rw-r--r--drivers/net/arcnet/com90xx.c4
-rw-r--r--drivers/net/bonding/bond_3ad.c39
-rw-r--r--drivers/net/bonding/bond_alb.c13
-rw-r--r--drivers/net/bonding/bond_debugfs.c2
-rw-r--r--drivers/net/bonding/bond_main.c121
-rw-r--r--drivers/net/can/cc770/cc770_isa.c10
-rw-r--r--drivers/net/can/dev/dev.c10
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/kvaser_pciefd.c51
-rw-r--r--drivers/net/can/m_can/tcan4x5x.c5
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c8
-rw-r--r--drivers/net/can/pch_can.c8
-rw-r--r--drivers/net/can/rcar/rcar_can.c8
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c6
-rw-r--r--drivers/net/can/sja1000/sja1000.c7
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c10
-rw-r--r--drivers/net/can/spi/hi311x.c5
-rw-r--r--drivers/net/can/spi/mcp251x.c18
-rw-r--r--drivers/net/can/sun4i_can.c9
-rw-r--r--drivers/net/can/usb/ems_usb.c2
-rw-r--r--drivers/net/can/usb/esd_usb2.c10
-rw-r--r--drivers/net/can/usb/gs_usb.c11
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h32
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c118
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c216
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c522
-rw-r--r--drivers/net/can/usb/mcba_usb.c10
-rw-r--r--drivers/net/can/usb/usb_8dev.c7
-rw-r--r--drivers/net/can/vxcan.c7
-rw-r--r--drivers/net/can/xilinx_can.c24
-rw-r--r--drivers/net/dsa/b53/b53_common.c1
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c14
-rw-r--r--drivers/net/dsa/dsa_loop.c25
-rw-r--r--drivers/net/dsa/lan9303-core.c10
-rw-r--r--drivers/net/dsa/lan9303_mdio.c4
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c7
-rw-r--r--drivers/net/dsa/mt7530.c6
-rw-r--r--drivers/net/dsa/mv88e6060.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c24
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c31
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h1
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c11
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c8
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c20
-rw-r--r--drivers/net/ethernet/amd/atarilance.c2
-rw-r--r--drivers/net/ethernet/amd/lance.c2
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c23
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c17
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c54
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c26
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c4
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h2
-rw-r--r--drivers/net/ethernet/apple/bmac.c2
-rw-r--r--drivers/net/ethernet/apple/mace.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c1
-rw-r--r--drivers/net/ethernet/atheros/alx/ethtool.c5
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c7
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c12
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c6
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c13
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c58
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c43
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c10
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c65
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h4
-rw-r--r--drivers/net/ethernet/cadence/macb.h33
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c384
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c14
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c34
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c1
-rw-r--r--drivers/net/ethernet/cortina/gemini.c45
-rw-r--r--drivers/net/ethernet/cortina/gemini.h4
-rw-r--r--drivers/net/ethernet/dnet.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c7
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c5
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c19
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c6
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c9
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c29
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c25
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c8
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c1
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c15
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c55
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_alloc.h17
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c106
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c92
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c77
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c418
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.c15
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c45
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_register.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c11
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h6
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c11
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c153
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c24
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h4
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c8
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c13
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c10
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c37
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c36
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c61
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c177
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c130
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c34
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c119
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c87
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h51
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c45
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c149
-rw-r--r--drivers/net/ethernet/lantiq_etop.c1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c72
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h1
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c10
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c3
-rw-r--r--drivers/net/ethernet/marvell/sky2.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c4
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c2
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c21
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c29
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c1
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c4
-rw-r--r--drivers/net/ethernet/neterion/s2io.c33
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c3
-rw-r--r--drivers/net/ethernet/ni/nixge.c30
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c6
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c72
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c5
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c201
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c17
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c23
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c15
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c35
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c2
-rw-r--r--drivers/net/ethernet/socionext/netsec.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c72
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c162
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c8
-rw-r--r--drivers/net/ethernet/sun/cassini.c8
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c3
-rw-r--r--drivers/net/ethernet/sun/niu.c8
-rw-r--r--drivers/net/ethernet/sun/sunhme.c4
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c3
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c24
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c41
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h5
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c2
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c5
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig79
-rw-r--r--drivers/net/ethernet/xilinx/Makefile12
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c33
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_mdio.c5
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h737
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_dma.c503
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2689
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mcdma.c1043
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c105
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c64
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_cb.c177
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_ep.c161
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_preemption.c223
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_preemption.h159
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_ptp.h88
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_ptp_clock.c325
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_ptp_xmit.c369
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_qci.c151
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_shaper.c232
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_shaper.h151
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_switch.c807
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_switch.h364
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_timer.h73
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c5
-rw-r--r--drivers/net/fddi/defxx.c22
-rw-r--r--drivers/net/fjes/fjes_hw.c37
-rw-r--r--drivers/net/geneve.c21
-rw-r--r--drivers/net/gtp.c21
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hamradio/scc.c6
-rw-r--r--drivers/net/hyperv/Kconfig1
-rw-r--r--drivers/net/hyperv/hyperv_net.h3
-rw-r--r--drivers/net/hyperv/netvsc.c4
-rw-r--r--drivers/net/hyperv/netvsc_drv.c145
-rw-r--r--drivers/net/ieee802154/adf7242.c8
-rw-r--r--drivers/net/ieee802154/ca8210.c24
-rw-r--r--drivers/net/ieee802154/cc2520.c3
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c68
-rw-r--r--drivers/net/ipvlan/ipvlan_l3s.c5
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/ipvlan/ipvtap.c4
-rw-r--r--drivers/net/loopback.c2
-rw-r--r--drivers/net/macsec.c73
-rw-r--r--drivers/net/macvlan.c12
-rw-r--r--drivers/net/net_failover.c8
-rw-r--r--drivers/net/netdevsim/bpf.c8
-rw-r--r--drivers/net/ntb_netdev.c13
-rw-r--r--drivers/net/phy/Kconfig5
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/broadcom.c13
-rw-r--r--drivers/net/phy/dp83822.c4
-rw-r--r--drivers/net/phy/dp83867.c138
-rw-r--r--drivers/net/phy/mdio-i2c.c3
-rw-r--r--drivers/net/phy/mdio-mux-meson-g12a.c23
-rw-r--r--drivers/net/phy/mdio-thunder.c1
-rw-r--r--drivers/net/phy/mdio-xgene.c2
-rw-r--r--drivers/net/phy/mdio_bus.c9
-rw-r--r--drivers/net/phy/meson-gxl.c4
-rw-r--r--drivers/net/phy/micrel.c1
-rw-r--r--drivers/net/phy/microchip.c32
-rw-r--r--drivers/net/phy/phy_device.c1
-rw-r--r--drivers/net/phy/sfp.c2
-rw-r--r--drivers/net/phy/smsc.c5
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c6
-rw-r--r--drivers/net/phy/xilinx_phy.c160
-rw-r--r--drivers/net/plip/plip.c6
-rw-r--r--drivers/net/ppp/ppp_async.c4
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/ppp/ppp_synctty.c6
-rw-r--r--drivers/net/tap.c12
-rw-r--r--drivers/net/team/team.c58
-rw-r--r--drivers/net/thunderbolt.c22
-rw-r--r--drivers/net/tun.c146
-rw-r--r--drivers/net/usb/aqc111.c8
-rw-r--r--drivers/net/usb/ax88172a.c4
-rw-r--r--drivers/net/usb/ax88179_178a.c20
-rw-r--r--drivers/net/usb/cdc_ether.c28
-rw-r--r--drivers/net/usb/cdc_mbim.c5
-rw-r--r--drivers/net/usb/cdc_ncm.c435
-rw-r--r--drivers/net/usb/dm9601.c7
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c8
-rw-r--r--drivers/net/usb/kalmia.c8
-rw-r--r--drivers/net/usb/lan78xx.c990
-rw-r--r--drivers/net/usb/plusb.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c11
-rw-r--r--drivers/net/usb/r8152.c18
-rw-r--r--drivers/net/usb/rndis_host.c3
-rw-r--r--drivers/net/usb/smsc75xx.c11
-rw-r--r--drivers/net/usb/smsc95xx.c8
-rw-r--r--drivers/net/usb/sr9700.c2
-rw-r--r--drivers/net/usb/sr9800.c4
-rw-r--r--drivers/net/usb/usbnet.c21
-rw-r--r--drivers/net/usb/zaurus.c21
-rw-r--r--drivers/net/veth.c9
-rw-r--r--drivers/net/virtio_net.c66
-rw-r--r--drivers/net/vxlan.c80
-rw-r--r--drivers/net/wan/farsync.c6
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c18
-rw-r--r--drivers/net/wan/lapbether.c5
-rw-r--r--drivers/net/wan/lmc/lmc_proto.c4
-rw-r--r--drivers/net/wan/z85230.c2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c79
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c15
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c54
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c29
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c36
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c27
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c125
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c55
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c25
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c18
-rw-r--r--drivers/net/wireless/atmel/atmel_cs.c13
-rw-r--r--drivers/net/wireless/broadcom/b43/b43.h18
-rw-r--r--drivers/net/wireless/broadcom/b43/debugfs.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c10
-rw-r--r--drivers/net/wireless/broadcom/b43/lo.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c22
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/pio.c6
-rw-r--r--drivers/net/wireless/broadcom/b43/xmit.c12
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/b43legacy.h2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/debugfs.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c35
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h2
-rw-r--r--drivers/net/wireless/cisco/airo.c5
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c121
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c67
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c24
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c14
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c5
-rw-r--r--drivers/net/wireless/intersil/orinoco/hw.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_cs.c13
-rw-r--r--drivers/net/wireless/intersil/orinoco/spectrum_cs.c13
-rw-r--r--drivers/net/wireless/intersil/p54/main.c2
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c30
-rw-r--r--drivers/net/wireless/marvell/libertas/Kconfig2
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c13
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c3
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c176
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_rx.c26
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c30
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/core.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c13
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c34
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c11
-rw-r--r--drivers/net/wireless/ray_cs.c36
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h3
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c72
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c144
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.c82
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c259
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.c24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.h11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c72
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c347
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c98
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c76
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c110
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h7
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c4
-rw-r--r--drivers/net/wireless/rndis_wlan.c19
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_coex.c1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c3
-rw-r--r--drivers/net/wireless/st/cw1200/scan.c3
-rw-r--r--drivers/net/wireless/wl3501_cs.c53
-rw-r--r--drivers/net/xen-netback/common.h16
-rw-r--r--drivers/net/xen-netback/interface.c25
-rw-r--r--drivers/net/xen-netback/netback.c363
-rw-r--r--drivers/net/xen-netback/rx.c10
-rw-r--r--drivers/net/xen-netback/xenbus.c3
-rw-r--r--drivers/net/xen-netfront.c6
-rw-r--r--drivers/nfc/fdp/i2c.c4
-rw-r--r--drivers/nfc/nfcmrvl/i2c.c7
-rw-r--r--drivers/nfc/nfcsim.c4
-rw-r--r--drivers/nfc/pn533/pn533.c4
-rw-r--r--drivers/nfc/pn533/usb.c45
-rw-r--r--drivers/nfc/s3fwrn5/core.c8
-rw-r--r--drivers/nfc/st-nci/ndlc.c6
-rw-r--r--drivers/nfc/st-nci/se.c12
-rw-r--r--drivers/nfc/st21nfca/se.c6
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c7
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c7
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c7
-rw-r--r--drivers/ntb/ntb_transport.c21
-rw-r--r--drivers/ntb/test/ntb_tool.c10
-rw-r--r--drivers/nvdimm/of_pmem.c8
-rw-r--r--drivers/nvdimm/region_devs.c8
-rw-r--r--drivers/nvme/host/core.c60
-rw-r--r--drivers/nvme/host/nvme.h21
-rw-r--r--drivers/nvme/host/pci.c30
-rw-r--r--drivers/nvme/host/rdma.c3
-rw-r--r--drivers/nvme/host/tcp.c51
-rw-r--r--drivers/nvme/host/trace.h15
-rw-r--r--drivers/nvme/target/core.c31
-rw-r--r--drivers/nvme/target/fabrics-cmd.c15
-rw-r--r--drivers/nvme/target/fc.c12
-rw-r--r--drivers/nvme/target/fcloop.c48
-rw-r--r--drivers/nvme/target/nvmet.h5
-rw-r--r--drivers/nvme/target/tcp.c88
-rw-r--r--drivers/nvmem/core.c3
-rw-r--r--drivers/nvmem/imx-ocotp.c6
-rw-r--r--drivers/nvmem/zynqmp_nvmem.c184
-rw-r--r--drivers/of/Kconfig7
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/base.c405
-rw-r--r--drivers/of/configfs.c293
-rw-r--r--drivers/of/device.c7
-rw-r--r--drivers/of/dynamic.c195
-rw-r--r--drivers/of/fdt.c19
-rw-r--r--drivers/of/irq.c46
-rw-r--r--drivers/of/of_mdio.c1
-rw-r--r--drivers/of/overlay.c20
-rw-r--r--drivers/of/platform.c10
-rw-r--r--drivers/of/property.c70
-rw-r--r--drivers/of/unittest-data/Makefile8
-rw-r--r--drivers/of/unittest-data/overlay_gpio_01.dts23
-rw-r--r--drivers/of/unittest-data/overlay_gpio_02a.dts16
-rw-r--r--drivers/of/unittest-data/overlay_gpio_02b.dts16
-rw-r--r--drivers/of/unittest-data/overlay_gpio_03.dts23
-rw-r--r--drivers/of/unittest-data/overlay_gpio_04a.dts16
-rw-r--r--drivers/of/unittest-data/overlay_gpio_04b.dts16
-rw-r--r--drivers/of/unittest-data/tests-phandle.dtsi10
-rw-r--r--drivers/of/unittest.c345
-rw-r--r--drivers/opp/core.c4
-rw-r--r--drivers/opp/debugfs.c2
-rw-r--r--drivers/parisc/ccio-dma.c12
-rw-r--r--drivers/parisc/iosapic.c5
-rw-r--r--drivers/parisc/iosapic_private.h4
-rw-r--r--drivers/parisc/led.c7
-rw-r--r--drivers/parport/parport_pc.c23
-rw-r--r--drivers/parport/parport_serial.c64
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/controller/Kconfig8
-rw-r--r--drivers/pci/controller/Makefile1
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c7
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c25
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c57
-rw-r--r--drivers/pci/controller/pci-ftpci100.c14
-rw-r--r--drivers/pci/controller/pci-hyperv.c18
-rw-r--r--drivers/pci/controller/pci-tegra.c17
-rw-r--r--drivers/pci/controller/pcie-mediatek.c10
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c65
-rw-r--r--drivers/pci/controller/pcie-rockchip.c17
-rw-r--r--drivers/pci/controller/pcie-rockchip.h11
-rw-r--r--drivers/pci/controller/pcie-xdma-pl.c892
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c27
-rw-r--r--drivers/pci/hotplug/pciehp.h3
-rw-r--r--drivers/pci/hotplug/pciehp_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c8
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c31
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c15
-rw-r--r--drivers/pci/irq.c2
-rw-r--r--drivers/pci/msi.c11
-rw-r--r--drivers/pci/pci-acpi.c2
-rw-r--r--drivers/pci/pci-driver.c4
-rw-r--r--drivers/pci/pci-sysfs.c18
-rw-r--r--drivers/pci/pci.c68
-rw-r--r--drivers/pci/pci.h53
-rw-r--r--drivers/pci/pcie/aer.c11
-rw-r--r--drivers/pci/pcie/aspm.c102
-rw-r--r--drivers/pci/pcie/dpc.c2
-rw-r--r--drivers/pci/quirks.c89
-rw-r--r--drivers/pci/setup-res.c11
-rw-r--r--drivers/pci/switch/switchtec.c29
-rw-r--r--drivers/pcmcia/cs.c1
-rw-r--r--drivers/pcmcia/ds.c14
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c2
-rw-r--r--drivers/perf/arm_dsu_pmu.c6
-rw-r--r--drivers/perf/arm_pmu_platform.c2
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c54
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c24
-rw-r--r--drivers/phy/Kconfig9
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/hisilicon/phy-hisi-inno-usb2.c2
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c38
-rw-r--r--drivers/phy/phy-zynqmp.c1591
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hsic.c6
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c4
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-hdmi.c18
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c4
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c3
-rw-r--r--drivers/phy/st/phy-miphy28lp.c42
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c2
-rw-r--r--drivers/phy/tegra/xusb.c2
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c4
-rw-r--r--drivers/pinctrl/Kconfig8
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c2
-rw-r--r--drivers/pinctrl/cirrus/Kconfig3
-rw-r--r--drivers/pinctrl/core.c6
-rw-r--r--drivers/pinctrl/devicetree.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c15
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c39
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c9
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg.c1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c4
-rw-r--r--drivers/pinctrl/pinconf-generic.c4
-rw-r--r--drivers/pinctrl/pinctrl-amd.c104
-rw-r--r--drivers/pinctrl/pinctrl-amd.h1
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c15
-rw-r--r--drivers/pinctrl/pinctrl-at91.c2
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c2
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c23
-rw-r--r--drivers/pinctrl/pinctrl-rza2.c17
-rw-r--r--drivers/pinctrl/pinctrl-single.c4
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c1074
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8916.c4
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c7
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c24
-rw-r--r--drivers/platform/chrome/cros_ec_chardev.c5
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c101
-rw-r--r--drivers/platform/olpc/olpc-ec.c2
-rw-r--r--drivers/platform/x86/acer-wmi.c18
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c3
-rw-r--r--drivers/platform/x86/asus-wmi.c2
-rw-r--r--drivers/platform/x86/asus-wmi.h2
-rw-r--r--drivers/platform/x86/hdaps.c4
-rw-r--r--drivers/platform/x86/hp-wmi.c15
-rw-r--r--drivers/platform/x86/huawei-wmi.c2
-rw-r--r--drivers/platform/x86/intel-hid.c21
-rw-r--r--drivers/platform/x86/intel_pmc_core_pltdrv.c9
-rw-r--r--drivers/platform/x86/intel_telemetry_core.c4
-rw-r--r--drivers/platform/x86/msi-laptop.c22
-rw-r--r--drivers/platform/x86/mxm-wmi.c8
-rw-r--r--drivers/platform/x86/pmc_atom.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c21
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c1
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c49
-rw-r--r--drivers/platform/x86/wmi.c131
-rw-r--r--drivers/pnp/core.c4
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c12
-rw-r--r--drivers/power/avs/smartreflex.c1
-rw-r--r--drivers/power/supply/ab8500_btemp.c6
-rw-r--r--drivers/power/supply/ab8500_fg.c6
-rw-r--r--drivers/power/supply/adp5061.c6
-rw-r--r--drivers/power/supply/bq24190_charger.c13
-rw-r--r--drivers/power/supply/bq27xxx_battery.c51
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c5
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c2
-rw-r--r--drivers/power/supply/da9150-charger.c1
-rw-r--r--drivers/power/supply/generic-adc-battery.c3
-rw-r--r--drivers/power/supply/power_supply_core.c72
-rw-r--r--drivers/power/supply/power_supply_leds.c5
-rw-r--r--drivers/power/supply/power_supply_sysfs.c3
-rw-r--r--drivers/power/supply/sbs-charger.c2
-rw-r--r--drivers/power/supply/sc27xx_fuel_gauge.c9
-rw-r--r--drivers/powercap/Kconfig4
-rw-r--r--drivers/powercap/intel_rapl_common.c3
-rw-r--r--drivers/powercap/intel_rapl_msr.c1
-rw-r--r--drivers/powercap/powercap_sys.c14
-rw-r--r--drivers/ptp/ptp_chardev.c3
-rw-r--r--drivers/ptp/ptp_clock.c5
-rw-r--r--drivers/ptp/ptp_private.h8
-rw-r--r--drivers/ptp/ptp_qoriq.c2
-rw-r--r--drivers/ptp/ptp_sysfs.c3
-rw-r--r--drivers/pwm/pwm-brcmstb.c4
-rw-r--r--drivers/pwm/pwm-cros-ec.c1
-rw-r--r--drivers/pwm/pwm-hibvt.c1
-rw-r--r--drivers/pwm/pwm-imx-tpm.c7
-rw-r--r--drivers/pwm/pwm-lpc32xx.c16
-rw-r--r--drivers/pwm/pwm-meson.c40
-rw-r--r--drivers/pwm/pwm-mtk-disp.c94
-rw-r--r--drivers/pwm/pwm-sifive.c21
-rw-r--r--drivers/pwm/pwm-sprd.c1
-rw-r--r--drivers/pwm/pwm-sti.c75
-rw-r--r--drivers/pwm/pwm-stm32-lp.c2
-rw-r--r--drivers/pwm/sysfs.c17
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c15
-rw-r--r--drivers/rapidio/rio-scan.c8
-rw-r--r--drivers/rapidio/rio.c9
-rw-r--r--drivers/regulator/core.c207
-rw-r--r--drivers/regulator/da9211-regulator.c11
-rw-r--r--drivers/regulator/fan53555.c11
-rw-r--r--drivers/regulator/fixed.c4
-rw-r--r--drivers/regulator/max77802-regulator.c34
-rw-r--r--drivers/regulator/of_regulator.c6
-rw-r--r--drivers/regulator/pfuze100-regulator.c2
-rw-r--r--drivers/regulator/pwm-regulator.c3
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c24
-rw-r--r--drivers/regulator/s5m8767.c6
-rw-r--r--drivers/regulator/slg51000-regulator.c2
-rw-r--r--drivers/regulator/stm32-pwr.c7
-rw-r--r--drivers/regulator/twl6030-regulator.c17
-rw-r--r--drivers/remoteproc/Kconfig20
-rw-r--r--drivers/remoteproc/Makefile2
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c59
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c1
-rw-r--r--drivers/remoteproc/qcom_sysmon.c5
-rw-r--r--drivers/remoteproc/qcom_wcnss.c10
-rw-r--r--drivers/remoteproc/remoteproc_internal.h23
-rw-r--r--drivers/remoteproc/remoteproc_sysfs.c140
-rw-r--r--drivers/remoteproc/st_remoteproc.c5
-rw-r--r--drivers/remoteproc/stm32_rproc.c6
-rw-r--r--drivers/remoteproc/zynq_remoteproc.c494
-rw-r--r--drivers/remoteproc/zynqmp_r5_remoteproc.c975
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c3
-rw-r--r--drivers/reset/hisilicon/hi6220_reset.c2
-rw-r--r--drivers/reset/reset-ti-sci.c2
-rw-r--r--drivers/rpmsg/qcom_glink_native.c8
-rw-r--r--drivers/rpmsg/qcom_smd.c5
-rw-r--r--drivers/rpmsg/rpmsg_core.c37
-rw-r--r--drivers/rpmsg/rpmsg_internal.h5
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c1
-rw-r--r--drivers/rtc/Kconfig3
-rw-r--r--drivers/rtc/rtc-ds1685.c2
-rw-r--r--drivers/rtc/rtc-meson-vrtc.c4
-rw-r--r--drivers/rtc/rtc-mxc_v2.c4
-rw-r--r--drivers/rtc/rtc-omap.c1
-rw-r--r--drivers/rtc/rtc-pcf85063.c8
-rw-r--r--drivers/rtc/rtc-pcf85363.c2
-rw-r--r--drivers/rtc/rtc-pic32.c8
-rw-r--r--drivers/rtc/rtc-pm8xxx.c24
-rw-r--r--drivers/rtc/rtc-snvs.c16
-rw-r--r--drivers/rtc/rtc-st-lpc.c3
-rw-r--r--drivers/rtc/rtc-sun6i.c32
-rw-r--r--drivers/rtc/rtc-zynqmp.c30
-rw-r--r--drivers/s390/block/dasd.c155
-rw-r--r--drivers/s390/block/dasd_3990_erp.c2
-rw-r--r--drivers/s390/block/dasd_alias.c9
-rw-r--r--drivers/s390/block/dasd_diag.c7
-rw-r--r--drivers/s390/block/dasd_eckd.c88
-rw-r--r--drivers/s390/block/dasd_fba.c7
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dasd_ioctl.c47
-rw-r--r--drivers/s390/block/scm_blk.c7
-rw-r--r--drivers/s390/char/zcore.c11
-rw-r--r--drivers/s390/cio/device.c5
-rw-r--r--drivers/s390/cio/qdio.h25
-rw-r--r--drivers/s390/cio/qdio_main.c62
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c14
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c3
-rw-r--r--drivers/s390/crypto/zcrypt_api.c1
-rw-r--r--drivers/s390/net/ctcm_main.c11
-rw-r--r--drivers/s390/net/lcs.c8
-rw-r--r--drivers/s390/net/netiucv.c9
-rw-r--r--drivers/s390/net/qeth_l3_main.c9
-rw-r--r--drivers/s390/scsi/zfcp_aux.c9
-rw-r--r--drivers/s390/scsi/zfcp_fc.c35
-rw-r--r--drivers/s390/scsi/zfcp_fc.h6
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c6
-rw-r--r--drivers/scsi/3w-9xxx.c2
-rw-r--r--drivers/scsi/3w-xxxx.c4
-rw-r--r--drivers/scsi/53c700.c2
-rw-r--r--drivers/scsi/Kconfig4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c3
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c4
-rw-r--r--drivers/scsi/be2iscsi/be_main.c1
-rw-r--r--drivers/scsi/bfa/bfa.h9
-rw-r--r--drivers/scsi/bfa/bfa_core.c4
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h8
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c11
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c9
-rw-r--r--drivers/scsi/csiostor/csio_defs.h18
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c8
-rw-r--r--drivers/scsi/csiostor/csio_lnode.h13
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c6
-rw-r--r--drivers/scsi/dpt_i2o.c278
-rw-r--r--drivers/scsi/dpti.h1
-rw-r--r--drivers/scsi/fcoe/fcoe.c1
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c19
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c2
-rw-r--r--drivers/scsi/hosts.c9
-rw-r--r--drivers/scsi/hpsa.c11
-rw-r--r--drivers/scsi/ipr.c51
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/iscsi_tcp.c9
-rw-r--r--drivers/scsi/libfc/fc_fcp.c18
-rw-r--r--drivers/scsi/libfc/fc_lport.c6
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c27
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c8
-rw-r--r--drivers/scsi/megaraid.c1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c27
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c5
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c6
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c16
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c2
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c2
-rw-r--r--drivers/scsi/qedf/qedf_dbg.h2
-rw-r--r--drivers/scsi/qedf/qedf_debugfs.c35
-rw-r--r--drivers/scsi/qedf/qedf_main.c8
-rw-r--r--drivers/scsi/qedi/qedi_main.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c58
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c28
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c29
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c73
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c12
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c15
-rw-r--r--drivers/scsi/raid_class.c47
-rw-r--r--drivers/scsi/scsi.c11
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_lib.c9
-rw-r--r--drivers/scsi/scsi_proc.c30
-rw-r--r--drivers/scsi/scsi_scan.c7
-rw-r--r--drivers/scsi/scsi_sysfs.c8
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c8
-rw-r--r--drivers/scsi/ses.c74
-rw-r--r--drivers/scsi/sg.c57
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c4
-rw-r--r--drivers/scsi/snic/snic_disc.c5
-rw-r--r--drivers/scsi/stex.c21
-rw-r--r--drivers/scsi/storvsc_drv.c8
-rw-r--r--drivers/scsi/ufs/ufshcd.c3
-rw-r--r--drivers/siox/siox-core.c2
-rw-r--r--drivers/slimbus/stream.c8
-rw-r--r--drivers/soc/amlogic/meson-mx-socinfo.c1
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm-arm.c50
-rw-r--r--drivers/soc/fsl/Kconfig1
-rw-r--r--drivers/soc/fsl/guts.c2
-rw-r--r--drivers/soc/fsl/qe/Kconfig1
-rw-r--r--drivers/soc/qcom/Kconfig15
-rw-r--r--drivers/soc/qcom/Makefile3
-rw-r--r--drivers/soc/qcom/llcc-qcom.c (renamed from drivers/soc/qcom/llcc-slice.c)63
-rw-r--r--drivers/soc/qcom/llcc-sdm845.c100
-rw-r--r--drivers/soc/qcom/qcom_aoss.c4
-rw-r--r--drivers/soc/qcom/qmi_encdec.c4
-rw-r--r--drivers/soc/qcom/smem_state.c3
-rw-r--r--drivers/soc/qcom/smsm.c20
-rw-r--r--drivers/soc/renesas/r8a77980-sysc.c3
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c27
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c6
-rw-r--r--drivers/soc/ux500/ux500-soc-id.c10
-rw-r--r--drivers/soc/xilinx/Kconfig7
-rw-r--r--drivers/soc/xilinx/Makefile4
-rw-r--r--drivers/soc/xilinx/xlnx_vcu.c579
-rw-r--r--drivers/soc/xilinx/xlnx_vcu_clk.c915
-rw-r--r--drivers/soc/xilinx/xlnx_vcu_core.c168
-rw-r--r--drivers/soc/xilinx/zynqmp/Makefile1
-rw-r--r--drivers/soc/xilinx/zynqmp/tap_delays.c69
-rw-r--r--drivers/soc/xilinx/zynqmp_pm_domains.c10
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c121
-rw-r--r--drivers/soundwire/bus_type.c8
-rw-r--r--drivers/soundwire/stream.c7
-rw-r--r--drivers/spi/Kconfig14
-rw-r--r--drivers/spi/spi-bcm-qspi.c14
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c20
-rw-r--r--drivers/spi/spi-bcm63xx.c2
-rw-r--r--drivers/spi/spi-fsl-cpm.c23
-rw-r--r--drivers/spi/spi-fsl-dspi.c19
-rw-r--r--drivers/spi/spi-fsl-spi.c51
-rw-r--r--drivers/spi/spi-geni-qcom.c2
-rw-r--r--drivers/spi/spi-gpio.c16
-rw-r--r--drivers/spi/spi-imx.c27
-rw-r--r--drivers/spi/spi-mem.c2
-rw-r--r--drivers/spi/spi-mt65xx.c22
-rw-r--r--drivers/spi/spi-mt7621.c8
-rw-r--r--drivers/spi/spi-npcm-fiu.c5
-rw-r--r--drivers/spi/spi-nxp-fspi.c7
-rw-r--r--drivers/spi/spi-omap-100k.c1
-rw-r--r--drivers/spi/spi-ppc4xx.c5
-rw-r--r--drivers/spi/spi-qup.c80
-rw-r--r--drivers/spi/spi-rspi.c4
-rw-r--r--drivers/spi/spi-s3c64xx.c9
-rw-r--r--drivers/spi/spi-sh-msiof.c17
-rw-r--r--drivers/spi/spi-stm32.c3
-rw-r--r--drivers/spi/spi-synquacer.c8
-rw-r--r--drivers/spi/spi-tegra20-sflash.c6
-rw-r--r--drivers/spi/spi-xilinx.c1055
-rw-r--r--drivers/spi/spi-zynq-qspi.c602
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c403
-rw-r--r--drivers/spi/spi.c12
-rw-r--r--drivers/spi/spidev.c23
-rw-r--r--drivers/spmi/spmi-pmic-arb.c13
-rw-r--r--drivers/spmi/spmi.c3
-rw-r--r--drivers/ssb/driver_chipcommon.c4
-rw-r--r--drivers/staging/Kconfig16
-rw-r--r--drivers/staging/Makefile7
-rw-r--r--drivers/staging/apf/Kconfig20
-rw-r--r--drivers/staging/apf/Makefile9
-rw-r--r--drivers/staging/apf/dt-binding.txt17
-rw-r--r--drivers/staging/apf/xilinx-dma-apf.c1232
-rw-r--r--drivers/staging/apf/xilinx-dma-apf.h234
-rw-r--r--drivers/staging/apf/xlnk-eng.c242
-rw-r--r--drivers/staging/apf/xlnk-eng.h33
-rw-r--r--drivers/staging/apf/xlnk-ioctl.h37
-rw-r--r--drivers/staging/apf/xlnk-sysdef.h34
-rw-r--r--drivers/staging/apf/xlnk.c1580
-rw-r--r--drivers/staging/apf/xlnk.h175
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c398
-rw-r--r--drivers/staging/clocking-wizard/dt-binding.txt4
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1760.c2
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c7
-rw-r--r--drivers/staging/fclk/Kconfig9
-rw-r--r--drivers/staging/fclk/Makefile1
-rw-r--r--drivers/staging/fclk/TODO2
-rw-r--r--drivers/staging/fclk/dt-binding.txt16
-rw-r--r--drivers/staging/fclk/xilinx_fclk.c125
-rw-r--r--drivers/staging/greybus/light.c8
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c2
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c2
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c6
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c1
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c4
-rw-r--r--drivers/staging/most/dim2/dim2.c2
-rw-r--r--drivers/staging/octeon/ethernet-tx.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c39
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c4
-rw-r--r--drivers/staging/rtl8192u/r8192U.h2
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c38
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.h2
-rw-r--r--drivers/staging/rtl8712/os_intfs.c1
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c36
-rw-r--r--drivers/staging/rtl8712/usb_intf.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c16
-rw-r--r--drivers/staging/speakup/main.c2
-rw-r--r--drivers/staging/vt6655/device_main.c8
-rw-r--r--drivers/staging/wilc1000/wilc_hif.c40
-rw-r--r--drivers/staging/wilc1000/wilc_netdev.c7
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c1
-rw-r--r--drivers/staging/xlnx_ctrl_driver/Kconfig15
-rw-r--r--drivers/staging/xlnx_ctrl_driver/MAINTAINERS4
-rw-r--r--drivers/staging/xlnx_ctrl_driver/Makefile2
-rw-r--r--drivers/staging/xlnx_ctrl_driver/xlnx_frmb.c290
-rw-r--r--drivers/staging/xlnx_ctrl_driver/xlnx_vpss.c595
-rw-r--r--drivers/staging/xlnx_ernic/Kconfig4
-rw-r--r--drivers/staging/xlnx_ernic/MAINTAINERS4
-rw-r--r--drivers/staging/xlnx_ernic/Makefile7
-rw-r--r--drivers/staging/xlnx_ernic/dt-binding.txt29
-rw-r--r--drivers/staging/xlnx_ernic/xcm.c1962
-rw-r--r--drivers/staging/xlnx_ernic/xcm.h170
-rw-r--r--drivers/staging/xlnx_ernic/xcommon.h73
-rw-r--r--drivers/staging/xlnx_ernic/xernic_bw_test.c482
-rw-r--r--drivers/staging/xlnx_ernic/xhw_config.h26
-rw-r--r--drivers/staging/xlnx_ernic/xhw_def.h641
-rw-r--r--drivers/staging/xlnx_ernic/xif.h239
-rw-r--r--drivers/staging/xlnx_ernic/xioctl.h24
-rw-r--r--drivers/staging/xlnx_ernic/xmain.c1592
-rw-r--r--drivers/staging/xlnx_ernic/xmain.h33
-rw-r--r--drivers/staging/xlnx_ernic/xmr.c413
-rw-r--r--drivers/staging/xlnx_ernic/xmr.h68
-rw-r--r--drivers/staging/xlnx_ernic/xperftest.h33
-rw-r--r--drivers/staging/xlnx_ernic/xqp.c1310
-rw-r--r--drivers/staging/xlnx_ernic/xqp.h114
-rw-r--r--drivers/staging/xlnx_ernic/xrocev2.h409
-rw-r--r--drivers/staging/xlnx_tsmux/Kconfig11
-rw-r--r--drivers/staging/xlnx_tsmux/MAINTAINERS4
-rw-r--r--drivers/staging/xlnx_tsmux/Makefile1
-rw-r--r--drivers/staging/xlnx_tsmux/dt-binding.txt28
-rw-r--r--drivers/staging/xlnx_tsmux/xlnx_mpg2tsmux.c1510
-rw-r--r--drivers/staging/xlnxsync/Kconfig11
-rw-r--r--drivers/staging/xlnxsync/MAINTAINERS4
-rw-r--r--drivers/staging/xlnxsync/Makefile1
-rw-r--r--drivers/staging/xlnxsync/dt-binding.txt34
-rw-r--r--drivers/staging/xlnxsync/xlnxsync.c1290
-rw-r--r--drivers/staging/xroeframer/Kconfig18
-rw-r--r--drivers/staging/xroeframer/Makefile12
-rw-r--r--drivers/staging/xroeframer/README47
-rw-r--r--drivers/staging/xroeframer/roe_framer_ctrl.h1088
-rw-r--r--drivers/staging/xroeframer/sysfs_xroe.c562
-rw-r--r--drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c718
-rw-r--r--drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c571
-rw-r--r--drivers/staging/xroeframer/sysfs_xroe_framer_stats.c401
-rw-r--r--drivers/staging/xroeframer/sysfs_xroe_framer_udp.c181
-rw-r--r--drivers/staging/xroeframer/xroe_framer.c155
-rw-r--r--drivers/staging/xroeframer/xroe_framer.h63
-rw-r--r--drivers/staging/xroetrafficgen/Kconfig14
-rw-r--r--drivers/staging/xroetrafficgen/Makefile8
-rw-r--r--drivers/staging/xroetrafficgen/README19
-rw-r--r--drivers/staging/xroetrafficgen/roe_radio_ctrl.h183
-rw-r--r--drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c824
-rw-r--r--drivers/staging/xroetrafficgen/xroe-traffic-gen.c124
-rw-r--r--drivers/staging/xroetrafficgen/xroe-traffic-gen.h15
-rw-r--r--drivers/target/iscsi/iscsi_target.c22
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c54
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c12
-rw-r--r--drivers/target/loopback/tcm_loop.c3
-rw-r--r--drivers/target/target_core_device.c16
-rw-r--r--drivers/target/target_core_file.c4
-rw-r--r--drivers/target/target_core_tmr.c4
-rw-r--r--drivers/target/target_core_transport.c4
-rw-r--r--drivers/tee/optee/device.c2
-rw-r--r--drivers/tee/tee_core.c3
-rw-r--r--drivers/thermal/hisi_thermal.c4
-rw-r--r--drivers/thermal/intel/Kconfig3
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c28
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h1
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c24
-rw-r--r--drivers/thermal/intel/intel_quark_dts_thermal.c12
-rw-r--r--drivers/thermal/intel/intel_soc_dts_iosf.c2
-rw-r--r--drivers/thermal/of-thermal.c9
-rw-r--r--drivers/thermal/thermal_core.c6
-rw-r--r--drivers/thermal/thermal_sysfs.c10
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/nhi.c2
-rw-r--r--drivers/tty/cyclades.c2
-rw-r--r--drivers/tty/hvc/hvc_dcc.c28
-rw-r--r--drivers/tty/hvc/hvc_xen.c70
-rw-r--r--drivers/tty/isicom.c2
-rw-r--r--drivers/tty/n_gsm.c92
-rw-r--r--drivers/tty/serial/8250/8250.h13
-rw-r--r--drivers/tty/serial/8250/8250_core.c1
-rw-r--r--drivers/tty/serial/8250/8250_dma.c26
-rw-r--r--drivers/tty/serial/8250/8250_dw.c3
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c6
-rw-r--r--drivers/tty/serial/8250/8250_early.c1
-rw-r--r--drivers/tty/serial/8250/8250_em.c4
-rw-r--r--drivers/tty/serial/8250/8250_exar.c5
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c15
-rw-r--r--drivers/tty/serial/8250/8250_omap.c50
-rw-r--r--drivers/tty/serial/8250/8250_pci.c146
-rw-r--r--drivers/tty/serial/8250/8250_port.c45
-rw-r--r--drivers/tty/serial/8250/Kconfig2
-rw-r--r--drivers/tty/serial/Kconfig10
-rw-r--r--drivers/tty/serial/altera_uart.c21
-rw-r--r--drivers/tty/serial/amba-pl011.c127
-rw-r--r--drivers/tty/serial/arc_uart.c7
-rw-r--r--drivers/tty/serial/atmel_serial.c44
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c13
-rw-r--r--drivers/tty/serial/fsl_lpuart.c64
-rw-r--r--drivers/tty/serial/imx.c3
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c3
-rw-r--r--drivers/tty/serial/lantiq.c1
-rw-r--r--drivers/tty/serial/max310x.c401
-rw-r--r--drivers/tty/serial/meson_uart.c33
-rw-r--r--drivers/tty/serial/mvebu-uart.c11
-rw-r--r--drivers/tty/serial/pch_uart.c6
-rw-r--r--drivers/tty/serial/samsung.c19
-rw-r--r--drivers/tty/serial/sc16is7xx.c38
-rw-r--r--drivers/tty/serial/serial-tegra.c126
-rw-r--r--drivers/tty/serial/sh-sci.c9
-rw-r--r--drivers/tty/serial/sifive.c2
-rw-r--r--drivers/tty/serial/sprd_serial.c68
-rw-r--r--drivers/tty/serial/sunsab.c8
-rw-r--r--drivers/tty/serial/tegra-tcu.c2
-rw-r--r--drivers/tty/serial/uartlite.c277
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/serial/xilinx_uartps.c13
-rw-r--r--drivers/tty/tty_io.c12
-rw-r--r--drivers/tty/tty_ioctl.c45
-rw-r--r--drivers/tty/tty_jobctrl.c17
-rw-r--r--drivers/tty/vcc.c16
-rw-r--r--drivers/tty/vt/keyboard.c2
-rw-r--r--drivers/tty/vt/vc_screen.c71
-rw-r--r--drivers/tty/vt/vt.c16
-rw-r--r--drivers/uio/Kconfig24
-rw-r--r--drivers/uio/Makefile4
-rw-r--r--drivers/uio/uio_core.c (renamed from drivers/uio/uio.c)50
-rw-r--r--drivers/uio/uio_dmabuf.c210
-rw-r--r--drivers/uio/uio_dmabuf.h26
-rw-r--r--drivers/uio/uio_dmem_genirq.c13
-rw-r--r--drivers/uio/uio_xilinx_ai_engine.c296
-rw-r--r--drivers/uio/uio_xilinx_apm.c369
-rw-r--r--drivers/usb/cdns3/cdns3-pci-wrap.c5
-rw-r--r--drivers/usb/cdns3/gadget.c19
-rw-r--r--drivers/usb/chipidea/ci.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c5
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c5
-rw-r--r--drivers/usb/chipidea/core.c20
-rw-r--r--drivers/usb/chipidea/host.c9
-rw-r--r--drivers/usb/chipidea/otg.c5
-rw-r--r--drivers/usb/chipidea/otg_fsm.c11
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/class/usbtmc.c2
-rw-r--r--drivers/usb/common/usb-conn-gpio.c6
-rw-r--r--drivers/usb/core/buffer.c41
-rw-r--r--drivers/usb/core/devio.c22
-rw-r--r--drivers/usb/core/hcd.c26
-rw-r--r--drivers/usb/core/hub.c90
-rw-r--r--drivers/usb/core/hub.h2
-rw-r--r--drivers/usb/core/quirks.c23
-rw-r--r--drivers/usb/core/sysfs.c5
-rw-r--r--drivers/usb/core/usb-acpi.c65
-rw-r--r--drivers/usb/core/usb.c76
-rw-r--r--drivers/usb/dwc2/hcd.c2
-rw-r--r--drivers/usb/dwc2/hcd_intr.c15
-rw-r--r--drivers/usb/dwc2/platform.c8
-rw-r--r--drivers/usb/dwc3/Kconfig8
-rw-r--r--drivers/usb/dwc3/Makefile10
-rw-r--r--drivers/usb/dwc3/core.c294
-rw-r--r--drivers/usb/dwc3/core.h107
-rw-r--r--drivers/usb/dwc3/debugfs.c160
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c11
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c507
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c6
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c118
-rw-r--r--drivers/usb/dwc3/ep0.c48
-rw-r--r--drivers/usb/dwc3/gadget.c448
-rw-r--r--drivers/usb/dwc3/gadget.h18
-rw-r--r--drivers/usb/dwc3/gadget_hibernation.c567
-rw-r--r--drivers/usb/dwc3/host.c28
-rw-r--r--drivers/usb/dwc3/otg.c2199
-rw-r--r--drivers/usb/dwc3/otg.h252
-rw-r--r--drivers/usb/dwc3/platform_data.h54
-rw-r--r--drivers/usb/gadget/configfs.c45
-rw-r--r--drivers/usb/gadget/function/f_fs.c11
-rw-r--r--drivers/usb/gadget/function/f_hid.c266
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c22
-rw-r--r--drivers/usb/gadget/function/f_ncm.c65
-rw-r--r--drivers/usb/gadget/function/f_printer.c12
-rw-r--r--drivers/usb/gadget/function/f_tcm.c311
-rw-r--r--drivers/usb/gadget/function/f_uvc.c5
-rw-r--r--drivers/usb/gadget/function/storage_common.c6
-rw-r--r--drivers/usb/gadget/function/tcm.h7
-rw-r--r--drivers/usb/gadget/function/u_audio.c2
-rw-r--r--drivers/usb/gadget/function/u_ether.c39
-rw-r--r--drivers/usb/gadget/function/u_hid.h1
-rw-r--r--drivers/usb/gadget/function/uvc_video.c7
-rw-r--r--drivers/usb/gadget/legacy/inode.c29
-rw-r--r--drivers/usb/gadget/legacy/webcam.c3
-rw-r--r--drivers/usb/gadget/udc/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/amd5536udc_pci.c3
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c1
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c12
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c6
-rw-r--r--drivers/usb/gadget/udc/net2272.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c1
-rw-r--r--drivers/usb/gadget/udc/snps_udc_core.c6
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c101
-rw-r--r--drivers/usb/host/ehci-fsl.c2
-rw-r--r--drivers/usb/host/ehci-ppc-of.c1
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c12
-rw-r--r--drivers/usb/host/fhci-sched.c2
-rw-r--r--drivers/usb/host/fotg210-hcd.c3
-rw-r--r--drivers/usb/host/ohci-at91.c8
-rw-r--r--drivers/usb/host/ohci-nxp.c1
-rw-r--r--drivers/usb/host/ohci-ppc-of.c1
-rw-r--r--drivers/usb/host/uhci-pci.c10
-rw-r--r--drivers/usb/host/xhci-debugfs.c1
-rw-r--r--drivers/usb/host/xhci-hub.c20
-rw-r--r--drivers/usb/host/xhci-mem.c41
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c149
-rw-r--r--drivers/usb/host/xhci-mtk.c1
-rw-r--r--drivers/usb/host/xhci-mtk.h2
-rw-r--r--drivers/usb/host/xhci-mvebu.c2
-rw-r--r--drivers/usb/host/xhci-pci.c53
-rw-r--r--drivers/usb/host/xhci-plat.c61
-rw-r--r--drivers/usb/host/xhci-rcar.c3
-rw-r--r--drivers/usb/host/xhci-ring.c116
-rw-r--r--drivers/usb/host/xhci.c78
-rw-r--r--drivers/usb/host/xhci.h16
-rw-r--r--drivers/usb/misc/idmouse.c8
-rw-r--r--drivers/usb/misc/iowarrior.c2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c14
-rw-r--r--drivers/usb/mon/mon_bin.c12
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.c5
-rw-r--r--drivers/usb/musb/cppi_dma.c2
-rw-r--r--drivers/usb/musb/musb_debugfs.c2
-rw-r--r--drivers/usb/musb/musb_gadget.c5
-rw-r--r--drivers/usb/musb/musb_host.c9
-rw-r--r--drivers/usb/phy/Kconfig1
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c9
-rw-r--r--drivers/usb/phy/phy-tahvo.c2
-rw-r--r--drivers/usb/phy/phy-ulpi.c99
-rw-r--r--drivers/usb/renesas_usbhs/rza.c4
-rw-r--r--drivers/usb/roles/class.c17
-rw-r--r--drivers/usb/serial/ch341.c15
-rw-r--r--drivers/usb/serial/cp210x.c6
-rw-r--r--drivers/usb/serial/f81232.c12
-rw-r--r--drivers/usb/serial/f81534.c12
-rw-r--r--drivers/usb/serial/ftdi_sio.c11
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h12
-rw-r--r--drivers/usb/serial/option.c133
-rw-r--r--drivers/usb/serial/qcserial.c3
-rw-r--r--drivers/usb/serial/sierra.c3
-rw-r--r--drivers/usb/serial/usb-serial-simple.c73
-rw-r--r--drivers/usb/serial/usb-serial.c2
-rw-r--r--drivers/usb/serial/usb_wwan.c3
-rw-r--r--drivers/usb/storage/alauda.c11
-rw-r--r--drivers/usb/storage/ene_ub6250.c2
-rw-r--r--drivers/usb/storage/scsiglue.c28
-rw-r--r--drivers/usb/storage/sddr55.c4
-rw-r--r--drivers/usb/storage/uas-detect.h13
-rw-r--r--drivers/usb/storage/uas.c307
-rw-r--r--drivers/usb/storage/unusual_cypress.h2
-rw-r--r--drivers/usb/storage/unusual_devs.h13
-rw-r--r--drivers/usb/storage/unusual_uas.h34
-rw-r--r--drivers/usb/typec/altmodes/displayport.c44
-rw-r--r--drivers/usb/typec/bus.c14
-rw-r--r--drivers/usb/typec/class.c9
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c9
-rw-r--r--drivers/usb/typec/tcpm/tcpci.h1
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c138
-rw-r--r--drivers/usb/usbip/stub_dev.c9
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c3
-rw-r--r--drivers/vfio/vfio.c1
-rw-r--r--drivers/vhost/net.c4
-rw-r--r--drivers/vhost/vhost.c9
-rw-r--r--drivers/vhost/vsock.c2
-rw-r--r--drivers/video/backlight/bd6107.c2
-rw-r--r--drivers/video/backlight/da9052_bl.c1
-rw-r--r--drivers/video/backlight/gpio_backlight.c2
-rw-r--r--drivers/video/backlight/lm3630a_bl.c15
-rw-r--r--drivers/video/backlight/lm3639_bl.c1
-rw-r--r--drivers/video/backlight/lp8788_bl.c1
-rw-r--r--drivers/video/backlight/lv5207lp.c2
-rw-r--r--drivers/video/fbdev/Kconfig3
-rw-r--r--drivers/video/fbdev/amba-clcd.c24
-rw-r--r--drivers/video/fbdev/arkfb.c9
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c4
-rw-r--r--drivers/video/fbdev/au1200fb.c6
-rw-r--r--drivers/video/fbdev/chipsfb.c1
-rw-r--r--drivers/video/fbdev/core/bitblit.c3
-rw-r--r--drivers/video/fbdev/core/fb_defio.c6
-rw-r--r--drivers/video/fbdev/core/fbcon.c17
-rw-r--r--drivers/video/fbdev/core/modedb.c5
-rw-r--r--drivers/video/fbdev/core/sysimgblt.c64
-rw-r--r--drivers/video/fbdev/ep93xx-fb.c1
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c2
-rw-r--r--drivers/video/fbdev/geode/lxfb_core.c3
-rw-r--r--drivers/video/fbdev/i740fb.c9
-rw-r--r--drivers/video/fbdev/imsttfb.c39
-rw-r--r--drivers/video/fbdev/imxfb.c4
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c3
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_maven.c6
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c4
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c2
-rw-r--r--drivers/video/fbdev/omap/lcd_mipid.c6
-rw-r--r--drivers/video/fbdev/pm2fb.c14
-rw-r--r--drivers/video/fbdev/pm3fb.c6
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c2
-rw-r--r--drivers/video/fbdev/riva/riva_hw.c3
-rw-r--r--drivers/video/fbdev/s3fb.c2
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c3
-rw-r--r--drivers/video/fbdev/sis/init.c4
-rw-r--r--drivers/video/fbdev/sis/sis_main.c2
-rw-r--r--drivers/video/fbdev/smscufx.c115
-rw-r--r--drivers/video/fbdev/sticore.h2
-rw-r--r--drivers/video/fbdev/stifb.c30
-rw-r--r--drivers/video/fbdev/tgafb.c3
-rw-r--r--drivers/video/fbdev/udlfb.c13
-rw-r--r--drivers/video/fbdev/uvesafb.c3
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c4
-rw-r--r--drivers/video/fbdev/via/via-core.c9
-rw-r--r--drivers/video/fbdev/vt8623fb.c2
-rw-r--r--drivers/virt/vboxguest/vboxguest_linux.c9
-rw-r--r--drivers/virtio/virtio_balloon.c6
-rw-r--r--drivers/virtio/virtio_mmio.c38
-rw-r--r--drivers/virtio/virtio_ring.c8
-rw-r--r--drivers/vme/bridges/vme_fake.c2
-rw-r--r--drivers/vme/bridges/vme_tsi148.c1
-rw-r--r--drivers/w1/w1.c10
-rw-r--r--drivers/w1/w1_int.c5
-rw-r--r--drivers/watchdog/armada_37xx_wdt.c2
-rw-r--r--drivers/watchdog/at91sam9_wdt.c7
-rw-r--r--drivers/watchdog/bcm2835_wdt.c3
-rw-r--r--drivers/watchdog/cadence_wdt.c13
-rw-r--r--drivers/watchdog/diag288_wdt.c15
-rw-r--r--drivers/watchdog/hpwdt.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c26
-rw-r--r--drivers/watchdog/intel-mid_wdt.c1
-rw-r--r--drivers/watchdog/menz69_wdt.c16
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c300
-rw-r--r--drivers/watchdog/pcwd_usb.c6
-rw-r--r--drivers/watchdog/sbsa_gwdt.c1
-rw-r--r--drivers/watchdog/sp5100_tco.c4
-rw-r--r--drivers/watchdog/stm32_iwdg.c3
-rw-r--r--drivers/watchdog/watchdog_dev.c5
-rw-r--r--drivers/xen/events/events_base.c99
-rw-r--r--drivers/xen/events/events_internal.h2
-rw-r--r--drivers/xen/gntdev.c30
-rw-r--r--drivers/xen/pcpu.c2
-rw-r--r--drivers/xen/platform-pci.c7
-rw-r--r--drivers/xen/privcmd.c2
-rw-r--r--drivers/xen/pvcalls-back.c17
-rw-r--r--drivers/xen/swiotlb-xen.c8
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c4
-rw-r--r--fs/Makefile1
-rw-r--r--fs/affs/file.c2
-rw-r--r--fs/afs/cell.c6
-rw-r--r--fs/afs/dir.c2
-rw-r--r--fs/afs/dynroot.c33
-rw-r--r--fs/afs/flock.c2
-rw-r--r--fs/afs/fs_probe.c4
-rw-r--r--fs/afs/fsclient.c2
-rw-r--r--fs/afs/inode.c1
-rw-r--r--fs/afs/internal.h3
-rw-r--r--fs/afs/misc.c1
-rw-r--r--fs/afs/rxrpc.c9
-rw-r--r--fs/afs/security.c2
-rw-r--r--fs/afs/server.c7
-rw-r--r--fs/afs/super.c2
-rw-r--r--fs/afs/vl_probe.c4
-rw-r--r--fs/afs/vl_rotate.c10
-rw-r--r--fs/afs/volume.c4
-rw-r--r--fs/afs/yfsclient.c3
-rw-r--r--fs/aio.c13
-rw-r--r--fs/attr.c22
-rw-r--r--fs/autofs/waitq.c3
-rw-r--r--fs/binfmt_aout.c2
-rw-r--r--fs/binfmt_elf_fdpic.c12
-rw-r--r--fs/binfmt_flat.c3
-rw-r--r--fs/binfmt_misc.c8
-rw-r--r--fs/block_dev.c9
-rw-r--r--fs/btrfs/backref.c104
-rw-r--r--fs/btrfs/block-group.c1
-rw-r--r--fs/btrfs/block-rsv.c5
-rw-r--r--fs/btrfs/block-rsv.h16
-rw-r--r--fs/btrfs/ctree.c34
-rw-r--r--fs/btrfs/ctree.h4
-rw-r--r--fs/btrfs/delalloc-space.c3
-rw-r--r--fs/btrfs/delayed-inode.c19
-rw-r--r--fs/btrfs/dev-replace.c29
-rw-r--r--fs/btrfs/dir-item.c122
-rw-r--r--fs/btrfs/disk-io.c58
-rw-r--r--fs/btrfs/export.c2
-rw-r--r--fs/btrfs/export.h2
-rw-r--r--fs/btrfs/extent-tree.c19
-rw-r--r--fs/btrfs/extent_io.c7
-rw-r--r--fs/btrfs/file-item.c4
-rw-r--r--fs/btrfs/free-space-cache.c7
-rw-r--r--fs/btrfs/inode.c60
-rw-r--r--fs/btrfs/ioctl.c71
-rw-r--r--fs/btrfs/print-tree.c6
-rw-r--r--fs/btrfs/qgroup.c195
-rw-r--r--fs/btrfs/qgroup.h1
-rw-r--r--fs/btrfs/raid56.c74
-rw-r--r--fs/btrfs/rcu-string.h6
-rw-r--r--fs/btrfs/ref-verify.c6
-rw-r--r--fs/btrfs/relocation.c14
-rw-r--r--fs/btrfs/root-tree.c5
-rw-r--r--fs/btrfs/scrub.c36
-rw-r--r--fs/btrfs/send.c34
-rw-r--r--fs/btrfs/super.c4
-rw-r--r--fs/btrfs/sysfs.c7
-rw-r--r--fs/btrfs/tests/btrfs-tests.c2
-rw-r--r--fs/btrfs/tests/qgroup-tests.c36
-rw-r--r--fs/btrfs/transaction.c17
-rw-r--r--fs/btrfs/tree-checker.c27
-rw-r--r--fs/btrfs/tree-log.c20
-rw-r--r--fs/btrfs/volumes.c93
-rw-r--r--fs/btrfs/volumes.h3
-rw-r--r--fs/btrfs/xattr.c3
-rw-r--r--fs/btrfs/zlib.c2
-rw-r--r--fs/buffer.c4
-rw-r--r--fs/cachefiles/bind.c3
-rw-r--r--fs/ceph/caps.c41
-rw-r--r--fs/ceph/debugfs.c13
-rw-r--r--fs/ceph/file.c10
-rw-r--r--fs/ceph/inode.c4
-rw-r--r--fs/ceph/mds_client.c11
-rw-r--r--fs/ceph/mds_client.h14
-rw-r--r--fs/ceph/snap.c48
-rw-r--r--fs/ceph/super.c14
-rw-r--r--fs/char_dev.c2
-rw-r--r--fs/cifs/cifs_spnego.c4
-rw-r--r--fs/cifs/cifsacl.c14
-rw-r--r--fs/cifs/cifsfs.c22
-rw-r--r--fs/cifs/cifsfs.h5
-rw-r--r--fs/cifs/cifsglob.h76
-rw-r--r--fs/cifs/cifsproto.h12
-rw-r--r--fs/cifs/cifssmb.c9
-rw-r--r--fs/cifs/connect.c28
-rw-r--r--fs/cifs/dfs_cache.c701
-rw-r--r--fs/cifs/dir.c5
-rw-r--r--fs/cifs/file.c24
-rw-r--r--fs/cifs/inode.c8
-rw-r--r--fs/cifs/ioctl.c6
-rw-r--r--fs/cifs/link.c19
-rw-r--r--fs/cifs/misc.c8
-rw-r--r--fs/cifs/smb1ops.c19
-rw-r--r--fs/cifs/smb2inode.c9
-rw-r--r--fs/cifs/smb2misc.c26
-rw-r--r--fs/cifs/smb2ops.c261
-rw-r--r--fs/cifs/smb2pdu.c19
-rw-r--r--fs/cifs/smb2pdu.h2
-rw-r--r--fs/cifs/smb2proto.h2
-rw-r--r--fs/cifs/smbdirect.c19
-rw-r--r--fs/cifs/transport.c27
-rw-r--r--fs/coda/upcall.c2
-rw-r--r--fs/compat_ioctl.c3
-rw-r--r--fs/configfs/dir.c2
-rw-r--r--fs/dcache.c7
-rw-r--r--fs/debugfs/file.c36
-rw-r--r--fs/debugfs/inode.c28
-rw-r--r--fs/dlm/ast.c6
-rw-r--r--fs/dlm/lock.c69
-rw-r--r--fs/dlm/netlink.c2
-rw-r--r--fs/dlm/plock.c185
-rw-r--r--fs/dlm/recover.c39
-rw-r--r--fs/ecryptfs/inode.c8
-rw-r--r--fs/erofs/decompressor.c16
-rw-r--r--fs/erofs/internal.h2
-rw-r--r--fs/erofs/zdata.c4
-rw-r--r--fs/erofs/zmap.c10
-rw-r--r--fs/eventfd.c7
-rw-r--r--fs/eventpoll.c94
-rw-r--r--fs/ext2/ext2.h13
-rw-r--r--fs/ext2/super.c42
-rw-r--r--fs/ext2/xattr.c4
-rw-r--r--fs/ext4/acl.h5
-rw-r--r--fs/ext4/balloc.c25
-rw-r--r--fs/ext4/ext4.h12
-rw-r--r--fs/ext4/extents.c23
-rw-r--r--fs/ext4/extents_status.c343
-rw-r--r--fs/ext4/file.c6
-rw-r--r--fs/ext4/fsmap.c10
-rw-r--r--fs/ext4/ialloc.c2
-rw-r--r--fs/ext4/indirect.c17
-rw-r--r--fs/ext4/inline.c32
-rw-r--r--fs/ext4/inode.c129
-rw-r--r--fs/ext4/ioctl.c19
-rw-r--r--fs/ext4/mballoc.c257
-rw-r--r--fs/ext4/mballoc.h14
-rw-r--r--fs/ext4/migrate.c7
-rw-r--r--fs/ext4/move_extent.c6
-rw-r--r--fs/ext4/namei.c77
-rw-r--r--fs/ext4/page-io.c10
-rw-r--r--fs/ext4/resize.c75
-rw-r--r--fs/ext4/super.c97
-rw-r--r--fs/ext4/sysfs.c7
-rw-r--r--fs/ext4/verity.c7
-rw-r--r--fs/ext4/xattr.c319
-rw-r--r--fs/ext4/xattr.h20
-rw-r--r--fs/f2fs/checkpoint.c22
-rw-r--r--fs/f2fs/data.c9
-rw-r--r--fs/f2fs/extent_cache.c6
-rw-r--r--fs/f2fs/f2fs.h4
-rw-r--r--fs/f2fs/file.c16
-rw-r--r--fs/f2fs/gc.c25
-rw-r--r--fs/f2fs/inline.c28
-rw-r--r--fs/f2fs/namei.c2
-rw-r--r--fs/f2fs/node.c10
-rw-r--r--fs/f2fs/recovery.c46
-rw-r--r--fs/f2fs/segment.c2
-rw-r--r--fs/f2fs/super.c15
-rw-r--r--fs/f2fs/verity.c12
-rw-r--r--fs/f2fs/xattr.c6
-rw-r--r--fs/fat/dir.c2
-rw-r--r--fs/fhandle.c2
-rw-r--r--fs/file.c1
-rw-r--r--fs/fs-writeback.c2
-rw-r--r--fs/fs_context.c3
-rw-r--r--fs/fuse/control.c8
-rw-r--r--fs/fuse/cuse.c2
-rw-r--r--fs/fuse/dir.c2
-rw-r--r--fs/fuse/file.c26
-rw-r--r--fs/fuse/inode.c6
-rw-r--r--fs/fuse/readdir.c10
-rw-r--r--fs/gfs2/aops.c7
-rw-r--r--fs/gfs2/bmap.c5
-rw-r--r--fs/gfs2/glops.c6
-rw-r--r--fs/gfs2/lops.c2
-rw-r--r--fs/gfs2/ops_fstype.c17
-rw-r--r--fs/gfs2/quota.c11
-rw-r--r--fs/gfs2/rgrp.c2
-rw-r--r--fs/gfs2/super.c34
-rw-r--r--fs/hfs/bnode.c1
-rw-r--r--fs/hfs/inode.c13
-rw-r--r--fs/hfs/trans.c2
-rw-r--r--fs/hfsplus/hfsplus_fs.h2
-rw-r--r--fs/hfsplus/inode.c36
-rw-r--r--fs/hfsplus/options.c4
-rw-r--r--fs/hfsplus/super.c4
-rw-r--r--fs/hfsplus/unicode.c2
-rw-r--r--fs/hugetlbfs/inode.c12
-rw-r--r--fs/inode.c81
-rw-r--r--fs/internal.h2
-rw-r--r--fs/io_uring.c178
-rw-r--r--fs/iomap/buffered-io.c3
-rw-r--r--fs/isofs/namei.c4
-rw-r--r--fs/jbd2/checkpoint.c137
-rw-r--r--fs/jbd2/commit.c4
-rw-r--r--fs/jbd2/journal.c46
-rw-r--r--fs/jbd2/recovery.c8
-rw-r--r--fs/jbd2/transaction.c51
-rw-r--r--fs/jffs2/build.c5
-rw-r--r--fs/jffs2/erase.c2
-rw-r--r--fs/jffs2/file.c15
-rw-r--r--fs/jffs2/xattr.c13
-rw-r--r--fs/jffs2/xattr.h4
-rw-r--r--fs/jfs/jfs_dmap.c116
-rw-r--r--fs/jfs/jfs_dtree.c7
-rw-r--r--fs/jfs/jfs_extent.c5
-rw-r--r--fs/jfs/jfs_filsys.h2
-rw-r--r--fs/jfs/jfs_imap.c9
-rw-r--r--fs/jfs/jfs_mount.c6
-rw-r--r--fs/jfs/jfs_txnmgr.c5
-rw-r--r--fs/jfs/namei.c5
-rw-r--r--fs/kernfs/dir.c17
-rw-r--r--fs/kernfs/mount.c2
-rw-r--r--fs/libfs.c22
-rw-r--r--fs/lockd/mon.c3
-rw-r--r--fs/locks.c2
-rw-r--r--fs/mbcache.c121
-rw-r--r--fs/namei.c159
-rw-r--r--fs/nfs/blocklayout/blocklayout.c2
-rw-r--r--fs/nfs/blocklayout/dev.c4
-rw-r--r--fs/nfs/filelayout/filelayout.c8
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c1
-rw-r--r--fs/nfs/internal.h4
-rw-r--r--fs/nfs/nfs2xdr.c2
-rw-r--r--fs/nfs/nfs3xdr.c2
-rw-r--r--fs/nfs/nfs4client.c1
-rw-r--r--fs/nfs/nfs4idmap.c46
-rw-r--r--fs/nfs/nfs4proc.c83
-rw-r--r--fs/nfs/nfs4state.c18
-rw-r--r--fs/nfs/nfs4trace.h93
-rw-r--r--fs/nfs/nfs4xdr.c12
-rw-r--r--fs/nfs/nfsroot.c4
-rw-r--r--fs/nfs/pnfs_dev.c2
-rw-r--r--fs/nfs/super.c62
-rw-r--r--fs/nfsd/blocklayoutxdr.c9
-rw-r--r--fs/nfsd/flexfilelayoutxdr.c9
-rw-r--r--fs/nfsd/netns.h6
-rw-r--r--fs/nfsd/nfs3xdr.c4
-rw-r--r--fs/nfsd/nfs4callback.c8
-rw-r--r--fs/nfsd/nfs4layouts.c4
-rw-r--r--fs/nfsd/nfs4proc.c6
-rw-r--r--fs/nfsd/nfs4recover.c4
-rw-r--r--fs/nfsd/nfs4state.c72
-rw-r--r--fs/nfsd/nfs4xdr.c51
-rw-r--r--fs/nfsd/nfscache.c57
-rw-r--r--fs/nfsd/nfsctl.c16
-rw-r--r--fs/nfsd/nfsd.h3
-rw-r--r--fs/nfsd/nfssvc.c37
-rw-r--r--fs/nfsd/trace.h65
-rw-r--r--fs/nfsd/vfs.c12
-rw-r--r--fs/nilfs2/alloc.c3
-rw-r--r--fs/nilfs2/bmap.c16
-rw-r--r--fs/nilfs2/btnode.c12
-rw-r--r--fs/nilfs2/btree.c15
-rw-r--r--fs/nilfs2/dat.c34
-rw-r--r--fs/nilfs2/file.c8
-rw-r--r--fs/nilfs2/gcinode.c6
-rw-r--r--fs/nilfs2/inode.c53
-rw-r--r--fs/nilfs2/ioctl.c9
-rw-r--r--fs/nilfs2/page.c10
-rw-r--r--fs/nilfs2/recovery.c7
-rw-r--r--fs/nilfs2/segbuf.c6
-rw-r--r--fs/nilfs2/segment.c86
-rw-r--r--fs/nilfs2/sufile.c51
-rw-r--r--fs/nilfs2/super.c36
-rw-r--r--fs/nilfs2/the_nilfs.c103
-rw-r--r--fs/nilfs2/the_nilfs.h2
-rw-r--r--fs/nls/nls_base.c4
-rw-r--r--fs/notify/fanotify/fanotify_user.c22
-rw-r--r--fs/notify/inotify/inotify_fsnotify.c11
-rw-r--r--fs/ntfs/attrib.c28
-rw-r--r--fs/ntfs/inode.c7
-rw-r--r--fs/ntfs/super.c3
-rw-r--r--fs/ocfs2/alloc.c4
-rw-r--r--fs/ocfs2/aops.c18
-rw-r--r--fs/ocfs2/dir.c14
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h4
-rw-r--r--fs/ocfs2/dlmglue.c8
-rw-r--r--fs/ocfs2/extent_map.c4
-rw-r--r--fs/ocfs2/file.c8
-rw-r--r--fs/ocfs2/journal.c2
-rw-r--r--fs/ocfs2/journal.h1
-rw-r--r--fs/ocfs2/move_extents.c34
-rw-r--r--fs/ocfs2/namei.c30
-rw-r--r--fs/ocfs2/refcounttree.c2
-rw-r--r--fs/ocfs2/stackglue.c8
-rw-r--r--fs/ocfs2/super.c114
-rw-r--r--fs/ocfs2/xattr.c2
-rw-r--r--fs/omfs/file.c2
-rw-r--r--fs/orangefs/orangefs-debugfs.c29
-rw-r--r--fs/orangefs/orangefs-mod.c8
-rw-r--r--fs/overlayfs/copy_up.c4
-rw-r--r--fs/overlayfs/dir.c46
-rw-r--r--fs/overlayfs/export.c2
-rw-r--r--fs/overlayfs/namei.c24
-rw-r--r--fs/overlayfs/ovl_entry.h9
-rw-r--r--fs/overlayfs/super.c5
-rw-r--r--fs/pnode.c2
-rw-r--r--fs/proc/base.c3
-rw-r--r--fs/proc/proc_sysctl.c33
-rw-r--r--fs/proc/task_mmu.c6
-rw-r--r--fs/pstore/Kconfig1
-rw-r--r--fs/pstore/ram.c3
-rw-r--r--fs/pstore/ram_core.c12
-rw-r--r--fs/quota/dquot.c563
-rw-r--r--fs/quota/quota_tree.c38
-rw-r--r--fs/reiserfs/journal.c4
-rw-r--r--fs/reiserfs/namei.c4
-rw-r--r--fs/reiserfs/super.c6
-rw-r--r--fs/reiserfs/xattr_security.c10
-rw-r--r--fs/select.c2
-rw-r--r--fs/signalfd.c1
-rw-r--r--fs/squashfs/squashfs_fs.h2
-rw-r--r--fs/squashfs/squashfs_fs_sb.h2
-rw-r--r--fs/squashfs/xattr.h4
-rw-r--r--fs/squashfs/xattr_id.c2
-rw-r--r--fs/statfs.c4
-rw-r--r--fs/super.c11
-rw-r--r--fs/sync.c3
-rw-r--r--fs/sysv/itree.c6
-rw-r--r--fs/tracefs/inode.c34
-rw-r--r--fs/ubifs/budget.c9
-rw-r--r--fs/ubifs/commit.c6
-rw-r--r--fs/ubifs/dir.c14
-rw-r--r--fs/ubifs/file.c16
-rw-r--r--fs/ubifs/journal.c4
-rw-r--r--fs/ubifs/lpt.c2
-rw-r--r--fs/ubifs/super.c17
-rw-r--r--fs/ubifs/tnc.c158
-rw-r--r--fs/ubifs/tnc_misc.c4
-rw-r--r--fs/ubifs/ubifs.h5
-rw-r--r--fs/udf/balloc.c33
-rw-r--r--fs/udf/file.c26
-rw-r--r--fs/udf/inode.c195
-rw-r--r--fs/udf/namei.c10
-rw-r--r--fs/udf/super.c1
-rw-r--r--fs/udf/truncate.c48
-rw-r--r--fs/udf/udf_i.h3
-rw-r--r--fs/udf/udf_sb.h2
-rw-r--r--fs/udf/unicode.c2
-rw-r--r--fs/userfaultfd.c4
-rw-r--r--fs/verity/enable.c24
-rw-r--r--fs/verity/signature.c16
-rw-r--r--fs/verity/verify.c12
-rw-r--r--fs/xattr.c2
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c27
-rw-r--r--fs/xfs/libxfs/xfs_attr.c2
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c37
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.h26
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c85
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.h2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c58
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h18
-rw-r--r--fs/xfs/libxfs/xfs_btree.c35
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c24
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.h17
-rw-r--r--fs/xfs/libxfs/xfs_da_format.c1
-rw-r--r--fs/xfs/libxfs/xfs_da_format.h59
-rw-r--r--fs/xfs/libxfs/xfs_defer.c368
-rw-r--r--fs/xfs/libxfs/xfs_defer.h49
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c4
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h2
-rw-r--r--fs/xfs/libxfs/xfs_dir2_block.c33
-rw-r--r--fs/xfs/libxfs/xfs_dir2_data.c32
-rw-r--r--fs/xfs/libxfs/xfs_dir2_leaf.c4
-rw-r--r--fs/xfs/libxfs/xfs_dir2_node.c14
-rw-r--r--fs/xfs/libxfs/xfs_dir2_priv.h19
-rw-r--r--fs/xfs/libxfs/xfs_dir2_sf.c28
-rw-r--r--fs/xfs/libxfs/xfs_dquot_buf.c8
-rw-r--r--fs/xfs/libxfs/xfs_format.h50
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c70
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.h1
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c54
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.h8
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c10
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h9
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h10
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c4
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c6
-rw-r--r--fs/xfs/libxfs/xfs_shared.h1
-rw-r--r--fs/xfs/libxfs/xfs_trans_inode.c2
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c8
-rw-r--r--fs/xfs/xfs_acl.c27
-rw-r--r--fs/xfs/xfs_aops.c4
-rw-r--r--fs/xfs/xfs_attr_inactive.c156
-rw-r--r--fs/xfs/xfs_attr_list.c5
-rw-r--r--fs/xfs/xfs_bmap_item.c243
-rw-r--r--fs/xfs/xfs_bmap_item.h3
-rw-r--r--fs/xfs/xfs_bmap_util.c105
-rw-r--r--fs/xfs/xfs_buf.c22
-rw-r--r--fs/xfs/xfs_buf.h2
-rw-r--r--fs/xfs/xfs_buf_item.c4
-rw-r--r--fs/xfs/xfs_dquot.c90
-rw-r--r--fs/xfs/xfs_dquot.h98
-rw-r--r--fs/xfs/xfs_dquot_item.c47
-rw-r--r--fs/xfs/xfs_dquot_item.h35
-rw-r--r--fs/xfs/xfs_error.c32
-rw-r--r--fs/xfs/xfs_error.h7
-rw-r--r--fs/xfs/xfs_export.c14
-rw-r--r--fs/xfs/xfs_extent_busy.c14
-rw-r--r--fs/xfs/xfs_extfree_item.c180
-rw-r--r--fs/xfs/xfs_extfree_item.h18
-rw-r--r--fs/xfs/xfs_file.c53
-rw-r--r--fs/xfs/xfs_fsmap.c1
-rw-r--r--fs/xfs/xfs_icache.c8
-rw-r--r--fs/xfs/xfs_icreate_item.c1
-rw-r--r--fs/xfs/xfs_inode.c153
-rw-r--r--fs/xfs/xfs_inode.h22
-rw-r--r--fs/xfs/xfs_inode_item.c55
-rw-r--r--fs/xfs/xfs_inode_item.h4
-rw-r--r--fs/xfs/xfs_ioctl.c22
-rw-r--r--fs/xfs/xfs_iomap.c17
-rw-r--r--fs/xfs/xfs_iops.c21
-rw-r--r--fs/xfs/xfs_itable.c8
-rw-r--r--fs/xfs/xfs_iwalk.c27
-rw-r--r--fs/xfs/xfs_linux.h32
-rw-r--r--fs/xfs/xfs_log.c94
-rw-r--r--fs/xfs/xfs_log.h3
-rw-r--r--fs/xfs/xfs_log_cil.c47
-rw-r--r--fs/xfs/xfs_log_priv.h53
-rw-r--r--fs/xfs/xfs_log_recover.c253
-rw-r--r--fs/xfs/xfs_message.c2
-rw-r--r--fs/xfs/xfs_message.h2
-rw-r--r--fs/xfs/xfs_mount.c261
-rw-r--r--fs/xfs/xfs_mount.h5
-rw-r--r--fs/xfs/xfs_pnfs.c4
-rw-r--r--fs/xfs/xfs_qm.c121
-rw-r--r--fs/xfs/xfs_qm_bhv.c8
-rw-r--r--fs/xfs/xfs_qm_syscalls.c142
-rw-r--r--fs/xfs/xfs_quota.h4
-rw-r--r--fs/xfs/xfs_refcount_item.c178
-rw-r--r--fs/xfs/xfs_refcount_item.h3
-rw-r--r--fs/xfs/xfs_reflink.c247
-rw-r--r--fs/xfs/xfs_rmap_item.c170
-rw-r--r--fs/xfs/xfs_rmap_item.h3
-rw-r--r--fs/xfs/xfs_stats.c14
-rw-r--r--fs/xfs/xfs_stats.h1
-rw-r--r--fs/xfs/xfs_super.c98
-rw-r--r--fs/xfs/xfs_symlink.c7
-rw-r--r--fs/xfs/xfs_trace.h75
-rw-r--r--fs/xfs/xfs_trans.c182
-rw-r--r--fs/xfs/xfs_trans.h10
-rw-r--r--fs/xfs/xfs_trans_ail.c104
-rw-r--r--fs/xfs/xfs_trans_dquot.c73
-rw-r--r--fs/xfs/xfs_trans_priv.h6
-rw-r--r--include/acpi/cppc_acpi.h2
-rw-r--r--include/asm-generic/bitops/atomic.h6
-rw-r--r--include/asm-generic/bugs.h11
-rw-r--r--include/asm-generic/io.h4
-rw-r--r--include/asm-generic/qspinlock.h2
-rw-r--r--include/asm-generic/sections.h7
-rw-r--r--include/asm-generic/tlb.h4
-rw-r--r--include/asm-generic/vmlinux.lds.h18
-rw-r--r--include/asm-generic/word-at-a-time.h2
-rw-r--r--include/crypto/if_alg.h5
-rw-r--r--include/crypto/skcipher.h8
-rw-r--r--include/drm/drm_bridge.h79
-rw-r--r--include/drm/drm_crtc.h34
-rw-r--r--include/drm/drm_dp_helper.h2
-rw-r--r--include/drm/drm_encoder_slave.h24
-rw-r--r--include/drm/drm_fixed.h2
-rw-r--r--include/drm/drm_fourcc.h20
-rw-r--r--include/drm/drm_mipi_dsi.h6
-rw-r--r--include/drm/drm_panel.h13
-rw-r--r--include/drm/drm_simple_kms_helper.h7
-rw-r--r--include/drm/drm_vblank.h1
-rw-r--r--include/dt-bindings/clock/xlnx-versal-clk.h123
-rw-r--r--include/dt-bindings/drm/mipi-dsi.h11
-rw-r--r--include/dt-bindings/media/xilinx-vip.h6
-rw-r--r--include/dt-bindings/phy/phy.h2
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-zynqmp.h36
-rw-r--r--include/dt-bindings/power/xlnx-versal-power.h40
-rw-r--r--include/linux/acpi_iort.h14
-rw-r--r--include/linux/ata.h39
-rw-r--r--include/linux/bitops.h1
-rw-r--r--include/linux/bpf.h6
-rw-r--r--include/linux/buffer_head.h36
-rw-r--r--include/linux/can/platform/sja1000.h2
-rw-r--r--include/linux/cgroup.h1
-rw-r--r--include/linux/clk-provider.h1
-rw-r--r--include/linux/clk.h189
-rw-r--r--include/linux/clk/zynq.h4
-rw-r--r--include/linux/cpu.h8
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/cred.h8
-rw-r--r--include/linux/crypto.h12
-rw-r--r--include/linux/debugfs.h26
-rw-r--r--include/linux/devfreq.h11
-rw-r--r--include/linux/device.h6
-rw-r--r--include/linux/dim.h3
-rw-r--r--include/linux/dma-mapping.h80
-rw-r--r--include/linux/dma-noncoherent.h20
-rw-r--r--include/linux/dma/xilinx_frmbuf.h205
-rw-r--r--include/linux/dma/xilinx_ps_pcie_dma.h69
-rw-r--r--include/linux/dmaengine.h22
-rw-r--r--include/linux/dynamic_debug.h2
-rw-r--r--include/linux/efi.h2
-rw-r--r--include/linux/etherdevice.h12
-rw-r--r--include/linux/eventfd.h8
-rw-r--r--include/linux/filter.h24
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h351
-rw-r--r--include/linux/fpga/fpga-mgr.h26
-rw-r--r--include/linux/fs.h64
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/gfp.h4
-rw-r--r--include/linux/hid.h8
-rw-r--r--include/linux/highmem.h18
-rw-r--r--include/linux/host1x.h3
-rw-r--r--include/linux/hrtimer.h8
-rw-r--r--include/linux/hugetlb.h24
-rw-r--r--include/linux/idr.h6
-rw-r--r--include/linux/if_arp.h4
-rw-r--r--include/linux/if_team.h3
-rw-r--r--include/linux/if_vlan.h17
-rw-r--r--include/linux/ima.h6
-rw-r--r--include/linux/interconnect.h12
-rw-r--r--include/linux/interrupt.h19
-rw-r--r--include/linux/iomap.h2
-rw-r--r--include/linux/iommu.h21
-rw-r--r--include/linux/ioport.h7
-rw-r--r--include/linux/iova.h2
-rw-r--r--include/linux/irqchip/arm-gic.h3
-rw-r--r--include/linux/irqdomain.h2
-rw-r--r--include/linux/jbd2.h2
-rw-r--r--include/linux/kernel.h1
-rw-r--r--include/linux/kernel_stat.h2
-rw-r--r--include/linux/kfifo.h2
-rw-r--r--include/linux/kprobes.h2
-rw-r--r--include/linux/kvm_host.h30
-rw-r--r--include/linux/libata.h6
-rw-r--r--include/linux/list.h20
-rw-r--r--include/linux/lockdep.h5
-rw-r--r--include/linux/lsm_hooks.h9
-rw-r--r--include/linux/mailbox/zynqmp-ipi-message.h5
-rw-r--r--include/linux/mbcache.h41
-rw-r--r--include/linux/mcb.h1
-rw-r--r--include/linux/mdio.h3
-rw-r--r--include/linux/mdio/mdio-i2c.h (renamed from drivers/net/phy/mdio-i2c.h)0
-rw-r--r--include/linux/mdio/mdio-xgene.h (renamed from drivers/net/phy/mdio-xgene.h)0
-rw-r--r--include/linux/mfd/t7l66xb.h1
-rw-r--r--include/linux/mlx5/driver.h4
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/mmc/host.h3
-rw-r--r--include/linux/mmc/mmc.h2
-rw-r--r--include/linux/mmdebug.h2
-rw-r--r--include/linux/mod_devicetable.h4
-rw-r--r--include/linux/mtd/mtd.h3
-rw-r--r--include/linux/mtd/onfi.h4
-rw-r--r--include/linux/mtd/rawnand.h11
-rw-r--r--include/linux/mtd/spi-nor.h52
-rw-r--r--include/linux/namei.h1
-rw-r--r--include/linux/netdevice.h98
-rw-r--r--include/linux/netfilter.h5
-rw-r--r--include/linux/netfilter/ipset/ip_set.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_sctp.h1
-rw-r--r--include/linux/netfilter/nfnetlink.h28
-rw-r--r--include/linux/netfilter_bridge/ebtables.h4
-rw-r--r--include/linux/netfilter_defs.h8
-rw-r--r--include/linux/nls.h2
-rw-r--r--include/linux/nmi.h4
-rw-r--r--include/linux/nospec.h4
-rw-r--r--include/linux/nvme-tcp.h5
-rw-r--r--include/linux/nvme.h60
-rw-r--r--include/linux/of.h408
-rw-r--r--include/linux/of_irq.h13
-rw-r--r--include/linux/once.h28
-rw-r--r--include/linux/pci.h1
-rw-r--r--include/linux/pci_ids.h8
-rw-r--r--include/linux/perf_event.h25
-rw-r--r--include/linux/phy/phy-zynqmp.h60
-rw-r--r--include/linux/platform_data/x86/pmc_atom.h6
-rw-r--r--include/linux/platform_device.h6
-rw-r--r--include/linux/pm_domain.h12
-rw-r--r--include/linux/pm_runtime.h21
-rw-r--r--include/linux/pm_wakeirq.h9
-rw-r--r--include/linux/poll.h4
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/power_supply.h5
-rw-r--r--include/linux/preempt.h30
-rw-r--r--include/linux/printk.h19
-rw-r--r--include/linux/proc_fs.h2
-rw-r--r--include/linux/pwm.h4
-rw-r--r--include/linux/quota.h4
-rw-r--r--include/linux/quotaops.h4
-rw-r--r--include/linux/raid_class.h4
-rw-r--r--include/linux/random.h6
-rw-r--r--include/linux/rcupdate.h49
-rw-r--r--include/linux/regmap.h19
-rw-r--r--include/linux/remoteproc.h6
-rw-r--r--include/linux/ring_buffer.h2
-rw-r--r--include/linux/rmap.h7
-rw-r--r--include/linux/rpmsg.h14
-rw-r--r--include/linux/scatterlist.h50
-rw-r--r--include/linux/sched.h14
-rw-r--r--include/linux/sched/signal.h2
-rw-r--r--include/linux/sched/task.h31
-rw-r--r--include/linux/sched/task_stack.h4
-rw-r--r--include/linux/security.h9
-rw-r--r--include/linux/serial_8250.h1
-rw-r--r--include/linux/serial_core.h17
-rw-r--r--include/linux/sh_intc.h5
-rw-r--r--include/linux/skbuff.h15
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h57
-rw-r--r--include/linux/soc/ti/ti_sci_inta_msi.h2
-rw-r--r--include/linux/soc/ti/ti_sci_protocol.h2
-rw-r--r--include/linux/soc/xilinx/zynqmp/fw.h37
-rw-r--r--include/linux/soc/xilinx/zynqmp/tap_delays.h32
-rw-r--r--include/linux/spi/spi.h25
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/string_helpers.h15
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h5
-rw-r--r--include/linux/sunrpc/sched.h3
-rw-r--r--include/linux/syscalls.h1
-rw-r--r--include/linux/sysctl.h3
-rw-r--r--include/linux/tcp.h4
-rw-r--r--include/linux/tick.h9
-rw-r--r--include/linux/timerqueue.h2
-rw-r--r--include/linux/tpm_eventlog.h6
-rw-r--r--include/linux/trace_events.h7
-rw-r--r--include/linux/tracepoint.h15
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/uaccess.h4
-rw-r--r--include/linux/units.h92
-rw-r--r--include/linux/usb.h10
-rw-r--r--include/linux/usb/cdc_ncm.h15
-rw-r--r--include/linux/usb/chipidea.h1
-rw-r--r--include/linux/usb/hcd.h6
-rw-r--r--include/linux/usb/typec_altmode.h2
-rw-r--r--include/linux/usb/typec_dp.h5
-rw-r--r--include/linux/usb/xhci_pdriver.h29
-rw-r--r--include/linux/virtio_net.h4
-rw-r--r--include/linux/vt_buffer.h2
-rw-r--r--include/linux/wait.h11
-rw-r--r--include/linux/workqueue.h15
-rw-r--r--include/linux/xilinx_phy.h20
-rw-r--r--include/media/dvb_net.h4
-rw-r--r--include/media/dvbdev.h47
-rw-r--r--include/media/media-entity.h8
-rw-r--r--include/media/v4l2-common.h3
-rw-r--r--include/media/v4l2-dev.h6
-rw-r--r--include/media/v4l2-mem2mem.h18
-rw-r--r--include/media/v4l2-subdev.h4
-rw-r--r--include/net/addrconf.h12
-rw-r--r--include/net/af_unix.h20
-rw-r--r--include/net/bluetooth/hci_core.h3
-rw-r--r--include/net/bluetooth/hci_mon.h2
-rw-r--r--include/net/bond_alb.h4
-rw-r--r--include/net/bonding.h28
-rw-r--r--include/net/busy_poll.h2
-rw-r--r--include/net/cfg80211.h3
-rw-r--r--include/net/dn.h231
-rw-r--r--include/net/dn_dev.h199
-rw-r--r--include/net/dn_fib.h167
-rw-r--r--include/net/dn_neigh.h30
-rw-r--r--include/net/dn_nsp.h195
-rw-r--r--include/net/dn_route.h115
-rw-r--r--include/net/dst.h11
-rw-r--r--include/net/dst_ops.h6
-rw-r--r--include/net/flow.h2
-rw-r--r--include/net/genetlink.h3
-rw-r--r--include/net/ieee802154_netdev.h43
-rw-r--r--include/net/if_inet6.h4
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/ip6_tunnel.h2
-rw-r--r--include/net/ip_tunnels.h29
-rw-r--r--include/net/ipv6.h10
-rw-r--r--include/net/llc_pdu.h6
-rw-r--r--include/net/lwtunnel.h5
-rw-r--r--include/net/mrp.h1
-rw-r--r--include/net/neighbour.h9
-rw-r--r--include/net/netfilter/nf_nat_redirect.h3
-rw-r--r--include/net/netfilter/nf_tables.h194
-rw-r--r--include/net/netfilter/nf_tables_core.h8
-rw-r--r--include/net/netfilter/nf_tproxy.h7
-rw-r--r--include/net/netfilter/nft_fib.h2
-rw-r--r--include/net/netfilter/nft_meta.h4
-rw-r--r--include/net/netns/ipv6.h4
-rw-r--r--include/net/netns/netfilter.h3
-rw-r--r--include/net/netns/nftables.h6
-rw-r--r--include/net/netns/xfrm.h1
-rw-r--r--include/net/nfc/nfc.h4
-rw-r--r--include/net/pkt_sched.h4
-rw-r--r--include/net/protocol.h4
-rw-r--r--include/net/rtnetlink.h4
-rw-r--r--include/net/sch_generic.h7
-rw-r--r--include/net/scm.h13
-rw-r--r--include/net/sctp/stream_sched.h2
-rw-r--r--include/net/sctp/structs.h1
-rw-r--r--include/net/sock.h88
-rw-r--r--include/net/tcp.h37
-rw-r--r--include/net/udp.h3
-rw-r--r--include/net/udplite.h8
-rw-r--r--include/net/vxlan.h13
-rw-r--r--include/scsi/scsi_cmnd.h8
-rw-r--r--include/scsi/scsi_host.h2
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h10
-rw-r--r--include/soc/xilinx/xlnx_vcu.h39
-rw-r--r--include/sound/core.h11
-rw-r--r--include/sound/hdaudio.h2
-rw-r--r--include/sound/hdaudio_ext.h1
-rw-r--r--include/sound/pcm.h36
-rw-r--r--include/sound/soc-dapm.h3
-rw-r--r--include/trace/events/f2fs.h2
-rw-r--r--include/trace/events/jbd2.h40
-rw-r--r--include/trace/events/neigh.h4
-rw-r--r--include/trace/events/rpm.h6
-rw-r--r--include/trace/events/spmi.h12
-rw-r--r--include/trace/events/timer.h9
-rw-r--r--include/trace/events/writeback.h2
-rw-r--r--include/uapi/drm/drm_fourcc.h15
-rw-r--r--include/uapi/drm/drm_mode.h2
-rw-r--r--include/uapi/linux/affs_hardblocks.h68
-rw-r--r--include/uapi/linux/audit.h2
-rw-r--r--include/uapi/linux/blkzoned.h10
-rw-r--r--include/uapi/linux/bpf.h4
-rw-r--r--include/uapi/linux/btrfs.h4
-rw-r--r--include/uapi/linux/can/error.h5
-rw-r--r--include/uapi/linux/capability.h2
-rw-r--r--include/uapi/linux/const.h2
-rw-r--r--include/uapi/linux/dn.h149
-rw-r--r--include/uapi/linux/gtp.h2
-rw-r--r--include/uapi/linux/if_alg.h1
-rw-r--r--include/uapi/linux/in.h2
-rw-r--r--include/uapi/linux/media-bus-format.h25
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_sctp.h2
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h3
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_cttimeout.h2
-rw-r--r--include/uapi/linux/netfilter_decnet.h72
-rw-r--r--include/uapi/linux/netlink.h2
-rw-r--r--include/uapi/linux/pci_regs.h2
-rw-r--r--include/uapi/linux/perf_event.h5
-rw-r--r--include/uapi/linux/resource.h4
-rw-r--r--include/uapi/linux/swab.h2
-rw-r--r--include/uapi/linux/sync_file.h2
-rw-r--r--include/uapi/linux/uio/uio.h65
-rw-r--r--include/uapi/linux/usb/video.h30
-rw-r--r--include/uapi/linux/uvcvideo.h2
-rw-r--r--include/uapi/linux/v4l2-mediabus.h3
-rw-r--r--include/uapi/linux/v4l2-subdev.h23
-rw-r--r--include/uapi/linux/videodev2.h34
-rw-r--r--include/uapi/linux/virtio_crypto.h1
-rw-r--r--include/uapi/linux/watch_queue.h55
-rw-r--r--include/uapi/linux/xilinx-csi2rxss.h18
-rw-r--r--include/uapi/linux/xilinx-hls.h21
-rw-r--r--include/uapi/linux/xilinx-sdirxss.h64
-rw-r--r--include/uapi/linux/xilinx-v4l2-controls.h161
-rw-r--r--include/uapi/linux/xilinx-v4l2-events.h24
-rw-r--r--include/uapi/linux/xlnx_ctrl.h34
-rw-r--r--include/uapi/linux/xlnx_mpg2tsmux_interface.h251
-rw-r--r--include/uapi/linux/xlnxsync.h149
-rw-r--r--include/uapi/linux/zocl_ioctl.h125
-rw-r--r--include/uapi/sound/asequencer.h8
-rw-r--r--include/uapi/sound/skl-tplg-interface.h3
-rw-r--r--include/xen/swiotlb-xen.h8
-rw-r--r--init/Kconfig2
-rw-r--r--init/do_mounts.c9
-rw-r--r--init/main.c21
-rw-r--r--ipc/msg.c2
-rw-r--r--ipc/sem.c9
-rw-r--r--ipc/shm.c2
-rw-r--r--kernel/acct.c2
-rw-r--r--kernel/async.c4
-rw-r--r--kernel/audit.c33
-rw-r--r--kernel/audit_fsnotify.c1
-rw-r--r--kernel/audit_watch.c9
-rw-r--r--kernel/auditsc.c2
-rw-r--r--kernel/backtracetest.c2
-rw-r--r--kernel/bpf/arraymap.c12
-rw-r--r--kernel/bpf/bpf_lru_list.c21
-rw-r--r--kernel/bpf/bpf_lru_list.h7
-rw-r--r--kernel/bpf/btf.c8
-rw-r--r--kernel/bpf/cgroup.c9
-rw-r--r--kernel/bpf/core.c5
-rw-r--r--kernel/bpf/cpumap.c3
-rw-r--r--kernel/bpf/hashtab.c20
-rw-r--r--kernel/bpf/helpers.c18
-rw-r--r--kernel/bpf/lpm_trie.c3
-rw-r--r--kernel/bpf/map_in_map.c2
-rw-r--r--kernel/bpf/map_in_map.h2
-rw-r--r--kernel/bpf/queue_stack_maps.c21
-rw-r--r--kernel/bpf/stackmap.c9
-rw-r--r--kernel/bpf/syscall.c2
-rw-r--r--kernel/bpf/verifier.c34
-rw-r--r--kernel/cgroup/cgroup-internal.h6
-rw-r--r--kernel/cgroup/cgroup-v1.c16
-rw-r--r--kernel/cgroup/cgroup.c111
-rw-r--r--kernel/cgroup/cpuset.c27
-rw-r--r--kernel/compat.c2
-rw-r--r--kernel/cpu.c8
-rw-r--r--kernel/cred.c64
-rw-r--r--kernel/debug/debug_core.c5
-rw-r--r--kernel/debug/kdb/kdb_main.c14
-rw-r--r--kernel/dma/coherent.c4
-rw-r--r--kernel/dma/debug.c2
-rw-r--r--kernel/dma/direct.c14
-rw-r--r--kernel/events/core.c188
-rw-r--r--kernel/events/ring_buffer.c11
-rw-r--r--kernel/events/uprobes.c2
-rw-r--r--kernel/exit.c74
-rw-r--r--kernel/extable.c3
-rw-r--r--kernel/fail_function.c5
-rw-r--r--kernel/fork.c45
-rw-r--r--kernel/futex.c12
-rw-r--r--kernel/gcov/clang.c2
-rw-r--r--kernel/gcov/gcc_4_7.c23
-rw-r--r--kernel/irq/chip.c3
-rw-r--r--kernel/irq/generic-chip.c25
-rw-r--r--kernel/irq/internals.h2
-rw-r--r--kernel/irq/irqdesc.c15
-rw-r--r--kernel/irq/irqdomain.c271
-rw-r--r--kernel/irq/manage.c11
-rw-r--r--kernel/irq/matrix.c6
-rw-r--r--kernel/irq/resend.c2
-rw-r--r--kernel/kexec_core.c5
-rw-r--r--kernel/kexec_file.c14
-rw-r--r--kernel/kheaders.c10
-rw-r--r--kernel/kprobes.c32
-rw-r--r--kernel/livepatch/transition.c18
-rw-r--r--kernel/locking/lockdep.c14
-rw-r--r--kernel/locking/test-ww_mutex.c20
-rw-r--r--kernel/module.c41
-rw-r--r--kernel/padata.c2
-rw-r--r--kernel/panic.c75
-rw-r--r--kernel/power/hibernate.c2
-rw-r--r--kernel/power/snapshot.c20
-rw-r--r--kernel/power/swap.c38
-rw-r--r--kernel/power/user.c13
-rw-r--r--kernel/profile.c7
-rw-r--r--kernel/rcu/tree.c2
-rw-r--r--kernel/rcu/tree_exp.h2
-rw-r--r--kernel/reboot.c1
-rw-r--r--kernel/relay.c22
-rw-r--r--kernel/sched/core.c21
-rw-r--r--kernel/sched/deadline.c136
-rw-r--r--kernel/sched/fair.c56
-rw-r--r--kernel/sched/membarrier.c9
-rw-r--r--kernel/sched/psi.c7
-rw-r--r--kernel/sched/rt.c35
-rw-r--r--kernel/sched/wait.c2
-rw-r--r--kernel/sys.c93
-rw-r--r--kernel/sys_ni.c1
-rw-r--r--kernel/sysctl.c42
-rw-r--r--kernel/time/alarmtimer.c33
-rw-r--r--kernel/time/hrtimer.c38
-rw-r--r--kernel/time/jiffies.c7
-rw-r--r--kernel/time/posix-stubs.c2
-rw-r--r--kernel/time/posix-timers.c33
-rw-r--r--kernel/time/tick-broadcast.c2
-rw-r--r--kernel/time/tick-common.c19
-rw-r--r--kernel/time/tick-internal.h1
-rw-r--r--kernel/time/tick-sched.c142
-rw-r--r--kernel/time/timekeeping.c37
-rw-r--r--kernel/time/timekeeping.h3
-rw-r--r--kernel/trace/blktrace.c3
-rw-r--r--kernel/trace/bpf_trace.c5
-rw-r--r--kernel/trace/ftrace.c101
-rw-r--r--kernel/trace/ring_buffer.c209
-rw-r--r--kernel/trace/trace.c295
-rw-r--r--kernel/trace/trace.h9
-rw-r--r--kernel/trace/trace_dynevent.c2
-rw-r--r--kernel/trace/trace_events.c74
-rw-r--r--kernel/trace/trace_events_filter.c3
-rw-r--r--kernel/trace/trace_events_hist.c27
-rw-r--r--kernel/trace/trace_events_trigger.c6
-rw-r--r--kernel/trace/trace_irqsoff.c3
-rw-r--r--kernel/trace/trace_kprobe.c3
-rw-r--r--kernel/trace/trace_output.c9
-rw-r--r--kernel/trace/trace_preemptirq.c4
-rw-r--r--kernel/trace/trace_probe.c5
-rw-r--r--kernel/trace/trace_probe.h2
-rw-r--r--kernel/trace/trace_probe_tmpl.h20
-rw-r--r--kernel/trace/trace_sched_wakeup.c2
-rw-r--r--kernel/trace/trace_uprobe.c6
-rw-r--r--kernel/trace/tracing_map.c7
-rw-r--r--kernel/watchdog.c21
-rw-r--r--kernel/watchdog_hld.c6
-rw-r--r--kernel/workqueue.c27
-rw-r--r--lib/Kconfig.debug14
-rw-r--r--lib/clz_ctz.c32
-rw-r--r--lib/cpu_rmap.c5
-rw-r--r--lib/debugobjects.c152
-rw-r--r--lib/dim/dim.c5
-rw-r--r--lib/dim/net_dim.c3
-rw-r--r--lib/dim/rdma_dim.c3
-rw-r--r--lib/dynamic_debug.c13
-rw-r--r--lib/fonts/fonts.c4
-rw-r--r--lib/idr.c4
-rw-r--r--lib/iov_iter.c14
-rw-r--r--lib/kobject.c17
-rw-r--r--lib/list_debug.c12
-rw-r--r--lib/lockref.c1
-rw-r--r--lib/mpi/mpi-cmp.c8
-rw-r--r--lib/mpi/mpicoder.c3
-rw-r--r--lib/nlattr.c3
-rw-r--r--lib/notifier-error-inject.c2
-rw-r--r--lib/once.c30
-rw-r--r--lib/radix-tree.c3
-rw-r--r--lib/ratelimit.c12
-rw-r--r--lib/test_blackhole_dev.c3
-rw-r--r--lib/test_firmware.c77
-rw-r--r--lib/test_ida.c40
-rw-r--r--lib/test_meminit.c2
-rw-r--r--lib/ts_bm.c4
-rw-r--r--lib/usercopy.c7
-rw-r--r--lib/vdso/Makefile2
-rw-r--r--lib/vdso/gettimeofday.c27
-rw-r--r--mm/cma.c2
-rw-r--r--mm/compaction.c18
-rw-r--r--mm/filemap.c239
-rw-r--r--mm/frame_vector.c31
-rw-r--r--mm/frontswap.c2
-rw-r--r--mm/gup.c15
-rw-r--r--mm/huge_memory.c3
-rw-r--r--mm/hugetlb.c32
-rw-r--r--mm/kasan/report.c4
-rw-r--r--mm/khugepaged.c45
-rw-r--r--mm/kmemleak.c8
-rw-r--r--mm/ksm.c2
-rw-r--r--mm/madvise.c7
-rw-r--r--mm/memcontrol.c27
-rw-r--r--mm/memory-failure.c6
-rw-r--r--mm/memory.c4
-rw-r--r--mm/mempolicy.c7
-rw-r--r--mm/migrate.c5
-rw-r--r--mm/mmap.c26
-rw-r--r--mm/mmu_gather.c5
-rw-r--r--mm/mremap.c6
-rw-r--r--mm/page-writeback.c10
-rw-r--r--mm/page_alloc.c83
-rw-r--r--mm/pagewalk.c13
-rw-r--r--mm/percpu.c2
-rw-r--r--mm/readahead.c3
-rw-r--r--mm/rmap.c31
-rw-r--r--mm/shmem.c28
-rw-r--r--mm/slub.c9
-rw-r--r--mm/swap.c4
-rw-r--r--mm/swapfile.c16
-rw-r--r--mm/userfaultfd.c14
-rw-r--r--net/802/mrp.c18
-rw-r--r--net/8021q/vlan_core.c9
-rw-r--r--net/8021q/vlan_dev.c6
-rw-r--r--net/8021q/vlan_netlink.c4
-rw-r--r--net/9p/client.c5
-rw-r--r--net/9p/protocol.c17
-rw-r--r--net/9p/trans_fd.c18
-rw-r--r--net/9p/trans_rdma.c15
-rw-r--r--net/9p/trans_virtio.c2
-rw-r--r--net/9p/trans_xen.c61
-rw-r--r--net/Kconfig2
-rw-r--r--net/Makefile1
-rw-r--r--net/appletalk/ddp.c9
-rw-r--r--net/atm/ioctl.c7
-rw-r--r--net/atm/mpoa_proc.c3
-rw-r--r--net/atm/pppoatm.c2
-rw-r--r--net/atm/resources.c2
-rw-r--r--net/batman-adv/bat_v_elp.c3
-rw-r--r--net/batman-adv/bat_v_ogm.c7
-rw-r--r--net/batman-adv/distributed-arp-table.c2
-rw-r--r--net/batman-adv/hard-interface.c14
-rw-r--r--net/batman-adv/netlink.c3
-rw-r--r--net/batman-adv/soft-interface.c3
-rw-r--r--net/batman-adv/translation-table.c1
-rw-r--r--net/batman-adv/types.h6
-rw-r--r--net/bluetooth/6lowpan.c1
-rw-r--r--net/bluetooth/af_bluetooth.c4
-rw-r--r--net/bluetooth/hci_conn.c86
-rw-r--r--net/bluetooth/hci_core.c28
-rw-r--r--net/bluetooth/hci_event.c69
-rw-r--r--net/bluetooth/hci_sock.c51
-rw-r--r--net/bluetooth/hci_sysfs.c26
-rw-r--r--net/bluetooth/hidp/core.c2
-rw-r--r--net/bluetooth/l2cap_core.c169
-rw-r--r--net/bluetooth/l2cap_sock.c10
-rw-r--r--net/bluetooth/rfcomm/core.c4
-rw-r--r--net/bpf/test_run.c1
-rw-r--r--net/bridge/br_device.c2
-rw-r--r--net/bridge/br_forward.c6
-rw-r--r--net/bridge/br_if.c5
-rw-r--r--net/bridge/br_input.c4
-rw-r--r--net/bridge/br_netfilter_hooks.c18
-rw-r--r--net/bridge/br_netfilter_ipv6.c1
-rw-r--r--net/bridge/netfilter/ebtable_broute.c8
-rw-r--r--net/bridge/netfilter/ebtable_filter.c8
-rw-r--r--net/bridge/netfilter/ebtable_nat.c8
-rw-r--r--net/bridge/netfilter/ebtables.c10
-rw-r--r--net/bridge/netfilter/nf_conntrack_bridge.c2
-rw-r--r--net/bridge/netfilter/nft_meta_bridge.c5
-rw-r--r--net/caif/caif_socket.c1
-rw-r--r--net/caif/caif_usb.c3
-rw-r--r--net/caif/cfctrl.c6
-rw-r--r--net/caif/chnl_net.c3
-rw-r--r--net/can/af_can.c6
-rw-r--r--net/can/bcm.c35
-rw-r--r--net/can/j1939/address-claim.c40
-rw-r--r--net/can/j1939/j1939-priv.h1
-rw-r--r--net/can/j1939/main.c27
-rw-r--r--net/can/j1939/socket.c34
-rw-r--r--net/can/j1939/transport.c21
-rw-r--r--net/can/raw.c12
-rw-r--r--net/ceph/messenger.c4
-rw-r--r--net/ceph/osd_client.c20
-rw-r--r--net/core/datagram.c15
-rw-r--r--net/core/dev.c57
-rw-r--r--net/core/devlink.c5
-rw-r--r--net/core/drop_monitor.c4
-rw-r--r--net/core/dst.c12
-rw-r--r--net/core/ethtool.c3
-rw-r--r--net/core/filter.c46
-rw-r--r--net/core/flow_dissector.c3
-rw-r--r--net/core/lwt_bpf.c7
-rw-r--r--net/core/neighbour.c159
-rw-r--r--net/core/net_namespace.c7
-rw-r--r--net/core/netpoll.c19
-rw-r--r--net/core/pktgen.c14
-rw-r--r--net/core/rtnetlink.c156
-rw-r--r--net/core/scm.c6
-rw-r--r--net/core/skbuff.c83
-rw-r--r--net/core/sock.c78
-rw-r--r--net/core/sock_diag.c10
-rw-r--r--net/core/sock_map.c4
-rw-r--r--net/core/stream.c10
-rw-r--r--net/core/sysctl_net_core.c15
-rw-r--r--net/dcb/dcbnl.c2
-rw-r--r--net/dccp/dccp.h1
-rw-r--r--net/dccp/ipv4.c20
-rw-r--r--net/dccp/ipv6.c44
-rw-r--r--net/dccp/options.c2
-rw-r--r--net/dccp/output.c2
-rw-r--r--net/dccp/proto.c48
-rw-r--r--net/decnet/Kconfig43
-rw-r--r--net/decnet/Makefile10
-rw-r--r--net/decnet/README8
-rw-r--r--net/decnet/af_decnet.c2400
-rw-r--r--net/decnet/dn_dev.c1438
-rw-r--r--net/decnet/dn_fib.c799
-rw-r--r--net/decnet/dn_neigh.c605
-rw-r--r--net/decnet/dn_nsp_in.c906
-rw-r--r--net/decnet/dn_nsp_out.c695
-rw-r--r--net/decnet/dn_route.c1921
-rw-r--r--net/decnet/dn_rules.c258
-rw-r--r--net/decnet/dn_table.c929
-rw-r--r--net/decnet/dn_timer.c104
-rw-r--r--net/decnet/netfilter/Kconfig17
-rw-r--r--net/decnet/netfilter/Makefile6
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c158
-rw-r--r--net/decnet/sysctl_net_decnet.c373
-rw-r--r--net/dsa/tag_ksz.c3
-rw-r--r--net/dsa/tag_sja1105.c4
-rw-r--r--net/hsr/hsr_forward.c5
-rw-r--r--net/hsr/hsr_framereg.c20
-rw-r--r--net/hsr/hsr_framereg.h1
-rw-r--r--net/hsr/hsr_main.c15
-rw-r--r--net/ieee802154/socket.c50
-rw-r--r--net/ife/ife.c1
-rw-r--r--net/ipv4/Kconfig10
-rw-r--r--net/ipv4/af_inet.c26
-rw-r--r--net/ipv4/devinet.c31
-rw-r--r--net/ipv4/esp4.c4
-rw-r--r--net/ipv4/esp4_offload.c3
-rw-r--r--net/ipv4/fib_frontend.c10
-rw-r--r--net/ipv4/fib_semantics.c32
-rw-r--r--net/ipv4/fib_trie.c3
-rw-r--r--net/ipv4/icmp.c5
-rw-r--r--net/ipv4/igmp.c9
-rw-r--r--net/ipv4/inet_connection_sock.c18
-rw-r--r--net/ipv4/inet_hashtables.c26
-rw-r--r--net/ipv4/ip_gre.c21
-rw-r--r--net/ipv4/ip_input.c37
-rw-r--r--net/ipv4/ip_output.c32
-rw-r--r--net/ipv4/ip_sockglue.c12
-rw-r--r--net/ipv4/ip_tunnel.c43
-rw-r--r--net/ipv4/ip_vti.c4
-rw-r--r--net/ipv4/metrics.c2
-rw-r--r--net/ipv4/netfilter/nf_socket_ipv4.c6
-rw-r--r--net/ipv4/netfilter/nf_tproxy_ipv4.c2
-rw-r--r--net/ipv4/netfilter/nft_dup_ipv4.c18
-rw-r--r--net/ipv4/netfilter/nft_fib_ipv4.c3
-rw-r--r--net/ipv4/nexthop.c2
-rw-r--r--net/ipv4/raw.c5
-rw-r--r--net/ipv4/route.c6
-rw-r--r--net/ipv4/syncookies.c22
-rw-r--r--net/ipv4/sysctl_net_ipv4.c59
-rw-r--r--net/ipv4/tcp.c93
-rw-r--r--net/ipv4/tcp_bpf.c16
-rw-r--r--net/ipv4/tcp_cdg.c2
-rw-r--r--net/ipv4/tcp_fastopen.c6
-rw-r--r--net/ipv4/tcp_input.c86
-rw-r--r--net/ipv4/tcp_ipv4.c13
-rw-r--r--net/ipv4/tcp_metrics.c85
-rw-r--r--net/ipv4/tcp_minisocks.c7
-rw-r--r--net/ipv4/tcp_output.c93
-rw-r--r--net/ipv4/tcp_recovery.c2
-rw-r--r--net/ipv4/tcp_timer.c4
-rw-r--r--net/ipv4/udp.c19
-rw-r--r--net/ipv4/udp_tunnel.c1
-rw-r--r--net/ipv4/udplite.c10
-rw-r--r--net/ipv6/addrconf.c53
-rw-r--r--net/ipv6/addrconf_core.c21
-rw-r--r--net/ipv6/addrlabel.c1
-rw-r--r--net/ipv6/af_inet6.c15
-rw-r--r--net/ipv6/datagram.c2
-rw-r--r--net/ipv6/esp6.c4
-rw-r--r--net/ipv6/esp6_offload.c3
-rw-r--r--net/ipv6/exthdrs_core.c2
-rw-r--r--net/ipv6/fib6_rules.c6
-rw-r--r--net/ipv6/icmp.c5
-rw-r--r--net/ipv6/ila/ila_xlat.c1
-rw-r--r--net/ipv6/ip6_fib.c6
-rw-r--r--net/ipv6/ip6_flowlabel.c2
-rw-r--r--net/ipv6/ip6_gre.c55
-rw-r--r--net/ipv6/ip6_input.c26
-rw-r--r--net/ipv6/ip6_output.c24
-rw-r--r--net/ipv6/ip6_tunnel.c40
-rw-r--r--net/ipv6/ip6_vti.c4
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c27
-rw-r--r--net/ipv6/ndisc.c3
-rw-r--r--net/ipv6/netfilter/nf_socket_ipv6.c2
-rw-r--r--net/ipv6/netfilter/nf_tproxy_ipv6.c2
-rw-r--r--net/ipv6/netfilter/nft_dup_ipv6.c18
-rw-r--r--net/ipv6/netfilter/nft_fib_ipv6.c6
-rw-r--r--net/ipv6/ping.c9
-rw-r--r--net/ipv6/raw.c9
-rw-r--r--net/ipv6/route.c71
-rw-r--r--net/ipv6/seg6.c25
-rw-r--r--net/ipv6/sit.c16
-rw-r--r--net/ipv6/syncookies.c7
-rw-r--r--net/ipv6/tcp_ipv6.c41
-rw-r--r--net/ipv6/udp.c40
-rw-r--r--net/ipv6/udp_impl.h1
-rw-r--r--net/ipv6/udplite.c11
-rw-r--r--net/ipv6/xfrm6_policy.c10
-rw-r--r--net/iucv/af_iucv.c14
-rw-r--r--net/iucv/iucv.c8
-rw-r--r--net/kcm/kcmsock.c118
-rw-r--r--net/key/af_key.c51
-rw-r--r--net/l2tp/l2tp_core.c30
-rw-r--r--net/l2tp/l2tp_ip6.c4
-rw-r--r--net/l2tp/l2tp_ppp.c4
-rw-r--r--net/llc/af_llc.c26
-rw-r--r--net/llc/llc_core.c7
-rw-r--r--net/llc/llc_input.c13
-rw-r--r--net/llc/llc_s_ac.c3
-rw-r--r--net/llc/llc_station.c3
-rw-r--r--net/mac80211/cfg.c7
-rw-r--r--net/mac80211/ibss.c4
-rw-r--r--net/mac80211/ieee80211_i.h28
-rw-r--r--net/mac80211/main.c8
-rw-r--r--net/mac80211/mesh.h22
-rw-r--r--net/mac80211/mesh_pathtbl.c91
-rw-r--r--net/mac80211/mesh_plink.c10
-rw-r--r--net/mac80211/mlme.c21
-rw-r--r--net/mac80211/scan.c13
-rw-r--r--net/mac80211/sta_info.c7
-rw-r--r--net/mac80211/tx.c5
-rw-r--r--net/mac80211/util.c13
-rw-r--r--net/mac80211/wme.c6
-rw-r--r--net/mac802154/iface.c1
-rw-r--r--net/mac802154/rx.c7
-rw-r--r--net/mpls/af_mpls.c4
-rw-r--r--net/ncsi/internal.h9
-rw-r--r--net/ncsi/ncsi-aen.c1
-rw-r--r--net/ncsi/ncsi-manage.c3
-rw-r--r--net/ncsi/ncsi-netlink.c4
-rw-r--r--net/ncsi/ncsi-pkt.h7
-rw-r--r--net/ncsi/ncsi-rsp.c32
-rw-r--r--net/netfilter/Kconfig1
-rw-r--r--net/netfilter/core.c16
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_gen.h14
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c4
-rw-r--r--net/netfilter/ipset/ip_set_core.c80
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h19
-rw-r--r--net/netfilter/ipset/ip_set_hash_netportnet.c1
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c13
-rw-r--r--net/netfilter/ipvs/ip_vs_app.c10
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c30
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c6
-rw-r--r--net/netfilter/nf_conntrack_core.c3
-rw-r--r--net/netfilter/nf_conntrack_ftp.c2
-rw-r--r--net/netfilter/nf_conntrack_h323_asn1.c4
-rw-r--r--net/netfilter/nf_conntrack_helper.c4
-rw-r--r--net/netfilter/nf_conntrack_irc.c39
-rw-r--r--net/netfilter/nf_conntrack_netlink.c94
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c52
-rw-r--r--net/netfilter/nf_conntrack_proto_icmpv6.c53
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c163
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c10
-rw-r--r--net/netfilter/nf_conntrack_sip.c6
-rw-r--r--net/netfilter/nf_conntrack_standalone.c11
-rw-r--r--net/netfilter/nf_log.c7
-rw-r--r--net/netfilter/nf_nat_redirect.c98
-rw-r--r--net/netfilter/nf_tables_api.c1486
-rw-r--r--net/netfilter/nf_tables_offload.c29
-rw-r--r--net/netfilter/nf_tables_trace.c9
-rw-r--r--net/netfilter/nfnetlink.c5
-rw-r--r--net/netfilter/nfnetlink_acct.c11
-rw-r--r--net/netfilter/nfnetlink_cthelper.c11
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c22
-rw-r--r--net/netfilter/nfnetlink_log.c13
-rw-r--r--net/netfilter/nfnetlink_osf.c13
-rw-r--r--net/netfilter/nfnetlink_queue.c16
-rw-r--r--net/netfilter/nft_bitwise.c14
-rw-r--r--net/netfilter/nft_byteorder.c33
-rw-r--r--net/netfilter/nft_chain_filter.c14
-rw-r--r--net/netfilter/nft_cmp.c8
-rw-r--r--net/netfilter/nft_compat.c54
-rw-r--r--net/netfilter/nft_ct.c38
-rw-r--r--net/netfilter/nft_dup_netdev.c6
-rw-r--r--net/netfilter/nft_dynset.c23
-rw-r--r--net/netfilter/nft_exthdr.c14
-rw-r--r--net/netfilter/nft_fib.c5
-rw-r--r--net/netfilter/nft_flow_offload.c11
-rw-r--r--net/netfilter/nft_fwd_netdev.c18
-rw-r--r--net/netfilter/nft_hash.c25
-rw-r--r--net/netfilter/nft_immediate.c6
-rw-r--r--net/netfilter/nft_lookup.c16
-rw-r--r--net/netfilter/nft_masq.c18
-rw-r--r--net/netfilter/nft_meta.c10
-rw-r--r--net/netfilter/nft_nat.c40
-rw-r--r--net/netfilter/nft_numgen.c15
-rw-r--r--net/netfilter/nft_objref.c16
-rw-r--r--net/netfilter/nft_osf.c26
-rw-r--r--net/netfilter/nft_payload.c41
-rw-r--r--net/netfilter/nft_queue.c12
-rw-r--r--net/netfilter/nft_range.c6
-rw-r--r--net/netfilter/nft_redir.c104
-rw-r--r--net/netfilter/nft_rt.c12
-rw-r--r--net/netfilter/nft_set_bitmap.c5
-rw-r--r--net/netfilter/nft_set_hash.c110
-rw-r--r--net/netfilter/nft_set_rbtree.c378
-rw-r--r--net/netfilter/nft_socket.c12
-rw-r--r--net/netfilter/nft_synproxy.c7
-rw-r--r--net/netfilter/nft_tproxy.c27
-rw-r--r--net/netfilter/nft_tunnel.c9
-rw-r--r--net/netfilter/nft_xfrm.c12
-rw-r--r--net/netfilter/xt_REDIRECT.c10
-rw-r--r--net/netfilter/xt_osf.c1
-rw-r--r--net/netfilter/xt_owner.c16
-rw-r--r--net/netfilter/xt_recent.c2
-rw-r--r--net/netfilter/xt_sctp.c2
-rw-r--r--net/netfilter/xt_u32.c21
-rw-r--r--net/netlabel/netlabel_calipso.c52
-rw-r--r--net/netlabel/netlabel_cipso_v4.c4
-rw-r--r--net/netlabel/netlabel_kapi.c3
-rw-r--r--net/netlabel/netlabel_mgmt.c8
-rw-r--r--net/netlabel/netlabel_unlabeled.c10
-rw-r--r--net/netlabel/netlabel_user.h4
-rw-r--r--net/netlink/af_netlink.c136
-rw-r--r--net/netlink/diag.c7
-rw-r--r--net/netlink/genetlink.c35
-rw-r--r--net/netrom/af_netrom.c24
-rw-r--r--net/netrom/nr_dev.c2
-rw-r--r--net/netrom/nr_in.c6
-rw-r--r--net/netrom/nr_out.c2
-rw-r--r--net/netrom/nr_route.c8
-rw-r--r--net/netrom/nr_subr.c12
-rw-r--r--net/netrom/nr_timer.c1
-rw-r--r--net/nfc/core.c4
-rw-r--r--net/nfc/hci/llc_shdlc.c10
-rw-r--r--net/nfc/llcp.h9
-rw-r--r--net/nfc/llcp_commands.c59
-rw-r--r--net/nfc/llcp_core.c163
-rw-r--r--net/nfc/llcp_sock.c21
-rw-r--r--net/nfc/nci/core.c11
-rw-r--r--net/nfc/nci/data.c4
-rw-r--r--net/nfc/nci/ntf.c6
-rw-r--r--net/nfc/nci/spi.c2
-rw-r--r--net/nfc/netlink.c76
-rw-r--r--net/nfc/nfc.h3
-rw-r--r--net/nsh/nsh.c8
-rw-r--r--net/openvswitch/datapath.c52
-rw-r--r--net/packet/af_packet.c108
-rw-r--r--net/packet/diag.c6
-rw-r--r--net/packet/internal.h28
-rw-r--r--net/psample/psample.c3
-rw-r--r--net/rds/af_rds.c2
-rw-r--r--net/rds/ib_recv.c1
-rw-r--r--net/rds/message.c8
-rw-r--r--net/rds/rdma.c3
-rw-r--r--net/rds/rdma_transport.c8
-rw-r--r--net/rds/send.c11
-rw-r--r--net/rds/tcp.c2
-rw-r--r--net/rds/tcp_connect.c4
-rw-r--r--net/rds/tcp_listen.c2
-rw-r--r--net/rfkill/rfkill-gpio.c12
-rw-r--r--net/rose/af_rose.c62
-rw-r--r--net/rose/rose_link.c3
-rw-r--r--net/rose/rose_loopback.c3
-rw-r--r--net/rose/rose_route.c2
-rw-r--r--net/rxrpc/call_event.c2
-rw-r--r--net/rxrpc/conn_event.c8
-rw-r--r--net/rxrpc/conn_service.c3
-rw-r--r--net/rxrpc/local_object.c3
-rw-r--r--net/rxrpc/output.c2
-rw-r--r--net/rxrpc/rxkad.c2
-rw-r--r--net/rxrpc/sendmsg.c4
-rw-r--r--net/sched/Kconfig81
-rw-r--r--net/sched/Makefile6
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/act_mpls.c8
-rw-r--r--net/sched/act_pedit.c1
-rw-r--r--net/sched/act_sample.c11
-rw-r--r--net/sched/cls_api.c23
-rw-r--r--net/sched/cls_flow.c2
-rw-r--r--net/sched/cls_flower.c56
-rw-r--r--net/sched/cls_fw.c11
-rw-r--r--net/sched/cls_route.c13
-rw-r--r--net/sched/cls_rsvp.c24
-rw-r--r--net/sched/cls_rsvp.h777
-rw-r--r--net/sched/cls_rsvp6.c24
-rw-r--r--net/sched/cls_tcindex.c738
-rw-r--r--net/sched/cls_u32.c75
-rw-r--r--net/sched/em_text.c4
-rw-r--r--net/sched/ematch.c2
-rw-r--r--net/sched/sch_api.c99
-rw-r--r--net/sched/sch_atm.c707
-rw-r--r--net/sched/sch_cake.c6
-rw-r--r--net/sched/sch_cbq.c1817
-rw-r--r--net/sched/sch_dsmark.c523
-rw-r--r--net/sched/sch_fq_codel.c2
-rw-r--r--net/sched/sch_generic.c25
-rw-r--r--net/sched/sch_hfsc.c16
-rw-r--r--net/sched/sch_htb.c7
-rw-r--r--net/sched/sch_ingress.c16
-rw-r--r--net/sched/sch_mqprio.c144
-rw-r--r--net/sched/sch_netem.c61
-rw-r--r--net/sched/sch_plug.c2
-rw-r--r--net/sched/sch_qfq.c42
-rw-r--r--net/sched/sch_red.c4
-rw-r--r--net/sched/sch_sfb.c13
-rw-r--r--net/sched/sch_sfq.c2
-rw-r--r--net/sched/sch_taprio.c12
-rw-r--r--net/sched/sch_tbf.c4
-rw-r--r--net/sctp/associola.c3
-rw-r--r--net/sctp/auth.c18
-rw-r--r--net/sctp/bind_addr.c6
-rw-r--r--net/sctp/diag.c4
-rw-r--r--net/sctp/proc.c2
-rw-r--r--net/sctp/sm_sideeffect.c5
-rw-r--r--net/sctp/sm_statefuns.c2
-rw-r--r--net/sctp/socket.c50
-rw-r--r--net/sctp/stream.c25
-rw-r--r--net/sctp/stream_interleave.c3
-rw-r--r--net/sctp/stream_sched.c5
-rw-r--r--net/sctp/stream_sched_prio.c45
-rw-r--r--net/sctp/stream_sched_rr.c5
-rw-r--r--net/smc/af_smc.c18
-rw-r--r--net/smc/smc.h5
-rw-r--r--net/smc/smc_cdc.c2
-rw-r--r--net/smc/smc_close.c2
-rw-r--r--net/smc/smc_diag.c2
-rw-r--r--net/socket.c47
-rw-r--r--net/sunrpc/addr.c4
-rw-r--r--net/sunrpc/auth.c2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c19
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.c27
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c9
-rw-r--r--net/sunrpc/backchannel_rqst.c14
-rw-r--r--net/sunrpc/clnt.c8
-rw-r--r--net/sunrpc/sched.c1
-rw-r--r--net/sunrpc/svcauth_unix.c17
-rw-r--r--net/sunrpc/svcsock.c27
-rw-r--r--net/sunrpc/xprtmultipath.c17
-rw-r--r--net/sunrpc/xprtrdma/verbs.c4
-rw-r--r--net/sunrpc/xprtsock.c4
-rw-r--r--net/tipc/bearer.c10
-rw-r--r--net/tipc/core.c16
-rw-r--r--net/tipc/core.h6
-rw-r--r--net/tipc/discover.c11
-rw-r--r--net/tipc/link.c4
-rw-r--r--net/tipc/monitor.c2
-rw-r--r--net/tipc/msg.h19
-rw-r--r--net/tipc/name_distr.c2
-rw-r--r--net/tipc/netlink.c4
-rw-r--r--net/tipc/netlink_compat.c3
-rw-r--r--net/tipc/node.c172
-rw-r--r--net/tipc/node.h5
-rw-r--r--net/tipc/socket.c6
-rw-r--r--net/tipc/topsrv.c38
-rw-r--r--net/tls/tls_main.c9
-rw-r--r--net/tls/tls_sw.c46
-rw-r--r--net/unix/af_unix.c49
-rw-r--r--net/unix/diag.c22
-rw-r--r--net/unix/garbage.c22
-rw-r--r--net/unix/scm.c10
-rw-r--r--net/vmw_vsock/af_vsock.c12
-rw-r--r--net/vmw_vsock/virtio_transport_common.c17
-rw-r--r--net/vmw_vsock/vmci_transport.c6
-rw-r--r--net/wireless/certs/wens.hex87
-rw-r--r--net/wireless/debugfs.c3
-rw-r--r--net/wireless/nl80211.c5
-rw-r--r--net/wireless/reg.c16
-rw-r--r--net/wireless/scan.c287
-rw-r--r--net/wireless/sme.c29
-rw-r--r--net/wireless/wext-core.c6
-rw-r--r--net/x25/af_x25.c10
-rw-r--r--net/x25/x25_dev.c2
-rw-r--r--net/xdp/xsk.c5
-rw-r--r--net/xfrm/Makefile2
-rw-r--r--net/xfrm/xfrm_input.c3
-rw-r--r--net/xfrm/xfrm_interface_core.c (renamed from net/xfrm/xfrm_interface.c)26
-rw-r--r--net/xfrm/xfrm_ipcomp.c1
-rw-r--r--net/xfrm/xfrm_policy.c18
-rw-r--r--net/xfrm/xfrm_state.c3
-rw-r--r--net/xfrm/xfrm_user.c13
-rw-r--r--samples/bpf/hbm.c1
-rw-r--r--samples/bpf/tcp_basertt_kern.c2
-rw-r--r--samples/v4l/v4l2-pci-skeleton.c2
-rw-r--r--samples/vfio-mdev/mdpy-fb.c8
-rw-r--r--samples/xilinx_apm/Makefile71
-rw-r--r--samples/xilinx_apm/main.c134
-rw-r--r--samples/xilinx_apm/xaxipmon.c1269
-rw-r--r--samples/xilinx_apm/xaxipmon.h943
-rw-r--r--scripts/Kbuild.include23
-rw-r--r--scripts/Makefile.extrawarn1
-rw-r--r--scripts/Makefile.gcc-plugins2
-rw-r--r--scripts/Makefile.modpost3
-rw-r--r--scripts/asn1_compiler.c2
-rwxr-xr-xscripts/bpf_helpers_doc.py157
-rw-r--r--scripts/extract-cert.c7
-rwxr-xr-xscripts/faddr2line11
-rw-r--r--scripts/gcc-plugins/randomize_layout_plugin.c11
-rw-r--r--scripts/gdb/linux/clk.py2
-rw-r--r--scripts/gdb/linux/genpd.py16
-rw-r--r--scripts/gdb/linux/timerlist.py4
-rw-r--r--scripts/gdb/linux/utils.py5
-rw-r--r--scripts/kconfig/lexer.l7
-rw-r--r--scripts/kconfig/preprocess.c3
-rw-r--r--scripts/kconfig/symbol.c14
-rwxr-xr-xscripts/link-vmlinux.sh9
-rwxr-xr-xscripts/mksysmap2
-rw-r--r--scripts/mod/file2alias.c12
-rw-r--r--scripts/mod/modpost.c26
-rw-r--r--scripts/recordmcount.c6
-rwxr-xr-xscripts/selinux/install_policy.sh2
-rw-r--r--scripts/sign-file.c19
-rwxr-xr-xscripts/tags.sh9
-rwxr-xr-xscripts/tracing/ftrace-bisect.sh34
-rw-r--r--security/apparmor/apparmorfs.c6
-rw-r--r--security/apparmor/audit.c2
-rw-r--r--security/apparmor/domain.c2
-rw-r--r--security/apparmor/include/lib.h5
-rw-r--r--security/apparmor/include/policy.h2
-rw-r--r--security/apparmor/label.c13
-rw-r--r--security/apparmor/lsm.c4
-rw-r--r--security/apparmor/mount.c8
-rw-r--r--security/apparmor/policy.c2
-rw-r--r--security/apparmor/policy_unpack.c6
-rw-r--r--security/commoncap.c6
-rw-r--r--security/device_cgroup.c33
-rw-r--r--security/integrity/digsig.c6
-rw-r--r--security/integrity/evm/evm_main.c2
-rw-r--r--security/integrity/iint.c63
-rw-r--r--security/integrity/ima/Kconfig34
-rw-r--r--security/integrity/ima/ima.h16
-rw-r--r--security/integrity/ima/ima_api.c5
-rw-r--r--security/integrity/ima/ima_main.c24
-rw-r--r--security/integrity/ima/ima_modsig.c3
-rw-r--r--security/integrity/ima/ima_policy.c68
-rw-r--r--security/integrity/ima/ima_template.c9
-rw-r--r--security/integrity/integrity.h2
-rw-r--r--security/integrity/platform_certs/load_uefi.c3
-rw-r--r--security/keys/keyctl.c11
-rw-r--r--security/keys/request_key.c44
-rw-r--r--security/security.c24
-rw-r--r--security/selinux/Makefile8
-rw-r--r--security/selinux/hooks.c35
-rw-r--r--security/selinux/ss/policydb.h2
-rw-r--r--security/smack/smack.h1
-rw-r--r--security/smack/smack_lsm.c66
-rw-r--r--security/smack/smackfs.c2
-rw-r--r--security/tomoyo/Makefile2
-rw-r--r--security/tomoyo/tomoyo.c1
-rw-r--r--sound/Kconfig2
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c7
-rw-r--r--sound/core/Makefile1
-rw-r--r--sound/core/control_compat.c6
-rw-r--r--sound/core/info.c27
-rw-r--r--sound/core/init.c1
-rw-r--r--sound/core/jack.c15
-rw-r--r--sound/core/misc.c94
-rw-r--r--sound/core/oss/pcm_oss.c5
-rw-r--r--sound/core/oss/pcm_plugin.h16
-rw-r--r--sound/core/pcm.c1
-rw-r--r--sound/core/pcm_compat.c8
-rw-r--r--sound/core/pcm_dmaengine.c8
-rw-r--r--sound/core/pcm_memory.c99
-rw-r--r--sound/core/rawmidi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c37
-rw-r--r--sound/core/seq/seq_clientmgr.c12
-rw-r--r--sound/core/seq/seq_memory.c11
-rw-r--r--sound/core/seq/seq_midi.c8
-rw-r--r--sound/core/seq/seq_virmidi.c9
-rw-r--r--sound/core/sound_oss.c13
-rw-r--r--sound/core/timer.c11
-rw-r--r--sound/drivers/aloop.c7
-rw-r--r--sound/drivers/mts64.c3
-rw-r--r--sound/drivers/pcsp/pcsp_lib.c2
-rw-r--r--sound/firewire/digi00x/digi00x-stream.c4
-rw-r--r--sound/firewire/tascam/tascam-stream.c2
-rw-r--r--sound/hda/ext/hdac_ext_stream.c17
-rw-r--r--sound/hda/hdac_device.c2
-rw-r--r--sound/hda/hdac_regmap.c7
-rw-r--r--sound/hda/hdac_stream.c33
-rw-r--r--sound/hda/hdac_sysfs.c4
-rw-r--r--sound/i2c/cs8427.c7
-rw-r--r--sound/isa/sb/sb16_csp.c2
-rw-r--r--sound/pci/ac97/ac97_codec.c6
-rw-r--r--sound/pci/asihpi/hpi6205.c2
-rw-r--r--sound/pci/asihpi/hpioctl.c2
-rw-r--r--sound/pci/au88x0/au88x0.h6
-rw-r--r--sound/pci/au88x0/au88x0_core.c2
-rw-r--r--sound/pci/emu10k1/emufx.c112
-rw-r--r--sound/pci/emu10k1/emupcm.c6
-rw-r--r--sound/pci/hda/hda_beep.c15
-rw-r--r--sound/pci/hda/hda_beep.h1
-rw-r--r--sound/pci/hda/hda_controller.c4
-rw-r--r--sound/pci/hda/hda_generic.c7
-rw-r--r--sound/pci/hda/hda_intel.c32
-rw-r--r--sound/pci/hda/hda_tegra.c3
-rw-r--r--sound/pci/hda/patch_ca0132.c8
-rw-r--r--sound/pci/hda/patch_cirrus.c1
-rw-r--r--sound/pci/hda/patch_conexant.c32
-rw-r--r--sound/pci/hda/patch_hdmi.c21
-rw-r--r--sound/pci/hda/patch_realtek.c153
-rw-r--r--sound/pci/hda/patch_sigmatel.c13
-rw-r--r--sound/pci/hda/patch_via.c3
-rw-r--r--sound/pci/ice1712/aureon.c2
-rw-r--r--sound/pci/lx6464es/lx_core.c11
-rw-r--r--sound/pci/oxygen/oxygen_mixer.c2
-rw-r--r--sound/soc/atmel/atmel-i2s.c5
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c61
-rw-r--r--sound/soc/codecs/cs35l33.c4
-rw-r--r--sound/soc/codecs/cs35l34.c4
-rw-r--r--sound/soc/codecs/cs42l51-i2c.c6
-rw-r--r--sound/soc/codecs/cs42l51.c7
-rw-r--r--sound/soc/codecs/cs42l51.h1
-rw-r--r--sound/soc/codecs/cs42l56.c6
-rw-r--r--sound/soc/codecs/cs43130.c6
-rw-r--r--sound/soc/codecs/da7210.c2
-rw-r--r--sound/soc/codecs/da7219-aad.c14
-rw-r--r--sound/soc/codecs/es8316.c42
-rw-r--r--sound/soc/codecs/jz4725b.c34
-rw-r--r--sound/soc/codecs/msm8916-wcd-digital.c46
-rw-r--r--sound/soc/codecs/nau8822.c9
-rw-r--r--sound/soc/codecs/nau8824.c41
-rw-r--r--sound/soc/codecs/pcm512x.c8
-rw-r--r--sound/soc/codecs/rt298.c7
-rw-r--r--sound/soc/codecs/rt5645.c23
-rw-r--r--sound/soc/codecs/rt5665.c2
-rw-r--r--sound/soc/codecs/rt5670.c2
-rw-r--r--sound/soc/codecs/sgtl5000.c1
-rw-r--r--sound/soc/codecs/ssm2602.c15
-rw-r--r--sound/soc/codecs/wcd9335.c83
-rw-r--r--sound/soc/codecs/wm8904.c3
-rw-r--r--sound/soc/codecs/wm8962.c83
-rw-r--r--sound/soc/codecs/wm8994.c5
-rw-r--r--sound/soc/codecs/wm_adsp.c8
-rw-r--r--sound/soc/dwc/dwc-i2s.c4
-rw-r--r--sound/soc/fsl/eukrea-tlv320.c8
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c8
-rw-r--r--sound/soc/fsl/fsl_micfil.c31
-rw-r--r--sound/soc/fsl/fsl_sai.c150
-rw-r--r--sound/soc/fsl/fsl_sai.h60
-rw-r--r--sound/soc/fsl/fsl_spdif.c2
-rw-r--r--sound/soc/fsl/fsl_ssi.c4
-rw-r--r--sound/soc/fsl/imx-audmix.c11
-rw-r--r--sound/soc/generic/audio-graph-card.c8
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c7
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c39
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c2
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c9
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.c4
-rw-r--r--sound/soc/intel/skylake/skl-sst-utils.c1
-rw-r--r--sound/soc/intel/skylake/skl.c7
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c2
-rw-r--r--sound/soc/mediatek/common/mtk-btcvsd.c6
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-mt6351.c6
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-afe-pcm.c20
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c7
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c10
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650.c9
-rw-r--r--sound/soc/meson/Kconfig4
-rw-r--r--sound/soc/meson/Makefile2
-rw-r--r--sound/soc/meson/axg-spdifin.c49
-rw-r--r--sound/soc/meson/axg-tdm-formatter.c42
-rw-r--r--sound/soc/meson/axg-tdm-interface.c4
-rw-r--r--sound/soc/meson/g12a-tohdmitx.c227
-rw-r--r--sound/soc/meson/meson-codec-glue.c149
-rw-r--r--sound/soc/meson/meson-codec-glue.h32
-rw-r--r--sound/soc/pxa/mmp-pcm.c2
-rw-r--r--sound/soc/pxa/pxa-ssp.c2
-rw-r--r--sound/soc/qcom/qdsp6/q6adm.c2
-rw-r--r--sound/soc/rockchip/rockchip_pdm.c1
-rw-r--r--sound/soc/rockchip/rockchip_spdif.c1
-rw-r--r--sound/soc/sh/rcar/ctu.c6
-rw-r--r--sound/soc/sh/rcar/dvc.c6
-rw-r--r--sound/soc/sh/rcar/mix.c6
-rw-r--r--sound/soc/sh/rcar/src.c5
-rw-r--r--sound/soc/sh/rcar/ssi.c4
-rw-r--r--sound/soc/soc-compress.c2
-rw-r--r--sound/soc/soc-core.c17
-rw-r--r--sound/soc/soc-ops.c11
-rw-r--r--sound/soc/soc-pcm.c2
-rw-r--r--sound/soc/soc-utils.c2
-rw-r--r--sound/soc/sof/intel/hda-dai.c8
-rw-r--r--sound/soc/sunxi/sun4i-spdif.c5
-rw-r--r--sound/soc/ti/ams-delta.c4
-rw-r--r--sound/soc/ti/omap-mcbsp.c6
-rw-r--r--sound/soc/xilinx/Kconfig35
-rw-r--r--sound/soc/xilinx/Makefile6
-rw-r--r--sound/soc/xilinx/xilinx-dp-card.c118
-rw-r--r--sound/soc/xilinx/xilinx-dp-codec.c178
-rw-r--r--sound/soc/xilinx/xilinx-dp-pcm.c76
-rw-r--r--sound/soc/xilinx/xlnx_formatter_pcm.c224
-rw-r--r--sound/soc/xilinx/xlnx_i2s.c113
-rw-r--r--sound/soc/xilinx/xlnx_pl_snd_card.c448
-rw-r--r--sound/soc/xilinx/xlnx_sdi_audio.c610
-rw-r--r--sound/soc/xilinx/xlnx_snd_common.h23
-rw-r--r--sound/synth/emux/emux.c7
-rw-r--r--sound/synth/emux/emux_nrpn.c3
-rw-r--r--sound/usb/bcd2000/bcd2000.c3
-rw-r--r--sound/usb/caiaq/input.c1
-rw-r--r--sound/usb/endpoint.c8
-rw-r--r--sound/usb/format.c8
-rw-r--r--sound/usb/line6/driver.c3
-rw-r--r--sound/usb/line6/midi.c6
-rw-r--r--sound/usb/line6/midibuf.c25
-rw-r--r--sound/usb/line6/midibuf.h5
-rw-r--r--sound/usb/line6/pod.c3
-rw-r--r--sound/usb/midi.c4
-rw-r--r--sound/usb/quirks-table.h91
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--sound/usb/stream.c7
-rw-r--r--tools/arch/parisc/include/uapi/asm/mman.h12
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h19
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h3
-rw-r--r--tools/arch/x86/include/asm/required-features.h3
-rw-r--r--tools/arch/x86/include/uapi/asm/vmx.h4
-rw-r--r--tools/bpf/bpftool/btf_dumper.c2
-rw-r--r--tools/bpf/bpftool/json_writer.c3
-rw-r--r--tools/bpf/bpftool/main.c10
-rw-r--r--tools/bpf/bpftool/xlated_dumper.c7
-rw-r--r--tools/build/Makefile.feature2
-rw-r--r--tools/build/feature/Makefile4
-rw-r--r--tools/build/feature/test-all.c5
-rw-r--r--tools/build/feature/test-libbfd-buildid.c8
-rw-r--r--tools/build/feature/test-libcrypto.c15
-rw-r--r--tools/iio/iio_generic_buffer.c59
-rw-r--r--tools/iio/iio_utils.c27
-rw-r--r--tools/include/nolibc/nolibc.h4
-rw-r--r--tools/include/uapi/linux/bpf.h7
-rw-r--r--tools/include/uapi/linux/perf_event.h5
-rw-r--r--tools/lib/bpf/libbpf.c9
-rw-r--r--tools/lib/bpf/nlattr.c2
-rw-r--r--tools/lib/subcmd/help.c18
-rw-r--r--tools/objtool/check.c5
-rw-r--r--tools/perf/Makefile.config43
-rw-r--r--tools/perf/bench/bench.h12
-rw-r--r--tools/perf/builtin-record.c25
-rw-r--r--tools/perf/builtin-sched.c2
-rw-r--r--tools/perf/builtin-top.c23
-rw-r--r--tools/perf/builtin-trace.c135
-rw-r--r--tools/perf/perf-completion.sh11
-rw-r--r--tools/perf/pmu-events/Build5
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/other.json4
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/pipeline.json2
-rw-r--r--tools/perf/tests/shell/test_uprobe_from_different_cu.sh83
-rw-r--r--tools/perf/trace/beauty/beauty.h3
-rw-r--r--tools/perf/ui/browsers/hists.c2
-rw-r--r--tools/perf/util/annotate.c10
-rw-r--r--tools/perf/util/auxtrace.c17
-rw-r--r--tools/perf/util/bpf-event.c11
-rw-r--r--tools/perf/util/bpf-event.h19
-rw-r--r--tools/perf/util/data.c2
-rw-r--r--tools/perf/util/dwarf-aux.c25
-rw-r--r--tools/perf/util/env.c90
-rw-r--r--tools/perf/util/env.h10
-rw-r--r--tools/perf/util/evlist.c21
-rw-r--r--tools/perf/util/evlist.h2
-rw-r--r--tools/perf/util/evsel.c1
-rw-r--r--tools/perf/util/genelf.c20
-rw-r--r--tools/perf/util/genelf.h4
-rw-r--r--tools/perf/util/get_current_dir_name.c3
-rw-r--r--tools/perf/util/header.c19
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c2
-rw-r--r--tools/perf/util/intel-pt.c9
-rw-r--r--tools/perf/util/llvm-utils.c25
-rw-r--r--tools/perf/util/sort.c3
-rw-r--r--tools/perf/util/stat-display.c4
-rw-r--r--tools/perf/util/symbol-elf.c38
-rw-r--r--tools/perf/util/thread_map.c2
-rw-r--r--tools/perf/util/top.h2
-rw-r--r--tools/power/cpupower/Makefile8
-rw-r--r--tools/power/cpupower/bench/Makefile2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/mperf_monitor.c31
-rw-r--r--tools/power/x86/turbostat/turbostat.c2
-rwxr-xr-xtools/testing/ktest/ktest.pl49
-rw-r--r--tools/testing/ktest/sample.conf5
-rw-r--r--tools/testing/radix-tree/regression1.c2
-rw-r--r--tools/testing/selftests/Makefile28
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf180.c22
-rw-r--r--tools/testing/selftests/bpf/test_align.c27
-rw-r--r--tools/testing/selftests/bpf/test_btf.c31
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c13
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds.c6
-rw-r--r--tools/testing/selftests/bpf/verifier/search_pruning.c36
-rw-r--r--tools/testing/selftests/bpf/verifier/sock.c81
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh52
-rw-r--r--tools/testing/selftests/efivarfs/create-read.c2
-rwxr-xr-xtools/testing/selftests/efivarfs/efivarfs.sh5
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest8
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc15
-rw-r--r--tools/testing/selftests/ftrace/test.d/instances/instance-event.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc13
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc1
-rw-r--r--tools/testing/selftests/futex/functional/Makefile6
-rw-r--r--tools/testing/selftests/intel_pstate/Makefile6
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h8
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/vmx.h8
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/ucall.c9
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c2
-rw-r--r--tools/testing/selftests/lib.mk5
-rw-r--r--tools/testing/selftests/memfd/fuse_test.c1
-rwxr-xr-xtools/testing/selftests/net/fib_nexthop_multiprefix.sh4
-rwxr-xr-xtools/testing/selftests/net/fib_nexthops.sh30
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh4
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_changes.sh3
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_flower.sh8
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh18
-rw-r--r--tools/testing/selftests/net/reuseport_bpf.c2
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh3
-rw-r--r--tools/testing/selftests/net/tls.c19
-rwxr-xr-xtools/testing/selftests/net/udpgso_bench.sh24
-rw-r--r--tools/testing/selftests/net/udpgso_bench_rx.c10
-rw-r--r--tools/testing/selftests/net/udpgso_bench_tx.c36
-rwxr-xr-xtools/testing/selftests/netfilter/conntrack_icmp_related.sh36
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat.sh2
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c5
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_preempt.c9
-rw-r--r--tools/testing/selftests/powerpc/math/vmx_preempt.c10
-rw-r--r--tools/testing/selftests/proc/proc-uptime-002.c3
-rw-r--r--tools/testing/selftests/ptp/testptp.c6
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk2
-rw-r--r--tools/testing/selftests/rseq/Makefile4
-rw-r--r--tools/testing/selftests/rseq/rseq.c33
-rw-r--r--tools/testing/selftests/sigaltstack/current_stack_pointer.h23
-rw-r--r--tools/testing/selftests/sigaltstack/sas.c7
-rw-r--r--tools/testing/selftests/tc-testing/settings1
-rw-r--r--tools/testing/selftests/timers/clocksource-switch.c6
-rw-r--r--tools/testing/selftests/timers/valid-adjtimex.c2
-rw-r--r--tools/testing/selftests/vm/map_hugetlb.c7
-rw-r--r--tools/thermal/tmon/sysfs.c24
-rw-r--r--tools/thermal/tmon/tmon.h3
-rw-r--r--tools/virtio/linux/bug.h8
-rw-r--r--tools/virtio/linux/build_bug.h7
-rw-r--r--tools/virtio/linux/cpumask.h7
-rw-r--r--tools/virtio/linux/gfp.h7
-rw-r--r--tools/virtio/linux/kernel.h1
-rw-r--r--tools/virtio/linux/kmsan.h12
-rw-r--r--tools/virtio/linux/scatterlist.h1
-rw-r--r--tools/virtio/linux/topology.h7
-rw-r--r--tools/vm/slabinfo-gnuplot.sh4
-rw-r--r--tools/vm/slabinfo.c32
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c15
-rw-r--r--virt/kvm/coalesced_mmio.c8
-rw-r--r--virt/kvm/kvm_main.c15
5585 files changed, 215733 insertions, 54812 deletions
diff --git a/Documentation/ABI/stable/sysfs-firmware-zynqmp b/Documentation/ABI/stable/sysfs-firmware-zynqmp
new file mode 100644
index 000000000000..eeae291a048c
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-firmware-zynqmp
@@ -0,0 +1,103 @@
+What: /sys/firmware/zynqmp/ggs*
+Date: January 2018
+KernelVersion: 4.15.0
+Contact: "Jolly Shah" <jollys@xilinx.com>
+Description:
+ Read/Write PMU global general storage register value,
+ GLOBAL_GEN_STORAGE{0:3}.
+ Global general storage register that can be used
+ by the system to pass information between masters.
+
+ The register is reset during system or power-on
+ resets. Three registers are used by the FSBL and
+ other Xilinx software products: GLOBAL_GEN_STORAGE{4:6}.
+
+ Usage:
+ # cat /sys/firmware/zynqmp/ggs0
+ # echo <mask> <value> > /sys/firmware/zynqmp/ggs0
+
+ Example:
+ # cat /sys/firmware/zynqmp/ggs0
+ # echo 0xFFFFFFFF 0x1234ABCD > /sys/firmware/zynqmp/ggs0
+
+Users: Xilinx
+
+What: /sys/firmware/zynqmp/pggs*
+Date: January 2018
+KernelVersion: 4.15.0
+Contact: "Jolly Shah" <jollys@xilinx.com>
+Description:
+ Read/Write PMU persistent global general storage register
+ value, PERS_GLOB_GEN_STORAGE{0:3}.
+ Persistent global general storage register that
+ can be used by the system to pass information between
+ masters.
+
+ This register is only reset by the power-on reset
+ and maintains its value through a system reset.
+ Four registers are used by the FSBL and other Xilinx
+ software products: PERS_GLOB_GEN_STORAGE{4:7}.
+
+ Usage:
+ # cat /sys/firmware/zynqmp/pggs0
+ # echo <mask> <value> > /sys/firmware/zynqmp/pggs0
+
+ Example:
+ # cat /sys/firmware/zynqmp/pggs0
+ # echo 0xFFFFFFFF 0x1234ABCD > /sys/firmware/zynqmp/pggs0
+
+Users: Xilinx
+
+What: /sys/firmware/zynqmp/shutdown_scope
+Date: February 2018
+KernelVersion: 4.15.6
+Contact: "Jolly Shah" <jollys@xilinx.com>
+Description:
+ This sysfs interface allows setting the shutdown scope for the
+ next shutdown request. When the next shutdown is performed, the
+ platform specific portion of PSCI-system_off can use the chosen
+ shutdown scope.
+
+ The following shutdown scopes (subtypes) are available:
+
+ subsystem: Only the APU along with all of its peripherals
+ not used by other processing units will be
+ shut down. This may result in the FPD power
+ domain being shut down provided that no other
+ processing unit uses FPD peripherals or DRAM.
+ ps_only: The complete PS will be shut down, including the
+ RPU, PMU, etc. Only the PL domain (FPGA)
+ remains untouched.
+ system: The complete system/device is shut down.
+
+ Usage:
+ # cat /sys/firmware/zynqmp/shutdown_scope
+ # echo <scope> > /sys/firmware/zynqmp/shutdown_scope
+
+ Example:
+ # cat /sys/firmware/zynqmp/shutdown_scope
+ # echo "subsystem" > /sys/firmware/zynqmp/shutdown_scope
+
+Users: Xilinx
+
+What: /sys/firmware/zynqmp/health_status
+Date: April 2018
+KernelVersion: 4.14.0
+Contact: "Rajan Vaja" <rajanv@xilinx.com>
+Description:
+ This sysfs interface allows setting the health status. If PMUFW
+ is compiled with CHECK_HEALTHY_BOOT, it will check the healthy
+ bit on FPD WDT expiration. If the healthy bit is set by a user
+ application running in Linux, PMUFW will do an APU-only restart.
+ If the healthy bit is not set during FPD WDT expiration, PMUFW
+ will do a system restart.
+
+ Usage:
+ Set healthy bit
+ # echo 1 > /sys/firmware/zynqmp/health_status
+
+ Unset healthy bit
+ # echo 0 > /sys/firmware/zynqmp/health_status
+
+Users: Xilinx
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index c3767d4d01a6..4d873e813949 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -138,7 +138,7 @@ Description:
Raw capacitance measurement from channel Y. Units after
application of scale and offset are nanofarads.
-What: /sys/.../iio:deviceX/in_capacitanceY-in_capacitanceZ_raw
+What: /sys/.../iio:deviceX/in_capacitanceY-capacitanceZ_raw
KernelVersion: 3.2
Contact: linux-iio@vger.kernel.org
Description:
diff --git a/Documentation/ABI/testing/sysfs-class-fpga-bridge b/Documentation/ABI/testing/sysfs-class-fpga-bridge
index 312ae2c579d8..676700d7a61f 100644
--- a/Documentation/ABI/testing/sysfs-class-fpga-bridge
+++ b/Documentation/ABI/testing/sysfs-class-fpga-bridge
@@ -9,3 +9,12 @@ Date: January 2016
KernelVersion: 4.5
Contact: Alan Tull <atull@opensource.altera.com>
Description: Show bridge state as "enabled" or "disabled"
+
+What: /sys/class/fpga_bridge/<bridge>/set
+Date: January 2017
+KernelVersion: 4.9
+Contact: Michal Simek <michal.simek@xilinx.com>
+Description: Manually set the bridge state (0 - disable, !0 - enable).
+ Using this attribute requires that the module is
+ compiled with #define DEBUG, which is enabled by default
+ when CONFIG_DEBUG_KERNEL is set.
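+
+ For example, assuming a bridge instance named "br0" (instance
+ names are platform-specific), the bridge can be disabled with:
+
+ # echo 0 > /sys/class/fpga_bridge/br0/set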
diff --git a/Documentation/ABI/testing/sysfs-class-net-queues b/Documentation/ABI/testing/sysfs-class-net-queues
index 978b76358661..40d5aab8452d 100644
--- a/Documentation/ABI/testing/sysfs-class-net-queues
+++ b/Documentation/ABI/testing/sysfs-class-net-queues
@@ -1,4 +1,4 @@
-What: /sys/class/<iface>/queues/rx-<queue>/rps_cpus
+What: /sys/class/net/<iface>/queues/rx-<queue>/rps_cpus
Date: March 2010
KernelVersion: 2.6.35
Contact: netdev@vger.kernel.org
@@ -8,7 +8,7 @@ Description:
network device queue. Possible values depend on the number
of available CPU(s) in the system.
-What: /sys/class/<iface>/queues/rx-<queue>/rps_flow_cnt
+What: /sys/class/net/<iface>/queues/rx-<queue>/rps_flow_cnt
Date: April 2010
KernelVersion: 2.6.35
Contact: netdev@vger.kernel.org
@@ -16,7 +16,7 @@ Description:
Number of Receive Packet Steering flows being currently
processed by this particular network device receive queue.
-What: /sys/class/<iface>/queues/tx-<queue>/tx_timeout
+What: /sys/class/net/<iface>/queues/tx-<queue>/tx_timeout
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@@ -24,7 +24,7 @@ Description:
Indicates the number of transmit timeout events seen by this
network interface transmit queue.
-What: /sys/class/<iface>/queues/tx-<queue>/tx_maxrate
+What: /sys/class/net/<iface>/queues/tx-<queue>/tx_maxrate
Date: March 2015
KernelVersion: 4.1
Contact: netdev@vger.kernel.org
@@ -32,7 +32,7 @@ Description:
A Mbps max-rate set for the queue, a value of zero means disabled,
default is disabled.
-What: /sys/class/<iface>/queues/tx-<queue>/xps_cpus
+What: /sys/class/net/<iface>/queues/tx-<queue>/xps_cpus
Date: November 2010
KernelVersion: 2.6.38
Contact: netdev@vger.kernel.org
@@ -42,7 +42,7 @@ Description:
network device transmit queue. Possible values depend on the
number of available CPU(s) in the system.
-What: /sys/class/<iface>/queues/tx-<queue>/xps_rxqs
+What: /sys/class/net/<iface>/queues/tx-<queue>/xps_rxqs
Date: June 2018
KernelVersion: 4.18.0
Contact: netdev@vger.kernel.org
@@ -53,7 +53,7 @@ Description:
number of available receive queue(s) in the network device.
Default is disabled.
-What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@@ -62,7 +62,7 @@ Description:
of this particular network device transmit queue.
Default value is 1000.
-What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@@ -70,7 +70,7 @@ Description:
Indicates the number of bytes (objects) in flight on this
network device transmit queue.
-What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@@ -79,7 +79,7 @@ Description:
on this network device transmit queue. This value is clamped
to be within the bounds defined by limit_max and limit_min.
-What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@@ -88,7 +88,7 @@ Description:
queued on this network device transmit queue. See
include/linux/dynamic_queue_limits.h for the default value.
-What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 726ac2e01b77..08e153614e09 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -480,16 +480,17 @@ Description: information about CPUs heterogeneity.
cpu_capacity: capacity of cpu#.
What: /sys/devices/system/cpu/vulnerabilities
+ /sys/devices/system/cpu/vulnerabilities/gather_data_sampling
+ /sys/devices/system/cpu/vulnerabilities/itlb_multihit
+ /sys/devices/system/cpu/vulnerabilities/l1tf
+ /sys/devices/system/cpu/vulnerabilities/mds
/sys/devices/system/cpu/vulnerabilities/meltdown
+ /sys/devices/system/cpu/vulnerabilities/mmio_stale_data
+ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
/sys/devices/system/cpu/vulnerabilities/spectre_v1
/sys/devices/system/cpu/vulnerabilities/spectre_v2
- /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
- /sys/devices/system/cpu/vulnerabilities/l1tf
- /sys/devices/system/cpu/vulnerabilities/mds
/sys/devices/system/cpu/vulnerabilities/srbds
/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
- /sys/devices/system/cpu/vulnerabilities/itlb_multihit
- /sys/devices/system/cpu/vulnerabilities/mmio_stale_data
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities
diff --git a/Documentation/ABI/testing/sysfs-driver-cortexa53-edac b/Documentation/ABI/testing/sysfs-driver-cortexa53-edac
new file mode 100644
index 000000000000..87ed5ca3af22
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-cortexa53-edac
@@ -0,0 +1,10 @@
+What: /sys/devices/system/edac/cpu_cache/inject_(L1/L2)_Cache_Error
+Date: June 2016
+Contact: nagasure@xilinx.com
+ punnaia@xilinx.com
+Description: This control file allows injecting cache errors into the
+ Cortex-A53 L1 and L2 caches; ARM provides error injection
+ support for both. Use
+ echo 1 > /sys/devices/system/edac/cpu_cache/inject_L1_Error
+ for L1 cache error injection and
+ echo 1 > /sys/devices/system/edac/cpu_cache/inject_L2_Error
+ for L2 cache error injection.
diff --git a/Documentation/ABI/testing/sysfs-kernel-oops_count b/Documentation/ABI/testing/sysfs-kernel-oops_count
new file mode 100644
index 000000000000..156cca9dbc96
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-oops_count
@@ -0,0 +1,6 @@
+What: /sys/kernel/oops_count
+Date: November 2022
+KernelVersion: 6.2.0
+Contact: Linux Kernel Hardening List <linux-hardening@vger.kernel.org>
+Description:
+ Shows how many times the system has Oopsed since last boot.
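+
+ Usage (the attribute is read-only):
+ # cat /sys/kernel/oops_count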
diff --git a/Documentation/ABI/testing/sysfs-kernel-warn_count b/Documentation/ABI/testing/sysfs-kernel-warn_count
new file mode 100644
index 000000000000..90a029813717
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-warn_count
@@ -0,0 +1,6 @@
+What: /sys/kernel/warn_count
+Date: November 2022
+KernelVersion: 6.2.0
+Contact: Linux Kernel Hardening List <linux-hardening@vger.kernel.org>
+Description:
+ Shows how many times the system has Warned since last boot.
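+
+ Usage (the attribute is read-only):
+ # cat /sys/kernel/warn_count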
diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
index 0ae4f564c2d6..6ec63c239616 100644
--- a/Documentation/admin-guide/cgroup-v1/memory.rst
+++ b/Documentation/admin-guide/cgroup-v1/memory.rst
@@ -82,6 +82,8 @@ Brief summary of control files.
memory.swappiness set/show swappiness parameter of vmscan
(See sysctl's vm.swappiness)
memory.move_charge_at_immigrate set/show controls of moving charges
+ This knob is deprecated and shouldn't be
+ used.
memory.oom_control set/show oom controls.
memory.numa_stat show the number of memory usage per numa
node
@@ -745,8 +747,15 @@ NOTE2:
It is recommended to set the soft limit always below the hard limit,
otherwise the hard limit will take precedence.
-8. Move charges at task migration
-=================================
+8. Move charges at task migration (DEPRECATED!)
+===============================================
+
+THIS IS DEPRECATED!
+
+It's expensive and unreliable! It's better practice to launch workload
+tasks directly from inside their target cgroup. Use dedicated workload
+cgroups to allow fine-grained policy adjustments without having to
+move physical pages between control domains.
Users can move charges associated with a task along with task migration, that
is, uncharge task's pages from the old cgroup and charge them to the new cgroup.
diff --git a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst
new file mode 100644
index 000000000000..264bfa937f7d
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst
@@ -0,0 +1,109 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+GDS - Gather Data Sampling
+==========================
+
+Gather Data Sampling is a hardware vulnerability which allows unprivileged
+speculative access to data which was previously stored in vector registers.
+
+Problem
+-------
+When a gather instruction performs loads from memory, different data elements
+are merged into the destination vector register. However, when a gather
+instruction that is transiently executed encounters a fault, stale data from
+architectural or internal vector registers may get transiently forwarded to the
+destination vector register instead. This will allow a malicious attacker to
+infer stale data using typical side channel techniques like cache timing
+attacks. GDS is a purely sampling-based attack.
+
+The attacker uses gather instructions to infer the stale vector register data.
+The victim does not need to do anything special other than use the vector
+registers. The victim does not need to use gather instructions to be
+vulnerable.
+
+Because the buffers are shared between Hyper-Threads, cross-Hyper-Thread
+attacks are possible.
+
+Attack scenarios
+----------------
+Without mitigation, GDS can infer stale data across virtually all
+permission boundaries:
+
+ Non-enclaves can infer SGX enclave data
+ Userspace can infer kernel data
+ Guests can infer data from hosts
+ Guests can infer data from other guests
+ Users can infer data from other users
+
+Because of this, it is important to ensure that the mitigation stays enabled in
+lower-privilege contexts like guests and when running outside SGX enclaves.
+
+The hardware enforces the mitigation for SGX. Likewise, VMMs should ensure
+that guests are not allowed to disable the GDS mitigation. If a host erred and
+allowed this, a guest could theoretically disable GDS mitigation, mount an
+attack, and re-enable it.
+
+Mitigation mechanism
+--------------------
+This issue is mitigated in microcode. The microcode defines the following new
+bits:
+
+ ================================ === ============================
+ IA32_ARCH_CAPABILITIES[GDS_CTRL] R/O Enumerates GDS vulnerability
+ and mitigation support.
+ IA32_ARCH_CAPABILITIES[GDS_NO] R/O Processor is not vulnerable.
+ IA32_MCU_OPT_CTRL[GDS_MITG_DIS] R/W Disables the mitigation
+ 0 by default.
+ IA32_MCU_OPT_CTRL[GDS_MITG_LOCK] R/W Locks GDS_MITG_DIS=0. Writes
+ to GDS_MITG_DIS are ignored
+ Can't be cleared once set.
+ ================================ === ============================
+
+GDS can also be mitigated on systems that don't have updated microcode by
+disabling AVX. This can be done by setting gather_data_sampling="force" or
+"clearcpuid=avx" on the kernel command-line.
+
+If used, these options will disable AVX use by turning off XSAVE YMM support.
+However, the processor will still enumerate AVX support. Userspace that
+does not follow proper AVX enumeration to check both AVX *and* XSAVE YMM
+support will break.
+
+Mitigation control on the kernel command line
+---------------------------------------------
+The mitigation can be disabled by setting "gather_data_sampling=off" or
+"mitigations=off" on the kernel command line. Not specifying either will default
+to the mitigation being enabled. Specifying "gather_data_sampling=force" will
+use the microcode mitigation when available or disable AVX on affected systems
+where the microcode hasn't been updated to include the mitigation.
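+
+For example, on an affected system without updated microcode, the AVX-based
+fallback can be requested by appending the following to the kernel command
+line::
+
+   gather_data_sampling=force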
+
+GDS System Information
+------------------------
+The kernel provides vulnerability status information through sysfs. For
+GDS this can be accessed by the following sysfs file:
+
+/sys/devices/system/cpu/vulnerabilities/gather_data_sampling
+
+The possible values contained in this file are:
+
+ ============================== =============================================
+ Not affected Processor not vulnerable.
+ Vulnerable Processor vulnerable and mitigation disabled.
+ Vulnerable: No microcode Processor vulnerable and microcode is missing
+ mitigation.
+ Mitigation: AVX disabled,
+ no microcode Processor is vulnerable and microcode is missing
+ mitigation. AVX disabled as mitigation.
+ Mitigation: Microcode Processor is vulnerable and mitigation is in
+ effect.
+ Mitigation: Microcode (locked) Processor is vulnerable and mitigation is in
+ effect and cannot be disabled.
+ Unknown: Dependent on
+ hypervisor status Running on a virtual guest processor that is
+ affected but with no way to know if host
+ processor is mitigated or vulnerable.
+ ============================== =============================================
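+
+For example, the current status can be queried as follows; the output shown
+is just one of the possible values listed above::
+
+   $ cat /sys/devices/system/cpu/vulnerabilities/gather_data_sampling
+   Mitigation: Microcode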
+
+GDS Default mitigation
+----------------------
+The updated microcode will enable the mitigation by default. The kernel's
+default action is to leave the mitigation enabled.
diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
index 2adec1e6520a..245468b0f2be 100644
--- a/Documentation/admin-guide/hw-vuln/index.rst
+++ b/Documentation/admin-guide/hw-vuln/index.rst
@@ -16,3 +16,4 @@ are configurable at compile, boot or run time.
multihit.rst
special-register-buffer-data-sampling.rst
processor_mmio_stale_data.rst
+ gather_data_sampling.rst
diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
index 9393c50b5afc..c98fd11907cc 100644
--- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
+++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
@@ -230,6 +230,20 @@ The possible values in this file are:
* - 'Mitigation: Clear CPU buffers'
- The processor is vulnerable and the CPU buffer clearing mitigation is
enabled.
+ * - 'Unknown: No mitigations'
+ - The processor vulnerability status is unknown because it is
+ outside its Servicing period. Mitigation is not attempted.
+
+Definitions:
+------------
+
+Servicing period: The process of providing functional and security updates to
+Intel processors or platforms, utilizing the Intel Platform Update (IPU)
+process or other similar mechanisms.
+
+End of Servicing Updates (ESU): ESU is the date at which Intel will no
+longer provide Servicing, such as through IPU or other similar update
+processes. ESU dates will typically be aligned to the end of a quarter.
If the processor is vulnerable then the following information is appended to
the above information:
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
index 6bd97cd50d62..0fba3758d0da 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -422,6 +422,14 @@ The possible values in this file are:
'RSB filling' Protection of RSB on context switch enabled
============= ===========================================
+ - EIBRS Post-barrier Return Stack Buffer (PBRSB) protection status:
+
+ =========================== =======================================================
+ 'PBRSB-eIBRS: SW sequence' CPU is affected and protection of RSB on VMEXIT enabled
+ 'PBRSB-eIBRS: Vulnerable' CPU is vulnerable
+ 'PBRSB-eIBRS: Not affected' CPU is not affected by PBRSB
+ =========================== =======================================================
+
Full mitigation might require a microcode update from the CPU
vendor. When the necessary microcode is not available, the kernel will
report vulnerability.
@@ -471,8 +479,16 @@ Spectre variant 2
On Intel Skylake-era systems the mitigation covers most, but not all,
cases. See :ref:`[3] <spec_ref3>` for more details.
- On CPUs with hardware mitigation for Spectre variant 2 (e.g. Enhanced
- IBRS on x86), retpoline is automatically disabled at run time.
+ On CPUs with hardware mitigation for Spectre variant 2 (e.g. IBRS
+ or enhanced IBRS on x86), retpoline is automatically disabled at run time.
+
+ Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
+ boot, by setting the IBRS bit, and they're automatically protected against
+ Spectre v2 variant attacks, including cross-thread branch target injections
+ on SMT systems (STIBP). In other words, eIBRS enables STIBP too.
+
+ Legacy IBRS systems clear the IBRS bit on exit to userspace and
+ therefore explicitly enable STIBP for that case.
The retpoline mitigation is turned on by default on vulnerable
CPUs. It can be forced on or off by the administrator
@@ -496,9 +512,12 @@ Spectre variant 2
For Spectre variant 2 mitigation, individual user programs
can be compiled with return trampolines for indirect branches.
This protects them from consuming poisoned entries in the branch
- target buffer left by malicious software. Alternatively, the
- programs can disable their indirect branch speculation via prctl()
- (See :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
+ target buffer left by malicious software.
+
+ On legacy IBRS systems, at return to userspace, implicit STIBP is disabled
+ because the kernel clears the IBRS bit. In this case, the userspace programs
+ can disable indirect branch speculation via prctl() (See
+ :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
On x86, this will turn on STIBP to guard against attacks from the
sibling thread when the user program is running, and use IBPB to
flush the branch target buffer when switching to/from the program.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 2b35a2836d92..8edab786a6f7 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -825,10 +825,6 @@
debugpat [X86] Enable PAT debugging
- decnet.addr= [HW,NET]
- Format: <area>[,<node>]
- See also Documentation/networking/decnet.txt.
-
default_hugepagesz=
[same as hugepagesz=] The size of the default
HugeTLB page size. This is the size represented by
@@ -1340,6 +1336,26 @@
Format: off | on
default: on
+ gather_data_sampling=
+ [X86,INTEL] Control the Gather Data Sampling (GDS)
+ mitigation.
+
+ Gather Data Sampling is a hardware vulnerability which
+ allows unprivileged speculative access to data which was
+ previously stored in vector registers.
+
+ This issue is mitigated by default in updated microcode.
+ The mitigation may have a performance impact but can be
+ disabled. On systems without the microcode mitigation
+ disabling AVX serves as a mitigation.
+
+ force: Disable AVX as mitigation for systems without the
+ microcode mitigation. No effect if the microcode
+ mitigation is present. Known to cause crashes in
+ userspace with buggy AVX enumeration.
+
+ off: Disable GDS mitigation.
+
gcov_persist= [GCOV] When non-zero (default), profiling data for
kernel modules is saved and remains accessible via
debugfs, even when the module is unloaded/reloaded.
@@ -1944,24 +1960,57 @@
ivrs_ioapic [HW,X86_64]
Provide an override to the IOAPIC-ID<->DEVICE-ID
- mapping provided in the IVRS ACPI table. For
- example, to map IOAPIC-ID decimal 10 to
- PCI device 00:14.0 write the parameter as:
+ mapping provided in the IVRS ACPI table.
+ By default, PCI segment is 0, and can be omitted.
+
+ For example, to map IOAPIC-ID decimal 10 to
+ PCI segment 0x1 and PCI device 00:14.0,
+ write the parameter as:
+ ivrs_ioapic=10@0001:00:14.0
+
+ Deprecated formats:
+ * To map IOAPIC-ID decimal 10 to PCI device 00:14.0
+ write the parameter as:
ivrs_ioapic[10]=00:14.0
+ * To map IOAPIC-ID decimal 10 to PCI segment 0x1 and
+ PCI device 00:14.0 write the parameter as:
+ ivrs_ioapic[10]=0001:00:14.0
ivrs_hpet [HW,X86_64]
Provide an override to the HPET-ID<->DEVICE-ID
- mapping provided in the IVRS ACPI table. For
- example, to map HPET-ID decimal 0 to
- PCI device 00:14.0 write the parameter as:
+ mapping provided in the IVRS ACPI table.
+ By default, PCI segment is 0, and can be omitted.
+
+ For example, to map HPET-ID decimal 10 to
+ PCI segment 0x1 and PCI device 00:14.0,
+ write the parameter as:
+ ivrs_hpet=10@0001:00:14.0
+
+ Deprecated formats:
+ * To map HPET-ID decimal 0 to PCI device 00:14.0
+ write the parameter as:
ivrs_hpet[0]=00:14.0
+ * To map HPET-ID decimal 10 to PCI segment 0x1 and
+ PCI device 00:14.0 write the parameter as:
+ ivrs_hpet[10]=0001:00:14.0
ivrs_acpihid [HW,X86_64]
Provide an override to the ACPI-HID:UID<->DEVICE-ID
- mapping provided in the IVRS ACPI table. For
- example, to map UART-HID:UID AMD0020:0 to
- PCI device 00:14.5 write the parameter as:
+ mapping provided in the IVRS ACPI table.
+ By default, PCI segment is 0, and can be omitted.
+
+ For example, to map UART-HID:UID AMD0020:0 to
+ PCI segment 0x1 and PCI device ID 00:14.5,
+ write the parameter as:
+ ivrs_acpihid=AMD0020:0@0001:00:14.5
+
+ Deprecated formats:
+ * To map UART-HID:UID AMD0020:0 to PCI segment 0 and
+ PCI device ID 00:14.5, write the parameter as:
ivrs_acpihid[00:14.5]=AMD0020:0
+ * To map UART-HID:UID AMD0020:0 to PCI segment 0x1 and
+ PCI device ID 00:14.5, write the parameter as:
+ ivrs_acpihid[0001:00:14.5]=AMD0020:0
js= [HW,JOY] Analog joystick
See Documentation/input/joydev/joystick.rst.
@@ -2667,21 +2716,22 @@
Disable all optional CPU mitigations. This
improves system performance, but it may also
expose users to several CPU vulnerabilities.
- Equivalent to: nopti [X86,PPC]
+ Equivalent to: gather_data_sampling=off [X86]
kpti=0 [ARM64]
- nospectre_v1 [X86,PPC]
+ kvm.nx_huge_pages=off [X86]
+ l1tf=off [X86]
+ mds=off [X86]
+ mmio_stale_data=off [X86]
+ no_entry_flush [PPC]
+ no_uaccess_flush [PPC]
nobp=0 [S390]
+ nopti [X86,PPC]
+ nospectre_v1 [X86,PPC]
nospectre_v2 [X86,PPC,S390,ARM64]
- spectre_v2_user=off [X86]
spec_store_bypass_disable=off [X86,PPC]
+ spectre_v2_user=off [X86]
ssbd=force-off [ARM64]
- l1tf=off [X86]
- mds=off [X86]
tsx_async_abort=off [X86]
- kvm.nx_huge_pages=off [X86]
- no_entry_flush [PPC]
- no_uaccess_flush [PPC]
- mmio_stale_data=off [X86]
Exceptions:
This does not have any effect on
@@ -4298,6 +4348,18 @@
retain_initrd [RAM] Keep initrd memory after extraction
+ retbleed= [X86] Control mitigation of RETBleed (Arbitrary
+ Speculative Code Execution with Return Instructions)
+ vulnerability.
+
+ off - unconditionally disable
+ auto - automatically select a mitigation
+
+ Selecting 'auto' will choose a mitigation method at run
+ time according to the CPU.
+
+ Not specifying this option is equivalent to retbleed=auto.
+
rfkill.default_state=
0 "airplane mode". All wifi, bluetooth, wimax, gps, fm,
etc. communication is blocked by default.
@@ -4541,6 +4603,7 @@
eibrs - enhanced IBRS
eibrs,retpoline - enhanced IBRS + Retpolines
eibrs,lfence - enhanced IBRS + LFENCE
+ ibrs - use IBRS to protect kernel
Not specifying this option is equivalent to
spectre_v2=auto.
diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst
index e70b365dbc60..80cf2ef2a506 100644
--- a/Documentation/admin-guide/pm/cpuidle.rst
+++ b/Documentation/admin-guide/pm/cpuidle.rst
@@ -676,8 +676,8 @@ the ``menu`` governor to be used on the systems that use the ``ladder`` governor
by default this way, for example.
The other kernel command line parameters controlling CPU idle time management
-described below are only relevant for the *x86* architecture and some of
-them affect Intel processors only.
+described below are only relevant for the *x86* architecture and references
+to ``intel_idle`` affect Intel processors only.
The *x86* architecture support code recognizes three kernel command line
options related to CPU idle time management: ``idle=poll``, ``idle=halt``,
idle, so it may very well hurt single-thread computation performance as well as
energy-efficiency. Thus using it for performance reasons may not be a good idea
at all.]
-The ``idle=nomwait`` option disables the ``intel_idle`` driver and causes
-``acpi_idle`` to be used (as long as all of the information needed by it is
-there in the system's ACPI tables), but it is not allowed to use the
-``MWAIT`` instruction of the CPUs to ask the hardware to enter idle states.
+The ``idle=nomwait`` option prevents the use of ``MWAIT`` instruction of
+the CPU to enter idle states. When this option is used, the ``acpi_idle``
+driver will use the ``HLT`` instruction instead of ``MWAIT``. On systems
+running Intel processors, this option disables the ``intel_idle`` driver
+and forces the use of the ``acpi_idle`` driver instead. Note that in either
+case, the ``acpi_idle`` driver will function only if all the information needed
+by it is in the system's ACPI tables.
In addition to the architecture-level kernel command line options affecting CPU
idle time management, there are parameters affecting individual ``CPUIdle``
diff --git a/Documentation/admin-guide/security-bugs.rst b/Documentation/admin-guide/security-bugs.rst
index dcd6c93c7aac..521acda367ae 100644
--- a/Documentation/admin-guide/security-bugs.rst
+++ b/Documentation/admin-guide/security-bugs.rst
@@ -56,31 +56,28 @@ information submitted to the security list and any followup discussions
of the report are treated confidentially even after the embargo has been
lifted, in perpetuity.
-Coordination
-------------
-
-Fixes for sensitive bugs, such as those that might lead to privilege
-escalations, may need to be coordinated with the private
-<linux-distros@vs.openwall.org> mailing list so that distribution vendors
-are well prepared to issue a fixed kernel upon public disclosure of the
-upstream fix. Distros will need some time to test the proposed patch and
-will generally request at least a few days of embargo, and vendor update
-publication prefers to happen Tuesday through Thursday. When appropriate,
-the security team can assist with this coordination, or the reporter can
-include linux-distros from the start. In this case, remember to prefix
-the email Subject line with "[vs]" as described in the linux-distros wiki:
-<http://oss-security.openwall.org/wiki/mailing-lists/distros#how-to-use-the-lists>
+Coordination with other groups
+------------------------------
+
+The kernel security team strongly recommends that reporters of potential
+security issues NEVER contact the "linux-distros" mailing list until
+AFTER discussing it with the kernel security team. Do not Cc: both
+lists at once. You may contact the linux-distros mailing list after a
+fix has been agreed on and you fully understand the requirements that
+doing so will impose on you and the kernel community.
+
+The different lists have different goals and the linux-distros rules do
+not contribute to actually fixing any potential security problems.
CVE assignment
--------------
-The security team does not normally assign CVEs, nor do we require them
-for reports or fixes, as this can needlessly complicate the process and
-may delay the bug handling. If a reporter wishes to have a CVE identifier
-assigned ahead of public disclosure, they will need to contact the private
-linux-distros list, described above. When such a CVE identifier is known
-before a patch is provided, it is desirable to mention it in the commit
-message if the reporter agrees.
+The security team does not assign CVEs, nor do we require them for
+reports or fixes, as this can needlessly complicate the process and may
+delay the bug handling. If a reporter wishes to have a CVE identifier
+assigned, they should find one by themselves, for example by contacting
+MITRE directly. However, under no circumstances will a patch inclusion
+be delayed to wait for a CVE identifier to arrive.
Non-disclosure agreements
-------------------------
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index 9715685be6e3..568c24ff00a7 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -557,6 +557,15 @@ numa_balancing_scan_size_mb is how many megabytes worth of pages are
scanned for a given scan.
+oops_limit
+==========
+
+Number of kernel oopses after which the kernel should panic when
+``panic_on_oops`` is not set. Setting this to 0 disables checking
+the count. Setting this to 1 has the same effect as setting
+``panic_on_oops=1``. The default value is 10000.
+
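+For example, a stricter limit can be set at run time; 100 here is just an
+illustrative value::
+
+    # sysctl -w kernel.oops_limit=100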
+
osrelease, ostype & version:
============================
@@ -1177,6 +1186,16 @@ entry will default to 2 instead of 0.
2 Unprivileged calls to ``bpf()`` are disabled
= =============================================================
+
+warn_limit
+==========
+
+Number of kernel warnings after which the kernel should panic when
+``panic_on_warn`` is not set. Setting this to 0 disables checking
+the warning count. Setting this to 1 has the same effect as setting
+``panic_on_warn=1``. The default value is 0.
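+
+For example, to panic after five warnings (an illustrative value)::
+
+    # echo 5 > /proc/sys/kernel/warn_limit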
+
+
watchdog:
=========
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index 287b98708a40..70bab788fca3 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -31,17 +31,18 @@ see only some of them, depending on your kernel's configuration.
Table : Subdirectories in /proc/sys/net
- ========= =================== = ========== ==================
+ ========= =================== = ========== ===================
Directory Content Directory Content
- ========= =================== = ========== ==================
- core General parameter appletalk Appletalk protocol
- unix Unix domain sockets netrom NET/ROM
- 802 E802 protocol ax25 AX25
- ethernet Ethernet protocol rose X.25 PLP layer
+ ========= =================== = ========== ===================
+ 802 E802 protocol mptcp Multipath TCP
+ appletalk Appletalk protocol netfilter Network Filter
+ ax25 AX25 netrom NET/ROM
+ bridge Bridging rose X.25 PLP layer
+ core General parameter tipc TIPC
+ ethernet Ethernet protocol unix Unix domain sockets
ipv4 IP version 4 x25 X.25 protocol
- bridge Bridging decnet DEC net
- ipv6 IP version 6 tipc TIPC
- ========= =================== = ========== ==================
+ ipv6 IP version 6
+ ========= =================== = ========== ===================
1. /proc/sys/net/core - Network core options
============================================
diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index 64aeee1009ca..fdc9c99437d1 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -61,6 +61,7 @@ Currently, these files are in /proc/sys/vm:
- overcommit_memory
- overcommit_ratio
- page-cluster
+- page_lock_unfairness
- panic_on_oom
- percpu_pagelist_fraction
- stat_interval
@@ -741,6 +742,14 @@ extra faults and I/O delays for following faults if they would have been part of
that consecutive pages readahead would have brought in.
+page_lock_unfairness
+====================
+
+This value determines the number of times that the page lock can be
+stolen from under a waiter. After the lock is stolen the number of times
+specified in this file (default is 5), the "fair lock handoff" semantics
+will apply, and the waiter will only be awakened if the lock can be taken.
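+
+For example, lock stealing can be disabled entirely so that the fair
+handoff semantics always apply (an illustrative setting)::
+
+    # echo 0 > /proc/sys/vm/page_lock_unfairness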
+
panic_on_oom
============
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 59daa4c21816..6b70b6aabcff 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -70,8 +70,12 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A57 | #1742098 | ARM64_ERRATUM_1742098 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A72 | #853709 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A72 | #1655431 | ARM64_ERRATUM_1742098 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
@@ -130,6 +134,9 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Hisilicon | Hip08 SMMU PMCG | #162001800 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
+| Hisilicon | Hip08 SMMU PMCG | #162001900 | N/A |
+| | Hip09 SMMU PMCG | | |
++----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
+----------------+-----------------+-----------------+-----------------------------+
diff --git a/Documentation/atomic_bitops.txt b/Documentation/atomic_bitops.txt
index 093cdaefdb37..d8b101c97031 100644
--- a/Documentation/atomic_bitops.txt
+++ b/Documentation/atomic_bitops.txt
@@ -59,7 +59,7 @@ Like with atomic_t, the rule of thumb is:
- RMW operations that have a return value are fully ordered.
- RMW operations that are conditional are unordered on FAILURE,
- otherwise the above rules apply. In the case of test_and_{}_bit() operations,
+ otherwise the above rules apply. In the case of test_and_set_bit_lock(),
if the bit in memory is unchanged by the operation then it is deemed to have
failed.
diff --git a/Documentation/dev-tools/gdb-kernel-debugging.rst b/Documentation/dev-tools/gdb-kernel-debugging.rst
index 19df79286f00..afe4bc206486 100644
--- a/Documentation/dev-tools/gdb-kernel-debugging.rst
+++ b/Documentation/dev-tools/gdb-kernel-debugging.rst
@@ -39,6 +39,10 @@ Setup
this mode. In this case, you should build the kernel with
CONFIG_RANDOMIZE_BASE disabled if the architecture supports KASLR.
+- Build the gdb scripts (required on kernels v5.1 and above)::
+
+ make scripts_gdb
+
- Enable the gdb stub of QEMU/KVM, either
- at VM startup time by appending "-s" to the QEMU command line
diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
index e39d8f02e33c..3de6182cde97 100644
--- a/Documentation/devicetree/bindings/arm/qcom.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom.yaml
@@ -112,8 +112,8 @@ properties:
- const: qcom,msm8974
- items:
- - const: qcom,msm8916-mtp/1
- const: qcom,msm8916-mtp
+ - const: qcom,msm8916-mtp/1
- const: qcom,msm8916
- items:
diff --git a/Documentation/devicetree/bindings/arm/xilinx.yaml b/Documentation/devicetree/bindings/arm/xilinx.yaml
index c73b1f5c7f49..696cf7d9dbdd 100644
--- a/Documentation/devicetree/bindings/arm/xilinx.yaml
+++ b/Documentation/devicetree/bindings/arm/xilinx.yaml
@@ -58,10 +58,16 @@ properties:
- const: xlnx,zynqmp-zc1254
- const: xlnx,zynqmp
- - description: Xilinx internal board zc1275
+ - description: Xilinx RFSoC characterization kit zcu1275
items:
- - const: xlnx,zynqmp-zc1275-revA
- - const: xlnx,zynqmp-zc1275
+ - const: xlnx,zynqmp-zcu1275-revA
+ - const: xlnx,zynqmp-zcu1275
+ - const: xlnx,zynqmp
+
+ - description: Xilinx RFSoC characterization kit zcu1285
+ items:
+ - const: xlnx,zynqmp-zcu1285-revA
+ - const: xlnx,zynqmp-zcu1285
- const: xlnx,zynqmp
- description: Xilinx 96boards compatible board zcu100
diff --git a/Documentation/devicetree/bindings/arm/zynq/zynq-efuse.txt b/Documentation/devicetree/bindings/arm/zynq/zynq-efuse.txt
new file mode 100644
index 000000000000..39817e9750c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/zynq/zynq-efuse.txt
@@ -0,0 +1,15 @@
+Device tree bindings for Zynq's eFuse Controller
+
+The Zynq eFuse controller provides access to the chip eFuses, which contain
+information about the device DNA, security settings and device status.
+
+Required properties:
+ compatible: Compatibility string. Must be "xlnx,zynq-efuse".
+ reg: Specify the base and size of the EFUSE controller registers
+ in the memory map. E.g.: reg = <0xf800d000 0x20>;
+
+Example:
+efuse: efuse@f800d000 {
+ compatible = "xlnx,zynq-efuse";
+ reg = <0xf800d000 0x20>;
+};
diff --git a/Documentation/devicetree/bindings/arm/zynq/zynq-ocmc.txt b/Documentation/devicetree/bindings/arm/zynq/zynq-ocmc.txt
new file mode 100644
index 000000000000..b6dbf05b4eb5
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/zynq/zynq-ocmc.txt
@@ -0,0 +1,17 @@
+Device tree bindings for Zynq's OCM Controller
+
+The OCM is divided into four 64kB segments which can be separately configured
+to low or high location. Location is controlled via SLCR.
+
+Required properties:
+ compatible: Compatibility string. Must be "xlnx,zynq-ocmc-1.0".
+ reg: Specify the base and size of the OCM controller registers
+ in the memory map. E.g.: reg = <0xf800c000 0x1000>;
+
+Example:
+ocmc: ocmc@f800c000 {
+ compatible = "xlnx,zynq-ocmc-1.0";
+ interrupt-parent = <&intc>;
+ interrupts = <0 3 4>;
+ reg = <0xf800c000 0x1000>;
+};
diff --git a/Documentation/devicetree/bindings/ata/ahci-ceva.txt b/Documentation/devicetree/bindings/ata/ahci-ceva.txt
index 7561cc4de371..d34f11771d5f 100644
--- a/Documentation/devicetree/bindings/ata/ahci-ceva.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-ceva.txt
@@ -38,6 +38,8 @@ Required properties:
Optional properties:
- ceva,broken-gen2: limit to gen1 speed instead of gen2.
+ - dma-coherent: Enable this flag if CCI is enabled in design.
+ Adding this flag configures AXI cache control register.
Examples:
ahci@fd0c0000 {
@@ -56,4 +58,5 @@ Examples:
ceva,p1-burst-params = /bits/ 8 <0x0A 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x0216 0x7F06>;
ceva,broken-gen2;
+ dma-coherent;
};
diff --git a/Documentation/devicetree/bindings/clock/idt,idt8t49n24x.txt b/Documentation/devicetree/bindings/clock/idt,idt8t49n24x.txt
new file mode 100644
index 000000000000..8b52017cf1c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/idt,idt8t49n24x.txt
@@ -0,0 +1,156 @@
+Binding for IDT 8T49N24x Universal Frequency Translator
+
+The 8T49N241 has one fractional-feedback PLL that can be used as a
+jitter attenuator and frequency translator. It is equipped with one
+integer and three fractional output dividers, allowing the generation
+of up to four different output frequencies, ranging from 8kHz to 1GHz.
+These frequencies are completely independent of each other, the input
+reference frequencies and the crystal reference frequency. The device
+places virtually no constraints on input to output frequency conversion,
+supporting all FEC rates, including the new revision of ITU-T
+Recommendation G.709 (2009), most with 0ppm conversion error.
+The outputs may select among LVPECL, LVDS, HCSL or LVCMOS output levels.
+
+The driver can read a full register map from the DT, and will use that
+register map to initialize the attached part (via I2C) when the system
+boots. Any configuration not supported by the common clock framework
+must be done via the full register map, including optimized settings.
+
+The 8T49N241 accepts up to two differential or single-ended input clocks
+and a fundamental-mode crystal input. The internal PLL can lock to either
+of the input reference clocks or just to the crystal to behave as a
+frequency synthesizer. The PLL can use the second input for redundant
+backup of the primary input reference, but in this case, both input clock
+references must be related in frequency.
+
+All outputs are currently assumed to be LVDS, unless overridden in the
+full register map in the DT.
+
+==I2C device node==
+
+Required properties:
+- compatible: shall be "idt,idt8t49n241"
+- reg: i2c device address, shall be one of 0x7C, 0x6C, 0x7D, 0x6D,
+ 0x7E, 0x6E, 0x7F, 0x6F.
+- #clock-cells: From common clock bindings: Shall be 1.
+
+- clocks: from common clock binding; input clock handle. Required.
+- clock-names: from common clock binding; clock input names, shall be
+ one of "input-clk0", "input-clk1", "input-xtal". Required.
+
+==Mapping between clock specifier and physical pins==
+
+When referencing the provided clock in the DT using phandle and
+clock specifier, the following mapping applies:
+
+8T49N241:
+ 0 -- Q0
+ 1 -- Q1
+ 2 -- Q2
+ 3 -- Q3
+
+==Example==
+
+/* Example1: 25MHz input clock (via CLK0) */
+
+ref25: ref25m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+};
+
+i2c-master-node {
+
+ /* IDT 8T49N241 i2c universal frequency translator */
+	i2c241: clock-generator@6c {
+ compatible = "idt,idt8t49n241";
+ reg = <0x6c>;
+ #clock-cells = <1>;
+
+ /* Connect input-clk0 to 25MHz reference */
+ clocks = <&ref25m>;
+ clock-names = "input-clk0";
+ };
+};
+
+/* Consumer referencing the 8T49N241 pin Q1 */
+consumer {
+ ...
+ clocks = <&i2c241 1>;
+ ...
+};
+
+/* Example2: 40MHz xtal frequency, specify all settings */
+
+ref40: ref40m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <40000000>;
+};
+
+i2c-master-node {
+
+ /* IDT 8T49N241 i2c universal frequency translator */
+	i2c241: clock-generator@6c {
+ compatible = "idt,idt8t49n241";
+ reg = <0x6c>;
+ #clock-cells = <1>;
+
+ /* Connect input-xtal to 40MHz reference */
+ clocks = <&ref40m>;
+ clock-names = "input-xtal";
+
+ settings=[
+09 50 00 60 67 C5 6C FF 03 00 30 00 00 01 00 00
+01 07 00 00 07 00 00 77 6D 06 00 00 00 00 00 FF
+FF FF FF 00 3F 00 2A 00 16 33 33 00 01 00 00 D0
+00 00 00 00 00 00 00 00 00 04 00 00 00 02 00 00
+00 00 00 00 00 00 00 17 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 D7 0A 2B 20 00 00 00 0B
+00 00 00 00 00 00 00 00 00 00 27 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+C3 00 08 01 00 00 00 00 00 00 00 00 00 30 00 00
+00 0A 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 85 00 00 9C 01 D4 02 71 07 00 00 00
+00 83 00 10 02 08 8C
+];
+ };
+};
diff --git a/Documentation/devicetree/bindings/clock/silabs,si5324.txt b/Documentation/devicetree/bindings/clock/silabs,si5324.txt
new file mode 100644
index 000000000000..642af113aa6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/silabs,si5324.txt
@@ -0,0 +1,78 @@
+Binding for Silicon Labs si5324, si5328 and si5319 programmable
+I2C clock generator.
+
+Reference
+This binding uses the common clock binding[1].
+The si5324 is a programmable I2C low-bandwidth, jitter-attenuating, precision
+clock multiplier with up to 2 output clocks. The internal structure can be
+found in [2].
+The internal pin structure of the si5328 and si5319 can be found in [3].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Si5324 Data Sheet
+ http://www.silabs.com/Support%20Documents/TechnicalDocs/Si5324.pdf
+[3] Si53xx Reference Manual
+ http://www.silabs.com/Support%20Documents/TechnicalDocs/
+ Si53xxReferenceManual.pdf
+
+==I2C device node==
+
+Required properties:
+- compatible: should be one of
+ "silabs,si5324"
+ "silabs,si5319"
+ "silabs,si5328"
+- reg: i2c device address.
+- #clock-cells: from common clock binding; shall be set to 1.
+- clocks: from common clock binding; list of parent clock
+ handles, clock name should be one of
+ "xtal"
+ "clkin1"
+ "clkin2"
+- #address-cells: shall be set to 1.
+- #size-cells: shall be set to 0.
+
+Optional properties:
+- silabs,pll-source: pair of (number, source) for each PLL. Allows the
+  clock source of the PLL to be overridden.
+
+==Child nodes==
+
+Each of the clock outputs can be overridden individually by adding a child
+node to the I2C device node. If a child node for a clock output is not set,
+the EEPROM configuration is not overwritten.
+
+Required child node properties:
+- reg: number of clock output.
+- clock-frequency: default output frequency at power on
+
+Optional child node properties:
+- silabs,drive-strength: output drive strength in mA, shall be one of {2,4,6,8}.
+
+Example:
+The following example describes the ZCU102 board with an HDMI design which
+uses the si5319 as clock generator. The XTAL input is hard-wired on the board
+to act as input clock with a frequency of 114.285MHz.
+
+refhdmi: refhdmi {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <114285000>;
+};
+
+/* Si5319 i2c clock generator */
+si5319: clock-generator@68 {
+ status = "okay";
+ compatible = "silabs,si5319";
+ reg = <0x68>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #clock-cells = <1>;
+ clocks = <&refhdmi>;
+ clock-names = "xtal";
+
+ clk0 {
+ reg = <0>;
+ clock-frequency = <27000000>;
+ };
+};
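+
+The EEPROM configuration of a second output could likewise be overridden by
+adding another child node inside the clock-generator node; a minimal sketch,
+with hypothetical frequency and drive-strength values:
+
+	clk1 {
+		reg = <1>;
+		clock-frequency = <148500000>;
+		silabs,drive-strength = <8>;
+	};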
diff --git a/Documentation/devicetree/bindings/clock/xlnx,versal-clk.txt b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.txt
new file mode 100644
index 000000000000..94a27f65bcac
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.txt
@@ -0,0 +1,48 @@
+--------------------------------------------------------------------------
+Device Tree Clock bindings for the Xilinx Versal
+--------------------------------------------------------------------------
+The clock controller is a hardware block of the Xilinx Versal clock tree. It
+reads the required input clock frequencies from the devicetree and acts as the
+clock provider for all clock consumers of PS clocks.
+
+See clock-bindings.txt for more information on the generic clock bindings.
+
+Required properties:
+ - #clock-cells: Must be 1
+ - compatible: Must contain: "xlnx,versal-clk"
+ - clocks: List of clock specifiers which are external input
+ clocks to the given clock controller. Please refer
+ the next section to find the input clocks for a
+ given controller.
+ - clock-names:		List of clock names which are external input clocks
+ to the given clock controller. Please refer to the
+ clock bindings for more details.
+
+Input clocks for Xilinx Versal clock controller:
+
+The Xilinx Versal has one primary and two alternative reference clock inputs.
+These required clock inputs are:
+ - ref_clk
+ - alt_ref_clk
+ - pl_alt_ref_clk
+
+Output clocks are registered based on clock information received
+from firmware. Output clock indices are listed in
+include/dt-bindings/clock/xlnx-versal-clk.h; a consumer sketch follows
+the example below.
+
+-------
+Example
+-------
+
+firmware {
+ zynqmp_firmware: zynqmp-firmware {
+ compatible = "xlnx,zynqmp-firmware";
+ method = "smc";
+ versal_clk: clock-controller {
+ #clock-cells = <1>;
+ compatible = "xlnx,versal-clk";
+ clocks = <&ref_clk>, <&alt_ref_clk>, <&pl_alt_ref_clk>;
+ clock-names = "ref_clk", "alt_ref_clk", "pl_alt_ref_clk";
+ };
+ };
+};
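+
+A PS peripheral then consumes the provided clocks by output index; a minimal
+consumer sketch, where UART0_REF stands in for one of the index macros from
+include/dt-bindings/clock/xlnx-versal-clk.h (the macro name here is only
+illustrative):
+
+	serial@ff000000 {
+		clocks = <&versal_clk UART0_REF>;
+	};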
diff --git a/Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.txt b/Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.txt
new file mode 100644
index 000000000000..226bfb9261d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.txt
@@ -0,0 +1,12 @@
+Xilinx ZynqMP AES hw acceleration support
+
+The ZynqMP PS-AES hw accelerator is used to encrypt/decrypt
+the given user data.
+
+Required properties:
+- compatible: should contain "xlnx,zynqmp-aes"
+
+Example:
+ zynqmp_aes {
+ compatible = "xlnx,zynqmp-aes";
+ };
diff --git a/Documentation/devicetree/bindings/crypto/zynqmp-rsa.txt b/Documentation/devicetree/bindings/crypto/zynqmp-rsa.txt
new file mode 100644
index 000000000000..6b4c0e0446fc
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/zynqmp-rsa.txt
@@ -0,0 +1,12 @@
+Xilinx ZynqMP RSA hw acceleration support
+
+The ZynqMP PS-RSA hw accelerator is used to encrypt/decrypt
+the given user data.
+
+Required properties:
+- compatible: should contain "xlnx,zynqmp-rsa"
+
+Example:
+ xlnx_rsa: zynqmp_rsa {
+ compatible = "xlnx,zynqmp-rsa";
+ };
diff --git a/Documentation/devicetree/bindings/crypto/zynqmp-sha.txt b/Documentation/devicetree/bindings/crypto/zynqmp-sha.txt
new file mode 100644
index 000000000000..c7be6e2ce246
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/zynqmp-sha.txt
@@ -0,0 +1,12 @@
+Xilinx ZynqMP SHA3(keccak-384) hw acceleration support.
+
+The ZynqMP PS-SHA hw accelerator is used to calculate the
+SHA3(keccak-384) hash value on the given user data.
+
+Required properties:
+- compatible: should contain "xlnx,zynqmp-keccak-384"
+
+Example:
+ xlnx_keccak_384: sha384 {
+ compatible = "xlnx,zynqmp-keccak-384";
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/bridge.txt b/Documentation/devicetree/bindings/display/xlnx/bridge.txt
new file mode 100644
index 000000000000..c5f7c0a1dea0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/bridge.txt
@@ -0,0 +1,29 @@
+Xilinx DRM bridge
+-----------------
+
+The Xilinx DRM provides an interface layer, the Xilinx bridge, to bridge
+multiple components with a series of functions. It models a simple
+unidirectional communication: single client -> single bridge. The client
+is not limited to DRM compatible drivers and can be any subsystem driver,
+but the client driver should call the bridge functions explicitly.
+
+Provider
+--------
+
+The bridge provider should assign a corresponding of_node to struct xlnx_bridge.
+For example, if its own node is used,
+
+ provider_node: provider_node {
+ };
+
+ bridge.of_node = provider_device->of_node;
+
+Client
+------
+
+The bridge client should have a phandle to the bridge device node. The bridge
+device node should be passed to get a bridge instance,
+
+ client_node {
+ xlnx,bridge = <&provider_node>;
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,dsi.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,dsi.txt
new file mode 100644
index 000000000000..a545a0d818e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,dsi.txt
@@ -0,0 +1,74 @@
+Device-Tree bindings for Xilinx MIPI DSI Tx IP core
+
+The IP core supports transmission of video data in MIPI DSI protocol.
+
+Required properties:
+ - compatible: Should be "xlnx,dsi".
+
+ - reg: Base address and size of the IP core.
+
+ - xlnx,dsi-data-type: Color format. The value should be one of "MIPI_DSI_FMT_RGB888",
+ "MIPI_DSI_FMT_RGB666", "MIPI_DSI_FMT_RGB666_PACKED" or "MIPI_DSI_FMT_RGB565".
+
+ - simple_panel: The subnode for the connected panel. This represents the
+   DSI peripheral connected to the DSI host node. Please refer to
+   Documentation/devicetree/bindings/display/mipi-dsi-bus.txt. The
+   simple-panel driver has the auo,b101uan01 panel timing parameters added
+   along with other existing panels. The DSI driver derives the required
+   Tx IP controller timing values from the panel timing parameters.
+
+ - port: The logical block can be used / connected independently with an
+   external device. In the display controller port nodes, the topology
+   for the entire pipeline should be described using the DT bindings defined in
+   Documentation/devicetree/bindings/graph.txt.
+
+ - xlnx,dsi-num-lanes: Possible number of DSI lanes for the Tx controller.
+   The values should be 1, 2, 3 or 4. Based on xlnx,dsi-num-lanes and the
+   line rate for the MIPI D-PHY core in Mbps, the AXI4-stream received by
+   the Xilinx MIPI DSI Tx IP core adds markers as per the DSI protocol and
+   the packet thus framed is converted to serial data by the MIPI D-PHY core.
+   Please refer to Xilinx PG238 for more details. This value should be equal
+   to the number of lanes supported by the connected DSI panel. The panel has
+   to support this value or has to be programmed to the same value that the
+   DSI Tx controller is configured to.
+
+ - clocks: List of phandles to the Video and 200MHz DPHY clocks.
+
+ - clock-names: Must contain "s_axis_aclk" and "dphy_clk_200M" in same order as
+ clocks listed in clocks property.
+
+Required simple_panel properties:
+ - compatible: Value should be one of the panel names in
+ Documentation/devicetree/bindings/display/panel/. e.g. "auo,b101uan01".
+ For available panel compatible strings, please refer to bindings in
+ Documentation/devicetree/bindings/display/panel/
+
+Optional properties:
+ - xlnx,vpss: vpss phandle
+ This handle is required only when VPSS is connected to DSI as bridge.
+ - xlnx,dsi-cmd-mode: denotes that command mode is enabled.
+
+Example:
+
+#include <dt-bindings/drm/mipi-dsi.h>
+ mipi_dsi_tx_subsystem@80000000 {
+ compatible = "xlnx,dsi";
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ xlnx,dsi-num-lanes = <4>;
+ xlnx,dsi-data-type = <MIPI_DSI_FMT_RGB888>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ xlnx,vpss = <&v_proc_ss_0>;
+ clock-names = "dphy_clk_200M", "s_axis_aclk";
+ clocks = <&misc_clk_0>, <&misc_clk_1>;
+ encoder_dsi_port: port@0 {
+ reg = <0>;
+ dsi_encoder: endpoint {
+ remote-endpoint = <&xyz_port>;
+ };
+ };
+ simple_panel: simple-panel@0 {
+ compatible = "auo,b101uan01";
+ reg = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,mixer.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,mixer.txt
new file mode 100644
index 000000000000..20f4cec27175
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,mixer.txt
@@ -0,0 +1,163 @@
+Device-Tree bindings for Xilinx Video Mixer IP core
+
+The IP core provides a flexible video processing block for alpha blending
+and compositing multiple video and/or graphics layers.
+Support for up to sixteen layers based on IP version, with an optional logo
+layer, using a combination of video inputs from either frame buffer or
+streaming video cores (through AXI4-Stream interfaces) is provided.
+The Video Mixer always has one streaming input layer, known as master layer.
+
+Required properties:
+ - compatible: Must contain at least one of
+ "xlnx,mixer-4.0" (MIXER 4.0 version)
+ "xlnx,mixer-3.0" (MIXER 3.0 version)
+ - reg: Base address and size of the IP core.
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for the interrupt controller.
+ - reset-gpios: GPIO to reset the mixer IP.
+ - xlnx,dma-addr-width: dma address width, valid values are 32 and 64
+ - xlnx,bpc: bits per component for mixer
+ - xlnx,ppc: pixel per clock for mixer
+ - xlnx,num-layers: Total number of layers (excluding logo).
+   The value ranges from 1-9 for compatible string xlnx,mixer-3.0 and
+   from 1-17 for compatible string xlnx,mixer-4.0.
+ - layer_[x]: node for [x] layer
+ - xlnx,layer-id: layer identifier number
+ - xlnx,vformat: video format for layer. See list of supported formats below.
+ - xlnx,layer-max-width: max layer width, mandatory for the master layer.
+   For overlay layers this is mandatory if scaling is allowed, otherwise
+   it is not required.
+ - xlnx,layer-max-height: max layer height, mandatory for the master layer.
+   Not required for overlay layers.
+ - xlnx,layer-primary: denotes the primary layer, should be mentioned in node
+ of layer which is expected to be constructing the primary plane
+
+Optional properties:
+ - dmas: dma attached to the layer, mandatory for the master layer,
+   optional for all other layers
+ - dma-names: Should be "dma0", for more details on the DMA identifier string
+   refer to Documentation/devicetree/bindings/dma/dma.txt
+ - xlnx,layer-streaming: denotes that the layer can be streaming,
+   mandatory for the master layer. Streaming layers need an external dma,
+   whereas non-streaming layers read directly from memory.
+ - xlnx,layer-alpha: denotes that the layer can do alpha compositing
+ - xlnx,layer-scale: denotes that the layer can be scaled to 2x and 4x
+ - xlnx,logo-layer: denotes that the logo layer is enabled
+ - logo: logo layer
+ - xlnx,bridge: phandle to bridge node.
+ This handle is required only when VTC is connected as bridge.
+
+Supported Formats:
+ Mixer IP Format Driver supported Format String
+ BGR888 "RG24"
+ RGB888 "BG24"
+ XBGR2101010 "XB30"
+ XRGB8888 "XR24"
+ RGBA8888 "RA24"
+ ABGR8888 "AB24"
+ ARGB8888 "AR24"
+ XBGR8888 "XB24"
+ YUYV "YUYV"
+ UYVY "UYVY"
+ AYUV "AYUV"
+ NV12 "NV12"
+ NV16 "NV16"
+ Y8 "GREY"
+ Y10 "Y10 " (Note: Space included)
+ XVUY2101010 "XV30"
+ VUY888 "VU24"
+ XVUY8888 "XV24"
+ XV15 "XV15"
+ XV20 "XV20"
+Note: Format strings are case sensitive.
+
+Example:
+ v_mix_0: v_mix@80100000 {
+ compatible = "xlnx,mixer-3.0";
+ interrupt-parent = <&gic>;
+ interrupts = <0 93 4>;
+ reg = <0x0 0x80100000 0x0 0x80000>;
+
+ xlnx,dma-addr-width=<32>;
+ reset-gpios = <&gpio 1 1>;
+
+ xlnx,bpc = <8>;
+ xlnx,ppc = <2>;
+ xlnx,num-layers = <8>;
+ xlnx,logo-layer;
+ xlnx,bridge = <&v_tc_0>;
+
+ mixer_port: mixer_port@0 {
+ reg = <0>;
+ mixer_crtc: endpoint {
+ remote-endpoint = <&sdi_encoder>;
+ };
+ };
+ xv_mix_master: layer_0 {
+ xlnx,layer-id = <0>;
+ xlnx,vformat = "YUYV";
+ xlnx,layer-max-width = <4096>;
+			xlnx,layer-max-height = <2160>;
+ dmas = <&axi_vdma_0 0>;
+ dma-names = "dma0";
+ xlnx,layer-streaming;
+ xlnx,layer-primary;
+ };
+ xv_mix_overlay_1: layer_1 {
+ xlnx,layer-id = <1>;
+ xlnx,vformat = "NV16";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+ xlnx,layer-max-width=<1920>;
+ };
+ xv_mix_overlay_2: layer_2 {
+ xlnx,layer-id = <2>;
+ xlnx,vformat = "YUYV";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+ xlnx,layer-max-width=<1920>;
+ };
+ xv_mix_overlay_3: layer_3 {
+ xlnx,layer-id = <3>;
+ xlnx,vformat = "AYUV";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+ xlnx,layer-max-width=<1920>;
+ };
+ xv_mix_overlay_4: layer_4 {
+ xlnx,layer-id = <4>;
+ xlnx,vformat = "GREY";
+ dmas = <&scaler_v_frmbuf_rd_0 0>;
+ dma-names = "dma0";
+ xlnx,layer-streaming;
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+ xlnx,layer-max-width=<1920>;
+ };
+ xv_mix_overlay_5: layer_5 {
+ xlnx,layer-id = <5>;
+ xlnx,vformat = "AB24";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+ xlnx,layer-max-width=<1920>;
+ };
+ xv_mix_overlay_6: layer_6 {
+ xlnx,layer-id = <6>;
+ xlnx,vformat = "XB24";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+ xlnx,layer-max-width=<1920>;
+ };
+ xv_mix_overlay_7: layer_7 {
+ xlnx,layer-id = <7>;
+ xlnx,vformat = "BG24";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+ xlnx,layer-max-width=<1920>;
+ };
+ xv_mix_logo: logo {
+ xlnx,layer-id = <8>;
+ xlnx,logo-height = <64>;
+ xlnx,logo-width = <64>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,pl-disp.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,pl-disp.txt
new file mode 100644
index 000000000000..c6034bffc64a
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,pl-disp.txt
@@ -0,0 +1,41 @@
+Xilinx PL Display driver
+------------------------
+
+PL Display is a logical device to provide completeness to the Xilinx display
+pipeline. It is a software driver providing the DRM components CRTC
+and plane for various IPs using Xilinx display pipelines.
+
+A linear pipeline with multiple blocks:
+DMA --> PL_Display --> SDI
+
+Required properties:
+
+- compatible: Must be "xlnx,pl-disp"
+- dmas: dma attached to the pipeline
+- dma-names: names for the dma
+- xlnx,vformat: video format for the layer
+- port: The logical block can be used / connected independently with an
+  external device. In the display controller port nodes, the topology
+  for the entire pipeline should be described using the DT bindings defined in
+  Documentation/devicetree/bindings/graph.txt.
+- reg: Base address and size of device
+
+Optional properties:
+ - xlnx,bridge: bridge phandle
+ This handle is required only when VTC is connected as bridge.
+
+Example:
+
+ drm-pl-disp-drv {
+ compatible = "xlnx,pl-disp";
+ dmas = <&axi_vdma_0 0>;
+ dma-names = "dma0";
+ xlnx,vformat = "YUYV";
+ xlnx,bridge = <&v_tc_0>;
+ pl_disp_port@0 {
+ reg = <0>;
+ endpoint {
+ remote-endpoint = <&sdi_port>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,sdi-tx.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,sdi-tx.txt
new file mode 100644
index 000000000000..971ac5304761
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,sdi-tx.txt
@@ -0,0 +1,54 @@
+Device-Tree bindings for Xilinx SDI Tx subsystem
+
+The IP core supports transmission of video data in the SDI Tx protocol.
+
+Required properties:
+ - compatible: Should be "xlnx,sdi-tx".
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for interrupt controller.
+ - reg: Base address and size of the IP core.
+ - port: The logical block can be used / connected independently with an
+   external device. In the display controller port nodes, the topology
+   for the entire pipeline should be described using the DT bindings defined in
+   Documentation/devicetree/bindings/graph.txt.
+   A minimum of one port is required. At most, 2 ports are present.
+   The reg index for the AXI4-stream port is 0 and for ancillary data it is 1.
+ - clocks: List of phandles to AXI Lite, Video and SDI Tx Clock
+ - clock-names: Must contain "s_axi_aclk", "video_in_clk" and "sdi_tx_clk"
+ in same order as clocks listed in clocks property.
+
+Optional properties:
+ - xlnx,vpss: vpss phandle
+ This handle is required only when VPSS is connected to SDI as bridge.
+ - xlnx,tx-insert-c-str-st352: Insert ST352 payload in Chroma stream.
+
+Example:
+
+ sdi_tx_subsystem@80000000 {
+ compatible = "xlnx,sdi-tx";
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 90 4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ xlnx,vpss = <&v_proc_ss_0>;
+ clock-names = "s_axi_aclk", "video_in_clk", "sdi_tx_clk";
+ clocks = <&misc_clk_0>, <&misc_clk_1>, <&misc_clk_2>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ encoder_sdi_port: port@0 {
+ reg = <0>;
+ sdi_encoder: endpoint {
+ remote-endpoint = <&pl_disp_crtc>;
+ };
+ };
+
+ sdi_audio_port: port@1 {
+ reg = <1>;
+ sdi_audio_sink_port: endpoint {
+ remote-endpoint = <&sditx_audio_embed_src_port>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-csc.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-csc.txt
new file mode 100644
index 000000000000..cf80d185d429
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-csc.txt
@@ -0,0 +1,35 @@
+Xilinx VPSS Color Space Converter
+-----------------------------------------
+The Xilinx VPSS Color Space Converter is a Video IP that supports
+color space conversion from RGB to YUV 444/422/420 and vice versa.
+
+Required properties:
+
+- compatible: Must be "xlnx,vpss-csc".
+
+- reg: Physical base address and length of registers set for the device.
+
+- xlnx,video-width: This property qualifies the video format with sample
+ width expressed as a number of bits per pixel component. Supported video
+ width values are 8/10/12/16.
+
+- reset-gpios: GPIO specifier to assert/de-assert the reset line.
+
+- clocks: phandle to IP clock.
+
+- xlnx,max-width: Maximum number of pixels in a line.
+ Valid range from 64 to 8192.
+
+- xlnx,max-height: Maximum number of lines in a frame.
+ Valid range from 64 to 4320.
+
+Example:
+ csc@a0040000 {
+ compatible = "xlnx,vpss-csc";
+ reg = <0x0 0xa0040000 0x0 0x10000>;
+ reset-gpios = <&gpio 0x0 GPIO_ACTIVE_LOW>;
+ xlnx,video-width = <8>;
+ clocks = <&misc_clk_0>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+	};
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-scaler.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-scaler.txt
new file mode 100644
index 000000000000..76ee3ec15a9a
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-scaler.txt
@@ -0,0 +1,51 @@
+Xilinx VPSS Scaler
+------------------
+The Xilinx VPSS Scaler is a Video IP that supports upscaling,
+downscaling and no-scaling functionality. It supports custom
+resolution values between 0 and 4096.
+
+Required properties:
+
+- compatible: Must be "xlnx,vpss-scaler-2.2" or "xlnx,vpss-scaler".
+
+- reg: Physical base address and length of registers set for the device.
+
+- xlnx,num-hori-taps: The number of horizontal taps for the scaling filter.
+  Supported tap values are 2/4/6/8/10/12.
+
+- xlnx,num-vert-taps: The number of vertical taps for the scaling filter.
+  Supported tap values are 2/4/6/8/10/12.
+
+ A value of 2 represents bilinear filters. A value of 4 represents bicubic.
+ Values 6, 8, 10, 12 represent polyphase filters.
+
+- xlnx,pix-per-clk : The pixels per clock property of the IP.
+  Supported values are 1 and 2.
+
+- reset-gpios: GPIO specifier to assert/de-assert the reset line.
+
+- clocks: List of phandles to AXI Lite and Video clock
+
+- clock-names: Must contain "aclk_ctrl" and "aclk_axis" in same order as clocks
+ listed in clocks property.
+
+- xlnx,max-width: Maximum number of pixels in a line.
+ Valid range from 64 to 8192.
+
+- xlnx,max-height: Maximum number of lines in a frame.
+ Valid range from 64 to 4320.
+
+Example:
+ scaler@a0040000 {
+ compatible = "xlnx,vpss-scaler";
+ reg = <0x0 0xa0000000 0x0 0x40000>;
+ reset-gpios = <&gpio 0x0 GPIO_ACTIVE_LOW>;
+ xlnx,num-hori-taps = <8>;
+ xlnx,num-vert-taps = <8>;
+ xlnx,pix-per-clk = <2>;
+ clock-names = "aclk_ctrl", "aclk_axis";
+ clocks = <&misc_clk_0>, <&misc_clk_1>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+	};
+
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,vtc.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,vtc.txt
new file mode 100644
index 000000000000..6a4d5bcc5e59
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,vtc.txt
@@ -0,0 +1,32 @@
+Device-Tree bindings for Xilinx Video Timing Controller (VTC)
+
+Xilinx VTC is a general purpose video timing generator and detector.
+The input side of this core automatically detects horizontal and
+vertical synchronization pulses, polarity, blanking timing and active pixels.
+On the output, it generates the horizontal and vertical blanking and
+synchronization pulses used with a standard video system, including support
+for programmable pulse polarity.
+
+The core is commonly used with Video in to AXI4-Stream core to detect the
+format and timing of incoming video data or with AXI4-Stream to Video out core
+to generate outgoing video timing for downstream sinks like a video monitor.
+
+For details please refer to
+https://www.xilinx.com/support/documentation/ip_documentation/v_tc/v6_1/pg016_v_tc.pdf
+
+Required properties:
+ - compatible: value should be "xlnx,bridge-v-tc-6.1"
+ - reg: base address and size of the VTC IP
+ - xlnx,pixels-per-clock: Pixels per clock of the stream. Can be 1, 2 or 4.
+ - clocks: List of phandles for AXI Lite and Video Clock
+ - clock-names: Must contain "s_axi_aclk" and "clk" in same order as clocks listed
+ in clocks property.
+
+Example:
+ v_tc_0: v_tc@80030000 {
+ compatible = "xlnx,bridge-v-tc-6.1";
+ reg = <0x0 0x80030000 0x0 0x10000>;
+ xlnx,pixels-per-clock = <2>;
+ clock-names = "s_axi_aclk", "clk";
+ clocks = <&misc_clk_0>, <&misc_clk_1>;
+ };
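+
+A display pipeline node can then hand off its timing generation to this VTC
+instance through the xlnx,bridge property, as described in the pl-disp and
+mixer bindings; a minimal sketch:
+
+	drm-pl-disp-drv {
+		compatible = "xlnx,pl-disp";
+		xlnx,bridge = <&v_tc_0>;
+	};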
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.txt
new file mode 100644
index 000000000000..46d0c7671ee5
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.txt
@@ -0,0 +1,82 @@
+Xilinx ZynqMP DisplayPort subsystem
+-----------------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,zynqmp-dpsub-1.7".
+
+- reg: Physical base address and length of the registers set for the device.
+- reg-names: Must be "dp", "blend", "av_buf", and "aud" to map logical register
+ partitions.
+
+- interrupts: Interrupt number.
+- interrupt-parent: phandle for interrupt controller.
+
+- clocks: phandles for the axi, audio, non-live video, and live video clocks.
+  The axi clock is required. The audio clock is optional; if not present, audio
+  will be disabled. One of the non-live or live video clocks should be present.
+- clock-names: The identification strings are required. "dp_apb_clk" for the
+  axi clock. "dp_aud_clk" for the audio clock. "dp_vtc_pixel_clk_in" for the
+  non-live video clock. "dp_live_video_in_clk" for the live video clock
+  (clock from programmable logic).
+
+- phys: phandles for phy specifier. The number of lanes is configurable
+ between 1 and 2. The number of phandles should be 1 or 2.
+- phy-names: The identifier strings. "dp-phy" followed by index, 0 or 1.
+ For single lane, only "dp-phy0" is required. For dual lane, both "dp-phy0"
+ and "dp-phy1" are required where "dp-phy0" is the primary lane.
+
+- power-domains: phandle for the corresponding power domain
+
+- vid-layer, gfx-layer: Required to represent available layers
+
+Required layer properties
+
+- dmas: phandles for DMA channels as defined in
+ Documentation/devicetree/bindings/dma/dma.txt.
+- dma-names: The identifier strings are required. "gfx0" for graphics layer
+ dma channel. "vid" followed by index (0 - 2) for video layer dma channels.
+
+Optional child node
+
+- The driver populates any child device node in this node. This can be used,
+ for example, to populate the sound device from the DisplayPort subsystem
+ driver.
+
+Example:
+ zynqmp-display-subsystem@fd4a0000 {
+ compatible = "xlnx,zynqmp-dpsub-1.7";
+ reg = <0x0 0xfd4a0000 0x0 0x1000>,
+ <0x0 0xfd4aa000 0x0 0x1000>,
+ <0x0 0xfd4ab000 0x0 0x1000>,
+ <0x0 0xfd4ac000 0x0 0x1000>;
+ reg-names = "dp", "blend", "av_buf", "aud";
+ interrupts = <0 119 4>;
+ interrupt-parent = <&gic>;
+
+ clock-names = "dp_apb_clk", "dp_aud_clk", "dp_live_video_in_clk";
+ clocks = <&dp_aclk>, <&clkc 17>, <&si570_1>;
+
+ phys = <&lane1>, <&lane0>;
+ phy-names = "dp-phy0", "dp-phy1";
+
+ power-domains = <&pd_dp>;
+
+ vid-layer {
+ dma-names = "vid0", "vid1", "vid2";
+ dmas = <&xlnx_dpdma 0>,
+ <&xlnx_dpdma 1>,
+ <&xlnx_dpdma 2>;
+ };
+
+ gfx-layer {
+ dma-names = "gfx0";
+ dmas = <&xlnx_dpdma 3>;
+ };
+	};
diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
index 8a9f3559335b..7e14e26676ec 100644
--- a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -34,8 +34,8 @@ Example:
Use specific request line passing from dma
For example, MMC request line is 5
- sdhci: sdhci@98e00000 {
- compatible = "moxa,moxart-sdhci";
+ mmc: mmc@98e00000 {
+ compatible = "moxa,moxart-mmc";
reg = <0x98e00000 0x5C>;
interrupts = <5 0>;
clocks = <&clk_apb>;
diff --git a/Documentation/devicetree/bindings/dma/xilinx/axi-dma.txt b/Documentation/devicetree/bindings/dma/xilinx/axi-dma.txt
new file mode 100644
index 000000000000..f4f5b018dfa5
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/axi-dma.txt
@@ -0,0 +1,38 @@
+* Xilinx AXI DMA Test client
+
+Required properties:
+- compatible: Should be "xlnx,axi-dma-test-1.00.a"
+- dmas: a list of <[DMA device phandle] [Channel ID]> pairs,
+ where Channel ID is '0' for write/tx and '1' for read/rx
+ channel.
+- dma-names: a list of DMA channel names, one per "dmas" entry
+
+Example:
+++++++++
+
+dmatest_0: dmatest@0 {
+ compatible ="xlnx,axi-dma-test-1.00.a";
+ dmas = <&axi_dma_0 0
+ &axi_dma_0 1>;
+ dma-names = "axidma0", "axidma1";
+} ;
+
+
+Xilinx AXI DMA Device Node Example
+++++++++++++++++++++++++++++++++++++
+
+axi_dma_0: axidma@40400000 {
+ compatible = "xlnx,axi-dma-1.00.a";
+ #dma-cells = <1>;
+ reg = < 0x40400000 0x10000 >;
+ dma-channel@40400000 {
+ compatible = "xlnx,axi-dma-mm2s-channel";
+ interrupts = < 0 59 4 >;
+ xlnx,datawidth = <0x40>;
+ } ;
+ dma-channel@40400030 {
+ compatible = "xlnx,axi-dma-s2mm-channel";
+ interrupts = < 0 58 4 >;
+ xlnx,datawidth = <0x40>;
+ } ;
+} ;
diff --git a/Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt b/Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
new file mode 100644
index 000000000000..acdcc445f01b
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
@@ -0,0 +1,67 @@
+* Xilinx PS PCIe Root DMA
+
+Required properties:
+- compatible: Should be "xlnx,ps_pcie_dma-1.00.a"
+- reg: Register offset for Root DMA channels
+- reg-names: Name for the register. Should be "ps_pcie_regbase"
+- interrupts: Interrupt pin for Root DMA
+- interrupt-names: Name for the interrupt. Should be "ps_pcie_rootdma_intr"
+- interrupt-parent: Should be gic in case of zynqmp
+- rootdma: Indicates this platform device is root dma.
+ This is required as the same platform driver will be invoked by pcie end points too
+- dma_vendorid: 16 bit PCIe device vendor id.
+ This can be later used by dma client for matching while using dma_request_channel
+- dma_deviceid: 16 bit PCIe device id
+ This can be later used by dma client for matching while using dma_request_channel
+- numchannels: Indicates number of channels to be enabled for the device.
+ Valid values are from 1 to 4 for zynqmp
+- ps_pcie_channel[n] : One property for each channel to be enabled, e.g.
+  ps_pcie_channel0. This array contains channel specific properties,
+  decoded in the worked example below.
+ Index 0: Direction of channel
+ Direction of channel can be either PCIe Memory to AXI memory i.e., Host to Card or
+ AXI Memory to PCIe memory i.e., Card to Host
+ PCIe to AXI Channel Direction is represented as 0x1
+ AXI to PCIe Channel Direction is represented as 0x0
+ Index 1: Number of Buffer Descriptors
+ This number describes number of buffer descriptors to be allocated for a channel
+	Index 2: Number of Queues
+		Each channel has four DMA Buffer Descriptor queues.
+		By default all four queues will be managed by the Root DMA driver.
+		The user may choose to have only two queues, either the Source and its
+		Status queue or the Destination and its Status queue, handled by the driver.
+		The other two queues then need to be handled by user logic, which is not
+		part of this driver.
+		All queues on the host are represented by 0x4
+		Two queues on the host are represented by 0x2
+	Index 3: Coalesce Count
+		This number indicates the number of transfers after which an interrupt
+		needs to be raised for the particular channel. The allowed range is from
+		0 to 255
+	Index 4: Coalesce Count Timer frequency
+		This property controls the frequency of the poll timer. The poll timer is
+		created for a channel whenever a coalesce count value (>= 1) is programmed
+		for the particular channel. This timer helps drain out completed
+		transactions even when no interrupt is generated.
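+
+For example, ps_pcie_channel0 = <0x1 0x7CF 0x4 0x0 0x3E8> describes a
+PCIe to AXI (Host to Card) channel with 0x7CF (1999) buffer descriptors,
+all four queues handled by the Root DMA driver, interrupt coalescing
+disabled (count 0) and a poll timer frequency value of 0x3E8 (1000), which
+only takes effect when a non-zero coalesce count is programmed.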
+
+Client Usage:
+	DMA clients can request these channels using the dma_request_channel API
+
+
+Xilinx PS PCIe Root DMA node Example
+++++++++++++++++++++++++++++++++++++
+
+ pci_rootdma: rootdma@fd0f0000 {
+ compatible = "xlnx,ps_pcie_dma-1.00.a";
+ reg = <0x0 0xfd0f0000 0x0 0x1000>;
+ reg-names = "ps_pcie_regbase";
+ interrupts = <0 117 4>;
+ interrupt-names = "ps_pcie_rootdma_intr";
+ interrupt-parent = <&gic>;
+ rootdma;
+ dma_vendorid = /bits/ 16 <0x10EE>;
+ dma_deviceid = /bits/ 16 <0xD021>;
+ numchannels = <0x4>;
+ #size-cells = <0x5>;
+ ps_pcie_channel0 = <0x1 0x7CF 0x4 0x0 0x3E8>;
+ ps_pcie_channel1 = <0x0 0x7CF 0x4 0x0 0x3E8>;
+ ps_pcie_channel2 = <0x1 0x7CF 0x4 0x0 0x3E8>;
+ ps_pcie_channel3 = <0x0 0x7CF 0x4 0x0 0x3E8>;
+ };
diff --git a/Documentation/devicetree/bindings/dma/xilinx/vdmatest.txt b/Documentation/devicetree/bindings/dma/xilinx/vdmatest.txt
new file mode 100644
index 000000000000..5821fdc3e5e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/vdmatest.txt
@@ -0,0 +1,39 @@
+* Xilinx Video DMA Test client
+
+Required properties:
+- compatible: Should be "xlnx,axi-vdma-test-1.00.a"
+- dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs,
+ where Channel ID is '0' for write/tx and '1' for read/rx
+ channel.
+- dma-names: a list of DMA channel names, one per "dmas" entry
+- xlnx,num-fstores: Should be the number of framebuffers as configured in
+ VDMA device node.
+
+Example:
+++++++++
+
+vdmatest_0: vdmatest@0 {
+ compatible ="xlnx,axi-vdma-test-1.00.a";
+ dmas = <&axi_vdma_0 0
+ &axi_vdma_0 1>;
+ dma-names = "vdma0", "vdma1";
+ xlnx,num-fstores = <0x8>;
+} ;
+
+
+Xilinx Video DMA Device Node Example
+++++++++++++++++++++++++++++++++++++
+axi_vdma_0: axivdma@44A40000 {
+ compatible = "xlnx,axi-vdma-1.00.a";
+ ...
+ dma-channel@44A40000 {
+ ...
+ xlnx,num-fstores = <0x8>;
+ ...
+ } ;
+ dma-channel@44A40030 {
+ ...
+ xlnx,num-fstores = <0x8>;
+ ...
+ } ;
+} ;
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
index 93b6d961dd4f..613d775658a2 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
@@ -15,7 +15,7 @@ Required properties:
- compatible: Should be "xlnx,axi-vdma-1.00.a" or "xlnx,axi-dma-1.00.a" or
"xlnx,axi-cdma-1.00.a""
- #dma-cells: Should be <1>, see "dmas" property below
-- reg: Should contain VDMA registers location and length.
+- reg: Should contain DMA registers location and length.
- xlnx,addrwidth: Should be the vdma addressing size in bits(ex: 32 bits).
- dma-ranges: Should be as the following <dma_addr cpu_addr max_len>.
- dma-channel child node: Should have at least one channel and can have up to
@@ -38,11 +38,10 @@ Required properties for VDMA:
- xlnx,num-fstores: Should be the number of framebuffers as configured in h/w.
Optional properties for AXI DMA:
-- xlnx,sg-length-width: Should be set to the width in bits of the length
- register as configured in h/w. Takes values {8...26}. If the property
- is missing or invalid then the default value 23 is used. This is the
- maximum value that is supported by all IP versions.
-- xlnx,mcdma: Tells whether configured for multi-channel mode in the hardware.
+- xlnx,sg-length-width: Should be set to the width of the buffer length
+  register as configured in hardware. If this property is missing or has an
+  invalid width, i.e. not in the range of 8-26, the maximum buffer length
+  width common to all IP versions (23 bits) is used.
Optional properties for VDMA:
- xlnx,flush-fsync: Tells which channel to Flush on Frame sync.
It takes following values:
@@ -69,15 +68,13 @@ Optional child node properties for VDMA:
enabled/disabled in hardware.
- xlnx,enable-vert-flip: Tells vertical flip is
enabled/disabled in hardware(S2MM path).
-Optional child node properties for AXI DMA:
--dma-channels: Number of dma channels in child node.
Example:
++++++++
axi_vdma_0: axivdma@40030000 {
compatible = "xlnx,axi-vdma-1.00.a";
- #dma_cells = <1>;
+ #dma-cells = <1>;
reg = < 0x40030000 0x10000 >;
dma-ranges = <0x00000000 0x00000000 0x40000000>;
xlnx,num-fstores = <0x8>;
@@ -104,7 +101,8 @@ axi_vdma_0: axivdma@40030000 {
Required properties:
- dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs,
where Channel ID is '0' for write/tx and '1' for read/rx
- channel.
+ channel if both channels are enabled.
+	 If only one channel, either tx or rx, is enabled, the Channel ID is '0'.
- dma-names: a list of DMA channel names, one per "dmas" entry
Example:
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dpdma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dpdma.txt
new file mode 100644
index 000000000000..5f1e680ffcc2
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dpdma.txt
@@ -0,0 +1,91 @@
+Device-Tree bindings for Xilinx ZynqMP DisplayPort Subsystem
+
+The ZynqMP DisplayPort subsystem handles DMA channel buffer management,
+blending, and audio mixing. The DisplayPort subsystem receives display
+and audio frames from DPDMA and transmits output to the DisplayPort IP core.
+
+Required properties:
+ - compatible: Should be "xlnx,dpdma".
+ - reg: Base address and size of the IP core.
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for interrupt controller.
+ - clocks: phandle for AXI clock
+ - clock-names: The identification string, "axi_clk", is always required.
+
+Required child node properties:
+- compatible: Should be one of "xlnx,video0", "xlnx,video1", "xlnx,video2",
+ "xlnx,graphics", "xlnx,audio0", or "xlnx,audio1".
+
+Example:
+
+ xlnx_dpdma: axidpdma@43c10000 {
+ compatible = "xlnx,dpdma";
+ reg = <0x43c10000 0x1000>;
+ interrupts = <0 54 4>;
+ interrupt-parent = <&intc>;
+ clocks = <&clkc 16>;
+ clock-names = "axi_clk";
+ xlnx,axi-clock-freq = <200000000>;
+
+ dma-channels = <6>;
+
+ #dma-cells = <1>;
+ dma-video0channel@43c10000 {
+ compatible = "xlnx,video0";
+ };
+ dma-video1channel@43c10000 {
+ compatible = "xlnx,video1";
+ };
+ dma-video2channel@43c10000 {
+ compatible = "xlnx,video2";
+ };
+ dma-graphicschannel@43c10000 {
+ compatible = "xlnx,graphics";
+ };
+ dma-audio0channel@43c10000 {
+ compatible = "xlnx,audio0";
+ };
+ dma-audio1channel@43c10000 {
+ compatible = "xlnx,audio1";
+ };
+ };
+
+* DMA client
+
+Required properties:
+- dmas: a list of <[DPDMA device phandle] [Channel ID]> pairs. "Channel ID"
+ is defined as video0 = 0, video1 = 1, video2 = 2, graphics = 3, audio0 = 4,
+ and audio1 = 5.
+
+Example:
+
+ xilinx_drm {
+ compatible = "xlnx,drm";
+ xlnx,encoder-slave = <&xlnx_dp>;
+ clocks = <&si570 0>;
+ xlnx,connector-type = "DisplayPort";
+ xlnx,dp-sub = <&xlnx_dp_sub>;
+ planes {
+ xlnx,pixel-format = "rgb565";
+ plane0 {
+ dmas = <&xlnx_dpdma 3>;
+ dma-names = "dma";
+ };
+ plane1 {
+ dmas = <&xlnx_dpdma 0>;
+ dma-names = "dma";
+ };
+ };
+ };
+
+ xlnx_dp_snd_pcm0: dp_snd_pcm0 {
+ compatible = "xlnx,dp-snd-pcm";
+ dmas = <&xlnx_dpdma 4>;
+ dma-names = "tx";
+ };
+
+ xlnx_dp_snd_pcm1: dp_snd_pcm1 {
+ compatible = "xlnx,dp-snd-pcm";
+ dmas = <&xlnx_dpdma 5>;
+ dma-names = "tx";
+ };
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt
new file mode 100644
index 000000000000..39cb6ff762c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt
@@ -0,0 +1,123 @@
+The Xilinx framebuffer DMA engine supports two soft IP blocks: one IP
+block is used for reading video frame data from memory (FB Read) to the device
+and the other IP block is used for writing video frame data from the device
+to memory (FB Write). Both the FB Read/Write IP blocks are aware of the
+format of the data being written to or read from memory including RGB and
+YUV in packed, planar, and semi-planar formats. Because the FB Read/Write
+is format aware, only one buffer pointer is needed by the IP blocks even
+when planar or semi-planar format are used.
+
+FB Read Required Properties:
+- compatible : Should be "xlnx,axi-frmbuf-rd-v2.1". Older string
+ "xlnx,axi-frmbuf-rd-v2" is now deprecated.
+
+Note: Compatible string "xlnx,axi-frmbuf-rd" and the hardware it
+represented is no longer supported.
+
+FB Write Required Properties:
+- compatible : Should be "xlnx,axi-frmbuf-wr-v2.1". Older string
+ "xlnx,axi-frmbuf-wr-v2" is now deprecated.
+
+Note: Compatible string "xlnx,axi-frmbuf-wr" and the hardware it
+represented is no longer supported.
+
+Required Properties Common to both FB Read and FB Write:
+- #dma-cells : should be 1
+- interrupt-parent : Interrupt controller the interrupt is routed through
+- interrupts : Should contain DMA channel interrupt
+- reset-gpios : Should contain GPIO reset phandle
+- reg : Memory map for module access
+- xlnx,dma-addr-width : Size of dma address pointer in IP (either 32 or 64)
+- xlnx,vid-formats : A list of strings indicating what video memory
+ formats the IP has been configured to support.
+ See VIDEO FORMATS table below and examples.
+
+Required Properties Common to both FB Read and FB Write for v2.1:
+- xlnx,pixels-per-clock : Pixels per clock set in IP (1, 2 or 4)
+- clocks: Reference to the AXI Streaming clock feeding the AP_CLK
+- clock-names: Must have "ap_clk"
+
+Optional Properties Common to both FB Read and FB Write for v2.1:
+- xlnx,dma-align : DMA alignment required in bytes.
+ If absent then dma alignment is calculated as
+ pixels per clock * 8.
+ If present it should be power of 2 and at least
+ pixels per clock * 8.
+ Minimum is 8, 16, 32 when pixels-per-clock is
+ 1, 2 or 4.
+- xlnx,fid : Field ID enabled for interlaced video support.
+ Can be absent for progressive video.
+
+Optional properties:
+- xlnx,max-height : Maximum number of lines.
+- xlnx,max-width : Maximum number of pixels in a line.
+
+VIDEO FORMATS
+The following table describes the legal string values to be used for
+the xlnx,vid-formats property. To the left is the string value and the
+two columns to the right describe how this is mapped to an equivalent V4L2
+and DRM fourcc code, respectively, by the driver.
+
+IP FORMAT DTS String V4L2 Fourcc DRM Fourcc
+-------------|----------------|----------------------|---------------------
+RGB8 bgr888 V4L2_PIX_FMT_RGB24 DRM_FORMAT_BGR888
+BGR8 rgb888 V4L2_PIX_FMT_BGR24 DRM_FORMAT_RGB888
+RGBX8 xbgr8888 V4L2_PIX_FMT_BGRX32 DRM_FORMAT_XBGR8888
+RGBA8 abgr8888 <not supported> DRM_FORMAT_ABGR8888
+BGRA8 argb8888 <not supported> DRM_FORMAT_ARGB8888
+BGRX8 xrgb8888 V4L2_PIX_FMT_XBGR32 DRM_FORMAT_XRGB8888
+RGBX10 xbgr2101010 V4L2_PIX_FMT_XBGR30 DRM_FORMAT_XBGR2101010
+RGBX12 xbgr2121212 V4L2_PIX_FMT_XBGR40 <not supported>
+RGBX16 rgb16 V4L2_PIX_FMT_BGR40 <not supported>
+YUV8 vuy888 V4L2_PIX_FMT_VUY24 DRM_FORMAT_VUY888
+YUVX8 xvuy8888 V4L2_PIX_FMT_XVUY32 DRM_FORMAT_XVUY8888
+YUYV8 yuyv V4L2_PIX_FMT_YUYV DRM_FORMAT_YUYV
+UYVY8 uyvy V4L2_PIX_FMT_UYVY DRM_FORMAT_UYVY
+YUVA8 avuy8888 <not supported> DRM_FORMAT_AVUY
+YUVX10 yuvx2101010 V4L2_PIX_FMT_XVUY10 DRM_FORMAT_XVUY2101010
+Y8 y8 V4L2_PIX_FMT_GREY DRM_FORMAT_Y8
+Y10 y10 V4L2_PIX_FMT_Y10 DRM_FORMAT_Y10
+Y_UV8 nv16 V4L2_PIX_FMT_NV16 DRM_FORMAT_NV16
+Y_UV8 nv16 V4L2_PIX_FMT_NV16M DRM_FORMAT_NV16
+Y_UV8_420 nv12 V4L2_PIX_FMT_NV12 DRM_FORMAT_NV12
+Y_UV8_420 nv12 V4L2_PIX_FMT_NV12M DRM_FORMAT_NV12
+Y_UV10 xv20 V4L2_PIX_FMT_XV20M DRM_FORMAT_XV20
+Y_UV10 xv20 V4L2_PIX_FMT_XV20 <not supported>
+Y_UV10_420 xv15 V4L2_PIX_FMT_XV15M DRM_FORMAT_XV15
+Y_UV10_420     xv15             V4L2_PIX_FMT_XV15      <not supported>
+
+Examples:
+
+FB Read Example:
+++++++++
+v_frmbuf_rd_0: v_frmbuf_rd@80000000 {
+ #dma-cells = <1>;
+ compatible = "xlnx,axi-frmbuf-rd-v2.1";
+ interrupt-parent = <&gic>;
+ interrupts = <0 92 4>;
+ reset-gpios = <&gpio 80 1>;
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ xlnx,dma-addr-width = <32>;
+ xlnx,vid-formats = "bgr888","xbgr8888";
+ xlnx,pixels-per-clock = <1>;
+ xlnx,dma-align = <8>;
+ clocks = <&vid_stream_clk>;
+ clock-names = "ap_clk"
+};
+
+FB Write Example:
+++++++++
+v_frmbuf_wr_0: v_frmbuf_wr@80000000 {
+ #dma-cells = <1>;
+ compatible = "xlnx,axi-frmbuf-wr-v2.1";
+ interrupt-parent = <&gic>;
+ interrupts = <0 92 4>;
+ reset-gpios = <&gpio 80 1>;
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ xlnx,dma-addr-width = <64>;
+ xlnx,vid-formats = "bgr888","yuyv","nv16","nv12";
+ xlnx,pixels-per-clock = <2>;
+ xlnx,dma-align = <16>;
+ clocks = <&vid_stream_clk>;
+ clock-names = "ap_clk"
+};
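+
+A DMA client in the video pipeline then references a frame buffer channel
+through the standard dmas binding with one specifier cell; a minimal sketch,
+assuming the FB Read instance above and a hypothetical client node:
+
+	pipeline-client {
+		dmas = <&v_frmbuf_rd_0 0>;
+		dma-names = "dma0";
+	};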
diff --git a/Documentation/devicetree/bindings/drm/xilinx/cresample.txt b/Documentation/devicetree/bindings/drm/xilinx/cresample.txt
new file mode 100644
index 000000000000..177ab58bfc9d
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/cresample.txt
@@ -0,0 +1,22 @@
+Device-Tree bindings for Xilinx Chroma Resampler (CRESAMPLE)
+
+Xilinx CRESAMPLE provides the chroma resampling of YUV formats.
+
+Required properties:
+ - compatible: value should be "xlnx,v-cresample-3.01.a"
+ - reg: base address and size of the CRESAMPLE IP
+ - xlnx,input-format, xlnx,output-format: the input/output video formats of
+ CRESAMPLE. The value should be one of following format strings.
+
+ yuv422
+ yuv444
+ yuv420
+
+Example:
+
+ v_cresample_0: v-cresample@40020000 {
+ compatible = "xlnx,v-cresample-3.01.a";
+ reg = <0x40020000 0x10000>;
+ xlnx,input-format = "yuv444";
+ xlnx,output-format = "yuv422";
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/dp.txt b/Documentation/devicetree/bindings/drm/xilinx/dp.txt
new file mode 100644
index 000000000000..09485a46d78b
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/dp.txt
@@ -0,0 +1,65 @@
+Device-Tree bindings for Xilinx DisplayPort IP core
+
+The IP core supports transmission of video data in DisplayPort protocol.
+
+Required properties:
+ - compatible: Should be "xlnx,v-dp".
+ - reg: Base address and size of the IP core.
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for interrupt controller.
+ - clocks: phandle for aclk and audio clock
+ - clock-names: The identification string, "aclk", is always required for
+ the axi clock. "aud_clk" is required only when audio needs to be enabled.
+
+ - xlnx,dp-version: Version of DisplayPort protocol.
+ - xlnx,max-lanes: Maximum number of lanes of the IP core. The value should
+ be one of 1, 2, or 4.
+ - xlnx,max-link-rate: Maximum link rate of the IP core. The value should be
+ one of 162000, 270000, or 540000.
+ - xlnx,max-bpc: Maximum bits-per-color. The value should be one of 8, 10, 12,
+ or 16.
+ - xlnx,axi-clock: Clock rate of axi4-lite. This is required to provide
+ the correct clock divider for AUX.
+
+ - xlnx,colormetry: Color format. The value should be one of "rgb", "ycrcb422",
+ "ycrcb444", or "yonly". These are based on the DisplayPort specification.
+ - xlnx,bpc: bits-per-color value to be configured. The value should be one of
+ 6, 8, 10, 12, or 16.
+
+Optional properties:
+ - xlnx,enable-yonly: Enable yonly colormetry.
+ - xlnx,enable-ycrcb: Enable ycrcb colormetry.
+ - xlnx,enable-sync: Enable synchronous operation with video clock.
+ - xlnx,max-pclock-frequency: Maximum pixel clock rate (KHz). The value should
+ specify the maximum pixel clock rate in KHz that the IP core supports.
+ - xlnx,dp-sub: A phandle referencing the ZynqMP DisplayPort subsystem
+ which contains additional blocks such as buffer managers, blender, and audio.
+ - phy-names: Names for each phy lane. The name should be 'dp-phy' with lane
+ number. For example, 'dp-phy0', 'dp-phy1'. 'dp-phy0' should be the primary
+ lane, and used for PLL lock.
+ - phys: The phy phandles for each lane. This should follow the phy-zynqmp
+ definition in Documentation/devicetree/bindings/phy/phy-zynqmp.txt
+
+Example:
+
+ xlnx_dp: dp@83c10000 {
+ compatible = "xlnx,v-dp";
+ reg = <0x83c10000 0x10000>;
+ interrupts = <0 57 4>;
+ interrupt-parent = <&ps7_scugic_0>;
+ clocks = <&dp_aclk>, <&dp_aud_clk>;
+ clock-names = "aclk", "aud_clk";
+
+ xlnx,dp-version = "v1.2";
+ xlnx,max-lanes = <4>;
+ xlnx,max-link-rate = <270000>;
+ xlnx,max-bpc = <16>;
+ xlnx,max-pclock-frequency = <150000>;
+ xlnx,enable-ycrcb;
+
+ xlnx,colormetry = "rgb";
+ xlnx,bpc = <8>;
+
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 3 27000000>, <&lane0 PHY_TYPE_DP 1 3 27000000>;
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/dp_sub.txt b/Documentation/devicetree/bindings/drm/xilinx/dp_sub.txt
new file mode 100644
index 000000000000..678235355caa
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/dp_sub.txt
@@ -0,0 +1,65 @@
+Device-Tree bindings for Xilinx ZynqMP DisplayPort Subsystem
+
+The ZynqMP DisplayPort subsystem handles DMA channel buffer management,
+blending, and audio mixing. The DisplayPort subsystem receives display
+and audio frames from DPDMA and transmits output to the DisplayPort IP core.
+
+Required properties:
+ - compatible: Should be "xlnx,dp-sub".
+ - reg: Base address and size of the IP core.
+ - reg-names: "blend", "av_buf", and "aud" for Blender, AV Buffer manager, and
+ Audio modules respectively.
+ - xlnx,output-fmt: Output color format. The value should be one of "rgb",
+ "ycrcb422", "ycrcb444", or "yonly". These are based on the DisplayPort
+   specification. The value shall be synced with the DP colormetry.
+
+Optional properties:
+ - xlnx,vid-fmt: Video input color format. The value should be one of
+ "vyuy"
+ "uyvy"
+ "yuyv"
+ "yvyu"
+ "yuv422"
+ "yvu422"
+ "yuv444"
+ "yvu444"
+ "nv16"
+ "nv61
+ "bgr888"
+ "rgb888"
+ "xbgr8888"
+ "xrgb8888"
+ "xbgr2101010"
+ "xrgb2101010"
+ "yuv420"
+ "yvu420"
+ "nv12"
+ "nv21".
+ If nothing is specified, "vyuy" will be selected.
+ - xlnx,gfx-fmt: Graphics input color format. The value should be one of
+ "abgr8888"
+ "argb8888"
+ "rgba8888"
+ "bgra8888"
+ "bgr888"
+ "rgb888"
+ "rgba5551"
+ "bgra5551"
+ "rgba4444"
+ "bgra4444"
+ "rgb565"
+ "bgr565".
+ If nothing is specified, "abgr8888" will be selected.
+
+Optional properties:
+ - xlnx,vid-clk-pl: Should be used when the pixel clock is coming from PL.
+
+Example:
+
+ xlnx_dp_sub: dp_sub@43c0a000 {
+ compatible = "xlnx,dp-sub";
+ reg = <0x43c0a000 0x1000>, <0x43c0b000 0x1000>,
+ <0x43c0c000 0x1000>;
+ reg-names = "blend", "av_buf", "aud";
+ xlnx,output-fmt = "rgb";
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/dsi.txt b/Documentation/devicetree/bindings/drm/xilinx/dsi.txt
new file mode 100644
index 000000000000..f56db6a0d95b
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/dsi.txt
@@ -0,0 +1,61 @@
+Device-Tree bindings for Xilinx MIPI DSI Tx IP core
+
+The IP core supports transmission of video data in MIPI DSI protocol.
+
+Required properties:
+ - compatible: Should be "xlnx,mipi-dsi-tx-subsystem".
+ - reg: Base address and size of the IP core.
+ - xlnx,dsi-data-type: Color format. The value should be one of "MIPI_DSI_FMT_RGB888",
+ "MIPI_DSI_FMT_RGB666", "MIPI_DSI_FMT_RGB666_PACKED" or "MIPI_DSI_FMT_RGB565".
+ - simple_panel: The subnode for the connected panel. This represents the
+   DSI peripheral connected to the DSI host node. Please refer to
+   Documentation/devicetree/bindings/display/mipi-dsi-bus.txt. The
+   simple-panel driver has the auo,b101uan01 panel timing parameters added
+   along with other existing panels. The DSI driver derives the required
+   Tx IP controller timing values from the panel timing parameters. Refer to
+   xilinx_dsi_mode_set() in the DSI driver for how to derive the DSI
+   Tx controller timing parameters.
+ - ports: Connects to the drm device node through device graph binding.
+ The port should contain a 'remote-endpoint' subnode that points to the
+ endpoint in the port of the drm device node. Refer to
+ Documentation/devicetree/bindings/graph.txt.
+ - xlnx,dsi-num-lanes: Number of DSI lanes for the Tx controller.
+ The value should be 1, 2, 3 or 4. Based on xlnx,dsi-num-lanes and the
+ line rate of the MIPI D-PHY core in Mbps, the Xilinx MIPI DSI Tx IP core
+ adds markers to the received AXI4-Stream as per the DSI protocol, and the
+ packet thus framed is converted to serial data by the MIPI D-PHY core.
+ Please refer to Xilinx PG238 for more details. This value should be equal
+ to the number of lanes supported by the connected DSI panel. The panel has
+ to support this value or has to be programmable to the same value that the
+ DSI Tx controller is configured to.
+
+Required simple_panel properties:
+ - compatible: Value should be one of the panel name mentioned in the
+ of_match_table of simple panel driver drivers/gpu/drm/panel/panel-simple.c
+ e.g. "auo,b101uan01".
+
+Example:
+
+#include <dt-bindings/drm/mipi-dsi.h>
+ mipi_dsi_tx_subsystem_0: mipi_dsi_tx_subsystem@80000000 {
+ compatible = "xlnx,mipi-dsi-tx-subsystem";
+ reg = <0x0 0x80000000 0x0 0x10000>;
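+ /* lane count must match what the connected panel supports */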
+ xlnx,dsi-num-lanes = <4>;
+ xlnx,dsi-data-type = <MIPI_DSI_FMT_RGB888>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ mipi_port: endpoint {
+ remote-endpoint = <&drm_port>;
+ };
+ };
+ };
+ simple_panel: simple-panel@0 {
+ compatible = "auo,b101uan01";
+ reg = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/osd.txt b/Documentation/devicetree/bindings/drm/xilinx/osd.txt
new file mode 100644
index 000000000000..9f30706af15d
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/osd.txt
@@ -0,0 +1,19 @@
+Device-Tree bindings for Xilinx Video On-Screen Display (OSD)
+
+The Xilinx OSD provides multi-plane support. Some properties can be
+configured at IP synthesis time.
+
+Required properties:
+ - compatible: value should be "xlnx,v-osd-5.01.a"
+ - reg: base address and size of the OSD IP
+ - xlnx,num-layers: the number of layers (up to 8) supported by the OSD
+ - xlnx,screen-width: the maximum screen width in pixels (up to 4096) supported by the OSD
+
+Example:
+
+ v_osd_0: v-osd@40040000 {
+ compatible = "xlnx,v-osd-5.01.a";
+ reg = <0x40040000 0x10000>;
+ xlnx,num-layers = <2>;
+ xlnx,screen-width = <1920>;
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/rgb2ycrcb.txt b/Documentation/devicetree/bindings/drm/xilinx/rgb2ycrcb.txt
new file mode 100644
index 000000000000..6d801d0426d0
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/rgb2ycrcb.txt
@@ -0,0 +1,14 @@
+Device-Tree bindings for Xilinx RGB to YCrCb converter (RGB2YCRCB)
+
+The Xilinx RGB2YCRCB converts the pixel format from RGB to YCrCb.
+
+Required properties:
+ - compatible: value should be "xlnx,v-rgb2ycrcb-6.01.a"
+ - reg: base address and size of the RGB2YCRCB IP
+
+Example:
+
+ v_rgb2ycrcb_0: v-rgb2ycrcb@40030000 {
+ compatible = "xlnx,v-rgb2ycrcb-6.01.a";
+ reg = <0x40030000 0x10000>;
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/sdi.txt b/Documentation/devicetree/bindings/drm/xilinx/sdi.txt
new file mode 100644
index 000000000000..ceb5340364bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/sdi.txt
@@ -0,0 +1,34 @@
+Device-Tree bindings for Xilinx SDI Tx IP core
+
+The IP core supports transmission of video data using the SDI protocol.
+
+Required properties:
+ - compatible: Should be "xlnx,v-smpte-uhdsdi-tx-ss".
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for the interrupt controller.
+ - reg: Base address and size of the IP core.
+ - ports: Connects to the drm device node through device graph binding.
+ The port should contain a 'remote-endpoint' subnode that points to the
+ endpoint in the port of the drm device node. Refer to
+ Documentation/devicetree/bindings/graph.txt.
+ - xlnx,vtc: phandle to the Video Timing Controller (VTC) node
+
+Example:
+
+ v_smpte_uhdsdi_tx_ss: v_smpte_uhdsdi_tx_ss@80020000 {
+ compatible = "xlnx,v-smpte-uhdsdi-tx-ss";
+ interrupt-parent = <&gic>;
+ interrupts = <0 90 4>;
+ reg = <0x0 0x80020000 0x0 0x10000>;
+ xlnx,vtc = <&v_tc_0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ sdi_port: endpoint {
+ remote-endpoint = <&drm_port>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/vtc.txt b/Documentation/devicetree/bindings/drm/xilinx/vtc.txt
new file mode 100644
index 000000000000..13309048e789
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/vtc.txt
@@ -0,0 +1,18 @@
+Device-Tree bindings for Xilinx Video Timing Controller (VTC)
+
+Xilinx VTC provides the timings for Video IPs.
+
+Required properties:
+ - compatible: value should be "xlnx,v-tc-5.01.a"
+ - reg: base address and size of the VTC IP
+ - interrupts: the interrupt number
+ - interrupt-parent: the phandle for the interrupt controller
+
+Example:
+
+ v_tc_0: v-tc@40010000 {
+ compatible = "xlnx,v-tc-5.01.a";
+ interrupt-parent = <&intc>;
+ interrupts = <0 54 4>;
+ reg = <0x40010000 0x10000>;
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/xilinx_drm.txt b/Documentation/devicetree/bindings/drm/xilinx/xilinx_drm.txt
new file mode 100644
index 000000000000..25859d47ba85
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/xilinx_drm.txt
@@ -0,0 +1,162 @@
+Device-Tree bindings for Xilinx DRM
+
+Xilinx DRM supports the display pipelines with Xilinx soft IPs on FPGA and
+IPs on Xilinx boards.
+
+The example hardware pipeline is depicted below
+(IPs in parentheses () are optional. IPs in brackets [] don't require drivers.)
+vdma-[remap]-(rgb2yuv)-(cresample)-(osd)-(rgb2yuv)-(cresample)-[axi2vid]-adv7511
+(vdma-[remap]-(rgb2yuv)-(cresample)-|) |
+ si570 -> vtc
+
+Required properties:
+ - compatible: value should be "xlnx,drm".
+ - xlnx,osd: the phandle for the on-screen display IP if used in the hardware design
+ - xlnx,rgb2yuv: the phandle for rgb2ycrcb IP if used in the hardware design
+ - xlnx,cresample: the phandle for chroma resampler IP if used in the hardware
+ design
+ - xlnx,vtc: the phandle for video timing controller IP
+ - xlnx,encoder-slave: the phandle for the encoder slave.
+ - clocks: the phandle for the pixel clock
+ - planes: the subnode for resources for each plane
+ - xlnx,connector-type: the type of connector. The value should be one of
+ "HDMIA" or "DisplayPort" depending on which connector type to be used.
+
+Optional properties:
+ - xlnx,dp-sub: the phandle to DisplayPort subsystem node for ZynqMP.
+ - xlnx,sdi: the phandle to SDI node if the pipeline has the SDI IP core.
+ - ports: device graph binding can be used to define connectivity. The DT
+ bindings are defined in Documentation/devicetree/bindings/graph.txt.
+
+Required planes properties:
+ - xlnx,pixel-format: the format of the plane manager. The value should be one
+ of the following format strings.
+
+ "yuv420"
+ "uvy422"
+ "vuy422"
+ "yuv422"
+ "yv4u22"
+ "yuv444"
+ "nv12"
+ "nv21"
+ "nv16"
+ "nv61"
+ "abgr1555"
+ "argb1555"
+ "rgba4444"
+ "bgra4444"
+ "bgr565"
+ "rgb565"
+ "bgr888"
+ "rgb888"
+ "xbgr8888"
+ "xrgb8888"
+ "abgr8888"
+ "argb8888"
+ "bgra8888"
+ "rgba8888"
+
+Required plane properties:
+ - dmas: the phandle list of DMA specifiers
+ - dma-names: the identifier strings for DMAs.
+ - xlnx,rgb2yuv: the phandle for rgb2ycrcb IP if used for plane
+ - xlnx,cresample: the phandle for chroma resampler IP if used for plane
+
+The pipeline can be configured as in the following examples, among others.
+ - Example 1:
+vdma - [remap] - rgb2yuv - cresample - [axi2vid] - adv7511
+ |
+ si570 - vtc
+ xilinx_drm {
+ compatible = "xlnx,drm";
+ xlnx,vtc = <&v_tc_0>;
+ xlnx,encoder-slave = <&adv7511>;
+ xlnx,connector-type = "HDMIA";
+ clocks = <&si570>;
+ planes {
+ xlnx,pixel-format = "yuv422";
+ plane0 {
+ dmas = <&axi_vdma_0>;
+ dma-names = "axi_vdma_0";
+ xlnx,rgb2yuv = <&v_rgb2ycrcb_0>;
+ xlnx,cresample = <&v_cresample_0>;
+ };
+ };
+ };
+
+ - Example 2:
+vdma - [remap] --------- osd - cresample - [axi2vid] - adv7511
+vdma - [remap] - rgb2yuv -| |
+ si570 - vtc
+
+ xilinx_drm {
+ compatible = "xlnx,drm";
+ xlnx,osd = <&v_osd_0>;
+ xlnx,cresample = <&v_cresample_0>;
+ xlnx,vtc = <&v_tc_0>;
+ xlnx,encoder-slave = <&adv7511>;
+ xlnx,connector-type = "DisplayPort";
+ clocks = <&si570>;
+ planes {
+ xlnx,pixel-format = "yuv422";
+ plane0 {
+ dmas = <&axi_vdma_0>;
+ dma-names = "axi_vdma_0";
+ };
+ plane1 {
+ dmas = <&axi_vdma_1>;
+ dma-names = "axi_vdma_1";
+ xlnx,rgb2yuv = <&v_rgb2ycrcb_0>;
+ };
+ };
+ };
+
+ - Example 3:
+dpdma - ZynqMP DP subsystem - DP
+
+ xilinx_drm {
+ compatible = "xlnx,drm";
+ xlnx,encoder-slave = <&xlnx_dp>;
+ clocks = <&si570 0>;
+ xlnx,connector-type = "DisplayPort";
+ xlnx,dp-sub = <&xlnx_dp_sub>;
+ planes {
+ xlnx,pixel-format = "rgb565";
+ plane0 {
+ dmas = <&xlnx_dpdma 3>;
+ dma-names = "xlnx_dpdma";
+ };
+ plane1 {
+ dmas = <&xlnx_dpdma 0>,
+ <&xlnx_dpdma 1>,
+ <&xlnx_dpdma 2>;
+ dma-names = "xlnx_dpdma_0",
+ "xlnx_dpdma_1",
+ "xlnx_dpdma_2";
+ };
+ };
+ };
+
+- Example 4:
+vdma - Xilinx MIPI DSI
+ xilinx_drm: xilinx_drm {
+ compatible = "xlnx,drm";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ drm_port: endpoint {
+ remote-endpoint = <&mipi_port>;
+ };
+ };
+ };
+ planes {
+ xlnx,pixel-format = "rgb888";
+ plane0 {
+ dmas = <&axi_vdma_0 0>;
+ dma-names = "axi_vdma_0";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/drm/zocl/zocl_drm.txt b/Documentation/devicetree/bindings/drm/zocl/zocl_drm.txt
new file mode 100644
index 000000000000..bb9e30af4afc
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/zocl/zocl_drm.txt
@@ -0,0 +1,13 @@
+Binding for Zynq OpenCL DRM driver
+
+Required properties:
+- compatible: should contain "xlnx,zocl"
+- reg: base address and size of the memory-mapped control port for the OpenCL kernel
+
+Example:
+
+ zocl_drm {
+ compatible = "xlnx,zocl";
+ status = "okay";
+ reg = <0x80000000 0x10000>;
+ };
diff --git a/Documentation/devicetree/bindings/edac/cortex-arm64-edac.txt b/Documentation/devicetree/bindings/edac/cortex-arm64-edac.txt
new file mode 100644
index 000000000000..552f0c7774b1
--- /dev/null
+++ b/Documentation/devicetree/bindings/edac/cortex-arm64-edac.txt
@@ -0,0 +1,15 @@
+* ARM Cortex A57 and A53 L1/L2 cache error reporting
+
+CPU Memory Error Syndrome and L2 Memory Error Syndrome registers can be used
+for checking L1 and L2 memory errors.
+
+The following section describes the Cortex A57/A53 EDAC DT node binding.
+
+Required properties:
+- compatible: Should be "arm,cortex-a57-edac" or "arm,cortex-a53-edac"
+
+Example:
+ edac {
+ compatible = "arm,cortex-a57-edac";
+ };
+
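+A Cortex-A53 based design uses the other compatible string the same way:
+
+ edac {
+ compatible = "arm,cortex-a53-edac";
+ };
+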
diff --git a/Documentation/devicetree/bindings/edac/pl310_edac_l2.txt b/Documentation/devicetree/bindings/edac/pl310_edac_l2.txt
new file mode 100644
index 000000000000..94fbb8da2d1b
--- /dev/null
+++ b/Documentation/devicetree/bindings/edac/pl310_edac_l2.txt
@@ -0,0 +1,19 @@
+PL310 L2 cache EDAC driver. It reports data and tag RAM parity errors.
+
+Required properties:
+- compatible: Should be "arm,pl310-cache".
+- interrupts: Interrupt number to the CPU.
+- reg: Physical base address and size of cache controller's memory mapped
+ registers
+
+Example:
+++++++++
+
+ L2: cache-controller {
+ compatible = "arm,pl310-cache";
+ interrupts = <0 2 4>;
+ reg = <0xf8f02000 0x1000>;
+ };
+
+PL310 L2 Cache EDAC driver detects the Parity enable state by reading the
+appropriate control register.
diff --git a/Documentation/devicetree/bindings/edac/zynqmp_ocm_edac.txt b/Documentation/devicetree/bindings/edac/zynqmp_ocm_edac.txt
new file mode 100644
index 000000000000..252bb96bee90
--- /dev/null
+++ b/Documentation/devicetree/bindings/edac/zynqmp_ocm_edac.txt
@@ -0,0 +1,18 @@
+Xilinx ZynqMP OCM EDAC driver. It reports the single-bit OCM ECC errors
+that are corrected and the double-bit ECC errors that are detected by the
+OCM ECC controller.
+
+Required properties:
+- compatible: Should be "xlnx,zynqmp-ocmc-1.0".
+- reg: Should contain OCM controller registers location and length.
+- interrupt-parent: Should be core interrupt controller.
+- interrupts: Property with a value describing the interrupt number.
+
+Example:
+++++++++
+ocm: memory-controller@ff960000 {
+ compatible = "xlnx,zynqmp-ocmc-1.0";
+ reg = <0x0 0xff960000 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 10 4>;
+};
diff --git a/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt b/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt
index a4fe136be2ba..56ec638f3976 100644
--- a/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt
+++ b/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt
@@ -11,7 +11,9 @@ power management service, FPGA service and other platform management
services.
Required properties:
- - compatible: Must contain: "xlnx,zynqmp-firmware"
+ - compatible: Must contain one of the following:
+ "xlnx,zynqmp-firmware" for Zynq Ultrascale+ MPSoC
+ "xlnx,versal-firmware-wip" for Versal
- method: The method of calling the PM-API firmware layer.
Permitted values are:
- "smc" : SMC #0, following the SMCCC
@@ -21,6 +23,8 @@ Required properties:
Example
-------
+Zynq Ultrascale+ MPSoC
+----------------------
firmware {
zynqmp_firmware: zynqmp-firmware {
compatible = "xlnx,zynqmp-firmware";
@@ -28,3 +32,13 @@ firmware {
...
};
};
+
+Versal
+------
+firmware {
+ versal_firmware: versal-firmware {
+ compatible = "xlnx,versal-firmware-wip";
+ method = "smc";
+ ...
+ };
+};
diff --git a/Documentation/devicetree/bindings/fpga/fpga-region.txt b/Documentation/devicetree/bindings/fpga/fpga-region.txt
index 90c44694a30b..f64d815ec75e 100644
--- a/Documentation/devicetree/bindings/fpga/fpga-region.txt
+++ b/Documentation/devicetree/bindings/fpga/fpga-region.txt
@@ -196,6 +196,7 @@ Optional properties:
- config-complete-timeout-us : The maximum time in microseconds for the
FPGA to go to operating mode after the region has been programmed.
- child nodes : devices in the FPGA after programming.
+- resets : Phandle and reset specifier for this region.
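+ A minimal sketch of a region referencing a reset (the reset controller
+ phandle and specifier here are assumptions for illustration):
+
+ fpga_region1: fpga-region1 {
+ compatible = "fpga-region";
+ fpga-mgr = <&fpga_mgr>;
+ resets = <&rst_ctrl 0>; /* assumed reset controller */
+ };
+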
In the example below, when an overlay is applied targeting fpga-region0,
fpga_mgr is used to program the FPGA. Two bridges are controlled during
diff --git a/Documentation/devicetree/bindings/fpga/xlnx,afi-fpga.txt b/Documentation/devicetree/bindings/fpga/xlnx,afi-fpga.txt
new file mode 100644
index 000000000000..85f8970010b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/xlnx,afi-fpga.txt
@@ -0,0 +1,61 @@
+Xilinx ZynqMP AFI Interface Manager
+
+The Zynq UltraScale+ MPSoC Processing System core provides access from PL
+masters to PS internal peripherals and memory through AXI FIFO Interface
+(AFI) ports.
+
+Required properties:
+-compatible: Should contain "xlnx,afi-fpga"
+-config-afi: Pairs of <regid value>
+
+The possible regid and value combinations are:
+ regid: ID of the register to be written. Possible values:
+ 0- AFIFM0_RDCTRL
+ 1- AFIFM0_WRCTRL
+ 2- AFIFM1_RDCTRL
+ 3- AFIFM1_WRCTRL
+ 4- AFIFM2_RDCTRL
+ 5- AFIFM2_WRCTRL
+ 6- AFIFM3_RDCTRL
+ 7- AFIFM3_WRCTRL
+ 8- AFIFM4_RDCTRL
+ 9- AFIFM4_WRCTRL
+ 10- AFIFM5_RDCTRL
+ 11- AFIFM5_WRCTRL
+ 12- AFIFM6_RDCTRL
+ 13- AFIFM6_WRCTRL
+ 14- AFIFS
+ 15- AFIFS_SS2
+- value: Array of values to be written.
+ for FM0_RDCTRL(0) through FM6_WRCTRL(13), the value selects the fabric
+ width: 2: 32-bit, 1: 64-bit, 0: 128-bit enabled
+ for AFIFS(14), the value programs the fields
+ dw_ss1_sel, bits (11:10)
+ dw_ss0_sel, bits (9:8)
+ 0x0: 32-bit AXI data width
+ 0x1: 64-bit AXI data width
+ 0x2: 128-bit AXI data width
+ Writes to all other bits are ignored.
+
+ for AFIFS_SS2(15), the value selects the SS2 AXI data width. Valid values:
+ 0x000: 32-bit AXI data width
+ 0x100: 64-bit AXI data width
+ 0x200: 128-bit AXI data width
+
+Example:
+afi0: afi0 {
+ compatible = "xlnx,afi-fpga";
+ config-afi = <0 2>, <1 1>, <2 1>;
+};
diff --git a/Documentation/devicetree/bindings/fpga/xlnx,versal-fpga.txt b/Documentation/devicetree/bindings/fpga/xlnx,versal-fpga.txt
new file mode 100644
index 000000000000..acca970cd341
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/xlnx,versal-fpga.txt
@@ -0,0 +1,10 @@
+Device Tree versal-fpga bindings for the Versal SoC, controlled
+through the Versal SoC firmware interface.
+
+Required properties:
+- compatible: should contain "xlnx,versal-fpga"
+
+Example:
+ versal_fpga: fpga {
+ compatible = "xlnx,versal-fpga";
+ };
diff --git a/Documentation/devicetree/bindings/fpga/xlnx,zynq-afi-fpga.txt b/Documentation/devicetree/bindings/fpga/xlnx,zynq-afi-fpga.txt
new file mode 100644
index 000000000000..e00942cf3091
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/xlnx,zynq-afi-fpga.txt
@@ -0,0 +1,19 @@
+Xilinx Zynq AFI interface Manager
+
+The Zynq Processing System core provides access from PL masters to PS
+internal peripherals and memory through AXI FIFO Interface
+(AFI) ports.
+
+Required properties:
+-compatible: Should contain "xlnx,zynq-afi-fpga"
+-reg: Physical base address and size of the controller's register area.
+-xlnx,afi-buswidth : Width of the AFI bus.
+ 0: 64-bit AXI data width
+ 1: 32-bit AXI data width
+
+Example:
+afi0: afi0 {
+ compatible = "xlnx,zynq-afi-fpga";
+ reg = <0xf8008000 0x1000>;
+ xlnx,afi-buswidth = <1>;
+};
diff --git a/Documentation/devicetree/bindings/fpga/xlnx,zynqmp-pcap-fpga.txt b/Documentation/devicetree/bindings/fpga/xlnx,zynqmp-pcap-fpga.txt
index 3052bf619dd5..105943a48610 100644
--- a/Documentation/devicetree/bindings/fpga/xlnx,zynqmp-pcap-fpga.txt
+++ b/Documentation/devicetree/bindings/fpga/xlnx,zynqmp-pcap-fpga.txt
@@ -4,11 +4,15 @@ Programmable Logic (PL). The configuration uses the firmware interface.
Required properties:
- compatible: should contain "xlnx,zynqmp-pcap-fpga"
+- clocks: phandle to the clock required for operation
+- clock-names: name for the clock, should be "ref_clk"
Example for full FPGA configuration:
fpga-region0 {
compatible = "fpga-region";
+ clocks = <&clkc 41>;
+ clock-names = "ref_clk";
fpga-mgr = <&zynqmp_pcap>;
#address-cells = <0x1>;
#size-cells = <0x1>;
diff --git a/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt b/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
index 08eed2335db0..516f4f50b124 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
@@ -8,11 +8,17 @@ local interrupts can be enabled on channel basis.
Required properties:
- compatible : Should be "xlnx,xps-gpio-1.00.a"
- reg : Address and length of the register set for the device
-- #gpio-cells : Should be two. The first cell is the pin number and the
- second cell is used to specify optional parameters (currently unused).
+- #gpio-cells : Should be two or three. The first cell is the pin number.
+ The second cell is used to specify the channel offset:
+ 0 - first channel
+ 8 - second channel
+ The third cell is optional and used to specify flags. Use the macros
+ defined in include/dt-bindings/gpio/gpio.h
- gpio-controller : Marks the device node as a GPIO controller.
Optional properties:
+- clock-names : Should be "s_axi_aclk"
+- clocks: Input clock specifier. Refer to common clock bindings.
- interrupts : Interrupt mapping for GPIO IRQ.
- xlnx,all-inputs : if the n-th bit is set, GPIO-n is an input
- xlnx,dout-default : if the n-th bit is 1, the GPIO-n default value is 1
@@ -23,6 +29,7 @@ Optional properties:
- xlnx,dout-default-2 : as above but the second channel
- xlnx,gpio2-width : as above but for the second channel
- xlnx,tri-default-2 : as above but for the second channel
+- xlnx,no-init : No initialisation at probe
Example:
@@ -30,6 +37,8 @@ gpio: gpio@40000000 {
#gpio-cells = <2>;
compatible = "xlnx,xps-gpio-1.00.a";
gpio-controller ;
+ clock-names = "s_axi_aclk";
+ clocks = <&clkc 71>;
interrupt-parent = <&microblaze_0_intc>;
interrupts = < 6 2 >;
reg = < 0x40000000 0x10000 >;
@@ -44,3 +53,11 @@ gpio: gpio@40000000 {
xlnx,tri-default = <0xffffffff>;
xlnx,tri-default-2 = <0xffffffff>;
} ;
+
+Example to demonstrate how reset-gpios property is used in drivers:
+
+driver: driver@80000000 {
+ compatible = "xlnx,driver";
+ reset-gpios = <&gpio 0 0 GPIO_ACTIVE_LOW>; /* gpio phandle, gpio pin-number, channel offset, flag state */
+ reg = <0x0 0x80000000 0x0 0x10000>;
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-zynq.txt b/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
index 4fa4eb5507cd..f693e82b4c0f 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
@@ -6,7 +6,9 @@ Required properties:
- First cell is the GPIO line number
- Second cell is used to specify optional
parameters (unused)
-- compatible : Should be "xlnx,zynq-gpio-1.0" or "xlnx,zynqmp-gpio-1.0"
+- compatible : Should be "xlnx,zynq-gpio-1.0" or
+ "xlnx,zynqmp-gpio-1.0" or "xlnx,versal-gpio-1.0"
+ or "xlnx,pmc-gpio-1.0"
- clocks : Clock specifier (see clock bindings for details)
- gpio-controller : Marks the device node as a GPIO controller.
- interrupts : Interrupt specifier (see interrupt bindings for
diff --git a/Documentation/devicetree/bindings/hwmon/tps544.txt b/Documentation/devicetree/bindings/hwmon/tps544.txt
new file mode 100644
index 000000000000..bb71fd287ca2
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/tps544.txt
@@ -0,0 +1,14 @@
+TPS544B25 power regulator
+
+This power regulator driver supports voltage read/write and
+current calibration and readback.
+
+Required properties:
+- compatible: should be "ti,tps544"
+- reg: I2C slave address
+
+Example:
+tps544@24 {
+ compatible = "ti,tps544";
+ reg = <0x24>;
+};
diff --git a/Documentation/devicetree/bindings/iio/adc/xilinx-ams.txt b/Documentation/devicetree/bindings/iio/adc/xilinx-ams.txt
new file mode 100644
index 000000000000..3d1e77014865
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/xilinx-ams.txt
@@ -0,0 +1,159 @@
+Xilinx AMS device driver
+
+The AMS includes an ADC as well as on-chip sensors that can be used to
+sample external voltages and monitor on-die operating conditions, such as
+temperature and supply voltage levels. The AMS has two SYSMON blocks. The
+PL-SYSMON block is capable of monitoring off-chip voltage and temperature.
+PL-SYSMON has DRP, JTAG and I2C interfaces to enable monitoring from an
+external master; of these, currently only DRP is supported. The other
+block, PS-SYSMON, is memory mapped to the PS. Both blocks have built-in
+alarm generation logic that is used to interrupt the processor based on
+the configured conditions.
+
+All designs should have the AMS register space, but PS and PL are optional
+depending on the design. The driver can work with PS only, PL only, or both
+PS and PL configurations. Please specify registers according to your design.
+The DTS file should always have the AMS module; providing the PS and PL
+modules is optional.
+
+Required properties:
+ - compatible: Should be "xlnx,zynqmp-ams"
+ - reg: Should specify AMS register space
+ - interrupts: Interrupt number for the AMS control interface
+ - interrupt-names: Interrupt name, must be "ams-irq"
+ - clocks: Should contain a clock specifier for the device
+ - ranges: keep the property empty to map child address space
+ (for PS and/or PL) nodes 1:1 onto the parent address
+ space
+
+AMS device tree subnode:
+ - compatible: Should be "xlnx,zynqmp-ams-ps" or "xlnx,zynqmp-ams-pl"
+ - reg: Register space for PS or PL
+
+Optional properties:
+
+The following optional property is only valid for the PL:
+ - xlnx,ext-channels: List of external channels that are connected to the
+ AMS PL module.
+
+ The child nodes of this node represent the external channels which are
+ connected to the AMS module. If the property is not present,
+ no external channels are assumed to be connected.
+
+ Each child node represents one channel and has the following
+ properties:
+ Required properties:
+ * reg: Pair of pins the channel is connected to.
+ 0: VP/VN
+ 1: VUSER0
+ 2: VUSER1
+ 3: VUSER2
+ 4: VUSER3
+ 5: VAUXP[0]/VAUXN[0]
+ 6: VAUXP[1]/VAUXN[1]
+ ...
+ 20: VAUXP[15]/VAUXN[15]
+ Note each channel number should only be used at most
+ once.
+ Optional properties:
+ * xlnx,bipolar: If set the channel is used in bipolar
+ mode.
+
+
+Example:
+ xilinx_ams: ams@ffa50000 {
+ compatible = "xlnx,zynqmp-ams";
+ interrupt-parent = <&gic>;
+ interrupts = <0 56 4>;
+ interrupt-names = "ams-irq";
+ clocks = <&clkc 70>;
+ reg = <0x0 0xffa50000 0x0 0x800>;
+ reg-names = "ams-base";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ams_ps: ams_ps@ffa50800 {
+ compatible = "xlnx,zynqmp-ams-ps";
+ reg = <0x0 0xffa50800 0x0 0x400>;
+ };
+
+ ams_pl: ams_pl@ffa50c00 {
+ compatible = "xlnx,zynqmp-ams-pl";
+ reg = <0x0 0xffa50c00 0x0 0x400>;
+ xlnx,ext-channels {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ channel@0 {
+ reg = <0>;
+ xlnx,bipolar;
+ };
+ channel@1 {
+ reg = <1>;
+ };
+ channel@8 {
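+ /* pin pair 8: VAUXP[3]/VAUXN[3] per the mapping above */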
+ reg = <8>;
+ xlnx,bipolar;
+ };
+ };
+ };
+ };
+
+AMS Channels Details:
+
+Sysmon Block |Channel| Details |Measurement
+ Number Type
+---------------------------------------------------------------------------------------------------------
+AMS CTRL |0 |System PLLs voltage measurement, VCC_PSPLL. |Voltage
+ |1 |Battery voltage measurement, VCC_PSBATT. |Voltage
+ |2 |PL Internal voltage measurement, VCCINT. |Voltage
+ |3 |Block RAM voltage measurement, VCCBRAM. |Voltage
+ |4 |PL Aux voltage measurement, VCCAUX. |Voltage
+ |5 |Voltage measurement for six DDR I/O PLLs, VCC_PSDDR_PLL. |Voltage
+ |6 |VCC_PSINTFP_DDR voltage measurement. |Voltage
+---------------------------------------------------------------------------------------------------------
+PS Sysmon |7 |LPD temperature measurement. |Temperature
+ |8 |FPD Temperature Measurement (REMOTE). |Temperature
+ |9 |VCC PS LPD voltage measurement (supply1). |Voltage
+ |10 |VCC PS FPD voltage measurement (supply2). |Voltage
+ |11 |PS Aux voltage reference (supply3). |Voltage
+ |12 |DDR I/O VCC voltage measurement. |Voltage
+ |13 |PS IO Bank 503 voltage measurement (supply5). |Voltage
+ |14 |PS IO Bank 500 voltage measurement (supply6). |Voltage
+ |15 |VCCO_PSIO1 voltage measurement. |Voltage
+ |16 |VCCO_PSIO2 voltage measurement. |Voltage
+ |17 |VCC_PS_GTR voltage measurement (VPS_MGTRAVCC). |Voltage
+ |18 |VTT_PS_GTR voltage measurement (VPS_MGTRAVTT). |Voltage
+ |19 |VCC_PSADC voltage measurement. |Voltage
+---------------------------------------------------------------------------------------------------------
+PL Sysmon |20 |PL Temperature measurement. |Temperature
+ |21 |PL Internal Voltage measurement, VCCINT. |Voltage
+ |22 |PL Auxiliary Voltage measurement, VCCAUX. |Voltage
+ |23 |ADC Reference P+ Voltage measurement. |Voltage
+ |24 |ADC Reference N- Voltage measurement. |Voltage
+ |25 |PL Block RAM Voltage measurement, VCCBRAM. |Voltage
+ |26 |LPD Internal Voltage measurement, VCC_PSINTLP (supply4). |Voltage
+ |27 |FPD Internal Voltage measurement, VCC_PSINTFP (supply5). |Voltage
+ |28 |PS Auxiliary Voltage measurement (supply6). |Voltage
+ |29 |PL VCCADC Voltage measurement (vccams). |Voltage
+ |30 |Differential analog input signal Voltage measurement. |Voltage
+ |31 |VUser0 Voltage measurement (supply7). |Voltage
+ |32 |VUser1 Voltage measurement (supply8). |Voltage
+ |33 |VUser2 Voltage measurement (supply9). |Voltage
+ |34 |VUser3 Voltage measurement (supply10). |Voltage
+ |35 |Auxiliary ch 0 Voltage measurement (VAux0). |Voltage
+ |36 |Auxiliary ch 1 Voltage measurement (VAux1). |Voltage
+ |37 |Auxiliary ch 2 Voltage measurement (VAux2). |Voltage
+ |38 |Auxiliary ch 3 Voltage measurement (VAux3). |Voltage
+ |39 |Auxiliary ch 4 Voltage measurement (VAux4). |Voltage
+ |40 |Auxiliary ch 5 Voltage measurement (VAux5). |Voltage
+ |41 |Auxiliary ch 6 Voltage measurement (VAux6). |Voltage
+ |42 |Auxiliary ch 7 Voltage measurement (VAux7). |Voltage
+ |43 |Auxiliary ch 8 Voltage measurement (VAux8). |Voltage
+ |44 |Auxiliary ch 9 Voltage measurement (VAux9). |Voltage
+ |45 |Auxiliary ch 10 Voltage measurement (VAux10). |Voltage
+ |46 |Auxiliary ch 11 Voltage measurement (VAux11). |Voltage
+ |47 |Auxiliary ch 12 Voltage measurement (VAux12). |Voltage
+ |48 |Auxiliary ch 13 Voltage measurement (VAux13). |Voltage
+ |49 |Auxiliary ch 14 Voltage measurement (VAux14). |Voltage
+ |50 |Auxiliary ch 15 Voltage measurement (VAux15). |Voltage
+---------------------------------------------------------------------------------------------------------
diff --git a/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt b/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
index e0e0755cabd8..fecb1afdd8c1 100644
--- a/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
@@ -15,6 +15,8 @@ Required properties:
configuration interface to interface to the XADC hardmacro.
* "xlnx,axi-xadc-1.00.a": When using the axi-xadc pcore to
interface to the XADC hardmacro.
+ * "xlnx,axi-sysmon-1.3": When using the axi-sysmon pcore to
+ interface to the sysmon hardmacro.
- reg: Address and length of the register set for the device
- interrupts: Interrupt for the XADC control interface.
- clocks: When using the ZYNQ this must be the ZYNQ PCAP clock,
@@ -110,3 +112,20 @@ Examples:
};
};
};
+
+ xadc@44a00000 {
+ compatible = "xlnx,axi-sysmon-1.3";
+ interrupt-parent = <&axi_intc_0>;
+ interrupts = <2 2>;
+ clocks = <&clk_bus_0>;
+ reg = <0x44a00000 0x10000>;
+
+ xlnx,channels {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ channel@0 {
+ reg = <0>;
+ xlnx,bipolar;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt
index 1a8718f8855d..178fca08278f 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt
@@ -55,7 +55,7 @@ Required Properties:
corresponds to a range of host irqs.
For more details on TISCI IRQ resource management refer:
-http://downloads.ti.com/tisci/esd/latest/2_tisci_msgs/rm/rm_irq.html
+https://downloads.ti.com/tisci/esd/latest/2_tisci_msgs/rm/rm_irq.html
Example:
--------
diff --git a/Documentation/devicetree/bindings/interrupt-controller/xilinx,intc.txt b/Documentation/devicetree/bindings/interrupt-controller/xilinx,intc.txt
new file mode 100644
index 000000000000..03b39f4b1625
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/xilinx,intc.txt
@@ -0,0 +1,56 @@
+Xilinx Interrupt Controller
+
+The controller is a soft IP core that is configured at build time for the
+number of interrupts and the type of each interrupt. These details cannot
+be changed at run time.
+
+Required properties:
+
+- compatible : should be "xlnx,xps-intc-1.00.a"
+- reg : Specifies base physical address and size of the registers.
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt source. The value shall be a minimum of 1.
+ The Xilinx device trees typically use 2 but the 2nd value
+ is not used.
+- xlnx,kind-of-intr : A 32 bit value specifying the interrupt type for each
+ possible interrupt (1 = edge, 0 = level). The interrupt
+ type typically comes in through the device tree node of
+ the interrupt generating device, but in this case
+ the interrupt type is determined by the interrupt
+ controller based on how it was implemented.
+- xlnx,num-intr-inputs: Specifies the number of interrupts supported
+ by the specific implementation of the controller (1-32).
+
+Optional properties:
+- interrupt-parent : Specifies an interrupt controller from which it is
+ chained (cascaded).
+- interrupts : Specifies the interrupt of the parent controller from which
+ it is chained.
+
+Example:
+
+axi_intc_0: interrupt-controller@41800000 {
+ #interrupt-cells = <2>;
+ compatible = "xlnx,xps-intc-1.00.a";
+ interrupt-controller;
+ reg = <0x41800000 0x10000>;
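+ /* bit n = 1: interrupt input n is edge triggered; here input 0 is edge */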
+ xlnx,kind-of-intr = <0x1>;
+ xlnx,num-intr-inputs = <0x1>;
+};
+
+Chained Example:
+
+The interrupt is chained to hardware interrupt 61 (29 + 32) of the GIC
+for Zynq.
+
+axi_intc_0: interrupt-controller@41800000 {
+ #interrupt-cells = <2>;
+ compatible = "xlnx,xps-intc-1.00.a";
+ interrupt-controller;
+ interrupt-parent = <&ps7_scugic_0>;
+ interrupts = <0 29 4>;
+ reg = <0x41800000 0x10000>;
+ xlnx,kind-of-intr = <0x1>;
+ xlnx,num-intr-inputs = <0x1>;
+};
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.txt
new file mode 100644
index 000000000000..2e10719fc8b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.txt
@@ -0,0 +1,124 @@
+
+Xilinx MIPI CSI2 Receiver Subsystem (CSI2RxSS)
+----------------------------------------------
+
+The Xilinx MIPI CSI2 Receiver Subsystem is used to capture MIPI CSI2 traffic
+from compliant camera sensors and send the output as AXI4 Stream video data
+for image processing. The subsystem consists of a MIPI DPHY in slave mode
+which captures the data packets. These are passed along to the MIPI CSI2 IP,
+which extracts the packet data. This data is taken in by the Video Format Bridge
+(VFB) if selected and converted into AXI4 Stream video data at selected
+pixels per clock as per AXI4-Stream Video IP and System Design UG934.
+
+For more details, please refer to PG232 MIPI CSI-2 Receiver Subsystem v2.0
+
+Required properties:
+
+- compatible: Must contain "xlnx,mipi-csi2-rx-subsystem-4.0". The older strings
+ "xlnx,mipi-csi2-rx-subsystem-2.0" and "xlnx,mipi-csi2-rx-subsystem-3.0" are
+ deprecated.
+
+- reg: Physical base address and length of the registers set for the device.
+
+- xlnx,max-lanes: Maximum active lanes in the design.
+
+- xlnx,en-active-lanes: Enable Active lanes configuration in Protocol
+ Configuration Register.
+
+- xlnx,dphy-present: Present when the DPHY register interface is enabled in
+ the design.
+
+- xlnx,iic-present: Present when the subsystem's IIC is enabled. This
+ affects the base address of the DPHY.
+
+- xlnx,vc: Virtual Channel. Specifies the virtual channel number to be
+ filtered. If this is 4, all virtual channels are allowed.
+
+- xlnx,csi-pxl-format: This denotes the CSI Data type selected in hw design.
+ Packets other than this data type (except for RAW8 and User defined data
+ types) will be filtered out. Possible values are RAW6, RAW7, RAW8, RAW10,
+ RAW12, RAW14, RAW16, RAW20, RGB444, RGB555, RGB565, RGB666, RGB888 and YUV4228bit.
+
+- xlnx,vfb: Video Format Bridge. Denotes whether the Video Format Bridge is
+ selected, so that the output is an AXI4-Stream as documented in UG934.
+
+- xlnx,ppc: Pixels per clock, the number of pixels to be transferred per
+ pixel clock. This is valid only if the xlnx,vfb property is set to 1.
+
+- xlnx,axis-tdata-width: This denotes the AXI4-Stream data width. It depends
+ on the data type chosen, whether the Video Format Bridge is enabled or
+ disabled, and the pixels per clock. If VFB is disabled, its value is either
+ 0x20 (32-bit) or 0x40 (64-bit).
+
+- xlnx,video-format, xlnx,video-width: Video format and width, as defined in
+ video.txt.
+
+- port: Video port, using the DT bindings defined in ../video-interfaces.txt.
+ The CSI2 Rx Subsystem has two ports, one input port connecting to the
+ camera sensor and one output port.
+
+- data-lanes: The number of data lanes through which CSI2 Rx Subsystem is
+ connected to the camera sensor as per video-interfaces.txt
+
+- clocks: List of phandles to AXI Lite, Video and 200 MHz DPHY clocks.
+
+- clock-names: Must contain "lite_aclk", "video_aclk" and "dphy_clk_200M" in
+ the same order as clocks listed in clocks property.
+
+Optional Properties
+
+- xlnx,en-vcx: When present, the max number of virtual channels can be 16 else 4.
+
+- reset-gpios: Optional specifier for a GPIO that asserts video_aresetn.
+
+Example:
+
+ csiss_1: csiss@a0020000 {
+ compatible = "xlnx,mipi-csi2-rx-subsystem-4.0";
+ reg = <0x0 0xa0020000 0x0 0x20000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 95 4>;
+
+ reset-gpios = <&gpio 81 1>;
+ xlnx,max-lanes = <0x4>;
+ xlnx,en-active-lanes;
+ xlnx,dphy-present;
+ xlnx,iic-present;
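+ /* 4 = allow all virtual channels */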
+ xlnx,vc = <0x4>;
+ xlnx,csi-pxl-format = "RAW8";
+ xlnx,vfb;
+ xlnx,ppc = <0x4>;
+ xlnx,axis-tdata-width = <0x20>;
+
+ clock-names = "lite_aclk", "dphy_clk_200M", "video_aclk";
+ clocks = <&misc_clk_0>, <&misc_clk_1>, <&misc_clk_2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+ csiss_out: endpoint {
+ remote-endpoint = <&vcap_csiss_in>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ csiss_in: endpoint {
+ data-lanes = <1 2 3 4>;
+ /* MIPI CSI2 Camera handle */
+ remote-endpoint = <&vs2016_out>;
+ };
+
+ };
+
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,mem2mem.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,mem2mem.txt
new file mode 100644
index 000000000000..73af77faeb20
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,mem2mem.txt
@@ -0,0 +1,25 @@
+Xilinx Video IP MEM2MEM Pipeline (XVIM2M)
+----------------------------------------
+
+The Xilinx video IP mem2mem pipeline processes DMA transfers to copy data
+from one physical memory location to another. The data is copied using two
+DMA transfers, a memory-to-device and a device-to-memory transaction, one
+after the other. The XVIM2M DT node is the top-level node of the pipeline
+and defines the mappings between the DMAs.
+
+Required properties:
+
+- compatible: Must be "xlnx,mem2mem".
+
+- dmas, dma-names: List of two DMA specifiers and identifier strings (as
+ defined in Documentation/devicetree/bindings/dma/dma.txt) per port. The
+ identifier string of one DMA channel should be "tx" and that of the other
+ should be "rx".
+
+Example:
+
+ video_m2m {
+ compatible = "xlnx,mem2mem";
+ dmas = <&dma_1 0>, <&dma_2 0>;
+ dma-names = "tx", "rx";
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,sdirxss.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,sdirxss.txt
new file mode 100644
index 000000000000..1e38b7d5c5a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,sdirxss.txt
@@ -0,0 +1,66 @@
+
+Xilinx SDI Receiver Subsystem
+------------------------------
+
+The Xilinx SDI Rx Subsystem is used to capture SDI video in up to 12G mode.
+It outputs the video as AXI4-Stream video data in YUV 422 10bpc mode.
+The subsystem consists of the SDI Rx IP whose SDI native output is connected
+to an SDI-to-native conversion bridge. The output of the native bridge is
+connected to a native-to-AXI4S bridge which generates the AXI4-Stream of
+YUV422 or YUV420 10 bpc in dual pixel per clock.
+
+Required properties:
+
+- compatible: Must contain "xlnx,v-smpte-uhdsdi-rx-ss"
+
+- reg: Physical base address and length of the registers set for the device.
+
+- interrupts: Contains the interrupt line number.
+
+- interrupt-parent: phandle to interrupt controller.
+
+- xlnx,include-edh: Whether the EDH processor is enabled in the design or not.
+
+- xlnx,line-rate: The maximum mode supported by the design.
+
+- clocks: Input clock specifier. Refer to common clock bindings.
+
+- clock-names: List of input clocks.
+ Required elements: "s_axi_aclk", "sdi_rx_clk", "video_out_clk"
+
+- port: Video port, using the DT bindings defined in ../video-interfaces.txt.
+ The SDI Rx subsystem has one port configured as output port.
+
+- xlnx,video-format, xlnx,video-width: Video format and width, as defined in
+ video.txt. Please note that the video format is fixed to either YUV422 or YUV420
+ and the video-width is 10.
+
+Example:
+ v_smpte_uhdsdi_rx_ss: v_smpte_uhdsdi_rx_ss@80000000 {
+ compatible = "xlnx,v-smpte-uhdsdi-rx-ss";
+ interrupt-parent = <&gic>;
+ interrupts = <0 89 4>;
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ xlnx,include-axilite = "true";
+ xlnx,include-edh = "true";
+ xlnx,include-vid-over-axi = "true";
+ xlnx,line-rate = "12G_SDI_8DS";
+ clocks = <&clk_1>, <&si570_1>, <&clk_2>;
+ clock-names = "s_axi_aclk", "sdi_rx_clk", "video_out_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <10>;
+
+ sdirx_out: endpoint {
+ remote-endpoint = <&vcap_sdirx_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-axi4s-switch.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-axi4s-switch.txt
new file mode 100644
index 000000000000..fb5ed47d959a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-axi4s-switch.txt
@@ -0,0 +1,141 @@
+Xilinx AXI4-Stream Switch
+-------------------------------
+
+The AXI4-Stream Switch provides configurable routing between masters and slaves.
+It supports up to 16 masters/sources and 16 slaves/sinks and two routing options.
+There is at least one slave/sink port and two master/source ports.
+
+The two routing options available are TDEST routing and control register routing.
+The TDEST based routing uses design parameters and hence there is no software control.
+Each port is mapped as a pad and has its own format specified.
+
+Control register routing introduces an AXI4-Lite interface to configure the
+routing table. There is one register for each of the master interfaces to
+control each of the selectors. This routing mode requires that there is
+precisely only one path between master and slave. When attempting to map the
+same slave interface to multiple master interfaces, only the lowest master
+interface is able to access the slave interface.
+Here only the slave/sink ports have formats as master/source ports will inherit
+the corresponding slave ports formats. A routing table is maintained in this case.
+
+Please refer to PG085 AXI4-Stream Infrastructure IP Suite v2.2 for more details.
+
+Required properties:
+
+ - compatible: Must be "xlnx,axis-switch-1.1".
+ - xlnx,routing-mode: Can be 0 (TDEST routing) or 1 (Control reg routing)
+ - xlnx,num-si-slots: Number of slave / input ports. Min 1, max 16.
+ - xlnx,num-mi-slots: Number of master / output ports. Min 1, max 16.
+ - ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ - clocks: Reference to the AXI Streaming clock feeding the ACLK and
+ AXI4 Lite control interface clock when control routing is enabled.
+ - clock-names: Must have "aclk".
+
+Optional properties:
+ - reg: Physical base address and length of the registers set for the device.
+ This is required only if xlnx,routing-mode is 1.
+ - clocks: Reference to AXI4 Lite control interface clock when routing-mode is 1.
+ - clock-names: "s_axi_ctl_clk" clock for AXI4 Lite interface when routing-mode is 1.
+
+Example:
+
+For TDEST routing, from 1 slave port to 4 master ports
+
+ axis_switch_0: axis_switch@0 {
+ compatible = "xlnx,axis-switch-1.1";
+ xlnx,routing-mode = <0x0>;
+ xlnx,num-si-slots = <0x1>;
+ xlnx,num-mi-slots = <0x4>;
+ clocks = <&vid_stream_clk>;
+ clock-names = "aclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ switch_in0: endpoint {
+ remote-endpoint = <&csirxss_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ switch_out0: endpoint {
+ remote-endpoint = <&vcap_csirxss0_in>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ switch_out1: endpoint {
+ remote-endpoint = <&vcap_csirxss1_in>;
+ };
+ };
+ port@3 {
+ reg = <3>;
+ switch_out2: endpoint {
+ remote-endpoint = <&vcap_csirxss2_in>;
+ };
+ };
+ port@4 {
+ reg = <4>;
+ switch_out3: endpoint {
+ remote-endpoint = <&vcap_csirxss3_in>;
+ };
+ };
+ };
+
+ };
+
+For Control reg based routing, from 2 slave ports to 4 master ports
+
+ axis_switch_0: axis_switch@a0050000 {
+ compatible = "xlnx,axis-switch-1.1";
+ reg = <0x0 0xa0050000 0x0 0x1000>;
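+ /* 1 = control register routing, 0 = TDEST routing */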
+ xlnx,routing-mode = <0x1>;
+ xlnx,num-si-slots = <0x2>;
+ xlnx,num-mi-slots = <0x4>;
+ clocks = <&vid_stream_clk>, <&misc_clk_0>;
+ clock-names = "aclk", "s_axi_ctl_clk;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ switch_in0: endpoint {
+ remote-endpoint = <&csirxss_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ switch_in1: endpoint {
+ remote-endpoint = <&tpg_out>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ switch_out0: endpoint {
+ remote-endpoint = <&vcap_csirxss0_in>;
+ };
+ };
+ port@3 {
+ reg = <3>;
+ switch_out1: endpoint {
+ remote-endpoint = <&vcap_csirxss1_in>;
+ };
+ };
+ port@4 {
+ reg = <4>;
+ switch_out2: endpoint {
+ remote-endpoint = <&vcap_csirxss2_in>;
+ };
+ };
+ port@5 {
+ reg = <5>;
+ switch_out3: endpoint {
+ remote-endpoint = <&vcap_csirxss3_in>;
+ };
+ };
+ };
+
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cfa.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cfa.txt
new file mode 100644
index 000000000000..cdb0886cf975
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cfa.txt
@@ -0,0 +1,58 @@
+Xilinx Color Filter Array (CFA)
+-------------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,v-cfa-7.0".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the video core clock.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The cfa has an input port (0) and an output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Must be SENSOR_MONO for the input port (0), and RBG for
+ the output port (1).
+
+- xlnx,video-width: Video width as defined in video.txt
+
+- xlnx,cfa-pattern: Must be one of "rggb", "grbg", "gbrg" or "bggr" for the
+ input port (0). Must not be specified for the output port (1).
+
+Example:
+
+ cfa_0: cfa@400b0000 {
+ compatible = "xlnx,v-cfa-7.0";
+ reg = <0x400b0000 0x10000>;
+ clocks = <&clkc 15>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_SENSOR_MONO>;
+ xlnx,video-width = <8>;
+ xlnx,cfa-pattern = "rggb";
+
+ cfa0_in: endpoint {
+ remote-endpoint = <&spc0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_RBG>;
+ xlnx,video-width = <8>;
+
+ cfa0_out: endpoint {
+ remote-endpoint = <&ccm0_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cresample.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cresample.txt
new file mode 100644
index 000000000000..f404ee301272
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cresample.txt
@@ -0,0 +1,54 @@
+Xilinx Chroma Resampler (CRESAMPLE)
+-----------------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,v-cresample-4.0".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the video core clock.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The cresample has an input port (0) and an output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Must be one of YUV_444, YUV_422 or YUV_420 for the input
+ port (0), and one of YUV_422 or YUV_420 for the output port (1).
+
+- xlnx,video-width: Video width as defined in video.txt
+
+Example:
+
+ cresample_0: cresample@40120000 {
+ compatible = "xlnx,v-cresample-4.0";
+ reg = <0x40120000 0x10000>;
+ clocks = <&clkc 15>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_444>;
+ xlnx,video-width = <8>;
+
+ cresample0_in: endpoint {
+ remote-endpoint = <&rgb2yuv0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ cresample0_out: endpoint {
+ remote-endpoint = <&scaler0_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-demosaic.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-demosaic.txt
new file mode 100644
index 000000000000..9b3aff413e0e
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-demosaic.txt
@@ -0,0 +1,62 @@
+Xilinx Video Demosaic IP
+-----------------------------
+The Xilinx Video Demosaic IP is used to interface to a Bayer video source.
+
+The driver sets the default sink pad media bus format to RGGB.
+The IP and driver support only RGB as the source pad media format.
+
+Required properties:
+
+- compatible: Must be "xlnx,v-demosaic".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the AXI Streaming clock feeding the Demosaic ap_clk.
+
+- xlnx,max-height: Maximum number of lines. Valid range is 64 to 4320.
+
+- xlnx,max-width: Maximum number of pixels in a line. Valid range is 64 to 8192.
+
+- reset-gpios: Specifier for GPIO that asserts Demosaic IP (AP_RST_N) reset.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+
+Required port properties:
+
+- reg: This value represents the media pad of the V4L2 sub-device.
+ A Sink Pad is represented by reg = <0>
+ A Source Pad is represented by reg = <1>
+
+- xlnx,video-width: Video width as defined in video.txt
+
+Example:
+ demosaic_1: demosaic@a00b0000 {
+ compatible = "xlnx,v-demosaic";
+ reg = <0x0 0xa00b0000 0x0 0x10000>;
+ clocks = <&vid_stream_clk>;
+ reset-gpios = <&gpio 87 1>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ xlnx,video-width = <8>;
+
+ demosaic_in: endpoint {
+ remote-endpoint = <&tpg_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ xlnx,video-width = <8>;
+
+ demosaic_out: endpoint {
+ remote-endpoint = <&gamma_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-gamma-lut.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-gamma-lut.txt
new file mode 100644
index 000000000000..7bd750f009b4
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-gamma-lut.txt
@@ -0,0 +1,63 @@
+Xilinx Video Gamma Correction IP
+-----------------------------------
+The Xilinx Video Gamma Correction IP is used to provide RGB gamma correction.
+The IP provides a lookup table for each of the R, G and B components.
+
+Required properties:
+
+- compatible: Must be "xlnx,v-gamma-lut".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the clock that drives the ap_clk
+ signal of Video Gamma Lookup.
+
+- xlnx,max-height: Maximum number of lines. Valid range is 64 to 4320.
+
+- xlnx,max-width: Maximum number of pixels in a line. Valid range is 64 to 8192.
+
+- reset-gpios: Specifier for a GPIO that asserts Gamma IP (AP_RST_N) reset
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The Gamma LUT IP has an input port (0) and an output port (1).
+
+
+Required port properties:
+- reg: This value represents the media pad of the V4L2 sub-device.
+ A Sink Pad is represented by reg = <0>
+ A Source Pad is represented by reg = <1>
+
+- xlnx,video-width: Video width as defined in video.txt. Can be either 8 or 10.
+
+Example:
+
+ gamma_lut_1: gamma_lut_1@a0080000 {
+ compatible = "xlnx,v-gamma-lut";
+ reg = <0x0 0xa0080000 0x0 0x10000>;
+ clocks = <&vid_stream_clk>;
+ reset-gpios = <&gpio 83 1>;
+ xlnx,max-height = <2160>;
+ xlnx,max-width = <3840>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ xlnx,video-width = <8>;
+
+ gamma_in: endpoint {
+ remote-endpoint = <&demosaic_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ xlnx,video-width = <8>;
+
+ gamma_out: endpoint {
+ remote-endpoint = <&csc_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-hls.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-hls.txt
new file mode 100644
index 000000000000..a6db3040565a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-hls.txt
@@ -0,0 +1,64 @@
+Xilinx High-Level Synthesis Core (HLS)
+--------------------------------------
+
+High-Level Synthesis cores are synthesized from a high-level function
+description developed by the user. As such their functions vary widely, but
+they all share a set of common characteristics that allow them to be described
+by common bindings.
+
+
+Required properties:
+
+- compatible: This property must contain "xlnx,v-hls" to indicate that the
+ core is compatible with the generic Xilinx HLS DT bindings. It can also
+ contain a more specific string to identify the HLS core implementation. The
+ value of those implementation-specific strings is out of scope for these DT
+ bindings.
+
+- reg: Physical base address and length of the register sets for the device.
+ The HLS core has two register sets: the first one contains the core's
+ standard registers and the second one contains the custom user registers.
+
+- clocks: Reference to the video core clock.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The HLS core has one input port (0) and one output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Video format as defined in video.txt.
+- xlnx,video-width: Video width as defined in video.txt.
+
+Example:
+
+ hls_0: hls@43c00000 {
+ compatible = "xlnx,v-hls-sobel", "xlnx,v-hls";
+ reg = <0x43c00000 0x24>, <0x43c00024 0xa0>;
+ clocks = <&clkc 15>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ hls0_in: endpoint {
+ remote-endpoint = <&vdma_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ hls0_out: endpoint {
+ remote-endpoint = <&vdma_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-multi-scaler.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-multi-scaler.txt
new file mode 100644
index 000000000000..3aea1f36a6ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-multi-scaler.txt
@@ -0,0 +1,95 @@
+Xilinx mem2mem Multi Video Scaler (XM2MSC)
+-----------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,v-multi-scaler-v1.0"
+- clocks : Input clock specifier. Refer to common clk bindings.
+- interrupt-parent : Interrupt controller the interrupt is routed through
+- interrupts : Should contain MultiScaler interrupt
+- reset-gpios : Should contain GPIO reset phandle
+- reg : Physical base address and
+ length of the registers set for the device.
+- xlnx,max-chan : Maximum number of supported scaling channels (1 - 8)
+- xlnx,max-width : Maximum number of supported column/width (64 - 3840)
+- xlnx,max-height : Maximum number of supported row/height (64 - 2160)
+- xlnx,dma-addr-width : dma address width (either 32 or 64)
+- xlnx,pixels-per-clock : pixels per clock set in IP (1, 2 or 4)
+- xlnx,vid-formats : A list of strings indicating what video memory
+ formats the IP has been configured to support.
+ See VIDEO FORMATS table below and examples.
+- xlnx,num-taps : The number of filter taps for scaling (6, 8, 10, 12)
+
+VIDEO FORMATS
+The following table describes the legal string values to be used for
+the xlnx,vid-formats property. To the left is the string value and the
+column to the right describes the format.
+
+IP FORMAT DTS String Description
+-------------|----------------|---------------------
+RGB8 bgr888 Packed RGB, 8 bits per component.
+ Every RGB pixel in memory is represented with
+ 24 bits.
+RGBX8 xbgr8888 Packed RGB, 8 bits per component. Every RGB
+ pixel in memory is represented with 32 bits.
+ Bits[31:24] do not contain pixel information.
+BGRX8 xrgb8888 Packed BGR, 8 bits per component. Every BGR
+ pixel in memory is represented with 32 bits.
+ Bits[31:24] do not contain pixel information.
+RGBX10 xbgr2101010 Packed RGB, 10 bits per component. Every RGB
+ pixel is represented with 32 bits. Bits[31:30]
+ do not contain any pixel information.
+YUV8 vuy888 Packed YUV 4:4:4, 8 bits per component. Every
+ YUV 4:4:4 pixel in memory is represented with
+ 24 bits.
+YUVX8 xvuy8888 Packed YUV 4:4:4, 8 bits per component.
+ Every YUV 4:4:4 pixel in memory is represented
+ with 32 bits. Bits[31:24] do not contain pixel
+ information.
+YUYV8 yuyv Packed YUV 4:2:2, 8 bits per component. Every
+ two YUV 4:2:2 pixels in memory are represented
+ with 32 bits.
+UYVY8 uyvy Packed YUV 4:2:2, 8 bits per component.
+ Every two YUV 4:2:2 pixels in memory are
+ represented with 32 bits.
+YUVX10 yuvx2101010 Packed YUV 4:4:4, 10 bits per component.
+ Every YUV 4:4:4 pixel is represented with 32 bits.
+ Bits[31:30] do not contain any pixel information.
+Y8 y8 Packed Luma-Only, 8 bits per component. Every
+ luma-only pixel in memory is represented with
+ 8 bits. Y8 is presented as YUV 4:4:4 on the
+ AXI4-Stream interface.
+Y10 y10 Packed Luma-Only, 10 bits per component. Every
+ three luma-only pixels in memory is represented
+ with 32 bits. Y10 is presented as YUV 4:4:4 on
+ the AXI4-Stream interface.
+Y_UV8 nv16 Semi-planar YUV 4:2:2 with 8 bits per component.
+ Y and UV stored in separate planes.
+Y_UV8_420 nv12 Semi-planar YUV 4:2:0 with 8 bits per component.
+ Y and UV stored in separate planes.
+Y_UV10 xv20 Semi-planar YUV 4:2:2 with 10 bits per component.
+ Every 3 pixels is represented with 32 bits.
+ Bits[31:30] do not contain any pixel information.
+ Y and UV stored in separate planes.
+Y_UV10_420 xv15 Semi-planar YUV 4:2:0 with 10 bits per component.
+ Every 3 pixels is represented with 32 bits.
+ Bits[31:30] do not contain any pixel information.
+ Y and UV stored in separate planes.
+
+Example
+
+v_multi_scaler_0: v_multi_scaler@a0000000 {
+ clocks = <&clk 71>;
+ compatible = "xlnx,v-multi-scaler-v1.0";
+ interrupt-names = "interrupt";
+ interrupt-parent = <&gic>;
+ interrupts = <0 89 4>;
+ reg = <0x0 0xa0000000 0x0 0x10000>;
+ xlnx,vid-formats = "bgr888","vuy888";
+ reset-gpios = <&gpio 78 1>;
+ xlnx,max-chan = <0x01>;
+ xlnx,dma-addr-width = <0x20>;
+ xlnx,pixels-per-clock = /bits/ 8 <2>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+ xlnx,num-taps = <6>;
+};
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-remapper.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-remapper.txt
new file mode 100644
index 000000000000..cda02cb97a21
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-remapper.txt
@@ -0,0 +1,61 @@
+Xilinx Video Remapper
+---------------------
+
+The IP core remaps input pixel components to produce an output pixel with
+fewer, more or the same number of components as the input pixel.
+
+Required properties:
+
+- compatible: Must be "xlnx,v-remapper".
+
+- clocks: Reference to the video core clock.
+
+- xlnx,video-width: Video pixel component width, as defined in video.txt.
+
+- #xlnx,s-components: Number of components per pixel at the input port
+ (between 1 and 4 inclusive).
+
+- #xlnx,m-components: Number of components per pixel at the output port
+ (between 1 and 4 inclusive).
+
+- xlnx,component-maps: Remapping configuration represented as an array of
+ integers. The array contains one entry per output component, in the low to
+ high order. Each entry corresponds to the zero-based position of the
+ corresponding input component, or the value 4 to drive a constant value on
+ the output component. For example, to remap RGB to BGR use <2 1 0>, and to
+ remap RBG to xRGB use <1 0 2 4>.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The remapper has an input port (0) and an output port (1).
+
+Example: RBG to xRGB remapper
+
+ remapper_0: remapper {
+ compatible = "xlnx,v-remapper";
+
+ clocks = <&clkc 15>;
+
+ xlnx,video-width = <8>;
+
+ #xlnx,s-components = <3>;
+ #xlnx,m-components = <4>;
+ xlnx,component-maps = <1 0 2 4>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ remap0_in: endpoint {
+ remote-endpoint = <&tpg0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ remap0_out: endpoint {
+ remote-endpoint = <&sobel0_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-rgb2yuv.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-rgb2yuv.txt
new file mode 100644
index 000000000000..ecd10fb31ac1
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-rgb2yuv.txt
@@ -0,0 +1,54 @@
+Xilinx RGB to YUV (RGB2YUV)
+---------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,v-rgb2yuv-7.1".
+
+- reg: Physical base address and length of the register set for the device.
+
+- clocks: Reference to the video core clock.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The rgb2yuv has an input port (0) and an output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Must be RBG for the input port (0) and YUV_444 for the
+ output port (1).
+
+- xlnx,video-width: Video width as defined in video.txt
+
+Example:
+
+ rgb2yuv_0: rgb2yuv@40100000 {
+ compatible = "xlnx,v-rgb2yuv-7.1";
+ reg = <0x40100000 0x10000>;
+ clocks = <&clkc 15>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_RBG>;
+ xlnx,video-width = <8>;
+
+ rgb2yuv0_in: endpoint {
+ remote-endpoint = <&gamma0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_444>;
+ xlnx,video-width = <8>;
+
+ rgb2yuv0_out: endpoint {
+ remote-endpoint = <&cresample0_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scaler.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scaler.txt
new file mode 100644
index 000000000000..0bb9c405f5ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scaler.txt
@@ -0,0 +1,75 @@
+Xilinx Scaler (SCALER)
+------------------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,v-scaler-8.1".
+
+- reg: Physical base address and length of the register set for the device.
+
+- clocks: Reference to the video core clock.
+
+- xlnx,num-hori-taps, xlnx,num-vert-taps: The number of horizontal and vertical
+ taps for the scaling filter (range: 2 - 12).
+
+- xlnx,max-num-phases: The maximum number of phases for the scaling filter
+ (range: 2 - 64).
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The scaler has an input port (0) and an output port (1).
+
+Optional properties:
+
+- xlnx,separate-yc-coef: When set, this boolean property specifies that
+ the hardware uses separate coefficients for the luma and chroma filters.
+ Otherwise a single set of coefficients is shared for both.
+
+- xlnx,separate-hv-coef: When set, this boolean property specifies that
+ the hardware uses separate coefficients for the horizontal and vertical
+ filters. Otherwise a single set of coefficients is shared for both.
+
+Required port properties:
+
+- xlnx,video-format: Must be one of RBG, YUV_444, YUV_422 or YUV_420 for
+ both input port (0) and output port (1). The two formats must be identical.
+
+- xlnx,video-width: Video width as defined in video.txt
+
+Example:
+
+ scaler_0: scaler@43c30000 {
+ compatible = "xlnx,v-scaler-8.1";
+ reg = <0x43c30000 0x10000>;
+ clocks = <&clkc 15>;
+
+ xlnx,num-hori-taps = <12>;
+ xlnx,num-vert-taps = <12>;
+ xlnx,max-num-phases = <4>;
+ xlnx,separate-hv-coef;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ scaler0_in: endpoint {
+ remote-endpoint = <&cresample0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ scaler0_out: endpoint {
+ remote-endpoint = <&vcap0_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scd.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scd.txt
new file mode 100644
index 000000000000..a05e9712c833
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scd.txt
@@ -0,0 +1,164 @@
+Xilinx Scene Change Detection IP (SCD)
+--------------------------------------
+
+The Xilinx Scene Change Detection IP contains two blocks: one IP block is used
+for reading video frame data from memory into the device and the other IP
+block determines whether there is a scene change between the current and the
+previous frame. The IP supports YUV planar and semi-planar formats and only
+needs the luma frame to determine a scene change event. The IP supports a
+memory-based model: it accepts a DMA buffer address, performs the MEM2DEV
+transfer followed by statistics-based image processing, and reports back to
+the application whether or not a scene change was detected.
+
+Another version of the scene change detection IP supports a streaming model,
+which means the IP can be inserted in a capture pipeline. For example,
+"hdmirx -> streaming-scd -> fb_wr" is a typical capture pipeline where the
+streaming SCD can be embedded. This IP accepts AXI video data and performs
+histogram-based statistical analysis to detect scene changes. It supports a
+single channel.
+
+Required properties:
+
+- compatible: Should be "xlnx,v-scd"
+
+- reg: Physical base address and length of the register set for the device
+
+- clocks: Reference to the video core clock.
+
+- reset-gpios: Specifier for a GPIO that asserts the SCD (AP_RST_N) reset.
+
+- xlnx,memory-based: Boolean property used to differentiate between the
+ memory-based and the streaming-based IP. It is present for memory-based
+ IPs and absent for streaming-based IPs, as shown in the examples below.
+
+- xlnx,numstreams: Number of active streams the IP supports, based on the
+ design. The maximum is 8.
+
+- xlnx,addrwidth: Size of the DMA address pointer in the IP (either 32 or 64)
+
+- subdev: Each channel has its own subdev node, and each subdev has its own
+ sink port.
+
+- port: Video port, using the DT bindings defined in ../video-interfaces.txt.
+
+Example:
+
+1. Memory-based device tree
+
+The following example shows how the device tree would look for a memory-based
+design where 8 streams are enabled.
+
+ scd: scenechange@a0100000 {
+ compatible = "xlnx,v-scd";
+ reg = <0x0 0xa0100000 0x0 0x1fff>;
+ clocks = <&misc_clk_0>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 90 4>;
+ reset-gpios = <&gpio 94 1>;
+
+ xlnx,memory-based;
+ xlnx,numstreams = <8>;
+ xlnx,addrwidth = <0x20>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #dma-cells = <1>;
+
+ subdev@0 {
+ port@0 {
+ reg = <0>;
+ scd_in0: endpoint {
+ remote-endpoint = <&vcap0_out0>;
+ };
+ };
+ };
+ subdev@1 {
+ port@0 {
+ reg = <0>;
+ scd_in1: endpoint {
+ remote-endpoint = <&vcap0_out1>;
+ };
+ };
+ };
+ subdev@2 {
+ port@0 {
+ reg = <0>;
+ scd_in2: endpoint {
+ remote-endpoint = <&vcap0_out2>;
+ };
+ };
+ };
+ subdev@3 {
+ port@0 {
+ reg = <0>;
+ scd_in3: endpoint {
+ remote-endpoint = <&vcap0_out3>;
+ };
+ };
+ };
+ subdev@4 {
+ port@0 {
+ reg = <0>;
+ scd_in4: endpoint {
+ remote-endpoint = <&vcap0_out4>;
+ };
+ };
+ };
+ subdev@5 {
+ port@0 {
+ reg = <0>;
+ scd_in5: endpoint {
+ remote-endpoint = <&vcap0_out5>;
+ };
+ };
+ };
+ subdev@6 {
+ port@0 {
+ reg = <0>;
+ scd_in6: endpoint {
+ remote-endpoint = <&vcap0_out6>;
+ };
+ };
+ };
+ subdev@7 {
+ port@0 {
+ reg = <0>;
+ scd_in7: endpoint {
+ remote-endpoint = <&vcap0_out7>;
+ };
+ };
+ };
+ };
+
+2. Streaming-based device tree
+
+The following example shows how the device tree would look for a
+streaming-based design.
+
+ scd: scenechange@a0280000 {
+ compatible = "xlnx,v-scd";
+ reg = <0x0 0xa0280000 0x0 0x1fff>;
+ clocks = <&clk 72>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 111 4>;
+ reset-gpios = <&gpio 100 1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ xlnx,numstreams = <1>;
+
+ scd {
+ port@0 {
+ reg = <0x0>;
+ scd_in0: endpoint {
+ remote-endpoint = <&vpss_scaler_out>;
+ };
+ };
+
+ port@1 {
+ reg = <0x1>;
+ scd_out0: endpoint {
+ remote-endpoint = <&vcap_hdmi_in_1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-switch.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-switch.txt
new file mode 100644
index 000000000000..91dc3af4a2b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-switch.txt
@@ -0,0 +1,55 @@
+Xilinx Video Switch
+-------------------
+
+Required properties:
+
+ - compatible: Must be "xlnx,v-switch-1.0".
+
+ - reg: Physical base address and length of the register set for the device.
+
+ - clocks: Reference to the video core clock.
+
+ - #xlnx,inputs: Number of input ports
+ - #xlnx,outputs: Number of output ports
+
+ - ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+
+Example:
+
+ switch: switch@43c10000 {
+ compatible = "xlnx,v-switch-1.0";
+ reg = <0x43c10000 0x10000>;
+ clocks = <&clkc 15>;
+
+ #xlnx,inputs = <2>;
+ #xlnx,outputs = <2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ switch_in0: endpoint {
+ remote-endpoint = <&tpg_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ switch_in1: endpoint {
+ remote-endpoint = <&cresample0_out>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ switch_out0: endpoint {
+ remote-endpoint = <&scaler0_in>;
+ };
+ };
+ port@3 {
+ reg = <3>;
+ switch_out1: endpoint {
+ remote-endpoint = <&vcap0_in1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt
index 439351ab2a79..4b2126a78a3f 100644
--- a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt
@@ -6,7 +6,8 @@ Required properties:
- compatible: Must contain at least one of
"xlnx,v-tpg-5.0" (TPG version 5.0)
- "xlnx,v-tpg-6.0" (TPG version 6.0)
+ "xlnx,v-tpg-7.0" (TPG version 7.0)
+ "xlnx,v-tpg-8.0" (TPG version 8.0)
TPG versions backward-compatible with previous versions should list all
compatible versions in the newer to older order.
@@ -23,6 +24,8 @@ Required properties:
Optional properties:
+- xlnx,ppc: Pixels per clock. Valid values are 1, 2, 4 or 8.
+
- xlnx,vtc: A phandle referencing the Video Timing Controller that generates
video timings for the TPG test patterns.
@@ -30,16 +33,26 @@ Optional properties:
input. The GPIO active level corresponds to the selection of VTC-generated
video timings.
+- reset-gpios: Specifier for a GPIO that asserts the TPG (AP_RST_N) reset.
+ This property is mandatory for TPG v7.0 and above.
+
+- xlnx,max-height: Maximum number of lines.
+ This property is mandatory for TPG v8.0. Value ranges from 64 to 7760.
+
+- xlnx,max-width: Maximum number of pixels in a line.
+ This property is mandatory for TPG v8.0. Value ranges from 64 to 10328.
+
The xlnx,vtc and timing-gpios properties are mandatory when the TPG is
synthesized with two ports and forbidden when synthesized with one port.
Example:
tpg_0: tpg@40050000 {
- compatible = "xlnx,v-tpg-6.0", "xlnx,v-tpg-5.0";
+ compatible = "xlnx,v-tpg-5.0";
reg = <0x40050000 0x10000>;
clocks = <&clkc 15>;
+ xlnx,ppc = <2>;
xlnx,vtc = <&vtc_3>;
timing-gpios = <&ps7_gpio_0 55 GPIO_ACTIVE_LOW>;
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-csc.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-csc.txt
new file mode 100644
index 000000000000..b3627af85e6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-csc.txt
@@ -0,0 +1,66 @@
+Xilinx VPSS Color Space Converter (CSC)
+-----------------------------------------
+The Xilinx VPSS Color Space Converter (CSC) is a Video IP that supports
+color space conversion between RGB and YUV formats.
+
+Required properties:
+
+- compatible: Must be "xlnx,v-vpss-csc".
+
+- reg: Physical base address and length of the register set for the device.
+
+- clocks: Reference to the clock that drives the ap_clk signal.
+
+- xlnx,max-height: Maximum number of lines.
+ Valid range from 64 to 4320.
+
+- xlnx,max-width: Maximum number of pixels in a line.
+ Valid range from 64 to 8192.
+
+- reset-gpios: Specifier for a GPIO that asserts the VPSS CSC (AP_RST_N) reset.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The CSC has an input port (0) and an output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Must be XVIP_VF_RBG, XVIP_VF_YUV_444 or XVIP_VF_YUV_422
+ for input port (0) and XVIP_VF_RBG, XVIP_VF_YUV_444 or XVIP_VF_YUV_422
+ for output port (1). See <dt-bindings/media/xilinx-vip.h> for more details.
+
+- xlnx,video-width: Video width as defined in video.txt. Must be either 8 or 10.
+
+Example:
+ csc_1:csc@a0040000 {
+ compatible = "xlnx,v-vpss-csc";
+ reg = <0x0 0xa0040000 0x0 0x10000>;
+ clocks = <&vid_stream_clk>;
+ reset-gpios = <&gpio 84 1>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ /* Sink Pad */
+ port@0 {
+ reg = <0>;
+ xlnx,video-format = <XVIP_VF_RBG>;
+ xlnx,video-width = <8>;
+
+ csc_in: endpoint {
+ remote-endpoint = <&gamma_out>;
+ };
+ };
+ /* Source Pad */
+ port@1 {
+ reg = <1>;
+ xlnx,video-format = <XVIP_VF_RBG>;
+ xlnx,video-width = <8>;
+
+ csc_out: endpoint {
+ remote-endpoint = <&scalar_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-scaler.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-scaler.txt
new file mode 100644
index 000000000000..05ca0cb33cad
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-scaler.txt
@@ -0,0 +1,93 @@
+Xilinx VPSS Scaler
+------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,v-vpss-scaler-2.2" or "xlnx,v-vpss-scaler-1.0".
+ The older string "xlnx,v-vpss-scaler" will be deprecated.
+
+- reg: Physical base address and length of the register set for the device.
+
+- clocks: References to the AXI4-Stream clock feeding the VPSS Scaler AP_CLK
+ and to the AXI4-Lite control interface clock.
+
+- clock-names: Must contain "aclk_axis" and "aclk_ctrl" in the same order as
+ clocks listed in clocks property.
+
+- xlnx,num-hori-taps, xlnx,num-vert-taps: The number of horizontal and vertical
+ taps for the scaling filter (valid values: 2, 4, 6, 8, 10 or 12).
+
+ A value of 2 represents bilinear filters, a value of 4 represents bicubic,
+ and values of 6, 8, 10 and 12 represent polyphase filters.
+
+- xlnx,pix-per-clk : The pixels-per-clock configuration of the IP.
+
+- reset-gpios: Specifier for a GPIO that asserts the VPSS Scaler reset.
+ This property is mandatory for the Scaler.
+
+- xlnx,max-height: Maximum number of lines.
+ Valid range from 64 to 4320.
+
+- xlnx,max-width: Maximum number of pixels in a line.
+ Valid range from 64 to 8192.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The scaler has an input port (0) and an output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Must be one of XVIP_VF_RBG or XVIP_VF_YUV_422 for the
+ input port (0) and XVIP_VF_RBG or XVIP_VF_YUV_422 for the output port (1).
+ See <dt-bindings/media/xilinx-vip.h> for more details.
+
+- reg: This value represents the media pad of the V4L2 sub-device.
+ A Sink Pad is represented by reg = <0>
+ A Source Pad is represented by reg = <1>
+
+- xlnx,video-width: Video width as defined in video.txt
+
+Example:
+
+ scaler_1:scaler@a0000000 {
+ compatible = "xlnx,v-vpss-scaler-1.0";
+ reg = <0x0 0xa0000000 0x0 0x40000>;
+ clocks = <&vid_stream_clk>, <&misc_clk_2>;
+ clock-names = "aclk_axis", "aclk_ctrl";
+ xlnx,num-hori-taps = <8>;
+ xlnx,num-vert-taps = <8>;
+ xlnx,pix-per-clk = <2>;
+ reset-gpios = <&gpio 87 1>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ /* Sink Pad */
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_RBG>;
+ xlnx,video-width = <8>;
+
+ scaler_in: endpoint {
+ remote-endpoint = <&csc_out>;
+ };
+ };
+
+ port@1 {
+ /* Source Pad */
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ scaler_out: endpoint {
+ remote-endpoint = <&vcap_tpg_in>;
+ };
+ };
+ };
+
+ };
diff --git a/Documentation/devicetree/bindings/misc/jesd-phy.txt b/Documentation/devicetree/bindings/misc/jesd-phy.txt
new file mode 100644
index 000000000000..84535cb1e905
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/jesd-phy.txt
@@ -0,0 +1,24 @@
+* Xilinx JESD204B PHY
+
+Description:
+The LogiCORE™ IP JESD204 PHY core implements a JESD204B physical interface
+supporting line rates between 1.0 and 12.5 Gb/s on 1 to 12 lanes using GTX,
+GTH, or GTP transceivers.
+
+Required properties:
+- compatible = "xlnx,jesd204-phy-2.0"
+- reg = Should contain JESD204B PHY registers location and length
+- xlnx,pll-selection = The PLL selection: 3 for QPLL and 1 for CPLL
+- xlnx,lanes = Number of lanes
+- xlnx,gt-refclk-freq = Reference frequency in Hz
+- clocks = The phandle to the clock tree
+
+Example:
+++++++++
+ jesd204_phycores:phy@41e10000 {
+ compatible = "xlnx,jesd204-phy-2.0";
+ reg = <0x41e10000 0x10000>;
+ xlnx,gt-refclk-freq = "156250000";
+ xlnx,lanes = <0x1>;
+ xlnx,pll-selection = <0x3>;
+ clocks = <&si570>;
+ };
diff --git a/Documentation/devicetree/bindings/misc/jesd204b.txt b/Documentation/devicetree/bindings/misc/jesd204b.txt
new file mode 100644
index 000000000000..53f8192c8afa
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/jesd204b.txt
@@ -0,0 +1,28 @@
+* Xilinx JESD204B core
+
+Description:
+The LogiCORE™ IP JESD204 core implements a JESD204B interface
+
+Required properties:
+- compatible = Should be one of
+ "xlnx,jesd204-5.1";
+ "xlnx,jesd204-5.2";
+ "xlnx,jesd204-6.1";
+- reg = Should contain JESD204B registers location and length
+- xlnx,frames-per-multiframe = Number of frames per multiframe
+- xlnx,bytes-per-frame = Number of bytes per frame
+- xlnx,lanes = Number of lanes
+- xlnx,subclass = The JESD204B subclass (0, 1 or 2)
+- xlnx,node-is-transmit = Should be present only for transmit nodes
+
+Example:
+++++++++
+jesd_Tx_axi_0: jesd_Tx@44a20000 {
+ compatible = "xlnx,jesd204-5.1";
+ reg = <0x44a20000 0x10000>;
+ xlnx,frames-per-multiframe = <30>;
+ xlnx,bytes-per-frame = <2>;
+ xlnx,subclass = <1>;
+ xlnx,lanes = <0x2>;
+ xlnx,node-is-transmit;
+};
diff --git a/Documentation/devicetree/bindings/misc/xilinx-axitrafgen.txt b/Documentation/devicetree/bindings/misc/xilinx-axitrafgen.txt
new file mode 100644
index 000000000000..6edb8f6a3a10
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/xilinx-axitrafgen.txt
@@ -0,0 +1,25 @@
+* Xilinx AXI Traffic Generator IP
+
+Required properties:
+- compatible: "xlnx,axi-traffic-gen"
+- interrupts: Should contain AXI Traffic Generator interrupts.
+- interrupt-parent: Must be core interrupt controller.
+- reg: Should contain AXI Traffic Generator registers location and length.
+- interrupt-names: Should contain both interrupt names of the device: error
+ and completion.
+- xlnx,device-id: Device instance ID.
+
+Optional properties:
+- clocks: Input clock specifier. Refer to common clock bindings.
+
+Example:
+++++++++
+axi_traffic_gen_1: axi-traffic-gen@76000000 {
+ compatible = "xlnx,axi-traffic-gen-1.0", "xlnx,axi-traffic-gen";
+ clocks = <&clkc 15>;
+ interrupts = <0 2 2 2>;
+ interrupt-parent = <&axi_intc_1>;
+ interrupt-names = "err-out", "irq-out";
+ reg = <0x76000000 0x800000>;
+ xlnx,device-id = <0x0>;
+} ;
diff --git a/Documentation/devicetree/bindings/misc/xlnx,fclk.txt b/Documentation/devicetree/bindings/misc/xlnx,fclk.txt
new file mode 100644
index 000000000000..e1a1acc6c5ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/xlnx,fclk.txt
@@ -0,0 +1,12 @@
+* Xilinx fclk clock enable
+Temporary solution for enabling the PS_PL clocks.
+
+Required properties:
+- compatible: "xlnx,fclk"
+
+Example:
+++++++++
+fclk0: fclk0 {
+ compatible = "xlnx,fclk";
+ clocks = <&clkc 71>;
+};
diff --git a/Documentation/devicetree/bindings/misc/xlnx,flexnoc-pm.txt b/Documentation/devicetree/bindings/misc/xlnx,flexnoc-pm.txt
new file mode 100644
index 000000000000..9d07bb0a7364
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/xlnx,flexnoc-pm.txt
@@ -0,0 +1,24 @@
+* Xilinx FlexNoC Performance Monitor driver
+
+The FlexNoC Performance Monitor has counters for monitoring read and write
+transactions.
+
+Required properties:
+- compatible: "xlnx,flexnoc-pm-2.7"
+- reg : Address and length of register sets for each device in
+ "reg-names"
+- reg-names : The names of the register addresses corresponding to the
+ registers filled in "reg":
+ - funnel: base address of the funnel registers
+ - baselpd: base address of the LPD PM registers
+ - basefpd: base address of the FPD PM registers
+
+Example:
+++++++++
+performance-monitor@f0920000 {
+ compatible = "xlnx,flexnoc-pm-2.7";
+ reg-names = "funnel", "baselpd", "basefpd";
+ reg = <0x0 0xf0920000 0x0 0x1000>,
+ <0x0 0xf0980000 0x0 0x9000>,
+ <0x0 0xf0b80000 0x0 0x9000>;
+};
diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
index 7ca0aa7ccc0b..3af5121e7dba 100644
--- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
@@ -14,6 +14,8 @@ Required Properties:
- "arasan,sdhci-4.9a": generic Arasan SDHCI 4.9a PHY
- "arasan,sdhci-5.1": generic Arasan SDHCI 5.1 PHY
- "rockchip,rk3399-sdhci-5.1", "arasan,sdhci-5.1": rk3399 eMMC PHY
+ - "xlnx,zynqmp-8.9a": Xilinx ZynqMP Arasan SDHCI 8.9a PHY
+ - "xlnx,versal-8.9a": Xilinx Versal Arasan SDHCI 8.9a PHY
For this device it is strongly suggested to include arasan,soc-ctl-syscon.
- "ti,am654-sdhci-5.1", "arasan,sdhci-5.1": TI AM654 MMC PHY
Note: This binding has been deprecated and moved to [5].
@@ -31,7 +33,13 @@ Required Properties for "arasan,sdhci-5.1":
- phys: From PHY bindings: Phandle for the Generic PHY for arasan.
- phy-names: MUST be "phy_arasan".
+Required Properties for "xlnx,zynqmp-8.9a" and "xlnx,versal-8.9a":
+ - xlnx,mio_bank: The value will be 0, 1 or 2 depending on the MIO bank
+   selection.
+ - xlnx,device_id: Unique ID of the device; the value will be 0 or 1.
+
Optional Properties:
+ - broken-mmc-highspeed: Indicates that the controller must be forced to run
+   in standard speed mode.
- arasan,soc-ctl-syscon: A phandle to a syscon device (see ../mfd/syscon.txt)
used to access core corecfg registers. Offsets of registers in this
syscon are determined based on the main compatible string for the device.
@@ -46,6 +54,28 @@ Optional Properties:
properly. Test mode can be used to force the controller to function.
- xlnx,int-clock-stable-broken: when present, the controller always reports
that the internal clock is stable even when it is not.
+ - pinctrl-0: pin control group to be used for this controller.
+ - pinctrl-names: must contain a "default" entry.
+
+Optional Properties for "xlnx,zynqmp-8.9a":
+ - nvmem-cells: List of phandles to the nvmem data cells.
+ - nvmem-cell-names: Names for each of the nvmem-cells specified.
+ - xlnx,itap-delay-sd-hsd: Input Tap Delay for SD HS.
+ - xlnx,itap-delay-sdr25: Input Tap Delay for SDR25.
+ - xlnx,itap-delay-sdr50: Input Tap Delay for SDR50.
+ - xlnx,itap-delay-sdr104: Input Tap Delay for SDR104.
+ - xlnx,itap-delay-sd-ddr50: Input Tap Delay for SD DDR50.
+ - xlnx,itap-delay-mmc-hsd: Input Tap Delay for MMC HS.
+ - xlnx,itap-delay-mmc-ddr52: Input Tap Delay for MMC DDR52.
+ - xlnx,itap-delay-mmc-hs200: Input Tap Delay for MMC HS200.
+ - xlnx,otap-delay-sd-hsd: Output Tap Delay for SD HS.
+ - xlnx,otap-delay-sdr25: Output Tap Delay for SDR25.
+ - xlnx,otap-delay-sdr50: Output Tap Delay for SDR50.
+ - xlnx,otap-delay-sdr104: Output Tap Delay for SDR104.
+ - xlnx,otap-delay-sd-ddr50: Output Tap Delay for DDR50.
+ - xlnx,otap-delay-mmc-hsd: Output Tap Delay for MMC HS.
+ - xlnx,otap-delay-mmc-ddr52: Output Tap Delay for MMC DDR52.
+ - xlnx,otap-delay-mmc-hs200: Output Tap Delay for MMC HS200.
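+
+Example for "xlnx,zynqmp-8.9a" (a minimal sketch, not taken from this patch;
+the clock phandles, tap-delay omissions and other values are assumptions
+modelled on a typical ZynqMP design):
+
+	sdhci0: mmc@ff160000 {
+		compatible = "xlnx,zynqmp-8.9a", "arasan,sdhci-8.9a";
+		reg = <0x0 0xff160000 0x0 0x1000>;
+		interrupt-parent = <&gic>;
+		interrupts = <0 48 4>;
+		clock-names = "clk_xin", "clk_ahb";
+		clocks = <&sdio_clk>, <&lpd_ahb_clk>;	/* assumed clock labels */
+		xlnx,mio_bank = <0>;	/* MIO bank selection: 0, 1 or 2 */
+		xlnx,device_id = <0>;	/* unique device ID: 0 or 1 */
+	};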
Example:
sdhci@e0100000 {
diff --git a/Documentation/devicetree/bindings/mtd/arasan_nand.txt b/Documentation/devicetree/bindings/mtd/arasan_nand.txt
new file mode 100644
index 000000000000..546ed98d9777
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/arasan_nand.txt
@@ -0,0 +1,33 @@
+Arasan NAND Flash Controller with ONFI 3.1 support
+
+Required properties:
+- compatible: Should be "xlnx,zynqmp-nand", "arasan,nfc-v3p10"
+- reg: Memory map for module access
+- interrupt-parent: Interrupt controller the interrupt is routed through
+- interrupts: Should contain the interrupt for the device
+- clock-names: List of input clocks - "sys", "flash"
+ (See clock bindings for details)
+- clocks: Clock phandles (see clock bindings for details)
+
+Required properties for child node:
+- nand-ecc-mode: see nand.txt
+
+For NAND partition information please refer to the file
+Documentation/devicetree/bindings/mtd/partition.txt
+
+Example:
+ nfc: nand@ff100000 {
+		compatible = "xlnx,zynqmp-nand", "arasan,nfc-v3p10";
+ reg = <0x0 0xff100000 0x1000>;
+		clock-names = "sys", "flash";
+ clocks = <&misc_clk &misc_clk>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 14 4>;
+ #address-cells = <1>;
+		#size-cells = <0>;
+
+ nand@0 {
+			reg = <0>;
+ nand-ecc-mode = "hw";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
index 945be7d5b236..b169379c3770 100644
--- a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
+++ b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
@@ -5,6 +5,7 @@ Required properties:
Generic default - "cdns,qspi-nor".
For TI 66AK2G SoC - "ti,k2g-qspi", "cdns,qspi-nor".
For TI AM654 SoC - "ti,am654-ospi", "cdns,qspi-nor".
+		For Xilinx Versal - "xlnx,versal-ospi-1.0"
- reg : Contains two entries, each of which is a tuple consisting of a
physical address and length. The first entry is the address and
length of the controller register set. The second entry is the
@@ -14,6 +15,7 @@ Required properties:
- cdns,fifo-depth : Size of the data FIFO in words.
- cdns,fifo-width : Bus width of the data FIFO in bytes.
- cdns,trigger-address : 32-bit indirect AHB trigger address.
+- reset-gpios : GPIO to be used to reset the OSPI flash device.
Optional properties:
- cdns,is-decoded-cs : Flag to indicate whether decoder is used or not.
diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
index c69f4f065d23..76669dfb737d 100644
--- a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
+++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
@@ -31,6 +31,8 @@ file systems on embedded devices.
Bits) locking.
- addr-gpios : (optional) List of GPIO descriptors that will be used to
address the MSBs address lines. The order goes from LSB to MSB.
+ - multi-die : (optional) boolean to enable support for multi-die flash
+   devices.
For JEDEC compatible devices, the following additional properties
are defined:
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 0b61a90f1592..5792565aedd3 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -15,6 +15,7 @@ Required properties:
Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
+ Use "cdns,versal-gem" for Xilinx Versal.
Use "sifive,fu540-c000-gem" for SiFive FU540-C000 SoC.
Or the generic form: "cdns,emac".
- reg: Address and length of the register set for the device
@@ -38,12 +39,19 @@ Optional properties for PHY child node:
up via magic packet.
- phy-handle : see ethernet.txt file in the same directory
+Optional properties:
+- tsu-clk: Timestamp unit (TSU) clock frequency to be used.
+- rx-watermark: Sets the watermark value for the pbuf_rxcutthru register and
+ enables RX partial store and forward; valid only when compatible =
+ "cdns,zynqmp-gem". The value must be less than 0xFFF.
+
Examples:
macb0: ethernet@fffc4000 {
compatible = "cdns,at32ap7000-macb";
reg = <0xfffc4000 0x4000>;
interrupts = <21>;
+ rx-watermark = /bits/ 16 <0x44>;
phy-mode = "rmii";
local-mac-address = [3a 0e 03 04 05 06];
clock-names = "pclk", "hclk", "tx_clk";
diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt
index 388ff48f53ae..339135060fe9 100644
--- a/Documentation/devicetree/bindings/net/ti,dp83867.txt
+++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt
@@ -42,6 +42,11 @@ Optional property:
Some MACs work with differential SGMII clock.
See data manual for details.
+	- ti,6-wire-mode - This denotes that the board uses the SGMII
+			   6-wire mode configuration. If this property
+			   is not present, the default is 4-wire mode.
+			   See data manual for details.
+
Note: ti,min-output-impedance and ti,max-output-impedance are mutually
exclusive. When both properties are present ti,max-output-impedance
takes precedence.
diff --git a/Documentation/devicetree/bindings/net/xilinx-phy.txt b/Documentation/devicetree/bindings/net/xilinx-phy.txt
new file mode 100644
index 000000000000..aeb9917497b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx-phy.txt
@@ -0,0 +1,15 @@
+Xilinx PCS/PMA PHY bindings
+
+Required properties:
+ - reg - The ID number for the phy, usually a small integer
+
+Optional properties:
+ - xlnx,phy-type - Describes type 1000BaseX (set to 0x5) or
+ SGMII (set to 0x4)
+
+Example:
+
+ ethernet-phy@9 {
+ reg = <9>;
+ xlnx,phy-type = <0x5>;
+ };
diff --git a/Documentation/devicetree/bindings/net/xilinx-tsn-ethernet.txt b/Documentation/devicetree/bindings/net/xilinx-tsn-ethernet.txt
new file mode 100644
index 000000000000..e66b64bc10e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx-tsn-ethernet.txt
@@ -0,0 +1,54 @@
+Xilinx TSN (time sensitive networking) TEMAC axi ethernet driver (xilinx_axienet)
+-----------------------------------------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,tsn-ethernet-1.00.a".
+- reg : Physical base address and size of the TSN registers map.
+- interrupts : Property with a value describing the interrupt
+ number.
+- interrupt-names : Property denoting the interrupt names.
+- interrupt-parent : Must be core interrupt controller.
+- phy-handle : See ethernet.txt file [1].
+- local-mac-address : See ethernet.txt file [1].
+- phy-mode : see ethernet.txt file [1].
+
+Optional properties:
+- xlnx,tsn : Denotes an Ethernet MAC with TSN capabilities.
+- xlnx,tsn-slave : Denotes a TSN slave port.
+- xlnx,txcsum : Tx checksum mode (Full, Partial and None).
+- xlnx,rxcsum : Rx checksum mode (Full, Partial and None).
+- xlnx,phy-type : Xilinx phy device type. See xilinx-phy.txt [2].
+- xlnx,eth-hasnobuf : Used when 1G MAC is configured in non-processor mode.
+- xlnx,num-queue : Number of queues supported in the current design; the
+ range is 2 to 5 and the default value is 5.
+- xlnx,num-tc : Number of traffic classes supported in the current design;
+ valid values are 2 and 3 and the default value is 3. It denotes
+ the traffic classes based on the VLAN-PCP value.
+- xlnx,qbv-addr : Denotes the MAC scheduler physical base address.
+- xlnx,qbv-size : Denotes the MAC scheduler address space size.
+
+[1] Documentation/devicetree/bindings/net/ethernet.txt
+[2] Documentation/devicetree/bindings/net/xilinx-phy.txt
+
+Example:
+
+ tsn_emac_0: tsn_mac@80040000 {
+ compatible = "xlnx,tsn-ethernet-1.00.a";
+ interrupt-parent = <&gic>;
+ interrupts = <0 104 4 0 106 4 0 91 4 0 110 4>;
+ interrupt-names = "interrupt_ptp_rx_1", "interrupt_ptp_tx_1", "mac_irq_1", "interrupt_ptp_timer";
+ local-mac-address = [ 00 0A 35 00 01 0e ];
+ phy-mode = "rgmii";
+ reg = <0x0 0x80040000 0x0 0x14000>;
+ tsn,endpoint = <&tsn_ep>;
+ xlnx,tsn;
+ xlnx,tsn-slave;
+ xlnx,phy-type = <0x3>;
+ xlnx,eth-hasnobuf;
+ xlnx,num-queue = <0x2>;
+ xlnx,num-tc = <0x3>;
+ xlnx,qbv-addr = <0x80054000>;
+ xlnx,qbv-size = <0x2000>;
+		xlnx,txcsum = <0>;
+		xlnx,rxcsum = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/net/xilinx_axienet.txt b/Documentation/devicetree/bindings/net/xilinx_axienet.txt
index 7360617cdedb..c799bb152322 100644
--- a/Documentation/devicetree/bindings/net/xilinx_axienet.txt
+++ b/Documentation/devicetree/bindings/net/xilinx_axienet.txt
@@ -1,22 +1,28 @@
-XILINX AXI ETHERNET Device Tree Bindings
+XILINX AXI ETHERNET driver (xilinx_axienet)
--------------------------------------------------------
-Also called AXI 1G/2.5G Ethernet Subsystem, the xilinx axi ethernet IP core
-provides connectivity to an external ethernet PHY supporting different
-interfaces: MII, GMII, RGMII, SGMII, 1000BaseX. It also includes two
-segments of memory for buffering TX and RX, as well as the capability of
-offloading TX/RX checksum calculation off the processor.
+This driver supports the following MAC configurations:
+a) AXI 1G/2.5G Ethernet Subsystem.
+b) 10G/25G High Speed Ethernet Subsystem.
+c) 10 Gigabit Ethernet Subsystem.
+d) USXGMII Ethernet Subsystem.
-Management configuration is done through the AXI interface, while payload is
-sent and received through means of an AXI DMA controller. This driver
-includes the DMA driver code, so this driver is incompatible with AXI DMA
-driver.
+Management configuration is done through the AXI4-Lite interface.
+The transmit and receive data interface is via the AXI4-Stream
+interface connected to a DMA controller. This driver supports the Xilinx
+AXI DMA and MCDMA IPs. The programming sequence for these DMA IPs
+is included in the xilinx_axienet driver.
-For more details about mdio please refer phy.txt file in the same directory.
+For details about MDIO please refer to phy.txt [1].
Required properties:
-- compatible : Must be one of "xlnx,axi-ethernet-1.00.a",
- "xlnx,axi-ethernet-1.01.a", "xlnx,axi-ethernet-2.01.a"
+- compatible : Must be one of "xlnx,axi-ethernet-1.00.a" or
+ "xlnx,axi-ethernet-1.01.a" or "xlnx,axi-ethernet-2.01.a"
+ for 1G MAC,
+ "xlnx,ten-gig-eth-mac" for 10 Gigabit Ethernet Subsystem,
+ "xlnx,xxv-ethernet-1.0" for 10G/25G MAC,
+ "xlnx,axi-2_5-gig-ethernet-1.0" for 2.5G MAC and
+ "xlnx,xxv-usxgmii-ethernet-1.0" for USXGMII.
- reg : Address and length of the IO space, as well as the address
and length of the AXI DMA controller IO space, unless
axistream-connected is specified, in which case the reg
@@ -26,18 +32,22 @@ Required properties:
specified, the TX/RX DMA interrupts should be on that node
instead, and only the Ethernet core interrupt is optionally
specified here.
-- phy-handle : Should point to the external phy device.
- See ethernet.txt file in the same directory.
-- xlnx,rxmem : Set to allocated memory buffer for Rx/Tx in the hardware
+- phy-handle : See ethernet.txt [2].
+- local-mac-address : See ethernet.txt [2].
+- phy-mode : See ethernet.txt [2].
+- axistream-connected : Should contain the phandle of the DMA node.
+- interrupt-parent : Must be core interrupt controller.
+Required properties (When AxiEthernet is configured with MCDMA):
+- xlnx,channel-ids : Queue identifier associated with each MCDMA channel.
+- interrupt-names : Should contain the interrupt names.
Optional properties:
-- phy-mode : See ethernet.txt
-- xlnx,phy-type : Deprecated, do not use, but still accepted in preference
- to phy-mode.
-- xlnx,txcsum : 0 or empty for disabling TX checksum offload,
- 1 to enable partial TX checksum offload,
- 2 to enable full TX checksum offload
-- xlnx,rxcsum : Same values as xlnx,txcsum but for RX checksum offload
+- xlnx,rxmem : Max Rx Memory size.
+- xlnx,txcsum : Tx checksum mode (Full, Partial and None).
+- xlnx,rxcsum : Rx checksum mode (Full, Partial and None).
+- xlnx,phy-type : Xilinx phy device type. See xilinx-phy.txt [3].
+- dma-coherent : Present if dma operations are coherent.
+- xlnx,eth-hasnobuf : Used when 1G MAC is configured in non-processor mode.
- clocks : AXI bus clock for the device. Refer to common clock bindings.
Used to calculate MDIO clock divisor. If not specified, it is
auto-detected from the CPU clock (but only on platforms where
@@ -51,26 +61,63 @@ Optional properties:
- mdio : Child node for MDIO bus. Must be defined if PHY access is
required through the core's MDIO interface (i.e. always,
unless the PHY is accessed through a different bus).
+- xlnx,rxtsfifo : Configures the AXI FIFO for receive timestamping.
+- clocks : Input clock specifier. Refer to common clock bindings.
+- clock-names : Input clock names. Refer to IP PG for signal description.
+ 1G/2.5G: s_axi_lite_clk, axis_clk and ref_clk.
+ 10G/25G and USXGMII: s_axi_aclk, rx_core_clk and dclk.
+ 10 Gigabit: s_axi_aclk and dclk.
+ AXI DMA and MCDMA: m_axi_sg_aclk, m_axi_mm2s_aclk and
+ m_axi_s2mm_aclk.
-Example:
- axi_ethernet_eth: ethernet@40c00000 {
- compatible = "xlnx,axi-ethernet-1.00.a";
- device_type = "network";
- interrupt-parent = <&microblaze_0_axi_intc>;
- interrupts = <2 0 1>;
- clocks = <&axi_clk>;
- phy-mode = "mii";
- reg = <0x40c00000 0x40000 0x50c00000 0x40000>;
- xlnx,rxcsum = <0x2>;
- xlnx,rxmem = <0x800>;
- xlnx,txcsum = <0x2>;
- phy-handle = <&phy0>;
- axi_ethernetlite_0_mdio: mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- phy0: phy@0 {
- device_type = "ethernet-phy";
- reg = <1>;
+Optional properties (When AxiEthernet is configured with MCDMA):
+- xlnx,num-queues : Number of queues/channels configured in h/w.
+Optional properties (When USXGMII is in use):
+- xlnx,usxgmii-rate : USXGMII PHY speed - can be 10, 100, 1000, 2500,
+ 5000 or 10000.
+Optional properties for connected DMA node:
+- xlnx,addrwidth : Specify the width of the DMA address space in bits.
+ Value type is u8. Valid range is 32-64. Default is 32.
+- xlnx,include-dre : Tells whether the DMA hardware is configured with a
+ data realignment engine (DRE) or not.
+
+NOTE: Time Sensitive Networking (TSN) related DT bindings are explained in [4].
+
+[1] Documentation/devicetree/bindings/net/phy.txt
+[2] Documentation/devicetree/bindings/net/ethernet.txt
+[3] Documentation/devicetree/bindings/net/xilinx-phy.txt
+[4] Documentation/devicetree/bindings/net/xilinx_tsn.txt
+
+Example: AXI 1G/2.5G Ethernet Subsystem + AXIDMA
+
+ axi_eth_0_dma: dma@80040000 {
+ #dma-cells = <1>;
+ compatible = "xlnx,eth-dma";
+ xlnx,addrwidth = /bits/ 8 <32>;
+ <snip>
+ };
+
+ axi_eth_0: ethernet@80000000 {
+ axistream-connected = <&axi_eth_0_dma>;
+ compatible = "xlnx,axi-ethernet-1.00.a";
+ device_type = "network";
+ interrupt-names = "interrupt";
+ interrupt-parent = <&gic>;
+ interrupts = <0 91 4>;
+ phy-handle = <&phy2>;
+ phy-mode = "sgmii";
+ reg = <0x0 0x80000000 0x0 0x40000>;
+ xlnx,include-dre ;
+ xlnx,phy-type = <0x5>;
+ xlnx,rxcsum = <0x0>;
+ xlnx,rxmem = <0x1000>;
+ xlnx,txcsum = <0x0>;
+ axi_eth_0_mdio: mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy2: phy@2 {
+ device_type = "ethernet-phy";
+ reg = <2>;
+ };
};
- };
};
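+
+Example: AXI 1G/2.5G Ethernet Subsystem + MCDMA (a hypothetical sketch; the
+addresses, interrupt values, queue count and channel IDs are illustrative
+assumptions, the DMA-node compatible string is assumed, and the MCDMA node
+is abbreviated as in the example above):
+
+	axi_eth_1_mcdma: dma@80080000 {
+		#dma-cells = <1>;
+		compatible = "xlnx,eth-mcdma";	/* assumed compatible */
+		<snip>
+	};
+
+	axi_eth_1: ethernet@80060000 {
+		axistream-connected = <&axi_eth_1_mcdma>;
+		compatible = "xlnx,axi-ethernet-1.00.a";
+		device_type = "network";
+		/* one entry per MCDMA channel interrupt; names illustrative */
+		interrupt-names = "mm2s_ch1_introut", "s2mm_ch1_introut";
+		interrupt-parent = <&gic>;
+		interrupts = <0 93 4>, <0 94 4>;
+		phy-handle = <&phy3>;
+		phy-mode = "sgmii";
+		reg = <0x0 0x80060000 0x0 0x40000>;
+		xlnx,channel-ids = "1", "2";
+		xlnx,num-queues = /bits/ 16 <2>;
+		xlnx,rxmem = <0x1000>;
+	};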
diff --git a/Documentation/devicetree/bindings/net/xilinx_emaclite.txt b/Documentation/devicetree/bindings/net/xilinx_emaclite.txt
new file mode 100644
index 000000000000..55965d942f97
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx_emaclite.txt
@@ -0,0 +1,34 @@
+Xilinx AXI Ethernetlite controller Device Tree Bindings
+---------------------------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,opb-ethernetlite-1.01.a" or
+		 "xlnx,opb-ethernetlite-1.01.b" or
+		 "xlnx,opb-ethernetlite-1.00.a" or
+		 "xlnx,xps-ethernetlite-2.00.a" or
+		 "xlnx,xps-ethernetlite-2.01.a" or
+		 "xlnx,xps-ethernetlite-3.00.a".
+- reg : Physical base address and size of the Axi ethernetlite
+ registers map.
+- interrupts : Property with a value describing the interrupt
+ number.
+- interrupt-parent : Must be core interrupt controller.
+- phy-handle : See ethernet.txt file in the same directory.
+- local-mac-address : See ethernet.txt file in the same directory.
+
+Optional properties:
+- xlnx,tx-ping-pong : If present, hardware supports the TX ping-pong buffer.
+- xlnx,rx-ping-pong : If present, hardware supports the RX ping-pong buffer.
+
+Example:
+ axi_ethernetlite_1: ethernet@40e00000 {
+ compatible = "xlnx,axi-ethernetlite-3.0", "xlnx,xps-ethernetlite-1.00.a";
+ device_type = "network";
+ interrupt-parent = <&axi_intc_1>;
+ interrupts = <1 0>;
+ local-mac-address = [00 0a 35 00 00 00];
+ phy-handle = <&phy0>;
+ reg = <0x40e00000 0x10000>;
+ xlnx,rx-ping-pong;
+ xlnx,tx-ping-pong;
+	};
diff --git a/Documentation/devicetree/bindings/net/xilinx_tsn.txt b/Documentation/devicetree/bindings/net/xilinx_tsn.txt
new file mode 100644
index 000000000000..8ef9fa9f3968
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx_tsn.txt
@@ -0,0 +1,14 @@
+Xilinx TSN (time sensitive networking) IP driver (xilinx_tsn_ip)
+-----------------------------------------------------------------------
+
+Required properties:
+- compatible : Should be one of "xlnx,tsn-endpoint-ethernet-mac-1.0",
+ "xlnx,tsn-endpoint-ethernet-mac-2.0" for TSN.
+- reg : Physical base address and size of the TSN registers map.
+
+Example:
+
+ tsn_endpoint_ip_0: tsn_endpoint_ip_0 {
+ compatible = "xlnx,tsn-endpoint-ethernet-mac-2.0";
+ reg = <0x0 0x80040000 0x0 0x40000>;
+ };
diff --git a/Documentation/devicetree/bindings/net/xilinx_tsn_ep.txt b/Documentation/devicetree/bindings/net/xilinx_tsn_ep.txt
new file mode 100644
index 000000000000..f42e5417d164
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx_tsn_ep.txt
@@ -0,0 +1,35 @@
+Xilinx TSN (time sensitive networking) EndPoint Driver (xilinx_tsn_ep)
+-------------------------------------------------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,tsn-ep"
+- reg : Physical base address and size of the TSN Endpoint
+ registers map
+- interrupts : Property with a value describing the interrupt
+ number.
+- interrupt-names : Property denoting the interrupt names.
+- interrupt-parent : Must be core interrupt controller.
+- local-mac-address : See ethernet.txt [1].
+
+Optional properties:
+- xlnx,num-tc : Number of traffic classes supported in the current design;
+ valid values are 2 and 3 and the default value is 3. It denotes
+ the traffic classes based on the VLAN-PCP value.
+- xlnx,channel-ids : Queue identifiers associated with the MCDMA channels;
+ the range is Tx: "1 to 2" and Rx: "2 to 5", and the default value is
+ "1 to 5".
+- xlnx,eth-hasnobuf : Used when the 1G MAC is configured in non-processor mode.
+
+[1] Documentation/devicetree/bindings/net/ethernet.txt
+
+Example:
+
+ tsn_ep: tsn_ep@80056000 {
+ compatible = "xlnx,tsn-ep";
+ reg = <0x0 0x80056000 0x0 0xA000>;
+ xlnx,num-tc = <0x3>;
+ interrupt-names = "tsn_ep_scheduler_irq";
+ interrupt-parent = <&gic>;
+ interrupts = <0 111 4>;
+ local-mac-address = [00 0A 35 00 01 10];
+ xlnx,channel-ids = "1","2","3","4","5";
+ xlnx,eth-hasnobuf ;
+ };
diff --git a/Documentation/devicetree/bindings/net/xilinx_tsn_switch.txt b/Documentation/devicetree/bindings/net/xilinx_tsn_switch.txt
new file mode 100644
index 000000000000..898e5b7b57e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx_tsn_switch.txt
@@ -0,0 +1,23 @@
+Xilinx TSN (time sensitive networking) Switch Driver (xilinx_tsn_switch)
+-----------------------------------------------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,tsn-switch"
+- reg : Physical base address and size of the TSN registers map.
+
+Optional properties:
+- xlnx,num-tc : Number of traffic classes supported in the current design;
+ valid values are 2 and 3 and the default value is 3. It denotes
+ the traffic classes based on the VLAN-PCP value.
+- xlnx,has-hwaddr-learning : Denotes hardware address learning support
+- xlnx,has-inband-mgmt-tag : Denotes inband management support
+
+Example:
+
+ epswitch: tsn_switch@80078000 {
+ compatible = "xlnx,tsn-switch";
+ reg = <0x0 0x80078000 0x0 0x4000>;
+ xlnx,num-tc = <0x3>;
+ xlnx,has-hwaddr-learning ;
+ xlnx,has-inband-mgmt-tag ;
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt b/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt
index 4881561b3a02..be126ccf4802 100644
--- a/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt
+++ b/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt
@@ -25,9 +25,78 @@ firmware {
#size-cells = <1>;
/* Data cells */
- soc_revision: soc_revision {
+ soc_revision: soc_revision@0 {
reg = <0x0 0x4>;
};
+ /*
+ * efuse memory access:
+			 * all the efuse fields need to be read
+ * with the exact size specified in the node
+ */
+ /* DNA */
+ efuse_dna: efuse_dna@c {
+ reg = <0xc 0xc>;
+ };
+ /* User 0 */
+ efuse_usr0: efuse_usr0@20 {
+ reg = <0x20 0x4>;
+ };
+ /* User 1 */
+ efuse_usr1: efuse_usr1@24 {
+ reg = <0x24 0x4>;
+ };
+ /* User 2 */
+ efuse_usr2: efuse_usr2@28 {
+ reg = <0x28 0x4>;
+ };
+ /* User 3 */
+ efuse_usr3: efuse_usr3@2c {
+ reg = <0x2c 0x4>;
+ };
+ /* User 4 */
+ efuse_usr4: efuse_usr4@30 {
+ reg = <0x30 0x4>;
+ };
+ /* User 5 */
+ efuse_usr5: efuse_usr5@34 {
+ reg = <0x34 0x4>;
+ };
+ /* User 6 */
+ efuse_usr6: efuse_usr6@38 {
+ reg = <0x38 0x4>;
+ };
+ /* User 7 */
+ efuse_usr7: efuse_usr7@3c {
+ reg = <0x3c 0x4>;
+ };
+ /* Misc user control bits */
+ efuse_miscusr: efuse_miscusr@40 {
+ reg = <0x40 0x4>;
+ };
+ /* PUF chash */
+ efuse_chash: efuse_chash@50 {
+ reg = <0x50 0x4>;
+ };
+ /* PUF misc */
+ efuse_pufmisc: efuse_pufmisc@54 {
+ reg = <0x54 0x4>;
+ };
+ /* SEC_CTRL */
+ efuse_sec: efuse_sec@58 {
+ reg = <0x58 0x4>;
+ };
+ /* SPK ID */
+ efuse_spkid: efuse_spkid@5c {
+ reg = <0x5c 0x4>;
+ };
+ /* PPK0 hash */
+ efuse_ppk0hash: efuse_ppk0hash@a0 {
+ reg = <0xa0 0x30>;
+ };
+ /* PPK1 hash */
+ efuse_ppk1hash: efuse_ppk1hash@d0 {
+ reg = <0xd0 0x30>;
+ };
};
};
};
@@ -44,3 +113,22 @@ For example:
...
};
+
+To program the efuse memory, one should request exactly the sizes in bytes
+specified below (a consumer sketch follows the table).
+NOTE: Efuse bits, once programmed, cannot be reverted.
+
+ - | TYPE | OFFSET | SIZE(bytes) |
+ - |User-0 | 0x20 | 0x4 |
+ - |User-1 | 0x24 | 0x4 |
+ - |User-2 | 0x28 | 0x4 |
+ - |User-3 | 0x2C | 0x4 |
+ - |User-4 | 0x30 | 0x4 |
+ - |User-5 | 0x34 | 0x4 |
+ - |User-6 | 0x38 | 0x4 |
+ - |User-7 | 0x3c | 0x4 |
+ - |Misc User | 0x40 | 0x4 |
+ - |SEC_CTRL | 0x58 | 0x4 |
+ - |SPK ID | 0x5C | 0x4 |
+ - |AES KEY | 0x60 | 0x20 |
+ - |PPK0 hash | 0xA0 | 0x30 |
+ - |PPK1 hash | 0xD0 | 0x30 |
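+
+As a sketch, a consumer node can then reference one of the efuse data cells
+listed above through the standard nvmem consumer properties (the consumer
+node and cell name below are illustrative only, not part of this binding):
+
+	my-device {
+		/* reads/programs the 4-byte User-0 field at offset 0x20 */
+		nvmem-cells = <&efuse_usr0>;
+		nvmem-cell-names = "user0";
+	};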
diff --git a/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt b/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt
index 01bf7fdf4c19..c12bcf0f8947 100644
--- a/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt
@@ -18,6 +18,7 @@ Required properties:
"msi1, msi0": interrupt asserted when an MSI is received
"intx": interrupt asserted when a legacy interrupt is received
"misc": interrupt asserted when miscellaneous interrupt is received
+- clocks: Should contain a clock specifier for the device
- interrupt-map-mask and interrupt-map: standard PCI properties to define the
mapping of the PCI interface to interrupt numbers.
- ranges: ranges for the PCI memory regions (I/O space region is not
@@ -52,6 +53,7 @@ nwl_pcie: pcie@fd0e0000 {
<0x0 0x0 0x0 0x2 &pcie_intc 0x2>,
<0x0 0x0 0x0 0x3 &pcie_intc 0x3>,
<0x0 0x0 0x0 0x4 &pcie_intc 0x4>;
+	clocks = <&clkc 23>;
msi-parent = <&nwl_pcie>;
reg = <0x0 0xfd0e0000 0x0 0x1000>,
diff --git a/Documentation/devicetree/bindings/pci/xilinx-xdma-pl-pcie.txt b/Documentation/devicetree/bindings/pci/xilinx-xdma-pl-pcie.txt
new file mode 100644
index 000000000000..a416f04d00e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/xilinx-xdma-pl-pcie.txt
@@ -0,0 +1,133 @@
+* Xilinx XDMA PL PCIe Root Port Bridge DT description
+
+Required properties:
+- #address-cells: Address representation for root ports, set to <3>
+- #size-cells: Size representation for root ports, set to <2>
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+- compatible: Should contain "xlnx,xdma-host-3.00"
+- reg: Should contain XDMA PCIe registers location and length
+- device_type: must be "pci"
+- interrupts: Should contain AXI PCIe interrupt
+- interrupt-map-mask,
+ interrupt-map: standard PCI properties to define the mapping of the
+ PCI interface to interrupt numbers.
+- ranges: ranges for the PCI memory regions (I/O space region is not
+ supported by hardware)
+ Please refer to the standard PCI bus binding document for a more
+ detailed explanation
+
+For MSI DECODE mode:
+- interrupt-names: Must include the following entries:
+ "misc": interrupt asserted when legacy or error interrupt is received
+ "msi1, msi0": interrupt asserted when an MSI is received
+
+Interrupt controller child node
++++++++++++++++++++++++++++++++
+Required properties:
+- interrupt-controller: identifies the node as an interrupt controller
+- #address-cells: specifies the number of cells needed to encode an
+ address. The value must be 0.
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+
+NOTE:
+The core provides a single interrupt for both INTx/MSI messages. So, an
+interrupt controller node is created to support the 'interrupt-map' DT
+functionality. The driver will create an IRQ domain for this map, decode
+the four INTx interrupts in the ISR and route them to this domain.
+
+
+Example:
+++++++++
+MSI FIFO mode:
+ xdma_0: axi-pcie@a0000000 {
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ compatible = "xlnx,xdma-host-3.00";
+ device_type = "pci";
+ interrupt-map = <0 0 0 1 &pcie_intc_0 1>,
+ <0 0 0 2 &pcie_intc_0 2>,
+ <0 0 0 3 &pcie_intc_0 3>,
+ <0 0 0 4 &pcie_intc_0 4>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 89 4>;
+ ranges = <0x02000000 0x00000000 0xB0000000 0x0 0xB0000000 0x00000000 0x01000000>,
+ <0x43000000 0x00000005 0x00000000 0x00000005 0x00000000 0x00000000 0x01000000>;
+ reg = <0x0 0xA0000000 0x0 0x10000000>;
+ pcie_intc_0: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller ;
+ };
+ };
+
+MSI DECODE mode:
+ xdma_0: axi-pcie@a0000000 {
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ compatible = "xlnx,xdma-host-3.00";
+ device_type = "pci";
+ interrupt-map = <0 0 0 1 &pcie_intc_0 1>, <0 0 0 2 &pcie_intc_0 2>, <0 0 0 3 &pcie_intc_0 3>, <0 0 0 4 &pcie_intc_0 4>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-parent = <&gic>;
+ interrupt-names = "misc", "msi0", "msi1";
+ interrupts = <0 89 4>, <0 90 4>, <0 91 4>;
+ ranges = <0x02000000 0x00000000 0xB0000000 0x0 0xB0000000 0x00000000 0x01000000>,
+ <0x43000000 0x00000005 0x00000000 0x00000005 0x00000000 0x00000000 0x01000000>;
+ reg = <0x0 0xA0000000 0x0 0x10000000>;
+ pcie_intc_0: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller ;
+ };
+ };
+
+Versal CPM host Bridge DT description.
+
+The properties and their meanings are identical to those described in the
+Xilinx XDMA PL PCIe Root Port Bridge description above.
+
+Properties that differ are:
+- compatible: Should contain "xlnx,versal-cpm-host-1.00"
+- reg: Should contain configuration space and CPM system level control and
+ status registers, and length
+- reg-names: Must include the following entries:
+ "cfg": configuration space region
+ "cpm_slcr": CPM system level control and status registers
+- msi-map: Maps a Requester ID to an MSI controller and associated MSI
+ sideband data
+
+Refer to the following binding document for a more detailed description of
+the use of 'msi-map':
+ Documentation/devicetree/bindings/pci/pci-msi.txt
+
+Example:
+ pci@fca10000 {
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ compatible = "xlnx,versal-cpm-host-1.00";
+ interrupt-map = <0 0 0 1 &pcie_intc_0 1>,
+ <0 0 0 2 &pcie_intc_0 2>,
+ <0 0 0 3 &pcie_intc_0 3>,
+ <0 0 0 4 &pcie_intc_0 4>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-parent = <&gic>;
+ interrupt-names = "misc";
+ interrupts = <0 72 4>;
+ ranges = <0x02000000 0x00000000 0xE0000000 0x0 0xE0000000 0x00000000 0x10000000>,
+ <0x43000000 0x00000080 0x00000000 0x00000080 0x00000000 0x00000000 0x80000000>;
+ msi-map = <0x0 &its_gic 0x0 0x10000>;
+ reg = <0x6 0x00000000 0x0 0x1000000>,
+ <0x0 0xFCA10000 0x0 0x1000>;
+ reg-names = "cfg", "cpm_slcr";
+ pcie_intc_0: pci-interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller ;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml
index 346f9c35427c..5407468a8dda 100644
--- a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml
@@ -2,7 +2,7 @@
# Copyright 2019 BayLibre, SAS
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml#"
+$id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb3-pcie-phy.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic G12A USB3 + PCIE Combo PHY
@@ -13,7 +13,7 @@ maintainers:
properties:
compatible:
enum:
- - amlogic,meson-g12a-usb3-pcie-phy
+ - amlogic,g12a-usb3-pcie-phy
reg:
maxItems: 1
@@ -47,7 +47,7 @@ required:
examples:
- |
phy@46000 {
- compatible = "amlogic,meson-g12a-usb3-pcie-phy";
+ compatible = "amlogic,g12a-usb3-pcie-phy";
reg = <0x46000 0x2000>;
clocks = <&ref_clk>;
clock-names = "ref_clk";
diff --git a/Documentation/devicetree/bindings/phy/phy-zynqmp.txt b/Documentation/devicetree/bindings/phy/phy-zynqmp.txt
new file mode 100644
index 000000000000..ed080df891a4
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-zynqmp.txt
@@ -0,0 +1,119 @@
+Xilinx ZynqMP PHY binding
+
+This binding describes a ZynqMP PHY device that is used to control the ZynqMP
+High Speed Gigabit Transceiver (GT). The ZynqMP PS GTR provides four lanes,
+which are used by the USB, SATA, PCIe, DisplayPort and Ethernet SGMII controllers.
+
+Required properties (controller (parent) node):
+- compatible : Can be "xlnx,zynqmp-psgtr-v1.1" or "xlnx,zynqmp-psgtr".
+	       "xlnx,zynqmp-psgtr-v1.1" has the lpd address mapping removed.
+
+- reg : Address and length of register sets for each device in
+ "reg-names"
+- reg-names : The names of the register addresses corresponding to the
+ registers filled in "reg":
+ - serdes: SERDES block register set
+ - siou: SIOU block register set
+ - lpd: Low power domain peripherals reset control
+
+Required nodes : A sub-node is required for each lane the controller
+ provides.
+
+Required properties (port (child) nodes):
+lane0:
+- #phy-cells : Should be 4
+ Cell after port phandle is device type from:
+ - <PHY_TYPE_PCIE 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SATA 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_USB3 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_DP 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SGMII 0 LANE_NUM FREQUENCY>
+lane1:
+- #phy-cells : Should be 4
+ Cell after port phandle is device type from:
+ - <PHY_TYPE_PCIE 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SATA 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_USB3 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_DP 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SGMII 1 LANE_NUM FREQUENCY>
+lane2:
+- #phy-cells : Should be 4
+ Cell after port phandle is device type from:
+ - <PHY_TYPE_PCIE 2 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SATA 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_USB3 0 LANE_NUM FREQUENCY>
+	- <PHY_TYPE_DP 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SGMII 2 LANE_NUM FREQUENCY>
+lane3:
+- #phy-cells : Should be 4
+ Cell after port phandle is device type from:
+ - <PHY_TYPE_PCIE 3 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SATA 1 LANE_NUM FREQUENCY>
+	- <PHY_TYPE_USB3 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_DP 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SGMII 3 LANE_NUM FREQUENCY>
+
+Note: LANE_NUM : This determines which lane's reference clock is shared by the controller.
+      FREQUENCY: This is the clock frequency at which the controller wants to operate.
+
+
+Example:
+ serdes: zynqmp_phy@fd400000 {
+ compatible = "xlnx,zynqmp-psgtr";
+ status = "okay";
+ reg = <0x0 0xfd400000 0x0 0x40000>, <0x0 0xfd3d0000 0x0 0x1000>,
+ <0x0 0xff5e0000 0x0 0x1000>;
+ reg-names = "serdes", "siou", "lpd";
+
+ lane0: lane@0 {
+ #phy-cells = <4>;
+ };
+ lane1: lane@1 {
+ #phy-cells = <4>;
+ };
+ lane2: lane@2 {
+ #phy-cells = <4>;
+ };
+ lane3: lane@3 {
+ #phy-cells = <4>;
+ };
+ };
+
+Specifying phy control of devices
+=================================
+
+Device nodes should specify the configuration required in their "phys"
+property, containing a phandle to the phy port node and a device type.
+
+phys = <PHANDLE CONTROLLER_TYPE CONTROLLER_INSTANCE LANE_NUM LANE_FREQ>;
+
+PHANDLE = &lane0 or &lane1 or &lane2 or &lane3
+CONTROLLER_TYPE = PHY_TYPE_PCIE or PHY_TYPE_SATA or PHY_TYPE_USB3
+ or PHY_TYPE_DP or PHY_TYPE_SGMII
+CONTROLLER_INSTANCE = Depends on controller type used, can be any of
+ PHY_TYPE_PCIE : 0 or 1 or 2 or 3
+ PHY_TYPE_SATA : 0 or 1
+		      PHY_TYPE_USB3 : 0 or 1
+ PHY_TYPE_DP : 0 or 1
+ PHY_TYPE_SGMII: 0 or 1 or 2 or 3
+LANE_NUM = Depends on which lane clock is used as ref clk, can be
+ 0 or 1 or 2 or 3
+LANE_FREQ = Frequency at which the controller can operate; can be any of
+	    19.2MHz, 20MHz, 24MHz, 26MHz, 27MHz, 28.4MHz, 40MHz, 52MHz,
+	    100MHz, 108MHz, 125MHz, 135MHz, 150MHz
+
+Example:
+
+#include <dt-bindings/phy/phy.h>
+
+ usb@fe200000 {
+ ...
+		phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+ ...
+ };
+
+ ahci@fd0c0000 {
+ ...
+ phys = <&lane3 PHY_TYPE_SATA 1 1 125000000>;
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.txt
new file mode 100644
index 000000000000..3007f6f4705d
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.txt
@@ -0,0 +1,275 @@
+Binding for Xilinx ZynqMP Pinctrl
+
+Required properties:
+- compatible: "xlnx,zynqmp-pinctrl"
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+ZynqMP's pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, slew rate, etc.
+
+Each configuration node can consist of multiple nodes describing the pinmux and
+pinconf options. Those nodes can be pinmux nodes or pinconf nodes.
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Required properties for pinmux nodes are:
+ - groups: A list of pinmux groups.
+ - function: The name of a pinmux function to activate for the specified set
+ of groups.
+
+Required properties for configuration nodes:
+One of:
+ - pins: A list of pin names
+ - groups: A list of pinmux groups.
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pinmux subnode:
+ groups, function
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pinconf subnode:
+ groups, pins, bias-disable, bias-pull-up, bias-pull-down, slew-rate
+
+ Valid arguments for 'slew-rate' are 'SLEW_RATE_SLOW' and 'SLEW_RATE_FAST' to
+ select between slow and fast respectively.
+
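+ As a quick sketch ahead of the full example at the end of this document
+ (the group and function names are taken from the lists below; the node
+ labels are illustrative only):
+
+	pinctrl_sdio0_default: sdio0-default {
+		mux {
+			groups = "sdio0_0_grp";
+			function = "sdio0";
+		};
+
+		conf {
+			groups = "sdio0_0_grp";
+			slew-rate = <SLEW_RATE_SLOW>;
+			bias-disable;
+		};
+	};
+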
+ Valid values for groups are:
+ ethernet0_0_grp, ethernet1_0_grp, ethernet2_0_grp,
+ ethernet3_0_grp, gemtsu0_0_grp, gemtsu0_1_grp,
+ gemtsu0_2_grp, mdio0_0_grp, mdio1_0_grp,
+ mdio1_1_grp, mdio2_0_grp, mdio3_0_grp,
+ qspi0_0_grp, qspi_ss_0_grp, qspi_fbclk_0_grp,
+ spi0_0_grp, spi0_ss_0_grp, spi0_ss_1_grp,
+ spi0_ss_2_grp, spi0_1_grp, spi0_ss_3_grp,
+ spi0_ss_4_grp, spi0_ss_5_grp, spi0_2_grp,
+ spi0_ss_6_grp, spi0_ss_7_grp, spi0_ss_8_grp,
+ spi0_3_grp, spi0_ss_9_grp, spi0_ss_10_grp,
+ spi0_ss_11_grp, spi0_4_grp, spi0_ss_12_grp,
+ spi0_ss_13_grp, spi0_ss_14_grp, spi0_5_grp,
+ spi0_ss_15_grp, spi0_ss_16_grp, spi0_ss_17_grp,
+ spi1_0_grp, spi1_ss_0_grp, spi1_ss_1_grp,
+ spi1_ss_2_grp, spi1_1_grp, spi1_ss_3_grp,
+ spi1_ss_4_grp, spi1_ss_5_grp, spi1_2_grp,
+ spi1_ss_6_grp, spi1_ss_7_grp, spi1_ss_8_grp,
+ spi1_3_grp, spi1_ss_9_grp, spi1_ss_10_grp,
+ spi1_ss_11_grp, spi1_4_grp, spi1_ss_12_grp,
+ spi1_ss_13_grp, spi1_ss_14_grp, spi1_5_grp,
+ spi1_ss_15_grp, spi1_ss_16_grp, spi1_ss_17_grp,
+ sdio0_0_grp, sdio0_1_grp, sdio0_2_grp,
+ sdio0_3_grp, sdio0_4_grp, sdio0_5_grp,
+ sdio0_6_grp, sdio0_7_grp, sdio0_8_grp,
+ sdio0_9_grp, sdio0_10_grp, sdio0_11_grp,
+ sdio0_12_grp, sdio0_13_grp, sdio0_14_grp,
+ sdio0_15_grp, sdio0_16_grp, sdio0_17_grp,
+ sdio0_18_grp, sdio0_19_grp, sdio0_20_grp,
+ sdio0_21_grp, sdio0_22_grp, sdio0_23_grp,
+ sdio0_24_grp, sdio0_25_grp, sdio0_26_grp,
+ sdio0_27_grp, sdio0_28_grp, sdio0_29_grp,
+ sdio0_30_grp, sdio0_31_grp, sdio0_32_grp,
+ sdio0_pc_0_grp, sdio0_cd_0_grp, sdio0_wp_0_grp,
+ sdio0_pc_1_grp, sdio0_cd_1_grp, sdio0_wp_1_grp,
+ sdio0_pc_2_grp, sdio0_cd_2_grp, sdio0_wp_2_grp,
+ sdio1_0_grp, sdio1_1_grp, sdio1_2_grp,
+ sdio1_3_grp, sdio1_4_grp, sdio1_5_grp,
+ sdio1_6_grp, sdio1_7_grp, sdio1_8_grp,
+ sdio1_9_grp, sdio1_10_grp, sdio1_11_grp,
+ sdio1_12_grp, sdio1_13_grp, sdio1_14_grp,
+ sdio1_15_grp, sdio1_pc_0_grp, sdio1_cd_0_grp,
+ sdio1_wp_0_grp, sdio1_pc_1_grp, sdio1_cd_1_grp,
+ sdio1_wp_1_grp, nand0_0_grp, nand0_ce_0_grp,
+ nand0_rb_0_grp, nand0_dqs_0_grp, nand0_ce_1_grp,
+ nand0_rb_1_grp, nand0_dqs_1_grp, can0_0_grp,
+ can0_1_grp, can0_2_grp, can0_3_grp,
+ can0_4_grp, can0_5_grp, can0_6_grp,
+ can0_7_grp, can0_8_grp, can0_9_grp,
+ can0_10_grp, can0_11_grp, can0_12_grp,
+ can0_13_grp, can0_14_grp, can0_15_grp,
+ can0_16_grp, can0_17_grp, can0_18_grp,
+ can1_0_grp, can1_1_grp, can1_2_grp,
+ can1_3_grp, can1_4_grp, can1_5_grp,
+ can1_6_grp, can1_7_grp, can1_8_grp,
+ can1_9_grp, can1_10_grp, can1_11_grp,
+ can1_12_grp, can1_13_grp, can1_14_grp,
+ can1_15_grp, can1_16_grp, can1_17_grp,
+ can1_18_grp, can1_19_grp, uart0_0_grp,
+ uart0_1_grp, uart0_2_grp, uart0_3_grp,
+ uart0_4_grp, uart0_5_grp, uart0_6_grp,
+ uart0_7_grp, uart0_8_grp, uart0_9_grp,
+ uart0_10_grp, uart0_11_grp, uart0_12_grp,
+ uart0_13_grp, uart0_14_grp, uart0_15_grp,
+ uart0_16_grp, uart0_17_grp, uart0_18_grp,
+ uart1_0_grp, uart1_1_grp, uart1_2_grp,
+ uart1_3_grp, uart1_4_grp, uart1_5_grp,
+ uart1_6_grp, uart1_7_grp, uart1_8_grp,
+ uart1_9_grp, uart1_10_grp, uart1_11_grp,
+ uart1_12_grp, uart1_13_grp, uart1_14_grp,
+ uart1_15_grp, uart1_16_grp, uart1_17_grp,
+ uart1_18_grp, i2c0_0_grp, i2c0_1_grp,
+ i2c0_2_grp, i2c0_3_grp, i2c0_4_grp,
+ i2c0_5_grp, i2c0_6_grp, i2c0_7_grp,
+ i2c0_8_grp, i2c0_9_grp, i2c0_10_grp,
+ i2c0_11_grp, i2c0_12_grp, i2c0_13_grp,
+ i2c0_14_grp, i2c0_15_grp, i2c0_16_grp,
+ i2c0_17_grp, i2c0_18_grp, i2c1_0_grp,
+ i2c1_1_grp, i2c1_2_grp, i2c1_3_grp,
+ i2c1_4_grp, i2c1_5_grp, i2c1_6_grp,
+ i2c1_7_grp, i2c1_8_grp, i2c1_9_grp,
+ i2c1_10_grp, i2c1_11_grp, i2c1_12_grp,
+ i2c1_13_grp, i2c1_14_grp, i2c1_15_grp,
+ i2c1_16_grp, i2c1_17_grp, i2c1_18_grp,
+ i2c1_19_grp, ttc0_clk_0_grp, ttc0_wav_0_grp,
+ ttc0_clk_1_grp, ttc0_wav_1_grp, ttc0_clk_2_grp,
+ ttc0_wav_2_grp, ttc0_clk_3_grp, ttc0_wav_3_grp,
+ ttc0_clk_4_grp, ttc0_wav_4_grp, ttc0_clk_5_grp,
+ ttc0_wav_5_grp, ttc0_clk_6_grp, ttc0_wav_6_grp,
+ ttc0_clk_7_grp, ttc0_wav_7_grp, ttc0_clk_8_grp,
+ ttc0_wav_8_grp, ttc1_clk_0_grp, ttc1_wav_0_grp,
+ ttc1_clk_1_grp, ttc1_wav_1_grp, ttc1_clk_2_grp,
+ ttc1_wav_2_grp, ttc1_clk_3_grp, ttc1_wav_3_grp,
+ ttc1_clk_4_grp, ttc1_wav_4_grp, ttc1_clk_5_grp,
+ ttc1_wav_5_grp, ttc1_clk_6_grp, ttc1_wav_6_grp,
+ ttc1_clk_7_grp, ttc1_wav_7_grp, ttc1_clk_8_grp,
+ ttc1_wav_8_grp, ttc2_clk_0_grp, ttc2_wav_0_grp,
+ ttc2_clk_1_grp, ttc2_wav_1_grp, ttc2_clk_2_grp,
+ ttc2_wav_2_grp, ttc2_clk_3_grp, ttc2_wav_3_grp,
+ ttc2_clk_4_grp, ttc2_wav_4_grp, ttc2_clk_5_grp,
+ ttc2_wav_5_grp, ttc2_clk_6_grp, ttc2_wav_6_grp,
+ ttc2_clk_7_grp, ttc2_wav_7_grp, ttc2_clk_8_grp,
+ ttc2_wav_8_grp, ttc3_clk_0_grp, ttc3_wav_0_grp,
+ ttc3_clk_1_grp, ttc3_wav_1_grp, ttc3_clk_2_grp,
+ ttc3_wav_2_grp, ttc3_clk_3_grp, ttc3_wav_3_grp,
+ ttc3_clk_4_grp, ttc3_wav_4_grp, ttc3_clk_5_grp,
+ ttc3_wav_5_grp, ttc3_clk_6_grp, ttc3_wav_6_grp,
+ ttc3_clk_7_grp, ttc3_wav_7_grp, ttc3_clk_8_grp,
+ ttc3_wav_8_grp, swdt0_clk_0_grp, swdt0_rst_0_grp,
+ swdt0_clk_1_grp, swdt0_rst_1_grp, swdt0_clk_2_grp,
+ swdt0_rst_2_grp, swdt0_clk_3_grp, swdt0_rst_3_grp,
+ swdt0_clk_4_grp, swdt0_rst_4_grp, swdt0_clk_5_grp,
+ swdt0_rst_5_grp, swdt0_clk_6_grp, swdt0_rst_6_grp,
+ swdt0_clk_7_grp, swdt0_rst_7_grp, swdt0_clk_8_grp,
+ swdt0_rst_8_grp, swdt0_clk_9_grp, swdt0_rst_9_grp,
+ swdt0_clk_10_grp, swdt0_rst_10_grp, swdt0_clk_11_grp,
+ swdt0_rst_11_grp, swdt0_clk_12_grp, swdt0_rst_12_grp,
+ swdt1_clk_0_grp, swdt1_rst_0_grp, swdt1_clk_1_grp,
+ swdt1_rst_1_grp, swdt1_clk_2_grp, swdt1_rst_2_grp,
+ swdt1_clk_3_grp, swdt1_rst_3_grp, swdt1_clk_4_grp,
+ swdt1_rst_4_grp, swdt1_clk_5_grp, swdt1_rst_5_grp,
+ swdt1_clk_6_grp, swdt1_rst_6_grp, swdt1_clk_7_grp,
+ swdt1_rst_7_grp, swdt1_clk_8_grp, swdt1_rst_8_grp,
+ swdt1_clk_9_grp, swdt1_rst_9_grp, swdt1_clk_10_grp,
+ swdt1_rst_10_grp, swdt1_clk_11_grp, swdt1_rst_11_grp,
+ swdt1_clk_12_grp, swdt1_rst_12_grp, gpio0_0_grp,
+ gpio0_1_grp, gpio0_2_grp, gpio0_3_grp,
+ gpio0_4_grp, gpio0_5_grp, gpio0_6_grp,
+ gpio0_7_grp, gpio0_8_grp, gpio0_9_grp,
+ gpio0_10_grp, gpio0_11_grp, gpio0_12_grp,
+ gpio0_13_grp, gpio0_14_grp, gpio0_15_grp,
+ gpio0_16_grp, gpio0_17_grp, gpio0_18_grp,
+ gpio0_19_grp, gpio0_20_grp, gpio0_21_grp,
+ gpio0_22_grp, gpio0_23_grp, gpio0_24_grp,
+ gpio0_25_grp, gpio0_26_grp, gpio0_27_grp,
+ gpio0_28_grp, gpio0_29_grp, gpio0_30_grp,
+ gpio0_31_grp, gpio0_32_grp, gpio0_33_grp,
+ gpio0_34_grp, gpio0_35_grp, gpio0_36_grp,
+ gpio0_37_grp, gpio0_38_grp, gpio0_39_grp,
+ gpio0_40_grp, gpio0_41_grp, gpio0_42_grp,
+ gpio0_43_grp, gpio0_44_grp, gpio0_45_grp,
+ gpio0_46_grp, gpio0_47_grp, gpio0_48_grp,
+ gpio0_49_grp, gpio0_50_grp, gpio0_51_grp,
+ gpio0_52_grp, gpio0_53_grp, gpio0_54_grp,
+ gpio0_55_grp, gpio0_56_grp, gpio0_57_grp,
+ gpio0_58_grp, gpio0_59_grp, gpio0_60_grp,
+ gpio0_61_grp, gpio0_62_grp, gpio0_63_grp,
+ gpio0_64_grp, gpio0_65_grp, gpio0_66_grp,
+ gpio0_67_grp, gpio0_68_grp, gpio0_69_grp,
+ gpio0_70_grp, gpio0_71_grp, gpio0_72_grp,
+ gpio0_73_grp, gpio0_74_grp, gpio0_75_grp,
+ gpio0_76_grp, gpio0_77_grp, usb0_0_grp,
+ usb1_0_grp, pmu0_0_grp, pmu0_1_grp,
+ pmu0_2_grp, pmu0_3_grp, pmu0_4_grp,
+ pmu0_5_grp, pmu0_6_grp, pmu0_7_grp,
+ pmu0_8_grp, pmu0_9_grp, pmu0_10_grp,
+ pmu0_11_grp, pcie0_0_grp, pcie0_1_grp,
+ pcie0_2_grp, pcie0_3_grp, pcie0_4_grp,
+ pcie0_5_grp, pcie0_6_grp, pcie0_7_grp,
+ csu0_0_grp, csu0_1_grp, csu0_2_grp,
+ csu0_3_grp, csu0_4_grp, csu0_5_grp,
+ csu0_6_grp, csu0_7_grp, csu0_8_grp,
+ csu0_9_grp, csu0_10_grp, csu0_11_grp,
+ dpaux0_0_grp, dpaux0_1_grp, dpaux0_2_grp,
+ dpaux0_3_grp, pjtag0_0_grp, pjtag0_1_grp,
+ pjtag0_2_grp, pjtag0_3_grp, pjtag0_4_grp,
+ pjtag0_5_grp, trace0_0_grp, trace0_clk_0_grp,
+ trace0_1_grp, trace0_clk_1_grp, trace0_2_grp,
+ trace0_clk_2_grp, testscan0_0_grp
+
+ Valid values for pins are:
+ MIO0 - MIO77
+
+ Valid values for function are:
+ ethernet0, ethernet1, ethernet2, ethernet3, gemtsu0, usb0, usb1, mdio0,
+ mdio1, mdio2, mdio3, qspi0, qspi_fbclk, qspi_ss, spi0, spi1, spi0_ss,
+ spi1_ss, sdio0, sdio0_pc, sdio0_wp, sdio0_cd, sdio1, sdio1_pc, sdio1_wp,
+ sdio1_cd, nand0, nand0_ce, nand0_rb, nand0_dqs, can0, can1, uart0, uart1,
+ i2c0, i2c1, ttc0_clk, ttc0_wav, ttc1_clk, ttc1_wav, ttc2_clk, ttc2_wav,
+ ttc3_clk, ttc3_wav, swdt0_clk, swdt0_rst, swdt1_clk, swdt1_rst, gpio0, pmu0,
+ pcie0, csu0, dpaux0, pjtag0, trace0, trace0_clk, testscan0
+
+The following driver-specific properties as defined here are valid to specify in
+a pin configuration subnode:
+ - io-standard: Configure the pin to use the selected IO standard. Valid
+ arguments are 'IO_STANDARD_LVCMOS33' and 'IO_STANDARD_LVCMOS18'.
+ - schmitt-cmos: Selects either Schmitt or CMOS input for MIO pins. Valid
+ arguments are 'PIN_INPUT_TYPE_SCHMITT' and 'PIN_INPUT_TYPE_CMOS'.
+
+Example:
+
+firmware {
+ zynqmp_firmware: zynqmp-firmware {
+ compatible = "xlnx,zynqmp-firmware";
+ method = "smc";
+
+ pinctrl0: pinctrl {
+ compatible = "xlnx,zynqmp-pinctrl";
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ schmitt-cmos = <PIN_INPUT_TYPE_CMOS>;
+ };
+ };
+ };
+ };
+};
+
+uart1 {
+ ...
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
+ ...
+
+};
diff --git a/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt
index d366f1eb623a..450f3a41c717 100644
--- a/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt
+++ b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt
@@ -8,9 +8,27 @@ Required properties:
- compatible: Must contain: "xlnx,zynqmp-power"
- interrupts: Interrupt specifier
--------
-Example
--------
+Optional properties:
+ - mbox-names : Name given to channels seen in the 'mboxes' property.
+ "rx" - Mailbox corresponding to receive path
+ "tx" - Mailbox corresponding to transmit path
+ - mboxes : Standard property to specify a Mailbox. Each value of
+ the mboxes property should contain a phandle to the
+ mailbox controller device node and an args specifier
+ that will be the phandle to the intended sub-mailbox
+ child node to be used for communication. See
+ Documentation/devicetree/bindings/mailbox/mailbox.txt
+ for more details about the generic mailbox controller
+ and client driver bindings. Also see
+ Documentation/devicetree/bindings/mailbox/ \
+ xlnx,zynqmp-ipi-mailbox.txt for typical controller that
+ is used to communicate with this System controllers.
+
+--------
+Examples
+--------
+
+Example with interrupt method:
firmware {
zynqmp_firmware: zynqmp-firmware {
@@ -23,3 +41,20 @@ firmware {
};
};
};
+
+Example with IPI mailbox method:
+
+firmware {
+
+ zynqmp_firmware: zynqmp-firmware {
+ compatible = "xlnx,zynqmp-firmware";
+ method = "smc";
+
+ zynqmp_power: zynqmp-power {
+ compatible = "xlnx,zynqmp-power";
+ mboxes = <&ipi_mailbox_pmu0 0>,
+ <&ipi_mailbox_pmu0 1>;
+ mbox-names = "tx", "rx";
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/remoteproc/xilinx,zynqmp-r5-remoteproc.txt b/Documentation/devicetree/bindings/remoteproc/xilinx,zynqmp-r5-remoteproc.txt
new file mode 100644
index 000000000000..de28128c4e5d
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/xilinx,zynqmp-r5-remoteproc.txt
@@ -0,0 +1,135 @@
+Xilinx ARM Cortex A53-R5 remoteproc driver
+==========================================
+
+The ZynqMP family of devices uses two Cortex-R5 processors to help with
+various low power / real time tasks.
+
+This driver requires a specific ZynqMP hardware design.
+
+ZynqMP R5 Device Node:
+=================================
+A ZynqMP R5 device node is used to represent the RPU domain
+within the ZynqMP SoC. This device node contains RPU processor
+subnodes.
+
+Required Properties:
+--------------------
+ - compatible : Should be "xlnx,zynqmp-r5-remoteproc-1.0"
+ - core_conf : R5 core configuration (valid string - split or lock-step)
+ - interrupts : Interrupt mapping for remoteproc IPI. It is required if the
+ user uses the remoteproc driver with the RPMsg kernel driver.
+ - interrupt-parent : Phandle for the interrupt controller. It is required if
+                      the user uses the remoteproc driver with the RPMsg
+                      kernel driver.
+
+ZynqMP R5 Remoteproc Device Node:
+=================================
+A ZynqMP R5 Remoteproc device node is used to represent an RPU processor.
+It is a subnode to the ZynqMP R5 device node. It also contains tightly
+coupled memory subnodes.
+
+Required Properties:
+--------------------
+ - pnode-id: ZynqMP R5 processor power domain ID, which will be used by the
+             ZynqMP power management unit to identify the processor.
+
+Optional Properties:
+--------------------
+ - memory-region: reserved memory which will be used by the R5 processor
+ - mboxes: Specify tx and rx mailboxes
+ - mbox-names: List of identifier strings for tx/rx mailbox channels.
+
+ZynqMP R5 TCM Device Node:
+=================================
+The ZynqMP R5 TCM device node is used to represent the TCM memory.
+It is a subnode to the ZynqMP R5 processor.
+
+Required Properties:
+--------------------
+ - reg: TCM address range
+ - pnode-id: TCM power domain ID
+
+
+Example:
+--------
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ /* R5 0 firmware memory in DDR */
+		rproc_0_fw_reserved: rproc@3ed00000 {
+ no-map;
+ reg = <0x0 0x3ed00000 0x0 0x40000>;
+ };
+ /* DMA shared memory between APU and RPU */
+		rproc_0_dma_reserved: rproc@3ed40000 {
+ compatible = "shared-dma-pool";
+ no-map;
+ reg = <0x0 0x3ed40000 0x0 0x100000>;
+ };
+ };
+
+ zynqmp-r5-remoteproc@0 {
+ compatible = "xlnx,zynqmp-r5-remoteproc-1.0";
+ core_conf = "split";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ r5-0: r5@0 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ memory-region = <&rproc_0_fw_reserved>,
+ <&rproc_0_dma_reserved>;
+ pnode-id = <0x7>;
+ mboxes = <&ipi_mailbox_rpu0 0>, <&ipi_mailbox_rpu0 1>;
+ mbox-names = "tx", "rx";
+ tcm-a: tcm@0 {
+				reg = <0x0 0xFFE00000 0x0 0x10000>;
+ pnode-id = <0xf>;
+ };
+ tcm-b: tcm@1 {
+				reg = <0x0 0xFFE20000 0x0 0x10000>;
+ pnode-id = <0x10>;
+ };
+ };
+ } ;
+
+ zynqmp_ipi {
+ compatible = "xlnx,zynqmp-ipi-mailbox";
+ interrupt-parent = <&gic>;
+ interrupts = <0 29 4>;
+ xlnx,ipi-id = <7>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* APU<->RPU0 IPI mailbox controller */
+		ipi_mailbox_rpu0: mailbox@ff990600 {
+ reg = <0xff990600 0x20>,
+ <0xff990620 0x20>,
+ <0xff9900c0 0x20>,
+ <0xff9900e0 0x20>;
+ reg-names = "local_request_region",
+ "local_response_region",
+ "remote_request_region",
+ "remote_response_region";
+ #mbox-cells = <1>;
+ xlnx,ipi-id = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/zynq_remoteproc.txt b/Documentation/devicetree/bindings/remoteproc/zynq_remoteproc.txt
new file mode 100644
index 000000000000..0ac9a8c24a5c
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/zynq_remoteproc.txt
@@ -0,0 +1,47 @@
+Xilinx ARM Cortex A9-A9 remoteproc driver
+==========================================
+
+The Zynq family of devices can use one Cortex-A9 processor to help with
+various low power / real time tasks.
+
+This driver requires a specific Zynq hardware design.
+
+Zynq RemoteProc Device Node:
+=================================
+A zynq_remoteproc device node is used to represent the 2nd A9 instance
+within the Zynq SoC.
+
+Required properties:
+--------------------
+ - compatible : should be "xlnx,zynq_remoteproc"
+ - vring0: soft interrupt for kicking from firmware
+ - vring1: soft interrupt for kicking from Linux kernel
+
+Optional Properties:
+--------------------
+ - memory-region: reserved memory which will be used by the remote processor
+
+Example:
+--------
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ rproc_0_reserved: rproc@3e000000 {
+ no-map;
+ reg = <0x3e000000 0x400000>;
+ };
+ rproc_0_dma: rproc@3e800000 {
+ no-map;
+ compatible = "shared-dma-pool";
+ reg = <0x3e800000 0x100000>;
+ };
+ };
+
+ zynq_remoteproc@0 {
+ compatible = "xlnx,zynq_remoteproc";
+ vring0 = <15>;
+ vring1 = <14>;
+ memory-region = <&rproc_0_reserved>, <&rproc_0_dma>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml b/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
index d7a57ec4a640..140793050df3 100644
--- a/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
@@ -128,7 +128,6 @@ required:
- compatible
- reg
- interrupts
- - clocks
- clock-output-names
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/serial/uartlite.txt b/Documentation/devicetree/bindings/serial/uartlite.txt
new file mode 100644
index 000000000000..7ae900880d30
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/uartlite.txt
@@ -0,0 +1,26 @@
+Xilinx Axi Uartlite controller Device Tree Bindings
+---------------------------------------------------------
+
+Required properties:
+- compatible : Can be either of
+ "xlnx,xps-uartlite-1.00.a"
+ "xlnx,opb-uartlite-1.00.b"
+- reg : Physical base address and size of the Axi Uartlite
+ registers map.
+- interrupts : Property with a value describing the interrupt
+ number.
+- interrupt-parent : Must be core interrupt controller.
+
+Optional properties:
+- port-number : Set Uart port number
+- clock-names : Should be "s_axi_aclk"
+- clocks : Input clock specifier. Refer to common clock bindings.
+
+Example:
+serial@800C0000 {
+ compatible = "xlnx,xps-uartlite-1.00.a";
+ reg = <0x0 0x800c0000 0x10000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x6e 0x1>;
+ port-number = <0>;
+};
diff --git a/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai-engine-npi.txt b/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai-engine-npi.txt
new file mode 100644
index 000000000000..b1c1466a34ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai-engine-npi.txt
@@ -0,0 +1,23 @@
+Xilinx AI Engine NPI
+--------------------
+
+The Xilinx AI Engine NPI space is where privileged operations for the AI
+Engine device, such as reset and PLL control, are handled. The space is
+typically owned by the platform management software and is accessible only
+when that software grants access. Thus, this DT binding only works in such a
+configuration; if the platform locks the access, non-secure software cannot
+access the device.
+
+This is a temporary solution to allow direct access to NPI space.
+
+Required properties:
+
+- compatible: Must be "xlnx,ai-engine-npi"
+- reg: Physical base address and length of the registers set for the device.
+
+Example:
+
+ aie-npi@f70a0000 {
+ compatible = "xlnx,ai-engine-npi";
+ reg = <0x0 0xf70a0000 0x0 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai_engine.txt b/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai_engine.txt
new file mode 100644
index 000000000000..b7643a1380d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai_engine.txt
@@ -0,0 +1,28 @@
+Xilinx AI Engine
+----------------
+
+The Xilinx AI Engine is a tile processor with many cores (up to 400) that
+can run in parallel. The data routing between cores is configured through
+internal switches, and shim tiles interface with external interconnect, such
+as memory or PL.
+
+Required properties:
+
+- compatible: Must be "xlnx,ai_engine".
+- reg: Physical base address and length of the registers set for the device.
+- interrupt-parent: the phandle to the interrupt controller.
+- interrupts: the interrupt numbers.
+- interrupt-names: Should be "interrupt0", "interrupt1", "interrupt2" or
+ "interrupt3".
+
+Example:
+
+ ai_engine@20000000000 {
+ compatible = "xlnx,ai_engine";
+ reg = <0x200 0x0 0x1 0x0>;
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x94 0x1>,
+ <0x0 0x95 0x1>,
+ <0x0 0x96 0x1>;
+ interrupt-names = "interrupt1", "interrupt2", "interrupt3";
+ };
diff --git a/Documentation/devicetree/bindings/soc/xilinx/xlnx,vcu.txt b/Documentation/devicetree/bindings/soc/xilinx/xlnx,vcu.txt
index 6786d6715df0..98474f2accca 100644
--- a/Documentation/devicetree/bindings/soc/xilinx/xlnx,vcu.txt
+++ b/Documentation/devicetree/bindings/soc/xilinx/xlnx,vcu.txt
@@ -16,16 +16,60 @@ Required properties:
1. vcu slcr
2. Logicore
reg-names should contain name for the each register sequence.
-- clocks: phandle for aclk and pll_ref clocksource
-- clock-names: The identification string, "aclk", is always required for
- the axi clock. "pll_ref" is required for pll.
+- #clock-cells : Must be 1
+- clocks: phandle for aclk, pll_ref and encoder/decoder clocksources
+- clock-names: The identification string,
+ * "aclk", is always required for the axi clock.
+ * "pll_ref" is required for pll.
+ * "vcu_core_enc" is required for VCU core encoder.
+ * "vcu_core_dec" is required for VCU core decoder.
+ * "vcu_mcu_enc" is required for MCU core encoder.
+ * "vcu_mcu_dec" is required for MCU core decoder.
+- ranges
+- The VCU init driver node defines the following child nodes:
+	* Allegro encoder driver node
+		- compatible: Must be "al,al5e"
+		- reg: One set of registers.
+		- interrupts: Interrupt number to the CPU.
+		- interrupt-parent: the phandle for the interrupt controller
+				    that services interrupts for this device.
+	* Allegro decoder driver node
+		- compatible: Must be "al,al5d"
+		- reg: One set of registers.
+		- interrupts: Interrupt number to the CPU.
+		- interrupt-parent: the phandle for the interrupt controller
+				    that services interrupts for this device.
+
+Optional properties:
+- reset-gpios : The GPIO used to reset the VCU, if available. This reset GPIO
+	 is needed when, in the design, 'vcu_resetn' is driven by a GPIO. See
+	 Documentation/devicetree/bindings/gpio/gpio.txt for details.
+
Example:
xlnx_vcu: vcu@a0040000 {
compatible = "xlnx,vcu-logicoreip-1.0";
+ #address-cells = <2>;
+ #size-cells = <2>;
reg = <0x0 0xa0040000 0x0 0x1000>,
<0x0 0xa0041000 0x0 0x1000>;
reg-names = "vcu_slcr", "logicore";
- clocks = <&si570_1>, <&clkc 71>;
- clock-names = "pll_ref", "aclk";
+ reset-gpios = <&gpio 0x4e GPIO_ACTIVE_HIGH>;
+ #clock-cells = <0x1>;
+ clock-names = "pll_ref", "aclk", "vcu_core_enc", "vcu_core_dec", "vcu_mcu_enc", "vcu_mcu_dec";
+ clocks = <&si570_1>, <&clkc 71>, <&xlnx_vcu 1>, <&xlnx_vcu 2>, <&xlnx_vcu 3>, <&xlnx_vcu 4>;
+ ranges;
+ encoder: al5e@a0000000 {
+ compatible = "al,al5e";
+ reg = <0x0 0xa0000000 0x0 0x10000>;
+ interrupts = <0 89 4>;
+ interrupt-parent = <&gic>;
+ };
+
+ decoder: al5d@a0020000 {
+ compatible = "al,al5d";
+ reg = <0x0 0xa0020000 0x0 0x10000>;
+ interrupts = <0 89 4>;
+ interrupt-parent = <&gic>;
+ };
};
diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
index 5d6ea66a863f..1f75feec3dec 100644
--- a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
@@ -109,7 +109,7 @@ audio-codec@1{
reg = <1 0>;
interrupts = <&msmgpio 54 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "intr2"
- reset-gpios = <&msmgpio 64 0>;
+ reset-gpios = <&msmgpio 64 GPIO_ACTIVE_LOW>;
slim-ifc-dev = <&wc9335_ifd>;
clock-names = "mclk", "native";
clocks = <&rpmcc RPM_SMD_DIV_CLK1>,
diff --git a/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt b/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
index cbc93c8f4963..6b5e4f762268 100644
--- a/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
+++ b/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
@@ -14,7 +14,7 @@ Required properties:
- interrupts: List of Interrupt numbers.
- reg: Base address and size of the IP core instance.
- clock-names: List of input clocks.
- Required elements: "s_axi_lite_aclk", "aud_mclk"
+ Required elements: "s_axi_lite_aclk", "m_axis_mm2s_aclk", "aud_mclk", "s_axis_s2mm_aclk"
- clocks: Input clock specifier. Refer to common clock bindings.
Example:
@@ -24,6 +24,6 @@ Example:
interrupt-parent = <&gic>;
interrupts = <0 104 4>, <0 105 4>;
reg = <0x0 0x80010000 0x0 0x1000>;
- clock-names = "s_axi_lite_aclk", "aud_mclk";
- clocks = <&clk 71>, <&clk_wiz_1 0>;
+ clock-names = "s_axi_lite_aclk", "m_axis_mm2s_aclk", "aud_mclk", "s_axis_s2mm_aclk";
+ clocks = <&clk 71>, <&audio_ss_0_clk_wiz_0 0>, <&audio_ss_0_clk_wiz_0 0>, <&clk 71>;
};
diff --git a/Documentation/devicetree/bindings/sound/xlnx,dp-snd-card.txt b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-card.txt
new file mode 100644
index 000000000000..7eb932913983
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-card.txt
@@ -0,0 +1,17 @@
+Device-Tree bindings for Xilinx ZynqMP DisplayPort Audio Card
+
+The card driver integrates the codec and PCM components and presents them as
+a single audio device.
+
+Required properties:
+ - compatible: Should be "xlnx,dp-snd-card".
+ - xlnx,dp-snd-pcm: phandle(s) to the ZynqMP DP PCM node.
+ - xlnx,dp-snd-codec: phandle to the ZynqMP DP codec node.
+
+Example:
+
+ xlnx_dp_snd_card: dp_snd_card {
+ compatible = "xlnx,dp-snd-card";
+ xlnx,dp-snd-pcm = <&xlnx_dp_snd_pcm0>, <&xlnx_dp_snd_pcm1>;
+ xlnx,dp-snd-codec = <&xlnx_dp_snd_codec0>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/xlnx,dp-snd-codec.txt b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-codec.txt
new file mode 100644
index 000000000000..d094fdd9d9e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-codec.txt
@@ -0,0 +1,18 @@
+Device-Tree bindings for Xilinx ZynqMP DisplayPort Codec
+
+The codec driver handles the audio clock and format management.
+
+Required properties:
+ - compatible: Should be "xlnx,dp-snd-codec".
+ - clocks: The phandle for the audio clock. The audio clock should be
+ configured to the correct audio clock rate, which should be one of
+ (44100 * 512) or (48000 * 512).
+ - clock-names: The identification string should be "aud_clk".
+
+Example:
+
+ xlnx_dp_snd_codec0: dp_snd_codec0 {
+ compatible = "xlnx,dp-snd-codec";
+ clocks = <&dp_aud_clk>;
+ clock-names = "aud_clk";
+ };
diff --git a/Documentation/devicetree/bindings/sound/xlnx,dp-snd-pcm.txt b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-pcm.txt
new file mode 100644
index 000000000000..303232a2a375
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-pcm.txt
@@ -0,0 +1,18 @@
+Device-Tree bindings for Xilinx ZynqMP DisplayPort PCM
+
+The DPDMA driver of ZynqMP DisplayPort subsystem is based on DMA engine,
+and the DP PCM driver is based on snd dmaengine helpers.
+
+Required properties:
+ - compatible: Should be "xlnx,dp-snd-pcm".
+ - dmas: the phandle list of DMA specifiers. The DMA channel ID should be
+   4 for the audio0 channel or 5 for the audio1 channel.
+ - dma-names: the identifier strings for DMAs. The value should be "tx".
+
+Example:
+
+ xlnx_dp_snd_pcm0: dp_snd_pcm0 {
+ compatible = "xlnx,dp-snd-pcm";
+ dmas = <&xlnx_dpdma 4>;
+ dma-names = "tx";
+ };
diff --git a/Documentation/devicetree/bindings/sound/xlnx,i2s.txt b/Documentation/devicetree/bindings/sound/xlnx,i2s.txt
index 5e7c7d5bb60a..86b727c88bf9 100644
--- a/Documentation/devicetree/bindings/sound/xlnx,i2s.txt
+++ b/Documentation/devicetree/bindings/sound/xlnx,i2s.txt
@@ -11,18 +11,32 @@ Required property common to both I2S playback and capture:
- xlnx,dwidth: sample data width. Can be any of 16, 24.
- xlnx,num-channels: Number of I2S streams. Can be any of 1, 2, 3, 4.
supported channels = 2 * xlnx,num-channels
+ - xlnx,snd-pcm: reference to audio formatter block
+ - clock-names: List of input clocks.
+ Required elements for I2S Tx: "s_axi_ctrl_aclk", "aud_mclk", "s_axis_aud_aclk".
+ Required elements for I2S Rx: "s_axi_ctrl_aclk", "aud_mclk", "m_axis_aud_aclk".
+ - clocks: Input clock specifier. Refer to common clock bindings.
Example:
i2s_receiver@a0080000 {
compatible = "xlnx,i2s-receiver-1.0";
+ clock-names = "s_axi_ctrl_aclk", "aud_mclk", "m_axis_aud_aclk";
+ clocks = <&clk 71>, <&audio_ss_0_clk_wiz_0 0>, <&clk 71>;
reg = <0x0 0xa0080000 0x0 0x10000>;
xlnx,dwidth = <0x18>;
xlnx,num-channels = <1>;
+ xlnx,snd-pcm = <&audio_ss_0_audio_formatter_0>;
};
i2s_transmitter@a0090000 {
compatible = "xlnx,i2s-transmitter-1.0";
+ clock-names = "s_axi_ctrl_aclk", "aud_mclk", "s_axis_aud_aclk";
+ clocks = <&clk 71>, <&audio_ss_0_clk_wiz_0 0>, <&audio_ss_0_clk_wiz_0 0>;
reg = <0x0 0xa0090000 0x0 0x10000>;
xlnx,dwidth = <0x18>;
xlnx,num-channels = <1>;
+ xlnx,snd-pcm = <&audio_ss_0_audio_formatter_0>;
};
+ Documentation of "audio_ss_0_audio_formatter_0" node is located
+ at Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
+
diff --git a/Documentation/devicetree/bindings/sound/xlnx,spdif.txt b/Documentation/devicetree/bindings/sound/xlnx,spdif.txt
index 15c2d64d247c..59bfa6bcb566 100644
--- a/Documentation/devicetree/bindings/sound/xlnx,spdif.txt
+++ b/Documentation/devicetree/bindings/sound/xlnx,spdif.txt
@@ -5,7 +5,8 @@ The IP supports playback and capture of SPDIF audio
Required properties:
- compatible: "xlnx,spdif-2.0"
- clock-names: List of input clocks.
- Required elements: "s_axi_aclk", "aud_clk_i"
+ Required elements for SPDIF Tx: "aud_clk_i", "s_axi_aclk", "s_axis_aclk".
+ Required elements for SPDIF Rx: "aud_clk_i", "s_axi_aclk", "m_axis_aclk".
- clocks: Input clock specifier. Refer to common clock bindings.
- reg: Base address and address length of the IP core instance.
- interrupts-parent: Phandle for interrupt controller.
@@ -14,10 +15,10 @@ Required properties:
1 :- transmitter mode
- xlnx,aud_clk_i: input audio clock value.
-Example:
+Example - SPDIF Rx:
spdif_0: spdif@80010000 {
- clock-names = "aud_clk_i", "s_axi_aclk";
- clocks = <&misc_clk_0>, <&clk 71>;
+ clock-names = "aud_clk_i", "s_axi_aclk", "m_axis_aclk";
+ clocks = <&si570_1>, <&clk 71>, <&clk 71>;
compatible = "xlnx,spdif-2.0";
interrupt-names = "spdif_interrupt";
interrupt-parent = <&gic>;
diff --git a/Documentation/devicetree/bindings/sound/xlnx,v-uhdsdi-audio.txt b/Documentation/devicetree/bindings/sound/xlnx,v-uhdsdi-audio.txt
new file mode 100644
index 000000000000..69134458b9d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,v-uhdsdi-audio.txt
@@ -0,0 +1,60 @@
+Device-Tree bindings for Xilinx SDI audio
+
+The IP core supports embed/extract of audio in SDI Tx and Rx
+protocol respectively. Reference to PG:
+https://www.xilinx.com/support/documentation/ip_documentation/v_uhdsdi_audio/v1_0/pg309-v-uhdsdi-audio.pdf
+
+Required properties:
+ - compatible: Should be one of:
+ "xlnx,v-uhdsdi-audio-2.0"
+ "xlnx,v-uhdsdi-audio-1.0"
+   Note: v1.0 (xlnx,v-uhdsdi-audio-1.0) is deprecated and the driver no
+   longer supports it. Upgrading to v2.0 is mandatory.
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for the interrupt controller.
+ - reg: Base address and size of the IP core instance.
+ - xlnx,snd-pcm: reference to audio formatter block
+ - clock-names: List of input clocks.
+ Required elements for SDI Embed: "s_axi_aclk", "s_axis_clk", "sdi_embed_clk".
+ Required elements for SDI Extract: "s_axi_aclk", "sdi_extract_clk", "m_axis_clk".
+ - clocks: Input clock specifier. Refer to common clock bindings.
+
+SDI embed contains an output port to the remote endpoint of the SDI video
+Tx node. This pipeline should be described using the DT bindings defined in
+Documentation/devicetree/bindings/graph.txt
+
+Example:
+
+ audio_ss_0_v_uhdsdi_audio_extract_0: v_uhdsdi_audio@80080000 {
+ compatible = "xlnx,v-uhdsdi-audio-2.0";
+ clock-names = "s_axi_aclk", "sdi_extract_clk", "m_axis_clk";
+ clocks = <&misc_clk_0>, <&misc_clk_1>, <&misc_clk_0>;
+ interrupt-names = "interrupt";
+ interrupt-parent = <&gic>;
+ interrupts = <0 106 4>;
+ reg = <0x0 0x80080000 0x0 0x10000>;
+ xlnx,snd-pcm = <&audio_ss_0_audio_formatter_0>;
+ };
+
+ audio_ss_0_v_uhdsdi_audio_embed_0: v_uhdsdi_audio@80090000 {
+ compatible = "xlnx,v-uhdsdi-audio-2.0";
+ clock-names = "s_axi_aclk", "s_axis_clk", "sdi_embed_clk";
+ clocks = <&misc_clk_0>, <&misc_clk_0>, <&misc_clk_1>;
+ interrupt-names = "interrupt";
+ interrupt-parent = <&gic>;
+ interrupts = <0 107 4>;
+ reg = <0x0 0x80090000 0x0 0x10000>;
+ xlnx,snd-pcm = <&audio_ss_0_audio_formatter_0>;
+ sdi_av_port: port@0 {
+ reg = <0>;
+ sditx_audio_embed_src: endpoint {
+ remote-endpoint = <&sdi_audio_sink_port>;
+ };
+ };
+ };
+
+ Node 'v_smpte_uhdsdi_tx_ss' is documented in SDI Tx video bindings,
+ located at Documentation/devicetree/bindings/display/xlnx/xlnx,sdi-tx.txt.
+
+ Node 'audio_ss_0_audio_formatter_0' is documented
+ at Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.txt b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
index dc924a5f71db..9a9af55055f5 100644
--- a/Documentation/devicetree/bindings/spi/spi-xilinx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
@@ -6,16 +6,27 @@ Required properties:
- reg : Physical base address and size of SPI registers map.
- interrupts : Property with a value describing the interrupt
number.
+- fifo-size		: Depth of TX/RX FIFOs
Optional properties:
-- xlnx,num-ss-bits : Number of chip selects used.
+- num-cs : Number of chip selects used.
+- bits-per-word : Number of bits per word.
+- clock-names : Can be one or more strings from "axi_clk", "axi4_clk"
+ and "spi_clk" depending on IP configurations.
+- clocks : Input clock specifier. Refer to common clock bindings.
+- xlnx,startup-block : Indicates whether startup block is enabled or disabled.
Example:
axi_quad_spi@41e00000 {
compatible = "xlnx,xps-spi-2.00.a";
+ clock-names = "axi_clk", "axi4_clk", "spi_clk";
+ clocks = <&clkc 71>, <&clkc 72>, <&clkc 73>;
interrupt-parent = <&intc>;
interrupts = <0 31 1>;
reg = <0x41e00000 0x10000>;
- xlnx,num-ss-bits = <0x1>;
+ num-cs = <0x1>;
+ fifo-size = <256>;
+ bits-per-word = <8>;
+ xlnx,startup-block;
};
diff --git a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
index 0f6d37ff541c..a40827f58164 100644
--- a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
@@ -2,7 +2,8 @@ Xilinx Zynq UltraScale+ MPSoC GQSPI controller Device Tree Bindings
-------------------------------------------------------------------
Required properties:
-- compatible : Should be "xlnx,zynqmp-qspi-1.0".
+- compatible : Should be "xlnx,zynqmp-qspi-1.0" for zynqmp or
+ "xlnx,versal-qspi-1.0" for versal.
- reg : Physical base address and size of GQSPI registers map.
- interrupts : Property with a value describing the interrupt
number.
@@ -12,6 +13,14 @@ Required properties:
Optional properties:
- num-cs : Number of chip selects used.
+- has-io-mode	: Boolean property that describes the controller operating
+		  mode. If present, the controller operates in I/O mode;
+		  otherwise it operates in DMA mode.
+- is-dual	: ZynqMP QSPI support for dual-parallel mode configuration;
+		  the value should be 1.
+- is-stacked	: ZynqMP QSPI support for stacked mode configuration.
+		  To enable this mode, is-dual should be 0 and is-stacked
+		  should be 1.
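+
+A minimal sketch of a stacked-mode controller node (the address, interrupt
+values and label here are illustrative, not taken from a real design):
+
+	qspi_stacked: spi@ff0f0000 {
+		compatible = "xlnx,zynqmp-qspi-1.0";
+		reg = <0x0 0xff0f0000 0x0 0x1000>;
+		interrupt-parent = <&gic>;
+		interrupts = <0 15 4>;
+		num-cs = <2>;
+		is-dual = <0>;
+		is-stacked = <1>;
+	};
+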
Example:
qspi: spi@ff0f0000 {
diff --git a/Documentation/devicetree/bindings/staging/xroeframer/xroeframer.txt b/Documentation/devicetree/bindings/staging/xroeframer/xroeframer.txt
new file mode 100644
index 000000000000..8dabef16d083
--- /dev/null
+++ b/Documentation/devicetree/bindings/staging/xroeframer/xroeframer.txt
@@ -0,0 +1,17 @@
+* Xilinx Radio over Ethernet Framer driver
+
+Required properties:
+- compatible: must be "xlnx,roe-framer-1.0"
+- reg: physical base address of the framer and length of memory mapped region
+- clock-names: list of clock names
+- clocks: list of clock sources corresponding to the clock names
+
+Example:
+ roe_framer@a0000000 {
+ compatible = "xlnx,roe-framer-1.0";
+ reg = <0x0 0xa0000000 0x0 0x10000>;
+ clock-names = "s_axi_aclk", "m_axis_defm_aclk",
+ "s_axis_fram_aclk", "tx0_eth_port_clk",
+ "internal_bus_clk";
+ clocks = <0x43 0x44 0x44 0x45 0x45>;
+ };
diff --git a/Documentation/devicetree/bindings/staging/xroetrafficgen/xroetrafficgen.txt b/Documentation/devicetree/bindings/staging/xroetrafficgen/xroetrafficgen.txt
new file mode 100644
index 000000000000..3516d3ff8009
--- /dev/null
+++ b/Documentation/devicetree/bindings/staging/xroetrafficgen/xroetrafficgen.txt
@@ -0,0 +1,15 @@
+* Xilinx Radio over Ethernet Traffic Generator driver
+
+Required properties:
+- compatible: must be "xlnx,roe-traffic-gen-1.0"
+- reg: physical base address of the traffic generator and length of memory
+  mapped region
+- clock-names: list of clock names
+- clocks: list of clock sources corresponding to the clock names
+
+Example:
+ roe_radio_ctrl@a0060000 {
+ compatible = "xlnx,roe-traffic-gen-1.0";
+ reg = <0x0 0xa0060000 0x0 0x10000>;
+ clock-names = "s_axis_fram_aclk", "s_axi_aclk";
+ clocks = <0x44 0x43>;
+ };
diff --git a/Documentation/devicetree/bindings/uio/xilinx_apm.txt b/Documentation/devicetree/bindings/uio/xilinx_apm.txt
new file mode 100644
index 000000000000..a11c82e84b6a
--- /dev/null
+++ b/Documentation/devicetree/bindings/uio/xilinx_apm.txt
@@ -0,0 +1,44 @@
+* Xilinx AXI Performance monitor IP
+
+Required properties:
+- compatible: "xlnx,axi-perf-monitor"
+- interrupts: Should contain APM interrupts.
+- interrupt-parent: Must be core interrupt controller.
+- reg: Should contain APM registers location and length.
+- xlnx,enable-profile: Enables the profile mode.
+- xlnx,enable-trace: Enables trace mode.
+- xlnx,num-monitor-slots: Maximum number of slots in APM.
+- xlnx,enable-event-count: Enable event count.
+- xlnx,enable-event-log: Enable event logging.
+- xlnx,have-sampled-metric-cnt: Sampled metric counters enabled in APM.
+- xlnx,num-of-counters: Number of counters in APM
+- xlnx,metric-count-width: Metric Counter width (32/64)
+- xlnx,metrics-sample-count-width: Sampled metric counter width
+- xlnx,global-count-width: Global Clock counter width
+- clocks: Input clock specifier.
+
+Optional properties:
+- xlnx,id-filter-32bit: APM is in 32-bit mode
+
+Example:
+++++++++
+
+apm: apm@44a00000 {
+ compatible = "xlnx,axi-perf-monitor";
+ interrupt-parent = <&axi_intc_1>;
+ interrupts = <1 2>;
+ reg = <0x44a00000 0x1000>;
+ clocks = <&clkc 15>;
+ xlnx,enable-profile = <0>;
+ xlnx,enable-trace = <0>;
+ xlnx,num-monitor-slots = <4>;
+ xlnx,enable-event-count = <1>;
+ xlnx,enable-event-log = <1>;
+ xlnx,have-sampled-metric-cnt = <1>;
+ xlnx,num-of-counters = <8>;
+ xlnx,metric-count-width = <32>;
+ xlnx,metrics-sample-count-width = <32>;
+ xlnx,global-count-width = <32>;
+ xlnx,metric-count-scale = <1>;
+ xlnx,id-filter-32bit;
+};
diff --git a/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt b/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt
index 4aae5b2cef56..622e27fc0b71 100644
--- a/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt
@@ -1,7 +1,8 @@
Xilinx SuperSpeed DWC3 USB SoC controller
Required properties:
-- compatible: Should contain "xlnx,zynqmp-dwc3"
+- compatible: May contain "xlnx,zynqmp-dwc3" or "xlnx,versal-dwc3"
+- reg: Base address and length of the register control block
- clocks: A list of phandles for the clocks listed in clock-names
- clock-names: Should contain the following:
"bus_clk" Master/Core clock, have to be >= 125 MHz for SS
@@ -13,20 +14,38 @@ Required child node:
A child node must exist to represent the core DWC3 IP block. The name of
the node is not important. The content of the node is defined in dwc3.txt.
+Optional properties for xlnx,zynqmp-dwc3:
+- nvmem-cells: list of phandles to the nvmem data cells.
+- nvmem-cell-names: Names for each of the nvmem-cells specified.
+
+Optional properties for snps,dwc3:
+- dma-coherent: Enable this flag if CCI is enabled in the design. Adding this
+		flag configures the Global SoC bus Configuration Register and
+		the Xilinx USB 3.0 IP - USB coherency register to enable CCI.
+- snps,enable-hibernation: Add this flag to enable hibernation support for
+ peripheral mode
+- interrupt-names: This property provides the names of the interrupt ids used
+
Example device node:
usb@0 {
#address-cells = <0x2>;
#size-cells = <0x1>;
compatible = "xlnx,zynqmp-dwc3";
+ reg = <0x0 0xff9d0000 0x0 0x100>;
clock-names = "bus_clk" "ref_clk";
clocks = <&clk125>, <&clk125>;
ranges;
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
dwc3@fe200000 {
compatible = "snps,dwc3";
reg = <0x0 0xfe200000 0x40000>;
+		interrupt-names = "dwc_usb3";
interrupts = <0x0 0x41 0x4>;
dr_mode = "host";
+ dma-coherent;
+		snps,enable-hibernation;
};
};
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index c977a3ba2f35..be53234818d7 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -87,6 +87,19 @@ Optional properties:
- snps,quirk-frame-length-adjustment: Value for GFLADJ_30MHZ field of GFLADJ
register for post-silicon frame length adjustment when the
fladj_30mhz_sdbnd signal is invalid or incorrect.
+ - snps,refclk_fladj: Enable frame length adjustment for SOF/ITP counter.
+ - snps,enable_guctl1_resume_quirk: Adding this flag sets bit 10 of GUCTL1,
+   enabling the hardware workaround for the issue where the controller
+   was not able to generate a correct CRC checksum on the very first
+   transfer packet after sending a resume signal.
+ - snps,enable_guctl1_ipd_quirk: Adding this flag sets bit 9 of GUCTL1,
+   enabling the hardware workaround that reduces the Inter Packet Delay (IPD)
+   and makes the controller enumerate FS/LS devices connected behind a
+   VIA-LAB hub.
+ - snps,xhci-stream-quirk: The DWC3 host controller has a bug where it
+   sometimes fails to process the transfer descriptors present in the BULK IN
+   stream ring. Since the controller does not process any TD, no transfer
+   events are triggered, resulting in a hang condition. Enabling this
+   flag in the DTS fixes the issue.
- snps,rx-thr-num-pkt-prd: periodic ESS RX packet threshold count - host mode
only. Set this and rx-max-burst-prd to a valid,
non-zero value 1-16 (DWC_usb31 programming guide
@@ -110,6 +123,8 @@ Optional properties:
When just one value, which means INCRX burst mode enabled. When
more than one value, which means undefined length INCR burst type
enabled. The values can be 1, 4, 8, 16, 32, 64, 128 and 256.
+ - snps,mask_phy_reset: enabling this quirk masks the phy reset signal from
+   reaching the ULPI phy after the phy initialization is done.
- in addition all properties from usb-xhci.txt from the current directory are
supported as well
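+
+As a hedged illustration (the node, addresses and interrupt values below are
+copied from typical examples and are not tied to real hardware), a dwc3 node
+enabling some of the quirks above might look like:
+
+	dwc3@fe200000 {
+		compatible = "snps,dwc3";
+		reg = <0x0 0xfe200000 0x40000>;
+		interrupts = <0x0 0x41 0x4>;
+		dr_mode = "host";
+		snps,refclk_fladj;
+		snps,enable_guctl1_resume_quirk;
+		snps,xhci-stream-quirk;
+	};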
diff --git a/Documentation/devicetree/bindings/usb/ehci-xilinx.txt b/Documentation/devicetree/bindings/usb/ehci-xilinx.txt
new file mode 100644
index 000000000000..4df7ad6e3541
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ehci-xilinx.txt
@@ -0,0 +1,21 @@
+Xilinx USB EHCI controller
+
+Required properties:
+- compatible: must be "xlnx,xps-usb-host-1.00.a"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: The EHCI interrupt
+
+Optional properties:
+- xlnx,ext-vbus-valid: Use external VBUS
+- xlnx,support-usb-fs: Support for Full Speed USB
+- xlnx,use-phy-bus-pwr: Use phy bus power in USB
+
+Example:
+
+ xps_usb_host_0: usb@82400000 {
+ compatible = "xlnx,xps-usb-host-1.00.a";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 0 2 >;
+ reg = < 0x82400000 0x200 >;
+ } ;
diff --git a/Documentation/devicetree/bindings/usb/udc-xilinx.txt b/Documentation/devicetree/bindings/usb/udc-xilinx.txt
index 47b4e397a08d..86f705384132 100644
--- a/Documentation/devicetree/bindings/usb/udc-xilinx.txt
+++ b/Documentation/devicetree/bindings/usb/udc-xilinx.txt
@@ -6,13 +6,16 @@ Required properties:
device registers map.
- interrupts : Should contain single irq line of USB2 device
controller
-- xlnx,has-builtin-dma : if DMA is included
+- xlnx,has-builtin-dma : If DMA is included
-Example:
- axi-usb2-device@42e00000 {
- compatible = "xlnx,usb2-device-4.00.a";
- interrupts = <0x0 0x39 0x1>;
- reg = <0x42e00000 0x10000>;
- xlnx,has-builtin-dma;
- };
+Optional properties:
+- clock-names : Should be "s_axi_aclk"
+- clocks : Input clock specifier. Refer to common clock bindings.
+Example:
+ axi-usb2-device@42e00000 {
+ compatible = "xlnx,usb2-device-4.00.a";
+ interrupts = <0x0 0x39 0x1>;
+ reg = <0x42e00000 0x10000>;
+ xlnx,has-builtin-dma;
+ };
diff --git a/Documentation/devicetree/bindings/video/xilinx-fb.txt b/Documentation/devicetree/bindings/video/xilinx-fb.txt
new file mode 100644
index 000000000000..11a6ba01a032
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/xilinx-fb.txt
@@ -0,0 +1,35 @@
+Xilinx Axi TFT controller Device Tree Bindings
+---------------------------------------------------------
+
+Required properties:
+- compatible : Can be any of the following
+ "xlnx,xps-tft-1.00.a","xlnx,xps-tft-2.00.a",
+ "xlnx,xps-tft-2.01.a","xlnx,plb-tft-cntlr-ref-1.00.a",
+ "xlnx,plb-dvi-cntlr-ref-1.00.c"
+- reg : Physical base address and size of the Axi Tft
+ registers map
+- interrupts : Property with a value describing the interrupt
+ number
+- interrupt-parent : Must be core interrupt controller
+- xlnx,dcr-splb-slave-if : Access the TFT controller through the bus or DCR
+			   interface. For bus access the value is 1 and for
+			   DCR it is 0; the default is bus access, i.e. 1.
+- resolution : <xres yres> pixel resolution of the framebuffer. Some
+	       implementations use a different resolution.
+- virtual-resolution : <xvirt yvirt> Size of the framebuffer in memory.
+- rotate-display : (empty) rotate the display 180 degrees.
+- phys-size : <screen_width_mm screen_height_mm> width and height of the
+	      screen.
+
+Example:
+axi_tft_0: axi_tft@44a00000 {
+ compatible = "xlnx,xps-tft-1.00.a";
+ interrupt-parent = <&axi_intc>;
+ interrupts = <1 0>;
+ reg = <0x44a00000 0x10000>;
+ xlnx,dcr-splb-slave-if = <0x1>;
+ resolution = <640 480>;
+ virtual-resolution = <1024 480>;
+ phys-size = <1024 512>;
+ rotate-display;
+};
diff --git a/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt b/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt
index c6ae9c9d5e3e..10d68003158d 100644
--- a/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt
@@ -1,21 +1,28 @@
-Xilinx AXI/PLB soft-core watchdog Device Tree Bindings
----------------------------------------------------------
+Xilinx AXI/PLB soft-core watchdog and window watchdog Device Tree Bindings
+--------------------------------------------------------------------------
Required properties:
- compatible : Should be "xlnx,xps-timebase-wdt-1.00.a" or
- "xlnx,xps-timebase-wdt-1.01.a".
+ "xlnx,xps-timebase-wdt-1.01.a" or
+ "xlnx,versal-wwdt-1.0".
- reg : Physical base address and size
Optional properties:
- clocks : Input clock specifier. Refer to common clock
bindings.
- clock-frequency : Frequency of clock in Hz
+
+Optional properties for AXI/PLB soft-core watchdog:
- xlnx,wdt-enable-once : 0 - Watchdog can be restarted
1 - Watchdog can be enabled just once
- xlnx,wdt-interval : Watchdog timeout interval in 2^<val> clock cycles,
<val> is integer from 8 to 31.
+Optional properties for window watchdog:
+- timeout-sec : Watchdog timeout value (in seconds).
+
Example:
+Xilinx AXI/PLB soft-core watchdog:
axi-timebase-wdt@40100000 {
clock-frequency = <50000000>;
compatible = "xlnx,xps-timebase-wdt-1.00.a";
@@ -24,3 +31,11 @@ axi-timebase-wdt@40100000 {
xlnx,wdt-enable-once = <0x0>;
xlnx,wdt-interval = <0x1b>;
} ;
+
+Xilinx Versal window watchdog:
+watchdog@fd4d0000 {
+ compatible = "xlnx,versal-wwdt-1.0";
+ reg = <0x0 0xfd4d0000 0x0 0x10000>;
+ clocks = <&clk25>;
+ timeout-sec = <10>;
+} ;
diff --git a/Documentation/devicetree/bindings/xilinx.txt b/Documentation/devicetree/bindings/xilinx.txt
index d058ace29345..0c75bb153ca6 100644
--- a/Documentation/devicetree/bindings/xilinx.txt
+++ b/Documentation/devicetree/bindings/xilinx.txt
@@ -253,6 +253,7 @@
Optional properties:
- 8-bit (empty) : Set this property for SystemACE in 8 bit mode
+       - port-number = <port_number> : Set port number for a particular device
iii) Xilinx EMAC and Xilinx TEMAC
diff --git a/Documentation/devicetree/bindings/xlnx,ctrl-fb.txt b/Documentation/devicetree/bindings/xlnx,ctrl-fb.txt
new file mode 100644
index 000000000000..8abc053dfa30
--- /dev/null
+++ b/Documentation/devicetree/bindings/xlnx,ctrl-fb.txt
@@ -0,0 +1,22 @@
+The Xilinx framebuffer DMA engine supports two soft IP blocks: one IP
+block is used for reading video frame data from memory (FB Read) to the device
+and the other IP block is used for writing video frame data from the device
+to memory (FB Write). Both the FB Read/Write IP blocks are aware of the
+format of the data being written to or read from memory including RGB and
+YUV in packed, planar, and semi-planar formats. Because the FB Read/Write
+is format aware, only one buffer pointer is needed by the IP blocks even
+when planar or semi-planar formats are used.
+
+Required properties:
+ - compatible: Should be "xlnx,ctrl-fbwr-1.0" for framebuffer Write OR
+ "xlnx,ctrl-fbrd-1.0" for framebuffer Read.
+ - reg: Base address and size of the IP core.
+ - reset-gpios: gpio to reset the framebuffer IP
+
+Example:
+
+	fbwr@a0000000 {
+ compatible = "xlnx,ctrl-fbwr-1.0";
+ reg = <0x0 0xa0000000 0x0 0x10000>;
+ reset-gpios = <&gpio 82 1>;
+ };
diff --git a/Documentation/devicetree/bindings/xlnx,ctrl-vpss.txt b/Documentation/devicetree/bindings/xlnx,ctrl-vpss.txt
new file mode 100644
index 000000000000..04e6426f4e9b
--- /dev/null
+++ b/Documentation/devicetree/bindings/xlnx,ctrl-vpss.txt
@@ -0,0 +1,21 @@
+The Xilinx VPSS Scaler is a video IP that supports upscaling, downscaling and
+no-scaling functionality along with color space conversion. It supports
+custom resolution values between 0 and 4096.
+
+Required properties:
+
+- compatible: Must be "xlnx,ctrl-xvpss-1.0".
+- reg: Base address and size of the IP core.
+- reset-gpios: GPIO to reset the VPSS IP
+- xlnx,vpss-taps: number of taps
+- xlnx,vpss-ppc: pixels per clock
+
+Example:
+
+ ctrlvpss: vpss@a0200000 {
+ compatible = "xlnx,ctrl-xvpss-1.0";
+ reg = <0x0 0xa0200000 0x0 0x30000>;
+ reset-gpios = <&gpio 80 1>;
+ xlnx,vpss-taps = <6>;
+ xlnx,vpss-ppc = <2>;
+ };
diff --git a/Documentation/devicetree/configfs-overlays.txt b/Documentation/devicetree/configfs-overlays.txt
new file mode 100644
index 000000000000..5fa43e064307
--- /dev/null
+++ b/Documentation/devicetree/configfs-overlays.txt
@@ -0,0 +1,31 @@
+Howto use the configfs overlay interface.
+
+A device-tree configfs entry is created in /config/device-tree/overlays
+and it is manipulated using standard file system I/O.
+Note that this is a debug level interface, for use by developers and
+not necessarily something accessed by normal users due to the
+security implications of having direct access to the kernel's device tree.
+
+* To create an overlay you mkdir the directory:
+
+ # mkdir /config/device-tree/overlays/foo
+
+* Either you echo the overlay firmware file to the path property file.
+
+ # echo foo.dtbo >/config/device-tree/overlays/foo/path
+
+* Or you cat the contents of the overlay to the dtbo file
+
+ # cat foo.dtbo >/config/device-tree/overlays/foo/dtbo
+
+The overlay file will be applied, and devices will be created/destroyed
+as required.
+
+To remove it simply rmdir the directory.
+
+ # rmdir /config/device-tree/overlays/foo
+
+The rationale of the dual interface (firmware & direct copy) is that each is
+better suited to different use patterns. The firmware interface is what's
+intended to be used by hardware managers in the kernel, while the copy interface
+makes sense for developers (since it avoids problems with namespaces).
diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst
index dfc4486b5743..94c054fb0fd6 100644
--- a/Documentation/driver-api/dmaengine/provider.rst
+++ b/Documentation/driver-api/dmaengine/provider.rst
@@ -191,6 +191,13 @@ Currently, the types available are:
- Used by the client drivers to register a callback that will be
called on a regular basis through the DMA controller interrupt
+- DMA_SG
+ - The device supports memory to memory scatter-gather
+ transfers.
+ - Even though a plain memcpy can look like a particular case of a
+ scatter-gather transfer, with a single chunk to transfer, it's a
+ distinct transaction type in the mem2mem transfers case
+
- DMA_PRIVATE
- The devices only supports slave transfers, and as such isn't
diff --git a/Documentation/driver-api/spi.rst b/Documentation/driver-api/spi.rst
index f64cb666498a..f28887045049 100644
--- a/Documentation/driver-api/spi.rst
+++ b/Documentation/driver-api/spi.rst
@@ -25,8 +25,8 @@ hardware, which may be as simple as a set of GPIO pins or as complex as
a pair of FIFOs connected to dual DMA engines on the other side of the
SPI shift register (maximizing throughput). Such drivers bridge between
whatever bus they sit on (often the platform bus) and SPI, and expose
-the SPI side of their device as a :c:type:`struct spi_master
-<spi_master>`. SPI devices are children of that master,
+the SPI side of their device as a :c:type:`struct spi_controller
+<spi_controller>`. SPI devices are children of that master,
represented as a :c:type:`struct spi_device <spi_device>` and
manufactured from :c:type:`struct spi_board_info
<spi_board_info>` descriptors which are usually provided by
diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst
index f51bb21d20e4..49b577307385 100644
--- a/Documentation/fault-injection/fault-injection.rst
+++ b/Documentation/fault-injection/fault-injection.rst
@@ -74,8 +74,8 @@ configuration of fault-injection capabilities.
- /sys/kernel/debug/fail*/times:
- specifies how many times failures may happen at most.
- A value of -1 means "no limit".
+ specifies how many times failures may happen at most. A value of -1
+ means "no limit".
- /sys/kernel/debug/fail*/space:
@@ -163,11 +163,13 @@ configuration of fault-injection capabilities.
- ERRNO: retval must be -1 to -MAX_ERRNO (-4096).
- ERR_NULL: retval must be 0 or -1 to -MAX_ERRNO (-4096).
-- /sys/kernel/debug/fail_function/<functiuon-name>/retval:
+- /sys/kernel/debug/fail_function/<function-name>/retval:
- specifies the "error" return value to inject to the given
- function for given function. This will be created when
- user specifies new injection entry.
+ specifies the "error" return value to inject to the given function.
+ This will be created when the user specifies a new injection entry.
+ Note that this file only accepts unsigned values. So, if you want to
+ use a negative errno, you should use 'printf' instead of 'echo', e.g.:
+ $ printf %#x -12 > retval
Boot option
^^^^^^^^^^^
@@ -331,7 +333,7 @@ Application Examples
FAILTYPE=fail_function
FAILFUNC=open_ctree
echo $FAILFUNC > /sys/kernel/debug/$FAILTYPE/inject
- echo -12 > /sys/kernel/debug/$FAILTYPE/$FAILFUNC/retval
+ printf %#x -12 > /sys/kernel/debug/$FAILTYPE/$FAILFUNC/retval
echo N > /sys/kernel/debug/$FAILTYPE/task-filter
echo 100 > /sys/kernel/debug/$FAILTYPE/probability
echo 0 > /sys/kernel/debug/$FAILTYPE/interval
diff --git a/Documentation/filesystems/directory-locking.rst b/Documentation/filesystems/directory-locking.rst
index de12016ee419..6a238477f27f 100644
--- a/Documentation/filesystems/directory-locking.rst
+++ b/Documentation/filesystems/directory-locking.rst
@@ -23,13 +23,15 @@ exclusive.
locks victim and calls the method. Locks are exclusive.
4) rename() that is _not_ cross-directory. Locking rules: caller locks
-the parent and finds source and target. In case of exchange (with
-RENAME_EXCHANGE in flags argument) lock both. In any case,
-if the target already exists, lock it. If the source is a non-directory,
-lock it. If we need to lock both, lock them in inode pointer order.
-Then call the method. All locks are exclusive.
-NB: we might get away with locking the the source (and target in exchange
-case) shared.
+the parent and finds source and target. Then we decide which of the
+source and target need to be locked. Source needs to be locked if it's a
+non-directory; target - if it's a non-directory or about to be removed.
+Take the locks that need to be taken, in inode pointer order if need
+to take both (that can happen only when both source and target are
+non-directories - the source because it wouldn't be locked otherwise
+and the target because mixing directory and non-directory is allowed
+only with RENAME_EXCHANGE, and that won't be removing the target).
+After the locks have been taken, call the method. All locks are exclusive.
5) link creation. Locking rules:
@@ -44,19 +46,22 @@ All locks are exclusive.
rules:
* lock the filesystem
- * lock parents in "ancestors first" order.
+ * lock parents in "ancestors first" order. If one is not an ancestor of
+ the other, lock the parent of source first.
* find source and target.
* if old parent is equal to or is a descendent of target
fail with -ENOTEMPTY
* if new parent is equal to or is a descendent of source
fail with -ELOOP
- * If it's an exchange, lock both the source and the target.
- * If the target exists, lock it. If the source is a non-directory,
- lock it. If we need to lock both, do so in inode pointer order.
+ * Lock both the source and the target provided they exist. If we
+ need to lock two inodes of different type (dir vs non-dir), we lock
+ the directory first. If we need to lock two inodes of the same type,
+ lock them in inode pointer order.
+ * Lock subdirectories involved (source before target).
+ * Lock non-directories involved, in inode pointer order.
* call the method.
-All ->i_rwsem are taken exclusive. Again, we might get away with locking
-the the source (and target in exchange case) shared.
+All ->i_rwsem are taken exclusive.
The rules above obviously guarantee that all directories that are going to be
read, modified or removed by method will be locked by caller.
@@ -66,8 +71,10 @@ If no directory is its own ancestor, the scheme above is deadlock-free.
Proof:
- First of all, at any moment we have a partial ordering of the
- objects - A < B iff A is an ancestor of B.
+[XXX: will be updated once we are done massaging the lock_rename()]
+ First of all, at any moment we have a linear ordering of the
+ objects - A < B iff (A is an ancestor of B) or (B is not an ancestor
+ of A and ptr(A) < ptr(B)).
That ordering can change. However, the following is true:
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index fc3a0704553c..b8b2906dc221 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -95,7 +95,7 @@ symlink: exclusive
mkdir: exclusive
unlink: exclusive (both)
rmdir: exclusive (both)(see below)
-rename: exclusive (all) (see below)
+rename: exclusive (both parents, some children) (see below)
readlink: no
get_link: no
setattr: exclusive
@@ -113,6 +113,9 @@ tmpfile: no
Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_rwsem
exclusive on victim.
cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
+ ->unlink() and ->rename() have ->i_rwsem exclusive on all non-directories
+ involved.
+ ->rename() has ->i_rwsem exclusive on any subdirectory that changes parent.
See Documentation/filesystems/directory-locking.rst for more detailed discussion
of the locking scheme for directory operations.
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index 26c093969573..48301b6b517c 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -858,3 +858,21 @@ be misspelled d_alloc_anon().
[should've been added in 2016] stale comment in finish_open() nonwithstanding,
failure exits in ->atomic_open() instances should *NOT* fput() the file,
no matter what. Everything is handled by the caller.
+
+---
+
+**mandatory**
+
+If ->rename() update of .. on cross-directory move needs an exclusion with
+directory modifications, do *not* lock the subdirectory in question in your
+->rename() - it's done by the caller now [that item should've been added in
+28eceeda130f "fs: Lock moved directories"].
+
+---
+
+**mandatory**
+
+On same-directory ->rename() the (tautological) update of .. is not protected
+by any locks; just don't do it if the old parent is the same as the new one.
+We really can't lock two subdirectories in same-directory rename - not without
+deadlocks.
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index 7d4d09dd5e6d..241e31200643 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -1173,7 +1173,7 @@ defined:
return
-ECHILD and it will be called again in ref-walk mode.
-``_weak_revalidate``
+``d_weak_revalidate``
called when the VFS needs to revalidate a "jumped" dentry. This
is called when a path-walk ends at dentry that was not acquired
by doing a lookup in the parent directory. This includes "/",
diff --git a/Documentation/firmware-guide/acpi/apei/einj.rst b/Documentation/firmware-guide/acpi/apei/einj.rst
index e588bccf5158..344284236a81 100644
--- a/Documentation/firmware-guide/acpi/apei/einj.rst
+++ b/Documentation/firmware-guide/acpi/apei/einj.rst
@@ -168,7 +168,7 @@ An error injection example::
0x00000008 Memory Correctable
0x00000010 Memory Uncorrectable non-fatal
# echo 0x12345000 > param1 # Set memory address for injection
- # echo $((-1 << 12)) > param2 # Mask 0xfffffffffffff000 - anywhere in this page
+ # echo 0xfffffffffffff000 > param2 # Mask - anywhere in this page
# echo 0x8 > error_type # Choose correctable memory error
# echo 1 > error_inject # Inject now
diff --git a/Documentation/input/joydev/joystick.rst b/Documentation/input/joydev/joystick.rst
index 9746fd76cc58..f38c330c028e 100644
--- a/Documentation/input/joydev/joystick.rst
+++ b/Documentation/input/joydev/joystick.rst
@@ -517,6 +517,7 @@ All I-Force devices are supported by the iforce module. This includes:
* AVB Mag Turbo Force
* AVB Top Shot Pegasus
* AVB Top Shot Force Feedback Racing Wheel
+* Boeder Force Feedback Wheel
* Logitech WingMan Force
* Logitech WingMan Force Wheel
* Guillemot Race Leader Force Feedback
diff --git a/Documentation/ioctl/ioctl-number.rst b/Documentation/ioctl/ioctl-number.rst
index bef79cd4c6b4..6246a0d4f323 100644
--- a/Documentation/ioctl/ioctl-number.rst
+++ b/Documentation/ioctl/ioctl-number.rst
@@ -301,7 +301,6 @@ Code Seq# Include File Comments
0x89 00-06 arch/x86/include/asm/sockios.h
0x89 0B-DF linux/sockios.h
0x89 E0-EF linux/sockios.h SIOCPROTOPRIVATE range
-0x89 E0-EF linux/dn.h PROTOPRIVATE range
0x89 F0-FF linux/sockios.h SIOCDEVPRIVATE range
0x8B all linux/wireless.h
0x8C 00-3F WiNRADiO driver
diff --git a/Documentation/media/kapi/v4l2-dev.rst b/Documentation/media/kapi/v4l2-dev.rst
index 4c5a15c53dbf..63c064837c00 100644
--- a/Documentation/media/kapi/v4l2-dev.rst
+++ b/Documentation/media/kapi/v4l2-dev.rst
@@ -185,7 +185,7 @@ This will create the character device for you.
.. code-block:: c
- err = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ err = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (err) {
video_device_release(vdev); /* or kfree(my_vdev); */
return err;
@@ -201,7 +201,7 @@ types exist:
========================== ==================== ==============================
:c:type:`vfl_devnode_type` Device name Usage
========================== ==================== ==============================
-``VFL_TYPE_GRABBER`` ``/dev/videoX`` for video input/output devices
+``VFL_TYPE_VIDEO`` ``/dev/videoX`` for video input/output devices
``VFL_TYPE_VBI`` ``/dev/vbiX`` for vertical blank data (i.e.
closed captions, teletext)
``VFL_TYPE_RADIO`` ``/dev/radioX`` for radio tuners
diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst
index 15e11f27b4c8..c035f63b13b7 100644
--- a/Documentation/media/uapi/v4l/subdev-formats.rst
+++ b/Documentation/media/uapi/v4l/subdev-formats.rst
@@ -1531,6 +1531,43 @@ The following tables list existing packed RGB formats.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-RBG101010-1X30:
+
+ - MEDIA_BUS_FMT_RBG101010_1X30
+ - 0x1100
+ -
+ - 0
+ - 0
+ - r\ :sub:`9`
+ - r\ :sub:`8`
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - b\ :sub:`9`
+ - b\ :sub:`8`
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`9`
+ - g\ :sub:`8`
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
.. raw:: latex
@@ -1638,6 +1675,47 @@ The following table list existing packed 36bit wide RGB formats.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-RBG121212-1X36:
+
+ - MEDIA_BUS_FMT_RBG121212_1X36
+ - 0x1101
+ -
+ - r\ :sub:`11`
+ - r\ :sub:`10`
+ - r\ :sub:`9`
+ - r\ :sub:`8`
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - b\ :sub:`11`
+ - b\ :sub:`10`
+ - b\ :sub:`9`
+ - b\ :sub:`8`
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`11`
+ - g\ :sub:`10`
+ - g\ :sub:`9`
+ - g\ :sub:`8`
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
.. raw:: latex
@@ -1807,6 +1885,78 @@ The following table list existing packed 48bit wide RGB formats.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-RBG161616-1X48:
+
+ - MEDIA_BUS_FMT_RBG161616_1X48
+ - 0x1102
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - r\ :sub:`15`
+ - r\ :sub:`14`
+ - r\ :sub:`13`
+ - r\ :sub:`12`
+ - r\ :sub:`11`
+ - r\ :sub:`10`
+ - r\ :sub:`9`
+ - r\ :sub:`8`
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ * -
+ -
+ -
+ - b\ :sub:`15`
+ - b\ :sub:`14`
+ - b\ :sub:`13`
+ - b\ :sub:`12`
+ - b\ :sub:`11`
+ - b\ :sub:`10`
+ - b\ :sub:`9`
+ - b\ :sub:`8`
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`15`
+ - g\ :sub:`14`
+ - g\ :sub:`13`
+ - g\ :sub:`12`
+ - g\ :sub:`11`
+ - g\ :sub:`10`
+ - g\ :sub:`9`
+ - g\ :sub:`8`
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
.. raw:: latex
@@ -5187,6 +5337,148 @@ the following codes.
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VYYUYY10_4X20:
+
+ - MEDIA_BUS_FMT_VYYUYY10_4X20
+ - 0x2101
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* .. _MEDIA-BUS-FMT-Y12-1X12:
- MEDIA_BUS_FMT_Y12_1X12
@@ -6944,6 +7236,185 @@ the following codes.
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-UYYVYY12-4X24:
+
+ - MEDIA_BUS_FMT_UYYVYY12_4X24
+ - 0x2103
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VYYUYY8-1X24:
+
+ - MEDIA_BUS_FMT_VYYUYY8_1X24
+ - 0x2100
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* .. _MEDIA-BUS-FMT-YUV10-1X30:
- MEDIA_BUS_FMT_YUV10_1X30
@@ -6981,6 +7452,43 @@ the following codes.
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VUY10-1X30:
+
+ - MEDIA_BUS_FMT_VUY10_1X30
+ - 0x2102
+ -
+ -
+ -
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* .. _MEDIA-BUS-FMT-UYYVYY10-0-5X30:
- MEDIA_BUS_FMT_UYYVYY10_0_5X30
@@ -7053,6 +7561,43 @@ the following codes.
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-Y16-1X16:
+
+ - MEDIA_BUS_FMT_Y16_1X16
+ - 0x2105
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* .. _MEDIA-BUS-FMT-AYUV8-1X32:
- MEDIA_BUS_FMT_AYUV8_1X32
@@ -7090,6 +7635,220 @@ the following codes.
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-UYYVYY16-4X32:
+
+ - MEDIA_BUS_FMT_UYYVYY16_4X32
+ - 0x2106
+ -
+ - u\ :sub:`15`
+ - u\ :sub:`14`
+ - u\ :sub:`13`
+ - u\ :sub:`12`
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ - v\ :sub:`15`
+ - v\ :sub:`14`
+ - v\ :sub:`13`
+ - v\ :sub:`12`
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-UYVY16-2X32:
+
+ - MEDIA_BUS_FMT_UYVY16_2X32
+ - 0x2108
+ -
+ - u\ :sub:`15`
+ - u\ :sub:`14`
+ - u\ :sub:`13`
+ - u\ :sub:`12`
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ - v\ :sub:`15`
+ - v\ :sub:`14`
+ - v\ :sub:`13`
+ - v\ :sub:`12`
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
.. raw:: latex
@@ -7278,6 +8037,47 @@ The following table list existing packed 36bit wide YUV formats.
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VUY12-1X36:
+
+ - MEDIA_BUS_FMT_VUY12_1X36
+ - 0x2104
+ -
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
.. raw:: latex
@@ -7448,6 +8248,78 @@ The following table list existing packed 48bit wide YUV formats.
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VUY16-1X48:
+
+ - MEDIA_BUS_FMT_VUY16_1X48
+ - 0x2107
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`15`
+ - v\ :sub:`14`
+ - v\ :sub:`13`
+ - v\ :sub:`12`
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ * -
+ -
+ -
+ - u\ :sub:`15`
+ - u\ :sub:`14`
+ - u\ :sub:`13`
+ - u\ :sub:`12`
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* .. _MEDIA-BUS-FMT-UYYVYY16-0-5X48:
- MEDIA_BUS_FMT_UYYVYY16_0_5X48
@@ -7794,3 +8666,30 @@ formats.
- 0x5001
- Interleaved raw UYVY and JPEG image format with embedded meta-data
used by Samsung S3C73MX camera sensors.
+
+.. _v4l2-mbus-metadata-fmts:
+
+Metadata Formats
+^^^^^^^^^^^^^^^^
+
+This section lists all metadata formats.
+
+The following table lists the existing metadata formats.
+
+.. tabularcolumns:: |p{8.0cm}|p{1.4cm}|p{7.7cm}|
+
+.. flat-table:: Metadata formats
+ :header-rows: 1
+ :stub-columns: 0
+
+ * - Identifier
+ - Code
+ - Comments
+ * .. _MEDIA-BUS-FMT-METADATA-FIXED:
+
+ - MEDIA_BUS_FMT_METADATA_FIXED
+ - 0x7001
+ - This format should be used when the same driver handles
+ both sides of the link and the bus format is a fixed
+ metadata format that is not configurable from userspace.
+ Width and height will be set to 0 for this format.
diff --git a/Documentation/misc-devices/xilinx_sdfec.rst b/Documentation/misc-devices/xilinx_sdfec.rst
new file mode 100644
index 000000000000..87966e3aa5fe
--- /dev/null
+++ b/Documentation/misc-devices/xilinx_sdfec.rst
@@ -0,0 +1,291 @@
+.. SPDX-License-Identifier: GPL-2.0+
+====================
+Xilinx SD-FEC Driver
+====================
+
+Overview
+========
+
+This driver supports SD-FEC Integrated Block for Zynq |Ultrascale+ (TM)| RFSoCs.
+
+.. |Ultrascale+ (TM)| unicode:: Ultrascale+ U+2122
+ .. with trademark sign
+
+For a full description of SD-FEC core features, see the `SD-FEC Product Guide (PG256) <https://www.xilinx.com/cgi-bin/docs/ipdoc?c=sd_fec;v=latest;d=pg256-sdfec-integrated-block.pdf>`_
+
+This driver supports the following features:
+
+ - Retrieval of the Integrated Block configuration and status information
+ - Configuration of LDPC codes
+ - Configuration of Turbo decoding
+ - Monitoring errors
+
+Missing features, known issues, and limitations of the SD-FEC driver are as
+follows:
+
+ - Only allows a single open file handler to any instance of the driver at any time
+ - Reset of the SD-FEC Integrated Block is not controlled by this driver
+ - Does not support shared LDPC code table wraparound
+
+The device tree entry is described in:
+`linux-xlnx/Documentation/devicetree/bindings/misc/xlnx,sd-fec.txt <https://github.com/Xilinx/linux-xlnx/blob/master/Documentation/devicetree/bindings/misc/xlnx%2Csd-fec.txt>`_
+
+
+Modes of Operation
+------------------
+
+The driver works with the SD-FEC core in two modes of operation:
+
+ - Run-time configuration
+ - Programmable Logic (PL) initialization
+
+
+Run-time Configuration
+~~~~~~~~~~~~~~~~~~~~~~
+
+For Run-time configuration, the role of the driver is to allow the software application to do the following:
+
+ - Load the configuration parameters for either Turbo decode or LDPC encode or decode
+ - Activate the SD-FEC core
+ - Monitor the SD-FEC core for errors
+ - Retrieve the status and configuration of the SD-FEC core
+
+Programmable Logic (PL) Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For PL initialization, supporting logic loads configuration parameters for either
+the Turbo decode or LDPC encode or decode. The role of the driver is to allow
+the software application to do the following:
+
+ - Activate the SD-FEC core
+ - Monitor the SD-FEC core for errors
+ - Retrieve the status and configuration of the SD-FEC core
+
+
+Driver Structure
+================
+
+The driver provides a platform device where the ``probe`` and ``remove``
+operations are provided.
+
+ - probe: Updates the configuration register with device-tree entries and determines the current activation state of the core, for example, whether the core is bypassed or has been started.
+
+
+The driver defines the following driver file operations to provide user
+application interfaces:
+
+ - open: Implements the restriction that only a single file descriptor can be open per SD-FEC instance at any time
+ - release: Allows another file descriptor to be opened, that is, after the current file descriptor is closed
+ - poll: Provides a method to monitor for SD-FEC Error events
+ - unlocked_ioctl: Provides the following ioctl commands that allow the application to configure the SD-FEC core:
+
+ - :c:macro:`XSDFEC_START_DEV`
+ - :c:macro:`XSDFEC_STOP_DEV`
+ - :c:macro:`XSDFEC_GET_STATUS`
+ - :c:macro:`XSDFEC_SET_IRQ`
+ - :c:macro:`XSDFEC_SET_TURBO`
+ - :c:macro:`XSDFEC_ADD_LDPC_CODE_PARAMS`
+ - :c:macro:`XSDFEC_GET_CONFIG`
+ - :c:macro:`XSDFEC_SET_ORDER`
+ - :c:macro:`XSDFEC_SET_BYPASS`
+ - :c:macro:`XSDFEC_IS_ACTIVE`
+ - :c:macro:`XSDFEC_CLEAR_STATS`
+ - :c:macro:`XSDFEC_SET_DEFAULT_CONFIG`
+
+
+Driver Usage
+============
+
+
+Overview
+--------
+
+After opening the driver, the user should find out what operations need to be
+performed to configure and activate the SD-FEC core and determine the
+configuration of the driver.
+The following outlines the flow the user should perform:
+
+ - Determine Configuration
+ - Set the order, if not already configured as desired
+ - Set Turbo decode, LDPC encode or decode parameters, depending on how the
+   SD-FEC core is configured, provided the SD-FEC has not been configured
+   for PL initialization
+ - Enable interrupts, if not already enabled
+ - Bypass the SD-FEC core, if required
+ - Start the SD-FEC core if not already started
+ - Get the SD-FEC core status
+ - Monitor for interrupts
+ - Stop the SD-FEC core
+
+
+Note: When monitoring for interrupts, if a critical error requiring a reset is detected, the driver will be required to load the default configuration.
+
+
+Determine Configuration
+-----------------------
+
+Determine the configuration of the SD-FEC core by using the ioctl
+:c:macro:`XSDFEC_GET_CONFIG`.
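+
+A minimal sketch of this step, assuming the device node is
+``/dev/xsdfec0`` (the actual name depends on the device instance),
+could look like this:
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <sys/ioctl.h>
+    #include <misc/xilinx_sdfec.h> /* installed uapi header */
+
+    struct xsdfec_config config;
+    int fd = open("/dev/xsdfec0", O_RDWR); /* assumed node name */
+
+    if (fd < 0 || ioctl(fd, XSDFEC_GET_CONFIG, &config) < 0)
+        /* handle the error */;
+    /* config.code now indicates whether the core is LDPC or Turbo */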
+
+Set the Order
+-------------
+
+Setting the order determines how the order of blocks can change from input to output.
+
+Setting the order is done by using the ioctl :c:macro:`XSDFEC_SET_ORDER`
+
+Setting the order can only be done if the following restrictions are met:
+
+ - The ``state`` member of struct :c:type:`xsdfec_status <xsdfec_status>` filled by the ioctl :c:macro:`XSDFEC_GET_STATUS` indicates the SD-FEC core has not STARTED
+
+
+Add LDPC Codes
+--------------
+
+The following steps indicate how to add LDPC codes to the SD-FEC core:
+
+ - Use the auto-generated parameters to fill the :c:type:`struct xsdfec_ldpc_params <xsdfec_ldpc_params>` for the desired LDPC code.
+ - Set the SC, QA, and LA table offsets for the LDPC parameters and the parameters in the structure :c:type:`struct xsdfec_ldpc_params <xsdfec_ldpc_params>`
+ - Set the desired Code Id value in the structure :c:type:`struct xsdfec_ldpc_params <xsdfec_ldpc_params>`
+ - Add the LDPC Code Parameters using the ioctl :c:macro:`XSDFEC_ADD_LDPC_CODE_PARAMS`
+ - For the applied LDPC Code Parameters use the function :c:func:`xsdfec_calculate_shared_ldpc_table_entry_size` to calculate the size of the shared LDPC code tables. This allows the user to determine the shared table usage so that, when selecting the table offsets for the next LDPC code parameters, unused table areas can be chosen.
+ - Repeat for each LDPC code parameter.
+
+Adding LDPC codes can only be done if the following restrictions are met:
+
+ - The ``code`` member of :c:type:`struct xsdfec_config <xsdfec_config>` filled by the ioctl :c:macro:`XSDFEC_GET_CONFIG` indicates the SD-FEC core is configured as LDPC
+ - The ``code_wr_protect`` of :c:type:`struct xsdfec_config <xsdfec_config>` filled by the ioctl :c:macro:`XSDFEC_GET_CONFIG` indicates that write protection is not enabled
+ - The ``state`` member of struct :c:type:`xsdfec_status <xsdfec_status>` filled by the ioctl :c:macro:`XSDFEC_GET_STATUS` indicates the SD-FEC core has not started
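+
+A hedged sketch of adding a single code, assuming the auto-generated
+parameters are already at hand (only a few representative members of
+:c:type:`struct xsdfec_ldpc_params <xsdfec_ldpc_params>` are shown):
+
+.. code-block:: c
+
+    struct xsdfec_ldpc_params *ldpc;
+
+    ldpc = calloc(1, sizeof(*ldpc));
+    /* fill in the auto-generated code parameters and tables here */
+    ldpc->sc_off = 0;  /* example table offsets for the first code */
+    ldpc->la_off = 0;
+    ldpc->qc_off = 0;
+    ldpc->code_id = 0; /* desired Code Id */
+
+    if (ioctl(fd, XSDFEC_ADD_LDPC_CODE_PARAMS, ldpc) < 0)
+        /* handle the error */;
+    free(ldpc);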
+
+Set Turbo Decode
+----------------
+
+Configuring the Turbo decode parameters is done by using the ioctl :c:macro:`XSDFEC_SET_TURBO` using auto-generated parameters to fill the :c:type:`struct xsdfec_turbo <xsdfec_turbo>` for the desired Turbo code.
+
+Adding Turbo decode can only be done if the following restrictions are met:
+
+ - The ``code`` member of :c:type:`struct xsdfec_config <xsdfec_config>` filled by the ioctl :c:macro:`XSDFEC_GET_CONFIG` indicates the SD-FEC core is configured as TURBO
+ - The ``state`` member of struct :c:type:`xsdfec_status <xsdfec_status>` filled by the ioctl :c:macro:`XSDFEC_GET_STATUS` indicates the SD-FEC core has not STARTED
+
+Enable Interrupts
+-----------------
+
+Enabling or disabling interrupts is done by using the ioctl :c:macro:`XSDFEC_SET_IRQ`. The members of the parameter passed to the ioctl, :c:type:`struct xsdfec_irq <xsdfec_irq>`, are used to set and clear different categories of interrupts. The category of interrupt is controlled as follows:
+
+ - ``enable_isr`` controls the ``tlast`` interrupts
+ - ``enable_ecc_isr`` controls the ECC interrupts
+
+If the ``code`` member of :c:type:`struct xsdfec_config <xsdfec_config>` filled by the ioctl :c:macro:`XSDFEC_GET_CONFIG` indicates the SD-FEC core is configured as TURBO, then enabling the ECC interrupts is not required.
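+
+A short sketch of enabling both interrupt categories (shown only as an
+illustration; other combinations are equally valid):
+
+.. code-block:: c
+
+    struct xsdfec_irq irq = {
+        .enable_isr = 1,     /* tlast interrupts */
+        .enable_ecc_isr = 1, /* ECC interrupts */
+    };
+
+    if (ioctl(fd, XSDFEC_SET_IRQ, &irq) < 0)
+        /* handle the error */;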
+
+Bypass the SD-FEC
+-----------------
+
+Bypassing the SD-FEC is done by using the ioctl :c:macro:`XSDFEC_SET_BYPASS`
+
+Bypassing the SD-FEC can only be done if the following restrictions are met:
+
+ - The ``state`` member of :c:type:`struct xsdfec_status <xsdfec_status>` filled by the ioctl :c:macro:`XSDFEC_GET_STATUS` indicates the SD-FEC core has not STARTED
+
+Start the SD-FEC core
+---------------------
+
+Start the SD-FEC core by using the ioctl :c:macro:`XSDFEC_START_DEV`
+
+Get SD-FEC Status
+-----------------
+
+Get the SD-FEC status of the device by using the ioctl :c:macro:`XSDFEC_GET_STATUS`, which will fill the :c:type:`struct xsdfec_status <xsdfec_status>`
+
+Monitor for Interrupts
+----------------------
+
+ - Use the poll system call to monitor for an interrupt. The poll system call waits for an interrupt to wake it up or times out if no interrupt occurs.
+ - On return, poll() ``revents`` will indicate whether stats and/or state have been updated
+ - ``POLLPRI`` indicates a critical error and the user should use :c:macro:`XSDFEC_GET_STATUS` and :c:macro:`XSDFEC_GET_STATS` to confirm
+ - ``POLLRDNORM`` indicates a non-critical error has occurred and the user should use :c:macro:`XSDFEC_GET_STATS` to confirm
+ - Get stats by using the ioctl :c:macro:`XSDFEC_GET_STATS`
+ - For critical error the ``isr_err_count`` or ``uecc_count`` member of :c:type:`struct xsdfec_stats <xsdfec_stats>` is non-zero
+ - For non-critical errors the ``cecc_count`` member of :c:type:`struct xsdfec_stats <xsdfec_stats>` is non-zero
+ - Get state by using the ioctl :c:macro:`XSDFEC_GET_STATUS`
+ - For a critical error the ``state`` of :c:type:`xsdfec_status <xsdfec_status>` will indicate a Reset Is Required
+ - Clear stats by using the ioctl :c:macro:`XSDFEC_CLEAR_STATS`
+
+If a critical error is detected where a reset is required, the application must call the ioctl :c:macro:`XSDFEC_SET_DEFAULT_CONFIG` after the reset; it is not required to call the ioctl :c:macro:`XSDFEC_STOP_DEV`.
+
+Note: Using the poll system call prevents busy looping on :c:macro:`XSDFEC_GET_STATS` and :c:macro:`XSDFEC_GET_STATUS`.
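+
+A hedged sketch of the monitoring loop, assuming ``fd`` is the open
+SD-FEC file descriptor:
+
+.. code-block:: c
+
+    #include <poll.h>
+
+    struct pollfd pfd = {
+        .fd = fd,
+        .events = POLLIN | POLLRDNORM | POLLPRI,
+    };
+    struct xsdfec_stats stats;
+
+    if (poll(&pfd, 1, 1000) > 0) { /* wait up to one second */
+        if (pfd.revents & (POLLPRI | POLLRDNORM)) {
+            ioctl(fd, XSDFEC_GET_STATS, &stats);
+            if (stats.isr_err_count || stats.uecc_count)
+                /* critical error: a reset may be required */;
+            ioctl(fd, XSDFEC_CLEAR_STATS);
+        }
+    }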
+
+Stop the SD-FEC Core
+---------------------
+
+Stop the device by using the ioctl :c:macro:`XSDFEC_STOP_DEV`
+
+Set the Default Configuration
+-----------------------------
+
+Load default configuration by using the ioctl :c:macro:`XSDFEC_SET_DEFAULT_CONFIG` to restore the driver.
+
+Limitations
+-----------
+
+Users should not duplicate SD-FEC device file handlers, for example by calling fork() or dup() in a process that has created an SD-FEC file handler.
+
+Driver IOCTLs
+==============
+
+.. c:macro:: XSDFEC_START_DEV
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_START_DEV
+
+.. c:macro:: XSDFEC_STOP_DEV
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_STOP_DEV
+
+.. c:macro:: XSDFEC_GET_STATUS
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_GET_STATUS
+
+.. c:macro:: XSDFEC_SET_IRQ
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_SET_IRQ
+
+.. c:macro:: XSDFEC_SET_TURBO
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_SET_TURBO
+
+.. c:macro:: XSDFEC_ADD_LDPC_CODE_PARAMS
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_ADD_LDPC_CODE_PARAMS
+
+.. c:macro:: XSDFEC_GET_CONFIG
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_GET_CONFIG
+
+.. c:macro:: XSDFEC_SET_ORDER
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_SET_ORDER
+
+.. c:macro:: XSDFEC_SET_BYPASS
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_SET_BYPASS
+
+.. c:macro:: XSDFEC_IS_ACTIVE
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_IS_ACTIVE
+
+.. c:macro:: XSDFEC_CLEAR_STATS
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_CLEAR_STATS
+
+.. c:macro:: XSDFEC_GET_STATS
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_GET_STATS
+
+.. c:macro:: XSDFEC_SET_DEFAULT_CONFIG
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_SET_DEFAULT_CONFIG
+
+Driver Type Definitions
+=======================
+
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :internal: \ No newline at end of file
diff --git a/Documentation/misc-devices/xilinx_trafgen.txt b/Documentation/misc-devices/xilinx_trafgen.txt
new file mode 100644
index 000000000000..473abc9628f6
--- /dev/null
+++ b/Documentation/misc-devices/xilinx_trafgen.txt
@@ -0,0 +1,87 @@
+Kernel driver xilinx_trafgen
+============================
+
+Supported chips:
+Zynq SoC, Xilinx 7 series FPGAs (Virtex, Kintex, Artix)
+
+Data Sheet:
+ http://www.xilinx.com/support/documentation/ip_documentation/axi_traffic_gen/v2_0/pg125-axi-traffic-gen.pdf
+
+Author:
+ Kedareswara Rao Appana <appanad@xilinx.com>
+
+Description
+-----------
+
+AXI Traffic Generator IP is a core that stresses the AXI4 interconnect and other
+AXI4 peripherals in the system. It generates a wide variety of AXI4 transactions
+based on the core programming.
+
+Features:
+- Configurable option to generate and accept data according to different
+  traffic profiles.
+- Supports dependent/independent transactions between the read/write master
+  ports with configurable delays.
+- Programmable repeat count for each transaction with
+  constant/increment/random address.
+- External start/stop to generate traffic without processor intervention.
+- Generates IP-specific traffic on the AXI interface for pre-defined
+  protocols.
+
+SYSFS:
+
+id
+ RO - shows the trafgen id.
+
+resource
+ RO - shows the base address for the trafgen.
+
+master_start_stop
+ RW - monitors the master start logic.
+
+config_slave_status
+ RW - configures and monitors the slave status.
+
+err_sts
+ RW - get the error statistics/clear the errors.
+
+err_en
+ WO - enable the errors.
+
+intr_en
+ WO - enable the interrupts.
+
+last_valid_index
+ RW - gets the last valid index value.
+
+config_sts_show
+ RO - show the config status value.
+
+mram_clear
+ WO - clears the master ram.
+
+cram_clear
+ WO - clears the command ram.
+
+pram_clear
+ WO - clears the parameter ram.
+
+static_enable
+ RO - enables the static mode.
+
+static_burst_len
+ RW - gets/sets the static burst length.
+
+static_transferdone
+ RW - monitors the static transfer done status.
+
+reset_static_transferdone
+ RO - resets the static transferdone bit.
+
+stream_enable
+ RO - enables the streaming mode.
+
+stream_transferlen
+ RW - get/set the streaming mode transfer length.
+
+stream_transfercnt
+ RW - get/set the streaming mode transfer count.
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index 83f7ae5fc045..09b3943b3b71 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -40,13 +40,13 @@ allocates memory for this UMEM using whatever means it feels is most
appropriate (malloc, mmap, huge pages, etc). This memory area is then
registered with the kernel using the new setsockopt XDP_UMEM_REG. The
UMEM also has two rings: the FILL ring and the COMPLETION ring. The
-fill ring is used by the application to send down addr for the kernel
+FILL ring is used by the application to send down addr for the kernel
to fill in with RX packet data. References to these frames will then
appear in the RX ring once each packet has been received. The
-completion ring, on the other hand, contains frame addr that the
+COMPLETION ring, on the other hand, contains frame addr that the
kernel has transmitted completely and can now be used again by user
space, for either TX or RX. Thus, the frame addrs appearing in the
-completion ring are addrs that were previously transmitted using the
+COMPLETION ring are addrs that were previously transmitted using the
TX ring. In summary, the RX and FILL rings are used for the RX path
and the TX and COMPLETION rings are used for the TX path.
@@ -91,11 +91,16 @@ Concepts
========
In order to use an AF_XDP socket, a number of associated objects need
-to be setup.
+to be set up. These objects and their options are explained in the
+following sections.
-Jonathan Corbet has also written an excellent article on LWN,
-"Accelerating networking with AF_XDP". It can be found at
-https://lwn.net/Articles/750845/.
+For an overview on how AF_XDP works, you can also take a look at the
+Linux Plumbers paper from 2018 on the subject:
+http://vger.kernel.org/lpc_net2018_talks/lpc18_paper_af_xdp_perf-v2.pdf. Do
+NOT consult the paper from 2017 on "AF_PACKET v4", the first attempt
+at AF_XDP. Nearly everything changed since then. Jonathan Corbet has
+also written an excellent article on LWN, "Accelerating networking
+with AF_XDP". It can be found at https://lwn.net/Articles/750845/.
UMEM
----
@@ -113,22 +118,22 @@ the next socket B can do this by setting the XDP_SHARED_UMEM flag in
struct sockaddr_xdp member sxdp_flags, and passing the file descriptor
of A to struct sockaddr_xdp member sxdp_shared_umem_fd.
-The UMEM has two single-producer/single-consumer rings, that are used
+The UMEM has two single-producer/single-consumer rings that are used
to transfer ownership of UMEM frames between the kernel and the
user-space application.
Rings
-----
-There are a four different kind of rings: Fill, Completion, RX and
+There are four different kinds of rings: FILL, COMPLETION, RX and
TX. All rings are single-producer/single-consumer, so the user-space
application need explicit synchronization of multiple
processes/threads are reading/writing to them.
-The UMEM uses two rings: Fill and Completion. Each socket associated
+The UMEM uses two rings: FILL and COMPLETION. Each socket associated
with the UMEM must have an RX queue, TX queue or both. Say, that there
is a setup with four sockets (all doing TX and RX). Then there will be
-one Fill ring, one Completion ring, four TX rings and four RX rings.
+one FILL ring, one COMPLETION ring, four TX rings and four RX rings.
The rings are head(producer)/tail(consumer) based rings. A producer
writes the data ring at the index pointed out by struct xdp_ring
@@ -146,7 +151,7 @@ The size of the rings need to be of size power of two.
UMEM Fill Ring
~~~~~~~~~~~~~~
-The Fill ring is used to transfer ownership of UMEM frames from
+The FILL ring is used to transfer ownership of UMEM frames from
user-space to kernel-space. The UMEM addrs are passed in the ring. As
an example, if the UMEM is 64k and each chunk is 4k, then the UMEM has
16 chunks and can pass addrs between 0 and 64k.
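+
+As a sketch, using the libbpf helpers from tools/lib/bpf/xsk.h (the
+``umem->fq`` ring variable is an assumption about the surrounding
+application code), handing two 4k chunks to the kernel could look like
+this:
+
+.. code-block:: c
+
+    __u32 idx;
+
+    if (xsk_ring_prod__reserve(&umem->fq, 2, &idx) == 2) {
+        *xsk_ring_prod__fill_addr(&umem->fq, idx) = 0;
+        *xsk_ring_prod__fill_addr(&umem->fq, idx + 1) = 0x1000;
+        xsk_ring_prod__submit(&umem->fq, 2);
+    }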
@@ -164,8 +169,8 @@ chunks mode, then the incoming addr will be left untouched.
UMEM Completion Ring
~~~~~~~~~~~~~~~~~~~~
-The Completion Ring is used transfer ownership of UMEM frames from
-kernel-space to user-space. Just like the Fill ring, UMEM indicies are
+The COMPLETION ring is used to transfer ownership of UMEM frames from
+kernel-space to user-space. Just like the FILL ring, UMEM indices are
used.
Frames passed from the kernel to user-space are frames that has been
@@ -181,7 +186,7 @@ The RX ring is the receiving side of a socket. Each entry in the ring
is a struct xdp_desc descriptor. The descriptor contains UMEM offset
(addr) and the length of the data (len).
-If no frames have been passed to kernel via the Fill ring, no
+If no frames have been passed to kernel via the FILL ring, no
descriptors will (or can) appear on the RX ring.
The user application consumes struct xdp_desc descriptors from this
@@ -199,8 +204,24 @@ be relaxed in the future.
The user application produces struct xdp_desc descriptors to this
ring.
+Libbpf
+======
+
+Libbpf is a helper library for eBPF and XDP that makes using these
+technologies a lot simpler. It also contains specific helper functions
+in tools/lib/bpf/xsk.h for facilitating the use of AF_XDP. It
+contains two types of functions: those that can be used to make the
+setup of AF_XDP sockets easier and ones that can be used in the data
+plane to access the rings safely and quickly. To see an example on how
+to use this API, please take a look at the sample application in
+samples/bpf/xdpsock_user.c which uses libbpf for both setup and data
+plane operations.
+
+We recommend that you use this library unless you have become a power
+user. It will make your program a lot simpler.
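+
+As a flavor of the setup API, a hedged sketch of creating a socket
+(error handling trimmed; ``umem``, ``rx``, ``tx`` and the interface
+name are assumed to come from the application):
+
+.. code-block:: c
+
+    #include <bpf/xsk.h>
+
+    struct xsk_socket *xsk;
+    struct xsk_socket_config cfg = {
+        .rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
+        .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+    };
+
+    if (xsk_socket__create(&xsk, "eth0", 0 /* queue id */, umem,
+                           &rx, &tx, &cfg))
+        /* handle the error */;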
+
XSKMAP / BPF_MAP_TYPE_XSKMAP
-----------------------------
+============================
On XDP side there is a BPF map type BPF_MAP_TYPE_XSKMAP (XSKMAP) that
is used in conjunction with bpf_redirect_map() to pass the ingress
@@ -216,21 +237,193 @@ queue 17. Only the XDP program executing for eth0 and queue 17 will
successfully pass data to the socket. Please refer to the sample
application (samples/bpf/) in for an example.
+Configuration Flags and Socket Options
+======================================
+
+These are the various configuration flags that can be used to control
+and monitor the behavior of AF_XDP sockets.
+
+XDP_COPY and XDP_ZERO_COPY bind flags
+-------------------------------------
+
+When you bind to a socket, the kernel will first try to use zero-copy
+mode. If zero-copy is not supported, it will fall back on using copy
+mode, i.e. copying all packets out to user space. But if you would
+like to force a certain mode, you can use the following flags. If you
+pass the XDP_COPY flag to the bind call, the kernel will force the
+socket into copy mode. If it cannot use copy mode, the bind call will
+fail with an error. Conversely, the XDP_ZERO_COPY flag will force the
+socket into zero-copy mode or fail.
+
+XDP_SHARED_UMEM bind flag
+-------------------------
+
+This flag enables you to bind multiple sockets to the same UMEM, but
+only if they share the same queue id. In this mode, each socket has
+its own RX and TX rings, but the UMEM (tied to the first socket
+created) only has a single FILL ring and a single COMPLETION
+ring. To use this mode, create the first socket and bind it in the normal
+way. Create a second socket and create an RX and a TX ring, or at
+least one of them, but no FILL or COMPLETION rings as the ones from
+the first socket will be used. In the bind call, set the
+XDP_SHARED_UMEM option and provide the initial socket's fd in the
+sxdp_shared_umem_fd field. You can attach an arbitrary number of extra
+sockets this way.
+
+Which socket will a packet then arrive on? This is decided by the XDP
+program. Put all the sockets in the XSK_MAP and just indicate which
+index in the array you would like to send each packet to. A simple
+round-robin example of distributing packets is shown below:
+
+.. code-block:: c
+
+ #include <linux/bpf.h>
+ #include "bpf_helpers.h"
+
+ #define MAX_SOCKS 16
+
+ struct {
+ __uint(type, BPF_MAP_TYPE_XSKMAP);
+ __uint(max_entries, MAX_SOCKS);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ } xsks_map SEC(".maps");
+
+ static unsigned int rr;
+
+ SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
+ {
+ rr = (rr + 1) & (MAX_SOCKS - 1);
+
+ return bpf_redirect_map(&xsks_map, rr, 0);
+ }
+
+Note that since there is only a single set of FILL and COMPLETION
+rings, and they are single producer, single consumer rings, you need
+to make sure that multiple processes or threads do not use these rings
+concurrently. There are no synchronization primitives in the
+libbpf code that protect multiple users at this point in time.
+
+XDP_USE_NEED_WAKEUP bind flag
+-----------------------------
+
+This option adds support for a new flag called need_wakeup that is
+present in the FILL ring and the TX ring, the rings for which user
+space is a producer. When this option is set in the bind call, the
+need_wakeup flag will be set if the kernel needs to be explicitly
+woken up by a syscall to continue processing packets. If the flag is
+zero, no syscall is needed.
+
+If the flag is set on the FILL ring, the application needs to call
+poll() to be able to continue to receive packets on the RX ring. This
+can happen, for example, when the kernel has detected that there are no
+more buffers on the FILL ring and no buffers left on the RX HW ring of
+the NIC. In this case, interrupts are turned off as the NIC cannot
+receive any packets (as there are no buffers to put them in), and the
+need_wakeup flag is set so that user space can put buffers on the
+FILL ring and then call poll() so that the kernel driver can put these
+buffers on the HW ring and start to receive packets.
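+
+A sketch of the corresponding RX-path check, using the libbpf helpers
+(``umem->fq`` and ``xsk`` are assumed application state):
+
+.. code-block:: c
+
+    if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
+        struct pollfd pfd = {
+            .fd = xsk_socket__fd(xsk),
+            .events = POLLIN,
+        };
+
+        poll(&pfd, 1, -1); /* kick the kernel driver */
+    }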
+
+If the flag is set for the TX ring, it means that the application
+needs to explicitly notify the kernel to send any packets put on the
+TX ring. This can be accomplished either by a poll() call, as in the
+RX path, or by calling sendto().
+
+An example of how to use this flag can be found in
+samples/bpf/xdpsock_user.c. An example with the use of libbpf helpers
+would look like this for the TX path:
+
+.. code-block:: c
+
+ if (xsk_ring_prod__needs_wakeup(&my_tx_ring))
+ sendto(xsk_socket__fd(xsk_handle), NULL, 0, MSG_DONTWAIT, NULL, 0);
+
+I.e., only use the syscall if the flag is set.
+
+We recommend that you always enable this mode as it usually leads to
+better performance especially if you run the application and the
+driver on the same core, but also if you use different cores for the
+application and the kernel driver, as it reduces the number of
+syscalls needed for the TX path.
+
+XDP_{RX|TX|UMEM_FILL|UMEM_COMPLETION}_RING setsockopts
+------------------------------------------------------
+
+These setsockopts set the number of descriptors that the RX, TX,
+FILL, and COMPLETION rings respectively should have. It is mandatory
+to set the size of at least one of the RX and TX rings. If you set
+both, you will be able to both receive and send traffic from your
+application, but if you only want to do one of them, you can save
+resources by only setting up one of them. Both the FILL ring and the
+COMPLETION ring are mandatory if you have a UMEM tied to your socket,
+which is the normal case. But if the XDP_SHARED_UMEM flag is used, any
+socket after the first one does not have a UMEM and should in that
+case not have any FILL or COMPLETION rings created.
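+
+A minimal sketch, assuming ``fd`` is the AF_XDP socket and using
+example sizes (ring sizes must be powers of two):
+
+.. code-block:: c
+
+    #include <linux/if_xdp.h>
+
+    int ring_size = 2048;
+
+    setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_size, sizeof(ring_size));
+    setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_size,
+               sizeof(ring_size));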
+
+XDP_UMEM_REG setsockopt
+-----------------------
+
+This setsockopt registers a UMEM to a socket. This is the area that
+contains all the buffers that packets can reside in. The call takes a
+pointer to the beginning of this area and the size of it. Moreover, it
+also has a parameter called chunk_size that is the size that the UMEM is
+divided into. It can only be 2K or 4K at the moment. If you have a
+UMEM area that is 128K and a chunk size of 2K, this means that you
+will be able to hold a maximum of 128K / 2K = 64 packets in your UMEM
+area and that your largest packet size can be 2K.
+
+There is also an option to set the headroom of each single buffer in
+the UMEM. If you set this to N bytes, it means that the packet will
+start N bytes into the buffer leaving the first N bytes for the
+application to use. The final option is the flags field, but it will
+be dealt with in separate sections for each UMEM flag.
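+
+A sketch matching the 128K/2K example above (``umem_area`` is assumed
+to be a suitably aligned allocation owned by the application):
+
+.. code-block:: c
+
+    struct xdp_umem_reg mr = {
+        .addr = (__u64)(uintptr_t)umem_area,
+        .len = 128 * 1024,
+        .chunk_size = 2048,
+        .headroom = 0,
+    };
+
+    setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));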
+
+SO_BINDTODEVICE setsockopt
+--------------------------
+
+This is a generic SOL_SOCKET option that can be used to tie AF_XDP
+socket to a particular network interface. It is useful when a socket
+is created by a privileged process and passed to a non-privileged one.
+Once the option is set, the kernel will refuse attempts to bind that socket
+to a different interface. Updating the value requires CAP_NET_RAW.
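+
+For example (the interface name is of course setup specific):
+
+.. code-block:: c
+
+    setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0"));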
+
+XDP_STATISTICS getsockopt
+-------------------------
+
+Gets drop statistics of a socket that can be useful for debug
+purposes. The supported statistics are shown below:
+
+.. code-block:: c
+
+ struct xdp_statistics {
+ __u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
+ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+ };
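+
+It is retrieved like any other getsockopt, for example:
+
+.. code-block:: c
+
+    struct xdp_statistics stats;
+    socklen_t optlen = sizeof(stats);
+
+    if (!getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen))
+        printf("rx_dropped: %llu\n",
+               (unsigned long long)stats.rx_dropped);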
+
+XDP_OPTIONS getsockopt
+----------------------
+
+Gets options from an XDP socket. The only one supported so far is
+XDP_OPTIONS_ZEROCOPY which tells you if zero-copy is on or not.
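+
+For example:
+
+.. code-block:: c
+
+    struct xdp_options opts;
+    socklen_t optlen = sizeof(opts);
+
+    if (!getsockopt(fd, SOL_XDP, XDP_OPTIONS, &opts, &optlen) &&
+        (opts.flags & XDP_OPTIONS_ZEROCOPY))
+        /* zero-copy is active on this socket */;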
+
Usage
=====
-In order to use AF_XDP sockets there are two parts needed. The
+In order to use AF_XDP sockets, two parts are needed: the
user-space application and the XDP program. For a complete setup and
usage example, please refer to the sample application. The user-space
side is xdpsock_user.c and the XDP side is part of libbpf.
-The XDP code sample included in tools/lib/bpf/xsk.c is the following::
+The XDP code sample included in tools/lib/bpf/xsk.c is the following:
+
+.. code-block:: c
SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
{
int index = ctx->rx_queue_index;
- // A set entry here means that the correspnding queue_id
+ // A set entry here means that the corresponding queue_id
// has an active AF_XDP socket bound to it.
if (bpf_map_lookup_elem(&xsks_map, &index))
return bpf_redirect_map(&xsks_map, index, 0);
@@ -238,7 +431,10 @@ The XDP code sample included in tools/lib/bpf/xsk.c is the following::
return XDP_PASS;
}
-Naive ring dequeue and enqueue could look like this::
+A simple but not so performant ring dequeue and enqueue could look
+like this:
+
+.. code-block:: c
// struct xdp_rxtx_ring {
// __u32 *producer;
@@ -287,17 +483,16 @@ Naive ring dequeue and enqueue could look like this::
return 0;
}
-
-For a more optimized version, please refer to the sample application.
+But please use the libbpf functions as they are optimized and ready to
+use, and will make your life easier.
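+
+For reference, the libbpf flow is roughly the following sketch (NULL
+selects the default configs; see tools/lib/bpf/xsk.h for the real
+signatures, and the interface name is just an example):
+
+.. code-block:: c
+
+   struct xsk_ring_prod fill, tx;
+   struct xsk_ring_cons comp, rx;
+   struct xsk_umem *umem;
+   struct xsk_socket *xsk;
+
+   /* bufs/size describe the UMEM area as in the sections above. */
+   xsk_umem__create(&umem, bufs, size, &fill, &comp, NULL);
+   xsk_socket__create(&xsk, "eth0", queue_id, umem, &rx, &tx, NULL);
+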
Sample application
==================
There is a xdpsock benchmarking/test application included that
-demonstrates how to use AF_XDP sockets with both private and shared
-UMEMs. Say that you would like your UDP traffic from port 4242 to end
-up in queue 16, that we will enable AF_XDP on. Here, we use ethtool
-for this::
+demonstrates how to use AF_XDP sockets with private UMEMs. Say that
+you would like your UDP traffic from port 4242 to end up in queue 16,
+which we will enable AF_XDP on. Here, we use ethtool for this::
ethtool -N p3p2 rx-flow-hash udp4 fn
ethtool -N p3p2 flow-type udp4 src-port 4242 dst-port 4242 \
@@ -311,13 +506,18 @@ using::
For XDP_SKB mode, use the switch "-S" instead of "-N" and all options
can be displayed with "-h", as usual.
+This sample application uses libbpf to make the setup and usage of
+AF_XDP simpler. If you want to know how the raw uapi of AF_XDP is
+really used to make something more advanced, take a look at the libbpf
+code in tools/lib/bpf/xsk.[ch].
+
FAQ
=======
Q: I am not seeing any traffic on the socket. What am I doing wrong?
A: When a netdev of a physical NIC is initialized, Linux usually
- allocates one Rx and Tx queue pair per core. So on a 8 core system,
+   allocates one RX and TX queue pair per core. So on an 8 core system,
queue ids 0 to 7 will be allocated, one per core. In the AF_XDP
bind call or the xsk_socket__create libbpf function call, you
specify a specific queue id to bind to and it is only the traffic
@@ -343,9 +543,21 @@ A: When a netdev of a physical NIC is initialized, Linux usually
sudo ethtool -N <interface> flow-type udp4 src-port 4242 dst-port \
4242 action 2
- A number of other ways are possible all up to the capabilitites of
+   A number of other ways are possible, all up to the capabilities of
the NIC you have.
+Q: Can I use the XSKMAP to implement a switch between different UMEMs
+ in copy mode?
+
+A: The short answer is no, that is not supported at the moment. The
+ XSKMAP can only be used to switch traffic coming in on queue id X
+ to sockets bound to the same queue id X. The XSKMAP can contain
+ sockets bound to different queue ids, for example X and Y, but only
+   traffic coming in from queue id Y can be directed to sockets bound
+ to the same queue id Y. In zero-copy mode, you should use the
+ switch, or other distribution mechanism, in your NIC to direct
+ traffic to the correct queue id and socket.
+
Credits
=======
diff --git a/Documentation/networking/decnet.txt b/Documentation/networking/decnet.txt
deleted file mode 100644
index d192f8b9948b..000000000000
--- a/Documentation/networking/decnet.txt
+++ /dev/null
@@ -1,230 +0,0 @@
- Linux DECnet Networking Layer Information
- ===========================================
-
-1) Other documentation....
-
- o Project Home Pages
- http://www.chygwyn.com/ - Kernel info
- http://linux-decnet.sourceforge.net/ - Userland tools
- http://www.sourceforge.net/projects/linux-decnet/ - Status page
-
-2) Configuring the kernel
-
-Be sure to turn on the following options:
-
- CONFIG_DECNET (obviously)
- CONFIG_PROC_FS (to see what's going on)
- CONFIG_SYSCTL (for easy configuration)
-
-if you want to try out router support (not properly debugged yet)
-you'll need the following options as well...
-
- CONFIG_DECNET_ROUTER (to be able to add/delete routes)
- CONFIG_NETFILTER (will be required for the DECnet routing daemon)
-
-Don't turn on SIOCGIFCONF support for DECnet unless you are really sure
-that you need it, in general you won't and it can cause ifconfig to
-malfunction.
-
-Run time configuration has changed slightly from the 2.4 system. If you
-want to configure an endnode, then the simplified procedure is as follows:
-
- o Set the MAC address on your ethernet card before starting _any_ other
- network protocols.
-
-As soon as your network card is brought into the UP state, DECnet should
-start working. If you need something more complicated or are unsure how
-to set the MAC address, see the next section. Also all configurations which
-worked with 2.4 will work under 2.5 with no change.
-
-3) Command line options
-
-You can set a DECnet address on the kernel command line for compatibility
-with the 2.4 configuration procedure, but in general it's not needed any more.
-If you do st a DECnet address on the command line, it has only one purpose
-which is that its added to the addresses on the loopback device.
-
-With 2.4 kernels, DECnet would only recognise addresses as local if they
-were added to the loopback device. In 2.5, any local interface address
-can be used to loop back to the local machine. Of course this does not
-prevent you adding further addresses to the loopback device if you
-want to.
-
-N.B. Since the address list of an interface determines the addresses for
-which "hello" messages are sent, if you don't set an address on the loopback
-interface then you won't see any entries in /proc/net/neigh for the local
-host until such time as you start a connection. This doesn't affect the
-operation of the local communications in any other way though.
-
-The kernel command line takes options looking like the following:
-
- decnet.addr=1,2
-
-the two numbers are the node address 1,2 = 1.2 For 2.2.xx kernels
-and early 2.3.xx kernels, you must use a comma when specifying the
-DECnet address like this. For more recent 2.3.xx kernels, you may
-use almost any character except space, although a `.` would be the most
-obvious choice :-)
-
-There used to be a third number specifying the node type. This option
-has gone away in favour of a per interface node type. This is now set
-using /proc/sys/net/decnet/conf/<dev>/forwarding. This file can be
-set with a single digit, 0=EndNode, 1=L1 Router and 2=L2 Router.
-
-There are also equivalent options for modules. The node address can
-also be set through the /proc/sys/net/decnet/ files, as can other system
-parameters.
-
-Currently the only supported devices are ethernet and ip_gre. The
-ethernet address of your ethernet card has to be set according to the DECnet
-address of the node in order for it to be autoconfigured (and then appear in
-/proc/net/decnet_dev). There is a utility available at the above
-FTP sites called dn2ethaddr which can compute the correct ethernet
-address to use. The address can be set by ifconfig either before or
-at the time the device is brought up. If you are using RedHat you can
-add the line:
-
- MACADDR=AA:00:04:00:03:04
-
-or something similar, to /etc/sysconfig/network-scripts/ifcfg-eth0 or
-wherever your network card's configuration lives. Setting the MAC address
-of your ethernet card to an address starting with "hi-ord" will cause a
-DECnet address which matches to be added to the interface (which you can
-verify with iproute2).
-
-The default device for routing can be set through the /proc filesystem
-by setting /proc/sys/net/decnet/default_device to the
-device you want DECnet to route packets out of when no specific route
-is available. Usually this will be eth0, for example:
-
- echo -n "eth0" >/proc/sys/net/decnet/default_device
-
-If you don't set the default device, then it will default to the first
-ethernet card which has been autoconfigured as described above. You can
-confirm that by looking in the default_device file of course.
-
-There is a list of what the other files under /proc/sys/net/decnet/ do
-on the kernel patch web site (shown above).
-
-4) Run time kernel configuration
-
-This is either done through the sysctl/proc interface (see the kernel web
-pages for details on what the various options do) or through the iproute2
-package in the same way as IPv4/6 configuration is performed.
-
-Documentation for iproute2 is included with the package, although there is
-as yet no specific section on DECnet, most of the features apply to both
-IP and DECnet, albeit with DECnet addresses instead of IP addresses and
-a reduced functionality.
-
-If you want to configure a DECnet router you'll need the iproute2 package
-since its the _only_ way to add and delete routes currently. Eventually
-there will be a routing daemon to send and receive routing messages for
-each interface and update the kernel routing tables accordingly. The
-routing daemon will use netfilter to listen to routing packets, and
-rtnetlink to update the kernels routing tables.
-
-The DECnet raw socket layer has been removed since it was there purely
-for use by the routing daemon which will now use netfilter (a much cleaner
-and more generic solution) instead.
-
-5) How can I tell if its working ?
-
-Here is a quick guide of what to look for in order to know if your DECnet
-kernel subsystem is working.
-
- - Is the node address set (see /proc/sys/net/decnet/node_address)
- - Is the node of the correct type
- (see /proc/sys/net/decnet/conf/<dev>/forwarding)
- - Is the Ethernet MAC address of each Ethernet card set to match
- the DECnet address. If in doubt use the dn2ethaddr utility available
- at the ftp archive.
- - If the previous two steps are satisfied, and the Ethernet card is up,
- you should find that it is listed in /proc/net/decnet_dev and also
- that it appears as a directory in /proc/sys/net/decnet/conf/. The
- loopback device (lo) should also appear and is required to communicate
- within a node.
- - If you have any DECnet routers on your network, they should appear
- in /proc/net/decnet_neigh, otherwise this file will only contain the
- entry for the node itself (if it doesn't check to see if lo is up).
- - If you want to send to any node which is not listed in the
- /proc/net/decnet_neigh file, you'll need to set the default device
- to point to an Ethernet card with connection to a router. This is
- again done with the /proc/sys/net/decnet/default_device file.
- - Try starting a simple server and client, like the dnping/dnmirror
- over the loopback interface. With luck they should communicate.
- For this step and those after, you'll need the DECnet library
- which can be obtained from the above ftp sites as well as the
- actual utilities themselves.
- - If this seems to work, then try talking to a node on your local
- network, and see if you can obtain the same results.
- - At this point you are on your own... :-)
-
-6) How to send a bug report
-
-If you've found a bug and want to report it, then there are several things
-you can do to help me work out exactly what it is that is wrong. Useful
-information (_most_ of which _is_ _essential_) includes:
-
- - What kernel version are you running ?
- - What version of the patch are you running ?
- - How far though the above set of tests can you get ?
- - What is in the /proc/decnet* files and /proc/sys/net/decnet/* files ?
- - Which services are you running ?
- - Which client caused the problem ?
- - How much data was being transferred ?
- - Was the network congested ?
- - How can the problem be reproduced ?
- - Can you use tcpdump to get a trace ? (N.B. Most (all?) versions of
- tcpdump don't understand how to dump DECnet properly, so including
- the hex listing of the packet contents is _essential_, usually the -x flag.
- You may also need to increase the length grabbed with the -s flag. The
- -e flag also provides very useful information (ethernet MAC addresses))
-
-7) MAC FAQ
-
-A quick FAQ on ethernet MAC addresses to explain how Linux and DECnet
-interact and how to get the best performance from your hardware.
-
-Ethernet cards are designed to normally only pass received network frames
-to a host computer when they are addressed to it, or to the broadcast address.
-
-Linux has an interface which allows the setting of extra addresses for
-an ethernet card to listen to. If the ethernet card supports it, the
-filtering operation will be done in hardware, if not the extra unwanted packets
-received will be discarded by the host computer. In the latter case,
-significant processor time and bus bandwidth can be used up on a busy
-network (see the NAPI documentation for a longer explanation of these
-effects).
-
-DECnet makes use of this interface to allow running DECnet on an ethernet
-card which has already been configured using TCP/IP (presumably using the
-built in MAC address of the card, as usual) and/or to allow multiple DECnet
-addresses on each physical interface. If you do this, be aware that if your
-ethernet card doesn't support perfect hashing in its MAC address filter
-then your computer will be doing more work than required. Some cards
-will simply set themselves into promiscuous mode in order to receive
-packets from the DECnet specified addresses. So if you have one of these
-cards its better to set the MAC address of the card as described above
-to gain the best efficiency. Better still is to use a card which supports
-NAPI as well.
-
-
-8) Mailing list
-
-If you are keen to get involved in development, or want to ask questions
-about configuration, or even just report bugs, then there is a mailing
-list that you can join, details are at:
-
-http://sourceforge.net/mail/?group_id=4993
-
-9) Legal Info
-
-The Linux DECnet project team have placed their code under the GPL. The
-software is provided "as is" and without warranty express or implied.
-DECnet is a trademark of Compaq. This software is not a product of
-Compaq. We acknowledge the help of people at Compaq in providing extra
-documentation above and beyond what was previously publicly available.
-
-Steve Whitehouse <SteveW@ACM.org>
-
diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst
index 2c2ec99b5088..78bef529464f 100644
--- a/Documentation/power/runtime_pm.rst
+++ b/Documentation/power/runtime_pm.rst
@@ -382,6 +382,12 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
nonzero, increment the counter and return 1; otherwise return 0 without
changing the counter
+ `int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);`
+ - return -EINVAL if 'power.disable_depth' is nonzero; otherwise, if the
+ runtime PM status is RPM_ACTIVE, and either ign_usage_count is true
+ or the device's usage_count is non-zero, increment the counter and
+ return 1; otherwise return 0 without changing the counter
+
`void pm_runtime_put_noidle(struct device *dev);`
- decrement the device's usage counter
diff --git a/Documentation/process/code-of-conduct-interpretation.rst b/Documentation/process/code-of-conduct-interpretation.rst
index e899f14a4ba2..43da2cc2e3b9 100644
--- a/Documentation/process/code-of-conduct-interpretation.rst
+++ b/Documentation/process/code-of-conduct-interpretation.rst
@@ -51,7 +51,7 @@ the Technical Advisory Board (TAB) or other maintainers if you're
uncertain how to handle situations that come up. It will not be
considered a violation report unless you want it to be. If you are
uncertain about approaching the TAB or any other maintainers, please
-reach out to our conflict mediator, Mishi Choudhary <mishi@linux.com>.
+reach out to our conflict mediator, Joanna Lee <jlee@linuxfoundation.org>.
In the end, "be kind to each other" is really what the end goal is for
everybody. We know everyone is human and we all fail at times, but the
diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
index 4c91abad7b35..71ac38739d27 100644
--- a/Documentation/sound/hd-audio/models.rst
+++ b/Documentation/sound/hd-audio/models.rst
@@ -702,7 +702,7 @@ ref
no-jd
BIOS setup but without jack-detection
intel
- Intel DG45* mobos
+ Intel D*45* mobos
dell-m6-amic
Dell desktops/laptops with analog mics
dell-m6-dmic
diff --git a/Documentation/sound/soc/dapm.rst b/Documentation/sound/soc/dapm.rst
index 8e44107933ab..c3154ce6e1b2 100644
--- a/Documentation/sound/soc/dapm.rst
+++ b/Documentation/sound/soc/dapm.rst
@@ -234,7 +234,7 @@ corresponding soft power control. In this case it is necessary to create
a virtual widget - a widget with no control bits e.g.
::
- SND_SOC_DAPM_MIXER("AC97 Mixer", SND_SOC_DAPM_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("AC97 Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
This can be used to merge to signal paths together in software.
diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py
index eeb394b39e2c..8b416bfd75ac 100644
--- a/Documentation/sphinx/load_config.py
+++ b/Documentation/sphinx/load_config.py
@@ -3,7 +3,7 @@
import os
import sys
-from sphinx.util.pycompat import execfile_
+from sphinx.util.osutil import fs_encoding
# ------------------------------------------------------------------------------
def loadConfig(namespace):
@@ -48,7 +48,9 @@ def loadConfig(namespace):
sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
config = namespace.copy()
config['__file__'] = config_file
- execfile_(config_file, config)
+ with open(config_file, 'rb') as f:
+ code = compile(f.read(), fs_encoding, 'exec')
+ exec(code, config)
del config['__file__']
namespace.update(config)
else:
diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst
index 3f3d1b960fe7..931f3b71745a 100644
--- a/Documentation/trace/histogram.rst
+++ b/Documentation/trace/histogram.rst
@@ -39,7 +39,7 @@ Documentation written by Tom Zanussi
will use the event's kernel stacktrace as the key. The keywords
'keys' or 'key' can be used to specify keys, and the keywords
'values', 'vals', or 'val' can be used to specify values. Compound
- keys consisting of up to two fields can be specified by the 'keys'
+ keys consisting of up to three fields can be specified by the 'keys'
keyword. Hashing a compound key produces a unique entry in the
table for each unique combination of component keys, and can be
useful for providing more fine-grained summaries of event data.
diff --git a/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt b/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt
index 66c7c568bd86..9c39ee58ea50 100644
--- a/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt
+++ b/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt
@@ -649,7 +649,7 @@ video_device注册
接下来你需要注册视频设备:这会为你创建一个字符设备。
- err = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ err = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (err) {
video_device_release(vdev); /* or kfree(my_vdev); */
return err;
@@ -660,7 +660,7 @@ video_device注册
注册哪种设备是根据类型(type)参数。存在以下类型:
-VFL_TYPE_GRABBER: 用于视频输入/输出设备的 videoX
+VFL_TYPE_VIDEO: 用于视频输入/输出设备的 videoX
VFL_TYPE_VBI: 用于垂直消隐数据的 vbiX (例如,隐藏式字幕,图文电视)
VFL_TYPE_RADIO: 用于广播调谐器的 radioX
diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt
index fd22224853e5..180475bcd671 100644
--- a/Documentation/virt/kvm/api.txt
+++ b/Documentation/virt/kvm/api.txt
@@ -3615,6 +3615,18 @@ Type: vm ioctl
Parameters: struct kvm_s390_cmma_log (in, out)
Returns: 0 on success, a negative value on error
+Errors:
+
+ ====== =============================================================
+ ENOMEM not enough memory can be allocated to complete the task
+ ENXIO if CMMA is not enabled
+ EINVAL if KVM_S390_CMMA_PEEK is not set but migration mode was not enabled
+ EINVAL if KVM_S390_CMMA_PEEK is not set but dirty tracking has been
+ disabled (and thus migration mode was automatically disabled)
+ EFAULT if the userspace address is invalid or if no page table is
+ present for the addresses (e.g. when using hugepages).
+ ====== =============================================================
+
This ioctl is used to get the values of the CMMA bits on the s390
architecture. It is meant to be used in two scenarios:
- During live migration to save the CMMA values. Live migration needs
@@ -3691,12 +3703,6 @@ mask is unused.
values points to the userspace buffer where the result will be stored.
-This ioctl can fail with -ENOMEM if not enough memory can be allocated to
-complete the task, with -ENXIO if CMMA is not enabled, with -EINVAL if
-KVM_S390_CMMA_PEEK is not set but migration mode was not enabled, with
--EFAULT if the userspace address is invalid or if no page table is
-present for the addresses (e.g. when using hugepages).
-
4.108 KVM_S390_SET_CMMA_BITS
Capability: KVM_CAP_S390_CMMA_MIGRATION
diff --git a/Documentation/virt/kvm/devices/vm.txt b/Documentation/virt/kvm/devices/vm.txt
index 4ffb82b02468..38ec142a4a84 100644
--- a/Documentation/virt/kvm/devices/vm.txt
+++ b/Documentation/virt/kvm/devices/vm.txt
@@ -254,6 +254,10 @@ Allows userspace to start migration mode, needed for PGSTE migration.
Setting this attribute when migration mode is already active will have
no effects.
+Dirty tracking must be enabled on all memslots, else -EINVAL is returned. When
+dirty tracking is disabled on any memslot, migration mode is automatically
+stopped.
+
Parameters: none
Returns: -ENOMEM if there is not enough free memory to start migration mode
-EINVAL if the state of the VM is invalid (e.g. no memory defined)
diff --git a/MAINTAINERS b/MAINTAINERS
index c004c8628e39..2d612a3dadb6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1101,7 +1101,7 @@ APEX EMBEDDED SYSTEMS STX104 IIO DRIVER
M: William Breathitt Gray <vilhelm.gray@gmail.com>
L: linux-iio@vger.kernel.org
S: Maintained
-F: drivers/iio/adc/stx104.c
+F: drivers/iio/addac/stx104.c
APM DRIVER
M: Jiri Kosina <jikos@kernel.org>
@@ -2778,8 +2778,11 @@ F: include/uapi/linux/atm*
ATMEL MACB ETHERNET DRIVER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
+R: Harini Katakam <harini.katakam@xilinx.com>
+L: git@xilinx.com
S: Supported
F: drivers/net/ethernet/cadence/
+W: https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18841740/Macb+Driver
ATMEL MAXTOUCH DRIVER
M: Nick Dyer <nick@shmanahar.org>
@@ -3601,6 +3604,13 @@ S: Supported
F: Documentation/filesystems/caching/cachefiles.txt
F: fs/cachefiles/
+CADENCE I2C DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+R: Venkata Visweswarachari Mallavarapu <vmallava@xilinx.com>
+L: git@xilinx.com
+F: drivers/i2c/busses/i2c-cadence.c
+W: https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18842160/Cadence+I2C+Driver
+
CADENCE MIPI-CSI2 BRIDGES
M: Maxime Ripard <mripard@kernel.org>
L: linux-media@vger.kernel.org
@@ -3900,6 +3910,7 @@ F: Documentation/translations/zh_CN/
CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
M: Peter Chen <Peter.Chen@nxp.com>
+R: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
L: linux-usb@vger.kernel.org
S: Maintained
@@ -4630,13 +4641,6 @@ F: include/uapi/linux/dccp.h
F: include/linux/tfrc.h
F: net/dccp/
-DECnet NETWORK LAYER
-W: http://linux-decnet.sourceforge.net
-L: linux-decnet-user@lists.sourceforge.net
-S: Orphan
-F: Documentation/networking/decnet.txt
-F: net/decnet/
-
DECSTATION PLATFORM SUPPORT
M: "Maciej W. Rozycki" <macro@linux-mips.org>
L: linux-mips@vger.kernel.org
@@ -4753,6 +4757,7 @@ F: drivers/usb/dwc2/
DESIGNWARE USB3 DRD IP DRIVER
M: Felipe Balbi <balbi@kernel.org>
+R: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
L: linux-usb@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
S: Maintained
@@ -5587,6 +5592,15 @@ F: drivers/gpu/drm/etnaviv/
F: include/uapi/drm/etnaviv_drm.h
F: Documentation/devicetree/bindings/display/etnaviv/
+DRM DRIVERS FOR XILINX
+M: Hyun Kwon <hyun.kwon@xilinx.com>
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/xlnx/
+F: Documentation/devicetree/bindings/display/xlnx/
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
DRM DRIVERS FOR ZTE ZX
M: Shawn Guo <shawnguo@kernel.org>
L: dri-devel@lists.freedesktop.org
@@ -9283,6 +9297,12 @@ F: drivers/ata/pata_ftide010.c
F: drivers/ata/sata_gemini.c
F: drivers/ata/sata_gemini.h
+LIBATA SATA AHCI CEVA CONTROLLER DRIVER
+M: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
+S: Maintained
+F: drivers/ata/ahci_ceva.c
+F: Documentation/devicetree/bindings/ata/ahci-ceva.txt
+
LIBATA SATA AHCI PLATFORM devices support
M: Hans de Goede <hdegoede@redhat.com>
M: Jens Axboe <axboe@kernel.dk>
@@ -14674,6 +14694,7 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/phy/phylink.c
F: drivers/net/phy/sfp*
+F: include/linux/mdio/mdio-i2c.h
F: include/linux/phylink.h
F: include/linux/sfp.h
K: phylink
@@ -17032,6 +17053,7 @@ F: drivers/net/wireless/rndis_wlan.c
USB XHCI DRIVER
M: Mathias Nyman <mathias.nyman@intel.com>
+R: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
L: linux-usb@vger.kernel.org
S: Supported
F: drivers/usb/host/xhci*
@@ -17089,6 +17111,11 @@ F: Documentation/driver-api/uio-howto.rst
F: drivers/uio/
F: include/linux/uio_driver.h
+USERSPACE I/O (UIO) DRIVER FOR XILINX AI ENGINE NPI
+M: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+F: Documentation/devicetree/bindings/soc/xilinx/xlnx,ai-engine-npi.txt
+
UTIL-LINUX PACKAGE
M: Karel Zak <kzak@redhat.com>
L: util-linux@vger.kernel.org
@@ -17877,7 +17904,8 @@ S: Supported
F: sound/xen/*
XFS FILESYSTEM
-M: Darrick J. Wong <darrick.wong@oracle.com>
+M: Chandan Babu R <chandan.babu@oracle.com>
+M: Darrick J. Wong <djwong@kernel.org>
M: linux-xfs@vger.kernel.org
L: linux-xfs@vger.kernel.org
W: http://xfs.org/
@@ -17891,10 +17919,210 @@ F: fs/xfs/
F: include/uapi/linux/dqblk_xfs.h
F: include/uapi/linux/fsmap.h
+XILINX AUDIO FORMATTER (ASoC) DRIVER
+M: Maruthi Srinivas Bayyavarapu <maruthi.srinivas.bayyavarapu@xilinx.com>
+R: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
+F: sound/soc/xilinx/xlnx_formatter_pcm.c
+
+XILINX AXI DMAENGINE DRIVER
+M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+R: Harini Katakam <harini.katakam@xilinx.com>
+S: Maintained
+F: drivers/dma/xilinx/xilinx_dma.c
+F: Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
+
XILINX AXI ETHERNET DRIVER
M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+R: Harini Katakam <harini.katakam@xilinx.com>
S: Maintained
F: drivers/net/ethernet/xilinx/xilinx_axienet*
+F: Documentation/devicetree/bindings/net/xilinx_axienet.txt
+
+XILINX AXI SPI DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+R: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+S: Maintained
+F: drivers/spi/spi-xilinx.c
+F: Documentation/devicetree/bindings/spi/spi-xilinx.txt
+
+XILINX FLEXNOC PERFORMANCE MONITOR DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+R: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Srinivas Goud <srinivas.goud@xilinx.com>
+S: Maintained
+F: drivers/misc/xilinx_flex_pm.c
+F: Documentation/devicetree/bindings/misc/xlnx,flexnoc-pm.txt
+
+XILINX AXI USB DRIVER
+M: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
+S: Maintained
+F: drivers/usb/gadget/udc/udc-xilinx.c
+F: Documentation/devicetree/bindings/usb/udc-xilinx.txt
+
+XILINX AXI PERFORMANCE MONITOR DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+S: Maintained
+F: drivers/uio/uio_xilinx_apm.c
+F: Documentation/devicetree/bindings/uio/xilinx_apm.txt
+
+XILINX CAN DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+R: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/net/can/xilinx_can.c
+F: Documentation/devicetree/bindings/net/can/xilinx_can.txt
+
+XILINX DMA FRAMEBUFFER READ/WRITE DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt
+F: drivers/dma/xilinx/xilinx_frmbuf.c
+F: include/linux/dma/xilinx_frmbuf.h
+
+XILINX EMACLITE DRIVER
+M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+R: Harini Katakam <harini.katakam@xilinx.com>
+S: Maintained
+F: drivers/net/ethernet/xilinx/xilinx_emaclite.c
+F: Documentation/devicetree/bindings/net/xilinx_emaclite.txt
+
+XILINX GMII2RGMII DRIVER
+M: Harini Katakam <harini.katakam@xilinx.com>
+R: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/net/phy/xilinx_gmii2rgmii.c
+W: https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18842246/Xilinx+GMII2RGMII+convertor
+
+XILINX GQSPI ZYNQMP SPI DRIVER
+M: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+R: Amit Kumar Mahapatra <amit.kumar-mahapatra@xilinx.com>
+F: drivers/spi/spi-zynqmp-gqspi.c
+W: https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18841754/Zynqmp+QSPI+Driver
+
+XILINX I2S AUDIO (ASoC) DRIVER
+M: Maruthi Srinivas Bayyavarapu <maruthi.srinivas.bayyavarapu@xilinx.com>
+R: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/sound/xlnx,i2s.txt
+F: sound/soc/xilinx/xlnx_i2s.c
+
+XILINX MEDIA AXIS SWITCH DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,v-axi4s-switch.txt
+F: drivers/media/platform/xilinx/xilinx-axis-switch.c
+
+XILINX MEDIA CSI2RXSS DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+L: git@xilinx.com
+S: Maintained
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.txt
+F: drivers/media/platform/xilinx/xilinx-csi2rxss.c
+F: include/uapi/linux/xilinx-csi2rxss.h
+
+XILINX MEDIA DEMOSAIC DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+L: git@xilinx.com
+S: Maintained
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,v-demosaic.txt
+F: drivers/media/platform/xilinx/xilinx-demosaic.c
+
+XILINX MEDIA GAMMA DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,v-gamma-lut.txt
+F: drivers/media/platform/xilinx/xilinx-gamma.c
+F: drivers/media/platform/xilinx/xilinx-gamma-coeff.h
+
+XILINX MEDIA SDI RX DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,sdirxss.txt
+F: drivers/media/platform/xilinx/xilinx-sdirxss.c
+F: include/uapi/linux/xilinx-sdirxss.h
+
+XILINX MEDIA VPSS COLOR SPACE CONVERTER DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+L: git@xilinx.com
+S: Maintained
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-csc.txt
+F: drivers/media/platform/xilinx/xilinx-vpss-csc.c
+
+XILINX MEDIA VPSS SCALER DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-scaler.txt
+F: drivers/media/platform/xilinx/xilinx-vpss-scaler.c
+
+XILINX PL SOUND CARD (ASoC) DRIVER
+M: Maruthi Srinivas Bayyavarapu <maruthi.srinivas.bayyavarapu@xilinx.com>
+R: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: sound/soc/xilinx/xlnx_pl_snd_card.c
+
+XILINX QSPI ZYNQ SPI DRIVER
+M: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+R: Amit Kumar Mahapatra <amit.kumar-mahapatra@xilinx.com>
+F: drivers/spi/spi-zynq-qspi.c
+W: https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18842262/Zynq+QSPI+Driver
+
+XILINX RTC ZYNQMP DRIVER
+M: Neeli Srinivas <srinivas.neeli@xilinx.com>
+R: Srinivas Goud <srinivas.goud@xilinx.com>
+R: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+S: Maintained
+F: drivers/rtc/rtc-zynqmp.c
+
+XILINX SD-FEC IP CORES
+M: Derek Kiernan <derek.kiernan@xilinx.com>
+M: Dragan Cvetic <dragan.cvetic@xilinx.com>
+S: Maintained
+F: Documentation/devicetree/bindings/misc/xlnx,sd-fec.txt
+F: Documentation/misc-devices/xilinx_sdfec.rst
+F: drivers/misc/xilinx_sdfec.c
+F: drivers/misc/Kconfig
+F: drivers/misc/Makefile
+F: include/uapi/misc/xilinx_sdfec.h
+
+XILINX SDI AUDIO (ASoC) DRIVER
+M: Maruthi Srinivas Bayyavarapu <maruthi.srinivas.bayyavarapu@xilinx.com>
+R: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/sound/xlnx,v-uhdsdi-audio.txt
+F: sound/soc/xilinx/xlnx_sdi_audio.c
+
+XILINX SPDIF AUDIO (ASoC) DRIVER
+M: Maruthi Srinivas Bayyavarapu <maruthi.srinivas.bayyavarapu@xilinx.com>
+R: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/sound/xlnx,spdif.txt
+F: sound/soc/xilinx/xlnx_spdif.c
XILINX UARTLITE SERIAL DRIVER
M: Peter Korsgaard <jacmet@sunsite.dk>
@@ -17902,6 +18130,12 @@ L: linux-serial@vger.kernel.org
S: Maintained
F: drivers/tty/serial/uartlite.c
+XILINX UARTPS DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+S: Maintained
+F: drivers/tty/serial/xilinx_uartps.c
+F: Documentation/devicetree/bindings/serial/cdns,uart.txt
+
XILINX VIDEO IP CORES
M: Hyun Kwon <hyun.kwon@xilinx.com>
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
@@ -17923,6 +18157,71 @@ F: drivers/misc/Kconfig
F: drivers/misc/Makefile
F: include/uapi/misc/xilinx_sdfec.h
+XILINX ZYNQ FPGA MANAGER DRIVER
+M: Nava Kishore Manne <nava.manne@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/fpga/zynq-fpga.c
+F: Documentation/devicetree/bindings/fpga/xilinx-zynq-fpga-mgr.txt
+
+XILINX ZYNQMP AES DRIVER
+M: Kalyani Akula <kalyani.akula@xilinx.com>
+M: Nava Kishore Manne <nava.manne@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/crypto/zynqmp-aes.c
+F: Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.txt
+
+XILINX ZYNQMP DMAENGINE DRIVER
+M: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+R: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Harini Katakam <harini.katakam@xilinx.com>
+S: Maintained
+F: drivers/dma/xilinx/zynqmp_dma.c
+F: Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt
+
+XILINX ZYNQMP FPGA MANAGER DRIVER
+M: Nava Kishore Manne <nava.manne@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/fpga/zynqmp-fpga.c
+F: Documentation/devicetree/bindings/fpga/xilinx-zynqmp-fpga-mgr.txt
+
+ZYNQMP IPI MAILBOX CONTROLLER DRIVER
+M: Wendy Liang <wendy.liang@xilinx.com>
+S: Maintained
+F: drivers/mailbox/zynqmp-ipi-mailbox.c
+F: include/linux/mailbox/zynqmp-ipi-message.h
+F: Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt
+
+XILINX ZYNQMP PHY DRIVER
+M: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
+S: Maintained
+F: drivers/phy/phy-zynqmp.c
+F: Documentation/devicetree/bindings/phy/phy-zynqmp.txt
+
+XILINX ZYNQMP R5 REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
+M: Wendy Liang <wendy.liang@xilinx.com>
+S: Maintained
+F: Documentation/devicetree/bindings/remoteproc/xilinx,zynqmp-r5-remoteproc.txt
+F: drivers/remoteproc/zynqmp_r5_remoteproc.c
+
+XILINX ZYNQMP RSA DRIVER
+M: Nava Kishore Manne <nava.manne@xilinx.com>
+M: Kalyani Akula <kalyani.akula@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/crypto/zynqmp-rsa.c
+F: Documentation/devicetree/bindings/crypto/zynqmp-rsa.txt
+
+XILINX ZYNQMP SHA DRIVER
+M: Nava Kishore Manne <nava.manne@xilinx.com>
+M: Kalyani Akula <kalyani.akula@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/crypto/zynqmp-sha.c
+F: Documentation/devicetree/bindings/crypto/zynqmp-sha.txt
+
XILLYBUS DRIVER
M: Eli Billauer <eli.billauer@gmail.com>
L: linux-kernel@vger.kernel.org
diff --git a/Makefile b/Makefile
index 43bbf9639777..9732894f547b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
-SUBLEVEL = 209
+SUBLEVEL = 273
EXTRAVERSION =
NAME = Kleptomaniac Octopus
@@ -89,9 +89,16 @@ endif
# If the user is running make -s (silent mode), suppress echoing of
# commands
+# make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS.
-ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),)
- quiet=silent_
+ifeq ($(filter 3.%,$(MAKE_VERSION)),)
+silence:=$(findstring s,$(firstword -$(MAKEFLAGS)))
+else
+silence:=$(findstring s,$(filter-out --%,$(MAKEFLAGS)))
+endif
+
+ifeq ($(silence),s)
+quiet=silent_
endif
export quiet Q KBUILD_VERBOSE
@@ -776,6 +783,10 @@ endif
KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+
+# These result in bogus false positives
+KBUILD_CFLAGS += $(call cc-disable-warning, dangling-pointer)
+
ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
else
@@ -802,7 +813,9 @@ DEBUG_CFLAGS += -gsplit-dwarf
else
DEBUG_CFLAGS += -g
endif
-ifneq ($(LLVM_IAS),1)
+ifeq ($(LLVM_IAS),1)
+KBUILD_AFLAGS += -g
+else
KBUILD_AFLAGS += -Wa,-gdwarf-2
endif
endif
@@ -932,6 +945,9 @@ KBUILD_CFLAGS += $(KCFLAGS)
KBUILD_LDFLAGS_MODULE += --build-id
LDFLAGS_vmlinux += --build-id
+KBUILD_LDFLAGS += -z noexecstack
+KBUILD_LDFLAGS += $(call ld-option,--no-warn-rwx-segments)
+
ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
LDFLAGS_vmlinux += $(call ld-option, -X,)
endif
diff --git a/arch/Kconfig b/arch/Kconfig
index 2219a07dca1e..4d03616bf597 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -271,6 +271,9 @@ config ARCH_HAS_UNCACHED_SEGMENT
select ARCH_HAS_DMA_PREP_COHERENT
bool
+config ARCH_HAS_CPU_FINALIZE_INIT
+ bool
+
# Select if arch init_task must go in the __init_task_data section
config ARCH_TASK_STRUCT_ON_STACK
bool
diff --git a/arch/alpha/include/asm/bugs.h b/arch/alpha/include/asm/bugs.h
deleted file mode 100644
index 78030d1c7e7e..000000000000
--- a/arch/alpha/include/asm/bugs.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * include/asm-alpha/bugs.h
- *
- * Copyright (C) 1994 Linus Torvalds
- */
-
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- * void check_bugs(void);
- */
-
-/*
- * I don't know of any alpha bugs yet.. Nice chip
- */
-
-static void check_bugs(void)
-{
-}
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index 2e09248f8324..c27d01232799 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -469,8 +469,10 @@ entSys:
#ifdef CONFIG_AUDITSYSCALL
lda $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
and $3, $6, $3
-#endif
bne $3, strace
+#else
+ blbs $3, strace /* check for SYSCALL_TRACE in disguise */
+#endif
beq $4, 1f
ldq $27, 0($5)
1: jsr $26, ($27), sys_ni_syscall
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index ac110ae8f978..b19a8aae74e1 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -146,10 +146,8 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr;
symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr;
- /* The small sections were sorted to the end of the segment.
- The following should definitely cover them. */
- gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
got = sechdrs[me->arch.gotsecindex].sh_addr;
+ gp = got + 0x8000;
for (i = 0; i < n; i++) {
unsigned long r_sym = ELF64_R_SYM (rela[i].r_info);
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index bf497b8b0ec6..bbe7a0da6264 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -963,7 +963,7 @@ put_tv32(struct timeval32 __user *o, struct timespec64 *i)
}
static inline long
-put_tv_to_tv32(struct timeval32 __user *o, struct timeval *i)
+put_tv_to_tv32(struct timeval32 __user *o, struct __kernel_old_timeval *i)
{
return copy_to_user(o, &(struct timeval32){
.tv_sec = i->tv_sec,
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 5d4c76a77a9f..f1072b10913c 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -394,8 +394,7 @@ setup_memory(void *kernel_end)
extern void setup_memory(void *);
#endif /* !CONFIG_DISCONTIGMEM */
-int __init
-page_is_ram(unsigned long pfn)
+int page_is_ram(unsigned long pfn)
{
struct memclust_struct * cluster;
struct memdesc_struct * memdesc;
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index f6b9664ac504..1a0a98cfdae7 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -192,7 +192,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
local_irq_enable();
while (1);
}
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
#ifndef CONFIG_MATHEMU
@@ -235,7 +235,21 @@ do_entIF(unsigned long type, struct pt_regs *regs)
{
int signo, code;
- if ((regs->ps & ~IPL_MAX) == 0) {
+ if (type == 3) { /* FEN fault */
+ /* Irritating users can call PAL_clrfen to disable the
+ FPU for the process. The kernel will then trap in
+ do_switch_stack and undo_switch_stack when we try
+ to save and restore the FP registers.
+
+ Given that GCC by default generates code that uses the
+ FP registers, PAL_clrfen is not useful except for DoS
+ attacks. So turn the bleeding FPU back on and be done
+ with it. */
+ current_thread_info()->pcb.flags |= 1;
+ __reload_thread(&current_thread_info()->pcb);
+ return;
+ }
+ if (!user_mode(regs)) {
if (type == 1) {
const unsigned int *data
= (const unsigned int *) regs->pc;
@@ -368,20 +382,6 @@ do_entIF(unsigned long type, struct pt_regs *regs)
}
break;
- case 3: /* FEN fault */
- /* Irritating users can call PAL_clrfen to disable the
- FPU for the process. The kernel will then trap in
- do_switch_stack and undo_switch_stack when we try
- to save and restore the FP registers.
-
- Given that GCC by default generates code that uses the
- FP registers, PAL_clrfen is not useful except for DoS
- attacks. So turn the bleeding FPU back on and be done
- with it. */
- current_thread_info()->pcb.flags |= 1;
- __reload_thread(&current_thread_info()->pcb);
- return;
-
case 5: /* illoc */
default: /* unexpected instruction-fault type */
;
@@ -577,7 +577,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
pc, va, opcode, reg);
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
got_exception:
/* Ok, we caught the exception, but we don't want it. Is there
@@ -632,7 +632,7 @@ got_exception:
local_irq_enable();
while (1);
}
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
/*
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 741e61ef9d3f..a86286d2d3f3 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -206,7 +206,7 @@ retry:
printk(KERN_ALERT "Unable to handle kernel paging request at "
"virtual address %016lx\n", address);
die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
/* We ran out of memory, or some other thing happened to us that
made us unable to handle the page fault gracefully. */
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 72f7929736f8..94ef71b38b69 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -32,7 +32,7 @@ static inline void ioport_unmap(void __iomem *addr)
{
}
-extern void iounmap(const void __iomem *addr);
+extern void iounmap(const volatile void __iomem *addr);
#define ioremap_nocache(phy, sz) ioremap(phy, sz)
#define ioremap_wc(phy, sz) ioremap(phy, sz)
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index fe19f1d412e7..284fd513d7c6 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -8,6 +8,10 @@
#include <asm/dwarf.h>
+#define ASM_NL ` /* use '`' to mark new line in macro */
+#define __ALIGN .align 4
+#define __ALIGN_STR __stringify(__ALIGN)
+
#ifdef __ASSEMBLY__
.macro ST2 e, o, off
@@ -28,10 +32,6 @@
#endif
.endm
-#define ASM_NL ` /* use '`' to mark new line in macro */
-#define __ALIGN .align 4
-#define __ALIGN_STR __stringify(__ALIGN)
-
/* annotation for data we want in DCCM - if enabled in .config */
.macro ARCFP_DATA nm
#ifdef CONFIG_ARC_HAS_DCCM
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 8877de0dfe6c..2759d49a2f3a 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -61,7 +61,7 @@ struct rt_sigframe {
unsigned int sigret_magic;
};
-static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+static int save_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
{
int err = 0;
#ifndef CONFIG_ISA_ARCOMPACT
@@ -74,12 +74,12 @@ static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
#else
v2abi.r58 = v2abi.r59 = 0;
#endif
- err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
+ err = __copy_to_user(&mctx->v2abi, (void const *)&v2abi, sizeof(v2abi));
#endif
return err;
}
-static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+static int restore_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
{
int err = 0;
#ifndef CONFIG_ISA_ARCOMPACT
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 73a7e88a1e92..e947572a521e 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -48,8 +48,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
* upper layer functions (in include/linux/dma-mapping.h)
*/
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
@@ -69,8 +69,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
}
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index 95c649fbc95a..d3b1ea16e9cd 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -93,7 +93,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
EXPORT_SYMBOL(ioremap_prot);
-void iounmap(const void __iomem *addr)
+void iounmap(const volatile void __iomem *addr)
{
/* weird double cast to handle phys_addr_t > 32 bits */
if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a4364cce85f8..2feb5dade121 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -5,6 +5,7 @@ config ARM
select ARCH_32BIT_OFF_T
select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_BINFMT_FLAT
+ select ARCH_HAS_CPU_FINALIZE_INIT if MMU
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
@@ -1837,7 +1838,6 @@ config CMDLINE
choice
prompt "Kernel command line type" if CMDLINE != ""
default CMDLINE_FROM_BOOTLOADER
- depends on ATAGS
config CMDLINE_FROM_BOOTLOADER
bool "Use bootloader kernel arguments if available"
diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi
index 9bfa032bcada..f2d9145b3c6a 100644
--- a/arch/arm/boot/dts/am335x-pcm-953.dtsi
+++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi
@@ -12,22 +12,20 @@
compatible = "phytec,am335x-pcm-953", "phytec,am335x-phycore-som", "ti,am33xx";
/* Power */
- regulators {
- vcc3v3: fixedregulator@1 {
- compatible = "regulator-fixed";
- regulator-name = "vcc3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-boot-on;
- };
+ vcc3v3: fixedregulator1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
- vcc1v8: fixedregulator@2 {
- compatible = "regulator-fixed";
- regulator-name = "vcc1v8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-boot-on;
- };
+ vcc1v8: fixedregulator2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
};
/* User IO */
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index 7a9eb2b0d45b..8ec6c4500fd5 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -1337,8 +1337,7 @@
ti,dual-volt;
ti,needs-special-reset;
ti,needs-special-hs-handling;
- dmas = <&edma_xbar 24 0 0
- &edma_xbar 25 0 0>;
+ dmas = <&edma 24 0>, <&edma 25 0>;
dma-names = "tx", "rx";
interrupts = <64>;
reg = <0x0 0x1000>;
diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
index e86d4795e024..e542a9f59e93 100644
--- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
+++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
@@ -527,7 +527,7 @@
interrupt-parent = <&gpio1>;
interrupts = <31 0>;
- pendown-gpio = <&gpio1 31 0>;
+ pendown-gpio = <&gpio1 31 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <0x0>;
diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts
index 2625ce66f8e7..673c6e5b2f56 100644
--- a/arch/arm/boot/dts/arm-realview-pb1176.dts
+++ b/arch/arm/boot/dts/arm-realview-pb1176.dts
@@ -435,7 +435,7 @@
/* Direct-mapped development chip ROM */
pb1176_rom@10200000 {
- compatible = "direct-mapped";
+ compatible = "mtd-rom";
reg = <0x10200000 0x4000>;
bank-width = <1>;
};
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index 46e6d3ed8f35..c042c416a94a 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -74,7 +74,7 @@
pcie2: pcie@2,0 {
device_type = "pci";
- assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
+ assigned-addresses = <0x82001000 0 0x80000 0 0x2000>;
reg = <0x1000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index 2932a29ae272..230f6dd876a2 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -584,7 +584,7 @@
pcie1: pcie@2,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
+ assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
reg = <0x1000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi
index cff1269f3fbf..7146cc8f082a 100644
--- a/arch/arm/boot/dts/armada-380.dtsi
+++ b/arch/arm/boot/dts/armada-380.dtsi
@@ -79,7 +79,7 @@
/* x1 port */
pcie@2,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
+ assigned-addresses = <0x82001000 0 0x40000 0 0x2000>;
reg = <0x1000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
@@ -98,7 +98,7 @@
/* x1 port */
pcie@3,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
+ assigned-addresses = <0x82001800 0 0x44000 0 0x2000>;
reg = <0x1800 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts
index fde4c302f08e..320c759b4090 100644
--- a/arch/arm/boot/dts/armada-385-turris-omnia.dts
+++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts
@@ -22,6 +22,12 @@
stdout-path = &uart0;
};
+ aliases {
+ ethernet0 = &eth0;
+ ethernet1 = &eth1;
+ ethernet2 = &eth2;
+ };
+
memory {
device_type = "memory";
reg = <0x00000000 0x40000000>; /* 1024 MB */
@@ -291,7 +297,17 @@
};
};
- /* port 6 is connected to eth0 */
+ ports@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&eth0>;
+ phy-mode = "rgmii-id";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
};
};
};
@@ -307,7 +323,7 @@
marvell,function = "spi0";
};
- spi0cs1_pins: spi0cs1-pins {
+ spi0cs2_pins: spi0cs2-pins {
marvell,pins = "mpp26";
marvell,function = "spi0";
};
@@ -342,7 +358,7 @@
};
};
- /* MISO, MOSI, SCLK and CS1 are routed to pin header CN11 */
+ /* MISO, MOSI, SCLK and CS2 are routed to pin header CN11 */
};
&uart0 {
diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi
index f0022d10c715..f081f7cb66e5 100644
--- a/arch/arm/boot/dts/armada-385.dtsi
+++ b/arch/arm/boot/dts/armada-385.dtsi
@@ -84,7 +84,7 @@
/* x1 port */
pcie2: pcie@2,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
+ assigned-addresses = <0x82001000 0 0x40000 0 0x2000>;
reg = <0x1000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
@@ -103,7 +103,7 @@
/* x1 port */
pcie3: pcie@3,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
+ assigned-addresses = <0x82001800 0 0x44000 0 0x2000>;
reg = <0x1800 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
@@ -125,7 +125,7 @@
*/
pcie4: pcie@4,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
+ assigned-addresses = <0x82002000 0 0x48000 0 0x2000>;
reg = <0x2000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
index b1b86934c688..cd7a46c48d19 100644
--- a/arch/arm/boot/dts/armada-39x.dtsi
+++ b/arch/arm/boot/dts/armada-39x.dtsi
@@ -457,7 +457,7 @@
/* x1 port */
pcie@2,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
+ assigned-addresses = <0x82001000 0 0x40000 0 0x2000>;
reg = <0x1000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
@@ -476,7 +476,7 @@
/* x1 port */
pcie@3,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
+ assigned-addresses = <0x82001800 0 0x44000 0 0x2000>;
reg = <0x1800 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
@@ -498,7 +498,7 @@
*/
pcie@4,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
+ assigned-addresses = <0x82002000 0 0x48000 0 0x2000>;
reg = <0x2000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
index 8558bf6bb54c..d55fe162fc7f 100644
--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
@@ -97,7 +97,7 @@
pcie2: pcie@2,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
+ assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
reg = <0x1000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
@@ -115,7 +115,7 @@
pcie3: pcie@3,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
+ assigned-addresses = <0x82001800 0 0x48000 0 0x2000>;
reg = <0x1800 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
@@ -133,7 +133,7 @@
pcie4: pcie@4,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>;
+ assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>;
reg = <0x2000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
@@ -151,7 +151,7 @@
pcie5: pcie@5,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
+ assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
reg = <0x2800 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index 2d85fe8ac327..fdcc81819940 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -112,7 +112,7 @@
pcie2: pcie@2,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
+ assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
reg = <0x1000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
@@ -130,7 +130,7 @@
pcie3: pcie@3,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
+ assigned-addresses = <0x82001800 0 0x48000 0 0x2000>;
reg = <0x1800 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
@@ -148,7 +148,7 @@
pcie4: pcie@4,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>;
+ assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>;
reg = <0x2000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
@@ -166,7 +166,7 @@
pcie5: pcie@5,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
+ assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
reg = <0x2800 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
@@ -184,7 +184,7 @@
pcie6: pcie@6,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x84000 0 0x2000>;
+ assigned-addresses = <0x82003000 0 0x84000 0 0x2000>;
reg = <0x3000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
@@ -202,7 +202,7 @@
pcie7: pcie@7,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x88000 0 0x2000>;
+ assigned-addresses = <0x82003800 0 0x88000 0 0x2000>;
reg = <0x3800 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
@@ -220,7 +220,7 @@
pcie8: pcie@8,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>;
+ assigned-addresses = <0x82004000 0 0x8c000 0 0x2000>;
reg = <0x4000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
@@ -238,7 +238,7 @@
pcie9: pcie@9,0 {
device_type = "pci";
- assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
+ assigned-addresses = <0x82004800 0 0x42000 0 0x2000>;
reg = <0x4800 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
diff --git a/arch/arm/boot/dts/aspeed-ast2500-evb.dts b/arch/arm/boot/dts/aspeed-ast2500-evb.dts
index c9d88c90135e..9db4d42d0deb 100644
--- a/arch/arm/boot/dts/aspeed-ast2500-evb.dts
+++ b/arch/arm/boot/dts/aspeed-ast2500-evb.dts
@@ -5,7 +5,7 @@
/ {
model = "AST2500 EVB";
- compatible = "aspeed,ast2500";
+ compatible = "aspeed,ast2500-evb", "aspeed,ast2500";
aliases {
serial4 = &uart5;
diff --git a/arch/arm/boot/dts/aspeed-ast2600-evb.dts b/arch/arm/boot/dts/aspeed-ast2600-evb.dts
index 9870553919b7..f00b19ad4fa6 100644
--- a/arch/arm/boot/dts/aspeed-ast2600-evb.dts
+++ b/arch/arm/boot/dts/aspeed-ast2600-evb.dts
@@ -7,7 +7,7 @@
/ {
model = "AST2600 EVB";
- compatible = "aspeed,ast2600";
+ compatible = "aspeed,ast2600-evb-a1", "aspeed,ast2600";
aliases {
serial4 = &uart5;
diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts
index c4ef74fea97c..ee90ea09e781 100644
--- a/arch/arm/boot/dts/at91sam9261ek.dts
+++ b/arch/arm/boot/dts/at91sam9261ek.dts
@@ -156,7 +156,7 @@
compatible = "ti,ads7843";
interrupts-extended = <&pioC 2 IRQ_TYPE_EDGE_BOTH>;
spi-max-frequency = <3000000>;
- pendown-gpio = <&pioC 2 GPIO_ACTIVE_HIGH>;
+ pendown-gpio = <&pioC 2 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <150>;
ti,x-max = /bits/ 16 <3830>;
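
The same polarity fix recurs on several boards later in this series (imx7d-pico-hobbit, imx7d-sdb, the omap3/omap5 boards): the PENIRQ output of the ADS7843/TSC2046 touchscreen family is active low, so the pen-down GPIO must carry GPIO_ACTIVE_LOW for the flag to describe the wiring. A minimal sketch of the corrected pair of properties:

        /* PENIRQ pulls the line low while the pen is down */
        interrupts-extended = <&pioC 2 IRQ_TYPE_EDGE_BOTH>;
        pendown-gpio = <&pioC 2 GPIO_ACTIVE_LOW>;
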
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index 287566e09a67..3d694b60d452 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -38,6 +38,13 @@
};
+ usb1 {
+ pinctrl_usb1_vbus_gpio: usb1_vbus_gpio {
+ atmel,pins =
+ <AT91_PIOC 5 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PC5 GPIO */
+ };
+ };
+
mmc0_slot1 {
pinctrl_board_mmc0_slot1: mmc0_slot1-board {
atmel,pins =
@@ -83,6 +90,8 @@
};
usb1: gadget@fffa4000 {
+ pinctrl-0 = <&pinctrl_usb1_vbus_gpio>;
+ pinctrl-names = "default";
atmel,vbus-gpio = <&pioC 5 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts b/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts
index cd797b4202ad..01c48faabfad 100644
--- a/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts
+++ b/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts
@@ -19,7 +19,8 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};
gpio-keys {
diff --git a/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts b/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts
index 57ca1cfaecd8..5901160919dc 100644
--- a/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts
+++ b/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts
@@ -26,7 +26,6 @@
wlan {
label = "bcm53xx:blue:wlan";
gpios = <&chipcommon 10 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
system {
@@ -46,3 +45,16 @@
};
};
};
+
+&gmac0 {
+ phy-mode = "rgmii";
+ phy-handle = <&bcm54210e>;
+
+ mdio {
+ /delete-node/ switch@1e;
+
+ bcm54210e: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts b/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts
index 2e1a7e382cb7..8e7483272d47 100644
--- a/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts
+++ b/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts
@@ -26,7 +26,6 @@
5ghz {
label = "bcm53xx:blue:5ghz";
gpios = <&chipcommon 11 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
system {
@@ -42,7 +41,6 @@
2ghz {
label = "bcm53xx:blue:2ghz";
gpios = <&pcie0_chipcommon 3 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
};
@@ -83,3 +81,16 @@
};
};
};
+
+&gmac0 {
+ phy-mode = "rgmii";
+ phy-handle = <&bcm54210e>;
+
+ mdio {
+ /delete-node/ switch@1e;
+
+ bcm54210e: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+};
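
Both Luxul boards gain the same Ethernet description: /delete-node/ removes the switch node inherited from the included SoC dtsi at compile time, and the BCM54210E PHY actually wired to gmac0 is declared in its place. A sketch of the directive's effect (node names follow the hunk above):

        &gmac0 {
                mdio {
                        /* drops mdio/switch@1e declared by the SoC dtsi;
                         * the node never reaches the compiled blob */
                        /delete-node/ switch@1e;
                };
        };
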
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 05d67f976911..bf8154aa203a 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -511,7 +511,6 @@
"spi_lr_session_done",
"spi_lr_overread";
clocks = <&iprocmed>;
- clock-names = "iprocmed";
num-cs = <2>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/bcm53573.dtsi b/arch/arm/boot/dts/bcm53573.dtsi
index 4af8e3293cff..34bd72b5c9cd 100644
--- a/arch/arm/boot/dts/bcm53573.dtsi
+++ b/arch/arm/boot/dts/bcm53573.dtsi
@@ -127,6 +127,9 @@
pcie0: pcie@2000 {
reg = <0x00002000 0x1000>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
};
usb2: usb2@4000 {
diff --git a/arch/arm/boot/dts/bcm947189acdbmr.dts b/arch/arm/boot/dts/bcm947189acdbmr.dts
index b0b8c774a37f..1f0be30e5443 100644
--- a/arch/arm/boot/dts/bcm947189acdbmr.dts
+++ b/arch/arm/boot/dts/bcm947189acdbmr.dts
@@ -60,9 +60,9 @@
spi {
compatible = "spi-gpio";
num-chipselects = <1>;
- gpio-sck = <&chipcommon 21 0>;
- gpio-miso = <&chipcommon 22 0>;
- gpio-mosi = <&chipcommon 23 0>;
+ sck-gpios = <&chipcommon 21 0>;
+ miso-gpios = <&chipcommon 22 0>;
+ mosi-gpios = <&chipcommon 23 0>;
cs-gpios = <&chipcommon 24 0>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index 2e8a3977219f..347624ea96cd 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -129,7 +129,7 @@
pcie1: pcie@2 {
device_type = "pci";
status = "disabled";
- assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
+ assigned-addresses = <0x82001000 0 0x80000 0 0x2000>;
reg = <0x1000 0 0 0 0>;
clocks = <&gate_clk 5>;
marvell,pcie-port = <1>;
diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
index 468932f45289..72d6b7c27151 100644
--- a/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -239,7 +239,7 @@
i80-if-timings {
cs-setup = <0>;
wr-setup = <0>;
- wr-act = <1>;
+ wr-active = <1>;
wr-hold = <0>;
};
};
diff --git a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
index 021d9fc1b492..27a1a8952665 100644
--- a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
+++ b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
@@ -10,7 +10,7 @@
/ {
thermal-zones {
cpu_thermal: cpu-thermal {
- thermal-sensors = <&tmu 0>;
+ thermal-sensors = <&tmu>;
polling-delay-passive = <0>;
polling-delay = <0>;
trips {
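
The number of specifier cells following a phandle in thermal-sensors must equal the sensor's #thermal-sensor-cells; the Exynos TMU advertises <0>, so the stray index cell is dropped here (the odroidxu and qcom-pm8841 hunks below enforce the same rule from either side). A sketch of the matched pair, with a hypothetical unit address:

        tmu: tmu@100c0000 {
                #thermal-sensor-cells = <0>;    /* phandle carries no index */
        };

        cpu_thermal: cpu-thermal {
                thermal-sensors = <&tmu>;       /* so no extra cell is allowed */
        };
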
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 433f109d97ca..1dd009c4dc91 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -605,7 +605,7 @@
status = "disabled";
hdmi_i2c_phy: hdmiphy@38 {
- compatible = "exynos4210-hdmiphy";
+ compatible = "samsung,exynos4210-hdmiphy";
reg = <0x38>;
};
};
diff --git a/arch/arm/boot/dts/exynos4412-itop-elite.dts b/arch/arm/boot/dts/exynos4412-itop-elite.dts
index f6d0a5f5d339..9a2a49420d4d 100644
--- a/arch/arm/boot/dts/exynos4412-itop-elite.dts
+++ b/arch/arm/boot/dts/exynos4412-itop-elite.dts
@@ -179,7 +179,7 @@
compatible = "wlf,wm8960";
reg = <0x1a>;
clocks = <&pmu_system_controller 0>;
- clock-names = "MCLK1";
+ clock-names = "mclk";
wlf,shared-lrclk;
#sound-dai-cells = <0>;
};
diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi
index fedb21377c66..3538739c7901 100644
--- a/arch/arm/boot/dts/exynos4412-midas.dtsi
+++ b/arch/arm/boot/dts/exynos4412-midas.dtsi
@@ -534,7 +534,7 @@
clocks = <&camera 1>;
clock-names = "extclk";
samsung,camclk-out = <1>;
- gpios = <&gpm1 6 GPIO_ACTIVE_HIGH>;
+ gpios = <&gpm1 6 GPIO_ACTIVE_LOW>;
port {
is_s5k6a3_ep: endpoint {
diff --git a/arch/arm/boot/dts/exynos4412-origen.dts b/arch/arm/boot/dts/exynos4412-origen.dts
index ecd14b283a6b..c6678c120cbd 100644
--- a/arch/arm/boot/dts/exynos4412-origen.dts
+++ b/arch/arm/boot/dts/exynos4412-origen.dts
@@ -86,7 +86,7 @@
};
&ehci {
- samsung,vbus-gpio = <&gpx3 5 1>;
+ samsung,vbus-gpio = <&gpx3 5 GPIO_ACTIVE_HIGH>;
status = "okay";
phys = <&exynos_usbphy 2>, <&exynos_usbphy 3>;
phy-names = "hsic0", "hsic1";
diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts
index f68baaf58f9e..51b1a7f19471 100644
--- a/arch/arm/boot/dts/exynos5410-odroidxu.dts
+++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts
@@ -116,7 +116,6 @@
};
&cpu0_thermal {
- thermal-sensors = <&tmu_cpu0 0>;
polling-delay-passive = <0>;
polling-delay = <0>;
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 7d51e0f4ab79..5da434ccf12a 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -539,7 +539,7 @@
};
mipi_phy: mipi-video-phy {
- compatible = "samsung,s5pv210-mipi-video-phy";
+ compatible = "samsung,exynos5420-mipi-video-phy";
syscon = <&pmu_system_controller>;
#phy-cells = <1>;
};
diff --git a/arch/arm/boot/dts/imx1-ads.dts b/arch/arm/boot/dts/imx1-ads.dts
index 5833fb6f15d8..2c817c4a4c68 100644
--- a/arch/arm/boot/dts/imx1-ads.dts
+++ b/arch/arm/boot/dts/imx1-ads.dts
@@ -65,7 +65,7 @@
pinctrl-0 = <&pinctrl_weim>;
status = "okay";
- nor: nor@0,0 {
+ nor: flash@0,0 {
compatible = "cfi-flash";
reg = <0 0x00000000 0x02000000>;
bank-width = <4>;
diff --git a/arch/arm/boot/dts/imx1-apf9328.dts b/arch/arm/boot/dts/imx1-apf9328.dts
index 77b21aa7a146..27e72b07b517 100644
--- a/arch/arm/boot/dts/imx1-apf9328.dts
+++ b/arch/arm/boot/dts/imx1-apf9328.dts
@@ -45,7 +45,7 @@
pinctrl-0 = <&pinctrl_weim>;
status = "okay";
- nor: nor@0,0 {
+ nor: flash@0,0 {
compatible = "cfi-flash";
reg = <0 0x00000000 0x02000000>;
bank-width = <2>;
diff --git a/arch/arm/boot/dts/imx1.dtsi b/arch/arm/boot/dts/imx1.dtsi
index b30448cde582..a74870d03a67 100644
--- a/arch/arm/boot/dts/imx1.dtsi
+++ b/arch/arm/boot/dts/imx1.dtsi
@@ -268,9 +268,12 @@
status = "disabled";
};
- esram: esram@300000 {
+ esram: sram@300000 {
compatible = "mmio-sram";
reg = <0x00300000 0x20000>;
+ ranges = <0 0x00300000 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
};
};
};
diff --git a/arch/arm/boot/dts/imx23-sansa.dts b/arch/arm/boot/dts/imx23-sansa.dts
index 46057d9bf555..c2efcc20ae80 100644
--- a/arch/arm/boot/dts/imx23-sansa.dts
+++ b/arch/arm/boot/dts/imx23-sansa.dts
@@ -175,10 +175,8 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "i2c-gpio";
- gpios = <
- &gpio1 24 0 /* SDA */
- &gpio1 22 0 /* SCL */
- >;
+ sda-gpios = <&gpio1 24 0>;
+ scl-gpios = <&gpio1 22 0>;
i2c-gpio,delay-us = <2>; /* ~100 kHz */
};
@@ -186,10 +184,8 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "i2c-gpio";
- gpios = <
- &gpio0 31 0 /* SDA */
- &gpio0 30 0 /* SCL */
- >;
+ sda-gpios = <&gpio0 31 0>;
+ scl-gpios = <&gpio0 30 0>;
i2c-gpio,delay-us = <2>; /* ~100 kHz */
touch: touch@20 {
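
In the legacy i2c-gpio binding the bare gpios property carried SDA in entry 0 and SCL in entry 1; the dedicated sda-gpios/scl-gpios properties state the roles explicitly, which is all the conversion above changes. A sketch of the equivalence:

        i2c {
                compatible = "i2c-gpio";
                /* old form: gpios = <&gpio1 24 0>, <&gpio1 22 0>; (SDA, SCL) */
                sda-gpios = <&gpio1 24 0>;
                scl-gpios = <&gpio1 22 0>;
        };
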
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index 8257630f7a49..ba1705595b29 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -59,7 +59,7 @@
reg = <0x80000000 0x2000>;
};
- dma_apbh: dma-apbh@80004000 {
+ dma_apbh: dma-controller@80004000 {
compatible = "fsl,imx23-dma-apbh";
reg = <0x80004000 0x2000>;
interrupts = <0 14 20 0
@@ -406,7 +406,7 @@
status = "disabled";
};
- dma_apbx: dma-apbx@80024000 {
+ dma_apbx: dma-controller@80024000 {
compatible = "fsl,imx23-dma-apbx";
reg = <0x80024000 0x2000>;
interrupts = <7 5 9 26
diff --git a/arch/arm/boot/dts/imx25-eukrea-cpuimx25.dtsi b/arch/arm/boot/dts/imx25-eukrea-cpuimx25.dtsi
index 0703f62d10d1..93a6e4e680b4 100644
--- a/arch/arm/boot/dts/imx25-eukrea-cpuimx25.dtsi
+++ b/arch/arm/boot/dts/imx25-eukrea-cpuimx25.dtsi
@@ -27,7 +27,7 @@
pinctrl-0 = <&pinctrl_i2c1>;
status = "okay";
- pcf8563@51 {
+ rtc@51 {
compatible = "nxp,pcf8563";
reg = <0x51>;
};
diff --git a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-cmo-qvga.dts b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-cmo-qvga.dts
index 7d4301b22b90..1ed3fb7b9ce6 100644
--- a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-cmo-qvga.dts
+++ b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-cmo-qvga.dts
@@ -16,7 +16,7 @@
bus-width = <18>;
display-timings {
native-mode = <&qvga_timings>;
- qvga_timings: 320x240 {
+ qvga_timings: timing0 {
clock-frequency = <6500000>;
hactive = <320>;
vactive = <240>;
diff --git a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-svga.dts b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-svga.dts
index 80a7f96de4c6..64b2ffac463b 100644
--- a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-svga.dts
+++ b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-svga.dts
@@ -16,7 +16,7 @@
bus-width = <18>;
display-timings {
native-mode = <&dvi_svga_timings>;
- dvi_svga_timings: 800x600 {
+ dvi_svga_timings: timing0 {
clock-frequency = <40000000>;
hactive = <800>;
vactive = <600>;
diff --git a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dts b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dts
index 24027a1fb46d..fb074bfdaa8d 100644
--- a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dts
+++ b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dts
@@ -16,7 +16,7 @@
bus-width = <18>;
display-timings {
native-mode = <&dvi_vga_timings>;
- dvi_vga_timings: 640x480 {
+ dvi_vga_timings: timing0 {
clock-frequency = <31250000>;
hactive = <640>;
vactive = <480>;
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
index 05cccd12624c..876b101ce3e6 100644
--- a/arch/arm/boot/dts/imx25-pdk.dts
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -78,7 +78,7 @@
bus-width = <18>;
display-timings {
native-mode = <&wvga_timings>;
- wvga_timings: 640x480 {
+ wvga_timings: timing0 {
hactive = <640>;
vactive = <480>;
hback-porch = <45>;
diff --git a/arch/arm/boot/dts/imx27-apf27dev.dts b/arch/arm/boot/dts/imx27-apf27dev.dts
index 6f1e8ce9e76e..3d9bb7fc3be2 100644
--- a/arch/arm/boot/dts/imx27-apf27dev.dts
+++ b/arch/arm/boot/dts/imx27-apf27dev.dts
@@ -16,7 +16,7 @@
fsl,pcr = <0xfae80083>; /* non-standard but required */
display-timings {
native-mode = <&timing0>;
- timing0: 800x480 {
+ timing0: timing0 {
clock-frequency = <33000033>;
hactive = <800>;
vactive = <480>;
@@ -47,7 +47,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gpio_leds>;
- user {
+ led-user {
label = "Heartbeat";
gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
diff --git a/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi b/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi
index 74110bbcd9d4..c7e923584878 100644
--- a/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi
+++ b/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi
@@ -33,7 +33,7 @@
pinctrl-0 = <&pinctrl_i2c1>;
status = "okay";
- pcf8563@51 {
+ rtc@51 {
compatible = "nxp,pcf8563";
reg = <0x51>;
};
@@ -90,7 +90,7 @@
&weim {
status = "okay";
- nor: nor@0,0 {
+ nor: flash@0,0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
diff --git a/arch/arm/boot/dts/imx27-eukrea-mbimxsd27-baseboard.dts b/arch/arm/boot/dts/imx27-eukrea-mbimxsd27-baseboard.dts
index 9c3ec82ec7e5..50fa0bd4c8a1 100644
--- a/arch/arm/boot/dts/imx27-eukrea-mbimxsd27-baseboard.dts
+++ b/arch/arm/boot/dts/imx27-eukrea-mbimxsd27-baseboard.dts
@@ -16,7 +16,7 @@
display-timings {
native-mode = <&timing0>;
- timing0: 320x240 {
+ timing0: timing0 {
clock-frequency = <6500000>;
hactive = <320>;
vactive = <240>;
diff --git a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
index 188639738dc3..7f36af150a25 100644
--- a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
+++ b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
@@ -19,7 +19,7 @@
fsl,pcr = <0xf0c88080>; /* non-standard but required */
display-timings {
native-mode = <&timing0>;
- timing0: 640x480 {
+ timing0: timing0 {
hactive = <640>;
vactive = <480>;
hback-porch = <112>;
diff --git a/arch/arm/boot/dts/imx27-phytec-phycore-rdk.dts b/arch/arm/boot/dts/imx27-phytec-phycore-rdk.dts
index bf883e45576a..39e0fcb12d23 100644
--- a/arch/arm/boot/dts/imx27-phytec-phycore-rdk.dts
+++ b/arch/arm/boot/dts/imx27-phytec-phycore-rdk.dts
@@ -19,7 +19,7 @@
display-timings {
native-mode = <&timing0>;
- timing0: 240x320 {
+ timing0: timing0 {
clock-frequency = <5500000>;
hactive = <240>;
vactive = <320>;
diff --git a/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi
index 3d10273177e9..a5fdc2fd4ce5 100644
--- a/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi
+++ b/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi
@@ -322,7 +322,7 @@
&weim {
status = "okay";
- nor: nor@0,0 {
+ nor: flash@0,0 {
compatible = "cfi-flash";
reg = <0 0x00000000 0x02000000>;
bank-width = <2>;
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index 3652f5556b29..eb0dac710044 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -588,6 +588,9 @@
iram: iram@ffff4c00 {
compatible = "mmio-sram";
reg = <0xffff4c00 0xb400>;
+ ranges = <0 0xffff4c00 0xb400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
};
};
};
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index e14d8ef0158b..26dc6c9e1e6c 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -78,7 +78,7 @@
status = "disabled";
};
- dma_apbh: dma-apbh@80004000 {
+ dma_apbh: dma-controller@80004000 {
compatible = "fsl,imx28-dma-apbh";
reg = <0x80004000 0x2000>;
interrupts = <82 83 84 85
@@ -982,7 +982,7 @@
status = "disabled";
};
- dma_apbx: dma-apbx@80024000 {
+ dma_apbx: dma-controller@80024000 {
compatible = "fsl,imx28-dma-apbx";
reg = <0x80024000 0x2000>;
interrupts = <78 79 66 0
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index 9cbdc1a15cda..d326b18333d1 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -59,7 +59,7 @@
interrupt-parent = <&avic>;
ranges;
- L2: l2-cache@30000000 {
+ L2: cache-controller@30000000 {
compatible = "arm,l210-cache";
reg = <0x30000000 0x1000>;
cache-unified;
diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
index c80d1700e094..c01dc571b55c 100644
--- a/arch/arm/boot/dts/imx53-ppd.dts
+++ b/arch/arm/boot/dts/imx53-ppd.dts
@@ -461,7 +461,7 @@
scl-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
status = "okay";
- i2c-switch@70 {
+ i2c-mux@70 {
compatible = "nxp,pca9547";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
index ce4a5a807442..68af441e7894 100644
--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
@@ -96,8 +96,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
phy-mode = "rgmii-id";
- phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
- phy-reset-duration = <20>;
phy-supply = <&sw2_reg>;
status = "okay";
@@ -110,17 +108,10 @@
#address-cells = <1>;
#size-cells = <0>;
- phy_port2: phy@1 {
- reg = <1>;
- };
-
- phy_port3: phy@2 {
- reg = <2>;
- };
-
switch@10 {
compatible = "qca,qca8334";
- reg = <10>;
+ reg = <0x10>;
+ reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
switch_ports: ports {
#address-cells = <1>;
@@ -141,15 +132,30 @@
port@2 {
reg = <2>;
label = "eth2";
+ phy-mode = "internal";
phy-handle = <&phy_port2>;
};
port@3 {
reg = <3>;
label = "eth1";
+ phy-mode = "internal";
phy-handle = <&phy_port3>;
};
};
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy_port2: ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ phy_port3: ethernet-phy@2 {
+ reg = <2>;
+ };
+ };
};
};
};
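
Two independent fixes hide in this yapp4 hunk: the unit address in switch@10 is hexadecimal, so reg must be <0x10> (decimal 16) for the switch to be probed at the right MDIO address, and the switch's internal PHYs move onto the switch's own mdio bus instead of the SoC-level one. The reg rule in isolation:

        switch@10 {
                compatible = "qca,qca8334";
                reg = <0x10>;   /* "@10" is hex: MDIO address 16, not 10 */
        };
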
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
index 2ed10310a7b7..4bde98033ff4 100644
--- a/arch/arm/boot/dts/imx6dl.dtsi
+++ b/arch/arm/boot/dts/imx6dl.dtsi
@@ -81,6 +81,9 @@
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x20000>;
+ ranges = <0 0x00900000 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index d038f4117024..013080e709f8 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -158,6 +158,9 @@
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x40000>;
+ ranges = <0 0x00900000 0x40000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};
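
The same three properties are added to every mmio-sram node in this series (imx1, imx27, imx6dl/q/qp, imx6sl, imx6sll, imx6sx, imx6ul): the mmio-sram binding wants ranges, #address-cells and #size-cells so that carve-outs can be expressed as children addressed relative to the SRAM base. A sketch with a hypothetical partition child:

        ocram: sram@900000 {
                compatible = "mmio-sram";
                reg = <0x00900000 0x40000>;
                ranges = <0 0x00900000 0x40000>; /* child 0x0 == CPU 0x00900000 */
                #address-cells = <1>;
                #size-cells = <1>;

                /* hypothetical child: 4 KiB reserved at the start */
                sram@0 {
                        reg = <0x0 0x1000>;
                };
        };
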
diff --git a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
index e8e36dfd0a6b..c951834f4984 100644
--- a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
@@ -464,7 +464,6 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- uart-has-rtscts;
rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
index eea317b41020..5e454a694b78 100644
--- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
@@ -51,16 +51,6 @@
vin-supply = <&reg_3p3v_s5>;
};
- reg_3p3v_s0: regulator-3p3v-s0 {
- compatible = "regulator-fixed";
- regulator-name = "V_3V3_S0";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
- vin-supply = <&reg_3p3v_s5>;
- };
-
reg_3p3v_s5: regulator-3p3v-s5 {
compatible = "regulator-fixed";
regulator-name = "V_3V3_S5";
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index e9955ef12e02..0150b2d5c534 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -45,6 +45,10 @@
spi1 = &ecspi2;
spi2 = &ecspi3;
spi3 = &ecspi4;
+ usb0 = &usbotg;
+ usb1 = &usbh1;
+ usb2 = &usbh2;
+ usb3 = &usbh3;
usbphy0 = &usbphy1;
usbphy1 = &usbphy2;
};
@@ -156,7 +160,7 @@
interrupt-parent = <&gpc>;
ranges;
- dma_apbh: dma-apbh@110000 {
+ dma_apbh: dma-controller@110000 {
compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
reg = <0x00110000 0x2000>;
interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>,
@@ -255,7 +259,7 @@
interrupt-parent = <&intc>;
};
- L2: l2-cache@a02000 {
+ L2: cache-controller@a02000 {
compatible = "arm,pl310-cache";
reg = <0x00a02000 0x1000>;
interrupts = <0 92 IRQ_TYPE_LEVEL_HIGH>;
@@ -574,7 +578,7 @@
status = "disabled";
};
- gpt: gpt@2098000 {
+ gpt: timer@2098000 {
compatible = "fsl,imx6q-gpt", "fsl,imx31-gpt";
reg = <0x02098000 0x4000>;
interrupts = <0 55 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/imx6qp.dtsi b/arch/arm/boot/dts/imx6qp.dtsi
index d91f92f944c5..3633383db706 100644
--- a/arch/arm/boot/dts/imx6qp.dtsi
+++ b/arch/arm/boot/dts/imx6qp.dtsi
@@ -9,12 +9,18 @@
ocram2: sram@940000 {
compatible = "mmio-sram";
reg = <0x00940000 0x20000>;
+ ranges = <0 0x00940000 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};
ocram3: sram@960000 {
compatible = "mmio-sram";
reg = <0x00960000 0x20000>;
+ ranges = <0 0x00960000 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index 540880f0413f..fbbae0004e62 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -39,6 +39,9 @@
spi1 = &ecspi2;
spi2 = &ecspi3;
spi3 = &ecspi4;
+ usb0 = &usbotg1;
+ usb1 = &usbotg2;
+ usb2 = &usbh;
usbphy0 = &usbphy1;
usbphy1 = &usbphy2;
};
@@ -121,6 +124,9 @@
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x20000>;
+ ranges = <0 0x00900000 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
clocks = <&clks IMX6SL_CLK_OCRAM>;
};
@@ -133,7 +139,7 @@
interrupt-parent = <&intc>;
};
- L2: l2-cache@a02000 {
+ L2: cache-controller@a02000 {
compatible = "arm,pl310-cache";
reg = <0x00a02000 0x1000>;
interrupts = <0 92 IRQ_TYPE_LEVEL_HIGH>;
@@ -380,7 +386,7 @@
clock-names = "ipg", "per";
};
- gpt: gpt@2098000 {
+ gpt: timer@2098000 {
compatible = "fsl,imx6sl-gpt";
reg = <0x02098000 0x4000>;
interrupts = <0 55 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi
index 13c7ba7fa6bc..ad3deb82a50a 100644
--- a/arch/arm/boot/dts/imx6sll.dtsi
+++ b/arch/arm/boot/dts/imx6sll.dtsi
@@ -36,6 +36,8 @@
spi1 = &ecspi2;
spi3 = &ecspi3;
spi4 = &ecspi4;
+ usb0 = &usbotg1;
+ usb1 = &usbotg2;
usbphy0 = &usbphy1;
usbphy1 = &usbphy2;
};
@@ -49,20 +51,18 @@
device_type = "cpu";
reg = <0>;
next-level-cache = <&L2>;
- operating-points = <
+ operating-points =
/* kHz uV */
- 996000 1275000
- 792000 1175000
- 396000 1075000
- 198000 975000
- >;
- fsl,soc-operating-points = <
+ <996000 1275000>,
+ <792000 1175000>,
+ <396000 1075000>,
+ <198000 975000>;
+ fsl,soc-operating-points =
/* ARM kHz SOC-PU uV */
- 996000 1175000
- 792000 1175000
- 396000 1175000
- 198000 1175000
- >;
+ <996000 1175000>,
+ <792000 1175000>,
+ <396000 1175000>,
+ <198000 1175000>;
clock-latency = <61036>; /* two CLK32 periods */
#cooling-cells = <2>;
clocks = <&clks IMX6SLL_CLK_ARM>,
@@ -123,6 +123,9 @@
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x20000>;
+ ranges = <0 0x00900000 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
};
intc: interrupt-controller@a01000 {
@@ -134,7 +137,7 @@
interrupt-parent = <&intc>;
};
- L2: l2-cache@a02000 {
+ L2: cache-controller@a02000 {
compatible = "arm,pl310-cache";
reg = <0x00a02000 0x1000>;
interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
@@ -269,7 +272,7 @@
status = "disabled";
};
- ssi1: ssi-controller@2028000 {
+ ssi1: ssi@2028000 {
compatible = "fsl,imx6sl-ssi", "fsl,imx51-ssi";
reg = <0x02028000 0x4000>;
interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
@@ -282,7 +285,7 @@
status = "disabled";
};
- ssi2: ssi-controller@202c000 {
+ ssi2: ssi@202c000 {
compatible = "fsl,imx6sl-ssi", "fsl,imx51-ssi";
reg = <0x0202c000 0x4000>;
interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
@@ -295,7 +298,7 @@
status = "disabled";
};
- ssi3: ssi-controller@2030000 {
+ ssi3: ssi@2030000 {
compatible = "fsl,imx6sl-ssi", "fsl,imx51-ssi";
reg = <0x02030000 0x4000>;
interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
@@ -547,7 +550,7 @@
reg = <0x020ca000 0x1000>;
interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SLL_CLK_USBPHY2>;
- phy-reg_3p0-supply = <&reg_3p0>;
+ phy-3p0-supply = <&reg_3p0>;
fsl,anatop = <&anatop>;
};
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 531a52c1e987..1afeae14560a 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -49,6 +49,9 @@
spi2 = &ecspi3;
spi3 = &ecspi4;
spi4 = &ecspi5;
+ usb0 = &usbotg1;
+ usb1 = &usbotg2;
+ usb2 = &usbh;
usbphy0 = &usbphy1;
usbphy1 = &usbphy2;
};
@@ -163,12 +166,18 @@
ocram_s: sram@8f8000 {
compatible = "mmio-sram";
reg = <0x008f8000 0x4000>;
+ ranges = <0 0x008f8000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
clocks = <&clks IMX6SX_CLK_OCRAM_S>;
};
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x20000>;
+ ranges = <0 0x00900000 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
clocks = <&clks IMX6SX_CLK_OCRAM>;
};
@@ -181,7 +190,7 @@
interrupt-parent = <&intc>;
};
- L2: l2-cache@a02000 {
+ L2: cache-controller@a02000 {
compatible = "arm,pl310-cache";
reg = <0x00a02000 0x1000>;
interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
@@ -202,7 +211,7 @@
power-domains = <&pd_pu>;
};
- dma_apbh: dma-apbh@1804000 {
+ dma_apbh: dma-controller@1804000 {
compatible = "fsl,imx6sx-dma-apbh", "fsl,imx28-dma-apbh";
reg = <0x01804000 0x2000>;
interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
@@ -466,7 +475,7 @@
status = "disabled";
};
- gpt: gpt@2098000 {
+ gpt: timer@2098000 {
compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
reg = <0x02098000 0x4000>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
@@ -949,6 +958,8 @@
<&clks IMX6SX_CLK_USDHC1>;
clock-names = "ipg", "ahb", "per";
bus-width = <4>;
+ fsl,tuning-start-tap = <20>;
+ fsl,tuning-step = <2>;
status = "disabled";
};
@@ -961,6 +972,8 @@
<&clks IMX6SX_CLK_USDHC2>;
clock-names = "ipg", "ahb", "per";
bus-width = <4>;
+ fsl,tuning-start-tap = <20>;
+ fsl,tuning-step = <2>;
status = "disabled";
};
@@ -973,6 +986,8 @@
<&clks IMX6SX_CLK_USDHC3>;
clock-names = "ipg", "ahb", "per";
bus-width = <4>;
+ fsl,tuning-start-tap = <20>;
+ fsl,tuning-step = <2>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index f008036e9294..7037d2a45e60 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -47,6 +47,8 @@
spi1 = &ecspi2;
spi2 = &ecspi3;
spi3 = &ecspi4;
+ usb0 = &usbotg1;
+ usb1 = &usbotg2;
usbphy0 = &usbphy1;
usbphy1 = &usbphy2;
};
@@ -62,20 +64,18 @@
clock-frequency = <696000000>;
clock-latency = <61036>; /* two CLK32 periods */
#cooling-cells = <2>;
- operating-points = <
+ operating-points =
/* kHz uV */
- 696000 1275000
- 528000 1175000
- 396000 1025000
- 198000 950000
- >;
- fsl,soc-operating-points = <
+ <696000 1275000>,
+ <528000 1175000>,
+ <396000 1025000>,
+ <198000 950000>;
+ fsl,soc-operating-points =
/* kHz uV */
- 696000 1275000
- 528000 1175000
- 396000 1175000
- 198000 1175000
- >;
+ <696000 1275000>,
+ <528000 1175000>,
+ <396000 1175000>,
+ <198000 1175000>;
clocks = <&clks IMX6UL_CLK_ARM>,
<&clks IMX6UL_CLK_PLL2_BUS>,
<&clks IMX6UL_CLK_PLL2_PFD2>,
@@ -157,6 +157,9 @@
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x20000>;
+ ranges = <0 0x00900000 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
};
intc: interrupt-controller@a01000 {
@@ -171,7 +174,7 @@
<0x00a06000 0x2000>;
};
- dma_apbh: dma-apbh@1804000 {
+ dma_apbh: dma-controller@1804000 {
compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
reg = <0x01804000 0x2000>;
interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>,
@@ -430,7 +433,7 @@
status = "disabled";
};
- gpt1: gpt@2098000 {
+ gpt1: timer@2098000 {
compatible = "fsl,imx6ul-gpt", "fsl,imx6sx-gpt";
reg = <0x02098000 0x4000>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
@@ -704,7 +707,7 @@
reg = <0x020e4000 0x4000>;
};
- gpt2: gpt@20e8000 {
+ gpt2: timer@20e8000 {
compatible = "fsl,imx6ul-gpt", "fsl,imx6sx-gpt";
reg = <0x020e8000 0x4000>;
interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
@@ -966,7 +969,7 @@
};
csi: csi@21c4000 {
- compatible = "fsl,imx6ul-csi", "fsl,imx7-csi";
+ compatible = "fsl,imx6ul-csi";
reg = <0x021c4000 0x4000>;
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6UL_CLK_CSI>;
@@ -975,7 +978,7 @@
};
lcdif: lcdif@21c8000 {
- compatible = "fsl,imx6ul-lcdif", "fsl,imx28-lcdif";
+ compatible = "fsl,imx6ul-lcdif", "fsl,imx6sx-lcdif";
reg = <0x021c8000 0x4000>;
interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6UL_CLK_LCDIF_PIX>,
@@ -996,7 +999,7 @@
qspi: spi@21e0000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "fsl,imx6ul-qspi", "fsl,imx6sx-qspi";
+ compatible = "fsl,imx6ul-qspi";
reg = <0x021e0000 0x4000>, <0x60000000 0x10000000>;
reg-names = "QuadSPI", "QuadSPI-memory";
interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/imx7d-pico-hobbit.dts b/arch/arm/boot/dts/imx7d-pico-hobbit.dts
index d917dc4f2f22..6ad39dca7009 100644
--- a/arch/arm/boot/dts/imx7d-pico-hobbit.dts
+++ b/arch/arm/boot/dts/imx7d-pico-hobbit.dts
@@ -64,7 +64,7 @@
interrupt-parent = <&gpio2>;
interrupts = <7 0>;
spi-max-frequency = <1000000>;
- pendown-gpio = <&gpio2 7 0>;
+ pendown-gpio = <&gpio2 7 GPIO_ACTIVE_LOW>;
vcc-supply = <&reg_3p3v>;
ti,x-min = /bits/ 16 <0>;
ti,x-max = /bits/ 16 <4095>;
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index a97cda17e484..88e62801b82e 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -176,13 +176,8 @@
pinctrl-0 = <&pinctrl_tsc2046_pendown>;
interrupt-parent = <&gpio2>;
interrupts = <29 0>;
- pendown-gpio = <&gpio2 29 GPIO_ACTIVE_HIGH>;
- ti,x-min = /bits/ 16 <0>;
- ti,x-max = /bits/ 16 <0>;
- ti,y-min = /bits/ 16 <0>;
- ti,y-max = /bits/ 16 <0>;
- ti,pressure-max = /bits/ 16 <0>;
- ti,x-plate-ohms = /bits/ 16 <400>;
+ pendown-gpio = <&gpio2 29 GPIO_ACTIVE_LOW>;
+ touchscreen-max-pressure = <255>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index 9c8dd32cc035..2b9d0b1bd982 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -7,6 +7,12 @@
#include <dt-bindings/reset/imx7-reset.h>
/ {
+ aliases {
+ usb0 = &usbotg1;
+ usb1 = &usbotg2;
+ usb2 = &usbh;
+ };
+
cpus {
cpu0: cpu@0 {
clock-frequency = <996000000>;
@@ -198,9 +204,6 @@
};
&ca_funnel_in_ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
port@1 {
reg = <1>;
ca_funnel_in_port1: endpoint {
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index e2e604d6ba0b..8e86841ef4e6 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -47,6 +47,8 @@
spi1 = &ecspi2;
spi2 = &ecspi3;
spi3 = &ecspi4;
+ usb0 = &usbotg1;
+ usb1 = &usbh;
};
cpus {
@@ -181,7 +183,11 @@
clock-names = "apb_pclk";
ca_funnel_in_ports: in-ports {
- port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
ca_funnel_in_port0: endpoint {
remote-endpoint = <&etm0_out_port>;
};
@@ -444,8 +450,8 @@
fsl,input-sel = <&iomuxc>;
};
- gpt1: gpt@302d0000 {
- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+ gpt1: timer@302d0000 {
+ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
reg = <0x302d0000 0x10000>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_GPT1_ROOT_CLK>,
@@ -453,8 +459,8 @@
clock-names = "ipg", "per";
};
- gpt2: gpt@302e0000 {
- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+ gpt2: timer@302e0000 {
+ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
reg = <0x302e0000 0x10000>;
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_GPT2_ROOT_CLK>,
@@ -463,8 +469,8 @@
status = "disabled";
};
- gpt3: gpt@302f0000 {
- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+ gpt3: timer@302f0000 {
+ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
reg = <0x302f0000 0x10000>;
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_GPT3_ROOT_CLK>,
@@ -473,8 +479,8 @@
status = "disabled";
};
- gpt4: gpt@30300000 {
- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+ gpt4: timer@30300000 {
+ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
reg = <0x30300000 0x10000>;
interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_GPT4_ROOT_CLK>,
@@ -504,7 +510,7 @@
mux: mux-controller {
compatible = "mmio-mux";
- #mux-control-cells = <0>;
+ #mux-control-cells = <1>;
mux-reg-masks = <0x14 0x00000010>;
};
@@ -763,7 +769,7 @@
};
lcdif: lcdif@30730000 {
- compatible = "fsl,imx7d-lcdif", "fsl,imx28-lcdif";
+ compatible = "fsl,imx7d-lcdif", "fsl,imx6sx-lcdif";
reg = <0x30730000 0x10000>;
interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_LCDIF_PIXEL_ROOT_CLK>,
@@ -1131,6 +1137,8 @@
<&clks IMX7D_USDHC1_ROOT_CLK>;
clock-names = "ipg", "ahb", "per";
bus-width = <4>;
+ fsl,tuning-step = <2>;
+ fsl,tuning-start-tap = <20>;
status = "disabled";
};
@@ -1143,6 +1151,8 @@
<&clks IMX7D_USDHC2_ROOT_CLK>;
clock-names = "ipg", "ahb", "per";
bus-width = <4>;
+ fsl,tuning-step = <2>;
+ fsl,tuning-start-tap = <20>;
status = "disabled";
};
@@ -1155,6 +1165,8 @@
<&clks IMX7D_USDHC3_ROOT_CLK>;
clock-names = "ipg", "ahb", "per";
bus-width = <4>;
+ fsl,tuning-step = <2>;
+ fsl,tuning-start-tap = <20>;
status = "disabled";
};
@@ -1190,14 +1202,13 @@
};
};
- dma_apbh: dma-apbh@33000000 {
+ dma_apbh: dma-controller@33000000 {
compatible = "fsl,imx7d-dma-apbh", "fsl,imx28-dma-apbh";
reg = <0x33000000 0x2000>;
interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "gpmi0", "gpmi1", "gpmi2", "gpmi3";
#dma-cells = <1>;
dma-channels = <4>;
clocks = <&clks IMX7D_NAND_USDHC_BUS_RAWNAND_CLK>;
@@ -1206,7 +1217,7 @@
gpmi: gpmi-nand@33002000 {
compatible = "fsl,imx7d-gpmi-nand";
#address-cells = <1>;
- #size-cells = <1>;
+ #size-cells = <0>;
reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
reg-names = "gpmi-nand", "bch";
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/integratorap.dts b/arch/arm/boot/dts/integratorap.dts
index 94d2ff9836d0..e6c5a99e0741 100644
--- a/arch/arm/boot/dts/integratorap.dts
+++ b/arch/arm/boot/dts/integratorap.dts
@@ -155,6 +155,7 @@
pci: pciv3@62000000 {
compatible = "arm,integrator-ap-pci", "v3,v360epc-pci";
+ device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
diff --git a/arch/arm/boot/dts/kirkwood-lsxl.dtsi b/arch/arm/boot/dts/kirkwood-lsxl.dtsi
index 7b151acb9984..88b70ba1c8fe 100644
--- a/arch/arm/boot/dts/kirkwood-lsxl.dtsi
+++ b/arch/arm/boot/dts/kirkwood-lsxl.dtsi
@@ -10,6 +10,11 @@
ocp@f1000000 {
pinctrl: pin-controller@10000 {
+ /* Non-default UART pins */
+ pmx_uart0: pmx-uart0 {
+ marvell,pins = "mpp4", "mpp5";
+ };
+
pmx_power_hdd: pmx-power-hdd {
marvell,pins = "mpp10";
marvell,function = "gpo";
@@ -213,22 +218,11 @@
&mdio {
status = "okay";
- ethphy0: ethernet-phy@0 {
- reg = <0>;
- };
-
ethphy1: ethernet-phy@8 {
reg = <8>;
};
};
-&eth0 {
- status = "okay";
- ethernet0-port@0 {
- phy-handle = <&ethphy0>;
- };
-};
-
&eth1 {
status = "okay";
ethernet1-port@0 {
diff --git a/arch/arm/boot/dts/moxart-uc7112lx.dts b/arch/arm/boot/dts/moxart-uc7112lx.dts
index eb5291b0ee3a..e07b807b4cec 100644
--- a/arch/arm/boot/dts/moxart-uc7112lx.dts
+++ b/arch/arm/boot/dts/moxart-uc7112lx.dts
@@ -79,7 +79,7 @@
clocks = <&ref12>;
};
-&sdhci {
+&mmc {
status = "okay";
};
diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi
index f5f070a87482..764832ddfa78 100644
--- a/arch/arm/boot/dts/moxart.dtsi
+++ b/arch/arm/boot/dts/moxart.dtsi
@@ -93,8 +93,8 @@
clock-names = "PCLK";
};
- sdhci: sdhci@98e00000 {
- compatible = "moxa,moxart-sdhci";
+ mmc: mmc@98e00000 {
+ compatible = "moxa,moxart-mmc";
reg = <0x98e00000 0x5C>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_apb>;
diff --git a/arch/arm/boot/dts/omap3-cm-t3x.dtsi b/arch/arm/boot/dts/omap3-cm-t3x.dtsi
index cdb632df152a..11ae18944195 100644
--- a/arch/arm/boot/dts/omap3-cm-t3x.dtsi
+++ b/arch/arm/boot/dts/omap3-cm-t3x.dtsi
@@ -227,7 +227,7 @@
interrupt-parent = <&gpio2>;
interrupts = <25 0>; /* gpio_57 */
- pendown-gpio = <&gpio2 25 GPIO_ACTIVE_HIGH>;
+ pendown-gpio = <&gpio2 25 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <0x0>;
ti,x-max = /bits/ 16 <0x0fff>;
diff --git a/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi b/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi
index 3decc2d78a6c..a7f99ae0c1fe 100644
--- a/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi
+++ b/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi
@@ -54,7 +54,7 @@
interrupt-parent = <&gpio1>;
interrupts = <27 0>; /* gpio_27 */
- pendown-gpio = <&gpio1 27 GPIO_ACTIVE_HIGH>;
+ pendown-gpio = <&gpio1 27 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <0x0>;
ti,x-max = /bits/ 16 <0x0fff>;
diff --git a/arch/arm/boot/dts/omap3-gta04a5one.dts b/arch/arm/boot/dts/omap3-gta04a5one.dts
index 9db9fe67cd63..95df45cc70c0 100644
--- a/arch/arm/boot/dts/omap3-gta04a5one.dts
+++ b/arch/arm/boot/dts/omap3-gta04a5one.dts
@@ -5,9 +5,11 @@
#include "omap3-gta04a5.dts"
-&omap3_pmx_core {
+/ {
model = "Goldelico GTA04A5/Letux 2804 with OneNAND";
+};
+&omap3_pmx_core {
gpmc_pins: pinmux_gpmc_pins {
pinctrl-single,pins = <
diff --git a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
index c22833d4e568..fba69b3f79c2 100644
--- a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
+++ b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
@@ -311,7 +311,7 @@
interrupt-parent = <&gpio1>;
interrupts = <8 0>; /* boot6 / gpio_8 */
spi-max-frequency = <1000000>;
- pendown-gpio = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+ pendown-gpio = <&gpio1 8 GPIO_ACTIVE_LOW>;
vcc-supply = <&reg_vcc3>;
pinctrl-names = "default";
pinctrl-0 = <&tsc2048_pins>;
diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
index 185ce53de0ec..0523a369a4d7 100644
--- a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
@@ -149,7 +149,7 @@
interrupt-parent = <&gpio4>;
interrupts = <18 0>; /* gpio_114 */
- pendown-gpio = <&gpio4 18 GPIO_ACTIVE_HIGH>;
+ pendown-gpio = <&gpio4 18 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <0x0>;
ti,x-max = /bits/ 16 <0x0fff>;
diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
index 7fe0f9148232..d340eb722f12 100644
--- a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
@@ -160,7 +160,7 @@
interrupt-parent = <&gpio4>;
interrupts = <18 0>; /* gpio_114 */
- pendown-gpio = <&gpio4 18 GPIO_ACTIVE_HIGH>;
+ pendown-gpio = <&gpio4 18 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <0x0>;
ti,x-max = /bits/ 16 <0x0fff>;
diff --git a/arch/arm/boot/dts/omap3-pandora-common.dtsi b/arch/arm/boot/dts/omap3-pandora-common.dtsi
index 150d5be42d27..4ea5656825de 100644
--- a/arch/arm/boot/dts/omap3-pandora-common.dtsi
+++ b/arch/arm/boot/dts/omap3-pandora-common.dtsi
@@ -651,7 +651,7 @@
pinctrl-0 = <&penirq_pins>;
interrupt-parent = <&gpio3>;
interrupts = <30 IRQ_TYPE_NONE>; /* GPIO_94 */
- pendown-gpio = <&gpio3 30 GPIO_ACTIVE_HIGH>;
+ pendown-gpio = <&gpio3 30 GPIO_ACTIVE_LOW>;
vcc-supply = <&vaux4>;
ti,x-min = /bits/ 16 <0>;
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index a40fe8d49da6..73425f692774 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -656,12 +656,12 @@
/* Configure pwm clock source for timers 8 & 9 */
&timer8 {
assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
- assigned-clock-parents = <&sys_clkin_ck>;
+ assigned-clock-parents = <&sys_32k_ck>;
};
&timer9 {
assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
- assigned-clock-parents = <&sys_clkin_ck>;
+ assigned-clock-parents = <&sys_32k_ck>;
};
/*
@@ -678,6 +678,7 @@
&uart3 {
interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
&omap4_pmx_core 0x17c>;
+ overrun-throttle-ms = <500>;
};
&uart4 {
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index e78d3718f145..d38781025eef 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -354,7 +354,7 @@
interrupt-parent = <&gpio1>;
interrupts = <15 0>; /* gpio1_wk15 */
- pendown-gpio = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ pendown-gpio = <&gpio1 15 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <0x0>;
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index 764984c95c68..cd200910ccdf 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -759,7 +759,7 @@
xoadc: xoadc@197 {
compatible = "qcom,pm8921-adc";
- reg = <197>;
+ reg = <0x197>;
interrupts-extended = <&pmicintc 78 IRQ_TYPE_EDGE_RISING>;
#address-cells = <2>;
#size-cells = <0>;
@@ -1570,7 +1570,7 @@
};
etb@1a01000 {
- compatible = "coresight-etb10", "arm,primecell";
+ compatible = "arm,coresight-etb10", "arm,primecell";
reg = <0x1a01000 0x1000>;
clocks = <&rpmcc RPM_QDSS_CLK>;
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index 338256c59ca5..fc31ee980639 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -393,8 +393,8 @@
#address-cells = <3>;
#size-cells = <2>;
- ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000>,
- <0x82000000 0 0x40300000 0x40300000 0 0x00d00000>;
+ ranges = <0x81000000 0x0 0x00000000 0x40200000 0x0 0x00100000>,
+ <0x82000000 0x0 0x40300000 0x40300000 0x0 0x00d00000>;
interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
index 16c0da97932c..976a8a1fa953 100644
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -451,8 +451,8 @@
#address-cells = <3>;
#size-cells = <2>;
- ranges = <0x81000000 0 0x0fe00000 0x0fe00000 0 0x00100000 /* downstream I/O */
- 0x82000000 0 0x08000000 0x08000000 0 0x07e00000>; /* non-prefetchable memory */
+ ranges = <0x81000000 0x0 0x00000000 0x0fe00000 0x0 0x00010000 /* I/O */
+ 0x82000000 0x0 0x08000000 0x08000000 0x0 0x07e00000>; /* MEM */
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
@@ -502,8 +502,8 @@
#address-cells = <3>;
#size-cells = <2>;
- ranges = <0x81000000 0 0x31e00000 0x31e00000 0 0x00100000 /* downstream I/O */
- 0x82000000 0 0x2e000000 0x2e000000 0 0x03e00000>; /* non-prefetchable memory */
+ ranges = <0x81000000 0x0 0x00000000 0x31e00000 0x0 0x00010000 /* I/O */
+ 0x82000000 0x0 0x2e000000 0x2e000000 0x0 0x03e00000>; /* MEM */
interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
@@ -553,8 +553,8 @@
#address-cells = <3>;
#size-cells = <2>;
- ranges = <0x81000000 0 0x35e00000 0x35e00000 0 0x00100000 /* downstream I/O */
- 0x82000000 0 0x32000000 0x32000000 0 0x03e00000>; /* non-prefetchable memory */
+ ranges = <0x81000000 0x0 0x00000000 0x35e00000 0x0 0x00010000 /* I/O */
+ 0x82000000 0x0 0x32000000 0x32000000 0x0 0x03e00000>; /* MEM */
interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
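
Each ranges entry above is laid out as <pci.hi pci.mid pci.lo cpu-addr size.hi size.lo>: three child cells of PCI address, one parent cell of CPU address, two cells of size. The fix maps the I/O window (flags 0x81000000) at PCI I/O address 0 instead of reusing the CPU address, and shrinks it to the 64 KiB that Linux actually maps for legacy PCI I/O; the ipq4019 hunk above makes the matching change. Annotated first entry, as a sketch:

        /*        flags      PCI addr (2 cells)  CPU addr    size (2 cells)  */
        ranges = <0x81000000 0x0 0x00000000      0x0fe00000  0x0 0x00010000>;
        /* PCI I/O ports 0x0000-0xffff appear at CPU 0x0fe00000 */
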
diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi
index 356e9535f7a6..8b3eb93ec451 100644
--- a/arch/arm/boot/dts/qcom-mdm9615.dtsi
+++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi
@@ -82,14 +82,12 @@
};
};
- regulators {
- vsdcc_fixed: vsdcc-regulator {
- compatible = "regulator-fixed";
- regulator-name = "SDCC Power";
- regulator-min-microvolt = <2700000>;
- regulator-max-microvolt = <2700000>;
- regulator-always-on;
- };
+ vsdcc_fixed: vsdcc-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "SDCC Power";
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ regulator-always-on;
};
soc: soc {
@@ -323,6 +321,7 @@
pmicgpio: gpio@150 {
compatible = "qcom,pm8018-gpio", "qcom,ssbi-gpio";
+ reg = <0x150>;
interrupt-controller;
#interrupt-cells = <2>;
gpio-controller;
diff --git a/arch/arm/boot/dts/qcom-pm8841.dtsi b/arch/arm/boot/dts/qcom-pm8841.dtsi
index 2fd59c440903..c73e5b149ac5 100644
--- a/arch/arm/boot/dts/qcom-pm8841.dtsi
+++ b/arch/arm/boot/dts/qcom-pm8841.dtsi
@@ -25,6 +25,7 @@
compatible = "qcom,spmi-temp-alarm";
reg = <0x2400>;
interrupts = <4 0x24 0 IRQ_TYPE_EDGE_RISING>;
+ #thermal-sensor-cells = <0>;
};
};
diff --git a/arch/arm/boot/dts/rk3036-evb.dts b/arch/arm/boot/dts/rk3036-evb.dts
index 2a7e6624efb9..ea23ba98625e 100644
--- a/arch/arm/boot/dts/rk3036-evb.dts
+++ b/arch/arm/boot/dts/rk3036-evb.dts
@@ -31,7 +31,7 @@
&i2c1 {
status = "okay";
- hym8563: hym8563@51 {
+ hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index d282a7b638d8..cc2d596da7d4 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -337,12 +337,20 @@
pinctrl-0 = <&hdmi_ctl>;
status = "disabled";
- hdmi_in: port {
+ ports {
#address-cells = <1>;
#size-cells = <0>;
- hdmi_in_vop: endpoint@0 {
+
+ hdmi_in: port@0 {
reg = <0>;
- remote-endpoint = <&vop_out_hdmi>;
+
+ hdmi_in_vop: endpoint {
+ remote-endpoint = <&vop_out_hdmi>;
+ };
+ };
+
+ hdmi_out: port@1 {
+ reg = <1>;
};
};
};
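
The HDMI node is rebuilt to follow the of-graph convention: a device with more than one connection point wraps port@N children in a ports container, each port carrying a reg equal to its index and linking to the remote device through endpoints. The consumer side of the link above would look roughly like this (the vop port label is hypothetical):

        &vop {
                vop_out: port {
                        vop_out_hdmi: endpoint {
                                remote-endpoint = <&hdmi_in_vop>;
                        };
                };
        };
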
diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts
index c9a7f5409960..f8e51ca3ee00 100644
--- a/arch/arm/boot/dts/rk3188-radxarock.dts
+++ b/arch/arm/boot/dts/rk3188-radxarock.dts
@@ -67,7 +67,7 @@
#sound-dai-cells = <0>;
};
- ir_recv: gpio-ir-receiver {
+ ir_recv: ir-receiver {
compatible = "gpio-ir-receiver";
gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
index ee8a24a0e3cb..5e8ba80d7b4f 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -404,7 +404,7 @@
rockchip,pins = <2 RK_PD3 1 &pcfg_pull_none>;
};
- lcdc1_rgb24: ldcd1-rgb24 {
+ lcdc1_rgb24: lcdc1-rgb24 {
rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>,
<2 RK_PA1 1 &pcfg_pull_none>,
<2 RK_PA2 1 &pcfg_pull_none>,
@@ -632,7 +632,6 @@
&global_timer {
interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
- status = "disabled";
};
&local_timer {
diff --git a/arch/arm/boot/dts/rk3288-evb-act8846.dts b/arch/arm/boot/dts/rk3288-evb-act8846.dts
index 80080767c365..9ac40c100e3f 100644
--- a/arch/arm/boot/dts/rk3288-evb-act8846.dts
+++ b/arch/arm/boot/dts/rk3288-evb-act8846.dts
@@ -53,7 +53,7 @@
vin-supply = <&vcc_sys>;
};
- hym8563@51 {
+ rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi
index 5e0a19004e46..8d418881166f 100644
--- a/arch/arm/boot/dts/rk3288-firefly.dtsi
+++ b/arch/arm/boot/dts/rk3288-firefly.dtsi
@@ -233,7 +233,7 @@
vin-supply = <&vcc_sys>;
};
- hym8563: hym8563@51 {
+ hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;
diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts
index c41d012c8850..bf8f3f9bb4d5 100644
--- a/arch/arm/boot/dts/rk3288-miqi.dts
+++ b/arch/arm/boot/dts/rk3288-miqi.dts
@@ -145,7 +145,7 @@
vin-supply = <&vcc_sys>;
};
- hym8563: hym8563@51 {
+ hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;
diff --git a/arch/arm/boot/dts/rk3288-rock2-square.dts b/arch/arm/boot/dts/rk3288-rock2-square.dts
index cdcdc921ee09..7220de126635 100644
--- a/arch/arm/boot/dts/rk3288-rock2-square.dts
+++ b/arch/arm/boot/dts/rk3288-rock2-square.dts
@@ -165,7 +165,7 @@
};
&i2c0 {
- hym8563: hym8563@51 {
+ hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 7dcafd0833ba..36f943a3f3ad 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -942,7 +942,7 @@
status = "disabled";
};
- spdif: sound@ff88b0000 {
+ spdif: sound@ff8b0000 {
compatible = "rockchip,rk3288-spdif", "rockchip,rk3066-spdif";
reg = <0x0 0xff8b0000 0x0 0x10000>;
#sound-dai-cells = <0>;
@@ -1188,6 +1188,7 @@
clock-names = "dp", "pclk";
phys = <&edp_phy>;
phy-names = "dp";
+ power-domains = <&power RK3288_PD_VIO>;
resets = <&cru SRST_EDP>;
reset-names = "dp";
rockchip,grf = <&grf>;
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
index bce0b05ef7bf..0580eea90fbc 100644
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -108,6 +108,13 @@
reg = <0x1013c200 0x20>;
interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
clocks = <&cru CORE_PERI>;
+ status = "disabled";
+ /* The clock source and the sched_clock provided by the arm_global_timer
+ * on Rockchip rk3066a/rk3188 are quite unstable because their rates
+ * depend on the CPU frequency.
+ * Keep the arm_global_timer disabled in order to have the
+ * DW_APB_TIMER (rk3066a) or ROCKCHIP_TIMER (rk3188) selected by default.
+ */
};
local_timer: local-timer@1013c600 {
diff --git a/arch/arm/boot/dts/s3c6410-mini6410.dts b/arch/arm/boot/dts/s3c6410-mini6410.dts
index 1aeac33b0d34..0b07b3c31960 100644
--- a/arch/arm/boot/dts/s3c6410-mini6410.dts
+++ b/arch/arm/boot/dts/s3c6410-mini6410.dts
@@ -28,29 +28,21 @@
bootargs = "console=ttySAC0,115200n8 earlyprintk rootwait root=/dev/mmcblk0p1";
};
- clocks {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- fin_pll: oscillator@0 {
- compatible = "fixed-clock";
- reg = <0>;
- clock-frequency = <12000000>;
- clock-output-names = "fin_pll";
- #clock-cells = <0>;
- };
+ fin_pll: oscillator-0 {
+ compatible = "fixed-clock";
+ clock-frequency = <12000000>;
+ clock-output-names = "fin_pll";
+ #clock-cells = <0>;
+ };
- xusbxti: oscillator@1 {
- compatible = "fixed-clock";
- reg = <1>;
- clock-output-names = "xusbxti";
- clock-frequency = <48000000>;
- #clock-cells = <0>;
- };
+ xusbxti: oscillator-1 {
+ compatible = "fixed-clock";
+ clock-output-names = "xusbxti";
+ clock-frequency = <48000000>;
+ #clock-cells = <0>;
};
- srom-cs1@18000000 {
+ srom-cs1-bus@18000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -59,7 +51,7 @@
ethernet@18000000 {
compatible = "davicom,dm9000";
- reg = <0x18000000 0x2 0x18000004 0x2>;
+ reg = <0x18000000 0x2>, <0x18000004 0x2>;
interrupt-parent = <&gpn>;
interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
davicom,no-eeprom;
@@ -201,12 +193,12 @@
};
&pinctrl0 {
- gpio_leds: gpio-leds {
+ gpio_leds: gpio-leds-pins {
samsung,pins = "gpk-4", "gpk-5", "gpk-6", "gpk-7";
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- gpio_keys: gpio-keys {
+ gpio_keys: gpio-keys-pins {
samsung,pins = "gpn-0", "gpn-1", "gpn-2", "gpn-3",
"gpn-4", "gpn-5", "gpl-11", "gpl-12";
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
diff --git a/arch/arm/boot/dts/s3c64xx-pinctrl.dtsi b/arch/arm/boot/dts/s3c64xx-pinctrl.dtsi
index 8e9594d64b57..0a3186d57cb5 100644
--- a/arch/arm/boot/dts/s3c64xx-pinctrl.dtsi
+++ b/arch/arm/boot/dts/s3c64xx-pinctrl.dtsi
@@ -16,111 +16,111 @@
* Pin banks
*/
- gpa: gpa {
+ gpa: gpa-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
- gpb: gpb {
+ gpb: gpb-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
- gpc: gpc {
+ gpc: gpc-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
- gpd: gpd {
+ gpd: gpd-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
- gpe: gpe {
+ gpe: gpe-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
};
- gpf: gpf {
+ gpf: gpf-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
- gpg: gpg {
+ gpg: gpg-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
- gph: gph {
+ gph: gph-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
- gpi: gpi {
+ gpi: gpi-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
};
- gpj: gpj {
+ gpj: gpj-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
};
- gpk: gpk {
+ gpk: gpk-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
};
- gpl: gpl {
+ gpl: gpl-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
- gpm: gpm {
+ gpm: gpm-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
- gpn: gpn {
+ gpn: gpn-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
- gpo: gpo {
+ gpo: gpo-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
- gpp: gpp {
+ gpp: gpp-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
- gpq: gpq {
+ gpq: gpq-gpio-bank {
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
@@ -131,225 +131,225 @@
* Pin groups
*/
- uart0_data: uart0-data {
+ uart0_data: uart0-data-pins {
samsung,pins = "gpa-0", "gpa-1";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- uart0_fctl: uart0-fctl {
+ uart0_fctl: uart0-fctl-pins {
samsung,pins = "gpa-2", "gpa-3";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- uart1_data: uart1-data {
+ uart1_data: uart1-data-pins {
samsung,pins = "gpa-4", "gpa-5";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- uart1_fctl: uart1-fctl {
+ uart1_fctl: uart1-fctl-pins {
samsung,pins = "gpa-6", "gpa-7";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- uart2_data: uart2-data {
+ uart2_data: uart2-data-pins {
samsung,pins = "gpb-0", "gpb-1";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- uart3_data: uart3-data {
+ uart3_data: uart3-data-pins {
samsung,pins = "gpb-2", "gpb-3";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- ext_dma_0: ext-dma-0 {
+ ext_dma_0: ext-dma-0-pins {
samsung,pins = "gpb-0", "gpb-1";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- ext_dma_1: ext-dma-1 {
+ ext_dma_1: ext-dma-1-pins {
samsung,pins = "gpb-2", "gpb-3";
samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- irda_data_0: irda-data-0 {
+ irda_data_0: irda-data-0-pins {
samsung,pins = "gpb-0", "gpb-1";
samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- irda_data_1: irda-data-1 {
+ irda_data_1: irda-data-1-pins {
samsung,pins = "gpb-2", "gpb-3";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- irda_sdbw: irda-sdbw {
+ irda_sdbw: irda-sdbw-pins {
samsung,pins = "gpb-4";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- i2c0_bus: i2c0-bus {
+ i2c0_bus: i2c0-bus-pins {
samsung,pins = "gpb-5", "gpb-6";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
};
- i2c1_bus: i2c1-bus {
+ i2c1_bus: i2c1-bus-pins {
/* S3C6410-only */
samsung,pins = "gpb-2", "gpb-3";
samsung,pin-function = <EXYNOS_PIN_FUNC_6>;
samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
};
- spi0_bus: spi0-bus {
+ spi0_bus: spi0-bus-pins {
samsung,pins = "gpc-0", "gpc-1", "gpc-2";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
};
- spi0_cs: spi0-cs {
+ spi0_cs: spi0-cs-pins {
samsung,pins = "gpc-3";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- spi1_bus: spi1-bus {
+ spi1_bus: spi1-bus-pins {
samsung,pins = "gpc-4", "gpc-5", "gpc-6";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
};
- spi1_cs: spi1-cs {
+ spi1_cs: spi1-cs-pins {
samsung,pins = "gpc-7";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- sd0_cmd: sd0-cmd {
+ sd0_cmd: sd0-cmd-pins {
samsung,pins = "gpg-1";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- sd0_clk: sd0-clk {
+ sd0_clk: sd0-clk-pins {
samsung,pins = "gpg-0";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- sd0_bus1: sd0-bus1 {
+ sd0_bus1: sd0-bus1-pins {
samsung,pins = "gpg-2";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- sd0_bus4: sd0-bus4 {
+ sd0_bus4: sd0-bus4-pins {
samsung,pins = "gpg-2", "gpg-3", "gpg-4", "gpg-5";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- sd0_cd: sd0-cd {
+ sd0_cd: sd0-cd-pins {
samsung,pins = "gpg-6";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
};
- sd1_cmd: sd1-cmd {
+ sd1_cmd: sd1-cmd-pins {
samsung,pins = "gph-1";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- sd1_clk: sd1-clk {
+ sd1_clk: sd1-clk-pins {
samsung,pins = "gph-0";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- sd1_bus1: sd1-bus1 {
+ sd1_bus1: sd1-bus1-pins {
samsung,pins = "gph-2";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- sd1_bus4: sd1-bus4 {
+ sd1_bus4: sd1-bus4-pins {
samsung,pins = "gph-2", "gph-3", "gph-4", "gph-5";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- sd1_bus8: sd1-bus8 {
+ sd1_bus8: sd1-bus8-pins {
samsung,pins = "gph-2", "gph-3", "gph-4", "gph-5",
"gph-6", "gph-7", "gph-8", "gph-9";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- sd1_cd: sd1-cd {
+ sd1_cd: sd1-cd-pins {
samsung,pins = "gpg-6";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
};
- sd2_cmd: sd2-cmd {
+ sd2_cmd: sd2-cmd-pins {
samsung,pins = "gpc-4";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- sd2_clk: sd2-clk {
+ sd2_clk: sd2-clk-pins {
samsung,pins = "gpc-5";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- sd2_bus1: sd2-bus1 {
+ sd2_bus1: sd2-bus1-pins {
samsung,pins = "gph-6";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- sd2_bus4: sd2-bus4 {
+ sd2_bus4: sd2-bus4-pins {
samsung,pins = "gph-6", "gph-7", "gph-8", "gph-9";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- i2s0_bus: i2s0-bus {
+ i2s0_bus: i2s0-bus-pins {
samsung,pins = "gpd-0", "gpd-2", "gpd-3", "gpd-4";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- i2s0_cdclk: i2s0-cdclk {
+ i2s0_cdclk: i2s0-cdclk-pins {
samsung,pins = "gpd-1";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- i2s1_bus: i2s1-bus {
+ i2s1_bus: i2s1-bus-pins {
samsung,pins = "gpe-0", "gpe-2", "gpe-3", "gpe-4";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- i2s1_cdclk: i2s1-cdclk {
+ i2s1_cdclk: i2s1-cdclk-pins {
samsung,pins = "gpe-1";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- i2s2_bus: i2s2-bus {
+ i2s2_bus: i2s2-bus-pins {
/* S3C6410-only */
samsung,pins = "gpc-4", "gpc-5", "gpc-6", "gph-6",
"gph-8", "gph-9";
@@ -357,50 +357,50 @@
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- i2s2_cdclk: i2s2-cdclk {
+ i2s2_cdclk: i2s2-cdclk-pins {
/* S3C6410-only */
samsung,pins = "gph-7";
samsung,pin-function = <EXYNOS_PIN_FUNC_5>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- pcm0_bus: pcm0-bus {
+ pcm0_bus: pcm0-bus-pins {
samsung,pins = "gpd-0", "gpd-2", "gpd-3", "gpd-4";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- pcm0_extclk: pcm0-extclk {
+ pcm0_extclk: pcm0-extclk-pins {
samsung,pins = "gpd-1";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- pcm1_bus: pcm1-bus {
+ pcm1_bus: pcm1-bus-pins {
samsung,pins = "gpe-0", "gpe-2", "gpe-3", "gpe-4";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- pcm1_extclk: pcm1-extclk {
+ pcm1_extclk: pcm1-extclk-pins {
samsung,pins = "gpe-1";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- ac97_bus_0: ac97-bus-0 {
+ ac97_bus_0: ac97-bus-0-pins {
samsung,pins = "gpd-0", "gpd-1", "gpd-2", "gpd-3", "gpd-4";
samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- ac97_bus_1: ac97-bus-1 {
+ ac97_bus_1: ac97-bus-1-pins {
samsung,pins = "gpe-0", "gpe-1", "gpe-2", "gpe-3", "gpe-4";
samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- cam_port: cam-port {
+ cam_port: cam-port-pins {
samsung,pins = "gpf-0", "gpf-1", "gpf-2", "gpf-4",
"gpf-5", "gpf-6", "gpf-7", "gpf-8",
"gpf-9", "gpf-10", "gpf-11", "gpf-12";
@@ -408,242 +408,242 @@
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- cam_rst: cam-rst {
+ cam_rst: cam-rst-pins {
samsung,pins = "gpf-3";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- cam_field: cam-field {
+ cam_field: cam-field-pins {
/* S3C6410-only */
samsung,pins = "gpb-4";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- pwm_extclk: pwm-extclk {
+ pwm_extclk: pwm-extclk-pins {
samsung,pins = "gpf-13";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- pwm0_out: pwm0-out {
+ pwm0_out: pwm0-out-pins {
samsung,pins = "gpf-14";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- pwm1_out: pwm1-out {
+ pwm1_out: pwm1-out-pins {
samsung,pins = "gpf-15";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- clkout0: clkout-0 {
+ clkout0: clkout-0-pins {
samsung,pins = "gpf-14";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col0_0: keypad-col0-0 {
+ keypad_col0_0: keypad-col0-0-pins {
samsung,pins = "gph-0";
samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col1_0: keypad-col1-0 {
+ keypad_col1_0: keypad-col1-0-pins {
samsung,pins = "gph-1";
samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col2_0: keypad-col2-0 {
+ keypad_col2_0: keypad-col2-0-pins {
samsung,pins = "gph-2";
samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col3_0: keypad-col3-0 {
+ keypad_col3_0: keypad-col3-0-pins {
samsung,pins = "gph-3";
samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col4_0: keypad-col4-0 {
+ keypad_col4_0: keypad-col4-0-pins {
samsung,pins = "gph-4";
samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col5_0: keypad-col5-0 {
+ keypad_col5_0: keypad-col5-0-pins {
samsung,pins = "gph-5";
samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col6_0: keypad-col6-0 {
+ keypad_col6_0: keypad-col6-0-pins {
samsung,pins = "gph-6";
samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col7_0: keypad-col7-0 {
+ keypad_col7_0: keypad-col7-0-pins {
samsung,pins = "gph-7";
samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col0_1: keypad-col0-1 {
+ keypad_col0_1: keypad-col0-1-pins {
samsung,pins = "gpl-0";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col1_1: keypad-col1-1 {
+ keypad_col1_1: keypad-col1-1-pins {
samsung,pins = "gpl-1";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col2_1: keypad-col2-1 {
+ keypad_col2_1: keypad-col2-1-pins {
samsung,pins = "gpl-2";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col3_1: keypad-col3-1 {
+ keypad_col3_1: keypad-col3-1-pins {
samsung,pins = "gpl-3";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col4_1: keypad-col4-1 {
+ keypad_col4_1: keypad-col4-1-pins {
samsung,pins = "gpl-4";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col5_1: keypad-col5-1 {
+ keypad_col5_1: keypad-col5-1-pins {
samsung,pins = "gpl-5";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col6_1: keypad-col6-1 {
+ keypad_col6_1: keypad-col6-1-pins {
samsung,pins = "gpl-6";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_col7_1: keypad-col7-1 {
+ keypad_col7_1: keypad-col7-1-pins {
samsung,pins = "gpl-7";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row0_0: keypad-row0-0 {
+ keypad_row0_0: keypad-row0-0-pins {
samsung,pins = "gpk-8";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row1_0: keypad-row1-0 {
+ keypad_row1_0: keypad-row1-0-pins {
samsung,pins = "gpk-9";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row2_0: keypad-row2-0 {
+ keypad_row2_0: keypad-row2-0-pins {
samsung,pins = "gpk-10";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row3_0: keypad-row3-0 {
+ keypad_row3_0: keypad-row3-0-pins {
samsung,pins = "gpk-11";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row4_0: keypad-row4-0 {
+ keypad_row4_0: keypad-row4-0-pins {
samsung,pins = "gpk-12";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row5_0: keypad-row5-0 {
+ keypad_row5_0: keypad-row5-0-pins {
samsung,pins = "gpk-13";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row6_0: keypad-row6-0 {
+ keypad_row6_0: keypad-row6-0-pins {
samsung,pins = "gpk-14";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row7_0: keypad-row7-0 {
+ keypad_row7_0: keypad-row7-0-pins {
samsung,pins = "gpk-15";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row0_1: keypad-row0-1 {
+ keypad_row0_1: keypad-row0-1-pins {
samsung,pins = "gpn-0";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row1_1: keypad-row1-1 {
+ keypad_row1_1: keypad-row1-1-pins {
samsung,pins = "gpn-1";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row2_1: keypad-row2-1 {
+ keypad_row2_1: keypad-row2-1-pins {
samsung,pins = "gpn-2";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row3_1: keypad-row3-1 {
+ keypad_row3_1: keypad-row3-1-pins {
samsung,pins = "gpn-3";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row4_1: keypad-row4-1 {
+ keypad_row4_1: keypad-row4-1-pins {
samsung,pins = "gpn-4";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row5_1: keypad-row5-1 {
+ keypad_row5_1: keypad-row5-1-pins {
samsung,pins = "gpn-5";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row6_1: keypad-row6-1 {
+ keypad_row6_1: keypad-row6-1-pins {
samsung,pins = "gpn-6";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- keypad_row7_1: keypad-row7-1 {
+ keypad_row7_1: keypad-row7-1-pins {
samsung,pins = "gpn-7";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- lcd_ctrl: lcd-ctrl {
+ lcd_ctrl: lcd-ctrl-pins {
samsung,pins = "gpj-8", "gpj-9", "gpj-10", "gpj-11";
samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- lcd_data16: lcd-data-width16 {
+ lcd_data16: lcd-data-width16-pins {
samsung,pins = "gpi-3", "gpi-4", "gpi-5", "gpi-6",
"gpi-7", "gpi-10", "gpi-11", "gpi-12",
"gpi-13", "gpi-14", "gpi-15", "gpj-3",
@@ -652,7 +652,7 @@
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- lcd_data18: lcd-data-width18 {
+ lcd_data18: lcd-data-width18-pins {
samsung,pins = "gpi-2", "gpi-3", "gpi-4", "gpi-5",
"gpi-6", "gpi-7", "gpi-10", "gpi-11",
"gpi-12", "gpi-13", "gpi-14", "gpi-15",
@@ -662,7 +662,7 @@
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- lcd_data24: lcd-data-width24 {
+ lcd_data24: lcd-data-width24-pins {
samsung,pins = "gpi-0", "gpi-1", "gpi-2", "gpi-3",
"gpi-4", "gpi-5", "gpi-6", "gpi-7",
"gpi-8", "gpi-9", "gpi-10", "gpi-11",
@@ -673,7 +673,7 @@
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
- hsi_bus: hsi-bus {
+ hsi_bus: hsi-bus-pins {
samsung,pins = "gpk-0", "gpk-1", "gpk-2", "gpk-3",
"gpk-4", "gpk-5", "gpk-6", "gpk-7";
samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
diff --git a/arch/arm/boot/dts/s5pv210-smdkv210.dts b/arch/arm/boot/dts/s5pv210-smdkv210.dts
index 84b38f185199..53a841ecf7a4 100644
--- a/arch/arm/boot/dts/s5pv210-smdkv210.dts
+++ b/arch/arm/boot/dts/s5pv210-smdkv210.dts
@@ -15,6 +15,7 @@
*/
/dts-v1/;
+#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/input/input.h>
#include "s5pv210.dtsi"
@@ -31,11 +32,18 @@
reg = <0x20000000 0x40000000>;
};
- ethernet@18000000 {
+ pmic_ap_clk: clock-0 {
+ /* Workaround for missing PMIC and its clock */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ ethernet@a8000000 {
compatible = "davicom,dm9000";
- reg = <0xA8000000 0x2 0xA8000002 0x2>;
+ reg = <0xa8000000 0x2>, <0xa8000002 0x2>;
interrupt-parent = <&gph1>;
- interrupts = <1 4>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
local-mac-address = [00 00 de ad be ef];
davicom,no-eeprom;
};
@@ -47,6 +55,14 @@
default-brightness-level = <6>;
pinctrl-names = "default";
pinctrl-0 = <&pwm3_out>;
+ power-supply = <&dc5v_reg>;
+ };
+
+ dc5v_reg: regulator-0 {
+ compatible = "regulator-fixed";
+ regulator-name = "DC5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
};
};
@@ -147,6 +163,8 @@
&rtc {
status = "okay";
+ clocks = <&clocks CLK_RTC>, <&pmic_ap_clk>;
+ clock-names = "rtc", "rtc_src";
};
&sdhci0 {
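
The interrupts fix above replaces the bare trigger value 4 with the IRQ_TYPE_LEVEL_HIGH macro made available by the newly added irq.h include. For reference, the trigger flags come from the shared dt-bindings header (excerpted from include/dt-bindings/interrupt-controller/irq.h):

    #define IRQ_TYPE_NONE		0
    #define IRQ_TYPE_EDGE_RISING	1
    #define IRQ_TYPE_EDGE_FALLING	2
    #define IRQ_TYPE_EDGE_BOTH	(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
    #define IRQ_TYPE_LEVEL_HIGH	4
    #define IRQ_TYPE_LEVEL_LOW	8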
diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
index 61822afa30ab..cd8f6e7a7f7f 100644
--- a/arch/arm/boot/dts/s5pv210.dtsi
+++ b/arch/arm/boot/dts/s5pv210.dtsi
@@ -571,7 +571,7 @@
interrupts = <29>;
clocks = <&clocks CLK_CSIS>,
<&clocks SCLK_CSIS>;
- clock-names = "clk_csis",
+ clock-names = "csis",
"sclk_csis";
bus-width = <4>;
status = "disabled";
diff --git a/arch/arm/boot/dts/spear320-hmi.dts b/arch/arm/boot/dts/spear320-hmi.dts
index 367ba48aac3e..5c562fb4886f 100644
--- a/arch/arm/boot/dts/spear320-hmi.dts
+++ b/arch/arm/boot/dts/spear320-hmi.dts
@@ -242,7 +242,7 @@
irq-trigger = <0x1>;
stmpegpio: stmpe-gpio {
- compatible = "stmpe,gpio";
+ compatible = "st,stmpe-gpio";
reg = <0>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
index fd41243a0b2c..9d5a04a46b14 100644
--- a/arch/arm/boot/dts/spear600.dtsi
+++ b/arch/arm/boot/dts/spear600.dtsi
@@ -47,7 +47,7 @@
compatible = "arm,pl110", "arm,primecell";
reg = <0xfc200000 0x1000>;
interrupt-parent = <&vic1>;
- interrupts = <12>;
+ interrupts = <13>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi
index 9314128df185..639a6b65749f 100644
--- a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi
@@ -284,6 +284,88 @@
slew-rate = <2>;
};
};
+
+ can1_pins_a: can1-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('A', 12, AF9)>; /* CAN1_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('A', 11, AF9)>; /* CAN1_RX */
+ bias-pull-up;
+ };
+ };
+
+ can1_pins_b: can1-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 9, AF9)>; /* CAN1_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 8, AF9)>; /* CAN1_RX */
+ bias-pull-up;
+ };
+ };
+
+ can1_pins_c: can1-2 {
+ pins1 {
+ pinmux = <STM32_PINMUX('D', 1, AF9)>; /* CAN1_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('D', 0, AF9)>; /* CAN1_RX */
+ bias-pull-up;
+ };
+ };
+
+ can1_pins_d: can1-3 {
+ pins1 {
+ pinmux = <STM32_PINMUX('H', 13, AF9)>; /* CAN1_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('H', 14, AF9)>; /* CAN1_RX */
+ bias-pull-up;
+ };
+ };
+
+ can2_pins_a: can2-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 6, AF9)>; /* CAN2_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 5, AF9)>; /* CAN2_RX */
+ bias-pull-up;
+ };
+ };
+
+ can2_pins_b: can2-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 13, AF9)>; /* CAN2_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 12, AF9)>; /* CAN2_RX */
+ bias-pull-up;
+ };
+ };
+
+ can3_pins_a: can3-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('A', 15, AF11)>; /* CAN3_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('A', 8, AF11)>; /* CAN3_RX */
+ bias-pull-up;
+ };
+ };
+
+ can3_pins_b: can3-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 4, AF11)>; /* CAN3_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 3, AF11)>; /* CAN3_RX */
+ bias-pull-up;
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
index 4c6704e4c57e..74d5732c412b 100644
--- a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
+++ b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
@@ -62,6 +62,30 @@
states = <1100000 0>, <1300000 1>;
};
+ reg_vcc_dram: vcc-dram {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-dram";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ enable-active-high;
+ gpio = <&r_pio 0 9 GPIO_ACTIVE_HIGH>; /* PL9 */
+ vin-supply = <&reg_vcc5v0>;
+ };
+
+ reg_vcc1v2: vcc1v2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc1v2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ regulator-boot-on;
+ enable-active-high;
+ gpio = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */
+ vin-supply = <&reg_vcc5v0>;
+ };
+
wifi_pwrseq: wifi_pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
index 60a588ce45e1..049ca941b479 100644
--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
+++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
@@ -585,8 +585,8 @@
compatible = "socionext,uniphier-dwc3", "snps,dwc3";
status = "disabled";
reg = <0x65a00000 0xcd00>;
- interrupt-names = "host", "peripheral";
- interrupts = <0 134 4>, <0 135 4>;
+ interrupt-names = "dwc_usb3";
+ interrupts = <0 134 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>, <&pinctrl_usb2>;
clock-names = "ref", "bus_early", "suspend";
@@ -681,8 +681,8 @@
compatible = "socionext,uniphier-dwc3", "snps,dwc3";
status = "disabled";
reg = <0x65c00000 0xcd00>;
- interrupt-names = "host", "peripheral";
- interrupts = <0 137 4>, <0 138 4>;
+ interrupt-names = "dwc_usb3";
+ interrupts = <0 137 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1>, <&pinctrl_usb3>;
clock-names = "ref", "bus_early", "suspend";
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
index d5b47d526f9e..7f1edc3ba6a1 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
@@ -132,6 +132,7 @@
reg = <0x2c0f0000 0x1000>;
interrupts = <0 84 4>;
cache-level = <2>;
+ cache-unified;
};
pmu {
diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi
index ca6425ad794c..f23ebe0558c3 100644
--- a/arch/arm/boot/dts/zynq-7000.dtsi
+++ b/arch/arm/boot/dts/zynq-7000.dtsi
@@ -1,6 +1,9 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2011 - 2014 Xilinx
+ * Xilinx Zynq 7000 DTSI
+ * Describes the hardware common to all Zynq 7000-based boards.
+ *
+ * Copyright (C) 2011 - 2015 Xilinx
*/
/ {
@@ -59,7 +62,42 @@
regulator-always-on;
};
+ replicator {
+ compatible = "arm,coresight-static-replicator";
+ clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+ clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+
+ out-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* replicator output ports */
+ port@0 {
+ reg = <0>;
+ replicator_out_port0: endpoint {
+ remote-endpoint = <&tpiu_in_port>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ replicator_out_port1: endpoint {
+ remote-endpoint = <&etb_in_port>;
+ };
+ };
+ };
+ in-ports {
+ /* replicator input port */
+ port {
+ replicator_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel_out_port>;
+ };
+ };
+ };
+ };
+
amba: amba {
+ u-boot,dm-pre-reloc;
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -155,6 +193,13 @@
reg = <0xf8006000 0x1000>;
};
+ ocmc: ocmc@f800c000 {
+ compatible = "xlnx,zynq-ocmc-1.0";
+ interrupt-parent = <&intc>;
+ interrupts = <0 3 4>;
+ reg = <0xf800c000 0x1000>;
+ };
+
uart0: serial@e0000000 {
compatible = "xlnx,xuartps", "cdns,uart-r1p8";
status = "disabled";
@@ -197,6 +242,45 @@
#size-cells = <0>;
};
+ qspi: spi@e000d000 {
+ clock-names = "ref_clk", "pclk";
+ clocks = <&clkc 10>, <&clkc 43>;
+ compatible = "xlnx,zynq-qspi-1.0";
+ status = "disabled";
+ interrupt-parent = <&intc>;
+ interrupts = <0 19 4>;
+ reg = <0xe000d000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ smcc: memory-controller@e000e000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+ clock-names = "memclk", "apb_pclk";
+ clocks = <&clkc 11>, <&clkc 44>;
+ compatible = "arm,pl353-smc-r2p1", "arm,primecell";
+ interrupt-parent = <&intc>;
+ interrupts = <0 18 4>;
+ ranges;
+ reg = <0xe000e000 0x1000>;
+ nand0: flash@e1000000 {
+ status = "disabled";
+ compatible = "arm,pl353-nand-r2p1";
+ reg = <0xe1000000 0x1000000>;
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ };
+ nor0: flash@e2000000 {
+ status = "disabled";
+ compatible = "cfi-flash";
+ reg = <0xe2000000 0x2000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+
gem0: ethernet@e000b000 {
compatible = "cdns,zynq-gem", "cdns,gem";
reg = <0xe000b000 0x1000>;
@@ -240,15 +324,17 @@
};
slcr: slcr@f8000000 {
+ u-boot,dm-pre-reloc;
#address-cells = <1>;
#size-cells = <1>;
compatible = "xlnx,zynq-slcr", "syscon", "simple-mfd";
reg = <0xF8000000 0x1000>;
ranges;
clkc: clkc@100 {
+ u-boot,dm-pre-reloc;
#clock-cells = <1>;
compatible = "xlnx,ps7-clkc";
- fclk-enable = <0>;
+ fclk-enable = <0xf>;
clock-output-names = "armpll", "ddrpll", "iopll", "cpu_6or4x",
"cpu_3or2x", "cpu_2x", "cpu_1x", "ddr2x", "ddr3x",
"dci", "lqspi", "smc", "pcap", "gem0", "gem1",
@@ -297,14 +383,19 @@
devcfg: devcfg@f8007000 {
compatible = "xlnx,zynq-devcfg-1.0";
- reg = <0xf8007000 0x100>;
interrupt-parent = <&intc>;
interrupts = <0 8 4>;
- clocks = <&clkc 12>;
- clock-names = "ref_clk";
+ reg = <0xf8007000 0x100>;
+ clocks = <&clkc 12>, <&clkc 15>, <&clkc 16>, <&clkc 17>, <&clkc 18>;
+ clock-names = "ref_clk", "fclk0", "fclk1", "fclk2", "fclk3";
syscon = <&slcr>;
};
+ efuse: efuse@f800d000 {
+ compatible = "xlnx,zynq-efuse";
+ reg = <0xf800d000 0x20>;
+ };
+
global_timer: timer@f8f00200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0xf8f00200 0x20>;
@@ -365,5 +456,128 @@
reg = <0xf8005000 0x1000>;
timeout-sec = <10>;
};
+
+ etb@f8801000 {
+ compatible = "arm,coresight-etb10", "arm,primecell";
+ reg = <0xf8801000 0x1000>;
+ clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+ clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+ in-ports {
+ port {
+ etb_in_port: endpoint {
+ remote-endpoint = <&replicator_out_port1>;
+ };
+ };
+ };
+ };
+
+ tpiu@f8803000 {
+ compatible = "arm,coresight-tpiu", "arm,primecell";
+ reg = <0xf8803000 0x1000>;
+ clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+ clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+ in-ports {
+ port {
+ tpiu_in_port: endpoint {
+ remote-endpoint = <&replicator_out_port0>;
+ };
+ };
+ };
+ };
+
+ funnel@f8804000 {
+ compatible = "arm,coresight-static-funnel", "arm,primecell";
+ reg = <0xf8804000 0x1000>;
+ clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+ clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+
+ /* funnel output ports */
+ out-ports {
+ port {
+ funnel_out_port: endpoint {
+ remote-endpoint =
+ <&replicator_in_port0>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* funnel input ports */
+ port@0 {
+ reg = <0>;
+ funnel0_in_port0: endpoint {
+ remote-endpoint = <&ptm0_out_port>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ funnel0_in_port1: endpoint {
+ remote-endpoint = <&ptm1_out_port>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ funnel0_in_port2: endpoint {
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ funnel0_in_port3: endpoint {
+ remote-endpoint = <&itm_out_port>;
+ };
+ };
+ /* The other input ports are not connected to anything */
+ };
+ };
+
+ /* The ITM is not supported by the kernel; only its device node is kept here */
+ itm@f8805000 {
+ reg = <0xf8805000 0x1000>;
+ clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+ clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+ out-ports {
+ port {
+ itm_out_port: endpoint {
+ remote-endpoint = <&funnel0_in_port3>;
+ };
+ };
+ };
+ };
+
+ ptm@f889c000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0xf889c000 0x1000>;
+ clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+ clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+ cpu = <&cpu0>;
+ out-ports {
+ port {
+ ptm0_out_port: endpoint {
+ remote-endpoint = <&funnel0_in_port0>;
+ };
+ };
+ };
+ };
+
+ ptm@f889d000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0xf889d000 0x1000>;
+ clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+ clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+ cpu = <&cpu1>;
+ out-ports {
+ port {
+ ptm1_out_port: endpoint {
+ remote-endpoint = <&funnel0_in_port1>;
+ };
+ };
+ };
+ };
};
};
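
The CoreSight nodes added above (PTMs and the ITM feeding a funnel, whose output a replicator fans out to the ETB and TPIU) are tied together through OF-graph remote-endpoint links. As a rough, hypothetical illustration of how such links are resolved at runtime, not part of this patch, a driver can walk a node's endpoints with the generic of_graph helpers:

    #include <linux/of_graph.h>

    /* Print each endpoint of @np and the remote device node it links to. */
    static void walk_trace_links(struct device_node *np)
    {
            struct device_node *ep = NULL, *remote;

            while ((ep = of_graph_get_next_endpoint(np, ep))) {
                    remote = of_graph_get_remote_port_parent(ep);
                    pr_info("%pOF -> %pOF\n", ep, remote);
                    of_node_put(remote);
            }
    }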
diff --git a/arch/arm/boot/dts/zynq-cc108.dts b/arch/arm/boot/dts/zynq-cc108.dts
index 8b9ab9bba23b..64d73ecbc592 100644
--- a/arch/arm/boot/dts/zynq-cc108.dts
+++ b/arch/arm/boot/dts/zynq-cc108.dts
@@ -18,6 +18,7 @@
aliases {
ethernet0 = &gem0;
serial0 = &uart0;
+ spi0 = &qspi;
};
chosen {
@@ -52,6 +53,45 @@
};
};
+&qspi {
+ status = "okay";
+ is-dual = <0>;
+ num-cs = <1>;
+ flash@0 { /* 16 MB */
+ compatible = "n25q128a11";
+ reg = <0x0>;
+ spi-max-frequency = <50000000>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "qspi-fsbl-uboot-bs";
+ reg = <0x0 0x400000>; /* 4MB */
+ };
+ partition@400000 {
+ label = "qspi-linux";
+ reg = <0x400000 0x400000>; /* 4MB */
+ };
+ partition@800000 {
+ label = "qspi-rootfs";
+ reg = <0x800000 0x400000>; /* 4MB */
+ };
+ partition@c00000 {
+ label = "qspi-devicetree";
+ reg = <0xc00000 0x100000>; /* 1MB */
+ };
+ partition@d00000 {
+ label = "qspi-scratch";
+ reg = <0xd00000 0x200000>; /* 2MB */
+ };
+ partition@f00000 {
+ label = "qspi-uboot-env";
+ reg = <0xf00000 0x100000>; /* 1MB */
+ };
+ };
+};
+
&sdhci1 {
status = "okay";
broken-cd;
@@ -59,6 +99,7 @@
};
&uart0 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zc702.dts b/arch/arm/boot/dts/zynq-zc702.dts
index 27cd6cb52f1b..c9940fb366ce 100644
--- a/arch/arm/boot/dts/zynq-zc702.dts
+++ b/arch/arm/boot/dts/zynq-zc702.dts
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2011 - 2014 Xilinx
+ * Copyright (C) 2011 - 2015 Xilinx
* Copyright (C) 2012 National Instruments Corp.
*/
/dts-v1/;
@@ -14,7 +14,9 @@
ethernet0 = &gem0;
i2c0 = &i2c0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
+ usb0 = &usb0;
};
memory@0 {
@@ -56,9 +58,12 @@
};
};
- usb_phy0: phy0 {
- compatible = "usb-nop-xceiv";
+ usb_phy0: phy0@e0002000 {
+ compatible = "ulpi-phy";
#phy-cells = <0>;
+ reg = <0xe0002000 0x1000>;
+ view-port = <0x0170>;
+ drv-vbus;
};
};
@@ -85,6 +90,8 @@
phy-handle = <&ethernet_phy>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gem0_default>;
+ phy-reset-gpio = <&gpio0 11 0>;
+ phy-reset-active-low;
ethernet_phy: ethernet-phy@7 {
reg = <7>;
@@ -100,8 +107,11 @@
&i2c0 {
status = "okay";
clock-frequency = <400000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio0 50 0>;
+ sda-gpios = <&gpio0 51 0>;
i2c-mux@74 {
compatible = "nxp,pca9548";
@@ -292,6 +302,19 @@
};
};
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_50_grp", "gpio0_51_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_50_grp", "gpio0_51_grp";
+ slew-rate = <0>;
+ io-standard = <1>;
+ };
+ };
+
pinctrl_sdhci0_default: sdhci0-default {
mux {
groups = "sdio0_2_grp";
@@ -380,13 +403,51 @@
};
};
+&qspi {
+ u-boot,dm-pre-reloc;
+ status = "okay";
+ is-dual = <0>;
+ num-cs = <1>;
+ flash@0 {
+ compatible = "n25q128a11";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@qspi-fsbl-uboot {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@qspi-bitstream {
+ label = "qspi-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+ };
+};
+
&sdhci0 {
+ u-boot,dm-pre-reloc;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdhci0_default>;
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1_default>;
diff --git a/arch/arm/boot/dts/zynq-zc706.dts b/arch/arm/boot/dts/zynq-zc706.dts
index 77943c16d33f..1a1b03a4223d 100644
--- a/arch/arm/boot/dts/zynq-zc706.dts
+++ b/arch/arm/boot/dts/zynq-zc706.dts
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2011 - 2014 Xilinx
+ * Copyright (C) 2011 - 2015 Xilinx
* Copyright (C) 2012 National Instruments Corp.
*/
/dts-v1/;
@@ -14,6 +14,7 @@
ethernet0 = &gem0;
i2c0 = &i2c0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
};
@@ -27,9 +28,12 @@
stdout-path = "serial0:115200n8";
};
- usb_phy0: phy0 {
- compatible = "usb-nop-xceiv";
+ usb_phy0: phy0@e0002000 {
+ compatible = "ulpi-phy";
#phy-cells = <0>;
+ reg = <0xe0002000 0x1000>;
+ view-port = <0x0170>;
+ drv-vbus;
};
};
@@ -303,13 +307,51 @@
};
};
+&qspi {
+ u-boot,dm-pre-reloc;
+ status = "okay";
+ is-dual = <1>;
+ num-cs = <1>;
+ flash@0 {
+ compatible = "n25q128a11";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@qspi-fsbl-uboot {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@qspi-bitstream {
+ label = "qspi-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+ };
+};
+
&sdhci0 {
+ u-boot,dm-pre-reloc;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdhci0_default>;
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1_default>;
@@ -322,3 +364,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0_default>;
};
+
+&watchdog0 {
+ reset-on-timeout;
+};
diff --git a/arch/arm/boot/dts/zynq-zc770-xm010.dts b/arch/arm/boot/dts/zynq-zc770-xm010.dts
index 0dd352289a45..12d13588402e 100644
--- a/arch/arm/boot/dts/zynq-zc770-xm010.dts
+++ b/arch/arm/boot/dts/zynq-zc770-xm010.dts
@@ -15,6 +15,7 @@
ethernet0 = &gem0;
i2c0 = &i2c0;
serial0 = &uart1;
+ spi0 = &qspi;
spi1 = &spi1;
};
@@ -57,7 +58,41 @@
compatible = "atmel,24c02";
reg = <0x52>;
};
+};
+&qspi {
+ status = "okay";
+ is-dual = <0>;
+ num-cs = <1>;
+ flash@0 {
+ compatible = "n25q128a11";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@qspi-fsbl-uboot {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@qspi-bitstream {
+ label = "qspi-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+ };
};
&sdhci0 {
@@ -85,6 +120,7 @@
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zc770-xm011.dts b/arch/arm/boot/dts/zynq-zc770-xm011.dts
index b7f65862c022..142e7263a177 100644
--- a/arch/arm/boot/dts/zynq-zc770-xm011.dts
+++ b/arch/arm/boot/dts/zynq-zc770-xm011.dts
@@ -47,6 +47,47 @@
};
};
+&nand0 {
+ status = "okay";
+ arm,nand-cycle-t0 = <0x4>;
+ arm,nand-cycle-t1 = <0x4>;
+ arm,nand-cycle-t2 = <0x1>;
+ arm,nand-cycle-t3 = <0x2>;
+ arm,nand-cycle-t4 = <0x2>;
+ arm,nand-cycle-t5 = <0x2>;
+ arm,nand-cycle-t6 = <0x4>;
+
+ partition@nand-fsbl-uboot {
+ label = "nand-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@nand-linux {
+ label = "nand-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@nand-device-tree {
+ label = "nand-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@nand-rootfs {
+ label = "nand-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@nand-bitstream {
+ label = "nand-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+};
+
+&smcc {
+ status = "okay";
+ arm,addr25 = <0x0>;
+ arm,nor-chip-sel0 = <0x0>;
+ arm,nor-chip-sel1 = <0x0>;
+ arm,sram-chip-sel0 = <0x0>;
+ arm,sram-chip-sel1 = <0x0>;
+};
+
&spi0 {
status = "okay";
num-cs = <4>;
@@ -54,6 +95,7 @@
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zc770-xm012.dts b/arch/arm/boot/dts/zynq-zc770-xm012.dts
index d2359b789eb8..e0e5980200cb 100644
--- a/arch/arm/boot/dts/zynq-zc770-xm012.dts
+++ b/arch/arm/boot/dts/zynq-zc770-xm012.dts
@@ -53,6 +53,47 @@
};
};
+&nor0 {
+ status = "okay";
+ bank-width = <1>;
+ xlnx,sram-cycle-t0 = <0xb>;
+ xlnx,sram-cycle-t1 = <0xb>;
+ xlnx,sram-cycle-t2 = <0x4>;
+ xlnx,sram-cycle-t3 = <0x4>;
+ xlnx,sram-cycle-t4 = <0x3>;
+ xlnx,sram-cycle-t5 = <0x3>;
+ xlnx,sram-cycle-t6 = <0x2>;
+ partition@nor-fsbl-uboot {
+ label = "nor-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@nor-linux {
+ label = "nor-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@nor-device-tree {
+ label = "nor-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@nor-rootfs {
+ label = "nor-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@nor-bitstream {
+ label = "nor-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+};
+
+&smcc {
+ status = "okay";
+ arm,addr25 = <0x1>;
+ arm,nor-chip-sel0 = <0x1>;
+ arm,nor-chip-sel1 = <0x0>;
+ arm,sram-chip-sel0 = <0x0>;
+ arm,sram-chip-sel1 = <0x0>;
+};
+
&spi1 {
status = "okay";
num-cs = <4>;
@@ -60,5 +101,6 @@
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zc770-xm013.dts b/arch/arm/boot/dts/zynq-zc770-xm013.dts
index 4ae2c85df3a0..d6c5a7b1baf2 100644
--- a/arch/arm/boot/dts/zynq-zc770-xm013.dts
+++ b/arch/arm/boot/dts/zynq-zc770-xm013.dts
@@ -15,6 +15,7 @@
ethernet0 = &gem1;
i2c0 = &i2c1;
serial0 = &uart0;
+ spi0 = &qspi;
spi1 = &spi0;
};
@@ -58,6 +59,41 @@
};
};
+&qspi {
+ status = "okay";
+ is-dual = <1>;
+ num-cs = <1>;
+ flash@0 {
+ compatible = "n25q128a11";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@qspi-fsbl-uboot {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@qspi-bitstream {
+ label = "qspi-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+ };
+};
+
&spi0 {
status = "okay";
num-cs = <4>;
@@ -74,5 +110,6 @@
};
&uart0 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zed.dts b/arch/arm/boot/dts/zynq-zed.dts
index 6a5a93aa6552..b9df9b30fc8d 100644
--- a/arch/arm/boot/dts/zynq-zed.dts
+++ b/arch/arm/boot/dts/zynq-zed.dts
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2011 - 2014 Xilinx
+ * Copyright (C) 2011 - 2015 Xilinx
* Copyright (C) 2012 National Instruments Corp.
*/
/dts-v1/;
@@ -13,6 +13,7 @@
aliases {
ethernet0 = &gem0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
};
@@ -26,9 +27,12 @@
stdout-path = "serial0:115200n8";
};
- usb_phy0: phy0 {
- compatible = "usb-nop-xceiv";
+ usb_phy0: phy0@e0002000 {
+ compatible = "ulpi-phy";
#phy-cells = <0>;
+ reg = <0xe0002000 0x1000>;
+ view-port = <0x0170>;
+ drv-vbus;
};
};
@@ -47,11 +51,50 @@
};
};
+&qspi {
+ u-boot,dm-pre-reloc;
+ status = "okay";
+ is-dual = <0>;
+ num-cs = <1>;
+ flash@0 {
+ compatible = "spansion,s25fl256s1", "jedec,spi-nor";
+ reg = <0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@qspi-fsbl-uboot {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@qspi-bitstream {
+ label = "qspi-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+ };
+};
+
&sdhci0 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zturn.dts b/arch/arm/boot/dts/zynq-zturn.dts
index 5ec616ebca08..b38704657960 100644
--- a/arch/arm/boot/dts/zynq-zturn.dts
+++ b/arch/arm/boot/dts/zynq-zturn.dts
@@ -54,7 +54,7 @@
label = "K1";
gpios = <&gpio0 0x32 0x1>;
linux,code = <0x66>;
- wakeup-source;
+ gpio-key,wakeup;
autorepeat;
};
};
diff --git a/arch/arm/boot/dts/zynq-zybo.dts b/arch/arm/boot/dts/zynq-zybo.dts
index 755f6f109d5a..0ac54ebbdc8b 100644
--- a/arch/arm/boot/dts/zynq-zybo.dts
+++ b/arch/arm/boot/dts/zynq-zybo.dts
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2011 - 2014 Xilinx
+ * Copyright (C) 2011 - 2015 Xilinx
* Copyright (C) 2012 National Instruments Corp.
*/
/dts-v1/;
@@ -13,6 +13,7 @@
aliases {
ethernet0 = &gem0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
};
@@ -48,11 +49,18 @@
};
};
+&qspi {
+ u-boot,dm-pre-reloc;
+ status = "okay";
+};
+
&sdhci0 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/configs/xilinx_zynq_defconfig b/arch/arm/configs/xilinx_zynq_defconfig
new file mode 100644
index 000000000000..b4332be480ef
--- /dev/null
+++ b/arch/arm/configs/xilinx_zynq_defconfig
@@ -0,0 +1,240 @@
+CONFIG_LOCALVERSION="-xilinx"
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+# CONFIG_BUG is not set
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_ZYNQ=y
+CONFIG_PL310_ERRATA_588369=y
+CONFIG_PL310_ERRATA_727915=y
+CONFIG_PL310_ERRATA_769419=y
+CONFIG_ARM_ERRATA_754322=y
+CONFIG_ARM_ERRATA_754327=y
+CONFIG_ARM_ERRATA_764369=y
+CONFIG_ARM_ERRATA_775420=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIE_XILINX=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_SCHED_SMT=y
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_ZYNQ_CPUIDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+CONFIG_CMA=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_CAN=y
+CONFIG_CAN_XILINXCAN=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_PL353=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_OF_OVERLAY=y
+CONFIG_OF_CONFIGFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_XILINX_TRAFGEN=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_NETDEVICES=y
+CONFIG_MACB=y
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+CONFIG_E1000E=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_R8169=y
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_XILINX_EMACLITE=y
+CONFIG_XILINX_AXI_EMAC=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_MARVELL_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_INPUT_SPARSEKMAP=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_CADENCE=y
+CONFIG_SPI=y
+CONFIG_SPI_CADENCE=y
+CONFIG_SPI_XILINX=y
+CONFIG_SPI_ZYNQ_QSPI=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_XILINX=y
+CONFIG_GPIO_ZYNQ=y
+CONFIG_PMBUS=y
+CONFIG_SENSORS_UCD9000=y
+CONFIG_SENSORS_UCD9200=y
+CONFIG_THERMAL=y
+CONFIG_CPU_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_XILINX_WATCHDOG=y
+CONFIG_CADENCE_WATCHDOG=y
+CONFIG_REGULATOR=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_XILINX=y
+CONFIG_VIDEO_XILINX_CFA=y
+CONFIG_VIDEO_XILINX_CRESAMPLE=y
+CONFIG_VIDEO_XILINX_REMAPPER=y
+CONFIG_VIDEO_XILINX_RGB2YUV=y
+CONFIG_VIDEO_XILINX_SCALER=y
+CONFIG_VIDEO_XILINX_SWITCH=y
+CONFIG_VIDEO_XILINX_TPG=y
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+CONFIG_VIDEO_ADV7604=y
+CONFIG_DRM=y
+CONFIG_DRM_XILINX=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_ADI=y
+CONFIG_SND_SOC_ADI_AXI_I2S=y
+CONFIG_SND_SOC_ADI_AXI_SPDIF=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_STORAGE=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_XILINX=y
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_ZERO=m
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_LEDS_TRIGGER_CAMERA=y
+CONFIG_EDAC=y
+CONFIG_EDAC_SYNOPSYS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PCF8563=y
+CONFIG_PL330_DMA=y
+CONFIG_XILINX_DMA_ENGINES=y
+CONFIG_XILINX_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=y
+CONFIG_UIO_XILINX_APM=y
+CONFIG_COMMON_CLK_SI570=y
+CONFIG_REMOTEPROC=y
+CONFIG_ZYNQ_REMOTEPROC=m
+CONFIG_MEMORY=y
+CONFIG_IIO=y
+CONFIG_XILINX_XADC=y
+CONFIG_RAS=y
+CONFIG_FPGA=y
+CONFIG_FPGA_MGR_ZYNQ_FPGA=y
+CONFIG_FPGA_MGR_ZYNQ_AFI_FPGA=y
+CONFIG_FPGA_BRIDGE=y
+CONFIG_XILINX_PR_DECOUPLER=y
+CONFIG_FPGA_REGION=y
+CONFIG_OF_FPGA_REGION=y
+CONFIG_EXT3_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_FTRACE is not set
diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
index 215497f011f2..cb3be0c15a04 100644
--- a/arch/arm/crypto/sha256_glue.c
+++ b/arch/arm/crypto/sha256_glue.c
@@ -25,8 +25,8 @@
#include "sha256_glue.h"
-asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
- unsigned int num_blks);
+asmlinkage void sha256_block_data_order(struct sha256_state *state,
+ const u8 *data, int num_blks);
int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
@@ -34,23 +34,20 @@ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
/* make sure casting to sha256_block_fn() is safe */
BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
- return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ return sha256_base_do_update(desc, data, len, sha256_block_data_order);
}
EXPORT_SYMBOL(crypto_sha256_arm_update);
static int crypto_sha256_arm_final(struct shash_desc *desc, u8 *out)
{
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_finalize(desc, sha256_block_data_order);
return sha256_base_finish(desc, out);
}
int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_update(desc, data, len, sha256_block_data_order);
return crypto_sha256_arm_final(desc, out);
}
EXPORT_SYMBOL(crypto_sha256_arm_finup);
diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
index 8775aa42bbbe..1a16b98ec108 100644
--- a/arch/arm/crypto/sha512-glue.c
+++ b/arch/arm/crypto/sha512-glue.c
@@ -25,27 +25,25 @@ MODULE_ALIAS_CRYPTO("sha512");
MODULE_ALIAS_CRYPTO("sha384-arm");
MODULE_ALIAS_CRYPTO("sha512-arm");
-asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks);
+asmlinkage void sha512_block_data_order(struct sha512_state *state,
+ u8 const *src, int blocks);
int sha512_arm_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ return sha512_base_do_update(desc, data, len, sha512_block_data_order);
}
static int sha512_arm_final(struct shash_desc *desc, u8 *out)
{
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ sha512_base_do_finalize(desc, sha512_block_data_order);
return sha512_base_finish(desc, out);
}
int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ sha512_base_do_update(desc, data, len, sha512_block_data_order);
return sha512_arm_final(desc, out);
}
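
Both glue changes above work because the asm entry points are redeclared with the exact prototypes that the sha*_base helpers expect, so the function-pointer casts can be dropped. For context, the block-function typedefs in include/crypto/sha256_base.h and include/crypto/sha512_base.h are:

    typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
                                   int blocks);
    typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src,
                                   int blocks);

Calling through a cast to an incompatible function-pointer type is undefined behaviour, and with CFI-enabled builds such mismatched indirect calls trap at runtime, which is presumably the motivation for this cleanup.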
diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h
index 97a312ba0840..fe385551edec 100644
--- a/arch/arm/include/asm/bugs.h
+++ b/arch/arm/include/asm/bugs.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * arch/arm/include/asm/bugs.h
- *
* Copyright (C) 1995-2003 Russell King
*/
#ifndef __ASM_BUGS_H
@@ -10,10 +8,8 @@
extern void check_writebuffer_bugs(void);
#ifdef CONFIG_MMU
-extern void check_bugs(void);
extern void check_other_bugs(void);
#else
-#define check_bugs() do { } while (0)
#define check_other_bugs() do { } while (0)
#endif
diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h
index 58e039a851af..3c82975d46db 100644
--- a/arch/arm/include/asm/exception.h
+++ b/arch/arm/include/asm/exception.h
@@ -10,10 +10,6 @@
#include <linux/interrupt.h>
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
-#else
-#define __exception_irq_entry
-#endif
#endif /* __ASM_ARM_EXCEPTION_H */
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 7a88f160b1fb..136a9506f1ed 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -7,7 +7,7 @@
#include <asm/irq.h>
/* number of IPIs _not_ including IPI_CPU_BACKTRACE */
-#define NR_IPI 7
+#define NR_IPI 16
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index fe87397c3d8c..bdbc1e590891 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -17,7 +17,7 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->ARM_pc = (__ip); \
- (regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \
+ frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \
(regs)->ARM_sp = current_stack_pointer; \
(regs)->ARM_cpsr = SVC_MODE; \
}
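
Switching from (regs)->ARM_fp to frame_pointer(regs) matters for THUMB2 kernels, where the frame pointer lives in r7 rather than fp (r11). The macro in arch/arm/include/asm/ptrace.h reads, approximately:

    #ifdef CONFIG_THUMB2_KERNEL
    #define frame_pointer(regs) (regs)->ARM_r7
    #else
    #define frame_pointer(regs) (regs)->ARM_fp
    #endif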
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 010fa1a35a68..e8ac2f95fb37 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -52,12 +52,6 @@
typedef pte_t *pte_addr_t;
/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) (virt_to_page(0))
-
-/*
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) (prot)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 3ae120cd1715..ecfd6e7e128f 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -10,6 +10,15 @@
#include <linux/const.h>
#include <asm/proc-fns.h>
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr) (empty_zero_page)
+#endif
+
#ifndef CONFIG_MMU
#include <asm-generic/4level-fixup.h>
@@ -166,13 +175,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#define __S111 __PAGE_SHARED_EXEC
#ifndef __ASSEMBLY__
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern struct page *empty_zero_page;
-#define ZERO_PAGE(vaddr) (empty_zero_page)
-
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
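
With ZERO_PAGE() now resolving to empty_zero_page on !MMU builds too (instead of the old virt_to_page(0), which could point at a page that is not actually zeroed), the nommu init path has to allocate and publish a real zero page. A minimal sketch of such an allocation, assuming the usual memblock API; the corresponding hunk is not shown here:

    /* Sketch: allocate a zero-filled page early and expose it as ZERO_PAGE. */
    void *zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

    if (!zero_page)
            panic("%s: failed to allocate the zero page\n", __func__);
    empty_zero_page = virt_to_page(zero_page);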
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a91f21e3c5b5..bbdfd74ff98a 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -120,4 +120,7 @@ struct of_cpu_method {
*/
extern void smp_set_ops(const struct smp_operations *);
+extern int set_ipi_handler(int ipinr, void *handler, char *desc);
+extern void clear_ipi_handler(int ipinr);
+
#endif /* ifndef __ASM_ARM_SMP_H */
diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
index 14c8dbbb7d2d..087bce6ec8e9 100644
--- a/arch/arm/kernel/bugs.c
+++ b/arch/arm/kernel/bugs.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
+#include <linux/cpu.h>
#include <asm/bugs.h>
#include <asm/proc-fns.h>
@@ -11,7 +12,7 @@ void check_other_bugs(void)
#endif
}
-void __init check_bugs(void)
+void __init arch_cpu_finalize_init(void)
{
check_writebuffer_bugs();
check_other_bugs();
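
check_bugs() becomes the generic arch_cpu_finalize_init() hook here. On the generic side, the hook is a weak empty stub called from start_kernel() at the point where check_bugs() used to run, roughly:

    /* init/main.c (paraphrased): architectures override this weak stub. */
    void __init __weak arch_cpu_finalize_init(void)
    {
    }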
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index b06d9ea07c84..a69dd64a8401 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -623,7 +623,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
hw->address &= ~alignment_mask;
hw->ctrl.len <<= offset;
- if (is_default_overflow_handler(bp)) {
+ if (uses_default_overflow_handler(bp)) {
/*
* Mismatch breakpoints are required for single-stepping
* breakpoints.
@@ -795,7 +795,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
* Otherwise, insert a temporary mismatch breakpoint so that
* we can single-step over the watchpoint trigger.
*/
- if (!is_default_overflow_handler(wp))
+ if (!uses_default_overflow_handler(wp))
continue;
step:
enable_single_step(wp, instruction_pointer(regs));
@@ -808,7 +808,7 @@ step:
info->trigger = addr;
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
perf_bp_event(wp, regs);
- if (is_default_overflow_handler(wp))
+ if (uses_default_overflow_handler(wp))
enable_single_step(wp, instruction_pointer(regs));
}
@@ -883,7 +883,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
info->trigger = addr;
pr_debug("breakpoint fired: address = 0x%x\n", addr);
perf_bp_event(bp, regs);
- if (is_default_overflow_handler(bp))
+ if (uses_default_overflow_handler(bp))
enable_single_step(bp, addr);
goto unlock;
}
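
The move from is_default_overflow_handler() to uses_default_overflow_handler() covers events whose overflow handler has been swapped out (for example by an attached BPF program) while the original default handler is kept in orig_overflow_handler; single-stepping must still be armed for those events. The generic helper is, roughly:

    /* include/linux/perf_event.h (paraphrased) */
    static inline bool uses_default_overflow_handler(struct perf_event *event)
    {
            if (likely(is_default_overflow_handler(event)))
                    return true;

            return __is_default_overflow_handler(event->orig_overflow_handler);
    }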
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 46e1be9e57a8..045b8820524c 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -510,20 +510,59 @@ void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
__smp_cross_call = fn;
}
-static const char *ipi_types[NR_IPI] __tracepoint_string = {
-#define S(x,s) [x] = s
- S(IPI_WAKEUP, "CPU wakeup interrupts"),
- S(IPI_TIMER, "Timer broadcast interrupts"),
- S(IPI_RESCHEDULE, "Rescheduling interrupts"),
- S(IPI_CALL_FUNC, "Function call interrupts"),
- S(IPI_CPU_STOP, "CPU stop interrupts"),
- S(IPI_IRQ_WORK, "IRQ work interrupts"),
- S(IPI_COMPLETION, "completion interrupts"),
+struct ipi {
+ const char *desc;
+ void (*handler)(void);
+};
+
+static void ipi_cpu_stop(void);
+static void ipi_complete(void);
+
+#define IPI_DESC_STRING_IPI_WAKEUP "CPU wakeup interrupts"
+#define IPI_DESC_STRING_IPI_TIMER "Timer broadcast interrupts"
+#define IPI_DESC_STRING_IPI_RESCHEDULE "Rescheduling interrupts"
+#define IPI_DESC_STRING_IPI_CALL_FUNC "Function call interrupts"
+#define IPI_DESC_STRING_IPI_CPU_STOP "CPU stop interrupts"
+#define IPI_DESC_STRING_IPI_IRQ_WORK "IRQ work interrupts"
+#define IPI_DESC_STRING_IPI_COMPLETION "completion interrupts"
+
+#define IPI_DESC_STR(x) IPI_DESC_STRING_ ## x
+
+static const char *ipi_desc_strings[] __tracepoint_string =
+ {
+ [IPI_WAKEUP] = IPI_DESC_STR(IPI_WAKEUP),
+ [IPI_TIMER] = IPI_DESC_STR(IPI_TIMER),
+ [IPI_RESCHEDULE] = IPI_DESC_STR(IPI_RESCHEDULE),
+ [IPI_CALL_FUNC] = IPI_DESC_STR(IPI_CALL_FUNC),
+ [IPI_CPU_STOP] = IPI_DESC_STR(IPI_CPU_STOP),
+ [IPI_IRQ_WORK] = IPI_DESC_STR(IPI_IRQ_WORK),
+ [IPI_COMPLETION] = IPI_DESC_STR(IPI_COMPLETION),
+ };
+
+
+static void tick_receive_broadcast_local(void)
+{
+ tick_receive_broadcast();
+}
+
+static struct ipi ipi_types[NR_IPI] = {
+#define S(x, f) [x].desc = IPI_DESC_STR(x), [x].handler = f
+ S(IPI_WAKEUP, NULL),
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+ S(IPI_TIMER, tick_receive_broadcast_local),
+#endif
+ S(IPI_RESCHEDULE, scheduler_ipi),
+ S(IPI_CALL_FUNC, generic_smp_call_function_interrupt),
+ S(IPI_CPU_STOP, ipi_cpu_stop),
+#ifdef CONFIG_IRQ_WORK
+ S(IPI_IRQ_WORK, irq_work_run),
+#endif
+ S(IPI_COMPLETION, ipi_complete),
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
- trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+ trace_ipi_raise_rcuidle(target, ipi_desc_strings[ipinr]);
__smp_cross_call(target, ipinr);
}
@@ -532,13 +571,13 @@ void show_ipi_list(struct seq_file *p, int prec)
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
- seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
-
- for_each_online_cpu(cpu)
- seq_printf(p, "%10u ",
- __get_irq_stat(cpu, ipi_irqs[i]));
-
- seq_printf(p, " %s\n", ipi_types[i]);
+ if (ipi_types[i].handler) {
+ seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
+ for_each_present_cpu(cpu)
+ seq_printf(p, "%10u ",
+ __get_irq_stat(cpu, ipi_irqs[i]));
+ seq_printf(p, " %s\n", ipi_types[i].desc);
+ }
}
}
@@ -588,8 +627,10 @@ static DEFINE_RAW_SPINLOCK(stop_lock);
/*
* ipi_cpu_stop - handle IPI from smp_send_stop()
*/
-static void ipi_cpu_stop(unsigned int cpu)
+static void ipi_cpu_stop(void)
{
+ unsigned int cpu = smp_processor_id();
+
if (system_state <= SYSTEM_RUNNING) {
raw_spin_lock(&stop_lock);
pr_crit("CPU%u: stopping\n", cpu);
@@ -616,8 +657,10 @@ int register_ipi_completion(struct completion *completion, int cpu)
return IPI_COMPLETION;
}
-static void ipi_complete(unsigned int cpu)
+static void ipi_complete(void)
{
+ unsigned int cpu = smp_processor_id();
+
complete(per_cpu(cpu_completion, cpu));
}
@@ -634,71 +677,48 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
unsigned int cpu = smp_processor_id();
struct pt_regs *old_regs = set_irq_regs(regs);
- if ((unsigned)ipinr < NR_IPI) {
- trace_ipi_entry_rcuidle(ipi_types[ipinr]);
+ if (ipi_types[ipinr].handler) {
__inc_irq_stat(cpu, ipi_irqs[ipinr]);
- }
-
- switch (ipinr) {
- case IPI_WAKEUP:
- break;
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
- case IPI_TIMER:
- irq_enter();
- tick_receive_broadcast();
- irq_exit();
- break;
-#endif
-
- case IPI_RESCHEDULE:
- scheduler_ipi();
- break;
-
- case IPI_CALL_FUNC:
irq_enter();
- generic_smp_call_function_interrupt();
+ (*ipi_types[ipinr].handler)();
irq_exit();
- break;
+ } else
+ pr_debug("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
- case IPI_CPU_STOP:
- irq_enter();
- ipi_cpu_stop(cpu);
- irq_exit();
- break;
+ set_irq_regs(old_regs);
+}
-#ifdef CONFIG_IRQ_WORK
- case IPI_IRQ_WORK:
- irq_enter();
- irq_work_run();
- irq_exit();
- break;
-#endif
+/*
+ * set_ipi_handler:
+ * Interface provided for a kernel module to specify an IPI handler function.
+ */
+int set_ipi_handler(int ipinr, void *handler, char *desc)
+{
+ unsigned int cpu = smp_processor_id();
- case IPI_COMPLETION:
- irq_enter();
- ipi_complete(cpu);
- irq_exit();
- break;
+ if (ipi_types[ipinr].handler) {
+ pr_crit("CPU%u: IPI handler 0x%x already registered to %pf\n",
+ cpu, ipinr, ipi_types[ipinr].handler);
+ return -1;
+ }
- case IPI_CPU_BACKTRACE:
- printk_nmi_enter();
- irq_enter();
- nmi_cpu_backtrace(regs);
- irq_exit();
- printk_nmi_exit();
- break;
+ ipi_types[ipinr].handler = handler;
+ ipi_types[ipinr].desc = desc;
- default:
- pr_crit("CPU%u: Unknown IPI message 0x%x\n",
- cpu, ipinr);
- break;
- }
+ return 0;
+}
+EXPORT_SYMBOL(set_ipi_handler);
- if ((unsigned)ipinr < NR_IPI)
- trace_ipi_exit_rcuidle(ipi_types[ipinr]);
- set_irq_regs(old_regs);
+/*
+ * clear_ipi_handler:
+ * Interface provided for a kernel module to clear an IPI handler function.
+ */
+void clear_ipi_handler(int ipinr)
+{
+ ipi_types[ipinr].handler = NULL;
+ ipi_types[ipinr].desc = NULL;
}
+EXPORT_SYMBOL(clear_ipi_handler);
void smp_send_reschedule(int cpu)
{
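The new set_ipi_handler()/clear_ipi_handler() pair is exported for modules. A hypothetical module sketch of its use (the IPI number and its availability are assumptions for illustration):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>

extern int set_ipi_handler(int ipinr, void *handler, char *desc);
extern void clear_ipi_handler(int ipinr);

static void demo_ipi_handler(void)
{
	pr_info("demo IPI on CPU%u\n", smp_processor_id());
}

static int __init demo_init(void)
{
	/* Returns -1 if another handler already owns the slot. */
	return set_ipi_handler(0 /* assumed free slot */, demo_ipi_handler,
			       "demo interrupts");
}

static void __exit demo_exit(void)
{
	clear_ipi_handler(0);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");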
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 207ef9a797bd..03dfeb120843 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -341,7 +341,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
if (panic_on_oops)
panic("Fatal exception");
if (signr)
- do_exit(signr);
+ make_task_dead(signr);
}
/*
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 4574e6aea0a5..f321c2aa94e1 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -300,6 +300,29 @@ static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl,
return URC_OK;
}
+static unsigned long unwind_decode_uleb128(struct unwind_ctrl_block *ctrl)
+{
+ unsigned long bytes = 0;
+ unsigned long insn;
+ unsigned long result = 0;
+
+ /*
+ * unwind_get_byte() will advance `ctrl` one instruction at a time, so
+ * loop until we get an instruction byte where bit 7 is not set.
+ *
+	 * Note: This decodes a maximum of 4 bytes to output 28 bits of data, where
+ * max is 0xfffffff: that will cover a vsp increment of 1073742336, hence
+ * it is sufficient for unwinding the stack.
+ */
+ do {
+ insn = unwind_get_byte(ctrl);
+ result |= (insn & 0x7f) << (bytes * 7);
+ bytes++;
+ } while (!!(insn & 0x80) && (bytes != sizeof(result)));
+
+ return result;
+}
+
/*
* Execute the current unwind instruction.
*/
@@ -353,7 +376,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
if (ret)
goto error;
} else if (insn == 0xb2) {
- unsigned long uleb128 = unwind_get_byte(ctrl);
+ unsigned long uleb128 = unwind_decode_uleb128(ctrl);
ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
} else {
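For reference, the encoding decoded above packs 7 payload bits per byte, least-significant group first, with bit 7 marking continuation. A standalone userspace sketch of the same scheme:

#include <stddef.h>
#include <stdio.h>

static unsigned long uleb128_decode(const unsigned char *p, size_t max)
{
	unsigned long result = 0;
	size_t i = 0;

	do {
		result |= (unsigned long)(p[i] & 0x7f) << (i * 7);
	} while ((p[i++] & 0x80) && i < max);

	return result;
}

int main(void)
{
	const unsigned char enc[] = { 0x81, 0x01 };	/* 1 + (1 << 7) */

	printf("%lu\n", uleb128_decode(enc, sizeof(enc)));	/* prints 129 */
	return 0;
}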
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index b5e8b9ae4c7d..7fd3600db8ef 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -40,8 +40,8 @@ ENDPROC(_find_first_zero_bit_le)
* Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
*/
ENTRY(_find_next_zero_bit_le)
- teq r1, #0
- beq 3b
+ cmp r2, r1
+ bhs 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
ARM( ldrb r3, [r0, r2, lsr #3] )
@@ -81,8 +81,8 @@ ENDPROC(_find_first_bit_le)
 * Prototype: int find_next_bit(void *addr, unsigned int maxbit, int offset)
*/
ENTRY(_find_next_bit_le)
- teq r1, #0
- beq 3b
+ cmp r2, r1
+ bhs 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
ARM( ldrb r3, [r0, r2, lsr #3] )
@@ -115,8 +115,8 @@ ENTRY(_find_first_zero_bit_be)
ENDPROC(_find_first_zero_bit_be)
ENTRY(_find_next_zero_bit_be)
- teq r1, #0
- beq 3b
+ cmp r2, r1
+ bhs 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
eor r3, r2, #0x18 @ big endian byte ordering
@@ -149,8 +149,8 @@ ENTRY(_find_first_bit_be)
ENDPROC(_find_first_bit_be)
ENTRY(_find_next_bit_be)
- teq r1, #0
- beq 3b
+ cmp r2, r1
+ bhs 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
eor r3, r2, #0x18 @ big endian byte ordering
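In C terms, the old teq/beq prologue only rejected an empty bitmap (maxbit == 0); the new cmp/bhs pair rejects any starting offset at or beyond maxbit, which is what callers of find_next_bit()/find_next_zero_bit() expect. A sketch of the equivalent guard (not the assembly itself):

unsigned int find_next_bit_guard(unsigned int maxbit, unsigned int offset)
{
	if (offset >= maxbit)	/* was effectively: if (maxbit == 0) */
		return maxbit;	/* "not found", like branching to label 3 */
	/* ... continue scanning from offset ... */
	return maxbit;
}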
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 6ca4535c47fb..e36d053a8a90 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -16,6 +16,7 @@
ENTRY(mmioset)
ENTRY(memset)
UNWIND( .fnstart )
+ and r1, r1, #255 @ cast to unsigned char
ands r3, r0, #3 @ 1 unaligned?
mov ip, r0 @ preserve r0 as return value
bne 6f @ 1
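The added "and" mirrors the C library contract: memset() converts its int fill argument to unsigned char before replicating it, so high bits from the caller must not leak into the duplicated word. For example:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[4];

	memset(buf, 0x100, sizeof(buf));	/* fills with (unsigned char)0x100 == 0 */
	printf("%u\n", buf[0]);			/* prints 0 */
	return 0;
}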
diff --git a/arch/arm/mach-bcm/bcm_kona_smc.c b/arch/arm/mach-bcm/bcm_kona_smc.c
index 541e850a736c..9175c130967e 100644
--- a/arch/arm/mach-bcm/bcm_kona_smc.c
+++ b/arch/arm/mach-bcm/bcm_kona_smc.c
@@ -54,6 +54,7 @@ int __init bcm_kona_smc_init(void)
return -ENODEV;
prop_val = of_get_address(node, 0, &prop_size, NULL);
+ of_node_put(node);
if (!prop_val)
return -EINVAL;
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index 4d3b7d0418c4..68e3788f026c 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -3,6 +3,7 @@
menuconfig ARCH_DAVINCI
bool "TI DaVinci"
depends on ARCH_MULTI_V5
+ select CPU_ARM926T
select DAVINCI_TIMER
select ZONE_DMA
select PM_GENERIC_DOMAINS if PM
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 6fb19a393fd2..c06ae33dc53e 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -337,6 +337,7 @@ static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = {
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
GPIO_LOOKUP_IDX("G", 0, NULL, 1,
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ { }
},
};
diff --git a/arch/arm/mach-ep93xx/timer-ep93xx.c b/arch/arm/mach-ep93xx/timer-ep93xx.c
index de998830f534..b07956883e16 100644
--- a/arch/arm/mach-ep93xx/timer-ep93xx.c
+++ b/arch/arm/mach-ep93xx/timer-ep93xx.c
@@ -9,6 +9,7 @@
#include <linux/io.h>
#include <asm/mach/time.h>
#include "soc.h"
+#include "platform.h"
/*************************************************************************
* Timer handling for EP93xx
@@ -60,7 +61,7 @@ static u64 notrace ep93xx_read_sched_clock(void)
return ret;
}
-u64 ep93xx_clocksource_read(struct clocksource *c)
+static u64 ep93xx_clocksource_read(struct clocksource *c)
{
u64 ret;
diff --git a/arch/arm/mach-imx/cpu-imx25.c b/arch/arm/mach-imx/cpu-imx25.c
index b2e1963f473d..2ee2d2813d57 100644
--- a/arch/arm/mach-imx/cpu-imx25.c
+++ b/arch/arm/mach-imx/cpu-imx25.c
@@ -23,6 +23,7 @@ static int mx25_read_cpu_rev(void)
np = of_find_compatible_node(NULL, NULL, "fsl,imx25-iim");
iim_base = of_iomap(np, 0);
+ of_node_put(np);
BUG_ON(!iim_base);
rev = readl(iim_base + MXC_IIMSREV);
iounmap(iim_base);
diff --git a/arch/arm/mach-imx/cpu-imx27.c b/arch/arm/mach-imx/cpu-imx27.c
index a969aa71b60f..1d2893908368 100644
--- a/arch/arm/mach-imx/cpu-imx27.c
+++ b/arch/arm/mach-imx/cpu-imx27.c
@@ -9,6 +9,7 @@
*/
#include <linux/io.h>
+#include <linux/of_address.h>
#include <linux/module.h>
#include "hardware.h"
@@ -17,16 +18,24 @@ static int mx27_cpu_rev = -1;
static int mx27_cpu_partnumber;
#define SYS_CHIP_ID 0x00 /* The offset of CHIP ID register */
+#define SYSCTRL_OFFSET 0x800 /* Offset from CCM base address */
static int mx27_read_cpu_rev(void)
{
+ void __iomem *ccm_base;
+ struct device_node *np;
u32 val;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx27-ccm");
+ ccm_base = of_iomap(np, 0);
+ of_node_put(np);
+ BUG_ON(!ccm_base);
/*
* now we have access to the IO registers. As we need
* the silicon revision very early we read it here to
* avoid any further hooks
*/
- val = imx_readl(MX27_IO_ADDRESS(MX27_SYSCTRL_BASE_ADDR + SYS_CHIP_ID));
+ val = imx_readl(ccm_base + SYSCTRL_OFFSET + SYS_CHIP_ID);
mx27_cpu_partnumber = (int)((val >> 12) & 0xFFFF);
diff --git a/arch/arm/mach-imx/cpu-imx31.c b/arch/arm/mach-imx/cpu-imx31.c
index 3ee684b71006..35c544924e50 100644
--- a/arch/arm/mach-imx/cpu-imx31.c
+++ b/arch/arm/mach-imx/cpu-imx31.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
+#include <linux/of_address.h>
#include <linux/io.h>
#include "common.h"
@@ -32,10 +33,17 @@ static struct {
static int mx31_read_cpu_rev(void)
{
+ void __iomem *iim_base;
+ struct device_node *np;
u32 i, srev;
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx31-iim");
+ iim_base = of_iomap(np, 0);
+ of_node_put(np);
+ BUG_ON(!iim_base);
+
/* read SREV register from IIM module */
- srev = imx_readl(MX31_IO_ADDRESS(MX31_IIM_BASE_ADDR + MXC_IIMSREV));
+ srev = imx_readl(iim_base + MXC_IIMSREV);
srev &= 0xff;
for (i = 0; i < ARRAY_SIZE(mx31_cpu_type); i++)
diff --git a/arch/arm/mach-imx/cpu-imx35.c b/arch/arm/mach-imx/cpu-imx35.c
index ebb3cdabd506..1fe75b39c2d9 100644
--- a/arch/arm/mach-imx/cpu-imx35.c
+++ b/arch/arm/mach-imx/cpu-imx35.c
@@ -5,6 +5,7 @@
* Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
*/
#include <linux/module.h>
+#include <linux/of_address.h>
#include <linux/io.h>
#include "hardware.h"
@@ -14,9 +15,16 @@ static int mx35_cpu_rev = -1;
static int mx35_read_cpu_rev(void)
{
+ void __iomem *iim_base;
+ struct device_node *np;
u32 rev;
- rev = imx_readl(MX35_IO_ADDRESS(MX35_IIM_BASE_ADDR + MXC_IIMSREV));
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx35-iim");
+ iim_base = of_iomap(np, 0);
+ of_node_put(np);
+ BUG_ON(!iim_base);
+
+ rev = imx_readl(iim_base + MXC_IIMSREV);
switch (rev) {
case 0x00:
return IMX_CHIP_REVISION_1_0;
diff --git a/arch/arm/mach-imx/cpu-imx5.c b/arch/arm/mach-imx/cpu-imx5.c
index ad56263778f9..a67c89bf155d 100644
--- a/arch/arm/mach-imx/cpu-imx5.c
+++ b/arch/arm/mach-imx/cpu-imx5.c
@@ -28,6 +28,7 @@ static u32 imx5_read_srev_reg(const char *compat)
np = of_find_compatible_node(NULL, NULL, compat);
iim_base = of_iomap(np, 0);
+ of_node_put(np);
WARN_ON(!iim_base);
srev = readl(iim_base + IIM_SREV) & 0xff;
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index af12668d0bf5..3d76e8c28c51 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -99,6 +99,7 @@ struct mmdc_pmu {
cpumask_t cpu;
struct hrtimer hrtimer;
unsigned int active_events;
+ int id;
struct device *dev;
struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
struct hlist_node node;
@@ -433,8 +434,6 @@ static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
void __iomem *mmdc_base, struct device *dev)
{
- int mmdc_num;
-
*pmu_mmdc = (struct mmdc_pmu) {
.pmu = (struct pmu) {
.task_ctx_nr = perf_invalid_context,
@@ -452,15 +451,16 @@ static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
.active_events = 0,
};
- mmdc_num = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);
+ pmu_mmdc->id = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);
- return mmdc_num;
+ return pmu_mmdc->id;
}
static int imx_mmdc_remove(struct platform_device *pdev)
{
struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);
+ ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
perf_pmu_unregister(&pmu_mmdc->pmu);
iounmap(pmu_mmdc->mmdc_base);
@@ -474,7 +474,6 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
{
struct mmdc_pmu *pmu_mmdc;
char *name;
- int mmdc_num;
int ret;
const struct of_device_id *of_id =
of_match_device(imx_mmdc_dt_ids, &pdev->dev);
@@ -497,14 +496,18 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
cpuhp_mmdc_state = ret;
}
- mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
- pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
- if (mmdc_num == 0)
- name = "mmdc";
- else
- name = devm_kasprintf(&pdev->dev,
- GFP_KERNEL, "mmdc%d", mmdc_num);
+ ret = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
+ if (ret < 0)
+ goto pmu_free;
+ name = devm_kasprintf(&pdev->dev,
+ GFP_KERNEL, "mmdc%d", ret);
+ if (!name) {
+ ret = -ENOMEM;
+ goto pmu_release_id;
+ }
+
+ pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;
hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,
@@ -527,6 +530,8 @@ pmu_register_err:
pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
hrtimer_cancel(&pmu_mmdc->hrtimer);
+pmu_release_id:
+ ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
pmu_free:
kfree(pmu_mmdc);
return ret;
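The shape of the fix: the IDA-allocated instance number now lives in struct mmdc_pmu, so both the probe error path and imx_mmdc_remove() can hand it back. The same pattern in miniature (all names here are illustrative, and demo_register() is a hypothetical helper):

#include <linux/idr.h>

static DEFINE_IDA(demo_ida);

struct demo_dev {
	int id;
};

static int demo_register(struct demo_dev *d);	/* hypothetical helper */

static int demo_probe(struct demo_dev *d)
{
	d->id = ida_simple_get(&demo_ida, 0, 0, GFP_KERNEL);
	if (d->id < 0)
		return d->id;

	if (demo_register(d)) {
		ida_simple_remove(&demo_ida, d->id);	/* error path gives the id back */
		return -ENODEV;
	}
	return 0;
}

static void demo_remove(struct demo_dev *d)
{
	ida_simple_remove(&demo_ida, d->id);		/* teardown gives it back too */
}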
diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c
index 483df32583be..0bdb872f5018 100644
--- a/arch/arm/mach-mmp/time.c
+++ b/arch/arm/mach-mmp/time.c
@@ -44,18 +44,21 @@
static void __iomem *mmp_timer_base = TIMERS_VIRT_BASE;
/*
- * FIXME: the timer needs some delay to stablize the counter capture
+ * Read the timer through the CVWR register. Delay is required after requesting
+ * a read. The CR register cannot be directly read due to metastability issues
+ * documented in the PXA168 software manual.
*/
static inline uint32_t timer_read(void)
{
- int delay = 100;
+ uint32_t val;
+ int delay = 3;
__raw_writel(1, mmp_timer_base + TMR_CVWR(1));
while (delay--)
- cpu_relax();
+ val = __raw_readl(mmp_timer_base + TMR_CVWR(1));
- return __raw_readl(mmp_timer_base + TMR_CVWR(1));
+ return val;
}
static u64 notrace mmp_read_sched_clock(void)
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index c109f47e9cbc..a687e83ad604 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -387,8 +387,10 @@ static void __init mxs_machine_init(void)
root = of_find_node_by_path("/");
ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
- if (ret)
+ if (ret) {
+ kfree(soc_dev_attr);
return;
+ }
soc_dev_attr->family = "Freescale MXS Family";
soc_dev_attr->soc_id = mxs_get_soc_id();
diff --git a/arch/arm/mach-omap1/timer.c b/arch/arm/mach-omap1/timer.c
index 4447210c9b0d..291bc376d30e 100644
--- a/arch/arm/mach-omap1/timer.c
+++ b/arch/arm/mach-omap1/timer.c
@@ -165,7 +165,7 @@ err_free_pdata:
kfree(pdata);
err_free_pdev:
- platform_device_unregister(pdev);
+ platform_device_put(pdev);
return ret;
}
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 1bd64f6ba8cf..1de25166414e 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -211,6 +211,7 @@ static int __init omapdss_init_fbdev(void)
node = of_find_node_by_name(NULL, "omap4_padconf_global");
if (node)
omap4_dsi_mux_syscon = syscon_node_to_regmap(node);
+ of_node_put(node);
return 0;
}
@@ -259,11 +260,13 @@ static int __init omapdss_init_of(void)
if (!pdev) {
pr_err("Unable to find DSS platform device\n");
+ of_node_put(node);
return -ENODEV;
}
r = of_platform_populate(node, NULL, NULL, &pdev->dev);
put_device(&pdev->dev);
+ of_node_put(node);
if (r) {
pr_err("Unable to populate DSS submodule devices\n");
return r;
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 188ea5258c99..8c9160779689 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -797,10 +797,15 @@ void __init omap_soc_device_init(void)
soc_dev_attr->machine = soc_name;
soc_dev_attr->family = omap_get_family();
+ if (!soc_dev_attr->family) {
+ kfree(soc_dev_attr);
+ return;
+ }
soc_dev_attr->revision = soc_rev;
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->family);
kfree(soc_dev_attr);
return;
}
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 1cbac76136d4..6a10d23d787e 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -174,7 +174,7 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)
break;
case PWRDM_STATE_PREV:
prev = pwrdm_read_prev_pwrst(pwrdm);
- if (pwrdm->state != prev)
+ if (prev >= 0 && pwrdm->state != prev)
pwrdm->state_counter[prev]++;
if (prev == PWRDM_POWER_RET)
_update_logic_membank_counters(pwrdm);
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
index 1b442b128569..63e73e9b82bc 100644
--- a/arch/arm/mach-omap2/prm3xxx.c
+++ b/arch/arm/mach-omap2/prm3xxx.c
@@ -708,6 +708,7 @@ static int omap3xxx_prm_late_init(void)
}
irq_num = of_irq_get(np, 0);
+ of_node_put(np);
if (irq_num == -EPROBE_DEFER)
return irq_num;
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 1defb838eae3..a5da85b73461 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -700,6 +700,7 @@ static void __init realtime_counter_init(void)
}
rate = clk_get_rate(sys_clk);
+ clk_put(sys_clk);
if (soc_is_dra7xx()) {
/*
diff --git a/arch/arm/mach-orion5x/board-dt.c b/arch/arm/mach-orion5x/board-dt.c
index 3d36f1d95196..3f651df3a71c 100644
--- a/arch/arm/mach-orion5x/board-dt.c
+++ b/arch/arm/mach-orion5x/board-dt.c
@@ -63,6 +63,9 @@ static void __init orion5x_dt_init(void)
if (of_machine_is_compatible("maxtor,shared-storage-2"))
mss2_init();
+ if (of_machine_is_compatible("lacie,d2-network"))
+ d2net_init();
+
of_platform_default_populate(NULL, orion5x_auxdata_lookup, NULL);
}
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index eb96009e21c4..b9cfdb456456 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -75,6 +75,12 @@ extern void mss2_init(void);
static inline void mss2_init(void) {}
#endif
+#ifdef CONFIG_MACH_D2NET_DT
+void d2net_init(void);
+#else
+static inline void d2net_init(void) {}
+#endif
+
/*****************************************************************************
* Helpers to access Orion registers
****************************************************************************/
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index 83cfbb882a2d..7f6bd7f069e4 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -220,8 +220,6 @@ void sharpsl_battery_kick(void)
{
schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(125));
}
-EXPORT_SYMBOL(sharpsl_battery_kick);
-
static void sharpsl_battery_thread(struct work_struct *private_)
{
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index a4fdc399d152..742c67301dee 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -9,7 +9,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h> /* symbol_get ; symbol_put */
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio_keys.h>
@@ -514,17 +513,6 @@ static struct pxa2xx_spi_chip spitz_ads7846_chip = {
.gpio_cs = SPITZ_GPIO_ADS7846_CS,
};
-static void spitz_bl_kick_battery(void)
-{
- void (*kick_batt)(void);
-
- kick_batt = symbol_get(sharpsl_battery_kick);
- if (kick_batt) {
- kick_batt();
- symbol_put(sharpsl_battery_kick);
- }
-}
-
static struct corgi_lcd_platform_data spitz_lcdcon_info = {
.init_mode = CORGI_LCD_MODE_VGA,
.max_intensity = 0x2f,
@@ -532,7 +520,7 @@ static struct corgi_lcd_platform_data spitz_lcdcon_info = {
.limit_mask = 0x0b,
.gpio_backlight_cont = SPITZ_GPIO_BACKLIGHT_CONT,
.gpio_backlight_on = SPITZ_GPIO_BACKLIGHT_ON,
- .kick_battery = spitz_bl_kick_battery,
+ .kick_battery = sharpsl_battery_kick,
};
static struct pxa2xx_spi_chip spitz_lcdcon_chip = {
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index d96a101e5504..2f305c7ce1cb 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -653,7 +653,7 @@ static void __init map_sa1100_gpio_regs( void )
*/
static void __init get_assabet_scr(void)
{
- unsigned long uninitialized_var(scr), i;
+ unsigned long scr, i;
GPDR |= 0x3fc; /* Configure GPIO 9:2 as outputs */
GPSR = 0x3fc; /* Write 0xFF to GPIO 9:2 */
diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c
index 1dbe98948ce3..9627c4cf3e41 100644
--- a/arch/arm/mach-sa1100/jornada720_ssp.c
+++ b/arch/arm/mach-sa1100/jornada720_ssp.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* arch/arm/mac-sa1100/jornada720_ssp.c
*
* Copyright (C) 2006/2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com>
@@ -26,6 +26,7 @@ static unsigned long jornada_ssp_flags;
/**
* jornada_ssp_reverse - reverses input byte
+ * @byte: input byte to reverse
*
* we need to reverse all data we receive from the mcu due to its physical location
* returns : 01110111 -> 11101110
@@ -46,6 +47,7 @@ EXPORT_SYMBOL(jornada_ssp_reverse);
/**
* jornada_ssp_byte - waits for ready ssp bus and sends byte
+ * @byte: input byte to transmit
*
* waits for fifo buffer to clear and then transmits, if it doesn't then we will
* timeout after <timeout> rounds. Needs mcu running before its called.
@@ -77,6 +79,7 @@ EXPORT_SYMBOL(jornada_ssp_byte);
/**
* jornada_ssp_inout - decide if input is command or trading byte
+ * @byte: input byte to send (may be %TXDUMMY)
*
* returns : (jornada_ssp_byte(byte)) on success
* : %-ETIMEDOUT on timeout failure
diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
index 09ef73b99dd8..ba44cec5e59a 100644
--- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
@@ -125,6 +125,7 @@ remove:
list_for_each_entry_safe(pos, tmp, &quirk_list, list) {
list_del(&pos->list);
+ of_node_put(pos->np);
kfree(pos);
}
@@ -174,11 +175,12 @@ static int __init rcar_gen2_regulator_quirk(void)
memcpy(&quirk->i2c_msg, id->data, sizeof(quirk->i2c_msg));
quirk->id = id;
- quirk->np = np;
+ quirk->np = of_node_get(np);
quirk->i2c_msg.addr = addr;
ret = of_irq_parse_one(np, 0, argsa);
if (ret) { /* Skip invalid entry and continue */
+ of_node_put(np);
kfree(quirk);
continue;
}
@@ -225,6 +227,7 @@ err_free:
err_mem:
list_for_each_entry_safe(pos, tmp, &quirk_list, list) {
list_del(&pos->list);
+ of_node_put(pos->np);
kfree(pos);
}
diff --git a/arch/arm/mach-sunxi/mc_smp.c b/arch/arm/mach-sunxi/mc_smp.c
index 26cbce135338..f779e386b6e7 100644
--- a/arch/arm/mach-sunxi/mc_smp.c
+++ b/arch/arm/mach-sunxi/mc_smp.c
@@ -804,16 +804,16 @@ static int __init sunxi_mc_smp_init(void)
for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) {
ret = of_property_match_string(node, "enable-method",
sunxi_mc_smp_data[i].enable_method);
- if (!ret)
+ if (ret >= 0)
break;
}
- is_a83t = sunxi_mc_smp_data[i].is_a83t;
-
of_node_put(node);
- if (ret)
+ if (ret < 0)
return -ENODEV;
+ is_a83t = sunxi_mc_smp_data[i].is_a83t;
+
if (!sunxi_mc_smp_cpu_table_init())
return -EINVAL;
diff --git a/arch/arm/mach-zynq/Kconfig b/arch/arm/mach-zynq/Kconfig
index 1ca633e3d024..557bfe794d29 100644
--- a/arch/arm/mach-zynq/Kconfig
+++ b/arch/arm/mach-zynq/Kconfig
@@ -17,3 +17,19 @@ config ARCH_ZYNQ
select SOC_BUS
help
Support for Xilinx Zynq ARM Cortex A9 Platform
+
+if ARCH_ZYNQ
+
+menu "Xilinx Specific Options"
+
+config XILINX_PREFETCH
+ bool "Cache Prefetch"
+ default y
+ help
+ This option turns on L1 & L2 cache prefetching to get the best performance
+	  in many cases. It may not always give the best performance, depending
+	  on the usage.
+
+endmenu
+
+endif
diff --git a/arch/arm/mach-zynq/Makefile b/arch/arm/mach-zynq/Makefile
index 9df74cd85fd0..dbb75be53deb 100644
--- a/arch/arm/mach-zynq/Makefile
+++ b/arch/arm/mach-zynq/Makefile
@@ -4,5 +4,9 @@
#
# Common support
-obj-y := common.o slcr.o pm.o
+obj-y := common.o efuse.o slcr.o zynq_ocm.o pm.o
+
obj-$(CONFIG_SMP) += headsmp.o platsmp.o
+ORIG_AFLAGS := $(KBUILD_AFLAGS)
+KBUILD_AFLAGS = $(subst -march=armv6k,,$(ORIG_AFLAGS))
+obj-$(CONFIG_SUSPEND) += suspend.o
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 3a4248fd7962..6da93dd0cd96 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -77,6 +77,7 @@ static int __init zynq_get_revision(void)
}
zynq_devcfg_base = of_iomap(np, 0);
+ of_node_put(np);
if (!zynq_devcfg_base) {
pr_err("%s: Unable to map I/O memory\n", __func__);
return -1;
@@ -95,6 +96,7 @@ static void __init zynq_init_late(void)
{
zynq_core_pm_init();
zynq_pm_late_init();
+ zynq_prefetch_init();
}
/**
@@ -175,6 +177,7 @@ static void __init zynq_map_io(void)
static void __init zynq_irq_init(void)
{
+ zynq_early_efuse_init();
zynq_early_slcr_init();
irqchip_init();
}
@@ -186,8 +189,13 @@ static const char * const zynq_dt_match[] = {
DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform")
/* 64KB way size, 8-way associativity, parity disabled */
- .l2c_aux_val = 0x00400000,
+#ifdef CONFIG_XILINX_PREFETCH
+ .l2c_aux_val = 0x30400000,
+ .l2c_aux_mask = 0xcfbfffff,
+#else
+ .l2c_aux_val = 0x00400000,
.l2c_aux_mask = 0xffbfffff,
+#endif
.smp = smp_ops(zynq_smp_ops),
.map_io = zynq_map_io,
.init_irq = zynq_irq_init,
diff --git a/arch/arm/mach-zynq/common.h b/arch/arm/mach-zynq/common.h
index 60e662324699..5816d57e5a5d 100644
--- a/arch/arm/mach-zynq/common.h
+++ b/arch/arm/mach-zynq/common.h
@@ -15,8 +15,12 @@ extern void zynq_slcr_cpu_stop(int cpu);
extern void zynq_slcr_cpu_start(int cpu);
extern bool zynq_slcr_cpu_state_read(int cpu);
extern void zynq_slcr_cpu_state_write(int cpu, bool die);
+extern u32 zynq_slcr_get_ocm_config(void);
extern u32 zynq_slcr_get_device_id(void);
+extern bool zynq_efuse_cpu_state(int cpu);
+extern int zynq_early_efuse_init(void);
+
#ifdef CONFIG_SMP
extern char zynq_secondary_trampoline;
extern char zynq_secondary_trampoline_jump;
@@ -25,9 +29,31 @@ extern int zynq_cpun_start(u32 address, int cpu);
extern const struct smp_operations zynq_smp_ops;
#endif
+extern void zynq_slcr_init_preload_fpga(void);
+extern void zynq_slcr_init_postload_fpga(void);
+
+extern void __iomem *zynq_slcr_base;
extern void __iomem *zynq_scu_base;
void zynq_pm_late_init(void);
+extern unsigned int zynq_sys_suspend_sz;
+int zynq_sys_suspend(void __iomem *ddrc_base, void __iomem *slcr_base);
+
+static inline void zynq_prefetch_init(void)
+{
+ /*
+ * Enable prefetching in aux control register. L2 prefetch must
+ * only be enabled if the slave supports it (PL310 does)
+ */
+ asm volatile ("mrc p15, 0, r1, c1, c0, 1\n"
+#ifdef CONFIG_XILINX_PREFETCH
+ "orr r1, r1, #6\n"
+#else
+ "bic r1, r1, #6\n"
+#endif
+ "mcr p15, 0, r1, c1, c0, 1\n"
+ : : : "r1");
+}
static inline void zynq_core_pm_init(void)
{
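The #6 literal in the mrc/mcr sequence above covers ACTLR bits [2:1] of the Cortex-A9; per a reading of the A9 TRM (worth verifying against your silicon revision), bit 1 enables L2 prefetch hints and bit 2 enables L1 prefetching. Spelled out:

/* Assumed bit layout of the Cortex-A9 Auxiliary Control Register: */
#define A9_ACTLR_L2_PREFETCH	(1 << 1)
#define A9_ACTLR_L1_PREFETCH	(1 << 2)
#define A9_ACTLR_PREFETCH_MASK	(A9_ACTLR_L2_PREFETCH | A9_ACTLR_L1_PREFETCH)	/* == 6 */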
diff --git a/arch/arm/mach-zynq/efuse.c b/arch/arm/mach-zynq/efuse.c
new file mode 100644
index 000000000000..d31a5822ec65
--- /dev/null
+++ b/arch/arm/mach-zynq/efuse.c
@@ -0,0 +1,75 @@
+/*
+ * Xilinx EFUSE driver
+ *
+ * Copyright (c) 2016 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include "common.h"
+
+#define EFUSE_STATUS_OFFSET 0x10
+
+/* 0 means cpu1 is working, 1 means cpu1 is broken */
+#define EFUSE_STATUS_CPU_BIT BIT(7)
+
+void __iomem *zynq_efuse_base;
+
+/**
+ * zynq_efuse_cpu_state - Read cpu state
+ * @cpu: cpu number
+ *
+ * Return: true if cpu is running, false if cpu is broken
+ */
+bool zynq_efuse_cpu_state(int cpu)
+{
+ u32 state;
+
+ if (!cpu)
+ return true;
+
+ state = readl(zynq_efuse_base + EFUSE_STATUS_OFFSET);
+ state &= EFUSE_STATUS_CPU_BIT;
+
+ if (!state)
+ return true;
+
+ return false;
+}
+
+/**
+ * zynq_early_efuse_init - Early efuse init function
+ *
+ * Return: 0 on success; failure to find or map the efuse triggers BUG().
+ *
+ * Called very early during boot from platform code.
+ */
+int __init zynq_early_efuse_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "xlnx,zynq-efuse");
+ if (!np) {
+ pr_err("%s: no efuse node found\n", __func__);
+ BUG();
+ }
+
+ zynq_efuse_base = of_iomap(np, 0);
+ if (!zynq_efuse_base) {
+ pr_err("%s: Unable to map I/O memory\n", __func__);
+ BUG();
+ }
+
+ np->data = (__force void *)zynq_efuse_base;
+
+ pr_info("%s mapped to %p\n", np->name, zynq_efuse_base);
+
+ of_node_put(np);
+
+ return 0;
+}
diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
index a10085be9073..4568f68c3c07 100644
--- a/arch/arm/mach-zynq/platsmp.c
+++ b/arch/arm/mach-zynq/platsmp.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <linux/irqchip/arm-gic.h>
#include "common.h"
@@ -30,6 +31,7 @@ int zynq_cpun_start(u32 address, int cpu)
{
u32 trampoline_code_size = &zynq_secondary_trampoline_end -
&zynq_secondary_trampoline;
+ u32 phy_cpuid = cpu_logical_map(cpu);
/* MS: Expectation that SLCR are directly map and accessible */
/* Not possible to jump to non aligned address */
@@ -39,7 +41,7 @@ int zynq_cpun_start(u32 address, int cpu)
u32 trampoline_size = &zynq_secondary_trampoline_jump -
&zynq_secondary_trampoline;
- zynq_slcr_cpu_stop(cpu);
+ zynq_slcr_cpu_stop(phy_cpuid);
if (address) {
if (__pa(PAGE_OFFSET)) {
zero = ioremap(0, trampoline_code_size);
@@ -68,7 +70,7 @@ int zynq_cpun_start(u32 address, int cpu)
if (__pa(PAGE_OFFSET))
iounmap(zero);
}
- zynq_slcr_cpu_start(cpu);
+ zynq_slcr_cpu_start(phy_cpuid);
return 0;
}
@@ -81,6 +83,9 @@ EXPORT_SYMBOL(zynq_cpun_start);
static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
+ if (!zynq_efuse_cpu_state(cpu))
+ return -1;
+
return zynq_cpun_start(__pa_symbol(secondary_startup_arm), cpu);
}
@@ -113,6 +118,7 @@ static void __init zynq_smp_prepare_cpus(unsigned int max_cpus)
static void zynq_secondary_init(unsigned int cpu)
{
zynq_core_pm_init();
+ zynq_prefetch_init();
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm/mach-zynq/pm.c b/arch/arm/mach-zynq/pm.c
index 8ba450ab559c..b9445a654b59 100644
--- a/arch/arm/mach-zynq/pm.c
+++ b/arch/arm/mach-zynq/pm.c
@@ -7,6 +7,14 @@
* Sören Brinkmann <soren.brinkmann@xilinx.com>
*/
+#include <linux/clk/zynq.h>
+#include <linux/genalloc.h>
+#include <linux/suspend.h>
+#include <asm/cacheflush.h>
+#include <asm/fncpy.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/mach/map.h>
+#include <asm/suspend.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -22,6 +30,165 @@
static void __iomem *ddrc_base;
+#ifdef CONFIG_SUSPEND
+static int (*zynq_suspend_ptr)(void __iomem *, void __iomem *);
+
+static int zynq_pm_prepare_late(void)
+{
+ return zynq_clk_suspend_early();
+}
+
+static void zynq_pm_wake(void)
+{
+ zynq_clk_resume_late();
+}
+
+static int zynq_pm_suspend(unsigned long arg)
+{
+ u32 reg;
+ int do_ddrpll_bypass = 1;
+
+ /* Topswitch clock stop disable */
+ zynq_clk_topswitch_disable();
+
+ if (!zynq_suspend_ptr || !ddrc_base) {
+ do_ddrpll_bypass = 0;
+ } else {
+ /* enable DDRC self-refresh mode */
+ reg = readl(ddrc_base + DDRC_CTRL_REG1_OFFS);
+ reg |= DDRC_SELFREFRESH_MASK;
+ writel(reg, ddrc_base + DDRC_CTRL_REG1_OFFS);
+ }
+
+ if (do_ddrpll_bypass) {
+ /*
+ * Going this way will turn off DDR related clocks and the DDR
+ * PLL. I.e. We might brake sub systems relying on any of this
+ * clocks. And even worse: If there are any other masters in the
+ * system (e.g. in the PL) accessing DDR they are screwed.
+ */
+ flush_cache_all();
+ if (zynq_suspend_ptr(ddrc_base, zynq_slcr_base))
+ pr_warn("DDR self refresh failed.\n");
+ } else {
+ WARN_ONCE(1, "DRAM self-refresh not available\n");
+ cpu_do_idle();
+ }
+
+ /* disable DDRC self-refresh mode */
+ if (do_ddrpll_bypass) {
+ reg = readl(ddrc_base + DDRC_CTRL_REG1_OFFS);
+ reg &= ~DDRC_SELFREFRESH_MASK;
+ writel(reg, ddrc_base + DDRC_CTRL_REG1_OFFS);
+ }
+
+ /* Topswitch clock stop enable */
+ zynq_clk_topswitch_enable();
+
+ return 0;
+}
+
+static int zynq_pm_enter(suspend_state_t suspend_state)
+{
+ switch (suspend_state) {
+ case PM_SUSPEND_STANDBY:
+ case PM_SUSPEND_MEM:
+ cpu_suspend(0, zynq_pm_suspend);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct platform_suspend_ops zynq_pm_ops = {
+ .prepare_late = zynq_pm_prepare_late,
+ .enter = zynq_pm_enter,
+ .wake = zynq_pm_wake,
+ .valid = suspend_valid_only_mem,
+};
+
+/**
+ * zynq_pm_remap_ocm() - Remap OCM
+ * Returns a pointer to the mapped memory or NULL.
+ *
+ * Remap the OCM.
+ */
+static void __iomem *zynq_pm_remap_ocm(void)
+{
+ struct device_node *np;
+ const char *comp = "xlnx,zynq-ocmc-1.0";
+ void __iomem *base = NULL;
+
+ np = of_find_compatible_node(NULL, NULL, comp);
+ if (np) {
+ struct device *dev;
+ unsigned long pool_addr;
+ unsigned long pool_addr_virt;
+ struct gen_pool *pool;
+
+		dev = &(of_find_device_by_node(np)->dev);
+		of_node_put(np);
+
+ /* Get OCM pool from device tree or platform data */
+ pool = gen_pool_get(dev, NULL);
+ if (!pool) {
+ pr_warn("%s: OCM pool is not available\n", __func__);
+ return NULL;
+ }
+
+ pool_addr_virt = gen_pool_alloc(pool, zynq_sys_suspend_sz);
+ if (!pool_addr_virt) {
+ pr_warn("%s: Can't get OCM poll\n", __func__);
+ return NULL;
+ }
+ pool_addr = gen_pool_virt_to_phys(pool, pool_addr_virt);
+ if (!pool_addr) {
+ pr_warn("%s: Can't get physical address of OCM pool\n",
+ __func__);
+ return NULL;
+ }
+ base = __arm_ioremap_exec(pool_addr, zynq_sys_suspend_sz,
+ MT_MEMORY_RWX);
+ if (!base) {
+ pr_warn("%s: IOremap OCM pool failed\n", __func__);
+ return NULL;
+ }
+ pr_debug("%s: Remap OCM %s from %lx to %lx\n", __func__, comp,
+ pool_addr_virt, (unsigned long)base);
+ } else {
+ pr_warn("%s: no compatible node found for '%s'\n", __func__,
+ comp);
+ }
+
+ return base;
+}
+
+static void zynq_pm_suspend_init(void)
+{
+ void __iomem *ocm_base = zynq_pm_remap_ocm();
+
+ if (!ocm_base) {
+ pr_warn("%s: Unable to map OCM.\n", __func__);
+ } else {
+ /*
+ * Copy code to suspend system into OCM. The suspend code
+ * needs to run from OCM as DRAM may no longer be available
+ * when the PLL is stopped.
+ */
+ zynq_suspend_ptr = fncpy((__force void *)ocm_base,
+ (__force void *)&zynq_sys_suspend,
+ zynq_sys_suspend_sz);
+ }
+
+ suspend_set_ops(&zynq_pm_ops);
+}
+#else /* CONFIG_SUSPEND */
+static void zynq_pm_suspend_init(void) { }
+#endif /* CONFIG_SUSPEND */
+
/**
* zynq_pm_ioremap() - Create IO mappings
* @comp: DT compatible string
@@ -68,4 +235,7 @@ void __init zynq_pm_late_init(void)
reg |= DDRC_CLOCKSTOP_MASK;
writel(reg, ddrc_base + DDRC_DRAM_PARAM_REG3_OFFS);
}
+
+ /* set up suspend */
+ zynq_pm_suspend_init();
}
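The suspend path relies on fncpy(): the routine must execute from OCM because DRAM, and the PLL feeding it, goes away underneath it. A condensed sketch of the hand-off, assuming an already-mapped and suitably aligned OCM window:

typedef int (*zynq_suspend_fn)(void __iomem *ddrc, void __iomem *slcr);

/* Copy the position-independent suspend routine into SRAM and call it. */
zynq_suspend_fn fn = fncpy((__force void *)ocm_base,	/* 8-byte aligned dest */
			   (__force void *)&zynq_sys_suspend,
			   zynq_sys_suspend_sz);
if (fn)
	fn(ddrc_base, zynq_slcr_base);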
diff --git a/arch/arm/mach-zynq/slcr.c b/arch/arm/mach-zynq/slcr.c
index 37707614885a..19f0bbd09db0 100644
--- a/arch/arm/mach-zynq/slcr.c
+++ b/arch/arm/mach-zynq/slcr.c
@@ -16,10 +16,13 @@
/* register offsets */
#define SLCR_UNLOCK_OFFSET 0x8 /* SCLR unlock register */
#define SLCR_PS_RST_CTRL_OFFSET 0x200 /* PS Software Reset Control */
+#define SLCR_FPGA_RST_CTRL_OFFSET 0x240 /* FPGA Software Reset Control */
#define SLCR_A9_CPU_RST_CTRL_OFFSET 0x244 /* CPU Software Reset Control */
#define SLCR_REBOOT_STATUS_OFFSET 0x258 /* PS Reboot Status */
#define SLCR_PSS_IDCODE 0x530 /* PS IDCODE */
#define SLCR_L2C_RAM 0xA1C /* L2C_RAM in AR#54190 */
+#define SLCR_LVL_SHFTR_EN_OFFSET 0x900 /* Level Shifters Enable */
+#define SLCR_OCM_CFG_OFFSET 0x910 /* OCM Address Mapping */
#define SLCR_UNLOCK_MAGIC 0xDF0D
#define SLCR_A9_CPU_CLKSTOP 0x10
@@ -27,7 +30,7 @@
#define SLCR_PSS_IDCODE_DEVICE_SHIFT 12
#define SLCR_PSS_IDCODE_DEVICE_MASK 0x1F
-static void __iomem *zynq_slcr_base;
+void __iomem *zynq_slcr_base;
static struct regmap *zynq_slcr_regmap;
/**
@@ -116,6 +119,48 @@ static struct notifier_block zynq_slcr_restart_nb = {
};
/**
+ * zynq_slcr_get_ocm_config - Get SLCR OCM config
+ *
+ * return: OCM config bits
+ */
+u32 zynq_slcr_get_ocm_config(void)
+{
+ u32 ret;
+
+ zynq_slcr_read(&ret, SLCR_OCM_CFG_OFFSET);
+ return ret;
+}
+
+/**
+ * zynq_slcr_init_preload_fpga - Disable communication from the PL to PS.
+ */
+void zynq_slcr_init_preload_fpga(void)
+{
+ /* Assert FPGA top level output resets */
+ zynq_slcr_write(0xF, SLCR_FPGA_RST_CTRL_OFFSET);
+
+ /* Disable level shifters */
+ zynq_slcr_write(0, SLCR_LVL_SHFTR_EN_OFFSET);
+
+ /* Enable output level shifters */
+ zynq_slcr_write(0xA, SLCR_LVL_SHFTR_EN_OFFSET);
+}
+EXPORT_SYMBOL(zynq_slcr_init_preload_fpga);
+
+/**
+ * zynq_slcr_init_postload_fpga - Re-enable communication from the PL to PS.
+ */
+void zynq_slcr_init_postload_fpga(void)
+{
+ /* Enable level shifters */
+ zynq_slcr_write(0xf, SLCR_LVL_SHFTR_EN_OFFSET);
+
+ /* Deassert AXI interface resets */
+ zynq_slcr_write(0, SLCR_FPGA_RST_CTRL_OFFSET);
+}
+EXPORT_SYMBOL(zynq_slcr_init_postload_fpga);
+
+/**
* zynq_slcr_cpu_start - Start cpu
* @cpu: cpu number
*/
@@ -213,6 +258,7 @@ int __init zynq_early_slcr_init(void)
zynq_slcr_regmap = syscon_regmap_lookup_by_compatible("xlnx,zynq-slcr");
if (IS_ERR(zynq_slcr_regmap)) {
pr_err("%s: failed to find zynq-slcr\n", __func__);
+ of_node_put(np);
return -ENODEV;
}
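The two exported helpers are meant to bracket PL reconfiguration. A sketch of the intended call order (the step in the middle belongs to whatever devcfg driver the platform uses; illustrative only):

zynq_slcr_init_preload_fpga();	/* assert PL resets, gate the level shifters */
/* ... hand the bitstream to the configuration engine ... */
zynq_slcr_init_postload_fpga();	/* re-enable shifters, release AXI resets */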
diff --git a/arch/arm/mach-zynq/suspend.S b/arch/arm/mach-zynq/suspend.S
new file mode 100644
index 000000000000..f3f8440e8018
--- /dev/null
+++ b/arch/arm/mach-zynq/suspend.S
@@ -0,0 +1,185 @@
+/*
+ * Suspend support for Zynq
+ *
+ * Copyright (C) 2012 Xilinx
+ *
+ * Soren Brinkmann <soren.brinkmann@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+
+#define ARMPLL_CTRL_OFFS 0x100
+#define DDRPLL_CTRL_OFFS 0x104
+#define PLLSTATUS_OFFS 0x10c
+#define DDR_CLK_CTRL_OFFS 0x124
+#define DCI_CLK_CTRL_OFFS 0x128
+#define MODE_STS_OFFS 0x54
+
+#define PLL_RESET_MASK 1
+#define PLL_PWRDWN_MASK (1 << 1)
+#define PLL_BYPASS_MASK (1 << 4)
+#define DCICLK_ENABLE_MASK 1
+#define DDRCLK_ENABLE_MASK 3
+#define ARM_LOCK_MASK (1 << 0)
+#define DDR_LOCK_MASK (1 << 1)
+#define DDRC_STATUS_MASK 7
+
+#define DDRC_OPMODE_SR 3
+#define MAXTRIES 100
+
+ .text
+ .align 3
+
+/**
+ * zynq_sys_suspend - Enter suspend
+ * @ddrc_base: Base address of the DDRC
+ * @slcr_base: Base address of the SLCR
+ * Returns -1 if DRAM subsystem is not gated off, 0 otherwise.
+ *
+ * This function is moved into OCM and finishes the suspend operation. I.e. DDR
+ * related clocks are gated off and the DDR PLL is bypassed.
+ */
+ENTRY(zynq_sys_suspend)
+ push {r4 - r7}
+
+ /* Check DDRC is in self-refresh mode */
+ ldr r2, [r0, #MODE_STS_OFFS]
+ and r2, #DDRC_STATUS_MASK
+ cmp r2, #DDRC_OPMODE_SR
+ movweq r3, #0xff00
+ bne suspend
+
+ mov r3, #MAXTRIES
+ movw r4, #0xfff0
+ movt r4, #0x1f
+ /* Wait for command queue empty */
+1: subs r3, #1
+ movweq r3, #0xff00
+ beq suspend
+ dsb sy
+ ldr r2, [r0, #MODE_STS_OFFS]
+ ands r2, r4
+ bne 1b
+
+ dsb sy
+
+ /*
+ * Wait for DDRC pipeline/queues to drain.
+ * We should wait ~40 DDR cycles. DDR is still at full speed while the
+ * CPU might already run in PLL bypass mode. The fastest speed the CPU
+ * runs at is ~1 GHz ~ 2 * DDR speed.
+ */
+ mov r3, #160
+1: nop
+ subs r3, #1
+ bne 1b
+
+ dsb sy
+
+ /* read back CAM status once more */
+ ldr r2, [r0, #MODE_STS_OFFS]
+ ands r2, r4
+ movwne r3, #0xff00
+ bne suspend
+
+ /* Stop DDR clocks */
+ ldr r2, [r1, #DDR_CLK_CTRL_OFFS]
+ bic r2, #DDRCLK_ENABLE_MASK
+ str r2, [r1, #DDR_CLK_CTRL_OFFS]
+
+ dmb st
+
+ ldr r2, [r1, #DCI_CLK_CTRL_OFFS]
+ bic r2, #DCICLK_ENABLE_MASK
+ str r2, [r1, #DCI_CLK_CTRL_OFFS]
+
+ dmb st
+
+ /* Bypass and powerdown DDR PLL */
+ ldr r2, [r1, #DDRPLL_CTRL_OFFS]
+ orr r2, #PLL_BYPASS_MASK
+ str r2, [r1, #DDRPLL_CTRL_OFFS]
+ orr r2, #(PLL_PWRDWN_MASK | PLL_RESET_MASK)
+ str r2, [r1, #DDRPLL_CTRL_OFFS]
+
+ /* Bypass and powerdown ARM PLL */
+ ldr r2, [r1, #ARMPLL_CTRL_OFFS]
+ orr r2, #PLL_BYPASS_MASK
+ str r2, [r1, #ARMPLL_CTRL_OFFS]
+ orr r2, #(PLL_PWRDWN_MASK | PLL_RESET_MASK)
+ str r2, [r1, #ARMPLL_CTRL_OFFS]
+
+suspend:
+ dsb sy
+ wfi
+ dsb sy
+ cmp r3, #0xff00
+ moveq r0, #-1
+ beq exit
+
+ /* Power up ARM PLL */
+ ldr r2, [r1, #ARMPLL_CTRL_OFFS]
+ bic r2, #(PLL_PWRDWN_MASK | PLL_RESET_MASK)
+ str r2, [r1, #ARMPLL_CTRL_OFFS]
+ /* wait for lock */
+1: ldr r2, [r1, #PLLSTATUS_OFFS]
+ ands r2, #ARM_LOCK_MASK
+ beq 1b
+
+ dsb sy
+
+ /* Disable ARM PLL bypass */
+ ldr r2, [r1, #ARMPLL_CTRL_OFFS]
+ bic r2, #PLL_BYPASS_MASK
+ str r2, [r1, #ARMPLL_CTRL_OFFS]
+
+ dmb st
+
+ /* Power up DDR PLL */
+ ldr r2, [r1, #DDRPLL_CTRL_OFFS]
+ bic r2, #(PLL_PWRDWN_MASK | PLL_RESET_MASK)
+ str r2, [r1, #DDRPLL_CTRL_OFFS]
+ /* wait for lock */
+1: ldr r2, [r1, #PLLSTATUS_OFFS]
+ ands r2, #DDR_LOCK_MASK
+ beq 1b
+
+ dsb sy
+
+ /* Disable DDR PLL bypass */
+ ldr r2, [r1, #DDRPLL_CTRL_OFFS]
+ bic r2, #PLL_BYPASS_MASK
+ str r2, [r1, #DDRPLL_CTRL_OFFS]
+
+ dmb st
+
+ /* Start DDR clocks */
+ ldr r2, [r1, #DCI_CLK_CTRL_OFFS]
+ orr r2, #DCICLK_ENABLE_MASK
+ str r2, [r1, #DCI_CLK_CTRL_OFFS]
+
+ dmb st
+
+ ldr r2, [r1, #DDR_CLK_CTRL_OFFS]
+ orr r2, #DDRCLK_ENABLE_MASK
+ str r2, [r1, #DDR_CLK_CTRL_OFFS]
+
+ dsb sy
+
+ mov r0, #0
+exit: pop {r4 - r7}
+ bx lr
+
+ENTRY(zynq_sys_suspend_sz)
+ .word . - zynq_sys_suspend
+
+ ENDPROC(zynq_sys_suspend)
diff --git a/arch/arm/mach-zynq/zynq_ocm.c b/arch/arm/mach-zynq/zynq_ocm.c
new file mode 100644
index 000000000000..324b7c125bf5
--- /dev/null
+++ b/arch/arm/mach-zynq/zynq_ocm.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2013 Xilinx
+ *
+ * Based on "Generic on-chip SRAM allocation driver"
+ *
+ * Copyright (C) 2012 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/genalloc.h>
+
+#include "common.h"
+
+#define ZYNQ_OCM_HIGHADDR 0xfffc0000
+#define ZYNQ_OCM_LOWADDR 0x0
+#define ZYNQ_OCM_BLOCK_SIZE 0x10000
+#define ZYNQ_OCM_BLOCKS 4
+#define ZYNQ_OCM_GRANULARITY 32
+
+#define ZYNQ_OCM_PARITY_CTRL 0x0
+#define ZYNQ_OCM_PARITY_ENABLE 0x1e
+
+#define ZYNQ_OCM_PARITY_ERRADDRESS 0x4
+
+#define ZYNQ_OCM_IRQ_STS 0x8
+#define ZYNQ_OCM_IRQ_STS_ERR_MASK 0x7
+
+struct zynq_ocm_dev {
+ void __iomem *base;
+ int irq;
+ struct gen_pool *pool;
+ struct resource res[ZYNQ_OCM_BLOCKS];
+};
+
+/**
+ * zynq_ocm_irq_handler - Interrupt service routine of the OCM controller
+ * @irq: IRQ number
+ * @data: Pointer to the zynq_ocm_dev structure
+ *
+ * Return: IRQ_HANDLED when handled; IRQ_NONE otherwise.
+ */
+static irqreturn_t zynq_ocm_irq_handler(int irq, void *data)
+{
+ u32 sts;
+ u32 err_addr;
+ struct zynq_ocm_dev *zynq_ocm = data;
+
+ /* check status */
+ sts = readl(zynq_ocm->base + ZYNQ_OCM_IRQ_STS);
+ if (sts & ZYNQ_OCM_IRQ_STS_ERR_MASK) {
+ /* check error address */
+ err_addr = readl(zynq_ocm->base + ZYNQ_OCM_PARITY_ERRADDRESS);
+ pr_err("%s: OCM err intr generated at 0x%04x (stat: 0x%08x).",
+ __func__, err_addr, sts & ZYNQ_OCM_IRQ_STS_ERR_MASK);
+ return IRQ_HANDLED;
+ }
+ pr_warn("%s: Interrupt generated by OCM, but no error is found.",
+ __func__);
+
+ return IRQ_NONE;
+}
+
+/**
+ * zynq_ocm_probe - Probe method for the OCM driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ *
+ * Return: 0 on success and error value on failure
+ */
+static int zynq_ocm_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct zynq_ocm_dev *zynq_ocm;
+ u32 i, ocm_config, curr;
+ struct resource *res;
+
+ ocm_config = zynq_slcr_get_ocm_config();
+
+ zynq_ocm = devm_kzalloc(&pdev->dev, sizeof(*zynq_ocm), GFP_KERNEL);
+ if (!zynq_ocm)
+ return -ENOMEM;
+
+ zynq_ocm->pool = devm_gen_pool_create(&pdev->dev,
+ ilog2(ZYNQ_OCM_GRANULARITY),
+ NUMA_NO_NODE, NULL);
+ if (!zynq_ocm->pool)
+ return -ENOMEM;
+
+ curr = 0; /* For storing current struct resource for OCM */
+ for (i = 0; i < ZYNQ_OCM_BLOCKS; i++) {
+ u32 base, start, end;
+
+ /* Setup base address for 64kB OCM block */
+ if (ocm_config & BIT(i))
+ base = ZYNQ_OCM_HIGHADDR;
+ else
+ base = ZYNQ_OCM_LOWADDR;
+
+ /* Calculate start and end block addresses */
+ start = i * ZYNQ_OCM_BLOCK_SIZE + base;
+ end = start + (ZYNQ_OCM_BLOCK_SIZE - 1);
+
+ /* Concatenate OCM blocks together to get bigger pool */
+ if (i > 0 && start == (zynq_ocm->res[curr - 1].end + 1)) {
+ zynq_ocm->res[curr - 1].end = end;
+ } else {
+#ifdef CONFIG_SMP
+ /*
+			 * An OCM block placed at 0x0 has a special meaning
+			 * for SMP because the jump trampoline is added there.
+ * Ensure that this address won't be allocated.
+ */
+ if (!base) {
+ u32 trampoline_code_size =
+ &zynq_secondary_trampoline_end -
+ &zynq_secondary_trampoline;
+ dev_dbg(&pdev->dev,
+ "Allocate reset vector table %dB\n",
+ trampoline_code_size);
+ /* postpone start offset */
+ start += trampoline_code_size;
+ }
+#endif
+ /* First resource is always initialized */
+ zynq_ocm->res[curr].start = start;
+ zynq_ocm->res[curr].end = end;
+ zynq_ocm->res[curr].flags = IORESOURCE_MEM;
+ curr++; /* Increment curr value */
+ }
+ dev_dbg(&pdev->dev, "OCM block %d, start %x, end %x\n",
+ i, start, end);
+ }
+
+ /*
+ * Separate pool allocation from OCM block detection to ensure
+ * the biggest possible pool.
+ */
+ for (i = 0; i < ZYNQ_OCM_BLOCKS; i++) {
+ unsigned long size;
+ void __iomem *virt_base;
+
+ /* Skip all zero size resources */
+ if (zynq_ocm->res[i].end == 0)
+ break;
+ dev_dbg(&pdev->dev, "OCM resources %d, start %x, end %x\n",
+ i, zynq_ocm->res[i].start, zynq_ocm->res[i].end);
+ size = resource_size(&zynq_ocm->res[i]);
+ virt_base = devm_ioremap_resource(&pdev->dev,
+ &zynq_ocm->res[i]);
+ if (IS_ERR(virt_base))
+ return PTR_ERR(virt_base);
+
+ ret = gen_pool_add_virt(zynq_ocm->pool,
+ (unsigned long)virt_base,
+ zynq_ocm->res[i].start, size, -1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Gen pool failed\n");
+ return ret;
+ }
+ dev_info(&pdev->dev, "ZYNQ OCM pool: %ld KiB @ 0x%p\n",
+ size / 1024, virt_base);
+ }
+
+ /* Get OCM config space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ zynq_ocm->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(zynq_ocm->base))
+ return PTR_ERR(zynq_ocm->base);
+
+ /* Allocate OCM parity IRQ */
+ zynq_ocm->irq = platform_get_irq(pdev, 0);
+ if (zynq_ocm->irq < 0) {
+ dev_err(&pdev->dev, "irq resource not found\n");
+ return zynq_ocm->irq;
+ }
+ ret = devm_request_irq(&pdev->dev, zynq_ocm->irq, zynq_ocm_irq_handler,
+ 0, pdev->name, zynq_ocm);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ return ret;
+ }
+
+ /* Enable parity errors */
+ writel(ZYNQ_OCM_PARITY_ENABLE, zynq_ocm->base + ZYNQ_OCM_PARITY_CTRL);
+
+ platform_set_drvdata(pdev, zynq_ocm);
+
+ return 0;
+}
+
+/**
+ * zynq_ocm_remove - Remove method for the OCM driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if a device is physically removed from the system or
+ * if the driver module is being unloaded. It frees all resources allocated to
+ * the device.
+ *
+ * Return: 0 on success and error value on failure
+ */
+static int zynq_ocm_remove(struct platform_device *pdev)
+{
+ struct zynq_ocm_dev *zynq_ocm = platform_get_drvdata(pdev);
+
+ if (gen_pool_avail(zynq_ocm->pool) < gen_pool_size(zynq_ocm->pool))
+ dev_dbg(&pdev->dev, "removed while SRAM allocated\n");
+
+ return 0;
+}
+
+static struct of_device_id zynq_ocm_dt_ids[] = {
+ { .compatible = "xlnx,zynq-ocmc-1.0" },
+ { /* end of table */ }
+};
+
+static struct platform_driver zynq_ocm_driver = {
+ .driver = {
+ .name = "zynq-ocm",
+ .of_match_table = zynq_ocm_dt_ids,
+ },
+ .probe = zynq_ocm_probe,
+ .remove = zynq_ocm_remove,
+};
+
+static int __init zynq_ocm_init(void)
+{
+ return platform_driver_register(&zynq_ocm_driver);
+}
+
+arch_initcall(zynq_ocm_init);
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 1fc6c4810a5c..52ff66f004e4 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -799,7 +799,7 @@ static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst)
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
- union offset_union uninitialized_var(offset);
+ union offset_union offset;
unsigned long instrptr;
int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
unsigned int type;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 27576c7b836e..fbfb9250e743 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2332,15 +2332,15 @@ void arch_teardown_dma_ops(struct device *dev)
}
#ifdef CONFIG_SWIOTLB
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
size, dir);
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
size, dir);
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 2d417867d46f..c276474cc824 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -124,7 +124,7 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
show_pte(KERN_ALERT, mm, addr);
die("Oops", regs, fsr);
bust_spinlocks(0);
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
}
/*
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 463cbb0631be..5becec790379 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -320,7 +320,11 @@ static struct mem_type mem_types[] __ro_after_init = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN | L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
+#ifdef CONFIG_ARM_LPAE
+ .prot_sect = PMD_TYPE_SECT | L_PMD_SECT_RDONLY | PMD_SECT_AP2,
+#else
.prot_sect = PMD_TYPE_SECT,
+#endif
.domain = DOMAIN_KERNEL,
},
[MT_ROM] = {
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 24ecf8d30a1e..74a24e6e9a2d 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -26,6 +26,13 @@
unsigned long vectors_base;
+/*
+ * empty_zero_page is a special page that is used for
+ * zero-initialized data and COW.
+ */
+struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;
#endif
@@ -148,9 +155,21 @@ void __init adjust_lowmem_bounds(void)
*/
void __init paging_init(const struct machine_desc *mdesc)
{
+ void *zero_page;
+
early_trap_init((void *)vectors_base);
mpu_setup();
+
+ /* allocate the zero page. */
+ zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+
bootmem_init();
+
+ empty_zero_page = virt_to_page(zero_page);
+ flush_dcache_page(empty_zero_page);
}
/*
diff --git a/arch/arm/nwfpe/Makefile b/arch/arm/nwfpe/Makefile
index 303400fa2cdf..2aec85ab1e8b 100644
--- a/arch/arm/nwfpe/Makefile
+++ b/arch/arm/nwfpe/Makefile
@@ -11,3 +11,9 @@ nwfpe-y += fpa11.o fpa11_cpdo.o fpa11_cpdt.o \
entry.o
nwfpe-$(CONFIG_FPE_NWFPE_XP) += extended_cpdo.o
+
+# Try really hard to avoid generating calls to __aeabi_uldivmod() from
+# float64_rem() due to loop elision.
+ifdef CONFIG_CC_IS_CLANG
+CFLAGS_softfloat.o += -mllvm -replexitval=never
+endif
diff --git a/arch/arm/probes/kprobes/checkers-common.c b/arch/arm/probes/kprobes/checkers-common.c
index 4d720990cf2a..eba7ac4725c0 100644
--- a/arch/arm/probes/kprobes/checkers-common.c
+++ b/arch/arm/probes/kprobes/checkers-common.c
@@ -40,7 +40,7 @@ enum probes_insn checker_stack_use_imm_0xx(probes_opcode_t insn,
* Different from other insn uses imm8, the real addressing offset of
* STRD in T32 encoding should be imm8 * 4. See ARMARM description.
*/
-enum probes_insn checker_stack_use_t32strd(probes_opcode_t insn,
+static enum probes_insn checker_stack_use_t32strd(probes_opcode_t insn,
struct arch_probes_insn *asi,
const struct decode_header *h)
{
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index 0a783bd4641c..44b5f7dbcc00 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -231,7 +231,7 @@ singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
* kprobe, and that level is reserved for user kprobe handlers, so we can't
* risk encountering a new kprobe in an interrupt handler.
*/
-void __kprobes kprobe_handler(struct pt_regs *regs)
+static void __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p, *cur;
struct kprobe_ctlblk *kcb;
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index c78180172120..e20304f1d8bc 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -145,8 +145,6 @@ __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
}
}
-extern void kprobe_handler(struct pt_regs *regs);
-
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
index c562832b8627..171c7076b89f 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -720,7 +720,7 @@ static const char coverage_register_lookup[16] = {
[REG_TYPE_NOSPPCX] = COVERAGE_ANY_REG | COVERAGE_SP,
};
-unsigned coverage_start_registers(const struct decode_header *h)
+static unsigned coverage_start_registers(const struct decode_header *h)
{
unsigned regs = 0;
int i;
diff --git a/arch/arm/probes/kprobes/test-core.h b/arch/arm/probes/kprobes/test-core.h
index 19a5b2add41e..805116c2ec27 100644
--- a/arch/arm/probes/kprobes/test-core.h
+++ b/arch/arm/probes/kprobes/test-core.h
@@ -453,3 +453,7 @@ void kprobe_thumb32_test_cases(void);
#else
void kprobe_arm_test_cases(void);
#endif
+
+void __kprobes_test_case_start(void);
+void __kprobes_test_case_end_16(void);
+void __kprobes_test_case_end_32(void);
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 57dfc13b2752..c4d1cac0fe32 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -354,7 +354,8 @@ static int __init xen_guest_init(void)
* for secondary CPUs as they are brought up.
* For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
*/
- xen_vcpu_info = alloc_percpu(struct vcpu_info);
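+ /*
+ * 1 << fls(size - 1) rounds the alignment up to the next power of
+ * two, so a single per-cpu vcpu_info can never straddle a page
+ * boundary that Xen would be unable to map.
+ */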
+ xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
+ 1 << fls(sizeof(struct vcpu_info) - 1));
if (xen_vcpu_info == NULL)
return -ENOMEM;
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 38fa917c8585..a6a2514e5fe8 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -70,20 +70,20 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
* If pfn_valid() returns true the page is local and we can use the native
* dma-direct functions, otherwise we call the Xen specific version.
*/
-void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
if (pfn_valid(PFN_DOWN(handle)))
- arch_sync_dma_for_cpu(dev, paddr, size, dir);
+ arch_sync_dma_for_cpu(paddr, size, dir);
else if (dir != DMA_TO_DEVICE)
dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
}
-void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
if (pfn_valid(PFN_DOWN(handle)))
- arch_sync_dma_for_device(dev, paddr, size, dir);
+ arch_sync_dma_for_device(paddr, size, dir);
else if (dir == DMA_FROM_DEVICE)
dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
else
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6b73143f0cf8..384b1bf56667 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -574,6 +574,22 @@ config ARM64_ERRATUM_1542419
If unsure, say Y.
+config ARM64_ERRATUM_1742098
+ bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence"
+ depends on COMPAT
+ default y
+ help
+ This option removes the AES hwcap for aarch32 user-space to
+ work around erratum 1742098 on Cortex-A57 and Cortex-A72.
+
+ Affected parts may corrupt the AES state if an interrupt is
+ taken between a pair of AES instructions. These instructions
+ are only present if the cryptography extensions are present.
+ All software should have a fallback implementation for CPUs
+ that don't implement the cryptography extensions.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 90202e5608d1..b0b2dee8525f 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -296,6 +296,8 @@ config ARCH_ZX
config ARCH_ZYNQMP
bool "Xilinx ZynqMP Family"
+ select PINCTRL
+ select PINCTRL_ZYNQMP
select ZYNQMP_FIRMWARE
help
This enables support for Xilinx ZynqMP Family
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
index a0db02504b69..963a7c505e30 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
@@ -78,7 +78,7 @@
leds {
compatible = "gpio-leds";
- status {
+ led-0 {
label = "orangepi:green:status";
gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; /* PH11 */
};
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
index 2c8c2b322c72..33f1fb9fd161 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
@@ -129,7 +129,7 @@
status = "okay";
clock-frequency = <100000>;
i2c-sda-falling-time-ns = <890>; /* hcnt */
- i2c-sdl-falling-time-ns = <890>; /* lcnt */
+ i2c-scl-falling-time-ns = <890>; /* lcnt */
adc@14 {
compatible = "lltc,ltc2497";
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
index 502c4ac45c29..72255898081c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
@@ -150,7 +150,7 @@
scpi_clocks: clocks {
compatible = "arm,scpi-clocks";
- scpi_dvfs: clock-controller {
+ scpi_dvfs: clocks-0 {
compatible = "arm,scpi-dvfs-clocks";
#clock-cells = <1>;
clock-indices = <0>;
@@ -159,7 +159,7 @@
};
scpi_sensors: sensors {
- compatible = "amlogic,meson-gxbb-scpi-sensors";
+ compatible = "amlogic,meson-gxbb-scpi-sensors", "arm,scpi-sensors";
#thermal-sensor-cells = <1>;
};
};
@@ -1705,7 +1705,7 @@
sd_emmc_b: sd@5000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0x5000 0x0 0x800>;
- interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_B>,
<&clkc CLKID_SD_EMMC_B_CLK0>,
@@ -1717,7 +1717,7 @@
sd_emmc_c: mmc@7000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0x7000 0x0 0x800>;
- interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_C>,
<&clkc CLKID_SD_EMMC_C_CLK0>,
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index d2d255a988a8..a31b623fedb7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -1376,10 +1376,9 @@
dmc: bus@38000 {
compatible = "simple-bus";
- reg = <0x0 0x38000 0x0 0x400>;
#address-cells = <2>;
#size-cells = <2>;
- ranges = <0x0 0x0 0x0 0x38000 0x0 0x400>;
+ ranges = <0x0 0x0 0x0 0x38000 0x0 0x2000>;
canvas: video-lut@48 {
compatible = "amlogic,canvas";
@@ -1783,7 +1782,7 @@
#address-cells = <1>;
#size-cells = <0>;
- internal_ephy: ethernet_phy@8 {
+ internal_ephy: ethernet-phy@8 {
compatible = "ethernet-phy-id0180.3301",
"ethernet-phy-ieee802.3-c22";
interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
@@ -2317,7 +2316,7 @@
sd_emmc_a: sd@ffe03000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0xffe03000 0x0 0x800>;
- interrupts = <GIC_SPI 189 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_A>,
<&clkc CLKID_SD_EMMC_A_CLK0>,
@@ -2329,7 +2328,7 @@
sd_emmc_b: sd@ffe05000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0xffe05000 0x0 0x800>;
- interrupts = <GIC_SPI 190 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_B>,
<&clkc CLKID_SD_EMMC_B_CLK0>,
@@ -2341,7 +2340,7 @@
sd_emmc_c: mmc@ffe07000 {
compatible = "amlogic,meson-axg-mmc";
reg = <0x0 0xffe07000 0x0 0x800>;
- interrupts = <GIC_SPI 191 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_C>,
<&clkc CLKID_SD_EMMC_C_CLK0>,
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
index eb5d177d7a99..c8c438c0b429 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
@@ -54,26 +54,6 @@
compatible = "operating-points-v2";
opp-shared;
- opp-100000000 {
- opp-hz = /bits/ 64 <100000000>;
- opp-microvolt = <731000>;
- };
-
- opp-250000000 {
- opp-hz = /bits/ 64 <250000000>;
- opp-microvolt = <731000>;
- };
-
- opp-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <731000>;
- };
-
- opp-667000000 {
- opp-hz = /bits/ 64 <666666666>;
- opp-microvolt = <731000>;
- };
-
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <731000>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index ad7bc0eec668..e9f9ddd27ad7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -172,7 +172,7 @@
reg = <0x14 0x10>;
};
- eth_mac: eth_mac@34 {
+ eth_mac: eth-mac@34 {
reg = <0x34 0x10>;
};
@@ -189,7 +189,7 @@
scpi_clocks: clocks {
compatible = "arm,scpi-clocks";
- scpi_dvfs: scpi_clocks@0 {
+ scpi_dvfs: clocks-0 {
compatible = "arm,scpi-dvfs-clocks";
#clock-cells = <1>;
clock-indices = <0>;
@@ -464,7 +464,7 @@
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0xc8834000 0x0 0x2000>;
- hwrng: rng {
+ hwrng: rng@0 {
compatible = "amlogic,meson-rng";
reg = <0x0 0x0 0x0 0x4>;
};
@@ -528,21 +528,21 @@
sd_emmc_a: mmc@70000 {
compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
reg = <0x0 0x70000 0x0 0x800>;
- interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
sd_emmc_b: mmc@72000 {
compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
reg = <0x0 0x72000 0x0 0x800>;
- interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
sd_emmc_c: mmc@74000 {
compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
reg = <0x0 0x74000 0x0 0x800>;
- interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts
index b5667f1fb2c8..22fb3e324da5 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts
@@ -18,7 +18,7 @@
leds {
compatible = "gpio-leds";
- status {
+ led {
label = "n1:white:status";
gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index e3cfa24dca5a..6809f495a503 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -700,7 +700,7 @@
};
};
- eth-phy-mux {
+ eth-phy-mux@55c {
compatible = "mdio-mux-mmioreg", "mdio-mux";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
index 93b44efdbc52..35a60b0d3a4f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
@@ -585,7 +585,7 @@
#define MX8MM_IOMUXC_UART1_RXD_GPIO5_IO22 0x234 0x49C 0x000 0x5 0x0
#define MX8MM_IOMUXC_UART1_RXD_TPSMP_HDATA24 0x234 0x49C 0x000 0x7 0x0
#define MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x238 0x4A0 0x000 0x0 0x0
-#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x0
+#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x1
#define MX8MM_IOMUXC_UART1_TXD_ECSPI3_MOSI 0x238 0x4A0 0x000 0x1 0x0
#define MX8MM_IOMUXC_UART1_TXD_GPIO5_IO23 0x238 0x4A0 0x000 0x5 0x0
#define MX8MM_IOMUXC_UART1_TXD_TPSMP_HDATA25 0x238 0x4A0 0x000 0x7 0x0
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
index 7b178a77cc71..304399686c5a 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -838,10 +838,10 @@
clocks = <&clk IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK>;
};
- gpmi: nand-controller@33002000{
+ gpmi: nand-controller@33002000 {
compatible = "fsl,imx8mm-gpmi-nand", "fsl,imx7d-gpmi-nand";
#address-cells = <1>;
- #size-cells = <1>;
+ #size-cells = <0>;
reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
reg-names = "gpmi-nand", "bch";
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index 546511b373d4..31c017736a05 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -695,7 +695,7 @@
gpmi: nand-controller@33002000 {
compatible = "fsl,imx8mn-gpmi-nand", "fsl,imx7d-gpmi-nand";
#address-cells = <1>;
- #size-cells = <1>;
+ #size-cells = <0>;
reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
reg-names = "gpmi-nand", "bch";
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
index 2e8239d489f8..c46b82e2297c 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
@@ -122,9 +122,12 @@
/delete-property/ mrvl,i2c-fast-mode;
status = "okay";
+ /* MCP7940MT-I/MNY RTC */
rtc@6f {
compatible = "microchip,mcp7940x";
reg = <0x6f>;
+ interrupt-parent = <&gpiosb>;
+ interrupts = <5 IRQ_TYPE_EDGE_FALLING>; /* GPIO2_5 */
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
index 2b91daf5c1a6..45e37aa67ce7 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
@@ -26,14 +26,14 @@
stdout-path = "serial0:921600n8";
};
- cpus_fixed_vproc0: fixedregulator@0 {
+ cpus_fixed_vproc0: regulator-vproc-buck0 {
compatible = "regulator-fixed";
regulator-name = "vproc_buck0";
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
};
- cpus_fixed_vproc1: fixedregulator@1 {
+ cpus_fixed_vproc1: regulator-vproc-buck1 {
compatible = "regulator-fixed";
regulator-name = "vproc_buck1";
regulator-min-microvolt = <1000000>;
@@ -50,7 +50,7 @@
id-gpio = <&pio 14 GPIO_ACTIVE_HIGH>;
};
- usb_p0_vbus: regulator@2 {
+ usb_p0_vbus: regulator-usb-p0-vbus {
compatible = "regulator-fixed";
regulator-name = "p0_vbus";
regulator-min-microvolt = <5000000>;
@@ -59,7 +59,7 @@
enable-active-high;
};
- usb_p1_vbus: regulator@3 {
+ usb_p1_vbus: regulator-usb-p1-vbus {
compatible = "regulator-fixed";
regulator-name = "p1_vbus";
regulator-min-microvolt = <5000000>;
@@ -68,7 +68,7 @@
enable-active-high;
};
- usb_p2_vbus: regulator@4 {
+ usb_p2_vbus: regulator-usb-p2-vbus {
compatible = "regulator-fixed";
regulator-name = "p2_vbus";
regulator-min-microvolt = <5000000>;
@@ -77,7 +77,7 @@
enable-active-high;
};
- usb_p3_vbus: regulator@5 {
+ usb_p3_vbus: regulator-usb-p3-vbus {
compatible = "regulator-fixed";
regulator-name = "p3_vbus";
regulator-min-microvolt = <5000000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index 43307bad3f0d..3b12bb313dcd 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -160,70 +160,70 @@
#clock-cells = <0>;
};
- clk26m: oscillator@0 {
+ clk26m: oscillator-26m {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <26000000>;
clock-output-names = "clk26m";
};
- clk32k: oscillator@1 {
+ clk32k: oscillator-32k {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <32768>;
clock-output-names = "clk32k";
};
- clkfpc: oscillator@2 {
+ clkfpc: oscillator-50m {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <50000000>;
clock-output-names = "clkfpc";
};
- clkaud_ext_i_0: oscillator@3 {
+ clkaud_ext_i_0: oscillator-aud0 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <6500000>;
clock-output-names = "clkaud_ext_i_0";
};
- clkaud_ext_i_1: oscillator@4 {
+ clkaud_ext_i_1: oscillator-aud1 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <196608000>;
clock-output-names = "clkaud_ext_i_1";
};
- clkaud_ext_i_2: oscillator@5 {
+ clkaud_ext_i_2: oscillator-aud2 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <180633600>;
clock-output-names = "clkaud_ext_i_2";
};
- clki2si0_mck_i: oscillator@6 {
+ clki2si0_mck_i: oscillator-i2s0 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <30000000>;
clock-output-names = "clki2si0_mck_i";
};
- clki2si1_mck_i: oscillator@7 {
+ clki2si1_mck_i: oscillator-i2s1 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <30000000>;
clock-output-names = "clki2si1_mck_i";
};
- clki2si2_mck_i: oscillator@8 {
+ clki2si2_mck_i: oscillator-i2s2 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <30000000>;
clock-output-names = "clki2si2_mck_i";
};
- clktdmin_mclk_i: oscillator@9 {
+ clktdmin_mclk_i: oscillator-mclk {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <30000000>;
@@ -266,7 +266,7 @@
reg = <0 0x10005000 0 0x1000>;
};
- pio: pinctrl@10005000 {
+ pio: pinctrl@1000b000 {
compatible = "mediatek,mt2712-pinctrl";
reg = <0 0x1000b000 0 0x1000>;
mediatek,pctl-regmap = <&syscfg_pctl_a>;
diff --git a/arch/arm64/boot/dts/mediatek/mt6797.dtsi b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
index 2b2a69c7567f..d4c78c9672ff 100644
--- a/arch/arm64/boot/dts/mediatek/mt6797.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
@@ -102,7 +102,7 @@
};
};
- clk26m: oscillator@0 {
+ clk26m: oscillator-26m {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <26000000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
index 83e10591e0e5..eec9ec1db682 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
@@ -49,7 +49,7 @@
wps {
label = "wps";
linux,code = <KEY_WPS_BUTTON>;
- gpios = <&pio 102 GPIO_ACTIVE_HIGH>;
+ gpios = <&pio 102 GPIO_ACTIVE_LOW>;
};
};
@@ -69,8 +69,9 @@
};
};
- memory {
+ memory@40000000 {
reg = <0 0x40000000 0 0x40000000>;
+ device_type = "memory";
};
reg_1p8v: regulator-1p8v {
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
index 3f783348c66a..ee57fccd489a 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
@@ -55,8 +55,9 @@
};
};
- memory {
+ memory@40000000 {
reg = <0 0x40000000 0 0x20000000>;
+ device_type = "memory";
};
reg_1p8v: regulator-1p8v {
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index e7e002d8b108..3bfe9f5d2a14 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -428,6 +428,7 @@
pwm: pwm@11006000 {
compatible = "mediatek,mt7622-pwm";
reg = <0 0x11006000 0 0x1000>;
+ #pwm-cells = <2>;
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_LOW>;
clocks = <&topckgen CLK_TOP_PWM_SEL>,
<&pericfg CLK_PERI_PWM_PD>,
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index 6dffada2e66b..2b66afcf026e 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -43,7 +43,7 @@
id-gpio = <&pio 16 GPIO_ACTIVE_HIGH>;
};
- usb_p1_vbus: regulator@0 {
+ usb_p1_vbus: regulator-usb-p1 {
compatible = "regulator-fixed";
regulator-name = "usb_vbus";
regulator-min-microvolt = <5000000>;
@@ -52,7 +52,7 @@
enable-active-high;
};
- usb_p0_vbus: regulator@1 {
+ usb_p0_vbus: regulator-usb-p0 {
compatible = "regulator-fixed";
regulator-name = "vbus";
regulator-min-microvolt = <5000000>;
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
index dba3488492f1..21e029afb27b 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
@@ -77,425 +77,427 @@
enable-gpios = <&pm8994_gpios 15 0>;
};
};
+};
- soc {
- serial@7570000 {
- label = "BT-UART";
- status = "okay";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp1_uart1_default>;
- pinctrl-1 = <&blsp1_uart1_sleep>;
+&blsp1_uart1 {
+ label = "BT-UART";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_uart1_default>;
+ pinctrl-1 = <&blsp1_uart1_sleep>;
- bluetooth {
- compatible = "qcom,qca6174-bt";
+ bluetooth {
+ compatible = "qcom,qca6174-bt";
- /* bt_disable_n gpio */
- enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>;
+ /* bt_disable_n gpio */
+ enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>;
- clocks = <&divclk4>;
- };
- };
+ clocks = <&divclk4>;
+ };
+};
- serial@75b0000 {
- label = "LS-UART1";
- status = "okay";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp2_uart1_2pins_default>;
- pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
- };
+&blsp2_uart1 {
+ label = "LS-UART1";
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart1_2pins_default>;
+ pinctrl-1 = <&blsp2_uart1_2pins_sleep>;
+};
- serial@75b1000 {
- label = "LS-UART0";
- status = "disabled";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp2_uart2_4pins_default>;
- pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
- };
+&blsp2_uart2 {
+ label = "LS-UART0";
+ status = "disabled";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart2_4pins_default>;
+ pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
+};
- i2c@7577000 {
- /* On Low speed expansion */
- label = "LS-I2C0";
- status = "okay";
- };
+&blsp1_i2c2 {
+ /* On Low speed expansion */
+ label = "LS-I2C0";
+ status = "okay";
+};
- i2c@75b6000 {
- /* On Low speed expansion */
- label = "LS-I2C1";
- status = "okay";
- };
+&blsp2_i2c1 {
+ /* On Low speed expansion */
+ label = "LS-I2C1";
+ status = "okay";
+};
- spi@7575000 {
- /* On Low speed expansion */
- label = "LS-SPI0";
- status = "okay";
- };
+&blsp1_spi0 {
+ /* On Low speed expansion */
+ label = "LS-SPI0";
+ status = "okay";
+};
- i2c@75b5000 {
- /* On High speed expansion */
- label = "HS-I2C2";
- status = "okay";
- };
+&blsp2_i2c0 {
+ /* On High speed expansion */
+ label = "HS-I2C2";
+ status = "okay";
+};
- spi@75ba000{
- /* On High speed expansion */
- label = "HS-SPI1";
- status = "okay";
- };
+&blsp2_spi5 {
+ /* On High speed expansion */
+ label = "HS-SPI1";
+ status = "okay";
+};
- sdhci@74a4900 {
- /* External SD card */
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
- pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
- cd-gpios = <&msmgpio 38 0x1>;
- vmmc-supply = <&pm8994_l21>;
- vqmmc-supply = <&pm8994_l13>;
- status = "okay";
- };
+&camss {
+ vdda-supply = <&pm8994_l2>;
+};
- phy@627000 {
- status = "okay";
- };
+&sdhc2 {
+ /* External SD card */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+ cd-gpios = <&msmgpio 38 0x1>;
+ vmmc-supply = <&pm8994_l21>;
+ vqmmc-supply = <&pm8994_l13>;
+ status = "okay";
+};
- ufshc@624000 {
- status = "okay";
- };
+&ufsphy {
+ status = "okay";
- pinctrl@1010000 {
- gpio-line-names =
- "[SPI0_DOUT]", /* GPIO_0, BLSP1_SPI_MOSI, LSEC pin 14 */
- "[SPI0_DIN]", /* GPIO_1, BLSP1_SPI_MISO, LSEC pin 10 */
- "[SPI0_CS]", /* GPIO_2, BLSP1_SPI_CS_N, LSEC pin 12 */
- "[SPI0_SCLK]", /* GPIO_3, BLSP1_SPI_CLK, LSEC pin 8 */
- "[UART1_TxD]", /* GPIO_4, BLSP8_UART_TX, LSEC pin 11 */
- "[UART1_RxD]", /* GPIO_5, BLSP8_UART_RX, LSEC pin 13 */
- "[I2C1_SDA]", /* GPIO_6, BLSP8_I2C_SDA, LSEC pin 21 */
- "[I2C1_SCL]", /* GPIO_7, BLSP8_I2C_SCL, LSEC pin 19 */
- "GPIO-H", /* GPIO_8, LCD0_RESET_N, LSEC pin 30 */
- "TP93", /* GPIO_9 */
- "GPIO-G", /* GPIO_10, MDP_VSYNC_P, LSEC pin 29 */
- "[MDP_VSYNC_S]", /* GPIO_11, S HSEC pin 55 */
- "NC", /* GPIO_12 */
- "[CSI0_MCLK]", /* GPIO_13, CAM_MCLK0, P HSEC pin 15 */
- "[CAM_MCLK1]", /* GPIO_14, J14 pin 11 */
- "[CSI1_MCLK]", /* GPIO_15, CAM_MCLK2, P HSEC pin 17 */
- "TP99", /* GPIO_16 */
- "[I2C2_SDA]", /* GPIO_17, CCI_I2C_SDA0, P HSEC pin 34 */
- "[I2C2_SCL]", /* GPIO_18, CCI_I2C_SCL0, P HSEC pin 32 */
- "[CCI_I2C_SDA1]", /* GPIO_19, S HSEC pin 38 */
- "[CCI_I2C_SCL1]", /* GPIO_20, S HSEC pin 36 */
- "FLASH_STROBE_EN", /* GPIO_21, S HSEC pin 5 */
- "FLASH_STROBE_TRIG", /* GPIO_22, S HSEC pin 1 */
- "GPIO-K", /* GPIO_23, CAM2_RST_N, LSEC pin 33 */
- "GPIO-D", /* GPIO_24, LSEC pin 26 */
- "GPIO-I", /* GPIO_25, CAM0_RST_N, LSEC pin 31 */
- "GPIO-J", /* GPIO_26, CAM0_STANDBY_N, LSEC pin 32 */
- "BLSP6_I2C_SDA", /* GPIO_27 */
- "BLSP6_I2C_SCL", /* GPIO_28 */
- "GPIO-B", /* GPIO_29, TS0_RESET_N, LSEC pin 24 */
- "GPIO30", /* GPIO_30, S HSEC pin 4 */
- "HDMI_CEC", /* GPIO_31 */
- "HDMI_DDC_CLOCK", /* GPIO_32 */
- "HDMI_DDC_DATA", /* GPIO_33 */
- "HDMI_HOT_PLUG_DETECT", /* GPIO_34 */
- "PCIE0_RST_N", /* GPIO_35 */
- "PCIE0_CLKREQ_N", /* GPIO_36 */
- "PCIE0_WAKE", /* GPIO_37 */
- "SD_CARD_DET_N", /* GPIO_38 */
- "TSIF1_SYNC", /* GPIO_39, S HSEC pin 48 */
- "W_DISABLE_N", /* GPIO_40 */
- "[BLSP9_UART_TX]", /* GPIO_41 */
- "[BLSP9_UART_RX]", /* GPIO_42 */
- "[BLSP2_UART_CTS_N]", /* GPIO_43 */
- "[BLSP2_UART_RFR_N]", /* GPIO_44 */
- "[BLSP3_UART_TX]", /* GPIO_45 */
- "[BLSP3_UART_RX]", /* GPIO_46 */
- "[I2C0_SDA]", /* GPIO_47, LS_I2C0_SDA, LSEC pin 17 */
- "[I2C0_SCL]", /* GPIO_48, LS_I2C0_SCL, LSEC pin 15 */
- "[UART0_TxD]", /* GPIO_49, BLSP9_UART_TX, LSEC pin 5 */
- "[UART0_RxD]", /* GPIO_50, BLSP9_UART_RX, LSEC pin 7 */
- "[UART0_CTS]", /* GPIO_51, BLSP9_UART_CTS_N, LSEC pin 3 */
- "[UART0_RTS]", /* GPIO_52, BLSP9_UART_RFR_N, LSEC pin 9 */
- "[CODEC_INT1_N]", /* GPIO_53 */
- "[CODEC_INT2_N]", /* GPIO_54 */
- "[BLSP7_I2C_SDA]", /* GPIO_55 */
- "[BLSP7_I2C_SCL]", /* GPIO_56 */
- "MI2S_MCLK", /* GPIO_57, S HSEC pin 3 */
- "[PCM_CLK]", /* GPIO_58, QUA_MI2S_SCK, LSEC pin 18 */
- "[PCM_FS]", /* GPIO_59, QUA_MI2S_WS, LSEC pin 16 */
- "[PCM_DO]", /* GPIO_60, QUA_MI2S_DATA0, LSEC pin 20 */
- "[PCM_DI]", /* GPIO_61, QUA_MI2S_DATA1, LSEC pin 22 */
- "GPIO-E", /* GPIO_62, LSEC pin 27 */
- "TP87", /* GPIO_63 */
- "[CODEC_RST_N]", /* GPIO_64 */
- "[PCM1_CLK]", /* GPIO_65 */
- "[PCM1_SYNC]", /* GPIO_66 */
- "[PCM1_DIN]", /* GPIO_67 */
- "[PCM1_DOUT]", /* GPIO_68 */
- "AUDIO_REF_CLK", /* GPIO_69 */
- "SLIMBUS_CLK", /* GPIO_70 */
- "SLIMBUS_DATA0", /* GPIO_71 */
- "SLIMBUS_DATA1", /* GPIO_72 */
- "NC", /* GPIO_73 */
- "NC", /* GPIO_74 */
- "NC", /* GPIO_75 */
- "NC", /* GPIO_76 */
- "TP94", /* GPIO_77 */
- "NC", /* GPIO_78 */
- "TP95", /* GPIO_79 */
- "GPIO-A", /* GPIO_80, MEMS_RESET_N, LSEC pin 23 */
- "TP88", /* GPIO_81 */
- "TP89", /* GPIO_82 */
- "TP90", /* GPIO_83 */
- "TP91", /* GPIO_84 */
- "[SD_DAT0]", /* GPIO_85, BLSP12_SPI_MOSI, P HSEC pin 1 */
- "[SD_CMD]", /* GPIO_86, BLSP12_SPI_MISO, P HSEC pin 11 */
- "[SD_DAT3]", /* GPIO_87, BLSP12_SPI_CS_N, P HSEC pin 7 */
- "[SD_SCLK]", /* GPIO_88, BLSP12_SPI_CLK, P HSEC pin 9 */
- "TSIF1_CLK", /* GPIO_89, S HSEC pin 42 */
- "TSIF1_EN", /* GPIO_90, S HSEC pin 46 */
- "TSIF1_DATA", /* GPIO_91, S HSEC pin 44 */
- "NC", /* GPIO_92 */
- "TSIF2_CLK", /* GPIO_93, S HSEC pin 52 */
- "TSIF2_EN", /* GPIO_94, S HSEC pin 56 */
- "TSIF2_DATA", /* GPIO_95, S HSEC pin 54 */
- "TSIF2_SYNC", /* GPIO_96, S HSEC pin 58 */
- "NC", /* GPIO_97 */
- "CAM1_STANDBY_N", /* GPIO_98 */
- "NC", /* GPIO_99 */
- "NC", /* GPIO_100 */
- "[LCD1_RESET_N]", /* GPIO_101, S HSEC pin 51 */
- "BOOT_CONFIG1", /* GPIO_102 */
- "USB_HUB_RESET", /* GPIO_103 */
- "CAM1_RST_N", /* GPIO_104 */
- "NC", /* GPIO_105 */
- "NC", /* GPIO_106 */
- "NC", /* GPIO_107 */
- "NC", /* GPIO_108 */
- "NC", /* GPIO_109 */
- "NC", /* GPIO_110 */
- "NC", /* GPIO_111 */
- "NC", /* GPIO_112 */
- "PMI8994_BUA", /* GPIO_113 */
- "PCIE2_RST_N", /* GPIO_114 */
- "PCIE2_CLKREQ_N", /* GPIO_115 */
- "PCIE2_WAKE", /* GPIO_116 */
- "SSC_IRQ_0", /* GPIO_117 */
- "SSC_IRQ_1", /* GPIO_118 */
- "SSC_IRQ_2", /* GPIO_119 */
- "NC", /* GPIO_120 */
- "GPIO121", /* GPIO_121, S HSEC pin 2 */
- "NC", /* GPIO_122 */
- "SSC_IRQ_6", /* GPIO_123 */
- "SSC_IRQ_7", /* GPIO_124 */
- "GPIO-C", /* GPIO_125, TS_INT0, LSEC pin 25 */
- "BOOT_CONFIG5", /* GPIO_126 */
- "NC", /* GPIO_127 */
- "NC", /* GPIO_128 */
- "BOOT_CONFIG7", /* GPIO_129 */
- "PCIE1_RST_N", /* GPIO_130 */
- "PCIE1_CLKREQ_N", /* GPIO_131 */
- "PCIE1_WAKE", /* GPIO_132 */
- "GPIO-L", /* GPIO_133, CAM2_STANDBY_N, LSEC pin 34 */
- "NC", /* GPIO_134 */
- "NC", /* GPIO_135 */
- "BOOT_CONFIG8", /* GPIO_136 */
- "NC", /* GPIO_137 */
- "NC", /* GPIO_138 */
- "GPS_SSBI2", /* GPIO_139 */
- "GPS_SSBI1", /* GPIO_140 */
- "NC", /* GPIO_141 */
- "NC", /* GPIO_142 */
- "NC", /* GPIO_143 */
- "BOOT_CONFIG6", /* GPIO_144 */
- "NC", /* GPIO_145 */
- "NC", /* GPIO_146 */
- "NC", /* GPIO_147 */
- "NC", /* GPIO_148 */
- "NC"; /* GPIO_149 */
- };
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
- qcom,spmi@400f000 {
- pmic@0 {
- gpios@c000 {
- gpio-line-names =
- "NC",
- "KEY_VOLP_N",
- "NC",
- "BL1_PWM",
- "GPIO-F", /* BL0_PWM, LSEC pin 28 */
- "BL1_EN",
- "NC",
- "WLAN_EN",
- "NC",
- "NC",
- "NC",
- "NC",
- "NC",
- "NC",
- "DIVCLK1",
- "DIVCLK2",
- "DIVCLK3",
- "DIVCLK4",
- "BT_EN",
- "PMIC_SLB",
- "PMIC_BUA",
- "USB_VBUS_DET";
- };
-
- mpps@a000 {
- gpio-line-names =
- "VDDPX_BIAS",
- "WIFI_LED",
- "NC",
- "BT_LED",
- "PM_MPP05",
- "PM_MPP06",
- "PM_MPP07",
- "NC";
- };
- };
+ vdda-phy-max-microamp = <18380>;
+ vdda-pll-max-microamp = <9440>;
- pmic@2 {
- gpios@c000 {
- gpio-line-names =
- "NC",
- "SPKR_AMP_EN1",
- "SPKR_AMP_EN2",
- "TP61",
- "NC",
- "USB2_VBUS_DET",
- "NC",
- "NC",
- "NC",
- "NC";
- };
- };
- };
+ vddp-ref-clk-supply = <&pm8994_l25>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+};
- phy@34000 {
- status = "okay";
- };
+&ufshc {
+ status = "okay";
- phy@7410000 {
- status = "okay";
- };
+ vcc-supply = <&pm8994_l20>;
+ vccq-supply = <&pm8994_l25>;
+ vccq2-supply = <&pm8994_s4>;
- phy@7411000 {
- status = "okay";
- };
+ vcc-max-microamp = <600000>;
+ vccq-max-microamp = <450000>;
+ vccq2-max-microamp = <450000>;
+};
- phy@7412000 {
- status = "okay";
- };
+&msmgpio {
+ gpio-line-names =
+ "[SPI0_DOUT]", /* GPIO_0, BLSP1_SPI_MOSI, LSEC pin 14 */
+ "[SPI0_DIN]", /* GPIO_1, BLSP1_SPI_MISO, LSEC pin 10 */
+ "[SPI0_CS]", /* GPIO_2, BLSP1_SPI_CS_N, LSEC pin 12 */
+ "[SPI0_SCLK]", /* GPIO_3, BLSP1_SPI_CLK, LSEC pin 8 */
+ "[UART1_TxD]", /* GPIO_4, BLSP8_UART_TX, LSEC pin 11 */
+ "[UART1_RxD]", /* GPIO_5, BLSP8_UART_RX, LSEC pin 13 */
+ "[I2C1_SDA]", /* GPIO_6, BLSP8_I2C_SDA, LSEC pin 21 */
+ "[I2C1_SCL]", /* GPIO_7, BLSP8_I2C_SCL, LSEC pin 19 */
+ "GPIO-H", /* GPIO_8, LCD0_RESET_N, LSEC pin 30 */
+ "TP93", /* GPIO_9 */
+ "GPIO-G", /* GPIO_10, MDP_VSYNC_P, LSEC pin 29 */
+ "[MDP_VSYNC_S]", /* GPIO_11, S HSEC pin 55 */
+ "NC", /* GPIO_12 */
+ "[CSI0_MCLK]", /* GPIO_13, CAM_MCLK0, P HSEC pin 15 */
+ "[CAM_MCLK1]", /* GPIO_14, J14 pin 11 */
+ "[CSI1_MCLK]", /* GPIO_15, CAM_MCLK2, P HSEC pin 17 */
+ "TP99", /* GPIO_16 */
+ "[I2C2_SDA]", /* GPIO_17, CCI_I2C_SDA0, P HSEC pin 34 */
+ "[I2C2_SCL]", /* GPIO_18, CCI_I2C_SCL0, P HSEC pin 32 */
+ "[CCI_I2C_SDA1]", /* GPIO_19, S HSEC pin 38 */
+ "[CCI_I2C_SCL1]", /* GPIO_20, S HSEC pin 36 */
+ "FLASH_STROBE_EN", /* GPIO_21, S HSEC pin 5 */
+ "FLASH_STROBE_TRIG", /* GPIO_22, S HSEC pin 1 */
+ "GPIO-K", /* GPIO_23, CAM2_RST_N, LSEC pin 33 */
+ "GPIO-D", /* GPIO_24, LSEC pin 26 */
+ "GPIO-I", /* GPIO_25, CAM0_RST_N, LSEC pin 31 */
+ "GPIO-J", /* GPIO_26, CAM0_STANDBY_N, LSEC pin 32 */
+ "BLSP6_I2C_SDA", /* GPIO_27 */
+ "BLSP6_I2C_SCL", /* GPIO_28 */
+ "GPIO-B", /* GPIO_29, TS0_RESET_N, LSEC pin 24 */
+ "GPIO30", /* GPIO_30, S HSEC pin 4 */
+ "HDMI_CEC", /* GPIO_31 */
+ "HDMI_DDC_CLOCK", /* GPIO_32 */
+ "HDMI_DDC_DATA", /* GPIO_33 */
+ "HDMI_HOT_PLUG_DETECT", /* GPIO_34 */
+ "PCIE0_RST_N", /* GPIO_35 */
+ "PCIE0_CLKREQ_N", /* GPIO_36 */
+ "PCIE0_WAKE", /* GPIO_37 */
+ "SD_CARD_DET_N", /* GPIO_38 */
+ "TSIF1_SYNC", /* GPIO_39, S HSEC pin 48 */
+ "W_DISABLE_N", /* GPIO_40 */
+ "[BLSP9_UART_TX]", /* GPIO_41 */
+ "[BLSP9_UART_RX]", /* GPIO_42 */
+ "[BLSP2_UART_CTS_N]", /* GPIO_43 */
+ "[BLSP2_UART_RFR_N]", /* GPIO_44 */
+ "[BLSP3_UART_TX]", /* GPIO_45 */
+ "[BLSP3_UART_RX]", /* GPIO_46 */
+ "[I2C0_SDA]", /* GPIO_47, LS_I2C0_SDA, LSEC pin 17 */
+ "[I2C0_SCL]", /* GPIO_48, LS_I2C0_SCL, LSEC pin 15 */
+ "[UART0_TxD]", /* GPIO_49, BLSP9_UART_TX, LSEC pin 5 */
+ "[UART0_RxD]", /* GPIO_50, BLSP9_UART_RX, LSEC pin 7 */
+ "[UART0_CTS]", /* GPIO_51, BLSP9_UART_CTS_N, LSEC pin 3 */
+ "[UART0_RTS]", /* GPIO_52, BLSP9_UART_RFR_N, LSEC pin 9 */
+ "[CODEC_INT1_N]", /* GPIO_53 */
+ "[CODEC_INT2_N]", /* GPIO_54 */
+ "[BLSP7_I2C_SDA]", /* GPIO_55 */
+ "[BLSP7_I2C_SCL]", /* GPIO_56 */
+ "MI2S_MCLK", /* GPIO_57, S HSEC pin 3 */
+ "[PCM_CLK]", /* GPIO_58, QUA_MI2S_SCK, LSEC pin 18 */
+ "[PCM_FS]", /* GPIO_59, QUA_MI2S_WS, LSEC pin 16 */
+ "[PCM_DO]", /* GPIO_60, QUA_MI2S_DATA0, LSEC pin 20 */
+ "[PCM_DI]", /* GPIO_61, QUA_MI2S_DATA1, LSEC pin 22 */
+ "GPIO-E", /* GPIO_62, LSEC pin 27 */
+ "TP87", /* GPIO_63 */
+ "[CODEC_RST_N]", /* GPIO_64 */
+ "[PCM1_CLK]", /* GPIO_65 */
+ "[PCM1_SYNC]", /* GPIO_66 */
+ "[PCM1_DIN]", /* GPIO_67 */
+ "[PCM1_DOUT]", /* GPIO_68 */
+ "AUDIO_REF_CLK", /* GPIO_69 */
+ "SLIMBUS_CLK", /* GPIO_70 */
+ "SLIMBUS_DATA0", /* GPIO_71 */
+ "SLIMBUS_DATA1", /* GPIO_72 */
+ "NC", /* GPIO_73 */
+ "NC", /* GPIO_74 */
+ "NC", /* GPIO_75 */
+ "NC", /* GPIO_76 */
+ "TP94", /* GPIO_77 */
+ "NC", /* GPIO_78 */
+ "TP95", /* GPIO_79 */
+ "GPIO-A", /* GPIO_80, MEMS_RESET_N, LSEC pin 23 */
+ "TP88", /* GPIO_81 */
+ "TP89", /* GPIO_82 */
+ "TP90", /* GPIO_83 */
+ "TP91", /* GPIO_84 */
+ "[SD_DAT0]", /* GPIO_85, BLSP12_SPI_MOSI, P HSEC pin 1 */
+ "[SD_CMD]", /* GPIO_86, BLSP12_SPI_MISO, P HSEC pin 11 */
+ "[SD_DAT3]", /* GPIO_87, BLSP12_SPI_CS_N, P HSEC pin 7 */
+ "[SD_SCLK]", /* GPIO_88, BLSP12_SPI_CLK, P HSEC pin 9 */
+ "TSIF1_CLK", /* GPIO_89, S HSEC pin 42 */
+ "TSIF1_EN", /* GPIO_90, S HSEC pin 46 */
+ "TSIF1_DATA", /* GPIO_91, S HSEC pin 44 */
+ "NC", /* GPIO_92 */
+ "TSIF2_CLK", /* GPIO_93, S HSEC pin 52 */
+ "TSIF2_EN", /* GPIO_94, S HSEC pin 56 */
+ "TSIF2_DATA", /* GPIO_95, S HSEC pin 54 */
+ "TSIF2_SYNC", /* GPIO_96, S HSEC pin 58 */
+ "NC", /* GPIO_97 */
+ "CAM1_STANDBY_N", /* GPIO_98 */
+ "NC", /* GPIO_99 */
+ "NC", /* GPIO_100 */
+ "[LCD1_RESET_N]", /* GPIO_101, S HSEC pin 51 */
+ "BOOT_CONFIG1", /* GPIO_102 */
+ "USB_HUB_RESET", /* GPIO_103 */
+ "CAM1_RST_N", /* GPIO_104 */
+ "NC", /* GPIO_105 */
+ "NC", /* GPIO_106 */
+ "NC", /* GPIO_107 */
+ "NC", /* GPIO_108 */
+ "NC", /* GPIO_109 */
+ "NC", /* GPIO_110 */
+ "NC", /* GPIO_111 */
+ "NC", /* GPIO_112 */
+ "PMI8994_BUA", /* GPIO_113 */
+ "PCIE2_RST_N", /* GPIO_114 */
+ "PCIE2_CLKREQ_N", /* GPIO_115 */
+ "PCIE2_WAKE", /* GPIO_116 */
+ "SSC_IRQ_0", /* GPIO_117 */
+ "SSC_IRQ_1", /* GPIO_118 */
+ "SSC_IRQ_2", /* GPIO_119 */
+ "NC", /* GPIO_120 */
+ "GPIO121", /* GPIO_121, S HSEC pin 2 */
+ "NC", /* GPIO_122 */
+ "SSC_IRQ_6", /* GPIO_123 */
+ "SSC_IRQ_7", /* GPIO_124 */
+ "GPIO-C", /* GPIO_125, TS_INT0, LSEC pin 25 */
+ "BOOT_CONFIG5", /* GPIO_126 */
+ "NC", /* GPIO_127 */
+ "NC", /* GPIO_128 */
+ "BOOT_CONFIG7", /* GPIO_129 */
+ "PCIE1_RST_N", /* GPIO_130 */
+ "PCIE1_CLKREQ_N", /* GPIO_131 */
+ "PCIE1_WAKE", /* GPIO_132 */
+ "GPIO-L", /* GPIO_133, CAM2_STANDBY_N, LSEC pin 34 */
+ "NC", /* GPIO_134 */
+ "NC", /* GPIO_135 */
+ "BOOT_CONFIG8", /* GPIO_136 */
+ "NC", /* GPIO_137 */
+ "NC", /* GPIO_138 */
+ "GPS_SSBI2", /* GPIO_139 */
+ "GPS_SSBI1", /* GPIO_140 */
+ "NC", /* GPIO_141 */
+ "NC", /* GPIO_142 */
+ "NC", /* GPIO_143 */
+ "BOOT_CONFIG6", /* GPIO_144 */
+ "NC", /* GPIO_145 */
+ "NC", /* GPIO_146 */
+ "NC", /* GPIO_147 */
+ "NC", /* GPIO_148 */
+ "NC"; /* GPIO_149 */
+};
- usb@6af8800 {
- status = "okay";
- extcon = <&usb3_id>;
+&pm8994_gpios {
+ gpio-line-names =
+ "NC",
+ "KEY_VOLP_N",
+ "NC",
+ "BL1_PWM",
+ "GPIO-F", /* BL0_PWM, LSEC pin 28 */
+ "BL1_EN",
+ "NC",
+ "WLAN_EN",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "DIVCLK1",
+ "DIVCLK2",
+ "DIVCLK3",
+ "DIVCLK4",
+ "BT_EN",
+ "PMIC_SLB",
+ "PMIC_BUA",
+ "USB_VBUS_DET";
+};
- dwc3@6a00000 {
- extcon = <&usb3_id>;
- dr_mode = "otg";
- };
- };
+&pm8994_mpps {
+ gpio-line-names =
+ "VDDPX_BIAS",
+ "WIFI_LED",
+ "NC",
+ "BT_LED",
+ "PM_MPP05",
+ "PM_MPP06",
+ "PM_MPP07",
+ "NC";
+};
- usb3_id: usb3-id {
- compatible = "linux,extcon-usb-gpio";
- id-gpio = <&pm8994_gpios 22 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&usb3_vbus_det_gpio>;
- };
+&pmi8994_gpios {
+ gpio-line-names =
+ "NC",
+ "SPKR_AMP_EN1",
+ "SPKR_AMP_EN2",
+ "TP61",
+ "NC",
+ "USB2_VBUS_DET",
+ "NC",
+ "NC",
+ "NC",
+ "NC";
+};
- usb@76f8800 {
- status = "okay";
- extcon = <&usb2_id>;
+&pcie_phy {
+ status = "okay";
- dwc3@7600000 {
- extcon = <&usb2_id>;
- dr_mode = "otg";
- maximum-speed = "high-speed";
- };
- };
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+};
- usb2_id: usb2-id {
- compatible = "linux,extcon-usb-gpio";
- id-gpio = <&pmi8994_gpios 6 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&usb2_vbus_det_gpio>;
- };
+&usb3phy {
+ status = "okay";
- wlan_en: wlan-en-1-8v {
- pinctrl-names = "default";
- pinctrl-0 = <&wlan_en_gpios>;
- compatible = "regulator-fixed";
- regulator-name = "wlan-en-regulator";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
- gpio = <&pm8994_gpios 8 0>;
+};
- /* WLAN card specific delay */
- startup-delay-us = <70000>;
- enable-active-high;
- };
+&hsusb_phy1 {
+ status = "okay";
- agnoc@0 {
- pcie@600000 {
- status = "okay";
- perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
- vddpe-3v3-supply = <&wlan_en>;
- };
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+};
- pcie@608000 {
- status = "okay";
- perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
- };
+&hsusb_phy2 {
+ status = "okay";
- pcie@610000 {
- status = "okay";
- perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
- };
- };
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+};
- slim_msm: slim@91c0000 {
- ngd@1 {
- wcd9335: codec@1{
- clock-names = "mclk", "slimbus";
- clocks = <&div1_mclk>,
- <&rpmcc RPM_SMD_BB_CLK1>;
- };
- };
- };
+&usb3 {
+ status = "okay";
+ extcon = <&usb3_id>;
- mdss@900000 {
- status = "okay";
+ dwc3@6a00000 {
+ extcon = <&usb3_id>;
+ dr_mode = "otg";
+ };
+};
- mdp@901000 {
- status = "okay";
- };
+&usb2 {
+ status = "okay";
+ extcon = <&usb2_id>;
- hdmi-phy@9a0600 {
- status = "okay";
+ dwc3@7600000 {
+ extcon = <&usb2_id>;
+ dr_mode = "otg";
+ maximum-speed = "high-speed";
+ };
+};
- vddio-supply = <&pm8994_l12>;
- vcca-supply = <&pm8994_l28>;
- #phy-cells = <0>;
- };
+&pcie0 {
+ status = "okay";
+ perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ vddpe-3v3-supply = <&wlan_en>;
+ vdda-supply = <&pm8994_l28>;
+};
- hdmi-tx@9a0000 {
- status = "okay";
+&pcie1 {
+ status = "okay";
+ perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
+ vdda-supply = <&pm8994_l28>;
+};
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>;
- pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>;
+&pcie2 {
+ status = "okay";
+ perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
+ vdda-supply = <&pm8994_l28>;
+};
- core-vdda-supply = <&pm8994_l12>;
- core-vcc-supply = <&pm8994_s4>;
- };
- };
- };
+&wcd9335 {
+ clock-names = "mclk", "slimbus";
+ clocks = <&div1_mclk>,
+ <&rpmcc RPM_SMD_BB_CLK1>;
+ vdd-buck-supply = <&pm8994_s4>;
+ vdd-buck-sido-supply = <&pm8994_s4>;
+ vdd-tx-supply = <&pm8994_s4>;
+ vdd-rx-supply = <&pm8994_s4>;
+ vdd-io-supply = <&pm8994_s4>;
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdp {
+ status = "okay";
+};
+
+&hdmi_phy {
+ status = "okay";
+
+ vddio-supply = <&pm8994_l12>;
+ vcca-supply = <&pm8994_l28>;
+ #phy-cells = <0>;
+};
+
+&hdmi {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>;
+ pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>;
+
+ core-vdda-supply = <&pm8994_l12>;
+ core-vcc-supply = <&pm8994_s4>;
+};
+/ {
gpio_keys {
compatible = "gpio-keys";
#address-cells = <1>;
@@ -667,6 +669,35 @@
};
};
};
+
+ usb2_id: usb2-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pmi8994_gpios 6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_vbus_det_gpio>;
+ };
+
+ usb3_id: usb3-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pm8994_gpios 22 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_vbus_det_gpio>;
+ };
+
+ wlan_en: wlan-en-1-8v {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en_gpios>;
+ compatible = "regulator-fixed";
+ regulator-name = "wlan-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&pm8994_gpios 8 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
};
&spmi_bus {
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
index 7822592664ff..1e9fa049c550 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
@@ -253,7 +253,7 @@
status = "disabled";
};
- qpic_nand: nand@79b0000 {
+ qpic_nand: nand-controller@79b0000 {
compatible = "qcom,ipq8074-nand";
reg = <0x79b0000 0x10000>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 301c1c467c0b..e1097ba6c948 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -1097,8 +1097,8 @@
vddmx-supply = <&pm8916_l3>;
vddpx-supply = <&pm8916_l7>;
- qcom,state = <&wcnss_smp2p_out 0>;
- qcom,state-names = "stop";
+ qcom,smem-states = <&wcnss_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
pinctrl-names = "default";
pinctrl-0 = <&wcnss_pin_a>;
@@ -1451,7 +1451,7 @@
};
};
- camss: camss@1b00000 {
+ camss: camss@1b0ac00 {
compatible = "qcom,msm8916-camss";
reg = <0x1b0ac00 0x200>,
<0x1b00030 0x4>,
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index f1d3c51ea8d0..8bfb897b0e81 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -433,6 +433,19 @@
};
};
+ etm {
+ compatible = "qcom,coresight-remote-etm";
+
+ out-ports {
+ port {
+ modem_etm_out_funnel_in2: endpoint {
+ remote-endpoint =
+ <&funnel_in2_in_modem_etm>;
+ };
+ };
+ };
+ };
+
psci {
compatible = "arm,psci-1.0";
method = "smc";
@@ -571,7 +584,7 @@
rpm_msg_ram: memory@68000 {
compatible = "qcom,rpm-msg-ram";
- reg = <0x68000 0x6000>;
+ reg = <0x00068000 0x6000>;
};
rng: rng@83000 {
@@ -583,28 +596,28 @@
tcsr_mutex_regs: syscon@740000 {
compatible = "syscon";
- reg = <0x740000 0x20000>;
+ reg = <0x00740000 0x20000>;
};
tsens0: thermal-sensor@4a9000 {
compatible = "qcom,msm8996-tsens";
- reg = <0x4a9000 0x1000>, /* TM */
- <0x4a8000 0x1000>; /* SROT */
+ reg = <0x004a9000 0x1000>, /* TM */
+ <0x004a8000 0x1000>; /* SROT */
#qcom,sensors = <13>;
#thermal-sensor-cells = <1>;
};
tsens1: thermal-sensor@4ad000 {
compatible = "qcom,msm8996-tsens";
- reg = <0x4ad000 0x1000>, /* TM */
- <0x4ac000 0x1000>; /* SROT */
+ reg = <0x004ad000 0x1000>, /* TM */
+ <0x004ac000 0x1000>; /* SROT */
#qcom,sensors = <8>;
#thermal-sensor-cells = <1>;
};
tcsr: syscon@7a0000 {
compatible = "qcom,tcsr-msm8996", "syscon";
- reg = <0x7a0000 0x18000>;
+ reg = <0x007a0000 0x18000>;
};
intc: interrupt-controller@9bc0000 {
@@ -620,7 +633,7 @@
apcs_glb: mailbox@9820000 {
compatible = "qcom,msm8996-apcs-hmss-global";
- reg = <0x9820000 0x1000>;
+ reg = <0x09820000 0x1000>;
#mbox-cells = <1>;
};
@@ -630,7 +643,7 @@
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
- reg = <0x300000 0x90000>;
+ reg = <0x00300000 0x90000>;
};
stm@3002000 {
@@ -736,6 +749,14 @@
clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "atclk";
+ in-ports {
+ port {
+ funnel_in2_in_modem_etm: endpoint {
+ remote-endpoint =
+ <&modem_etm_out_funnel_in2>;
+ };
+ };
+ };
out-ports {
port {
@@ -1103,7 +1124,7 @@
kryocc: clock-controller@6400000 {
compatible = "qcom,apcc-msm8996";
- reg = <0x6400000 0x90000>;
+ reg = <0x06400000 0x90000>;
#clock-cells = <1>;
};
@@ -1149,7 +1170,7 @@
blsp2_uart1: serial@75b0000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
- reg = <0x75b0000 0x1000>;
+ reg = <0x075b0000 0x1000>;
interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
<&gcc GCC_BLSP2_AHB_CLK>;
@@ -1215,7 +1236,7 @@
sdhc2: sdhci@74a4900 {
status = "disabled";
compatible = "qcom,sdhci-msm-v4";
- reg = <0x74a4900 0x314>, <0x74a4000 0x800>;
+ reg = <0x074a4900 0x314>, <0x074a4000 0x800>;
reg-names = "hc_mem", "core_mem";
interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>,
@@ -1300,11 +1321,11 @@
spmi_bus: qcom,spmi@400f000 {
compatible = "qcom,spmi-pmic-arb";
- reg = <0x400f000 0x1000>,
- <0x4400000 0x800000>,
- <0x4c00000 0x800000>,
- <0x5800000 0x200000>,
- <0x400a000 0x002100>;
+ reg = <0x0400f000 0x1000>,
+ <0x04400000 0x800000>,
+ <0x04c00000 0x800000>,
+ <0x05800000 0x200000>,
+ <0x0400a000 0x002100>;
reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
interrupt-names = "periph_irq";
interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
@@ -1318,20 +1339,10 @@
ufsphy: phy@627000 {
compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
- reg = <0x627000 0xda8>;
+ reg = <0x00627000 0xda8>;
reg-names = "phy_mem";
#phy-cells = <0>;
- vdda-phy-supply = <&pm8994_l28>;
- vdda-pll-supply = <&pm8994_l12>;
-
- vdda-phy-max-microamp = <18380>;
- vdda-pll-max-microamp = <9440>;
-
- vddp-ref-clk-supply = <&pm8994_l25>;
- vddp-ref-clk-max-microamp = <100>;
- vddp-ref-clk-always-on;
-
clock-names = "ref_clk_src", "ref_clk";
clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
<&gcc GCC_UFS_CLKREF_CLK>;
@@ -1341,20 +1352,12 @@
ufshc: ufshc@624000 {
compatible = "qcom,ufshc";
- reg = <0x624000 0x2500>;
+ reg = <0x00624000 0x2500>;
interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
phys = <&ufsphy>;
phy-names = "ufsphy";
- vcc-supply = <&pm8994_l20>;
- vccq-supply = <&pm8994_l25>;
- vccq2-supply = <&pm8994_s4>;
-
- vcc-max-microamp = <600000>;
- vccq-max-microamp = <450000>;
- vccq2-max-microamp = <450000>;
-
power-domains = <&gcc UFS_GDSC>;
clock-names =
@@ -1408,7 +1411,7 @@
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
- reg = <0x8c0000 0x40000>;
+ reg = <0x008c0000 0x40000>;
assigned-clocks = <&mmcc MMPLL9_PLL>,
<&mmcc MMPLL1_PLL>,
<&mmcc MMPLL3_PLL>,
@@ -1423,7 +1426,7 @@
qfprom@74000 {
compatible = "qcom,qfprom";
- reg = <0x74000 0x8ff>;
+ reg = <0x00074000 0x8ff>;
#address-cells = <1>;
#size-cells = <1>;
@@ -1443,9 +1446,9 @@
};
};
- phy@34000 {
+ pcie_phy: phy@34000 {
compatible = "qcom,msm8996-qmp-pcie-phy";
- reg = <0x34000 0x488>;
+ reg = <0x00034000 0x488>;
#clock-cells = <1>;
#address-cells = <1>;
#size-cells = <1>;
@@ -1456,9 +1459,6 @@
<&gcc GCC_PCIE_CLKREF_CLK>;
clock-names = "aux", "cfg_ahb", "ref";
- vdda-phy-supply = <&pm8994_l28>;
- vdda-pll-supply = <&pm8994_l12>;
-
resets = <&gcc GCC_PCIE_PHY_BCR>,
<&gcc GCC_PCIE_PHY_COM_BCR>,
<&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
@@ -1466,9 +1466,9 @@
status = "disabled";
pciephy_0: lane@35000 {
- reg = <0x035000 0x130>,
- <0x035200 0x200>,
- <0x035400 0x1dc>;
+ reg = <0x00035000 0x130>,
+ <0x00035200 0x200>,
+ <0x00035400 0x1dc>;
#phy-cells = <0>;
clock-output-names = "pcie_0_pipe_clk_src";
@@ -1479,9 +1479,9 @@
};
pciephy_1: lane@36000 {
- reg = <0x036000 0x130>,
- <0x036200 0x200>,
- <0x036400 0x1dc>;
+ reg = <0x00036000 0x130>,
+ <0x00036200 0x200>,
+ <0x00036400 0x1dc>;
#phy-cells = <0>;
clock-output-names = "pcie_1_pipe_clk_src";
@@ -1492,9 +1492,9 @@
};
pciephy_2: lane@37000 {
- reg = <0x037000 0x130>,
- <0x037200 0x200>,
- <0x037400 0x1dc>;
+ reg = <0x00037000 0x130>,
+ <0x00037200 0x200>,
+ <0x00037400 0x1dc>;
#phy-cells = <0>;
clock-output-names = "pcie_2_pipe_clk_src";
@@ -1505,9 +1505,9 @@
};
};
- phy@7410000 {
+ usb3phy: phy@7410000 {
compatible = "qcom,msm8996-qmp-usb3-phy";
- reg = <0x7410000 0x1c4>;
+ reg = <0x07410000 0x1c4>;
#clock-cells = <1>;
#address-cells = <1>;
#size-cells = <1>;
@@ -1518,18 +1518,15 @@
<&gcc GCC_USB3_CLKREF_CLK>;
clock-names = "aux", "cfg_ahb", "ref";
- vdda-phy-supply = <&pm8994_l28>;
- vdda-pll-supply = <&pm8994_l12>;
-
resets = <&gcc GCC_USB3_PHY_BCR>,
<&gcc GCC_USB3PHY_PHY_BCR>;
reset-names = "phy", "common";
status = "disabled";
ssusb_phy_0: lane@7410200 {
- reg = <0x7410200 0x200>,
- <0x7410400 0x130>,
- <0x7410600 0x1a8>;
+ reg = <0x07410200 0x200>,
+ <0x07410400 0x130>,
+ <0x07410600 0x1a8>;
#phy-cells = <0>;
clock-output-names = "usb3_phy_pipe_clk_src";
@@ -1540,16 +1537,13 @@
hsusb_phy1: phy@7411000 {
compatible = "qcom,msm8996-qusb2-phy";
- reg = <0x7411000 0x180>;
+ reg = <0x07411000 0x180>;
#phy-cells = <0>;
clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
<&gcc GCC_RX1_USB2_CLKREF_CLK>;
clock-names = "cfg_ahb", "ref";
- vdda-pll-supply = <&pm8994_l12>;
- vdda-phy-dpdm-supply = <&pm8994_l24>;
-
resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
nvmem-cells = <&qusb2p_hstx_trim>;
status = "disabled";
@@ -1557,16 +1551,13 @@
hsusb_phy2: phy@7412000 {
compatible = "qcom,msm8996-qusb2-phy";
- reg = <0x7412000 0x180>;
+ reg = <0x07412000 0x180>;
#phy-cells = <0>;
clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
<&gcc GCC_RX2_USB2_CLKREF_CLK>;
clock-names = "cfg_ahb", "ref";
- vdda-pll-supply = <&pm8994_l12>;
- vdda-phy-dpdm-supply = <&pm8994_l24>;
-
resets = <&gcc GCC_QUSB2PHY_SEC_BCR>;
nvmem-cells = <&qusb2s_hstx_trim>;
status = "disabled";
@@ -1574,7 +1565,7 @@
usb2: usb@76f8800 {
compatible = "qcom,msm8996-dwc3", "qcom,dwc3";
- reg = <0x76f8800 0x400>;
+ reg = <0x076f8800 0x400>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -1594,7 +1585,7 @@
dwc3@7600000 {
compatible = "snps,dwc3";
- reg = <0x7600000 0xcc00>;
+ reg = <0x07600000 0xcc00>;
interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
phys = <&hsusb_phy2>;
phy-names = "usb2-phy";
@@ -1605,7 +1596,7 @@
usb3: usb@6af8800 {
compatible = "qcom,msm8996-dwc3", "qcom,dwc3";
- reg = <0x6af8800 0x400>;
+ reg = <0x06af8800 0x400>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -1626,7 +1617,7 @@
dwc3@6a00000 {
compatible = "snps,dwc3";
- reg = <0x6a00000 0xcc00>;
+ reg = <0x06a00000 0xcc00>;
interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
phys = <&hsusb_phy1>, <&ssusb_phy_0>;
phy-names = "usb2-phy", "usb3-phy";
@@ -1637,7 +1628,7 @@
vfe_smmu: iommu@da0000 {
compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
- reg = <0xda0000 0x10000>;
+ reg = <0x00da0000 0x10000>;
#global-interrupts = <1>;
interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
@@ -1653,20 +1644,20 @@
camss: camss@a00000 {
compatible = "qcom,msm8996-camss";
- reg = <0xa34000 0x1000>,
- <0xa00030 0x4>,
- <0xa35000 0x1000>,
- <0xa00038 0x4>,
- <0xa36000 0x1000>,
- <0xa00040 0x4>,
- <0xa30000 0x100>,
- <0xa30400 0x100>,
- <0xa30800 0x100>,
- <0xa30c00 0x100>,
- <0xa31000 0x500>,
- <0xa00020 0x10>,
- <0xa10000 0x1000>,
- <0xa14000 0x1000>;
+ reg = <0x00a34000 0x1000>,
+ <0x00a00030 0x4>,
+ <0x00a35000 0x1000>,
+ <0x00a00038 0x4>,
+ <0x00a36000 0x1000>,
+ <0x00a00040 0x4>,
+ <0x00a30000 0x100>,
+ <0x00a30400 0x100>,
+ <0x00a30800 0x100>,
+ <0x00a30c00 0x100>,
+ <0x00a31000 0x500>,
+ <0x00a00020 0x10>,
+ <0x00a10000 0x1000>,
+ <0x00a14000 0x1000>;
reg-names = "csiphy0",
"csiphy0_clk_mux",
"csiphy1",
@@ -1774,7 +1765,6 @@
"vfe1_stream",
"vfe_ahb",
"vfe_axi";
- vdda-supply = <&pm8994_l2>;
iommus = <&vfe_smmu 0>,
<&vfe_smmu 1>,
<&vfe_smmu 2>,
@@ -1788,7 +1778,7 @@
adreno_smmu: iommu@b40000 {
compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
- reg = <0xb40000 0x10000>;
+ reg = <0x00b40000 0x10000>;
#global-interrupts = <1>;
interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
@@ -1805,7 +1795,7 @@
mdp_smmu: iommu@d00000 {
compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
- reg = <0xd00000 0x10000>;
+ reg = <0x00d00000 0x10000>;
#global-interrupts = <1>;
interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
@@ -1821,7 +1811,7 @@
lpass_q6_smmu: iommu@1600000 {
compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
- reg = <0x1600000 0x20000>;
+ reg = <0x01600000 0x20000>;
#iommu-cells = <1>;
power-domains = <&gcc HLOS1_VOTE_LPASS_CORE_GDSC>;
@@ -1886,9 +1876,6 @@
pinctrl-0 = <&pcie0_clkreq_default &pcie0_perst_default &pcie0_wake_default>;
pinctrl-1 = <&pcie0_clkreq_sleep &pcie0_perst_default &pcie0_wake_sleep>;
-
- vdda-supply = <&pm8994_l28>;
-
linux,pci-domain = <0>;
clocks = <&gcc GCC_PCIE_0_PIPE_CLK>,
@@ -1941,8 +1928,6 @@
pinctrl-0 = <&pcie1_clkreq_default &pcie1_perst_default &pcie1_wake_default>;
pinctrl-1 = <&pcie1_clkreq_sleep &pcie1_perst_default &pcie1_wake_sleep>;
-
- vdda-supply = <&pm8994_l28>;
linux,pci-domain = <1>;
clocks = <&gcc GCC_PCIE_1_PIPE_CLK>,
@@ -1994,8 +1979,6 @@
pinctrl-0 = <&pcie2_clkreq_default &pcie2_perst_default &pcie2_wake_default>;
pinctrl-1 = <&pcie2_clkreq_sleep &pcie2_perst_default &pcie2_wake_sleep >;
- vdda-supply = <&pm8994_l28>;
-
linux,pci-domain = <2>;
clocks = <&gcc GCC_PCIE_2_PIPE_CLK>,
<&gcc GCC_PCIE_2_AUX_CLK>,
@@ -2015,7 +1998,7 @@
{
compatible = "qcom,bam-v1.7.0";
qcom,controlled-remotely;
- reg = <0x9184000 0x32000>;
+ reg = <0x09184000 0x32000>;
num-channels = <31>;
interrupts = <0 164 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
@@ -2025,7 +2008,7 @@
slim_msm: slim@91c0000 {
compatible = "qcom,slim-ngd-v1.5.0";
- reg = <0x91c0000 0x2C000>;
+ reg = <0x091c0000 0x2C000>;
reg-names = "ctrl";
interrupts = <0 163 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&slimbam 3>, <&slimbam 4>,
@@ -2060,12 +2043,6 @@
slim-ifc-dev = <&tasha_ifd>;
- vdd-buck-supply = <&pm8994_s4>;
- vdd-buck-sido-supply = <&pm8994_s4>;
- vdd-tx-supply = <&pm8994_s4>;
- vdd-rx-supply = <&pm8994_s4>;
- vdd-io-supply = <&pm8994_s4>;
-
#sound-dai-cells = <1>;
};
};
@@ -2075,7 +2052,7 @@
compatible = "qcom,adreno-530.2", "qcom,adreno";
#stream-id-cells = <16>;
- reg = <0xb00000 0x3f000>;
+ reg = <0x00b00000 0x3f000>;
reg-names = "kgsl_3d0_reg_memory";
interrupts = <0 300 IRQ_TYPE_LEVEL_HIGH>;
@@ -2146,9 +2123,9 @@
mdss: mdss@900000 {
compatible = "qcom,mdss";
- reg = <0x900000 0x1000>,
- <0x9b0000 0x1040>,
- <0x9b8000 0x1040>;
+ reg = <0x00900000 0x1000>,
+ <0x009b0000 0x1040>,
+ <0x009b8000 0x1040>;
reg-names = "mdss_phys",
"vbif_phys",
"vbif_nrt_phys";
@@ -2168,7 +2145,7 @@
mdp: mdp@901000 {
compatible = "qcom,mdp5";
- reg = <0x901000 0x90000>;
+ reg = <0x00901000 0x90000>;
reg-names = "mdp_phys";
interrupt-parent = <&mdss>;
@@ -2244,12 +2221,12 @@
hdmi_phy: hdmi-phy@9a0600 {
#phy-cells = <0>;
compatible = "qcom,hdmi-phy-8996";
- reg = <0x9a0600 0x1c4>,
- <0x9a0a00 0x124>,
- <0x9a0c00 0x124>,
- <0x9a0e00 0x124>,
- <0x9a1000 0x124>,
- <0x9a1200 0x0c8>;
+ reg = <0x009a0600 0x1c4>,
+ <0x009a0a00 0x124>,
+ <0x009a0c00 0x124>,
+ <0x009a0e00 0x124>,
+ <0x009a1000 0x124>,
+ <0x009a1200 0x0c8>;
reg-names = "hdmi_pll",
"hdmi_tx_l0",
"hdmi_tx_l1",
diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index dcb79003ca0e..9cb7163c5714 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -1396,9 +1396,11 @@
cpu = <&CPU4>;
- port{
- etm4_out: endpoint {
- remote-endpoint = <&apss_funnel_in4>;
+ out-ports {
+ port{
+ etm4_out: endpoint {
+ remote-endpoint = <&apss_funnel_in4>;
+ };
};
};
};
@@ -1413,9 +1415,11 @@
cpu = <&CPU5>;
- port{
- etm5_out: endpoint {
- remote-endpoint = <&apss_funnel_in5>;
+ out-ports {
+ port{
+ etm5_out: endpoint {
+ remote-endpoint = <&apss_funnel_in5>;
+ };
};
};
};
@@ -1430,9 +1434,11 @@
cpu = <&CPU6>;
- port{
- etm6_out: endpoint {
- remote-endpoint = <&apss_funnel_in6>;
+ out-ports {
+ port{
+ etm6_out: endpoint {
+ remote-endpoint = <&apss_funnel_in6>;
+ };
};
};
};
@@ -1447,9 +1453,11 @@
cpu = <&CPU7>;
- port{
- etm7_out: endpoint {
- remote-endpoint = <&apss_funnel_in7>;
+ out-ports {
+ port{
+ etm7_out: endpoint {
+ remote-endpoint = <&apss_funnel_in7>;
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
index a97eeb4569c0..1eb51b12cfac 100644
--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
@@ -533,7 +533,7 @@
clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
resets = <&gcc GCC_PCIEPHY_0_PHY_BCR>,
- <&gcc 21>;
+ <&gcc GCC_PCIE_0_PIPE_ARES>;
reset-names = "phy", "pipe";
clock-output-names = "pcie_0_pipe_clk";
@@ -991,12 +991,12 @@
<&gcc GCC_PCIE_0_SLV_AXI_CLK>;
clock-names = "iface", "aux", "master_bus", "slave_bus";
- resets = <&gcc 18>,
- <&gcc 17>,
- <&gcc 15>,
- <&gcc 19>,
+ resets = <&gcc GCC_PCIE_0_AXI_MASTER_ARES>,
+ <&gcc GCC_PCIE_0_AXI_SLAVE_ARES>,
+ <&gcc GCC_PCIE_0_AXI_MASTER_STICKY_ARES>,
+ <&gcc GCC_PCIE_0_CORE_STICKY_ARES>,
<&gcc GCC_PCIE_0_BCR>,
- <&gcc 16>;
+ <&gcc GCC_PCIE_0_AHB_ARES>;
reset-names = "axi_m",
"axi_s",
"axi_m_sticky",
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
index 99a28d64ee62..2b7923f1f0ec 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
@@ -1310,7 +1310,7 @@ ap_ts_i2c: &i2c14 {
config {
pins = "gpio126";
function = "gpio";
- bias-no-pull;
+ bias-disable;
drive-strength = <2>;
output-low;
};
@@ -1320,7 +1320,7 @@ ap_ts_i2c: &i2c14 {
config {
pins = "gpio126";
function = "gpio";
- bias-no-pull;
+ bias-disable;
drive-strength = <2>;
output-high;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
index bf4fde88011c..e99a58b76d86 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
@@ -53,8 +53,8 @@
user4 {
label = "green:user4";
gpios = <&pm8998_gpio 13 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "panic-indicator";
default-state = "off";
+ panic-indicator;
};
wlan {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index c57548b7b250..e5331a81249b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -468,6 +468,8 @@
vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
+
+ qcom,snoc-host-cap-8bit-quirk;
};
/* PINCTRL - additions to nodes defined in sdm845.dtsi */
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 2287354fef86..b4a0234f66d1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -715,6 +715,7 @@
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
+ power-domains = <&rpmhpd SDM845_CX>;
};
qfprom@784000 {
@@ -2500,10 +2501,10 @@
<&gcc GCC_USB30_PRIM_MASTER_CLK>;
assigned-clock-rates = <19200000>, <150000000>;
- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc_intc 8 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc_intc 9 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "hs_phy_irq", "ss_phy_irq",
"dm_hs_phy_irq", "dp_hs_phy_irq";
@@ -2544,10 +2545,10 @@
<&gcc GCC_USB30_SEC_MASTER_CLK>;
assigned-clock-rates = <19200000>, <150000000>;
- interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 490 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc_intc 10 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc_intc 11 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "hs_phy_irq", "ss_phy_irq",
"dm_hs_phy_irq", "dp_hs_phy_irq";
@@ -2939,6 +2940,15 @@
#power-domain-cells = <1>;
};
+ pdc_intc: interrupt-controller@b220000 {
+ compatible = "qcom,sdm845-pdc", "qcom,pdc";
+ reg = <0 0x0b220000 0 0x30000>;
+ qcom,pdc-ranges = <0 480 94>, <94 609 15>, <115 630 7>;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ };
+
pdc_reset: reset-controller@b2e0000 {
compatible = "qcom,sdm845-pdc-global";
reg = <0 0x0b2e0000 0 0x20000>;
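
Note on the two USB hunks above: plain `interrupts` becomes `interrupts-extended` so the DM/DP hs_phy wake lines can be routed through the newly added PDC (an always-on interrupt controller) instead of the GIC. `interrupts-extended` is the standard form in which every entry names its own interrupt parent. A minimal standalone sketch of the pattern, with raw cell values standing in for the GIC_SPI/IRQ_TYPE macros:

	/dts-v1/;

	/ {
		gic: interrupt-controller-gic {
			interrupt-controller;
			#interrupt-cells = <3>;
		};

		pdc: interrupt-controller-pdc {
			interrupt-controller;
			#interrupt-cells = <2>;
		};

		usb-controller {
			/* Each entry carries its own parent phandle, so GIC
			 * and PDC lines can be mixed in one property. */
			interrupts-extended = <&gic 0 131 4>,	/* GIC_SPI 131, level high */
					      <&pdc 8 3>;	/* PDC pin 8, edge both */
		};
	};
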
diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
index 840d6b9bbb59..5eb8f014c3f5 100644
--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
@@ -298,8 +298,10 @@
};
&qup_i2c12_default {
- drive-strength = <2>;
- bias-disable;
+ pinmux {
+ drive-strength = <2>;
+ bias-disable;
+ };
};
&qup_uart6_default {
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 73ded80a79ba..1de7891c658c 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -49,17 +49,14 @@
opp-shared;
opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
};
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
};
opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
opp-suspend;
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index dabee157119f..82efc8a3627d 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -60,17 +60,14 @@
opp-shared;
opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
};
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
};
opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <820000>;
clock-latency-ns = <300000>;
opp-suspend;
};
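
Dropping `opp-microvolt` from these OPP tables means the OPP core no longer expects a CPU supply regulator; only the frequency and transition latency remain. Note the `/bits/ 64` prefix: `opp-hz` is defined as a 64-bit value, so it must be stored as a single 64-bit integer rather than a default 32-bit cell. A minimal table for reference:

	/dts-v1/;

	/ {
		cluster1_opp: opp-table-1 {
			compatible = "operating-points-v2";
			opp-shared;	/* all CPUs in the cluster switch together */

			opp-800000000 {
				opp-hz = /bits/ 64 <800000000>;	/* 64-bit, hence /bits/ 64 */
				clock-latency-ns = <300000>;
			};
		};
	};
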
diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
index 82e463c32a37..bbf8d856e750 100644
--- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
@@ -325,7 +325,7 @@
};
scif1_pins: scif1 {
- groups = "scif1_data_b", "scif1_ctrl";
+ groups = "scif1_data_b";
function = "scif1";
};
@@ -385,7 +385,6 @@
&scif1 {
pinctrl-0 = <&scif1_pins>;
pinctrl-names = "default";
- uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
index 6c3368f795ca..fbd942b46c54 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
@@ -90,7 +90,6 @@
linux,default-trigger = "heartbeat";
gpios = <&rk805 1 GPIO_ACTIVE_LOW>;
default-state = "on";
- mode = <0x23>;
};
user {
@@ -98,7 +97,6 @@
linux,default-trigger = "mmc1";
gpios = <&rk805 0 GPIO_ACTIVE_LOW>;
default-state = "off";
- mode = <0x05>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
index a9f4d6d7d2b7..586351340da6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
@@ -77,3 +77,8 @@
};
};
};
+
+&wlan_host_wake_l {
+ /* Kevin has an external pull up, but Bob does not. */
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
+};
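
This is the usual board-file override idiom: referencing a labeled node (`&wlan_host_wake_l`) from a file processed later merges into the node defined in rk3399-gru-chromebook.dtsi, and the last value of a property wins. Bob therefore swaps the shared `pcfg_pull_none` for the SoC's internal pull-up without copying the rest of the pin group. The mechanism in outline:

	/* in the shared dtsi */
	wlan_host_wake_l: wlan-host-wake-l {
		rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
	};

	/* in the board dts, included later: same node, property overridden */
	&wlan_host_wake_l {
		rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
	};
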
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
index 7cd6d470c1cb..7416db3d27a7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
@@ -237,6 +237,14 @@
&edp {
status = "okay";
+ /*
+ * eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only
+ * set this here, because rk3399-gru.dtsi ensures we can generate this
+ * off GPLL=600MHz, whereas some other RK3399 boards may not.
+ */
+ assigned-clocks = <&cru PCLK_EDP>;
+ assigned-clock-rates = <24000000>;
+
ports {
edp_out: port@1 {
reg = <1>;
@@ -397,6 +405,7 @@ ap_i2c_tp: &i2c5 {
};
wlan_host_wake_l: wlan-host-wake-l {
+ /* Kevin has an external pull up, but Bob does not */
rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
index d80d6b726820..d29937e4a606 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
@@ -203,7 +203,7 @@
cap-sd-highspeed;
cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
disable-wp;
- max-frequency = <150000000>;
+ max-frequency = <40000000>;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
vmmc-supply = <&vcc3v3_baseboard>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index 390b86ec6538..365fa9a3c5bf 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -102,7 +102,6 @@
vcc5v0_host: vcc5v0-host-regulator {
compatible = "regulator-fixed";
gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
- enable-active-low;
pinctrl-names = "default";
pinctrl-0 = <&vcc5v0_host_en>;
regulator-name = "vcc5v0_host";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
index da3b031d4bef..79d04a664b82 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
@@ -441,7 +441,6 @@
&i2s1 {
rockchip,playback-channels = <2>;
rockchip,capture-channels = <2>;
- status = "okay";
};
&i2s2 {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
index 6537c69de3dd..b793a43e0612 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
@@ -544,8 +544,8 @@
compatible = "socionext,uniphier-dwc3", "snps,dwc3";
status = "disabled";
reg = <0x65a00000 0xcd00>;
- interrupt-names = "host", "peripheral";
- interrupts = <0 134 4>, <0 135 4>;
+ interrupt-names = "dwc_usb3";
+ interrupts = <0 134 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>, <&pinctrl_usb2>;
clock-names = "ref", "bus_early", "suspend";
@@ -646,8 +646,8 @@
compatible = "socionext,uniphier-dwc3", "snps,dwc3";
status = "disabled";
reg = <0x65c00000 0xcd00>;
- interrupt-names = "host", "peripheral";
- interrupts = <0 137 4>, <0 138 4>;
+ interrupt-names = "dwc_usb3";
+ interrupts = <0 137 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1>, <&pinctrl_usb3>;
clock-names = "ref", "bus_early", "suspend";
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index 069d6f48b188..0b124ac7e2b9 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -337,3 +337,25 @@
&pcie1_ep {
status = "disabled";
};
+
+&ospi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_fss0_ospi0_pins_default>;
+
+ flash@0{
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <8>;
+ spi-max-frequency = <50000000>;
+ spi-dqs;
+ cdns,tshsl-ns = <60>;
+ cdns,tsd2d-ns = <60>;
+ cdns,tchsh-ns = <60>;
+ cdns,tslch-ns = <60>;
+ cdns,read-delay = <2>;
+ cdns,phy-mode;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
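
The new OSPI flash node uses the generic SPI bus-width properties: `spi-tx-bus-width = <1>` keeps commands and writes on a single line while `spi-rx-bus-width = <8>` enables octal reads. The `cdns,*` timings and read delay are Cadence QSPI controller knobs; `spi-dqs` and `cdns,phy-mode` look like vendor additions for DQS-assisted PHY reads, so the sketch below keeps only the generic subset:

	&ospi0 {
		flash@0 {
			compatible = "jedec,spi-nor";
			reg = <0x0>;			/* chip select 0 */
			spi-tx-bus-width = <1>;		/* 1-bit commands/writes */
			spi-rx-bus-width = <8>;		/* 8-bit (octal) reads */
			spi-max-frequency = <50000000>;
			cdns,read-delay = <2>;		/* controller sampling delay */
		};
	};
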
diff --git a/arch/arm64/boot/dts/xilinx/Makefile b/arch/arm64/boot/dts/xilinx/Makefile
index 60f5443f3ef4..5aef9e0951ac 100644
--- a/arch/arm64/boot/dts/xilinx/Makefile
+++ b/arch/arm64/boot/dts/xilinx/Makefile
@@ -2,7 +2,6 @@
dtb-$(CONFIG_ARCH_ZYNQMP) += avnet-ultra96-rev1.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1232-revA.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1254-revA.dtb
-dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1275-revA.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1751-xm015-dc1.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1751-xm016-dc2.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1751-xm017-dc3.dtb
@@ -13,5 +12,9 @@ dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu102-revA.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu102-revB.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu102-rev1.0.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu104-revA.dtb
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu104-revC.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu106-revA.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu111-revA.dtb
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu1275-revA.dtb
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu1275-revB.dtb
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu1285-revA.dtb
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
new file mode 100644
index 000000000000..d4ce8499020c
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Clock specification for Xilinx ZynqMP
+ *
+ * (C) Copyright 2017, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ */
+
+#include <dt-bindings/clock/xlnx-zynqmp-clk.h>
+/ {
+ fclk0: fclk0 {
+ status = "okay";
+ compatible = "xlnx,fclk";
+ clocks = <&zynqmp_clk PL0_REF>;
+ };
+
+ fclk1: fclk1 {
+ status = "okay";
+ compatible = "xlnx,fclk";
+ clocks = <&zynqmp_clk PL1_REF>;
+ };
+
+ fclk2: fclk2 {
+ status = "okay";
+ compatible = "xlnx,fclk";
+ clocks = <&zynqmp_clk PL2_REF>;
+ };
+
+ fclk3: fclk3 {
+ status = "okay";
+ compatible = "xlnx,fclk";
+ clocks = <&zynqmp_clk PL3_REF>;
+ };
+
+ pss_ref_clk: pss_ref_clk {
+ u-boot,dm-pre-reloc;
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <33333333>;
+ };
+
+ video_clk: video_clk {
+ u-boot,dm-pre-reloc;
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+
+ pss_alt_ref_clk: pss_alt_ref_clk {
+ u-boot,dm-pre-reloc;
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ gt_crx_ref_clk: gt_crx_ref_clk {
+ u-boot,dm-pre-reloc;
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <108000000>;
+ };
+
+ aux_ref_clk: aux_ref_clk {
+ u-boot,dm-pre-reloc;
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+
+ dp_aclk: dp_aclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-accuracy = <100>;
+ };
+};
+
+&zynqmp_firmware {
+ zynqmp_clk: clock-controller {
+ u-boot,dm-pre-reloc;
+ #clock-cells = <1>;
+ compatible = "xlnx,zynqmp-clk";
+ clocks = <&pss_ref_clk>, <&video_clk>, <&pss_alt_ref_clk>,
+ <&aux_ref_clk>, <&gt_crx_ref_clk>;
+ clock-names = "pss_ref_clk", "video_clk", "pss_alt_ref_clk",
+ "aux_ref_clk", "gt_crx_ref_clk";
+ };
+};
+
+&can0 {
+ clocks = <&zynqmp_clk CAN0_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&can1 {
+ clocks = <&zynqmp_clk CAN1_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&cpu0 {
+ clocks = <&zynqmp_clk ACPU>;
+};
+
+&fpd_dma_chan1 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan2 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan3 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan4 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan5 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan6 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan7 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan8 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&gpu {
+ clocks = <&zynqmp_clk GPU_REF>, <&zynqmp_clk GPU_PP0_REF>, <&zynqmp_clk GPU_PP1_REF>;
+};
+
+&lpd_dma_chan1 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan2 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan3 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan4 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan5 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan6 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan7 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan8 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&nand0 {
+ clocks = <&zynqmp_clk NAND_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&gem0 {
+ clocks = <&zynqmp_clk LPD_LSBUS>, <&zynqmp_clk GEM0_REF>, <&zynqmp_clk GEM0_TX>,
+ <&zynqmp_clk GEM0_RX>, <&zynqmp_clk GEM_TSU>;
+ clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
+};
+
+&gem1 {
+ clocks = <&zynqmp_clk LPD_LSBUS>, <&zynqmp_clk GEM1_REF>, <&zynqmp_clk GEM1_TX>,
+ <&zynqmp_clk GEM1_RX>, <&zynqmp_clk GEM_TSU>;
+ clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
+};
+
+&gem2 {
+ clocks = <&zynqmp_clk LPD_LSBUS>, <&zynqmp_clk GEM2_REF>, <&zynqmp_clk GEM2_TX>,
+ <&zynqmp_clk GEM2_RX>, <&zynqmp_clk GEM_TSU>;
+ clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
+};
+
+&gem3 {
+ clocks = <&zynqmp_clk LPD_LSBUS>, <&zynqmp_clk GEM3_REF>, <&zynqmp_clk GEM3_TX>,
+ <&zynqmp_clk GEM3_RX>, <&zynqmp_clk GEM_TSU>;
+ clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
+};
+
+&gpio {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&i2c0 {
+ clocks = <&zynqmp_clk I2C0_REF>;
+};
+
+&i2c1 {
+ clocks = <&zynqmp_clk I2C1_REF>;
+};
+
+&perf_monitor_ocm {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&perf_monitor_ddr {
+ clocks = <&zynqmp_clk TOPSW_LSBUS>;
+};
+
+&perf_monitor_cci {
+ clocks = <&zynqmp_clk TOPSW_LSBUS>;
+};
+
+&perf_monitor_lpd {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&pcie {
+ clocks = <&zynqmp_clk PCIE_REF>;
+};
+
+&qspi {
+ clocks = <&zynqmp_clk QSPI_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&sata {
+ clocks = <&zynqmp_clk SATA_REF>;
+};
+
+&sdhci0 {
+ clocks = <&zynqmp_clk SDIO0_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&sdhci1 {
+ clocks = <&zynqmp_clk SDIO1_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&spi0 {
+ clocks = <&zynqmp_clk SPI0_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&spi1 {
+ clocks = <&zynqmp_clk SPI1_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&ttc0 {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&ttc1 {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&ttc2 {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&ttc3 {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&uart0 {
+ clocks = <&zynqmp_clk UART0_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&uart1 {
+ clocks = <&zynqmp_clk UART1_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&usb0 {
+ clocks = <&zynqmp_clk USB0_BUS_REF>, <&zynqmp_clk USB3_DUAL_REF>;
+};
+
+&usb1 {
+ clocks = <&zynqmp_clk USB1_BUS_REF>, <&zynqmp_clk USB3_DUAL_REF>;
+};
+
+&watchdog0 {
+ clocks = <&zynqmp_clk WDT>;
+};
+
+&lpd_watchdog {
+ clocks = <&zynqmp_clk LPD_WDT>;
+};
+
+&xilinx_ams {
+ clocks = <&zynqmp_clk AMS_REF>;
+};
+
+&zynqmp_dpsub {
+ clocks = <&dp_aclk>, <&zynqmp_clk DP_AUDIO_REF>, <&zynqmp_clk DP_VIDEO_REF>;
+};
+
+&xlnx_dpdma {
+ clocks = <&zynqmp_clk DPDMA_REF>;
+};
+
+&zynqmp_dp_snd_codec0 {
+ clocks = <&zynqmp_clk DP_AUDIO_REF>;
+};
+
+&pcap {
+ clocks = <&zynqmp_clk PCAP>;
+};
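
The pattern in this new file: a handful of board-level `fixed-clock` inputs feed a clock controller exposed under the platform firmware node, and each peripheral's `clocks` property is then overridden by label to consume outputs of that controller, selected by a one-cell ID from dt-bindings/clock/xlnx-zynqmp-clk.h. Reduced to its skeleton (the consumer node and the ID 0 below are placeholders):

	/dts-v1/;

	/ {
		pss_ref: pss-ref-clk {
			compatible = "fixed-clock";
			#clock-cells = <0>;
			clock-frequency = <33333333>;	/* board crystal */
		};

		firmware {
			clkc: clock-controller {
				compatible = "xlnx,zynqmp-clk";
				#clock-cells = <1>;	/* one cell = output ID */
				clocks = <&pss_ref>;
				clock-names = "pss_ref_clk";
			};
		};

		uart {
			clocks = <&clkc 0>;	/* 0: placeholder, e.g. UART0_REF */
		};
	};
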
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi
index 306ad2157c98..1f45f4290592 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi
@@ -12,6 +12,7 @@
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <100000000>;
+ u-boot,dm-pre-reloc;
};
clk125: clk125 {
@@ -24,6 +25,7 @@
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <200000000>;
+ u-boot,dm-pre-reloc;
};
clk250: clk250 {
@@ -58,13 +60,13 @@
clock-accuracy = <100>;
};
- dpdma_clk: dpdma-clk {
+ dpdma_clk: dpdma_clk {
compatible = "fixed-clock";
#clock-cells = <0x0>;
clock-frequency = <533000000>;
};
- drm_clock: drm-clock {
+ drm_clock: drm_clock {
compatible = "fixed-clock";
#clock-cells = <0x0>;
clock-frequency = <262750000>;
@@ -144,6 +146,10 @@
clocks = <&clk600>, <&clk100>;
};
+&nand0 {
+ clocks = <&clk100 &clk100>;
+};
+
&gem0 {
clocks = <&clk125>, <&clk125>, <&clk125>;
};
@@ -172,6 +178,26 @@
clocks = <&clk100>;
};
+&perf_monitor_ocm {
+ clocks = <&clk100>;
+};
+
+&perf_monitor_ddr {
+ clocks = <&clk100>;
+};
+
+&perf_monitor_cci {
+ clocks = <&clk100>;
+};
+
+&perf_monitor_lpd {
+ clocks = <&clk100>;
+};
+
+&qspi {
+ clocks = <&clk300 &clk300>;
+};
+
&sata {
clocks = <&clk250>;
};
@@ -209,5 +235,21 @@
};
&watchdog0 {
+ clocks = <&clk100>;
+};
+
+&lpd_watchdog {
clocks = <&clk250>;
};
+
+&zynqmp_dpsub {
+ clocks = <&dp_aclk>, <&dp_aud_clk>, <&drm_clock>;
+};
+
+&xlnx_dpdma {
+ clocks = <&dpdma_clk>;
+};
+
+&zynqmp_dp_snd_codec0 {
+ clocks = <&dp_aud_clk>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts
index 0f7b4cf6078e..6117f83c474e 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts
@@ -10,7 +10,8 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZC1232 RevA";
@@ -19,6 +20,7 @@
aliases {
serial0 = &uart0;
serial1 = &dcc;
+ spi0 = &qspi;
};
chosen {
@@ -36,6 +38,35 @@
status = "okay";
};
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB FIXME */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&sata {
status = "okay";
/* SATA OOB timing settings */
@@ -47,6 +78,8 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane0 PHY_TYPE_SATA 0 0 125000000>, <&lane1 PHY_TYPE_SATA 1 1 125000000>;
};
&uart0 {
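
The QSPI flash nodes in these board files describe fixed MTD partitions as direct children of the flash node, each with a `label` and a `reg = <offset size>` pair (hence `#address-cells`/`#size-cells` of 1 on the flash). Current bindings prefer wrapping them in a `fixed-partitions` container, with the hex offset as the unit address rather than a name; a sketch in that newer style, under those assumptions:

	&qspi {
		flash@0 {
			compatible = "jedec,spi-nor";
			reg = <0x0>;

			partitions {
				compatible = "fixed-partitions";
				#address-cells = <1>;
				#size-cells = <1>;

				partition@0 {
					label = "qspi-fsbl-uboot";
					reg = <0x0 0x100000>;	/* offset, size */
				};

				partition@100000 {
					label = "qspi-linux";
					reg = <0x100000 0x500000>;
				};
			};
		};
	};
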
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts
index 9092828f92ec..6ac8346d23d9 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts
@@ -11,7 +11,7 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
/ {
model = "ZynqMP ZC1254 RevA";
@@ -20,6 +20,7 @@
aliases {
serial0 = &uart0;
serial1 = &dcc;
+ spi0 = &qspi;
};
chosen {
@@ -37,6 +38,35 @@
status = "okay";
};
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts
deleted file mode 100644
index 4f404c580eec..000000000000
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts
+++ /dev/null
@@ -1,42 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * dts file for Xilinx ZynqMP ZC1275
- *
- * (C) Copyright 2017 - 2018, Xilinx, Inc.
- *
- * Michal Simek <michal.simek@xilinx.com>
- * Siva Durga Prasad Paladugu <sivadur@xilinx.com>
- */
-
-/dts-v1/;
-
-#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
-
-/ {
- model = "ZynqMP ZC1275 RevA";
- compatible = "xlnx,zynqmp-zc1275-revA", "xlnx,zynqmp-zc1275", "xlnx,zynqmp";
-
- aliases {
- serial0 = &uart0;
- serial1 = &dcc;
- };
-
- chosen {
- bootargs = "earlycon";
- stdout-path = "serial0:115200n8";
- };
-
- memory@0 {
- device_type = "memory";
- reg = <0x0 0x0 0x0 0x80000000>;
- };
-};
-
-&dcc {
- status = "okay";
-};
-
-&uart0 {
- status = "okay";
-};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts
index 9a3e39d1294f..a3f0d135061b 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts
@@ -10,8 +10,10 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+#include <dt-bindings/phy/phy.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
/ {
model = "ZynqMP zc1751-xm015-dc1 RevA";
@@ -19,11 +21,14 @@
aliases {
ethernet0 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c1;
mmc0 = &sdhci0;
mmc1 = &sdhci1;
rtc0 = &rtc;
serial0 = &uart0;
+ spi0 = &qspi;
+ usb0 = &usb0;
};
chosen {
@@ -73,19 +78,31 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
- phy0: phy@0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
+ phy0: ethernet-phy@0 {
reg = <0>;
};
};
&gpio {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_default>;
};
+&gpu {
+ status = "okay";
+};
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 36 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 37 GPIO_ACTIVE_HIGH>;
eeprom: eeprom@55 {
compatible = "atmel,24c64"; /* 24AA64 */
@@ -93,6 +110,245 @@
};
};
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_9_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_9_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_36_grp", "gpio0_37_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_36_grp", "gpio0_37_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_8_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_8_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO34";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO35";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci0_default: sdhci0-default {
+ mux {
+ groups = "sdio0_0_grp";
+ function = "sdio0";
+ };
+
+ conf {
+ groups = "sdio0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio0_cd_0_grp";
+ function = "sdio0_cd";
+ };
+
+ conf-cd {
+ groups = "sdio0_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-wp {
+ groups = "sdio0_wp_0_grp";
+ function = "sdio0_wp";
+ };
+
+ conf-wp {
+ groups = "sdio0_wp_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-wp {
+ groups = "sdio1_wp_0_grp";
+ function = "sdio1_wp";
+ };
+
+ conf-wp {
+ groups = "sdio1_wp_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_gpio_default: gpio-default {
+ mux {
+ function = "gpio0";
+ groups = "gpio0_38_grp";
+ };
+
+ conf {
+ groups = "gpio0_38_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* Micron MT25QU512ABB8ESF */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -108,24 +364,75 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x19 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 3 150000000>;
};
/* eMMC */
&sdhci0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci0_default>;
bus-width = <8>;
+ xlnx,mio_bank = <0>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ xlnx,mio_bank = <1>;
+};
+
+&serdes {
+ status = "okay";
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 0 27000000>,
+ <&lane0 PHY_TYPE_DP 1 1 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
};
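
All of the `pinctrl_*` states added in this file follow the same two-part shape from the generic pin controller bindings: a `mux` child selects which pin group is routed to which function, and one or more `conf*` children attach electrical properties (bias, slew rate, IO standard) to groups or individual pins, typically splitting RX pins (high-impedance inputs) from TX pins (bias disabled). Skeleton, with names lifted from the uart0 state above:

	&pinctrl0 {
		pinctrl_uart0_default: uart0-default {
			mux {
				groups = "uart0_8_grp";	/* route this group... */
				function = "uart0";	/* ...to this function */
			};

			conf-rx {
				pins = "MIO34";		/* input pin */
				bias-high-impedance;
			};

			conf-tx {
				pins = "MIO35";		/* output pin */
				bias-disable;
			};
		};
	};
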
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
index 41a66787247b..133dcef9d046 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
@@ -10,8 +10,9 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
/ {
model = "ZynqMP zc1751-xm016-dc2 RevA";
@@ -21,12 +22,14 @@
can0 = &can0;
can1 = &can1;
ethernet0 = &gem2;
+ gpio0 = &gpio;
i2c0 = &i2c0;
rtc0 = &rtc;
serial0 = &uart0;
serial1 = &uart1;
spi0 = &spi0;
spi1 = &spi1;
+ usb0 = &usb1;
};
chosen {
@@ -42,10 +45,14 @@
&can0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can0_default>;
};
&can1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_default>;
};
&fpd_dma_chan1 {
@@ -84,7 +91,9 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
- phy0: phy@5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem2_default>;
+ phy0: ethernet-phy@5 {
reg = <5>;
ti,rx-internal-delay = <0x8>;
ti,tx-internal-delay = <0xa>;
@@ -100,6 +109,11 @@
&i2c0 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 6 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 7 GPIO_ACTIVE_HIGH>;
tca6416_u26: gpio@20 {
compatible = "ti,tca6416";
@@ -115,6 +129,353 @@
};
};
+&nand0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_nand0_default>;
+ arasan,has-mdma;
+
+ nand@0 {
+ reg = <0x0>;
+ #address-cells = <0x2>;
+ #size-cells = <0x1>;
+
+ partition@0 { /* for testing purpose */
+ label = "nand-fsbl-uboot";
+ reg = <0x0 0x0 0x400000>;
+ };
+ partition@1 { /* for testing purpose */
+ label = "nand-linux";
+ reg = <0x0 0x400000 0x1400000>;
+ };
+ partition@2 { /* for testing purpose */
+ label = "nand-device-tree";
+ reg = <0x0 0x1800000 0x400000>;
+ };
+ partition@3 { /* for testing purpose */
+ label = "nand-rootfs";
+ reg = <0x0 0x1c00000 0x1400000>;
+ };
+ partition@4 { /* for testing purpose */
+ label = "nand-bitstream";
+ reg = <0x0 0x3000000 0x400000>;
+ };
+ partition@5 { /* for testing purpose */
+ label = "nand-misc";
+ reg = <0x0 0x3400000 0xfcc00000>;
+ };
+ };
+ nand@1 {
+ reg = <0x1>;
+ #address-cells = <0x2>;
+ #size-cells = <0x1>;
+
+ partition@0 { /* for testing purpose */
+ label = "nand1-fsbl-uboot";
+ reg = <0x0 0x0 0x400000>;
+ };
+ partition@1 { /* for testing purpose */
+ label = "nand1-linux";
+ reg = <0x0 0x400000 0x1400000>;
+ };
+ partition@2 { /* for testing purpose */
+ label = "nand1-device-tree";
+ reg = <0x0 0x1800000 0x400000>;
+ };
+ partition@3 { /* for testing purpose */
+ label = "nand1-rootfs";
+ reg = <0x0 0x1c00000 0x1400000>;
+ };
+ partition@4 { /* for testing purpose */
+ label = "nand1-bitstream";
+ reg = <0x0 0x3000000 0x400000>;
+ };
+ partition@5 { /* for testing purpose */
+ label = "nand1-misc";
+ reg = <0x0 0x3400000 0xfcc00000>;
+ };
+ };
+};
+
+&pinctrl0 {
+ status = "okay";
+ pinctrl_can0_default: can0-default {
+ mux {
+ function = "can0";
+ groups = "can0_9_grp";
+ };
+
+ conf {
+ groups = "can0_9_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO38";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO39";
+ bias-disable;
+ };
+ };
+
+ pinctrl_can1_default: can1-default {
+ mux {
+ function = "can1";
+ groups = "can1_8_grp";
+ };
+
+ conf {
+ groups = "can1_8_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO33";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO32";
+ bias-disable;
+ };
+ };
+
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_1_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_1_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_6_grp", "gpio0_7_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_6_grp", "gpio0_7_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_10_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_10_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO42";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO43";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_10_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_10_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO41";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO40";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb1_default: usb1-default {
+ mux {
+ groups = "usb1_0_grp";
+ function = "usb1";
+ };
+
+ conf {
+ groups = "usb1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO64", "MIO65", "MIO67";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO66", "MIO68", "MIO69", "MIO70", "MIO71",
+ "MIO72", "MIO73", "MIO74", "MIO75";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem2_default: gem2-default {
+ mux {
+ function = "ethernet2";
+ groups = "ethernet2_0_grp";
+ };
+
+ conf {
+ groups = "ethernet2_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO58", "MIO59", "MIO60", "MIO61", "MIO62",
+ "MIO63";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO52", "MIO53", "MIO54", "MIO55", "MIO56",
+ "MIO57";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio2";
+ groups = "mdio2_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio2_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_nand0_default: nand0-default {
+ mux {
+ groups = "nand0_0_grp";
+ function = "nand0";
+ };
+
+ conf {
+ groups = "nand0_0_grp";
+ bias-pull-up;
+ };
+
+ mux-ce {
+ groups = "nand0_ce_0_grp";
+ function = "nand0_ce";
+ };
+
+ conf-ce {
+ groups = "nand0_ce_0_grp";
+ bias-pull-up;
+ };
+
+ mux-rb {
+ groups = "nand0_rb_0_grp";
+ function = "nand0_rb";
+ };
+
+ conf-rb {
+ groups = "nand0_rb_0_grp";
+ bias-pull-up;
+ };
+
+ mux-dqs {
+ groups = "nand0_dqs_0_grp";
+ function = "nand0_dqs";
+ };
+
+ conf-dqs {
+ groups = "nand0_dqs_0_grp";
+ bias-pull-up;
+ };
+ };
+
+ pinctrl_spi0_default: spi0-default {
+ mux {
+ groups = "spi0_0_grp";
+ function = "spi0";
+ };
+
+ conf {
+ groups = "spi0_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-cs {
+ groups = "spi0_ss_0_grp", "spi0_ss_1_grp",
+ "spi0_ss_2_grp";
+ function = "spi0_ss";
+ };
+
+ conf-cs {
+ groups = "spi0_ss_0_grp", "spi0_ss_1_grp",
+ "spi0_ss_2_grp";
+ bias-disable;
+ };
+ };
+
+ pinctrl_spi1_default: spi1-default {
+ mux {
+ groups = "spi1_3_grp";
+ function = "spi1";
+ };
+
+ conf {
+ groups = "spi1_3_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-cs {
+ groups = "spi1_ss_9_grp", "spi1_ss_10_grp",
+ "spi1_ss_11_grp";
+ function = "spi1_ss";
+ };
+
+ conf-cs {
+ groups = "spi1_ss_9_grp", "spi1_ss_10_grp",
+ "spi1_ss_11_grp";
+ bias-disable;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -122,8 +483,10 @@
&spi0 {
status = "okay";
num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0_default>;
- spi0_flash0: flash0@0 {
+ spi0_flash0: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "sst,sst25wf080", "jedec,spi-nor";
@@ -140,8 +503,10 @@
&spi1 {
status = "okay";
num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1_default>;
- spi1_flash0: flash0@0 {
+ spi1_flash0: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "atmel,at45db041e", "atmel,at45", "atmel,dataflash";
@@ -158,12 +523,23 @@
/* ULPI SMSC USB3320 */
&usb1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1_default>;
+};
+
+&dwc3_1 {
+ status = "okay";
+ dr_mode = "host";
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
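
The recurring i2c pattern in these boards, a second "gpio" pinctrl state plus `scl-gpios`/`sda-gpios`, wires up the I2C core's generic bus recovery: when a slave holds SDA low, the driver switches the pads to the "gpio" state, pulses SCL as a plain GPIO until the bus is released, then switches back to "default". Minimal consumer-side sketch, reusing the names from the xm016 hunk:

	&i2c0 {
		status = "okay";
		pinctrl-names = "default", "gpio";	/* normal vs. recovery mode */
		pinctrl-0 = <&pinctrl_i2c0_default>;
		pinctrl-1 = <&pinctrl_i2c0_gpio>;
		scl-gpios = <&gpio 6 GPIO_ACTIVE_HIGH>;	/* same pads, GPIO view */
		sda-gpios = <&gpio 7 GPIO_ACTIVE_HIGH>;
	};
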
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts
index 7a49deeae647..2ead8dd24d57 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts
@@ -10,7 +10,7 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
/ {
model = "ZynqMP zc1751-xm017-dc3 RevA";
@@ -18,12 +18,15 @@
aliases {
ethernet0 = &gem0;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
mmc0 = &sdhci1;
rtc0 = &rtc;
serial0 = &uart0;
serial1 = &uart1;
+ usb0 = &usb0;
+ usb1 = &usb1;
};
chosen {
@@ -73,7 +76,7 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
- phy0: phy@0 { /* VSC8211 */
+ phy0: ethernet-phy@0 { /* VSC8211 */
reg = <0>;
};
};
@@ -107,6 +110,63 @@
clock-frequency = <400000>;
};
+/* MT29F64G08AECDBJ4-6 */
+&nand0 {
+ status = "okay";
+ arasan,has-mdma;
+ num-cs = <2>;
+
+ partition@0 { /* for testing purpose */
+ label = "nand-fsbl-uboot";
+ reg = <0x0 0x0 0x400000>;
+ };
+ partition@1 { /* for testing purpose */
+ label = "nand-linux";
+ reg = <0x0 0x400000 0x1400000>;
+ };
+ partition@2 { /* for testing purpose */
+ label = "nand-device-tree";
+ reg = <0x0 0x1800000 0x400000>;
+ };
+ partition@3 { /* for testing purpose */
+ label = "nand-rootfs";
+ reg = <0x0 0x1C00000 0x1400000>;
+ };
+ partition@4 { /* for testing purpose */
+ label = "nand-bitstream";
+ reg = <0x0 0x3000000 0x400000>;
+ };
+ partition@5 { /* for testing purpose */
+ label = "nand-misc";
+ reg = <0x0 0x3400000 0xFCC00000>;
+ };
+
+ partition@6 { /* for testing purpose */
+ label = "nand1-fsbl-uboot";
+ reg = <0x1 0x0 0x400000>;
+ };
+ partition@7 { /* for testing purpose */
+ label = "nand1-linux";
+ reg = <0x1 0x400000 0x1400000>;
+ };
+ partition@8 { /* for testing purpose */
+ label = "nand1-device-tree";
+ reg = <0x1 0x1800000 0x400000>;
+ };
+ partition@9 { /* for testing purpose */
+ label = "nand1-rootfs";
+ reg = <0x1 0x1C00000 0x1400000>;
+ };
+ partition@10 { /* for testing purpose */
+ label = "nand1-bitstream";
+ reg = <0x1 0x3000000 0x400000>;
+ };
+ partition@11 { /* for testing purpose */
+ label = "nand1-misc";
+ reg = <0x1 0x3400000 0xFCC00000>;
+ };
+};
+
&rtc {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts
index 54c7b4f1d1e4..11f596a1e65e 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts
@@ -10,22 +10,26 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
/ {
model = "ZynqMP zc1751-xm018-dc4";
compatible = "xlnx,zynqmp-zc1751", "xlnx,zynqmp";
aliases {
+ can0 = &can0;
+ can1 = &can1;
ethernet0 = &gem0;
ethernet1 = &gem1;
ethernet2 = &gem2;
ethernet3 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
rtc0 = &rtc;
serial0 = &uart0;
serial1 = &uart1;
+ spi0 = &qspi;
};
chosen {
@@ -111,6 +115,14 @@
status = "okay";
};
+&zynqmp_dpsub {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
+
&gem0 {
status = "okay";
phy-mode = "rgmii-id";
@@ -151,6 +163,10 @@
status = "okay";
};
+&gpu {
+ status = "okay";
+};
+
&i2c0 {
clock-frequency = <400000>;
status = "okay";
@@ -161,6 +177,35 @@
status = "okay";
};
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts
index b8b5ff13818d..e8852a283958 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts
@@ -11,8 +11,9 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
/ {
model = "ZynqMP zc1751-xm019-dc5 RevA";
@@ -20,6 +21,7 @@
aliases {
ethernet0 = &gem1;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
mmc0 = &sdhci0;
@@ -74,7 +76,9 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
- phy0: phy@0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem1_default>;
+ phy0: ethernet-phy@0 {
reg = <0>;
};
};
@@ -85,41 +89,366 @@
&i2c0 {
status = "okay";
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 74 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 75 GPIO_ACTIVE_HIGH>;
};
&i2c1 {
status = "okay";
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 76 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 77 GPIO_ACTIVE_HIGH>;
+
+};
+
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_18_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_18_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_74_grp", "gpio0_75_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_74_grp", "gpio0_75_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_19_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_19_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_76_grp", "gpio0_77_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_76_grp", "gpio0_77_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_17_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO70";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO71";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_18_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_18_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO73";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO72";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem1_default: gem1-default {
+ mux {
+ function = "ethernet1";
+ groups = "ethernet1_0_grp";
+ };
+
+ conf {
+ groups = "ethernet1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO44", "MIO45", "MIO46", "MIO47", "MIO48",
+ "MIO49";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO38", "MIO39", "MIO40", "MIO41", "MIO42",
+ "MIO43";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio1";
+ groups = "mdio1_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci0_default: sdhci0-default {
+ mux {
+ groups = "sdio0_0_grp";
+ function = "sdio0";
+ };
+
+ conf {
+ groups = "sdio0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio0_cd_0_grp";
+ function = "sdio0_cd";
+ };
+
+ conf-cd {
+ groups = "sdio0_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-wp {
+ groups = "sdio0_wp_0_grp";
+ function = "sdio0_wp";
+ };
+
+ conf-wp {
+ groups = "sdio0_wp_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_watchdog0_default: watchdog0-default {
+ mux-clk {
+ groups = "swdt0_clk_1_grp";
+ function = "swdt0_clk";
+ };
+
+ conf-clk {
+ groups = "swdt0_clk_1_grp";
+ bias-pull-up;
+ };
+
+ mux-rst {
+ groups = "swdt0_rst_1_grp";
+ function = "swdt0_rst";
+ };
+
+ conf-rst {
+ groups = "swdt0_rst_1_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ };
+ };
+
+ pinctrl_ttc0_default: ttc0-default {
+ mux-clk {
+ groups = "ttc0_clk_0_grp";
+ function = "ttc0_clk";
+ };
+
+ conf-clk {
+ groups = "ttc0_clk_0_grp";
+ bias-pull-up;
+ };
+
+ mux-wav {
+ groups = "ttc0_wav_0_grp";
+ function = "ttc0_wav";
+ };
+
+ conf-wav {
+ groups = "ttc0_wav_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ };
+ };
+
+ pinctrl_ttc1_default: ttc1-default {
+ mux-clk {
+ groups = "ttc1_clk_0_grp";
+ function = "ttc1_clk";
+ };
+
+ conf-clk {
+ groups = "ttc1_clk_0_grp";
+ bias-pull-up;
+ };
+
+ mux-wav {
+ groups = "ttc1_wav_0_grp";
+ function = "ttc1_wav";
+ };
+
+ conf-wav {
+ groups = "ttc1_wav_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ };
+ };
+
+ pinctrl_ttc2_default: ttc2-default {
+ mux-clk {
+ groups = "ttc2_clk_0_grp";
+ function = "ttc2_clk";
+ };
+
+ conf-clk {
+ groups = "ttc2_clk_0_grp";
+ bias-pull-up;
+ };
+
+ mux-wav {
+ groups = "ttc2_wav_0_grp";
+ function = "ttc2_wav";
+ };
+
+ conf-wav {
+ groups = "ttc2_wav_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ };
+ };
+
+ pinctrl_ttc3_default: ttc3-default {
+ mux-clk {
+ groups = "ttc3_clk_0_grp";
+ function = "ttc3_clk";
+ };
+
+ conf-clk {
+ groups = "ttc3_clk_0_grp";
+ bias-pull-up;
+ };
+
+ mux-wav {
+ groups = "ttc3_wav_0_grp";
+ function = "ttc3_wav";
+ };
+
+ conf-wav {
+ groups = "ttc3_wav_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ };
+ };
};
&sdhci0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci0_default>;
no-1-8-v;
+ xlnx,mio_bank = <0>;
};
&ttc0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ttc0_default>;
};
&ttc1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ttc1_default>;
};
&ttc2 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ttc2_default>;
};
&ttc3 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ttc3_default>;
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
&watchdog0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_watchdog0_default>;
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
index e5699d0d91e4..e0590d29051f 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
@@ -11,16 +11,19 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU100 RevC";
compatible = "xlnx,zynqmp-zcu100-revC", "xlnx,zynqmp-zcu100", "xlnx,zynqmp";
aliases {
+ gpio0 = &gpio;
i2c0 = &i2c1;
rtc0 = &rtc;
serial0 = &uart1;
@@ -28,6 +31,8 @@
serial2 = &dcc;
spi0 = &spi0;
spi1 = &spi1;
+ usb0 = &usb0;
+ usb1 = &usb1;
mmc0 = &sdhci0;
mmc1 = &sdhci1;
};
@@ -49,11 +54,20 @@
label = "sw4";
gpios = <&gpio 23 GPIO_ACTIVE_LOW>;
linux,code = <KEY_POWER>;
- wakeup-source;
+ gpio-key,wakeup;
autorepeat;
};
};
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&xilinx_ams 0>, <&xilinx_ams 1>, <&xilinx_ams 2>,
+ <&xilinx_ams 3>, <&xilinx_ams 4>, <&xilinx_ams 5>,
+ <&xilinx_ams 6>, <&xilinx_ams 7>, <&xilinx_ams 8>,
+ <&xilinx_ams 9>, <&xilinx_ams 10>,
+ <&xilinx_ams 11>, <&xilinx_ams 12>;
+ };
+
leds {
compatible = "gpio-leds";
ds2 {
@@ -82,13 +96,22 @@
linux,default-trigger = "bluetooth-power";
};
- vbus-det { /* U5 USB5744 VBUS detection via MIO25 */
+ vbus_det { /* U5 USB5744 VBUS detection via MIO25 */
label = "vbus_det";
gpios = <&gpio 25 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
};
+ ltc2954: ltc2954 { /* U7 */
+ compatible = "lltc,ltc2954", "lltc,ltc2952";
+ status = "disabled";
+ trigger-gpios = <&gpio 26 GPIO_ACTIVE_LOW>; /* INT line - input */
+ /* If there is HW watchdog on mezzanine this signal should be connected there */
+ watchdog-gpios = <&gpio 35 GPIO_ACTIVE_HIGH>; /* MIO on PAD */
+ kill-gpios = <&gpio 34 GPIO_ACTIVE_LOW>; /* KILL signal - output */
+ };
+
wmmcsdio_fixed: fixedregulator-mmcsdio {
compatible = "regulator-fixed";
regulator-name = "wmmcsdio_fixed";
@@ -98,10 +121,9 @@
regulator-boot-on;
};
- sdio_pwrseq: sdio-pwrseq {
+ sdio_pwrseq: sdio_pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */
- post-power-on-delay-ms = <10>;
};
};
@@ -140,8 +162,17 @@
"", "", "", "";
};
+&gpu {
+ status = "okay";
+};
+
&i2c1 {
status = "okay";
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 4 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 5 GPIO_ACTIVE_HIGH>;
clock-frequency = <100000>;
i2c-mux@75 { /* u11 */
compatible = "nxp,pca9548";
@@ -218,6 +249,221 @@
};
};
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_1_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_1_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_4_grp", "gpio0_5_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_4_grp", "gpio0_5_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_sdhci0_default: sdhci0-default {
+ mux {
+ groups = "sdio0_3_grp";
+ function = "sdio0";
+ };
+
+ conf {
+ groups = "sdio0_3_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio0_cd_0_grp";
+ function = "sdio0_cd";
+ };
+
+ conf-cd {
+ groups = "sdio0_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_2_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_2_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_spi0_default: spi0-default {
+ mux {
+ groups = "spi0_3_grp";
+ function = "spi0";
+ };
+
+ conf {
+ groups = "spi0_3_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-cs {
+ groups = "spi0_ss_9_grp";
+ function = "spi0_ss";
+ };
+
+ conf-cs {
+ groups = "spi0_ss_9_grp";
+ bias-disable;
+ };
+
+ };
+
+ pinctrl_spi1_default: spi1-default {
+ mux {
+ groups = "spi1_0_grp";
+ function = "spi1";
+ };
+
+ conf {
+ groups = "spi1_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-cs {
+ groups = "spi1_ss_0_grp";
+ function = "spi1_ss";
+ };
+
+ conf-cs {
+ groups = "spi1_ss_0_grp";
+ bias-disable;
+ };
+
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_0_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO3";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO2";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_0_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO1";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO0";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb1_default: usb1-default {
+ mux {
+ groups = "usb1_0_grp";
+ function = "usb1";
+ };
+
+ conf {
+ groups = "usb1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO64", "MIO65", "MIO67";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO66", "MIO68", "MIO69", "MIO70", "MIO71",
+ "MIO72", "MIO73", "MIO74", "MIO75";
+ bias-disable;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -226,13 +472,18 @@
&sdhci0 {
status = "okay";
no-1-8-v;
- broken-cd; /* CD has to be enabled by default */
disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci0_default>;
+ xlnx,mio_bank = <0>;
};
&sdhci1 {
status = "okay";
bus-width = <0x4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ xlnx,mio_bank = <0>;
non-removable;
disable-wp;
cap-power-off-card;
@@ -248,18 +499,30 @@
};
};
+&serdes {
+ status = "okay";
+};
+
&spi0 { /* Low Speed connector */
status = "okay";
label = "LS-SPI0";
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0_default>;
};
&spi1 { /* High Speed connector */
status = "okay";
label = "HS-SPI1";
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1_default>;
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
bluetooth {
compatible = "ti,wl1831-st";
enable-gpios = <&gpio 8 GPIO_ACTIVE_HIGH>;
@@ -268,19 +531,76 @@
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "peripheral";
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 0 26000000>;
+ maximum-speed = "super-speed";
};
/* ULPI SMSC USB3320 */
&usb1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1_default>;
+};
+
+&dwc3_1 {
+ status = "okay";
+ dr_mode = "host";
+ phy-names = "usb3-phy";
+ phys = <&lane3 PHY_TYPE_USB3 1 0 26000000>;
+ maximum-speed = "super-speed";
};
&watchdog0 {
status = "okay";
};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 1 27000000>,
+ <&lane0 PHY_TYPE_DP 1 1 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
+
+&xilinx_ams {
+ status = "okay";
+};
+
+&ams_ps {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-rev1.0.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-rev1.0.dts
index 6647e97edba3..6c702f2674e3 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-rev1.0.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-rev1.0.dts
@@ -34,3 +34,7 @@
reg = <0xe0 0x3>;
};
};
+
+&sdhci1 {
+ /delete-property/ no-1-8-v;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
index 2a3b66547c6d..8a1ec55f2be4 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
@@ -10,9 +10,11 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU102 RevA";
@@ -20,6 +22,7 @@
aliases {
ethernet0 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
mmc0 = &sdhci1;
@@ -27,11 +30,14 @@
serial0 = &uart0;
serial1 = &uart1;
serial2 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
};
chosen {
bootargs = "earlycon";
stdout-path = "serial0:115200n8";
+ xlnx,eeprom = &eeprom;
};
memory@0 {
@@ -46,14 +52,14 @@
label = "sw19";
gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
linux,code = <KEY_DOWN>;
- wakeup-source;
+ gpio-key,wakeup;
autorepeat;
};
};
leds {
compatible = "gpio-leds";
- heartbeat-led {
+ heartbeat_led {
label = "heartbeat";
gpios = <&gpio 23 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
@@ -63,6 +69,8 @@
&can1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_default>;
};
&dcc {
@@ -105,60 +113,64 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
- phy0: phy@21 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
+ phy0: ethernet-phy@21 {
reg = <21>;
ti,rx-internal-delay = <0x8>;
ti,tx-internal-delay = <0xa>;
ti,fifo-depth = <0x1>;
ti,dp83867-rxctrl-strap-quirk;
+ /* reset-gpios = <&tca6416_u97 6 GPIO_ACTIVE_LOW>; */
};
};
&gpio {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_default>;
+};
+
+&gpu {
+ status = "okay";
};
&i2c0 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 14 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 15 GPIO_ACTIVE_HIGH>;
tca6416_u97: gpio@20 {
compatible = "ti,tca6416";
reg = <0x20>;
- gpio-controller;
+ gpio-controller; /* IRQ not connected */
#gpio-cells = <2>;
- /*
- * IRQ not connected
- * Lines:
- * 0 - PS_GTR_LAN_SEL0
- * 1 - PS_GTR_LAN_SEL1
- * 2 - PS_GTR_LAN_SEL2
- * 3 - PS_GTR_LAN_SEL3
- * 4 - PCI_CLK_DIR_SEL
- * 5 - IIC_MUX_RESET_B
- * 6 - GEM3_EXP_RESET_B
- * 7, 10 - 17 - not connected
- */
-
- gtr-sel0 {
+ gpio-line-names = "PS_GTR_LAN_SEL0", "PS_GTR_LAN_SEL1", "PS_GTR_LAN_SEL2", "PS_GTR_LAN_SEL3",
+ "PCI_CLK_DIR_SEL", "IIC_MUX_RESET_B", "GEM3_EXP_RESET_B",
+ "", "", "", "", "", "", "", "", "";
+ gtr_sel0 {
gpio-hog;
gpios = <0 0>;
output-low; /* PCIE = 0, DP = 1 */
line-name = "sel0";
};
- gtr-sel1 {
+ gtr_sel1 {
gpio-hog;
gpios = <1 0>;
output-high; /* PCIE = 0, DP = 1 */
line-name = "sel1";
};
- gtr-sel2 {
+ gtr_sel2 {
gpio-hog;
gpios = <2 0>;
output-high; /* PCIE = 0, USB0 = 1 */
line-name = "sel2";
};
- gtr-sel3 {
+ gtr_sel3 {
gpio-hog;
gpios = <3 0>;
output-high; /* PCIE = 0, SATA = 1 */
@@ -169,27 +181,12 @@
tca6416_u61: gpio@21 {
compatible = "ti,tca6416";
reg = <0x21>;
- gpio-controller;
+ gpio-controller; /* IRQ not connected */
#gpio-cells = <2>;
- /*
- * IRQ not connected
- * Lines:
- * 0 - VCCPSPLL_EN
- * 1 - MGTRAVCC_EN
- * 2 - MGTRAVTT_EN
- * 3 - VCCPSDDRPLL_EN
- * 4 - MIO26_PMU_INPUT_LS
- * 5 - PL_PMBUS_ALERT
- * 6 - PS_PMBUS_ALERT
- * 7 - MAXIM_PMBUS_ALERT
- * 10 - PL_DDR4_VTERM_EN
- * 11 - PL_DDR4_VPP_2V5_EN
- * 12 - PS_DIMM_VDDQ_TO_PSVCCO_ON
- * 13 - PS_DIMM_SUSPEND_EN
- * 14 - PS_DDR4_VTERM_EN
- * 15 - PS_DDR4_VPP_2V5_EN
- * 16 - 17 - not connected
- */
+ gpio-line-names = "VCCPSPLL_EN", "MGTRAVCC_EN", "MGTRAVTT_EN", "VCCPSDDRPLL_EN", "MIO26_PMU_INPUT_LS",
+ "PL_PMBUS_ALERT", "PS_PMBUS_ALERT", "MAXIM_PMBUS_ALERT", "PL_DDR4_VTERM_EN",
+ "PL_DDR4_VPP_2V5_EN", "PS_DIMM_VDDQ_TO_PSVCCO_ON", "PS_DIMM_SUSPEND_EN",
+ "PS_DDR4_VTERM_EN", "PS_DDR4_VPP_2V5_EN", "", "";
};
i2c-mux@75 { /* u60 */
@@ -353,7 +350,6 @@
status = "disabled"; /* unreachable */
reg = <0x20>;
};
-
max20751@72 { /* u95 */
compatible = "maxim,max20751";
reg = <0x72>;
@@ -370,6 +366,11 @@
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
/* PL i2c via PCA9306 - u45 */
i2c-mux@74 { /* u34 */
@@ -399,6 +400,7 @@
#size-cells = <0>;
reg = <1>;
si5341: clock-generator@36 { /* SI5341 - u69 */
+ compatible = "silabs,si5341";
reg = <0x36>;
};
@@ -414,6 +416,7 @@
temperature-stability = <50>;
factory-fout = <300000000>;
clock-frequency = <300000000>;
+ clock-output-names = "si570_user";
};
};
i2c@3 {
@@ -427,6 +430,7 @@
temperature-stability = <50>; /* copy from zc702 */
factory-fout = <156250000>;
clock-frequency = <148500000>;
+ clock-output-names = "si570_mgt";
};
};
i2c@4 {
@@ -434,6 +438,7 @@
#size-cells = <0>;
reg = <4>;
si5328: clock-generator@69 {/* SI5328 - u20 */
+ compatible = "silabs,si5328";
reg = <0x69>;
/*
* Chip has interrupt present connected to PL
@@ -502,10 +507,303 @@
};
};
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_3_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_3_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_5_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_5_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO21";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO20";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_can1_default: can1-default {
+ mux {
+ function = "can1";
+ groups = "can1_6_grp";
+ };
+
+ conf {
+ groups = "can1_6_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO25";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO24";
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-wp {
+ groups = "sdio1_wp_0_grp";
+ function = "sdio1_wp";
+ };
+
+ conf-wp {
+ groups = "sdio1_wp_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_gpio_default: gpio-default {
+ mux-sw {
+ function = "gpio0";
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ };
+
+ conf-sw {
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-msp {
+ function = "gpio0";
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ };
+
+ conf-msp {
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-pull-up {
+ pins = "MIO22", "MIO23";
+ bias-pull-up;
+ };
+
+ conf-pull-none {
+ pins = "MIO13", "MIO38";
+ bias-disable;
+ };
+ };
+};
+
&pcie {
status = "okay";
};
+&qspi {
+ status = "okay";
+ is-dual = <1>;
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -521,27 +819,89 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 1 125000000>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ xlnx,mio_bank = <1>;
no-1-8-v;
};
+&serdes {
+ status = "okay";
+};
+
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+ maximum-speed = "super-speed";
};
&watchdog0 {
status = "okay";
};
+
+&xilinx_ams {
+ status = "okay";
+};
+
+&ams_ps {
+ status = "okay";
+};
+
+&ams_pl {
+ status = "okay";
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0";
+ phys = <&lane1 PHY_TYPE_DP 0 3 27000000>;
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts
index 1780ed237daf..2132024a253d 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts
@@ -16,12 +16,13 @@
&gem3 {
phy-handle = <&phyc>;
- phyc: phy@c {
+ phyc: ethernet-phy@c {
reg = <0xc>;
ti,rx-internal-delay = <0x8>;
ti,tx-internal-delay = <0xa>;
ti,fifo-depth = <0x1>;
ti,dp83867-rxctrl-strap-quirk;
+ /* reset-gpios = <&tca6416_u97 6 GPIO_ACTIVE_LOW>; */
};
/* Cleanup from RevA */
/delete-node/ phy@21;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
index 8f456146409f..9335914d8ccf 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
@@ -10,8 +10,10 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU104 RevA";
@@ -19,12 +21,15 @@
aliases {
ethernet0 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c1;
mmc0 = &sdhci1;
rtc0 = &rtc;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
};
chosen {
@@ -50,7 +55,7 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
- phy0: phy@c {
+ phy0: ethernet-phy@c {
reg = <0xc>;
ti,rx-internal-delay = <0x8>;
ti,tx-internal-delay = <0xa>;
@@ -63,9 +68,18 @@
status = "okay";
};
+&gpu {
+ status = "okay";
+};
+
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
/* Another connection to this bus via PL i2c via PCA9306 - u45 */
i2c-mux@74 { /* u34 */
@@ -85,7 +99,7 @@
* 512B - 768B address 0x56
* 768B - 1024B address 0x57
*/
- eeprom@54 { /* u23 */
+ eeprom: eeprom@54 { /* u23 */
compatible = "atmel,24c08";
reg = <0x54>;
#address-cells = <1>;
@@ -98,6 +112,7 @@
#size-cells = <0>;
reg = <1>;
clock_8t49n287: clock-generator@6c { /* 8T49N287 - u182 */
+ compatible = "idt,8t49n287";
reg = <0x6c>;
};
};
@@ -107,9 +122,13 @@
#size-cells = <0>;
reg = <2>;
irps5401_43: irps54012@43 { /* IRPS5401 - u175 */
+ #clock-cells = <0>;
+ compatible = "infineon,irps5401";
reg = <0x43>;
};
irps5401_4d: irps54012@4d { /* IRPS5401 - u180 */
+ #clock-cells = <0>;
+ compatible = "infineon,irps5401";
reg = <0x4d>;
};
};
@@ -118,9 +137,9 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <4>;
- tca6416_u97: gpio@21 {
+ tca6416_u97: gpio@20 {
compatible = "ti,tca6416";
- reg = <0x21>;
+ reg = <0x20>;
gpio-controller;
#gpio-cells = <2>;
/*
@@ -154,6 +173,233 @@
};
};
+&pinctrl0 {
+ status = "okay";
+
+ pinctrl_can1_default: can1-default {
+ mux {
+ function = "can1";
+ groups = "can1_6_grp";
+ };
+
+ conf {
+ groups = "can1_6_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO25";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO24";
+ bias-disable;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ drive-strength = <12>;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_5_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_5_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO21";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO20";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* n25q512a 128MiB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -169,28 +415,90 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 1 125000000>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
- no-1-8-v;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ xlnx,mio_bank = <1>;
disable-wp;
+ no-1-8-v;
+};
+
+&serdes {
+ status = "okay";
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+ maximum-speed = "super-speed";
};
&watchdog0 {
status = "okay";
};
+
+&xilinx_ams {
+ status = "okay";
+};
+
+&ams_ps {
+ status = "okay";
+};
+
+&ams_pl {
+ status = "okay";
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 3 27000000>, <&lane0 PHY_TYPE_DP 1 3 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts
new file mode 100644
index 000000000000..654028d9afde
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts
@@ -0,0 +1,547 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * dts file for Xilinx ZynqMP ZCU104
+ *
+ * (C) Copyright 2017 - 2018, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ */
+
+/dts-v1/;
+
+#include "zynqmp.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
+
+/ {
+ model = "ZynqMP ZCU104 RevC";
+ compatible = "xlnx,zynqmp-zcu104-revC", "xlnx,zynqmp-zcu104", "xlnx,zynqmp";
+
+ aliases {
+ ethernet0 = &gem3;
+ gpio0 = &gpio;
+ i2c0 = &i2c1;
+ mmc0 = &sdhci1;
+ rtc0 = &rtc;
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "serial0:115200n8";
+ xlnx,eeprom = &eeprom;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+};
+
+&can1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_default>;
+};
+
+&dcc {
+ status = "okay";
+};
+
+&fpd_dma_chan1 {
+ status = "okay";
+};
+
+&fpd_dma_chan2 {
+ status = "okay";
+};
+
+&fpd_dma_chan3 {
+ status = "okay";
+};
+
+&fpd_dma_chan4 {
+ status = "okay";
+};
+
+&fpd_dma_chan5 {
+ status = "okay";
+};
+
+&fpd_dma_chan6 {
+ status = "okay";
+};
+
+&fpd_dma_chan7 {
+ status = "okay";
+};
+
+&fpd_dma_chan8 {
+ status = "okay";
+};
+
+&gem3 {
+ status = "okay";
+ phy-handle = <&phy0>;
+ phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
+ phy0: ethernet-phy@c {
+ reg = <0xc>;
+ ti,rx-internal-delay = <0x8>;
+ ti,tx-internal-delay = <0xa>;
+ ti,fifo-depth = <0x1>;
+ ti,dp83867-rxctrl-strap-quirk;
+ };
+};
+
+&gpio {
+ status = "okay";
+};
+
+&gpu {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
+
+ tca6416_u97: gpio@20 {
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ /*
+ * IRQ not connected
+ * Lines:
+ * 0 - IRPS5401_ALERT_B
+ * 1 - HDMI_8T49N241_INT_ALM
+ * 2 - MAX6643_OT_B
+ * 3 - MAX6643_FANFAIL_B
+ * 5 - IIC_MUX_RESET_B
+ * 6 - GEM3_EXP_RESET_B
+ * 7 - FMC_LPC_PRSNT_M2C_B
+ * 4, 10 - 17 - not connected
+ */
+ };
+
+ /* Another connection to this bus via PL i2c via PCA9306 - u45 */
+ i2c-mux@74 { /* u34 */
+ compatible = "nxp,pca9548";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x74>;
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ /*
+ * IIC_EEPROM - 1kB memory split into 256B blocks,
+ * where each block has a different I2C address.
+ * 0 - 256B address 0x54
+ * 256B - 512B address 0x55
+ * 512B - 768B address 0x56
+ * 768B - 1024B address 0x57
+ */
+ eeprom: eeprom@54 { /* u23 */
+ compatible = "atmel,24c08";
+ reg = <0x54>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ clock_8t49n287: clock-generator@6c { /* 8T49N287 - u182 */
+ compatible = "idt,8t49n287";
+ reg = <0x6c>;
+ };
+ };
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ irps5401_43: irps5401@43 { /* IRPS5401 - u175 */
+ #clock-cells = <0>;
+ compatible = "infineon,irps5401";
+ reg = <0x43>;
+ };
+ irps5401_4d: irps5401@4d { /* IRPS5401 - u180 */
+ #clock-cells = <0>;
+ compatible = "infineon,irps5401";
+ reg = <0x4d>;
+ };
+ };
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ ina226@40 { /* u183 */
+ compatible = "ti,ina226";
+ reg = <0x40>;
+ shunt-resistor = <5000>;
+ };
+ };
+
+ i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+
+ i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ };
+
+ /* 4, 6 not connected */
+ };
+};
+
+&pinctrl0 {
+ status = "okay";
+
+ pinctrl_can1_default: can1-default {
+ mux {
+ function = "can1";
+ groups = "can1_6_grp";
+ };
+
+ conf {
+ groups = "can1_6_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO25";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO24";
+ bias-disable;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ drive-strength = <12>;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_5_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_5_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO21";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO20";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* n25q512a 128MiB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
+&rtc {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+ /* SATA OOB timing settings */
+ ceva,p0-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>;
+ ceva,p0-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
+ ceva,p0-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
+ ceva,p0-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ ceva,p1-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>;
+ ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
+ ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
+ ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 1 125000000>;
+};
+
+/* SD1 with level shifter */
+&sdhci1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ xlnx,mio_bank = <1>;
+ disable-wp;
+ no-1-8-v;
+};
+
+&serdes {
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
+};
+
+&uart1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
+};
+
+/* ULPI SMSC USB3320 */
+&usb0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+ maximum-speed = "super-speed";
+};
+
+&watchdog0 {
+ status = "okay";
+};
+
+&xilinx_ams {
+ status = "okay";
+};
+
+&ams_ps {
+ status = "okay";
+};
+
+&ams_pl {
+ status = "okay";
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 3 27000000>, <&lane0 PHY_TYPE_DP 1 3 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
index 93ce7eb81498..15be15f80a5d 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
@@ -10,9 +10,11 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU106 RevA";
@@ -20,6 +22,7 @@
aliases {
ethernet0 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
mmc0 = &sdhci1;
@@ -27,11 +30,14 @@
serial0 = &uart0;
serial1 = &uart1;
serial2 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
};
chosen {
bootargs = "earlycon";
stdout-path = "serial0:115200n8";
+ xlnx,eeprom = &eeprom;
};
memory@0 {
@@ -46,14 +52,14 @@
label = "sw19";
gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
linux,code = <KEY_DOWN>;
- wakeup-source;
+ gpio-key,wakeup;
autorepeat;
};
};
leds {
compatible = "gpio-leds";
- heartbeat-led {
+ heartbeat_led {
label = "heartbeat";
gpios = <&gpio 23 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
@@ -63,13 +69,14 @@
&can1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_default>;
};
&dcc {
status = "okay";
};
-/* fpd_dma clk 667MHz, lpd_dma 500MHz */
&fpd_dma_chan1 {
status = "okay";
};
@@ -106,7 +113,9 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
- phy0: phy@c {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
+ phy0: ethernet-phy@c {
reg = <0xc>;
ti,rx-internal-delay = <0x8>;
ti,tx-internal-delay = <0xa>;
@@ -117,11 +126,22 @@
&gpio {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_default>;
+};
+
+&gpu {
+ status = "okay";
};
&i2c0 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 14 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 15 GPIO_ACTIVE_HIGH>;
tca6416_u97: gpio@20 {
compatible = "ti,tca6416";
@@ -344,6 +364,11 @@
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
/* PL i2c via PCA9306 - u45 */
i2c-mux@74 { /* u34 */
@@ -373,6 +398,7 @@
#size-cells = <0>;
reg = <1>;
si5341: clock-generator@36 { /* SI5341 - u69 */
+ compatible = "si5341";
reg = <0x36>;
};
@@ -388,6 +414,7 @@
temperature-stability = <50>;
factory-fout = <300000000>;
clock-frequency = <300000000>;
+ clock-output-names = "si570_user";
};
};
i2c@3 {
@@ -401,6 +428,7 @@
temperature-stability = <50>; /* copy from zc702 */
factory-fout = <156250000>;
clock-frequency = <148500000>;
+ clock-output-names = "si570_mgt";
};
};
i2c@4 {
@@ -408,6 +436,7 @@
#size-cells = <0>;
reg = <4>;
si5328: clock-generator@69 {/* SI5328 - u20 */
+ compatible = "silabs,si5328";
reg = <0x69>;
};
};
@@ -480,6 +509,299 @@
};
};
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_3_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_3_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_5_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_5_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO21";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO20";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_can1_default: can1-default {
+ mux {
+ function = "can1";
+ groups = "can1_6_grp";
+ };
+
+ conf {
+ groups = "can1_6_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO25";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO24";
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-wp {
+ groups = "sdio1_wp_0_grp";
+ function = "sdio1_wp";
+ };
+
+ conf-wp {
+ groups = "sdio1_wp_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_gpio_default: gpio-default {
+ mux {
+ function = "gpio0";
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ };
+
+ conf {
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-msp {
+ function = "gpio0";
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ };
+
+ conf-msp {
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-pull-up {
+ pins = "MIO22";
+ bias-pull-up;
+ };
+
+ conf-pull-none {
+ pins = "MIO13", "MIO23", "MIO38";
+ bias-disable;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ is-dual = <1>;
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -495,27 +817,75 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 1 125000000>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
- no-1-8-v;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ xlnx,mio_bank = <1>;
+};
+
+&serdes {
+ status = "okay";
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
};
&watchdog0 {
status = "okay";
};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 3 27000000>, <&lane0 PHY_TYPE_DP 1 3 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
index 8bb0001a026f..f90fad4cc645 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
@@ -10,9 +10,11 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU111 RevA";
@@ -20,17 +22,21 @@
aliases {
ethernet0 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
mmc0 = &sdhci1;
rtc0 = &rtc;
serial0 = &uart0;
serial1 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
};
chosen {
bootargs = "earlycon";
stdout-path = "serial0:115200n8";
+ xlnx,eeprom = &eeprom;
};
memory@0 {
@@ -46,14 +52,14 @@
label = "sw19";
gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
linux,code = <KEY_DOWN>;
- wakeup-source;
+ gpio-key,wakeup;
autorepeat;
};
};
leds {
compatible = "gpio-leds";
- heartbeat-led {
+ heartbeat_led {
label = "heartbeat";
gpios = <&gpio 23 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
@@ -101,7 +107,9 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
- phy0: phy@c {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
+ phy0: ethernet-phy@c {
reg = <0xc>;
ti,rx-internal-delay = <0x8>;
ti,tx-internal-delay = <0xa>;
@@ -112,11 +120,22 @@
&gpio {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_default>;
+};
+
+&gpu {
+ status = "okay";
};
&i2c0 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 14 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 15 GPIO_ACTIVE_HIGH>;
tca6416_u22: gpio@20 {
compatible = "ti,tca6416";
@@ -234,12 +253,18 @@
#size-cells = <0>;
reg = <2>;
irps5401_43: irps54012@43 { /* IRPS5401 - u53 check these */
+ #clock-cells = <0>;
+ compatible = "infineon,irps5401";
reg = <0x43>;
};
irps5401_44: irps54012@44 { /* IRPS5401 - u55 */
+ #clock-cells = <0>;
+ compatible = "infineon,irps5401";
reg = <0x44>;
};
irps5401_45: irps54012@45 { /* IRPS5401 - u57 */
+ #clock-cells = <0>;
+ compatible = "infineon,irps5401";
reg = <0x45>;
};
/* u68 IR38064 +0 */
@@ -261,6 +286,11 @@
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
i2c-mux@74 { /* u26 */
compatible = "nxp,pca9548";
@@ -289,6 +319,7 @@
#size-cells = <0>;
reg = <1>;
si5341: clock-generator@36 { /* SI5341 - u46 */
+ compatible = "si5341";
reg = <0x36>;
};
@@ -304,6 +335,7 @@
temperature-stability = <50>;
factory-fout = <300000000>;
clock-frequency = <300000000>;
+ clock-output-names = "si570_user";
};
};
i2c@3 {
@@ -317,6 +349,7 @@
temperature-stability = <50>;
factory-fout = <156250000>;
clock-frequency = <148500000>;
+ clock-output-names = "si570_mgt";
};
};
i2c@4 {
@@ -324,6 +357,7 @@
#size-cells = <0>;
reg = <4>;
si5328: clock-generator@69 { /* SI5328 - u48 */
+ compatible = "silabs,si5328";
reg = <0x69>;
};
};
@@ -410,6 +444,240 @@
};
};
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_3_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_3_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_gpio_default: gpio-default {
+ mux {
+ function = "gpio0";
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ };
+
+ conf {
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-msp {
+ function = "gpio0";
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ };
+
+ conf-msp {
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-pull-up {
+ pins = "MIO22";
+ bias-pull-up;
+ };
+
+ conf-pull-none {
+ pins = "MIO13", "MIO23", "MIO38";
+ bias-disable;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ is-dual = <1>;
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -425,19 +693,66 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 3 125000000>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
- no-1-8-v;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ disable-wp;
+ xlnx,mio_bank = <1>;
+};
+
+&serdes {
+ status = "okay";
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 1 27000000>, <&lane0 PHY_TYPE_DP 1 1 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revA.dts
new file mode 100644
index 000000000000..9a85f8873836
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revA.dts
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * dts file for Xilinx ZynqMP ZCU1275
+ *
+ * (C) Copyright 2017 - 2018, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ * Siva Durga Prasad Paladugu <sivadur@xilinx.com>
+ */
+
+/dts-v1/;
+
+#include "zynqmp.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+
+/ {
+ model = "ZynqMP ZCU1275 RevA";
+ compatible = "xlnx,zynqmp-zcu1275-revA", "xlnx,zynqmp-zcu1275", "xlnx,zynqmp";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &dcc;
+ spi0 = &qspi;
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+};
+
+&dcc {
+ status = "okay";
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revB.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revB.dts
new file mode 100644
index 000000000000..4b2562b61474
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revB.dts
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dts file for Xilinx ZynqMP ZCU1275 RevB
+ *
+ * (C) Copyright 2018, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ * Siva Durga Prasad Paladugu <sivadur@xilinx.com>
+ */
+
+/dts-v1/;
+
+#include "zynqmp.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+
+/ {
+ model = "ZynqMP ZCU1275 RevB";
+ compatible = "xlnx,zynqmp-zcu1275-revB", "xlnx,zynqmp-zcu1275", "xlnx,zynqmp";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &dcc;
+ spi0 = &qspi;
+ mmc0 = &sdhci1;
+ ethernet0 = &gem1;
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+};
+
+&dcc {
+ status = "okay";
+};
+
+&gem1 {
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy1: ethernet-phy@1 {
+ reg = <1>; /* KSZ9031RNXIC on AES-FMC-NETW1-G */
+ rxc-skew-ps = <1800>; /* Skew control of RX_CLK pad output */
+ txc-skew-ps = <1800>; /* Skew control of GTX_CLK pad input */
+ txen-skew-ps = <900>; /* Skew control of TX_CTL pad input */
+ rxdv-skew-ps = <0>; /* Skew control of RX_CTL pad output */
+ rxd0-skew-ps = <0>; /* Skew control of RXD0 pad output */
+ rxd1-skew-ps = <0>; /* Skew control of RXD1 pad output */
+ rxd2-skew-ps = <0>; /* Skew control of RXD2 pad output */
+ rxd3-skew-ps = <0>; /* Skew control of RXD3 pad output */
+ txd0-skew-ps = <900>; /* Skew control of TXD0 pad input */
+ txd1-skew-ps = <900>; /* Skew control of TXD1 pad input */
+ txd2-skew-ps = <900>; /* Skew control of TXD2 pad input */
+ txd3-skew-ps = <900>; /* Skew control of TXD3 pad input */
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <1>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&sdhci1 {
+ status = "okay";
+ xlnx,mio_bank = <1>;
+ /*
+ * The 1.0 revision has a level shifter, so this property should be
+ * removed to support UHS modes.
+ */
+ no-1-8-v;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu1285-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu1285-revA.dts
new file mode 100644
index 000000000000..8b0819696869
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu1285-revA.dts
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dts file for Xilinx ZynqMP ZCU1285 RevA
+ *
+ * (C) Copyright 2018 - 2019, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ * Siva Durga Prasad Paladugu <sivadur@xilinx.com>
+ */
+
+/dts-v1/;
+
+#include "zynqmp.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+
+/ {
+ model = "ZynqMP ZCU1285 RevA";
+ compatible = "xlnx,zynqmp-zcu1285-revA", "xlnx,zynqmp-zcu1285", "xlnx,zynqmp";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &dcc;
+ spi0 = &qspi;
+ mmc0 = &sdhci1;
+ ethernet0 = &gem1; /* EMIO */
+ ethernet1 = &gem3; /* PS ethernet */
+ i2c = &i2c0; /* EMIO */
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+};
+
+&dcc {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ i2c-mux@75 {
+ compatible = "nxp,pca9548"; /* u22 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x75>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ /* PMBUS */
+ max20751@74 { /* u23 */
+ compatible = "maxim,max20751";
+ reg = <0x74>;
+ };
+ max20751@70 { /* u89 */
+ compatible = "maxim,max20751";
+ reg = <0x70>;
+ };
+ max15301@a { /* u28 */
+ compatible = "maxim,max15301";
+ reg = <0xa>;
+ };
+ max15303@b { /* u48 */
+ compatible = "maxim,max15303";
+ reg = <0xb>;
+ };
+ max15303@d { /* u27 */
+ compatible = "maxim,max15303";
+ reg = <0xd>;
+ };
+ max15303@e { /* u11 */
+ compatible = "maxim,max15303";
+ reg = <0xe>;
+ };
+ max15303@f { /* u96 */
+ compatible = "maxim,max15303";
+ reg = <0xf>;
+ };
+ max15303@11 { /* u47 */
+ compatible = "maxim,max15303";
+ reg = <0x11>;
+ };
+ max15303@12 { /* u24 */
+ compatible = "maxim,max15303";
+ reg = <0x12>;
+ };
+ max15301@13 { /* u29 */
+ compatible = "maxim,max15301";
+ reg = <0x13>;
+ };
+ max15303@14 { /* u51 */
+ compatible = "maxim,max15303";
+ reg = <0x14>;
+ };
+ max15303@15 { /* u30 */
+ compatible = "maxim,max15303";
+ reg = <0x15>;
+ };
+ max15303@16 { /* u102 */
+ compatible = "maxim,max15303";
+ reg = <0x16>;
+ };
+ max15301@17 { /* u50 */
+ compatible = "maxim,max15301";
+ reg = <0x17>;
+ };
+ max15301@18 { /* u31 */
+ compatible = "maxim,max15301";
+ reg = <0x18>;
+ };
+ };
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ /* CM_I2C */
+ };
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ /* SYS_EEPROM */
+ eeprom: eeprom@54 { /* u101 */
+ compatible = "atmel,24c32"; /* 24LC32A */
+ reg = <0x54>;
+ };
+ };
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ /* FMC1 */
+ };
+ i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ /* FMC2 */
+ };
+ i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ /* ANALOG_PMBUS */
+ ina226@40 { /* u60 */
+ compatible = "ti,ina226";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+ ina226@41 { /* u61 */
+ compatible = "ti,ina226";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+ ina226@42 { /* u63 */
+ compatible = "ti,ina226";
+ reg = <0x42>;
+ shunt-resistor = <1000>;
+ };
+ ina226@43 { /* u65 */
+ compatible = "ti,ina226";
+ reg = <0x43>;
+ shunt-resistor = <1000>;
+ };
+ ina226@44 { /* u64 */
+ compatible = "ti,ina226";
+ reg = <0x44>;
+ shunt-resistor = <1000>;
+ };
+ };
+ i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ /* ANALOG_CM_I2C */
+ };
+ i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ /* FMC3 */
+ };
+ };
+};
+
+&gem1 {
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy1: ethernet-phy@1 {
+ reg = <1>; /* KSZ9031RNXIC on AES-FMC-NETW1-G */
+ rxc-skew-ps = <1800>; /* Skew control of RX_CLK pad output */
+ txc-skew-ps = <1800>; /* Skew control of GTX_CLK pad input */
+ txen-skew-ps = <900>; /* Skew control of TX_CTL pad input */
+ rxdv-skew-ps = <0>; /* Skew control of RX_CTL pad output */
+ rxd0-skew-ps = <0>; /* Skew control of RXD0 pad output */
+ rxd1-skew-ps = <0>; /* Skew control of RXD1 pad output */
+ rxd2-skew-ps = <0>; /* Skew control of RXD2 pad output */
+ rxd3-skew-ps = <0>; /* Skew control of RXD3 pad output */
+ txd0-skew-ps = <900>; /* Skew control of TXD0 pad input */
+ txd1-skew-ps = <900>; /* Skew control of TXD1 pad input */
+ txd2-skew-ps = <900>; /* Skew control of TXD2 pad input */
+ txd3-skew-ps = <900>; /* Skew control of TXD3 pad input */
+ };
+ };
+};
+
+&gem3 {
+ status = "okay";
+ phy-mode = "rgmii";
+ phy-handle = <&phy2>;
+ phy2: ethernet-phy@1 {
+ reg = <1>; /* KSZ9031RNXIC */
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <1>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&sdhci1 {
+ status = "okay";
+ xlnx,mio_bank = <1>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index b92549fb3240..3f08f0b067ca 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -12,6 +12,9 @@
* the License, or (at your option) any later version.
*/
+#include <dt-bindings/power/xlnx-zynqmp-power.h>
+#include <dt-bindings/reset/xlnx-zynqmp-resets.h>
+
/ {
compatible = "xlnx,zynqmp";
#address-cells = <2>;
@@ -71,7 +74,7 @@
};
};
- cpu_opp_table: cpu-opp-table {
+ cpu_opp_table: cpu_opp_table {
compatible = "operating-points-v2";
opp-shared;
opp00 {
@@ -99,6 +102,28 @@
dcc: dcc {
compatible = "arm,dcc";
status = "disabled";
+ u-boot,dm-pre-reloc;
+ };
+
+ zynqmp_ipi {
+ compatible = "xlnx,zynqmp-ipi-mailbox";
+ interrupt-parent = <&gic>;
+ interrupts = <0 35 4>;
+ xlnx,ipi-id = <0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ipi_mailbox_pmu1: mailbox@ff990400 {
+ reg = <0x0 0xff9905c0 0x0 0x20>,
+ <0x0 0xff9905e0 0x0 0x20>,
+ <0x0 0xff990e80 0x0 0x20>,
+ <0x0 0xff990ea0 0x0 0x20>;
+ reg-names = "local_request_region", "local_response_region",
+ "remote_request_region", "remote_response_region";
+ #mbox-cells = <1>;
+ xlnx,ipi-id = <4>;
+ };
};
pmu {
@@ -115,6 +140,34 @@
method = "smc";
};
+ firmware {
+ zynqmp_firmware: zynqmp-firmware {
+ compatible = "xlnx,zynqmp-firmware";
+ method = "smc";
+ #power-domain-cells = <0x1>;
+ u-boot,dm-pre-reloc;
+
+ zynqmp_power: zynqmp-power {
+ compatible = "xlnx,zynqmp-power";
+ interrupt-parent = <&gic>;
+ interrupts = <0 35 4>;
+ mboxes = <&ipi_mailbox_pmu1 0>,
+ <&ipi_mailbox_pmu1 1>;
+ mbox-names = "tx", "rx";
+ };
+
+ zynqmp_reset: reset-controller {
+ compatible = "xlnx,zynqmp-reset";
+ #reset-cells = <1>;
+ };
+
+ pinctrl0: pinctrl {
+ compatible = "xlnx,zynqmp-pinctrl";
+ status = "disabled";
+ };
+ };
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupt-parent = <&gic>;
@@ -124,7 +177,94 @@
<1 10 0xf08>;
};
- amba_apu: amba-apu@0 {
+ edac {
+ compatible = "arm,cortex-a53-edac";
+ };
+
+ fpga_full: fpga-full {
+ compatible = "fpga-region";
+ fpga-mgr = <&pcap>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ };
+
+ nvmem_firmware {
+ compatible = "xlnx,zynqmp-nvmem-fw";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ soc_revision: soc_revision@0 {
+ reg = <0x0 0x4>;
+ };
+ /* efuse access */
+ efuse_dna: efuse_dna@c {
+ reg = <0xc 0xc>;
+ };
+ efuse_usr0: efuse_usr0@20 {
+ reg = <0x20 0x4>;
+ };
+ efuse_usr1: efuse_usr1@24 {
+ reg = <0x24 0x4>;
+ };
+ efuse_usr2: efuse_usr2@28 {
+ reg = <0x28 0x4>;
+ };
+ efuse_usr3: efuse_usr3@2c {
+ reg = <0x2c 0x4>;
+ };
+ efuse_usr4: efuse_usr4@30 {
+ reg = <0x30 0x4>;
+ };
+ efuse_usr5: efuse_usr5@34 {
+ reg = <0x34 0x4>;
+ };
+ efuse_usr6: efuse_usr6@38 {
+ reg = <0x38 0x4>;
+ };
+ efuse_usr7: efuse_usr7@3c {
+ reg = <0x3c 0x4>;
+ };
+ efuse_miscusr: efuse_miscusr@40 {
+ reg = <0x40 0x4>;
+ };
+ efuse_chash: efuse_chash@50 {
+ reg = <0x50 0x4>;
+ };
+ efuse_pufmisc: efuse_pufmisc@54 {
+ reg = <0x54 0x4>;
+ };
+ efuse_sec: efuse_sec@58 {
+ reg = <0x58 0x4>;
+ };
+ efuse_spkid: efuse_spkid@5c {
+ reg = <0x5c 0x4>;
+ };
+ efuse_ppk0hash: efuse_ppk0hash@a0 {
+ reg = <0xa0 0x30>;
+ };
+ efuse_ppk1hash: efuse_ppk1hash@d0 {
+ reg = <0xd0 0x30>;
+ };
+ };
+
+ pcap: pcap {
+ compatible = "xlnx,zynqmp-pcap-fpga";
+ clock-names = "ref_clk";
+ };
+
+ xlnx_rsa: zynqmp_rsa {
+ compatible = "xlnx,zynqmp-rsa";
+ };
+
+ xlnx_keccak_384: sha384 {
+ compatible = "xlnx,zynqmp-keccak-384";
+ };
+
+ xlnx_aes: zynqmp_aes {
+ compatible = "xlnx,zynqmp-aes";
+ };
+
+ amba_apu: amba_apu@0 {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <1>;
@@ -143,8 +283,23 @@
};
};
+ smmu: smmu@fd800000 {
+ compatible = "arm,mmu-500";
+ reg = <0x0 0xfd800000 0x0 0x20000>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ #global-interrupts = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 155 4>,
+ <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
+ <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
+ <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
+ <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>;
+ };
+
amba: amba {
compatible = "simple-bus";
+ u-boot,dm-pre-reloc;
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -158,6 +313,7 @@
interrupt-parent = <&gic>;
tx-fifo-depth = <0x40>;
rx-fifo-depth = <0x40>;
+ power-domains = <&zynqmp_firmware PD_CAN_0>;
};
can1: can@ff070000 {
@@ -169,6 +325,7 @@
interrupt-parent = <&gic>;
tx-fifo-depth = <0x40>;
rx-fifo-depth = <0x40>;
+ power-domains = <&zynqmp_firmware PD_CAN_1>;
};
cci: cci@fd6e0000 {
@@ -199,6 +356,9 @@
interrupts = <0 124 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14e8>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
};
fpd_dma_chan2: dma@fd510000 {
@@ -209,6 +369,9 @@
interrupts = <0 125 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14e9>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
};
fpd_dma_chan3: dma@fd520000 {
@@ -219,6 +382,9 @@
interrupts = <0 126 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ea>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
};
fpd_dma_chan4: dma@fd530000 {
@@ -229,6 +395,9 @@
interrupts = <0 127 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14eb>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
};
fpd_dma_chan5: dma@fd540000 {
@@ -239,6 +408,9 @@
interrupts = <0 128 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ec>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
};
fpd_dma_chan6: dma@fd550000 {
@@ -249,6 +421,9 @@
interrupts = <0 129 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ed>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
};
fpd_dma_chan7: dma@fd560000 {
@@ -259,6 +434,9 @@
interrupts = <0 130 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ee>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
};
fpd_dma_chan8: dma@fd570000 {
@@ -269,6 +447,20 @@
interrupts = <0 131 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ef>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
+ };
+
+ gpu: gpu@fd4b0000 {
+ status = "disabled";
+ compatible = "arm,mali-400", "arm,mali-utgard";
+ reg = <0x0 0xfd4b0000 0x0 0x10000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 132 4>, <0 132 4>, <0 132 4>, <0 132 4>, <0 132 4>, <0 132 4>;
+ interrupt-names = "IRQGP", "IRQGPMMU", "IRQPP0", "IRQPPMMU0", "IRQPP1", "IRQPPMMU1";
+ clock-names = "gpu", "gpu_pp0", "gpu_pp1";
+ power-domains = <&zynqmp_firmware PD_GPU>;
};
/* LPDDMA default allows only secured access. In order to enable
@@ -283,6 +475,9 @@
interrupts = <0 77 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x868>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
lpd_dma_chan2: dma@ffa90000 {
@@ -293,6 +488,9 @@
interrupts = <0 78 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x869>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
lpd_dma_chan3: dma@ffaa0000 {
@@ -303,6 +501,9 @@
interrupts = <0 79 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86a>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
lpd_dma_chan4: dma@ffab0000 {
@@ -313,6 +514,9 @@
interrupts = <0 80 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86b>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
lpd_dma_chan5: dma@ffac0000 {
@@ -323,6 +527,9 @@
interrupts = <0 81 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86c>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
lpd_dma_chan6: dma@ffad0000 {
@@ -333,6 +540,9 @@
interrupts = <0 82 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86d>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
lpd_dma_chan7: dma@ffae0000 {
@@ -343,6 +553,9 @@
interrupts = <0 83 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86e>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
lpd_dma_chan8: dma@ffaf0000 {
@@ -353,6 +566,9 @@
interrupts = <0 84 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86f>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
mc: memory-controller@fd070000 {
@@ -362,6 +578,20 @@
interrupts = <0 112 4>;
};
+ nand0: nand@ff100000 {
+ compatible = "arasan,nfc-v3p10";
+ status = "disabled";
+ reg = <0x0 0xff100000 0x0 0x1000>;
+ clock-names = "clk_sys", "clk_flash";
+ interrupt-parent = <&gic>;
+ interrupts = <0 14 4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x872>;
+ power-domains = <&zynqmp_firmware PD_NAND>;
+ };
+
gem0: ethernet@ff0b0000 {
compatible = "cdns,zynqmp-gem", "cdns,gem";
status = "disabled";
@@ -371,6 +601,9 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x874>;
+ power-domains = <&zynqmp_firmware PD_ETH_0>;
};
gem1: ethernet@ff0c0000 {
@@ -382,6 +615,9 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x875>;
+ power-domains = <&zynqmp_firmware PD_ETH_1>;
};
gem2: ethernet@ff0d0000 {
@@ -393,6 +629,9 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x876>;
+ power-domains = <&zynqmp_firmware PD_ETH_2>;
};
gem3: ethernet@ff0e0000 {
@@ -404,18 +643,22 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x877>;
+ power-domains = <&zynqmp_firmware PD_ETH_3>;
};
gpio: gpio@ff0a0000 {
compatible = "xlnx,zynqmp-gpio-1.0";
status = "disabled";
#gpio-cells = <0x2>;
- gpio-controller;
interrupt-parent = <&gic>;
interrupts = <0 16 4>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x0 0xff0a0000 0x0 0x1000>;
+ gpio-controller;
+ power-domains = <&zynqmp_firmware PD_GPIO>;
};
i2c0: i2c@ff020000 {
@@ -426,6 +669,7 @@
reg = <0x0 0xff020000 0x0 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
+ power-domains = <&zynqmp_firmware PD_I2C_0>;
};
i2c1: i2c@ff030000 {
@@ -436,6 +680,86 @@
reg = <0x0 0xff030000 0x0 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
+ power-domains = <&zynqmp_firmware PD_I2C_1>;
+ };
+
+ ocm: memory-controller@ff960000 {
+ compatible = "xlnx,zynqmp-ocmc-1.0";
+ reg = <0x0 0xff960000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 10 4>;
+ };
+
+ perf_monitor_ocm: perf-monitor@ffa00000 {
+ compatible = "xlnx,axi-perf-monitor";
+ reg = <0x0 0xffa00000 0x0 0x10000>;
+ interrupts = <0 25 4>;
+ interrupt-parent = <&gic>;
+ xlnx,enable-profile = <0>;
+ xlnx,enable-trace = <0>;
+ xlnx,num-monitor-slots = <1>;
+ xlnx,enable-event-count = <1>;
+ xlnx,enable-event-log = <1>;
+ xlnx,have-sampled-metric-cnt = <1>;
+ xlnx,num-of-counters = <8>;
+ xlnx,metric-count-width = <32>;
+ xlnx,metrics-sample-count-width = <32>;
+ xlnx,global-count-width = <32>;
+ xlnx,metric-count-scale = <1>;
+ };
+
+ perf_monitor_ddr: perf-monitor@fd0b0000 {
+ compatible = "xlnx,axi-perf-monitor";
+ reg = <0x0 0xfd0b0000 0x0 0x10000>;
+ interrupts = <0 123 4>;
+ interrupt-parent = <&gic>;
+ xlnx,enable-profile = <0>;
+ xlnx,enable-trace = <0>;
+ xlnx,num-monitor-slots = <6>;
+ xlnx,enable-event-count = <1>;
+ xlnx,enable-event-log = <0>;
+ xlnx,have-sampled-metric-cnt = <1>;
+ xlnx,num-of-counters = <10>;
+ xlnx,metric-count-width = <32>;
+ xlnx,metrics-sample-count-width = <32>;
+ xlnx,global-count-width = <32>;
+ xlnx,metric-count-scale = <1>;
+ };
+
+ perf_monitor_cci: perf-monitor@fd490000 {
+ compatible = "xlnx,axi-perf-monitor";
+ reg = <0x0 0xfd490000 0x0 0x10000>;
+ interrupts = <0 123 4>;
+ interrupt-parent = <&gic>;
+ xlnx,enable-profile = <0>;
+ xlnx,enable-trace = <0>;
+ xlnx,num-monitor-slots = <1>;
+ xlnx,enable-event-count = <1>;
+ xlnx,enable-event-log = <0>;
+ xlnx,have-sampled-metric-cnt = <1>;
+ xlnx,num-of-counters = <8>;
+ xlnx,metric-count-width = <32>;
+ xlnx,metrics-sample-count-width = <32>;
+ xlnx,global-count-width = <32>;
+ xlnx,metric-count-scale = <1>;
+ };
+
+ perf_monitor_lpd: perf-monitor@ffa10000 {
+ compatible = "xlnx,axi-perf-monitor";
+ reg = <0x0 0xffa10000 0x0 0x10000>;
+ interrupts = <0 25 4>;
+ interrupt-parent = <&gic>;
+ xlnx,enable-profile = <0>;
+ xlnx,enable-trace = <0>;
+ xlnx,num-monitor-slots = <1>;
+ xlnx,enable-event-count = <1>;
+ xlnx,enable-event-log = <1>;
+ xlnx,have-sampled-metric-cnt = <1>;
+ xlnx,num-of-counters = <8>;
+ xlnx,metric-count-width = <32>;
+ xlnx,metrics-sample-count-width = <32>;
+ xlnx,global-count-width = <32>;
+ xlnx,metric-count-scale = <1>;
};
pcie: pcie@fd0e0000 {
@@ -467,6 +791,7 @@
<0x0 0x0 0x0 0x2 &pcie_intc 0x2>,
<0x0 0x0 0x0 0x3 &pcie_intc 0x3>,
<0x0 0x0 0x0 0x4 &pcie_intc 0x4>;
+ power-domains = <&zynqmp_firmware PD_PCIE>;
pcie_intc: legacy-interrupt-controller {
interrupt-controller;
#address-cells = <0>;
@@ -474,6 +799,23 @@
};
};
+ qspi: spi@ff0f0000 {
+ u-boot,dm-pre-reloc;
+ compatible = "xlnx,zynqmp-qspi-1.0";
+ status = "disabled";
+ clock-names = "ref_clk", "pclk";
+ interrupts = <0 15 4>;
+ interrupt-parent = <&gic>;
+ num-cs = <1>;
+ reg = <0x0 0xff0f0000 0x0 0x1000>,
+ <0x0 0xc0000000 0x0 0x8000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x873>;
+ power-domains = <&zynqmp_firmware PD_QSPI>;
+ };
+
rtc: rtc@ffa60000 {
compatible = "xlnx,zynqmp-rtc";
status = "disabled";
@@ -484,43 +826,87 @@
calibration = <0x8000>;
};
+ serdes: zynqmp_phy@fd400000 {
+ compatible = "xlnx,zynqmp-psgtr-v1.1";
+ status = "disabled";
+ reg = <0x0 0xfd400000 0x0 0x40000>,
+ <0x0 0xfd3d0000 0x0 0x1000>;
+ reg-names = "serdes", "siou";
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
+ resets = <&zynqmp_reset ZYNQMP_RESET_SATA>,
+ <&zynqmp_reset ZYNQMP_RESET_USB0_CORERESET>,
+ <&zynqmp_reset ZYNQMP_RESET_USB1_CORERESET>,
+ <&zynqmp_reset ZYNQMP_RESET_USB0_HIBERRESET>,
+ <&zynqmp_reset ZYNQMP_RESET_USB1_HIBERRESET>,
+ <&zynqmp_reset ZYNQMP_RESET_USB0_APB>,
+ <&zynqmp_reset ZYNQMP_RESET_USB1_APB>,
+ <&zynqmp_reset ZYNQMP_RESET_DP>,
+ <&zynqmp_reset ZYNQMP_RESET_GEM0>,
+ <&zynqmp_reset ZYNQMP_RESET_GEM1>,
+ <&zynqmp_reset ZYNQMP_RESET_GEM2>,
+ <&zynqmp_reset ZYNQMP_RESET_GEM3>;
+ reset-names = "sata_rst", "usb0_crst", "usb1_crst",
+ "usb0_hibrst", "usb1_hibrst", "usb0_apbrst",
+ "usb1_apbrst", "dp_rst", "gem0_rst",
+ "gem1_rst", "gem2_rst", "gem3_rst";
+ lane0: lane0 {
+ #phy-cells = <4>;
+ };
+ lane1: lane1 {
+ #phy-cells = <4>;
+ };
+ lane2: lane2 {
+ #phy-cells = <4>;
+ };
+ lane3: lane3 {
+ #phy-cells = <4>;
+ };
+ };
+
sata: ahci@fd0c0000 {
compatible = "ceva,ahci-1v84";
status = "disabled";
reg = <0x0 0xfd0c0000 0x0 0x2000>;
interrupt-parent = <&gic>;
interrupts = <0 133 4>;
+ power-domains = <&zynqmp_firmware PD_SATA>;
+ #stream-id-cells = <4>;
+ /* iommus = <&smmu 0x4c0>, <&smmu 0x4c1>, */
+ /* <&smmu 0x4c2>, <&smmu 0x4c3>; */
+ /* dma-coherent; */
};
sdhci0: mmc@ff160000 {
- compatible = "arasan,sdhci-8.9a";
+ u-boot,dm-pre-reloc;
+ compatible = "xlnx,zynqmp-8.9a", "arasan,sdhci-8.9a";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 48 4>;
reg = <0x0 0xff160000 0x0 0x1000>;
clock-names = "clk_xin", "clk_ahb";
+ xlnx,device_id = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x870>;
+ power-domains = <&zynqmp_firmware PD_SD_0>;
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
};
sdhci1: mmc@ff170000 {
- compatible = "arasan,sdhci-8.9a";
+ u-boot,dm-pre-reloc;
+ compatible = "xlnx,zynqmp-8.9a", "arasan,sdhci-8.9a";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 49 4>;
reg = <0x0 0xff170000 0x0 0x1000>;
clock-names = "clk_xin", "clk_ahb";
- };
-
- smmu: smmu@fd800000 {
- compatible = "arm,mmu-500";
- reg = <0x0 0xfd800000 0x0 0x20000>;
- status = "disabled";
- #global-interrupts = <1>;
- interrupt-parent = <&gic>;
- interrupts = <0 155 4>,
- <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
- <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
- <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
- <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>;
+ xlnx,device_id = <1>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x871>;
+ power-domains = <&zynqmp_firmware PD_SD_1>;
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
};
spi0: spi@ff040000 {
@@ -532,6 +918,7 @@
clock-names = "ref_clk", "pclk";
#address-cells = <1>;
#size-cells = <0>;
+ power-domains = <&zynqmp_firmware PD_SPI_0>;
};
spi1: spi@ff050000 {
@@ -543,6 +930,7 @@
clock-names = "ref_clk", "pclk";
#address-cells = <1>;
#size-cells = <0>;
+ power-domains = <&zynqmp_firmware PD_SPI_1>;
};
ttc0: timer@ff110000 {
@@ -552,6 +940,7 @@
interrupts = <0 36 4>, <0 37 4>, <0 38 4>;
reg = <0x0 0xff110000 0x0 0x1000>;
timer-width = <32>;
+ power-domains = <&zynqmp_firmware PD_TTC_0>;
};
ttc1: timer@ff120000 {
@@ -561,6 +950,7 @@
interrupts = <0 39 4>, <0 40 4>, <0 41 4>;
reg = <0x0 0xff120000 0x0 0x1000>;
timer-width = <32>;
+ power-domains = <&zynqmp_firmware PD_TTC_1>;
};
ttc2: timer@ff130000 {
@@ -570,6 +960,7 @@
interrupts = <0 42 4>, <0 43 4>, <0 44 4>;
reg = <0x0 0xff130000 0x0 0x1000>;
timer-width = <32>;
+ power-domains = <&zynqmp_firmware PD_TTC_2>;
};
ttc3: timer@ff140000 {
@@ -579,42 +970,90 @@
interrupts = <0 45 4>, <0 46 4>, <0 47 4>;
reg = <0x0 0xff140000 0x0 0x1000>;
timer-width = <32>;
+ power-domains = <&zynqmp_firmware PD_TTC_3>;
};
uart0: serial@ff000000 {
- compatible = "xlnx,zynqmp-uart", "cdns,uart-r1p12";
+ u-boot,dm-pre-reloc;
+ compatible = "xlnx,zynqmp-uart", "cdns,uart-r1p12", "xlnx,xuartps";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 21 4>;
reg = <0x0 0xff000000 0x0 0x1000>;
clock-names = "uart_clk", "pclk";
+ power-domains = <&zynqmp_firmware PD_UART_0>;
};
uart1: serial@ff010000 {
- compatible = "xlnx,zynqmp-uart", "cdns,uart-r1p12";
+ u-boot,dm-pre-reloc;
+ compatible = "xlnx,zynqmp-uart","cdns,uart-r1p12", "xlnx,xuartps";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 22 4>;
reg = <0x0 0xff010000 0x0 0x1000>;
clock-names = "uart_clk", "pclk";
+ power-domains = <&zynqmp_firmware PD_UART_1>;
};
- usb0: usb@fe200000 {
- compatible = "snps,dwc3";
+ usb0: usb0@ff9d0000 {
+ #address-cells = <2>;
+ #size-cells = <2>;
status = "disabled";
- interrupt-parent = <&gic>;
- interrupts = <0 65 4>;
- reg = <0x0 0xfe200000 0x0 0x40000>;
- clock-names = "clk_xin", "clk_ahb";
+ compatible = "xlnx,zynqmp-dwc3";
+ reg = <0x0 0xff9d0000 0x0 0x100>;
+ clock-names = "bus_clk", "ref_clk";
+ power-domains = <&zynqmp_firmware PD_USB_0>;
+ ranges;
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
+
+ dwc3_0: dwc3@fe200000 {
+ compatible = "snps,dwc3";
+ status = "disabled";
+ reg = <0x0 0xfe200000 0x0 0x40000>;
+ interrupt-parent = <&gic>;
+ interrupt-names = "dwc_usb3", "otg", "hiber";
+ interrupts = <0 65 4>, <0 69 4>, <0 75 4>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x860>;
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,refclk_fladj;
+ snps,enable_guctl1_resume_quirk;
+ snps,enable_guctl1_ipd_quirk;
+ snps,xhci-stream-quirk;
+ /* dma-coherent; */
+ /* snps,enable-hibernation; */
+ };
};
- usb1: usb@fe300000 {
- compatible = "snps,dwc3";
+ usb1: usb1@ff9e0000 {
+ #address-cells = <2>;
+ #size-cells = <2>;
status = "disabled";
- interrupt-parent = <&gic>;
- interrupts = <0 70 4>;
- reg = <0x0 0xfe300000 0x0 0x40000>;
- clock-names = "clk_xin", "clk_ahb";
+ compatible = "xlnx,zynqmp-dwc3";
+ reg = <0x0 0xff9e0000 0x0 0x100>;
+ clock-names = "bus_clk", "ref_clk";
+ power-domains = <&zynqmp_firmware PD_USB_1>;
+ ranges;
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
+
+ dwc3_1: dwc3@fe300000 {
+ compatible = "snps,dwc3";
+ status = "disabled";
+ reg = <0x0 0xfe300000 0x0 0x40000>;
+ interrupt-parent = <&gic>;
+ interrupt-names = "dwc_usb3", "otg", "hiber";
+ interrupts = <0 70 4>, <0 74 4>, <0 76 4>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x861>;
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,refclk_fladj;
+ snps,enable_guctl1_resume_quirk;
+ snps,enable_guctl1_ipd_quirk;
+ snps,xhci-stream-quirk;
+ /* dma-coherent; */
+ };
};
watchdog0: watchdog@fd4d0000 {
@@ -623,7 +1062,130 @@
interrupt-parent = <&gic>;
interrupts = <0 113 1>;
reg = <0x0 0xfd4d0000 0x0 0x1000>;
+ timeout-sec = <60>;
+ reset-on-timeout;
+ };
+
+ lpd_watchdog: watchdog@ff150000 {
+ compatible = "cdns,wdt-r1p2";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 52 1>;
+ reg = <0x0 0xff150000 0x0 0x1000>;
timeout-sec = <10>;
};
+
+ xilinx_ams: ams@ffa50000 {
+ compatible = "xlnx,zynqmp-ams";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 56 4>;
+ interrupt-names = "ams-irq";
+ reg = <0x0 0xffa50000 0x0 0x800>;
+ reg-names = "ams-base";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ #io-channel-cells = <1>;
+ ranges;
+
+ ams_ps: ams_ps@ffa50800 {
+ compatible = "xlnx,zynqmp-ams-ps";
+ status = "disabled";
+ reg = <0x0 0xffa50800 0x0 0x400>;
+ };
+
+ ams_pl: ams_pl@ffa50c00 {
+ compatible = "xlnx,zynqmp-ams-pl";
+ status = "disabled";
+ reg = <0x0 0xffa50c00 0x0 0x400>;
+ };
+ };
+
+ xlnx_dpdma: dma@fd4c0000 {
+ compatible = "xlnx,dpdma";
+ status = "disabled";
+ reg = <0x0 0xfd4c0000 0x0 0x1000>;
+ interrupts = <0 122 4>;
+ interrupt-parent = <&gic>;
+ clock-names = "axi_clk";
+ power-domains = <&zynqmp_firmware PD_DP>;
+ dma-channels = <6>;
+ #dma-cells = <1>;
+ dma-video0channel {
+ compatible = "xlnx,video0";
+ };
+ dma-video1channel {
+ compatible = "xlnx,video1";
+ };
+ dma-video2channel {
+ compatible = "xlnx,video2";
+ };
+ dma-graphicschannel {
+ compatible = "xlnx,graphics";
+ };
+ dma-audio0channel {
+ compatible = "xlnx,audio0";
+ };
+ dma-audio1channel {
+ compatible = "xlnx,audio1";
+ };
+ };
+
+ zynqmp_dpsub: zynqmp-display@fd4a0000 {
+ compatible = "xlnx,zynqmp-dpsub-1.7";
+ status = "disabled";
+ reg = <0x0 0xfd4a0000 0x0 0x1000>,
+ <0x0 0xfd4aa000 0x0 0x1000>,
+ <0x0 0xfd4ab000 0x0 0x1000>,
+ <0x0 0xfd4ac000 0x0 0x1000>;
+ reg-names = "dp", "blend", "av_buf", "aud";
+ interrupts = <0 119 4>;
+ interrupt-parent = <&gic>;
+
+ clock-names = "dp_apb_clk", "dp_aud_clk",
+ "dp_vtc_pixel_clk_in";
+
+ power-domains = <&zynqmp_firmware PD_DP>;
+
+ vid-layer {
+ dma-names = "vid0", "vid1", "vid2";
+ dmas = <&xlnx_dpdma 0>,
+ <&xlnx_dpdma 1>,
+ <&xlnx_dpdma 2>;
+ };
+
+ gfx-layer {
+ dma-names = "gfx0";
+ dmas = <&xlnx_dpdma 3>;
+ };
+
+ /* dummy node to indicate there's no child i2c device */
+ i2c-bus {
+ };
+
+ zynqmp_dp_snd_codec0: zynqmp_dp_snd_codec0 {
+ compatible = "xlnx,dp-snd-codec";
+ clock-names = "aud_clk";
+ };
+
+ zynqmp_dp_snd_pcm0: zynqmp_dp_snd_pcm0 {
+ compatible = "xlnx,dp-snd-pcm";
+ dmas = <&xlnx_dpdma 4>;
+ dma-names = "tx";
+ };
+
+ zynqmp_dp_snd_pcm1: zynqmp_dp_snd_pcm1 {
+ compatible = "xlnx,dp-snd-pcm";
+ dmas = <&xlnx_dpdma 5>;
+ dma-names = "tx";
+ };
+
+ zynqmp_dp_snd_card0: zynqmp_dp_snd_card {
+ compatible = "xlnx,dp-snd-card";
+ xlnx,dp-snd-pcm = <&zynqmp_dp_snd_pcm0>,
+ <&zynqmp_dp_snd_pcm1>;
+ xlnx,dp-snd-codec = <&zynqmp_dp_snd_codec0>;
+ };
+ };
};
};
diff --git a/arch/arm64/configs/xilinx_versal_defconfig b/arch/arm64/configs/xilinx_versal_defconfig
new file mode 100644
index 000000000000..d7f8a1814cbe
--- /dev/null
+++ b/arch/arm64/configs/xilinx_versal_defconfig
@@ -0,0 +1,228 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_NR_CPUS=8
+# CONFIG_EFI is not set
+CONFIG_COMPAT=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ZYNQMP_FIRMWARE_DEBUG=y
+CONFIG_MODULES=y
+# CONFIG_SPARSEMEM_VMEMMAP is not set
+# CONFIG_COMPACTION is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_CMA=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_SNMP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_MARK_T=y
+CONFIG_BRIDGE=y
+CONFIG_NET_PKTGEN=y
+CONFIG_CAN=y
+CONFIG_CAN_XILINXCAN=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=256
+CONFIG_MTD=y
+CONFIG_MTD_TESTS=m
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_SPI_CADENCE_QUADSPI=y
+CONFIG_OF_OVERLAY=y
+CONFIG_OF_CONFIGFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT24=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_MACB=y
+CONFIG_DP83848_PHY=y
+CONFIG_DP83867_PHY=y
+CONFIG_MARVELL_PHY=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_REALTEK_PHY=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_AMBA_PL010=y
+CONFIG_SERIAL_AMBA_PL010_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_CADENCE=y
+CONFIG_SPI=y
+CONFIG_SPI_CADENCE=y
+CONFIG_SPI_XILINX=y
+CONFIG_SPI_ZYNQMP_GQSPI=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_ZYNQ=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_XILINX_WATCHDOG=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_XILINX=y
+CONFIG_VIDEO_XILINX_AXI4S_SWITCH=y
+CONFIG_VIDEO_XILINX_CFA=y
+CONFIG_VIDEO_XILINX_CRESAMPLE=y
+CONFIG_VIDEO_XILINX_DEMOSAIC=y
+CONFIG_VIDEO_XILINX_GAMMA=y
+CONFIG_VIDEO_XILINX_HLS=y
+CONFIG_VIDEO_XILINX_REMAPPER=y
+CONFIG_VIDEO_XILINX_RGB2YUV=y
+CONFIG_VIDEO_XILINX_SCALER=y
+CONFIG_VIDEO_XILINX_MULTISCALER=y
+CONFIG_VIDEO_XILINX_SDIRXSS=y
+CONFIG_VIDEO_XILINX_SWITCH=y
+CONFIG_VIDEO_XILINX_TPG=y
+CONFIG_VIDEO_XILINX_VPSS_CSC=y
+CONFIG_VIDEO_XILINX_VPSS_SCALER=y
+CONFIG_VIDEO_XILINX_CSI2RXSS=y
+CONFIG_VIDEO_XILINX_SCD=y
+CONFIG_VIDEO_XILINX_M2M=y
+CONFIG_DRM=y
+CONFIG_DRM_XLNX=y
+CONFIG_DRM_XLNX_BRIDGE=y
+CONFIG_DRM_XLNX_BRIDGE_DEBUG_FS=y
+CONFIG_DRM_XLNX_DSI=y
+CONFIG_DRM_XLNX_MIXER=y
+CONFIG_DRM_XLNX_PL_DISP=y
+CONFIG_DRM_XLNX_SDI=y
+CONFIG_DRM_XLNX_BRIDGE_CSC=y
+CONFIG_DRM_XLNX_BRIDGE_SCALER=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_OTG=y
+CONFIG_USB_OTG_FSM=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_ZYNQMP=y
+CONFIG_XILINX_DMA=y
+CONFIG_XILINX_ZYNQMP_DMA=y
+CONFIG_DMATEST=y
+CONFIG_UIO=y
+CONFIG_UIO_XILINX_AI_ENGINE=y
+CONFIG_STAGING=y
+CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
+CONFIG_ION_CARVEOUT_HEAP=y
+CONFIG_ION_CHUNK_HEAP=y
+CONFIG_ION_CMA_HEAP=y
+# CONFIG_COMMON_CLK_XGENE is not set
+CONFIG_COMMON_CLK_ZYNQMP=y
+# CONFIG_ARM_ARCH_TIMER_EVTSTREAM is not set
+# CONFIG_FSL_ERRATUM_A008585 is not set
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_CCI_PMU=y
+# CONFIG_ARM_PMU is not set
+CONFIG_ANDROID=y
+CONFIG_FPGA=y
+CONFIG_FPGA_BRIDGE=y
+CONFIG_XILINX_PR_DECOUPLER=y
+CONFIG_FPGA_REGION=y
+CONFIG_OF_FPGA_REGION=y
+CONFIG_FPGA_MGR_VERSAL_FPGA=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_BTRFS_FS=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/arm64/configs/xilinx_zynqmp_defconfig b/arch/arm64/configs/xilinx_zynqmp_defconfig
new file mode 100644
index 000000000000..dca3b33687c2
--- /dev/null
+++ b/arch/arm64/configs/xilinx_zynqmp_defconfig
@@ -0,0 +1,396 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CGROUPS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_PCI=y
+CONFIG_PCIE_XILINX_NWL=y
+CONFIG_NR_CPUS=8
+# CONFIG_DMI is not set
+CONFIG_COMPAT=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_CMA=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_MARK_T=y
+CONFIG_BRIDGE=y
+CONFIG_NET_PKTGEN=y
+CONFIG_CAN=y
+CONFIG_CAN_XILINXCAN=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_LEDS=y
+CONFIG_BT_HCIBTUSB=y
+CONFIG_BT_HCIBTSDIO=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_ATH3K=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_BT_HCIUART_INTEL=y
+CONFIG_BT_HCIUART_QCA=y
+CONFIG_BT_HCIBCM203X=y
+CONFIG_BT_HCIBPA10X=y
+CONFIG_BT_HCIBFUSB=y
+CONFIG_BT_HCIVHCI=y
+CONFIG_BT_MRVL=y
+CONFIG_BT_MRVL_SDIO=y
+CONFIG_BT_ATH3K=y
+CONFIG_BT_WILINK=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_REG_RELAX_NO_IR=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211_MESSAGE_TRACING=y
+CONFIG_MAC80211_DEBUG_MENU=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_RFKILL_GPIO=y
+CONFIG_NET_9P=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=256
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_TESTS=m
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ARASAN=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_OF_OVERLAY=y
+CONFIG_OF_CONFIGFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_XILINX_SDFEC=y
+CONFIG_XILINX_JESD204B=y
+CONFIG_XILINX_JESD204B_PHY=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_TI_ST=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+# CONFIG_ATA_SFF is not set
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_MACB=y
+CONFIG_XILINX_EMACLITE=y
+CONFIG_XILINX_AXI_EMAC=y
+CONFIG_AMD_PHY=y
+CONFIG_AT803X_PHY=y
+CONFIG_BCM7XXX_PHY=y
+CONFIG_BCM87XX_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_DP83867_PHY=y
+CONFIG_ICPLUS_PHY=y
+CONFIG_LSI_ET1011C_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_MARVELL_PHY=y
+CONFIG_MICREL_PHY=y
+CONFIG_NATIONAL_PHY=y
+CONFIG_QSEMI_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_STE10XP=y
+CONFIG_VITESSE_PHY=y
+CONFIG_XILINX_GMII2RGMII=y
+CONFIG_USB_USBNET=y
+CONFIG_WL18XX=y
+CONFIG_WLCORE_SPI=y
+CONFIG_WLCORE_SDIO=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_MAX310X=y
+CONFIG_SERIAL_UARTLITE=y
+CONFIG_SERIAL_UARTLITE_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX_PCA9541=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_CADENCE=y
+CONFIG_I2C_XILINX=y
+CONFIG_SPI=y
+CONFIG_SPI_CADENCE=y
+CONFIG_SPI_XILINX=y
+CONFIG_SPI_ZYNQMP_GQSPI=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_XILINX=y
+CONFIG_GPIO_ZYNQ=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_TPS65086=y
+CONFIG_POWER_RESET_LTC2952=y
+CONFIG_SENSORS_IIO_HWMON=y
+CONFIG_PMBUS=y
+CONFIG_SENSORS_MAX20751=y
+CONFIG_SENSORS_INA2XX=y
+CONFIG_WATCHDOG=y
+CONFIG_XILINX_WATCHDOG=y
+CONFIG_CADENCE_WATCHDOG=y
+CONFIG_MFD_TPS65086=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_TPS65086=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_XILINX=y
+CONFIG_VIDEO_XILINX_AXI4S_SWITCH=y
+CONFIG_VIDEO_XILINX_CFA=y
+CONFIG_VIDEO_XILINX_CRESAMPLE=y
+CONFIG_VIDEO_XILINX_DEMOSAIC=y
+CONFIG_VIDEO_XILINX_GAMMA=y
+CONFIG_VIDEO_XILINX_HLS=y
+CONFIG_VIDEO_XILINX_REMAPPER=y
+CONFIG_VIDEO_XILINX_RGB2YUV=y
+CONFIG_VIDEO_XILINX_SCALER=y
+CONFIG_VIDEO_XILINX_MULTISCALER=y
+CONFIG_VIDEO_XILINX_SDIRXSS=y
+CONFIG_VIDEO_XILINX_SWITCH=y
+CONFIG_VIDEO_XILINX_TPG=y
+CONFIG_VIDEO_XILINX_VPSS_CSC=y
+CONFIG_VIDEO_XILINX_VPSS_SCALER=y
+CONFIG_VIDEO_XILINX_CSI2RXSS=y
+CONFIG_VIDEO_XILINX_SCD=y
+CONFIG_VIDEO_XILINX_M2M=y
+# CONFIG_VGA_ARB is not set
+CONFIG_DRM=y
+CONFIG_DRM_XILINX=y
+CONFIG_DRM_XILINX_SDI=y
+CONFIG_DRM_XLNX=y
+CONFIG_DRM_XLNX_BRIDGE=y
+CONFIG_DRM_XLNX_BRIDGE_DEBUG_FS=y
+CONFIG_DRM_ZYNQMP_DPSUB=y
+CONFIG_DRM_XLNX_DSI=y
+CONFIG_DRM_XLNX_MIXER=y
+CONFIG_DRM_XLNX_PL_DISP=y
+CONFIG_DRM_XLNX_SDI=y
+CONFIG_DRM_XLNX_BRIDGE_CSC=y
+CONFIG_DRM_XLNX_BRIDGE_SCALER=y
+CONFIG_FB_XILINX=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_PCI is not set
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_XILINX_DP=y
+CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER=y
+CONFIG_SND_SOC_XILINX_SDI=y
+CONFIG_SND_SOC_XILINX_I2S=y
+CONFIG_SND_SOC_XILINX_SPDIF=y
+CONFIG_SND_SOC_XILINX_PL_SND_CARD=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+# CONFIG_USB_DEFAULT_PERSIST is not set
+CONFIG_USB_OTG=y
+CONFIG_USB_OTG_FSM=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_XILINX=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_EEM=y
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_LEDS_TRIGGER_CAMERA=y
+CONFIG_EDAC=y
+CONFIG_EDAC_SYNOPSYS=y
+CONFIG_EDAC_ZYNQMP_OCM=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_ZYNQMP=y
+CONFIG_XILINX_DMA=y
+CONFIG_XILINX_ZYNQMP_DMA=y
+CONFIG_DMATEST=y
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_DMEM_GENIRQ=m
+CONFIG_UIO_XILINX_APM=y
+CONFIG_STAGING=y
+CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
+CONFIG_ION_CARVEOUT_HEAP=y
+CONFIG_ION_CHUNK_HEAP=y
+CONFIG_ION_CMA_HEAP=y
+CONFIG_COMMON_CLK_XLNX_CLKWZRD=y
+CONFIG_XILINX_FCLK=y
+CONFIG_XLNX_CTRL_FRMBUF=y
+CONFIG_XLNX_CTRL_VPSS=y
+CONFIG_COMMON_CLK_SI570=y
+CONFIG_COMMON_CLK_SI5324=y
+# CONFIG_COMMON_CLK_XGENE is not set
+CONFIG_COMMON_CLK_ZYNQMP=y
+CONFIG_ARM_SMMU=y
+CONFIG_REMOTEPROC=y
+CONFIG_ZYNQMP_R5_REMOTEPROC=m
+CONFIG_XILINX_VCU=m
+CONFIG_IIO=y
+CONFIG_XILINX_XADC=y
+CONFIG_XILINX_AMS=y
+CONFIG_XILINX_INTC=y
+CONFIG_RESET_CONTROLLER=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_NVMEM_ZYNQMP=y
+CONFIG_FPGA=y
+CONFIG_XILINX_AFI_FPGA=y
+CONFIG_FPGA_BRIDGE=y
+CONFIG_XILINX_PR_DECOUPLER=y
+CONFIG_FPGA_REGION=y
+CONFIG_OF_FPGA_REGION=y
+CONFIG_FPGA_MGR_ZYNQMP_FPGA=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_BTRFS_FS=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_DEV_ZYNQMP_SHA3=y
+CONFIG_CRYPTO_DEV_XILINX_RSA=y
+CONFIG_CRYPTO_DEV_ZYNQMP_AES=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 4922c4451e7c..99cddf1145c2 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -59,6 +59,7 @@ config CRYPTO_GHASH_ARM64_CE
select CRYPTO_HASH
select CRYPTO_GF128MUL
select CRYPTO_LIB_AES
+ select CRYPTO_AEAD
config CRYPTO_CRCT10DIF_ARM64_CE
tristate "CRCT10DIF digest algorithm using PMULL instructions"
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index 7b012148bfd6..abd302e521c0 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -12,19 +12,6 @@
#include <linux/stringify.h>
-#if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE)
-#define __LL_SC_FALLBACK(asm_ops) \
-" b 3f\n" \
-" .subsection 1\n" \
-"3:\n" \
-asm_ops "\n" \
-" b 4f\n" \
-" .previous\n" \
-"4:\n"
-#else
-#define __LL_SC_FALLBACK(asm_ops) asm_ops
-#endif
-
#ifndef CONFIG_CC_HAS_K_CONSTRAINT
#define K
#endif
@@ -43,12 +30,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v) \
int result; \
\
asm volatile("// atomic_" #op "\n" \
- __LL_SC_FALLBACK( \
-" prfm pstl1strm, %2\n" \
-"1: ldxr %w0, %2\n" \
-" " #asm_op " %w0, %w0, %w3\n" \
-" stxr %w1, %w0, %2\n" \
-" cbnz %w1, 1b\n") \
+ " prfm pstl1strm, %2\n" \
+ "1: ldxr %w0, %2\n" \
+ " " #asm_op " %w0, %w0, %w3\n" \
+ " stxr %w1, %w0, %2\n" \
+ " cbnz %w1, 1b\n" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i)); \
}
@@ -61,13 +47,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
int result; \
\
asm volatile("// atomic_" #op "_return" #name "\n" \
- __LL_SC_FALLBACK( \
-" prfm pstl1strm, %2\n" \
-"1: ld" #acq "xr %w0, %2\n" \
-" " #asm_op " %w0, %w0, %w3\n" \
-" st" #rel "xr %w1, %w0, %2\n" \
-" cbnz %w1, 1b\n" \
-" " #mb ) \
+ " prfm pstl1strm, %2\n" \
+ "1: ld" #acq "xr %w0, %2\n" \
+ " " #asm_op " %w0, %w0, %w3\n" \
+ " st" #rel "xr %w1, %w0, %2\n" \
+ " cbnz %w1, 1b\n" \
+ " " #mb \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \
: cl); \
@@ -83,13 +68,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \
int val, result; \
\
asm volatile("// atomic_fetch_" #op #name "\n" \
- __LL_SC_FALLBACK( \
-" prfm pstl1strm, %3\n" \
-"1: ld" #acq "xr %w0, %3\n" \
-" " #asm_op " %w1, %w0, %w4\n" \
-" st" #rel "xr %w2, %w1, %3\n" \
-" cbnz %w2, 1b\n" \
-" " #mb ) \
+ " prfm pstl1strm, %3\n" \
+ "1: ld" #acq "xr %w0, %3\n" \
+ " " #asm_op " %w1, %w0, %w4\n" \
+ " st" #rel "xr %w2, %w1, %3\n" \
+ " cbnz %w2, 1b\n" \
+ " " #mb \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \
: cl); \
@@ -142,12 +126,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
unsigned long tmp; \
\
asm volatile("// atomic64_" #op "\n" \
- __LL_SC_FALLBACK( \
-" prfm pstl1strm, %2\n" \
-"1: ldxr %0, %2\n" \
-" " #asm_op " %0, %0, %3\n" \
-" stxr %w1, %0, %2\n" \
-" cbnz %w1, 1b") \
+ " prfm pstl1strm, %2\n" \
+ "1: ldxr %0, %2\n" \
+ " " #asm_op " %0, %0, %3\n" \
+ " stxr %w1, %0, %2\n" \
+ " cbnz %w1, 1b" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i)); \
}
@@ -160,13 +143,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
unsigned long tmp; \
\
asm volatile("// atomic64_" #op "_return" #name "\n" \
- __LL_SC_FALLBACK( \
-" prfm pstl1strm, %2\n" \
-"1: ld" #acq "xr %0, %2\n" \
-" " #asm_op " %0, %0, %3\n" \
-" st" #rel "xr %w1, %0, %2\n" \
-" cbnz %w1, 1b\n" \
-" " #mb ) \
+ " prfm pstl1strm, %2\n" \
+ "1: ld" #acq "xr %0, %2\n" \
+ " " #asm_op " %0, %0, %3\n" \
+ " st" #rel "xr %w1, %0, %2\n" \
+ " cbnz %w1, 1b\n" \
+ " " #mb \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \
: cl); \
@@ -176,19 +158,18 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline long \
-__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
+__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
{ \
s64 result, val; \
unsigned long tmp; \
\
asm volatile("// atomic64_fetch_" #op #name "\n" \
- __LL_SC_FALLBACK( \
-" prfm pstl1strm, %3\n" \
-"1: ld" #acq "xr %0, %3\n" \
-" " #asm_op " %1, %0, %4\n" \
-" st" #rel "xr %w2, %1, %3\n" \
-" cbnz %w2, 1b\n" \
-" " #mb ) \
+ " prfm pstl1strm, %3\n" \
+ "1: ld" #acq "xr %0, %3\n" \
+ " " #asm_op " %1, %0, %4\n" \
+ " st" #rel "xr %w2, %1, %3\n" \
+ " cbnz %w2, 1b\n" \
+ " " #mb \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \
: cl); \
@@ -240,15 +221,14 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_dec_if_positive\n"
- __LL_SC_FALLBACK(
-" prfm pstl1strm, %2\n"
-"1: ldxr %0, %2\n"
-" subs %0, %0, #1\n"
-" b.lt 2f\n"
-" stlxr %w1, %0, %2\n"
-" cbnz %w1, 1b\n"
-" dmb ish\n"
-"2:")
+ " prfm pstl1strm, %2\n"
+ "1: ldxr %0, %2\n"
+ " subs %0, %0, #1\n"
+ " b.lt 2f\n"
+ " stlxr %w1, %0, %2\n"
+ " cbnz %w1, 1b\n"
+ " dmb ish\n"
+ "2:"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
:
: "cc", "memory");
@@ -274,7 +254,6 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
old = (u##sz)old; \
\
asm volatile( \
- __LL_SC_FALLBACK( \
" prfm pstl1strm, %[v]\n" \
"1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
" eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
@@ -282,7 +261,7 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
" st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
" cbnz %w[tmp], 1b\n" \
" " #mb "\n" \
- "2:") \
+ "2:" \
: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
[v] "+Q" (*(u##sz *)ptr) \
: [old] __stringify(constraint) "r" (old), [new] "r" (new) \
@@ -326,7 +305,6 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \
unsigned long tmp, ret; \
\
asm volatile("// __cmpxchg_double" #name "\n" \
- __LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ldxp %0, %1, %2\n" \
" eor %0, %0, %3\n" \
@@ -336,8 +314,8 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \
" st" #rel "xp %w0, %5, %6, %2\n" \
" cbnz %w0, 1b\n" \
" " #mb "\n" \
- "2:") \
- : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
+ "2:" \
+ : "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr) \
: "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
: cl); \
\
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index da3280f639cd..28e96118c1e5 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -11,11 +11,11 @@
#define __ASM_ATOMIC_LSE_H
#define ATOMIC_OP(op, asm_op) \
-static inline void __lse_atomic_##op(int i, atomic_t *v) \
+static inline void __lse_atomic_##op(int i, atomic_t *v) \
{ \
asm volatile( \
__LSE_PREAMBLE \
-" " #asm_op " %w[i], %[v]\n" \
+ " " #asm_op " %w[i], %[v]\n" \
: [i] "+r" (i), [v] "+Q" (v->counter) \
: "r" (v)); \
}
@@ -32,7 +32,7 @@ static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
asm volatile( \
__LSE_PREAMBLE \
-" " #asm_op #mb " %w[i], %w[i], %[v]" \
+ " " #asm_op #mb " %w[i], %w[i], %[v]" \
: [i] "+r" (i), [v] "+Q" (v->counter) \
: "r" (v) \
: cl); \
@@ -130,7 +130,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
" add %w[i], %w[i], %w[tmp]" \
: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
: "r" (v) \
- : cl); \
+ : cl); \
\
return i; \
}
@@ -168,7 +168,7 @@ static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
{ \
asm volatile( \
__LSE_PREAMBLE \
-" " #asm_op " %[i], %[v]\n" \
+ " " #asm_op " %[i], %[v]\n" \
: [i] "+r" (i), [v] "+Q" (v->counter) \
: "r" (v)); \
}
@@ -185,7 +185,7 @@ static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
{ \
asm volatile( \
__LSE_PREAMBLE \
-" " #asm_op #mb " %[i], %[i], %[v]" \
+ " " #asm_op #mb " %[i], %[i], %[v]" \
: [i] "+r" (i), [v] "+Q" (v->counter) \
: "r" (v) \
: cl); \
@@ -272,7 +272,7 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
}
#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
-static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
+static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
{ \
unsigned long tmp; \
\
@@ -403,7 +403,7 @@ __lse__cmpxchg_double##name(unsigned long old1, \
" eor %[old2], %[old2], %[oldval2]\n" \
" orr %[old1], %[old1], %[old2]" \
: [old1] "+&r" (x0), [old2] "+&r" (x1), \
- [v] "+Q" (*(unsigned long *)ptr) \
+ [v] "+Q" (*(__uint128_t *)ptr) \
: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
[oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
: cl); \
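
The switch from "+Q" (*(unsigned long *)ptr) to "+Q" (*(__uint128_t *)ptr)
here (and in the matching LL/SC hunk above) fixes the width of the memory
operand: the asm reads and writes a 16-byte pair, but the old constraint
only told the compiler about the first 8 bytes, leaving it free to cache
the second word across the asm. A minimal standalone sketch of the same
idea, assuming GCC/clang inline asm on AArch64 (the helper name is
illustrative, not a kernel API):

static inline void store_pair(__uint128_t *p,
			      unsigned long lo, unsigned long hi)
{
	/*
	 * "=Q" on a __uint128_t lvalue declares all 16 bytes as written
	 * by the asm; with an unsigned long lvalue the compiler would
	 * assume only the first 8 bytes change and could keep a stale
	 * copy of the second word in a register.
	 */
	asm volatile("stp %1, %2, %0"
		     : "=Q" (*p)
		     : "r" (lo), "r" (hi));
}
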
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 4ffa86149d28..3b16cbc945cf 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -56,7 +56,8 @@
#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
#define ARM64_WORKAROUND_1542419 47
#define ARM64_SPECTRE_BHB 48
+#define ARM64_WORKAROUND_1742098 49
-#define ARM64_NCAPS 49
+#define ARM64_NCAPS 50
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index f63438474dd5..587b8a804fc7 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -449,6 +449,29 @@ cpuid_feature_extract_unsigned_field(u64 features, int field)
return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
+/*
+ * Fields that identify the version of the Performance Monitors Extension do
+ * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
+ * "Alternative ID scheme used for the Performance Monitors Extension version".
+ */
+static inline u64 __attribute_const__
+cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
+{
+ u64 val = cpuid_feature_extract_unsigned_field(features, field);
+ u64 mask = GENMASK_ULL(field + 3, field);
+
+ /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
+ if (val == 0xf)
+ val = 0;
+
+ if (val > cap) {
+ features &= ~mask;
+ features |= (cap << field) & mask;
+ }
+
+ return features;
+}
+
static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
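
A sketch of how cpuid_feature_cap_perfmon_field() above is meant to be
called, e.g. to limit the PMU version advertised to a consumer of the ID
register; ID_AA64DFR0_PMUVER_SHIFT comes from sysreg.h and
ID_AA64DFR0_PMUVER_8_1 is added later in this diff (the surrounding
caller is hypothetical):

	u64 dfr0 = read_sysreg(id_aa64dfr0_el1);

	/* advertise at most PMUv3 for v8.1; 0xf (IMP DEF) reads as absent */
	dfr0 = cpuid_feature_cap_perfmon_field(dfr0,
					       ID_AA64DFR0_PMUVER_SHIFT,
					       ID_AA64DFR0_PMUVER_8_1);
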
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index f0165df489a3..892fc0ceccb8 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -41,7 +41,7 @@
(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
#define MIDR_CPU_MODEL(imp, partnum) \
- (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
+ ((_AT(u32, imp) << MIDR_IMPLEMENTOR_SHIFT) | \
(0xf << MIDR_ARCHITECTURE_SHIFT) | \
((partnum) << MIDR_PARTNUM_SHIFT))
@@ -59,6 +59,7 @@
#define ARM_CPU_IMP_NVIDIA 0x4E
#define ARM_CPU_IMP_FUJITSU 0x46
#define ARM_CPU_IMP_HISI 0x48
+#define ARM_CPU_IMP_AMPERE 0xC0
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
@@ -101,6 +102,8 @@
#define HISI_CPU_PART_TSV110 0xD01
+#define AMPERE_CPU_PART_AMPERE1 0xAC3
+
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -131,6 +134,7 @@
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
+#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX
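
The _AT(u32, imp) cast in MIDR_CPU_MODEL() matters precisely because of
the new Ampere implementer code: 0xC0 shifted left by 24 lands in the
sign bit of a plain int, which is undefined behaviour and sign-extends
when the result is widened to u64. A standalone illustration using the
real shift values (the macro names here are illustrative copies, not the
kernel's):

#include <stdint.h>

#define MIDR_IMPLEMENTOR_SHIFT	24
#define MIDR_ARCHITECTURE_SHIFT	16
#define MIDR_PARTNUM_SHIFT	4

/* old form: 0xC0 << 24 overflows a signed int */
#define MIDR_CPU_MODEL_OLD(imp, part) \
	(((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
	 (0xf << MIDR_ARCHITECTURE_SHIFT) | ((part) << MIDR_PARTNUM_SHIFT))

/* fixed form: force an unsigned 32-bit shift, as _AT(u32, imp) does */
#define MIDR_CPU_MODEL_NEW(imp, part) \
	(((uint32_t)(imp) << MIDR_IMPLEMENTOR_SHIFT) | \
	 (0xf << MIDR_ARCHITECTURE_SHIFT) | ((part) << MIDR_PARTNUM_SHIFT))

/*
 * With imp = 0xC0 and part = 0xAC3:
 *   (uint64_t)MIDR_CPU_MODEL_OLD(0xC0, 0xAC3) == 0xffffffffc00fac30
 *   (uint64_t)MIDR_CPU_MODEL_NEW(0xC0, 0xAC3) == 0x00000000c00fac30
 */
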
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 0253691c4b3a..bd451732a3af 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -116,6 +116,7 @@ void user_regs_reset_single_step(struct user_pt_regs *regs,
void kernel_enable_single_step(struct pt_regs *regs);
void kernel_disable_single_step(void);
int kernel_active_single_step(void);
+void kernel_rewind_single_step(struct pt_regs *regs);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int reinstall_suspended_bps(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f65ff6b90f4a..4a4c20a1bf26 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -378,8 +378,26 @@ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
- if (kvm_vcpu_abt_iss1tw(vcpu))
- return true;
+ if (kvm_vcpu_abt_iss1tw(vcpu)) {
+ /*
+ * Only a permission fault on a S1PTW should be
+ * considered as a write. Otherwise, page tables baked
+ * in a read-only memslot will result in an exception
+ * being delivered in the guest.
+ *
+ * The drawback is that we end up faulting twice if the
+ * guest is using any of HW AF/DB: a translation fault
+ * to map the page containing the PT (read only at
+ * first), then a permission fault to allow the flags
+ * to be set.
+ */
+ switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
+ case ESR_ELx_FSC_PERM:
+ return true;
+ default:
+ return false;
+ }
+ }
if (kvm_vcpu_trap_is_iabt(vcpu))
return false;
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 3a057d427900..709badd4475f 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -679,6 +679,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
if (pte_hw_dirty(pte))
pte = pte_mkdirty(pte);
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ /*
+ * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
+ * dirtiness again.
+ */
+ if (pte_sw_dirty(pte))
+ pte = pte_mkdirty(pte);
return pte;
}
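
The hunk above guards against pte_modify() silently losing dirtiness:
the earlier pte_hw_dirty() transfer can be undone when newprot reinstates
PTE_RDONLY. A self-contained toy model of the encoding the code relies
on, using the real arm64 bit numbers but none of the kernel's type
machinery (an assumption-laden sketch, not the kernel's definitions):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t pte_t;

#define PTE_RDONLY	(1ULL << 7)	/* cleared by hw on write when DBM is set */
#define PTE_DBM		(1ULL << 51)	/* hw Dirty Bit Management / write permission */
#define PTE_DIRTY	(1ULL << 55)	/* software dirty bit */

/* hw-dirty: DBM set and the hardware has already cleared RDONLY */
static bool pte_hw_dirty(pte_t pte)
{
	return (pte & PTE_DBM) && !(pte & PTE_RDONLY);
}

static bool pte_sw_dirty(pte_t pte)
{
	return pte & PTE_DIRTY;
}

/*
 * Re-applying this after the newprot mask keeps a sw-dirty PTE
 * reporting dirty even though the mask set PTE_RDONLY again.
 */
static pte_t pte_mkdirty(pte_t pte)
{
	pte |= PTE_DIRTY;
	if (pte & PTE_DBM)
		pte &= ~PTE_RDONLY;
	return pte;
}
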
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 5623685c7d13..65834b84f0e1 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -184,8 +184,9 @@ void tls_preserve_current_state(void);
static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
+ s32 previous_syscall = regs->syscallno;
memset(regs, 0, sizeof(*regs));
- forget_syscall(regs);
+ regs->syscallno = previous_syscall;
regs->pc = pc;
if (system_uses_irq_prio_masking())
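start_thread_common() zeroes the whole pt_regs, which previously wiped the number of the in-progress syscall that tracers and audit read back; saving it across the memset keeps it visible. The save/restore, modeled standalone (struct layout hypothetical):

    #include <stdio.h>
    #include <string.h>

    struct regs { int syscallno; unsigned long pc; };

    static void start_thread(struct regs *r, unsigned long pc)
    {
        int prev = r->syscallno;   /* preserve across the wipe */
        memset(r, 0, sizeof(*r));
        r->syscallno = prev;
        r->pc = pc;
    }

    int main(void)
    {
        struct regs r = { .syscallno = 221 /* execve on arm64 */ };
        start_thread(&r, 0x400000);
        printf("syscallno survives: %d\n", r.syscallno);
        return 0;
    }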
diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h
index 06d880b3526c..43a20888bf19 100644
--- a/arch/arm64/include/asm/syscall_wrapper.h
+++ b/arch/arm64/include/asm/syscall_wrapper.h
@@ -8,7 +8,7 @@
#ifndef __ASM_SYSCALL_WRAPPER_H
#define __ASM_SYSCALL_WRAPPER_H
-struct pt_regs;
+#include <asm/ptrace.h>
#define SC_ARM64_REGS_TO_ARGS(x, ...) \
__MAP(x,__SC_ARGS \
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 5b3bdad66b27..290a1fa7cacf 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -102,8 +102,14 @@
#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31)
#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
+#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4)
+#define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6)
#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
+#define SYS_DC_CGSW sys_insn(1, 0, 7, 10, 4)
+#define SYS_DC_CGDSW sys_insn(1, 0, 7, 10, 6)
#define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2)
+#define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4)
+#define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6)
#define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2)
#define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0)
@@ -691,6 +697,12 @@
#define ID_AA64DFR0_TRACEVER_SHIFT 4
#define ID_AA64DFR0_DEBUGVER_SHIFT 0
+#define ID_AA64DFR0_PMUVER_8_1 0x4
+
+#define ID_DFR0_PERFMON_SHIFT 24
+
+#define ID_DFR0_PERFMON_8_1 0x4
+
#define ID_ISAR5_RDM_SHIFT 24
#define ID_ISAR5_CRC32_SHIFT 16
#define ID_ISAR5_SHA2_SHIFT 12
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index bcb14d11232f..12b545f7fb32 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -59,6 +59,7 @@ struct insn_emulation {
static LIST_HEAD(insn_emulation);
static int nr_insn_emulated __initdata;
static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
+static DEFINE_MUTEX(insn_emulation_mutex);
static void register_emulation_hooks(struct insn_emulation_ops *ops)
{
@@ -207,10 +208,12 @@ static int emulation_proc_handler(struct ctl_table *table, int write,
loff_t *ppos)
{
int ret = 0;
- struct insn_emulation *insn = (struct insn_emulation *) table->data;
- enum insn_emulation_mode prev_mode = insn->current_mode;
+ struct insn_emulation *insn;
+ enum insn_emulation_mode prev_mode;
- table->data = &insn->current_mode;
+ mutex_lock(&insn_emulation_mutex);
+ insn = container_of(table->data, struct insn_emulation, current_mode);
+ prev_mode = insn->current_mode;
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret || !write || prev_mode == insn->current_mode)
@@ -223,7 +226,7 @@ static int emulation_proc_handler(struct ctl_table *table, int write,
update_insn_emulation_mode(insn, INSN_UNDEF);
}
ret:
- table->data = insn;
+ mutex_unlock(&insn_emulation_mutex);
return ret;
}
@@ -247,7 +250,7 @@ static void __init register_insn_emulation_sysctl(void)
sysctl->maxlen = sizeof(int);
sysctl->procname = insn->ops->name;
- sysctl->data = insn;
+ sysctl->data = &insn->current_mode;
sysctl->extra1 = &insn->min;
sysctl->extra2 = &insn->max;
sysctl->proc_handler = emulation_proc_handler;
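The race fix stops the handler from temporarily rewriting the shared table->data pointer: data now points permanently at ->current_mode, the handler recovers the outer struct with container_of(), and a mutex serialises concurrent writers. The container_of() recovery, sketched in user space (locking omitted):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct insn_emulation { const char *name; int current_mode; };

    int main(void)
    {
        struct insn_emulation e = { "setend", 1 };
        void *data = &e.current_mode;      /* what sysctl->data now points at */
        struct insn_emulation *insn =
            container_of(data, struct insn_emulation, current_mode);
        printf("%s, mode %d\n", insn->name, insn->current_mode);
        return 0;
    }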
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
index 587543c6c51c..97c42be71338 100644
--- a/arch/arm64/kernel/cacheinfo.c
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -45,7 +45,8 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
int init_cache_level(unsigned int cpu)
{
- unsigned int ctype, level, leaves, fw_level;
+ unsigned int ctype, level, leaves;
+ int fw_level;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
@@ -63,6 +64,9 @@ int init_cache_level(unsigned int cpu)
else
fw_level = acpi_find_last_cache_level(cpu);
+ if (fw_level < 0)
+ return fw_level;
+
if (level < fw_level) {
/*
* some external caches not specified in CLIDR_EL1
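acpi_find_last_cache_level() can hand back a negative errno, and storing that in an unsigned variable made the `fw_level < 0` test unreachable while the error looked like an enormous cache level. The signedness trap in two lines (-19 standing in for -ENODEV):

    #include <stdio.h>

    int main(void)
    {
        unsigned int fw_level = -19;       /* -ENODEV stored in an unsigned */
        printf("negative? %d, seen as %u\n", fw_level < 0, fw_level);
        return 0;
    }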
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 33b33416fea4..342cba2ae982 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -170,9 +170,12 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
}
- __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
- __this_cpu_write(bp_hardening_data.fn, fn);
- __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
+ if (fn != __this_cpu_read(bp_hardening_data.fn)) {
+ __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+ __this_cpu_write(bp_hardening_data.fn, fn);
+ __this_cpu_write(bp_hardening_data.template_start,
+ hyp_vecs_start);
+ }
raw_spin_unlock(&bp_lock);
}
#else
@@ -817,6 +820,14 @@ static const struct arm64_cpu_capabilities erratum_843419_list[] = {
};
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+static struct midr_range broken_aarch32_aes[] = {
+ MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ {},
+};
+#endif
+
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
@@ -998,6 +1009,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.cpu_enable = cpu_enable_trap_ctr_access,
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+ {
+ .desc = "ARM erratum 1742098",
+ .capability = ARM64_WORKAROUND_1742098,
+ CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ },
+#endif
{
}
};
@@ -1126,6 +1145,10 @@ u8 spectre_bhb_loop_affected(int scope)
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
{},
};
+ static const struct midr_range spectre_bhb_k11_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+ {},
+ };
static const struct midr_range spectre_bhb_k8_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
@@ -1136,6 +1159,8 @@ u8 spectre_bhb_loop_affected(int scope)
k = 32;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
k = 24;
+ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
+ k = 11;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
k = 8;
@@ -1304,8 +1329,11 @@ static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
}
- __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
- __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
+ if (hyp_vecs_start != __this_cpu_read(bp_hardening_data.template_start)) {
+ __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+ __this_cpu_write(bp_hardening_data.template_start,
+ hyp_vecs_start);
+ }
raw_spin_unlock(&bp_lock);
}
#else
@@ -1341,7 +1369,13 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
case 8:
- kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
+ /*
+ * A57/A72-r0 will already have selected the
+ * spectre-indirect vector, which is sufficient
+ * for BHB too.
+ */
+ if (!__this_cpu_read(bp_hardening_data.fn))
+ kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
break;
case 24:
kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
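spectre_bhb_loop_affected() derives the branch-history-flush loop count k from whichever MIDR list the CPU matches, and AmpereOne now lands in a new k=11 bucket alongside the existing k=32/24/8 ones. A sketch of the bucket selection (names instead of MIDR ranges, purely illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Illustrative only: the kernel matches MIDR ranges, not strings */
    static int spectre_bhb_k(const char *part)
    {
        if (!strcmp(part, "ampere1"))
            return 11;                     /* the bucket added above */
        if (!strcmp(part, "cortex-a72") || !strcmp(part, "cortex-a57"))
            return 8;
        return 0;                          /* not handled via the loop workaround */
    }

    int main(void)
    {
        printf("k = %d\n", spectre_bhb_k("ampere1"));
        return 0;
    }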
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d07dadd6b8ff..396d96224b48 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -21,6 +21,7 @@
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/fpsimd.h>
+#include <asm/hwcap.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
@@ -1280,6 +1281,14 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
}
#endif
+static void elf_hwcap_fixup(void)
+{
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
+ compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
+#endif /* ARM64_ERRATUM_1742098 */
+}
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -2103,8 +2112,10 @@ void __init setup_cpu_features(void)
mark_const_caps_ready();
setup_elf_hwcaps(arm64_elf_hwcaps);
- if (system_supports_32bit_el0())
+ if (system_supports_32bit_el0()) {
setup_elf_hwcaps(compat_elf_hwcaps);
+ elf_hwcap_fixup();
+ }
if (system_uses_ttbr0_pan())
pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index d64a3c1e1b6b..62ab9d4f995e 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -441,6 +441,11 @@ int kernel_active_single_step(void)
}
NOKPROBE_SYMBOL(kernel_active_single_step);
+void kernel_rewind_single_step(struct pt_regs *regs)
+{
+ set_regs_spsr_ss(regs);
+}
+
/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index d0cf596db82c..14b7352bc05e 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -12,6 +12,14 @@
#include <asm/efi.h>
+static bool region_is_misaligned(const efi_memory_desc_t *md)
+{
+ if (PAGE_SIZE == EFI_PAGE_SIZE)
+ return false;
+ return !PAGE_ALIGNED(md->phys_addr) ||
+ !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
+}
+
/*
* Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
* executable, everything else can be mapped with the XN bits
@@ -25,14 +33,22 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
if (type == EFI_MEMORY_MAPPED_IO)
return PROT_DEVICE_nGnRE;
- if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
- "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
+ if (region_is_misaligned(md)) {
+ static bool __initdata code_is_misaligned;
+
/*
- * If the region is not aligned to the page size of the OS, we
- * can not use strict permissions, since that would also affect
- * the mapping attributes of the adjacent regions.
+ * Regions that are not aligned to the OS page size cannot be
+ * mapped with strict permissions, as those might interfere
+ * with the permissions that are needed by the adjacent
+ * region's mapping. However, if we haven't encountered any
+ * misaligned runtime code regions so far, we can safely use
+ * non-executable permissions for non-code regions.
*/
- return pgprot_val(PAGE_KERNEL_EXEC);
+ code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE);
+
+ return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC)
+ : pgprot_val(PAGE_KERNEL);
+ }
/* R-- */
if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
@@ -62,19 +78,16 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
md->type == EFI_RUNTIME_SERVICES_DATA);
- if (!PAGE_ALIGNED(md->phys_addr) ||
- !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
- /*
- * If the end address of this region is not aligned to page
- * size, the mapping is rounded up, and may end up sharing a
- * page frame with the next UEFI memory region. If we create
- * a block entry now, we may need to split it again when mapping
- * the next region, and support for that is going to be removed
- * from the MMU routines. So avoid block mappings altogether in
- * that case.
- */
+ /*
+ * If this region is not aligned to the page size used by the OS, the
+ * mapping will be rounded outwards, and may end up sharing a page
+ * frame with an adjacent runtime memory region. Given that the page
+ * table descriptor covering the shared page will be rewritten when the
+ * adjacent region gets mapped, we must avoid block mappings here so we
+ * don't have to worry about splitting them when that happens.
+ */
+ if (region_is_misaligned(md))
page_mappings_only = true;
- }
create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
md->num_pages << EFI_PAGE_SHIFT,
@@ -101,6 +114,9 @@ int __init efi_set_mapping_permissions(struct mm_struct *mm,
BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
md->type != EFI_RUNTIME_SERVICES_DATA);
+ if (region_is_misaligned(md))
+ return 0;
+
/*
* Calling apply_to_page_range() is only safe on regions that are
* guaranteed to be mapped down to pages. Since we are only called
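region_is_misaligned() only fires when the kernel page size exceeds the fixed 4 KiB EFI_PAGE_SIZE (e.g. 64 KiB pages), since an EFI region can then start or end in the middle of an OS page. The check, modeled with a 64 KiB page size:

    #include <stdio.h>
    #include <stdint.h>

    #define EFI_PAGE_SHIFT  12
    #define PAGE_SIZE       (64 * 1024)    /* hypothetical 64 KiB kernel pages */
    #define PAGE_ALIGNED(x) (((x) & (PAGE_SIZE - 1)) == 0)

    static int region_is_misaligned(uint64_t phys_addr, uint64_t num_pages)
    {
        return !PAGE_ALIGNED(phys_addr) ||
               !PAGE_ALIGNED(num_pages << EFI_PAGE_SHIFT);
    }

    int main(void)
    {
        /* 4 KiB-aligned start that is not 64 KiB-aligned */
        printf("misaligned? %d\n", region_is_misaligned(0x1000, 16));
        return 0;
    }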
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index b4a160795824..534578eba556 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -654,7 +654,7 @@ static int breakpoint_handler(unsigned long unused, unsigned int esr,
perf_bp_event(bp, regs);
/* Do we need to handle the stepping? */
- if (is_default_overflow_handler(bp))
+ if (uses_default_overflow_handler(bp))
step = 1;
unlock:
rcu_read_unlock();
@@ -733,7 +733,7 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
static int watchpoint_report(struct perf_event *wp, unsigned long addr,
struct pt_regs *regs)
{
- int step = is_default_overflow_handler(wp);
+ int step = uses_default_overflow_handler(wp);
struct arch_hw_breakpoint *info = counter_arch_bp(wp);
info->trigger = addr;
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 1a157ca33262..e4e95821b1f6 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -223,6 +223,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
*/
if (!kernel_active_single_step())
kernel_enable_single_step(linux_regs);
+ else
+ kernel_rewind_single_step(linux_regs);
err = 0;
break;
default:
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 113903db666c..3d3a673c6704 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -21,46 +21,6 @@
#include <asm/cputype.h>
#include <asm/topology.h>
-void store_cpu_topology(unsigned int cpuid)
-{
- struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
- u64 mpidr;
-
- if (cpuid_topo->package_id != -1)
- goto topology_populated;
-
- mpidr = read_cpuid_mpidr();
-
- /* Uniprocessor systems can rely on default topology values */
- if (mpidr & MPIDR_UP_BITMASK)
- return;
-
- /*
- * This would be the place to create cpu topology based on MPIDR.
- *
- * However, it cannot be trusted to depict the actual topology; some
- * pieces of the architecture enforce an artificial cap on Aff0 values
- * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
- * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
- * having absolutely no relationship to the actual underlying system
- * topology, and cannot be reasonably used as core / package ID.
- *
- * If the MT bit is set, Aff0 *could* be used to define a thread ID, but
- * we still wouldn't be able to obtain a sane core ID. This means we
- * need to entirely ignore MPIDR for any topology deduction.
- */
- cpuid_topo->thread_id = -1;
- cpuid_topo->core_id = cpuid;
- cpuid_topo->package_id = cpu_to_node(cpuid);
-
- pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
- cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
- cpuid_topo->thread_id, mpidr);
-
-topology_populated:
- update_siblings_masks(cpuid);
-}
-
#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 4e3e9d9c8151..a436a6972ced 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -202,7 +202,7 @@ void die(const char *str, struct pt_regs *regs, int err)
raw_spin_unlock_irqrestore(&die_lock, flags);
if (ret != NOTIFY_STOP)
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
static void arm64_show_signal(int signo, const char *str)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index a25f737dfa0b..5aae2f9f0ac8 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1089,6 +1089,16 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
(0xfUL << ID_AA64ISAR1_API_SHIFT) |
(0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
(0xfUL << ID_AA64ISAR1_GPI_SHIFT));
+ } else if (id == SYS_ID_AA64DFR0_EL1) {
+ /* Limit guests to PMUv3 for ARMv8.1 */
+ val = cpuid_feature_cap_perfmon_field(val,
+ ID_AA64DFR0_PMUVER_SHIFT,
+ ID_AA64DFR0_PMUVER_8_1);
+ } else if (id == SYS_ID_DFR0_EL1) {
+ /* Limit guests to PMUv3 for ARMv8.1 */
+ val = cpuid_feature_cap_perfmon_field(val,
+ ID_DFR0_PERFMON_SHIFT,
+ ID_DFR0_PERFMON_8_1);
}
return val;
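cpuid_feature_cap_perfmon_field() clamps one 4-bit ID-register field so the guest never advertises a PMU newer than the v8.1 version KVM emulates. Clamping a field in place, sketched standalone (the real helper also special-cases the 0xf IMPLEMENTATION DEFINED value, elided here):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t cap_field(uint64_t reg, int shift, uint64_t cap)
    {
        uint64_t field = (reg >> shift) & 0xf;

        if (field > cap)
            reg = (reg & ~(0xfULL << shift)) | (cap << shift);
        return reg;
    }

    int main(void)
    {
        /* PMUVER in bits [11:8] reads 0x5; cap it to 0x4 (PMUv3 for v8.1) */
        printf("%#llx\n", (unsigned long long)cap_field(0x500, 8, 0x4));
        return 0;
    }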
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 9239416e93d4..6c45350e33aa 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -13,14 +13,14 @@
#include <asm/cacheflush.h>
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
__dma_map_area(phys_to_virt(paddr), size, dir);
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
__dma_unmap_area(phys_to_virt(paddr), size, dir);
}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2a7339aeb1ad..af9a6e1fa0d3 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -296,7 +296,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
show_pte(addr);
die("Oops", regs, esr);
bust_spinlocks(0);
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
}
static void __do_kernel_fault(unsigned long addr, unsigned int esr,
@@ -403,8 +403,8 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
}
}
-#define VM_FAULT_BADMAP 0x010000
-#define VM_FAULT_BADACCESS 0x020000
+#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000)
+#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000)
static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
unsigned int mm_flags, unsigned long vm_flags)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5cf575f23af2..8e934bb44f12 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -399,7 +399,7 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot)
{
- if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
+ if (virt < PAGE_OFFSET) {
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
@@ -426,7 +426,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot)
{
- if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
+ if (virt < PAGE_OFFSET) {
pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
index b319808e8f6b..a5909091cb14 100644
--- a/arch/c6x/mm/dma-coherent.c
+++ b/arch/c6x/mm/dma-coherent.c
@@ -140,7 +140,7 @@ void __init coherent_mem_init(phys_addr_t start, u32 size)
sizeof(long));
}
-static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+static void c6x_dma_sync(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
BUG_ON(!valid_dma_direction(dir));
@@ -160,14 +160,14 @@ static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
}
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- return c6x_dma_sync(dev, paddr, size, dir);
+ return c6x_dma_sync(paddr, size, dir);
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- return c6x_dma_sync(dev, paddr, size, dir);
+ return c6x_dma_sync(paddr, size, dir);
}
diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c
index cb2a0d94a144..2df115d0e210 100644
--- a/arch/csky/abiv1/alignment.c
+++ b/arch/csky/abiv1/alignment.c
@@ -294,7 +294,7 @@ bad_area:
__func__, opcode, rz, rx, imm, addr);
show_regs(regs);
bust_spinlocks(0);
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
}
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c
index 63715cb90ee9..8cdbbcb5ed87 100644
--- a/arch/csky/kernel/traps.c
+++ b/arch/csky/kernel/traps.c
@@ -85,7 +85,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, int nr)
pr_err("%s: %08x\n", str, nr);
show_regs(regs);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
void buserr(struct pt_regs *regs)
diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
index 06e85b565454..8f6571ae27c8 100644
--- a/arch/csky/mm/dma-mapping.c
+++ b/arch/csky/mm/dma-mapping.c
@@ -58,8 +58,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
@@ -74,8 +74,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
}
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
index e47a9e0dc278..090adaee4b84 100644
--- a/arch/h8300/kernel/traps.c
+++ b/arch/h8300/kernel/traps.c
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
#include <linux/mm_types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -110,7 +111,7 @@ void die(const char *str, struct pt_regs *fp, unsigned long err)
dump(fp);
spin_unlock_irq(&die_lock);
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
static int kstack_depth_to_print = 24;
diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c
index fabffb83930a..573825c3cb70 100644
--- a/arch/h8300/mm/fault.c
+++ b/arch/h8300/mm/fault.c
@@ -52,7 +52,7 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
printk(" at virtual address %08lx\n", address);
if (!user_mode(regs))
die("Oops", regs, error_code);
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
return 1;
}
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index f561b127c4b4..25f388d9cfcc 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -55,8 +55,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
void *addr = phys_to_virt(paddr);
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
index 69c623b14ddd..f69eae3f32bd 100644
--- a/arch/hexagon/kernel/traps.c
+++ b/arch/hexagon/kernel/traps.c
@@ -221,7 +221,7 @@ int die(const char *str, struct pt_regs *regs, long err)
panic("Fatal exception");
oops_exit();
- do_exit(err);
+ make_task_dead(err);
return 0;
}
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 16714477eef4..72dc0ac46d6b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -8,6 +8,7 @@ menu "Processor type and features"
config IA64
bool
+ select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ACPI
@@ -360,7 +361,7 @@ config ARCH_PROC_KCORE_TEXT
depends on PROC_KCORE
config IA64_MCA_RECOVERY
- tristate "MCA recovery from errors other than TLB."
+ bool "MCA recovery from errors other than TLB."
config PERFMON
bool "Performance monitor support"
diff --git a/arch/ia64/include/asm/bugs.h b/arch/ia64/include/asm/bugs.h
deleted file mode 100644
index 0d6b9bded56c..000000000000
--- a/arch/ia64/include/asm/bugs.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- * void check_bugs(void);
- *
- * Based on <asm-alpha/bugs.h>.
- *
- * Modified 1998, 1999, 2003
- * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
- */
-#ifndef _ASM_IA64_BUGS_H
-#define _ASM_IA64_BUGS_H
-
-#include <asm/processor.h>
-
-extern void check_bugs (void);
-
-#endif /* _ASM_IA64_BUGS_H */
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 95a2ec37400f..0025f890b5a3 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -552,7 +552,7 @@ ia64_get_irr(unsigned int vector)
{
unsigned int reg = vector / 64;
unsigned int bit = vector % 64;
- u64 irr;
+ unsigned long irr;
switch (reg) {
case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 2a40268c3d49..c8a87798618e 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/sched/task.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kallsyms.h>
@@ -176,7 +177,7 @@ mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
spin_unlock(&mca_bh_lock);
/* This process is about to be killed itself */
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
}
/**
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 968b5f33e725..1a8e20652e7c 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -444,7 +444,7 @@ static void
do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm;
- unsigned long uninitialized_var(ip); /* GCC be quiet */
+ unsigned long ip;
elf_greg_t *dst = arg;
struct pt_regs *pt;
char nat;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index b392c0a50346..0ec8b55b28ac 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -581,7 +581,7 @@ static int salinfo_cpu_pre_down(unsigned int cpu)
* 'data' contains an integer that corresponds to the feature we're
* testing
*/
-static int proc_salinfo_show(struct seq_file *m, void *v)
+static int __maybe_unused proc_salinfo_show(struct seq_file *m, void *v)
{
unsigned long data = (unsigned long)v;
seq_puts(m, (sal_platform_features & data) ? "1\n" : "0\n");
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index bb320c6d0cc9..6700f066f59c 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -1073,8 +1073,7 @@ cpu_init (void)
}
}
-void __init
-check_bugs (void)
+void __init arch_cpu_finalize_init(void)
{
ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
(unsigned long) __end___mckinley_e9_bundles);
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 09fc385c2acd..3639e0a7cb3b 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -3,9 +3,8 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * This file contains NUMA specific variables and functions which can
- * be split away from DISCONTIGMEM and are used on NUMA machines with
- * contiguous memory.
+ * This file contains NUMA specific variables and functions which are used on
+ * NUMA machines with contiguous memory.
* 2002/08/07 Erich Focht <efocht@ess.nec.de>
* Populate cpu entries in sysfs for non-numa systems as well
* Intel Corporation - Ashok Raj
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index e13cb905930f..753642366e12 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -85,7 +85,7 @@ die (const char *str, struct pt_regs *regs, long err)
if (panic_on_oops)
panic("Fatal exception");
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
return 0;
}
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 5b00dc3898e1..f2b40bcd2495 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -81,7 +81,7 @@ skip:
return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
-static inline void
+static inline __init void
alloc_per_cpu_data(void)
{
size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 41d243c0c626..93789f31c89a 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -180,7 +180,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
void __init setup_per_cpu_areas(void)
{
struct pcpu_alloc_info *ai;
- struct pcpu_group_info *uninitialized_var(gi);
+ struct pcpu_group_info *gi;
unsigned int *cpu_map;
void *base;
unsigned long base_offset;
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index c2f299fe9e04..7f8c49579a2c 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -272,7 +272,7 @@ retry:
regs = NULL;
bust_spinlocks(0);
if (regs)
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
return;
out_of_memory:
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ee50506d86f4..df6d3dfa9d82 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -73,8 +73,8 @@ __ia64_sync_icache_dcache (pte_t pte)
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
* flush them when they get mapped into an executable vm-area.
*/
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
unsigned long pfn = PHYS_PFN(paddr);
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 5e1015eb6d0d..ad6837d00e7d 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -3,9 +3,8 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * This file contains NUMA specific variables and functions which can
- * be split away from DISCONTIGMEM and are used on NUMA machines with
- * contiguous memory.
+ * This file contains NUMA specific variables and functions which are used on
+ * NUMA machines with contiguous memory.
*
* 2002/08/07 Erich Focht <efocht@ess.nec.de>
*/
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 72cc568bc841..71c19918e387 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -369,7 +369,7 @@ EXPORT_SYMBOL(flush_tlb_range);
void ia64_tlb_init(void)
{
- ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
+ ia64_ptce_info_t ptce_info;
u64 tr_pgbits;
long status;
pal_vm_info_1_u_t vm_info_1;
diff --git a/arch/m68k/68000/entry.S b/arch/m68k/68000/entry.S
index 259b3661b614..94abf3d8afc5 100644
--- a/arch/m68k/68000/entry.S
+++ b/arch/m68k/68000/entry.S
@@ -47,6 +47,8 @@ do_trace:
jbsr syscall_trace_enter
RESTORE_SWITCH_STACK
addql #4,%sp
+ addql #1,%d0
+ jeq ret_from_exception
movel %sp@(PT_OFF_ORIG_D0),%d1
movel #-ENOSYS,%d0
cmpl #NR_syscalls,%d1
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 6663f1741798..d84a6a63d36a 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -4,6 +4,7 @@ config M68K
default y
select ARCH_32BIT_OFF_T
select ARCH_HAS_BINFMT_FLAT
+ select ARCH_HAS_CPU_FINALIZE_INIT if MMU
select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices
index 3e9b0b826f8a..6fb693bb0771 100644
--- a/arch/m68k/Kconfig.devices
+++ b/arch/m68k/Kconfig.devices
@@ -19,6 +19,7 @@ config HEARTBEAT
# We have a dedicated heartbeat LED. :-)
config PROC_HARDWARE
bool "/proc/hardware support"
+ depends on PROC_FS
help
Say Y here to support the /proc/hardware file, which gives you
access to information about the machine you're running on,
diff --git a/arch/m68k/coldfire/entry.S b/arch/m68k/coldfire/entry.S
index 52d312d5b4d4..fb3b06567745 100644
--- a/arch/m68k/coldfire/entry.S
+++ b/arch/m68k/coldfire/entry.S
@@ -92,6 +92,8 @@ ENTRY(system_call)
jbsr syscall_trace_enter
RESTORE_SWITCH_STACK
addql #4,%sp
+ addql #1,%d0
+ jeq ret_from_exception
movel %d3,%a0
jbsr %a0@
movel %d0,%sp@(PT_OFF_D0) /* save the return value */
diff --git a/arch/m68k/fpsp040/skeleton.S b/arch/m68k/fpsp040/skeleton.S
index a8f41615d94a..31a9c634c81e 100644
--- a/arch/m68k/fpsp040/skeleton.S
+++ b/arch/m68k/fpsp040/skeleton.S
@@ -499,12 +499,12 @@ in_ea:
dbf %d0,morein
rts
- .section .fixup,#alloc,#execinstr
+ .section .fixup,"ax"
.even
1:
jbra fpsp040_die
- .section __ex_table,#alloc
+ .section __ex_table,"a"
.align 4
.long in_ea,1b
diff --git a/arch/m68k/ifpsp060/os.S b/arch/m68k/ifpsp060/os.S
index 7a0d6e428066..89e2ec224ab6 100644
--- a/arch/m68k/ifpsp060/os.S
+++ b/arch/m68k/ifpsp060/os.S
@@ -379,11 +379,11 @@ _060_real_access:
| Exception handling for movs access to illegal memory
- .section .fixup,#alloc,#execinstr
+ .section .fixup,"ax"
.even
1: moveq #-1,%d1
rts
-.section __ex_table,#alloc
+.section __ex_table,"a"
.align 4
.long dmrbuae,1b
.long dmrwuae,1b
diff --git a/arch/m68k/include/asm/bugs.h b/arch/m68k/include/asm/bugs.h
deleted file mode 100644
index 745530651e0b..000000000000
--- a/arch/m68k/include/asm/bugs.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * include/asm-m68k/bugs.h
- *
- * Copyright (C) 1994 Linus Torvalds
- */
-
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- * void check_bugs(void);
- */
-
-#ifdef CONFIG_MMU
-extern void check_bugs(void); /* in arch/m68k/kernel/setup.c */
-#else
-static void check_bugs(void)
-{
-}
-#endif
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 3fab684cc0db..871a0e11da34 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -61,8 +61,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t handle, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 97cd3ea5f10b..9a66657773be 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -160,9 +160,12 @@ do_trace_entry:
jbsr syscall_trace
RESTORE_SWITCH_STACK
addql #4,%sp
+ addql #1,%d0 | optimization for cmpil #-1,%d0
+ jeq ret_from_syscall
movel %sp@(PT_OFF_ORIG_D0),%d0
cmpl #NR_syscalls,%d0
jcs syscall
+ jra ret_from_syscall
badsys:
movel #-ENOSYS,%sp@(PT_OFF_D0)
jra ret_from_syscall
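The added `addql #1,%d0` / `jeq` pair tests the tracing hook's return for -1 (incrementing and branching on zero is cheaper than `cmpil #-1,%d0`, as the inline comment notes): -1 means the tracer cancelled the syscall, so the entry path jumps straight to the return code instead of dispatching. The same control flow in C:

    #include <stdio.h>

    static int syscall_trace(void) { return -1; }  /* tracer cancels the syscall */

    int main(void)
    {
        int d0 = syscall_trace();
        if (d0 + 1 == 0) {        /* addql #1,%d0 ; jeq ret_from_syscall */
            puts("syscall skipped");
            return 0;
        }
        puts("dispatch syscall");
        return 0;
    }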
diff --git a/arch/m68k/kernel/relocate_kernel.S b/arch/m68k/kernel/relocate_kernel.S
index ab0f1e7d4653..f7667079e08e 100644
--- a/arch/m68k/kernel/relocate_kernel.S
+++ b/arch/m68k/kernel/relocate_kernel.S
@@ -26,7 +26,7 @@ ENTRY(relocate_new_kernel)
lea %pc@(.Lcopy),%a4
2: addl #0x00000000,%a4 /* virt_to_phys() */
- .section ".m68k_fixup","aw"
+ .section .m68k_fixup,"aw"
.long M68K_FIXUP_MEMOFFSET, 2b+2
.previous
@@ -49,7 +49,7 @@ ENTRY(relocate_new_kernel)
lea %pc@(.Lcont040),%a4
5: addl #0x00000000,%a4 /* virt_to_phys() */
- .section ".m68k_fixup","aw"
+ .section .m68k_fixup,"aw"
.long M68K_FIXUP_MEMOFFSET, 5b+2
.previous
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 528484feff80..a9ef8b6b01bc 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -10,6 +10,7 @@
*/
#include <linux/kernel.h>
+#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/delay.h>
@@ -527,7 +528,7 @@ static int __init proc_hardware_init(void)
module_init(proc_hardware_init);
#endif
-void check_bugs(void)
+void __init arch_cpu_finalize_init(void)
{
#if defined(CONFIG_FPU) && !defined(CONFIG_M68KFPU_EMU)
if (m68k_fputype == 0) {
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index f7121b775e5f..ab5fec67e541 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -883,11 +883,17 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
}
static inline void __user *
-get_sigframe(struct ksignal *ksig, size_t frame_size)
+get_sigframe(struct ksignal *ksig, struct pt_regs *tregs, size_t frame_size)
{
unsigned long usp = sigsp(rdusp(), ksig);
+ unsigned long gap = 0;
- return (void __user *)((usp - frame_size) & -8UL);
+ if (CPU_IS_020_OR_030 && tregs->format == 0xb) {
+ /* USP is unreliable so use worst-case value */
+ gap = 256;
+ }
+
+ return (void __user *)((usp - gap - frame_size) & -8UL);
}
static int setup_frame(struct ksignal *ksig, sigset_t *set,
@@ -905,7 +911,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
return -EFAULT;
}
- frame = get_sigframe(ksig, sizeof(*frame) + fsize);
+ frame = get_sigframe(ksig, tregs, sizeof(*frame) + fsize);
if (fsize)
err |= copy_to_user (frame + 1, regs + 1, fsize);
@@ -976,7 +982,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
return -EFAULT;
}
- frame = get_sigframe(ksig, sizeof(*frame));
+ frame = get_sigframe(ksig, tregs, sizeof(*frame));
if (fsize)
err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
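On 68020/030, a format-0xB (bus error) exception frame means the saved USP cannot be trusted, so the signal frame is placed a worst-case 256 bytes lower before the usual round-down to 8-byte alignment. The address arithmetic, standalone:

    #include <stdio.h>

    static unsigned long sigframe(unsigned long usp, unsigned long gap,
                                  unsigned long frame_size)
    {
        return (usp - gap - frame_size) & ~7UL;   /* & -8UL in the patch */
    }

    int main(void)
    {
        printf("%#lx\n", sigframe(0xeff00000UL, 256, 92));
        return 0;
    }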
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 344f93d36a9a..5bf314871e9f 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -30,6 +30,7 @@
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/kallsyms.h>
+#include <linux/extable.h>
#include <asm/setup.h>
#include <asm/fpu.h>
@@ -550,7 +551,8 @@ static inline void bus_error030 (struct frame *fp)
errorcode |= 2;
if (mmusr & (MMU_I | MMU_WP)) {
- if (ssw & 4) {
+ /* We might have an exception table for this PC */
+ if (ssw & 4 && !search_exception_tables(fp->ptregs.pc)) {
pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
@@ -1139,7 +1141,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr)
pr_crit("%s: %08x\n", str, nr);
show_registers(fp);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
asmlinkage void set_esp0(unsigned long ssp)
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index e9b1d7585b43..03ebb67b413e 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -48,7 +48,7 @@ int send_fault_sig(struct pt_regs *regs)
pr_alert("Unable to handle kernel access");
pr_cont(" at virtual address %p\n", addr);
die_if_kernel("Oops", regs, 0 /*error_code*/);
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
}
return 1;
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index c9c4be822456..13d0873b9053 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -45,6 +45,7 @@ config MICROBLAZE
select TRACING_SUPPORT
select VIRT_TO_BUS
select CPU_NO_EFFICIENT_FFS
+ select SPARSE_IRQ
select MMU_GATHER_NO_RANGE if MMU
# Endianness selection
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index 654edfdc7867..c3a173473d18 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -32,6 +32,8 @@ CONFIG_UNIX=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
CONFIG_BRIDGE=m
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_PCI=y
CONFIG_MTD=y
CONFIG_MTD_CFI=y
@@ -71,8 +73,9 @@ CONFIG_FB_XILINX=y
CONFIG_UIO=y
CONFIG_UIO_PDRV_GENIRQ=y
CONFIG_UIO_DMEM_GENIRQ=y
-CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_ROMFS_FS=y
CONFIG_NFS_FS=y
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index 377de39ccb8c..8c420782d6e4 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -70,7 +70,7 @@ CONFIG_XILINX_WATCHDOG=y
CONFIG_FB=y
CONFIG_FB_XILINX=y
# CONFIG_USB_SUPPORT is not set
-CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
# CONFIG_DNOTIFY is not set
CONFIG_CRAMFS=y
CONFIG_ROMFS_FS=y
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index e5c9170a07fc..83417105c00a 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -25,6 +25,7 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += mmiowb.h
+generic-y += msi.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h
index d785defeeed5..eac2fb4b3fb9 100644
--- a/arch/microblaze/include/asm/irq.h
+++ b/arch/microblaze/include/asm/irq.h
@@ -9,7 +9,6 @@
#ifndef _ASM_MICROBLAZE_IRQ_H
#define _ASM_MICROBLAZE_IRQ_H
-#define NR_IRQS (32 + 1)
#include <asm-generic/irq.h>
struct pt_regs;
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index a89c2d4ed5ff..d7bebd04247b 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -15,7 +15,7 @@
#include <linux/bug.h>
#include <asm/cacheflush.h>
-static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+static void __dma_sync(phys_addr_t paddr, size_t size,
enum dma_data_direction direction)
{
switch (direction) {
@@ -31,14 +31,14 @@ static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
}
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- __dma_sync(dev, paddr, size, dir);
+ __dma_sync(paddr, size, dir);
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- __dma_sync(dev, paddr, size, dir);
+ __dma_sync(paddr, size, dir);
}
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index cf99c411503e..6d3a6a644220 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -44,10 +44,10 @@ void die(const char *str, struct pt_regs *fp, long err)
pr_warn("Oops: %s, sig: %ld\n", str, err);
show_regs(fp);
spin_unlock_irq(&die_lock);
- /* do_exit() should take care of panic'ing from an interrupt
+ /* make_task_dead() should take care of panic'ing from an interrupt
* context so we don't handle it here
*/
- do_exit(err);
+ make_task_dead(err);
}
/* for user application debugging */
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index f264fdcf152a..14b276406153 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -99,7 +99,7 @@ big_endian:
_prepare_copy_fdt:
or r11, r0, r0 /* increment */
ori r4, r0, TOPHYS(_fdt_start)
- ori r3, r0, (0x8000 - 4)
+ ori r3, r0, (0x10000 - 4)
_copy_fdt:
lw r12, r7, r11 /* r12 = r7 + r11 */
sw r12, r4, r11 /* addr[r4 + r11] = r12 */
@@ -121,10 +121,10 @@ no_fdt_arg:
tophys(r4,r4) /* convert to phys address */
ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
_copy_command_line:
- /* r2=r5+r6 - r5 contain pointer to command line */
+ /* r2=r5+r11 - r5 contain pointer to command line */
lbu r2, r5, r11
beqid r2, skip /* Skip if no data */
- sb r2, r4, r11 /* addr[r4+r6]= r2 */
+ sb r2, r4, r11 /* addr[r4+r11]= r2 */
addik r11, r11, 1 /* increment counting */
bgtid r3, _copy_command_line /* loop for all entries */
addik r3, r3, -1 /* decrement loop */
@@ -139,8 +139,8 @@ skip:
ori r4, r0, TOPHYS(_bram_load_start) /* save bram context */
ori r3, r0, (LMB_SIZE - 4)
_copy_bram:
- lw r7, r0, r11 /* r7 = r0 + r6 */
- sw r7, r4, r11 /* addr[r4 + r6] = r7 */
+ lw r7, r0, r11 /* r7 = r0 + r11 */
+ sw r7, r4, r11 /* addr[r4 + r11] = r7 */
addik r11, r11, 4 /* increment counting */
bgtid r3, _copy_bram /* loop for all entries */
addik r3, r3, -4 /* decrement loop */
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 522a0c5d9c59..effed14eee06 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -138,7 +138,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
if (fdt)
pr_info("FDT at 0x%08x\n", fdt);
else
- pr_info("Compiled-in FDT at %p\n", _fdt_start);
+ pr_info("Compiled-in FDT at 0x%08x\n", (unsigned)&_fdt_start);
#ifdef CONFIG_MTD_UCLINUX
pr_info("Found romfs @ 0x%08x (0x%08x)\n",
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index e1f3e8741292..71072c5cf61f 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -46,7 +46,7 @@ SECTIONS {
__fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) {
_fdt_start = . ; /* place for fdt blob */
*(__fdt_blob) ; /* Any link-placed DTB */
- . = _fdt_start + 0x8000; /* Pad up to 32kbyte */
+ . = _fdt_start + 0x10000; /* Pad up to 64kbyte */
_fdt_end = . ;
}
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 58cc4965bd3e..d3c28ffd87db 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -141,7 +141,7 @@ struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
return NULL;
}
-void pcibios_set_master(struct pci_dev *dev)
+void __weak pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
@@ -559,37 +559,7 @@ int pci_proc_domain(struct pci_bus *bus)
*/
static void pcibios_fixup_resources(struct pci_dev *dev)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- int i;
- if (!hose) {
- pr_err("No host bridge for PCI dev %s !\n",
- pci_name(dev));
- return;
- }
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- struct resource *res = dev->resource + i;
- if (!res->flags)
- continue;
- if (res->start == 0) {
- pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]",
- pci_name(dev), i,
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- (unsigned int)res->flags);
- pr_debug("is unassigned\n");
- res->end -= res->start;
- res->start = 0;
- res->flags |= IORESOURCE_UNSET;
- continue;
- }
-
- pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
- pci_name(dev), i,
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- (unsigned int)res->flags);
- }
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 887b9c02d758..10a2de34fff6 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -5,6 +5,7 @@ config MIPS
select ARCH_32BIT_OFF_T if !64BIT
select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
select ARCH_CLOCKSOURCE_DATA
+ select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_SUPPORTS_UPROBES
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index 2c52ee27b4f2..3183df60ad33 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/leds.h>
#include <linux/mmc/host.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>
@@ -165,14 +164,10 @@ static struct platform_device db1x00_audio_dev = {
/******************************************************************************/
+#ifdef CONFIG_MMC_AU1X
static irqreturn_t db1100_mmc_cd(int irq, void *ptr)
{
- void (*mmc_cd)(struct mmc_host *, unsigned long);
- /* link against CONFIG_MMC=m */
- mmc_cd = symbol_get(mmc_detect_change);
- mmc_cd(ptr, msecs_to_jiffies(500));
- symbol_put(mmc_detect_change);
-
+ mmc_detect_change(ptr, msecs_to_jiffies(500));
return IRQ_HANDLED;
}
@@ -375,6 +370,7 @@ static struct platform_device db1100_mmc1_dev = {
.num_resources = ARRAY_SIZE(au1100_mmc1_res),
.resource = au1100_mmc1_res,
};
+#endif /* CONFIG_MMC_AU1X */
/******************************************************************************/
@@ -438,8 +434,10 @@ static struct platform_device *db1x00_devs[] = {
static struct platform_device *db1100_devs[] = {
&au1100_lcd_device,
+#ifdef CONFIG_MMC_AU1X
&db1100_mmc0_dev,
&db1100_mmc1_dev,
+#endif
};
int __init db1000_dev_setup(void)
diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
index 421d651433b6..9ad26215b004 100644
--- a/arch/mips/alchemy/devboards/db1200.c
+++ b/arch/mips/alchemy/devboards/db1200.c
@@ -10,7 +10,6 @@
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/leds.h>
@@ -327,6 +326,7 @@ static struct platform_device db1200_ide_dev = {
/**********************************************************************/
+#ifdef CONFIG_MMC_AU1X
/* SD carddetects: they're supposed to be edge-triggered, but ack
* doesn't seem to work (CPLD Rev 2). Instead, the screaming one
* is disabled and its counterpart enabled. The 200ms timeout is
@@ -340,14 +340,7 @@ static irqreturn_t db1200_mmc_cd(int irq, void *ptr)
static irqreturn_t db1200_mmc_cdfn(int irq, void *ptr)
{
- void (*mmc_cd)(struct mmc_host *, unsigned long);
-
- /* link against CONFIG_MMC=m */
- mmc_cd = symbol_get(mmc_detect_change);
- if (mmc_cd) {
- mmc_cd(ptr, msecs_to_jiffies(200));
- symbol_put(mmc_detect_change);
- }
+ mmc_detect_change(ptr, msecs_to_jiffies(200));
msleep(100); /* debounce */
if (irq == DB1200_SD0_INSERT_INT)
@@ -431,14 +424,7 @@ static irqreturn_t pb1200_mmc1_cd(int irq, void *ptr)
static irqreturn_t pb1200_mmc1_cdfn(int irq, void *ptr)
{
- void (*mmc_cd)(struct mmc_host *, unsigned long);
-
- /* link against CONFIG_MMC=m */
- mmc_cd = symbol_get(mmc_detect_change);
- if (mmc_cd) {
- mmc_cd(ptr, msecs_to_jiffies(200));
- symbol_put(mmc_detect_change);
- }
+ mmc_detect_change(ptr, msecs_to_jiffies(200));
msleep(100); /* debounce */
if (irq == PB1200_SD1_INSERT_INT)
@@ -599,6 +585,7 @@ static struct platform_device pb1200_mmc1_dev = {
.num_resources = ARRAY_SIZE(au1200_mmc1_res),
.resource = au1200_mmc1_res,
};
+#endif /* CONFIG_MMC_AU1X */
/**********************************************************************/
@@ -766,7 +753,9 @@ static struct platform_device db1200_audiodma_dev = {
static struct platform_device *db1200_devs[] __initdata = {
NULL, /* PSC0, selected by S6.8 */
&db1200_ide_dev,
+#ifdef CONFIG_MMC_AU1X
&db1200_mmc0_dev,
+#endif
&au1200_lcd_dev,
&db1200_eth_dev,
&db1200_nand_dev,
@@ -777,7 +766,9 @@ static struct platform_device *db1200_devs[] __initdata = {
};
static struct platform_device *pb1200_devs[] __initdata = {
+#ifdef CONFIG_MMC_AU1X
&pb1200_mmc1_dev,
+#endif
};
/* Some peripheral base addresses differ on the PB1200 */
@@ -856,7 +847,7 @@ int __init db1200_dev_setup(void)
i2c_register_board_info(0, db1200_i2c_devs,
ARRAY_SIZE(db1200_i2c_devs));
spi_register_board_info(db1200_spi_devs,
- ARRAY_SIZE(db1200_i2c_devs));
+ ARRAY_SIZE(db1200_spi_devs));
/* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI)
* S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
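The one-liner above (and its twin in db1550 further down) cures a copy-paste bug: the SPI board-info table was registered with ARRAY_SIZE of the I2C table, so the count only matched by luck. Why the wrong macro argument silently mis-sizes the registration:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        int i2c_devs[2] = { 0, 1 };
        int spi_devs[3] = { 0, 1, 2 };

        /* Registering spi_devs with ARRAY_SIZE(i2c_devs) drops the last entry */
        printf("wrong count %zu vs right count %zu\n",
               ARRAY_SIZE(i2c_devs), ARRAY_SIZE(spi_devs));
        return 0;
    }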
diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
index 8ac1f56ee57d..fcfefa48d260 100644
--- a/arch/mips/alchemy/devboards/db1300.c
+++ b/arch/mips/alchemy/devboards/db1300.c
@@ -17,7 +17,6 @@
#include <linux/interrupt.h>
#include <linux/ata_platform.h>
#include <linux/mmc/host.h>
-#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/platnand.h>
#include <linux/platform_device.h>
@@ -451,6 +450,7 @@ static struct platform_device db1300_ide_dev = {
/**********************************************************************/
+#ifdef CONFIG_MMC_AU1X
static irqreturn_t db1300_mmc_cd(int irq, void *ptr)
{
disable_irq_nosync(irq);
@@ -459,14 +459,7 @@ static irqreturn_t db1300_mmc_cd(int irq, void *ptr)
static irqreturn_t db1300_mmc_cdfn(int irq, void *ptr)
{
- void (*mmc_cd)(struct mmc_host *, unsigned long);
-
- /* link against CONFIG_MMC=m. We can only be called once MMC core has
- * initialized the controller, so symbol_get() should always succeed.
- */
- mmc_cd = symbol_get(mmc_detect_change);
- mmc_cd(ptr, msecs_to_jiffies(200));
- symbol_put(mmc_detect_change);
+ mmc_detect_change(ptr, msecs_to_jiffies(200));
msleep(100); /* debounce */
if (irq == DB1300_SD1_INSERT_INT)
@@ -640,6 +633,7 @@ static struct platform_device db1300_sd0_dev = {
.resource = au1300_sd0_res,
.num_resources = ARRAY_SIZE(au1300_sd0_res),
};
+#endif /* CONFIG_MMC_AU1X */
/**********************************************************************/
@@ -777,8 +771,10 @@ static struct platform_device *db1300_dev[] __initdata = {
&db1300_5waysw_dev,
&db1300_nand_dev,
&db1300_ide_dev,
+#ifdef CONFIG_MMC_AU1X
&db1300_sd0_dev,
&db1300_sd1_dev,
+#endif
&db1300_lcd_dev,
&db1300_ac97_dev,
&db1300_i2s_dev,
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index 3e0c75c0ece0..583b265d7407 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -588,7 +588,7 @@ int __init db1550_dev_setup(void)
i2c_register_board_info(0, db1550_i2c_devs,
ARRAY_SIZE(db1550_i2c_devs));
spi_register_board_info(db1550_spi_devs,
- ARRAY_SIZE(db1550_i2c_devs));
+ ARRAY_SIZE(db1550_spi_devs));
c = clk_get(NULL, "psc0_intclk");
if (!IS_ERR(c)) {
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index 135a5407f015..d26d9a6f6ee7 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -85,7 +85,7 @@ static __init void prom_init_mem(void)
pr_debug("Assume 128MB RAM\n");
break;
}
- if (!memcmp(prom_init, prom_init + mem, 32))
+ if (!memcmp((void *)prom_init, (void *)prom_init + mem, 32))
break;
}
lowmem = mem;
@@ -162,7 +162,7 @@ void __init bcm47xx_prom_highmem_init(void)
off = EXTVBASE + __pa(off);
for (extmem = 128 << 20; extmem < 512 << 20; extmem <<= 1) {
- if (!memcmp(prom_init, (void *)(off + extmem), 16))
+ if (!memcmp((void *)prom_init, (void *)(off + extmem), 16))
break;
}
extmem -= lowmem;
diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
index dcfa0ea912fe..f183c45503ce 100644
--- a/arch/mips/bcm63xx/clk.c
+++ b/arch/mips/bcm63xx/clk.c
@@ -361,6 +361,8 @@ static struct clk clk_periph = {
*/
int clk_enable(struct clk *clk)
{
+ if (!clk)
+ return 0;
mutex_lock(&clocks_mutex);
clk_enable_unlocked(clk);
mutex_unlock(&clocks_mutex);
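Tolerating a NULL clk and returning success matches the common clock framework's convention for optional clocks, sparing every caller its own NULL check. A sketch of the convention:

    #include <stdio.h>

    struct clk { int enable_count; };

    static int clk_enable(struct clk *clk)
    {
        if (!clk)
            return 0;              /* optional clock: a successful no-op */
        clk->enable_count++;
        return 0;
    }

    int main(void)
    {
        printf("NULL clk -> %d\n", clk_enable(NULL));
        return 0;
    }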
diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c
index 3d13c77c125f..98d39585c80f 100644
--- a/arch/mips/bmips/dma.c
+++ b/arch/mips/bmips/dma.c
@@ -64,7 +64,9 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
return dma_addr;
}
-void arch_sync_dma_for_cpu_all(struct device *dev)
+bool bmips_rac_flush_disable;
+
+void arch_sync_dma_for_cpu_all(void)
{
void __iomem *cbr = BMIPS_GET_CBR();
u32 cfg;
@@ -74,6 +76,9 @@ void arch_sync_dma_for_cpu_all(struct device *dev)
boot_cpu_type() != CPU_BMIPS4380)
return;
+ if (unlikely(bmips_rac_flush_disable))
+ return;
+
/* Flush stale data out of the readahead cache */
cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
__raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index 7aee9ff19c1a..36fbedcbd518 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -34,6 +34,8 @@
#define REG_BCM6328_OTP ((void __iomem *)CKSEG1ADDR(0x1000062c))
#define BCM6328_TP1_DISABLED BIT(9)
+extern bool bmips_rac_flush_disable;
+
static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000;
struct bmips_quirk {
@@ -103,6 +105,12 @@ static void bcm6358_quirks(void)
* disable SMP for now
*/
bmips_smp_enabled = 0;
+
+ /*
+ * RAC flush causes kernel panics on BCM6358 when booting from TP1
+ * because the bootloader is not initializing it properly.
+ */
+ bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
}
static void bcm6368_quirks(void)
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index 2e2d45bc850d..601afad60bfe 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -211,7 +211,7 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
{
cvmx_helper_link_info_t result;
- WARN(!octeon_is_simulation(),
+ WARN_ONCE(!octeon_is_simulation(),
"Using deprecated link status - please update your DT");
/* Unless we fix it later, all links are defaulted to down */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index de391541d6f7..89a397c73aa6 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -1100,7 +1100,7 @@ cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
if (index == 0)
result = __cvmx_helper_rgmii_link_get(ipd_port);
else {
- WARN(1, "Using deprecated link status - please update your DT");
+ WARN_ONCE(1, "Using deprecated link status - please update your DT");
result.s.full_duplex = 1;
result.s.link_up = 1;
result.s.speed = 1000;
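
The WARN -> WARN_ONCE changes above matter because the link-status path runs periodically, so an unconditional warning floods the log. A sketch of once-only semantics via a static flag; warn_once_sketch is an invented stand-in for the kernel macro, which additionally captures file/line and a taint state.

#include <stdio.h>

#define warn_once_sketch(fmt, ...)                      \
do {                                                    \
        static int __warned;                            \
        if (!__warned) {                                \
                __warned = 1;                           \
                fprintf(stderr, fmt, ##__VA_ARGS__);    \
        }                                               \
} while (0)

int main(void)
{
        for (int i = 0; i < 3; i++)
                warn_once_sketch("deprecated link status - please update your DT\n");
        return 0;       /* the message appears once, not three times */
}
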
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 3ad1f76c063a..2d5e7b21d960 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -127,6 +127,16 @@ static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
int irq, int line, int bit)
{
+ struct device_node *of_node;
+ int ret;
+
+ of_node = irq_domain_get_of_node(domain);
+ if (!of_node)
+ return -EINVAL;
+ ret = irq_alloc_desc_at(irq, of_node_to_nid(of_node));
+ if (ret < 0)
+ return ret;
+
return irq_domain_associate(domain, irq, line << 6 | bit);
}
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index c214fe4e678b..04bc34714727 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -86,11 +86,12 @@ static void octeon2_usb_clocks_start(struct device *dev)
"refclk-frequency", &clock_rate);
if (i) {
dev_err(dev, "No UCTL \"refclk-frequency\"\n");
+ of_node_put(uctl_node);
goto exit;
}
i = of_property_read_string(uctl_node,
"refclk-type", &clock_type);
-
+ of_node_put(uctl_node);
if (!i && strcmp("crystal", clock_type) == 0)
is_crystal_clock = true;
}
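
The octeon-platform fix above closes a classic device-tree reference leak: lookups like the one that produced uctl_node return a counted node reference, and of_node_put() must run on every exit path, including the early error return. A toy refcount model of the rule (node_get/node_put here are hypothetical, not the OF API):

#include <stdio.h>

struct node { int refcount; };

static struct node *node_get(struct node *n) { n->refcount++; return n; }
static void node_put(struct node *n)         { n->refcount--; }

static int use_node(struct node *n, int fail_early)
{
        struct node *ref = node_get(n);         /* counted reference */

        if (fail_early) {
                node_put(ref);                  /* the fix: drop it on the error path too */
                return -1;
        }
        /* ... use ref ... */
        node_put(ref);
        return 0;
}

int main(void)
{
        struct node n = { 1 };

        use_node(&n, 1);
        use_node(&n, 0);
        printf("refcount back to %d\n", n.refcount);    /* 1: balanced */
        return 0;
}
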
diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig
index 85f1955b4b00..4a81297e21a7 100644
--- a/arch/mips/configs/decstation_64_defconfig
+++ b/arch/mips/configs/decstation_64_defconfig
@@ -53,8 +53,6 @@ CONFIG_IPV6_SUBTREES=y
CONFIG_NETWORK_SECMARK=y
CONFIG_IP_SCTP=m
CONFIG_VLAN_8021Q=m
-CONFIG_DECNET=m
-CONFIG_DECNET_ROUTER=y
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
# CONFIG_FW_LOADER is not set
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index 30a6eafdb1d0..fd35454bae4c 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -49,8 +49,6 @@ CONFIG_IPV6_SUBTREES=y
CONFIG_NETWORK_SECMARK=y
CONFIG_IP_SCTP=m
CONFIG_VLAN_8021Q=m
-CONFIG_DECNET=m
-CONFIG_DECNET_ROUTER=y
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
# CONFIG_FW_LOADER is not set
diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig
index e2b58dbf4aa9..7ed8f4c7cbdd 100644
--- a/arch/mips/configs/decstation_r4k_defconfig
+++ b/arch/mips/configs/decstation_r4k_defconfig
@@ -48,8 +48,6 @@ CONFIG_IPV6_SUBTREES=y
CONFIG_NETWORK_SECMARK=y
CONFIG_IP_SCTP=m
CONFIG_VLAN_8021Q=m
-CONFIG_DECNET=m
-CONFIG_DECNET_ROUTER=y
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
# CONFIG_FW_LOADER is not set
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
index 9085f4d6c698..5b9b59a8fe68 100644
--- a/arch/mips/configs/gpr_defconfig
+++ b/arch/mips/configs/gpr_defconfig
@@ -69,7 +69,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_DECNET_NF_GRABULATOR=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -99,7 +98,6 @@ CONFIG_ATM_MPOA=m
CONFIG_ATM_BR2684=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
-CONFIG_DECNET=m
CONFIG_LLC2=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
index 982b990469af..1767f494a5b0 100644
--- a/arch/mips/configs/jazz_defconfig
+++ b/arch/mips/configs/jazz_defconfig
@@ -106,7 +106,6 @@ CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_DECNET_NF_GRABULATOR=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -127,7 +126,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE=m
-CONFIG_DECNET=m
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 914af125a7fa..d6578536d77c 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -117,7 +117,6 @@ CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_DECNET_NF_GRABULATOR=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -147,7 +146,6 @@ CONFIG_ATM_MPOA=m
CONFIG_ATM_BR2684=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
-CONFIG_DECNET=m
CONFIG_LLC2=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
index 72a211d2d556..45421a05bb0e 100644
--- a/arch/mips/configs/nlm_xlp_defconfig
+++ b/arch/mips/configs/nlm_xlp_defconfig
@@ -200,7 +200,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
-CONFIG_DECNET_NF_GRABULATOR=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -234,7 +233,6 @@ CONFIG_ATM_BR2684=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
-CONFIG_DECNET=m
CONFIG_LLC2=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
index 4ecb157e56d4..1081972c81da 100644
--- a/arch/mips/configs/nlm_xlr_defconfig
+++ b/arch/mips/configs/nlm_xlr_defconfig
@@ -198,7 +198,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
-CONFIG_DECNET_NF_GRABULATOR=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -232,7 +231,6 @@ CONFIG_ATM_BR2684=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
-CONFIG_DECNET=m
CONFIG_LLC2=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 30d7c3db884e..2d0506159dfa 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -116,7 +116,6 @@ CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_DECNET_NF_GRABULATOR=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -137,7 +136,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE=m
-CONFIG_DECNET=m
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
index 6ecda64ad184..ed88abc40513 100644
--- a/arch/mips/fw/lib/cmdline.c
+++ b/arch/mips/fw/lib/cmdline.c
@@ -51,7 +51,7 @@ char *fw_getenv(char *envname)
{
char *result = NULL;
- if (_fw_envp != NULL) {
+ if (_fw_envp != NULL && fw_envp(0) != NULL) {
/*
* Return a pointer to the given environment variable.
* YAMON uses "name", "value" pairs, while U-Boot uses
diff --git a/arch/mips/include/asm/bugs.h b/arch/mips/include/asm/bugs.h
index d8ab8b7129b5..6d04d7d3a8f2 100644
--- a/arch/mips/include/asm/bugs.h
+++ b/arch/mips/include/asm/bugs.h
@@ -1,17 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
* Copyright (C) 2007 Maciej W. Rozycki
- *
- * Needs:
- * void check_bugs(void);
*/
#ifndef _ASM_BUGS_H
#define _ASM_BUGS_H
#include <linux/bug.h>
-#include <linux/delay.h>
#include <linux/smp.h>
#include <asm/cpu.h>
@@ -31,17 +25,6 @@ static inline void check_bugs_early(void)
#endif
}
-static inline void check_bugs(void)
-{
- unsigned int cpu = smp_processor_id();
-
- cpu_data[cpu].udelay_val = loops_per_jiffy;
- check_bugs32();
-#ifdef CONFIG_64BIT
- check_bugs64();
-#endif
-}
-
static inline int r4k_daddiu_bug(void)
{
#ifdef CONFIG_64BIT
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index dcebaaf8c862..803f2a6f9960 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -276,7 +276,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
" .set pop"
: "=&r" (sum), "=&r" (tmp)
: "r" (saddr), "r" (daddr),
- "0" (htonl(len)), "r" (htonl(proto)), "r" (sum));
+ "0" (htonl(len)), "r" (htonl(proto)), "r" (sum)
+ : "memory");
return csum_fold(sum);
}
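
The csum_ipv6_magic hunk adds a "memory" clobber because the asm reads the headers through pointer operands, which the constraint list alone does not tell the compiler. A portable illustration of what the clobber buys: it acts as a full compiler barrier, so pending stores cannot be sunk past the asm and later loads cannot reuse stale registers.

#include <stdio.h>

int flag;

static void publish(void)
{
        flag = 1;
        /*
         * The empty asm with a "memory" clobber is a compiler barrier: the
         * store to flag cannot be reordered past it, and any values the
         * compiler had cached in registers are treated as invalid after it.
         */
        asm volatile("" ::: "memory");
}

int main(void)
{
        publish();
        printf("flag=%d\n", flag);
        return 0;
}
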
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 3e26b0c7391b..ae4a2f52e3c4 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -124,7 +124,24 @@
#define cpu_has_tx39_cache __opt(MIPS_CPU_TX39_CACHE)
#endif
#ifndef cpu_has_octeon_cache
-#define cpu_has_octeon_cache 0
+#define cpu_has_octeon_cache \
+({ \
+ int __res; \
+ \
+ switch (boot_cpu_type()) { \
+ case CPU_CAVIUM_OCTEON: \
+ case CPU_CAVIUM_OCTEON_PLUS: \
+ case CPU_CAVIUM_OCTEON2: \
+ case CPU_CAVIUM_OCTEON3: \
+ __res = 1; \
+ break; \
+ \
+ default: \
+ __res = 0; \
+ } \
+ \
+ __res; \
+})
#endif
/* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */
#ifndef cpu_has_fpu
@@ -341,7 +358,7 @@
({ \
int __res; \
\
- switch (current_cpu_type()) { \
+ switch (boot_cpu_type()) { \
case CPU_M14KC: \
case CPU_74K: \
case CPU_1074K: \
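
The cpu_has_octeon_cache macro above is built from a GNU C statement expression: ({ ... }) evaluates a block and yields its last expression, which lets a macro contain a full switch yet still be used wherever an expression is expected. A standalone sketch of the idiom (gcc/clang extension; names invented):

#include <stdio.h>

#define CPU_A 1
#define CPU_B 2

#define cpu_is_special(type)            \
({                                      \
        int __res;                      \
                                        \
        switch (type) {                 \
        case CPU_A:                     \
        case CPU_B:                     \
                __res = 1;              \
                break;                  \
        default:                        \
                __res = 0;              \
        }                               \
        __res;  /* value of the whole ({ ... }) */ \
})

int main(void)
{
        printf("%d %d\n", cpu_is_special(CPU_A), cpu_is_special(42));
        return 0;
}
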
diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h
index 1e1247add1cf..908e96e3a311 100644
--- a/arch/mips/include/asm/dec/prom.h
+++ b/arch/mips/include/asm/dec/prom.h
@@ -70,7 +70,7 @@ static inline bool prom_is_rex(u32 magic)
*/
typedef struct {
int pagesize;
- unsigned char bitmap[0];
+ unsigned char bitmap[];
} memmap;
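
The dec/prom.h change swaps the old GNU zero-length array (bitmap[0]) for a C99 flexible array member; the struct is then allocated with extra trailing storage, and bounds checkers can see the real extent. A self-contained sketch of the allocation pattern (memmap_sketch is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
        int pagesize;
        unsigned char bitmap[];         /* flexible array member */
} memmap_sketch;

int main(void)
{
        size_t nbytes = 16;
        memmap_sketch *m = malloc(sizeof(*m) + nbytes);

        if (!m)
                return 1;
        m->pagesize = 4096;
        memset(m->bitmap, 0xff, nbytes);        /* trailing storage is usable */
        printf("pagesize=%d first=%#x\n", m->pagesize, m->bitmap[0]);
        free(m);
        return 0;
}
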
diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h
index d0ef8b4892bb..d0494ce4b337 100644
--- a/arch/mips/include/asm/fw/fw.h
+++ b/arch/mips/include/asm/fw/fw.h
@@ -26,6 +26,6 @@ extern char *fw_getcmdline(void);
extern void fw_meminit(void);
extern char *fw_getenv(char *name);
extern unsigned long fw_getenvl(char *name);
-extern void fw_init_early_console(char port);
+extern void fw_init_early_console(void);
#endif /* __ASM_FW_H_ */
diff --git a/arch/mips/include/asm/mach-rc32434/pci.h b/arch/mips/include/asm/mach-rc32434/pci.h
index 6f40d1515580..1ff8a987025c 100644
--- a/arch/mips/include/asm/mach-rc32434/pci.h
+++ b/arch/mips/include/asm/mach-rc32434/pci.h
@@ -377,7 +377,7 @@ struct pci_msu {
PCI_CFG04_STAT_SSE | \
PCI_CFG04_STAT_PE)
-#define KORINA_CNFG1 ((KORINA_STAT<<16)|KORINA_CMD)
+#define KORINA_CNFG1 (KORINA_STAT | KORINA_CMD)
#define KORINA_REVID 0
#define KORINA_CLASS_CODE 0
diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
index b826b8473e95..7649ab45e80c 100644
--- a/arch/mips/include/asm/mmzone.h
+++ b/arch/mips/include/asm/mmzone.h
@@ -20,10 +20,4 @@
#define nid_to_addrbase(nid) 0
#endif
-#ifdef CONFIG_DISCONTIGMEM
-
-#define pfn_to_nid(pfn) pa_to_nid((pfn) << PAGE_SHIFT)
-
-#endif /* CONFIG_DISCONTIGMEM */
-
#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index 1e76774b36dd..2849a9b65a05 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -60,6 +60,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->cp0_epc = val;
+ regs->cp0_cause &= ~CAUSEF_BD;
}
/* Query offset/name of register from its name/offset */
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 25fa651c937d..ebdf4d910af2 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -38,7 +38,7 @@ static inline bool mips_syscall_is_indirect(struct task_struct *task,
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
- return current_thread_info()->syscall;
+ return task_thread_info(task)->syscall;
}
static inline void mips_syscall_update_nr(struct task_struct *task,
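
The syscall.h fix above illustrates a recurring bug class: a helper that takes an explicit task_struct must derive state from that argument rather than from the calling context, or cross-task users such as ptrace and seccomp read the tracer's state instead of the tracee's. A toy model (struct task and current_task are stand-ins, not kernel types):

#include <stdio.h>

struct task { int syscall_nr; };

static struct task *current_task;               /* stand-in for `current` */

static int get_nr_buggy(struct task *t)
{
        (void)t;
        return current_task->syscall_nr;        /* wrong task! */
}

static int get_nr_fixed(struct task *t)
{
        return t->syscall_nr;                   /* uses the argument */
}

int main(void)
{
        struct task tracer = { 1 }, tracee = { 42 };

        current_task = &tracer;
        printf("buggy=%d fixed=%d\n",
               get_nr_buggy(&tracee), get_nr_fixed(&tracee));
        return 0;
}
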
diff --git a/arch/mips/include/asm/vpe.h b/arch/mips/include/asm/vpe.h
index 80e70dbd1f64..012731546cf6 100644
--- a/arch/mips/include/asm/vpe.h
+++ b/arch/mips/include/asm/vpe.h
@@ -104,7 +104,6 @@ struct vpe_control {
struct list_head tc_list; /* Thread contexts */
};
-extern unsigned long physical_memsize;
extern struct vpe_control vpecontrol;
extern const struct file_operations vpe_fops;
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index a01e14955187..c64a297e82b3 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -592,7 +592,7 @@ static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
phys_addr_t phys = page_to_phys(page) + offset;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_device(dev, phys, size, dir);
+ arch_sync_dma_for_device(phys, size, dir);
return vdma_alloc(phys, size);
}
@@ -600,7 +600,7 @@ static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_cpu(dev, vdma_log2phys(dma_addr), size, dir);
+ arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
vdma_free(dma_addr);
}
@@ -612,7 +612,7 @@ static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
for_each_sg(sglist, sg, nents, i) {
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+ arch_sync_dma_for_device(sg_phys(sg), sg->length,
dir);
sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
if (sg->dma_address == DMA_MAPPING_ERROR)
@@ -631,8 +631,7 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
for_each_sg(sglist, sg, nents, i) {
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length,
- dir);
+ arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
vdma_free(sg->dma_address);
}
}
@@ -640,13 +639,13 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
static void jazz_dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
- arch_sync_dma_for_device(dev, vdma_log2phys(addr), size, dir);
+ arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
}
static void jazz_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
- arch_sync_dma_for_cpu(dev, vdma_log2phys(addr), size, dir);
+ arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
}
static void jazz_dma_sync_sg_for_device(struct device *dev,
@@ -656,7 +655,7 @@ static void jazz_dma_sync_sg_for_device(struct device *dev,
int i;
for_each_sg(sgl, sg, nents, i)
- arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
static void jazz_dma_sync_sg_for_cpu(struct device *dev,
@@ -666,7 +665,7 @@ static void jazz_dma_sync_sg_for_cpu(struct device *dev,
int i;
for_each_sg(sgl, sg, nents, i)
- arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
const struct dma_map_ops jazz_dma_ops = {
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index 7b045d2a0b51..bbc6f07d8124 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -11,6 +11,7 @@
#include <asm/cpu-features.h>
#include <asm/cpu-info.h>
+#include <asm/fpu.h>
#ifdef CONFIG_MIPS_FP_SUPPORT
@@ -309,6 +310,11 @@ void mips_set_personality_nan(struct arch_elf_state *state)
struct cpuinfo_mips *c = &boot_cpu_data;
struct task_struct *t = current;
+ /* Do this early so t->thread.fpu.fcr31 won't be clobbered in case
+ * we are preempted before the lose_fpu(0) in start_thread.
+ */
+ lose_fpu(0);
+
t->thread.fpu.fcr31 = c->fpu_csr31;
switch (state->nan_2008) {
case 0:
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index 662c8db9f45b..9f5b1247b4ba 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -56,7 +56,7 @@ void arch_jump_label_transform(struct jump_entry *e,
* The branch offset must fit in the instruction's 26
* bit field.
*/
- WARN_ON((offset >= BIT(25)) ||
+ WARN_ON((offset >= (long)BIT(25)) ||
(offset < -(long)BIT(25)));
insn.j_format.opcode = bc6_op;
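
The jump_label cast above fixes a signed/unsigned comparison trap: BIT(n) expands to 1UL << n, so comparing a signed offset against it promotes the offset to unsigned long, and any negative value compares as huge. A two-line demonstration:

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
        long offset = -4;

        printf("unsigned compare: %d\n", offset >= BIT(25));            /* 1: wrong */
        printf("signed   compare: %d\n", offset >= (long)BIT(25));      /* 0: right */
        return 0;
}
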
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index f8d36710cd58..d408b3a5bfd5 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -168,7 +168,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
{
unsigned long i = *pos;
- return i < NR_CPUS ? (void *) (i + 1) : NULL;
+ return i < nr_cpu_ids ? (void *) (i + 1) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 82e44b31aad5..0419629aee60 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -11,6 +11,8 @@
* Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
*/
#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
@@ -190,10 +192,6 @@ static unsigned long __init init_initrd(void)
pr_err("initrd start must be page aligned\n");
goto disable;
}
- if (initrd_start < PAGE_OFFSET) {
- pr_err("initrd start < PAGE_OFFSET\n");
- goto disable;
- }
/*
* Sanitize initrd addresses. For example firmware
@@ -206,6 +204,11 @@ static unsigned long __init init_initrd(void)
initrd_end = (unsigned long)__va(end);
initrd_start = (unsigned long)__va(__pa(initrd_start));
+ if (initrd_start < PAGE_OFFSET) {
+ pr_err("initrd start < PAGE_OFFSET\n");
+ goto disable;
+ }
+
ROOT_DEV = Root_RAM0;
return PFN_UP(end);
disable:
@@ -359,11 +362,11 @@ static void __init bootmem_init(void)
panic("Incorrect memory mapping !!!");
if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
+ max_low_pfn = PFN_DOWN(HIGHMEM_START);
#ifdef CONFIG_HIGHMEM
- highstart_pfn = PFN_DOWN(HIGHMEM_START);
+ highstart_pfn = max_low_pfn;
highend_pfn = max_pfn;
#else
- max_low_pfn = PFN_DOWN(HIGHMEM_START);
max_pfn = max_low_pfn;
#endif
}
@@ -811,3 +814,14 @@ static int __init setnocoherentio(char *str)
}
early_param("nocoherentio", setnocoherentio);
#endif
+
+void __init arch_cpu_finalize_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ cpu_data[cpu].udelay_val = loops_per_jiffy;
+ check_bugs32();
+
+ if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
+ check_bugs64();
+}
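
The new arch_cpu_finalize_init() above replaces the old inline check_bugs() and uses IS_ENABLED() instead of an #ifdef, so both branches are parsed and type-checked while the disabled one is compiled out. A simplified stand-alone model of that idiom; the real kernel IS_ENABLED() is a more elaborate preprocessor trick, and the stand-in below only mimics its effect.

#include <stdio.h>

#define CONFIG_CPU_R4X00_BUGS64 1       /* pretend the option is =y */
#define IS_ENABLED(x) (x)               /* toy stand-in for the kernel macro */

static void check_bugs32(void) { puts("32-bit errata checked"); }
static void check_bugs64(void) { puts("64-bit errata checked"); }

static void cpu_finalize_init(void)
{
        check_bugs32();
        if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
                check_bugs64();         /* dead code eliminated when =n */
}

int main(void)
{
        cpu_finalize_init();
        return 0;
}
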
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index dbb3f1fc71ab..f659adb681bc 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -423,9 +423,11 @@ static void cps_shutdown_this_cpu(enum cpu_death death)
wmb();
}
} else {
- pr_debug("Gating power to core %d\n", core);
- /* Power down the core */
- cps_pm_enter_state(CPS_PM_POWER_GATED);
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
+ pr_debug("Gating power to core %d\n", core);
+ /* Power down the core */
+ cps_pm_enter_state(CPS_PM_POWER_GATED);
+ }
}
}
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 749089c25d5e..5a491eca456f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -415,7 +415,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
if (regs && kexec_should_crash(current))
crash_kexec(regs);
- do_exit(sig);
+ make_task_dead(sig);
}
extern struct exception_table_entry __start___dbe_table[];
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index faf98f209b3f..6f62c1a3bcfc 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -10,6 +10,8 @@
*/
#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir)
+#define RUNTIME_DISCARD_EXIT
+
#include <asm-generic/vmlinux.lds.h>
#undef mips
diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c
index 9268ebc0f61e..903c07bdc92d 100644
--- a/arch/mips/kernel/vpe-cmp.c
+++ b/arch/mips/kernel/vpe-cmp.c
@@ -75,7 +75,6 @@ ATTRIBUTE_GROUPS(vpe);
static void vpe_device_release(struct device *cd)
{
- kfree(cd);
}
static struct class vpe_class = {
@@ -157,6 +156,7 @@ out_dev:
device_del(&vpe_device);
out_class:
+ put_device(&vpe_device);
class_unregister(&vpe_class);
out_chrdev:
@@ -169,7 +169,7 @@ void __exit vpe_module_exit(void)
{
struct vpe *v, *n;
- device_del(&vpe_device);
+ device_unregister(&vpe_device);
class_unregister(&vpe_class);
unregister_chrdev(major, VPE_MODULE_NAME);
diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c
index 2e003b11a098..496ed8f362f6 100644
--- a/arch/mips/kernel/vpe-mt.c
+++ b/arch/mips/kernel/vpe-mt.c
@@ -92,12 +92,11 @@ int vpe_run(struct vpe *v)
write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
/*
- * The sde-kit passes 'memsize' to __start in $a3, so set something
- * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
- * DFLT_HEAP_SIZE when you compile your program
+ * We don't pass the memsize here, so VPE programs need to be
+ * compiled with DFLT_STACK_SIZE and DFLT_HEAP_SIZE defined.
*/
+ mttgpr(7, 0);
mttgpr(6, v->ntcs);
- mttgpr(7, physical_memsize);
/* set up VPE1 */
/*
@@ -313,7 +312,6 @@ ATTRIBUTE_GROUPS(vpe);
static void vpe_device_release(struct device *cd)
{
- kfree(cd);
}
static struct class vpe_class = {
@@ -497,6 +495,7 @@ out_dev:
device_del(&vpe_device);
out_class:
+ put_device(&vpe_device);
class_unregister(&vpe_class);
out_chrdev:
@@ -509,7 +508,7 @@ void __exit vpe_module_exit(void)
{
struct vpe *v, *n;
- device_del(&vpe_device);
+ device_unregister(&vpe_device);
class_unregister(&vpe_class);
unregister_chrdev(major, VPE_MODULE_NAME);
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 97f63a84aa51..8a5f666d34c8 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -693,7 +693,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
gfn_t gfn = gpa >> PAGE_SHIFT;
int srcu_idx, err;
kvm_pfn_t pfn;
- pte_t *ptep, entry, old_pte;
+ pte_t *ptep, entry;
bool writeable;
unsigned long prot_bits;
unsigned long mmu_seq;
@@ -766,7 +766,6 @@ retry:
entry = pfn_pte(pfn, __pgprot(prot_bits));
/* Write the PTE */
- old_pte = *ptep;
set_pte(ptep, entry);
err = 0;
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
index 7a623684d9b5..2d5a0bcb0cec 100644
--- a/arch/mips/lantiq/clk.c
+++ b/arch/mips/lantiq/clk.c
@@ -50,6 +50,7 @@ struct clk *clk_get_io(void)
{
return &cpu_clk_generic[2];
}
+EXPORT_SYMBOL_GPL(clk_get_io);
struct clk *clk_get_ppe(void)
{
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 3f568f5aae2d..2729a4b63e18 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -23,12 +23,6 @@ DEFINE_SPINLOCK(ebu_lock);
EXPORT_SYMBOL_GPL(ebu_lock);
/*
- * This is needed by the VPE loader code, just set it to 0 and assume
- * that the firmware hardcodes this value to something useful.
- */
-unsigned long physical_memsize = 0L;
-
-/*
* this struct is filled by the soc specific detection code and holds
* information about the specific soc type, revision and name
*/
diff --git a/arch/mips/lasat/picvue_proc.c b/arch/mips/lasat/picvue_proc.c
index 8126f15b8e09..6b019915b0c8 100644
--- a/arch/mips/lasat/picvue_proc.c
+++ b/arch/mips/lasat/picvue_proc.c
@@ -39,7 +39,7 @@ static void pvc_display(unsigned long data)
pvc_write_string(pvc_lines[i], 0, i);
}
-static DECLARE_TASKLET(pvc_display_tasklet, &pvc_display, 0);
+static DECLARE_TASKLET_OLD(pvc_display_tasklet, &pvc_display);
static int pvc_line_proc_show(struct seq_file *m, void *v)
{
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 83ed37298e66..0ba6f746485f 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -80,7 +80,7 @@ static void dump_tlb(int first, int last)
unsigned int pagemask, guestctl1 = 0, c0, c1, i;
unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
- unsigned long uninitialized_var(s_mmid);
+ unsigned long s_mmid;
#ifdef CONFIG_32BIT
bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
int pwidth = xpa ? 11 : 8;
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index 794c96c2a4cd..311dc1580bbd 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -98,7 +98,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
if (plat_dat->bus_id) {
__raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
GMAC1_USE_UART0, LS1X_MUX_CTRL0);
- switch (plat_dat->interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII:
val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
break;
@@ -107,12 +107,12 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
break;
default:
pr_err("unsupported mii mode %d\n",
- plat_dat->interface);
+ plat_dat->phy_interface);
return -ENOTSUPP;
}
val &= ~GMAC1_SHUT;
} else {
- switch (plat_dat->interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII:
val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
break;
@@ -121,7 +121,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
break;
default:
pr_err("unsupported mii mode %d\n",
- plat_dat->interface);
+ plat_dat->phy_interface);
return -ENOTSUPP;
}
val &= ~GMAC0_SHUT;
@@ -131,7 +131,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
plat_dat = dev_get_platdata(&pdev->dev);
val &= ~PHY_INTF_SELI;
- if (plat_dat->interface == PHY_INTERFACE_MODE_RMII)
+ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII)
val |= 0x4 << PHY_INTF_SELI_SHIFT;
__raw_writel(val, LS1X_MUX_CTRL1);
@@ -146,9 +146,9 @@ static struct plat_stmmacenet_data ls1x_eth0_pdata = {
.bus_id = 0,
.phy_addr = -1,
#if defined(CONFIG_LOONGSON1_LS1B)
- .interface = PHY_INTERFACE_MODE_MII,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
#elif defined(CONFIG_LOONGSON1_LS1C)
- .interface = PHY_INTERFACE_MODE_RMII,
+ .phy_interface = PHY_INTERFACE_MODE_RMII,
#endif
.mdio_bus_data = &ls1x_mdio_bus_data,
.dma_cfg = &ls1x_eth_dma_cfg,
@@ -186,7 +186,7 @@ struct platform_device ls1x_eth0_pdev = {
static struct plat_stmmacenet_data ls1x_eth1_pdata = {
.bus_id = 1,
.phy_addr = -1,
- .interface = PHY_INTERFACE_MODE_MII,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
.mdio_bus_data = &ls1x_mdio_bus_data,
.dma_cfg = &ls1x_eth_dma_cfg,
.has_gmac = 1,
diff --git a/arch/mips/loongson32/ls1c/board.c b/arch/mips/loongson32/ls1c/board.c
index e9de6da0ce51..9dcfe9de55b0 100644
--- a/arch/mips/loongson32/ls1c/board.c
+++ b/arch/mips/loongson32/ls1c/board.c
@@ -15,7 +15,6 @@ static struct platform_device *ls1c_platform_devices[] __initdata = {
static int __init ls1c_platform_init(void)
{
ls1x_serial_set_uartclk(&ls1x_uart_pdev);
- ls1x_rtc_set_extclk(&ls1x_rtc_pdev);
return platform_add_devices(ls1c_platform_devices,
ARRAY_SIZE(ls1c_platform_devices));
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 1d4d57dd9acf..6cfacb04865f 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -27,7 +27,7 @@
* R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp.
* SGI IP32 aka O2.
*/
-static inline bool cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(void)
{
switch (boot_cpu_type()) {
case CPU_R10000:
@@ -118,17 +118,17 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
} while (left);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
dma_sync_phys(paddr, size, dir);
}
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- if (cpu_needs_post_dma_flush(dev))
+ if (cpu_needs_post_dma_flush())
dma_sync_phys(paddr, size, dir);
}
#endif
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 090fa653dfa9..800cc5bc7a38 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -84,7 +84,7 @@ void setup_zero_pages(void)
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
enum fixed_addresses idx;
- unsigned int uninitialized_var(old_mmid);
+ unsigned int old_mmid;
unsigned long vaddr, flags, entrylo;
unsigned long old_ctx;
pte_t pte;
@@ -416,7 +416,12 @@ void __init paging_init(void)
(highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
}
+
+ max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
+#else
+ max_mapnr = max_low_pfn;
#endif
+ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
free_area_init_nodes(max_zone_pfns);
}
@@ -452,16 +457,6 @@ void __init mem_init(void)
*/
BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));
-#ifdef CONFIG_HIGHMEM
-#ifdef CONFIG_DISCONTIGMEM
-#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
-#endif
- max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
-#else
- max_mapnr = max_low_pfn;
-#endif
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
maar_init();
memblock_free_all();
setup_zero_pages(); /* Setup zeroed pages. */
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 60046445122b..b6104d9413e0 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -120,7 +120,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
if (size <= (current_cpu_data.tlbsizeftlbsets ?
current_cpu_data.tlbsize / 8 :
current_cpu_data.tlbsize / 2)) {
- unsigned long old_entryhi, uninitialized_var(old_mmid);
+ unsigned long old_entryhi, old_mmid;
int newpid = cpu_asid(cpu, mm);
old_entryhi = read_c0_entryhi();
@@ -214,7 +214,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
int cpu = smp_processor_id();
if (cpu_context(cpu, vma->vm_mm) != 0) {
- unsigned long uninitialized_var(old_mmid);
+ unsigned long old_mmid;
unsigned long flags, old_entryhi;
int idx;
@@ -381,7 +381,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
#ifdef CONFIG_XPA
panic("Broken for XPA kernels");
#else
- unsigned int uninitialized_var(old_mmid);
+ unsigned int old_mmid;
unsigned long flags;
unsigned long wired;
unsigned long old_pagemask;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 32202fadc26c..70add795e240 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -630,7 +630,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
return;
}
- if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
+ if (cpu_has_rixi && _PAGE_NO_EXEC != 0) {
if (fill_includes_sw_bits) {
UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
} else {
@@ -2569,7 +2569,7 @@ static void check_pabits(void)
unsigned long entry;
unsigned pabits, fillbits;
- if (!cpu_has_rixi || !_PAGE_NO_EXEC) {
+ if (!cpu_has_rixi || _PAGE_NO_EXEC == 0) {
/*
* We'll only be making use of the fact that we can rotate bits
* into the fill if the CPU supports RIXI, so don't bother
diff --git a/arch/mips/pic32/pic32mzda/early_console.c b/arch/mips/pic32/pic32mzda/early_console.c
index 8c236738b5ee..5d48408f84b1 100644
--- a/arch/mips/pic32/pic32mzda/early_console.c
+++ b/arch/mips/pic32/pic32mzda/early_console.c
@@ -27,7 +27,7 @@
#define U_BRG(x) (UART_BASE(x) + 0x40)
static void __iomem *uart_base;
-static char console_port = -1;
+static int console_port = -1;
static int __init configure_uart_pins(int port)
{
@@ -47,7 +47,7 @@ static int __init configure_uart_pins(int port)
return 0;
}
-static void __init configure_uart(char port, int baud)
+static void __init configure_uart(int port, int baud)
{
u32 pbclk;
@@ -60,7 +60,7 @@ static void __init configure_uart(char port, int baud)
uart_base + PIC32_SET(U_STA(port)));
}
-static void __init setup_early_console(char port, int baud)
+static void __init setup_early_console(int port, int baud)
{
if (configure_uart_pins(port))
return;
@@ -130,16 +130,15 @@ _out:
return baud;
}
-void __init fw_init_early_console(char port)
+void __init fw_init_early_console(void)
{
char *arch_cmdline = pic32_getcmdline();
- int baud = -1;
+ int baud, port;
uart_base = ioremap_nocache(PIC32_BASE_UART, 0xc00);
baud = get_baud_from_cmdline(arch_cmdline);
- if (port == -1)
- port = get_port_from_cmdline(arch_cmdline);
+ port = get_port_from_cmdline(arch_cmdline);
if (port == -1)
port = EARLY_CONSOLE_PORT;
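
The pic32 early-console hunks above change the port variable from char to int. Whether `char c = -1; c == -1` holds depends on the target ABI: plain char is unsigned on several Linux architectures, where the -1 sentinel becomes 255 and the comparison is always false. A two-line demonstration:

#include <stdio.h>

int main(void)
{
        char c = -1;    /* becomes 255 where plain char is unsigned */
        int  i = -1;

        /* prints "no" on unsigned-char ABIs, "yes" on signed-char ones */
        printf("char == -1: %s\n", c == -1 ? "yes" : "no");
        printf("int  == -1: %s\n", i == -1 ? "yes" : "no");
        return 0;
}
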
diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c
index f232c77ff526..488c0bee7ebf 100644
--- a/arch/mips/pic32/pic32mzda/init.c
+++ b/arch/mips/pic32/pic32mzda/init.c
@@ -60,7 +60,7 @@ void __init plat_mem_setup(void)
strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
#ifdef CONFIG_EARLY_PRINTK
- fw_init_early_console(-1);
+ fw_init_early_console();
#endif
pic32_config_init();
}
diff --git a/arch/nds32/include/asm/memory.h b/arch/nds32/include/asm/memory.h
index 940d32842793..62faafbc28e4 100644
--- a/arch/nds32/include/asm/memory.h
+++ b/arch/nds32/include/asm/memory.h
@@ -76,18 +76,12 @@
* virt_to_page(k) convert a _valid_ virtual address to struct page *
* virt_addr_valid(k) indicates whether a virtual address is valid
*/
-#ifndef CONFIG_DISCONTIGMEM
-
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
#define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
-#else /* CONFIG_DISCONTIGMEM */
-#error CONFIG_DISCONTIGMEM is not supported yet.
-#endif /* !CONFIG_DISCONTIGMEM */
-
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#endif
diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
index 4206d4b6c8ce..69d762182d49 100644
--- a/arch/nds32/kernel/dma.c
+++ b/arch/nds32/kernel/dma.c
@@ -46,8 +46,8 @@ static inline void cache_op(phys_addr_t paddr, size_t size,
} while (left);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_FROM_DEVICE:
@@ -61,8 +61,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
}
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c
index 62bdafbc53f4..26c62d5a55c1 100644
--- a/arch/nds32/kernel/fpu.c
+++ b/arch/nds32/kernel/fpu.c
@@ -223,7 +223,7 @@ inline void handle_fpu_exception(struct pt_regs *regs)
}
} else if (fpcsr & FPCSR_mskRIT) {
if (!user_mode(regs))
- do_exit(SIGILL);
+ make_task_dead(SIGILL);
si_signo = SIGILL;
}
diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c
index f4d386b52622..f6648845aae7 100644
--- a/arch/nds32/kernel/traps.c
+++ b/arch/nds32/kernel/traps.c
@@ -184,7 +184,7 @@ void die(const char *str, struct pt_regs *regs, int err)
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
EXPORT_SYMBOL(die);
@@ -288,7 +288,7 @@ void unhandled_interruption(struct pt_regs *regs)
pr_emerg("unhandled_interruption\n");
show_regs(regs);
if (!user_mode(regs))
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
force_sig(SIGKILL);
}
@@ -299,7 +299,7 @@ void unhandled_exceptions(unsigned long entry, unsigned long addr,
addr, type);
show_regs(regs);
if (!user_mode(regs))
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
force_sig(SIGKILL);
}
@@ -326,7 +326,7 @@ void do_revinsn(struct pt_regs *regs)
pr_emerg("Reserved Instruction\n");
show_regs(regs);
if (!user_mode(regs))
- do_exit(SIGILL);
+ make_task_dead(SIGILL);
force_sig(SIGILL);
}
diff --git a/arch/nios2/boot/Makefile b/arch/nios2/boot/Makefile
index 37dfc7e584bc..0b704c1f379f 100644
--- a/arch/nios2/boot/Makefile
+++ b/arch/nios2/boot/Makefile
@@ -20,7 +20,7 @@ $(obj)/vmlinux.bin: vmlinux FORCE
$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
-$(obj)/vmImage: $(obj)/vmlinux.gz
+$(obj)/vmImage: $(obj)/vmlinux.gz FORCE
$(call if_changed,uimage)
@$(kecho) 'Kernel: $@ is ready'
diff --git a/arch/nios2/boot/dts/10m50_devboard.dts b/arch/nios2/boot/dts/10m50_devboard.dts
index 5e4ab032c1e8..4088cb08726b 100644
--- a/arch/nios2/boot/dts/10m50_devboard.dts
+++ b/arch/nios2/boot/dts/10m50_devboard.dts
@@ -97,7 +97,7 @@
rx-fifo-depth = <8192>;
tx-fifo-depth = <8192>;
address-bits = <48>;
- max-frame-size = <1518>;
+ max-frame-size = <1500>;
local-mac-address = [00 00 00 00 00 00];
altr,has-supplementary-unicast;
altr,enable-sup-addr = <1>;
diff --git a/arch/nios2/boot/dts/3c120_devboard.dts b/arch/nios2/boot/dts/3c120_devboard.dts
index d10fb81686c7..3ee316906379 100644
--- a/arch/nios2/boot/dts/3c120_devboard.dts
+++ b/arch/nios2/boot/dts/3c120_devboard.dts
@@ -106,7 +106,7 @@
interrupt-names = "rx_irq", "tx_irq";
rx-fifo-depth = <8192>;
tx-fifo-depth = <8192>;
- max-frame-size = <1518>;
+ max-frame-size = <1500>;
local-mac-address = [ 00 00 00 00 00 00 ];
phy-mode = "rgmii-id";
phy-handle = <&phy0>;
diff --git a/arch/nios2/include/asm/entry.h b/arch/nios2/include/asm/entry.h
index cf37f55efbc2..bafb7b2ca59f 100644
--- a/arch/nios2/include/asm/entry.h
+++ b/arch/nios2/include/asm/entry.h
@@ -50,7 +50,8 @@
stw r13, PT_R13(sp)
stw r14, PT_R14(sp)
stw r15, PT_R15(sp)
- stw r2, PT_ORIG_R2(sp)
+ movi r24, -1
+ stw r24, PT_ORIG_R2(sp)
stw r7, PT_ORIG_R7(sp)
stw ra, PT_RA(sp)
diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h
index 642462144872..9da34c3022a2 100644
--- a/arch/nios2/include/asm/ptrace.h
+++ b/arch/nios2/include/asm/ptrace.h
@@ -74,6 +74,8 @@ extern void show_regs(struct pt_regs *);
((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE)\
- 1)
+#define force_successful_syscall_return() (current_pt_regs()->orig_r2 = -1)
+
int do_syscall_trace_enter(void);
void do_syscall_trace_exit(void);
#endif /* __ASSEMBLY__ */
diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
index 1e515ccd698e..af556588248e 100644
--- a/arch/nios2/kernel/entry.S
+++ b/arch/nios2/kernel/entry.S
@@ -185,6 +185,7 @@ ENTRY(handle_system_call)
ldw r5, PT_R5(sp)
local_restart:
+ stw r2, PT_ORIG_R2(sp)
/* Check that the requested system call is within limits */
movui r1, __NR_syscalls
bgeu r2, r1, ret_invsyscall
@@ -192,7 +193,6 @@ local_restart:
movhi r11, %hiadj(sys_call_table)
add r1, r1, r11
ldw r1, %lo(sys_call_table)(r1)
- beq r1, r0, ret_invsyscall
/* Check if we are being traced */
GET_THREAD_INFO r11
@@ -213,6 +213,9 @@ local_restart:
translate_rc_and_ret:
movi r1, 0
bge r2, zero, 3f
+ ldw r1, PT_ORIG_R2(sp)
+ addi r1, r1, 1
+ beq r1, zero, 3f
sub r2, zero, r2
movi r1, 1
3:
@@ -255,9 +258,9 @@ traced_system_call:
ldw r6, PT_R6(sp)
ldw r7, PT_R7(sp)
- /* Fetch the syscall function, we don't need to check the boundaries
- * since this is already done.
- */
+ /* Fetch the syscall function. */
+ movui r1, __NR_syscalls
+ bgeu r2, r1, traced_invsyscall
slli r1, r2, 2
movhi r11,%hiadj(sys_call_table)
add r1, r1, r11
@@ -276,6 +279,9 @@ traced_system_call:
translate_rc_and_ret2:
movi r1, 0
bge r2, zero, 4f
+ ldw r1, PT_ORIG_R2(sp)
+ addi r1, r1, 1
+ beq r1, zero, 4f
sub r2, zero, r2
movi r1, 1
4:
@@ -287,6 +293,11 @@ end_translate_rc_and_ret2:
RESTORE_SWITCH_STACK
br ret_from_exception
+ /* If the syscall number was invalid, return -ENOSYS */
+traced_invsyscall:
+ movi r2, -ENOSYS
+ br translate_rc_and_ret2
+
Luser_return:
GET_THREAD_INFO r11 /* get thread_info pointer */
ldw r10, TI_FLAGS(r11) /* get thread_info->flags */
@@ -336,9 +347,6 @@ external_interrupt:
/* skip if no interrupt is pending */
beq r12, r0, ret_from_interrupt
- movi r24, -1
- stw r24, PT_ORIG_R2(sp)
-
/*
* Process an external hardware interrupt.
*/
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index a42dd09c6578..bfb20821a8f4 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -240,7 +240,7 @@ static int do_signal(struct pt_regs *regs)
/*
* If we were from a system call, check for system call restarting...
*/
- if (regs->orig_r2 >= 0) {
+ if (regs->orig_r2 >= 0 && regs->r1) {
continue_addr = regs->ea;
restart_addr = continue_addr - 4;
retval = regs->r2;
@@ -261,6 +261,7 @@ static int do_signal(struct pt_regs *regs)
regs->ea = restart_addr;
break;
}
+ regs->orig_r2 = -1;
}
if (get_signal(&ksig)) {
diff --git a/arch/nios2/kernel/syscall_table.c b/arch/nios2/kernel/syscall_table.c
index 6176d63023c1..c2875a6dd5a4 100644
--- a/arch/nios2/kernel/syscall_table.c
+++ b/arch/nios2/kernel/syscall_table.c
@@ -13,5 +13,6 @@
#define __SYSCALL(nr, call) [nr] = (call),
void *sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls-1] = sys_ni_syscall,
#include <asm/unistd.h>
};
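
The nios2 syscall-table hunk uses a GNU range designator to give every slot a safe default before the generated entries override specific indexes, so gaps in the numbering can never leave a NULL pointer. A sketch of the same initializer with invented table entries (later designators win over the range; -38 is Linux's ENOSYS value):

#include <stdio.h>

#define NR_CALLS 8

static long ni_syscall(void) { return -38; /* -ENOSYS */ }
static long call_read(void)  { return 0; }

static long (*call_table[NR_CALLS])(void) = {
        [0 ... NR_CALLS - 1] = ni_syscall,      /* default for every slot */
        [3] = call_read,                        /* explicit entries override */
};

int main(void)
{
        printf("nr 3 -> %ld, nr 5 -> %ld\n",
               call_table[3](), call_table[5]());
        return 0;
}
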
diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c
index 486db793923c..8e192d656426 100644
--- a/arch/nios2/kernel/traps.c
+++ b/arch/nios2/kernel/traps.c
@@ -37,10 +37,10 @@ void die(const char *str, struct pt_regs *regs, long err)
show_regs(regs);
spin_unlock_irq(&die_lock);
/*
- * do_exit() should take care of panic'ing from an interrupt
+ * make_task_dead() should take care of panic'ing from an interrupt
* context so we don't handle it here
*/
- do_exit(err);
+ make_task_dead(err);
}
void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr)
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
index 9cb238664584..0ed711e37902 100644
--- a/arch/nios2/mm/dma-mapping.c
+++ b/arch/nios2/mm/dma-mapping.c
@@ -18,8 +18,8 @@
#include <linux/cache.h>
#include <asm/cacheflush.h>
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
void *vaddr = phys_to_virt(paddr);
@@ -42,8 +42,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
}
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
void *vaddr = phys_to_virt(paddr);
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index 4d5b8bd1d795..adec711ad39d 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -125,7 +125,7 @@ arch_dma_free(struct device *dev, size_t size, void *vaddr,
free_pages_exact(vaddr, size);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
+void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
enum dma_data_direction dir)
{
unsigned long cl;
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index 6b27cf4a0d78..8db433947d39 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -173,7 +173,6 @@ handler: ;\
l.sw PT_GPR28(r1),r28 ;\
l.sw PT_GPR29(r1),r29 ;\
/* r30 already save */ ;\
-/* l.sw PT_GPR30(r1),r30*/ ;\
l.sw PT_GPR31(r1),r31 ;\
TRACE_IRQS_OFF_ENTRY ;\
/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
@@ -211,9 +210,8 @@ handler: ;\
l.sw PT_GPR27(r1),r27 ;\
l.sw PT_GPR28(r1),r28 ;\
l.sw PT_GPR29(r1),r29 ;\
- /* r31 already saved */ ;\
- l.sw PT_GPR30(r1),r30 ;\
-/* l.sw PT_GPR31(r1),r31 */ ;\
+ /* r30 already saved */ ;\
+ l.sw PT_GPR31(r1),r31 ;\
/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
l.addi r30,r0,-1 ;\
l.sw PT_ORIG_GPR11(r1),r30 ;\
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index 932a8ec2b520..2804852a5592 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -218,7 +218,7 @@ void die(const char *str, struct pt_regs *regs, long err)
__asm__ __volatile__("l.nop 1");
do {} while (1);
#endif
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
/* This is normally the 'Oops' routine */
diff --git a/arch/parisc/include/asm/bugs.h b/arch/parisc/include/asm/bugs.h
deleted file mode 100644
index 0a7f9db6bd1c..000000000000
--- a/arch/parisc/include/asm/bugs.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * include/asm-parisc/bugs.h
- *
- * Copyright (C) 1999 Mike Shaver
- */
-
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- * void check_bugs(void);
- */
-
-#include <asm/processor.h>
-
-static inline void check_bugs(void)
-{
-// identify_cpu(&boot_cpu_data);
-}
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 0c83644bfa5c..b4076ac51005 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -57,6 +57,11 @@ extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_lock_irqsave(mapping, flags) \
+ xa_lock_irqsave(&mapping->i_pages, flags)
+#define flush_dcache_mmap_unlock_irqrestore(mapping, flags) \
+ xa_unlock_irqrestore(&mapping->i_pages, flags)
+
#define flush_icache_page(vma,page) do { \
flush_kernel_dcache_page(page); \
diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h
index 9d3d7737c58b..a005ebc54779 100644
--- a/arch/parisc/include/asm/hardware.h
+++ b/arch/parisc/include/asm/hardware.h
@@ -10,12 +10,12 @@
#define SVERSION_ANY_ID PA_SVERSION_ANY_ID
struct hp_hardware {
- unsigned short hw_type:5; /* HPHW_xxx */
- unsigned short hversion;
- unsigned long sversion:28;
- unsigned short opt;
- const char name[80]; /* The hardware description */
-};
+ unsigned int hw_type:8; /* HPHW_xxx */
+ unsigned int hversion:12;
+ unsigned int sversion:12;
+ unsigned char opt;
+ unsigned char name[59]; /* The hardware description */
+} __packed;
struct parisc_device;
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index e080143e79a3..b432a67c793c 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -2,14 +2,28 @@
#ifndef __PARISC_LDCW_H
#define __PARISC_LDCW_H
-#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
and GCC only guarantees 8-byte alignment for stack locals, we can't
be assured of 16-byte alignment for atomic lock data even if we
specify "__attribute ((aligned(16)))" in the type declaration. So,
we use a struct containing an array of four ints for the atomic lock
type and dynamically select the 16-byte aligned int from the array
- for the semaphore. */
+ for the semaphore. */
+
+/* From: "Jim Hull" <jim.hull of hp.com>
+ I've attached a summary of the change, but basically, for PA 2.0, as
+ long as the ",CO" (coherent operation) completer is implemented, then the
+ 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+ they only require "natural" alignment (4-byte for ldcw, 8-byte for
+ ldcd).
+
+ Although the cache control hint is accepted by all PA 2.0 processors,
+ it is only implemented on PA8800/PA8900 CPUs. Prior PA8X00 CPUs still
+ require 16-byte alignment. If the address is unaligned, the operation
+ of the instruction is undefined. The ldcw instruction does not generate
+ unaligned data reference traps so misaligned accesses are not detected.
+ This hid the problem for years. So, restore the 16-byte alignment dropped
+ by Kyle McMartin in "Remove __ldcw_align for PA-RISC 2.0 processors". */
#define __PA_LDCW_ALIGNMENT 16
#define __PA_LDCW_ALIGN_ORDER 4
@@ -19,22 +33,12 @@
& ~(__PA_LDCW_ALIGNMENT - 1); \
(volatile unsigned int *) __ret; \
})
-#define __LDCW "ldcw"
-#else /*CONFIG_PA20*/
-/* From: "Jim Hull" <jim.hull of hp.com>
- I've attached a summary of the change, but basically, for PA 2.0, as
- long as the ",CO" (coherent operation) completer is specified, then the
- 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
- they only require "natural" alignment (4-byte for ldcw, 8-byte for
- ldcd). */
-
-#define __PA_LDCW_ALIGNMENT 4
-#define __PA_LDCW_ALIGN_ORDER 2
-#define __ldcw_align(a) (&(a)->slock)
+#ifdef CONFIG_PA20
#define __LDCW "ldcw,co"
-
-#endif /*!CONFIG_PA20*/
+#else
+#define __LDCW "ldcw"
+#endif
/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
We don't explicitly expose that "*a" may be written as reload
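
The ldcw.h change restores the 16-byte alignment requirement and keeps the long-standing workaround described in the comment: since kmalloc and stack slots only guarantee 8-byte alignment, the lock embeds an array of four ints and the one 16-byte-aligned word inside it is computed at run time. A user-space sketch of that alignment computation:

#include <stdio.h>
#include <stdint.h>

typedef struct { volatile unsigned int lock[4]; } arch_spinlock_sketch;

static volatile unsigned int *ldcw_align(arch_spinlock_sketch *a)
{
        /* round up to the next 16-byte boundary inside the 16-byte array */
        uintptr_t ret = ((uintptr_t)a->lock + 15) & ~(uintptr_t)15;

        return (volatile unsigned int *)ret;
}

int main(void)
{
        arch_spinlock_sketch s;
        volatile unsigned int *w = ldcw_align(&s);

        printf("aligned word at %p (mod 16 = %zu)\n",
               (void *)w, (size_t)((uintptr_t)w & 15));
        return 0;
}
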
diff --git a/arch/parisc/include/asm/led.h b/arch/parisc/include/asm/led.h
index 6de13d08a388..b70b9094fb7c 100644
--- a/arch/parisc/include/asm/led.h
+++ b/arch/parisc/include/asm/led.h
@@ -11,8 +11,8 @@
#define LED1 0x02
#define LED0 0x01 /* bottom (or furthest left) LED */
-#define LED_LAN_TX LED0 /* for LAN transmit activity */
-#define LED_LAN_RCV LED1 /* for LAN receive activity */
+#define LED_LAN_RCV LED0 /* for LAN receive activity */
+#define LED_LAN_TX LED1 /* for LAN transmit activity */
#define LED_DISK_IO LED2 /* for disk activity */
#define LED_HEARTBEAT LED3 /* heartbeat */
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 6e2a8176b0dd..40135be97965 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -97,7 +97,6 @@ struct cpuinfo_parisc {
unsigned long cpu_loc; /* CPU location from PAT firmware */
unsigned int state;
struct parisc_device *dev;
- unsigned long loops_per_jiffy;
};
extern struct system_cpuinfo_parisc boot_cpu_data;
diff --git a/arch/parisc/include/asm/ropes.h b/arch/parisc/include/asm/ropes.h
index 8e51c775c80a..62399c7ea94a 100644
--- a/arch/parisc/include/asm/ropes.h
+++ b/arch/parisc/include/asm/ropes.h
@@ -86,6 +86,9 @@ struct sba_device {
struct ioc ioc[MAX_IOC];
};
+/* list of SBA's in system, see drivers/parisc/sba_iommu.c */
+extern struct sba_device *sba_list;
+
#define ASTRO_RUNWAY_PORT 0x582
#define IKE_MERCED_PORT 0x803
#define REO_MERCED_PORT 0x804
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 42979c5704dc..82d2384c3f22 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -3,13 +3,8 @@
#define __ASM_SPINLOCK_TYPES_H
typedef struct {
-#ifdef CONFIG_PA20
- volatile unsigned int slock;
-# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
-#else
volatile unsigned int lock[4];
# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
-#endif
} arch_spinlock_t;
typedef struct {
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index 6fd8871e4081..6b5da7b06d43 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -48,28 +48,27 @@
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
-#define MADV_COLD 20 /* deactivate these pages */
-#define MADV_PAGEOUT 21 /* reclaim these pages */
-
-#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
-#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
+#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
+#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
-#define MADV_HUGEPAGE 67 /* Worth backing with hugepages */
-#define MADV_NOHUGEPAGE 68 /* Not worth backing with hugepages */
+#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */
-#define MADV_DONTDUMP 69 /* Explicity exclude from the core dump,
+#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump,
overrides the coredump filter bits */
-#define MADV_DODUMP 70 /* Clear the MADV_NODUMP flag */
+#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */
-#define MADV_WIPEONFORK 71 /* Zero memory on fork, child only */
-#define MADV_KEEPONFORK 72 /* Undo MADV_WIPEONFORK */
+#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
#define MADV_HWPOISON 100 /* poison a page for testing */
#define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */
/* compatibility flags */
#define MAP_FILE 0
-#define MAP_VARIABLE 0
#define PKEY_DISABLE_ACCESS 0x1
#define PKEY_DISABLE_WRITE 0x2
diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h
index 15211723ebf5..ee903551ae10 100644
--- a/arch/parisc/include/uapi/asm/pdc.h
+++ b/arch/parisc/include/uapi/asm/pdc.h
@@ -465,6 +465,7 @@ struct pdc_model { /* for PDC_MODEL */
unsigned long arch_rev;
unsigned long pot_key;
unsigned long curr_key;
+ unsigned long width; /* default of PSW_W bit (1=enabled) */
};
struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index a82b3eaa5398..fd84789b5ca4 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -328,6 +328,7 @@ void flush_dcache_page(struct page *page)
struct vm_area_struct *mpnt;
unsigned long offset;
unsigned long addr, old_addr = 0;
+ unsigned long flags;
pgoff_t pgoff;
if (mapping && !mapping_mapped(mapping)) {
@@ -347,7 +348,7 @@ void flush_dcache_page(struct page *page)
* declared as MAP_PRIVATE or MAP_SHARED), so we only need
* to flush one address here for them all to become coherent */
- flush_dcache_mmap_lock(mapping);
+ flush_dcache_mmap_lock_irqsave(mapping, flags);
vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
addr = mpnt->vm_start + offset;
@@ -370,7 +371,7 @@ void flush_dcache_page(struct page *page)
old_addr = addr;
}
}
- flush_dcache_mmap_unlock(mapping);
+ flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_page);
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index a5f3e50fe976..f1d494931328 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -520,7 +520,6 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
dev->id.hversion_rev = iodc_data[1] & 0x0f;
dev->id.sversion = ((iodc_data[4] & 0x0f) << 16) |
(iodc_data[5] << 8) | iodc_data[6];
- dev->hpa.name = parisc_pathname(dev);
dev->hpa.start = hpa;
/* This is awkward. The STI spec says that gfx devices may occupy
* 32MB or 64MB. Unfortunately, we don't know how to tell whether
@@ -534,10 +533,10 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
dev->hpa.end = hpa + 0xfff;
}
dev->hpa.flags = IORESOURCE_MEM;
- name = parisc_hardware_description(&dev->id);
- if (name) {
- strlcpy(dev->name, name, sizeof(dev->name));
- }
+ dev->hpa.name = dev->name;
+ name = parisc_hardware_description(&dev->id) ? : "unknown";
+ snprintf(dev->name, sizeof(dev->name), "%s [%s]",
+ name, parisc_pathname(dev));
/* Silently fail things like mouse ports which are subsumed within
* the keyboard controller
@@ -883,15 +882,13 @@ void __init walk_central_bus(void)
&root);
}
-static void print_parisc_device(struct parisc_device *dev)
+static __init void print_parisc_device(struct parisc_device *dev)
{
- char hw_path[64];
- static int count;
+ static int count __initdata;
- print_pa_hwpath(dev, hw_path);
- pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
- ++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type,
- dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
+ pr_info("%d. %s at %pap { type:%d, hv:%#x, sv:%#x, rev:%#x }",
+ ++count, dev->name, &(dev->hpa.start), dev->id.hw_type,
+ dev->id.hversion, dev->id.sversion, dev->id.hversion_rev);
if (dev->num_addrs) {
int k;
@@ -927,9 +924,9 @@ static __init void qemu_header(void)
pr_info("#define PARISC_MODEL \"%s\"\n\n",
boot_cpu_data.pdc.sys_model_name);
+ #define p ((unsigned long *)&boot_cpu_data.pdc.model)
pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, "
"0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n",
- #define p ((unsigned long *)&boot_cpu_data.pdc.model)
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
#undef p
@@ -1080,7 +1077,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
-static int print_one_device(struct device * dev, void * data)
+static __init int print_one_device(struct device * dev, void * data)
{
struct parisc_device * pdev = to_parisc_device(dev);
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 2f64f112934b..9ce4e525b392 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -511,13 +511,13 @@
* to a CPU TLB 4k PFN (4k => 12 bits to shift) */
#define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
#define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
+ #define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
.macro convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
copy \pte,\tmp
- extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
- 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+ extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
@@ -525,8 +525,7 @@
depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
- extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
- 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+ extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
#endif
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 1d976f2ebff0..46d48a0b6dec 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -122,10 +122,10 @@ static unsigned long f_extend(unsigned long address)
#ifdef CONFIG_64BIT
if(unlikely(parisc_narrow_firmware)) {
if((address & 0xff000000) == 0xf0000000)
- return 0xf0f0f0f000000000UL | (u32)address;
+ return (0xfffffff0UL << 32) | (u32)address;
if((address & 0xf0000000) == 0xf0000000)
- return 0xffffffff00000000UL | (u32)address;
+ return (0xffffffffUL << 32) | (u32)address;
}
#endif
return address;
@@ -1229,7 +1229,7 @@ static char __attribute__((aligned(64))) iodc_dbuf[4096];
*/
int pdc_iodc_print(const unsigned char *str, unsigned count)
{
- unsigned int i;
+ unsigned int i, found = 0;
unsigned long flags;
for (i = 0; i < count;) {
@@ -1238,6 +1238,7 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
iodc_dbuf[i+0] = '\r';
iodc_dbuf[i+1] = '\n';
i += 2;
+ found = 1;
goto print;
default:
iodc_dbuf[i] = str[i];
@@ -1254,7 +1255,7 @@ print:
__pa(iodc_retbuf), 0, __pa(iodc_dbuf), i, 0);
spin_unlock_irqrestore(&pdc_lock, flags);
- return i;
+ return i - found;
}
#if !defined(BOOTLOADER)
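
pdc_iodc_print() expands a '\n' in the source string to "\r\n" in its output buffer, so with the hunk above it returns i - found: the bytes written minus the one injected '\r', i.e. the number of source bytes actually consumed. A minimal user-space sketch of that counting rule (names are illustrative, not the kernel's):

	#include <stdio.h>

	/* Copy src into a console buffer, expanding '\n' to "\r\n".
	 * Returns source bytes consumed: output bytes written (i)
	 * minus the injected '\r' (found), mirroring the fix above. */
	static int iodc_print_count(const char *src, char *dbuf, int count)
	{
		int i, found = 0;

		for (i = 0; i < count;) {
			if (src[i] == '\n') {
				dbuf[i] = '\r';
				dbuf[i + 1] = '\n';
				i += 2;
				found = 1;
				break;
			}
			dbuf[i] = src[i];
			i++;
		}
		return i - found;
	}

	int main(void)
	{
		char buf[16];
		/* "hi\n": 4 bytes written, 3 source bytes consumed */
		printf("%d\n", iodc_print_count("hi\n", buf, 3));
		return 0;
	}
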
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index b836fc61a24f..f3a5c5e480cf 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -80,7 +80,7 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent,
#endif
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER)
int ftrace_enable_ftrace_graph_caller(void)
{
return 0;
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 951a339369dd..9fee4d626375 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -22,7 +22,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
- .level PA_ASM_LEVEL
+ .level 1.1
__INITDATA
ENTRY(boot_args)
@@ -69,6 +69,46 @@ $bss_loop:
stw,ma %arg2,4(%r1)
stw,ma %arg3,4(%r1)
+#if defined(CONFIG_PA20)
+ /* check for 64-bit capable CPU as required by current kernel */
+ ldi 32,%r10
+ mtctl %r10,%cr11
+ .level 2.0
+ mfctl,w %cr11,%r10
+ .level 1.1
+ comib,<>,n 0,%r10,$cpu_ok
+
+ load32 PA(msg1),%arg0
+ ldi msg1_end-msg1,%arg1
+$iodc_panic:
+ copy %arg0, %r10
+ copy %arg1, %r11
+ load32 PA(init_stack),%sp
+#define MEM_CONS 0x3A0
+ ldw MEM_CONS+32(%r0),%arg0 // HPA
+ ldi ENTRY_IO_COUT,%arg1
+ ldw MEM_CONS+36(%r0),%arg2 // SPA
+ ldw MEM_CONS+8(%r0),%arg3 // layers
+ load32 PA(__bss_start),%r1
+ stw %r1,-52(%sp) // arg4
+ stw %r0,-56(%sp) // arg5
+ stw %r10,-60(%sp) // arg6 = ptr to text
+ stw %r11,-64(%sp) // arg7 = len
+ stw %r0,-68(%sp) // arg8
+ load32 PA(.iodc_panic_ret), %rp
+ ldw MEM_CONS+40(%r0),%r1 // ENTRY_IODC
+ bv,n (%r1)
+.iodc_panic_ret:
+ b . /* wait endless with ... */
+ or %r10,%r10,%r10 /* qemu idle sleep */
+msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n"
+msg1_end:
+
+$cpu_ok:
+#endif
+
+ .level PA_ASM_LEVEL
+
/* Initialize startup VM. Just map first 16/32 MB of memory */
load32 PA(swapper_pg_dir),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 4d54aa70ea5f..b4aa5af943ba 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -388,7 +388,7 @@ union irq_stack_union {
volatile unsigned int lock[1];
};
-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
.slock = { 1,1,1,1 },
};
#endif
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index ca35d9a76e50..1621f678aa05 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -439,16 +439,32 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
free_pages((unsigned long)__va(dma_handle), order);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
+ /*
+ * fdc: The data cache line is written back to memory, if and only if
+ * it is dirty, and then invalidated from the data cache.
+ */
flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
+ unsigned long addr = (unsigned long) phys_to_virt(paddr);
+
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ flush_kernel_dcache_range(addr, size);
+ return;
+ case DMA_FROM_DEVICE:
+ purge_kernel_dcache_range_asm(addr, addr + size);
+ return;
+ default:
+ BUG();
+ }
}
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
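
The direction-aware arch_sync_dma_for_cpu() above matters on the receive side: after a device has DMA'd into memory, writing back a stale dirty CPU cache line would clobber the fresh data, so DMA_FROM_DEVICE only purges (invalidates). A hedged sketch of the driver sequence this protects, using the generic DMA API (rx_one and the device-programming step are illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	/* Receive path: 'buf' is filled by the device via DMA. */
	static int rx_one(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... hand 'handle' to the device, wait for completion ... */

		/* Ends in arch_sync_dma_for_cpu(): with the change above,
		 * parisc purges the lines instead of flushing them, so no
		 * stale dirty line can overwrite the DMA'd data. */
		dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
		return 0;
	}
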
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 230a6422b99f..0e67ab681b4e 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -124,13 +124,18 @@ void machine_power_off(void)
/* It seems we have no way to power the system off via
* software. The user has to press the button himself. */
- printk(KERN_EMERG "System shut down completed.\n"
- "Please power this system off now.");
+ printk("Power off or press RETURN to reboot.\n");
/* prevent soft lockup/stalled CPU messages for endless loop. */
rcu_sysrq_start();
lockup_detector_soft_poweroff();
- for (;;);
+ while (1) {
+ /* reboot if user presses RETURN key */
+ if (pdc_iodc_getc() == 13) {
+ printk("Rebooting...\n");
+ machine_restart(NULL);
+ }
+ }
}
void (*pm_power_off)(void);
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index b0045889864c..8b9c8f14760d 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -163,7 +163,6 @@ static int __init processor_probe(struct parisc_device *dev)
if (cpuid)
memset(p, 0, sizeof(struct cpuinfo_parisc));
- p->loops_per_jiffy = loops_per_jiffy;
p->dev = dev; /* Save IODC data in case we need it */
p->hpa = dev->hpa.start; /* save CPU hpa */
p->cpuid = cpuid; /* save CPU id */
@@ -373,10 +372,18 @@ int
show_cpuinfo (struct seq_file *m, void *v)
{
unsigned long cpu;
+ char cpu_name[60], *p;
+
+ /* strip PA path from CPU name to not confuse lscpu */
+ strlcpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name));
+ p = strrchr(cpu_name, '[');
+ if (p)
+ *(--p) = 0;
for_each_online_cpu(cpu) {
- const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
#ifdef CONFIG_SMP
+ const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
+
if (0 == cpuinfo->hpa)
continue;
#endif
@@ -421,8 +428,7 @@ show_cpuinfo (struct seq_file *m, void *v)
seq_printf(m, "model\t\t: %s - %s\n",
boot_cpu_data.pdc.sys_model_name,
- cpuinfo->dev ?
- cpuinfo->dev->name : "Unknown");
+ cpu_name);
seq_printf(m, "hversion\t: 0x%08x\n"
"sversion\t: 0x%08x\n",
@@ -433,8 +439,8 @@ show_cpuinfo (struct seq_file *m, void *v)
show_cache_info(m);
seq_printf(m, "bogomips\t: %lu.%02lu\n",
- cpuinfo->loops_per_jiffy / (500000 / HZ),
- (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100);
+ loops_per_jiffy / (500000 / HZ),
+ loops_per_jiffy / (5000 / HZ) % 100);
seq_printf(m, "software id\t: %ld\n\n",
boot_cpu_data.pdc.model.sw_id);
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 9f6ff7bc06f9..7407fee7d818 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -128,6 +128,12 @@ long arch_ptrace(struct task_struct *child, long request,
unsigned long tmp;
long ret = -EIO;
+ unsigned long user_regs_struct_size = sizeof(struct user_regs_struct);
+#ifdef CONFIG_64BIT
+ if (is_compat_task())
+ user_regs_struct_size /= 2;
+#endif
+
switch (request) {
/* Read the word at location addr in the USER area. For ptraced
@@ -183,14 +189,14 @@ long arch_ptrace(struct task_struct *child, long request,
return copy_regset_to_user(child,
task_user_regset_view(current),
REGSET_GENERAL,
- 0, sizeof(struct user_regs_struct),
+ 0, user_regs_struct_size,
datap);
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child,
task_user_regset_view(current),
REGSET_GENERAL,
- 0, sizeof(struct user_regs_struct),
+ 0, user_regs_struct_size,
datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */
@@ -304,6 +310,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
}
break;
+ case PTRACE_GETREGS:
+ case PTRACE_SETREGS:
+ case PTRACE_GETFPREGS:
+ case PTRACE_SETFPREGS:
+ return arch_ptrace(child, request, addr, data);
default:
ret = compat_ptrace_request(child, request, addr, data);
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index 2b16d8d6598f..c37010a13586 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -248,9 +248,6 @@ ENTRY_CFI(real64_call_asm)
/* save fn */
copy %arg2, %r31
- /* set up the new ap */
- ldo 64(%arg1), %r29
-
/* load up the arg registers from the saved arg area */
/* 32-bit calling convention passes first 4 args in registers */
ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */
@@ -262,7 +259,9 @@ ENTRY_CFI(real64_call_asm)
ldd 7*REG_SZ(%arg1), %r19
ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */
+ /* set up real-mode stack and real-mode ap */
tophys_r1 %sp
+ ldo -16(%sp), %r29 /* Reference param save area */
b,l rfi_virt2real,%r2
nop
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 5d458a44b09c..a795ce76bafe 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -373,3 +373,30 @@ long parisc_personality(unsigned long personality)
return err;
}
+
+/*
+ * madvise() wrapper
+ *
+ * Up to kernel v6.1 parisc has different values than all other
+ * platforms for the MADV_xxx flags listed below.
+ * To keep binary compatibility with existing userspace programs
+ * translate the former values to the new values.
+ *
+ * XXX: Remove this wrapper in year 2025 (or later)
+ */
+
+asmlinkage notrace long parisc_madvise(unsigned long start, size_t len_in, int behavior)
+{
+ switch (behavior) {
+ case 65: behavior = MADV_MERGEABLE; break;
+ case 66: behavior = MADV_UNMERGEABLE; break;
+ case 67: behavior = MADV_HUGEPAGE; break;
+ case 68: behavior = MADV_NOHUGEPAGE; break;
+ case 69: behavior = MADV_DONTDUMP; break;
+ case 70: behavior = MADV_DODUMP; break;
+ case 71: behavior = MADV_WIPEONFORK; break;
+ case 72: behavior = MADV_KEEPONFORK; break;
+ }
+
+ return sys_madvise(start, len_in, behavior);
+}
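
The wrapper above preserves the old parisc ABI: binaries that pass the historic numeric values still get the modern semantics. A small user-space sketch (hypothetical test program, not part of the patch) showing a legacy caller that keeps working:

	#define _GNU_SOURCE
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 4096;
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		/* Legacy parisc value 65: parisc_madvise() translates it
		 * to MADV_MERGEABLE before calling sys_madvise(). */
		return syscall(SYS_madvise, p, len, 65) ? 1 : 0;
	}
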
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 285ff516150c..e016bf6fae1e 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -131,7 +131,7 @@
116 common sysinfo sys_sysinfo compat_sys_sysinfo
117 common shutdown sys_shutdown
118 common fsync sys_fsync
-119 common madvise sys_madvise
+119 common madvise parisc_madvise
120 common clone sys_clone_wrapper
121 common setdomainname sys_setdomainname
122 common sendfile sys_sendfile compat_sys_sendfile
@@ -413,7 +413,7 @@
412 32 utimensat_time64 sys_utimensat sys_utimensat
413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
-416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 2a1060d747a5..10776991296a 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -268,7 +268,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
panic("Fatal exception");
oops_exit();
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
/* gdb uses break 4,8 */
@@ -305,8 +305,8 @@ static void handle_break(struct pt_regs *regs)
#endif
#ifdef CONFIG_KGDB
- if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
- iir == PARISC_KGDB_BREAK_INSN)) {
+ if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
+ iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) {
kgdb_handle_exception(9, SIGTRAP, 0, regs);
return;
}
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index 286cec4d86d7..cc6ed7496050 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -107,7 +107,7 @@
#define R1(i) (((i)>>21)&0x1f)
#define R2(i) (((i)>>16)&0x1f)
#define R3(i) ((i)&0x1f)
-#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
+#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1))
#define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
#define IM5_2(i) IM((i)>>16,5)
#define IM5_3(i) IM((i),5)
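
The FR3() change swaps the mask and shift so bit 4 of the 5-bit register field is no longer shifted out before masking. A worked example in plain C (values chosen to show the difference):

	#include <stdio.h>

	#define FR3_OLD(i) ((((i) << 1) & 0x1f) | (((i) >> 6) & 1))
	#define FR3_NEW(i) ((((i) & 0x1f) << 1) | (((i) >> 6) & 1))

	int main(void)
	{
		unsigned int field = 0x10;	/* register field with bit 4 set */

		/* old: (0x10 << 1) & 0x1f = 0x20 & 0x1f = 0   (bit lost)  */
		/* new: (0x10 & 0x1f) << 1 = 0x20 = 32         (preserved) */
		printf("old=%u new=%u\n", FR3_OLD(field), FR3_NEW(field));
		return 0;
	}
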
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 2ca9114fcf00..0c8436b06c49 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -234,7 +234,7 @@ config PPC_EARLY_DEBUG_40x
config PPC_EARLY_DEBUG_CPM
bool "Early serial debugging for Freescale CPM-based serial ports"
- depends on SERIAL_CPM
+ depends on SERIAL_CPM=y
help
Select this to enable early debugging for Freescale chips
using a CPM-based serial port. This assumes that the bootwrapper
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index c504c7ddda2e..17bcc362b8c1 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -17,23 +17,6 @@ HAS_BIARCH := $(call cc-option-yn, -m32)
# Set default 32 bits cross compilers for vdso and boot wrapper
CROSS32_COMPILE ?=
-ifeq ($(HAS_BIARCH),y)
-ifeq ($(CROSS32_COMPILE),)
-ifdef CONFIG_PPC32
-# These options will be overridden by any -mcpu option that the CPU
-# or platform code sets later on the command line, but they are needed
-# to set a sane 32-bit cpu target for the 64-bit cross compiler which
-# may default to the wrong ISA.
-KBUILD_CFLAGS += -mcpu=powerpc
-KBUILD_AFLAGS += -mcpu=powerpc
-endif
-endif
-endif
-
-ifdef CONFIG_PPC_BOOK3S_32
-KBUILD_CFLAGS += -mcpu=powerpc
-endif
-
# If we're on a ppc/ppc64/ppc64le machine use that defconfig, otherwise just use
# ppc64_defconfig because we have nothing better to go on.
uname := $(shell uname -m)
@@ -110,7 +93,7 @@ aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
ifeq ($(HAS_BIARCH),y)
KBUILD_CFLAGS += -m$(BITS)
-KBUILD_AFLAGS += -m$(BITS) -Wl,-a$(BITS)
+KBUILD_AFLAGS += -m$(BITS)
KBUILD_LDFLAGS += -m elf$(BITS)$(LDEMULATION)
endif
@@ -172,7 +155,7 @@ CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power9,-mtune=power8)
else
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5))
-CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mcpu=power5,-mcpu=power4)
+CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4
endif
else ifdef CONFIG_PPC_BOOK3E_64
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
@@ -192,6 +175,7 @@ endif
endif
CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
+AFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
# Altivec option not allowed with e500mc64 in GCC.
ifdef CONFIG_ALTIVEC
@@ -202,14 +186,6 @@ endif
CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU)
CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU))
-ifdef CONFIG_PPC32
-ifdef CONFIG_PPC_E500MC
-CFLAGS-y += $(call cc-option,-mcpu=e500mc,-mcpu=powerpc)
-else
-CFLAGS-$(CONFIG_E500) += $(call cc-option,-mcpu=8540 -msoft-float,-mcpu=powerpc)
-endif
-endif
-
asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
KBUILD_CPPFLAGS += -I $(srctree)/arch/$(ARCH) $(asinstr)
@@ -455,3 +431,11 @@ checkbin:
echo -n '*** Please use a different binutils version.' ; \
false ; \
fi
+ @if test "x${CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT}" = "xy" -a \
+ "x${CONFIG_LD_IS_BFD}" = "xy" -a \
+ "${CONFIG_LD_VERSION}" = "23700" ; then \
+ echo -n '*** binutils 2.37 drops unused section symbols, which recordmcount ' ; \
+ echo 'is unable to handle.' ; \
+ echo '*** Please use a different binutils version.' ; \
+ false ; \
+ fi
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 8c69bd07ada6..0db276fa2011 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -30,6 +30,7 @@ endif
BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
+ $(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \
-pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
$(LINUXINCLUDE)
diff --git a/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi b/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi
new file mode 100644
index 000000000000..7e2a90cde72e
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi
@@ -0,0 +1,51 @@
+/*
+ * e500v1 Power ISA Device Tree Source (include)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/ {
+ cpus {
+ power-isa-version = "2.03";
+ power-isa-b; // Base
+ power-isa-e; // Embedded
+ power-isa-atb; // Alternate Time Base
+ power-isa-cs; // Cache Specification
+ power-isa-e.le; // Embedded.Little-Endian
+ power-isa-e.pm; // Embedded.Performance Monitor
+ power-isa-ecl; // Embedded Cache Locking
+ power-isa-mmc; // Memory Coherence
+ power-isa-sp; // Signal Processing Engine
+ power-isa-sp.fs; // SPE.Embedded Float Scalar Single
+ power-isa-sp.fv; // SPE.Embedded Float Vector
+ mmu-type = "power-embedded";
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8540ads.dts b/arch/powerpc/boot/dts/fsl/mpc8540ads.dts
index 18a885130538..e03ae130162b 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8540ads.dts
+++ b/arch/powerpc/boot/dts/fsl/mpc8540ads.dts
@@ -7,7 +7,7 @@
/dts-v1/;
-/include/ "e500v2_power_isa.dtsi"
+/include/ "e500v1_power_isa.dtsi"
/ {
model = "MPC8540ADS";
diff --git a/arch/powerpc/boot/dts/fsl/mpc8541cds.dts b/arch/powerpc/boot/dts/fsl/mpc8541cds.dts
index ac381e7b1c60..a2a6c5cf852e 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8541cds.dts
+++ b/arch/powerpc/boot/dts/fsl/mpc8541cds.dts
@@ -7,7 +7,7 @@
/dts-v1/;
-/include/ "e500v2_power_isa.dtsi"
+/include/ "e500v1_power_isa.dtsi"
/ {
model = "MPC8541CDS";
diff --git a/arch/powerpc/boot/dts/fsl/mpc8555cds.dts b/arch/powerpc/boot/dts/fsl/mpc8555cds.dts
index 9f58db2a7e66..901b6ff06dfb 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8555cds.dts
+++ b/arch/powerpc/boot/dts/fsl/mpc8555cds.dts
@@ -7,7 +7,7 @@
/dts-v1/;
-/include/ "e500v2_power_isa.dtsi"
+/include/ "e500v1_power_isa.dtsi"
/ {
model = "MPC8555CDS";
diff --git a/arch/powerpc/boot/dts/fsl/mpc8560ads.dts b/arch/powerpc/boot/dts/fsl/mpc8560ads.dts
index a24722ccaebf..c2f9aea78b29 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8560ads.dts
+++ b/arch/powerpc/boot/dts/fsl/mpc8560ads.dts
@@ -7,7 +7,7 @@
/dts-v1/;
-/include/ "e500v2_power_isa.dtsi"
+/include/ "e500v1_power_isa.dtsi"
/ {
model = "MPC8560ADS";
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi
new file mode 100644
index 000000000000..437dab3fc017
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * QorIQ FMan v3 10g port #2 device tree stub [ controller @ offset 0x400000 ]
+ *
+ * Copyright 2022 Sean Anderson <sean.anderson@seco.com>
+ * Copyright 2012 - 2015 Freescale Semiconductor Inc.
+ */
+
+fman@400000 {
+ fman0_rx_0x08: port@88000 {
+ cell-index = <0x8>;
+ compatible = "fsl,fman-v3-port-rx";
+ reg = <0x88000 0x1000>;
+ fsl,fman-10g-port;
+ };
+
+ fman0_tx_0x28: port@a8000 {
+ cell-index = <0x28>;
+ compatible = "fsl,fman-v3-port-tx";
+ reg = <0xa8000 0x1000>;
+ fsl,fman-10g-port;
+ };
+
+ ethernet@e0000 {
+ cell-index = <0>;
+ compatible = "fsl,fman-memac";
+ reg = <0xe0000 0x1000>;
+ fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>;
+ ptp-timer = <&ptp_timer0>;
+ pcsphy-handle = <&pcsphy0>;
+ };
+
+ mdio@e1000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+ reg = <0xe1000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
+
+ pcsphy0: ethernet-phy@0 {
+ reg = <0x0>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi
new file mode 100644
index 000000000000..ad116b17850a
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * QorIQ FMan v3 10g port #3 device tree stub [ controller @ offset 0x400000 ]
+ *
+ * Copyright 2022 Sean Anderson <sean.anderson@seco.com>
+ * Copyright 2012 - 2015 Freescale Semiconductor Inc.
+ */
+
+fman@400000 {
+ fman0_rx_0x09: port@89000 {
+ cell-index = <0x9>;
+ compatible = "fsl,fman-v3-port-rx";
+ reg = <0x89000 0x1000>;
+ fsl,fman-10g-port;
+ };
+
+ fman0_tx_0x29: port@a9000 {
+ cell-index = <0x29>;
+ compatible = "fsl,fman-v3-port-tx";
+ reg = <0xa9000 0x1000>;
+ fsl,fman-10g-port;
+ };
+
+ ethernet@e2000 {
+ cell-index = <1>;
+ compatible = "fsl,fman-memac";
+ reg = <0xe2000 0x1000>;
+ fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>;
+ ptp-timer = <&ptp_timer0>;
+ pcsphy-handle = <&pcsphy1>;
+ };
+
+ mdio@e3000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+ reg = <0xe3000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
+
+ pcsphy1: ethernet-phy@0 {
+ reg = <0x0>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
index ecbb447920bc..27714dc2f04a 100644
--- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
@@ -609,8 +609,8 @@
/include/ "qoriq-bman1.dtsi"
/include/ "qoriq-fman3-0.dtsi"
-/include/ "qoriq-fman3-0-1g-0.dtsi"
-/include/ "qoriq-fman3-0-1g-1.dtsi"
+/include/ "qoriq-fman3-0-10g-2.dtsi"
+/include/ "qoriq-fman3-0-10g-3.dtsi"
/include/ "qoriq-fman3-0-1g-2.dtsi"
/include/ "qoriq-fman3-0-1g-3.dtsi"
/include/ "qoriq-fman3-0-1g-4.dtsi"
@@ -659,3 +659,19 @@
interrupts = <16 2 1 9>;
};
};
+
+&fman0_rx_0x08 {
+ /delete-property/ fsl,fman-10g-port;
+};
+
+&fman0_tx_0x28 {
+ /delete-property/ fsl,fman-10g-port;
+};
+
+&fman0_rx_0x09 {
+ /delete-property/ fsl,fman-10g-port;
+};
+
+&fman0_tx_0x29 {
+ /delete-property/ fsl,fman-10g-port;
+};
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 682d68f39c2b..b75b62b094b5 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -253,8 +253,6 @@ CONFIG_ATM_LANE=m
CONFIG_ATM_BR2684=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
-CONFIG_DECNET=m
-CONFIG_DECNET_ROUTER=y
CONFIG_IPX=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
diff --git a/arch/powerpc/include/asm/bugs.h b/arch/powerpc/include/asm/bugs.h
deleted file mode 100644
index 01b8f6ca4dbb..000000000000
--- a/arch/powerpc/include/asm/bugs.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _ASM_POWERPC_BUGS_H
-#define _ASM_POWERPC_BUGS_H
-
-/*
- */
-
-/*
- * This file is included by 'init/main.c' to check for
- * architecture-dependent bugs.
- */
-
-static inline void check_bugs(void) { }
-
-#endif /* _ASM_POWERPC_BUGS_H */
diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h
index 4da4fcba0684..11d2da692fe8 100644
--- a/arch/powerpc/include/asm/imc-pmu.h
+++ b/arch/powerpc/include/asm/imc-pmu.h
@@ -132,7 +132,7 @@ struct imc_pmu {
* are inited.
*/
struct imc_pmu_ref {
- struct mutex lock;
+ spinlock_t lock;
unsigned int id;
int refc;
};
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 0699cfeeb8c9..2141ae7441a9 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -375,5 +375,9 @@ extern void *abatron_pteptrs[2];
#include <asm/nohash/mmu.h>
#endif
+#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
+#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MMU_H_ */
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index 91c69ff53a8a..50c2198c2c76 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -42,9 +42,6 @@ u64 memory_hotplug_max(void);
#else
#define memory_hotplug_max() memblock_end_of_DRAM()
#endif /* CONFIG_NEED_MULTIPLE_NODES */
-#ifdef CONFIG_FA_DUMP
-#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
-#endif
#endif /* __KERNEL__ */
#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index c9e4b2d90f65..93ecf4e80ca7 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -91,6 +91,13 @@ static inline pte_t pte_wrprotect(pte_t pte)
#define pte_wrprotect pte_wrprotect
+static inline int pte_read(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_RO) != _PAGE_NA;
+}
+
+#define pte_read pte_read
+
static inline int pte_write(pte_t pte)
{
return !(pte_val(pte) & _PAGE_RO);
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 9a33b8bd842d..c32cb88a1575 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -244,7 +244,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
{
unsigned long old;
- if (pte_young(*ptep))
+ if (!pte_young(*ptep))
return 0;
old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 3d2a78ab051a..15dec9994c78 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -45,7 +45,9 @@ static inline int pte_write(pte_t pte)
return pte_val(pte) & _PAGE_RW;
}
#endif
+#ifndef pte_read
static inline int pte_read(pte_t pte) { return 1; }
+#endif
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index f3f4710d4ff5..99129b0cd8b8 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -34,7 +34,7 @@ static inline long find_zero(unsigned long mask)
return leading_zero_bits >> 3;
}
-static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
{
unsigned long rhs = val | c->low_bits;
*data = rhs;
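
Widening has_zero() from bool to unsigned long matches the generic word-at-a-time contract, where a nonzero result may carry mask bits instead of just truth. A sketch of the canonical calling sequence, assuming the usual asm/word-at-a-time.h helpers and word-aligned input (in-tree users such as strscpy() also handle alignment and the tail):

	#include <asm/word-at-a-time.h>

	/* Count the bytes before the first NUL, one word at a time. */
	static inline size_t wordwise_strlen(const char *s)
	{
		const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
		const unsigned long *p = (const unsigned long *)s;
		unsigned long val, data;
		size_t len = 0;

		for (;; len += sizeof(unsigned long), p++) {
			val = *p;
			if (has_zero(val, &data, &constants)) {
				data = prep_zero_mask(val, data, &constants);
				data = create_zero_mask(data);
				return len + find_zero(data);
			}
		}
	}
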
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 4fd7efdf2a53..68decc2bf42b 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -1072,45 +1072,46 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
}
pr_info("EEH: Recovery successful.\n");
- } else {
- /*
- * About 90% of all real-life EEH failures in the field
- * are due to poorly seated PCI cards. Only 10% or so are
- * due to actual, failed cards.
- */
- pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
- "Please try reseating or replacing it\n",
- pe->phb->global_number, pe->addr);
+ goto out;
+ }
- eeh_slot_error_detail(pe, EEH_LOG_PERM);
+ /*
+ * About 90% of all real-life EEH failures in the field
+ * are due to poorly seated PCI cards. Only 10% or so are
+ * due to actual, failed cards.
+ */
+ pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
+ "Please try reseating or replacing it\n",
+ pe->phb->global_number, pe->addr);
- /* Notify all devices that they're about to go down. */
- eeh_set_channel_state(pe, pci_channel_io_perm_failure);
- eeh_set_irq_state(pe, false);
- eeh_pe_report("error_detected(permanent failure)", pe,
- eeh_report_failure, NULL);
+ eeh_slot_error_detail(pe, EEH_LOG_PERM);
- /* Mark the PE to be removed permanently */
- eeh_pe_state_mark(pe, EEH_PE_REMOVED);
+ /* Notify all devices that they're about to go down. */
+ eeh_set_irq_state(pe, false);
+ eeh_pe_report("error_detected(permanent failure)", pe,
+ eeh_report_failure, NULL);
+ eeh_set_channel_state(pe, pci_channel_io_perm_failure);
- /*
- * Shut down the device drivers for good. We mark
- * all removed devices correctly to avoid access
- * the their PCI config any more.
- */
- if (pe->type & EEH_PE_VF) {
- eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
- eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
- } else {
- eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
- eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
+ /* Mark the PE to be removed permanently */
+ eeh_pe_state_mark(pe, EEH_PE_REMOVED);
- pci_lock_rescan_remove();
- pci_hp_remove_devices(bus);
- pci_unlock_rescan_remove();
- /* The passed PE should no longer be used */
- return;
- }
+ /*
+ * Shut down the device drivers for good. We mark
+ * all removed devices correctly to avoid accessing
+ * their PCI config any more.
+ */
+ if (pe->type & EEH_PE_VF) {
+ eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
+ eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
+ } else {
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
+ eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
+
+ pci_lock_rescan_remove();
+ pci_hp_remove_devices(bus);
+ pci_unlock_rescan_remove();
+ /* The passed PE should no longer be used */
+ return;
}
out:
@@ -1206,10 +1207,10 @@ void eeh_handle_special_event(void)
/* Notify all devices to be down */
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
- eeh_set_channel_state(pe, pci_channel_io_perm_failure);
eeh_pe_report(
"error_detected(permanent failure)", pe,
eeh_report_failure, NULL);
+ eeh_set_channel_state(pe, pci_channel_io_perm_failure);
pci_lock_rescan_remove();
list_for_each_entry(hose, &hose_list, list_node) {
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 69d64f406204..15405db43141 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -629,6 +629,7 @@ int __init fadump_reserve_mem(void)
return ret;
error_out:
fw_dump.fadump_enabled = 0;
+ fw_dump.reserve_dump_area_size = 0;
return 0;
}
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 0bb991ddd264..81981bb878ee 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -24,6 +24,15 @@
#include <asm/feature-fixups.h>
#ifdef CONFIG_VSX
+#define __REST_1FPVSR(n,c,base) \
+BEGIN_FTR_SECTION \
+ b 2f; \
+END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
+ REST_FPR(n,base); \
+ b 3f; \
+2: REST_VSR(n,c,base); \
+3:
+
#define __REST_32FPVSRS(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
@@ -42,9 +51,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
2: SAVE_32VSRS(n,c,base); \
3:
#else
+#define __REST_1FPVSR(n,b,base) REST_FPR(n, base)
#define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
#define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
#endif
+#define REST_1FPVSR(n,c,base) __REST_1FPVSR(n,__REG_##c,__REG_##base)
#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
@@ -68,6 +79,7 @@ _GLOBAL(store_fp_state)
SAVE_32FPVSRS(0, R4, R3)
mffs fr0
stfd fr0,FPSTATE_FPSCR(r3)
+ REST_1FPVSR(0, R4, R3)
blr
EXPORT_SYMBOL(store_fp_state)
@@ -132,6 +144,7 @@ _GLOBAL(save_fpu)
2: SAVE_32FPVSRS(0, R4, R6)
mffs fr0
stfd fr0,FPSTATE_FPSCR(r6)
+ REST_1FPVSR(0, R4, R6)
blr
/*
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index edaab1142498..2b0e2eb7bde8 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -922,7 +922,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
*/
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
- stw r5, 0xf0(r0) /* This much match your Abatron config */
+ stw r5, 0xf0(0) /* This must match your Abatron config */
lis r6, swapper_pg_dir@h
ori r6, r6, swapper_pg_dir@l
tophys(r5, r5)
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 1007ec36b4cb..fb736fdb4193 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -247,6 +247,11 @@ disable:
return false;
}
+/*
+ * Handle a DABR or DAWR exception.
+ *
+ * Called in atomic context.
+ */
int hw_breakpoint_handler(struct die_args *args)
{
int rc = NOTIFY_STOP;
@@ -315,6 +320,8 @@ NOKPROBE_SYMBOL(hw_breakpoint_handler);
/*
* Handle single-step exceptions following a DABR hit.
+ *
+ * Called in atomic context.
*/
static int single_step_dabr_instruction(struct die_args *args)
{
@@ -355,6 +362,8 @@ NOKPROBE_SYMBOL(single_step_dabr_instruction);
/*
* Handle debug exception notifications.
+ *
+ * Called in atomic context.
*/
int hw_breakpoint_exceptions_notify(
struct notifier_block *unused, unsigned long val, void *data)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d7d42bd448c4..dd062eef533b 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -133,17 +133,28 @@ static int fail_iommu_bus_notify(struct notifier_block *nb,
return 0;
}
-static struct notifier_block fail_iommu_bus_notifier = {
+/*
+ * PCI and VIO buses need separate notifier_block structs, since they're linked
+ * list nodes. Sharing a notifier_block would mean that any notifiers later
+ * registered for PCI buses would also get called by VIO buses and vice versa.
+ */
+static struct notifier_block fail_iommu_pci_bus_notifier = {
.notifier_call = fail_iommu_bus_notify
};
+#ifdef CONFIG_IBMVIO
+static struct notifier_block fail_iommu_vio_bus_notifier = {
+ .notifier_call = fail_iommu_bus_notify
+};
+#endif
+
static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
- bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
+ bus_register_notifier(&pci_bus_type, &fail_iommu_pci_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
- bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
+ bus_register_notifier(&vio_bus_type, &fail_iommu_vio_bus_notifier);
#endif
return 0;
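
As the comment above says, struct notifier_block is itself the chain's list node, so a single instance registered on two buses would splice both chains together. A minimal illustration of the one-block-per-chain rule (the chain and callback names are hypothetical; the notifier API is real):

	#include <linux/init.h>
	#include <linux/notifier.h>

	static int my_notify(struct notifier_block *nb, unsigned long action,
			     void *data)
	{
		return NOTIFY_OK;
	}

	/* One notifier_block per chain: the linkage lives inside the
	 * struct, so sharing one instance corrupts both lists. */
	static struct notifier_block my_nb_a = { .notifier_call = my_notify };
	static struct notifier_block my_nb_b = { .notifier_call = my_notify };

	static BLOCKING_NOTIFIER_HEAD(chain_a);
	static BLOCKING_NOTIFIER_HEAD(chain_b);

	static int __init my_setup(void)
	{
		blocking_notifier_chain_register(&chain_a, &my_nb_a);
		blocking_notifier_chain_register(&chain_b, &my_nb_b);
		return 0;
	}
	device_initcall(my_setup);
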
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index a2c258a8d736..b9cb9fa9207a 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -66,23 +66,35 @@ void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
pci_dma_ops = dma_ops;
}
-/*
- * This function should run under locking protection, specifically
- * hose_spinlock.
- */
static int get_phb_number(struct device_node *dn)
{
int ret, phb_id = -1;
- u32 prop_32;
u64 prop;
/*
* Try fixed PHB numbering first, by checking archs and reading
- * the respective device-tree properties. Firstly, try powernv by
- * reading "ibm,opal-phbid", only present in OPAL environment.
+ * the respective device-tree properties. First, try reading the
+ * standard "linux,pci-domain", then "ibm,opal-phbid" (only present
+ * in the powernv OPAL environment), then the device-tree alias,
+ * and as a last resort use the lower bits of the "reg" property.
*/
- ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
+ ret = of_get_pci_domain_nr(dn);
+ if (ret >= 0) {
+ prop = ret;
+ ret = 0;
+ }
+ if (ret)
+ ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
+
if (ret) {
+ ret = of_alias_get_id(dn, "pci");
+ if (ret >= 0) {
+ prop = ret;
+ ret = 0;
+ }
+ }
+ if (ret) {
+ u32 prop_32;
ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
prop = prop_32;
}
@@ -90,18 +102,20 @@ static int get_phb_number(struct device_node *dn)
if (!ret)
phb_id = (int)(prop & (MAX_PHBS - 1));
+ spin_lock(&hose_spinlock);
+
/* We need to be sure to not use the same PHB number twice. */
if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
- return phb_id;
+ goto out_unlock;
- /*
- * If not pseries nor powernv, or if fixed PHB numbering tried to add
- * the same PHB number twice, then fallback to dynamic PHB numbering.
- */
+ /* If everything fails then fall back to dynamic PHB numbering. */
phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
BUG_ON(phb_id >= MAX_PHBS);
set_bit(phb_id, phb_bitmap);
+out_unlock:
+ spin_unlock(&hose_spinlock);
+
return phb_id;
}
@@ -112,10 +126,13 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
if (phb == NULL)
return NULL;
- spin_lock(&hose_spinlock);
+
phb->global_number = get_phb_number(dev);
+
+ spin_lock(&hose_spinlock);
list_add_tail(&phb->list_node, &hose_list);
spin_unlock(&hose_spinlock);
+
phb->dn = dev;
phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index d876eda92609..d28433f0fc8e 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -325,6 +325,7 @@ struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
INIT_LIST_HEAD(&pdn->list);
parent = of_get_parent(dn);
pdn->parent = parent ? PCI_DN(parent) : NULL;
+ of_node_put(parent);
if (pdn->parent)
list_add_tail(&pdn->list, &pdn->parent->child_list);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 537142b877b8..d1ba17501343 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -740,6 +740,13 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
+ /*
+ * As generic code authors expect to be able to use static keys
+ * in early_param() handlers, we initialize the static keys just
+ * before parsing early params (it's fine to call jump_label_init()
+ * more than once).
+ */
+ jump_label_init();
parse_early_param();
/* make sure we've parsed cmdline for mem= before this */
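
The early jump_label_init() above is what lets early_param() handlers flip static keys. A hedged sketch of such a handler (my_feature_key and the parameter name are hypothetical):

	#include <linux/init.h>
	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(my_feature_key);

	static int __init my_feature_setup(char *str)
	{
		/* Safe only because jump_label_init() now runs before
		 * parse_early_param() on this path. */
		static_branch_enable(&my_feature_key);
		return 0;
	}
	early_param("my_feature", my_feature_setup);
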
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 35e246e39705..0793579f2bb9 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -51,10 +51,10 @@ struct rtas_t rtas = {
EXPORT_SYMBOL(rtas);
DEFINE_SPINLOCK(rtas_data_buf_lock);
-EXPORT_SYMBOL(rtas_data_buf_lock);
+EXPORT_SYMBOL_GPL(rtas_data_buf_lock);
-char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
-EXPORT_SYMBOL(rtas_data_buf);
+char rtas_data_buf[RTAS_DATA_BUF_SIZE] __aligned(SZ_4K);
+EXPORT_SYMBOL_GPL(rtas_data_buf);
unsigned long rtas_rmo_buf;
@@ -63,7 +63,7 @@ unsigned long rtas_rmo_buf;
* This is done like this so rtas_flash can be a module.
*/
void (*rtas_flash_term_hook)(int);
-EXPORT_SYMBOL(rtas_flash_term_hook);
+EXPORT_SYMBOL_GPL(rtas_flash_term_hook);
/* RTAS use home made raw locking instead of spin_lock_irqsave
* because those can be called from within really nasty contexts
@@ -311,7 +311,7 @@ void rtas_progress(char *s, unsigned short hex)
spin_unlock(&progress_lock);
}
-EXPORT_SYMBOL(rtas_progress); /* needed by rtas_flash module */
+EXPORT_SYMBOL_GPL(rtas_progress); /* needed by rtas_flash module */
int rtas_token(const char *service)
{
@@ -321,7 +321,7 @@ int rtas_token(const char *service)
tokp = of_get_property(rtas.dev, service, NULL);
return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE;
}
-EXPORT_SYMBOL(rtas_token);
+EXPORT_SYMBOL_GPL(rtas_token);
int rtas_service_present(const char *service)
{
@@ -398,7 +398,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
}
if (buf)
- memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
+ memmove(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
}
return buf;
@@ -481,7 +481,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
}
return ret;
}
-EXPORT_SYMBOL(rtas_call);
+EXPORT_SYMBOL_GPL(rtas_call);
/* For RTAS_BUSY (-2), delay for 1 millisecond. For an extended busy status
* code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.
@@ -516,7 +516,7 @@ unsigned int rtas_busy_delay(int status)
return ms;
}
-EXPORT_SYMBOL(rtas_busy_delay);
+EXPORT_SYMBOL_GPL(rtas_busy_delay);
static int rtas_error_rc(int rtas_rc)
{
@@ -562,7 +562,7 @@ int rtas_get_power_level(int powerdomain, int *level)
return rtas_error_rc(rc);
return rc;
}
-EXPORT_SYMBOL(rtas_get_power_level);
+EXPORT_SYMBOL_GPL(rtas_get_power_level);
int rtas_set_power_level(int powerdomain, int level, int *setlevel)
{
@@ -580,7 +580,7 @@ int rtas_set_power_level(int powerdomain, int level, int *setlevel)
return rtas_error_rc(rc);
return rc;
}
-EXPORT_SYMBOL(rtas_set_power_level);
+EXPORT_SYMBOL_GPL(rtas_set_power_level);
int rtas_get_sensor(int sensor, int index, int *state)
{
@@ -598,7 +598,7 @@ int rtas_get_sensor(int sensor, int index, int *state)
return rtas_error_rc(rc);
return rc;
}
-EXPORT_SYMBOL(rtas_get_sensor);
+EXPORT_SYMBOL_GPL(rtas_get_sensor);
int rtas_get_sensor_fast(int sensor, int index, int *state)
{
@@ -659,7 +659,7 @@ int rtas_set_indicator(int indicator, int index, int new_value)
return rtas_error_rc(rc);
return rc;
}
-EXPORT_SYMBOL(rtas_set_indicator);
+EXPORT_SYMBOL_GPL(rtas_set_indicator);
/*
* Ignoring RTAS extended delay
@@ -714,6 +714,7 @@ void __noreturn rtas_halt(void)
/* Must be in the RMO region, so we place it here */
static char rtas_os_term_buf[2048];
+static s32 ibm_os_term_token = RTAS_UNKNOWN_SERVICE;
void rtas_os_term(char *str)
{
@@ -725,16 +726,20 @@ void rtas_os_term(char *str)
* this property may terminate the partition which we want to avoid
* since it interferes with panic_timeout.
*/
- if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
- RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
+ if (ibm_os_term_token == RTAS_UNKNOWN_SERVICE)
return;
snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
+ /*
+ * Keep calling as long as RTAS returns a "try again" status,
+ * but don't use rtas_busy_delay(), which potentially
+ * schedules.
+ */
do {
- status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
+ status = rtas_call(ibm_os_term_token, 1, 1, NULL,
__pa(rtas_os_term_buf));
- } while (rtas_busy_delay(status));
+ } while (rtas_busy_delay_time(status));
if (status != 0)
printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
@@ -1215,6 +1220,13 @@ void __init rtas_initialize(void)
no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
rtas.entry = no_entry ? rtas.base : entry;
+ /*
+ * Discover these now to avoid device tree lookups in the
+ * panic path.
+ */
+ if (of_property_read_bool(rtas.dev, "ibm,extended-os-term"))
+ ibm_os_term_token = rtas_token("ibm,os-term");
+
/* If RTAS was found, allocate the RMO buffer for it and look for
* the stop-self token if any
*/
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 84f794782c62..7defca2f8e8b 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -710,9 +710,9 @@ static int __init rtas_flash_init(void)
if (!rtas_validate_flash_data.buf)
return -ENOMEM;
- flash_block_cache = kmem_cache_create("rtas_flash_cache",
- RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
- NULL);
+ flash_block_cache = kmem_cache_create_usercopy("rtas_flash_cache",
+ RTAS_BLK_SIZE, RTAS_BLK_SIZE,
+ 0, 0, RTAS_BLK_SIZE, NULL);
if (!flash_block_cache) {
printk(KERN_ERR "%s: failed to create block cache\n",
__func__);
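
kmem_cache_create_usercopy() is the hardened-usercopy-aware variant of kmem_cache_create(): the extra useroffset/usersize pair (above: 0 and RTAS_BLK_SIZE, i.e. the whole object) whitelists the only region that may be copied to or from user space. A sketch of the more common partial-whitelist pattern (struct blob is illustrative):

	#include <linux/slab.h>
	#include <linux/stddef.h>
	#include <linux/types.h>

	struct blob {
		u32 seq;		/* kernel-internal, never user-copied */
		char payload[504];	/* handed out via copy_to_user() */
	};

	static struct kmem_cache *blob_cache;

	static int __init blob_cache_init(void)
	{
		/* Hardened usercopy rejects user copies outside this window. */
		blob_cache = kmem_cache_create_usercopy("blob_cache",
					sizeof(struct blob), 0, SLAB_HWCACHE_ALIGN,
					offsetof(struct blob, payload),
					sizeof_field(struct blob, payload), NULL);
		return blob_cache ? 0 : -ENOMEM;
	}
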
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 5b905a2f4e4d..4a8f3526f5a5 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -25,6 +25,7 @@ sys_call_table:
#include <asm/syscall_table_64.h>
#undef __SYSCALL
#else
+ .p2align 2
#define __SYSCALL(nr, entry) .long entry
#include <asm/syscall_table_32.h>
#undef __SYSCALL
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index f9fd5f743eba..0bc39ff53233 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -36,6 +36,9 @@ _GLOBAL(ftrace_regs_caller)
/* Save the original return address in A's stack frame */
std r0,LRSAVE(r1)
+ /* Create a minimal stack frame for representing B */
+ stdu r1, -STACK_FRAME_MIN_SIZE(r1)
+
/* Create our stack frame + pt_regs */
stdu r1,-SWITCH_FRAME_SIZE(r1)
@@ -52,7 +55,7 @@ _GLOBAL(ftrace_regs_caller)
SAVE_10GPRS(22, r1)
/* Save previous stack pointer (r1) */
- addi r8, r1, SWITCH_FRAME_SIZE
+ addi r8, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
std r8, GPR1(r1)
/* Load special regs for save below */
@@ -65,6 +68,8 @@ _GLOBAL(ftrace_regs_caller)
mflr r7
/* Save it as pt_regs->nip */
std r7, _NIP(r1)
+ /* Also save it in B's stackframe header for proper unwind */
+ std r7, LRSAVE+SWITCH_FRAME_SIZE(r1)
/* Save the read LR in pt_regs->link */
std r0, _LINK(r1)
@@ -121,7 +126,7 @@ ftrace_regs_call:
ld r2, 24(r1)
/* Pop our stack frame */
- addi r1, r1, SWITCH_FRAME_SIZE
+ addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
#ifdef CONFIG_LIVEPATCH
/* Based on the cmpd above, if the NIP was altered handle livepatch */
@@ -145,7 +150,7 @@ ftrace_no_trace:
mflr r3
mtctr r3
REST_GPR(3, r1)
- addi r1, r1, SWITCH_FRAME_SIZE
+ addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
mtlr r0
bctr
@@ -153,6 +158,9 @@ _GLOBAL(ftrace_caller)
/* Save the original return address in A's stack frame */
std r0, LRSAVE(r1)
+ /* Create a minimal stack frame for representing B */
+ stdu r1, -STACK_FRAME_MIN_SIZE(r1)
+
/* Create our stack frame + pt_regs */
stdu r1, -SWITCH_FRAME_SIZE(r1)
@@ -166,6 +174,7 @@ _GLOBAL(ftrace_caller)
/* Get the _mcount() call site out of LR */
mflr r7
std r7, _NIP(r1)
+ std r7, LRSAVE+SWITCH_FRAME_SIZE(r1)
/* Save callee's TOC in the ABI compliant location */
std r2, 24(r1)
@@ -200,7 +209,7 @@ ftrace_call:
ld r2, 24(r1)
/* Pop our stack frame */
- addi r1, r1, SWITCH_FRAME_SIZE
+ addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
/* Reload original LR */
ld r0, LRSAVE(r1)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index ecfa460f66d1..402a05f3a484 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -246,7 +246,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
if (panic_on_oops)
panic("Fatal exception");
- do_exit(signr);
+ make_task_dead(signr);
}
NOKPROBE_SYMBOL(oops_end);
@@ -1424,10 +1424,12 @@ static int emulate_instruction(struct pt_regs *regs)
return -EINVAL;
}
+#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
return is_kernel_addr(addr);
}
+#endif
#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 8eb867dbad5f..8908bcb2e0a0 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -32,6 +32,7 @@ _GLOBAL(store_vr_state)
mfvscr v0
li r4, VRSTATE_VSCR
stvx v0, r4, r3
+ lvx v0, 0, r3
blr
EXPORT_SYMBOL(store_vr_state)
@@ -102,6 +103,7 @@ _GLOBAL(save_altivec)
mfvscr v0
li r4,VRSTATE_VSCR
stvx v0,r4,r7
+ lvx v0,0,r7
blr
#ifdef CONFIG_VSX
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 3ea360cad337..46dfb3701c6e 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@
#endif
#define BSS_FIRST_SECTIONS *(.bss.prominit)
+#define RUNTIME_DISCARD_EXIT
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
@@ -394,9 +395,12 @@ SECTIONS
DISCARDS
/DISCARD/ : {
*(*.EMB.apuinfo)
- *(.glink .iplt .plt .rela* .comment)
+ *(.glink .iplt .plt .comment)
*(.gnu.version*)
*(.gnu.attributes)
*(.eh_frame)
+#ifndef CONFIG_RELOCATABLE
+ *(.rela*)
+#endif
}
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 9d7344835469..25f1aaf78026 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -31,7 +31,7 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
gva_t eaddr, void *to, void *from,
unsigned long n)
{
- int uninitialized_var(old_pid), old_lpid;
+ int old_pid, old_lpid;
unsigned long quadrant, ret = n;
bool is_load = !!to;
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 8656b8d2ce55..841e6ed30f13 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -34,8 +34,8 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
# 64-bit linker creates .sfpr on demand for final link (vmlinux),
# so it is only needed for modules, and only for older linkers which
# do not support --save-restore-funcs
-ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
-extra-$(CONFIG_PPC64) += crtsavres.o
+ifeq ($(call ld-ifversion, -lt, 225000000, y)$(CONFIG_PPC64),yy)
+always += crtsavres.o
endif
obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 27650cd5857f..490162fb0893 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -469,6 +469,8 @@ static int do_fp_load(struct instruction_op *op, unsigned long ea,
} u;
nb = GETSIZE(op->type);
+ if (nb > sizeof(u))
+ return -EINVAL;
if (!address_ok(regs, ea, nb))
return -EFAULT;
rn = op->reg;
@@ -519,6 +521,8 @@ static int do_fp_store(struct instruction_op *op, unsigned long ea,
} u;
nb = GETSIZE(op->type);
+ if (nb > sizeof(u))
+ return -EINVAL;
if (!address_ok(regs, ea, nb))
return -EFAULT;
rn = op->reg;
@@ -563,6 +567,9 @@ static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
u8 b[sizeof(__vector128)];
} u = {};
+ if (size > sizeof(u))
+ return -EINVAL;
+
if (!address_ok(regs, ea & ~0xfUL, 16))
return -EFAULT;
/* align to multiple of size */
@@ -590,6 +597,9 @@ static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
u8 b[sizeof(__vector128)];
} u;
+ if (size > sizeof(u))
+ return -EINVAL;
+
if (!address_ok(regs, ea & ~0xfUL, 16))
return -EFAULT;
/* align to multiple of size */
diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c
index 0a05e51964c1..90111c9e7521 100644
--- a/arch/powerpc/math-emu/math_efp.c
+++ b/arch/powerpc/math-emu/math_efp.c
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/prctl.h>
+#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/reg.h>
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 2bf7e1b4fd82..c4890f4b0b6c 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -442,6 +442,7 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
set_pte_at(vma->vm_mm, addr, ptep, pte);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* For hash translation mode, we use the deposited table to store hash slot
* information and they are stored at PTRS_PER_PMD offset from related pmd
@@ -463,6 +464,7 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
return true;
}
+#endif
/*
* Does the CPU support tlbie?
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index bdcb07a98cd3..dc8f3fb02ac2 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1018,8 +1018,8 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
pte_t entry, unsigned long address, int psize)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
- _PAGE_RW | _PAGE_EXEC);
+ unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY |
+ _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
unsigned long change = pte_val(entry) ^ pte_val(*ptep);
/*
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 2a82984356f8..5ab4f868e919 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -104,14 +104,14 @@ static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
#endif
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
__dma_sync_page(paddr, size, dir);
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
__dma_sync_page(paddr, size, dir);
}
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index a84da92920f7..e7b9cc90fd9e 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -104,7 +104,7 @@ void pgtable_cache_add(unsigned int shift)
* as to leave enough 0 bits in the address to contain it. */
unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
HUGEPD_SHIFT_MASK + 1);
- struct kmem_cache *new;
+ struct kmem_cache *new = NULL;
/* It would be nice if this was a BUILD_BUG_ON(), but at the
* moment, gcc doesn't seem to recognize is_power_of_2 as a
@@ -117,7 +117,8 @@ void pgtable_cache_add(unsigned int shift)
align = max_t(unsigned long, align, minalign);
name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
- new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
+ if (name)
+ new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
if (!new)
panic("Could not allocate pgtable cache for order %d", shift);
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 210f1c28b8e4..cfb97fabd532 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -178,7 +178,7 @@ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long star
unsigned long nr_pfn = page_size / sizeof(struct page);
unsigned long start_pfn = page_to_pfn((struct page *)start);
- if ((start_pfn + nr_pfn) > altmap->end_pfn)
+ if ((start_pfn + nr_pfn - 1) > altmap->end_pfn)
return true;
if (start_pfn < altmap->base_pfn)
@@ -279,8 +279,7 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
start = _ALIGN_DOWN(start, page_size);
if (altmap) {
alt_start = altmap->base_pfn;
- alt_end = altmap->base_pfn + altmap->reserve +
- altmap->free + altmap->alloc + altmap->align;
+ alt_end = altmap->base_pfn + altmap->reserve + altmap->free;
}
pr_debug("vmemmap_free %lx...%lx\n", start, end);
diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
index 6577897673dd..22f1a7c3f436 100644
--- a/arch/powerpc/mm/kasan/Makefile
+++ b/arch/powerpc/mm/kasan/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
KASAN_SANITIZE := n
+KCOV_INSTRUMENT := n
obj-$(CONFIG_PPC32) += kasan_init_32.o
diff --git a/arch/powerpc/mm/ptdump/shared.c b/arch/powerpc/mm/ptdump/shared.c
index 784f8df17f73..f10f2158c8af 100644
--- a/arch/powerpc/mm/ptdump/shared.c
+++ b/arch/powerpc/mm/ptdump/shared.c
@@ -17,9 +17,9 @@ static const struct flag_info flag_array[] = {
.clear = " ",
}, {
.mask = _PAGE_RW,
- .val = _PAGE_RW,
- .set = "rw",
- .clear = "r ",
+ .val = 0,
+ .set = "r ",
+ .clear = "rw",
}, {
.mask = _PAGE_EXEC,
.val = _PAGE_EXEC,
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index c84bbd4298a0..4c9aaedd2b1b 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -64,6 +64,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
next_sp = fp[0];
if (next_sp == sp + STACK_INT_FRAME_SIZE &&
+ validate_sp(sp, current, STACK_INT_FRAME_SIZE) &&
fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
/*
* This looks like an interrupt frame for an
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 48e8f4b17b91..c5c7a30bd0fd 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -1313,7 +1313,7 @@ static int h_24x7_event_init(struct perf_event *event)
}
domain = event_get_domain(event);
- if (domain >= HV_PERF_DOMAIN_MAX) {
+ if (domain == 0 || domain >= HV_PERF_DOMAIN_MAX) {
pr_devel("invalid domain %d\n", domain);
return -EINVAL;
}
diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h
index 8965b4463d43..5e86371a20c7 100644
--- a/arch/powerpc/perf/hv-gpci-requests.h
+++ b/arch/powerpc/perf/hv-gpci-requests.h
@@ -79,6 +79,7 @@ REQUEST(__field(0, 8, partition_id)
)
#include I(REQUEST_END)
+#ifdef ENABLE_EVENTS_COUNTERINFO_V6
/*
* Not available for counter_info_version >= 0x8, use
* run_instruction_cycles_by_partition(0x100) instead.
@@ -92,6 +93,7 @@ REQUEST(__field(0, 8, partition_id)
__count(0x10, 8, cycles)
)
#include I(REQUEST_END)
+#endif
#define REQUEST_NAME system_performance_capabilities
#define REQUEST_NUM 0x40
@@ -103,6 +105,7 @@ REQUEST(__field(0, 1, perf_collect_privileged)
)
#include I(REQUEST_END)
+#ifdef ENABLE_EVENTS_COUNTERINFO_V6
#define REQUEST_NAME processor_bus_utilization_abc_links
#define REQUEST_NUM 0x50
#define REQUEST_IDX_KIND "hw_chip_id=?"
@@ -194,6 +197,7 @@ REQUEST(__field(0, 4, phys_processor_idx)
__count(0x28, 8, instructions_completed)
)
#include I(REQUEST_END)
+#endif
/* Processor_core_power_mode (0x95) skipped, no counters */
/* Affinity_domain_information_by_virtual_processor (0xA0) skipped,
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index 732cfc53e260..e85f1962be02 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -70,7 +70,7 @@ static struct attribute_group format_group = {
static struct attribute_group event_group = {
.name = "events",
- .attrs = hv_gpci_event_attrs,
+ /* .attrs is set in init */
};
#define HV_CAPS_ATTR(_name, _format) \
@@ -153,6 +153,20 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index,
ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
+
+ /*
+	 * A return value of 'H_PARAMETER' with detail_rc 'GEN_BUF_TOO_SMALL'
+	 * indicates that the current buffer size cannot accommodate
+	 * all the information, and that a partial buffer was returned.
+	 * Since this function only accesses data for a given starting index,
+	 * we don't need the whole data and can get the required count by
+	 * accessing the first entry's data.
+	 * Hence the hcall fails only when the return value is something other
+	 * than H_SUCCESS, or H_PARAMETER with detail_rc GEN_BUF_TOO_SMALL (0x1B).
+ */
+ if (ret == H_PARAMETER && be32_to_cpu(arg->params.detail_rc) == 0x1B)
+ ret = 0;
+
if (ret) {
pr_devel("hcall failed: 0x%lx\n", ret);
goto out;
@@ -217,6 +231,7 @@ static int h_gpci_event_init(struct perf_event *event)
{
u64 count;
u8 length;
+ unsigned long ret;
/* Not our event */
if (event->attr.type != event->pmu->type)
@@ -247,13 +262,23 @@ static int h_gpci_event_init(struct perf_event *event)
}
/* check if the request works... */
- if (single_gpci_request(event_get_request(event),
+ ret = single_gpci_request(event_get_request(event),
event_get_starting_index(event),
event_get_secondary_index(event),
event_get_counter_info_version(event),
event_get_offset(event),
length,
- &count)) {
+ &count);
+
+ /*
+	 * A return value of H_AUTHORITY means the partition is not permitted
+	 * to retrieve performance information and needs the
+	 * "Enable Performance Information Collection" option set.
+ */
+ if (ret == H_AUTHORITY)
+ return -EPERM;
+
+ if (ret) {
pr_devel("gpci hcall failed\n");
return -EINVAL;
}
@@ -280,6 +305,7 @@ static int hv_gpci_init(void)
int r;
unsigned long hret;
struct hv_perf_caps caps;
+ struct hv_gpci_request_buffer *arg;
hv_gpci_assert_offsets_correct();
@@ -298,6 +324,36 @@ static int hv_gpci_init(void)
/* sampling not supported */
h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ arg = (void *)get_cpu_var(hv_gpci_reqb);
+ memset(arg, 0, HGPCI_REQ_BUFFER_SIZE);
+
+ /*
+ * hcall H_GET_PERF_COUNTER_INFO populates the output
+ * counter_info_version value based on the system hypervisor.
+	 * Pass counter request 0x10, which corresponds to the request type
+	 * 'Dispatch_timebase_by_processor', to get the supported
+ * counter_info_version.
+ */
+ arg->params.counter_request = cpu_to_be32(0x10);
+
+ r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
+ virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
+ if (r) {
+ pr_devel("hcall failed, can't get supported counter_info_version: 0x%x\n", r);
+ arg->params.counter_info_version_out = 0x8;
+ }
+
+ /*
+ * Use counter_info_version_out value to assign
+ * required hv-gpci event list.
+ */
+ if (arg->params.counter_info_version_out >= 0x8)
+ event_group.attrs = hv_gpci_event_attrs;
+ else
+ event_group.attrs = hv_gpci_event_attrs_v6;
+
+ put_cpu_var(hv_gpci_reqb);
+
r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
if (r)
return r;
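
For reference, a minimal user-space sketch of the version-gated event-list selection that hv_gpci_init() performs above; the event names and the select helper are illustrative stand-ins, not the kernel's:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's two attribute arrays. */
static const char *attrs_v6[] = { "dispatch_timebase", "entitled_cycles", NULL };
static const char *attrs_v8plus[] = { "dispatch_timebase", NULL };

/* Pick the event list once, at init, from the firmware-reported version. */
static const char **select_event_attrs(unsigned int counter_info_version)
{
	/* 0x8 and above drop the events deprecated by getPerfCountInfo v1.018 */
	return counter_info_version >= 0x8 ? attrs_v8plus : attrs_v6;
}

int main(void)
{
	for (const char **p = select_event_attrs(0x6); *p; p++)
		printf("%s\n", *p);
	return 0;
}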
diff --git a/arch/powerpc/perf/hv-gpci.h b/arch/powerpc/perf/hv-gpci.h
index a3053eda5dcc..060e464d35c6 100644
--- a/arch/powerpc/perf/hv-gpci.h
+++ b/arch/powerpc/perf/hv-gpci.h
@@ -53,6 +53,7 @@ enum {
#define REQUEST_FILE "../hv-gpci-requests.h"
#define NAME_LOWER hv_gpci
#define NAME_UPPER HV_GPCI
+#define ENABLE_EVENTS_COUNTERINFO_V6
#include "req-gen/perf.h"
#undef REQUEST_FILE
#undef NAME_LOWER
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index d76e800a1f33..565dc073ceca 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -13,6 +13,7 @@
#include <asm/cputhreads.h>
#include <asm/smp.h>
#include <linux/string.h>
+#include <linux/spinlock.h>
/* Nest IMC data structures and variables */
@@ -49,7 +50,7 @@ static int trace_imc_mem_size;
* core and trace-imc
*/
static struct imc_pmu_ref imc_global_refc = {
- .lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(imc_global_refc.lock),
.id = 0,
.refc = 0,
};
@@ -291,6 +292,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
attr_group->attrs = attrs;
do {
ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
+ if (!ev_val_str)
+ continue;
dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
if (!dev_str)
continue;
@@ -298,6 +301,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
attrs[j++] = dev_str;
if (pmu->events[i].scale) {
ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
+ if (!ev_scale_str)
+ continue;
dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
if (!dev_str)
continue;
@@ -307,6 +312,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
if (pmu->events[i].unit) {
ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
+ if (!ev_unit_str)
+ continue;
dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
if (!dev_str)
continue;
@@ -393,7 +400,7 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
get_hard_smp_processor_id(cpu));
/*
* If this is the last cpu in this chip then, skip the reference
- * count mutex lock and make the reference count on this chip zero.
+ * count lock and make the reference count on this chip zero.
*/
ref = get_nest_pmu_ref(cpu);
if (!ref)
@@ -455,15 +462,15 @@ static void nest_imc_counters_release(struct perf_event *event)
/*
* See if we need to disable the nest PMU.
* If no events are currently in use, then we have to take a
- * mutex to ensure that we don't race with another task doing
+ * lock to ensure that we don't race with another task doing
* enable or disable the nest counters.
*/
ref = get_nest_pmu_ref(event->cpu);
if (!ref)
return;
- /* Take the mutex lock for this node and then decrement the reference count */
- mutex_lock(&ref->lock);
+ /* Take the lock for this node and then decrement the reference count */
+ spin_lock(&ref->lock);
if (ref->refc == 0) {
/*
* The scenario where this is true is, when perf session is
@@ -475,7 +482,7 @@ static void nest_imc_counters_release(struct perf_event *event)
* an OPAL call to disable the engine in that node.
*
*/
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
return;
}
ref->refc--;
@@ -483,7 +490,7 @@ static void nest_imc_counters_release(struct perf_event *event)
rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
get_hard_smp_processor_id(event->cpu));
if (rc) {
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
return;
}
@@ -491,7 +498,7 @@ static void nest_imc_counters_release(struct perf_event *event)
WARN(1, "nest-imc: Invalid event reference count\n");
ref->refc = 0;
}
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
}
static int nest_imc_event_init(struct perf_event *event)
@@ -550,26 +557,25 @@ static int nest_imc_event_init(struct perf_event *event)
/*
* Get the imc_pmu_ref struct for this node.
- * Take the mutex lock and then increment the count of nest pmu events
- * inited.
+ * Take the lock and then increment the count of nest pmu events inited.
*/
ref = get_nest_pmu_ref(event->cpu);
if (!ref)
return -EINVAL;
- mutex_lock(&ref->lock);
+ spin_lock(&ref->lock);
if (ref->refc == 0) {
rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
get_hard_smp_processor_id(event->cpu));
if (rc) {
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
pr_err("nest-imc: Unable to start the counters for node %d\n",
node_id);
return rc;
}
}
++ref->refc;
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
event->destroy = nest_imc_counters_release;
return 0;
@@ -605,9 +611,8 @@ static int core_imc_mem_init(int cpu, int size)
return -ENOMEM;
mem_info->vbase = page_address(page);
- /* Init the mutex */
core_imc_refc[core_id].id = core_id;
- mutex_init(&core_imc_refc[core_id].lock);
+ spin_lock_init(&core_imc_refc[core_id].lock);
rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
__pa((void *)mem_info->vbase),
@@ -696,9 +701,8 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
} else {
/*
- * If this is the last cpu in this core then, skip taking refernce
- * count mutex lock for this core and directly zero "refc" for
- * this core.
+ * If this is the last cpu in this core then skip taking reference
+ * count lock for this core and directly zero "refc" for this core.
*/
opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
get_hard_smp_processor_id(cpu));
@@ -713,11 +717,11 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
* last cpu in this core and core-imc event running
* in this cpu.
*/
- mutex_lock(&imc_global_refc.lock);
+ spin_lock(&imc_global_refc.lock);
if (imc_global_refc.id == IMC_DOMAIN_CORE)
imc_global_refc.refc--;
- mutex_unlock(&imc_global_refc.lock);
+ spin_unlock(&imc_global_refc.lock);
}
return 0;
}
@@ -732,7 +736,7 @@ static int core_imc_pmu_cpumask_init(void)
static void reset_global_refc(struct perf_event *event)
{
- mutex_lock(&imc_global_refc.lock);
+ spin_lock(&imc_global_refc.lock);
imc_global_refc.refc--;
/*
@@ -744,7 +748,7 @@ static void reset_global_refc(struct perf_event *event)
imc_global_refc.refc = 0;
imc_global_refc.id = 0;
}
- mutex_unlock(&imc_global_refc.lock);
+ spin_unlock(&imc_global_refc.lock);
}
static void core_imc_counters_release(struct perf_event *event)
@@ -757,17 +761,17 @@ static void core_imc_counters_release(struct perf_event *event)
/*
* See if we need to disable the IMC PMU.
* If no events are currently in use, then we have to take a
- * mutex to ensure that we don't race with another task doing
+ * lock to ensure that we don't race with another task doing
* enable or disable the core counters.
*/
core_id = event->cpu / threads_per_core;
- /* Take the mutex lock and decrement the refernce count for this core */
+	/* Take the lock and decrement the reference count for this core */
ref = &core_imc_refc[core_id];
if (!ref)
return;
- mutex_lock(&ref->lock);
+ spin_lock(&ref->lock);
if (ref->refc == 0) {
/*
* The scenario where this is true is, when perf session is
@@ -779,7 +783,7 @@ static void core_imc_counters_release(struct perf_event *event)
* an OPAL call to disable the engine in that core.
*
*/
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
return;
}
ref->refc--;
@@ -787,7 +791,7 @@ static void core_imc_counters_release(struct perf_event *event)
rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
get_hard_smp_processor_id(event->cpu));
if (rc) {
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
return;
}
@@ -795,7 +799,7 @@ static void core_imc_counters_release(struct perf_event *event)
WARN(1, "core-imc: Invalid event reference count\n");
ref->refc = 0;
}
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
reset_global_refc(event);
}
@@ -833,7 +837,6 @@ static int core_imc_event_init(struct perf_event *event)
if ((!pcmi->vbase))
return -ENODEV;
- /* Get the core_imc mutex for this core */
ref = &core_imc_refc[core_id];
if (!ref)
return -EINVAL;
@@ -841,22 +844,22 @@ static int core_imc_event_init(struct perf_event *event)
/*
* Core pmu units are enabled only when it is used.
* See if this is triggered for the first time.
- * If yes, take the mutex lock and enable the core counters.
+ * If yes, take the lock and enable the core counters.
* If not, just increment the count in core_imc_refc struct.
*/
- mutex_lock(&ref->lock);
+ spin_lock(&ref->lock);
if (ref->refc == 0) {
rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
get_hard_smp_processor_id(event->cpu));
if (rc) {
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
pr_err("core-imc: Unable to start the counters for core %d\n",
core_id);
return rc;
}
}
++ref->refc;
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
/*
* Since the system can run either in accumulation or trace-mode
@@ -867,7 +870,7 @@ static int core_imc_event_init(struct perf_event *event)
* to know whether any other trace/thread imc
* events are running.
*/
- mutex_lock(&imc_global_refc.lock);
+ spin_lock(&imc_global_refc.lock);
if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
/*
* No other trace/thread imc events are running in
@@ -876,10 +879,10 @@ static int core_imc_event_init(struct perf_event *event)
imc_global_refc.id = IMC_DOMAIN_CORE;
imc_global_refc.refc++;
} else {
- mutex_unlock(&imc_global_refc.lock);
+ spin_unlock(&imc_global_refc.lock);
return -EBUSY;
}
- mutex_unlock(&imc_global_refc.lock);
+ spin_unlock(&imc_global_refc.lock);
event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
event->destroy = core_imc_counters_release;
@@ -951,10 +954,10 @@ static int ppc_thread_imc_cpu_offline(unsigned int cpu)
mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
/* Reduce the refc if thread-imc event running on this cpu */
- mutex_lock(&imc_global_refc.lock);
+ spin_lock(&imc_global_refc.lock);
if (imc_global_refc.id == IMC_DOMAIN_THREAD)
imc_global_refc.refc--;
- mutex_unlock(&imc_global_refc.lock);
+ spin_unlock(&imc_global_refc.lock);
return 0;
}
@@ -994,7 +997,7 @@ static int thread_imc_event_init(struct perf_event *event)
if (!target)
return -EINVAL;
- mutex_lock(&imc_global_refc.lock);
+ spin_lock(&imc_global_refc.lock);
/*
* Check if any other trace/core imc events are running in the
* system, if not set the global id to thread-imc.
@@ -1003,10 +1006,10 @@ static int thread_imc_event_init(struct perf_event *event)
imc_global_refc.id = IMC_DOMAIN_THREAD;
imc_global_refc.refc++;
} else {
- mutex_unlock(&imc_global_refc.lock);
+ spin_unlock(&imc_global_refc.lock);
return -EBUSY;
}
- mutex_unlock(&imc_global_refc.lock);
+ spin_unlock(&imc_global_refc.lock);
event->pmu->task_ctx_nr = perf_sw_context;
event->destroy = reset_global_refc;
@@ -1128,25 +1131,25 @@ static int thread_imc_event_add(struct perf_event *event, int flags)
/*
* imc pmus are enabled only when it is used.
* See if this is triggered for the first time.
- * If yes, take the mutex lock and enable the counters.
+ * If yes, take the lock and enable the counters.
* If not, just increment the count in ref count struct.
*/
ref = &core_imc_refc[core_id];
if (!ref)
return -EINVAL;
- mutex_lock(&ref->lock);
+ spin_lock(&ref->lock);
if (ref->refc == 0) {
if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
get_hard_smp_processor_id(smp_processor_id()))) {
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
pr_err("thread-imc: Unable to start the counter\
for core %d\n", core_id);
return -EINVAL;
}
}
++ref->refc;
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
return 0;
}
@@ -1163,12 +1166,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
return;
}
- mutex_lock(&ref->lock);
+ spin_lock(&ref->lock);
ref->refc--;
if (ref->refc == 0) {
if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
get_hard_smp_processor_id(smp_processor_id()))) {
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
pr_err("thread-imc: Unable to stop the counters\
for core %d\n", core_id);
return;
@@ -1176,7 +1179,7 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
} else if (ref->refc < 0) {
ref->refc = 0;
}
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
/* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
@@ -1217,9 +1220,8 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
}
}
- /* Init the mutex, if not already */
trace_imc_refc[core_id].id = core_id;
- mutex_init(&trace_imc_refc[core_id].lock);
+ spin_lock_init(&trace_imc_refc[core_id].lock);
mtspr(SPRN_LDBAR, 0);
return 0;
@@ -1239,10 +1241,10 @@ static int ppc_trace_imc_cpu_offline(unsigned int cpu)
* Reduce the refc if any trace-imc event running
* on this cpu.
*/
- mutex_lock(&imc_global_refc.lock);
+ spin_lock(&imc_global_refc.lock);
if (imc_global_refc.id == IMC_DOMAIN_TRACE)
imc_global_refc.refc--;
- mutex_unlock(&imc_global_refc.lock);
+ spin_unlock(&imc_global_refc.lock);
return 0;
}
@@ -1345,17 +1347,17 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
}
mtspr(SPRN_LDBAR, ldbar_value);
- mutex_lock(&ref->lock);
+ spin_lock(&ref->lock);
if (ref->refc == 0) {
if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
get_hard_smp_processor_id(smp_processor_id()))) {
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
return -EINVAL;
}
}
++ref->refc;
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
return 0;
}
@@ -1388,19 +1390,19 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
return;
}
- mutex_lock(&ref->lock);
+ spin_lock(&ref->lock);
ref->refc--;
if (ref->refc == 0) {
if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
get_hard_smp_processor_id(smp_processor_id()))) {
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
return;
}
} else if (ref->refc < 0) {
ref->refc = 0;
}
- mutex_unlock(&ref->lock);
+ spin_unlock(&ref->lock);
trace_imc_event_stop(event, flags);
}
@@ -1424,7 +1426,7 @@ static int trace_imc_event_init(struct perf_event *event)
* no other thread is running any core/thread imc
* events
*/
- mutex_lock(&imc_global_refc.lock);
+ spin_lock(&imc_global_refc.lock);
if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
/*
* No core/thread imc events are running in the
@@ -1433,10 +1435,10 @@ static int trace_imc_event_init(struct perf_event *event)
imc_global_refc.id = IMC_DOMAIN_TRACE;
imc_global_refc.refc++;
} else {
- mutex_unlock(&imc_global_refc.lock);
+ spin_unlock(&imc_global_refc.lock);
return -EBUSY;
}
- mutex_unlock(&imc_global_refc.lock);
+ spin_unlock(&imc_global_refc.lock);
event->hw.idx = -1;
target = event->hw.target;
@@ -1509,10 +1511,10 @@ static int init_nest_pmu_ref(void)
i = 0;
for_each_node(nid) {
/*
- * Mutex lock to avoid races while tracking the number of
+ * Take the lock to avoid races while tracking the number of
* sessions using the chip's nest pmu units.
*/
- mutex_init(&nest_imc_refc[i].lock);
+ spin_lock_init(&nest_imc_refc[i].lock);
/*
* Loop to init the "id" with the node_id. Variable "i" initialized to
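
The mutex-to-spinlock conversion throughout this file matters because the refcounts are manipulated from CPU-hotplug and PMU callbacks, where sleeping is not allowed. A minimal sketch of the first-user-starts / last-user-stops pattern the driver uses, with a POSIX spinlock standing in for the kernel's spinlock_t and the OPAL counter calls stubbed out:

#include <pthread.h>
#include <stdio.h>

/* Same shape as the kernel's imc_pmu_ref after the conversion. */
struct pmu_ref {
	pthread_spinlock_t lock;
	int refc;
};

static int counters_start(void) { puts("start counters"); return 0; }
static void counters_stop(void) { puts("stop counters"); }

/* The first user starts the counters; later users only bump the count. */
static int ref_get(struct pmu_ref *ref)
{
	int rc = 0;

	pthread_spin_lock(&ref->lock);
	if (ref->refc == 0)
		rc = counters_start();
	if (!rc)
		ref->refc++;
	pthread_spin_unlock(&ref->lock);
	return rc;
}

/* The last user stops them again. */
static void ref_put(struct pmu_ref *ref)
{
	pthread_spin_lock(&ref->lock);
	if (ref->refc > 0 && --ref->refc == 0)
		counters_stop();
	pthread_spin_unlock(&ref->lock);
}

int main(void)
{
	struct pmu_ref ref = { .refc = 0 };

	pthread_spin_init(&ref.lock, PTHREAD_PROCESS_PRIVATE);
	ref_get(&ref);	/* starts the counters */
	ref_get(&ref);	/* refc only */
	ref_put(&ref);
	ref_put(&ref);	/* stops the counters */
	pthread_spin_destroy(&ref.lock);
	return 0;
}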
diff --git a/arch/powerpc/perf/req-gen/perf.h b/arch/powerpc/perf/req-gen/perf.h
index fa9bc804e67a..6b2a59fefffa 100644
--- a/arch/powerpc/perf/req-gen/perf.h
+++ b/arch/powerpc/perf/req-gen/perf.h
@@ -139,6 +139,26 @@ PMU_EVENT_ATTR_STRING( \
#define REQUEST_(r_name, r_value, r_idx_1, r_fields) \
r_fields
+/* Generate event list for platforms with counter_info_version 0x6 or below */
+static __maybe_unused struct attribute *hv_gpci_event_attrs_v6[] = {
+#include REQUEST_FILE
+ NULL
+};
+
+/*
+ * Based on getPerfCountInfo v1.018 documentation, some of the hv-gpci
+ * events were deprecated for platform firmware that supports
+ * counter_info_version 0x8 or above.
+ * Those deprecated events are still part of platform firmware that
+ * supports counter_info_version 0x6 and below. As per the getPerfCountInfo
+ * v1.018 documentation there is no counter_info_version 0x7.
+ * Undefine the macro ENABLE_EVENTS_COUNTERINFO_V6 to omit the
+ * deprecated events from the "hv_gpci_event_attrs" attribute group on
+ * platforms that support counter_info_version 0x8 or above.
+ */
+#undef ENABLE_EVENTS_COUNTERINFO_V6
+
+/* Generate event list for platforms with counter_info_version 0x8 or above */
static __maybe_unused struct attribute *hv_gpci_event_attrs[] = {
#include REQUEST_FILE
NULL
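
The two arrays above come from including the same request file twice, with ENABLE_EVENTS_COUNTERINFO_V6 toggled in between. A compressed, self-contained sketch of that double-expansion trick; the event names and helper macros are illustrative:

#include <stdio.h>

/* One source list stands in for REQUEST_FILE; the LEGACY entry plays the
 * role of an event deprecated for counter_info_version 0x8 and above. */
#define EVENTS(EV, LEGACY) EV("cycles") LEGACY("entitled_cycles")

#define KEEP(n)	n,
#define DROP(n)

static const char *attrs_v6[] = { EVENTS(KEEP, KEEP) NULL };	/* full list */
static const char *attrs[]    = { EVENTS(KEEP, DROP) NULL };	/* legacy dropped */

int main(void)
{
	const char **p;

	for (p = attrs_v6; *p; p++)
		printf("v6:  %s\n", *p);
	for (p = attrs; *p; p++)
		printf("v8+: %s\n", *p);
	return 0;
}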
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 25ebe634a661..e9d3c6b241a8 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -178,6 +178,7 @@ config ISS4xx
config CURRITUCK
bool "IBM Currituck (476fpe) Support"
depends on PPC_47x
+ select I2C
select SWIOTLB
select 476FPE
select FORCE_PCI
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index 30342b60aa63..42c3d40355d9 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -984,7 +984,7 @@ static void mpc5121_clk_provide_migration_support(void)
#define NODE_PREP do { \
of_address_to_resource(np, 0, &res); \
- snprintf(devname, sizeof(devname), "%08x.%s", res.start, np->name); \
+ snprintf(devname, sizeof(devname), "%pa.%s", &res.start, np->name); \
} while (0)
#define NODE_CHK(clkname, clkitem, regnode, regflag) do { \
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index 05e19470d523..22e264bd3ed2 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -530,6 +530,7 @@ static int mpc52xx_lpbfifo_probe(struct platform_device *op)
err_bcom_rx_irq:
bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
err_bcom_rx:
+ free_irq(lpbfifo.irq, &lpbfifo);
err_irq:
iounmap(lpbfifo.regs);
lpbfifo.regs = NULL;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index fc98912f42cf..76a8102bdb98 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -340,7 +340,7 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
{
int l1irq;
int l2irq;
- struct irq_chip *uninitialized_var(irqchip);
+ struct irq_chip *irqchip;
void *hndlr;
int type;
u32 reg;
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 4588ce632484..b6354054f883 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -107,7 +107,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk,
goto next;
unreg:
- platform_device_del(pdev);
+ platform_device_put(pdev);
err:
pr_err("%pOF: registration failed\n", np);
next:
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index f0330ce498d1..325dc8b53422 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -118,9 +118,9 @@ config GENERIC_CPU
depends on PPC64 && CPU_LITTLE_ENDIAN
select ARCH_HAS_FAST_MULTIPLIER
-config GENERIC_CPU
+config POWERPC_CPU
bool "Generic 32 bits powerpc"
- depends on PPC32 && !PPC_8xx
+ depends on PPC32 && !PPC_8xx && !PPC_85xx
config CELL_CPU
bool "Cell Broadband Engine"
@@ -151,11 +151,11 @@ config POWER9_CPU
config E5500_CPU
bool "Freescale e5500"
- depends on E500
+ depends on PPC64 && E500
config E6500_CPU
bool "Freescale e6500"
- depends on E500
+ depends on PPC64 && E500
config 860_CPU
bool "8xx family"
@@ -174,11 +174,23 @@ config G4_CPU
depends on PPC_BOOK3S_32
select ALTIVEC
+config E500_CPU
+ bool "e500 (8540)"
+ depends on PPC_85xx && !PPC_E500MC
+
+config E500MC_CPU
+ bool "e500mc"
+ depends on PPC_85xx && PPC_E500MC
+
+config TOOLCHAIN_DEFAULT_CPU
+ bool "Rely on the toolchain's implicit default CPU"
+ depends on PPC32
+
endchoice
config TARGET_CPU_BOOL
bool
- default !GENERIC_CPU
+ default !GENERIC_CPU && !TOOLCHAIN_DEFAULT_CPU
config TARGET_CPU
string
@@ -193,6 +205,9 @@ config TARGET_CPU
default "e300c2" if E300C2_CPU
default "e300c3" if E300C3_CPU
default "G4" if G4_CPU
+ default "8540" if E500_CPU
+ default "e500mc" if E500MC_CPU
+ default "powerpc" if POWERPC_CPU
config PPC_BOOK3S
def_bool y
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 57c4e0e86c88..ba33140e671d 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -226,6 +226,7 @@ static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
if (!prop) {
dev_dbg(&dev->dev,
"axon_msi: no msi-address-(32|64) properties found\n");
+ of_node_put(dn);
return -ENOENT;
}
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 2dd452a047cd..99e688498a9c 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -671,6 +671,7 @@ spufs_init_isolated_loader(void)
return;
loader = of_get_property(dn, "loader", &size);
+ of_node_put(dn);
if (!loader)
return;
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index d39a9213a3e6..7dd2e2f97aae 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -144,7 +144,7 @@ static struct irq_domain * __init flipper_pic_init(struct device_node *np)
}
io_base = ioremap(res.start, resource_size(&res));
- pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base);
+ pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base);
__flipper_quiesce(io_base);
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index de10c13de15c..c6b492ebb766 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -173,7 +173,7 @@ static struct irq_domain *hlwd_pic_init(struct device_node *np)
return NULL;
}
- pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base);
+ pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base);
__hlwd_quiesce(io_base);
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index f514d5d28cd4..3f3821eb4c36 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -97,9 +97,6 @@ static void __init linkstation_init_IRQ(void)
mpic_init(mpic);
}
-extern void avr_uart_configure(void);
-extern void avr_uart_send(const char);
-
static void __noreturn linkstation_restart(char *cmd)
{
local_irq_disable();
diff --git a/arch/powerpc/platforms/embedded6xx/mpc10x.h b/arch/powerpc/platforms/embedded6xx/mpc10x.h
index 5ad12023e562..ebc258fa4858 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc10x.h
+++ b/arch/powerpc/platforms/embedded6xx/mpc10x.h
@@ -156,4 +156,7 @@ int mpc10x_disable_store_gathering(struct pci_controller *hose);
/* For MPC107 boards that use the built-in openpic */
void mpc10x_set_openpic(void);
+void avr_uart_configure(void);
+void avr_uart_send(const char c);
+
#endif /* __PPC_KERNEL_MPC10X_H */
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 67e48b0a164e..c691453dfd3f 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -89,8 +89,8 @@ static void __iomem *wii_ioremap_hw_regs(char *name, char *compatible)
hw_regs = ioremap(res.start, resource_size(&res));
if (hw_regs) {
- pr_info("%s at 0x%08x mapped to 0x%p\n", name,
- res.start, hw_regs);
+ pr_info("%s at 0x%pa mapped to 0x%p\n", name,
+ &res.start, hw_regs);
}
out_put:
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index f95fbdee6efe..d2900689d642 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -660,13 +660,13 @@ static void smp_core99_gpio_tb_freeze(int freeze)
#endif /* !CONFIG_PPC64 */
-/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
-volatile static long int core99_l2_cache;
-volatile static long int core99_l3_cache;
-
static void core99_init_caches(int cpu)
{
#ifndef CONFIG_PPC64
+ /* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
+ static long int core99_l2_cache;
+ static long int core99_l3_cache;
+
if (!cpu_has_feature(CPU_FTR_L2CR))
return;
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index c164419e254d..dcec0f760c8f 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -278,6 +278,8 @@ int __init opal_event_init(void)
else
name = kasprintf(GFP_KERNEL, "opal");
+ if (!name)
+ continue;
/* Install interrupt handler */
rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK,
name, NULL);
diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c b/arch/powerpc/platforms/powernv/opal-powercap.c
index dc599e787f78..0de530b5fead 100644
--- a/arch/powerpc/platforms/powernv/opal-powercap.c
+++ b/arch/powerpc/platforms/powernv/opal-powercap.c
@@ -196,6 +196,12 @@ void __init opal_powercap_init(void)
j = 0;
pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node);
+ if (!pcaps[i].pg.name) {
+ kfree(pcaps[i].pattrs);
+ kfree(pcaps[i].pg.attrs);
+ goto out_pcaps_pattrs;
+ }
+
if (has_min) {
powercap_add_attr(min, "powercap-min",
&pcaps[i].pattrs[j]);
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
index fd510d961b8c..d5814c5046ba 100644
--- a/arch/powerpc/platforms/powernv/opal-xscom.c
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -165,6 +165,11 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
ent->chip = chip;
snprintf(ent->name, 16, "%08x", chip);
ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn);
+ if (!ent->path.data) {
+ kfree(ent);
+ return -ENOMEM;
+ }
+
ent->path.size = strlen((char *)ent->path.data);
dir = debugfs_create_dir(ent->name, root);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 38e90270280b..c3f968429161 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -776,6 +776,7 @@ static void opal_export_attrs(void)
kobj = kobject_create_and_add("exports", opal_kobj);
if (!kobj) {
pr_warn("kobject_create_and_add() of exports failed\n");
+ of_node_put(np);
return;
}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 058223233088..df4457e743d3 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3008,7 +3008,8 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
int index;
int64_t rc;
- if (!res || !res->flags || res->start > res->end)
+ if (!res || !res->flags || res->start > res->end ||
+ res->flags & IORESOURCE_UNSET)
return;
if (res->flags & IORESOURCE_IO) {
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
index 7186e17cfd3d..ace649701783 100644
--- a/arch/powerpc/platforms/powernv/rng.c
+++ b/arch/powerpc/platforms/powernv/rng.c
@@ -63,6 +63,8 @@ int powernv_get_random_real_mode(unsigned long *v)
struct powernv_rng *rng;
rng = raw_cpu_read(powernv_rng);
+ if (!rng)
+ return 0;
*v = rng_whiten(rng, __raw_rm_readq(rng->regs_real));
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index f364909d0c08..23dc59e40edc 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -496,7 +496,7 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
int lmb_found;
int rc;
- pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);
+ pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index);
lmb_found = 0;
for_each_drmem_lmb(lmb) {
@@ -510,14 +510,15 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
}
}
- if (!lmb_found)
+ if (!lmb_found) {
+ pr_debug("Failed to look up LMB for drc index %x\n", drc_index);
rc = -EINVAL;
-
- if (rc)
- pr_info("Failed to hot-remove memory at %llx\n",
- lmb->base_addr);
- else
- pr_info("Memory at %llx was hot-removed\n", lmb->base_addr);
+ } else if (rc) {
+ pr_debug("Failed to hot-remove memory at %llx\n",
+ lmb->base_addr);
+ } else {
+ pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
+ }
return rc;
}
@@ -770,8 +771,8 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
if (!drmem_lmb_reserved(lmb))
continue;
- pr_info("Memory at %llx (drc index %x) was hot-added\n",
- lmb->base_addr, lmb->drc_index);
+ pr_debug("Memory at %llx (drc index %x) was hot-added\n",
+ lmb->base_addr, lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
}
rc = 0;
diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
index b91eb0929ed1..55569e3c9db7 100644
--- a/arch/powerpc/platforms/pseries/ibmebus.c
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -450,6 +450,7 @@ static int __init ibmebus_bus_init(void)
if (err) {
printk(KERN_WARNING "%s: device_register returned %i\n",
__func__, err);
+ put_device(&ibmebus_bus_device);
bus_unregister(&ibmebus_bus_type);
return err;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index c93b9a3bf237..52e466d4a33c 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -522,8 +522,10 @@ static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
if (cmd) {
rc = init_cpu_associativity();
- if (rc)
+ if (rc) {
+ destroy_cpu_associativity();
goto out;
+ }
for_each_possible_cpu(cpu) {
disp = per_cpu_ptr(&vcpu_disp_data, cpu);
@@ -1416,22 +1418,22 @@ static inline void __init check_lp_set_hblkrm(unsigned int lp,
void __init pseries_lpar_read_hblkrm_characteristics(void)
{
+ const s32 token = rtas_token("ibm,get-system-parameter");
unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH];
int call_status, len, idx, bpsize;
if (!firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
return;
- spin_lock(&rtas_data_buf_lock);
- memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
- call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
- NULL,
- SPLPAR_TLB_BIC_TOKEN,
- __pa(rtas_data_buf),
- RTAS_DATA_BUF_SIZE);
- memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH);
- local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0';
- spin_unlock(&rtas_data_buf_lock);
+ do {
+ spin_lock(&rtas_data_buf_lock);
+ memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
+ call_status = rtas_call(token, 3, 1, NULL, SPLPAR_TLB_BIC_TOKEN,
+ __pa(rtas_data_buf), RTAS_DATA_BUF_SIZE);
+ memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH);
+ local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0';
+ spin_unlock(&rtas_data_buf_lock);
+ } while (rtas_busy_delay(call_status));
if (call_status != 0) {
pr_warn("%s %s Error calling get-system-parameter (0x%x)\n",
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index 38c306551f76..e12cf62357f2 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -289,6 +289,7 @@ static void parse_mpp_x_data(struct seq_file *m)
*/
static void parse_system_parameter_string(struct seq_file *m)
{
+ const s32 token = rtas_token("ibm,get-system-parameter");
int call_status;
unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
@@ -298,16 +299,15 @@ static void parse_system_parameter_string(struct seq_file *m)
return;
}
- spin_lock(&rtas_data_buf_lock);
- memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH);
- call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
- NULL,
- SPLPAR_CHARACTERISTICS_TOKEN,
- __pa(rtas_data_buf),
- RTAS_DATA_BUF_SIZE);
- memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
- local_buffer[SPLPAR_MAXLENGTH - 1] = '\0';
- spin_unlock(&rtas_data_buf_lock);
+ do {
+ spin_lock(&rtas_data_buf_lock);
+ memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH);
+ call_status = rtas_call(token, 3, 1, NULL, SPLPAR_CHARACTERISTICS_TOKEN,
+ __pa(rtas_data_buf), RTAS_DATA_BUF_SIZE);
+ memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
+ local_buffer[SPLPAR_MAXLENGTH - 1] = '\0';
+ spin_unlock(&rtas_data_buf_lock);
+ } while (rtas_busy_delay(call_status));
if (call_status != 0) {
printk(KERN_INFO
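
Both get-system-parameter callers are reworked the same way: retry while RTAS reports a busy status, holding the shared data-buffer lock only for the duration of each attempt and copying the result out before dropping it. A user-space sketch of that shape, with a fake firmware call standing in for rtas_call() and rtas_busy_delay():

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define RTAS_BUSY (-2)	/* stand-in for the RTAS busy status */

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static int attempts;

/* Fake firmware call: busy twice, then succeeds. */
static int fake_rtas_call(char *buf, size_t len)
{
	snprintf(buf, len, "parameter data");
	return ++attempts < 3 ? RTAS_BUSY : 0;
}

static int get_system_parameter(char *out, size_t len)
{
	int status;

	do {
		pthread_mutex_lock(&buf_lock);
		status = fake_rtas_call(out, len);
		pthread_mutex_unlock(&buf_lock);
		if (status == RTAS_BUSY)
			usleep(1000);	/* rtas_busy_delay() sleeps for real */
	} while (status == RTAS_BUSY);
	return status;
}

int main(void)
{
	char buf[64];

	if (get_system_parameter(buf, sizeof(buf)) == 0)
		printf("\"%s\" after %d attempts\n", buf, attempts);
	return 0;
}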
diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile
index 7c6d8b14f440..1f5fb3a8a1bc 100644
--- a/arch/powerpc/purgatory/Makefile
+++ b/arch/powerpc/purgatory/Makefile
@@ -4,6 +4,11 @@ KASAN_SANITIZE := n
targets += trampoline.o purgatory.ro kexec-purgatory.c
+# When profile-guided optimization is enabled, llvm emits two different
+# overlapping text sections, which is not supported by kexec. Remove profile
+# optimization flags.
+KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
+
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined
$(obj)/purgatory.ro: $(obj)/trampoline.o FORCE
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 808e7118abfc..d276c5e96445 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -211,8 +211,10 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
dev_err(&pdev->dev,
"node %pOF has an invalid fsl,msi phandle %u\n",
hose->dn, np->phandle);
+ of_node_put(np);
return -EINVAL;
}
+ of_node_put(np);
}
for_each_pci_msi_entry(entry, pdev) {
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index ff0e2b156cb5..6adf2fdec799 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -520,6 +520,7 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary)
struct resource rsrc;
const int *bus_range;
u8 hdr_type, progif;
+ u32 class_code;
struct device_node *dev;
struct ccsr_pci __iomem *pci;
u16 temp;
@@ -593,6 +594,13 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary)
PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
if (fsl_pcie_check_link(hose))
hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
+ /* Fix Class Code to PCI_CLASS_BRIDGE_PCI_NORMAL for pre-3.0 controller */
+ if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) {
+ early_read_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, &class_code);
+ class_code &= 0xff;
+ class_code |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
+ early_write_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, class_code);
+ }
} else {
/*
* Set PBFR(PCI Bus Function Register)[10] = 1 to
diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h
index 1d7a41205695..5ffaa60f1fa0 100644
--- a/arch/powerpc/sysdev/fsl_pci.h
+++ b/arch/powerpc/sysdev/fsl_pci.h
@@ -18,6 +18,7 @@ struct platform_device;
#define PCIE_LTSSM 0x0404 /* PCIE Link Training and Status */
#define PCIE_LTSSM_L0 0x16 /* L0 state */
+#define PCIE_FSL_CSR_CLASSCODE 0x474 /* FSL GPEX CSR */
#define PCIE_IP_REV_2_2 0x02080202 /* PCIE IP block version Rev2.2 */
#define PCIE_IP_REV_3_0 0x02080300 /* PCIE IP block version Rev3.0 */
#define PIWAR_EN 0x80000000 /* Enable */
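
The fsl_add_bridge() fixup above keeps the low byte of the class-code register and replaces the upper 24 bits with the normal PCI-to-PCI bridge code. A sketch of just that read-modify-write, assuming the usual 0x060400 encoding for PCI_CLASS_BRIDGE_PCI_NORMAL (class 0x06, subclass 0x04, prog-if 0x00):

#include <stdint.h>
#include <stdio.h>

#define PCI_CLASS_BRIDGE_PCI_NORMAL 0x060400

/* Keep the low byte, replace the 24-bit class code. */
static uint32_t fix_class_code(uint32_t class_code)
{
	class_code &= 0xff;
	class_code |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
	return class_code;
}

int main(void)
{
	/* A pre-3.0 controller reporting a non-bridge class code. */
	printf("0x%08x\n", fix_class_code(0x0b200001));	/* -> 0x06040001 */
	return 0;
}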
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 49f9541954f8..3664ffcbb313 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -216,9 +216,8 @@ int __init tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary)
(hose)->ops = &tsi108_direct_pci_ops;
- printk(KERN_INFO "Found tsi108 PCI host bridge at 0x%08x. "
- "Firmware bus number: %d->%d\n",
- rsrc.start, hose->first_busno, hose->last_busno);
+ pr_info("Found tsi108 PCI host bridge at 0x%pa. Firmware bus number: %d->%d\n",
+ &rsrc.start, hose->first_busno, hose->last_busno);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 3fd086533dcf..05c38da4bbee 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -778,7 +778,7 @@ int xive_native_get_queue_info(u32 vp_id, u32 prio,
if (out_qpage)
*out_qpage = be64_to_cpu(qpage);
if (out_qsize)
- *out_qsize = be32_to_cpu(qsize);
+ *out_qsize = be64_to_cpu(qsize);
if (out_qeoi_page)
*out_qeoi_page = be64_to_cpu(qeoi_page);
if (out_escalate_irq)
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index 3f15615712b5..e6788bc06584 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -422,6 +422,7 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
if (!data->trig_mmio) {
+ iounmap(data->eoi_mmio);
pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
return -ENOMEM;
}
@@ -683,6 +684,7 @@ static bool xive_get_max_prio(u8 *max_prio)
}
reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
+ of_node_put(rootdn);
if (!reg) {
pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
return false;
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index b21549a34447..e0a77af5c130 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -51,7 +51,7 @@ config RISCV
select PCI_MSI if PCI
select RISCV_TIMER
select GENERIC_IRQ_MULTI_HANDLER
- select GENERIC_ARCH_TOPOLOGY if SMP
+ select GENERIC_ARCH_TOPOLOGY
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_MMIOWB
select HAVE_EBPF_JIT if 64BIT
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index d1a961539101..1641d8201412 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -35,6 +35,7 @@ else
endif
ifeq ($(CONFIG_LD_IS_LLD),y)
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 150000; echo $$?),0)
KBUILD_CFLAGS += -mno-relax
KBUILD_AFLAGS += -mno-relax
ifneq ($(LLVM_IAS),1)
@@ -42,6 +43,7 @@ ifneq ($(LLVM_IAS),1)
KBUILD_AFLAGS += -Wa,-mno-relax
endif
endif
+endif
# ISA string setting
riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
@@ -73,6 +75,9 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
KBUILD_CFLAGS += -fno-omit-frame-pointer
endif
+# Avoid generating .eh_frame sections.
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 3ba4d93721d3..2dc5b01d62df 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -252,9 +252,9 @@ __io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr))
__io_reads_ins(ins, u8, b, __io_pbr(), __io_par(addr))
__io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr))
__io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr))
-#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count)
-#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count)
-#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count)
+#define insb(addr, buffer, count) __insb(PCI_IOBASE + (addr), buffer, count)
+#define insw(addr, buffer, count) __insw(PCI_IOBASE + (addr), buffer, count)
+#define insl(addr, buffer, count) __insl(PCI_IOBASE + (addr), buffer, count)
__io_writes_outs(writes, u8, b, __io_bw(), __io_aw())
__io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
@@ -266,22 +266,22 @@ __io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
__io_writes_outs(outs, u8, b, __io_pbw(), __io_paw())
__io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
__io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
-#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count)
-#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count)
-#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count)
+#define outsb(addr, buffer, count) __outsb(PCI_IOBASE + (addr), buffer, count)
+#define outsw(addr, buffer, count) __outsw(PCI_IOBASE + (addr), buffer, count)
+#define outsl(addr, buffer, count) __outsl(PCI_IOBASE + (addr), buffer, count)
#ifdef CONFIG_64BIT
__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr))
#define readsq(addr, buffer, count) __readsq(addr, buffer, count)
__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr))
-#define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count)
+#define insq(addr, buffer, count) __insq(PCI_IOBASE + (addr), buffer, count)
__io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
#define writesq(addr, buffer, count) __writesq(addr, buffer, count)
__io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
-#define outsq(addr, buffer, count) __outsq((void __iomem *)addr, buffer, count)
+#define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count)
#endif
#include <asm-generic/io.h>
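
With this change a port number handed to the ins*()/outs*() string macros is treated as an offset into the mapped PCI I/O window, as the single-register accessors already do, instead of being cast to a raw virtual address. A tiny sketch of the addressing, with an arbitrary illustrative window base:

#include <stdint.h>
#include <stdio.h>

/* Illustrative base only; the kernel's PCI_IOBASE is set per platform. */
#define PCI_IOBASE ((volatile uint8_t *)0xffffffd000000000UL)

/* A port number is an offset into the mapped I/O window. */
static volatile uint8_t *port_to_addr(unsigned long port)
{
	return PCI_IOBASE + port;
}

int main(void)
{
	printf("%p\n", (void *)port_to_addr(0x3f8));
	return 0;
}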
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index e076437cfafe..20f0cc9628da 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -235,7 +235,7 @@ do { \
might_fault(); \
access_ok(__p, sizeof(*__p)) ? \
__get_user((x), __p) : \
- ((x) = 0, -EFAULT); \
+ ((x) = (__force __typeof__(x))0, -EFAULT); \
})
#define __put_user_asm(insn, x, ptr, err) \
diff --git a/arch/riscv/include/uapi/asm/setup.h b/arch/riscv/include/uapi/asm/setup.h
new file mode 100644
index 000000000000..66b13a522880
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/setup.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+
+#ifndef _UAPI_ASM_RISCV_SETUP_H
+#define _UAPI_ASM_RISCV_SETUP_H
+
+#define COMMAND_LINE_SIZE 1024
+
+#endif /* _UAPI_ASM_RISCV_SETUP_H */
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 330b34706aa0..9d4b4098874b 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -104,6 +104,8 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
{
struct pt_regs *childregs = task_pt_regs(p);
+ memset(&p->thread.s, 0, sizeof(p->thread.s));
+
/* p->thread holds context to be restored by __switch_to() */
if (unlikely(p->flags & PF_KTHREAD)) {
/* Kernel thread */
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 261f4087cc39..0576a6b2bcc5 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -46,6 +46,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
int cpuid;
+ store_cpu_topology(smp_processor_id());
+
/* This covers non-smp usecase mandated by "nosmp" option */
if (max_cpus == 0)
return;
@@ -142,8 +144,8 @@ asmlinkage __visible void __init smp_callin(void)
current->active_mm = mm;
trap_init();
+ store_cpu_topology(smp_processor_id());
notify_cpu_starting(smp_processor_id());
- update_siblings_masks(smp_processor_id());
set_cpu_online(smp_processor_id(), 1);
/*
* Remote TLB flushes are ignored while the CPU is offline, so emit
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 19e46f4160cc..5ba4d23971fd 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -89,7 +89,7 @@ void notrace walk_stackframe(struct task_struct *task,
while (!kstack_end(ksp)) {
if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
break;
- pc = (*ksp++) - 0x4;
+ pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
}
}
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index 12f8a7fce78b..bb402685057a 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -18,10 +18,6 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
return -EINVAL;
- if ((prot & PROT_WRITE) && (prot & PROT_EXEC))
- if (unlikely(!(prot & PROT_READ)))
- return -EINVAL;
-
return ksys_mmap_pgoff(addr, len, prot, flags, fd,
offset >> (PAGE_SHIFT - page_shift_offset));
}
diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
index 8aa70b519e04..726860a490f3 100644
--- a/arch/riscv/kernel/time.c
+++ b/arch/riscv/kernel/time.c
@@ -5,6 +5,7 @@
*/
#include <linux/of_clk.h>
+#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <asm/sbi.h>
@@ -28,4 +29,6 @@ void __init time_init(void)
of_clk_init(NULL);
timer_probe();
+
+ tick_setup_hrtimer_broadcast();
}
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 473de3ae8bb7..c28d4debf592 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
+#include <linux/kexec.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
@@ -43,6 +44,9 @@ void die(struct pt_regs *regs, const char *str)
ret = notify_die(DIE_OOPS, str, regs, 0, regs->scause, SIGSEGV);
+ if (regs && kexec_should_crash(current))
+ crash_kexec(regs);
+
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
@@ -53,7 +57,7 @@ void die(struct pt_regs *regs, const char *str)
if (panic_on_oops)
panic("Fatal exception");
if (ret != NOTIFY_STOP)
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index a4ee3a0e7d20..c533ac869aa2 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -20,6 +20,9 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
obj-y += vdso.o vdso-syms.o
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+ifneq ($(filter vgettimeofday, $(vdso-syms)),)
+CPPFLAGS_vdso.lds += -DHAS_VGETTIMEOFDAY
+endif
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
index f66a091cb890..4c45adf23259 100644
--- a/arch/riscv/kernel/vdso/vdso.lds.S
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -62,9 +62,11 @@ VERSION
LINUX_4.15 {
global:
__vdso_rt_sigreturn;
+#ifdef HAS_VGETTIMEOFDAY
__vdso_gettimeofday;
__vdso_clock_gettime;
__vdso_clock_getres;
+#endif
__vdso_getcpu;
__vdso_flush_icache;
local: *;
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index c54bd3c79955..9d1ec8c8b718 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -71,6 +71,8 @@ void flush_icache_pte(pte_t pte)
{
struct page *page = pte_page(pte);
- if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+ if (!test_bit(PG_dcache_clean, &page->flags)) {
flush_icache_all();
+ set_bit(PG_dcache_clean, &page->flags);
+ }
}
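
Splitting test_and_set_bit() matters for ordering: the PG_dcache_clean bit is now set only after the flush completes, so a concurrent observer can no longer see the page marked clean while the flush is still pending. A sketch of the corrected ordering, with a C11 atomic standing in for the page flag:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pg_dcache_clean;

static void flush_icache_all(void)
{
	puts("icache flushed");
}

/* Flush first, mark clean second. */
static void flush_icache_pte(void)
{
	if (!atomic_load(&pg_dcache_clean)) {
		flush_icache_all();
		atomic_store(&pg_dcache_clean, 1);
	}
}

int main(void)
{
	flush_icache_pte();	/* flushes, then marks clean */
	flush_icache_pte();	/* already clean: nothing to do */
	return 0;
}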
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 247b8c859c44..1cfce62caa11 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -189,7 +189,7 @@ no_context:
(addr < PAGE_SIZE) ? "NULL pointer dereference" :
"paging request", addr);
die(regs, "Oops");
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
/*
* We ran out of memory, call the OOM killer, and return the userspace
diff --git a/arch/s390/boot/compressed/decompressor.c b/arch/s390/boot/compressed/decompressor.c
index 45046630c56a..c42ab33bd452 100644
--- a/arch/s390/boot/compressed/decompressor.c
+++ b/arch/s390/boot/compressed/decompressor.c
@@ -80,6 +80,6 @@ void *decompress_kernel(void)
void *output = (void *)decompress_offset;
__decompress(_compressed_start, _compressed_end - _compressed_start,
- NULL, NULL, output, 0, NULL, error);
+ NULL, NULL, output, vmlinux.image_size, NULL, error);
return output;
}
diff --git a/arch/s390/boot/ipl_report.c b/arch/s390/boot/ipl_report.c
index 0b4965573656..88bacf4999c4 100644
--- a/arch/s390/boot/ipl_report.c
+++ b/arch/s390/boot/ipl_report.c
@@ -57,11 +57,19 @@ repeat:
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
intersects(INITRD_START, INITRD_SIZE, safe_addr, size))
safe_addr = INITRD_START + INITRD_SIZE;
+ if (intersects(safe_addr, size, (unsigned long)comps, comps->len)) {
+ safe_addr = (unsigned long)comps + comps->len;
+ goto repeat;
+ }
for_each_rb_entry(comp, comps)
if (intersects(safe_addr, size, comp->addr, comp->len)) {
safe_addr = comp->addr + comp->len;
goto repeat;
}
+ if (intersects(safe_addr, size, (unsigned long)certs, certs->len)) {
+ safe_addr = (unsigned long)certs + certs->len;
+ goto repeat;
+ }
for_each_rb_entry(cert, certs)
if (intersects(safe_addr, size, cert->addr, cert->len)) {
safe_addr = cert->addr + cert->len;
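
The added checks extend the safe-address scan to the component and certificate tables themselves, not just the entries they describe. Whenever the candidate address is bumped past a block, the scan restarts from the top, since ranges already checked may overlap the new position. A self-contained sketch of that repeat loop:

#include <stdio.h>

struct range { unsigned long addr, len; };

static int intersects(unsigned long a1, unsigned long l1,
		      unsigned long a2, unsigned long l2)
{
	return a1 < a2 + l2 && a2 < a1 + l1;
}

/* Bump the candidate past any overlapping block and rescan from the top. */
static unsigned long find_safe_addr(unsigned long addr, unsigned long size,
				    const struct range *res, int n)
{
	int i;

repeat:
	for (i = 0; i < n; i++)
		if (intersects(addr, size, res[i].addr, res[i].len)) {
			addr = res[i].addr + res[i].len;
			goto repeat;
		}
	return addr;
}

int main(void)
{
	const struct range reserved[] = { { 0x2000, 0x1000 }, { 0x1000, 0x1800 } };

	/* 0x1800 overlaps the second block, the bumped 0x2800 overlaps the
	 * first, so the result is 0x3000. */
	printf("0x%lx\n", find_safe_addr(0x1800, 0x400, reserved, 2));
	return 0;
}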
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index f0bc4dc3e9bf..6511d15ace45 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
int rc;
if (diag204_probe()) {
- pr_err("The hardware system does not support hypfs\n");
+ pr_info("The hardware system does not support hypfs\n");
return -ENODATA;
}
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 70139d0791b6..ca4fc66a361f 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -501,9 +501,9 @@ fail_hypfs_sprp_exit:
hypfs_vm_exit();
fail_hypfs_diag_exit:
hypfs_diag_exit();
+ pr_err("Initialization of hypfs failed with rc=%i\n", rc);
fail_dbfs_exit:
hypfs_dbfs_exit();
- pr_err("Initialization of hypfs failed with rc=%i\n", rc);
return rc;
}
device_initcall(hypfs_init)
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 310134015541..54f4bc5d1108 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -4,8 +4,8 @@
*
* Copyright IBM Corp. 1999, 2000
*/
-#ifndef DEBUG_H
-#define DEBUG_H
+#ifndef _ASM_S390_DEBUG_H
+#define _ASM_S390_DEBUG_H
#include <linux/string.h>
#include <linux/spinlock.h>
@@ -416,4 +416,4 @@ int debug_unregister_view(debug_info_t *id, struct debug_view *view);
#define PRINT_FATAL(x...) printk(KERN_DEBUG PRINTK_HEADER x)
#endif /* DASD_DEBUG */
-#endif /* DEBUG_H */
+#endif /* _ASM_S390_DEBUG_H */
diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
index 34a7ae68485c..be16a6c0f127 100644
--- a/arch/s390/include/asm/fpu/api.h
+++ b/arch/s390/include/asm/fpu/api.h
@@ -76,7 +76,7 @@ static inline int test_fp_ctl(u32 fpc)
#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
#define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
-#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_V0V7)
+#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW)
struct kernel_fpu;
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 5e97a4353147..7837c791f7e8 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -16,7 +16,8 @@
"3: jl 1b\n" \
" lhi %0,0\n" \
"4: sacf 768\n" \
- EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
+ EX_TABLE(0b,4b) EX_TABLE(1b,4b) \
+ EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
: "=d" (ret), "=&d" (oldval), "=&d" (newval), \
"=m" (*uaddr) \
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index de8f0bf5f238..487725e49b96 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -35,9 +35,11 @@ static inline bool is_hugepage_only_range(struct mm_struct *mm,
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
- if (len & ~HPAGE_MASK)
+ struct hstate *h = hstate_file(file);
+
+ if (len & ~huge_page_mask(h))
return -EINVAL;
- if (addr & ~HPAGE_MASK)
+ if (addr & ~huge_page_mask(h))
return -EINVAL;
return 0;
}
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 287bb88f7698..2686bee800e3 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -11,6 +11,8 @@
/* I/O size constraints */
#define ZPCI_MAX_READ_SIZE 8
#define ZPCI_MAX_WRITE_SIZE 128
+#define ZPCI_BOUNDARY_SIZE (1 << 12)
+#define ZPCI_BOUNDARY_MASK (ZPCI_BOUNDARY_SIZE - 1)
/* I/O Map */
#define ZPCI_IOMAP_SHIFT 48
@@ -125,16 +127,18 @@ out:
int zpci_write_block(volatile void __iomem *dst, const void *src,
unsigned long len);
-static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
+static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max)
{
- int count = len > max ? max : len, size = 1;
+ int offset = dst & ZPCI_BOUNDARY_MASK;
+ int size;
- while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
- dst = dst >> 1;
- src = src >> 1;
- size = size << 1;
- }
- return size;
+ size = min3(len, ZPCI_BOUNDARY_SIZE - offset, max);
+ if (IS_ALIGNED(src, 8) && IS_ALIGNED(dst, 8) && IS_ALIGNED(size, 8))
+ return size;
+
+ if (size >= 8)
+ return 8;
+ return rounddown_pow_of_two(size);
}
static inline int zpci_memcpy_fromio(void *dst,
@@ -144,9 +148,9 @@ static inline int zpci_memcpy_fromio(void *dst,
int size, rc = 0;
while (n > 0) {
- size = zpci_get_max_write_size((u64 __force) src,
- (u64) dst, n,
- ZPCI_MAX_READ_SIZE);
+ size = zpci_get_max_io_size((u64 __force) src,
+ (u64) dst, n,
+ ZPCI_MAX_READ_SIZE);
rc = zpci_read_single(dst, src, size);
if (rc)
break;
@@ -166,9 +170,9 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
return -EINVAL;
while (n > 0) {
- size = zpci_get_max_write_size((u64 __force) dst,
- (u64) src, n,
- ZPCI_MAX_WRITE_SIZE);
+ size = zpci_get_max_io_size((u64 __force) dst,
+ (u64) src, n,
+ ZPCI_MAX_WRITE_SIZE);
if (size > 8) /* main path */
rc = zpci_write_block(dst, src, size);
else
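For reference, the new chunking rule can be exercised in isolation. The sketch below is a userspace re-implementation, not the kernel helper itself; the boundary constant mirrors ZPCI_BOUNDARY_SIZE and the addresses in main() are made up. A chunk never crosses a 4 KiB boundary and degrades to 8 bytes, or a smaller power of two, when src, dst, or the size is not 8-byte aligned.
/* Illustrative userspace re-implementation of zpci_get_max_io_size() */
#include <stdio.h>
#include <stdint.h>

#define BOUNDARY_SIZE 4096
#define BOUNDARY_MASK (BOUNDARY_SIZE - 1)

static int max_io_size(uint64_t src, uint64_t dst, int len, int max)
{
	int offset = dst & BOUNDARY_MASK;
	int size = len;

	if (size > BOUNDARY_SIZE - offset)	/* stay inside the 4K page */
		size = BOUNDARY_SIZE - offset;
	if (size > max)
		size = max;
	if (!((src | dst | (uint64_t)size) & 7))	/* all 8-byte aligned */
		return size;
	if (size >= 8)
		return 8;
	while (size & (size - 1))		/* rounddown_pow_of_two() */
		size &= size - 1;
	return size;
}

int main(void)
{
	/* dst is 2 bytes below a 4K boundary: only a 2-byte chunk is legal */
	printf("%d\n", max_io_size(0x1000, 0x2ffe, 100, 128));
	/* everything aligned: the full 128-byte maximum is used */
	printf("%d\n", max_io_size(0x1000, 0x2000, 256, 128));
	return 0;
}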
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 918f0ba4f4d2..5e26e2c4641b 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -31,7 +31,7 @@
pcp_op_T__ *ptr__; \
preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
- prev__ = *ptr__; \
+ prev__ = READ_ONCE(*ptr__); \
do { \
old__ = prev__; \
new__ = old__ op (val); \
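The READ_ONCE() matters because the loaded value seeds a cmpxchg loop: without it the compiler is free to re-read the per-cpu location between iterations. A minimal C11 userspace analogue of the corrected pattern (names are illustrative):
#include <stdatomic.h>
#include <stdio.h>

/* One explicit load up front; a failed CAS refreshes 'old' for us, so
 * the location is never silently re-read by the compiler. */
static int add_return(_Atomic int *ptr, int val)
{
	int old = atomic_load_explicit(ptr, memory_order_relaxed);

	while (!atomic_compare_exchange_weak(ptr, &old, old + val))
		;	/* 'old' now holds the freshly observed value */
	return old + val;
}

int main(void)
{
	_Atomic int v = 40;

	printf("%d\n", add_return(&v, 2));	/* prints 42 */
	return 0;
}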
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index f96a5857bbfd..01d7c281087f 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -44,7 +44,7 @@ struct save_area {
u64 fprs[16];
u32 fpc;
u32 prefix;
- u64 todpreg;
+ u32 todpreg;
u64 timer;
u64 todcmp;
u64 vxrs_low[16];
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 34bdc60c0b11..2100833adfb6 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -210,5 +210,5 @@ void die(struct pt_regs *regs, const char *str)
if (panic_on_oops)
panic("Fatal exception: panic_on_oops");
oops_exit();
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 7795cdee6427..5aed5bcadb51 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -429,6 +429,8 @@ static struct attribute_group ipl_ccw_attr_group_lpar = {
static struct attribute *ipl_unknown_attrs[] = {
&sys_ipl_type_attr.attr,
+ &sys_ipl_secure_attr.attr,
+ &sys_ipl_has_secure_attr.attr,
NULL,
};
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 6f1388391620..a97c1755517e 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -255,6 +255,7 @@ static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
+ kcb->prev_kprobe.kp = NULL;
}
NOKPROBE_SYMBOL(pop_kprobe);
@@ -509,12 +510,11 @@ static int post_kprobe_handler(struct pt_regs *regs)
if (!p)
return 0;
+ resume_execution(p, regs);
if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
-
- resume_execution(p, regs);
pop_kprobe(kcb);
preempt_enable_no_resched();
diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
index 76cd09879eaf..bf0596749ebd 100644
--- a/arch/s390/kernel/machine_kexec_file.c
+++ b/arch/s390/kernel/machine_kexec_file.c
@@ -29,6 +29,7 @@ int s390_verify_sig(const char *kernel, unsigned long kernel_len)
const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
struct module_signature *ms;
unsigned long sig_len;
+ int ret;
/* Skip signature verification when not secure IPLed. */
if (!ipl_secure_flag)
@@ -63,11 +64,18 @@ int s390_verify_sig(const char *kernel, unsigned long kernel_len)
return -EBADMSG;
}
- return verify_pkcs7_signature(kernel, kernel_len,
- kernel + kernel_len, sig_len,
- VERIFY_USE_PLATFORM_KEYRING,
- VERIFYING_MODULE_SIGNATURE,
- NULL, NULL);
+ ret = verify_pkcs7_signature(kernel, kernel_len,
+ kernel + kernel_len, sig_len,
+ VERIFY_USE_SECONDARY_KEYRING,
+ VERIFYING_MODULE_SIGNATURE,
+ NULL, NULL);
+ if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING))
+ ret = verify_pkcs7_signature(kernel, kernel_len,
+ kernel + kernel_len, sig_len,
+ VERIFY_USE_PLATFORM_KEYRING,
+ VERIFYING_MODULE_SIGNATURE,
+ NULL, NULL);
+ return ret;
}
#endif /* CONFIG_KEXEC_SIG */
@@ -177,8 +185,6 @@ static int kexec_file_add_ipl_report(struct kimage *image,
data->memsz = ALIGN(data->memsz, PAGE_SIZE);
buf.mem = data->memsz;
- if (image->type == KEXEC_TYPE_CRASH)
- buf.mem += crashk_res.start;
ptr = (void *)ipl_cert_list_addr;
end = ptr + ipl_cert_list_size;
@@ -215,6 +221,9 @@ static int kexec_file_add_ipl_report(struct kimage *image,
data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
*lc_ipl_parmblock_ptr = (__u32)buf.mem;
+ if (image->type == KEXEC_TYPE_CRASH)
+ buf.mem += crashk_res.start;
+
ret = kexec_add_buffer(&buf);
out:
return ret;
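The signature-verification hunk above moves to a try-then-fall-back scheme: the secondary keyring is consulted first, and the platform keyring only when the key is merely absent. A hedged userspace sketch of that control flow, with stub verifiers standing in for verify_pkcs7_signature():
#include <stdio.h>
#include <errno.h>

/* Stub standing in for verification against one keyring */
static int verify_with(const char *keyring)
{
	(void)keyring;
	return -ENOKEY;		/* pretend the key was not found */
}

static int verify_kernel_image(void)
{
	int ret = verify_with("secondary");

	/* Only -ENOKEY triggers the fallback; real verification
	 * failures (e.g. -EBADMSG) must stay fatal. */
	if (ret == -ENOKEY)
		ret = verify_with("platform");
	return ret;
}

int main(void)
{
	printf("%d\n", verify_kernel_image());
	return 0;
}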
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 0a487fae763e..d8951274658b 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -179,7 +179,7 @@ void s390_handle_mcck(void)
"malfunction (code 0x%016lx).\n", mcck.mcck_code);
printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
current->comm, current->pid);
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
}
EXPORT_SYMBOL_GPL(s390_handle_mcck);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 4e6299e2ca94..fdd5f37ac1fb 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -76,6 +76,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
memcpy(dst, src, arch_task_struct_size);
dst->thread.fpu.regs = dst->thread.fpu.fprs;
+
+ /*
+ * Don't transfer over the runtime instrumentation or the guarded
+ * storage control block pointers. These fields are cleared here instead
+ * of in copy_thread() to avoid premature freeing of associated memory
+ * on fork() failure. Wait to clear the RI flag because ->stack still
+ * refers to the source thread.
+ */
+ dst->thread.ri_cb = NULL;
+ dst->thread.gs_cb = NULL;
+ dst->thread.gs_bc_cb = NULL;
+
return 0;
}
@@ -133,13 +145,11 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
frame->childregs.flags = 0;
if (new_stackp)
frame->childregs.gprs[15] = new_stackp;
-
- /* Don't copy runtime instrumentation info */
- p->thread.ri_cb = NULL;
+ /*
+ * Clear the runtime instrumentation flag after the above childregs
+ * copy. The CB pointer was already cleared in arch_dup_task_struct().
+ */
frame->childregs.psw.mask &= ~PSW_MASK_RI;
- /* Don't copy guarded storage control block */
- p->thread.gs_cb = NULL;
- p->thread.gs_bc_cb = NULL;
/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS) {
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index ad74472ce967..05e1367c23d5 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -413,6 +413,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
/*
* floating point control reg. is in the thread structure
*/
+ save_fpu_regs();
if ((unsigned int) data != 0 ||
test_fp_ctl(data >> (BITS_PER_LONG - 32)))
return -EINVAL;
@@ -502,9 +503,7 @@ long arch_ptrace(struct task_struct *child, long request,
}
return 0;
case PTRACE_GET_LAST_BREAK:
- put_user(child->thread.last_break,
- (unsigned long __user *) data);
- return 0;
+ return put_user(child->thread.last_break, (unsigned long __user *)data);
case PTRACE_ENABLE_TE:
if (!MACHINE_HAS_TE)
return -EIO;
@@ -775,6 +774,7 @@ static int __poke_user_compat(struct task_struct *child,
/*
* floating point control reg. is in the thread structure
*/
+ save_fpu_regs();
if (test_fp_ctl(tmp))
return -EINVAL;
child->thread.fpu.fpc = data;
@@ -856,9 +856,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
return 0;
case PTRACE_GET_LAST_BREAK:
- put_user(child->thread.last_break,
- (unsigned int __user *) data);
- return 0;
+ return put_user(child->thread.last_break, (unsigned int __user *)data);
}
return compat_ptrace_request(child, request, addr, data);
}
@@ -1010,9 +1008,7 @@ static int s390_fpregs_set(struct task_struct *target,
int rc = 0;
freg_t fprs[__NUM_FPRS];
- if (target == current)
- save_fpu_regs();
-
+ save_fpu_regs();
if (MACHINE_HAS_VX)
convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
else
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8c51462f13fd..f0237dfad6e3 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -145,7 +145,7 @@ static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
static inline int pcpu_stopped(struct pcpu *pcpu)
{
- u32 uninitialized_var(status);
+ u32 status;
if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
0, &status) != SIGP_CC_STATUS_STORED)
diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c
index 888cc2f166db..ce6084e28d90 100644
--- a/arch/s390/kernel/sthyi.c
+++ b/arch/s390/kernel/sthyi.c
@@ -460,9 +460,9 @@ static int sthyi_update_cache(u64 *rc)
*
* Fills the destination with system information returned by the STHYI
* instruction. The data is generated by emulation or execution of STHYI,
- * if available. The return value is the condition code that would be
- * returned, the rc parameter is the return code which is passed in
- * register R2 + 1.
+ * if available. The return value is either a negative error value or
+ * the condition code that would be returned; the rc parameter is the
+ * return code which is passed in register R2 + 1.
*/
int sthyi_fill(void *dst, u64 *rc)
{
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 7e0eb4020917..f8cc4e8524bf 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -15,6 +15,8 @@
/* Handle ro_after_init data on our own. */
#define RO_AFTER_INIT_DATA
+#define RUNTIME_DISCARD_EXIT
+
#include <asm-generic/vmlinux.lds.h>
#include <asm/vmlinux.lds.h>
@@ -124,6 +126,7 @@ SECTIONS
/*
* Table with the patch locations to undo expolines
*/
+ . = ALIGN(4);
.nospec_call_table : {
__nospec_call_start = . ;
*(.s390_indirect*)
@@ -188,5 +191,6 @@ SECTIONS
DISCARDS
/DISCARD/ : {
*(.eh_frame)
+ *(.interp)
}
}
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 6e60cc2443b2..2c99de1cc92c 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -214,13 +214,13 @@ void vtime_flush(struct task_struct *tsk)
virt_timer_expire();
steal = S390_lowcore.steal_timer;
- avg_steal = S390_lowcore.avg_steal_timer / 2;
+ avg_steal = S390_lowcore.avg_steal_timer;
if ((s64) steal > 0) {
S390_lowcore.steal_timer = 0;
account_steal_time(cputime_to_nsecs(steal));
avg_steal += steal;
}
- S390_lowcore.avg_steal_timer = avg_steal;
+ S390_lowcore.avg_steal_timer = avg_steal / 2;
}
/*
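The reordering restores the intended running average: the new sample is added first and the sum halved afterwards, i.e. avg(n+1) = (avg(n) + steal(n)) / 2. A minimal sketch (illustrative names, userspace):
#include <stdio.h>

/* Exponentially decaying steal-time average: halve only after adding
 * the new sample (the pre-fix code halved first, giving the newest
 * sample full weight instead of half). */
static unsigned long long update_avg_steal(unsigned long long avg,
					   unsigned long long steal)
{
	return (avg + steal) / 2;
}

int main(void)
{
	unsigned long long avg = 0;

	avg = update_avg_steal(avg, 100);	/* 50 */
	avg = update_avg_steal(avg, 100);	/* 75 */
	printf("%llu\n", avg);
	return 0;
}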
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index a389fa85cca2..5450d43d26ea 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -360,8 +360,8 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
*/
int handle_sthyi(struct kvm_vcpu *vcpu)
{
- int reg1, reg2, r = 0;
- u64 code, addr, cc = 0, rc = 0;
+ int reg1, reg2, cc = 0, r = 0;
+ u64 code, addr, rc = 0;
struct sthyi_sctns *sctns = NULL;
if (!test_kvm_facility(vcpu->kvm, 74))
@@ -392,7 +392,10 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
return -ENOMEM;
cc = sthyi_fill(sctns, &rc);
-
+ if (cc < 0) {
+ free_page((unsigned long)sctns);
+ return cc;
+ }
out:
if (!cc) {
r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 8be5750fe5ac..a180fe54dc68 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -81,8 +81,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
struct esca_block *sca = vcpu->kvm->arch.sca;
union esca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
- union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
+ union esca_sigp_ctrl new_val = {0}, old_val;
+ old_val = READ_ONCE(*sigp_ctrl);
new_val.scn = src_id;
new_val.c = 1;
old_val.c = 0;
@@ -93,8 +94,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
struct bsca_block *sca = vcpu->kvm->arch.sca;
union bsca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
- union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
+ union bsca_sigp_ctrl new_val = {0}, old_val;
+ old_val = READ_ONCE(*sigp_ctrl);
new_val.scn = src_id;
new_val.c = 1;
old_val.c = 0;
@@ -124,16 +126,18 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
struct esca_block *sca = vcpu->kvm->arch.sca;
union esca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
- union esca_sigp_ctrl old = *sigp_ctrl;
+ union esca_sigp_ctrl old;
+ old = READ_ONCE(*sigp_ctrl);
expect = old.value;
rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
} else {
struct bsca_block *sca = vcpu->kvm->arch.sca;
union bsca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
- union bsca_sigp_ctrl old = *sigp_ctrl;
+ union bsca_sigp_ctrl old;
+ old = READ_ONCE(*sigp_ctrl);
expect = old.value;
rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 49dc00d82e5e..6a1b46e85dac 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1982,6 +1982,10 @@ static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
ms = slots->memslots + slotidx;
ofs = 0;
}
+
+ if (cur_gfn < ms->base_gfn)
+ ofs = 0;
+
ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
while ((slotidx > 0) && (ofs >= ms->npages)) {
slotidx--;
@@ -3344,10 +3348,6 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu_load(vcpu);
- if (test_fp_ctl(fpu->fpc)) {
- ret = -EINVAL;
- goto out;
- }
vcpu->run->s.regs.fpc = fpu->fpc;
if (MACHINE_HAS_VX)
convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
@@ -3355,7 +3355,6 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
else
memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
-out:
vcpu_put(vcpu);
return ret;
}
@@ -4527,6 +4526,22 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
return -EINVAL;
+ if (!kvm->arch.migration_mode)
+ return 0;
+
+ /*
+ * Turn off migration mode when:
+ * - userspace creates a new memslot with dirty logging off,
+ * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
+ * dirty logging is turned off.
+	 * Migration mode expects dirty page logging to be enabled to store
+	 * its dirty bitmap.
+ */
+ if (change != KVM_MR_DELETE &&
+ !(mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
+ WARN(kvm_s390_vm_stop_migration(kvm),
+ "Failed to stop migration mode");
+
return 0;
}
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 4f6c22d72072..596b2a2cd837 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -168,7 +168,8 @@ static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
sizeof(struct kvm_s390_apcb0)))
return -EFAULT;
- bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb0));
+ bitmap_and(apcb_s, apcb_s, apcb_h,
+ BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));
return 0;
}
@@ -190,7 +191,8 @@ static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
sizeof(struct kvm_s390_apcb1)))
return -EFAULT;
- bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb1));
+ bitmap_and(apcb_s, apcb_s, apcb_h,
+ BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));
return 0;
}
@@ -540,8 +542,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
scb_s->eca |= scb_o->eca & ECA_CEI;
/* Epoch Extension */
- if (test_kvm_facility(vcpu->kvm, 139))
+ if (test_kvm_facility(vcpu->kvm, 139)) {
scb_s->ecd |= scb_o->ecd & ECD_MEF;
+ scb_s->epdx = scb_o->epdx;
+ }
/* etoken */
if (test_kvm_facility(vcpu->kvm, 156))
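The bitmap_and() fix above is a units bug: the length argument counts bits, while sizeof() yields bytes, so only a small prefix of the shadow APCB was actually masked. The simplified stand-in below processes whole words only, which is enough to demonstrate the mismatch (userspace, illustrative):
#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(long))

/* Simplified stand-in for the kernel's bitmap_and(): whole words only */
static void bitmap_and(unsigned long *dst, const unsigned long *a,
		       const unsigned long *b, int nbits)
{
	int k;

	for (k = 0; k < nbits / BITS_PER_LONG; k++)
		dst[k] = a[k] & b[k];
}

int main(void)
{
	unsigned long a[2] = { ~0UL, ~0UL }, b[2] = { 0, 0 };
	unsigned long dst[2] = { ~0UL, ~0UL };

	bitmap_and(dst, a, b, sizeof(a));	/* 16 "bits": masks nothing */
	printf("buggy: %#lx\n", dst[0]);
	bitmap_and(dst, a, b, 8 * sizeof(a));	/* 128 bits: masks all */
	printf("fixed: %#lx\n", dst[0]);
	return 0;
}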
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 0267405ab7c6..fcfd78f99cb4 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -339,7 +339,7 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
"4: slgr %0,%0\n"
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b)
- : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
+ : "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2)
: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
return size;
}
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7b0bb475c166..9770381776a6 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -432,7 +432,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
- if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+ if ((trans_exc_code & store_indication) == 0x400)
+ access = VM_WRITE;
+ if (access == VM_WRITE)
flags |= FAULT_FLAG_WRITE;
down_read(&mm->mmap_sem);
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index fc141893d028..bef7e07da98e 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -112,7 +112,7 @@ static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd) || pmd_large(*pmd))
continue;
- page = virt_to_page(pmd_val(*pmd));
+ page = phys_to_page(pmd_val(*pmd));
set_bit(PG_arch_1, &page->flags);
} while (pmd++, addr = next, addr != end);
}
@@ -130,8 +130,8 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
if (pud_none(*pud) || pud_large(*pud))
continue;
if (!pud_folded(*pud)) {
- page = virt_to_page(pud_val(*pud));
- for (i = 0; i < 3; i++)
+ page = phys_to_page(pud_val(*pud));
+ for (i = 0; i < 4; i++)
set_bit(PG_arch_1, &page[i].flags);
}
mark_kernel_pmd(pud, addr, next);
@@ -151,8 +151,8 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
if (p4d_none(*p4d))
continue;
if (!p4d_folded(*p4d)) {
- page = virt_to_page(p4d_val(*p4d));
- for (i = 0; i < 3; i++)
+ page = phys_to_page(p4d_val(*p4d));
+ for (i = 0; i < 4; i++)
set_bit(PG_arch_1, &page[i].flags);
}
mark_kernel_pud(p4d, addr, next);
@@ -173,8 +173,8 @@ static void mark_kernel_pgd(void)
if (pgd_none(*pgd))
continue;
if (!pgd_folded(*pgd)) {
- page = virt_to_page(pgd_val(*pgd));
- for (i = 0; i < 3; i++)
+ page = phys_to_page(pgd_val(*pgd));
+ for (i = 0; i < 4; i++)
set_bit(PG_arch_1, &page[i].flags);
}
mark_kernel_p4d(pgd, addr, next);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 28ca07360e97..c2ed9b859860 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -699,7 +699,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
pte_clear(mm, addr, ptep);
}
if (reset)
- pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
+ pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
pgste_set_unlock(ptep, pgste);
preempt_enable();
}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index b8ddacf1efe1..8d241f3c3b78 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -223,7 +223,7 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
- zpci_memcpy_toio(to, from, count);
+ zpci_memcpy_toio(to, from, count * 8);
}
void __iomem *ioremap(unsigned long ioaddr, unsigned long size)
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 64b1399a73f0..b32da43f7a62 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -543,6 +543,17 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
s->dma_length = 0;
}
}
+
+static unsigned long *bitmap_vzalloc(size_t bits, gfp_t flags)
+{
+ size_t n = BITS_TO_LONGS(bits);
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, sizeof(unsigned long), &bytes)))
+ return NULL;
+
+ return vzalloc(bytes);
+}
int zpci_dma_init_device(struct zpci_dev *zdev)
{
@@ -579,13 +590,13 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
zdev->end_dma - zdev->start_dma + 1);
zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
- zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
+ zdev->iommu_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
if (!zdev->iommu_bitmap) {
rc = -ENOMEM;
goto free_dma_table;
}
if (!s390_iommu_strict) {
- zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
+ zdev->lazy_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
if (!zdev->lazy_bitmap) {
rc = -ENOMEM;
goto free_bitmap;
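Besides the overflow check, the helper fixes a sizing bug: iommu_pages / 8 floors to bytes, while bitmap operations always touch whole longs, so page counts that are not a multiple of BITS_PER_LONG ran past the allocation. A sketch of the two size computations, with a hypothetical page count:
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	size_t pages = 68;	/* hypothetical IOMMU aperture in pages */

	/* old: floor(68 / 8) = 8 bytes, but bit 67 lives in the second
	 * 8-byte word, i.e. bytes 8..15 -- out of bounds */
	printf("old: %zu bytes\n", pages / 8);
	/* new: round up to whole longs first: 2 * 8 = 16 bytes on LP64 */
	printf("new: %zu bytes\n", BITS_TO_LONGS(pages) * sizeof(long));
	return 0;
}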
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 921f0fc12f1f..675e6cb50584 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -64,7 +64,7 @@ static inline int __pcistg_mio_inuser(
asm volatile (
" sacf 256\n"
"0: llgc %[tmp],0(%[src])\n"
- " sllg %[val],%[val],8\n"
+ "4: sllg %[val],%[val],8\n"
" aghi %[src],1\n"
" ogr %[val],%[tmp]\n"
" brctg %[cnt],0b\n"
@@ -72,7 +72,7 @@ static inline int __pcistg_mio_inuser(
"2: ipm %[cc]\n"
" srl %[cc],28\n"
"3: sacf 768\n"
- EX_TABLE(0b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
+ EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
:
[src] "+a" (src), [cnt] "+d" (cnt),
[val] "+d" (val), [tmp] "=d" (tmp),
@@ -100,9 +100,9 @@ static inline int __memcpy_toio_inuser(void __iomem *dst,
old_fs = enable_sacf_uaccess();
while (n > 0) {
- size = zpci_get_max_write_size((u64 __force) dst,
- (u64 __force) src, n,
- ZPCI_MAX_WRITE_SIZE);
+ size = zpci_get_max_io_size((u64 __force) dst,
+ (u64 __force) src, n,
+ ZPCI_MAX_WRITE_SIZE);
if (size > 8) /* main path */
rc = __pcistb_mio_inuser(dst, src, size, &status);
else
@@ -220,10 +220,10 @@ static inline int __pcilg_mio_inuser(
"2: ahi %[shift],-8\n"
" srlg %[tmp],%[val],0(%[shift])\n"
"3: stc %[tmp],0(%[dst])\n"
- " aghi %[dst],1\n"
+ "5: aghi %[dst],1\n"
" brctg %[cnt],2b\n"
"4: sacf 768\n"
- EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b)
+ EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
:
[cc] "+d" (cc), [val] "=d" (val), [len] "+d" (len),
[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
@@ -250,9 +250,9 @@ static inline int __memcpy_fromio_inuser(void __user *dst,
old_fs = enable_sacf_uaccess();
while (n > 0) {
- size = zpci_get_max_write_size((u64 __force) src,
- (u64 __force) dst, n,
- ZPCI_MAX_READ_SIZE);
+ size = zpci_get_max_io_size((u64 __force) src,
+ (u64 __force) dst, n,
+ ZPCI_MAX_READ_SIZE);
rc = __pcilg_mio_inuser(dst, src, size, &status);
if (rc)
break;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index f356ee674d89..671897a680ca 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -2,6 +2,7 @@
config SUPERH
def_bool y
select ARCH_HAS_BINFMT_FLAT if !MMU
+ select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_MIGHT_HAVE_PC_PARPORT
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 71acd3d9b9e8..dfc784f89797 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -26,6 +26,17 @@ config STACK_DEBUG
every function call and will therefore incur a major
performance hit. Most users should say N.
+config EARLY_PRINTK
+ bool "Early printk"
+ depends on SH_STANDARD_BIOS
+ help
+ Say Y here to redirect kernel printk messages to the serial port
+ used by the SH-IPL bootloader, starting very early in the boot
+ process and ending when the kernel's serial console is initialised.
+ This option is only useful while porting the kernel to a new machine,
+ when the kernel may crash or hang before the serial console is
+ initialised. If unsure, say N.
+
config 4KSTACKS
bool "Use 4Kb for kernel stacks instead of 8Kb"
depends on DEBUG_KERNEL && (MMU || BROKEN) && !PAGE_SIZE_64KB
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 665cad452798..a80e2369f42b 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -529,7 +529,7 @@ static int __init ap325rxa_devices_setup(void)
device_initialize(&ap325rxa_ceu_device.dev);
dma_declare_coherent_memory(&ap325rxa_ceu_device.dev,
ceu_dma_membase, ceu_dma_membase,
- ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(&ap325rxa_ceu_device);
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index acaa97459531..3286afe2ea3d 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -1442,15 +1442,13 @@ static int __init arch_setup(void)
device_initialize(&ecovec_ceu_devices[0]->dev);
dma_declare_coherent_memory(&ecovec_ceu_devices[0]->dev,
ceu0_dma_membase, ceu0_dma_membase,
- ceu0_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ecovec_ceu_devices[0]);
device_initialize(&ecovec_ceu_devices[1]->dev);
dma_declare_coherent_memory(&ecovec_ceu_devices[1]->dev,
ceu1_dma_membase, ceu1_dma_membase,
- ceu1_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ecovec_ceu_devices[1]);
gpiod_add_lookup_table(&cn12_power_gpiod_table);
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 96538ba3aa32..90b876194124 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -603,7 +603,7 @@ static int __init kfr2r09_devices_setup(void)
device_initialize(&kfr2r09_ceu_device.dev);
dma_declare_coherent_memory(&kfr2r09_ceu_device.dev,
ceu_dma_membase, ceu_dma_membase,
- ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(&kfr2r09_ceu_device);
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 9ed369dad62d..8598290932ea 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -604,7 +604,7 @@ static int __init migor_devices_setup(void)
device_initialize(&migor_ceu_device.dev);
dma_declare_coherent_memory(&migor_ceu_device.dev,
ceu_dma_membase, ceu_dma_membase,
- ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(&migor_ceu_device);
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 32f5dd944889..9e7b7cac36dc 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -939,15 +939,13 @@ static int __init devices_setup(void)
device_initialize(&ms7724se_ceu_devices[0]->dev);
dma_declare_coherent_memory(&ms7724se_ceu_devices[0]->dev,
ceu0_dma_membase, ceu0_dma_membase,
- ceu0_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ms7724se_ceu_devices[0]);
device_initialize(&ms7724se_ceu_devices[1]->dev);
dma_declare_coherent_memory(&ms7724se_ceu_devices[1]->dev,
ceu1_dma_membase, ceu1_dma_membase,
- ceu1_dma_membase +
- CEU_BUFFER_MEMORY_SIZE - 1);
+ CEU_BUFFER_MEMORY_SIZE);
platform_device_add(ms7724se_ceu_devices[1]);
return platform_add_devices(ms7724se_devices,
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index 96c626c2cd0a..306fba1564e5 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -19,6 +19,18 @@
#include <cpu/dma.h>
/*
+ * Some of the SoCs feature two DMAC modules. In such a case, the channels are
+ * distributed equally among them.
+ */
+#ifdef SH_DMAC_BASE1
+#define SH_DMAC_NR_MD_CH (CONFIG_NR_ONCHIP_DMA_CHANNELS / 2)
+#else
+#define SH_DMAC_NR_MD_CH CONFIG_NR_ONCHIP_DMA_CHANNELS
+#endif
+
+#define SH_DMAC_CH_SZ 0x10
+
+/*
* Define the default configuration for dual address memory-memory transfer.
* The 0x400 value represents auto-request, external->external.
*/
@@ -29,7 +41,7 @@ static unsigned long dma_find_base(unsigned int chan)
unsigned long base = SH_DMAC_BASE0;
#ifdef SH_DMAC_BASE1
- if (chan >= 6)
+ if (chan >= SH_DMAC_NR_MD_CH)
base = SH_DMAC_BASE1;
#endif
@@ -40,13 +52,13 @@ static unsigned long dma_base_addr(unsigned int chan)
{
unsigned long base = dma_find_base(chan);
- /* Normalize offset calculation */
- if (chan >= 9)
- chan -= 6;
- if (chan >= 4)
- base += 0x10;
+ chan = (chan % SH_DMAC_NR_MD_CH) * SH_DMAC_CH_SZ;
+
+ /* DMAOR is placed inside the channel register space. Step over it. */
+ if (chan >= DMAOR)
+ base += SH_DMAC_CH_SZ;
- return base + (chan * 0x10);
+ return base + chan;
}
#ifdef CONFIG_SH_DMA_IRQ_MULTI
@@ -250,12 +262,11 @@ static int sh_dmac_get_dma_residue(struct dma_channel *chan)
#define NR_DMAOR 1
#endif
-/*
- * DMAOR bases are broken out amongst channel groups. DMAOR0 manages
- * channels 0 - 5, DMAOR1 6 - 11 (optional).
- */
-#define dmaor_read_reg(n) __raw_readw(dma_find_base((n)*6))
-#define dmaor_write_reg(n, data) __raw_writew(data, dma_find_base(n)*6)
+#define dmaor_read_reg(n) __raw_readw(dma_find_base((n) * \
+ SH_DMAC_NR_MD_CH) + DMAOR)
+#define dmaor_write_reg(n, data) __raw_writew(data, \
+ dma_find_base((n) * \
+ SH_DMAC_NR_MD_CH) + DMAOR)
static inline int dmaor_reset(int no)
{
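With these constants the channel addressing reduces to a modulo over the per-module channel count plus a step over the in-bank DMAOR register. The userspace sketch below uses made-up module bases, and a DMAOR offset of 0x40 inferred from the old "chan >= 4" special case; the real values are SoC-specific:
#include <stdio.h>

#define NR_MD_CH	6	/* channels per DMAC module */
#define CH_SZ		0x10	/* register stride per channel */
#define DMAOR		0x40	/* hypothetical in-bank DMAOR offset */

static unsigned long find_base(unsigned int chan)
{
	/* made-up module base addresses, for illustration only */
	return chan >= NR_MD_CH ? 0xfe009000UL : 0xfe008000UL;
}

static unsigned long base_addr(unsigned int chan)
{
	unsigned long base = find_base(chan);
	unsigned int off = (chan % NR_MD_CH) * CH_SZ;

	if (off >= DMAOR)	/* DMAOR sits inside the channel space */
		off += CH_SZ;
	return base + off;
}

int main(void)
{
	unsigned int ch;

	for (ch = 0; ch < 2 * NR_MD_CH; ch++)
		printf("ch%-2u -> %#lx\n", ch, base_addr(ch));
	return 0;
}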
diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h
deleted file mode 100644
index 030df56bfdb2..000000000000
--- a/arch/sh/include/asm/bugs.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_BUGS_H
-#define __ASM_SH_BUGS_H
-
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- * void check_bugs(void);
- */
-
-/*
- * I don't know of any Super-H bugs yet.
- */
-
-#include <asm/processor.h>
-
-extern void select_idle_routine(void);
-
-static void __init check_bugs(void)
-{
- extern unsigned long loops_per_jiffy;
- char *p = &init_utsname()->machine[2]; /* "sh" */
-
- select_idle_routine();
-
- current_cpu_data.loops_per_jiffy = loops_per_jiffy;
-
- switch (current_cpu_data.family) {
- case CPU_FAMILY_SH2:
- *p++ = '2';
- break;
- case CPU_FAMILY_SH2A:
- *p++ = '2';
- *p++ = 'a';
- break;
- case CPU_FAMILY_SH3:
- *p++ = '3';
- break;
- case CPU_FAMILY_SH4:
- *p++ = '4';
- break;
- case CPU_FAMILY_SH4A:
- *p++ = '4';
- *p++ = 'a';
- break;
- case CPU_FAMILY_SH4AL_DSP:
- *p++ = '4';
- *p++ = 'a';
- *p++ = 'l';
- *p++ = '-';
- *p++ = 'd';
- *p++ = 's';
- *p++ = 'p';
- break;
- case CPU_FAMILY_SH5:
- *p++ = '6';
- *p++ = '4';
- break;
- case CPU_FAMILY_UNKNOWN:
- /*
- * Specifically use CPU_FAMILY_UNKNOWN rather than
- * default:, so we're able to have the compiler whine
- * about unhandled enumerations.
- */
- break;
- }
-
- printk("CPU: %s\n", get_cpu_subtype(&current_cpu_data));
-
-#ifndef __LITTLE_ENDIAN__
- /* 'eb' means 'Endian Big' */
- *p++ = 'e';
- *p++ = 'b';
-#endif
- *p = '\0';
-}
-#endif /* __ASM_SH_BUGS_H */
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 6fbf8c80e498..386786b1594a 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -173,6 +173,8 @@ extern unsigned int instruction_size(unsigned int insn);
#define instruction_size(insn) (4)
#endif
+void select_idle_routine(void);
+
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_SUPERH32
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 0e0ecc0132e3..58ae979798fa 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -51,6 +51,7 @@
#define SR_FD 0x00008000
#define SR_MD 0x40000000
+#define SR_USER_MASK 0x00000303 // M, Q, S, T bits
/*
* DSP structure and data
*/
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index 8edb824049b9..0cb0ca149ac3 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -4,7 +4,7 @@
#include <asm-generic/sections.h>
-extern long __machvec_start, __machvec_end;
+extern char __machvec_start[], __machvec_end[];
extern char __uncached_start, __uncached_end;
extern char __start_eh_frame[], __stop_eh_frame[];
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index d342ea08843f..70a07f4f2142 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -21,7 +21,7 @@ static int __init scan_cache(unsigned long node, const char *uname,
if (!of_flat_dt_is_compatible(node, "jcore,cache"))
return 0;
- j2_ccr_base = (u32 __iomem *)of_flat_dt_translate_address(node);
+ j2_ccr_base = ioremap(of_flat_dt_translate_address(node), 4);
return 1;
}
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 934ff84844fa..c633fe4d7039 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -380,7 +380,7 @@ static int __init sq_api_init(void)
if (unlikely(!sq_cache))
return ret;
- sq_bitmap = kzalloc(size, GFP_KERNEL);
+ sq_bitmap = kcalloc(size, sizeof(long), GFP_KERNEL);
if (unlikely(!sq_bitmap))
goto out;
diff --git a/arch/sh/kernel/dma-coherent.c b/arch/sh/kernel/dma-coherent.c
index b17514619b7e..eeb25a4fa55f 100644
--- a/arch/sh/kernel/dma-coherent.c
+++ b/arch/sh/kernel/dma-coherent.c
@@ -25,7 +25,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
* Pages from the page allocator may have data present in
* cache. So flush the cache before using uncached memory.
*/
- arch_sync_dma_for_device(dev, virt_to_phys(ret), size,
+ arch_sync_dma_for_device(virt_to_phys(ret), size,
DMA_BIDIRECTIONAL);
ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
@@ -59,8 +59,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
iounmap(vaddr);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index 4adbd4ade319..b603b7968b38 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -64,7 +64,7 @@ ENTRY(_stext)
ldc r0, r6_bank
#endif
-#ifdef CONFIG_OF_FLATTREE
+#ifdef CONFIG_OF_EARLY_FLATTREE
mov r4, r12 ! Store device tree blob pointer in r12
#endif
@@ -315,7 +315,7 @@ ENTRY(_stext)
10:
#endif
-#ifdef CONFIG_OF_FLATTREE
+#ifdef CONFIG_OF_EARLY_FLATTREE
mov.l 8f, r0 ! Make flat device tree available early.
jsr @r0
mov r12, r4
@@ -346,7 +346,7 @@ ENTRY(stack_start)
5: .long start_kernel
6: .long cpu_init
7: .long init_thread_union
-#if defined(CONFIG_OF_FLATTREE)
+#if defined(CONFIG_OF_EARLY_FLATTREE)
8: .long sh_fdt_init
#endif
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index c20fc5487e05..bcb8eabd7618 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -15,6 +15,7 @@
#include <linux/smp.h>
#include <linux/atomic.h>
#include <asm/pgalloc.h>
+#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/bl_bit.h>
diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c
index beadbbdb4486..3e0a4306f1d5 100644
--- a/arch/sh/kernel/machvec.c
+++ b/arch/sh/kernel/machvec.c
@@ -19,8 +19,8 @@
#define MV_NAME_SIZE 32
#define for_each_mv(mv) \
- for ((mv) = (struct sh_machine_vector *)&__machvec_start; \
- (mv) && (unsigned long)(mv) < (unsigned long)&__machvec_end; \
+ for ((mv) = (struct sh_machine_vector *)__machvec_start; \
+ (mv) && (unsigned long)(mv) < (unsigned long)__machvec_end; \
(mv)++)
static struct sh_machine_vector * __init get_mv_byname(const char *name)
@@ -86,8 +86,8 @@ void __init sh_mv_setup(void)
if (!machvec_selected) {
unsigned long machvec_size;
- machvec_size = ((unsigned long)&__machvec_end -
- (unsigned long)&__machvec_start);
+ machvec_size = ((unsigned long)__machvec_end -
+ (unsigned long)__machvec_start);
/*
* Sanity check for machvec section alignment. Ensure
@@ -101,7 +101,7 @@ void __init sh_mv_setup(void)
* vector (usually the only one) from .machvec.init.
*/
if (machvec_size >= sizeof(struct sh_machine_vector))
- sh_mv = *(struct sh_machine_vector *)&__machvec_start;
+ sh_mv = *(struct sh_machine_vector *)__machvec_start;
}
printk(KERN_NOTICE "Booting machvec: %s\n", get_system_type());
diff --git a/arch/sh/kernel/nmi_debug.c b/arch/sh/kernel/nmi_debug.c
index 11777867c6f5..a212b645b4cf 100644
--- a/arch/sh/kernel/nmi_debug.c
+++ b/arch/sh/kernel/nmi_debug.c
@@ -49,7 +49,7 @@ static int __init nmi_debug_setup(char *str)
register_die_notifier(&nmi_debug_nb);
if (*str != '=')
- return 0;
+ return 1;
for (p = str + 1; *p; p = sep + 1) {
sep = strchr(p, ',');
@@ -70,6 +70,6 @@ static int __init nmi_debug_setup(char *str)
break;
}
- return 0;
+ return 1;
}
__setup("nmi_debug", nmi_debug_setup);
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 2c0e0f37a318..2221e057e6b4 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -43,6 +43,7 @@
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
+#include <asm/processor.h>
#include <asm/sparsemem.h>
/*
@@ -243,7 +244,7 @@ void __init __weak plat_early_device_setup(void)
{
}
-#ifdef CONFIG_OF_FLATTREE
+#ifdef CONFIG_OF_EARLY_FLATTREE
void __ref sh_fdt_init(phys_addr_t dt_phys)
{
static int done = 0;
@@ -330,7 +331,7 @@ void __init setup_arch(char **cmdline_p)
/* Let earlyprintk output early console messages */
early_platform_driver_probe("earlyprintk", 1, 1);
-#ifdef CONFIG_OF_FLATTREE
+#ifdef CONFIG_OF_EARLY_FLATTREE
#ifdef CONFIG_USE_BUILTIN_DTB
unflatten_and_copy_device_tree();
#else
@@ -362,3 +363,57 @@ int test_mode_pin(int pin)
{
return sh_mv.mv_mode_pins() & pin;
}
+
+void __init arch_cpu_finalize_init(void)
+{
+ char *p = &init_utsname()->machine[2]; /* "sh" */
+
+ select_idle_routine();
+
+ current_cpu_data.loops_per_jiffy = loops_per_jiffy;
+
+ switch (current_cpu_data.family) {
+ case CPU_FAMILY_SH2:
+ *p++ = '2';
+ break;
+ case CPU_FAMILY_SH2A:
+ *p++ = '2';
+ *p++ = 'a';
+ break;
+ case CPU_FAMILY_SH3:
+ *p++ = '3';
+ break;
+ case CPU_FAMILY_SH4:
+ *p++ = '4';
+ break;
+ case CPU_FAMILY_SH4A:
+ *p++ = '4';
+ *p++ = 'a';
+ break;
+ case CPU_FAMILY_SH4AL_DSP:
+ *p++ = '4';
+ *p++ = 'a';
+ *p++ = 'l';
+ *p++ = '-';
+ *p++ = 'd';
+ *p++ = 's';
+ *p++ = 'p';
+ break;
+ case CPU_FAMILY_UNKNOWN:
+ /*
+ * Specifically use CPU_FAMILY_UNKNOWN rather than
+ * default:, so we're able to have the compiler whine
+ * about unhandled enumerations.
+ */
+ break;
+ }
+
+ pr_info("CPU: %s\n", get_cpu_subtype(&current_cpu_data));
+
+#ifndef __LITTLE_ENDIAN__
+ /* 'eb' means 'Endian Big' */
+ *p++ = 'e';
+ *p++ = 'b';
+#endif
+ *p = '\0';
+}
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 24473fa6c3b6..f6e1a47ad7ca 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -116,6 +116,7 @@ static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p)
{
unsigned int err = 0;
+ unsigned int sr = regs->sr & ~SR_USER_MASK;
#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
COPY(regs[1]);
@@ -131,6 +132,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
COPY(sr); COPY(pc);
#undef COPY
+ regs->sr = (regs->sr & SR_USER_MASK) | sr;
+
#ifdef CONFIG_SH_FPU
if (boot_cpu_data.flags & CPU_HAS_FPU) {
int owned_fp;
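The net effect of the two added lines is a privilege mask: only the user-modifiable M, Q, S and T bits may come from the signal frame; everything else is carried over from the live SR. A standalone sketch with illustrative values:
#include <stdio.h>

#define SR_USER_MASK 0x00000303u	/* M, Q, S, T bits */

/* Frame supplies the user bits; the live SR supplies the rest. */
static unsigned int restore_sr(unsigned int live_sr, unsigned int frame_sr)
{
	return (live_sr & ~SR_USER_MASK) | (frame_sr & SR_USER_MASK);
}

int main(void)
{
	/* A forged frame setting SR.MD (bit 30) only gets its T bit in */
	printf("%#x\n", restore_sr(0x00000000, 0x40000001));	/* 0x1 */
	return 0;
}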
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 63cf17bc760d..6a228c00b73f 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -57,7 +57,7 @@ void die(const char *str, struct pt_regs *regs, long err)
if (panic_on_oops)
panic("Fatal exception");
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
void die_if_kernel(const char *str, struct pt_regs *regs, long err)
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 77a59d8c6b4d..ec3bae172b20 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -10,6 +10,7 @@ OUTPUT_ARCH(sh:sh5)
#define LOAD_OFFSET 0
OUTPUT_ARCH(sh)
#endif
+#define RUNTIME_DISCARD_EXIT
#include <asm/thread_info.h>
#include <asm/cache.h>
diff --git a/arch/sh/math-emu/sfp-util.h b/arch/sh/math-emu/sfp-util.h
index 784f541344f3..bda50762b3d3 100644
--- a/arch/sh/math-emu/sfp-util.h
+++ b/arch/sh/math-emu/sfp-util.h
@@ -67,7 +67,3 @@
} while (0)
#define abort() return 0
-
-#define __BYTE_ORDER __LITTLE_ENDIAN
-
-
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 349e27771cea..881f6a849148 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -52,6 +52,7 @@ config SPARC
config SPARC32
def_bool !64BIT
select ARCH_32BIT_OFF_T
+ select ARCH_HAS_CPU_FINALIZE_INIT if !SMP
select ARCH_HAS_SYNC_DMA_FOR_CPU
select GENERIC_ATOMIC64
select CLZ_TAB
@@ -321,7 +322,7 @@ config FORCE_MAX_ZONEORDER
This config option is actually maximum order plus one. For example,
a value of 13 means that the largest free memory block is 2^12 pages.
-if SPARC64
+if SPARC64 || COMPILE_TEST
source "kernel/power/Kconfig"
endif
diff --git a/arch/sparc/include/asm/bugs.h b/arch/sparc/include/asm/bugs.h
deleted file mode 100644
index 02fa369b9c21..000000000000
--- a/arch/sparc/include/asm/bugs.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* include/asm/bugs.h: Sparc probes for various bugs.
- *
- * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
- */
-
-#ifdef CONFIG_SPARC32
-#include <asm/cpudata.h>
-#endif
-
-extern unsigned long loops_per_jiffy;
-
-static void __init check_bugs(void)
-{
-#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP)
- cpu_data(0).udelay_val = loops_per_jiffy;
-#endif
-}
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index b87e0002131d..9d723c58557b 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -368,8 +368,8 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
/* IIep is write-through, not flushing on cpu to device transfer. */
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
if (dir != PCI_DMA_TODEVICE)
dma_make_coherent(paddr, PAGE_ALIGN(size));
diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c
index e6935d0ac1ec..c32590bdd312 100644
--- a/arch/sparc/kernel/leon_pci_grpci1.c
+++ b/arch/sparc/kernel/leon_pci_grpci1.c
@@ -696,7 +696,7 @@ err1:
return err;
}
-static const struct of_device_id grpci1_of_match[] __initconst = {
+static const struct of_device_id grpci1_of_match[] = {
{
.name = "GAISLER_PCIFBRG",
},
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
index ca22f93d9045..dd06abc61657 100644
--- a/arch/sparc/kernel/leon_pci_grpci2.c
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -887,7 +887,7 @@ err1:
return err;
}
-static const struct of_device_id grpci2_of_match[] __initconst = {
+static const struct of_device_id grpci2_of_match[] = {
{
.name = "GAISLER_GRPCI2",
},
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index afe1592a6d08..4373c1d64ab8 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -422,3 +422,10 @@ static int __init topology_init(void)
}
subsys_initcall(topology_init);
+
+#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP)
+void __init arch_cpu_finalize_init(void)
+{
+ cpu_data(0).udelay_val = loops_per_jiffy;
+}
+#endif
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 4ceecad556a9..dbf068ac54ff 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -86,9 +86,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
}
printk("Instruction DUMP:");
instruction_dump ((unsigned long *) regs->pc);
- if(regs->psr & PSR_PS)
- do_exit(SIGKILL);
- do_exit(SIGSEGV);
+ make_task_dead((regs->psr & PSR_PS) ? SIGKILL : SIGSEGV);
}
void do_hw_interrupt(struct pt_regs *regs, unsigned long type)
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index f2b22c496fb9..17768680cbae 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2564,9 +2564,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
}
if (panic_on_oops)
panic("Fatal exception");
- if (regs->tstate & TSTATE_PRIV)
- do_exit(SIGKILL);
- do_exit(SIGSEGV);
+ make_task_dead((regs->tstate & TSTATE_PRIV)? SIGKILL : SIGSEGV);
}
EXPORT_SYMBOL(die_if_kernel);
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index c56d3526a3bd..107a0bc668e9 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -5,6 +5,7 @@ menu "UML-specific options"
config UML
bool
default y
+ select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_KCOV
select ARCH_NO_PREEMPT
select HAVE_ARCH_AUDITSYSCALL
@@ -84,6 +85,19 @@ config LD_SCRIPT_DYN
depends on !LD_SCRIPT_STATIC
select MODULE_REL_CRCS if MODVERSIONS
+config LD_SCRIPT_DYN_RPATH
+ bool "set rpath in the binary" if EXPERT
+ default y
+ depends on LD_SCRIPT_DYN
+ help
+ Add /lib (and /lib64 for 64-bit) to the linux binary's rpath
+ explicitly.
+
+ You may need to turn this off if compiling for nix systems
+ that have their libraries in random /nix directories and
+	  might otherwise unexpectedly use libraries from /lib or /lib64
+ instead of the desired ones.
+
config HOSTFS
tristate "Host filesystem"
help
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 275f5ffdf6f0..94cea8d46b22 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -118,7 +118,8 @@ archprepare:
$(Q)$(MAKE) $(build)=$(HOST_DIR)/um include/generated/user_constants.h
LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
-LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie)
+LINK-$(CONFIG_LD_SCRIPT_DYN) += $(call cc-option, -no-pie)
+LINK-$(CONFIG_LD_SCRIPT_DYN_RPATH) += -Wl,-rpath,/lib
CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
$(call cc-option, -fno-stack-protector,) \
@@ -132,10 +133,18 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
# The wrappers will select whether using "malloc" or the kernel allocator.
LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
+# Avoid binutils 2.39+ warnings by marking the stack non-executable and
+# ignoring warnings for the kallsyms sections.
+LDFLAGS_EXECSTACK = -z noexecstack
+ifeq ($(CONFIG_LD_IS_BFD),y)
+LDFLAGS_EXECSTACK += $(call ld-option,--no-warn-rwx-segments)
+endif
+
LD_FLAGS_CMDLINE = $(foreach opt,$(KBUILD_LDFLAGS),-Wl,$(opt))
# Used by link-vmlinux.sh which has special support for um link
export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
+export LDFLAGS_vmlinux := $(LDFLAGS_EXECSTACK)
# When cleaning we don't include .config, so we don't include
# TT or skas makefiles and don't clean skas_ptregs.h.
diff --git a/arch/um/configs/i386_defconfig b/arch/um/configs/i386_defconfig
index 73e98bb57bf5..4229ac9165e8 100644
--- a/arch/um/configs/i386_defconfig
+++ b/arch/um/configs/i386_defconfig
@@ -35,6 +35,7 @@ CONFIG_TTY_CHAN=y
CONFIG_XTERM_CHAN=y
CONFIG_CON_CHAN="pts"
CONFIG_SSL_CHAN="pts"
+CONFIG_SOUND=m
CONFIG_UML_SOUND=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
diff --git a/arch/um/configs/x86_64_defconfig b/arch/um/configs/x86_64_defconfig
index 3281d7600225..f6993a006727 100644
--- a/arch/um/configs/x86_64_defconfig
+++ b/arch/um/configs/x86_64_defconfig
@@ -33,6 +33,7 @@ CONFIG_TTY_CHAN=y
CONFIG_XTERM_CHAN=y
CONFIG_CON_CHAN="pts"
CONFIG_SSL_CHAN="pts"
+CONFIG_SOUND=m
CONFIG_UML_SOUND=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
index 388096fb45a2..12f54a4a3747 100644
--- a/arch/um/drivers/Kconfig
+++ b/arch/um/drivers/Kconfig
@@ -104,24 +104,14 @@ config SSL_CHAN
config UML_SOUND
tristate "Sound support"
+ depends on SOUND
+ select SOUND_OSS_CORE
help
This option enables UML sound support. If enabled, it will pull in
- soundcore and the UML hostaudio relay, which acts as a intermediary
+	  the UML hostaudio relay, which acts as an intermediary
between the host's dsp and mixer devices and the UML sound system.
It is safe to say 'Y' here.
-config SOUND
- tristate
- default UML_SOUND
-
-config SOUND_OSS_CORE
- bool
- default UML_SOUND
-
-config HOSTAUDIO
- tristate
- default UML_SOUND
-
endmenu
menu "UML Network Devices"
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index a290821e355c..4d7fb606a5f0 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_UML_NET) += net.o
obj-$(CONFIG_MCONSOLE) += mconsole.o
obj-$(CONFIG_MMAPPER) += mmapper_kern.o
obj-$(CONFIG_BLK_DEV_UBD) += ubd.o
-obj-$(CONFIG_HOSTAUDIO) += hostaudio.o
+obj-$(CONFIG_UML_SOUND) += hostaudio.o
obj-$(CONFIG_NULL_CHAN) += null.o
obj-$(CONFIG_PORT_CHAN) += port.o
obj-$(CONFIG_PTY_CHAN) += pty.o
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 327b728f7244..db15a456482f 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -204,7 +204,7 @@ static int uml_net_close(struct net_device *dev)
return 0;
}
-static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct uml_net_private *lp = netdev_priv(dev);
unsigned long flags;
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 769ffbd9e9a6..eb9dba8dcf95 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -746,6 +746,7 @@ static int vector_config(char *str, char **error_out)
if (parsed == NULL) {
*error_out = "vector_config failed to parse parameters";
+ kfree(params);
return -EINVAL;
}
diff --git a/arch/um/include/asm/bugs.h b/arch/um/include/asm/bugs.h
deleted file mode 100644
index 4473942a0839..000000000000
--- a/arch/um/include/asm/bugs.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __UM_BUGS_H
-#define __UM_BUGS_H
-
-void check_bugs(void);
-
-#endif
diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
index ccafb62e8cce..42dc0e47d3ad 100644
--- a/arch/um/include/shared/kern_util.h
+++ b/arch/um/include/shared/kern_util.h
@@ -49,7 +49,7 @@ extern void do_uml_exitcalls(void);
* Are we disallowed to sleep? Used to choose between GFP_KERNEL and
* GFP_ATOMIC.
*/
-extern int __cant_sleep(void);
+extern int __uml_cant_sleep(void);
extern int get_current_pid(void);
extern int copy_from_user_proc(void *to, void *from, int size);
extern int cpu(void);
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index d71dd7725bef..f185d19fd9b6 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -258,7 +258,7 @@ void arch_cpu_idle(void)
local_irq_enable();
}
-int __cant_sleep(void) {
+int __uml_cant_sleep(void) {
return in_atomic() || irqs_disabled() || in_interrupt();
/* Is in_interrupt() really needed? */
}
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 0f40eccbd759..004ef4ebb57d 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -3,6 +3,7 @@
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
+#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -77,7 +78,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
static void *c_start(struct seq_file *m, loff_t *pos)
{
- return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+ return *pos < nr_cpu_ids ? cpu_data + *pos : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
@@ -353,7 +354,7 @@ void __init setup_arch(char **cmdline_p)
setup_hostinfo(host_info, sizeof host_info);
}
-void __init check_bugs(void)
+void __init arch_cpu_finalize_init(void)
{
arch_check_bugs();
os_check_bugs();
diff --git a/arch/um/kernel/vmlinux.lds.S b/arch/um/kernel/vmlinux.lds.S
index 16e49bfa2b42..53d719c04ba9 100644
--- a/arch/um/kernel/vmlinux.lds.S
+++ b/arch/um/kernel/vmlinux.lds.S
@@ -1,4 +1,4 @@
-
+#define RUNTIME_DISCARD_EXIT
KERNEL_STACK_SIZE = 4096 * (1 << CONFIG_KERNEL_STACK_ORDER);
#ifdef CONFIG_LD_SCRIPT_STATIC
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index 9fa6e4187d4f..57a27555092f 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -45,7 +45,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
unsigned long stack, sp;
int pid, fds[2], ret, n;
- stack = alloc_stack(0, __cant_sleep());
+ stack = alloc_stack(0, __uml_cant_sleep());
if (stack == 0)
return -ENOMEM;
@@ -69,7 +69,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
data.pre_data = pre_data;
data.argv = argv;
data.fd = fds[1];
- data.buf = __cant_sleep() ? uml_kmalloc(PATH_MAX, UM_GFP_ATOMIC) :
+ data.buf = __uml_cant_sleep() ? uml_kmalloc(PATH_MAX, UM_GFP_ATOMIC) :
uml_kmalloc(PATH_MAX, UM_GFP_KERNEL);
pid = clone(helper_child, (void *) sp, CLONE_VM, &data);
if (pid < 0) {
@@ -116,7 +116,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
unsigned long stack, sp;
int pid, status, err;
- stack = alloc_stack(0, __cant_sleep());
+ stack = alloc_stack(0, __uml_cant_sleep());
if (stack == 0)
return -ENOMEM;
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 4fb877b99dde..0571cc0a30fc 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -5,6 +5,7 @@
*/
#include <stdlib.h>
+#include <stdbool.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
@@ -641,10 +642,24 @@ void halt_skas(void)
UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}
+static bool noreboot;
+
+static int __init noreboot_cmd_param(char *str, int *add)
+{
+ noreboot = true;
+ return 0;
+}
+
+__uml_setup("noreboot", noreboot_cmd_param,
+"noreboot\n"
+" Rather than rebooting, exit always, akin to QEMU's -no-reboot option.\n"
+" This is useful if you're using CONFIG_PANIC_TIMEOUT in order to catch\n"
+" crashes in CI\n");
+
void reboot_skas(void)
{
block_signals_trace();
- UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
+ UML_LONGJMP(&initial_jmpbuf, noreboot ? INIT_JMP_HALT : INIT_JMP_REBOOT);
}
void __switch_mm(struct mm_id *mm_idp)
diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c
index ecf2f390fad2..b76ac4df5da5 100644
--- a/arch/um/os-Linux/util.c
+++ b/arch/um/os-Linux/util.c
@@ -166,23 +166,38 @@ __uml_setup("quiet", quiet_cmd_param,
"quiet\n"
" Turns off information messages during boot.\n\n");
+/*
+ * The os_info/os_warn functions will be called by helper threads. These
+ * have a very limited stack size, and using the libc formatting functions
+ * may overflow the stack.
+ * So pull in the kernel vscnprintf and use that instead with a fixed
+ * on-stack buffer.
+ */
+int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
+
void os_info(const char *fmt, ...)
{
+ char buf[256];
va_list list;
+ int len;
if (quiet_info)
return;
va_start(list, fmt);
- vfprintf(stderr, fmt, list);
+ len = vscnprintf(buf, sizeof(buf), fmt, list);
+ fwrite(buf, len, 1, stderr);
va_end(list);
}
void os_warn(const char *fmt, ...)
{
+ char buf[256];
va_list list;
+ int len;
va_start(list, fmt);
- vfprintf(stderr, fmt, list);
+ len = vscnprintf(buf, sizeof(buf), fmt, list);
+ fwrite(buf, len, 1, stderr);
va_end(list);
}
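
For reference, a minimal user-space sketch of the fixed-buffer pattern adopted above. This is illustrative only: plain vsnprintf() stands in for the kernel vscnprintf() that the patch pulls in, and the helper name is invented.

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical helper: format into a bounded on-stack buffer instead of
 * streaming through vfprintf(), whose formatting path may be too deep
 * for a tiny helper-thread stack. */
static void info(const char *fmt, ...)
{
        char buf[256];
        va_list ap;
        int len;

        va_start(ap, fmt);
        len = vsnprintf(buf, sizeof(buf), fmt, ap);
        va_end(ap);

        /* vsnprintf() reports the would-be length on truncation; clamp. */
        if (len > (int)sizeof(buf) - 1)
                len = (int)sizeof(buf) - 1;
        if (len > 0)
                fwrite(buf, len, 1, stderr);
}

int main(void)
{
        info("booted in %d ms\n", 42);
        return 0;
}

The point of the pattern is that stack consumption stays bounded by the 256-byte buffer regardless of the format string.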
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6002252692af..df0a3a1b08ae 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -60,6 +60,7 @@ config X86
select ARCH_CLOCKSOURCE_DATA
select ARCH_CLOCKSOURCE_INIT
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+ select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
@@ -2500,6 +2501,25 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
def_bool y
depends on X86_64 || X86_PAE
+config GDS_FORCE_MITIGATION
+ bool "Force GDS Mitigation"
+ depends on CPU_SUP_INTEL
+ default n
+ help
+	  Gather Data Sampling (GDS) is a hardware vulnerability which allows
+	  unprivileged speculative access to data that was previously stored in
+	  vector registers.
+
+	  This option is equivalent to setting gather_data_sampling=force on the
+	  command line. The microcode mitigation is used if present; otherwise
+	  AVX is disabled as a mitigation. On affected systems that are missing
+	  the microcode, any userspace code that unconditionally uses AVX will
+	  break with this option set.
+
+ Setting this option on systems not vulnerable to GDS has no effect.
+
+ If in doubt, say N.
+
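
As an aside, the state this option forces can be checked at runtime through sysfs. A minimal sketch, assuming the standard vulnerabilities file documented with the GDS change:

#include <stdio.h>

int main(void)
{
        char line[128];
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/"
                        "gather_data_sampling", "r");

        if (!f) {
                perror("gather_data_sampling");
                return 1;
        }
        if (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* e.g. "Mitigation: Microcode" */
        fclose(f);
        return 0;
}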
config ARCH_ENABLE_HUGEPAGE_MIGRATION
def_bool y
depends on X86_64 && HUGETLB_PAGE && MIGRATION
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 8e29c991ba3e..042bdf1ed653 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -372,7 +372,7 @@ config X86_CMOV
config X86_MINIMUM_CPU_FAMILY
int
default "64" if X86_64
- default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
+ default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8)
default "5" if X86_32 && X86_CMPXCHG64
default "4"
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index 1db7913795f5..b3c1ae084180 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -44,7 +44,7 @@ ELF_FORMAT := elf64-x86-64
# Not on all 64-bit distros /lib is a symlink to /lib64. PLD is an example.
-LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib64
+LINK-$(CONFIG_LD_SCRIPT_DYN_RPATH) += -Wl,-rpath,/lib64
LINK-y += -m64
endif
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 6539c50fb9aa..82500962f1b3 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -100,7 +100,7 @@ $(obj)/zoffset.h: $(obj)/compressed/vmlinux FORCE
AFLAGS_header.o += -I$(objtree)/$(obj)
$(obj)/header.o: $(obj)/zoffset.h
-LDFLAGS_setup.elf := -m elf_i386 -T
+LDFLAGS_setup.elf := -m elf_i386 -z noexecstack -T
$(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE
$(call if_changed,ld)
diff --git a/arch/x86/boot/bioscall.S b/arch/x86/boot/bioscall.S
index 5521ea12f44e..aa9b96457584 100644
--- a/arch/x86/boot/bioscall.S
+++ b/arch/x86/boot/bioscall.S
@@ -32,7 +32,7 @@ intcall:
movw %dx, %si
movw %sp, %di
movw $11, %cx
- rep; movsd
+ rep; movsl
/* Pop full state from the stack */
popal
@@ -67,7 +67,7 @@ intcall:
jz 4f
movw %sp, %si
movw $11, %cx
- rep; movsd
+ rep; movsl
4: addw $44, %sp
/* Restore state and return */
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index ca866f1cca2e..4d79391bb787 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -110,66 +110,78 @@ typedef unsigned int addr_t;
static inline u8 rdfs8(addr_t addr)
{
+ u8 *ptr = (u8 *)absolute_pointer(addr);
u8 v;
- asm volatile("movb %%fs:%1,%0" : "=q" (v) : "m" (*(u8 *)addr));
+ asm volatile("movb %%fs:%1,%0" : "=q" (v) : "m" (*ptr));
return v;
}
static inline u16 rdfs16(addr_t addr)
{
+ u16 *ptr = (u16 *)absolute_pointer(addr);
u16 v;
- asm volatile("movw %%fs:%1,%0" : "=r" (v) : "m" (*(u16 *)addr));
+ asm volatile("movw %%fs:%1,%0" : "=r" (v) : "m" (*ptr));
return v;
}
static inline u32 rdfs32(addr_t addr)
{
+ u32 *ptr = (u32 *)absolute_pointer(addr);
u32 v;
- asm volatile("movl %%fs:%1,%0" : "=r" (v) : "m" (*(u32 *)addr));
+ asm volatile("movl %%fs:%1,%0" : "=r" (v) : "m" (*ptr));
return v;
}
static inline void wrfs8(u8 v, addr_t addr)
{
- asm volatile("movb %1,%%fs:%0" : "+m" (*(u8 *)addr) : "qi" (v));
+ u8 *ptr = (u8 *)absolute_pointer(addr);
+ asm volatile("movb %1,%%fs:%0" : "+m" (*ptr) : "qi" (v));
}
static inline void wrfs16(u16 v, addr_t addr)
{
- asm volatile("movw %1,%%fs:%0" : "+m" (*(u16 *)addr) : "ri" (v));
+ u16 *ptr = (u16 *)absolute_pointer(addr);
+ asm volatile("movw %1,%%fs:%0" : "+m" (*ptr) : "ri" (v));
}
static inline void wrfs32(u32 v, addr_t addr)
{
- asm volatile("movl %1,%%fs:%0" : "+m" (*(u32 *)addr) : "ri" (v));
+ u32 *ptr = (u32 *)absolute_pointer(addr);
+ asm volatile("movl %1,%%fs:%0" : "+m" (*ptr) : "ri" (v));
}
static inline u8 rdgs8(addr_t addr)
{
+ u8 *ptr = (u8 *)absolute_pointer(addr);
u8 v;
- asm volatile("movb %%gs:%1,%0" : "=q" (v) : "m" (*(u8 *)addr));
+ asm volatile("movb %%gs:%1,%0" : "=q" (v) : "m" (*ptr));
return v;
}
static inline u16 rdgs16(addr_t addr)
{
+ u16 *ptr = (u16 *)absolute_pointer(addr);
u16 v;
- asm volatile("movw %%gs:%1,%0" : "=r" (v) : "m" (*(u16 *)addr));
+ asm volatile("movw %%gs:%1,%0" : "=r" (v) : "m" (*ptr));
return v;
}
static inline u32 rdgs32(addr_t addr)
{
+ u32 *ptr = (u32 *)absolute_pointer(addr);
u32 v;
- asm volatile("movl %%gs:%1,%0" : "=r" (v) : "m" (*(u32 *)addr));
+ asm volatile("movl %%gs:%1,%0" : "=r" (v) : "m" (*ptr));
return v;
}
static inline void wrgs8(u8 v, addr_t addr)
{
- asm volatile("movb %1,%%gs:%0" : "+m" (*(u8 *)addr) : "qi" (v));
+ u8 *ptr = (u8 *)absolute_pointer(addr);
+ asm volatile("movb %1,%%gs:%0" : "+m" (*ptr) : "qi" (v));
}
static inline void wrgs16(u16 v, addr_t addr)
{
- asm volatile("movw %1,%%gs:%0" : "+m" (*(u16 *)addr) : "ri" (v));
+ u16 *ptr = (u16 *)absolute_pointer(addr);
+ asm volatile("movw %1,%%gs:%0" : "+m" (*ptr) : "ri" (v));
}
static inline void wrgs32(u32 v, addr_t addr)
{
- asm volatile("movl %1,%%gs:%0" : "+m" (*(u32 *)addr) : "ri" (v));
+ u32 *ptr = (u32 *)absolute_pointer(addr);
+ asm volatile("movl %1,%%gs:%0" : "+m" (*ptr) : "ri" (v));
}
/* Note: these only return true/false, not a signed return value! */
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 3db9261da0bc..4f15f3fbb2e4 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -58,6 +58,10 @@ else
KBUILD_LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
&& echo "-z noreloc-overflow -pie --no-dynamic-linker")
endif
+
+KBUILD_LDFLAGS += -z noexecstack
+KBUILD_LDFLAGS += $(call ld-option,--no-warn-rwx-segments)
+
LDFLAGS_vmlinux := -T
hostprogs-y := mkpiggy
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index d7c0fcc1dbf9..b788b986f335 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -210,7 +210,7 @@ ENDPROC(efi32_stub_entry)
#endif
.text
-.Lrelocated:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
/*
* Clear BSS (stack is currently empty)
@@ -261,6 +261,7 @@ ENDPROC(efi32_stub_entry)
*/
xorl %ebx, %ebx
jmp *%eax
+SYM_FUNC_END(.Lrelocated)
#ifdef CONFIG_EFI_STUB
.data
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 50c9eeb36f0d..d8164e6abaaf 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -381,11 +381,25 @@ ENTRY(startup_64)
/* Save the trampoline address in RCX */
movq %rax, %rcx
+ /* Set up 32-bit addressable stack */
+ leaq TRAMPOLINE_32BIT_STACK_END(%rcx), %rsp
+
+ /*
+ * Preserve live 64-bit registers on the stack: this is necessary
+ * because the architecture does not guarantee that GPRs will retain
+ * their full 64-bit values across a 32-bit mode switch.
+ */
+ pushq %rbp
+ pushq %rbx
+ pushq %rsi
+
/*
- * Load the address of trampoline_return() into RDI.
- * It will be used by the trampoline to return to the main code.
+ * Push the 64-bit address of trampoline_return() onto the new stack.
+ * It will be used by the trampoline to return to the main code. Due to
+	 * the 32-bit mode switch, it cannot be kept in a register either.
*/
leaq trampoline_return(%rip), %rdi
+ pushq %rdi
/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
pushq $__KERNEL32_CS
@@ -393,6 +407,11 @@ ENTRY(startup_64)
pushq %rax
lretq
trampoline_return:
+ /* Restore live 64-bit registers */
+ popq %rsi
+ popq %rbx
+ popq %rbp
+
/* Restore the stack, the 32-bit trampoline uses its own stack */
leaq boot_stack_end(%rbx), %rsp
@@ -517,7 +536,7 @@ ENDPROC(efi64_stub_entry)
#endif
.text
-.Lrelocated:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
/*
* Clear BSS (stack is currently empty)
@@ -546,6 +565,7 @@ ENDPROC(efi64_stub_entry)
* Jump to the decompressed kernel.
*/
jmp *%rax
+SYM_FUNC_END(.Lrelocated)
/*
* Adjust the global offset table
@@ -572,7 +592,7 @@ ENDPROC(efi64_stub_entry)
/*
* This is the 32-bit trampoline that will be copied over to low memory.
*
- * RDI contains the return address (might be above 4G).
+ * Return address is at the top of the stack (might be above 4G).
* ECX contains the base address of the trampoline memory.
* Non zero RDX means trampoline needs to enable 5-level paging.
*/
@@ -582,9 +602,6 @@ ENTRY(trampoline_32bit_src)
movl %eax, %ds
movl %eax, %ss
- /* Set up new stack */
- leal TRAMPOLINE_32BIT_STACK_END(%ecx), %esp
-
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
@@ -641,9 +658,10 @@ ENTRY(trampoline_32bit_src)
lret
.code64
-.Lpaging_enabled:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
/* Return from the trampoline */
- jmp *%rdi
+ retq
+SYM_FUNC_END(.Lpaging_enabled)
/*
* The trampoline code has a size limit.
@@ -653,11 +671,12 @@ ENTRY(trampoline_32bit_src)
.org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
.code32
-.Lno_longmode:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
1:
hlt
jmp 1b
+SYM_FUNC_END(.Lno_longmode)
#include "../../kernel/verify_cpu.S"
diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
index e3add857c2c9..c421af5a3cdc 100644
--- a/arch/x86/boot/main.c
+++ b/arch/x86/boot/main.c
@@ -33,7 +33,7 @@ static void copy_boot_params(void)
u16 cl_offset;
};
const struct old_cmdline * const oldcmd =
- (const struct old_cmdline *)OLD_CL_ADDRESS;
+ absolute_pointer(OLD_CL_ADDRESS);
BUILD_BUG_ON(sizeof(boot_params) != 4096);
memcpy(&boot_params.hdr, &hdr, sizeof(hdr));
diff --git a/arch/x86/boot/pmjump.S b/arch/x86/boot/pmjump.S
index c22f9a7d1aeb..81658fe35380 100644
--- a/arch/x86/boot/pmjump.S
+++ b/arch/x86/boot/pmjump.S
@@ -40,13 +40,13 @@ GLOBAL(protected_mode_jump)
# Transition to 32-bit mode
.byte 0x66, 0xea # ljmpl opcode
-2: .long in_pm32 # offset
+2: .long .Lin_pm32 # offset
.word __BOOT_CS # segment
ENDPROC(protected_mode_jump)
.code32
.section ".text32","ax"
-GLOBAL(in_pm32)
+SYM_FUNC_START_LOCAL_NOALIGN(.Lin_pm32)
# Set up data segments for flat 32-bit mode
movl %ecx, %ds
movl %ecx, %es
@@ -72,4 +72,4 @@ GLOBAL(in_pm32)
lldt %cx
jmpl *%eax # Jump to the 32-bit entrypoint
-ENDPROC(in_pm32)
+SYM_FUNC_END(.Lin_pm32)
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 04d72a5a8ce9..c9864ac9c014 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -19,6 +19,7 @@
#include <crypto/internal/simd.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
+#include <asm/unaligned.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
@@ -54,7 +55,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
- be128 *x = (be128 *)key;
u64 a, b;
if (keylen != GHASH_BLOCK_SIZE) {
@@ -63,8 +63,8 @@ static int ghash_setkey(struct crypto_shash *tfm,
}
/* perform multiplication by 'x' in GF(2^128) */
- a = be64_to_cpu(x->a);
- b = be64_to_cpu(x->b);
+ a = get_unaligned_be64(key);
+ b = get_unaligned_be64(key + 8);
ctx->shash.a = (b << 1) | (a >> 63);
ctx->shash.b = (a << 1) | (b >> 63);
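
The switch to get_unaligned_be64() matters because the key buffer carries no alignment guarantee, so the old be128 cast could fault or misbehave on strict-alignment configurations. A rough user-space sketch of what such a byte-wise big-endian load does (the helper name is hypothetical; the real one lives behind asm/unaligned.h):

#include <stdint.h>
#include <string.h>

/* Stand-in for get_unaligned_be64(): copy byte-wise so no aligned
 * 64-bit access is ever issued, then assemble most significant first. */
static uint64_t load_be64(const void *p)
{
        uint8_t b[8];
        uint64_t v = 0;
        int i;

        memcpy(b, p, sizeof(b));        /* legal for any alignment */
        for (i = 0; i < 8; i++)
                v = (v << 8) | b[i];
        return v;
}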
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index b3f121478738..29e5675c6d4f 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -6,6 +6,8 @@
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
+#include <asm/msr.h>
+#include <asm/nospec-branch.h>
/*
@@ -146,27 +148,19 @@ For 32-bit we have the following conventions - kernel is built with
.endm
-.macro POP_REGS pop_rdi=1 skip_r11rcx=0
+.macro POP_REGS pop_rdi=1
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
popq %rbx
- .if \skip_r11rcx
- popq %rsi
- .else
popq %r11
- .endif
popq %r10
popq %r9
popq %r8
popq %rax
- .if \skip_r11rcx
- popq %rsi
- .else
popq %rcx
- .endif
popq %rdx
popq %rsi
.if \pop_rdi
@@ -317,6 +311,62 @@ For 32-bit we have the following conventions - kernel is built with
#endif
/*
+ * IBRS kernel mitigation for Spectre_v2.
+ *
+ * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
+ * the regs it uses (AX, CX, DX). Must be called before the first RET
+ * instruction (NOTE! UNTRAIN_RET includes a RET instruction).
+ *
+ * The optional argument is used to save/restore the current value,
+ * which is used on the paranoid paths.
+ *
+ * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
+ */
+.macro IBRS_ENTER save_reg
+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
+ movl $MSR_IA32_SPEC_CTRL, %ecx
+
+.ifnb \save_reg
+ rdmsr
+ shl $32, %rdx
+ or %rdx, %rax
+ mov %rax, \save_reg
+ test $SPEC_CTRL_IBRS, %eax
+ jz .Ldo_wrmsr_\@
+ lfence
+ jmp .Lend_\@
+.Ldo_wrmsr_\@:
+.endif
+
+ movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+ movl %edx, %eax
+ shr $32, %rdx
+ wrmsr
+.Lend_\@:
+.endm
+
+/*
+ * Similar to IBRS_ENTER; requires kernel GS and CR3, and clobbers the same
+ * regs (AX, CX, DX). Must be called after the last RET.
+ */
+.macro IBRS_EXIT save_reg
+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
+ movl $MSR_IA32_SPEC_CTRL, %ecx
+
+.ifnb \save_reg
+ mov \save_reg, %rdx
+.else
+ movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+ andl $(~SPEC_CTRL_IBRS), %edx
+.endif
+
+ movl %edx, %eax
+ shr $32, %rdx
+ wrmsr
+.Lend_\@:
+.endm
+
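+
A brief aside on the EDX:EAX convention in the rdmsr/wrmsr sequences above: a 64-bit MSR value always travels through two 32-bit halves, which is what the shl/or and shr pairs reassemble and split. A hedged C illustration (helper names are invented):

#include <stdint.h>

/* What "shl $32, %rdx; or %rdx, %rax" computes after RDMSR. */
static inline uint64_t msr_combine(uint32_t eax, uint32_t edx)
{
        return ((uint64_t)edx << 32) | eax;
}

/* What "movl %edx, %eax; shr $32, %rdx" prepares before WRMSR. */
static inline void msr_split(uint64_t val, uint32_t *eax, uint32_t *edx)
{
        *eax = (uint32_t)val;
        *edx = (uint32_t)(val >> 32);
}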
+/*
* Mitigate Spectre v1 for conditional swapgs code paths.
*
* FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index bde3e0f85425..740df9cc2196 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -750,7 +750,6 @@ ENTRY(__switch_to_asm)
movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif
-#ifdef CONFIG_RETPOLINE
/*
* When switching from a shallower to a deeper call stack
* the RSB may either underflow or use entries populated
@@ -759,7 +758,6 @@ ENTRY(__switch_to_asm)
* speculative execution to prevent attack.
*/
FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
-#endif
/* restore callee-saved registers */
popfl
@@ -1661,13 +1659,13 @@ ENTRY(async_page_fault)
END(async_page_fault)
#endif
-ENTRY(rewind_stack_do_exit)
+ENTRY(rewind_stack_and_make_dead)
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
movl PER_CPU_VAR(cpu_current_top_of_stack), %esi
leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
- call do_exit
+ call make_task_dead
1: jmp 1b
-END(rewind_stack_do_exit)
+END(rewind_stack_and_make_dead)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 2ba3d53ac5b1..640c7d36c26c 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -172,6 +172,10 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
/* IRQs are off. */
movq %rax, %rdi
movq %rsp, %rsi
+
+	/* clobbers %rax; make sure it is placed after saving the syscall nr */
+ IBRS_ENTER
+
call do_syscall_64 /* returns with IRQs disabled */
TRACE_IRQS_IRETQ /* we're about to change IF */
@@ -248,8 +252,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
* perf profiles. Nothing jumps here.
*/
syscall_return_via_sysret:
- /* rcx and r11 are already restored (see code above) */
- POP_REGS pop_rdi=0 skip_r11rcx=1
+ IBRS_EXIT
+ POP_REGS pop_rdi=0
/*
* Now all regs are restored except RSP and RDI.
@@ -301,7 +305,6 @@ ENTRY(__switch_to_asm)
movq %rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
#endif
-#ifdef CONFIG_RETPOLINE
/*
* When switching from a shallower to a deeper call stack
* the RSB may either underflow or use entries populated
@@ -310,7 +313,6 @@ ENTRY(__switch_to_asm)
* speculative execution to prevent attack.
*/
FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
-#endif
/* restore callee-saved registers */
popq %r15
@@ -616,12 +618,13 @@ ret_from_intr:
jz retint_kernel
/* Interrupt came from user space */
-GLOBAL(retint_user)
+.Lretint_user:
mov %rsp,%rdi
call prepare_exit_to_usermode
TRACE_IRQS_IRETQ
GLOBAL(swapgs_restore_regs_and_return_to_usermode)
+ IBRS_EXIT
#ifdef CONFIG_DEBUG_ENTRY
/* Assert that pt_regs indicates user mode. */
testb $3, CS(%rsp)
@@ -1248,7 +1251,13 @@ ENTRY(paranoid_entry)
*/
FENCE_SWAPGS_KERNEL_ENTRY
- ret
+ /*
+	 * Once we have CR3 and %GS set up, save and set SPEC_CTRL. Just like
+ * CR3 above, keep the old value in a callee saved register.
+ */
+ IBRS_ENTER save_reg=%r15
+
+ RET
END(paranoid_entry)
/*
@@ -1276,12 +1285,20 @@ ENTRY(paranoid_exit)
jmp .Lparanoid_exit_restore
.Lparanoid_exit_no_swapgs:
TRACE_IRQS_IRETQ_DEBUG
+
+ /*
+ * Must restore IBRS state before both CR3 and %GS since we need access
+ * to the per-CPU x86_spec_ctrl_shadow variable.
+ */
+ IBRS_EXIT save_reg=%r15
+
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
.Lparanoid_exit_restore:
jmp restore_regs_and_return_to_kernel
END(paranoid_exit)
+
/*
* Save all registers in pt_regs, and switch GS if needed.
*/
@@ -1301,6 +1318,7 @@ ENTRY(error_entry)
FENCE_SWAPGS_USER_ENTRY
/* We have user CR3. Change to kernel CR3. */
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+ IBRS_ENTER
.Lerror_entry_from_usermode_after_swapgs:
/* Put us onto the real thread stack. */
@@ -1356,6 +1374,7 @@ ENTRY(error_entry)
SWAPGS
FENCE_SWAPGS_USER_ENTRY
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+ IBRS_ENTER
/*
* Pretend that the exception came from user mode: set up pt_regs
@@ -1373,7 +1392,7 @@ ENTRY(error_exit)
TRACE_IRQS_OFF
testb $3, CS(%rsp)
jz retint_kernel
- jmp retint_user
+ jmp .Lretint_user
END(error_exit)
/*
@@ -1461,6 +1480,8 @@ ENTRY(nmi)
PUSH_AND_CLEAR_REGS rdx=(%rdx)
ENCODE_FRAME_POINTER
+ IBRS_ENTER
+
/*
* At this point we no longer need to worry about stack damage
* due to nesting -- we're on the normal thread stack and we're
@@ -1684,6 +1705,9 @@ end_repeat_nmi:
movq $-1, %rsi
call do_nmi
+ /* Always restore stashed SPEC_CTRL value (see paranoid_entry) */
+ IBRS_EXIT save_reg=%r15
+
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
@@ -1733,7 +1757,7 @@ ENTRY(ignore_sysret)
END(ignore_sysret)
#endif
-ENTRY(rewind_stack_do_exit)
+ENTRY(rewind_stack_and_make_dead)
UNWIND_HINT_FUNC
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
@@ -1742,5 +1766,5 @@ ENTRY(rewind_stack_do_exit)
leaq -PTREGS_SIZE(%rax), %rsp
UNWIND_HINT_REGS
- call do_exit
-END(rewind_stack_do_exit)
+ call make_task_dead
+END(rewind_stack_and_make_dead)
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 39913770a44d..c3c4ea4a6711 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -4,7 +4,6 @@
*
* Copyright 2000-2002 Andi Kleen, SuSE Labs.
*/
-#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/errno.h>
@@ -17,6 +16,8 @@
#include <linux/linkage.h>
#include <linux/err.h>
+#include "calling.h"
+
.section .entry.text, "ax"
/*
@@ -106,6 +107,8 @@ ENTRY(entry_SYSENTER_compat)
xorl %r15d, %r15d /* nospec r15 */
cld
+ IBRS_ENTER
+
/*
* SYSENTER doesn't filter flags, so we need to clear NT and AC
* ourselves. To save a few cycles, we can check whether
@@ -253,6 +256,8 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
*/
TRACE_IRQS_OFF
+ IBRS_ENTER
+
movq %rsp, %rdi
call do_fast_syscall_32
/* XEN PV guests always use IRET path */
@@ -267,6 +272,9 @@ sysret32_from_system_call:
*/
STACKLEAK_ERASE
TRACE_IRQS_ON /* User mode traces as IRQs on. */
+
+ IBRS_EXIT
+
movq RBX(%rsp), %rbx /* pt_regs->rbx */
movq RBP(%rsp), %rbp /* pt_regs->rbp */
movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */
@@ -408,6 +416,7 @@ ENTRY(entry_INT80_compat)
* gate turned them off.
*/
TRACE_IRQS_OFF
+ IBRS_ENTER
movq %rsp, %rdi
call do_int80_syscall_32
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 0f2154106d01..5aae81f40bc3 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -178,7 +178,7 @@ quiet_cmd_vdso = VDSO $@
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
VDSO_LDFLAGS = -shared --hash-style=both --build-id \
- $(call ld-option, --eh-frame-hdr) -Bsymbolic
+ $(call ld-option, --eh-frame-hdr) -Bsymbolic -z noexecstack
GCOV_PROFILE := n
quiet_cmd_vdso_and_check = VDSO $@
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 3613cfb83c6d..8d7a4a2caf55 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -222,8 +222,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
/* Round the lowest possible end address up to a PMD boundary. */
end = (start + len + PMD_SIZE - 1) & PMD_MASK;
- if (end >= TASK_SIZE_MAX)
- end = TASK_SIZE_MAX;
+ if (end >= DEFAULT_MAP_WINDOW)
+ end = DEFAULT_MAP_WINDOW;
end -= len;
if (end > start) {
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 3ea8056148d8..c3555e5237cf 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -969,7 +969,7 @@ static int __init amd_core_pmu_init(void)
* numbered counter following it.
*/
for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
- even_ctr_mask |= 1 << i;
+ even_ctr_mask |= BIT_ULL(i);
pair_constraint = (struct event_constraint)
__EVENT_CONSTRAINT(0, even_ctr_mask, 0,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index f2976204e8b5..8ae7b8b4d460 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4009,6 +4009,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 11, 0x00000000),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c),
INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e),
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 5965d341350c..6da90c6f3390 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -852,8 +852,13 @@ struct event_constraint intel_icl_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */
INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf), /* MEM_INST_RETIRED.STORE */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index aec6e63c6a04..0258e0065771 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -575,6 +575,22 @@ int snb_pci2phy_map_init(int devid)
return 0;
}
+static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * SNB IMC counters are 32-bit and are laid out back to back
+	 * in MMIO space. Therefore we must use a 32-bit accessor function;
+	 * using readq() from uncore_mmio_read_counter() causes problems
+	 * because it reads 64 bits at a time. This is okay for
+	 * uncore_perf_event_update() because it drops the upper 32 bits,
+	 * but not for plain uncore_read_counter() as invoked in
+	 * uncore_pmu_event_start().
+ */
+ return (u64)readl(box->io_addr + hwc->event_base);
+}
+
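
To see why the 64-bit accessor misbehaved here, consider two 32-bit counters packed back to back. A small hedged demo in plain user-space C (not kernel code) of a 64-bit load picking up the neighbouring counter in its upper half:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Two adjacent 32-bit IMC-style counters, as laid out in MMIO. */
        uint32_t ctr[2] = { 0x11111111u, 0x22222222u };
        uint64_t wide;

        memcpy(&wide, ctr, sizeof(wide));       /* what readq() would see */
        printf("32-bit read: %#x\n", ctr[0]);
        printf("64-bit read: %#llx\n", (unsigned long long)wide);
        return 0;
}

On little-endian x86 the wide read yields 0x2222222211111111, so a start value sampled with readq() can never be reconciled with later 32-bit deltas.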
static struct pmu snb_uncore_imc_pmu = {
.task_ctx_nr = perf_invalid_context,
.event_init = snb_uncore_imc_event_init,
@@ -594,7 +610,7 @@ static struct intel_uncore_ops snb_uncore_imc_ops = {
.disable_event = snb_uncore_imc_disable_event,
.enable_event = snb_uncore_imc_enable_event,
.hw_config = snb_uncore_imc_hw_config,
- .read_counter = uncore_mmio_read_counter,
+ .read_counter = snb_uncore_imc_read_counter,
};
static struct intel_uncore_type snb_uncore_imc = {
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 0f61f46e6086..fe2edc760e60 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -2762,6 +2762,7 @@ static bool hswep_has_limit_sbox(unsigned int device)
return false;
pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
+ pci_dev_put(dev);
if (!hswep_get_chop(capid4))
return true;
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 9bb71abd66bd..37b36a8ce5fa 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -140,6 +140,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
set_personality_ia32(false);
setup_new_exec(bprm);
+ install_exec_creds(bprm);
regs->cs = __USER32_CS;
regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
@@ -156,8 +157,6 @@ static int load_aout_binary(struct linux_binprm *bprm)
if (retval < 0)
return retval;
- install_exec_creds(bprm);
-
if (N_MAGIC(ex) == OMAGIC) {
unsigned long text_addr, map_size;
diff --git a/arch/x86/include/asm/bugs.h b/arch/x86/include/asm/bugs.h
index 794eb2129bc6..6554ddb2ad49 100644
--- a/arch/x86/include/asm/bugs.h
+++ b/arch/x86/include/asm/bugs.h
@@ -4,8 +4,6 @@
#include <asm/processor.h>
-extern void check_bugs(void);
-
#if defined(CONFIG_CPU_SUP_INTEL)
void check_mpx_erratum(struct cpuinfo_x86 *c);
#else
diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
index 0c814cd9ea42..cdf39decf734 100644
--- a/arch/x86/include/asm/cpu_device_id.h
+++ b/arch/x86/include/asm/cpu_device_id.h
@@ -5,15 +5,22 @@
/*
* Declare drivers belonging to specific x86 CPUs
* Similar in spirit to pci_device_id and related PCI functions
+ *
+ * The wildcard initializers are in mod_devicetable.h because
+ * file2alias needs them. Sigh.
*/
-
#include <linux/mod_devicetable.h>
+/* Get the INTEL_FAM* model defines */
+#include <asm/intel-family.h>
+/* And the X86_VENDOR_* ones */
+#include <asm/processor.h>
+/* Centaur FAM6 models */
+#define X86_CENTAUR_FAM6_C7_A 0xa
#define X86_CENTAUR_FAM6_C7_D 0xd
#define X86_CENTAUR_FAM6_NANO 0xf
#define X86_STEPPINGS(mins, maxs) GENMASK(maxs, mins)
-
/**
* X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching
* @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
@@ -26,8 +33,11 @@
* format is unsigned long. The supplied value, pointer
 * etc. is cast to unsigned long internally.
*
- * Backport version to keep the SRBDS pile consistant. No shorter variants
- * required for this.
+ * Use only if you need all selectors. Otherwise use one of the shorter
+ * macros of the X86_MATCH_* family. If there is no matching shorthand
+ * macro, consider adding one. If you really need to wrap one of the macros
+ * into another macro at the usage site for good reasons, then please
+ * start this local macro with X86_MATCH to allow easy grepping.
*/
#define X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \
_steppings, _feature, _data) { \
@@ -39,6 +49,120 @@
.driver_data = (unsigned long) _data \
}
+/**
+ * X86_MATCH_VENDOR_FAM_MODEL_FEATURE - Macro for CPU matching
+ * @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ * The name is expanded to X86_VENDOR_@_vendor
+ * @_family: The family number or X86_FAMILY_ANY
+ * @_model: The model number, model constant or X86_MODEL_ANY
+ * @_feature: A X86_FEATURE bit or X86_FEATURE_ANY
+ * @_data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ *		etc. is cast to unsigned long internally.
+ *
+ * The steppings argument of X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE() is
+ * set to wildcards.
+ */
+#define X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, model, feature, data) \
+ X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(vendor, family, model, \
+ X86_STEPPING_ANY, feature, data)
+
+/**
+ * X86_MATCH_VENDOR_FAM_FEATURE - Macro for matching vendor, family and CPU feature
+ * @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ * The name is expanded to X86_VENDOR_@vendor
+ * @family: The family number or X86_FAMILY_ANY
+ * @feature: A X86_FEATURE bit
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ *		etc. is cast to unsigned long internally.
+ *
+ * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
+ * set to wildcards.
+ */
+#define X86_MATCH_VENDOR_FAM_FEATURE(vendor, family, feature, data) \
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, \
+ X86_MODEL_ANY, feature, data)
+
+/**
+ * X86_MATCH_VENDOR_FEATURE - Macro for matching vendor and CPU feature
+ * @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ * The name is expanded to X86_VENDOR_@vendor
+ * @feature: A X86_FEATURE bit
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ *		etc. is cast to unsigned long internally.
+ *
+ * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
+ * set to wildcards.
+ */
+#define X86_MATCH_VENDOR_FEATURE(vendor, feature, data) \
+ X86_MATCH_VENDOR_FAM_FEATURE(vendor, X86_FAMILY_ANY, feature, data)
+
+/**
+ * X86_MATCH_FEATURE - Macro for matching a CPU feature
+ * @feature: A X86_FEATURE bit
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ *		etc. is cast to unsigned long internally.
+ *
+ * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
+ * set to wildcards.
+ */
+#define X86_MATCH_FEATURE(feature, data) \
+ X86_MATCH_VENDOR_FEATURE(ANY, feature, data)
+
+/* Transitional to keep the existing code working */
+#define X86_FEATURE_MATCH(feature) X86_MATCH_FEATURE(feature, NULL)
+
+/**
+ * X86_MATCH_VENDOR_FAM_MODEL - Match vendor, family and model
+ * @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ * The name is expanded to X86_VENDOR_@vendor
+ * @family: The family number or X86_FAMILY_ANY
+ * @model: The model number, model constant or X86_MODEL_ANY
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ *		etc. is cast to unsigned long internally.
+ *
+ * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
+ * set to wildcards.
+ */
+#define X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, data) \
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, model, \
+ X86_FEATURE_ANY, data)
+
+/**
+ * X86_MATCH_VENDOR_FAM - Match vendor and family
+ * @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ * The name is expanded to X86_VENDOR_@vendor
+ * @family: The family number or X86_FAMILY_ANY
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ *		etc. is cast to unsigned long internally.
+ *
+ * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
+ * set to wildcards.
+ */
+#define X86_MATCH_VENDOR_FAM(vendor, family, data) \
+ X86_MATCH_VENDOR_FAM_MODEL(vendor, family, X86_MODEL_ANY, data)
+
+/**
+ * X86_MATCH_INTEL_FAM6_MODEL - Match vendor INTEL, family 6 and model
+ * @model: The model name without the INTEL_FAM6_ prefix or ANY
+ * The model name is expanded to INTEL_FAM6_@model internally
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ *		etc. is cast to unsigned long internally.
+ *
+ * The vendor is set to INTEL, the family to 6 and all other missing
+ * arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are set to wildcards.
+ *
+ * See X86_MATCH_VENDOR_FAM_MODEL_FEATURE() for further information.
+ */
+#define X86_MATCH_INTEL_FAM6_MODEL(model, data) \
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, INTEL_FAM6_##model, data)
+
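
To round off the macro family above, a hedged sketch of how a driver typically consumes it. The table contents below are illustrative; x86_match_cpu() and MODULE_DEVICE_TABLE() are the existing consumers of struct x86_cpu_id:

#include <asm/cpu_device_id.h>
#include <linux/module.h>

static const struct x86_cpu_id example_cpu_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, NULL),
        X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, example_cpu_ids);

static int __init example_init(void)
{
        /* Bail out early on CPUs the driver does not support. */
        if (!x86_match_cpu(example_cpu_ids))
                return -ENODEV;
        return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");

The empty sentinel entry is required: x86_match_cpu() walks the table until it reaches it.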
/*
* Match specific microcode revisions.
*
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index ea866c7bf31d..0d1d37d8b279 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -133,7 +133,7 @@ extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
-static inline struct entry_stack *cpu_entry_stack(int cpu)
+static __always_inline struct entry_stack *cpu_entry_stack(int cpu)
{
return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 619c1f80a2ab..4466a47b7608 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -30,6 +30,8 @@ enum cpuid_leafs
CPUID_7_ECX,
CPUID_8000_0007_EBX,
CPUID_7_EDX,
+ CPUID_8000_001F_EAX,
+ CPUID_8000_0021_EAX,
};
#ifdef CONFIG_X86_FEATURE_NAMES
@@ -88,8 +90,10 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \
REQUIRED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 19))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 21))
#define DISABLED_MASK_BIT_SET(feature_bit) \
( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
@@ -111,8 +115,10 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \
DISABLED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 19))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 21))
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 8c28a2365a92..f42286e9a2b1 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -13,8 +13,8 @@
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS 19 /* N 32-bit words worth of info */
-#define NBUGINTS 1 /* N 32-bit bug flags */
+#define NCAPINTS 21 /* N 32-bit words worth of info */
+#define NBUGINTS 2 /* N 32-bit bug flags */
/*
* Note: If the comment begins with a quoted string, that string is used
@@ -96,7 +96,7 @@
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
-#define X86_FEATURE_SME_COHERENT ( 3*32+17) /* "" AMD hardware-enforced cache coherency */
+/* FREE! ( 3*32+17) */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
@@ -201,17 +201,17 @@
#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
+/* FREE! ( 7*32+10) */
#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
-#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
+#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
-#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
+/* FREE! ( 7*32+20) */
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
@@ -286,6 +286,11 @@
#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
+#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */
+#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
+#define X86_FEATURE_MSR_TSX_CTRL (11*32+18) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
@@ -302,6 +307,7 @@
#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
+#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
@@ -369,6 +375,13 @@
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+/* AMD-defined memory encryption features, CPUID level 0x8000001f (EAX), word 19 */
+#define X86_FEATURE_SME (19*32+ 0) /* AMD Secure Memory Encryption */
+#define X86_FEATURE_SEV (19*32+ 1) /* AMD Secure Encrypted Virtualization */
+#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* "" VM Page Flush MSR is supported */
+#define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
+#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
+
/*
* BUG word(s)
*/
@@ -406,5 +419,9 @@
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
+#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */
+#define X86_BUG_EIBRS_PBRSB X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+#define X86_BUG_MMIO_UNKNOWN X86_BUG(28) /* CPU is too old and its MMIO Stale Data status is unknown */
+#define X86_BUG_GDS X86_BUG(29) /* CPU is affected by Gather Data Sampling */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index a5ea841cc6d2..8453260f6d9f 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -84,6 +84,8 @@
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
#define DISABLED_MASK17 0
#define DISABLED_MASK18 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
+#define DISABLED_MASK19 0
+#define DISABLED_MASK20 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 5ed702e2c55f..330841bad616 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -41,7 +41,7 @@ extern int dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
-extern void fpu__init_system(struct cpuinfo_x86 *c);
+extern void fpu__init_system(void);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64 fpu__get_supported_xfeatures_mask(void);
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 7741e211f7f5..333e61e6dbe7 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -721,7 +721,7 @@ struct hv_enlightened_vmcs {
u64 guest_rip;
u32 hv_clean_fields;
- u32 hv_padding_32;
+ u32 padding32_1;
u32 hv_synthetic_controls;
struct {
u32 nested_flush_hypercall:1;
@@ -729,7 +729,7 @@ struct hv_enlightened_vmcs {
u32 reserved:30;
} __packed hv_enlightenments_control;
u32 hv_vp_id;
-
+ u32 padding32_2;
u64 hv_vm_id;
u64 partition_assist_page;
u64 padding64_4[4];
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 89789e8c80f6..e16574c16e93 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -67,6 +67,8 @@ struct legacy_pic {
void (*make_irq)(unsigned int irq);
};
+void legacy_pic_pcat_compat(void);
+
extern struct legacy_pic *legacy_pic;
extern struct legacy_pic null_legacy_pic;
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 5b07573c3bc8..6fdd863198ec 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -35,6 +35,9 @@
* The #define line may optionally include a comment including platform names.
*/
+/* Wildcard match for FAM6 so X86_MATCH_INTEL_FAM6_MODEL(ANY) works */
+#define INTEL_FAM6_ANY X86_MODEL_ANY
+
#define INTEL_FAM6_CORE_YONAH 0x0E
#define INTEL_FAM6_CORE2_MEROM 0x0F
@@ -93,6 +96,11 @@
#define INTEL_FAM6_LAKEFIELD 0x8A
#define INTEL_FAM6_ALDERLAKE 0x97
#define INTEL_FAM6_ALDERLAKE_L 0x9A
+#define INTEL_FAM6_ALDERLAKE_N 0xBE
+
+#define INTEL_FAM6_RAPTORLAKE 0xB7
+#define INTEL_FAM6_RAPTORLAKE_P 0xBA
+#define INTEL_FAM6_RAPTORLAKE_S 0xBF
/* "Small Core" Processors (Atom) */
@@ -126,6 +134,9 @@
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
+/* Family 5 */
+#define INTEL_FAM5_QUARK_X1000 0x09 /* Quark X1000 SoC */
+
/* Useful macros */
#define INTEL_CPU_FAM_ANY(_family, _model, _driver_data) \
{ \
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4bc476d7fa6c..80239c84b4dd 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -563,6 +563,7 @@ struct kvm_vcpu_arch {
u64 ia32_misc_enable_msr;
u64 smbase;
u64 smi_count;
+ bool at_instruction_boundary;
bool tpr_access_reporting;
u64 ia32_xss;
u64 microcode_version;
@@ -981,6 +982,8 @@ struct kvm_vcpu_stat {
u64 irq_injections;
u64 nmi_injections;
u64 req_event;
+ u64 preemption_reported;
+ u64 preemption_other;
};
struct x86_instruction_info;
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 848ce43b9040..190c5ca537e9 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -77,6 +77,8 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0;
static inline int __init
early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
+static inline void mem_encrypt_init(void) { }
+
#define __bss_decrypted
#endif /* CONFIG_AMD_MEM_ENCRYPT */
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 91a06cef50c1..394605e59f2b 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -5,10 +5,12 @@
#include <asm/cpu.h>
#include <linux/earlycpio.h>
#include <linux/initrd.h>
+#include <asm/microcode_amd.h>
struct ucode_patch {
struct list_head plist;
void *data; /* Intel uses only this one */
+ unsigned int size;
u32 patch_id;
u16 equiv_cpu;
};
@@ -130,7 +132,7 @@ static inline unsigned int x86_cpuid_family(void)
int __init microcode_init(void);
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
-void reload_early_microcode(void);
+void reload_early_microcode(unsigned int cpu);
extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
extern bool initrd_gone;
void microcode_bsp_resume(void);
@@ -138,7 +140,7 @@ void microcode_bsp_resume(void);
static inline int __init microcode_init(void) { return 0; };
static inline void __init load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
-static inline void reload_early_microcode(void) { }
+static inline void reload_early_microcode(unsigned int cpu) { }
static inline void microcode_bsp_resume(void) { }
static inline bool
get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index 5c524d4f71cd..403a8e76b310 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -47,12 +47,14 @@ struct microcode_amd {
extern void __init load_ucode_amd_bsp(unsigned int family);
extern void load_ucode_amd_ap(unsigned int family);
extern int __init save_microcode_in_initrd_amd(unsigned int family);
-void reload_ucode_amd(void);
+void reload_ucode_amd(unsigned int cpu);
+extern void amd_check_microcode(void);
#else
static inline void __init load_ucode_amd_bsp(unsigned int family) {}
static inline void load_ucode_amd_ap(unsigned int family) {}
static inline int __init
save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
-void reload_ucode_amd(void) {}
+static inline void reload_ucode_amd(unsigned int cpu) {}
+static inline void amd_check_microcode(void) {}
#endif
#endif /* _ASM_X86_MICROCODE_AMD_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index c56042916a7c..5a4a391f556a 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -47,6 +47,12 @@
#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+#define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */
+#define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
+
+/* A mask for bits which the kernel toggles when controlling mitigations */
+#define SPEC_CTRL_MITIGATIONS_MASK (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \
+ | SPEC_CTRL_RRSBA_DIS_S)
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
@@ -82,6 +88,7 @@
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */
+#define ARCH_CAP_RSBA BIT(2) /* RET may use alternative branch predictors */
#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */
#define ARCH_CAP_SSB_NO BIT(4) /*
* Not susceptible to Speculative Store Bypass
@@ -129,6 +136,26 @@
* bit available to control VERW
* behavior.
*/
+#define ARCH_CAP_RRSBA BIT(19) /*
+ * Indicates RET may use predictors
+ * other than the RSB. With eIBRS
+					 * enabled, predictions in kernel mode
+					 * are restricted to targets in the
+					 * kernel.
+ */
+#define ARCH_CAP_PBRSB_NO BIT(24) /*
+ * Not susceptible to Post-Barrier
+ * Return Stack Buffer Predictions.
+ */
+#define ARCH_CAP_GDS_CTRL BIT(25) /*
+ * CPU is vulnerable to Gather
+ * Data Sampling (GDS) and
+ * has controls for mitigation.
+ */
+#define ARCH_CAP_GDS_NO BIT(26) /*
+ * CPU is not vulnerable to Gather
+ * Data Sampling (GDS).
+ */
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
@@ -147,6 +174,8 @@
#define MSR_IA32_MCU_OPT_CTRL 0x00000123
#define RNGDS_MITG_DIS BIT(0)
#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */
+#define GDS_MITG_DIS BIT(4) /* Disable GDS mitigation */
+#define GDS_MITG_LOCKED BIT(5) /* GDS mitigation locked */
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
@@ -440,6 +469,13 @@
#define MSR_AMD64_OSVW_STATUS 0xc0010141
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
+#define MSR_AMD64_TW_CFG 0xc0011023
+
+#define MSR_AMD64_DE_CFG 0xc0011029
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
+#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9
+
#define MSR_AMD64_BU_CFG2 0xc001102a
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
@@ -460,12 +496,17 @@
#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_VM_PAGE_FLUSH 0xc001011e
#define MSR_AMD64_SEV 0xc0010131
#define MSR_AMD64_SEV_ENABLED_BIT 0
#define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
+/* Zen4 */
+#define MSR_ZEN4_BP_CFG 0xc001102e
+#define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
+
/* Fam 17h MSRs */
#define MSR_F17H_IRPERF 0xc00000e9
@@ -508,9 +549,6 @@
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
#define MSR_FAM10H_NODE_ID 0xc001100c
-#define MSR_F10H_DECFG 0xc0011029
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index ece2b2c6d020..c8819358a332 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -4,11 +4,14 @@
#define _ASM_X86_NOSPEC_BRANCH_H_
#include <linux/static_key.h>
+#include <linux/frame.h>
#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
+#include <asm/unwind_hints.h>
+#include <asm/percpu.h>
/*
* This should be used immediately before a retpoline alternative. It tells
@@ -44,6 +47,7 @@
* the optimal version — two calls, each with their own speculation
* trap should their return address end up getting used, in a loop.
*/
+#ifdef CONFIG_X86_64
#define __FILL_RETURN_BUFFER(reg, nr, sp) \
mov $(nr/2), reg; \
771: \
@@ -59,9 +63,24 @@
lfence; \
jmp 775b; \
774: \
+ add $(BITS_PER_LONG/8) * 2, sp; \
dec reg; \
jnz 771b; \
+ /* barrier for jnz misprediction */ \
+ lfence;
+#else
+/*
+ * i386 doesn't unconditionally have LFENCE, so it can't
+ * use the loop variant above.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr, sp) \
+ .rept nr; \
+ call 772f; \
+ int3; \
+772:; \
+ .endr; \
add $(BITS_PER_LONG/8) * nr, sp;
+#endif
#ifdef __ASSEMBLY__
@@ -132,18 +151,28 @@
#endif
.endm
+.macro ISSUE_UNBALANCED_RET_GUARD
+ call .Lunbalanced_ret_guard_\@
+ int3
+.Lunbalanced_ret_guard_\@:
+ add $(BITS_PER_LONG/8), %_ASM_SP
+ lfence
+.endm
+
/*
* A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
* monstrosity above, manually.
*/
-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
-#ifdef CONFIG_RETPOLINE
- ANNOTATE_NOSPEC_ALTERNATIVE
- ALTERNATIVE "jmp .Lskip_rsb_\@", \
- __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
- \ftr
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
+.ifb \ftr2
+ ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
+.else
+ ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
+.endif
+ __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
+.Lunbalanced_\@:
+ ISSUE_UNBALANCED_RET_GUARD
.Lskip_rsb_\@:
-#endif
.endm
#else /* __ASSEMBLY__ */
@@ -218,6 +247,7 @@ enum spectre_v2_mitigation {
SPECTRE_V2_EIBRS,
SPECTRE_V2_EIBRS_RETPOLINE,
SPECTRE_V2_EIBRS_LFENCE,
+ SPECTRE_V2_IBRS,
};
/* The indirect branch speculation control variants */
@@ -281,6 +311,9 @@ static inline void indirect_branch_prediction_barrier(void)
/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
+DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
+extern void update_spec_ctrl_cond(u64 val);
+extern u64 spec_ctrl_current(void);
/*
* With retpoline, we must use IBRS to restrict branch prediction
@@ -290,18 +323,16 @@ extern u64 x86_spec_ctrl_base;
*/
#define firmware_restrict_branch_speculation_start() \
do { \
- u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \
- \
preempt_disable(); \
- alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, \
+ spec_ctrl_current() | SPEC_CTRL_IBRS, \
X86_FEATURE_USE_IBRS_FW); \
} while (0)
#define firmware_restrict_branch_speculation_end() \
do { \
- u64 val = x86_spec_ctrl_base; \
- \
- alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, \
+ spec_ctrl_current(), \
X86_FEATURE_USE_IBRS_FW); \
preempt_enable(); \
} while (0)
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index bbfde3d2662f..4bcd9d0c7bee 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -11,13 +11,6 @@
#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-/*
- * Too small node sizes may confuse the VM badly. Usually they
- * result from BIOS bugs. So dont recognize nodes as standalone
- * NUMA entities that have less than this amount of RAM listed:
- */
-#define NODE_MIN_SIZE (4*1024*1024)
-
extern int numa_off;
/*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 537c0dd4c3d4..9cbd86cf0deb 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -986,4 +986,6 @@ enum taa_mitigations {
TAA_MITIGATION_TSX_DISABLED,
};
+extern bool gds_ucode_mitigated(void);
+
#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index 04c17be9b5fd..bc5b4d788c08 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -25,6 +25,8 @@ void __noreturn machine_real_restart(unsigned int type);
#define MRR_BIOS 0
#define MRR_APM 1
+void cpu_emergency_disable_virtualization(void);
+
typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
void nmi_panic_self_stop(struct pt_regs *regs);
void nmi_shootdown_cpus(nmi_shootdown_cb callback);
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 6847d85400a8..fb3d81347e33 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -101,6 +101,8 @@
#define REQUIRED_MASK16 0
#define REQUIRED_MASK17 0
#define REQUIRED_MASK18 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
+#define REQUIRED_MASK19 0
+#define REQUIRED_MASK20 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/arch/x86/include/asm/resctrl_sched.h b/arch/x86/include/asm/resctrl_sched.h
index f6b7fe2833cc..79b648743b84 100644
--- a/arch/x86/include/asm/resctrl_sched.h
+++ b/arch/x86/include/asm/resctrl_sched.h
@@ -51,24 +51,27 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
* simple as possible.
* Must be called with preemption disabled.
*/
-static void __resctrl_sched_in(void)
+static inline void __resctrl_sched_in(struct task_struct *tsk)
{
struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
u32 closid = state->default_closid;
u32 rmid = state->default_rmid;
+ u32 tmp;
/*
* If this task has a closid/rmid assigned, use it.
* Else use the closid/rmid assigned to this cpu.
*/
if (static_branch_likely(&rdt_alloc_enable_key)) {
- if (current->closid)
- closid = current->closid;
+ tmp = READ_ONCE(tsk->closid);
+ if (tmp)
+ closid = tmp;
}
if (static_branch_likely(&rdt_mon_enable_key)) {
- if (current->rmid)
- rmid = current->rmid;
+ tmp = READ_ONCE(tsk->rmid);
+ if (tmp)
+ rmid = tmp;
}
if (closid != state->cur_closid || rmid != state->cur_rmid) {
@@ -78,15 +81,15 @@ static void __resctrl_sched_in(void)
}
}
-static inline void resctrl_sched_in(void)
+static inline void resctrl_sched_in(struct task_struct *tsk)
{
if (static_branch_likely(&rdt_enable_key))
- __resctrl_sched_in();
+ __resctrl_sched_in(tsk);
}
#else
-static inline void resctrl_sched_in(void) {}
+static inline void resctrl_sched_in(struct task_struct *tsk) {}
#endif /* CONFIG_X86_CPU_RESCTRL */
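For context, a hypothetical caller sketch (the real callers live in the context-switch paths, which this hunk does not show): with the new tsk parameter the incoming task's IDs are programmed explicitly instead of being read from current, and the READ_ONCE() above pairs with updates made when a task is moved between resctrl groups.

static void demo_switch_to(struct task_struct *prev_p,
			   struct task_struct *next_p)
{
	/* register/FPU state switching omitted */
	resctrl_sched_in(next_p);	/* program CLOSID/RMID for next_p */
}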
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ed8ec011a9fd..e832082b2761 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -94,27 +94,16 @@ extern unsigned long _brk_end;
void *extend_brk(size_t size, size_t align);
/*
- * Reserve space in the brk section. The name must be unique within
- * the file, and somewhat descriptive. The size is in bytes. Must be
- * used at file scope.
+ * Reserve space in the .brk section, which is a block of memory from which the
+ * caller is allowed to allocate very early (before even memblock is available)
+ * by calling extend_brk(). All allocated memory will eventually be converted
+ * to memblock. Any leftover unallocated memory will be freed.
*
- * (This uses a temp function to wrap the asm so we can pass it the
- * size parameter; otherwise we wouldn't be able to. We can't use a
- * "section" attribute on a normal variable because it always ends up
- * being @progbits, which ends up allocating space in the vmlinux
- * executable.)
+ * The size is in bytes.
*/
-#define RESERVE_BRK(name,sz) \
- static void __section(.discard.text) __used notrace \
- __brk_reservation_fn_##name##__(void) { \
- asm volatile ( \
- ".pushsection .brk_reservation,\"aw\",@nobits;" \
- ".brk." #name ":" \
- " 1:.skip %c0;" \
- " .size .brk." #name ", . - 1b;" \
- " .popsection" \
- : : "i" (sz)); \
- }
+#define RESERVE_BRK(name, size) \
+ __section(.bss..brk) __aligned(1) __used \
+ static char __brk_##name[size]
/* Helper for reserving space for arrays of things */
#define RESERVE_BRK_ARRAY(type, name, entries) \
@@ -132,12 +121,19 @@ asmlinkage void __init x86_64_start_reservations(char *real_mode_data);
#endif /* __i386__ */
#endif /* _SETUP */
-#else
-#define RESERVE_BRK(name,sz) \
- .pushsection .brk_reservation,"aw",@nobits; \
-.brk.name: \
-1: .skip sz; \
- .size .brk.name,.-1b; \
+
+#else /* __ASSEMBLY__ */
+
+.macro __RESERVE_BRK name, size
+ .pushsection .bss..brk, "aw"
+SYM_DATA_START(__brk_\name)
+ .skip \size
+SYM_DATA_END(__brk_\name)
.popsection
+.endm
+
+#define RESERVE_BRK(name, size) __RESERVE_BRK name, size
+
#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_X86_SETUP_H */
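A usage sketch for the reworked macro (hypothetical name, kernel context assumed): reserve 64K in .bss..brk at file scope, then carve early allocations out of it with extend_brk() before memblock is available.

RESERVE_BRK(demo_early_pool, 65536);

static void __init demo_early_alloc(void)
{
	/* draws from the .brk block; leftover space is freed later */
	void *p = extend_brk(PAGE_SIZE, PAGE_SIZE);

	memset(p, 0, PAGE_SIZE);
}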
diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
index e2389ce9bf58..2fac787f5bd0 100644
--- a/arch/x86/include/asm/syscall_wrapper.h
+++ b/arch/x86/include/asm/syscall_wrapper.h
@@ -14,12 +14,29 @@ struct pt_regs;
,,regs->di,,regs->si,,regs->dx \
,,regs->r10,,regs->r8,,regs->r9) \
+
+/* SYSCALL_PT_ARGS is adapted from s390x */
+#define SYSCALL_PT_ARG6(m, t1, t2, t3, t4, t5, t6) \
+ SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5), m(t6, (regs->bp))
+#define SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5) \
+ SYSCALL_PT_ARG4(m, t1, t2, t3, t4), m(t5, (regs->di))
+#define SYSCALL_PT_ARG4(m, t1, t2, t3, t4) \
+ SYSCALL_PT_ARG3(m, t1, t2, t3), m(t4, (regs->si))
+#define SYSCALL_PT_ARG3(m, t1, t2, t3) \
+ SYSCALL_PT_ARG2(m, t1, t2), m(t3, (regs->dx))
+#define SYSCALL_PT_ARG2(m, t1, t2) \
+ SYSCALL_PT_ARG1(m, t1), m(t2, (regs->cx))
+#define SYSCALL_PT_ARG1(m, t1) m(t1, (regs->bx))
+#define SYSCALL_PT_ARGS(x, ...) SYSCALL_PT_ARG##x(__VA_ARGS__)
+
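The recursion above peels one argument per level, mapping parameter position to the i386 register order bx, cx, dx, si, di, bp. A standalone demo (illustrative only; the mapper macro here just stringifies, so no real pt_regs is needed):

#include <stdio.h>

#define SYSCALL_PT_ARG3(m, t1, t2, t3) \
	SYSCALL_PT_ARG2(m, t1, t2), m(t3, (regs->dx))
#define SYSCALL_PT_ARG2(m, t1, t2) \
	SYSCALL_PT_ARG1(m, t1), m(t2, (regs->cx))
#define SYSCALL_PT_ARG1(m, t1) m(t1, (regs->bx))
#define SYSCALL_PT_ARGS(x, ...) SYSCALL_PT_ARG##x(__VA_ARGS__)

#define ARG_STR(t, a) #t " <- " #a	/* stringify instead of casting */

int main(void)
{
	const char *args[] = { SYSCALL_PT_ARGS(3, ARG_STR, int, long, unsigned) };

	for (unsigned i = 0; i < sizeof(args) / sizeof(args[0]); i++)
		printf("arg%u: %s\n", i + 1, args[i]);
	return 0;
}

Compiled and run, this prints "arg1: int <- (regs->bx)", "arg2: long <- (regs->cx)" and "arg3: unsigned <- (regs->dx)".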
+#define __SC_COMPAT_CAST(t, a) \
+ (__typeof(__builtin_choose_expr(__TYPE_IS_L(t), 0, 0U))) \
+ (unsigned int)a
+
/* Mapping of registers to parameters for syscalls on i386 */
#define SC_IA32_REGS_TO_ARGS(x, ...) \
- __MAP(x,__SC_ARGS \
- ,,(unsigned int)regs->bx,,(unsigned int)regs->cx \
- ,,(unsigned int)regs->dx,,(unsigned int)regs->si \
- ,,(unsigned int)regs->di,,(unsigned int)regs->bp)
+ SYSCALL_PT_ARGS(x, __SC_COMPAT_CAST, \
+ __MAP(x, __SC_TYPE, __VA_ARGS__)) \
#ifdef CONFIG_IA32_EMULATION
/*
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index fda3e7747c22..331474296e6f 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -95,12 +95,6 @@ static inline int cpu_has_svm(const char **msg)
return 0;
}
- if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) {
- if (msg)
- *msg = "can't execute cpuid_8000000a";
- return 0;
- }
-
if (!boot_cpu_has(X86_FEATURE_SVM)) {
if (msg)
*msg = "svm not available";
@@ -120,7 +114,21 @@ static inline void cpu_svm_disable(void)
wrmsrl(MSR_VM_HSAVE_PA, 0);
rdmsrl(MSR_EFER, efer);
- wrmsrl(MSR_EFER, efer & ~EFER_SVME);
+ if (efer & EFER_SVME) {
+ /*
+ * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
+ * aren't blocked, e.g. if a fatal error occurred between CLGI
+ * and STGI. Note, STGI may #UD if SVM is disabled from NMI
+ * context between reading EFER and executing STGI. In that
+ * case, GIF must already be set, otherwise the NMI would have
+ * been blocked, so just eat the fault.
+ */
+ asm_volatile_goto("1: stgi\n\t"
+ _ASM_EXTABLE(1b, %l[fault])
+ ::: "memory" : fault);
+fault:
+ wrmsrl(MSR_EFER, efer & ~EFER_SVME);
+ }
}
/** Makes sure SVM is disabled, if it is supported on the CPU
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 1835767aa335..d716fe938fc0 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -19,8 +19,8 @@
/*
* Definitions of Primary Processor-Based VM-Execution Controls.
*/
-#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004
-#define CPU_BASED_USE_TSC_OFFSETING 0x00000008
+#define CPU_BASED_INTR_WINDOW_EXITING 0x00000004
+#define CPU_BASED_USE_TSC_OFFSETTING 0x00000008
#define CPU_BASED_HLT_EXITING 0x00000080
#define CPU_BASED_INVLPG_EXITING 0x00000200
#define CPU_BASED_MWAIT_EXITING 0x00000400
@@ -31,7 +31,7 @@
#define CPU_BASED_CR8_LOAD_EXITING 0x00080000
#define CPU_BASED_CR8_STORE_EXITING 0x00100000
#define CPU_BASED_TPR_SHADOW 0x00200000
-#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
+#define CPU_BASED_NMI_WINDOW_EXITING 0x00400000
#define CPU_BASED_MOV_DR_EXITING 0x00800000
#define CPU_BASED_UNCOND_IO_EXITING 0x01000000
#define CPU_BASED_USE_IO_BITMAPS 0x02000000
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 3eb8411ab60e..e95b72ec19bc 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -33,7 +33,7 @@
#define EXIT_REASON_TRIPLE_FAULT 2
#define EXIT_REASON_INIT_SIGNAL 3
-#define EXIT_REASON_PENDING_INTERRUPT 7
+#define EXIT_REASON_INTERRUPT_WINDOW 7
#define EXIT_REASON_NMI_WINDOW 8
#define EXIT_REASON_TASK_SWITCH 9
#define EXIT_REASON_CPUID 10
@@ -94,7 +94,7 @@
{ EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
{ EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
{ EXIT_REASON_INIT_SIGNAL, "INIT_SIGNAL" }, \
- { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \
+ { EXIT_REASON_INTERRUPT_WINDOW, "INTERRUPT_WINDOW" }, \
{ EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
{ EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
{ EXIT_REASON_CPUID, "CPUID" }, \
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 7b75658b7e9a..a5e41e66fcbd 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -140,6 +140,9 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
madt->address);
}
+ if (madt->flags & ACPI_MADT_PCAT_COMPAT)
+ legacy_pic_pcat_compat();
+
default_acpi_madt_oem_check(madt->header.oem_id,
madt->header.oem_table_id);
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 9353bac92480..421facbeda91 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -374,6 +374,17 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
u8 insn_buff[MAX_PATCH_LEN];
DPRINTK("alt table %px, -> %px", start, end);
+
+ /*
+ * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
+ * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
+ * During the process, KASAN becomes confused seeing partial LA57
+ * conversion and triggers a false-positive out-of-bound report.
+ *
+ * Disable KASAN until the patching is complete.
+ */
+ kasan_disable_current();
+
/*
* The scan order should be from start to end. A later scanned
* alternative code can overwrite previously scanned alternative code.
@@ -434,6 +445,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
text_poke_early(instr, insn_buff, insn_buff_sz);
}
+
+ kasan_enable_current();
}
#ifdef CONFIG_SMP
@@ -772,8 +785,8 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
} else {
local_irq_save(flags);
memcpy(addr, opcode, len);
- local_irq_restore(flags);
sync_core();
+ local_irq_restore(flags);
/*
* Could also do a CLFLUSH here to speed up CPU recovery; but
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 68c734032523..a3b7b2fb04cb 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -410,10 +410,9 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
if (vector && !eilvt_entry_is_changeable(vector, new))
/* may not change if vectors are different */
return rsvd;
- rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
- } while (rsvd != new);
+ } while (!atomic_try_cmpxchg(&eilvt_offsets[offset], &rsvd, new));
- rsvd &= ~APIC_EILVT_MASKED;
+ rsvd = new & ~APIC_EILVT_MASKED;
if (rsvd && rsvd != vector)
pr_info("LVT offset %d assigned for vector 0x%02x\n",
offset, rsvd);
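The conversion above is the generic cmpxchg-loop to try_cmpxchg idiom: the try variant updates the expected value on failure, so the loop needs no separate reload or compare of old vs. new. A standalone sketch with C11 atomics standing in for the kernel helpers:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int slot;

static int reserve_slot(int new)
{
	int rsvd = atomic_load(&slot);

	do {
		if (rsvd != 0 && rsvd != new)
			return -1;	/* already reserved differently */
		/* on failure, rsvd is refreshed with the current value */
	} while (!atomic_compare_exchange_strong(&slot, &rsvd, new));

	return 0;
}

int main(void)
{
	printf("first: %d\n", reserve_slot(5));	/* 0: reserved */
	printf("same:  %d\n", reserve_slot(5));	/* 0: idempotent */
	printf("other: %d\n", reserve_slot(7));	/* -1: conflict */
	return 0;
}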
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 1622cff009c9..e4d63392c619 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2455,17 +2455,21 @@ static int io_apic_get_redir_entries(int ioapic)
unsigned int arch_dynirq_lower_bound(unsigned int from)
{
+ unsigned int ret;
+
/*
* dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
* gsi_top if ioapic_dynirq_base hasn't been initialized yet.
*/
- if (!ioapic_initialized)
- return gsi_top;
+ ret = ioapic_dynirq_base ? : gsi_top;
+
/*
- * For DT enabled machines ioapic_dynirq_base is irrelevant and not
- * updated. So simply return @from if ioapic_dynirq_base == 0.
+ * For DT enabled machines ioapic_dynirq_base is irrelevant and
+ * always 0. gsi_top can be 0 if there is no IO/APIC registered.
+ * 0 is an invalid interrupt number for dynamic allocations. Return
+ * @from instead.
*/
- return ioapic_dynirq_base ? : from;
+ return ret ? : from;
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 032a00e5d9fa..76c80e191a1b 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -97,7 +97,10 @@ static void init_x2apic_ldr(void)
static int x2apic_phys_probe(void)
{
- if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
+ if (!x2apic_mode)
+ return 0;
+
+ if (x2apic_phys || x2apic_fadt_phys())
return 1;
return apic == &apic_x2apic_phys;
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 660270359d39..166d9991e711 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -238,12 +238,6 @@ extern int (*console_blank_hook)(int);
#endif
/*
- * The apm_bios device is one of the misc char devices.
- * This is its minor number.
- */
-#define APM_MINOR_DEV 134
-
-/*
* Various options can be changed at boot time as follows:
* (We allow underscores for compatibility with the modules code)
* apm=on/off enable/disable APM
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 88cef978380b..4e84203fc067 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -26,11 +26,6 @@
#include "cpu.h"
-static const int amd_erratum_383[];
-static const int amd_erratum_400[];
-static const int amd_erratum_1054[];
-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
-
/*
* nodes_per_socket: Stores the number of nodes per socket.
* Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
@@ -38,6 +33,83 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
*/
static u32 nodes_per_socket = 1;
+/*
+ * AMD errata checking
+ *
+ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
+ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
+ * have an OSVW id assigned, which it takes as first argument. Both take a
+ * variable number of family-specific model-stepping ranges created by
+ * AMD_MODEL_RANGE().
+ *
+ * Example:
+ *
+ * const int amd_erratum_319[] =
+ * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
+ * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
+ * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
+ */
+
+#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
+#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
+#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
+ ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
+#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
+#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
+#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
+
+static const int amd_erratum_400[] =
+ AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
+ AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
+
+static const int amd_erratum_383[] =
+ AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
+
+/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
+static const int amd_erratum_1054[] =
+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
+
+static const int amd_zenbleed[] =
+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
+ AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
+ AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf),
+ AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
+
+static const int amd_erratum_1485[] =
+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf),
+ AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf));
+
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+{
+ int osvw_id = *erratum++;
+ u32 range;
+ u32 ms;
+
+ if (osvw_id >= 0 && osvw_id < 65536 &&
+ cpu_has(cpu, X86_FEATURE_OSVW)) {
+ u64 osvw_len;
+
+ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
+ if (osvw_id < osvw_len) {
+ u64 osvw_bits;
+
+ rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
+ osvw_bits);
+ return osvw_bits & (1ULL << (osvw_id & 0x3f));
+ }
+ }
+
+ /* OSVW unavailable or ID unknown, match family-model-stepping range */
+ ms = (cpu->x86_model << 4) | cpu->x86_stepping;
+ while ((range = *erratum++))
+ if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
+ (ms >= AMD_MODEL_RANGE_START(range)) &&
+ (ms <= AMD_MODEL_RANGE_END(range)))
+ return true;
+
+ return false;
+}
+
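A standalone demo of the range encoding (illustrative only; values taken from the amd_zenbleed table above): the family lives in bits 31-24, and each model/stepping pair is packed as (model << 4) | stepping, with the start pair in bits 23-12 and the end pair in bits 11-0.

#include <stdio.h>
#include <stdbool.h>

#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

static bool in_range(unsigned int family, unsigned int model,
		     unsigned int stepping, unsigned int range)
{
	unsigned int ms = (model << 4) | stepping;

	return family == AMD_MODEL_RANGE_FAMILY(range) &&
	       ms >= AMD_MODEL_RANGE_START(range) &&
	       ms <= AMD_MODEL_RANGE_END(range);
}

int main(void)
{
	/* Zen2 range from amd_zenbleed: family 0x17, models 0x30-0x4f */
	unsigned int range = AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf);

	printf("17h model 31h: %d\n", in_range(0x17, 0x31, 0x0, range));	/* 1 */
	printf("17h model 71h: %d\n", in_range(0x17, 0x71, 0x0, range));	/* 0 */
	printf("19h model 31h: %d\n", in_range(0x19, 0x31, 0x0, range));	/* 0 */
	return 0;
}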
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
u32 gprs[8] = { 0 };
@@ -587,7 +659,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
* If BIOS has not enabled SME then don't advertise the
* SME feature (set in scattered.c).
* For SEV: If BIOS has not enabled SEV then don't advertise the
- * SEV feature (set in scattered.c).
+ * SEV and SEV_ES features (set in scattered.c).
*
* In all cases, since support for SME and SEV requires long mode,
* don't advertise the feature under CONFIG_X86_32.
@@ -618,6 +690,7 @@ clear_all:
setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
setup_clear_cpu_cap(X86_FEATURE_SEV);
+ setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
}
}
@@ -794,8 +867,6 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}
-#define MSR_AMD64_DE_CFG 0xC0011029
-
static void init_amd_ln(struct cpuinfo_x86 *c)
{
/*
@@ -894,12 +965,73 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
node_reclaim_distance = 32;
#endif
+ /* Fix up CPUID bits, but only if not virtualised. */
+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
+
+ /* Erratum 1076: CPB feature bit not being set in CPUID. */
+ if (!cpu_has(c, X86_FEATURE_CPB))
+ set_cpu_cap(c, X86_FEATURE_CPB);
+
+ /*
+ * Zen3 (Fam19 model < 0x10) parts are not susceptible to
+ * Branch Type Confusion, but predate the allocation of the
+ * BTC_NO bit.
+ */
+ if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
+ set_cpu_cap(c, X86_FEATURE_BTC_NO);
+ }
+
/*
- * Fix erratum 1076: CPB feature bit not being set in CPUID.
- * Always set it, except when running under a hypervisor.
+ * Work around Erratum 1386. The XSAVES instruction malfunctions in
+ * certain circumstances on Zen1/2 uarch, and not all parts have had
+ * updated microcode at the time of writing (March 2023).
+ *
+ * Affected parts all have no supervisor XSAVE states, meaning that
+ * the XSAVEC instruction (which works fine) is equivalent.
*/
- if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
- set_cpu_cap(c, X86_FEATURE_CPB);
+ if (c->x86 == 0x17)
+ clear_cpu_cap(c, X86_FEATURE_XSAVES);
+}
+
+static bool cpu_has_zenbleed_microcode(void)
+{
+ u32 good_rev = 0;
+
+ switch (boot_cpu_data.x86_model) {
+ case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
+ case 0x60 ... 0x67: good_rev = 0x0860010b; break;
+ case 0x68 ... 0x6f: good_rev = 0x08608105; break;
+ case 0x70 ... 0x7f: good_rev = 0x08701032; break;
+ case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;
+
+ default:
+ return false;
+ break;
+ }
+
+ if (boot_cpu_data.microcode < good_rev)
+ return false;
+
+ return true;
+}
+
+static void zenbleed_check(struct cpuinfo_x86 *c)
+{
+ if (!cpu_has_amd_erratum(c, amd_zenbleed))
+ return;
+
+ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+ return;
+
+ if (!cpu_has(c, X86_FEATURE_AVX))
+ return;
+
+ if (!cpu_has_zenbleed_microcode()) {
+ pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
+ msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
+ } else {
+ msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
+ }
}
static void init_amd(struct cpuinfo_x86 *c)
@@ -956,8 +1088,8 @@ static void init_amd(struct cpuinfo_x86 *c)
* msr_set_bit() uses the safe accessors, too, even if the MSR
* is not present.
*/
- msr_set_bit(MSR_F10H_DECFG,
- MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+ msr_set_bit(MSR_AMD64_DE_CFG,
+ MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
/* A serializing LFENCE stops RDTSC speculation */
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
@@ -989,6 +1121,12 @@ static void init_amd(struct cpuinfo_x86 *c)
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
check_null_seg_clears_base(c);
+
+ zenbleed_check(c);
+
+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
+ cpu_has_amd_erratum(c, amd_erratum_1485))
+ msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
}
#ifdef CONFIG_X86_32
@@ -1084,73 +1222,6 @@ static const struct cpu_dev amd_cpu_dev = {
cpu_dev_register(amd_cpu_dev);
-/*
- * AMD errata checking
- *
- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
- * have an OSVW id assigned, which it takes as first argument. Both take a
- * variable number of family-specific model-stepping ranges created by
- * AMD_MODEL_RANGE().
- *
- * Example:
- *
- * const int amd_erratum_319[] =
- * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
- * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
- * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
- */
-
-#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
-#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
-#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
- ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
-#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
-#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
-#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
-
-static const int amd_erratum_400[] =
- AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
- AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
-
-static const int amd_erratum_383[] =
- AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
-
-/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
-static const int amd_erratum_1054[] =
- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
-
-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
-{
- int osvw_id = *erratum++;
- u32 range;
- u32 ms;
-
- if (osvw_id >= 0 && osvw_id < 65536 &&
- cpu_has(cpu, X86_FEATURE_OSVW)) {
- u64 osvw_len;
-
- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
- if (osvw_id < osvw_len) {
- u64 osvw_bits;
-
- rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
- osvw_bits);
- return osvw_bits & (1ULL << (osvw_id & 0x3f));
- }
- }
-
- /* OSVW unavailable or ID unknown, match family-model-stepping range */
- ms = (cpu->x86_model << 4) | cpu->x86_stepping;
- while ((range = *erratum++))
- if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
- (ms >= AMD_MODEL_RANGE_START(range)) &&
- (ms <= AMD_MODEL_RANGE_END(range)))
- return true;
-
- return false;
-}
-
void set_dr_addr_mask(unsigned long mask, int dr)
{
if (!boot_cpu_has(X86_FEATURE_BPEXT))
@@ -1169,3 +1240,18 @@ void set_dr_addr_mask(unsigned long mask, int dr)
break;
}
}
+
+static void zenbleed_check_cpu(void *unused)
+{
+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+
+ zenbleed_check(c);
+}
+
+void amd_check_microcode(void)
+{
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return;
+
+ on_each_cpu(zenbleed_check_cpu, NULL, 1);
+}
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 09d02b1f6f71..48ae44cf7795 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -9,7 +9,6 @@
* - Andrew D. Balsa (code cleanup).
*/
#include <linux/init.h>
-#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
@@ -25,9 +24,7 @@
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
-#include <asm/alternative.h>
#include <asm/pgtable.h>
-#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
@@ -37,6 +34,8 @@
static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
+static void __init retbleed_select_mitigation(void);
+static void __init spectre_v2_user_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
@@ -45,17 +44,49 @@ static void __init md_clear_select_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
+static void __init gds_select_mitigation(void);
-/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+
+/* The current value of the SPEC_CTRL MSR with task-specific bits set */
+DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
+
static DEFINE_MUTEX(spec_ctrl_mutex);
+/* Update SPEC_CTRL MSR and its cached copy unconditionally */
+static void update_spec_ctrl(u64 val)
+{
+ this_cpu_write(x86_spec_ctrl_current, val);
+ wrmsrl(MSR_IA32_SPEC_CTRL, val);
+}
+
/*
- * The vendor and possibly platform specific bits which can be modified in
- * x86_spec_ctrl_base.
+ * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
+ * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
*/
-static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
+void update_spec_ctrl_cond(u64 val)
+{
+ if (this_cpu_read(x86_spec_ctrl_current) == val)
+ return;
+
+ this_cpu_write(x86_spec_ctrl_current, val);
+
+ /*
+ * When KERNEL_IBRS this MSR is written on return-to-user, unless
+ * forced the update can be delayed until that time.
+ */
+ if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
+ wrmsrl(MSR_IA32_SPEC_CTRL, val);
+}
+
+u64 spec_ctrl_current(void)
+{
+ return this_cpu_read(x86_spec_ctrl_current);
+}
+EXPORT_SYMBOL_GPL(spec_ctrl_current);
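The conditional variant above skips the MSR write when the cached value already matches, and defers it entirely when the return-to-user path is known to write the MSR anyway. A userspace analogue of that pattern (illustrative only, plain C):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint64_t cached_val;
static bool write_on_return;	/* stands in for X86_FEATURE_KERNEL_IBRS */

static void expensive_write(uint64_t val)	/* stands in for wrmsrl() */
{
	printf("MSR write: %#llx\n", (unsigned long long)val);
}

static void update_cond(uint64_t val)
{
	if (cached_val == val)
		return;		/* nothing changed: skip the write */
	cached_val = val;
	if (!write_on_return)	/* otherwise the exit path writes it */
		expensive_write(val);
}

int main(void)
{
	update_cond(0x4);	/* writes */
	update_cond(0x4);	/* cache hit: no write */
	write_on_return = true;
	update_cond(0x6);	/* deferred: cache updated, no write here */
	return 0;
}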
/*
* AMD specific MSR info for Speculative Store Bypass control.
@@ -82,100 +113,57 @@ EXPORT_SYMBOL_GPL(mds_idle_clear);
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
-void __init check_bugs(void)
+void __init cpu_select_mitigations(void)
{
- identify_boot_cpu();
-
- /*
- * identify_boot_cpu() initialized SMT support information, let the
- * core code know.
- */
- cpu_smt_check_topology();
-
- if (!IS_ENABLED(CONFIG_SMP)) {
- pr_info("CPU: ");
- print_cpu_info(&boot_cpu_data);
- }
-
/*
* Read the SPEC_CTRL MSR to account for reserved bits which may
* have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
* init code as it is not enumerated and depends on the family.
*/
- if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
- /* Allow STIBP in MSR_SPEC_CTRL if supported */
- if (boot_cpu_has(X86_FEATURE_STIBP))
- x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
+ /*
+ * Previously running kernel (kexec), may have some controls
+ * turned ON. Clear them and let the mitigations setup below
+ * rediscover them based on configuration.
+ */
+ x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
+ }
/* Select the proper CPU mitigations before patching alternatives: */
spectre_v1_select_mitigation();
spectre_v2_select_mitigation();
- ssb_select_mitigation();
- l1tf_select_mitigation();
- md_clear_select_mitigation();
- srbds_select_mitigation();
-
- arch_smt_update();
-
-#ifdef CONFIG_X86_32
/*
- * Check whether we are able to run this kernel safely on SMP.
- *
- * - i386 is no longer supported.
- * - In order to run on anything without a TSC, we need to be
- * compiled for a i486.
+ * retbleed_select_mitigation() relies on the state set by
+ * spectre_v2_select_mitigation(); specifically it wants to know about
+ * spectre_v2=ibrs.
*/
- if (boot_cpu_data.x86 < 4)
- panic("Kernel requires i486+ for 'invlpg' and other features");
-
- init_utsname()->machine[1] =
- '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
- alternative_instructions();
-
- fpu__init_check_bugs();
-#else /* CONFIG_X86_64 */
- alternative_instructions();
-
+ retbleed_select_mitigation();
/*
- * Make sure the first 2MB area is not mapped by huge pages
- * There are typically fixed size MTRRs in there and overlapping
- * MTRRs into large pages causes slow downs.
- *
- * Right now we don't do that with gbpages because there seems
- * very little benefit for that case.
+ * spectre_v2_user_select_mitigation() relies on the state set by
+ * retbleed_select_mitigation(); specifically the STIBP selection is
+ * forced for UNRET.
*/
- if (!direct_gbpages)
- set_memory_4k((unsigned long)__va(0), 1);
-#endif
+ spectre_v2_user_select_mitigation();
+ ssb_select_mitigation();
+ l1tf_select_mitigation();
+ md_clear_select_mitigation();
+ srbds_select_mitigation();
+ gds_select_mitigation();
}
+/*
+ * NOTE: For VMX, this function is not called in the vmexit path.
+ * It uses vmx_spec_ctrl_restore_host() instead.
+ */
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
- u64 msrval, guestval, hostval = x86_spec_ctrl_base;
+ u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
struct thread_info *ti = current_thread_info();
- /* Is MSR_SPEC_CTRL implemented ? */
if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
- /*
- * Restrict guest_spec_ctrl to supported values. Clear the
- * modifiable bits in the host base value and or the
- * modifiable bits from the guest value.
- */
- guestval = hostval & ~x86_spec_ctrl_mask;
- guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
-
- /* SSBD controlled in MSR_SPEC_CTRL */
- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
- static_cpu_has(X86_FEATURE_AMD_SSBD))
- hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
-
- /* Conditional STIBP enabled? */
- if (static_branch_unlikely(&switch_to_cond_stibp))
- hostval |= stibp_tif_to_spec_ctrl(ti->flags);
-
if (hostval != guestval) {
msrval = setguest ? guestval : hostval;
wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -396,7 +384,8 @@ static void __init mmio_select_mitigation(void)
u64 ia32_cap;
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
- cpu_mitigations_off()) {
+ boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
+ cpu_mitigations_off()) {
mmio_mitigation = MMIO_MITIGATION_OFF;
return;
}
@@ -501,6 +490,8 @@ out:
pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
+ else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+ pr_info("MMIO Stale Data: Unknown: No mitigations\n");
}
static void __init md_clear_select_mitigation(void)
@@ -611,6 +602,149 @@ static int __init srbds_parse_cmdline(char *str)
early_param("srbds", srbds_parse_cmdline);
#undef pr_fmt
+#define pr_fmt(fmt) "GDS: " fmt
+
+enum gds_mitigations {
+ GDS_MITIGATION_OFF,
+ GDS_MITIGATION_UCODE_NEEDED,
+ GDS_MITIGATION_FORCE,
+ GDS_MITIGATION_FULL,
+ GDS_MITIGATION_FULL_LOCKED,
+ GDS_MITIGATION_HYPERVISOR,
+};
+
+#if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION)
+static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
+#else
+static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
+#endif
+
+static const char * const gds_strings[] = {
+ [GDS_MITIGATION_OFF] = "Vulnerable",
+ [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
+ [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode",
+ [GDS_MITIGATION_FULL] = "Mitigation: Microcode",
+ [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)",
+ [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
+};
+
+bool gds_ucode_mitigated(void)
+{
+ return (gds_mitigation == GDS_MITIGATION_FULL ||
+ gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
+}
+EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
+
+void update_gds_msr(void)
+{
+ u64 mcu_ctrl_after;
+ u64 mcu_ctrl;
+
+ switch (gds_mitigation) {
+ case GDS_MITIGATION_OFF:
+ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+ mcu_ctrl |= GDS_MITG_DIS;
+ break;
+ case GDS_MITIGATION_FULL_LOCKED:
+ /*
+ * The LOCKED state comes from the boot CPU. APs might not have
+ * the same state. Make sure the mitigation is enabled on all
+ * CPUs.
+ */
+ case GDS_MITIGATION_FULL:
+ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+ mcu_ctrl &= ~GDS_MITG_DIS;
+ break;
+ case GDS_MITIGATION_FORCE:
+ case GDS_MITIGATION_UCODE_NEEDED:
+ case GDS_MITIGATION_HYPERVISOR:
+ return;
+ };
+
+ wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+
+ /*
+ * Check to make sure that the WRMSR value was not ignored. Writes to
+ * GDS_MITG_DIS will be ignored if this processor is locked but the boot
+ * processor was not.
+ */
+ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
+ WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
+}
+
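A minimal analogue of the verify-after-write pattern used above (illustrative only, plain C): when a control register may silently drop writes because it is locked, read it back and flag a mismatch.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct ctrl_reg { uint64_t val; bool locked; };

static void reg_write(struct ctrl_reg *r, uint64_t v)
{
	if (!r->locked)		/* a locked register ignores the write */
		r->val = v;
}

static void update_verified(struct ctrl_reg *r, uint64_t v)
{
	reg_write(r, v);
	if (r->val != v)	/* stands in for the WARN_ON_ONCE() above */
		fprintf(stderr, "write of %#llx ignored: register locked\n",
			(unsigned long long)v);
}

int main(void)
{
	struct ctrl_reg r = { .val = 1, .locked = true };

	update_verified(&r, 0);	/* reports the ignored write */
	return 0;
}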
+static void __init gds_select_mitigation(void)
+{
+ u64 mcu_ctrl;
+
+ if (!boot_cpu_has_bug(X86_BUG_GDS))
+ return;
+
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+ gds_mitigation = GDS_MITIGATION_HYPERVISOR;
+ goto out;
+ }
+
+ if (cpu_mitigations_off())
+ gds_mitigation = GDS_MITIGATION_OFF;
+ /* Will verify below that mitigation _can_ be disabled */
+
+ /* No microcode */
+ if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
+ if (gds_mitigation == GDS_MITIGATION_FORCE) {
+ /*
+ * This only needs to be done on the boot CPU so do it
+ * here rather than in update_gds_msr()
+ */
+ setup_clear_cpu_cap(X86_FEATURE_AVX);
+ pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
+ } else {
+ gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
+ }
+ goto out;
+ }
+
+ /* Microcode has mitigation, use it */
+ if (gds_mitigation == GDS_MITIGATION_FORCE)
+ gds_mitigation = GDS_MITIGATION_FULL;
+
+ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+ if (mcu_ctrl & GDS_MITG_LOCKED) {
+ if (gds_mitigation == GDS_MITIGATION_OFF)
+ pr_warn("Mitigation locked. Disable failed.\n");
+
+ /*
+ * The mitigation is selected from the boot CPU. All other CPUs
+ * _should_ have the same state. If the boot CPU isn't locked
+ * but others are then update_gds_msr() will WARN() of the state
+ * mismatch. If the boot CPU is locked update_gds_msr() will
+ * ensure the other CPUs have the mitigation enabled.
+ */
+ gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
+ }
+
+ update_gds_msr();
+out:
+ pr_info("%s\n", gds_strings[gds_mitigation]);
+}
+
+static int __init gds_parse_cmdline(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!boot_cpu_has_bug(X86_BUG_GDS))
+ return 0;
+
+ if (!strcmp(str, "off"))
+ gds_mitigation = GDS_MITIGATION_OFF;
+ else if (!strcmp(str, "force"))
+ gds_mitigation = GDS_MITIGATION_FORCE;
+
+ return 0;
+}
+early_param("gather_data_sampling", gds_parse_cmdline);
+
+#undef pr_fmt
#define pr_fmt(fmt) "Spectre V1 : " fmt
enum spectre_v1_mitigation {
@@ -702,12 +836,103 @@ static int __init nospectre_v1_cmdline(char *str)
}
early_param("nospectre_v1", nospectre_v1_cmdline);
-#undef pr_fmt
-#define pr_fmt(fmt) "Spectre V2 : " fmt
-
static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
SPECTRE_V2_NONE;
+#undef pr_fmt
+#define pr_fmt(fmt) "RETBleed: " fmt
+
+enum retbleed_mitigation {
+ RETBLEED_MITIGATION_NONE,
+ RETBLEED_MITIGATION_IBRS,
+ RETBLEED_MITIGATION_EIBRS,
+};
+
+enum retbleed_mitigation_cmd {
+ RETBLEED_CMD_OFF,
+ RETBLEED_CMD_AUTO,
+};
+
+const char * const retbleed_strings[] = {
+ [RETBLEED_MITIGATION_NONE] = "Vulnerable",
+ [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
+ [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
+};
+
+static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
+ RETBLEED_MITIGATION_NONE;
+static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
+ RETBLEED_CMD_AUTO;
+
+static int __init retbleed_parse_cmdline(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "off"))
+ retbleed_cmd = RETBLEED_CMD_OFF;
+ else if (!strcmp(str, "auto"))
+ retbleed_cmd = RETBLEED_CMD_AUTO;
+ else
+ pr_err("Unknown retbleed option (%s). Defaulting to 'auto'\n", str);
+
+ return 0;
+}
+early_param("retbleed", retbleed_parse_cmdline);
+
+#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
+#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler!\n"
+#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
+
+static void __init retbleed_select_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
+ return;
+
+ switch (retbleed_cmd) {
+ case RETBLEED_CMD_OFF:
+ return;
+
+ case RETBLEED_CMD_AUTO:
+ default:
+ /*
+ * The Intel mitigation (IBRS) was already selected in
+ * spectre_v2_select_mitigation().
+ */
+
+ break;
+ }
+
+ switch (retbleed_mitigation) {
+ default:
+ break;
+ }
+
+ /*
+ * Let IBRS trump all on Intel without affecting the effects of the
+ * retbleed= cmdline option.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+ switch (spectre_v2_enabled) {
+ case SPECTRE_V2_IBRS:
+ retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
+ break;
+ case SPECTRE_V2_EIBRS:
+ case SPECTRE_V2_EIBRS_RETPOLINE:
+ case SPECTRE_V2_EIBRS_LFENCE:
+ retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
+ break;
+ default:
+ pr_err(RETBLEED_INTEL_MSG);
+ }
+ }
+
+ pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt) "Spectre V2 : " fmt
+
static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
@@ -737,6 +962,7 @@ static inline const char *spectre_v2_module_string(void) { return ""; }
#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
@@ -778,6 +1004,7 @@ enum spectre_v2_mitigation_cmd {
SPECTRE_V2_CMD_EIBRS,
SPECTRE_V2_CMD_EIBRS_RETPOLINE,
SPECTRE_V2_CMD_EIBRS_LFENCE,
+ SPECTRE_V2_CMD_IBRS,
};
enum spectre_v2_user_cmd {
@@ -818,13 +1045,15 @@ static void __init spec_v2_user_print_cond(const char *reason, bool secure)
pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}
+static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
+
static enum spectre_v2_user_cmd __init
-spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
+spectre_v2_parse_user_cmdline(void)
{
char arg[20];
int ret, i;
- switch (v2_cmd) {
+ switch (spectre_v2_cmd) {
case SPECTRE_V2_CMD_NONE:
return SPECTRE_V2_USER_CMD_NONE;
case SPECTRE_V2_CMD_FORCE:
@@ -852,13 +1081,18 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
{
- return (mode == SPECTRE_V2_EIBRS ||
- mode == SPECTRE_V2_EIBRS_RETPOLINE ||
- mode == SPECTRE_V2_EIBRS_LFENCE);
+ return mode == SPECTRE_V2_EIBRS ||
+ mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+ mode == SPECTRE_V2_EIBRS_LFENCE;
+}
+
+static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
+{
+ return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
}
static void __init
-spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+spectre_v2_user_select_mitigation(void)
{
enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
bool smt_possible = IS_ENABLED(CONFIG_SMP);
@@ -871,7 +1105,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
smt_possible = false;
- cmd = spectre_v2_parse_user_cmdline(v2_cmd);
+ cmd = spectre_v2_parse_user_cmdline();
switch (cmd) {
case SPECTRE_V2_USER_CMD_NONE:
goto set_mode;
@@ -919,8 +1153,15 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
}
/*
- * If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not
- * required.
+ * If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP
+ * is not required.
+ *
+ * Enhanced IBRS also protects against cross-thread branch target
+ * injection in user-mode as the IBRS bit remains always set which
+ * implicitly enables cross-thread protections. However, in legacy IBRS
+ * mode, the IBRS bit is set only on kernel entry and cleared on return
+ * to userspace. This disables the implicit cross-thread protection,
+ * so allow for STIBP to be selected in that case.
*/
if (!boot_cpu_has(X86_FEATURE_STIBP) ||
!smt_possible ||
@@ -949,6 +1190,7 @@ static const char * const spectre_v2_strings[] = {
[SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS",
[SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE",
[SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines",
+ [SPECTRE_V2_IBRS] = "Mitigation: IBRS",
};
static const struct {
@@ -966,6 +1208,7 @@ static const struct {
{ "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false },
{ "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false },
{ "auto", SPECTRE_V2_CMD_AUTO, false },
+ { "ibrs", SPECTRE_V2_CMD_IBRS, false },
};
static void __init spec_v2_print_cond(const char *reason, bool secure)
@@ -1028,6 +1271,24 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return SPECTRE_V2_CMD_AUTO;
}
+ if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+ pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
+ mitigation_options[i].option);
+ return SPECTRE_V2_CMD_AUTO;
+ }
+
+ if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
+ pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
+ mitigation_options[i].option);
+ return SPECTRE_V2_CMD_AUTO;
+ }
+
+ if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) {
+ pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
+ mitigation_options[i].option);
+ return SPECTRE_V2_CMD_AUTO;
+ }
+
spec_v2_print_cond(mitigation_options[i].option,
mitigation_options[i].secure);
return cmd;
@@ -1043,6 +1304,69 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
return SPECTRE_V2_RETPOLINE;
}
+/* Disable in-kernel use of non-RSB RET predictors */
+static void __init spec_ctrl_disable_kernel_rrsba(void)
+{
+ u64 ia32_cap;
+
+ if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+ return;
+
+ ia32_cap = x86_read_arch_cap_msr();
+
+ if (ia32_cap & ARCH_CAP_RRSBA) {
+ x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+ update_spec_ctrl(x86_spec_ctrl_base);
+ }
+}
+
+static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
+{
+ /*
+ * Similar to context switches, there are two types of RSB attacks
+ * after VM exit:
+ *
+ * 1) RSB underflow
+ *
+ * 2) Poisoned RSB entry
+ *
+ * When retpoline is enabled, both are mitigated by filling/clearing
+ * the RSB.
+ *
+ * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
+ * prediction isolation protections, RSB still needs to be cleared
+ * because of #2. Note that SMEP provides no protection here, unlike
+ * user-space-poisoned RSB entries.
+ *
+ * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
+ * bug is present then a LITE version of RSB protection is required;
+ * just a single call needs to retire before a RET is executed.
+ */
+ switch (mode) {
+ case SPECTRE_V2_NONE:
+ return;
+
+ case SPECTRE_V2_EIBRS_LFENCE:
+ case SPECTRE_V2_EIBRS:
+ if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
+ setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
+ pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
+ }
+ return;
+
+ case SPECTRE_V2_EIBRS_RETPOLINE:
+ case SPECTRE_V2_RETPOLINE:
+ case SPECTRE_V2_LFENCE:
+ case SPECTRE_V2_IBRS:
+ setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
+ return;
+ }
+
+ pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
+ dump_stack();
+}
+
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -1067,6 +1391,14 @@ static void __init spectre_v2_select_mitigation(void)
break;
}
+ if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+ retbleed_cmd != RETBLEED_CMD_OFF &&
+ boot_cpu_has(X86_FEATURE_IBRS) &&
+ boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+ mode = SPECTRE_V2_IBRS;
+ break;
+ }
+
mode = spectre_v2_select_retpoline();
break;
@@ -1083,6 +1415,10 @@ static void __init spectre_v2_select_mitigation(void)
mode = spectre_v2_select_retpoline();
break;
+ case SPECTRE_V2_CMD_IBRS:
+ mode = SPECTRE_V2_IBRS;
+ break;
+
case SPECTRE_V2_CMD_EIBRS:
mode = SPECTRE_V2_EIBRS;
break;
@@ -1099,10 +1435,9 @@ static void __init spectre_v2_select_mitigation(void)
if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
- if (spectre_v2_in_eibrs_mode(mode)) {
- /* Force it so VMEXIT will restore correctly */
+ if (spectre_v2_in_ibrs_mode(mode)) {
x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ update_spec_ctrl(x86_spec_ctrl_base);
}
switch (mode) {
@@ -1110,6 +1445,12 @@ static void __init spectre_v2_select_mitigation(void)
case SPECTRE_V2_EIBRS:
break;
+ case SPECTRE_V2_IBRS:
+ setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
+ if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
+ pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
+ break;
+
case SPECTRE_V2_LFENCE:
case SPECTRE_V2_EIBRS_LFENCE:
setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
@@ -1121,43 +1462,86 @@ static void __init spectre_v2_select_mitigation(void)
break;
}
+ /*
+ * Disable alternate RSB predictions in kernel when indirect CALLs and
+ * JMPs get protection against BHI and Intramode-BTI, but RET
+ * prediction from a non-RSB predictor is still a risk.
+ */
+ if (mode == SPECTRE_V2_EIBRS_LFENCE ||
+ mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+ mode == SPECTRE_V2_RETPOLINE)
+ spec_ctrl_disable_kernel_rrsba();
+
spectre_v2_enabled = mode;
pr_info("%s\n", spectre_v2_strings[mode]);
/*
- * If spectre v2 protection has been enabled, unconditionally fill
- * RSB during a context switch; this protects against two independent
- * issues:
+ * If Spectre v2 protection has been enabled, fill the RSB during a
+ * context switch. In general there are two types of RSB attacks
+ * across context switches, for which the CALLs/RETs may be unbalanced.
+ *
+ * 1) RSB underflow
+ *
+ * Some Intel parts have "bottomless RSB". When the RSB is empty,
+ * speculated return targets may come from the branch predictor,
+ * which could have a user-poisoned BTB or BHB entry.
+ *
+ * AMD has it even worse: *all* returns are speculated from the BTB,
+ * regardless of the state of the RSB.
+ *
+ * When IBRS or eIBRS is enabled, the "user -> kernel" attack
+ * scenario is mitigated by the IBRS branch prediction isolation
+ * properties, so the RSB buffer filling wouldn't be necessary to
+ * protect against this type of attack.
*
- * - RSB underflow (and switch to BTB) on Skylake+
- * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
+ * The "user -> user" attack scenario is mitigated by RSB filling.
+ *
+ * 2) Poisoned RSB entry
+ *
+ * If the 'next' in-kernel return stack is shorter than 'prev',
+ * 'next' could be tricked into speculating with a user-poisoned RSB
+ * entry.
+ *
+ * The "user -> kernel" attack scenario is mitigated by SMEP and
+ * eIBRS.
+ *
+ * The "user -> user" scenario, also known as SpectreBHB, requires
+ * RSB clearing.
+ *
+ * So to mitigate all cases, unconditionally fill RSB on context
+ * switches.
+ *
+ * FIXME: Is this pointless for retbleed-affected AMD?
*/
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
+ spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
+
/*
- * Retpoline means the kernel is safe because it has no indirect
- * branches. Enhanced IBRS protects firmware too, so, enable restricted
- * speculation around firmware calls only when Enhanced IBRS isn't
- * supported.
+ * Retpoline protects the kernel, but doesn't protect firmware. IBRS
+ * and Enhanced IBRS protect firmware too, so enable IBRS around
+ * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
+ * enabled.
*
* Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
* the user might select retpoline on the kernel command line and if
* the CPU supports Enhanced IBRS, kernel might un-intentionally not
* enable IBRS around firmware calls.
*/
- if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
+ if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n");
}
/* Set up IBPB and STIBP depending on the general spectre V2 command */
- spectre_v2_user_select_mitigation(cmd);
+ spectre_v2_cmd = cmd;
}
static void update_stibp_msr(void * __unused)
{
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
+ update_spec_ctrl(val);
}
/* Update x86_spec_ctrl_base in case SMT state changed. */
@@ -1374,16 +1758,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
}
/*
- * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
- * bit in the mask to allow guests to use the mitigation even in the
- * case where the host does not enable it.
- */
- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
- static_cpu_has(X86_FEATURE_AMD_SSBD)) {
- x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
- }
-
- /*
* We have three CPU feature flags that are in play here:
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
* - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
@@ -1400,7 +1774,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
x86_amd_ssb_disable();
} else {
x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ update_spec_ctrl(x86_spec_ctrl_base);
}
}
@@ -1528,6 +1902,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
if (ctrl == PR_SPEC_FORCE_DISABLE)
task_set_spec_ib_force_disable(task);
task_update_spec_tif(task);
+ if (task == current)
+ indirect_branch_prediction_barrier();
break;
default:
return -ERANGE;
@@ -1617,7 +1993,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
void x86_spec_ctrl_setup_ap(void)
{
if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ update_spec_ctrl(x86_spec_ctrl_base);
if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
x86_amd_ssb_disable();
@@ -1835,6 +2211,9 @@ static ssize_t tsx_async_abort_show_state(char *buf)
static ssize_t mmio_stale_data_show_state(char *buf)
{
+ if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+ return sysfs_emit(buf, "Unknown: No mitigations\n");
+
if (mmio_mitigation == MMIO_MITIGATION_OFF)
return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
@@ -1879,6 +2258,19 @@ static char *ibpb_state(void)
return "";
}
+static char *pbrsb_eibrs_state(void)
+{
+ if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
+ if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
+ boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
+ return ", PBRSB-eIBRS: SW sequence";
+ else
+ return ", PBRSB-eIBRS: Vulnerable";
+ } else {
+ return ", PBRSB-eIBRS: Not affected";
+ }
+}
+
static ssize_t spectre_v2_show_state(char *buf)
{
if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
@@ -1891,12 +2283,13 @@ static ssize_t spectre_v2_show_state(char *buf)
spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
- return sprintf(buf, "%s%s%s%s%s%s\n",
+ return sprintf(buf, "%s%s%s%s%s%s%s\n",
spectre_v2_strings[spectre_v2_enabled],
ibpb_state(),
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
stibp_state(),
boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+ pbrsb_eibrs_state(),
spectre_v2_module_string());
}
@@ -1905,6 +2298,16 @@ static ssize_t srbds_show_state(char *buf)
return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
}
+static ssize_t retbleed_show_state(char *buf)
+{
+ return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
+}
+
+static ssize_t gds_show_state(char *buf)
+{
+ return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
+}
+
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
@@ -1948,8 +2351,15 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
return srbds_show_state(buf);
case X86_BUG_MMIO_STALE_DATA:
+ case X86_BUG_MMIO_UNKNOWN:
return mmio_stale_data_show_state(buf);
+ case X86_BUG_RETBLEED:
+ return retbleed_show_state(buf);
+
+ case X86_BUG_GDS:
+ return gds_show_state(buf);
+
default:
break;
}
@@ -2004,6 +2414,19 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
- return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
+ if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
+ else
+ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
+}
+
+ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
+}
+
+ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
}
#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 305f30e45f3d..1592f309c3c1 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -17,11 +17,16 @@
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
+#include <linux/mem_encrypt.h>
#include <linux/smp.h>
+#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
#include <asm/stackprotector.h>
+#include <linux/utsname.h>
+
+#include <asm/alternative.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
@@ -57,6 +62,7 @@
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif
+#include <asm/set_memory.h>
#include "cpu.h"
@@ -444,8 +450,6 @@ static bool pku_disabled;
static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
- struct pkru_state *pk;
-
/* check the boot processor, plus compile options for PKU: */
if (!cpu_feature_enabled(X86_FEATURE_PKU))
return;
@@ -456,9 +460,6 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
return;
cr4_set_bits(X86_CR4_PKE);
- pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
- if (pk)
- pk->pkru = init_pkru_value;
/*
* Seting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
* cpuid bit to be set. We need to ensure that we
@@ -961,6 +962,12 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
if (c->extended_cpuid_level >= 0x8000000a)
c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
+ if (c->extended_cpuid_level >= 0x8000001f)
+ c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
+
+ if (c->extended_cpuid_level >= 0x80000021)
+ c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021);
+
init_scattered_cpuid_features(c);
init_speculation_control(c);
init_cqm(c);
@@ -1025,6 +1032,8 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#define NO_SWAPGS BIT(6)
#define NO_ITLB_MULTIHIT BIT(7)
#define NO_SPECTRE_V2 BIT(8)
+#define NO_EIBRS_PBRSB BIT(9)
+#define NO_MMIO BIT(10)
#define VULNWL(_vendor, _family, _model, _whitelist) \
{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -1045,6 +1054,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
/* Intel Family 6 */
+ VULNWL_INTEL(TIGERLAKE, NO_MMIO),
+ VULNWL_INTEL(TIGERLAKE_L, NO_MMIO),
+ VULNWL_INTEL(ALDERLAKE, NO_MMIO),
+ VULNWL_INTEL(ALDERLAKE_L, NO_MMIO),
+
VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
@@ -1063,9 +1077,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
/*
* Technically, swapgs isn't serializing on AMD (despite it previously
@@ -1075,66 +1089,88 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
* good enough for our purposes.
*/
- VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_TREMONT, NO_EIBRS_PBRSB),
+ VULNWL_INTEL(ATOM_TREMONT_L, NO_EIBRS_PBRSB),
+ VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
/* AMD Family 0xf - 0x12 */
- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
/* Zhaoxin Family 7 */
- VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2),
- VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2),
+ VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_MMIO),
+ VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_MMIO),
{}
};
+#define VULNBL(vendor, family, model, blacklist) \
+ X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)
+
#define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \
X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \
INTEL_FAM6_##model, steppings, \
X86_FEATURE_ANY, issues)
+#define VULNBL_AMD(family, blacklist) \
+ VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
+
+#define VULNBL_HYGON(family, blacklist) \
+ VULNBL(HYGON, family, X86_MODEL_ANY, blacklist)
+
#define SRBDS BIT(0)
/* CPU is affected by X86_BUG_MMIO_STALE_DATA */
#define MMIO BIT(1)
/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
#define MMIO_SBDS BIT(2)
+/* CPU is affected by RETbleed, speculating where you would not expect it */
+#define RETBLEED BIT(3)
+/* CPU is affected by SMT (cross-thread) return predictions */
+#define SMT_RSB BIT(4)
+/* CPU is affected by SRSO */
+#define SRSO BIT(5)
+/* CPU is affected by GDS */
+#define GDS BIT(6)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS),
- VULNBL_INTEL_STEPPINGS(HASWELL_X, BIT(2) | BIT(4), MMIO),
- VULNBL_INTEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x5), MMIO),
+ VULNBL_INTEL_STEPPINGS(HASWELL_X, X86_STEPPING_ANY, MMIO),
+ VULNBL_INTEL_STEPPINGS(BROADWELL_D, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS),
- VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO),
- VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS),
- VULNBL_INTEL_STEPPINGS(SKYLAKE_X, BIT(3) | BIT(4) | BIT(6) |
- BIT(7) | BIT(0xB), MMIO),
- VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO),
- VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS),
- VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x9, 0xC), SRBDS | MMIO),
- VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0x8), SRBDS),
- VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x9, 0xD), SRBDS | MMIO),
- VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0x8), SRBDS),
- VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPINGS(0x5, 0x5), MMIO | MMIO_SBDS),
- VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x1, 0x1), MMIO),
- VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0x6), MMIO),
- VULNBL_INTEL_STEPPINGS(COMETLAKE, BIT(2) | BIT(3) | BIT(5), MMIO | MMIO_SBDS),
- VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS),
- VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO),
- VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS),
- VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPINGS(0x1, 0x1), MMIO),
- VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED),
+ VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS),
+ VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS),
+ VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED),
+ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS),
+ VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS),
+ VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
+ VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO),
- VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPINGS(0x0, 0x0), MMIO | MMIO_SBDS),
+ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
+
+ VULNBL_AMD(0x15, RETBLEED),
+ VULNBL_AMD(0x16, RETBLEED),
+ VULNBL_AMD(0x17, RETBLEED),
+ VULNBL_HYGON(0x18, RETBLEED),
{}
};
@@ -1231,10 +1267,36 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* Affected CPU list is generally enough to enumerate the vulnerability,
* but in the virtualization case also check the ARCH_CAP MSR bits, as the VMM may
* not want the guest to enumerate the bug.
+ *
+ * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist
+ * nor in the whitelist and also don't enumerate the MSR ARCH_CAP MMIO bits.
+ */
+ if (!arch_cap_mmio_immune(ia32_cap)) {
+ if (cpu_matches(cpu_vuln_blacklist, MMIO))
+ setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+ else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
+ setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
+ }
+
+ if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
+ if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+ setup_force_cpu_bug(X86_BUG_RETBLEED);
+ }
+
+ if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
+ !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+ !(ia32_cap & ARCH_CAP_PBRSB_NO))
+ setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+
+ /*
+ * Check if CPU is vulnerable to GDS. If running in a virtual machine on
+ * an affected processor, the VMM may have disabled the use of GATHER by
+ * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
+ * which means that AVX will be disabled.
*/
- if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
- !arch_cap_mmio_immune(ia32_cap))
- setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+ if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
+ boot_cpu_has(X86_FEATURE_AVX))
+ setup_force_cpu_bug(X86_BUG_GDS);
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;
@@ -1321,8 +1383,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
cpu_set_bug_bits(c);
- fpu__init_system(c);
-
#ifdef CONFIG_X86_32
/*
* Regardless of whether PCID is enumerated, the SDM says
@@ -1714,6 +1774,8 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
validate_apic_and_package_id(c);
x86_spec_ctrl_setup_ap();
update_srbds_msr();
+ if (boot_cpu_has_bug(X86_BUG_GDS))
+ update_gds_msr();
}
static __init int setup_noclflush(char *arg)
@@ -2012,8 +2074,6 @@ void cpu_init(void)
clear_all_debug_regs();
dbg_restore_debug_regs();
- fpu__init_cpu();
-
if (is_uv_system())
uv_cpu_init();
@@ -2071,8 +2131,6 @@ void cpu_init(void)
clear_all_debug_regs();
dbg_restore_debug_regs();
- fpu__init_cpu();
-
load_fixmap_gdt(cpu);
}
#endif
@@ -2088,6 +2146,8 @@ void microcode_check(void)
perf_check_microcode();
+ amd_check_microcode();
+
/* Reload CPUID max function as it might've changed. */
info.cpuid_level = cpuid_eax(0);
@@ -2117,3 +2177,69 @@ void arch_smt_update(void)
/* Check whether IPI broadcasting can be enabled */
apic_smt_update();
}
+
+void __init arch_cpu_finalize_init(void)
+{
+ identify_boot_cpu();
+
+ /*
+ * identify_boot_cpu() initialized SMT support information, let the
+ * core code know.
+ */
+ cpu_smt_check_topology();
+
+ if (!IS_ENABLED(CONFIG_SMP)) {
+ pr_info("CPU: ");
+ print_cpu_info(&boot_cpu_data);
+ }
+
+ cpu_select_mitigations();
+
+ arch_smt_update();
+
+ if (IS_ENABLED(CONFIG_X86_32)) {
+ /*
+ * Check whether this is a real i386, which is no longer
+ * supported, and fix up the utsname.
+ */
+ if (boot_cpu_data.x86 < 4)
+ panic("Kernel requires i486+ for 'invlpg' and other features");
+
+ init_utsname()->machine[1] =
+ '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
+ }
+
+ /*
+ * Must be before alternatives because it might set or clear
+ * feature bits.
+ */
+ fpu__init_system();
+ fpu__init_cpu();
+
+ alternative_instructions();
+
+ if (IS_ENABLED(CONFIG_X86_64)) {
+ /*
+ * Make sure the first 2MB area is not mapped by huge pages.
+ * There are typically fixed-size MTRRs in there, and overlapping
+ * MTRRs with large pages causes slowdowns.
+ *
+ * Right now we don't do that with gbpages because there seems to be
+ * very little benefit in that case.
+ */
+ if (!direct_gbpages)
+ set_memory_4k((unsigned long)__va(0), 1);
+ } else {
+ fpu__init_check_bugs();
+ }
+
+ /*
+ * This needs to be called before any devices perform DMA
+ * operations that might use the SWIOTLB bounce buffers. It will
+ * mark the bounce buffers as decrypted so that their usage will
+ * not cause "plain-text" data to be decrypted when accessed. It
+ * must be called after late_time_init() so that Hyper-V x86/x64
+ * hypercalls work when the SWIOTLB bounce buffers are decrypted.
+ */
+ mem_encrypt_init();
+}
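An aside on the cpu_set_bug_bits() hunk above: the MMIO handling is now a three-way decision, immune when the ARCH_CAP MSR enumerates the "not affected" bits, affected when blacklisted, and unknown when the CPU is in neither list and enumerates nothing. A minimal user-space sketch of that decision tree follows; the bit positions and the classify() helper are invented for illustration, and only the control flow mirrors the kernel code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the "not affected" enumeration bits
     * checked by arch_cap_mmio_immune(); real bit positions differ. */
    #define ARCH_CAP_FBSDP_NO     (1ULL << 0)
    #define ARCH_CAP_PSDP_NO      (1ULL << 1)
    #define ARCH_CAP_SBDR_SSDP_NO (1ULL << 2)

    enum mmio_state { MMIO_IMMUNE, MMIO_STALE_DATA, MMIO_UNKNOWN };

    /* Immune only when all three bits are enumerated by the MSR. */
    static bool arch_cap_mmio_immune(unsigned long long cap)
    {
        return (cap & ARCH_CAP_FBSDP_NO) &&
               (cap & ARCH_CAP_PSDP_NO) &&
               (cap & ARCH_CAP_SBDR_SSDP_NO);
    }

    static enum mmio_state classify(unsigned long long cap,
                                    bool blacklisted, bool whitelisted)
    {
        if (arch_cap_mmio_immune(cap))
            return MMIO_IMMUNE;
        if (blacklisted)
            return MMIO_STALE_DATA;  /* X86_BUG_MMIO_STALE_DATA */
        if (!whitelisted)
            return MMIO_UNKNOWN;     /* X86_BUG_MMIO_UNKNOWN */
        return MMIO_IMMUNE;          /* whitelisted with NO_MMIO */
    }

    int main(void)
    {
        /* Neither listed, nothing enumerated: prints 2 (unknown). */
        printf("%d\n", classify(0, false, false));
        return 0;
    }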
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 4d04c127c4a7..8a64520b5310 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -76,9 +76,11 @@ extern void detect_ht(struct cpuinfo_x86 *c);
extern void check_null_seg_clears_base(struct cpuinfo_x86 *c);
unsigned int aperfmperf_get_khz(int cpu);
+void cpu_select_mitigations(void);
extern void x86_spec_ctrl_setup_ap(void);
extern void update_srbds_msr(void);
+extern void update_gds_msr(void);
extern u64 x86_read_arch_cap_msr(void);
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index b232bd0be78d..43586b7586c0 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -88,8 +88,12 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
if (!err)
c->x86_coreid_bits = get_count_order(c->x86_max_cores);
- /* Socket ID is ApicId[6] for these processors. */
- c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
+ /*
+ * Socket ID is ApicId[6] for the processors with model <= 0x3
+ * when running on the host.
+ */
+ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
+ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
cacheinfo_hygon_init_llc_id(c, cpu);
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
@@ -335,8 +339,8 @@ static void init_hygon(struct cpuinfo_x86 *c)
* msr_set_bit() uses the safe accessors, too, even if the MSR
* is not present.
*/
- msr_set_bit(MSR_F10H_DECFG,
- MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+ msr_set_bit(MSR_AMD64_DE_CFG,
+ MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
/* A serializing LFENCE stops RDTSC speculation */
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 44688917d51f..0418606ec3c0 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -187,6 +187,90 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
return false;
}
+#define MSR_IA32_TME_ACTIVATE 0x982
+
+/* Helpers to access TME_ACTIVATE MSR */
+#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
+#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
+
+#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
+#define TME_ACTIVATE_POLICY_AES_XTS_128 0
+
+#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
+
+#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
+#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
+
+/* Values for mktme_status (SW only construct) */
+#define MKTME_ENABLED 0
+#define MKTME_DISABLED 1
+#define MKTME_UNINITIALIZED 2
+static int mktme_status = MKTME_UNINITIALIZED;
+
+static void detect_tme_early(struct cpuinfo_x86 *c)
+{
+ u64 tme_activate, tme_policy, tme_crypto_algs;
+ int keyid_bits = 0, nr_keyids = 0;
+ static u64 tme_activate_cpu0 = 0;
+
+ rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
+
+ if (mktme_status != MKTME_UNINITIALIZED) {
+ if (tme_activate != tme_activate_cpu0) {
+ /* Broken BIOS? */
+ pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
+ pr_err_once("x86/tme: MKTME is not usable\n");
+ mktme_status = MKTME_DISABLED;
+
+ /* Proceed. We may need to exclude bits from x86_phys_bits. */
+ }
+ } else {
+ tme_activate_cpu0 = tme_activate;
+ }
+
+ if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
+ pr_info_once("x86/tme: not enabled by BIOS\n");
+ mktme_status = MKTME_DISABLED;
+ return;
+ }
+
+ if (mktme_status != MKTME_UNINITIALIZED)
+ goto detect_keyid_bits;
+
+ pr_info("x86/tme: enabled by BIOS\n");
+
+ tme_policy = TME_ACTIVATE_POLICY(tme_activate);
+ if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
+ pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
+
+ tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
+ if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
+ pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
+ tme_crypto_algs);
+ mktme_status = MKTME_DISABLED;
+ }
+detect_keyid_bits:
+ keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
+ nr_keyids = (1UL << keyid_bits) - 1;
+ if (nr_keyids) {
+ pr_info_once("x86/mktme: enabled by BIOS\n");
+ pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
+ } else {
+ pr_info_once("x86/mktme: disabled by BIOS\n");
+ }
+
+ if (mktme_status == MKTME_UNINITIALIZED) {
+ /* MKTME is usable */
+ mktme_status = MKTME_ENABLED;
+ }
+
+ /*
+ * KeyID bits effectively lower the number of physical address
+ * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
+ */
+ c->x86_phys_bits -= keyid_bits;
+}
+
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
@@ -339,6 +423,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
*/
if (detect_extended_topology_early(c) < 0)
detect_ht_early(c);
+
+ /*
+ * Adjust the number of physical bits early because it affects the
+ * valid bits of the MTRR mask registers.
+ */
+ if (cpu_has(c, X86_FEATURE_TME))
+ detect_tme_early(c);
}
#ifdef CONFIG_X86_32
@@ -540,90 +631,6 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
}
}
-#define MSR_IA32_TME_ACTIVATE 0x982
-
-/* Helpers to access TME_ACTIVATE MSR */
-#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
-#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
-
-#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
-#define TME_ACTIVATE_POLICY_AES_XTS_128 0
-
-#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
-
-#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
-#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
-
-/* Values for mktme_status (SW only construct) */
-#define MKTME_ENABLED 0
-#define MKTME_DISABLED 1
-#define MKTME_UNINITIALIZED 2
-static int mktme_status = MKTME_UNINITIALIZED;
-
-static void detect_tme(struct cpuinfo_x86 *c)
-{
- u64 tme_activate, tme_policy, tme_crypto_algs;
- int keyid_bits = 0, nr_keyids = 0;
- static u64 tme_activate_cpu0 = 0;
-
- rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
-
- if (mktme_status != MKTME_UNINITIALIZED) {
- if (tme_activate != tme_activate_cpu0) {
- /* Broken BIOS? */
- pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
- pr_err_once("x86/tme: MKTME is not usable\n");
- mktme_status = MKTME_DISABLED;
-
- /* Proceed. We may need to exclude bits from x86_phys_bits. */
- }
- } else {
- tme_activate_cpu0 = tme_activate;
- }
-
- if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
- pr_info_once("x86/tme: not enabled by BIOS\n");
- mktme_status = MKTME_DISABLED;
- return;
- }
-
- if (mktme_status != MKTME_UNINITIALIZED)
- goto detect_keyid_bits;
-
- pr_info("x86/tme: enabled by BIOS\n");
-
- tme_policy = TME_ACTIVATE_POLICY(tme_activate);
- if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
- pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
-
- tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
- if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
- pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
- tme_crypto_algs);
- mktme_status = MKTME_DISABLED;
- }
-detect_keyid_bits:
- keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
- nr_keyids = (1UL << keyid_bits) - 1;
- if (nr_keyids) {
- pr_info_once("x86/mktme: enabled by BIOS\n");
- pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
- } else {
- pr_info_once("x86/mktme: disabled by BIOS\n");
- }
-
- if (mktme_status == MKTME_UNINITIALIZED) {
- /* MKTME is usable */
- mktme_status = MKTME_ENABLED;
- }
-
- /*
- * KeyID bits effectively lower the number of physical address
- * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
- */
- c->x86_phys_bits -= keyid_bits;
-}
-
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
u64 msr;
@@ -758,9 +765,6 @@ static void init_intel(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_VMX))
detect_vmx_virtcap(c);
- if (cpu_has(c, X86_FEATURE_TME))
- detect_tme(c);
-
init_intel_misc_features(c);
if (tsx_ctrl_state == TSX_CTRL_ENABLE)
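Before the next file: the TME_ACTIVATE helpers relocated above are plain bitfield extractions. The stand-alone sketch below decodes an invented MSR value with the same shifts and masks; only the example value is made up.

    #include <stdint.h>
    #include <stdio.h>

    #define TME_ACTIVATE_LOCKED(x)      ((x) & 0x1)
    #define TME_ACTIVATE_ENABLED(x)     ((x) & 0x2)
    #define TME_ACTIVATE_POLICY(x)      (((x) >> 4) & 0xf)      /* Bits 7:4 */
    #define TME_ACTIVATE_KEYID_BITS(x)  (((x) >> 32) & 0xf)     /* Bits 35:32 */
    #define TME_ACTIVATE_CRYPTO_ALGS(x) (((x) >> 48) & 0xffff)  /* Bits 63:48 */

    int main(void)
    {
        /* Invented value: locked + enabled, policy 0 (AES-XTS-128),
         * 6 KeyID bits, crypto algorithm bitmap 0x1. */
        uint64_t tme_activate = 1ULL | (1ULL << 1) | (6ULL << 32) |
                                (1ULL << 48);
        int keyid_bits = (int)TME_ACTIVATE_KEYID_BITS(tme_activate);

        printf("locked=%d enabled=%d policy=%llu algs=%#llx\n",
               !!TME_ACTIVATE_LOCKED(tme_activate),
               !!TME_ACTIVATE_ENABLED(tme_activate),
               (unsigned long long)TME_ACTIVATE_POLICY(tme_activate),
               (unsigned long long)TME_ACTIVATE_CRYPTO_ALGS(tme_activate));
        /* 6 KeyID bits leave 63 KeyIDs and shave 6 bits off the usable
         * physical address width, as detect_tme_early() does. */
        printf("keyid_bits=%d nr_keyids=%d\n", keyid_bits,
               (1 << keyid_bits) - 1);
        return 0;
    }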
diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
index 2f163e6646b6..ad6776081e60 100644
--- a/arch/x86/kernel/cpu/match.c
+++ b/arch/x86/kernel/cpu/match.c
@@ -16,12 +16,17 @@
* respective wildcard entries.
*
* A typical table entry would be to match a specific CPU
- * { X86_VENDOR_INTEL, 6, 0x12 }
- * or to match a specific CPU feature
- * { X86_FEATURE_MATCH(X86_FEATURE_FOOBAR) }
+ *
+ * X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_BROADWELL,
+ * X86_FEATURE_ANY, NULL);
*
* Fields can be wildcarded with %X86_VENDOR_ANY, %X86_FAMILY_ANY,
- * %X86_MODEL_ANY, %X86_FEATURE_ANY or 0 (except for vendor)
+ * %X86_MODEL_ANY, %X86_FEATURE_ANY (except for vendor)
+ *
+ * asm/cpu_device_id.h contains a set of useful macros which are shortcuts
+ * for various common selections. The above can be shortened to:
+ *
+ * X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, NULL);
*
* Arrays used to match for this should also be declared using
* MODULE_DEVICE_TABLE(x86cpu, ...)
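The comment rewrite above gestures at how these tables are consumed. A hedged sketch of a typical consumer follows; the "foo" driver and its table contents are invented, while x86_match_cpu(), X86_MATCH_INTEL_FAM6_MODEL() and MODULE_DEVICE_TABLE() are the interfaces the comment names.

    #include <linux/module.h>
    #include <asm/cpu_device_id.h>
    #include <asm/intel-family.h>

    /* Hypothetical driver matching Broadwell parts only. */
    static const struct x86_cpu_id foo_cpu_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, NULL),
        {}
    };
    MODULE_DEVICE_TABLE(x86cpu, foo_cpu_ids);

    static int __init foo_init(void)
    {
        /* x86_match_cpu() returns NULL when no entry matches. */
        if (!x86_match_cpu(foo_cpu_ids))
            return -ENODEV;
        return 0;
    }
    module_init(foo_init);
    MODULE_LICENSE("GPL");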
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 9b98a7d8ac60..84c0e5c2518c 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -42,6 +42,7 @@
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/set_memory.h>
+#include <linux/kexec.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
@@ -315,6 +316,7 @@ static noinstr void mce_panic(const char *msg, struct mce *final, char *exp)
struct llist_node *pending;
struct mce_evt_llist *l;
int apei_err = 0;
+ struct page *p;
/*
* Allow instrumentation around external facilities usage. Not that it
@@ -370,6 +372,20 @@ static noinstr void mce_panic(const char *msg, struct mce *final, char *exp)
if (!fake_panic) {
if (panic_timeout == 0)
panic_timeout = mca_cfg.panic_timeout;
+
+ /*
+ * Kdump skips the poisoned page in order to avoid
+ * touching the error bits again. Poison the page even
+ * if the error is fatal and the machine is about to
+ * panic.
+ */
+ if (kexec_crash_loaded()) {
+ if (final && (final->status & MCI_STATUS_ADDRV)) {
+ p = pfn_to_online_page(final->addr >> PAGE_SHIFT);
+ if (p)
+ SetPageHWPoison(p);
+ }
+ }
panic(msg);
} else
pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index a0e52bd00ecc..2852c99196fa 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -55,7 +55,9 @@ struct cont_desc {
};
static u32 ucode_new_rev;
-static u8 amd_ucode_patch[PATCH_MAX_SIZE];
+
+/* One blob per node. */
+static u8 amd_ucode_patch[MAX_NUMNODES][PATCH_MAX_SIZE];
/*
* Microcode patch container file is prepended to the initrd in cpio
@@ -429,7 +431,7 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p
patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
new_rev = &ucode_new_rev;
- patch = &amd_ucode_patch;
+ patch = &amd_ucode_patch[0];
#endif
desc.cpuid_1_eax = cpuid_1_eax;
@@ -441,7 +443,13 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p
return ret;
native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
- if (rev >= mc->hdr.patch_id)
+
+ /*
+ * Allow application of the same revision to pick up SMT-specific
+ * changes even if the revision of the other SMT thread is already
+ * up-to-date.
+ */
+ if (rev > mc->hdr.patch_id)
return ret;
if (!__apply_microcode_amd(mc)) {
@@ -523,8 +531,12 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
- /* Check whether we have saved a new patch already: */
- if (*new_rev && rev < mc->hdr.patch_id) {
+ /*
+ * Check whether a new patch has been saved already. Also, allow application of
+ * the same revision in order to pick up SMT-thread-specific configuration even
+ * if the sibling SMT thread already has an up-to-date revision.
+ */
+ if (*new_rev && rev <= mc->hdr.patch_id) {
if (!__apply_microcode_amd(mc)) {
*new_rev = mc->hdr.patch_id;
return;
@@ -538,8 +550,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);
}
-static enum ucode_state
-load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
+static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
{
@@ -557,19 +568,19 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
if (!desc.mc)
return -EINVAL;
- ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
+ ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
if (ret > UCODE_UPDATED)
return -EINVAL;
return 0;
}
-void reload_ucode_amd(void)
+void reload_ucode_amd(unsigned int cpu)
{
- struct microcode_amd *mc;
u32 rev, dummy;
+ struct microcode_amd *mc;
- mc = (struct microcode_amd *)amd_ucode_patch;
+ mc = (struct microcode_amd *)amd_ucode_patch[cpu_to_node(cpu)];
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
@@ -689,7 +700,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
/* need to apply patch? */
- if (rev >= mc_amd->hdr.patch_id) {
+ if (rev > mc_amd->hdr.patch_id) {
ret = UCODE_OK;
goto out;
}
@@ -783,6 +794,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
kfree(patch);
return -EINVAL;
}
+ patch->size = *patch_size;
mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
proc_id = mc_hdr->processor_rev_id;
@@ -834,9 +846,10 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
return UCODE_OK;
}
-static enum ucode_state
-load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
+static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
+ struct cpuinfo_x86 *c;
+ unsigned int nid, cpu;
struct ucode_patch *p;
enum ucode_state ret;
@@ -849,22 +862,22 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
return ret;
}
- p = find_patch(0);
- if (!p) {
- return ret;
- } else {
- if (boot_cpu_data.microcode >= p->patch_id)
- return ret;
+ for_each_node(nid) {
+ cpu = cpumask_first(cpumask_of_node(nid));
+ c = &cpu_data(cpu);
- ret = UCODE_NEW;
- }
+ p = find_patch(cpu);
+ if (!p)
+ continue;
- /* save BSP's matching patch for early load */
- if (!save)
- return ret;
+ if (c->microcode >= p->patch_id)
+ continue;
+
+ ret = UCODE_NEW;
- memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
- memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
+ memset(&amd_ucode_patch[nid], 0, PATCH_MAX_SIZE);
+ memcpy(&amd_ucode_patch[nid], p->data, min_t(u32, p->size, PATCH_MAX_SIZE));
+ }
return ret;
}
@@ -890,12 +903,11 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
{
char fw_name[36] = "amd-ucode/microcode_amd.bin";
struct cpuinfo_x86 *c = &cpu_data(cpu);
- bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
enum ucode_state ret = UCODE_NFOUND;
const struct firmware *fw;
/* reload ucode container only on the boot cpu */
- if (!refresh_fw || !bsp)
+ if (!refresh_fw)
return UCODE_OK;
if (c->x86 >= 0x15)
@@ -910,7 +922,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
if (!verify_container(fw->data, fw->size, false))
goto fw_release;
- ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
+ ret = load_microcode_amd(c->x86, fw->data, fw->size);
fw_release:
release_firmware(fw);
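The comparison flips in this file (rev >= patch_id becoming rev > patch_id, and rev < becoming rev <=) carry the whole SMT fix. A stand-alone sketch of the two gates, with an invented revision number, shows the behavioral difference for an equal revision:

    #include <stdbool.h>
    #include <stdio.h>

    /* Old gate: an equal revision counts as up to date and is skipped. */
    static bool should_apply_old(unsigned int rev, unsigned int patch_id)
    {
        return !(rev >= patch_id);
    }

    /* New gate: an equal revision is re-applied, so the sibling SMT
     * thread can pick up thread-specific configuration. */
    static bool should_apply_new(unsigned int rev, unsigned int patch_id)
    {
        return !(rev > patch_id);
    }

    int main(void)
    {
        unsigned int rev = 0x08001250, patch_id = 0x08001250; /* invented */

        printf("old=%d new=%d\n",
               should_apply_old(rev, patch_id),   /* 0: skipped */
               should_apply_new(rev, patch_id));  /* 1: re-applied */
        return 0;
    }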
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index c95a27513a30..834c5f723dae 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -322,7 +322,7 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
#endif
}
-void reload_early_microcode(void)
+void reload_early_microcode(unsigned int cpu)
{
int vendor, family;
@@ -336,7 +336,7 @@ void reload_early_microcode(void)
break;
case X86_VENDOR_AMD:
if (family >= 0x10)
- reload_ucode_amd();
+ reload_ucode_amd(cpu);
break;
default:
break;
@@ -782,7 +782,7 @@ void microcode_bsp_resume(void)
if (uci->valid && uci->mc)
microcode_ops->apply_microcode(cpu);
else if (!uci->mc)
- reload_early_microcode();
+ reload_early_microcode(cpu);
}
static struct syscore_ops mc_syscore_ops = {
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index b224d4dae2ff..896f456b3f5c 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -659,7 +659,6 @@ void load_ucode_intel_ap(void)
else
iup = &intel_ucode_patch;
-reget:
if (!*iup) {
patch = __load_ucode_intel(&uci);
if (!patch)
@@ -670,12 +669,7 @@ reget:
uci.mc = *iup;
- if (apply_microcode_early(&uci, true)) {
- /* Mixed-silicon system? Try to refetch the proper patch: */
- *iup = NULL;
-
- goto reget;
- }
+ apply_microcode_early(&uci, true);
}
static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 507039c20128..4482819bb13c 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -794,8 +794,6 @@ void mtrr_ap_init(void)
if (!use_intel() || mtrr_aps_delayed_init)
return;
- rcu_cpu_starting(smp_processor_id());
-
/*
* Ideally we should hold mtrr_mutex here to avoid MTRR entries
* being changed, but this routine is called at CPU boot time,
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index d7623e1b927d..f186470c2e66 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -416,6 +416,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
struct pseudo_lock_region *plr = rdtgrp->plr;
u32 rmid_p, closid_p;
unsigned long i;
+ u64 saved_msr;
#ifdef CONFIG_KASAN
/*
* The registers used for local register variables are also used
@@ -459,6 +460,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
* the buffer and evict pseudo-locked memory read earlier from the
* cache.
*/
+ saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
__wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
closid_p = this_cpu_read(pqr_state.cur_closid);
rmid_p = this_cpu_read(pqr_state.cur_rmid);
@@ -510,7 +512,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
__wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p);
/* Re-enable the hardware prefetcher(s) */
- wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+ wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr);
local_irq_enable();
plr->thread_done = 1;
@@ -867,6 +869,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
static int measure_cycles_lat_fn(void *_plr)
{
struct pseudo_lock_region *plr = _plr;
+ u32 saved_low, saved_high;
unsigned long i;
u64 start, end;
void *mem_r;
@@ -875,6 +878,7 @@ static int measure_cycles_lat_fn(void *_plr)
/*
* Disable hardware prefetchers.
*/
+ rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
mem_r = READ_ONCE(plr->kmem);
/*
@@ -891,7 +895,7 @@ static int measure_cycles_lat_fn(void *_plr)
end = rdtsc_ordered();
trace_pseudo_lock_mem_latency((u32)(end - start));
}
- wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+ wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
local_irq_enable();
plr->thread_done = 1;
wake_up_interruptible(&plr->lock_thread_wq);
@@ -936,6 +940,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0;
struct perf_event *miss_event, *hit_event;
int hit_pmcnum, miss_pmcnum;
+ u32 saved_low, saved_high;
unsigned int line_size;
unsigned int size;
unsigned long i;
@@ -969,6 +974,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
/*
* Disable hardware prefetchers.
*/
+ rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
/* Initialize rest of local variables */
@@ -1027,7 +1033,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
*/
rmb();
/* Re-enable hardware prefetchers */
- wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+ wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
local_irq_enable();
out_hit:
perf_event_release_kernel(hit_event);
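All three hunks in this file apply the same discipline: snapshot MSR_MISC_FEATURE_CONTROL before disabling the prefetchers, then restore the snapshot instead of writing 0, which could clobber bits the firmware had set. Schematically, in kernel-style C; the with_prefetchers_disabled() wrapper is hypothetical, while rdmsr()/wrmsr() and prefetch_disable_bits come from the surrounding file:

    static void with_prefetchers_disabled(void (*fn)(void))
    {
        u32 saved_low, saved_high;

        /* Snapshot whatever value firmware or earlier code left. */
        rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
        wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);

        fn();   /* measurement / pseudo-lock critical section */

        /* Restore the snapshot rather than assuming 0 was correct. */
        wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
    }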
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 28f786289fce..91016bb18d4f 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -311,7 +311,7 @@ static void update_cpu_closid_rmid(void *info)
* executing task might have its own closid selected. Just reuse
* the context switch code.
*/
- resctrl_sched_in();
+ resctrl_sched_in(current);
}
/*
@@ -532,7 +532,7 @@ static void _update_task_closid_rmid(void *task)
* Otherwise, the MSR is updated when the task is scheduled in.
*/
if (task == current)
- resctrl_sched_in();
+ resctrl_sched_in(task);
}
static void update_task_closid_rmid(struct task_struct *t)
@@ -563,11 +563,11 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
*/
if (rdtgrp->type == RDTCTRL_GROUP) {
- tsk->closid = rdtgrp->closid;
- tsk->rmid = rdtgrp->mon.rmid;
+ WRITE_ONCE(tsk->closid, rdtgrp->closid);
+ WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
} else if (rdtgrp->type == RDTMON_GROUP) {
if (rdtgrp->mon.parent->closid == tsk->closid) {
- tsk->rmid = rdtgrp->mon.rmid;
+ WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
} else {
rdt_last_cmd_puts("Can't move task to different control group\n");
return -EINVAL;
@@ -577,8 +577,10 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
/*
* Ensure the task's closid and rmid are written before determining if
* the task is current, which decides whether it needs to be interrupted.
+ * This pairs with the full barrier between the rq->curr update and
+ * resctrl_sched_in() during context switch.
*/
- barrier();
+ smp_mb();
/*
* By now, the task's closid and rmid are set. If the task is current
@@ -591,6 +593,18 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
return 0;
}
+static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
+{
+ return (rdt_alloc_capable &&
+ (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
+}
+
+static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
+{
+ return (rdt_mon_capable &&
+ (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
+}
+
/**
* rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
* @r: Resource group
@@ -606,8 +620,7 @@ int rdtgroup_tasks_assigned(struct rdtgroup *r)
rcu_read_lock();
for_each_process_thread(p, t) {
- if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
- (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) {
+ if (is_closid_match(t, r) || is_rmid_match(t, r)) {
ret = 1;
break;
}
@@ -702,12 +715,15 @@ unlock:
static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
struct task_struct *p, *t;
+ pid_t pid;
rcu_read_lock();
for_each_process_thread(p, t) {
- if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
- (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid))
- seq_printf(s, "%d\n", t->pid);
+ if (is_closid_match(t, r) || is_rmid_match(t, r)) {
+ pid = task_pid_vnr(t);
+ if (pid)
+ seq_printf(s, "%d\n", pid);
+ }
}
rcu_read_unlock();
}
@@ -2146,18 +2162,6 @@ static int reset_all_ctrls(struct rdt_resource *r)
return 0;
}
-static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
-{
- return (rdt_alloc_capable &&
- (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
-}
-
-static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
-{
- return (rdt_mon_capable &&
- (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
-}
-
/*
* Move tasks from one to the other group. If @from is NULL, then all tasks
* in the systems are moved unconditionally (used for teardown).
@@ -2175,22 +2179,26 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
for_each_process_thread(p, t) {
if (!from || is_closid_match(t, from) ||
is_rmid_match(t, from)) {
- t->closid = to->closid;
- t->rmid = to->mon.rmid;
+ WRITE_ONCE(t->closid, to->closid);
+ WRITE_ONCE(t->rmid, to->mon.rmid);
+
+ /*
+ * Order the closid/rmid stores above before the loads
+ * in task_curr(). This pairs with the full barrier
+ * between the rq->curr update and resctrl_sched_in()
+ * during context switch.
+ */
+ smp_mb();
-#ifdef CONFIG_SMP
/*
- * This is safe on x86 w/o barriers as the ordering
- * of writing to task_cpu() and t->on_cpu is
- * reverse to the reading here. The detection is
- * inaccurate as tasks might move or schedule
- * before the smp function call takes place. In
- * such a case the function call is pointless, but
+ * If the task is on a CPU, set the CPU in the mask.
+ * The detection is inaccurate as tasks might move or
+ * schedule before the smp function call takes place.
+ * In such a case the function call is pointless, but
* there is no other side effect.
*/
- if (mask && t->on_cpu)
+ if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
cpumask_set_cpu(task_cpu(t), mask);
-#endif
}
}
read_unlock(&tasklist_lock);
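The barrier() to smp_mb() upgrades above implement the standard store-then-check-remote pattern. A user-space C11 model of the two racing sides follows; all names are invented, and only the ordering mirrors the kernel comments: the mover stores the closid, issues a full fence, then checks whether the task runs, while the scheduler stores "running", fences, then loads the closid. At least one side is guaranteed to observe the other's store, which closes the window the plain compiler barrier left open.

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic int task_closid;
    static _Atomic bool task_running;

    /* Mover side (cf. rdt_move_group_tasks()). */
    static bool writer_update(int new_closid)
    {
        atomic_store_explicit(&task_closid, new_closid,
                              memory_order_relaxed);   /* WRITE_ONCE() */
        atomic_thread_fence(memory_order_seq_cst);     /* smp_mb() */
        /* If this load sees "running", kick the CPU; otherwise the
         * scheduler's own fence guarantees it sees the new closid. */
        return atomic_load_explicit(&task_running, memory_order_relaxed);
    }

    /* Scheduler side (cf. rq->curr update then resctrl_sched_in()). */
    static int scheduler_switch_in(void)
    {
        atomic_store_explicit(&task_running, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&task_closid, memory_order_relaxed);
    }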
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 53004dbd55c4..37f716eaf0e6 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -26,6 +26,7 @@ struct cpuid_bit {
static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
+ { X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 },
{ X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 },
{ X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 },
{ X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 },
@@ -39,9 +40,6 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
- { X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 },
- { X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 },
- { X86_FEATURE_SME_COHERENT, CPUID_EAX, 10, 0x8000001f, 0 },
{ 0, 0, 0, 0, 0 }
};
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index 24da5ee4f022..5729ed7bb3e7 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -79,7 +79,7 @@ int detect_extended_topology_early(struct cpuinfo_x86 *c)
* initial apic id, which also represents 32-bit extended x2apic id.
*/
c->initial_apicid = edx;
- smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
+ smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
#endif
return 0;
}
@@ -107,7 +107,8 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
*/
cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
c->initial_apicid = edx;
- core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
+ core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
+ smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
index 032509adf9de..88a553ee7704 100644
--- a/arch/x86/kernel/cpu/tsx.c
+++ b/arch/x86/kernel/cpu/tsx.c
@@ -55,24 +55,6 @@ void tsx_enable(void)
wrmsrl(MSR_IA32_TSX_CTRL, tsx);
}
-static bool __init tsx_ctrl_is_supported(void)
-{
- u64 ia32_cap = x86_read_arch_cap_msr();
-
- /*
- * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
- * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
- *
- * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
- * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
- * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
- * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
- * tsx= cmdline requests will do nothing on CPUs without
- * MSR_IA32_TSX_CTRL support.
- */
- return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
-}
-
static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
{
if (boot_cpu_has_bug(X86_BUG_TAA))
@@ -86,9 +68,22 @@ void __init tsx_init(void)
char arg[5] = {};
int ret;
- if (!tsx_ctrl_is_supported())
+ /*
+ * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
+ * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
+ *
+ * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
+ * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
+ * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
+ * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
+ * tsx= cmdline requests will do nothing on CPUs without
+ * MSR_IA32_TSX_CTRL support.
+ */
+ if (!(x86_read_arch_cap_msr() & ARCH_CAP_TSX_CTRL_MSR))
return;
+ setup_force_cpu_cap(X86_FEATURE_MSR_TSX_CTRL);
+
ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
if (ret >= 0) {
if (!strcmp(arg, "on")) {
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 0c319d09378d..539d8cf5904d 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -37,7 +37,6 @@
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
-#include <asm/virtext.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>
#include <asm/cmdline.h>
@@ -94,15 +93,6 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
*/
cpu_crash_vmclear_loaded_vmcss();
- /* Disable VMX or SVM if needed.
- *
- * We need to disable virtualization on all CPUs.
- * Having VMX or SVM enabled on any CPU may break rebooting
- * after the kdump kernel has finished its task.
- */
- cpu_emergency_vmxoff();
- cpu_emergency_svm_disable();
-
/*
* Disable Intel PT to stop its logging
*/
@@ -161,12 +151,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
*/
cpu_crash_vmclear_loaded_vmcss();
- /* Booting kdump kernel with VMX or SVM enabled won't work,
- * because (among other limitations) we can't disable paging
- * with the virt flags.
- */
- cpu_emergency_vmxoff();
- cpu_emergency_svm_disable();
+ cpu_emergency_disable_virtualization();
/*
* Disable Intel PT to stop its logging
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index e07424e19274..9b2bbb66d0c8 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -171,7 +171,6 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
printk("%sCall Trace:\n", log_lvl);
unwind_start(&state, task, regs, stack);
- stack = stack ? : get_stack_pointer(task, regs);
regs = unwind_get_entry_regs(&state, &partial);
/*
@@ -190,9 +189,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
* - hardirq stack
* - entry stack
*/
- for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+ for (stack = stack ?: get_stack_pointer(task, regs);
+ stack;
+ stack = stack_info.next_sp) {
const char *stack_name;
+ stack = PTR_ALIGN(stack, sizeof(long));
+
if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
/*
* We weren't on a valid stack. It's possible that
@@ -326,7 +329,7 @@ unsigned long oops_begin(void)
}
NOKPROBE_SYMBOL(oops_begin);
-void __noreturn rewind_stack_do_exit(int signr);
+void __noreturn rewind_stack_and_make_dead(int signr);
void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
@@ -361,7 +364,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
* reuse the task stack and that existing poisons are invalid.
*/
kasan_unpoison_task_stack(current);
- rewind_stack_do_exit(signr);
+ rewind_stack_and_make_dead(signr);
}
NOKPROBE_SYMBOL(oops_end);
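The rewritten loop above rounds each candidate stack pointer up to a long boundary with PTR_ALIGN() before validating it. A stand-alone illustration, with the two helpers redefined locally along the lines of include/linux/kernel.h (power-of-two alignment assumed):

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN(x, a)     (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))
    #define PTR_ALIGN(p, a) ((void *)ALIGN((uintptr_t)(p), (a)))

    int main(void)
    {
        char buf[32];
        void *p = buf + 3;      /* deliberately misaligned */

        /* Rounds up to the next sizeof(long) boundary. */
        printf("%p -> %p\n", p, PTR_ALIGN(p, sizeof(long)));
        return 0;
    }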
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index b271da0fa219..5e710ad6a3c0 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -50,7 +50,7 @@ void fpu__init_cpu(void)
fpu__init_cpu_xstate();
}
-static bool fpu__probe_without_cpuid(void)
+static bool __init fpu__probe_without_cpuid(void)
{
unsigned long cr0;
u16 fsw, fcw;
@@ -68,7 +68,7 @@ static bool fpu__probe_without_cpuid(void)
return fsw == 0 && (fcw & 0x103f) == 0x003f;
}
-static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
+static void __init fpu__init_system_early_generic(void)
{
if (!boot_cpu_has(X86_FEATURE_CPUID) &&
!test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
@@ -139,9 +139,6 @@ static void __init fpu__init_system_generic(void)
unsigned int fpu_kernel_xstate_size;
EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size);
-/* Get alignment of the TYPE. */
-#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)
-
/*
* Enforce that 'MEMBER' is the last field of 'TYPE'.
*
@@ -149,8 +146,8 @@ EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size);
* because that's how C aligns structs.
*/
#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
- BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \
- TYPE_ALIGN(TYPE)))
+ BUILD_BUG_ON(sizeof(TYPE) != \
+ ALIGN(offsetofend(TYPE, MEMBER), _Alignof(TYPE)))
/*
* We append the 'struct fpu' to the task_struct:
@@ -293,10 +290,10 @@ static void __init fpu__init_parse_early_param(void)
* Called on the boot CPU once per system bootup, to set up the initial
* FPU state that is later cloned into all processes:
*/
-void __init fpu__init_system(struct cpuinfo_x86 *c)
+void __init fpu__init_system(void)
{
fpu__init_parse_early_param();
- fpu__init_system_early_generic(c);
+ fpu__init_system_early_generic();
/*
* The FPU has to be operational for some of the
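The TYPE_ALIGN() removal above swaps a struct-in-struct offsetof trick for the C11 _Alignof operator. A stand-alone demonstration of the resulting end-of-struct check, with offsetofend() and ALIGN() defined locally to match their kernel counterparts:

    #include <assert.h>
    #include <stddef.h>

    #define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
    #define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))
    #define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
        static_assert(sizeof(TYPE) == \
                      ALIGN(offsetofend(TYPE, MEMBER), _Alignof(TYPE)), \
                      #MEMBER " is not the last member of " #TYPE)

    struct demo {
        int a;
        long b;     /* really is the last member */
    };

    /* Compiles; swapping a and b above would trip the assertion. */
    CHECK_MEMBER_AT_END_OF(struct demo, b);

    int main(void) { return 0; }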
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 046782df37a6..d8162f6baa5d 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -805,6 +805,14 @@ void __init fpu__init_system_xstate(void)
fpu__init_prepare_fx_sw_frame();
setup_init_fpu_buf();
setup_xstate_comp();
+
+ /*
+ * CPU capabilities initialization runs before FPU init. So
+ * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely
+ * functional, set the feature bit so depending code works.
+ */
+ setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
+
print_xstate_offset_size();
pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index fe522691ac71..82753622f489 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -32,6 +32,7 @@
*/
static void init_8259A(int auto_eoi);
+static bool pcat_compat __ro_after_init;
static int i8259A_auto_eoi;
DEFINE_RAW_SPINLOCK(i8259A_lock);
@@ -114,6 +115,7 @@ static void make_8259A_irq(unsigned int irq)
disable_irq_nosync(irq);
io_apic_irqs &= ~(1<<irq);
irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+ irq_set_status_flags(irq, IRQ_LEVEL);
enable_irq(irq);
lapic_assign_legacy_vector(irq, true);
}
@@ -300,15 +302,32 @@ static void unmask_8259A(void)
static int probe_8259A(void)
{
+ unsigned char new_val, probe_val = ~(1 << PIC_CASCADE_IR);
unsigned long flags;
- unsigned char probe_val = ~(1 << PIC_CASCADE_IR);
- unsigned char new_val;
+
+ /*
+ * If MADT has the PCAT_COMPAT flag set, then do not bother probing
+ * for the PIC. Some BIOSes leave the PIC uninitialized and probing
+ * fails.
+ *
+ * Right now this causes problems as quite some code depends on
+ * Right now this causes problems as quite a bit of code depends on
+ * nr_legacy_irqs() > 0 or has_legacy_pic() == true. This is silly
+ * when the system has an IO/APIC, because then the PIC is not required
+ * must be routed through the PIC. So just pretend that the PIC is
+ * there and let legacy_pic->init() initialize it for nothing.
+ *
+ * Alternatively this could just try to initialize the PIC and
+ * repeat the probe, but for cases where there is no PIC that's
+ * just pointless.
+ */
+ if (pcat_compat)
+ return nr_legacy_irqs();
+
/*
- * Check to see if we have a PIC.
- * Mask all except the cascade and read
- * back the value we just wrote. If we don't
- * have a PIC, we will read 0xff as opposed to the
- * value we wrote.
+ * Check to see if we have a PIC. Mask all except the cascade and
+ * read back the value we just wrote. If we don't have a PIC, we
+ * will read 0xff as opposed to the value we wrote.
*/
raw_spin_lock_irqsave(&i8259A_lock, flags);
@@ -430,5 +449,9 @@ static int __init i8259A_init_ops(void)
return 0;
}
-
device_initcall(i8259A_init_ops);
+
+void __init legacy_pic_pcat_compat(void)
+{
+ pcat_compat = true;
+}
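The probe that the new comment short-circuits boils down to one write and one read of the master PIC's mask register. A sketch of just that check, without the locking and mask restore of the real probe_8259A(); outb()/inb() and the PIC_* constants are the kernel's own:

    static int have_pic(void)
    {
        unsigned char new_val, probe_val = ~(1 << PIC_CASCADE_IR);

        /* Mask everything except the cascade line... */
        outb(probe_val, PIC_MASTER_IMR);
        /* ...and read the mask back: with no PIC present the bus
         * floats and the read returns 0xff instead of probe_val. */
        new_val = inb(PIC_MASTER_IMR);

        return new_val == probe_val;
    }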
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 16919a9671fa..faed27c8dc39 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -72,8 +72,10 @@ void __init init_ISA_irqs(void)
legacy_pic->init(0);
- for (i = 0; i < nr_legacy_irqs(); i++)
+ for (i = 0; i < nr_legacy_irqs(); i++) {
irq_set_chip_and_handler(i, chip, handle_level_irq);
+ irq_set_status_flags(i, IRQ_LEVEL);
+ }
}
void __init init_IRQ(void)
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index b348dd506d58..90d41b22c5c2 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -43,8 +43,8 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
/* This function only handles jump-optimized kprobe */
if (kp && kprobe_optimized(kp)) {
op = container_of(kp, struct optimized_kprobe, kp);
- /* If op->list is not empty, op is under optimizing */
- if (list_empty(&op->list))
+ /* If op is optimized or under unoptimizing */
+ if (list_empty(&op->list) || optprobe_queued_unopt(op))
goto found;
}
}
@@ -314,7 +314,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op)
for (i = 1; i < op->optinsn.size; i++) {
p = get_kprobe(op->kp.addr + i);
- if (p && !kprobe_disabled(p))
+ if (p && !kprobe_disarmed(p))
return -EEXIST;
}
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d81e34e614e0..35dca8a5ca90 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -24,8 +24,8 @@
static int kvmclock __initdata = 1;
static int kvmclock_vsyscall __initdata = 1;
-static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME;
-static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK;
+static int msr_kvm_system_time __ro_after_init;
+static int msr_kvm_wall_clock __ro_after_init;
static u64 kvm_sched_clock_offset __ro_after_init;
static int __init parse_no_kvmclock(char *arg)
@@ -189,7 +189,8 @@ static void kvm_setup_secondary_clock(void)
void kvmclock_disable(void)
{
- native_write_msr(msr_kvm_system_time, 0, 0);
+ if (msr_kvm_system_time)
+ native_write_msr(msr_kvm_system_time, 0, 0);
}
static void __init kvmclock_init_mem(void)
@@ -286,7 +287,10 @@ void __init kvmclock_init(void)
if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
- } else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
+ } else if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
+ msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
+ msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
+ } else {
return;
}
diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c
index 6b07faaa1579..23154d24b117 100644
--- a/arch/x86/kernel/pmem.c
+++ b/arch/x86/kernel/pmem.c
@@ -27,6 +27,11 @@ static __init int register_e820_pmem(void)
* simply here to trigger the module to load on demand.
*/
pdev = platform_device_alloc("e820_pmem", -1);
- return platform_device_add(pdev);
+
+ rc = platform_device_add(pdev);
+ if (rc)
+ platform_device_put(pdev);
+
+ return rc;
}
device_initcall(register_e820_pmem);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 571e38c9ee1d..b8de27bb6e09 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -449,7 +449,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
}
if (updmsr)
- wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+ update_spec_ctrl_cond(msr);
}
static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
@@ -659,6 +659,10 @@ static void amd_e400_idle(void)
*/
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
+ /* User has disallowed the use of MWAIT. Fall back to HALT. */
+ if (boot_option_idle_override == IDLE_NOMWAIT)
+ return 0;
+
if (c->x86_vendor != X86_VENDOR_INTEL)
return 0;
@@ -769,9 +773,8 @@ static int __init idle_setup(char *str)
} else if (!strcmp(str, "nomwait")) {
/*
* If the boot option of "idle=nomwait" is added,
- * it means that mwait will be disabled for CPU C2/C3
- * states. In such case it won't touch the variable
- * of boot_option_idle_override.
+ * it means that mwait will be disabled for CPU C1/C2/C3
+ * states.
*/
boot_option_idle_override = IDLE_NOMWAIT;
} else
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 352f876950ab..8ef71fa91ff8 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -293,7 +293,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
switch_fpu_finish(next_p);
/* Load the Intel cache allocation PQR MSR. */
- resctrl_sched_in();
+ resctrl_sched_in(next_p);
return prev_p;
}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 633788362906..b77f0d9dad55 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -610,7 +610,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
}
/* Load the Intel cache allocation PQR MSR. */
- resctrl_sched_in();
+ resctrl_sched_in(next_p);
return prev_p;
}
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 1daf8f2aa21f..c205f30147cc 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -95,7 +95,7 @@ static void ich_force_hpet_resume(void)
static void ich_force_enable_hpet(struct pci_dev *dev)
{
u32 val;
- u32 uninitialized_var(rcba);
+ u32 rcba;
int err = 0;
if (hpet_address || force_hpet_address)
@@ -185,7 +185,7 @@ static void hpet_print_force_info(void)
static void old_ich_force_hpet_resume(void)
{
u32 val;
- u32 uninitialized_var(gen_cntl);
+ u32 gen_cntl;
if (!force_hpet_address || !cached_dev)
return;
@@ -207,7 +207,7 @@ static void old_ich_force_hpet_resume(void)
static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
u32 val;
- u32 uninitialized_var(gen_cntl);
+ u32 gen_cntl;
if (hpet_address || force_hpet_address)
return;
@@ -298,7 +298,7 @@ static void vt8237_force_hpet_resume(void)
static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
- u32 uninitialized_var(val);
+ u32 val;
if (hpet_address || force_hpet_address)
return;
@@ -429,7 +429,7 @@ static void nvidia_force_hpet_resume(void)
static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
- u32 uninitialized_var(val);
+ u32 val;
if (hpet_address || force_hpet_address)
return;
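For context on the quirks.c hunks: uninitialized_var() was the kernel's old annotation for silencing false-positive "may be used uninitialized" warnings, defined essentially as the self-assignment below (the exact definition varied by compiler). It was removed tree-wide because the self-assignment also hid genuine use-before-init bugs.

    /* Former definition, shown for reference only (now removed): */
    #define uninitialized_var(x) x = x

Dropping the annotation, as these hunks do, lets the compiler warn again if some path really does read the variable before it is assigned.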
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index fdef27a84d71..6fede2f00104 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -528,33 +528,29 @@ static inline void kb_wait(void)
}
}
-static void vmxoff_nmi(int cpu, struct pt_regs *regs)
-{
- cpu_emergency_vmxoff();
-}
+static inline void nmi_shootdown_cpus_on_restart(void);
-/* Use NMIs as IPIs to tell all CPUs to disable virtualization */
-static void emergency_vmx_disable_all(void)
+static void emergency_reboot_disable_virtualization(void)
{
/* Just make sure we won't change CPUs while doing this */
local_irq_disable();
/*
- * Disable VMX on all CPUs before rebooting, otherwise we risk hanging
- * the machine, because the CPU blocks INIT when it's in VMX root.
+ * Disable virtualization on all CPUs before rebooting to avoid hanging
+ * the system, as VMX and SVM block INIT when running in the host.
*
* We can't take any locks and we may be on an inconsistent state, so
- * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt.
+ * use NMIs as IPIs to tell the other CPUs to disable VMX/SVM and halt.
*
- * Do the NMI shootdown even if VMX if off on _this_ CPU, as that
- * doesn't prevent a different CPU from being in VMX root operation.
+ * Do the NMI shootdown even if virtualization is off on _this_ CPU, as
+ * other CPUs may have virtualization enabled.
*/
- if (cpu_has_vmx()) {
- /* Safely force _this_ CPU out of VMX root operation. */
- __cpu_emergency_vmxoff();
+ if (cpu_has_vmx() || cpu_has_svm(NULL)) {
+ /* Safely force _this_ CPU out of VMX/SVM operation. */
+ cpu_emergency_disable_virtualization();
- /* Halt and exit VMX root operation on the other CPUs. */
- nmi_shootdown_cpus(vmxoff_nmi);
+ /* Disable VMX/SVM and halt on other CPUs. */
+ nmi_shootdown_cpus_on_restart();
}
}
@@ -591,7 +587,7 @@ static void native_machine_emergency_restart(void)
unsigned short mode;
if (reboot_emergency)
- emergency_vmx_disable_all();
+ emergency_reboot_disable_virtualization();
tboot_shutdown(TB_SHUTDOWN_REBOOT);
@@ -796,6 +792,17 @@ void machine_crash_shutdown(struct pt_regs *regs)
/* This is the CPU performing the emergency shutdown work. */
int crashing_cpu = -1;
+/*
+ * Disable virtualization, i.e. VMX or SVM, to ensure INIT is recognized during
+ * reboot. VMX blocks INIT if the CPU is post-VMXON, and SVM blocks INIT if
+ * GIF=0, i.e. if the crash occurred between CLGI and STGI.
+ */
+void cpu_emergency_disable_virtualization(void)
+{
+ cpu_emergency_vmxoff();
+ cpu_emergency_svm_disable();
+}
+
#if defined(CONFIG_SMP)
static nmi_shootdown_cb shootdown_callback;
@@ -818,7 +825,14 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
return NMI_HANDLED;
local_irq_disable();
- shootdown_callback(cpu, regs);
+ if (shootdown_callback)
+ shootdown_callback(cpu, regs);
+
+ /*
+ * Prepare the CPU for reboot _after_ invoking the callback so that the
+ * callback can safely use virtualization instructions, e.g. VMCLEAR.
+ */
+ cpu_emergency_disable_virtualization();
atomic_dec(&waiting_for_crash_ipi);
/* Assume hlt works */
@@ -829,18 +843,32 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
return NMI_HANDLED;
}
-/*
- * Halt all other CPUs, calling the specified function on each of them
+/**
+ * nmi_shootdown_cpus - Stop other CPUs via NMI
+ * @callback: Optional callback to be invoked from the NMI handler
+ *
+ * The NMI handler on the remote CPUs invokes @callback, if not
+ * NULL, first and then disables virtualization to ensure that
+ * INIT is recognized during reboot.
*
- * This function can be used to halt all other CPUs on crash
- * or emergency reboot time. The function passed as parameter
- * will be called inside a NMI handler on all CPUs.
+ * nmi_shootdown_cpus() can only be invoked once. After the first
+ * invocation all other CPUs are stuck in crash_nmi_callback() and
+ * cannot respond to a second NMI.
*/
void nmi_shootdown_cpus(nmi_shootdown_cb callback)
{
unsigned long msecs;
+
local_irq_disable();
+ /*
+ * Avoid certain doom if a shootdown already occurred; re-registering
+ * the NMI handler will cause list corruption, modifying the callback
+ * will do who knows what, etc...
+ */
+ if (WARN_ON_ONCE(crash_ipi_issued))
+ return;
+
/* Make a note of crashing cpu. Will be used in NMI callback. */
crashing_cpu = safe_smp_processor_id();
@@ -868,7 +896,17 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
msecs--;
}
- /* Leave the nmi callback set */
+ /*
+ * Leave the nmi callback set, shootdown is a one-time thing. Clearing
+ * the callback could result in a NULL pointer dereference if a CPU
+ * (finally) responds after the timeout expires.
+ */
+}
+
+static inline void nmi_shootdown_cpus_on_restart(void)
+{
+ if (!crash_ipi_issued)
+ nmi_shootdown_cpus(NULL);
}
/*
@@ -898,6 +936,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
/* No other CPUs to shoot down */
}
+static inline void nmi_shootdown_cpus_on_restart(void) { }
+
void run_crash_ipi_callback(struct pt_regs *regs)
{
}
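
The reboot.c changes hinge on making the NMI shootdown a strict one-shot operation that the emergency-reboot path can reuse with a NULL callback. A standalone C sketch of that guard pattern follows; the symbol names mirror the kernel's, but the bodies are illustrative stubs, not the real implementation.

    #include <stdbool.h>
    #include <stdio.h>

    static bool crash_ipi_issued;

    static void nmi_shootdown_cpus_sketch(void (*callback)(int cpu))
    {
        /*
         * A second shootdown would re-register the NMI handler
         * (corrupting the handler list) and retarget the callback,
         * so it must be refused outright.
         */
        if (crash_ipi_issued) {
            fprintf(stderr, "shootdown already issued, refusing\n");
            return;
        }
        crash_ipi_issued = true;

        if (callback)
            callback(0);
        /* ... send NMI IPIs and wait for the other CPUs to halt ... */
    }

    /*
     * The reboot path reuses the crash machinery with a NULL callback:
     * remote CPUs then only disable VMX/SVM and halt.
     */
    static void nmi_shootdown_cpus_on_restart_sketch(void)
    {
        if (!crash_ipi_issued)
            nmi_shootdown_cpus_sketch(NULL);
    }
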
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index b8d4e9c3c070..31ee37f87b2b 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -31,7 +31,7 @@
#include <asm/mce.h>
#include <asm/trace/irq_vectors.h>
#include <asm/kexec.h>
-#include <asm/virtext.h>
+#include <asm/reboot.h>
/*
* Some notes on x86 processor bugs affecting SMP operation:
@@ -121,7 +121,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
return NMI_HANDLED;
- cpu_emergency_vmxoff();
+ cpu_emergency_disable_virtualization();
stop_this_cpu(NULL);
return NMI_HANDLED;
@@ -134,7 +134,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
asmlinkage __visible void smp_reboot_interrupt(void)
{
ipi_entering_ack_irq();
- cpu_emergency_vmxoff();
+ cpu_emergency_disable_virtualization();
stop_this_cpu(NULL);
irq_exit();
}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8367bd7a9a81..d6a8efff9c61 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -99,6 +99,17 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
+struct mwait_cpu_dead {
+ unsigned int control;
+ unsigned int status;
+};
+
+/*
+ * Cache line aligned data for mwait_play_dead(). Separate on purpose so
+ * that it's unlikely to be touched by other CPUs.
+ */
+static DEFINE_PER_CPU_ALIGNED(struct mwait_cpu_dead, mwait_cpu_dead);
+
/* Logical package management. We might want to allocate that dynamically */
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
@@ -224,6 +235,8 @@ static void notrace start_secondary(void *unused)
#endif
load_current_idt();
cpu_init();
+ fpu__init_cpu();
+ rcu_cpu_starting(raw_smp_processor_id());
x86_cpuinit.early_percpu_clock_init();
preempt_disable();
smp_callin();
@@ -1674,10 +1687,10 @@ static bool wakeup_cpu0(void)
*/
static inline void mwait_play_dead(void)
{
+ struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead);
unsigned int eax, ebx, ecx, edx;
unsigned int highest_cstate = 0;
unsigned int highest_subcstate = 0;
- void *mwait_ptr;
int i;
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
@@ -1712,13 +1725,6 @@ static inline void mwait_play_dead(void)
(highest_subcstate - 1);
}
- /*
- * This should be a memory location in a cache line which is
- * unlikely to be touched by other processors. The actual
- * content is immaterial as it is not actually modified in any way.
- */
- mwait_ptr = &current_thread_info()->flags;
-
wbinvd();
while (1) {
@@ -1730,9 +1736,9 @@ static inline void mwait_play_dead(void)
* case where we return around the loop.
*/
mb();
- clflush(mwait_ptr);
+ clflush(md);
mb();
- __monitor(mwait_ptr, 0, 0);
+ __monitor(md, 0, 0);
mb();
__mwait(eax, 0);
/*
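
The smpboot.c hunks replace an arbitrary MONITOR target (thread_info->flags, which other CPUs write for unrelated reasons) with a dedicated cache-line-aligned per-CPU structure. A minimal sketch of that layout, assuming a 64-byte cache line and a fixed CPU count; the kernel uses its per-CPU machinery rather than a plain array:

    #define CACHE_LINE_SKETCH 64
    #define NR_CPUS_SKETCH    8

    struct mwait_cpu_dead_sketch {
        unsigned int control;
        unsigned int status;
    } __attribute__((aligned(CACHE_LINE_SKETCH)));

    /*
     * One line per CPU: a dead CPU arms MONITOR on its own line, so only
     * a deliberate write to that line wakes it from MWAIT.
     */
    static struct mwait_cpu_dead_sketch mwait_dead_sketch[NR_CPUS_SKETCH];
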
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
index 653b7f617b61..fff04d285976 100644
--- a/arch/x86/kernel/sysfb_efi.c
+++ b/arch/x86/kernel/sysfb_efi.c
@@ -264,6 +264,22 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
"Lenovo ideapad D330-10IGM"),
},
},
+ {
+ /* Lenovo IdeaPad Duet 3 10IGL5 with 1200x1920 portrait screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+ "IdeaPad Duet 3 10IGL5"),
+ },
+ },
+ {
+ /* Lenovo Yoga Book X91F / X91L */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ /* Non-exact match to match both F and L versions */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
+ },
+ },
{},
};
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index b934f9f68a16..c85634152d30 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -90,22 +90,27 @@ static struct orc_entry *orc_find(unsigned long ip);
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
struct ftrace_ops *ops;
- unsigned long caller;
+ unsigned long tramp_addr, offset;
ops = ftrace_ops_trampoline(ip);
if (!ops)
return NULL;
+ /* Set tramp_addr to the start of the code copied by the trampoline */
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
- caller = (unsigned long)ftrace_regs_call;
+ tramp_addr = (unsigned long)ftrace_regs_caller;
else
- caller = (unsigned long)ftrace_call;
+ tramp_addr = (unsigned long)ftrace_caller;
+
+ /* Now point tramp_addr at the location within the trampoline that ip is at */
+ offset = ip - ops->trampoline;
+ tramp_addr += offset;
/* Prevent unlikely recursion */
- if (ip == caller)
+ if (ip == tramp_addr)
return NULL;
- return orc_find(caller);
+ return orc_find(tramp_addr);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
@@ -682,7 +687,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
/* Otherwise, skip ahead to the user-specified starting frame: */
while (!unwind_done(state) &&
(!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
- state->sp < (unsigned long)first_frame))
+ state->sp <= (unsigned long)first_frame))
unwind_next_frame(state);
return;
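
The unwind_orc.c fix stops looking up a single fixed label and instead maps the faulting ip to the equivalent offset inside the code the trampoline was copied from, so orc_find() is queried with an address it actually covers. A self-contained illustration with made-up addresses:

    #include <stdio.h>

    static unsigned long orc_lookup_addr(unsigned long ip,
                                         unsigned long trampoline_base,
                                         unsigned long copied_code_base)
    {
        /* ip's offset into the trampoline equals its offset into the
         * original (copied-from) code. */
        return copied_code_base + (ip - trampoline_base);
    }

    int main(void)
    {
        /* ip is 0x30 bytes into a trampoline copied from 0x1000 */
        printf("%#lx\n", orc_lookup_addr(0x9030, 0x9000, 0x1000)); /* 0x1030 */
        return 0;
    }
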
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index fae5b00cbccf..f51fc7fde3a0 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -722,8 +722,9 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
switch (opc1) {
case 0xeb: /* jmp 8 */
case 0xe9: /* jmp 32 */
- case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */
break;
+ case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */
+ goto setup;
case 0xe8: /* call relative */
branch_clear_offset(auprobe, insn);
@@ -753,6 +754,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
return -ENOTSUPP;
}
+setup:
auprobe->branch.opc1 = opc1;
auprobe->branch.ilen = insn->length;
auprobe->branch.offs = insn->immediate.value;
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 1afe211d7a7c..09506e492f1d 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -21,6 +21,8 @@
#define LOAD_OFFSET __START_KERNEL_map
#endif
+#define RUNTIME_DISCARD_EXIT
+
#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
@@ -381,7 +383,7 @@ SECTIONS
.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
__brk_base = .;
. += 64 * 1024; /* 64k alignment slop space */
- *(.brk_reservation) /* areas brk users have reserved */
+ *(.bss..brk) /* areas brk users have reserved */
__brk_limit = .;
}
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 62c7f771a7cf..db3838667466 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -759,6 +759,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
g_phys_as = phys_as;
entry->eax = g_phys_as | (virt_as << 8);
+ entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
entry->edx = 0;
entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
@@ -791,6 +792,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ecx = entry->edx = 0;
break;
case 0x8000001a:
+ entry->eax &= GENMASK(2, 0);
+ entry->ebx = entry->ecx = entry->edx = 0;
+ break;
case 0x8000001e:
break;
/*Add support for Centaur's CPUID instruction*/
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 7dec43b2c420..defae8082789 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -53,6 +53,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
[CPUID_7_ECX] = { 7, 0, CPUID_ECX},
[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
[CPUID_7_EDX] = { 7, 0, CPUID_EDX},
+ [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
};
static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index ea48a2fb1677..1a9fa2903852 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -776,8 +776,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
ctxt->mode, linear);
}
-static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
- enum x86emul_mode mode)
+static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
{
ulong linear;
int rc;
@@ -787,41 +786,71 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
if (ctxt->op_bytes != sizeof(unsigned long))
addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
- rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
+ rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->_eip = addr.ea;
return rc;
}
+static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
+{
+ u64 efer;
+ struct desc_struct cs;
+ u16 selector;
+ u32 base3;
+
+ ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+
+ if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
+ /* Real mode. CPU must not have long mode active */
+ if (efer & EFER_LMA)
+ return X86EMUL_UNHANDLEABLE;
+ ctxt->mode = X86EMUL_MODE_REAL;
+ return X86EMUL_CONTINUE;
+ }
+
+ if (ctxt->eflags & X86_EFLAGS_VM) {
+ /* Protected/VM86 mode. CPU must not have long mode active */
+ if (efer & EFER_LMA)
+ return X86EMUL_UNHANDLEABLE;
+ ctxt->mode = X86EMUL_MODE_VM86;
+ return X86EMUL_CONTINUE;
+ }
+
+ if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
+ return X86EMUL_UNHANDLEABLE;
+
+ if (efer & EFER_LMA) {
+ if (cs.l) {
+ /* Proper long mode */
+ ctxt->mode = X86EMUL_MODE_PROT64;
+ } else if (cs.d) {
+ /* 32-bit compatibility mode */
+ ctxt->mode = X86EMUL_MODE_PROT32;
+ } else {
+ ctxt->mode = X86EMUL_MODE_PROT16;
+ }
+ } else {
+ /* Legacy 32-bit / 16-bit mode */
+ ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+ }
+
+ return X86EMUL_CONTINUE;
+}
+
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
- return assign_eip(ctxt, dst, ctxt->mode);
+ return assign_eip(ctxt, dst);
}
-static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
- const struct desc_struct *cs_desc)
+static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
{
- enum x86emul_mode mode = ctxt->mode;
- int rc;
+ int rc = emulator_recalc_and_set_mode(ctxt);
-#ifdef CONFIG_X86_64
- if (ctxt->mode >= X86EMUL_MODE_PROT16) {
- if (cs_desc->l) {
- u64 efer = 0;
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
- ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
- if (efer & EFER_LMA)
- mode = X86EMUL_MODE_PROT64;
- } else
- mode = X86EMUL_MODE_PROT32; /* temporary value */
- }
-#endif
- if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
- mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
- rc = assign_eip(ctxt, dst, mode);
- if (rc == X86EMUL_CONTINUE)
- ctxt->mode = mode;
- return rc;
+ return assign_eip(ctxt, dst);
}
static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -1753,16 +1782,6 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
case VCPU_SREG_TR:
if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
goto exception;
- if (!seg_desc.p) {
- err_vec = NP_VECTOR;
- goto exception;
- }
- old_desc = seg_desc;
- seg_desc.type |= 2; /* busy */
- ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
- sizeof(seg_desc), &ctxt->exception);
- if (ret != X86EMUL_CONTINUE)
- return ret;
break;
case VCPU_SREG_LDTR:
if (seg_desc.s || seg_desc.type != 2)
@@ -1800,8 +1819,17 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
if (ret != X86EMUL_CONTINUE)
return ret;
if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
- ((u64)base3 << 32), ctxt))
- return emulate_gp(ctxt, 0);
+ ((u64)base3 << 32), ctxt))
+ return emulate_gp(ctxt, err_code);
+ }
+
+ if (seg == VCPU_SREG_TR) {
+ old_desc = seg_desc;
+ seg_desc.type |= 2; /* busy */
+ ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
+ sizeof(seg_desc), &ctxt->exception);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
}
load:
ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
@@ -2021,7 +2049,7 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- if (ctxt->modrm_reg == VCPU_SREG_SS)
+ if (seg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
if (ctxt->op_bytes > 2)
rsp_increment(ctxt, ctxt->op_bytes - 2);
@@ -2238,7 +2266,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
+ rc = assign_eip_far(ctxt, ctxt->src.val);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
@@ -2319,7 +2347,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = assign_eip_far(ctxt, eip, &new_desc);
+ rc = assign_eip_far(ctxt, eip);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
@@ -2954,6 +2982,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ctxt->_eip = rdx;
+ ctxt->mode = usermode;
*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
return X86EMUL_CONTINUE;
@@ -3550,7 +3579,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
+ rc = assign_eip_far(ctxt, ctxt->src.val);
if (rc != X86EMUL_CONTINUE)
goto fail;
@@ -3697,11 +3726,25 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt)
static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
- if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
+ int cr_num = ctxt->modrm_reg;
+ int r;
+
+ if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
return emulate_gp(ctxt, 0);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
+
+ if (cr_num == 0) {
+ /*
+ * CR0 write might have updated CR0.PE and/or CR0.PG
+ * which can affect the CPU's execution mode.
+ */
+ r = emulator_recalc_and_set_mode(ctxt);
+ if (r != X86EMUL_CONTINUE)
+ return r;
+ }
+
return X86EMUL_CONTINUE;
}
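
The emulator changes center on emulator_recalc_and_set_mode(), which rederives the execution mode from architectural state instead of trusting a stale ctxt->mode. The decision table is compact enough to restate as plain C; the enum and flag parameters below are illustrative stand-ins for the kernel's types:

    #include <stdbool.h>
    #include <stdio.h>

    enum mode { REAL, VM86, PROT16, PROT32, PROT64, INVALID };

    static enum mode recalc_mode(bool cr0_pe, bool eflags_vm, bool efer_lma,
                                 bool cs_l, bool cs_d)
    {
        if (!cr0_pe)
            return efer_lma ? INVALID : REAL;  /* LMA without PE is bogus */
        if (eflags_vm)
            return efer_lma ? INVALID : VM86;
        if (efer_lma)
            return cs_l ? PROT64 : (cs_d ? PROT32 : PROT16);
        return cs_d ? PROT32 : PROT16;         /* legacy protected mode */
    }

    int main(void)
    {
        printf("%d\n", recalc_mode(true, false, true, true, false)); /* PROT64 */
        return 0;
    }

This is why em_cr_write() and em_sysexit() can simply refresh the mode after the fact: it is now a pure function of CR0, EFLAGS, EFER and CS.
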
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index ca66459a2e89..8922f63f5566 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -309,6 +309,9 @@ static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
struct kvm_lapic_irq irq;
int ret, vector;
+ if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
+ return -EINVAL;
+
if (sint >= ARRAY_SIZE(synic->sint))
return -EINVAL;
@@ -552,10 +555,12 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
stimer_cleanup(stimer);
stimer->count = count;
- if (stimer->count == 0)
- stimer->config.enable = 0;
- else if (stimer->config.auto_enable)
- stimer->config.enable = 1;
+ if (!host) {
+ if (stimer->count == 0)
+ stimer->config.enable = 0;
+ else if (stimer->config.auto_enable)
+ stimer->config.enable = 1;
+ }
if (stimer->config.enable)
stimer_mark_pending(stimer, false);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3696b4de9d99..319ed873a111 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -955,6 +955,10 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
*r = -1;
if (irq->shorthand == APIC_DEST_SELF) {
+ if (KVM_BUG_ON(!src, kvm)) {
+ *r = 0;
+ return true;
+ }
*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
return true;
}
@@ -2240,13 +2244,17 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
u32 reg = kvm_lapic_get_reg(apic, lvt_type);
int vector, mode, trig_mode;
+ int r;
if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
vector = reg & APIC_VECTOR_MASK;
mode = reg & APIC_MODE_MASK;
trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
- return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
- NULL);
+
+ r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
+ if (r && lvt_type == APIC_LVTPC)
+ kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
+ return r;
}
return 0;
}
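
The lapic.c change mimics hardware behaviour for the performance-counter LVT entry: once a PMI is delivered through LVTPC, the mask bit is set so the interrupt cannot re-fire until the guest explicitly unmasks it. A sketch of that rule, with a stubbed delivery result:

    #define APIC_LVT_MASKED_SKETCH (1u << 16)

    static unsigned int deliver_lvtpc_sketch(unsigned int *lvtpc_reg)
    {
        unsigned int delivered = 1;  /* stand-in for __apic_accept_irq() */

        /* Real APICs auto-mask LVTPC on PMI delivery; do the same. */
        if (delivered)
            *lvtpc_reg |= APIC_LVT_MASKED_SKETCH;
        return delivered;
    }
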
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 125970286f28..e9444e202c33 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -47,6 +47,7 @@
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
+#include <asm/cpu_device_id.h>
#include <asm/virtext.h>
#include "trace.h"
@@ -4179,9 +4180,9 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
msr->data = 0;
switch (msr->index) {
- case MSR_F10H_DECFG:
- if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
- msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
+ case MSR_AMD64_DE_CFG:
+ if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
+ msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
break;
default:
return 1;
@@ -4283,7 +4284,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = 0x1E;
}
break;
- case MSR_F10H_DECFG:
+ case MSR_AMD64_DE_CFG:
msr_info->data = svm->msr_decfg;
break;
default:
@@ -4450,7 +4451,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_VM_IGNNE:
vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break;
- case MSR_F10H_DECFG: {
+ case MSR_AMD64_DE_CFG: {
struct kvm_msr_entry msr_entry;
msr_entry.index = msr->index;
@@ -5137,8 +5138,6 @@ static void svm_set_irq(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- BUG_ON(!(gif_set(svm)));
-
trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
++vcpu->stat.irq_injections;
@@ -6247,7 +6246,8 @@ out:
static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
-
+ if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
+ vcpu->arch.at_instruction_boundary = true;
}
static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 9f61ae64b727..cd96c31fe2bb 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -11,6 +11,7 @@
#include "mmu.h"
#include "nested.h"
#include "trace.h"
+#include "vmx.h"
#include "x86.h"
static bool __read_mostly enable_shadow_vmcs = 1;
@@ -1060,7 +1061,7 @@ static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
/* reserved */
BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
- u64 vmx_basic = vmx->nested.msrs.basic;
+ u64 vmx_basic = vmcs_config.nested.basic;
if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
return -EINVAL;
@@ -1083,36 +1084,42 @@ static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
return 0;
}
-static int
-vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index,
+ u32 **low, u32 **high)
{
- u64 supported;
- u32 *lowp, *highp;
-
switch (msr_index) {
case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
- lowp = &vmx->nested.msrs.pinbased_ctls_low;
- highp = &vmx->nested.msrs.pinbased_ctls_high;
+ *low = &msrs->pinbased_ctls_low;
+ *high = &msrs->pinbased_ctls_high;
break;
case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
- lowp = &vmx->nested.msrs.procbased_ctls_low;
- highp = &vmx->nested.msrs.procbased_ctls_high;
+ *low = &msrs->procbased_ctls_low;
+ *high = &msrs->procbased_ctls_high;
break;
case MSR_IA32_VMX_TRUE_EXIT_CTLS:
- lowp = &vmx->nested.msrs.exit_ctls_low;
- highp = &vmx->nested.msrs.exit_ctls_high;
+ *low = &msrs->exit_ctls_low;
+ *high = &msrs->exit_ctls_high;
break;
case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
- lowp = &vmx->nested.msrs.entry_ctls_low;
- highp = &vmx->nested.msrs.entry_ctls_high;
+ *low = &msrs->entry_ctls_low;
+ *high = &msrs->entry_ctls_high;
break;
case MSR_IA32_VMX_PROCBASED_CTLS2:
- lowp = &vmx->nested.msrs.secondary_ctls_low;
- highp = &vmx->nested.msrs.secondary_ctls_high;
+ *low = &msrs->secondary_ctls_low;
+ *high = &msrs->secondary_ctls_high;
break;
default:
BUG();
}
+}
+
+static int
+vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+{
+ u32 *lowp, *highp;
+ u64 supported;
+
+ vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp);
supported = vmx_control_msr(*lowp, *highp);
@@ -1124,6 +1131,7 @@ vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
return -EINVAL;
+ vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp);
*lowp = data;
*highp = data >> 32;
return 0;
@@ -1137,10 +1145,8 @@ static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
/* reserved */
GENMASK_ULL(13, 9) | BIT_ULL(31);
- u64 vmx_misc;
-
- vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
- vmx->nested.msrs.misc_high);
+ u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low,
+ vmcs_config.nested.misc_high);
if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
return -EINVAL;
@@ -1168,10 +1174,8 @@ static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
- u64 vmx_ept_vpid_cap;
-
- vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
- vmx->nested.msrs.vpid_caps);
+ u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps,
+ vmcs_config.nested.vpid_caps);
/* Every bit is either reserved or a feature bit. */
if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
@@ -1182,20 +1186,21 @@ static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
return 0;
}
-static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+static u64 *vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index)
{
- u64 *msr;
-
switch (msr_index) {
case MSR_IA32_VMX_CR0_FIXED0:
- msr = &vmx->nested.msrs.cr0_fixed0;
- break;
+ return &msrs->cr0_fixed0;
case MSR_IA32_VMX_CR4_FIXED0:
- msr = &vmx->nested.msrs.cr4_fixed0;
- break;
+ return &msrs->cr4_fixed0;
default:
BUG();
}
+}
+
+static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+{
+ const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index);
/*
* 1 bits (which indicates bits which "must-be-1" during VMX operation)
@@ -1204,7 +1209,7 @@ static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
if (!is_bitwise_subset(data, *msr, -1ULL))
return -EINVAL;
- *msr = data;
+ *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data;
return 0;
}
@@ -1265,7 +1270,7 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
vmx->nested.msrs.vmcs_enum = data;
return 0;
case MSR_IA32_VMX_VMFUNC:
- if (data & ~vmx->nested.msrs.vmfunc_controls)
+ if (data & ~vmcs_config.nested.vmfunc_controls)
return -EINVAL;
vmx->nested.msrs.vmfunc_controls = data;
return 0;
@@ -2068,8 +2073,8 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
* EXEC CONTROLS
*/
exec_control = vmx_exec_control(vmx); /* L0's desires */
- exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
- exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
+ exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
+ exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
exec_control &= ~CPU_BASED_TPR_SHADOW;
exec_control |= vmcs12->cpu_based_vm_exec_control;
@@ -2454,7 +2459,7 @@ static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
return -EINVAL;
if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
- nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING)))
+ nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
return -EINVAL;
return 0;
@@ -2774,7 +2779,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12,
u32 *exit_qual)
{
- bool ia32e;
+ bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE);
*exit_qual = ENTRY_FAIL_DEFAULT;
@@ -2791,6 +2796,13 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
return -EINVAL;
}
+ if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG))
+ return -EINVAL;
+
+ if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) ||
+ CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG)))
+ return -EINVAL;
+
/*
* If the load IA32_EFER VM-entry control is 1, the following checks
* are performed on the field for the IA32_EFER MSR:
@@ -2802,7 +2814,6 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
*/
if (to_vmx(vcpu)->nested.nested_run_pending &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
- ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
@@ -2859,35 +2870,8 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
vmx->loaded_vmcs->host_state.cr4 = cr4;
}
- asm(
- "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
- "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
- "je 1f \n\t"
- __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
- "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
- "1: \n\t"
- "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
-
- /* Check if vmlaunch or vmresume is needed */
- "cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
-
- /*
- * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
- * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
- * Valid. vmx_vmenter() directly "returns" RFLAGS, and so the
- * results of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
- */
- "call vmx_vmenter\n\t"
-
- CC_SET(be)
- : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
- : [HOST_RSP]"r"((unsigned long)HOST_RSP),
- [loaded_vmcs]"r"(vmx->loaded_vmcs),
- [launched]"i"(offsetof(struct loaded_vmcs, launched)),
- [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
- [wordsize]"i"(sizeof(ulong))
- : "memory"
- );
+ vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
+ __vmx_vcpu_run_flags(vmx));
if (vmx->msr_autoload.host.nr)
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
@@ -3061,14 +3045,16 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
u32 exit_qual;
evaluate_pending_interrupts = exec_controls_get(vmx) &
- (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
+ (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
- if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
+ if (!vmx->nested.nested_run_pending ||
+ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
if (kvm_mpx_supported() &&
- !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+ (!vmx->nested.nested_run_pending ||
+ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
/*
@@ -3110,7 +3096,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
}
enter_guest_mode(vcpu);
- if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+ if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
vcpu->arch.tsc_offset += vmcs12->tsc_offset;
if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
@@ -3174,7 +3160,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
* 26.7 "VM-entry failures during or after loading guest state".
*/
vmentry_fail_vmexit_guest_mode:
- if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+ if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
leave_guest_mode(vcpu);
@@ -3287,8 +3273,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
*/
if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
- !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) &&
- !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) &&
+ !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_NMI_WINDOW_EXITING) &&
+ !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_INTR_WINDOW_EXITING) &&
(vmcs12->guest_rflags & X86_EFLAGS_IF))) {
vmx->nested.nested_run_pending = 0;
return kvm_vcpu_halt(vcpu);
@@ -3447,7 +3433,16 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
u32 intr_info = nr | INTR_INFO_VALID_MASK;
if (vcpu->arch.exception.has_error_code) {
- vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
+ /*
+ * Intel CPUs do not generate error codes with bits 31:16 set,
+ * and more importantly VMX disallows setting bits 31:16 in the
+ * injected error code for VM-Entry. Drop the bits to mimic
+ * hardware and avoid inducing failure on nested VM-Entry if L1
+ * chooses to inject the exception back to L2. AMD CPUs _do_
+ * generate "full" 32-bit error codes, so KVM allows userspace
+ * to inject exception error codes with bits 31:16 set.
+ */
+ vmcs12->vm_exit_intr_error_code = (u16)vcpu->arch.exception.error_code;
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
}
@@ -3782,14 +3777,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
nested_vmx_abort(vcpu,
VMX_ABORT_SAVE_GUEST_MSR_FAIL);
}
-
- /*
- * Drop what we picked up for L2 via vmx_complete_interrupts. It is
- * preserved above and would only end up incorrectly in L1.
- */
- vcpu->arch.nmi_injected = false;
- kvm_clear_exception_queue(vcpu);
- kvm_clear_interrupt_queue(vcpu);
}
/*
@@ -4092,7 +4079,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
if (nested_cpu_has_preemption_timer(vmcs12))
hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
- if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+ if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
if (likely(!vmx->fail)) {
@@ -4124,8 +4111,30 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
WARN_ON_ONCE(nested_early_check);
}
+ /*
+ * Drop events/exceptions that were queued for re-injection to L2
+ * (picked up via vmx_complete_interrupts()), as well as exceptions
+ * that were pending for L2. Note, this must NOT be hoisted above
+ * prepare_vmcs12(), events/exceptions queued for re-injection need to
+ * be captured in vmcs12 (see vmcs12_save_pending_event()).
+ */
+ vcpu->arch.nmi_injected = false;
+ kvm_clear_exception_queue(vcpu);
+ kvm_clear_interrupt_queue(vcpu);
+
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+ /*
+ * If IBRS is advertised to the vCPU, KVM must flush the indirect
+ * branch predictors when transitioning from L2 to L1, as L1 expects
+ * hardware (KVM in this case) to provide separate predictor modes.
+ * Bare metal isolates VMX root (host) from VMX non-root (guest), but
+ * doesn't isolate different VMCSs, i.e. in this case, doesn't provide
+ * separate modes for L2 vs L1.
+ */
+ if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+ indirect_branch_prediction_barrier();
+
/* Update any VMCS fields that might have changed while L2 ran */
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
@@ -5384,10 +5393,10 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
return false;
case EXIT_REASON_TRIPLE_FAULT:
return true;
- case EXIT_REASON_PENDING_INTERRUPT:
- return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
+ case EXIT_REASON_INTERRUPT_WINDOW:
+ return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
case EXIT_REASON_NMI_WINDOW:
- return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
+ return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
case EXIT_REASON_TASK_SWITCH:
return true;
case EXIT_REASON_CPUID:
@@ -5877,8 +5886,8 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
msrs->procbased_ctls_low =
CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
msrs->procbased_ctls_high &=
- CPU_BASED_VIRTUAL_INTR_PENDING |
- CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
+ CPU_BASED_INTR_WINDOW_EXITING |
+ CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING |
@@ -5924,7 +5933,8 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
SECONDARY_EXEC_RDRAND_EXITING |
SECONDARY_EXEC_ENABLE_INVPCID |
SECONDARY_EXEC_RDSEED_EXITING |
- SECONDARY_EXEC_XSAVES;
+ SECONDARY_EXEC_XSAVES |
+ SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
/*
* We can emulate "VMCS shadowing," even if the hardware
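
A recurring pattern in these nested.c hunks: userspace-supplied VMX capability MSRs are now validated against what KVM itself supports (the global vmcs_config.nested template) and only then written into the vCPU's private copy, so a later restore cannot re-enable bits an earlier one removed. A simplified sketch of that flow; the real code also honors the "must-be-1" low halves of the control MSRs:

    #include <stdint.h>

    struct caps_sketch { uint32_t low, high; };

    static const struct caps_sketch kvm_caps = { 0x16, 0xffff }; /* stand-in */

    static int restore_ctl_sketch(struct caps_sketch *vcpu_caps, uint64_t data)
    {
        uint64_t supported = ((uint64_t)kvm_caps.high << 32) | kvm_caps.low;

        if (data & ~supported)   /* userspace may only clear features */
            return -1;

        vcpu_caps->low  = (uint32_t)data;
        vcpu_caps->high = (uint32_t)(data >> 32);
        return 0;
    }
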
diff --git a/arch/x86/kvm/vmx/run_flags.h b/arch/x86/kvm/vmx/run_flags.h
new file mode 100644
index 000000000000..edc3f16cc189
--- /dev/null
+++ b/arch/x86/kvm/vmx/run_flags.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_VMX_RUN_FLAGS_H
+#define __KVM_X86_VMX_RUN_FLAGS_H
+
+#define VMX_RUN_VMRESUME (1 << 0)
+#define VMX_RUN_SAVE_SPEC_CTRL (1 << 1)
+
+#endif /* __KVM_X86_VMX_RUN_FLAGS_H */
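
The new header turns __vmx_vcpu_run()'s old boolean @launched argument into a small flags word, leaving room for the SPEC_CTRL handling added later in this series. A sketch of how a caller composes it (this mirrors __vmx_vcpu_run_flags() in the vmx.c hunk below):

    #include <stdbool.h>

    #define RUN_VMRESUME       (1u << 0)  /* VMX_RUN_VMRESUME */
    #define RUN_SAVE_SPEC_CTRL (1u << 1)  /* VMX_RUN_SAVE_SPEC_CTRL */

    static unsigned int build_run_flags(bool launched, bool spec_ctrl_passthrough)
    {
        unsigned int flags = 0;

        if (launched)
            flags |= RUN_VMRESUME;          /* VMCS launched once already */
        if (spec_ctrl_passthrough)
            flags |= RUN_SAVE_SPEC_CTRL;    /* guest may have changed the MSR */
        return flags;
    }
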
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index ca4252f81bf8..2850670c38bb 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -4,6 +4,7 @@
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
+#include "run_flags.h"
#define WORD_SIZE (BITS_PER_LONG / 8)
@@ -30,76 +31,11 @@
.text
/**
- * vmx_vmenter - VM-Enter the current loaded VMCS
- *
- * %RFLAGS.ZF: !VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME
- *
- * Returns:
- * %RFLAGS.CF is set on VM-Fail Invalid
- * %RFLAGS.ZF is set on VM-Fail Valid
- * %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
- *
- * Note that VMRESUME/VMLAUNCH fall-through and return directly if
- * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump
- * to vmx_vmexit.
- */
-ENTRY(vmx_vmenter)
- /* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
- je 2f
-
-1: vmresume
- ret
-
-2: vmlaunch
- ret
-
-3: cmpb $0, kvm_rebooting
- je 4f
- ret
-4: ud2
-
- .pushsection .fixup, "ax"
-5: jmp 3b
- .popsection
-
- _ASM_EXTABLE(1b, 5b)
- _ASM_EXTABLE(2b, 5b)
-
-ENDPROC(vmx_vmenter)
-
-/**
- * vmx_vmexit - Handle a VMX VM-Exit
- *
- * Returns:
- * %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
- *
- * This is vmx_vmenter's partner in crime. On a VM-Exit, control will jump
- * here after hardware loads the host's state, i.e. this is the destination
- * referred to by VMCS.HOST_RIP.
- */
-ENTRY(vmx_vmexit)
-#ifdef CONFIG_RETPOLINE
- ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE
- /* Preserve guest's RAX, it's used to stuff the RSB. */
- push %_ASM_AX
-
- /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
- FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
-
- /* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */
- or $1, %_ASM_AX
-
- pop %_ASM_AX
-.Lvmexit_skip_rsb:
-#endif
- ret
-ENDPROC(vmx_vmexit)
-
-/**
* __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
- * @vmx: struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
+ * @vmx: struct vcpu_vmx *
* @regs: unsigned long * (to guest registers)
- * @launched: %true if the VMCS has been launched
+ * @flags: VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH
+ * VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl
*
* Returns:
* 0 on VM-Exit, 1 on VM-Fail
@@ -118,24 +54,29 @@ ENTRY(__vmx_vcpu_run)
#endif
push %_ASM_BX
+ /* Save @vmx for SPEC_CTRL handling */
+ push %_ASM_ARG1
+
+ /* Save @flags for SPEC_CTRL handling */
+ push %_ASM_ARG3
+
/*
* Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and
* @regs is needed after VM-Exit to save the guest's register values.
*/
push %_ASM_ARG2
- /* Copy @launched to BL, _ASM_ARG3 is volatile. */
+ /* Copy @flags to BL, _ASM_ARG3 is volatile. */
mov %_ASM_ARG3B, %bl
- /* Adjust RSP to account for the CALL to vmx_vmenter(). */
- lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
+ lea (%_ASM_SP), %_ASM_ARG2
call vmx_update_host_rsp
/* Load @regs to RAX. */
mov (%_ASM_SP), %_ASM_AX
/* Check if vmlaunch or vmresume is needed */
- cmpb $0, %bl
+ testb $VMX_RUN_VMRESUME, %bl
/* Load guest registers. Don't clobber flags. */
mov VCPU_RBX(%_ASM_AX), %_ASM_BX
@@ -157,11 +98,25 @@ ENTRY(__vmx_vcpu_run)
/* Load guest RAX. This kills the @regs pointer! */
mov VCPU_RAX(%_ASM_AX), %_ASM_AX
- /* Enter guest mode */
- call vmx_vmenter
+ /* Check EFLAGS.ZF from 'testb' above */
+ jz .Lvmlaunch
- /* Jump on VM-Fail. */
- jbe 2f
+/*
+ * If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution resumes at
+ * the 'vmx_vmexit' label below.
+ */
+.Lvmresume:
+ vmresume
+ jmp .Lvmfail
+
+.Lvmlaunch:
+ vmlaunch
+ jmp .Lvmfail
+
+ _ASM_EXTABLE(.Lvmresume, .Lfixup)
+ _ASM_EXTABLE(.Lvmlaunch, .Lfixup)
+
+SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
/* Temporarily save guest's RAX. */
push %_ASM_AX
@@ -188,19 +143,21 @@ ENTRY(__vmx_vcpu_run)
mov %r15, VCPU_R15(%_ASM_AX)
#endif
- /* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
- xor %eax, %eax
+ /* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */
+ xor %ebx, %ebx
+.Lclear_regs:
/*
- * Clear all general purpose registers except RSP and RAX to prevent
+ * Clear all general purpose registers except RSP and RBX to prevent
* speculative use of the guest's values, even those that are reloaded
* via the stack. In theory, an L1 cache miss when restoring registers
* could lead to speculative execution with the guest's values.
* Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
- * free. RSP and RAX are exempt as RSP is restored by hardware during
+ * free. RSP and RBX are exempt as RSP is restored by hardware during
- * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
+ * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return
+ * value.
*/
-1: xor %ebx, %ebx
+ xor %eax, %eax
xor %ecx, %ecx
xor %edx, %edx
xor %esi, %esi
@@ -219,8 +176,32 @@ ENTRY(__vmx_vcpu_run)
/* "POP" @regs. */
add $WORD_SIZE, %_ASM_SP
- pop %_ASM_BX
+ /*
+ * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
+ * the first unbalanced RET after vmexit!
+ *
+ * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB
+ * entries and (in some cases) RSB underflow.
+ *
+ * eIBRS has its own protection against poisoned RSB, so it doesn't
+ * need the RSB filling sequence. But it does need to be enabled, and a
+ * single call to retire, before the first unbalanced RET.
+ */
+
+ FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\
+ X86_FEATURE_RSB_VMEXIT_LITE
+
+
+ pop %_ASM_ARG2 /* @flags */
+ pop %_ASM_ARG1 /* @vmx */
+
+ call vmx_spec_ctrl_restore_host
+
+ /* Put return value in AX */
+ mov %_ASM_BX, %_ASM_AX
+
+ pop %_ASM_BX
#ifdef CONFIG_X86_64
pop %r12
pop %r13
@@ -233,11 +214,20 @@ ENTRY(__vmx_vcpu_run)
pop %_ASM_BP
ret
- /* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */
-2: mov $1, %eax
- jmp 1b
+.Lfixup:
+ cmpb $0, kvm_rebooting
+ jne .Lvmfail
+ ud2
+.Lvmfail:
+ /* VM-Fail: set return value to 1 */
+ mov $1, %_ASM_BX
+ jmp .Lclear_regs
+
ENDPROC(__vmx_vcpu_run)
+
+.section .text, "ax"
+
/**
* vmread_error_trampoline - Trampoline from inline asm to vmread_error()
* @field: VMCS field encoding that failed
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4bd1bf6214ee..c93070829790 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -31,6 +31,7 @@
#include <asm/apic.h>
#include <asm/asm.h>
#include <asm/cpu.h>
+#include <asm/cpu_device_id.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
@@ -358,9 +359,9 @@ static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
if (!vmx->disable_fb_clear)
return;
- rdmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
+ msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
msr |= FB_CLEAR_DIS;
- wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
+ native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
/* Cache the MSR value to avoid reading it later */
vmx->msr_ia32_mcu_opt_ctrl = msr;
}
@@ -371,7 +372,7 @@ static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
return;
vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
- wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
+ native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
}
static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
@@ -862,6 +863,24 @@ static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
return true;
}
+unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
+{
+ unsigned int flags = 0;
+
+ if (vmx->loaded_vmcs->launched)
+ flags |= VMX_RUN_VMRESUME;
+
+ /*
+ * If writes to the SPEC_CTRL MSR aren't intercepted, the guest is free
+ * to change it directly without causing a vmexit. In that case read
+ * it after vmexit and store it in vmx->spec_ctrl.
+ */
+ if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL)))
+ flags |= VMX_RUN_SAVE_SPEC_CTRL;
+
+ return flags;
+}
+
static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
unsigned long entry, unsigned long exit)
{
@@ -1378,8 +1397,10 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
/*
* No indirect branch prediction barrier needed when switching
- * the active VMCS within a guest, e.g. on nested VM-Enter.
- * The L1 VMM can protect itself with retpolines, IBPB or IBRS.
+ * the active VMCS within a vCPU, unless IBRS is advertised to
+ * the vCPU. To minimize the number of IBPBs executed, KVM
+ * performs IBPB on nested VM-Exit (a single nested transition
+ * may switch the active VMCS multiple times).
*/
if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
indirect_branch_prediction_barrier();
@@ -1657,7 +1678,17 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
kvm_deliver_exception_payload(vcpu);
if (has_error_code) {
- vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
+ /*
+ * Despite the error code being architecturally defined as 32
+ * bits, and the VMCS field being 32 bits, Intel CPUs and thus
+ * VMX don't actually support setting bits 31:16. Hardware
+ * will (should) never provide a bogus error code, but AMD CPUs
+ * do generate error codes with bits 31:16 set, and so KVM's
+ * ABI lets userspace shove in arbitrary 32-bit values. Drop
+ * the upper bits to avoid VM-Fail; losing information that
+ * doesn't really exist is preferable to killing the VM.
+ */
+ vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)error_code);
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
}
@@ -1751,7 +1782,7 @@ static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
if (is_guest_mode(vcpu) &&
- (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
+ (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
return vcpu->arch.tsc_offset;
@@ -1769,7 +1800,7 @@ static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
* to the newly set TSC to get L2's TSC.
*/
if (is_guest_mode(vcpu) &&
- (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
+ (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
g_tsc_offset = vmcs12->tsc_offset;
trace_kvm_write_tsc_offset(vcpu->vcpu_id,
@@ -2396,7 +2427,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
CPU_BASED_CR3_STORE_EXITING |
CPU_BASED_UNCOND_IO_EXITING |
CPU_BASED_MOV_DR_EXITING |
- CPU_BASED_USE_TSC_OFFSETING |
+ CPU_BASED_USE_TSC_OFFSETTING |
CPU_BASED_MWAIT_EXITING |
CPU_BASED_MONITOR_EXITING |
CPU_BASED_INVLPG_EXITING |
@@ -3212,18 +3243,15 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
u32 ar;
- if (var->unusable || !var->present)
- ar = 1 << 16;
- else {
- ar = var->type & 15;
- ar |= (var->s & 1) << 4;
- ar |= (var->dpl & 3) << 5;
- ar |= (var->present & 1) << 7;
- ar |= (var->avl & 1) << 12;
- ar |= (var->l & 1) << 13;
- ar |= (var->db & 1) << 14;
- ar |= (var->g & 1) << 15;
- }
+ ar = var->type & 15;
+ ar |= (var->s & 1) << 4;
+ ar |= (var->dpl & 3) << 5;
+ ar |= (var->present & 1) << 7;
+ ar |= (var->avl & 1) << 12;
+ ar |= (var->l & 1) << 13;
+ ar |= (var->db & 1) << 14;
+ ar |= (var->g & 1) << 15;
+ ar |= (var->unusable || !var->present) << 16;
return ar;
}
@@ -4429,7 +4457,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
- exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING);
+ exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
}
static void enable_nmi_window(struct kvm_vcpu *vcpu)
@@ -4440,7 +4468,7 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
return;
}
- exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING);
+ exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
}
static void vmx_inject_irq(struct kvm_vcpu *vcpu)
@@ -4773,7 +4801,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
return 0;
}
-static int handle_external_interrupt(struct kvm_vcpu *vcpu)
+static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
++vcpu->stat.irq_exits;
return 1;
@@ -5045,21 +5073,6 @@ static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
vmcs_writel(GUEST_DR7, val);
}
-static int handle_cpuid(struct kvm_vcpu *vcpu)
-{
- return kvm_emulate_cpuid(vcpu);
-}
-
-static int handle_rdmsr(struct kvm_vcpu *vcpu)
-{
- return kvm_emulate_rdmsr(vcpu);
-}
-
-static int handle_wrmsr(struct kvm_vcpu *vcpu)
-{
- return kvm_emulate_wrmsr(vcpu);
-}
-
static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
{
kvm_apic_update_ppr(vcpu);
@@ -5068,7 +5081,7 @@ static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
static int handle_interrupt_window(struct kvm_vcpu *vcpu)
{
- exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING);
+ exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -5076,11 +5089,6 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
return 1;
}
-static int handle_halt(struct kvm_vcpu *vcpu)
-{
- return kvm_emulate_halt(vcpu);
-}
-
static int handle_vmcall(struct kvm_vcpu *vcpu)
{
return kvm_emulate_hypercall(vcpu);
@@ -5286,7 +5294,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
static int handle_nmi_window(struct kvm_vcpu *vcpu)
{
WARN_ON_ONCE(!enable_vnmi);
- exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING);
+ exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
++vcpu->stat.nmi_window_exits;
kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -5307,7 +5315,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending);
intr_window_requested = exec_controls_get(vmx) &
- CPU_BASED_VIRTUAL_INTR_PENDING;
+ CPU_BASED_INTR_WINDOW_EXITING;
while (vmx->emulation_required && count-- != 0) {
if (intr_window_requested && vmx_interrupt_allowed(vcpu))
@@ -5628,11 +5636,11 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_IO_INSTRUCTION] = handle_io,
[EXIT_REASON_CR_ACCESS] = handle_cr,
[EXIT_REASON_DR_ACCESS] = handle_dr,
- [EXIT_REASON_CPUID] = handle_cpuid,
- [EXIT_REASON_MSR_READ] = handle_rdmsr,
- [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
- [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
- [EXIT_REASON_HLT] = handle_halt,
+ [EXIT_REASON_CPUID] = kvm_emulate_cpuid,
+ [EXIT_REASON_MSR_READ] = kvm_emulate_rdmsr,
+ [EXIT_REASON_MSR_WRITE] = kvm_emulate_wrmsr,
+ [EXIT_REASON_INTERRUPT_WINDOW] = handle_interrupt_window,
+ [EXIT_REASON_HLT] = kvm_emulate_halt,
[EXIT_REASON_INVD] = handle_invd,
[EXIT_REASON_INVLPG] = handle_invlpg,
[EXIT_REASON_RDPMC] = handle_rdpmc,
@@ -6006,9 +6014,23 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
}
if (exit_reason < kvm_vmx_max_exit_handlers
- && kvm_vmx_exit_handlers[exit_reason])
+ && kvm_vmx_exit_handlers[exit_reason]) {
+#ifdef CONFIG_RETPOLINE
+ if (exit_reason == EXIT_REASON_MSR_WRITE)
+ return kvm_emulate_wrmsr(vcpu);
+ else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER)
+ return handle_preemption_timer(vcpu);
+ else if (exit_reason == EXIT_REASON_INTERRUPT_WINDOW)
+ return handle_interrupt_window(vcpu);
+ else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
+ return handle_external_interrupt(vcpu);
+ else if (exit_reason == EXIT_REASON_HLT)
+ return kvm_emulate_halt(vcpu);
+ else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
+ return handle_ept_misconfig(vcpu);
+#endif
return kvm_vmx_exit_handlers[exit_reason](vcpu);
- else {
+ } else {
vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
exit_reason);
dump_vmcs();
@@ -6336,6 +6358,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
);
kvm_after_interrupt(vcpu);
+ vcpu->arch.at_instruction_boundary = true;
}
STACK_FRAME_NON_STANDARD(handle_external_interrupt_irqoff);
@@ -6539,7 +6562,30 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
}
}
-bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
+void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
+ unsigned int flags)
+{
+ u64 hostval = this_cpu_read(x86_spec_ctrl_current);
+
+ if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
+ return;
+
+ if (flags & VMX_RUN_SAVE_SPEC_CTRL)
+ vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
+
+ /*
+ * If the guest/host SPEC_CTRL values differ, restore the host value.
+ *
+ * For legacy IBRS, the IBRS bit always needs to be written after
+ * transitioning from a less privileged predictor mode, regardless of
+ * whether the guest/host values differ.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
+ vmx->spec_ctrl != hostval)
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
+
+ barrier_nospec();
+}
static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
@@ -6628,32 +6674,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
write_cr2(vcpu->arch.cr2);
vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
- vmx->loaded_vmcs->launched);
+ __vmx_vcpu_run_flags(vmx));
vcpu->arch.cr2 = read_cr2();
vmx_enable_fb_clear(vmx);
- /*
- * We do not use IBRS in the kernel. If this vCPU has used the
- * SPEC_CTRL MSR it may have left it on; save the value and
- * turn it off. This is much more efficient than blindly adding
- * it to the atomic save/restore list. Especially as the former
- * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
- *
- * For non-nested case:
- * If the L01 MSR bitmap does not intercept the MSR, then we need to
- * save it.
- *
- * For nested case:
- * If the L02 MSR bitmap does not intercept the MSR, then we need to
- * save it.
- */
- if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL)))
- vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
-
- x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
-
/* All fields are clean at this point */
if (static_branch_unlikely(&enable_evmcs))
current_evmcs->hv_clean_fields |=
@@ -7241,6 +7267,21 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
break;
+ case x86_intercept_pause:
+ /*
+ * PAUSE is a single-byte NOP with a REPE prefix, i.e. collides
+ * with vanilla NOPs in the emulator. Apply the interception
+ * check only to actual PAUSE instructions. Don't check
+ * PAUSE-loop-exiting, software can't expect a given PAUSE to
+ * exit, i.e. KVM is within its rights to allow L2 to execute
+ * the PAUSE.
+ */
+ if ((info->rep_prefix != REPE_PREFIX) ||
+ !nested_cpu_has2(vmcs12, CPU_BASED_PAUSE_EXITING))
+ return X86EMUL_CONTINUE;
+
+ break;
+
/* TODO: check more intercepts... */
default:
break;
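
The key new logic in vmx.c is vmx_spec_ctrl_restore_host(): the guest's SPEC_CTRL is read back only when the MSR is passed through, and the host value is rewritten either when the values differ or unconditionally under legacy IBRS, which must be re-asserted after every exit from the less privileged predictor mode. A userspace-style sketch with the MSR accesses stubbed out:

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t stub_rdmsr(void)       { return 0; }
    static void     stub_wrmsr(uint64_t v) { (void)v; }

    static void spec_ctrl_restore_host_sketch(uint64_t *guest_val,
                                              uint64_t host_val,
                                              unsigned int flags,
                                              bool have_spec_ctrl,
                                              bool kernel_ibrs)
    {
        if (!have_spec_ctrl)
            return;

        if (flags & (1u << 1))          /* VMX_RUN_SAVE_SPEC_CTRL */
            *guest_val = stub_rdmsr();

        /*
         * Legacy IBRS: write the host value after every guest exit,
         * even if it matches; otherwise write only on a mismatch.
         */
        if (kernel_ibrs || *guest_val != host_val)
            stub_wrmsr(host_val);
    }
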
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 7a3362ab5986..4d5be4610af8 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -10,6 +10,7 @@
#include "capabilities.h"
#include "ops.h"
#include "vmcs.h"
+#include "run_flags.h"
extern const u32 vmx_msr_index[];
extern u64 host_efer;
@@ -336,6 +337,10 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
+void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
+unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
+bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
+ unsigned int flags);
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d0b297583df8..07154cae7a15 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -207,6 +207,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "nmi_injections", VCPU_STAT(nmi_injections) },
{ "req_event", VCPU_STAT(req_event) },
{ "l1d_flush", VCPU_STAT(l1d_flush) },
+ { "preemption_reported", VCPU_STAT(preemption_reported) },
+ { "preemption_other", VCPU_STAT(preemption_other) },
{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
@@ -1337,7 +1339,7 @@ static const u32 msr_based_features_all[] = {
MSR_IA32_VMX_EPT_VPID_CAP,
MSR_IA32_VMX_VMFUNC,
- MSR_F10H_DECFG,
+ MSR_AMD64_DE_CFG,
MSR_IA32_UCODE_REV,
MSR_IA32_ARCH_CAPABILITIES,
};
@@ -1407,6 +1409,9 @@ static u64 kvm_get_arch_capabilities(void)
/* Guests don't need to know "Fill buffer clear control" exists */
data &= ~ARCH_CAP_FB_CLEAR_CTRL;
+ if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated())
+ data |= ARCH_CAP_GDS_NO;
+
return data;
}
@@ -2715,6 +2720,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_AMD64_PATCH_LOADER:
case MSR_AMD64_BU_CFG2:
case MSR_AMD64_DC_CFG:
+ case MSR_AMD64_TW_CFG:
case MSR_F15H_EX_CFG:
break;
@@ -3024,6 +3030,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_AMD64_BU_CFG2:
case MSR_IA32_PERF_CTL:
case MSR_AMD64_DC_CFG:
+ case MSR_AMD64_TW_CFG:
case MSR_F15H_EX_CFG:
msr_info->data = 0;
break;
@@ -3562,6 +3569,19 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
struct kvm_host_map map;
struct kvm_steal_time *st;
+ /*
+ * The vCPU can be marked preempted if and only if the VM-Exit was on
+ * an instruction boundary and will not trigger guest emulation of any
+ * kind (see vcpu_run). Vendor specific code controls (conservatively)
+ * when this is true, for example allowing the vCPU to be marked
+ * preempted if and only if the VM-Exit was due to a host interrupt.
+ */
+ if (!vcpu->arch.at_instruction_boundary) {
+ vcpu->stat.preemption_other++;
+ return;
+ }
+
+ vcpu->stat.preemption_reported++;
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
@@ -3948,12 +3968,11 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
{
unsigned long val;
+ memset(dbgregs, 0, sizeof(*dbgregs));
memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
kvm_get_dr(vcpu, 6, &val);
dbgregs->dr6 = val;
dbgregs->dr7 = vcpu->arch.dr7;
- dbgregs->flags = 0;
- memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
}
static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -6788,7 +6807,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
write_fault_to_spt,
emulation_type))
return 1;
- if (ctxt->have_exception) {
+
+ if (ctxt->have_exception &&
+ !(emulation_type & EMULTYPE_SKIP)) {
/*
* #UD should result in just EMULATION_FAILED, and trap-like
* exception should not be encountered during decode.
@@ -8445,6 +8466,13 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.l1tf_flush_l1d = true;
for (;;) {
+ /*
+ * If another guest vCPU requests a PV TLB flush in the middle
+ * of instruction emulation, the rest of the emulation could
+ * use a stale page translation. Assume that any code after
+ * this point can start executing an instruction.
+ */
+ vcpu->arch.at_instruction_boundary = false;
if (kvm_vcpu_running(vcpu)) {
r = vcpu_enter_guest(vcpu);
} else {
@@ -10329,9 +10357,9 @@ void kvm_arch_end_assignment(struct kvm *kvm)
}
EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
-bool kvm_arch_has_assigned_device(struct kvm *kvm)
+bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
{
- return atomic_read(&kvm->arch.assigned_device_count);
+ return arch_atomic_read(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
index a9bdf0805be0..1329d7ca05b5 100644
--- a/arch/x86/lib/iomap_copy_64.S
+++ b/arch/x86/lib/iomap_copy_64.S
@@ -10,6 +10,6 @@
*/
ENTRY(__iowrite32_copy)
movl %edx,%ecx
- rep movsd
+ rep movsl
ret
ENDPROC(__iowrite32_copy)
diff --git a/arch/x86/lib/misc.c b/arch/x86/lib/misc.c
index a018ec4fba53..c97be9a1430a 100644
--- a/arch/x86/lib/misc.c
+++ b/arch/x86/lib/misc.c
@@ -6,7 +6,7 @@
*/
int num_digits(int val)
{
- int m = 10;
+ long long m = 10;
int d = 1;
if (val < 0) {
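
A quick aside on the num_digits() change above: with a 32-bit 'm', the 'm *= 10' step wraps once m reaches 10^9, so digit counts for values near INT_MAX go wrong. A minimal stand-alone sketch of the fixed helper (names reused for illustration only, not the kernel build):

#include <limits.h>
#include <stdio.h>

/* Sketch of the patched helper: 'm' is 64-bit so 'm *= 10' cannot wrap
 * while 'val' remains a 32-bit quantity compared against it. */
static int num_digits(int val)
{
	long long m = 10;
	int d = 1;

	if (val < 0) {
		d++;
		val = -val;	/* callers are assumed to avoid INT_MIN */
	}

	while (val >= m) {
		m *= 10;
		d++;
	}
	return d;
}

int main(void)
{
	/* with a plain 'int m' this would wrap at 10^9 and overcount */
	printf("num_digits(INT_MAX) = %d\n", num_digits(INT_MAX));
	return 0;
}
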
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index 968d7005f4a7..f50cc210a981 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -26,18 +26,31 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
for (; addr < end; addr = next) {
pud_t *pud = pud_page + pud_index(addr);
pmd_t *pmd;
+ bool use_gbpage;
next = (addr & PUD_MASK) + PUD_SIZE;
if (next > end)
next = end;
- if (info->direct_gbpages) {
- pud_t pudval;
+ /* if this is already a gbpage, this portion is already mapped */
+ if (pud_large(*pud))
+ continue;
+
+ /* Is using a gbpage allowed? */
+ use_gbpage = info->direct_gbpages;
- if (pud_present(*pud))
- continue;
+ /* Don't use gbpage if it maps more than the requested region. */
+ /* at the beginning: */
+ use_gbpage &= ((addr & ~PUD_MASK) == 0);
+ /* ... or at the end: */
+ use_gbpage &= ((next & ~PUD_MASK) == 0);
+
+ /* Never overwrite existing mappings */
+ use_gbpage &= !pud_present(*pud);
+
+ if (use_gbpage) {
+ pud_t pudval;
- addr &= PUD_MASK;
pudval = __pud((addr - info->offset) | info->page_flag);
set_pud(pud, pudval);
continue;
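
The rewritten gbpage decision above is easier to audit in isolation; here is a hedged stand-alone model of it (the PUD_* constants are simplified stand-ins for the kernel's definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PUD_SHIFT 30			/* 1 GiB pages on x86-64 */
#define PUD_SIZE  (1ULL << PUD_SHIFT)
#define PUD_MASK  (~(PUD_SIZE - 1))

/* Mirrors the patched logic: a gbpage is used only when [addr, next)
 * covers the whole 1 GiB slot, so a request that starts or ends
 * mid-slot falls back to smaller mappings instead of silently mapping
 * memory outside the requested region. */
static bool use_gbpage(uint64_t addr, uint64_t next, bool direct_gbpages,
		       bool pud_present)
{
	bool use = direct_gbpages;

	use &= (addr & ~PUD_MASK) == 0;	/* aligned at the beginning */
	use &= (next & ~PUD_MASK) == 0;	/* ... and at the end */
	use &= !pud_present;		/* never overwrite existing maps */
	return use;
}

int main(void)
{
	printf("%d\n", use_gbpage(0, PUD_SIZE, true, false));		/* 1 */
	printf("%d\n", use_gbpage(PUD_SIZE / 2, PUD_SIZE, true, false)); /* 0 */
	return 0;
}
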
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index af352e228fa2..086b274fa60f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -7,8 +7,10 @@
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <linux/sched/task.h>
+#include <linux/sched/mm.h>
#include <asm/set_memory.h>
+#include <asm/cpu_device_id.h>
#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/page.h>
@@ -25,6 +27,7 @@
#include <asm/cpufeature.h>
#include <asm/pti.h>
#include <asm/text-patching.h>
+#include <asm/paravirt.h>
/*
* We need to define the tracepoints somewhere, and tlb.c
@@ -208,6 +211,24 @@ static void __init probe_page_size_mask(void)
}
}
+#define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL, \
+ .family = 6, \
+ .model = _model, \
+ }
+/*
+ * INVLPG may not properly flush Global entries
+ * on these CPUs when PCIDs are enabled.
+ */
+static const struct x86_cpu_id invlpg_miss_ids[] = {
+ INTEL_MATCH(INTEL_FAM6_ALDERLAKE ),
+ INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
+ INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ),
+ INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ),
+ INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
+ INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
+ {}
+};
+
static void setup_pcid(void)
{
if (!IS_ENABLED(CONFIG_X86_64))
@@ -216,6 +237,12 @@ static void setup_pcid(void)
if (!boot_cpu_has(X86_FEATURE_PCID))
return;
+ if (x86_match_cpu(invlpg_miss_ids)) {
+ pr_info("Incomplete global flushes, disabling PCID");
+ setup_clear_cpu_cap(X86_FEATURE_PCID);
+ return;
+ }
+
if (boot_cpu_has(X86_FEATURE_PGE)) {
/*
* This can't be cr4_set_bits_and_update_boot() -- the
@@ -710,9 +737,12 @@ void __init poking_init(void)
spinlock_t *ptl;
pte_t *ptep;
- poking_mm = copy_init_mm();
+ poking_mm = mm_alloc();
BUG_ON(!poking_mm);
+ /* Xen PV guests need the PGD to be pinned. */
+ paravirt_arch_dup_mmap(NULL, poking_mm);
+
/*
* Randomize the poking address, but make sure that the following page
* will be mapped at the same PMD. We need 2 pages, so find space for 3,
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index a353f88d299d..137714df879e 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -214,9 +214,15 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PHYSICAL_PAGE_MASK;
+ phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr+1) - phys_addr;
+ /*
+ * Mask out any bits not part of the actual physical
+ * address, like memory encryption bits.
+ */
+ phys_addr &= PHYSICAL_PAGE_MASK;
+
retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
pcm, &new_pcm);
if (retval) {
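
Note the ordering in the ioremap hunk: the size is computed while phys_addr still carries the non-address bits (they cancel in the subtraction), and only afterwards is PHYSICAL_PAGE_MASK applied. A toy model of that arithmetic, with an invented encryption-bit position:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define ENC_BIT		(1ULL << 47)	/* hypothetical SME-style C-bit */
#define PHYSICAL_PAGE_MASK	(PAGE_MASK & ~ENC_BIT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	uint64_t phys_addr = ENC_BIT | 0x1234567ULL;	/* unaligned, encrypted */
	uint64_t last_addr = phys_addr + 100 - 1;
	uint64_t offset, size;

	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;				/* align, keep enc bit */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;	/* enc bit cancels */

	phys_addr &= PHYSICAL_PAGE_MASK;	/* now drop non-address bits */
	printf("offset=%#llx size=%#llx phys=%#llx\n",
	       (unsigned long long)offset, (unsigned long long)size,
	       (unsigned long long)phys_addr);
	return 0;
}
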
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index dc6182eecefa..a37c2873fdb5 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -182,11 +182,11 @@ static void __meminit init_trampoline_pud(void)
set_p4d(p4d_tramp,
__p4d(_KERNPG_TABLE | __pa(pud_page_tramp)));
- set_pgd(&trampoline_pgd_entry,
- __pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
+ trampoline_pgd_entry =
+ __pgd(_KERNPG_TABLE | __pa(p4d_page_tramp));
} else {
- set_pgd(&trampoline_pgd_entry,
- __pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
+ trampoline_pgd_entry =
+ __pgd(_KERNPG_TABLE | __pa(pud_page_tramp));
}
}
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index ebc8e3af1c67..77d4aa57fd35 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -579,7 +579,8 @@ void __init sme_enable(struct boot_params *bp)
cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
((u64)bp->ext_cmd_line_ptr << 32));
- cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
+ if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
+ return;
if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
sme_me_mask = me_mask;
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 4123100e0eaf..7316dca7e846 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -581,13 +581,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
if (start >= end)
continue;
- /*
- * Don't confuse VM with a node that doesn't have the
- * minimum amount of memory:
- */
- if (end && (end - start) < NODE_MIN_SIZE)
- continue;
-
alloc_node_data(nid);
}
@@ -822,7 +815,7 @@ void debug_cpumask_set_cpu(int cpu, int node, bool enable)
return;
}
mask = node_to_cpumask_map[node];
- if (!mask) {
+ if (!cpumask_available(mask)) {
pr_err("node_to_cpumask_map[%i] NULL\n", node);
dump_stack();
return;
@@ -868,7 +861,7 @@ const struct cpumask *cpumask_of_node(int node)
dump_stack();
return cpu_none_mask;
}
- if (node_to_cpumask_map[node] == NULL) {
+ if (!cpumask_available(node_to_cpumask_map[node])) {
printk(KERN_WARNING
"cpumask_of_node(%d): no node_to_cpumask_map!\n",
node);
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index c6f84c0b5d7a..ca77af96033b 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -10,7 +10,6 @@
#include <asm/cpufeature.h> /* boot_cpu_has, ... */
#include <asm/mmu_context.h> /* vma_pkey() */
-#include <asm/fpu/internal.h> /* init_fpstate */
int __execute_only_pkey(struct mm_struct *mm)
{
@@ -154,7 +153,6 @@ static ssize_t init_pkru_read_file(struct file *file, char __user *user_buf,
static ssize_t init_pkru_write_file(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
- struct pkru_state *pk;
char buf[32];
ssize_t len;
u32 new_init_pkru;
@@ -177,10 +175,6 @@ static ssize_t init_pkru_write_file(struct file *file,
return -EINVAL;
WRITE_ONCE(init_pkru_value, new_init_pkru);
- pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
- if (!pk)
- return -EINVAL;
- pk->pkru = new_init_pkru;
return count;
}
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 76959a7d88c8..94291e0ddcb7 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -7,6 +7,7 @@
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
+#include <asm/amd_nb.h>
#include <asm/hpet.h>
#include <asm/pci_x86.h>
@@ -824,3 +825,23 @@ static void rs690_fix_64bit_dma(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
#endif
+
+#ifdef CONFIG_AMD_NB
+
+#define AMD_15B8_RCC_DEV2_EPF0_STRAP2 0x10136008
+#define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK 0x00000080L
+
+static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev)
+{
+ u32 data;
+
+ if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) {
+ data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK;
+ if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data))
+ pci_err(dev, "Failed to write data 0x%x\n", data);
+ } else {
+ pci_err(dev, "Failed to read data\n");
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0);
+#endif
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index 99a28ce2244c..90735895aa44 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -81,7 +81,7 @@ static void send_ebook_state(void)
return;
}
- if (!!test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == state)
+ if (test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == !!state)
return; /* Nothing new to report. */
input_report_switch(ebook_switch_idev, SW_TABLET_MODE, state);
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 20dd7023132f..44dd7ac084ca 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -522,15 +522,23 @@ static int pm_cpu_check(const struct x86_cpu_id *c)
static void pm_save_spec_msr(void)
{
- u32 spec_msr_id[] = {
- MSR_IA32_SPEC_CTRL,
- MSR_IA32_TSX_CTRL,
- MSR_TSX_FORCE_ABORT,
- MSR_IA32_MCU_OPT_CTRL,
- MSR_AMD64_LS_CFG,
+ struct msr_enumeration {
+ u32 msr_no;
+ u32 feature;
+ } msr_enum[] = {
+ { MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL },
+ { MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL },
+ { MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT },
+ { MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL },
+ { MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD },
+ { MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC },
};
+ int i;
- msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
+ for (i = 0; i < ARRAY_SIZE(msr_enum); i++) {
+ if (boot_cpu_has(msr_enum[i].feature))
+ msr_build_context(&msr_enum[i].msr_no, 1);
+ }
}
static int pm_check_save_msr(void)
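
The pm_save_spec_msr() rework boils down to one save context per present MSR instead of a blanket list. A hedged userspace sketch of that table-driven gating (the feature probe and MSR numbers are placeholders):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct msr_enumeration {
	uint32_t msr_no;
	unsigned int feature;
};

/* placeholder for boot_cpu_has(): pretend only feature 1 is present */
static bool cpu_has_feature(unsigned int feature)
{
	return feature == 1;
}

/* placeholder for msr_build_context(): just record the save */
static void save_msr(uint32_t msr_no)
{
	printf("saving MSR %#x across suspend\n", msr_no);
}

int main(void)
{
	static const struct msr_enumeration msr_enum[] = {
		{ 0x00000048, 1 },	/* e.g. IA32_SPEC_CTRL, present here */
		{ 0x00000122, 2 },	/* e.g. IA32_TSX_CTRL, absent here */
	};
	size_t i;

	/* only MSRs whose feature flag is set are touched on resume */
	for (i = 0; i < sizeof(msr_enum) / sizeof(msr_enum[0]); i++)
		if (cpu_has_feature(msr_enum[i].feature))
			save_msr(msr_enum[i].msr_no);
	return 0;
}
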
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 9733d1cc791d..8309e230aeed 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -14,6 +14,11 @@ $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
CFLAGS_sha256.o := -D__DISABLE_EXPORTS
+# When profile-guided optimization is enabled, llvm emits two different
+# overlapping text sections, which is not supported by kexec. Remove profile
+# optimization flags.
+KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
+
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
targets += purgatory.ro
@@ -27,7 +32,7 @@ KCOV_INSTRUMENT := n
# make up the standalone purgatory.ro
PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
-PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
+PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss -g0
PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
@@ -58,6 +63,9 @@ CFLAGS_sha256.o += $(PURGATORY_CFLAGS)
CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE)
CFLAGS_string.o += $(PURGATORY_CFLAGS)
+AFLAGS_REMOVE_setup-x86_$(BITS).o += -g -Wa,-gdwarf-2
+AFLAGS_REMOVE_entry64.o += -g -Wa,-gdwarf-2
+
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
index 05ac9c17c811..dad6198f1a26 100644
--- a/arch/x86/realmode/rm/wakeup_asm.S
+++ b/arch/x86/realmode/rm/wakeup_asm.S
@@ -73,7 +73,7 @@ ENTRY(wakeup_start)
movw %ax, %fs
movw %ax, %gs
- lidtl wakeup_idt
+ lidtl .Lwakeup_idt
/* Clear the EFLAGS */
pushl $0
@@ -171,8 +171,8 @@ END(wakeup_gdt)
/* This is the standard real-mode IDT */
.balign 16
-GLOBAL(wakeup_idt)
+.Lwakeup_idt:
.word 0xffff /* limit */
.long 0 /* address */
.word 0
-END(wakeup_idt)
+END(.Lwakeup_idt)
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 1c3a1962cade..0043fd374a62 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -596,6 +596,14 @@ static void print_absolute_relocs(void)
if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
continue;
}
+ /*
+ * Do not perform relocations in .notes section; any
+ * values there are meant for pre-boot consumption (e.g.
+ * startup_xen).
+ */
+ if (sec_applies->shdr.sh_type == SHT_NOTE) {
+ continue;
+ }
sh_symtab = sec_symtab->symtab;
sym_strtab = sec_symtab->link->strtab;
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
diff --git a/arch/x86/um/shared/sysdep/syscalls_32.h b/arch/x86/um/shared/sysdep/syscalls_32.h
index 68fd2cf526fd..f6e9f84397e7 100644
--- a/arch/x86/um/shared/sysdep/syscalls_32.h
+++ b/arch/x86/um/shared/sysdep/syscalls_32.h
@@ -6,10 +6,9 @@
#include <asm/unistd.h>
#include <sysdep/ptrace.h>
-typedef long syscall_handler_t(struct pt_regs);
+typedef long syscall_handler_t(struct syscall_args);
extern syscall_handler_t *sys_call_table[];
#define EXECUTE_SYSCALL(syscall, regs) \
- ((long (*)(struct syscall_args)) \
- (*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
+ ((*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
index ac8eee093f9c..66162eafd8e8 100644
--- a/arch/x86/um/tls_32.c
+++ b/arch/x86/um/tls_32.c
@@ -65,9 +65,6 @@ static int get_free_idx(struct task_struct* task)
struct thread_struct *t = &task->thread;
int idx;
- if (!t->arch.tls_array)
- return GDT_ENTRY_TLS_MIN;
-
for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
if (!t->arch.tls_array[idx].present)
return idx + GDT_ENTRY_TLS_MIN;
@@ -240,9 +237,6 @@ static int get_tls_entry(struct task_struct *task, struct user_desc *info,
{
struct thread_struct *t = &task->thread;
- if (!t->arch.tls_array)
- goto clear;
-
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile
index 0caddd6acb22..bec115036f87 100644
--- a/arch/x86/um/vdso/Makefile
+++ b/arch/x86/um/vdso/Makefile
@@ -62,7 +62,7 @@ quiet_cmd_vdso = VDSO $@
-Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
-VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv
+VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv -z noexecstack
GCOV_PROFILE := n
#
diff --git a/arch/x86/um/vdso/um_vdso.c b/arch/x86/um/vdso/um_vdso.c
index 891868756a51..6d5269093f7c 100644
--- a/arch/x86/um/vdso/um_vdso.c
+++ b/arch/x86/um/vdso/um_vdso.c
@@ -17,8 +17,10 @@ int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
long ret;
- asm("syscall" : "=a" (ret) :
- "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+ asm("syscall"
+ : "=a" (ret)
+ : "0" (__NR_clock_gettime), "D" (clock), "S" (ts)
+ : "rcx", "r11", "memory");
return ret;
}
@@ -29,8 +31,10 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
long ret;
- asm("syscall" : "=a" (ret) :
- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+ asm("syscall"
+ : "=a" (ret)
+ : "0" (__NR_gettimeofday), "D" (tv), "S" (tz)
+ : "rcx", "r11", "memory");
return ret;
}
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 7a43b2ae19f1..a76ba342a669 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -32,30 +32,30 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
void xen_smp_intr_free(unsigned int cpu)
{
+ kfree(per_cpu(xen_resched_irq, cpu).name);
+ per_cpu(xen_resched_irq, cpu).name = NULL;
if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
per_cpu(xen_resched_irq, cpu).irq = -1;
- kfree(per_cpu(xen_resched_irq, cpu).name);
- per_cpu(xen_resched_irq, cpu).name = NULL;
}
+ kfree(per_cpu(xen_callfunc_irq, cpu).name);
+ per_cpu(xen_callfunc_irq, cpu).name = NULL;
if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
per_cpu(xen_callfunc_irq, cpu).irq = -1;
- kfree(per_cpu(xen_callfunc_irq, cpu).name);
- per_cpu(xen_callfunc_irq, cpu).name = NULL;
}
+ kfree(per_cpu(xen_debug_irq, cpu).name);
+ per_cpu(xen_debug_irq, cpu).name = NULL;
if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
per_cpu(xen_debug_irq, cpu).irq = -1;
- kfree(per_cpu(xen_debug_irq, cpu).name);
- per_cpu(xen_debug_irq, cpu).name = NULL;
}
+ kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
+ per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
NULL);
per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
- kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
- per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
}
}
@@ -65,6 +65,9 @@ int xen_smp_intr_init(unsigned int cpu)
char *resched_name, *callfunc_name, *debug_name;
resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
+ if (!resched_name)
+ goto fail_mem;
+ per_cpu(xen_resched_irq, cpu).name = resched_name;
rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
cpu,
xen_reschedule_interrupt,
@@ -74,9 +77,11 @@ int xen_smp_intr_init(unsigned int cpu)
if (rc < 0)
goto fail;
per_cpu(xen_resched_irq, cpu).irq = rc;
- per_cpu(xen_resched_irq, cpu).name = resched_name;
callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
+ if (!callfunc_name)
+ goto fail_mem;
+ per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
cpu,
xen_call_function_interrupt,
@@ -86,18 +91,27 @@ int xen_smp_intr_init(unsigned int cpu)
if (rc < 0)
goto fail;
per_cpu(xen_callfunc_irq, cpu).irq = rc;
- per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
- debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
- rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
- IRQF_PERCPU | IRQF_NOBALANCING,
- debug_name, NULL);
- if (rc < 0)
- goto fail;
- per_cpu(xen_debug_irq, cpu).irq = rc;
- per_cpu(xen_debug_irq, cpu).name = debug_name;
+ if (!xen_fifo_events) {
+ debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
+ if (!debug_name)
+ goto fail_mem;
+
+ per_cpu(xen_debug_irq, cpu).name = debug_name;
+ rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
+ xen_debug_interrupt,
+ IRQF_PERCPU | IRQF_NOBALANCING,
+ debug_name, NULL);
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_debug_irq, cpu).irq = rc;
+ }
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
+ if (!callfunc_name)
+ goto fail_mem;
+
+ per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
cpu,
xen_call_function_single_interrupt,
@@ -107,10 +121,11 @@ int xen_smp_intr_init(unsigned int cpu)
if (rc < 0)
goto fail;
per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
- per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
return 0;
+ fail_mem:
+ rc = -ENOMEM;
fail:
xen_smp_intr_free(cpu);
return rc;
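
The recurring pattern in the Xen hunks: publish the allocated name into its per-CPU slot before the bind that can fail, and make teardown free it unconditionally. A compact model of the idiom (stub bind function, simplified error codes):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *irq_name;	/* stands in for per_cpu(...).name */
static int irq = -1;

static int bind_stub(const char *name)	/* pretend the bind can fail */
{
	return name[0] ? 17 : -1;
}

static void intr_free(void)
{
	/* free unconditionally: the name may exist even when the bind
	 * never ran or failed */
	free(irq_name);
	irq_name = NULL;
	if (irq >= 0)
		irq = -1;	/* would unbind the handler here */
}

static int intr_init(void)
{
	int rc;

	irq_name = strdup("resched0");
	if (!irq_name)
		goto fail_mem;
	/* publish before binding so a later failure is cleaned up */
	rc = bind_stub(irq_name);
	if (rc < 0)
		goto fail;
	irq = rc;
	return 0;

fail_mem:
	rc = -12;	/* -ENOMEM */
fail:
	intr_free();
	return rc;
}

int main(void)
{
	printf("init: %d\n", intr_init());
	intr_free();
	return 0;
}
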
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 9d9777ded5f7..3a0a27d94c05 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -28,6 +28,7 @@
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>
+#include <asm/fpu/internal.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
@@ -61,6 +62,7 @@ static void cpu_bringup(void)
cr4_init();
cpu_init();
+ fpu__init_cpu();
touch_softlockup_watchdog();
preempt_disable();
@@ -98,18 +100,18 @@ asmlinkage __visible void cpu_bringup_and_idle(void)
void xen_smp_intr_free_pv(unsigned int cpu)
{
+ kfree(per_cpu(xen_irq_work, cpu).name);
+ per_cpu(xen_irq_work, cpu).name = NULL;
if (per_cpu(xen_irq_work, cpu).irq >= 0) {
unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
per_cpu(xen_irq_work, cpu).irq = -1;
- kfree(per_cpu(xen_irq_work, cpu).name);
- per_cpu(xen_irq_work, cpu).name = NULL;
}
+ kfree(per_cpu(xen_pmu_irq, cpu).name);
+ per_cpu(xen_pmu_irq, cpu).name = NULL;
if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
per_cpu(xen_pmu_irq, cpu).irq = -1;
- kfree(per_cpu(xen_pmu_irq, cpu).name);
- per_cpu(xen_pmu_irq, cpu).name = NULL;
}
}
@@ -119,6 +121,7 @@ int xen_smp_intr_init_pv(unsigned int cpu)
char *callfunc_name, *pmu_name;
callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
+ per_cpu(xen_irq_work, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
cpu,
xen_irq_work_interrupt,
@@ -128,10 +131,10 @@ int xen_smp_intr_init_pv(unsigned int cpu)
if (rc < 0)
goto fail;
per_cpu(xen_irq_work, cpu).irq = rc;
- per_cpu(xen_irq_work, cpu).name = callfunc_name;
if (is_xen_pmu) {
pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
+ per_cpu(xen_pmu_irq, cpu).name = pmu_name;
rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
xen_pmu_irq_handler,
IRQF_PERCPU|IRQF_NOBALANCING,
@@ -139,7 +142,6 @@ int xen_smp_intr_init_pv(unsigned int cpu)
if (rc < 0)
goto fail;
per_cpu(xen_pmu_irq, cpu).irq = rc;
- per_cpu(xen_pmu_irq, cpu).name = pmu_name;
}
return 0;
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index d817b7c862a6..00d2ec73017e 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -75,6 +75,7 @@ void xen_init_lock_cpu(int cpu)
cpu, per_cpu(lock_kicker_irq, cpu));
name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
+ per_cpu(irq_name, cpu) = name;
irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
cpu,
dummy_handler,
@@ -85,7 +86,6 @@ void xen_init_lock_cpu(int cpu)
if (irq >= 0) {
disable_irq(irq); /* make sure it's never delivered */
per_cpu(lock_kicker_irq, cpu) = irq;
- per_cpu(irq_name, cpu) = name;
}
printk("cpu %d spinlock event irq %d\n", cpu, irq);
@@ -98,6 +98,8 @@ void xen_uninit_lock_cpu(int cpu)
if (!xen_pvspin)
return;
+ kfree(per_cpu(irq_name, cpu));
+ per_cpu(irq_name, cpu) = NULL;
/*
* When booting the kernel with 'mitigations=auto,nosmt', the secondary
* CPUs are not activated, and lock_kicker_irq is not initialized.
@@ -108,8 +110,6 @@ void xen_uninit_lock_cpu(int cpu)
unbind_from_irqhandler(irq, NULL);
per_cpu(lock_kicker_irq, cpu) = -1;
- kfree(per_cpu(irq_name, cpu));
- per_cpu(irq_name, cpu) = NULL;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 45a441c33d6d..120e2bcf20f8 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -30,6 +30,8 @@ extern struct start_info *xen_start_info;
extern struct shared_info xen_dummy_shared_info;
extern struct shared_info *HYPERVISOR_shared_info;
+extern bool xen_fifo_events;
+
void xen_setup_mfn_list_list(void);
void xen_build_mfn_list_list(void);
void xen_setup_machphys_mapping(void);
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index 294846117fc2..41ad60bb0fbc 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -9,8 +9,7 @@
# KBUILD_CFLAGS used when building rest of boot (takes effect recursively)
-KBUILD_CFLAGS += -fno-builtin -Iarch/$(ARCH)/boot/include
-HOSTFLAGS += -Iarch/$(ARCH)/boot/include
+KBUILD_CFLAGS += -fno-builtin
BIG_ENDIAN := $(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#")
diff --git a/arch/xtensa/boot/lib/zmem.c b/arch/xtensa/boot/lib/zmem.c
index e3ecd743c515..b89189355122 100644
--- a/arch/xtensa/boot/lib/zmem.c
+++ b/arch/xtensa/boot/lib/zmem.c
@@ -4,13 +4,14 @@
/* bits taken from ppc */
extern void *avail_ram, *end_avail;
+void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp);
-void exit (void)
+static void exit(void)
{
for (;;);
}
-void *zalloc(unsigned size)
+static void *zalloc(unsigned int size)
{
void *p = avail_ram;
diff --git a/arch/xtensa/include/asm/bugs.h b/arch/xtensa/include/asm/bugs.h
deleted file mode 100644
index 69b29d198249..000000000000
--- a/arch/xtensa/include/asm/bugs.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * include/asm-xtensa/bugs.h
- *
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Xtensa processors don't have any bugs. :)
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of
- * this archive for more details.
- */
-
-#ifndef _XTENSA_BUGS_H
-#define _XTENSA_BUGS_H
-
-static void check_bugs(void) { }
-
-#endif /* _XTENSA_BUGS_H */
diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h
index 5b4acb7d1c07..02c93e08d592 100644
--- a/arch/xtensa/include/asm/core.h
+++ b/arch/xtensa/include/asm/core.h
@@ -6,6 +6,10 @@
#include <variant/core.h>
+#ifndef XCHAL_HAVE_DIV32
+#define XCHAL_HAVE_DIV32 0
+#endif
+
#ifndef XCHAL_HAVE_EXCLUSIVE
#define XCHAL_HAVE_EXCLUSIVE 0
#endif
@@ -18,4 +22,13 @@
#define XCHAL_SPANNING_WAY 0
#endif
+#ifndef XCHAL_HW_MIN_VERSION
+#if defined(XCHAL_HW_MIN_VERSION_MAJOR) && defined(XCHAL_HW_MIN_VERSION_MINOR)
+#define XCHAL_HW_MIN_VERSION (XCHAL_HW_MIN_VERSION_MAJOR * 100 + \
+ XCHAL_HW_MIN_VERSION_MINOR)
+#else
+#define XCHAL_HW_MIN_VERSION 0
+#endif
+#endif
+
#endif
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 09c56cba442e..5a42d663612b 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -181,10 +181,6 @@ static inline unsigned long ___pa(unsigned long va)
#define pfn_valid(pfn) \
((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
-#ifdef CONFIG_DISCONTIGMEM
-# error CONFIG_DISCONTIGMEM not supported
-#endif
-
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 154979d62b73..2b86a2a04236 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -44,8 +44,8 @@ static void do_cache_op(phys_addr_t paddr, size_t size,
}
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
@@ -62,8 +62,8 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
}
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index 86c9ba963155..883f6359e70f 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -13,17 +13,26 @@
#include <linux/perf_event.h>
#include <linux/platform_device.h>
+#include <asm/core.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>
+#define XTENSA_HWVERSION_RG_2015_0 260000
+
+#if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RG_2015_0
+#define XTENSA_PMU_ERI_BASE 0x00101000
+#else
+#define XTENSA_PMU_ERI_BASE 0x00001000
+#endif
+
/* Global control/status for all perf counters */
-#define XTENSA_PMU_PMG 0x1000
+#define XTENSA_PMU_PMG XTENSA_PMU_ERI_BASE
/* Perf counter values */
-#define XTENSA_PMU_PM(i) (0x1080 + (i) * 4)
+#define XTENSA_PMU_PM(i) (XTENSA_PMU_ERI_BASE + 0x80 + (i) * 4)
/* Perf counter control registers */
-#define XTENSA_PMU_PMCTRL(i) (0x1100 + (i) * 4)
+#define XTENSA_PMU_PMCTRL(i) (XTENSA_PMU_ERI_BASE + 0x100 + (i) * 4)
/* Perf counter status registers */
-#define XTENSA_PMU_PMSTAT(i) (0x1180 + (i) * 4)
+#define XTENSA_PMU_PMSTAT(i) (XTENSA_PMU_ERI_BASE + 0x180 + (i) * 4)
#define XTENSA_PMU_PMG_PMEN 0x1
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 4a6c495ce9b6..16af8e514cb3 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -543,5 +543,5 @@ void die(const char * str, struct pt_regs * regs, long err)
if (panic_on_oops)
panic("Fatal exception");
- do_exit(err);
+ make_task_dead(err);
}
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index fa9f3893b002..d54bcaa194d4 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -204,7 +204,7 @@ static int tuntap_write(struct iss_net_private *lp, struct sk_buff **skb)
return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len);
}
-unsigned short tuntap_protocol(struct sk_buff *skb)
+static unsigned short tuntap_protocol(struct sk_buff *skb)
{
return eth_type_trans(skb, skb->dev);
}
@@ -231,7 +231,7 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
init += sizeof(TRANSPORT_TUNTAP_NAME) - 1;
if (*init == ',') {
- rem = split_if_spec(init + 1, &mac_str, &dev_name);
+ rem = split_if_spec(init + 1, &mac_str, &dev_name, NULL);
if (rem != NULL) {
pr_err("%s: extra garbage on specification : '%s'\n",
dev->name, rem);
@@ -477,7 +477,7 @@ static int iss_net_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
-void iss_net_user_timer_expire(struct timer_list *unused)
+static void iss_net_user_timer_expire(struct timer_list *unused)
{
}
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 09d721b1f6ac..59fd1b10b5f3 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -594,6 +594,10 @@ struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
struct bfq_group *bfqg;
while (blkg) {
+ if (!blkg->online) {
+ blkg = blkg->parent;
+ continue;
+ }
bfqg = blkg_to_bfqg(blkg);
if (bfqg->online) {
bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 962701d3f46b..c73c8b0f5e40 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -420,6 +420,8 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
*/
void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
+ lockdep_assert_held(&bfqd->lock);
+
if (bfqd->queued != 0) {
bfq_log(bfqd, "schedule dispatch");
blk_mq_run_hw_queues(bfqd->queue, true);
@@ -6257,8 +6259,8 @@ bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_bfqq_expire(bfqd, bfqq, true, reason);
schedule_dispatch:
- spin_unlock_irqrestore(&bfqd->lock, flags);
bfq_schedule_dispatch(bfqd);
+ spin_unlock_irqrestore(&bfqd->lock, flags);
}
/*
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index ec295be93ca0..247f7c480e66 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -424,6 +424,7 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
bip->bip_vcnt = bip_src->bip_vcnt;
bip->bip_iter = bip_src->bip_iter;
+ bip->bip_flags = bip_src->bip_flags & ~BIP_BLOCK_INTEGRITY;
return 0;
}
diff --git a/block/bio.c b/block/bio.c
index 08dbdc32ceaa..e3d3e75c97e0 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -710,7 +710,7 @@ static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
if ((addr1 | mask) != (addr2 | mask))
return false;
- if (bv->bv_len + len > queue_max_segment_size(q))
+ if (len > queue_max_segment_size(q) - bv->bv_len)
return false;
return __bio_try_merge_page(bio, page, len, offset, same_page);
}
@@ -884,7 +884,7 @@ void bio_release_pages(struct bio *bio, bool mark_dirty)
return;
bio_for_each_segment_all(bvec, bio, iter_all) {
- if (mark_dirty && !PageCompound(bvec->bv_page))
+ if (mark_dirty)
set_page_dirty_lock(bvec->bv_page);
put_page(bvec->bv_page);
}
@@ -1691,8 +1691,7 @@ void bio_set_pages_dirty(struct bio *bio)
struct bvec_iter_all iter_all;
bio_for_each_segment_all(bvec, bio, iter_all) {
- if (!PageCompound(bvec->bv_page))
- set_page_dirty_lock(bvec->bv_page);
+ set_page_dirty_lock(bvec->bv_page);
}
}
@@ -1740,7 +1739,7 @@ void bio_check_pages_dirty(struct bio *bio)
struct bvec_iter_all iter_all;
bio_for_each_segment_all(bvec, bio, iter_all) {
- if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
+ if (!PageDirty(bvec->bv_page))
goto defer;
}
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index dde8d0acfb34..cd085a0e5e4a 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1445,6 +1445,10 @@ retry:
list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
pol->pd_init_fn(blkg->pd[pol->plid]);
+ if (pol->pd_online_fn)
+ list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+ pol->pd_online_fn(blkg->pd[pol->plid]);
+
__set_bit(pol->plid, q->blkcg_pols);
ret = 0;
diff --git a/block/blk-core.c b/block/blk-core.c
index 5808baa950c3..030de4fdf9b1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -793,10 +793,7 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return false;
-
- WARN_ONCE(1,
- "generic_make_request: Trying to write "
- "to read-only block-device %s (partno %d)\n",
+ pr_warn("Trying to write to read-only block-device %s (partno %d)\n",
bio_devname(bio, b), part->partno);
/* Older lvm-tools actually trigger this */
return false;
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index ef287c33d6d9..e7d5aafa5e99 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -248,7 +248,9 @@ enum {
/* 1/64k is granular enough and can easily be handled w/ u32 */
HWEIGHT_WHOLE = 1 << 16,
+};
+enum {
/*
* As vtime is used to calculate the cost of each IO, it needs to
* be fairly high precision. For example, it should be able to
@@ -271,6 +273,11 @@ enum {
VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
VRATE_CLAMP_ADJ_PCT = 4,
+ /* switch iff the conditions are met for longer than this */
+ AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
+};
+
+enum {
/* if IOs end up waiting for requests, issue less */
RQ_WAIT_BUSY_PCT = 5,
@@ -288,9 +295,6 @@ enum {
SURPLUS_SCALE_ABS = HWEIGHT_WHOLE / 50, /* + 2% */
SURPLUS_MIN_ADJ_DELTA = HWEIGHT_WHOLE / 33, /* 3% */
- /* switch iff the conditions are met for longer than this */
- AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
-
/*
* Count IO size in 4k pages. The 12bit shift helps keeping
* size-proportional components of cost calculation in closer
@@ -785,9 +789,14 @@ static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
*page = *seqio = *randio = 0;
- if (bps)
- *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
- DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
+ if (bps) {
+ u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
+
+ if (bps_pages)
+ *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
+ else
+ *page = 1;
+ }
if (seqiops) {
v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
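
The calc_lcoefs() guard is needed because the usual (x + y - 1) / y round-up wraps when x is near U64_MAX (e.g. a bps limit configured as "max"), and the wrapped 0 then feeds a division. A sketch with assumed constants:

#include <stdint.h>
#include <stdio.h>

#define IOC_PAGE_SIZE	4096ULL
#define VTIME_PER_SEC	1000000000ULL	/* assumed scale, illustration only */
#define DIV_ROUND_UP_ULL(x, y)	(((x) + (y) - 1) / (y))

int main(void)
{
	uint64_t bps = UINT64_MAX;	/* userspace configured "max" */
	uint64_t bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
	uint64_t page;

	if (bps_pages)			/* wrapped to 0 for bps near U64_MAX */
		page = DIV_ROUND_UP_ULL(VTIME_PER_SEC, bps_pages);
	else
		page = 1;		/* pre-patch code divided by zero here */

	printf("bps_pages=%llu page_cost=%llu\n",
	       (unsigned long long)bps_pages, (unsigned long long)page);
	return 0;
}
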
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5219064cd72b..711fa0fcd211 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -477,7 +477,7 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist,
struct scatterlist **sg)
{
- struct bio_vec uninitialized_var(bvec), bvprv = { NULL };
+ struct bio_vec bvec, bvprv = { NULL };
struct bvec_iter iter;
int nsegs = 0;
bool new_bio = false;
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 121f4c1e0697..772a6c6b1634 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -883,6 +883,9 @@ void blk_mq_debugfs_register_hctx(struct request_queue *q,
char name[20];
int i;
+ if (!q->debugfs_dir)
+ return;
+
snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index f422c7feea7e..126a9a635594 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -59,8 +59,7 @@ void blk_mq_sched_assign_ioc(struct request *rq)
}
/*
- * Mark a hardware queue as needing a restart. For shared queues, maintain
- * a count of how many hardware queues are marked for restart.
+ * Mark a hardware queue as needing a restart.
*/
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
@@ -92,13 +91,17 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
/*
* Only SCSI implements .get_budget and .put_budget, and SCSI restarts
* its queue by itself in its completion handler, so we don't need to
- * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
+ * restart queue if .get_budget() fails to get the budget.
+ *
+ * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
+ * be run again. This is necessary to avoid starving flushes.
*/
-static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
+static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
struct request_queue *q = hctx->queue;
struct elevator_queue *e = q->elevator;
LIST_HEAD(rq_list);
+ int ret = 0;
do {
struct request *rq;
@@ -106,6 +109,11 @@ static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
break;
+ if (!list_empty_careful(&hctx->dispatch)) {
+ ret = -EAGAIN;
+ break;
+ }
+
if (!blk_mq_get_dispatch_budget(hctx))
break;
@@ -122,6 +130,8 @@ static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
*/
list_add(&rq->queuelist, &rq_list);
} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
+
+ return ret;
}
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
@@ -138,17 +148,26 @@ static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
/*
* Only SCSI implements .get_budget and .put_budget, and SCSI restarts
* its queue by itself in its completion handler, so we don't need to
- * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
+ * restart queue if .get_budget() fails to get the budget.
+ *
+ * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
+ * be run again. This is necessary to avoid starving flushes.
*/
-static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
+static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
struct request_queue *q = hctx->queue;
LIST_HEAD(rq_list);
struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
+ int ret = 0;
do {
struct request *rq;
+ if (!list_empty_careful(&hctx->dispatch)) {
+ ret = -EAGAIN;
+ break;
+ }
+
if (!sbitmap_any_bit_set(&hctx->ctx_map))
break;
@@ -174,21 +193,17 @@ static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
WRITE_ONCE(hctx->dispatch_from, ctx);
+ return ret;
}
-void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
+int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
struct request_queue *q = hctx->queue;
struct elevator_queue *e = q->elevator;
const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
+ int ret = 0;
LIST_HEAD(rq_list);
- /* RCU or SRCU read lock is needed before checking quiesced flag */
- if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
- return;
-
- hctx->run++;
-
/*
* If we have previous entries on our dispatch list, grab them first for
* more fair dispatch.
@@ -217,19 +232,41 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
blk_mq_sched_mark_restart_hctx(hctx);
if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
if (has_sched_dispatch)
- blk_mq_do_dispatch_sched(hctx);
+ ret = blk_mq_do_dispatch_sched(hctx);
else
- blk_mq_do_dispatch_ctx(hctx);
+ ret = blk_mq_do_dispatch_ctx(hctx);
}
} else if (has_sched_dispatch) {
- blk_mq_do_dispatch_sched(hctx);
+ ret = blk_mq_do_dispatch_sched(hctx);
} else if (hctx->dispatch_busy) {
/* dequeue request one by one from sw queue if queue is busy */
- blk_mq_do_dispatch_ctx(hctx);
+ ret = blk_mq_do_dispatch_ctx(hctx);
} else {
blk_mq_flush_busy_ctxs(hctx, &rq_list);
blk_mq_dispatch_rq_list(q, &rq_list, false);
}
+
+ return ret;
+}
+
+void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+
+ /* RCU or SRCU read lock is needed before checking quiesced flag */
+ if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
+ return;
+
+ hctx->run++;
+
+ /*
+ * A return of -EAGAIN is an indication that hctx->dispatch is not
+ * empty and we must run again in order to avoid starving flushes.
+ */
+ if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
+ if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
+ blk_mq_run_hw_queue(hctx, true);
+ }
}
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
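
The caller-side contract introduced above is: retry dispatch once synchronously, and if it still reports -EAGAIN, defer to an asynchronous run instead of looping. A stand-alone sketch of that control flow (the dispatch stub is invented):

#include <stdio.h>

#define EAGAIN 11

static int busy_passes = 2;	/* pretend dispatch stays busy twice */

/* stand-in for __blk_mq_sched_dispatch_requests() */
static int dispatch_requests(void)
{
	return busy_passes-- > 0 ? -EAGAIN : 0;
}

int main(void)
{
	/* one synchronous retry keeps the common case cheap; after that,
	 * defer to an async run instead of spinning in this context */
	if (dispatch_requests() == -EAGAIN) {
		if (dispatch_requests() == -EAGAIN)
			printf("still busy: kick async run_work\n");
	}
	return 0;
}
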
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 5dafd7a8ec91..7abd66d1228a 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -250,7 +250,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
struct request_queue *q = hctx->queue;
struct blk_mq_ctx *ctx;
- int i, ret;
+ int i, j, ret;
if (!hctx->nr_ctx)
return 0;
@@ -262,9 +262,16 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
hctx_for_each_ctx(hctx, ctx, i) {
ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
if (ret)
- break;
+ goto out;
}
+ return 0;
+out:
+ hctx_for_each_ctx(hctx, ctx, j) {
+ if (j < i)
+ kobject_del(&ctx->kobj);
+ }
+ kobject_del(&hctx->kobj);
return ret;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 84798d09ca46..03f4eb37dfc7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1112,7 +1112,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
- struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
+ struct sbitmap_queue *sbq;
struct wait_queue_head *wq;
wait_queue_entry_t *wait;
bool ret;
@@ -1135,6 +1135,10 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
if (!list_empty_careful(&wait->entry))
return false;
+ if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
+ sbq = &hctx->tags->breserved_tags;
+ else
+ sbq = &hctx->tags->bitmap_tags;
wq = &bt_wait_ptr(sbq, hctx)->wait;
spin_lock_irq(&wq->lock);
@@ -1150,6 +1154,22 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
__add_wait_queue(wq, wait);
/*
+ * Add one explicit barrier since blk_mq_get_driver_tag() may
+ * not imply a barrier in case of failure.
+ *
+ * Order adding us to wait queue and allocating driver tag.
+ *
+ * The pair is the one implied in sbitmap_queue_wake_up() which
+ * orders clearing sbitmap tag bits and waitqueue_active() in
+ * __sbitmap_queue_wake_up(), since waitqueue_active() is lockless.
+ *
+ * Otherwise, reordering the wait-queue add and the driver-tag
+ * allocation may cause __sbitmap_queue_wake_up() to wake up nothing
+ * because waitqueue_active() may not observe us in the wait queue.
+ */
+ smp_mb();
+
+ /*
* It's possible that a tag was freed in the window between the
* allocation failure and adding the hardware queue to the wait
* queue.
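
The new comment describes a textbook store-buffering race. Reduced to C11 atomics, the two sides look like the sketch below; with both fences in place the outcome where each side misses the other's store (a lost wakeup) is forbidden. Names are illustrative, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool on_waitqueue;	/* "we are on the wait queue" */
static atomic_bool tag_free;		/* "a driver tag is available" */

/* waiter side, as in blk_mq_mark_tag_wait(): enqueue, fence, re-check */
static bool waiter_sees_tag(void)
{
	atomic_store_explicit(&on_waitqueue, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the added smp_mb() */
	return atomic_load_explicit(&tag_free, memory_order_relaxed);
}

/* waker side, as in sbitmap's wake-up path: free tag, fence, check queue */
static bool waker_sees_waiter(void)
{
	atomic_store_explicit(&tag_free, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&on_waitqueue, memory_order_relaxed);
}

int main(void)
{
	/* run these from two threads: the fences forbid the outcome where
	 * both return false, i.e. a lost wakeup */
	printf("%d %d\n", waiter_sees_tag(), waker_sees_waiter());
	return 0;
}
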
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index bd870f9ae458..43444934b985 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1374,6 +1374,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
+ rcu_read_lock();
/*
* Update has_rules[] flags for the updated tg's subtree. A tg is
* considered to have rules if either the tg itself or any of its
@@ -1401,6 +1402,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
this_tg->latency_target = max(this_tg->latency_target,
parent_tg->latency_target);
}
+ rcu_read_unlock();
/*
* We're already holding queue_lock and know @tg is valid. Let's
diff --git a/block/opal_proto.h b/block/opal_proto.h
index 5532412d567c..354b4e93bf63 100644
--- a/block/opal_proto.h
+++ b/block/opal_proto.h
@@ -65,6 +65,7 @@ enum opal_response_token {
#define SHORT_ATOM_BYTE 0xBF
#define MEDIUM_ATOM_BYTE 0xDF
#define LONG_ATOM_BYTE 0xE3
+#define EMPTY_ATOM_BYTE 0xFF
#define OPAL_INVAL_PARAM 12
#define OPAL_MANUFACTURED_INACTIVE 0x08
diff --git a/block/partition-generic.c b/block/partition-generic.c
index aee643ce13d1..e69452c1f5ad 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -272,6 +272,7 @@ void delete_partition(struct gendisk *disk, int partno)
struct disk_part_tbl *ptbl =
rcu_dereference_protected(disk->part_tbl, 1);
struct hd_struct *part;
+ struct block_device *bdev;
if (partno >= ptbl->len)
return;
@@ -292,6 +293,12 @@ void delete_partition(struct gendisk *disk, int partno)
* "in-use" until we really free the gendisk.
*/
blk_invalidate_devt(part_devt(part));
+
+ bdev = bdget(part_devt(part));
+ if (bdev) {
+ remove_inode_hash(bdev->bd_inode);
+ bdput(bdev);
+ }
hd_struct_kill(part);
}
diff --git a/block/partitions/amiga.c b/block/partitions/amiga.c
index 560936617d9c..484a51bce39b 100644
--- a/block/partitions/amiga.c
+++ b/block/partitions/amiga.c
@@ -11,11 +11,19 @@
#define pr_fmt(fmt) fmt
#include <linux/types.h>
+#include <linux/mm_types.h>
+#include <linux/overflow.h>
#include <linux/affs_hardblocks.h>
#include "check.h"
#include "amiga.h"
+/* magic offsets in partition DosEnvVec */
+#define NR_HD 3
+#define NR_SECT 5
+#define LO_CYL 9
+#define HI_CYL 10
+
static __inline__ u32
checksum_block(__be32 *m, int size)
{
@@ -32,8 +40,12 @@ int amiga_partition(struct parsed_partitions *state)
unsigned char *data;
struct RigidDiskBlock *rdb;
struct PartitionBlock *pb;
- int start_sect, nr_sects, blk, part, res = 0;
- int blksize = 1; /* Multiplier for disk block size */
+ u64 start_sect, nr_sects;
+ sector_t blk, end_sect;
+ u32 cylblk; /* rdb_CylBlocks = nr_heads*sect_per_track */
+ u32 nr_hd, nr_sect, lo_cyl, hi_cyl;
+ int part, res = 0;
+ unsigned int blksize = 1; /* Multiplier for disk block size */
int slot = 1;
char b[BDEVNAME_SIZE];
@@ -43,7 +55,7 @@ int amiga_partition(struct parsed_partitions *state)
data = read_part_sector(state, blk, &sect);
if (!data) {
if (warn_no_part)
- pr_err("Dev %s: unable to read RDB block %d\n",
+ pr_err("Dev %s: unable to read RDB block %llu\n",
bdevname(state->bdev, b), blk);
res = -1;
goto rdb_done;
@@ -60,12 +72,12 @@ int amiga_partition(struct parsed_partitions *state)
*(__be32 *)(data+0xdc) = 0;
if (checksum_block((__be32 *)data,
be32_to_cpu(rdb->rdb_SummedLongs) & 0x7F)==0) {
- pr_err("Trashed word at 0xd0 in block %d ignored in checksum calculation\n",
+ pr_err("Trashed word at 0xd0 in block %llu ignored in checksum calculation\n",
blk);
break;
}
- pr_err("Dev %s: RDB in block %d has bad checksum\n",
+ pr_err("Dev %s: RDB in block %llu has bad checksum\n",
bdevname(state->bdev, b), blk);
}
@@ -81,12 +93,17 @@ int amiga_partition(struct parsed_partitions *state)
}
blk = be32_to_cpu(rdb->rdb_PartitionList);
put_dev_sector(sect);
- for (part = 1; blk>0 && part<=16; part++, put_dev_sector(sect)) {
- blk *= blksize; /* Read in terms partition table understands */
+ for (part = 1; (s32) blk>0 && part<=16; part++, put_dev_sector(sect)) {
+ /* Read in terms the partition table understands */
+ if (check_mul_overflow(blk, (sector_t) blksize, &blk)) {
+ pr_err("Dev %s: overflow calculating partition block %llu! Skipping partitions %u and beyond\n",
+ bdevname(state->bdev, b), blk, part);
+ break;
+ }
data = read_part_sector(state, blk, &sect);
if (!data) {
if (warn_no_part)
- pr_err("Dev %s: unable to read partition block %d\n",
+ pr_err("Dev %s: unable to read partition block %llu\n",
bdevname(state->bdev, b), blk);
res = -1;
goto rdb_done;
@@ -98,19 +115,70 @@ int amiga_partition(struct parsed_partitions *state)
if (checksum_block((__be32 *)pb, be32_to_cpu(pb->pb_SummedLongs) & 0x7F) != 0 )
continue;
- /* Tell Kernel about it */
+ /* RDB gives us more than enough rope to hang ourselves with,
+ * many times over (2^128 bytes if all fields max out).
+ * Some careful checks are in order, so check for potential
+ * overflows.
+ * We are multiplying four 32 bit numbers to one sector_t!
+ */
+
+ nr_hd = be32_to_cpu(pb->pb_Environment[NR_HD]);
+ nr_sect = be32_to_cpu(pb->pb_Environment[NR_SECT]);
+
+ /* CylBlocks is total number of blocks per cylinder */
+ if (check_mul_overflow(nr_hd, nr_sect, &cylblk)) {
+ pr_err("Dev %s: heads*sects %u overflows u32, skipping partition!\n",
+ bdevname(state->bdev, b), cylblk);
+ continue;
+ }
+
+ /* check for consistency with RDB defined CylBlocks */
+ if (cylblk > be32_to_cpu(rdb->rdb_CylBlocks)) {
+ pr_warn("Dev %s: cylblk %u > rdb_CylBlocks %u!\n",
+ bdevname(state->bdev, b), cylblk,
+ be32_to_cpu(rdb->rdb_CylBlocks));
+ }
+
+ /* RDB allows for variable logical block size -
+ * normalize to 512 byte blocks and check result.
+ */
+
+ if (check_mul_overflow(cylblk, blksize, &cylblk)) {
+ pr_err("Dev %s: partition %u bytes per cyl. overflows u32, skipping partition!\n",
+ bdevname(state->bdev, b), part);
+ continue;
+ }
+
+ /* Calculate partition start and end. Limit of 32 bit on cylblk
+ * guarantees no overflow occurs if LBD support is enabled.
+ */
+
+ lo_cyl = be32_to_cpu(pb->pb_Environment[LO_CYL]);
+ start_sect = ((u64) lo_cyl * cylblk);
+
+ hi_cyl = be32_to_cpu(pb->pb_Environment[HI_CYL]);
+ nr_sects = (((u64) hi_cyl - lo_cyl + 1) * cylblk);
- nr_sects = (be32_to_cpu(pb->pb_Environment[10]) + 1 -
- be32_to_cpu(pb->pb_Environment[9])) *
- be32_to_cpu(pb->pb_Environment[3]) *
- be32_to_cpu(pb->pb_Environment[5]) *
- blksize;
if (!nr_sects)
continue;
- start_sect = be32_to_cpu(pb->pb_Environment[9]) *
- be32_to_cpu(pb->pb_Environment[3]) *
- be32_to_cpu(pb->pb_Environment[5]) *
- blksize;
+
+ /* Warn user if partition end overflows u32 (AmigaDOS limit) */
+
+ if ((start_sect + nr_sects) > UINT_MAX) {
+ pr_warn("Dev %s: partition %u (%llu-%llu) needs 64 bit device support!\n",
+ bdevname(state->bdev, b), part,
+ start_sect, start_sect + nr_sects);
+ }
+
+ if (check_add_overflow(start_sect, nr_sects, &end_sect)) {
+ pr_err("Dev %s: partition %u (%llu-%llu) needs LBD device support, skipping partition!\n",
+ bdevname(state->bdev, b), part,
+ start_sect, end_sect);
+ continue;
+ }
+
+ /* Tell Kernel about it */
+
put_partition(state,slot++,start_sect,nr_sects);
{
/* Be even more informative to aid mounting */
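
check_mul_overflow() in the hunk above typically wraps the compiler builtin; each multiplication of untrusted RDB fields is validated step by step. A userspace equivalent using the same builtin (field values invented to force the overflow):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* nr_heads and sectors_per_track as read from an untrusted RDB */
	uint32_t nr_hd = 0x10000, nr_sect = 0x10000;
	uint32_t cylblk;

	/* the kernel's check_mul_overflow() maps to this builtin on
	 * modern compilers */
	if (__builtin_mul_overflow(nr_hd, nr_sect, &cylblk)) {
		printf("heads*sects overflows u32, skipping partition\n");
		return 0;
	}
	printf("cylblk = %u\n", cylblk);
	return 0;
}
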
diff --git a/block/sed-opal.c b/block/sed-opal.c
index b4c761973ac1..1a0876251529 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -88,8 +88,8 @@ struct opal_dev {
u64 lowest_lba;
size_t pos;
- u8 cmd[IO_BUFFER_LENGTH];
- u8 resp[IO_BUFFER_LENGTH];
+ u8 *cmd;
+ u8 *resp;
struct parsed_resp parsed;
size_t prev_d_len;
@@ -893,16 +893,20 @@ static int response_parse(const u8 *buf, size_t length,
token_length = response_parse_medium(iter, pos);
else if (pos[0] <= LONG_ATOM_BYTE) /* long atom */
token_length = response_parse_long(iter, pos);
+ else if (pos[0] == EMPTY_ATOM_BYTE) /* empty atom */
+ token_length = 1;
else /* TOKEN */
token_length = response_parse_token(iter, pos);
if (token_length < 0)
return token_length;
+ if (pos[0] != EMPTY_ATOM_BYTE)
+ num_entries++;
+
pos += token_length;
total -= token_length;
iter++;
- num_entries++;
}
resp->num = num_entries;
@@ -2019,6 +2023,8 @@ void free_opal_dev(struct opal_dev *dev)
return;
clean_opal_dev(dev);
+ kfree(dev->resp);
+ kfree(dev->cmd);
kfree(dev);
}
EXPORT_SYMBOL(free_opal_dev);
@@ -2031,17 +2037,39 @@ struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv)
if (!dev)
return NULL;
+ /*
+ * Presumably DMA-able buffers must be cache-aligned. Kmalloc makes
+ * sure the allocated buffer is DMA-safe in that regard.
+ */
+ dev->cmd = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
+ if (!dev->cmd)
+ goto err_free_dev;
+
+ dev->resp = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
+ if (!dev->resp)
+ goto err_free_cmd;
+
INIT_LIST_HEAD(&dev->unlk_lst);
mutex_init(&dev->dev_lock);
dev->data = data;
dev->send_recv = send_recv;
if (check_opal_support(dev) != 0) {
pr_debug("Opal is not supported on this device\n");
- kfree(dev);
- return NULL;
+ goto err_free_resp;
}
return dev;
+
+err_free_resp:
+ kfree(dev->resp);
+
+err_free_cmd:
+ kfree(dev->cmd);
+
+err_free_dev:
+ kfree(dev);
+
+ return NULL;
}
EXPORT_SYMBOL(init_opal_dev);
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 4a2e91baabde..b4b80f50de91 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -259,6 +259,13 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
if (!type->setauthsize)
goto unlock;
err = type->setauthsize(ask->private, optlen);
+ break;
+ case ALG_SET_KEY_TYPE:
+ if (sock->state == SS_CONNECTED)
+ goto unlock;
+ if (!type->setkeytype)
+ goto unlock;
+ err = type->setkeytype(ask->private, optval, optlen);
}
unlock:
@@ -1029,9 +1036,13 @@ EXPORT_SYMBOL_GPL(af_alg_sendpage);
void af_alg_free_resources(struct af_alg_async_req *areq)
{
struct sock *sk = areq->sk;
+ struct af_alg_ctx *ctx;
af_alg_free_areq_sgls(areq);
sock_kfree_s(sk, areq, areq->areqlen);
+
+ ctx = alg_sk(sk)->private;
+ ctx->inflight = false;
}
EXPORT_SYMBOL_GPL(af_alg_free_resources);
@@ -1095,11 +1106,19 @@ EXPORT_SYMBOL_GPL(af_alg_poll);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen)
{
- struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
+ struct af_alg_ctx *ctx = alg_sk(sk)->private;
+ struct af_alg_async_req *areq;
+
+ /* Only one AIO request can be in flight. */
+ if (ctx->inflight)
+ return ERR_PTR(-EBUSY);
+ areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
if (unlikely(!areq))
return ERR_PTR(-ENOMEM);
+ ctx->inflight = true;
+
areq->areqlen = areqlen;
areq->sk = sk;
areq->last_rsgl = NULL;
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 7d5cf4939423..ceb0c2fb2b24 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -119,6 +119,12 @@ static int akcipher_default_op(struct akcipher_request *req)
return -ENOSYS;
}
+static int akcipher_default_set_key(struct crypto_akcipher *tfm,
+ const void *key, unsigned int keylen)
+{
+ return -ENOSYS;
+}
+
int crypto_register_akcipher(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
@@ -131,6 +137,8 @@ int crypto_register_akcipher(struct akcipher_alg *alg)
alg->encrypt = akcipher_default_op;
if (!alg->decrypt)
alg->decrypt = akcipher_default_op;
+ if (!alg->set_priv_key)
+ alg->set_priv_key = akcipher_default_set_key;
akcipher_prepare_alg(alg);
return crypto_register_alg(base);
diff --git a/crypto/algapi.c b/crypto/algapi.c
index fff52bc9d97d..afe148d0f72d 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -217,6 +217,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
}
if (!strcmp(q->cra_driver_name, alg->cra_name) ||
+ !strcmp(q->cra_driver_name, alg->cra_driver_name) ||
!strcmp(q->cra_name, alg->cra_driver_name))
goto err;
}
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 30069a92a9b2..80352e55829b 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -309,6 +309,12 @@ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
return crypto_skcipher_setkey(private, key, keylen);
}
+static int skcipher_setkeytype(void *private, const u8 *key,
+ unsigned int keylen)
+{
+ return crypto_skcipher_setkeytype(private, key, keylen);
+}
+
static void skcipher_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
@@ -368,6 +374,7 @@ static const struct af_alg_type algif_type_skcipher = {
.bind = skcipher_bind,
.release = skcipher_release,
.setkey = skcipher_setkey,
+ .setkeytype = skcipher_setkeytype,
.accept = skcipher_accept_parent,
.accept_nokey = skcipher_accept_parent_nokey,
.ops = &algif_skcipher_ops,
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index ce49820caa97..01e54450c846 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -79,16 +79,16 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
}
if (sinfo->msgdigest_len != sig->digest_size) {
- pr_debug("Sig %u: Invalid digest size (%u)\n",
- sinfo->index, sinfo->msgdigest_len);
+ pr_warn("Sig %u: Invalid digest size (%u)\n",
+ sinfo->index, sinfo->msgdigest_len);
ret = -EBADMSG;
goto error;
}
if (memcmp(sig->digest, sinfo->msgdigest,
sinfo->msgdigest_len) != 0) {
- pr_debug("Sig %u: Message digest doesn't match\n",
- sinfo->index);
+ pr_warn("Sig %u: Message digest doesn't match\n",
+ sinfo->index);
ret = -EKEYREJECTED;
goto error;
}
@@ -488,7 +488,7 @@ int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7,
const void *data, size_t datalen)
{
if (pkcs7->data) {
- pr_debug("Data already supplied\n");
+ pr_warn("Data already supplied\n");
return -EINVAL;
}
pkcs7->data = data;
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index e5fae4e838c0..2d57b0b3f9a4 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -255,9 +255,10 @@ int public_key_verify_signature(const struct public_key *pkey,
struct crypto_wait cwait;
struct crypto_akcipher *tfm;
struct akcipher_request *req;
- struct scatterlist src_sg[2];
+ struct scatterlist src_sg;
char alg_name[CRYPTO_MAX_ALG_NAME];
- char *key, *ptr;
+ char *buf, *ptr;
+ size_t buf_len;
int ret;
pr_devel("==>%s()\n", __func__);
@@ -281,28 +282,31 @@ int public_key_verify_signature(const struct public_key *pkey,
if (!req)
goto error_free_tfm;
- key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
- GFP_KERNEL);
- if (!key)
+ buf_len = max_t(size_t, pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
+ sig->s_size + sig->digest_size);
+
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
goto error_free_req;
- memcpy(key, pkey->key, pkey->keylen);
- ptr = key + pkey->keylen;
+ memcpy(buf, pkey->key, pkey->keylen);
+ ptr = buf + pkey->keylen;
ptr = pkey_pack_u32(ptr, pkey->algo);
ptr = pkey_pack_u32(ptr, pkey->paramlen);
memcpy(ptr, pkey->params, pkey->paramlen);
if (pkey->key_is_private)
- ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
+ ret = crypto_akcipher_set_priv_key(tfm, buf, pkey->keylen);
else
- ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
+ ret = crypto_akcipher_set_pub_key(tfm, buf, pkey->keylen);
if (ret)
- goto error_free_key;
+ goto error_free_buf;
- sg_init_table(src_sg, 2);
- sg_set_buf(&src_sg[0], sig->s, sig->s_size);
- sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
- akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
+ memcpy(buf, sig->s, sig->s_size);
+ memcpy(buf + sig->s_size, sig->digest, sig->digest_size);
+
+ sg_init_one(&src_sg, buf, sig->s_size + sig->digest_size);
+ akcipher_request_set_crypt(req, &src_sg, NULL, sig->s_size,
sig->digest_size);
crypto_init_wait(&cwait);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
@@ -310,8 +314,8 @@ int public_key_verify_signature(const struct public_key *pkey,
crypto_req_done, &cwait);
ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
-error_free_key:
- kfree(key);
+error_free_buf:
+ kfree(buf);
error_free_req:
akcipher_request_free(req);
error_free_tfm:
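
A minimal standalone sketch of the pattern the hunk above switches to: copying two regions into one contiguous allocation so a single scatterlist entry (sg_init_one) can describe the whole input. Names are illustrative, not from the patch.

	#include <linux/scatterlist.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static void *pack_single_sg(struct scatterlist *sg,
				    const void *a, size_t a_len,
				    const void *b, size_t b_len)
	{
		u8 *buf = kmalloc(a_len + b_len, GFP_KERNEL);

		if (!buf)
			return NULL;
		memcpy(buf, a, a_len);
		memcpy(buf + a_len, b, b_len);
		sg_init_one(sg, buf, a_len + b_len);	/* one entry, no table */
		return buf;		/* caller kfree()s after the operation */
	}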
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index cc9dbcecaaca..0701bb161b63 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -74,7 +74,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
break;
default:
- pr_debug("Unknown PEOPT magic = %04hx\n", pe32->magic);
+ pr_warn("Unknown PEOPT magic = %04hx\n", pe32->magic);
return -ELIBBAD;
}
@@ -95,7 +95,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
ctx->certs_size = ddir->certs.size;
if (!ddir->certs.virtual_address || !ddir->certs.size) {
- pr_debug("Unsigned PE binary\n");
+ pr_warn("Unsigned PE binary\n");
return -ENODATA;
}
@@ -127,7 +127,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
unsigned len;
if (ctx->sig_len < sizeof(wrapper)) {
- pr_debug("Signature wrapper too short\n");
+ pr_warn("Signature wrapper too short\n");
return -ELIBBAD;
}
@@ -135,19 +135,23 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
pr_debug("sig wrapper = { %x, %x, %x }\n",
wrapper.length, wrapper.revision, wrapper.cert_type);
- /* Both pesign and sbsign round up the length of certificate table
- * (in optional header data directories) to 8 byte alignment.
+ /* sbsign rounds up the length of the certificate table (in optional
+ * header data directories) to 8 byte alignment. However, the PE
+ * specification states that while entries are 8-byte aligned, this is
+ * not included in their length, and as a result, pesign has not
+ * rounded up since 0.110.
*/
- if (round_up(wrapper.length, 8) != ctx->sig_len) {
- pr_debug("Signature wrapper len wrong\n");
+ if (wrapper.length > ctx->sig_len) {
+ pr_warn("Signature wrapper bigger than sig len (%x > %x)\n",
+ wrapper.length, ctx->sig_len);
return -ELIBBAD;
}
if (wrapper.revision != WIN_CERT_REVISION_2_0) {
- pr_debug("Signature is not revision 2.0\n");
+ pr_warn("Signature is not revision 2.0\n");
return -ENOTSUPP;
}
if (wrapper.cert_type != WIN_CERT_TYPE_PKCS_SIGNED_DATA) {
- pr_debug("Signature certificate type is not PKCS\n");
+ pr_warn("Signature certificate type is not PKCS\n");
return -ENOTSUPP;
}
@@ -160,7 +164,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
ctx->sig_offset += sizeof(wrapper);
ctx->sig_len -= sizeof(wrapper);
if (ctx->sig_len < 4) {
- pr_debug("Signature data missing\n");
+ pr_warn("Signature data missing\n");
return -EKEYREJECTED;
}
@@ -194,7 +198,7 @@ check_len:
return 0;
}
not_pkcs7:
- pr_debug("Signature data not PKCS#7\n");
+ pr_warn("Signature data not PKCS#7\n");
return -ELIBBAD;
}
@@ -337,8 +341,8 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
digest_size = crypto_shash_digestsize(tfm);
if (digest_size != ctx->digest_len) {
- pr_debug("Digest size mismatch (%zx != %x)\n",
- digest_size, ctx->digest_len);
+ pr_warn("Digest size mismatch (%zx != %x)\n",
+ digest_size, ctx->digest_len);
ret = -EBADMSG;
goto error_no_desc;
}
@@ -369,7 +373,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
* PKCS#7 certificate.
*/
if (memcmp(digest, ctx->digest, ctx->digest_len) != 0) {
- pr_debug("Digest mismatch\n");
+ pr_warn("Digest mismatch\n");
ret = -EKEYREJECTED;
} else {
pr_debug("The digests match!\n");
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index d964cc82b69c..4711dace329e 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -129,6 +129,11 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0)
goto out;
+ if (cert->unsupported_sig) {
+ ret = 0;
+ goto out;
+ }
+
ret = public_key_verify_signature(cert->pub, cert->sig);
if (ret < 0) {
if (ret == -ENOPKG) {
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 48a33817de11..c051723d9cec 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -403,6 +403,14 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
return cipher->setkey(tfm, key, keylen);
}
+static int setkeytype(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
+
+ return cipher->setkeytype(tfm, key, keylen);
+}
+
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
@@ -473,6 +481,7 @@ static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
unsigned long addr;
crt->setkey = setkey;
+ crt->setkeytype = setkeytype;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 9329d9dcc210..df80752fb649 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1515,6 +1515,14 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
return 0;
drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
+ if (IS_ERR(drbg->jent)) {
+ const int err = PTR_ERR(drbg->jent);
+
+ drbg->jent = NULL;
+ if (fips_enabled)
+ return err;
+ pr_info("DRBG: Continuing without Jitter RNG\n");
+ }
return 0;
}
@@ -1570,14 +1578,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
if (ret)
goto free_everything;
- if (IS_ERR(drbg->jent)) {
- ret = PTR_ERR(drbg->jent);
- drbg->jent = NULL;
- if (fips_enabled || ret != -ENOENT)
- goto free_everything;
- pr_info("DRBG: Continuing without Jitter RNG\n");
- }
-
reseed = false;
}
diff --git a/crypto/essiv.c b/crypto/essiv.c
index a8befc8fb06e..aa6b89e91ac8 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -188,8 +188,12 @@ static void essiv_aead_done(struct crypto_async_request *areq, int err)
struct aead_request *req = areq->data;
struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
- if (rctx->assoc)
- kfree(rctx->assoc);
+ if (err == -EINPROGRESS)
+ goto out;
+
+ kfree(rctx->assoc);
+
+out:
aead_request_complete(req, err);
}
@@ -265,7 +269,7 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc)
err = enc ? crypto_aead_encrypt(subreq) :
crypto_aead_decrypt(subreq);
- if (rctx->assoc && err != -EINPROGRESS)
+ if (rctx->assoc && err != -EINPROGRESS && err != -EBUSY)
kfree(rctx->assoc);
return err;
}
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 276d2fd9e911..63e64164900e 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -118,6 +118,8 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
+ if (err == -EBUSY)
+ return -EAGAIN;
return err;
}
@@ -165,6 +167,8 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
+ if (err == -EBUSY)
+ return -EAGAIN;
return err;
}
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 9cbafaf6dd85..5b4b12b8a71a 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -213,16 +213,14 @@ static void pkcs1pad_encrypt_sign_complete_cb(
struct crypto_async_request *child_async_req, int err)
{
struct akcipher_request *req = child_async_req->data;
- struct crypto_async_request async_req;
if (err == -EINPROGRESS)
- return;
+ goto out;
+
+ err = pkcs1pad_encrypt_sign_complete(req, err);
- async_req.data = req->base.data;
- async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
- async_req.flags = child_async_req->flags;
- req->base.complete(&async_req,
- pkcs1pad_encrypt_sign_complete(req, err));
+out:
+ akcipher_request_complete(req, err);
}
static int pkcs1pad_encrypt(struct akcipher_request *req)
@@ -331,15 +329,14 @@ static void pkcs1pad_decrypt_complete_cb(
struct crypto_async_request *child_async_req, int err)
{
struct akcipher_request *req = child_async_req->data;
- struct crypto_async_request async_req;
if (err == -EINPROGRESS)
- return;
+ goto out;
+
+ err = pkcs1pad_decrypt_complete(req, err);
- async_req.data = req->base.data;
- async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
- async_req.flags = child_async_req->flags;
- req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
+out:
+ akcipher_request_complete(req, err);
}
static int pkcs1pad_decrypt(struct akcipher_request *req)
@@ -511,15 +508,14 @@ static void pkcs1pad_verify_complete_cb(
struct crypto_async_request *child_async_req, int err)
{
struct akcipher_request *req = child_async_req->data;
- struct crypto_async_request async_req;
if (err == -EINPROGRESS)
- return;
+ goto out;
- async_req.data = req->base.data;
- async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
- async_req.flags = child_async_req->flags;
- req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
+ err = pkcs1pad_verify_complete(req, err);
+
+out:
+ akcipher_request_complete(req, err);
}
/*
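
The three callback hunks above repeat one convention; a minimal sketch, with a hypothetical do_postprocess() helper, of what such a callback must do:

	static void example_complete_cb(struct crypto_async_request *child, int err)
	{
		struct akcipher_request *req = child->data;

		/* -EINPROGRESS only announces that a backlogged request has
		 * started; the real completion comes later, so skip the
		 * post-processing for now and just forward the notification.
		 */
		if (err == -EINPROGRESS)
			goto out;

		err = do_postprocess(req, err);	/* hypothetical helper */
	out:
		akcipher_request_complete(req, err);
	}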
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 4d50750d01c6..ec849790f728 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -124,6 +124,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
struct crypto_scomp *scomp = *tfm_ctx;
void **ctx = acomp_request_ctx(req);
struct scomp_scratch *scratch;
+ unsigned int dlen;
int ret;
if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
@@ -135,6 +136,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
req->dlen = SCOMP_SCRATCH_SIZE;
+ dlen = req->dlen;
+
scratch = raw_cpu_ptr(&scomp_scratch);
spin_lock(&scratch->lock);
@@ -152,6 +155,9 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
ret = -ENOMEM;
goto out;
}
+ } else if (req->dlen > dlen) {
+ ret = -ENOSPC;
+ goto out;
}
scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
1);
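
A minimal sketch of the guard this hunk adds, assuming a hypothetical do_decompress() that may enlarge req->dlen: snapshot the caller's limit first, then refuse to copy out more than was asked for.

	unsigned int limit = req->dlen;	/* caller's output capacity */

	ret = do_decompress(req);	/* hypothetical; may grow req->dlen */
	if (!ret && req->dlen > limit)
		ret = -ENOSPC;		/* result would not fit */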
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 96d222c32acc..ae1d67d03625 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -25,7 +25,7 @@ static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
struct aead_request *subreq = aead_request_ctx(req);
struct crypto_aead *geniv;
- if (err == -EINPROGRESS)
+ if (err == -EINPROGRESS || err == -EBUSY)
return;
if (err)
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 22753c1c7202..91aa87c9fbd1 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -617,6 +617,23 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
return 0;
}
+static int skcipher_setkeytype_blkcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_blkcipher *blkcipher = *ctx;
+ int err;
+
+ crypto_blkcipher_clear_flags(blkcipher, ~0);
+ crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_blkcipher_setkeytype(blkcipher, key, keylen);
+ crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
+ CRYPTO_TFM_RES_MASK);
+
+ return err;
+}
+
static int skcipher_crypt_blkcipher(struct skcipher_request *req,
int (*crypt)(struct blkcipher_desc *,
struct scatterlist *,
@@ -683,6 +700,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
tfm->exit = crypto_exit_skcipher_ops_blkcipher;
skcipher->setkey = skcipher_setkey_blkcipher;
+ skcipher->setkeytype = skcipher_setkeytype_blkcipher;
skcipher->encrypt = skcipher_encrypt_blkcipher;
skcipher->decrypt = skcipher_decrypt_blkcipher;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 0cece1f883eb..12dab10d36b2 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1281,15 +1281,6 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
goto out_free_tfm;
}
-
- for (i = 0; i < num_mb; ++i)
- if (testmgr_alloc_buf(data[i].xbuf)) {
- while (i--)
- testmgr_free_buf(data[i].xbuf);
- goto out_free_tfm;
- }
-
-
for (i = 0; i < num_mb; ++i) {
data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!data[i].req) {
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index 91d0b0fc392b..5dc91aa0ed61 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -12,6 +12,7 @@
#include <linux/ratelimit.h>
#include <linux/edac.h>
#include <linux/ras.h>
+#include <acpi/ghes.h>
#include <asm/cpu.h>
#include <asm/mce.h>
@@ -140,8 +141,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
int cpu = mce->extcpu;
struct acpi_hest_generic_status *estatus, *tmp;
struct acpi_hest_generic_data *gdata;
- const guid_t *fru_id = &guid_null;
- char *fru_text = "";
+ const guid_t *fru_id;
+ char *fru_text;
guid_t *sec_type;
static u32 err_seq;
@@ -162,17 +163,23 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
/* log event via trace */
err_seq++;
- gdata = (struct acpi_hest_generic_data *)(tmp + 1);
- if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
- fru_id = (guid_t *)gdata->fru_id;
- if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
- fru_text = gdata->fru_text;
- sec_type = (guid_t *)gdata->section_type;
- if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
- struct cper_sec_mem_err *mem = (void *)(gdata + 1);
- if (gdata->error_data_length >= sizeof(*mem))
- trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
- (u8)gdata->error_severity);
+ apei_estatus_for_each_section(tmp, gdata) {
+ if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+ fru_id = (guid_t *)gdata->fru_id;
+ else
+ fru_id = &guid_null;
+ if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+ fru_text = gdata->fru_text;
+ else
+ fru_text = "";
+ sec_type = (guid_t *)gdata->section_type;
+ if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+ struct cper_sec_mem_err *mem = (void *)(gdata + 1);
+
+ if (gdata->error_data_length >= sizeof(*mem))
+ trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
+ (u8)gdata->error_severity);
+ }
}
out:
@@ -309,9 +316,10 @@ static void __exit extlog_exit(void)
{
edac_set_report_status(old_edac_report_status);
mce_unregister_decode_chain(&extlog_mce_dec);
- ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN;
- if (extlog_l1_addr)
+ if (extlog_l1_addr) {
+ ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN;
acpi_os_unmap_iomem(extlog_l1_addr, l1_size);
+ }
if (elog_addr)
acpi_os_unmap_iomem(elog_addr, elog_size);
release_mem_region(elog_base, elog_size);
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 433376e819bb..c79266b8029a 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -98,7 +98,7 @@ static void lpit_update_residency(struct lpit_residency_info *info,
struct acpi_lpit_native *lpit_native)
{
info->frequency = lpit_native->counter_frequency ?
- lpit_native->counter_frequency : tsc_khz * 1000;
+ lpit_native->counter_frequency : mul_u32_u32(tsc_khz, 1000U);
if (!info->frequency)
info->frequency = 1;
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 751ed38f2a10..b939a6736d0b 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -401,6 +401,9 @@ static int register_device_clock(struct acpi_device *adev,
if (!lpss_clk_dev)
lpt_register_clock_device();
+ if (IS_ERR(lpss_clk_dev))
+ return PTR_ERR(lpss_clk_dev);
+
clk_data = platform_get_drvdata(lpss_clk_dev);
if (!clk_data)
return -ENODEV;
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index c11d736a8db7..35de23ff50d6 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -88,7 +88,7 @@ static void round_robin_cpu(unsigned int tsk_index)
cpumask_var_t tmp;
int cpu;
unsigned long min_weight = -1;
- unsigned long uninitialized_var(preferred_cpu);
+ unsigned long preferred_cpu;
if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
return;
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 81cd47d29932..fd33fdbaffa9 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -498,6 +498,22 @@ static const struct dmi_system_id video_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"),
},
},
+ {
+ .callback = video_disable_backlight_sysfs_if,
+ .ident = "Toshiba Satellite Z830",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE Z830"),
+ },
+ },
+ {
+ .callback = video_disable_backlight_sysfs_if,
+ .ident = "Toshiba Portege Z830",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE Z830"),
+ },
+ },
/*
* Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
* but the IDs actually follow the Device ID Scheme.
@@ -552,6 +568,15 @@ static const struct dmi_system_id video_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
},
},
+ {
+ .callback = video_set_report_key_events,
+ .driver_data = (void *)((uintptr_t)REPORT_BRIGHTNESS_KEY_EVENTS),
+ .ident = "COLORFUL X15 AT 23",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "COLORFUL"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X15 AT 23"),
+ },
+ },
/*
* Some machines change the brightness themselves when a brightness
* hotkey gets pressed, despite us telling them not to. In this case
@@ -1768,12 +1793,12 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
return;
count++;
- acpi_get_parent(device->dev->handle, &acpi_parent);
-
- pdev = acpi_get_pci_dev(acpi_parent);
- if (pdev) {
- parent = &pdev->dev;
- pci_dev_put(pdev);
+ if (ACPI_SUCCESS(acpi_get_parent(device->dev->handle, &acpi_parent))) {
+ pdev = acpi_get_pci_dev(acpi_parent);
+ if (pdev) {
+ parent = &pdev->dev;
+ pci_dev_put(pdev);
+ }
}
memset(&props, 0, sizeof(struct backlight_properties));
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 59700433a96e..f919811156b1 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -3,7 +3,7 @@
# Makefile for ACPICA Core interpreter
#
-ccflags-y := -Os -D_LINUX -DBUILDING_ACPICA
+ccflags-y := -D_LINUX -DBUILDING_ACPICA
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
# use acpi.o to put all files here into acpi.o modparam namespace
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index 63fe30e86807..7f14403165dd 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -571,6 +571,9 @@ acpi_status acpi_db_display_objects(char *obj_type_arg, char *display_count_arg)
object_info =
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_object_info));
+ if (!object_info)
+ return (AE_NO_MEMORY);
+
/* Walk the namespace from the root */
(void)acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index f59b4d944f7f..603483f8332b 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -517,7 +517,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
if (!info) {
status = AE_NO_MEMORY;
- goto cleanup;
+ goto pop_walk_state;
}
info->parameters = &this_walk_state->operands[0];
@@ -529,7 +529,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
ACPI_FREE(info);
if (ACPI_FAILURE(status)) {
- goto cleanup;
+ goto pop_walk_state;
}
next_walk_state->method_nesting_depth =
@@ -575,6 +575,12 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(status);
+pop_walk_state:
+
+ /* On error, pop the walk state to be deleted from thread */
+
+ acpi_ds_pop_walk_state(thread);
+
cleanup:
/* On error, we must terminate the method properly */
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index de79f835a373..7979d52dfbc9 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -576,9 +576,14 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE(ds_init_aml_walk);
walk_state->parser_state.aml =
- walk_state->parser_state.aml_start = aml_start;
- walk_state->parser_state.aml_end =
- walk_state->parser_state.pkg_end = aml_start + aml_length;
+ walk_state->parser_state.aml_start =
+ walk_state->parser_state.aml_end =
+ walk_state->parser_state.pkg_end = aml_start;
+ /* Avoid undefined behavior: applying zero offset to null pointer */
+ if (aml_length != 0) {
+ walk_state->parser_state.aml_end += aml_length;
+ walk_state->parser_state.pkg_end += aml_length;
+ }
/* The next_op of the next_walk will be the beginning of the method */
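
A minimal sketch of the rule being applied, assuming nothing beyond standard C: adding even a zero offset to a null pointer is undefined behavior (pre-C2y), so the offset is only applied once the length is known to be nonzero.

	u8 *end = start;	/* start may be NULL when length == 0 */

	if (length != 0)
		end += length;	/* never apply an offset to a NULL base */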
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index cd576153257c..f1495b88bf13 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -23,8 +23,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width);
*
* The table is used to implement the Microsoft port access rules that
* first appeared in Windows XP. Some ports are always illegal, and some
- * ports are only illegal if the BIOS calls _OSI with a win_XP string or
- * later (meaning that the BIOS itelf is post-XP.)
+ * ports are only illegal if the BIOS calls _OSI with nothing newer than
+ * the specific _OSI strings.
*
* This provides ACPICA with the desired port protections and
* Microsoft compatibility.
@@ -145,7 +145,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
/* Port illegality may depend on the _OSI calls made by the BIOS */
- if (acpi_gbl_osi_data >= port_info->osi_dependency) {
+ if (port_info->osi_dependency == ACPI_ALWAYS_ILLEGAL ||
+ acpi_gbl_osi_data == port_info->osi_dependency) {
ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
"Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n",
ACPI_FORMAT_UINT64(address),
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index be86fea8e4d4..57e6488f6933 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -181,8 +181,9 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
* Try to fix if there was no return object. Warning if failed to fix.
*/
if (!return_object) {
- if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) {
- if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
+ if (expected_btypes) {
+ if (!(expected_btypes & ACPI_RTYPE_NONE) &&
+ package_index != ACPI_NOT_PACKAGE_ELEMENT) {
ACPI_WARN_PREDEFINED((AE_INFO,
info->full_pathname,
ACPI_WARN_ALWAYS,
@@ -196,14 +197,15 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
if (ACPI_SUCCESS(status)) {
return (AE_OK); /* Repair was successful */
}
- } else {
+ }
+
+ if (expected_btypes != ACPI_RTYPE_NONE) {
ACPI_WARN_PREDEFINED((AE_INFO,
info->full_pathname,
ACPI_WARN_ALWAYS,
"Missing expected return value"));
+ return (AE_AML_NO_RETURN_VALUE);
}
-
- return (AE_AML_NO_RETURN_VALUE);
}
}
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 43775c5ce17c..2f9b226ec4f6 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -603,7 +603,7 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
- AML_FLAGS_EXEC_0A_0T_1R),
+ AML_FLAGS_EXEC_0A_0T_1R | AML_NO_OPERAND_RESOLVE),
/* ACPI 5.0 opcodes */
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 1fb8327f3c3b..9c0b94d1c4ba 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -916,13 +916,6 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
status = acpi_ut_walk_package_tree(source_obj, dest_obj,
acpi_ut_copy_ielement_to_ielement,
walk_state);
- if (ACPI_FAILURE(status)) {
-
- /* On failure, delete the destination package object */
-
- acpi_ut_remove_reference(dest_obj);
- }
-
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
index 76b7539a37a9..a06f35528c9a 100644
--- a/drivers/acpi/apei/bert.c
+++ b/drivers/acpi/apei/bert.c
@@ -29,16 +29,26 @@
#undef pr_fmt
#define pr_fmt(fmt) "BERT: " fmt
+
+#define ACPI_BERT_PRINT_MAX_RECORDS 5
#define ACPI_BERT_PRINT_MAX_LEN 1024
static int bert_disable;
+/*
+ * Print "all" the error records in the BERT table, but avoid huge spam to
+ * the console if the BIOS included oversize records, or too many records.
+ * Skipping some records here does not lose anything because the full
+ * data is available to user tools in:
+ * /sys/firmware/acpi/tables/data/BERT
+ */
static void __init bert_print_all(struct acpi_bert_region *region,
unsigned int region_len)
{
struct acpi_hest_generic_status *estatus =
(struct acpi_hest_generic_status *)region;
int remain = region_len;
+ int printed = 0, skipped = 0;
u32 estatus_len;
while (remain >= sizeof(struct acpi_bert_region)) {
@@ -46,24 +56,26 @@ static void __init bert_print_all(struct acpi_bert_region *region,
if (remain < estatus_len) {
pr_err(FW_BUG "Truncated status block (length: %u).\n",
estatus_len);
- return;
+ break;
}
/* No more error records. */
if (!estatus->block_status)
- return;
+ break;
if (cper_estatus_check(estatus)) {
pr_err(FW_BUG "Invalid error record.\n");
- return;
+ break;
}
- pr_info_once("Error records from previous boot:\n");
- if (region_len < ACPI_BERT_PRINT_MAX_LEN)
+ if (estatus_len < ACPI_BERT_PRINT_MAX_LEN &&
+ printed < ACPI_BERT_PRINT_MAX_RECORDS) {
+ pr_info_once("Error records from previous boot:\n");
cper_estatus_print(KERN_INFO HW_ERR, estatus);
- else
- pr_info_once("Max print length exceeded, table data is available at:\n"
- "/sys/firmware/acpi/tables/data/BERT");
+ printed++;
+ } else {
+ skipped++;
+ }
/*
* Because the boot error source is "one-time polled" type,
@@ -75,6 +87,9 @@ static void __init bert_print_all(struct acpi_bert_region *region,
estatus = (void *)estatus + estatus_len;
remain -= estatus_len;
}
+
+ if (skipped)
+ pr_info(HW_ERR "Skipped %d error records\n", skipped);
}
static int __init setup_bert_disable(char *str)
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 553c89b0bdcb..13ae2ce9f50d 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -503,7 +503,6 @@ static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
node = iort_get_iort_node(dev->fwnode);
if (node)
return node;
-
/*
* if not, then it should be a platform device defined in
* DSDT/SSDT (with Named Component node in IORT)
@@ -522,22 +521,22 @@ static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
}
/**
- * iort_msi_map_rid() - Map a MSI requester ID for a device
+ * iort_msi_map_id() - Map a MSI input ID for a device
* @dev: The device for which the mapping is to be done.
- * @req_id: The device requester ID.
+ * @input_id: The device input ID.
*
- * Returns: mapped MSI RID on success, input requester ID otherwise
+ * Returns: mapped MSI ID on success, input ID otherwise
*/
-u32 iort_msi_map_rid(struct device *dev, u32 req_id)
+u32 iort_msi_map_id(struct device *dev, u32 input_id)
{
struct acpi_iort_node *node;
u32 dev_id;
node = iort_find_dev_node(dev);
if (!node)
- return req_id;
+ return input_id;
- iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE);
+ iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
return dev_id;
}
@@ -594,13 +593,13 @@ static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
/**
* iort_dev_find_its_id() - Find the ITS identifier for a device
* @dev: The device.
- * @req_id: Device's requester ID
+ * @id: Device's ID
* @idx: Index of the ITS identifier list.
* @its_id: ITS identifier.
*
* Returns: 0 on success, appropriate error value otherwise
*/
-static int iort_dev_find_its_id(struct device *dev, u32 req_id,
+static int iort_dev_find_its_id(struct device *dev, u32 id,
unsigned int idx, int *its_id)
{
struct acpi_iort_its_group *its;
@@ -610,7 +609,7 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
if (!node)
return -ENXIO;
- node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
+ node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
if (!node)
return -ENXIO;
@@ -633,19 +632,20 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
*
* Returns: the MSI domain for this device, NULL otherwise
*/
-struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
+struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
+ enum irq_domain_bus_token bus_token)
{
struct fwnode_handle *handle;
int its_id;
- if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
+ if (iort_dev_find_its_id(dev, id, 0, &its_id))
return NULL;
handle = iort_find_domain_token(its_id);
if (!handle)
return NULL;
- return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
+ return irq_find_matching_fwnode(handle, bus_token);
}
static void iort_set_device_domain(struct device *dev,
@@ -1393,7 +1393,10 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
static struct acpi_platform_list pmcg_plat_info[] __initdata = {
/* HiSilicon Hip08 Platform */
{"HISI ", "HIP08 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
- "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
+ "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08},
+ /* HiSilicon Hip09 Platform */
+ {"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
{ }
};
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 974c2df13da1..a49a09e3de1b 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -465,7 +465,7 @@ static int extract_package(struct acpi_battery *battery,
u8 *ptr = (u8 *)battery + offsets[i].offset;
if (element->type == ACPI_TYPE_STRING ||
element->type == ACPI_TYPE_BUFFER)
- strncpy(ptr, element->string.pointer, 32);
+ strscpy(ptr, element->string.pointer, 32);
else if (element->type == ACPI_TYPE_INTEGER) {
strncpy(ptr, (u8 *)&element->integer.value,
sizeof(u64));
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 0521b1d4c2fd..d24cbba14e91 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -626,33 +626,6 @@ int pcc_data_alloc(int pcc_ss_id)
return 0;
}
-/* Check if CPPC revision + num_ent combination is supported */
-static bool is_cppc_supported(int revision, int num_ent)
-{
- int expected_num_ent;
-
- switch (revision) {
- case CPPC_V2_REV:
- expected_num_ent = CPPC_V2_NUM_ENT;
- break;
- case CPPC_V3_REV:
- expected_num_ent = CPPC_V3_NUM_ENT;
- break;
- default:
- pr_debug("Firmware exports unsupported CPPC revision: %d\n",
- revision);
- return false;
- }
-
- if (expected_num_ent != num_ent) {
- pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
- num_ent, expected_num_ent, revision);
- return false;
- }
-
- return true;
-}
-
/*
* An example CPC table looks like the following.
*
@@ -748,7 +721,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
cpc_obj->type);
goto out_free;
}
- cpc_ptr->num_entries = num_ent;
/* Second entry should be revision. */
cpc_obj = &out_obj->package.elements[1];
@@ -759,10 +731,32 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
cpc_obj->type);
goto out_free;
}
- cpc_ptr->version = cpc_rev;
- if (!is_cppc_supported(cpc_rev, num_ent))
+ if (cpc_rev < CPPC_V2_REV) {
+ pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
+ pr->id);
goto out_free;
+ }
+
+ /*
+ * Disregard _CPC if the number of entries in the return package is not
+ * as expected, but support future revisions that are proper supersets of
+ * v3 and only cause more entries to be returned by _CPC.
+ */
+ if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
+ (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
+ (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
+ pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
+ num_ent, pr->id);
+ goto out_free;
+ }
+ if (cpc_rev > CPPC_V3_REV) {
+ num_ent = CPPC_V3_NUM_ENT;
+ cpc_rev = CPPC_V3_REV;
+ }
+
+ cpc_ptr->num_entries = num_ent;
+ cpc_ptr->version = cpc_rev;
/* Iterate through remaining entries in _CPC */
for (i = 2; i < num_ent; i++) {
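
The acceptance rule encoded by the compound condition above, restated as a minimal predicate (an illustrative sketch, not a helper the patch adds):

	static bool cpc_layout_ok(u32 rev, u32 nent)
	{
		if (rev == CPPC_V2_REV)
			return nent == CPPC_V2_NUM_ENT;	/* v2: exact count */
		if (rev == CPPC_V3_REV)
			return nent == CPPC_V3_NUM_ENT;	/* v3: exact count */
		/* future revisions: must be strict supersets of v3 */
		return rev > CPPC_V3_REV && nent > CPPC_V3_NUM_ENT;
	}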
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index fe8c7e79f472..566067a855a1 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -156,8 +156,8 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
return 0;
len = snprintf(modalias, size, "acpi:");
- if (len <= 0)
- return len;
+ if (len >= size)
+ return -ENOMEM;
size -= len;
@@ -210,8 +210,10 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
ACPI_FREE(buf.pointer);
- if (len <= 0)
- return len;
+ if (len >= size)
+ return -ENOMEM;
+
+ size -= len;
of_compatible = acpi_dev->data.of_compatible;
if (of_compatible->type == ACPI_TYPE_PACKAGE) {
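
A minimal sketch of the snprintf() subtlety both hunks correct, with illustrative buf/prefix names: the return value is the length the output would have had, so len >= size is the truncation test, while len <= 0 almost never fires for a non-empty format.

	int len = snprintf(buf, size, "%s", prefix);

	if (len >= size)	/* would-be length did not fit */
		return -ENOMEM;
	size -= len;		/* only now is len a safe byte count */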
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index e5b92958c299..c7baccd47b89 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1118,6 +1118,7 @@ static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
acpi_ec_remove_query_handlers(ec, false, query_bit);
+ flush_workqueue(ec_query_wq);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
@@ -2119,13 +2120,6 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
},
},
{
- .ident = "ThinkPad X1 Carbon 6th",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"),
- },
- },
- {
.ident = "ThinkPad X1 Yoga 3rd",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index e209081d644b..6a9490ad78ce 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -52,6 +52,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
int polarity)
{
struct irq_fwspec fwspec;
+ unsigned int irq;
if (WARN_ON(!acpi_gsi_domain_id)) {
pr_warn("GSI: No registered irqchip, giving up\n");
@@ -63,7 +64,11 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
fwspec.param_count = 2;
- return irq_create_fwspec_mapping(&fwspec);
+ irq = irq_create_fwspec_mapping(&fwspec);
+ if (!irq)
+ return -EINVAL;
+
+ return irq;
}
EXPORT_SYMBOL_GPL(acpi_register_gsi);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 0fe4f3ed72ca..793b8d9d749a 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3599,8 +3599,8 @@ void acpi_nfit_shutdown(void *data)
mutex_lock(&acpi_desc->init_mutex);
set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
- cancel_delayed_work_sync(&acpi_desc->dwork);
mutex_unlock(&acpi_desc->init_mutex);
+ cancel_delayed_work_sync(&acpi_desc->dwork);
/*
* Bounce the nvdimm bus lock to make sure any in-flight
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 47e43c949825..ed2f880b63b5 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -41,6 +41,8 @@ struct mcfg_fixup {
static struct mcfg_fixup mcfg_quirks[] = {
/* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */
+#ifdef CONFIG_ARM64
+
#define AL_ECAM(table_id, rev, seg, ops) \
{ "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops }
@@ -162,6 +164,7 @@ static struct mcfg_fixup mcfg_quirks[] = {
ALTRA_ECAM_QUIRK(1, 13),
ALTRA_ECAM_QUIRK(1, 14),
ALTRA_ECAM_QUIRK(1, 15),
+#endif /* ARM64 */
};
static char mcfg_oem_id[ACPI_OEM_ID_SIZE];
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 0ea5e677f00e..0aca77cb8301 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1531,6 +1531,8 @@ int acpi_processor_power_exit(struct acpi_processor *pr)
acpi_processor_registered--;
if (acpi_processor_registered == 0)
cpuidle_unregister_driver(&acpi_idle_driver);
+
+ kfree(dev);
}
pr->flags.power_setup_done = 0;
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 5909e8fa4013..6937aedc0a03 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -56,6 +56,8 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
acpi_status status = 0;
unsigned long long ppc = 0;
+ s32 qos_value;
+ int index;
int ret;
if (!pr)
@@ -75,17 +77,30 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
return -ENODEV;
}
+ index = ppc;
+
+ if (pr->performance_platform_limit == index ||
+ ppc >= pr->performance->state_count)
+ return 0;
+
pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
- (int)ppc, ppc ? "" : "not");
+ index, index ? "is" : "is not");
- pr->performance_platform_limit = (int)ppc;
+ pr->performance_platform_limit = index;
- if (ppc >= pr->performance->state_count ||
- unlikely(!freq_qos_request_active(&pr->perflib_req)))
+ if (unlikely(!freq_qos_request_active(&pr->perflib_req)))
return 0;
- ret = freq_qos_update_request(&pr->perflib_req,
- pr->performance->states[ppc].core_frequency * 1000);
+ /*
+ * If _PPC returns 0, it means that all of the available states can be
+ * used ("no limit").
+ */
+ if (index == 0)
+ qos_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+ else
+ qos_value = pr->performance->states[index].core_frequency * 1000;
+
+ ret = freq_qos_update_request(&pr->perflib_req, qos_value);
if (ret < 0) {
pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
pr->id, ret);
@@ -168,9 +183,16 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
if (!pr)
continue;
+ /*
+ * Reset performance_platform_limit in case there is a stale
+ * value in it, so as to make it match the "no limit" QoS value
+ * below.
+ */
+ pr->performance_platform_limit = 0;
+
ret = freq_qos_add_request(&policy->constraints,
- &pr->perflib_req,
- FREQ_QOS_MAX, INT_MAX);
+ &pr->perflib_req, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
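
A minimal sketch of the mapping the hunk implements (states[] frequencies are in MHz while freq QoS works in kHz; names are illustrative):

	s32 qos_value;

	if (index == 0)				/* _PPC == 0: no limit */
		qos_value = FREQ_QOS_MAX_DEFAULT_VALUE;
	else					/* cap at the state's frequency */
		qos_value = states[index].core_frequency * 1000; /* MHz -> kHz */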
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 41feb88ee92d..458b4d99fb4e 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -150,7 +150,7 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
unsigned int cpu;
for_each_cpu(cpu, policy->related_cpus) {
- struct acpi_processor *pr = per_cpu(processors, policy->cpu);
+ struct acpi_processor *pr = per_cpu(processors, cpu);
if (pr)
freq_qos_remove_request(&pr->thermal_req);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 1b0aeb832044..5906e247b9fa 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -152,10 +152,10 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
return acpi_nondev_subnode_data_ok(handle, link, list, parent);
}
-static int acpi_add_nondev_subnodes(acpi_handle scope,
- const union acpi_object *links,
- struct list_head *list,
- struct fwnode_handle *parent)
+static bool acpi_add_nondev_subnodes(acpi_handle scope,
+ const union acpi_object *links,
+ struct list_head *list,
+ struct fwnode_handle *parent)
{
bool ret = false;
int i;
@@ -646,6 +646,7 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
* @index: Index of the reference to return
* @num_args: Maximum number of arguments after each reference
* @args: Location to store the returned reference with optional arguments
+ * (may be NULL)
*
 * Find property with @name, verify that it is a package containing at least
* one object reference and if so, store the ACPI device object pointer to the
@@ -704,6 +705,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
if (ret)
return ret == -ENODEV ? -EINVAL : ret;
+ if (!args)
+ return 0;
+
args->fwnode = acpi_fwnode_handle(device);
args->nargs = 0;
return 0;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 48ca9a844f06..ca51d1558111 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -16,6 +16,7 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/irq.h>
+#include <linux/dmi.h>
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
@@ -380,21 +381,143 @@ unsigned int acpi_dev_get_irq_type(int triggering, int polarity)
}
EXPORT_SYMBOL_GPL(acpi_dev_get_irq_type);
-static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi)
+static const struct dmi_system_id medion_laptop[] = {
+ {
+ .ident = "MEDION P15651",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_BOARD_NAME, "M15T"),
+ },
+ },
+ { }
+};
+
+static const struct dmi_system_id asus_laptop[] = {
+ {
+ .ident = "Asus Vivobook K3402ZA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "K3402ZA"),
+ },
+ },
+ {
+ .ident = "Asus Vivobook K3502ZA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "K3502ZA"),
+ },
+ },
+ {
+ .ident = "Asus Vivobook S5402ZA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "S5402ZA"),
+ },
+ },
+ {
+ .ident = "Asus Vivobook S5602ZA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
+ },
+ },
+ {
+ .ident = "Asus ExpertBook B1402CBA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
+ },
+ },
+ {
+ .ident = "Asus ExpertBook B1502CBA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B1502CBA"),
+ },
+ },
+ {
+ .ident = "Asus ExpertBook B2402CBA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
+ },
+ },
+ {
+ /* TongFang GMxXGxx/TUXEDO Polaris 15 Gen5 AMD */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
+ },
+ },
+ {
+ /* Asus ExpertBook B1402CVA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
+ },
+ },
+ {
+ /* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "RP-15"),
+ },
+ },
+ {
+ /* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
+ },
+ },
+ {
+ .ident = "Asus ExpertBook B2502",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
+ },
+ },
+ { }
+};
+
+struct irq_override_cmp {
+ const struct dmi_system_id *system;
+ unsigned char irq;
+ unsigned char triggering;
+ unsigned char polarity;
+ unsigned char shareable;
+};
+
+static const struct irq_override_cmp skip_override_table[] = {
+ { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
+ { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
+};
+
+static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
+ u8 shareable)
{
- res->start = gsi;
- res->end = gsi;
- res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) {
+ const struct irq_override_cmp *entry = &skip_override_table[i];
+
+ if (dmi_check_system(entry->system) &&
+ entry->irq == gsi &&
+ entry->triggering == triggering &&
+ entry->polarity == polarity &&
+ entry->shareable == shareable)
+ return false;
+ }
+
+ return true;
}
static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
u8 triggering, u8 polarity, u8 shareable,
- bool legacy)
+ bool check_override)
{
int irq, p, t;
if (!valid_IRQ(gsi)) {
- acpi_dev_irqresource_disabled(res, gsi);
+ irqresource_disabled(res, gsi);
return;
}
@@ -408,7 +531,9 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
* using extended IRQ descriptors we take the IRQ configuration
* from _CRS directly.
*/
- if (legacy && !acpi_get_override_irq(gsi, &t, &p)) {
+ if (check_override &&
+ acpi_dev_irq_override(gsi, triggering, polarity, shareable) &&
+ !acpi_get_override_irq(gsi, &t, &p)) {
u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
@@ -426,7 +551,7 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
res->start = irq;
res->end = irq;
} else {
- acpi_dev_irqresource_disabled(res, gsi);
+ irqresource_disabled(res, gsi);
}
}
@@ -463,7 +588,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
*/
irq = &ares->data.irq;
if (index >= irq->interrupt_count) {
- acpi_dev_irqresource_disabled(res, 0);
+ irqresource_disabled(res, 0);
return false;
}
acpi_dev_get_irqresource(res, irq->interrupts[index],
@@ -473,7 +598,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
ext_irq = &ares->data.extended_irq;
if (index >= ext_irq->interrupt_count) {
- acpi_dev_irqresource_disabled(res, 0);
+ irqresource_disabled(res, 0);
return false;
}
if (is_gsi(ext_irq))
@@ -481,7 +606,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
ext_irq->triggering, ext_irq->polarity,
ext_irq->shareable, false);
else
- acpi_dev_irqresource_disabled(res, 0);
+ irqresource_disabled(res, 0);
break;
default:
res->flags = 0;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 5d4be80ee6cb..629e6044f623 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -322,18 +322,14 @@ static int acpi_scan_device_check(struct acpi_device *adev)
* again).
*/
if (adev->handler) {
- dev_warn(&adev->dev, "Already enumerated\n");
- return -EALREADY;
+ dev_dbg(&adev->dev, "Already enumerated\n");
+ return 0;
}
error = acpi_bus_scan(adev->handle);
if (error) {
dev_warn(&adev->dev, "Namespace scan failure\n");
return error;
}
- if (!adev->handler) {
- dev_warn(&adev->dev, "Enumeration failure\n");
- error = -ENODEV;
- }
} else {
error = acpi_scan_device_not_present(adev);
}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 34966128293b..b9d203569ac1 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -361,6 +361,14 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
},
},
+ {
+ .callback = init_nvs_save_s3,
+ .ident = "Lenovo G40-45",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80E1"),
+ },
+ },
/*
* ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
* the Low Power S0 Idle firmware interface (see
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 383c7029d3ce..e9c16b7ddd8b 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1153,8 +1153,6 @@ static int acpi_thermal_resume(struct device *dev)
return -EINVAL;
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
- if (!(&tz->trips.active[i]))
- break;
if (!tz->trips.active[i].flags.valid)
break;
tz->trips.active[i].flags.enabled = 1;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index de4142723ff4..be9c70806b62 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -311,12 +311,21 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ /* https://bugzilla.suse.com/show_bug.cgi?id=1208724 */
+ .callback = video_detect_force_native,
+ /* Lenovo Ideapad Z470 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Z470"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1187004 */
.callback = video_detect_force_native,
.ident = "Lenovo Ideapad Z570",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "102434U"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
},
},
{
@@ -387,7 +396,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
.callback = video_detect_force_native,
.ident = "Clevo NL5xRU",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
},
},
@@ -395,59 +403,139 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
.callback = video_detect_force_native,
.ident = "Clevo NL5xRU",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
},
},
{
.callback = video_detect_force_native,
.ident = "Clevo NL5xRU",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
},
},
{
.callback = video_detect_force_native,
- .ident = "Clevo NL5xRU",
+ .ident = "Clevo NL5xNU",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ },
+ },
+ /*
+ * The TongFang PF5PU1G, PF4NU1F, PF5NU1G, and PF5LUXG/TUXEDO BA15 Gen10,
+ * Pulse 14/15 Gen1, and Pulse 15 Gen2 have the same problem as the Clevo
+ * NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description
+ * above.
+ */
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang PF5PU1G",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PF5PU1G"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang PF4NU1F",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PF4NU1F"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang PF4NU1F",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
+ DMI_MATCH(DMI_BOARD_NAME, "PULSE1401"),
},
},
{
.callback = video_detect_force_native,
- .ident = "Clevo NL5xRU",
+ .ident = "TongFang PF5NU1G",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PF5NU1G"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang PF5NU1G",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
+ DMI_MATCH(DMI_BOARD_NAME, "PULSE1501"),
},
},
{
.callback = video_detect_force_native,
- .ident = "Clevo NL5xNU",
+ .ident = "TongFang PF5LUXG",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"),
+ },
+ },
+ /*
+ * More Tongfang devices with the same issue as the Clevo NL5xRU and
+ * NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description above.
+ */
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang GKxNRxx",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GKxNRxx"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A1650TI"),
},
},
{
.callback = video_detect_force_native,
- .ident = "Clevo NL5xNU",
+ .ident = "TongFang GKxNRxx",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A2060"),
},
},
{
.callback = video_detect_force_native,
- .ident = "Clevo NL5xNU",
+ .ident = "TongFang GKxNRxx",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A1650TI"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang GKxNRxx",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A2060"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang GMxNGxx",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxNGxx"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang GMxZGxx",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxZGxx"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang GMxRGxx",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
},
},
-
/*
* Desktops which falsely report a backlight and which our heuristics
* for this do not catch.
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 702284bcd467..252b0b43d50e 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -364,6 +364,7 @@ static void amba_device_release(struct device *dev)
{
struct amba_device *d = to_amba_device(dev);
+ of_node_put(d->dev.of_node);
if (d->res.parent)
release_resource(&d->res);
kfree(d);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index b9fb2a926944..07203fef29cb 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -840,6 +840,16 @@ binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
{
WARN_ON(!list_empty(&thread->waiting_thread_node));
binder_enqueue_work_ilocked(work, &thread->todo);
+
+ /* (e)poll-based threads require an explicit wakeup signal when
+ * queuing their own work; they rely on these events to consume
+ * messages without blocking on I/O. Without it, threads risk waiting
+ * indefinitely without handling the work.
+ */
+ if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+ thread->pid == current->pid && !thread->process_todo)
+ wake_up_interruptible_sync(&thread->wait);
+
thread->process_todo = true;
}
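
A minimal sketch of the rationale, with hypothetical names: a poll-driven consumer that queues work for itself is not sitting in the wait loop, so the enqueue path has to deliver the wakeup explicitly.

	/* Illustrative only; not binder's real types. */
	if (consumer_is_polling(t) &&		/* epoll, not ioctl read loop */
	    t == current_consumer() &&		/* it queued work for itself */
	    !t->has_pending_work)		/* first item: wait may be armed */
		wake_up_interruptible_sync(&t->wait);
	t->has_pending_work = true;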
@@ -1748,6 +1758,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
}
ret = binder_inc_ref_olocked(ref, strong, target_list);
*rdata = ref->data;
+ if (ret && ref == new_ref) {
+ /*
+ * Cleanup the failed reference here as the target
+ * could now be dead and have already released its
+ * references by now. Calling on the new reference
+ * with strong=0 and a tmp_refs will not decrement
+ * the node. The new_ref gets kfree'd below.
+ */
+ binder_cleanup_ref_olocked(new_ref);
+ ref = NULL;
+ }
+
binder_proc_unlock(proc);
if (new_ref && ref != new_ref)
/*
@@ -2001,15 +2023,21 @@ static void binder_cleanup_transaction(struct binder_transaction *t,
/**
* binder_get_object() - gets object and checks for valid metadata
* @proc: binder_proc owning the buffer
+ * @u: sender's user pointer to base of buffer
* @buffer: binder_buffer that we're parsing.
* @offset: offset in the @buffer at which to validate an object.
* @object: struct binder_object to read into
*
- * Return: If there's a valid metadata object at @offset in @buffer, the
+ * Copy the binder object at the given offset into @object. If @u is
+ * provided then the copy is from the sender's buffer. If not, then
+ * it is copied from the target's @buffer.
+ *
+ * Return: If there's a valid metadata object at @offset, the
* size of that object. Otherwise, it returns zero. The object
* is read into the struct binder_object pointed to by @object.
*/
static size_t binder_get_object(struct binder_proc *proc,
+ const void __user *u,
struct binder_buffer *buffer,
unsigned long offset,
struct binder_object *object)
@@ -2019,10 +2047,16 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
- binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
- offset, read_size))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr))
return 0;
+ if (u) {
+ if (copy_from_user(object, u + offset, read_size))
+ return 0;
+ } else {
+ if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
+ offset, read_size))
+ return 0;
+ }
/* Ok, now see if we read a complete object. */
hdr = &object->hdr;
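The same helper now serves both copy directions; the two call shapes used
later in this patch are, schematically:

	/* read from the target's kernel-side buffer (u == NULL) */
	object_size = binder_get_object(proc, NULL, b, object_offset, &object);

	/* read from the sender's user-space buffer */
	object_size = binder_get_object(target_proc, user_buffer,
					t->buffer, object_offset, &object);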
@@ -2095,7 +2129,7 @@ static struct binder_buffer_object *binder_validate_ptr(
b, buffer_offset,
sizeof(object_offset)))
return NULL;
- object_size = binder_get_object(proc, b, object_offset, object);
+ object_size = binder_get_object(proc, NULL, b, object_offset, object);
if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
return NULL;
if (object_offsetp)
@@ -2160,7 +2194,8 @@ static bool binder_validate_fixup(struct binder_proc *proc,
unsigned long buffer_offset;
struct binder_object last_object;
struct binder_buffer_object *last_bbo;
- size_t object_size = binder_get_object(proc, b, last_obj_offset,
+ size_t object_size = binder_get_object(proc, NULL, b,
+ last_obj_offset,
&last_object);
if (object_size != sizeof(*last_bbo))
return false;
@@ -2245,24 +2280,23 @@ static void binder_deferred_fd_close(int fd)
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_buffer *buffer,
- binder_size_t failed_at,
+ binder_size_t off_end_offset,
bool is_failure)
{
int debug_id = buffer->debug_id;
- binder_size_t off_start_offset, buffer_offset, off_end_offset;
+ binder_size_t off_start_offset, buffer_offset;
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d buffer release %d, size %zd-%zd, failed at %llx\n",
proc->pid, buffer->debug_id,
buffer->data_size, buffer->offsets_size,
- (unsigned long long)failed_at);
+ (unsigned long long)off_end_offset);
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);
off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
- off_end_offset = is_failure && failed_at ? failed_at :
- off_start_offset + buffer->offsets_size;
+
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
struct binder_object_header *hdr;
@@ -2273,7 +2307,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
buffer, buffer_offset,
sizeof(object_offset)))
- object_size = binder_get_object(proc, buffer,
+ object_size = binder_get_object(proc, NULL, buffer,
object_offset, &object);
if (object_size == 0) {
pr_err("transaction release %d bad object at offset %lld, size %zd\n",
@@ -2422,6 +2456,21 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
}
}
+/* Clean up all the objects in the buffer */
+static inline void binder_release_entire_buffer(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_buffer *buffer,
+ bool is_failure)
+{
+ binder_size_t off_end_offset;
+
+ off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
+ off_end_offset += buffer->offsets_size;
+
+ binder_transaction_buffer_release(proc, thread, buffer,
+ off_end_offset, is_failure);
+}
+
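A worked example of the end-offset arithmetic above, using hypothetical
sizes on a 64-bit kernel (sizeof(void *) == 8):

	/*
	 * data_size = 52, offsets_size = 16:
	 *   off_end_offset = ALIGN(52, 8) + 16 = 56 + 16 = 72
	 * so the offsets array occupies [56, 72) and the buffer is
	 * released, objects included, up to offset 72.
	 */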
static int binder_translate_binder(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
@@ -2611,16 +2660,266 @@ err_fd_not_accepted:
return ret;
}
-static int binder_translate_fd_array(struct binder_fd_array_object *fda,
+/**
+ * struct binder_ptr_fixup - data to be fixed-up in target buffer
+ * @offset:     offset in target buffer to fixup
+ * @skip_size:  bytes to skip in copy (fixup will be written later)
+ * @fixup_data: data to write at fixup offset
+ * @node:       list node
+ *
+ * This is used for the pointer fixup list (pf) which is created and consumed
+ * during binder_transaction() and is only accessed locally. No
+ * locking is necessary.
+ *
+ * The list is ordered by @offset.
+ */
+struct binder_ptr_fixup {
+ binder_size_t offset;
+ size_t skip_size;
+ binder_uintptr_t fixup_data;
+ struct list_head node;
+};
+
+/**
+ * struct binder_sg_copy - scatter-gather data to be copied
+ * @offset:       offset in target buffer
+ * @sender_uaddr: user address in source buffer
+ * @length:       bytes to copy
+ * @node:         list node
+ *
+ * This is used for the sg copy list (sgc) which is created and consumed
+ * during binder_transaction() and is only accessed locally. No
+ * locking is necessary.
+ *
+ * The list is ordered by @offset.
+ */
+struct binder_sg_copy {
+ binder_size_t offset;
+ const void __user *sender_uaddr;
+ size_t length;
+ struct list_head node;
+};
+
+/**
+ * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
+ * @alloc: binder_alloc associated with @buffer
+ * @buffer: binder buffer in target process
+ * @sgc_head: list_head of scatter-gather copy list
+ * @pf_head: list_head of pointer fixup list
+ *
+ * Processes all elements of @sgc_head, applying fixups from @pf_head
+ * and copying the scatter-gather data from the source process' user
+ * buffer to the target's buffer. It is expected that the list creation
+ * and processing all occurs during binder_transaction() so these lists
+ * are only accessed in local context.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ struct list_head *sgc_head,
+ struct list_head *pf_head)
+{
+ int ret = 0;
+ struct binder_sg_copy *sgc, *tmpsgc;
+ struct binder_ptr_fixup *tmppf;
+ struct binder_ptr_fixup *pf =
+ list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
+ node);
+
+ list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
+ size_t bytes_copied = 0;
+
+ while (bytes_copied < sgc->length) {
+ size_t copy_size;
+ size_t bytes_left = sgc->length - bytes_copied;
+ size_t offset = sgc->offset + bytes_copied;
+
+ /*
+ * We copy up to the fixup (pointed to by pf)
+ */
+ copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
+ : bytes_left;
+ if (!ret && copy_size)
+ ret = binder_alloc_copy_user_to_buffer(
+ alloc, buffer,
+ offset,
+ sgc->sender_uaddr + bytes_copied,
+ copy_size);
+ bytes_copied += copy_size;
+ if (copy_size != bytes_left) {
+ BUG_ON(!pf);
+ /* we stopped at a fixup offset */
+ if (pf->skip_size) {
+ /*
+ * we are just skipping. This is for
+ * BINDER_TYPE_FDA where the translated
+ * fds will be fixed up when we get
+ * to target context.
+ */
+ bytes_copied += pf->skip_size;
+ } else {
+ /* apply the fixup indicated by pf */
+ if (!ret)
+ ret = binder_alloc_copy_to_buffer(
+ alloc, buffer,
+ pf->offset,
+ &pf->fixup_data,
+ sizeof(pf->fixup_data));
+ bytes_copied += sizeof(pf->fixup_data);
+ }
+ list_del(&pf->node);
+ kfree(pf);
+ pf = list_first_entry_or_null(pf_head,
+ struct binder_ptr_fixup, node);
+ }
+ }
+ list_del(&sgc->node);
+ kfree(sgc);
+ }
+ list_for_each_entry_safe(pf, tmppf, pf_head, node) {
+ BUG_ON(pf->skip_size == 0);
+ list_del(&pf->node);
+ kfree(pf);
+ }
+ BUG_ON(!list_empty(sgc_head));
+
+ return ret > 0 ? -EINVAL : ret;
+}
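A worked trace of the copy/fixup interleave, with hypothetical list
contents:

	/*
	 * sgc_head: one block { offset = 64, length = 32 }
	 * pf_head:  one fixup { offset = 72, skip_size = 0 }
	 *
	 * Pass 1: copy_size = min(32, 72 - 64) = 8
	 *         -> copy sender bytes into [64, 72)
	 * Fixup:  write the 8-byte fixup_data at [72, 80),
	 *         bytes_copied = 16, fixup list now empty
	 * Pass 2: copy_size = bytes_left = 16
	 *         -> copy sender bytes into [80, 96)
	 */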
+
+/**
+ * binder_cleanup_deferred_txn_lists() - free specified lists
+ * @sgc_head: list_head of scatter-gather copy list
+ * @pf_head: list_head of pointer fixup list
+ *
+ * Called to clean up @sgc_head and @pf_head if there is an
+ * error.
+ */
+static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
+ struct list_head *pf_head)
+{
+ struct binder_sg_copy *sgc, *tmpsgc;
+ struct binder_ptr_fixup *pf, *tmppf;
+
+ list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
+ list_del(&sgc->node);
+ kfree(sgc);
+ }
+ list_for_each_entry_safe(pf, tmppf, pf_head, node) {
+ list_del(&pf->node);
+ kfree(pf);
+ }
+}
+
+/**
+ * binder_defer_copy() - queue a scatter-gather buffer for copy
+ * @sgc_head: list_head of scatter-gather copy list
+ * @offset: binder buffer offset in target process
+ * @sender_uaddr: user address in source process
+ * @length: bytes to copy
+ *
+ * Specify a scatter-gather block to be copied. The actual copy must
+ * be deferred until all the needed fixups are identified and queued.
+ * Then the copy and fixups are done together so un-translated values
+ * from the source are never visible in the target buffer.
+ *
+ * We are guaranteed that repeated calls to this function will have
+ * monotonically increasing @offset values so the list will naturally
+ * be ordered.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
+ const void __user *sender_uaddr, size_t length)
+{
+ struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
+
+ if (!bc)
+ return -ENOMEM;
+
+ bc->offset = offset;
+ bc->sender_uaddr = sender_uaddr;
+ bc->length = length;
+ INIT_LIST_HEAD(&bc->node);
+
+ /*
+ * We are guaranteed that the deferred copies are in-order
+ * so just add to the tail.
+ */
+ list_add_tail(&bc->node, sgc_head);
+
+ return 0;
+}
+
+/**
+ * binder_add_fixup() - queue a fixup to be applied to sg copy
+ * @pf_head: list_head of binder ptr fixup list
+ * @offset: binder buffer offset in target process
+ * @fixup: bytes to be copied for fixup
+ * @skip_size: bytes to skip when copying (fixup will be applied later)
+ *
+ * Add the specified fixup to a list ordered by @offset. When copying
+ * the scatter-gather buffers, the fixup will be copied instead of
+ * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
+ * will be applied later (in target process context), so we just skip
+ * the bytes specified by @skip_size. If @skip_size is 0, we copy the
+ * value in @fixup.
+ *
+ * This function is called *mostly* in @offset order, but there are
+ * exceptions. Since out-of-order inserts are relatively uncommon,
+ * we insert the new element by searching backward from the tail of
+ * the list.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
+ binder_uintptr_t fixup, size_t skip_size)
+{
+ struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+ struct binder_ptr_fixup *tmppf;
+
+ if (!pf)
+ return -ENOMEM;
+
+ pf->offset = offset;
+ pf->fixup_data = fixup;
+ pf->skip_size = skip_size;
+ INIT_LIST_HEAD(&pf->node);
+
+ /* Fixups are *mostly* added in-order, but there are some
+ * exceptions. Look backwards through list for insertion point.
+ */
+ list_for_each_entry_reverse(tmppf, pf_head, node) {
+ if (tmppf->offset < pf->offset) {
+ list_add(&pf->node, &tmppf->node);
+ return 0;
+ }
+ }
+ /*
+ * if we get here, then the new offset is the lowest so
+ * insert at the head
+ */
+ list_add(&pf->node, pf_head);
+ return 0;
+}
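The reverse scan, traced with hypothetical offsets:

	/*
	 * Existing list: 16 -> 64; insert a fixup at offset 40.
	 * Scan from tail: 64 < 40? no; 16 < 40? yes -> link after 16,
	 * giving 16 -> 40 -> 64. Only when no smaller offset exists
	 * does the new node land at the head.
	 */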
+
+static int binder_translate_fd_array(struct list_head *pf_head,
+ struct binder_fd_array_object *fda,
+ const void __user *sender_ubuffer,
struct binder_buffer_object *parent,
+ struct binder_buffer_object *sender_uparent,
struct binder_transaction *t,
struct binder_thread *thread,
struct binder_transaction *in_reply_to)
{
binder_size_t fdi, fd_buf_size;
binder_size_t fda_offset;
+ const void __user *sender_ufda_base;
struct binder_proc *proc = thread->proc;
- struct binder_proc *target_proc = t->to_proc;
+ int ret;
+
+ if (fda->num_fds == 0)
+ return 0;
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -2644,19 +2943,25 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
*/
fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
fda->parent_offset;
- if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
+ sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
+ fda->parent_offset;
+
+ if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
+ !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
proc->pid, thread->pid);
return -EINVAL;
}
+ ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
+ if (ret)
+ return ret;
+
for (fdi = 0; fdi < fda->num_fds; fdi++) {
u32 fd;
- int ret;
binder_size_t offset = fda_offset + fdi * sizeof(fd);
+ binder_size_t sender_uoffset = fdi * sizeof(fd);
- ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
- &fd, t->buffer,
- offset, sizeof(fd));
+ ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
if (!ret)
ret = binder_translate_fd(fd, offset, t, thread,
in_reply_to);
@@ -2666,7 +2971,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
return 0;
}
-static int binder_fixup_parent(struct binder_transaction *t,
+static int binder_fixup_parent(struct list_head *pf_head,
+ struct binder_transaction *t,
struct binder_thread *thread,
struct binder_buffer_object *bp,
binder_size_t off_start_offset,
@@ -2712,14 +3018,7 @@ static int binder_fixup_parent(struct binder_transaction *t,
}
buffer_offset = bp->parent_offset +
(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
- if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
- &bp->buffer, sizeof(bp->buffer))) {
- binder_user_error("%d:%d got transaction with invalid parent offset\n",
- proc->pid, thread->pid);
- return -EINVAL;
- }
-
- return 0;
+ return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
}
/**
@@ -2840,6 +3139,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t off_start_offset, off_end_offset;
binder_size_t off_min;
binder_size_t sg_buf_offset, sg_buf_end_offset;
+ binder_size_t user_offset = 0;
struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -2854,6 +3154,12 @@ static void binder_transaction(struct binder_proc *proc,
int t_debug_id = atomic_inc_return(&binder_last_id);
char *secctx = NULL;
u32 secctx_sz = 0;
+ struct list_head sgc_head;
+ struct list_head pf_head;
+ const void __user *user_buffer = (const void __user *)
+ (uintptr_t)tr->data.ptr.buffer;
+ INIT_LIST_HEAD(&sgc_head);
+ INIT_LIST_HEAD(&pf_head);
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
@@ -3167,19 +3473,6 @@ static void binder_transaction(struct binder_proc *proc,
if (binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
- t->buffer, 0,
- (const void __user *)
- (uintptr_t)tr->data.ptr.buffer,
- tr->data_size)) {
- binder_user_error("%d:%d got transaction with invalid data ptr\n",
- proc->pid, thread->pid);
- return_error = BR_FAILED_REPLY;
- return_error_param = -EFAULT;
- return_error_line = __LINE__;
- goto err_copy_data_failed;
- }
- if (binder_alloc_copy_user_to_buffer(
- &target_proc->alloc,
t->buffer,
ALIGN(tr->data_size, sizeof(void *)),
(const void __user *)
@@ -3222,6 +3515,7 @@ static void binder_transaction(struct binder_proc *proc,
size_t object_size;
struct binder_object object;
binder_size_t object_offset;
+ binder_size_t copy_size;
if (binder_alloc_copy_from_buffer(&target_proc->alloc,
&object_offset,
@@ -3233,8 +3527,27 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_offset;
}
- object_size = binder_get_object(target_proc, t->buffer,
- object_offset, &object);
+
+ /*
+ * Copy the source user buffer up to the next object
+ * that will be processed.
+ */
+ copy_size = object_offset - user_offset;
+ if (copy_size && (user_offset > object_offset ||
+ binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer, user_offset,
+ user_buffer + user_offset,
+ copy_size))) {
+ binder_user_error("%d:%d got transaction with invalid data ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
+ object_size = binder_get_object(target_proc, user_buffer,
+ t->buffer, object_offset, &object);
if (object_size == 0 || object_offset < off_min) {
binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
proc->pid, thread->pid,
@@ -3246,6 +3559,11 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_offset;
}
+ /*
+ * Set offset to the next buffer fragment to be
+ * copied
+ */
+ user_offset = object_offset + object_size;
hdr = &object.hdr;
off_min = object_offset + object_size;
@@ -3308,6 +3626,8 @@ static void binder_transaction(struct binder_proc *proc,
case BINDER_TYPE_FDA: {
struct binder_object ptr_object;
binder_size_t parent_offset;
+ struct binder_object user_object;
+ size_t user_parent_size;
struct binder_fd_array_object *fda =
to_binder_fd_array_object(hdr);
size_t num_valid = (buffer_offset - off_start_offset) /
@@ -3339,11 +3659,35 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_parent;
}
- ret = binder_translate_fd_array(fda, parent, t, thread,
- in_reply_to);
- if (ret < 0) {
+ /*
+ * We need to read the user version of the parent
+ * object to get the original user offset
+ */
+ user_parent_size =
+ binder_get_object(proc, user_buffer, t->buffer,
+ parent_offset, &user_object);
+ if (user_parent_size != sizeof(user_object.bbo)) {
+ binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
+ proc->pid, thread->pid,
+ user_parent_size,
+ sizeof(user_object.bbo));
return_error = BR_FAILED_REPLY;
- return_error_param = ret;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_bad_parent;
+ }
+ ret = binder_translate_fd_array(&pf_head, fda,
+ user_buffer, parent,
+ &user_object.bbo, t,
+ thread, in_reply_to);
+ if (!ret)
+ ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer,
+ object_offset,
+ fda, sizeof(*fda));
+ if (ret) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret > 0 ? -EINVAL : ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
@@ -3365,19 +3709,14 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_offset;
}
- if (binder_alloc_copy_user_to_buffer(
- &target_proc->alloc,
- t->buffer,
- sg_buf_offset,
- (const void __user *)
- (uintptr_t)bp->buffer,
- bp->length)) {
- binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
- proc->pid, thread->pid);
- return_error_param = -EFAULT;
+ ret = binder_defer_copy(&sgc_head, sg_buf_offset,
+ (const void __user *)(uintptr_t)bp->buffer,
+ bp->length);
+ if (ret) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
return_error_line = __LINE__;
- goto err_copy_data_failed;
+ goto err_translate_failed;
}
/* Fixup buffer pointer to target proc address space */
bp->buffer = (uintptr_t)
@@ -3386,7 +3725,8 @@ static void binder_transaction(struct binder_proc *proc,
num_valid = (buffer_offset - off_start_offset) /
sizeof(binder_size_t);
- ret = binder_fixup_parent(t, thread, bp,
+ ret = binder_fixup_parent(&pf_head, t,
+ thread, bp,
off_start_offset,
num_valid,
last_fixup_obj_off,
@@ -3413,6 +3753,30 @@ static void binder_transaction(struct binder_proc *proc,
goto err_bad_object_type;
}
}
+ /* Done processing objects, copy the rest of the buffer */
+ if (binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer, user_offset,
+ user_buffer + user_offset,
+ tr->data_size - user_offset)) {
+ binder_user_error("%d:%d got transaction with invalid data ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
+
+ ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
+ &sgc_head, &pf_head);
+ if (ret) {
+ binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
t->work.type = BINDER_WORK_TRANSACTION;
@@ -3479,6 +3843,7 @@ err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
+ binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
binder_free_txn_fixups(t);
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, NULL, t->buffer,
@@ -3589,7 +3954,7 @@ binder_free_buf(struct binder_proc *proc,
binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
- binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
+ binder_release_entire_buffer(proc, thread, buffer, is_failure);
binder_alloc_free_buf(&proc->alloc, buffer);
}
@@ -4822,7 +5187,7 @@ static __poll_t binder_poll(struct file *filp,
thread = binder_get_thread(proc);
if (!thread)
- return POLLERR;
+ return EPOLLERR;
binder_inner_proc_lock(thread->proc);
thread->looper |= BINDER_LOOPER_STATE_POLL;
@@ -6083,6 +6448,7 @@ const struct file_operations binder_fops = {
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
+ .may_pollfree = true,
};
static int __init init_binder_device(const char *name)
@@ -6199,6 +6565,7 @@ err_init_binder_device_failed:
err_alloc_device_names_failed:
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
+ binder_alloc_shrinker_exit();
return ret;
}
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index b5022a7f6bae..a331e9f82125 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -212,7 +212,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
mm = alloc->vma_vm_mm;
if (mm) {
- down_read(&mm->mmap_sem);
+ down_write(&mm->mmap_sem);
vma = alloc->vma;
}
@@ -271,8 +271,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
/* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
- up_read(&mm->mmap_sem);
- mmput(mm);
+ up_write(&mm->mmap_sem);
+ mmput_async(mm);
}
return 0;
@@ -304,8 +304,8 @@ err_page_ptr_cleared:
}
err_no_vma:
if (mm) {
- up_read(&mm->mmap_sem);
- mmput(mm);
+ up_write(&mm->mmap_sem);
+ mmput_async(mm);
}
return vma ? -ENOMEM : -ESRCH;
}
@@ -378,17 +378,17 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
alloc->pid, extra_buffers_size);
return ERR_PTR(-EINVAL);
}
- if (is_async &&
- alloc->free_async_space < size + sizeof(struct binder_buffer)) {
+
+ /* Pad 0-size buffers so they get assigned unique addresses */
+ size = max(size, sizeof(void *));
+
+ if (is_async && alloc->free_async_space < size) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd failed, no async space left\n",
alloc->pid, size);
return ERR_PTR(-ENOSPC);
}
- /* Pad 0-size buffers so they get assigned unique addresses */
- size = max(size, sizeof(void *));
-
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(!buffer->free);
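The accounting change above, with hypothetical numbers on a 64-bit
kernel:

	/*
	 * A zero-sized async buffer is padded to sizeof(void *) == 8,
	 * so it now charges 8 bytes against free_async_space; the
	 * sizeof(struct binder_buffer) overhead is no longer counted.
	 * The credit in binder_free_buf_locked() changes the same way,
	 * keeping the accounting balanced.
	 */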
@@ -488,7 +488,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
buffer->async_transaction = is_async;
buffer->extra_buffers_size = extra_buffers_size;
if (is_async) {
- alloc->free_async_space -= size + sizeof(struct binder_buffer);
+ alloc->free_async_space -= size;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_alloc_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
@@ -515,7 +515,7 @@ err_alloc_buf_struct_failed:
* is the sum of the three given sizes (each rounded up to
* pointer-sized boundary)
*
- * Return: The allocated buffer or %NULL if error
+ * Return: The allocated buffer or %ERR_PTR(-errno) if error
*/
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
@@ -613,8 +613,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
- alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
-
+ alloc->free_async_space += buffer_size;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_free_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
@@ -662,7 +661,7 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
/*
* We could eliminate the call to binder_alloc_clear_buf()
* from binder_alloc_deferred_release() by moving this to
- * binder_alloc_free_buf_locked(). However, that could
+ * binder_free_buf_locked(). However, that could
* increase contention for the alloc mutex if clear_on_free
* is used frequently for large buffers. The mutex is not
* needed for correctness here.
@@ -953,7 +952,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
goto err_mmget;
if (!down_read_trylock(&mm->mmap_sem))
goto err_down_read_mmap_sem_failed;
- vma = binder_alloc_get_vma(alloc);
+ vma = find_vma(mm, page_addr);
+ if (vma && vma != binder_alloc_get_vma(alloc))
+ goto err_invalid_vma;
list_lru_isolate(lru, item);
spin_unlock(lock);
@@ -979,6 +980,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
mutex_unlock(&alloc->mutex);
return LRU_REMOVED_RETRY;
+err_invalid_vma:
+ up_read(&mm->mmap_sem);
err_down_read_mmap_sem_failed:
mmput_async(mm);
err_mmget:
@@ -1037,6 +1040,12 @@ int binder_alloc_shrinker_init(void)
return ret;
}
+void binder_alloc_shrinker_exit(void)
+{
+ unregister_shrinker(&binder_shrinker);
+ list_lru_destroy(&binder_alloc_lru);
+}
+
/**
* check_buffer() - verify that buffer/offset is safe to access
* @alloc: binder_alloc for this proc
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 288d0f478aa3..02a19afd9506 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -122,6 +122,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
int is_async);
extern void binder_alloc_init(struct binder_alloc *alloc);
extern int binder_alloc_shrinker_init(void);
+extern void binder_alloc_shrinker_exit(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 6f572967b555..84c7519dddb1 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -48,6 +48,7 @@ enum {
enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
+ board_ahci_43bit_dma,
board_ahci_ign_iferr,
board_ahci_mobile,
board_ahci_nomsi,
@@ -81,6 +82,7 @@ enum board_ids {
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ahci_remove_one(struct pci_dev *dev);
static void ahci_shutdown_one(struct pci_dev *dev);
+static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
@@ -125,6 +127,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_43bit_dma] = {
+ AHCI_HFLAGS (AHCI_HFLAG_43BIT_ONLY),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_ign_iferr] = {
AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
.flags = AHCI_FLAG_COMMON,
@@ -560,11 +569,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
- /* Asmedia */
+ /* ASMedia */
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */
- { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
- { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci_43bit_dma }, /* ASM1061 */
+ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci_43bit_dma }, /* ASM1061/1062 */
{ PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */
{ PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */
@@ -617,6 +626,11 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
+ dev_info(&pdev->dev, "ASM1166 has only six ports\n");
+ hpriv->saved_port_map = 0x3f;
+ }
+
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
dev_info(&pdev->dev, "JMB361 has only one port\n");
hpriv->force_port_map = 1;
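The new quirk's mask, spelled out (values taken from the hunk above):

	/*
	 * saved_port_map = 0x3f = 0b0011_1111: ports 0-5 stay visible,
	 * masking off any additional ports the ASM1166 advertises
	 * beyond the six that physically exist.
	 */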
@@ -639,6 +653,25 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
ahci_save_initial_config(&pdev->dev, hpriv);
}
+static int ahci_pci_reset_controller(struct ata_host *host)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+ /*
+ * If platform firmware failed to enable ports, try to enable
+ * them here.
+ */
+ ahci_intel_pcs_quirk(pdev, hpriv);
+
+ return 0;
+}
+
static void ahci_pci_init_controller(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
@@ -841,7 +874,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
ahci_pci_init_controller(host);
@@ -876,7 +909,7 @@ static int ahci_pci_device_resume(struct device *dev)
ahci_mcp89_apple_enable(pdev);
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
@@ -891,11 +924,20 @@ static int ahci_pci_device_resume(struct device *dev)
#endif /* CONFIG_PM */
-static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
+static int ahci_configure_dma_masks(struct pci_dev *pdev,
+ struct ahci_host_priv *hpriv)
{
- const int dma_bits = using_dac ? 64 : 32;
+ int dma_bits;
int rc;
+ if (hpriv->cap & HOST_CAP_64) {
+ dma_bits = 64;
+ if (hpriv->flags & AHCI_HFLAG_43BIT_ONLY)
+ dma_bits = 43;
+ } else {
+ dma_bits = 32;
+ }
+
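+	/*
+	 * Illustrative, not part of this patch: the computed width is
+	 * ultimately applied with something like
+	 *   dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
+	 * where DMA_BIT_MASK(43) expands to ((1ULL << 43) - 1).
+	 */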
/*
* If the device fixup already set the dma_mask to some non-standard
* value, don't extend it here. This happens on STA2X11, for example.
@@ -1741,12 +1783,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
- /*
- * If platform firmware failed to enable ports, try to enable
- * them here.
- */
- ahci_intel_pcs_quirk(pdev, hpriv);
-
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ) {
pi.flags |= ATA_FLAG_NCQ;
@@ -1824,6 +1860,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else
dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
+ if (!(hpriv->cap & HOST_CAP_PART))
+ host->flags |= ATA_HOST_NO_PART;
+
+ if (!(hpriv->cap & HOST_CAP_SSC))
+ host->flags |= ATA_HOST_NO_SSC;
+
+ if (!(hpriv->cap2 & HOST_CAP2_SDS))
+ host->flags |= ATA_HOST_NO_DEVSLP;
+
if (pi.flags & ATA_FLAG_EM)
ahci_reset_em(host);
@@ -1852,11 +1897,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ahci_gtf_filter_workaround(host);
/* initialize adapter */
- rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
+ rc = ahci_configure_dma_masks(pdev, hpriv);
if (rc)
return rc;
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 732912cd4e08..bb1e52212f64 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -24,6 +24,7 @@
#include <linux/libata.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
+#include <linux/bits.h>
/* Enclosure Management Control */
#define EM_CTRL_MSG_TYPE 0x000f0000
@@ -54,12 +55,12 @@ enum {
AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
AHCI_CMD_TBL_AR_SZ +
(AHCI_RX_FIS_SZ * 16),
- AHCI_IRQ_ON_SG = (1 << 31),
- AHCI_CMD_ATAPI = (1 << 5),
- AHCI_CMD_WRITE = (1 << 6),
- AHCI_CMD_PREFETCH = (1 << 7),
- AHCI_CMD_RESET = (1 << 8),
- AHCI_CMD_CLR_BUSY = (1 << 10),
+ AHCI_IRQ_ON_SG = BIT(31),
+ AHCI_CMD_ATAPI = BIT(5),
+ AHCI_CMD_WRITE = BIT(6),
+ AHCI_CMD_PREFETCH = BIT(7),
+ AHCI_CMD_RESET = BIT(8),
+ AHCI_CMD_CLR_BUSY = BIT(10),
RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */
RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
@@ -77,37 +78,37 @@ enum {
HOST_CAP2 = 0x24, /* host capabilities, extended */
/* HOST_CTL bits */
- HOST_RESET = (1 << 0), /* reset controller; self-clear */
- HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
- HOST_MRSM = (1 << 2), /* MSI Revert to Single Message */
- HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
+ HOST_RESET = BIT(0), /* reset controller; self-clear */
+ HOST_IRQ_EN = BIT(1), /* global IRQ enable */
+ HOST_MRSM = BIT(2), /* MSI Revert to Single Message */
+ HOST_AHCI_EN = BIT(31), /* AHCI enabled */
/* HOST_CAP bits */
- HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
- HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
- HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
- HOST_CAP_PART = (1 << 13), /* Partial state capable */
- HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
- HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
- HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
- HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
- HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
- HOST_CAP_CLO = (1 << 24), /* Command List Override support */
- HOST_CAP_LED = (1 << 25), /* Supports activity LED */
- HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
- HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
- HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
- HOST_CAP_SNTF = (1 << 29), /* SNotification register */
- HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
- HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
+ HOST_CAP_SXS = BIT(5), /* Supports External SATA */
+ HOST_CAP_EMS = BIT(6), /* Enclosure Management support */
+ HOST_CAP_CCC = BIT(7), /* Command Completion Coalescing */
+ HOST_CAP_PART = BIT(13), /* Partial state capable */
+ HOST_CAP_SSC = BIT(14), /* Slumber state capable */
+ HOST_CAP_PIO_MULTI = BIT(15), /* PIO multiple DRQ support */
+ HOST_CAP_FBS = BIT(16), /* FIS-based switching support */
+ HOST_CAP_PMP = BIT(17), /* Port Multiplier support */
+ HOST_CAP_ONLY = BIT(18), /* Supports AHCI mode only */
+ HOST_CAP_CLO = BIT(24), /* Command List Override support */
+ HOST_CAP_LED = BIT(25), /* Supports activity LED */
+ HOST_CAP_ALPM = BIT(26), /* Aggressive Link PM support */
+ HOST_CAP_SSS = BIT(27), /* Staggered Spin-up */
+ HOST_CAP_MPS = BIT(28), /* Mechanical presence switch */
+ HOST_CAP_SNTF = BIT(29), /* SNotification register */
+ HOST_CAP_NCQ = BIT(30), /* Native Command Queueing */
+ HOST_CAP_64 = BIT(31), /* PCI DAC (64-bit DMA) support */
/* HOST_CAP2 bits */
- HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
- HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
- HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
- HOST_CAP2_SDS = (1 << 3), /* Support device sleep */
- HOST_CAP2_SADM = (1 << 4), /* Support aggressive DevSlp */
- HOST_CAP2_DESO = (1 << 5), /* DevSlp from slumber only */
+ HOST_CAP2_BOH = BIT(0), /* BIOS/OS handoff supported */
+ HOST_CAP2_NVMHCI = BIT(1), /* NVMHCI supported */
+ HOST_CAP2_APST = BIT(2), /* Automatic partial to slumber */
+ HOST_CAP2_SDS = BIT(3), /* Support device sleep */
+ HOST_CAP2_SADM = BIT(4), /* Support aggressive DevSlp */
+ HOST_CAP2_DESO = BIT(5), /* DevSlp from slumber only */
/* registers for each SATA port */
PORT_LST_ADDR = 0x00, /* command list DMA addr */
@@ -129,24 +130,24 @@ enum {
PORT_DEVSLP = 0x44, /* device sleep */
/* PORT_IRQ_{STAT,MASK} bits */
- PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
- PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
- PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
- PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
- PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
- PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
- PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
- PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
-
- PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
- PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
- PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
- PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
- PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
- PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
- PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
- PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
- PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
+ PORT_IRQ_COLD_PRES = BIT(31), /* cold presence detect */
+ PORT_IRQ_TF_ERR = BIT(30), /* task file error */
+ PORT_IRQ_HBUS_ERR = BIT(29), /* host bus fatal error */
+ PORT_IRQ_HBUS_DATA_ERR = BIT(28), /* host bus data error */
+ PORT_IRQ_IF_ERR = BIT(27), /* interface fatal error */
+ PORT_IRQ_IF_NONFATAL = BIT(26), /* interface non-fatal error */
+ PORT_IRQ_OVERFLOW = BIT(24), /* xfer exhausted available S/G */
+ PORT_IRQ_BAD_PMP = BIT(23), /* incorrect port multiplier */
+
+ PORT_IRQ_PHYRDY = BIT(22), /* PhyRdy changed */
+ PORT_IRQ_DEV_ILCK = BIT(7), /* device interlock */
+ PORT_IRQ_CONNECT = BIT(6), /* port connect change status */
+ PORT_IRQ_SG_DONE = BIT(5), /* descriptor processed */
+ PORT_IRQ_UNK_FIS = BIT(4), /* unknown FIS rx'd */
+ PORT_IRQ_SDB_FIS = BIT(3), /* Set Device Bits FIS rx'd */
+ PORT_IRQ_DMAS_FIS = BIT(2), /* DMA Setup FIS rx'd */
+ PORT_IRQ_PIOS_FIS = BIT(1), /* PIO Setup FIS rx'd */
+ PORT_IRQ_D2H_REG_FIS = BIT(0), /* D2H Register FIS rx'd */
PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
PORT_IRQ_IF_ERR |
@@ -162,34 +163,34 @@ enum {
PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
/* PORT_CMD bits */
- PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
- PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
- PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
- PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
- PORT_CMD_ESP = (1 << 21), /* External Sata Port */
- PORT_CMD_HPCP = (1 << 18), /* HotPlug Capable Port */
- PORT_CMD_PMP = (1 << 17), /* PMP attached */
- PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
- PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
- PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
- PORT_CMD_CLO = (1 << 3), /* Command list override */
- PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
- PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
- PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
-
- PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
- PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
- PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
- PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
+ PORT_CMD_ASP = BIT(27), /* Aggressive Slumber/Partial */
+ PORT_CMD_ALPE = BIT(26), /* Aggressive Link PM enable */
+ PORT_CMD_ATAPI = BIT(24), /* Device is ATAPI */
+ PORT_CMD_FBSCP = BIT(22), /* FBS Capable Port */
+	PORT_CMD_ESP		= BIT(21), /* External SATA Port */
+ PORT_CMD_HPCP = BIT(18), /* HotPlug Capable Port */
+ PORT_CMD_PMP = BIT(17), /* PMP attached */
+ PORT_CMD_LIST_ON = BIT(15), /* cmd list DMA engine running */
+ PORT_CMD_FIS_ON = BIT(14), /* FIS DMA engine running */
+ PORT_CMD_FIS_RX = BIT(4), /* Enable FIS receive DMA engine */
+ PORT_CMD_CLO = BIT(3), /* Command list override */
+ PORT_CMD_POWER_ON = BIT(2), /* Power up device */
+ PORT_CMD_SPIN_UP = BIT(1), /* Spin up device */
+ PORT_CMD_START = BIT(0), /* Enable port DMA engine */
+
+ PORT_CMD_ICC_MASK = (0xfu << 28), /* i/f ICC state mask */
+ PORT_CMD_ICC_ACTIVE = (0x1u << 28), /* Put i/f in active state */
+ PORT_CMD_ICC_PARTIAL = (0x2u << 28), /* Put i/f in partial state */
+ PORT_CMD_ICC_SLUMBER = (0x6u << 28), /* Put i/f in slumber state */
/* PORT_FBS bits */
PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
- PORT_FBS_SDE = (1 << 2), /* FBS single device error */
- PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
- PORT_FBS_EN = (1 << 0), /* Enable FBS */
+ PORT_FBS_SDE = BIT(2), /* FBS single device error */
+ PORT_FBS_DEC = BIT(1), /* FBS device error clear */
+ PORT_FBS_EN = BIT(0), /* Enable FBS */
/* PORT_DEVSLP bits */
PORT_DEVSLP_DM_OFFSET = 25, /* DITO multiplier offset */
@@ -197,52 +198,53 @@ enum {
PORT_DEVSLP_DITO_OFFSET = 15, /* DITO offset */
PORT_DEVSLP_MDAT_OFFSET = 10, /* Minimum assertion time */
PORT_DEVSLP_DETO_OFFSET = 2, /* DevSlp exit timeout */
- PORT_DEVSLP_DSP = (1 << 1), /* DevSlp present */
- PORT_DEVSLP_ADSE = (1 << 0), /* Aggressive DevSlp enable */
+ PORT_DEVSLP_DSP = BIT(1), /* DevSlp present */
+ PORT_DEVSLP_ADSE = BIT(0), /* Aggressive DevSlp enable */
/* hpriv->flags bits */
#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
- AHCI_HFLAG_NO_NCQ = (1 << 0),
- AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
- AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
- AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
- AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
- AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
- AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
- AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
- AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
- AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
- AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
- link offline */
- AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
- AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */
- AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */
- AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on
- port start (wait until
- error-handling stage) */
- AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */
- AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
+ AHCI_HFLAG_NO_NCQ = BIT(0),
+ AHCI_HFLAG_IGN_IRQ_IF_ERR = BIT(1), /* ignore IRQ_IF_ERR */
+ AHCI_HFLAG_IGN_SERR_INTERNAL = BIT(2), /* ignore SERR_INTERNAL */
+ AHCI_HFLAG_32BIT_ONLY = BIT(3), /* force 32bit */
+ AHCI_HFLAG_MV_PATA = BIT(4), /* PATA port */
+ AHCI_HFLAG_NO_MSI = BIT(5), /* no PCI MSI */
+ AHCI_HFLAG_NO_PMP = BIT(6), /* no PMP */
+ AHCI_HFLAG_SECT255 = BIT(8), /* max 255 sectors */
+ AHCI_HFLAG_YES_NCQ = BIT(9), /* force NCQ cap on */
+ AHCI_HFLAG_NO_SUSPEND = BIT(10), /* don't suspend */
+ AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = BIT(11), /* treat SRST timeout as
+ link offline */
+ AHCI_HFLAG_NO_SNTF = BIT(12), /* no sntf */
+ AHCI_HFLAG_NO_FPDMA_AA = BIT(13), /* no FPDMA AA */
+ AHCI_HFLAG_YES_FBS = BIT(14), /* force FBS cap on */
+ AHCI_HFLAG_DELAY_ENGINE = BIT(15), /* do not start engine on
+ port start (wait until
+ error-handling stage) */
+ AHCI_HFLAG_NO_DEVSLP = BIT(17), /* no device sleep */
+ AHCI_HFLAG_NO_FBS = BIT(18), /* no FBS */
#ifdef CONFIG_PCI_MSI
- AHCI_HFLAG_MULTI_MSI = (1 << 20), /* per-port MSI(-X) */
+ AHCI_HFLAG_MULTI_MSI = BIT(20), /* per-port MSI(-X) */
#else
/* compile out MSI infrastructure */
AHCI_HFLAG_MULTI_MSI = 0,
#endif
- AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */
- AHCI_HFLAG_YES_ALPM = (1 << 23), /* force ALPM cap on */
- AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read
- only registers */
- AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use
- SATA_MOBILE_LPM_POLICY
- as default lpm_policy */
- AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during
- suspend/resume */
- AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = (1 << 27), /* ignore -EOPNOTSUPP
- from phy_power_on() */
- AHCI_HFLAG_NO_SXS = (1 << 28), /* SXS not supported */
+ AHCI_HFLAG_WAKE_BEFORE_STOP = BIT(22), /* wake before DMA stop */
+ AHCI_HFLAG_YES_ALPM = BIT(23), /* force ALPM cap on */
+ AHCI_HFLAG_NO_WRITE_TO_RO = BIT(24), /* don't write to read
+ only registers */
+ AHCI_HFLAG_IS_MOBILE = BIT(25), /* mobile chipset, use
+ SATA_MOBILE_LPM_POLICY
+ as default lpm_policy */
+ AHCI_HFLAG_SUSPEND_PHYS = BIT(26), /* handle PHYs during
+ suspend/resume */
+ AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = BIT(27), /* ignore -EOPNOTSUPP
+ from phy_power_on() */
+ AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */
+ AHCI_HFLAG_43BIT_ONLY = BIT(29), /* 43bit DMA addr limit */
/* ap->flags bits */
@@ -254,26 +256,26 @@ enum {
PCS_7 = 0x94, /* 7+ port PCS (Denverton) */
/* em constants */
- EM_MAX_SLOTS = 8,
+ EM_MAX_SLOTS = SATA_PMP_MAX_PORTS,
EM_MAX_RETRY = 5,
/* em_ctl bits */
- EM_CTL_RST = (1 << 9), /* Reset */
- EM_CTL_TM = (1 << 8), /* Transmit Message */
- EM_CTL_MR = (1 << 0), /* Message Received */
- EM_CTL_ALHD = (1 << 26), /* Activity LED */
- EM_CTL_XMT = (1 << 25), /* Transmit Only */
- EM_CTL_SMB = (1 << 24), /* Single Message Buffer */
- EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */
- EM_CTL_SES = (1 << 18), /* SES-2 messages supported */
- EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */
- EM_CTL_LED = (1 << 16), /* LED messages supported */
+ EM_CTL_RST = BIT(9), /* Reset */
+ EM_CTL_TM = BIT(8), /* Transmit Message */
+ EM_CTL_MR = BIT(0), /* Message Received */
+ EM_CTL_ALHD = BIT(26), /* Activity LED */
+ EM_CTL_XMT = BIT(25), /* Transmit Only */
+ EM_CTL_SMB = BIT(24), /* Single Message Buffer */
+ EM_CTL_SGPIO = BIT(19), /* SGPIO messages supported */
+ EM_CTL_SES = BIT(18), /* SES-2 messages supported */
+ EM_CTL_SAFTE = BIT(17), /* SAF-TE messages supported */
+ EM_CTL_LED = BIT(16), /* LED messages supported */
/* em message type */
- EM_MSG_TYPE_LED = (1 << 0), /* LED */
- EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */
- EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */
- EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */
+ EM_MSG_TYPE_LED = BIT(0), /* LED */
+ EM_MSG_TYPE_SAFTE = BIT(1), /* SAF-TE */
+ EM_MSG_TYPE_SES2 = BIT(2), /* SES-2 */
+ EM_MSG_TYPE_SGPIO = BIT(3), /* SGPIO */
};
struct ahci_cmd_hdr {
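Why the BIT() conversion is more than cosmetic for the high bit, as a
standalone sketch:

	/*
	 * (1 << 31) shifts into the sign bit of a signed int, which is
	 * undefined behaviour in C. BIT(31) expands to (1UL << 31), a
	 * well-defined unsigned value, e.g. for HOST_AHCI_EN above.
	 */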
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index bfc617cc8ac5..8460b464f4db 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -1239,4 +1239,4 @@ module_platform_driver(imx_ahci_driver);
MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("ahci:imx");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index fec2e9754aed..61b5ba8dc1d2 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1199,6 +1199,26 @@ static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
return sprintf(buf, "%d\n", emp->blink_policy);
}
+static void ahci_port_clear_pending_irq(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ /* clear SError */
+ tmp = readl(port_mmio + PORT_SCR_ERR);
+ dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp);
+ writel(tmp, port_mmio + PORT_SCR_ERR);
+
+ /* clear port IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
+ if (tmp)
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+
+ writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT);
+}
+
static void ahci_port_init(struct device *dev, struct ata_port *ap,
int port_no, void __iomem *mmio,
void __iomem *port_mmio)
@@ -1213,18 +1233,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
if (rc)
dev_warn(dev, "%s (%d)\n", emsg, rc);
- /* clear SError */
- tmp = readl(port_mmio + PORT_SCR_ERR);
- VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
- writel(tmp, port_mmio + PORT_SCR_ERR);
-
- /* clear port IRQ */
- tmp = readl(port_mmio + PORT_IRQ_STAT);
- VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
- if (tmp)
- writel(tmp, port_mmio + PORT_IRQ_STAT);
-
- writel(1 << port_no, mmio + HOST_IRQ_STAT);
+ ahci_port_clear_pending_irq(ap);
/* mark esata ports */
tmp = readl(port_mmio + PORT_CMD);
@@ -1554,6 +1563,8 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
tf.command = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+ ahci_port_clear_pending_irq(ap);
+
rc = sata_link_hardreset(link, timing, deadline, online,
ahci_check_ready);
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 8a963d2a951d..c0ac25b80a1f 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -451,14 +451,24 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
}
}
- hpriv->nports = child_nodes = of_get_child_count(dev->of_node);
+ /*
+	 * Too many sub-nodes most likely means that something is wrong
+	 * with the firmware.
+ */
+ child_nodes = of_get_child_count(dev->of_node);
+ if (child_nodes > AHCI_MAX_PORTS) {
+ rc = -EINVAL;
+ goto err_out;
+ }
/*
* If no sub-node was found, we still need to set nports to
* one in order to be able to use the
* ahci_platform_[en|dis]able_[phys|regulators] functions.
*/
- if (!child_nodes)
+ if (child_nodes)
+ hpriv->nports = child_nodes;
+ else
hpriv->nports = 1;
hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index fc37e075f3e1..e2cf9859c67b 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3096,7 +3096,7 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
*/
if (spd > 1)
mask &= (1 << (spd - 1)) - 1;
- else
+ else if (link->sata_spd)
return -EINVAL;
/* were we already at the bottom? */
@@ -3981,10 +3981,23 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
case ATA_LPM_MED_POWER_WITH_DIPM:
case ATA_LPM_MIN_POWER_WITH_PARTIAL:
case ATA_LPM_MIN_POWER:
- if (ata_link_nr_enabled(link) > 0)
- /* no restrictions on LPM transitions */
+ if (ata_link_nr_enabled(link) > 0) {
+ /* assume no restrictions on LPM transitions */
scontrol &= ~(0x7 << 8);
- else {
+
+ /*
+ * If the controller does not support partial, slumber,
+ * or devsleep, then disallow these transitions.
+ */
+ if (link->ap->host->flags & ATA_HOST_NO_PART)
+ scontrol |= (0x1 << 8);
+
+ if (link->ap->host->flags & ATA_HOST_NO_SSC)
+ scontrol |= (0x2 << 8);
+
+ if (link->ap->host->flags & ATA_HOST_NO_DEVSLP)
+ scontrol |= (0x4 << 8);
+ } else {
/* empty port, power off */
scontrol &= ~0xf;
scontrol |= (0x1 << 2);
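A worked example of the new SControl IPM restrictions, for a hypothetical
host flagged ATA_HOST_NO_SSC | ATA_HOST_NO_DEVSLP:

	/*
	 * scontrol &= ~(0x7 << 8);   start with no restrictions
	 * scontrol |=  (0x2 << 8);   disallow slumber
	 * scontrol |=  (0x4 << 8);   disallow DevSleep
	 * -> IPM field = 0x6: only transitions to partial remain allowed.
	 */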
@@ -4542,6 +4555,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ /* These specific Pioneer models have LPM issues */
+ { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
+ { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
+
/* Crucial BX100 SSD 500GB has broken LPM support */
{ "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
@@ -5734,17 +5751,19 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
struct ata_link *link;
unsigned long flags;
- /* Previous resume operation might still be in
- * progress. Wait for PM_PENDING to clear.
+ spin_lock_irqsave(ap->lock, flags);
+
+ /*
+ * A previous PM operation might still be in progress. Wait for
+ * ATA_PFLAG_PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+ spin_unlock_irqrestore(ap->lock, flags);
ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+ spin_lock_irqsave(ap->lock, flags);
}
- /* request PM ops to EH */
- spin_lock_irqsave(ap->lock, flags);
-
+ /* Request PM operation to EH */
ap->pm_mesg = mesg;
ap->pflags |= ATA_PFLAG_PM_PENDING;
ata_for_each_link(link, ap, HOST_FIRST) {
@@ -5756,10 +5775,8 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
spin_unlock_irqrestore(ap->lock, flags);
- if (!async) {
+ if (!async)
ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
- }
}
/*
@@ -5925,7 +5942,7 @@ void ata_host_resume(struct ata_host *host)
#endif
const struct device_type ata_port_type = {
- .name = "ata_port",
+ .name = ATA_PORT_TYPE_NAME,
#ifdef CONFIG_PM
.pm = &ata_port_pm_ops,
#endif
@@ -6728,11 +6745,30 @@ static void ata_port_detach(struct ata_port *ap)
if (!ap->ops->error_handler)
goto skip_eh;
- /* tell EH we're leaving & flush EH */
+ /* Wait for any ongoing EH */
+ ata_port_wait_eh(ap);
+
+ mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
+
+ /* Remove scsi devices */
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ ata_for_each_dev(dev, link, ALL) {
+ if (dev->sdev) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ scsi_remove_device(dev->sdev);
+ spin_lock_irqsave(ap->lock, flags);
+ dev->sdev = NULL;
+ }
+ }
+ }
+
+ /* Tell EH to disable all devices */
ap->pflags |= ATA_PFLAG_UNLOADING;
ata_port_schedule_eh(ap);
+
spin_unlock_irqrestore(ap->lock, flags);
+ mutex_unlock(&ap->scsi_scan_mutex);
/* wait till EH commits suicide */
ata_port_wait_eh(ap);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c6a21a784ec6..fa3f08ca5f6c 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2329,6 +2329,7 @@ const char *ata_get_cmd_descript(u8 command)
{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
{ ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
{ ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
+ { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" },
{ ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
{ ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
{ ATA_CMD_PIO_READ, "READ SECTOR(S)" },
@@ -2421,7 +2422,7 @@ static void ata_eh_link_report(struct ata_link *link)
struct ata_eh_context *ehc = &link->eh_context;
struct ata_queued_cmd *qc;
const char *frozen, *desc;
- char tries_buf[6] = "";
+ char tries_buf[16] = "";
int tag, nr_failed = 0;
if (ehc->i.flags & ATA_EHI_QUIET)
@@ -2900,18 +2901,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
postreset(slave, classes);
}
- /*
- * Some controllers can't be frozen very well and may set spurious
- * error conditions during reset. Clear accumulated error
- * information and re-thaw the port if frozen. As reset is the
- * final recovery action and we cross check link onlineness against
- * device classification later, no hotplug event is lost by this.
- */
+ /* clear cached SError */
spin_lock_irqsave(link->ap->lock, flags);
- memset(&link->eh_info, 0, sizeof(link->eh_info));
+ link->eh_info.serror = 0;
if (slave)
- memset(&slave->eh_info, 0, sizeof(link->eh_info));
- ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+ slave->eh_info.serror = 0;
spin_unlock_irqrestore(link->ap->lock, flags);
if (ap->pflags & ATA_PFLAG_FROZEN)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 2b247014ba45..c621c98c6057 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -162,7 +162,7 @@ static ssize_t ata_scsi_park_show(struct device *device,
struct ata_link *link;
struct ata_device *dev;
unsigned long now;
- unsigned int uninitialized_var(msecs);
+ unsigned int msecs;
int rc = 0;
ap = ata_shost_to_port(sdev->host);
@@ -3036,18 +3036,36 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
return 0;
}
-static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
+static struct ata_device *ata_find_dev(struct ata_port *ap, unsigned int devno)
{
- if (!sata_pmp_attached(ap)) {
- if (likely(devno >= 0 &&
- devno < ata_link_max_devices(&ap->link)))
+ /*
+ * For the non-PMP case, ata_link_max_devices() returns 1 (SATA case),
+ * or 2 (IDE master + slave case). However, the former case includes
+ * libsas hosted devices which are numbered per scsi host, leading
+ * to devno potentially being larger than 0 but with each struct
+ * ata_device having its own struct ata_port and struct ata_link.
+ * To accommodate these, ignore devno and always use device number 0.
+ */
+ if (likely(!sata_pmp_attached(ap))) {
+ int link_max_devices = ata_link_max_devices(&ap->link);
+
+ if (link_max_devices == 1)
+ return &ap->link.device[0];
+
+ if (devno < link_max_devices)
return &ap->link.device[devno];
- } else {
- if (likely(devno >= 0 &&
- devno < ap->nr_pmp_links))
- return &ap->pmp_link[devno].device[0];
+
+ return NULL;
}
+ /*
+ * For PMP-attached devices, the device number corresponds to C
+	 * (channel) of SCSI [H:C:I:L], and selects the PMP link that
+	 * the device is attached to.
+ */
+ if (devno < ap->nr_pmp_links)
+ return &ap->pmp_link[devno].device[0];
+
return NULL;
}
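Concretely, the PMP branch maps the SCSI channel straight to a link, e.g.
for a hypothetical topology:

	/*
	 * SCSI address [0:2:0:0] on a port multiplier with 4 links:
	 * devno = 2 -> &ap->pmp_link[2].device[0];
	 * devno >= ap->nr_pmp_links -> NULL.
	 */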
@@ -4526,7 +4544,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
break;
case MAINTENANCE_IN:
- if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
+ if ((scsicmd[1] & 0x1f) == MI_REPORT_SUPPORTED_OPERATION_CODES)
ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
else
ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index c4f36312b8a4..9e49dab8dc78 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -266,6 +266,10 @@ void ata_tport_delete(struct ata_port *ap)
put_device(dev);
}
+static const struct device_type ata_port_sas_type = {
+ .name = ATA_PORT_TYPE_NAME,
+};
+
/** ata_tport_add - initialize a transport ATA port structure
*
* @parent: parent device
@@ -283,7 +287,10 @@ int ata_tport_add(struct device *parent,
struct device *dev = &ap->tdev;
device_initialize(dev);
- dev->type = &ata_port_type;
+ if (ap->flags & ATA_FLAG_SAS_HOST)
+ dev->type = &ata_port_sas_type;
+ else
+ dev->type = &ata_port_type;
dev->parent = parent;
ata_host_get(ap->host);
@@ -317,7 +324,6 @@ int ata_tport_add(struct device *parent,
tport_err:
transport_destroy_device(dev);
put_device(dev);
- ata_host_put(ap->host);
return error;
}
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index cd8090ad43e5..562635c58d31 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -30,6 +30,8 @@ enum {
ATA_DNXFER_QUIET = (1 << 31),
};
+#define ATA_PORT_TYPE_NAME "ata_port"
+
extern atomic_t ata_print_id;
extern int atapi_passthru16;
extern int libata_fua;
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 34cb104f6b43..bc30e2f305be 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -570,6 +570,7 @@ static struct platform_driver pata_ftide010_driver = {
};
module_platform_driver(pata_ftide010_driver);
+MODULE_DESCRIPTION("low level driver for Faraday Technology FTIDE010");
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 43bb224430d3..8892931ea867 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -82,6 +82,9 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
if (pnp_port_valid(idev, 1)) {
ctl_addr = devm_ioport_map(&idev->dev,
pnp_port_start(idev, 1), 1);
+ if (!ctl_addr)
+ return -ENOMEM;
+
ap->ioaddr.altstatus_addr = ctl_addr;
ap->ioaddr.ctl_addr = ctl_addr;
ap->ops = &isapnp_port_ops;
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index abc0e87ca1a8..43215a4c1e54 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -135,12 +135,12 @@ static void ixp4xx_setup_port(struct ata_port *ap,
static int ixp4xx_pata_probe(struct platform_device *pdev)
{
- unsigned int irq;
struct resource *cs0, *cs1;
struct ata_host *host;
struct ata_port *ap;
struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev);
int ret;
+ int irq;
cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index d91ba47f2fc4..4405d255e3aa 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -278,9 +278,10 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
outb(inb(0x1F4) & 0x07, 0x1F4);
rt = inb(0x1F3);
- rt &= 0x07 << (3 * adev->devno);
+ rt &= ~(0x07 << (3 * !adev->devno));
if (pio)
- rt |= (1 + 3 * pio) << (3 * adev->devno);
+ rt |= (1 + 3 * pio) << (3 * !adev->devno);
+ outb(rt, 0x1F3);
udelay(100);
outb(inb(0x1F2) | 0x01, 0x1F2);
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index 4b2ba813dcab..491853b9f085 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -261,7 +261,7 @@ static u8 ns87560_check_status(struct ata_port *ap)
* LOCKING:
* Inherited from caller.
*/
-void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+static void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index f793564f3d78..6fd54e968d10 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -435,6 +435,7 @@ static struct platform_driver gemini_sata_driver = {
};
module_platform_driver(gemini_sata_driver);
+MODULE_DESCRIPTION("low level driver for Cortina Systems Gemini SATA bridge");
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 363073e7b653..605e992d25df 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -2914,6 +2914,7 @@ close_card_oam(struct idt77252_dev *card)
recycle_rx_pool_skb(card, &vc->rcv.rx_pool);
}
+ kfree(vc);
}
}
}
@@ -2934,6 +2935,8 @@ open_card_ubr0(struct idt77252_dev *card)
vc->scq = alloc_scq(card, vc->class);
if (!vc->scq) {
printk("%s: can't get SCQ.\n", card->name);
+ kfree(card->vcs[0]);
+ card->vcs[0] = NULL;
return -ENOMEM;
}
@@ -2957,6 +2960,15 @@ open_card_ubr0(struct idt77252_dev *card)
return 0;
}
+static void
+close_card_ubr0(struct idt77252_dev *card)
+{
+ struct vc_map *vc = card->vcs[0];
+
+ free_scq(card, vc->scq);
+ kfree(vc);
+}
+
static int
idt77252_dev_open(struct idt77252_dev *card)
{
@@ -3006,6 +3018,7 @@ static void idt77252_dev_close(struct atm_dev *dev)
struct idt77252_dev *card = dev->dev_data;
u32 conf;
+ close_card_ubr0(card);
close_card_oam(card);
conf = SAR_CFG_RXPTH | /* enable receive path */
@@ -3766,6 +3779,7 @@ static void __exit idt77252_exit(void)
card = idt77252_chain;
dev = card->atmdev;
idt77252_chain = card->next;
+ del_timer_sync(&card->tst_timer);
if (dev->phy->stop)
dev->phy->stop(dev);
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 46990352b5d3..bfc889367d5e 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2290,19 +2290,21 @@ static int get_esi(struct atm_dev *dev)
static int reset_sar(struct atm_dev *dev)
{
IADEV *iadev;
- int i, error = 1;
+ int i, error;
unsigned int pci[64];
iadev = INPH_IA_DEV(dev);
- for(i=0; i<64; i++)
- if ((error = pci_read_config_dword(iadev->pci,
- i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
- return error;
+ for (i = 0; i < 64; i++) {
+ error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
+ if (error != PCIBIOS_SUCCESSFUL)
+ return error;
+ }
writel(0, iadev->reg+IPHASE5575_EXT_RESET);
- for(i=0; i<64; i++)
- if ((error = pci_write_config_dword(iadev->pci,
- i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
- return error;
+ for (i = 0; i < 64; i++) {
+ error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
+ if (error != PCIBIOS_SUCCESSFUL)
+ return error;
+ }
udelay(5);
return 0;
}
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index c32f7dd9879a..9f2148daf8ad 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -449,9 +449,9 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
struct sk_buff *skb;
unsigned int len;
- spin_lock(&card->cli_queue_lock);
+ spin_lock_bh(&card->cli_queue_lock);
skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
- spin_unlock(&card->cli_queue_lock);
+ spin_unlock_bh(&card->cli_queue_lock);
if(skb == NULL)
return sprintf(buf, "No data.\n");
@@ -956,14 +956,14 @@ static void pclose(struct atm_vcc *vcc)
struct pkt_hdr *header;
/* Remove any yet-to-be-transmitted packets from the pending queue */
- spin_lock(&card->tx_queue_lock);
+ spin_lock_bh(&card->tx_queue_lock);
skb_queue_walk_safe(&card->tx_queue[port], skb, tmpskb) {
if (SKB_CB(skb)->vcc == vcc) {
skb_unlink(skb, &card->tx_queue[port]);
solos_pop(vcc, skb);
}
}
- spin_unlock(&card->tx_queue_lock);
+ spin_unlock_bh(&card->tx_queue_lock);
skb = alloc_skb(sizeof(*header), GFP_KERNEL);
if (!skb) {
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 57f97b95a453..165eebe06e39 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -940,7 +940,7 @@ static int open_tx_first(struct atm_vcc *vcc)
vcc->qos.txtp.max_pcr >= ATM_OC3_PCR);
if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr;
else {
- int uninitialized_var(pcr);
+ int pcr;
if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU;
if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr,
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 503404f3280e..0dfe20d18b8d 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -538,4 +538,23 @@ void __init init_cpu_topology(void)
else if (of_have_populated_dt() && parse_dt_topology())
reset_cpu_topology();
}
+
+void store_cpu_topology(unsigned int cpuid)
+{
+ struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
+
+ if (cpuid_topo->package_id != -1)
+ goto topology_populated;
+
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = cpuid;
+ cpuid_topo->package_id = cpu_to_node(cpuid);
+
+ pr_debug("CPU%u: package %d core %d thread %d\n",
+ cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
+ cpuid_topo->thread_id);
+
+topology_populated:
+ update_siblings_masks(cpuid);
+}
#endif
diff --git a/drivers/base/class.c b/drivers/base/class.c
index d8a6a5864c2e..61784503ca40 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -191,6 +191,11 @@ int __class_register(struct class *cls, struct lock_class_key *key)
}
error = class_add_groups(class_get(cls), cls->class_groups);
class_put(cls);
+ if (error) {
+ kobject_del(&cp->subsys.kobj);
+ kfree_const(cp->subsys.kobj.name);
+ kfree(cp);
+ }
return error;
}
EXPORT_SYMBOL_GPL(__class_register);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 1b016fdd1a75..6fb31f489452 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -3399,6 +3399,50 @@ define_dev_printk_level(_dev_info, KERN_INFO);
#endif
+/**
+ * dev_err_probe - probe error check and log helper
+ * @dev: the pointer to the struct device
+ * @err: error value to test
+ * @fmt: printf-style format string
+ * @...: arguments as specified in the format string
+ *
+ * This helper implements a common pattern present in probe functions for
+ * error checking: print a debug or error message depending on whether the
+ * error value is -EPROBE_DEFER, and propagate the error upwards.
+ * It replaces the code sequence::
+ *
+ * if (err != -EPROBE_DEFER)
+ * dev_err(dev, ...);
+ * else
+ * dev_dbg(dev, ...);
+ * return err;
+ *
+ * with::
+ *
+ * return dev_err_probe(dev, err, ...);
+ *
+ * Returns @err.
+ *
+ */
+int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ else
+ dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+
+ va_end(args);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dev_err_probe);
+
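For illustration, a minimal usage sketch of the new helper (hypothetical foo_probe(); not part of the patch): dev_err_probe() collapses the usual -EPROBE_DEFER special-casing in a probe path into a single call:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* -EPROBE_DEFER is logged at debug level only; any other
	 * error is logged with dev_err() and returned unchanged. */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");

	return 0;
}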
static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
{
return fwnode && !IS_ERR(fwnode->secondary);
@@ -3474,6 +3518,13 @@ void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
+void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
+{
+ dev->fwnode = fwnode;
+ dev->of_node = to_of_node(fwnode);
+}
+EXPORT_SYMBOL_GPL(device_set_node);
+
int device_match_name(struct device *dev, const void *name)
{
return sysfs_streq(dev_name(dev), name);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 9b5edf1dfe9e..5e0c1fd27720 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -491,7 +491,8 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
bool cpu_is_hotpluggable(unsigned cpu)
{
struct device *dev = get_cpu_device(cpu);
- return dev && container_of(dev, struct cpu, dev)->hotpluggable;
+ return dev && container_of(dev, struct cpu, dev)->hotpluggable
+ && tick_nohz_cpu_hotpluggable(cpu);
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
@@ -574,6 +575,18 @@ ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
return sysfs_emit(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_retbleed(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_gds(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -584,6 +597,8 @@ static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
+static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
+static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -596,6 +611,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_itlb_multihit.attr,
&dev_attr_srbds.attr,
&dev_attr_mmio_stale_data.attr,
+ &dev_attr_retbleed.attr,
+ &dev_attr_gather_data_sampling.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 6f85280fef8d..7941a8fd2284 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -818,6 +818,11 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
} else if (ret == -EPROBE_DEFER) {
dev_dbg(dev, "Device match requests probe deferral\n");
driver_deferred_probe_add(dev);
+ /*
+ * Device can't match with a driver right now, so don't attempt
+ * to match or bind with other drivers on the bus.
+ */
+ return ret;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d", ret);
return ret;
@@ -1037,6 +1042,7 @@ static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
static int __driver_attach(struct device *dev, void *data)
{
struct device_driver *drv = data;
+ bool async = false;
int ret;
/*
@@ -1056,9 +1062,18 @@ static int __driver_attach(struct device *dev, void *data)
} else if (ret == -EPROBE_DEFER) {
dev_dbg(dev, "Device match requests probe deferral\n");
driver_deferred_probe_add(dev);
+ /*
+ * Driver could not match with device, but may match with
+ * another device on the bus.
+ */
+ return 0;
} else if (ret < 0) {
- dev_dbg(dev, "Bus failed to match device: %d", ret);
- return ret;
+ dev_dbg(dev, "Bus failed to match device: %d\n", ret);
+ /*
+ * Driver could not match with device, but may match with
+ * another device on the bus.
+ */
+ return 0;
} /* ret > 0 means positive match */
if (driver_allows_async_probing(drv)) {
@@ -1074,9 +1089,11 @@ static int __driver_attach(struct device *dev, void *data)
if (!dev->driver) {
get_device(dev);
dev->p->async_driver = drv;
- async_schedule_dev(__driver_attach_async_helper, dev);
+ async = true;
}
device_unlock(dev);
+ if (async)
+ async_schedule_dev(__driver_attach_async_helper, dev);
return 0;
}
@@ -1145,8 +1162,6 @@ static void __device_release_driver(struct device *dev, struct device *parent)
else if (drv->remove)
drv->remove(dev);
- device_links_driver_cleanup(dev);
-
devres_release_all(dev);
arch_teardown_dma_ops(dev);
dev->driver = NULL;
@@ -1156,6 +1171,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
pm_runtime_reinit(dev);
dev_pm_set_driver_flags(dev, 0);
+ device_links_driver_cleanup(dev);
+
klist_remove(&dev->p->knode_driver);
device_pm_check_callbacks(dev);
if (dev->bus)
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index e42d0b514384..e9398bcffab6 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -29,6 +29,47 @@ struct devcd_entry {
struct device devcd_dev;
void *data;
size_t datalen;
+ /*
+ * The mutex is required to serialize the calls to the del_wk work between
+ * user and kernel space, which happens when devcd is added with device_add()
+ * and a uevent is sent to user space. User space reads the uevent and calls
+ * devcd_data_write(), which tries to modify a work item that devcoredump has
+ * not even initialized/queued yet:
+ *
+ *        cpu0(X)                               cpu1(Y)
+ *
+ *        dev_coredump() uevent sent to user space
+ *        device_add()  ======================> user space process Y reads the
+ *                                              uevents and writes to the devcd
+ *                                              fd, which results in a call to
+ *
+ *                                              devcd_data_write()
+ *                                                mod_delayed_work()
+ *                                                  try_to_grab_pending()
+ *                                                    del_timer()
+ *                                                      debug_assert_init()
+ *        INIT_DELAYED_WORK()
+ *        schedule_delayed_work()
+ *
+ * Also, the mutex alone would not be enough to avoid scheduling the del_wk
+ * work after it has been flushed by a call to devcd_free(), as shown below:
+ *
+ *        disabled_store()
+ *          devcd_free()
+ *            mutex_lock()                      devcd_data_write()
+ *            flush_delayed_work()
+ *            mutex_unlock()
+ *                                              mutex_lock()
+ *                                              mod_delayed_work()
+ *                                              mutex_unlock()
+ *
+ * So the delete_work flag is required as well.
+ */
+ struct mutex mutex;
+ bool delete_work;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
@@ -88,7 +129,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
- mod_delayed_work(system_wq, &devcd->del_wk, 0);
+ mutex_lock(&devcd->mutex);
+ if (!devcd->delete_work) {
+ devcd->delete_work = true;
+ mod_delayed_work(system_wq, &devcd->del_wk, 0);
+ }
+ mutex_unlock(&devcd->mutex);
return count;
}
@@ -116,7 +162,12 @@ static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
+ mutex_lock(&devcd->mutex);
+ if (!devcd->delete_work)
+ devcd->delete_work = true;
+
flush_delayed_work(&devcd->del_wk);
+ mutex_unlock(&devcd->mutex);
return 0;
}
@@ -126,6 +177,30 @@ static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
return sprintf(buf, "%d\n", devcd_disabled);
}
+/*
+ *       disabled_store()                       worker()
+ *        class_for_each_device(&devcd_class,
+ *              NULL, NULL, devcd_free)
+ *         ...
+ *         ...
+ *        while ((dev = class_dev_iter_next(&iter))
+ *                                               devcd_del()
+ *                                                 device_del()
+ *                                                   put_device() <- last reference
+ *             error = fn(dev, data)            devcd_dev_release()
+ *                devcd_free(dev, data)           kfree(devcd)
+ *                                                 mutex_lock(&devcd->mutex);
+ *
+ * In the above diagram, it looks as if disabled_store() races with a
+ * concurrently running devcd_del() and triggers a memory abort while
+ * acquiring devcd->mutex, which is taken after the devcd memory has been
+ * kfree()d on dropping its last reference with put_device(). However, this
+ * cannot happen: fn(dev, data) runs with its own reference to the device via
+ * klist_node, so that is not the last reference, and the situation above
+ * does not occur.
+ */
+
static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
const char *buf, size_t count)
{
@@ -282,13 +357,17 @@ void dev_coredumpm(struct device *dev, struct module *owner,
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
+ devcd->delete_work = false;
+ mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
dev_set_name(&devcd->devcd_dev, "devcd%d",
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
+ mutex_lock(&devcd->mutex);
+ dev_set_uevent_suppress(&devcd->devcd_dev, true);
if (device_add(&devcd->devcd_dev))
goto put_device;
@@ -300,12 +379,15 @@ void dev_coredumpm(struct device *dev, struct module *owner,
"devcoredump"))
/* nothing - symlink will be missing */;
+ dev_set_uevent_suppress(&devcd->devcd_dev, false);
+ kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
-
+ mutex_unlock(&devcd->mutex);
return;
put_device:
put_device(&devcd->devcd_dev);
+ mutex_unlock(&devcd->mutex);
put_module:
module_put(owner);
free:
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 4e5ca632f35e..ef14566a4971 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -30,6 +30,75 @@ static struct device *next_device(struct klist_iter *i)
}
/**
+ * driver_set_override() - Helper to set or clear driver override.
+ * @dev: Device to change
+ * @override: Address of string to change (e.g. &device->driver_override);
+ * The contents will be freed and replaced with the newly allocated override.
+ * @s: NUL-terminated string, new driver name to force a match, pass empty
+ * string to clear it ("" or "\n", where the latter is only for sysfs
+ * interface).
+ * @len: length of @s
+ *
+ * Helper to set or clear driver override in a device, intended for the cases
+ * when the driver_override field is allocated by driver/bus code.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int driver_set_override(struct device *dev, const char **override,
+ const char *s, size_t len)
+{
+ const char *new, *old;
+ char *cp;
+
+ if (!override || !s)
+ return -EINVAL;
+
+ /*
+ * The stored value will be used in sysfs show callback (sysfs_emit()),
+ * which has a length limit of PAGE_SIZE and adds a trailing newline.
+ * Thus we can store one character less to avoid truncation during sysfs
+ * show.
+ */
+ if (len >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ if (!len) {
+ /* Empty string passed - clear override */
+ device_lock(dev);
+ old = *override;
+ *override = NULL;
+ device_unlock(dev);
+ kfree(old);
+
+ return 0;
+ }
+
+ cp = strnchr(s, len, '\n');
+ if (cp)
+ len = cp - s;
+
+ new = kstrndup(s, len, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ device_lock(dev);
+ old = *override;
+ if (cp != s) {
+ *override = new;
+ } else {
+ /* "\n" passed - clear override */
+ kfree(new);
+ *override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(driver_set_override);
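A hedged sketch of the intended caller, a bus's driver_override sysfs store (the struct foo_device wrapper is hypothetical; the platform bus conversion below is the in-tree equivalent):

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct foo_device *fdev = to_foo_device(dev);	/* hypothetical bus device */
	int ret;

	/* Strips the trailing newline and swaps the stored string under
	 * device_lock(); empty input clears the override. */
	ret = driver_set_override(dev, &fdev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}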
+
+/**
* driver_for_each_device - Iterator for devices bound to a driver.
* @drv: Driver we're iterating.
* @start: Device to begin with
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 75623b914b8c..0ed43d185a90 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -973,31 +973,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct platform_device *pdev = to_platform_device(dev);
- char *driver_override, *old, *cp;
-
- /* We need to keep extra room for a newline */
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- device_lock(dev);
- old = pdev->driver_override;
- if (strlen(driver_override)) {
- pdev->driver_override = driver_override;
- } else {
- kfree(driver_override);
- pdev->driver_override = NULL;
- }
- device_unlock(dev);
+ int ret;
- kfree(old);
+ ret = driver_set_override(dev, &pdev->driver_override, buf, count);
+ if (ret)
+ return ret;
return count;
}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 8428d02cfe58..eed4c865a4bf 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -263,18 +263,18 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
/*
* Traverse all sub-domains within the domain. This can be
* done without any additional locking as the link->performance_state
- * field is protected by the master genpd->lock, which is already taken.
+ * field is protected by the parent genpd->lock, which is already taken.
*
* Also note that link->performance_state (subdomain's performance state
- * requirement to master domain) is different from
- * link->slave->performance_state (current performance state requirement
+ * requirement to parent domain) is different from
+ * link->child->performance_state (current performance state requirement
* of the devices/sub-domains of the subdomain) and so can have a
* different value.
*
* Note that we also take vote from powered-off sub-domains into account
* as the same is done for devices right now.
*/
- list_for_each_entry(link, &genpd->master_links, master_node) {
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
if (link->performance_state > state)
state = link->performance_state;
}
@@ -285,40 +285,40 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
unsigned int state, int depth)
{
- struct generic_pm_domain *master;
+ struct generic_pm_domain *parent;
struct gpd_link *link;
- int master_state, ret;
+ int parent_state, ret;
if (state == genpd->performance_state)
return 0;
- /* Propagate to masters of genpd */
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- master = link->master;
+ /* Propagate to parents of genpd */
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ parent = link->parent;
- if (!master->set_performance_state)
+ if (!parent->set_performance_state)
continue;
- /* Find master's performance state */
+ /* Find parent's performance state */
ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
- master->opp_table,
+ parent->opp_table,
state);
if (unlikely(ret < 0))
goto err;
- master_state = ret;
+ parent_state = ret;
- genpd_lock_nested(master, depth + 1);
+ genpd_lock_nested(parent, depth + 1);
link->prev_performance_state = link->performance_state;
- link->performance_state = master_state;
- master_state = _genpd_reeval_performance_state(master,
- master_state);
- ret = _genpd_set_performance_state(master, master_state, depth + 1);
+ link->performance_state = parent_state;
+ parent_state = _genpd_reeval_performance_state(parent,
+ parent_state);
+ ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
if (ret)
link->performance_state = link->prev_performance_state;
- genpd_unlock(master);
+ genpd_unlock(parent);
if (ret)
goto err;
@@ -333,26 +333,26 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
err:
/* Encountered an error, lets rollback */
- list_for_each_entry_continue_reverse(link, &genpd->slave_links,
- slave_node) {
- master = link->master;
+ list_for_each_entry_continue_reverse(link, &genpd->child_links,
+ child_node) {
+ parent = link->parent;
- if (!master->set_performance_state)
+ if (!parent->set_performance_state)
continue;
- genpd_lock_nested(master, depth + 1);
+ genpd_lock_nested(parent, depth + 1);
- master_state = link->prev_performance_state;
- link->performance_state = master_state;
+ parent_state = link->prev_performance_state;
+ link->performance_state = parent_state;
- master_state = _genpd_reeval_performance_state(master,
- master_state);
- if (_genpd_set_performance_state(master, master_state, depth + 1)) {
+ parent_state = _genpd_reeval_performance_state(parent,
+ parent_state);
+ if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
pr_err("%s: Failed to roll back to %d performance state\n",
- master->name, master_state);
+ parent->name, parent_state);
}
- genpd_unlock(master);
+ genpd_unlock(parent);
}
return ret;
@@ -552,7 +552,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
/*
* If sd_count > 0 at this point, one of the subdomains hasn't
- * managed to call genpd_power_on() for the master yet after
+ * managed to call genpd_power_on() for the parent yet after
* incrementing it. In that case genpd_power_on() will wait
* for us to drop the lock, so we can call .power_off() and let
* the genpd_power_on() restore power for us (this shouldn't
@@ -566,22 +566,22 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
genpd->status = GPD_STATE_POWER_OFF;
genpd_update_accounting(genpd);
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_dec(link->master);
- genpd_lock_nested(link->master, depth + 1);
- genpd_power_off(link->master, false, depth + 1);
- genpd_unlock(link->master);
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_dec(link->parent);
+ genpd_lock_nested(link->parent, depth + 1);
+ genpd_power_off(link->parent, false, depth + 1);
+ genpd_unlock(link->parent);
}
return 0;
}
/**
- * genpd_power_on - Restore power to a given PM domain and its masters.
+ * genpd_power_on - Restore power to a given PM domain and its parents.
* @genpd: PM domain to power up.
* @depth: nesting count for lockdep.
*
- * Restore power to @genpd and all of its masters so that it is possible to
+ * Restore power to @genpd and all of its parents so that it is possible to
* resume a device belonging to it.
*/
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
@@ -594,20 +594,20 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
/*
* The list is guaranteed not to change while the loop below is being
- * executed, unless one of the masters' .power_on() callbacks fiddles
+ * executed, unless one of the parents' .power_on() callbacks fiddles
* with it.
*/
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- struct generic_pm_domain *master = link->master;
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ struct generic_pm_domain *parent = link->parent;
- genpd_sd_counter_inc(master);
+ genpd_sd_counter_inc(parent);
- genpd_lock_nested(master, depth + 1);
- ret = genpd_power_on(master, depth + 1);
- genpd_unlock(master);
+ genpd_lock_nested(parent, depth + 1);
+ ret = genpd_power_on(parent, depth + 1);
+ genpd_unlock(parent);
if (ret) {
- genpd_sd_counter_dec(master);
+ genpd_sd_counter_dec(parent);
goto err;
}
}
@@ -623,12 +623,12 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
err:
list_for_each_entry_continue_reverse(link,
- &genpd->slave_links,
- slave_node) {
- genpd_sd_counter_dec(link->master);
- genpd_lock_nested(link->master, depth + 1);
- genpd_power_off(link->master, false, depth + 1);
- genpd_unlock(link->master);
+ &genpd->child_links,
+ child_node) {
+ genpd_sd_counter_dec(link->parent);
+ genpd_lock_nested(link->parent, depth + 1);
+ genpd_power_off(link->parent, false, depth + 1);
+ genpd_unlock(link->parent);
}
return ret;
@@ -920,7 +920,7 @@ static int __init genpd_power_off_unused(void)
return 0;
}
-late_initcall(genpd_power_off_unused);
+late_initcall_sync(genpd_power_off_unused);
#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
@@ -943,13 +943,13 @@ static bool genpd_present(const struct generic_pm_domain *genpd)
#ifdef CONFIG_PM_SLEEP
/**
- * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
+ * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
* @genpd: PM domain to power off, if possible.
* @use_lock: use the lock.
* @depth: nesting count for lockdep.
*
* Check if the given PM domain can be powered off (during system suspend or
- * hibernation) and do that if so. Also, in that case propagate to its masters.
+ * hibernation) and do that if so. Also, in that case propagate to its parents.
*
* This function is only called in "noirq" and "syscore" stages of system power
* transitions. The "noirq" callbacks may be executed asynchronously, thus in
@@ -974,21 +974,21 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
genpd->status = GPD_STATE_POWER_OFF;
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_dec(link->master);
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_dec(link->parent);
if (use_lock)
- genpd_lock_nested(link->master, depth + 1);
+ genpd_lock_nested(link->parent, depth + 1);
- genpd_sync_power_off(link->master, use_lock, depth + 1);
+ genpd_sync_power_off(link->parent, use_lock, depth + 1);
if (use_lock)
- genpd_unlock(link->master);
+ genpd_unlock(link->parent);
}
}
/**
- * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
+ * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
* @genpd: PM domain to power on.
* @use_lock: use the lock.
* @depth: nesting count for lockdep.
@@ -1005,16 +1005,16 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
if (genpd_status_on(genpd))
return;
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_inc(link->master);
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_inc(link->parent);
if (use_lock)
- genpd_lock_nested(link->master, depth + 1);
+ genpd_lock_nested(link->parent, depth + 1);
- genpd_sync_power_on(link->master, use_lock, depth + 1);
+ genpd_sync_power_on(link->parent, use_lock, depth + 1);
if (use_lock)
- genpd_unlock(link->master);
+ genpd_unlock(link->parent);
}
_genpd_power_on(genpd, false);
@@ -1454,12 +1454,12 @@ static void genpd_update_cpumask(struct generic_pm_domain *genpd,
if (!genpd_is_cpu_domain(genpd))
return;
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- struct generic_pm_domain *master = link->master;
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ struct generic_pm_domain *parent = link->parent;
- genpd_lock_nested(master, depth + 1);
- genpd_update_cpumask(master, cpu, set, depth + 1);
- genpd_unlock(master);
+ genpd_lock_nested(parent, depth + 1);
+ genpd_update_cpumask(parent, cpu, set, depth + 1);
+ genpd_unlock(parent);
}
if (set)
@@ -1647,17 +1647,17 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
goto out;
}
- list_for_each_entry(itr, &genpd->master_links, master_node) {
- if (itr->slave == subdomain && itr->master == genpd) {
+ list_for_each_entry(itr, &genpd->parent_links, parent_node) {
+ if (itr->child == subdomain && itr->parent == genpd) {
ret = -EINVAL;
goto out;
}
}
- link->master = genpd;
- list_add_tail(&link->master_node, &genpd->master_links);
- link->slave = subdomain;
- list_add_tail(&link->slave_node, &subdomain->slave_links);
+ link->parent = genpd;
+ list_add_tail(&link->parent_node, &genpd->parent_links);
+ link->child = subdomain;
+ list_add_tail(&link->child_node, &subdomain->child_links);
if (genpd_status_on(subdomain))
genpd_sd_counter_inc(genpd);
@@ -1671,7 +1671,7 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
/**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
- * @genpd: Master PM domain to add the subdomain to.
+ * @genpd: Leader PM domain to add the subdomain to.
* @subdomain: Subdomain to be added.
*/
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
@@ -1689,7 +1689,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
/**
* pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
- * @genpd: Master PM domain to remove the subdomain from.
+ * @genpd: Leader PM domain to remove the subdomain from.
* @subdomain: Subdomain to be removed.
*/
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
@@ -1704,19 +1704,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
genpd_lock(subdomain);
genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
- if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
+ if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n",
genpd->name, subdomain->name);
ret = -EBUSY;
goto out;
}
- list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
- if (link->slave != subdomain)
+ list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
+ if (link->child != subdomain)
continue;
- list_del(&link->master_node);
- list_del(&link->slave_node);
+ list_del(&link->parent_node);
+ list_del(&link->child_node);
kfree(link);
if (genpd_status_on(subdomain))
genpd_sd_counter_dec(genpd);
@@ -1781,8 +1781,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd))
return -EINVAL;
- INIT_LIST_HEAD(&genpd->master_links);
- INIT_LIST_HEAD(&genpd->slave_links);
+ INIT_LIST_HEAD(&genpd->parent_links);
+ INIT_LIST_HEAD(&genpd->child_links);
INIT_LIST_HEAD(&genpd->dev_list);
genpd_lock_init(genpd);
genpd->gov = gov;
@@ -1858,15 +1858,15 @@ static int genpd_remove(struct generic_pm_domain *genpd)
return -EBUSY;
}
- if (!list_empty(&genpd->master_links) || genpd->device_count) {
+ if (!list_empty(&genpd->parent_links) || genpd->device_count) {
genpd_unlock(genpd);
pr_err("%s: unable to remove %s\n", __func__, genpd->name);
return -EBUSY;
}
- list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
- list_del(&link->master_node);
- list_del(&link->slave_node);
+ list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
+ list_del(&link->parent_node);
+ list_del(&link->child_node);
kfree(link);
}
@@ -2596,10 +2596,10 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
err = of_property_read_u32(state_node, "min-residency-us", &residency);
if (!err)
- genpd_state->residency_ns = 1000 * residency;
+ genpd_state->residency_ns = 1000LL * residency;
- genpd_state->power_on_latency_ns = 1000 * exit_latency;
- genpd_state->power_off_latency_ns = 1000 * entry_latency;
+ genpd_state->power_on_latency_ns = 1000LL * exit_latency;
+ genpd_state->power_off_latency_ns = 1000LL * entry_latency;
genpd_state->fwnode = &state_node->fwnode;
return 0;
@@ -2622,6 +2622,10 @@ static int genpd_iterate_idle_states(struct device_node *dn,
np = it.node;
if (!of_match_node(idle_state_match, np))
continue;
+
+ if (!of_device_is_available(np))
+ continue;
+
if (states) {
ret = genpd_parse_state(&states[i], np);
if (ret) {
@@ -2789,12 +2793,12 @@ static int genpd_summary_one(struct seq_file *s,
/*
* Modifications on the list require holding locks on both
- * master and slave, so we are safe.
+ * parent and child, so we are safe.
* Also genpd->name is immutable.
*/
- list_for_each_entry(link, &genpd->master_links, master_node) {
- seq_printf(s, "%s", link->slave->name);
- if (!list_is_last(&link->master_node, &genpd->master_links))
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ seq_printf(s, "%s", link->child->name);
+ if (!list_is_last(&link->parent_node, &genpd->parent_links))
seq_puts(s, ", ");
}
@@ -2822,7 +2826,7 @@ static int summary_show(struct seq_file *s, void *data)
struct generic_pm_domain *genpd;
int ret = 0;
- seq_puts(s, "domain status slaves\n");
+ seq_puts(s, "domain status children\n");
seq_puts(s, " /device runtime status\n");
seq_puts(s, "----------------------------------------------------------------------\n");
@@ -2877,8 +2881,8 @@ static int sub_domains_show(struct seq_file *s, void *data)
if (ret)
return -ERESTARTSYS;
- list_for_each_entry(link, &genpd->master_links, master_node)
- seq_printf(s, "%s\n", link->slave->name);
+ list_for_each_entry(link, &genpd->parent_links, parent_node)
+ seq_printf(s, "%s\n", link->child->name);
genpd_unlock(genpd);
return ret;
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index daa8c7689f7e..490ed7deb99a 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -135,8 +135,8 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
*
* All subdomains have been powered off already at this point.
*/
- list_for_each_entry(link, &genpd->master_links, master_node) {
- struct generic_pm_domain *sd = link->slave;
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct generic_pm_domain *sd = link->child;
s64 sd_max_off_ns = sd->max_off_time_ns;
if (sd_max_off_ns < 0)
@@ -217,13 +217,13 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
}
/*
- * We have to invalidate the cached results for the masters, so
+ * We have to invalidate the cached results for the parents, so
* use the observation that default_power_down_ok() is not
- * going to be called for any master until this instance
+ * going to be called for any parent until this instance
* returns.
*/
- list_for_each_entry(link, &genpd->slave_links, slave_node)
- link->master->max_off_time_changed = true;
+ list_for_each_entry(link, &genpd->child_links, child_node)
+ link->parent->max_off_time_changed = true;
genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = false;
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 39a06a0cfdaa..310abe7251fc 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -25,8 +25,11 @@ extern u64 pm_runtime_active_time(struct device *dev);
#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
+#define WAKE_IRQ_DEDICATED_REVERSE BIT(2)
#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
- WAKE_IRQ_DEDICATED_MANAGED)
+ WAKE_IRQ_DEDICATED_MANAGED | \
+ WAKE_IRQ_DEDICATED_REVERSE)
+#define WAKE_IRQ_DEDICATED_ENABLED BIT(3)
struct wake_irq {
struct device *dev;
@@ -39,7 +42,8 @@ extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_enable_wake_irq_check(struct device *dev,
bool can_change_status);
-extern void dev_pm_disable_wake_irq_check(struct device *dev);
+extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable);
+extern void dev_pm_enable_wake_irq_complete(struct device *dev);
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8fbd376471de..d301a6de762d 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -446,7 +446,10 @@ static int rpm_idle(struct device *dev, int rpmflags)
/* Pending requests need to be canceled. */
dev->power.request = RPM_REQ_NONE;
- if (dev->power.no_callbacks)
+ callback = RPM_GET_CALLBACK(dev, runtime_idle);
+
+ /* If no callback assume success. */
+ if (!callback || dev->power.no_callbacks)
goto out;
/* Carry out an asynchronous or a synchronous idle notification. */
@@ -462,10 +465,17 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.idle_notification = true;
- callback = RPM_GET_CALLBACK(dev, runtime_idle);
+ if (dev->power.irq_safe)
+ spin_unlock(&dev->power.lock);
+ else
+ spin_unlock_irq(&dev->power.lock);
- if (callback)
- retval = __rpm_callback(callback, dev);
+ retval = callback(dev);
+
+ if (dev->power.irq_safe)
+ spin_lock(&dev->power.lock);
+ else
+ spin_lock_irq(&dev->power.lock);
dev->power.idle_notification = false;
wake_up_all(&dev->power.wait_queue);
@@ -649,6 +659,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
if (retval)
goto fail;
+ dev_pm_enable_wake_irq_complete(dev);
+
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
@@ -694,7 +706,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
return retval;
fail:
- dev_pm_disable_wake_irq_check(dev);
+ dev_pm_disable_wake_irq_check(dev, true);
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
@@ -877,7 +889,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_resume);
- dev_pm_disable_wake_irq_check(dev);
+ dev_pm_disable_wake_irq_check(dev, false);
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
@@ -1036,8 +1048,10 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
int retval;
if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count))
+ if (!atomic_dec_and_test(&dev->power.usage_count)) {
+ trace_rpm_usage_rcuidle(dev, rpmflags);
return 0;
+ }
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -1068,8 +1082,10 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
int retval;
if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count))
+ if (!atomic_dec_and_test(&dev->power.usage_count)) {
+ trace_rpm_usage_rcuidle(dev, rpmflags);
return 0;
+ }
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -1113,28 +1129,47 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
- * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
+ * pm_runtime_get_if_active - Conditionally bump up the device's usage counter.
* @dev: Device to handle.
*
* Return -EINVAL if runtime PM is disabled for the device.
*
- * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
- * and the runtime PM usage counter is nonzero, increment the counter and
- * return 1. Otherwise return 0 without changing the counter.
+ * Otherwise, if the device's runtime PM status is RPM_ACTIVE and either
+ * ign_usage_count is true or the device's usage_count is non-zero, increment
+ * the counter and return 1. Otherwise return 0 without changing the counter.
+ *
+ * If ign_usage_count is true, the function can be used to prevent suspending
+ * the device when its runtime PM status is RPM_ACTIVE.
+ *
+ * If ign_usage_count is false, the function can be used to prevent suspending
+ * the device when both its runtime PM status is RPM_ACTIVE and its usage_count
+ * is non-zero.
+ *
+ * The caller is responsible for putting the device's usage count when the
+ * return value is greater than zero.
*/
-int pm_runtime_get_if_in_use(struct device *dev)
+int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
{
unsigned long flags;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
- retval = dev->power.disable_depth > 0 ? -EINVAL :
- dev->power.runtime_status == RPM_ACTIVE
- && atomic_inc_not_zero(&dev->power.usage_count);
+ if (dev->power.disable_depth > 0) {
+ retval = -EINVAL;
+ } else if (dev->power.runtime_status != RPM_ACTIVE) {
+ retval = 0;
+ } else if (ign_usage_count) {
+ retval = 1;
+ atomic_inc(&dev->power.usage_count);
+ } else {
+ retval = atomic_inc_not_zero(&dev->power.usage_count);
+ }
+ trace_rpm_usage_rcuidle(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
+
return retval;
}
-EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
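A usage sketch under stated assumptions (foo_hw_flush() is a hypothetical device access; not part of the patch): the ign_usage_count=true form lets a driver touch the hardware only while the device is known to be RPM_ACTIVE:

#include <linux/pm_runtime.h>

static void foo_maybe_flush(struct device *dev)
{
	/* Returns 1 and bumps usage_count only if dev is RPM_ACTIVE;
	 * with ign_usage_count=true a zero usage_count does not matter. */
	if (pm_runtime_get_if_active(dev, true) <= 0)
		return;

	foo_hw_flush(dev);	/* hypothetical hardware access */
	pm_runtime_put(dev);	/* drop the reference taken above */
}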
/**
* __pm_runtime_set_status - Set runtime PM status of a device.
@@ -1426,6 +1461,28 @@ void pm_runtime_enable(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
+static void pm_runtime_disable_action(void *data)
+{
+ pm_runtime_dont_use_autosuspend(data);
+ pm_runtime_disable(data);
+}
+
+/**
+ * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
+ *
+ * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
+ * you at driver exit time if needed.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_enable(struct device *dev)
+{
+ pm_runtime_enable(dev);
+
+ return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
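A brief probe-time sketch (hypothetical driver; not part of the patch): with the devres variant, no pm_runtime_disable() is needed in error paths or in .remove():

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* Unwound automatically on driver detach, including the
	 * pm_runtime_dont_use_autosuspend() call noted above. */
	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	return 0;
}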
+
/**
* pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
@@ -1464,6 +1521,8 @@ void pm_runtime_allow(struct device *dev)
dev->power.runtime_auto = true;
if (atomic_dec_and_test(&dev->power.usage_count))
rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
+ else
+ trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
out:
spin_unlock_irq(&dev->power.lock);
@@ -1531,6 +1590,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
if (!old_use || old_delay >= 0) {
atomic_inc(&dev->power.usage_count);
rpm_resume(dev, 0);
+ } else {
+ trace_rpm_usage_rcuidle(dev, 0);
}
}
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 5ce77d1ef9fc..46654adf00a1 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -145,24 +145,7 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
return IRQ_HANDLED;
}
-/**
- * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
- * @dev: Device entry
- * @irq: Device wake-up interrupt
- *
- * Unless your hardware has separate wake-up interrupts in addition
- * to the device IO interrupts, you don't need this.
- *
- * Sets up a threaded interrupt handler for a device that has
- * a dedicated wake-up interrupt in addition to the device IO
- * interrupt.
- *
- * The interrupt starts disabled, and needs to be managed for
- * the device by the bus code or the device driver using
- * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
- * functions.
- */
-int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
{
struct wake_irq *wirq;
int err;
@@ -200,7 +183,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
if (err)
goto err_free_irq;
- wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
+ wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;
return err;
@@ -213,9 +196,58 @@ err_free:
return err;
}
+
+/**
+ * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
+ *
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has
+ * a dedicated wake-up interrupt in addition to the device IO
+ * interrupt.
+ *
+ * The interrupt starts disabled, and needs to be managed for
+ * the device by the bus code or the device driver using
+ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
+ * functions.
+ */
+int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+{
+ return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
+}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
/**
+ * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
+ * with reverse enable ordering
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
+ *
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has a dedicated
+ * wake-up interrupt in addition to the device IO interrupt. It sets
+ * the WAKE_IRQ_DEDICATED_REVERSE status flag to tell rpm_suspend()
+ * to enable the dedicated wake-up interrupt after running the runtime
+ * suspend callback for @dev.
+ *
+ * The interrupt starts disabled, and needs to be managed for
+ * the device by the bus code or the device driver using
+ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
+ * functions.
+ */
+int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
+{
+ return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
+
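A consumer-side sketch (hypothetical probe and IRQ index; not part of the patch): drivers whose dedicated wake IRQ must stay disabled until the device has fully suspended pick the _reverse variant:

static int foo_probe(struct platform_device *pdev)
{
	int irq, err;

	irq = platform_get_irq(pdev, 1);	/* assumed dedicated wake line */
	if (irq < 0)
		return irq;

	/* With WAKE_IRQ_DEDICATED_REVERSE set, rpm_suspend() enables the
	 * IRQ only after ->runtime_suspend() has completed. */
	err = dev_pm_set_dedicated_wake_irq_reverse(&pdev->dev, irq);
	if (err)
		return err;

	return 0;
}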
+/**
* dev_pm_enable_wake_irq - Enable device wake-up interrupt
* @dev: Device
*
@@ -285,25 +317,56 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
return;
enable:
- enable_irq(wirq->irq);
+ if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
+ enable_irq(wirq->irq);
+ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
+ }
}
/**
* dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
* @dev: Device
+ * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
*
* Disables wake-up interrupt conditionally based on status.
* Should be only called from rpm_suspend() and rpm_resume() path.
*/
-void dev_pm_disable_wake_irq_check(struct device *dev)
+void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
{
struct wake_irq *wirq = dev->power.wakeirq;
if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
return;
- if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
+ if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
+ return;
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
+ wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
disable_irq_nosync(wirq->irq);
+ }
+}
+
+/**
+ * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
+ * @dev: Device using the wake IRQ
+ *
+ * Enable the wake IRQ conditionally based on status; this is mainly used
+ * when the wake IRQ is to be enabled after running ->runtime_suspend(),
+ * which is what WAKE_IRQ_DEDICATED_REVERSE requests.
+ *
+ * Should be only called from rpm_suspend() path.
+ */
+void dev_pm_enable_wake_irq_complete(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
+ return;
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
+ wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
+ enable_irq(wirq->irq);
}
/**
@@ -320,7 +383,7 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
if (device_may_wakeup(wirq->dev)) {
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
- !pm_runtime_status_suspended(wirq->dev))
+ !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
enable_irq(wirq->irq);
enable_irq_wake(wirq->irq);
@@ -343,7 +406,7 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
disable_irq_wake(wirq->irq);
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
- !pm_runtime_status_suspended(wirq->dev))
+ !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
disable_irq_nosync(wirq->irq);
}
}
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index d7c01b70e43d..af6ac068ca5d 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -104,6 +104,10 @@ struct regmap {
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
int (*reg_update_bits)(void *context, unsigned int reg,
unsigned int mask, unsigned int val);
+ /* Bulk read/write */
+ int (*read)(void *context, const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size);
+ int (*write)(void *context, const void *data, size_t count);
bool defer_caching;
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index fabf87058d80..d65715b9e129 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -277,7 +277,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
blk = krealloc(rbnode->block,
blklen * map->cache_word_size,
- GFP_KERNEL);
+ map->alloc_flags);
if (!blk)
return -ENOMEM;
@@ -286,7 +286,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
present = krealloc(rbnode->cache_present,
BITS_TO_LONGS(blklen) * sizeof(*present),
- GFP_KERNEL);
+ map->alloc_flags);
if (!present)
return -ENOMEM;
@@ -320,7 +320,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
const struct regmap_range *range;
int i;
- rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
+ rbnode = kzalloc(sizeof(*rbnode), map->alloc_flags);
if (!rbnode)
return NULL;
@@ -346,13 +346,13 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
}
rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
- GFP_KERNEL);
+ map->alloc_flags);
if (!rbnode->block)
goto err_free;
rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
sizeof(*rbnode->cache_present),
- GFP_KERNEL);
+ map->alloc_flags);
if (!rbnode->cache_present)
goto err_free_block;
@@ -453,7 +453,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
if (!rbnode)
return -ENOMEM;
regcache_rbtree_set_register(map, rbnode,
- reg - rbnode->base_reg, value);
+ (reg - rbnode->base_reg) / map->reg_stride,
+ value);
regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
rbtree_ctx->cached_rbnode = rbnode;
}
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 7f4b3b62492c..7fdd702e564a 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -343,6 +343,9 @@ int regcache_sync(struct regmap *map)
const char *name;
bool bypass;
+ if (WARN_ON(map->cache_type == REGCACHE_NONE))
+ return -EINVAL;
+
BUG_ON(!map->cache_ops);
map->lock(map->lock_arg);
@@ -412,6 +415,9 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
const char *name;
bool bypass;
+ if (WARN_ON(map->cache_type == REGCACHE_NONE))
+ return -EINVAL;
+
BUG_ON(!map->cache_ops);
map->lock(map->lock_arg);
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 4f2ff1b2b450..214d19d1337d 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -49,7 +49,7 @@ static ssize_t regmap_name_read_file(struct file *file,
name = map->dev->driver->name;
ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
- if (ret < 0) {
+ if (ret >= PAGE_SIZE) {
kfree(buf);
return ret;
}
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index ac9b31c57967..6d934c35a770 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -242,8 +242,8 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
static struct regmap_bus regmap_i2c_smbus_i2c_block = {
.write = regmap_i2c_smbus_i2c_write,
.read = regmap_i2c_smbus_i2c_read,
- .max_raw_read = I2C_SMBUS_BLOCK_MAX,
- .max_raw_write = I2C_SMBUS_BLOCK_MAX,
+ .max_raw_read = I2C_SMBUS_BLOCK_MAX - 1,
+ .max_raw_write = I2C_SMBUS_BLOCK_MAX - 1,
};
static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 43c0452a8ba9..aa9c6e0ff878 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -771,12 +771,15 @@ struct regmap *__regmap_init(struct device *dev,
map->reg_stride_order = ilog2(map->reg_stride);
else
map->reg_stride_order = -1;
- map->use_single_read = config->use_single_read || !bus || !bus->read;
- map->use_single_write = config->use_single_write || !bus || !bus->write;
- map->can_multi_write = config->can_multi_write && bus && bus->write;
+ map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read));
+ map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write));
+ map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write));
if (bus) {
map->max_raw_read = bus->max_raw_read;
map->max_raw_write = bus->max_raw_write;
+ } else if (config->max_raw_read && config->max_raw_write) {
+ map->max_raw_read = config->max_raw_read;
+ map->max_raw_write = config->max_raw_write;
}
map->dev = dev;
map->bus = bus;
@@ -810,9 +813,19 @@ struct regmap *__regmap_init(struct device *dev,
map->read_flag_mask = bus->read_flag_mask;
}
- if (!bus) {
+ if (config && config->read && config->write) {
+ map->reg_read = _regmap_bus_read;
+
+ /* Bulk read/write */
+ map->read = config->read;
+ map->write = config->write;
+
+ reg_endian = REGMAP_ENDIAN_NATIVE;
+ val_endian = REGMAP_ENDIAN_NATIVE;
+ } else if (!bus) {
map->reg_read = config->reg_read;
map->reg_write = config->reg_write;
+ map->reg_update_bits = config->reg_update_bits;
map->defer_caching = false;
goto skip_format_initialization;
@@ -825,10 +838,13 @@ struct regmap *__regmap_init(struct device *dev,
} else {
map->reg_read = _regmap_bus_read;
map->reg_update_bits = bus->reg_update_bits;
- }
+ /* Bulk read/write */
+ map->read = bus->read;
+ map->write = bus->write;
- reg_endian = regmap_get_reg_endian(bus, config);
- val_endian = regmap_get_val_endian(dev, bus, config);
+ reg_endian = regmap_get_reg_endian(bus, config);
+ val_endian = regmap_get_val_endian(dev, bus, config);
+ }
switch (config->reg_bits + map->reg_shift) {
case 2:
@@ -1363,7 +1379,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
/* If the user didn't specify a name match any */
if (data)
- return !strcmp((*r)->name, data);
+ return (*r)->name && !strcmp((*r)->name, data);
else
return 1;
}
@@ -1479,8 +1495,6 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
size_t len;
int i;
- WARN_ON(!map->bus);
-
/* Check for unwritable or noinc registers in range
* before we start
*/
@@ -1495,17 +1509,19 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
}
if (!map->cache_bypass && map->format.parse_val) {
- unsigned int ival;
+ unsigned int ival, offset;
int val_bytes = map->format.val_bytes;
- for (i = 0; i < val_len / val_bytes; i++) {
- ival = map->format.parse_val(val + (i * val_bytes));
- ret = regcache_write(map,
- reg + regmap_get_offset(map, i),
- ival);
+
+ /* Cache the last written value for noinc writes */
+ i = noinc ? val_len - val_bytes : 0;
+ for (; i < val_len; i += val_bytes) {
+ ival = map->format.parse_val(val + i);
+ offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
+ ret = regcache_write(map, reg + offset, ival);
if (ret) {
dev_err(map->dev,
"Error in caching of register: %x ret: %d\n",
- reg + regmap_get_offset(map, i), ret);
+ reg + offset, ret);
return ret;
}
}
@@ -1560,7 +1576,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
val = work_val;
}
- if (map->async && map->bus->async_write) {
+ if (map->async && map->bus && map->bus->async_write) {
struct regmap_async *async;
trace_regmap_async_write_start(map, reg, val_len);
@@ -1628,11 +1644,11 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
* write.
*/
if (val == work_val)
- ret = map->bus->write(map->bus_context, map->work_buf,
- map->format.reg_bytes +
- map->format.pad_bytes +
- val_len);
- else if (map->bus->gather_write)
+ ret = map->write(map->bus_context, map->work_buf,
+ map->format.reg_bytes +
+ map->format.pad_bytes +
+ val_len);
+ else if (map->bus && map->bus->gather_write)
ret = map->bus->gather_write(map->bus_context, map->work_buf,
map->format.reg_bytes +
map->format.pad_bytes,
@@ -1650,7 +1666,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
memcpy(buf, map->work_buf, map->format.reg_bytes);
memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
val, val_len);
- ret = map->bus->write(map->bus_context, buf, len);
+ ret = map->write(map->bus_context, buf, len);
kfree(buf);
} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
@@ -1707,7 +1723,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
struct regmap_range_node *range;
struct regmap *map = context;
- WARN_ON(!map->bus || !map->format.format_write);
+ WARN_ON(!map->format.format_write);
range = _regmap_range_lookup(map, reg);
if (range) {
@@ -1720,8 +1736,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
trace_regmap_hw_write_start(map, reg, 1);
- ret = map->bus->write(map->bus_context, map->work_buf,
- map->format.buf_size);
+ ret = map->write(map->bus_context, map->work_buf, map->format.buf_size);
trace_regmap_hw_write_done(map, reg, 1);
@@ -1741,7 +1756,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
{
struct regmap *map = context;
- WARN_ON(!map->bus || !map->format.format_val);
+ WARN_ON(!map->format.format_val);
map->format.format_val(map->work_buf + map->format.reg_bytes
+ map->format.pad_bytes, val, 0);
@@ -1755,7 +1770,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
static inline void *_regmap_map_get_context(struct regmap *map)
{
- return (map->bus) ? map : map->bus_context;
+ return (map->bus || (!map->bus && map->read)) ? map : map->bus_context;
}
int _regmap_write(struct regmap *map, unsigned int reg,
@@ -1850,6 +1865,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
size_t val_count = val_len / val_bytes;
size_t chunk_count, chunk_bytes;
size_t chunk_regs = val_count;
+ size_t max_data = map->max_raw_write - map->format.reg_bytes -
+ map->format.pad_bytes;
int ret, i;
if (!val_count)
@@ -1857,8 +1874,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
if (map->use_single_write)
chunk_regs = 1;
- else if (map->max_raw_write && val_len > map->max_raw_write)
- chunk_regs = map->max_raw_write / val_bytes;
+ else if (map->max_raw_write && val_len > max_data)
+ chunk_regs = max_data / val_bytes;
chunk_count = val_count / chunk_regs;
chunk_bytes = chunk_regs * val_bytes;
@@ -2162,7 +2179,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
u8 = buf;
*u8 |= map->write_flag_mask;
- ret = map->bus->write(map->bus_context, buf, len);
+ ret = map->write(map->bus_context, buf, len);
kfree(buf);
@@ -2460,9 +2477,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
struct regmap_range_node *range;
int ret;
- WARN_ON(!map->bus);
-
- if (!map->bus || !map->bus->read)
+ if (!map->read)
return -EINVAL;
range = _regmap_range_lookup(map, reg);
@@ -2478,9 +2493,9 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
map->read_flag_mask);
trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
- ret = map->bus->read(map->bus_context, map->work_buf,
- map->format.reg_bytes + map->format.pad_bytes,
- val, val_len);
+ ret = map->read(map->bus_context, map->work_buf,
+ map->format.reg_bytes + map->format.pad_bytes,
+ val, val_len);
trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
@@ -2591,8 +2606,6 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
unsigned int v;
int ret, i;
- if (!map->bus)
- return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
@@ -2607,7 +2620,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
size_t chunk_count, chunk_bytes;
size_t chunk_regs = val_count;
- if (!map->bus->read) {
+ if (!map->read) {
ret = -ENOTSUPP;
goto out;
}
@@ -2667,7 +2680,7 @@ EXPORT_SYMBOL_GPL(regmap_raw_read);
* @val: Pointer to data buffer
* @val_len: Length of output buffer in bytes.
*
- * The regmap API usually assumes that bulk bus read operations will read a
+ * The regmap API usually assumes that bulk read operations will read a
* range of registers. Some devices have certain registers for which a read
* operation reads from an internal FIFO.
*
@@ -2685,10 +2698,6 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
size_t read_len;
int ret;
- if (!map->bus)
- return -EINVAL;
- if (!map->bus->read)
- return -ENOTSUPP;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
@@ -2802,7 +2811,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
if (val_count == 0)
return -EINVAL;
- if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
+ if (map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
if (ret != 0)
return ret;
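
Taken together, the regmap.c hunks let a driver supply raw bulk read/write
callbacks (plus transfer limits) directly in regmap_config, with no
regmap_bus at all. A minimal sketch, where my_dev_xfer() is a hypothetical
transport helper:

	/* Sketch only; my_dev_xfer() is a made-up transport function. */
	static int my_read(void *context, const void *reg_buf, size_t reg_size,
			   void *val_buf, size_t val_size)
	{
		return my_dev_xfer(context, reg_buf, reg_size, val_buf, val_size);
	}

	static int my_write(void *context, const void *data, size_t count)
	{
		return my_dev_xfer(context, data, count, NULL, 0);
	}

	static const struct regmap_config my_config = {
		.reg_bits	= 8,
		.val_bits	= 8,
		.read		= my_read,
		.write		= my_write,
		/* per the hunk above, both limits must be set to be used */
		.max_raw_read	= 32,
		.max_raw_write	= 32,
	};
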
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 4c3b9813b284..636d52d1a1b8 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -604,6 +604,9 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
if (nargs > NR_FWNODE_REFERENCE_ARGS)
return -EINVAL;
+ if (!args)
+ return 0;
+
args->fwnode = software_node_get(refnode);
args->nargs = nargs;
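
The NULL check lets callers use the software-node backend purely to probe for
a reference's existence. A hedged usage sketch ("some-prop" is a placeholder
property name):

	/* A NULL args pointer now reports existence without decoding */
	ret = fwnode_property_get_reference_args(fwnode, "some-prop", NULL,
						 0, 0, NULL);
	if (!ret)
		pr_debug("reference present\n");
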
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index 3bb7beb127a9..88336f093dec 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -84,7 +84,7 @@ test_platform_device_register_node(char *name, int id, int nid)
pdev = platform_device_alloc(name, id);
if (!pdev)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (nid != NUMA_NO_NODE)
set_dev_node(&pdev->dev, nid);
@@ -146,7 +146,7 @@ static int __init test_async_probe_init(void)
calltime = ktime_get();
for_each_online_cpu(cpu) {
nid = cpu_to_node(cpu);
- pdev = &sync_dev[sync_id];
+ pdev = &async_dev[async_id];
*pdev = test_platform_device_register_node("test_async_driver",
async_id,
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 8a08cbb958d1..c867211f5ed7 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -299,15 +299,6 @@ config BLK_DEV_SKD
Use devices /dev/skd$N and /dev/skd$Np$M.
-config BLK_DEV_SX8
- tristate "Promise SATA SX8 support"
- depends on PCI
- ---help---
- Saying Y or M here will enable support for the
- Promise SATA SX8 controllers.
-
- Use devices /dev/sx8/$N and /dev/sx8/$Np$M.
-
config BLK_DEV_RAM
tristate "RAM block device support"
---help---
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index a53cc1e3a2d3..d8f2f72c3b62 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -26,8 +26,6 @@ obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o
obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
-obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
-
obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
obj-$(CONFIG_XEN_BLKDEV_BACKEND) += xen-blkback/
obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 3cf9bc5d8d95..3d5117be57f9 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -420,13 +420,16 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
rcu_read_lock();
for_each_netdev_rcu(&init_net, ifp) {
dev_hold(ifp);
- if (!is_aoe_netif(ifp))
- goto cont;
+ if (!is_aoe_netif(ifp)) {
+ dev_put(ifp);
+ continue;
+ }
skb = new_skb(sizeof *h + sizeof *ch);
if (skb == NULL) {
printk(KERN_INFO "aoe: skb alloc failure\n");
- goto cont;
+ dev_put(ifp);
+ continue;
}
skb_put(skb, sizeof *h + sizeof *ch);
skb->dev = ifp;
@@ -441,9 +444,6 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
h->major = cpu_to_be16(aoemajor);
h->minor = aoeminor;
h->cmd = AOECMD_CFG;
-
-cont:
- dev_put(ifp);
}
rcu_read_unlock();
}
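
The rewritten loop pairs every dev_hold() with a dev_put() on each early-skip
path; the reference survives only when a packet is actually queued, and is
dropped later in tx(). The general pattern, sketched with hypothetical
helpers:

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ifp) {
		dev_hold(ifp);
		if (!interesting(ifp)) {	/* hypothetical filter */
			dev_put(ifp);		/* balance before skipping */
			continue;
		}
		queue_skb_for(ifp);		/* consumer drops the ref */
	}
	rcu_read_unlock();
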
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 63773a90581d..1e66c7a188a1 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -64,6 +64,7 @@ tx(int id) __must_hold(&txlock)
pr_warn("aoe: packet could not be sent on %s. %s\n",
ifp ? ifp->name : "netif",
"consider increasing tx_queue_len");
+ dev_put(ifp);
spin_lock_irq(&txlock);
}
return 0;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 5ece2fd70d9c..f3a96c76f5a4 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2778,7 +2778,7 @@ static int init_submitter(struct drbd_device *device)
enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
{
struct drbd_resource *resource = adm_ctx->resource;
- struct drbd_connection *connection;
+ struct drbd_connection *connection, *n;
struct drbd_device *device;
struct drbd_peer_device *peer_device, *tmp_peer_device;
struct gendisk *disk;
@@ -2906,7 +2906,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
out_idr_remove_vol:
idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:
- for_each_connection(connection, resource) {
+ for_each_connection_safe(connection, n, resource) {
peer_device = idr_remove(&connection->peer_devices, vnr);
if (peer_device)
kref_put(&connection->kref, drbd_destroy_connection);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 52dd517ff541..43c142ea364b 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -3426,7 +3426,7 @@ int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlattr *resource_filter;
struct drbd_resource *resource;
- struct drbd_device *uninitialized_var(device);
+ struct drbd_device *device;
int minor, err, retcode;
struct drbd_genlmsghdr *dh;
struct device_info device_info;
@@ -3515,7 +3515,7 @@ int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlattr *resource_filter;
struct drbd_resource *resource = NULL, *next_resource;
- struct drbd_connection *uninitialized_var(connection);
+ struct drbd_connection *connection;
int err = 0, retcode;
struct drbd_genlmsghdr *dh;
struct connection_info connection_info;
@@ -3677,7 +3677,7 @@ int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlattr *resource_filter;
struct drbd_resource *resource;
- struct drbd_device *uninitialized_var(device);
+ struct drbd_device *device;
struct drbd_peer_device *peer_device = NULL;
int minor, err, retcode;
struct drbd_genlmsghdr *dh;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 2b3103c30857..d94f41a0abbe 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1298,7 +1298,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
bio_set_dev(bio, device->ldev->backing_bdev);
bio->bi_private = octx;
bio->bi_end_io = one_flush_endio;
- bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
device->flush_jif = jiffies;
set_bit(FLUSH_PENDING, &device->flags);
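
The opf change follows the block layer's convention: bio submitters issue an
empty flush as REQ_OP_WRITE | REQ_PREFLUSH, while REQ_OP_FLUSH is reserved
for the flush requests blk-mq generates internally. In sketch form:

	/* An empty barrier bio carries no data, only the preflush flag */
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	submit_bio(bio);
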
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ad9b767ec40d..dffbcc5c4ccf 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1415,6 +1415,11 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
info->lo_number = lo->lo_number;
info->lo_offset = lo->lo_offset;
info->lo_sizelimit = lo->lo_sizelimit;
+
+ /* the __u64 fields above were assigned from signed loff_t vars */
+ if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
+ return -EOVERFLOW;
+
info->lo_flags = lo->lo_flags;
memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
@@ -2050,7 +2055,7 @@ static int loop_add(struct loop_device **l, int i)
lo->tag_set.queue_depth = 128;
lo->tag_set.numa_node = NUMA_NO_NODE;
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
- lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_NO_SCHED;
lo->tag_set.driver_data = lo;
err = blk_mq_alloc_tag_set(&lo->tag_set);
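
The new EOVERFLOW check guards the signed-to-unsigned copy just above it:
lo_offset and lo_sizelimit are loff_t in the kernel but __u64 in the uapi
struct, so a negative value would round-trip as an enormous offset. A worked
illustration (sketch only):

	loff_t off = -1;	/* kernel-side, signed */
	__u64 uoff = off;	/* uapi field */
	/* uoff == 0xffffffffffffffff, i.e. an ~16 EiB offset */
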
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 09323b0510f0..f2d847ffcbc7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1327,10 +1327,12 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
mutex_unlock(&nbd->config_lock);
ret = wait_event_interruptible(config->recv_wq,
atomic_read(&config->recv_threads) == 0);
- if (ret)
+ if (ret) {
sock_shutdown(nbd);
- flush_workqueue(nbd->recv_workq);
+ nbd_clear_que(nbd);
+ }
+ flush_workqueue(nbd->recv_workq);
mutex_lock(&nbd->config_lock);
nbd_bdev_reset(bdev);
/* user requested, ignore socket errors */
@@ -1607,7 +1609,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
return -EIO;
dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
- if (!dir) {
+ if (IS_ERR(dir)) {
dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
nbd_name(nbd));
return -EIO;
@@ -1633,7 +1635,7 @@ static int nbd_dbg_init(void)
struct dentry *dbg_dir;
dbg_dir = debugfs_create_dir("nbd", NULL);
- if (!dbg_dir)
+ if (IS_ERR(dbg_dir))
return -EIO;
nbd_dbg_dir = dbg_dir;
@@ -1706,7 +1708,8 @@ static int nbd_dev_add(int index)
if (err == -ENOSPC)
err = -EEXIST;
} else {
- err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
+ err = idr_alloc(&nbd_index_idr, nbd, 0,
+ (MINORMASK >> part_shift) + 1, GFP_KERNEL);
if (err >= 0)
index = err;
}
@@ -2310,6 +2313,12 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
}
dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
+ if (!dev_list) {
+ nlmsg_free(reply);
+ ret = -EMSGSIZE;
+ goto out;
+ }
+
if (index == -1) {
ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
if (ret) {
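
Back in nbd_dev_add(), bounding the allocation keeps automatically added
devices inside the minor space: with part_shift bits reserved for partitions,
the largest usable index is MINORMASK >> part_shift, and idr_alloc() treats
its end argument as exclusive. Sketch:

	/* Indices above the bound would overflow the minor number */
	int id = idr_alloc(&nbd_index_idr, nbd, 0,
			   (MINORMASK >> part_shift) + 1, GFP_KERNEL);
	if (id < 0)
		return id;	/* -ENOSPC once the range is exhausted */
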
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 13eae973eaea..6cbdd8a691d2 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1711,8 +1711,13 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
mutex_lock(&lock);
- nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
- dev->index = nullb->index;
+ rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
+ if (rv < 0) {
+ mutex_unlock(&lock);
+ goto out_cleanup_zone;
+ }
+ nullb->index = rv;
+ dev->index = rv;
mutex_unlock(&lock);
blk_queue_logical_block_size(nullb->q, dev->blocksize);
@@ -1724,13 +1729,16 @@ static int null_add_dev(struct nullb_device *dev)
rv = null_gendisk_register(nullb);
if (rv)
- goto out_cleanup_zone;
+ goto out_ida_free;
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
mutex_unlock(&lock);
return 0;
+
+out_ida_free:
+ ida_free(&nullb_indexes, nullb->index);
out_cleanup_zone:
if (dev->zoned)
null_zone_exit(dev);
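
The null_blk hunks apply the usual ida discipline: check the allocator's
result, and release the index on every error path added after it. As a
sketch, with register_disk() standing in for the later steps:

	int idx = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);

	if (idx < 0)
		return idx;			/* allocation failed */

	if (register_disk() < 0) {		/* hypothetical later step */
		ida_free(&nullb_indexes, idx);	/* don't leak the index */
		return -EIO;
	}
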
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 55745d6953f0..fd0cdced8024 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -626,9 +626,8 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
static int rbd_dev_refresh(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
-static int rbd_dev_header_info(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
@@ -1098,15 +1097,24 @@ static void rbd_init_layout(struct rbd_device *rbd_dev)
RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
+static void rbd_image_header_cleanup(struct rbd_image_header *header)
+{
+ kfree(header->object_prefix);
+ ceph_put_snap_context(header->snapc);
+ kfree(header->snap_sizes);
+ kfree(header->snap_names);
+
+ memset(header, 0, sizeof(*header));
+}
+
/*
* Fill an rbd image header with information from the given format 1
* on-disk header.
*/
-static int rbd_header_from_disk(struct rbd_device *rbd_dev,
- struct rbd_image_header_ondisk *ondisk)
+static int rbd_header_from_disk(struct rbd_image_header *header,
+ struct rbd_image_header_ondisk *ondisk,
+ bool first_time)
{
- struct rbd_image_header *header = &rbd_dev->header;
- bool first_time = header->object_prefix == NULL;
struct ceph_snap_context *snapc;
char *object_prefix = NULL;
char *snap_names = NULL;
@@ -1173,11 +1181,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev,
if (first_time) {
header->object_prefix = object_prefix;
header->obj_order = ondisk->options.order;
- rbd_init_layout(rbd_dev);
- } else {
- ceph_put_snap_context(header->snapc);
- kfree(header->snap_names);
- kfree(header->snap_sizes);
}
/* The remaining fields always get updated (when we refresh) */
@@ -1493,14 +1496,30 @@ static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
/*
* Must be called after rbd_obj_calc_img_extents().
*/
-static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
+static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
{
- if (!obj_req->num_img_extents ||
- (rbd_obj_is_entire(obj_req) &&
- !obj_req->img_request->snapc->num_snaps))
- return false;
+ rbd_assert(obj_req->img_request->snapc);
- return true;
+ if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
+ dout("%s %p objno %llu discard\n", __func__, obj_req,
+ obj_req->ex.oe_objno);
+ return;
+ }
+
+ if (!obj_req->num_img_extents) {
+ dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
+ obj_req->ex.oe_objno);
+ return;
+ }
+
+ if (rbd_obj_is_entire(obj_req) &&
+ !obj_req->img_request->snapc->num_snaps) {
+ dout("%s %p objno %llu entire\n", __func__, obj_req,
+ obj_req->ex.oe_objno);
+ return;
+ }
+
+ obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
}
static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
@@ -1599,6 +1618,7 @@ __rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
static struct ceph_osd_request *
rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
{
+ rbd_assert(obj_req->img_request->snapc);
return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
num_ops);
}
@@ -1727,11 +1747,14 @@ static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
* Caller is responsible for filling in the list of object requests
* that comprises the image request, and the Linux request pointer
* (if there is one).
+ *
+ * Only snap_id is captured here, for reads. For writes, snapshot
+ * context is captured in rbd_img_object_requests() after exclusive
+ * lock is ensured to be held.
*/
static struct rbd_img_request *rbd_img_request_create(
struct rbd_device *rbd_dev,
- enum obj_operation_type op_type,
- struct ceph_snap_context *snapc)
+ enum obj_operation_type op_type)
{
struct rbd_img_request *img_request;
@@ -1743,8 +1766,6 @@ static struct rbd_img_request *rbd_img_request_create(
img_request->op_type = op_type;
if (!rbd_img_is_write(img_request))
img_request->snap_id = rbd_dev->spec->snap_id;
- else
- img_request->snapc = snapc;
if (rbd_dev_parent_get(rbd_dev))
img_request_layered_set(img_request);
@@ -2087,7 +2108,7 @@ static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
struct ceph_osd_data *osd_data;
u64 objno;
- u8 state, new_state, uninitialized_var(current_state);
+ u8 state, new_state, current_state;
bool has_current_state;
void *p;
@@ -2389,9 +2410,6 @@ static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
if (ret)
return ret;
- if (rbd_obj_copyup_enabled(obj_req))
- obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
-
obj_req->write_state = RBD_OBJ_WRITE_START;
return 0;
}
@@ -2497,8 +2515,6 @@ static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
if (ret)
return ret;
- if (rbd_obj_copyup_enabled(obj_req))
- obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
if (!obj_req->num_img_extents) {
obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
if (rbd_obj_is_entire(obj_req))
@@ -2935,7 +2951,7 @@ static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
int ret;
child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
- OBJ_OP_READ, NULL);
+ OBJ_OP_READ);
if (!child_img_req)
return -ENOMEM;
@@ -3439,6 +3455,7 @@ again:
case RBD_OBJ_WRITE_START:
rbd_assert(!*result);
+ rbd_obj_set_copyup_enabled(obj_req);
if (rbd_obj_write_is_noop(obj_req))
return true;
@@ -3586,14 +3603,15 @@ static bool rbd_lock_add_request(struct rbd_img_request *img_req)
static void rbd_lock_del_request(struct rbd_img_request *img_req)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
- bool need_wakeup;
+ bool need_wakeup = false;
lockdep_assert_held(&rbd_dev->lock_rwsem);
spin_lock(&rbd_dev->lock_lists_lock);
- rbd_assert(!list_empty(&img_req->lock_item));
- list_del_init(&img_req->lock_item);
- need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
- list_empty(&rbd_dev->running_list));
+ if (!list_empty(&img_req->lock_item)) {
+ list_del_init(&img_req->lock_item);
+ need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
+ list_empty(&rbd_dev->running_list));
+ }
spin_unlock(&rbd_dev->lock_lists_lock);
if (need_wakeup)
complete(&rbd_dev->releasing_wait);
@@ -3625,9 +3643,19 @@ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
static void rbd_img_object_requests(struct rbd_img_request *img_req)
{
+ struct rbd_device *rbd_dev = img_req->rbd_dev;
struct rbd_obj_request *obj_req;
rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
+ rbd_assert(!need_exclusive_lock(img_req) ||
+ __rbd_is_lock_owner(rbd_dev));
+
+ if (rbd_img_is_write(img_req)) {
+ rbd_assert(!img_req->snapc);
+ down_read(&rbd_dev->header_rwsem);
+ img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
+ up_read(&rbd_dev->header_rwsem);
+ }
for_each_obj_request(img_req, obj_req) {
int result = 0;
@@ -3645,7 +3673,6 @@ static void rbd_img_object_requests(struct rbd_img_request *img_req)
static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
{
- struct rbd_device *rbd_dev = img_req->rbd_dev;
int ret;
again:
@@ -3666,9 +3693,6 @@ again:
if (*result)
return true;
- rbd_assert(!need_exclusive_lock(img_req) ||
- __rbd_is_lock_owner(rbd_dev));
-
rbd_img_object_requests(img_req);
if (!img_req->pending.num_pending) {
*result = img_req->pending.result;
@@ -3974,14 +3998,19 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
return;
}
- list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
+ while (!list_empty(&rbd_dev->acquiring_list)) {
+ img_req = list_first_entry(&rbd_dev->acquiring_list,
+ struct rbd_img_request, lock_item);
mutex_lock(&img_req->state_mutex);
rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
+ if (!result)
+ list_move_tail(&img_req->lock_item,
+ &rbd_dev->running_list);
+ else
+ list_del_init(&img_req->lock_item);
rbd_img_schedule(img_req, result);
mutex_unlock(&img_req->state_mutex);
}
-
- list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
}
static int get_lock_owner_info(struct rbd_device *rbd_dev,
@@ -4130,6 +4159,10 @@ static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
{
int ret;
+ ret = rbd_dev_refresh(rbd_dev);
+ if (ret)
+ return ret;
+
if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
ret = rbd_object_map_open(rbd_dev);
if (ret)
@@ -4788,7 +4821,6 @@ static void rbd_queue_workfn(struct work_struct *work)
struct request *rq = blk_mq_rq_from_pdu(work);
struct rbd_device *rbd_dev = rq->q->queuedata;
struct rbd_img_request *img_request;
- struct ceph_snap_context *snapc = NULL;
u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
u64 length = blk_rq_bytes(rq);
enum obj_operation_type op_type;
@@ -4853,10 +4885,6 @@ static void rbd_queue_workfn(struct work_struct *work)
down_read(&rbd_dev->header_rwsem);
mapping_size = rbd_dev->mapping.size;
- if (op_type != OBJ_OP_READ) {
- snapc = rbd_dev->header.snapc;
- ceph_get_snap_context(snapc);
- }
up_read(&rbd_dev->header_rwsem);
if (offset + length > mapping_size) {
@@ -4866,13 +4894,12 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err_rq;
}
- img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
+ img_request = rbd_img_request_create(rbd_dev, op_type);
if (!img_request) {
result = -ENOMEM;
goto err_rq;
}
img_request->rq = rq;
- snapc = NULL; /* img_request consumes a ref */
dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
img_request, obj_op_name(op_type), offset, length);
@@ -4894,7 +4921,6 @@ err_rq:
if (result)
rbd_warn(rbd_dev, "%s %llx at %llx result %d",
obj_op_name(op_type), length, offset, result);
- ceph_put_snap_context(snapc);
err:
blk_mq_end_request(rq, errno_to_blk_status(result));
}
@@ -4966,7 +4992,9 @@ out_req:
* return, the rbd_dev->header field will contain up-to-date
* information about the image.
*/
-static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header,
+ bool first_time)
{
struct rbd_image_header_ondisk *ondisk = NULL;
u32 snap_count = 0;
@@ -5014,7 +5042,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
snap_count = le32_to_cpu(ondisk->snap_count);
} while (snap_count != want_count);
- ret = rbd_header_from_disk(rbd_dev, ondisk);
+ ret = rbd_header_from_disk(header, ondisk, first_time);
out:
kfree(ondisk);
@@ -5058,43 +5086,6 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev)
}
}
-static int rbd_dev_refresh(struct rbd_device *rbd_dev)
-{
- u64 mapping_size;
- int ret;
-
- down_write(&rbd_dev->header_rwsem);
- mapping_size = rbd_dev->mapping.size;
-
- ret = rbd_dev_header_info(rbd_dev);
- if (ret)
- goto out;
-
- /*
- * If there is a parent, see if it has disappeared due to the
- * mapped image getting flattened.
- */
- if (rbd_dev->parent) {
- ret = rbd_dev_v2_parent_info(rbd_dev);
- if (ret)
- goto out;
- }
-
- if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
- rbd_dev->mapping.size = rbd_dev->header.image_size;
- } else {
- /* validate mapped snapshot's EXISTS flag */
- rbd_exists_validate(rbd_dev);
- }
-
-out:
- up_write(&rbd_dev->header_rwsem);
- if (!ret && mapping_size != rbd_dev->mapping.size)
- rbd_dev_update_size(rbd_dev);
-
- return ret;
-}
-
static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
@@ -5529,8 +5520,7 @@ static void rbd_dev_release(struct device *dev)
module_put(THIS_MODULE);
}
-static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
- struct rbd_spec *spec)
+static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
{
struct rbd_device *rbd_dev;
@@ -5575,9 +5565,6 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
rbd_dev->dev.parent = &rbd_root_dev;
device_initialize(&rbd_dev->dev);
- rbd_dev->rbd_client = rbdc;
- rbd_dev->spec = spec;
-
return rbd_dev;
}
@@ -5590,12 +5577,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
{
struct rbd_device *rbd_dev;
- rbd_dev = __rbd_dev_create(rbdc, spec);
+ rbd_dev = __rbd_dev_create(spec);
if (!rbd_dev)
return NULL;
- rbd_dev->opts = opts;
-
/* get an id and fill in device name */
rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
minor_to_rbd_dev_id(1 << MINORBITS),
@@ -5612,6 +5597,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
/* we have a ref from do_rbd_add() */
__module_get(THIS_MODULE);
+ rbd_dev->rbd_client = rbdc;
+ rbd_dev->spec = spec;
+ rbd_dev->opts = opts;
+
dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
return rbd_dev;
@@ -5666,17 +5655,12 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
return 0;
}
-static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
-{
- return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
- &rbd_dev->header.obj_order,
- &rbd_dev->header.image_size);
-}
-
-static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
+ char **pobject_prefix)
{
size_t size;
void *reply_buf;
+ char *object_prefix;
int ret;
void *p;
@@ -5694,16 +5678,16 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
goto out;
p = reply_buf;
- rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
- p + ret, NULL, GFP_NOIO);
+ object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL,
+ GFP_NOIO);
+ if (IS_ERR(object_prefix)) {
+ ret = PTR_ERR(object_prefix);
+ goto out;
+ }
ret = 0;
- if (IS_ERR(rbd_dev->header.object_prefix)) {
- ret = PTR_ERR(rbd_dev->header.object_prefix);
- rbd_dev->header.object_prefix = NULL;
- } else {
- dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
- }
+ *pobject_prefix = object_prefix;
+ dout(" object_prefix = %s\n", object_prefix);
out:
kfree(reply_buf);
@@ -5748,12 +5732,6 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
return 0;
}
-static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
-{
- return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
- &rbd_dev->header.features);
-}
-
/*
* These are generic image flags, but since they are used only for
* object map, store them in rbd_dev->object_map_flags.
@@ -5790,6 +5768,14 @@ struct parent_image_info {
u64 overlap;
};
+static void rbd_parent_info_cleanup(struct parent_image_info *pii)
+{
+ kfree(pii->pool_ns);
+ kfree(pii->image_id);
+
+ memset(pii, 0, sizeof(*pii));
+}
+
/*
* The caller is responsible for @pii.
*/
@@ -5859,6 +5845,9 @@ static int __get_parent_info(struct rbd_device *rbd_dev,
if (pii->has_overlap)
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+ dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+ __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
+ pii->has_overlap, pii->overlap);
return 0;
e_inval:
@@ -5897,14 +5886,17 @@ static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
pii->has_overlap = true;
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+ dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+ __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
+ pii->has_overlap, pii->overlap);
return 0;
e_inval:
return -EINVAL;
}
-static int get_parent_info(struct rbd_device *rbd_dev,
- struct parent_image_info *pii)
+static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
+ struct parent_image_info *pii)
{
struct page *req_page, *reply_page;
void *p;
@@ -5932,7 +5924,7 @@ static int get_parent_info(struct rbd_device *rbd_dev,
return ret;
}
-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
{
struct rbd_spec *parent_spec;
struct parent_image_info pii = { 0 };
@@ -5942,37 +5934,12 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
if (!parent_spec)
return -ENOMEM;
- ret = get_parent_info(rbd_dev, &pii);
+ ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
if (ret)
goto out_err;
- dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
- __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
- pii.has_overlap, pii.overlap);
-
- if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
- /*
- * Either the parent never existed, or we have
- * record of it but the image got flattened so it no
- * longer has a parent. When the parent of a
- * layered image disappears we immediately set the
- * overlap to 0. The effect of this is that all new
- * requests will be treated as if the image had no
- * parent.
- *
- * If !pii.has_overlap, the parent image spec is not
- * applicable. It's there to avoid duplication in each
- * snapshot record.
- */
- if (rbd_dev->parent_overlap) {
- rbd_dev->parent_overlap = 0;
- rbd_dev_parent_put(rbd_dev);
- pr_info("%s: clone image has been flattened\n",
- rbd_dev->disk->disk_name);
- }
-
+ if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap)
goto out; /* No parent? No problem. */
- }
/* The ceph file layout needs to fit pool id in 32 bits */
@@ -5984,58 +5951,46 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
}
/*
- * The parent won't change (except when the clone is
- * flattened, already handled that). So we only need to
- * record the parent spec we have not already done so.
+ * The parent won't change except when the clone is flattened,
+ * so we only need to record the parent image spec once.
*/
- if (!rbd_dev->parent_spec) {
- parent_spec->pool_id = pii.pool_id;
- if (pii.pool_ns && *pii.pool_ns) {
- parent_spec->pool_ns = pii.pool_ns;
- pii.pool_ns = NULL;
- }
- parent_spec->image_id = pii.image_id;
- pii.image_id = NULL;
- parent_spec->snap_id = pii.snap_id;
-
- rbd_dev->parent_spec = parent_spec;
- parent_spec = NULL; /* rbd_dev now owns this */
+ parent_spec->pool_id = pii.pool_id;
+ if (pii.pool_ns && *pii.pool_ns) {
+ parent_spec->pool_ns = pii.pool_ns;
+ pii.pool_ns = NULL;
}
+ parent_spec->image_id = pii.image_id;
+ pii.image_id = NULL;
+ parent_spec->snap_id = pii.snap_id;
+
+ rbd_assert(!rbd_dev->parent_spec);
+ rbd_dev->parent_spec = parent_spec;
+ parent_spec = NULL; /* rbd_dev now owns this */
/*
- * We always update the parent overlap. If it's zero we issue
- * a warning, as we will proceed as if there was no parent.
+ * Record the parent overlap. If it's zero, issue a warning as
+ * we will proceed as if there is no parent.
*/
- if (!pii.overlap) {
- if (parent_spec) {
- /* refresh, careful to warn just once */
- if (rbd_dev->parent_overlap)
- rbd_warn(rbd_dev,
- "clone now standalone (overlap became 0)");
- } else {
- /* initial probe */
- rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
- }
- }
+ if (!pii.overlap)
+ rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
rbd_dev->parent_overlap = pii.overlap;
out:
ret = 0;
out_err:
- kfree(pii.pool_ns);
- kfree(pii.image_id);
+ rbd_parent_info_cleanup(&pii);
rbd_spec_put(parent_spec);
return ret;
}
-static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
+ u64 *stripe_unit, u64 *stripe_count)
{
struct {
__le64 stripe_unit;
__le64 stripe_count;
} __attribute__ ((packed)) striping_info_buf = { 0 };
size_t size = sizeof (striping_info_buf);
- void *p;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
@@ -6047,27 +6002,33 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
if (ret < size)
return -ERANGE;
- p = &striping_info_buf;
- rbd_dev->header.stripe_unit = ceph_decode_64(&p);
- rbd_dev->header.stripe_count = ceph_decode_64(&p);
+ *stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit);
+ *stripe_count = le64_to_cpu(striping_info_buf.stripe_count);
+ dout(" stripe_unit = %llu stripe_count = %llu\n", *stripe_unit,
+ *stripe_count);
+
return 0;
}
-static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
{
- __le64 data_pool_id;
+ __le64 data_pool_buf;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_data_pool",
- NULL, 0, &data_pool_id, sizeof(data_pool_id));
+ NULL, 0, &data_pool_buf,
+ sizeof(data_pool_buf));
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
- if (ret < sizeof(data_pool_id))
+ if (ret < sizeof(data_pool_buf))
return -EBADMSG;
- rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
- WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
+ *data_pool_id = le64_to_cpu(data_pool_buf);
+ dout(" data_pool_id = %lld\n", *data_pool_id);
+ WARN_ON(*data_pool_id == CEPH_NOPOOL);
+
return 0;
}
@@ -6259,7 +6220,8 @@ out_err:
return ret;
}
-static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
+ struct ceph_snap_context **psnapc)
{
size_t size;
int ret;
@@ -6320,9 +6282,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
for (i = 0; i < snap_count; i++)
snapc->snaps[i] = ceph_decode_64(&p);
- ceph_put_snap_context(rbd_dev->header.snapc);
- rbd_dev->header.snapc = snapc;
-
+ *psnapc = snapc;
dout(" snap context seq = %llu, snap_count = %u\n",
(unsigned long long)seq, (unsigned int)snap_count);
out:
@@ -6371,38 +6331,42 @@ out:
return snap_name;
}
-static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header,
+ bool first_time)
{
- bool first_time = rbd_dev->header.object_prefix == NULL;
int ret;
- ret = rbd_dev_v2_image_size(rbd_dev);
+ ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
+ first_time ? &header->obj_order : NULL,
+ &header->image_size);
if (ret)
return ret;
if (first_time) {
- ret = rbd_dev_v2_header_onetime(rbd_dev);
+ ret = rbd_dev_v2_header_onetime(rbd_dev, header);
if (ret)
return ret;
}
- ret = rbd_dev_v2_snap_context(rbd_dev);
- if (ret && first_time) {
- kfree(rbd_dev->header.object_prefix);
- rbd_dev->header.object_prefix = NULL;
- }
+ ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
+ if (ret)
+ return ret;
- return ret;
+ return 0;
}
-static int rbd_dev_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_header_info(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header,
+ bool first_time)
{
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ rbd_assert(!header->object_prefix && !header->snapc);
if (rbd_dev->image_format == 1)
- return rbd_dev_v1_header_info(rbd_dev);
+ return rbd_dev_v1_header_info(rbd_dev, header, first_time);
- return rbd_dev_v2_header_info(rbd_dev);
+ return rbd_dev_v2_header_info(rbd_dev, header, first_time);
}
/*
@@ -6752,60 +6716,49 @@ out:
*/
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
- struct rbd_image_header *header;
-
rbd_dev_parent_put(rbd_dev);
rbd_object_map_free(rbd_dev);
rbd_dev_mapping_clear(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
- header = &rbd_dev->header;
- ceph_put_snap_context(header->snapc);
- kfree(header->snap_sizes);
- kfree(header->snap_names);
- kfree(header->object_prefix);
- memset(header, 0, sizeof (*header));
+ rbd_image_header_cleanup(&rbd_dev->header);
}
-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header)
{
int ret;
- ret = rbd_dev_v2_object_prefix(rbd_dev);
+ ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
if (ret)
- goto out_err;
+ return ret;
/*
* Get and check features for the image. Currently the
* features are assumed to never change.
*/
- ret = rbd_dev_v2_features(rbd_dev);
+ ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
+ &header->features);
if (ret)
- goto out_err;
+ return ret;
/* If the image supports fancy striping, get its parameters */
- if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
- ret = rbd_dev_v2_striping_info(rbd_dev);
- if (ret < 0)
- goto out_err;
+ if (header->features & RBD_FEATURE_STRIPINGV2) {
+ ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
+ &header->stripe_count);
+ if (ret)
+ return ret;
}
- if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
- ret = rbd_dev_v2_data_pool(rbd_dev);
+ if (header->features & RBD_FEATURE_DATA_POOL) {
+ ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
if (ret)
- goto out_err;
+ return ret;
}
- rbd_init_layout(rbd_dev);
return 0;
-
-out_err:
- rbd_dev->header.features = 0;
- kfree(rbd_dev->header.object_prefix);
- rbd_dev->header.object_prefix = NULL;
- return ret;
}
/*
@@ -6827,7 +6780,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
goto out_err;
}
- parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
+ parent = __rbd_dev_create(rbd_dev->parent_spec);
if (!parent) {
ret = -ENOMEM;
goto out_err;
@@ -6837,8 +6790,8 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
* Images related by parent/child relationships always share
* rbd_client and spec/parent_spec, so bump their refcounts.
*/
- __rbd_get_client(rbd_dev->rbd_client);
- rbd_spec_get(rbd_dev->parent_spec);
+ parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
+ parent->spec = rbd_spec_get(rbd_dev->parent_spec);
ret = rbd_dev_image_probe(parent, depth);
if (ret < 0)
@@ -6983,10 +6936,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
if (!depth)
down_write(&rbd_dev->header_rwsem);
- ret = rbd_dev_header_info(rbd_dev);
+ ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
if (ret)
goto err_out_probe;
+ rbd_init_layout(rbd_dev);
+
/*
* If this image is the one being mapped, we have pool name and
* id, image name and id, and snap name - need to fill snap id.
@@ -7020,7 +6975,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
}
if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
- ret = rbd_dev_v2_parent_info(rbd_dev);
+ ret = rbd_dev_setup_parent(rbd_dev);
if (ret)
goto err_out_probe;
}
@@ -7046,6 +7001,112 @@ err_out_format:
return ret;
}
+static void rbd_dev_update_header(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header)
+{
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
+
+ if (rbd_dev->header.image_size != header->image_size) {
+ rbd_dev->header.image_size = header->image_size;
+
+ if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
+ rbd_dev->mapping.size = header->image_size;
+ rbd_dev_update_size(rbd_dev);
+ }
+ }
+
+ if (rbd_dev->spec->snap_id != CEPH_NOSNAP) {
+ /* validate mapped snapshot's EXISTS flag */
+ rbd_exists_validate(rbd_dev);
+ }
+
+ ceph_put_snap_context(rbd_dev->header.snapc);
+ rbd_dev->header.snapc = header->snapc;
+ header->snapc = NULL;
+
+ if (rbd_dev->image_format == 1) {
+ kfree(rbd_dev->header.snap_names);
+ rbd_dev->header.snap_names = header->snap_names;
+ header->snap_names = NULL;
+
+ kfree(rbd_dev->header.snap_sizes);
+ rbd_dev->header.snap_sizes = header->snap_sizes;
+ header->snap_sizes = NULL;
+ }
+}
+
+static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
+ struct parent_image_info *pii)
+{
+ if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) {
+ /*
+ * Either the parent never existed, or we have a
+ * record of it but the image got flattened so it no
+ * longer has a parent. When the parent of a
+ * layered image disappears we immediately set the
+ * overlap to 0. The effect of this is that all new
+ * requests will be treated as if the image had no
+ * parent.
+ *
+ * If !pii.has_overlap, the parent image spec is not
+ * applicable. It's there to avoid duplication in each
+ * snapshot record.
+ */
+ if (rbd_dev->parent_overlap) {
+ rbd_dev->parent_overlap = 0;
+ rbd_dev_parent_put(rbd_dev);
+ pr_info("%s: clone has been flattened\n",
+ rbd_dev->disk->disk_name);
+ }
+ } else {
+ rbd_assert(rbd_dev->parent_spec);
+
+ /*
+ * Update the parent overlap. If it became zero, issue
+ * a warning as we will proceed as if there is no parent.
+ */
+ if (!pii->overlap && rbd_dev->parent_overlap)
+ rbd_warn(rbd_dev,
+ "clone has become standalone (overlap 0)");
+ rbd_dev->parent_overlap = pii->overlap;
+ }
+}
+
+static int rbd_dev_refresh(struct rbd_device *rbd_dev)
+{
+ struct rbd_image_header header = { 0 };
+ struct parent_image_info pii = { 0 };
+ int ret;
+
+ dout("%s rbd_dev %p\n", __func__, rbd_dev);
+
+ ret = rbd_dev_header_info(rbd_dev, &header, false);
+ if (ret)
+ goto out;
+
+ /*
+ * If there is a parent, see if it has disappeared due to the
+ * mapped image getting flattened.
+ */
+ if (rbd_dev->parent) {
+ ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
+ if (ret)
+ goto out;
+ }
+
+ down_write(&rbd_dev->header_rwsem);
+ rbd_dev_update_header(rbd_dev, &header);
+ if (rbd_dev->parent)
+ rbd_dev_update_parent(rbd_dev, &pii);
+ up_write(&rbd_dev->header_rwsem);
+
+out:
+ rbd_parent_info_cleanup(&pii);
+ rbd_image_header_cleanup(&header);
+ return ret;
+}
+
static ssize_t do_rbd_add(struct bus_type *bus,
const char *buf,
size_t count)
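
The restructured rbd_dev_refresh() shows the shape of the whole series: fetch
the new header (and parent info) into local staging structs with no locks
held, then commit under header_rwsem with cheap pointer swaps, so readers
never observe a half-updated header. Condensed sketch of that pattern:

	struct rbd_image_header header = { 0 };
	int ret;

	ret = rbd_dev_header_info(rbd_dev, &header, false);	/* slow I/O */
	if (ret)
		goto out;

	down_write(&rbd_dev->header_rwsem);
	rbd_dev_update_header(rbd_dev, &header);	/* quick swaps only */
	up_write(&rbd_dev->header_rwsem);
out:
	rbd_image_header_cleanup(&header);
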
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 6b2fd630de85..6622dd1aa07b 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -983,6 +983,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
print_version();
hp = mdesc_grab();
+ if (!hp)
+ return -ENODEV;
err = -ENODEV;
if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
deleted file mode 100644
index 4478eb7efee0..000000000000
--- a/drivers/block/sx8.c
+++ /dev/null
@@ -1,1586 +0,0 @@
-/*
- * sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware
- *
- * Copyright 2004-2005 Red Hat, Inc.
- *
- * Author/maintainer: Jeff Garzik <jgarzik@pobox.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/blk-mq.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/compiler.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/ktime.h>
-#include <linux/hdreg.h>
-#include <linux/dma-mapping.h>
-#include <linux/completion.h>
-#include <linux/scatterlist.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-
-#if 0
-#define CARM_DEBUG
-#define CARM_VERBOSE_DEBUG
-#else
-#undef CARM_DEBUG
-#undef CARM_VERBOSE_DEBUG
-#endif
-#undef CARM_NDEBUG
-
-#define DRV_NAME "sx8"
-#define DRV_VERSION "1.0"
-#define PFX DRV_NAME ": "
-
-MODULE_AUTHOR("Jeff Garzik");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Promise SATA SX8 block driver");
-MODULE_VERSION(DRV_VERSION);
-
-/*
- * SX8 hardware has a single message queue for all ATA ports.
- * When this driver was written, the hardware (firmware?) would
- * corrupt data eventually, if more than one request was outstanding.
- * As one can imagine, having 8 ports bottlenecking on a single
- * command hurts performance.
- *
- * Based on user reports, later versions of the hardware (firmware?)
- * seem to be able to survive with more than one command queued.
- *
- * Therefore, we default to the safe option -- 1 command -- but
- * allow the user to increase this.
- *
- * SX8 should be able to support up to ~60 queued commands (CARM_MAX_REQ),
- * but problems seem to occur when you exceed ~30, even on newer hardware.
- */
-static int max_queue = 1;
-module_param(max_queue, int, 0444);
-MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. (min==1, max==30, safe==1)");
-
-
-#define NEXT_RESP(idx) ((idx + 1) % RMSG_Q_LEN)
-
-/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */
-#define TAG_ENCODE(tag) (((tag) << 16) | 0xf)
-#define TAG_DECODE(tag) (((tag) >> 16) & 0x1f)
-#define TAG_VALID(tag) ((((tag) & 0xf) == 0xf) && (TAG_DECODE(tag) < 32))
-
-/* note: prints function name for you */
-#ifdef CARM_DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#ifdef CARM_VERBOSE_DEBUG
-#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#else
-#define VPRINTK(fmt, args...)
-#endif /* CARM_VERBOSE_DEBUG */
-#else
-#define DPRINTK(fmt, args...)
-#define VPRINTK(fmt, args...)
-#endif /* CARM_DEBUG */
-
-#ifdef CARM_NDEBUG
-#define assert(expr)
-#else
-#define assert(expr) \
- if(unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- }
-#endif
-
-/* defines only for the constants which don't work well as enums */
-struct carm_host;
-
-enum {
- /* adapter-wide limits */
- CARM_MAX_PORTS = 8,
- CARM_SHM_SIZE = (4096 << 7),
- CARM_MINORS_PER_MAJOR = 256 / CARM_MAX_PORTS,
- CARM_MAX_WAIT_Q = CARM_MAX_PORTS + 1,
-
- /* command message queue limits */
- CARM_MAX_REQ = 64, /* max command msgs per host */
- CARM_MSG_LOW_WATER = (CARM_MAX_REQ / 4), /* refill mark */
-
- /* S/G limits, host-wide and per-request */
- CARM_MAX_REQ_SG = 32, /* max s/g entries per request */
- CARM_MAX_HOST_SG = 600, /* max s/g entries per host */
- CARM_SG_LOW_WATER = (CARM_MAX_HOST_SG / 4), /* re-fill mark */
-
- /* hardware registers */
- CARM_IHQP = 0x1c,
- CARM_INT_STAT = 0x10, /* interrupt status */
- CARM_INT_MASK = 0x14, /* interrupt mask */
- CARM_HMUC = 0x18, /* host message unit control */
- RBUF_ADDR_LO = 0x20, /* response msg DMA buf low 32 bits */
- RBUF_ADDR_HI = 0x24, /* response msg DMA buf high 32 bits */
- RBUF_BYTE_SZ = 0x28,
- CARM_RESP_IDX = 0x2c,
- CARM_CMS0 = 0x30, /* command message size reg 0 */
- CARM_LMUC = 0x48,
- CARM_HMPHA = 0x6c,
- CARM_INITC = 0xb5,
-
- /* bits in CARM_INT_{STAT,MASK} */
- INT_RESERVED = 0xfffffff0,
- INT_WATCHDOG = (1 << 3), /* watchdog timer */
- INT_Q_OVERFLOW = (1 << 2), /* cmd msg q overflow */
- INT_Q_AVAILABLE = (1 << 1), /* cmd msg q has free space */
- INT_RESPONSE = (1 << 0), /* response msg available */
- INT_ACK_MASK = INT_WATCHDOG | INT_Q_OVERFLOW,
- INT_DEF_MASK = INT_RESERVED | INT_Q_OVERFLOW |
- INT_RESPONSE,
-
- /* command messages, and related register bits */
- CARM_HAVE_RESP = 0x01,
- CARM_MSG_READ = 1,
- CARM_MSG_WRITE = 2,
- CARM_MSG_VERIFY = 3,
- CARM_MSG_GET_CAPACITY = 4,
- CARM_MSG_FLUSH = 5,
- CARM_MSG_IOCTL = 6,
- CARM_MSG_ARRAY = 8,
- CARM_MSG_MISC = 9,
- CARM_CME = (1 << 2),
- CARM_RME = (1 << 1),
- CARM_WZBC = (1 << 0),
- CARM_RMI = (1 << 0),
- CARM_Q_FULL = (1 << 3),
- CARM_MSG_SIZE = 288,
- CARM_Q_LEN = 48,
-
- /* CARM_MSG_IOCTL messages */
- CARM_IOC_SCAN_CHAN = 5, /* scan channels for devices */
- CARM_IOC_GET_TCQ = 13, /* get tcq/ncq depth */
- CARM_IOC_SET_TCQ = 14, /* set tcq/ncq depth */
-
- IOC_SCAN_CHAN_NODEV = 0x1f,
- IOC_SCAN_CHAN_OFFSET = 0x40,
-
- /* CARM_MSG_ARRAY messages */
- CARM_ARRAY_INFO = 0,
-
- ARRAY_NO_EXIST = (1 << 31),
-
- /* response messages */
- RMSG_SZ = 8, /* sizeof(struct carm_response) */
- RMSG_Q_LEN = 48, /* resp. msg list length */
- RMSG_OK = 1, /* bit indicating msg was successful */
- /* length of entire resp. msg buffer */
- RBUF_LEN = RMSG_SZ * RMSG_Q_LEN,
-
- PDC_SHM_SIZE = (4096 << 7), /* length of entire h/w buffer */
-
- /* CARM_MSG_MISC messages */
- MISC_GET_FW_VER = 2,
- MISC_ALLOC_MEM = 3,
- MISC_SET_TIME = 5,
-
- /* MISC_GET_FW_VER feature bits */
- FW_VER_4PORT = (1 << 2), /* 1=4 ports, 0=8 ports */
- FW_VER_NON_RAID = (1 << 1), /* 1=non-RAID firmware, 0=RAID */
- FW_VER_ZCR = (1 << 0), /* zero channel RAID (whatever that is) */
-
- /* carm_host flags */
- FL_NON_RAID = FW_VER_NON_RAID,
- FL_4PORT = FW_VER_4PORT,
- FL_FW_VER_MASK = (FW_VER_NON_RAID | FW_VER_4PORT),
- FL_DYN_MAJOR = (1 << 17),
-};
-
-enum {
- CARM_SG_BOUNDARY = 0xffffUL, /* s/g segment boundary */
-};
-
-enum scatter_gather_types {
- SGT_32BIT = 0,
- SGT_64BIT = 1,
-};
-
-enum host_states {
- HST_INVALID, /* invalid state; never used */
- HST_ALLOC_BUF, /* setting up master SHM area */
- HST_ERROR, /* we never leave here */
- HST_PORT_SCAN, /* start dev scan */
- HST_DEV_SCAN_START, /* start per-device probe */
- HST_DEV_SCAN, /* continue per-device probe */
- HST_DEV_ACTIVATE, /* activate devices we found */
- HST_PROBE_FINISHED, /* probe is complete */
- HST_PROBE_START, /* initiate probe */
- HST_SYNC_TIME, /* tell firmware what time it is */
- HST_GET_FW_VER, /* get firmware version, adapter port cnt */
-};
-
-#ifdef CARM_DEBUG
-static const char *state_name[] = {
- "HST_INVALID",
- "HST_ALLOC_BUF",
- "HST_ERROR",
- "HST_PORT_SCAN",
- "HST_DEV_SCAN_START",
- "HST_DEV_SCAN",
- "HST_DEV_ACTIVATE",
- "HST_PROBE_FINISHED",
- "HST_PROBE_START",
- "HST_SYNC_TIME",
- "HST_GET_FW_VER",
-};
-#endif
-
-struct carm_port {
- unsigned int port_no;
- struct gendisk *disk;
- struct carm_host *host;
-
- /* attached device characteristics */
- u64 capacity;
- char name[41];
- u16 dev_geom_head;
- u16 dev_geom_sect;
- u16 dev_geom_cyl;
-};
-
-struct carm_request {
- int n_elem;
- unsigned int msg_type;
- unsigned int msg_subtype;
- unsigned int msg_bucket;
- struct scatterlist sg[CARM_MAX_REQ_SG];
-};
-
-struct carm_host {
- unsigned long flags;
- void __iomem *mmio;
- void *shm;
- dma_addr_t shm_dma;
-
- int major;
- int id;
- char name[32];
-
- spinlock_t lock;
- struct pci_dev *pdev;
- unsigned int state;
- u32 fw_ver;
-
- struct blk_mq_tag_set tag_set;
- struct request_queue *oob_q;
- unsigned int n_oob;
-
- unsigned int hw_sg_used;
-
- unsigned int resp_idx;
-
- unsigned int wait_q_prod;
- unsigned int wait_q_cons;
- struct request_queue *wait_q[CARM_MAX_WAIT_Q];
-
- void *msg_base;
- dma_addr_t msg_dma;
-
- int cur_scan_dev;
- unsigned long dev_active;
- unsigned long dev_present;
- struct carm_port port[CARM_MAX_PORTS];
-
- struct work_struct fsm_task;
-
- struct completion probe_comp;
-};
-
-struct carm_response {
- __le32 ret_handle;
- __le32 status;
-} __attribute__((packed));
-
-struct carm_msg_sg {
- __le32 start;
- __le32 len;
-} __attribute__((packed));
-
-struct carm_msg_rw {
- u8 type;
- u8 id;
- u8 sg_count;
- u8 sg_type;
- __le32 handle;
- __le32 lba;
- __le16 lba_count;
- __le16 lba_high;
- struct carm_msg_sg sg[32];
-} __attribute__((packed));
-
-struct carm_msg_allocbuf {
- u8 type;
- u8 subtype;
- u8 n_sg;
- u8 sg_type;
- __le32 handle;
- __le32 addr;
- __le32 len;
- __le32 evt_pool;
- __le32 n_evt;
- __le32 rbuf_pool;
- __le32 n_rbuf;
- __le32 msg_pool;
- __le32 n_msg;
- struct carm_msg_sg sg[8];
-} __attribute__((packed));
-
-struct carm_msg_ioctl {
- u8 type;
- u8 subtype;
- u8 array_id;
- u8 reserved1;
- __le32 handle;
- __le32 data_addr;
- u32 reserved2;
-} __attribute__((packed));
-
-struct carm_msg_sync_time {
- u8 type;
- u8 subtype;
- u16 reserved1;
- __le32 handle;
- u32 reserved2;
- __le32 timestamp;
-} __attribute__((packed));
-
-struct carm_msg_get_fw_ver {
- u8 type;
- u8 subtype;
- u16 reserved1;
- __le32 handle;
- __le32 data_addr;
- u32 reserved2;
-} __attribute__((packed));
-
-struct carm_fw_ver {
- __le32 version;
- u8 features;
- u8 reserved1;
- u16 reserved2;
-} __attribute__((packed));
-
-struct carm_array_info {
- __le32 size;
-
- __le16 size_hi;
- __le16 stripe_size;
-
- __le32 mode;
-
- __le16 stripe_blk_sz;
- __le16 reserved1;
-
- __le16 cyl;
- __le16 head;
-
- __le16 sect;
- u8 array_id;
- u8 reserved2;
-
- char name[40];
-
- __le32 array_status;
-
- /* device list continues beyond this point? */
-} __attribute__((packed));
-
-static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static void carm_remove_one (struct pci_dev *pdev);
-static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-
-static const struct pci_device_id carm_pci_tbl[] = {
- { PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
- { PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
- { } /* terminate list */
-};
-MODULE_DEVICE_TABLE(pci, carm_pci_tbl);
-
-static struct pci_driver carm_driver = {
- .name = DRV_NAME,
- .id_table = carm_pci_tbl,
- .probe = carm_init_one,
- .remove = carm_remove_one,
-};
-
-static const struct block_device_operations carm_bd_ops = {
- .owner = THIS_MODULE,
- .getgeo = carm_bdev_getgeo,
-};
-
-static unsigned int carm_host_id;
-static unsigned long carm_major_alloc;
-
-
-
-static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct carm_port *port = bdev->bd_disk->private_data;
-
- geo->heads = (u8) port->dev_geom_head;
- geo->sectors = (u8) port->dev_geom_sect;
- geo->cylinders = port->dev_geom_cyl;
- return 0;
-}
-
-static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE };
-
-static inline int carm_lookup_bucket(u32 msg_size)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
- if (msg_size <= msg_sizes[i])
- return i;
-
- return -ENOENT;
-}
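/*
 * Worked example (annotation, not driver code): a read/write message is
 * a 16-byte carm_msg_rw header plus one 8-byte carm_msg_sg per segment,
 * so a request with 6 s/g entries needs 16 + 6 * 8 = 64 bytes, and
 * carm_lookup_bucket(64) returns 1, the 64-byte bucket. The bucket index
 * is later folded into the CARM_IHQP doorbell write in carm_send_msg().
 */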
-
-static void carm_init_buckets(void __iomem *mmio)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
- writel(msg_sizes[i], mmio + CARM_CMS0 + (4 * i));
-}
-
-static inline void *carm_ref_msg(struct carm_host *host,
- unsigned int msg_idx)
-{
- return host->msg_base + (msg_idx * CARM_MSG_SIZE);
-}
-
-static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host,
- unsigned int msg_idx)
-{
- return host->msg_dma + (msg_idx * CARM_MSG_SIZE);
-}
-
-static int carm_send_msg(struct carm_host *host,
- struct carm_request *crq, unsigned tag)
-{
- void __iomem *mmio = host->mmio;
- u32 msg = (u32) carm_ref_msg_dma(host, tag);
- u32 cm_bucket = crq->msg_bucket;
- u32 tmp;
- int rc = 0;
-
- VPRINTK("ENTER\n");
-
- tmp = readl(mmio + CARM_HMUC);
- if (tmp & CARM_Q_FULL) {
-#if 0
- tmp = readl(mmio + CARM_INT_MASK);
- tmp |= INT_Q_AVAILABLE;
- writel(tmp, mmio + CARM_INT_MASK);
- readl(mmio + CARM_INT_MASK); /* flush */
-#endif
- DPRINTK("host msg queue full\n");
- rc = -EBUSY;
- } else {
- writel(msg | (cm_bucket << 1), mmio + CARM_IHQP);
- readl(mmio + CARM_IHQP); /* flush */
- }
-
- return rc;
-}
-
-static int carm_array_info (struct carm_host *host, unsigned int array_idx)
-{
- struct carm_msg_ioctl *ioc;
- u32 msg_data;
- dma_addr_t msg_dma;
- struct carm_request *crq;
- struct request *rq;
- int rc;
-
- rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(rq)) {
- rc = -ENOMEM;
- goto err_out;
- }
- crq = blk_mq_rq_to_pdu(rq);
-
- ioc = carm_ref_msg(host, rq->tag);
- msg_dma = carm_ref_msg_dma(host, rq->tag);
- msg_data = (u32) (msg_dma + sizeof(struct carm_array_info));
-
- crq->msg_type = CARM_MSG_ARRAY;
- crq->msg_subtype = CARM_ARRAY_INFO;
- rc = carm_lookup_bucket(sizeof(struct carm_msg_ioctl) +
- sizeof(struct carm_array_info));
- BUG_ON(rc < 0);
- crq->msg_bucket = (u32) rc;
-
- memset(ioc, 0, sizeof(*ioc));
- ioc->type = CARM_MSG_ARRAY;
- ioc->subtype = CARM_ARRAY_INFO;
- ioc->array_id = (u8) array_idx;
- ioc->handle = cpu_to_le32(TAG_ENCODE(rq->tag));
- ioc->data_addr = cpu_to_le32(msg_data);
-
- spin_lock_irq(&host->lock);
- assert(host->state == HST_DEV_SCAN_START ||
- host->state == HST_DEV_SCAN);
- spin_unlock_irq(&host->lock);
-
- DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);
-
- return 0;
-
-err_out:
- spin_lock_irq(&host->lock);
- host->state = HST_ERROR;
- spin_unlock_irq(&host->lock);
- return rc;
-}
-
-typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *);
-
-static int carm_send_special (struct carm_host *host, carm_sspc_t func)
-{
- struct request *rq;
- struct carm_request *crq;
- struct carm_msg_ioctl *ioc;
- void *mem;
- unsigned int msg_size;
- int rc;
-
- rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(rq))
- return -ENOMEM;
- crq = blk_mq_rq_to_pdu(rq);
-
- mem = carm_ref_msg(host, rq->tag);
-
- msg_size = func(host, rq->tag, mem);
-
- ioc = mem;
- crq->msg_type = ioc->type;
- crq->msg_subtype = ioc->subtype;
- rc = carm_lookup_bucket(msg_size);
- BUG_ON(rc < 0);
- crq->msg_bucket = (u32) rc;
-
- DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);
-
- return 0;
-}
-
-static unsigned int carm_fill_sync_time(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_sync_time *st = mem;
-
- time64_t tv = ktime_get_real_seconds();
-
- memset(st, 0, sizeof(*st));
- st->type = CARM_MSG_MISC;
- st->subtype = MISC_SET_TIME;
- st->handle = cpu_to_le32(TAG_ENCODE(idx));
- st->timestamp = cpu_to_le32(tv);
-
- return sizeof(struct carm_msg_sync_time);
-}
-
-static unsigned int carm_fill_alloc_buf(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_allocbuf *ab = mem;
-
- memset(ab, 0, sizeof(*ab));
- ab->type = CARM_MSG_MISC;
- ab->subtype = MISC_ALLOC_MEM;
- ab->handle = cpu_to_le32(TAG_ENCODE(idx));
- ab->n_sg = 1;
- ab->sg_type = SGT_32BIT;
- ab->addr = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
- ab->len = cpu_to_le32(PDC_SHM_SIZE >> 1);
- ab->evt_pool = cpu_to_le32(host->shm_dma + (16 * 1024));
- ab->n_evt = cpu_to_le32(1024);
- ab->rbuf_pool = cpu_to_le32(host->shm_dma);
- ab->n_rbuf = cpu_to_le32(RMSG_Q_LEN);
- ab->msg_pool = cpu_to_le32(host->shm_dma + RBUF_LEN);
- ab->n_msg = cpu_to_le32(CARM_Q_LEN);
- ab->sg[0].start = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
- ab->sg[0].len = cpu_to_le32(65536);
-
- return sizeof(struct carm_msg_allocbuf);
-}
-
-static unsigned int carm_fill_scan_channels(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_ioctl *ioc = mem;
- u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) +
- IOC_SCAN_CHAN_OFFSET);
-
- memset(ioc, 0, sizeof(*ioc));
- ioc->type = CARM_MSG_IOCTL;
- ioc->subtype = CARM_IOC_SCAN_CHAN;
- ioc->handle = cpu_to_le32(TAG_ENCODE(idx));
- ioc->data_addr = cpu_to_le32(msg_data);
-
- /* fill output data area with "no device" default values */
- mem += IOC_SCAN_CHAN_OFFSET;
- memset(mem, IOC_SCAN_CHAN_NODEV, CARM_MAX_PORTS);
-
- return IOC_SCAN_CHAN_OFFSET + CARM_MAX_PORTS;
-}
-
-static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_get_fw_ver *ioc = mem;
- u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc));
-
- memset(ioc, 0, sizeof(*ioc));
- ioc->type = CARM_MSG_MISC;
- ioc->subtype = MISC_GET_FW_VER;
- ioc->handle = cpu_to_le32(TAG_ENCODE(idx));
- ioc->data_addr = cpu_to_le32(msg_data);
-
- return sizeof(struct carm_msg_get_fw_ver) +
- sizeof(struct carm_fw_ver);
-}
-
-static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
-{
- unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;
-
- blk_mq_stop_hw_queues(q);
- VPRINTK("STOPPED QUEUE %p\n", q);
-
- host->wait_q[idx] = q;
- host->wait_q_prod++;
- BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
-}
-
-static inline struct request_queue *carm_pop_q(struct carm_host *host)
-{
- unsigned int idx;
-
- if (host->wait_q_prod == host->wait_q_cons)
- return NULL;
-
- idx = host->wait_q_cons % CARM_MAX_WAIT_Q;
- host->wait_q_cons++;
-
- return host->wait_q[idx];
-}
-
-static inline void carm_round_robin(struct carm_host *host)
-{
- struct request_queue *q = carm_pop_q(host);
- if (q) {
- blk_mq_start_hw_queues(q);
- VPRINTK("STARTED QUEUE %p\n", q);
- }
-}
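/*
 * Sketch of the ring discipline above (annotation): wait_q_prod and
 * wait_q_cons are free-running counters, reduced modulo CARM_MAX_WAIT_Q
 * only at array-index time. The ring is empty when the counters are
 * equal, so a push that made them equal again would alias "full" with
 * "empty" -- that is the overrun carm_push_q()'s BUG_ON catches. A
 * hypothetical occupancy helper, relying on unsigned wraparound:
 *
 *	static inline unsigned int carm_wait_q_used(struct carm_host *host)
 *	{
 *		return host->wait_q_prod - host->wait_q_cons;
 *	}
 */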
-
-static inline enum dma_data_direction carm_rq_dir(struct request *rq)
-{
- return op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-}
-
-static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- struct request_queue *q = hctx->queue;
- struct request *rq = bd->rq;
- struct carm_port *port = q->queuedata;
- struct carm_host *host = port->host;
- struct carm_request *crq = blk_mq_rq_to_pdu(rq);
- struct carm_msg_rw *msg;
- struct scatterlist *sg;
- int i, n_elem = 0, rc;
- unsigned int msg_size;
- u32 tmp;
-
- crq->n_elem = 0;
- sg_init_table(crq->sg, CARM_MAX_REQ_SG);
-
- blk_mq_start_request(rq);
-
- spin_lock_irq(&host->lock);
- if (req_op(rq) == REQ_OP_DRV_OUT)
- goto send_msg;
-
- /* get scatterlist from block layer */
- sg = &crq->sg[0];
- n_elem = blk_rq_map_sg(q, rq, sg);
- if (n_elem <= 0)
- goto out_ioerr;
-
- /* map scatterlist to PCI bus addresses */
- n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, carm_rq_dir(rq));
- if (n_elem <= 0)
- goto out_ioerr;
-
- /* obey global hardware limit on S/G entries */
- if (host->hw_sg_used >= CARM_MAX_HOST_SG - n_elem)
- goto out_resource;
-
- crq->n_elem = n_elem;
- host->hw_sg_used += n_elem;
-
- /*
- * build read/write message
- */
-
- VPRINTK("build msg\n");
- msg = (struct carm_msg_rw *) carm_ref_msg(host, rq->tag);
-
- if (rq_data_dir(rq) == WRITE) {
- msg->type = CARM_MSG_WRITE;
- crq->msg_type = CARM_MSG_WRITE;
- } else {
- msg->type = CARM_MSG_READ;
- crq->msg_type = CARM_MSG_READ;
- }
-
- msg->id = port->port_no;
- msg->sg_count = n_elem;
- msg->sg_type = SGT_32BIT;
- msg->handle = cpu_to_le32(TAG_ENCODE(rq->tag));
- msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
- tmp = (blk_rq_pos(rq) >> 16) >> 16;
- msg->lba_high = cpu_to_le16( (u16) tmp );
- msg->lba_count = cpu_to_le16(blk_rq_sectors(rq));
-
- msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
- for (i = 0; i < n_elem; i++) {
- struct carm_msg_sg *carm_sg = &msg->sg[i];
- carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i]));
- carm_sg->len = cpu_to_le32(sg_dma_len(&crq->sg[i]));
- msg_size += sizeof(struct carm_msg_sg);
- }
-
- rc = carm_lookup_bucket(msg_size);
- BUG_ON(rc < 0);
- crq->msg_bucket = (u32) rc;
-send_msg:
- /*
- * queue read/write message to hardware
- */
- VPRINTK("send msg, tag == %u\n", rq->tag);
- rc = carm_send_msg(host, crq, rq->tag);
- if (rc) {
- host->hw_sg_used -= n_elem;
- goto out_resource;
- }
-
- spin_unlock_irq(&host->lock);
- return BLK_STS_OK;
-out_resource:
- dma_unmap_sg(&host->pdev->dev, &crq->sg[0], n_elem, carm_rq_dir(rq));
- carm_push_q(host, q);
- spin_unlock_irq(&host->lock);
- return BLK_STS_DEV_RESOURCE;
-out_ioerr:
- carm_round_robin(host);
- spin_unlock_irq(&host->lock);
- return BLK_STS_IOERR;
-}
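/*
 * Flow-control sketch (annotation): the adapter exposes one global pool
 * of CARM_MAX_HOST_SG scatter/gather slots shared by every port. When a
 * request would overrun the pool, carm_queue_rq() returns
 * BLK_STS_DEV_RESOURCE and parks its queue on the wait-q ring; each
 * completion returns its slots in carm_handle_resp(), and once usage
 * drops to CARM_SG_LOW_WATER one parked queue is restarted via
 * carm_round_robin().
 */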
-
-static void carm_handle_array_info(struct carm_host *host,
- struct carm_request *crq, u8 *mem,
- blk_status_t error)
-{
- struct carm_port *port;
- u8 *msg_data = mem + sizeof(struct carm_array_info);
- struct carm_array_info *desc = (struct carm_array_info *) msg_data;
- u64 lo, hi;
- int cur_port;
- size_t slen;
-
- DPRINTK("ENTER\n");
-
- if (error)
- goto out;
- if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
- goto out;
-
- cur_port = host->cur_scan_dev;
-
- /* should never occur */
- if ((cur_port < 0) || (cur_port >= CARM_MAX_PORTS)) {
- printk(KERN_ERR PFX "BUG: cur_scan_dev==%d, array_id==%d\n",
- cur_port, (int) desc->array_id);
- goto out;
- }
-
- port = &host->port[cur_port];
-
- lo = (u64) le32_to_cpu(desc->size);
- hi = (u64) le16_to_cpu(desc->size_hi);
-
- port->capacity = lo | (hi << 32);
- port->dev_geom_head = le16_to_cpu(desc->head);
- port->dev_geom_sect = le16_to_cpu(desc->sect);
- port->dev_geom_cyl = le16_to_cpu(desc->cyl);
-
- host->dev_active |= (1 << cur_port);
-
- strncpy(port->name, desc->name, sizeof(port->name));
- port->name[sizeof(port->name) - 1] = 0;
- slen = strlen(port->name);
- while (slen && (port->name[slen - 1] == ' ')) {
- port->name[slen - 1] = 0;
- slen--;
- }
-
- printk(KERN_INFO DRV_NAME "(%s): port %u device %Lu sectors\n",
- pci_name(host->pdev), port->port_no,
- (unsigned long long) port->capacity);
- printk(KERN_INFO DRV_NAME "(%s): port %u device \"%s\"\n",
- pci_name(host->pdev), port->port_no, port->name);
-
-out:
- assert(host->state == HST_DEV_SCAN);
- schedule_work(&host->fsm_task);
-}
-
-static void carm_handle_scan_chan(struct carm_host *host,
- struct carm_request *crq, u8 *mem,
- blk_status_t error)
-{
- u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
- unsigned int i, dev_count = 0;
- int new_state = HST_DEV_SCAN_START;
-
- DPRINTK("ENTER\n");
-
- if (error) {
- new_state = HST_ERROR;
- goto out;
- }
-
- /* TODO: scan and support non-disk devices */
- for (i = 0; i < 8; i++)
- if (msg_data[i] == 0) { /* direct-access device (disk) */
- host->dev_present |= (1 << i);
- dev_count++;
- }
-
- printk(KERN_INFO DRV_NAME "(%s): found %u interesting devices\n",
- pci_name(host->pdev), dev_count);
-
-out:
- assert(host->state == HST_PORT_SCAN);
- host->state = new_state;
- schedule_work(&host->fsm_task);
-}
-
-static void carm_handle_generic(struct carm_host *host,
- struct carm_request *crq, blk_status_t error,
- int cur_state, int next_state)
-{
- DPRINTK("ENTER\n");
-
- assert(host->state == cur_state);
- if (error)
- host->state = HST_ERROR;
- else
- host->state = next_state;
- schedule_work(&host->fsm_task);
-}
-
-static inline void carm_handle_resp(struct carm_host *host,
- __le32 ret_handle_le, u32 status)
-{
- u32 handle = le32_to_cpu(ret_handle_le);
- unsigned int msg_idx;
- struct request *rq;
- struct carm_request *crq;
- blk_status_t error = (status == RMSG_OK) ? 0 : BLK_STS_IOERR;
- u8 *mem;
-
- VPRINTK("ENTER, handle == 0x%x\n", handle);
-
- if (unlikely(!TAG_VALID(handle))) {
- printk(KERN_ERR DRV_NAME "(%s): BUG: invalid tag 0x%x\n",
- pci_name(host->pdev), handle);
- return;
- }
-
- msg_idx = TAG_DECODE(handle);
- VPRINTK("tag == %u\n", msg_idx);
-
- rq = blk_mq_tag_to_rq(host->tag_set.tags[0], msg_idx);
- crq = blk_mq_rq_to_pdu(rq);
-
- /* fast path */
- if (likely(crq->msg_type == CARM_MSG_READ ||
- crq->msg_type == CARM_MSG_WRITE)) {
- dma_unmap_sg(&host->pdev->dev, &crq->sg[0], crq->n_elem,
- carm_rq_dir(rq));
- goto done;
- }
-
- mem = carm_ref_msg(host, msg_idx);
-
- switch (crq->msg_type) {
- case CARM_MSG_IOCTL: {
- switch (crq->msg_subtype) {
- case CARM_IOC_SCAN_CHAN:
- carm_handle_scan_chan(host, crq, mem, error);
- goto done;
- default:
- /* unknown / invalid response */
- goto err_out;
- }
- break;
- }
-
- case CARM_MSG_MISC: {
- switch (crq->msg_subtype) {
- case MISC_ALLOC_MEM:
- carm_handle_generic(host, crq, error,
- HST_ALLOC_BUF, HST_SYNC_TIME);
- goto done;
- case MISC_SET_TIME:
- carm_handle_generic(host, crq, error,
- HST_SYNC_TIME, HST_GET_FW_VER);
- goto done;
- case MISC_GET_FW_VER: {
- struct carm_fw_ver *ver = (struct carm_fw_ver *)
- (mem + sizeof(struct carm_msg_get_fw_ver));
- if (!error) {
- host->fw_ver = le32_to_cpu(ver->version);
- host->flags |= (ver->features & FL_FW_VER_MASK);
- }
- carm_handle_generic(host, crq, error,
- HST_GET_FW_VER, HST_PORT_SCAN);
- goto done;
- }
- default:
- /* unknown / invalid response */
- goto err_out;
- }
- break;
- }
-
- case CARM_MSG_ARRAY: {
- switch (crq->msg_subtype) {
- case CARM_ARRAY_INFO:
- carm_handle_array_info(host, crq, mem, error);
- break;
- default:
- /* unknown / invalid response */
- goto err_out;
- }
- break;
- }
-
- default:
- /* unknown / invalid response */
- goto err_out;
- }
-
- return;
-
-err_out:
- printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
- pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
- error = BLK_STS_IOERR;
-done:
- host->hw_sg_used -= crq->n_elem;
- blk_mq_end_request(blk_mq_rq_from_pdu(crq), error);
-
- if (host->hw_sg_used <= CARM_SG_LOW_WATER)
- carm_round_robin(host);
-}
-
-static inline void carm_handle_responses(struct carm_host *host)
-{
- void __iomem *mmio = host->mmio;
- struct carm_response *resp = (struct carm_response *) host->shm;
- unsigned int work = 0;
- unsigned int idx = host->resp_idx % RMSG_Q_LEN;
-
- while (1) {
- u32 status = le32_to_cpu(resp[idx].status);
-
- if (status == 0xffffffff) {
- VPRINTK("ending response on index %u\n", idx);
- writel(idx << 3, mmio + CARM_RESP_IDX);
- break;
- }
-
- /* response to a message we sent */
- else if ((status & (1 << 31)) == 0) {
- VPRINTK("handling msg response on index %u\n", idx);
- carm_handle_resp(host, resp[idx].ret_handle, status);
- resp[idx].status = cpu_to_le32(0xffffffff);
- }
-
- /* asynchronous events the hardware throws our way */
- else if ((status & 0xff000000) == (1 << 31)) {
- u8 *evt_type_ptr = (u8 *) &resp[idx];
- u8 evt_type = *evt_type_ptr;
- printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n",
- pci_name(host->pdev), (int) evt_type);
- resp[idx].status = cpu_to_le32(0xffffffff);
- }
-
- idx = NEXT_RESP(idx);
- work++;
- }
-
- VPRINTK("EXIT, work==%u\n", work);
- host->resp_idx += work;
-}
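/*
 * Response-ring convention, inferred from the loop above (annotation):
 *
 *	status == 0xffffffff       slot empty; ack position via CARM_RESP_IDX
 *	bit 31 clear               completion for a message we sent
 *	(status & 0xff000000)
 *	        == 0x80000000      asynchronous firmware event
 *
 * Consumed slots are re-armed with the 0xffffffff sentinel so the next
 * interrupt can find where the valid responses end.
 */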
-
-static irqreturn_t carm_interrupt(int irq, void *__host)
-{
- struct carm_host *host = __host;
- void __iomem *mmio;
- u32 mask;
- int handled = 0;
- unsigned long flags;
-
- if (!host) {
- VPRINTK("no host\n");
- return IRQ_NONE;
- }
-
- spin_lock_irqsave(&host->lock, flags);
-
- mmio = host->mmio;
-
- /* reading should also clear interrupts */
- mask = readl(mmio + CARM_INT_STAT);
-
- if (mask == 0 || mask == 0xffffffff) {
- VPRINTK("no work, mask == 0x%x\n", mask);
- goto out;
- }
-
- if (mask & INT_ACK_MASK)
- writel(mask, mmio + CARM_INT_STAT);
-
- if (unlikely(host->state == HST_INVALID)) {
- VPRINTK("not initialized yet, mask = 0x%x\n", mask);
- goto out;
- }
-
- if (mask & CARM_HAVE_RESP) {
- handled = 1;
- carm_handle_responses(host);
- }
-
-out:
- spin_unlock_irqrestore(&host->lock, flags);
- VPRINTK("EXIT\n");
- return IRQ_RETVAL(handled);
-}
-
-static void carm_fsm_task (struct work_struct *work)
-{
- struct carm_host *host =
- container_of(work, struct carm_host, fsm_task);
- unsigned long flags;
- unsigned int state;
- int rc, i, next_dev;
- int reschedule = 0;
- int new_state = HST_INVALID;
-
- spin_lock_irqsave(&host->lock, flags);
- state = host->state;
- spin_unlock_irqrestore(&host->lock, flags);
-
- DPRINTK("ENTER, state == %s\n", state_name[state]);
-
- switch (state) {
- case HST_PROBE_START:
- new_state = HST_ALLOC_BUF;
- reschedule = 1;
- break;
-
- case HST_ALLOC_BUF:
- rc = carm_send_special(host, carm_fill_alloc_buf);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_SYNC_TIME:
- rc = carm_send_special(host, carm_fill_sync_time);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_GET_FW_VER:
- rc = carm_send_special(host, carm_fill_get_fw_ver);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_PORT_SCAN:
- rc = carm_send_special(host, carm_fill_scan_channels);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_DEV_SCAN_START:
- host->cur_scan_dev = -1;
- new_state = HST_DEV_SCAN;
- reschedule = 1;
- break;
-
- case HST_DEV_SCAN:
- next_dev = -1;
- for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++)
- if (host->dev_present & (1 << i)) {
- next_dev = i;
- break;
- }
-
- if (next_dev >= 0) {
- host->cur_scan_dev = next_dev;
- rc = carm_array_info(host, next_dev);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- } else {
- new_state = HST_DEV_ACTIVATE;
- reschedule = 1;
- }
- break;
-
- case HST_DEV_ACTIVATE: {
- int activated = 0;
- for (i = 0; i < CARM_MAX_PORTS; i++)
- if (host->dev_active & (1 << i)) {
- struct carm_port *port = &host->port[i];
- struct gendisk *disk = port->disk;
-
- set_capacity(disk, port->capacity);
- add_disk(disk);
- activated++;
- }
-
- printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n",
- pci_name(host->pdev), activated);
-
- new_state = HST_PROBE_FINISHED;
- reschedule = 1;
- break;
- }
-
- case HST_PROBE_FINISHED:
- complete(&host->probe_comp);
- break;
-
- case HST_ERROR:
- /* FIXME: TODO */
- break;
-
- default:
- /* should never occur */
- printk(KERN_ERR PFX "BUG: unknown state %d\n", state);
- assert(0);
- break;
- }
-
- if (new_state != HST_INVALID) {
- spin_lock_irqsave(&host->lock, flags);
- host->state = new_state;
- spin_unlock_irqrestore(&host->lock, flags);
- }
- if (reschedule)
- schedule_work(&host->fsm_task);
-}
-
-static int carm_init_wait(void __iomem *mmio, u32 bits, unsigned int test_bit)
-{
- unsigned int i;
-
- for (i = 0; i < 50000; i++) {
- u32 tmp = readl(mmio + CARM_LMUC);
- udelay(100);
-
- if (test_bit) {
- if ((tmp & bits) == bits)
- return 0;
- } else {
- if ((tmp & bits) == 0)
- return 0;
- }
-
- cond_resched();
- }
-
- printk(KERN_ERR PFX "carm_init_wait timeout, bits == 0x%x, test_bit == %s\n",
- bits, test_bit ? "yes" : "no");
- return -EBUSY;
-}
-
-static void carm_init_responses(struct carm_host *host)
-{
- void __iomem *mmio = host->mmio;
- unsigned int i;
- struct carm_response *resp = (struct carm_response *) host->shm;
-
- for (i = 0; i < RMSG_Q_LEN; i++)
- resp[i].status = cpu_to_le32(0xffffffff);
-
- writel(0, mmio + CARM_RESP_IDX);
-}
-
-static int carm_init_host(struct carm_host *host)
-{
- void __iomem *mmio = host->mmio;
- u32 tmp;
- u8 tmp8;
- int rc;
-
- DPRINTK("ENTER\n");
-
- writel(0, mmio + CARM_INT_MASK);
-
- tmp8 = readb(mmio + CARM_INITC);
- if (tmp8 & 0x01) {
- tmp8 &= ~0x01;
- writeb(tmp8, mmio + CARM_INITC);
- readb(mmio + CARM_INITC); /* flush */
-
- DPRINTK("snooze...\n");
- msleep(5000);
- }
-
- tmp = readl(mmio + CARM_HMUC);
- if (tmp & CARM_CME) {
- DPRINTK("CME bit present, waiting\n");
- rc = carm_init_wait(mmio, CARM_CME, 1);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 1 failed\n");
- return rc;
- }
- }
- if (tmp & CARM_RME) {
- DPRINTK("RME bit present, waiting\n");
- rc = carm_init_wait(mmio, CARM_RME, 1);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 2 failed\n");
- return rc;
- }
- }
-
- tmp &= ~(CARM_RME | CARM_CME);
- writel(tmp, mmio + CARM_HMUC);
- readl(mmio + CARM_HMUC); /* flush */
-
- rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 3 failed\n");
- return rc;
- }
-
- carm_init_buckets(mmio);
-
- writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO);
- writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI);
- writel(RBUF_LEN, mmio + RBUF_BYTE_SZ);
-
- tmp = readl(mmio + CARM_HMUC);
- tmp |= (CARM_RME | CARM_CME | CARM_WZBC);
- writel(tmp, mmio + CARM_HMUC);
- readl(mmio + CARM_HMUC); /* flush */
-
- rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 1);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 4 failed\n");
- return rc;
- }
-
- writel(0, mmio + CARM_HMPHA);
- writel(INT_DEF_MASK, mmio + CARM_INT_MASK);
-
- carm_init_responses(host);
-
- /* start initialization, probing state machine */
- spin_lock_irq(&host->lock);
- assert(host->state == HST_INVALID);
- host->state = HST_PROBE_START;
- spin_unlock_irq(&host->lock);
- schedule_work(&host->fsm_task);
-
- DPRINTK("EXIT\n");
- return 0;
-}
-
-static const struct blk_mq_ops carm_mq_ops = {
- .queue_rq = carm_queue_rq,
-};
-
-static int carm_init_disk(struct carm_host *host, unsigned int port_no)
-{
- struct carm_port *port = &host->port[port_no];
- struct gendisk *disk;
- struct request_queue *q;
-
- port->host = host;
- port->port_no = port_no;
-
- disk = alloc_disk(CARM_MINORS_PER_MAJOR);
- if (!disk)
- return -ENOMEM;
-
- port->disk = disk;
- sprintf(disk->disk_name, DRV_NAME "/%u",
- (unsigned int)host->id * CARM_MAX_PORTS + port_no);
- disk->major = host->major;
- disk->first_minor = port_no * CARM_MINORS_PER_MAJOR;
- disk->fops = &carm_bd_ops;
- disk->private_data = port;
-
- q = blk_mq_init_queue(&host->tag_set);
- if (IS_ERR(q))
- return PTR_ERR(q);
-
- blk_queue_max_segments(q, CARM_MAX_REQ_SG);
- blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
-
- q->queuedata = port;
- disk->queue = q;
- return 0;
-}
-
-static void carm_free_disk(struct carm_host *host, unsigned int port_no)
-{
- struct carm_port *port = &host->port[port_no];
- struct gendisk *disk = port->disk;
-
- if (!disk)
- return;
-
- if (disk->flags & GENHD_FL_UP)
- del_gendisk(disk);
- if (disk->queue)
- blk_cleanup_queue(disk->queue);
- put_disk(disk);
-}
-
-static int carm_init_shm(struct carm_host *host)
-{
- host->shm = dma_alloc_coherent(&host->pdev->dev, CARM_SHM_SIZE,
- &host->shm_dma, GFP_KERNEL);
- if (!host->shm)
- return -ENOMEM;
-
- host->msg_base = host->shm + RBUF_LEN;
- host->msg_dma = host->shm_dma + RBUF_LEN;
-
- memset(host->shm, 0xff, RBUF_LEN);
- memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN);
-
- return 0;
-}
-
-static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct carm_host *host;
- int rc;
- struct request_queue *q;
- unsigned int i;
-
- printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
-
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
-
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_out;
-
- rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
- pci_name(pdev));
- goto err_out_regions;
- }
-
- host = kzalloc(sizeof(*host), GFP_KERNEL);
- if (!host) {
- printk(KERN_ERR DRV_NAME "(%s): memory alloc failure\n",
- pci_name(pdev));
- rc = -ENOMEM;
- goto err_out_regions;
- }
-
- host->pdev = pdev;
- spin_lock_init(&host->lock);
- INIT_WORK(&host->fsm_task, carm_fsm_task);
- init_completion(&host->probe_comp);
-
- host->mmio = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (!host->mmio) {
- printk(KERN_ERR DRV_NAME "(%s): MMIO alloc failure\n",
- pci_name(pdev));
- rc = -ENOMEM;
- goto err_out_kfree;
- }
-
- rc = carm_init_shm(host);
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): DMA SHM alloc failure\n",
- pci_name(pdev));
- goto err_out_iounmap;
- }
-
- memset(&host->tag_set, 0, sizeof(host->tag_set));
- host->tag_set.ops = &carm_mq_ops;
- host->tag_set.cmd_size = sizeof(struct carm_request);
- host->tag_set.nr_hw_queues = 1;
- host->tag_set.nr_maps = 1;
- host->tag_set.queue_depth = max_queue;
- host->tag_set.numa_node = NUMA_NO_NODE;
- host->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-
- rc = blk_mq_alloc_tag_set(&host->tag_set);
- if (rc)
- goto err_out_dma_free;
-
- q = blk_mq_init_queue(&host->tag_set);
- if (IS_ERR(q)) {
- rc = PTR_ERR(q);
- blk_mq_free_tag_set(&host->tag_set);
- goto err_out_dma_free;
- }
-
- host->oob_q = q;
- q->queuedata = host;
-
- /*
- * Figure out which major to use: 160, 161, or dynamic
- */
- if (!test_and_set_bit(0, &carm_major_alloc))
- host->major = 160;
- else if (!test_and_set_bit(1, &carm_major_alloc))
- host->major = 161;
- else
- host->flags |= FL_DYN_MAJOR;
-
- host->id = carm_host_id;
- sprintf(host->name, DRV_NAME "%d", carm_host_id);
-
- rc = register_blkdev(host->major, host->name);
- if (rc < 0)
- goto err_out_free_majors;
- if (host->flags & FL_DYN_MAJOR)
- host->major = rc;
-
- for (i = 0; i < CARM_MAX_PORTS; i++) {
- rc = carm_init_disk(host, i);
- if (rc)
- goto err_out_blkdev_disks;
- }
-
- pci_set_master(pdev);
-
- rc = request_irq(pdev->irq, carm_interrupt, IRQF_SHARED, DRV_NAME, host);
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n",
- pci_name(pdev));
- goto err_out_blkdev_disks;
- }
-
- rc = carm_init_host(host);
- if (rc)
- goto err_out_free_irq;
-
- DPRINTK("waiting for probe_comp\n");
- wait_for_completion(&host->probe_comp);
-
- printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n",
- host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
- (unsigned long long)pci_resource_start(pdev, 0),
- pdev->irq, host->major);
-
- carm_host_id++;
- pci_set_drvdata(pdev, host);
- return 0;
-
-err_out_free_irq:
- free_irq(pdev->irq, host);
-err_out_blkdev_disks:
- for (i = 0; i < CARM_MAX_PORTS; i++)
- carm_free_disk(host, i);
- unregister_blkdev(host->major, host->name);
-err_out_free_majors:
- if (host->major == 160)
- clear_bit(0, &carm_major_alloc);
- else if (host->major == 161)
- clear_bit(1, &carm_major_alloc);
- blk_cleanup_queue(host->oob_q);
- blk_mq_free_tag_set(&host->tag_set);
-err_out_dma_free:
- dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
-err_out_iounmap:
- iounmap(host->mmio);
-err_out_kfree:
- kfree(host);
-err_out_regions:
- pci_release_regions(pdev);
-err_out:
- pci_disable_device(pdev);
- return rc;
-}
-
-static void carm_remove_one (struct pci_dev *pdev)
-{
- struct carm_host *host = pci_get_drvdata(pdev);
- unsigned int i;
-
- if (!host) {
- printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n",
- pci_name(pdev));
- return;
- }
-
- free_irq(pdev->irq, host);
- for (i = 0; i < CARM_MAX_PORTS; i++)
- carm_free_disk(host, i);
- unregister_blkdev(host->major, host->name);
- if (host->major == 160)
- clear_bit(0, &carm_major_alloc);
- else if (host->major == 161)
- clear_bit(1, &carm_major_alloc);
- blk_cleanup_queue(host->oob_q);
- blk_mq_free_tag_set(&host->tag_set);
- dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
- iounmap(host->mmio);
- kfree(host);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-}
-
-module_pci_driver(carm_driver);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 9b3ea86c20e5..3afc07b59477 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -1063,14 +1063,15 @@ static int virtblk_freeze(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
+ /* Ensure no requests in virtqueues before deleting vqs. */
+ blk_mq_freeze_queue(vblk->disk->queue);
+
/* Ensure we don't receive any more interrupts */
vdev->config->reset(vdev);
/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
- blk_mq_quiesce_queue(vblk->disk->queue);
-
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
@@ -1088,7 +1089,7 @@ static int virtblk_restore(struct virtio_device *vdev)
virtio_device_ready(vdev);
- blk_mq_unquiesce_queue(vblk->disk->queue);
+ blk_mq_unfreeze_queue(vblk->disk->queue);
return 0;
}
#endif
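/*
 * Shape of the virtio-blk change above (annotation): quiescing only
 * stops new dispatch, while freezing also waits for requests already in
 * flight, so the virtqueues are provably idle before they are deleted:
 *
 *	blk_mq_freeze_queue(q);       // drain: no request still holds a vq
 *	vdev->config->reset(vdev);    // stop interrupts
 *	vdev->config->del_vqs(vdev);  // safe to tear down
 *	...                           // on restore, re-create vqs, then
 *	blk_mq_unfreeze_queue(q);     // resume dispatch
 */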
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index d0538c03f033..da67621ebc21 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -779,7 +779,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
ring_req->u.rw.handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
- if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
+ if (req_op(req) == REQ_OP_FLUSH ||
+ (req_op(req) == REQ_OP_WRITE && (req->cmd_flags & REQ_FUA))) {
/*
* Ideally we can do an unordered flush-to-disk.
* In case the backend only supports barriers, use that.
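/*
 * Reasoning behind the tightened test above (annotation): REQ_FUA is
 * only meaningful on writes, but the old check matched it on any
 * request, so a read carrying a stray REQ_FUA would be silently issued
 * as a barrier write. Requiring req_op(req) == REQ_OP_WRITE keeps the
 * flush path to flushes and FUA writes only.
 */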
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 2beb2321825e..e7e3c8e0ed0e 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -471,7 +471,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
return data;
}
-static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
+static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
{
struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
const unsigned char *p_left = data, *p_h4;
@@ -510,25 +510,20 @@ static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
bt_dev_err(bdev->hdev,
"Frame reassembly failed (%d)", err);
bdev->rx_skb = NULL;
- return err;
+ return;
}
sz_left -= sz_h4;
p_left += sz_h4;
}
-
- return 0;
}
static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
size_t count)
{
struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
- int err;
- err = btmtkuart_recv(bdev->hdev, data, count);
- if (err < 0)
- return err;
+ btmtkuart_recv(bdev->hdev, data, count);
bdev->hdev->stat.byte_rx += count;
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 2acb719e596f..11c7e04bf394 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -122,6 +122,21 @@ static int btqcomsmd_setup(struct hci_dev *hdev)
return 0;
}
+static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ int ret;
+
+ ret = qca_set_bdaddr_rome(hdev, bdaddr);
+ if (ret)
+ return ret;
+
+ /* The firmware stops responding for a while after setting the bdaddr,
+ * causing timeouts for subsequent commands. Sleep a bit to avoid this.
+ */
+ usleep_range(1000, 10000);
+ return 0;
+}
+
static int btqcomsmd_probe(struct platform_device *pdev)
{
struct btqcomsmd *btq;
@@ -162,7 +177,7 @@ static int btqcomsmd_probe(struct platform_device *pdev)
hdev->close = btqcomsmd_close;
hdev->send = btqcomsmd_send;
hdev->setup = btqcomsmd_setup;
- hdev->set_bdaddr = qca_set_bdaddr_rome;
+ hdev->set_bdaddr = btqcomsmd_set_bdaddr;
ret = hci_register_dev(hdev);
if (ret < 0)
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index fd9571d5fdac..2c846f19bbd4 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -346,6 +346,7 @@ static void btsdio_remove(struct sdio_func *func)
if (!data)
return;
+ cancel_work_sync(&data->work);
hdev = data->hdev;
sdio_set_drvdata(func, NULL);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c8f2b991e9cf..dbba6a09e51e 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -57,6 +57,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_IFNUM_2 0x80000
#define BTUSB_CW6622 0x100000
#define BTUSB_MEDIATEK 0x200000
+#define BTUSB_WIDEBAND_SPEECH 0x400000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -332,20 +333,42 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x1286, 0x204e), .driver_info = BTUSB_MARVELL },
/* Intel Bluetooth devices */
- { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
- { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
- { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW },
+ { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
- { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
- { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL },
- { USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_NEW },
+ { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH },
/* Other Intel Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_IGNORE },
+ /* Realtek 8822CE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
+ /* Realtek 8852BE Bluetooth devices */
+ { USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3570), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_REALTEK },
@@ -563,13 +586,13 @@ static inline void btusb_free_frags(struct btusb_data *data)
spin_lock_irqsave(&data->rxlock, flags);
- kfree_skb(data->evt_skb);
+ dev_kfree_skb_irq(data->evt_skb);
data->evt_skb = NULL;
- kfree_skb(data->acl_skb);
+ dev_kfree_skb_irq(data->acl_skb);
data->acl_skb = NULL;
- kfree_skb(data->sco_skb);
+ dev_kfree_skb_irq(data->sco_skb);
data->sco_skb = NULL;
spin_unlock_irqrestore(&data->rxlock, flags);
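/*
 * Note on the substitutions above and in the hci_* hunks below
 * (annotation): these frees happen under spin_lock_irqsave(), where
 * plain kfree_skb() must not be used; dev_kfree_skb_irq() instead queues
 * the skb on the per-CPU completion list to be freed from softirq
 * context once interrupts are enabled again.
 */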
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index cf4a56095817..8055f63603f4 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -378,7 +378,7 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp)
i++;
__skb_unlink(skb, &bcsp->unack);
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
}
if (skb_queue_empty(&bcsp->unack))
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index bf3e23104194..e77da593f290 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -298,7 +298,7 @@ static void h5_pkt_cull(struct h5 *h5)
break;
__skb_unlink(skb, &h5->unack);
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
}
if (skb_queue_empty(&h5->unack))
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 31f25153087d..02c96c603768 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -1230,7 +1230,11 @@ static struct platform_driver intel_driver = {
int __init intel_init(void)
{
- platform_driver_register(&intel_driver);
+ int err;
+
+ err = platform_driver_register(&intel_driver);
+ if (err)
+ return err;
return hci_uart_register_proto(&intel_proto);
}
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index d9a4c6c691e0..d4c71811bab6 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -345,7 +345,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
default:
BT_ERR("illegal hcill state: %ld (losing packet)",
ll->hcill_state);
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
break;
}
@@ -626,6 +626,7 @@ static int ll_setup(struct hci_uart *hu)
gpiod_set_value_cansleep(lldev->enable_gpio, 0);
msleep(5);
gpiod_set_value_cansleep(lldev->enable_gpio, 1);
+ msleep(500);
err = serdev_device_wait_for_cts(serdev, true, 200);
if (err) {
bt_dev_err(hu->hdev, "Failed to get CTS");
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 6463350b7977..82db15585196 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -734,7 +734,11 @@ static int nokia_bluetooth_serdev_probe(struct serdev_device *serdev)
return err;
}
- clk_prepare_enable(sysclk);
+ err = clk_prepare_enable(sysclk);
+ if (err) {
+ dev_err(dev, "could not enable sysclk: %d", err);
+ return err;
+ }
btdev->sysclk_speed = clk_get_rate(sysclk);
clk_disable_unprepare(sysclk);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index e3164c200eac..467137c47e4f 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -816,7 +816,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
default:
BT_ERR("Illegal tx state: %d (losing packet)",
qca->tx_ibs_state);
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
break;
}
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 65e41c1d760f..6a0ddf266ad8 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -67,7 +67,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
struct vhci_data *data = hci_get_drvdata(hdev);
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
+
+ mutex_lock(&data->open_mutex);
skb_queue_tail(&data->readq, skb);
+ mutex_unlock(&data->open_mutex);
wake_up_interruptible(&data->read_wait);
return 0;
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 47c2bb444ab4..072dc40e29a6 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -136,11 +136,12 @@ config SUNXI_RSB
config TEGRA_ACONNECT
tristate "Tegra ACONNECT Bus Driver"
- depends on ARCH_TEGRA_210_SOC
+ depends on ARCH_TEGRA
depends on OF && PM
help
Driver for the Tegra ACONNECT bus which is used to interface with
- the devices inside the Audio Processing Engine (APE) for Tegra210.
+ the devices inside the Audio Processing Engine (APE) for
+ Tegra210 and later.
config TEGRA_GMI
tristate "Tegra Generic Memory Interface bus driver"
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 2e9252d37a18..0922cbe8900b 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -504,13 +504,13 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
{
struct acpi_device *adev = ACPI_COMPANION(hostdev);
struct acpi_device *child;
+ struct platform_device *pdev;
int ret;
/* Only consider the children of the host */
list_for_each_entry(child, &adev->children, node) {
const char *hid = acpi_device_hid(child);
const struct hisi_lpc_acpi_cell *cell;
- struct platform_device *pdev;
const struct resource *res;
bool found = false;
int num_res;
@@ -572,22 +572,24 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
ret = platform_device_add_resources(pdev, res, num_res);
if (ret)
- goto fail;
+ goto fail_put_device;
ret = platform_device_add_data(pdev, cell->pdata,
cell->pdata_size);
if (ret)
- goto fail;
+ goto fail_put_device;
ret = platform_device_add(pdev);
if (ret)
- goto fail;
+ goto fail_put_device;
acpi_device_set_enumerated(child);
}
return 0;
+fail_put_device:
+ platform_device_put(pdev);
fail:
hisi_lpc_acpi_remove(hostdev);
return ret;
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 28bb65a5613f..201767823edb 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -192,8 +192,8 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
const struct of_device_id *of_id = of_match_device(weim_id_table,
&pdev->dev);
const struct imx_weim_devtype *devtype = of_id->data;
+ int ret = 0, have_child = 0;
struct device_node *child;
- int ret, have_child = 0;
struct cs_timing_state ts = {};
u32 reg;
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 68413bf9cf87..d87f698d38a3 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -833,6 +833,12 @@ static int moxtet_remove(struct spi_device *spi)
return 0;
}
+static const struct spi_device_id moxtet_spi_ids[] = {
+ { "moxtet" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, moxtet_spi_ids);
+
static const struct of_device_id moxtet_dt_ids[] = {
{ .compatible = "cznic,moxtet" },
{},
@@ -844,6 +850,7 @@ static struct spi_driver moxtet_spi_driver = {
.name = "moxtet",
.of_match_table = moxtet_dt_ids,
},
+ .id_table = moxtet_spi_ids,
.probe = moxtet_probe,
.remove = moxtet_remove,
};
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 9b1a5e62417c..98cbb18f17fa 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -268,6 +268,9 @@ EXPORT_SYMBOL_GPL(sunxi_rsb_driver_register);
/* common code that starts a transfer */
static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
{
+ u32 int_mask, status;
+ bool timeout;
+
if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) {
dev_dbg(rsb->dev, "RSB transfer still in progress\n");
return -EBUSY;
@@ -275,13 +278,23 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
reinit_completion(&rsb->complete);
- writel(RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER,
- rsb->regs + RSB_INTE);
+ int_mask = RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER;
+ writel(int_mask, rsb->regs + RSB_INTE);
writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB,
rsb->regs + RSB_CTRL);
- if (!wait_for_completion_io_timeout(&rsb->complete,
- msecs_to_jiffies(100))) {
+ if (irqs_disabled()) {
+ timeout = readl_poll_timeout_atomic(rsb->regs + RSB_INTS,
+ status, (status & int_mask),
+ 10, 100000);
+ writel(status, rsb->regs + RSB_INTS);
+ } else {
+ timeout = !wait_for_completion_io_timeout(&rsb->complete,
+ msecs_to_jiffies(100));
+ status = rsb->status;
+ }
+
+ if (timeout) {
dev_dbg(rsb->dev, "RSB timeout\n");
/* abort the transfer */
@@ -293,18 +306,18 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
return -ETIMEDOUT;
}
- if (rsb->status & RSB_INTS_LOAD_BSY) {
+ if (status & RSB_INTS_LOAD_BSY) {
dev_dbg(rsb->dev, "RSB busy\n");
return -EBUSY;
}
- if (rsb->status & RSB_INTS_TRANS_ERR) {
- if (rsb->status & RSB_INTS_TRANS_ERR_ACK) {
+ if (status & RSB_INTS_TRANS_ERR) {
+ if (status & RSB_INTS_TRANS_ERR_ACK) {
dev_dbg(rsb->dev, "RSB slave nack\n");
return -EINVAL;
}
- if (rsb->status & RSB_INTS_TRANS_ERR_DATA) {
+ if (status & RSB_INTS_TRANS_ERR_DATA) {
dev_dbg(rsb->dev, "RSB transfer data error\n");
return -EIO;
}
@@ -768,7 +781,13 @@ static int __init sunxi_rsb_init(void)
return ret;
}
- return platform_driver_register(&sunxi_rsb_driver);
+ ret = platform_driver_register(&sunxi_rsb_driver);
+ if (ret) {
+ bus_unregister(&sunxi_rsb_bus);
+ return ret;
+ }
+
+ return 0;
}
module_init(sunxi_rsb_init);
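/*
 * Sketch of the fallback above (annotation): the completion is signalled
 * from the RSB interrupt handler, which can never run if the caller has
 * interrupts disabled (e.g. a PMIC access late in poweroff). Hence:
 *
 *	if (irqs_disabled())
 *		readl_poll_timeout_atomic(...);     // busy-poll RSB_INTS, then ack it
 *	else
 *		wait_for_completion_io_timeout(...); // ISR fills rsb->status
 */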
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 44aeceaccfa4..70339f73181e 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1023,6 +1023,11 @@ static int sysc_enable_module(struct device *dev)
if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
best_mode = SYSC_IDLE_NO;
+
+ /* Clear WAKEUP */
+ if (regbits->enwkup_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
+ reg &= ~BIT(regbits->enwkup_shift);
} else {
best_mode = fls(ddata->cfg.sidlemodes) - 1;
if (best_mode > SYSC_IDLE_MASK) {
@@ -1143,6 +1148,13 @@ set_sidle:
}
}
+ if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) {
+ /* Set WAKEUP */
+ if (regbits->enwkup_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
+ reg |= BIT(regbits->enwkup_shift);
+ }
+
reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
reg |= best_mode << regbits->sidle_shift;
if (regbits->autoidle_shift >= 0 &&
@@ -1371,14 +1383,16 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff,
0),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Quirks that need to be set based on the module address */
SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
@@ -1809,7 +1823,7 @@ static int sysc_reset(struct sysc *ddata)
sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
- if (ddata->legacy_mode || sysc_offset < 0 ||
+ if (ddata->legacy_mode ||
ddata->cap->regbits->srst_shift < 0 ||
ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
return 0;
@@ -1819,9 +1833,13 @@ static int sysc_reset(struct sysc *ddata)
if (ddata->pre_reset_quirk)
ddata->pre_reset_quirk(ddata);
- sysc_val = sysc_read_sysconfig(ddata);
- sysc_val |= sysc_mask;
- sysc_write(ddata, sysc_offset, sysc_val);
+ if (sysc_offset >= 0) {
+ sysc_val = sysc_read_sysconfig(ddata);
+ sysc_val |= sysc_mask;
+ sysc_write(ddata, sysc_offset, sysc_val);
+ /* Flush posted write */
+ sysc_val = sysc_read_sysconfig(ddata);
+ }
if (ddata->cfg.srst_udelay)
usleep_range(ddata->cfg.srst_udelay,
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index d68d05d5d383..c6f181702b9a 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -90,6 +90,9 @@ parisc_agp_tlbflush(struct agp_memory *mem)
{
struct _parisc_agp_info *info = &parisc_agp_info;
+ /* force fdc ops to be visible to IOMMU */
+ asm_io_sync();
+
writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM);
readq(info->ioc_regs+IOC_PCOM); /* flush */
}
@@ -158,6 +161,7 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
info->gatt[j] =
parisc_agp_mask_memory(agp_bridge,
paddr, type);
+ asm_io_fdc(&info->gatt[j]);
}
}
@@ -191,7 +195,16 @@ static unsigned long
parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
int type)
{
- return SBA_PDIR_VALID_BIT | addr;
+ unsigned ci; /* coherent index */
+ dma_addr_t pa;
+
+ pa = addr & IOVP_MASK;
+ asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pa)));
+
+ pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
+ pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
+
+ return cpu_to_le64(pa);
}
static void
@@ -381,8 +394,6 @@ find_quicksilver(struct device *dev, void *data)
static int __init
parisc_agp_init(void)
{
- extern struct sba_device *sba_list;
-
int err = -1;
struct parisc_device *sba = NULL, *lba = NULL;
struct lba_device *lbadev = NULL;
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 9959c762da2f..db3dd467194c 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -143,15 +143,19 @@ static int __init mod_init(void)
found:
err = pci_read_config_dword(pdev, 0x58, &pmbase);
if (err)
- return err;
+ goto put_dev;
pmbase &= 0x0000FF00;
- if (pmbase == 0)
- return -EIO;
+ if (pmbase == 0) {
+ err = -EIO;
+ goto put_dev;
+ }
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ if (!priv) {
+ err = -ENOMEM;
+ goto put_dev;
+ }
if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
@@ -185,6 +189,8 @@ err_iomap:
release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
out:
kfree(priv);
+put_dev:
+ pci_dev_put(pdev);
return err;
}
@@ -200,6 +206,8 @@ static void __exit mod_exit(void)
release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+ pci_dev_put(priv->pcidev);
+
kfree(priv);
}
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 559ca503b7b6..d2bd8473e16f 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -24,10 +24,13 @@
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/uaccess.h>
#define RNG_MODULE_NAME "hw_random"
+#define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)
+
static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
@@ -59,7 +62,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
static size_t rng_buffer_size(void)
{
- return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
+ return RNG_BUFFER_SIZE;
}
static void add_early_randomness(struct hwrng *rng)
@@ -202,6 +205,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
size_t size, loff_t *offp)
{
+ u8 buffer[RNG_BUFFER_SIZE];
ssize_t ret = 0;
int err = 0;
int bytes_read, len;
@@ -229,34 +233,37 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
if (bytes_read < 0) {
err = bytes_read;
goto out_unlock_reading;
+ } else if (bytes_read == 0 &&
+ (filp->f_flags & O_NONBLOCK)) {
+ err = -EAGAIN;
+ goto out_unlock_reading;
}
+
data_avail = bytes_read;
}
- if (!data_avail) {
- if (filp->f_flags & O_NONBLOCK) {
- err = -EAGAIN;
- goto out_unlock_reading;
- }
- } else {
- len = data_avail;
+ len = data_avail;
+ if (len) {
if (len > size)
len = size;
data_avail -= len;
- if (copy_to_user(buf + ret, rng_buffer + data_avail,
- len)) {
+ memcpy(buffer, rng_buffer + data_avail, len);
+ }
+ mutex_unlock(&reading_mutex);
+ put_rng(rng);
+
+ if (len) {
+ if (copy_to_user(buf + ret, buffer, len)) {
err = -EFAULT;
- goto out_unlock_reading;
+ goto out;
}
size -= len;
ret += len;
}
- mutex_unlock(&reading_mutex);
- put_rng(rng);
if (need_resched())
schedule_timeout_interruptible(1);
@@ -267,6 +274,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
}
}
out:
+ memzero_explicit(buffer, sizeof(buffer));
return ret ? : err;
out_unlock_reading:
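/*
 * Pattern introduced above (annotation): copy_to_user() can fault and
 * sleep, so it must not run under reading_mutex. The data is now staged
 * into an on-stack RNG_BUFFER_SIZE buffer while the mutex is held, the
 * mutex is dropped, and only then is the copy-out to userspace done; the
 * stack copy is scrubbed with memzero_explicit() before returning.
 */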
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index e1d421a36a13..2f8289865ec8 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -51,10 +51,15 @@ static const struct pci_device_id pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
+struct amd_geode_priv {
+ struct pci_dev *pcidev;
+ void __iomem *membase;
+};
static int geode_rng_data_read(struct hwrng *rng, u32 *data)
{
- void __iomem *mem = (void __iomem *)rng->priv;
+ struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
+ void __iomem *mem = priv->membase;
*data = readl(mem + GEODE_RNG_DATA_REG);
@@ -63,7 +68,8 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
static int geode_rng_data_present(struct hwrng *rng, int wait)
{
- void __iomem *mem = (void __iomem *)rng->priv;
+ struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
+ void __iomem *mem = priv->membase;
int data, i;
for (i = 0; i < 20; i++) {
@@ -90,6 +96,7 @@ static int __init mod_init(void)
const struct pci_device_id *ent;
void __iomem *mem;
unsigned long rng_base;
+ struct amd_geode_priv *priv;
for_each_pci_dev(pdev) {
ent = pci_match_id(pci_tbl, pdev);
@@ -97,17 +104,26 @@ static int __init mod_init(void)
goto found;
}
/* Device not found. */
- goto out;
+ return err;
found:
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto put_dev;
+ }
+
rng_base = pci_resource_start(pdev, 0);
if (rng_base == 0)
- goto out;
+ goto free_priv;
err = -ENOMEM;
mem = ioremap(rng_base, 0x58);
if (!mem)
- goto out;
- geode_rng.priv = (unsigned long)mem;
+ goto free_priv;
+
+ geode_rng.priv = (unsigned long)priv;
+ priv->membase = mem;
+ priv->pcidev = pdev;
pr_info("AMD Geode RNG detected\n");
err = hwrng_register(&geode_rng);
@@ -116,20 +132,26 @@ found:
err);
goto err_unmap;
}
-out:
return err;
err_unmap:
iounmap(mem);
- goto out;
+free_priv:
+ kfree(priv);
+put_dev:
+ pci_dev_put(pdev);
+ return err;
}
static void __exit mod_exit(void)
{
- void __iomem *mem = (void __iomem *)geode_rng.priv;
+ struct amd_geode_priv *priv;
+ priv = (struct amd_geode_priv *)geode_rng.priv;
hwrng_unregister(&geode_rng);
- iounmap(mem);
+ iounmap(priv->membase);
+ pci_dev_put(priv->pcidev);
+ kfree(priv);
}
module_init(mod_init);
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 0576801944fd..2e902419601d 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -99,7 +99,7 @@ static int imx_rngc_self_test(struct imx_rngc *rngc)
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND);
- ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT);
+ ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT));
if (!ret) {
imx_rngc_irq_mask_clear(rngc);
return -ETIMEDOUT;
@@ -182,9 +182,7 @@ static int imx_rngc_init(struct hwrng *rng)
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND);
- ret = wait_for_completion_timeout(&rngc->rng_op_done,
- RNGC_TIMEOUT);
-
+ ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT));
if (!ret) {
imx_rngc_irq_mask_clear(rngc);
return -ETIMEDOUT;
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 92be1c0ab99f..b2d3da17daa8 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -202,10 +202,12 @@ static int iproc_rng200_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
}
- priv->rng.name = "iproc-rng200",
- priv->rng.read = iproc_rng200_read,
- priv->rng.init = iproc_rng200_init,
- priv->rng.cleanup = iproc_rng200_cleanup,
+ dev_set_drvdata(dev, priv);
+
+ priv->rng.name = "iproc-rng200";
+ priv->rng.read = iproc_rng200_read;
+ priv->rng.init = iproc_rng200_init;
+ priv->rng.cleanup = iproc_rng200_cleanup;
/* Register driver */
ret = devm_hwrng_register(dev, &priv->rng);
@@ -219,6 +221,28 @@ static int iproc_rng200_probe(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused iproc_rng200_suspend(struct device *dev)
+{
+ struct iproc_rng200_dev *priv = dev_get_drvdata(dev);
+
+ iproc_rng200_cleanup(&priv->rng);
+
+ return 0;
+}
+
+static int __maybe_unused iproc_rng200_resume(struct device *dev)
+{
+ struct iproc_rng200_dev *priv = dev_get_drvdata(dev);
+
+ iproc_rng200_init(&priv->rng);
+
+ return 0;
+}
+
+static const struct dev_pm_ops iproc_rng200_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(iproc_rng200_suspend, iproc_rng200_resume)
+};
+
static const struct of_device_id iproc_rng200_of_match[] = {
{ .compatible = "brcm,bcm7211-rng200", },
{ .compatible = "brcm,bcm7278-rng200", },
@@ -231,6 +255,7 @@ static struct platform_driver iproc_rng200_driver = {
.driver = {
.name = "iproc-rng200",
.of_match_table = iproc_rng200_of_match,
+ .pm = &iproc_rng200_pm_ops,
},
.probe = iproc_rng200_probe,
};
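
A note on the shape of this addition: the new callbacks simply reuse the driver's existing init/cleanup hooks, and they can find the private data because probe now calls dev_set_drvdata(). Marking them __maybe_unused and referencing them only through SET_SYSTEM_SLEEP_PM_OPS keeps the build clean in both configurations; roughly, with hypothetical foo_* names:

/* With CONFIG_PM_SLEEP=n the macro contributes no members, and
 * __maybe_unused silences the defined-but-unused warnings without
 * wrapping the callbacks in #ifdef CONFIG_PM_SLEEP. */
static int __maybe_unused foo_suspend(struct device *dev) { return 0; }
static int __maybe_unused foo_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};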
diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c
index 863448360a7d..f708a99619ec 100644
--- a/drivers/char/hw_random/st-rng.c
+++ b/drivers/char/hw_random/st-rng.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -41,7 +42,6 @@
struct st_rng_data {
void __iomem *base;
- struct clk *clk;
struct hwrng ops;
};
@@ -86,26 +86,18 @@ static int st_rng_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
ddata->ops.priv = (unsigned long)ddata;
ddata->ops.read = st_rng_read;
ddata->ops.name = pdev->name;
ddata->base = base;
- ddata->clk = clk;
-
- dev_set_drvdata(&pdev->dev, ddata);
ret = devm_hwrng_register(&pdev->dev, &ddata->ops);
if (ret) {
dev_err(&pdev->dev, "Failed to register HW RNG\n");
- clk_disable_unprepare(clk);
return ret;
}
@@ -114,16 +106,7 @@ static int st_rng_probe(struct platform_device *pdev)
return 0;
}
-static int st_rng_remove(struct platform_device *pdev)
-{
- struct st_rng_data *ddata = dev_get_drvdata(&pdev->dev);
-
- clk_disable_unprepare(ddata->clk);
-
- return 0;
-}
-
-static const struct of_device_id st_rng_match[] = {
+static const struct of_device_id st_rng_match[] __maybe_unused = {
{ .compatible = "st,rng" },
{},
};
@@ -135,7 +118,6 @@ static struct platform_driver st_rng_driver = {
.of_match_table = of_match_ptr(st_rng_match),
},
.probe = st_rng_probe,
- .remove = st_rng_remove
};
module_platform_driver(st_rng_driver);
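
The conversion works because devm_clk_get_enabled() (added to clk-devres.c later in this series) bundles clk_get() with clk_prepare_enable() and registers the matching disable/unprepare/put to run automatically at unbind, which is exactly why both the error unwinding and the whole .remove callback can be deleted. A sketch of the resulting consumer shape, with hypothetical foo_* names:

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* Enabled from here on; automatically disabled, unprepared and
	 * put when the driver unbinds -- no .remove handling needed. */
	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0;
}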
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 718d8c087650..145d7b1055c0 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -4,6 +4,7 @@
* Copyright (C) 2007, 2008 Rusty Russell IBM Corporation
*/
+#include <asm/barrier.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/scatterlist.h>
@@ -17,71 +18,111 @@ static DEFINE_IDA(rng_index_ida);
struct virtrng_info {
struct hwrng hwrng;
struct virtqueue *vq;
- struct completion have_data;
char name[25];
- unsigned int data_avail;
int index;
- bool busy;
bool hwrng_register_done;
bool hwrng_removed;
+ /* data transfer */
+ struct completion have_data;
+ unsigned int data_avail;
+ unsigned int data_idx;
+ /* minimal size returned by rng_buffer_size() */
+#if SMP_CACHE_BYTES < 32
+ u8 data[32];
+#else
+ u8 data[SMP_CACHE_BYTES];
+#endif
};
static void random_recv_done(struct virtqueue *vq)
{
struct virtrng_info *vi = vq->vdev->priv;
+ unsigned int len;
/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
- if (!virtqueue_get_buf(vi->vq, &vi->data_avail))
+ if (!virtqueue_get_buf(vi->vq, &len))
return;
+ smp_store_release(&vi->data_avail, len);
complete(&vi->have_data);
}
-/* The host will fill any buffer we give it with sweet, sweet randomness. */
-static void register_buffer(struct virtrng_info *vi, u8 *buf, size_t size)
+static void request_entropy(struct virtrng_info *vi)
{
struct scatterlist sg;
- sg_init_one(&sg, buf, size);
+ reinit_completion(&vi->have_data);
+ vi->data_idx = 0;
+
+ sg_init_one(&sg, vi->data, sizeof(vi->data));
/* There should always be room for one buffer. */
- virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL);
+ virtqueue_add_inbuf(vi->vq, &sg, 1, vi->data, GFP_KERNEL);
virtqueue_kick(vi->vq);
}
+static unsigned int copy_data(struct virtrng_info *vi, void *buf,
+ unsigned int size)
+{
+ size = min_t(unsigned int, size, vi->data_avail);
+ memcpy(buf, vi->data + vi->data_idx, size);
+ vi->data_idx += size;
+ vi->data_avail -= size;
+ if (vi->data_avail == 0)
+ request_entropy(vi);
+ return size;
+}
+
static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
{
int ret;
struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
+ unsigned int chunk;
+ size_t read;
if (vi->hwrng_removed)
return -ENODEV;
- if (!vi->busy) {
- vi->busy = true;
- reinit_completion(&vi->have_data);
- register_buffer(vi, buf, size);
+ read = 0;
+
+ /* copy available data */
+ if (smp_load_acquire(&vi->data_avail)) {
+ chunk = copy_data(vi, buf, size);
+ size -= chunk;
+ read += chunk;
}
if (!wait)
- return 0;
-
- ret = wait_for_completion_killable(&vi->have_data);
- if (ret < 0)
- return ret;
+ return read;
+
+ /* We have already copied available entropy,
+ * so either size is 0 or data_avail is 0
+ */
+ while (size != 0) {
+ /* data_avail is 0 but a request is pending */
+ ret = wait_for_completion_killable(&vi->have_data);
+ if (ret < 0)
+ return ret;
+ /* if vi->data_avail is 0, we have been interrupted
+ * by a cleanup, but buffer stays in the queue
+ */
+ if (vi->data_avail == 0)
+ return read;
- vi->busy = false;
+ chunk = copy_data(vi, buf + read, size);
+ size -= chunk;
+ read += chunk;
+ }
- return vi->data_avail;
+ return read;
}
static void virtio_cleanup(struct hwrng *rng)
{
struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
- if (vi->busy)
- wait_for_completion(&vi->have_data);
+ complete(&vi->have_data);
}
static int probe_common(struct virtio_device *vdev)
@@ -117,6 +158,9 @@ static int probe_common(struct virtio_device *vdev)
goto err_find;
}
+ /* we always have a pending entropy request */
+ request_entropy(vi);
+
return 0;
err_find:
@@ -132,9 +176,9 @@ static void remove_common(struct virtio_device *vdev)
vi->hwrng_removed = true;
vi->data_avail = 0;
+ vi->data_idx = 0;
complete(&vi->have_data);
vdev->config->reset(vdev);
- vi->busy = false;
if (vi->hwrng_register_done)
hwrng_unregister(&vi->hwrng);
vdev->config->del_vqs(vdev);
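
The memory-ordering detail that makes the new internal buffer safe: the virtqueue callback publishes the received length with smp_store_release() only after the host has filled vi->data, and virtio_read() checks it with smp_load_acquire(). The pairing guarantees that any reader observing a non-zero length also observes the buffer contents. In miniature, restating the hunks above:

/* Producer (VQ callback): buffer contents become visible before the
 * byte count does. */
smp_store_release(&vi->data_avail, len);
complete(&vi->have_data);

/* Consumer (virtio_read): pairs with the release above; seeing a
 * non-zero count guarantees vi->data[0..count) is visible too. */
if (smp_load_acquire(&vi->data_avail))
	chunk = copy_data(vi, buf, size);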
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 736970312bbc..0833a2ac2f69 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -1298,6 +1298,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
+ struct module *owner;
if (!acquire_ipmi_user(user, &i)) {
/*
@@ -1358,8 +1359,9 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
kfree(rcvr);
}
+ owner = intf->owner;
kref_put(&intf->refcount, intf_free);
- module_put(intf->owner);
+ module_put(owner);
}
int ipmi_destroy_user(struct ipmi_user *user)
@@ -3535,12 +3537,16 @@ static void deliver_smi_err_response(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg,
unsigned char err)
{
+ int rv;
msg->rsp[0] = msg->data[0] | 4;
msg->rsp[1] = msg->data[1];
msg->rsp[2] = err;
msg->rsp_size = 3;
- /* It's an error, so it will never requeue, no need to check return. */
- handle_one_recv_msg(intf, msg);
+
+ /* This will never requeue, but it may ask us to free the message. */
+ rv = handle_one_recv_msg(intf, msg);
+ if (rv == 0)
+ ipmi_free_smi_msg(msg);
}
static void cleanup_smi_msgs(struct ipmi_smi *intf)
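
The _ipmi_destroy_user() hunk above is a use-after-free fix in miniature: kref_put() may invoke intf_free() and release intf, so anything still needed afterwards has to be copied out first:

struct module *owner = intf->owner;	/* read while intf is guaranteed alive */

kref_put(&intf->refcount, intf_free);	/* may free intf right here */
module_put(owner);			/* no dereference of intf after the put */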
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index b6e7df9e8850..c207f15dc7d1 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2086,6 +2086,11 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi->io.io_cleanup = NULL;
}
+ if (rv && new_smi->si_sm) {
+ kfree(new_smi->si_sm);
+ new_smi->si_sm = NULL;
+ }
+
return rv;
}
@@ -2157,6 +2162,20 @@ skip_fallback_noirq:
}
module_init(init_ipmi_si);
+static void wait_msg_processed(struct smi_info *smi_info)
+{
+ unsigned long jiffies_now;
+ long time_diff;
+
+ while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
+ jiffies_now = jiffies;
+ time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
+ * SI_USEC_PER_JIFFY);
+ smi_event_handler(smi_info, time_diff);
+ schedule_timeout_uninterruptible(1);
+ }
+}
+
static void shutdown_smi(void *send_info)
{
struct smi_info *smi_info = send_info;
@@ -2191,16 +2210,13 @@ static void shutdown_smi(void *send_info)
* in the BMC. Note that timers and CPU interrupts are off,
* so no need for locks.
*/
- while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
- poll(smi_info);
- schedule_timeout_uninterruptible(1);
- }
+ wait_msg_processed(smi_info);
+
if (smi_info->handlers)
disable_si_irq(smi_info);
- while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
- poll(smi_info);
- schedule_timeout_uninterruptible(1);
- }
+
+ wait_msg_processed(smi_info);
+
if (smi_info->handlers)
smi_info->handlers->cleanup(smi_info->si_sm);
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 60fb6c62f224..a1b080dfa960 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -79,7 +79,8 @@
/*
* Timer values
*/
-#define SSIF_MSG_USEC 20000 /* 20ms between message tries. */
+#define SSIF_MSG_USEC 60000 /* 60ms between message tries (T3). */
+#define SSIF_REQ_RETRY_USEC 60000 /* 60ms between send retries (T6). */
#define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */
/* How many times to we retry sending/receiving the message. */
@@ -87,7 +88,9 @@
#define SSIF_RECV_RETRIES 250
#define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000)
+#define SSIF_REQ_RETRY_MSEC (SSIF_REQ_RETRY_USEC / 1000)
#define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC)
+#define SSIF_REQ_RETRY_JIFFIES ((SSIF_REQ_RETRY_USEC * 1000) / TICK_NSEC)
#define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
/*
@@ -97,7 +100,7 @@
#define SSIF_WATCH_WATCHDOG_TIMEOUT msecs_to_jiffies(250)
enum ssif_intf_state {
- SSIF_NORMAL,
+ SSIF_IDLE,
SSIF_GETTING_FLAGS,
SSIF_GETTING_EVENTS,
SSIF_CLEARING_FLAGS,
@@ -105,8 +108,8 @@ enum ssif_intf_state {
/* FIXME - add watchdog stuff. */
};
-#define SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_NORMAL \
- && (ssif)->curr_msg == NULL)
+#define IS_SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_IDLE \
+ && (ssif)->curr_msg == NULL)
/*
* Indexes into stats[] in ssif_info below.
@@ -236,6 +239,9 @@ struct ssif_info {
bool got_alert;
bool waiting_alert;
+ /* Used to inform the timeout that it should do a resend. */
+ bool do_resend;
+
/*
* If set to true, this will request events the next time the
* state machine is idle.
@@ -248,12 +254,6 @@ struct ssif_info {
*/
bool req_flags;
- /*
- * Used to perform timer operations when run-to-completion
- * mode is on. This is a countdown timer.
- */
- int rtc_us_timer;
-
/* Used for sending/receiving data. +1 for the length. */
unsigned char data[IPMI_MAX_MSG_LENGTH + 1];
unsigned int data_len;
@@ -353,9 +353,9 @@ static void return_hosed_msg(struct ssif_info *ssif_info,
/*
* Must be called with the message lock held. This will release the
- * message lock. Note that the caller will check SSIF_IDLE and start a
- * new operation, so there is no need to check for new messages to
- * start in here.
+ * message lock. Note that the caller will check IS_SSIF_IDLE and
+ * start a new operation, so there is no need to check for new
+ * messages to start in here.
*/
static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
{
@@ -372,7 +372,7 @@ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
if (start_send(ssif_info, msg, 3) != 0) {
/* Error, just go to normal state. */
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
}
}
@@ -387,7 +387,7 @@ static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags)
mb[0] = (IPMI_NETFN_APP_REQUEST << 2);
mb[1] = IPMI_GET_MSG_FLAGS_CMD;
if (start_send(ssif_info, mb, 2) != 0)
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
}
static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
@@ -398,7 +398,7 @@ static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
ssif_info->curr_msg = NULL;
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
ipmi_free_smi_msg(msg);
}
@@ -412,7 +412,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
msg = ipmi_alloc_smi_msg();
if (!msg) {
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -435,7 +435,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
msg = ipmi_alloc_smi_msg();
if (!msg) {
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -453,9 +453,9 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
/*
* Must be called with the message lock held. This will release the
- * message lock. Note that the caller will check SSIF_IDLE and start a
- * new operation, so there is no need to check for new messages to
- * start in here.
+ * message lock. Note that the caller will check IS_SSIF_IDLE and
+ * start a new operation, so there is no need to check for new
+ * messages to start in here.
*/
static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
{
@@ -471,7 +471,7 @@ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
/* Events available. */
start_event_fetch(ssif_info, flags);
else {
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
}
}
@@ -515,7 +515,7 @@ static int ipmi_ssif_thread(void *data)
return 0;
}
-static int ssif_i2c_send(struct ssif_info *ssif_info,
+static void ssif_i2c_send(struct ssif_info *ssif_info,
ssif_i2c_done handler,
int read_write, int command,
unsigned char *data, unsigned int size)
@@ -527,7 +527,6 @@ static int ssif_i2c_send(struct ssif_info *ssif_info,
ssif_info->i2c_data = data;
ssif_info->i2c_size = size;
complete(&ssif_info->wake_thread);
- return 0;
}
@@ -536,40 +535,37 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
static void start_get(struct ssif_info *ssif_info)
{
- int rv;
-
- ssif_info->rtc_us_timer = 0;
ssif_info->multi_pos = 0;
- rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
- SSIF_IPMI_RESPONSE,
- ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
- if (rv < 0) {
- /* request failed, just return the error. */
- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
- dev_dbg(&ssif_info->client->dev,
- "Error from i2c_non_blocking_op(5)\n");
-
- msg_done_handler(ssif_info, -EIO, NULL, 0);
- }
+ ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+ SSIF_IPMI_RESPONSE,
+ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
}
+static void start_resend(struct ssif_info *ssif_info);
+
static void retry_timeout(struct timer_list *t)
{
struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
unsigned long oflags, *flags;
- bool waiting;
+ bool waiting, resend;
if (ssif_info->stopping)
return;
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ resend = ssif_info->do_resend;
+ ssif_info->do_resend = false;
waiting = ssif_info->waiting_alert;
ssif_info->waiting_alert = false;
ipmi_ssif_unlock_cond(ssif_info, flags);
if (waiting)
start_get(ssif_info);
+ if (resend) {
+ start_resend(ssif_info);
+ ssif_inc_stat(ssif_info, send_retries);
+ }
}
static void watch_timeout(struct timer_list *t)
@@ -584,7 +580,7 @@ static void watch_timeout(struct timer_list *t)
if (ssif_info->watch_timeout) {
mod_timer(&ssif_info->watch_timer,
jiffies + ssif_info->watch_timeout);
- if (SSIF_IDLE(ssif_info)) {
+ if (IS_SSIF_IDLE(ssif_info)) {
start_flag_fetch(ssif_info, flags); /* Releases lock */
return;
}
@@ -618,14 +614,11 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
start_get(ssif_info);
}
-static int start_resend(struct ssif_info *ssif_info);
-
static void msg_done_handler(struct ssif_info *ssif_info, int result,
unsigned char *data, unsigned int len)
{
struct ipmi_smi_msg *msg;
unsigned long oflags, *flags;
- int rv;
/*
* We are single-threaded here, so no need for a lock until we
@@ -639,7 +632,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
ssif_info->waiting_alert = true;
- ssif_info->rtc_us_timer = SSIF_MSG_USEC;
if (!ssif_info->stopping)
mod_timer(&ssif_info->retry_timer,
jiffies + SSIF_MSG_JIFFIES);
@@ -671,17 +663,10 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
ssif_info->multi_len = len;
ssif_info->multi_pos = 1;
- rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
- SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
- ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
- if (rv < 0) {
- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
- dev_dbg(&ssif_info->client->dev,
- "Error from i2c_non_blocking_op(1)\n");
-
- result = -EIO;
- } else
- return;
+ ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+ SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+ return;
} else if (ssif_info->multi_pos) {
/* Middle of multi-part read. Start the next transaction. */
int i;
@@ -743,19 +728,12 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
ssif_info->multi_pos++;
- rv = ssif_i2c_send(ssif_info, msg_done_handler,
- I2C_SMBUS_READ,
- SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
- ssif_info->recv,
- I2C_SMBUS_BLOCK_DATA);
- if (rv < 0) {
- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
- dev_dbg(&ssif_info->client->dev,
- "Error from ssif_i2c_send\n");
-
- result = -EIO;
- } else
- return;
+ ssif_i2c_send(ssif_info, msg_done_handler,
+ I2C_SMBUS_READ,
+ SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+ ssif_info->recv,
+ I2C_SMBUS_BLOCK_DATA);
+ return;
}
}
@@ -787,7 +765,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
switch (ssif_info->ssif_state) {
- case SSIF_NORMAL:
+ case SSIF_IDLE:
ipmi_ssif_unlock_cond(ssif_info, flags);
if (!msg)
break;
@@ -805,7 +783,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
* Error fetching flags, or invalid length,
* just give up for now.
*/
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
dev_warn(&ssif_info->client->dev,
"Error getting flags: %d %d, %x\n",
@@ -813,9 +791,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
|| data[1] != IPMI_GET_MSG_FLAGS_CMD) {
/*
- * Don't abort here, maybe it was a queued
- * response to a previous command.
+ * Recv error response, give up.
*/
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
dev_warn(&ssif_info->client->dev,
"Invalid response getting flags: %x %x\n",
@@ -840,7 +818,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
"Invalid response clearing flags: %x %x\n",
data[0], data[1]);
}
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
break;
@@ -918,7 +896,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
- if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
+ if (IS_SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
if (ssif_info->req_events)
start_event_fetch(ssif_info, flags);
else if (ssif_info->req_flags)
@@ -936,37 +914,27 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
static void msg_written_handler(struct ssif_info *ssif_info, int result,
unsigned char *data, unsigned int len)
{
- int rv;
-
/* We are single-threaded here, so no need for a lock. */
if (result < 0) {
ssif_info->retries_left--;
if (ssif_info->retries_left > 0) {
- if (!start_resend(ssif_info)) {
- ssif_inc_stat(ssif_info, send_retries);
- return;
- }
- /* request failed, just return the error. */
- ssif_inc_stat(ssif_info, send_errors);
-
- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
- dev_dbg(&ssif_info->client->dev,
- "%s: Out of retries\n", __func__);
- msg_done_handler(ssif_info, -EIO, NULL, 0);
+ /*
+ * Wait the retry timeout time per the spec,
+ * then redo the send.
+ */
+ ssif_info->do_resend = true;
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_REQ_RETRY_JIFFIES);
return;
}
ssif_inc_stat(ssif_info, send_errors);
- /*
- * Got an error on transmit, let the done routine
- * handle it.
- */
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
dev_dbg(&ssif_info->client->dev,
- "%s: Error %d\n", __func__, result);
+ "%s: Out of retries\n", __func__);
- msg_done_handler(ssif_info, result, NULL, 0);
+ msg_done_handler(ssif_info, -EIO, NULL, 0);
return;
}
@@ -1000,18 +968,9 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
ssif_info->multi_data = NULL;
}
- rv = ssif_i2c_send(ssif_info, msg_written_handler,
- I2C_SMBUS_WRITE, cmd,
- data_to_send, I2C_SMBUS_BLOCK_DATA);
- if (rv < 0) {
- /* request failed, just return the error. */
- ssif_inc_stat(ssif_info, send_errors);
-
- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
- dev_dbg(&ssif_info->client->dev,
- "Error from i2c_non_blocking_op(3)\n");
- msg_done_handler(ssif_info, -EIO, NULL, 0);
- }
+ ssif_i2c_send(ssif_info, msg_written_handler,
+ I2C_SMBUS_WRITE, cmd,
+ data_to_send, I2C_SMBUS_BLOCK_DATA);
} else {
/* Ready to request the result. */
unsigned long oflags, *flags;
@@ -1029,7 +988,6 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
/* Wait a jiffie then request the next message */
ssif_info->waiting_alert = true;
ssif_info->retries_left = SSIF_RECV_RETRIES;
- ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
if (!ssif_info->stopping)
mod_timer(&ssif_info->retry_timer,
jiffies + SSIF_MSG_PART_JIFFIES);
@@ -1038,9 +996,8 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
}
}
-static int start_resend(struct ssif_info *ssif_info)
+static void start_resend(struct ssif_info *ssif_info)
{
- int rv;
int command;
ssif_info->got_alert = false;
@@ -1062,12 +1019,8 @@ static int start_resend(struct ssif_info *ssif_info)
ssif_info->data[0] = ssif_info->data_len;
}
- rv = ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
- command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
- if (rv && (ssif_info->ssif_debug & SSIF_DEBUG_MSG))
- dev_dbg(&ssif_info->client->dev,
- "Error from i2c_non_blocking_op(4)\n");
- return rv;
+ ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
+ command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
}
static int start_send(struct ssif_info *ssif_info,
@@ -1082,7 +1035,8 @@ static int start_send(struct ssif_info *ssif_info,
ssif_info->retries_left = SSIF_SEND_RETRIES;
memcpy(ssif_info->data + 1, data, len);
ssif_info->data_len = len;
- return start_resend(ssif_info);
+ start_resend(ssif_info);
+ return 0;
}
/* Must be called with the message lock held. */
@@ -1092,7 +1046,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
unsigned long oflags;
restart:
- if (!SSIF_IDLE(ssif_info)) {
+ if (!IS_SSIF_IDLE(ssif_info)) {
ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -1315,7 +1269,7 @@ static void shutdown_ssif(void *send_info)
dev_set_drvdata(&ssif_info->client->dev, NULL);
/* make sure the driver is not looking for flags any more. */
- while (ssif_info->ssif_state != SSIF_NORMAL)
+ while (ssif_info->ssif_state != SSIF_IDLE)
schedule_timeout(1);
ssif_info->stopping = true;
@@ -1382,8 +1336,10 @@ static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
if (ret) {
retry_cnt--;
- if (retry_cnt > 0)
+ if (retry_cnt > 0) {
+ msleep(SSIF_REQ_RETRY_MSEC);
goto retry1;
+ }
return -ENODEV;
}
@@ -1454,7 +1410,7 @@ static struct ssif_addr_info *ssif_info_find(unsigned short addr,
restart:
list_for_each_entry(info, &ssif_infos, link) {
if (info->binfo.addr == addr) {
- if (info->addr_src == SI_SMBIOS)
+ if (info->addr_src == SI_SMBIOS && !info->adapter_name)
info->adapter_name = kstrdup(adapter_name,
GFP_KERNEL);
@@ -1523,8 +1479,10 @@ retry_write:
32, msg);
if (ret) {
retry_cnt--;
- if (retry_cnt > 0)
+ if (retry_cnt > 0) {
+ msleep(SSIF_REQ_RETRY_MSEC);
goto retry_write;
+ }
dev_err(&client->dev, "Could not write multi-part start, though the BMC said it could handle it. Just limit sends to one part.\n");
return ret;
}
@@ -1651,6 +1609,11 @@ static int ssif_add_infos(struct i2c_client *client)
info->addr_src = SI_ACPI;
info->client = client;
info->adapter_name = kstrdup(client->adapter->name, GFP_KERNEL);
+ if (!info->adapter_name) {
+ kfree(info);
+ return -ENOMEM;
+ }
+
info->binfo.addr = client->addr;
list_add_tail(&info->link, &ssif_infos);
return 0;
@@ -1886,7 +1849,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
spin_lock_init(&ssif_info->lock);
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
timer_setup(&ssif_info->watch_timer, watch_timeout, 0);
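
The structural change running through this file: a failed write is no longer retried inline from the I2C completion path. msg_written_handler() (which runs single-threaded) just records the intent and arms the timer, and retry_timeout() performs the resend after the spec-mandated T6 delay. Roughly, with a plain spinlock standing in for the driver's ipmi_ssif_lock_cond() wrappers:

/* Requester side: record intent, arm the timer, return. */
ssif->do_resend = true;
mod_timer(&ssif->retry_timer, jiffies + SSIF_REQ_RETRY_JIFFIES);

/* Timer side: consume the flag under the lock, then act on it. */
spin_lock_irqsave(&ssif->lock, flags);
resend = ssif->do_resend;
ssif->do_resend = false;
spin_unlock_irqrestore(&ssif->lock, flags);
if (resend) {
	start_resend(ssif);
	ssif_inc_stat(ssif, send_retries);
}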
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 72ad7fff64a7..ccb62c480bdd 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -498,7 +498,7 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
- atomic_add(1, &panic_done_count);
+ atomic_add(2, &panic_done_count);
rv = ipmi_request_supply_msgs(watchdog_user,
(struct ipmi_addr *) &addr,
0,
@@ -508,7 +508,7 @@ static void panic_halt_ipmi_heartbeat(void)
&panic_halt_heartbeat_recv_msg,
1);
if (rv)
- atomic_sub(1, &panic_done_count);
+ atomic_sub(2, &panic_done_count);
}
static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -532,12 +532,12 @@ static void panic_halt_ipmi_set_timeout(void)
/* Wait for the messages to be free. */
while (atomic_read(&panic_done_count) != 0)
ipmi_poll_interface(watchdog_user);
- atomic_add(1, &panic_done_count);
+ atomic_add(2, &panic_done_count);
rv = __ipmi_set_timeout(&panic_halt_smi_msg,
&panic_halt_recv_msg,
&send_heartbeat_now);
if (rv) {
- atomic_sub(1, &panic_done_count);
+ atomic_sub(2, &panic_done_count);
pr_warn("Unable to extend the watchdog timeout\n");
} else {
if (send_heartbeat_now)
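
Why 2: each panic-time request hands the IPMI core both an SMI message and a receive message, and elsewhere in ipmi_watchdog.c (not visible in these hunks) each one has a free routine that decrements the counter, so counting a request as 1 let the panic busy-wait loop exit one decrement early:

/* Both messages supplied to ipmi_request_supply_msgs() come back
 * through a free routine, and each drops the counter once. */
static void panic_smi_free(struct ipmi_smi_msg *msg)
{
	atomic_dec(&panic_done_count);
}

static void panic_recv_free(struct ipmi_recv_msg *msg)
{
	atomic_dec(&panic_done_count);
}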
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6b56bff9b68c..c5025ae6a37e 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -953,8 +953,8 @@ static const struct memdev {
#endif
[5] = { "zero", 0666, &zero_fops, 0 },
[7] = { "full", 0666, &full_fops, 0 },
- [8] = { "random", 0666, &random_fops, 0 },
- [9] = { "urandom", 0666, &urandom_fops, 0 },
+ [8] = { "random", 0666, &random_fops, FMODE_NOWAIT },
+ [9] = { "urandom", 0666, &urandom_fops, FMODE_NOWAIT },
#ifdef CONFIG_PRINTK
[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 1ef94d112521..364dd5eaebda 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -890,20 +890,23 @@ void __init add_bootloader_randomness(const void *buf, size_t len)
}
struct fast_pool {
- struct work_struct mix;
unsigned long pool[4];
unsigned long last;
unsigned int count;
+ struct timer_list mix;
};
+static void mix_interrupt_randomness(struct timer_list *work);
+
static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
#ifdef CONFIG_64BIT
#define FASTMIX_PERM SIPHASH_PERMUTATION
- .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }
+ .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
#else
#define FASTMIX_PERM HSIPHASH_PERMUTATION
- .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }
+ .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
#endif
+ .mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
};
/*
@@ -945,7 +948,7 @@ int __cold random_online_cpu(unsigned int cpu)
}
#endif
-static void mix_interrupt_randomness(struct work_struct *work)
+static void mix_interrupt_randomness(struct timer_list *work)
{
struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
/*
@@ -976,7 +979,7 @@ static void mix_interrupt_randomness(struct work_struct *work)
local_irq_enable();
mix_pool_bytes(pool, sizeof(pool));
- credit_init_bits(max(1u, (count & U16_MAX) / 64));
+ credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));
memzero_explicit(pool, sizeof(pool));
}
@@ -999,10 +1002,11 @@ void add_interrupt_randomness(int irq)
if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
return;
- if (unlikely(!fast_pool->mix.func))
- INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
fast_pool->count |= MIX_INFLIGHT;
- queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
+ if (!timer_pending(&fast_pool->mix)) {
+ fast_pool->mix.expires = jiffies;
+ add_timer_on(&fast_pool->mix, raw_smp_processor_id());
+ }
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
@@ -1294,6 +1298,11 @@ static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
int ret;
+ if (!crng_ready() &&
+ ((kiocb->ki_flags & IOCB_NOWAIT) ||
+ (kiocb->ki_filp->f_flags & O_NONBLOCK)))
+ return -EAGAIN;
+
ret = wait_for_random_bytes();
if (ret != 0)
return ret;
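
Paired with the drivers/char/mem.c hunk above that adds FMODE_NOWAIT to /dev/random and /dev/urandom, this gives userspace a non-blocking probe for entropy readiness: a preadv2() with RWF_NOWAIT now fails fast with EAGAIN while the CRNG is still uninitialized. A userspace sketch, assuming a glibc recent enough to expose preadv2() and RWF_NOWAIT:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("/dev/random", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;

	/* RWF_NOWAIT sets IOCB_NOWAIT; before the CRNG is ready this
	 * returns -1 with errno == EAGAIN instead of blocking. */
	n = preadv2(fd, &iov, 1, -1, RWF_NOWAIT);
	if (n < 0 && errno == EAGAIN)
		fprintf(stderr, "entropy pool not yet initialized\n");

	close(fd);
	return 0;
}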
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index b86ee5b18855..7736c637848a 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -396,13 +396,14 @@ int tpm_pm_suspend(struct device *dev)
if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
return 0;
- if (!tpm_chip_start(chip)) {
+ rc = tpm_try_get_ops(chip);
+ if (!rc) {
if (chip->flags & TPM_CHIP_FLAG_TPM2)
tpm2_shutdown(chip, TPM2_SU_STATE);
else
rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
- tpm_chip_stop(chip);
+ tpm_put_ops(chip);
}
return rc;
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index a9dcf31eadd2..5fe52a6839b5 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -252,7 +252,7 @@ static int __crb_relinquish_locality(struct device *dev,
iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
TPM2_TIMEOUT_C)) {
- dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
+ dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n");
return -ETIME;
}
@@ -676,12 +676,16 @@ static int crb_acpi_add(struct acpi_device *device)
/* Should the FIFO driver handle this? */
sm = buf->start_method;
- if (sm == ACPI_TPM2_MEMORY_MAPPED)
- return -ENODEV;
+ if (sm == ACPI_TPM2_MEMORY_MAPPED) {
+ rc = -ENODEV;
+ goto out;
+ }
priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ if (!priv) {
+ rc = -ENOMEM;
+ goto out;
+ }
if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
@@ -689,7 +693,8 @@ static int crb_acpi_add(struct acpi_device *device)
FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
buf->header.length,
ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
priv->smc_func_id = crb_smc->smc_func_id;
@@ -700,17 +705,23 @@ static int crb_acpi_add(struct acpi_device *device)
rc = crb_map_io(device, priv, buf);
if (rc)
- return rc;
+ goto out;
chip = tpmm_chip_alloc(dev, &tpm_crb);
- if (IS_ERR(chip))
- return PTR_ERR(chip);
+ if (IS_ERR(chip)) {
+ rc = PTR_ERR(chip);
+ goto out;
+ }
dev_set_drvdata(&chip->dev, priv);
chip->acpi_dev_handle = device->handle;
chip->flags = TPM_CHIP_FLAG_TPM2;
- return tpm_chip_register(chip);
+ rc = tpm_chip_register(chip);
+
+out:
+ acpi_put_table((struct acpi_table_header *)buf);
+ return rc;
}
static int crb_acpi_remove(struct acpi_device *device)
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index c722e3b3121a..5456d8e2eef2 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -83,6 +83,22 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
},
},
+ {
+ .callback = tpm_tis_disable_irq,
+ .ident = "ThinkStation P360 Tiny",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P360 Tiny"),
+ },
+ },
+ {
+ .callback = tpm_tis_disable_irq,
+ .ident = "ThinkPad L490",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L490"),
+ },
+ },
{}
};
@@ -125,6 +141,7 @@ static int check_acpi_tpm2(struct device *dev)
const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev);
struct acpi_table_tpm2 *tbl;
acpi_status st;
+ int ret = 0;
if (!aid || aid->driver_data != DEVICE_IS_TPM2)
return 0;
@@ -132,8 +149,7 @@ static int check_acpi_tpm2(struct device *dev)
/* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2
* table is mandatory
*/
- st =
- acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
+ st = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
return -EINVAL;
@@ -141,9 +157,10 @@ static int check_acpi_tpm2(struct device *dev)
/* The tpm2_crb driver handles this device */
if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
- return -ENODEV;
+ ret = -ENODEV;
- return 0;
+ acpi_put_table((struct acpi_table_header *)tbl);
+ return ret;
}
#else
static int check_acpi_tpm2(struct device *dev)
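
This hunk and the tpm_crb change above enforce the same rule: acpi_get_table() takes a reference on the mapped table, so every exit path, success or error, must drop it with acpi_put_table(). The balanced shape, restated:

struct acpi_table_tpm2 *tbl;
acpi_status st;
int ret = 0;

st = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
if (ACPI_FAILURE(st))
	return -EINVAL;		/* nothing acquired, nothing to put */

if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
	ret = -ENODEV;		/* the CRB driver handles this device */

acpi_put_table((struct acpi_table_header *)tbl);	/* always dropped */
return ret;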
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 70f785994228..a084f732c180 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -266,6 +266,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
int size = 0;
int status;
u32 expected;
+ int rc;
if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -285,8 +286,13 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
- size += recv_data(chip, &buf[TPM_HEADER_SIZE],
- expected - TPM_HEADER_SIZE);
+ rc = recv_data(chip, &buf[TPM_HEADER_SIZE],
+ expected - TPM_HEADER_SIZE);
+ if (rc < 0) {
+ size = rc;
+ goto out;
+ }
+ size += rc;
if (size < expected) {
dev_err(&chip->dev, "Unable to read remainder of result\n");
size = -ETIME;
@@ -415,10 +421,17 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
int rc;
u32 ordinal;
unsigned long dur;
-
- rc = tpm_tis_send_data(chip, buf, len);
- if (rc < 0)
- return rc;
+ unsigned int try;
+
+ for (try = 0; try < TPM_RETRY; try++) {
+ rc = tpm_tis_send_data(chip, buf, len);
+ if (rc >= 0)
+ /* Data transfer done successfully */
+ break;
+ else if (rc != -EIO)
+ /* Data transfer failed, not recoverable */
+ return rc;
+ }
/* go and do it */
rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO);
@@ -613,7 +626,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
return IRQ_HANDLED;
}
-static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
+static void tpm_tis_gen_interrupt(struct tpm_chip *chip)
{
const char *desc = "attempting to generate an interrupt";
u32 cap2;
@@ -622,7 +635,7 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
ret = request_locality(chip, 0);
if (ret < 0)
- return ret;
+ return;
if (chip->flags & TPM_CHIP_FLAG_TPM2)
ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
@@ -630,8 +643,6 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
release_locality(chip, 0);
-
- return ret;
}
/* Register the IRQ and issue a command that will cause an interrupt. If an
@@ -661,42 +672,37 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq);
if (rc < 0)
- return rc;
+ goto restore_irqs;
rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status);
if (rc < 0)
- return rc;
+ goto restore_irqs;
/* Clear all existing */
rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status);
if (rc < 0)
- return rc;
-
+ goto restore_irqs;
/* Turn on */
rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality),
intmask | TPM_GLOBAL_INT_ENABLE);
if (rc < 0)
- return rc;
+ goto restore_irqs;
priv->irq_tested = false;
/* Generate an interrupt by having the core call through to
* tpm_tis_send
*/
- rc = tpm_tis_gen_interrupt(chip);
- if (rc < 0)
- return rc;
+ tpm_tis_gen_interrupt(chip);
+restore_irqs:
/* tpm_tis_send will either confirm the interrupt is working or it
* will call disable_irq which undoes all of the above.
*/
if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
- rc = tpm_tis_write8(priv, original_int_vec,
- TPM_INT_VECTOR(priv->locality));
- if (rc < 0)
- return rc;
-
- return 1;
+ tpm_tis_write8(priv, original_int_vec,
+ TPM_INT_VECTOR(priv->locality));
+ return -1;
}
return 0;
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 2f6e087ec496..ff6b88fa4f47 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -693,37 +693,21 @@ static struct miscdevice vtpmx_miscdev = {
.fops = &vtpmx_fops,
};
-static int vtpmx_init(void)
-{
- return misc_register(&vtpmx_miscdev);
-}
-
-static void vtpmx_cleanup(void)
-{
- misc_deregister(&vtpmx_miscdev);
-}
-
static int __init vtpm_module_init(void)
{
int rc;
- rc = vtpmx_init();
- if (rc) {
- pr_err("couldn't create vtpmx device\n");
- return rc;
- }
-
workqueue = create_workqueue("tpm-vtpm");
if (!workqueue) {
pr_err("couldn't create workqueue\n");
- rc = -ENOMEM;
- goto err_vtpmx_cleanup;
+ return -ENOMEM;
}
- return 0;
-
-err_vtpmx_cleanup:
- vtpmx_cleanup();
+ rc = misc_register(&vtpmx_miscdev);
+ if (rc) {
+ pr_err("couldn't create vtpmx device\n");
+ destroy_workqueue(workqueue);
+ }
return rc;
}
@@ -731,7 +715,7 @@ err_vtpmx_cleanup:
static void __exit vtpm_module_exit(void)
{
destroy_workqueue(workqueue);
- vtpmx_cleanup();
+ misc_deregister(&vtpmx_miscdev);
}
module_init(vtpm_module_init);
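
The reordering follows the usual module-init rule: bring internal dependencies up first and register the user-visible interface last, so the device node can never be opened while the workqueue it relies on does not exist yet; the failure path then unwinds in reverse. The resulting skeleton, with hypothetical foo_* names:

static struct workqueue_struct *wq;
static struct miscdevice foo_miscdev;	/* .minor/.name/.fops filled in elsewhere */

static int __init foo_module_init(void)
{
	int rc;

	wq = create_workqueue("foo");		/* dependency first */
	if (!wq)
		return -ENOMEM;

	rc = misc_register(&foo_miscdev);	/* user-visible node last */
	if (rc)
		destroy_workqueue(wq);		/* unwind on failure */
	return rc;
}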
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index c44247d0b83e..43e55e983e66 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -63,7 +63,7 @@ config COMMON_CLK_RK808
config COMMON_CLK_HI655X
tristate "Clock driver for Hi655x" if EXPERT
depends on (MFD_HI655X_PMIC || COMPILE_TEST)
- depends on REGMAP
+ select REGMAP
default MFD_HI655X_PMIC
---help---
This driver supports the hi655x PMIC clock. This
@@ -136,6 +136,37 @@ config COMMON_CLK_SI570
This driver supports Silicon Labs 570/571/598/599 programmable
clock generators.
+config COMMON_CLK_SI5324
+ tristate "Clock driver for SiLabs 5324 and compatible devices"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+	help
+ This driver supports Silicon Labs 5324/5319/5328 programmable
+ clock generators. Dynamic programming of the oscillator is done
+ via I2C.
+
+config COMMON_CLK_IDT8T49N24X
+ tristate "Clock driver for IDT 8T49N24x"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+	help
+ This driver supports the IDT 8T49N24x universal frequency translator
+ product family. The only chip in the family that is currently
+ supported is the 8T49N241. The driver supports setting the rate for
+ all four outputs on the chip and automatically calculating/setting
+ the appropriate VCO value.
+
+ The driver can read a full register map from the DT,
+ and will use that register map to initialize the attached part
+ (via I2C) when the system boots. Any configuration not supported
+ by the common clock framework must be done via the full register
+ map, including optimized settings.
+
+ All outputs are currently assumed to be LVDS, unless overridden
+ in the full register map in the DT.
+
config COMMON_CLK_CDCE706
tristate "Clock driver for TI CDCE706 clock synthesizer"
depends on I2C
@@ -302,6 +333,7 @@ config COMMON_CLK_BD718XX
config COMMON_CLK_FIXED_MMIO
bool "Clock driver for Memory Mapped Fixed values"
depends on COMMON_CLK && OF
+ depends on HAS_IOMEM
help
Support for Memory Mapped IO Fixed clocks
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 0138fb14e6f8..c44dda072e24 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
obj-$(CONFIG_COMMON_CLK_STM32F) += clk-stm32f4.o
obj-$(CONFIG_COMMON_CLK_STM32H7) += clk-stm32h7.o
obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o
+obj-$(CONFIG_COMMON_CLK_SI5324) += si5324drv.o clk-si5324.o
obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_ARCH_U300) += clk-u300.o
@@ -77,6 +78,7 @@ obj-$(CONFIG_ARCH_BERLIN) += berlin/
obj-$(CONFIG_ARCH_DAVINCI) += davinci/
obj-$(CONFIG_H8300) += h8300/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
+obj-$(CONFIG_COMMON_CLK_IDT8T49N24X) += idt/
obj-y += imgtec/
obj-y += imx/
obj-y += ingenic/
@@ -117,4 +119,4 @@ obj-$(CONFIG_X86) += x86/
endif
obj-$(CONFIG_ARCH_ZX) += zte/
obj-$(CONFIG_ARCH_ZYNQ) += zynq/
-obj-$(CONFIG_COMMON_CLK_ZYNQMP) += zynqmp/
+obj-$(CONFIG_COMMON_CLK_ZYNQMP) += zynqmp/
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index e637bd6b295b..b4e6a7923233 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -967,9 +967,9 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw,
return div;
}
-static long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock,
- unsigned long parent_rate,
- u32 div)
+static unsigned long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock,
+ unsigned long parent_rate,
+ u32 div)
{
const struct bcm2835_clock_data *data = clock->data;
u64 temp;
@@ -1756,7 +1756,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
.load_mask = CM_PLLC_LOADPER,
.hold_mask = CM_PLLC_HOLDPER,
.fixed_divider = 1,
- .flags = CLK_SET_RATE_PARENT),
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
/*
* PLLD is the display PLL, used to drive DSI display panels.
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 274441e2ddb2..8f0619f362e3 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -736,6 +736,7 @@ void iproc_pll_clk_setup(struct device_node *node,
const char *parent_name;
struct iproc_clk *iclk_array;
struct clk_hw_onecell_data *clk_data;
+ const char *clk_name;
if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
return;
@@ -783,7 +784,12 @@ void iproc_pll_clk_setup(struct device_node *node,
iclk = &iclk_array[0];
iclk->pll = pll;
- init.name = node->name;
+ ret = of_property_read_string_index(node, "clock-output-names",
+ 0, &clk_name);
+ if (WARN_ON(ret))
+ goto err_pll_register;
+
+ init.name = clk_name;
init.ops = &iproc_pll_ops;
init.flags = 0;
parent_name = of_clk_get_parent_name(node, 0);
@@ -803,13 +809,11 @@ void iproc_pll_clk_setup(struct device_node *node,
goto err_pll_register;
clk_data->hws[0] = &iclk->hw;
+ parent_name = clk_name;
/* now initialize and register all leaf clocks */
for (i = 1; i < num_clks; i++) {
- const char *clk_name;
-
memset(&init, 0, sizeof(init));
- parent_name = node->name;
ret = of_property_read_string_index(node, "clock-output-names",
i, &clk_name);
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 1654fd0eedc9..a790a8ca02ff 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -113,7 +113,7 @@ static unsigned long raspberrypi_fw_pll_get_rate(struct clk_hw *hw,
RPI_FIRMWARE_ARM_CLK_ID,
&val);
if (ret)
- return ret;
+ return 0;
return val * RPI_FIRMWARE_PLLB_ARM_DIV_RATE;
}
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index bccdfa00fd37..67a9edbba29c 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -500,12 +500,15 @@ static void __init berlin2_clock_setup(struct device_node *np)
int n, ret;
clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
- if (!clk_data)
+ if (!clk_data) {
+ of_node_put(parent_np);
return;
+ }
clk_data->num = MAX_CLKS;
hws = clk_data->hws;
gbase = of_iomap(parent_np, 0);
+ of_node_put(parent_np);
if (!gbase)
return;
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index e9518d35f262..dd2784bb75b6 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -286,19 +286,23 @@ static void __init berlin2q_clock_setup(struct device_node *np)
int n, ret;
clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
- if (!clk_data)
+ if (!clk_data) {
+ of_node_put(parent_np);
return;
+ }
clk_data->num = MAX_CLKS;
hws = clk_data->hws;
gbase = of_iomap(parent_np, 0);
if (!gbase) {
+ of_node_put(parent_np);
pr_err("%pOF: Unable to map global base\n", np);
return;
}
/* BG2Q CPU PLL is not part of global registers */
cpupll_base = of_iomap(parent_np, 1);
+ of_node_put(parent_np);
if (!cpupll_base) {
pr_err("%pOF: Unable to map cpupll base\n", np);
iounmap(gbase);
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 48122f574cb6..8a6c6b9c9a6a 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -579,7 +579,7 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
regmap_write(map, 0x308, 0x12000); /* 3x3 = 9 */
/* P-Bus (BCLK) clock divider */
- hw = clk_hw_register_divider_table(dev, "bclk", "hpll", 0,
+ hw = clk_hw_register_divider_table(dev, "bclk", "epll", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0,
ast2600_div_table,
&aspeed_g6_clk_lock);
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index 308b353815e1..470d91d7314d 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -705,6 +705,10 @@ static int cdce925_probe(struct i2c_client *client,
for (i = 0; i < data->chip_info->num_plls; ++i) {
pll_clk_name[i] = kasprintf(GFP_KERNEL, "%pOFn.pll%d",
client->dev.of_node, i);
+ if (!pll_clk_name[i]) {
+ err = -ENOMEM;
+ goto error;
+ }
init.name = pll_clk_name[i];
data->pll[i].chip = data;
data->pll[i].hw.init = &init;
@@ -746,6 +750,10 @@ static int cdce925_probe(struct i2c_client *client,
init.num_parents = 1;
init.parent_names = &parent_name; /* Mux Y1 to input */
init.name = kasprintf(GFP_KERNEL, "%pOFn.Y1", client->dev.of_node);
+ if (!init.name) {
+ err = -ENOMEM;
+ goto error;
+ }
data->clk[0].chip = data;
data->clk[0].hw.init = &init;
data->clk[0].index = 0;
@@ -764,6 +772,10 @@ static int cdce925_probe(struct i2c_client *client,
for (i = 1; i < data->chip_info->num_outputs; ++i) {
init.name = kasprintf(GFP_KERNEL, "%pOFn.Y%d",
client->dev.of_node, i+1);
+ if (!init.name) {
+ err = -ENOMEM;
+ goto error;
+ }
data->clk[i].chip = data;
data->clk[i].hw.init = &init;
data->clk[i].index = i;
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 2ef819606c41..1a4e6340f95c 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -33,9 +33,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
else
return rc;
}
- if (clkspec.np == node && !clk_supplier)
+ if (clkspec.np == node && !clk_supplier) {
+ of_node_put(clkspec.np);
return 0;
+ }
pclk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
if (IS_ERR(pclk)) {
if (PTR_ERR(pclk) != -EPROBE_DEFER)
pr_warn("clk: couldn't get parent clock %d for %pOF\n",
@@ -48,10 +51,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
if (rc < 0)
goto err;
if (clkspec.np == node && !clk_supplier) {
+ of_node_put(clkspec.np);
rc = 0;
goto err;
}
clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
pr_warn("clk: couldn't get assigned clock %d for %pOF\n",
@@ -93,10 +98,13 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
else
return rc;
}
- if (clkspec.np == node && !clk_supplier)
+ if (clkspec.np == node && !clk_supplier) {
+ of_node_put(clkspec.np);
return 0;
+ }
clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
pr_warn("clk: couldn't get clock %d for %pOF\n",
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index f9d5b7334341..737aa70e2cb3 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -4,42 +4,101 @@
#include <linux/export.h>
#include <linux/gfp.h>
+struct devm_clk_state {
+ struct clk *clk;
+ void (*exit)(struct clk *clk);
+};
+
static void devm_clk_release(struct device *dev, void *res)
{
- clk_put(*(struct clk **)res);
+ struct devm_clk_state *state = res;
+
+ if (state->exit)
+ state->exit(state->clk);
+
+ clk_put(state->clk);
}
-struct clk *devm_clk_get(struct device *dev, const char *id)
+static struct clk *__devm_clk_get(struct device *dev, const char *id,
+ struct clk *(*get)(struct device *dev, const char *id),
+ int (*init)(struct clk *clk),
+ void (*exit)(struct clk *clk))
{
- struct clk **ptr, *clk;
+ struct devm_clk_state *state;
+ struct clk *clk;
+ int ret;
- ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
+ state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
+ if (!state)
return ERR_PTR(-ENOMEM);
- clk = clk_get(dev, id);
- if (!IS_ERR(clk)) {
- *ptr = clk;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
+ clk = get(dev, id);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err_clk_get;
}
+ if (init) {
+ ret = init(clk);
+ if (ret)
+ goto err_clk_init;
+ }
+
+ state->clk = clk;
+ state->exit = exit;
+
+ devres_add(dev, state);
+
return clk;
+
+err_clk_init:
+
+ clk_put(clk);
+err_clk_get:
+
+ devres_free(state);
+ return ERR_PTR(ret);
+}
+
+struct clk *devm_clk_get(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get);
-struct clk *devm_clk_get_optional(struct device *dev, const char *id)
+struct clk *devm_clk_get_prepared(struct device *dev, const char *id)
{
- struct clk *clk = devm_clk_get(dev, id);
+ return __devm_clk_get(dev, id, clk_get, clk_prepare, clk_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_prepared);
- if (clk == ERR_PTR(-ENOENT))
- return NULL;
+struct clk *devm_clk_get_enabled(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get,
+ clk_prepare_enable, clk_disable_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_enabled);
- return clk;
+struct clk *devm_clk_get_optional(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get_optional, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get_optional);
+struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get_optional,
+ clk_prepare, clk_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_optional_prepared);
+
+struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get_optional,
+ clk_prepare_enable, clk_disable_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled);
+
struct clk_bulk_devres {
struct clk_bulk_data *clks;
int num_clks;
@@ -146,18 +205,19 @@ EXPORT_SYMBOL(devm_clk_put);
struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id)
{
- struct clk **ptr, *clk;
+ struct devm_clk_state *state;
+ struct clk *clk;
- ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
+ state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
+ if (!state)
return ERR_PTR(-ENOMEM);
clk = of_clk_get_by_name(np, con_id);
if (!IS_ERR(clk)) {
- *ptr = clk;
- devres_add(dev, ptr);
+ state->clk = clk;
+ devres_add(dev, state);
} else {
- devres_free(ptr);
+ devres_free(state);
}
return clk;
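
__devm_clk_get() above is a specialization of the generic devres pattern: allocate a release record first, acquire the resource, then devres_add() it so the release callback fires automatically at driver detach. The same skeleton works for any managed resource; a sketch with hypothetical foo_* names:

struct foo;					/* some refcounted resource */
struct foo *foo_get(struct device *dev);	/* hypothetical acquire */
void foo_put(struct foo *obj);			/* hypothetical release */

struct devm_foo_state {
	struct foo *obj;
};

static void devm_foo_release(struct device *dev, void *res)
{
	struct devm_foo_state *state = res;

	foo_put(state->obj);		/* runs automatically at unbind */
}

struct foo *devm_foo_get(struct device *dev)
{
	struct devm_foo_state *state;
	struct foo *obj;

	state = devres_alloc(devm_foo_release, sizeof(*state), GFP_KERNEL);
	if (!state)
		return ERR_PTR(-ENOMEM);

	obj = foo_get(dev);
	if (IS_ERR(obj)) {
		devres_free(state);	/* nothing registered yet, plain free */
		return obj;
	}

	state->obj = obj;
	devres_add(dev, state);		/* arm the automatic release */
	return obj;
}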
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 8b343e59dc61..4ef027f62880 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -211,8 +211,9 @@ void __init of_fixed_factor_clk_setup(struct device_node *node)
{
_of_fixed_factor_clk_setup(node);
}
-CLK_OF_DECLARE(fixed_factor_clk, "fixed-factor-clock",
- of_fixed_factor_clk_setup);
+
+CLK_OF_DECLARE_DRIVER(fixed_factor_clk, "fixed-factor-clock",
+ of_fixed_factor_clk_setup);
static int of_fixed_factor_clk_remove(struct platform_device *pdev)
{
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 1b99fc962745..289fc4d959ab 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -56,7 +56,7 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable)
{
struct clk_gate *gate = to_clk_gate(hw);
int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
- unsigned long uninitialized_var(flags);
+ unsigned long flags;
u32 reg;
set ^= enable;
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
index 27a86b7a34db..c82df105b0a2 100644
--- a/drivers/clk/clk-npcm7xx.c
+++ b/drivers/clk/clk-npcm7xx.c
@@ -647,7 +647,7 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
return;
npcm7xx_init_fail:
- kfree(npcm7xx_clk_data->hws);
+ kfree(npcm7xx_clk_data);
npcm7xx_init_np_err:
iounmap(clk_base);
npcm7xx_init_error:
diff --git a/drivers/clk/clk-oxnas.c b/drivers/clk/clk-oxnas.c
index 78d5ea669fea..2fe36f579ac5 100644
--- a/drivers/clk/clk-oxnas.c
+++ b/drivers/clk/clk-oxnas.c
@@ -207,7 +207,7 @@ static const struct of_device_id oxnas_stdclk_dt_ids[] = {
static int oxnas_stdclk_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node, *parent_np;
const struct oxnas_stdclk_data *data;
const struct of_device_id *id;
struct regmap *regmap;
@@ -219,7 +219,9 @@ static int oxnas_stdclk_probe(struct platform_device *pdev)
return -ENODEV;
data = id->data;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "failed to have parent regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index e3cdb4a282fe..d68de3773eb1 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -170,6 +170,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
sclk->info = handle->clk_ops->info_get(handle, idx);
if (!sclk->info) {
dev_dbg(dev, "invalid clock info for idx %d\n", idx);
+ devm_kfree(dev, sclk);
continue;
}
diff --git a/drivers/clk/clk-si5324.c b/drivers/clk/clk-si5324.c
new file mode 100644
index 000000000000..7cfe75d7e6a4
--- /dev/null
+++ b/drivers/clk/clk-si5324.c
@@ -0,0 +1,1227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * clk-si5324.c - Si5324 clock driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Venkateshwar Rao G <vgannava@xilinx.com>
+ * Leon Woestenberg <leon@sidebranch.com>
+ */
+
+#include <asm/div64.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/rational.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "clk-si5324.h"
+#include "si5324.h"
+#include "si5324drv.h"
+
+struct si5324_driver_data;
+
+/**
+ * struct si5324_parameters - si5324 core parameters
+ *
+ * @n1_hs_min: Minimum high-speed n1 output divider
+ * @n1_hs_max: Maximum high-speed n1 output divider
+ * @n1_hs: n1 high-speed output divider
+ * @nc1_ls_min: Minimum low-speed clkout1 output divider
+ * @nc1_ls_max: Maximum low-speed clkout1 output divider
+ * @nc1_ls: Clkout1 low-speed output divider
+ * @nc2_ls_min: Minimum low-speed clkout2 output divider
+ * @nc2_ls_max: Maximum low-speed clkout2 output divider
+ * @nc2_ls: Clkout2 low-speed output divider
+ * @n2_hs: High-speed feedback divider
+ * @n2_ls_min: Minimum low-speed feedback divider
+ * @n2_ls_max: Maximum low-speed feedback divider
+ * @n2_ls: Low-speed feedback divider
+ * @n31_min: Minimum input divider for clk1
+ * @n31_max: Maximum input divider for clk1
+ * @n31: Input divider for clk1
+ * @n32_min: Minimum input divider for clk2
+ * @n32_max: Maximum input divider for clk2
+ * @n32: Input divider for clk2
+ * @fin: Input frequency
+ * @fout: Output frequency
+ * @fosc: Oscillator frequency
+ * @best_delta_fout: Best delta of the output frequency
+ * @best_fout: Best output frequency
+ * @best_n1_hs: Best high speed output divider
+ * @best_nc1_ls: Best low speed clkout1 divider
+ * @best_n2_hs: Best high speed feedback divider
+ * @best_n2_ls: Best low speed feedback divider
+ * @best_n3: Best input clock divider
+ * @valid: Validity of the stored divider parameters
+ */
+struct si5324_parameters {
+ u32 n1_hs_min;
+ u32 n1_hs_max;
+ u32 n1_hs;
+ u32 nc1_ls_min;
+ u32 nc1_ls_max;
+ u32 nc1_ls;
+ u32 nc2_ls_min;
+ u32 nc2_ls_max;
+ u32 nc2_ls;
+ u32 n2_hs;
+ u32 n2_ls_min;
+ u32 n2_ls_max;
+ u32 n2_ls;
+ u32 n31_min;
+ u32 n31_max;
+ u32 n31;
+ u32 n32_min;
+ u32 n32_max;
+ u32 n32;
+ u64 fin;
+ u64 fout;
+ u64 fosc;
+ u64 best_delta_fout;
+ u64 best_fout;
+ u32 best_n1_hs;
+ u32 best_nc1_ls;
+ u32 best_n2_hs;
+ u32 best_n2_ls;
+ u32 best_n3;
+ int valid;
+};
+
+/**
+ * struct si5324_hw_data - Clock parameters
+ *
+ * @hw: Handle between common and hardware-specific interfaces
+ * @drvdata: Driver private data
+ * @num: Differential pair clock number
+ */
+struct si5324_hw_data {
+ struct clk_hw hw;
+ struct si5324_driver_data *drvdata;
+ unsigned char num;
+};
+
+/**
+ * struct si5324_driver_data - Driver parameters
+ * @client: I2C client pointer
+ * @regmap: Device's regmap
+ * @onecell: Clock onecell data
+ * @params: Device parameters
+ * @pxtal: Xtal input parent clock
+ * @pxtal_name: Xtal input parent clock name
+ * @xtal: Reference clock
+ * @pclkin1: Clkin1 input parent clock
+ * @pclkin1_name: Clkin1 input parent clock name
+ * @clkin1: Differential input clock 1
+ * @pclkin2: Clkin2 input parent clock
+ * @pclkin2_name: Clkin2 input parent clock name
+ * @clkin2: Differential input clock 2
+ * @pll: Pll clock
+ * @clkout: Output clock
+ * @rate_clkout0: Clock out 0 rate
+ * @rate_clkout1: Clock out 1 rate
+ */
+struct si5324_driver_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct clk_onecell_data onecell;
+ struct si5324_parameters params;
+ struct clk *pxtal;
+ const char *pxtal_name;
+ struct clk_hw xtal;
+ struct clk *pclkin1;
+ const char *pclkin1_name;
+ struct clk_hw clkin1;
+ struct clk *pclkin2;
+ const char *pclkin2_name;
+ struct clk_hw clkin2;
+ struct si5324_hw_data pll;
+ struct si5324_hw_data *clkout;
+ unsigned long rate_clkout0;
+ unsigned long rate_clkout1;
+};
+
+static const char * const si5324_input_names[] = {
+ "xtal", "clkin1", "clkin2"
+};
+
+static const char * const si5324_pll_name = "pll";
+
+static const char * const si5324_clkout_names[] = {
+ "clk0", "clk1"
+};
+
+enum si53xx_variant {
+ si5319,
+ si5324,
+ si5328
+};
+
+static const char * const si53xx_variant_name[] = {
+ "si5319", "si5324", "si5328"
+};
+
+/**
+ * si5324_reg_read - Read a single si5324 register.
+ *
+ * @drvdata: Device to read from.
+ * @reg: Register to read.
+ *
+ * This function reads data from a single register
+ *
+ * Return: Data of the register on success, error number on failure
+ */
+static inline int
+si5324_reg_read(struct si5324_driver_data *drvdata, u8 reg)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(drvdata->regmap, reg, &val);
+ if (ret < 0) {
+ dev_err(&drvdata->client->dev,
+ "unable to read from reg%02x\n", reg);
+ return ret;
+ }
+
+ return (u8)val;
+}
+
+/**
+ * si5324_bulk_read - Read multiple si5324 registers
+ *
+ * @drvdata: Device to read from
+ * @reg: First register to be read from
+ * @count: Number of registers
+ * @buf: Pointer to store read value
+ *
+ * This function reads from multiple registers that are in
+ * sequential order.
+ *
+ * Return: Zero on success, a negative error number on failure.
+ */
+static inline int si5324_bulk_read(struct si5324_driver_data *drvdata,
+ u8 reg, u8 count, u8 *buf)
+{
+ return regmap_bulk_read(drvdata->regmap, reg, buf, count);
+}
+
+/**
+ * si5324_reg_write - Write a single si5324 register.
+ *
+ * @drvdata: Device to write to.
+ * @reg: Register to write to.
+ * @val: Value to write.
+ *
+ * This function writes into a single register
+ *
+ * Return: Zero on success, a negative error number on failure.
+ *
+ */
+static inline int si5324_reg_write(struct si5324_driver_data *drvdata,
+ u8 reg, u8 val)
+{
+ int ret = regmap_write(drvdata->regmap, reg, val);
+
+ dev_dbg(&drvdata->client->dev, "%s 0x%02x @%02d\n", __func__,
+ (int)val, (int)reg);
+ return ret;
+}
+
+/**
+ * si5324_bulk_write - Write into multiple si5324 registers
+ *
+ * @drvdata: Device to write to
+ * @reg: First register
+ * @count: Number of registers
+ * @buf: Block of data to be written
+ *
+ * This function writes into multiple registers.
+ *
+ * Return: Zero on success, a negative error number on failure.
+ */
+static inline int si5324_bulk_write(struct si5324_driver_data *drvdata,
+ u8 reg, u8 count, const u8 *buf)
+{
+ return regmap_raw_write(drvdata->regmap, reg, buf, count);
+}
+
+/**
+ * si5324_set_bits - Set the value of a bitfield in a si5324 register
+ *
+ * @drvdata: Device to write to.
+ * @reg: Register to write to.
+ * @mask: Mask of bits to set.
+ * @val: Value to set (unshifted)
+ *
+ * This function sets particular bits in a register.
+ *
+ * Return: Zero on success, a negative error number on failure.
+ */
+static inline int si5324_set_bits(struct si5324_driver_data *drvdata,
+ u8 reg, u8 mask, u8 val)
+{
+ return regmap_update_bits(drvdata->regmap, reg, mask, val);
+}
+
+/**
+ * si5324_bulk_scatter_write - Write into multiple si5324 registers
+ *
+ * @drvdata: Device to write to
+ * @count: Number of registers
+ * @buf: Register and data to write
+ *
+ * This function writes into multiple registers which need not be
+ * in sequential order. @buf holds interleaved register/value pairs.
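+ *
+ * For example (illustrative values only), buf = { 0x02, 0x22, 0x88, 0x40 }
+ * with count = 2 writes 0x22 to register 0x02 and 0x40 to register 0x88.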
+ *
+ * Return: Zero on success, a negative error number on failure.
+ */
+static inline int
+si5324_bulk_scatter_write(struct si5324_driver_data *drvdata,
+ u8 count, const u8 *buf)
+{
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < count; i++) {
+ ret = si5324_reg_write(drvdata, buf[i * 2], buf[i * 2 + 1]);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+/**
+ * si5324_initialize - Initializes si5324 device
+ *
+ * @drvdata: Device instance
+ *
+ * This function initializes the si5324 with the following settings
+ * after keeping reset asserted for 20ms:
+ * 1. Free-run mode
+ * 2. Disable output clocks during calibration
+ * 3. Clock selection mode: default value, manual
+ * 4. Output signal format: LVDS for clkout1, disable clkout2
+ * 5. CS_CA pin is ignored
+ * 6. Set lock time to 13.3ms
+ * 7. Enable fastlock
+ *
+ * Return: Zero on success, negative number on failure.
+ */
+static int si5324_initialize(struct si5324_driver_data *drvdata)
+{
+ int ret = 0;
+
+ si5324_set_bits(drvdata, SI5324_RESET_CALIB,
+ SI5324_RST_ALL, SI5324_RST_ALL);
+ msleep(SI5324_RESET_DELAY_MS);
+ si5324_set_bits(drvdata, SI5324_RESET_CALIB, SI5324_RST_ALL, 0);
+ msleep(SI5324_RESET_DELAY_MS);
+
+ ret = si5324_reg_read(drvdata, SI5324_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_CONTROL,
+ (ret | SI5324_CONTROL_FREE_RUN));
+
+ ret = si5324_reg_read(drvdata, SI5324_CKSEL);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_CKSEL, (ret | SI5324_CKSEL_SQL_ICAL));
+	si5324_reg_write(drvdata, SI5324_AUTOSEL, SI5324_AUTOSEL_DEF);
+ si5324_reg_write(drvdata, SI5324_OUTPUT_SIGFMT,
+ SI5324_OUTPUT_SF1_DEFAULT);
+
+ ret = si5324_reg_read(drvdata, SI5324_DSBL_CLKOUT);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_DSBL_CLKOUT,
+ (ret | SI5324_DSBL_CLKOUT2));
+ ret = si5324_reg_read(drvdata, SI5324_POWERDOWN);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_POWERDOWN, (ret | SI5324_PD_CK2));
+ si5324_reg_write(drvdata, SI5324_FOS_LOCKT, SI5324_FOS_DEFAULT);
+
+ ret = si5324_reg_read(drvdata, SI5324_CK_ACTV_SEL);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_CK_ACTV_SEL, SI5324_CK_DEFAULT);
+ ret = si5324_reg_read(drvdata, SI5324_FASTLOCK);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_FASTLOCK, (ret | SI5324_FASTLOCK_EN));
+ return 0;
+}
+
+/**
+ * si5324_read_parameters - Reads clock divider parameters
+ *
+ * @drvdata: Device to read from
+ *
+ * This function reads the clock divider parameters into the driver
+ * structure.
+ *
+ * The following table gives the buffer index, register number and
+ * register name with bit fields:
+ * 0 25 N1_HS[2:0]
+ * 6 31 NC1_LS[19:16]
+ * 7 32 NC1_LS[15:8]
+ * 8 33 NC1_LS[7:0]
+ * 9 34 NC2_LS[19:16]
+ * 10 35 NC2_LS[15:8]
+ * 11 36 NC2_LS[7:0]
+ * 15 40 N2_HS[2:0] N2_LS[19:16]
+ * 16 41 N2_LS[15:8]
+ * 17 42 N2_LS[7:0]
+ * 18 43 N31[18:16]
+ * 19 44 N31[15:8]
+ * 20 45 N31[7:0]
+ * 21 46 N32[18:16]
+ * 22 47 N32[15:8]
+ * 23 48 N32[7:0]
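+ *
+ * For example (illustrative register values): if registers 31..33 read
+ * 0x01, 0x00, 0x00, then NC1_LS = (0x01 << 16) | (0x00 << 8) | 0x00 =
+ * 0x10000, and the effective divider is NC1_LS + 1 = 65537.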
+ */
+static void si5324_read_parameters(struct si5324_driver_data *drvdata)
+{
+ u8 buf[SI5324_PARAM_LEN];
+
+ si5324_bulk_read(drvdata, SI5324_N1_HS, SI5324_N1_PARAM_LEN, &buf[0]);
+ si5324_bulk_read(drvdata, SI5324_NC1_LS_H, SI5324_NC_PARAM_LEN,
+ &buf[6]);
+ si5324_bulk_read(drvdata, SI5324_N2_HS_LS_H, SI5324_N2_PARAM_LEN,
+ &buf[15]);
+
+ drvdata->params.n1_hs = (buf[0] >> SI5324_N1_HS_VAL_SHIFT);
+ drvdata->params.n1_hs += 4;
+
+ drvdata->params.nc1_ls = ((buf[6] & SI5324_DIV_LS_MASK) <<
+ SI5324_HSHIFT) | (buf[7] << SI5324_LSHIFT) |
+ buf[8];
+ drvdata->params.nc1_ls += 1;
+ drvdata->params.nc2_ls = ((buf[9] & SI5324_DIV_LS_MASK) <<
+ SI5324_HSHIFT) | (buf[10] << SI5324_LSHIFT) |
+ buf[11];
+ drvdata->params.nc2_ls += 1;
+ drvdata->params.n2_ls = ((buf[15] & SI5324_DIV_LS_MASK) <<
+ SI5324_HSHIFT) | (buf[16] << SI5324_LSHIFT) |
+ buf[17];
+ drvdata->params.n2_ls += 1;
+ drvdata->params.n2_hs = buf[15] >> SI5324_N2_HS_LS_H_VAL_SHIFT;
+ drvdata->params.n2_hs += 4;
+ drvdata->params.n31 = ((buf[18] & SI5324_DIV_LS_MASK) <<
+ SI5324_HSHIFT) | (buf[19] << SI5324_LSHIFT) |
+ buf[20];
+ drvdata->params.n31 += 1;
+ drvdata->params.n32 = ((buf[21] & SI5324_DIV_LS_MASK) <<
+ SI5324_HSHIFT) | (buf[22] << SI5324_LSHIFT) |
+ buf[23];
+ drvdata->params.n32 += 1;
+ drvdata->params.valid = 1;
+}
+
+static bool si5324_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ return true;
+}
+
+/**
+ * si5324_regmap_is_readable - Check whether a register is readable
+ *
+ * @dev: Registered device
+ * @reg: Register offset
+ *
+ * Checks whether the given register is readable.
+ *
+ * Return: True if the register is readable, false otherwise.
+ */
+static bool si5324_regmap_is_readable(struct device *dev, unsigned int reg)
+{
+ if ((reg > SI5324_POWERDOWN && reg < SI5324_FOS_LOCKT) ||
+ (reg > SI5324_N1_HS && reg < SI5324_NC1_LS_H) ||
+ (reg > SI5324_NC2_LS_L && reg < SI5324_N2_HS_LS_H) ||
+ (reg > SI5324_N32_CLKIN_L && reg < SI5324_FOS_CLKIN_RATE) ||
+ (reg > SI5324_FOS_CLKIN_RATE && reg < SI5324_PLL_ACTV_CLK) ||
+ reg > SI5324_SKEW2)
+ return false;
+
+ return true;
+}
+
+/**
+ * si5324_regmap_is_writeable - Check whether a register is writable
+ *
+ * @dev: Registered device
+ * @reg: Register offset
+ *
+ * Checks whether the given register is writable.
+ *
+ * Return: True if the register is writable, false otherwise.
+ */
+static bool si5324_regmap_is_writeable(struct device *dev, unsigned int reg)
+{
+ if ((reg > SI5324_POWERDOWN && reg < SI5324_FOS_LOCKT) ||
+ (reg > SI5324_N1_HS && reg < SI5324_NC1_LS_H) ||
+ (reg > SI5324_NC2_LS_L && reg < SI5324_N2_HS_LS_H) ||
+ (reg > SI5324_N32_CLKIN_L && reg < SI5324_FOS_CLKIN_RATE) ||
+ (reg > SI5324_FOS_CLKIN_RATE && reg < SI5324_PLL_ACTV_CLK) ||
+ reg > SI5324_SKEW2 ||
+ (reg >= SI5324_PLL_ACTV_CLK && reg <= SI5324_CLKIN_LOL_STATUS) ||
+ (reg >= SI5324_PARTNO_H && reg <= SI5324_PARTNO_L))
+ return false;
+
+ return true;
+}
+
+static const struct regmap_config si5324_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = 144,
+ .writeable_reg = si5324_regmap_is_writeable,
+ .readable_reg = si5324_regmap_is_readable,
+ .volatile_reg = si5324_regmap_is_volatile,
+};
+
+static int si5324_xtal_prepare(struct clk_hw *hw)
+{
+ return 0;
+}
+
+static void si5324_xtal_unprepare(struct clk_hw *hw)
+{
+}
+
+static const struct clk_ops si5324_xtal_ops = {
+ .prepare = si5324_xtal_prepare,
+ .unprepare = si5324_xtal_unprepare,
+};
+
+/**
+ * si5324_clkin_prepare - Prepare the clkin
+ *
+ * @hw: Handle between common and hardware-specific interfaces
+ *
+ * This function enables the selected input clock.
+ *
+ * Return: Zero on success, a negative error number on failure.
+ */
+static int si5324_clkin_prepare(struct clk_hw *hw)
+{
+ int ret = 0;
+ struct si5324_driver_data *drvdata;
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+
+ if (hwdata->num == SI5324_CLKIN1) {
+ drvdata = container_of(hw, struct si5324_driver_data, clkin1);
+ ret = si5324_set_bits(drvdata, SI5324_CONTROL,
+ SI5324_CONTROL_FREE_RUN, 0);
+ ret = si5324_set_bits(drvdata, SI5324_POWERDOWN, SI5324_PD_CK1 |
+ SI5324_PD_CK2, SI5324_PD_CK2);
+ } else if (hwdata->num == SI5324_CLKIN2) {
+ drvdata = container_of(hw, struct si5324_driver_data, clkin2);
+ ret = si5324_set_bits(drvdata, SI5324_CONTROL,
+ SI5324_CONTROL_FREE_RUN, 0);
+ ret = si5324_set_bits(drvdata, SI5324_POWERDOWN, SI5324_PD_CK1 |
+ SI5324_PD_CK2, SI5324_PD_CK1);
+ }
+
+ return ret;
+}
+
+/**
+ * si5324_clkin_unprepare - Unprepare the clkin
+ *
+ * @hw: Clock hardware
+ *
+ * This function disables the selected input clock.
+ */
+static void si5324_clkin_unprepare(struct clk_hw *hw)
+{
+ struct si5324_driver_data *drvdata;
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+
+ if (hwdata->num == SI5324_CLKIN1) {
+ drvdata = container_of(hw, struct si5324_driver_data, clkin1);
+ si5324_set_bits(drvdata, SI5324_POWERDOWN,
+ SI5324_PD_CK1 | SI5324_PD_CK2, SI5324_PD_CK1);
+ } else if (hwdata->num == SI5324_CLKIN2) {
+ drvdata = container_of(hw, struct si5324_driver_data, clkin2);
+		si5324_set_bits(drvdata, SI5324_POWERDOWN,
+				SI5324_PD_CK1 | SI5324_PD_CK2, SI5324_PD_CK2);
+ }
+}
+
+static unsigned long si5324_clkin_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+static const struct clk_ops si5324_clkin_ops = {
+ .prepare = si5324_clkin_prepare,
+ .unprepare = si5324_clkin_unprepare,
+ .recalc_rate = si5324_clkin_recalc_rate,
+};
+
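+/*
+ * Route the PLL input to the xtal (free-run mode), clkin1 or clkin2 by
+ * updating the free-run, clock-powerdown and clock-select register
+ * fields accordingly.
+ */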
+static int si5324_pll_reparent(struct si5324_driver_data *drvdata,
+ int num, enum si5324_pll_src parent)
+{
+ if (parent == SI5324_PLL_SRC_XTAL) {
+ si5324_set_bits(drvdata, SI5324_CONTROL,
+ SI5324_CONTROL_FREE_RUN,
+ SI5324_CONTROL_FREE_RUN);
+ si5324_set_bits(drvdata, SI5324_POWERDOWN,
+ SI5324_PD_CK1 | SI5324_PD_CK2, SI5324_PD_CK1);
+ si5324_set_bits(drvdata, SI5324_CKSEL,
+ SI5324_CK_SEL << SI5324_CKSEL_SHIFT,
+ 1 << SI5324_CKSEL_SHIFT);
+ } else if (parent == SI5324_PLL_SRC_CLKIN1) {
+ si5324_set_bits(drvdata, SI5324_CONTROL,
+ SI5324_CONTROL_FREE_RUN, 0);
+ si5324_set_bits(drvdata, SI5324_POWERDOWN,
+ SI5324_PD_CK1 | SI5324_PD_CK2, SI5324_PD_CK2);
+ si5324_set_bits(drvdata, SI5324_CKSEL,
+ SI5324_CK_SEL << SI5324_CKSEL_SHIFT, 0);
+ } else if (parent == SI5324_PLL_SRC_CLKIN2) {
+ si5324_set_bits(drvdata, SI5324_CONTROL,
+ SI5324_CONTROL_FREE_RUN, 0);
+ si5324_set_bits(drvdata, SI5324_POWERDOWN,
+ SI5324_PD_CK1 | SI5324_PD_CK2, SI5324_PD_CK1);
+ si5324_set_bits(drvdata, SI5324_CKSEL,
+ SI5324_CK_SEL << SI5324_CKSEL_SHIFT,
+ 1 << SI5324_CKSEL_SHIFT);
+ }
+
+ return 0;
+}
+
+static unsigned char si5324_pll_get_parent(struct clk_hw *hw)
+{
+ return 0;
+}
+
+/**
+ * si5324_pll_set_parent - Set parent of clock
+ *
+ * @hw: Handle between common and hardware-specific interfaces
+ * @index: Parent index
+ *
+ * This function sets the parent of the clock.
+ *
+ * Return: 0 on success, negative error number on failure
+ */
+static int si5324_pll_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+ enum si5324_pll_src parent;
+
+ if (index == SI5324_SRC_XTAL)
+ parent = SI5324_PLL_SRC_XTAL;
+ else if (index == SI5324_SRC_CLKIN1)
+ parent = SI5324_PLL_SRC_CLKIN1;
+ else if (index == SI5324_SRC_CLKIN2)
+ parent = SI5324_PLL_SRC_CLKIN2;
+ else
+ return -EINVAL;
+
+ return si5324_pll_reparent(hwdata->drvdata, hwdata->num, parent);
+}
+
+/**
+ * si5324_pll_recalc_rate - Recalculate clock frequency
+ *
+ * @hw: Handle between common and hardware-specific interfaces
+ * @parent_rate: Clock frequency of parent clock
+ *
+ * This function recalculates the clock frequency.
+ *
+ * Return: Current clock frequency
+ */
+static unsigned long si5324_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long rate;
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+
+ if (!hwdata->drvdata->params.valid)
+ si5324_read_parameters(hwdata->drvdata);
+ WARN_ON(!hwdata->drvdata->params.valid);
+
+ rate = parent_rate * hwdata->drvdata->params.n2_ls *
+ hwdata->drvdata->params.n2_hs;
+
+ dev_dbg(&hwdata->drvdata->client->dev,
+ "%s - %s: n2_ls = %u, n2_hs = %u, parent_rate = %lu, rate = %lu\n",
+ __func__, clk_hw_get_name(hw),
+ hwdata->drvdata->params.n2_ls, hwdata->drvdata->params.n2_hs,
+ parent_rate, (unsigned long)rate);
+
+ return rate;
+}
+
+static long si5324_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
+static int si5324_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+static const struct clk_ops si5324_pll_ops = {
+ .set_parent = si5324_pll_set_parent,
+ .get_parent = si5324_pll_get_parent,
+ .recalc_rate = si5324_pll_recalc_rate,
+ .round_rate = si5324_pll_round_rate,
+ .set_rate = si5324_pll_set_rate,
+};
+
+static int si5324_clkout_set_drive_strength(
+ struct si5324_driver_data *drvdata, int num,
+ enum si5324_drive_strength drive)
+{
+ return 0;
+}
+
+static int si5324_clkout_prepare(struct clk_hw *hw)
+{
+ return 0;
+}
+
+static void si5324_clkout_unprepare(struct clk_hw *hw)
+{
+}
+
+static unsigned long si5324_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long rate;
+
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+
+ rate = hwdata->drvdata->rate_clkout0;
+
+ return rate;
+}
+
+/**
+ * si5324_clkout_round_rate - Select the rate closest to the requested one
+ *
+ * @hw: Handle between common and hardware-specific interfaces
+ * @rate: Clock rate
+ * @parent_rate: Parent clock rate
+ *
+ * This function selects the rate closest to the requested one.
+ *
+ * Return: Clock rate on success, negative error number on failure
+ */
+static long si5324_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ u32 ncn_ls, n2_ls, n3n, actual_rate;
+ u8 n1_hs, n2_hs, bwsel;
+ int ret;
+
+ ret = si5324_calcfreqsettings(SI5324_REF_CLOCK, rate, &actual_rate,
+ &n1_hs, &ncn_ls, &n2_hs, &n2_ls, &n3n,
+ &bwsel);
+ if (ret < 0)
+ return ret;
+
+ return actual_rate;
+}
+
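+/*
+ * Program the divider chain for the requested rate: compute the divider
+ * settings with si5324_calcfreqsettings(), stage them in buf[] as
+ * register/value pairs, and push them in a single scatter write that
+ * finishes by triggering self-calibration (SI5324_CALIB_EN).
+ */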
+static int si5324_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+
+ u32 ncn_ls, n2_ls, n3n, actual_rate;
+ u8 n1_hs, n2_hs, bwsel, buf[SI5324_OUT_REGS * 2];
+ int i, ret, rc;
+
+ ret = si5324_calcfreqsettings(SI5324_REF_CLOCK, rate, &actual_rate,
+ &n1_hs, &ncn_ls, &n2_hs, &n2_ls, &n3n,
+ &bwsel);
+ if (ret < 0)
+ return ret;
+
+ hwdata->drvdata->rate_clkout0 = rate;
+ i = 0;
+
+ /* Enable Free running mode */
+ buf[i] = SI5324_CONTROL;
+ buf[i + 1] = SI5324_FREE_RUN_EN;
+ i += 2;
+
+ /* Loop bandwidth */
+ buf[i] = SI5324_BWSEL;
+ buf[i + 1] = (bwsel << SI5324_BWSEL_SHIFT) | SI5324_BWSEL_DEF_VAL;
+ i += 2;
+
+ /* Enable reference clock 2 in free running mode */
+ buf[i] = SI5324_POWERDOWN;
+ /* Enable input clock 2, Disable input clock 1 */
+ buf[i + 1] = SI5324_PD_CK1_DIS;
+ i += 2;
+
+ /* N1_HS */
+ buf[i] = SI5324_N1_HS;
+ buf[i + 1] = n1_hs << SI5324_N1_HS_VAL_SHIFT;
+ i += 2;
+
+ /* NC1_LS */
+ buf[i] = SI5324_NC1_LS_H;
+ buf[i + 1] = (u8)((ncn_ls & 0x000F0000) >> 16);
+ buf[i + 2] = SI5324_NC1_LS_M;
+ buf[i + 3] = (u8)((ncn_ls & 0x0000FF00) >> 8);
+ buf[i + 4] = SI5324_NC1_LS_L;
+ buf[i + 5] = (u8)(ncn_ls & 0x000000FF);
+ i += 6;
+
+ /* N2_HS and N2_LS */
+ buf[i] = SI5324_N2_HS_LS_H;
+ buf[i + 1] = (n2_hs << SI5324_N2_HS_LS_H_VAL_SHIFT);
+ buf[i + 1] |= (u8)((n2_ls & 0x000F0000) >> 16);
+ buf[i + 2] = SI5324_N2_LS_H;
+ buf[i + 3] = (u8)((n2_ls & 0x0000FF00) >> 8);
+ buf[i + 4] = SI5324_N2_LS_L;
+ buf[i + 5] = (u8)(n2_ls & 0x000000FF);
+ i += 6;
+
+ /* N32 (CLKIN2 or XTAL in FREERUNNING mode) */
+ buf[i] = SI5324_N32_CLKIN_H;
+ buf[i + 2] = SI5324_N32_CLKIN_M;
+ buf[i + 4] = SI5324_N32_CLKIN_L;
+ buf[i + 1] = (u8)((n3n & 0x00070000) >> 16);
+ buf[i + 3] = (u8)((n3n & 0x0000FF00) >> 8);
+ buf[i + 5] = (u8)(n3n & 0x000000FF);
+ i += 6;
+
+ /* Start calibration */
+ buf[i] = SI5324_RESET_CALIB;
+ buf[i + 1] = SI5324_CALIB_EN;
+ i += 2;
+
+ hwdata->drvdata->params.valid = 0;
+ rc = si5324_bulk_scatter_write(hwdata->drvdata, SI5324_OUT_REGS, buf);
+
+ return rc;
+}
+
+static const struct clk_ops si5324_clkout_ops = {
+ .prepare = si5324_clkout_prepare,
+ .unprepare = si5324_clkout_unprepare,
+ .recalc_rate = si5324_clkout_recalc_rate,
+ .round_rate = si5324_clkout_round_rate,
+ .set_rate = si5324_clkout_set_rate,
+};
+
+static const struct of_device_id si5324_dt_ids[] = {
+ { .compatible = "silabs,si5319" },
+ { .compatible = "silabs,si5324" },
+ { .compatible = "silabs,si5328" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, si5324_dt_ids);
+
+static int si5324_dt_parse(struct i2c_client *client)
+{
+ struct device_node *child, *np = client->dev.of_node;
+ struct si5324_platform_data *pdata;
+ struct property *prop;
+ const __be32 *p;
+ int num = 0;
+ u32 val;
+
+ if (!np)
+ return 0;
+
+ pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ /*
+ * property silabs,pll-source : <num src>, [<..>]
+	 * allows the pll source to be set selectively
+ */
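+	/*
+	 * Example (illustrative only):
+	 *   silabs,pll-source = <0 2>;
+	 * selects clkin2 as the source for pll 0.
+	 */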
+ of_property_for_each_u32(np, "silabs,pll-source", prop, p, num) {
+ if (num >= 1) {
+ dev_err(&client->dev,
+ "invalid pll %d on pll-source prop\n", num);
+ return -EINVAL;
+ }
+ p = of_prop_next_u32(prop, p, &val);
+ if (!p) {
+ dev_err(&client->dev,
+ "missing pll-source for pll %d\n", num);
+ return -EINVAL;
+ }
+
+ switch (val) {
+ case 0:
+ dev_dbg(&client->dev, "using xtal as parent for pll\n");
+ pdata->pll_src = SI5324_PLL_SRC_XTAL;
+ break;
+ case 1:
+ dev_dbg(&client->dev,
+ "using clkin1 as parent for pll\n");
+ pdata->pll_src = SI5324_PLL_SRC_CLKIN1;
+ break;
+ case 2:
+ dev_dbg(&client->dev,
+ "using clkin2 as parent for pll\n");
+ pdata->pll_src = SI5324_PLL_SRC_CLKIN2;
+ break;
+ default:
+ dev_err(&client->dev,
+ "invalid parent %d for pll %d\n", val, num);
+ return -EINVAL;
+ }
+ }
+ /* per clkout properties */
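+	/*
+	 * Example child node (illustrative only):
+	 *   clk0 {
+	 *       reg = <0>;
+	 *       clock-frequency = <148500000>;
+	 *   };
+	 */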
+ for_each_child_of_node(np, child) {
+ if (of_property_read_u32(child, "reg", &num)) {
+ dev_err(&client->dev, "missing reg property of %s\n",
+ child->name);
+ goto put_child;
+ }
+
+ if (num >= 2) {
+ dev_err(&client->dev, "invalid clkout %d\n", num);
+ goto put_child;
+ }
+
+ if (!of_property_read_u32(child, "silabs,drive-strength",
+ &val)) {
+ switch (val) {
+ case SI5324_DRIVE_2MA:
+ case SI5324_DRIVE_4MA:
+ case SI5324_DRIVE_6MA:
+ case SI5324_DRIVE_8MA:
+ pdata->clkout[num].drive = val;
+ break;
+ default:
+ dev_err(&client->dev,
+ "invalid drive strength %d for clkout %d\n",
+ val, num);
+ goto put_child;
+ }
+ }
+
+ if (!of_property_read_u32(child, "clock-frequency", &val)) {
+ dev_dbg(&client->dev, "clock-frequency = %u\n", val);
+ pdata->clkout[num].rate = val;
+ } else {
+ dev_err(&client->dev,
+ "missing clock-frequency property of %s\n",
+ child->name);
+ goto put_child;
+ }
+ }
+ client->dev.platform_data = pdata;
+
+ return 0;
+put_child:
+ of_node_put(child);
+ return -EINVAL;
+}
+
+static u8 instance;
+
+static int si5324_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct si5324_platform_data *pdata;
+ struct si5324_driver_data *drvdata;
+ struct clk_init_data init;
+ struct clk *clk;
+ const char *parent_names[3];
+ char inst_names[NUM_NAME_IDS][MAX_NAME_LEN];
+ u8 num_parents, num_clocks;
+ int ret, n;
+ enum si53xx_variant variant = id->driver_data;
+
+ if (variant > si5328) {
+ dev_err(&client->dev, "si53xx device not present\n");
+ return -ENODEV;
+ }
+
+ dev_info(&client->dev, "%s probed\n", si53xx_variant_name[variant]);
+ ret = si5324_dt_parse(client);
+ if (ret)
+ return ret;
+
+ pdata = client->dev.platform_data;
+ if (!pdata)
+ return -EINVAL;
+
+ drvdata = devm_kzalloc(&client->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->client = client;
+ drvdata->pxtal = devm_clk_get(&client->dev, "xtal");
+ drvdata->pclkin1 = devm_clk_get(&client->dev, "clkin1");
+ drvdata->pclkin2 = devm_clk_get(&client->dev, "clkin2");
+
+ if (PTR_ERR(drvdata->pxtal) == -EPROBE_DEFER ||
+ PTR_ERR(drvdata->pclkin1) == -EPROBE_DEFER ||
+ PTR_ERR(drvdata->pclkin2) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ drvdata->regmap = devm_regmap_init_i2c(client, &si5324_regmap_config);
+ if (IS_ERR(drvdata->regmap)) {
+ dev_err(&client->dev, "failed to allocate register map\n");
+ return PTR_ERR(drvdata->regmap);
+ }
+
+ i2c_set_clientdata(client, drvdata);
+ si5324_initialize(drvdata);
+
+ /* setup input clock configuration */
+ ret = si5324_pll_reparent(drvdata, 0, pdata->pll_src);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to reparent pll to %d\n",
+ pdata->pll_src);
+ return ret;
+ }
+
+ for (n = 0; n < SI5324_MAX_CLKOUTS; n++) {
+ ret = si5324_clkout_set_drive_strength(drvdata, n,
+ pdata->clkout[n].drive);
+ if (ret) {
+ dev_err(&client->dev,
+				"failed to set drive strength of clkout%d to %d\n",
+ n, pdata->clkout[n].drive);
+ return ret;
+ }
+ }
+
+ if (!IS_ERR(drvdata->pxtal))
+ clk_prepare_enable(drvdata->pxtal);
+ if (!IS_ERR(drvdata->pclkin1))
+ clk_prepare_enable(drvdata->pclkin1);
+ if (!IS_ERR(drvdata->pclkin2))
+ clk_prepare_enable(drvdata->pclkin2);
+
+ /* create instance names by appending instance id */
+ for (n = 0; n < SI5324_SRC_CLKS; n++) {
+ sprintf(inst_names[n], "%s_%d", si5324_input_names[n],
+ instance);
+ }
+ sprintf(inst_names[3], "%s_%d", si5324_pll_name, instance);
+ for (n = 0; n < SI5324_MAX_CLKOUTS; n++) {
+ sprintf(inst_names[n + 4], "%s_%d", si5324_clkout_names[n],
+ instance);
+ }
+
+ /* register xtal input clock gate */
+ memset(&init, 0, sizeof(init));
+ init.name = inst_names[0];
+ init.ops = &si5324_xtal_ops;
+ init.flags = 0;
+
+ if (!IS_ERR(drvdata->pxtal)) {
+ drvdata->pxtal_name = __clk_get_name(drvdata->pxtal);
+ init.parent_names = &drvdata->pxtal_name;
+ init.num_parents = 1;
+ }
+ drvdata->xtal.init = &init;
+
+ clk = devm_clk_register(&client->dev, &drvdata->xtal);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n", init.name);
+ ret = PTR_ERR(clk);
+ goto err_clk;
+ }
+
+ /* register clkin1 input clock gate */
+ memset(&init, 0, sizeof(init));
+ init.name = inst_names[1];
+ init.ops = &si5324_clkin_ops;
+ if (!IS_ERR(drvdata->pclkin1)) {
+ drvdata->pclkin1_name = __clk_get_name(drvdata->pclkin1);
+ init.parent_names = &drvdata->pclkin1_name;
+ init.num_parents = 1;
+ }
+
+ drvdata->clkin1.init = &init;
+ clk = devm_clk_register(&client->dev, &drvdata->clkin1);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n",
+ init.name);
+ ret = PTR_ERR(clk);
+ goto err_clk;
+ }
+
+ /* register clkin2 input clock gate */
+ memset(&init, 0, sizeof(init));
+ init.name = inst_names[2];
+ init.ops = &si5324_clkin_ops;
+ if (!IS_ERR(drvdata->pclkin2)) {
+ drvdata->pclkin2_name = __clk_get_name(drvdata->pclkin2);
+ init.parent_names = &drvdata->pclkin2_name;
+ init.num_parents = 1;
+ }
+
+ drvdata->clkin2.init = &init;
+ clk = devm_clk_register(&client->dev, &drvdata->clkin2);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n",
+ init.name);
+ ret = PTR_ERR(clk);
+ goto err_clk;
+ }
+
+	/* Si5324 allows muxing xtal, clkin1 or clkin2 to the PLL input */
+ num_parents = SI5324_SRC_CLKS;
+ parent_names[0] = inst_names[0];
+ parent_names[1] = inst_names[1];
+ parent_names[2] = inst_names[2];
+
+ /* register PLL */
+ drvdata->pll.drvdata = drvdata;
+ drvdata->pll.hw.init = &init;
+ memset(&init, 0, sizeof(init));
+ init.name = inst_names[3];
+ init.ops = &si5324_pll_ops;
+ init.flags = 0;
+ init.flags |= CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ clk = devm_clk_register(&client->dev, &drvdata->pll.hw);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n", init.name);
+ ret = PTR_ERR(clk);
+ goto err_clk;
+ }
+
+ /* register clk out divider */
+ num_clocks = 2;
+ num_parents = 1;
+ parent_names[0] = inst_names[3];
+
+ drvdata->clkout = devm_kzalloc(&client->dev, num_clocks *
+ sizeof(*drvdata->clkout), GFP_KERNEL);
+
+ drvdata->onecell.clk_num = num_clocks;
+ drvdata->onecell.clks = devm_kzalloc(&client->dev,
+ num_clocks *
+ sizeof(*drvdata->onecell.clks),
+ GFP_KERNEL);
+
+ if (WARN_ON(!drvdata->clkout) || !drvdata->onecell.clks) {
+ ret = -ENOMEM;
+ goto err_clk;
+ }
+
+ for (n = 0; n < num_clocks; n++) {
+ drvdata->clkout[n].num = n;
+ drvdata->clkout[n].drvdata = drvdata;
+ drvdata->clkout[n].hw.init = &init;
+ memset(&init, 0, sizeof(init));
+ init.name = inst_names[4 + n];
+ init.ops = &si5324_clkout_ops;
+ init.flags = 0;
+ init.flags |= CLK_SET_RATE_PARENT;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ clk = devm_clk_register(&client->dev, &drvdata->clkout[n].hw);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n",
+ init.name);
+ ret = PTR_ERR(clk);
+ goto err_clk;
+ }
+ /* refer to output clock in onecell */
+ drvdata->onecell.clks[n] = clk;
+
+ /* set initial clkout rate */
+ if (pdata->clkout[n].rate != 0) {
+ ret = clk_set_rate(clk, pdata->clkout[n].rate);
+ if (ret != 0) {
+ dev_err(&client->dev, "Cannot set rate : %d\n",
+ ret);
+ }
+ }
+ }
+
+ ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
+ &drvdata->onecell);
+ if (ret) {
+ dev_err(&client->dev, "unable to add clk provider\n");
+ goto err_clk;
+ }
+
+ dev_info(&client->dev, "%s probe successful\n",
+ si53xx_variant_name[variant]);
+ instance++;
+ return 0;
+
+err_clk:
+ if (!IS_ERR(drvdata->pxtal))
+ clk_disable_unprepare(drvdata->pxtal);
+ if (!IS_ERR(drvdata->pclkin1))
+ clk_disable_unprepare(drvdata->pclkin1);
+ if (!IS_ERR(drvdata->pclkin2))
+ clk_disable_unprepare(drvdata->pclkin2);
+
+ return ret;
+}
+
+static int si5324_i2c_remove(struct i2c_client *client)
+{
+ of_clk_del_provider(client->dev.of_node);
+ return 0;
+}
+
+static const struct i2c_device_id si5324_i2c_ids[] = {
+ { "si5319", si5319 },
+ { "si5324", si5324 },
+ { "si5328", si5328 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si5324_i2c_ids);
+
+static struct i2c_driver si5324_driver = {
+ .driver = {
+ .name = "si5324",
+ .of_match_table = of_match_ptr(si5324_dt_ids),
+ },
+ .probe = si5324_i2c_probe,
+ .remove = si5324_i2c_remove,
+ .id_table = si5324_i2c_ids,
+};
+module_i2c_driver(si5324_driver);
+
+MODULE_AUTHOR("Venkateshwar Rao G <vgannava@xilinx.com>");
+MODULE_DESCRIPTION("Silicon Labs 5319/5324/5328 clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-si5324.h b/drivers/clk/clk-si5324.h
new file mode 100644
index 000000000000..48e62a67f56e
--- /dev/null
+++ b/drivers/clk/clk-si5324.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Si5324 clock driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Authors: Leon Woestenberg <leon@sidebranch.com>
+ * Venkateshwar Rao <vgannava@xilinx.com>
+ */
+
+#ifndef _CLK_SI5324_H_
+#define _CLK_SI5324_H_
+
+#define SI5324_BUS_BASE_ADDR 0x68
+
+#define SI5324_CONTROL 0
+#define SI5324_CONTROL_FREE_RUN BIT(6)
+#define SI5324_FREE_RUN_EN 0x54
+
+#define SI5324_INCK_PRIOR 1
+#define SI5324_INCK_PRIOR_1_MASK 0xC
+#define SI5324_INCK_PRIOI_2_MASK 0x3
+
+#define SI5324_BWSEL 2
+#define SI5324_BWSEL_MASK 0xF0
+#define SI5324_BWSEL_SHIFT 4
+#define SI5324_BWSEL_DEF_VAL 2
+
+#define SI5324_CKSEL 3
+#define SI5324_CKSEL_SQL_ICAL BIT(4)
+#define SI5324_CKSEL_SHIFT 6
+#define SI5324_CK_SEL 3
+
+#define SI5324_AUTOSEL			4
+#define SI5324_AUTOSEL_DEF 0x12
+
+#define SI5324_ICMOS 5
+#define SI5324_OUTPUT_SIGFMT 6
+#define SI5324_OUTPUT_SF1_DEFAULT 0xF
+#define SI5324_REFFRE_FOS 7
+#define SI5324_HLOG 8
+#define SI5324_AVG_HIST 9
+#define SI5324_DSBL_CLKOUT 10
+#define SI5324_DSBL_CLKOUT2 BIT(3)
+#define SI5324_POWERDOWN 11
+#define SI5324_PD_CK1 BIT(0)
+#define SI5324_PD_CK2 BIT(1)
+#define SI5324_PD_CK1_DIS 0x41
+#define SI5324_PD_CK2_DIS 0x42
+#define SI5324_FOS_LOCKT 19
+#define SI5324_FOS_DEFAULT 0x23
+#define SI5324_CK_ACTV_SEL 21
+#define SI5324_CK_DEFAULT 0xFC
+#define SI5324_CK_ACTV BIT(1)
+#define SI5324_CK_SELPIN BIT(1)
+#define SI5324_LOS_MSK 23
+#define SI5324_FOS_L0L_MASK 24
+
+/* output clock dividers */
+#define SI5324_N1_HS 25
+#define SI5324_N1_HS_VAL_SHIFT 5
+#define SI5324_HSHIFT 16
+#define SI5324_LSHIFT 8
+#define SI5324_NC1_LS_H 31
+#define SI5324_NC1_LS_M 32
+#define SI5324_NC1_LS_L 33
+#define SI5324_DIV_LS_MASK 0x0F
+#define SI5324_DIV_HS_MASK 0xF0
+#define SI5324_NC2_LS_H 34
+#define SI5324_NC2_LS_M 35
+#define SI5324_NC2_LS_L 36
+
+#define SI5324_N2_HS_LS_H 40
+#define SI5324_N2_HS_LS_H_VAL_SHIFT 5
+#define SI5324_N2_LS_H 41
+#define SI5324_N2_LS_L 42
+#define SI5324_N31_CLKIN_H 43
+#define SI5324_N31_CLKIN_M 44
+#define SI5324_N31_CLKIN_L 45
+#define SI5324_N32_CLKIN_H 46
+#define SI5324_N32_CLKIN_M 47
+#define SI5324_N32_CLKIN_L 48
+#define SI5324_FOS_CLKIN_RATE 55
+#define SI5324_PLL_ACTV_CLK 128
+#define SI5324_LOS_STATUS 129
+#define SI5324_CLKIN_LOL_STATUS 130
+#define SI5324_LOS_FLG 131
+#define SI5324_FOS_FLG 132
+#define SI5324_PARTNO_H 134
+#define SI5324_PARTNO_L 135
+
+#define SI5324_RESET_CALIB 136
+#define SI5324_RST_ALL BIT(7)
+#define SI5324_CALIB_EN BIT(6)
+
+#define SI5324_FASTLOCK 137
+#define SI5324_FASTLOCK_EN BIT(0)
+#define SI5324_LOS1_LOS2_EN 138
+#define SI5324_SKEW1 142
+#define SI5324_SKEW2 143
+
+/* selects 2kHz to 710 MHz */
+#define SI5324_CLKIN_MIN_FREQ 2000
+#define SI5324_CLKIN_MAX_FREQ (710 * 1000 * 1000)
+
+/* generates 2kHz to 945 MHz */
+#define SI5324_CLKOUT_MIN_FREQ 2000
+#define SI5324_CLKOUT_MAX_FREQ (945 * 1000 * 1000)
+
+/* The following constants define the limits of the divider settings. */
+#define SI5324_N1_HS_MIN 6
+#define SI5324_N1_HS_MAX 11
+#define SI5324_NC_LS_MIN 1
+#define SI5324_NC_LS_MAX 0x100000
+#define SI5324_N2_HS_MIN 4
+#define SI5324_N2_HS_MAX 11
+#define SI5324_N2_LS_MIN 2
+#define SI5324_N2_LS_MAX 0x100000
+#define SI5324_N3_MIN 1
+#define SI5324_N3_MAX 0x080000
+
+#define SI5324_SRC_XTAL 0
+#define SI5324_SRC_CLKIN1 1
+#define SI5324_SRC_CLKIN2 2
+#define SI5324_SRC_CLKS 3
+
+#define SI5324_CLKIN1 0
+#define SI5324_CLKIN2 1
+#define SI5324_MAX_CLKOUTS 2
+#define NUM_NAME_IDS 6 /* 3 clkin, 1 pll, 2 clkout */
+#define MAX_NAME_LEN 11
+#define SI5324_PARAM_LEN 24
+#define SI5324_NC_PARAM_LEN 6
+#define SI5324_OUT_REGS 14
+#define SI5324_N1_PARAM_LEN 1
+#define SI5324_N2_PARAM_LEN 9
+#define SI5324_REF_CLOCK 114285000UL
+#define SI5324_RESET_DELAY_MS 20
+
+#endif
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 07ef9995b3cb..31504e52a67c 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -732,10 +732,8 @@ static int si5341_output_clk_set_rate(struct clk_hw *hw, unsigned long rate,
r[0] = r_div ? (r_div & 0xff) : 1;
r[1] = (r_div >> 8) & 0xff;
r[2] = (r_div >> 16) & 0xff;
- err = regmap_bulk_write(output->data->regmap,
+ return regmap_bulk_write(output->data->regmap,
SI5341_OUT_R_REG(output), r, 3);
-
- return 0;
}
static int si5341_output_reparent(struct clk_si5341_output *output, u8 index)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 13332f89e034..6f272d9640d8 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -251,6 +251,17 @@ static bool clk_core_is_enabled(struct clk_core *core)
}
}
+ /*
+ * This could be called with the enable lock held, or from atomic
+ * context. If the parent isn't enabled already, we can't do
+ * anything here. We can also assume this clock isn't enabled.
+ */
+ if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent)
+ if (!clk_core_is_enabled(core->parent)) {
+ ret = false;
+ goto done;
+ }
+
ret = core->ops->is_enabled(core->hw);
done:
if (core->rpm_enabled)
@@ -291,6 +302,34 @@ struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);
+static unsigned int sibling;
+
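+/*
+ * Depth-first walk over the subtree rooted at @c. Only nodes at level 1
+ * (the direct children of the clock the walk started from) bump the
+ * 'sibling' counter, so after a walk from level 0 it holds the number
+ * of direct children.
+ */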
+static void clk_show_subtree(struct clk_core *c,
+ int level)
+{
+ struct clk_core *child;
+
+ if (!c)
+ return;
+
+ if (level == 1)
+ sibling++;
+
+ hlist_for_each_entry(child, &c->children, child_node)
+ clk_show_subtree(child, level + 1);
+}
+
+unsigned int clk_get_children(char *name)
+{
+	struct clk_core *core;
+	struct clk *pclk = __clk_lookup(name);
+
+	if (!pclk)
+		return 0;
+
+	sibling = 0;
+	core = pclk->core;
+	clk_show_subtree(core, 0);
+	return sibling;
+}
+
static struct clk_core *__clk_lookup_subtree(const char *name,
struct clk_core *core)
{
@@ -414,6 +453,9 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
if (IS_ERR(hw))
return ERR_CAST(hw);
+ if (!hw)
+ return NULL;
+
return hw->core;
}
@@ -852,10 +894,9 @@ static void clk_core_unprepare(struct clk_core *core)
if (core->ops->unprepare)
core->ops->unprepare(core->hw);
- clk_pm_runtime_put(core);
-
trace_clk_unprepare_complete(core);
clk_core_unprepare(core->parent);
+ clk_pm_runtime_put(core);
}
static void clk_core_unprepare_lock(struct clk_core *core)
@@ -3070,6 +3111,7 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
unsigned int i, char terminator)
{
struct clk_core *parent;
+ const char *name = NULL;
/*
* Go through the following options to fetch a parent's name.
@@ -3084,18 +3126,20 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
* registered (yet).
*/
parent = clk_core_get_parent_by_index(core, i);
- if (parent)
+ if (parent) {
seq_puts(s, parent->name);
- else if (core->parents[i].name)
+ } else if (core->parents[i].name) {
seq_puts(s, core->parents[i].name);
- else if (core->parents[i].fw_name)
+ } else if (core->parents[i].fw_name) {
seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
- else if (core->parents[i].index >= 0)
- seq_puts(s,
- of_clk_get_parent_name(core->of_node,
- core->parents[i].index));
- else
- seq_puts(s, "(missing)");
+ } else {
+ if (core->parents[i].index >= 0)
+ name = of_clk_get_parent_name(core->of_node, core->parents[i].index);
+ if (!name)
+ name = "(missing)";
+
+ seq_puts(s, name);
+ }
seq_putc(s, terminator);
}
diff --git a/drivers/clk/hisilicon/clk-hi3519.c b/drivers/clk/hisilicon/clk-hi3519.c
index ad0c7f350cf0..60d8a27a9082 100644
--- a/drivers/clk/hisilicon/clk-hi3519.c
+++ b/drivers/clk/hisilicon/clk-hi3519.c
@@ -130,7 +130,7 @@ static void hi3519_clk_unregister(struct platform_device *pdev)
of_clk_del_provider(pdev->dev.of_node);
hisi_clk_unregister_gate(hi3519_gate_clks,
- ARRAY_SIZE(hi3519_mux_clks),
+ ARRAY_SIZE(hi3519_gate_clks),
crg->clk_data);
hisi_clk_unregister_mux(hi3519_mux_clks,
ARRAY_SIZE(hi3519_mux_clks),
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index a3d04c7c3da8..eb9c139babc3 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -467,8 +467,10 @@ static void __init hi3620_mmc_clk_init(struct device_node *node)
return;
clk_data->clks = kcalloc(num, sizeof(*clk_data->clks), GFP_KERNEL);
- if (!clk_data->clks)
+ if (!clk_data->clks) {
+ kfree(clk_data);
return;
+ }
for (i = 0; i < num; i++) {
struct hisi_mmc_clock *mmc_clk = &hi3620_mmc_clks[i];
diff --git a/drivers/clk/idt/Makefile b/drivers/clk/idt/Makefile
new file mode 100644
index 000000000000..4cf2b6e4801d
--- /dev/null
+++ b/drivers/clk/idt/Makefile
@@ -0,0 +1,3 @@
+obj-y += clk-idt8t49n24x-core.o
+obj-y += clk-idt8t49n24x-debugfs.o
+obj-y += clk-idt8t49n24x.o
diff --git a/drivers/clk/idt/clk-idt8t49n24x-core.c b/drivers/clk/idt/clk-idt8t49n24x-core.c
new file mode 100644
index 000000000000..ad23014e708f
--- /dev/null
+++ b/drivers/clk/idt/clk-idt8t49n24x-core.c
@@ -0,0 +1,933 @@
+// SPDX-License-Identifier: GPL-2.0
+/* clk-idt8t49n24x-core.c - Program 8T49N24x settings via I2C (common code)
+ *
+ * Copyright (C) 2018, Integrated Device Technology, Inc. <david.cater@idt.com>
+ *
+ * See https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+ * This program is distributed "AS IS" and WITHOUT ANY WARRANTY;
+ * including the implied warranties of MERCHANTABILITY, FITNESS FOR
+ * A PARTICULAR PURPOSE, or NON-INFRINGEMENT.
+ */
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#include "clk-idt8t49n24x-core.h"
+
+/*
+ * When Q0 is changed from 25MHz to 75MHz in Timing Commander, the
+ * following changes occur:
+ *
+ * 2 bytes change in EEPROM data string.
+ *
+ * DSM_INT R0025[0],R0026[7:0] : 35 => 30
+ * NS2_Q0 R0040[7:0],R0041[7:0] : 14 => 4
+ *
+ * In EEPROM
+ * 1. R0026
+ * 2. R0041
+ *
+ * Note that VCO_Frequency (metadata) also changed (3500 => 3000).
+ * This reflects a change to DSM_INT.
+ *
+ * Note that the Timing Commander code has workarounds in the workflow scripts
+ * to handle dividers for the 8T49N241 (because the development of that GUI
+ * predates chip override functionality). That affects NS1_Qx (x in 1-3)
+ * and NS2_Qx. NS1_Qx contains the upper bits of NS_Qx, and NS2_Qx contains
+ * the lower bits. That is NOT the case for Q0, though. In that case NS1_Q0
+ * is the 1st stage output divider (/5, /6, /4) and NS2_Q0 is the 16-bit
+ * second stage (with actual divide being twice the value stored in the
+ * register).
+ *
+ * NS1_Q0 R003F[1:0]
+ */
+
+#define IDT24x_VCO_MIN 2999997000u
+#define IDT24x_VCO_MAX 4000004000u
+#define IDT24x_VCO_OPT 3500000000u
+#define IDT24x_MIN_INT_DIVIDER 6
+#define IDT24x_MIN_NS1 4
+#define IDT24x_MAX_NS1 6
+
+static u8 q0_ns1_options[3] = { 5, 6, 4 };
+
+/**
+ * bits_to_shift - num bits to shift given specified mask
+ * @mask: 32-bit word input to count zero bits on right
+ *
+ * Given a bit mask indicating where a value will be stored in
+ * a register, return the number of bits you need to shift the value
+ * before ORing it into the register value.
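+ *
+ * For example, bits_to_shift(0x30) returns 4: a value destined for
+ * mask 0x30 must be shifted left by four bits before being ORed in.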
+ *
+ * Return: number of bits to shift
+ */
+int bits_to_shift(unsigned int mask)
+{
+ /* the number of zero bits on the right */
+ unsigned int c = 32;
+
+ mask &= ~mask + 1;
+ if (mask)
+ c--;
+ if (mask & 0x0000FFFF)
+ c -= 16;
+ if (mask & 0x00FF00FF)
+ c -= 8;
+ if (mask & 0x0F0F0F0F)
+ c -= 4;
+ if (mask & 0x33333333)
+ c -= 2;
+ if (mask & 0x55555555)
+ c -= 1;
+ return c;
+}
+
+/*
+ * TODO: Consider replacing this with regmap_multi_reg_write, which
+ * supports introducing a delay after each write. Experiment to see if
+ * the writes succeed consistently when using that API.
+ */
+static int regmap_bulk_write_with_retry(
+ struct regmap *map, unsigned int offset, u8 val[],
+ int val_count, int max_attempts)
+{
+ int err = 0;
+ int count = 1;
+
+ do {
+ err = regmap_bulk_write(map, offset, val, val_count);
+ if (err == 0)
+ return 0;
+
+ usleep_range(100, 200);
+ } while (count++ <= max_attempts);
+ return err;
+}
+
+static int regmap_write_with_retry(
+ struct regmap *map, unsigned int offset, unsigned int val,
+ int max_attempts)
+{
+ int err = 0;
+ int count = 1;
+
+ do {
+ err = regmap_write(map, offset, val);
+ if (err == 0)
+ return 0;
+ usleep_range(100, 200);
+ } while (count++ <= max_attempts);
+ return err;
+}
+
+/*
+ * TODO: Consider using regmap_multi_reg_write instead. Explore
+ * use of regmap to configure WRITE_BLOCK_SIZE, and using the delay
+ * mechanism in regmap_multi_reg_write instead of retrying multiple
+ * times (regmap_bulk_write_with_retry).
+ */
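+/*
+ * Write val[] to consecutive registers starting at reg, splitting the
+ * transfer into WRITE_BLOCK_SIZE chunks and retrying each chunk up to
+ * five times before giving up.
+ */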
+int i2cwritebulk(
+ struct i2c_client *client, struct regmap *map,
+ unsigned int reg, u8 val[], size_t val_count)
+{
+ char dbg[128];
+ u8 block[WRITE_BLOCK_SIZE];
+ unsigned int block_offset = reg;
+ int x;
+ int err = 0;
+ int currentOffset = 0;
+
+ dev_dbg(&client->dev, "I2C->0x%04x : [hex] . First byte: %02x, Second byte: %02x",
+ reg, reg >> 8, reg & 0xFF);
+ dbg[0] = 0;
+
+ for (x = 0; x < val_count; x++) {
+ char data[4];
+
+ block[currentOffset++] = val[x];
+ sprintf(data, "%02x ", val[x]);
+ strcat(dbg, data);
+ if (x > 0 && (x + 1) % WRITE_BLOCK_SIZE == 0) {
+ dev_dbg(&client->dev, "%s", dbg);
+ dbg[0] = '\0';
+ sprintf(dbg,
+ "(loop) calling regmap_bulk_write @ 0x%04x [%d bytes]",
+ block_offset, WRITE_BLOCK_SIZE);
+ dev_dbg(&client->dev, "%s", dbg);
+ dbg[0] = '\0';
+ err = regmap_bulk_write_with_retry(
+ map, block_offset, block, WRITE_BLOCK_SIZE, 5);
+ if (err != 0)
+ break;
+ block_offset += WRITE_BLOCK_SIZE;
+ currentOffset = 0;
+ }
+ }
+ if (err == 0 && currentOffset > 0) {
+ dev_dbg(&client->dev, "%s", dbg);
+ dev_dbg(&client->dev, "(final) calling regmap_bulk_write @ 0x%04x [%d bytes]",
+ block_offset, currentOffset);
+ err = regmap_bulk_write_with_retry(
+ map, block_offset, block, currentOffset, 5);
+ }
+
+ return err;
+}
+
+static int i2cwrite(
+ struct i2c_client *client, struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ int err;
+
+ dev_dbg(&client->dev, "I2C->0x%x : [hex] %x", reg, val);
+ err = regmap_write_with_retry(map, reg, val, 5);
+ usleep_range(100, 200);
+ return err;
+}
+
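+/*
+ * Merge val into the masked field of a register. For example (values
+ * for illustration only), with val = 2, mask = 0x30 and original = 0x0f,
+ * the byte written is (2 << 4) | 0x0f = 0x2f.
+ */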
+static int i2cwritewithmask(
+ struct i2c_client *client, struct regmap *map, unsigned int reg,
+ u8 val, u8 original, u8 mask)
+{
+ return i2cwrite(client, map, reg,
+ ((val << bits_to_shift(mask)) & mask) | (original & ~mask));
+}
+
+int idt24x_get_offsets(
+ u8 output_num,
+ struct clk_register_offsets *offsets)
+{
+ switch (output_num) {
+ case 0:
+ offsets->oe_offset = IDT24x_REG_OUTEN;
+ offsets->oe_mask = IDT24x_REG_OUTEN0_MASK;
+ offsets->dis_mask = IDT24x_REG_Q0_DIS_MASK;
+ offsets->ns1_offset = IDT24x_REG_NS1_Q0;
+ offsets->ns1_offset_mask = IDT24x_REG_NS1_Q0_MASK;
+ offsets->ns2_15_8_offset = IDT24x_REG_NS2_Q0_15_8;
+ offsets->ns2_7_0_offset = IDT24x_REG_NS2_Q0_7_0;
+ break;
+ case 1:
+ offsets->oe_offset = IDT24x_REG_OUTEN;
+ offsets->oe_mask = IDT24x_REG_OUTEN1_MASK;
+ offsets->dis_mask = IDT24x_REG_Q1_DIS_MASK;
+ offsets->n_17_16_offset = IDT24x_REG_N_Q1_17_16;
+ offsets->n_17_16_mask = IDT24x_REG_N_Q1_17_16_MASK;
+ offsets->n_15_8_offset = IDT24x_REG_N_Q1_15_8;
+ offsets->n_7_0_offset = IDT24x_REG_N_Q1_7_0;
+ offsets->nfrac_27_24_offset = IDT24x_REG_NFRAC_Q1_27_24;
+ offsets->nfrac_27_24_mask =
+ IDT24x_REG_NFRAC_Q1_27_24_MASK;
+ offsets->nfrac_23_16_offset = IDT24x_REG_NFRAC_Q1_23_16;
+ offsets->nfrac_15_8_offset = IDT24x_REG_NFRAC_Q1_15_8;
+ offsets->nfrac_7_0_offset = IDT24x_REG_NFRAC_Q1_7_0;
+ break;
+ case 2:
+ offsets->oe_offset = IDT24x_REG_OUTEN;
+ offsets->oe_mask = IDT24x_REG_OUTEN2_MASK;
+ offsets->dis_mask = IDT24x_REG_Q2_DIS_MASK;
+ offsets->n_17_16_offset = IDT24x_REG_N_Q2_17_16;
+ offsets->n_17_16_mask = IDT24x_REG_N_Q2_17_16_MASK;
+ offsets->n_15_8_offset = IDT24x_REG_N_Q2_15_8;
+ offsets->n_7_0_offset = IDT24x_REG_N_Q2_7_0;
+ offsets->nfrac_27_24_offset = IDT24x_REG_NFRAC_Q2_27_24;
+ offsets->nfrac_27_24_mask =
+ IDT24x_REG_NFRAC_Q2_27_24_MASK;
+ offsets->nfrac_23_16_offset = IDT24x_REG_NFRAC_Q2_23_16;
+ offsets->nfrac_15_8_offset = IDT24x_REG_NFRAC_Q2_15_8;
+ offsets->nfrac_7_0_offset = IDT24x_REG_NFRAC_Q2_7_0;
+ break;
+ case 3:
+ offsets->oe_offset = IDT24x_REG_OUTEN;
+ offsets->oe_mask = IDT24x_REG_OUTEN3_MASK;
+ offsets->dis_mask = IDT24x_REG_Q3_DIS_MASK;
+ offsets->n_17_16_offset = IDT24x_REG_N_Q3_17_16;
+ offsets->n_17_16_mask = IDT24x_REG_N_Q3_17_16_MASK;
+ offsets->n_15_8_offset = IDT24x_REG_N_Q3_15_8;
+ offsets->n_7_0_offset = IDT24x_REG_N_Q3_7_0;
+ offsets->nfrac_27_24_offset = IDT24x_REG_NFRAC_Q3_27_24;
+ offsets->nfrac_27_24_mask =
+ IDT24x_REG_NFRAC_Q3_27_24_MASK;
+ offsets->nfrac_23_16_offset = IDT24x_REG_NFRAC_Q3_23_16;
+ offsets->nfrac_15_8_offset = IDT24x_REG_NFRAC_Q3_15_8;
+ offsets->nfrac_7_0_offset = IDT24x_REG_NFRAC_Q3_7_0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * idt24x_calc_div_q0 - Calculate dividers and VCO freq to generate
+ * the specified Q0 frequency.
+ * @chip: Device data structure. Contains all requested frequencies
+ * for all outputs.
+ *
+ * The actual output divider is ns1 * ns2 * 2. fOutput = fVCO / (ns1 * ns2 * 2)
+ *
+ * The options for ns1 (when the source is the VCO) are 4,5,6. ns2 is a
+ * 16-bit value.
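+ *
+ * For example (illustrative values only): with fVCO = 3600000000,
+ * ns1 = 6 and ns2 = 12, fOutput = 3600000000 / (6 * 12 * 2) = 25000000.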
+ *
+ * chip->divs: structure for specifying ns1/ns2 values. If these are
+ * still 0 after this function, Q0 was not requested.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int idt24x_calc_div_q0(struct clk_idt24x_chip *chip)
+{
+ u8 x;
+ u32 min_div, max_div, best_vco = 0;
+ u16 min_ns2, max_ns2;
+ bool is_lower_vco = false;
+
+ chip->divs.ns1_q0 = 0;
+ chip->divs.ns2_q0 = 0;
+
+ if (chip->clk[0].requested == 0)
+ return 0;
+
+ min_div = div64_u64(
+ (u64)IDT24x_VCO_MIN, chip->clk[0].requested * 2) * 2;
+ max_div = div64_u64(
+ (u64)IDT24x_VCO_MAX, chip->clk[0].requested * 2) * 2;
+
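+	/*
+	 * For example (illustrative only): requested = 25000000 gives
+	 * min_div = (2999997000 / 50000000) * 2 = 118 and
+	 * max_div = (4000004000 / 50000000) * 2 = 160 (both even).
+	 */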
+ dev_dbg(&chip->i2c_client->dev,
+ "%s. requested: %u, min_div: %u, max_div: %u",
+ __func__, chip->clk[0].requested, min_div, max_div);
+
+ min_ns2 = div64_u64((u64)min_div, IDT24x_MAX_NS1 * 2);
+ max_ns2 = div64_u64((u64)max_div, IDT24x_MIN_NS1 * 2);
+
+ dev_dbg(&chip->i2c_client->dev,
+ "%s. min_ns2: %u, max_ns2: %u", __func__, min_ns2, max_ns2);
+
+ for (x = 0; x < ARRAY_SIZE(q0_ns1_options); x++) {
+ u16 y = min_ns2;
+
+ while (y <= max_ns2) {
+ u32 actual_div = q0_ns1_options[x] * y * 2;
+ u32 current_vco = actual_div *
+ chip->clk[0].requested;
+
+ if (current_vco < IDT24x_VCO_MIN)
+ dev_dbg(&chip->i2c_client->dev,
+ "%s. ignore div: (ns1=%u * ns2=%u * 2 * %u) == %u < %u",
+ __func__, q0_ns1_options[x], y,
+ chip->clk[0].requested,
+ current_vco, IDT24x_VCO_MIN);
+ else if (current_vco > IDT24x_VCO_MAX) {
+ dev_dbg(&chip->i2c_client->dev,
+ "%s. ignore div: (ns1=%u * ns2=%u * 2 * %u) == %u > %u. EXIT LOOP.",
+ __func__, q0_ns1_options[x], y,
+ chip->clk[0].requested,
+ current_vco, IDT24x_VCO_MAX);
+ y = max_ns2;
+ } else {
+ bool use = false;
+
+ dev_dbg(&chip->i2c_client->dev,
+ "%s. contender: (ns1=%u * ns2=%u * 2 * %u) == %u [in range]",
+ __func__, q0_ns1_options[x], y,
+ chip->clk[0].requested,
+ current_vco);
+ if (current_vco <= IDT24x_VCO_OPT) {
+ if (current_vco > best_vco ||
+ !is_lower_vco) {
+ is_lower_vco = true;
+ use = true;
+ }
+ } else if (!is_lower_vco &&
+ current_vco > best_vco)
+ use = true;
+ if (use) {
+ chip->divs.ns1_q0 = x;
+ chip->divs.ns2_q0 = y;
+ best_vco = current_vco;
+ }
+ }
+ y++;
+ }
+ }
+
+ dev_dbg(&chip->i2c_client->dev,
+ "%s. best: (ns1=%u [/%u] * ns2=%u * 2 * %u) == %u",
+ __func__, chip->divs.ns1_q0, q0_ns1_options[chip->divs.ns1_q0],
+ chip->divs.ns2_q0, chip->clk[0].requested, best_vco);
+ return 0;
+}
+
+/**
+ * idt24x_calc_divs - Calculate dividers to generate the specified frequency.
+ * @chip: Device data structure. Contains all requested frequencies
+ * for all outputs.
+ *
+ * Calculate the clock dividers (dsmint, dsmfrac for vco; ns1/ns2 for q0,
+ * n/nfrac for q1-3) for a given target frequency.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int idt24x_calc_divs(struct clk_idt24x_chip *chip)
+{
+ u32 vco = 0;
+ int result;
+
+ result = idt24x_calc_div_q0(chip);
+ if (result < 0)
+ return result;
+
+ dev_dbg(&chip->i2c_client->dev,
+ "%s: after idt24x_calc_div_q0. ns1: %u [/%u], ns2: %u",
+ __func__, chip->divs.ns1_q0, q0_ns1_options[chip->divs.ns1_q0],
+ chip->divs.ns2_q0);
+
+ chip->divs.dsmint = 0;
+ chip->divs.dsmfrac = 0;
+
+ if (chip->clk[0].requested > 0) {
+ /* Q0 is in use and is governing the actual VCO freq */
+ vco = q0_ns1_options[chip->divs.ns1_q0] * chip->divs.ns2_q0 *
+ 2 * chip->clk[0].requested;
+ } else {
+ u32 freq = 0;
+ u32 walk;
+ u32 min_div, max_div;
+ bool is_lower_vco = false;
+
+ /*
+ * Q0 is not in use. Use the first requested (fractional)
+ * output frequency as the one controlling the VCO.
+ */
+ for (walk = 1; walk < NUM_OUTPUTS; walk++) {
+ if (chip->clk[walk].requested != 0) {
+ freq = chip->clk[walk].requested;
+ break;
+ }
+ }
+
+ if (freq == 0) {
+ dev_err(&chip->i2c_client->dev,
+ "%s: NO FREQUENCIES SPECIFIED", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * First, determine the min/max div for the output frequency.
+ */
+ min_div = IDT24x_MIN_INT_DIVIDER;
+ max_div = div64_u64((u64)IDT24x_VCO_MAX, freq * 2) * 2;
+
+ dev_dbg(&chip->i2c_client->dev,
+ "%s: calc_divs for fractional output. freq: %u, min_div: %u, max_div: %u",
+ __func__, freq, min_div, max_div);
+
+ walk = min_div;
+
+ while (walk <= max_div) {
+ u32 current_vco = freq * walk;
+
+ dev_dbg(&chip->i2c_client->dev,
+ "%s: calc_divs for fractional output. walk: %u, freq: %u, vco: %u",
+				__func__, walk, freq, current_vco);
+			if (current_vco >= IDT24x_VCO_MIN &&
+			    current_vco <= IDT24x_VCO_MAX) {
+ if (current_vco <= IDT24x_VCO_OPT) {
+ if (current_vco > vco ||
+ !is_lower_vco) {
+ is_lower_vco = true;
+ vco = current_vco;
+ }
+ } else if (!is_lower_vco && current_vco > vco) {
+ vco = current_vco;
+ }
+ }
+ /* Divider must be even. */
+ walk += 2;
+ }
+ }
+
+ if (vco != 0) {
+ u32 pfd;
+ u64 rem;
+ int x;
+
+ /* Setup dividers for outputs with fractional dividers. */
+ for (x = 1; x < NUM_OUTPUTS; x++) {
+ if (chip->clk[x].requested != 0) {
+ /*
+ * The value written to the chip is half
+ * the calculated divider.
+ */
+ chip->divs.nint[x - 1] = div64_u64_rem(
+ (u64)vco,
+ chip->clk[x].requested * 2,
+ &rem);
+ chip->divs.nfrac[x - 1] = div64_u64(
+ rem * 1 << 28,
+ chip->clk[x].requested * 2);
+ dev_dbg(&chip->i2c_client->dev,
+ "%s: div to get Q%i freq %u from vco %u: int part: %u, rem: %llu, frac part: %u",
+ __func__, x,
+ chip->clk[x].requested,
+ vco, chip->divs.nint[x - 1], rem,
+ chip->divs.nfrac[x - 1]);
+ }
+ }
+
+ /* Calculate freq for pfd */
+ pfd = chip->input_clk_freq * (chip->doubler_disabled ? 1 : 2);
+
+ /*
+ * Calculate dsmint & dsmfrac:
+ * -----------------------------
+ * dsm = float(vco)/float(pfd)
+ * dsmfrac = dsm-floor(dsm) * 2^21
+ * rem = vco % pfd
+ * therefore:
+ * dsmfrac = (rem * 2^21)/pfd
+		 * dsmfrac = (rem * 2^21)/pfd
+		 */
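+		/*
+		 * Worked example (values chosen for illustration only):
+		 * vco = 3500000000 and pfd = 80000000 give dsmint = 43,
+		 * rem = 60000000 and
+		 * dsmfrac = (60000000 * 2^21) / 80000000 = 1572864.
+		 */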
+ chip->divs.dsmint = div64_u64_rem(vco, pfd, &rem);
+ chip->divs.dsmfrac = div64_u64(rem * 1 << 21, pfd);
+
+ dev_dbg(&chip->i2c_client->dev,
+ "%s: vco: %u, pfd: %u, dsmint: %u, dsmfrac: %u, rem: %llu",
+ __func__, vco, pfd, chip->divs.dsmint,
+ chip->divs.dsmfrac, rem);
+ } else {
+ dev_err(&chip->i2c_client->dev,
+ "%s: no integer divider in range found. NOT SUPPORTED.",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * idt24x_enable_output - Enable/disable a particular output
+ * @chip: Device data structure
+ * @output: Output to enable/disable
+ * @enable: Enable (true/false)
+ *
+ * Return: passes on regmap_write return value.
+ */
+static int idt24x_enable_output(
+ struct clk_idt24x_chip *chip, u8 output, bool enable)
+{
+ struct clk_register_offsets offsets;
+ int err;
+ struct i2c_client *client = chip->i2c_client;
+
+ /*
+ * When an output is enabled, enable it in the original
+ * data read from the chip and cached. Otherwise it may be
+ * accidentally turned off when another output is enabled.
+ *
+ * E.g., the driver starts with all outputs off in reg_out_en_x.
+ * Q1 is enabled with the appropriate mask. Q2 is then enabled,
+ * which results in Q1 being turned back off (because Q1 was off
+ * in reg_out_en_x).
+ */
+
+ err = idt24x_get_offsets(output, &offsets);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error calling idt24x_get_offsets for %d: %i",
+ __func__, output, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: q%u enable? %d. reg_out_en_x before: 0x%x, reg_out_mode_0_1 before: 0x%x, reg_out_mode_2_3 before: 0x%x, reg_qx_dis before: 0x%x",
+ __func__, output, enable, chip->reg_out_en_x,
+ chip->reg_out_mode_0_1, chip->reg_out_mode_2_3,
+ chip->reg_qx_dis);
+
+ chip->reg_out_en_x = chip->reg_out_en_x & ~offsets.oe_mask;
+ if (enable)
+ chip->reg_out_en_x |= (1 << bits_to_shift(offsets.oe_mask));
+
+ chip->reg_qx_dis = chip->reg_qx_dis & ~offsets.dis_mask;
+ dev_dbg(&client->dev,
+ "%s: q%u enable? %d. reg_qx_dis mask: 0x%x, before checking enable: 0x%x",
+ __func__, output, enable, offsets.dis_mask,
+ chip->reg_qx_dis);
+ if (!enable)
+ chip->reg_qx_dis |= (1 << bits_to_shift(offsets.dis_mask));
+
+ dev_dbg(&client->dev,
+ "%s: q%u enable? %d. reg_out_en_x after: 0x%x, reg_qx_dis after: 0x%x",
+ __func__, output, enable, chip->reg_out_en_x,
+ chip->reg_qx_dis);
+
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_OUTEN, chip->reg_out_en_x);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_OUTEN: %i",
+ __func__, err);
+ return err;
+ }
+
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_OUTMODE0_1,
+ chip->reg_out_mode_0_1);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_OUTMODE0_1: %i",
+ __func__, err);
+ return err;
+ }
+
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_OUTMODE2_3,
+ chip->reg_out_mode_2_3);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_OUTMODE2_3: %i",
+ __func__, err);
+ return err;
+ }
+
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_Q_DIS, chip->reg_qx_dis);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_Q_DIS: %i",
+ __func__, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * idt24x_update_device - write registers to the chip
+ * @chip: Device data structure
+ *
+ * Write all values to hardware that we have calculated.
+ *
+ * Return: 0 on success, or the first error returned by the underlying
+ * I2C writes.
+ */
+static int idt24x_update_device(struct clk_idt24x_chip *chip)
+{
+ int err;
+ struct i2c_client *client = chip->i2c_client;
+ int x = -1;
+
+ dev_dbg(&client->dev,
+ "%s: setting DSM_INT_8 (val %u @ %u)",
+ __func__, chip->divs.dsmint >> 8,
+ IDT24x_REG_DSM_INT_8);
+ err = i2cwritewithmask(
+ client, chip->regmap, IDT24x_REG_DSM_INT_8,
+ (chip->divs.dsmint >> 8) & IDT24x_REG_DSM_INT_8_MASK,
+ chip->reg_dsm_int_8, IDT24x_REG_DSM_INT_8_MASK);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_DSM_INT_8: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting DSM_INT_7_0 (val %u @ 0x%x)",
+ __func__, chip->divs.dsmint & 0xFF,
+ IDT24x_REG_DSM_INT_7_0);
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_DSM_INT_7_0,
+ chip->divs.dsmint & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_DSM_INT_7_0: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting IDT24x_REG_DSMFRAC_20_16 (val %u @ 0x%x)",
+ __func__, chip->divs.dsmfrac >> 16,
+ IDT24x_REG_DSMFRAC_20_16);
+ err = i2cwritewithmask(
+ client, chip->regmap, IDT24x_REG_DSMFRAC_20_16,
+ (chip->divs.dsmfrac >> 16) & IDT24x_REG_DSMFRAC_20_16_MASK,
+		chip->reg_dsm_frac_20_16, IDT24x_REG_DSMFRAC_20_16_MASK);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_DSMFRAC_20_16: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting IDT24x_REG_DSMFRAC_15_8 (val %u @ 0x%x)",
+ __func__, (chip->divs.dsmfrac >> 8) & 0xFF,
+ IDT24x_REG_DSMFRAC_15_8);
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_DSMFRAC_15_8,
+ (chip->divs.dsmfrac >> 8) & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_DSMFRAC_15_8: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting IDT24x_REG_DSMFRAC_7_0 (val %u @ 0x%x)",
+ __func__, chip->divs.dsmfrac & 0xFF,
+ IDT24x_REG_DSMFRAC_7_0);
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_DSMFRAC_7_0,
+ chip->divs.dsmfrac & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_DSMFRAC_7_0: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting IDT24x_REG_NS1_Q0 (val %u @ 0x%x)",
+ __func__, chip->divs.ns1_q0, IDT24x_REG_NS1_Q0);
+ err = i2cwritewithmask(
+ client, chip->regmap, IDT24x_REG_NS1_Q0,
+ chip->divs.ns1_q0 & IDT24x_REG_NS1_Q0_MASK,
+ chip->reg_ns1_q0, IDT24x_REG_NS1_Q0_MASK);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_NS1_Q0: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting IDT24x_REG_NS2_Q0_15_8 (val %u @ 0x%x)",
+ __func__, (chip->divs.ns2_q0 >> 8) & 0xFF,
+ IDT24x_REG_NS2_Q0_15_8);
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_NS2_Q0_15_8,
+ (chip->divs.ns2_q0 >> 8) & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_NS2_Q0_15_8: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting IDT24x_REG_NS2_Q0_7_0 (val %u @ 0x%x)",
+ __func__, chip->divs.ns2_q0 & 0xFF,
+ IDT24x_REG_NS2_Q0_7_0);
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_NS2_Q0_7_0,
+ chip->divs.ns2_q0 & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_NS2_Q0_7_0: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: calling idt24x_enable_output for Q0. requestedFreq: %u",
+ __func__, chip->clk[0].requested);
+	err = idt24x_enable_output(chip, 0, chip->clk[0].requested != 0);
+	if (err)
+		return err;
+
+ dev_dbg(&client->dev,
+ "%s: writing values for q1-q3", __func__);
+ for (x = 1; x < NUM_OUTPUTS; x++) {
+ struct clk_register_offsets offsets;
+
+ if (chip->clk[x].requested != 0) {
+ dev_dbg(&client->dev,
+ "%s: calling idt24x_get_offsets for %u",
+ __func__, x);
+ err = idt24x_get_offsets(x, &offsets);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error calling idt24x_get_offsets: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: (q%u, nint: %u, nfrac: %u)",
+ __func__, x, chip->divs.nint[x - 1],
+ chip->divs.nfrac[x - 1]);
+
+ dev_dbg(&client->dev,
+ "%s: setting n_17_16_offset (q%u, val %u @ 0x%x)",
+ __func__, x,
+ chip->divs.nint[x - 1] >> 16,
+ offsets.n_17_16_offset);
+ err = i2cwritewithmask(
+ client, chip->regmap, offsets.n_17_16_offset,
+ (chip->divs.nint[x - 1] >> 16) &
+ offsets.n_17_16_mask,
+ chip->reg_n_qx_17_16[x - 1],
+ offsets.n_17_16_mask);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting n_17_16_offset: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting n_15_8_offset (q%u, val %u @ 0x%x)",
+ __func__, x,
+ (chip->divs.nint[x - 1] >> 8) & 0xFF,
+ offsets.n_15_8_offset);
+ err = i2cwrite(
+ client, chip->regmap, offsets.n_15_8_offset,
+ (chip->divs.nint[x - 1] >> 8) & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting n_15_8_offset: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting n_7_0_offset (q%u, val %u @ 0x%x)",
+ __func__, x,
+ chip->divs.nint[x - 1] & 0xFF,
+ offsets.n_7_0_offset);
+ err = i2cwrite(
+ client, chip->regmap, offsets.n_7_0_offset,
+ chip->divs.nint[x - 1] & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting n_7_0_offset: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting nfrac_27_24_offset (q%u, val %u @ 0x%x)",
+ __func__, x,
+ (chip->divs.nfrac[x - 1] >> 24),
+ offsets.nfrac_27_24_offset);
+ err = i2cwritewithmask(
+ client, chip->regmap,
+ offsets.nfrac_27_24_offset,
+ (chip->divs.nfrac[x - 1] >> 24) &
+ offsets.nfrac_27_24_mask,
+ chip->reg_nfrac_qx_27_24[x - 1],
+ offsets.nfrac_27_24_mask);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting nfrac_27_24_offset: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting nfrac_23_16_offset (q%u, val %u @ 0x%x)",
+ __func__, x,
+ (chip->divs.nfrac[x - 1] >> 16) & 0xFF,
+ offsets.nfrac_23_16_offset);
+ err = i2cwrite(
+ client, chip->regmap,
+ offsets.nfrac_23_16_offset,
+ (chip->divs.nfrac[x - 1] >> 16) & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting nfrac_23_16_offset: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting nfrac_15_8_offset (q%u, val %u @ 0x%x)",
+ __func__, x,
+ (chip->divs.nfrac[x - 1] >> 8) & 0xFF,
+ offsets.nfrac_15_8_offset);
+ err = i2cwrite(
+ client, chip->regmap,
+ offsets.nfrac_15_8_offset,
+ (chip->divs.nfrac[x - 1] >> 8) & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting nfrac_15_8_offset: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting nfrac_7_0_offset (q%u, val %u @ 0x%x)",
+ __func__, x,
+ chip->divs.nfrac[x - 1] & 0xFF,
+ offsets.nfrac_7_0_offset);
+ err = i2cwrite(
+ client, chip->regmap, offsets.nfrac_7_0_offset,
+ chip->divs.nfrac[x - 1] & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting nfrac_7_0_offset: %i",
+ __func__, err);
+ return err;
+ }
+ }
+		err = idt24x_enable_output(chip, x,
+					   chip->clk[x].requested != 0);
+		if (err)
+			return err;
+ chip->clk[x].actual = chip->clk[x].requested;
+ }
+ return 0;
+}
+
+/**
+ * idt24x_set_frequency - Adjust output frequency on the attached chip.
+ * @chip: Device data structure, including all requested frequencies.
+ *
+ * Return: 0 on success.
+ */
+int idt24x_set_frequency(struct clk_idt24x_chip *chip)
+{
+ int err;
+ struct i2c_client *client = chip->i2c_client;
+ int x;
+ bool all_disabled = true;
+
+ for (x = 0; x < NUM_OUTPUTS; x++) {
+ if (chip->clk[x].requested == 0) {
+			err = idt24x_enable_output(chip, x, false);
+			if (err)
+				return err;
+ chip->clk[x].actual = 0;
+ } else {
+ all_disabled = false;
+ }
+ }
+
+	if (all_disabled) {
+		/*
+		 * No requested frequencies, so nothing else to calculate
+		 * or write to the chip. If the consumer wants to disable
+		 * all outputs, they can request 0 for all frequencies.
+		 */
+		return 0;
+	}
+
+ if (chip->input_clk_freq == 0) {
+ dev_err(&client->dev,
+ "%s: no input frequency; can't continue.", __func__);
+ return -EINVAL;
+ }
+
+ err = idt24x_calc_divs(chip);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error calling idt24x_calc_divs: %i",
+ __func__, err);
+ return err;
+ }
+
+ err = idt24x_update_device(chip);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error updating the device: %i",
+ __func__, err);
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/clk/idt/clk-idt8t49n24x-core.h b/drivers/clk/idt/clk-idt8t49n24x-core.h
new file mode 100644
index 000000000000..247ec070c621
--- /dev/null
+++ b/drivers/clk/idt/clk-idt8t49n24x-core.h
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* clk-idt8t49n24x-core.h - Program 8T49N24x settings via I2C (common code)
+ *
+ * Copyright (C) 2018, Integrated Device Technology, Inc. <david.cater@idt.com>
+ *
+ * See https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+ * This program is distributed "AS IS" and WITHOUT ANY WARRANTY;
+ * including the implied warranties of MERCHANTABILITY, FITNESS FOR
+ * A PARTICULAR PURPOSE, or NON-INFRINGEMENT.
+ */
+
+#ifndef __IDT_CLK_IDT8T49N24X_CORE_H_
+#define __IDT_CLK_IDT8T49N24X_CORE_H_
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+/*
+ * The configurations in the settings file have 0x317 registers (last offset
+ * is 0x316).
+ */
+#define NUM_CONFIG_REGISTERS 0x317
+#define NUM_INPUTS 2
+#define NUM_OUTPUTS 4
+#define DEBUGFS_BUFFER_LENGTH 200
+#define WRITE_BLOCK_SIZE 32
+
+/* Non output-specific registers */
+#define IDT24x_REG_DBL_DIS 0x6C
+#define IDT24x_REG_DBL_DIS_MASK 0x01
+#define IDT24x_REG_DSM_INT_8 0x25
+#define IDT24x_REG_DSM_INT_8_MASK 0x01
+#define IDT24x_REG_DSM_INT_7_0 0x26
+#define IDT24x_REG_DSMFRAC_20_16 0x28
+#define IDT24x_REG_DSMFRAC_20_16_MASK 0x1F
+#define IDT24x_REG_DSMFRAC_15_8 0x29
+#define IDT24x_REG_DSMFRAC_7_0 0x2A
+#define IDT24x_REG_OUTEN 0x39
+#define IDT24x_REG_OUTMODE0_1 0x3E
+#define IDT24x_REG_OUTMODE2_3 0x3D
+#define IDT24x_REG_Q_DIS 0x6F
+
+/* Q0 */
+#define IDT24x_REG_OUTEN0_MASK 0x01
+#define IDT24x_REG_OUTMODE0_MASK 0x0E
+#define IDT24x_REG_Q0_DIS_MASK 0x01
+#define IDT24x_REG_NS1_Q0 0x3F
+#define IDT24x_REG_NS1_Q0_MASK 0x03
+#define IDT24x_REG_NS2_Q0_15_8 0x40
+#define IDT24x_REG_NS2_Q0_7_0 0x41
+
+/* Q1 */
+#define IDT24x_REG_OUTEN1_MASK 0x02
+#define IDT24x_REG_OUTMODE1_MASK 0xE0
+#define IDT24x_REG_Q1_DIS_MASK 0x02
+#define IDT24x_REG_N_Q1_17_16 0x42
+#define IDT24x_REG_N_Q1_17_16_MASK 0x03
+#define IDT24x_REG_N_Q1_15_8 0x43
+#define IDT24x_REG_N_Q1_7_0 0x44
+#define IDT24x_REG_NFRAC_Q1_27_24 0x57
+#define IDT24x_REG_NFRAC_Q1_27_24_MASK 0x0F
+#define IDT24x_REG_NFRAC_Q1_23_16 0x58
+#define IDT24x_REG_NFRAC_Q1_15_8 0x59
+#define IDT24x_REG_NFRAC_Q1_7_0 0x5A
+
+/* Q2 */
+#define IDT24x_REG_OUTEN2_MASK 0x04
+#define IDT24x_REG_OUTMODE2_MASK 0x0E
+#define IDT24x_REG_Q2_DIS_MASK 0x04
+#define IDT24x_REG_N_Q2_17_16 0x45
+#define IDT24x_REG_N_Q2_17_16_MASK 0x03
+#define IDT24x_REG_N_Q2_15_8 0x46
+#define IDT24x_REG_N_Q2_7_0 0x47
+#define IDT24x_REG_NFRAC_Q2_27_24 0x5B
+#define IDT24x_REG_NFRAC_Q2_27_24_MASK 0x0F
+#define IDT24x_REG_NFRAC_Q2_23_16 0x5C
+#define IDT24x_REG_NFRAC_Q2_15_8 0x5D
+#define IDT24x_REG_NFRAC_Q2_7_0 0x5E
+
+/* Q3 */
+#define IDT24x_REG_OUTEN3_MASK 0x08
+#define IDT24x_REG_OUTMODE3_MASK 0xE0
+#define IDT24x_REG_Q3_DIS_MASK 0x08
+#define IDT24x_REG_N_Q3_17_16 0x48
+#define IDT24x_REG_N_Q3_17_16_MASK 0x03
+#define IDT24x_REG_N_Q3_15_8 0x49
+#define IDT24x_REG_N_Q3_7_0 0x4A
+#define IDT24x_REG_NFRAC_Q3_27_24 0x5F
+#define IDT24x_REG_NFRAC_Q3_27_24_MASK 0x0F
+#define IDT24x_REG_NFRAC_Q3_23_16 0x60
+#define IDT24x_REG_NFRAC_Q3_15_8 0x61
+#define IDT24x_REG_NFRAC_Q3_7_0 0x62
+
+/**
+ * struct idt24x_output - device output information
+ * @hw:		hw registration info for this specific output clock. This gets
+ * passed as an argument to CCF api calls (e.g., set_rate).
+ * container_of can then be used to get the reference to this
+ * struct.
+ * @chip: store a reference to the parent device structure. container_of
+ * cannot be used to get to the parent device structure from
+ * idt24x_output, because clk_idt24x_chip contains an array of
+ * output structs (for future enhancements to support devices
+ * with different numbers of output clocks).
+ * @index: identifies output on the chip; used in debug statements
+ * @requested: requested output clock frequency (in Hz)
+ * @actual: actual output clock frequency (in Hz). Will only be set after
+ * successful update of the device.
+ * @debug_freq: stores value for debugfs file. Use this instead of requested
+ * struct var because debugfs expects u64, not u32.
+ */
+struct idt24x_output {
+ struct clk_hw hw;
+ struct clk_idt24x_chip *chip;
+ u8 index;
+ u32 requested;
+ u32 actual;
+ u64 debug_freq;
+};
+
+/**
+ * struct idt24x_dividers - output dividers
+ * @dsmint: int component of feedback divider for VCO (2-stage divider)
+ * @dsmfrac: fractional component of feedback divider for VCO
+ * @ns1_q0: ns1 divider component for Q0
+ * @ns2_q0: ns2 divider component for Q0
+ * @nint: int divider component for Q1-3
+ * @nfrac: fractional divider component for Q1-3
+ */
+struct idt24x_dividers {
+ u16 dsmint;
+ u32 dsmfrac;
+
+ u8 ns1_q0;
+ u16 ns2_q0;
+
+ u32 nint[3];
+ u32 nfrac[3];
+};
+
+/**
+ * struct clk_idt24x_chip - device info for chip
+ * @regmap: register map used to perform i2c writes to the chip
+ * @i2c_client: i2c_client struct passed to probe
+ * @min_freq: min frequency for this chip
+ * @max_freq: max frequency for this chip
+ * @settings: filled in if full register map is specified in the DT
+ * @has_settings: true if settings array is valid
+ * @input_clk: ptr to input clock specified in DT
+ * @input_clk_num: which input clock was specified. 0-based. A value of
+ *			NUM_INPUTS indicates that an XTAL is used as the input.
+ * @input_clk_nb: notification support (if input clk changes)
+ * @input_clk_freq: current freq of input_clk
+ * @doubler_disabled: whether input doubler is enabled. This value is read
+ * from the hw on probe (in case it is set in @settings).
+ * @clk: array of outputs. One entry per output supported by the
+ * chip. Frequencies requested via the ccf api will be
+ * recorded in this array.
+ * @reg_dsm_int_8: record current value from hw to avoid modifying
+ * when writing register values
+ * @reg_dsm_frac_20_16: record current value
+ * @reg_out_en_x: record current value
+ * @reg_out_mode_0_1: record current value
+ * @reg_out_mode_2_3: record current value
+ * @reg_qx_dis: record current value
+ * @reg_ns1_q0: record current value
+ * @reg_n_qx_17_16: record current value
+ * @reg_nfrac_qx_27_24: record current value
+ * @divs: output divider values for all outputs
+ * @debugfs_dirroot: debugfs support
+ * @debugfs_fileaction: debugfs support
+ * @debugfs_filei2c: debugfs support
+ * @debugfs_map: debugfs support
+ * @dbg_cache: debugfs support
+ * @debugfs_fileqfreq: debugfs support
+ */
+struct clk_idt24x_chip {
+ struct regmap *regmap;
+ struct i2c_client *i2c_client;
+
+ u32 min_freq;
+ u32 max_freq;
+
+ u8 settings[NUM_CONFIG_REGISTERS];
+
+ bool has_settings;
+
+ struct clk *input_clk;
+ int input_clk_num;
+ struct notifier_block input_clk_nb;
+ u32 input_clk_freq;
+
+ bool doubler_disabled;
+
+ struct idt24x_output clk[NUM_OUTPUTS];
+
+ unsigned int reg_dsm_int_8;
+ unsigned int reg_dsm_frac_20_16;
+ unsigned int reg_out_en_x;
+ unsigned int reg_out_mode_0_1;
+ unsigned int reg_out_mode_2_3;
+ unsigned int reg_qx_dis;
+ unsigned int reg_ns1_q0;
+ unsigned int reg_n_qx_17_16[3];
+ unsigned int reg_nfrac_qx_27_24[3];
+
+ struct idt24x_dividers divs;
+
+ struct dentry *debugfs_dirroot, *debugfs_fileaction, *debugfs_filei2c,
+ *debugfs_map;
+ char dbg_cache[DEBUGFS_BUFFER_LENGTH];
+ struct dentry *debugfs_fileqfreq[4];
+};
+
+#define to_idt24x_output(_hw) \
+ container_of(_hw, struct idt24x_output, hw)
+#define to_clk_idt24x_from_nb(_nb) \
+ container_of(_nb, struct clk_idt24x_chip, input_clk_nb)
+
+/**
+ * struct clk_register_offsets - register offsets for current context
+ * @oe_offset: offset for current output enable and mode
+ * @oe_mask: mask for current output enable
+ * @dis_mask: mask for current output disable
+ * @n_17_16_offset: offset for current output int divider (bits 17:16)
+ * @n_17_16_mask: mask for current output int divider (bits 17:16)
+ * @n_15_8_offset: offset for current output int divider (bits 15:8)
+ * @n_7_0_offset: offset for current output int divider (bits 7:0)
+ * @nfrac_27_24_offset: offset for current output frac divider (bits 27:24)
+ * @nfrac_27_24_mask: mask for current output frac divider (bits 27:24)
+ * @nfrac_23_16_offset: offset for current output frac divider (bits 23:16)
+ * @nfrac_15_8_offset: offset for current output frac divider (bits 15:8)
+ * @nfrac_7_0_offset: offset for current output frac divider (bits 7:0)
+ * @ns1_offset: offset for stage 1 div for output Q0
+ * @ns1_offset_mask: mask for stage 1 div for output Q0
+ * @ns2_15_8_offset: offset for stage 2 div for output Q0 (bits 15:8)
+ * @ns2_7_0_offset: offset for stage 2 div for output Q0 (bits 7:0)
+ */
+struct clk_register_offsets {
+ u16 oe_offset;
+ u8 oe_mask;
+ u8 dis_mask;
+
+ u16 n_17_16_offset;
+ u8 n_17_16_mask;
+ u16 n_15_8_offset;
+ u16 n_7_0_offset;
+ u16 nfrac_27_24_offset;
+ u8 nfrac_27_24_mask;
+ u16 nfrac_23_16_offset;
+ u16 nfrac_15_8_offset;
+ u16 nfrac_7_0_offset;
+
+ u16 ns1_offset;
+ u8 ns1_offset_mask;
+ u16 ns2_15_8_offset;
+ u16 ns2_7_0_offset;
+};
+
+int bits_to_shift(unsigned int mask);
+int i2cwritebulk(
+ struct i2c_client *client, struct regmap *map,
+ unsigned int reg, u8 val[], size_t val_count);
+int idt24x_get_offsets(
+ u8 output_num,
+ struct clk_register_offsets *offsets);
+int idt24x_set_frequency(struct clk_idt24x_chip *chip);
+
+#endif /* __IDT_CLK_IDT8T49N24X_CORE_H_ */
diff --git a/drivers/clk/idt/clk-idt8t49n24x-debugfs.c b/drivers/clk/idt/clk-idt8t49n24x-debugfs.c
new file mode 100644
index 000000000000..967a9df8701c
--- /dev/null
+++ b/drivers/clk/idt/clk-idt8t49n24x-debugfs.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/* clk-idt8t49n24x-debugfs.c - Debugfs support for 8T49N24x
+ *
+ * Copyright (C) 2018, Integrated Device Technology, Inc. <david.cater@idt.com>
+ *
+ * See https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+ * This program is distributed "AS IS" and WITHOUT ANY WARRANTY;
+ * including the implied warranties of MERCHANTABILITY, FITNESS FOR
+ * A PARTICULAR PURPOSE, or NON-INFRINGEMENT.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "clk-idt8t49n24x-debugfs.h"
+
+static struct clk_idt24x_chip *idt24x_chip_fordebugfs;
+
+static int idt24x_read_all_settings(
+ struct clk_idt24x_chip *chip, char *output_buffer, int count)
+{
+ u8 settings[NUM_CONFIG_REGISTERS];
+ int err = 0;
+ int x;
+
+ err = regmap_bulk_read(
+ chip->regmap, 0x0, settings, NUM_CONFIG_REGISTERS);
+ if (!err) {
+ output_buffer[0] = '\0';
+ for (x = 0; x < ARRAY_SIZE(settings); x++) {
+ char dbg[4];
+
+ if ((strlen(output_buffer) + 4) > count)
+ return -EINVAL;
+ sprintf(dbg, "%02x ", settings[x]);
+ strcat(output_buffer, dbg);
+ }
+ }
+ return err;
+}
+
+/**
+ * idt24x_debugfs_writer_action - Write handler for the "action" debugfs file.
+ * @fp: file pointer
+ * @user_buffer: buffer of text written to file
+ * @count: size of text in buffer
+ * @position: pass in current position, return new position
+ *
+ * Return: result of call to simple_write_to_buffer
+ *
+ * Use the "action" file as a trigger for setting all requested
+ * rates. The driver doesn't get any notification when the files
+ * representing the Qx outputs are written to, so something else is
+ * needed to notify the driver that the device should be updated.
+ *
+ * It doesn't matter what you write to the action debugs file. When the
+ * handler is called, the device will be updated.
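+ *
+ * E.g. (assuming debugfs is mounted at /sys/kernel/debug):
+ *
+ *	echo 1 > /sys/kernel/debug/idt24x/action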
+ */
+static ssize_t idt24x_debugfs_writer_action(
+ struct file *fp, const char __user *user_buffer,
+ size_t count, loff_t *position)
+{
+ int err = 0;
+ int x;
+ u32 freq;
+ bool needs_update = true;
+ struct i2c_client *client = idt24x_chip_fordebugfs->i2c_client;
+
+ if (count > DEBUGFS_BUFFER_LENGTH)
+ return -EINVAL;
+
+ for (x = 0; x < NUM_OUTPUTS; x++) {
+ freq = idt24x_chip_fordebugfs->clk[x].debug_freq;
+ if (freq) {
+ needs_update = false;
+ dev_dbg(&client->dev,
+ "%s: calling clk_set_rate with debug frequency for Q%i",
+ __func__, x);
+ err = clk_set_rate(
+ idt24x_chip_fordebugfs->clk[x].hw.clk, freq);
+ if (err) {
+ dev_err(&client->dev,
+ "error calling clk_set_rate for Q%i (%i)\n",
+ x, err);
+ }
+ } else {
+ needs_update = true;
+ idt24x_chip_fordebugfs->clk[x].requested = 0;
+ dev_dbg(&client->dev,
+ "%s: debug frequency for Q%i not set; make sure clock is disabled",
+ __func__, x);
+ }
+ }
+
+ if (needs_update) {
+ dev_dbg(&client->dev,
+ "%s: calling idt24x_set_frequency to ensure any clocks that should be disabled are turned off.",
+ __func__);
+ err = idt24x_set_frequency(idt24x_chip_fordebugfs);
+ if (err) {
+ dev_err(&idt24x_chip_fordebugfs->i2c_client->dev,
+ "%s: error calling idt24x_set_frequency (%i)\n",
+ __func__, err);
+ return err;
+ }
+ }
+
+ return simple_write_to_buffer(
+ idt24x_chip_fordebugfs->dbg_cache, DEBUGFS_BUFFER_LENGTH,
+ position, user_buffer, count);
+}
+
+/**
+ * idt24x_debugfs_reader_action - Read the "action" debugfs file.
+ * @fp: file pointer
+ * @user_buffer: buffer of text written to file
+ * @count: size of text in buffer
+ * @position: pass in current position, return new position
+ *
+ * Return: whatever was last written to the "action" debugfs file.
+ */
+static ssize_t idt24x_debugfs_reader_action(
+ struct file *fp, char __user *user_buffer, size_t count,
+ loff_t *position)
+{
+ return simple_read_from_buffer(
+ user_buffer, count, position, idt24x_chip_fordebugfs->dbg_cache,
+ DEBUGFS_BUFFER_LENGTH);
+}
+
+/**
+ * idt24x_debugfs_reader_map - display the current registers on the device
+ * @fp: file pointer
+ * @user_buffer: buffer of text written to file
+ * @count: size of text in buffer
+ * @position: pass in current position, return new position
+ *
+ * Reads the current register map from the attached chip via I2C and
+ * returns it.
+ *
+ * Return: result of call to simple_read_from_buffer
+ */
+static ssize_t idt24x_debugfs_reader_map(
+ struct file *fp, char __user *user_buffer, size_t count,
+ loff_t *position)
+{
+ int err = 0;
+	char *buf = kzalloc(5000, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+ dev_dbg(&idt24x_chip_fordebugfs->i2c_client->dev,
+ "calling idt24x_read_all_settings (count: %zu)\n", count);
+ err = idt24x_read_all_settings(idt24x_chip_fordebugfs, buf, 5000);
+ if (err) {
+ dev_err(&idt24x_chip_fordebugfs->i2c_client->dev,
+ "error calling idt24x_read_all_settings (%i)\n", err);
+		kfree(buf);
+		return 0;
+ }
+ /* TMGCDR-1456. We're returning 1 byte too few. */
+ err = simple_read_from_buffer(
+ user_buffer, count, position, buf, strlen(buf));
+ kfree(buf);
+ return err;
+}
+
+/**
+ * idt24x_handle_i2c_debug_token - process "token" written to the i2c file
+ * @dev: pointer to device structure
+ * @token: pointer to current char being examined
+ * @reg: pass in current register, or return register from token.
+ * @val: resulting array of bytes being parsed
+ * @nextbyte: position in val array to store next byte
+ *
+ * Utility function to operate on the current "token" (from within a
+ * space-delimited string) written to the i2c debugfs file. It will
+ * either be a register offset or a byte to be added to the val array.
+ * If it is added to the val array, auto-increment nextbyte.
+ *
+ * Return: 0 for success
+ */
+static int idt24x_handle_i2c_debug_token(
+ const struct device *dev, char *token, unsigned int *reg,
+ u8 val[], u16 *nextbyte)
+{
+ int err = 0;
+
+ dev_dbg(dev, "got token (%s)\n", token);
+ if (*reg == -1) {
+ err = kstrtouint(token, 16, reg);
+ if (!err)
+ dev_dbg(dev, "hex register address == 0x%x\n", *reg);
+ } else {
+ u8 temp;
+
+ err = kstrtou8(token, 16, &temp);
+ if (!err) {
+ dev_dbg(dev, "data byte == 0x%x\n", temp);
+ val[*nextbyte] = temp;
+ *nextbyte += 1;
+ }
+ }
+ if (err == -ERANGE)
+ dev_err(dev, "ERANGE error when parsing data\n");
+ else if (err == -EINVAL)
+ dev_err(dev, "EINVAL error when parsing data\n");
+ else if (err)
+ dev_err(dev, "error when parsing data: %i\n", err);
+ return err;
+}
+
+/**
+ * idt24x_debugfs_writer_i2c - debugfs handler for i2c file
+ * @fp: file pointer
+ * @user_buffer: buffer of text written to file
+ * @count: size of text in buffer
+ * @position: pass in current position, return new position
+ *
+ * Handler for the "i2c" debugfs file. Write to this file to write bytes
+ * via I2C to a particular offset.
+ *
+ * Usage: echo 006c 01 02 0D FF > i2c
+ *
+ * First 4 chars are the 2-byte i2c register offset. Then follow that
+ * with a sequence of 2-char bytes in hex format that you want to write
+ * starting at that offset.
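+ *
+ * E.g. (assuming debugfs is mounted at /sys/kernel/debug):
+ *
+ *	echo 006c 01 02 0D FF > /sys/kernel/debug/idt24x/i2c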
+ *
+ * Return: result of simple_write_to_buffer
+ */
+static ssize_t idt24x_debugfs_writer_i2c(struct file *fp,
+ const char __user *user_buffer,
+ size_t count, loff_t *position)
+{
+ int err = 0;
+ int x = 0;
+ int start = 0;
+ ssize_t written;
+ unsigned int reg = -1;
+ u8 val[WRITE_BLOCK_SIZE];
+ u16 nextbyte = 0;
+ char token[16];
+
+ if (count > DEBUGFS_BUFFER_LENGTH)
+ return -EINVAL;
+
+ written = simple_write_to_buffer(
+ idt24x_chip_fordebugfs->dbg_cache, DEBUGFS_BUFFER_LENGTH,
+ position, user_buffer, count);
+ if (written != count) {
+ dev_dbg(&idt24x_chip_fordebugfs->i2c_client->dev,
+ "write count != expected count");
+ return written;
+ }
+
+	for (x = 0; x < count; x++) {
+		/* Guard against tokens longer than the token buffer. */
+		if (x - start >= sizeof(token) - 1)
+			return -EINVAL;
+		token[x - start] = idt24x_chip_fordebugfs->dbg_cache[x];
+ if (idt24x_chip_fordebugfs->dbg_cache[x] == ' ') {
+ token[x - start] = '\0';
+ err = idt24x_handle_i2c_debug_token(
+ &idt24x_chip_fordebugfs->i2c_client->dev,
+ token, &reg, val, &nextbyte);
+ if (err)
+ break;
+ start = x + 1;
+ }
+ }
+
+ /* handle the last token */
+ if (!err) {
+ token[count - start] = '\0';
+ err = idt24x_handle_i2c_debug_token(
+ &idt24x_chip_fordebugfs->i2c_client->dev, token, &reg,
+ val, &nextbyte);
+ }
+
+ if (!err && reg != -1 && nextbyte > 0) {
+ err = i2cwritebulk(
+ idt24x_chip_fordebugfs->i2c_client,
+ idt24x_chip_fordebugfs->regmap,
+ reg, val, nextbyte);
+ if (err) {
+ dev_err(&idt24x_chip_fordebugfs->i2c_client->dev,
+ "error writing data chip (%i)\n", err);
+ return err;
+ }
+ dev_dbg(&idt24x_chip_fordebugfs->i2c_client->dev,
+ "successfully wrote i2c data to chip");
+ }
+
+ return written;
+}
+
+static const struct file_operations idt24x_fops_debug_action = {
+ .read = idt24x_debugfs_reader_action,
+ .write = idt24x_debugfs_writer_action,
+};
+
+static const struct file_operations idt24x_fops_debug_map = {
+	.read = idt24x_debugfs_reader_map,
+};
+
+static const struct file_operations idt24x_fops_debug_i2c = {
+ .write = idt24x_debugfs_writer_i2c,
+};
+
+/**
+ * idt24x_expose_via_debugfs - Set up all debugfs files
+ * @client: pointer to i2c_client structure
+ * @chip: Device data structure
+ *
+ * Sets up all debugfs files to use for debugging the driver.
+ * Return: error code. 0 if success or debugfs doesn't appear to be enabled.
+ */
+int idt24x_expose_via_debugfs(struct i2c_client *client,
+ struct clk_idt24x_chip *chip)
+{
+ int output_num;
+
+ /*
+	 * create root directory in /sys/kernel/debug
+ */
+ chip->debugfs_dirroot = debugfs_create_dir("idt24x", NULL);
+ if (!chip->debugfs_dirroot) {
+ /* debugfs probably not enabled. Don't fail the probe. */
+ return 0;
+ }
+
+ /*
+ * create files in the root directory. This requires read and
+ * write file operations
+ */
+ chip->debugfs_fileaction = debugfs_create_file(
+ "action", 0644, chip->debugfs_dirroot, NULL,
+ &idt24x_fops_debug_action);
+ if (!chip->debugfs_fileaction) {
+ dev_err(&client->dev,
+ "%s: error creating action file", __func__);
+		return -ENODEV;
+ }
+
+ chip->debugfs_map = debugfs_create_file(
+ "map", 0444, chip->debugfs_dirroot, NULL,
+ &idt24x_fops_debug_map);
+ if (!chip->debugfs_map) {
+ dev_err(&client->dev,
+ "%s: error creating map file", __func__);
+		return -ENODEV;
+ }
+
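+	/*
+	 * Per-output debugfs files q0-q3 hold the requested frequency in Hz.
+	 * E.g. (assuming debugfs is mounted at /sys/kernel/debug), request
+	 * 25 MHz on Q1 and then trigger the update via the action file:
+	 *
+	 *	echo 25000000 > /sys/kernel/debug/idt24x/q1
+	 *	echo 1 > /sys/kernel/debug/idt24x/action
+	 */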
+ for (output_num = 0; output_num < NUM_OUTPUTS; output_num++) {
+ char name[5];
+
+ sprintf(name, "q%d", output_num);
+ chip->debugfs_fileqfreq[output_num] = debugfs_create_u64(
+ name, 0644, chip->debugfs_dirroot,
+ &chip->clk[output_num].debug_freq);
+ if (!chip->debugfs_fileqfreq[output_num]) {
+ dev_err(&client->dev,
+ "%s: error creating %s debugfs file",
+ __func__, name);
+			return -ENODEV;
+ }
+ }
+
+ chip->debugfs_filei2c = debugfs_create_file(
+ "i2c", 0644, chip->debugfs_dirroot, NULL,
+ &idt24x_fops_debug_i2c);
+ if (!chip->debugfs_filei2c) {
+ dev_err(&client->dev,
+ "%s: error creating i2c file", __func__);
+		return -ENODEV;
+ }
+
+ dev_dbg(&client->dev, "%s: success", __func__);
+ idt24x_chip_fordebugfs = chip;
+ return 0;
+}
+
+void idt24x_cleanup_debugfs(struct clk_idt24x_chip *chip)
+{
+ debugfs_remove_recursive(chip->debugfs_dirroot);
+}
diff --git a/drivers/clk/idt/clk-idt8t49n24x-debugfs.h b/drivers/clk/idt/clk-idt8t49n24x-debugfs.h
new file mode 100644
index 000000000000..673016c8e747
--- /dev/null
+++ b/drivers/clk/idt/clk-idt8t49n24x-debugfs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* clk-idt8t49n24x-debugfs.h - Debugfs support for 8T49N24x
+ *
+ * Copyright (C) 2018, Integrated Device Technology, Inc. <david.cater@idt.com>
+ *
+ * See https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+ * This program is distributed "AS IS" and WITHOUT ANY WARRANTY;
+ * including the implied warranties of MERCHANTABILITY, FITNESS FOR
+ * A PARTICULAR PURPOSE, or NON-INFRINGEMENT.
+ */
+
+#ifndef __IDT_CLK_IDT8T49N24X_DEBUGFS_H_
+#define __IDT_CLK_IDT8T49N24X_DEBUGFS_H_
+
+#include "clk-idt8t49n24x-core.h"
+
+int idt24x_expose_via_debugfs(struct i2c_client *client,
+ struct clk_idt24x_chip *chip);
+void idt24x_cleanup_debugfs(struct clk_idt24x_chip *chip);
+
+#endif /* __IDT_CLK_IDT8T49N24X_DEBUGFS_H_ */
diff --git a/drivers/clk/idt/clk-idt8t49n24x.c b/drivers/clk/idt/clk-idt8t49n24x.c
new file mode 100644
index 000000000000..79289a5a1934
--- /dev/null
+++ b/drivers/clk/idt/clk-idt8t49n24x.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0
+/* clk-idt8t49n24x.c - Program 8T49N24x settings via I2C.
+ *
+ * Copyright (C) 2018, Integrated Device Technology, Inc. <david.cater@idt.com>
+ *
+ * See https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+ * This program is distributed "AS IS" and WITHOUT ANY WARRANTY;
+ * including the implied warranties of MERCHANTABILITY, FITNESS FOR
+ * A PARTICULAR PURPOSE, or NON-INFRINGEMENT.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "clk-idt8t49n24x-core.h"
+#include "clk-idt8t49n24x-debugfs.h"
+
+#define OUTPUTMODE_HIGHZ 0
+#define OUTPUTMODE_LVDS 2
+#define IDT24x_MIN_FREQ 1000000L
+#define IDT24x_MAX_FREQ 300000000L
+#define DRV_NAME "idt8t49n24x"
+
+enum clk_idt24x_variant {
+ idt24x
+};
+
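+/*
+ * Isolate the bits selected by @mask and shift them down to bit 0.
+ * E.g., mask_and_shift(0x28, 0x18) == 1 (bits [4:3] of 0x28 are 0b01).
+ */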
+static u32 mask_and_shift(u32 value, u8 mask)
+{
+ value &= mask;
+ return value >> bits_to_shift(mask);
+}
+
+/**
+ * idt24x_set_output_mode - Set the mode for a particular clock
+ *		output in the register.
+ * @reg:	The current register value before setting the mode.
+ * @mask:	The bitmask identifying where in the register the
+ *		output mode is stored.
+ * @mode:	The mode to set.
+ *
+ * If the output is currently in high-impedance mode, switch it to
+ * @mode; otherwise the register value is returned unchanged.
+ *
+ * Return: the new register value with the specified mode bits set.
+ */
+static u32 idt24x_set_output_mode(u32 reg, u8 mask, u8 mode)
+{
+	if (((reg & mask) >> bits_to_shift(mask)) == OUTPUTMODE_HIGHZ) {
+		reg = reg & ~mask;
+		reg |= mode << bits_to_shift(mask);
+	}
+	return reg;
+}
+
+/**
+ * idt24x_read_from_hw - Get the current values on the hw
+ * @chip: Device data structure
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int idt24x_read_from_hw(struct clk_idt24x_chip *chip)
+{
+ int err;
+ struct i2c_client *client = chip->i2c_client;
+ u32 tmp, tmp2;
+ u8 output;
+
+ err = regmap_read(chip->regmap, IDT24x_REG_DSM_INT_8,
+ &chip->reg_dsm_int_8);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading IDT24x_REG_DSM_INT_8: %i",
+ __func__, err);
+ return err;
+ }
+ dev_dbg(&client->dev, "%s: reg_dsm_int_8: 0x%x",
+ __func__, chip->reg_dsm_int_8);
+
+	err = regmap_read(chip->regmap, IDT24x_REG_DSMFRAC_20_16,
+			  &chip->reg_dsm_frac_20_16);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error reading IDT24x_REG_DSMFRAC_20_16: %i",
+ __func__, err);
+ return err;
+ }
+ dev_dbg(&client->dev, "%s: reg_dsm_frac_20_16: 0x%x",
+ __func__, chip->reg_dsm_frac_20_16);
+
+ err = regmap_read(chip->regmap, IDT24x_REG_OUTEN, &chip->reg_out_en_x);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading IDT24x_REG_OUTEN: %i",
+ __func__, err);
+ return err;
+ }
+ dev_dbg(&client->dev, "%s: reg_out_en_x: 0x%x",
+ __func__, chip->reg_out_en_x);
+
+ err = regmap_read(chip->regmap, IDT24x_REG_OUTMODE0_1, &tmp);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading IDT24x_REG_OUTMODE0_1: %i",
+ __func__, err);
+ return err;
+ }
+
+ tmp2 = idt24x_set_output_mode(
+ tmp, IDT24x_REG_OUTMODE0_MASK, OUTPUTMODE_LVDS);
+ tmp2 = idt24x_set_output_mode(
+ tmp2, IDT24x_REG_OUTMODE1_MASK, OUTPUTMODE_LVDS);
+ dev_dbg(&client->dev,
+ "%s: reg_out_mode_0_1 original: 0x%x. After setting OUT0/1 to LVDS if necessary: 0x%x",
+ __func__, tmp, tmp2);
+ chip->reg_out_mode_0_1 = tmp2;
+
+ err = regmap_read(chip->regmap, IDT24x_REG_OUTMODE2_3, &tmp);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading IDT24x_REG_OUTMODE2_3: %i",
+ __func__, err);
+ return err;
+ }
+
+ tmp2 = idt24x_set_output_mode(
+ tmp, IDT24x_REG_OUTMODE2_MASK, OUTPUTMODE_LVDS);
+ tmp2 = idt24x_set_output_mode(
+ tmp2, IDT24x_REG_OUTMODE3_MASK, OUTPUTMODE_LVDS);
+ dev_dbg(&client->dev,
+ "%s: reg_out_mode_2_3 original: 0x%x. After setting OUT2/3 to LVDS if necessary: 0x%x",
+ __func__, tmp, tmp2);
+ chip->reg_out_mode_2_3 = tmp2;
+
+ err = regmap_read(chip->regmap, IDT24x_REG_Q_DIS, &chip->reg_qx_dis);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading IDT24x_REG_Q_DIS: %i",
+ __func__, err);
+ return err;
+ }
+ dev_dbg(&client->dev, "%s: reg_qx_dis: 0x%x",
+ __func__, chip->reg_qx_dis);
+
+ err = regmap_read(chip->regmap, IDT24x_REG_NS1_Q0, &chip->reg_ns1_q0);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading IDT24x_REG_NS1_Q0: %i",
+ __func__, err);
+ return err;
+ }
+ dev_dbg(&client->dev, "%s: reg_ns1_q0: 0x%x",
+ __func__, chip->reg_ns1_q0);
+
+ for (output = 1; output <= 3; output++) {
+ struct clk_register_offsets offsets;
+
+ err = idt24x_get_offsets(output, &offsets);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error calling idt24x_get_offsets: %i",
+ __func__, err);
+ return err;
+ }
+
+ err = regmap_read(chip->regmap, offsets.n_17_16_offset,
+ &chip->reg_n_qx_17_16[output - 1]);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading n_17_16_offset for output %d (offset: 0x%x): %i",
+ __func__, output, offsets.n_17_16_offset, err);
+ return err;
+ }
+ dev_dbg(&client->dev,
+ "%s: reg_n_qx_17_16[Q%u]: 0x%x",
+ __func__, output, chip->reg_n_qx_17_16[output - 1]);
+
+ err = regmap_read(chip->regmap, offsets.nfrac_27_24_offset,
+ &chip->reg_nfrac_qx_27_24[output - 1]);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading nfrac_27_24_offset for output %d (offset: 0x%x): %i",
+ __func__, output,
+ offsets.nfrac_27_24_offset, err);
+ return err;
+ }
+ dev_dbg(&client->dev,
+ "%s: reg_nfrac_qx_27_24[Q%u]: 0x%x",
+ __func__, output,
+ chip->reg_nfrac_qx_27_24[output - 1]);
+ }
+
+ dev_info(&client->dev,
+ "%s: initial values read from chip successfully",
+ __func__);
+
+ /* Also read DBL_DIS to determine whether the doubler is disabled. */
+ err = regmap_read(chip->regmap, IDT24x_REG_DBL_DIS, &tmp);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading IDT24x_REG_DBL_DIS: %i",
+ __func__, err);
+ return err;
+ }
+ chip->doubler_disabled = mask_and_shift(tmp, IDT24x_REG_DBL_DIS_MASK);
+ dev_dbg(&client->dev, "%s: doubler_disabled: %d",
+ __func__, chip->doubler_disabled);
+
+ return 0;
+}
+
+/**
+ * idt24x_set_rate - Sets the specified output clock to the specified rate.
+ * @hw: clk_hw struct that identifies the specific output clock.
+ * @rate: the rate (in Hz) for the specified clock.
+ * @parent_rate: the rate of the parent clock (e.g., the VCO
+ *		feeding the output)
+ *
+ * This function calls idt24x_set_frequency, which means it will
+ * calculate dividers for all requested outputs and update the attached
+ * device (issue I2C commands to update the registers).
+ *
+ * Return: 0 on success.
+ */
+static int idt24x_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int err = 0;
+
+ /*
+ * hw->clk is the pointer to the specific output clock the user is
+ * requesting. We use hw to get back to the output structure for
+ * the output clock. Set the requested rate in the output structure.
+ * Note that container_of cannot be used to find the device structure
+ * (clk_idt24x_chip) from clk_hw, because clk_idt24x_chip has an array
+ * of idt24x_output structs. That is why it is necessary to use
+ * output->chip to access the device structure.
+ */
+ struct idt24x_output *output = to_idt24x_output(hw);
+ struct i2c_client *client = output->chip->i2c_client;
+
+ if (rate < output->chip->min_freq || rate > output->chip->max_freq) {
+ dev_err(&client->dev,
+ "requested frequency (%luHz) is out of range\n", rate);
+ return -EINVAL;
+ }
+
+ /*
+ * Set the requested frequency in the output data structure, and then
+ * call idt24x_set_frequency. idt24x_set_frequency considers all
+ * requested frequencies when deciding on a vco frequency and
+ * calculating dividers.
+ */
+ output->requested = rate;
+
+ /*
+ * Also set in the memory location used by the debugfs file
+ * that exposes the output clock frequency. That allows querying
+ * the current rate via debugfs.
+ */
+ output->debug_freq = rate;
+
+ dev_info(&client->dev,
+ "%s. calling idt24x_set_frequency for Q%u. rate: %lu",
+ __func__, output->index, rate);
+ err = idt24x_set_frequency(output->chip);
+
+ if (err != 0)
+ dev_err(&client->dev, "error calling set_frequency: %d", err);
+
+ return err;
+}
+
+/**
+ * idt24x_round_rate - get valid rate that is closest to the requested rate
+ * @hw: clk_hw struct that identifies the specific output clock.
+ * @rate: the rate (in Hz) for the specified clock.
+ * @parent_rate: the rate of the parent clock (e.g., the VCO feeding
+ *		the output). This is an in/out parameter: if the driver
+ *		supports a parent clock for the output, set it to the
+ *		parent rate (e.g., the VCO frequency) implied by the
+ *		rounded rate.
+ *
+ * Returns the closest rate to the requested rate actually supported by the
+ * chip.
+ *
+ * Return: adjusted rate
+ */
+static long idt24x_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /*
+ * The chip has fractional output dividers, so assume it
+ * can provide the requested rate.
+ *
+ * TODO: figure out the closest rate that chip can support
+ * within a low error threshold and return that rate.
+ */
+ return rate;
+}
+
+/**
+ * idt24x_recalc_rate - return the frequency being provided by the clock.
+ * @hw: clk_hw struct that identifies the specific output clock.
+ * @parent_rate: the rate of the parent clock (e.g., the VCO
+ *		feeding the output)
+ *
+ * Reports the frequency currently being provided by the clock. Without
+ * this function, the clock would be initialized to 0 by default. The
+ * clock framework calls this at startup to find out the current rate,
+ * so it can determine when .set_rate is actually changing the
+ * frequency.
+ *
+ * Return: the frequency of the specified clock.
+ */
+static unsigned long idt24x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct idt24x_output *output = to_idt24x_output(hw);
+
+ return output->requested;
+}
+
+/*
+ * Note that .prepare and .unprepare appear to be used more in Gates.
+ * They do not appear to be necessary for this device.
+ * Instead, update the device when .set_rate is called.
+ */
+static const struct clk_ops idt24x_clk_ops = {
+ .recalc_rate = idt24x_recalc_rate,
+ .round_rate = idt24x_round_rate,
+ .set_rate = idt24x_set_rate,
+};
+
+static bool idt24x_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ return false;
+}
+
+static bool idt24x_regmap_is_writeable(struct device *dev, unsigned int reg)
+{
+ return true;
+}
+
+static const struct regmap_config idt24x_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = 0xff,
+ .writeable_reg = idt24x_regmap_is_writeable,
+ .volatile_reg = idt24x_regmap_is_volatile,
+};
+
+/**
+ * idt24x_clk_notifier_cb - Clock rate change callback
+ * @nb: Pointer to notifier block
+ * @event: Notification reason
+ * @data: Pointer to notification data object
+ *
+ * This function is called when the input clock frequency changes.
+ * The callback checks whether a valid bus frequency can be generated after the
+ * change. If so, the change is acknowledged, otherwise the change is aborted.
+ * New dividers are written to the HW in the pre- or post change notification
+ * depending on the scaling direction.
+ *
+ * Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
+ * to acknowledge the change, NOTIFY_DONE if the notification is
+ * considered irrelevant.
+ */
+static int idt24x_clk_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+ struct clk_idt24x_chip *chip = to_clk_idt24x_from_nb(nb);
+ int err = 0;
+
+ dev_info(&chip->i2c_client->dev,
+ "%s: input frequency changed: %lu Hz. event: %lu",
+ __func__, ndata->new_rate, event);
+
+ switch (event) {
+ case PRE_RATE_CHANGE: {
+ dev_dbg(&chip->i2c_client->dev, "PRE_RATE_CHANGE\n");
+ return NOTIFY_OK;
+ }
+ case POST_RATE_CHANGE:
+ chip->input_clk_freq = ndata->new_rate;
+ /*
+ * Can't call clock API clk_set_rate here; I believe
+ * it will be ignored if the rate is the same as we
+ * set previously. Need to call our internal function.
+ */
+ dev_dbg(&chip->i2c_client->dev,
+ "POST_RATE_CHANGE. Calling idt24x_set_frequency\n");
+ err = idt24x_set_frequency(chip);
+ if (err)
+ dev_err(&chip->i2c_client->dev,
+ "error calling idt24x_set_frequency (%i)\n",
+ err);
+ return NOTIFY_OK;
+ case ABORT_RATE_CHANGE:
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct clk_hw *of_clk_idt24x_get(
+ struct of_phandle_args *clkspec, void *_data)
+{
+ struct clk_idt24x_chip *chip = _data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= ARRAY_SIZE(chip->clk)) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &chip->clk[idx].hw;
+}
+
+/**
+ * idt24x_probe - main entry point for ccf driver
+ * @client: pointer to i2c_client structure
+ * @id: pointer to i2c_device_id structure
+ *
+ * Main entry point function that gets called to initialize the driver.
+ *
+ * Return: 0 for success.
+ */
+static int idt24x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct clk_idt24x_chip *chip;
+ struct clk_init_data init;
+
+ int err = 0;
+ int x;
+ char buf[6];
+
+ dev_info(&client->dev, "%s", __func__);
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ init.ops = &idt24x_clk_ops;
+ init.flags = 0;
+ init.num_parents = 0;
+ chip->i2c_client = client;
+
+ chip->min_freq = IDT24x_MIN_FREQ;
+ chip->max_freq = IDT24x_MAX_FREQ;
+
+ for (x = 0; x < NUM_INPUTS + 1; x++) {
+ char name[12];
+
+ sprintf(name, x == NUM_INPUTS ? "input-xtal" : "input-clk%i",
+ x);
+ dev_dbg(&client->dev, "attempting to get %s", name);
+ chip->input_clk = devm_clk_get(&client->dev, name);
+ if (IS_ERR(chip->input_clk)) {
+ err = PTR_ERR(chip->input_clk);
+ /*
+ * TODO: Handle EPROBE_DEFER error, which indicates
+ * that the input_clk isn't available now but may be
+ * later when the appropriate module is loaded.
+ */
+ } else {
+ err = 0;
+ chip->input_clk_num = x;
+ break;
+ }
+ }
+
+ if (err) {
+ dev_err(&client->dev, "Unable to get input clock (%u).", err);
+ chip->input_clk = NULL;
+ return err;
+ }
+
+ chip->input_clk_freq = clk_get_rate(chip->input_clk);
+ dev_dbg(&client->dev, "Got input-freq from input-clk in device tree: %uHz",
+ chip->input_clk_freq);
+
+ chip->input_clk_nb.notifier_call = idt24x_clk_notifier_cb;
+ if (clk_notifier_register(chip->input_clk, &chip->input_clk_nb))
+ dev_warn(&client->dev,
+ "Unable to register clock notifier for input_clk.");
+
+ dev_dbg(&client->dev, "%s: about to read settings: %zu",
+ __func__, ARRAY_SIZE(chip->settings));
+
+ err = of_property_read_u8_array(
+ client->dev.of_node, "settings", chip->settings,
+ ARRAY_SIZE(chip->settings));
+ if (!err) {
+ dev_dbg(&client->dev, "settings property specified in DT");
+ chip->has_settings = true;
+ } else {
+ if (err == -EOVERFLOW) {
+ dev_alert(&client->dev,
+ "EOVERFLOW error trying to read the settings. ARRAY_SIZE: %zu",
+ ARRAY_SIZE(chip->settings));
+ return err;
+ }
+ dev_dbg(&client->dev,
+ "settings property not specified in DT (or there was an error that can be ignored: %i). The settings property is optional.",
+ err);
+ }
+
+ /*
+ * Requested output frequencies cannot be specified in the DT.
+ * Either a consumer needs to use the clock API to request the rate,
+ * or use debugfs to set the rate from user space. Use clock-names in
+ * DT to specify the output clock.
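+	 *
+	 * Illustrative consumer fragment (names are examples only); the
+	 * single clock cell selects the output index, Q0-Q3:
+	 *
+	 *	clocks = <&idt8t49n24x 1>;	/* Q1 */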
+ */
+
+ chip->regmap = devm_regmap_init_i2c(client, &idt24x_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ dev_err(&client->dev, "failed to allocate register map\n");
+ return PTR_ERR(chip->regmap);
+ }
+
+ dev_dbg(&client->dev, "%s: call i2c_set_clientdata", __func__);
+ i2c_set_clientdata(client, chip);
+
+ if (chip->has_settings) {
+ /*
+ * A raw settings array was specified in the DT. Write the
+ * settings to the device immediately.
+ */
+ err = i2cwritebulk(
+ chip->i2c_client, chip->regmap, 0, chip->settings,
+ ARRAY_SIZE(chip->settings));
+ if (err) {
+ dev_err(&client->dev,
+ "error writing all settings to chip (%i)\n",
+ err);
+ return err;
+ }
+ dev_dbg(&client->dev, "successfully wrote full settings array");
+ }
+
+ /*
+ * Whether or not settings were written to the device, read all
+ * current values from the hw.
+ */
+ dev_dbg(&client->dev, "read from HW");
+ err = idt24x_read_from_hw(chip);
+ if (err) {
+ dev_err(&client->dev,
+ "failed calling idt24x_read_from_hw (%i)\n", err);
+ return err;
+ }
+
+ /* Create all 4 clocks */
+ for (x = 0; x < NUM_OUTPUTS; x++) {
+		init.name = kasprintf(
+			GFP_KERNEL, "%s.Q%i", client->dev.of_node->name, x);
+		if (!init.name)
+			return -ENOMEM;
+ chip->clk[x].chip = chip;
+ chip->clk[x].hw.init = &init;
+ chip->clk[x].index = x;
+ err = devm_clk_hw_register(&client->dev, &chip->clk[x].hw);
+ kfree(init.name); /* clock framework made a copy of the name */
+ if (err) {
+ dev_err(&client->dev, "clock registration failed\n");
+ return err;
+ }
+ dev_dbg(&client->dev, "successfully registered Q%i", x);
+ }
+
+ if (err) {
+ dev_err(&client->dev, "clock registration failed\n");
+ return err;
+ }
+
+ err = of_clk_add_hw_provider(
+ client->dev.of_node, of_clk_idt24x_get, chip);
+ if (err) {
+ dev_err(&client->dev, "unable to add clk provider\n");
+ return err;
+ }
+
+ err = idt24x_expose_via_debugfs(client, chip);
+ if (err) {
+ dev_err(&client->dev,
+ "error calling idt24x_expose_via_debugfs: %i\n", err);
+ return err;
+ }
+
+ if (chip->input_clk_num == NUM_INPUTS)
+ sprintf(buf, "XTAL");
+ else
+ sprintf(buf, "CLK%i", chip->input_clk_num);
+ dev_info(&client->dev, "probe success. input freq: %uHz (%s), settings string? %s\n",
+ chip->input_clk_freq, buf,
+ chip->has_settings ? "true" : "false");
+ return 0;
+}
+
+static int idt24x_remove(struct i2c_client *client)
+{
+	struct clk_idt24x_chip *chip = i2c_get_clientdata(client);
+
+ dev_info(&client->dev, "%s", __func__);
+ of_clk_del_provider(client->dev.of_node);
+ idt24x_cleanup_debugfs(chip);
+
+	if (chip->input_clk)
+ clk_notifier_unregister(
+ chip->input_clk, &chip->input_clk_nb);
+ return 0;
+}
+
+static const struct i2c_device_id idt24x_id[] = {
+ { "idt8t49n24x", idt24x },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, idt24x_id);
+
+static const struct of_device_id idt24x_of_match[] = {
+ { .compatible = "idt,idt8t49n241" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, idt24x_of_match);
+
+static struct i2c_driver idt24x_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = idt24x_of_match,
+ },
+ .probe = idt24x_probe,
+ .remove = idt24x_remove,
+ .id_table = idt24x_id,
+};
+
+module_i2c_driver(idt24x_driver);
+
+MODULE_DESCRIPTION("8T49N24x ccf driver");
+MODULE_AUTHOR("David Cater <david.cater@idt.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 1ac0c7990392..087170a9b148 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -30,5 +30,6 @@ config CLK_IMX8QXP
bool "IMX8QXP SCU Clock"
depends on ARCH_MXC && IMX_SCU && ARM64
select MXC_CLK_SCU
+ select MXC_CLK
help
Build the driver for IMX8QXP SCU based clocks.
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index d3486ee79ab5..78122188ac39 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -95,7 +95,7 @@ static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
int prediv_value;
int div_value;
int ret;
- u32 val;
+ u32 orig, val;
ret = imx8m_clk_composite_compute_dividers(rate, parent_rate,
&prediv_value, &div_value);
@@ -104,13 +104,15 @@ static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
spin_lock_irqsave(divider->lock, flags);
- val = readl(divider->reg);
- val &= ~((clk_div_mask(divider->width) << divider->shift) |
- (clk_div_mask(PCG_DIV_WIDTH) << PCG_DIV_SHIFT));
+ orig = readl(divider->reg);
+ val = orig & ~((clk_div_mask(divider->width) << divider->shift) |
+ (clk_div_mask(PCG_DIV_WIDTH) << PCG_DIV_SHIFT));
val |= (u32)(prediv_value - 1) << divider->shift;
val |= (u32)(div_value - 1) << PCG_DIV_SHIFT;
- writel(val, divider->reg);
+
+ if (val != orig)
+ writel(val, divider->reg);
spin_unlock_irqrestore(divider->lock, flags);
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index c4685c01929a..579b638b09b8 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -287,13 +287,13 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
hws[IMX6SX_CLK_SSI3_SEL] = imx_clk_hw_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SX_CLK_SSI2_SEL] = imx_clk_hw_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SX_CLK_SSI1_SEL] = imx_clk_hw_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
- hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux_flags("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels), CLK_SET_RATE_PARENT);
+ hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels));
hws[IMX6SX_CLK_PERCLK_SEL] = imx_clk_hw_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
hws[IMX6SX_CLK_VID_SEL] = imx_clk_hw_mux("vid_sel", base + 0x20, 21, 3, vid_sels, ARRAY_SIZE(vid_sels));
hws[IMX6SX_CLK_ESAI_SEL] = imx_clk_hw_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_CAN_SEL] = imx_clk_hw_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
hws[IMX6SX_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
- hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux_flags("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels), CLK_SET_RATE_PARENT);
+ hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels));
hws[IMX6SX_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_AUDIO_SEL] = imx_clk_hw_mux("audio_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_ENET_PRE_SEL] = imx_clk_hw_mux("enet_pre_sel", base + 0x34, 15, 3, enet_pre_sels, ARRAY_SIZE(enet_pre_sels));
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 172589e94f60..ec34c5241636 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -26,73 +26,6 @@ static u32 share_count_disp;
static u32 share_count_pdm;
static u32 share_count_nand;
-static const struct imx_pll14xx_rate_table imx8mm_pll1416x_tbl[] = {
- PLL_1416X_RATE(1800000000U, 225, 3, 0),
- PLL_1416X_RATE(1600000000U, 200, 3, 0),
- PLL_1416X_RATE(1200000000U, 300, 3, 1),
- PLL_1416X_RATE(1000000000U, 250, 3, 1),
- PLL_1416X_RATE(800000000U, 200, 3, 1),
- PLL_1416X_RATE(750000000U, 250, 2, 2),
- PLL_1416X_RATE(700000000U, 350, 3, 2),
- PLL_1416X_RATE(600000000U, 300, 3, 2),
-};
-
-static const struct imx_pll14xx_rate_table imx8mm_audiopll_tbl[] = {
- PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
- PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
-};
-
-static const struct imx_pll14xx_rate_table imx8mm_videopll_tbl[] = {
- PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
- PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
-};
-
-static const struct imx_pll14xx_rate_table imx8mm_drampll_tbl[] = {
- PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
-};
-
-static struct imx_pll14xx_clk imx8mm_audio_pll = {
- .type = PLL_1443X,
- .rate_table = imx8mm_audiopll_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_audiopll_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_video_pll = {
- .type = PLL_1443X,
- .rate_table = imx8mm_videopll_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_videopll_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_dram_pll = {
- .type = PLL_1443X,
- .rate_table = imx8mm_drampll_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_drampll_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_arm_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mm_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_gpu_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mm_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_vpu_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mm_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_sys_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mm_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
-};
-
static const char *pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
static const char *audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
static const char *audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
@@ -396,16 +329,16 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
clks[IMX8MM_SYS_PLL2_REF_SEL] = imx_clk_mux("sys_pll2_ref_sel", base + 0x104, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MM_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MM_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx8mm_audio_pll);
- clks[IMX8MM_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx8mm_audio_pll);
- clks[IMX8MM_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx8mm_video_pll);
- clks[IMX8MM_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx8mm_dram_pll);
- clks[IMX8MM_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx8mm_gpu_pll);
- clks[IMX8MM_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx8mm_vpu_pll);
- clks[IMX8MM_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx8mm_arm_pll);
- clks[IMX8MM_SYS_PLL1] = imx_clk_pll14xx("sys_pll1", "sys_pll1_ref_sel", base + 0x94, &imx8mm_sys_pll);
- clks[IMX8MM_SYS_PLL2] = imx_clk_pll14xx("sys_pll2", "sys_pll2_ref_sel", base + 0x104, &imx8mm_sys_pll);
- clks[IMX8MM_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx8mm_sys_pll);
+ clks[IMX8MM_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
+ clks[IMX8MM_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
+ clks[IMX8MM_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
+ clks[IMX8MM_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_pll);
+ clks[IMX8MM_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
+ clks[IMX8MM_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
+ clks[IMX8MM_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
+ clks[IMX8MM_SYS_PLL1] = imx_clk_pll14xx("sys_pll1", "sys_pll1_ref_sel", base + 0x94, &imx_1416x_pll);
+ clks[IMX8MM_SYS_PLL2] = imx_clk_pll14xx("sys_pll2", "sys_pll2_ref_sel", base + 0x104, &imx_1416x_pll);
+ clks[IMX8MM_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
/* PLL bypass out */
clks[IMX8MM_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 882b42efd258..9d33321c89bd 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -189,27 +189,27 @@ static const char * const imx8mn_disp_pixel_sels[] = {"osc_24m", "video_pll1_out
"sys_pll3_out", "clk_ext4", };
static const char * const imx8mn_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "video_pll1_out", "sys_pll1_133m", "dummy",
"clk_ext3", "clk_ext4", };
static const char * const imx8mn_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "video_pll1_out", "sys_pll1_133m", "dummy",
"clk_ext3", "clk_ext4", };
static const char * const imx8mn_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "video_pll1_out", "sys_pll1_133m", "dummy",
"clk_ext2", "clk_ext3", };
static const char * const imx8mn_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "video_pll1_out", "sys_pll1_133m", "dummy",
"clk_ext3", "clk_ext4", };
static const char * const imx8mn_sai7_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "video_pll1_out", "sys_pll1_133m", "dummy",
"clk_ext3", "clk_ext4", };
static const char * const imx8mn_spdif1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "video_pll1_out", "sys_pll1_133m", "dummy",
"clk_ext2", "clk_ext3", };
static const char * const imx8mn_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m",
@@ -582,7 +582,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
clks[IMX8MN_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
clks[IMX8MN_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
clks[IMX8MN_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
- clks[IMX8MN_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_core_ref", base + 0x44d0, 0);
+ clks[IMX8MN_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
clks[IMX8MN_CLK_GPU_CORE_ROOT] = imx_clk_gate4("gpu_core_root_clk", "gpu_core_div", base + 0x44f0, 0);
clks[IMX8MN_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
clks[IMX8MN_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index 047f1d8fe323..c43e9653b415 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -41,6 +41,36 @@ struct clk_pll14xx {
#define to_clk_pll14xx(_hw) container_of(_hw, struct clk_pll14xx, hw)
+const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
+ PLL_1416X_RATE(1800000000U, 225, 3, 0),
+ PLL_1416X_RATE(1600000000U, 200, 3, 0),
+ PLL_1416X_RATE(1200000000U, 300, 3, 1),
+ PLL_1416X_RATE(1000000000U, 250, 3, 1),
+ PLL_1416X_RATE(800000000U, 200, 3, 1),
+ PLL_1416X_RATE(750000000U, 250, 2, 2),
+ PLL_1416X_RATE(700000000U, 350, 3, 2),
+ PLL_1416X_RATE(600000000U, 300, 3, 2),
+};
+
+const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
+ PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
+ PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
+ PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
+ PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
+};
+
+struct imx_pll14xx_clk imx_1443x_pll = {
+ .type = PLL_1443X,
+ .rate_table = imx_pll1443x_tbl,
+ .rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
+};
+
+struct imx_pll14xx_clk imx_1416x_pll = {
+ .type = PLL_1416X,
+ .rate_table = imx_pll1416x_tbl,
+ .rate_count = ARRAY_SIZE(imx_pll1416x_tbl),
+};
+
static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
struct clk_pll14xx *pll, unsigned long rate)
{
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 6fe64ff8ffa1..30ddbc1ced2e 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -50,6 +50,9 @@ struct imx_pll14xx_clk {
int flags;
};
+extern struct imx_pll14xx_clk imx_1416x_pll;
+extern struct imx_pll14xx_clk imx_1443x_pll;
+
#define imx_clk_cpu(name, parent_name, div, mux, pll, step) \
to_clk(imx_clk_hw_cpu(name, parent_name, div, mux, pll, step))
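The imx8mm, clk-pll14xx.c and clk.h hunks above deduplicate the per-SoC PLL1416x/PLL1443x rate tables into two shared instances exported from clk-pll14xx.c. For reference, a minimal standalone sketch of the lookup that imx_get_pll_settings() performs over such a table (types and names here are illustrative, not the driver's own):

/* Hedged sketch: exact-match lookup over a PLL rate table. */
struct rate_entry {
	unsigned long rate;
	unsigned int mdiv, pdiv, sdiv;
};

static const struct rate_entry *find_rate(const struct rate_entry *tbl,
					  int count, unsigned long rate)
{
	int i;

	for (i = 0; i < count; i++)
		if (tbl[i].rate == rate)
			return &tbl[i];	/* exact match only */
	return NULL;			/* caller rejects unsupported rates */
}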
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index 926696fba3f4..d8739871e114 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -100,15 +100,11 @@ static bool ingenic_tcu_enable_regs(struct clk_hw *hw)
bool enabled = false;
/*
- * If the SoC has no global TCU clock, we must ungate the channel's
- * clock to be able to access its registers.
- * If we have a TCU clock, it will be enabled automatically as it has
- * been attached to the regmap.
+ * According to the programming manual, a timer channel's registers can
+ * only be accessed when the channel's stop bit is clear.
*/
- if (!tcu->clk) {
- enabled = !!ingenic_tcu_is_enabled(hw);
- regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit));
- }
+ enabled = !!ingenic_tcu_is_enabled(hw);
+ regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit));
return enabled;
}
@@ -119,8 +115,7 @@ static void ingenic_tcu_disable_regs(struct clk_hw *hw)
const struct ingenic_tcu_clk_info *info = tcu_clk->info;
struct ingenic_tcu *tcu = tcu_clk->tcu;
- if (!tcu->clk)
- regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit));
+ regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit));
}
static u8 ingenic_tcu_get_parent(struct clk_hw *hw)
diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
index d59a7621bb20..6bbdd4705d71 100644
--- a/drivers/clk/keystone/pll.c
+++ b/drivers/clk/keystone/pll.c
@@ -209,7 +209,7 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
}
clk = clk_register_pll(NULL, node->name, parent_name, pll_data);
- if (clk) {
+ if (!IS_ERR_OR_NULL(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
return;
}
@@ -281,12 +281,13 @@ static void __init of_pll_div_clk_init(struct device_node *node)
clk = clk_register_divider(NULL, clk_name, parent_name, 0, reg, shift,
mask, 0, NULL);
- if (clk) {
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
- } else {
+ if (IS_ERR(clk)) {
pr_err("%s: error registering divider %s\n", __func__, clk_name);
iounmap(reg);
+ return;
}
+
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
CLK_OF_DECLARE(pll_divider_clock, "ti,keystone,pll-divider-clock", of_pll_div_clk_init);
@@ -328,10 +329,12 @@ static void __init of_pll_mux_clk_init(struct device_node *node)
clk = clk_register_mux(NULL, clk_name, (const char **)&parents,
ARRAY_SIZE(parents) , 0, reg, shift, mask,
0, NULL);
- if (clk)
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
- else
+ if (IS_ERR(clk)) {
pr_err("%s: error registering mux %s\n", __func__, clk_name);
+ return;
+ }
+
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
CLK_OF_DECLARE(pll_mux_clock, "ti,keystone,pll-mux-clock", of_pll_mux_clk_init);
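The clk_register_*() family returns ERR_PTR() values rather than NULL on failure, so a bare "if (clk)" takes the success path even when handed an encoded errno; the hunks above switch to the IS_ERR() helpers. A minimal reminder of the semantics, assuming only linux/err.h:

#include <linux/err.h>

struct clk;

/* ERR_PTR() encodes a negative errno in the pointer value; it is
 * non-NULL, so "if (clk)" would wrongly treat ERR_PTR(-EINVAL) as a
 * valid clock. IS_ERR()/PTR_ERR() detect and decode it. */
static int check(struct clk *clk)
{
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* recover the errno */
	return 0;			/* valid clock */
}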
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index 64ea895f1a7d..8e28e3489ded 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -287,6 +287,8 @@ static int _sci_clk_build(struct sci_clk_provider *provider,
name = kasprintf(GFP_KERNEL, "clk:%d:%d", sci_clk->dev_id,
sci_clk->clk_id);
+ if (!name)
+ return -ENOMEM;
init.name = name;
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index 695be0f77427..c67cd73aca17 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -675,6 +675,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
return PTR_ERR(base);
clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
+ if (!clk_data)
+ return -ENOMEM;
mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
clk_data);
@@ -742,6 +744,8 @@ static void __init mtk_infrasys_init_early(struct device_node *node)
if (!infra_clk_data) {
infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
+ if (!infra_clk_data)
+ return;
for (i = 0; i < CLK_INFRA_NR; i++)
infra_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
@@ -768,6 +772,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
if (!infra_clk_data) {
infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
+ if (!infra_clk_data)
+ return -ENOMEM;
} else {
for (i = 0; i < CLK_INFRA_NR; i++) {
if (infra_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER))
@@ -896,6 +902,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
return PTR_ERR(base);
clk_data = mtk_alloc_clk_data(CLK_PERI_NR);
+ if (!clk_data)
+ return -ENOMEM;
mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
clk_data);
diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
index 00920182bbe6..f7b5ec749ab9 100644
--- a/drivers/clk/mediatek/clk-mt6779.c
+++ b/drivers/clk/mediatek/clk-mt6779.c
@@ -1216,6 +1216,8 @@ static int clk_mt6779_apmixed_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
@@ -1237,6 +1239,8 @@ static int clk_mt6779_top_probe(struct platform_device *pdev)
return PTR_ERR(base);
clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
clk_data);
diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
index f62b0428da0e..9bef47161bde 100644
--- a/drivers/clk/mediatek/clk-mt6797.c
+++ b/drivers/clk/mediatek/clk-mt6797.c
@@ -392,6 +392,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
return PTR_ERR(base);
clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
+ if (!clk_data)
+ return -ENOMEM;
mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs),
clk_data);
@@ -564,6 +566,8 @@ static void mtk_infrasys_init_early(struct device_node *node)
if (!infra_clk_data) {
infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
+ if (!infra_clk_data)
+ return;
for (i = 0; i < CLK_INFRA_NR; i++)
infra_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
@@ -588,6 +592,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
if (!infra_clk_data) {
infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
+ if (!infra_clk_data)
+ return -ENOMEM;
} else {
for (i = 0; i < CLK_INFRA_NR; i++) {
if (infra_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER))
diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
index 88279d0ea1a7..3ab7b672f8c7 100644
--- a/drivers/clk/mediatek/clk-mt7629-eth.c
+++ b/drivers/clk/mediatek/clk-mt7629-eth.c
@@ -83,6 +83,8 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev)
int r;
clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
mtk_clk_register_gates(node, eth_clks, CLK_ETH_NR_CLK, clk_data);
@@ -105,6 +107,8 @@ static int clk_mt7629_sgmiisys_init(struct platform_device *pdev)
int r;
clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
mtk_clk_register_gates(node, sgmii_clks[id++], CLK_SGMII_NR_CLK,
clk_data);
diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
index d6233994af5a..1e6abbddf02c 100644
--- a/drivers/clk/mediatek/clk-mt7629.c
+++ b/drivers/clk/mediatek/clk-mt7629.c
@@ -581,6 +581,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
return PTR_ERR(base);
clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
clk_data);
@@ -605,6 +607,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
int r;
clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
clk_data);
@@ -633,6 +637,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
return PTR_ERR(base);
clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
clk_data);
diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
index 37b4162c5882..3a33014eee7f 100644
--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
@@ -18,9 +18,9 @@ static const struct mtk_gate_regs mfg_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_MFG(_id, _name, _parent, _shift) \
- GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, \
- &mtk_clk_gate_ops_setclr)
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &mfg_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, CLK_SET_RATE_PARENT)
static const struct mtk_gate mfg_clks[] = {
GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0)
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index cb939c071b0c..89916acf0bc3 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -25,7 +25,7 @@ static int mtk_reset_assert_set_clr(struct reset_controller_dev *rcdev,
struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
unsigned int reg = data->regofs + ((id / 32) << 4);
- return regmap_write(data->regmap, reg, 1);
+ return regmap_write(data->regmap, reg, BIT(id % 32));
}
static int mtk_reset_deassert_set_clr(struct reset_controller_dev *rcdev,
@@ -34,7 +34,7 @@ static int mtk_reset_deassert_set_clr(struct reset_controller_dev *rcdev,
struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
unsigned int reg = data->regofs + ((id / 32) << 4) + 0x4;
- return regmap_write(data->regmap, reg, 1);
+ return regmap_write(data->regmap, reg, BIT(id % 32));
}
static int mtk_reset_assert(struct reset_controller_dev *rcdev,
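The old code wrote the literal value 1 into the set/clear registers, which only ever pulsed bit 0 of each bank; any reset id other than a multiple of 32 was never actually touched. Worked through with illustrative numbers:

/* For id = 35 and regofs = 0x30 (values illustrative):
 *   bank  = id / 32              = 1
 *   reg   = 0x30 + (1 << 4)      = 0x40   (set register of bank 1)
 *   value = BIT(35 % 32) = BIT(3) = 0x8
 * The old regmap_write(..., 1) would have asserted bit 0 instead. */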
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index bf8bea675d24..6c7110a96194 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -36,6 +36,7 @@ int meson_aoclkc_probe(struct platform_device *pdev)
struct meson_aoclk_reset_controller *rstc;
struct meson_aoclk_data *data;
struct device *dev = &pdev->dev;
+ struct device_node *np;
struct regmap *regmap;
int ret, clkid;
@@ -47,7 +48,9 @@ int meson_aoclkc_probe(struct platform_device *pdev)
if (!rstc)
return -ENOMEM;
- regmap = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ np = of_get_parent(dev->of_node);
+ regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(regmap)) {
dev_err(dev, "failed to get regmap\n");
return PTR_ERR(regmap);
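of_get_parent() takes a reference on the node it returns, which the old one-liner leaked; naming the node lets the reference be dropped once the regmap is resolved. The same pattern repeats in meson-eeclk.c and meson8b.c below. A minimal sketch of the balanced form (standard OF/syscon helpers, wrapper name illustrative):

static struct regmap *get_parent_regmap(struct device *dev)
{
	struct device_node *np;
	struct regmap *map;

	np = of_get_parent(dev->of_node);	/* takes a reference */
	map = syscon_node_to_regmap(np);	/* regmap keeps its own refs */
	of_node_put(np);			/* balance of_get_parent() */
	return map;				/* may be ERR_PTR() */
}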
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
index a7cb1e7aedc4..18ae38787268 100644
--- a/drivers/clk/meson/meson-eeclk.c
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -17,6 +17,7 @@ int meson_eeclkc_probe(struct platform_device *pdev)
{
const struct meson_eeclkc_data *data;
struct device *dev = &pdev->dev;
+ struct device_node *np;
struct regmap *map;
int ret, i;
@@ -25,7 +26,9 @@ int meson_eeclkc_probe(struct platform_device *pdev)
return -EINVAL;
/* Get the hhi system controller node */
- map = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ np = of_get_parent(dev->of_node);
+ map = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(map)) {
dev_err(dev,
"failed to get HHI regmap\n");
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 082178a0f41a..efddf0d152a4 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -3684,13 +3684,16 @@ static void __init meson8b_clkc_init_common(struct device_node *np,
struct clk_hw_onecell_data *clk_hw_onecell_data)
{
struct meson8b_clk_reset *rstc;
+ struct device_node *parent_np;
const char *notifier_clk_name;
struct clk *notifier_clk;
void __iomem *clk_base;
struct regmap *map;
int i, ret;
- map = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ map = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(map)) {
pr_info("failed to get HHI regmap - Trying obsolete regs\n");
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
index f110c02e83cb..9674c6c06dca 100644
--- a/drivers/clk/mmp/clk-of-pxa168.c
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -258,18 +258,21 @@ static void __init pxa168_clk_init(struct device_node *np)
pxa_unit->mpmu_base = of_iomap(np, 0);
if (!pxa_unit->mpmu_base) {
pr_err("failed to map mpmu registers\n");
+ kfree(pxa_unit);
return;
}
pxa_unit->apmu_base = of_iomap(np, 1);
if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
+ kfree(pxa_unit);
return;
}
pxa_unit->apbc_base = of_iomap(np, 2);
if (!pxa_unit->apbc_base) {
pr_err("failed to map apbc registers\n");
+ kfree(pxa_unit);
return;
}
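The fix above frees the pxa_unit allocation that each early return previously leaked, though mappings made before the failing of_iomap() are still left in place. A hedged sketch of the fuller goto-unwind idiom (labels and the iounmap() calls are illustrative, not the applied patch):

	pxa_unit->mpmu_base = of_iomap(np, 0);
	if (!pxa_unit->mpmu_base)
		goto free_unit;

	pxa_unit->apmu_base = of_iomap(np, 1);
	if (!pxa_unit->apmu_base)
		goto unmap_mpmu;

	/* ... remaining mappings and setup ... */
	return;

unmap_mpmu:
	iounmap(pxa_unit->mpmu_base);
free_unit:
	kfree(pxa_unit);
	pr_err("failed to map pxa168 clock registers\n");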
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 1b2cefef7431..a8a2cfa83290 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1521,6 +1521,8 @@ static struct clk_branch cam_cc_sys_tmr_clk = {
},
};
+static struct gdsc titan_top_gdsc;
+
static struct gdsc bps_gdsc = {
.gdscr = 0x6004,
.pd = {
@@ -1554,6 +1556,7 @@ static struct gdsc ife_0_gdsc = {
.name = "ife_0_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -1563,6 +1566,7 @@ static struct gdsc ife_1_gdsc = {
.name = "ife_1_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c
index 59f1af415b58..e74fc81a14d0 100644
--- a/drivers/clk/qcom/clk-krait.c
+++ b/drivers/clk/qcom/clk-krait.c
@@ -32,11 +32,16 @@ static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel)
regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT);
}
krait_set_l2_indirect_reg(mux->offset, regval);
- spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
/* Wait for switch to complete. */
mb();
udelay(1);
+
+	/*
+	 * Unlock only after the switch has settled, so the mux
+	 * register cannot be modified while the switch to the new
+	 * parent is still in flight.
+	 */
+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
}
static int krait_mux_set_parent(struct clk_hw *hw, u8 index)
@@ -93,6 +98,8 @@ static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate,
if (d->lpl)
mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift;
+ else
+ mask <<= d->shift;
spin_lock_irqsave(&krait_clock_reg_lock, flags);
val = krait_get_l2_indirect_reg(d->offset);
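The first krait hunk keeps the register lock held across the settle delay; the second positions the divider mask for the non-LPL case, which was previously left unshifted. With a two-bit field (mask 0x3), shift = 6 and LPL_SHIFT = 8 (values illustrative):

/*   lpl:     (0x3 << (6 + 8)) | (0x3 << 6) = 0xC0C0  (both banks)
 *   non-lpl:  0x3 << 6                     = 0x00C0
 * Without the new else branch the non-LPL path applied the raw 0x3,
 * so val &= ~mask cleared bits 1:0 instead of the divider field. */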
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 89c1adeb84d4..40374f3d3d20 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -146,17 +146,11 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
- if (hid_div) {
- rate *= 2;
- rate /= hid_div + 1;
- }
+ if (hid_div)
+ rate = mult_frac(rate, 2, hid_div + 1);
- if (mode) {
- u64 tmp = rate;
- tmp *= m;
- do_div(tmp, n);
- rate = tmp;
- }
+ if (mode)
+ rate = mult_frac(rate, m, n);
return rate;
}
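On 32-bit targets "rate *= 2" can overflow unsigned long before the divide, and the old m/n path needed a u64 temporary with do_div(). mult_frac() sidesteps both; the kernel's definition is roughly:

/* mult_frac() avoids (x * n) overflow by splitting x = q*d + r, so
 * x*n/d = q*n + r*n/d and only r*n (with r < d) must fit the type. */
#define mult_frac(x, numer, denom)({				\
	typeof(x) quot = (x) / (denom);				\
	typeof(x) rem  = (x) % (denom);				\
	(quot * (numer)) + ((rem * (numer)) / (denom));		\
})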
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 0cc4909b5dbe..cb7a2d9247b0 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -569,6 +569,8 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
static struct gdsc mdss_gdsc = {
.gdscr = 0x3000,
+ .en_few_wait_val = 0x6,
+ .en_rest_wait_val = 0x5,
.pd = {
.name = "mdss_gdsc",
},
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index de48ba7eba3a..052e168d2a2a 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -423,7 +423,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
},
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
- .flags = CLK_SET_RATE_PARENT,
},
};
@@ -470,7 +469,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
},
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops,
- .flags = CLK_SET_RATE_PARENT,
},
};
@@ -503,7 +501,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
},
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops,
- .flags = CLK_SET_RATE_PARENT,
},
};
@@ -537,7 +534,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
},
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops,
- .flags = CLK_SET_RATE_PARENT,
},
};
@@ -551,7 +547,6 @@ static struct clk_fixed_factor gpll6_out_main_div2 = {
},
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
- .flags = CLK_SET_RATE_PARENT,
},
};
@@ -616,7 +611,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
},
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops,
- .flags = CLK_SET_RATE_PARENT,
},
};
@@ -667,6 +661,7 @@ static struct clk_branch gcc_sleep_clk_src = {
},
.num_parents = 1,
.ops = &clk_branch2_ops,
+ .flags = CLK_IS_CRITICAL,
},
},
};
@@ -1788,8 +1783,10 @@ static struct clk_regmap_div nss_port4_tx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(25000000, P_UNIPHY1_RX, 12.5, 0, 0),
+ F(25000000, P_UNIPHY0_RX, 5, 0, 0),
F(78125000, P_UNIPHY1_RX, 4, 0, 0),
F(125000000, P_UNIPHY1_RX, 2.5, 0, 0),
+ F(125000000, P_UNIPHY0_RX, 1, 0, 0),
F(156250000, P_UNIPHY1_RX, 2, 0, 0),
F(312500000, P_UNIPHY1_RX, 1, 0, 0),
{ }
@@ -1828,8 +1825,10 @@ static struct clk_regmap_div nss_port5_rx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(25000000, P_UNIPHY1_TX, 12.5, 0, 0),
+ F(25000000, P_UNIPHY0_TX, 5, 0, 0),
F(78125000, P_UNIPHY1_TX, 4, 0, 0),
F(125000000, P_UNIPHY1_TX, 2.5, 0, 0),
+ F(125000000, P_UNIPHY0_TX, 1, 0, 0),
F(156250000, P_UNIPHY1_TX, 2, 0, 0),
F(312500000, P_UNIPHY1_TX, 1, 0, 0),
{ }
@@ -1867,8 +1866,10 @@ static struct clk_regmap_div nss_port5_tx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port6_rx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_UNIPHY2_RX, 5, 0, 0),
F(25000000, P_UNIPHY2_RX, 12.5, 0, 0),
F(78125000, P_UNIPHY2_RX, 4, 0, 0),
+ F(125000000, P_UNIPHY2_RX, 1, 0, 0),
F(125000000, P_UNIPHY2_RX, 2.5, 0, 0),
F(156250000, P_UNIPHY2_RX, 2, 0, 0),
F(312500000, P_UNIPHY2_RX, 1, 0, 0),
@@ -1907,8 +1908,10 @@ static struct clk_regmap_div nss_port6_rx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port6_tx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_UNIPHY2_TX, 5, 0, 0),
F(25000000, P_UNIPHY2_TX, 12.5, 0, 0),
F(78125000, P_UNIPHY2_TX, 4, 0, 0),
+ F(125000000, P_UNIPHY2_TX, 1, 0, 0),
F(125000000, P_UNIPHY2_TX, 2.5, 0, 0),
F(156250000, P_UNIPHY2_TX, 2, 0, 0),
F(312500000, P_UNIPHY2_TX, 1, 0, 0),
@@ -3346,6 +3349,7 @@ static struct clk_branch gcc_nssnoc_ubi1_ahb_clk = {
static struct clk_branch gcc_ubi0_ahb_clk = {
.halt_reg = 0x6820c,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x6820c,
.enable_mask = BIT(0),
@@ -3363,6 +3367,7 @@ static struct clk_branch gcc_ubi0_ahb_clk = {
static struct clk_branch gcc_ubi0_axi_clk = {
.halt_reg = 0x68200,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68200,
.enable_mask = BIT(0),
@@ -3380,6 +3385,7 @@ static struct clk_branch gcc_ubi0_axi_clk = {
static struct clk_branch gcc_ubi0_nc_axi_clk = {
.halt_reg = 0x68204,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68204,
.enable_mask = BIT(0),
@@ -3397,6 +3403,7 @@ static struct clk_branch gcc_ubi0_nc_axi_clk = {
static struct clk_branch gcc_ubi0_core_clk = {
.halt_reg = 0x68210,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68210,
.enable_mask = BIT(0),
@@ -3414,6 +3421,7 @@ static struct clk_branch gcc_ubi0_core_clk = {
static struct clk_branch gcc_ubi0_mpt_clk = {
.halt_reg = 0x68208,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68208,
.enable_mask = BIT(0),
@@ -3431,6 +3439,7 @@ static struct clk_branch gcc_ubi0_mpt_clk = {
static struct clk_branch gcc_ubi1_ahb_clk = {
.halt_reg = 0x6822c,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x6822c,
.enable_mask = BIT(0),
@@ -3448,6 +3457,7 @@ static struct clk_branch gcc_ubi1_ahb_clk = {
static struct clk_branch gcc_ubi1_axi_clk = {
.halt_reg = 0x68220,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68220,
.enable_mask = BIT(0),
@@ -3465,6 +3475,7 @@ static struct clk_branch gcc_ubi1_axi_clk = {
static struct clk_branch gcc_ubi1_nc_axi_clk = {
.halt_reg = 0x68224,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68224,
.enable_mask = BIT(0),
@@ -3482,6 +3493,7 @@ static struct clk_branch gcc_ubi1_nc_axi_clk = {
static struct clk_branch gcc_ubi1_core_clk = {
.halt_reg = 0x68230,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68230,
.enable_mask = BIT(0),
@@ -3499,6 +3511,7 @@ static struct clk_branch gcc_ubi1_core_clk = {
static struct clk_branch gcc_ubi1_mpt_clk = {
.halt_reg = 0x68228,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68228,
.enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c
index 8bed02a748ab..470a277603a9 100644
--- a/drivers/clk/qcom/gcc-mdm9615.c
+++ b/drivers/clk/qcom/gcc-mdm9615.c
@@ -58,7 +58,7 @@ static struct clk_regmap pll0_vote = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "pll0_vote",
- .parent_names = (const char *[]){ "pll8" },
+ .parent_names = (const char *[]){ "pll0" },
.num_parents = 1,
.ops = &clk_pll_vote_ops,
},
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index bd32212f37e6..5982253b0330 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -25,11 +25,9 @@ enum {
P_CORE_BI_PLL_TEST_SE,
P_DSI0_PHY_PLL_OUT_BYTECLK,
P_DSI0_PHY_PLL_OUT_DSICLK,
- P_GPLL0_OUT_AUX,
P_GPLL0_OUT_MAIN,
P_GPLL1_OUT_MAIN,
P_GPLL3_OUT_MAIN,
- P_GPLL4_OUT_AUX,
P_GPLL4_OUT_MAIN,
P_GPLL6_OUT_AUX,
P_HDMI_PHY_PLL_CLK,
@@ -109,28 +107,24 @@ static const char * const gcc_parent_names_4[] = {
static const struct parent_map gcc_parent_map_5[] = {
{ P_XO, 0 },
{ P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
- { P_GPLL0_OUT_AUX, 2 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
static const char * const gcc_parent_names_5[] = {
"cxo",
- "dsi0pll_byteclk_src",
- "gpll0_out_aux",
+ "dsi0pllbyte",
"core_bi_pll_test_se",
};
static const struct parent_map gcc_parent_map_6[] = {
{ P_XO, 0 },
{ P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
- { P_GPLL0_OUT_AUX, 3 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
static const char * const gcc_parent_names_6[] = {
"cxo",
- "dsi0_phy_pll_out_byteclk",
- "gpll0_out_aux",
+ "dsi0pllbyte",
"core_bi_pll_test_se",
};
@@ -139,7 +133,6 @@ static const struct parent_map gcc_parent_map_7[] = {
{ P_GPLL0_OUT_MAIN, 1 },
{ P_GPLL3_OUT_MAIN, 2 },
{ P_GPLL6_OUT_AUX, 3 },
- { P_GPLL4_OUT_AUX, 4 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
@@ -148,7 +141,6 @@ static const char * const gcc_parent_names_7[] = {
"gpll0_out_main",
"gpll3_out_main",
"gpll6_out_aux",
- "gpll4_out_aux",
"core_bi_pll_test_se",
};
@@ -175,7 +167,7 @@ static const struct parent_map gcc_parent_map_9[] = {
static const char * const gcc_parent_names_9[] = {
"cxo",
"gpll0_out_main",
- "dsi0_phy_pll_out_dsiclk",
+ "dsi0pll",
"gpll6_out_aux",
"core_bi_pll_test_se",
};
@@ -207,14 +199,12 @@ static const char * const gcc_parent_names_11[] = {
static const struct parent_map gcc_parent_map_12[] = {
{ P_XO, 0 },
{ P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
- { P_GPLL0_OUT_AUX, 2 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
static const char * const gcc_parent_names_12[] = {
"cxo",
- "dsi0pll_pclk_src",
- "gpll0_out_aux",
+ "dsi0pll",
"core_bi_pll_test_se",
};
@@ -237,40 +227,34 @@ static const char * const gcc_parent_names_13[] = {
static const struct parent_map gcc_parent_map_14[] = {
{ P_XO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
- { P_GPLL4_OUT_AUX, 2 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
static const char * const gcc_parent_names_14[] = {
"cxo",
"gpll0_out_main",
- "gpll4_out_aux",
"core_bi_pll_test_se",
};
static const struct parent_map gcc_parent_map_15[] = {
{ P_XO, 0 },
- { P_GPLL0_OUT_AUX, 2 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
static const char * const gcc_parent_names_15[] = {
"cxo",
- "gpll0_out_aux",
"core_bi_pll_test_se",
};
static const struct parent_map gcc_parent_map_16[] = {
{ P_XO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
- { P_GPLL0_OUT_AUX, 2 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
static const char * const gcc_parent_names_16[] = {
"cxo",
"gpll0_out_main",
- "gpll0_out_aux",
"core_bi_pll_test_se",
};
diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
index ee908fbfeab1..68ea6a846a15 100644
--- a/drivers/clk/qcom/gcc-sm8150.c
+++ b/drivers/clk/qcom/gcc-sm8150.c
@@ -250,7 +250,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_cpuss_ahb_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -273,7 +273,7 @@ static struct clk_rcg2 gcc_emac_ptp_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_emac_ptp_clk_src",
.parent_data = gcc_parents_5,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_parents_5),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -299,7 +299,7 @@ static struct clk_rcg2 gcc_emac_rgmii_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_emac_rgmii_clk_src",
.parent_data = gcc_parents_5,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_parents_5),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -323,7 +323,7 @@ static struct clk_rcg2 gcc_gp1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk_src",
.parent_data = gcc_parents_1,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -338,7 +338,7 @@ static struct clk_rcg2 gcc_gp2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk_src",
.parent_data = gcc_parents_1,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -353,7 +353,7 @@ static struct clk_rcg2 gcc_gp3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk_src",
.parent_data = gcc_parents_1,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -374,7 +374,7 @@ static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_aux_clk_src",
.parent_data = gcc_parents_2,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parents_2),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -389,7 +389,7 @@ static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_aux_clk_src",
.parent_data = gcc_parents_2,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parents_2),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -410,7 +410,7 @@ static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_phy_refgen_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -432,7 +432,7 @@ static struct clk_rcg2 gcc_pdm2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -455,7 +455,7 @@ static struct clk_rcg2 gcc_qspi_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qspi_core_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -489,7 +489,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s0_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -504,7 +504,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s1_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -519,7 +519,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s2_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -534,7 +534,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s3_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -549,7 +549,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s4_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -564,7 +564,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s5_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -579,7 +579,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s6_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -594,7 +594,7 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_s7_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -609,7 +609,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s0_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -624,7 +624,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s1_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -639,7 +639,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s2_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -654,7 +654,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s3_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -669,7 +669,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s4_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -684,7 +684,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_s5_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -699,7 +699,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s0_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -714,7 +714,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s1_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -729,7 +729,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s2_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -744,7 +744,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s3_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -759,7 +759,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s4_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -774,7 +774,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap2_s5_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -800,8 +800,8 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parents_6,
- .num_parents = 5,
- .flags = CLK_SET_RATE_PARENT,
+ .num_parents = ARRAY_SIZE(gcc_parents_6),
+ .flags = CLK_OPS_PARENT_ENABLE,
.ops = &clk_rcg2_floor_ops,
},
};
@@ -825,7 +825,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_apps_clk_src",
.parent_data = gcc_parents_3,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_floor_ops,
},
@@ -845,7 +845,7 @@ static struct clk_rcg2 gcc_tsif_ref_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_tsif_ref_clk_src",
.parent_data = gcc_parents_7,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_parents_7),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -869,7 +869,7 @@ static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_card_axi_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -892,7 +892,7 @@ static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_card_ice_core_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -912,7 +912,7 @@ static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_card_phy_aux_clk_src",
.parent_data = gcc_parents_4,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_parents_4),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -934,7 +934,7 @@ static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_card_unipro_core_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -958,7 +958,7 @@ static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_axi_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -973,7 +973,7 @@ static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_ice_core_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -988,7 +988,7 @@ static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_phy_aux_clk_src",
.parent_data = gcc_parents_4,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_parents_4),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -1003,7 +1003,7 @@ static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_unipro_core_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -1027,7 +1027,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_master_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -1049,7 +1049,7 @@ static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_mock_utmi_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -1064,7 +1064,7 @@ static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_sec_master_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -1079,7 +1079,7 @@ static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_sec_mock_utmi_clk_src",
.parent_data = gcc_parents_0,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -1094,7 +1094,7 @@ static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_prim_phy_aux_clk_src",
.parent_data = gcc_parents_2,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parents_2),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
@@ -1109,7 +1109,7 @@ static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_sec_phy_aux_clk_src",
.parent_data = gcc_parents_2,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parents_2),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
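Replacing the hard-coded num_parents counts with ARRAY_SIZE() ties each count to the parent table itself, so the two cannot drift apart when a table gains or loses entries. Minimal illustration (gcc_parents_x is hypothetical):

/* ARRAY_SIZE evaluates to the element count at compile time. */
static const struct clk_parent_data gcc_parents_x[] = {
	{ .fw_name = "bi_tcxo" },
	{ .fw_name = "gpll0" },
};
/* .num_parents = ARRAY_SIZE(gcc_parents_x)  ->  2 */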
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index e40efba1bf7d..7a116f62168b 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -22,8 +22,6 @@
#define CX_GMU_CBCR_SLEEP_SHIFT 4
#define CX_GMU_CBCR_WAKE_MASK 0xf
#define CX_GMU_CBCR_WAKE_SHIFT 8
-#define CLK_DIS_WAIT_SHIFT 12
-#define CLK_DIS_WAIT_MASK (0xf << CLK_DIS_WAIT_SHIFT)
enum {
P_BI_TCXO,
@@ -124,6 +122,7 @@ static struct clk_branch gpu_cc_cxo_clk = {
static struct gdsc gpu_cx_gdsc = {
.gdscr = 0x106c,
.gds_hw_ctrl = 0x1540,
+ .clk_dis_wait_val = 0x8,
.pd = {
.name = "gpu_cx_gdsc",
},
@@ -221,10 +220,6 @@ static int gpu_cc_sdm845_probe(struct platform_device *pdev)
value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
regmap_update_bits(regmap, 0x1098, mask, value);
- /* Configure clk_dis_wait for gpu_cx_gdsc */
- regmap_update_bits(regmap, 0x106c, CLK_DIS_WAIT_MASK,
- 8 << CLK_DIS_WAIT_SHIFT);
-
return qcom_cc_really_probe(pdev, &gpu_cc_sdm845_desc, regmap);
}
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
index 819d194be8f7..5a44dc8bd25f 100644
--- a/drivers/clk/qcom/reset.c
+++ b/drivers/clk/qcom/reset.c
@@ -13,14 +13,16 @@
static int qcom_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
+ struct qcom_reset_controller *rst = to_qcom_reset_controller(rcdev);
+
rcdev->ops->assert(rcdev, id);
- udelay(1);
+ udelay(rst->reset_map[id].udelay ?: 1); /* use 1 us as default */
rcdev->ops->deassert(rcdev, id);
return 0;
}
-static int
-qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+static int qcom_reset_set_assert(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
{
struct qcom_reset_controller *rst;
const struct qcom_reset_map *map;
@@ -28,23 +30,24 @@ qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
rst = to_qcom_reset_controller(rcdev);
map = &rst->reset_map[id];
- mask = BIT(map->bit);
+ mask = map->bitmask ? map->bitmask : BIT(map->bit);
+
+ regmap_update_bits(rst->regmap, map->reg, mask, assert ? mask : 0);
+
+ /* Read back the register to ensure write completion, ignore the value */
+ regmap_read(rst->regmap, map->reg, &mask);
- return regmap_update_bits(rst->regmap, map->reg, mask, mask);
+ return 0;
}
-static int
-qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+static int qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
- struct qcom_reset_controller *rst;
- const struct qcom_reset_map *map;
- u32 mask;
-
- rst = to_qcom_reset_controller(rcdev);
- map = &rst->reset_map[id];
- mask = BIT(map->bit);
+ return qcom_reset_set_assert(rcdev, id, true);
+}
- return regmap_update_bits(rst->regmap, map->reg, mask, 0);
+static int qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ return qcom_reset_set_assert(rcdev, id, false);
}
const struct reset_control_ops qcom_reset_ops = {
diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h
index 2a08b5e282c7..9a47c838d9b1 100644
--- a/drivers/clk/qcom/reset.h
+++ b/drivers/clk/qcom/reset.h
@@ -11,6 +11,8 @@
struct qcom_reset_map {
unsigned int reg;
u8 bit;
+ u8 udelay;
+ u32 bitmask;
};
struct regmap;
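With the two new fields, a reset map entry can carry either a single bit position or a full bitmask, plus an optional per-reset delay consumed by qcom_reset() above. A hypothetical table entry (register offsets, mask and delay are illustrative):

static const struct qcom_reset_map example_resets[] = {
	[0] = { .reg = 0x1000, .bit = 3 },	/* mask falls back to BIT(3) */
	[1] = { .reg = 0x2000, .bitmask = 0x3c, .udelay = 5 },
};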
diff --git a/drivers/clk/renesas/r7s9210-cpg-mssr.c b/drivers/clk/renesas/r7s9210-cpg-mssr.c
index cf65d4e0e116..2772eb0b8ba7 100644
--- a/drivers/clk/renesas/r7s9210-cpg-mssr.c
+++ b/drivers/clk/renesas/r7s9210-cpg-mssr.c
@@ -213,7 +213,7 @@ const struct cpg_mssr_info r7s9210_cpg_mssr_info __initconst = {
.cpg_clk_register = rza2_cpg_clk_register,
/* RZ/A2 has Standby Control Registers */
- .stbyctrl = true,
+ .reg_layout = CLK_REG_LAYOUT_RZ_A,
};
static void __init r7s9210_cpg_mssr_early_init(struct device_node *np)
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index f2dc625b745d..75954ac1fb9b 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -286,8 +286,8 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = {
.name = "uart_group_012",
.type = K_BITSEL,
.source = 1 + R9A06G032_DIV_UART,
- /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */
- .dual.sel = ((0xec / 4) << 5) | 24,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */
+ .dual.sel = ((0x34 / 4) << 5) | 30,
.dual.group = 0,
},
{
@@ -295,8 +295,8 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = {
.name = "uart_group_34567",
.type = K_BITSEL,
.source = 1 + R9A06G032_DIV_P2_PG,
- /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */
- .dual.sel = ((0x34 / 4) << 5) | 30,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */
+ .dual.sel = ((0xec / 4) << 5) | 24,
.dual.group = 1,
},
D_UGATE(CLK_UART0, "clk_uart0", UART_GROUP_012, 0, 0, 0x1b2, 0x1b3, 0x1b4, 0x1b5),
@@ -386,7 +386,7 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd,
int error;
int index;
- while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
+ while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i++,
&clkspec)) {
if (clkspec.np != pd->dev.of_node)
continue;
@@ -399,7 +399,6 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd,
if (error)
return error;
}
- i++;
}
return 0;
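With the increment at the bottom of the loop body, every "continue" (taken whenever the phandle points at another node) skipped it, so the walk re-parsed index i forever. Folding the increment into the call argument advances it on every iteration. The pitfall in miniature (get_item(), wanted() and use() are hypothetical):

int i = 0;
while (get_item(i, &item) == 0) {
	if (!wanted(&item))
		continue;		/* i never advanced: stuck on one index */
	use(&item);
	i++;
}
/* Fixed form: while (get_item(i++, &item) == 0) { ... } */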
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 6f9612c169af..d0ccb52b0270 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -111,12 +111,12 @@ static const u16 srcr[] = {
* @rcdev: Optional reset controller entity
* @dev: CPG/MSSR device
* @base: CPG/MSSR register block base address
+ * @reg_layout: CPG/MSSR register layout
* @rmw_lock: protects RMW register accesses
* @np: Device node in DT for this CPG/MSSR module
* @num_core_clks: Number of Core Clocks in clks[]
* @num_mod_clks: Number of Module Clocks in clks[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
- * @stbyctrl: This device has Standby Control Registers
* @notifiers: Notifier chain to save/restore clock state for system resume
* @smstpcr_saved[].mask: Mask of SMSTPCR[] bits under our control
* @smstpcr_saved[].val: Saved values of SMSTPCR[]
@@ -128,13 +128,13 @@ struct cpg_mssr_priv {
#endif
struct device *dev;
void __iomem *base;
+ enum clk_reg_layout reg_layout;
spinlock_t rmw_lock;
struct device_node *np;
unsigned int num_core_clks;
unsigned int num_mod_clks;
unsigned int last_dt_core_clk;
- bool stbyctrl;
struct raw_notifier_head notifiers;
struct {
@@ -177,7 +177,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
enable ? "ON" : "OFF");
spin_lock_irqsave(&priv->rmw_lock, flags);
- if (priv->stbyctrl) {
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
value = readb(priv->base + STBCR(reg));
if (enable)
value &= ~bitmask;
@@ -199,7 +199,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
spin_unlock_irqrestore(&priv->rmw_lock, flags);
- if (!enable || priv->stbyctrl)
+ if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
return 0;
for (i = 1000; i > 0; --i) {
@@ -233,7 +233,7 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
struct cpg_mssr_priv *priv = clock->priv;
u32 value;
- if (priv->stbyctrl)
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
value = readb(priv->base + STBCR(clock->index / 32));
else
value = readl(priv->base + MSTPSR(clock->index / 32));
@@ -272,7 +272,7 @@ struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
case CPG_MOD:
type = "module";
- if (priv->stbyctrl) {
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
idx = MOD_CLK_PACK_10(clkidx);
range_check = 7 - (clkidx % 10);
} else {
@@ -800,7 +800,8 @@ static int cpg_mssr_suspend_noirq(struct device *dev)
/* Save module registers with bits under our control */
for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
if (priv->smstpcr_saved[reg].mask)
- priv->smstpcr_saved[reg].val = priv->stbyctrl ?
+ priv->smstpcr_saved[reg].val =
+ priv->reg_layout == CLK_REG_LAYOUT_RZ_A ?
readb(priv->base + STBCR(reg)) :
readl(priv->base + SMSTPCR(reg));
}
@@ -830,7 +831,7 @@ static int cpg_mssr_resume_noirq(struct device *dev)
if (!mask)
continue;
- if (priv->stbyctrl)
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
oldval = readb(priv->base + STBCR(reg));
else
oldval = readl(priv->base + SMSTPCR(reg));
@@ -839,7 +840,7 @@ static int cpg_mssr_resume_noirq(struct device *dev)
if (newval == oldval)
continue;
- if (priv->stbyctrl) {
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
writeb(newval, priv->base + STBCR(reg));
/* dummy read to ensure write has completed */
readb(priv->base + STBCR(reg));
@@ -861,8 +862,7 @@ static int cpg_mssr_resume_noirq(struct device *dev)
}
if (!i)
- dev_warn(dev, "Failed to enable %s%u[0x%x]\n",
- priv->stbyctrl ? "STB" : "SMSTP", reg,
+ dev_warn(dev, "Failed to enable SMSTP%u[0x%x]\n", reg,
oldval & mask);
}
@@ -907,12 +907,11 @@ static int __init cpg_mssr_common_init(struct device *dev,
goto out_err;
}
- cpg_mssr_priv = priv;
priv->num_core_clks = info->num_total_core_clks;
priv->num_mod_clks = info->num_hw_mod_clks;
priv->last_dt_core_clk = info->last_dt_core_clk;
RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
- priv->stbyctrl = info->stbyctrl;
+ priv->reg_layout = info->reg_layout;
for (i = 0; i < nclks; i++)
priv->clks[i] = ERR_PTR(-ENOENT);
@@ -921,6 +920,8 @@ static int __init cpg_mssr_common_init(struct device *dev,
if (error)
goto out_err;
+ cpg_mssr_priv = priv;
+
return 0;
out_err:
@@ -990,7 +991,7 @@ static int __init cpg_mssr_probe(struct platform_device *pdev)
return error;
/* Reset Controller not supported for Standby Control SoCs */
- if (info->stbyctrl)
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
return 0;
error = cpg_mssr_reset_controller_register(priv);
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 4ddcdf3bfb95..f05521b5fa6e 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -85,6 +85,11 @@ struct mssr_mod_clk {
struct device_node;
+enum clk_reg_layout {
+ CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3 = 0,
+ CLK_REG_LAYOUT_RZ_A,
+};
+
/**
* SoC-specific CPG/MSSR Description
*
@@ -105,6 +110,7 @@ struct device_node;
* @crit_mod_clks: Array with Module Clock IDs of critical clocks that
* should not be disabled without a knowledgeable driver
* @num_crit_mod_clks: Number of entries in crit_mod_clks[]
+ * @reg_layout: CPG/MSSR register layout from enum clk_reg_layout
*
* @core_pm_clks: Array with IDs of Core Clocks that are suitable for Power
* Management, in addition to Module Clocks
@@ -112,10 +118,6 @@ struct device_node;
*
* @init: Optional callback to perform SoC-specific initialization
* @cpg_clk_register: Optional callback to handle special Core Clock types
- *
- * @stbyctrl: This device has Standby Control Registers which are 8-bits
- * wide, no status registers (MSTPSR) and have different address
- * offsets.
*/
struct cpg_mssr_info {
@@ -130,7 +132,7 @@ struct cpg_mssr_info {
unsigned int num_core_clks;
unsigned int last_dt_core_clk;
unsigned int num_total_core_clks;
- bool stbyctrl;
+ enum clk_reg_layout reg_layout;
/* Module Clocks */
const struct mssr_mod_clk *mod_clks;
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 198417d56300..aa8a299ff704 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -963,6 +963,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
return mux_clk;
err_pll:
+ kfree(pll->rate_table);
clk_unregister(mux_clk);
mux_clk = pll_clk;
err_mux:
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
index 4b1122e98e16..ddfe1c402e80 100644
--- a/drivers/clk/rockchip/clk-rk3128.c
+++ b/drivers/clk/rockchip/clk-rk3128.c
@@ -489,7 +489,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 13, GFLAGS),
GATE(HCLK_HOST2, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
- GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 13, GFLAGS),
+ GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
GATE(0, "hclk_peri_ahb", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 14, GFLAGS),
GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS),
GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 12, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 77aebfb1d6d5..730020fcc7fe 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -751,6 +751,7 @@ static const char *const rk3188_critical_clocks[] __initconst = {
"pclk_peri",
"hclk_cpubus",
"hclk_vio_bus",
+ "sclk_mac_lbtest",
};
static struct rockchip_clk_provider *__init rk3188_common_clk_init(struct device_node *np)
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index ce1d2446f142..1a75cc9f68ef 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -1259,7 +1259,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKSEL_CON(56), 6, 2, MFLAGS,
RK3399_CLKGATE_CON(10), 7, GFLAGS),
- COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cifout", mux_clk_cif_p, 0,
+ COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cifout", mux_clk_cif_p, CLK_SET_RATE_PARENT,
RK3399_CLKSEL_CON(56), 5, 1, MFLAGS, 0, 5, DFLAGS),
/* gic */
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index ac70ad785d8e..33df20f813d5 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -1390,6 +1390,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
if (ret) {
pr_err("%s: failed to register pll clock %s : %d\n",
__func__, pll_clk->name, ret);
+ kfree(pll->rate_table);
kfree(pll);
return;
}
diff --git a/drivers/clk/si5324.h b/drivers/clk/si5324.h
new file mode 100644
index 000000000000..b3826e7b2f84
--- /dev/null
+++ b/drivers/clk/si5324.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Si5324 clock generator platform data
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_SI5324_H__
+#define __LINUX_PLATFORM_DATA_SI5324_H__
+
+/**
+ * enum si5324_pll_src - Si5324 pll clock source
+ *
+ * @SI5324_PLL_SRC_XTAL: Pll source clock is XTAL input
+ * @SI5324_PLL_SRC_CLKIN1: Pll source clock is CLKIN1 input
+ * @SI5324_PLL_SRC_CLKIN2: Pll source clock is CLKIN2 input
+ *
+ * Defines enums for clock sources.
+ */
+enum si5324_pll_src {
+ SI5324_PLL_SRC_XTAL = 0,
+ SI5324_PLL_SRC_CLKIN1 = 1,
+ SI5324_PLL_SRC_CLKIN2 = 2,
+};
+
+/**
+ * enum si5324_drive_strength - Si5324 clock output drive strength
+ *
+ * @SI5324_DRIVE_DEFAULT: Default, do not change eeprom config
+ * @SI5324_DRIVE_2MA: 2mA clock output drive strength
+ * @SI5324_DRIVE_4MA: 4mA clock output drive strength
+ * @SI5324_DRIVE_6MA: 6mA clock output drive strength
+ * @SI5324_DRIVE_8MA: 8mA clock output drive strength
+ *
+ * Defines enums for drive strength
+ */
+enum si5324_drive_strength {
+ SI5324_DRIVE_DEFAULT = 0,
+ SI5324_DRIVE_2MA = 2,
+ SI5324_DRIVE_4MA = 4,
+ SI5324_DRIVE_6MA = 6,
+ SI5324_DRIVE_8MA = 8,
+};
+
+/**
+ * struct si5324_clkout_config - Si5324 clock output configuration
+ *
+ * @drive: output drive strength
+ * @rate: clkout rate
+ */
+struct si5324_clkout_config {
+ enum si5324_drive_strength drive;
+ unsigned long rate;
+};
+
+/**
+ * struct si5324_platform_data - Platform data for the Si5324 clock driver
+ *
+ * @pll_src: Pll source clock setting
+ * @clkout: Array of clkout configuration
+ */
+struct si5324_platform_data {
+ enum si5324_pll_src pll_src;
+ struct si5324_clkout_config clkout[2];
+};
+
+#endif
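
For reference, a board integration might populate this platform data as
follows. This is a minimal sketch, not part of the patch: the rates and
drive strengths are hypothetical.

	/* Hypothetical board file: CLKOUT1 at 148.5 MHz, CLKOUT2 unused. */
	static struct si5324_platform_data board_si5324_pdata = {
		.pll_src = SI5324_PLL_SRC_CLKIN1,
		.clkout = {
			{ .drive = SI5324_DRIVE_8MA, .rate = 148500000 },
			{ .drive = SI5324_DRIVE_DEFAULT, .rate = 0 },
		},
	};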
diff --git a/drivers/clk/si5324drv.c b/drivers/clk/si5324drv.c
new file mode 100644
index 000000000000..5c064a329e73
--- /dev/null
+++ b/drivers/clk/si5324drv.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Si5324 clock driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Venkateshwar Rao G <vgannava@xilinx.com>
+ * Leon Woestenberg <leon@sidebranch.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include "si5324drv.h"
+
+/**
+ * si5324_rate_approx - Find closest rational approximation N2_LS/N3 fraction.
+ *
+ * @f: Holds the N2_LS/N3 fraction in 36.28 fixed point notation.
+ * @md: Holds the maximum denominator (N3) value allowed.
+ * @num: Store the numerator (N2_LS) found.
+ * @denom: Store the denominator (N3) found.
+ *
+ * This function finds the closest rational approximation. When the
+ * maximum denominator is 1, only an n/1 solution is allowed. Otherwise
+ * the fraction is scaled until no digits remain after the binary point,
+ * then expanded as a continued fraction, checking the denominator bound
+ * at each step.
+ */
+void si5324_rate_approx(u64 f, u64 md, u32 *num, u32 *denom)
+{
+ u64 a, h[3] = { 0, 1, 0 }, k[3] = { 1, 0, 0 };
+ u64 x, d, m, n = 1;
+ int i = 0;
+
+ if (md <= 1) {
+ *denom = 1;
+ *num = (u32)(f >> 28);
+ return;
+ }
+
+ n <<= 28;
+ for (i = 0; i < 28; i++) {
+ if ((f & 0x1) == 0) {
+ n >>= 1;
+ f >>= 1;
+ } else {
+ break;
+ }
+ }
+ d = f;
+
+ for (i = 0; i < 64; i++) {
+ a = n ? (div64_u64(d, n)) : 0;
+ if (i && !a)
+ break;
+ x = d;
+ d = n;
+ div64_u64_rem(x, n, &m);
+ n = m;
+ x = a;
+ if (k[1] * a + k[0] >= md) {
+ x = div64_u64((md - k[0]), k[1]);
+ if (x * 2 >= a || k[1] >= md)
+ i = 65;
+ else
+ break;
+ }
+ h[2] = x * h[1] + h[0];
+ h[0] = h[1];
+ h[1] = h[2];
+ k[2] = x * k[1] + k[0];
+ k[0] = k[1];
+ k[1] = k[2];
+ }
+
+ *denom = (u32)k[1];
+ *num = (u32)h[1];
+}
+
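
To illustrate the 36.28 fixed-point convention used above — a small sketch
with hypothetical values, not taken from the patch:

	/* Approximate 2.5 with denominator <= 10; expect num/denom == 5/2. */
	u32 num, denom;
	u64 f = (5ULL << 28) / 2;	/* 2.5 in 36.28 fixed point */

	si5324_rate_approx(f, 10, &num, &denom);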
+/**
+ * si5324_find_n2ls - Search through the possible settings for the N2_LS.
+ *
+ * @settings: Holds the settings up till now.
+ *
+ * This function finds the best setting for N2_LS and N3n with the values
+ * for N1_HS, NCn_LS, and N2_HS.
+ *
+ * Return: 1 when the best possible result has been found, 0 on failure.
+ */
+static int si5324_find_n2ls(struct si5324_settingst *settings)
+{
+ u32 result = 0;
+ u64 f3_actual;
+ u64 fosc_actual;
+ u64 fout_actual;
+ u64 delta_fout;
+ u64 n2_ls_div_n3, mult_res;
+ u32 mult;
+
+ n2_ls_div_n3 = div64_u64(div64_u64(div64_u64(settings->fosc,
+ (settings->fin >> SI5324_FIN_FOUT_SHIFT)),
+ (u64)settings->n2_hs), (u64)2);
+
+ si5324_rate_approx(n2_ls_div_n3, settings->n31_max, &settings->n2_ls,
+ &settings->n31);
+ settings->n2_ls *= 2;
+
+ if (settings->n2_ls < settings->n2_ls_min) {
+ mult = div64_u64(settings->n2_ls_min, settings->n2_ls);
+ div64_u64_rem(settings->n2_ls_min, settings->n2_ls, &mult_res);
+ mult = mult_res ? mult + 1 : mult;
+ settings->n2_ls *= mult;
+ settings->n31 *= mult;
+ }
+
+ if (settings->n31 < settings->n31_min) {
+ mult = div64_u64(settings->n31_min, settings->n31);
+ div64_u64_rem(settings->n31_min, settings->n31, &mult_res);
+ mult = mult_res ? mult + 1 : mult;
+ settings->n2_ls *= mult;
+ settings->n31 *= mult;
+ }
+ pr_debug("Trying N2_LS = %d N3 = %d.\n", settings->n2_ls,
+ settings->n31);
+
+ if (settings->n2_ls < settings->n2_ls_min ||
+ settings->n2_ls > settings->n2_ls_max) {
+ pr_info("N2_LS out of range.\n");
+ } else if ((settings->n31 < settings->n31_min) ||
+ (settings->n31 > settings->n31_max)) {
+ pr_info("N3 out of range.\n");
+ } else {
+ f3_actual = div64_u64(settings->fin, settings->n31);
+ fosc_actual = f3_actual * settings->n2_hs * settings->n2_ls;
+ fout_actual = div64_u64(fosc_actual,
+ (settings->n1_hs * settings->nc1_ls));
+ delta_fout = fout_actual - settings->fout;
+
+ if ((f3_actual < ((u64)SI5324_F3_MIN) <<
+ SI5324_FIN_FOUT_SHIFT) ||
+ (f3_actual > ((u64)SI5324_F3_MAX) <<
+ SI5324_FIN_FOUT_SHIFT)) {
+ pr_debug("F3 frequency out of range.\n");
+ } else if ((fosc_actual < ((u64)SI5324_FOSC_MIN) <<
+ SI5324_FIN_FOUT_SHIFT) ||
+ (fosc_actual > ((u64)SI5324_FOSC_MAX) <<
+ SI5324_FIN_FOUT_SHIFT)) {
+ pr_debug("Fosc frequency out of range.\n");
+ } else if ((fout_actual < ((u64)SI5324_FOUT_MIN) <<
+ SI5324_FIN_FOUT_SHIFT) ||
+ (fout_actual > ((u64)SI5324_FOUT_MAX) <<
+ SI5324_FIN_FOUT_SHIFT)) {
+ pr_debug("Fout frequency out of range.\n");
+ } else {
+ pr_debug("Found solution: fout = %dHz delta = %dHz.\n",
+ (u32)(fout_actual >> SI5324_FIN_FOUT_SHIFT),
+ (u32)(delta_fout >> SI5324_FIN_FOUT_SHIFT));
+ pr_debug("fosc = %dkHz f3 = %dHz.\n",
+ (u32)((fosc_actual >> SI5324_FIN_FOUT_SHIFT) /
+ 1000),
+ (u32)(f3_actual >> SI5324_FIN_FOUT_SHIFT));
+
+ if (((u64)abs(delta_fout)) <
+ settings->best_delta_fout) {
+ settings->best_n1_hs = settings->n1_hs;
+ settings->best_nc1_ls = settings->nc1_ls;
+ settings->best_n2_hs = settings->n2_hs;
+ settings->best_n2_ls = settings->n2_ls;
+ settings->best_n3 = settings->n31;
+ settings->best_fout = fout_actual;
+ settings->best_delta_fout = abs(delta_fout);
+ if (delta_fout == 0)
+ result = 1;
+ }
+ }
+ }
+ return result;
+}
+
+/**
+ * si5324_find_n2 - Find a valid setting for N2_HS and N2_LS.
+ *
+ * @settings: Holds the settings up till now.
+ *
+ * This function finds valid settings for N2_HS and N2_LS. It iterates
+ * over all possibilities of N2_HS and, for each, derives N2_LS and N3
+ * through the rational approximation in si5324_find_n2ls().
+ *
+ * Return: 1 when the best possible result has been found.
+ */
+static int si5324_find_n2(struct si5324_settingst *settings)
+{
+ u32 result = 0;
+
+ for (settings->n2_hs = SI5324_N2_HS_MAX; settings->n2_hs >=
+ SI5324_N2_HS_MIN; settings->n2_hs--) {
+ pr_debug("Trying N2_HS = %d.\n", settings->n2_hs);
+ settings->n2_ls_min = (u32)(div64_u64(settings->fosc,
+ ((u64)(SI5324_F3_MAX * settings->n2_hs)
+ << SI5324_FIN_FOUT_SHIFT)));
+
+ if (settings->n2_ls_min < SI5324_N2_LS_MIN)
+ settings->n2_ls_min = SI5324_N2_LS_MIN;
+
+ settings->n2_ls_max = (u32)(div64_u64(settings->fosc,
+ ((u64)(SI5324_F3_MIN *
+ settings->n2_hs) <<
+ SI5324_FIN_FOUT_SHIFT)));
+ if (settings->n2_ls_max > SI5324_N2_LS_MAX)
+ settings->n2_ls_max = SI5324_N2_LS_MAX;
+
+ result = si5324_find_n2ls(settings);
+ if (result)
+ break;
+ }
+ return result;
+}
+
+/**
+ * si5324_calc_ncls_limits - Calculates the valid range for NCn_LS.
+ *
+ * @settings: Holds the input and output frequencies and the setting
+ * for N1_HS.
+ *
+ * This function calculates the valid range for NCn_LS with the value
+ * for the output frequency and N1_HS already set in settings.
+ *
+ * Return: -1 when there are no valid settings, 0 otherwise.
+ */
+int si5324_calc_ncls_limits(struct si5324_settingst *settings)
+{
+ settings->nc1_ls_min = div64_u64(settings->n1_hs_min,
+ settings->n1_hs);
+
+ if (settings->nc1_ls_min < SI5324_NC_LS_MIN)
+ settings->nc1_ls_min = SI5324_NC_LS_MIN;
+ if (settings->nc1_ls_min > 1 && (settings->nc1_ls_min & 0x1) == 1)
+ settings->nc1_ls_min++;
+ settings->nc1_ls_max = div64_u64(settings->n1_hs_max, settings->n1_hs);
+
+ if (settings->nc1_ls_max > SI5324_NC_LS_MAX)
+ settings->nc1_ls_max = SI5324_NC_LS_MAX;
+
+ if ((settings->nc1_ls_max & 0x1) == 1)
+ settings->nc1_ls_max--;
+ if ((settings->nc1_ls_max * settings->n1_hs < settings->n1_hs_min) ||
+ (settings->nc1_ls_min * settings->n1_hs > settings->n1_hs_max))
+ return -1;
+
+ return 0;
+}
+
+/**
+ * si5324_find_ncls - Find a valid setting for NCn_LS
+ *
+ * @settings: Holds the input and output frequencies, the setting for
+ * N1_HS, and the limits for NCn_LS.
+ *
+ * This function finds a valid setting for NCn_LS that can deliver the correct
+ * output frequency. Assumes that the valid range is relatively small
+ * so a full search can be done (should be true for video clock frequencies).
+ *
+ * Return: 1 when the best possible result has been found.
+ */
+static int si5324_find_ncls(struct si5324_settingst *settings)
+{
+ u64 fosc_1;
+ u32 result;
+
+ fosc_1 = settings->fout * settings->n1_hs;
+ for (settings->nc1_ls = settings->nc1_ls_min;
+ settings->nc1_ls <= settings->nc1_ls_max;) {
+ settings->fosc = fosc_1 * settings->nc1_ls;
+ pr_debug("Trying NCn_LS = %d: fosc = %dkHz.\n",
+ settings->nc1_ls,
+ (u32)(div64_u64((settings->fosc >>
+ SI5324_FIN_FOUT_SHIFT), 1000)));
+
+ result = si5324_find_n2(settings);
+ if (result)
+ break;
+ if (settings->nc1_ls == 1)
+ settings->nc1_ls++;
+ else
+ settings->nc1_ls += 2;
+ }
+ return result;
+}
+
+/**
+ * si5324_calcfreqsettings - Calculate the frequency settings
+ *
+ * @clkinfreq: Frequency of the input clock.
+ * @clkoutfreq: Desired output clock frequency.
+ * @clkactual: Actual clock frequency.
+ * @n1_hs: Set to the value for the N1_HS register.
+ * @ncn_ls: Set to the value for the NCn_LS register.
+ * @n2_hs: Set to the value for the N2_HS register.
+ * @n2_ls: Set to the value for the N2_LS register.
+ * @n3n: Set to the value for the N3n register.
+ * @bwsel: Set to the value for the BW_SEL register.
+ *
+ * This function calculates the frequency settings for the desired output
+ * frequency.
+ *
+ * Return: SI5324_SUCCESS for success, SI5324_ERR_FREQ when the
+ * requested frequency cannot be generated.
+ */
+int si5324_calcfreqsettings(u32 clkinfreq, u32 clkoutfreq, u32 *clkactual,
+ u8 *n1_hs, u32 *ncn_ls, u8 *n2_hs, u32 *n2_ls,
+ u32 *n3n, u8 *bwsel)
+{
+ struct si5324_settingst settings;
+ int result;
+
+ settings.fin = (u64)clkinfreq << SI5324_FIN_FOUT_SHIFT;
+ settings.fout = (u64)clkoutfreq << SI5324_FIN_FOUT_SHIFT;
+ settings.best_delta_fout = settings.fout;
+
+ settings.n1_hs_min = (int)(div64_u64(SI5324_FOSC_MIN, clkoutfreq));
+ if (settings.n1_hs_min < SI5324_N1_HS_MIN * SI5324_NC_LS_MIN)
+ settings.n1_hs_min = SI5324_N1_HS_MIN * SI5324_NC_LS_MIN;
+
+ settings.n1_hs_max = (int)(div64_u64(SI5324_FOSC_MAX, clkoutfreq));
+ if (settings.n1_hs_max > SI5324_N1_HS_MAX * SI5324_NC_LS_MAX)
+ settings.n1_hs_max = SI5324_N1_HS_MAX * SI5324_NC_LS_MAX;
+
+ settings.n31_min = div64_u64(clkinfreq, SI5324_F3_MAX);
+ if (settings.n31_min < SI5324_N3_MIN)
+ settings.n31_min = SI5324_N3_MIN;
+
+ settings.n31_max = div64_u64(clkinfreq, SI5324_F3_MIN);
+ if (settings.n31_max > SI5324_N3_MAX)
+ settings.n31_max = SI5324_N3_MAX;
+
+ /* Find a valid oscillator frequency with the highest setting of N1_HS
+ * possible (reduces power)
+ */
+ for (settings.n1_hs = SI5324_N1_HS_MAX;
+ settings.n1_hs >= SI5324_N1_HS_MIN; settings.n1_hs--) {
+ pr_debug("Trying N1_HS = %d.\n", settings.n1_hs);
+
+ result = si5324_calc_ncls_limits(&settings);
+ if (result) {
+ pr_debug("No valid settings\n");
+ continue;
+ }
+ result = si5324_find_ncls(&settings);
+ if (result)
+ break;
+ }
+
+ pr_debug("Si5324: settings.best_delta_fout = %llu\n",
+ (unsigned long long)settings.best_delta_fout);
+ pr_debug("Si5324: settings.fout = %llu\n",
+ (unsigned long long)settings.fout);
+
+ if (settings.best_delta_fout == settings.fout) {
+ pr_debug("Si5324: No valid settings found.");
+ return SI5324_ERR_FREQ;
+ }
+ pr_debug("Si5324: Found solution: fout = %dHz.\n",
+ (u32)(settings.best_fout >> 28));
+
+ /* Post processing: convert temporary values to actual registers */
+ *n1_hs = (u8)settings.best_n1_hs - 4;
+ *ncn_ls = settings.best_nc1_ls - 1;
+ *n2_hs = (u8)settings.best_n2_hs - 4;
+ *n2_ls = settings.best_n2_ls - 1;
+ *n3n = settings.best_n3 - 1;
+ /*
+ * How should the bandwidth selection be determined?
+ * Not all settings will be valid.
+ * refclk 2, 0xA2, BWSEL_REG=1010 (?)
+ * free running 2, 0x42, BWSEL_REG=0100 (?)
+ */
+ *bwsel = 6;
+
+ if (clkactual)
+ *clkactual = (settings.best_fout >> SI5324_FIN_FOUT_SHIFT);
+
+ return SI5324_SUCCESS;
+}
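
A caller sketch for the function above, assuming the KC705 crystal as the
input reference. The output rate is hypothetical, and the I2C register
programming is outside this excerpt:

	u32 actual, ncn_ls, n2_ls, n3n;
	u8 n1_hs, n2_hs, bwsel;
	int err;

	err = si5324_calcfreqsettings(SI5324_XTAL_FREQ, 148500000, &actual,
				      &n1_hs, &ncn_ls, &n2_hs, &n2_ls,
				      &n3n, &bwsel);
	if (err != SI5324_SUCCESS)
		pr_err("si5324: cannot synthesize 148.5 MHz\n");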
diff --git a/drivers/clk/si5324drv.h b/drivers/clk/si5324drv.h
new file mode 100644
index 000000000000..28ea3050d5fb
--- /dev/null
+++ b/drivers/clk/si5324drv.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Si5324 clock driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ */
+
+#ifndef SI5324DRV_H_
+#define SI5324DRV_H_
+
+#include <linux/types.h>
+
+/******************************************************************************
+ * User settable defines that depend on the specific board design.
+ * The defaults are for the Xilinx KC705 board.
+ *****************************************************************************/
+
+#define SI5324_XTAL_FREQ 114285000UL
+
+/******************************************************************************
+ * Defines independent of the specific board design. Should not be changed.
+ *****************************************************************************/
+
+#define SI5324_SUCCESS 0 /*< Operation was successful */
+#define SI5324_ERR_IIC -1 /*< IIC error occurred */
+#define SI5324_ERR_FREQ -2 /*< Could not calculate frequency setting */
+#define SI5324_ERR_PARM -3 /*< Invalid parameter */
+
+#define SI5324_CLKSRC_CLK1 1 /*< Use clock input 1 */
+#define SI5324_CLKSRC_CLK2 2 /*< Use clock input 2 */
+#define SI5324_CLKSRC_XTAL 3 /*< Use crystal (free running mode) */
+
+#define SI5324_FOSC_MIN 4850000000UL /*< Min oscillator frequency */
+#define SI5324_FOSC_MAX 5670000000UL /*< Max oscillator frequency */
+#define SI5324_F3_MIN 10000 /*< Min phase detector frequency */
+#define SI5324_F3_MAX 2000000 /*< Max phase detector frequency */
+#define SI5324_FIN_MIN 2000 /*< Min input frequency */
+#define SI5324_FIN_MAX 710000000UL /*< Max input frequency */
+#define SI5324_FOUT_MIN 2000 /*< Min output frequency */
+#define SI5324_FOUT_MAX 945000000UL /*< Max output frequency */
+
+#define SI5324_N1_HS_MIN 6
+#define SI5324_N1_HS_MAX 11
+#define SI5324_NC_LS_MIN 1
+#define SI5324_NC_LS_MAX 0x100000
+#define SI5324_N2_HS_MIN 4
+#define SI5324_N2_HS_MAX 11
+#define SI5324_N2_LS_MIN 2 /* even values only */
+#define SI5324_N2_LS_MAX 0x100000
+#define SI5324_N3_MIN 1
+#define SI5324_N3_MAX 0x080000
+#define SI5324_FIN_FOUT_SHIFT 28
+
+struct si5324_settingst {
+ /* high-speed output divider */
+ u32 n1_hs_min;
+ u32 n1_hs_max;
+ u32 n1_hs;
+
+ /* low-speed output divider for clkout1 */
+ u32 nc1_ls_min;
+ u32 nc1_ls_max;
+ u32 nc1_ls;
+
+ /* low-speed output divider for clkout2 */
+ u32 nc2_ls_min;
+ u32 nc2_ls_max;
+ u32 nc2_ls;
+
+ /* high-speed feedback divider (PLL multiplier) */
+ u32 n2_hs;
+ /* low-speed feedback divider (PLL multiplier) */
+ u32 n2_ls_min;
+ u32 n2_ls_max;
+ u32 n2_ls;
+
+ /* input divider for clk1 */
+ u32 n31_min;
+ u32 n31_max;
+ u32 n31;
+
+ u64 fin;
+ u64 fout;
+ u64 fosc;
+ u64 best_delta_fout;
+ u64 best_fout;
+ u32 best_n1_hs;
+ u32 best_nc1_ls;
+ u32 best_n2_hs;
+ u32 best_n2_ls;
+ u32 best_n3;
+};
+
+int si5324_calcfreqsettings(u32 clkinfreq, u32 clkoutfreq, u32 *clkactual,
+ u8 *n1_hs, u32 *ncn_ls, u8 *n2_hs,
+ u32 *n2_ls, u32 *n3n, u8 *bwsel);
+void si5324_rate_approx(u64 f, u64 md, u32 *num, u32 *denom);
+int si5324_calc_ncls_limits(struct si5324_settingst *settings);
+
+#endif /* SI5324DRV_H_ */
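
For reference, the synthesis relation implied by the search code in
si5324drv.c, derived from si5324_find_n2ls() and written out as a comment:

	/*
	 *   f3   = fin / N3
	 *   fosc = f3 * N2_HS * N2_LS      (kept within FOSC_MIN..FOSC_MAX)
	 *   fout = fosc / (N1_HS * NCn_LS)
	 *
	 * hence fout = fin * (N2_HS * N2_LS) / (N3 * N1_HS * NCn_LS).
	 */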
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
index cf94a12459ea..ee2a2d284113 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -174,21 +174,24 @@ void __init socfpga_gate_init(struct device_node *node)
u32 div_reg[3];
u32 clk_phase[2];
u32 fixed_div;
- struct clk *clk;
+ struct clk_hw *hw_clk;
struct socfpga_gate_clk *socfpga_clk;
const char *clk_name = node->name;
const char *parent_name[SOCFPGA_MAX_PARENTS];
struct clk_init_data init;
struct clk_ops *ops;
int rc;
+ int err;
socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
if (WARN_ON(!socfpga_clk))
return;
ops = kmemdup(&gateclk_ops, sizeof(gateclk_ops), GFP_KERNEL);
- if (WARN_ON(!ops))
+ if (WARN_ON(!ops)) {
+ kfree(socfpga_clk);
return;
+ }
rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
if (rc)
@@ -238,12 +241,15 @@ void __init socfpga_gate_init(struct device_node *node)
init.parent_names = parent_name;
socfpga_clk->hw.hw.init = &init;
- clk = clk_register(NULL, &socfpga_clk->hw.hw);
- if (WARN_ON(IS_ERR(clk))) {
+ hw_clk = &socfpga_clk->hw.hw;
+
+ err = clk_hw_register(NULL, hw_clk);
+ if (err) {
+ kfree(ops);
kfree(socfpga_clk);
return;
}
- rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ rc = of_clk_add_provider(node, of_clk_src_simple_get, hw_clk);
if (WARN_ON(rc))
return;
}
diff --git a/drivers/clk/socfpga/clk-periph.c b/drivers/clk/socfpga/clk-periph.c
index 5e0c4b45f77f..43707e2d7248 100644
--- a/drivers/clk/socfpga/clk-periph.c
+++ b/drivers/clk/socfpga/clk-periph.c
@@ -51,7 +51,7 @@ static __init void __socfpga_periph_init(struct device_node *node,
const struct clk_ops *ops)
{
u32 reg;
- struct clk *clk;
+ struct clk_hw *hw_clk;
struct socfpga_periph_clk *periph_clk;
const char *clk_name = node->name;
const char *parent_name[SOCFPGA_MAX_PARENTS];
@@ -94,13 +94,13 @@ static __init void __socfpga_periph_init(struct device_node *node,
init.parent_names = parent_name;
periph_clk->hw.hw.init = &init;
+ hw_clk = &periph_clk->hw.hw;
- clk = clk_register(NULL, &periph_clk->hw.hw);
- if (WARN_ON(IS_ERR(clk))) {
+ if (clk_hw_register(NULL, hw_clk)) {
kfree(periph_clk);
return;
}
- rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ rc = of_clk_add_provider(node, of_clk_src_simple_get, hw_clk);
}
void __init socfpga_periph_init(struct device_node *node)
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index dc65cc0fd3bd..004e196492c4 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -70,17 +70,18 @@ static struct clk_ops clk_pll_ops = {
.get_parent = clk_pll_get_parent,
};
-static __init struct clk *__socfpga_pll_init(struct device_node *node,
+static __init struct clk_hw *__socfpga_pll_init(struct device_node *node,
const struct clk_ops *ops)
{
u32 reg;
- struct clk *clk;
+ struct clk_hw *hw_clk;
struct socfpga_pll *pll_clk;
const char *clk_name = node->name;
const char *parent_name[SOCFPGA_MAX_PARENTS];
struct clk_init_data init;
struct device_node *clkmgr_np;
int rc;
+ int err;
of_property_read_u32(node, "reg", &reg);
@@ -108,13 +109,15 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
clk_pll_ops.enable = clk_gate_ops.enable;
clk_pll_ops.disable = clk_gate_ops.disable;
- clk = clk_register(NULL, &pll_clk->hw.hw);
- if (WARN_ON(IS_ERR(clk))) {
+ hw_clk = &pll_clk->hw.hw;
+
+ err = clk_hw_register(NULL, hw_clk);
+ if (err) {
kfree(pll_clk);
- return NULL;
+ return ERR_PTR(err);
}
- rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
- return clk;
+ rc = of_clk_add_provider(node, of_clk_src_simple_get, hw_clk);
+ return hw_clk;
}
void __init socfpga_pll_init(struct device_node *node)
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index a156bd0c6af7..9eff05386ef9 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -943,9 +943,10 @@ static void __init st_of_quadfs_setup(struct device_node *np,
clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, data,
reg, lock);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ kfree(lock);
goto err_exit;
- else
+ } else
pr_debug("%s: parent %s rate %u\n",
__clk_get_name(clk),
__clk_get_name(clk_get_parent(clk)),
diff --git a/drivers/clk/sunxi-ng/ccu_mmc_timing.c b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
index de33414fc5c2..c6a6ce98ca03 100644
--- a/drivers/clk/sunxi-ng/ccu_mmc_timing.c
+++ b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
@@ -43,7 +43,7 @@ int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode)
EXPORT_SYMBOL_GPL(sunxi_ccu_set_mmc_timing_mode);
/**
- * sunxi_ccu_set_mmc_timing_mode: Get the current MMC clock timing mode
+ * sunxi_ccu_get_mmc_timing_mode: Get the current MMC clock timing mode
* @clk: clock to query
*
* Returns 0 if the clock is in old timing mode, > 0 if it is in
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index a66263b6490d..00845044c98e 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -159,7 +159,7 @@ static unsigned long tegra_bpmp_clk_recalc_rate(struct clk_hw *hw,
err = tegra_bpmp_clk_transfer(clk->bpmp, &msg);
if (err < 0)
- return err;
+ return 0;
return response.rate;
}
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index 0c1b83bedb73..eb2411a4cd78 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -459,6 +459,7 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra,
err = load_one_timing_from_dt(tegra, timing, child);
if (err) {
of_node_put(child);
+ kfree(tegra->timings);
return err;
}
@@ -510,6 +511,7 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
err = load_timings_from_dt(tegra, node, node_ram_code);
if (err) {
of_node_put(node);
+ kfree(tegra);
return ERR_PTR(err);
}
}
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 4efcaaf51b3a..586b091651dd 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -1337,6 +1337,7 @@ static void __init tegra114_clock_init(struct device_node *np)
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
WARN_ON(1);
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index bcd871134f45..3f74497d73e5 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -18,24 +18,24 @@
#define MISC_CLK_ENB 0x48
#define OSC_CTRL 0x50
-#define OSC_CTRL_OSC_FREQ_MASK (3<<30)
-#define OSC_CTRL_OSC_FREQ_13MHZ (0<<30)
-#define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30)
-#define OSC_CTRL_OSC_FREQ_12MHZ (2<<30)
-#define OSC_CTRL_OSC_FREQ_26MHZ (3<<30)
-#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK)
-
-#define OSC_CTRL_PLL_REF_DIV_MASK (3<<28)
-#define OSC_CTRL_PLL_REF_DIV_1 (0<<28)
-#define OSC_CTRL_PLL_REF_DIV_2 (1<<28)
-#define OSC_CTRL_PLL_REF_DIV_4 (2<<28)
+#define OSC_CTRL_OSC_FREQ_MASK (3u<<30)
+#define OSC_CTRL_OSC_FREQ_13MHZ (0u<<30)
+#define OSC_CTRL_OSC_FREQ_19_2MHZ (1u<<30)
+#define OSC_CTRL_OSC_FREQ_12MHZ (2u<<30)
+#define OSC_CTRL_OSC_FREQ_26MHZ (3u<<30)
+#define OSC_CTRL_MASK (0x3f2u | OSC_CTRL_OSC_FREQ_MASK)
+
+#define OSC_CTRL_PLL_REF_DIV_MASK (3u<<28)
+#define OSC_CTRL_PLL_REF_DIV_1 (0u<<28)
+#define OSC_CTRL_PLL_REF_DIV_2 (1u<<28)
+#define OSC_CTRL_PLL_REF_DIV_4 (2u<<28)
#define OSC_FREQ_DET 0x58
-#define OSC_FREQ_DET_TRIG (1<<31)
+#define OSC_FREQ_DET_TRIG (1u<<31)
#define OSC_FREQ_DET_STATUS 0x5c
-#define OSC_FREQ_DET_BUSY (1<<31)
-#define OSC_FREQ_DET_CNT_MASK 0xFFFF
+#define OSC_FREQ_DET_BUSY (1u<<31)
+#define OSC_FREQ_DET_CNT_MASK 0xFFFFu
#define TEGRA20_CLK_PERIPH_BANKS 3
@@ -1151,6 +1151,7 @@ static void __init tegra20_clock_init(struct device_node *np)
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
BUG();
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index df172d5772d7..34155b5b994d 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3523,6 +3523,7 @@ static void __init tegra210_clock_init(struct device_node *np)
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
WARN_ON(1);
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index f65e16c4f3c4..62ea790d79f9 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -252,14 +252,16 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
if (rc) {
pr_err("%s: failed to lookup atl clock %d\n", __func__,
i);
- return -EINVAL;
+ ret = -EINVAL;
+ goto pm_put;
}
clk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(clk)) {
pr_err("%s: failed to get atl clock %d from provider\n",
__func__, i);
- return PTR_ERR(clk);
+ ret = PTR_ERR(clk);
+ goto pm_put;
}
cdesc = to_atl_desc(__clk_get_hw(clk));
@@ -292,8 +294,9 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
if (cdesc->enabled)
atl_clk_enable(__clk_get_hw(clk));
}
- pm_runtime_put_sync(cinfo->dev);
+pm_put:
+ pm_runtime_put_sync(cinfo->dev);
return ret;
}
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index ffbb9008c1c9..f7cde869c7f7 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -37,6 +37,7 @@ static void __iomem *zynq_clkc_base;
#define SLCR_CAN_MIOCLK_CTRL (zynq_clkc_base + 0x60)
#define SLCR_DBG_CLK_CTRL (zynq_clkc_base + 0x64)
#define SLCR_PCAP_CLK_CTRL (zynq_clkc_base + 0x68)
+#define SLCR_TOPSW_CLK_CTRL (zynq_clkc_base + 0x6c)
#define SLCR_FPGA0_CLK_CTRL (zynq_clkc_base + 0x70)
#define SLCR_621_TRUE (zynq_clkc_base + 0xc4)
#define SLCR_SWDT_CLK_SEL (zynq_clkc_base + 0x204)
@@ -99,6 +100,48 @@ static const char *const gem1_emio_input_names[] __initconst = {
static const char *const swdt_ext_clk_input_names[] __initconst = {
"swdt_ext_clk"};
+#ifdef CONFIG_SUSPEND
+static struct clk *iopll_save_parent;
+
+#define TOPSW_CLK_CTRL_DIS_MASK BIT(0)
+
+int zynq_clk_suspend_early(void)
+{
+ int ret;
+
+ iopll_save_parent = clk_get_parent(clks[iopll]);
+
+ ret = clk_set_parent(clks[iopll], ps_clk);
+ if (ret)
+ pr_info("%s: reparent iopll failed %d\n", __func__, ret);
+
+ return 0;
+}
+
+void zynq_clk_resume_late(void)
+{
+ clk_set_parent(clks[iopll], iopll_save_parent);
+}
+
+void zynq_clk_topswitch_enable(void)
+{
+ u32 reg;
+
+ reg = readl(SLCR_TOPSW_CLK_CTRL);
+ reg &= ~TOPSW_CLK_CTRL_DIS_MASK;
+ writel(reg, SLCR_TOPSW_CLK_CTRL);
+}
+
+void zynq_clk_topswitch_disable(void)
+{
+ u32 reg;
+
+ reg = readl(SLCR_TOPSW_CLK_CTRL);
+ reg |= TOPSW_CLK_CTRL_DIS_MASK;
+ writel(reg, SLCR_TOPSW_CLK_CTRL);
+}
+#endif
+
static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
const char *clk_name, void __iomem *fclk_ctrl_reg,
const char **parents, int enable)
diff --git a/drivers/clk/zynqmp/clk-mux-zynqmp.c b/drivers/clk/zynqmp/clk-mux-zynqmp.c
index 0af8f74c5fa5..880ea34c0038 100644
--- a/drivers/clk/zynqmp/clk-mux-zynqmp.c
+++ b/drivers/clk/zynqmp/clk-mux-zynqmp.c
@@ -85,7 +85,7 @@ static int zynqmp_clk_mux_set_parent(struct clk_hw *hw, u8 index)
static const struct clk_ops zynqmp_clk_mux_ops = {
.get_parent = zynqmp_clk_mux_get_parent,
.set_parent = zynqmp_clk_mux_set_parent,
- .determine_rate = __clk_mux_determine_rate,
+ .determine_rate = __clk_mux_determine_rate_closest,
};
static const struct clk_ops zynqmp_clk_mux_ro_ops = {
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index 6f057ab9df03..06cd4401e4f1 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -34,6 +34,8 @@
#define END_OF_PARENTS 1
#define RESERVED_CLK_NAME ""
+#define CLK_TYPE_FLAG2_FIELD_MASK GENMASK(7, 4)
+#define CLK_TYPE_FLAG_BITS 8
#define CLK_GET_NAME_RESP_LEN 16
#define CLK_GET_TOPOLOGY_RESP_WORDS 3
#define CLK_GET_PARENTS_RESP_WORDS 3
@@ -396,6 +398,9 @@ static int __zynqmp_clock_get_topology(struct clock_topology *topology,
topology[*nnodes].type_flag =
FIELD_GET(CLK_TOPOLOGY_TYPE_FLAGS,
response->topology[i]);
+ topology[*nnodes].type_flag |=
+ FIELD_GET(CLK_TYPE_FLAG2_FIELD_MASK, response->topology[i]) <<
+ CLK_TYPE_FLAG_BITS;
(*nnodes)++;
}
@@ -679,6 +684,13 @@ static void zynqmp_get_clock_info(void)
FIELD_PREP(CLK_ATTR_NODE_INDEX, i);
zynqmp_pm_clock_get_name(clock[i].clk_id, &name);
+
+ /*
+ * Terminate with a NUL character in case the name provided by the
+ * firmware is longer and was truncated due to the size limit.
+ */
+ name.name[sizeof(name.name) - 1] = '\0';
+
if (!strcmp(name.name, RESERVED_CLK_NAME))
continue;
strncpy(clock[i].clk_name, name.name, MAX_NAME_LEN);
@@ -752,6 +764,7 @@ static int zynqmp_clock_probe(struct platform_device *pdev)
static const struct of_device_id zynqmp_clock_of_match[] = {
{.compatible = "xlnx,zynqmp-clk"},
+ {.compatible = "xlnx,versal-clk"},
{},
};
MODULE_DEVICE_TABLE(of, zynqmp_clock_of_match);
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index d8f5b70d2709..cb118775778a 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -25,7 +25,7 @@
#define to_zynqmp_clk_divider(_hw) \
container_of(_hw, struct zynqmp_clk_divider, hw)
-#define CLK_FRAC BIT(13) /* has a fractional parent */
+#define CLK_FRAC BIT(8) /* has a fractional parent */
/**
* struct zynqmp_clk_divider - adjustable divider clock
@@ -34,19 +34,38 @@
* @is_frac: The divider is a fractional divider
* @clk_id: Id of clock
* @div_type: divisor type (TYPE_DIV1 or TYPE_DIV2)
+ * @max_div: Maximum supported divisor
*/
struct zynqmp_clk_divider {
struct clk_hw hw;
- u8 flags;
+ u16 flags;
bool is_frac;
u32 clk_id;
u32 div_type;
+ u32 max_div;
};
static inline int zynqmp_divider_get_val(unsigned long parent_rate,
- unsigned long rate)
+ unsigned long rate, u16 flags)
{
- return DIV_ROUND_CLOSEST(parent_rate, rate);
+ int up, down;
+ unsigned long up_rate, down_rate;
+
+ if (flags & CLK_DIVIDER_POWER_OF_TWO) {
+ up = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+ down = parent_rate / rate;
+
+ up = __roundup_pow_of_two(up);
+ down = __rounddown_pow_of_two(down);
+
+ up_rate = DIV_ROUND_UP_ULL((u64)parent_rate, up);
+ down_rate = DIV_ROUND_UP_ULL((u64)parent_rate, down);
+
+ return (rate - up_rate) <= (down_rate - rate) ? up : down;
+
+ } else {
+ return DIV_ROUND_CLOSEST(parent_rate, rate);
+ }
}
/**
@@ -78,6 +97,9 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
else
value = div >> 16;
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ value = 1 << value;
+
if (!value) {
WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
"%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
@@ -88,6 +110,34 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_UP_ULL(parent_rate, value);
}
+static void zynqmp_compute_divider(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u32 max_div,
+ int *bestdiv)
+{
+ int div1;
+ int div2;
+ long error = LONG_MAX;
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+ struct zynqmp_clk_divider *pdivider = to_zynqmp_clk_divider(parent_hw);
+
+ if (!pdivider)
+ return;
+
+ *bestdiv = 1;
+ for (div1 = 1; div1 <= pdivider->max_div; div1++) {
+ for (div2 = 1; div2 <= max_div; div2++) {
+ long new_error = ((parent_rate / div1) / div2) - rate;
+
+ if (abs(new_error) < abs(error)) {
+ *bestdiv = div2;
+ error = new_error;
+ }
+ }
+ }
+}
+
/**
* zynqmp_clk_divider_round_rate() - Round rate of divider clock
* @hw: handle between common and hardware-specific interfaces
@@ -120,13 +170,29 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
else
bestdiv = bestdiv >> 16;
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ bestdiv = 1 << bestdiv;
+
return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
}
- bestdiv = zynqmp_divider_get_val(*prate, rate);
+ bestdiv = zynqmp_divider_get_val(*prate, rate, divider->flags);
+
+ /*
+ * In case of two divisors, compute the best divider values and return
+ * the divider2 value based on the computed result. div1 is then set
+ * automatically to the optimum from the required total divider value.
+ */
+ if (div_type == TYPE_DIV2 &&
+ (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+ zynqmp_compute_divider(hw, rate, *prate,
+ divider->max_div, &bestdiv);
+ }
if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
bestdiv = rate % *prate ? 1 : bestdiv;
+
+ bestdiv = min_t(u32, bestdiv, divider->max_div);
*prate = rate * bestdiv;
return rate;
@@ -151,7 +217,7 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
int ret;
const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- value = zynqmp_divider_get_val(parent_rate, rate);
+ value = zynqmp_divider_get_val(parent_rate, rate, divider->flags);
if (div_type == TYPE_DIV1) {
div = value & 0xFFFF;
div |= 0xffff << 16;
@@ -160,6 +226,9 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
div |= value << 16;
}
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ div = __ffs(div);
+
ret = eemi_ops->clock_setdivider(clk_id, div);
if (ret)
@@ -195,6 +264,9 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
struct clk_hw *hw;
struct clk_init_data init;
int ret;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
/* allocate the divider */
div = kzalloc(sizeof(*div), GFP_KERNEL);
@@ -215,6 +287,21 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
div->clk_id = clk_id;
div->div_type = nodes->type;
+ /*
+ * To achieve the best possible rate, the maximum divider limit is
+ * required during computation. Get the maximum supported divisor from
+ * the firmware. To maintain backward compatibility, assign the maximum
+ * possible value (0xFFFF) if the query for the max divisor fails.
+ */
+ qdata.qid = PM_QID_CLOCK_GET_MAX_DIVISOR;
+ qdata.arg1 = clk_id;
+ qdata.arg2 = nodes->type;
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ div->max_div = 0xFFFF;
+ else
+ div->max_div = ret_payload[1];
+
hw = &div->hw;
ret = clk_hw_register(NULL, hw);
if (ret) {
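A worked sketch of the CLK_DIVIDER_POWER_OF_TWO rounding introduced in
zynqmp_divider_get_val() above, using hypothetical rates:

	/*
	 * parent = 100 MHz, requested = 9 MHz:
	 *   up   = roundup_pow_of_two(DIV_ROUND_UP(100000000, 9000000)) = 16
	 *   down = rounddown_pow_of_two(100000000 / 9000000)            = 8
	 *   up_rate   = 100000000 / 16 = 6250000  (2750000 below 9 MHz)
	 *   down_rate = 100000000 / 8  = 12500000 (3500000 above 9 MHz)
	 * (rate - up_rate) <= (down_rate - rate), so the divider 16 wins.
	 */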
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index 18fee827602a..4036f5c01fde 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -98,26 +98,25 @@ static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
u32 fbdiv;
- long rate_div, f;
+ u32 mult, div;
- /* Enable the fractional mode if needed */
- rate_div = (rate * FRAC_DIV) / *prate;
- f = rate_div % FRAC_DIV;
- if (f) {
- if (rate > PS_PLL_VCO_MAX) {
- fbdiv = rate / PS_PLL_VCO_MAX;
- rate = rate / (fbdiv + 1);
- }
- if (rate < PS_PLL_VCO_MIN) {
- fbdiv = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate);
- rate = rate * fbdiv;
- }
- return rate;
+ /* Let rate fall inside the range PS_PLL_VCO_MIN ~ PS_PLL_VCO_MAX */
+ if (rate > PS_PLL_VCO_MAX) {
+ div = DIV_ROUND_UP(rate, PS_PLL_VCO_MAX);
+ rate = rate / div;
+ }
+ if (rate < PS_PLL_VCO_MIN) {
+ mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate);
+ rate = rate * mult;
}
fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
- fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
- return *prate * fbdiv;
+ if (fbdiv < PLL_FBDIV_MIN || fbdiv > PLL_FBDIV_MAX) {
+ fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
+ rate = *prate * fbdiv;
+ }
+
+ return rate;
}
/**
@@ -188,9 +187,12 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
frac = (parent_rate * f) / FRAC_DIV;
ret = eemi_ops->clock_setdivider(clk_id, m);
- if (ret)
- pr_warn_once("%s() set divider failed for %s, ret = %d\n",
- __func__, clk_name, ret);
+ if (ret) {
+ if (ret == -EUSERS)
+ WARN(1, "More than allowed devices are using the %s, which is forbidden\n", clk_name);
+ pr_err("%s() set divider failed for %s, ret = %d\n",
+ __func__, clk_name, ret);
+ }
eemi_ops->ioctl(0, IOCTL_SET_PLL_FRAC_DATA, clk_id, f, NULL);
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index a0570213170d..b1ec79ddb7f2 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -231,6 +231,8 @@ static const struct sh_cmt_info sh_cmt_info[] = {
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
+#define CMCLKE 0x1000 /* CLK Enable Register (R-Car Gen2) */
+
static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
if (ch->iostart)
@@ -845,6 +847,7 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
unsigned int hwidx, bool clockevent,
bool clocksource, struct sh_cmt_device *cmt)
{
+ u32 value;
int ret;
/* Skip unused channels. */
@@ -874,6 +877,11 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
ch->ioctrl = ch->iostart + 0x10;
ch->timer_bit = 0;
+
+ /* Enable the clock supply to the channel */
+ value = ioread32(cmt->mapbase + CMCLKE);
+ value |= BIT(hwidx);
+ iowrite32(value, cmt->mapbase + CMCLKE);
break;
}
@@ -1006,12 +1014,10 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
else
cmt->rate = clk_get_rate(cmt->clk) / 8;
- clk_disable(cmt->clk);
-
/* Map the memory resource(s). */
ret = sh_cmt_map_memory(cmt);
if (ret < 0)
- goto err_clk_unprepare;
+ goto err_clk_disable;
/* Allocate and setup the channels. */
cmt->num_channels = hweight8(cmt->hw_channels);
@@ -1039,6 +1045,8 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
mask &= ~(1 << hwidx);
}
+ clk_disable(cmt->clk);
+
platform_set_drvdata(pdev, cmt);
return 0;
@@ -1046,6 +1054,8 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
err_unmap:
kfree(cmt->channels);
iounmap(cmt->mapbase);
+err_clk_disable:
+ clk_disable(cmt->clk);
err_clk_unprepare:
clk_unprepare(cmt->clk);
err_clk_put:
diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
index 7427b07495a8..906c1bfdccad 100644
--- a/drivers/clocksource/timer-atmel-tcb.c
+++ b/drivers/clocksource/timer-atmel-tcb.c
@@ -310,6 +310,7 @@ static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
writel(mck_divisor_idx /* likely divide-by-8 */
| ATMEL_TC_WAVE
| ATMEL_TC_WAVESEL_UP /* free-run */
+ | ATMEL_TC_ASWTRG_SET /* TIOA0 rises at software trigger */
| ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
| ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
tcaddr + ATMEL_TC_REG(0, CMR));
diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c
index 160bc6597de5..bd49385178d0 100644
--- a/drivers/clocksource/timer-cadence-ttc.c
+++ b/drivers/clocksource/timer-cadence-ttc.c
@@ -15,6 +15,8 @@
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
/*
* This driver configures the 2 16/32-bit count-up timers as follows:
@@ -464,13 +466,7 @@ out_kfree:
return err;
}
-/**
- * ttc_timer_init - Initialize the timer
- *
- * Initializes the timer hardware and register the clock source and clock event
- * timers with Linux kernal timer framework
- */
-static int __init ttc_timer_init(struct device_node *timer)
+static int __init ttc_timer_probe(struct platform_device *pdev)
{
unsigned int irq;
void __iomem *timer_baseaddr;
@@ -478,6 +474,7 @@ static int __init ttc_timer_init(struct device_node *timer)
static int initialized;
int clksel, ret;
u32 timer_width = 16;
+ struct device_node *timer = pdev->dev.of_node;
if (initialized)
return 0;
@@ -489,10 +486,10 @@ static int __init ttc_timer_init(struct device_node *timer)
* and use it. Note that the event timer uses the interrupt and it's the
* 2nd TTC hence the irq_of_parse_and_map(,1)
*/
- timer_baseaddr = of_iomap(timer, 0);
- if (!timer_baseaddr) {
+ timer_baseaddr = devm_of_iomap(&pdev->dev, timer, 0, NULL);
+ if (IS_ERR(timer_baseaddr)) {
pr_err("ERROR: invalid timer base address\n");
- return -ENXIO;
+ return PTR_ERR(timer_baseaddr);
}
irq = irq_of_parse_and_map(timer, 1);
@@ -516,20 +513,40 @@ static int __init ttc_timer_init(struct device_node *timer)
clk_ce = of_clk_get(timer, clksel);
if (IS_ERR(clk_ce)) {
pr_err("ERROR: timer input clock not found\n");
- return PTR_ERR(clk_ce);
+ ret = PTR_ERR(clk_ce);
+ goto put_clk_cs;
}
ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
if (ret)
- return ret;
+ goto put_clk_ce;
ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
if (ret)
- return ret;
+ goto put_clk_ce;
pr_info("%pOFn #0 at %p, irq=%d\n", timer, timer_baseaddr, irq);
return 0;
+
+put_clk_ce:
+ clk_put(clk_ce);
+put_clk_cs:
+ clk_put(clk_cs);
+ return ret;
}
-TIMER_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
+static const struct of_device_id ttc_timer_of_match[] = {
+ {.compatible = "cdns,ttc"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, ttc_timer_of_match);
+
+static struct platform_driver ttc_timer_driver = {
+ .driver = {
+ .name = "cdns_ttc_timer",
+ .of_match_table = ttc_timer_of_match,
+ },
+};
+builtin_platform_driver_probe(ttc_timer_driver, ttc_timer_probe);
diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
index e421946a91c5..3dc0c6ceed02 100644
--- a/drivers/clocksource/timer-davinci.c
+++ b/drivers/clocksource/timer-davinci.c
@@ -18,7 +18,7 @@
#include <clocksource/timer-davinci.h>
#undef pr_fmt
-#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+#define pr_fmt(fmt) "%s: " fmt, __func__
#define DAVINCI_TIMER_REG_TIM12 0x10
#define DAVINCI_TIMER_REG_TIM34 0x14
@@ -250,30 +250,32 @@ int __init davinci_timer_register(struct clk *clk,
rv = clk_prepare_enable(clk);
if (rv) {
- pr_err("Unable to prepare and enable the timer clock");
+ pr_err("Unable to prepare and enable the timer clock\n");
return rv;
}
if (!request_mem_region(timer_cfg->reg.start,
resource_size(&timer_cfg->reg),
"davinci-timer")) {
- pr_err("Unable to request memory region");
- return -EBUSY;
+ pr_err("Unable to request memory region\n");
+ rv = -EBUSY;
+ goto exit_clk_disable;
}
base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg));
if (!base) {
- pr_err("Unable to map the register range");
- return -ENOMEM;
+ pr_err("Unable to map the register range\n");
+ rv = -ENOMEM;
+ goto exit_mem_region;
}
davinci_timer_init(base);
tick_rate = clk_get_rate(clk);
- clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL | __GFP_NOFAIL);
+ clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL);
if (!clockevent) {
- pr_err("Error allocating memory for clockevent data");
- return -ENOMEM;
+ rv = -ENOMEM;
+ goto exit_iounmap_base;
}
clockevent->dev.name = "tim12";
@@ -298,8 +300,8 @@ int __init davinci_timer_register(struct clk *clk,
davinci_timer_irq_timer, IRQF_TIMER,
"clockevent/tim12", clockevent);
if (rv) {
- pr_err("Unable to request the clockevent interrupt");
- return rv;
+ pr_err("Unable to request the clockevent interrupt\n");
+ goto exit_free_clockevent;
}
davinci_clocksource.dev.rating = 300;
@@ -325,14 +327,28 @@ int __init davinci_timer_register(struct clk *clk,
rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
if (rv) {
- pr_err("Unable to register clocksource");
- return rv;
+ pr_err("Unable to register clocksource\n");
+ goto exit_free_irq;
}
sched_clock_register(davinci_timer_read_sched_clock,
DAVINCI_TIMER_CLKSRC_BITS, tick_rate);
return 0;
+
+exit_free_irq:
+ free_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start,
+ clockevent);
+exit_free_clockevent:
+ kfree(clockevent);
+exit_iounmap_base:
+ iounmap(base);
+exit_mem_region:
+ release_mem_region(timer_cfg->reg.start,
+ resource_size(&timer_cfg->reg));
+exit_clk_disable:
+ clk_disable_unprepare(clk);
+ return rv;
}
static int __init of_davinci_timer_register(struct device_node *np)
@@ -343,20 +359,20 @@ static int __init of_davinci_timer_register(struct device_node *np)
rv = of_address_to_resource(np, 0, &timer_cfg.reg);
if (rv) {
- pr_err("Unable to get the register range for timer");
+ pr_err("Unable to get the register range for timer\n");
return rv;
}
rv = of_irq_to_resource_table(np, timer_cfg.irq,
DAVINCI_TIMER_NUM_IRQS);
if (rv != DAVINCI_TIMER_NUM_IRQS) {
- pr_err("Unable to get the interrupts for timer");
+ pr_err("Unable to get the interrupts for timer\n");
return rv;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
- pr_err("Unable to get the timer clock");
+ pr_err("Unable to get the timer clock\n");
return PTR_ERR(clk);
}
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 706c0d0ff56c..268c09417fa2 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -460,12 +460,16 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t
return -ENOMEM;
imxtm->base = of_iomap(np, 0);
- if (!imxtm->base)
- return -ENXIO;
+ if (!imxtm->base) {
+ ret = -ENXIO;
+ goto err_kfree;
+ }
imxtm->irq = irq_of_parse_and_map(np, 0);
- if (imxtm->irq <= 0)
- return -EINVAL;
+ if (imxtm->irq <= 0) {
+ ret = -EINVAL;
+ goto err_kfree;
+ }
imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
@@ -478,11 +482,15 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t
ret = _mxc_timer_init(imxtm);
if (ret)
- return ret;
+ goto err_kfree;
initialized = 1;
return 0;
+
+err_kfree:
+ kfree(imxtm);
+ return ret;
}
static int __init imx1_timer_init_dt(struct device_node *np)
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index e3be5c2f57b8..4b04ffbe5e7e 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -26,7 +26,7 @@ static int riscv_clock_next_event(unsigned long delta,
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
.name = "riscv_timer_clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 100,
.set_next_event = riscv_clock_next_event,
};
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index f261a57af1c0..919d6f1ced8d 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -57,10 +57,6 @@ struct quad8_iio {
#define QUAD8_REG_CHAN_OP 0x11
#define QUAD8_REG_INDEX_INPUT_LEVELS 0x16
-/* Borrow Toggle flip-flop */
-#define QUAD8_FLAG_BT BIT(0)
-/* Carry Toggle flip-flop */
-#define QUAD8_FLAG_CT BIT(1)
/* Error flag */
#define QUAD8_FLAG_E BIT(4)
/* Up/Down flag */
@@ -97,9 +93,6 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
{
struct quad8_iio *const priv = iio_priv(indio_dev);
const int base_offset = priv->base + 2 * chan->channel;
- unsigned int flags;
- unsigned int borrow;
- unsigned int carry;
int i;
switch (mask) {
@@ -110,12 +103,7 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
}
- flags = inb(base_offset + 1);
- borrow = flags & QUAD8_FLAG_BT;
- carry = !!(flags & QUAD8_FLAG_CT);
-
- /* Borrow XOR Carry effectively doubles count range */
- *val = (borrow ^ carry) << 24;
+ *val = 0;
mutex_lock(&priv->lock);
@@ -639,19 +627,9 @@ static int quad8_count_read(struct counter_device *counter,
{
struct quad8_iio *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
- unsigned int flags;
- unsigned int borrow;
- unsigned int carry;
- unsigned long position;
+ unsigned long position = 0;
int i;
- flags = inb(base_offset + 1);
- borrow = flags & QUAD8_FLAG_BT;
- carry = !!(flags & QUAD8_FLAG_CT);
-
- /* Borrow XOR Carry effectively doubles count range */
- position = (unsigned long)(borrow ^ carry) << 24;
-
mutex_lock(&priv->lock);
/* Reset Byte Pointer; transfer Counter to Output Latch */
@@ -1204,8 +1182,8 @@ static ssize_t quad8_count_ceiling_read(struct counter_device *counter,
mutex_unlock(&priv->lock);
- /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
- return sprintf(buf, "33554431\n");
+ /* By default 0xFFFFFF (24 bits unsigned) is maximum count */
+ return sprintf(buf, "16777215\n");
}
static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
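The new ceiling follows from dropping the borrow/carry doubling above: the
counter itself is 24 bits wide, so the maximum is 2^24 - 1 = 16777215
(0xFFFFFF), instead of the previous 2^25 - 1 = 33554431 that the flip-flop
trick extended it to.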
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
index bbc930a5962c..95f8f2e217db 100644
--- a/drivers/counter/stm32-lptimer-cnt.c
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -69,7 +69,7 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv,
/* ensure CMP & ARR registers are properly written */
ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
- (val & STM32_LPTIM_CMPOK_ARROK),
+ (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
100, 1000);
if (ret)
return ret;
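Assuming STM32_LPTIM_CMPOK_ARROK is the OR of the CMPOK and ARROK flags,
as its name suggests, the distinction made by this fix is:

	/* old: succeeds as soon as either flag is set */
	(val & STM32_LPTIM_CMPOK_ARROK)
	/* new: waits until both flags are set */
	(val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK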
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 4195834a4591..cf7ebe3bd1ad 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -30,6 +30,7 @@
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index e2df9d112106..72fd06fa0b59 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -18,6 +18,7 @@
#include <asm/msr.h>
#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
#include "cpufreq_ondemand.h"
@@ -123,6 +124,8 @@ static int __init amd_freq_sensitivity_init(void)
if (!pcidev) {
if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
return -ENODEV;
+ } else {
+ pci_dev_put(pcidev);
}
if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index 2de7fd18f66a..f0be8a43ec49 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -443,7 +443,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
return -ENODEV;
}
- clk = clk_get(cpu_dev, 0);
+ clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
dev_err(cpu_dev, "Cannot get clock for CPU0\n");
return PTR_ERR(clk);
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index a3c82f530d60..541486217984 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -410,7 +410,11 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
if (ret)
return ERR_PTR(ret);
- table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1, sizeof(*table),
+ /*
+ * We allocate space for the 5 different AVS P-states,
+ * plus extra space for a terminating element.
+ */
+ table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1 + 1, sizeof(*table),
GFP_KERNEL);
if (!table)
return ERR_PTR(-ENOMEM);
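Assuming P-states are indexed 0 through AVS_PSTATE_MAX, the arithmetic is:
"AVS_PSTATE_MAX + 1" covers the five states themselves, and the extra
"+ 1" reserves the zeroed sentinel entry that terminates a cpufreq
frequency table.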
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 1200842c3da4..5d28553b69f5 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -126,6 +126,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "nvidia,tegra124", },
{ .compatible = "nvidia,tegra210", },
+ { .compatible = "nvidia,tegra234", },
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index af9f34804862..11b9edc713ba 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -441,8 +441,10 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
cpufreq_notify_post_transition(policy, freqs, transition_failed);
+ spin_lock(&policy->transition_lock);
policy->transition_ongoing = false;
policy->transition_task = NULL;
+ spin_unlock(&policy->transition_lock);
wake_up(&policy->transition_wait);
}
@@ -1204,6 +1206,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
goto err_free_rcpumask;
+ init_completion(&policy->kobj_unregister);
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
cpufreq_global_kobject, "policy%u", cpu);
if (ret) {
@@ -1242,7 +1245,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock);
init_waitqueue_head(&policy->transition_wait);
- init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
policy->cpu = cpu;
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index edef3399c979..84f6dbd4e979 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -210,6 +210,14 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.suspend = cpufreq_generic_suspend,
};
+static void imx6x_disable_freq_in_opp(struct device *dev, unsigned long freq)
+{
+ int ret = dev_pm_opp_disable(dev, freq);
+
+ if (ret < 0 && ret != -ENODEV)
+ dev_warn(dev, "failed to disable %ldMHz OPP\n", freq / 1000000);
+}
+
#define OCOTP_CFG3 0x440
#define OCOTP_CFG3_SPEED_SHIFT 16
#define OCOTP_CFG3_SPEED_1P2GHZ 0x3
@@ -245,17 +253,15 @@ static void imx6q_opp_check_speed_grading(struct device *dev)
val &= 0x3;
if (val < OCOTP_CFG3_SPEED_996MHZ)
- if (dev_pm_opp_disable(dev, 996000000))
- dev_warn(dev, "failed to disable 996MHz OPP\n");
+ imx6x_disable_freq_in_opp(dev, 996000000);
if (of_machine_is_compatible("fsl,imx6q") ||
of_machine_is_compatible("fsl,imx6qp")) {
if (val != OCOTP_CFG3_SPEED_852MHZ)
- if (dev_pm_opp_disable(dev, 852000000))
- dev_warn(dev, "failed to disable 852MHz OPP\n");
+ imx6x_disable_freq_in_opp(dev, 852000000);
+
if (val != OCOTP_CFG3_SPEED_1P2GHZ)
- if (dev_pm_opp_disable(dev, 1200000000))
- dev_warn(dev, "failed to disable 1.2GHz OPP\n");
+ imx6x_disable_freq_in_opp(dev, 1200000000);
}
iounmap(base);
put_node:
@@ -308,20 +314,16 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
val >>= OCOTP_CFG3_SPEED_SHIFT;
val &= 0x3;
- if (of_machine_is_compatible("fsl,imx6ul")) {
+ if (of_machine_is_compatible("fsl,imx6ul"))
if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
- if (dev_pm_opp_disable(dev, 696000000))
- dev_warn(dev, "failed to disable 696MHz OPP\n");
- }
+ imx6x_disable_freq_in_opp(dev, 696000000);
if (of_machine_is_compatible("fsl,imx6ull")) {
- if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
- if (dev_pm_opp_disable(dev, 792000000))
- dev_warn(dev, "failed to disable 792MHz OPP\n");
+ if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
+ imx6x_disable_freq_in_opp(dev, 792000000);
if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
- if (dev_pm_opp_disable(dev, 900000000))
- dev_warn(dev, "failed to disable 900MHz OPP\n");
+ imx6x_disable_freq_in_opp(dev, 900000000);
}
return ret;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 88fe803a044d..06302eaa3e94 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -442,20 +442,6 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
(u32) cpu->acpi_perf_data.states[i].control);
}
- /*
- * The _PSS table doesn't contain whole turbo frequency range.
- * This just contains +1 MHZ above the max non turbo frequency,
- * with control value corresponding to max turbo ratio. But
- * when cpufreq set policy is called, it will call with this
- * max frequency, which will cause a reduced performance as
- * this driver uses real max turbo frequency as the max
- * frequency. So correct this frequency in _PSS table to
- * correct max turbo frequency based on the turbo state.
- * Also need to convert to MHz as _PSS freq is in MHz.
- */
- if (!global.turbo_disabled)
- cpu->acpi_perf_data.states[0].core_frequency =
- policy->cpuinfo.max_freq / 1000;
cpu->valid_pss_table = true;
pr_debug("_PPC limits will be enforced\n");
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 1b2ec3be59eb..c658e8e49f90 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1101,7 +1101,8 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
kfree(data->powernow_table);
kfree(data);
- for_each_cpu(cpu, pol->cpus)
+ /* pol->cpus will be empty here, use related_cpus instead. */
+ for_each_cpu(cpu, pol->related_cpus)
per_cpu(powernow_data, cpu) = NULL;
return 0;
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index d06d21a9525d..74702065730c 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -224,6 +224,6 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
* also be 0 on platforms with missing DT idle states or legacy DT
* configuration predating the DT idle states bindings.
*/
- return i;
+ return state_idx - start_idx;
}
EXPORT_SYMBOL_GPL(dt_init_idle_driver);
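
Per the comment above, dt_init_idle_driver() now returns how many DT idle states were actually initialized rather than a loop index; zero is a valid "no DT states" result, only negative values are errors. A hedged caller sketch (demo_* names and the match table are assumptions of this sketch, not part of the patch):

#include <linux/cpuidle.h>
#include <linux/of.h>

#include "dt_idle_states.h"        /* dt_init_idle_driver() */

static const struct of_device_id demo_idle_state_match[] = {
        { .compatible = "demo,idle-state" },
        { },
};

static int demo_idle_probe(struct cpuidle_driver *drv)
{
        int nr;

        /* State 0 is the built-in WFI state, so DT parsing starts at 1. */
        nr = dt_init_idle_driver(drv, demo_idle_state_match, 1);
        if (nr < 0)
                return nr;              /* malformed DT */
        if (nr == 0)
                pr_info("no DT idle states, WFI only\n");

        return cpuidle_register(drv, NULL);
}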
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 1f6308cdf79a..24c03560ca3e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -700,6 +700,36 @@ config CRYPTO_DEV_ROCKCHIP
This driver interfaces with the hardware crypto accelerator.
Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
+config CRYPTO_DEV_ZYNQMP_SHA3
+ tristate "Support for Xilinx ZynqMP SHA3 hw accelerator"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select CRYPTO_HASH
+ help
+ ZynqMP processors have a Keccak-SHA384 hardware accelerator.
+
+ Select this if you want to use the ZynqMP module for
+ Keccak-SHA384 algorithms.
+
+config CRYPTO_DEV_XILINX_RSA
+ tristate "Support for Xilinx ZynqMP RSA hw accelerator"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select CRYPTO_AES
+ select CRYPTO_BLKCIPHER
+ help
+ Xilinx ZynqMP processors have an RSA hardware accelerator. Select
+ this if you want to use the ZynqMP module for RSA algorithms.
+
+config CRYPTO_DEV_ZYNQMP_AES
+ tristate "Support for Xilinx ZynqMP AES hw accelerator"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select CRYPTO_AES
+ select CRYPTO_BLKCIPHER
+ help
+ Xilinx ZynqMP processors have an AES-GCM engine used for symmetric
+ key encryption and decryption. This driver interfaces with the AES
+ hardware accelerator. Select this if you want to use the ZynqMP
+ module for AES algorithms.
+
config CRYPTO_DEV_MEDIATEK
tristate "MediaTek's EIP97 Cryptographic Engine driver"
depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index afc4753b5d28..d70e7162838d 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -47,4 +47,7 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
+obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes.o
+obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_SHA3) += zynqmp-sha.o
+obj-$(CONFIG_CRYPTO_DEV_XILINX_RSA) += zynqmp-rsa.o
obj-y += hisilicon/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 230e8902c727..b58f21dd3fed 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -521,7 +521,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
{
struct skcipher_request *req;
struct scatterlist *dst;
- dma_addr_t addr;
req = skcipher_request_cast(pd_uinfo->async_req);
@@ -530,8 +529,8 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
req->cryptlen, req->dst);
} else {
dst = pd_uinfo->dest_va;
- addr = dma_map_page(dev->core_dev->device, sg_page(dst),
- dst->offset, dst->length, DMA_FROM_DEVICE);
+ dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
+ DMA_FROM_DEVICE);
}
if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
@@ -556,10 +555,9 @@ static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
struct ahash_request *ahash_req;
ahash_req = ahash_request_cast(pd_uinfo->async_req);
- ctx = crypto_tfm_ctx(ahash_req->base.tfm);
+ ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));
- crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
- crypto_tfm_ctx(ahash_req->base.tfm));
+ crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd_uinfo->state & PD_ENTRY_BUSY)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index fdd994ee55e2..2db88213f13e 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -553,7 +553,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
return -EINVAL;
}
- ctx->cdata.key_virt = key;
+ memcpy(ctx->key, key, keylen);
+ ctx->cdata.key_virt = ctx->key;
ctx->cdata.keylen = keylen - saltlen;
return chachapoly_set_sh_desc(aead);
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 28692d068176..b8d277bfbd32 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -639,7 +639,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
return -EINVAL;
}
- ctx->cdata.key_virt = key;
+ memcpy(ctx->key, key, keylen);
+ ctx->cdata.key_virt = ctx->key;
ctx->cdata.keylen = keylen - saltlen;
return chachapoly_set_sh_desc(aead);
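
Both chachapoly hunks fix the same lifetime bug: the key pointer handed to ->setkey() is only guaranteed valid for the duration of the call, so descriptors must reference a driver-owned copy, never the caller's buffer. A generic sketch of that rule (demo_* names are illustrative only):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_ctx {
        u8 key[64];             /* driver-owned storage */
        const u8 *key_virt;     /* what descriptors reference */
        unsigned int keylen;
};

static int demo_setkey(struct demo_ctx *ctx, const u8 *key,
                       unsigned int keylen)
{
        if (keylen > sizeof(ctx->key))
                return -EINVAL;

        /* Copy first: the caller may free or reuse 'key' on return. */
        memcpy(ctx->key, key, keylen);
        ctx->key_virt = ctx->key;
        ctx->keylen = keylen;
        return 0;
}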
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 30e3f41ed872..e0bba20c13cb 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -225,7 +225,9 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
if (len && *buff)
break;
- sg_miter_next(&miter);
+ if (!sg_miter_next(&miter))
+ break;
+
buff = miter.addr;
len = miter.length;
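
The fix above stops the leading-zero scan as soon as sg_miter_next() reports the iterator is exhausted, instead of reading a stale miter.addr. A self-contained sketch of the defensive walk (demo_* names are hypothetical):

#include <linux/kernel.h>
#include <linux/scatterlist.h>

static size_t demo_leading_zeros(struct scatterlist *sgl,
                                 unsigned int nents, size_t len)
{
        struct sg_mapping_iter miter;
        size_t zeros = 0;
        bool nonzero = false;

        sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
        while (!nonzero && zeros < len && sg_miter_next(&miter)) {
                /* miter.addr/length are only valid right after a
                 * successful sg_miter_next() */
                const u8 *p = miter.addr;
                size_t n = min_t(size_t, miter.length, len - zeros);
                size_t i;

                for (i = 0; i < n; i++, zeros++) {
                        if (p[i]) {
                                nonzero = true;
                                break;
                        }
                }
        }
        sg_miter_stop(&miter);
        return zeros;
}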
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 781949027451..d9362199423f 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -254,6 +254,7 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
const struct firmware *fw_entry;
struct device *dev = &cpt->pdev->dev;
struct ucode_header *ucode;
+ unsigned int code_length;
struct microcode *mcode;
int j, ret = 0;
@@ -264,11 +265,12 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
ucode = (struct ucode_header *)fw_entry->data;
mcode = &cpt->mcode[cpt->next_mc_idx];
memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
- mcode->code_size = ntohl(ucode->code_length) * 2;
- if (!mcode->code_size) {
+ code_length = ntohl(ucode->code_length);
+ if (code_length == 0 || code_length >= INT_MAX / 2) {
ret = -EINVAL;
goto fw_release;
}
+ mcode->code_size = code_length * 2;
mcode->is_ae = is_ae;
mcode->core_mask = 0ULL;
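
Here code_length comes straight from the firmware image, so it is untrusted input and must be validated before any arithmetic that could wrap. A minimal sketch of the validate-then-compute pattern the hunk applies:

#include <linux/errno.h>
#include <linux/kernel.h>       /* INT_MAX */
#include <linux/types.h>

static int demo_ucode_size(u32 code_length, size_t *size)
{
        /* Reject zero and anything whose doubling would exceed INT_MAX,
         * before doing the multiplication. */
        if (code_length == 0 || code_length >= INT_MAX / 2)
                return -EINVAL;

        *size = (size_t)code_length * 2;
        return 0;
}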
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index b3eea329f840..e416456b2b8a 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -642,11 +642,27 @@ static void ccp_dma_release(struct ccp_device *ccp)
for (i = 0; i < ccp->cmd_q_count; i++) {
chan = ccp->ccp_dma_chan + i;
dma_chan = &chan->dma_chan;
+
tasklet_kill(&chan->cleanup_tasklet);
list_del_rcu(&dma_chan->device_node);
}
}
+static void ccp_dma_release_channels(struct ccp_device *ccp)
+{
+ struct ccp_dma_chan *chan;
+ struct dma_chan *dma_chan;
+ unsigned int i;
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ chan = ccp->ccp_dma_chan + i;
+ dma_chan = &chan->dma_chan;
+
+ if (dma_chan->client_count)
+ dma_release_channel(dma_chan);
+ }
+}
+
int ccp_dmaengine_register(struct ccp_device *ccp)
{
struct ccp_dma_chan *chan;
@@ -767,6 +783,7 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
if (!dmaengine)
return;
+ ccp_dma_release_channels(ccp);
dma_async_device_unregister(dma_dev);
ccp_dma_release(ccp);
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index e826c4b6b3af..4865eb047866 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -178,8 +178,11 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
wa->dma.address = dma_map_single(wa->dev, wa->address, len,
dir);
- if (dma_mapping_error(wa->dev, wa->dma.address))
+ if (dma_mapping_error(wa->dev, wa->dma.address)) {
+ kfree(wa->address);
+ wa->address = NULL;
return -ENOMEM;
+ }
wa->dma.length = len;
}
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 566999738698..47077dd77f5d 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -59,7 +59,7 @@ void __init cc_debugfs_global_init(void)
cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
}
-void __exit cc_debugfs_global_fini(void)
+void cc_debugfs_global_fini(void)
{
debugfs_remove(cc_debugfs_dir);
}
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 8b8eee513c27..3d59fef1fbee 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -653,10 +653,17 @@ static struct platform_driver ccree_driver = {
static int __init ccree_init(void)
{
- cc_hash_global_init();
+ int rc;
+
cc_debugfs_global_init();
- return platform_driver_register(&ccree_driver);
+ rc = platform_driver_register(&ccree_driver);
+ if (rc) {
+ cc_debugfs_global_fini();
+ return rc;
+ }
+
+ return 0;
}
module_init(ccree_init);
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index bc71bdf44a9f..9f67df0a4921 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -39,12 +39,19 @@ static const u32 cc_sha256_init[] = {
SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
static const u32 cc_digest_len_sha512_init[] = {
0x00000080, 0x00000000, 0x00000000, 0x00000000 };
-static u64 cc_sha384_init[] = {
- SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
- SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
-static u64 cc_sha512_init[] = {
- SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
- SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
+
+/*
+ * Due to the way the HW works, every double word in the SHA384 and SHA512
+ * larval hashes must be stored in hi/lo order
+ */
+#define hilo(x) upper_32_bits(x), lower_32_bits(x)
+static const u32 cc_sha384_init[] = {
+ hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
+ hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
+static const u32 cc_sha512_init[] = {
+ hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
+ hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };
+
static const u32 cc_sm3_init[] = {
SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
@@ -1948,8 +1955,8 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata)
}
if (large_sha_supported) {
- cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs,
- (ARRAY_SIZE(cc_sha384_init) * 2), larval_seq,
+ cc_set_sram_desc(cc_sha384_init, sram_buff_ofs,
+ ARRAY_SIZE(cc_sha384_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (rc)
@@ -1957,8 +1964,8 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata)
sram_buff_ofs += sizeof(cc_sha384_init);
larval_seq_len = 0;
- cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs,
- (ARRAY_SIZE(cc_sha512_init) * 2), larval_seq,
+ cc_set_sram_desc(cc_sha512_init, sram_buff_ofs,
+ ARRAY_SIZE(cc_sha512_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (rc)
@@ -1969,28 +1976,6 @@ init_digest_const_err:
return rc;
}
-static void __init cc_swap_dwords(u32 *buf, unsigned long size)
-{
- int i;
- u32 tmp;
-
- for (i = 0; i < size; i += 2) {
- tmp = buf[i];
- buf[i] = buf[i + 1];
- buf[i + 1] = tmp;
- }
-}
-
-/*
- * Due to the way the HW works we need to swap every
- * double word in the SHA384 and SHA512 larval hashes
- */
-void __init cc_hash_global_init(void)
-{
- cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2));
- cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2));
-}
-
int cc_hash_alloc(struct cc_drvdata *drvdata)
{
struct cc_hash_handle *hash_handle;
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
index 0d6dc61484d7..3dbd0abefea0 100644
--- a/drivers/crypto/ccree/cc_hash.h
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -104,6 +104,4 @@ cc_digest_len_addr(void *drvdata, u32 mode);
*/
cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
-void cc_hash_global_init(void);
-
#endif /*__CC_HASH_H__*/
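
With the hilo() macro, the hi/lo reordering the hardware expects happens at compile time, so the larval-hash tables can live in rodata and the init-time cc_swap_dwords() pass disappears. A tiny sketch of what the macro expands to (the constant is illustrative):

#include <linux/kernel.h>       /* upper_32_bits()/lower_32_bits() */

#define hilo(x) upper_32_bits(x), lower_32_bits(x)

/* Each u64 becomes a { high, low } pair of u32 initializers: */
static const u32 demo_tbl[] = { hilo(0x1122334455667788ULL) };
/* demo_tbl[0] == 0x11223344, demo_tbl[1] == 0x55667788 */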
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 3b79bcd03e7b..40054731f800 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -2092,7 +2092,7 @@ static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
if (tp->snd_una != snd_una) {
tp->snd_una = snd_una;
- tp->rcv_tstamp = tcp_time_stamp(tp);
+ tp->rcv_tstamp = tcp_jiffies32;
if (tp->snd_una == tp->snd_nxt &&
!csk_flag_nochk(csk, CSK_TX_FAILOVER))
csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index 4ad4ffd90cee..2402941a7f2f 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -449,7 +449,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
*/
}
- mutex_lock(&ctx->queue->queuelock);
+ spin_lock_bh(&ctx->queue->queuelock);
/* Put the IV in place for chained cases */
switch (ctx->cipher_alg) {
case SEC_C_AES_CBC_128:
@@ -509,7 +509,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
list_del(&backlog_req->backlog_head);
}
}
- mutex_unlock(&ctx->queue->queuelock);
+ spin_unlock_bh(&ctx->queue->queuelock);
mutex_lock(&sec_req->lock);
list_del(&sec_req_el->head);
@@ -798,7 +798,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
*/
/* Grab a big lock for a long time to avoid concurrency issues */
- mutex_lock(&queue->queuelock);
+ spin_lock_bh(&queue->queuelock);
/*
* Can go on to queue if we have space in either:
@@ -814,15 +814,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
ret = -EBUSY;
if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
list_add_tail(&sec_req->backlog_head, &ctx->backlog);
- mutex_unlock(&queue->queuelock);
+ spin_unlock_bh(&queue->queuelock);
goto out;
}
- mutex_unlock(&queue->queuelock);
+ spin_unlock_bh(&queue->queuelock);
goto err_free_elements;
}
ret = sec_send_request(sec_req, queue);
- mutex_unlock(&queue->queuelock);
+ spin_unlock_bh(&queue->queuelock);
if (ret)
goto err_free_elements;
@@ -881,7 +881,7 @@ static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
if (IS_ERR(ctx->queue))
return PTR_ERR(ctx->queue);
- mutex_init(&ctx->queue->queuelock);
+ spin_lock_init(&ctx->queue->queuelock);
ctx->queue->havesoftqueue = false;
return 0;
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h
index 4d9063a8b10b..0bf4d7c3856c 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.h
+++ b/drivers/crypto/hisilicon/sec/sec_drv.h
@@ -347,7 +347,7 @@ struct sec_queue {
DECLARE_BITMAP(unprocessed, SEC_QUEUE_LEN);
DECLARE_KFIFO_PTR(softqueue, typeof(struct sec_request_el *));
bool havesoftqueue;
- struct mutex queuelock;
+ spinlock_t queuelock;
void *shadow[SEC_QUEUE_LEN];
};
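
The queuelock conversion follows from context rules: the completion callback can run in softirq context, where taking a mutex (a sleeping lock) is illegal, so both sides now use a BH-disabling spinlock. A condensed sketch of the pairing (demo_* names are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_queuelock);

/* Completion path, may run from a softirq: no sleeping locks here. */
static void demo_complete_cb(void)
{
        spin_lock_bh(&demo_queuelock);
        /* ... pop finished requests, requeue any backlog ... */
        spin_unlock_bh(&demo_queuelock);
}

/* Submission path, process context: _bh keeps the softirq side from
 * preempting this CPU while the lock is held. */
static int demo_submit(void)
{
        spin_lock_bh(&demo_queuelock);
        /* ... queue the request ... */
        spin_unlock_bh(&demo_queuelock);
        return 0;
}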
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index fe4cc8babe1c..17cc44f14e5c 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -356,12 +356,16 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
static void img_hash_dma_task(unsigned long d)
{
struct img_hash_dev *hdev = (struct img_hash_dev *)d;
- struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+ struct img_hash_request_ctx *ctx;
u8 *addr;
size_t nbytes, bleft, wsend, len, tbc;
struct scatterlist tsg;
- if (!hdev->req || !ctx->sg)
+ if (!hdev->req)
+ return;
+
+ ctx = ahash_request_ctx(hdev->req);
+ if (!ctx->sg)
return;
addr = sg_virt(ctx->sg);
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 4d9d97c59ee3..04638e183351 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1090,11 +1090,12 @@ static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
static int safexcel_request_ring_irq(void *pdev, int irqid,
int is_pci_dev,
+ int ring_id,
irq_handler_t handler,
irq_handler_t threaded_handler,
struct safexcel_ring_irq_data *ring_irq_priv)
{
- int ret, irq;
+ int ret, irq, cpu;
struct device *dev;
if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
@@ -1132,6 +1133,10 @@ static int safexcel_request_ring_irq(void *pdev, int irqid,
return ret;
}
+ /* Set affinity */
+ cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
+ irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+
return irq;
}
@@ -1462,19 +1467,23 @@ static int safexcel_probe_generic(void *pdev,
&priv->ring[i].rdr);
if (ret) {
dev_err(dev, "Failed to initialize rings\n");
- return ret;
+ goto err_cleanup_rings;
}
priv->ring[i].rdr_req = devm_kcalloc(dev,
EIP197_DEFAULT_RING_SIZE,
sizeof(*priv->ring[i].rdr_req),
GFP_KERNEL);
- if (!priv->ring[i].rdr_req)
- return -ENOMEM;
+ if (!priv->ring[i].rdr_req) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }
ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
- if (!ring_irq)
- return -ENOMEM;
+ if (!ring_irq) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }
ring_irq->priv = priv;
ring_irq->ring = i;
@@ -1482,14 +1491,17 @@ static int safexcel_probe_generic(void *pdev,
irq = safexcel_request_ring_irq(pdev,
EIP197_IRQ_NUMBER(i, is_pci_dev),
is_pci_dev,
+ i,
safexcel_irq_ring,
safexcel_irq_ring_thread,
ring_irq);
if (irq < 0) {
dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
- return irq;
+ ret = irq;
+ goto err_cleanup_rings;
}
+ priv->ring[i].irq = irq;
priv->ring[i].work_data.priv = priv;
priv->ring[i].work_data.ring = i;
INIT_WORK(&priv->ring[i].work_data.work,
@@ -1498,8 +1510,10 @@ static int safexcel_probe_generic(void *pdev,
snprintf(wq_name, 9, "wq_ring%d", i);
priv->ring[i].workqueue =
create_singlethread_workqueue(wq_name);
- if (!priv->ring[i].workqueue)
- return -ENOMEM;
+ if (!priv->ring[i].workqueue) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }
priv->ring[i].requests = 0;
priv->ring[i].busy = false;
@@ -1516,16 +1530,26 @@ static int safexcel_probe_generic(void *pdev,
ret = safexcel_hw_init(priv);
if (ret) {
dev_err(dev, "HW init failed (%d)\n", ret);
- return ret;
+ goto err_cleanup_rings;
}
ret = safexcel_register_algorithms(priv);
if (ret) {
dev_err(dev, "Failed to register algorithms (%d)\n", ret);
- return ret;
+ goto err_cleanup_rings;
}
return 0;
+
+err_cleanup_rings:
+ for (i = 0; i < priv->config.rings; i++) {
+ if (priv->ring[i].irq)
+ irq_set_affinity_hint(priv->ring[i].irq, NULL);
+ if (priv->ring[i].workqueue)
+ destroy_workqueue(priv->ring[i].workqueue);
+ }
+
+ return ret;
}
static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
@@ -1627,8 +1651,10 @@ static int safexcel_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
- for (i = 0; i < priv->config.rings; i++)
+ for (i = 0; i < priv->config.rings; i++) {
+ irq_set_affinity_hint(priv->ring[i].irq, NULL);
destroy_workqueue(priv->ring[i].workqueue);
+ }
return 0;
}
@@ -1658,6 +1684,8 @@ static const struct of_device_id safexcel_of_match_table[] = {
{},
};
+MODULE_DEVICE_TABLE(of, safexcel_of_match_table);
+
static struct platform_driver crypto_safexcel = {
.probe = safexcel_probe,
.remove = safexcel_remove,
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 930cc48a6f85..6a4d7f09bca9 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -640,6 +640,9 @@ struct safexcel_ring {
*/
struct crypto_async_request *req;
struct crypto_async_request *backlog;
+
+ /* irq of this ring */
+ int irq;
};
/* EIP integration context flags */
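
The new ring_id argument exists so each ring's IRQ can be spread across CPUs instead of all completing on CPU0, and the stored irq lets the error and remove paths clear the hint again. A sketch of the spread/unspread pair (demo_* wrappers are assumptions of this sketch):

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/numa.h>

static void demo_spread_ring_irq(int irq, int ring_id)
{
        /* i-th online CPU, preferring the local NUMA node */
        int cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);

        irq_set_affinity_hint(irq, get_cpu_mask(cpu));
}

static void demo_unspread_ring_irq(int irq)
{
        irq_set_affinity_hint(irq, NULL);       /* drop hint before free */
}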
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 708dc63b2f09..c7d433d1cd99 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -287,7 +287,7 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
- struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
err = verify_skcipher_des3_key(cipher, key);
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index dc15b06e96ab..80127344d82b 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1278,6 +1278,7 @@ struct n2_hash_tmpl {
const u32 *hash_init;
u8 hw_op_hashsz;
u8 digest_size;
+ u8 statesize;
u8 block_size;
u8 auth_type;
u8 hmac_type;
@@ -1309,6 +1310,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_HMAC_MD5,
.hw_op_hashsz = MD5_DIGEST_SIZE,
.digest_size = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
.block_size = MD5_HMAC_BLOCK_SIZE },
{ .name = "sha1",
.hash_zero = sha1_zero_message_hash,
@@ -1317,6 +1319,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_HMAC_SHA1,
.hw_op_hashsz = SHA1_DIGEST_SIZE,
.digest_size = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
.block_size = SHA1_BLOCK_SIZE },
{ .name = "sha256",
.hash_zero = sha256_zero_message_hash,
@@ -1325,6 +1328,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_HMAC_SHA256,
.hw_op_hashsz = SHA256_DIGEST_SIZE,
.digest_size = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
.block_size = SHA256_BLOCK_SIZE },
{ .name = "sha224",
.hash_zero = sha224_zero_message_hash,
@@ -1333,6 +1337,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_RESERVED,
.hw_op_hashsz = SHA256_DIGEST_SIZE,
.digest_size = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
.block_size = SHA224_BLOCK_SIZE },
};
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
@@ -1474,6 +1479,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
halg = &ahash->halg;
halg->digestsize = tmpl->digest_size;
+ halg->statesize = tmpl->statesize;
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index 015155da59c2..76139865d7fa 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o
nx-crypto-objs := nx.o \
- nx_debugfs.o \
nx-aes-cbc.o \
nx-aes-ecb.o \
nx-aes-gcm.o \
@@ -11,6 +10,7 @@ nx-crypto-objs := nx.o \
nx-sha256.o \
nx-sha512.o
+nx-crypto-$(CONFIG_DEBUG_FS) += nx_debugfs.o
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
nx-compress-objs := nx-842.o
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 7ecca168f8c4..5c77aba450cf 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -169,8 +169,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
void nx_debugfs_init(struct nx_crypto_driver *);
void nx_debugfs_fini(struct nx_crypto_driver *);
#else
-#define NX_DEBUGFS_INIT(drv) (0)
-#define NX_DEBUGFS_FINI(drv) (0)
+#define NX_DEBUGFS_INIT(drv) do {} while (0)
+#define NX_DEBUGFS_FINI(drv) do {} while (0)
#endif
#define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL)
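
Replacing the (0) stubs with do { } while (0) keeps the no-op macros statement-shaped: they compose safely with if/else and leave no dangling expression value for -Wunused-value to warn about. The idiom in isolation (demo_trace() is a hypothetical function):

#ifdef CONFIG_DEBUG_FS
#define DEMO_TRACE(drv)         demo_trace(drv)
#else
/* Expands to exactly one statement, so a bare semicolon after it and
 * an if/else around it both stay legal and warning-free: */
#define DEMO_TRACE(drv)         do { } while (0)
#endif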
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index f8a146554b1f..dbab9e38223e 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2141,7 +2141,7 @@ static int omap_sham_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_irq_safe(dev);
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get sync: %d\n", err);
goto err_pm;
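
pm_runtime_get_sync() raises the usage counter even when the resume fails, so callers must balance it with pm_runtime_put_noidle() on error; pm_runtime_resume_and_get() does that internally, which is why the probe above can simply bail out. A sketch of the preferred call shape:

#include <linux/pm_runtime.h>

static int demo_use_device(struct device *dev)
{
        int err;

        err = pm_runtime_resume_and_get(dev);
        if (err < 0)
                return err;     /* counter already dropped for us */

        /* ... talk to the hardware ... */

        pm_runtime_put(dev);    /* balance the successful get */
        return 0;
}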
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 8ac8ec6decd5..19186617eafc 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -43,7 +43,6 @@
#define FLAGS_MODE_MASK 0x000f
#define FLAGS_ENCRYPT BIT(0)
#define FLAGS_CBC BIT(1)
-#define FLAGS_NEW_KEY BIT(3)
#define SAHARA_HDR_BASE 0x00800000
#define SAHARA_HDR_SKHA_ALG_AES 0
@@ -141,8 +140,6 @@ struct sahara_hw_link {
};
struct sahara_ctx {
- unsigned long flags;
-
/* AES-specific context */
int keylen;
u8 key[AES_KEYSIZE_128];
@@ -445,27 +442,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
int ret;
int i, j;
int idx = 0;
+ u32 len;
- /* Copy new key if necessary */
- if (ctx->flags & FLAGS_NEW_KEY) {
- memcpy(dev->key_base, ctx->key, ctx->keylen);
- ctx->flags &= ~FLAGS_NEW_KEY;
+ memcpy(dev->key_base, ctx->key, ctx->keylen);
- if (dev->flags & FLAGS_CBC) {
- dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
- dev->hw_desc[idx]->p1 = dev->iv_phys_base;
- } else {
- dev->hw_desc[idx]->len1 = 0;
- dev->hw_desc[idx]->p1 = 0;
- }
- dev->hw_desc[idx]->len2 = ctx->keylen;
- dev->hw_desc[idx]->p2 = dev->key_phys_base;
- dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
+ if (dev->flags & FLAGS_CBC) {
+ dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
+ dev->hw_desc[idx]->p1 = dev->iv_phys_base;
+ } else {
+ dev->hw_desc[idx]->len1 = 0;
+ dev->hw_desc[idx]->p1 = 0;
+ }
+ dev->hw_desc[idx]->len2 = ctx->keylen;
+ dev->hw_desc[idx]->p2 = dev->key_phys_base;
+ dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
+ dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
- dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
+ idx++;
- idx++;
- }
dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
if (dev->nb_in_sg < 0) {
@@ -487,24 +481,27 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
DMA_TO_DEVICE);
if (ret != dev->nb_in_sg) {
dev_err(dev->device, "couldn't map in sg\n");
- goto unmap_in;
+ return -EINVAL;
}
+
ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE);
if (ret != dev->nb_out_sg) {
dev_err(dev->device, "couldn't map out sg\n");
- goto unmap_out;
+ goto unmap_in;
}
/* Create input links */
dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
sg = dev->in_sg;
+ len = dev->total;
for (i = 0; i < dev->nb_in_sg; i++) {
- dev->hw_link[i]->len = sg->length;
+ dev->hw_link[i]->len = min(len, sg->length);
dev->hw_link[i]->p = sg->dma_address;
if (i == (dev->nb_in_sg - 1)) {
dev->hw_link[i]->next = 0;
} else {
+ len -= min(len, sg->length);
dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
sg = sg_next(sg);
}
@@ -513,12 +510,14 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
/* Create output links */
dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
sg = dev->out_sg;
+ len = dev->total;
for (j = i; j < dev->nb_out_sg + i; j++) {
- dev->hw_link[j]->len = sg->length;
+ dev->hw_link[j]->len = min(len, sg->length);
dev->hw_link[j]->p = sg->dma_address;
if (j == (dev->nb_out_sg + i - 1)) {
dev->hw_link[j]->next = 0;
} else {
+ len -= min(len, sg->length);
dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
sg = sg_next(sg);
}
@@ -537,9 +536,6 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
return 0;
-unmap_out:
- dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
- DMA_FROM_DEVICE);
unmap_in:
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
@@ -584,16 +580,17 @@ static int sahara_aes_process(struct ablkcipher_request *req)
timeout = wait_for_completion_timeout(&dev->dma_completion,
msecs_to_jiffies(SAHARA_TIMEOUT_MS));
- if (!timeout) {
- dev_err(dev->device, "AES timeout\n");
- return -ETIMEDOUT;
- }
dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE);
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
+ if (!timeout) {
+ dev_err(dev->device, "AES timeout\n");
+ return -ETIMEDOUT;
+ }
+
return 0;
}
@@ -608,7 +605,6 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
/* SAHARA only supports 128bit keys */
if (keylen == AES_KEYSIZE_128) {
memcpy(ctx->key, key, keylen);
- ctx->flags |= FLAGS_NEW_KEY;
return 0;
}
@@ -796,6 +792,7 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
int start)
{
struct scatterlist *sg;
+ unsigned int len;
unsigned int i;
int ret;
@@ -817,12 +814,14 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
if (!ret)
return -EFAULT;
+ len = rctx->total;
for (i = start; i < dev->nb_in_sg + start; i++) {
- dev->hw_link[i]->len = sg->length;
+ dev->hw_link[i]->len = min(len, sg->length);
dev->hw_link[i]->p = sg->dma_address;
if (i == (dev->nb_in_sg + start - 1)) {
dev->hw_link[i]->next = 0;
} else {
+ len -= min(len, sg->length);
dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
sg = sg_next(sg);
}
@@ -903,24 +902,6 @@ static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
return 0;
}
-static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
-{
- if (!sg || !sg->length)
- return nbytes;
-
- while (nbytes && sg) {
- if (nbytes <= sg->length) {
- sg->length = nbytes;
- sg_mark_end(sg);
- break;
- }
- nbytes -= sg->length;
- sg = sg_next(sg);
- }
-
- return nbytes;
-}
-
static int sahara_sha_prepare_request(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -957,36 +938,20 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
hash_later, 0);
}
- /* nbytes should now be multiple of blocksize */
- req->nbytes = req->nbytes - hash_later;
-
- sahara_walk_and_recalc(req->src, req->nbytes);
-
+ rctx->total = len - hash_later;
/* have data from previous operation and current */
if (rctx->buf_cnt && req->nbytes) {
sg_init_table(rctx->in_sg_chain, 2);
sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
-
sg_chain(rctx->in_sg_chain, 2, req->src);
-
- rctx->total = req->nbytes + rctx->buf_cnt;
rctx->in_sg = rctx->in_sg_chain;
-
- req->src = rctx->in_sg_chain;
/* only data from previous operation */
} else if (rctx->buf_cnt) {
- if (req->src)
- rctx->in_sg = req->src;
- else
- rctx->in_sg = rctx->in_sg_chain;
- /* buf was copied into rembuf above */
+ rctx->in_sg = rctx->in_sg_chain;
sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
- rctx->total = rctx->buf_cnt;
/* no data from previous operation */
} else {
rctx->in_sg = req->src;
- rctx->total = req->nbytes;
- req->src = rctx->in_sg;
}
/* on next call, we only have the remaining data in the buffer */
@@ -1007,7 +972,10 @@ static int sahara_sha_process(struct ahash_request *req)
return ret;
if (rctx->first) {
- sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
+ ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
+ if (ret)
+ return ret;
+
dev->hw_desc[0]->next = 0;
rctx->first = 0;
} else {
@@ -1015,7 +983,10 @@ static int sahara_sha_process(struct ahash_request *req)
sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
dev->hw_desc[0]->next = dev->hw_phys_desc[1];
- sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
+ ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
+ if (ret)
+ return ret;
+
dev->hw_desc[1]->next = 0;
}
@@ -1028,18 +999,19 @@ static int sahara_sha_process(struct ahash_request *req)
timeout = wait_for_completion_timeout(&dev->dma_completion,
msecs_to_jiffies(SAHARA_TIMEOUT_MS));
- if (!timeout) {
- dev_err(dev->device, "SHA timeout\n");
- return -ETIMEDOUT;
- }
if (rctx->sg_in_idx)
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
+ if (!timeout) {
+ dev_err(dev->device, "SHA timeout\n");
+ return -ETIMEDOUT;
+ }
+
memcpy(rctx->context, dev->context_base, rctx->context_size);
- if (req->result)
+ if (req->result && rctx->last)
memcpy(req->result, rctx->context, rctx->digest_size);
return 0;
@@ -1183,8 +1155,7 @@ static int sahara_sha_import(struct ahash_request *req, const void *in)
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct sahara_sha_reqctx) +
- SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
+ sizeof(struct sahara_sha_reqctx));
return 0;
}
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 2ecc970f5cae..b66e06818afc 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -98,7 +98,7 @@ static struct stm32_crc *stm32_crc_get_next_crc(void)
struct stm32_crc *crc;
spin_lock_bh(&crc_list.lock);
- crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
+ crc = list_first_entry_or_null(&crc_list.dev_list, struct stm32_crc, list);
if (crc)
list_move_tail(&crc->list, &crc_list.dev_list);
spin_unlock_bh(&crc_list.lock);
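
list_first_entry() on a possibly empty list hands back a bogus pointer, so the round-robin selector above switches to list_first_entry_or_null() and checks the result. A self-contained sketch of the pattern (struct demo_dev is hypothetical):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_dev {
        struct list_head list;
};

static struct demo_dev *demo_get_next(struct list_head *head,
                                      spinlock_t *lock)
{
        struct demo_dev *d;

        spin_lock_bh(lock);
        /* _or_null: 'head' may legitimately be empty here */
        d = list_first_entry_or_null(head, struct demo_dev, list);
        if (d)
                list_move_tail(&d->list, head); /* rotate: round-robin */
        spin_unlock_bh(lock);
        return d;
}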
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index dcce15b55809..393d0cae10f1 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -562,9 +562,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
}
for_each_sg(rctx->sg, tsg, rctx->nents, i) {
+ sg[0] = *tsg;
len = sg->length;
- sg[0] = *tsg;
if (sg_is_last(sg)) {
if (hdev->dma_mode == 1) {
len = (ALIGN(sg->length, 16) - 16);
@@ -1553,9 +1553,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
if (!hdev)
return -ENODEV;
- ret = pm_runtime_resume_and_get(hdev->dev);
- if (ret < 0)
- return ret;
+ ret = pm_runtime_get_sync(hdev->dev);
stm32_hash_unregister_algs(hdev);
@@ -1571,7 +1569,8 @@ static int stm32_hash_remove(struct platform_device *pdev)
pm_runtime_disable(hdev->dev);
pm_runtime_put_noidle(hdev->dev);
- clk_disable_unprepare(hdev->clk);
+ if (ret >= 0)
+ clk_disable_unprepare(hdev->clk);
return 0;
}
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
index 01b625e4e5ad..6d3deb025b2a 100644
--- a/drivers/crypto/virtio/Kconfig
+++ b/drivers/crypto/virtio/Kconfig
@@ -5,7 +5,6 @@ config CRYPTO_DEV_VIRTIO
select CRYPTO_AEAD
select CRYPTO_BLKCIPHER
select CRYPTO_ENGINE
- default m
help
This driver provides support for virtio crypto device. If you
choose 'M' here, this module will be called virtio_crypto.
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 1c6e00da5a29..947a6e01d93f 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -10,6 +10,7 @@
#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
+#include <linux/interrupt.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
@@ -27,6 +28,7 @@ struct data_queue {
char name[32];
struct crypto_engine *engine;
+ struct tasklet_struct done_task;
};
struct virtio_crypto {
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index c8a962c62663..469da86c4084 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -22,27 +22,28 @@ virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
}
}
-static void virtcrypto_dataq_callback(struct virtqueue *vq)
+static void virtcrypto_done_task(unsigned long data)
{
- struct virtio_crypto *vcrypto = vq->vdev->priv;
+ struct data_queue *data_vq = (struct data_queue *)data;
+ struct virtqueue *vq = data_vq->vq;
struct virtio_crypto_request *vc_req;
- unsigned long flags;
unsigned int len;
- unsigned int qid = vq->index;
- spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
- spin_unlock_irqrestore(
- &vcrypto->data_vq[qid].lock, flags);
if (vc_req->alg_cb)
vc_req->alg_cb(vc_req, len);
- spin_lock_irqsave(
- &vcrypto->data_vq[qid].lock, flags);
}
} while (!virtqueue_enable_cb(vq));
- spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
+}
+
+static void virtcrypto_dataq_callback(struct virtqueue *vq)
+{
+ struct virtio_crypto *vcrypto = vq->vdev->priv;
+ struct data_queue *dq = &vcrypto->data_vq[vq->index];
+
+ tasklet_schedule(&dq->done_task);
}
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
@@ -99,6 +100,8 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
ret = -ENOMEM;
goto err_engine;
}
+ tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
+ (unsigned long)&vi->data_vq[i]);
}
kfree(names);
@@ -431,11 +434,14 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
static void virtcrypto_remove(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
+ int i;
dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
+ for (i = 0; i < vcrypto->max_data_queues; i++)
+ tasklet_kill(&vcrypto->data_vq[i].done_task);
vdev->config->reset(vdev);
virtcrypto_free_unused_reqs(vcrypto);
virtcrypto_clear_crypto_engines(vcrypto);
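
The data-queue callback now only schedules a tasklet, and the tasklet drains the virtqueue with the canonical disable_cb/get_buf/enable_cb loop, re-checking after re-enabling so a completion that races with virtqueue_enable_cb() is not lost. A condensed sketch (demo_* names are hypothetical; stashing the queue in vq->priv is an assumption of this sketch):

#include <linux/interrupt.h>
#include <linux/virtio.h>

struct demo_queue {
        struct virtqueue *vq;
        struct tasklet_struct done_task;
};

static void demo_done_task(unsigned long data)
{
        struct demo_queue *q = (struct demo_queue *)data;
        unsigned int len;
        void *buf;

        do {
                virtqueue_disable_cb(q->vq);
                while ((buf = virtqueue_get_buf(q->vq, &len)) != NULL)
                        ;       /* complete the request 'buf' describes */
        } while (!virtqueue_enable_cb(q->vq));
}

static void demo_dataq_callback(struct virtqueue *vq)
{
        struct demo_queue *q = vq->priv;        /* set at queue setup */

        tasklet_schedule(&q->done_task);        /* defer out of the IRQ */
}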
diff --git a/drivers/crypto/zynqmp-aes.c b/drivers/crypto/zynqmp-aes.c
new file mode 100644
index 000000000000..595447b31185
--- /dev/null
+++ b/drivers/crypto/zynqmp-aes.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP AES Driver.
+ * Copyright (c) 2018 Xilinx Inc.
+ */
+
+#include <crypto/aes.h>
+#include <crypto/scatterwalk.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_AES_QUEUE_LENGTH 1
+#define ZYNQMP_AES_IV_SIZE 12
+#define ZYNQMP_AES_GCM_SIZE 16
+#define ZYNQMP_AES_KEY_SIZE 32
+
+#define ZYNQMP_AES_DECRYPT 0
+#define ZYNQMP_AES_ENCRYPT 1
+
+#define ZYNQMP_AES_KUP_KEY 0
+
+#define ZYNQMP_AES_GCM_TAG_MISMATCH_ERR 0x01
+#define ZYNQMP_AES_SIZE_ERR 0x06
+#define ZYNQMP_AES_WRONG_KEY_SRC_ERR 0x13
+#define ZYNQMP_AES_PUF_NOT_PROGRAMMED 0xE300
+
+#define ZYNQMP_AES_BLOCKSIZE 0x04
+
+struct zynqmp_aes_dev {
+ struct list_head list;
+ struct device *dev;
+ /* the lock protects queue and dev list */
+ spinlock_t lock;
+ struct crypto_queue queue;
+};
+
+struct zynqmp_aes_op {
+ struct zynqmp_aes_dev *dd;
+ void *src;
+ void *dst;
+ int len;
+ u8 key[ZYNQMP_AES_KEY_SIZE];
+ u8 *iv;
+ u32 keylen;
+ u32 keytype;
+};
+
+struct zynqmp_aes_data {
+ u64 src;
+ u64 iv;
+ u64 key;
+ u64 dst;
+ u64 size;
+ u64 optype;
+ u64 keysrc;
+};
+
+struct zynqmp_aes_drv {
+ struct list_head dev_list;
+ /* the lock protects dev list */
+ spinlock_t lock;
+};
+
+static struct zynqmp_aes_drv zynqmp_aes = {
+ .dev_list = LIST_HEAD_INIT(zynqmp_aes.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(zynqmp_aes.lock),
+};
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+static struct zynqmp_aes_dev *zynqmp_aes_find_dev(struct zynqmp_aes_op *ctx)
+{
+ struct zynqmp_aes_dev *aes_dd = NULL;
+ struct zynqmp_aes_dev *tmp;
+
+ spin_lock_bh(&zynqmp_aes.lock);
+ if (!ctx->dd) {
+ list_for_each_entry(tmp, &zynqmp_aes.dev_list, list) {
+ aes_dd = tmp;
+ break;
+ }
+ ctx->dd = aes_dd;
+ } else {
+ aes_dd = ctx->dd;
+ }
+ spin_unlock_bh(&zynqmp_aes.lock);
+
+ return aes_dd;
+}
+
+static int zynqmp_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct zynqmp_aes_op *op = crypto_tfm_ctx(tfm);
+
+ op->keylen = len;
+ memcpy(op->key, key, len);
+
+ return 0;
+}
+
+static int zynqmp_setkeytype(struct crypto_tfm *tfm, const u8 *keytype,
+ unsigned int len)
+{
+ struct zynqmp_aes_op *op = crypto_tfm_ctx(tfm);
+
+ op->keytype = (u32)(*keytype);
+
+ return 0;
+}
+
+static int zynqmp_aes_xcrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes,
+ unsigned int flags)
+{
+ struct zynqmp_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+ struct zynqmp_aes_dev *dd = zynqmp_aes_find_dev(op);
+ int err, ret, copy_bytes, src_data = 0, dst_data = 0;
+ dma_addr_t dma_addr, dma_addr_buf;
+ struct zynqmp_aes_data *abuf;
+ struct blkcipher_walk walk;
+ unsigned int data_size;
+ size_t dma_size;
+ char *kbuf;
+
+ if (!eemi_ops->aes)
+ return -ENOTSUPP;
+
+ if (op->keytype == ZYNQMP_AES_KUP_KEY)
+ dma_size = nbytes + ZYNQMP_AES_KEY_SIZE
+ + ZYNQMP_AES_IV_SIZE;
+ else
+ dma_size = nbytes + ZYNQMP_AES_IV_SIZE;
+
+ kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ abuf = dma_alloc_coherent(dd->dev, sizeof(struct zynqmp_aes_data),
+ &dma_addr_buf, GFP_KERNEL);
+ if (!abuf) {
+ dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
+ return -ENOMEM;
+ }
+
+ data_size = nbytes;
+ blkcipher_walk_init(&walk, dst, src, data_size);
+ err = blkcipher_walk_virt(desc, &walk);
+ op->iv = walk.iv;
+
+ while ((nbytes = walk.nbytes)) {
+ op->src = walk.src.virt.addr;
+ memcpy(kbuf + src_data, op->src, nbytes);
+ src_data = src_data + nbytes;
+ nbytes &= (ZYNQMP_AES_BLOCKSIZE - 1);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+ memcpy(kbuf + data_size, op->iv, ZYNQMP_AES_IV_SIZE);
+ abuf->src = dma_addr;
+ abuf->dst = dma_addr;
+ abuf->iv = abuf->src + data_size;
+ abuf->size = data_size - ZYNQMP_AES_GCM_SIZE;
+ abuf->optype = flags;
+ abuf->keysrc = op->keytype;
+
+ if (op->keytype == ZYNQMP_AES_KUP_KEY) {
+ memcpy(kbuf + data_size + ZYNQMP_AES_IV_SIZE,
+ op->key, ZYNQMP_AES_KEY_SIZE);
+
+ abuf->key = abuf->src + data_size + ZYNQMP_AES_IV_SIZE;
+ } else {
+ abuf->key = 0;
+ }
+ eemi_ops->aes(dma_addr_buf, &ret);
+
+ if (ret != 0) {
+ switch (ret) {
+ case ZYNQMP_AES_GCM_TAG_MISMATCH_ERR:
+ dev_err(dd->dev, "ERROR: Gcm Tag mismatch\n\r");
+ break;
+ case ZYNQMP_AES_SIZE_ERR:
+ dev_err(dd->dev, "ERROR : Non word aligned data\n\r");
+ break;
+ case ZYNQMP_AES_WRONG_KEY_SRC_ERR:
+ dev_err(dd->dev, "ERROR: Wrong KeySrc, enable secure mode\n\r");
+ break;
+ case ZYNQMP_AES_PUF_NOT_PROGRAMMED:
+ dev_err(dd->dev, "ERROR: PUF is not registered\r\n");
+ break;
+ default:
+ dev_err(dd->dev, "ERROR: Invalid");
+ break;
+ }
+ err = -EIO;
+ goto out;
+ }
+ if (flags)
+ copy_bytes = data_size;
+ else
+ copy_bytes = data_size - ZYNQMP_AES_GCM_SIZE;
+
+ blkcipher_walk_init(&walk, dst, src, copy_bytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((nbytes = walk.nbytes)) {
+ memcpy(walk.dst.virt.addr, kbuf + dst_data, nbytes);
+ dst_data = dst_data + nbytes;
+ nbytes &= (ZYNQMP_AES_BLOCKSIZE - 1);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+out:
+ dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
+ dma_free_coherent(dd->dev, sizeof(struct zynqmp_aes_data),
+ abuf, dma_addr_buf);
+ return err;
+}
+
+static int zynqmp_aes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return zynqmp_aes_xcrypt(desc, dst, src, nbytes, ZYNQMP_AES_DECRYPT);
+}
+
+static int zynqmp_aes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return zynqmp_aes_xcrypt(desc, dst, src, nbytes, ZYNQMP_AES_ENCRYPT);
+}
+
+static struct crypto_alg zynqmp_alg = {
+ .cra_name = "xilinx-zynqmp-aes",
+ .cra_driver_name = "zynqmp-aes",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = ZYNQMP_AES_BLOCKSIZE,
+ .cra_ctxsize = sizeof(struct zynqmp_aes_op),
+ .cra_alignmask = 15,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = 0,
+ .max_keysize = ZYNQMP_AES_KEY_SIZE,
+ .setkey = zynqmp_setkey_blk,
+ .setkeytype = zynqmp_setkeytype,
+ .encrypt = zynqmp_aes_encrypt,
+ .decrypt = zynqmp_aes_decrypt,
+ .ivsize = ZYNQMP_AES_IV_SIZE,
+ }
+ }
+};
+
+static const struct of_device_id zynqmp_aes_dt_ids[] = {
+ { .compatible = "xlnx,zynqmp-aes" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, zynqmp_aes_dt_ids);
+
+static int zynqmp_aes_probe(struct platform_device *pdev)
+{
+ struct zynqmp_aes_dev *aes_dd;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ aes_dd = devm_kzalloc(dev, sizeof(*aes_dd), GFP_KERNEL);
+ if (!aes_dd)
+ return -ENOMEM;
+
+ aes_dd->dev = dev;
+ platform_set_drvdata(pdev, aes_dd);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(44));
+ if (ret < 0) {
+ dev_err(dev, "no usable DMA configuration");
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&aes_dd->list);
+ spin_lock_init(&aes_dd->lock);
+ crypto_init_queue(&aes_dd->queue, ZYNQMP_AES_QUEUE_LENGTH);
+ spin_lock(&zynqmp_aes.lock);
+ list_add_tail(&aes_dd->list, &zynqmp_aes.dev_list);
+ spin_unlock(&zynqmp_aes.lock);
+
+ ret = crypto_register_alg(&zynqmp_alg);
+ if (ret)
+ goto err_algs;
+
+ dev_info(dev, "AES Successfully Registered\n\r");
+ return 0;
+
+err_algs:
+ spin_lock(&zynqmp_aes.lock);
+ list_del(&aes_dd->list);
+ spin_unlock(&zynqmp_aes.lock);
+ dev_err(dev, "initialization failed.\n");
+
+ return ret;
+}
+
+static int zynqmp_aes_remove(struct platform_device *pdev)
+{
+ struct zynqmp_aes_dev *aes_dd;
+
+ aes_dd = platform_get_drvdata(pdev);
+ if (!aes_dd)
+ return -ENODEV;
+ spin_lock(&zynqmp_aes.lock);
+ list_del(&aes_dd->list);
+ spin_unlock(&zynqmp_aes.lock);
+ crypto_unregister_alg(&zynqmp_alg);
+ return 0;
+}
+
+static struct platform_driver xilinx_aes_driver = {
+ .probe = zynqmp_aes_probe,
+ .remove = zynqmp_aes_remove,
+ .driver = {
+ .name = "zynqmp_aes",
+ .of_match_table = of_match_ptr(zynqmp_aes_dt_ids),
+ },
+};
+
+module_platform_driver(xilinx_aes_driver);
+
+MODULE_DESCRIPTION("Xilinx ZynqMP AES hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nava kishore Manne <nava.manne@xilinx.com>");
+MODULE_AUTHOR("Kalyani Akula <kalyani.akula@xilinx.com>");
diff --git a/drivers/crypto/zynqmp-rsa.c b/drivers/crypto/zynqmp-rsa.c
new file mode 100644
index 000000000000..ec93401aab94
--- /dev/null
+++ b/drivers/crypto/zynqmp-rsa.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Xilinx, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <crypto/scatterwalk.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_RSA_QUEUE_LENGTH 1
+#define ZYNQMP_RSA_MAX_KEY_SIZE 1024
+#define ZYNQMP_RSA_BLOCKSIZE 64
+
+struct zynqmp_rsa_dev;
+
+struct zynqmp_rsa_op {
+ struct zynqmp_rsa_dev *dd;
+ void *src;
+ void *dst;
+ int len;
+ u8 key[ZYNQMP_RSA_MAX_KEY_SIZE];
+ u8 *iv;
+ u32 keylen;
+};
+
+struct zynqmp_rsa_dev {
+ struct list_head list;
+ struct device *dev;
+ /* the lock protects queue and dev list */
+ spinlock_t lock;
+ struct crypto_queue queue;
+};
+
+struct zynqmp_rsa_drv {
+ struct list_head dev_list;
+ /* the lock protects the dev list */
+ spinlock_t lock;
+};
+
+static struct zynqmp_rsa_drv zynqmp_rsa = {
+ .dev_list = LIST_HEAD_INIT(zynqmp_rsa.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(zynqmp_rsa.lock),
+};
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+static struct zynqmp_rsa_dev *zynqmp_rsa_find_dev(struct zynqmp_rsa_op *ctx)
+{
+ struct zynqmp_rsa_dev *rsa_dd = NULL;
+ struct zynqmp_rsa_dev *tmp;
+
+ spin_lock_bh(&zynqmp_rsa.lock);
+ if (!ctx->dd) {
+ list_for_each_entry(tmp, &zynqmp_rsa.dev_list, list) {
+ rsa_dd = tmp;
+ break;
+ }
+ ctx->dd = rsa_dd;
+ } else {
+ rsa_dd = ctx->dd;
+ }
+ spin_unlock_bh(&zynqmp_rsa.lock);
+
+ return rsa_dd;
+}
+
+static int zynqmp_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct zynqmp_rsa_op *op = crypto_tfm_ctx(tfm);
+
+ op->keylen = len;
+ memcpy(op->key, key, len);
+ return 0;
+}
+
+static int zynqmp_rsa_xcrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes, unsigned int flags)
+{
+ struct zynqmp_rsa_op *op = crypto_blkcipher_ctx(desc->tfm);
+ struct zynqmp_rsa_dev *dd = zynqmp_rsa_find_dev(op);
+ int err, datasize, src_data = 0, dst_data = 0;
+ struct blkcipher_walk walk;
+ char *kbuf;
+ size_t dma_size;
+ dma_addr_t dma_addr;
+
+ if (!eemi_ops->rsa)
+ return -ENOTSUPP;
+
+ dma_size = nbytes + op->keylen;
+ kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((datasize = walk.nbytes)) {
+ op->src = walk.src.virt.addr;
+ memcpy(kbuf + src_data, op->src, datasize);
+ src_data = src_data + datasize;
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ memcpy(kbuf + nbytes, op->key, op->keylen);
+ eemi_ops->rsa(dma_addr, nbytes, flags);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((datasize = walk.nbytes)) {
+ memcpy(walk.dst.virt.addr, kbuf + dst_data, datasize);
+ dst_data = dst_data + datasize;
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
+ return err;
+}
+
+static int
+zynqmp_rsa_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return zynqmp_rsa_xcrypt(desc, dst, src, nbytes, 0);
+}
+
+static int
+zynqmp_rsa_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return zynqmp_rsa_xcrypt(desc, dst, src, nbytes, 1);
+}
+
+static struct crypto_alg zynqmp_alg = {
+ .cra_name = "xilinx-zynqmp-rsa",
+ .cra_driver_name = "zynqmp-rsa",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = ZYNQMP_RSA_BLOCKSIZE,
+ .cra_ctxsize = sizeof(struct zynqmp_rsa_op),
+ .cra_alignmask = 15,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = 0,
+ .max_keysize = ZYNQMP_RSA_MAX_KEY_SIZE,
+ .setkey = zynqmp_setkey_blk,
+ .encrypt = zynqmp_rsa_encrypt,
+ .decrypt = zynqmp_rsa_decrypt,
+ .ivsize = 1,
+ }
+ }
+};
+
+static const struct of_device_id zynqmp_rsa_dt_ids[] = {
+ { .compatible = "xlnx,zynqmp-rsa" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_rsa_dt_ids);
+
+static int zynqmp_rsa_probe(struct platform_device *pdev)
+{
+ struct zynqmp_rsa_dev *rsa_dd;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ rsa_dd = devm_kzalloc(&pdev->dev, sizeof(*rsa_dd), GFP_KERNEL);
+ if (!rsa_dd)
+ return -ENOMEM;
+
+ rsa_dd->dev = dev;
+ platform_set_drvdata(pdev, rsa_dd);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (ret < 0) {
+ dev_err(dev, "no usable DMA configuration");
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&rsa_dd->list);
+ spin_lock_init(&rsa_dd->lock);
+ crypto_init_queue(&rsa_dd->queue, ZYNQMP_RSA_QUEUE_LENGTH);
+ spin_lock(&zynqmp_rsa.lock);
+ list_add_tail(&rsa_dd->list, &zynqmp_rsa.dev_list);
+ spin_unlock(&zynqmp_rsa.lock);
+
+ ret = crypto_register_alg(&zynqmp_alg);
+ if (ret)
+ goto err_algs;
+
+ return 0;
+
+err_algs:
+ spin_lock(&zynqmp_rsa.lock);
+ list_del(&rsa_dd->list);
+ spin_unlock(&zynqmp_rsa.lock);
+ dev_err(dev, "initialization failed.\n");
+ return ret;
+}
+
+static int zynqmp_rsa_remove(struct platform_device *pdev)
+{
+ crypto_unregister_alg(&zynqmp_alg);
+ return 0;
+}
+
+static struct platform_driver xilinx_rsa_driver = {
+ .probe = zynqmp_rsa_probe,
+ .remove = zynqmp_rsa_remove,
+ .driver = {
+ .name = "zynqmp_rsa",
+ .of_match_table = of_match_ptr(zynqmp_rsa_dt_ids),
+ },
+};
+
+module_platform_driver(xilinx_rsa_driver);
+
+MODULE_DESCRIPTION("ZynqMP RSA hw acceleration support.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nava kishore Manne <navam@xilinx.com>");
diff --git a/drivers/crypto/zynqmp-sha.c b/drivers/crypto/zynqmp-sha.c
new file mode 100644
index 000000000000..3a15fedbad98
--- /dev/null
+++ b/drivers/crypto/zynqmp-sha.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Xilinx, Inc.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_SHA3_INIT 1
+#define ZYNQMP_SHA3_UPDATE 2
+#define ZYNQMP_SHA3_FINAL 4
+
+#define ZYNQMP_SHA_QUEUE_LENGTH 1
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+struct zynqmp_sha_dev;
+
+/*
+ * .statesize = sizeof(struct zynqmp_sha_reqctx) must be <= PAGE_SIZE / 8 as
+ * tested by the ahash_prepare_alg() function.
+ */
+struct zynqmp_sha_reqctx {
+ struct zynqmp_sha_dev *dd;
+ unsigned long flags;
+};
+
+struct zynqmp_sha_ctx {
+ struct zynqmp_sha_dev *dd;
+ unsigned long flags;
+};
+
+struct zynqmp_sha_dev {
+ struct list_head list;
+ struct device *dev;
+ /* the lock protects queue and dev list */
+ spinlock_t lock;
+ int err;
+
+ unsigned long flags;
+ struct crypto_queue queue;
+ struct ahash_request *req;
+};
+
+struct zynqmp_sha_drv {
+ struct list_head dev_list;
+ /* the lock protects the dev list */
+ spinlock_t lock;
+};
+
+static struct zynqmp_sha_drv zynqmp_sha = {
+ .dev_list = LIST_HEAD_INIT(zynqmp_sha.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(zynqmp_sha.lock),
+};
+
+static int zynqmp_sha_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct zynqmp_sha_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
+ struct zynqmp_sha_dev *dd = NULL;
+ struct zynqmp_sha_dev *tmp;
+ int ret;
+
+ if (!eemi_ops->sha_hash)
+ return -ENOTSUPP;
+
+ spin_lock_bh(&zynqmp_sha.lock);
+ if (!tctx->dd) {
+ list_for_each_entry(tmp, &zynqmp_sha.dev_list, list) {
+ dd = tmp;
+ break;
+ }
+ tctx->dd = dd;
+ } else {
+ dd = tctx->dd;
+ }
+ spin_unlock_bh(&zynqmp_sha.lock);
+
+ ctx->dd = dd;
+ dev_dbg(dd->dev, "init: digest size: %d\n",
+ crypto_ahash_digestsize(tfm));
+
+ ret = eemi_ops->sha_hash(0, 0, ZYNQMP_SHA3_INIT);
+
+ return ret;
+}
+
+static int zynqmp_sha_update(struct ahash_request *req)
+{
+ struct zynqmp_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct zynqmp_sha_dev *dd = tctx->dd;
+ char *kbuf;
+ size_t dma_size = req->nbytes;
+ dma_addr_t dma_addr;
+ int ret;
+
+ if (!req->nbytes)
+ return 0;
+
+ if (!eemi_ops->sha_hash)
+ return -ENOTSUPP;
+
+ kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(kbuf, req->src, 0, req->nbytes, 0);
+ __flush_cache_user_range((unsigned long)kbuf,
+ (unsigned long)kbuf + dma_size);
+ ret = eemi_ops->sha_hash(dma_addr, req->nbytes, ZYNQMP_SHA3_UPDATE);
+ dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
+
+ return ret;
+}
+
+static int zynqmp_sha_final(struct ahash_request *req)
+{
+ struct zynqmp_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct zynqmp_sha_dev *dd = tctx->dd;
+ char *kbuf;
+ size_t dma_size = SHA384_DIGEST_SIZE;
+ dma_addr_t dma_addr;
+ int ret;
+
+ if (!eemi_ops->sha_hash)
+ return -ENOTSUPP;
+
+ kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ ret = eemi_ops->sha_hash(dma_addr, dma_size, ZYNQMP_SHA3_FINAL);
+ if (!ret)
+ memcpy(req->result, kbuf, SHA384_DIGEST_SIZE);
+ dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
+
+ return ret;
+}
+
+static int zynqmp_sha_finup(struct ahash_request *req)
+{
+ int ret;
+
+ ret = zynqmp_sha_update(req);
+ if (ret)
+ return ret;
+
+ return zynqmp_sha_final(req);
+}
+
+static int zynqmp_sha_digest(struct ahash_request *req)
+{
+ int ret;
+
+ ret = zynqmp_sha_init(req);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_sha_update(req);
+ if (ret)
+ return ret;
+
+ return zynqmp_sha_final(req);
+}
+
+static int zynqmp_sha_export(struct ahash_request *req, void *out)
+{
+ const struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(out, ctx, sizeof(*ctx));
+ return 0;
+}
+
+static int zynqmp_sha_import(struct ahash_request *req, const void *in)
+{
+ struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(ctx, in, sizeof(*ctx));
+ return 0;
+}
+
+static int zynqmp_sha_cra_init(struct crypto_tfm *tfm)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct zynqmp_sha_reqctx));
+
+ return 0;
+}
+
+static struct ahash_alg sha3_alg = {
+ .init = zynqmp_sha_init,
+ .update = zynqmp_sha_update,
+ .final = zynqmp_sha_final,
+ .finup = zynqmp_sha_finup,
+ .digest = zynqmp_sha_digest,
+ .export = zynqmp_sha_export,
+ .import = zynqmp_sha_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct zynqmp_sha_reqctx),
+ .base = {
+ .cra_name = "xilinx-keccak-384",
+ .cra_driver_name = "zynqmp-keccak-384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct zynqmp_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = zynqmp_sha_cra_init,
+ }
+ }
+};
+
+static const struct of_device_id zynqmp_sha_dt_ids[] = {
+ { .compatible = "xlnx,zynqmp-keccak-384" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_sha_dt_ids);
+
+static int zynqmp_sha_probe(struct platform_device *pdev)
+{
+ struct zynqmp_sha_dev *sha_dd;
+ struct device *dev = &pdev->dev;
+ int err;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
+ if (!sha_dd)
+ return -ENOMEM;
+
+ sha_dd->dev = dev;
+ platform_set_drvdata(pdev, sha_dd);
+ INIT_LIST_HEAD(&sha_dd->list);
+ spin_lock_init(&sha_dd->lock);
+ crypto_init_queue(&sha_dd->queue, ZYNQMP_SHA_QUEUE_LENGTH);
+ spin_lock(&zynqmp_sha.lock);
+ list_add_tail(&sha_dd->list, &zynqmp_sha.dev_list);
+ spin_unlock(&zynqmp_sha.lock);
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (err < 0)
+ dev_err(dev, "no usable DMA configuration");
+
+ err = crypto_register_ahash(&sha3_alg);
+ if (err)
+ goto err_algs;
+
+ return 0;
+
+err_algs:
+ spin_lock(&zynqmp_sha.lock);
+ list_del(&sha_dd->list);
+ spin_unlock(&zynqmp_sha.lock);
+ dev_err(dev, "initialization failed.\n");
+
+ return err;
+}
+
+static int zynqmp_sha_remove(struct platform_device *pdev)
+{
+ struct zynqmp_sha_dev *sha_dd;
+
+ sha_dd = platform_get_drvdata(pdev);
+
+ if (!sha_dd)
+ return -ENODEV;
+
+ spin_lock(&zynqmp_sha.lock);
+ list_del(&sha_dd->list);
+ spin_unlock(&zynqmp_sha.lock);
+
+ crypto_unregister_ahash(&sha3_alg);
+
+ return 0;
+}
+
+static struct platform_driver zynqmp_sha_driver = {
+ .probe = zynqmp_sha_probe,
+ .remove = zynqmp_sha_remove,
+ .driver = {
+ .name = "zynqmp-keccak-384",
+ .of_match_table = of_match_ptr(zynqmp_sha_dt_ids),
+ },
+};
+
+module_platform_driver(zynqmp_sha_driver);
+
+MODULE_DESCRIPTION("ZynqMP SHA3 hw acceleration support.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nava kishore Manne <navam@xilinx.com>");
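Once this driver is bound, the ahash it registers ("xilinx-keccak-384") is
reachable from user space through the AF_ALG socket interface. A minimal
sketch, assuming CONFIG_CRYPTO_USER_API_HASH is enabled and the algorithm
name matches the cra_name above (error handling trimmed):

    #include <linux/if_alg.h>
    #include <sys/socket.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct sockaddr_alg sa = {
                    .salg_family = AF_ALG,
                    .salg_type   = "hash",
                    .salg_name   = "xilinx-keccak-384",
            };
            unsigned char digest[48];       /* SHA384_DIGEST_SIZE */
            int tfmfd, opfd, i;

            tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
            bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
            opfd = accept(tfmfd, NULL, 0);

            write(opfd, "abc", 3);                  /* message */
            read(opfd, digest, sizeof(digest));     /* 384-bit digest */

            for (i = 0; i < (int)sizeof(digest); i++)
                    printf("%02x", digest[i]);
            printf("\n");
            close(opfd);
            close(tfmfd);
            return 0;
    }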
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index c79652ee94be..31e6cb5211bc 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -595,6 +595,7 @@ static void devfreq_dev_release(struct device *dev)
devfreq->profile->exit(devfreq->dev.parent);
mutex_destroy(&devfreq->lock);
+ srcu_cleanup_notifier_head(&devfreq->transition_notifier_list);
kfree(devfreq);
}
@@ -603,8 +604,7 @@ static void devfreq_dev_release(struct device *dev)
* @dev: the device to add devfreq feature.
* @profile: device-specific profile to run devfreq.
* @governor_name: name of the policy to choose frequency.
- * @data: private data for the governor. The devfreq framework does not
- * touch this value.
+ * @data: devfreq driver pass to governors, governor should not change it.
*/
struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
@@ -788,8 +788,7 @@ static void devm_devfreq_dev_release(struct device *dev, void *res)
* @dev: the device to add devfreq feature.
* @profile: device-specific profile to run devfreq.
* @governor_name: name of the policy to choose frequency.
- * @data: private data for the governor. The devfreq framework does not
- * touch this value.
+ * @data: devfreq driver pass to governors, governor should not change it.
*
* This function manages automatically the memory of devfreq device using device
* resource management and simplify the free operation for memory of devfreq
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index af94942fcf95..a3ae4dc4668b 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -21,7 +21,7 @@ struct userspace_data {
static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
{
- struct userspace_data *data = df->data;
+ struct userspace_data *data = df->governor_data;
if (data->valid)
*freq = data->user_frequency;
@@ -40,7 +40,7 @@ static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
int err = 0;
mutex_lock(&devfreq->lock);
- data = devfreq->data;
+ data = devfreq->governor_data;
sscanf(buf, "%lu", &wanted);
data->user_frequency = wanted;
@@ -60,7 +60,7 @@ static ssize_t show_freq(struct device *dev, struct device_attribute *attr,
int err = 0;
mutex_lock(&devfreq->lock);
- data = devfreq->data;
+ data = devfreq->governor_data;
if (data->valid)
err = sprintf(buf, "%lu\n", data->user_frequency);
@@ -91,7 +91,7 @@ static int userspace_init(struct devfreq *devfreq)
goto out;
}
data->valid = false;
- devfreq->data = data;
+ devfreq->governor_data = data;
err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
out:
@@ -107,8 +107,8 @@ static void userspace_exit(struct devfreq *devfreq)
if (devfreq->dev.kobj.sd)
sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
- kfree(devfreq->data);
- devfreq->data = NULL;
+ kfree(devfreq->governor_data);
+ devfreq->governor_data = NULL;
}
static int devfreq_userspace_handler(struct devfreq *devfreq,
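With this change, a governor keeps its private state in
devfreq->governor_data while devfreq->data stays owned by the driver. A
minimal sketch of the pattern under that split (hypothetical governor,
names invented for illustration):

    #include <linux/devfreq.h>
    #include <linux/slab.h>
    #include "governor.h"

    struct my_gov_data {
            unsigned long fixed_freq;
    };

    static int my_gov_get_target_freq(struct devfreq *df, unsigned long *freq)
    {
            struct my_gov_data *data = df->governor_data;   /* not df->data */

            *freq = data->fixed_freq;
            return 0;
    }

    static int my_gov_event_handler(struct devfreq *df, unsigned int event,
                                    void *data)
    {
            struct my_gov_data *priv;

            switch (event) {
            case DEVFREQ_GOV_START:
                    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
                    if (!priv)
                            return -ENOMEM;
                    priv->fixed_freq = df->profile->initial_freq;
                    df->governor_data = priv;       /* owned by the governor */
                    break;
            case DEVFREQ_GOV_STOP:
                    kfree(df->governor_data);
                    df->governor_data = NULL;
                    break;
            default:
                    break;
            }
            return 0;
    }

    static struct devfreq_governor my_governor = {
            .name = "my_fixed",
            .get_target_freq = my_gov_get_target_freq,
            .event_handler = my_gov_event_handler,
    };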
diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c
index c9aa15fb86a9..d07bceb3e34b 100644
--- a/drivers/dio/dio.c
+++ b/drivers/dio/dio.c
@@ -110,6 +110,12 @@ static char dio_no_name[] = { 0 };
#endif /* CONFIG_DIO_CONSTANTS */
+static void dio_dev_release(struct device *dev)
+{
+ struct dio_dev *ddev = container_of(dev, struct dio_dev, dev);
+ kfree(ddev);
+}
+
int __init dio_find(int deviceid)
{
/* Called to find a DIO device before the full bus scan has run.
@@ -222,6 +228,7 @@ static int __init dio_init(void)
dev->bus = &dio_bus;
dev->dev.parent = &dio_bus.dev;
dev->dev.bus = &dio_bus_type;
+ dev->dev.release = dio_dev_release;
dev->scode = scode;
dev->resource.start = pa;
dev->resource.end = pa + DIO_SIZE(scode, va);
@@ -249,6 +256,7 @@ static int __init dio_init(void)
if (error) {
pr_err("DIO: Error registering device %s\n",
dev->name);
+ put_device(&dev->dev);
continue;
}
error = dio_create_sysfs_dev_files(dev);
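The put_device() added above follows the driver-core rule that a failed
device_register() still takes a reference, so the caller must drop it with
put_device() (which ends in the release callback) rather than freeing the
object directly. The shape of the pattern, with hypothetical names:

    struct foo_dev {
            struct device dev;
            /* bus-specific fields */
    };

    static void foo_release(struct device *dev)
    {
            kfree(container_of(dev, struct foo_dev, dev));
    }

    static int foo_add_device(struct foo_dev *fdev)
    {
            int err;

            fdev->dev.release = foo_release;

            err = device_register(&fdev->dev);
            if (err) {
                    /*
                     * device_register() grabbed a reference even though it
                     * failed; drop it so foo_release() frees the object.
                     * Never kfree() the struct directly at this point.
                     */
                    put_device(&fdev->dev);
                    return err;
            }
            return 0;
    }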
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 6713cfb1995c..7e7356970d5f 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
*/
static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
{
+ LIST_HEAD(signalled);
struct sync_pt *pt, *next;
trace_sync_timeline(obj);
@@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
if (!timeline_fence_signaled(&pt->base))
break;
- list_del_init(&pt->link);
+ dma_fence_get(&pt->base);
+
+ list_move_tail(&pt->link, &signalled);
rb_erase(&pt->node, &obj->pt_tree);
- /*
- * A signal callback may release the last reference to this
- * fence, causing it to be freed. That operation has to be
- * last to avoid a use after free inside this loop, and must
- * be after we remove the fence from the timeline in order to
- * prevent deadlocking on timeline->lock inside
- * timeline_fence_release().
- */
dma_fence_signal_locked(&pt->base);
}
spin_unlock_irq(&obj->lock);
+
+ list_for_each_entry_safe(pt, next, &signalled, link) {
+ list_del_init(&pt->link);
+ dma_fence_put(&pt->base);
+ }
}
/**
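The rework above pins each fence with dma_fence_get(), signals it while
still holding the timeline lock, and drops the references only after the
lock is released, so timeline_fence_release() can never re-take the lock
from inside the loop. Reduced to its essentials, the collect-then-release
shape looks like this (hypothetical item/timeline types; a sketch of the
locking pattern, not the sw_sync code itself):

    static void signal_all(struct timeline *tl)
    {
            LIST_HEAD(signalled);
            struct item *it, *next;

            spin_lock_irq(&tl->lock);
            list_for_each_entry_safe(it, next, &tl->pending, link) {
                    item_get(it);                   /* keep it alive */
                    list_move_tail(&it->link, &signalled);
                    item_signal_locked(it);         /* callbacks may drop refs */
            }
            spin_unlock_irq(&tl->lock);

            /* A final put here runs the release path without tl->lock held. */
            list_for_each_entry_safe(it, next, &signalled, link) {
                    list_del_init(&it->link);
                    item_put(it);
            }
    }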
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index c6e9b7bd7618..80ccdf96093f 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -287,7 +287,23 @@ static struct miscdevice udmabuf_misc = {
static int __init udmabuf_dev_init(void)
{
- return misc_register(&udmabuf_misc);
+ int ret;
+
+ ret = misc_register(&udmabuf_misc);
+ if (ret < 0) {
+ pr_err("Could not initialize udmabuf device\n");
+ return ret;
+ }
+
+ ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
+ DMA_BIT_MASK(64));
+ if (ret < 0) {
+ pr_err("Could not setup DMA mask for udmabuf device\n");
+ misc_deregister(&udmabuf_misc);
+ return ret;
+ }
+
+ return 0;
}
static void __exit udmabuf_dev_exit(void)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 1322461f1f3c..48faf66bf9fa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -208,6 +208,7 @@ config FSL_DMA
config FSL_EDMA
tristate "Freescale eDMA engine support"
depends on OF
+ depends on HAS_IOMEM
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -268,6 +269,7 @@ config IMX_SDMA
config INTEL_IDMA64
tristate "Intel integrated DMA 64-bit support"
+ depends on HAS_IOMEM
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -580,16 +582,16 @@ config TEGRA20_APB_DMA
config TEGRA210_ADMA
tristate "NVIDIA Tegra210 ADMA support"
- depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST)
+ depends on (ARCH_TEGRA || COMPILE_TEST)
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
- Support for the NVIDIA Tegra210 ADMA controller driver. The
- DMA controller has multiple DMA channels and is used to service
- various audio clients in the Tegra210 audio processing engine
- (APE). This DMA controller transfers data from memory to
- peripheral and vice versa. It does not support memory to
- memory data transfer.
+ Support for the NVIDIA Tegra210/Tegra186/Tegra194/Tegra234 ADMA
+ controller driver. The DMA controller has multiple DMA channels
+ and is used to service various audio clients in the Tegra210
+ audio processing engine (APE). This DMA controller transfers
+ data from memory to peripheral and vice versa. It does not
+ support memory to memory data transfer.
config TIMB_DMA
tristate "Timberdale FPGA DMA support"
@@ -618,6 +620,8 @@ config XGENE_DMA
help
Enable support for the APM X-Gene SoC DMA engine.
+source "drivers/dma/xilinx/Kconfig"
+
config XILINX_DMA
tristate "Xilinx AXI DMAS Engine"
depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
@@ -644,6 +648,18 @@ config XILINX_ZYNQMP_DMA
help
Enable support for Xilinx ZynqMP DMA controller.
+config XILINX_PS_PCIE_DMA
+ tristate "Xilinx PS PCIe DMA support"
+ depends on (PCI && X86_64 || ARM64)
+ select DMA_ENGINE
+ help
+ Enable support for the Xilinx PS PCIe DMA engine present
+ in recent Xilinx ZynqMP chipsets.
+
+ Say Y here if you have such a chipset.
+
+ If unsure, say N.
+
config ZX_DMA
tristate "ZTE ZX DMA support"
depends on ARCH_ZX || COMPILE_TEST
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 303bc3e601a1..a43097577573 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -246,6 +246,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
ATC_SPIP_BOUNDARY(first->boundary));
channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
ATC_DPIP_BOUNDARY(first->boundary));
+ /* Don't allow CPU to reorder channel enable. */
+ wmb();
dma_writel(atdma, CHER, atchan->mask);
vdbg_dump_regs(atchan);
@@ -306,7 +308,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
struct at_desc *desc_first = atc_first_active(atchan);
struct at_desc *desc;
int ret;
- u32 ctrla, dscr, trials;
+ u32 ctrla, dscr;
+ unsigned int i;
/*
* If the cookie doesn't match to the currently running transfer then
@@ -376,7 +379,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
dscr = channel_readl(atchan, DSCR);
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
- for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+ for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
u32 new_dscr;
rmb(); /* ensure DSCR is read after CTRLA */
@@ -402,7 +405,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
}
- if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+ if (unlikely(i == ATC_MAX_DSCR_TRIALS))
return -ETIMEDOUT;
/* for the first descriptor we can be more accurate */
@@ -550,10 +553,6 @@ static void atc_handle_error(struct at_dma_chan *atchan)
bad_desc = atc_first_active(atchan);
list_del_init(&bad_desc->desc_node);
- /* As we are stopped, take advantage to push queued descriptors
- * in active_list */
- list_splice_init(&atchan->queue, atchan->active_list.prev);
-
/* Try to restart the controller */
if (!list_empty(&atchan->active_list))
atc_dostart(atchan, atc_first_active(atchan));
@@ -674,19 +673,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&atchan->lock, flags);
cookie = dma_cookie_assign(tx);
- if (list_empty(&atchan->active_list)) {
- dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
- desc->txd.cookie);
- atc_dostart(atchan, desc);
- list_add_tail(&desc->desc_node, &atchan->active_list);
- } else {
- dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
- desc->txd.cookie);
- list_add_tail(&desc->desc_node, &atchan->queue);
- }
-
+ list_add_tail(&desc->desc_node, &atchan->queue);
spin_unlock_irqrestore(&atchan->lock, flags);
+ dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
+ desc->txd.cookie);
return cookie;
}
@@ -1957,7 +1948,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
plat_dat->nr_channels);
- dma_async_device_register(&atdma->dma_common);
+ err = dma_async_device_register(&atdma->dma_common);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to register: %d.\n", err);
+ goto err_dma_async_device_register;
+ }
/*
* Do not return an error if the dmac node is not present in order to
@@ -1977,6 +1972,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
err_of_dma_controller_register:
dma_async_device_unregister(&atdma->dma_common);
+err_dma_async_device_register:
dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
dma_pool_destroy(atdma->dma_desc_pool);
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index fe8a5853ec49..65fa3e395bf0 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -164,13 +164,13 @@
/* LLI == Linked List Item; aka DMA buffer descriptor */
struct at_lli {
/* values that are not changed by hardware */
- dma_addr_t saddr;
- dma_addr_t daddr;
+ u32 saddr;
+ u32 daddr;
/* value that may get written back: */
- u32 ctrla;
+ u32 ctrla;
/* more values that are not changed by hardware */
- u32 ctrlb;
- dma_addr_t dscr; /* chain to next lli */
+ u32 ctrlb;
+ u32 dscr; /* chain to next lli */
};
/**
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index a406b3c0d170..fdf3b5be2d50 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -212,6 +212,7 @@ struct at_xdmac {
int irq;
struct clk *clk;
u32 save_gim;
+ u32 save_gs;
struct dma_pool *at_xdmac_desc_pool;
struct at_xdmac_chan chan[0];
};
@@ -1922,6 +1923,7 @@ static int atmel_xdmac_suspend(struct device *dev)
}
}
atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
+ atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);
at_xdmac_off(atxdmac);
clk_disable_unprepare(atxdmac->clk);
@@ -1958,7 +1960,8 @@ static int atmel_xdmac_resume(struct device *dev)
at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
wmb();
- at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+ if (atxdmac->save_gs & atchan->mask)
+ at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
}
}
return 0;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 4b604086b1b3..d304731d05fd 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -212,7 +212,8 @@ static int dma_chan_get(struct dma_chan *chan)
/* The channel is already in use, update client count */
if (chan->client_count) {
__module_get(owner);
- goto out;
+ chan->client_count++;
+ return 0;
}
if (!try_module_get(owner))
@@ -225,11 +226,11 @@ static int dma_chan_get(struct dma_chan *chan)
goto err_out;
}
+ chan->client_count++;
+
if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
balance_ref_count(chan);
-out:
- chan->client_count++;
return 0;
err_out:
@@ -970,6 +971,13 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
+ if (dma_has_cap(DMA_SG, device->cap_mask) && !device->device_prep_dma_sg) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_SG");
+ return -EIO;
+ }
+
if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 238936e2dfe2..99a1dba4544f 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -44,10 +44,15 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
+static unsigned int sg_buffers = 1;
+module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(sg_buffers,
+ "Number of scatter gather buffers (default: 1)");
+
static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
- "dmatest 0-memcpy 1-memset (default: 0)");
+ "dmatest 0-memcpy 1-slave_sg 2-memset (default: 0)");
static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
@@ -601,6 +606,9 @@ static int dmatest_func(void *data)
params->alignment;
src->cnt = dst->cnt = 1;
is_memset = true;
+ } else if (thread->type == DMA_SG) {
+ align = dev->copy_align;
+ src->cnt = dst->cnt = sg_buffers;
} else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
src->cnt = min_odd(params->xor_sources | 1, dev->max_xor);
@@ -668,9 +676,13 @@ static int dmatest_func(void *data)
struct dmaengine_unmap_data *um;
dma_addr_t *dsts;
unsigned int len;
+ struct scatterlist tx_sg[src->cnt];
+ struct scatterlist rx_sg[src->cnt];
total_tests++;
+ align = 3;
+
if (params->transfer_size) {
if (params->transfer_size >= buf_size) {
pr_err("%u-byte transfer size must be lower than %u-buffer size\n",
@@ -758,6 +770,15 @@ static int dmatest_func(void *data)
um->bidi_cnt++;
}
+ sg_init_table(tx_sg, src->cnt);
+ sg_init_table(rx_sg, src->cnt);
+ for (i = 0; i < src->cnt; i++) {
+ sg_dma_address(&rx_sg[i]) = srcs[i];
+ sg_dma_address(&tx_sg[i]) = dsts[i] + dst->off;
+ sg_dma_len(&tx_sg[i]) = len;
+ sg_dma_len(&rx_sg[i]) = len;
+ }
+
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
dsts[0] + dst->off,
@@ -767,6 +788,9 @@ static int dmatest_func(void *data)
dsts[0] + dst->off,
*(src->aligned[0] + src->off),
len, flags);
+ else if (thread->type == DMA_SG)
+ tx = dev->device_prep_dma_sg(chan, tx_sg, src->cnt,
+ rx_sg, src->cnt, flags);
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
dsts[0] + dst->off,
@@ -944,6 +968,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
op = "copy";
else if (type == DMA_MEMSET)
op = "set";
+ else if (type == DMA_SG)
+ op = "sg";
else if (type == DMA_XOR)
op = "xor";
else if (type == DMA_PQ)
@@ -1007,12 +1033,19 @@ static int dmatest_add_channel(struct dmatest_info *info,
}
if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
- if (dmatest == 1) {
+ if (dmatest == 2) {
cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
thread_count += cnt > 0 ? cnt : 0;
}
}
+ if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
+ if (dmatest == 1) {
+ cnt = dmatest_add_threads(info, dtc, DMA_SG);
+ thread_count += cnt > 0 ? cnt : 0;
+ }
+ }
+
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
cnt = dmatest_add_threads(info, dtc, DMA_XOR);
thread_count += cnt > 0 ? cnt : 0;
@@ -1090,6 +1123,7 @@ static void add_threaded_test(struct dmatest_info *info)
request_channels(info, DMA_MEMCPY);
request_channels(info, DMA_MEMSET);
request_channels(info, DMA_XOR);
+ request_channels(info, DMA_SG);
request_channels(info, DMA_PQ);
}
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index a1ce307c502f..0b970b91750c 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -551,6 +551,11 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
/* The bad descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+ axi_chan_name(chan));
+ goto out;
+ }
/* Remove the completed descriptor from issued list */
list_del(&vd->node);
@@ -565,6 +570,7 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
/* Try to restart the controller */
axi_chan_start_first_queued(chan);
+out:
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index afbd1a459019..25d65f64cd50 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -165,7 +165,7 @@ static void vchan_free_desc(struct virt_dma_desc *vdesc)
dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}
-static void dw_edma_start_transfer(struct dw_edma_chan *chan)
+static int dw_edma_start_transfer(struct dw_edma_chan *chan)
{
struct dw_edma_chunk *child;
struct dw_edma_desc *desc;
@@ -173,16 +173,16 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
vd = vchan_next_desc(&chan->vc);
if (!vd)
- return;
+ return 0;
desc = vd2dw_edma_desc(vd);
if (!desc)
- return;
+ return 0;
child = list_first_entry_or_null(&desc->chunk->list,
struct dw_edma_chunk, list);
if (!child)
- return;
+ return 0;
dw_edma_v0_core_start(child, !desc->xfer_sz);
desc->xfer_sz += child->ll_region.sz;
@@ -190,6 +190,8 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
list_del(&child->list);
kfree(child);
desc->chunks_alloc--;
+
+ return 1;
}
static int dw_edma_device_config(struct dma_chan *dchan,
@@ -273,9 +275,12 @@ static void dw_edma_device_issue_pending(struct dma_chan *dchan)
struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
unsigned long flags;
+ if (!chan->configured)
+ return;
+
spin_lock_irqsave(&chan->vc.lock, flags);
- if (chan->configured && chan->request == EDMA_REQ_NONE &&
- chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
+ if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
+ chan->status == EDMA_ST_IDLE) {
chan->status = EDMA_ST_BUSY;
dw_edma_start_transfer(chan);
}
@@ -483,14 +488,14 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
switch (chan->request) {
case EDMA_REQ_NONE:
desc = vd2dw_edma_desc(vd);
- if (desc->chunks_alloc) {
- chan->status = EDMA_ST_BUSY;
- dw_edma_start_transfer(chan);
- } else {
+ if (!desc->chunks_alloc) {
list_del(&vd->node);
vchan_cookie_complete(vd);
- chan->status = EDMA_ST_IDLE;
}
+
+ /* Continue transferring if there are remaining chunks or issued requests.
+ */
+ chan->status = dw_edma_start_transfer(chan) ? EDMA_ST_BUSY : EDMA_ST_IDLE;
break;
case EDMA_REQ_STOP:
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index f5a1ae164193..bd9b68e21ba7 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -109,6 +109,7 @@
#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
#define FSL_QDMA_CMD_DSEN_OFFSET 19
#define FSL_QDMA_CMD_LWC_OFFSET 16
+#define FSL_QDMA_CMD_PF BIT(17)
/* Field definition for Descriptor offset */
#define QDMA_CCDF_STATUS 20
@@ -372,7 +373,8 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
qdma_csgf_set_f(csgf_dest, len);
/* Descriptor Buffer */
cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
- FSL_QDMA_CMD_RWTTYPE_OFFSET);
+ FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+ FSL_QDMA_CMD_PF;
sdf->data = QDMA_SDDF_CMD(cmd);
cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
@@ -502,11 +504,11 @@ static struct fsl_qdma_queue
queue_temp = queue_head + i + (j * queue_num);
queue_temp->cq =
- dma_alloc_coherent(&pdev->dev,
- sizeof(struct fsl_qdma_format) *
- queue_size[i],
- &queue_temp->bus_addr,
- GFP_KERNEL);
+ dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct fsl_qdma_format) *
+ queue_size[i],
+ &queue_temp->bus_addr,
+ GFP_KERNEL);
if (!queue_temp->cq)
return NULL;
queue_temp->block_base = fsl_qdma->block_base +
@@ -551,11 +553,11 @@ static struct fsl_qdma_queue
/*
* Buffer for queue command
*/
- status_head->cq = dma_alloc_coherent(&pdev->dev,
- sizeof(struct fsl_qdma_format) *
- status_size,
- &status_head->bus_addr,
- GFP_KERNEL);
+ status_head->cq = dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct fsl_qdma_format) *
+ status_size,
+ &status_head->bus_addr,
+ GFP_KERNEL);
if (!status_head->cq) {
devm_kfree(&pdev->dev, status_head);
return NULL;
@@ -754,7 +756,7 @@ fsl_qdma_irq_init(struct platform_device *pdev,
int i;
int cpu;
int ret;
- char irq_name[20];
+ char irq_name[32];
fsl_qdma->error_irq =
platform_get_irq_byname(pdev, "qdma-error");
@@ -1150,10 +1152,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
if (!fsl_qdma->queue)
return -ENOMEM;
- ret = fsl_qdma_irq_init(pdev, fsl_qdma);
- if (ret)
- return ret;
-
fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
if (fsl_qdma->irq_base < 0)
return fsl_qdma->irq_base;
@@ -1192,16 +1190,19 @@ static int fsl_qdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fsl_qdma);
- ret = dma_async_device_register(&fsl_qdma->dma_dev);
+ ret = fsl_qdma_reg_init(fsl_qdma);
if (ret) {
- dev_err(&pdev->dev,
- "Can't register NXP Layerscape qDMA engine.\n");
+ dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
return ret;
}
- ret = fsl_qdma_reg_init(fsl_qdma);
+ ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+ if (ret)
+ return ret;
+
+ ret = dma_async_device_register(&fsl_qdma->dma_dev);
if (ret) {
- dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+ dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
return ret;
}
@@ -1221,8 +1222,6 @@ static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
static int fsl_qdma_remove(struct platform_device *pdev)
{
- int i;
- struct fsl_qdma_queue *status;
struct device_node *np = pdev->dev.of_node;
struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
@@ -1231,11 +1230,6 @@ static int fsl_qdma_remove(struct platform_device *pdev)
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_qdma->dma_dev);
- for (i = 0; i < fsl_qdma->block_number; i++) {
- status = fsl_qdma->status[i];
- dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
- status->n_cq, status->cq, status->bus_addr);
- }
return 0;
}
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 8ec7a7041e84..8dbff2f6c3b8 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1360,10 +1360,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
sdma_config_ownership(sdmac, false, true, false);
if (sdma_load_context(sdmac))
- goto err_desc_out;
+ goto err_bd_out;
return desc;
+err_bd_out:
+ sdma_free_bd(desc);
err_desc_out:
kfree(desc);
err_out:
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 8e2a4d1f0be5..997839c2130e 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -653,7 +653,7 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
if (active - i == 0) {
dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
__func__);
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
/* microsecond delay by sysfs variable per pending descriptor */
@@ -679,7 +679,7 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
if (chanerr &
(IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
ioat_eh(ioat_chan);
}
}
@@ -876,7 +876,7 @@ static void check_active(struct ioatdma_chan *ioat_chan)
}
if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
void ioat_timer_event(struct timer_list *t)
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
index e12b754e6398..60d3c5f09ad6 100644
--- a/drivers/dma/mcf-edma.c
+++ b/drivers/dma/mcf-edma.c
@@ -191,7 +191,13 @@ static int mcf_edma_probe(struct platform_device *pdev)
return -EINVAL;
}
- chans = pdata->dma_channels;
+ if (!pdata->dma_channels) {
+ dev_info(&pdev->dev, "setting default channel number to 64");
+ chans = 64;
+ } else {
+ chans = pdata->dma_channels;
+ }
+
len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
if (!mcf_edma)
@@ -203,11 +209,6 @@ static int mcf_edma_probe(struct platform_device *pdev)
mcf_edma->drvdata = &mcf_data;
mcf_edma->big_endian = 1;
- if (!mcf_edma->n_chans) {
- dev_info(&pdev->dev, "setting default channel number to 64");
- mcf_edma->n_chans = 64;
- }
-
mutex_init(&mcf_edma->fsl_edma_mutex);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index 7718d09e3d29..5d1ba3ba3755 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -450,9 +450,8 @@ static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
- synchronize_irq(c->irq);
-
spin_unlock_irqrestore(&c->vc.lock, flags);
+ synchronize_irq(c->irq);
return 0;
}
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 889a94af4c85..d5109d96ebf1 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -751,7 +751,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
- ret = EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
goto disable_reg_clk;
}
if (!IS_ERR(xor_dev->clk)) {
@@ -895,6 +895,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
tasklet_kill(&xor_dev->irq_tasklet);
clk_disable_unprepare(xor_dev->clk);
+ clk_disable_unprepare(xor_dev->reg_clk);
return 0;
}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 77a6495bb6b1..1d7d4b8d810a 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -403,6 +403,12 @@ enum desc_status {
*/
BUSY,
/*
+ * Pause was called while descriptor was BUSY. Due to hardware
+ * limitations, only termination is possible for descriptors
+ * that have been paused.
+ */
+ PAUSED,
+ /*
* Sitting on the channel work_list but xfer done
* by PL330 core
*/
@@ -1048,7 +1054,7 @@ static bool _trigger(struct pl330_thread *thrd)
return true;
}
-static bool _start(struct pl330_thread *thrd)
+static bool pl330_start_thread(struct pl330_thread *thrd)
{
switch (_state(thrd)) {
case PL330_STATE_FAULT_COMPLETING:
@@ -1696,7 +1702,7 @@ static int pl330_update(struct pl330_dmac *pl330)
thrd->req_running = -1;
/* Get going again ASAP */
- _start(thrd);
+ pl330_start_thread(thrd);
/* For now, just make a list of callbacks to be done */
list_add_tail(&descdone->rqd, &pl330->req_done);
@@ -2035,7 +2041,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch)
list_for_each_entry(desc, &pch->work_list, node) {
/* If already submitted */
- if (desc->status == BUSY)
+ if (desc->status == BUSY || desc->status == PAUSED)
continue;
ret = pl330_submit_req(pch->thread, desc);
@@ -2083,7 +2089,7 @@ static void pl330_tasklet(unsigned long data)
} else {
/* Make sure the PL330 Channel thread is active */
spin_lock(&pch->thread->dmac->lock);
- _start(pch->thread);
+ pl330_start_thread(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
}
@@ -2101,7 +2107,7 @@ static void pl330_tasklet(unsigned long data)
if (power_down) {
pch->active = true;
spin_lock(&pch->thread->dmac->lock);
- _start(pch->thread);
+ pl330_start_thread(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
power_down = false;
}
@@ -2322,6 +2328,7 @@ static int pl330_pause(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
struct pl330_dmac *pl330 = pch->dmac;
+ struct dma_pl330_desc *desc;
unsigned long flags;
pm_runtime_get_sync(pl330->ddma.dev);
@@ -2331,6 +2338,10 @@ static int pl330_pause(struct dma_chan *chan)
_stop(pch->thread);
spin_unlock(&pl330->lock);
+ list_for_each_entry(desc, &pch->work_list, node) {
+ if (desc->status == BUSY)
+ desc->status = PAUSED;
+ }
spin_unlock_irqrestore(&pch->lock, flags);
pm_runtime_mark_last_busy(pl330->ddma.dev);
pm_runtime_put_autosuspend(pl330->ddma.dev);
@@ -2421,7 +2432,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
else if (running && desc == running)
transferred =
pl330_get_current_xferred_count(pch, desc);
- else if (desc->status == BUSY)
+ else if (desc->status == BUSY || desc->status == PAUSED)
/*
* Busy but not running means either just enqueued,
* or finished and not yet marked done
@@ -2438,6 +2449,9 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
case DONE:
ret = DMA_COMPLETE;
break;
+ case PAUSED:
+ ret = DMA_PAUSED;
+ break;
case PREP:
case BUSY:
ret = DMA_IN_PROGRESS;
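From a dmaengine client's perspective, the new PAUSED state means a paused
pl330 channel reports DMA_PAUSED and, per the comment above, can only be
terminated. A hedged sketch of how a client might observe that (standard
dmaengine client API, error handling trimmed):

    /* chan and cookie come from a previously submitted transfer */
    static void pause_then_stop(struct dma_chan *chan, dma_cookie_t cookie)
    {
            struct dma_tx_state state;
            enum dma_status status;

            dmaengine_pause(chan);

            status = dmaengine_tx_status(chan, cookie, &state);
            if (status == DMA_PAUSED) {
                    /* pl330 cannot resume; termination is the only exit */
                    dmaengine_terminate_sync(chan);
            }
    }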
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index b4ef4f19f7de..9ce75ff9fa1c 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -723,7 +723,6 @@ static void pxad_free_desc(struct virt_dma_desc *vd)
dma_addr_t dma;
struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
- BUG_ON(sw_desc->nb_desc == 0);
for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
if (i > 0)
dma = sw_desc->hw_desc[i - 1]->ddadr;
@@ -1249,14 +1248,14 @@ static int pxad_init_phys(struct platform_device *op,
return -ENOMEM;
for (i = 0; i < nb_phy_chans; i++)
- if (platform_get_irq(op, i) > 0)
+ if (platform_get_irq_optional(op, i) > 0)
nr_irq++;
for (i = 0; i < nb_phy_chans; i++) {
phy = &pdev->phys[i];
phy->base = pdev->base;
phy->idx = i;
- irq = platform_get_irq(op, i);
+ irq = platform_get_irq_optional(op, i);
if ((nr_irq > 1) && (irq > 0))
ret = devm_request_irq(&op->dev, irq,
pxad_chan_handler,
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index eba942441e38..10a8a6d4e860 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1824,7 +1824,10 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
dmac->dev->dma_parms = &dmac->parms;
- dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+ ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+ if (ret)
+ return ret;
+
ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
if (ret)
return ret;
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
index 9c121a4b33ad..f97d80343aea 100644
--- a/drivers/dma/sh/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -25,7 +25,7 @@ struct sh_dmae_chan {
const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
int xmit_shift; /* log_2(bytes_per_xfer) */
void __iomem *base;
- char dev_id[16]; /* unique name per DMAC of channel */
+ char dev_id[32]; /* unique name per DMAC of channel */
int pm_error;
dma_addr_t slave_addr;
};
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index b966115bfad1..4f0c50106321 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1201,11 +1201,8 @@ static int sprd_dma_remove(struct platform_device *pdev)
{
struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
struct sprd_dma_chn *c, *cn;
- int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
- return ret;
+ pm_runtime_get_sync(&pdev->dev);
/* explicitly free the irq */
if (sdev->irq > 0)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 6671bfe08489..fa0ad8581edc 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3599,6 +3599,10 @@ static int __init d40_probe(struct platform_device *pdev)
spin_lock_init(&base->lcla_pool.lock);
base->irq = platform_get_irq(pdev, 0);
+ if (base->irq < 0) {
+ ret = base->irq;
+ goto destroy_cache;
+ }
ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
if (ret) {
@@ -3696,6 +3700,7 @@ static int __init d40_probe(struct platform_device *pdev)
regulator_disable(base->lcpa_regulator);
regulator_put(base->lcpa_regulator);
}
+ pm_runtime_disable(base->dev);
kfree(base->lcla_pool.alloc_map);
kfree(base->lookup_log_chans);
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index c902c2480640..f90feb081861 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -510,7 +510,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
src_maxburst = chan->dma_config.src_maxburst;
dst_maxburst = chan->dma_config.dst_maxburst;
- ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
@@ -938,7 +938,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
if (!desc)
return NULL;
- ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
@@ -1207,6 +1207,10 @@ static int stm32_mdma_resume(struct dma_chan *c)
unsigned long flags;
u32 status, reg;
+ /* Transfer can be terminated */
+ if (!chan->desc || (stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & STM32_MDMA_CCR_EN))
+ return -EPERM;
+
hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
spin_lock_irqsave(&chan->vchan.lock, flags);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 9068591bd684..d82c25138749 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -224,7 +224,7 @@ static int tegra_adma_init(struct tegra_adma *tdma)
int ret;
/* Clear any interrupts */
- tdma_write(tdma, tdma->cdata->global_int_clear, 0x1);
+ tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1);
/* Assert soft reset */
tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 80b780e49971..b570f08888ee 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -2361,7 +2361,7 @@ static int edma_probe(struct platform_device *pdev)
if (irq < 0 && node)
irq = irq_of_parse_and_map(node, 0);
- if (irq >= 0) {
+ if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
dev_name(dev));
ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
@@ -2377,7 +2377,7 @@ static int edma_probe(struct platform_device *pdev)
if (irq < 0 && node)
irq = irq_of_parse_and_map(node, 2);
- if (irq >= 0) {
+ if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
dev_name(dev));
ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
diff --git a/drivers/dma/xilinx/Kconfig b/drivers/dma/xilinx/Kconfig
new file mode 100644
index 000000000000..58fa0aa7f267
--- /dev/null
+++ b/drivers/dma/xilinx/Kconfig
@@ -0,0 +1,60 @@
+#
+# XILINX DMA Engines configuration
+#
+
+menuconfig XILINX_DMA_ENGINES
+ bool "Xilinx DMA Engines"
+ help
+ Enable support for the Xilinx DMA controllers. It supports three DMA
+ engines: AXI Central DMA (memory to memory transfer), AXI DMA (memory
+ and device transfer), and AXI VDMA (memory and video device transfer).
+
+if XILINX_DMA_ENGINES
+
+config XILINX_DMATEST
+ tristate "DMA Test client for AXI DMA"
+ depends on XILINX_DMA
+ help
+ Simple DMA test client. Say N unless you're debugging a
+ DMA Device driver.
+
+config XILINX_VDMATEST
+ tristate "DMA Test client for VDMA"
+ depends on XILINX_DMA
+ help
+ Simple DMA test client. Say N unless you're debugging a
+ DMA Device driver.
+
+config XILINX_DPDMA
+ tristate "Xilinx DPDMA Engine"
+ select DMA_ENGINE
+ help
+ Enable support for Xilinx DisplayPort DMA.
+
+config XILINX_DPDMA_DEBUG_FS
+ bool "Xilinx DPDMA debugfs"
+ depends on DEBUG_FS && XILINX_DPDMA
+ help
+ Enable the debugfs code for DPDMA driver. The debugfs code
+ enables debugging or testing related features. It exposes some
+ low level controls to the user space to help testing automation,
+ as well as can enable additional diagnostic or statistical
+ information.
+
+config XILINX_PS_PCIE_DMA_TEST
+ tristate "Xilinx PS PCIe DMA test client"
+ depends on XILINX_PS_PCIE_DMA
+ help
+ Enable support for the test client of Xilinx PS PCIe DMA engine
+ in recent Xilinx ZynqMP chipsets.
+
+ Say Y here if you have such a chipset.
+
+ If unsure, say N.
+endif # XILINX_DMA_ENGINES
+
+config XILINX_FRMBUF
+ tristate "Xilinx Framebuffer"
+ select DMA_ENGINE
+ help
+ Enable support for Xilinx Framebuffer DMA.
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
index e921de575b55..ca7f5e2602b9 100644
--- a/drivers/dma/xilinx/Makefile
+++ b/drivers/dma/xilinx/Makefile
@@ -1,3 +1,10 @@
+obj-$(CONFIG_XILINX_DMATEST) += axidmatest.o
+obj-$(CONFIG_XILINX_VDMATEST) += vdmatest.o
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_XILINX_DPDMA) += xilinx_dpdma.o
obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
+xilinx_ps_pcie_dma-objs := xilinx_ps_pcie_main.o xilinx_ps_pcie_platform.o
+obj-$(CONFIG_XILINX_PS_PCIE_DMA) += xilinx_ps_pcie_dma.o
+obj-$(CONFIG_XILINX_PS_PCIE_DMA_TEST) += xilinx_ps_pcie_dma_client.o
+obj-$(CONFIG_XILINX_FRMBUF) += xilinx_frmbuf.o
diff --git a/drivers/dma/xilinx/axidmatest.c b/drivers/dma/xilinx/axidmatest.c
new file mode 100644
index 000000000000..8a1ee436042c
--- /dev/null
+++ b/drivers/dma/xilinx/axidmatest.c
@@ -0,0 +1,701 @@
+/*
+ * XILINX AXI DMA Engine test module
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ * Based on Atmel DMA Test Client
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched/task.h>
+#include <linux/dma/xilinx_dma.h>
+
+static unsigned int test_buf_size = 16384;
+module_param(test_buf_size, uint, S_IRUGO);
+MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
+
+static unsigned int iterations = 5;
+module_param(iterations, uint, S_IRUGO);
+MODULE_PARM_DESC(iterations,
+ "Iterations before stopping test (default: infinite)");
+
+/*
+ * Initialization patterns. All bytes in the source buffer have bit 7
+ * set, all bytes in the destination buffer have bit 7 cleared.
+ *
+ * Bit 6 is set for all bytes which are to be copied by the DMA
+ * engine. Bit 5 is set for all bytes which are to be overwritten by
+ * the DMA engine.
+ *
+ * The remaining bits are the inverse of a counter which increments by
+ * one for each byte address.
+ */
+#define PATTERN_SRC 0x80
+#define PATTERN_DST 0x00
+#define PATTERN_COPY 0x40
+#define PATTERN_OVERWRITE 0x20
+#define PATTERN_COUNT_MASK 0x1f
+
+struct dmatest_slave_thread {
+ struct list_head node;
+ struct task_struct *task;
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ u8 **srcs;
+ u8 **dsts;
+ enum dma_transaction_type type;
+ bool done;
+};
+
+struct dmatest_chan {
+ struct list_head node;
+ struct dma_chan *chan;
+ struct list_head threads;
+};
+
+/*
+ * These are protected by dma_list_mutex since they're only used by
+ * the DMA filter function callback
+ */
+static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
+static LIST_HEAD(dmatest_channels);
+static unsigned int nr_channels;
+
+static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
+{
+ unsigned long long per_sec = 1000000;
+
+ if (runtime <= 0)
+ return 0;
+
+ /* drop precision until runtime is 32-bits */
+ while (runtime > UINT_MAX) {
+ runtime >>= 1;
+ per_sec <<= 1;
+ }
+
+ per_sec *= val;
+ do_div(per_sec, runtime);
+ return per_sec;
+}
+
+static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
+{
+ return dmatest_persec(runtime, len >> 10);
+}
+
+static bool is_threaded_test_run(struct dmatest_chan *tx_dtc,
+ struct dmatest_chan *rx_dtc)
+{
+ struct dmatest_slave_thread *thread;
+ int ret = false;
+
+ list_for_each_entry(thread, &tx_dtc->threads, node) {
+ if (!thread->done)
+ ret = true;
+ }
+
+ list_for_each_entry(thread, &rx_dtc->threads, node) {
+ if (!thread->done)
+ ret = true;
+ }
+ return ret;
+}
+
+static unsigned long dmatest_random(void)
+{
+ unsigned long buf;
+
+ get_random_bytes(&buf, sizeof(buf));
+ return buf;
+}
+
+static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
+{
+ unsigned int i;
+ u8 *buf;
+
+ for (; (buf = *bufs); bufs++) {
+ for (i = 0; i < start; i++)
+ buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < start + len; i++)
+ buf[i] = PATTERN_SRC | PATTERN_COPY
+ | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < test_buf_size; i++)
+ buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ buf++;
+ }
+}
+
+static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
+{
+ unsigned int i;
+ u8 *buf;
+
+ for (; (buf = *bufs); bufs++) {
+ for (i = 0; i < start; i++)
+ buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < start + len; i++)
+ buf[i] = PATTERN_DST | PATTERN_OVERWRITE
+ | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < test_buf_size; i++)
+ buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ }
+}
+
+static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
+ unsigned int counter, bool is_srcbuf)
+{
+ u8 diff = actual ^ pattern;
+ u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ const char *thread_name = current->comm;
+
+ if (is_srcbuf)
+ pr_warn(
+ "%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if ((pattern & PATTERN_COPY)
+ && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
+ pr_warn(
+ "%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if (diff & PATTERN_SRC)
+ pr_warn(
+ "%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else
+ pr_warn(
+ "%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+}
+
+static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
+ unsigned int end, unsigned int counter, u8 pattern,
+ bool is_srcbuf)
+{
+ unsigned int i;
+ unsigned int error_count = 0;
+ u8 actual;
+ u8 expected;
+ u8 *buf;
+ unsigned int counter_orig = counter;
+
+ for (; (buf = *bufs); bufs++) {
+ counter = counter_orig;
+ for (i = start; i < end; i++) {
+ actual = buf[i];
+ expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ if (actual != expected) {
+ if (error_count < 32)
+ dmatest_mismatch(actual, pattern, i,
+ counter, is_srcbuf);
+ error_count++;
+ }
+ counter++;
+ }
+ }
+
+ if (error_count > 32)
+ pr_warn("%s: %u errors suppressed\n",
+ current->comm, error_count - 32);
+
+ return error_count;
+}
+
+static void dmatest_slave_tx_callback(void *completion)
+{
+ complete(completion);
+}
+
+static void dmatest_slave_rx_callback(void *completion)
+{
+ complete(completion);
+}
+
+/* Function for slave transfers
+ * Each thread requires 2 channels, one for transmit, and one for receive
+ */
+static int dmatest_slave_func(void *data)
+{
+ struct dmatest_slave_thread *thread = data;
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ const char *thread_name;
+ unsigned int src_off, dst_off, len;
+ unsigned int error_count;
+ unsigned int failed_tests = 0;
+ unsigned int total_tests = 0;
+ dma_cookie_t tx_cookie;
+ dma_cookie_t rx_cookie;
+ enum dma_status status;
+ enum dma_ctrl_flags flags;
+ int ret;
+ int src_cnt;
+ int dst_cnt;
+ int bd_cnt = 11;
+ int i;
+ ktime_t ktime, start, diff;
+ ktime_t filltime = 0;
+ ktime_t comparetime = 0;
+ s64 runtime = 0;
+ unsigned long long total_len = 0;
+ thread_name = current->comm;
+
+ ret = -ENOMEM;
+
+ smp_rmb();
+ tx_chan = thread->tx_chan;
+ rx_chan = thread->rx_chan;
+ src_cnt = dst_cnt = bd_cnt;
+
+ thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->srcs)
+ goto err_srcs;
+ for (i = 0; i < src_cnt; i++) {
+ thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->srcs[i])
+ goto err_srcbuf;
+ }
+ thread->srcs[i] = NULL;
+
+ thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->dsts)
+ goto err_dsts;
+ for (i = 0; i < dst_cnt; i++) {
+ thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->dsts[i])
+ goto err_dstbuf;
+ }
+ thread->dsts[i] = NULL;
+
+ set_user_nice(current, 10);
+
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+ ktime = ktime_get();
+ while (!kthread_should_stop()
+ && !(iterations && total_tests >= iterations)) {
+ struct dma_device *tx_dev = tx_chan->device;
+ struct dma_device *rx_dev = rx_chan->device;
+ struct dma_async_tx_descriptor *txd = NULL;
+ struct dma_async_tx_descriptor *rxd = NULL;
+ dma_addr_t dma_srcs[src_cnt];
+ dma_addr_t dma_dsts[dst_cnt];
+ struct completion rx_cmp;
+ struct completion tx_cmp;
+ unsigned long rx_tmo =
+ msecs_to_jiffies(300000); /* RX takes longer */
+ unsigned long tx_tmo = msecs_to_jiffies(30000);
+ u8 align = 0;
+ struct scatterlist tx_sg[bd_cnt];
+ struct scatterlist rx_sg[bd_cnt];
+
+ total_tests++;
+
+ /* honor larger alignment restrictions */
+ align = tx_dev->copy_align;
+ if (rx_dev->copy_align > align)
+ align = rx_dev->copy_align;
+
+ if (1 << align > test_buf_size) {
+ pr_err("%u-byte buffer too small for %d-byte alignment\n",
+ test_buf_size, 1 << align);
+ break;
+ }
+
+ len = dmatest_random() % test_buf_size + 1;
+ len = (len >> align) << align;
+ if (!len)
+ len = 1 << align;
+ total_len += len;
+ src_off = dmatest_random() % (test_buf_size - len + 1);
+ dst_off = dmatest_random() % (test_buf_size - len + 1);
+
+ src_off = (src_off >> align) << align;
+ dst_off = (dst_off >> align) << align;
+
+ start = ktime_get();
+ dmatest_init_srcs(thread->srcs, src_off, len);
+ dmatest_init_dsts(thread->dsts, dst_off, len);
+ diff = ktime_sub(ktime_get(), start);
+ filltime = ktime_add(filltime, diff);
+
+ for (i = 0; i < src_cnt; i++) {
+ u8 *buf = thread->srcs[i] + src_off;
+
+ dma_srcs[i] = dma_map_single(tx_dev->dev, buf, len,
+ DMA_MEM_TO_DEV);
+ }
+
+ for (i = 0; i < dst_cnt; i++) {
+ dma_dsts[i] = dma_map_single(rx_dev->dev,
+ thread->dsts[i],
+ test_buf_size,
+ DMA_BIDIRECTIONAL);
+ }
+
+ sg_init_table(tx_sg, bd_cnt);
+ sg_init_table(rx_sg, bd_cnt);
+
+ for (i = 0; i < bd_cnt; i++) {
+ sg_dma_address(&tx_sg[i]) = dma_srcs[i];
+ sg_dma_address(&rx_sg[i]) = dma_dsts[i] + dst_off;
+
+ sg_dma_len(&tx_sg[i]) = len;
+ sg_dma_len(&rx_sg[i]) = len;
+
+ }
+
+ rxd = rx_dev->device_prep_slave_sg(rx_chan, rx_sg, bd_cnt,
+ DMA_DEV_TO_MEM, flags, NULL);
+
+ txd = tx_dev->device_prep_slave_sg(tx_chan, tx_sg, bd_cnt,
+ DMA_MEM_TO_DEV, flags, NULL);
+
+ if (!rxd || !txd) {
+ for (i = 0; i < src_cnt; i++)
+ dma_unmap_single(tx_dev->dev, dma_srcs[i], len,
+ DMA_MEM_TO_DEV);
+ for (i = 0; i < dst_cnt; i++)
+ dma_unmap_single(rx_dev->dev, dma_dsts[i],
+ test_buf_size,
+ DMA_BIDIRECTIONAL);
+ pr_warn(
+ "%s: #%u: prep error with src_off=0x%x ",
+ thread_name, total_tests - 1, src_off);
+ pr_warn("dst->off=0x%x len=0x%x\n",
+ dst->off, len);
+ msleep(100);
+ failed_tests++;
+ continue;
+ }
+
+ init_completion(&rx_cmp);
+ rxd->callback = dmatest_slave_rx_callback;
+ rxd->callback_param = &rx_cmp;
+ rx_cookie = rxd->tx_submit(rxd);
+
+ init_completion(&tx_cmp);
+ txd->callback = dmatest_slave_tx_callback;
+ txd->callback_param = &tx_cmp;
+ tx_cookie = txd->tx_submit(txd);
+
+ if (dma_submit_error(rx_cookie) ||
+ dma_submit_error(tx_cookie)) {
+ pr_warn(
+ "%s: #%u: submit error %d/%d with src_off=0x%x ",
+ thread_name, total_tests - 1,
+ rx_cookie, tx_cookie, src_off);
+ pr_warn("dst->off=0x%x len=0x%x\n",
+ dst->off, len);
+ msleep(100);
+ failed_tests++;
+ continue;
+ }
+ dma_async_issue_pending(tx_chan);
+ dma_async_issue_pending(rx_chan);
+
+ tx_tmo = wait_for_completion_timeout(&tx_cmp, tx_tmo);
+
+ status = dma_async_is_tx_complete(tx_chan, tx_cookie,
+ NULL, NULL);
+
+ if (tx_tmo == 0) {
+ pr_warn("%s: #%u: tx test timed out\n",
+ thread_name, total_tests - 1);
+ failed_tests++;
+ continue;
+ } else if (status != DMA_COMPLETE) {
+ pr_warn(
+ "%s: #%u: tx got completion callback, ",
+ thread_name, total_tests - 1);
+ pr_warn("but status is \'%s\'\n",
+ status == DMA_ERROR ? "error" :
+ "in progress");
+ failed_tests++;
+ continue;
+ }
+
+ rx_tmo = wait_for_completion_timeout(&rx_cmp, rx_tmo);
+ status = dma_async_is_tx_complete(rx_chan, rx_cookie,
+ NULL, NULL);
+
+ if (rx_tmo == 0) {
+ pr_warn("%s: #%u: rx test timed out\n",
+ thread_name, total_tests - 1);
+ failed_tests++;
+ continue;
+ } else if (status != DMA_COMPLETE) {
+ pr_warn(
+ "%s: #%u: rx got completion callback, ",
+ thread_name, total_tests - 1);
+ pr_warn("but status is \'%s\'\n",
+ status == DMA_ERROR ? "error" :
+ "in progress");
+ failed_tests++;
+ continue;
+ }
+
+ /* Unmap by myself */
+ for (i = 0; i < dst_cnt; i++)
+ dma_unmap_single(rx_dev->dev, dma_dsts[i],
+ test_buf_size, DMA_BIDIRECTIONAL);
+
+ error_count = 0;
+ start = ktime_get();
+ pr_debug("%s: verifying source buffer...\n", thread_name);
+ error_count += dmatest_verify(thread->srcs, 0, src_off,
+ 0, PATTERN_SRC, true);
+ error_count += dmatest_verify(thread->srcs, src_off,
+ src_off + len, src_off,
+ PATTERN_SRC | PATTERN_COPY, true);
+ error_count += dmatest_verify(thread->srcs, src_off + len,
+ test_buf_size, src_off + len,
+ PATTERN_SRC, true);
+
+ pr_debug("%s: verifying dest buffer...\n",
+ thread->task->comm);
+ error_count += dmatest_verify(thread->dsts, 0, dst_off,
+ 0, PATTERN_DST, false);
+ error_count += dmatest_verify(thread->dsts, dst_off,
+ dst_off + len, src_off,
+ PATTERN_SRC | PATTERN_COPY, false);
+ error_count += dmatest_verify(thread->dsts, dst_off + len,
+ test_buf_size, dst_off + len,
+ PATTERN_DST, false);
+ diff = ktime_sub(ktime_get(), start);
+ comparetime = ktime_add(comparetime, diff);
+
+ if (error_count) {
+ pr_warn("%s: #%u: %u errors with ",
+ thread_name, total_tests - 1, error_count);
+ pr_warn("src_off=0x%x dst->off=0x%x len=0x%x\n",
+ src_off, dst->off, len);
+ failed_tests++;
+ } else {
+ pr_debug("%s: #%u: No errors with ",
+ thread_name, total_tests - 1);
+ pr_debug("src_off=0x%x dst->off=0x%x len=0x%x\n",
+ src_off, dst->off, len);
+ }
+ }
+
+ ktime = ktime_sub(ktime_get(), ktime);
+ ktime = ktime_sub(ktime, comparetime);
+ ktime = ktime_sub(ktime, filltime);
+ runtime = ktime_to_us(ktime);
+
+ ret = 0;
+ for (i = 0; thread->dsts[i]; i++)
+ kfree(thread->dsts[i]);
+err_dstbuf:
+ kfree(thread->dsts);
+err_dsts:
+ for (i = 0; thread->srcs[i]; i++)
+ kfree(thread->srcs[i]);
+err_srcbuf:
+ kfree(thread->srcs);
+err_srcs:
+ pr_notice("%s: terminating after %u tests, %u failures %llu iops %llu KB/s (status %d)\n",
+ thread_name, total_tests, failed_tests,
+ dmatest_persec(runtime, total_tests),
+ dmatest_KBs(runtime, total_len), ret);
+
+ thread->done = true;
+ wake_up(&thread_wait);
+
+ return ret;
+}
+
+static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
+{
+ struct dmatest_slave_thread *thread;
+ struct dmatest_slave_thread *_thread;
+ int ret;
+
+ list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
+ ret = kthread_stop(thread->task);
+ pr_debug("dmatest: thread %s exited with status %d\n",
+ thread->task->comm, ret);
+ list_del(&thread->node);
+ put_task_struct(thread->task);
+ kfree(thread);
+ }
+ kfree(dtc);
+}
+
+static int dmatest_add_slave_threads(struct dmatest_chan *tx_dtc,
+ struct dmatest_chan *rx_dtc)
+{
+ struct dmatest_slave_thread *thread;
+ struct dma_chan *tx_chan = tx_dtc->chan;
+ struct dma_chan *rx_chan = rx_dtc->chan;
+ int ret;
+
+ thread = kzalloc(sizeof(struct dmatest_slave_thread), GFP_KERNEL);
+ if (!thread) {
+ pr_warn("dmatest: No memory for slave thread %s-%s\n",
+ dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+ return -ENOMEM;
+ }
+
+ thread->tx_chan = tx_chan;
+ thread->rx_chan = rx_chan;
+ thread->type = (enum dma_transaction_type)DMA_SLAVE;
+ smp_wmb();
+ thread->task = kthread_run(dmatest_slave_func, thread, "%s-%s",
+ dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+ ret = PTR_ERR(thread->task);
+ if (IS_ERR(thread->task)) {
+ pr_warn("dmatest: Failed to run thread %s-%s\n",
+ dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+ kfree(thread);
+ return ret;
+ }
+
+ /* srcbuf and dstbuf are allocated by the thread itself */
+ get_task_struct(thread->task);
+ list_add_tail(&thread->node, &tx_dtc->threads);
+
+ /* Added one thread with 2 channels */
+ return 1;
+}
+
+static int dmatest_add_slave_channels(struct dma_chan *tx_chan,
+ struct dma_chan *rx_chan)
+{
+ struct dmatest_chan *tx_dtc;
+ struct dmatest_chan *rx_dtc;
+ unsigned int thread_count = 0;
+
+ tx_dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
+ if (!tx_dtc) {
+ pr_warn("dmatest: No memory for tx %s\n",
+ dma_chan_name(tx_chan));
+ return -ENOMEM;
+ }
+
+ rx_dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
+ if (!rx_dtc) {
+ pr_warn("dmatest: No memory for rx %s\n",
+ dma_chan_name(rx_chan));
+		kfree(tx_dtc);
+		return -ENOMEM;
+ }
+
+ tx_dtc->chan = tx_chan;
+ rx_dtc->chan = rx_chan;
+ INIT_LIST_HEAD(&tx_dtc->threads);
+ INIT_LIST_HEAD(&rx_dtc->threads);
+
+ dmatest_add_slave_threads(tx_dtc, rx_dtc);
+ thread_count += 1;
+
+ pr_info("dmatest: Started %u threads using %s %s\n",
+ thread_count, dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+
+ list_add_tail(&tx_dtc->node, &dmatest_channels);
+ list_add_tail(&rx_dtc->node, &dmatest_channels);
+ nr_channels += 2;
+
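+	/*
+	 * With a finite iteration count, block here until the worker
+	 * thread flags itself done and wakes thread_wait.
+	 */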
+ if (iterations)
+ wait_event(thread_wait, !is_threaded_test_run(tx_dtc, rx_dtc));
+
+ return 0;
+}
+
+static int xilinx_axidmatest_probe(struct platform_device *pdev)
+{
+ struct dma_chan *chan, *rx_chan;
+ int err;
+
+ chan = dma_request_chan(&pdev->dev, "axidma0");
+ if (IS_ERR(chan)) {
+ err = PTR_ERR(chan);
+ if (err != -EPROBE_DEFER)
+ pr_err("xilinx_dmatest: No Tx channel\n");
+ return err;
+ }
+
+ rx_chan = dma_request_chan(&pdev->dev, "axidma1");
+ if (IS_ERR(rx_chan)) {
+ err = PTR_ERR(rx_chan);
+ if (err != -EPROBE_DEFER)
+ pr_err("xilinx_dmatest: No Rx channel\n");
+ goto free_tx;
+ }
+
+ err = dmatest_add_slave_channels(chan, rx_chan);
+ if (err) {
+ pr_err("xilinx_dmatest: Unable to add channels\n");
+ goto free_rx;
+ }
+
+ return 0;
+
+free_rx:
+ dma_release_channel(rx_chan);
+free_tx:
+ dma_release_channel(chan);
+
+ return err;
+}
+
+static int xilinx_axidmatest_remove(struct platform_device *pdev)
+{
+ struct dmatest_chan *dtc, *_dtc;
+ struct dma_chan *chan;
+
+ list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
+ list_del(&dtc->node);
+ chan = dtc->chan;
+ dmatest_cleanup_channel(dtc);
+ pr_info("xilinx_dmatest: dropped channel %s\n",
+ dma_chan_name(chan));
+ dmaengine_terminate_all(chan);
+ dma_release_channel(chan);
+ }
+ return 0;
+}
+
+static const struct of_device_id xilinx_axidmatest_of_ids[] = {
+ { .compatible = "xlnx,axi-dma-test-1.00.a",},
+ {}
+};
+
+static struct platform_driver xilinx_axidmatest_driver = {
+ .driver = {
+ .name = "xilinx_axidmatest",
+ .of_match_table = xilinx_axidmatest_of_ids,
+ },
+ .probe = xilinx_axidmatest_probe,
+ .remove = xilinx_axidmatest_remove,
+};
+
+static int __init axidma_init(void)
+{
+	return platform_driver_register(&xilinx_axidmatest_driver);
+}
+late_initcall(axidma_init);
+
+static void __exit axidma_exit(void)
+{
+ platform_driver_unregister(&xilinx_axidmatest_driver);
+}
+module_exit(axidma_exit);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx AXI DMA Test Client");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/vdmatest.c b/drivers/dma/xilinx/vdmatest.c
new file mode 100644
index 000000000000..f578e97df8b8
--- /dev/null
+++ b/drivers/dma/xilinx/vdmatest.c
@@ -0,0 +1,662 @@
+/*
+ * XILINX VDMA Engine test client driver
+ *
+ * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
+ *
+ * Based on Atmel DMA Test Client
+ *
+ * Description:
+ * This is a simple Xilinx VDMA test client for AXI VDMA driver.
+ * This test assumes both the channels of VDMA are enabled in the
+ * hardware design and configured in back-to-back connection. Test
+ * starts by pumping the data onto one channel (MM2S) and then
+ * compares the data that is received on the other channel (S2MM).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma/xilinx_dma.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/sched/task.h>
+#include <linux/wait.h>
+
+static unsigned int test_buf_size = 64;
+module_param(test_buf_size, uint, 0444);
+MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
+
+static unsigned int iterations = 1;
+module_param(iterations, uint, 0444);
+MODULE_PARM_DESC(iterations,
+		"Iterations before stopping test (default: 1)");
+
+static unsigned int hsize = 64;
+module_param(hsize, uint, 0444);
+MODULE_PARM_DESC(hsize, "Horizontal size in bytes");
+
+static unsigned int vsize = 32;
+module_param(vsize, uint, 0444);
+MODULE_PARM_DESC(vsize, "Vertical size in bytes");
+
+/*
+ * Initialization patterns. All bytes in the source buffer have bit 7
+ * set; all bytes in the destination buffer have bit 7 cleared.
+ *
+ * Bit 6 is set for all bytes which are to be copied by the DMA
+ * engine. Bit 5 is set for all bytes which are to be overwritten by
+ * the DMA engine.
+ *
+ * The remaining bits are the inverse of a counter which increments by
+ * one for each byte address.
+ */
+#define PATTERN_SRC 0x80
+#define PATTERN_DST 0x00
+#define PATTERN_COPY 0x40
+#define PATTERN_OVERWRITE 0x20
+#define PATTERN_COUNT_MASK 0x1f
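+
+/*
+ * Worked example: the source byte at index 5 inside the copied region
+ * is PATTERN_SRC | PATTERN_COPY | (~5 & PATTERN_COUNT_MASK), i.e.
+ * 0x80 | 0x40 | 0x1a = 0xda.
+ */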
+
+/* Maximum number of frame buffers */
+#define MAX_NUM_FRAMES 32
+
+/**
+ * struct xilinx_vdmatest_slave_thread - VDMA test thread
+ * @node: Thread node
+ * @task: Task structure pointer
+ * @tx_chan: Tx channel pointer
+ * @rx_chan: Rx Channel pointer
+ * @srcs: Source buffer
+ * @dsts: Destination buffer
+ * @type: DMA transaction type
+ * @done: Indicates the test thread has finished
+ */
+struct xilinx_vdmatest_slave_thread {
+ struct list_head node;
+ struct task_struct *task;
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ u8 **srcs;
+ u8 **dsts;
+ enum dma_transaction_type type;
+ bool done;
+};
+
+/**
+ * struct xilinx_vdmatest_chan - VDMA Test channel
+ * @node: Channel node
+ * @chan: DMA channel pointer
+ * @threads: List of VDMA test threads
+ */
+struct xilinx_vdmatest_chan {
+ struct list_head node;
+ struct dma_chan *chan;
+ struct list_head threads;
+};
+
+/* Global variables */
+static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
+static LIST_HEAD(xilinx_vdmatest_channels);
+static unsigned int nr_channels;
+static unsigned int frm_cnt;
+static dma_addr_t dma_srcs[MAX_NUM_FRAMES];
+static dma_addr_t dma_dsts[MAX_NUM_FRAMES];
+static struct dma_interleaved_template xt;
+
+static bool is_threaded_test_run(struct xilinx_vdmatest_chan *tx_dtc,
+ struct xilinx_vdmatest_chan *rx_dtc)
+{
+ struct xilinx_vdmatest_slave_thread *thread;
+	bool ret = false;
+
+ list_for_each_entry(thread, &tx_dtc->threads, node) {
+ if (!thread->done)
+ ret = true;
+ }
+
+ list_for_each_entry(thread, &rx_dtc->threads, node) {
+ if (!thread->done)
+ ret = true;
+ }
+ return ret;
+}
+
+static void xilinx_vdmatest_init_srcs(u8 **bufs, unsigned int start,
+ unsigned int len)
+{
+ unsigned int i;
+ u8 *buf;
+
+ for (; (buf = *bufs); bufs++) {
+ for (i = 0; i < start; i++)
+ buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ for (; i < start + len; i++)
+ buf[i] = PATTERN_SRC | PATTERN_COPY
+ | (~i & PATTERN_COUNT_MASK);
+ for (; i < test_buf_size; i++)
+ buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ }
+}
+
+static void xilinx_vdmatest_init_dsts(u8 **bufs, unsigned int start,
+ unsigned int len)
+{
+ unsigned int i;
+ u8 *buf;
+
+ for (; (buf = *bufs); bufs++) {
+ for (i = 0; i < start; i++)
+ buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ for (; i < start + len; i++)
+ buf[i] = PATTERN_DST | PATTERN_OVERWRITE
+ | (~i & PATTERN_COUNT_MASK);
+ for (; i < test_buf_size; i++)
+ buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ }
+}
+
+static void xilinx_vdmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
+ unsigned int counter, bool is_srcbuf)
+{
+ u8 diff = actual ^ pattern;
+ u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ const char *thread_name = current->comm;
+
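+	/*
+	 * diff isolates the bits that changed; checking it against the
+	 * PATTERN_* flag bits distinguishes data that was wrongly copied,
+	 * not copied at all, or simply corrupted.
+	 */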
+ if (is_srcbuf)
+ pr_warn(
+ "%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if ((pattern & PATTERN_COPY)
+ && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
+ pr_warn(
+ "%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if (diff & PATTERN_SRC)
+ pr_warn(
+ "%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else
+ pr_warn(
+ "%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+}
+
+static unsigned int xilinx_vdmatest_verify(u8 **bufs, unsigned int start,
+ unsigned int end, unsigned int counter, u8 pattern,
+ bool is_srcbuf)
+{
+ unsigned int i, error_count = 0;
+ u8 actual, expected, *buf;
+ unsigned int counter_orig = counter;
+
+ for (; (buf = *bufs); bufs++) {
+ counter = counter_orig;
+ for (i = start; i < end; i++) {
+ actual = buf[i];
+ expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ if (actual != expected) {
+ if (error_count < 32)
+ xilinx_vdmatest_mismatch(actual,
+ pattern, i,
+ counter, is_srcbuf);
+ error_count++;
+ }
+ counter++;
+ }
+ }
+
+ if (error_count > 32)
+ pr_warn("%s: %u errors suppressed\n",
+ current->comm, error_count - 32);
+
+ return error_count;
+}
+
+static void xilinx_vdmatest_slave_tx_callback(void *completion)
+{
+ pr_debug("Got tx callback\n");
+ complete(completion);
+}
+
+static void xilinx_vdmatest_slave_rx_callback(void *completion)
+{
+ pr_debug("Got rx callback\n");
+ complete(completion);
+}
+
+/*
+ * Function for slave transfers
+ * Each thread requires 2 channels, one for transmit, and one for receive
+ */
+static int xilinx_vdmatest_slave_func(void *data)
+{
+ struct xilinx_vdmatest_slave_thread *thread = data;
+ struct dma_chan *tx_chan, *rx_chan;
+ const char *thread_name;
+ unsigned int len, error_count;
+ unsigned int failed_tests = 0, total_tests = 0;
+ dma_cookie_t tx_cookie = 0, rx_cookie = 0;
+ enum dma_status status;
+ enum dma_ctrl_flags flags;
+ int ret = -ENOMEM, i;
+ struct xilinx_vdma_config config;
+
+ thread_name = current->comm;
+
+ /* Limit testing scope here */
+ test_buf_size = hsize * vsize;
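+	/* Each test buffer holds one full hsize x vsize frame. */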
+
+ /* This barrier ensures 'thread' is initialized and
+ * we get valid DMA channels
+ */
+ smp_rmb();
+ tx_chan = thread->tx_chan;
+ rx_chan = thread->rx_chan;
+
+ thread->srcs = kcalloc(frm_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->srcs)
+ goto err_srcs;
+ for (i = 0; i < frm_cnt; i++) {
+ thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->srcs[i])
+ goto err_srcbuf;
+ }
+
+ thread->dsts = kcalloc(frm_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->dsts)
+ goto err_dsts;
+ for (i = 0; i < frm_cnt; i++) {
+ thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->dsts[i])
+ goto err_dstbuf;
+ }
+
+ set_user_nice(current, 10);
+
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+	while (!kthread_should_stop() &&
+	       !(iterations && total_tests >= iterations)) {
+ struct dma_device *tx_dev = tx_chan->device;
+ struct dma_device *rx_dev = rx_chan->device;
+ struct dma_async_tx_descriptor *txd = NULL;
+ struct dma_async_tx_descriptor *rxd = NULL;
+ struct completion rx_cmp, tx_cmp;
+ unsigned long rx_tmo =
+ msecs_to_jiffies(30000); /* RX takes longer */
+ unsigned long tx_tmo = msecs_to_jiffies(30000);
+ u8 align = 0;
+
+ total_tests++;
+
+ /* honor larger alignment restrictions */
+ align = tx_dev->copy_align;
+ if (rx_dev->copy_align > align)
+ align = rx_dev->copy_align;
+
+ if (1 << align > test_buf_size) {
+ pr_err("%u-byte buffer too small for %d-byte alignment\n",
+ test_buf_size, 1 << align);
+ break;
+ }
+
+ len = test_buf_size;
+ xilinx_vdmatest_init_srcs(thread->srcs, 0, len);
+ xilinx_vdmatest_init_dsts(thread->dsts, 0, len);
+
+ /* Zero out configuration */
+ memset(&config, 0, sizeof(struct xilinx_vdma_config));
+
+ /* Set up hardware configuration information */
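+		/*
+		 * Assumed xilinx_vdma_config semantics: frm_cnt_en enables
+		 * the frame-count interrupt, coalesc sets the interrupt
+		 * coalescing threshold, and park parks the channel on a
+		 * single frame buffer.
+		 */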
+ config.frm_cnt_en = 1;
+ config.coalesc = frm_cnt * 10;
+ config.park = 1;
+ xilinx_vdma_channel_set_config(tx_chan, &config);
+
+ xilinx_vdma_channel_set_config(rx_chan, &config);
+
+ for (i = 0; i < frm_cnt; i++) {
+			dma_dsts[i] = dma_map_single(rx_dev->dev,
+						thread->dsts[i],
+						test_buf_size,
+						DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(rx_dev->dev, dma_dsts[i])) {
+ failed_tests++;
+ continue;
+ }
+ xt.dst_start = dma_dsts[i];
+ xt.dir = DMA_DEV_TO_MEM;
+ xt.numf = vsize;
+ xt.sgl[0].size = hsize;
+ xt.sgl[0].icg = 0;
+ xt.frame_size = 1;
+			rxd = rx_dev->device_prep_interleaved_dma(rx_chan,
+								&xt, flags);
+			if (!rxd)
+				break;
+			rx_cookie = rxd->tx_submit(rxd);
+ }
+
+ for (i = 0; i < frm_cnt; i++) {
+ u8 *buf = thread->srcs[i];
+
+			dma_srcs[i] = dma_map_single(tx_dev->dev, buf, len,
+						     DMA_TO_DEVICE);
+
+ if (dma_mapping_error(tx_dev->dev, dma_srcs[i])) {
+ failed_tests++;
+ continue;
+ }
+ xt.src_start = dma_srcs[i];
+ xt.dir = DMA_MEM_TO_DEV;
+ xt.numf = vsize;
+ xt.sgl[0].size = hsize;
+ xt.sgl[0].icg = 0;
+ xt.frame_size = 1;
+			txd = tx_dev->device_prep_interleaved_dma(tx_chan,
+								&xt, flags);
+			if (!txd)
+				break;
+			tx_cookie = txd->tx_submit(txd);
+ }
+
+ if (!rxd || !txd) {
+ for (i = 0; i < frm_cnt; i++)
+				dma_unmap_single(tx_dev->dev, dma_srcs[i], len,
+						 DMA_TO_DEVICE);
+			for (i = 0; i < frm_cnt; i++)
+				dma_unmap_single(rx_dev->dev, dma_dsts[i],
+						 test_buf_size,
+						 DMA_FROM_DEVICE);
+			pr_warn("%s: #%u: prep error with len=0x%x\n",
+ thread_name, total_tests - 1, len);
+ msleep(100);
+ failed_tests++;
+ continue;
+ }
+
+ init_completion(&rx_cmp);
+ rxd->callback = xilinx_vdmatest_slave_rx_callback;
+ rxd->callback_param = &rx_cmp;
+
+ init_completion(&tx_cmp);
+ txd->callback = xilinx_vdmatest_slave_tx_callback;
+ txd->callback_param = &tx_cmp;
+
+ if (dma_submit_error(rx_cookie) ||
+ dma_submit_error(tx_cookie)) {
+			pr_warn("%s: #%u: submit error %d/%d with len=0x%x\n",
+ thread_name, total_tests - 1,
+ rx_cookie, tx_cookie, len);
+ msleep(100);
+ failed_tests++;
+ continue;
+ }
+ dma_async_issue_pending(tx_chan);
+ dma_async_issue_pending(rx_chan);
+
+ tx_tmo = wait_for_completion_timeout(&tx_cmp, tx_tmo);
+
+ status = dma_async_is_tx_complete(tx_chan, tx_cookie,
+ NULL, NULL);
+
+ if (tx_tmo == 0) {
+ pr_warn("%s: #%u: tx test timed out\n",
+ thread_name, total_tests - 1);
+ failed_tests++;
+ continue;
+ } else if (status != DMA_COMPLETE) {
+			pr_warn("%s: #%u: tx got completion callback, but status is '%s'\n",
+				thread_name, total_tests - 1,
+				status == DMA_ERROR ? "error" :
+				"in progress");
+ failed_tests++;
+ continue;
+ }
+
+ rx_tmo = wait_for_completion_timeout(&rx_cmp, rx_tmo);
+ status = dma_async_is_tx_complete(rx_chan, rx_cookie,
+ NULL, NULL);
+
+ if (rx_tmo == 0) {
+ pr_warn("%s: #%u: rx test timed out\n",
+ thread_name, total_tests - 1);
+ failed_tests++;
+ continue;
+ } else if (status != DMA_COMPLETE) {
+			pr_warn("%s: #%u: rx got completion callback, but status is '%s'\n",
+				thread_name, total_tests - 1,
+				status == DMA_ERROR ? "error" :
+				"in progress");
+ failed_tests++;
+ continue;
+ }
+
+		/* Unmap by myself */
+		for (i = 0; i < frm_cnt; i++) {
+			dma_unmap_single(tx_dev->dev, dma_srcs[i], len,
+					 DMA_TO_DEVICE);
+			dma_unmap_single(rx_dev->dev, dma_dsts[i],
+					 test_buf_size, DMA_FROM_DEVICE);
+		}
+
+ error_count = 0;
+
+ pr_debug("%s: verifying source buffer...\n", thread_name);
+ error_count += xilinx_vdmatest_verify(thread->srcs, 0, 0,
+ 0, PATTERN_SRC, true);
+ error_count += xilinx_vdmatest_verify(thread->srcs, 0,
+ len, 0, PATTERN_SRC | PATTERN_COPY, true);
+ error_count += xilinx_vdmatest_verify(thread->srcs, len,
+ test_buf_size, len, PATTERN_SRC, true);
+
+ pr_debug("%s: verifying dest buffer...\n",
+ thread->task->comm);
+ error_count += xilinx_vdmatest_verify(thread->dsts, 0, 0,
+ 0, PATTERN_DST, false);
+ error_count += xilinx_vdmatest_verify(thread->dsts, 0,
+ len, 0, PATTERN_SRC | PATTERN_COPY, false);
+ error_count += xilinx_vdmatest_verify(thread->dsts, len,
+ test_buf_size, len, PATTERN_DST, false);
+
+ if (error_count) {
+ pr_warn("%s: #%u: %u errors with len=0x%x\n",
+ thread_name, total_tests - 1, error_count, len);
+ failed_tests++;
+ } else {
+ pr_debug("%s: #%u: No errors with len=0x%x\n",
+ thread_name, total_tests - 1, len);
+ }
+ }
+
+ ret = 0;
+ for (i = 0; thread->dsts[i]; i++)
+ kfree(thread->dsts[i]);
+err_dstbuf:
+ kfree(thread->dsts);
+err_dsts:
+ for (i = 0; thread->srcs[i]; i++)
+ kfree(thread->srcs[i]);
+err_srcbuf:
+ kfree(thread->srcs);
+err_srcs:
+ pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
+ thread_name, total_tests, failed_tests, ret);
+
+ thread->done = true;
+ wake_up(&thread_wait);
+
+ return ret;
+}
+
+static void xilinx_vdmatest_cleanup_channel(struct xilinx_vdmatest_chan *dtc)
+{
+ struct xilinx_vdmatest_slave_thread *thread, *_thread;
+ int ret;
+
+ list_for_each_entry_safe(thread, _thread,
+ &dtc->threads, node) {
+ ret = kthread_stop(thread->task);
+ pr_info("xilinx_vdmatest: thread %s exited with status %d\n",
+ thread->task->comm, ret);
+ list_del(&thread->node);
+ put_task_struct(thread->task);
+ kfree(thread);
+ }
+ kfree(dtc);
+}
+
+static int
+xilinx_vdmatest_add_slave_threads(struct xilinx_vdmatest_chan *tx_dtc,
+ struct xilinx_vdmatest_chan *rx_dtc)
+{
+ struct xilinx_vdmatest_slave_thread *thread;
+ struct dma_chan *tx_chan = tx_dtc->chan;
+ struct dma_chan *rx_chan = rx_dtc->chan;
+
+ thread = kzalloc(sizeof(struct xilinx_vdmatest_slave_thread),
+ GFP_KERNEL);
+	if (!thread) {
+		pr_warn("xilinx_vdmatest: No memory for slave thread %s-%s\n",
+			dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+		return -ENOMEM;
+	}
+
+ thread->tx_chan = tx_chan;
+ thread->rx_chan = rx_chan;
+ thread->type = (enum dma_transaction_type)DMA_SLAVE;
+
+ /* This barrier ensures the DMA channels in the 'thread'
+ * are initialized
+ */
+ smp_wmb();
+ thread->task = kthread_run(xilinx_vdmatest_slave_func, thread, "%s-%s",
+ dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+	if (IS_ERR(thread->task)) {
+		int ret = PTR_ERR(thread->task);
+
+		pr_warn("xilinx_vdmatest: Failed to run thread %s-%s\n",
+			dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+		kfree(thread);
+		return ret;
+	}
+
+ get_task_struct(thread->task);
+ list_add_tail(&thread->node, &tx_dtc->threads);
+
+ /* Added one thread with 2 channels */
+ return 1;
+}
+
+static int xilinx_vdmatest_add_slave_channels(struct dma_chan *tx_chan,
+ struct dma_chan *rx_chan)
+{
+ struct xilinx_vdmatest_chan *tx_dtc, *rx_dtc;
+ unsigned int thread_count = 0;
+
+ tx_dtc = kmalloc(sizeof(struct xilinx_vdmatest_chan), GFP_KERNEL);
+ if (!tx_dtc)
+ return -ENOMEM;
+
+ rx_dtc = kmalloc(sizeof(struct xilinx_vdmatest_chan), GFP_KERNEL);
+	if (!rx_dtc) {
+		kfree(tx_dtc);
+		return -ENOMEM;
+	}
+
+ tx_dtc->chan = tx_chan;
+ rx_dtc->chan = rx_chan;
+ INIT_LIST_HEAD(&tx_dtc->threads);
+ INIT_LIST_HEAD(&rx_dtc->threads);
+
+ xilinx_vdmatest_add_slave_threads(tx_dtc, rx_dtc);
+ thread_count += 1;
+
+ pr_info("xilinx_vdmatest: Started %u threads using %s %s\n",
+ thread_count, dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+
+ list_add_tail(&tx_dtc->node, &xilinx_vdmatest_channels);
+ list_add_tail(&rx_dtc->node, &xilinx_vdmatest_channels);
+ nr_channels += 2;
+
+ if (iterations)
+ wait_event(thread_wait, !is_threaded_test_run(tx_dtc, rx_dtc));
+
+ return 0;
+}
+
+static int xilinx_vdmatest_probe(struct platform_device *pdev)
+{
+ struct dma_chan *chan, *rx_chan;
+ int err;
+
+ err = of_property_read_u32(pdev->dev.of_node,
+ "xlnx,num-fstores", &frm_cnt);
+ if (err < 0) {
+ pr_err("xilinx_vdmatest: missing xlnx,num-fstores property\n");
+ return err;
+ }
+
+	chan = dma_request_chan(&pdev->dev, "vdma0");
+	if (IS_ERR(chan)) {
+		err = PTR_ERR(chan);
+		if (err != -EPROBE_DEFER)
+			pr_err("xilinx_vdmatest: No Tx channel\n");
+		return err;
+	}
+
+	rx_chan = dma_request_chan(&pdev->dev, "vdma1");
+	if (IS_ERR(rx_chan)) {
+		err = PTR_ERR(rx_chan);
+		if (err != -EPROBE_DEFER)
+			pr_err("xilinx_vdmatest: No Rx channel\n");
+ goto free_tx;
+ }
+
+ err = xilinx_vdmatest_add_slave_channels(chan, rx_chan);
+ if (err) {
+ pr_err("xilinx_vdmatest: Unable to add channels\n");
+ goto free_rx;
+ }
+ return 0;
+
+free_rx:
+ dma_release_channel(rx_chan);
+free_tx:
+ dma_release_channel(chan);
+
+ return err;
+}
+
+static int xilinx_vdmatest_remove(struct platform_device *pdev)
+{
+ struct xilinx_vdmatest_chan *dtc, *_dtc;
+ struct dma_chan *chan;
+
+ list_for_each_entry_safe(dtc, _dtc, &xilinx_vdmatest_channels, node) {
+ list_del(&dtc->node);
+ chan = dtc->chan;
+ xilinx_vdmatest_cleanup_channel(dtc);
+ pr_info("xilinx_vdmatest: dropped channel %s\n",
+ dma_chan_name(chan));
+ dmaengine_terminate_async(chan);
+ dma_release_channel(chan);
+ }
+ return 0;
+}
+
+static const struct of_device_id xilinx_vdmatest_of_ids[] = {
+ { .compatible = "xlnx,axi-vdma-test-1.00.a",},
+ {}
+};
+
+static struct platform_driver xilinx_vdmatest_driver = {
+ .driver = {
+ .name = "xilinx_vdmatest",
+ .of_match_table = xilinx_vdmatest_of_ids,
+ },
+ .probe = xilinx_vdmatest_probe,
+ .remove = xilinx_vdmatest_remove,
+};
+
+module_platform_driver(xilinx_vdmatest_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx AXI VDMA Test Client");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 7729b8d22553..cdaf4bd2258a 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -119,7 +119,7 @@
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
/* HW specific definitions */
-#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
+#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
@@ -173,18 +173,6 @@
#define XILINX_DMA_NUM_DESCS 255
#define XILINX_DMA_NUM_APP_WORDS 5
-/* Multi-Channel DMA Descriptor offsets*/
-#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20)
-#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20)
-
-/* Multi-Channel DMA Masks/Shifts */
-#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0)
-#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0)
-#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19)
-#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0)
-#define XILINX_DMA_BD_STRIDE_SHIFT 0
-#define XILINX_DMA_BD_VSIZE_SHIFT 19
-
/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR 0x18
#define XILINX_CDMA_REG_DSTADDR 0x20
@@ -221,8 +209,8 @@ struct xilinx_vdma_desc_hw {
* @next_desc_msb: MSB of Next Descriptor Pointer @0x04
* @buf_addr: Buffer address @0x08
* @buf_addr_msb: MSB of Buffer address @0x0C
- * @mcdma_control: Control field for mcdma @0x10
- * @vsize_stride: Vsize and Stride field for mcdma @0x14
+ * @reserved1: Reserved @0x10
+ * @reserved2: Reserved @0x14
* @control: Control field @0x18
* @status: Status field @0x1C
* @app: APP Fields @0x20 - 0x30
@@ -232,8 +220,8 @@ struct xilinx_axidma_desc_hw {
u32 next_desc_msb;
u32 buf_addr;
u32 buf_addr_msb;
- u32 mcdma_control;
- u32 vsize_stride;
+ u32 reserved1;
+ u32 reserved2;
u32 control;
u32 status;
u32 app[XILINX_DMA_NUM_APP_WORDS];
@@ -303,12 +291,16 @@ struct xilinx_cdma_tx_segment {
* @segments: TX segments list
* @node: Node in the channel descriptors list
* @cyclic: Check for cyclic transfers.
+ * @err: Whether the descriptor has an error.
+ * @residue: Residue of the completed descriptor
*/
struct xilinx_dma_tx_descriptor {
struct dma_async_tx_descriptor async_tx;
struct list_head segments;
struct list_head node;
bool cyclic;
+ bool err;
+ u32 residue;
};
/**
@@ -340,14 +332,12 @@ struct xilinx_dma_tx_descriptor {
* @desc_pendingcount: Descriptor pending count
* @ext_addr: Indicates 64 bit addressing is supported by dma channel
* @desc_submitcount: Descriptor h/w submitted count
- * @residue: Residue for AXI DMA
* @seg_v: Statically allocated segments base
* @seg_p: Physical allocated segments base
* @cyclic_seg_v: Statically allocated segment base for cyclic transfers
* @cyclic_seg_p: Physical allocated segments base for cyclic dma
* @start_transfer: Differentiate b/w DMA IP's transfer
* @stop_transfer: Differentiate b/w DMA IP's quiesce
- * @tdest: TDEST value for mcdma
* @has_vflip: S2MM vertical flip
*/
struct xilinx_dma_chan {
@@ -378,14 +368,12 @@ struct xilinx_dma_chan {
u32 desc_pendingcount;
bool ext_addr;
u32 desc_submitcount;
- u32 residue;
struct xilinx_axidma_tx_segment *seg_v;
dma_addr_t seg_p;
struct xilinx_axidma_tx_segment *cyclic_seg_v;
dma_addr_t cyclic_seg_p;
void (*start_transfer)(struct xilinx_dma_chan *chan);
int (*stop_transfer)(struct xilinx_dma_chan *chan);
- u16 tdest;
bool has_vflip;
};
@@ -416,7 +404,6 @@ struct xilinx_dma_config {
* @dev: Device Structure
* @common: DMA device structure
* @chan: Driver specific DMA channel
- * @mcdma: Specifies whether Multi-Channel is present or not
* @flush_on_fsync: Flush on frame sync
* @ext_addr: Indicates 64 bit addressing is supported by dma device
* @pdev: Platform device structure pointer
@@ -435,7 +422,6 @@ struct xilinx_dma_device {
struct device *dev;
struct dma_device common;
struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
- bool mcdma;
u32 flush_on_fsync;
bool ext_addr;
struct platform_device *pdev;
@@ -615,6 +601,9 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
}
spin_unlock_irqrestore(&chan->lock, flags);
+ if (!segment)
+ dev_dbg(chan->dev, "Could not find free tx segment\n");
+
return segment;
}
@@ -792,6 +781,51 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
}
/**
+ * xilinx_dma_get_residue - Compute residue for a given descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ *
+ * Return: The number of residue bytes for the descriptor.
+ */
+static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+ struct xilinx_dma_tx_descriptor *desc)
+{
+ struct xilinx_cdma_tx_segment *cdma_seg;
+ struct xilinx_axidma_tx_segment *axidma_seg;
+ struct xilinx_cdma_desc_hw *cdma_hw;
+ struct xilinx_axidma_desc_hw *axidma_hw;
+ struct list_head *entry;
+ u32 residue = 0;
+
+	/*
+ * VDMA and simple mode do not support residue reporting, so the
+ * residue field will always be 0.
+ */
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA || !chan->has_sg)
+ return residue;
+
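+	/*
+	 * hw->control holds each segment's programmed length and
+	 * hw->status its completed byte count, so their difference,
+	 * masked to the valid length bits, is the segment's residue.
+	 */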
+ list_for_each(entry, &desc->segments) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ cdma_seg = list_entry(entry,
+ struct xilinx_cdma_tx_segment,
+ node);
+ cdma_hw = &cdma_seg->hw;
+ residue += (cdma_hw->control - cdma_hw->status) &
+ chan->xdev->max_buffer_len;
+ } else {
+ axidma_seg = list_entry(entry,
+ struct xilinx_axidma_tx_segment,
+ node);
+ axidma_hw = &axidma_seg->hw;
+ residue += (axidma_hw->control - axidma_hw->status) &
+ chan->xdev->max_buffer_len;
+ }
+ }
+
+ return residue;
+}
+
+/**
* xilinx_dma_chan_handle_cyclic - Cyclic dma callback
* @chan: Driver specific dma channel
* @desc: dma transaction descriptor
@@ -825,7 +859,7 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
list_for_each_entry_safe(desc, next, &chan->done_list, node) {
- struct dmaengine_desc_callback cb;
+ struct dmaengine_result result;
if (desc->cyclic) {
xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
@@ -835,14 +869,22 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
/* Remove from the list of running transactions */
list_del(&desc->node);
- /* Run the link descriptor callback function */
- dmaengine_desc_get_callback(&desc->async_tx, &cb);
- if (dmaengine_desc_callback_valid(&cb)) {
- spin_unlock_irqrestore(&chan->lock, flags);
- dmaengine_desc_callback_invoke(&cb, NULL);
- spin_lock_irqsave(&chan->lock, flags);
+ if (unlikely(desc->err)) {
+ if (chan->direction == DMA_DEV_TO_MEM)
+ result.result = DMA_TRANS_READ_FAILED;
+ else
+ result.result = DMA_TRANS_WRITE_FAILED;
+ } else {
+ result.result = DMA_TRANS_NOERROR;
}
+ result.residue = desc->residue;
+
+ /* Run the link descriptor callback function */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
+ spin_lock_irqsave(&chan->lock, flags);
+
/* Run any dependencies, then free the descriptor */
dma_run_dependencies(&desc->async_tx);
xilinx_dma_free_tx_descriptor(chan, desc);
@@ -1012,33 +1054,22 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment;
- struct xilinx_axidma_desc_hw *hw;
enum dma_status ret;
unsigned long flags;
- u32 residue = 0;
ret = dma_cookie_status(dchan, cookie, txstate);
if (ret == DMA_COMPLETE || !txstate)
return ret;
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- spin_lock_irqsave(&chan->lock, flags);
+ spin_lock_irqsave(&chan->lock, flags);
- desc = list_last_entry(&chan->active_list,
- struct xilinx_dma_tx_descriptor, node);
- if (chan->has_sg) {
- list_for_each_entry(segment, &desc->segments, node) {
- hw = &segment->hw;
- residue += (hw->control - hw->status) &
- chan->xdev->max_buffer_len;
- }
- }
- spin_unlock_irqrestore(&chan->lock, flags);
+ desc = list_last_entry(&chan->active_list,
+ struct xilinx_dma_tx_descriptor, node);
+ desc->residue = xilinx_dma_get_residue(chan, desc);
- chan->residue = residue;
- dma_set_residue(txstate, chan->residue);
- }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ dma_set_residue(txstate, desc->residue);
return ret;
}
@@ -1057,8 +1088,9 @@ static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
/* Wait for the hardware to halt */
return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
- val & XILINX_DMA_DMASR_HALTED, 0,
- XILINX_DMA_LOOP_COUNT);
+				       val & (XILINX_DMA_DMASR_IDLE |
+ XILINX_DMA_DMASR_HALTED),
+ 0, XILINX_DMA_LOOP_COUNT);
}
/**
@@ -1288,10 +1320,10 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
if (chan->err)
return;
- if (list_empty(&chan->pending_list))
+ if (!chan->idle)
return;
- if (!chan->idle)
+ if (list_empty(&chan->pending_list))
return;
head_desc = list_first_entry(&chan->pending_list,
@@ -1310,53 +1342,23 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
}
- if (chan->has_sg && !chan->xdev->mcdma)
+ if (chan->has_sg)
xilinx_write(chan, XILINX_DMA_REG_CURDESC,
head_desc->async_tx.phys);
- if (chan->has_sg && chan->xdev->mcdma) {
- if (chan->direction == DMA_MEM_TO_DEV) {
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- } else {
- if (!chan->tdest) {
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- } else {
- dma_ctrl_write(chan,
- XILINX_DMA_MCRX_CDESC(chan->tdest),
- head_desc->async_tx.phys);
- }
- }
- }
-
xilinx_dma_start(chan);
if (chan->err)
return;
/* Start the transfer */
- if (chan->has_sg && !chan->xdev->mcdma) {
+ if (chan->has_sg) {
if (chan->cyclic)
xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
chan->cyclic_seg_v->phys);
else
xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
tail_segment->phys);
- } else if (chan->has_sg && chan->xdev->mcdma) {
- if (chan->direction == DMA_MEM_TO_DEV) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- if (!chan->tdest) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- dma_ctrl_write(chan,
- XILINX_DMA_MCRX_TDESC(chan->tdest),
- tail_segment->phys);
- }
- }
} else {
struct xilinx_axidma_tx_segment *segment;
struct xilinx_axidma_desc_hw *hw;
@@ -1408,6 +1410,9 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
return;
list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+ desc->residue = xilinx_dma_get_residue(chan, desc);
+ desc->err = chan->err;
+
list_del(&desc->node);
if (!desc->cyclic)
dma_cookie_complete(&desc->async_tx);
@@ -1579,7 +1584,12 @@ static void append_desc_queue(struct xilinx_dma_chan *chan,
*/
append:
list_add_tail(&desc->node, &chan->pending_list);
- chan->desc_pendingcount++;
+ /*
+ * In CDMA each segment is considered as a descriptor, so increment
+ * pending count in prep_slave_* implementation.
+ */
+ if (chan->xdev->dma_config->dmatype != XDMA_TYPE_CDMA)
+ chan->desc_pendingcount++;
if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
&& unlikely(chan->desc_pendingcount > chan->num_frms)) {
@@ -1761,6 +1771,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
/* Insert the segment into the descriptor segments list. */
list_add_tail(&segment->node, &desc->segments);
+ chan->desc_pendingcount++;
desc->async_tx.phys = segment->phys;
hw->next_desc = segment->phys;
@@ -1773,6 +1784,117 @@ error:
}
/**
+ * xilinx_cdma_prep_sg - prepare descriptors for a memory sg transaction
+ * @dchan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_cdma_prep_sg(
+ struct dma_chan *dchan, struct scatterlist *dst_sg,
+ unsigned int dst_sg_len, struct scatterlist *src_sg,
+ unsigned int src_sg_len, unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_cdma_tx_segment *segment, *prev = NULL;
+ struct xilinx_cdma_desc_hw *hw;
+ size_t len, dst_avail, src_avail;
+ dma_addr_t dma_dst, dma_src;
+
+ if (unlikely(dst_sg_len == 0 || src_sg_len == 0))
+ return NULL;
+
+ if (unlikely(dst_sg == NULL || src_sg == NULL))
+ return NULL;
+
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+ /*
+ * loop until there is either no more source or no more destination
+ * scatterlist entry
+ */
+ while (true) {
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, chan->xdev->max_buffer_len);
+ if (len == 0)
+ goto fetch;
+
+ /* Allocate the link descriptor from DMA pool */
+ segment = xilinx_cdma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
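+		/*
+		 * A chunk may begin mid-entry: offset each address by the
+		 * bytes already consumed from that entry (sg_dma_len()
+		 * minus the remaining *_avail count).
+		 */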
+ dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+ dst_avail;
+ dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+ src_avail;
+ hw = &segment->hw;
+ hw->control = len;
+ hw->src_addr = dma_src;
+ hw->dest_addr = dma_dst;
+ if (chan->ext_addr) {
+ hw->src_addr_msb = upper_32_bits(dma_src);
+ hw->dest_addr_msb = upper_32_bits(dma_dst);
+ }
+
+ if (prev)
+ prev->hw.next_desc = segment->phys;
+
+ prev = segment;
+ dst_avail -= len;
+ src_avail -= len;
+ list_add_tail(&segment->node, &desc->segments);
+ chan->desc_pendingcount++;
+
+fetch:
+ /* Fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ if (dst_sg_len == 0)
+ break;
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+ dst_sg_len--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+ /* Fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ if (src_sg_len == 0)
+ break;
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
+ src_sg_len--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ /* Link the last hardware descriptor with the first. */
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_cdma_tx_segment, node);
+ desc->async_tx.phys = segment->phys;
+ prev->hw.next_desc = segment->phys;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
+/**
* xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
* @sgl: scatterlist to transfer to/from
@@ -1979,90 +2101,6 @@ error:
}
/**
- * xilinx_dma_prep_interleaved - prepare a descriptor for a
- * DMA_SLAVE transaction
- * @dchan: DMA channel
- * @xt: Interleaved template pointer
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *
-xilinx_dma_prep_interleaved(struct dma_chan *dchan,
- struct dma_interleaved_template *xt,
- unsigned long flags)
-{
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment;
- struct xilinx_axidma_desc_hw *hw;
-
- if (!is_slave_direction(xt->dir))
- return NULL;
-
- if (!xt->numf || !xt->sgl[0].size)
- return NULL;
-
- if (xt->frame_size != 1)
- return NULL;
-
- /* Allocate a transaction descriptor. */
- desc = xilinx_dma_alloc_tx_descriptor(chan);
- if (!desc)
- return NULL;
-
- chan->direction = xt->dir;
- dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
- desc->async_tx.tx_submit = xilinx_dma_tx_submit;
-
- /* Get a free segment */
- segment = xilinx_axidma_alloc_tx_segment(chan);
- if (!segment)
- goto error;
-
- hw = &segment->hw;
-
- /* Fill in the descriptor */
- if (xt->dir != DMA_MEM_TO_DEV)
- hw->buf_addr = xt->dst_start;
- else
- hw->buf_addr = xt->src_start;
-
- hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
- hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
- XILINX_DMA_BD_VSIZE_MASK;
- hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
- XILINX_DMA_BD_STRIDE_MASK;
- hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
-
- /*
- * Insert the segment into the descriptor segments
- * list.
- */
- list_add_tail(&segment->node, &desc->segments);
-
-
- segment = list_first_entry(&desc->segments,
- struct xilinx_axidma_tx_segment, node);
- desc->async_tx.phys = segment->phys;
-
- /* For the last DMA_MEM_TO_DEV transfer, set EOP */
- if (xt->dir == DMA_MEM_TO_DEV) {
- segment->hw.control |= XILINX_DMA_BD_SOP;
- segment = list_last_entry(&desc->segments,
- struct xilinx_axidma_tx_segment,
- node);
- segment->hw.control |= XILINX_DMA_BD_EOP;
- }
-
- return &desc->async_tx;
-
-error:
- xilinx_dma_free_tx_descriptor(chan, desc);
- return NULL;
-}
-
-/**
* xilinx_dma_terminate_all - Halt the channel and free descriptors
* @dchan: Driver specific DMA Channel pointer
*
@@ -2074,16 +2112,17 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
u32 reg;
int err;
- if (chan->cyclic)
- xilinx_dma_chan_reset(chan);
-
- err = chan->stop_transfer(chan);
- if (err) {
- dev_err(chan->dev, "Cannot stop channel %p: %x\n",
- chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
- chan->err = true;
+ if (!chan->cyclic) {
+ err = chan->stop_transfer(chan);
+ if (err) {
+ dev_err(chan->dev, "Cannot stop channel %p: %x\n",
+ chan, dma_ctrl_read(chan,
+ XILINX_DMA_REG_DMASR));
+ chan->err = true;
+ }
}
+ xilinx_dma_chan_reset(chan);
/* Remove and free all of the descriptors in the lists */
chan->terminating = true;
xilinx_dma_free_descriptors(chan);
@@ -2450,7 +2489,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
chan->direction = DMA_MEM_TO_DEV;
chan->id = chan_id;
- chan->tdest = chan_id;
+ xdev->common.directions = BIT(DMA_MEM_TO_DEV);
chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -2467,7 +2506,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
"xlnx,axi-dma-s2mm-channel")) {
chan->direction = DMA_DEV_TO_MEM;
chan->id = chan_id;
- chan->tdest = chan_id - xdev->nr_channels;
+ xdev->common.directions |= BIT(DMA_DEV_TO_MEM);
chan->has_vflip = of_property_read_bool(node,
"xlnx,enable-vert-flip");
if (chan->has_vflip) {
@@ -2555,12 +2594,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
struct device_node *node)
{
- int ret, i;
- u32 nr_channels = 1;
-
- ret = of_property_read_u32(node, "dma-channels", &nr_channels);
- if ((ret < 0) && xdev->mcdma)
- dev_warn(xdev->dev, "missing dma-channels property\n");
+ int i, nr_channels = 1;
for (i = 0; i < nr_channels; i++)
xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
@@ -2626,7 +2660,6 @@ static int xilinx_dma_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct xilinx_dma_device *xdev;
struct device_node *child, *np = pdev->dev.of_node;
- struct resource *io;
u32 num_frames, addr_width, len_width;
int i, err;
@@ -2652,16 +2685,15 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return err;
/* Request and map I/O memory */
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xdev->regs = devm_ioremap_resource(&pdev->dev, io);
- if (IS_ERR(xdev->regs))
- return PTR_ERR(xdev->regs);
-
+ xdev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(xdev->regs)) {
+ err = PTR_ERR(xdev->regs);
+ goto disable_clks;
+ }
/* Retrieve the DMA engine properties from the device tree */
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
if (!of_property_read_u32(node, "xlnx,sg-length-width",
&len_width)) {
if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
@@ -2683,7 +2715,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(xdev->dev,
"missing xlnx,num-fstores property\n");
- return err;
+ goto disable_clks;
}
err = of_property_read_u32(node, "xlnx,flush-fsync",
@@ -2703,7 +2735,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->ext_addr = false;
/* Set the dma mask bits */
- dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+ err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+ if (err < 0) {
+ dev_err(xdev->dev, "DMA mask error %d\n", err);
+ goto disable_clks;
+ }
/* Initialize the DMA engine */
xdev->common.dev = &pdev->dev;
@@ -2714,6 +2750,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
}
+ xdev->common.dst_addr_widths = BIT(addr_width / 8);
+ xdev->common.src_addr_widths = BIT(addr_width / 8);
xdev->common.device_alloc_chan_resources =
xilinx_dma_alloc_chan_resources;
xdev->common.device_free_chan_resources =
@@ -2726,14 +2764,17 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
xdev->common.device_prep_dma_cyclic =
xilinx_dma_prep_dma_cyclic;
- xdev->common.device_prep_interleaved_dma =
- xilinx_dma_prep_interleaved;
- /* Residue calculation is supported by only AXI DMA */
+ /* Residue calculation is supported by only AXI DMA and CDMA */
xdev->common.residue_granularity =
DMA_RESIDUE_GRANULARITY_SEGMENT;
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
+ dma_cap_set(DMA_SG, xdev->common.cap_mask);
xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+ xdev->common.device_prep_dma_sg = xilinx_cdma_prep_sg;
+ /* Residue calculation is supported by only AXI DMA and CDMA */
+ xdev->common.residue_granularity =
+ DMA_RESIDUE_GRANULARITY_SEGMENT;
} else {
xdev->common.device_prep_interleaved_dma =
xilinx_vdma_dma_prep_interleaved;
@@ -2744,8 +2785,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Initialize the channels */
for_each_child_of_node(node, child) {
err = xilinx_dma_child_probe(xdev, child);
- if (err < 0)
- goto disable_clks;
+ if (err < 0) {
+ of_node_put(child);
+ goto error;
+ }
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -2778,12 +2821,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return 0;
-disable_clks:
- xdma_disable_allclks(xdev);
error:
for (i = 0; i < xdev->nr_channels; i++)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);
+disable_clks:
+ xdma_disable_allclks(xdev);
return err;
}
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
new file mode 100644
index 000000000000..72124a52983d
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -0,0 +1,2323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP DPDMA Engine driver
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "../dmaengine.h"
+
+/* DPDMA registers */
+#define XILINX_DPDMA_ERR_CTRL 0x0
+#define XILINX_DPDMA_ISR 0x4
+#define XILINX_DPDMA_IMR 0x8
+#define XILINX_DPDMA_IEN 0xc
+#define XILINX_DPDMA_IDS 0x10
+#define XILINX_DPDMA_INTR_DESC_DONE_MASK (0x3f << 0)
+#define XILINX_DPDMA_INTR_DESC_DONE_SHIFT 0
+#define XILINX_DPDMA_INTR_NO_OSTAND_MASK (0x3f << 6)
+#define XILINX_DPDMA_INTR_NO_OSTAND_SHIFT 6
+#define XILINX_DPDMA_INTR_AXI_ERR_MASK (0x3f << 12)
+#define XILINX_DPDMA_INTR_AXI_ERR_SHIFT 12
+#define XILINX_DPDMA_INTR_DESC_ERR_MASK (0x3f << 18)
+#define XILINX_DPDMA_INTR_DESC_ERR_SHIFT 18
+#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL BIT(24)
+#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL BIT(25)
+#define XILINX_DPDMA_INTR_AXI_4K_CROSS BIT(26)
+#define XILINX_DPDMA_INTR_VSYNC BIT(27)
+#define XILINX_DPDMA_INTR_CHAN_ERR_MASK 0x41000
+#define XILINX_DPDMA_INTR_CHAN_ERR 0xfff000
+#define XILINX_DPDMA_INTR_GLOBAL_ERR 0x7000000
+#define XILINX_DPDMA_INTR_ERR_ALL 0x7fff000
+#define XILINX_DPDMA_INTR_CHAN_MASK 0x41041
+#define XILINX_DPDMA_INTR_GLOBAL_MASK 0xf000000
+#define XILINX_DPDMA_INTR_ALL 0xfffffff
+#define XILINX_DPDMA_EISR 0x14
+#define XILINX_DPDMA_EIMR 0x18
+#define XILINX_DPDMA_EIEN 0x1c
+#define XILINX_DPDMA_EIDS 0x20
+#define XILINX_DPDMA_EINTR_INV_APB BIT(0)
+#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK (0x3f << 1)
+#define XILINX_DPDMA_EINTR_RD_AXI_ERR_SHIFT 1
+#define XILINX_DPDMA_EINTR_PRE_ERR_MASK (0x3f << 7)
+#define XILINX_DPDMA_EINTR_PRE_ERR_SHIFT 7
+#define XILINX_DPDMA_EINTR_CRC_ERR_MASK (0x3f << 13)
+#define XILINX_DPDMA_EINTR_CRC_ERR_SHIFT 13
+#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK (0x3f << 19)
+#define XILINX_DPDMA_EINTR_WR_AXI_ERR_SHIFT 19
+#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK (0x3f << 25)
+#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_SHIFT 25
+#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL BIT(31)
+#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK 0x2082082
+#define XILINX_DPDMA_EINTR_CHAN_ERR 0x7ffffffe
+#define XILINX_DPDMA_EINTR_GLOBAL_ERR 0x80000001
+#define XILINX_DPDMA_EINTR_ALL 0xffffffff
+#define XILINX_DPDMA_CNTL 0x100
+#define XILINX_DPDMA_GBL 0x104
+#define XILINX_DPDMA_GBL_TRIG_SHIFT 0
+#define XILINX_DPDMA_GBL_RETRIG_SHIFT 6
+#define XILINX_DPDMA_ALC0_CNTL 0x108
+#define XILINX_DPDMA_ALC0_STATUS 0x10c
+#define XILINX_DPDMA_ALC0_MAX 0x110
+#define XILINX_DPDMA_ALC0_MIN 0x114
+#define XILINX_DPDMA_ALC0_ACC 0x118
+#define XILINX_DPDMA_ALC0_ACC_TRAN 0x11c
+#define XILINX_DPDMA_ALC1_CNTL 0x120
+#define XILINX_DPDMA_ALC1_STATUS 0x124
+#define XILINX_DPDMA_ALC1_MAX 0x128
+#define XILINX_DPDMA_ALC1_MIN 0x12c
+#define XILINX_DPDMA_ALC1_ACC 0x130
+#define XILINX_DPDMA_ALC1_ACC_TRAN 0x134
+
+/* Channel register */
+#define XILINX_DPDMA_CH_BASE 0x200
+#define XILINX_DPDMA_CH_OFFSET 0x100
+#define XILINX_DPDMA_CH_DESC_START_ADDRE 0x0
+#define XILINX_DPDMA_CH_DESC_START_ADDR 0x4
+#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE 0x8
+#define XILINX_DPDMA_CH_DESC_NEXT_ADDR 0xc
+#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE 0x10
+#define XILINX_DPDMA_CH_PYLD_CUR_ADDR 0x14
+#define XILINX_DPDMA_CH_CNTL 0x18
+#define XILINX_DPDMA_CH_CNTL_ENABLE BIT(0)
+#define XILINX_DPDMA_CH_CNTL_PAUSE BIT(1)
+#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_SHIFT 2
+#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_SHIFT 6
+#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_SHIFT 10
+#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS 11
+#define XILINX_DPDMA_CH_STATUS 0x1c
+#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK (0xf << 21)
+#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_SHIFT 21
+#define XILINX_DPDMA_CH_VDO 0x20
+#define XILINX_DPDMA_CH_PYLD_SZ 0x24
+#define XILINX_DPDMA_CH_DESC_ID 0x28
+
+/* DPDMA descriptor fields */
+#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE (0xa5)
+#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR BIT(8)
+#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE BIT(9)
+#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE BIT(10)
+#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE BIT(18)
+#define XILINX_DPDMA_DESC_CONTROL_LAST BIT(19)
+#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC BIT(20)
+#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME BIT(21)
+#define XILINX_DPDMA_DESC_ID_MASK (0xffff << 0)
+#define XILINX_DPDMA_DESC_ID_SHIFT (0)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK (0x3ffff << 0)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT (0)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK (0x3fff << 18)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT (18)
+#define XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK (0xffff)
+#define XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT (16)
+
+#define XILINX_DPDMA_ALIGN_BYTES 256
+#define XILINX_DPDMA_LINESIZE_ALIGN_BITS 128
+
+#define XILINX_DPDMA_NUM_CHAN 6
+#define XILINX_DPDMA_PAGE_MASK ((1 << 12) - 1)
+#define XILINX_DPDMA_PAGE_SHIFT 12
+
+/**
+ * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
+ * @control: control configuration field
+ * @desc_id: descriptor ID
+ * @xfer_size: transfer size
+ * @hsize_stride: horizontal size and stride
+ * @timestamp_lsb: LSB of time stamp
+ * @timestamp_msb: MSB of time stamp
+ * @addr_ext: upper 16 bit of 48 bit address (next_desc and src_addr)
+ * @next_desc: next descriptor 32 bit address
+ * @src_addr: payload source address (lower 32 bit of 1st 4KB page)
+ * @addr_ext_23: upper 16 bit of 48 bit address (src_addr2 and src_addr3)
+ * @addr_ext_45: upper 16 bit of 48 bit address (src_addr4 and src_addr5)
+ * @src_addr2: payload source address (lower 32 bit of 2nd 4KB page)
+ * @src_addr3: payload source address (lower 32 bit of 3rd 4KB page)
+ * @src_addr4: payload source address (lower 32 bit of 4th 4KB page)
+ * @src_addr5: payload source address (lower 32 bit of 5th 4KB page)
+ * @crc: descriptor CRC
+ */
+struct xilinx_dpdma_hw_desc {
+ u32 control;
+ u32 desc_id;
+ u32 xfer_size;
+ u32 hsize_stride;
+ u32 timestamp_lsb;
+ u32 timestamp_msb;
+ u32 addr_ext;
+ u32 next_desc;
+ u32 src_addr;
+ u32 addr_ext_23;
+ u32 addr_ext_45;
+ u32 src_addr2;
+ u32 src_addr3;
+ u32 src_addr4;
+ u32 src_addr5;
+ u32 crc;
+} __aligned(XILINX_DPDMA_ALIGN_BYTES);
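+
+/*
+ * Example 48-bit address split (per the field descriptions above,
+ * assuming the extension occupies one 16-bit half of @addr_ext): a
+ * src_addr of 0x123456789abc is stored as 0x56789abc in @src_addr with
+ * the upper 16 bits, 0x1234, placed in the extension field.
+ */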
+
+/**
+ * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
+ * @hw: DPDMA hardware descriptor
+ * @node: list node for software descriptors
+ * @phys: physical address of the software descriptor
+ */
+struct xilinx_dpdma_sw_desc {
+ struct xilinx_dpdma_hw_desc hw;
+ struct list_head node;
+ dma_addr_t phys;
+};
+
+/**
+ * enum xilinx_dpdma_tx_desc_status - DPDMA tx descriptor status
+ * @PREPARED: descriptor is prepared for transaction
+ * @ACTIVE: transaction is in progress or has completed successfully
+ * @ERRORED: descriptor generates some errors
+ */
+enum xilinx_dpdma_tx_desc_status {
+ PREPARED,
+ ACTIVE,
+ ERRORED
+};
+
+/**
+ * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
+ * @async_tx: DMA async transaction descriptor
+ * @descriptors: list of software descriptors
+ * @node: list node for transaction descriptors
+ * @status: tx descriptor status
+ * @done_cnt: number of complete notification to deliver
+ */
+struct xilinx_dpdma_tx_desc {
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head descriptors;
+ struct list_head node;
+ enum xilinx_dpdma_tx_desc_status status;
+ unsigned int done_cnt;
+};
+
+/**
+ * enum xilinx_dpdma_chan_id - DPDMA channel ID
+ * @VIDEO0: 1st video channel
+ * @VIDEO1: 2nd video channel for multi-plane YUV formats
+ * @VIDEO2: 3rd video channel for multi-plane YUV formats
+ * @GRAPHICS: graphics channel
+ * @AUDIO0: 1st audio channel
+ * @AUDIO1: 2nd audio channel
+ */
+enum xilinx_dpdma_chan_id {
+ VIDEO0,
+ VIDEO1,
+ VIDEO2,
+ GRAPHICS,
+ AUDIO0,
+ AUDIO1
+};
+
+/**
+ * enum xilinx_dpdma_chan_status - DPDMA channel status
+ * @IDLE: idle state
+ * @STREAMING: actively streaming state
+ */
+enum xilinx_dpdma_chan_status {
+ IDLE,
+ STREAMING
+};
+
+/*
+ * DPDMA descriptor placement
+ * --------------------------
+ * A DPDMA descriptor's lifetime is described by the following placements:
+ *
+ * allocated_desc -> submitted_desc -> pending_desc -> active_desc -> done_list
+ *
+ * Transitions are triggered as follows:
+ *
+ * -> allocated_desc : a descriptor allocation
+ * allocated_desc -> submitted_desc: a descriptor submission
+ * submitted_desc -> pending_desc: a request to issue the pending descriptor
+ * pending_desc -> active_desc: VSYNC intr when a desc is scheduled to DPDMA
+ * active_desc -> done_list: VSYNC intr when DPDMA switches to a new desc
+ */
+
+/**
+ * struct xilinx_dpdma_chan - DPDMA channel
+ * @common: generic dma channel structure
+ * @reg: register base address
+ * @id: channel ID
+ * @wait_to_stop: queue to wait for outstanding transactions before stopping
+ * @status: channel status
+ * @first_frame: flag for the first frame of stream
+ * @video_group: flag if multi-channel operation is needed for video channels
+ * @lock: lock to access struct xilinx_dpdma_chan
+ * @desc_pool: descriptor allocation pool
+ * @done_task: done IRQ bottom half handler
+ * @err_task: error IRQ bottom half handler
+ * @allocated_desc: allocated descriptor
+ * @submitted_desc: submitted descriptor
+ * @pending_desc: pending descriptor to be scheduled in next period
+ * @active_desc: descriptor that the DPDMA channel is active on
+ * @done_list: done descriptor list
+ * @xdev: DPDMA device
+ */
+struct xilinx_dpdma_chan {
+ struct dma_chan common;
+ void __iomem *reg;
+ enum xilinx_dpdma_chan_id id;
+
+ wait_queue_head_t wait_to_stop;
+ enum xilinx_dpdma_chan_status status;
+ bool first_frame;
+ bool video_group;
+
+ spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */
+ struct dma_pool *desc_pool;
+ struct tasklet_struct done_task;
+ struct tasklet_struct err_task;
+
+ struct xilinx_dpdma_tx_desc *allocated_desc;
+ struct xilinx_dpdma_tx_desc *submitted_desc;
+ struct xilinx_dpdma_tx_desc *pending_desc;
+ struct xilinx_dpdma_tx_desc *active_desc;
+ struct list_head done_list;
+
+ struct xilinx_dpdma_device *xdev;
+};
+
+/**
+ * struct xilinx_dpdma_device - DPDMA device
+ * @common: generic dma device structure
+ * @reg: register base address
+ * @dev: generic device structure
+ * @axi_clk: axi clock
+ * @chan: DPDMA channels
+ * @ext_addr: flag for 64 bit system (48 bit addressing)
+ * @desc_addr: descriptor addressing callback (32 bit vs 64 bit)
+ */
+struct xilinx_dpdma_device {
+ struct dma_device common;
+ void __iomem *reg;
+ struct device *dev;
+
+ struct clk *axi_clk;
+ struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];
+
+ bool ext_addr;
+ void (*desc_addr)(struct xilinx_dpdma_sw_desc *sw_desc,
+ struct xilinx_dpdma_sw_desc *prev,
+ dma_addr_t dma_addr[], unsigned int num_src_addr);
+};
+
+#ifdef CONFIG_XILINX_DPDMA_DEBUG_FS
+#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE 32
+#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR "65535"
+#define IN_RANGE(x, min, max) ({ \
+ typeof(x) _x = (x); \
+ _x >= (min) && _x <= (max); })
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+enum xilinx_dpdma_testcases {
+ DPDMA_TC_INTR_DONE,
+ DPDMA_TC_NONE
+};
+
+struct xilinx_dpdma_debugfs {
+ enum xilinx_dpdma_testcases testcase;
+ u16 xilinx_dpdma_intr_done_count;
+ enum xilinx_dpdma_chan_id chan_id;
+};
+
+static struct xilinx_dpdma_debugfs dpdma_debugfs;
+
+struct xilinx_dpdma_debugfs_request {
+ const char *req;
+ enum xilinx_dpdma_testcases tc;
+ ssize_t (*read_handler)(char **kern_buff);
+ ssize_t (*write_handler)(char **cmd);
+};
+
+static void xilinx_dpdma_debugfs_intr_done_count_incr(int chan_id)
+{
+ if (chan_id == dpdma_debugfs.chan_id)
+ dpdma_debugfs.xilinx_dpdma_intr_done_count++;
+}
+
+static s64 xilinx_dpdma_debugfs_argument_value(char *arg)
+{
+ s64 value;
+
+ if (!arg)
+ return -1;
+
+ if (!kstrtos64(arg, 0, &value))
+ return value;
+
+ return -1;
+}
+
+static ssize_t
+xilinx_dpdma_debugfs_desc_done_intr_write(char **dpdma_test_arg)
+{
+ char *arg;
+ char *arg_chan_id;
+ s64 id;
+
+ arg = strsep(dpdma_test_arg, " ");
+ if (strncasecmp(arg, "start", 5) != 0)
+ return -EINVAL;
+
+ arg_chan_id = strsep(dpdma_test_arg, " ");
+ id = xilinx_dpdma_debugfs_argument_value(arg_chan_id);
+
+ if (id < 0 || !IN_RANGE(id, VIDEO0, AUDIO1))
+ return -EINVAL;
+
+ dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
+ dpdma_debugfs.xilinx_dpdma_intr_done_count = 0;
+ dpdma_debugfs.chan_id = id;
+
+ return 0;
+}
+
+static ssize_t xilinx_dpdma_debugfs_desc_done_intr_read(char **kern_buff)
+{
+ size_t out_str_len;
+
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+ out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
+ out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE - 1,
+ out_str_len);
+ /* + 1 gives snprintf() room for the terminating NUL */
+ snprintf(*kern_buff, out_str_len + 1, "%d",
+ dpdma_debugfs.xilinx_dpdma_intr_done_count);
+
+ return 0;
+}
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
+ {"DESCRIPTOR_DONE_INTR", DPDMA_TC_INTR_DONE,
+ xilinx_dpdma_debugfs_desc_done_intr_read,
+ xilinx_dpdma_debugfs_desc_done_intr_write},
+};
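+
+/*
+ * Example usage from user space (hypothetical session; assumes debugfs is
+ * mounted at /sys/kernel/debug and channel 0 is a video channel):
+ *
+ * echo "DESCRIPTOR_DONE_INTR start 0" > /sys/kernel/debug/dpdma/testcase
+ * cat /sys/kernel/debug/dpdma/testcase
+ *
+ * The read reports the number of done interrupts counted on that channel.
+ */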
+
+static ssize_t xilinx_dpdma_debugfs_write(struct file *f, const char __user
+ *buf, size_t size, loff_t *pos)
+{
+ char *kern_buff, *kern_buff_start;
+ char *dpdma_test_req;
+ int ret;
+ int i;
+
+ if (*pos != 0 || size <= 0)
+ return -EINVAL;
+
+ /* Only a single test instance is supported at a time */
+ if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
+ return -EBUSY;
+
+ /* + 1 so the copied user string is always NUL-terminated */
+ kern_buff = kzalloc(size + 1, GFP_KERNEL);
+ if (!kern_buff)
+ return -ENOMEM;
+ kern_buff_start = kern_buff;
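+ /* strsep() below advances kern_buff, so keep the start pointer for kfree() */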
+
+ ret = strncpy_from_user(kern_buff, buf, size);
+ if (ret < 0) {
+ kfree(kern_buff_start);
+ return ret;
+ }
+
+ /* Read the testcase name from a user request */
+ dpdma_test_req = strsep(&kern_buff, " ");
+
+ for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
+ if (!strcasecmp(dpdma_test_req, dpdma_debugfs_reqs[i].req)) {
+ if (!dpdma_debugfs_reqs[i].write_handler(&kern_buff)) {
+ kfree(kern_buff_start);
+ return size;
+ }
+ break;
+ }
+ }
+ kfree(kern_buff_start);
+ return -EINVAL;
+}
+
+static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *kern_buff = NULL;
+ size_t kern_buff_len, out_str_len;
+ enum xilinx_dpdma_testcases tc;
+ int ret;
+
+ if (size <= 0)
+ return -EINVAL;
+
+ if (*pos != 0)
+ return 0;
+
+ kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
+ if (!kern_buff) {
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+ return -ENOMEM;
+ }
+
+ tc = dpdma_debugfs.testcase;
+ if (tc == DPDMA_TC_NONE) {
+ out_str_len = strlen("No testcase executed");
+ out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
+ out_str_len);
+ snprintf(kern_buff, out_str_len, "%s", "No testcase executed");
+ } else {
+ ret = dpdma_debugfs_reqs[tc].read_handler(&kern_buff);
+ if (ret) {
+ kfree(kern_buff);
+ return ret;
+ }
+ }
+
+ kern_buff_len = strlen(kern_buff);
+ size = min(size, kern_buff_len);
+
+ ret = copy_to_user(buf, kern_buff, size);
+
+ kfree(kern_buff);
+ if (ret)
+ return -EFAULT;
+
+ *pos = size + 1;
+ return size;
+}
+
+static const struct file_operations fops_xilinx_dpdma_dbgfs = {
+ .owner = THIS_MODULE,
+ .read = xilinx_dpdma_debugfs_read,
+ .write = xilinx_dpdma_debugfs_write,
+};
+
+static int xilinx_dpdma_debugfs_init(struct device *dev)
+{
+ int err;
+ struct dentry *xilinx_dpdma_debugfs_dir, *xilinx_dpdma_debugfs_file;
+
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+ xilinx_dpdma_debugfs_dir = debugfs_create_dir("dpdma", NULL);
+ if (!xilinx_dpdma_debugfs_dir) {
+ dev_err(dev, "debugfs_create_dir failed\n");
+ return -ENODEV;
+ }
+
+ xilinx_dpdma_debugfs_file =
+ debugfs_create_file("testcase", 0444,
+ xilinx_dpdma_debugfs_dir, NULL,
+ &fops_xilinx_dpdma_dbgfs);
+ if (!xilinx_dpdma_debugfs_file) {
+ dev_err(dev, "debugfs_create_file testcase failed\n");
+ err = -ENODEV;
+ goto err_dbgfs;
+ }
+ return 0;
+
+err_dbgfs:
+ debugfs_remove_recursive(xilinx_dpdma_debugfs_dir);
+ xilinx_dpdma_debugfs_dir = NULL;
+ return err;
+}
+
+#else
+static int xilinx_dpdma_debugfs_init(struct device *dev)
+{
+ return 0;
+}
+
+static void xilinx_dpdma_debugfs_intr_done_count_incr(int chan_id)
+{
+}
+#endif /* CONFIG_XILINX_DPDMA_DEBUG_FS */
+
+#define to_dpdma_tx_desc(tx) \
+ container_of(tx, struct xilinx_dpdma_tx_desc, async_tx)
+
+#define to_xilinx_chan(chan) \
+ container_of(chan, struct xilinx_dpdma_chan, common)
+
+/* IO operations */
+
+static inline u32 dpdma_read(void __iomem *base, u32 offset)
+{
+ return ioread32(base + offset);
+}
+
+static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
+{
+ iowrite32(val, base + offset);
+}
+
+static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
+{
+ dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
+}
+
+static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
+{
+ dpdma_write(base, offset, dpdma_read(base, offset) | set);
+}
+
+/* Xilinx DPDMA descriptor operations */
+
+/**
+ * xilinx_dpdma_sw_desc_next_32 - Set 32 bit address of the next sw descriptor
+ * @sw_desc: current software descriptor
+ * @next: next descriptor
+ *
+ * Update the current sw descriptor @sw_desc with 32 bit address of the next
+ * descriptor @next.
+ */
+static inline void
+xilinx_dpdma_sw_desc_next_32(struct xilinx_dpdma_sw_desc *sw_desc,
+ struct xilinx_dpdma_sw_desc *next)
+{
+ sw_desc->hw.next_desc = next->phys;
+}
+
+/**
+ * xilinx_dpdma_sw_desc_addr_32 - Update the sw descriptor with 32 bit address
+ * @sw_desc: software descriptor
+ * @prev: previous descriptor
+ * @dma_addr: array of dma addresses
+ * @num_src_addr: number of addresses in @dma_addr
+ *
+ * Update the descriptor @sw_desc with 32 bit address.
+ */
+static void xilinx_dpdma_sw_desc_addr_32(struct xilinx_dpdma_sw_desc *sw_desc,
+ struct xilinx_dpdma_sw_desc *prev,
+ dma_addr_t dma_addr[],
+ unsigned int num_src_addr)
+{
+ struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
+ unsigned int i;
+
+ hw_desc->src_addr = dma_addr[0];
+
+ if (prev)
+ xilinx_dpdma_sw_desc_next_32(prev, sw_desc);
+
+ for (i = 1; i < num_src_addr; i++) {
+ u32 *addr = &hw_desc->src_addr2;
+ u32 frag_addr;
+
+ frag_addr = dma_addr[i];
+ addr[i - 1] = frag_addr;
+ }
+}
+
+/**
+ * xilinx_dpdma_sw_desc_next_64 - Set 64 bit address of the next sw descriptor
+ * @sw_desc: current software descriptor
+ * @next: next descriptor
+ *
+ * Update the current sw descriptor @sw_desc with 64 bit address of the next
+ * descriptor @next.
+ */
+static inline void
+xilinx_dpdma_sw_desc_next_64(struct xilinx_dpdma_sw_desc *sw_desc,
+ struct xilinx_dpdma_sw_desc *next)
+{
+ sw_desc->hw.next_desc = lower_32_bits(next->phys);
+ sw_desc->hw.addr_ext |= upper_32_bits(next->phys) &
+ XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
+}
+
+/**
+ * xilinx_dpdma_sw_desc_addr_64 - Update the sw descriptor with 64 bit address
+ * @sw_desc: software descriptor
+ * @prev: previous descriptor
+ * @dma_addr: array of dma addresses
+ * @num_src_addr: number of addresses in @dma_addr
+ *
+ * Update the descriptor @sw_desc with 64 bit address.
+ */
+static void xilinx_dpdma_sw_desc_addr_64(struct xilinx_dpdma_sw_desc *sw_desc,
+ struct xilinx_dpdma_sw_desc *prev,
+ dma_addr_t dma_addr[],
+ unsigned int num_src_addr)
+{
+ struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
+ unsigned int i;
+ u32 src_addr_extn;
+
+ hw_desc->src_addr = lower_32_bits(dma_addr[0]);
+ src_addr_extn = upper_32_bits(dma_addr[0]) &
+ XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
+ hw_desc->addr_ext |= (src_addr_extn <<
+ XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT);
+
+ if (prev)
+ xilinx_dpdma_sw_desc_next_64(prev, sw_desc);
+
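+ /*
+ * Fragment addresses: the low 32 bits of each address go into
+ * src_addr2..src_addr5, while the upper bits of two consecutive
+ * fragments share one addr_ext word.
+ */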
+ for (i = 1; i < num_src_addr; i++) {
+ u32 *addr = &hw_desc->src_addr2;
+ u32 *addr_ext = &hw_desc->addr_ext_23;
+ u64 frag_addr;
+
+ frag_addr = dma_addr[i];
+ addr[i - 1] = (u32)frag_addr;
+
+ frag_addr >>= 32;
+ frag_addr &= XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
+ frag_addr <<= XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT * ((i - 1) % 2);
+ addr_ext[(i - 1) / 2] |= frag_addr;
+ }
+}
+
+/* Xilinx DPDMA channel descriptor operations */
+
+/**
+ * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
+ * @chan: DPDMA channel
+ *
+ * Allocate a software descriptor from the channel's descriptor pool.
+ *
+ * Return: a software descriptor or NULL.
+ */
+static struct xilinx_dpdma_sw_desc *
+xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ dma_addr_t phys;
+
+ sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
+ if (!sw_desc)
+ return NULL;
+
+ sw_desc->phys = phys;
+
+ return sw_desc;
+}
+
+/**
+ * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
+ * @chan: DPDMA channel
+ * @sw_desc: software descriptor to free
+ *
+ * Free a software descriptor from the channel's descriptor pool.
+ */
+static void
+xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
+ struct xilinx_dpdma_sw_desc *sw_desc)
+{
+ dma_pool_free(chan->desc_pool, sw_desc, sw_desc->phys);
+}
+
+/**
+ * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
+ * @chan: DPDMA channel
+ * @tx_desc: tx descriptor to dump
+ *
+ * Dump contents of a tx descriptor
+ */
+static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
+ struct xilinx_dpdma_tx_desc *tx_desc)
+{
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ struct device *dev = chan->xdev->dev;
+ unsigned int i = 0;
+
+ dev_dbg(dev, "------- TX descriptor dump start -------\n");
+ dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);
+
+ list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
+ struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
+
+ dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
+ dev_dbg(dev, "descriptor phys: %pad\n", &sw_desc->phys);
+ dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
+ dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
+ dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
+ dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
+ dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
+ dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
+ dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
+ dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
+ dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
+ dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
+ dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
+ dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
+ dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
+ dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
+ dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
+ dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
+ }
+
+ dev_dbg(dev, "------- TX descriptor dump end -------\n");
+}
+
+/**
+ * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
+ * @chan: DPDMA channel
+ *
+ * Allocate a tx descriptor.
+ *
+ * Return: a tx descriptor or NULL.
+ */
+static struct xilinx_dpdma_tx_desc *
+xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+
+ tx_desc = kzalloc(sizeof(*tx_desc), GFP_ATOMIC);
+ if (!tx_desc)
+ return NULL;
+
+ INIT_LIST_HEAD(&tx_desc->descriptors);
+ tx_desc->status = PREPARED;
+
+ return tx_desc;
+}
+
+/**
+ * xilinx_dpdma_chan_free_tx_desc - Free a transaction descriptor
+ * @chan: DPDMA channel
+ * @tx_desc: tx descriptor
+ *
+ * Free the tx descriptor @tx_desc including its software descriptors.
+ */
+static void
+xilinx_dpdma_chan_free_tx_desc(struct xilinx_dpdma_chan *chan,
+ struct xilinx_dpdma_tx_desc *tx_desc)
+{
+ struct xilinx_dpdma_sw_desc *sw_desc, *next;
+
+ if (!tx_desc)
+ return;
+
+ list_for_each_entry_safe(sw_desc, next, &tx_desc->descriptors, node) {
+ list_del(&sw_desc->node);
+ xilinx_dpdma_chan_free_sw_desc(chan, sw_desc);
+ }
+
+ kfree(tx_desc);
+}
+
+/**
+ * xilinx_dpdma_chan_submit_tx_desc - Submit a transaction descriptor
+ * @chan: DPDMA channel
+ * @tx_desc: tx descriptor
+ *
+ * Submit the tx descriptor @tx_desc to the channel @chan.
+ *
+ * Return: a cookie assigned to the tx descriptor
+ */
+static dma_cookie_t
+xilinx_dpdma_chan_submit_tx_desc(struct xilinx_dpdma_chan *chan,
+ struct xilinx_dpdma_tx_desc *tx_desc)
+{
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (chan->submitted_desc) {
+ cookie = chan->submitted_desc->async_tx.cookie;
+ goto out_unlock;
+ }
+
+ cookie = dma_cookie_assign(&tx_desc->async_tx);
+
+ /*
+ * Assign the cookie to all descriptors in this transaction. Only the
+ * lower 16 bits of the cookie are used, which should be enough.
+ */
+ list_for_each_entry(sw_desc, &tx_desc->descriptors, node)
+ sw_desc->hw.desc_id = cookie;
+
+ if (tx_desc != chan->allocated_desc)
+ dev_err(chan->xdev->dev, "desc != allocated_desc\n");
+ else
+ chan->allocated_desc = NULL;
+ chan->submitted_desc = tx_desc;
+
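+ /*
+ * VIDEO1 and VIDEO2 are operated as a group with VIDEO0, so mark the
+ * whole group for a combined trigger.
+ */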
+ if (chan->id == VIDEO1 || chan->id == VIDEO2) {
+ chan->video_group = true;
+ chan->xdev->chan[VIDEO0]->video_group = true;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return cookie;
+}
+
+/**
+ * xilinx_dpdma_chan_free_desc_list - Free a descriptor list
+ * @chan: DPDMA channel
+ * @list: tx descriptor list
+ *
+ * Free tx descriptors in the list @list.
+ */
+static void xilinx_dpdma_chan_free_desc_list(struct xilinx_dpdma_chan *chan,
+ struct list_head *list)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc, *next;
+
+ list_for_each_entry_safe(tx_desc, next, list, node) {
+ list_del(&tx_desc->node);
+ xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+ }
+}
+
+/**
+ * xilinx_dpdma_chan_free_all_desc - Free all descriptors of the channel
+ * @chan: DPDMA channel
+ *
+ * Free all descriptors associated with the channel. The channel must be
+ * disabled before this function is called; otherwise the remaining
+ * outstanding transactions may cause the system to misbehave.
+ */
+static void xilinx_dpdma_chan_free_all_desc(struct xilinx_dpdma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ dev_dbg(chan->xdev->dev, "chan->status = %s\n",
+ chan->status == STREAMING ? "STREAMING" : "IDLE");
+
+ xilinx_dpdma_chan_free_tx_desc(chan, chan->allocated_desc);
+ chan->allocated_desc = NULL;
+ xilinx_dpdma_chan_free_tx_desc(chan, chan->submitted_desc);
+ chan->submitted_desc = NULL;
+ xilinx_dpdma_chan_free_tx_desc(chan, chan->pending_desc);
+ chan->pending_desc = NULL;
+ xilinx_dpdma_chan_free_tx_desc(chan, chan->active_desc);
+ chan->active_desc = NULL;
+ xilinx_dpdma_chan_free_desc_list(chan, &chan->done_list);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_cleanup_desc - Clean up descriptors
+ * @chan: DPDMA channel
+ *
+ * Trigger the completion callbacks of descriptors with finished transactions.
+ * Free descriptors which are no longer in use.
+ */
+static void xilinx_dpdma_chan_cleanup_desc(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_tx_desc *desc;
+ dma_async_tx_callback callback;
+ void *callback_param;
+ unsigned long flags;
+ unsigned int cnt, i;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ while (!list_empty(&chan->done_list)) {
+ desc = list_first_entry(&chan->done_list,
+ struct xilinx_dpdma_tx_desc, node);
+ list_del(&desc->node);
+
+ cnt = desc->done_cnt;
+ desc->done_cnt = 0;
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
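+ /* Invoke the callback, once per done interrupt, without the lock */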
+ if (callback) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ for (i = 0; i < cnt; i++)
+ callback(callback_param);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ xilinx_dpdma_chan_free_tx_desc(chan, desc);
+ }
+
+ if (chan->active_desc) {
+ cnt = chan->active_desc->done_cnt;
+ chan->active_desc->done_cnt = 0;
+ callback = chan->active_desc->async_tx.callback;
+ callback_param = chan->active_desc->async_tx.callback_param;
+ if (callback) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ for (i = 0; i < cnt; i++)
+ callback(callback_param);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_desc_active - Set the descriptor as active
+ * @chan: DPDMA channel
+ *
+ * Make the pending descriptor @chan->pending_desc the active descriptor. This
+ * function should be called when the channel starts operating on the pending
+ * descriptor.
+ */
+static void xilinx_dpdma_chan_desc_active(struct xilinx_dpdma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (!chan->pending_desc)
+ goto out_unlock;
+
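+ /* Retire the previously active descriptor to the done list */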
+ if (chan->active_desc)
+ list_add_tail(&chan->active_desc->node, &chan->done_list);
+
+ chan->active_desc = chan->pending_desc;
+ chan->pending_desc = NULL;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_desc_done_intr - Mark the current descriptor as 'done'
+ * @chan: DPDMA channel
+ *
+ * Mark the current active descriptor @chan->active_desc as 'done'. This
+ * function should be called to mark completion of the currently active
+ * descriptor.
+ */
+static void xilinx_dpdma_chan_desc_done_intr(struct xilinx_dpdma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ xilinx_dpdma_debugfs_intr_done_count_incr(chan->id);
+
+ if (!chan->active_desc) {
+ dev_dbg(chan->xdev->dev, "done intr with no active desc\n");
+ goto out_unlock;
+ }
+
+ chan->active_desc->done_cnt++;
+ if (chan->active_desc->status == PREPARED) {
+ dma_cookie_complete(&chan->active_desc->async_tx);
+ chan->active_desc->status = ACTIVE;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ tasklet_schedule(&chan->done_task);
+}
+
+/**
+ * xilinx_dpdma_chan_prep_slave_sg - Prepare a scatter-gather dma descriptor
+ * @chan: DPDMA channel
+ * @sgl: scatter-gather list
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * for the given scatter-gather transaction.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_slave_sg(struct xilinx_dpdma_chan *chan,
+ struct scatterlist *sgl)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+ struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
+ struct scatterlist *iter = sgl;
+ u32 line_size = 0;
+
+ if (chan->allocated_desc)
+ return &chan->allocated_desc->async_tx;
+
+ tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+ if (!tx_desc)
+ return NULL;
+
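+ /*
+ * Entries up to the first chain describe a single line; sum their
+ * lengths to get the line size in bytes.
+ */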
+ while (!sg_is_chain(iter))
+ line_size += sg_dma_len(iter++);
+
+ while (sgl) {
+ struct xilinx_dpdma_hw_desc *hw_desc;
+ dma_addr_t dma_addr[4];
+ unsigned int num_pages = 0;
+
+ sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+ if (!sw_desc)
+ goto error;
+
+ while (!sg_is_chain(sgl) && !sg_is_last(sgl)) {
+ dma_addr[num_pages] = sg_dma_address(sgl++);
+ if (!IS_ALIGNED(dma_addr[num_pages++],
+ XILINX_DPDMA_ALIGN_BYTES)) {
+ dev_err(chan->xdev->dev,
+ "buffer should be aligned at %d B\n",
+ XILINX_DPDMA_ALIGN_BYTES);
+ goto error;
+ }
+ }
+
+ chan->xdev->desc_addr(sw_desc, last, dma_addr, num_pages);
+ hw_desc = &sw_desc->hw;
+ hw_desc->xfer_size = line_size;
+ hw_desc->hsize_stride =
+ line_size << XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_FRAG_MODE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
+
+ list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+ last = sw_desc;
+ if (sg_is_last(sgl))
+ break;
+ sgl = sg_chain_ptr(sgl);
+ }
+
+ sw_desc = list_first_entry(&tx_desc->descriptors,
+ struct xilinx_dpdma_sw_desc, node);
+ if (chan->xdev->ext_addr)
+ xilinx_dpdma_sw_desc_next_64(last, sw_desc);
+ else
+ xilinx_dpdma_sw_desc_next_32(last, sw_desc);
+ last->hw.control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+ last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+ chan->allocated_desc = tx_desc;
+
+ return &tx_desc->async_tx;
+
+error:
+ xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+
+ return NULL;
+}
+
+/**
+ * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor
+ * @chan: DPDMA channel
+ * @buf_addr: buffer address
+ * @buf_len: buffer length
+ * @period_len: length of a single period
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * for the given cyclic transaction.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+ struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
+ unsigned int periods = buf_len / period_len;
+ unsigned int i;
+
+ if (chan->allocated_desc)
+ return &chan->allocated_desc->async_tx;
+
+ tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+ if (!tx_desc)
+ return NULL;
+
+ for (i = 0; i < periods; i++) {
+ struct xilinx_dpdma_hw_desc *hw_desc;
+
+ if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
+ dev_err(chan->xdev->dev,
+ "buffer should be aligned at %d B\n",
+ XILINX_DPDMA_ALIGN_BYTES);
+ goto error;
+ }
+
+ sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+ if (!sw_desc)
+ goto error;
+
+ chan->xdev->desc_addr(sw_desc, last, &buf_addr, 1);
+ hw_desc = &sw_desc->hw;
+ hw_desc->xfer_size = period_len;
+ hw_desc->hsize_stride =
+ period_len <<
+ XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
+ hw_desc->hsize_stride |=
+ period_len <<
+ XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+
+ list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+
+ buf_addr += period_len;
+ last = sw_desc;
+ }
+
+ sw_desc = list_first_entry(&tx_desc->descriptors,
+ struct xilinx_dpdma_sw_desc, node);
+ if (chan->xdev->ext_addr)
+ xilinx_dpdma_sw_desc_next_64(last, sw_desc);
+ else
+ xilinx_dpdma_sw_desc_next_32(last, sw_desc);
+ last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+ chan->allocated_desc = tx_desc;
+
+ return &tx_desc->async_tx;
+
+error:
+ xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+
+ return NULL;
+}
+
+/**
+ * xilinx_dpdma_chan_prep_interleaved - Prepare an interleaved dma descriptor
+ * @chan: DPDMA channel
+ * @xt: dma interleaved template
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * based on @xt.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_interleaved(struct xilinx_dpdma_chan *chan,
+ struct dma_interleaved_template *xt)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ struct xilinx_dpdma_hw_desc *hw_desc;
+ size_t hsize = xt->sgl[0].size;
+ size_t stride = hsize + xt->sgl[0].icg;
+
+ if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
+ dev_err(chan->xdev->dev, "buffer should be aligned at %d B\n",
+ XILINX_DPDMA_ALIGN_BYTES);
+ return NULL;
+ }
+
+ if (chan->allocated_desc)
+ return &chan->allocated_desc->async_tx;
+
+ tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+ if (!tx_desc)
+ return NULL;
+
+ sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+ if (!sw_desc)
+ goto error;
+
+ chan->xdev->desc_addr(sw_desc, sw_desc, &xt->src_start, 1);
+ hw_desc = &sw_desc->hw;
+ hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
+ hw_desc->xfer_size = hsize * xt->numf;
+ hw_desc->hsize_stride = hsize <<
+ XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
+ hw_desc->hsize_stride |= (stride / 16) <<
+ XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+ list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+ chan->allocated_desc = tx_desc;
+
+ return &tx_desc->async_tx;
+
+error:
+ xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+
+ return NULL;
+}
+
+/* Xilinx DPDMA channel operations */
+
+/**
+ * xilinx_dpdma_chan_enable - Enable the channel
+ * @chan: DPDMA channel
+ *
+ * Enable the channel and its interrupts. Set the QoS values for video class.
+ */
+static inline void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
+{
+ u32 reg;
+
+ reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
+ reg |= XILINX_DPDMA_INTR_GLOBAL_MASK;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
+ reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
+ reg |= XILINX_DPDMA_INTR_GLOBAL_ERR;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
+
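+ /* Use video-class QoS for descriptor read/write and data read requests */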
+ reg = XILINX_DPDMA_CH_CNTL_ENABLE;
+ reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
+ XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_SHIFT;
+ reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
+ XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_SHIFT;
+ reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
+ XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_SHIFT;
+ dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
+}
+
+/**
+ * xilinx_dpdma_chan_disable - Disable the channel
+ * @chan: DPDMA channel
+ *
+ * Disable the channel and its interrupts.
+ */
+static inline void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
+{
+ u32 reg;
+
+ reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
+ reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
+
+ dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
+}
+
+/**
+ * xilinx_dpdma_chan_pause - Pause the channel
+ * @chan: DPDMA channel
+ *
+ * Pause the channel.
+ */
+static inline void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
+{
+ dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
+}
+
+/**
+ * xilinx_dpdma_chan_unpause - Unpause the channel
+ * @chan: DPDMA channel
+ *
+ * Unpause the channel.
+ */
+static inline void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
+{
+ dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
+}
+
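+/**
+ * xilinx_dpdma_chan_video_group_ready - Check if the video group is ready
+ * @chan: DPDMA channel
+ *
+ * Return: a bitmask of the video group channels once every channel in the
+ * group is streaming, or 0 otherwise.
+ */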
+static u32
+xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ u32 i = 0, ret = 0;
+
+ for (i = VIDEO0; i < GRAPHICS; i++) {
+ if (xdev->chan[i]->video_group &&
+ xdev->chan[i]->status != STREAMING)
+ return 0;
+
+ if (xdev->chan[i]->video_group)
+ ret |= BIT(i);
+ }
+
+ return ret;
+}
+
+/**
+ * xilinx_dpdma_chan_issue_pending - Issue the pending descriptor
+ * @chan: DPDMA channel
+ *
+ * Move the submitted descriptor @chan->submitted_desc to the pending state
+ * and program its start address into the hardware. If the channel is already
+ * streaming, the channel is re-triggered with the pending descriptor.
+ */
+static void xilinx_dpdma_chan_issue_pending(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ unsigned long flags;
+ u32 reg, channels;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (!chan->submitted_desc || chan->pending_desc)
+ goto out_unlock;
+
+ chan->pending_desc = chan->submitted_desc;
+ chan->submitted_desc = NULL;
+
+ sw_desc = list_first_entry(&chan->pending_desc->descriptors,
+ struct xilinx_dpdma_sw_desc, node);
+ dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
+ (u32)sw_desc->phys);
+ if (xdev->ext_addr)
+ dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
+ ((u64)sw_desc->phys >> 32) &
+ XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK);
+
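+ /*
+ * The first frame of a stream needs the global trigger; once the
+ * channel is streaming, new frames are scheduled with the retrigger.
+ */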
+ if (chan->first_frame) {
+ chan->first_frame = false;
+ if (chan->video_group) {
+ channels = xilinx_dpdma_chan_video_group_ready(chan);
+ if (!channels)
+ goto out_unlock;
+ reg = channels << XILINX_DPDMA_GBL_TRIG_SHIFT;
+ } else {
+ reg = 1 << (XILINX_DPDMA_GBL_TRIG_SHIFT + chan->id);
+ }
+ } else {
+ if (chan->video_group) {
+ channels = xilinx_dpdma_chan_video_group_ready(chan);
+ if (!channels)
+ goto out_unlock;
+ reg = channels << XILINX_DPDMA_GBL_RETRIG_SHIFT;
+ } else {
+ reg = 1 << (XILINX_DPDMA_GBL_RETRIG_SHIFT + chan->id);
+ }
+ }
+
+ dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_start - Start the channel
+ * @chan: DPDMA channel
+ *
+ * Start the channel by enabling interrupts and triggering the channel.
+ * If the channel is enabled already or there's no pending descriptor, this
+ * function won't do anything on the channel.
+ */
+static void xilinx_dpdma_chan_start(struct xilinx_dpdma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (!chan->submitted_desc || chan->status == STREAMING)
+ goto out_unlock;
+
+ xilinx_dpdma_chan_unpause(chan);
+ xilinx_dpdma_chan_enable(chan);
+ chan->first_frame = true;
+ chan->status = STREAMING;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_ostand - Number of outstanding transactions
+ * @chan: DPDMA channel
+ *
+ * Read and return the number of outstanding transactions from register.
+ *
+ * Return: Number of outstanding transactions from the status register.
+ */
+static inline u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
+{
+ return dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS) >>
+ XILINX_DPDMA_CH_STATUS_OTRAN_CNT_SHIFT &
+ XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK;
+}
+
+/**
+ * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
+ * @chan: DPDMA channel
+ *
+ * Notify waiters of the 'no outstanding transaction' event, so they can stop
+ * the channel safely. This function is supposed to be called when the 'no
+ * outstanding' interrupt is generated. The interrupt is disabled here and
+ * should be re-enabled once the event is handled. If the channel status
+ * register still shows some number of outstanding transactions, the interrupt
+ * remains enabled.
+ *
+ * Return: 0 on success, or -EWOULDBLOCK if there are still outstanding
+ * transactions.
+ */
+static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
+{
+ u32 cnt;
+
+ cnt = xilinx_dpdma_chan_ostand(chan);
+ if (cnt) {
+ dev_dbg(chan->xdev->dev, "%d outstanding transactions\n", cnt);
+ return -EWOULDBLOCK;
+ }
+
+ /* Disable 'no outstanding' interrupt */
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
+ 1 << (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
+ wake_up(&chan->wait_to_stop);
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding intr
+ * @chan: DPDMA channel
+ *
+ * Wait for the no outstanding transaction interrupt. This function can sleep
+ * for up to 50 ms.
+ *
+ * Return: 0 on success. On failure, -ETIMEDOUT on timeout, or the error code
+ * from wait_event_interruptible_timeout().
+ */
+static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
+{
+ int ret;
+
+ /* Wait up to 50 ms for the no outstanding transaction interrupt */
+ ret = wait_event_interruptible_timeout(chan->wait_to_stop,
+ !xilinx_dpdma_chan_ostand(chan),
+ msecs_to_jiffies(50));
+ if (ret > 0) {
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
+ 1 <<
+ (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
+ return 0;
+ }
+
+ dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
+ xilinx_dpdma_chan_ostand(chan));
+
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return ret;
+}
+
+/**
+ * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
+ * @chan: DPDMA channel
+ *
+ * Poll the outstanding transaction status, and return when there's no
+ * outstanding transaction. This function can be used in interrupt context or
+ * wherever atomicity is required. The calling thread may busy-wait up to 50 ms.
+ *
+ * Return: 0 on success, or -ETIMEDOUT.
+ */
+static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
+{
+ u32 cnt, loop = 50000;
+
+ /* Poll for up to 50 ms, i.e. one frame period at 20 fps. */
+ do {
+ cnt = xilinx_dpdma_chan_ostand(chan);
+ udelay(1);
+ } while (loop-- > 0 && cnt);
+
+ /* On timeout the unsigned loop counter wraps, so test the count itself */
+ if (!cnt) {
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
+ 1 <<
+ (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
+ return 0;
+ }
+
+ dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
+ xilinx_dpdma_chan_ostand(chan));
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * xilinx_dpdma_chan_stop - Stop the channel
+ * @chan: DPDMA channel
+ * @poll: flag whether to poll or wait
+ *
+ * Stop the channel with the following sequence: 1. Pause, 2. Wait (sleep) for
+ * no outstanding transaction interrupt, 3. Disable the channel.
+ *
+ * Return: 0 on success, or an error from
+ * xilinx_dpdma_chan_{poll,wait}_no_ostand().
+ */
+static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan, bool poll)
+{
+ unsigned long flags;
+ int ret;
+
+ xilinx_dpdma_chan_pause(chan);
+ if (poll)
+ ret = xilinx_dpdma_chan_poll_no_ostand(chan);
+ else
+ ret = xilinx_dpdma_chan_wait_no_ostand(chan);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ xilinx_dpdma_chan_disable(chan);
+ chan->status = IDLE;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_alloc_resources - Allocate resources for the channel
+ * @chan: DPDMA channel
+ *
+ * Allocate a descriptor pool for the channel.
+ *
+ * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
+ */
+static int xilinx_dpdma_chan_alloc_resources(struct xilinx_dpdma_chan *chan)
+{
+ chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
+ chan->xdev->dev,
+ sizeof(struct xilinx_dpdma_sw_desc),
+ __alignof__(struct xilinx_dpdma_sw_desc), 0);
+ if (!chan->desc_pool) {
+ dev_err(chan->xdev->dev,
+ "failed to allocate a descriptor pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_free_resources - Free all resources for the channel
+ * @chan: DPDMA channel
+ *
+ * Free all descriptors and the descriptor pool for the channel.
+ */
+static void xilinx_dpdma_chan_free_resources(struct xilinx_dpdma_chan *chan)
+{
+ xilinx_dpdma_chan_free_all_desc(chan);
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
+}
+
+/**
+ * xilinx_dpdma_chan_terminate_all - Terminate the channel and descriptors
+ * @chan: DPDMA channel
+ *
+ * Stop the channel and free all associated descriptors. Poll for the 'no
+ * outstanding transaction' state, as this can be called from an atomic context.
+ *
+ * Return: 0 on success, or the error code from xilinx_dpdma_chan_stop().
+ */
+static int xilinx_dpdma_chan_terminate_all(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ int ret;
+ unsigned int i;
+
+ if (chan->video_group) {
+ for (i = VIDEO0; i < GRAPHICS; i++) {
+ if (xdev->chan[i]->video_group &&
+ xdev->chan[i]->status == STREAMING) {
+ xilinx_dpdma_chan_pause(xdev->chan[i]);
+ xdev->chan[i]->video_group = false;
+ }
+ }
+ }
+
+ ret = xilinx_dpdma_chan_stop(chan, true);
+ if (ret)
+ return ret;
+
+ xilinx_dpdma_chan_free_all_desc(chan);
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_synchronize - Synchronize all outgoing transfers
+ * @chan: DPDMA channel
+ *
+ * Stop the channel and free all associated descriptors. As this can't be
+ * called in an atomic context, sleep-wait for no outstanding transaction
+ * interrupt. Then kill all related tasklets.
+ *
+ * Return: 0 on success, or the error code from xilinx_dpdma_chan_stop().
+ */
+static int xilinx_dpdma_chan_synchronize(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ int ret;
+ unsigned int i;
+
+ if (chan->video_group) {
+ for (i = VIDEO0; i < GRAPHICS; i++) {
+ if (xdev->chan[i]->video_group &&
+ xdev->chan[i]->status == STREAMING) {
+ xilinx_dpdma_chan_pause(xdev->chan[i]);
+ xdev->chan[i]->video_group = false;
+ }
+ }
+ }
+
+ ret = xilinx_dpdma_chan_stop(chan, false);
+ if (ret)
+ return ret;
+
+ tasklet_kill(&chan->err_task);
+ tasklet_kill(&chan->done_task);
+ xilinx_dpdma_chan_free_all_desc(chan);
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_err - Detect any channel error
+ * @chan: DPDMA channel
+ * @isr: masked Interrupt Status Register
+ * @eisr: Error Interrupt Status Register
+ *
+ * Return: true if any channel error occurs, or false otherwise.
+ */
+static bool
+xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
+{
+ if (!chan)
+ return false;
+
+ if (chan->status == STREAMING &&
+ ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
+ (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
+ return true;
+
+ return false;
+}
+
+/**
+ * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
+ * @chan: DPDMA channel
+ *
+ * This function is called when any channel error or any global error occurs.
+ * It disables the channel paused by the error and determines whether the
+ * currently active descriptor can be rescheduled, depending on the
+ * descriptor status.
+ */
+static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ struct device *dev = xdev->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ dev_dbg(dev, "cur desc addr = 0x%04x%08x\n",
+ dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
+ dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
+ dev_dbg(dev, "cur payload addr = 0x%04x%08x\n",
+ dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
+ dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));
+
+ xilinx_dpdma_chan_disable(chan);
+ chan->status = IDLE;
+
+ if (!chan->active_desc)
+ goto out_unlock;
+
+ xilinx_dpdma_chan_dump_tx_desc(chan, chan->active_desc);
+
+ switch (chan->active_desc->status) {
+ case ERRORED:
+ dev_dbg(dev, "repeated error on desc\n");
+ break;
+ case ACTIVE:
+ case PREPARED:
+ /* Reschedule if there's no new descriptor */
+ if (!chan->pending_desc && !chan->submitted_desc) {
+ chan->active_desc->status = ERRORED;
+ chan->submitted_desc = chan->active_desc;
+ } else {
+ xilinx_dpdma_chan_free_tx_desc(chan, chan->active_desc);
+ }
+ break;
+ }
+ chan->active_desc = NULL;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/* DMA tx descriptor */
+
+static dma_cookie_t xilinx_dpdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(tx->chan);
+ struct xilinx_dpdma_tx_desc *tx_desc = to_dpdma_tx_desc(tx);
+
+ return xilinx_dpdma_chan_submit_tx_desc(chan, tx_desc);
+}
+
+/* DMA channel operations */
+
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ struct dma_async_tx_descriptor *async_tx;
+
+ if (direction != DMA_MEM_TO_DEV)
+ return NULL;
+
+ if (!sgl || sg_len < 2)
+ return NULL;
+
+ async_tx = xilinx_dpdma_chan_prep_slave_sg(chan, sgl);
+ if (!async_tx)
+ return NULL;
+
+ dma_async_tx_descriptor_init(async_tx, dchan);
+ async_tx->tx_submit = xilinx_dpdma_tx_submit;
+ async_tx->flags = flags;
+ async_tx_ack(async_tx);
+
+ return async_tx;
+}
+
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ struct dma_async_tx_descriptor *async_tx;
+
+ if (direction != DMA_MEM_TO_DEV)
+ return NULL;
+
+ if (buf_len % period_len)
+ return NULL;
+
+ async_tx = xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
+ period_len);
+ if (!async_tx)
+ return NULL;
+
+ dma_async_tx_descriptor_init(async_tx, dchan);
+ async_tx->tx_submit = xilinx_dpdma_tx_submit;
+ async_tx->flags = flags;
+ async_tx_ack(async_tx);
+
+ return async_tx;
+}
+
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ struct dma_async_tx_descriptor *async_tx;
+
+ if (xt->dir != DMA_MEM_TO_DEV)
+ return NULL;
+
+ if (!xt->numf || !xt->sgl[0].size)
+ return NULL;
+
+ async_tx = xilinx_dpdma_chan_prep_interleaved(chan, xt);
+ if (!async_tx)
+ return NULL;
+
+ dma_async_tx_descriptor_init(async_tx, dchan);
+ async_tx->tx_submit = xilinx_dpdma_tx_submit;
+ async_tx->flags = flags;
+ async_tx_ack(async_tx);
+
+ return async_tx;
+}
+
+static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+ dma_cookie_init(dchan);
+
+ return xilinx_dpdma_chan_alloc_resources(chan);
+}
+
+static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+ xilinx_dpdma_chan_free_resources(chan);
+}
+
+static enum dma_status xilinx_dpdma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(dchan, cookie, txstate);
+}
+
+static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+ xilinx_dpdma_chan_start(chan);
+ xilinx_dpdma_chan_issue_pending(chan);
+}
+
+static int xilinx_dpdma_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ if (config->direction != DMA_MEM_TO_DEV)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int xilinx_dpdma_pause(struct dma_chan *dchan)
+{
+ xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));
+
+ return 0;
+}
+
+static int xilinx_dpdma_resume(struct dma_chan *dchan)
+{
+ xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));
+
+ return 0;
+}
+
+static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
+{
+ return xilinx_dpdma_chan_terminate_all(to_xilinx_chan(dchan));
+}
+
+static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
+{
+ xilinx_dpdma_chan_synchronize(to_xilinx_chan(dchan));
+}
+
+/* Xilinx DPDMA device operations */
+
+/**
+ * xilinx_dpdma_err - Detect any global error
+ * @isr: Interrupt Status Register
+ * @eisr: Error Interrupt Status Register
+ *
+ * Return: true if any global error occurs, or false otherwise.
+ */
+static bool xilinx_dpdma_err(u32 isr, u32 eisr)
+{
+ if ((isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
+ eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR))
+ return true;
+
+ return false;
+}
+
+/**
+ * xilinx_dpdma_handle_err_intr - Handle DPDMA error interrupt
+ * @xdev: DPDMA device
+ * @isr: masked Interrupt Status Register
+ * @eisr: Error Interrupt Status Register
+ *
+ * Handle if any error occurs based on @isr and @eisr. This function disables
+ * corresponding error interrupts, and those should be re-enabled once handling
+ * is done.
+ */
+static void xilinx_dpdma_handle_err_intr(struct xilinx_dpdma_device *xdev,
+ u32 isr, u32 eisr)
+{
+ bool err = xilinx_dpdma_err(isr, eisr);
+ unsigned int i;
+
+ dev_dbg_ratelimited(xdev->dev,
+ "error intr: isr = 0x%08x, eisr = 0x%08x\n",
+ isr, eisr);
+
+ /* Disable channel error interrupts until errors are handled. */
+ dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
+ isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
+ eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);
+
+ for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
+ if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
+ tasklet_schedule(&xdev->chan[i]->err_task);
+}
+
+/**
+ * xilinx_dpdma_handle_vsync_intr - Handle the VSYNC interrupt
+ * @xdev: DPDMA device
+ *
+ * Handle the VSYNC event. At this point, the current frame becomes active,
+ * which means the DPDMA actually starts fetching, and the next frame can be
+ * scheduled.
+ */
+static void xilinx_dpdma_handle_vsync_intr(struct xilinx_dpdma_device *xdev)
+{
+ unsigned int i;
+
+ for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++) {
+ if (xdev->chan[i] &&
+ xdev->chan[i]->status == STREAMING) {
+ xilinx_dpdma_chan_desc_active(xdev->chan[i]);
+ xilinx_dpdma_chan_issue_pending(xdev->chan[i]);
+ }
+ }
+}
+
+/**
+ * xilinx_dpdma_enable_intr - Enable interrupts
+ * @xdev: DPDMA device
+ *
+ * Enable interrupts.
+ */
+static void xilinx_dpdma_enable_intr(struct xilinx_dpdma_device *xdev)
+{
+ dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
+}
+
+/**
+ * xilinx_dpdma_disable_intr - Disable interrupts
+ * @xdev: DPDMA device
+ *
+ * Disable interrupts.
+ */
+static void xilinx_dpdma_disable_intr(struct xilinx_dpdma_device *xdev)
+{
+ dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
+}
+
+/* Interrupt handling operations */
+
+/**
+ * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
+ * @data: tasklet data to be cast to the DPDMA channel structure
+ *
+ * Per channel error handling tasklet. This function waits for outstanding
+ * transactions to complete, triggers error handling, re-enables the channel
+ * error interrupts, and restarts the channel if needed.
+ */
+static void xilinx_dpdma_chan_err_task(unsigned long data)
+{
+ struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+
+ /* Proceed error handling even when polling fails. */
+ xilinx_dpdma_chan_poll_no_ostand(chan);
+
+ xilinx_dpdma_chan_handle_err(chan);
+
+ dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
+ XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
+ XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
+
+ xilinx_dpdma_chan_start(chan);
+ xilinx_dpdma_chan_issue_pending(chan);
+}
+
+/**
+ * xilinx_dpdma_chan_done_task - Per channel tasklet for done interrupt handling
+ * @data: tasklet data to be cast to the DPDMA channel structure
+ *
+ * Per channel done interrupt handling tasklet.
+ */
+static void xilinx_dpdma_chan_done_task(unsigned long data)
+{
+ struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
+
+ xilinx_dpdma_chan_cleanup_desc(chan);
+}
+
+static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
+{
+ struct xilinx_dpdma_device *xdev = data;
+ u32 status, error, i;
+ unsigned long masked;
+
+ status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
+ error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
+ if (!status && !error)
+ return IRQ_NONE;
+
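+ /* Ack the captured interrupts by writing the status bits back */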
+ dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);
+
+ if (status & XILINX_DPDMA_INTR_VSYNC)
+ xilinx_dpdma_handle_vsync_intr(xdev);
+
+ masked = (status & XILINX_DPDMA_INTR_DESC_DONE_MASK) >>
+ XILINX_DPDMA_INTR_DESC_DONE_SHIFT;
+ if (masked)
+ for_each_set_bit(i, &masked, XILINX_DPDMA_NUM_CHAN)
+ xilinx_dpdma_chan_desc_done_intr(xdev->chan[i]);
+
+ masked = (status & XILINX_DPDMA_INTR_NO_OSTAND_MASK) >>
+ XILINX_DPDMA_INTR_NO_OSTAND_SHIFT;
+ if (masked)
+ for_each_set_bit(i, &masked, XILINX_DPDMA_NUM_CHAN)
+ xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
+
+ masked = status & XILINX_DPDMA_INTR_ERR_ALL;
+ if (masked || error)
+ xilinx_dpdma_handle_err_intr(xdev, masked, error);
+
+ return IRQ_HANDLED;
+}
+
+/* Initialization operations */
+
+static struct xilinx_dpdma_chan *
+xilinx_dpdma_chan_probe(struct device_node *node,
+ struct xilinx_dpdma_device *xdev)
+{
+ struct xilinx_dpdma_chan *chan;
+
+ chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_device_is_compatible(node, "xlnx,video0")) {
+ chan->id = VIDEO0;
+ } else if (of_device_is_compatible(node, "xlnx,video1")) {
+ chan->id = VIDEO1;
+ } else if (of_device_is_compatible(node, "xlnx,video2")) {
+ chan->id = VIDEO2;
+ } else if (of_device_is_compatible(node, "xlnx,graphics")) {
+ chan->id = GRAPHICS;
+ } else if (of_device_is_compatible(node, "xlnx,audio0")) {
+ chan->id = AUDIO0;
+ } else if (of_device_is_compatible(node, "xlnx,audio1")) {
+ chan->id = AUDIO1;
+ } else {
+ dev_err(xdev->dev, "invalid channel compatible string in DT\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE + XILINX_DPDMA_CH_OFFSET *
+ chan->id;
+ chan->status = IDLE;
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->done_list);
+ init_waitqueue_head(&chan->wait_to_stop);
+
+ tasklet_init(&chan->done_task, xilinx_dpdma_chan_done_task,
+ (unsigned long)chan);
+ tasklet_init(&chan->err_task, xilinx_dpdma_chan_err_task,
+ (unsigned long)chan);
+
+ chan->common.device = &xdev->common;
+ chan->xdev = xdev;
+
+ list_add_tail(&chan->common.device_node, &xdev->common.channels);
+ xdev->chan[chan->id] = chan;
+
+ return chan;
+}
+
+static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
+{
+ tasklet_kill(&chan->err_task);
+ tasklet_kill(&chan->done_task);
+ list_del(&chan->common.device_node);
+}
+
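+/*
+ * DT translation: the dma-spec carries a single cell holding the channel ID.
+ * A client node would reference the controller as, e.g. (hypothetical
+ * snippet): dmas = <&xlnx_dpdma 3>; dma-names = "gfx";
+ */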
+static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
+ uint32_t chan_id = dma_spec->args[0];
+
+ if (chan_id >= XILINX_DPDMA_NUM_CHAN)
+ return NULL;
+
+ if (!xdev->chan[chan_id])
+ return NULL;
+
+ return dma_get_slave_channel(&xdev->chan[chan_id]->common);
+}
+
+static int xilinx_dpdma_probe(struct platform_device *pdev)
+{
+ struct xilinx_dpdma_device *xdev;
+ struct xilinx_dpdma_chan *chan;
+ struct dma_device *ddev;
+ struct resource *res;
+ struct device_node *node, *child;
+ u32 i;
+ int irq, ret;
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xdev->dev = &pdev->dev;
+ ddev = &xdev->common;
+ ddev->dev = &pdev->dev;
+ node = xdev->dev->of_node;
+ /* xilinx_dpdma_remove() retrieves xdev via platform_get_drvdata() */
+ platform_set_drvdata(pdev, xdev);
+
+ xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
+ if (IS_ERR(xdev->axi_clk))
+ return PTR_ERR(xdev->axi_clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xdev->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xdev->reg))
+ return PTR_ERR(xdev->reg);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(xdev->dev, "failed to get platform irq\n");
+ return irq;
+ }
+
+ ret = devm_request_irq(xdev->dev, irq, xilinx_dpdma_irq_handler,
+ IRQF_SHARED, dev_name(xdev->dev), xdev);
+ if (ret) {
+ dev_err(xdev->dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&xdev->common.channels);
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
+ ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);
+
+ ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
+ ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
+ ddev->device_prep_slave_sg = xilinx_dpdma_prep_slave_sg;
+ ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
+ ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
+ ddev->device_tx_status = xilinx_dpdma_tx_status;
+ ddev->device_issue_pending = xilinx_dpdma_issue_pending;
+ ddev->device_config = xilinx_dpdma_config;
+ ddev->device_pause = xilinx_dpdma_pause;
+ ddev->device_resume = xilinx_dpdma_resume;
+ ddev->device_terminate_all = xilinx_dpdma_terminate_all;
+ ddev->device_synchronize = xilinx_dpdma_synchronize;
+ ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
+ ddev->directions = BIT(DMA_MEM_TO_DEV);
+ ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+ for_each_child_of_node(node, child) {
+ chan = xilinx_dpdma_chan_probe(child, xdev);
+ if (IS_ERR(chan)) {
+ dev_err(xdev->dev, "failed to probe a channel\n");
+ ret = PTR_ERR(chan);
+ goto error;
+ }
+ }
+
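+ /* Use 64 bit (48 bit effective) addressing when dma_addr_t is wider */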
+ xdev->ext_addr = sizeof(dma_addr_t) > 4;
+ if (xdev->ext_addr)
+ xdev->desc_addr = xilinx_dpdma_sw_desc_addr_64;
+ else
+ xdev->desc_addr = xilinx_dpdma_sw_desc_addr_32;
+
+ ret = clk_prepare_enable(xdev->axi_clk);
+ if (ret) {
+ dev_err(xdev->dev, "failed to enable the axi clock\n");
+ goto error;
+ }
+
+ ret = dma_async_device_register(ddev);
+ if (ret) {
+ dev_err(xdev->dev, "failed to register the dma device\n");
+ goto error_dma_async;
+ }
+
+ ret = of_dma_controller_register(xdev->dev->of_node,
+ of_dma_xilinx_xlate, ddev);
+ if (ret) {
+ dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
+ goto error_of_dma;
+ }
+
+ xilinx_dpdma_enable_intr(xdev);
+
+ xilinx_dpdma_debugfs_init(&pdev->dev);
+
+ dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");
+
+ return 0;
+
+error_of_dma:
+ dma_async_device_unregister(ddev);
+error_dma_async:
+ clk_disable_unprepare(xdev->axi_clk);
+error:
+ for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
+ if (xdev->chan[i])
+ xilinx_dpdma_chan_remove(xdev->chan[i]);
+
+ return ret;
+}
+
+static int xilinx_dpdma_remove(struct platform_device *pdev)
+{
+ struct xilinx_dpdma_device *xdev;
+ unsigned int i;
+
+ xdev = platform_get_drvdata(pdev);
+
+ xilinx_dpdma_disable_intr(xdev);
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&xdev->common);
+ clk_disable_unprepare(xdev->axi_clk);
+
+ for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
+ if (xdev->chan[i])
+ xilinx_dpdma_chan_remove(xdev->chan[i]);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_dpdma_of_match[] = {
+ { .compatible = "xlnx,dpdma",},
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);
+
+static struct platform_driver xilinx_dpdma_driver = {
+ .probe = xilinx_dpdma_probe,
+ .remove = xilinx_dpdma_remove,
+ .driver = {
+ .name = "xilinx-dpdma",
+ .of_match_table = xilinx_dpdma_of_match,
+ },
+};
+
+module_platform_driver(xilinx_dpdma_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx DPDMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_frmbuf.c b/drivers/dma/xilinx/xilinx_frmbuf.c
new file mode 100644
index 000000000000..f784eda90156
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_frmbuf.c
@@ -0,0 +1,1709 @@
+/*
+ * DMAEngine driver for Xilinx Framebuffer IP
+ *
+ * Copyright (C) 2016,2017 Xilinx, Inc. All rights reserved.
+ *
+ * Authors: Radhey Shyam Pandey <radheys@xilinx.com>
+ * John Nichols <jnichol@xilinx.com>
+ * Jeffrey Mouroux <jmouroux@xilinx.com>
+ *
+ * Based on the Freescale DMA driver.
+ *
+ * Description:
+ * The AXI Framebuffer core is a soft Xilinx IP core that
+ * provides high-bandwidth direct memory access between memory
+ * and AXI4-Stream.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/dmapool.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "../dmaengine.h"
+
+/* Register/Descriptor Offsets */
+#define XILINX_FRMBUF_CTRL_OFFSET 0x00
+#define XILINX_FRMBUF_GIE_OFFSET 0x04
+#define XILINX_FRMBUF_IE_OFFSET 0x08
+#define XILINX_FRMBUF_ISR_OFFSET 0x0c
+#define XILINX_FRMBUF_WIDTH_OFFSET 0x10
+#define XILINX_FRMBUF_HEIGHT_OFFSET 0x18
+#define XILINX_FRMBUF_STRIDE_OFFSET 0x20
+#define XILINX_FRMBUF_FMT_OFFSET 0x28
+#define XILINX_FRMBUF_ADDR_OFFSET 0x30
+#define XILINX_FRMBUF_ADDR2_OFFSET 0x3c
+#define XILINX_FRMBUF_FID_OFFSET 0x48
+
+/* Control Registers */
+#define XILINX_FRMBUF_CTRL_AP_START BIT(0)
+#define XILINX_FRMBUF_CTRL_AP_DONE BIT(1)
+#define XILINX_FRMBUF_CTRL_AP_IDLE BIT(2)
+#define XILINX_FRMBUF_CTRL_AP_READY BIT(3)
+#define XILINX_FRMBUF_CTRL_FLUSH BIT(5)
+#define XILINX_FRMBUF_CTRL_FLUSH_DONE BIT(6)
+#define XILINX_FRMBUF_CTRL_AUTO_RESTART BIT(7)
+#define XILINX_FRMBUF_GIE_EN BIT(0)
+
+/* Interrupt Status and Control */
+#define XILINX_FRMBUF_IE_AP_DONE BIT(0)
+#define XILINX_FRMBUF_IE_AP_READY BIT(1)
+
+#define XILINX_FRMBUF_ISR_AP_DONE_IRQ BIT(0)
+#define XILINX_FRMBUF_ISR_AP_READY_IRQ BIT(1)
+
+#define XILINX_FRMBUF_ISR_ALL_IRQ_MASK \
+ (XILINX_FRMBUF_ISR_AP_DONE_IRQ | \
+ XILINX_FRMBUF_ISR_AP_READY_IRQ)
+
+/* Video Format Register Settings */
+#define XILINX_FRMBUF_FMT_RGBX8 10
+#define XILINX_FRMBUF_FMT_YUVX8 11
+#define XILINX_FRMBUF_FMT_YUYV8 12
+#define XILINX_FRMBUF_FMT_RGBA8 13
+#define XILINX_FRMBUF_FMT_YUVA8 14
+#define XILINX_FRMBUF_FMT_RGBX10 15
+#define XILINX_FRMBUF_FMT_YUVX10 16
+#define XILINX_FRMBUF_FMT_Y_UV8 18
+#define XILINX_FRMBUF_FMT_Y_UV8_420 19
+#define XILINX_FRMBUF_FMT_RGB8 20
+#define XILINX_FRMBUF_FMT_YUV8 21
+#define XILINX_FRMBUF_FMT_Y_UV10 22
+#define XILINX_FRMBUF_FMT_Y_UV10_420 23
+#define XILINX_FRMBUF_FMT_Y8 24
+#define XILINX_FRMBUF_FMT_Y10 25
+#define XILINX_FRMBUF_FMT_BGRA8 26
+#define XILINX_FRMBUF_FMT_BGRX8 27
+#define XILINX_FRMBUF_FMT_UYVY8 28
+#define XILINX_FRMBUF_FMT_BGR8 29
+#define XILINX_FRMBUF_FMT_RGBX12 30
+#define XILINX_FRMBUF_FMT_RGB16 35
+
+/* FID Register */
+#define XILINX_FRMBUF_FID_MASK BIT(0)
+
+#define XILINX_FRMBUF_ALIGN_MUL 8
+
+#define WAIT_FOR_FLUSH_DONE 25
+
+/* Property flags denoting features enabled in the IP or device tree */
+#define XILINX_PPC_PROP BIT(0)
+#define XILINX_FLUSH_PROP BIT(1)
+#define XILINX_FID_PROP BIT(2)
+#define XILINX_CLK_PROP BIT(3)
+
+#define XILINX_FRMBUF_MAX_HEIGHT (4320)
+#define XILINX_FRMBUF_MIN_HEIGHT (64)
+#define XILINX_FRMBUF_MAX_WIDTH (8192)
+#define XILINX_FRMBUF_MIN_WIDTH (64)
+
+/**
+ * struct xilinx_frmbuf_desc_hw - Hardware Descriptor
+ * @luma_plane_addr: Luma or packed plane buffer address
+ * @chroma_plane_addr: Chroma plane buffer address
+ * @vsize: Vertical Size
+ * @hsize: Horizontal Size
+ * @stride: Number of bytes between the first
+ * pixels of each horizontal line
+ */
+struct xilinx_frmbuf_desc_hw {
+ dma_addr_t luma_plane_addr;
+ dma_addr_t chroma_plane_addr;
+ u32 vsize;
+ u32 hsize;
+ u32 stride;
+};
+
+/**
+ * struct xilinx_frmbuf_tx_descriptor - Per Transaction structure
+ * @async_tx: Async transaction descriptor
+ * @hw: Hardware descriptor
+ * @node: Node in the channel descriptors list
+ * @fid: Field ID of buffer
+ * @earlycb: Whether the callback should be called when in staged state
+ */
+struct xilinx_frmbuf_tx_descriptor {
+ struct dma_async_tx_descriptor async_tx;
+ struct xilinx_frmbuf_desc_hw hw;
+ struct list_head node;
+ u32 fid;
+ u32 earlycb;
+};
+
+/**
+ * struct xilinx_frmbuf_chan - Driver specific dma channel structure
+ * @xdev: Driver specific device structure
+ * @lock: Descriptor operation lock
+ * @chan_node: Member of a list of framebuffer channel instances
+ * @pending_list: Descriptors waiting
+ * @done_list: Complete descriptors
+ * @staged_desc: Next buffer to be programmed
+ * @active_desc: Currently active buffer being read/written to
+ * @common: DMA common channel
+ * @dev: The dma device
+ * @write_addr: callback that will write dma addresses to IP (32 or 64 bit)
+ * @irq: Channel IRQ
+ * @direction: Transfer direction
+ * @idle: Channel idle state
+ * @tasklet: Cleanup work after irq
+ * @vid_fmt: Reference to currently assigned video format description
+ * @hw_fid: FID enabled in hardware flag
+ * @mode: Select operation mode
+ */
+struct xilinx_frmbuf_chan {
+ struct xilinx_frmbuf_device *xdev;
+ /* Descriptor operation lock */
+ spinlock_t lock;
+ struct list_head chan_node;
+ struct list_head pending_list;
+ struct list_head done_list;
+ struct xilinx_frmbuf_tx_descriptor *staged_desc;
+ struct xilinx_frmbuf_tx_descriptor *active_desc;
+ struct dma_chan common;
+ struct device *dev;
+ void (*write_addr)(struct xilinx_frmbuf_chan *chan, u32 reg,
+ dma_addr_t value);
+ int irq;
+ enum dma_transfer_direction direction;
+ bool idle;
+ struct tasklet_struct tasklet;
+ const struct xilinx_frmbuf_format_desc *vid_fmt;
+ bool hw_fid;
+ enum operation_mode mode;
+};
+
+/**
+ * struct xilinx_frmbuf_format_desc - lookup table to match fourcc to format
+ * @dts_name: Device tree name for this entry.
+ * @id: Format ID
+ * @bpw: Bits of pixel data + padding in a memory word (luma plane for semi-pl)
+ * @ppw: Number of pixels represented in a memory word (luma plane for semi-pl)
+ * @num_planes: Expected number of plane buffers in framebuffer for this format
+ * @drm_fmt: DRM video framework equivalent fourcc code
+ * @v4l2_fmt: Video 4 Linux framework equivalent fourcc code
+ * @fmt_bitmask: Flag identifying this format in device-specific "enabled"
+ * bitmap
+ */
+struct xilinx_frmbuf_format_desc {
+ const char *dts_name;
+ u32 id;
+ u32 bpw;
+ u32 ppw;
+ u32 num_planes;
+ u32 drm_fmt;
+ u32 v4l2_fmt;
+ u32 fmt_bitmask;
+};
+
+static LIST_HEAD(frmbuf_chan_list);
+static DEFINE_MUTEX(frmbuf_chan_list_lock);
+
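+/*
+ * Format lookup table. Several dts_name entries are intentionally duplicated
+ * (e.g. "nv16", "nv12", "xv15", "xv20"): the first instance maps a DTS name
+ * to the DRM fourcc and the multi-planar V4L2 fourcc, while the second maps
+ * the same hardware format id to the contiguous single-buffer V4L2 fourcc,
+ * with drm_fmt left 0 so DRM clients skip it.
+ */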
+static const struct xilinx_frmbuf_format_desc xilinx_frmbuf_formats[] = {
+ {
+ .dts_name = "xbgr8888",
+ .id = XILINX_FRMBUF_FMT_RGBX8,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_XBGR8888,
+ .v4l2_fmt = V4L2_PIX_FMT_BGRX32,
+ .fmt_bitmask = BIT(0),
+ },
+ {
+ .dts_name = "xbgr2101010",
+ .id = XILINX_FRMBUF_FMT_RGBX10,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_XBGR2101010,
+ .v4l2_fmt = V4L2_PIX_FMT_XBGR30,
+ .fmt_bitmask = BIT(1),
+ },
+ {
+ .dts_name = "xrgb8888",
+ .id = XILINX_FRMBUF_FMT_BGRX8,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_XRGB8888,
+ .v4l2_fmt = V4L2_PIX_FMT_XBGR32,
+ .fmt_bitmask = BIT(2),
+ },
+ {
+ .dts_name = "xvuy8888",
+ .id = XILINX_FRMBUF_FMT_YUVX8,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_XVUY8888,
+ .v4l2_fmt = V4L2_PIX_FMT_XVUY32,
+ .fmt_bitmask = BIT(5),
+ },
+ {
+ .dts_name = "vuy888",
+ .id = XILINX_FRMBUF_FMT_YUV8,
+ .bpw = 24,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_VUY888,
+ .v4l2_fmt = V4L2_PIX_FMT_VUY24,
+ .fmt_bitmask = BIT(6),
+ },
+ {
+ .dts_name = "yuvx2101010",
+ .id = XILINX_FRMBUF_FMT_YUVX10,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_XVUY2101010,
+ .v4l2_fmt = V4L2_PIX_FMT_XVUY10,
+ .fmt_bitmask = BIT(7),
+ },
+ {
+ .dts_name = "yuyv",
+ .id = XILINX_FRMBUF_FMT_YUYV8,
+ .bpw = 32,
+ .ppw = 2,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_YUYV,
+ .v4l2_fmt = V4L2_PIX_FMT_YUYV,
+ .fmt_bitmask = BIT(8),
+ },
+ {
+ .dts_name = "uyvy",
+ .id = XILINX_FRMBUF_FMT_UYVY8,
+ .bpw = 32,
+ .ppw = 2,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_UYVY,
+ .v4l2_fmt = V4L2_PIX_FMT_UYVY,
+ .fmt_bitmask = BIT(9),
+ },
+ {
+ .dts_name = "nv16",
+ .id = XILINX_FRMBUF_FMT_Y_UV8,
+ .bpw = 32,
+ .ppw = 4,
+ .num_planes = 2,
+ .drm_fmt = DRM_FORMAT_NV16,
+ .v4l2_fmt = V4L2_PIX_FMT_NV16M,
+ .fmt_bitmask = BIT(11),
+ },
+ {
+ .dts_name = "nv16",
+ .id = XILINX_FRMBUF_FMT_Y_UV8,
+ .bpw = 32,
+ .ppw = 4,
+ .num_planes = 2,
+ .drm_fmt = 0,
+ .v4l2_fmt = V4L2_PIX_FMT_NV16,
+ .fmt_bitmask = BIT(11),
+ },
+ {
+ .dts_name = "nv12",
+ .id = XILINX_FRMBUF_FMT_Y_UV8_420,
+ .bpw = 32,
+ .ppw = 4,
+ .num_planes = 2,
+ .drm_fmt = DRM_FORMAT_NV12,
+ .v4l2_fmt = V4L2_PIX_FMT_NV12M,
+ .fmt_bitmask = BIT(12),
+ },
+ {
+ .dts_name = "nv12",
+ .id = XILINX_FRMBUF_FMT_Y_UV8_420,
+ .bpw = 32,
+ .ppw = 4,
+ .num_planes = 2,
+ .drm_fmt = 0,
+ .v4l2_fmt = V4L2_PIX_FMT_NV12,
+ .fmt_bitmask = BIT(12),
+ },
+ {
+ .dts_name = "xv15",
+ .id = XILINX_FRMBUF_FMT_Y_UV10_420,
+ .bpw = 32,
+ .ppw = 3,
+ .num_planes = 2,
+ .drm_fmt = DRM_FORMAT_XV15,
+ .v4l2_fmt = V4L2_PIX_FMT_XV15M,
+ .fmt_bitmask = BIT(13),
+ },
+ {
+ .dts_name = "xv15",
+ .id = XILINX_FRMBUF_FMT_Y_UV10_420,
+ .bpw = 32,
+ .ppw = 3,
+ .num_planes = 2,
+ .drm_fmt = 0,
+ .v4l2_fmt = V4L2_PIX_FMT_XV15,
+ .fmt_bitmask = BIT(13),
+ },
+ {
+ .dts_name = "xv20",
+ .id = XILINX_FRMBUF_FMT_Y_UV10,
+ .bpw = 32,
+ .ppw = 3,
+ .num_planes = 2,
+ .drm_fmt = DRM_FORMAT_XV20,
+ .v4l2_fmt = V4L2_PIX_FMT_XV20M,
+ .fmt_bitmask = BIT(14),
+ },
+ {
+ .dts_name = "xv20",
+ .id = XILINX_FRMBUF_FMT_Y_UV10,
+ .bpw = 32,
+ .ppw = 3,
+ .num_planes = 2,
+ .drm_fmt = 0,
+ .v4l2_fmt = V4L2_PIX_FMT_XV20,
+ .fmt_bitmask = BIT(14),
+ },
+ {
+ .dts_name = "bgr888",
+ .id = XILINX_FRMBUF_FMT_RGB8,
+ .bpw = 24,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_BGR888,
+ .v4l2_fmt = V4L2_PIX_FMT_RGB24,
+ .fmt_bitmask = BIT(15),
+ },
+ {
+ .dts_name = "y8",
+ .id = XILINX_FRMBUF_FMT_Y8,
+ .bpw = 32,
+ .ppw = 4,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_Y8,
+ .v4l2_fmt = V4L2_PIX_FMT_GREY,
+ .fmt_bitmask = BIT(16),
+ },
+ {
+ .dts_name = "y10",
+ .id = XILINX_FRMBUF_FMT_Y10,
+ .bpw = 32,
+ .ppw = 3,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_Y10,
+ .v4l2_fmt = V4L2_PIX_FMT_Y10,
+ .fmt_bitmask = BIT(17),
+ },
+ {
+ .dts_name = "rgb888",
+ .id = XILINX_FRMBUF_FMT_BGR8,
+ .bpw = 24,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .v4l2_fmt = V4L2_PIX_FMT_BGR24,
+ .fmt_bitmask = BIT(18),
+ },
+ {
+ .dts_name = "abgr8888",
+ .id = XILINX_FRMBUF_FMT_RGBA8,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_ABGR8888,
+ .v4l2_fmt = 0,
+ .fmt_bitmask = BIT(19),
+ },
+ {
+ .dts_name = "argb8888",
+ .id = XILINX_FRMBUF_FMT_BGRA8,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_ARGB8888,
+ .v4l2_fmt = 0,
+ .fmt_bitmask = BIT(20),
+ },
+ {
+ .dts_name = "avuy8888",
+ .id = XILINX_FRMBUF_FMT_YUVA8,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_AVUY,
+ .v4l2_fmt = 0,
+ .fmt_bitmask = BIT(21),
+ },
+ {
+ .dts_name = "xbgr4121212",
+ .id = XILINX_FRMBUF_FMT_RGBX12,
+ .bpw = 40,
+ .ppw = 1,
+ .num_planes = 1,
+ .v4l2_fmt = V4L2_PIX_FMT_XBGR40,
+ .fmt_bitmask = BIT(22),
+ },
+ {
+ .dts_name = "rgb16",
+ .id = XILINX_FRMBUF_FMT_RGB16,
+ .bpw = 48,
+ .ppw = 1,
+ .num_planes = 1,
+ .v4l2_fmt = V4L2_PIX_FMT_BGR48,
+ .fmt_bitmask = BIT(23),
+ },
+};
+
+/**
+ * struct xilinx_frmbuf_feature - dt or IP property structure
+ * @direction: dma transfer mode and direction
+ * @flags: Bitmask of properties enabled in IP or dt
+ */
+struct xilinx_frmbuf_feature {
+ enum dma_transfer_direction direction;
+ u32 flags;
+};
+
+/**
+ * struct xilinx_frmbuf_device - dma device structure
+ * @regs: I/O mapped base address
+ * @dev: Device Structure
+ * @common: DMA device structure
+ * @chan: Driver specific dma channel
+ * @rst_gpio: GPIO reset
+ * @enabled_vid_fmts: Bitmask of video formats enabled in hardware
+ * @drm_memory_fmts: Array of supported DRM fourcc codes
+ * @drm_fmt_cnt: Count of supported DRM fourcc codes
+ * @v4l2_memory_fmts: Array of supported V4L2 fourcc codes
+ * @v4l2_fmt_cnt: Count of supported V4L2 fourcc codes
+ * @cfg: Pointer to Framebuffer Feature config struct
+ * @max_width: Maximum pixel width supported in IP.
+ * @max_height: Maximum number of lines supported in IP.
+ * @ap_clk: Video core clock
+ */
+struct xilinx_frmbuf_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct dma_device common;
+ struct xilinx_frmbuf_chan chan;
+ struct gpio_desc *rst_gpio;
+ u32 enabled_vid_fmts;
+ u32 drm_memory_fmts[ARRAY_SIZE(xilinx_frmbuf_formats)];
+ u32 drm_fmt_cnt;
+ u32 v4l2_memory_fmts[ARRAY_SIZE(xilinx_frmbuf_formats)];
+ u32 v4l2_fmt_cnt;
+ const struct xilinx_frmbuf_feature *cfg;
+ u32 max_width;
+ u32 max_height;
+ struct clk *ap_clk;
+};
+
+static const struct xilinx_frmbuf_feature xlnx_fbwr_cfg_v20 = {
+ .direction = DMA_DEV_TO_MEM,
+};
+
+static const struct xilinx_frmbuf_feature xlnx_fbwr_cfg_v21 = {
+ .direction = DMA_DEV_TO_MEM,
+ .flags = XILINX_PPC_PROP | XILINX_FLUSH_PROP
+ | XILINX_FID_PROP | XILINX_CLK_PROP,
+};
+
+static const struct xilinx_frmbuf_feature xlnx_fbrd_cfg_v20 = {
+ .direction = DMA_MEM_TO_DEV,
+};
+
+static const struct xilinx_frmbuf_feature xlnx_fbrd_cfg_v21 = {
+ .direction = DMA_MEM_TO_DEV,
+ .flags = XILINX_PPC_PROP | XILINX_FLUSH_PROP
+ | XILINX_FID_PROP | XILINX_CLK_PROP,
+};
+
+static const struct of_device_id xilinx_frmbuf_of_ids[] = {
+ { .compatible = "xlnx,axi-frmbuf-wr-v2",
+ .data = (void *)&xlnx_fbwr_cfg_v20},
+ { .compatible = "xlnx,axi-frmbuf-wr-v2.1",
+ .data = (void *)&xlnx_fbwr_cfg_v21},
+ { .compatible = "xlnx,axi-frmbuf-rd-v2",
+ .data = (void *)&xlnx_fbrd_cfg_v20},
+ { .compatible = "xlnx,axi-frmbuf-rd-v2.1",
+ .data = (void *)&xlnx_fbrd_cfg_v21},
+ {/* end of list */}
+};
+
+/* Channel/descriptor lookup macros and register I/O helpers */
+#define to_xilinx_chan(chan) \
+ container_of(chan, struct xilinx_frmbuf_chan, common)
+#define to_dma_tx_descriptor(tx) \
+ container_of(tx, struct xilinx_frmbuf_tx_descriptor, async_tx)
+
+static inline u32 frmbuf_read(struct xilinx_frmbuf_chan *chan, u32 reg)
+{
+ return ioread32(chan->xdev->regs + reg);
+}
+
+static inline void frmbuf_write(struct xilinx_frmbuf_chan *chan, u32 reg,
+ u32 value)
+{
+ iowrite32(value, chan->xdev->regs + reg);
+}
+
+static inline void frmbuf_writeq(struct xilinx_frmbuf_chan *chan, u32 reg,
+ u64 value)
+{
+ iowrite32(lower_32_bits(value), chan->xdev->regs + reg);
+ iowrite32(upper_32_bits(value), chan->xdev->regs + reg + 4);
+}
+
+static void writeq_addr(struct xilinx_frmbuf_chan *chan, u32 reg,
+ dma_addr_t addr)
+{
+ frmbuf_writeq(chan, reg, (u64)addr);
+}
+
+static void write_addr(struct xilinx_frmbuf_chan *chan, u32 reg,
+ dma_addr_t addr)
+{
+ frmbuf_write(chan, reg, addr);
+}
+
+static inline void frmbuf_clr(struct xilinx_frmbuf_chan *chan, u32 reg,
+ u32 clr)
+{
+ frmbuf_write(chan, reg, frmbuf_read(chan, reg) & ~clr);
+}
+
+static inline void frmbuf_set(struct xilinx_frmbuf_chan *chan, u32 reg,
+ u32 set)
+{
+ frmbuf_write(chan, reg, frmbuf_read(chan, reg) | set);
+}
+
+static void frmbuf_init_format_array(struct xilinx_frmbuf_device *xdev)
+{
+ u32 i, cnt;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_frmbuf_formats); i++) {
+ if (!(xdev->enabled_vid_fmts &
+ xilinx_frmbuf_formats[i].fmt_bitmask))
+ continue;
+
+ if (xilinx_frmbuf_formats[i].drm_fmt) {
+ cnt = xdev->drm_fmt_cnt++;
+ xdev->drm_memory_fmts[cnt] =
+ xilinx_frmbuf_formats[i].drm_fmt;
+ }
+
+ if (xilinx_frmbuf_formats[i].v4l2_fmt) {
+ cnt = xdev->v4l2_fmt_cnt++;
+ xdev->v4l2_memory_fmts[cnt] =
+ xilinx_frmbuf_formats[i].v4l2_fmt;
+ }
+ }
+}
+
+static struct xilinx_frmbuf_chan *frmbuf_find_chan(struct dma_chan *chan)
+{
+ struct xilinx_frmbuf_chan *xil_chan;
+ bool found_xchan = false;
+
+ mutex_lock(&frmbuf_chan_list_lock);
+ list_for_each_entry(xil_chan, &frmbuf_chan_list, chan_node) {
+ if (chan == &xil_chan->common) {
+ found_xchan = true;
+ break;
+ }
+ }
+ mutex_unlock(&frmbuf_chan_list_lock);
+
+ if (!found_xchan) {
+ dev_dbg(chan->device->dev,
+ "dma chan not a Video Framebuffer channel instance\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return xil_chan;
+}
+
+static struct xilinx_frmbuf_device *frmbuf_find_dev(struct dma_chan *chan)
+{
+ struct xilinx_frmbuf_chan *xchan;
+ struct xilinx_frmbuf_device *xdev;
+ bool is_frmbuf_chan = false;
+
+ mutex_lock(&frmbuf_chan_list_lock);
+ list_for_each_entry(xchan, &frmbuf_chan_list, chan_node) {
+ if (chan == &xchan->common) {
+ is_frmbuf_chan = true;
+ break;
+ }
+ }
+ mutex_unlock(&frmbuf_chan_list_lock);
+
+ if (!is_frmbuf_chan)
+ return ERR_PTR(-ENODEV);
+
+ xchan = to_xilinx_chan(chan);
+ xdev = container_of(xchan, struct xilinx_frmbuf_device, chan);
+
+ return xdev;
+}
+
+static int frmbuf_verify_format(struct dma_chan *chan, u32 fourcc, u32 type)
+{
+ struct xilinx_frmbuf_chan *xil_chan = to_xilinx_chan(chan);
+ u32 i, sz = ARRAY_SIZE(xilinx_frmbuf_formats);
+
+ for (i = 0; i < sz; i++) {
+ if ((type == XDMA_DRM &&
+ fourcc != xilinx_frmbuf_formats[i].drm_fmt) ||
+ (type == XDMA_V4L2 &&
+ fourcc != xilinx_frmbuf_formats[i].v4l2_fmt))
+ continue;
+
+ if (!(xilinx_frmbuf_formats[i].fmt_bitmask &
+ xil_chan->xdev->enabled_vid_fmts))
+ return -EINVAL;
+
+ /*
+ * The Alpha color formats are supported in Framebuffer Read
+ * IP only as corresponding DRM formats.
+ */
+ if (type == XDMA_DRM &&
+ (xilinx_frmbuf_formats[i].drm_fmt == DRM_FORMAT_ABGR8888 ||
+ xilinx_frmbuf_formats[i].drm_fmt == DRM_FORMAT_ARGB8888 ||
+ xilinx_frmbuf_formats[i].drm_fmt == DRM_FORMAT_AVUY) &&
+ xil_chan->direction != DMA_MEM_TO_DEV)
+ return -EINVAL;
+
+ xil_chan->vid_fmt = &xilinx_frmbuf_formats[i];
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void xilinx_xdma_set_config(struct dma_chan *chan, u32 fourcc, u32 type)
+{
+ struct xilinx_frmbuf_chan *xil_chan;
+ int ret;
+
+ xil_chan = frmbuf_find_chan(chan);
+ if (IS_ERR(xil_chan))
+ return;
+ ret = frmbuf_verify_format(chan, fourcc, type);
+ if (ret == -EINVAL) {
+ dev_err(chan->device->dev,
+ "Framebuffer not configured for fourcc 0x%x\n",
+ fourcc);
+ return;
+ }
+}
+
+void xilinx_xdma_set_mode(struct dma_chan *chan, enum operation_mode mode)
+{
+ struct xilinx_frmbuf_chan *xil_chan;
+
+ xil_chan = frmbuf_find_chan(chan);
+ if (IS_ERR(xil_chan))
+ return;
+
+ xil_chan->mode = mode;
+}
+EXPORT_SYMBOL_GPL(xilinx_xdma_set_mode);
+
+void xilinx_xdma_drm_config(struct dma_chan *chan, u32 drm_fourcc)
+{
+ xilinx_xdma_set_config(chan, drm_fourcc, XDMA_DRM);
+}
+EXPORT_SYMBOL_GPL(xilinx_xdma_drm_config);
+
+void xilinx_xdma_v4l2_config(struct dma_chan *chan, u32 v4l2_fourcc)
+{
+ xilinx_xdma_set_config(chan, v4l2_fourcc, XDMA_V4L2);
+}
+EXPORT_SYMBOL_GPL(xilinx_xdma_v4l2_config);
+
+int xilinx_xdma_get_drm_vid_fmts(struct dma_chan *chan, u32 *fmt_cnt,
+ u32 **fmts)
+{
+ struct xilinx_frmbuf_device *xdev;
+
+ xdev = frmbuf_find_dev(chan);
+
+ if (IS_ERR(xdev))
+ return PTR_ERR(xdev);
+
+ *fmt_cnt = xdev->drm_fmt_cnt;
+ *fmts = xdev->drm_memory_fmts;
+
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_get_drm_vid_fmts);
+
+int xilinx_xdma_get_v4l2_vid_fmts(struct dma_chan *chan, u32 *fmt_cnt,
+ u32 **fmts)
+{
+ struct xilinx_frmbuf_device *xdev;
+
+ xdev = frmbuf_find_dev(chan);
+
+ if (IS_ERR(xdev))
+ return PTR_ERR(xdev);
+
+ *fmt_cnt = xdev->v4l2_fmt_cnt;
+ *fmts = xdev->v4l2_memory_fmts;
+
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_get_v4l2_vid_fmts);
+
+int xilinx_xdma_get_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx, u32 *fid)
+{
+ struct xilinx_frmbuf_device *xdev;
+ struct xilinx_frmbuf_tx_descriptor *desc;
+
+ xdev = frmbuf_find_dev(chan);
+ if (IS_ERR(xdev))
+ return PTR_ERR(xdev);
+
+ if (!async_tx || !fid)
+ return -EINVAL;
+
+ if (xdev->chan.direction != DMA_DEV_TO_MEM)
+ return -EINVAL;
+
+ desc = to_dma_tx_descriptor(async_tx);
+ if (!desc)
+ return -EINVAL;
+
+ *fid = desc->fid;
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_get_fid);
+
+int xilinx_xdma_set_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx, u32 fid)
+{
+ struct xilinx_frmbuf_device *xdev;
+ struct xilinx_frmbuf_tx_descriptor *desc;
+
+ if (fid > 1 || !async_tx)
+ return -EINVAL;
+
+ xdev = frmbuf_find_dev(chan);
+ if (IS_ERR(xdev))
+ return PTR_ERR(xdev);
+
+ if (xdev->chan.direction != DMA_MEM_TO_DEV)
+ return -EINVAL;
+
+ desc = to_dma_tx_descriptor(async_tx);
+ if (!desc)
+ return -EINVAL;
+
+ desc->fid = fid;
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_set_fid);
+
+int xilinx_xdma_get_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 *earlycb)
+{
+ struct xilinx_frmbuf_device *xdev;
+ struct xilinx_frmbuf_tx_descriptor *desc;
+
+ xdev = frmbuf_find_dev(chan);
+ if (IS_ERR(xdev))
+ return PTR_ERR(xdev);
+
+ if (!async_tx || !earlycb)
+ return -EINVAL;
+
+ desc = to_dma_tx_descriptor(async_tx);
+ if (!desc)
+ return -EINVAL;
+
+ *earlycb = desc->earlycb;
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_get_earlycb);
+
+int xilinx_xdma_set_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 earlycb)
+{
+ struct xilinx_frmbuf_device *xdev;
+ struct xilinx_frmbuf_tx_descriptor *desc;
+
+ if (!async_tx)
+ return -EINVAL;
+
+ xdev = frmbuf_find_dev(chan);
+ if (IS_ERR(xdev))
+ return PTR_ERR(xdev);
+
+ desc = to_dma_tx_descriptor(async_tx);
+ if (!desc)
+ return -EINVAL;
+
+ desc->earlycb = earlycb;
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_set_earlycb);
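+/*
+ * Illustrative sketch (not part of the driver) of how a DMA client is
+ * expected to use the exported helpers above; "vdma" is a hypothetical
+ * dma-names entry and error handling is omitted:
+ *
+ *	chan = of_dma_request_slave_channel(node, "vdma");
+ *
+ *	// Select the memory format before preparing a transfer so that
+ *	// chan->vid_fmt is valid in the prep_interleaved callback below.
+ *	xilinx_xdma_drm_config(chan, DRM_FORMAT_NV12);
+ *
+ *	// Optionally, on a prepared descriptor txd:
+ *	xilinx_xdma_set_fid(chan, txd, 0);	// framebuffer read IP only
+ *	xilinx_xdma_set_earlycb(chan, txd, EARLY_CALLBACK);
+ */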
+
+/**
+ * of_dma_xilinx_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success or error code on error
+ */
+static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct xilinx_frmbuf_device *xdev = ofdma->of_dma_data;
+
+ return dma_get_slave_channel(&xdev->chan.common);
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors alloc and free
+ */
+
+/**
+ * xilinx_frmbuf_alloc_tx_descriptor - Allocate transaction descriptor
+ * @chan: Driver specific dma channel
+ *
+ * Return: The allocated descriptor on success and NULL on failure.
+ */
+static struct xilinx_frmbuf_tx_descriptor *
+xilinx_frmbuf_alloc_tx_descriptor(struct xilinx_frmbuf_chan *chan)
+{
+ struct xilinx_frmbuf_tx_descriptor *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ return desc;
+}
+
+/**
+ * xilinx_frmbuf_free_desc_list - Free descriptors list
+ * @chan: Driver specific dma channel
+ * @list: List to parse and delete the descriptor
+ */
+static void xilinx_frmbuf_free_desc_list(struct xilinx_frmbuf_chan *chan,
+ struct list_head *list)
+{
+ struct xilinx_frmbuf_tx_descriptor *desc, *next;
+
+ list_for_each_entry_safe(desc, next, list, node) {
+ list_del(&desc->node);
+ kfree(desc);
+ }
+}
+
+/**
+ * xilinx_frmbuf_free_descriptors - Free channel descriptors
+ * @chan: Driver specific dma channel
+ */
+static void xilinx_frmbuf_free_descriptors(struct xilinx_frmbuf_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ xilinx_frmbuf_free_desc_list(chan, &chan->pending_list);
+ xilinx_frmbuf_free_desc_list(chan, &chan->done_list);
+ kfree(chan->active_desc);
+ kfree(chan->staged_desc);
+
+ chan->staged_desc = NULL;
+ chan->active_desc = NULL;
+ INIT_LIST_HEAD(&chan->pending_list);
+ INIT_LIST_HEAD(&chan->done_list);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_frmbuf_free_chan_resources - Free channel resources
+ * @dchan: DMA channel
+ */
+static void xilinx_frmbuf_free_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
+
+ xilinx_frmbuf_free_descriptors(chan);
+}
+
+/**
+ * xilinx_frmbuf_chan_desc_cleanup - Clean channel descriptors
+ * @chan: Driver specific dma channel
+ */
+static void xilinx_frmbuf_chan_desc_cleanup(struct xilinx_frmbuf_chan *chan)
+{
+ struct xilinx_frmbuf_tx_descriptor *desc, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ list_del(&desc->node);
+
+ /* Run the link descriptor callback function */
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ callback(callback_param);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ /* Run any dependencies, then free the descriptor */
+ dma_run_dependencies(&desc->async_tx);
+ kfree(desc);
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_frmbuf_do_tasklet - Schedule completion tasklet
+ * @data: Pointer to the Xilinx frmbuf channel structure
+ */
+static void xilinx_frmbuf_do_tasklet(unsigned long data)
+{
+ struct xilinx_frmbuf_chan *chan = (struct xilinx_frmbuf_chan *)data;
+
+ xilinx_frmbuf_chan_desc_cleanup(chan);
+}
+
+/**
+ * xilinx_frmbuf_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_frmbuf_alloc_chan_resources(struct dma_chan *dchan)
+{
+ dma_cookie_init(dchan);
+
+ return 0;
+}
+
+/**
+ * xilinx_frmbuf_tx_status - Get frmbuf transaction status
+ * @dchan: DMA channel
+ * @cookie: Transaction identifier
+ * @txstate: Transaction state
+ *
+ * Return: frmbuf transaction status
+ */
+static enum dma_status xilinx_frmbuf_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(dchan, cookie, txstate);
+}
+
+/**
+ * xilinx_frmbuf_halt - Halt frmbuf channel
+ * @chan: Driver specific dma channel
+ */
+static void xilinx_frmbuf_halt(struct xilinx_frmbuf_chan *chan)
+{
+ frmbuf_clr(chan, XILINX_FRMBUF_CTRL_OFFSET,
+ XILINX_FRMBUF_CTRL_AP_START |
+ chan->mode);
+ chan->idle = true;
+}
+
+/**
+ * xilinx_frmbuf_start - Start dma channel
+ * @chan: Driver specific dma channel
+ */
+static void xilinx_frmbuf_start(struct xilinx_frmbuf_chan *chan)
+{
+ frmbuf_set(chan, XILINX_FRMBUF_CTRL_OFFSET,
+ XILINX_FRMBUF_CTRL_AP_START |
+ chan->mode);
+ chan->idle = false;
+}
+
+/**
+ * xilinx_frmbuf_complete_descriptor - Mark the active descriptor as complete
+ * @chan: Driver specific frmbuf channel
+ *
+ * This function is invoked with the channel spinlock held.
+ *
+ * CONTEXT: hardirq
+ */
+static void xilinx_frmbuf_complete_descriptor(struct xilinx_frmbuf_chan *chan)
+{
+ struct xilinx_frmbuf_tx_descriptor *desc = chan->active_desc;
+
+ /*
+ * In case of frame buffer write, read the fid register
+ * and associate it with descriptor
+ */
+ if (chan->direction == DMA_DEV_TO_MEM && chan->hw_fid)
+ desc->fid = frmbuf_read(chan, XILINX_FRMBUF_FID_OFFSET) &
+ XILINX_FRMBUF_FID_MASK;
+
+ dma_cookie_complete(&desc->async_tx);
+ list_add_tail(&desc->node, &chan->done_list);
+}
+
+/**
+ * xilinx_frmbuf_start_transfer - Starts frmbuf transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_frmbuf_start_transfer(struct xilinx_frmbuf_chan *chan)
+{
+ struct xilinx_frmbuf_tx_descriptor *desc;
+
+ if (!chan->idle)
+ return;
+
+ if (chan->staged_desc) {
+ chan->active_desc = chan->staged_desc;
+ chan->staged_desc = NULL;
+ }
+
+ if (list_empty(&chan->pending_list))
+ return;
+
+ desc = list_first_entry(&chan->pending_list,
+ struct xilinx_frmbuf_tx_descriptor,
+ node);
+
+ if (desc->earlycb == EARLY_CALLBACK_START_DESC) {
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ callback(callback_param);
+ desc->async_tx.callback = NULL;
+ chan->active_desc = desc;
+ }
+ }
+
+ /* Start the transfer */
+ chan->write_addr(chan, XILINX_FRMBUF_ADDR_OFFSET,
+ desc->hw.luma_plane_addr);
+ chan->write_addr(chan, XILINX_FRMBUF_ADDR2_OFFSET,
+ desc->hw.chroma_plane_addr);
+
+ /* HW expects these parameters to be the same for one transaction */
+ frmbuf_write(chan, XILINX_FRMBUF_WIDTH_OFFSET, desc->hw.hsize);
+ frmbuf_write(chan, XILINX_FRMBUF_STRIDE_OFFSET, desc->hw.stride);
+ frmbuf_write(chan, XILINX_FRMBUF_HEIGHT_OFFSET, desc->hw.vsize);
+ frmbuf_write(chan, XILINX_FRMBUF_FMT_OFFSET, chan->vid_fmt->id);
+
+ /* If it is framebuffer read IP set the FID */
+ if (chan->direction == DMA_MEM_TO_DEV && chan->hw_fid)
+ frmbuf_write(chan, XILINX_FRMBUF_FID_OFFSET, desc->fid);
+
+ /* Start the hardware */
+ xilinx_frmbuf_start(chan);
+ list_del(&desc->node);
+
+ /* No staging descriptor required when auto restart is disabled */
+ if (chan->mode == AUTO_RESTART)
+ chan->staged_desc = desc;
+ else
+ chan->active_desc = desc;
+}
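+/*
+ * Note on the descriptor life cycle above: in AUTO_RESTART mode the freshly
+ * programmed descriptor is parked as staged_desc and only promoted to
+ * active_desc on the next AP_READY interrupt, keeping one frame queued in
+ * hardware while the previous one completes; without auto restart the
+ * descriptor becomes active immediately.
+ */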
+
+/**
+ * xilinx_frmbuf_issue_pending - Issue pending transactions
+ * @dchan: DMA channel
+ */
+static void xilinx_frmbuf_issue_pending(struct dma_chan *dchan)
+{
+ struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ xilinx_frmbuf_start_transfer(chan);
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_frmbuf_reset - Reset frmbuf channel
+ * @chan: Driver specific dma channel
+ */
+static void xilinx_frmbuf_reset(struct xilinx_frmbuf_chan *chan)
+{
+ /* reset ip */
+ gpiod_set_value(chan->xdev->rst_gpio, 1);
+ udelay(1);
+ gpiod_set_value(chan->xdev->rst_gpio, 0);
+}
+
+/**
+ * xilinx_frmbuf_chan_reset - Reset frmbuf channel and enable interrupts
+ * @chan: Driver specific frmbuf channel
+ */
+static void xilinx_frmbuf_chan_reset(struct xilinx_frmbuf_chan *chan)
+{
+ xilinx_frmbuf_reset(chan);
+ frmbuf_write(chan, XILINX_FRMBUF_IE_OFFSET, XILINX_FRMBUF_IE_AP_READY);
+ frmbuf_write(chan, XILINX_FRMBUF_GIE_OFFSET, XILINX_FRMBUF_GIE_EN);
+}
+
+/**
+ * xilinx_frmbuf_irq_handler - frmbuf Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Xilinx frmbuf channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t xilinx_frmbuf_irq_handler(int irq, void *data)
+{
+ struct xilinx_frmbuf_chan *chan = data;
+ u32 status;
+ dma_async_tx_callback callback = NULL;
+ void *callback_param;
+ struct xilinx_frmbuf_tx_descriptor *desc;
+
+ status = frmbuf_read(chan, XILINX_FRMBUF_ISR_OFFSET);
+ if (!(status & XILINX_FRMBUF_ISR_ALL_IRQ_MASK))
+ return IRQ_NONE;
+
+ frmbuf_write(chan, XILINX_FRMBUF_ISR_OFFSET,
+ status & XILINX_FRMBUF_ISR_ALL_IRQ_MASK);
+
+ /* Check if callback function needs to be called early */
+ desc = chan->staged_desc;
+ if (desc && desc->earlycb == EARLY_CALLBACK) {
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ callback(callback_param);
+ desc->async_tx.callback = NULL;
+ }
+ }
+
+ if (status & XILINX_FRMBUF_ISR_AP_READY_IRQ) {
+ spin_lock(&chan->lock);
+ chan->idle = true;
+ if (chan->active_desc) {
+ xilinx_frmbuf_complete_descriptor(chan);
+ chan->active_desc = NULL;
+ }
+ xilinx_frmbuf_start_transfer(chan);
+ spin_unlock(&chan->lock);
+ }
+
+ tasklet_schedule(&chan->tasklet);
+ return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_frmbuf_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor
+ *
+ * Return: cookie value on success and failure value on error
+ */
+static dma_cookie_t xilinx_frmbuf_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct xilinx_frmbuf_tx_descriptor *desc = to_dma_tx_descriptor(tx);
+ struct xilinx_frmbuf_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ cookie = dma_cookie_assign(tx);
+ list_add_tail(&desc->node, &chan->pending_list);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return cookie;
+}
+
+/**
+ * xilinx_frmbuf_dma_prep_interleaved - prepare a descriptor for a
+ * DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @xt: Interleaved template pointer
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_frmbuf_dma_prep_interleaved(struct dma_chan *dchan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_frmbuf_tx_descriptor *desc;
+ struct xilinx_frmbuf_desc_hw *hw;
+ u32 vsize, hsize;
+
+ if (chan->direction != xt->dir || !chan->vid_fmt)
+ goto error;
+
+ if (!xt->numf || !xt->sgl[0].size)
+ goto error;
+
+ if (xt->frame_size != chan->vid_fmt->num_planes)
+ goto error;
+
+ vsize = xt->numf;
+ hsize = (xt->sgl[0].size * chan->vid_fmt->ppw * 8) /
+ chan->vid_fmt->bpw;
+ /* hsize must be even; round up if the calculation yielded an odd value */
+ if (hsize & 1)
+ hsize++;
+
+ if (vsize > chan->xdev->max_height || hsize > chan->xdev->max_width) {
+ dev_dbg(chan->xdev->dev,
+ "vsize %d max vsize %d hsize %d max hsize %d\n",
+ vsize, chan->xdev->max_height, hsize,
+ chan->xdev->max_width);
+ dev_err(chan->xdev->dev, "Requested size not supported!\n");
+ goto error;
+ }
+
+ desc = xilinx_frmbuf_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_frmbuf_tx_submit;
+ async_tx_ack(&desc->async_tx);
+
+ hw = &desc->hw;
+ hw->vsize = xt->numf;
+ hw->stride = xt->sgl[0].icg + xt->sgl[0].size;
+ hw->hsize = (xt->sgl[0].size * chan->vid_fmt->ppw * 8) /
+ chan->vid_fmt->bpw;
+
+ /* hsize must be even; round up if the calculation yielded an odd value */
+ if (hw->hsize & 1)
+ hw->hsize++;
+
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ hw->luma_plane_addr = xt->src_start;
+ if (xt->frame_size == 2)
+ hw->chroma_plane_addr =
+ xt->src_start +
+ xt->numf * hw->stride +
+ xt->sgl[0].src_icg;
+ } else {
+ hw->luma_plane_addr = xt->dst_start;
+ if (xt->frame_size == 2)
+ hw->chroma_plane_addr =
+ xt->dst_start +
+ xt->numf * hw->stride +
+ xt->sgl[0].dst_icg;
+ }
+
+ return &desc->async_tx;
+
+error:
+ dev_err(chan->xdev->dev,
+ "Invalid dma template or missing dma video fmt config\n");
+ return NULL;
+}
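+/*
+ * Illustrative client-side sketch for the prep routine above, describing a
+ * two-plane NV12 frame (width, height and stride are assumed caller-provided
+ * byte values; error handling omitted). The chroma plane address is derived
+ * internally from src_start + numf * stride + src_icg:
+ *
+ *	struct dma_interleaved_template *xt;
+ *
+ *	// Allocate with room for one struct data_chunk in the sgl[] array.
+ *	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
+ *	xt->dir = DMA_MEM_TO_DEV;		// framebuffer read IP
+ *	xt->src_start = luma_dma_addr;
+ *	xt->numf = height;			// lines per frame
+ *	xt->frame_size = 2;			// luma + chroma planes
+ *	xt->sgl[0].size = width;		// payload bytes per line
+ *	xt->sgl[0].icg = stride - width;	// inter-line gap in bytes
+ *	txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
+ */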
+
+/**
+ * xilinx_frmbuf_terminate_all - Halt the channel and free descriptors
+ * @dchan: Driver specific dma channel pointer
+ *
+ * Return: 0
+ */
+static int xilinx_frmbuf_terminate_all(struct dma_chan *dchan)
+{
+ struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
+
+ xilinx_frmbuf_halt(chan);
+ xilinx_frmbuf_free_descriptors(chan);
+ /* worst case frame-to-frame boundary; ensure frame output complete */
+ msleep(50);
+
+ if (chan->xdev->cfg->flags & XILINX_FLUSH_PROP) {
+ u8 count;
+
+ /*
+ * Flush the framebuffer FIFO and
+ * wait for max 50ms for flush done
+ */
+ frmbuf_set(chan, XILINX_FRMBUF_CTRL_OFFSET,
+ XILINX_FRMBUF_CTRL_FLUSH);
+ for (count = WAIT_FOR_FLUSH_DONE; count > 0; count--) {
+ if (frmbuf_read(chan, XILINX_FRMBUF_CTRL_OFFSET) &
+ XILINX_FRMBUF_CTRL_FLUSH_DONE)
+ break;
+ usleep_range(2000, 2100);
+ }
+
+ if (!count)
+ dev_err(chan->xdev->dev, "Framebuffer Flush not done!\n");
+ }
+
+ xilinx_frmbuf_chan_reset(chan);
+
+ return 0;
+}
+
+/**
+ * xilinx_frmbuf_synchronize - kill tasklet to stop further descr processing
+ * @dchan: Driver specific dma channel pointer
+ */
+static void xilinx_frmbuf_synchronize(struct dma_chan *dchan)
+{
+ struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
+
+ tasklet_kill(&chan->tasklet);
+}
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+/**
+ * xilinx_frmbuf_chan_remove - Per Channel remove function
+ * @chan: Driver specific dma channel
+ */
+static void xilinx_frmbuf_chan_remove(struct xilinx_frmbuf_chan *chan)
+{
+ /* Disable all interrupts */
+ frmbuf_clr(chan, XILINX_FRMBUF_IE_OFFSET,
+ XILINX_FRMBUF_ISR_ALL_IRQ_MASK);
+
+ tasklet_kill(&chan->tasklet);
+ list_del(&chan->common.device_node);
+
+ mutex_lock(&frmbuf_chan_list_lock);
+ list_del(&chan->chan_node);
+ mutex_unlock(&frmbuf_chan_list_lock);
+}
+
+/**
+ * xilinx_frmbuf_chan_probe - Per Channel Probing
+ * It gets channel features from the device tree entry and
+ * initializes special channel handling routines
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_frmbuf_chan_probe(struct xilinx_frmbuf_device *xdev,
+ struct device_node *node)
+{
+ struct xilinx_frmbuf_chan *chan;
+ int err;
+ u32 dma_addr_size;
+
+ chan = &xdev->chan;
+
+ chan->dev = xdev->dev;
+ chan->xdev = xdev;
+ chan->idle = true;
+ chan->mode = AUTO_RESTART;
+
+ err = of_property_read_u32(node, "xlnx,dma-addr-width",
+ &dma_addr_size);
+ if (err || (dma_addr_size != 32 && dma_addr_size != 64)) {
+ dev_err(xdev->dev, "missing or invalid addr width dts prop\n");
+ return err;
+ }
+
+ if (dma_addr_size == 64 && sizeof(dma_addr_t) == sizeof(u64))
+ chan->write_addr = writeq_addr;
+ else
+ chan->write_addr = write_addr;
+
+ if (xdev->cfg->flags & XILINX_FID_PROP)
+ chan->hw_fid = of_property_read_bool(node, "xlnx,fid");
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->pending_list);
+ INIT_LIST_HEAD(&chan->done_list);
+
+ chan->irq = irq_of_parse_and_map(node, 0);
+ if (!chan->irq) {
+ dev_err(xdev->dev, "unable to map channel IRQ\n");
+ return -EINVAL;
+ }
+
+ err = devm_request_irq(xdev->dev, chan->irq, xilinx_frmbuf_irq_handler,
+ IRQF_SHARED, "xilinx_framebuffer", chan);
+ if (err) {
+ dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
+ return err;
+ }
+
+ tasklet_init(&chan->tasklet, xilinx_frmbuf_do_tasklet,
+ (unsigned long)chan);
+
+ /*
+ * Initialize the DMA channel and add it to the DMA engine channels
+ * list.
+ */
+ chan->common.device = &xdev->common;
+
+ list_add_tail(&chan->common.device_node, &xdev->common.channels);
+
+ mutex_lock(&frmbuf_chan_list_lock);
+ list_add_tail(&chan->chan_node, &frmbuf_chan_list);
+ mutex_unlock(&frmbuf_chan_list_lock);
+
+ xilinx_frmbuf_chan_reset(chan);
+
+ return 0;
+}
+
+/**
+ * xilinx_frmbuf_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_frmbuf_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct xilinx_frmbuf_device *xdev;
+ struct resource *io;
+ enum dma_transfer_direction dma_dir;
+ const struct of_device_id *match;
+ int err;
+ u32 i, j, align, ppc;
+ int hw_vid_fmt_cnt;
+ const char *vid_fmts[ARRAY_SIZE(xilinx_frmbuf_formats)];
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xdev->dev = &pdev->dev;
+
+ match = of_match_node(xilinx_frmbuf_of_ids, node);
+ if (!match)
+ return -ENODEV;
+
+ xdev->cfg = match->data;
+
+ dma_dir = (enum dma_transfer_direction)xdev->cfg->direction;
+
+ if (xdev->cfg->flags & XILINX_CLK_PROP) {
+ xdev->ap_clk = devm_clk_get(xdev->dev, "ap_clk");
+ if (IS_ERR(xdev->ap_clk)) {
+ err = PTR_ERR(xdev->ap_clk);
+ dev_err(xdev->dev, "failed to get ap_clk (%d)\n", err);
+ return err;
+ }
+ } else {
+ dev_info(xdev->dev, "assuming clock is enabled!\n");
+ }
+
+ xdev->rst_gpio = devm_gpiod_get(&pdev->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(xdev->rst_gpio)) {
+ err = PTR_ERR(xdev->rst_gpio);
+ if (err == -EPROBE_DEFER)
+ dev_info(&pdev->dev,
+ "Probe deferred due to GPIO reset defer\n");
+ else
+ dev_err(&pdev->dev,
+ "Unable to locate reset property in dt\n");
+ return err;
+ }
+
+ gpiod_set_value_cansleep(xdev->rst_gpio, 0x0);
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xdev->regs = devm_ioremap_resource(&pdev->dev, io);
+ if (IS_ERR(xdev->regs))
+ return PTR_ERR(xdev->regs);
+
+ err = of_property_read_u32(node, "xlnx,max-height", &xdev->max_height);
+ if (err < 0) {
+ dev_err(xdev->dev, "xlnx,max-height is missing!");
+ return -EINVAL;
+ } else if (xdev->max_height > XILINX_FRMBUF_MAX_HEIGHT ||
+ xdev->max_height < XILINX_FRMBUF_MIN_HEIGHT) {
+ dev_err(&pdev->dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(node, "xlnx,max-width", &xdev->max_width);
+ if (err < 0) {
+ dev_err(xdev->dev, "xlnx,max-width is missing!");
+ return -EINVAL;
+ } else if (xdev->max_width > XILINX_FRMBUF_MAX_WIDTH ||
+ xdev->max_width < XILINX_FRMBUF_MIN_WIDTH) {
+ dev_err(&pdev->dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ /* Initialize the DMA engine */
+ if (xdev->cfg->flags & XILINX_PPC_PROP) {
+ err = of_property_read_u32(node, "xlnx,pixels-per-clock", &ppc);
+ if (err || (ppc != 1 && ppc != 2 && ppc != 4)) {
+ dev_err(&pdev->dev, "missing or invalid pixels per clock dts prop\n");
+ return err;
+ }
+
+ err = of_property_read_u32(node, "xlnx,dma-align", &align);
+ if (err)
+ align = ppc * XILINX_FRMBUF_ALIGN_MUL;
+
+ if (align < (ppc * XILINX_FRMBUF_ALIGN_MUL) ||
+ ffs(align) != fls(align)) {
+ dev_err(&pdev->dev, "invalid dma align dts prop\n");
+ return -EINVAL;
+ }
+ } else {
+ align = 16;
+ }
+
+ xdev->common.copy_align = fls(align) - 1;
+ xdev->common.dev = &pdev->dev;
+
+ if (xdev->cfg->flags & XILINX_CLK_PROP) {
+ err = clk_prepare_enable(xdev->ap_clk);
+ if (err) {
+ dev_err(&pdev->dev, " failed to enable ap_clk (%d)\n",
+ err);
+ return err;
+ }
+ }
+
+ INIT_LIST_HEAD(&xdev->common.channels);
+ dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+
+ /* Initialize the channels */
+ err = xilinx_frmbuf_chan_probe(xdev, node);
+ if (err < 0)
+ goto disable_clk;
+
+ xdev->chan.direction = dma_dir;
+
+ if (xdev->chan.direction == DMA_DEV_TO_MEM) {
+ xdev->common.directions = BIT(DMA_DEV_TO_MEM);
+ dev_info(&pdev->dev, "Xilinx AXI frmbuf DMA_DEV_TO_MEM\n");
+ } else if (xdev->chan.direction == DMA_MEM_TO_DEV) {
+ xdev->common.directions = BIT(DMA_MEM_TO_DEV);
+ dev_info(&pdev->dev, "Xilinx AXI frmbuf DMA_MEM_TO_DEV\n");
+ } else {
+ err = -EINVAL;
+ goto remove_chan;
+ }
+
+ /* read supported video formats and update internal table */
+ hw_vid_fmt_cnt = of_property_count_strings(node, "xlnx,vid-formats");
+
+ err = of_property_read_string_array(node, "xlnx,vid-formats",
+ vid_fmts, hw_vid_fmt_cnt);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Missing or invalid xlnx,vid-formats dts prop\n");
+ goto remove_chan;
+ }
+
+ for (i = 0; i < hw_vid_fmt_cnt; i++) {
+ const char *vid_fmt_name = vid_fmts[i];
+
+ for (j = 0; j < ARRAY_SIZE(xilinx_frmbuf_formats); j++) {
+ const char *dts_name =
+ xilinx_frmbuf_formats[j].dts_name;
+
+ if (strcmp(vid_fmt_name, dts_name))
+ continue;
+
+ xdev->enabled_vid_fmts |=
+ xilinx_frmbuf_formats[j].fmt_bitmask;
+ }
+ }
+
+ /* Determine supported vid framework formats */
+ frmbuf_init_format_array(xdev);
+
+ xdev->common.device_alloc_chan_resources =
+ xilinx_frmbuf_alloc_chan_resources;
+ xdev->common.device_free_chan_resources =
+ xilinx_frmbuf_free_chan_resources;
+ xdev->common.device_prep_interleaved_dma =
+ xilinx_frmbuf_dma_prep_interleaved;
+ xdev->common.device_terminate_all = xilinx_frmbuf_terminate_all;
+ xdev->common.device_synchronize = xilinx_frmbuf_synchronize;
+ xdev->common.device_tx_status = xilinx_frmbuf_tx_status;
+ xdev->common.device_issue_pending = xilinx_frmbuf_issue_pending;
+
+ platform_set_drvdata(pdev, xdev);
+
+ /* Register the DMA engine with the core */
+ err = dma_async_device_register(&xdev->common);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to register DMA engine\n");
+ goto remove_chan;
+ }
+
+ err = of_dma_controller_register(node, of_dma_xilinx_xlate, xdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Unable to register DMA to DT\n");
+ goto error;
+ }
+
+ dev_info(&pdev->dev, "Xilinx AXI FrameBuffer Engine Driver Probed!!\n");
+
+ return 0;
+error:
+ dma_async_device_unregister(&xdev->common);
+remove_chan:
+ xilinx_frmbuf_chan_remove(&xdev->chan);
+disable_clk:
+ clk_disable_unprepare(xdev->ap_clk);
+ return err;
+}
+
+/**
+ * xilinx_frmbuf_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always '0'
+ */
+static int xilinx_frmbuf_remove(struct platform_device *pdev)
+{
+ struct xilinx_frmbuf_device *xdev = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&xdev->common);
+ xilinx_frmbuf_chan_remove(&xdev->chan);
+ clk_disable_unprepare(xdev->ap_clk);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, xilinx_frmbuf_of_ids);
+
+static struct platform_driver xilinx_frmbuf_driver = {
+ .driver = {
+ .name = "xilinx-frmbuf",
+ .of_match_table = xilinx_frmbuf_of_ids,
+ },
+ .probe = xilinx_frmbuf_probe,
+ .remove = xilinx_frmbuf_remove,
+};
+
+module_platform_driver(xilinx_frmbuf_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx Framebuffer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_ps_pcie.h b/drivers/dma/xilinx/xilinx_ps_pcie.h
new file mode 100644
index 000000000000..81d634d15447
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_ps_pcie.h
@@ -0,0 +1,44 @@
+/*
+ * Xilinx PS PCIe DMA Engine platform header file
+ *
+ * Copyright (C) 2010-2017 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#ifndef __XILINX_PS_PCIE_H
+#define __XILINX_PS_PCIE_H
+
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irqreturn.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+#include <linux/timer.h>
+#include <linux/dma/xilinx_ps_pcie_dma.h>
+
+/**
+ * dma_platform_driver_register - Invoked by module init
+ *
+ * Return: the status of platform_driver_register()
+ */
+int dma_platform_driver_register(void);
+/**
+ * dma_platform_driver_unregister - Invoked by module exit
+ *
+ * Unregisters the platform driver; returns nothing.
+ */
+void dma_platform_driver_unregister(void);
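+/*
+ * Expected wiring from a module entry point (illustrative sketch; the real
+ * module_init/module_exit live in the driver sources, not in this header):
+ *
+ *	static int __init ps_pcie_dma_init(void)
+ *	{
+ *		return dma_platform_driver_register();
+ *	}
+ *	module_init(ps_pcie_dma_init);
+ *
+ *	static void __exit ps_pcie_dma_exit(void)
+ *	{
+ *		dma_platform_driver_unregister();
+ *	}
+ *	module_exit(ps_pcie_dma_exit);
+ */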
+
+#endif
diff --git a/drivers/dma/xilinx/xilinx_ps_pcie_dma_client.c b/drivers/dma/xilinx/xilinx_ps_pcie_dma_client.c
new file mode 100644
index 000000000000..2996133837f0
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_ps_pcie_dma_client.c
@@ -0,0 +1,1402 @@
+/*
+ * XILINX PS PCIe DMA Engine test module
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cdev.h>
+#include <linux/dma-direction.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci_ids.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/dma/xilinx_ps_pcie_dma.h>
+
+#include "../dmaengine.h"
+
+#define DRV_MODULE_NAME "ps_pcie_dma_client"
+
+#define DMA_SCRATCH0_REG_OFFSET (0x50)
+#define DMA_SCRATCH1_REG_OFFSET (0x54)
+#define DMA_AXI_INTR_ASSRT_REG_OFFSET (0x74)
+
+#define DMA_SW_INTR_ASSRT_BIT BIT(3)
+
+#define DMA_BAR_NUMBER 0
+
+#define CHAR_DRIVER_NAME "ps_pcie_dmachan"
+
+#define PIO_CHAR_DRIVER_NAME "ps_pcie_pio"
+#define EP_TRANSLATION_CHECK 0xCCCCCCCC
+
+#define PIO_MEMORY_BAR_NUMBER 2
+
+#define XPIO_CLIENT_MAGIC 'P'
+#define IOCTL_EP_CHECK_TRANSLATION _IO(XPIO_CLIENT_MAGIC, 0x01)
+
+#define XPS_PCIE_DMA_CLIENT_MAGIC 'S'
+
+#define IGET_ASYNC_TRANSFERINFO _IO(XPS_PCIE_DMA_CLIENT_MAGIC, 0x01)
+#define ISET_ASYNC_TRANSFERINFO _IO(XPS_PCIE_DMA_CLIENT_MAGIC, 0x02)
+
+#define DMA_TRANSACTION_SUCCESSFUL 1
+#define DMA_TRANSACTION_FAILURE 0
+
+#define MAX_LIST 1024
+
+struct dma_transfer_info {
+ char __user *buff_address;
+ unsigned int buff_size;
+ loff_t offset;
+ enum dma_data_direction direction;
+};
+
+struct buff_info {
+ bool status;
+ unsigned int buff_size;
+ char __user *buff_address;
+};
+
+struct usrbuff_info {
+ struct buff_info buff_list[MAX_LIST];
+ unsigned int expected;
+};
+
+enum pio_status {
+ PIO_SUPPORTED = 0,
+ PIO_NOT_SUPPORTED
+};
+
+enum dma_transfer_mode {
+ MEMORY_MAPPED = 0,
+ STREAMING
+};
+
+struct dma_deviceproperties {
+ u16 pci_vendorid;
+ u16 pci_deviceid;
+ u16 board_number;
+ enum pio_status pio_transfers;
+ enum dma_transfer_mode mode;
+ enum dma_data_direction direction[MAX_ALLOWED_CHANNELS_IN_HW];
+};
+
+struct xlnx_completed_info {
+ struct list_head clist;
+ struct buff_info buffer;
+};
+
+struct xlnx_ps_pcie_dma_client_channel {
+ struct device *dev;
+ struct dma_chan *chan;
+ struct ps_pcie_dma_channel_match match;
+ enum dma_data_direction direction;
+ enum dma_transfer_mode mode;
+ struct xlnx_completed_info completed;
+ spinlock_t channel_lock; /* Lock to serialize transfers on channel */
+};
+
+struct xlnx_ps_pcie_dma_client_device {
+ struct dma_deviceproperties *properties;
+
+ struct xlnx_ps_pcie_dma_client_channel
+ pcie_dma_chan[MAX_ALLOWED_CHANNELS_IN_HW];
+
+ dev_t char_device;
+ struct cdev xps_pcie_chardev;
+ struct device *chardev[MAX_ALLOWED_CHANNELS_IN_HW];
+
+ dev_t pio_char_device;
+ struct cdev xpio_char_dev;
+ struct device *xpio_char_device;
+ struct mutex pio_chardev_mutex; /* Exclusive access to ioctl */
+ struct completion trans_cmpltn;
+ u32 pio_translation_size;
+
+ struct list_head dev_node;
+};
+
+struct xlnx_ps_pcie_dma_asynchronous_transaction {
+ dma_cookie_t cookie;
+ struct page **cache_pages;
+ unsigned int num_pages;
+ struct sg_table *sg;
+ struct xlnx_ps_pcie_dma_client_channel *chan;
+ struct xlnx_completed_info *buffer_info;
+ struct dma_async_tx_descriptor **txd;
+};
+
+static struct class *g_ps_pcie_dma_client_class; /* global device class */
+static struct list_head g_ps_pcie_dma_client_list;
+
+/*
+ * Keep adding to this list to interact with multiple DMA devices
+ */
+static struct dma_deviceproperties g_dma_deviceproperties_list[] = {
+ {
+ .pci_vendorid = PCI_VENDOR_ID_XILINX,
+ .pci_deviceid = ZYNQMP_DMA_DEVID,
+ .board_number = 0,
+ .pio_transfers = PIO_SUPPORTED,
+ .mode = MEMORY_MAPPED,
+ /* Make sure the channel direction is same
+ * as what is configured in DMA device
+ */
+ .direction = {DMA_TO_DEVICE, DMA_FROM_DEVICE,
+ DMA_TO_DEVICE, DMA_FROM_DEVICE}
+ }
+};
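+/*
+ * Illustrative user-space view of this client (device node names derive from
+ * CHAR_DRIVER_NAME; minor numbering and the async ioctl handler live outside
+ * this excerpt, so treat this strictly as a sketch):
+ *
+ *	int fd = open("/dev/ps_pcie_dmachan0", O_RDWR);
+ *
+ *	// Synchronous host-to-card transfer to AXI offset 0x1000:
+ *	pwrite(fd, buf, len, 0x1000);
+ *
+ *	// Poll for completed asynchronous buffers:
+ *	struct usrbuff_info info = { .expected = MAX_LIST };
+ *	ioctl(fd, IGET_ASYNC_TRANSFERINFO, &info);
+ */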
+
+/**
+ * ps_pcie_dma_sync_transfer_cbk - Callback handler for synchronous transfers.
+ * Handles both S2C and C2S transfer callbacks and indicates to blocked
+ * applications that DMA transfers are complete
+ *
+ * @data: Callback parameter
+ *
+ * Return: void
+ */
+static void ps_pcie_dma_sync_transfer_cbk(void *data)
+{
+ struct completion *compl = (struct completion *)data;
+
+ if (compl)
+ complete(compl);
+}
+
+/**
+ * initiate_sync_transfer - Programs both Source Q
+ * and Destination Q of channel after setting up sg lists and transaction
+ * specific data. This function blocks until transaction completion is
+ * notified
+ *
+ * @channel: Pointer to the PS PCIe DMA channel structure
+ * @buffer: User land virtual address containing data to be sent or received
+ * @length: Length of user land buffer
+ * @f_offset: AXI domain address to/from which the data pointed to by the
+ * user buffer has to be sent/received
+ * @direction: Direction of the data transfer
+ *
+ * Return: number of bytes transferred on success, negative value on failure
+ */
+static ssize_t initiate_sync_transfer(
+ struct xlnx_ps_pcie_dma_client_channel *channel,
+ const char __user *buffer, size_t length,
+ loff_t *f_offset, enum dma_data_direction direction)
+{
+ int offset;
+ unsigned int alloc_pages;
+ unsigned long first, last, nents = 1;
+ struct page **cache_pages;
+ struct dma_chan *chan = NULL;
+ struct dma_device *device;
+ struct dma_async_tx_descriptor **txd = NULL;
+ dma_cookie_t cookie = 0;
+ enum dma_ctrl_flags flags = 0;
+ int err;
+ struct sg_table *sg;
+ enum dma_transfer_direction d_direction;
+ int i;
+ struct completion *cmpl_ptr;
+ enum dma_status status;
+ struct scatterlist *selem;
+ size_t elem_len = 0;
+
+ chan = channel->chan;
+ device = chan->device;
+
+ offset = offset_in_page(buffer);
+ first = ((unsigned long)buffer & PAGE_MASK) >> PAGE_SHIFT;
+ last = (((unsigned long)buffer + length - 1) & PAGE_MASK) >>
+ PAGE_SHIFT;
+ alloc_pages = (last - first) + 1;
+
+ cache_pages = devm_kzalloc(channel->dev,
+ (alloc_pages * (sizeof(struct page *))),
+ GFP_ATOMIC);
+ if (!cache_pages) {
+ dev_err(channel->dev,
+ "Unable to allocate memory for page table holder\n");
+ err = -ENOMEM;
+ goto err_out_cachepages_alloc;
+ }
+
+ /* Pages must be writable when the device writes into them (C2S) */
+ err = get_user_pages_fast((unsigned long)buffer, alloc_pages,
+ direction == DMA_FROM_DEVICE, cache_pages);
+ if (err <= 0) {
+ dev_err(channel->dev, "Unable to pin user pages\n");
+ if (!err)
+ err = -EFAULT;
+ goto err_out_pin_pages;
+ } else if (err < alloc_pages) {
+ dev_err(channel->dev, "Pinned only %d of %u user pages\n", err,
+ alloc_pages);
+ for (i = 0; i < err; i++)
+ put_page(cache_pages[i]);
+ err = -EFAULT;
+ goto err_out_pin_pages;
+ }
+
+ sg = devm_kzalloc(channel->dev, sizeof(struct sg_table), GFP_ATOMIC);
+ if (!sg) {
+ err = -ENOMEM;
+ goto err_out_alloc_sg_table;
+ }
+
+ err = sg_alloc_table_from_pages(sg, cache_pages, alloc_pages, offset,
+ length, GFP_ATOMIC);
+ if (err < 0) {
+ dev_err(channel->dev, "Unable to create sg table\n");
+ goto err_out_sg_to_sgl;
+ }
+
+ err = dma_map_sg(channel->dev, sg->sgl, sg->nents, direction);
+ if (err == 0) {
+ dev_err(channel->dev, "Unable to map buffer to sg table\n");
+ err = -EIO;
+ goto err_out_dma_map_sg;
+ }
+
+ cmpl_ptr = devm_kzalloc(channel->dev, sizeof(struct completion),
+ GFP_ATOMIC);
+ if (!cmpl_ptr) {
+ err = -ENOMEM;
+ goto err_out_cmpl_ptr;
+ }
+
+ init_completion(cmpl_ptr);
+
+ if (channel->mode == MEMORY_MAPPED)
+ nents = sg->nents;
+
+ txd = devm_kcalloc(channel->dev, nents, sizeof(*txd), GFP_ATOMIC);
+ if (!txd) {
+ err = -ENOMEM;
+ goto err_out_cmpl_ptr;
+ }
+
+ if (channel->mode == MEMORY_MAPPED) {
+ for (i = 0, selem = (sg->sgl); i < sg->nents; i++,
+ selem = sg_next(selem)) {
+ if ((i + 1) == sg->nents)
+ flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+ if (direction == DMA_TO_DEVICE) {
+ txd[i] = device->device_prep_dma_memcpy(chan,
+ (dma_addr_t)(*f_offset) + elem_len,
+ selem->dma_address, selem->length,
+ flags);
+ } else {
+ txd[i] = device->device_prep_dma_memcpy(chan,
+ selem->dma_address,
+ (dma_addr_t)(*f_offset) + elem_len,
+ selem->length, flags);
+ }
+
+ elem_len += selem->length;
+
+ if (!txd[i]) {
+ err = -ENOMEM;
+ goto err_out_no_prep_sg_async_desc;
+ }
+ }
+ } else {
+ if (direction == DMA_TO_DEVICE)
+ d_direction = DMA_MEM_TO_DEV;
+ else
+ d_direction = DMA_DEV_TO_MEM;
+
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+ txd[0] = device->device_prep_slave_sg(chan, sg->sgl, sg->nents,
+ d_direction, flags, NULL);
+ if (!txd[0]) {
+ err = -ENOMEM;
+ goto err_out_no_slave_sg_async_descriptor;
+ }
+ }
+
+ if (channel->mode == MEMORY_MAPPED) {
+ for (i = 0; i < sg->nents; i++) {
+ if ((i + 1) == sg->nents) {
+ txd[i]->callback =
+ ps_pcie_dma_sync_transfer_cbk;
+ txd[i]->callback_param = cmpl_ptr;
+ }
+
+ cookie = txd[i]->tx_submit(txd[i]);
+ if (dma_submit_error(cookie)) {
+ err = (int)cookie;
+ dev_err(channel->dev,
+ "Unable to submit transaction\n");
+ goto free_transaction;
+ }
+ }
+ } else {
+ txd[0]->callback = ps_pcie_dma_sync_transfer_cbk;
+ txd[0]->callback_param = cmpl_ptr;
+
+ cookie = txd[0]->tx_submit(txd[0]);
+ if (dma_submit_error(cookie)) {
+ err = (int)cookie;
+ dev_err(channel->dev,
+ "Unable to submit transaction\n");
+ goto free_transaction;
+ }
+ }
+
+ dma_async_issue_pending(chan);
+
+ wait_for_completion_killable(cmpl_ptr);
+
+ status = dmaengine_tx_status(chan, cookie, NULL);
+ if (status == DMA_COMPLETE)
+ err = length;
+ else
+ err = -EIO;
+
+ dma_unmap_sg(channel->dev, sg->sgl, sg->nents, direction);
+ devm_kfree(channel->dev, cmpl_ptr);
+ devm_kfree(channel->dev, txd);
+ sg_free_table(sg);
+ devm_kfree(channel->dev, sg);
+ for (i = 0; i < alloc_pages; i++)
+ put_page(cache_pages[i]);
+ devm_kfree(channel->dev, cache_pages);
+
+ return (ssize_t)err;
+
+free_transaction:
+err_out_no_prep_sg_async_desc:
+err_out_no_slave_sg_async_descriptor:
+ devm_kfree(channel->dev, cmpl_ptr);
+ devm_kfree(channel->dev, txd);
+err_out_cmpl_ptr:
+ dma_unmap_sg(channel->dev, sg->sgl, sg->nents, direction);
+err_out_dma_map_sg:
+ sg_free_table(sg);
+err_out_sg_to_sgl:
+ devm_kfree(channel->dev, sg);
+err_out_alloc_sg_table:
+ for (i = 0; i < alloc_pages; i++)
+ put_page(cache_pages[i]);
+err_out_pin_pages:
+ devm_kfree(channel->dev, cache_pages);
+err_out_cachepages_alloc:
+
+ return (ssize_t)err;
+}
+
+static ssize_t
+ps_pcie_dma_read(struct file *file,
+ char __user *buffer,
+ size_t length,
+ loff_t *f_offset)
+{
+ struct xlnx_ps_pcie_dma_client_channel *chan;
+ ssize_t ret;
+
+ chan = file->private_data;
+
+ if (chan->direction != DMA_FROM_DEVICE) {
+ dev_err(chan->dev, "Invalid data direction for channel\n");
+ ret = -EINVAL;
+ goto c2s_err_direction;
+ }
+
+ ret = initiate_sync_transfer(chan, buffer, length, f_offset,
+ DMA_FROM_DEVICE);
+
+ if (ret != length)
+ dev_dbg(chan->dev, "Read synchronous transfer unsuccessful\n");
+
+c2s_err_direction:
+ return ret;
+}
+
+static ssize_t
+ps_pcie_dma_write(struct file *file,
+ const char __user *buffer,
+ size_t length,
+ loff_t *f_offset)
+{
+ struct xlnx_ps_pcie_dma_client_channel *chan;
+ ssize_t ret;
+
+ chan = file->private_data;
+
+ if (chan->direction != DMA_TO_DEVICE) {
+ dev_err(chan->dev,
+ "Invalid data direction for channel\n");
+ ret = -EINVAL;
+ goto s2c_err_direction;
+ }
+
+ ret = initiate_sync_transfer(chan, buffer, length, f_offset,
+ DMA_TO_DEVICE);
+
+ if (ret != length)
+ dev_dbg(chan->dev, "Write synchronous transfer unsuccessful\n");
+
+s2c_err_direction:
+ return ret;
+}
+
+static int ps_pcie_dma_open(struct inode *in, struct file *file)
+{
+ struct xlnx_ps_pcie_dma_client_device *xdev;
+ int minor_num = iminor(in);
+
+ xdev = container_of(in->i_cdev,
+ struct xlnx_ps_pcie_dma_client_device,
+ xps_pcie_chardev);
+
+ file->private_data = &xdev->pcie_dma_chan[minor_num];
+
+ return 0;
+}
+
+static int ps_pcie_dma_release(struct inode *in, struct file *filp)
+{
+ return 0;
+}
+
+static int update_completed_info(struct xlnx_ps_pcie_dma_client_channel *chan,
+ struct usrbuff_info *usr_buff)
+{
+ int retval = 0;
+ unsigned int expected, count = 0;
+ struct xlnx_completed_info *entry;
+ struct xlnx_completed_info *next;
+
+ if (list_empty(&chan->completed.clist))
+ goto update_expected;
+
+ if (copy_from_user((void *)&expected,
+ (void __user *)&usr_buff->expected,
+ sizeof(unsigned int)) != 0) {
+ pr_err("Expected count copy failure\n");
+ retval = -ENXIO;
+ return retval;
+ }
+
+ if (expected > MAX_LIST) {
+ retval = -ENXIO;
+ return retval;
+ }
+
+ list_for_each_entry_safe(entry, next, &chan->completed.clist, clist) {
+ if (copy_to_user((void __user *)(usr_buff->buff_list + count),
+ (void *)&entry->buffer,
+ sizeof(struct buff_info)) != 0) {
+ pr_err("update user completed count copy failed\n");
+ retval = -ENXIO;
+ break;
+ }
+ count++;
+ spin_lock(&chan->channel_lock);
+ list_del(&entry->clist);
+ spin_unlock(&chan->channel_lock);
+ devm_kfree(chan->dev, entry);
+ if (count == expected)
+ break;
+ }
+
+update_expected:
+ if (copy_to_user((void __user *)&usr_buff->expected, (void *)&count,
+ (sizeof(unsigned int))) != 0) {
+ pr_err("update user expected count copy failure\n");
+ retval = -ENXIO;
+ }
+
+ return retval;
+}
+
+/**
+ * ps_pcie_dma_async_transfer_cbk - Callback handler for asynchronous
+ * transfers. Handles both S2C and C2S transfer callbacks and stores
+ * transaction information in a list for a user application to poll
+ *
+ * @data: Callback parameter
+ *
+ * Return: void
+ */
+static void ps_pcie_dma_async_transfer_cbk(void *data)
+{
+ struct xlnx_ps_pcie_dma_asynchronous_transaction *trans =
+ (struct xlnx_ps_pcie_dma_asynchronous_transaction *)data;
+ enum dma_status status;
+ struct dma_tx_state state;
+ unsigned int i;
+
+ dma_unmap_sg(trans->chan->dev, trans->sg->sgl, trans->sg->nents,
+ trans->chan->direction);
+ sg_free_table(trans->sg);
+ devm_kfree(trans->chan->dev, trans->sg);
+ devm_kfree(trans->chan->dev, trans->txd);
+ for (i = 0; i < trans->num_pages; i++)
+ put_page(trans->cache_pages[i]);
+ devm_kfree(trans->chan->dev, trans->cache_pages);
+
+ status = dmaengine_tx_status(trans->chan->chan, trans->cookie, &state);
+
+ if (status == DMA_COMPLETE) {
+ trans->buffer_info->buffer.status = DMA_TRANSACTION_SUCCESSFUL;
+ } else {
+ /* Assumed failure code from the client header; the original
+ * marked every completion successful, which looks like a bug.
+ */
+ trans->buffer_info->buffer.status = DMA_TRANSACTION_FAILURE;
+ }
+
+ spin_lock(&trans->chan->channel_lock);
+ list_add_tail(&trans->buffer_info->clist,
+ &trans->chan->completed.clist);
+ spin_unlock(&trans->chan->channel_lock);
+ devm_kfree(trans->chan->dev, trans);
+}
+
+/**
+ * initiate_async_transfer - Programs both the Source Q and the Destination Q
+ * of a channel after setting up sg lists and transaction specific data.
+ * This function returns after setting up the transfer
+ *
+ * @channel: Pointer to the PS PCIe DMA channel structure
+ * @buffer: User land virtual address containing data to be sent or received
+ * @length: Length of the user land buffer
+ * @f_offset: AXI domain address to/from which the data in the user buffer
+ * is transferred
+ * @direction: Direction of the data transfer
+ *
+ * Return: Length of the transfer on success, negative error code on failure
+ */
+static int initiate_async_transfer(
+ struct xlnx_ps_pcie_dma_client_channel *channel,
+ char __user *buffer, size_t length, loff_t *f_offset,
+ enum dma_data_direction direction)
+{
+ int offset;
+ unsigned int alloc_pages;
+ unsigned long first, last, nents = 1;
+ struct page **cache_pages;
+ struct dma_chan *chan = NULL;
+ struct dma_device *device;
+ struct dma_async_tx_descriptor **txd = NULL;
+ dma_cookie_t cookie;
+ enum dma_ctrl_flags flags = 0;
+ struct xlnx_ps_pcie_dma_asynchronous_transaction *trans;
+ int err;
+ struct sg_table *sg;
+ enum dma_transfer_direction d_direction;
+ int i;
+ struct scatterlist *selem;
+ size_t elem_len = 0;
+
+ chan = channel->chan;
+ device = chan->device;
+
+ offset = offset_in_page(buffer);
+ first = ((unsigned long)buffer & PAGE_MASK) >> PAGE_SHIFT;
+ last = (((unsigned long)buffer + length - 1) & PAGE_MASK) >>
+ PAGE_SHIFT;
+ alloc_pages = (last - first) + 1;
+
+ cache_pages = devm_kzalloc(channel->dev,
+ (alloc_pages * (sizeof(struct page *))),
+ GFP_ATOMIC);
+ if (!cache_pages) {
+ err = -ENOMEM;
+ goto err_out_cachepages_alloc;
+ }
+
+ /* Pages are written to only on DMA_FROM_DEVICE (C2S) transfers */
+ err = get_user_pages_fast((unsigned long)buffer, alloc_pages,
+ direction == DMA_FROM_DEVICE, cache_pages);
+ if (err <= 0) {
+ dev_err(channel->dev, "Unable to pin user pages\n");
+ if (err == 0)
+ err = -EFAULT;
+ goto err_out_pin_pages;
+ } else if (err < alloc_pages) {
+ dev_err(channel->dev, "Pinned only %d of %u user pages\n",
+ err, alloc_pages);
+ for (i = 0; i < err; i++)
+ put_page(cache_pages[i]);
+ err = -EFAULT;
+ goto err_out_pin_pages;
+ }
+
+ sg = devm_kzalloc(channel->dev, sizeof(struct sg_table), GFP_ATOMIC);
+ if (!sg) {
+ err = -ENOMEM;
+ goto err_out_alloc_sg_table;
+ }
+
+ err = sg_alloc_table_from_pages(sg, cache_pages, alloc_pages, offset,
+ length, GFP_ATOMIC);
+ if (err < 0) {
+ dev_err(channel->dev, "Unable to create sg table\n");
+ goto err_out_sg_to_sgl;
+ }
+
+ err = dma_map_sg(channel->dev, sg->sgl, sg->nents, direction);
+ if (err == 0) {
+ dev_err(channel->dev,
+ "Unable to map user buffer to sg table\n");
+ err = -ENOMEM;
+ goto err_out_dma_map_sg;
+ }
+
+ trans = devm_kzalloc(channel->dev, sizeof(*trans), GFP_ATOMIC);
+ if (!trans) {
+ err = -ENOMEM;
+ goto err_out_trans_ptr;
+ }
+
+ trans->buffer_info = devm_kzalloc(channel->dev,
+ sizeof(struct xlnx_completed_info),
+ GFP_ATOMIC);
+
+ if (!trans->buffer_info) {
+ err = -ENOMEM;
+ goto err_out_no_completion_info;
+ }
+
+ if (channel->mode == MEMORY_MAPPED)
+ nents = sg->nents;
+
+ txd = devm_kzalloc(channel->dev,
+ sizeof(*txd) * nents, GFP_ATOMIC);
+ if (!txd) {
+ err = -ENOMEM;
+ goto err_out_no_completion_info;
+ }
+
+ trans->txd = txd;
+
+ if (channel->mode == MEMORY_MAPPED) {
+ for_each_sg(sg->sgl, selem, sg->nents, i) {
+ if ((i + 1) == sg->nents)
+ flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+ if (direction == DMA_TO_DEVICE) {
+ txd[i] = device->device_prep_dma_memcpy(chan,
+ (dma_addr_t)(*f_offset) + elem_len,
+ sg_dma_address(selem),
+ sg_dma_len(selem), flags);
+ } else {
+ txd[i] = device->device_prep_dma_memcpy(chan,
+ sg_dma_address(selem),
+ (dma_addr_t)(*f_offset) + elem_len,
+ sg_dma_len(selem), flags);
+ }
+
+ elem_len += sg_dma_len(selem);
+
+ if (!txd[i]) {
+ err = -EAGAIN;
+ goto err_out_no_prep_sg_async_desc;
+ }
+ }
+ } else {
+ if (direction == DMA_TO_DEVICE)
+ d_direction = DMA_MEM_TO_DEV;
+ else
+ d_direction = DMA_DEV_TO_MEM;
+
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+ txd[0] = device->device_prep_slave_sg(chan, sg->sgl, sg->nents,
+ d_direction, flags, NULL);
+ if (!txd[0]) {
+ err = -EAGAIN;
+ goto err_out_no_slave_sg_async_descriptor;
+ }
+ }
+
+ trans->buffer_info->buffer.buff_address = buffer;
+ trans->buffer_info->buffer.buff_size = length;
+ trans->cache_pages = cache_pages;
+ trans->num_pages = alloc_pages;
+ trans->chan = channel;
+ trans->sg = sg;
+
+ if (channel->mode == MEMORY_MAPPED) {
+ for (i = 0; i < sg->nents; i++) {
+ cookie = txd[i]->tx_submit(txd[i]);
+ if (dma_submit_error(cookie)) {
+ err = (int)cookie;
+ dev_err(channel->dev,
+ "Unable to submit transaction\n");
+ goto free_transaction;
+ }
+
+ if ((i + 1) == sg->nents) {
+ txd[i]->callback =
+ ps_pcie_dma_async_transfer_cbk;
+ txd[i]->callback_param = trans;
+ trans->cookie = cookie;
+ }
+ }
+
+ } else {
+ txd[0]->callback = ps_pcie_dma_async_transfer_cbk;
+ txd[0]->callback_param = trans;
+
+ cookie = txd[0]->tx_submit(txd[0]);
+ if (dma_submit_error(cookie)) {
+ err = (int)cookie;
+ dev_err(channel->dev,
+ "Unable to submit transaction\n");
+ goto free_transaction;
+ }
+
+ trans->cookie = cookie;
+ }
+
+ dma_async_issue_pending(chan);
+
+ return length;
+
+free_transaction:
+err_out_no_prep_sg_async_desc:
+err_out_no_slave_sg_async_descriptor:
+ devm_kfree(channel->dev, trans->buffer_info);
+ devm_kfree(channel->dev, txd);
+err_out_no_completion_info:
+ devm_kfree(channel->dev, trans);
+err_out_trans_ptr:
+ dma_unmap_sg(channel->dev, sg->sgl, sg->nents, direction);
+err_out_dma_map_sg:
+ sg_free_table(sg);
+err_out_sg_to_sgl:
+ devm_kfree(channel->dev, sg);
+err_out_alloc_sg_table:
+ for (i = 0; i < alloc_pages; i++)
+ put_page(cache_pages[i]);
+err_out_pin_pages:
+ devm_kfree(channel->dev, cache_pages);
+err_out_cachepages_alloc:
+
+ return err;
+}
+
+static long ps_pcie_dma_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int retval = 0;
+ struct xlnx_ps_pcie_dma_client_channel *chan;
+ struct dma_transfer_info transfer_info;
+
+ if (_IOC_TYPE(cmd) != XPS_PCIE_DMA_CLIENT_MAGIC)
+ return -ENOTTY;
+
+ chan = filp->private_data;
+
+ switch (cmd) {
+ case ISET_ASYNC_TRANSFERINFO:
+ if (copy_from_user((void *)&transfer_info,
+ (void __user *)arg,
+ sizeof(struct dma_transfer_info)) != 0) {
+ pr_err("Copy of asynchronous transfer params from user failed\n");
+ return -EFAULT;
+ }
+ if (transfer_info.direction != chan->direction)
+ return -EINVAL;
+ retval = initiate_async_transfer(chan,
+ transfer_info.buff_address,
+ transfer_info.buff_size,
+ &transfer_info.offset,
+ transfer_info.direction);
+ break;
+ case IGET_ASYNC_TRANSFERINFO:
+ retval = update_completed_info(chan,
+ (struct usrbuff_info *)arg);
+ break;
+ default:
+ pr_err("Unsupported ioctl command received\n");
+ retval = -1;
+ }
+
+ return (long)retval;
+}
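+
+/*
+ * Hedged usage sketch (not part of the driver): a user application is
+ * expected to drive the asynchronous path roughly as below. The ioctl
+ * names and struct dma_transfer_info come from this file; the device
+ * node path and exact field layout are assumptions based on the client
+ * header and the device_create() call later in this file.
+ *
+ *	int fd = open("/dev/<chardev>0_0", O_RDWR);
+ *	struct dma_transfer_info info = {
+ *		.buff_address = buf,		// user buffer
+ *		.buff_size = len,		// bytes to transfer
+ *		.offset = axi_addr,		// AXI domain address
+ *		.direction = DMA_TO_DEVICE,	// must match channel
+ *	};
+ *	ioctl(fd, ISET_ASYNC_TRANSFERINFO, &info);	// queue transfer
+ *
+ *	struct usrbuff_info done = { .expected = MAX_LIST };
+ *	ioctl(fd, IGET_ASYNC_TRANSFERINFO, &done);	// poll completions
+ */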
+
+static const struct file_operations ps_pcie_dma_comm_fops = {
+ .owner = THIS_MODULE,
+ .read = ps_pcie_dma_read,
+ .write = ps_pcie_dma_write,
+ .unlocked_ioctl = ps_pcie_dma_ioctl,
+ .open = ps_pcie_dma_open,
+ .release = ps_pcie_dma_release,
+};
+
+static void pio_sw_intr_cbk(void *data)
+{
+ struct completion *compl = (struct completion *)data;
+
+ if (compl)
+ complete(compl);
+}
+
+static long pio_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ char *bar_memory = NULL;
+ u32 translation_size = 0;
+ long err = 0;
+ struct dma_async_tx_descriptor *intr_txd = NULL;
+ dma_cookie_t cookie;
+ struct dma_chan *chan = NULL;
+ struct dma_device *device;
+ enum dma_ctrl_flags flags;
+ struct xlnx_ps_pcie_dma_client_device *xdev;
+ struct ps_pcie_dma_channel_match *xlnx_match;
+ struct BAR_PARAMS *barinfo;
+
+ xdev = filp->private_data;
+ chan = xdev->pcie_dma_chan[0].chan;
+ device = chan->device;
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+ xlnx_match =
+ (struct ps_pcie_dma_channel_match *)chan->private;
+
+ barinfo = ((struct BAR_PARAMS *)(xlnx_match->bar_params) +
+ DMA_BAR_NUMBER);
+ bar_memory = (__force char *)barinfo->BAR_VIRT_ADDR;
+
+ switch (cmd) {
+ case IOCTL_EP_CHECK_TRANSLATION:
+
+ mutex_lock(&xdev->pio_chardev_mutex);
+ reinit_completion(&xdev->trans_cmpltn);
+
+ intr_txd = device->device_prep_dma_interrupt(chan, flags);
+ if (!intr_txd) {
+ err = -EAGAIN;
+ mutex_unlock(&xdev->pio_chardev_mutex);
+ return err;
+ }
+
+ intr_txd->callback = pio_sw_intr_cbk;
+ intr_txd->callback_param = &xdev->trans_cmpltn;
+
+ cookie = intr_txd->tx_submit(intr_txd);
+ if (dma_submit_error(cookie)) {
+ err = cookie;
+ pr_err("Unable to submit interrupt transaction\n");
+ mutex_unlock(&xdev->pio_chardev_mutex);
+ return err;
+ }
+
+ dma_async_issue_pending(chan);
+
+ iowrite32(EP_TRANSLATION_CHECK, (void __iomem *)(bar_memory +
+ DMA_SCRATCH0_REG_OFFSET));
+ iowrite32(DMA_SW_INTR_ASSRT_BIT, (void __iomem *)(bar_memory +
+ DMA_AXI_INTR_ASSRT_REG_OFFSET));
+
+ if (wait_for_completion_interruptible(&xdev->trans_cmpltn)) {
+ mutex_unlock(&xdev->pio_chardev_mutex);
+ return -ERESTARTSYS;
+ }
+ translation_size = ioread32((void __iomem *)(bar_memory +
+ DMA_SCRATCH1_REG_OFFSET));
+ if (translation_size > 0)
+ xdev->pio_translation_size = translation_size;
+ else
+ err = -EAGAIN;
+ iowrite32(0, (void __iomem *)(bar_memory +
+ DMA_SCRATCH1_REG_OFFSET));
+ mutex_unlock(&xdev->pio_chardev_mutex);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static ssize_t
+pio_read(struct file *file, char __user *buffer, size_t length,
+ loff_t *f_offset)
+{
+ char *bar_memory = NULL;
+ struct xlnx_ps_pcie_dma_client_device *xdev;
+ struct ps_pcie_dma_channel_match *xlnx_match;
+ ssize_t num_bytes = 0;
+ struct BAR_PARAMS *barinfo;
+
+ xdev = file->private_data;
+ xlnx_match = (struct ps_pcie_dma_channel_match *)
+ xdev->pcie_dma_chan[0].chan->private;
+
+ barinfo = ((struct BAR_PARAMS *)(xlnx_match->bar_params) +
+ PIO_MEMORY_BAR_NUMBER);
+ bar_memory = (__force char *)barinfo->BAR_VIRT_ADDR;
+
+ if (length > xdev->pio_translation_size) {
+ pr_err("Error! Invalid buffer length supplied at PIO read\n");
+ return -EINVAL;
+ }
+
+ if ((length + *f_offset) > xdev->pio_translation_size) {
+ pr_err("Error! Invalid buffer offset supplied at PIO read\n");
+ return -EINVAL;
+ }
+
+ bar_memory += *f_offset;
+
+ num_bytes = copy_to_user(buffer, bar_memory, length);
+ if (num_bytes != 0) {
+ pr_err("Error! copy_to_user failed at PIO read\n");
+ num_bytes = length - num_bytes;
+ } else {
+ num_bytes = length;
+ }
+
+ return num_bytes;
+}
+
+static ssize_t
+pio_write(struct file *file, const char __user *buffer,
+ size_t length, loff_t *f_offset)
+{
+ char *bar_memory = NULL;
+ struct xlnx_ps_pcie_dma_client_device *xdev;
+ struct ps_pcie_dma_channel_match *xlnx_match;
+ ssize_t num_bytes = 0;
+ struct BAR_PARAMS *barinfo;
+
+ xdev = file->private_data;
+ xlnx_match = (struct ps_pcie_dma_channel_match *)
+ xdev->pcie_dma_chan[0].chan->private;
+
+ barinfo = ((struct BAR_PARAMS *)(xlnx_match->bar_params) +
+ PIO_MEMORY_BAR_NUMBER);
+ bar_memory = (__force char *)barinfo->BAR_VIRT_ADDR;
+
+ if (length > xdev->pio_translation_size) {
+ pr_err("Error! Invalid buffer length supplied at PIO write\n");
+ return -EINVAL;
+ }
+
+ if ((length + *f_offset) > xdev->pio_translation_size) {
+ pr_err("Error! Invalid buffer offset supplied at PIO write\n");
+ return -EINVAL;
+ }
+
+ bar_memory += *f_offset;
+
+ num_bytes = copy_from_user(bar_memory, buffer, length);
+
+ if (num_bytes != 0) {
+ pr_err("Error! copy_from_user failed at PIO write\n");
+ num_bytes = length - num_bytes;
+ } else {
+ num_bytes = length;
+ }
+
+ return num_bytes;
+}
+
+static int pio_open(struct inode *in, struct file *file)
+{
+ struct xlnx_ps_pcie_dma_client_device *xdev;
+
+ xdev = container_of(in->i_cdev,
+ struct xlnx_ps_pcie_dma_client_device,
+ xpio_char_dev);
+
+ file->private_data = xdev;
+
+ return 0;
+}
+
+static int pio_release(struct inode *in, struct file *filp)
+{
+ return 0;
+}
+
+static const struct file_operations ps_pcie_pio_fops = {
+ .owner = THIS_MODULE,
+ .read = pio_read,
+ .write = pio_write,
+ .unlocked_ioctl = pio_ioctl,
+ .open = pio_open,
+ .release = pio_release,
+};
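+
+/*
+ * Hedged usage sketch (not part of the driver): PIO access is gated on a
+ * successful translation check, after which reads and writes within the
+ * translated window are mirrored to BAR memory. The device node path is
+ * an assumption derived from PIO_CHAR_DRIVER_NAME.
+ *
+ *	int fd = open("/dev/<pio_chardev>_0", O_RDWR);
+ *	if (ioctl(fd, IOCTL_EP_CHECK_TRANSLATION, 0) == 0) {
+ *		pwrite(fd, buf, len, offset);	// offset within window
+ *		pread(fd, buf, len, offset);
+ *	}
+ */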
+
+static void destroy_char_iface_for_pio(
+ struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ device_destroy(g_ps_pcie_dma_client_class,
+ MKDEV(MAJOR(xdev->pio_char_device), 0));
+ cdev_del(&xdev->xpio_char_dev);
+ unregister_chrdev_region(xdev->pio_char_device, 1);
+}
+
+static void destroy_char_iface_for_dma(
+ struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ int i;
+ struct xlnx_completed_info *entry, *next;
+
+ for (i = 0; i < MAX_ALLOWED_CHANNELS_IN_HW; i++) {
+ list_for_each_entry_safe(entry, next,
+ &xdev->pcie_dma_chan[i].completed.clist,
+ clist) {
+ spin_lock(&xdev->pcie_dma_chan[i].channel_lock);
+ list_del(&entry->clist);
+ spin_unlock(&xdev->pcie_dma_chan[i].channel_lock);
+ devm_kfree(xdev->pcie_dma_chan[i].dev, entry);
+ }
+ device_destroy(g_ps_pcie_dma_client_class,
+ MKDEV(MAJOR(xdev->char_device), i));
+ }
+ cdev_del(&xdev->xps_pcie_chardev);
+ unregister_chrdev_region(xdev->char_device, MAX_ALLOWED_CHANNELS_IN_HW);
+}
+
+static void delete_char_dev_interfaces(
+ struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ destroy_char_iface_for_dma(xdev);
+ if (xdev->properties->pio_transfers == PIO_SUPPORTED)
+ destroy_char_iface_for_pio(xdev);
+}
+
+static void release_dma_channels(struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ int i;
+
+ for (i = 0; i < MAX_ALLOWED_CHANNELS_IN_HW; i++)
+ dma_release_channel(xdev->pcie_dma_chan[i].chan);
+}
+
+static void delete_char_devices(void)
+{
+ struct xlnx_ps_pcie_dma_client_device *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &g_ps_pcie_dma_client_list,
+ dev_node) {
+ list_del(&entry->dev_node);
+ delete_char_dev_interfaces(entry);
+ release_dma_channels(entry);
+ kfree(entry);
+ }
+}
+
+static bool ps_pcie_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct ps_pcie_dma_channel_match *client_match =
+ (struct ps_pcie_dma_channel_match *)param;
+
+ struct ps_pcie_dma_channel_match *dma_channel_match =
+ (struct ps_pcie_dma_channel_match *)chan->private;
+
+ if (client_match && dma_channel_match) {
+ if (client_match->pci_vendorid != 0 &&
+ dma_channel_match->pci_vendorid != 0) {
+ if (client_match->pci_vendorid == dma_channel_match->pci_vendorid) {
+ if (client_match->pci_deviceid == dma_channel_match->pci_deviceid &&
+ client_match->channel_number == dma_channel_match->channel_number &&
+ client_match->direction == dma_channel_match->direction) {
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
+
+static int acquire_dma_channels(struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ int err;
+ int i;
+ dma_cap_mask_t mask;
+ struct ps_pcie_dma_channel_match *match;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_PRIVATE, mask);
+
+ for (i = 0; i < MAX_ALLOWED_CHANNELS_IN_HW; i++) {
+ match = &xdev->pcie_dma_chan[i].match;
+ match->board_number = xdev->properties->board_number;
+ match->pci_deviceid = xdev->properties->pci_deviceid;
+ match->pci_vendorid = xdev->properties->pci_vendorid;
+ match->channel_number = i;
+ match->direction = xdev->properties->direction[i];
+
+ xdev->pcie_dma_chan[i].chan =
+ dma_request_channel(mask, ps_pcie_dma_filter, match);
+
+ if (!xdev->pcie_dma_chan[i].chan) {
+ pr_err("Error channel handle %d board %d channel\n",
+ match->board_number,
+ match->channel_number);
+ err = -EINVAL;
+ goto err_out_no_channels;
+ }
+ xdev->pcie_dma_chan[i].dev =
+ xdev->pcie_dma_chan[i].chan->device->dev;
+ xdev->pcie_dma_chan[i].direction =
+ xdev->properties->direction[i];
+ xdev->pcie_dma_chan[i].mode =
+ xdev->properties->mode;
+ INIT_LIST_HEAD(&xdev->pcie_dma_chan[i].completed.clist);
+ spin_lock_init(&xdev->pcie_dma_chan[i].channel_lock);
+ }
+
+ return 0;
+
+err_out_no_channels:
+ while (i > 0) {
+ i--;
+ dma_release_channel(xdev->pcie_dma_chan[i].chan);
+ }
+ return err;
+}
+
+static int create_char_dev_iface_for_dma_device(
+ struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ int err = 0;
+ int i;
+
+ WARN_ON(!xdev);
+
+ err = alloc_chrdev_region(&xdev->char_device, 0,
+ MAX_ALLOWED_CHANNELS_IN_HW,
+ CHAR_DRIVER_NAME);
+ if (err < 0) {
+ pr_err("Unable to allocate char device region\n");
+ return err;
+ }
+
+ xdev->xps_pcie_chardev.owner = THIS_MODULE;
+ cdev_init(&xdev->xps_pcie_chardev, &ps_pcie_dma_comm_fops);
+ xdev->xps_pcie_chardev.dev = xdev->char_device;
+
+ err = cdev_add(&xdev->xps_pcie_chardev, xdev->char_device,
+ MAX_ALLOWED_CHANNELS_IN_HW);
+ if (err < 0) {
+ pr_err("PS PCIe DMA unable to add cdev\n");
+ goto err_out_cdev_add;
+ }
+
+ for (i = 0; i < MAX_ALLOWED_CHANNELS_IN_HW; i++) {
+ xdev->chardev[i] =
+ device_create(g_ps_pcie_dma_client_class,
+ xdev->pcie_dma_chan[i].dev,
+ MKDEV(MAJOR(xdev->char_device), i),
+ xdev,
+ "%s%d_%d", CHAR_DRIVER_NAME,
+ i, xdev->properties->board_number);
+
+ if (IS_ERR(xdev->chardev[i])) {
+ err = PTR_ERR(xdev->chardev[i]);
+ pr_err("PS PCIe DMA Unable to create device %d\n", i);
+ goto err_out_dev_create;
+ }
+ }
+
+ return 0;
+
+err_out_dev_create:
+ while (--i >= 0) {
+ device_destroy(g_ps_pcie_dma_client_class,
+ MKDEV(MAJOR(xdev->char_device), i));
+ }
+ cdev_del(&xdev->xps_pcie_chardev);
+err_out_cdev_add:
+ unregister_chrdev_region(xdev->char_device, MAX_ALLOWED_CHANNELS_IN_HW);
+ return err;
+}
+
+static int create_char_dev_iface_for_pio(
+ struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ int err;
+
+ err = alloc_chrdev_region(&xdev->pio_char_device, 0, 1,
+ PIO_CHAR_DRIVER_NAME);
+ if (err < 0) {
+ pr_err("Unable to allocate pio character device region\n");
+ return err;
+ }
+
+ xdev->xpio_char_dev.owner = THIS_MODULE;
+ cdev_init(&xdev->xpio_char_dev, &ps_pcie_pio_fops);
+ xdev->xpio_char_dev.dev = xdev->pio_char_device;
+
+ err = cdev_add(&xdev->xpio_char_dev, xdev->pio_char_device, 1);
+ if (err < 0) {
+ pr_err("PS PCIe DMA unable to add cdev for pio\n");
+ goto err_out_pio_cdev_add;
+ }
+
+ xdev->xpio_char_device =
+ device_create(g_ps_pcie_dma_client_class,
+ xdev->pcie_dma_chan[0].dev,
+ MKDEV(MAJOR(xdev->pio_char_device), 0),
+ xdev, "%s_%d", PIO_CHAR_DRIVER_NAME,
+ xdev->properties->board_number);
+
+ if (IS_ERR(xdev->xpio_char_device)) {
+ err = PTR_ERR(xdev->xpio_char_device);
+ pr_err("PS PCIe DMA Unable to create pio device\n");
+ goto err_out_pio_dev_create;
+ }
+
+ mutex_init(&xdev->pio_chardev_mutex);
+ xdev->pio_translation_size = 0;
+ init_completion(&xdev->trans_cmpltn);
+
+ return 0;
+
+err_out_pio_dev_create:
+ cdev_del(&xdev->xpio_char_dev);
+err_out_pio_cdev_add:
+ unregister_chrdev_region(xdev->pio_char_device, 1);
+ return err;
+}
+
+static int create_char_dev_interfaces(
+ struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ int err;
+
+ err = create_char_dev_iface_for_dma_device(xdev);
+
+ if (err != 0) {
+ pr_err("Unable to create char dev dma iface %d\n",
+ xdev->properties->pci_deviceid);
+ goto no_char_iface_for_dma;
+ }
+
+ if (xdev->properties->pio_transfers == PIO_SUPPORTED) {
+ err = create_char_dev_iface_for_pio(xdev);
+ if (err != 0) {
+ pr_err("Unable to create char dev pio iface %d\n",
+ xdev->properties->pci_deviceid);
+ goto no_char_iface_for_pio;
+ }
+ }
+
+ return 0;
+
+no_char_iface_for_pio:
+ destroy_char_iface_for_dma(xdev);
+no_char_iface_for_dma:
+ return err;
+}
+
+static int setup_char_devices(u16 dev_prop_index)
+{
+ struct xlnx_ps_pcie_dma_client_device *xdev;
+ int err;
+ int i;
+
+ xdev = kzalloc(sizeof(*xdev), GFP_KERNEL);
+ if (!xdev) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ xdev->properties = &g_dma_deviceproperties_list[dev_prop_index];
+
+ err = acquire_dma_channels(xdev);
+ if (err != 0) {
+ pr_err("Unable to acquire dma channels %d\n",
+ dev_prop_index);
+ goto err_no_dma_channels;
+ }
+
+ err = create_char_dev_interfaces(xdev);
+ if (err != 0) {
+ pr_err("Unable to create char dev interfaces %d\n",
+ dev_prop_index);
+ goto err_no_char_dev_ifaces;
+ }
+
+ list_add_tail(&xdev->dev_node, &g_ps_pcie_dma_client_list);
+
+ return 0;
+
+err_no_char_dev_ifaces:
+ for (i = 0; i < MAX_ALLOWED_CHANNELS_IN_HW; i++)
+ dma_release_channel(xdev->pcie_dma_chan[i].chan);
+err_no_dma_channels:
+ kfree(xdev);
+ return err;
+}
+
+/**
+ * ps_pcie_dma_client_init - Driver init function
+ *
+ * Return: 0 on success. Non zero on failure
+ */
+static int __init ps_pcie_dma_client_init(void)
+{
+ int err;
+ int i;
+ size_t num_dma_dev_properties;
+
+ INIT_LIST_HEAD(&g_ps_pcie_dma_client_list);
+
+ g_ps_pcie_dma_client_class = class_create(THIS_MODULE, DRV_MODULE_NAME);
+ if (IS_ERR(g_ps_pcie_dma_client_class)) {
+ pr_err("%s failed to create class\n", DRV_MODULE_NAME);
+ return PTR_ERR(g_ps_pcie_dma_client_class);
+ }
+
+ num_dma_dev_properties = ARRAY_SIZE(g_dma_deviceproperties_list);
+ for (i = 0; i < num_dma_dev_properties; i++) {
+ err = setup_char_devices(i);
+ if (err) {
+ pr_err("Error creating char devices for %d\n", i);
+ goto err_no_char_devices;
+ }
+ }
+
+ pr_info("PS PCIe DMA Client Driver Init successful\n");
+ return 0;
+
+err_no_char_devices:
+ delete_char_devices();
+
+ if (g_ps_pcie_dma_client_class)
+ class_destroy(g_ps_pcie_dma_client_class);
+ return err;
+}
+late_initcall(ps_pcie_dma_client_init);
+
+/**
+ * ps_pcie_dma_client_exit - Driver exit function
+ *
+ */
+static void __exit ps_pcie_dma_client_exit(void)
+{
+ delete_char_devices();
+
+ if (g_ps_pcie_dma_client_class)
+ class_destroy(g_ps_pcie_dma_client_class);
+}
+
+module_exit(ps_pcie_dma_client_exit);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Xilinx PS PCIe DMA client Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_ps_pcie_main.c b/drivers/dma/xilinx/xilinx_ps_pcie_main.c
new file mode 100644
index 000000000000..cb3151219083
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_ps_pcie_main.c
@@ -0,0 +1,200 @@
+/*
+ * XILINX PS PCIe driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * Description
+ * PS PCIe DMA is a memory-mapped DMA engine used to execute PS to PL
+ * transfers on ZynqMP UltraScale+ devices.
+ * This PCIe driver creates a platform device with specific platform
+ * info, enabling creation of a DMA device corresponding to the channel
+ * information provided in the properties
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#include "xilinx_ps_pcie.h"
+#include "../dmaengine.h"
+
+#define DRV_MODULE_NAME "ps_pcie_dma"
+
+static int ps_pcie_dma_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void ps_pcie_dma_remove(struct pci_dev *pdev);
+
+static u32 channel_properties_pcie_axi[] = {
+ (u32)(PCIE_AXI_DIRECTION), (u32)(NUMBER_OF_BUFFER_DESCRIPTORS),
+ (u32)(DEFAULT_DMA_QUEUES), (u32)(CHANNEL_COAELSE_COUNT),
+ (u32)(CHANNEL_POLL_TIMER_FREQUENCY) };
+
+static u32 channel_properties_axi_pcie[] = {
+ (u32)(AXI_PCIE_DIRECTION), (u32)(NUMBER_OF_BUFFER_DESCRIPTORS),
+ (u32)(DEFAULT_DMA_QUEUES), (u32)(CHANNEL_COAELSE_COUNT),
+ (u32)(CHANNEL_POLL_TIMER_FREQUENCY) };
+
+static struct property_entry generic_pcie_ep_property[] = {
+ PROPERTY_ENTRY_U32("numchannels", (u32)MAX_NUMBER_OF_CHANNELS),
+ PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel0",
+ channel_properties_pcie_axi),
+ PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel1",
+ channel_properties_axi_pcie),
+ PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel2",
+ channel_properties_pcie_axi),
+ PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel3",
+ channel_properties_axi_pcie),
+ { },
+};
+
+static const struct platform_device_info xlnx_std_platform_dev_info = {
+ .name = XLNX_PLATFORM_DRIVER_NAME,
+ .properties = generic_pcie_ep_property,
+};
+
+/**
+ * ps_pcie_dma_probe - Driver probe function
+ * @pdev: Pointer to the pci_dev structure
+ * @ent: pci device id
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int ps_pcie_dma_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err;
+ struct platform_device *platform_dev;
+ struct platform_device_info platform_dev_info;
+
+ dev_info(&pdev->dev, "PS PCIe DMA Driver probe\n");
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ return err;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_info(&pdev->dev, "Cannot set 64 bit DMA mask\n");
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "DMA mask set error\n");
+ return err;
+ }
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_info(&pdev->dev, "Cannot set 64 bit consistent DMA mask\n");
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "Cannot set consistent DMA mask\n");
+ return err;
+ }
+ }
+
+ pci_set_master(pdev);
+
+ /* For Root DMA platform device will be created through device tree */
+ if (pdev->vendor == PCI_VENDOR_ID_XILINX &&
+ pdev->device == ZYNQMP_RC_DMA_DEVID)
+ return 0;
+
+ memcpy(&platform_dev_info, &xlnx_std_platform_dev_info,
+ sizeof(xlnx_std_platform_dev_info));
+
+ /* Do device specific channel configuration changes to
+ * platform_dev_info.properties if required
+ * More information on channel properties can be found
+ * at Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
+ */
+
+ platform_dev_info.parent = &pdev->dev;
+ platform_dev_info.data = &pdev;
+ platform_dev_info.size_data = sizeof(struct pci_dev **);
+
+ platform_dev = platform_device_register_full(&platform_dev_info);
+ if (IS_ERR(platform_dev)) {
+ dev_err(&pdev->dev,
+ "Cannot create platform device, aborting\n");
+ return PTR_ERR(platform_dev);
+ }
+
+ pci_set_drvdata(pdev, platform_dev);
+
+ dev_info(&pdev->dev, "PS PCIe DMA driver successfully probed\n");
+
+ return 0;
+}
+
+static const struct pci_device_id ps_pcie_dma_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_XILINX, ZYNQMP_DMA_DEVID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_XILINX, ZYNQMP_RC_DMA_DEVID) },
+ { }
+};
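+
+/*
+ * Exporting the ID table lets userspace autoload this module when a
+ * matching device is enumerated; assumed to be the intent here, since
+ * the table is otherwise only consulted by the PCI core at bind time.
+ */
+MODULE_DEVICE_TABLE(pci, ps_pcie_dma_tbl);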
+
+static struct pci_driver ps_pcie_dma_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = ps_pcie_dma_tbl,
+ .probe = ps_pcie_dma_probe,
+ .remove = ps_pcie_dma_remove,
+};
+
+/**
+ * ps_pcie_init - Driver init function
+ *
+ * Return: 0 on success. Non zero on failure
+ */
+static int __init ps_pcie_init(void)
+{
+ int ret;
+
+ pr_info("%s init()\n", DRV_MODULE_NAME);
+
+ ret = pci_register_driver(&ps_pcie_dma_driver);
+ if (ret)
+ return ret;
+
+ ret = dma_platform_driver_register();
+ if (ret)
+ pci_unregister_driver(&ps_pcie_dma_driver);
+
+ return ret;
+}
+
+/**
+ * ps_pcie_dma_remove - Driver remove function
+ * @pdev: Pointer to the pci_dev structure
+ *
+ * Return: void
+ */
+static void ps_pcie_dma_remove(struct pci_dev *pdev)
+{
+ struct platform_device *platform_dev;
+
+ platform_dev = (struct platform_device *)pci_get_drvdata(pdev);
+
+ if (platform_dev)
+ platform_device_unregister(platform_dev);
+}
+
+/**
+ * ps_pcie_exit - Driver exit function
+ *
+ * Return: void
+ */
+static void __exit ps_pcie_exit(void)
+{
+ pr_info("%s exit()\n", DRV_MODULE_NAME);
+
+ dma_platform_driver_unregister();
+ pci_unregister_driver(&ps_pcie_dma_driver);
+}
+
+module_init(ps_pcie_init);
+module_exit(ps_pcie_exit);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Xilinx PS PCIe DMA Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_ps_pcie_platform.c b/drivers/dma/xilinx/xilinx_ps_pcie_platform.c
new file mode 100644
index 000000000000..7599c3a9d300
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_ps_pcie_platform.c
@@ -0,0 +1,3170 @@
+/*
+ * XILINX PS PCIe DMA driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * Description
+ * PS PCIe DMA is a memory-mapped DMA engine used to execute PS to PL
+ * transfers on ZynqMP UltraScale+ devices
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#include "xilinx_ps_pcie.h"
+#include "../dmaengine.h"
+
+#define PLATFORM_DRIVER_NAME "ps_pcie_pform_dma"
+#define MAX_BARS 6
+
+#define DMA_BAR_NUMBER 0
+
+#define MIN_SW_INTR_TRANSACTIONS 2
+
+#define CHANNEL_PROPERTY_LENGTH 50
+#define WORKQ_NAME_SIZE 100
+#define INTR_HANDLR_NAME_SIZE 100
+
+#define PS_PCIE_DMA_IRQ_NOSHARE 0
+
+#define MAX_COALESCE_COUNT 255
+
+#define DMA_CHANNEL_REGS_SIZE 0x80
+
+#define DMA_SRCQPTRLO_REG_OFFSET (0x00) /* Source Q pointer Lo */
+#define DMA_SRCQPTRHI_REG_OFFSET (0x04) /* Source Q pointer Hi */
+#define DMA_SRCQSZ_REG_OFFSET (0x08) /* Source Q size */
+#define DMA_SRCQLMT_REG_OFFSET (0x0C) /* Source Q limit */
+#define DMA_DSTQPTRLO_REG_OFFSET (0x10) /* Destination Q pointer Lo */
+#define DMA_DSTQPTRHI_REG_OFFSET (0x14) /* Destination Q pointer Hi */
+#define DMA_DSTQSZ_REG_OFFSET (0x18) /* Destination Q size */
+#define DMA_DSTQLMT_REG_OFFSET (0x1C) /* Destination Q limit */
+#define DMA_SSTAQPTRLO_REG_OFFSET (0x20) /* Source Status Q pointer Lo */
+#define DMA_SSTAQPTRHI_REG_OFFSET (0x24) /* Source Status Q pointer Hi */
+#define DMA_SSTAQSZ_REG_OFFSET (0x28) /* Source Status Q size */
+#define DMA_SSTAQLMT_REG_OFFSET (0x2C) /* Source Status Q limit */
+#define DMA_DSTAQPTRLO_REG_OFFSET (0x30) /* Destination Status Q pointer Lo */
+#define DMA_DSTAQPTRHI_REG_OFFSET (0x34) /* Destination Status Q pointer Hi */
+#define DMA_DSTAQSZ_REG_OFFSET (0x38) /* Destination Status Q size */
+#define DMA_DSTAQLMT_REG_OFFSET (0x3C) /* Destination Status Q limit */
+#define DMA_SRCQNXT_REG_OFFSET (0x40) /* Source Q next */
+#define DMA_DSTQNXT_REG_OFFSET (0x44) /* Destination Q next */
+#define DMA_SSTAQNXT_REG_OFFSET (0x48) /* Source Status Q next */
+#define DMA_DSTAQNXT_REG_OFFSET (0x4C) /* Destination Status Q next */
+#define DMA_SCRATCH0_REG_OFFSET (0x50) /* Scratch pad register 0 */
+
+#define DMA_PCIE_INTR_CNTRL_REG_OFFSET (0x60) /* DMA PCIe intr control reg */
+#define DMA_PCIE_INTR_STATUS_REG_OFFSET (0x64) /* DMA PCIe intr status reg */
+#define DMA_AXI_INTR_CNTRL_REG_OFFSET (0x68) /* DMA AXI intr control reg */
+#define DMA_AXI_INTR_STATUS_REG_OFFSET (0x6C) /* DMA AXI intr status reg */
+#define DMA_PCIE_INTR_ASSRT_REG_OFFSET (0x70) /* PCIe intr assert reg */
+#define DMA_AXI_INTR_ASSRT_REG_OFFSET (0x74) /* AXI intr assert register */
+#define DMA_CNTRL_REG_OFFSET (0x78) /* DMA control register */
+#define DMA_STATUS_REG_OFFSET (0x7C) /* DMA status register */
+
+#define DMA_CNTRL_RST_BIT BIT(1)
+#define DMA_CNTRL_64BIT_STAQ_ELEMSZ_BIT BIT(2)
+#define DMA_CNTRL_ENABL_BIT BIT(0)
+#define DMA_STATUS_DMA_PRES_BIT BIT(15)
+#define DMA_STATUS_DMA_RUNNING_BIT BIT(0)
+#define DMA_QPTRLO_QLOCAXI_BIT BIT(0)
+#define DMA_QPTRLO_Q_ENABLE_BIT BIT(1)
+#define DMA_INTSTATUS_DMAERR_BIT BIT(1)
+#define DMA_INTSTATUS_SGLINTR_BIT BIT(2)
+#define DMA_INTSTATUS_SWINTR_BIT BIT(3)
+#define DMA_INTCNTRL_ENABLINTR_BIT BIT(0)
+#define DMA_INTCNTRL_DMAERRINTR_BIT BIT(1)
+#define DMA_INTCNTRL_DMASGINTR_BIT BIT(2)
+#define DMA_SW_INTR_ASSRT_BIT BIT(3)
+
+#define SOURCE_CONTROL_BD_BYTE_COUNT_MASK GENMASK(23, 0)
+#define SOURCE_CONTROL_BD_LOC_AXI BIT(24)
+#define SOURCE_CONTROL_BD_EOP_BIT BIT(25)
+#define SOURCE_CONTROL_BD_INTR_BIT BIT(26)
+#define SOURCE_CONTROL_BACK_TO_BACK_PACK_BIT BIT(25)
+#define SOURCE_CONTROL_ATTRIBUTES_MASK GENMASK(31, 28)
+#define SRC_CTL_ATTRIB_BIT_SHIFT (29)
+
+#define STA_BD_COMPLETED_BIT BIT(0)
+#define STA_BD_SOURCE_ERROR_BIT BIT(1)
+#define STA_BD_DESTINATION_ERROR_BIT BIT(2)
+#define STA_BD_INTERNAL_ERROR_BIT BIT(3)
+#define STA_BD_UPPER_STATUS_NONZERO_BIT BIT(31)
+#define STA_BD_BYTE_COUNT_MASK GENMASK(30, 4)
+
+#define STA_BD_BYTE_COUNT_SHIFT 4
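+
+/*
+ * For reference, the completed byte count of a status descriptor is
+ * recovered as below (a sketch; struct STATUS_DMA_DESCRIPTOR is defined
+ * later in this file):
+ *
+ *	bytes = (sta_bd->status_flag_byte_count & STA_BD_BYTE_COUNT_MASK)
+ *			>> STA_BD_BYTE_COUNT_SHIFT;
+ */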
+
+#define DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT (16)
+
+#define DMA_SRC_Q_LOW_BIT_SHIFT GENMASK(5, 0)
+
+#define MAX_TRANSFER_LENGTH 0x1000000
+
+#define AXI_ATTRIBUTE 0x3
+#define PCI_ATTRIBUTE 0x2
+
+#define ROOTDMA_Q_READ_ATTRIBUTE 0x8
+
+/*
+ * User Id programmed into Source Q will be copied into Status Q of Destination
+ */
+#define DEFAULT_UID 1
+
+/*
+ * DMA channel registers
+ */
+struct DMA_ENGINE_REGISTERS {
+ u32 src_q_low; /* 0x00 */
+ u32 src_q_high; /* 0x04 */
+ u32 src_q_size; /* 0x08 */
+ u32 src_q_limit; /* 0x0C */
+ u32 dst_q_low; /* 0x10 */
+ u32 dst_q_high; /* 0x14 */
+ u32 dst_q_size; /* 0x18 */
+ u32 dst_q_limit; /* 0x1c */
+ u32 stas_q_low; /* 0x20 */
+ u32 stas_q_high; /* 0x24 */
+ u32 stas_q_size; /* 0x28 */
+ u32 stas_q_limit; /* 0x2C */
+ u32 stad_q_low; /* 0x30 */
+ u32 stad_q_high; /* 0x34 */
+ u32 stad_q_size; /* 0x38 */
+ u32 stad_q_limit; /* 0x3C */
+ u32 src_q_next; /* 0x40 */
+ u32 dst_q_next; /* 0x44 */
+ u32 stas_q_next; /* 0x48 */
+ u32 stad_q_next; /* 0x4C */
+ u32 scrathc0; /* 0x50 */
+ u32 scrathc1; /* 0x54 */
+ u32 scrathc2; /* 0x58 */
+ u32 scrathc3; /* 0x5C */
+ u32 pcie_intr_cntrl; /* 0x60 */
+ u32 pcie_intr_status; /* 0x64 */
+ u32 axi_intr_cntrl; /* 0x68 */
+ u32 axi_intr_status; /* 0x6C */
+ u32 pcie_intr_assert; /* 0x70 */
+ u32 axi_intr_assert; /* 0x74 */
+ u32 dma_channel_ctrl; /* 0x78 */
+ u32 dma_channel_status; /* 0x7C */
+} __attribute__((__packed__));
+
+/**
+ * struct SOURCE_DMA_DESCRIPTOR - Source Hardware Descriptor
+ * @system_address: 64 bit buffer physical address
+ * @control_byte_count: Byte count/buffer length and control flags
+ * @user_handle: User handle gets copied to status q on completion
+ * @user_id: User id gets copied to status q of destination
+ */
+struct SOURCE_DMA_DESCRIPTOR {
+ u64 system_address;
+ u32 control_byte_count;
+ u16 user_handle;
+ u16 user_id;
+} __attribute__((__packed__));
+
+/**
+ * struct DEST_DMA_DESCRIPTOR - Destination Hardware Descriptor
+ * @system_address: 64 bit buffer physical address
+ * @control_byte_count: Byte count/buffer length and control flags
+ * @user_handle: User handle gets copied to status q on completion
+ * @reserved: Reserved field
+ */
+struct DEST_DMA_DESCRIPTOR {
+ u64 system_address;
+ u32 control_byte_count;
+ u16 user_handle;
+ u16 reserved;
+} __attribute__((__packed__));
+
+/**
+ * struct STATUS_DMA_DESCRIPTOR - Status Hardware Descriptor
+ * @status_flag_byte_count: Byte count/buffer length and status flags
+ * @user_handle: User handle gets copied from src/dstq on completion
+ * @user_id: User id gets copied from srcq
+ */
+struct STATUS_DMA_DESCRIPTOR {
+ u32 status_flag_byte_count;
+ u16 user_handle;
+ u16 user_id;
+} __attribute__((__packed__));
+
+enum PACKET_CONTEXT_AVAILABILITY {
+ FREE = 0, /* Packet transfer parameter context is free */
+ IN_USE /* Packet transfer parameter context is in use */
+};
+
+struct ps_pcie_transfer_elements {
+ struct list_head node;
+ dma_addr_t src_pa;
+ dma_addr_t dst_pa;
+ u32 transfer_bytes;
+};
+
+struct ps_pcie_tx_segment {
+ struct list_head node;
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head transfer_nodes;
+ u32 src_elements;
+ u32 dst_elements;
+ u32 total_transfer_bytes;
+};
+
+struct ps_pcie_intr_segment {
+ struct list_head node;
+ struct dma_async_tx_descriptor async_intr_tx;
+};
+
+/*
+ * The context structure stored for each DMA transaction
+ * This structure is maintained separately for Src Q and Destination Q
+ * @availability_status: Indicates whether packet context is available
+ * @idx_sop: Indicates starting index of buffer descriptor for a transfer
+ * @idx_eop: Indicates ending index of buffer descriptor for a transfer
+ * @seg: Transaction segment associated with this context
+ */
+struct PACKET_TRANSFER_PARAMS {
+ enum PACKET_CONTEXT_AVAILABILITY availability_status;
+ u16 idx_sop;
+ u16 idx_eop;
+ struct ps_pcie_tx_segment *seg;
+};
+
+enum CHANNEL_STATE {
+ CHANNEL_RESOURCE_UNALLOCATED = 0, /* Channel resources not allocated */
+ CHANNEL_UNAVIALBLE, /* Channel inactive */
+ CHANNEL_AVAILABLE, /* Channel available for transfers */
+ CHANNEL_ERROR /* Channel encountered errors */
+};
+
+enum BUFFER_LOCATION {
+ BUFFER_LOC_PCI = 0,
+ BUFFER_LOC_AXI,
+ BUFFER_LOC_INVALID
+};
+
+enum dev_channel_properties {
+ DMA_CHANNEL_DIRECTION = 0,
+ NUM_DESCRIPTORS,
+ NUM_QUEUES,
+ COALESE_COUNT,
+ POLL_TIMER_FREQUENCY
+};
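+
+/*
+ * Each ps_pcie_channelN property is expected to be a five element u32
+ * array indexed by enum dev_channel_properties; e.g. the arrays
+ * registered in xilinx_ps_pcie_main.c follow the layout:
+ *
+ *	u32 props[] = { PCIE_AXI_DIRECTION, NUMBER_OF_BUFFER_DESCRIPTORS,
+ *			DEFAULT_DMA_QUEUES, CHANNEL_COAELSE_COUNT,
+ *			CHANNEL_POLL_TIMER_FREQUENCY };
+ */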
+
+/*
+ * struct ps_pcie_dma_chan - Driver specific DMA channel structure
+ * @xdev: Driver specific device structure
+ * @dev: The dma device
+ * @common: DMA common channel
+ * @chan_base: Pointer to Channel registers
+ * @channel_number: DMA channel number in the device
+ * @num_queues: Number of queues per channel.
+ * It should be four for memory mapped case and
+ * two for Streaming case
+ * @direction: Transfer direction
+ * @state: Indicates channel state
+ * @channel_lock: Spin lock to be used before changing channel state
+ * @cookie_lock: Spin lock to be used before assigning cookie for a transaction
+ * @coalesce_count: Indicates number of packet transfers before interrupts
+ * @poll_timer_freq: Indicates frequency of polling for completed transactions
+ * @poll_timer: Timer to poll dma buffer descriptors if coalesce count is > 0
+ * @src_avail_descriptors: Available sgl source descriptors
+ * @src_desc_lock: Lock for synchronizing src_avail_descriptors
+ * @dst_avail_descriptors: Available sgl destination descriptors
+ * @dst_desc_lock: Lock for synchronizing dst_avail_descriptors
+ * @src_sgl_bd_pa: Physical address of Source SGL buffer Descriptors
+ * @psrc_sgl_bd: Virtual address of Source SGL buffer Descriptors
+ * @src_sgl_freeidx: Holds index of Source SGL buffer descriptor to be filled
+ * @dst_sgl_bd_pa: Physical address of Dst SGL buffer Descriptors
+ * @pdst_sgl_bd: Virtual address of Dst SGL buffer Descriptors
+ * @dst_sgl_freeidx: Holds index of Destination SGL buffer descriptor
+ * to be filled
+ * @src_sta_bd_pa: Physical address of StatusQ buffer Descriptors
+ * @psrc_sta_bd: Virtual address of Src StatusQ buffer Descriptors
+ * @src_staprobe_idx: Holds index of Status Q to be examined for SrcQ updates
+ * @src_sta_hw_probe_idx: Holds index of maximum limit of Status Q for hardware
+ * @dst_sta_bd_pa: Physical address of Dst StatusQ buffer Descriptor
+ * @pdst_sta_bd: Virtual address of Dst Status Q buffer Descriptors
+ * @dst_staprobe_idx: Holds index of Status Q to be examined for updates
+ * @dst_sta_hw_probe_idx: Holds index of max limit of Dst Status Q for hardware
+ * @read_attribute: Describes the attributes of buffer in srcq
+ * @write_attribute: Describes the attributes of buffer in dstq
+ * @intr_status_offset: Register offset to be checked on receiving interrupt
+ * @intr_control_offset: Register offset to be used to control interrupts
+ * @ppkt_ctx_srcq: Virtual address of packet context to Src Q updates
+ * @idx_ctx_srcq_head: Holds index of packet context to be filled for Source Q
+ * @idx_ctx_srcq_tail: Holds index of packet context to be examined for Source Q
+ * @ppkt_ctx_dstq: Virtual address of packet context to Dst Q updates
+ * @idx_ctx_dstq_head: Holds index of packet context to be filled for Dst Q
+ * @idx_ctx_dstq_tail: Holds index of packet context to be examined for Dst Q
+ * @pending_list_lock: Lock to be taken before updating pending transfers list
+ * @pending_list: List of transactions submitted to channel
+ * @active_list_lock: Lock to be taken before transferring transactions from
+ * pending list to active list which will be subsequently
+ * submitted to hardware
+ * @active_list: List of transactions that will be submitted to hardware
+ * @pending_interrupts_lock: Lock to be taken before updating pending Intr list
+ * @pending_interrupts_list: List of interrupt transactions submitted to channel
+ * @active_interrupts_lock: Lock to be taken before transferring transactions
+ * from pending interrupt list to active interrupt list
+ * @active_interrupts_list: List of interrupt transactions that are active
+ * @transactions_pool: Mem pool to allocate dma transactions quickly
+ * @intr_transactions_pool: Mem pool to allocate interrupt transactions quickly
+ * @sw_intrs_wrkq: Work Q which performs handling of software intrs
+ * @handle_sw_intrs: Work function handling software interrupts
+ * @maintenance_workq: Work Q to perform maintenance tasks during stop or error
+ * @handle_chan_reset: Work that invokes channel reset function
+ * @handle_chan_shutdown: Work that invokes channel shutdown function
+ * @handle_chan_terminate: Work that invokes channel transactions termination
+ * @chan_shutdown_complt: Completion variable which says shutdown is done
+ * @chan_terminate_complete: Completion variable which says terminate is done
+ * @primary_desc_cleanup: Work Q which performs work related to sgl handling
+ * @handle_primary_desc_cleanup: Work that invokes src Q, dst Q cleanup
+ * and programming
+ * @chan_programming: Work Q which performs work related to channel programming
+ * @handle_chan_programming: Work that invokes channel programming function
+ * @srcq_desc_cleanup: Work Q which performs src Q descriptor cleanup
+ * @handle_srcq_desc_cleanup: Work function handling Src Q completions
+ * @dstq_desc_cleanup: Work Q which performs dst Q descriptor cleanup
+ * @handle_dstq_desc_cleanup: Work function handling Dst Q completions
+ * @srcq_work_complete: Src Q Work completion variable for primary work
+ * @dstq_work_complete: Dst Q Work completion variable for primary work
+ */
+struct ps_pcie_dma_chan {
+ struct xlnx_pcie_dma_device *xdev;
+ struct device *dev;
+
+ struct dma_chan common;
+
+ struct DMA_ENGINE_REGISTERS *chan_base;
+ u16 channel_number;
+
+ u32 num_queues;
+ enum dma_data_direction direction;
+ enum BUFFER_LOCATION srcq_buffer_location;
+ enum BUFFER_LOCATION dstq_buffer_location;
+
+ u32 total_descriptors;
+
+ enum CHANNEL_STATE state;
+ spinlock_t channel_lock; /* For changing channel state */
+
+ spinlock_t cookie_lock; /* For acquiring cookie from dma framework*/
+
+ u32 coalesce_count;
+ u32 poll_timer_freq;
+
+ struct timer_list poll_timer;
+
+ u32 src_avail_descriptors;
+ spinlock_t src_desc_lock; /* For handling srcq available descriptors */
+
+ u32 dst_avail_descriptors;
+ spinlock_t dst_desc_lock; /* For handling dstq available descriptors */
+
+ dma_addr_t src_sgl_bd_pa;
+ struct SOURCE_DMA_DESCRIPTOR *psrc_sgl_bd;
+ u32 src_sgl_freeidx;
+
+ dma_addr_t dst_sgl_bd_pa;
+ struct DEST_DMA_DESCRIPTOR *pdst_sgl_bd;
+ u32 dst_sgl_freeidx;
+
+ dma_addr_t src_sta_bd_pa;
+ struct STATUS_DMA_DESCRIPTOR *psrc_sta_bd;
+ u32 src_staprobe_idx;
+ u32 src_sta_hw_probe_idx;
+
+ dma_addr_t dst_sta_bd_pa;
+ struct STATUS_DMA_DESCRIPTOR *pdst_sta_bd;
+ u32 dst_staprobe_idx;
+ u32 dst_sta_hw_probe_idx;
+
+ u32 read_attribute;
+ u32 write_attribute;
+
+ u32 intr_status_offset;
+ u32 intr_control_offset;
+
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx_srcq;
+ u16 idx_ctx_srcq_head;
+ u16 idx_ctx_srcq_tail;
+
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx_dstq;
+ u16 idx_ctx_dstq_head;
+ u16 idx_ctx_dstq_tail;
+
+ spinlock_t pending_list_lock; /* For handling dma pending_list */
+ struct list_head pending_list;
+ spinlock_t active_list_lock; /* For handling dma active_list */
+ struct list_head active_list;
+
+ spinlock_t pending_interrupts_lock; /* For dma pending interrupts list*/
+ struct list_head pending_interrupts_list;
+ spinlock_t active_interrupts_lock; /* For dma active interrupts list*/
+ struct list_head active_interrupts_list;
+
+ mempool_t *transactions_pool;
+ mempool_t *tx_elements_pool;
+ mempool_t *intr_transactions_pool;
+
+ struct workqueue_struct *sw_intrs_wrkq;
+ struct work_struct handle_sw_intrs;
+
+ struct workqueue_struct *maintenance_workq;
+ struct work_struct handle_chan_reset;
+ struct work_struct handle_chan_shutdown;
+ struct work_struct handle_chan_terminate;
+
+ struct completion chan_shutdown_complt;
+ struct completion chan_terminate_complete;
+
+ struct workqueue_struct *primary_desc_cleanup;
+ struct work_struct handle_primary_desc_cleanup;
+
+ struct workqueue_struct *chan_programming;
+ struct work_struct handle_chan_programming;
+
+ struct workqueue_struct *srcq_desc_cleanup;
+ struct work_struct handle_srcq_desc_cleanup;
+ struct completion srcq_work_complete;
+
+ struct workqueue_struct *dstq_desc_cleanup;
+ struct work_struct handle_dstq_desc_cleanup;
+ struct completion dstq_work_complete;
+};
+
+/*
+ * struct xlnx_pcie_dma_device - Driver specific platform device structure
+ * @is_rootdma: Indicates whether the dma instance is root port dma
+ * @dma_buf_ext_addr: Indicates whether target system is 32 bit or 64 bit
+ * @bar_mask: Indicates available pcie bars
+ * @board_number: Count value of platform device
+ * @dev: Device structure pointer for pcie device
+ * @channels: Pointer to device DMA channels structure
+ * @common: DMA device structure
+ * @num_channels: Number of channels active for the device
+ * @reg_base: Base address of first DMA channel of the device
+ * @irq_vecs: Number of irq vectors allocated to pci device
+ * @pci_dev: Parent pci device which created this platform device
+ * @bar_info: PCIe bar related information
+ * @platform_irq_vec: Platform irq vector number for root dma
+ * @rootdma_vendor: PCI Vendor id for root dma
+ * @rootdma_device: PCI Device id for root dma
+ */
+struct xlnx_pcie_dma_device {
+ bool is_rootdma;
+ bool dma_buf_ext_addr;
+ u32 bar_mask;
+ u16 board_number;
+ struct device *dev;
+ struct ps_pcie_dma_chan *channels;
+ struct dma_device common;
+ int num_channels;
+ int irq_vecs;
+ void __iomem *reg_base;
+ struct pci_dev *pci_dev;
+ struct BAR_PARAMS bar_info[MAX_BARS];
+ int platform_irq_vec;
+ u16 rootdma_vendor;
+ u16 rootdma_device;
+};
+
+#define to_xilinx_chan(chan) \
+ container_of(chan, struct ps_pcie_dma_chan, common)
+#define to_ps_pcie_dma_tx_descriptor(tx) \
+ container_of(tx, struct ps_pcie_tx_segment, async_tx)
+#define to_ps_pcie_dma_tx_intr_descriptor(tx) \
+ container_of(tx, struct ps_pcie_intr_segment, async_intr_tx)
+
+/* Function Prototypes */
+static u32 ps_pcie_dma_read(struct ps_pcie_dma_chan *chan, u32 reg);
+static void ps_pcie_dma_write(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 value);
+static void ps_pcie_dma_clr_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask);
+static void ps_pcie_dma_set_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask);
+static int irq_setup(struct xlnx_pcie_dma_device *xdev);
+static int platform_irq_setup(struct xlnx_pcie_dma_device *xdev);
+static int chan_intr_setup(struct xlnx_pcie_dma_device *xdev);
+static int device_intr_setup(struct xlnx_pcie_dma_device *xdev);
+static int irq_probe(struct xlnx_pcie_dma_device *xdev);
+static int ps_pcie_check_intr_status(struct ps_pcie_dma_chan *chan);
+static irqreturn_t ps_pcie_dma_dev_intr_handler(int irq, void *data);
+static irqreturn_t ps_pcie_dma_chan_intr_handler(int irq, void *data);
+static int init_hw_components(struct ps_pcie_dma_chan *chan);
+static int init_sw_components(struct ps_pcie_dma_chan *chan);
+static void update_channel_read_attribute(struct ps_pcie_dma_chan *chan);
+static void update_channel_write_attribute(struct ps_pcie_dma_chan *chan);
+static void ps_pcie_chan_reset(struct ps_pcie_dma_chan *chan);
+static void poll_completed_transactions(struct timer_list *t);
+static bool check_descriptors_for_two_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static bool check_descriptors_for_all_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static bool check_descriptor_availability(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static void handle_error(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_update_srcq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static void xlnx_ps_pcie_update_dstq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static void ps_pcie_chan_program_work(struct work_struct *work);
+static void dst_cleanup_work(struct work_struct *work);
+static void src_cleanup_work(struct work_struct *work);
+static void ps_pcie_chan_primary_work(struct work_struct *work);
+static int probe_channel_properties(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev,
+ u16 channel_number);
+static void xlnx_ps_pcie_destroy_mempool(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_worker_queues(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_pkt_ctxts(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_descriptors(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_channel_activate(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_channel_quiesce(struct ps_pcie_dma_chan *chan);
+static void ivk_cbk_for_pending(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_reset_channel(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_poll_timer(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_alloc_poll_timer(struct ps_pcie_dma_chan *chan);
+static void terminate_transactions_work(struct work_struct *work);
+static void chan_shutdown_work(struct work_struct *work);
+static void chan_reset_work(struct work_struct *work);
+static int xlnx_ps_pcie_alloc_worker_threads(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_alloc_mempool(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_alloc_pkt_contexts(struct ps_pcie_dma_chan *chan);
+static int dma_alloc_descriptors_two_queues(struct ps_pcie_dma_chan *chan);
+static int dma_alloc_decriptors_all_queues(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_dma_free_chan_resources(struct dma_chan *dchan);
+static int xlnx_ps_pcie_dma_alloc_chan_resources(struct dma_chan *dchan);
+static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx);
+static dma_cookie_t xilinx_intr_tx_submit(struct dma_async_tx_descriptor *tx);
+static struct dma_async_tx_descriptor *
+xlnx_ps_pcie_dma_prep_memcpy(struct dma_chan *channel, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len,
+ unsigned long flags);
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_slave_sg(
+ struct dma_chan *channel, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context);
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_interrupt(
+ struct dma_chan *channel, unsigned long flags);
+static void xlnx_ps_pcie_dma_issue_pending(struct dma_chan *channel);
+static int xlnx_ps_pcie_dma_terminate_all(struct dma_chan *channel);
+static int read_rootdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev);
+static int read_epdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev);
+static int xlnx_pcie_dma_driver_probe(struct platform_device *platform_dev);
+static int xlnx_pcie_dma_driver_remove(struct platform_device *platform_dev);
+
+/* IO accessors */
+static inline u32 ps_pcie_dma_read(struct ps_pcie_dma_chan *chan, u32 reg)
+{
+ return ioread32((char __iomem *)chan->chan_base + reg);
+}
+
+static inline void ps_pcie_dma_write(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 value)
+{
+ iowrite32(value, (char __iomem *)chan->chan_base + reg);
+}
+
+static inline void ps_pcie_dma_clr_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask)
+{
+ ps_pcie_dma_write(chan, reg, ps_pcie_dma_read(chan, reg) & ~mask);
+}
+
+static inline void ps_pcie_dma_set_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask)
+{
+ ps_pcie_dma_write(chan, reg, ps_pcie_dma_read(chan, reg) | mask);
+}
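+
+/*
+ * Representative accessor usage (a sketch mirroring
+ * ps_pcie_check_intr_status() below), e.g. clearing a persistent
+ * interrupt status bit:
+ *
+ *	ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ *			     DMA_INTSTATUS_SGLINTR_BIT);
+ */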
+
+/**
+ * ps_pcie_dma_dev_intr_handler - This will be invoked for MSI/Legacy interrupts
+ *
+ * @irq: IRQ number
+ * @data: Pointer to the PS PCIe DMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t ps_pcie_dma_dev_intr_handler(int irq, void *data)
+{
+ struct xlnx_pcie_dma_device *xdev =
+ (struct xlnx_pcie_dma_device *)data;
+ struct ps_pcie_dma_chan *chan = NULL;
+ int i;
+ int err = -1;
+ int ret = -1;
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ chan = &xdev->channels[i];
+ err = ps_pcie_check_intr_status(chan);
+ if (err == 0)
+ ret = 0;
+ }
+
+ return (ret == 0) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/**
+ * ps_pcie_dma_chan_intr_handler - This will be invoked for MSI-X interrupts
+ *
+ * @irq: IRQ number
+ * @data: Pointer to the PS PCIe DMA channel structure
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t ps_pcie_dma_chan_intr_handler(int irq, void *data)
+{
+ struct ps_pcie_dma_chan *chan = (struct ps_pcie_dma_chan *)data;
+
+ ps_pcie_check_intr_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * chan_intr_setup - Requests Interrupt handler for individual channels
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: 0 on success and non zero value on failure.
+ */
+static int chan_intr_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ struct ps_pcie_dma_chan *chan;
+ int i;
+ int err = 0;
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ chan = &xdev->channels[i];
+ err = devm_request_irq(xdev->dev,
+ pci_irq_vector(xdev->pci_dev, i),
+ ps_pcie_dma_chan_intr_handler,
+ PS_PCIE_DMA_IRQ_NOSHARE,
+ "PS PCIe DMA Chan Intr handler", chan);
+ if (err) {
+ dev_err(xdev->dev,
+ "Irq %d for chan %d error %d\n",
+ pci_irq_vector(xdev->pci_dev, i),
+ chan->channel_number, err);
+ break;
+ }
+ }
+
+ if (err) {
+ while (--i >= 0) {
+ chan = &xdev->channels[i];
+ devm_free_irq(xdev->dev,
+ pci_irq_vector(xdev->pci_dev, i), chan);
+ }
+ }
+
+ return err;
+}
+
+/**
+ * device_intr_setup - Requests interrupt handler for DMA device
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: 0 on success and non zero value on failure.
+ */
+static int device_intr_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+ unsigned long intr_flags = IRQF_SHARED;
+
+ if (xdev->pci_dev->msix_enabled || xdev->pci_dev->msi_enabled)
+ intr_flags = PS_PCIE_DMA_IRQ_NOSHARE;
+
+ err = devm_request_irq(xdev->dev,
+ pci_irq_vector(xdev->pci_dev, 0),
+ ps_pcie_dma_dev_intr_handler,
+ intr_flags,
+ "PS PCIe DMA Intr Handler", xdev);
+ if (err)
+ dev_err(xdev->dev, "Couldn't request irq %d\n",
+ pci_irq_vector(xdev->pci_dev, 0));
+
+ return err;
+}
+
+/**
+ * irq_setup - Requests interrupts based on the interrupt type detected
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: 0 on success and non zero value on failure.
+ */
+static int irq_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+
+ if (xdev->irq_vecs == xdev->num_channels)
+ err = chan_intr_setup(xdev);
+ else
+ err = device_intr_setup(xdev);
+
+ return err;
+}
+
+static int platform_irq_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+
+ err = devm_request_irq(xdev->dev,
+ xdev->platform_irq_vec,
+ ps_pcie_dma_dev_intr_handler,
+ IRQF_SHARED,
+ "PS PCIe Root DMA Handler", xdev);
+ if (err)
+ dev_err(xdev->dev, "Couldn't request irq %d\n",
+ xdev->platform_irq_vec);
+
+ return err;
+}
+
+/**
+ * irq_probe - Checks which interrupt types can be serviced by hardware
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: Number of interrupt vectors when successful or -ENOSPC on failure
+ */
+static int irq_probe(struct xlnx_pcie_dma_device *xdev)
+{
+ struct pci_dev *pdev;
+
+ pdev = xdev->pci_dev;
+
+ xdev->irq_vecs = pci_alloc_irq_vectors(pdev, 1, xdev->num_channels,
+ PCI_IRQ_ALL_TYPES);
+ return xdev->irq_vecs;
+}
+
+/**
+ * ps_pcie_check_intr_status - Checks channel interrupt status
+ *
+ * @chan: Pointer to the PS PCIe DMA channel structure
+ *
+ * Return: 0 if interrupt is pending on channel
+ * -1 if no interrupt is pending on channel
+ */
+static int ps_pcie_check_intr_status(struct ps_pcie_dma_chan *chan)
+{
+ int err = -1;
+ u32 status;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return err;
+
+ status = ps_pcie_dma_read(chan, chan->intr_status_offset);
+
+ if (status & DMA_INTSTATUS_SGLINTR_BIT) {
+ if (chan->primary_desc_cleanup) {
+ queue_work(chan->primary_desc_cleanup,
+ &chan->handle_primary_desc_cleanup);
+ }
+ /* Clearing Persistent bit */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_SGLINTR_BIT);
+ err = 0;
+ }
+
+ if (status & DMA_INTSTATUS_SWINTR_BIT) {
+ if (chan->sw_intrs_wrkq)
+ queue_work(chan->sw_intrs_wrkq, &chan->handle_sw_intrs);
+ /* Clearing Persistent bit */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_SWINTR_BIT);
+ err = 0;
+ }
+
+ if (status & DMA_INTSTATUS_DMAERR_BIT) {
+ dev_err(chan->dev,
+ "DMA Channel %d ControlStatus Reg: 0x%x",
+ chan->channel_number, status);
+ dev_err(chan->dev,
+ "Chn %d SrcQLmt = %d SrcQSz = %d SrcQNxt = %d",
+ chan->channel_number,
+ chan->chan_base->src_q_limit,
+ chan->chan_base->src_q_size,
+ chan->chan_base->src_q_next);
+ dev_err(chan->dev,
+ "Chn %d SrcStaLmt = %d SrcStaSz = %d SrcStaNxt = %d",
+ chan->channel_number,
+ chan->chan_base->stas_q_limit,
+ chan->chan_base->stas_q_size,
+ chan->chan_base->stas_q_next);
+ dev_err(chan->dev,
+ "Chn %d DstQLmt = %d DstQSz = %d DstQNxt = %d",
+ chan->channel_number,
+ chan->chan_base->dst_q_limit,
+ chan->chan_base->dst_q_size,
+ chan->chan_base->dst_q_next);
+ dev_err(chan->dev,
+ "Chan %d DstStaLmt = %d DstStaSz = %d DstStaNxt = %d",
+ chan->channel_number,
+ chan->chan_base->stad_q_limit,
+ chan->chan_base->stad_q_size,
+ chan->chan_base->stad_q_next);
+ /* Clearing Persistent bit */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_DMAERR_BIT);
+
+ handle_error(chan);
+
+ err = 0;
+ }
+
+ return err;
+}
+
+static int init_hw_components(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->psrc_sgl_bd && chan->psrc_sta_bd) {
+ /* Programming SourceQ and StatusQ bd addresses */
+ chan->chan_base->src_q_next = 0;
+ chan->chan_base->src_q_high =
+ upper_32_bits(chan->src_sgl_bd_pa);
+ chan->chan_base->src_q_size = chan->total_descriptors;
+ chan->chan_base->src_q_limit = 0;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->src_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->src_q_low = 0;
+ }
+ chan->chan_base->src_q_low |=
+ (lower_32_bits((chan->src_sgl_bd_pa))
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+
+ chan->chan_base->stas_q_next = 0;
+ chan->chan_base->stas_q_high =
+ upper_32_bits(chan->src_sta_bd_pa);
+ chan->chan_base->stas_q_size = chan->total_descriptors;
+ chan->chan_base->stas_q_limit = chan->total_descriptors - 1;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->stas_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->stas_q_low = 0;
+ }
+ chan->chan_base->stas_q_low |=
+ (lower_32_bits(chan->src_sta_bd_pa)
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+ }
+
+ if (chan->pdst_sgl_bd && chan->pdst_sta_bd) {
+ /* Programming DestinationQ and StatusQ buffer descriptors */
+ chan->chan_base->dst_q_next = 0;
+ chan->chan_base->dst_q_high =
+ upper_32_bits(chan->dst_sgl_bd_pa);
+ chan->chan_base->dst_q_size = chan->total_descriptors;
+ chan->chan_base->dst_q_limit = 0;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->dst_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->dst_q_low = 0;
+ }
+ chan->chan_base->dst_q_low |=
+ (lower_32_bits(chan->dst_sgl_bd_pa)
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+
+ chan->chan_base->stad_q_next = 0;
+ chan->chan_base->stad_q_high =
+ upper_32_bits(chan->dst_sta_bd_pa);
+ chan->chan_base->stad_q_size = chan->total_descriptors;
+ chan->chan_base->stad_q_limit = chan->total_descriptors - 1;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->stad_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->stad_q_low = 0;
+ }
+ chan->chan_base->stad_q_low |=
+ (lower_32_bits(chan->dst_sta_bd_pa)
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+ }
+
+ return 0;
+}
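+
+/*
+ * Queue programming convention above: a data queue starts empty, so
+ * its limit register is written as 0, while a status queue is handed
+ * entirely to hardware, so its limit is total_descriptors - 1.
+ */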
+
+static void update_channel_read_attribute(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->xdev->is_rootdma) {
+ /* For Root DMA, Host Memory and Buffer Descriptors
+ * will be on AXI side
+ */
+ if (chan->srcq_buffer_location == BUFFER_LOC_PCI) {
+ chan->read_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+ } else if (chan->srcq_buffer_location == BUFFER_LOC_AXI) {
+ chan->read_attribute = AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ }
+ } else {
+ if (chan->srcq_buffer_location == BUFFER_LOC_PCI) {
+ chan->read_attribute = PCI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ } else if (chan->srcq_buffer_location == BUFFER_LOC_AXI) {
+ chan->read_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+ }
+ }
+}
+
+static void update_channel_write_attribute(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->xdev->is_rootdma) {
+ /* For Root DMA, Host Memory and Buffer Descriptors
+ * will be on AXI side
+ */
+ if (chan->dstq_buffer_location == BUFFER_LOC_PCI) {
+ chan->write_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+ } else if (chan->dstq_buffer_location == BUFFER_LOC_AXI) {
+ chan->write_attribute = AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ }
+ } else {
+ if (chan->dstq_buffer_location == BUFFER_LOC_PCI) {
+ chan->write_attribute = PCI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ } else if (chan->dstq_buffer_location == BUFFER_LOC_AXI) {
+ chan->write_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+ }
+ }
+ chan->write_attribute |= SOURCE_CONTROL_BACK_TO_BACK_PACK_BIT;
+}
+
+static int init_sw_components(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->ppkt_ctx_srcq && chan->psrc_sgl_bd &&
+ chan->psrc_sta_bd) {
+ memset(chan->ppkt_ctx_srcq, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS)
+ * chan->total_descriptors);
+
+ memset(chan->psrc_sgl_bd, 0,
+ sizeof(struct SOURCE_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ memset(chan->psrc_sta_bd, 0,
+ sizeof(struct STATUS_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ chan->src_avail_descriptors = chan->total_descriptors;
+
+ chan->src_sgl_freeidx = 0;
+ chan->src_staprobe_idx = 0;
+ chan->src_sta_hw_probe_idx = chan->total_descriptors - 1;
+ chan->idx_ctx_srcq_head = 0;
+ chan->idx_ctx_srcq_tail = 0;
+ }
+
+ if (chan->ppkt_ctx_dstq && chan->pdst_sgl_bd &&
+ chan->pdst_sta_bd) {
+ memset(chan->ppkt_ctx_dstq, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS)
+ * chan->total_descriptors);
+
+ memset(chan->pdst_sgl_bd, 0,
+ sizeof(struct DEST_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ memset(chan->pdst_sta_bd, 0,
+ sizeof(struct STATUS_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ chan->dst_avail_descriptors = chan->total_descriptors;
+
+ chan->dst_sgl_freeidx = 0;
+ chan->dst_staprobe_idx = 0;
+ chan->dst_sta_hw_probe_idx = chan->total_descriptors - 1;
+ chan->idx_ctx_dstq_head = 0;
+ chan->idx_ctx_dstq_tail = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * ps_pcie_chan_reset - Resets the channel by programming relevant registers
+ *
+ * @chan: PS PCIe DMA channel information holder
+ * Return: void
+ */
+static void ps_pcie_chan_reset(struct ps_pcie_dma_chan *chan)
+{
+ /* Enable channel reset */
+ ps_pcie_dma_set_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_RST_BIT);
+
+ mdelay(10);
+
+ /* Disable channel reset */
+ ps_pcie_dma_clr_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_RST_BIT);
+}
+
+/**
+ * poll_completed_transactions - Function invoked by poll timer
+ *
+ * @t: Pointer to timer triggering this callback
+ * Return: void
+ */
+static void poll_completed_transactions(struct timer_list *t)
+{
+ struct ps_pcie_dma_chan *chan = from_timer(chan, t, poll_timer);
+
+ if (chan->state == CHANNEL_AVAILABLE) {
+ queue_work(chan->primary_desc_cleanup,
+ &chan->handle_primary_desc_cleanup);
+ }
+
+ mod_timer(&chan->poll_timer, jiffies + chan->poll_timer_freq);
+}
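+
+/*
+ * With interrupt coalescing enabled, completions below the coalesce
+ * threshold would not raise an interrupt on their own; this timer
+ * makes sure they are still reaped within poll_timer_freq jiffies.
+ */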
+
+static bool check_descriptors_for_two_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ if (seg->src_elements) {
+ if (chan->src_avail_descriptors >=
+ seg->src_elements) {
+ return true;
+ }
+ } else if (seg->dst_elements) {
+ if (chan->dst_avail_descriptors >=
+ seg->dst_elements) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool check_descriptors_for_all_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ if (chan->src_avail_descriptors >=
+ seg->src_elements &&
+ chan->dst_avail_descriptors >=
+ seg->dst_elements) {
+ return true;
+ }
+
+ return false;
+}
+
+static bool check_descriptor_availability(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ if (chan->num_queues == DEFAULT_DMA_QUEUES)
+ return check_descriptors_for_all_queues(chan, seg);
+ else
+ return check_descriptors_for_two_queues(chan, seg);
+}
+
+static void handle_error(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->state != CHANNEL_AVAILABLE)
+ return;
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_ERROR;
+ spin_unlock(&chan->channel_lock);
+
+ if (chan->maintenance_workq)
+ queue_work(chan->maintenance_workq, &chan->handle_chan_reset);
+}
+
+static void xlnx_ps_pcie_update_srcq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ struct SOURCE_DMA_DESCRIPTOR *pdesc;
+ struct PACKET_TRANSFER_PARAMS *pkt_ctx = NULL;
+ struct ps_pcie_transfer_elements *ele = NULL;
+ u32 i = 0;
+
+ pkt_ctx = chan->ppkt_ctx_srcq + chan->idx_ctx_srcq_head;
+ if (pkt_ctx->availability_status == IN_USE) {
+ dev_err(chan->dev,
+ "src pkt context not avail for channel %d\n",
+ chan->channel_number);
+ handle_error(chan);
+ return;
+ }
+
+ pkt_ctx->availability_status = IN_USE;
+
+ if (chan->srcq_buffer_location == BUFFER_LOC_PCI)
+ pkt_ctx->seg = seg;
+
+ /* Get the address of the next available DMA Descriptor */
+ pdesc = chan->psrc_sgl_bd + chan->src_sgl_freeidx;
+ pkt_ctx->idx_sop = chan->src_sgl_freeidx;
+
+ /* Build transactions using information in the scatter gather list */
+ list_for_each_entry(ele, &seg->transfer_nodes, node) {
+ if (chan->xdev->dma_buf_ext_addr) {
+ pdesc->system_address =
+ (u64)ele->src_pa;
+ } else {
+ pdesc->system_address =
+ (u32)ele->src_pa;
+ }
+
+ pdesc->control_byte_count = (ele->transfer_bytes &
+ SOURCE_CONTROL_BD_BYTE_COUNT_MASK) |
+ chan->read_attribute;
+
+ pdesc->user_handle = chan->idx_ctx_srcq_head;
+ pdesc->user_id = DEFAULT_UID;
+ /* Check if this is last descriptor */
+ if (i == (seg->src_elements - 1)) {
+ pkt_ctx->idx_eop = chan->src_sgl_freeidx;
+ pdesc->control_byte_count |= SOURCE_CONTROL_BD_EOP_BIT;
+ if ((seg->async_tx.flags & DMA_PREP_INTERRUPT) ==
+ DMA_PREP_INTERRUPT) {
+ pdesc->control_byte_count |=
+ SOURCE_CONTROL_BD_INTR_BIT;
+ }
+ }
+ chan->src_sgl_freeidx++;
+ if (chan->src_sgl_freeidx == chan->total_descriptors)
+ chan->src_sgl_freeidx = 0;
+ pdesc = chan->psrc_sgl_bd + chan->src_sgl_freeidx;
+ spin_lock(&chan->src_desc_lock);
+ chan->src_avail_descriptors--;
+ spin_unlock(&chan->src_desc_lock);
+ i++;
+ }
+
+ chan->chan_base->src_q_limit = chan->src_sgl_freeidx;
+ chan->idx_ctx_srcq_head++;
+ if (chan->idx_ctx_srcq_head == chan->total_descriptors)
+ chan->idx_ctx_srcq_head = 0;
+}
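+
+/*
+ * Writing src_q_limit above acts as the doorbell: hardware fetches
+ * descriptors up to the new limit, and every index wraps at
+ * total_descriptors so the queues behave as rings.
+ */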
+
+static void xlnx_ps_pcie_update_dstq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ struct DEST_DMA_DESCRIPTOR *pdesc;
+ struct PACKET_TRANSFER_PARAMS *pkt_ctx = NULL;
+ struct ps_pcie_transfer_elements *ele = NULL;
+ u32 i = 0;
+
+ pkt_ctx = chan->ppkt_ctx_dstq + chan->idx_ctx_dstq_head;
+ if (pkt_ctx->availability_status == IN_USE) {
+ dev_err(chan->dev,
+ "dst pkt context not avail for channel %d\n",
+ chan->channel_number);
+ handle_error(chan);
+
+ return;
+ }
+
+ pkt_ctx->availability_status = IN_USE;
+
+ if (chan->dstq_buffer_location == BUFFER_LOC_PCI)
+ pkt_ctx->seg = seg;
+
+ pdesc = chan->pdst_sgl_bd + chan->dst_sgl_freeidx;
+ pkt_ctx->idx_sop = chan->dst_sgl_freeidx;
+
+ /* Build transactions using information in the scatter gather list */
+ list_for_each_entry(ele, &seg->transfer_nodes, node) {
+ if (chan->xdev->dma_buf_ext_addr) {
+ pdesc->system_address =
+ (u64)ele->dst_pa;
+ } else {
+ pdesc->system_address =
+ (u32)ele->dst_pa;
+ }
+ pdesc->control_byte_count = (ele->transfer_bytes &
+ SOURCE_CONTROL_BD_BYTE_COUNT_MASK) |
+ chan->write_attribute;
+
+ pdesc->user_handle = chan->idx_ctx_dstq_head;
+ /* Check if this is last descriptor */
+ if (i == (seg->dst_elements - 1))
+ pkt_ctx->idx_eop = chan->dst_sgl_freeidx;
+ chan->dst_sgl_freeidx++;
+ if (chan->dst_sgl_freeidx == chan->total_descriptors)
+ chan->dst_sgl_freeidx = 0;
+ pdesc = chan->pdst_sgl_bd + chan->dst_sgl_freeidx;
+ spin_lock(&chan->dst_desc_lock);
+ chan->dst_avail_descriptors--;
+ spin_unlock(&chan->dst_desc_lock);
+ i++;
+ }
+
+ chan->chan_base->dst_q_limit = chan->dst_sgl_freeidx;
+ chan->idx_ctx_dstq_head++;
+ if (chan->idx_ctx_dstq_head == chan->total_descriptors)
+ chan->idx_ctx_dstq_head = 0;
+}
+
+static void ps_pcie_chan_program_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan,
+ handle_chan_programming);
+ struct ps_pcie_tx_segment *seg = NULL;
+
+ while (chan->state == CHANNEL_AVAILABLE) {
+ spin_lock(&chan->active_list_lock);
+ seg = list_first_entry_or_null(&chan->active_list,
+ struct ps_pcie_tx_segment, node);
+ spin_unlock(&chan->active_list_lock);
+
+ if (!seg)
+ break;
+
+ if (check_descriptor_availability(chan, seg) == false)
+ break;
+
+ spin_lock(&chan->active_list_lock);
+ list_del(&seg->node);
+ spin_unlock(&chan->active_list_lock);
+
+ if (seg->src_elements)
+ xlnx_ps_pcie_update_srcq(chan, seg);
+
+ if (seg->dst_elements)
+ xlnx_ps_pcie_update_dstq(chan, seg);
+ }
+}
+
+/**
+ * dst_cleanup_work - Goes through all completed elements in status Q
+ * and invokes callbacks for the concerned DMA transaction.
+ *
+ * @work: Work associated with the task
+ *
+ * Return: void
+ */
+static void dst_cleanup_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_dstq_desc_cleanup);
+
+ struct STATUS_DMA_DESCRIPTOR *psta_bd;
+ struct DEST_DMA_DESCRIPTOR *pdst_bd;
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx;
+ struct dmaengine_result rslt;
+ u32 completed_bytes;
+ u32 dstq_desc_idx;
+ struct ps_pcie_transfer_elements *ele, *ele_nxt;
+
+ psta_bd = chan->pdst_sta_bd + chan->dst_staprobe_idx;
+
+ while (psta_bd->status_flag_byte_count & STA_BD_COMPLETED_BIT) {
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_DESTINATION_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Dst Sts Elmnt %d chan %d has Destination Err",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count & STA_BD_SOURCE_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Dst Sts Elmnt %d chan %d has Source Error",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_INTERNAL_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Dst Sts Elmnt %d chan %d has Internal Error",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ /* The driver uses a 64-bit user field; the upper status
+ * word is expected to be non-zero on completion.
+ */
+ if ((psta_bd->status_flag_byte_count &
+ STA_BD_UPPER_STATUS_NONZERO_BIT) == 0) {
+ dev_err(chan->dev,
+ "Dst Sts Elmnt %d for chan %d has zero upper status",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+
+ chan->idx_ctx_dstq_tail = psta_bd->user_handle;
+ ppkt_ctx = chan->ppkt_ctx_dstq + chan->idx_ctx_dstq_tail;
+ completed_bytes = (psta_bd->status_flag_byte_count &
+ STA_BD_BYTE_COUNT_MASK) >>
+ STA_BD_BYTE_COUNT_SHIFT;
+
+ memset(psta_bd, 0, sizeof(struct STATUS_DMA_DESCRIPTOR));
+
+ chan->dst_staprobe_idx++;
+
+ if (chan->dst_staprobe_idx == chan->total_descriptors)
+ chan->dst_staprobe_idx = 0;
+
+ chan->dst_sta_hw_probe_idx++;
+
+ if (chan->dst_sta_hw_probe_idx == chan->total_descriptors)
+ chan->dst_sta_hw_probe_idx = 0;
+
+ chan->chan_base->stad_q_limit = chan->dst_sta_hw_probe_idx;
+
+ psta_bd = chan->pdst_sta_bd + chan->dst_staprobe_idx;
+
+ dstq_desc_idx = ppkt_ctx->idx_sop;
+
+ do {
+ pdst_bd = chan->pdst_sgl_bd + dstq_desc_idx;
+ memset(pdst_bd, 0,
+ sizeof(struct DEST_DMA_DESCRIPTOR));
+
+ spin_lock(&chan->dst_desc_lock);
+ chan->dst_avail_descriptors++;
+ spin_unlock(&chan->dst_desc_lock);
+
+ if (dstq_desc_idx == ppkt_ctx->idx_eop)
+ break;
+
+ dstq_desc_idx++;
+
+ if (dstq_desc_idx == chan->total_descriptors)
+ dstq_desc_idx = 0;
+
+ } while (1);
+
+ /* Invoking callback */
+ if (ppkt_ctx->seg) {
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&ppkt_ctx->seg->async_tx);
+ spin_unlock(&chan->cookie_lock);
+ rslt.result = DMA_TRANS_NOERROR;
+ rslt.residue = ppkt_ctx->seg->total_transfer_bytes -
+ completed_bytes;
+ dmaengine_desc_get_callback_invoke(&ppkt_ctx->seg->async_tx,
+ &rslt);
+ list_for_each_entry_safe(ele, ele_nxt,
+ &ppkt_ctx->seg->transfer_nodes,
+ node) {
+ list_del(&ele->node);
+ mempool_free(ele, chan->tx_elements_pool);
+ }
+ mempool_free(ppkt_ctx->seg, chan->transactions_pool);
+ }
+ memset(ppkt_ctx, 0, sizeof(struct PACKET_TRANSFER_PARAMS));
+ }
+
+ complete(&chan->dstq_work_complete);
+}
+
+/**
+ * src_cleanup_work - Goes through all completed elements in status Q and
+ * invokes callbacks for the concerned DMA transaction.
+ *
+ * @work: Work associated with the task
+ *
+ * Return: void
+ */
+static void src_cleanup_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(
+ work, struct ps_pcie_dma_chan, handle_srcq_desc_cleanup);
+
+ struct STATUS_DMA_DESCRIPTOR *psta_bd;
+ struct SOURCE_DMA_DESCRIPTOR *psrc_bd;
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx;
+ struct dmaengine_result rslt;
+ u32 completed_bytes;
+ u32 srcq_desc_idx;
+ struct ps_pcie_transfer_elements *ele, *ele_nxt;
+
+ psta_bd = chan->psrc_sta_bd + chan->src_staprobe_idx;
+
+ while (psta_bd->status_flag_byte_count & STA_BD_COMPLETED_BIT) {
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_DESTINATION_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Src Sts Elmnt %d chan %d has Dst Error",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count & STA_BD_SOURCE_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Src Sts Elmnt %d chan %d has Source Error",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_INTERNAL_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Src Sts Elmnt %d chan %d has Internal Error",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if ((psta_bd->status_flag_byte_count
+ & STA_BD_UPPER_STATUS_NONZERO_BIT) == 0) {
+ dev_err(chan->dev,
+ "Src Sts Elmnt %d chan %d has zero upper status",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ chan->idx_ctx_srcq_tail = psta_bd->user_handle;
+ ppkt_ctx = chan->ppkt_ctx_srcq + chan->idx_ctx_srcq_tail;
+ completed_bytes = (psta_bd->status_flag_byte_count
+ & STA_BD_BYTE_COUNT_MASK) >>
+ STA_BD_BYTE_COUNT_SHIFT;
+
+ memset(psta_bd, 0, sizeof(struct STATUS_DMA_DESCRIPTOR));
+
+ chan->src_staprobe_idx++;
+
+ if (chan->src_staprobe_idx == chan->total_descriptors)
+ chan->src_staprobe_idx = 0;
+
+ chan->src_sta_hw_probe_idx++;
+
+ if (chan->src_sta_hw_probe_idx == chan->total_descriptors)
+ chan->src_sta_hw_probe_idx = 0;
+
+ chan->chan_base->stas_q_limit = chan->src_sta_hw_probe_idx;
+
+ psta_bd = chan->psrc_sta_bd + chan->src_staprobe_idx;
+
+ srcq_desc_idx = ppkt_ctx->idx_sop;
+
+ do {
+ psrc_bd = chan->psrc_sgl_bd + srcq_desc_idx;
+ memset(psrc_bd, 0,
+ sizeof(struct SOURCE_DMA_DESCRIPTOR));
+
+ spin_lock(&chan->src_desc_lock);
+ chan->src_avail_descriptors++;
+ spin_unlock(&chan->src_desc_lock);
+
+ if (srcq_desc_idx == ppkt_ctx->idx_eop)
+ break;
+ srcq_desc_idx++;
+
+ if (srcq_desc_idx == chan->total_descriptors)
+ srcq_desc_idx = 0;
+
+ } while (1);
+
+ /* Invoking callback */
+ if (ppkt_ctx->seg) {
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&ppkt_ctx->seg->async_tx);
+ spin_unlock(&chan->cookie_lock);
+ rslt.result = DMA_TRANS_NOERROR;
+ rslt.residue = ppkt_ctx->seg->total_transfer_bytes -
+ completed_bytes;
+ dmaengine_desc_get_callback_invoke(&ppkt_ctx->seg->async_tx,
+ &rslt);
+ list_for_each_entry_safe(ele, ele_nxt,
+ &ppkt_ctx->seg->transfer_nodes,
+ node) {
+ list_del(&ele->node);
+ mempool_free(ele, chan->tx_elements_pool);
+ }
+ mempool_free(ppkt_ctx->seg, chan->transactions_pool);
+ }
+ memset(ppkt_ctx, 0, sizeof(struct PACKET_TRANSFER_PARAMS));
+ }
+
+ complete(&chan->srcq_work_complete);
+}
+
+/**
+ * ps_pcie_chan_primary_work - Masks out interrupts, invokes source Q and
+ * destination Q processing, waits for both to complete, and re-enables
+ * interrupts. The same work is invoked by the poll timer when the coalesce
+ * count is greater than zero and no interrupt arrives within the timeout
+ * period.
+ *
+ * @work: Work associated with the task
+ *
+ * Return: void
+ */
+static void ps_pcie_chan_primary_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(
+ work, struct ps_pcie_dma_chan,
+ handle_primary_desc_cleanup);
+
+ /* Disable interrupts for Channel */
+ ps_pcie_dma_clr_mask(chan, chan->intr_control_offset,
+ DMA_INTCNTRL_ENABLINTR_BIT);
+
+ if (chan->psrc_sgl_bd) {
+ reinit_completion(&chan->srcq_work_complete);
+ if (chan->srcq_desc_cleanup)
+ queue_work(chan->srcq_desc_cleanup,
+ &chan->handle_srcq_desc_cleanup);
+ }
+ if (chan->pdst_sgl_bd) {
+ reinit_completion(&chan->dstq_work_complete);
+ if (chan->dstq_desc_cleanup)
+ queue_work(chan->dstq_desc_cleanup,
+ &chan->handle_dstq_desc_cleanup);
+ }
+
+ if (chan->psrc_sgl_bd)
+ wait_for_completion_interruptible(&chan->srcq_work_complete);
+ if (chan->pdst_sgl_bd)
+ wait_for_completion_interruptible(&chan->dstq_work_complete);
+
+ /* Enable interrupts for channel */
+ ps_pcie_dma_set_mask(chan, chan->intr_control_offset,
+ DMA_INTCNTRL_ENABLINTR_BIT);
+
+ if (chan->chan_programming) {
+ queue_work(chan->chan_programming,
+ &chan->handle_chan_programming);
+ }
+
+ if (chan->coalesce_count > 0 && chan->poll_timer.function)
+ mod_timer(&chan->poll_timer, jiffies + chan->poll_timer_freq);
+}
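+
+/*
+ * Ordering in the primary work matters: channel interrupts stay masked
+ * while the cleanup workers drain both status queues, and descriptor
+ * programming is re-queued only after descriptors have been returned
+ * to the rings.
+ */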
+
+static int read_rootdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+ struct resource *r;
+
+ err = dma_set_mask(&platform_dev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_info(&platform_dev->dev, "Cannot set 64 bit DMA mask\n");
+ err = dma_set_mask(&platform_dev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&platform_dev->dev, "DMA mask set error\n");
+ return err;
+ }
+ }
+
+ err = dma_set_coherent_mask(&platform_dev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_info(&platform_dev->dev, "Cannot set 64 bit consistent DMA mask\n");
+ err = dma_set_coherent_mask(&platform_dev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&platform_dev->dev, "Cannot set consistent DMA mask\n");
+ return err;
+ }
+ }
+
+ r = platform_get_resource_byname(platform_dev, IORESOURCE_MEM,
+ "ps_pcie_regbase");
+ if (!r) {
+ dev_err(&platform_dev->dev,
+ "Unable to find memory resource for root dma\n");
+ return -ENODEV;
+ }
+
+ xdev->reg_base = devm_ioremap_resource(&platform_dev->dev, r);
+ if (IS_ERR(xdev->reg_base)) {
+ dev_err(&platform_dev->dev, "ioresource error for root dma\n");
+ return PTR_ERR(xdev->reg_base);
+ }
+
+ xdev->platform_irq_vec =
+ platform_get_irq_byname(platform_dev,
+ "ps_pcie_rootdma_intr");
+ if (xdev->platform_irq_vec < 0) {
+ dev_err(&platform_dev->dev,
+ "Unable to get interrupt number for root dma\n");
+ return xdev->platform_irq_vec;
+ }
+
+ err = device_property_read_u16(&platform_dev->dev, "dma_vendorid",
+ &xdev->rootdma_vendor);
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to find RootDMA PCI Vendor Id\n");
+ return err;
+ }
+
+ err = device_property_read_u16(&platform_dev->dev, "dma_deviceid",
+ &xdev->rootdma_device);
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to find RootDMA PCI Device Id\n");
+ return err;
+ }
+
+ xdev->common.dev = xdev->dev;
+
+ return 0;
+}
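+
+/*
+ * Illustrative platform resources consumed above (the names are what
+ * this function looks up; any values are an example only):
+ *   MEM  "ps_pcie_regbase"        - DMA engine register window
+ *   IRQ  "ps_pcie_rootdma_intr"   - root DMA interrupt line
+ *   u16  "dma_vendorid"/"dma_deviceid" - IDs used for channel matching
+ */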
+
+static int read_epdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+ struct pci_dev *pdev;
+ u16 i;
+ void __iomem * const *pci_iomap;
+ unsigned long pci_bar_length;
+
+ pdev = *((struct pci_dev **)(platform_dev->dev.platform_data));
+ xdev->pci_dev = pdev;
+
+ for (i = 0; i < MAX_BARS; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ xdev->bar_mask |= (1 << i);
+ }
+
+ err = pcim_iomap_regions(pdev, xdev->bar_mask, PLATFORM_DRIVER_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot request PCI regions, aborting\n");
+ return err;
+ }
+
+ pci_iomap = pcim_iomap_table(pdev);
+ if (!pci_iomap)
+ return -ENOMEM;
+
+ for (i = 0; i < MAX_BARS; i++) {
+ pci_bar_length = pci_resource_len(pdev, i);
+ if (pci_bar_length == 0) {
+ xdev->bar_info[i].BAR_LENGTH = 0;
+ xdev->bar_info[i].BAR_PHYS_ADDR = 0;
+ xdev->bar_info[i].BAR_VIRT_ADDR = NULL;
+ } else {
+ xdev->bar_info[i].BAR_LENGTH =
+ pci_bar_length;
+ xdev->bar_info[i].BAR_PHYS_ADDR =
+ pci_resource_start(pdev, i);
+ xdev->bar_info[i].BAR_VIRT_ADDR =
+ (void *)pci_iomap[i];
+ }
+ }
+
+ xdev->reg_base = pci_iomap[DMA_BAR_NUMBER];
+
+ err = irq_probe(xdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Cannot probe irq lines for device %d\n",
+ platform_dev->id);
+ return err;
+ }
+
+ xdev->common.dev = &pdev->dev;
+
+ return 0;
+}
+
+static int probe_channel_properties(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev,
+ u16 channel_number)
+{
+ int i;
+ char propertyname[CHANNEL_PROPERTY_LENGTH];
+ int numvals, ret;
+ u32 *val;
+ struct ps_pcie_dma_chan *channel;
+ struct ps_pcie_dma_channel_match *xlnx_match;
+
+ snprintf(propertyname, CHANNEL_PROPERTY_LENGTH,
+ "ps_pcie_channel%d", channel_number);
+
+ channel = &xdev->channels[channel_number];
+
+ spin_lock_init(&channel->channel_lock);
+ spin_lock_init(&channel->cookie_lock);
+
+ INIT_LIST_HEAD(&channel->pending_list);
+ spin_lock_init(&channel->pending_list_lock);
+
+ INIT_LIST_HEAD(&channel->active_list);
+ spin_lock_init(&channel->active_list_lock);
+
+ spin_lock_init(&channel->src_desc_lock);
+ spin_lock_init(&channel->dst_desc_lock);
+
+ INIT_LIST_HEAD(&channel->pending_interrupts_list);
+ spin_lock_init(&channel->pending_interrupts_lock);
+
+ INIT_LIST_HEAD(&channel->active_interrupts_list);
+ spin_lock_init(&channel->active_interrupts_lock);
+
+ init_completion(&channel->srcq_work_complete);
+ init_completion(&channel->dstq_work_complete);
+ init_completion(&channel->chan_shutdown_complt);
+ init_completion(&channel->chan_terminate_complete);
+
+ if (device_property_present(&platform_dev->dev, propertyname)) {
+ numvals = device_property_read_u32_array(&platform_dev->dev,
+ propertyname, NULL, 0);
+
+ if (numvals < 0)
+ return numvals;
+
+ val = devm_kcalloc(&platform_dev->dev, numvals, sizeof(u32),
+ GFP_KERNEL);
+
+ if (!val)
+ return -ENOMEM;
+
+ ret = device_property_read_u32_array(&platform_dev->dev,
+ propertyname, val,
+ numvals);
+ if (ret < 0) {
+ dev_err(&platform_dev->dev,
+ "Unable to read property %s\n", propertyname);
+ return ret;
+ }
+
+ for (i = 0; i < numvals; i++) {
+ switch (i) {
+ case DMA_CHANNEL_DIRECTION:
+ channel->direction =
+ (val[DMA_CHANNEL_DIRECTION] ==
+ PCIE_AXI_DIRECTION) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ break;
+ case NUM_DESCRIPTORS:
+ channel->total_descriptors =
+ val[NUM_DESCRIPTORS];
+ if (channel->total_descriptors >
+ MAX_DESCRIPTORS) {
+ dev_info(&platform_dev->dev,
+ "Descriptors > alowd max\n");
+ channel->total_descriptors =
+ MAX_DESCRIPTORS;
+ }
+ break;
+ case NUM_QUEUES:
+ channel->num_queues = val[NUM_QUEUES];
+ switch (channel->num_queues) {
+ case DEFAULT_DMA_QUEUES:
+ break;
+ case TWO_DMA_QUEUES:
+ break;
+ default:
+ dev_info(&platform_dev->dev,
+ "Incorrect Q number for dma chan\n");
+ channel->num_queues = DEFAULT_DMA_QUEUES;
+ }
+ break;
+ case COALESE_COUNT:
+ channel->coalesce_count = val[COALESE_COUNT];
+
+ if (channel->coalesce_count >
+ MAX_COALESCE_COUNT) {
+ dev_info(&platform_dev->dev,
+ "Invalid coalesce Count\n");
+ channel->coalesce_count =
+ MAX_COALESCE_COUNT;
+ }
+ break;
+ case POLL_TIMER_FREQUENCY:
+ channel->poll_timer_freq =
+ val[POLL_TIMER_FREQUENCY];
+ break;
+ default:
+ dev_err(&platform_dev->dev,
+ "Check order of channel properties!\n");
+ }
+ }
+ } else {
+ dev_err(&platform_dev->dev,
+ "Property %s not present. Invalid configuration!\n",
+ propertyname);
+ return -ENOTSUPP;
+ }
+
+ if (channel->direction == DMA_TO_DEVICE) {
+ if (channel->num_queues == DEFAULT_DMA_QUEUES) {
+ channel->srcq_buffer_location = BUFFER_LOC_PCI;
+ channel->dstq_buffer_location = BUFFER_LOC_AXI;
+ } else {
+ channel->srcq_buffer_location = BUFFER_LOC_PCI;
+ channel->dstq_buffer_location = BUFFER_LOC_INVALID;
+ }
+ } else {
+ if (channel->num_queues == DEFAULT_DMA_QUEUES) {
+ channel->srcq_buffer_location = BUFFER_LOC_AXI;
+ channel->dstq_buffer_location = BUFFER_LOC_PCI;
+ } else {
+ channel->srcq_buffer_location = BUFFER_LOC_INVALID;
+ channel->dstq_buffer_location = BUFFER_LOC_PCI;
+ }
+ }
+
+ channel->xdev = xdev;
+ channel->channel_number = channel_number;
+
+ if (xdev->is_rootdma) {
+ channel->dev = xdev->dev;
+ channel->intr_status_offset = DMA_AXI_INTR_STATUS_REG_OFFSET;
+ channel->intr_control_offset = DMA_AXI_INTR_CNTRL_REG_OFFSET;
+ } else {
+ channel->dev = &xdev->pci_dev->dev;
+ channel->intr_status_offset = DMA_PCIE_INTR_STATUS_REG_OFFSET;
+ channel->intr_control_offset = DMA_PCIE_INTR_CNTRL_REG_OFFSET;
+ }
+
+ channel->chan_base =
+ (struct DMA_ENGINE_REGISTERS *)((__force char *)(xdev->reg_base) +
+ (channel_number * DMA_CHANNEL_REGS_SIZE));
+
+ if ((channel->chan_base->dma_channel_status &
+ DMA_STATUS_DMA_PRES_BIT) == 0) {
+ dev_err(&platform_dev->dev,
+ "Hardware reports channel not present\n");
+ return -ENOTSUPP;
+ }
+
+ update_channel_read_attribute(channel);
+ update_channel_write_attribute(channel);
+
+ xlnx_match = devm_kzalloc(&platform_dev->dev,
+ sizeof(struct ps_pcie_dma_channel_match),
+ GFP_KERNEL);
+
+ if (!xlnx_match)
+ return -ENOMEM;
+
+ if (xdev->is_rootdma) {
+ xlnx_match->pci_vendorid = xdev->rootdma_vendor;
+ xlnx_match->pci_deviceid = xdev->rootdma_device;
+ } else {
+ xlnx_match->pci_vendorid = xdev->pci_dev->vendor;
+ xlnx_match->pci_deviceid = xdev->pci_dev->device;
+ xlnx_match->bar_params = xdev->bar_info;
+ }
+
+ xlnx_match->board_number = xdev->board_number;
+ xlnx_match->channel_number = channel_number;
+ xlnx_match->direction = xdev->channels[channel_number].direction;
+
+ channel->common.private = (void *)xlnx_match;
+
+ channel->common.device = &xdev->common;
+ list_add_tail(&channel->common.device_node, &xdev->common.channels);
+
+ return 0;
+}
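+
+/*
+ * The ps_pcie_channel<n> u32 array is consumed positionally, matching
+ * the switch above; as an illustration only:
+ *   index 0: DMA_CHANNEL_DIRECTION   index 1: NUM_DESCRIPTORS
+ *   index 2: NUM_QUEUES              index 3: COALESE_COUNT
+ *   index 4: POLL_TIMER_FREQUENCY
+ */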
+
+static void xlnx_ps_pcie_destroy_mempool(struct ps_pcie_dma_chan *chan)
+{
+ mempool_destroy(chan->transactions_pool);
+
+ mempool_destroy(chan->tx_elements_pool);
+
+ mempool_destroy(chan->intr_transactions_pool);
+}
+
+static void xlnx_ps_pcie_free_worker_queues(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->maintenance_workq)
+ destroy_workqueue(chan->maintenance_workq);
+
+ if (chan->sw_intrs_wrkq)
+ destroy_workqueue(chan->sw_intrs_wrkq);
+
+ if (chan->srcq_desc_cleanup)
+ destroy_workqueue(chan->srcq_desc_cleanup);
+
+ if (chan->dstq_desc_cleanup)
+ destroy_workqueue(chan->dstq_desc_cleanup);
+
+ if (chan->chan_programming)
+ destroy_workqueue(chan->chan_programming);
+
+ if (chan->primary_desc_cleanup)
+ destroy_workqueue(chan->primary_desc_cleanup);
+}
+
+static void xlnx_ps_pcie_free_pkt_ctxts(struct ps_pcie_dma_chan *chan)
+{
+ kfree(chan->ppkt_ctx_srcq);
+
+ kfree(chan->ppkt_ctx_dstq);
+}
+
+static void xlnx_ps_pcie_free_descriptors(struct ps_pcie_dma_chan *chan)
+{
+ ssize_t size;
+
+ if (chan->psrc_sgl_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sgl_bd,
+ chan->src_sgl_bd_pa);
+ }
+
+ if (chan->pdst_sgl_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct DEST_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->pdst_sgl_bd,
+ chan->dst_sgl_bd_pa);
+ }
+
+ if (chan->psrc_sta_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sta_bd,
+ chan->src_sta_bd_pa);
+ }
+
+ if (chan->pdst_sta_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->pdst_sta_bd,
+ chan->dst_sta_bd_pa);
+ }
+}
+
+static int xlnx_ps_pcie_channel_activate(struct ps_pcie_dma_chan *chan)
+{
+ u32 reg = chan->coalesce_count;
+
+ reg = reg << DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT;
+
+ /* Enable Interrupts for channel */
+ ps_pcie_dma_set_mask(chan, chan->intr_control_offset,
+ reg | DMA_INTCNTRL_ENABLINTR_BIT |
+ DMA_INTCNTRL_DMAERRINTR_BIT |
+ DMA_INTCNTRL_DMASGINTR_BIT);
+
+ /* Enable DMA */
+ ps_pcie_dma_set_mask(chan, DMA_CNTRL_REG_OFFSET,
+ DMA_CNTRL_ENABL_BIT |
+ DMA_CNTRL_64BIT_STAQ_ELEMSZ_BIT);
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_AVAILABLE;
+ spin_unlock(&chan->channel_lock);
+
+ /* Activate timer if required */
+ if (chan->coalesce_count > 0 && !chan->poll_timer.function)
+ xlnx_ps_pcie_alloc_poll_timer(chan);
+
+ return 0;
+}
+
+static void xlnx_ps_pcie_channel_quiesce(struct ps_pcie_dma_chan *chan)
+{
+ /* Disable interrupts for Channel */
+ ps_pcie_dma_clr_mask(chan, chan->intr_control_offset,
+ DMA_INTCNTRL_ENABLINTR_BIT);
+
+ /* Delete timer if it is created */
+ if (chan->coalesce_count > 0 && chan->poll_timer.function)
+ xlnx_ps_pcie_free_poll_timer(chan);
+
+ /* Flush descriptor cleaning work queues */
+ if (chan->primary_desc_cleanup)
+ flush_workqueue(chan->primary_desc_cleanup);
+
+ /* Flush channel programming work queue */
+ if (chan->chan_programming)
+ flush_workqueue(chan->chan_programming);
+
+ /* Clear the persistent bits */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_DMAERR_BIT |
+ DMA_INTSTATUS_SGLINTR_BIT |
+ DMA_INTSTATUS_SWINTR_BIT);
+
+ /* Disable DMA channel */
+ ps_pcie_dma_clr_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_ENABL_BIT);
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_UNAVIALBLE;
+ spin_unlock(&chan->channel_lock);
+}
+
+static void ivk_cbk_intr_seg(struct ps_pcie_intr_segment *intr_seg,
+ struct ps_pcie_dma_chan *chan,
+ enum dmaengine_tx_result result)
+{
+ struct dmaengine_result rslt;
+
+ rslt.result = result;
+ rslt.residue = 0;
+
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&intr_seg->async_intr_tx);
+ spin_unlock(&chan->cookie_lock);
+
+ dmaengine_desc_get_callback_invoke(&intr_seg->async_intr_tx, &rslt);
+}
+
+static void ivk_cbk_seg(struct ps_pcie_tx_segment *seg,
+ struct ps_pcie_dma_chan *chan,
+ enum dmaengine_tx_result result)
+{
+ struct dmaengine_result rslt, *prslt;
+
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&seg->async_tx);
+ spin_unlock(&chan->cookie_lock);
+
+ rslt.result = result;
+ if (seg->src_elements &&
+ chan->srcq_buffer_location == BUFFER_LOC_PCI) {
+ rslt.residue = seg->total_transfer_bytes;
+ prslt = &rslt;
+ } else if (seg->dst_elements &&
+ chan->dstq_buffer_location == BUFFER_LOC_PCI) {
+ rslt.residue = seg->total_transfer_bytes;
+ prslt = &rslt;
+ } else {
+ prslt = NULL;
+ }
+
+ dmaengine_desc_get_callback_invoke(&seg->async_tx, prslt);
+}
+
+static void ivk_cbk_ctx(struct PACKET_TRANSFER_PARAMS *ppkt_ctxt,
+ struct ps_pcie_dma_chan *chan,
+ enum dmaengine_tx_result result)
+{
+ if (ppkt_ctxt->availability_status == IN_USE) {
+ if (ppkt_ctxt->seg) {
+ ivk_cbk_seg(ppkt_ctxt->seg, chan, result);
+ mempool_free(ppkt_ctxt->seg,
+ chan->transactions_pool);
+ }
+ }
+}
+
+static void ivk_cbk_for_pending(struct ps_pcie_dma_chan *chan)
+{
+ int i;
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctxt;
+ struct ps_pcie_tx_segment *seg, *seg_nxt;
+ struct ps_pcie_intr_segment *intr_seg, *intr_seg_next;
+ struct ps_pcie_transfer_elements *ele, *ele_nxt;
+
+ if (chan->ppkt_ctx_srcq) {
+ if (chan->idx_ctx_srcq_tail != chan->idx_ctx_srcq_head) {
+ i = chan->idx_ctx_srcq_tail;
+ while (i != chan->idx_ctx_srcq_head) {
+ ppkt_ctxt = chan->ppkt_ctx_srcq + i;
+ ivk_cbk_ctx(ppkt_ctxt, chan,
+ DMA_TRANS_READ_FAILED);
+ memset(ppkt_ctxt, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS));
+ i++;
+ if (i == chan->total_descriptors)
+ i = 0;
+ }
+ }
+ }
+
+ if (chan->ppkt_ctx_dstq) {
+ if (chan->idx_ctx_dstq_tail != chan->idx_ctx_dstq_head) {
+ i = chan->idx_ctx_dstq_tail;
+ while (i != chan->idx_ctx_dstq_head) {
+ ppkt_ctxt = chan->ppkt_ctx_dstq + i;
+ ivk_cbk_ctx(ppkt_ctxt, chan,
+ DMA_TRANS_WRITE_FAILED);
+ memset(ppkt_ctxt, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS));
+ i++;
+ if (i == chan->total_descriptors)
+ i = 0;
+ }
+ }
+ }
+
+ list_for_each_entry_safe(seg, seg_nxt, &chan->active_list, node) {
+ ivk_cbk_seg(seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->active_list_lock);
+ list_del(&seg->node);
+ spin_unlock(&chan->active_list_lock);
+ list_for_each_entry_safe(ele, ele_nxt,
+ &seg->transfer_nodes, node) {
+ list_del(&ele->node);
+ mempool_free(ele, chan->tx_elements_pool);
+ }
+ mempool_free(seg, chan->transactions_pool);
+ }
+
+ list_for_each_entry_safe(seg, seg_nxt, &chan->pending_list, node) {
+ ivk_cbk_seg(seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->pending_list_lock);
+ list_del(&seg->node);
+ spin_unlock(&chan->pending_list_lock);
+ list_for_each_entry_safe(ele, ele_nxt,
+ &seg->transfer_nodes, node) {
+ list_del(&ele->node);
+ mempool_free(ele, chan->tx_elements_pool);
+ }
+ mempool_free(seg, chan->transactions_pool);
+ }
+
+ list_for_each_entry_safe(intr_seg, intr_seg_next,
+ &chan->active_interrupts_list, node) {
+ ivk_cbk_intr_seg(intr_seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->active_interrupts_lock);
+ list_del(&intr_seg->node);
+ spin_unlock(&chan->active_interrupts_lock);
+ mempool_free(intr_seg, chan->intr_transactions_pool);
+ }
+
+ list_for_each_entry_safe(intr_seg, intr_seg_next,
+ &chan->pending_interrupts_list, node) {
+ ivk_cbk_intr_seg(intr_seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->pending_interrupts_lock);
+ list_del(&intr_seg->node);
+ spin_unlock(&chan->pending_interrupts_lock);
+ mempool_free(intr_seg, chan->intr_transactions_pool);
+ }
+}
+
+static void xlnx_ps_pcie_reset_channel(struct ps_pcie_dma_chan *chan)
+{
+ xlnx_ps_pcie_channel_quiesce(chan);
+
+ ivk_cbk_for_pending(chan);
+
+ ps_pcie_chan_reset(chan);
+
+ init_sw_components(chan);
+ init_hw_components(chan);
+
+ xlnx_ps_pcie_channel_activate(chan);
+}
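+
+/*
+ * Reset sequencing: quiesce first so no new work is queued, complete
+ * all outstanding transactions with an aborted/failed result, then
+ * reset the engine and rebuild software and hardware queue state
+ * before marking the channel available again.
+ */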
+
+static void xlnx_ps_pcie_free_poll_timer(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->poll_timer.function) {
+ del_timer_sync(&chan->poll_timer);
+ chan->poll_timer.function = NULL;
+ }
+}
+
+static int xlnx_ps_pcie_alloc_poll_timer(struct ps_pcie_dma_chan *chan)
+{
+ timer_setup(&chan->poll_timer, poll_completed_transactions, 0);
+ chan->poll_timer.expires = jiffies + chan->poll_timer_freq;
+
+ add_timer(&chan->poll_timer);
+
+ return 0;
+}
+
+static void terminate_transactions_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_chan_terminate);
+
+ xlnx_ps_pcie_channel_quiesce(chan);
+ ivk_cbk_for_pending(chan);
+ xlnx_ps_pcie_channel_activate(chan);
+
+ complete(&chan->chan_terminate_complete);
+}
+
+static void chan_shutdown_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_chan_shutdown);
+
+ xlnx_ps_pcie_channel_quiesce(chan);
+
+ complete(&chan->chan_shutdown_complt);
+}
+
+static void chan_reset_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_chan_reset);
+
+ xlnx_ps_pcie_reset_channel(chan);
+}
+
+static void sw_intr_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_sw_intrs);
+ struct ps_pcie_intr_segment *intr_seg, *intr_seg_next;
+
+ list_for_each_entry_safe(intr_seg, intr_seg_next,
+ &chan->active_interrupts_list, node) {
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&intr_seg->async_intr_tx);
+ spin_unlock(&chan->cookie_lock);
+ dmaengine_desc_get_callback_invoke(&intr_seg->async_intr_tx,
+ NULL);
+ spin_lock(&chan->active_interrupts_lock);
+ list_del(&intr_seg->node);
+ spin_unlock(&chan->active_interrupts_lock);
+ }
+}
+
+static int xlnx_ps_pcie_alloc_worker_threads(struct ps_pcie_dma_chan *chan)
+{
+ char wq_name[WORKQ_NAME_SIZE];
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d descriptor programming wq",
+ chan->channel_number);
+ chan->chan_programming =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->chan_programming) {
+ dev_err(chan->dev,
+ "Unable to create programming wq for chan %d",
+ chan->channel_number);
+ goto err_no_desc_program_wq;
+ } else {
+ INIT_WORK(&chan->handle_chan_programming,
+ ps_pcie_chan_program_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d primary cleanup wq", chan->channel_number);
+ chan->primary_desc_cleanup =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->primary_desc_cleanup) {
+ dev_err(chan->dev,
+ "Unable to create primary cleanup wq for channel %d",
+ chan->channel_number);
+ goto err_no_primary_clean_wq;
+ } else {
+ INIT_WORK(&chan->handle_primary_desc_cleanup,
+ ps_pcie_chan_primary_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d maintenance works wq",
+ chan->channel_number);
+ chan->maintenance_workq =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->maintenance_workq) {
+ dev_err(chan->dev,
+ "Unable to create maintenance wq for channel %d",
+ chan->channel_number);
+ goto err_no_maintenance_wq;
+ } else {
+ INIT_WORK(&chan->handle_chan_reset, chan_reset_work);
+ INIT_WORK(&chan->handle_chan_shutdown, chan_shutdown_work);
+ INIT_WORK(&chan->handle_chan_terminate,
+ terminate_transactions_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d software Interrupts wq",
+ chan->channel_number);
+ chan->sw_intrs_wrkq =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->sw_intrs_wrkq) {
+ dev_err(chan->dev,
+ "Unable to create sw interrupts wq for channel %d",
+ chan->channel_number);
+ goto err_no_sw_intrs_wq;
+ } else {
+ INIT_WORK(&chan->handle_sw_intrs, sw_intr_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ if (chan->psrc_sgl_bd) {
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d srcq handling wq",
+ chan->channel_number);
+ chan->srcq_desc_cleanup =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->srcq_desc_cleanup) {
+ dev_err(chan->dev,
+ "Unable to create src q completion wq chan %d",
+ chan->channel_number);
+ goto err_no_src_q_completion_wq;
+ } else {
+ INIT_WORK(&chan->handle_srcq_desc_cleanup,
+ src_cleanup_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+ }
+
+ if (chan->pdst_sgl_bd) {
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d dstq handling wq",
+ chan->channel_number);
+ chan->dstq_desc_cleanup =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->dstq_desc_cleanup) {
+ dev_err(chan->dev,
+ "Unable to create dst q completion wq chan %d",
+ chan->channel_number);
+ goto err_no_dst_q_completion_wq;
+ } else {
+ INIT_WORK(&chan->handle_dstq_desc_cleanup,
+ dst_cleanup_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+ }
+
+ return 0;
+err_no_dst_q_completion_wq:
+ if (chan->srcq_desc_cleanup)
+ destroy_workqueue(chan->srcq_desc_cleanup);
+err_no_src_q_completion_wq:
+ if (chan->sw_intrs_wrkq)
+ destroy_workqueue(chan->sw_intrs_wrkq);
+err_no_sw_intrs_wq:
+ if (chan->maintenance_workq)
+ destroy_workqueue(chan->maintenance_workq);
+err_no_maintenance_wq:
+ if (chan->primary_desc_cleanup)
+ destroy_workqueue(chan->primary_desc_cleanup);
+err_no_primary_clean_wq:
+ if (chan->chan_programming)
+ destroy_workqueue(chan->chan_programming);
+err_no_desc_program_wq:
+ return -ENOMEM;
+}
+
+static int xlnx_ps_pcie_alloc_mempool(struct ps_pcie_dma_chan *chan)
+{
+ chan->transactions_pool =
+ mempool_create_kmalloc_pool(chan->total_descriptors,
+ sizeof(struct ps_pcie_tx_segment));
+
+ if (!chan->transactions_pool)
+ goto no_transactions_pool;
+
+ chan->tx_elements_pool =
+ mempool_create_kmalloc_pool(chan->total_descriptors,
+ sizeof(struct ps_pcie_transfer_elements));
+
+ if (!chan->tx_elements_pool)
+ goto no_tx_elements_pool;
+
+ chan->intr_transactions_pool =
+ mempool_create_kmalloc_pool(MIN_SW_INTR_TRANSACTIONS,
+ sizeof(struct ps_pcie_intr_segment));
+
+ if (!chan->intr_transactions_pool)
+ goto no_intr_transactions_pool;
+
+ return 0;
+
+no_intr_transactions_pool:
+ mempool_destroy(chan->tx_elements_pool);
+no_tx_elements_pool:
+ mempool_destroy(chan->transactions_pool);
+no_transactions_pool:
+ return -ENOMEM;
+}
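+
+/*
+ * Pools are sized to the descriptor ring: up to total_descriptors
+ * transactions and transfer elements may be outstanding per channel,
+ * while software interrupt segments only need the small
+ * MIN_SW_INTR_TRANSACTIONS pool.
+ */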
+
+static int xlnx_ps_pcie_alloc_pkt_contexts(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->psrc_sgl_bd) {
+ chan->ppkt_ctx_srcq =
+ kcalloc(chan->total_descriptors,
+ sizeof(struct PACKET_TRANSFER_PARAMS),
+ GFP_KERNEL);
+ if (!chan->ppkt_ctx_srcq) {
+ dev_err(chan->dev,
+ "Src pkt cxt allocation for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_src_pkt_ctx;
+ }
+ }
+
+ if (chan->pdst_sgl_bd) {
+ chan->ppkt_ctx_dstq =
+ kcalloc(chan->total_descriptors,
+ sizeof(struct PACKET_TRANSFER_PARAMS),
+ GFP_KERNEL);
+ if (!chan->ppkt_ctx_dstq) {
+ dev_err(chan->dev,
+ "Dst pkt cxt for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_dst_pkt_ctx;
+ }
+ }
+
+ return 0;
+
+err_no_dst_pkt_ctx:
+ kfree(chan->ppkt_ctx_srcq);
+
+err_no_src_pkt_ctx:
+ return -ENOMEM;
+}
+
+static int dma_alloc_descriptors_two_queues(struct ps_pcie_dma_chan *chan)
+{
+ size_t size;
+
+ void *sgl_base;
+ void *sta_base;
+ dma_addr_t phy_addr_sglbase;
+ dma_addr_t phy_addr_stabase;
+
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+
+ sgl_base = dma_alloc_coherent(chan->dev, size, &phy_addr_sglbase,
+ GFP_KERNEL);
+
+ if (!sgl_base) {
+ dev_err(chan->dev,
+ "Sgl bds in two channel mode for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_sgl_bds;
+ }
+
+ size = chan->total_descriptors * sizeof(struct STATUS_DMA_DESCRIPTOR);
+ sta_base = dma_alloc_coherent(chan->dev, size, &phy_addr_stabase,
+ GFP_KERNEL);
+
+ if (!sta_base) {
+ dev_err(chan->dev,
+ "Sta bds in two channel mode for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_sta_bds;
+ }
+
+ if (chan->direction == DMA_TO_DEVICE) {
+ chan->psrc_sgl_bd = sgl_base;
+ chan->src_sgl_bd_pa = phy_addr_sglbase;
+
+ chan->psrc_sta_bd = sta_base;
+ chan->src_sta_bd_pa = phy_addr_stabase;
+
+ chan->pdst_sgl_bd = NULL;
+ chan->dst_sgl_bd_pa = 0;
+
+ chan->pdst_sta_bd = NULL;
+ chan->dst_sta_bd_pa = 0;
+
+ } else if (chan->direction == DMA_FROM_DEVICE) {
+ chan->psrc_sgl_bd = NULL;
+ chan->src_sgl_bd_pa = 0;
+
+ chan->psrc_sta_bd = NULL;
+ chan->src_sta_bd_pa = 0;
+
+ chan->pdst_sgl_bd = sgl_base;
+ chan->dst_sgl_bd_pa = phy_addr_sglbase;
+
+ chan->pdst_sta_bd = sta_base;
+ chan->dst_sta_bd_pa = phy_addr_stabase;
+
+ } else {
+ dev_err(chan->dev,
+ "%d %s() Unsupported channel direction\n",
+ __LINE__, __func__);
+ goto unsupported_channel_direction;
+ }
+
+ return 0;
+
+unsupported_channel_direction:
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, sta_base, phy_addr_stabase);
+err_no_sta_bds:
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, sgl_base, phy_addr_sglbase);
+err_no_sgl_bds:
+
+ return -ENOMEM;
+}
+
+static int dma_alloc_descriptors_all_queues(struct ps_pcie_dma_chan *chan)
+{
+ size_t size;
+
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ chan->psrc_sgl_bd =
+ dma_alloc_coherent(chan->dev, size, &chan->src_sgl_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->psrc_sgl_bd) {
+ dev_err(chan->dev,
+ "Alloc fail src q buffer descriptors for chan %d\n",
+ chan->channel_number);
+ goto err_no_src_sgl_descriptors;
+ }
+
+ size = chan->total_descriptors * sizeof(struct DEST_DMA_DESCRIPTOR);
+ chan->pdst_sgl_bd =
+ dma_alloc_coherent(chan->dev, size, &chan->dst_sgl_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->pdst_sgl_bd) {
+ dev_err(chan->dev,
+ "Alloc fail dst q buffer descriptors for chan %d\n",
+ chan->channel_number);
+ goto err_no_dst_sgl_descriptors;
+ }
+
+ size = chan->total_descriptors * sizeof(struct STATUS_DMA_DESCRIPTOR);
+ chan->psrc_sta_bd =
+ dma_alloc_coherent(chan->dev, size, &chan->src_sta_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->psrc_sta_bd) {
+ dev_err(chan->dev,
+ "Unable to allocate src q status bds for chan %d\n",
+ chan->channel_number);
+ goto err_no_src_sta_descriptors;
+ }
+
+ chan->pdst_sta_bd =
+ dma_alloc_coherent(chan->dev, size, &chan->dst_sta_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->pdst_sta_bd) {
+ dev_err(chan->dev,
+ "Unable to allocate Dst q status bds for chan %d\n",
+ chan->channel_number);
+ goto err_no_dst_sta_descriptors;
+ }
+
+ return 0;
+
+err_no_dst_sta_descriptors:
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sta_bd,
+ chan->src_sta_bd_pa);
+err_no_src_sta_descriptors:
+ size = chan->total_descriptors *
+ sizeof(struct DEST_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->pdst_sgl_bd,
+ chan->dst_sgl_bd_pa);
+err_no_dst_sgl_descriptors:
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sgl_bd,
+ chan->src_sgl_bd_pa);
+
+err_no_src_sgl_descriptors:
+ return -ENOMEM;
+}
+
+static void xlnx_ps_pcie_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct ps_pcie_dma_chan *chan;
+
+ if (!dchan)
+ return;
+
+ chan = to_xilinx_chan(dchan);
+
+ if (chan->state == CHANNEL_RESOURCE_UNALLOCATED)
+ return;
+
+ if (chan->maintenance_workq) {
+ if (completion_done(&chan->chan_shutdown_complt))
+ reinit_completion(&chan->chan_shutdown_complt);
+ queue_work(chan->maintenance_workq,
+ &chan->handle_chan_shutdown);
+ wait_for_completion_interruptible(&chan->chan_shutdown_complt);
+
+ xlnx_ps_pcie_free_worker_queues(chan);
+ xlnx_ps_pcie_free_pkt_ctxts(chan);
+ xlnx_ps_pcie_destroy_mempool(chan);
+ xlnx_ps_pcie_free_descriptors(chan);
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_RESOURCE_UNALLOCATED;
+ spin_unlock(&chan->channel_lock);
+ }
+}
+
+static int xlnx_ps_pcie_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct ps_pcie_dma_chan *chan;
+
+ if (!dchan)
+ return -EINVAL;
+
+ chan = to_xilinx_chan(dchan);
+
+ if (chan->state != CHANNEL_RESOURCE_UNALLOCATED)
+ return 0;
+
+ if (chan->num_queues == DEFAULT_DMA_QUEUES) {
+ if (dma_alloc_descriptors_all_queues(chan) != 0) {
+ dev_err(chan->dev,
+ "Alloc fail bds for channel %d\n",
+ chan->channel_number);
+ goto err_no_descriptors;
+ }
+ } else if (chan->num_queues == TWO_DMA_QUEUES) {
+ if (dma_alloc_descriptors_two_queues(chan) != 0) {
+ dev_err(chan->dev,
+ "Alloc fail bds for two queues of channel %d\n",
+ chan->channel_number);
+ goto err_no_descriptors;
+ }
+ }
+
+ if (xlnx_ps_pcie_alloc_mempool(chan) != 0) {
+ dev_err(chan->dev,
+ "Unable to allocate memory pool for channel %d\n",
+ chan->channel_number);
+ goto err_no_mempools;
+ }
+
+ if (xlnx_ps_pcie_alloc_pkt_contexts(chan) != 0) {
+ dev_err(chan->dev,
+ "Unable to allocate packet contexts for channel %d\n",
+ chan->channel_number);
+ goto err_no_pkt_ctxts;
+ }
+
+ if (xlnx_ps_pcie_alloc_worker_threads(chan) != 0) {
+ dev_err(chan->dev,
+ "Unable to allocate worker queues for channel %d\n",
+ chan->channel_number);
+ goto err_no_worker_queues;
+ }
+
+ xlnx_ps_pcie_reset_channel(chan);
+
+ dma_cookie_init(dchan);
+
+ return 0;
+
+err_no_worker_queues:
+ xlnx_ps_pcie_free_pkt_ctxts(chan);
+err_no_pkt_ctxts:
+ xlnx_ps_pcie_destroy_mempool(chan);
+err_no_mempools:
+ xlnx_ps_pcie_free_descriptors(chan);
+err_no_descriptors:
+ return -ENOMEM;
+}
+
+static dma_cookie_t xilinx_intr_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ps_pcie_intr_segment *intr_seg =
+ to_ps_pcie_dma_tx_intr_descriptor(tx);
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return -EINVAL;
+
+ spin_lock(&chan->cookie_lock);
+ cookie = dma_cookie_assign(tx);
+ spin_unlock(&chan->cookie_lock);
+
+ spin_lock(&chan->pending_interrupts_lock);
+ list_add_tail(&intr_seg->node, &chan->pending_interrupts_list);
+ spin_unlock(&chan->pending_interrupts_lock);
+
+ return cookie;
+}
+
+static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ps_pcie_tx_segment *seg = to_ps_pcie_dma_tx_descriptor(tx);
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return -EINVAL;
+
+ spin_lock(&chan->cookie_lock);
+ cookie = dma_cookie_assign(tx);
+ spin_unlock(&chan->cookie_lock);
+
+ spin_lock(&chan->pending_list_lock);
+ list_add_tail(&seg->node, &chan->pending_list);
+ spin_unlock(&chan->pending_list_lock);
+
+ return cookie;
+}
+
+/**
+ * xlnx_ps_pcie_dma_prep_memcpy - prepare descriptors for a memcpy transaction
+ * @channel: DMA channel
+ * @dma_dst: destination address
+ * @dma_src: source address
+ * @len: transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xlnx_ps_pcie_dma_prep_memcpy(struct dma_chan *channel, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len,
+ unsigned long flags)
+{
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(channel);
+ struct ps_pcie_tx_segment *seg = NULL;
+ struct ps_pcie_transfer_elements *ele = NULL;
+ struct ps_pcie_transfer_elements *ele_nxt = NULL;
+ u32 i;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return NULL;
+
+ if (chan->num_queues != DEFAULT_DMA_QUEUES) {
+ dev_err(chan->dev, "Only prep_slave_sg for channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ seg = mempool_alloc(chan->transactions_pool, GFP_ATOMIC);
+ if (!seg) {
+ dev_err(chan->dev, "Tx segment alloc for channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ memset(seg, 0, sizeof(*seg));
+ INIT_LIST_HEAD(&seg->transfer_nodes);
+
+ for (i = 0; i < len / MAX_TRANSFER_LENGTH; i++) {
+ ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
+ if (!ele) {
+ dev_err(chan->dev, "Tx element %d for channel %d\n",
+ i, chan->channel_number);
+ goto err_elements_prep_memcpy;
+ }
+ ele->src_pa = dma_src + (i * MAX_TRANSFER_LENGTH);
+ ele->dst_pa = dma_dst + (i * MAX_TRANSFER_LENGTH);
+ ele->transfer_bytes = MAX_TRANSFER_LENGTH;
+ list_add_tail(&ele->node, &seg->transfer_nodes);
+ seg->src_elements++;
+ seg->dst_elements++;
+ seg->total_transfer_bytes += ele->transfer_bytes;
+ ele = NULL;
+ }
+
+ if (len % MAX_TRANSFER_LENGTH) {
+ ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
+ if (!ele) {
+ dev_err(chan->dev, "Tx element %d for channel %d\n",
+ i, chan->channel_number);
+ goto err_elements_prep_memcpy;
+ }
+ ele->src_pa = dma_src + (i * MAX_TRANSFER_LENGTH);
+ ele->dst_pa = dma_dst + (i * MAX_TRANSFER_LENGTH);
+ ele->transfer_bytes = len % MAX_TRANSFER_LENGTH;
+ list_add_tail(&ele->node, &seg->transfer_nodes);
+ seg->src_elements++;
+ seg->dst_elements++;
+ seg->total_transfer_bytes += ele->transfer_bytes;
+ }
+
+ if (seg->src_elements > chan->total_descriptors) {
+ dev_err(chan->dev, "Insufficient descriptors in channel %d for dma transaction\n",
+ chan->channel_number);
+ goto err_elements_prep_memcpy;
+ }
+
+ dma_async_tx_descriptor_init(&seg->async_tx, &chan->common);
+ seg->async_tx.flags = flags;
+ async_tx_ack(&seg->async_tx);
+ seg->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ return &seg->async_tx;
+
+err_elements_prep_memcpy:
+ list_for_each_entry_safe(ele, ele_nxt, &seg->transfer_nodes, node) {
+ list_del(&ele->node);
+ mempool_free(ele, chan->tx_elements_pool);
+ }
+ mempool_free(seg, chan->transactions_pool);
+ return NULL;
+}
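+
+/*
+ * Illustrative dmaengine client usage for a memcpy-capable channel
+ * (my_filter_fn and match are placeholders, not part of this driver):
+ *
+ *   dma_cap_mask_t mask;
+ *
+ *   dma_cap_zero(mask);
+ *   dma_cap_set(DMA_MEMCPY, mask);
+ *   chan = dma_request_channel(mask, my_filter_fn, &match);
+ *   tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
+ *                                  DMA_PREP_INTERRUPT);
+ *   cookie = dmaengine_submit(tx);
+ *   dma_async_issue_pending(chan);
+ */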
+
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_slave_sg(
+ struct dma_chan *channel, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(channel);
+ struct ps_pcie_tx_segment *seg = NULL;
+ struct scatterlist *sgl_ptr;
+ struct ps_pcie_transfer_elements *ele = NULL;
+ struct ps_pcie_transfer_elements *ele_nxt = NULL;
+ u32 i, j;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return NULL;
+
+ if (!(is_slave_direction(direction)))
+ return NULL;
+
+ if (!sgl || sg_len == 0)
+ return NULL;
+
+ if (chan->num_queues != TWO_DMA_QUEUES) {
+ dev_err(chan->dev, "Only prep_dma_memcpy is supported channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ seg = mempool_alloc(chan->transactions_pool, GFP_ATOMIC);
+ if (!seg) {
+ dev_err(chan->dev, "Unable to allocate tx segment channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ memset(seg, 0, sizeof(*seg));
+ INIT_LIST_HEAD(&seg->transfer_nodes);
+
+ for_each_sg(sgl, sgl_ptr, sg_len, j) {
+ for (i = 0; i < sg_dma_len(sgl_ptr) / MAX_TRANSFER_LENGTH; i++) {
+ ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
+ if (!ele) {
+ dev_err(chan->dev, "Tx element %d for channel %d\n",
+ i, chan->channel_number);
+ goto err_elements_prep_slave_sg;
+ }
+ if (chan->direction == DMA_TO_DEVICE) {
+ ele->src_pa = sg_dma_address(sgl_ptr) +
+ (i * MAX_TRANSFER_LENGTH);
+ seg->src_elements++;
+ } else {
+ ele->dst_pa = sg_dma_address(sgl_ptr) +
+ (i * MAX_TRANSFER_LENGTH);
+ seg->dst_elements++;
+ }
+ ele->transfer_bytes = MAX_TRANSFER_LENGTH;
+ list_add_tail(&ele->node, &seg->transfer_nodes);
+ seg->total_transfer_bytes += ele->transfer_bytes;
+ ele = NULL;
+ }
+ if (sg_dma_len(sgl_ptr) % MAX_TRANSFER_LENGTH) {
+ ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
+ if (!ele) {
+ dev_err(chan->dev, "Tx element %d for channel %d\n",
+ i, chan->channel_number);
+ goto err_elements_prep_slave_sg;
+ }
+ if (chan->direction == DMA_TO_DEVICE) {
+ ele->src_pa = sg_dma_address(sgl_ptr) +
+ (i * MAX_TRANSFER_LENGTH);
+ seg->src_elements++;
+ } else {
+ ele->dst_pa = sg_dma_address(sgl_ptr) +
+ (i * MAX_TRANSFER_LENGTH);
+ seg->dst_elements++;
+ }
+ ele->transfer_bytes = sg_dma_len(sgl_ptr) %
+ MAX_TRANSFER_LENGTH;
+ list_add_tail(&ele->node, &seg->transfer_nodes);
+ seg->total_transfer_bytes += ele->transfer_bytes;
+ }
+ }
+
+ if (max(seg->src_elements, seg->dst_elements) >
+ chan->total_descriptors) {
+ dev_err(chan->dev, "Insufficient descriptors in channel %d for dma transaction\n",
+ chan->channel_number);
+ goto err_elements_prep_slave_sg;
+ }
+
+ dma_async_tx_descriptor_init(&seg->async_tx, &chan->common);
+ seg->async_tx.flags = flags;
+ async_tx_ack(&seg->async_tx);
+ seg->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ return &seg->async_tx;
+
+err_elements_prep_slave_sg:
+ list_for_each_entry_safe(ele, ele_nxt, &seg->transfer_nodes, node) {
+ list_del(&ele->node);
+ mempool_free(ele, chan->tx_elements_pool);
+ }
+ mempool_free(seg, chan->transactions_pool);
+ return NULL;
+}
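
Editorial note (not part of this patch): each sg entry is split into
MAX_TRANSFER_LENGTH-sized hardware elements by the two loops above, so the
element count per entry is equivalent to:

	DIV_ROUND_UP(sg_dma_len(sgl_ptr), MAX_TRANSFER_LENGTH)

which is what the descriptor-availability check against
chan->total_descriptors effectively sums over the whole scatterlist.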
+
+static void xlnx_ps_pcie_dma_issue_pending(struct dma_chan *channel)
+{
+ struct ps_pcie_dma_chan *chan;
+
+ if (!channel)
+ return;
+
+ chan = to_xilinx_chan(channel);
+
+ if (!list_empty(&chan->pending_list)) {
+ spin_lock(&chan->pending_list_lock);
+ spin_lock(&chan->active_list_lock);
+ list_splice_tail_init(&chan->pending_list,
+ &chan->active_list);
+ spin_unlock(&chan->active_list_lock);
+ spin_unlock(&chan->pending_list_lock);
+ }
+
+ if (!list_empty(&chan->pending_interrupts_list)) {
+ spin_lock(&chan->pending_interrupts_lock);
+ spin_lock(&chan->active_interrupts_lock);
+ list_splice_tail_init(&chan->pending_interrupts_list,
+ &chan->active_interrupts_list);
+ spin_unlock(&chan->active_interrupts_lock);
+ spin_unlock(&chan->pending_interrupts_lock);
+ }
+
+ if (chan->chan_programming)
+ queue_work(chan->chan_programming,
+ &chan->handle_chan_programming);
+}
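
Editorial note (not part of this patch): issue_pending only splices pending
descriptors onto the active lists under both locks and then kicks the
per-channel programming workqueue; the actual hardware programming happens
in that worker, following the usual dmaengine contract that submission is
decoupled from execution.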
+
+static int xlnx_ps_pcie_dma_terminate_all(struct dma_chan *channel)
+{
+ struct ps_pcie_dma_chan *chan;
+
+ if (!channel)
+ return -EINVAL;
+
+ chan = to_xilinx_chan(channel);
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return 1;
+
+ if (chan->maintenance_workq) {
+ if (completion_done(&chan->chan_terminate_complete))
+ reinit_completion(&chan->chan_terminate_complete);
+ queue_work(chan->maintenance_workq,
+ &chan->handle_chan_terminate);
+ wait_for_completion_interruptible(
+ &chan->chan_terminate_complete);
+ }
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_interrupt(
+ struct dma_chan *channel, unsigned long flags)
+{
+ struct ps_pcie_dma_chan *chan;
+ struct ps_pcie_intr_segment *intr_segment = NULL;
+
+ if (!channel)
+ return NULL;
+
+ chan = to_xilinx_chan(channel);
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return NULL;
+
+ intr_segment = mempool_alloc(chan->intr_transactions_pool, GFP_ATOMIC);
+ if (!intr_segment) {
+ dev_err(chan->dev, "Interrupt segment allocation failed for channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ memset(intr_segment, 0, sizeof(*intr_segment));
+
+ dma_async_tx_descriptor_init(&intr_segment->async_intr_tx,
+ &chan->common);
+ intr_segment->async_intr_tx.flags = flags;
+ async_tx_ack(&intr_segment->async_intr_tx);
+ intr_segment->async_intr_tx.tx_submit = xilinx_intr_tx_submit;
+
+ return &intr_segment->async_intr_tx;
+}
+
+static int xlnx_pcie_dma_driver_probe(struct platform_device *platform_dev)
+{
+ int err, i;
+ struct xlnx_pcie_dma_device *xdev;
+ static u16 board_number;
+
+ xdev = devm_kzalloc(&platform_dev->dev,
+ sizeof(struct xlnx_pcie_dma_device), GFP_KERNEL);
+
+ if (!xdev)
+ return -ENOMEM;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ xdev->dma_buf_ext_addr = true;
+#else
+ xdev->dma_buf_ext_addr = false;
+#endif
+
+ xdev->is_rootdma = device_property_read_bool(&platform_dev->dev,
+ "rootdma");
+
+ xdev->dev = &platform_dev->dev;
+ xdev->board_number = board_number;
+
+ err = device_property_read_u32(&platform_dev->dev, "numchannels",
+ &xdev->num_channels);
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to find numchannels property\n");
+ goto platform_driver_probe_return;
+ }
+
+ if (xdev->num_channels == 0 || xdev->num_channels >
+ MAX_ALLOWED_CHANNELS_IN_HW) {
+ dev_warn(&platform_dev->dev,
+ "Invalid numchannels property value\n");
+ xdev->num_channels = MAX_ALLOWED_CHANNELS_IN_HW;
+ }
+
+ xdev->channels = devm_kcalloc(&platform_dev->dev, xdev->num_channels,
+ sizeof(*xdev->channels), GFP_KERNEL);
+ if (!xdev->channels) {
+ err = -ENOMEM;
+ goto platform_driver_probe_return;
+ }
+
+ if (xdev->is_rootdma)
+ err = read_rootdma_config(platform_dev, xdev);
+ else
+ err = read_epdma_config(platform_dev, xdev);
+
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to initialize dma configuration\n");
+ goto platform_driver_probe_return;
+ }
+
+ /* Initialize the DMA engine */
+ INIT_LIST_HEAD(&xdev->common.channels);
+
+ dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+ dma_cap_set(DMA_INTERRUPT, xdev->common.cap_mask);
+ dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
+
+ xdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ xdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ xdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ xdev->common.device_alloc_chan_resources =
+ xlnx_ps_pcie_dma_alloc_chan_resources;
+ xdev->common.device_free_chan_resources =
+ xlnx_ps_pcie_dma_free_chan_resources;
+ xdev->common.device_terminate_all = xlnx_ps_pcie_dma_terminate_all;
+ xdev->common.device_tx_status = dma_cookie_status;
+ xdev->common.device_issue_pending = xlnx_ps_pcie_dma_issue_pending;
+ xdev->common.device_prep_dma_interrupt =
+ xlnx_ps_pcie_dma_prep_interrupt;
+ xdev->common.device_prep_dma_memcpy = xlnx_ps_pcie_dma_prep_memcpy;
+ xdev->common.device_prep_slave_sg = xlnx_ps_pcie_dma_prep_slave_sg;
+ xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ err = probe_channel_properties(platform_dev, xdev, i);
+
+ if (err != 0) {
+ dev_err(xdev->dev,
+ "Unable to read channel properties\n");
+ goto platform_driver_probe_return;
+ }
+ }
+
+ if (xdev->is_rootdma)
+ err = platform_irq_setup(xdev);
+ else
+ err = irq_setup(xdev);
+ if (err) {
+ dev_err(xdev->dev, "Cannot request irq lines for device %d\n",
+ xdev->board_number);
+ goto platform_driver_probe_return;
+ }
+
+ err = dma_async_device_register(&xdev->common);
+ if (err) {
+ dev_err(xdev->dev,
+ "Unable to register board %d with dma framework\n",
+ xdev->board_number);
+ goto platform_driver_probe_return;
+ }
+
+ platform_set_drvdata(platform_dev, xdev);
+
+ board_number++;
+
+ dev_info(&platform_dev->dev, "PS PCIe Platform driver probed\n");
+ return 0;
+
+platform_driver_probe_return:
+ return err;
+}
+
+static int xlnx_pcie_dma_driver_remove(struct platform_device *platform_dev)
+{
+ struct xlnx_pcie_dma_device *xdev =
+ platform_get_drvdata(platform_dev);
+ int i;
+
+ for (i = 0; i < xdev->num_channels; i++)
+ xlnx_ps_pcie_dma_free_chan_resources(&xdev->channels[i].common);
+
+ dma_async_device_unregister(&xdev->common);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id xlnx_pcie_root_dma_of_ids[] = {
+ { .compatible = "xlnx,ps_pcie_dma-1.00.a", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xlnx_pcie_root_dma_of_ids);
+#endif
+
+static struct platform_driver xlnx_pcie_dma_driver = {
+ .driver = {
+ .name = XLNX_PLATFORM_DRIVER_NAME,
+ .of_match_table = of_match_ptr(xlnx_pcie_root_dma_of_ids),
+ .owner = THIS_MODULE,
+ },
+ .probe = xlnx_pcie_dma_driver_probe,
+ .remove = xlnx_pcie_dma_driver_remove,
+};
+
+int dma_platform_driver_register(void)
+{
+ return platform_driver_register(&xlnx_pcie_dma_driver);
+}
+
+void dma_platform_driver_unregister(void)
+{
+ platform_driver_unregister(&xlnx_pcie_dma_driver);
+}
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index b61d0c79dffb..a22ad095edb6 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -853,6 +853,98 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
}
/**
+ * zynqmp_dma_prep_sg - prepare descriptors for a SG-to-SG DMA transaction
+ * @dchan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg(
+ struct dma_chan *dchan, struct scatterlist *dst_sg,
+ unsigned int dst_sg_len, struct scatterlist *src_sg,
+ unsigned int src_sg_len, unsigned long flags)
+{
+ struct zynqmp_dma_desc_sw *new, *first = NULL;
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+ void *desc = NULL, *prev = NULL;
+ size_t len, dst_avail, src_avail;
+ dma_addr_t dma_dst, dma_src;
+ u32 desc_cnt = 0, i;
+ struct scatterlist *sg;
+
+ for_each_sg(src_sg, sg, src_sg_len, i)
+ desc_cnt += DIV_ROUND_UP(sg_dma_len(sg),
+ ZYNQMP_DMA_MAX_TRANS_LEN);
+
+ spin_lock_bh(&chan->lock);
+ if (desc_cnt > chan->desc_free_cnt) {
+ spin_unlock_bh(&chan->lock);
+ dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
+ return NULL;
+ }
+ chan->desc_free_cnt -= desc_cnt;
+ spin_unlock_bh(&chan->lock);
+
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+
+ /* Run until we are out of scatterlist entries */
+ while (true) {
+ /* Allocate and populate the descriptor */
+ new = zynqmp_dma_get_descriptor(chan);
+ desc = (struct zynqmp_dma_desc_ll *)new->src_v;
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
+ if (len == 0)
+ goto fetch;
+ dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+ dst_avail;
+ dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+ src_avail;
+
+ zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst,
+ len, prev);
+ prev = desc;
+ dst_avail -= len;
+ src_avail -= len;
+
+ if (!first)
+ first = new;
+ else
+ list_add_tail(&new->node, &first->tx_list);
+fetch:
+ /* Fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ if (dst_sg_len == 0)
+ break;
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+ dst_sg_len--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+ /* Fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ if (src_sg_len == 0)
+ break;
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
+ src_sg_len--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ zynqmp_dma_desc_config_eod(chan, desc);
+ first->async_tx.flags = flags;
+ return &first->async_tx;
+}
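
Editorial sketch (not part of this patch): how a client would drive the
SG-to-SG operation registered below as device_prep_dma_sg; the function
name is illustrative and both scatterlists are assumed to be DMA-mapped.

#include <linux/dmaengine.h>

static dma_cookie_t example_sg_copy(struct dma_chan *chan,
				    struct scatterlist *dst_sg,
				    unsigned int dst_nents,
				    struct scatterlist *src_sg,
				    unsigned int src_nents)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
					      src_sg, src_nents,
					      DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return cookie;
}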
+
+/**
* zynqmp_dma_chan_remove - Channel remove function
* @chan: ZynqMP DMA channel pointer
*/
@@ -1051,9 +1143,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&zdev->common.channels);
dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ dma_cap_set(DMA_SG, zdev->common.cap_mask);
dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);
p = &zdev->common;
+ p->device_prep_dma_sg = zynqmp_dma_prep_sg;
p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
p->device_terminate_all = zynqmp_dma_device_terminate_all;
p->device_issue_pending = zynqmp_dma_issue_pending;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 417dad635526..10e02429a16b 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -334,6 +334,13 @@ config EDAC_CPC925
a companion chip to the PowerPC 970 family of
processors.
+config EDAC_PL310_L2
+ tristate "PL310 L2 Cache Controller"
+ depends on ARM
+ help
+ Support for parity error detection on the PL310 L2 cache
+ controller data and tag RAM memory.
+
config EDAC_HIGHBANK_MC
tristate "Highbank Memory Controller"
depends on ARCH_HIGHBANK
@@ -480,6 +487,13 @@ config EDAC_SYNOPSYS
Support for error detection and correction on the Synopsys DDR
memory controller.
+config EDAC_ZYNQMP_OCM
+ tristate "Xilinx ZynqMP OCM Controller"
+ depends on ARCH_ZYNQMP
+ help
+ Support for error detection and correction on the Xilinx ZynqMP OCM
+ controller.
+
config EDAC_XGENE
tristate "APM X-Gene SoC"
depends on (ARM64 || COMPILE_TEST)
@@ -487,6 +501,14 @@ config EDAC_XGENE
Support for error detection and correction on the
APM X-Gene family of SOCs.
+config EDAC_CORTEX_ARM64
+ tristate "ARM Cortex A57/A53"
+ default y if !CPU_IDLE
+ depends on !CPU_IDLE && ARM64
+ help
+ Support for error detection and correction on the
+ ARM Cortex A57 and A53.
+
config EDAC_TI
tristate "Texas Instruments DDR3 ECC Controller"
depends on ARCH_KEYSTONE || SOC_DRA7XX
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index d77200c9680b..9a8ddec9040b 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -80,9 +80,12 @@ obj-$(CONFIG_EDAC_THUNDERX) += thunderx_edac.o
obj-$(CONFIG_EDAC_ALTERA) += altera_edac.o
obj-$(CONFIG_EDAC_SIFIVE) += sifive_edac.o
+obj-$(CONFIG_EDAC_PL310_L2) += pl310_edac_l2.o
obj-$(CONFIG_EDAC_ARMADA_XP) += armada_xp_edac.o
obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o
obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o
+obj-$(CONFIG_EDAC_CORTEX_ARM64) += cortex_arm64_edac.o
+obj-$(CONFIG_EDAC_ZYNQMP_OCM) += zynqmp_ocm_edac.o
obj-$(CONFIG_EDAC_TI) += ti_edac.o
obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o
obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o
diff --git a/drivers/edac/cortex_arm64_edac.c b/drivers/edac/cortex_arm64_edac.c
new file mode 100644
index 000000000000..db89ee0c3cc3
--- /dev/null
+++ b/drivers/edac/cortex_arm64_edac.c
@@ -0,0 +1,470 @@
+/*
+ * Cortex A57 and A53 EDAC
+ *
+ * Copyright (c) 2015, Advanced Micro Devices
+ * Author: Brijesh Singh <brijeshkumar.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <ras/ras_event.h>
+
+#include "edac_module.h"
+
+#define DRV_NAME "cortex_edac"
+
+#define CPUMERRSR_EL1_INDEX(x, y) ((x) & (y))
+#define CPUMERRSR_EL1_BANK_WAY(x, y) (((x) >> 18) & (y))
+#define CPUMERRSR_EL1_RAMID(x) (((x) >> 24) & 0x7f)
+#define CPUMERRSR_EL1_VALID(x) ((x) & (1U << 31))
+#define CPUMERRSR_EL1_REPEAT(x) (((x) >> 32) & 0x7f)
+#define CPUMERRSR_EL1_OTHER(x) (((x) >> 40) & 0xff)
+#define CPUMERRSR_EL1_FATAL(x) ((x) & (1UL << 63))
+#define L1_I_TAG_RAM 0x00
+#define L1_I_DATA_RAM 0x01
+#define L1_D_TAG_RAM 0x08
+#define L1_D_DATA_RAM 0x09
+#define L1_D_DIRTY_RAM 0x14
+#define TLB_RAM 0x18
+
+#define L2MERRSR_EL1_CPUID_WAY(x) (((x) >> 18) & 0xf)
+#define L2MERRSR_EL1_RAMID(x) (((x) >> 24) & 0x7f)
+#define L2MERRSR_EL1_VALID(x) ((x) & (1U << 31))
+#define L2MERRSR_EL1_REPEAT(x) (((x) >> 32) & 0xff)
+#define L2MERRSR_EL1_OTHER(x) (((x) >> 40) & 0xff)
+#define L2MERRSR_EL1_FATAL(x) ((x) & (1UL << 63))
+#define L2_TAG_RAM 0x10
+#define L2_DATA_RAM 0x11
+#define L2_SNOOP_RAM 0x12
+#define L2_DIRTY_RAM 0x14
+#define L2_INCLUSION_PF_RAM 0x18
+
+#define L1_CACHE 0
+#define L2_CACHE 1
+
+#define EDAC_MOD_STR DRV_NAME
+
+/* Error injection macros */
+#define L1_DCACHE_ERRINJ_ENABLE (1 << 6)
+#define L1_DCACHE_ERRINJ_DISABLE (~(1 << 6))
+#define L2_DCACHE_ERRINJ_ENABLE (1 << 29)
+#define L2_DCACHE_ERRINJ_DISABLE (~(1 << 29))
+#define L2_ECC_PROTECTION (1 << 22)
+
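Editorial worked example (not part of this patch): with the masks above, a
CPUMERRSR_EL1 value of 0x0000000289000123 read on an A53 decodes as
VALID=1, RAMID=0x09 (L1-D Data RAM), bank=0, index=0x123, repeat=2,
other=0, fatal=0, i.e. a repeating correctable error in the L1 data RAM.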
+static int poll_msec = 100;
+
+struct cortex_arm64_edac {
+ struct edac_device_ctl_info *edac_ctl;
+};
+
+static inline u64 read_cpumerrsr_el1(void)
+{
+ u64 val;
+
+ asm volatile("mrs %0, s3_1_c15_c2_2" : "=r" (val));
+ return val;
+}
+
+static inline void write_cpumerrsr_el1(u64 val)
+{
+ asm volatile("msr s3_1_c15_c2_2, %0" :: "r" (val));
+}
+
+static inline u64 read_l2merrsr_el1(void)
+{
+ u64 val;
+
+ asm volatile("mrs %0, s3_1_c15_c2_3" : "=r" (val));
+ return val;
+}
+
+static inline void write_l2merrsr_el1(u64 val)
+{
+ asm volatile("msr s3_1_c15_c2_3, %0" :: "r" (val));
+}
+
+static inline void cortexa53_edac_busy_on_inst(void)
+{
+ asm volatile("isb sy");
+}
+
+static inline void cortexa53_edac_busy_on_data(void)
+{
+ asm volatile("dsb sy");
+}
+
+static inline void write_l2actrl_el1(u64 val)
+{
+ asm volatile("msr s3_1_c15_c0_0, %0" :: "r" (val));
+ cortexa53_edac_busy_on_inst();
+}
+
+static inline u64 read_l2actrl_el1(void)
+{
+ u64 val;
+
+ asm volatile("mrs %0, s3_1_c15_c0_0" : "=r" (val));
+ return val;
+}
+
+static inline u64 read_l2ctlr_el1(void)
+{
+ u64 rval;
+
+ asm volatile("mrs %0, S3_1_C11_C0_2" : "=r" (rval));
+ return rval;
+
+}
+
+static inline u64 read_l1actrl_el1(void)
+{
+ u64 rval;
+
+ asm volatile("mrs %0, S3_1_C15_C2_0" : "=r" (rval));
+ return rval;
+}
+
+static inline void write_l1actrl_el1(u64 val)
+{
+ asm volatile("msr S3_1_C15_C2_0, %0" :: "r" (val));
+}
+
+static void parse_cpumerrsr(void *arg)
+{
+ int cpu, partnum, way;
+ unsigned int index = 0;
+ u64 val = read_cpumerrsr_el1();
+ int repeat_err, other_err;
+
+ /* we do not support fatal error handling so far */
+ if (CPUMERRSR_EL1_FATAL(val))
+ return;
+
+ /* check if we have valid error before continuing */
+ if (!CPUMERRSR_EL1_VALID(val))
+ return;
+
+ cpu = smp_processor_id();
+ partnum = read_cpuid_part_number();
+ repeat_err = CPUMERRSR_EL1_REPEAT(val);
+ other_err = CPUMERRSR_EL1_OTHER(val);
+
+ /*
+ * way/bank and index address bit ranges are different between
+ * A57 and A53
+ */
+ if (partnum == ARM_CPU_PART_CORTEX_A57) {
+ index = CPUMERRSR_EL1_INDEX(val, 0x1ffff);
+ way = CPUMERRSR_EL1_BANK_WAY(val, 0x1f);
+ } else {
+ index = CPUMERRSR_EL1_INDEX(val, 0xfff);
+ way = CPUMERRSR_EL1_BANK_WAY(val, 0x7);
+ }
+
+ edac_printk(KERN_CRIT, EDAC_MOD_STR, "CPU%d L1 error detected!\n", cpu);
+ edac_printk(KERN_CRIT, EDAC_MOD_STR, "index=%#x, RAMID=", index);
+
+ switch (CPUMERRSR_EL1_RAMID(val)) {
+ case L1_I_TAG_RAM:
+ pr_cont("'L1-I Tag RAM' (way %d)", way);
+ break;
+ case L1_I_DATA_RAM:
+ pr_cont("'L1-I Data RAM' (bank %d)", way);
+ break;
+ case L1_D_TAG_RAM:
+ pr_cont("'L1-D Tag RAM' (way %d)", way);
+ break;
+ case L1_D_DATA_RAM:
+ pr_cont("'L1-D Data RAM' (bank %d)", way);
+ break;
+ case L1_D_DIRTY_RAM:
+ pr_cont("'L1 Dirty RAM'");
+ break;
+ case TLB_RAM:
+ pr_cont("'TLB RAM'");
+ break;
+ default:
+ pr_cont("'unknown'");
+ break;
+ }
+
+ pr_cont(", repeat=%d, other=%d (CPUMERRSR_EL1=%#llx)\n", repeat_err,
+ other_err, val);
+
+ trace_mc_event(HW_EVENT_ERR_CORRECTED, "L1 non-fatal error",
+ "", repeat_err, 0, 0, 0, -1, index, 0, 0, DRV_NAME);
+ write_cpumerrsr_el1(0);
+}
+
+static void a57_parse_l2merrsr_way(u8 ramid, u8 val)
+{
+ switch (ramid) {
+ case L2_TAG_RAM:
+ case L2_DATA_RAM:
+ case L2_DIRTY_RAM:
+ pr_cont("(cpu%d tag, way %d)", val / 2, val % 2);
+ break;
+ case L2_SNOOP_RAM:
+ pr_cont("(cpu%d tag, way %d)", (val & 0x6) >> 1,
+ (val & 0x1));
+ break;
+ }
+}
+
+static void a53_parse_l2merrsr_way(u8 ramid, u8 val)
+{
+ switch (ramid) {
+ case L2_TAG_RAM:
+ pr_cont("(way %d)", val);
+ case L2_DATA_RAM:
+ pr_cont("(bank %d)", val);
+ break;
+ case L2_SNOOP_RAM:
+ pr_cont("(cpu%d tag, way %d)", val / 2, val % 4);
+ break;
+ }
+}
+
+static void parse_l2merrsr(void *arg)
+{
+ int cpu, partnum;
+ unsigned int index;
+ int repeat_err, other_err;
+ u64 val = read_l2merrsr_el1();
+
+ /* we do not support fatal error handling so far */
+ if (L2MERRSR_EL1_FATAL(val))
+ return;
+
+ /* check if we have valid error before continuing */
+ if (!L2MERRSR_EL1_VALID(val))
+ return;
+
+ cpu = smp_processor_id();
+ partnum = read_cpuid_part_number();
+ repeat_err = L2MERRSR_EL1_REPEAT(val);
+ other_err = L2MERRSR_EL1_OTHER(val);
+
+ /* index address range is different between A57 and A53 */
+ if (partnum == ARM_CPU_PART_CORTEX_A57)
+ index = val & 0x1ffff;
+ else
+ index = (val >> 3) & 0x3fff;
+
+ edac_printk(KERN_CRIT, EDAC_MOD_STR, "CPU%d L2 error detected!\n", cpu);
+ edac_printk(KERN_CRIT, EDAC_MOD_STR, "index=%#x RAMID=", index);
+
+ switch (L2MERRSR_EL1_RAMID(val)) {
+ case L2_TAG_RAM:
+ pr_cont("'L2 Tag RAM'");
+ break;
+ case L2_DATA_RAM:
+ pr_cont("'L2 Data RAM'");
+ break;
+ case L2_SNOOP_RAM:
+ pr_cont("'L2 Snoop tag RAM'");
+ break;
+ case L2_DIRTY_RAM:
+ pr_cont("'L2 Dirty RAM'");
+ break;
+ case L2_INCLUSION_PF_RAM:
+ pr_cont("'L2 inclusion PF RAM'");
+ break;
+ default:
+ pr_cont("unknown");
+ break;
+ }
+
+ /* cpuid/way bit description is different between A57 and A53 */
+ if (partnum == ARM_CPU_PART_CORTEX_A57)
+ a57_parse_l2merrsr_way(L2MERRSR_EL1_RAMID(val),
+ L2MERRSR_EL1_CPUID_WAY(val));
+ else
+ a53_parse_l2merrsr_way(L2MERRSR_EL1_RAMID(val),
+ L2MERRSR_EL1_CPUID_WAY(val));
+
+ pr_cont(", repeat=%d, other=%d (L2MERRSR_EL1=%#llx)\n", repeat_err,
+ other_err, val);
+ trace_mc_event(HW_EVENT_ERR_CORRECTED, "L2 non-fatal error",
+ "", repeat_err, 0, 0, 0, -1, index, 0, 0, DRV_NAME);
+ write_l2merrsr_el1(0);
+}
+
+static void cortex_arm64_edac_check(struct edac_device_ctl_info *edac_ctl)
+{
+ int cpu;
+ struct cpumask cluster_mask, old_mask;
+
+ cpumask_clear(&cluster_mask);
+ cpumask_clear(&old_mask);
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ /* Check CPU L1 error */
+ smp_call_function_single(cpu, parse_cpumerrsr, NULL, 0);
+ cpumask_copy(&cluster_mask, topology_core_cpumask(cpu));
+ if (cpumask_equal(&cluster_mask, &old_mask))
+ continue;
+ cpumask_copy(&old_mask, &cluster_mask);
+ /* Check CPU L2 error */
+ smp_call_function_any(&cluster_mask, parse_l2merrsr, NULL, 0);
+ }
+ put_online_cpus();
+}
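
Editorial note (not part of this patch): L1 state is per-CPU, so
parse_cpumerrsr() runs on every online CPU, while L2MERRSR_EL1 is shared
per cluster; the topology_core_cpumask() comparison above deduplicates
clusters so that parse_l2merrsr() runs on just one CPU of each cluster.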
+
+static ssize_t cortexa53_edac_inject_L2_show(struct edac_device_ctl_info
+ *dci, char *data)
+{
+ return sprintf(data, "L2ACTLR_EL1: [0x%llx]\n\r", read_l2actrl_el1());
+}
+
+static ssize_t cortexa53_edac_inject_L2_store(
+ struct edac_device_ctl_info *dci, const char *data,
+ size_t count)
+{
+ u64 l2actrl, l2ecc;
+
+ if (!data)
+ return -EFAULT;
+
+ l2ecc = read_l2ctlr_el1();
+ if ((l2ecc & L2_ECC_PROTECTION)) {
+ l2actrl = read_l2actrl_el1();
+ l2actrl = l2actrl | L2_DCACHE_ERRINJ_ENABLE;
+ write_l2actrl_el1(l2actrl);
+ cortexa53_edac_busy_on_inst();
+ } else {
+ edac_printk(KERN_CRIT, EDAC_MOD_STR, "L2 ECC not enabled\n");
+ }
+
+ return count;
+}
+
+static ssize_t cortexa53_edac_inject_L1_show(struct edac_device_ctl_info
+ *dci, char *data)
+{
+ return sprintf(data, "L1CTLR_EL1: [0x%llx]\n\r", read_l1actrl_el1());
+}
+
+static ssize_t cortexa53_edac_inject_L1_store(
+ struct edac_device_ctl_info *dci, const char *data,
+ size_t count)
+{
+ u64 l1actrl;
+
+ if (!data)
+ return -EFAULT;
+
+ l1actrl = read_l1actrl_el1();
+ l1actrl |= L1_DCACHE_ERRINJ_ENABLE;
+ write_l1actrl_el1(l1actrl);
+ cortexa53_edac_busy_on_inst();
+
+ return count;
+}
+
+static struct edac_dev_sysfs_attribute cortexa53_edac_sysfs_attributes[] = {
+ {
+ .attr = {
+ .name = "inject_L2_Cache_Error",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = cortexa53_edac_inject_L2_show,
+ .store = cortexa53_edac_inject_L2_store},
+ {
+ .attr = {
+ .name = "inject_L1_Cache_Error",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = cortexa53_edac_inject_L1_show,
+ .store = cortexa53_edac_inject_L1_store},
+
+ /* End of list */
+ {
+ .attr = {.name = NULL}
+ }
+};
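
Editorial note (not part of this patch): the entries above are plain
edac_device sysfs hooks, so once the device registers they should surface
under the EDAC tree, e.g. (the exact path depends on the name the core
assigns the instance):

	echo 1 > /sys/devices/system/edac/cpu_cache/inject_L2_Cache_Error

Writing any value sets the error-injection bit in L2ACTLR_EL1 (provided L2
ECC protection is enabled); reading back dumps the raw register.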
+
+static void cortexa53_set_edac_sysfs_attributes(struct edac_device_ctl_info
+ *edac_dev)
+{
+ edac_dev->sysfs_attributes = cortexa53_edac_sysfs_attributes;
+}
+
+static int cortex_arm64_edac_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct cortex_arm64_edac *drv;
+ struct device *dev = &pdev->dev;
+
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ /* Only POLL mode is supported */
+ edac_op_state = EDAC_OPSTATE_POLL;
+
+ drv->edac_ctl = edac_device_alloc_ctl_info(0, "cpu_cache", 1, "L", 2,
+ 0, NULL, 0,
+ edac_device_alloc_index());
+ if (!drv->edac_ctl)
+ return -ENOMEM;
+
+ drv->edac_ctl->poll_msec = poll_msec;
+ drv->edac_ctl->edac_check = cortex_arm64_edac_check;
+ drv->edac_ctl->dev = dev;
+ drv->edac_ctl->mod_name = dev_name(dev);
+ drv->edac_ctl->dev_name = dev_name(dev);
+ drv->edac_ctl->ctl_name = "cache_err";
+ platform_set_drvdata(pdev, drv);
+
+ cortexa53_set_edac_sysfs_attributes(drv->edac_ctl);
+
+ rc = edac_device_add_device(drv->edac_ctl);
+ if (rc)
+ edac_device_free_ctl_info(drv->edac_ctl);
+
+ return rc;
+}
+
+static int cortex_arm64_edac_remove(struct platform_device *pdev)
+{
+ struct cortex_arm64_edac *drv = dev_get_drvdata(&pdev->dev);
+ struct edac_device_ctl_info *edac_ctl = drv->edac_ctl;
+
+ edac_device_del_device(edac_ctl->dev);
+ edac_device_free_ctl_info(edac_ctl);
+
+ return 0;
+}
+
+static const struct of_device_id cortex_arm64_edac_of_match[] = {
+ { .compatible = "arm,cortex-a57-edac" },
+ { .compatible = "arm,cortex-a53-edac" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cortex_arm64_edac_of_match);
+
+static struct platform_driver cortex_arm64_edac_driver = {
+ .probe = cortex_arm64_edac_probe,
+ .remove = cortex_arm64_edac_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = cortex_arm64_edac_of_match,
+ },
+};
+module_platform_driver(cortex_arm64_edac_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Brijesh Singh <brijeshkumar.singh@amd.com>");
+MODULE_DESCRIPTION("Cortex A57 and A53 EDAC driver");
+module_param(poll_msec, int, 0444);
+MODULE_PARM_DESC(poll_msec, "EDAC monitor poll interval in msec");
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 65cf2b9355c4..0ac6c49ecdbf 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -34,6 +34,9 @@
static DEFINE_MUTEX(device_ctls_mutex);
static LIST_HEAD(edac_device_list);
+/* Default workqueue processing interval on this instance, in msecs */
+#define DEFAULT_POLL_INTERVAL 1000
+
#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
@@ -366,7 +369,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
* whole one second to save timers firing all over the period
* between integral seconds
*/
- if (edac_dev->poll_msec == 1000)
+ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
@@ -396,7 +399,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
* timers firing on sub-second basis, while they are happy
* to fire together on the 1 second exactly
*/
- if (edac_dev->poll_msec == 1000)
+ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
@@ -424,17 +427,16 @@ static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
* Then restart the workq on the new delay
*/
void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
- unsigned long value)
+ unsigned long msec)
{
- unsigned long jiffs = msecs_to_jiffies(value);
-
- if (value == 1000)
- jiffs = round_jiffies_relative(value);
-
- edac_dev->poll_msec = value;
- edac_dev->delay = jiffs;
+ edac_dev->poll_msec = msec;
+ edac_dev->delay = msecs_to_jiffies(msec);
- edac_mod_work(&edac_dev->work, jiffs);
+ /* See comment in edac_device_workq_setup() above */
+ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ else
+ edac_mod_work(&edac_dev->work, edac_dev->delay);
}
int edac_device_alloc_index(void)
@@ -473,11 +475,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
/* This instance is NOW RUNNING */
edac_dev->op_state = OP_RUNNING_POLL;
- /*
- * enable workq processing on this instance,
- * default = 1000 msec
- */
- edac_device_workq_setup(edac_dev, 1000);
+ edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
} else {
edac_dev->op_state = OP_RUNNING_INTERRUPT;
}
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 388427d378b1..2369a56a08cc 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -57,7 +57,7 @@ bool edac_stop_work(struct delayed_work *work);
bool edac_mod_work(struct delayed_work *work, unsigned long delay);
extern void edac_device_reset_delay_period(struct edac_device_ctl_info
- *edac_dev, unsigned long value);
+ *edac_dev, unsigned long msec);
extern void edac_mc_reset_delay_period(unsigned long value);
extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
index 61b76ec226af..19fba258ae10 100644
--- a/drivers/edac/highbank_mc_edac.c
+++ b/drivers/edac/highbank_mc_edac.c
@@ -174,8 +174,10 @@ static int highbank_mc_probe(struct platform_device *pdev)
drvdata = mci->pvt_info;
platform_set_drvdata(pdev, mci);
- if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
- return -ENOMEM;
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ res = -ENOMEM;
+ goto free;
+ }
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -243,6 +245,7 @@ err2:
edac_mc_del_mc(&pdev->dev);
err:
devres_release_group(&pdev->dev, NULL);
+free:
edac_mc_free(mci);
return res;
}
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 29576922df78..a887c3313431 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -53,11 +53,10 @@ static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
if (unlikely(pci_enable_device(pdev) < 0)) {
edac_dbg(2, "Failed to enable device %02x:%02x.%x\n",
bus, dev, fun);
+ pci_dev_put(pdev);
return NULL;
}
- pci_dev_get(pdev);
-
return pdev;
}
diff --git a/drivers/edac/pl310_edac_l2.c b/drivers/edac/pl310_edac_l2.c
new file mode 100644
index 000000000000..57f2f5b022d8
--- /dev/null
+++ b/drivers/edac/pl310_edac_l2.c
@@ -0,0 +1,233 @@
+/*
+ * Pl310 L2 Cache EDAC Driver
+ *
+ * Copyright (C) 2013-2014 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <asm/hardware/cache-l2x0.h>
+#include "edac_module.h"
+
+/* Auxiliary control register definitions */
+#define L2X0_AUX_CTRL_PARITY_MASK BIT(21)
+
+/* Interrupt imask/status/clear register definitions */
+#define L2X0_INTR_PARRD_MASK 0x4
+#define L2X0_INTR_PARRT_MASK 0x2
+
+/**
+ * struct pl310_edac_l2_priv - Zynq L2 cache controller private instance data
+ * @base: Base address of the controller
+ * @irq: Interrupt number
+ */
+struct pl310_edac_l2_priv {
+ void __iomem *base;
+ int irq;
+};
+
+/**
+ * pl310_edac_l2_parityerr_check - Check controller status for parity errors
+ * @dci: Pointer to the edac device controller instance
+ *
+ * This routine is used to check and post parity errors
+ */
+static void pl310_edac_l2_parityerr_check(struct edac_device_ctl_info *dci)
+{
+ struct pl310_edac_l2_priv *priv = dci->pvt_info;
+ u32 regval;
+
+ regval = readl(priv->base + L2X0_RAW_INTR_STAT);
+ if (regval & L2X0_INTR_PARRD_MASK) {
+ /* Data parity error will be reported as correctable error */
+ writel(L2X0_INTR_PARRD_MASK, priv->base + L2X0_INTR_CLEAR);
+ edac_device_handle_ce(dci, 0, 0, dci->ctl_name);
+ }
+ if (regval & L2X0_INTR_PARRT_MASK) {
+ /* tag parity error will be reported as uncorrectable error */
+ writel(L2X0_INTR_PARRT_MASK, priv->base + L2X0_INTR_CLEAR);
+ edac_device_handle_ue(dci, 0, 0, dci->ctl_name);
+ }
+}
+
+/**
+ * pl310_edac_l2_int_handler - ISR function for the L2 cache controller
+ * @irq: Irq Number
+ * @device: Pointer to the edac device controller instance
+ *
+ * This routine is triggered whenever there is parity error detected
+ *
+ * Return: Always returns IRQ_HANDLED
+ */
+static irqreturn_t pl310_edac_l2_int_handler(int irq, void *device)
+{
+ pl310_edac_l2_parityerr_check((struct edac_device_ctl_info *)device);
+ return IRQ_HANDLED;
+}
+
+/**
+ * pl310_edac_l2_poll_handler - Poll the status reg for parity errors
+ * @dci: Pointer to the edac device controller instance
+ *
+ * This routine is used to check and post parity errors and is called by
+ * the EDAC polling thread
+ */
+static void pl310_edac_l2_poll_handler(struct edac_device_ctl_info *dci)
+{
+ pl310_edac_l2_parityerr_check(dci);
+}
+
+/**
+ * pl310_edac_l2_get_paritystate - check the parity enable/disable status
+ * @base: Pointer to the contoller base address
+ *
+ * This routine returns the parity enable/diable status for the controller
+ *
+ * Return: true/false - parity enabled/disabled.
+ */
+static bool pl310_edac_l2_get_paritystate(void __iomem *base)
+{
+ u32 regval;
+
+ regval = readl(base + L2X0_AUX_CTRL);
+ if (regval & L2X0_AUX_CTRL_PARITY_MASK)
+ return true;
+
+ return false;
+}
+
+/**
+ * pl310_edac_l2_probe - Check controller and bind driver
+ * @pdev: Pointer to the platform_device struct
+ *
+ * This routine probes a specific arm,pl310-cache instance for binding
+ * with the driver.
+ *
+ * Return: 0 if the controller instance was successfully bound to the
+ * driver; otherwise, < 0 on error.
+ */
+static int pl310_edac_l2_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci;
+ struct pl310_edac_l2_priv *priv;
+ int rc;
+ struct resource *res;
+ void __iomem *baseaddr;
+ u32 regval;
+
+ /* Get the data from the platform device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ baseaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(baseaddr))
+ return PTR_ERR(baseaddr);
+
+ /* Check for the ecc enable status */
+ if (!pl310_edac_l2_get_paritystate(baseaddr)) {
+ dev_err(&pdev->dev, "parity check not enabled\n");
+ return -ENXIO;
+ }
+
+ dci = edac_device_alloc_ctl_info(sizeof(*priv), "l2cache",
+ 1, "L", 1, 1, NULL, 0,
+ edac_device_alloc_index());
+ if (!dci)
+ return -ENOMEM;
+
+ priv = dci->pvt_info;
+ priv->base = baseaddr;
+ dci->dev = &pdev->dev;
+ dci->mod_name = "pl310_edac_l2";
+ dci->ctl_name = "pl310_l2_controller";
+ dci->dev_name = dev_name(&pdev->dev);
+
+ priv->irq = platform_get_irq(pdev, 0);
+ rc = devm_request_irq(&pdev->dev, priv->irq,
+ pl310_edac_l2_int_handler,
+ 0, dev_name(&pdev->dev), (void *)dci);
+ if (rc < 0) {
+ dci->edac_check = pl310_edac_l2_poll_handler;
+ edac_op_state = EDAC_OPSTATE_POLL;
+ }
+
+ rc = edac_device_add_device(dci);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to register with EDAC core\n");
+ goto free_ctl_info;
+ }
+
+ if (edac_op_state != EDAC_OPSTATE_POLL) {
+ regval = readl(priv->base + L2X0_INTR_MASK);
+ regval |= (L2X0_INTR_PARRD_MASK | L2X0_INTR_PARRT_MASK);
+ writel(regval, priv->base + L2X0_INTR_MASK);
+ }
+
+ return rc;
+
+free_ctl_info:
+ edac_device_free_ctl_info(dci);
+
+ return rc;
+}
+
+/**
+ * pl310_edac_l2_remove - Unbind driver from controller
+ * @pdev: Pointer to the platform_device struct
+ *
+ * This routine unbinds the EDAC device controller instance associated
+ * with the specified arm,pl310-cache controller described by the
+ * OpenFirmware device tree node passed as a parameter.
+ *
+ * Return: Always returns 0
+ */
+static int pl310_edac_l2_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
+ struct pl310_edac_l2_priv *priv = dci->pvt_info;
+ u32 regval;
+
+ if (edac_op_state != EDAC_OPSTATE_POLL) {
+ regval = readl(priv->base + L2X0_INTR_MASK);
+ regval &= ~(L2X0_INTR_PARRD_MASK | L2X0_INTR_PARRT_MASK);
+ writel(regval, priv->base + L2X0_INTR_MASK);
+ }
+
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(dci);
+
+ return 0;
+}
+
+/* Device tree node type and compatible tuples this driver can match on */
+static const struct of_device_id pl310_edac_l2_match[] = {
+ { .compatible = "arm,pl310-cache", },
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, pl310_edac_l2_match);
+
+static struct platform_driver pl310_edac_l2_driver = {
+ .driver = {
+ .name = "pl310-edac-l2",
+ .of_match_table = pl310_edac_l2_match,
+ },
+ .probe = pl310_edac_l2_probe,
+ .remove = pl310_edac_l2_remove,
+};
+
+module_platform_driver(pl310_edac_l2_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("pl310 L2 EDAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index 97a27e42dd61..c45519f59dc1 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -252,7 +252,7 @@ clear:
static int
dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
{
- struct llcc_drv_data *drv = edev_ctl->pvt_info;
+ struct llcc_drv_data *drv = edev_ctl->dev->platform_data;
int ret;
ret = dump_syn_reg_values(drv, bank, err_type);
@@ -289,7 +289,7 @@ static irqreturn_t
llcc_ecc_irq_handler(int irq, void *edev_ctl)
{
struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
- struct llcc_drv_data *drv = edac_dev_ctl->pvt_info;
+ struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
irqreturn_t irq_rc = IRQ_NONE;
u32 drp_error, trp_error, i;
int ret;
@@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
edev_ctl->dev_name = dev_name(dev);
edev_ctl->ctl_name = "llcc";
edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
- edev_ctl->pvt_info = llcc_driv_data;
rc = edac_device_add_device(edev_ctl);
if (rc)
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index b1d717cb8df9..f382cc70f9aa 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -459,7 +459,7 @@ rir_found:
}
static u8 skx_close_row[] = {
- 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
+ 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33, 34
};
static u8 skx_close_column[] = {
@@ -467,7 +467,7 @@ static u8 skx_close_column[] = {
};
static u8 skx_open_row[] = {
- 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
+ 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34
};
static u8 skx_open_column[] = {
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index 34be60fe6892..0fffb393415b 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -1133,7 +1133,7 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
decode_register(other, OCX_OTHER_SIZE,
ocx_com_errors, ctx->reg_com_int);
- strncat(msg, other, OCX_MESSAGE_SIZE);
+ strlcat(msg, other, OCX_MESSAGE_SIZE);
for (lane = 0; lane < OCX_RX_LANES; lane++)
if (ctx->reg_com_int & BIT(lane)) {
@@ -1142,12 +1142,12 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
lane, ctx->reg_lane_int[lane],
lane, ctx->reg_lane_stat11[lane]);
- strncat(msg, other, OCX_MESSAGE_SIZE);
+ strlcat(msg, other, OCX_MESSAGE_SIZE);
decode_register(other, OCX_OTHER_SIZE,
ocx_lane_errors,
ctx->reg_lane_int[lane]);
- strncat(msg, other, OCX_MESSAGE_SIZE);
+ strlcat(msg, other, OCX_MESSAGE_SIZE);
}
if (ctx->reg_com_int & OCX_COM_INT_CE)
@@ -1217,7 +1217,7 @@ static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id)
decode_register(other, OCX_OTHER_SIZE,
ocx_com_link_errors, ctx->reg_com_link_int);
- strncat(msg, other, OCX_MESSAGE_SIZE);
+ strlcat(msg, other, OCX_MESSAGE_SIZE);
if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE)
edac_device_handle_ue(ocx->edac_dev, 0, 0, msg);
@@ -1896,7 +1896,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int);
- strncat(msg, other, L2C_MESSAGE_SIZE);
+ strlcat(msg, other, L2C_MESSAGE_SIZE);
if (ctx->reg_int & mask_ue)
edac_device_handle_ue(l2c->edac_dev, 0, 0, msg);
diff --git a/drivers/edac/zynqmp_ocm_edac.c b/drivers/edac/zynqmp_ocm_edac.c
new file mode 100644
index 000000000000..4957a8c9d02d
--- /dev/null
+++ b/drivers/edac/zynqmp_ocm_edac.c
@@ -0,0 +1,651 @@
+/*
+ * Xilinx ZynqMP OCM ECC Driver
+ * This driver is based on mpc85xx_edac.c drivers
+ *
+ * Copyright (C) 2016 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details
+ */
+
+#include <linux/edac.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include "edac_module.h"
+
+#define ZYNQMP_OCM_EDAC_MSG_SIZE 256
+
+#define ZYNQMP_OCM_EDAC_STRING "zynqmp_ocm"
+#define ZYNQMP_OCM_EDAC_MOD_VER "1"
+
+/* Controller registers */
+#define CTRL_OFST 0x0
+#define OCM_ISR_OFST 0x04
+#define OCM_IMR_OFST 0x08
+#define OCM_IEN_OFST 0x0C
+#define OCM_IDS_OFST 0x10
+
+/* ECC control register */
+#define ECC_CTRL_OFST 0x14
+
+/* Correctable error info registers */
+#define CE_FFA_OFST 0x1C
+#define CE_FFD0_OFST 0x20
+#define CE_FFD1_OFST 0x24
+#define CE_FFD2_OFST 0x28
+#define CE_FFD3_OFST 0x2C
+#define CE_FFE_OFST 0x30
+
+/* Uncorrectable error info registers */
+#define UE_FFA_OFST 0x34
+#define UE_FFD0_OFST 0x38
+#define UE_FFD1_OFST 0x3C
+#define UE_FFD2_OFST 0x40
+#define UE_FFD3_OFST 0x44
+#define UE_FFE_OFST 0x48
+
+/* ECC control register bit field definitions */
+#define ECC_CTRL_CLR_CE_ERR 0x40
+#define ECC_CTRL_CLR_UE_ERR 0x80
+
+/* Fault injection data and count registers */
+#define OCM_FID0_OFST 0x4C
+#define OCM_FID1_OFST 0x50
+#define OCM_FID2_OFST 0x54
+#define OCM_FID3_OFST 0x58
+#define OCM_FIC_OFST 0x74
+
+/* Interrupt masks */
+#define OCM_CEINTR_MASK 0x40
+#define OCM_UEINTR_MASK 0x80
+#define OCM_ECC_ENABLE_MASK 0x1
+#define OCM_FICOUNT_MASK 0x0FFFFFFF
+#define OCM_BASEVAL 0xFFFC0000
+#define EDAC_DEVICE "ZynqMP-OCM"
+#define OCM_CEUE_MASK 0xC0
+
+/**
+ * struct ecc_error_info - ECC error log information
+ * @addr: Fault generated at this address
+ * @data0: Generated fault data
+ * @data1: Generated fault data
+ */
+struct ecc_error_info {
+ u32 addr;
+ u32 data0;
+ u32 data1;
+};
+
+/**
+ * struct zynqmp_ocm_ecc_status - ECC status information to report
+ * @ce_cnt: Correctable error count
+ * @ue_cnt: Uncorrectable error count
+ * @ceinfo: Correctable error log information
+ * @ueinfo: Uncorrectable error log information
+ */
+struct zynqmp_ocm_ecc_status {
+ u32 ce_cnt;
+ u32 ue_cnt;
+ struct ecc_error_info ceinfo;
+ struct ecc_error_info ueinfo;
+};
+
+/**
+ * struct zynqmp_ocm_edac_priv - OCM controller private instance data
+ * @baseaddr: Base address of the OCM controller
+ * @message: Buffer for framing the event specific info
+ * @stat: ECC status information
+ * @p_data: Pointer to platform data
+ * @ce_cnt: Correctable Error count
+ * @ue_cnt: Uncorrectable Error count
+ * @ce_bitpos: Bit position for Correctable Error
+ * @ue_bitpos0: First bit position for Uncorrectable Error
+ * @ue_bitpos1: Second bit position for Uncorrectable Error
+ */
+struct zynqmp_ocm_edac_priv {
+ void __iomem *baseaddr;
+ char message[ZYNQMP_OCM_EDAC_MSG_SIZE];
+ struct zynqmp_ocm_ecc_status stat;
+ const struct zynqmp_ocm_platform_data *p_data;
+ u32 ce_cnt;
+ u32 ue_cnt;
+ u8 ce_bitpos;
+ u8 ue_bitpos0;
+ u8 ue_bitpos1;
+};
+
+/**
+ * zynqmp_ocm_edac_geterror_info - Get the current ECC error info
+ * @base: Pointer to the base address of the OCM controller
+ * @p: Pointer to the OCM ECC status structure
+ * @mask: Status register mask value
+ *
+ * Determines whether any ECC error has occurred
+ *
+ */
+static void zynqmp_ocm_edac_geterror_info(void __iomem *base,
+ struct zynqmp_ocm_ecc_status *p, int mask)
+{
+ if (mask & OCM_CEINTR_MASK) {
+ p->ce_cnt++;
+ p->ceinfo.data0 = readl(base + CE_FFD0_OFST);
+ p->ceinfo.data1 = readl(base + CE_FFD1_OFST);
+ p->ceinfo.addr = (OCM_BASEVAL | readl(base + CE_FFA_OFST));
+ writel(ECC_CTRL_CLR_CE_ERR, base + OCM_ISR_OFST);
+ } else if (mask & OCM_UEINTR_MASK) {
+ p->ue_cnt++;
+ p->ueinfo.data0 = readl(base + UE_FFD0_OFST);
+ p->ueinfo.data1 = readl(base + UE_FFD1_OFST);
+ p->ueinfo.addr = (OCM_BASEVAL | readl(base + UE_FFA_OFST));
+ writel(ECC_CTRL_CLR_UE_ERR, base + OCM_ISR_OFST);
+ }
+}
+
+/**
+ * zynqmp_ocm_edac_handle_error - Handle controller error types CE and UE
+ * @dci: Pointer to the edac device controller instance
+ * @p: Pointer to the ocm ecc status structure
+ *
+ * Handles the controller ECC correctable and uncorrectable errors.
+ */
+static void zynqmp_ocm_edac_handle_error(struct edac_device_ctl_info *dci,
+ struct zynqmp_ocm_ecc_status *p)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+ struct ecc_error_info *pinf;
+
+ if (p->ce_cnt) {
+ pinf = &p->ceinfo;
+ snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE,
+ "\n\rOCM ECC error type :%s\n\r"
+ "Addr: [0x%X]\n\rFault Data[31:0]: [0x%X]\n\r"
+ "Fault Data[63:32]: [0x%X]",
+ "CE", pinf->addr, pinf->data0, pinf->data1);
+ edac_device_handle_ce(dci, 0, 0, priv->message);
+ }
+
+ if (p->ue_cnt) {
+ pinf = &p->ueinfo;
+ snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE,
+ "\n\rOCM ECC error type :%s\n\r"
+ "Addr: [0x%X]\n\rFault Data[31:0]: [0x%X]\n\r"
+ "Fault Data[63:32]: [0x%X]",
+ "UE", pinf->addr, pinf->data0, pinf->data1);
+ edac_device_handle_ue(dci, 0, 0, priv->message);
+ }
+
+ memset(p, 0, sizeof(*p));
+}
+
+/**
+ * zynqmp_ocm_edac_intr_handler - ISR routine
+ * @irq: IRQ number
+ * @dev_id: device ID pointer
+ *
+ * This is the ISR routine called by the EDAC core interrupt thread.
+ * Used to check and post ECC errors.
+ *
+ * Return: IRQ_NONE if the interrupt is not ours, IRQ_HANDLED otherwise
+ */
+static irqreturn_t zynqmp_ocm_edac_intr_handler(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *dci = dev_id;
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+ int regval;
+
+ regval = readl(priv->baseaddr + OCM_ISR_OFST);
+ if (!(regval & OCM_CEUE_MASK))
+ return IRQ_NONE;
+
+ zynqmp_ocm_edac_geterror_info(priv->baseaddr,
+ &priv->stat, regval);
+
+ priv->ce_cnt += priv->stat.ce_cnt;
+ priv->ue_cnt += priv->stat.ue_cnt;
+ zynqmp_ocm_edac_handle_error(dci, &priv->stat);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * zynqmp_ocm_edac_get_eccstate - Return the controller ECC status
+ * @base: Pointer to the OCM controller base address
+ *
+ * Get the ECC enable/disable status for the controller
+ *
+ * Return: true if ECC is enabled, false otherwise.
+ */
+static bool zynqmp_ocm_edac_get_eccstate(void __iomem *base)
+{
+ return readl(base + ECC_CTRL_OFST) & OCM_ECC_ENABLE_MASK;
+}
+
+static const struct of_device_id zynqmp_ocm_edac_match[] = {
+ { .compatible = "xlnx,zynqmp-ocmc-1.0"},
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_ocm_edac_match);
+
+/**
+ * zynqmp_ocm_edac_inject_fault_count_show - Shows fault injection count
+ * @dci: Pointer to the edac device struct
+ * @data: Pointer to user data
+ *
+ * Shows the fault injection count; once the counter reaches
+ * zero, errors are injected.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_fault_count_show(
+ struct edac_device_ctl_info *dci, char *data)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+ return sprintf(data, "FIC: 0x%x\n\r",
+ readl(priv->baseaddr + OCM_FIC_OFST));
+}
+
+/**
+ * zynqmp_ocm_edac_inject_fault_count_store - Write the fault injection count
+ * @dci: Pointer to the edac device struct
+ * @data: Pointer to user data
+ * @count: read the size bytes from buffer
+ *
+ * Update the fault injection count register; once the counter reaches
+ * zero, errors are injected.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_fault_count_store(
+ struct edac_device_ctl_info *dci, const char *data,
+ size_t count)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+ u32 ficount;
+
+ if (!data)
+ return -EFAULT;
+
+ if (kstrtouint(data, 0, &ficount))
+ return -EINVAL;
+
+ ficount &= OCM_FICOUNT_MASK;
+ writel(ficount, priv->baseaddr + OCM_FIC_OFST);
+
+ return count;
+}
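
Editorial note (not part of this patch): a value written here is masked
with OCM_FICOUNT_MASK and programmed into the FIC register; per the
comment above, injection fires when the down-counter reaches zero.
Illustrative usage (the path depends on the registered instance name):

	echo 0x10 > /sys/devices/system/edac/zynqmp_ocm/inject_fault_count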
+
+/**
+ * zynqmp_ocm_edac_inject_cebitpos_show - Shows CE bit position
+ * @dci: Pointer to the edac device struct
+ * @data: Pointer to user data
+ *
+ * Shows the correctable error bit position.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_cebitpos_show(struct edac_device_ctl_info
+ *dci, char *data)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+ if (priv->ce_bitpos <= 31)
+ return sprintf(data, "Fault Injection Data Reg: [0x%x]\n\r",
+ ((readl(priv->baseaddr + OCM_FID0_OFST))));
+
+ return sprintf(data, "Fault Injection Data Reg: [0x%x]\n\r",
+ ((readl(priv->baseaddr + OCM_FID1_OFST))));
+}
+
+/**
+ * zynqmp_ocm_edac_inject_cebitpos_store - Set CE bit position
+ * @dci: Pointer to the edac device struct
+ * @data: Pointer to user data
+ * @count: read the size bytes from buffer
+ *
+ * Set any one bit position to inject a CE error.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_cebitpos_store(
+ struct edac_device_ctl_info *dci, const char *data,
+ size_t count)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+ if (!data)
+ return -EFAULT;
+
+ if (kstrtou8(data, 0, &priv->ce_bitpos))
+ return -EINVAL;
+
+ if (priv->ce_bitpos <= 31) {
+ writel(1 << priv->ce_bitpos, priv->baseaddr + OCM_FID0_OFST);
+ writel(0, priv->baseaddr + OCM_FID1_OFST);
+ } else if (priv->ce_bitpos >= 32 && priv->ce_bitpos <= 63) {
+ writel(1 << (priv->ce_bitpos - 32),
+ priv->baseaddr + OCM_FID1_OFST);
+ writel(0, priv->baseaddr + OCM_FID0_OFST);
+ } else {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Bit number > 64 is not valid\n");
+ }
+
+ return count;
+}
+
+/**
+ * zynqmp_ocm_edac_inject_uebitpos0_show - Shows UE bit position0
+ * @dci: Pointer to the edac device struct
+ * @data: Pointer to user data
+ *
+ * Shows one of the bit positions configured for the UE error
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_uebitpos0_show(
+ struct edac_device_ctl_info *dci, char *data)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+ if (priv->ue_bitpos0 <= 31)
+ return sprintf(data, "Fault Injection Data Reg: [0x%x]\n\r",
+ ((readl(priv->baseaddr + OCM_FID0_OFST))));
+
+ return sprintf(data, "Fault Injection Data Reg: [0x%x]\n\r",
+ ((readl(priv->baseaddr + OCM_FID1_OFST))));
+}
+
+/**
+ * zynqmp_ocm_edac_inject_uebitpos0_store - set UE bit position0
+ * @dci: Pointer to the edac device struct
+ * @data: Pointer to user data
+ * @count: read the size bytes from buffer
+ *
+ * Set the first bit position for UE error generation; any two bit
+ * positions must be configured to inject a UE error.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_uebitpos0_store(
+ struct edac_device_ctl_info *dci,
+ const char *data, size_t count)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+ if (!data)
+ return -EFAULT;
+
+ if (kstrtou8(data, 0, &priv->ue_bitpos0))
+ return -EINVAL;
+
+ if (priv->ue_bitpos0 <= 31)
+ writel(1 << priv->ue_bitpos0, priv->baseaddr + OCM_FID0_OFST);
+ else if (priv->ue_bitpos0 >= 32 && priv->ue_bitpos0 <= 63)
+ writel(1 << (priv->ue_bitpos0 - 32),
+ priv->baseaddr + OCM_FID1_OFST);
+ else
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Bit position > 64 is not valid\n");
+ edac_printk(KERN_INFO, EDAC_DEVICE,
+ "Set another bit position for UE\n");
+ return count;
+}
+
+/**
+ * zynqmp_ocm_edac_inject_uebitpos1_show - Shows UE bit position1
+ * @dci: Pointer to the edac device struct
+ * @data: Pointer to user data
+ *
+ * Shows the second bit position configured for the UE error
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_uebitpos1_show(
+ struct edac_device_ctl_info *dci, char *data)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+ if (priv->ue_bitpos1 <= 31)
+ return sprintf(data, "Fault Injection Data Reg: [0x%x]\n\r",
+ ((readl(priv->baseaddr + OCM_FID0_OFST))));
+
+ return sprintf(data, "Fault Injection Data Reg: [0x%x]\n\r",
+ ((readl(priv->baseaddr + OCM_FID1_OFST))));
+
+}
+
+/**
+ * zynqmp_ocm_edac_inject_uebitpos1_store - Set UE second bit position
+ * @dci: Pointer to the edac device struct
+ * @data: Pointer to user data
+ * @count: read the size bytes from buffer
+ *
+ * Set the second bit position for UE error generation; any two bit
+ * positions must be configured to inject a UE error.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_uebitpos1_store(
+ struct edac_device_ctl_info *dci, const char *data,
+ size_t count)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+ u32 mask;
+
+ if (!data)
+ return -EFAULT;
+
+ if (kstrtou8(data, 0, &priv->ue_bitpos1))
+ return -EINVAL;
+
+ if (priv->ue_bitpos0 == priv->ue_bitpos1) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Bit positions should not be equal\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If both bit positions refer to the lower 32 bits of data, configure
+ * only the FID0 register; if both refer to the upper 32 bits,
+ * configure only the FID1 register.
+ */
+ if (priv->ue_bitpos0 <= 31 &&
+ priv->ue_bitpos1 <= 31) {
+ mask = (1 << priv->ue_bitpos0);
+ mask |= (1 << priv->ue_bitpos1);
+ writel(mask, priv->baseaddr + OCM_FID0_OFST);
+ writel(0, priv->baseaddr + OCM_FID1_OFST);
+ } else if ((priv->ue_bitpos0 >= 32 && priv->ue_bitpos0 <= 63) &&
+ (priv->ue_bitpos1 >= 32 && priv->ue_bitpos1 <= 63)) {
+ mask = (1 << (priv->ue_bitpos0 - 32));
+ mask |= (1 << (priv->ue_bitpos1 - 32));
+ writel(mask, priv->baseaddr + OCM_FID1_OFST);
+ writel(0, priv->baseaddr + OCM_FID0_OFST);
+ }
+
+ /*
+ * If one bit position refers to the lower 32 bits and the other to
+ * the upper 32 bits, configure FID0/FID1 based on uebitpos1 only
+ * (uebitpos0 already programmed its register in its own store).
+ */
+ if ((priv->ue_bitpos0 <= 31) &&
+ (priv->ue_bitpos1 >= 32 && priv->ue_bitpos1 <= 63)) {
+ writel(1 << (priv->ue_bitpos1 - 32),
+ priv->baseaddr + OCM_FID1_OFST);
+ } else if ((priv->ue_bitpos0 >= 32 && priv->ue_bitpos0 <= 63) &&
+ (priv->ue_bitpos1 <= 31)) {
+ writel(1 << priv->ue_bitpos1,
+ priv->baseaddr + OCM_FID0_OFST);
+ } else if (priv->ue_bitpos0 > 63 || priv->ue_bitpos1 > 63) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Bit position > 64 is not valid, Valid bits:[63:0]\n");
+ }
+
+ edac_printk(KERN_INFO, EDAC_DEVICE,
+ "UE at Bit Position0: %d Bit Position1: %d\n",
+ priv->ue_bitpos0, priv->ue_bitpos1);
+ return count;
+}
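
Editorial worked example (not part of this patch): writing 5 to
inject_uebitpos0 and then 36 to inject_uebitpos1 leaves bit 5 set in FID0
(programmed by the pos0 store) and sets bit 4 (36 - 32) in FID1 here, so a
two-bit fault spanning both 32-bit data words is injected when the fault
count expires, which the hardware reports as an uncorrectable error.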
+
+static struct edac_dev_sysfs_attribute zynqmp_ocm_edac_sysfs_attributes[] = {
+ {
+ .attr = {
+ .name = "inject_cebitpos",
+ .mode = (0644)
+ },
+ .show = zynqmp_ocm_edac_inject_cebitpos_show,
+ .store = zynqmp_ocm_edac_inject_cebitpos_store},
+ {
+ .attr = {
+ .name = "inject_uebitpos0",
+ .mode = (0644)
+ },
+ .show = zynqmp_ocm_edac_inject_uebitpos0_show,
+ .store = zynqmp_ocm_edac_inject_uebitpos0_store},
+ {
+ .attr = {
+ .name = "inject_uebitpos1",
+ .mode = (0644)
+ },
+ .show = zynqmp_ocm_edac_inject_uebitpos1_show,
+ .store = zynqmp_ocm_edac_inject_uebitpos1_store},
+ {
+ .attr = {
+ .name = "inject_fault_count",
+ .mode = (0644)
+ },
+ .show = zynqmp_ocm_edac_inject_fault_count_show,
+ .store = zynqmp_ocm_edac_inject_fault_count_store},
+ /* End of list */
+ {
+ .attr = {.name = NULL}
+ }
+};
+
+/**
+ * zynqmp_set_ocm_edac_sysfs_attributes - create sysfs attributes
+ * @edac_dev: Pointer to the edac device struct
+ *
+ * Creates sysfs entries for error injection
+ * Return: None.
+ */
+static void zynqmp_set_ocm_edac_sysfs_attributes(struct edac_device_ctl_info
+ *edac_dev)
+{
+ edac_dev->sysfs_attributes = zynqmp_ocm_edac_sysfs_attributes;
+}
+
+/**
+ * zynqmp_ocm_edac_probe - Check controller and bind driver
+ * @pdev: Pointer to the platform_device struct
+ *
+ * Probes a specific controller instance for binding with the driver.
+ *
+ * Return: 0 if the controller instance was successfully bound to the
+ * driver; otherwise, < 0 on error.
+ */
+static int zynqmp_ocm_edac_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci;
+ struct zynqmp_ocm_edac_priv *priv;
+ int irq, status;
+ struct resource *res;
+ void __iomem *baseaddr;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ baseaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(baseaddr))
+ return PTR_ERR(baseaddr);
+
+ if (!zynqmp_ocm_edac_get_eccstate(baseaddr)) {
+ edac_printk(KERN_INFO, EDAC_DEVICE,
+ "ECC not enabled - Disabling EDAC driver\n");
+ return -ENXIO;
+ }
+
+ dci = edac_device_alloc_ctl_info(sizeof(*priv), ZYNQMP_OCM_EDAC_STRING,
+ 1, ZYNQMP_OCM_EDAC_STRING, 1, 0, NULL, 0,
+ edac_device_alloc_index());
+ if (!dci) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to allocate EDAC device\n");
+ return -ENOMEM;
+ }
+
+ priv = dci->pvt_info;
+ platform_set_drvdata(pdev, dci);
+ dci->dev = &pdev->dev;
+ priv->baseaddr = baseaddr;
+ dci->mod_name = pdev->dev.driver->name;
+ dci->ctl_name = ZYNQMP_OCM_EDAC_STRING;
+ dci->dev_name = dev_name(&pdev->dev);
+
+ zynqmp_set_ocm_edac_sysfs_attributes(dci);
+ if (edac_device_add_device(dci)) {
+ status = -ENXIO;
+ goto free_dev_ctl;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "No IRQ in DT: %d\n", irq);
+ status = irq;
+ goto free_edac_dev;
+ }
+
+ status = devm_request_irq(&pdev->dev, irq,
+ zynqmp_ocm_edac_intr_handler,
+ 0, dev_name(&pdev->dev), dci);
+ if (status < 0) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "Failed to request Irq\n");
+ goto free_edac_dev;
+ }
+
+ writel(OCM_CEUE_MASK, priv->baseaddr + OCM_IEN_OFST);
+
+ return 0;
+
+free_edac_dev:
+ edac_device_del_device(&pdev->dev);
+free_dev_ctl:
+ edac_device_free_ctl_info(dci);
+
+ return status;
+}
+
+/**
+ * zynqmp_ocm_edac_remove - Unbind driver from controller
+ * @pdev: Pointer to the platform_device struct
+ *
+ * Return: Unconditionally 0
+ */
+static int zynqmp_ocm_edac_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+ writel(OCM_CEUE_MASK, priv->baseaddr + OCM_IDS_OFST);
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(dci);
+
+ return 0;
+}
+
+static struct platform_driver zynqmp_ocm_edac_driver = {
+ .driver = {
+ .name = "zynqmp-ocm-edac",
+ .of_match_table = zynqmp_ocm_edac_match,
+ },
+ .probe = zynqmp_ocm_edac_probe,
+ .remove = zynqmp_ocm_edac_remove,
+};
+
+module_platform_driver(zynqmp_ocm_edac_driver);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("ZynqMP OCM ECC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 6b905c3d30f4..12f9ae2aac11 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -196,6 +196,14 @@ static const struct __extcon_info {
* @attr_name: "name" sysfs entry
* @attr_state: "state" sysfs entry
* @attrs: the array pointing to attr_name and attr_state for attr_g
+ * @usb_propval: the array of USB connector properties
+ * @chg_propval: the array of charger connector properties
+ * @jack_propval: the array of jack connector properties
+ * @disp_propval: the array of display connector properties
+ * @usb_bits: the bit array of the USB connector property capabilities
+ * @chg_bits: the bit array of the charger connector property capabilities
+ * @jack_bits: the bit array of the jack connector property capabilities
+ * @disp_bits: the bit array of the display connector property capabilities
*/
struct extcon_cable {
struct extcon_dev *edev;
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index f3b3953cac83..d446a7262941 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -429,7 +429,23 @@ static void bm_work(struct work_struct *work)
*/
card->bm_generation = generation;
- if (root_device == NULL) {
+ if (card->gap_count == 0) {
+ /*
+ * If self IDs have inconsistent gap counts, do a
+ * bus reset ASAP. The config rom read might never
+ * complete, so don't wait for it. However, still
+ * send a PHY configuration packet prior to the
+ * bus reset. The PHY configuration packet might
+ * fail, but 1394-2008 8.4.5.2 explicitly permits
+ * it in this case, so it should be safe to try.
+ */
+ new_root_id = local_id;
+ /*
+ * We must always send a bus reset if the gap count
+ * is inconsistent, so bypass the 5-reset limit.
+ */
+ card->bm_retries = 0;
+ } else if (root_device == NULL) {
/*
* Either link_on is false, or we failed to read the
* config rom. In either case, pick another root.
@@ -484,7 +500,19 @@ static void bm_work(struct work_struct *work)
fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
new_root_id, gap_count);
fw_send_phy_config(card, new_root_id, generation, gap_count);
- reset_bus(card, true);
+ /*
+ * Where possible, use a short bus reset to minimize
+ * disruption to isochronous transfers. But in the event
+ * of a gap count inconsistency, use a long bus reset.
+ *
+ * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
+ * may set different gap counts after a bus reset. On a mixed
+ * 1394/1394a bus, a short bus reset can get doubled. Some
+ * nodes may treat the double reset as one bus reset and others
+ * may treat it as two, causing a gap count inconsistency
+ * again. Using a long bus reset prevents this.
+ */
+ reset_bus(card, card->gap_count != 0);
/* Will allocate broadcast channel after the reset. */
goto out;
}
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 3a43e5d6ed3b..4fe36d549d61 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -818,8 +818,10 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
r = container_of(resource, struct inbound_transaction_resource,
resource);
- if (is_fcp_request(r->request))
+ if (is_fcp_request(r->request)) {
+ kfree(r->data);
goto out;
+ }
if (a->length != fw_get_response_length(r->request)) {
ret = -EINVAL;
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index b785e936244f..82aacff0c4f9 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -100,10 +100,9 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
* @buf: where to put the string
* @size: size of @buf, in bytes
*
- * The string is taken from a minimal ASCII text descriptor leaf after
- * the immediate entry with @key. The string is zero-terminated.
- * An overlong string is silently truncated such that it and the
- * zero byte fit into @size.
+ * The string is taken from a minimal ASCII text descriptor leaf just after the entry with the
+ * @key. The string is zero-terminated. An overlong string is silently truncated such that it
+ * and the zero byte fit into @size.
*
* Returns strlen(buf) or a negative error code.
*/
@@ -719,14 +718,11 @@ static void create_units(struct fw_device *device)
fw_unit_attributes,
&unit->attribute_group);
- if (device_register(&unit->device) < 0)
- goto skip_unit;
-
fw_device_get(device);
- continue;
-
- skip_unit:
- kfree(unit);
+ if (device_register(&unit->device) < 0) {
+ put_device(&unit->device);
+ continue;
+ }
}
}
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 522f3addb5bd..6603f13f5de9 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -279,6 +279,51 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_TI_SLLZ059 0x20
#define QUIRK_IR_WAKE 0x40
+// On the PCI Express Root Complex of any type of AMD Ryzen machine, VIA VT6306/6307/6308 with
+// Asmedia ASM1083/1085 brings an inconvenience: read accesses to the 'Isochronous Cycle Timer'
+// register (at offset 0xf0 in PCI I/O space) often cause an unexpected system reboot. The
+// mechanism is not clear, since read accesses to the other registers (e.g. the 'Node ID'
+// register) are safe; it is probably due to detection of some type of PCIe error.
+#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ 0x80000000
+
+#if IS_ENABLED(CONFIG_X86)
+
+static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
+{
+ return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
+}
+
+#define PCI_DEVICE_ID_ASMEDIA_ASM108X 0x1080
+
+static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
+{
+ const struct pci_dev *pcie_to_pci_bridge;
+
+ // Detect any type of AMD Ryzen machine.
+ if (!static_cpu_has(X86_FEATURE_ZEN))
+ return false;
+
+ // Detect VIA VT6306/6307/6308.
+ if (pdev->vendor != PCI_VENDOR_ID_VIA)
+ return false;
+ if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
+ return false;
+
+ // Detect Asmedia ASM1083/1085.
+ pcie_to_pci_bridge = pdev->bus->self;
+ if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
+ return false;
+ if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
+ return false;
+
+ return true;
+}
+
+#else
+#define has_reboot_by_cycle_timer_read_quirk(ohci) false
+#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev) false
+#endif
+
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
unsigned short vendor, device, revision, flags;
@@ -1099,7 +1144,7 @@ static void context_tasklet(unsigned long data)
static int context_add_buffer(struct context *ctx)
{
struct descriptor_buffer *desc;
- dma_addr_t uninitialized_var(bus_addr);
+ dma_addr_t bus_addr;
int offset;
/*
@@ -1289,7 +1334,7 @@ static int at_context_queue_packet(struct context *ctx,
struct fw_packet *packet)
{
struct fw_ohci *ohci = ctx->ohci;
- dma_addr_t d_bus, uninitialized_var(payload_bus);
+ dma_addr_t d_bus, payload_bus;
struct driver_data *driver_data;
struct descriptor *d, *last;
__le32 *header;
@@ -1717,6 +1762,9 @@ static u32 get_cycle_time(struct fw_ohci *ohci)
s32 diff01, diff12;
int i;
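+	/*
+	 * On hardware with this quirk, avoid reading the cycle timer
+	 * register entirely and report 0 instead of risking a reboot.
+	 */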
+ if (has_reboot_by_cycle_timer_read_quirk(ohci))
+ return 0;
+
c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
if (ohci->quirks & QUIRK_CYCLE_TIMER) {
@@ -2445,7 +2493,7 @@ static int ohci_set_config_rom(struct fw_card *card,
{
struct fw_ohci *ohci;
__be32 *next_config_rom;
- dma_addr_t uninitialized_var(next_config_rom_bus);
+ dma_addr_t next_config_rom_bus;
ohci = fw_ohci(card);
@@ -2933,10 +2981,10 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
int type, int channel, size_t header_size)
{
struct fw_ohci *ohci = fw_ohci(card);
- struct iso_context *uninitialized_var(ctx);
- descriptor_callback_t uninitialized_var(callback);
- u64 *uninitialized_var(channels);
- u32 *uninitialized_var(mask), uninitialized_var(regs);
+ struct iso_context *ctx;
+ descriptor_callback_t callback;
+ u64 *channels;
+ u32 *mask, regs;
int index, ret = -EBUSY;
spin_lock_irq(&ohci->lock);
@@ -3619,6 +3667,9 @@ static int pci_probe(struct pci_dev *dev,
if (param_quirks)
ohci->quirks = param_quirks;
+ if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev))
+ ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;
+
/*
* Because dma_alloc_coherent() allocates at least one page,
* we save space by using a common buffer for the AR request/
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index f0b055bc027c..9dae841b6167 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -737,6 +737,39 @@ static int scmi_mailbox_check(struct device_node *np, int idx)
idx, NULL);
}
+static int scmi_mailbox_chan_validate(struct device *cdev)
+{
+ int num_mb, num_sh, ret = 0;
+ struct device_node *np = cdev->of_node;
+
+ num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+ num_sh = of_count_phandle_with_args(np, "shmem", NULL);
+ /* Bail out if mboxes and shmem descriptors are inconsistent */
+ if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) {
+ dev_warn(cdev, "Invalid channel descriptor for '%s'\n",
+ of_node_full_name(np));
+ return -EINVAL;
+ }
+
+ if (num_sh > 1) {
+ struct device_node *np_tx, *np_rx;
+
+ np_tx = of_parse_phandle(np, "shmem", 0);
+ np_rx = of_parse_phandle(np, "shmem", 1);
+ /* SCMI Tx and Rx shared mem areas have to be distinct */
+ if (!np_tx || !np_rx || np_tx == np_rx) {
+ dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
+ of_node_full_name(np));
+ ret = -EINVAL;
+ }
+
+ of_node_put(np_tx);
+ of_node_put(np_rx);
+ }
+
+ return ret;
+}
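+
+/*
+ * A channel node this validator accepts looks roughly like the sketch
+ * below (illustrative labels, not taken from a real dts): one mailbox
+ * per direction and two distinct shared memory areas.
+ *
+ *	mboxes = <&mailbox 0>, <&mailbox 1>;
+ *	shmem = <&cpu_scp_tx>, <&cpu_scp_rx>;
+ */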
+
static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
int prot_id, bool tx)
{
@@ -760,6 +793,10 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
goto idr_alloc;
}
+ ret = scmi_mailbox_chan_validate(dev);
+ if (ret)
+ return ret;
+
cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
if (!cinfo)
return -ENOMEM;
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 177874adccf0..b0c8962b9885 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -106,9 +106,28 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd_data->domains = domains;
scmi_pd_data->num_domains = num_domains;
+ dev_set_drvdata(dev, scmi_pd_data);
+
return of_genpd_add_provider_onecell(np, scmi_pd_data);
}
+static void scmi_pm_domain_remove(struct scmi_device *sdev)
+{
+ int i;
+ struct genpd_onecell_data *scmi_pd_data;
+ struct device *dev = &sdev->dev;
+ struct device_node *np = dev->of_node;
+
+ of_genpd_del_provider(np);
+
+ scmi_pd_data = dev_get_drvdata(dev);
+ for (i = 0; i < scmi_pd_data->num_domains; i++) {
+ if (!scmi_pd_data->domains[i])
+ continue;
+ pm_genpd_remove(scmi_pd_data->domains[i]);
+ }
+}
+
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_POWER },
{ },
@@ -118,6 +137,7 @@ MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_power_domain_driver = {
.name = "scmi-power-domain",
.probe = scmi_pm_domain_probe,
+ .remove = scmi_pm_domain_remove,
.id_table = scmi_id_table,
};
module_scmi_driver(scmi_power_domain_driver);
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index e2995ec14401..72634e9d8116 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -815,7 +815,7 @@ static int scpi_init_versions(struct scpi_drvinfo *info)
info->firmware_version = le32_to_cpu(caps.platform_version);
}
/* Ignore error if not implemented */
- if (scpi_info->is_legacy && ret == -EOPNOTSUPP)
+ if (info->is_legacy && ret == -EOPNOTSUPP)
return 0;
return ret;
@@ -905,13 +905,14 @@ static int scpi_probe(struct platform_device *pdev)
struct resource res;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ struct scpi_drvinfo *scpi_drvinfo;
- scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL);
- if (!scpi_info)
+ scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL);
+ if (!scpi_drvinfo)
return -ENOMEM;
if (of_match_device(legacy_scpi_of_match, &pdev->dev))
- scpi_info->is_legacy = true;
+ scpi_drvinfo->is_legacy = true;
count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
if (count < 0) {
@@ -919,19 +920,19 @@ static int scpi_probe(struct platform_device *pdev)
return -ENODEV;
}
- scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan),
- GFP_KERNEL);
- if (!scpi_info->channels)
+ scpi_drvinfo->channels =
+ devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL);
+ if (!scpi_drvinfo->channels)
return -ENOMEM;
- ret = devm_add_action(dev, scpi_free_channels, scpi_info);
+ ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo);
if (ret)
return ret;
- for (; scpi_info->num_chans < count; scpi_info->num_chans++) {
+ for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) {
resource_size_t size;
- int idx = scpi_info->num_chans;
- struct scpi_chan *pchan = scpi_info->channels + idx;
+ int idx = scpi_drvinfo->num_chans;
+ struct scpi_chan *pchan = scpi_drvinfo->channels + idx;
struct mbox_client *cl = &pchan->cl;
struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
@@ -975,45 +976,53 @@ static int scpi_probe(struct platform_device *pdev)
return ret;
}
- scpi_info->commands = scpi_std_commands;
+ scpi_drvinfo->commands = scpi_std_commands;
- platform_set_drvdata(pdev, scpi_info);
+ platform_set_drvdata(pdev, scpi_drvinfo);
- if (scpi_info->is_legacy) {
+ if (scpi_drvinfo->is_legacy) {
/* Replace with legacy variants */
scpi_ops.clk_set_val = legacy_scpi_clk_set_val;
- scpi_info->commands = scpi_legacy_commands;
+ scpi_drvinfo->commands = scpi_legacy_commands;
/* Fill priority bitmap */
for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++)
set_bit(legacy_hpriority_cmds[idx],
- scpi_info->cmd_priority);
+ scpi_drvinfo->cmd_priority);
}
- ret = scpi_init_versions(scpi_info);
+ scpi_info = scpi_drvinfo;
+
+ ret = scpi_init_versions(scpi_drvinfo);
if (ret) {
dev_err(dev, "incorrect or no SCP firmware found\n");
+ scpi_info = NULL;
return ret;
}
- if (scpi_info->is_legacy && !scpi_info->protocol_version &&
- !scpi_info->firmware_version)
+ if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version &&
+ !scpi_drvinfo->firmware_version)
dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n");
else
dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
FIELD_GET(PROTO_REV_MAJOR_MASK,
- scpi_info->protocol_version),
+ scpi_drvinfo->protocol_version),
FIELD_GET(PROTO_REV_MINOR_MASK,
- scpi_info->protocol_version),
+ scpi_drvinfo->protocol_version),
FIELD_GET(FW_REV_MAJOR_MASK,
- scpi_info->firmware_version),
+ scpi_drvinfo->firmware_version),
FIELD_GET(FW_REV_MINOR_MASK,
- scpi_info->firmware_version),
+ scpi_drvinfo->firmware_version),
FIELD_GET(FW_REV_PATCH_MASK,
- scpi_info->firmware_version));
- scpi_info->scpi_ops = &scpi_ops;
+ scpi_drvinfo->firmware_version));
+
+ scpi_drvinfo->scpi_ops = &scpi_ops;
- return devm_of_platform_populate(dev);
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ scpi_info = NULL;
+
+ return ret;
}
static const struct of_device_id scpi_of_match[] = {
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index e497785cd99f..b0e8752174c6 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -44,6 +44,8 @@ static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;
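+
+/* Dynamic CPU hotplug state returned by CPUHP_AP_ONLINE_DYN registration */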
+static int sdei_hp_state;
+
struct sdei_event {
/* These three are protected by the sdei_list_lock */
struct list_head list;
@@ -305,8 +307,6 @@ int sdei_mask_local_cpu(void)
{
int err;
- WARN_ON_ONCE(preemptible());
-
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
if (err && err != -EIO) {
pr_warn_once("failed to mask CPU[%u]: %d\n",
@@ -319,6 +319,7 @@ int sdei_mask_local_cpu(void)
static void _ipi_mask_cpu(void *ignored)
{
+ WARN_ON_ONCE(preemptible());
sdei_mask_local_cpu();
}
@@ -326,8 +327,6 @@ int sdei_unmask_local_cpu(void)
{
int err;
- WARN_ON_ONCE(preemptible());
-
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
if (err && err != -EIO) {
pr_warn_once("failed to unmask CPU[%u]: %d\n",
@@ -340,6 +339,7 @@ int sdei_unmask_local_cpu(void)
static void _ipi_unmask_cpu(void *ignored)
{
+ WARN_ON_ONCE(preemptible());
sdei_unmask_local_cpu();
}
@@ -347,6 +347,8 @@ static void _ipi_private_reset(void *ignored)
{
int err;
+ WARN_ON_ONCE(preemptible());
+
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
NULL);
if (err && err != -EIO)
@@ -393,8 +395,6 @@ static void _local_event_enable(void *data)
int err;
struct sdei_crosscall_args *arg = data;
- WARN_ON_ONCE(preemptible());
-
err = sdei_api_event_enable(arg->event->event_num);
sdei_cross_call_return(arg, err);
@@ -485,8 +485,6 @@ static void _local_event_unregister(void *data)
int err;
struct sdei_crosscall_args *arg = data;
- WARN_ON_ONCE(preemptible());
-
err = sdei_api_event_unregister(arg->event->event_num);
sdei_cross_call_return(arg, err);
@@ -575,8 +573,6 @@ static void _local_event_register(void *data)
struct sdei_registered_event *reg;
struct sdei_crosscall_args *arg = data;
- WARN_ON(preemptible());
-
reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
reg, 0, 0);
@@ -756,6 +752,8 @@ static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
{
int rv;
+ WARN_ON_ONCE(preemptible());
+
switch (action) {
case CPU_PM_ENTER:
rv = sdei_mask_local_cpu();
@@ -804,7 +802,7 @@ static int sdei_device_freeze(struct device *dev)
int err;
/* unregister private events */
- cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+ cpuhp_remove_state(sdei_hp_state);
err = sdei_unregister_shared();
if (err)
@@ -825,12 +823,15 @@ static int sdei_device_thaw(struct device *dev)
return err;
}
- err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
&sdei_cpuhp_up, &sdei_cpuhp_down);
- if (err)
+ if (err < 0) {
pr_warn("Failed to re-register CPU hotplug notifier...\n");
+ return err;
+ }
- return err;
+ sdei_hp_state = err;
+ return 0;
}
static int sdei_device_restore(struct device *dev)
@@ -862,7 +863,7 @@ static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
* We are going to reset the interface, after this there is no point
* doing work when we take CPUs offline.
*/
- cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+ cpuhp_remove_state(sdei_hp_state);
sdei_platform_reset();
@@ -1044,13 +1045,15 @@ static int sdei_probe(struct platform_device *pdev)
goto remove_cpupm;
}
- err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
&sdei_cpuhp_up, &sdei_cpuhp_down);
- if (err) {
+ if (err < 0) {
pr_warn("Failed to register CPU hotplug notifier...\n");
goto remove_reboot;
}
+ sdei_hp_state = err;
+
return 0;
remove_reboot:
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index b1395133389e..78c02717c368 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -242,29 +242,6 @@ failed:
}
/**
- * efi_capsule_flush - called by file close or file flush
- * @file: file pointer
- * @id: not used
- *
- * If a capsule is being partially uploaded then calling this function
- * will be treated as upload termination and will free those completed
- * buffer pages and -ECANCELED will be returned.
- **/
-static int efi_capsule_flush(struct file *file, fl_owner_t id)
-{
- int ret = 0;
- struct capsule_info *cap_info = file->private_data;
-
- if (cap_info->index > 0) {
- pr_err("capsule upload not complete\n");
- efi_free_all_buff_pages(cap_info);
- ret = -ECANCELED;
- }
-
- return ret;
-}
-
-/**
* efi_capsule_release - called by file close
* @inode: not used
* @file: file pointer
@@ -276,6 +253,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
{
struct capsule_info *cap_info = file->private_data;
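+	/*
+	 * The upload is incomplete if pages were staged but the capsule
+	 * header was never fully parsed, or fewer bytes arrived than the
+	 * header declared; free the staged pages in that case.
+	 */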
+ if (cap_info->index > 0 &&
+ (cap_info->header.headersize == 0 ||
+ cap_info->count < cap_info->total_size)) {
+ pr_err("capsule upload not complete\n");
+ efi_free_all_buff_pages(cap_info);
+ }
+
kfree(cap_info->pages);
kfree(cap_info->phys);
kfree(file->private_data);
@@ -307,7 +291,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
- cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+ cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
if (!cap_info->phys) {
kfree(cap_info->pages);
kfree(cap_info);
@@ -323,7 +307,6 @@ static const struct file_operations efi_capsule_fops = {
.owner = THIS_MODULE,
.open = efi_capsule_open,
.write = efi_capsule_write,
- .flush = efi_capsule_flush,
.release = efi_capsule_release,
.llseek = no_llseek,
};
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 8fd74a7501d4..ed31b08855f9 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -345,8 +345,8 @@ static int __init efisubsys_init(void)
efi_kobj = kobject_create_and_add("efi", firmware_kobj);
if (!efi_kobj) {
pr_err("efi: Firmware registration failed.\n");
- destroy_workqueue(efi_rts_wq);
- return -ENOMEM;
+ error = -ENOMEM;
+ goto err_destroy_wq;
}
error = generic_ops_register();
@@ -382,7 +382,10 @@ err_unregister:
generic_ops_unregister();
err_put:
kobject_put(efi_kobj);
- destroy_workqueue(efi_rts_wq);
+err_destroy_wq:
+ if (efi_rts_wq)
+ destroy_workqueue(efi_rts_wq);
+
return error;
}
@@ -546,7 +549,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
seed = early_memremap(efi.rng_seed, sizeof(*seed));
if (seed != NULL) {
- size = READ_ONCE(seed->size);
+ size = min(seed->size, EFI_RANDOM_SEED_SIZE);
early_memunmap(seed, sizeof(*seed));
} else {
pr_err("Could not map UEFI random seed!\n");
@@ -1019,6 +1022,8 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
/* first try to find a slot in an existing linked list entry */
for (prsv = efi_memreserve_root->next; prsv; ) {
rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
+ if (!rsv)
+ return -ENOMEM;
index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
if (index < rsv->size) {
rsv->entry[index].base = addr;
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 0bf0190917e0..9d9a3b698351 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -291,14 +291,6 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
goto fail;
}
- /*
- * Now that we have done our final memory allocation (and free)
- * we can get the memory map key needed for exit_boot_services().
- */
- status = efi_get_memory_map(sys_table, &map);
- if (status != EFI_SUCCESS)
- goto fail_free_new_fdt;
-
status = update_fdt(sys_table, (void *)fdt_addr, fdt_size,
(void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr,
initrd_addr, initrd_size);
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index edba5e7a3743..c8a1ef872e0d 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -19,7 +19,7 @@ static const efi_char16_t efi_SetupMode_name[] = L"SetupMode";
/* SHIM variables */
static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
-static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
+static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
#define get_efi_var(name, vendor, ...) \
efi_call_runtime(get_variable, \
@@ -58,8 +58,8 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
/*
* See if a user has put the shim into insecure mode. If so, and if the
- * variable doesn't have the runtime attribute set, we might as well
- * honor that.
+ * variable doesn't have the non-volatile attribute set, we might as
+ * well honor that.
*/
size = sizeof(moksbstate);
status = get_efi_var(shim_MokSBState_name, &shim_guid,
@@ -68,7 +68,7 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
/* If it fails, we don't care why. Default to secure */
if (status != EFI_SUCCESS)
goto secure_boot_enabled;
- if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
+ if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
return efi_secureboot_mode_disabled;
secure_boot_enabled:
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 5d343dc8e535..5f577357c3e7 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -32,7 +32,7 @@ int __init efi_memattr_init(void)
return -ENOMEM;
}
- if (tbl->version > 1) {
+ if (tbl->version > 2) {
pr_warn("Unexpected EFI Memory Attributes table version %d\n",
tbl->version);
goto unmap;
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 136b9c7f9ac9..a6735f1a16d1 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -62,6 +62,7 @@ struct efi_runtime_work efi_rts_work;
\
if (!efi_enabled(EFI_RUNTIME_SERVICES)) { \
pr_warn_once("EFI Runtime Services are disabled!\n"); \
+ efi_rts_work.status = EFI_DEVICE_ERROR; \
goto exit; \
} \
\
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 8d132e4f008a..568074148f62 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -152,20 +152,22 @@ static int coreboot_table_probe(struct platform_device *pdev)
if (!ptr)
return -ENOMEM;
- ret = bus_register(&coreboot_bus_type);
- if (!ret) {
- ret = coreboot_table_populate(dev, ptr);
- if (ret)
- bus_unregister(&coreboot_bus_type);
- }
+ ret = coreboot_table_populate(dev, ptr);
+
memunmap(ptr);
return ret;
}
+static int __cb_dev_unregister(struct device *dev, void *dummy)
+{
+ device_unregister(dev);
+ return 0;
+}
+
static int coreboot_table_remove(struct platform_device *pdev)
{
- bus_unregister(&coreboot_bus_type);
+ bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister);
return 0;
}
@@ -195,6 +197,32 @@ static struct platform_driver coreboot_table_driver = {
.of_match_table = of_match_ptr(coreboot_of_match),
},
};
-module_platform_driver(coreboot_table_driver);
+
+static int __init coreboot_table_driver_init(void)
+{
+ int ret;
+
+ ret = bus_register(&coreboot_bus_type);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&coreboot_table_driver);
+ if (ret) {
+ bus_unregister(&coreboot_bus_type);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit coreboot_table_driver_exit(void)
+{
+ platform_driver_unregister(&coreboot_table_driver);
+ bus_unregister(&coreboot_bus_type);
+}
+
+module_init(coreboot_table_driver_init);
+module_exit(coreboot_table_driver_exit);
+
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index 916f26adc595..922c079d13c8 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -43,9 +43,7 @@ static int framebuffer_probe(struct coreboot_device *dev)
fb->green_mask_pos == formats[i].green.offset &&
fb->green_mask_size == formats[i].green.length &&
fb->blue_mask_pos == formats[i].blue.offset &&
- fb->blue_mask_size == formats[i].blue.length &&
- fb->reserved_mask_pos == formats[i].transp.offset &&
- fb->reserved_mask_size == formats[i].transp.length)
+ fb->blue_mask_size == formats[i].blue.length)
pdata.format = formats[i].name;
}
if (!pdata.format)
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index edaa4e5d84ad..94753ad18ca6 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -359,9 +359,10 @@ static efi_status_t gsmi_get_variable(efi_char16_t *name,
memcpy(data, gsmi_dev.data_buf->start, *data_size);
/* All variables have the following attributes */
- *attr = EFI_VARIABLE_NON_VOLATILE |
- EFI_VARIABLE_BOOTSERVICE_ACCESS |
- EFI_VARIABLE_RUNTIME_ACCESS;
+ if (attr)
+ *attr = EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS;
}
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
@@ -679,6 +680,15 @@ static struct notifier_block gsmi_die_notifier = {
static int gsmi_panic_callback(struct notifier_block *nb,
unsigned long reason, void *arg)
{
+ /*
+ * Panic callbacks are executed with all other CPUs stopped,
+ * so we must not attempt to spin waiting for gsmi_dev.lock
+ * to be released.
+ */
+ if (spin_is_locked(&gsmi_dev.lock))
+ return NOTIFY_DONE;
+
gsmi_shutdown_reason(GSMI_SHUTDOWN_PANIC);
return NOTIFY_DONE;
}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index b9fdc20b4eb9..eda25d506059 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -585,8 +585,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
static void qcom_scm_shutdown(struct platform_device *pdev)
{
/* Clean shutdown, disable download mode to allow normal restart */
- if (download_mode)
- qcom_scm_set_download_mode(false);
+ qcom_scm_set_download_mode(false);
}
static const struct of_device_id qcom_scm_dt_match[] = {
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index da26a584dca0..1a690b2c1e2f 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -7,6 +7,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/kref.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_platform.h>
@@ -27,6 +28,8 @@ struct rpi_firmware {
struct mbox_chan *chan; /* The property channel. */
struct completion c;
u32 enabled;
+
+ struct kref consumers;
};
static DEFINE_MUTEX(transaction_lock);
@@ -214,12 +217,38 @@ static void rpi_register_clk_driver(struct device *dev)
-1, NULL, 0);
}
+static void rpi_firmware_delete(struct kref *kref)
+{
+ struct rpi_firmware *fw = container_of(kref, struct rpi_firmware,
+ consumers);
+
+ mbox_free_channel(fw->chan);
+ kfree(fw);
+}
+
+void rpi_firmware_put(struct rpi_firmware *fw)
+{
+ kref_put(&fw->consumers, rpi_firmware_delete);
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_put);
+
+static void devm_rpi_firmware_put(void *data)
+{
+ struct rpi_firmware *fw = data;
+
+ rpi_firmware_put(fw);
+}
+
static int rpi_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rpi_firmware *fw;
- fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
+ /*
+ * Memory will be freed by rpi_firmware_delete() once all users have
+ * released their firmware handles. Don't use devm_kzalloc() here.
+ */
+ fw = kzalloc(sizeof(*fw), GFP_KERNEL);
if (!fw)
return -ENOMEM;
@@ -232,10 +261,12 @@ static int rpi_firmware_probe(struct platform_device *pdev)
int ret = PTR_ERR(fw->chan);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get mbox channel: %d\n", ret);
+ kfree(fw);
return ret;
}
init_completion(&fw->c);
+ kref_init(&fw->consumers);
platform_set_drvdata(pdev, fw);
@@ -264,7 +295,8 @@ static int rpi_firmware_remove(struct platform_device *pdev)
rpi_hwmon = NULL;
platform_device_unregister(rpi_clk);
rpi_clk = NULL;
- mbox_free_channel(fw->chan);
+
+ rpi_firmware_put(fw);
return 0;
}
@@ -273,19 +305,51 @@ static int rpi_firmware_remove(struct platform_device *pdev)
* rpi_firmware_get - Get pointer to rpi_firmware structure.
* @firmware_node: Pointer to the firmware Device Tree node.
*
+ * The reference to rpi_firmware has to be released with rpi_firmware_put().
+ *
* Returns NULL if the firmware device is not ready.
*/
struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
{
struct platform_device *pdev = of_find_device_by_node(firmware_node);
+ struct rpi_firmware *fw;
if (!pdev)
return NULL;
- return platform_get_drvdata(pdev);
+ fw = platform_get_drvdata(pdev);
+ if (!fw)
+ return NULL;
+
+ if (!kref_get_unless_zero(&fw->consumers))
+ return NULL;
+
+ return fw;
}
EXPORT_SYMBOL_GPL(rpi_firmware_get);
+/**
+ * devm_rpi_firmware_get - Get pointer to rpi_firmware structure.
+ * @dev: Device whose lifetime the firmware reference is bound to.
+ * @firmware_node: Pointer to the firmware Device Tree node.
+ *
+ * The reference is released automatically when @dev is unbound.
+ *
+ * Returns NULL if the firmware device is not ready.
+ */
+struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+ struct device_node *firmware_node)
+{
+ struct rpi_firmware *fw;
+
+ fw = rpi_firmware_get(firmware_node);
+ if (!fw)
+ return NULL;
+
+ if (devm_add_action_or_reset(dev, devm_rpi_firmware_put, fw))
+ return NULL;
+
+ return fw;
+}
+EXPORT_SYMBOL_GPL(devm_rpi_firmware_get);
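+
+/*
+ * Typical consumer usage (a sketch, assuming a client platform driver):
+ *
+ *	fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
+ *	if (!fw)
+ *		return -EPROBE_DEFER;
+ *
+ * The reference is dropped automatically when the client device unbinds,
+ * so no explicit rpi_firmware_put() is needed.
+ */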
+
static const struct of_device_id rpi_firmware_of_match[] = {
{ .compatible = "raspberrypi,bcm2835-firmware", },
{},
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 08c422380a00..c871b4227a64 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -615,8 +615,8 @@ svc_create_memory_pool(struct platform_device *pdev,
end = rounddown(sh_memory->addr + sh_memory->size, PAGE_SIZE);
paddr = begin;
size = end - begin;
- va = memremap(paddr, size, MEMREMAP_WC);
- if (!va) {
+ va = devm_memremap(dev, paddr, size, MEMREMAP_WC);
+ if (IS_ERR(va)) {
dev_err(dev, "fail to remap shared memory\n");
return ERR_PTR(-EINVAL);
}
@@ -981,8 +981,8 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
return ret;
genpool = svc_create_memory_pool(pdev, sh_memory);
- if (!genpool)
- return -ENOMEM;
+ if (IS_ERR(genpool))
+ return PTR_ERR(genpool);
/* allocate service controller and supporting channel */
controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 4126be9e3216..00259a5f3b3b 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments System Control Interface Protocol Driver
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
@@ -179,7 +179,7 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct resource *res;
- char debug_name[50] = "ti_sci_debug@";
+ char debug_name[50];
/* Debug region is optional */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -196,10 +196,10 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
/* Setup NULL termination */
info->debug_buffer[info->debug_region_size] = 0;
- info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
- sizeof(debug_name) -
- sizeof("ti_sci_debug@")),
- 0444, NULL, info, &ti_sci_debug_fops);
+ snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s",
+ dev_name(dev));
+ info->d = debugfs_create_file(debug_name, 0444, NULL, info,
+ &ti_sci_debug_fops);
if (IS_ERR(info->d))
return PTR_ERR(info->d);
@@ -208,19 +208,6 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
return 0;
}
-/**
- * ti_sci_debugfs_destroy() - clean up log debug file
- * @pdev: platform device pointer
- * @info: Pointer to SCI entity information
- */
-static void ti_sci_debugfs_destroy(struct platform_device *pdev,
- struct ti_sci_info *info)
-{
- if (IS_ERR(info->debug_region))
- return;
-
- debugfs_remove(info->d);
-}
#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
struct ti_sci_info *info)
@@ -3527,43 +3514,12 @@ out:
return ret;
}
-static int ti_sci_remove(struct platform_device *pdev)
-{
- struct ti_sci_info *info;
- struct device *dev = &pdev->dev;
- int ret = 0;
-
- of_platform_depopulate(dev);
-
- info = platform_get_drvdata(pdev);
-
- if (info->nb.notifier_call)
- unregister_restart_handler(&info->nb);
-
- mutex_lock(&ti_sci_list_mutex);
- if (info->users)
- ret = -EBUSY;
- else
- list_del(&info->node);
- mutex_unlock(&ti_sci_list_mutex);
-
- if (!ret) {
- ti_sci_debugfs_destroy(pdev, info);
-
- /* Safe to free channels since no more users */
- mbox_free_channel(info->chan_tx);
- mbox_free_channel(info->chan_rx);
- }
-
- return ret;
-}
-
static struct platform_driver ti_sci_driver = {
.probe = ti_sci_probe,
- .remove = ti_sci_remove,
.driver = {
.name = "ti-sci",
.of_match_table = of_match_ptr(ti_sci_of_match),
+ .suppress_bind_attrs = true,
},
};
module_platform_driver(ti_sci_driver);
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index f0d068c03944..57cd04062994 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -6,7 +6,7 @@
* The system works in a message response protocol
* See: http://processors.wiki.ti.com/index.php/TISCI for details
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __TI_SCI_H
diff --git a/drivers/firmware/xilinx/Kconfig b/drivers/firmware/xilinx/Kconfig
index bd33bbf70daf..0860f29a8ec8 100644
--- a/drivers/firmware/xilinx/Kconfig
+++ b/drivers/firmware/xilinx/Kconfig
@@ -21,4 +21,10 @@ config ZYNQMP_FIRMWARE_DEBUG
Say yes to enable ZynqMP firmware interface debug APIs.
If in doubt, say N.
+config ZYNQMP_FIRMWARE_SECURE
+ bool "Enable Xilinx Zynq MPSoC secure firmware loading APIs"
+ help
+ Say yes to enable ZynqMP secure firmware loading APIs.
+ If in doubt, say N.
+
endmenu
diff --git a/drivers/firmware/xilinx/Makefile b/drivers/firmware/xilinx/Makefile
index 875a53703c82..1b57bb14ad94 100644
--- a/drivers/firmware/xilinx/Makefile
+++ b/drivers/firmware/xilinx/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Xilinx firmwares
-obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o
+obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o zynqmp-ggs.o
obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o
+obj-$(CONFIG_ZYNQMP_FIRMWARE_SECURE) += zynqmp-secure.o
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index c6d0724da4db..1f8dc360a249 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -31,13 +31,88 @@ static char debugfs_buf[PAGE_SIZE];
#define PM_API(id) {id, #id, strlen(#id)}
static struct pm_api_info pm_api_list[] = {
+ PM_API(PM_REQUEST_SUSPEND),
+ PM_API(PM_SELF_SUSPEND),
+ PM_API(PM_FORCE_POWERDOWN),
+ PM_API(PM_ABORT_SUSPEND),
+ PM_API(PM_REQUEST_WAKEUP),
+ PM_API(PM_SET_WAKEUP_SOURCE),
+ PM_API(PM_SYSTEM_SHUTDOWN),
+ PM_API(PM_REQUEST_NODE),
+ PM_API(PM_RELEASE_NODE),
+ PM_API(PM_SET_REQUIREMENT),
+ PM_API(PM_SET_MAX_LATENCY),
PM_API(PM_GET_API_VERSION),
+ PM_API(PM_SET_CONFIGURATION),
+ PM_API(PM_GET_NODE_STATUS),
+ PM_API(PM_GET_OPERATING_CHARACTERISTIC),
+ PM_API(PM_REGISTER_NOTIFIER),
+ PM_API(PM_RESET_ASSERT),
+ PM_API(PM_RESET_GET_STATUS),
+ PM_API(PM_GET_CHIPID),
+ PM_API(PM_PINCTRL_GET_FUNCTION),
+ PM_API(PM_PINCTRL_SET_FUNCTION),
+ PM_API(PM_PINCTRL_CONFIG_PARAM_GET),
+ PM_API(PM_PINCTRL_CONFIG_PARAM_SET),
+ PM_API(PM_IOCTL),
+ PM_API(PM_CLOCK_ENABLE),
+ PM_API(PM_CLOCK_DISABLE),
+ PM_API(PM_CLOCK_GETSTATE),
+ PM_API(PM_CLOCK_SETDIVIDER),
+ PM_API(PM_CLOCK_GETDIVIDER),
+ PM_API(PM_CLOCK_SETRATE),
+ PM_API(PM_CLOCK_GETRATE),
+ PM_API(PM_CLOCK_SETPARENT),
+ PM_API(PM_CLOCK_GETPARENT),
PM_API(PM_QUERY_DATA),
};
struct dentry *firmware_debugfs_root;
/**
+ * zynqmp_pm_self_suspend - PM call for master to suspend itself
+ * @node: Node ID of the master or subsystem
+ * @latency: Requested maximum wakeup latency (not supported)
+ * @state: Requested state (not supported)
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_self_suspend(const u32 node, const u32 latency,
+ const u32 state)
+{
+ return zynqmp_pm_invoke_fn(PM_SELF_SUSPEND, node, latency,
+ state, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_abort_suspend - PM call to announce that a prior suspend request
+ * is to be aborted.
+ * @reason: Reason for the abort
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_abort_suspend(const enum zynqmp_pm_abort_reason reason)
+{
+ return zynqmp_pm_invoke_fn(PM_ABORT_SUSPEND, reason, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_register_notifier - Register the PU to be notified of PM events
+ * @node: Node ID of the slave
+ * @event: The event to be notified about
+ * @wake: Wake up on event
+ * @enable: Enable or disable the notifier
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable)
+{
+ return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, node, event,
+ wake, enable, NULL);
+}
+
+/**
* zynqmp_pm_argument_value() - Extract argument value from a PM-API request
* @arg: Entered PM-API argument in string format
*
@@ -87,6 +162,7 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
{
const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
u32 pm_api_version;
+ u64 rate;
int ret;
struct zynqmp_pm_query_data qdata = {0};
@@ -96,6 +172,191 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
sprintf(debugfs_buf, "PM-API Version = %d.%d\n",
pm_api_version >> 16, pm_api_version & 0xffff);
break;
+ case PM_REQUEST_SUSPEND:
+ ret = eemi_ops->request_suspend(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_REQUEST_ACK_NO,
+ pm_api_arg[2] ? pm_api_arg[2] :
+ ZYNQMP_PM_MAX_LATENCY, 0);
+ break;
+ case PM_SELF_SUSPEND:
+ ret = zynqmp_pm_self_suspend(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_MAX_LATENCY, 0);
+ break;
+ case PM_FORCE_POWERDOWN:
+ ret = eemi_ops->force_powerdown(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_REQUEST_ACK_NO);
+ break;
+ case PM_ABORT_SUSPEND:
+ ret = zynqmp_pm_abort_suspend(pm_api_arg[0] ? pm_api_arg[0] :
+ ZYNQMP_PM_ABORT_REASON_UNKNOWN);
+ break;
+ case PM_REQUEST_WAKEUP:
+ ret = eemi_ops->request_wakeup(pm_api_arg[0],
+ pm_api_arg[1], pm_api_arg[2],
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_NO);
+ break;
+ case PM_SET_WAKEUP_SOURCE:
+ ret = eemi_ops->set_wakeup_source(pm_api_arg[0], pm_api_arg[1],
+ pm_api_arg[2]);
+ break;
+ case PM_SYSTEM_SHUTDOWN:
+ ret = eemi_ops->system_shutdown(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_REQUEST_NODE:
+ ret = eemi_ops->request_node(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_CAPABILITY_ACCESS,
+ pm_api_arg[2] ? pm_api_arg[2] : 0,
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ break;
+ case PM_RELEASE_NODE:
+ ret = eemi_ops->release_node(pm_api_arg[0]);
+ break;
+ case PM_SET_REQUIREMENT:
+ ret = eemi_ops->set_requirement(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_CAPABILITY_CONTEXT,
+ pm_api_arg[2] ?
+ pm_api_arg[2] : 0,
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ break;
+ case PM_SET_MAX_LATENCY:
+ ret = eemi_ops->set_max_latency(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_MAX_LATENCY);
+ break;
+ case PM_SET_CONFIGURATION:
+ ret = eemi_ops->set_configuration(pm_api_arg[0]);
+ break;
+ case PM_GET_NODE_STATUS:
+ ret = eemi_ops->get_node_status(pm_api_arg[0],
+ &pm_api_ret[0],
+ &pm_api_ret[1],
+ &pm_api_ret[2]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "GET_NODE_STATUS:\n\tNodeId: %llu\n\tStatus: %u\n\tRequirements: %u\n\tUsage: %u\n",
+ pm_api_arg[0], pm_api_ret[0],
+ pm_api_ret[1], pm_api_ret[2]);
+ break;
+ case PM_GET_OPERATING_CHARACTERISTIC:
+ ret = eemi_ops->get_operating_characteristic(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_OPERATING_CHARACTERISTIC_POWER,
+ &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "GET_OPERATING_CHARACTERISTIC:\n\tNodeId: %llu\n\tType: %llu\n\tResult: %u\n",
+ pm_api_arg[0], pm_api_arg[1],
+ pm_api_ret[0]);
+ break;
+ case PM_REGISTER_NOTIFIER:
+ ret = zynqmp_pm_register_notifier(pm_api_arg[0],
+ pm_api_arg[1] ?
+ pm_api_arg[1] : 0,
+ pm_api_arg[2] ?
+ pm_api_arg[2] : 0,
+ pm_api_arg[3] ?
+ pm_api_arg[3] : 0);
+ break;
+ case PM_RESET_ASSERT:
+ ret = eemi_ops->reset_assert(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_RESET_GET_STATUS:
+ ret = eemi_ops->reset_get_status(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Reset status: %u\n",
+ pm_api_ret[0]);
+ break;
+ case PM_GET_CHIPID:
+ ret = eemi_ops->get_chipid(&pm_api_ret[0], &pm_api_ret[1]);
+ if (!ret)
+ sprintf(debugfs_buf, "Idcode: %#x, Version:%#x\n",
+ pm_api_ret[0], pm_api_ret[1]);
+ break;
+ case PM_PINCTRL_GET_FUNCTION:
+ ret = eemi_ops->pinctrl_get_function(pm_api_arg[0],
+ &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "Current set function for the pin: %u\n",
+ pm_api_ret[0]);
+ break;
+ case PM_PINCTRL_SET_FUNCTION:
+ ret = eemi_ops->pinctrl_set_function(pm_api_arg[0],
+ pm_api_arg[1]);
+ break;
+ case PM_PINCTRL_CONFIG_PARAM_GET:
+ ret = eemi_ops->pinctrl_get_config(pm_api_arg[0], pm_api_arg[1],
+ &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "Pin: %llu, Param: %llu, Value: %u\n",
+ pm_api_arg[0], pm_api_arg[1],
+ pm_api_ret[0]);
+ break;
+ case PM_PINCTRL_CONFIG_PARAM_SET:
+ ret = eemi_ops->pinctrl_set_config(pm_api_arg[0],
+ pm_api_arg[1],
+ pm_api_arg[2]);
+ break;
+ case PM_IOCTL:
+ ret = eemi_ops->ioctl(pm_api_arg[0], pm_api_arg[1],
+ pm_api_arg[2], pm_api_arg[3],
+ &pm_api_ret[0]);
+ if (!ret && (pm_api_arg[1] == IOCTL_GET_RPU_OPER_MODE ||
+ pm_api_arg[1] == IOCTL_GET_PLL_FRAC_MODE ||
+ pm_api_arg[1] == IOCTL_GET_PLL_FRAC_DATA ||
+ pm_api_arg[1] == IOCTL_READ_GGS ||
+ pm_api_arg[1] == IOCTL_READ_PGGS ||
+ pm_api_arg[1] == IOCTL_PROBE_COUNTER_READ))
+ sprintf(debugfs_buf, "IOCTL return value: %u\n",
+ pm_api_ret[1]);
+ break;
+ case PM_CLOCK_ENABLE:
+ ret = eemi_ops->clock_enable(pm_api_arg[0]);
+ break;
+ case PM_CLOCK_DISABLE:
+ ret = eemi_ops->clock_disable(pm_api_arg[0]);
+ break;
+ case PM_CLOCK_GETSTATE:
+ ret = eemi_ops->clock_getstate(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Clock state: %u\n",
+ pm_api_ret[0]);
+ break;
+ case PM_CLOCK_SETDIVIDER:
+ ret = eemi_ops->clock_setdivider(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_CLOCK_GETDIVIDER:
+ ret = eemi_ops->clock_getdivider(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Divider Value: %d\n",
+ pm_api_ret[0]);
+ break;
+ case PM_CLOCK_SETRATE:
+ ret = eemi_ops->clock_setrate(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_CLOCK_GETRATE:
+ ret = eemi_ops->clock_getrate(pm_api_arg[0], &rate);
+ if (!ret)
+ sprintf(debugfs_buf, "Clock rate :%llu\n", rate);
+ break;
+ case PM_CLOCK_SETPARENT:
+ ret = eemi_ops->clock_setparent(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_CLOCK_GETPARENT:
+ ret = eemi_ops->clock_getparent(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "Clock parent Index: %u\n", pm_api_ret[0]);
+ break;
case PM_QUERY_DATA:
qdata.qid = pm_api_arg[0];
qdata.arg1 = pm_api_arg[1];
diff --git a/drivers/firmware/xilinx/zynqmp-ggs.c b/drivers/firmware/xilinx/zynqmp-ggs.c
new file mode 100644
index 000000000000..42179ad73c7f
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-ggs.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Xilinx Zynq MPSoC Firmware layer
+ *
+ * Copyright (C) 2014-2018 Xilinx, Inc.
+ *
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajanv@xilinx.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/of.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+static ssize_t read_register(char *buf, u32 ioctl_id, u32 reg)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops->ioctl)
+ return -EFAULT;
+
+ ret = eemi_ops->ioctl(0, ioctl_id, reg, 0, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
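+/*
+ * @buf is expected to carry two hex fields, "<mask> <value>". The
+ * register is read through @read_ioctl, the masked bits are replaced
+ * with the new value, and the result is written back via @write_ioctl.
+ */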
+static ssize_t write_register(const char *buf, size_t count, u32 read_ioctl,
+ u32 write_ioctl, u32 reg)
+{
+ char *kern_buff, *inbuf, *tok;
+ long mask, value;
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops->ioctl)
+ return -EFAULT;
+
+ kern_buff = kzalloc(count, GFP_KERNEL);
+ if (!kern_buff)
+ return -ENOMEM;
+
+ strscpy(kern_buff, buf, count);
+
+ inbuf = kern_buff;
+
+ /* Read the write mask */
+ tok = strsep(&inbuf, " ");
+ if (!tok) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = kstrtol(tok, 16, &mask);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ /* Read the write value */
+ tok = strsep(&inbuf, " ");
+ if (!tok) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = kstrtol(tok, 16, &value);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = eemi_ops->ioctl(0, read_ioctl, reg, 0, ret_payload);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
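+
+	/* Read-modify-write: keep bits outside the mask, update bits inside. */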
+ ret_payload[1] &= ~mask;
+ value &= mask;
+ value |= ret_payload[1];
+
+ ret = eemi_ops->ioctl(0, write_ioctl, reg, value, NULL);
+ if (ret)
+ ret = -EFAULT;
+
+err:
+ kfree(kern_buff);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+/**
+ * ggs_show - Show global general storage (ggs) sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: Buffer to hold the register value string
+ * @reg: Register number
+ *
+ * Return: Number of bytes printed into the buffer.
+ *
+ * Helper function for viewing a ggs register value.
+ *
+ * User-space interface for viewing the content of the ggs0 register.
+ * cat /sys/firmware/zynqmp/ggs0
+ */
+static ssize_t ggs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ return read_register(buf, IOCTL_READ_GGS, reg);
+}
+
+/**
+ * ggs_store - Store global general storage (ggs) sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: User-entered register value string
+ * @count: Size of buf
+ * @reg: Register number
+ *
+ * Return: count argument if request succeeds, the corresponding
+ * error code otherwise
+ *
+ * Helper function for storing a ggs register value.
+ *
+ * For example, the user-space interface for storing a value to the
+ * ggs0 register:
+ * echo 0xFFFFFFFF 0x1234ABCD > /sys/firmware/zynqmp/ggs0
+ */
+static ssize_t ggs_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count,
+ u32 reg)
+{
+ if (!kobj || !attr || !buf || !count || reg >= GSS_NUM_REGS)
+ return -EINVAL;
+
+ return write_register(buf, count, IOCTL_READ_GGS, IOCTL_WRITE_GGS, reg);
+}
+
+/* GGS register show functions */
+#define GGS0_SHOW(N) \
+ ssize_t ggs##N##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *buf) \
+ { \
+ return ggs_show(kobj, attr, buf, N); \
+ }
+
+static GGS0_SHOW(0);
+static GGS0_SHOW(1);
+static GGS0_SHOW(2);
+static GGS0_SHOW(3);
+
+/* GGS register store function */
+#define GGS0_STORE(N) \
+ ssize_t ggs##N##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return ggs_store(kobj, attr, buf, count, N); \
+ }
+
+static GGS0_STORE(0);
+static GGS0_STORE(1);
+static GGS0_STORE(2);
+static GGS0_STORE(3);
+
+/**
+ * pggs_show - Show persistent global general storage (pggs) sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: Buffer to hold the register value string
+ * @reg: Register number
+ *
+ * Return: Number of bytes printed into the buffer.
+ *
+ * Helper function for viewing a pggs register value.
+ */
+static ssize_t pggs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ return read_register(buf, IOCTL_READ_PGGS, reg);
+}
+
+/**
+ * pggs_store - Store persistent global general storage (pggs) sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: User-entered register value string
+ * @count: Size of buf
+ * @reg: Register number
+ *
+ * Return: count argument if request succeeds, the corresponding
+ * error code otherwise
+ *
+ * Helper function for storing a pggs register value.
+ */
+static ssize_t pggs_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count,
+ u32 reg)
+{
+ return write_register(buf, count, IOCTL_READ_PGGS,
+ IOCTL_WRITE_PGGS, reg);
+}
+
+#define PGGS0_SHOW(N) \
+ ssize_t pggs##N##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *buf) \
+ { \
+ return pggs_show(kobj, attr, buf, N); \
+ }
+
+#define PGGS0_STORE(N) \
+ ssize_t pggs##N##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return pggs_store(kobj, attr, buf, count, N); \
+ }
+
+/* PGGS register show functions */
+static PGGS0_SHOW(0);
+static PGGS0_SHOW(1);
+static PGGS0_SHOW(2);
+static PGGS0_SHOW(3);
+
+/* PGGS register store functions */
+static PGGS0_STORE(0);
+static PGGS0_STORE(1);
+static PGGS0_STORE(2);
+static PGGS0_STORE(3);
+
+/* GGS register attributes */
+static struct kobj_attribute zynqmp_attr_ggs0 = __ATTR_RW(ggs0);
+static struct kobj_attribute zynqmp_attr_ggs1 = __ATTR_RW(ggs1);
+static struct kobj_attribute zynqmp_attr_ggs2 = __ATTR_RW(ggs2);
+static struct kobj_attribute zynqmp_attr_ggs3 = __ATTR_RW(ggs3);
+
+/* PGGS register attributes */
+static struct kobj_attribute zynqmp_attr_pggs0 = __ATTR_RW(pggs0);
+static struct kobj_attribute zynqmp_attr_pggs1 = __ATTR_RW(pggs1);
+static struct kobj_attribute zynqmp_attr_pggs2 = __ATTR_RW(pggs2);
+static struct kobj_attribute zynqmp_attr_pggs3 = __ATTR_RW(pggs3);
+
+static struct attribute *attrs[] = {
+ &zynqmp_attr_ggs0.attr,
+ &zynqmp_attr_ggs1.attr,
+ &zynqmp_attr_ggs2.attr,
+ &zynqmp_attr_ggs3.attr,
+ &zynqmp_attr_pggs0.attr,
+ &zynqmp_attr_pggs1.attr,
+ &zynqmp_attr_pggs2.attr,
+ &zynqmp_attr_pggs3.attr,
+ NULL,
+};
+
+static const struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+int zynqmp_pm_ggs_init(struct kobject *parent_kobj)
+{
+ return sysfs_create_group(parent_kobj, &attr_group);
+}
diff --git a/drivers/firmware/xilinx/zynqmp-secure.c b/drivers/firmware/xilinx/zynqmp-secure.c
new file mode 100644
index 000000000000..0647e4e373d4
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-secure.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP SecureFw Driver.
+ * Copyright (c) 2018 Xilinx Inc.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+
+#define ZYNQMP_AES_KEY_SIZE 64
+
+static u8 key[ZYNQMP_AES_KEY_SIZE] = {0};
+static dma_addr_t dma_addr;
+static u8 *keyptr;
+static size_t dma_size;
+static char *kbuf;
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+static ssize_t secure_load_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ const struct firmware *fw;
+ char image_name[NAME_MAX];
+ u64 dst;
+ int ret, len;
+
+ if (IS_ERR(eemi_ops) || !eemi_ops->secure_image)
+ return -EFAULT;
+
+ strscpy(image_name, buf, sizeof(image_name));
+ len = strlen(image_name);
+ if (len && image_name[len - 1] == '\n')
+ image_name[len - 1] = 0;
+
+ ret = request_firmware(&fw, image_name, dev);
+ if (ret) {
+ dev_err(dev, "Error requesting firmware %s\n", image_name);
+ return ret;
+ }
+ dma_size = fw->size;
+
+ if (keyptr)
+ dma_size = fw->size + ZYNQMP_AES_KEY_SIZE;
+
+ kbuf = dma_alloc_coherent(dev, dma_size,
+ &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ memcpy(kbuf, fw->data, fw->size);
+
+ if (keyptr)
+ memcpy(kbuf + fw->size, key, ZYNQMP_AES_KEY_SIZE);
+
+ /* To ensure cache coherency */
+ __flush_cache_user_range((unsigned long)kbuf,
+ (unsigned long)kbuf + dma_size);
+
+ if (keyptr)
+ ret = eemi_ops->secure_image(dma_addr, dma_addr + fw->size,
+ &dst);
+ else
+ ret = eemi_ops->secure_image(dma_addr, 0, &dst);
+
+ /* fw->size is still used above, so release the firmware only now */
+ release_firmware(fw);
+
+ if (ret) {
+ dev_err(dev, "Failed to load secure image\n");
+ return ret;
+ }
+ dev_info(dev, "Verified image at 0x%llx\n", dst);
+
+ return count;
+}
+
+static ssize_t key_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, ZYNQMP_AES_KEY_SIZE + 1, "%s\n", key);
+}
+
+static ssize_t key_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (count > ZYNQMP_AES_KEY_SIZE)
+ return -EINVAL;
+
+ memcpy(key, buf, count);
+ keyptr = &key[0];
+ return count;
+}
+
+static ssize_t secure_load_done_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned int value;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+ if (value)
+ dma_free_coherent(dev, dma_size, kbuf, dma_addr);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(key);
+static DEVICE_ATTR_WO(secure_load);
+static DEVICE_ATTR_WO(secure_load_done);
+
+static struct attribute *securefw_attrs[] = {
+ &dev_attr_secure_load_done.attr,
+ &dev_attr_secure_load.attr,
+ &dev_attr_key.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(securefw);
+
+static int securefw_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct platform_device *securefw_pdev;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ securefw_pdev = pdev;
+
+ securefw_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+ ret = of_dma_configure(&securefw_pdev->dev, NULL, true);
+ if (ret < 0) {
+ dev_info(&securefw_pdev->dev, "Cannot setup DMA ops\r\n");
+ return ret;
+ }
+
+ ret = sysfs_create_groups(&securefw_pdev->dev.kobj, securefw_groups);
+ if (ret)
+ return ret;
+
+ dev_info(&securefw_pdev->dev, "securefw probed\r\n");
+ return ret;
+}
+
+static int securefw_remove(struct platform_device *pdev)
+{
+ sysfs_remove_groups(&pdev->dev.kobj, securefw_groups);
+ return 0;
+}
+
+static struct platform_driver securefw_driver = {
+ .driver = {
+ .name = "securefw",
+ },
+ .probe = securefw_probe,
+ .remove = securefw_remove,
+};
+
+static struct platform_device *securefw_dev_reg;
+
+static int __init zynqmp_secure_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&securefw_driver);
+ if (ret)
+ return ret;
+
+ securefw_dev_reg = platform_device_register_simple("securefw", -1,
+ NULL, 0);
+ if (IS_ERR(securefw_dev_reg)) {
+ ret = PTR_ERR(securefw_dev_reg);
+ platform_driver_unregister(&securefw_driver);
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit zynqmp_secure_exit(void)
+{
+ platform_device_unregister(securefw_dev_reg);
+ platform_driver_unregister(&securefw_driver);
+}
+
+module_init(zynqmp_secure_init);
+module_exit(zynqmp_secure_exit);
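The securefw driver above is driven entirely through sysfs. A hedged user-space sketch of the intended sequence (illustrative; the /sys/devices/platform/securefw path and the image name are assumptions, and the key contents are a placeholder):

    #include <stdio.h>

    /* Write a string to a sysfs attribute; returns 0 on success */
    static int sysfs_write(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            /* Optional: provide an AES key before loading an encrypted image */
            sysfs_write("/sys/devices/platform/securefw/key",
                        "placeholder-64-byte-key");

            /* The driver fetches the image via request_firmware() */
            if (sysfs_write("/sys/devices/platform/securefw/secure_load",
                            "secure-image.bin\n"))
                    return 1;

            /* Tell the driver it may free its DMA buffer */
            return sysfs_write("/sys/devices/platform/securefw/secure_load_done",
                               "1\n");
    }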
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index fd3d83745208..5d294d23cf6d 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -24,8 +24,13 @@
#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"
+static unsigned long register_address;
+
static const struct zynqmp_eemi_ops *eemi_ops_tbl;
+static bool feature_check_enabled;
+static u32 zynqmp_pm_features[PM_API_MAX];
+
static const struct mfd_cell firmware_devs[] = {
{
.name = "zynqmp_power_controller",
@@ -44,10 +49,14 @@ static int zynqmp_pm_ret_code(u32 ret_status)
case XST_PM_SUCCESS:
case XST_PM_DOUBLE_REQ:
return 0;
+ case XST_PM_NO_FEATURE:
+ return -ENOTSUPP;
case XST_PM_NO_ACCESS:
return -EACCES;
case XST_PM_ABORT_SUSPEND:
return -ECANCELED;
+ case XST_PM_MULT_USER:
+ return -EUSERS;
case XST_PM_INTERNAL:
case XST_PM_CONFLICT:
case XST_PM_INVALID_NODE:
@@ -127,6 +136,39 @@ static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
}
/**
+ * zynqmp_pm_feature() - Check whether the given feature is supported
+ * @api_id: API ID to check
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_feature(u32 api_id)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ u64 smc_arg[2];
+
+ if (!feature_check_enabled)
+ return 0;
+
+ /* Return value if feature is already checked */
+ if (zynqmp_pm_features[api_id] != PM_FEATURE_UNCHECKED)
+ return zynqmp_pm_features[api_id];
+
+ smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
+ smc_arg[1] = api_id;
+
+ ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
+ if (ret) {
+ zynqmp_pm_features[api_id] = PM_FEATURE_INVALID;
+ return PM_FEATURE_INVALID;
+ }
+
+ zynqmp_pm_features[api_id] = ret_payload[1];
+
+ return zynqmp_pm_features[api_id];
+}
+
+/**
* zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
* caller function depending on the configuration
* @pm_api_id: Requested PM-API call
@@ -160,6 +202,9 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
*/
u64 smc_arg[4];
+ if (zynqmp_pm_feature(pm_api_id) == PM_FEATURE_INVALID)
+ return -ENOTSUPP;
+
smc_arg[0] = PM_SIP_SVC | pm_api_id;
smc_arg[1] = ((u64)arg1 << 32) | arg0;
smc_arg[2] = ((u64)arg3 << 32) | arg2;
@@ -197,11 +242,11 @@ static int zynqmp_pm_get_api_version(u32 *version)
/**
* zynqmp_pm_get_chipid - Get silicon ID registers
- * @idcode: IDCODE register
- * @version: version register
+ * @idcode: IDCODE register
+ * @version: version register
*
- * Return: Returns the status of the operation and the idcode and version
- * registers in @idcode and @version.
+ * Return: Returns the status of the operation and the idcode and version
+ * registers in @idcode and @version.
*/
static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
{
@@ -461,6 +506,36 @@ static int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
}
/**
+ * versal_is_valid_ioctl() - Check whether IOCTL ID is valid for Versal
+ * @ioctl_id: IOCTL ID
+ *
+ * Return: 1 if IOCTL is valid else 0
+ */
+static inline int versal_is_valid_ioctl(u32 ioctl_id)
+{
+ switch (ioctl_id) {
+ case IOCTL_GET_RPU_OPER_MODE:
+ case IOCTL_SET_RPU_OPER_MODE:
+ case IOCTL_RPU_BOOT_ADDR_CONFIG:
+ case IOCTL_TCM_COMB_CONFIG:
+ case IOCTL_SET_TAPDELAY_BYPASS:
+ case IOCTL_SD_DLL_RESET:
+ case IOCTL_SET_SD_TAPDELAY:
+ case IOCTL_WRITE_GGS:
+ case IOCTL_READ_GGS:
+ case IOCTL_WRITE_PGGS:
+ case IOCTL_READ_PGGS:
+ case IOCTL_SET_BOOT_HEALTH_STATUS:
+ case IOCTL_PROBE_COUNTER_READ:
+ case IOCTL_PROBE_COUNTER_WRITE:
+ case IOCTL_USB_SET_STATE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/**
* zynqmp_is_valid_ioctl() - Check whether IOCTL ID is valid or not
* @ioctl_id: IOCTL ID
*
@@ -473,6 +548,23 @@ static inline int zynqmp_is_valid_ioctl(u32 ioctl_id)
case IOCTL_GET_PLL_FRAC_MODE:
case IOCTL_SET_PLL_FRAC_DATA:
case IOCTL_GET_PLL_FRAC_DATA:
+ case IOCTL_GET_RPU_OPER_MODE:
+ case IOCTL_SET_RPU_OPER_MODE:
+ case IOCTL_RPU_BOOT_ADDR_CONFIG:
+ case IOCTL_TCM_COMB_CONFIG:
+ case IOCTL_SET_TAPDELAY_BYPASS:
+ case IOCTL_SET_SGMII_MODE:
+ case IOCTL_SD_DLL_RESET:
+ case IOCTL_SET_SD_TAPDELAY:
+ case IOCTL_WRITE_GGS:
+ case IOCTL_READ_GGS:
+ case IOCTL_WRITE_PGGS:
+ case IOCTL_READ_PGGS:
+ case IOCTL_ULPI_RESET:
+ case IOCTL_SET_BOOT_HEALTH_STATUS:
+ case IOCTL_AFI:
+ case IOCTL_PROBE_COUNTER_READ:
+ case IOCTL_PROBE_COUNTER_WRITE:
return 1;
default:
return 0;
@@ -494,8 +586,13 @@ static inline int zynqmp_is_valid_ioctl(u32 ioctl_id)
static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2,
u32 *out)
{
- if (!zynqmp_is_valid_ioctl(ioctl_id))
- return -EINVAL;
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
+ if (np) {
+ of_node_put(np);
+ if (!versal_is_valid_ioctl(ioctl_id))
+ return -EINVAL;
+ } else {
+ if (!zynqmp_is_valid_ioctl(ioctl_id))
+ return -EINVAL;
+ }
return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, ioctl_id,
arg1, arg2, out);
@@ -540,6 +637,22 @@ static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
}
/**
+ * zynqmp_pm_load_pdi - Load and process a PDI
+ * @src: Source device where the PDI is located
+ * @address: PDI source address
+ *
+ * This function provides support for loading a PDI from Linux.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_load_pdi(const u32 src, const u64 address)
+{
+ return zynqmp_pm_invoke_fn(PM_LOAD_PDI, src,
+ lower_32_bits(address),
+ upper_32_bits(address), 0, NULL);
+}
+
+/**
* zynqmp_pm_fpga_load - Perform the fpga load
* @address: Address to write to
* @size: pl bitstream size
@@ -664,6 +777,449 @@ static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
qos, ack, NULL);
}
+/**
+ * zynqmp_pm_fpga_read - Perform the fpga configuration readback
+ * @reg_numframes: Configuration register offset (or) Number of frames to read
+ * @phys_address: Physical Address of the buffer
+ * @readback_type: Type of fpga readback operation
+ * @value: Value to read
+ *
+ * This function provides access to xilfpga library to perform
+ * fpga configuration readback.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_fpga_read(const u32 reg_numframes, const u64 phys_address,
+ u32 readback_type, u32 *value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, reg_numframes,
+ lower_32_bits(phys_address),
+ upper_32_bits(phys_address), readback_type,
+ ret_payload);
+ *value = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_sha_hash - Access the SHA engine to calculate the hash
+ * @address: Address of the data/ Address of output buffer where
+ * hash should be stored.
+ * @size: Size of the data.
+ * @flags:
+ * BIT(0) - for initializing csudma driver and SHA3(Here address
+ * and size inputs can be NULL).
+ * BIT(1) - to call Sha3_Update API which can be called multiple
+ * times when data is not contiguous.
+ * BIT(2) - to get final hash of the whole updated data.
+ * Hash will be overwritten at provided address with
+ * 48 bytes.
+ *
+ * Return: Returns status, either success or error code.
+ */
+static int zynqmp_pm_sha_hash(const u64 address, const u32 size,
+ const u32 flags)
+{
+ u32 lower_addr = lower_32_bits(address);
+ u32 upper_addr = upper_32_bits(address);
+
+ return zynqmp_pm_invoke_fn(PM_SECURE_SHA, upper_addr, lower_addr,
+ size, flags, NULL);
+}
+
+/**
+ * zynqmp_pm_rsa - Access RSA hardware to encrypt/decrypt the data with RSA.
+ * @address: Address of the data
+ * @size: Size of the data.
+ * @flags:
+ * BIT(0) - Encryption/Decryption
+ * 0 - RSA decryption with private key
+ * 1 - RSA encryption with public key.
+ *
+ * Return: Returns status, either success or error code.
+ */
+static int zynqmp_pm_rsa(const u64 address, const u32 size, const u32 flags)
+{
+ u32 lower_addr = lower_32_bits(address);
+ u32 upper_addr = upper_32_bits(address);
+
+ return zynqmp_pm_invoke_fn(PM_SECURE_RSA, upper_addr, lower_addr,
+ size, flags, NULL);
+}
+
+/**
+ * zynqmp_pm_aes - Access AES hardware to encrypt/decrypt the data using
+ * AES-GCM core.
+ * @address: Address of the AesParams structure.
+ * @out: Returned output value
+ *
+ * Return: Returns status, either success or error code.
+ */
+static int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!out)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_SECURE_AES, upper_32_bits(address),
+ lower_32_bits(address),
+ 0, 0, ret_payload);
+ *out = ret_payload[1];
+ return ret;
+}
+
+/**
+ * zynqmp_pm_request_suspend - PM call to request for another PU or subsystem to
+ * be suspended gracefully.
+ * @node: Node ID of the targeted PU or subsystem
+ * @ack: Flag to specify whether acknowledge is requested
+ * @latency: Requested wakeup latency (not supported)
+ * @state: Requested state (not supported)
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_request_suspend(const u32 node,
+ const enum zynqmp_pm_request_ack ack,
+ const u32 latency,
+ const u32 state)
+{
+ return zynqmp_pm_invoke_fn(PM_REQUEST_SUSPEND, node, ack,
+ latency, state, NULL);
+}
+
+/**
+ * zynqmp_pm_force_powerdown - PM call to request for another PU or subsystem to
+ * be powered down forcefully
+ * @target: Node ID of the targeted PU or subsystem
+ * @ack: Flag to specify whether acknowledge is requested
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_force_powerdown(const u32 target,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return zynqmp_pm_invoke_fn(PM_FORCE_POWERDOWN, target, ack, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_request_wakeup - PM call to wake up selected master or subsystem
+ * @node: Node ID of the master or subsystem
+ * @set_addr: Specifies whether the address argument is relevant
+ * @address: Address from which to resume when woken up
+ * @ack: Flag to specify whether acknowledge requested
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_request_wakeup(const u32 node,
+ const bool set_addr,
+ const u64 address,
+ const enum zynqmp_pm_request_ack ack)
+{
+ /* set_addr flag is encoded into bit 0 of the address */
+ return zynqmp_pm_invoke_fn(PM_REQUEST_WAKEUP, node, address | set_addr,
+ address >> 32, ack, NULL);
+}
+
+/**
+ * zynqmp_pm_set_wakeup_source - PM call to specify the wakeup source
+ * while suspended
+ * @target: Node ID of the targeted PU or subsystem
+ * @wakeup_node: Node ID of the wakeup peripheral
+ * @enable: Enable or disable the specified peripheral as wake source
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_wakeup_source(const u32 target,
+ const u32 wakeup_node,
+ const u32 enable)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_WAKEUP_SOURCE, target,
+ wakeup_node, enable, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_system_shutdown - PM call to request a system shutdown or restart
+ * @type: Shutdown or restart request; 0 for shutdown, 1 for restart
+ * @subtype: Specifies which system should be restarted or shut down
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
+{
+ return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, type, subtype,
+ 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_set_max_latency - PM call to set wakeup latency requirements
+ * @node: Node ID of the slave
+ * @latency: Requested maximum wakeup latency
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_max_latency(const u32 node, const u32 latency)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_MAX_LATENCY, node, latency,
+ 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_set_configuration - PM call to set system configuration
+ * @physical_addr: Physical 32-bit address of data structure in memory
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_configuration(const u32 physical_addr)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_CONFIGURATION, physical_addr, 0,
+ 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_get_node_status - PM call to request a node's current power state
+ * @node: ID of the component or sub-system in question
+ * @status: Current operating state of the requested node
+ * @requirements: Current requirements asserted on the node,
+ * used for slave nodes only.
+ * @usage: Usage information, used for slave nodes only:
+ * PM_USAGE_NO_MASTER - No master is currently using
+ * the node
+ * PM_USAGE_CURRENT_MASTER - Only requesting master is
+ * currently using the node
+ * PM_USAGE_OTHER_MASTER - Only other masters are
+ * currently using the node
+ * PM_USAGE_BOTH_MASTERS - Both the current and at least
+ * one other master is currently
+ * using the node
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
+ u32 *const requirements, u32 *const usage)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!status)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_GET_NODE_STATUS, node, 0, 0,
+ 0, ret_payload);
+ if (ret_payload[0] == XST_PM_SUCCESS) {
+ *status = ret_payload[1];
+ if (requirements)
+ *requirements = ret_payload[2];
+ if (usage)
+ *usage = ret_payload[3];
+ }
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_get_operating_characteristic - PM call to request operating
+ * characteristic information
+ * @node: Node ID of the slave
+ * @type: Type of the operating characteristic requested
+ * @result: Used to return the requested operating characteristic
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_get_operating_characteristic(const u32 node,
+ const enum zynqmp_pm_opchar_type type,
+ u32 *const result)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!result)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_GET_OPERATING_CHARACTERISTIC,
+ node, type, 0, 0, ret_payload);
+ if (ret_payload[0] == XST_PM_SUCCESS)
+ *result = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_pinctrl_request - Request Pin from firmware
+ * @pin: Pin number to request
+ *
+ * This function requests pin from firmware.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+static int zynqmp_pm_pinctrl_request(const u32 pin)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_REQUEST, pin, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_pinctrl_release - Inform firmware that Pin control is released
+ * @pin: Pin number to release
+ *
+ * This function releases the pin back to the firmware.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+static int zynqmp_pm_pinctrl_release(const u32 pin)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_RELEASE, pin, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_pinctrl_get_function - Read function id set for the given pin
+ * @pin: Pin number
+ * @id: Buffer to store function ID
+ *
+ * This function provides the function currently set for the given pin.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!id)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_PINCTRL_GET_FUNCTION, pin, 0,
+ 0, 0, ret_payload);
+ *id = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_pinctrl_set_function - Set requested function for the pin
+ * @pin: Pin number
+ * @id: Function ID to set
+ *
+ * This function sets requested function for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+static int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_SET_FUNCTION, pin, id,
+ 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_pinctrl_get_config - Get configuration parameter for the pin
+ * @pin: Pin number
+ * @param: Parameter to get
+ * @value: Buffer to store parameter value
+ *
+ * This function gets requested configuration parameter for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+static int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
+ u32 *value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_GET, pin, param,
+ 0, 0, ret_payload);
+ *value = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_pinctrl_set_config - Set configuration parameter for the pin
+ * @pin: Pin number
+ * @param: Parameter to set
+ * @value: Parameter value to set
+ *
+ * This function sets requested configuration parameter for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+static int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
+ u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, pin,
+ param, value, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_config_reg_access - PM Config API for Config register access
+ * @register_access_id: ID of the requested REGISTER_ACCESS
+ * @address: Address of the register to be accessed
+ * @mask: Mask to be written to the register
+ * @value: Value to be written to the register
+ * @out: Returned output value
+ *
+ * This function calls REGISTER_ACCESS to configure CSU/PMU registers.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_config_reg_access(u32 register_access_id, u32 address,
+ u32 mask, u32 value, u32 *out)
+{
+ return zynqmp_pm_invoke_fn(PM_REGISTER_ACCESS, register_access_id,
+ address, mask, value, out);
+}
+
+/**
+ * zynqmp_pm_efuse_access - Provides access to efuse memory.
+ * @address: Address of the efuse params structure
+ * @out: Returned output value
+ *
+ * Return: Returns status, either success or error code.
+ */
+static int zynqmp_pm_efuse_access(const u64 address, u32 *out)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!out)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_EFUSE_ACCESS, upper_32_bits(address),
+ lower_32_bits(address), 0, 0, ret_payload);
+ *out = ret_payload[1];
+
+ return ret;
+}
+
+static int zynqmp_pm_secure_load(const u64 src_addr, u64 key_addr, u64 *dst)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret_value;
+
+ if (!dst)
+ return -EINVAL;
+
+ ret_value = zynqmp_pm_invoke_fn(PM_SECURE_IMAGE,
+ lower_32_bits(src_addr),
+ upper_32_bits(src_addr),
+ lower_32_bits(key_addr),
+ upper_32_bits(key_addr),
+ ret_payload);
+ *dst = ((u64)ret_payload[1] << 32) | ret_payload[2];
+
+ return ret_value;
+}
+
static const struct zynqmp_eemi_ops eemi_ops = {
.get_api_version = zynqmp_pm_get_api_version,
.get_chipid = zynqmp_pm_get_chipid,
@@ -687,6 +1243,29 @@ static const struct zynqmp_eemi_ops eemi_ops = {
.set_requirement = zynqmp_pm_set_requirement,
.fpga_load = zynqmp_pm_fpga_load,
.fpga_get_status = zynqmp_pm_fpga_get_status,
+ .fpga_read = zynqmp_pm_fpga_read,
+ .sha_hash = zynqmp_pm_sha_hash,
+ .rsa = zynqmp_pm_rsa,
+ .request_suspend = zynqmp_pm_request_suspend,
+ .force_powerdown = zynqmp_pm_force_powerdown,
+ .request_wakeup = zynqmp_pm_request_wakeup,
+ .set_wakeup_source = zynqmp_pm_set_wakeup_source,
+ .system_shutdown = zynqmp_pm_system_shutdown,
+ .set_max_latency = zynqmp_pm_set_max_latency,
+ .set_configuration = zynqmp_pm_set_configuration,
+ .get_node_status = zynqmp_pm_get_node_status,
+ .get_operating_characteristic = zynqmp_pm_get_operating_characteristic,
+ .pinctrl_request = zynqmp_pm_pinctrl_request,
+ .pinctrl_release = zynqmp_pm_pinctrl_release,
+ .pinctrl_get_function = zynqmp_pm_pinctrl_get_function,
+ .pinctrl_set_function = zynqmp_pm_pinctrl_set_function,
+ .pinctrl_get_config = zynqmp_pm_pinctrl_get_config,
+ .pinctrl_set_config = zynqmp_pm_pinctrl_set_config,
+ .register_access = zynqmp_pm_config_reg_access,
+ .aes = zynqmp_pm_aes_engine,
+ .efuse_access = zynqmp_pm_efuse_access,
+ .secure_image = zynqmp_pm_secure_load,
+ .pdi_load = zynqmp_pm_load_pdi,
};
/**
@@ -704,6 +1283,350 @@ const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
}
EXPORT_SYMBOL_GPL(zynqmp_pm_get_eemi_ops);
+/**
+ * struct zynqmp_pm_shutdown_scope - Struct for shutdown scope
+ * @subtype: Shutdown subtype
+ * @name: Matching string for scope argument
+ *
+ * This struct encapsulates mapping between shutdown scope ID and string.
+ */
+struct zynqmp_pm_shutdown_scope {
+ const enum zynqmp_pm_shutdown_subtype subtype;
+ const char *name;
+};
+
+static struct zynqmp_pm_shutdown_scope shutdown_scopes[] = {
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+ .name = "subsystem",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
+ .name = "ps_only",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+ .name = "system",
+ },
+};
+
+static struct zynqmp_pm_shutdown_scope *selected_scope =
+ &shutdown_scopes[ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM];
+
+/**
+ * zynqmp_pm_is_shutdown_scope_valid - Check if shutdown scope string is valid
+ * @scope_string: Shutdown scope string
+ *
+ * Return: Return pointer to matching shutdown scope struct from
+ * array of available options in system if string is valid,
+ * otherwise returns NULL.
+ */
+static struct zynqmp_pm_shutdown_scope *
+zynqmp_pm_is_shutdown_scope_valid(const char *scope_string)
+{
+ int count;
+
+ for (count = 0; count < ARRAY_SIZE(shutdown_scopes); count++)
+ if (sysfs_streq(scope_string, shutdown_scopes[count].name))
+ return &shutdown_scopes[count];
+
+ return NULL;
+}
+
+/**
+ * shutdown_scope_show - Show shutdown_scope sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: Requested available shutdown_scope attributes string
+ *
+ * User-space interface for viewing the available scope options for system
+ * shutdown. Scope option for next shutdown call is marked with [].
+ *
+ * Usage: cat /sys/firmware/zynqmp/shutdown_scope
+ *
+ * Return: Number of bytes printed into the buffer.
+ */
+static ssize_t shutdown_scope_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(shutdown_scopes); i++) {
+ if (&shutdown_scopes[i] == selected_scope) {
+ strcat(buf, "[");
+ strcat(buf, shutdown_scopes[i].name);
+ strcat(buf, "]");
+ } else {
+ strcat(buf, shutdown_scopes[i].name);
+ }
+ strcat(buf, " ");
+ }
+ strcat(buf, "\n");
+
+ return strlen(buf);
+}
+
+/**
+ * shutdown_scope_store - Store shutdown_scope sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: User entered shutdown_scope attribute string
+ * @count: Buffer size
+ *
+ * User-space interface for setting the scope for the next system shutdown.
+ * Usage: echo <scope> > /sys/firmware/zynqmp/shutdown_scope
+ *
+ * The Linux shutdown functionality implemented via PSCI system_off does not
+ * include an option to set a scope, i.e. which parts of the system to shut
+ * down.
+ *
+ * This API function allows setting the shutdown scope for the next shutdown
+ * request by passing it to the ATF running in EL3. When the next shutdown
+ * is performed, the platform specific portion of PSCI-system_off can use
+ * the chosen shutdown scope.
+ *
+ * subsystem: Only the APU along with all of its peripherals not used by other
+ * processing units will be shut down. This may result in the FPD
+ * power domain being shut down provided that no other processing
+ * unit uses FPD peripherals or DRAM.
+ * ps_only: The complete PS will be shut down, including the RPU, PMU, etc.
+ * Only the PL domain (FPGA) remains untouched.
+ * system: The complete system/device is shut down.
+ *
+ * Return: count argument if request succeeds, the corresponding error
+ * code otherwise
+ */
+static ssize_t shutdown_scope_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ struct zynqmp_pm_shutdown_scope *scope;
+
+ scope = zynqmp_pm_is_shutdown_scope_valid(buf);
+ if (!scope)
+ return -EINVAL;
+
+ ret = zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+ scope->subtype);
+ if (ret) {
+ pr_err("unable to set shutdown scope %s\n", buf);
+ return ret;
+ }
+
+ selected_scope = scope;
+
+ return count;
+}
+
+static struct kobj_attribute zynqmp_attr_shutdown_scope =
+ __ATTR_RW(shutdown_scope);
+
+/**
+ * health_status_store - Store health_status sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: User entered health_status attribute string
+ * @count: Buffer size
+ *
+ * User-space interface for setting the boot health status.
+ * Usage: echo <value> > /sys/firmware/zynqmp/health_status
+ *
+ * Value:
+ * 1 - Set healthy bit to 1
+ * 0 - Unset healthy bit
+ *
+ * Return: count argument if request succeeds, the corresponding error
+ * code otherwise
+ */
+static ssize_t health_status_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned int value;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_ioctl(0, IOCTL_SET_BOOT_HEALTH_STATUS, value, 0, NULL);
+ if (ret) {
+ pr_err("unable to set healthy bit value to %u\n", value);
+ return ret;
+ }
+
+ return count;
+}
+
+static struct kobj_attribute zynqmp_attr_health_status =
+ __ATTR_WO(health_status);
+
+/**
+ * config_reg_store - Write config_reg sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: User entered config_reg attribute string
+ * @count: Buffer size
+ *
+ * User-space interface for setting the config register.
+ *
+ * To write any CSU/PMU register
+ * echo <address> <mask> <value> > /sys/firmware/zynqmp/config_reg
+ * Usage:
+ * echo 0x345AB234 0xFFFFFFFF 0x1234ABCD > /sys/firmware/zynqmp/config_reg
+ *
+ * To read any CSU/PMU register, write only the address:
+ * echo <address> > /sys/firmware/zynqmp/config_reg
+ *
+ * Return: count argument if request succeeds, the corresponding error
+ * code otherwise
+ */
+static ssize_t config_reg_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ char *kern_buff, *inbuf, *tok;
+ unsigned long address, value, mask;
+ int ret;
+
+ kern_buff = kzalloc(count + 1, GFP_KERNEL);
+ if (!kern_buff)
+ return -ENOMEM;
+
+ strlcpy(kern_buff, buf, count + 1);
+
+ inbuf = kern_buff;
+
+ /* Read the address */
+ tok = strsep(&inbuf, " ");
+ if (!tok) {
+ ret = -EFAULT;
+ goto err;
+ }
+ ret = kstrtoul(tok, 16, &address);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+ /* Read the write value */
+ tok = strsep(&inbuf, " ");
+ /*
+ * If the only parameter provided is the address, this is a read
+ * operation: store the address in a global variable and retrieve
+ * it when the register is read back.
+ */
+ register_address = address;
+ if (!tok)
+ goto err;
+
+ ret = kstrtoul(tok, 16, &mask);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+ tok = strsep(&inbuf, " ");
+ if (!tok) {
+ ret = -EFAULT;
+ goto err;
+ }
+ ret = kstrtoul(tok, 16, &value);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+ ret = zynqmp_pm_config_reg_access(CONFIG_REG_WRITE, address,
+ mask, value, NULL);
+ if (ret)
+ pr_err("unable to write value to %lx\n", value);
+err:
+ kfree(kern_buff);
+ if (ret)
+ return ret;
+ return count;
+}
+
+/**
+ * config_reg_show - Read config_reg sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: Buffer to receive the register value string
+ *
+ * User-space interface for getting the config register.
+ *
+ * To read any CSU/PMU register, first write the address:
+ * echo <address> > /sys/firmware/zynqmp/config_reg
+ *
+ * Then read the value back:
+ * cat /sys/firmware/zynqmp/config_reg
+ *
+ * Return: number of chars written to buf.
+ */
+static ssize_t config_reg_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ ret = zynqmp_pm_config_reg_access(CONFIG_REG_READ, register_address,
+ 0, 0, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+static struct kobj_attribute zynqmp_attr_config_reg =
+ __ATTR_RW(config_reg);
+
+static struct attribute *attrs[] = {
+ &zynqmp_attr_shutdown_scope.attr,
+ &zynqmp_attr_health_status.attr,
+ &zynqmp_attr_config_reg.attr,
+ NULL,
+};
+
+static const struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+static int zynqmp_pm_sysfs_init(void)
+{
+ struct kobject *zynqmp_kobj;
+ int ret;
+
+ zynqmp_kobj = kobject_create_and_add("zynqmp", firmware_kobj);
+ if (!zynqmp_kobj) {
+ pr_err("zynqmp: Firmware kobj add failed.\n");
+ return -ENOMEM;
+ }
+
+ ret = sysfs_create_group(zynqmp_kobj, &attr_group);
+ if (ret) {
+ pr_err("%s() sysfs creation fail with error %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ ret = zynqmp_pm_ggs_init(zynqmp_kobj);
+ if (ret) {
+ pr_err("%s() GGS init failed with error %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ return 0;
+err:
+ kobject_put(zynqmp_kobj);
+ return ret;
+}
+
static int zynqmp_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -711,8 +1634,13 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
int ret;
np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp");
- if (!np)
- return 0;
+ if (!np) {
+ np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
+ if (!np)
+ return 0;
+
+ feature_check_enabled = true;
+ }
of_node_put(np);
ret = get_set_conduit_method(dev->of_node);
@@ -748,6 +1676,12 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
/* Assign eemi_ops_table */
eemi_ops_tbl = &eemi_ops;
+ ret = zynqmp_pm_sysfs_init();
+ if (ret) {
+ pr_err("%s() sysfs init fail with error %d\n", __func__, ret);
+ return ret;
+ }
+
zynqmp_pm_api_debugfs_init();
ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
@@ -770,6 +1704,7 @@ static int zynqmp_firmware_remove(struct platform_device *pdev)
static const struct of_device_id zynqmp_firmware_of_match[] = {
{.compatible = "xlnx,zynqmp-firmware"},
+ {.compatible = "xlnx,versal-firmware-wip"},
{},
};
MODULE_DEVICE_TABLE(of, zynqmp_firmware_of_match);
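A hedged user-space sketch of the /sys/firmware/zynqmp attributes added above (illustrative only; the register address, mask and value are examples):

    #include <stdio.h>

    static void sysfs_puts(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (f) {
                    fputs(val, f);
                    fclose(f);
            }
    }

    int main(void)
    {
            char line[128];
            FILE *f;

            /* List the shutdown scopes; the active one is bracketed */
            f = fopen("/sys/firmware/zynqmp/shutdown_scope", "r");
            if (f) {
                    if (fgets(line, sizeof(line), f))
                            printf("scopes: %s", line);
                    fclose(f);
            }

            /* Select ps_only for the next shutdown */
            sysfs_puts("/sys/firmware/zynqmp/shutdown_scope", "ps_only\n");

            /* Set the boot healthy bit */
            sysfs_puts("/sys/firmware/zynqmp/health_status", "1\n");

            /* Write a config register (<address> <mask> <value>), then read back */
            sysfs_puts("/sys/firmware/zynqmp/config_reg",
                       "0x345AB234 0xFFFFFFFF 0x1234ABCD\n");
            f = fopen("/sys/firmware/zynqmp/config_reg", "r");
            if (f) {
                    if (fgets(line, sizeof(line), f))
                            printf("config_reg = %s", line);
                    fclose(f);
            }
            return 0;
    }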
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 73c779e920ed..609ea88f4b70 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -91,6 +91,24 @@ config FPGA_MGR_TS73XX
FPGA manager driver support for the Altera Cyclone II FPGA
present on the TS-73xx SBC boards.
+config FPGA_MGR_ZYNQ_AFI_FPGA
+ bool "Xilinx AFI FPGA"
+ depends on FPGA_MGR_ZYNQ_FPGA
+ help
+ Zynq AFI driver support for writing to the AFI registers
+ to configure the PS-PL interface. For some bitstreams or
+ designs to work, the PS to PL interfaces need to be
+ configured, e.g. the data bus-width.
+
+config XILINX_AFI_FPGA
+ bool "Xilinx AFI FPGA"
+ depends on FPGA_MGR_ZYNQMP_FPGA || COMPILE_TEST
+ help
+ FPGA manager driver support for writing to the AFI registers
+ to configure the PS-PL interface. For some bitstreams or
+ designs to work, the PS to PL interfaces need to be
+ configured, e.g. the data width.
+
config FPGA_BRIDGE
tristate "FPGA Bridge Framework"
help
@@ -138,6 +156,13 @@ config OF_FPGA_REGION
Support for loading FPGA images by applying a Device Tree
overlay.
+config FPGA_MGR_DEBUG_FS
+ tristate "FPGA debug fs"
+ select DEBUG_FS
+ help
+ FPGA manager debugfs support provides an interface for reading
+ FPGA configuration information.
+
config FPGA_DFL
tristate "FPGA Device Feature List (DFL) support"
select FPGA_BRIDGE
@@ -215,4 +240,13 @@ config FPGA_MGR_ZYNQMP_FPGA
to configure the programmable logic(PL) through PS
on ZynqMP SoC.
+config FPGA_MGR_VERSAL_FPGA
+ tristate "Xilinx Versal FPGA"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ help
+ Select this option to enable FPGA manager driver support for
+ the Xilinx Versal SoC. This driver uses the Versal SoC firmware
+ interface to load programmable logic (PL) images
+ on the Versal SoC.
+
endif # FPGA
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 4865b74b00a4..ba9839cec7e9 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -17,7 +17,10 @@ obj-$(CONFIG_FPGA_MGR_STRATIX10_SOC) += stratix10-soc.o
obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o
obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o
obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
+obj-$(CONFIG_FPGA_MGR_ZYNQ_AFI_FPGA) += zynq-afi.o
obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o
+obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA) += versal-fpga.o
+obj-$(CONFIG_XILINX_AFI_FPGA) += xilinx-afi.o
obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o
obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
index 2cf25fd5e897..75b4b3ec933a 100644
--- a/drivers/fpga/altera-pr-ip-core.c
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -108,7 +108,7 @@ static int alt_pr_fpga_write(struct fpga_manager *mgr, const char *buf,
u32 *buffer_32 = (u32 *)buf;
size_t i = 0;
- if (count <= 0)
+ if (!count)
return -EINVAL;
/* Write out the complete 32-bit chunks */
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 4bab9028940a..b4088b14a800 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -13,6 +13,12 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+/* For enabling the manual bridge set (enable/disable) function */
+#ifdef CONFIG_DEBUG_KERNEL
+#undef DEBUG
+#define DEBUG
+#endif
+
static DEFINE_IDA(fpga_bridge_ida);
static struct class *fpga_bridge_class;
@@ -115,7 +121,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
/**
* fpga_bridge_get - get an exclusive reference to a fpga bridge
* @dev: parent device that fpga bridge was registered with
- * @info: fpga manager info
+ * @info: fpga image specific information
*
* Given a device, get an exclusive reference to a fpga bridge.
*
@@ -304,9 +310,33 @@ static ssize_t state_show(struct device *dev,
static DEVICE_ATTR_RO(name);
static DEVICE_ATTR_RO(state);
+#ifdef DEBUG
+static ssize_t set_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+ long enable;
+ int ret;
+
+ ret = kstrtol(buf, 16, &enable);
+ if (ret)
+ return ret;
+
+ if (bridge->br_ops && bridge->br_ops->enable_set)
+ enable = bridge->br_ops->enable_set(bridge, !!enable);
+
+ return count;
+}
+static DEVICE_ATTR_WO(set);
+#endif
+
static struct attribute *fpga_bridge_attrs[] = {
&dev_attr_name.attr,
&dev_attr_state.attr,
+#ifdef DEBUG
+ &dev_attr_set.attr,
+#endif
NULL,
};
ATTRIBUTE_GROUPS(fpga_bridge);
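With DEBUG in effect (CONFIG_DEBUG_KERNEL above), every bridge additionally exposes a write-only `set` attribute. A hedged sketch of toggling a bridge from user space (bridge devices are named brN; br0 here is an assumption):

    #include <stdio.h>

    /* The handler parses the written value with kstrtol(..., 16, ...) */
    static void bridge_set(const char *bridge, int enable)
    {
            char path[64];
            FILE *f;

            snprintf(path, sizeof(path), "/sys/class/fpga_bridge/%s/set", bridge);
            f = fopen(path, "w");
            if (f) {
                    fprintf(f, "%x\n", enable ? 1 : 0);
                    fclose(f);
            }
    }

    int main(void)
    {
            bridge_set("br0", 0);   /* disable the bridge */
            bridge_set("br0", 1);   /* re-enable it */
            return 0;
    }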
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index e05104f5e40c..108918971310 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -8,6 +8,7 @@
* With code from the mailing list:
* Copyright (C) 2013 Xilinx, Inc.
*/
+#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/idr.h>
@@ -328,6 +329,11 @@ static int fpga_mgr_firmware_load(struct fpga_manager *mgr,
mgr->state = FPGA_MGR_STATE_FIRMWARE_REQ;
+ /* flags indicates whether to do full or partial reconfiguration */
+ info->flags = mgr->flags;
+ memcpy(info->key, mgr->key, ENCRYPTED_KEY_LEN);
+ memcpy(info->iv, mgr->iv, ENCRYPTED_IV_LEN);
+
ret = request_firmware(&fw, image_name, dev);
if (ret) {
mgr->state = FPGA_MGR_STATE_FIRMWARE_REQ_ERR;
@@ -406,6 +412,91 @@ static ssize_t state_show(struct device *dev,
return sprintf(buf, "%s\n", state_str[mgr->state]);
}
+static ssize_t firmware_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+ unsigned int len;
+ char image_name[NAME_MAX];
+ int ret;
+
+ /* struct with information about the FPGA image to program. */
+ struct fpga_image_info info = {0};
+
+ /* lose terminating \n */
+ strscpy(image_name, buf, sizeof(image_name));
+ len = strlen(image_name);
+ if (len && image_name[len - 1] == '\n')
+ image_name[len - 1] = 0;
+
+ ret = fpga_mgr_firmware_load(mgr, &info, image_name);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t key_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+ return snprintf(buf, ENCRYPTED_KEY_LEN + 1, "%s\n", mgr->key);
+}
+
+static ssize_t key_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+ if (count > ENCRYPTED_KEY_LEN)
+ return -EINVAL;
+
+ memcpy(mgr->key, buf, count);
+
+ return count;
+}
+
+static ssize_t iv_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+ return snprintf(buf, ENCRYPTED_IV_LEN + 1, "%s\n", mgr->iv);
+}
+
+static ssize_t iv_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+ if (count > ENCRYPTED_IV_LEN)
+ return -EINVAL;
+
+ memcpy(mgr->iv, buf, count);
+
+ return count;
+}
+
+static ssize_t flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+ return sprintf(buf, "%lx\n", mgr->flags);
+}
+
+static ssize_t flags_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+ int ret;
+
+ ret = kstrtoul(buf, 16, &mgr->flags);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -428,17 +519,40 @@ static ssize_t status_show(struct device *dev,
len += sprintf(buf + len, "reconfig IP protocol error\n");
if (status & FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR)
len += sprintf(buf + len, "reconfig fifo overflow error\n");
+ if (status & FPGA_MGR_STATUS_SECURITY_ERR)
+ len += sprintf(buf + len, "reconfig security error\n");
+ if (status & FPGA_MGR_STATUS_DEVICE_INIT_ERR)
+ len += sprintf(buf + len,
+ "initialization has not finished\n");
+ if (status & FPGA_MGR_STATUS_SIGNAL_ERR)
+ len += sprintf(buf + len, "device internal signal error\n");
+ if (status & FPGA_MGR_STATUS_HIGH_Z_STATE_ERR)
+ len += sprintf(buf + len,
+ "all I/Os are placed in High-Z state\n");
+ if (status & FPGA_MGR_STATUS_EOS_ERR)
+ len += sprintf(buf + len,
+ "start-up sequence has not finished\n");
+ if (status & FPGA_MGR_STATUS_FIRMWARE_REQ_ERR)
+ len += sprintf(buf + len, "firmware request error\n");
return len;
}
static DEVICE_ATTR_RO(name);
static DEVICE_ATTR_RO(state);
+static DEVICE_ATTR_WO(firmware);
+static DEVICE_ATTR_RW(flags);
+static DEVICE_ATTR_RW(key);
+static DEVICE_ATTR_RW(iv);
static DEVICE_ATTR_RO(status);
static struct attribute *fpga_mgr_attrs[] = {
&dev_attr_name.attr,
&dev_attr_state.attr,
+ &dev_attr_firmware.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_key.attr,
+ &dev_attr_iv.attr,
&dev_attr_status.attr,
NULL,
};
@@ -512,6 +626,116 @@ void fpga_mgr_put(struct fpga_manager *mgr)
}
EXPORT_SYMBOL_GPL(fpga_mgr_put);
+#ifdef CONFIG_FPGA_MGR_DEBUG_FS
+#include <linux/debugfs.h>
+
+static int fpga_mgr_read(struct seq_file *s, void *data)
+{
+ struct fpga_manager *mgr = (struct fpga_manager *)s->private;
+ int ret = 0;
+
+ if (!mgr->mops->read)
+ return -ENOENT;
+
+ if (!mutex_trylock(&mgr->ref_mutex))
+ return -EBUSY;
+
+ if (mgr->state != FPGA_MGR_STATE_OPERATING) {
+ ret = -EPERM;
+ goto err_unlock;
+ }
+
+ /* Read the FPGA configuration data from the fabric */
+ ret = mgr->mops->read(mgr, s);
+ if (ret)
+ dev_err(&mgr->dev, "Error while reading configuration data from FPGA\n");
+
+err_unlock:
+ mutex_unlock(&mgr->ref_mutex);
+
+ return ret;
+}
+
+static int fpga_mgr_read_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fpga_mgr_read, inode->i_private);
+}
+
+static const struct file_operations fpga_mgr_ops_image = {
+ .owner = THIS_MODULE,
+ .open = fpga_mgr_read_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * fpga_mgr_debugfs_buf_load() - debugfs write function
+ * @file: User file
+ * @ptr: Fpga Image Address pointer
+ * @len: Length of the image
+ * @off: Offset within the file
+ *
+ * Return: Number of bytes if request succeeds,
+ * the corresponding error code otherwise
+ */
+static ssize_t fpga_mgr_debugfs_buf_load(struct file *file,
+ const char __user *ptr, size_t len,
+ loff_t *off)
+{
+ struct fpga_manager *mgr = file->private_data;
+ struct device *dev = &mgr->dev;
+ char *buf;
+ int ret = 0;
+
+ /* struct with information about the FPGA image to program. */
+ struct fpga_image_info info = {0};
+
+ /* flags indicates whether to do full or partial reconfiguration */
+ info.flags = mgr->flags;
+
+ ret = fpga_mgr_lock(mgr);
+ if (ret) {
+ dev_err(dev, "FPGA manager is busy\n");
+ return -EBUSY;
+ }
+
+ buf = vmalloc(len);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto mgr_unlock;
+ }
+
+ if (copy_from_user(buf, ptr, len)) {
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ info.buf = buf;
+ info.count = len;
+
+ ret = fpga_mgr_load(mgr, &info);
+ if (ret) {
+ dev_err(dev, "fpga_mgr_load returned with value %d\n\r", ret);
+ goto free_buf;
+ }
+
+free_buf:
+ vfree(buf);
+mgr_unlock:
+ fpga_mgr_unlock(mgr);
+
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static const struct file_operations fpga_mgr_ops_load = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = fpga_mgr_debugfs_buf_load,
+ .llseek = default_llseek,
+};
+#endif
+
/**
* fpga_mgr_lock - Lock FPGA manager for exclusive use
* @mgr: fpga manager
@@ -680,6 +904,9 @@ EXPORT_SYMBOL_GPL(devm_fpga_mgr_create);
int fpga_mgr_register(struct fpga_manager *mgr)
{
int ret;
+#ifdef CONFIG_FPGA_MGR_DEBUG_FS
+ struct dentry *d, *parent;
+#endif
/*
* Initialize framework state by requesting low level driver read state
@@ -692,6 +919,33 @@ int fpga_mgr_register(struct fpga_manager *mgr)
if (ret)
goto error_device;
+#ifdef CONFIG_FPGA_MGR_DEBUG_FS
+ mgr->dir = debugfs_create_dir("fpga", NULL);
+ if (!mgr->dir)
+ goto error_device;
+
+ parent = mgr->dir;
+ d = debugfs_create_dir(mgr->dev.kobj.name, parent);
+ if (!d) {
+ debugfs_remove_recursive(parent);
+ goto error_device;
+ }
+
+ parent = d;
+ d = debugfs_create_file("image", 0644, parent, mgr,
+ &fpga_mgr_ops_image);
+ if (!d) {
+ debugfs_remove_recursive(mgr->dir);
+ goto error_device;
+ }
+
+ d = debugfs_create_file("load", 0644, parent, mgr,
+ &fpga_mgr_ops_load);
+ if (!d) {
+ debugfs_remove_recursive(mgr->dir);
+ goto error_device;
+ }
+#endif
dev_info(&mgr->dev, "%s registered\n", mgr->name);
return 0;
@@ -713,6 +967,10 @@ void fpga_mgr_unregister(struct fpga_manager *mgr)
{
dev_info(&mgr->dev, "%s %s\n", __func__, mgr->name);
+#ifdef CONFIG_FPGA_MGR_DEBUG_FS
+ debugfs_remove_recursive(mgr->dir);
+#endif
+
/*
* If the low level driver provides a method for putting fpga into
* a desired state upon unregister, do it.
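Taken together, the new manager attributes allow a full load to be driven from user space. A hedged sketch (assumes manager fpga0; flag, key and iv values are placeholders, and the image must be reachable via request_firmware(), e.g. under /lib/firmware):

    #include <stdio.h>

    static int mgr_write(const char *attr, const char *val)
    {
            char path[128];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/class/fpga_manager/fpga0/%s", attr);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            /* Hex flags consumed by fpga_mgr_firmware_load() via mgr->flags */
            mgr_write("flags", "0\n");

            /* Optional key/iv for encrypted bitstreams (placeholders) */
            mgr_write("key", "placeholder-key");
            mgr_write("iv", "placeholder-iv");

            /* Trigger the load through the firmware attribute */
            return mgr_write("firmware", "design.bit\n");
    }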
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index bde5a9d460c5..9c8593e5a4fe 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/reset.h>
static DEFINE_IDA(fpga_region_ida);
static struct class *fpga_region_class;
@@ -98,6 +99,7 @@ int fpga_region_program_fpga(struct fpga_region *region)
struct device *dev = &region->dev;
struct fpga_image_info *info = region->info;
int ret;
+ struct reset_control *rstc;
region = fpga_region_get(region);
if (IS_ERR(region)) {
@@ -141,7 +143,15 @@ int fpga_region_program_fpga(struct fpga_region *region)
goto err_put_br;
}
+ rstc = of_reset_control_array_get(info->overlay, false, true, true);
+ if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
+ goto err_put_br;
+ }
+
+ reset_control_reset(rstc);
+ reset_control_put(rstc);
+
fpga_mgr_unlock(region->mgr);
+
fpga_region_put(region);
return 0;
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 559839a960b6..04452520fd81 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -218,9 +218,9 @@ static int s10_ops_write_init(struct fpga_manager *mgr,
/* Allocate buffers from the service layer's pool. */
for (i = 0; i < NUM_SVC_BUFS; i++) {
kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE);
- if (!kbuf) {
+ if (IS_ERR(kbuf)) {
s10_free_buffers(mgr);
- ret = -ENOMEM;
+ ret = PTR_ERR(kbuf);
goto init_done;
}
diff --git a/drivers/fpga/versal-fpga.c b/drivers/fpga/versal-fpga.c
new file mode 100644
index 000000000000..5538838c0bc8
--- /dev/null
+++ b/drivers/fpga/versal-fpga.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/string.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+/* Constant Definitions */
+#define PDI_SOURCE_TYPE 0xF
+
+/**
+ * struct versal_fpga_priv - Private data structure
+ * @dev: Device data structure
+ * @flags: flags which is used to identify the PL Image type
+ */
+struct versal_fpga_priv {
+ struct device *dev;
+ u32 flags;
+};
+
+static int versal_fpga_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t size)
+{
+ struct versal_fpga_priv *priv;
+
+ priv = mgr->priv;
+ priv->flags = info->flags;
+
+ return 0;
+}
+
+static int versal_fpga_ops_write(struct fpga_manager *mgr,
+ const char *buf, size_t size)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ struct versal_fpga_priv *priv;
+ dma_addr_t dma_addr;
+ char *kbuf;
+ int ret;
+
+ if (IS_ERR(eemi_ops) || !eemi_ops->pdi_load)
+ return -ENXIO;
+
+ priv = mgr->priv;
+
+ kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ memcpy(kbuf, buf, size);
+
+ wmb(); /* ensure all writes are done before initiate FW call */
+
+ ret = eemi_ops->pdi_load(PDI_SOURCE_TYPE, dma_addr);
+
+ dma_free_coherent(priv->dev, size, kbuf, dma_addr);
+
+ return ret;
+}
+
+static int versal_fpga_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ return 0;
+}
+
+static enum fpga_mgr_states versal_fpga_ops_state(struct fpga_manager *mgr)
+{
+ return FPGA_MGR_STATE_OPERATING;
+}
+
+static const struct fpga_manager_ops versal_fpga_ops = {
+ .state = versal_fpga_ops_state,
+ .write_init = versal_fpga_ops_write_init,
+ .write = versal_fpga_ops_write,
+ .write_complete = versal_fpga_ops_write_complete,
+};
+
+static int versal_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct versal_fpga_priv *priv;
+ struct fpga_manager *mgr;
+ int err, ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (ret < 0) {
+ dev_err(dev, "no usable DMA configuration");
+ return ret;
+ }
+
+ mgr = fpga_mgr_create(dev, "Xilinx Versal FPGA Manager",
+ &versal_fpga_ops, priv);
+ if (!mgr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mgr);
+
+ err = fpga_mgr_register(mgr);
+ if (err) {
+ dev_err(dev, "unable to register FPGA manager");
+ fpga_mgr_free(mgr);
+ return err;
+ }
+
+ return 0;
+}
+
+static int versal_fpga_remove(struct platform_device *pdev)
+{
+ struct fpga_manager *mgr = platform_get_drvdata(pdev);
+
+ fpga_mgr_unregister(mgr);
+ fpga_mgr_free(mgr);
+
+ return 0;
+}
+
+static const struct of_device_id versal_fpga_of_match[] = {
+ { .compatible = "xlnx,versal-fpga", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, versal_fpga_of_match);
+
+static struct platform_driver versal_fpga_driver = {
+ .probe = versal_fpga_probe,
+ .remove = versal_fpga_remove,
+ .driver = {
+ .name = "versal_fpga_manager",
+ .of_match_table = of_match_ptr(versal_fpga_of_match),
+ },
+};
+
+module_platform_driver(versal_fpga_driver);
+
+MODULE_AUTHOR("Nava kishore Manne <nava.manne@xilinx.com>");
+MODULE_AUTHOR("Appana Durga Kedareswara rao <appanad.durga.rao@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx Versal FPGA Manager");
+MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/xilinx-afi.c b/drivers/fpga/xilinx-afi.c
new file mode 100644
index 000000000000..ae3caf9849df
--- /dev/null
+++ b/drivers/fpga/xilinx-afi.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA AFI bridge.
+ * Copyright (c) 2018 Xilinx Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/**
+ * struct afi_fpga - AFI register description
+ * @value: value to be written to the register
+ * @regid: Register id for the register to be written
+ */
+struct afi_fpga {
+ u32 value;
+ u32 regid;
+};
+
+static int afi_fpga_probe(struct platform_device *pdev)
+{
+ struct afi_fpga *afi_fpga;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+ int i, entries, pairs;
+ u32 reg, val;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ if (!eemi_ops->ioctl)
+ return -ENOTSUPP;
+
+ afi_fpga = devm_kzalloc(&pdev->dev, sizeof(*afi_fpga), GFP_KERNEL);
+ if (!afi_fpga)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, afi_fpga);
+
+ entries = of_property_count_u32_elems(np, "config-afi");
+ if (entries <= 0 || (entries % 2)) {
+ dev_err(&pdev->dev, "Invalid number of registers\n");
+ return -EINVAL;
+ }
+ pairs = entries / 2;
+
+ for (i = 0; i < pairs; i++) {
+ ret = of_property_read_u32_index(np, "config-afi", i * 2,
+ &reg);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read register\n");
+ return -EINVAL;
+ }
+ ret = of_property_read_u32_index(np, "config-afi", i * 2 + 1,
+ &val);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read value\n");
+ return -EINVAL;
+ }
+ ret = eemi_ops->ioctl(0, IOCTL_AFI, reg, val, NULL);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "AFI register write error %d\n",
+ ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static const struct of_device_id afi_fpga_ids[] = {
+ { .compatible = "xlnx,afi-fpga" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, afi_fpga_ids);
+
+static struct platform_driver afi_fpga_driver = {
+ .driver = {
+ .name = "afi-fpga",
+ .of_match_table = afi_fpga_ids,
+ },
+ .probe = afi_fpga_probe,
+};
+module_platform_driver(afi_fpga_driver);
+
+MODULE_DESCRIPTION("FPGA afi module");
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/zynq-afi.c b/drivers/fpga/zynq-afi.c
new file mode 100644
index 000000000000..7ce0d089e878
--- /dev/null
+++ b/drivers/fpga/zynq-afi.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA AFI driver.
+ * Copyright (c) 2018 Xilinx Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/* Registers and special values for doing register-based operations */
+#define AFI_RDCHAN_CTRL_OFFSET 0x00
+#define AFI_WRCHAN_CTRL_OFFSET 0x14
+
+#define AFI_BUSWIDTH_MASK 0x01
+
+/**
+ * struct zynq_afi_fpga - AFI register description
+ * @membase: base address of the AFI registers
+ * @afi_width: AFI bus width to be written
+ */
+struct zynq_afi_fpga {
+ void __iomem *membase;
+ u32 afi_width;
+};
+
+static int zynq_afi_fpga_probe(struct platform_device *pdev)
+{
+ struct zynq_afi_fpga *afi_fpga;
+ struct resource *res;
+ u32 reg_val;
+ int ret;
+
+ afi_fpga = devm_kzalloc(&pdev->dev, sizeof(*afi_fpga), GFP_KERNEL);
+ if (!afi_fpga)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ afi_fpga->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(afi_fpga->membase))
+ return PTR_ERR(afi_fpga->membase);
+
+ ret = device_property_read_u32(&pdev->dev, "xlnx,afi-width",
+ &afi_fpga->afi_width);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get the AFI bus width\n");
+ return ret;
+ }
+
+ reg_val = readl(afi_fpga->membase + AFI_RDCHAN_CTRL_OFFSET);
+ reg_val &= ~AFI_BUSWIDTH_MASK;
+ writel(reg_val | afi_fpga->afi_width,
+ afi_fpga->membase + AFI_RDCHAN_CTRL_OFFSET);
+ reg_val = readl(afi_fpga->membase + AFI_WRCHAN_CTRL_OFFSET);
+ reg_val &= ~AFI_BUSWIDTH_MASK;
+ writel(reg_val | afi_fpga->afi_width,
+ afi_fpga->membase + AFI_WRCHAN_CTRL_OFFSET);
+
+ return 0;
+}
+
+static const struct of_device_id zynq_afi_fpga_ids[] = {
+ { .compatible = "xlnx,zynq-afi-fpga" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, zynq_afi_fpga_ids);
+
+static struct platform_driver zynq_afi_fpga_driver = {
+ .driver = {
+ .name = "zynq-afi-fpga",
+ .of_match_table = zynq_afi_fpga_ids,
+ },
+ .probe = zynq_afi_fpga_probe,
+};
+module_platform_driver(zynq_afi_fpga_driver);
+
+MODULE_DESCRIPTION("ZYNQ FPGA AFI module");
+MODULE_AUTHOR("Nava kishore Manne <nava.manne@xilinx.com>");
+MODULE_LICENSE("GPL v2");
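Both channel control registers get the same read-modify-write of the bus-width field. A small standalone sketch of that update with an invented current register value; it assumes, as the driver does, that afi_width only carries bits inside AFI_BUSWIDTH_MASK:

#include <stdint.h>
#include <stdio.h>

#define AFI_BUSWIDTH_MASK 0x01

/* Mirror of the driver's update: clear the width bit, then OR in the new one */
static uint32_t afi_update_width(uint32_t reg_val, uint32_t afi_width)
{
	return (reg_val & ~(uint32_t)AFI_BUSWIDTH_MASK) | afi_width;
}

int main(void)
{
	/* e.g. a control register that currently reads 0xf2, new width bit = 1 */
	printf("0x%x\n", (unsigned int)afi_update_width(0xf2, 1));
	return 0;
}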
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index b8a88d21d038..676b5d33b699 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2019 Xilinx, Inc.
+ * Copyright (C) 2016 - 2019 Xilinx, Inc.
*/
+#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/io.h>
@@ -10,19 +11,86 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/string.h>
+#include <linux/seq_file.h>
#include <linux/firmware/xlnx-zynqmp.h>
/* Constant Definitions */
#define IXR_FPGA_DONE_MASK BIT(3)
+#define IXR_FPGA_ENCRYPTION_EN 0x00000008U
+
+#define READ_DMA_SIZE 0x200
+#define DUMMY_FRAMES_SIZE 0x64
+#define PCAP_READ_CLKFREQ 25000000
+
+/* Error Register */
+#define IXR_FPGA_ERR_CRC_ERR BIT(0)
+#define IXR_FPGA_ERR_SECURITY_ERR BIT(16)
+
+/* Signal Status Register */
+#define IXR_FPGA_END_OF_STARTUP BIT(4)
+#define IXR_FPGA_GST_CFG_B BIT(5)
+#define IXR_FPGA_INIT_B_INTERNAL BIT(11)
+#define IXR_FPGA_DONE_INTERNAL_SIGNAL BIT(13)
+
+#define IXR_FPGA_CONFIG_STAT_OFFSET 7U
+#define IXR_FPGA_READ_CONFIG_TYPE 0U
+
+static bool readback_type;
+module_param(readback_type, bool, 0644);
+MODULE_PARM_DESC(readback_type,
+ "readback_type 0-configuration register read "
+ "1- configuration data read (default: 0)");
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+/**
+ * struct zynqmp_configreg - Configuration register offsets
+ * @reg: Name of the configuration register.
+ * @offset: Register offset.
+ */
+struct zynqmp_configreg {
+ char *reg;
+ u32 offset;
+};
+
+static struct zynqmp_configreg cfgreg[] = {
+ {.reg = "CRC", .offset = 0},
+ {.reg = "FAR", .offset = 1},
+ {.reg = "FDRI", .offset = 2},
+ {.reg = "FDRO", .offset = 3},
+ {.reg = "CMD", .offset = 4},
+ {.reg = "CTRL0", .offset = 5},
+ {.reg = "MASK", .offset = 6},
+ {.reg = "STAT", .offset = 7},
+ {.reg = "LOUT", .offset = 8},
+ {.reg = "COR0", .offset = 9},
+ {.reg = "MFWR", .offset = 10},
+ {.reg = "CBC", .offset = 11},
+ {.reg = "IDCODE", .offset = 12},
+ {.reg = "AXSS", .offset = 13},
+ {.reg = "COR1", .offset = 14},
+ {.reg = "WBSTR", .offset = 16},
+ {.reg = "TIMER", .offset = 17},
+ {.reg = "BOOTSTS", .offset = 22},
+ {.reg = "CTRL1", .offset = 24},
+ {}
+};
/**
* struct zynqmp_fpga_priv - Private data structure
* @dev: Device data structure
+ * @lock: Mutex lock for device
+ * @clk: Clock resource for pcap controller
+ * @key: AES key for programming an encrypted bitstream
 * @flags: Flags used to identify the bitstream type
+ * @size: Size of the Bitstream used for readback
*/
struct zynqmp_fpga_priv {
struct device *dev;
+ struct mutex lock;
+ struct clk *clk;
+ char *key;
u32 flags;
+ u32 size;
};
static int zynqmp_fpga_ops_write_init(struct fpga_manager *mgr,
@@ -33,40 +101,62 @@ static int zynqmp_fpga_ops_write_init(struct fpga_manager *mgr,
priv = mgr->priv;
priv->flags = info->flags;
+ priv->key = info->key;
return 0;
}
static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
- const char *buf, size_t size)
+ const char *buf, size_t size)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
struct zynqmp_fpga_priv *priv;
- dma_addr_t dma_addr;
- u32 eemi_flags = 0;
char *kbuf;
+ size_t dma_size;
+ dma_addr_t dma_addr;
int ret;
- if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_load)
+ if (!eemi_ops->fpga_load)
return -ENXIO;
priv = mgr->priv;
+ priv->size = size;
- kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
+ if (!mutex_trylock(&priv->lock))
+ return -EBUSY;
- memcpy(kbuf, buf, size);
+ ret = clk_enable(priv->clk);
+ if (ret)
+ goto err_unlock;
- wmb(); /* ensure all writes are done before initiate FW call */
+ if (priv->flags & IXR_FPGA_ENCRYPTION_EN)
+ dma_size = size + ENCRYPTED_KEY_LEN;
+ else
+ dma_size = size;
- if (priv->flags & FPGA_MGR_PARTIAL_RECONFIG)
- eemi_flags |= XILINX_ZYNQMP_PM_FPGA_PARTIAL;
+ kbuf = dma_alloc_coherent(priv->dev, dma_size, &dma_addr, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto disable_clk;
+ }
- ret = eemi_ops->fpga_load(dma_addr, size, eemi_flags);
+ memcpy(kbuf, buf, size);
- dma_free_coherent(priv->dev, size, kbuf, dma_addr);
+ if (priv->flags & IXR_FPGA_ENCRYPTION_EN)
+ memcpy(kbuf + size, priv->key, ENCRYPTED_KEY_LEN);
+ wmb(); /* ensure all writes are done before initiate FW call */
+
+ if (priv->flags & IXR_FPGA_ENCRYPTION_EN)
+ ret = eemi_ops->fpga_load(dma_addr, dma_addr + size,
+ priv->flags);
+ else
+ ret = eemi_ops->fpga_load(dma_addr, size, priv->flags);
+
+ dma_free_coherent(priv->dev, dma_size, kbuf, dma_addr);
+disable_clk:
+ clk_disable(priv->clk);
+err_unlock:
+ mutex_unlock(&priv->lock);
return ret;
}
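For encrypted bitstreams the path above appends the AES key right after the bitstream inside one coherent DMA buffer and hands the firmware the key's DMA address (base + size) as the second fpga_load argument. A sketch of the layout arithmetic; ENCRYPTED_KEY_LEN really comes from the xlnx-zynqmp firmware header, so the 64 used here is only an assumed value:

#include <stdio.h>
#include <stddef.h>

#define ENCRYPTED_KEY_LEN 64	/* assumed; the real value comes from xlnx-zynqmp.h */

int main(void)
{
	size_t bitstream_size = 0x100000;	/* hypothetical 1 MiB bitstream */
	int encrypted = 1;
	size_t dma_size = encrypted ? bitstream_size + ENCRYPTED_KEY_LEN
				    : bitstream_size;

	/* Buffer layout: [ bitstream | key ], key DMA address = base + size */
	printf("dma_size = %zu, key offset = %zu\n", dma_size, bitstream_size);
	return 0;
}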
@@ -78,10 +168,9 @@ static int zynqmp_fpga_ops_write_complete(struct fpga_manager *mgr,
static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
u32 status;
- if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_get_status)
+ if (!eemi_ops->fpga_get_status)
return FPGA_MGR_STATE_UNKNOWN;
eemi_ops->fpga_get_status(&status);
@@ -91,25 +180,195 @@ static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
return FPGA_MGR_STATE_UNKNOWN;
}
+static u64 zynqmp_fpga_ops_status(struct fpga_manager *mgr)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ unsigned int *buf, reg_val;
+ dma_addr_t dma_addr;
+ u64 status = 0;
+ int ret;
+
+ if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_read)
+ return FPGA_MGR_STATUS_FIRMWARE_REQ_ERR;
+
+ buf = dma_alloc_coherent(mgr->dev.parent, READ_DMA_SIZE,
+ &dma_addr, GFP_KERNEL);
+ if (!buf)
+ return FPGA_MGR_STATUS_FIRMWARE_REQ_ERR;
+
+ ret = eemi_ops->fpga_read(IXR_FPGA_CONFIG_STAT_OFFSET, dma_addr,
+ IXR_FPGA_READ_CONFIG_TYPE, &reg_val);
+ if (ret) {
+ status = FPGA_MGR_STATUS_FIRMWARE_REQ_ERR;
+ goto free_dmabuf;
+ }
+
+ if (reg_val & IXR_FPGA_ERR_CRC_ERR)
+ status |= FPGA_MGR_STATUS_CRC_ERR;
+ if (reg_val & IXR_FPGA_ERR_SECURITY_ERR)
+ status |= FPGA_MGR_STATUS_SECURITY_ERR;
+ if (!(reg_val & IXR_FPGA_INIT_B_INTERNAL))
+ status |= FPGA_MGR_STATUS_DEVICE_INIT_ERR;
+ if (!(reg_val & IXR_FPGA_DONE_INTERNAL_SIGNAL))
+ status |= FPGA_MGR_STATUS_SIGNAL_ERR;
+ if (!(reg_val & IXR_FPGA_GST_CFG_B))
+ status |= FPGA_MGR_STATUS_HIGH_Z_STATE_ERR;
+ if (!(reg_val & IXR_FPGA_END_OF_STARTUP))
+ status |= FPGA_MGR_STATUS_EOS_ERR;
+
+free_dmabuf:
+ dma_free_coherent(mgr->dev.parent, READ_DMA_SIZE, buf, dma_addr);
+
+ return status;
+}
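The decode above mixes two polarities: the CRC and security bits flag an error when set, while the init/done/startup/high-Z bits flag one when clear. A compile-and-run sketch of the same decode against an invented STAT value:

#include <stdint.h>
#include <stdio.h>

#define IXR_FPGA_ERR_CRC_ERR		(1u << 0)
#define IXR_FPGA_ERR_SECURITY_ERR	(1u << 16)
#define IXR_FPGA_END_OF_STARTUP		(1u << 4)
#define IXR_FPGA_GST_CFG_B		(1u << 5)
#define IXR_FPGA_INIT_B_INTERNAL	(1u << 11)
#define IXR_FPGA_DONE_INTERNAL_SIGNAL	(1u << 13)

int main(void)
{
	/* Hypothetical STAT: INIT_B and end-of-startup set, DONE and GST_CFG_B clear */
	uint32_t reg_val = IXR_FPGA_INIT_B_INTERNAL | IXR_FPGA_END_OF_STARTUP;

	if (reg_val & IXR_FPGA_ERR_CRC_ERR)
		puts("CRC error");
	if (reg_val & IXR_FPGA_ERR_SECURITY_ERR)
		puts("security error");
	if (!(reg_val & IXR_FPGA_INIT_B_INTERNAL))
		puts("device init error");
	if (!(reg_val & IXR_FPGA_DONE_INTERNAL_SIGNAL))
		puts("DONE signal error");
	if (!(reg_val & IXR_FPGA_GST_CFG_B))
		puts("high-Z state error");
	if (!(reg_val & IXR_FPGA_END_OF_STARTUP))
		puts("end-of-startup error");
	return 0;
}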
+
+static int zynqmp_fpga_read_cfgreg(struct fpga_manager *mgr,
+ struct seq_file *s)
+{
+ struct zynqmp_fpga_priv *priv = mgr->priv;
+ int ret, val;
+ unsigned int *buf;
+ dma_addr_t dma_addr;
+ struct zynqmp_configreg *p = cfgreg;
+
+ ret = clk_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ buf = dma_alloc_coherent(mgr->dev.parent, READ_DMA_SIZE,
+ &dma_addr, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto disable_clk;
+ }
+
+ seq_puts(s, "zynqMP FPGA Configuration register contents are\n");
+
+ while (p->reg) {
+ ret = eemi_ops->fpga_read(p->offset, dma_addr, readback_type,
+ &val);
+ if (ret)
+ goto free_dmabuf;
+ seq_printf(s, "%s --> \t %x \t\r\n", p->reg, val);
+ p++;
+ }
+
+free_dmabuf:
+ dma_free_coherent(mgr->dev.parent, READ_DMA_SIZE, buf,
+ dma_addr);
+disable_clk:
+ clk_disable(priv->clk);
+
+ return ret;
+}
+
+static int zynqmp_fpga_read_cfgdata(struct fpga_manager *mgr,
+ struct seq_file *s)
+{
+ struct zynqmp_fpga_priv *priv;
+ int ret, data_offset;
+ unsigned int *buf;
+ dma_addr_t dma_addr;
+ size_t size;
+ int clk_rate;
+
+ priv = mgr->priv;
+ size = priv->size + READ_DMA_SIZE + DUMMY_FRAMES_SIZE;
+
+ /*
+ * There is no h/w flow control for PCAP reads; to keep
+ * the FIFO from overflowing, reduce the PCAP operating
+ * frequency.
+ */
+ clk_rate = clk_get_rate(priv->clk);
+ clk_unprepare(priv->clk);
+ ret = clk_set_rate(priv->clk, PCAP_READ_CLKFREQ);
+ if (ret) {
+ dev_err(&mgr->dev, "Unable to reduce the PCAP freq %d\n", ret);
+ goto prepare_clk;
+ }
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(&mgr->dev, "Cannot enable clock.\n");
+ goto restore_pcap_clk;
+ }
+
+ buf = dma_alloc_coherent(mgr->dev.parent, size, &dma_addr,
+ GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto disable_clk;
+ }
+
+ seq_puts(s, "zynqMP FPGA Configuration data contents are\n");
+ ret = eemi_ops->fpga_read((priv->size + DUMMY_FRAMES_SIZE) / 4,
+ dma_addr, readback_type, &data_offset);
+ if (ret)
+ goto free_dmabuf;
+
+ seq_write(s, &buf[data_offset], priv->size);
+
+free_dmabuf:
+ dma_free_coherent(mgr->dev.parent, size, buf, dma_addr);
+disable_clk:
+ clk_disable_unprepare(priv->clk);
+restore_pcap_clk:
+ clk_set_rate(priv->clk, clk_rate);
+prepare_clk:
+ clk_prepare(priv->clk);
+
+ return ret;
+}
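The readback buffer is sized for the bitstream plus READ_DMA_SIZE plus DUMMY_FRAMES_SIZE, and the firmware call takes its length in 32-bit words, hence the division by four. A quick sketch of that arithmetic for a hypothetical bitstream size:

#include <stdio.h>
#include <stddef.h>

#define READ_DMA_SIZE		0x200
#define DUMMY_FRAMES_SIZE	0x64

int main(void)
{
	size_t bitstream_size = 0x40000;	/* hypothetical */
	size_t buf_size = bitstream_size + READ_DMA_SIZE + DUMMY_FRAMES_SIZE;

	/* The firmware call takes a word count, hence the division by 4 */
	printf("buffer %zu bytes, read length %zu words\n",
	       buf_size, (bitstream_size + DUMMY_FRAMES_SIZE) / 4);
	return 0;
}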
+
+static int zynqmp_fpga_ops_read(struct fpga_manager *mgr, struct seq_file *s)
+{
+ struct zynqmp_fpga_priv *priv = mgr->priv;
+ int ret;
+
+ if (!eemi_ops->fpga_read)
+ return -ENXIO;
+
+ if (!mutex_trylock(&priv->lock))
+ return -EBUSY;
+
+ if (readback_type)
+ ret = zynqmp_fpga_read_cfgdata(mgr, s);
+ else
+ ret = zynqmp_fpga_read_cfgreg(mgr, s);
+
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
static const struct fpga_manager_ops zynqmp_fpga_ops = {
.state = zynqmp_fpga_ops_state,
+ .status = zynqmp_fpga_ops_status,
.write_init = zynqmp_fpga_ops_write_init,
.write = zynqmp_fpga_ops_write,
.write_complete = zynqmp_fpga_ops_write_complete,
+ .read = zynqmp_fpga_ops_read,
};
static int zynqmp_fpga_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct zynqmp_fpga_priv *priv;
- struct fpga_manager *mgr;
int ret;
+ struct fpga_manager *mgr;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
+ mutex_init(&priv->lock);
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (ret < 0)
+ dev_err(dev, "no usable DMA configuration");
mgr = devm_fpga_mgr_create(dev, "Xilinx ZynqMP FPGA Manager",
&zynqmp_fpga_ops, priv);
@@ -118,9 +377,23 @@ static int zynqmp_fpga_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mgr);
+ priv->clk = devm_clk_get(dev, "ref_clk");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ dev_err(dev, "failed to to get pcp ref_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare(priv->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable clock.\n");
+ return ret;
+ }
+
ret = fpga_mgr_register(mgr);
if (ret) {
dev_err(dev, "unable to register FPGA manager");
+ clk_unprepare(priv->clk);
return ret;
}
@@ -129,9 +402,14 @@ static int zynqmp_fpga_probe(struct platform_device *pdev)
static int zynqmp_fpga_remove(struct platform_device *pdev)
{
- struct fpga_manager *mgr = platform_get_drvdata(pdev);
+ struct zynqmp_fpga_priv *priv;
+ struct fpga_manager *mgr;
+
+ mgr = platform_get_drvdata(pdev);
+ priv = mgr->priv;
fpga_mgr_unregister(mgr);
+ clk_unprepare(priv->clk);
return 0;
}
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index cb980a60af0e..09e571578232 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -1271,6 +1271,9 @@ int fsi_master_register(struct fsi_master *master)
mutex_init(&master->scan_lock);
master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL);
+ if (master->idx < 0)
+ return master->idx;
+
dev_set_name(&master->dev, "fsi%d", master->idx);
rc = device_register(&master->dev);
diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
index 04d10ea8d343..a7fc04bf6550 100644
--- a/drivers/fsi/fsi-master-ast-cf.c
+++ b/drivers/fsi/fsi-master-ast-cf.c
@@ -1438,3 +1438,4 @@ static struct platform_driver fsi_master_acf = {
module_platform_driver(fsi_master_acf);
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FW_FILE_NAME);
diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
index c8ccc99e214f..84a60d2d8e8a 100644
--- a/drivers/fsi/fsi-sbefifo.c
+++ b/drivers/fsi/fsi-sbefifo.c
@@ -640,7 +640,7 @@ static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo)
}
ffdc_iov.iov_base = ffdc;
ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE;
- iov_iter_kvec(&ffdc_iter, WRITE, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
+ iov_iter_kvec(&ffdc_iter, READ, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
cmd[0] = cpu_to_be32(2);
cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC);
rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter);
@@ -737,7 +737,7 @@ int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
rbytes = (*resp_len) * sizeof(__be32);
resp_iov.iov_base = response;
resp_iov.iov_len = rbytes;
- iov_iter_kvec(&resp_iter, WRITE, &resp_iov, 1, rbytes);
+ iov_iter_kvec(&resp_iter, READ, &resp_iov, 1, rbytes);
/* Perform the command */
mutex_lock(&sbefifo->lock);
@@ -817,7 +817,7 @@ static ssize_t sbefifo_user_read(struct file *file, char __user *buf,
/* Prepare iov iterator */
resp_iov.iov_base = buf;
resp_iov.iov_len = len;
- iov_iter_init(&resp_iter, WRITE, &resp_iov, 1, len);
+ iov_iter_init(&resp_iter, READ, &resp_iov, 1, len);
/* Perform the command */
mutex_lock(&sbefifo->lock);
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index e81307f9754e..30aa7f82fc5b 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -128,8 +128,6 @@ static int gen_74x164_probe(struct spi_device *spi)
if (IS_ERR(chip->gpiod_oe))
return PTR_ERR(chip->gpiod_oe);
- gpiod_set_value_cansleep(chip->gpiod_oe, 1);
-
spi_set_drvdata(spi, chip);
chip->gpio_chip.label = spi->modalias;
@@ -154,6 +152,8 @@ static int gen_74x164_probe(struct spi_device *spi)
goto exit_destroy;
}
+ gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+
ret = gpiochip_add_data(&chip->gpio_chip, chip);
if (!ret)
return 0;
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index fdcebe59510d..68d95051dd0e 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -231,7 +231,10 @@ found:
ioport_unmap(gp.pm);
goto out;
}
+ return 0;
+
out:
+ pci_dev_put(pdev);
return err;
}
@@ -239,6 +242,7 @@ static void __exit amd_gpio_exit(void)
{
gpiochip_remove(&gp.chip);
ioport_unmap(gp.pm);
+ pci_dev_put(gp.pdev);
}
module_init(amd_gpio_init);
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 22e0d6fcab1c..b7e93d10a6ab 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -967,7 +967,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
else if (param == PIN_CONFIG_BIAS_DISABLE ||
param == PIN_CONFIG_BIAS_PULL_DOWN ||
param == PIN_CONFIG_DRIVE_STRENGTH)
- return pinctrl_gpio_set_config(offset, config);
+ return pinctrl_gpio_set_config(chip->base + offset, config);
else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN ||
param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
/* Return -ENOTSUPP to trigger emulation, as per datasheet */
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index e0b025689625..576cb2d0708f 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -333,7 +333,7 @@ static struct irq_chip gpio_irqchip = {
.irq_enable = gpio_irq_enable,
.irq_disable = gpio_irq_disable,
.irq_set_type = gpio_irq_type,
- .flags = IRQCHIP_SET_TYPE_MASKED,
+ .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
};
static void gpio_irq_handler(struct irq_desc *desc)
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index a69b3faf51ef..e50e27304c38 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -318,20 +318,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
switch (flow_type) {
case IRQ_TYPE_LEVEL_HIGH:
sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
break;
case IRQ_TYPE_LEVEL_LOW:
sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
break;
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
state = sprd_eic_get(chip, offset);
- if (state)
+ if (state) {
sprd_eic_update(chip, offset,
SPRD_EIC_DBNC_IEV, 0);
- else
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_DBNC_IC, 1);
+ } else {
sprd_eic_update(chip, offset,
SPRD_EIC_DBNC_IEV, 1);
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_DBNC_IC, 1);
+ }
break;
default:
return -ENOTSUPP;
@@ -343,20 +350,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
switch (flow_type) {
case IRQ_TYPE_LEVEL_HIGH:
sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
break;
case IRQ_TYPE_LEVEL_LOW:
sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
break;
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
state = sprd_eic_get(chip, offset);
- if (state)
+ if (state) {
sprd_eic_update(chip, offset,
SPRD_EIC_LATCH_INTPOL, 0);
- else
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_LATCH_INTCLR, 1);
+ } else {
sprd_eic_update(chip, offset,
SPRD_EIC_LATCH_INTPOL, 1);
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_LATCH_INTCLR, 1);
+ }
break;
default:
return -ENOTSUPP;
@@ -370,29 +384,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_FALLING:
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_BOTH:
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_LEVEL_HIGH:
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_level_irq);
break;
case IRQ_TYPE_LEVEL_LOW:
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_level_irq);
break;
default:
@@ -405,29 +424,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_FALLING:
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_BOTH:
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_LEVEL_HIGH:
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_level_irq);
break;
case IRQ_TYPE_LEVEL_LOW:
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_level_irq);
break;
default:
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 9c1c4d81aa7b..3e983d98837b 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -339,7 +339,7 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
priv->offset = i;
priv->desc = &gc->gpiodev->descs[i];
- debugfs_create_file(name, 0200, chip->dbg_dir, priv,
+ debugfs_create_file(name, 0600, chip->dbg_dir, priv,
&gpio_mockup_debugfs_ops);
}
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index d72a3a5507b0..f3bf82efea8e 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -190,6 +190,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
switch (flow_type) {
case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index c77d474185f3..2e4b6b176875 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -229,7 +229,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
writel(1 << gpio_idx, port->base + GPIO_ISR);
- return 0;
+ return port->gc.direction_input(&port->gc, gpio_idx);
}
static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 317f54f19477..c81d73d5e015 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -1198,7 +1198,9 @@ static int pca953x_suspend(struct device *dev)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
+ mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, true);
+ mutex_unlock(&chip->i2c_lock);
if (atomic_read(&chip->wakeup_path))
device_set_wakeup_path(dev);
@@ -1221,13 +1223,17 @@ static int pca953x_resume(struct device *dev)
}
}
+ mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, false);
regcache_mark_dirty(chip->regmap);
ret = pca953x_regcache_sync(dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&chip->i2c_lock);
return ret;
+ }
ret = regcache_sync(chip->regmap);
+ mutex_unlock(&chip->i2c_lock);
if (ret) {
dev_err(dev, "Failed to restore register map: %d\n", ret);
return ret;
diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c
index 05000cace9b2..abe01518bf19 100644
--- a/drivers/gpio/gpio-pmic-eic-sprd.c
+++ b/drivers/gpio/gpio-pmic-eic-sprd.c
@@ -338,6 +338,7 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
pmic_eic->chip.set_config = sprd_pmic_eic_set_config;
pmic_eic->chip.set = sprd_pmic_eic_set;
pmic_eic->chip.get = sprd_pmic_eic_get;
+ pmic_eic->chip.can_sleep = true;
pmic_eic->intc.name = dev_name(&pdev->dev);
pmic_eic->intc.irq_mask = sprd_pmic_eic_irq_mask;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 432c487f77b4..5c770b7891f7 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -243,6 +243,7 @@ static bool pxa_gpio_has_pinctrl(void)
switch (gpio_type) {
case PXA3XX_GPIO:
case MMP2_GPIO:
+ case MMP_GPIO:
return false;
default:
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 5e375186f90e..2a5e6263570f 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -195,7 +195,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
IRQ_GC_INIT_MASK_CACHE);
if (ret)
- return ret;
+ goto err_remove_domain;
gc = tb10x_gpio->domain->gc->gc[0];
gc->reg_base = tb10x_gpio->base;
@@ -209,6 +209,10 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
}
return 0;
+
+err_remove_domain:
+ irq_domain_remove(tb10x_gpio->domain);
+ return ret;
}
static int tb10x_gpio_remove(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index de14949a3fe5..92c1f2baa4bf 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -43,9 +43,10 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
unsigned offset, bool enabled)
{
struct timbgpio *tgpio = gpiochip_get_data(gpio);
+ unsigned long flags;
u32 reg;
- spin_lock(&tgpio->lock);
+ spin_lock_irqsave(&tgpio->lock, flags);
reg = ioread32(tgpio->membase + offset);
if (enabled)
@@ -54,7 +55,7 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
reg &= ~(1 << index);
iowrite32(reg, tgpio->membase + offset);
- spin_unlock(&tgpio->lock);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
return 0;
}
diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c
index aff6e504c666..9704cff9b4aa 100644
--- a/drivers/gpio/gpio-tps68470.c
+++ b/drivers/gpio/gpio-tps68470.c
@@ -91,13 +91,13 @@ static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset,
struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+ /* Set the initial value */
+ tps68470_gpio_set(gc, offset, value);
+
/* rest are always outputs */
if (offset >= TPS68470_N_REGULAR_GPIO)
return 0;
- /* Set the initial value */
- tps68470_gpio_set(gc, offset, value);
-
return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(offset),
TPS68470_GPIO_MODE_MASK,
TPS68470_GPIO_MODE_OUT_CMOS);
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 1ae612c796ee..c2c38f13801f 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -127,14 +127,14 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
unsigned long mask = BIT(gpio);
u32 val;
+ vf610_gpio_set(chip, gpio, value);
+
if (port->sdata && port->sdata->have_paddr) {
val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
val |= mask;
vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
}
- vf610_gpio_set(chip, gpio, value);
-
return pinctrl_gpio_direction_output(chip->base + gpio);
}
@@ -304,7 +304,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
gc = &port->gc;
gc->of_node = np;
gc->parent = dev;
- gc->label = "vf610-gpio";
+ gc->label = dev_name(dev);
gc->ngpio = VF610_GPIO_PER_PORT;
gc->base = of_alias_get_id(np, "gpio") * VF610_GPIO_PER_PORT;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 67f9f82e0db0..3e512c8e0382 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -10,19 +10,31 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
/* Register Offset Definitions */
-#define XGPIO_DATA_OFFSET (0x0) /* Data register */
-#define XGPIO_TRI_OFFSET (0x4) /* I/O direction register */
+#define XGPIO_DATA_OFFSET 0x0 /* Data register */
+#define XGPIO_TRI_OFFSET 0x4 /* I/O direction register */
+#define XGPIO_GIER_OFFSET 0x11c /* Global Interrupt Enable */
+#define XGPIO_GIER_IE BIT(31)
+
+#define XGPIO_IPISR_OFFSET 0x120 /* IP Interrupt Status */
+#define XGPIO_IPIER_OFFSET 0x128 /* IP Interrupt Enable */
#define XGPIO_CHANNEL_OFFSET 0x8
/* Read/Write access to the GPIO registers */
-#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_X86)
+#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_ARM64)
# define xgpio_readreg(offset) readl(offset)
# define xgpio_writereg(offset, val) writel(val, offset)
#else
@@ -34,44 +46,30 @@
* struct xgpio_instance - Stores information about GPIO device
* @gc: GPIO chip
* @regs: register block
- * @gpio_width: GPIO width for every channel
* @gpio_state: GPIO state shadow register
* @gpio_dir: GPIO direction shadow register
+ * @offset: GPIO channel offset
+ * @irq_base: First Linux irq number allocated for this channel
+ * @irq_enable: GPIO irq enable/disable bitfield
+ * @no_init: Skip register initialisation at probe
* @gpio_lock: Lock used for synchronization
+ * @irq_domain: irq_domain of the controller
+ * @clk: clock resource for this driver
*/
struct xgpio_instance {
struct gpio_chip gc;
void __iomem *regs;
- unsigned int gpio_width[2];
- u32 gpio_state[2];
- u32 gpio_dir[2];
- spinlock_t gpio_lock[2];
+ u32 gpio_state;
+ u32 gpio_dir;
+ u32 offset;
+ int irq_base;
+ u32 irq_enable;
+ bool no_init;
+ spinlock_t gpio_lock;
+ struct irq_domain *irq_domain;
+ struct clk *clk;
};
-static inline int xgpio_index(struct xgpio_instance *chip, int gpio)
-{
- if (gpio >= chip->gpio_width[0])
- return 1;
-
- return 0;
-}
-
-static inline int xgpio_regoffset(struct xgpio_instance *chip, int gpio)
-{
- if (xgpio_index(chip, gpio))
- return XGPIO_CHANNEL_OFFSET;
-
- return 0;
-}
-
-static inline int xgpio_offset(struct xgpio_instance *chip, int gpio)
-{
- if (xgpio_index(chip, gpio))
- return gpio - chip->gpio_width[0];
-
- return gpio;
-}
-
/**
* xgpio_get - Read the specified signal of the GPIO device.
* @gc: Pointer to gpio_chip device structure.
@@ -86,12 +84,10 @@ static inline int xgpio_offset(struct xgpio_instance *chip, int gpio)
static int xgpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct xgpio_instance *chip = gpiochip_get_data(gc);
- u32 val;
- val = xgpio_readreg(chip->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, gpio));
+ void __iomem *regs = chip->regs + chip->offset;
- return !!(val & BIT(xgpio_offset(chip, gpio)));
+ return !!(xgpio_readreg(regs + XGPIO_DATA_OFFSET) & BIT(gpio));
}
/**
@@ -107,21 +103,20 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, gpio);
- int offset = xgpio_offset(chip, gpio);
+ void __iomem *regs = chip->regs;
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signal and set its direction to output */
if (val)
- chip->gpio_state[index] |= BIT(offset);
+ chip->gpio_state |= BIT(gpio);
else
- chip->gpio_state[index] &= ~BIT(offset);
+ chip->gpio_state &= ~BIT(gpio);
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_state[index]);
+ xgpio_writereg(regs + chip->offset + XGPIO_DATA_OFFSET,
+ chip->gpio_state);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -138,37 +133,27 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, 0);
- int offset, i;
+ void __iomem *regs = chip->regs;
+ int i;
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signals */
for (i = 0; i < gc->ngpio; i++) {
if (*mask == 0)
break;
- /* Once finished with an index write it out to the register */
- if (index != xgpio_index(chip, i)) {
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- index * XGPIO_CHANNEL_OFFSET,
- chip->gpio_state[index]);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
- index = xgpio_index(chip, i);
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
- }
if (__test_and_clear_bit(i, mask)) {
- offset = xgpio_offset(chip, i);
if (test_bit(i, bits))
- chip->gpio_state[index] |= BIT(offset);
+ chip->gpio_state |= BIT(i);
else
- chip->gpio_state[index] &= ~BIT(offset);
+ chip->gpio_state &= ~BIT(i);
}
}
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]);
+ xgpio_writereg(regs + chip->offset + XGPIO_DATA_OFFSET,
+ chip->gpio_state);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -176,6 +161,8 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
* @gc: Pointer to gpio_chip device structure.
* @gpio: GPIO signal number.
*
+ * This function sets the direction of the specified GPIO signal as input.
+ *
* Return:
* 0 - if direction of GPIO signals is set as input
* otherwise it returns negative error value.
@@ -184,17 +171,15 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, gpio);
- int offset = xgpio_offset(chip, gpio);
+ void __iomem *regs = chip->regs;
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Set the GPIO bit in shadow register and set direction as input */
- chip->gpio_dir[index] |= BIT(offset);
- xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
+ chip->gpio_dir |= BIT(gpio);
+ xgpio_writereg(regs + chip->offset + XGPIO_TRI_OFFSET, chip->gpio_dir);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -215,25 +200,23 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, gpio);
- int offset = xgpio_offset(chip, gpio);
+ void __iomem *regs = chip->regs;
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write state of GPIO signal */
if (val)
- chip->gpio_state[index] |= BIT(offset);
+ chip->gpio_state |= BIT(gpio);
else
- chip->gpio_state[index] &= ~BIT(offset);
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_state[index]);
+ chip->gpio_state &= ~BIT(gpio);
+ xgpio_writereg(regs + chip->offset + XGPIO_DATA_OFFSET,
+ chip->gpio_state);
/* Clear the GPIO bit in shadow register and set direction as output */
- chip->gpio_dir[index] &= ~BIT(offset);
- xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
+ chip->gpio_dir &= ~BIT(gpio);
+ xgpio_writereg(regs + chip->offset + XGPIO_TRI_OFFSET, chip->gpio_dir);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
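xgpio_set(), xgpio_dir_in() and xgpio_dir_out() all follow one discipline: mutate a shadow word under chip->gpio_lock, then write the whole word back, since the AXI GPIO DATA/TRI registers cannot be updated bit-by-bit. A minimal userspace sketch of that shadow-register pattern (the spinlock and MMIO write are stubbed out here):

#include <stdint.h>
#include <stdio.h>

static uint32_t gpio_state;	/* shadow of the DATA register */

/* Stand-in for xgpio_writereg(); the driver does an MMIO write here */
static void write_data_reg(uint32_t val)
{
	printf("DATA <- 0x%08x\n", (unsigned int)val);
}

static void gpio_set(unsigned int gpio, int val)
{
	/* the real driver holds chip->gpio_lock across this section */
	if (val)
		gpio_state |= 1u << gpio;
	else
		gpio_state &= ~(1u << gpio);
	write_data_reg(gpio_state);
}

int main(void)
{
	gpio_set(3, 1);
	gpio_set(3, 0);
	return 0;
}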
@@ -244,86 +227,383 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
*/
static void xgpio_save_regs(struct xgpio_instance *chip)
{
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET, chip->gpio_state[0]);
- xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET, chip->gpio_dir[0]);
+ if (chip->no_init) {
+ chip->gpio_state = xgpio_readreg(chip->regs +
+ XGPIO_DATA_OFFSET);
+ chip->gpio_dir = xgpio_readreg(chip->regs + XGPIO_TRI_OFFSET);
+ } else {
+ xgpio_writereg(chip->regs + chip->offset + XGPIO_DATA_OFFSET,
+ chip->gpio_state);
+ xgpio_writereg(chip->regs + chip->offset + XGPIO_TRI_OFFSET,
+ chip->gpio_dir);
+ }
+}
+
+/**
+ * xgpio_xlate - Translate gpio_spec to the GPIO number and flags
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpiospec: gpio specifier as found in the device tree
+ * @flags: A flags pointer based on binding
+ *
+ * Return:
+ * GPIO number on success, otherwise -EINVAL
+ */
+static int xgpio_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec, u32 *flags)
+{
+ struct xgpio_instance *chip = gpiochip_get_data(gc);
+ if (gc->of_gpio_n_cells == 3 && flags)
+ *flags = gpiospec->args[2];
+
+ if (gpiospec->args[1] == chip->offset)
+ return gpiospec->args[0];
+
+ return -EINVAL;
+}
+
+/**
+ * xgpio_irq_mask - Mask the specified GPIO interrupt
+ * @irq_data: per irq and chip data passed down to chip functions
+ */
+static void xgpio_irq_mask(struct irq_data *irq_data)
+{
+ unsigned long flags;
+ struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
+ u32 offset = irq_data->irq - chip->irq_base;
+ u32 temp;
+
+ pr_debug("%s: Disable %d irq, irq_enable_mask 0x%x\n",
+ __func__, offset, chip->irq_enable);
+
+ spin_lock_irqsave(&chip->gpio_lock, flags);
- if (!chip->gpio_width[1])
- return;
+ chip->irq_enable &= ~BIT(offset);
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET + XGPIO_CHANNEL_OFFSET,
- chip->gpio_state[1]);
- xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET + XGPIO_CHANNEL_OFFSET,
- chip->gpio_dir[1]);
+ if (!chip->irq_enable) {
+ /* Disable the per channel interrupt */
+ temp = xgpio_readreg(chip->regs + XGPIO_IPIER_OFFSET);
+ temp &= ~(chip->offset / XGPIO_CHANNEL_OFFSET + 1);
+ xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
+
+ /* Disable global interrupt if channel interrupts are unused */
+ temp = xgpio_readreg(chip->regs + XGPIO_IPIER_OFFSET);
+ if (!temp)
+ xgpio_writereg(chip->regs + XGPIO_GIER_OFFSET,
+ ~XGPIO_GIER_IE);
+ }
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
+ * xgpio_irq_unmask - Unmask the specified GPIO interrupt
+ * @irq_data: per irq and chip data passed down to chip functions
+ */
+static void xgpio_irq_unmask(struct irq_data *irq_data)
+{
+ unsigned long flags;
+ struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
+ u32 offset = irq_data->irq - chip->irq_base;
+ u32 temp;
+
+ pr_debug("%s: Enable %d irq, irq_enable_mask 0x%x\n",
+ __func__, offset, chip->irq_enable);
+
+ /* Setup pin as input */
+ xgpio_dir_in(&chip->gc, offset);
+
+ spin_lock_irqsave(&chip->gpio_lock, flags);
+
+ chip->irq_enable |= BIT(offset);
+
+ if (chip->irq_enable) {
+ /* Enable per channel interrupt */
+ temp = xgpio_readreg(chip->regs + XGPIO_IPIER_OFFSET);
+ temp |= chip->offset / XGPIO_CHANNEL_OFFSET + 1;
+ xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
+
+ /* Enable global interrupts */
+ xgpio_writereg(chip->regs + XGPIO_GIER_OFFSET, XGPIO_GIER_IE);
+ }
+
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
+}
+
+/**
+ * xgpio_set_irq_type - Set the irq type for a GPIO signal
+ * @irq_data: Per irq and chip data passed down to chip functions
+ * @type: Interrupt type that is to be set for the gpio pin
+ *
+ * Return:
+ * 0 if the interrupt type is supported, otherwise -EINVAL
+ */
+static int xgpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
+{
+ /* Only rising edge case is supported now */
+ if (type & IRQ_TYPE_EDGE_RISING)
+ return 0;
+
+ return -EINVAL;
+}
+
+/* irq chip descriptor */
+static struct irq_chip xgpio_irqchip = {
+ .name = "xgpio",
+ .irq_mask = xgpio_irq_mask,
+ .irq_unmask = xgpio_irq_unmask,
+ .irq_set_type = xgpio_set_irq_type,
+};
+
+/**
+ * xgpio_to_irq - Find the Linux irq mapped to a GPIO pin
+ * @gc: Pointer to gpio_chip device structure.
+ * @offset: GPIO pin offset
+ *
+ * Return:
+ * Linux irq number, or 0 if no mapping exists
+ */
+static int xgpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct xgpio_instance *chip = gpiochip_get_data(gc);
+
+ return irq_find_mapping(chip->irq_domain, offset);
+}
+
+/**
+ * xgpio_irqhandler - GPIO interrupt service routine
+ * @desc: Pointer to interrupt description
+ */
+static void xgpio_irqhandler(struct irq_desc *desc)
+{
+ unsigned int irq = irq_desc_get_irq(desc);
+ struct xgpio_instance *chip = irq_get_handler_data(irq);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ int offset;
+ unsigned long val;
+
+ chained_irq_enter(irqchip, desc);
+
+ val = xgpio_readreg(chip->regs + chip->offset);
+ /* Only rising edge is supported */
+ val &= chip->irq_enable;
+
+ for_each_set_bit(offset, &val, chip->gc.ngpio)
+ generic_handle_irq(chip->irq_base + offset);
+
+ xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET,
+ chip->offset / XGPIO_CHANNEL_OFFSET + 1);
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
+
+/**
+ * xgpio_irq_setup - Allocate irq for gpio and setup appropriate functions
+ * @np: Device node of the GPIO chip
+ * @chip: Pointer to private gpio channel structure
+ *
+ * Return:
+ * 0 on success, otherwise -1
+ */
+static int xgpio_irq_setup(struct device_node *np, struct xgpio_instance *chip)
+{
+ u32 pin_num;
+ struct resource res;
+
+ int ret = of_irq_to_resource(np, 0, &res);
+
+ if (ret <= 0) {
+ pr_info("GPIO IRQ not connected\n");
+ return 0;
+ }
+
+ chip->gc.to_irq = xgpio_to_irq;
+
+ chip->irq_base = irq_alloc_descs(-1, 0, chip->gc.ngpio, 0);
+ if (chip->irq_base < 0) {
+ pr_err("Couldn't allocate IRQ numbers\n");
+ return -1;
+ }
+ chip->irq_domain = irq_domain_add_legacy(np, chip->gc.ngpio,
+ chip->irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+
+ /*
+ * set the irq chip, handler and irq chip data for callbacks for
+ * each pin
+ */
+ for (pin_num = 0; pin_num < chip->gc.ngpio; pin_num++) {
+ u32 gpio_irq = irq_find_mapping(chip->irq_domain, pin_num);
+
+ irq_set_lockdep_class(gpio_irq, &gpio_lock_class,
+ &gpio_request_class);
+ pr_debug("IRQ Base: %d, Pin %d = IRQ %d\n",
+ chip->irq_base, pin_num, gpio_irq);
+ irq_set_chip_and_handler(gpio_irq, &xgpio_irqchip,
+ handle_simple_irq);
+ irq_set_chip_data(gpio_irq, (void *)chip);
+ }
+ irq_set_handler_data(res.start, (void *)chip);
+ irq_set_chained_handler(res.start, xgpio_irqhandler);
+
+ return 0;
+}
+
+static int xgpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ int ret = pm_runtime_get_sync(chip->parent);
+
+ /*
+ * If the device is already active pm_runtime_get() will return 1 on
+ * success, but gpio_request still needs to return 0.
+ */
+ return ret < 0 ? ret : 0;
+}
+
+static void xgpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ pm_runtime_put(chip->parent);
+}
+
+static int __maybe_unused xgpio_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+ struct irq_data *data;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_dbg(dev, "failed to get IRQ\n");
+ return 0;
+ }
+
+ data = irq_get_irq_data(irq);
+ if (!irqd_is_wakeup_set(data))
+ return pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+static int __maybe_unused xgpio_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+ struct irq_data *data;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_dbg(dev, "failed to get IRQ\n");
+ return 0;
+ }
+
+ data = irq_get_irq_data(irq);
+ if (!irqd_is_wakeup_set(data))
+ return pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+static int xgpio_remove(struct platform_device *pdev)
+{
+ struct xgpio_instance *chip = platform_get_drvdata(pdev);
+
+ if (!pm_runtime_suspended(&pdev->dev))
+ clk_disable(chip->clk);
+ clk_unprepare(chip->clk);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused xgpio_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xgpio_instance *gpio = platform_get_drvdata(pdev);
+
+ clk_disable(gpio->clk);
+
+ return 0;
+}
+
+static int __maybe_unused xgpio_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xgpio_instance *gpio = platform_get_drvdata(pdev);
+
+ return clk_enable(gpio->clk);
+}
+
+static const struct dev_pm_ops xgpio_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xgpio_suspend, xgpio_resume)
+ SET_RUNTIME_PM_OPS(xgpio_runtime_suspend,
+ xgpio_runtime_resume, NULL)
+};
+
+/**
* xgpio_of_probe - Probe method for the GPIO device.
- * @pdev: pointer to the platform device
+ * @pdev: platform device instance
+ *
+ * This function probes the GPIO device in the device tree. It initializes the
+ * driver data structure.
*
* Return:
* It returns 0, if the driver is bound to the GPIO device, or
* a negative value if there is an error.
*/
-static int xgpio_probe(struct platform_device *pdev)
+static int xgpio_of_probe(struct platform_device *pdev)
{
- struct xgpio_instance *chip;
- int status = 0;
struct device_node *np = pdev->dev.of_node;
- u32 is_dual;
+ struct xgpio_instance *chip, *chip_dual;
+ int status = 0;
+ const u32 *tree_info;
+ u32 ngpio;
+ u32 cells = 2;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- platform_set_drvdata(pdev, chip);
-
/* Update GPIO state shadow register with default value */
- of_property_read_u32(np, "xlnx,dout-default", &chip->gpio_state[0]);
+ of_property_read_u32(np, "xlnx,dout-default", &chip->gpio_state);
+
+ /* By default, all pins are inputs */
+ chip->gpio_dir = 0xFFFFFFFF;
/* Update GPIO direction shadow register with default value */
- if (of_property_read_u32(np, "xlnx,tri-default", &chip->gpio_dir[0]))
- chip->gpio_dir[0] = 0xFFFFFFFF;
+ of_property_read_u32(np, "xlnx,tri-default", &chip->gpio_dir);
+
+ chip->no_init = of_property_read_bool(np, "xlnx,no-init");
+
+ /* Update cells with gpio-cells value */
+ of_property_read_u32(np, "#gpio-cells", &cells);
/*
* Check device node and parent device node for device width
* and assume default width of 32
*/
- if (of_property_read_u32(np, "xlnx,gpio-width", &chip->gpio_width[0]))
- chip->gpio_width[0] = 32;
-
- spin_lock_init(&chip->gpio_lock[0]);
-
- if (of_property_read_u32(np, "xlnx,is-dual", &is_dual))
- is_dual = 0;
-
- if (is_dual) {
- /* Update GPIO state shadow register with default value */
- of_property_read_u32(np, "xlnx,dout-default-2",
- &chip->gpio_state[1]);
-
- /* Update GPIO direction shadow register with default value */
- if (of_property_read_u32(np, "xlnx,tri-default-2",
- &chip->gpio_dir[1]))
- chip->gpio_dir[1] = 0xFFFFFFFF;
-
- /*
- * Check device node and parent device node for device width
- * and assume default width of 32
- */
- if (of_property_read_u32(np, "xlnx,gpio2-width",
- &chip->gpio_width[1]))
- chip->gpio_width[1] = 32;
+ if (of_property_read_u32(np, "xlnx,gpio-width", &ngpio))
+ ngpio = 32;
+ chip->gc.ngpio = (u16)ngpio;
- spin_lock_init(&chip->gpio_lock[1]);
- }
+ spin_lock_init(&chip->gpio_lock);
chip->gc.base = -1;
- chip->gc.ngpio = chip->gpio_width[0] + chip->gpio_width[1];
chip->gc.parent = &pdev->dev;
+ chip->gc.owner = THIS_MODULE;
+ chip->gc.of_xlate = xgpio_xlate;
+ chip->gc.of_gpio_n_cells = cells;
chip->gc.direction_input = xgpio_dir_in;
chip->gc.direction_output = xgpio_dir_out;
chip->gc.get = xgpio_get;
chip->gc.set = xgpio_set;
+ chip->gc.request = xgpio_request;
+ chip->gc.free = xgpio_free;
chip->gc.set_multiple = xgpio_set_multiple;
chip->gc.label = dev_name(&pdev->dev);
@@ -334,15 +614,121 @@ static int xgpio_probe(struct platform_device *pdev)
return PTR_ERR(chip->regs);
}
- xgpio_save_regs(chip);
+ platform_set_drvdata(pdev, chip);
+
+ chip->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(chip->clk)) {
+ if (PTR_ERR(chip->clk) != -ENOENT) {
+ if (PTR_ERR(chip->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Input clock not found\n");
+ return PTR_ERR(chip->clk);
+ }
+
+ /*
+ * Clock framework support is optional, continue on
+ * anyways if we don't find a matching clock.
+ */
+ chip->clk = NULL;
+ }
+
+ status = clk_prepare_enable(chip->clk);
+ if (status < 0) {
+ dev_err(&pdev->dev, "Failed to prepare clk\n");
+ return status;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ xgpio_save_regs(chip);
status = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
if (status) {
dev_err(&pdev->dev, "failed to add GPIO chip\n");
- return status;
+ goto err_unprepare_clk;
}
+ status = xgpio_irq_setup(np, chip);
+ if (status) {
+ pr_err("%s: GPIO IRQ initialization failed %d\n",
+ np->full_name, status);
+ goto err_pm_put;
+ }
+
+ pr_info("XGpio: %s: registered, base is %d\n", np->full_name,
+ chip->gc.base);
+
+ tree_info = of_get_property(np, "xlnx,is-dual", NULL);
+ if (tree_info && be32_to_cpup(tree_info)) {
+ chip_dual = devm_kzalloc(&pdev->dev, sizeof(*chip_dual),
+ GFP_KERNEL);
+ if (!chip_dual) {
+ status = -ENOMEM;
+ goto err_pm_put;
+ }
+
+ /* Add dual channel offset */
+ chip_dual->offset = XGPIO_CHANNEL_OFFSET;
+
+ /* Update GPIO state shadow register with default value */
+ of_property_read_u32(np, "xlnx,dout-default-2",
+ &chip_dual->gpio_state);
+
+ /* By default, all pins are inputs */
+ chip_dual->gpio_dir = 0xFFFFFFFF;
+
+ /* Update GPIO direction shadow register with default value */
+ of_property_read_u32(np, "xlnx,tri-default-2",
+ &chip_dual->gpio_dir);
+
+ /*
+ * Check device node and parent device node for device width
+ * and assume default width of 32
+ */
+ if (of_property_read_u32(np, "xlnx,gpio2-width", &ngpio))
+ ngpio = 32;
+ chip_dual->gc.ngpio = (u16)ngpio;
+
+ spin_lock_init(&chip_dual->gpio_lock);
+
+ chip_dual->gc.parent = &pdev->dev;
+ chip_dual->gc.owner = THIS_MODULE;
+ chip_dual->gc.of_xlate = xgpio_xlate;
+ chip_dual->gc.of_gpio_n_cells = cells;
+ chip_dual->gc.direction_input = xgpio_dir_in;
+ chip_dual->gc.direction_output = xgpio_dir_out;
+ chip_dual->gc.get = xgpio_get;
+ chip_dual->gc.set = xgpio_set;
+ chip_dual->gc.request = xgpio_request;
+ chip_dual->gc.free = xgpio_free;
+ chip_dual->gc.set_multiple = xgpio_set_multiple;
+
+ status = xgpio_irq_setup(np, chip_dual);
+ if (status) {
+ pr_err("%s: GPIO IRQ initialization failed %d\n",
+ np->full_name, status);
+ goto err_pm_put;
+ }
+
+ xgpio_save_regs(chip_dual);
+ /* Call the OF gpio helper to setup and register the GPIO dev */
+ status = devm_gpiochip_add_data(&pdev->dev, &chip_dual->gc, chip_dual);
+ if (status) {
+ pr_err("%s: error in probe function with status %d\n",
+ np->full_name, status);
+ goto err_pm_put;
+ }
+ pr_info("XGpio: %s: dual channel registered, base is %d\n",
+ np->full_name, chip_dual->gc.base);
+ }
+
+ pm_runtime_put(&pdev->dev);
+
return 0;
+
+err_pm_put:
+ pm_runtime_put(&pdev->dev);
+err_unprepare_clk:
+ pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(chip->clk);
+ return status;
}
static const struct of_device_id xgpio_of_match[] = {
@@ -352,24 +738,27 @@ static const struct of_device_id xgpio_of_match[] = {
MODULE_DEVICE_TABLE(of, xgpio_of_match);
-static struct platform_driver xgpio_plat_driver = {
- .probe = xgpio_probe,
- .driver = {
- .name = "gpio-xilinx",
- .of_match_table = xgpio_of_match,
+static struct platform_driver xilinx_gpio_driver = {
+ .probe = xgpio_of_probe,
+ .remove = xgpio_remove,
+ .driver = {
+ .name = "xilinx-gpio",
+ .of_match_table = xgpio_of_match,
+ .pm = &xgpio_dev_pm_ops,
},
};
static int __init xgpio_init(void)
{
- return platform_driver_register(&xgpio_plat_driver);
+ return platform_driver_register(&xilinx_gpio_driver);
}
+/* Make sure we get initialized before anyone else tries to use us */
subsys_initcall(xgpio_init);
static void __exit xgpio_exit(void)
{
- platform_driver_unregister(&xgpio_plat_driver);
+ platform_driver_unregister(&xilinx_gpio_driver);
}
module_exit(xgpio_exit);
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 25a42605aa81..645fc6133df3 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -10,6 +10,7 @@
#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -21,6 +22,9 @@
/* Maximum banks */
#define ZYNQ_GPIO_MAX_BANK 4
#define ZYNQMP_GPIO_MAX_BANK 6
+#define VERSAL_GPIO_MAX_BANK 4
+#define PMC_GPIO_MAX_BANK 5
+#define VERSAL_UNUSED_BANKS 2
#define ZYNQ_GPIO_BANK0_NGPIO 32
#define ZYNQ_GPIO_BANK1_NGPIO 22
@@ -95,6 +99,7 @@
/* set to differentiate zynq from zynqmp, 0=zynqmp, 1=zynq */
#define ZYNQ_GPIO_QUIRK_IS_ZYNQ BIT(0)
#define GPIO_QUIRK_DATA_RO_BUG BIT(1)
+#define GPIO_QUIRK_VERSAL BIT(2)
struct gpio_regs {
u32 datamsw[ZYNQMP_GPIO_MAX_BANK];
@@ -116,6 +121,7 @@ struct gpio_regs {
* @irq: interrupt for the GPIO device
* @p_data: pointer to platform data
* @context: context registers
+ * @dirlock: lock used for direction in/out synchronization
*/
struct zynq_gpio {
struct gpio_chip chip;
@@ -124,6 +130,7 @@ struct zynq_gpio {
int irq;
const struct zynq_platform_data *p_data;
struct gpio_regs context;
+ spinlock_t dirlock;
};
/**
@@ -196,6 +203,8 @@ static inline void zynq_gpio_get_bank_pin(unsigned int pin_num,
gpio->p_data->bank_min[bank];
return;
}
+ if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL)
+ bank = bank + VERSAL_UNUSED_BANKS;
}
/* default */
@@ -297,6 +306,7 @@ static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
{
u32 reg;
unsigned int bank_num, bank_pin_num;
+ unsigned long flags;
struct zynq_gpio *gpio = gpiochip_get_data(chip);
zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
@@ -310,9 +320,11 @@ static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
return -EINVAL;
/* clear the bit in direction mode reg to set the pin as input */
+ spin_lock_irqsave(&gpio->dirlock, flags);
reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
reg &= ~BIT(bank_pin_num);
writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
+ spin_unlock_irqrestore(&gpio->dirlock, flags);
return 0;
}
@@ -334,11 +346,13 @@ static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
{
u32 reg;
unsigned int bank_num, bank_pin_num;
+ unsigned long flags;
struct zynq_gpio *gpio = gpiochip_get_data(chip);
zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
/* set the GPIO pin as output */
+ spin_lock_irqsave(&gpio->dirlock, flags);
reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
reg |= BIT(bank_pin_num);
writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
@@ -347,6 +361,7 @@ static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_OUTEN_OFFSET(bank_num));
reg |= BIT(bank_pin_num);
writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_OUTEN_OFFSET(bank_num));
+ spin_unlock_irqrestore(&gpio->dirlock, flags);
/* set the state of the pin */
zynq_gpio_set_value(chip, pin, state);
@@ -644,6 +659,8 @@ static void zynq_gpio_irqhandler(struct irq_desc *desc)
int_enb = readl_relaxed(gpio->base_addr +
ZYNQ_GPIO_INTMASK_OFFSET(bank_num));
zynq_gpio_handle_bank_irq(gpio, bank_num, int_sts & ~int_enb);
+ if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL)
+ bank_num = bank_num + VERSAL_UNUSED_BANKS;
}
chained_irq_exit(irqchip, desc);
@@ -673,6 +690,8 @@ static void zynq_gpio_save_context(struct zynq_gpio *gpio)
gpio->context.int_any[bank_num] =
readl_relaxed(gpio->base_addr +
ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+ if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL)
+ bank_num = bank_num + VERSAL_UNUSED_BANKS;
}
}
@@ -712,6 +731,9 @@ static int __maybe_unused zynq_gpio_suspend(struct device *dev)
struct zynq_gpio *gpio = dev_get_drvdata(dev);
struct irq_data *data = irq_get_irq_data(gpio->irq);
+ if (!device_may_wakeup(dev))
+ disable_irq(gpio->irq);
+
if (!irqd_is_wakeup_set(data)) {
zynq_gpio_save_context(gpio);
return pm_runtime_force_suspend(dev);
@@ -726,6 +748,9 @@ static int __maybe_unused zynq_gpio_resume(struct device *dev)
struct irq_data *data = irq_get_irq_data(gpio->irq);
int ret;
+ if (!device_may_wakeup(dev))
+ enable_irq(gpio->irq);
+
if (!irqd_is_wakeup_set(data)) {
ret = pm_runtime_force_resume(dev);
zynq_gpio_restore_context(gpio);
@@ -775,6 +800,31 @@ static const struct dev_pm_ops zynq_gpio_dev_pm_ops = {
zynq_gpio_runtime_resume, NULL)
};
+static const struct zynq_platform_data versal_gpio_def = {
+ .label = "versal_gpio",
+ .quirks = GPIO_QUIRK_VERSAL,
+ .ngpio = 58,
+ .max_bank = VERSAL_GPIO_MAX_BANK,
+ .bank_min[0] = 0,
+ .bank_max[0] = 25, /* 0 to 25 are connected to MIOs (26 pins) */
+ .bank_min[3] = 26,
+ .bank_max[3] = 57, /* Bank 3 is connected to FMIOs (32 pins) */
+};
+
+static const struct zynq_platform_data pmc_gpio_def = {
+ .label = "pmc_gpio",
+ .ngpio = 116,
+ .max_bank = PMC_GPIO_MAX_BANK,
+ .bank_min[0] = 0,
+ .bank_max[0] = 25, /* 0 to 25 are connected to MIOs (26 pins) */
+ .bank_min[1] = 26,
+ .bank_max[1] = 51, /* Bank 1 are connected to MIOs (26 pins) */
+ .bank_min[3] = 52,
+ .bank_max[3] = 83, /* Bank 3 is connected to EMIOs (32 pins) */
+ .bank_min[4] = 84,
+ .bank_max[4] = 115, /* Bank 4 is connected to EMIOs (32 pins) */
+};
+
static const struct zynq_platform_data zynqmp_gpio_def = {
.label = "zynqmp_gpio",
.quirks = GPIO_QUIRK_DATA_RO_BUG,
@@ -812,6 +862,8 @@ static const struct zynq_platform_data zynq_gpio_def = {
static const struct of_device_id zynq_gpio_of_match[] = {
{ .compatible = "xlnx,zynq-gpio-1.0", .data = &zynq_gpio_def },
{ .compatible = "xlnx,zynqmp-gpio-1.0", .data = &zynqmp_gpio_def },
+ { .compatible = "xlnx,versal-gpio-1.0", .data = &versal_gpio_def },
+ { .compatible = "xlnx,pmc-gpio-1.0", .data = &pmc_gpio_def },
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, zynq_gpio_of_match);
@@ -882,6 +934,8 @@ static int zynq_gpio_probe(struct platform_device *pdev)
return ret;
}
+ spin_lock_init(&gpio->dirlock);
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
@@ -889,9 +943,12 @@ static int zynq_gpio_probe(struct platform_device *pdev)
goto err_pm_dis;
/* disable interrupts for all banks */
- for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++)
+ for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) {
writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
+ if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL)
+ bank_num = bank_num + VERSAL_UNUSED_BANKS;
+ }
/* Set up the GPIO irqchip */
girq = &chip->irq;
@@ -916,6 +973,8 @@ static int zynq_gpio_probe(struct platform_device *pdev)
goto err_pm_put;
}
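+ /* make disable_irq() mask the line immediately instead of lazily on the next interrupt */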
+ irq_set_status_flags(gpio->irq, IRQ_DISABLE_UNLAZY);
+ device_init_wakeup(&pdev->dev, 1);
pm_runtime_put(&pdev->dev);
return 0;
@@ -969,7 +1028,7 @@ static int __init zynq_gpio_init(void)
{
return platform_driver_register(&zynq_gpio_driver);
}
-postcore_initcall(zynq_gpio_init);
+subsys_initcall(zynq_gpio_init);
static void __exit zynq_gpio_exit(void)
{
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index d71c7b9b9665..51d957f9cec7 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1466,6 +1466,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
.ignore_wake = "INT33FF:01@0",
},
},
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 0.35
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "PNP0C50:00@8",
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index b1dcd2dd52e6..73807c897391 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -734,7 +734,8 @@ int of_mm_gpiochip_add_data(struct device_node *np,
if (mm_gc->save_regs)
mm_gc->save_regs(mm_gc);
- mm_gc->gc.of_node = np;
+ of_node_put(mm_gc->gc.of_node);
+ mm_gc->gc.of_node = of_node_get(np);
ret = gpiochip_add_data(gc, data);
if (ret)
@@ -742,6 +743,7 @@ int of_mm_gpiochip_add_data(struct device_node *np,
return 0;
err2:
+ of_node_put(np);
iounmap(mm_gc->regs);
err1:
kfree(gc->label);
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 558cd900d399..b993416961a7 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -490,14 +490,17 @@ static ssize_t export_store(struct class *class,
}
status = gpiod_set_transitory(desc, false);
- if (!status) {
- status = gpiod_export(desc, true);
- if (status < 0)
- gpiod_free(desc);
- else
- set_bit(FLAG_SYSFS, &desc->flags);
+ if (status) {
+ gpiod_free(desc);
+ goto done;
}
+ status = gpiod_export(desc, true);
+ if (status < 0)
+ gpiod_free(desc);
+ else
+ set_bit(FLAG_SYSFS, &desc->flags);
+
done:
if (status)
pr_debug("%s: status %d\n", __func__, status);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 649f17dfcf45..4b319f505b06 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -27,6 +27,7 @@ menuconfig DRM
config DRM_MIPI_DBI
tristate
depends on DRM
+ select DRM_KMS_HELPER
config DRM_MIPI_DSI
bool
@@ -319,6 +320,10 @@ source "drivers/gpu/drm/bridge/Kconfig"
source "drivers/gpu/drm/sti/Kconfig"
+source "drivers/gpu/drm/xilinx/Kconfig"
+
+source "drivers/gpu/drm/zocl/Kconfig"
+
source "drivers/gpu/drm/imx/Kconfig"
source "drivers/gpu/drm/ingenic/Kconfig"
@@ -345,6 +350,8 @@ source "drivers/gpu/drm/tiny/Kconfig"
source "drivers/gpu/drm/pl111/Kconfig"
+source "drivers/gpu/drm/xlnx/Kconfig"
+
source "drivers/gpu/drm/tve200/Kconfig"
source "drivers/gpu/drm/xen/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 82ff826b33cc..2fc86aee94c4 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -98,6 +98,8 @@ obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STM) += stm/
obj-$(CONFIG_DRM_STI) += sti/
+obj-$(CONFIG_DRM_XILINX) += xilinx/
+obj-$(CONFIG_DRM_ZOCL) += zocl/
obj-$(CONFIG_DRM_IMX) += imx/
obj-$(CONFIG_DRM_INGENIC) += ingenic/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
@@ -113,6 +115,7 @@ obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
+obj-$(CONFIG_DRM_XLNX) += xlnx/
obj-$(CONFIG_DRM_TVE200) += tve200/
obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
index 3107b9575929..eef7517c9d24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -88,7 +88,7 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
return NULL;
fence = container_of(f, struct amdgpu_amdkfd_fence, base);
- if (fence && f->ops == &amdkfd_fence_ops)
+ if (f->ops == &amdkfd_fence_ops)
return fence;
return NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index b1172d93c99c..ba604985cad9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -313,6 +313,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
if (!found)
return false;
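+ /* done with the PCI device found by the preceding lookup; drop its reference */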
+ pci_dev_put(pdev);
adev->bios = kmalloc(size, GFP_KERNEL);
if (!adev->bios) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index e0d2f79571ef..94a626fe9118 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -178,6 +178,7 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
}
rcu_read_unlock();
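+ /* clear *result so callers cannot act on a stale pointer after a failed lookup */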
+ *result = NULL;
return -ENOENT;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 0e1cacf73169..fac43a9f553c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -316,8 +316,10 @@ static void amdgpu_connector_get_edid(struct drm_connector *connector)
if (!amdgpu_connector->edid) {
/* some laptops provide a hardcoded edid in rom for LCDs */
if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
- (connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) {
amdgpu_connector->edid = amdgpu_connector_get_hardcoded_edid(adev);
+ drm_connector_update_edid_property(connector, amdgpu_connector->edid);
+ }
}
}
@@ -1646,10 +1648,12 @@ amdgpu_connector_add(struct amdgpu_device *adev,
adev->mode_info.dither_property,
AMDGPU_FMT_DITHER_DISABLE);
- if (amdgpu_audio != 0)
+ if (amdgpu_audio != 0) {
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
+ }
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
@@ -1771,6 +1775,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
@@ -1824,6 +1829,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
@@ -1874,6 +1880,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7eeb98fe50ed..f9c725a8991b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -43,7 +43,6 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
struct drm_gem_object *gobj;
struct amdgpu_bo *bo;
unsigned long size;
- int r;
gobj = drm_gem_object_lookup(p->filp, data->handle);
if (gobj == NULL)
@@ -58,23 +57,14 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
drm_gem_object_put_unlocked(gobj);
size = amdgpu_bo_size(bo);
- if (size != PAGE_SIZE || (data->offset + 8) > size) {
- r = -EINVAL;
- goto error_unref;
- }
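+ /* compare against size - 8 so that data->offset + 8 cannot wrap around */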
+ if (size != PAGE_SIZE || data->offset > (size - 8))
+ return -EINVAL;
- if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
- r = -EINVAL;
- goto error_unref;
- }
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
+ return -EINVAL;
*offset = data->offset;
-
return 0;
-
-error_unref:
- amdgpu_bo_unref(&bo);
- return r;
}
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
@@ -151,7 +141,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
}
for (i = 0; i < p->nchunks; i++) {
- struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
+ struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
struct drm_amdgpu_cs_chunk user_chunk;
uint32_t __user *cdata;
@@ -1575,15 +1565,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
continue;
r = dma_fence_wait_timeout(fence, true, timeout);
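+ /* pick up the fence error before dma_fence_put() drops our reference */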
+ if (r > 0 && fence->error)
+ r = fence->error;
+
dma_fence_put(fence);
if (r < 0)
return r;
if (r == 0)
break;
-
- if (fence->error)
- return fence->error;
}
memset(wait, 0, sizeof(*wait));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a9a81e55777b..48b8b5600402 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -392,6 +392,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
ssize_t result = 0;
int r;
+ if (!adev->smc_rreg)
+ return -EOPNOTSUPP;
+
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
@@ -431,6 +434,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
ssize_t result = 0;
int r;
+ if (!adev->smc_wreg)
+ return -EOPNOTSUPP;
+
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d0e1fd011de5..9dcb38bab0e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -759,6 +759,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
u16 cmd;
int r;
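+ /* a resized BAR may live above 4 GB, which a 32-bit phys_addr_t cannot address */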
+ if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
+ return 0;
+
/* Bypass for VF */
if (amdgpu_sriov_vf(adev))
return 0;
@@ -844,6 +847,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
return true;
fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
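+ /* the firmware was only needed to read the version word; release it here */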
+ release_firmware(adev->pm.fw);
if (fw_ver < 0x00160e00)
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 9d61d1b7a569..858e2a8a9b71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -329,11 +329,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (r)
goto release_object;
- if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
- r = amdgpu_mn_register(bo, args->addr);
- if (r)
- goto release_object;
- }
+ r = amdgpu_mn_register(bo, args->addr);
+ if (r)
+ goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 59fd9ebf3a58..1f4acb4c3efb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -471,6 +471,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
crtc = (struct drm_crtc *)minfo->crtcs[i];
if (crtc && crtc->base.id == info->mode_crtc.id) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
ui32 = amdgpu_crtc->crtc_id;
found = 1;
break;
@@ -489,7 +490,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (ret)
return ret;
- ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
+ ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip)));
return ret ? -EFAULT : 0;
}
case AMDGPU_INFO_HW_IP_COUNT: {
@@ -625,17 +626,18 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
? -EFAULT : 0;
}
case AMDGPU_INFO_READ_MMR_REG: {
- unsigned n, alloc_size;
+ unsigned int n, alloc_size;
uint32_t *regs;
- unsigned se_num = (info->read_mmr_reg.instance >>
+ unsigned int se_num = (info->read_mmr_reg.instance >>
AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
AMDGPU_INFO_MMR_SE_INDEX_MASK;
- unsigned sh_num = (info->read_mmr_reg.instance >>
+ unsigned int sh_num = (info->read_mmr_reg.instance >>
AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
AMDGPU_INFO_MMR_SH_INDEX_MASK;
/* set full masks if the userspace set all bits
- * in the bitfields */
+ * in the bitfields
+ */
if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
se_num = 0xffffffff;
else if (se_num >= AMDGPU_GFX_MAX_SE)
@@ -648,6 +650,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (info->read_mmr_reg.count > 128)
return -EINVAL;
regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
if (!regs)
return -ENOMEM;
@@ -766,7 +771,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
}
case AMDGPU_INFO_VCE_CLOCK_TABLE: {
- unsigned i;
+ unsigned int i;
struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
struct amd_vce_state *vce_state;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 4cc0dd494a42..fc508927c6f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -892,6 +892,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (WARN_ON_ONCE(min_offset > max_offset))
return -EINVAL;
+ /* Check domain to be pinned to against preferred domains */
+ if (bo->preferred_domains & domain)
+ domain = bo->preferred_domains & domain;
+
/* A shared bo cannot be migrated to VRAM */
if (bo->prime_shared_count) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index c799691dfa84..94a503dc08b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -58,6 +58,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
{
struct fd f = fdget(fd);
struct amdgpu_fpriv *fpriv;
+ struct amdgpu_ctx_mgr *mgr;
struct amdgpu_ctx *ctx;
uint32_t id;
int r;
@@ -71,8 +72,11 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
return r;
}
- idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
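+ /* hold the context manager lock so entries cannot be removed while iterating */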
+ mgr = &fpriv->ctx_mgr;
+ mutex_lock(&mgr->lock);
+ idr_for_each_entry(&mgr->ctx_handles, ctx, id)
amdgpu_ctx_priority_override(ctx, priority);
+ mutex_unlock(&mgr->lock);
fdput(f);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 95e5e93edd18..7e840e560513 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -218,7 +218,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
*/
fence_owner = amdgpu_sync_get_owner(f);
if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
- owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+ owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
+ owner != AMDGPU_FENCE_OWNER_KFD)
continue;
if (amdgpu_sync_same_dev(adev, f)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index b0b2bdc750df..3e59b4e15023 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -48,6 +48,8 @@ struct amdgpu_vf_error_buffer {
uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
};
+enum idh_request;
+
/**
* struct amdgpu_virt_ops - amdgpu device virt operations
*/
@@ -56,7 +58,8 @@ struct amdgpu_virt_ops {
int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
int (*reset_gpu)(struct amdgpu_device *adev);
int (*wait_reset)(struct amdgpu_device *adev);
- void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
+ void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req,
+ u32 data1, u32 data2, u32 data3);
int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index fb47ddc6f7f4..dcf23b43f323 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -3076,6 +3076,10 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct amdgpu_fpriv *fpriv = filp->driver_priv;
int r;
+ /* No valid flags defined yet */
+ if (args->in.flags)
+ return -EINVAL;
+
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
/* current, we only have requirement to reserve vmid from gfxhub */
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index cae426c7c086..e1503b52519a 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -308,7 +308,7 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
DEBUG("IMM 0x%02X\n", val);
return val;
}
- return 0;
+ break;
case ATOM_ARG_PLL:
idx = U8(*ptr);
(*ptr)++;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index b81bb414fcb3..ee7e218c7dae 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1384,7 +1384,6 @@ static int cik_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
{
struct pci_dev *root = adev->pdev->bus->self;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -1419,12 +1418,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(adev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
return;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
@@ -1434,14 +1428,8 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
-
- tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
-
- tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
@@ -1465,15 +1453,23 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1486,26 +1482,38 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
-
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ bridge_cfg &
+ PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ gpu_cfg &
+ PCI_EXP_LNKCTL_HAWD);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1520,15 +1528,16 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
+
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 1d8739a4fbca..a84deb3c79a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3527,8 +3527,10 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
return r;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0))
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(ring->mqd_obj);
return r;
+ }
gfx_v10_0_kiq_init_queue(ring);
amdgpu_bo_kunmap(ring->mqd_obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 5906a8951a6c..4eba6b2d9cde 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2472,7 +2472,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
gfx_v9_0_tiling_mode_table_init(adev);
- gfx_v9_0_setup_rb(adev);
+ if (adev->gfx.num_gfx_rings)
+ gfx_v9_0_setup_rb(adev);
gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
@@ -3747,8 +3748,10 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
return r;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0))
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(ring->mqd_obj);
return r;
+ }
gfx_v9_0_kiq_init_queue(ring);
amdgpu_bo_kunmap(ring->mqd_obj);
@@ -3907,7 +3910,8 @@ static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
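+ /* the CP ECC interrupt is only requested when GFX RAS is supported; avoid an unbalanced put */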
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 63205de4a565..7a50713268b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1534,7 +1534,6 @@ static int gmc_v9_0_hw_fini(void *handle)
return 0;
}
- amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index c8a5a5698edd..6eb6f05c1136 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2733,10 +2733,8 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(adev->pm.dpm.ps);
+ if (ps == NULL)
return -ENOMEM;
- }
adev->pm.dpm.ps[i].ps_priv = ps;
k = 0;
idx = (u8 *)&power_state->v2.clockInfoIndex[0];
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 641f1258f08d..e60157fe7a7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
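+ /* tmp still holds the VM_L2_CNTL2 value here; restart from the CNTL3 register default */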
+ tmp = mmVM_L2_CNTL3_DEFAULT;
if (adev->gmc.translate_further) {
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 23de332f3c6e..7ca7a68d588e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1911,9 +1911,11 @@ static int sdma_v4_0_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
return 0;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ }
}
sdma_v4_0_ctx_switch_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 493af42152f2..53f7719d688e 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1633,7 +1633,6 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
static void si_pcie_gen3_enable(struct amdgpu_device *adev)
{
struct pci_dev *root = adev->pdev->bus->self;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -1668,12 +1667,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(adev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
return;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
@@ -1682,14 +1676,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
-
- tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
-
- tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
tmp = RREG32_PCIE(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -1706,15 +1694,23 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
}
for (i = 0; i < 10; i++) {
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
@@ -1726,25 +1722,37 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
mdelay(100);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
-
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
-
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ bridge_cfg &
+ PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ gpu_cfg &
+ PCI_EXP_LNKCTL_HAWD);
+
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -1757,15 +1765,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
+
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- tmp16 |= 3;
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- tmp16 |= 2;
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 9931d5c17cfb..6d7fd45b3129 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -7349,10 +7349,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
kcalloc(4,
sizeof(struct amdgpu_clock_voltage_dependency_entry),
GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
- amdgpu_free_extended_power_table(adev);
+ if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries)
return -ENOMEM;
- }
+
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 5f8c8786cac5..e985d1cdc142 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -329,8 +329,15 @@ static u32 vi_get_xclk(struct amdgpu_device *adev)
u32 reference_clock = adev->clock.spll.reference_freq;
u32 tmp;
- if (adev->flags & AMD_IS_APU)
- return reference_clock;
+ if (adev->flags & AMD_IS_APU) {
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ /* vbios says 48Mhz, but the actual freq is 100Mhz */
+ return 10000;
+ default:
+ return reference_clock;
+ }
+ }
tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index adbb2fec2e0f..4fd7dcef2e38 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -529,16 +529,13 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
struct kfd_event_waiter *event_waiters;
uint32_t i;
- event_waiters = kmalloc_array(num_events,
- sizeof(struct kfd_event_waiter),
- GFP_KERNEL);
+ event_waiters = kcalloc(num_events, sizeof(struct kfd_event_waiter),
+ GFP_KERNEL);
if (!event_waiters)
return NULL;
- for (i = 0; (event_waiters) && (i < num_events) ; i++) {
+ for (i = 0; i < num_events; i++)
init_wait(&event_waiters[i].wait);
- event_waiters[i].activated = false;
- }
return event_waiters;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index d3380c5bdbde..d978fcac2665 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -101,18 +101,19 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
&(mqd_mem_obj->gtt_mem),
&(mqd_mem_obj->gpu_addr),
(void *)&(mqd_mem_obj->cpu_ptr), true);
+
+ if (retval) {
+ kfree(mqd_mem_obj);
+ return NULL;
+ }
} else {
retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd),
&mqd_mem_obj);
- }
-
- if (retval) {
- kfree(mqd_mem_obj);
- return NULL;
+ if (retval)
+ return NULL;
}
return mqd_mem_obj;
-
}
static void init_mqd(struct mqd_manager *mm, void **mqd,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index de33864af70b..3f3242783e1c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1206,12 +1206,14 @@ static int dm_resume(void *handle)
list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
aconnector = to_amdgpu_dm_connector(connector);
+ if (!aconnector->dc_link)
+ continue;
+
/*
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
- if (aconnector->dc_link &&
- aconnector->dc_link->type == dc_connection_mst_branch)
+ if (aconnector->dc_link->type == dc_connection_mst_branch)
continue;
mutex_lock(&aconnector->hpd_lock);
@@ -1436,13 +1438,12 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
aconnector->edid =
(struct edid *)sink->dc_edid.raw_edid;
- drm_connector_update_edid_property(connector,
- aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
aconnector->edid);
}
+ drm_connector_update_edid_property(connector, aconnector->edid);
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
} else {
@@ -5775,6 +5776,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
continue;
dc_plane = dm_new_plane_state->dc_state;
+ if (!dc_plane)
+ continue;
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
@@ -7028,8 +7031,9 @@ static int dm_update_plane_state(struct dc *dc,
return -EINVAL;
}
- dc_plane_state_release(dm_old_plane_state->dc_state);
+ if (dm_old_plane_state->dc_state)
+ dc_plane_state_release(dm_old_plane_state->dc_state);
dm_new_plane_state->dc_state = NULL;
*lock_and_validation_needed = true;
@@ -7410,8 +7414,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
- if (dm_old_con_state->abm_level !=
- dm_new_con_state->abm_level)
+ if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
+ dm_old_con_state->scaling != dm_new_con_state->scaling)
new_crtc_state->connectors_changed = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
index 6ca288fb5fb9..2d46bc527b21 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
@@ -26,12 +26,12 @@
#include "bw_fixed.h"
-#define MIN_I64 \
- (int64_t)(-(1LL << 63))
-
#define MAX_I64 \
(int64_t)((1ULL << 63) - 1)
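+/* derive MIN from MAX: -(1LL << 63) shifts into the sign bit, which is undefined behavior */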
+#define MIN_I64 \
+ (-MAX_I64 - 1)
+
#define FRACTIONAL_PART_MASK \
((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index c026b393f3c5..940aa621485c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -92,8 +92,8 @@ static const struct out_csc_color_matrix_type output_csc_matrix[] = {
{ 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
{ COLOR_SPACE_YCBCR2020_TYPE,
- { 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2,
- 0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} },
+ { 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2,
+ 0x01E6, 0x0201, 0xFB88, 0xF478, 0x1000, 0x1004} },
{ COLOR_SPACE_YCBCR709_BLACK_TYPE,
{ 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000,
0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} },
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index de246e183d6b..cdcd5051dd66 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1315,6 +1315,9 @@ bool dc_remove_plane_from_context(
struct dc_stream_status *stream_status = NULL;
struct resource_pool *pool = dc->res_pool;
+ if (!plane_state)
+ return true;
+
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
stream_status = &context->stream_status[i];
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index bb09243758fe..71b10b45a9b9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -492,7 +492,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
- if (res_ctx->pipe_ctx[i].stream != stream)
+ if (res_ctx->pipe_ctx[i].stream != stream || !tg)
continue;
return tg->funcs->get_frame_count(tg);
@@ -551,7 +551,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
- if (res_ctx->pipe_ctx[i].stream != stream)
+ if (res_ctx->pipe_ctx[i].stream != stream || !tg)
continue;
tg->funcs->get_scanoutpos(tg,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index eca67d5d5b10..721be82ccebe 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -546,9 +546,11 @@ static void dce112_get_pix_clk_dividers_helper (
switch (pix_clk_params->color_depth) {
case COLOR_DEPTH_101010:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
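+ /* round down to a whole kHz; the value is in units of 100 Hz */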
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_121212:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_161616:
actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 6fd57cfb112f..96fdc18ecb3b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -778,7 +778,7 @@ static void dce_transform_set_pixel_storage_depth(
color_depth = COLOR_DEPTH_101010;
pixel_depth = 0;
expan_mode = 1;
- BREAK_TO_DEBUGGER();
+ DC_LOG_DC("The pixel depth %d is not valid, set COLOR_DEPTH_101010 instead.", depth);
break;
}
@@ -792,8 +792,7 @@ static void dce_transform_set_pixel_storage_depth(
if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
/*we should use unsupported capabilities
* unless it is required by w/a*/
- DC_LOG_WARNING("%s: Capability not supported",
- __func__);
+ DC_LOG_DC("%s: Capability not supported", __func__);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 7c52f7f9196c..3c096ad39096 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -340,7 +340,8 @@ static const struct dce_audio_registers audio_regs[] = {
audio_regs(2),
audio_regs(3),
audio_regs(4),
- audio_regs(5)
+ audio_regs(5),
+ audio_regs(6),
};
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index fa3acf60e7bd..3059739695d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1470,6 +1470,9 @@ dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
{
struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ if (!stream)
+ return false;
+
if (dpp == NULL)
return false;
@@ -1492,8 +1495,8 @@ dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
} else
dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
- if (stream != NULL && stream->ctx != NULL &&
- stream->out_transfer_func != NULL) {
+ if (stream->ctx &&
+ stream->out_transfer_func) {
log_tf(stream->ctx,
stream->out_transfer_func,
dpp->regamma_params.hw_points_num);
@@ -2902,7 +2905,9 @@ static void dcn10_wait_for_mpcc_disconnect(
if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
- res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+ if (pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+ res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
hubp->funcs->set_blank(hubp, true);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 8b2f29f6dabd..7044f7ada68e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -118,6 +118,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
@@ -193,8 +199,9 @@ struct mpcc *mpc1_insert_plane(
/* check insert_above_mpcc exist in tree->opp_list */
struct mpcc *temp_mpcc = tree->opp_list;
- while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
- temp_mpcc = temp_mpcc->mpcc_bot;
+ if (temp_mpcc != insert_above_mpcc)
+ while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
+ temp_mpcc = temp_mpcc->mpcc_bot;
if (temp_mpcc == NULL)
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index e74a07d03fde..4b0200e96eb7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -425,6 +425,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
OTG_CLOCK_ON, 1,
1, 1000);
} else {
+
+ //last chance to clear underflow; otherwise it stays set because the clock is off.
+ if (optc->funcs->is_optc_underflow_occurred(optc))
+ optc->funcs->clear_optc_underflow(optc);
+
REG_UPDATE_2(OTG_CLOCK_CONTROL,
OTG_CLOCK_GATE_DIS, 0,
OTG_CLOCK_EN, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 5a188b2bc033..0a00bd8e00ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -488,6 +488,12 @@ struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index e042d8ce05b4..22d105635e33 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1486,6 +1486,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
struct fixed31_32 lut2;
struct fixed31_32 delta_lut;
struct fixed31_32 delta_index;
+ const struct fixed31_32 one = dc_fixpt_from_int(1);
i = 0;
/* fixed_pt library has problems handling too small values */
@@ -1514,6 +1515,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
} else
hw_x = coordinates_x[i].x;
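+ /* the user regamma LUT is only defined on [0, 1]; clamp hw_x to the upper bound */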
+ if (dc_fixpt_le(one, hw_x))
+ hw_x = one;
+
norm_x = dc_fixpt_mul(norm_factor, hw_x);
index = dc_fixpt_floor(norm_x);
if (index < 0 || index > 255)
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 7d67cb2c61f0..ed5c9edfdcc5 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,8 +37,8 @@
#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
/* Number of elements in the render times cache array */
#define RENDER_TIMES_MAX_COUNT 10
-/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
-#define BTR_EXIT_MARGIN 2000
+/* Threshold to enter/exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_MAX_MARGIN 2500
/* Threshold to change BTR multiplier (to avoid frequent changes) */
#define BTR_DRIFT_MARGIN 2000
/*Threshold to exit fixed refresh rate*/
@@ -250,24 +250,22 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
unsigned int frames_to_insert = 0;
- unsigned int min_frame_duration_in_ns = 0;
- unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
unsigned int delta_from_mid_point_delta_in_us;
-
- min_frame_duration_in_ns = ((unsigned int) (div64_u64(
- (1000000000ULL * 1000000),
- in_out_vrr->max_refresh_in_uhz)));
+ unsigned int max_render_time_in_us =
+ in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us;
/* Program BTR */
- if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
+ if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) {
/* Exit Below the Range */
if (in_out_vrr->btr.btr_active) {
in_out_vrr->btr.frame_counter = 0;
in_out_vrr->btr.btr_active = false;
}
- } else if (last_render_time_in_us > max_render_time_in_us) {
+ } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
/* Enter Below the Range */
- in_out_vrr->btr.btr_active = true;
+ if (!in_out_vrr->btr.btr_active)
+ in_out_vrr->btr.btr_active = true;
}
/* BTR set to "not active" so disengage */
@@ -322,24 +320,50 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
+ * - Delta for CEIL: delta_from_mid_point_in_us_1
+ * - Delta for FLOOR: delta_from_mid_point_in_us_2
*/
- if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+ if (mid_point_frames_ceil &&
+ (last_render_time_in_us / mid_point_frames_ceil) <
+ in_out_vrr->min_duration_in_us) {
+ /* Check for out of range.
+ * If using CEIL produces a value that is out of range,
+ * then we are forced to use FLOOR.
+ */
+ frames_to_insert = mid_point_frames_floor;
+ } else if (mid_point_frames_floor < 2) {
+ /* Check if FLOOR would result in non-LFC. In this case
+ * choose to use CEIL
+ */
+ frames_to_insert = mid_point_frames_ceil;
+ } else if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+ /* If choosing CEIL results in a frame duration that is
+ * closer to the mid point of the range.
+ * Choose CEIL
+ */
frames_to_insert = mid_point_frames_ceil;
- delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
- delta_from_mid_point_in_us_1;
} else {
+ /* If choosing FLOOR results in a frame duration that is
+ * closer to the mid point of the range.
+ * Choose FLOOR
+ */
frames_to_insert = mid_point_frames_floor;
- delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
- delta_from_mid_point_in_us_2;
}
/* Prefer current frame multiplier when BTR is enabled unless it drifts
* too far from the midpoint
*/
+ if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+ delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
+ delta_from_mid_point_in_us_1;
+ } else {
+ delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
+ delta_from_mid_point_in_us_2;
+ }
if (in_out_vrr->btr.frames_to_insert != 0 &&
delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
- in_out_vrr->max_duration_in_us) &&
+ max_render_time_in_us) &&
((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
in_out_vrr->min_duration_in_us))
frames_to_insert = in_out_vrr->btr.frames_to_insert;
@@ -348,8 +372,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Either we've calculated the number of frames to insert,
* or we need to insert min duration frames
*/
- if (last_render_time_in_us / frames_to_insert <
- in_out_vrr->min_duration_in_us){
+ if (frames_to_insert &&
+ (last_render_time_in_us / frames_to_insert) <
+ in_out_vrr->min_duration_in_us) {
frames_to_insert -= (frames_to_insert > 1) ?
1 : 0;
}
@@ -792,6 +817,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
+ in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
+ 2 * in_out_vrr->min_duration_in_us;
+ if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
+ in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
+
in_out_vrr->supported = true;
}
@@ -808,6 +838,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->btr.inserted_duration_in_us = 0;
in_out_vrr->btr.frames_to_insert = 0;
in_out_vrr->btr.frame_counter = 0;
in_out_vrr->btr.mid_point_in_us =
(in_out_vrr->min_duration_in_us +
in_out_vrr->max_duration_in_us) / 2;
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index dc187844d10b..dbe7835aabcf 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -92,6 +92,7 @@ struct mod_vrr_params_btr {
uint32_t inserted_duration_in_us;
uint32_t frames_to_insert;
uint32_t frame_counter;
+ uint32_t margin_in_us;
};
struct mod_vrr_params_fixed_refresh {
diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
index 0b6a057e0a4c..5aac8d545bdc 100644
--- a/drivers/gpu/drm/amd/include/pptable.h
+++ b/drivers/gpu/drm/amd/include/pptable.h
@@ -78,7 +78,7 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
typedef struct _ATOM_PPLIB_STATE
{
UCHAR ucNonClockStateIndex;
- UCHAR ucClockStateIndices[1]; // variable-sized
+ UCHAR ucClockStateIndices[]; // variable-sized
} ATOM_PPLIB_STATE;
@@ -473,7 +473,7 @@ typedef struct _ATOM_PPLIB_STATE_V2
/**
* Driver will read the first ucNumDPMLevels in this array
*/
- UCHAR clockInfoIndex[1];
+ UCHAR clockInfoIndex[];
} ATOM_PPLIB_STATE_V2;
typedef struct _StateArray{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
index 1e870f58dd12..0c61e2bc14cd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
@@ -164,7 +164,7 @@ typedef struct _ATOM_Tonga_State {
typedef struct _ATOM_Tonga_State_Array {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Tonga_State entries[1]; /* Dynamically allocate entries. */
+ ATOM_Tonga_State entries[]; /* Dynamically allocate entries. */
} ATOM_Tonga_State_Array;
typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
@@ -179,7 +179,7 @@ typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
typedef struct _ATOM_Tonga_MCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Tonga_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Tonga_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Tonga_MCLK_Dependency_Table;
typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
@@ -194,7 +194,7 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Tonga_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Tonga_SCLK_Dependency_Table;
typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
@@ -210,7 +210,7 @@ typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Polaris_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Polaris_SCLK_Dependency_Table;
typedef struct _ATOM_Tonga_PCIE_Record {
@@ -222,7 +222,7 @@ typedef struct _ATOM_Tonga_PCIE_Record {
typedef struct _ATOM_Tonga_PCIE_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Tonga_PCIE_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Tonga_PCIE_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Tonga_PCIE_Table;
typedef struct _ATOM_Polaris10_PCIE_Record {
@@ -235,7 +235,7 @@ typedef struct _ATOM_Polaris10_PCIE_Record {
typedef struct _ATOM_Polaris10_PCIE_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Polaris10_PCIE_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Polaris10_PCIE_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Polaris10_PCIE_Table;
@@ -252,7 +252,7 @@ typedef struct _ATOM_Tonga_MM_Dependency_Record {
typedef struct _ATOM_Tonga_MM_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Tonga_MM_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Tonga_MM_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Tonga_MM_Dependency_Table;
typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
@@ -265,7 +265,7 @@ typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
typedef struct _ATOM_Tonga_Voltage_Lookup_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Tonga_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Tonga_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Tonga_Voltage_Lookup_Table;
typedef struct _ATOM_Tonga_Fan_Table {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 947e4fa3c5e6..d499add3601a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2894,7 +2894,8 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
data->od8_settings.od8_settings_array;
OverDriveTable_t *od_table =
&(data->smc_state_table.overdrive_table);
- int32_t input_index, input_clk, input_vol, i;
+ int32_t input_clk, input_vol, i;
+ uint32_t input_index;
int od8_id;
int ret;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index b848270e0a1f..31527fb66b5c 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -1171,7 +1171,7 @@ int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
return 0;
}
-static void
+static int
komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
struct komeda_pipeline_state *new)
{
@@ -1190,8 +1190,12 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
c = komeda_pipeline_get_component(pipe, id);
c_st = komeda_component_get_state_and_set_user(c,
drm_st, NULL, new->crtc);
+ if (PTR_ERR(c_st) == -EDEADLK)
+ return -EDEADLK;
WARN_ON(IS_ERR(c_st));
}
+
+ return 0;
}
/* release unclaimed pipeline resource */
@@ -1213,9 +1217,8 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
if (WARN_ON(IS_ERR_OR_NULL(st)))
return -EINVAL;
- komeda_pipeline_unbound_components(pipe, st);
+ return komeda_pipeline_unbound_components(pipe, st);
- return 0;
}
void komeda_pipeline_disable(struct komeda_pipeline *pipe,
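
Returning `-EDEADLK` from `komeda_pipeline_unbound_components()` matters because atomic state acquisition uses ww-mutex backoff: the caller must see that code so it can drop its locks and retry rather than treat the contention as a failure. A hedged sketch of the propagation rule, with hypothetical helper names:

#include <linux/bug.h>
#include <linux/err.h>

struct pipeline;
struct pipe_state;
struct pipe_state *acquire_state(struct pipeline *pipe);	/* hypothetical, may return an ERR_PTR */

static int claim_components(struct pipeline *pipe)
{
	struct pipe_state *st = acquire_state(pipe);

	if (PTR_ERR(st) == -EDEADLK)
		return -EDEADLK;	/* normal ww-mutex contention: caller backs off and retries */
	if (WARN_ON(IS_ERR(st)))
		return PTR_ERR(st);	/* anything else really is a bug */
	return 0;
}
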
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 07f0da4d9ba1..8e7c5ce71608 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -4,6 +4,8 @@
* Rewritten from the dovefb driver, and Armada510 manuals.
*/
+#include <linux/bitfield.h>
+
#include <drm/armada_drm.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -446,8 +448,8 @@ static int armada_overlay_get_property(struct drm_plane *plane,
drm_to_overlay_state(state)->colorkey_ug,
drm_to_overlay_state(state)->colorkey_vb, 0);
} else if (property == priv->colorkey_mode_prop) {
- *val = (drm_to_overlay_state(state)->colorkey_mode &
- CFG_CKMODE_MASK) >> ffs(CFG_CKMODE_MASK);
+ *val = FIELD_GET(CFG_CKMODE_MASK,
+ drm_to_overlay_state(state)->colorkey_mode);
} else if (property == priv->brightness_prop) {
*val = drm_to_overlay_state(state)->brightness + 256;
} else if (property == priv->contrast_prop) {
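
The armada hunk swaps an open-coded mask-and-shift for `FIELD_GET()` from `<linux/bitfield.h>`, which derives the shift from the mask at compile time; note that the kernel's `ffs()` is 1-based, so shifts built from it need a `- 1`, a class of off-by-one that `FIELD_GET()` avoids entirely. A plain-C equivalent (the mask value here is illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define CKMODE_MASK 0x00070000u	/* illustrative 3-bit field at bit 16 */

/* Userspace stand-in for the kernel's FIELD_GET(): shift derived from the mask. */
#define FIELD_GET_EQ(mask, reg) (((reg) & (mask)) >> __builtin_ctz(mask))

int main(void)
{
	uint32_t reg = 0x00050000;

	printf("mode = %u\n", (unsigned int)FIELD_GET_EQ(CKMODE_MASK, reg));	/* prints 5 */
	return 0;
}
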
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 2d1b18619743..60629a43207c 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -294,7 +294,7 @@ static void ast_init_dram_reg(struct drm_device *dev)
;
} while (ast_read32(ast, 0x10100) != 0xa8);
} else {/* AST2100/1100 */
- if (ast->chip == AST2100 || ast->chip == 2200)
+ if (ast->chip == AST2100 || ast->chip == AST2200)
dram_reg_info = ast2100_dram_table_data;
else
dram_reg_info = ast1100_dram_table_data;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 52b2adfdc877..90a721226231 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -384,10 +384,7 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
#else
static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
{
- unsigned int offset = adv7511->type == ADV7533 ?
- ADV7533_REG_CEC_OFFSET : 0;
-
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index a20a45c0b353..ddd1305b82b2 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -316,7 +316,7 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
goto err_cec_alloc;
}
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0);
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, 0);
/* cec soft reset */
regmap_write(adv7511->regmap_cec,
ADV7511_REG_CEC_SOFT_RESET + offset, 0x01);
@@ -343,7 +343,7 @@ err_cec_alloc:
dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
ret);
err_cec_parse_dt:
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
return ret == -EPROBE_DEFER ? ret : 0;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index e7bf32f234d7..2cdfbdcbf02d 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -756,8 +756,13 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
else
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
- regmap_update_bits(adv7511->regmap, 0xfb,
- 0x6, low_refresh_rate << 1);
+ if (adv7511->type == ADV7511)
+ regmap_update_bits(adv7511->regmap, 0xfb,
+ 0x6, low_refresh_rate << 1);
+ else
+ regmap_update_bits(adv7511->regmap, 0x4a,
+ 0xc, low_refresh_rate << 2);
+
regmap_update_bits(adv7511->regmap, 0x17,
0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
@@ -985,6 +990,10 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv)
ADV7511_CEC_I2C_ADDR_DEFAULT);
if (IS_ERR(adv->i2c_cec))
return PTR_ERR(adv->i2c_cec);
+
+ regmap_write(adv->regmap, ADV7511_REG_CEC_I2C_ADDR,
+ adv->i2c_cec->addr << 1);
+
i2c_set_clientdata(adv->i2c_cec, adv);
adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
@@ -1189,9 +1198,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (ret)
goto err_i2c_unregister_packet;
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
- adv7511->i2c_cec->addr << 1);
-
INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
if (i2c->irq) {
@@ -1301,10 +1307,21 @@ static struct i2c_driver adv7511_driver = {
static int __init adv7511_init(void)
{
- if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
- mipi_dsi_driver_register(&adv7533_dsi_driver);
+ int ret;
- return i2c_add_driver(&adv7511_driver);
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) {
+ ret = mipi_dsi_driver_register(&adv7533_dsi_driver);
+ if (ret)
+ return ret;
+ }
+
+ ret = i2c_add_driver(&adv7511_driver);
+ if (ret) {
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+ mipi_dsi_driver_unregister(&adv7533_dsi_driver);
+ }
+
+ return ret;
}
module_init(adv7511_init);
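
The reworked `adv7511_init()` (and `stdpxxxx_ge_b850v3_init()` below) follows the usual pairwise-unwind rule for module init: if the second registration fails, the first must be unregistered before the error is returned, or the module is left half-loaded. The bare shape of the pattern, with hypothetical helpers:

#include <linux/init.h>

int register_helper_a(void);	/* hypothetical */
void unregister_helper_a(void);	/* hypothetical */
int register_helper_b(void);	/* hypothetical */

static int __init example_init(void)
{
	int ret;

	ret = register_helper_a();
	if (ret)
		return ret;

	ret = register_helper_b();
	if (ret)
		unregister_helper_a();	/* unwind the earlier success */

	return ret;
}
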
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index c6a51d1c7ec9..df606a567566 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1823,12 +1823,6 @@ EXPORT_SYMBOL_GPL(analogix_dp_remove);
int analogix_dp_suspend(struct analogix_dp_device *dp)
{
clk_disable_unprepare(dp->clock);
-
- if (dp->plat_data->panel) {
- if (drm_panel_unprepare(dp->plat_data->panel))
- DRM_ERROR("failed to turnoff the panel\n");
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_suspend);
@@ -1843,13 +1837,6 @@ int analogix_dp_resume(struct analogix_dp_device *dp)
return ret;
}
- if (dp->plat_data->panel) {
- if (drm_panel_prepare(dp->plat_data->panel)) {
- DRM_ERROR("failed to setup the panel\n");
- return -EBUSY;
- }
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_resume);
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 5302dd90a7a5..14d578fce09f 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -279,7 +279,9 @@ static void ge_b850v3_lvds_remove(void)
* This check is to avoid both the drivers
* removing the bridge in their remove() function
*/
- if (!ge_b850v3_lvds_ptr)
+ if (!ge_b850v3_lvds_ptr ||
+ !ge_b850v3_lvds_ptr->stdp2690_i2c ||
+ !ge_b850v3_lvds_ptr->stdp4028_i2c)
goto out;
drm_bridge_remove(&ge_b850v3_lvds_ptr->bridge);
@@ -424,7 +426,11 @@ static int __init stdpxxxx_ge_b850v3_init(void)
if (ret)
return ret;
- return i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
+ ret = i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
+ if (ret)
+ i2c_del_driver(&stdp4028_ge_b850v3_fw_driver);
+
+ return ret;
}
module_init(stdpxxxx_ge_b850v3_init);
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index d4a1cc5052c3..289a729d8ec4 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -54,13 +54,13 @@ static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr,
int ret;
ret = i2c_master_send(ptn_bridge->client, &addr, 1);
- if (ret <= 0) {
+ if (ret < 0) {
DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
return ret;
}
ret = i2c_master_recv(ptn_bridge->client, buf, len);
- if (ret <= 0) {
+ if (ret < 0) {
DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret);
return ret;
}
@@ -78,7 +78,7 @@ static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr,
buf[1] = val;
ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf));
- if (ret <= 0) {
+ if (ret < 0) {
DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
return ret;
}
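
The ptn3460 change relaxes `ret <= 0` to `ret < 0` because `i2c_master_send()` and `i2c_master_recv()` return the number of bytes transferred on success and a negative errno on failure, so a positive return is never an error. A hedged sketch of a checked transfer; the short-read policy at the end is one possible choice, not what this driver does:

#include <linux/errno.h>
#include <linux/i2c.h>

static int read_reg(struct i2c_client *client, char addr, char *buf, int len)
{
	int ret;

	ret = i2c_master_send(client, &addr, 1);
	if (ret < 0)		/* negative errno is the only failure indication */
		return ret;

	ret = i2c_master_recv(client, buf, len);
	if (ret < 0)
		return ret;

	return ret == len ? 0 : -EIO;	/* optionally treat short reads as I/O errors */
}
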
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index fb0b64c965b7..a3fc8dd507a7 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -604,7 +604,7 @@ static void *sii8620_burst_get_tx_buf(struct sii8620 *ctx, int len)
u8 *buf = &ctx->burst.tx_buf[ctx->burst.tx_count];
int size = len + 2;
- if (ctx->burst.tx_count + size > ARRAY_SIZE(ctx->burst.tx_buf)) {
+ if (ctx->burst.tx_count + size >= ARRAY_SIZE(ctx->burst.tx_buf)) {
dev_err(ctx->dev, "TX-BLK buffer exhausted\n");
ctx->error = -EINVAL;
return NULL;
@@ -621,7 +621,7 @@ static u8 *sii8620_burst_get_rx_buf(struct sii8620 *ctx, int len)
u8 *buf = &ctx->burst.rx_buf[ctx->burst.rx_count];
int size = len + 1;
- if (ctx->burst.tx_count + size > ARRAY_SIZE(ctx->burst.tx_buf)) {
+ if (ctx->burst.rx_count + size >= ARRAY_SIZE(ctx->burst.rx_buf)) {
dev_err(ctx->dev, "RX-BLK buffer exhausted\n");
ctx->error = -EINVAL;
return NULL;
@@ -985,7 +985,7 @@ static void sii8620_set_auto_zone(struct sii8620 *ctx)
static void sii8620_stop_video(struct sii8620 *ctx)
{
- u8 uninitialized_var(val);
+ u8 val;
sii8620_write_seq_static(ctx,
REG_TPI_INTR_EN, 0,
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 170f162ffa55..121250473761 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -180,7 +180,7 @@ static void tc358764_read(struct tc358764 *ctx, u16 addr, u32 *val)
if (ret >= 0)
le32_to_cpus(val);
- dev_dbg(ctx->dev, "read: %d, addr: %d\n", addr, *val);
+ dev_dbg(ctx->dev, "read: addr=0x%04x data=0x%08x\n", addr, *val);
}
static void tc358764_write(struct tc358764 *ctx, u16 addr, u32 val)
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 0454675a44cb..a58943115241 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1574,7 +1574,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
} else {
if (tc->hpd_pin < 0 || tc->hpd_pin > 1) {
dev_err(dev, "failed to parse HPD number\n");
- return ret;
+ return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index dbb4a374cb64..20ea1c6bc8bb 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -460,9 +460,9 @@ static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
&pdata->bridge.encoder->crtc->state->adjusted_mode;
u8 hsync_polarity = 0, vsync_polarity = 0;
- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
hsync_polarity = CHA_HSYNC_POLARITY;
- if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
vsync_polarity = CHA_VSYNC_POLARITY;
ti_sn_bridge_write_u16(pdata, SN_CHA_ACTIVE_LINE_LENGTH_LOW_REG,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 14aeaf736321..47649186fed7 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -97,6 +97,12 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
if (!state->planes)
goto fail;
+ /*
+ * Because drm_atomic_state can be committed asynchronously we need our
+ * own reference and cannot rely on the one implied by drm_file in the
+ * ioctl call.
+ */
+ drm_dev_get(dev);
state->dev = dev;
DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);
@@ -256,7 +262,8 @@ EXPORT_SYMBOL(drm_atomic_state_clear);
void __drm_atomic_state_free(struct kref *ref)
{
struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
- struct drm_mode_config *config = &state->dev->mode_config;
+ struct drm_device *dev = state->dev;
+ struct drm_mode_config *config = &dev->mode_config;
drm_atomic_state_clear(state);
@@ -268,6 +275,8 @@ void __drm_atomic_state_free(struct kref *ref)
drm_atomic_state_default_release(state);
kfree(state);
}
+
+ drm_dev_put(dev);
}
EXPORT_SYMBOL(__drm_atomic_state_free);
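
These drm_atomic hunks pair a `drm_dev_get()` at state allocation with a `drm_dev_put()` after the final kref release, so a state committed asynchronously can no longer outlive its `drm_device`. The same get-at-create, put-after-last-ref shape in isolation, with a hypothetical object:

#include <linux/kref.h>
#include <linux/slab.h>
#include <drm/drm_drv.h>

struct async_obj {
	struct drm_device *dev;
	struct kref ref;
};

static struct async_obj *async_obj_create(struct drm_device *dev)
{
	struct async_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	kref_init(&obj->ref);
	drm_dev_get(dev);	/* reference taken while the device is known alive */
	obj->dev = dev;
	return obj;
}

static void async_obj_release(struct kref *ref)
{
	struct async_obj *obj = container_of(ref, struct async_obj, ref);
	struct drm_device *dev = obj->dev;

	kfree(obj);
	drm_dev_put(dev);	/* dropped only after the last user is gone */
}
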
@@ -1006,6 +1015,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
+ drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc);
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
if (state->writeback_job && state->writeback_job->fb)
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5e906ea6df67..d91d6c063a1d 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -445,8 +445,9 @@ mode_fixup(struct drm_atomic_state *state)
encoder = new_conn_state->best_encoder;
funcs = encoder->helper_private;
- ret = drm_bridge_mode_fixup(encoder->bridge, &new_crtc_state->mode,
- &new_crtc_state->adjusted_mode);
+ ret = drm_bridge_chain_mode_fixup(encoder->bridge,
+ &new_crtc_state->mode,
+ &new_crtc_state->adjusted_mode);
if (!ret) {
DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
return -EINVAL;
@@ -511,7 +512,7 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
return ret;
}
- ret = drm_bridge_mode_valid(encoder->bridge, mode);
+ ret = drm_bridge_chain_mode_valid(encoder->bridge, mode);
if (ret != MODE_OK) {
DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
return ret;
@@ -588,6 +589,7 @@ mode_valid(struct drm_atomic_state *state)
* &drm_crtc_state.connectors_changed is set when a connector is added or
* removed from the crtc. &drm_crtc_state.active_changed is set when
* &drm_crtc_state.active changes, which is used for DPMS.
+ * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
* See also: drm_atomic_crtc_needs_modeset()
*
* IMPORTANT:
@@ -654,6 +656,11 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
return -EINVAL;
}
+
+ if (drm_dev_has_vblank(dev))
+ new_crtc_state->no_vblank = false;
+ else
+ new_crtc_state->no_vblank = true;
}
ret = handle_conflicting_encoders(state, false);
@@ -1030,7 +1037,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
* Each encoder has at most one connector (since we always steal
* it away), so we won't call disable hooks twice.
*/
- drm_atomic_bridge_disable(encoder->bridge, old_state);
+ drm_atomic_bridge_chain_disable(encoder->bridge, old_state);
/* Right function depends upon target state. */
if (funcs) {
@@ -1044,7 +1051,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
- drm_atomic_bridge_post_disable(encoder->bridge, old_state);
+ drm_atomic_bridge_chain_post_disable(encoder->bridge,
+ old_state);
}
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -1078,7 +1086,16 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
continue;
ret = drm_crtc_vblank_get(crtc);
- WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
+ /*
+ * Self-refresh is not a true "disable"; ensure vblank remains
+ * enabled.
+ */
+ if (new_crtc_state->self_refresh_active)
+ WARN_ONCE(ret != 0,
+ "driver disabled vblank in self-refresh\n");
+ else
+ WARN_ONCE(ret != -EINVAL,
+ "driver forgot to call drm_crtc_vblank_off()\n");
if (ret == 0)
drm_crtc_vblank_put(crtc);
}
@@ -1225,7 +1242,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->mode_set(encoder, mode, adjusted_mode);
}
- drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
+ drm_bridge_chain_mode_set(encoder->bridge, mode,
+ adjusted_mode);
}
}
@@ -1342,7 +1360,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
* Each encoder has at most one connector (since we always steal
* it away), so we won't call enable hooks twice.
*/
- drm_atomic_bridge_pre_enable(encoder->bridge, old_state);
+ drm_atomic_bridge_chain_pre_enable(encoder->bridge, old_state);
if (funcs) {
if (funcs->atomic_enable)
@@ -1353,7 +1371,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
funcs->commit(encoder);
}
- drm_atomic_bridge_enable(encoder->bridge, old_state);
+ drm_atomic_bridge_chain_enable(encoder->bridge, old_state);
}
drm_atomic_helper_commit_writebacks(dev, old_state);
@@ -2202,7 +2220,9 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
* when a job is queued, and any change to the pipeline that does not touch the
* connector is leading to timeouts when calling
* drm_atomic_helper_wait_for_vblanks() or
- * drm_atomic_helper_wait_for_flip_done().
+ * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
+ * connectors, this function can also fake VBLANK events for CRTCs without
+ * VBLANK interrupt.
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 7a26bfb5329c..000fc820638a 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -75,15 +75,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
state->mode_blob = NULL;
if (mode) {
+ struct drm_property_blob *blob;
+
drm_mode_convert_to_umode(&umode, mode);
- state->mode_blob =
- drm_property_create_blob(state->crtc->dev,
- sizeof(umode),
- &umode);
- if (IS_ERR(state->mode_blob))
- return PTR_ERR(state->mode_blob);
+ blob = drm_property_create_blob(crtc->dev,
+ sizeof(umode), &umode);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
drm_mode_copy(&state->mode, mode);
+
+ state->mode_blob = blob;
state->enable = true;
DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
mode->name, crtc->base.id, crtc->name, state);
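
The drm_atomic_uapi fix builds the new blob into a local variable and assigns `state->mode_blob` only once creation has succeeded, so a failed allocation can never leave the state holding an `ERR_PTR` that later code would dereference. The publish-last idiom in miniature, with hypothetical names:

#include <linux/err.h>

struct thing;
struct some_state { struct thing *thing; };
struct thing *create_thing(void);	/* hypothetical, may return an ERR_PTR */

static int set_thing(struct some_state *state)
{
	struct thing *t = create_thing();

	if (IS_ERR(t))
		return PTR_ERR(t);	/* state->thing still holds its previous value */

	state->thing = t;		/* publish only the known-good pointer */
	return 0;
}
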
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index cba537c99e43..54c874493c57 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -172,8 +172,8 @@ void drm_bridge_detach(struct drm_bridge *bridge)
*/
/**
- * drm_bridge_mode_fixup - fixup proposed mode for all bridges in the
- * encoder chain
+ * drm_bridge_chain_mode_fixup - fixup proposed mode for all bridges in the
+ * encoder chain
* @bridge: bridge control structure
* @mode: desired mode to be set for the bridge
* @adjusted_mode: updated mode that works for this bridge
@@ -186,9 +186,9 @@ void drm_bridge_detach(struct drm_bridge *bridge)
* RETURNS:
* true on success, false on failure
*/
-bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
bool ret = true;
@@ -198,15 +198,16 @@ bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
if (bridge->funcs->mode_fixup)
ret = bridge->funcs->mode_fixup(bridge, mode, adjusted_mode);
- ret = ret && drm_bridge_mode_fixup(bridge->next, mode, adjusted_mode);
+ ret = ret && drm_bridge_chain_mode_fixup(bridge->next, mode,
+ adjusted_mode);
return ret;
}
-EXPORT_SYMBOL(drm_bridge_mode_fixup);
+EXPORT_SYMBOL(drm_bridge_chain_mode_fixup);
/**
- * drm_bridge_mode_valid - validate the mode against all bridges in the
- * encoder chain.
+ * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
+ * encoder chain.
* @bridge: bridge control structure
* @mode: desired mode to be validated
*
@@ -219,8 +220,9 @@ EXPORT_SYMBOL(drm_bridge_mode_fixup);
* RETURNS:
* MODE_OK on success, drm_mode_status Enum error code on failure
*/
-enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
- const struct drm_display_mode *mode)
+enum drm_mode_status
+drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
{
enum drm_mode_status ret = MODE_OK;
@@ -233,12 +235,12 @@ enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
if (ret != MODE_OK)
return ret;
- return drm_bridge_mode_valid(bridge->next, mode);
+ return drm_bridge_chain_mode_valid(bridge->next, mode);
}
-EXPORT_SYMBOL(drm_bridge_mode_valid);
+EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
/**
- * drm_bridge_disable - disables all bridges in the encoder chain
+ * drm_bridge_chain_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.disable op for all the bridges in the encoder
@@ -247,20 +249,21 @@ EXPORT_SYMBOL(drm_bridge_mode_valid);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_bridge_disable(struct drm_bridge *bridge)
+void drm_bridge_chain_disable(struct drm_bridge *bridge)
{
if (!bridge)
return;
- drm_bridge_disable(bridge->next);
+ drm_bridge_chain_disable(bridge->next);
if (bridge->funcs->disable)
bridge->funcs->disable(bridge);
}
-EXPORT_SYMBOL(drm_bridge_disable);
+EXPORT_SYMBOL(drm_bridge_chain_disable);
/**
- * drm_bridge_post_disable - cleans up after disabling all bridges in the encoder chain
+ * drm_bridge_chain_post_disable - cleans up after disabling all bridges in the
+ * encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.post_disable op for all the bridges in the
@@ -269,7 +272,7 @@ EXPORT_SYMBOL(drm_bridge_disable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_bridge_post_disable(struct drm_bridge *bridge)
+void drm_bridge_chain_post_disable(struct drm_bridge *bridge)
{
if (!bridge)
return;
@@ -277,25 +280,25 @@ void drm_bridge_post_disable(struct drm_bridge *bridge)
if (bridge->funcs->post_disable)
bridge->funcs->post_disable(bridge);
- drm_bridge_post_disable(bridge->next);
+ drm_bridge_chain_post_disable(bridge->next);
}
-EXPORT_SYMBOL(drm_bridge_post_disable);
+EXPORT_SYMBOL(drm_bridge_chain_post_disable);
/**
- * drm_bridge_mode_set - set proposed mode for all bridges in the
- * encoder chain
+ * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
+ * encoder chain
* @bridge: bridge control structure
- * @mode: desired mode to be set for the bridge
- * @adjusted_mode: updated mode that works for this bridge
+ * @mode: desired mode to be set for the encoder chain
+ * @adjusted_mode: updated mode that works for this encoder chain
*
* Calls &drm_bridge_funcs.mode_set op for all the bridges in the
* encoder chain, starting from the first bridge to the last.
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
+void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
if (!bridge)
return;
@@ -303,13 +306,13 @@ void drm_bridge_mode_set(struct drm_bridge *bridge,
if (bridge->funcs->mode_set)
bridge->funcs->mode_set(bridge, mode, adjusted_mode);
- drm_bridge_mode_set(bridge->next, mode, adjusted_mode);
+ drm_bridge_chain_mode_set(bridge->next, mode, adjusted_mode);
}
-EXPORT_SYMBOL(drm_bridge_mode_set);
+EXPORT_SYMBOL(drm_bridge_chain_mode_set);
/**
- * drm_bridge_pre_enable - prepares for enabling all
- * bridges in the encoder chain
+ * drm_bridge_chain_pre_enable - prepares for enabling all bridges in the
+ * encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.pre_enable op for all the bridges in the encoder
@@ -318,20 +321,20 @@ EXPORT_SYMBOL(drm_bridge_mode_set);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_bridge_pre_enable(struct drm_bridge *bridge)
+void drm_bridge_chain_pre_enable(struct drm_bridge *bridge)
{
if (!bridge)
return;
- drm_bridge_pre_enable(bridge->next);
+ drm_bridge_chain_pre_enable(bridge->next);
if (bridge->funcs->pre_enable)
bridge->funcs->pre_enable(bridge);
}
-EXPORT_SYMBOL(drm_bridge_pre_enable);
+EXPORT_SYMBOL(drm_bridge_chain_pre_enable);
/**
- * drm_bridge_enable - enables all bridges in the encoder chain
+ * drm_bridge_chain_enable - enables all bridges in the encoder chain
* @bridge: bridge control structure
*
* Calls &drm_bridge_funcs.enable op for all the bridges in the encoder
@@ -340,7 +343,7 @@ EXPORT_SYMBOL(drm_bridge_pre_enable);
*
* Note that the bridge passed should be the one closest to the encoder
*/
-void drm_bridge_enable(struct drm_bridge *bridge)
+void drm_bridge_chain_enable(struct drm_bridge *bridge)
{
if (!bridge)
return;
@@ -348,12 +351,12 @@ void drm_bridge_enable(struct drm_bridge *bridge)
if (bridge->funcs->enable)
bridge->funcs->enable(bridge);
- drm_bridge_enable(bridge->next);
+ drm_bridge_chain_enable(bridge->next);
}
-EXPORT_SYMBOL(drm_bridge_enable);
+EXPORT_SYMBOL(drm_bridge_chain_enable);
/**
- * drm_atomic_bridge_disable - disables all bridges in the encoder chain
+ * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
* @state: atomic state being committed
*
@@ -364,24 +367,24 @@ EXPORT_SYMBOL(drm_bridge_enable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_atomic_bridge_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
if (!bridge)
return;
- drm_atomic_bridge_disable(bridge->next, state);
+ drm_atomic_bridge_chain_disable(bridge->next, state);
if (bridge->funcs->atomic_disable)
bridge->funcs->atomic_disable(bridge, state);
else if (bridge->funcs->disable)
bridge->funcs->disable(bridge);
}
-EXPORT_SYMBOL(drm_atomic_bridge_disable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
/**
- * drm_atomic_bridge_post_disable - cleans up after disabling all bridges in the
- * encoder chain
+ * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
+ * in the encoder chain
* @bridge: bridge control structure
* @state: atomic state being committed
*
@@ -392,8 +395,8 @@ EXPORT_SYMBOL(drm_atomic_bridge_disable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_atomic_bridge_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
if (!bridge)
return;
@@ -403,13 +406,13 @@ void drm_atomic_bridge_post_disable(struct drm_bridge *bridge,
else if (bridge->funcs->post_disable)
bridge->funcs->post_disable(bridge);
- drm_atomic_bridge_post_disable(bridge->next, state);
+ drm_atomic_bridge_chain_post_disable(bridge->next, state);
}
-EXPORT_SYMBOL(drm_atomic_bridge_post_disable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
/**
- * drm_atomic_bridge_pre_enable - prepares for enabling all bridges in the
- * encoder chain
+ * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
+ * the encoder chain
* @bridge: bridge control structure
* @state: atomic state being committed
*
@@ -420,23 +423,23 @@ EXPORT_SYMBOL(drm_atomic_bridge_post_disable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_atomic_bridge_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
if (!bridge)
return;
- drm_atomic_bridge_pre_enable(bridge->next, state);
+ drm_atomic_bridge_chain_pre_enable(bridge->next, state);
if (bridge->funcs->atomic_pre_enable)
bridge->funcs->atomic_pre_enable(bridge, state);
else if (bridge->funcs->pre_enable)
bridge->funcs->pre_enable(bridge);
}
-EXPORT_SYMBOL(drm_atomic_bridge_pre_enable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
/**
- * drm_atomic_bridge_enable - enables all bridges in the encoder chain
+ * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
* @bridge: bridge control structure
* @state: atomic state being committed
*
@@ -447,8 +450,8 @@ EXPORT_SYMBOL(drm_atomic_bridge_pre_enable);
*
* Note: the bridge passed should be the one closest to the encoder
*/
-void drm_atomic_bridge_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
if (!bridge)
return;
@@ -458,9 +461,9 @@ void drm_atomic_bridge_enable(struct drm_bridge *bridge,
else if (bridge->funcs->enable)
bridge->funcs->enable(bridge);
- drm_atomic_bridge_enable(bridge->next, state);
+ drm_atomic_bridge_chain_enable(bridge->next, state);
}
-EXPORT_SYMBOL(drm_atomic_bridge_enable);
+EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
#ifdef CONFIG_OF
/**
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index bf1bdb0aac19..10769efaf7cb 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -281,6 +281,9 @@ static bool drm_client_target_cloned(struct drm_device *dev,
can_clone = true;
dmt_mode = drm_mode_find_dmt(dev, 1024, 768, 60, false);
+ if (!dmt_mode)
+ goto fail;
+
for (i = 0; i < connector_count; i++) {
if (!enabled[i])
continue;
@@ -296,11 +299,13 @@ static bool drm_client_target_cloned(struct drm_device *dev,
if (!modes[i])
can_clone = false;
}
+ kfree(dmt_mode);
if (can_clone) {
DRM_DEBUG_KMS("can clone using 1024x768\n");
return true;
}
+fail:
DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
return false;
}
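
The drm_client_modeset fix treats the mode returned by `drm_mode_find_dmt()` as an allocation that must be released on every path, and bails out early when the lookup itself fails. The shape of the fix, hedged (the hunk above frees with `kfree()`; `drm_mode_destroy()` is the usual counterpart for duplicated modes):

#include <drm/drm_edid.h>
#include <drm/drm_modes.h>

static bool target_cloned_sketch(struct drm_device *dev)
{
	struct drm_display_mode *dmt_mode;
	bool can_clone = true;

	dmt_mode = drm_mode_find_dmt(dev, 1024, 768, 60, false);
	if (!dmt_mode)
		return false;		/* lookup failed: nothing allocated, nothing to free */

	/* ... compare each enabled connector against dmt_mode ... */

	drm_mode_destroy(dev, dmt_mode);	/* released on success and failure alike */
	return can_clone;
}
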
@@ -785,6 +790,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
break;
}
+ kfree(modeset->mode);
modeset->mode = drm_mode_duplicate(dev, mode);
drm_connector_get(connector);
modeset->connectors[modeset->num_connectors++] = connector;
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 11a81e8ba963..123b04f44ca8 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -474,6 +474,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
mutex_destroy(&connector->mutex);
memset(connector, 0, sizeof(*connector));
+
+ if (dev->registered)
+ drm_sysfs_hotplug_event(dev);
}
EXPORT_SYMBOL(drm_connector_cleanup);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 4936e1080e41..85d85a0ba85f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -535,8 +535,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
struct drm_modeset_acquire_ctx ctx;
- int ret;
- int i;
+ int ret, i, num_connectors = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
@@ -694,6 +693,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
connector->name);
connector_set[i] = connector;
+ num_connectors++;
}
}
@@ -702,7 +702,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
set.y = crtc_req->y;
set.mode = mode;
set.connectors = connector_set;
- set.num_connectors = crtc_req->count_connectors;
+ set.num_connectors = num_connectors;
set.fb = fb;
if (drm_drv_uses_atomic_modeset(dev))
@@ -715,7 +715,7 @@ out:
drm_framebuffer_put(fb);
if (connector_set) {
- for (i = 0; i < crtc_req->count_connectors; i++) {
+ for (i = 0; i < num_connectors; i++) {
if (connector_set[i])
drm_connector_put(connector_set[i]);
}
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 1ff13af13324..7b396b7277ee 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1806,14 +1806,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
+ if (!mstb)
+ return NULL;
+
if (memcmp(mstb->guid, guid, 16) == 0)
return mstb;
list_for_each_entry(port, &mstb->ports, next) {
- if (!port->mstb)
- continue;
-
found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
if (found_mstb)
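
Hoisting the NULL test to the top of `get_mst_branch_device_by_guid_helper()` lets the recursion pass `port->mstb` down unconditionally; every call, including the outermost one, now tolerates a NULL branch. The guard-at-entry shape on a toy tree:

#include <stdio.h>

struct node {
	int key;
	struct node *left, *right;
};

/* Guard at entry: callers may pass NULL children without checking first. */
static struct node *find(struct node *n, int key)
{
	struct node *hit;

	if (!n)
		return NULL;
	if (n->key == key)
		return n;
	hit = find(n->left, key);	/* no per-child NULL test needed */
	return hit ? hit : find(n->right, key);
}

int main(void)
{
	struct node leaf = { 7, NULL, NULL };
	struct node root = { 1, &leaf, NULL };

	printf("%s\n", find(&root, 7) ? "found" : "missing");
	return 0;
}
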
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 769feefeeeef..c9529a808b8e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -984,8 +984,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto err_minors;
}
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_modeset_register_all(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_modeset_register_all(dev);
+ if (ret)
+ goto err_unload;
+ }
ret = 0;
@@ -997,6 +1000,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto out_unlock;
+err_unload:
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
err_minors:
remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 2dc6dd6230d7..7b27fe16bcbb 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2771,7 +2771,7 @@ static int drm_cvt_modes(struct drm_connector *connector,
const u8 empty[3] = { 0, 0, 0 };
for (i = 0; i < 4; i++) {
- int uninitialized_var(width), height;
+ int width, height;
cvt = &(timing->data.other_data.data.cvt[i]);
if (!memcmp(cvt->code, empty, 3))
@@ -2779,6 +2779,8 @@ static int drm_cvt_modes(struct drm_connector *connector,
height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
switch (cvt->code[1] & 0x0c) {
+ /* default: the compiler can't see that all cases are enumerated */
+ default:
case 0x00:
width = height * 4 / 3;
break;
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 7fb47b7b8b44..80ce9e1040de 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -170,7 +170,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
struct drm_bridge *next;
while (bridge) {
- next = bridge->next;
+ next = drm_bridge_get_next_bridge(bridge);
drm_bridge_detach(bridge);
bridge = next;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4ae68bf04892..9eb490a6e016 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1340,6 +1340,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
+ var->xres_virtual = fb->width;
+ var->yres_virtual = fb->height;
+
/*
* Workaround for SDL 1.2, which is known to be setting all pixel format
* fields values to zero in some cases. We treat this situation as a
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index ea34bc991858..e27a15a6fe79 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -369,7 +369,7 @@ int drm_open(struct inode *inode, struct file *filp)
{
struct drm_device *dev;
struct drm_minor *minor;
- int retcode;
+ int retcode = 0;
int need_setup = 0;
minor = drm_minor_acquire(iminor(inode));
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index c630064ccf41..21d1afe1cded 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include <drm/drm_fourcc.h>
static char printable_char(int c)
@@ -178,6 +179,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#ifdef __BIG_ENDIAN
+ { .format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#endif
{ .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
@@ -224,6 +229,8 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XV15, .depth = 0, .num_planes = 2, .pixels_per_macropixel = { 3, 3, 0 }, .bytes_per_macropixel = { 4, 8, 0 }, .hsub = 2, .vsub = 2, }, /* FIXME consider is_yuv = true */
+ { .format = DRM_FORMAT_XV20, .depth = 0, .num_planes = 2, .pixels_per_macropixel = { 3, 3, 0 }, .bytes_per_macropixel = { 4, 8, 0 }, .hsub = 2, .vsub = 1, },
{ .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
@@ -274,6 +281,11 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_YUV420_10BIT, .depth = 0,
.num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2,
.is_yuv = true },
+ { .format = DRM_FORMAT_AVUY, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XVUY8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XVUY2101010, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_Y8, .depth = 0, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_Y10, .depth = 0, .num_planes = 1, .pixels_per_macropixel = { 3, 0, 0 }, .bytes_per_macropixel = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
};
unsigned int i;
@@ -393,3 +405,38 @@ uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
drm_format_info_block_height(info, plane));
}
EXPORT_SYMBOL(drm_format_info_min_pitch);
+
+/**
+ * drm_format_plane_width_bytes - number of bytes occupied by @width pixels of a plane
+ * @info: DRM format information
+ * @plane: plane index
+ * @width: plane width in pixels
+ *
+ * This returns the number of bytes covered by @width pixels of plane @plane.
+ * Either the @cpp or the macro-pixel information must be valid for the format.
+ *
+ * Returns:
+ * The number of bytes for @width pixels of @plane, or 0 for invalid format info.
+ */
+int drm_format_plane_width_bytes(const struct drm_format_info *info,
+ int plane, int width)
+{
+ if (!info || plane >= info->num_planes)
+ return 0;
+
+ if (info->cpp[plane])
+ return info->cpp[plane] * width;
+
+ if (WARN_ON(!info->bytes_per_macropixel[plane] ||
+ !info->pixels_per_macropixel[plane])) {
+ struct drm_format_name_buf buf;
+
+ DRM_WARN("Either cpp or macro-pixel info should be valid: %s\n",
+ drm_get_format_name(info->format, &buf));
+ return 0;
+ }
+
+ return DIV_ROUND_UP(width * info->bytes_per_macropixel[plane],
+ info->pixels_per_macropixel[plane]);
+}
+EXPORT_SYMBOL(drm_format_plane_width_bytes);
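
For the macropixel formats added above, `drm_format_plane_width_bytes()` rounds a width up to whole macropixels. A worked check of the arithmetic for the XV15 luma plane, which packs 3 pixels into 4 bytes per the (out-of-tree) table entry:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))	/* as in the kernel */

int main(void)
{
	int width = 1920;
	int bytes_per_macropixel = 4;	/* XV15 plane 0 */
	int pixels_per_macropixel = 3;

	/* 1920 * 4 / 3 = 2560 bytes; a width of 1921 would round up to 2562. */
	printf("%d\n", DIV_ROUND_UP(width * bytes_per_macropixel,
				    pixels_per_macropixel));
	return 0;
}
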
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 57564318ceea..f081a1e5b8d5 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -295,7 +295,8 @@ drm_internal_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *fb;
int ret;
- if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
+ if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS |
+ DRM_MODE_FB_ALTERNATE_TOP | DRM_MODE_FB_ALTERNATE_BOTTOM)) {
DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
return ERR_PTR(-EINVAL);
}
@@ -580,7 +581,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
struct drm_framebuffer *fb;
unsigned flags;
int num_clips;
- int ret;
+ int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 1fdc85a71cec..d6a72f3cb1fb 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -167,21 +167,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_private_object_init);
-static void
-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
-{
- /*
- * Note: obj->dma_buf can't disappear as long as we still hold a
- * handle reference in obj->handle_count.
- */
- mutex_lock(&filp->prime.lock);
- if (obj->dma_buf) {
- drm_prime_remove_buf_handle_locked(&filp->prime,
- obj->dma_buf);
- }
- mutex_unlock(&filp->prime.lock);
-}
-
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
@@ -255,7 +240,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
else if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
- drm_gem_remove_prime_handles(obj, file_priv);
+ drm_prime_remove_buf_handle(&file_priv->prime, id);
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
@@ -1292,7 +1277,7 @@ retry:
ret = dma_resv_lock_slow_interruptible(obj->resv,
acquire_ctx);
if (ret) {
- ww_acquire_done(acquire_ctx);
+ ww_acquire_fini(acquire_ctx);
return ret;
}
}
@@ -1317,7 +1302,7 @@ retry:
goto retry;
}
- ww_acquire_done(acquire_ctx);
+ ww_acquire_fini(acquire_ctx);
return ret;
}
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index d88f4230c221..d266fbb37319 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -554,10 +554,8 @@ int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
ret = drm_gem_shmem_get_pages(shmem);
- if (ret) {
- drm_gem_vm_close(vma);
+ if (ret)
return ret;
- }
/* VM_PFNMAP was set by drm_gem_mmap() */
vma->vm_flags &= ~VM_PFNMAP;
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index fd751078bae1..32eee6fb7e02 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -73,6 +73,10 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
}
}
+/*
+ * Note that on error, drm_gem_vram_init will free the buffer object.
+ */
+
static int drm_gem_vram_init(struct drm_device *dev,
struct ttm_bo_device *bdev,
struct drm_gem_vram_object *gbo,
@@ -86,8 +90,10 @@ static int drm_gem_vram_init(struct drm_device *dev,
gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
ret = drm_gem_object_init(dev, &gbo->bo.base, size);
- if (ret)
+ if (ret) {
+ kfree(gbo);
return ret;
+ }
acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
@@ -98,13 +104,13 @@ static int drm_gem_vram_init(struct drm_device *dev,
&gbo->placement, pg_align, interruptible, acc_size,
NULL, NULL, ttm_buffer_object_destroy);
if (ret)
- goto err_drm_gem_object_release;
+ /*
+ * A failing ttm_bo_init will call ttm_buffer_object_destroy
+ * to release gbo->bo.base and kfree gbo.
+ */
+ return ret;
return 0;
-
-err_drm_gem_object_release:
- drm_gem_object_release(&gbo->bo.base);
- return ret;
}
/**
@@ -134,13 +140,9 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
ret = drm_gem_vram_init(dev, bdev, gbo, size, pg_align, interruptible);
if (ret < 0)
- goto err_kfree;
+ return ERR_PTR(ret);
return gbo;
-
-err_kfree:
- kfree(gbo);
- return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_vram_create);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 51a2055c8f18..41a9a9bae584 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -59,8 +59,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle);
/* drm_drv.c */
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 76b6676b0106..5b93150b1141 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -473,7 +473,13 @@ EXPORT_SYMBOL(drm_invalid_op);
*/
static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
{
- int len;
+ size_t len;
+
+ /* don't attempt to copy a NULL pointer */
+ if (WARN_ONCE(!value, "BUG: the value to copy was not set!")) {
+ *buf_len = 0;
+ return 0;
+ }
/* don't overflow userbuf */
len = strlen(value);
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 4042f5b39765..2a7ef1051b39 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -1156,6 +1156,13 @@ int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz,
size_t chunk;
int ret;
+ /* In __spi_validate(), there's a check that rejects partial transfers
+ * (xfer->len % w_size must be zero). Align max_chunk down to a
+ * multiple of 2 (16 bits) so transfers are not rejected.
+ */
+ max_chunk = ALIGN_DOWN(max_chunk, 2);
+
spi_message_init_with_transfers(&m, &tr, 1);
while (len) {
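
`ALIGN_DOWN(max_chunk, 2)` clamps the chunk size to the previous multiple of two so every SPI transfer length stays divisible by the 16-bit word size that `__spi_validate()` checks against. The macro's effect, via a plain-C power-of-two equivalent:

#include <stdio.h>

/* Userspace stand-in for the kernel's ALIGN_DOWN with a power-of-two alignment. */
#define ALIGN_DOWN_EQ(x, a) ((x) & ~((a) - 1))

int main(void)
{
	/* A 65535-byte controller limit becomes 65534, keeping len % 2 == 0. */
	printf("%u\n", ALIGN_DOWN_EQ(65535u, 2u));
	return 0;
}
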
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index b99f96dcc6f1..963918dc8ad8 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -221,7 +221,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
return dsi;
}
- dsi->dev.of_node = info->node;
+ device_set_node(&dsi->dev, of_fwnode_handle(info->node));
dsi->channel = info->channel;
strlcpy(dsi->name, info->type, sizeof(dsi->name));
@@ -300,6 +300,8 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+ if (dsi->attached)
+ mipi_dsi_detach(dsi);
mipi_dsi_device_unregister(dsi);
return 0;
@@ -322,11 +324,18 @@ EXPORT_SYMBOL(mipi_dsi_host_unregister);
int mipi_dsi_attach(struct mipi_dsi_device *dsi)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ int ret;
if (!ops || !ops->attach)
return -ENOSYS;
- return ops->attach(dsi->host, dsi);
+ ret = ops->attach(dsi->host, dsi);
+ if (ret)
+ return ret;
+
+ dsi->attached = true;
+
+ return 0;
}
EXPORT_SYMBOL(mipi_dsi_attach);
@@ -338,9 +347,14 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ if (WARN_ON(!dsi->attached))
+ return -EINVAL;
+
if (!ops || !ops->detach)
return -ENOSYS;
+ dsi->attached = false;
+
return ops->detach(dsi->host, dsi);
}
EXPORT_SYMBOL(mipi_dsi_detach);
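
The new `dsi->attached` flag makes attach and detach strictly balanced: host unregistration detaches only devices that actually attached, and the `WARN_ON` flags a double detach. The bookkeeping in isolation, with a hypothetical device struct:

#include <linux/errno.h>
#include <linux/types.h>

struct dev_sketch { bool attached; };

static int sketch_attach(struct dev_sketch *d, int (*attach)(void))
{
	int ret = attach();	/* stands in for ops->attach() */

	if (ret)
		return ret;
	d->attached = true;	/* recorded only after the op succeeded */
	return 0;
}

static int sketch_detach(struct dev_sketch *d, int (*detach)(void))
{
	if (!d->attached)	/* unbalanced call: refuse, as the WARN_ON above does */
		return -EINVAL;
	d->attached = false;
	return detach();	/* stands in for ops->detach() */
}
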
@@ -1090,6 +1104,58 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
}
EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness);
+/**
+ * mipi_dsi_dcs_set_display_brightness_large() - sets the 16-bit brightness value
+ * of the display
+ * @dsi: DSI peripheral device
+ * @brightness: brightness value
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi,
+ u16 brightness)
+{
+ u8 payload[2] = { brightness >> 8, brightness & 0xff };
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ payload, sizeof(payload));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness_large);
+
+/**
+ * mipi_dsi_dcs_get_display_brightness_large() - gets the current 16-bit
+ * brightness value of the display
+ * @dsi: DSI peripheral device
+ * @brightness: brightness value
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
+ u16 *brightness)
+{
+ u8 brightness_be[2];
+ ssize_t err;
+
+ err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
+ brightness_be, sizeof(brightness_be));
+ if (err <= 0) {
+ if (err == 0)
+ err = -ENODATA;
+
+ return err;
+ }
+
+ *brightness = (brightness_be[0] << 8) | brightness_be[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness_large);
+
static int mipi_dsi_drv_probe(struct device *dev)
{
struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
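
The two `_large` helpers mirror the existing 8-bit DCS brightness calls but marshal a big-endian 16-bit value. A hedged usage sketch from a panel driver's backlight path; the read-back verification is optional, not something the helpers require:

#include <drm/drm_mipi_dsi.h>
#include <linux/errno.h>

static int panel_set_brightness(struct mipi_dsi_device *dsi, u16 level)
{
	u16 readback;
	int ret;

	ret = mipi_dsi_dcs_set_display_brightness_large(dsi, level);
	if (ret < 0)
		return ret;

	ret = mipi_dsi_dcs_get_display_brightness_large(dsi, &readback);
	if (ret < 0)
		return ret;

	return readback == level ? 0 : -EIO;	/* optional sanity check */
}
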
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 6b0bf42039cf..ed7985c0535a 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -44,13 +44,21 @@ static LIST_HEAD(panel_list);
/**
* drm_panel_init - initialize a panel
* @panel: DRM panel
+ * @dev: parent device of the panel
+ * @funcs: panel operations
+ * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
+ * the panel interface
*
- * Sets up internal fields of the panel so that it can subsequently be added
- * to the registry.
+ * Initialize the panel structure for subsequent registration with
+ * drm_panel_add().
*/
-void drm_panel_init(struct drm_panel *panel)
+void drm_panel_init(struct drm_panel *panel, struct device *dev,
+ const struct drm_panel_funcs *funcs, int connector_type)
{
INIT_LIST_HEAD(&panel->list);
+ panel->dev = dev;
+ panel->funcs = funcs;
+ panel->connector_type = connector_type;
}
EXPORT_SYMBOL(drm_panel_init);
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index f5ab891731d0..43de9dfcba19 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -44,6 +44,14 @@ static const struct drm_dmi_panel_orientation_data gpd_micropc = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data gpd_onemix2s = {
+ .width = 1200,
+ .height = 1920,
+ .bios_dates = (const char * const []){ "05/21/2018", "10/26/2018",
+ "03/04/2019", NULL },
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data gpd_pocket = {
.width = 1200,
.height = 1920,
@@ -128,6 +136,18 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Acer Switch V 10 (SW5-017) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Anbernic Win600 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Anbernic"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Win600"),
+ },
+ .driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* Asus T100HA */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -260,10 +280,29 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
- }, { /* Lenovo Yoga Book X90F / X91F / X91L */
+ }, { /* Lenovo Ideapad D330-10IGL (HD) */
.matches = {
- /* Non exact match to match all versions */
- DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Lenovo IdeaPad Duet 3 10IGL5 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Yoga Book X90F / X90L */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Yoga Book X91F / X91L */
+ .matches = {
+ /* Non exact match to match F + L versions */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* OneGX1 Pro */
@@ -298,6 +337,14 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* One Mix 2S (generic strings, also match on bios date) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+ },
+ .driver_data = (void *)&gpd_onemix2s,
},
{}
};
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 6bdebcca5690..89742b059f91 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -1203,6 +1203,7 @@ retry:
out:
if (fb)
drm_framebuffer_put(fb);
+ fb = NULL;
if (plane->old_fb)
drm_framebuffer_put(plane->old_fb);
plane->old_fb = NULL;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 0a2316e0e812..6b7cf0170f9d 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -187,29 +187,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
return -ENOENT;
}
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle)
{
struct rb_node *rb;
- rb = prime_fpriv->dmabufs.rb_node;
+ mutex_lock(&prime_fpriv->lock);
+
+ rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;
- member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
- if (member->dma_buf == dma_buf) {
+ member = rb_entry(rb, struct drm_prime_member, handle_rb);
+ if (member->handle == handle) {
rb_erase(&member->handle_rb, &prime_fpriv->handles);
rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
- dma_buf_put(dma_buf);
+ dma_buf_put(member->dma_buf);
kfree(member);
- return;
- } else if (member->dma_buf < dma_buf) {
+ break;
+ } else if (member->handle < handle) {
rb = rb->rb_right;
} else {
rb = rb->rb_left;
}
}
+
+ mutex_unlock(&prime_fpriv->lock);
}
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
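The reworked drm_prime_remove_buf_handle() now descends the handle-keyed rbtree under prime_fpriv->lock itself instead of requiring the caller to hold it. The descent is the standard keyed-tree walk; a compact userspace sketch of the same pattern, using a hypothetical node type and plain pointers instead of <linux/rbtree.h>:

struct member {
	unsigned int handle;
	struct member *left, *right;
};

static struct member *lookup(struct member *node, unsigned int handle)
{
	while (node) {
		if (node->handle == handle)
			return node;	/* found: erase from both trees, drop refs */
		/* smaller keys live on the left, larger on the right */
		node = (node->handle < handle) ? node->right : node->left;
	}
	return NULL;
}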
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index ef2c468205a2..4c8bb93a89fb 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -112,7 +112,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
continue;
}
- ret = drm_bridge_mode_valid(encoder->bridge, mode);
+ ret = drm_bridge_chain_mode_valid(encoder->bridge, mode);
if (ret != MODE_OK) {
/* There is also no point in continuing for crtc check
* here. */
@@ -460,8 +460,9 @@ retry:
*/
dev->mode_config.delayed_event = true;
if (dev->mode_config.poll_enabled)
- schedule_delayed_work(&dev->mode_config.output_poll_work,
- 0);
+ mod_delayed_work(system_wq,
+ &dev->mode_config.output_poll_work,
+ 0);
}
/* Re-enable polling in case the global poll config changed. */
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index e558381c4c96..9baf95a4fc9f 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -325,6 +325,15 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
if (!syncobj)
return -ENOENT;
+ /* Waiting for userspace with locks held is illegal because that can
+ * trivially deadlock with page faults, for example. Make lockdep
+ * complain about it early on.
+ */
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ might_sleep();
+ lockdep_assert_none_held_once();
+ }
+
*fence = drm_syncobj_fence_get(syncobj);
if (*fence) {
@@ -889,6 +898,10 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
uint64_t *points;
uint32_t signaled_count, i;
+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
+ lockdep_assert_none_held_once();
+
points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
if (points == NULL)
return -ENOMEM;
@@ -921,7 +934,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
fence = drm_syncobj_fence_get(syncobjs[i]);
if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
dma_fence_put(fence);
- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
continue;
} else {
timeout = -EINVAL;
@@ -954,7 +968,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 * fall through and try a 0 timeout wait!
*/
- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
for (i = 0; i < count; ++i)
drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
}
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 552ec82e9bc5..c98ed8146242 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -69,6 +69,12 @@
* &drm_driver.max_vblank_count. In that case the vblank core only disables the
* vblanks after a timer has expired, which can be configured through the
* ``vblankoffdelay`` module parameter.
+ *
+ * Drivers for hardware without support for vertical-blanking interrupts
+ * must not call drm_vblank_init(). For such drivers, atomic helpers will
+ * automatically generate fake vblank events as part of the display update.
+ * This functionality can also be controlled by the driver by setting or
+ * clearing struct drm_crtc_state.no_vblank.
*/
/* Retry timestamp calculation up to 3 times to satisfy
@@ -489,6 +495,28 @@ err:
EXPORT_SYMBOL(drm_vblank_init);
/**
+ * drm_dev_has_vblank - test if vblanking has been initialized for
+ * a device
+ * @dev: the device
+ *
+ * Drivers may call this function to test if vblank support is
+ * initialized for a device. For most hardware this means that vblanking
+ * can also be enabled.
+ *
+ * Atomic helpers use this function to initialize
+ * &drm_crtc_state.no_vblank. See also drm_atomic_helper_check_modeset().
+ *
+ * Returns:
+ * True if vblanking has been initialized for the given device, false
+ * otherwise.
+ */
+bool drm_dev_has_vblank(const struct drm_device *dev)
+{
+ return dev->num_crtcs != 0;
+}
+EXPORT_SYMBOL(drm_dev_has_vblank);
+
+/**
* drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
* @crtc: which CRTC's vblank waitqueue to retrieve
*
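drm_dev_has_vblank() gives callers a cheap predicate for whether vblank support was initialized. A hedged sketch of the kind of consumer the kernel-doc mentions (hypothetical function name; the real logic lives in drm_atomic_helper_check_modeset(), whose body is not part of this patch):

/* Illustrative fragment (kernel context assumed): ask the atomic
 * helpers to fake vblank events whenever the device never called
 * drm_vblank_init(). Hypothetical function name.
 */
static void example_init_no_vblank(struct drm_crtc_state *state)
{
	/* drm_dev_has_vblank() is simply dev->num_crtcs != 0, see above */
	state->no_vblank = !drm_dev_has_vblank(state->crtc->dev);
}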
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 648cf0207309..67901f4586a3 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -125,9 +125,9 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
return;
etnaviv_dump_core = false;
- mutex_lock(&gpu->mmu_context->lock);
+ mutex_lock(&submit->mmu_context->lock);
- mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);
+ mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);
/* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
n_obj = 5;
@@ -157,7 +157,7 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
PAGE_KERNEL);
if (!iter.start) {
- mutex_unlock(&gpu->mmu_context->lock);
+ mutex_unlock(&submit->mmu_context->lock);
dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
return;
}
@@ -169,18 +169,18 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
memset(iter.hdr, 0, iter.data - iter.start);
etnaviv_core_dump_registers(&iter, gpu);
- etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
+ etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
gpu->buffer.size,
etnaviv_cmdbuf_get_va(&gpu->buffer,
- &gpu->mmu_context->cmdbuf_mapping));
+ &submit->mmu_context->cmdbuf_mapping));
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
submit->cmdbuf.vaddr, submit->cmdbuf.size,
etnaviv_cmdbuf_get_va(&submit->cmdbuf,
- &gpu->mmu_context->cmdbuf_mapping));
+ &submit->mmu_context->cmdbuf_mapping));
- mutex_unlock(&gpu->mmu_context->lock);
+ mutex_unlock(&submit->mmu_context->lock);
/* Reserve space for the bomap */
if (n_bomap_pages) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 519948637186..5107a0f5bc7f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -27,7 +27,7 @@ static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
* because display controller, GPU, etc. are not coherent.
*/
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
- dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+ dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}
static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
@@ -51,7 +51,7 @@ static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj
* discard those writes.
*/
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
- dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}
/* called with etnaviv_obj->lock held */
@@ -403,9 +403,8 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
- dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
- etnaviv_obj->sgt->nents,
- etnaviv_op_to_dma_dir(op));
+ dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
+ etnaviv_op_to_dma_dir(op));
etnaviv_obj->last_cpu_prep_op = op;
}
@@ -420,8 +419,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
/* fini without a prep is almost certainly a userspace error */
WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
- dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
- etnaviv_obj->sgt->nents,
+ dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
etnaviv_obj->last_cpu_prep_op = 0;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index f24dd21c2363..4400f578685a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -93,7 +93,15 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
struct vm_area_struct *vma)
{
- return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
+ int ret;
+
+ ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
+ if (!ret) {
+ /* Drop the reference acquired by drm_gem_mmap_obj(). */
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
+ }
+
+ return ret;
}
static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index db35736d47af..8c6f9752692d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -392,6 +392,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
if (gpu->identity.model == chipModel_GC700)
gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
+ /* These models/revisions don't have the 2D pipe bit */
+ if ((gpu->identity.model == chipModel_GC500 &&
+ gpu->identity.revision <= 2) ||
+ gpu->identity.model == chipModel_GC300)
+ gpu->identity.features |= chipFeatures_PIPE_2D;
+
if ((gpu->identity.model == chipModel_GC500 &&
gpu->identity.revision < 2) ||
(gpu->identity.model == chipModel_GC300 &&
@@ -425,8 +431,9 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
}
- /* GC600 idle register reports zero bits where modules aren't present */
- if (gpu->identity.model == chipModel_GC600)
+ /* GC600/300 idle register reports zero bits where modules aren't present */
+ if (gpu->identity.model == chipModel_GC600 ||
+ gpu->identity.model == chipModel_GC300)
gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
VIVS_HI_IDLE_STATE_RA |
VIVS_HI_IDLE_STATE_SE |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 790cbb20aaeb..44fbc0a123bf 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -73,17 +73,17 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
struct sg_table *sgt, unsigned len, int prot)
{
	struct scatterlist *sg;
unsigned int da = iova;
- unsigned int i, j;
+ unsigned int i;
int ret;
if (!context || !sgt)
return -EINVAL;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- u32 pa = sg_dma_address(sg) - sg->offset;
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ phys_addr_t pa = sg_dma_address(sg) - sg->offset;
size_t bytes = sg_dma_len(sg) + sg->offset;
- VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+ VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
ret = etnaviv_context_map(context, da, pa, bytes, prot);
if (ret)
@@ -95,14 +95,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
return 0;
fail:
- da = iova;
-
- for_each_sg(sgt->sgl, sg, i, j) {
- size_t bytes = sg_dma_len(sg) + sg->offset;
-
- etnaviv_context_unmap(context, da, bytes);
- da += bytes;
- }
+ etnaviv_context_unmap(context, iova, da - iova);
return ret;
}
@@ -113,7 +106,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
unsigned int da = iova;
int i;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ for_each_sgtable_dma_sg(sgt, sg, i) {
size_t bytes = sg_dma_len(sg) + sg->offset;
etnaviv_context_unmap(context, da, bytes);
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 9c262daf5816..78c934a21bd7 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -318,9 +318,9 @@ static void decon_win_set_bldmod(struct decon_context *ctx, unsigned int win,
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
struct drm_framebuffer *fb)
{
- struct exynos_drm_plane plane = ctx->planes[win];
+ struct exynos_drm_plane *plane = &ctx->planes[win];
struct exynos_drm_plane_state *state =
- to_exynos_plane_state(plane.base.state);
+ to_exynos_plane_state(plane->base.state);
unsigned int alpha = state->base.alpha;
unsigned int pixel_alpha;
unsigned long val;
@@ -775,8 +775,8 @@ static int decon_conf_irq(struct decon_context *ctx, const char *name,
return irq;
}
}
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
- ret = devm_request_irq(ctx->dev, irq, handler, flags, "drm_decon", ctx);
+ ret = devm_request_irq(ctx->dev, irq, handler,
+ flags | IRQF_NO_AUTOEN, "drm_decon", ctx);
if (ret < 0) {
dev_err(ctx->dev, "IRQ %s request failed\n", name);
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 6fd40410dfd2..afca5fc46020 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -800,31 +800,40 @@ static int exynos7_decon_resume(struct device *dev)
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the pclk [%d]\n",
ret);
- return ret;
+ goto err_pclk_enable;
}
ret = clk_prepare_enable(ctx->aclk);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the aclk [%d]\n",
ret);
- return ret;
+ goto err_aclk_enable;
}
ret = clk_prepare_enable(ctx->eclk);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the eclk [%d]\n",
ret);
- return ret;
+ goto err_eclk_enable;
}
ret = clk_prepare_enable(ctx->vclk);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the vclk [%d]\n",
ret);
- return ret;
+ goto err_vclk_enable;
}
return 0;
+
+err_vclk_enable:
+ clk_disable_unprepare(ctx->eclk);
+err_eclk_enable:
+ clk_disable_unprepare(ctx->aclk);
+err_aclk_enable:
+ clk_disable_unprepare(ctx->pclk);
+err_pclk_enable:
+ return ret;
}
#endif
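The exynos7 resume fix above is the canonical goto-unwind ladder: every enable that can fail jumps to a label that releases, in reverse order, only what was acquired before it. A self-contained sketch of the shape, with stubbed resources and a simulated failure at the third step:

#include <stdio.h>

struct ctx { int unused; };

static int enable_a(struct ctx *c) { (void)c; puts("enable a"); return 0; }
static int enable_b(struct ctx *c) { (void)c; puts("enable b"); return 0; }
static int enable_c(struct ctx *c) { (void)c; puts("enable c"); return -1; }
static void disable_a(struct ctx *c) { (void)c; puts("undo a"); }
static void disable_b(struct ctx *c) { (void)c; puts("undo b"); }

static int enable_all(struct ctx *c)
{
	int ret;

	ret = enable_a(c);
	if (ret)
		goto err_a;
	ret = enable_b(c);
	if (ret)
		goto err_b;
	ret = enable_c(c);
	if (ret)
		goto err_c;
	return 0;

err_c:	/* c failed: undo b, then a -- reverse order of acquisition */
	disable_b(c);
err_b:
	disable_a(c);
err_a:
	return ret;
}

int main(void)
{
	struct ctx c = { 0 };

	return enable_all(&c) ? 1 : 0;
}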
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index e0cfae744afc..01c5fbf9083a 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -109,7 +109,6 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
if (ret) {
DRM_DEV_ERROR(dp->dev,
"Failed to attach bridge to drm\n");
- bridge->next = NULL;
return ret;
}
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 77ce78986408..c10eea6db9a8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -39,13 +39,12 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
if (exynos_crtc->ops->disable)
exynos_crtc->ops->disable(exynos_crtc);
+ spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event && !crtc->state->active) {
- spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
-
crtc->state->event = NULL;
}
+ spin_unlock_irq(&crtc->dev->event_lock);
}
static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index a3c9d8b9e1a1..e07d31b9a921 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -133,18 +133,16 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
return 0;
if (!priv->mapping) {
- void *mapping;
+ void *mapping = NULL;
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
mapping = arm_iommu_create_mapping(&platform_bus_type,
EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
else if (IS_ENABLED(CONFIG_IOMMU_DMA))
mapping = iommu_get_domain_for_dev(priv->dma_dev);
- else
- mapping = ERR_PTR(-ENODEV);
- if (IS_ERR(mapping))
- return PTR_ERR(mapping);
+ if (!mapping)
+ return -ENODEV;
priv->mapping = mapping;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ba0f868b2477..29577a7eaea8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -346,6 +346,7 @@ err_mode_config_cleanup:
drm_mode_config_cleanup(drm);
exynos_drm_cleanup_dma(drm);
kfree(private);
+ dev_set_drvdata(dev, NULL);
err_free_drm:
drm_dev_put(drm);
@@ -360,6 +361,7 @@ static void exynos_drm_unbind(struct device *dev)
exynos_drm_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
@@ -397,9 +399,18 @@ static int exynos_drm_platform_remove(struct platform_device *pdev)
return 0;
}
+static void exynos_drm_platform_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ if (drm)
+ drm_atomic_helper_shutdown(drm);
+}
+
static struct platform_driver exynos_drm_platform_driver = {
.probe = exynos_drm_platform_probe,
.remove = exynos_drm_platform_remove,
+ .shutdown = exynos_drm_platform_shutdown,
.driver = {
.name = "exynos-drm",
.pm = &exynos_drm_pm_ops,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index b83acd696774..04220a51d839 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -544,9 +544,9 @@ static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
unsigned long best_freq = 0;
u32 min_delta = 0xffffffff;
u8 p_min, p_max;
- u8 _p, uninitialized_var(best_p);
- u16 _m, uninitialized_var(best_m);
- u8 _s, uninitialized_var(best_s);
+ u8 _p, best_p;
+ u16 _m, best_m;
+ u8 _s, best_s;
p_min = DIV_ROUND_UP(fin, (12 * MHZ));
p_max = fin / (6 * MHZ);
@@ -1350,10 +1350,9 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
}
te_gpio_irq = gpio_to_irq(dsi->te_gpio);
- irq_set_status_flags(te_gpio_irq, IRQ_NOAUTOEN);
ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL,
- IRQF_TRIGGER_RISING, "TE", dsi);
+ IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi);
if (ret) {
dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
gpio_free(dsi->te_gpio);
@@ -1389,7 +1388,7 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
if (ret < 0)
goto err_put_sync;
} else {
- drm_bridge_pre_enable(dsi->out_bridge);
+ drm_bridge_chain_pre_enable(dsi->out_bridge);
}
exynos_dsi_set_display_mode(dsi);
@@ -1400,7 +1399,7 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
if (ret < 0)
goto err_display_disable;
} else {
- drm_bridge_enable(dsi->out_bridge);
+ drm_bridge_chain_enable(dsi->out_bridge);
}
dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
@@ -1425,10 +1424,10 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
drm_panel_disable(dsi->panel);
- drm_bridge_disable(dsi->out_bridge);
+ drm_bridge_chain_disable(dsi->out_bridge);
exynos_dsi_set_display_enable(dsi, false);
drm_panel_unprepare(dsi->panel);
- drm_bridge_post_disable(dsi->out_bridge);
+ drm_bridge_chain_post_disable(dsi->out_bridge);
dsi->state &= ~DSIM_STATE_ENABLED;
pm_runtime_put_sync(dsi->dev);
}
@@ -1792,9 +1791,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
return dsi->irq;
}
- irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
- exynos_dsi_irq, IRQF_ONESHOT,
+ exynos_dsi_irq,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
dev_name(dev), dsi);
if (ret) {
dev_err(dev, "failed to request dsi irq\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 34e6b22173fa..4fe4ca41665b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -637,9 +637,9 @@ static void fimd_win_set_bldmod(struct fimd_context *ctx, unsigned int win,
static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
struct drm_framebuffer *fb, int width)
{
- struct exynos_drm_plane plane = ctx->planes[win];
+ struct exynos_drm_plane *plane = &ctx->planes[win];
struct exynos_drm_plane_state *state =
- to_exynos_plane_state(plane.base.state);
+ to_exynos_plane_state(plane->base.state);
uint32_t pixel_format = fb->format->format;
unsigned int alpha = state->base.alpha;
u32 val = WINCONx_ENWIN;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index fcee33a43aca..2df04de7f435 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1332,7 +1332,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
/* Let the runqueue know that there is work to do. */
queue_work(g2d->g2d_workq, &g2d->runqueue_work);
- if (runqueue_node->async)
+ if (req->async)
goto out;
wait_for_completion(&runqueue_node->complete);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
index 74ea3c26dead..1a5ae781b56c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
@@ -34,11 +34,11 @@ static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
}
-int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
+static inline int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
{
return 0;
}
-void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
+static inline void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
{ }
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 45e9aee8366a..bcf830c5b8ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1344,7 +1344,7 @@ static int __maybe_unused gsc_runtime_resume(struct device *dev)
for (i = 0; i < ctx->num_clocks; i++) {
ret = clk_prepare_enable(ctx->clocks[i]);
if (ret) {
- while (--i > 0)
+ while (--i >= 0)
clk_disable_unprepare(ctx->clocks[i]);
return ret;
}
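The gsc change is a one-character off-by-one: "while (--i > 0)" stops before index 0 and leaks the first clock that was enabled, while "while (--i >= 0)" unwinds every index below the one that failed. A runnable demonstration:

#include <stdio.h>

int main(void)
{
	int failed_at = 3;	/* clocks[0..2] were enabled, clocks[3] failed */
	int i;

	i = failed_at;
	while (--i > 0)
		printf("buggy unwind releases clk %d\n", i);	/* 2, 1 only */

	i = failed_at;
	while (--i >= 0)
		printf("fixed unwind releases clk %d\n", i);	/* 2, 1, 0 */

	return 0;
}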
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 65b891cb9c50..d882a22dfd6e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -483,8 +483,6 @@ static int vidi_remove(struct platform_device *pdev)
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
kfree(ctx->raw_edid);
ctx->raw_edid = NULL;
-
- return -EINVAL;
}
component_del(&pdev->dev, &vidi_component_ops);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 0073a2b3b80a..93b2af4936d0 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1850,6 +1850,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
return ret;
crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
+ if (IS_ERR(crtc))
+ return PTR_ERR(crtc);
crtc->pipe_clk = &hdata->phy_clk;
ret = hdmi_create_connector(encoder);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index a92fd6c70b09..8de9bc8343a2 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -70,8 +70,9 @@ static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
return drm_panel_get_modes(fsl_connector->panel);
}
-static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
if (mode->hdisplay & 0xf)
return MODE_ERROR;
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 399b1542509f..59f02f39e952 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -146,6 +146,11 @@ static struct intel_quirk intel_quirks[] = {
/* ASRock ITX*/
{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ /* ECS Liva Q2 */
+ { 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ /* HP Notebook - 14-r206nv */
+ { 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
};
void intel_init_quirks(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index adeb1c840976..4d09b247474d 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2693,13 +2693,10 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
if (!intel_sdvo_connector)
return false;
- if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+ if (device == 0)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
- } else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+ else if (device == 1)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
- }
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
@@ -2753,7 +2750,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- intel_sdvo->controlled_output |= type;
intel_sdvo_connector->output_flag = type;
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
@@ -2794,13 +2790,10 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+ if (device == 0)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
- } else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+ else if (device == 1)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
- }
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
kfree(intel_sdvo_connector);
@@ -2830,13 +2823,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+ if (device == 0)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
- } else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+ else if (device == 1)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
- }
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
kfree(intel_sdvo_connector);
@@ -2869,16 +2859,39 @@ err:
return false;
}
+static u16 intel_sdvo_filter_output_flags(u16 flags)
+{
+ flags &= SDVO_OUTPUT_MASK;
+
+ /* SDVO requires that an XXX1 output may not exist unless the XXX0 output does. */
+ if (!(flags & SDVO_OUTPUT_TMDS0))
+ flags &= ~SDVO_OUTPUT_TMDS1;
+
+ if (!(flags & SDVO_OUTPUT_RGB0))
+ flags &= ~SDVO_OUTPUT_RGB1;
+
+ if (!(flags & SDVO_OUTPUT_LVDS0))
+ flags &= ~SDVO_OUTPUT_LVDS1;
+
+ return flags;
+}
+
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
{
- /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+
+ flags = intel_sdvo_filter_output_flags(flags);
+
+ intel_sdvo->controlled_output = flags;
+
+ intel_sdvo_select_ddc_bus(i915, intel_sdvo);
if (flags & SDVO_OUTPUT_TMDS0)
if (!intel_sdvo_dvi_init(intel_sdvo, 0))
return false;
- if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+ if (flags & SDVO_OUTPUT_TMDS1)
if (!intel_sdvo_dvi_init(intel_sdvo, 1))
return false;
@@ -2899,7 +2912,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
if (!intel_sdvo_analog_init(intel_sdvo, 0))
return false;
- if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+ if (flags & SDVO_OUTPUT_RGB1)
if (!intel_sdvo_analog_init(intel_sdvo, 1))
return false;
@@ -2907,14 +2920,13 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
if (!intel_sdvo_lvds_init(intel_sdvo, 0))
return false;
- if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+ if (flags & SDVO_OUTPUT_LVDS1)
if (!intel_sdvo_lvds_init(intel_sdvo, 1))
return false;
- if ((flags & SDVO_OUTPUT_MASK) == 0) {
+ if (flags == 0) {
unsigned char bytes[2];
- intel_sdvo->controlled_output = 0;
memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
SDVO_NAME(intel_sdvo),
@@ -3321,8 +3333,6 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
*/
intel_sdvo->base.cloneable = 0;
- intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo);
-
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
goto err_output;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 96ce95c8ac5a..8826358b1741 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -36,13 +36,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
goto err_unpin_pages;
}
- ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
+ ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL);
if (ret)
goto err_free;
src = obj->mm.pages->sgl;
dst = st->sgl;
- for (i = 0; i < obj->mm.pages->nents; i++) {
+ for (i = 0; i < obj->mm.pages->orig_nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
src = sg_next(src);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f6d7f5d307d7..b1ba08e3e101 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -348,6 +348,10 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
if (!i915_mmio_reg_offset(rb.reg))
continue;
+ if (INTEL_GEN(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
+ engine->class == VIDEO_ENHANCEMENT_CLASS))
+ rb.bit = _MASKED_BIT_ENABLE(rb.bit);
+
intel_uncore_write_fw(uncore, rb.reg, rb.bit);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 398068b17389..d8b40cc091da 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -475,7 +475,7 @@ static struct i915_request *
__unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct i915_request *rq, *rn, *active = NULL;
- struct list_head *uninitialized_var(pl);
+ struct list_head *pl;
int prio = I915_PRIORITY_INVALID;
lockdep_assert_held(&engine->active.lock);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 8cea42379dd7..48aed34e19df 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -257,6 +257,7 @@ out:
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
struct intel_uncore *uncore = gt->uncore;
+ int loops = 2;
int err;
/*
@@ -264,17 +265,38 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
* for fifo space for the write or forcewake the chip for
* the read
*/
- intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
+ do {
+ intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
- /* Wait for the device to ack the reset requests */
- err = __intel_wait_for_register_fw(uncore,
- GEN6_GDRST, hw_domain_mask, 0,
- 500, 0,
- NULL);
+ /*
+ * Wait for the device to ack the reset requests.
+ *
+ * On some platforms, e.g. Jasperlake, we see that the
+ * engine register state is not cleared until shortly after
+ * GDRST reports completion, causing a failure as we try
+ * to immediately resume while the internal state is still
+ * in flux. If we immediately repeat the reset, the second
+ * reset appears to serialise with the first, and since
+ * it is a no-op, the registers should retain their reset
+ * value. However, there is still a concern that upon
+ * leaving the second reset, the internal engine state
+ * is still in flux and not ready for resuming.
+ */
+ err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
+ hw_domain_mask, 0,
+ 2000, 0,
+ NULL);
+ } while (err == 0 && --loops);
if (err)
DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
hw_domain_mask);
+ /*
+ * As we have observed that the engine state is still volatile
+ * after GDRST is acked, impose a small delay to let everything settle.
+ */
+ udelay(50);
+
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index eee9fcbe0434..125f7bb67bee 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1208,7 +1208,7 @@ int intel_ring_pin(struct intel_ring *ring)
if (unlikely(ret))
goto err_unpin;
- if (i915_vma_is_map_and_fenceable(vma))
+ if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915))
addr = (void __force *)i915_vma_pin_iomap(vma);
else
addr = i915_gem_object_pin_map(vma->obj,
@@ -1252,7 +1252,7 @@ void intel_ring_unpin(struct intel_ring *ring)
intel_ring_reset(ring, ring->emit);
i915_vma_unset_ggtt_write(vma);
- if (i915_vma_is_map_and_fenceable(vma))
+ if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915))
i915_vma_unpin_iomap(vma);
else
i915_gem_object_unpin_map(vma->obj);
@@ -1268,10 +1268,11 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
struct i915_address_space *vm = &ggtt->vm;
struct drm_i915_private *i915 = vm->i915;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = NULL;
struct i915_vma *vma;
- obj = i915_gem_object_create_stolen(i915, size);
+ if (!HAS_LLC(i915))
+ obj = i915_gem_object_create_stolen(i915, size);
if (!obj)
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 7dc7bb850d0a..a7913410003d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1186,10 +1186,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
start_gfn + sub_index, PAGE_SIZE, &dma_addr);
- if (ret) {
- ppgtt_invalidate_spt(spt);
- return ret;
- }
+ if (ret)
+ goto err;
sub_se.val64 = se->val64;
/* Copy the PAT field from PDE. */
@@ -1208,6 +1206,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
ops->set_pfn(se, sub_spt->shadow_page.mfn);
ppgtt_set_shadow_entry(spt, se, index);
return 0;
+err:
+ /* Cancel the existing address mappings of the DMA addr. */
+ for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
+ gvt_vdbg_mm("invalidate 4K entry\n");
+ ppgtt_invalidate_pte(sub_spt, &sub_se);
+ }
+ /* Release the newly allocated spt. */
+ trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
+ sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
+ ppgtt_free_spt(sub_spt);
+ return ret;
}
static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 245c20d36f1b..45ffccdcd50e 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -654,7 +654,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
index = FDI_RX_IMR_TO_PIPE(offset);
else {
- gvt_vgpu_err("Unsupport registers %x\n", offset);
+ gvt_vgpu_err("Unsupported registers %x\n", offset);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 058dcd541644..c1dc225d8436 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -632,6 +632,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
!workload->shadow_mm->ppgtt_mm.shadowed) {
+ intel_vgpu_unpin_mm(workload->shadow_mm);
gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 1974e4c78a43..b3f0c20d5ba5 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -418,7 +418,8 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
- .has_rc6p = 1, \
+ /* snb does support rc6p, but enabling it causes various issues */ \
+ .has_rc6p = 0, \
.has_rps = true, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 31, \
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 62f8a47fda45..dc5483b31c1b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1926,13 +1926,14 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
unsigned int slow_timeout_ms,
u32 *out_value)
{
- u32 uninitialized_var(reg_value);
+ u32 reg_value = 0;
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
int ret;
/* Catch any overuse of this function */
might_sleep_if(slow_timeout_ms);
GEM_BUG_ON(fast_timeout_us > 20000);
+ GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
ret = -ETIMEDOUT;
if (fast_timeout_us && fast_timeout_us <= 20000)
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index f91c3eb7697b..cc300ed456e1 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -237,8 +237,9 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static int imx_tve_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+imx_tve_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct imx_tve *tve = con_to_tve(connector);
unsigned long rate;
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 75ec703d22e0..89513c7c50d5 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -300,8 +300,10 @@ static int lima_pdev_probe(struct platform_device *pdev)
/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&lima_drm_driver, &pdev->dev);
- if (IS_ERR(ddev))
- return PTR_ERR(ddev);
+ if (IS_ERR(ddev)) {
+ err = PTR_ERR(ddev);
+ goto err_out0;
+ }
ddev->dev_private = ldev;
ldev->ddev = ddev;
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 8c8c92fc82e9..a580b9cadb0c 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -942,6 +942,7 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
bridge = of_drm_find_bridge(child);
if (!bridge) {
dev_err(dev, "failed to find bridge\n");
+ of_node_put(child);
return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 48de07e9059e..7c68a3933915 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -50,13 +50,7 @@ enum mtk_dpi_out_channel_swap {
};
enum mtk_dpi_out_color_format {
- MTK_DPI_COLOR_FORMAT_RGB,
- MTK_DPI_COLOR_FORMAT_RGB_FULL,
- MTK_DPI_COLOR_FORMAT_YCBCR_444,
- MTK_DPI_COLOR_FORMAT_YCBCR_422,
- MTK_DPI_COLOR_FORMAT_XV_YCC,
- MTK_DPI_COLOR_FORMAT_YCBCR_444_FULL,
- MTK_DPI_COLOR_FORMAT_YCBCR_422_FULL
+ MTK_DPI_COLOR_FORMAT_RGB
};
struct mtk_dpi {
@@ -355,24 +349,11 @@ static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi)
static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
enum mtk_dpi_out_color_format format)
{
- if ((format == MTK_DPI_COLOR_FORMAT_YCBCR_444) ||
- (format == MTK_DPI_COLOR_FORMAT_YCBCR_444_FULL)) {
- mtk_dpi_config_yuv422_enable(dpi, false);
- mtk_dpi_config_csc_enable(dpi, true);
- mtk_dpi_config_swap_input(dpi, false);
- mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_BGR);
- } else if ((format == MTK_DPI_COLOR_FORMAT_YCBCR_422) ||
- (format == MTK_DPI_COLOR_FORMAT_YCBCR_422_FULL)) {
- mtk_dpi_config_yuv422_enable(dpi, true);
- mtk_dpi_config_csc_enable(dpi, true);
- mtk_dpi_config_swap_input(dpi, true);
- mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
- } else {
- mtk_dpi_config_yuv422_enable(dpi, false);
- mtk_dpi_config_csc_enable(dpi, false);
- mtk_dpi_config_swap_input(dpi, false);
- mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
- }
+ /* only support RGB888 */
+ mtk_dpi_config_yuv422_enable(dpi, false);
+ mtk_dpi_config_csc_enable(dpi, false);
+ mtk_dpi_config_swap_input(dpi, false);
+ mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
}
static void mtk_dpi_power_off(struct mtk_dpi *dpi)
@@ -383,9 +364,6 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi)
if (--dpi->refcount != 0)
return;
- if (dpi->pinctrl && dpi->pins_gpio)
- pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
-
mtk_dpi_disable(dpi);
clk_disable_unprepare(dpi->pixel_clk);
clk_disable_unprepare(dpi->engine_clk);
@@ -410,10 +388,6 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
goto err_pixel;
}
- if (dpi->pinctrl && dpi->pins_dpi)
- pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
-
- mtk_dpi_enable(dpi);
return 0;
err_pixel:
@@ -549,14 +523,21 @@ static void mtk_dpi_encoder_disable(struct drm_encoder *encoder)
struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
mtk_dpi_power_off(dpi);
+
+ if (dpi->pinctrl && dpi->pins_gpio)
+ pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
}
static void mtk_dpi_encoder_enable(struct drm_encoder *encoder)
{
struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+ if (dpi->pinctrl && dpi->pins_dpi)
+ pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
+
mtk_dpi_power_on(dpi);
mtk_dpi_set_display_mode(dpi, &dpi->mode);
+ mtk_dpi_enable(dpi);
}
static int mtk_dpi_atomic_check(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index f370d41b3d04..9bbad3350069 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -73,11 +73,13 @@ static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
struct drm_crtc *crtc = &mtk_crtc->base;
unsigned long flags;
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
- drm_crtc_vblank_put(crtc);
- mtk_crtc->event = NULL;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ if (mtk_crtc->event) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
+ drm_crtc_vblank_put(crtc);
+ mtk_crtc->event = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
}
static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index f98bb2e26372..5569454ad9e4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -417,6 +417,7 @@ static int mtk_drm_bind(struct device *dev)
err_deinit:
mtk_drm_kms_deinit(drm);
err_free:
+ private->drm = NULL;
drm_dev_put(drm);
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index ca672f1d140d..5f87d56d2d1d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -142,8 +142,6 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
- if (ret)
- drm_gem_vm_close(vma);
return ret;
}
@@ -269,9 +267,13 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
}
mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
-
+ if (!mtk_gem->kvaddr) {
+ kfree(sgt);
+ kfree(mtk_gem->pages);
+ return ERR_PTR(-ENOMEM);
+ }
out:
- kfree((void *)sgt);
+ kfree(sgt);
return mtk_gem->kvaddr;
}
@@ -284,6 +286,6 @@ void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
return;
vunmap(vaddr);
- mtk_gem->kvaddr = 0;
- kfree((void *)mtk_gem->pages);
+ mtk_gem->kvaddr = NULL;
+ kfree(mtk_gem->pages);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 224afb666881..9af900076dee 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -64,8 +64,8 @@
#define DSI_PS_WC 0x3fff
#define DSI_PS_SEL (3 << 16)
#define PACKED_PS_16BIT_RGB565 (0 << 16)
-#define LOOSELY_PS_18BIT_RGB666 (1 << 16)
-#define PACKED_PS_18BIT_RGB666 (2 << 16)
+#define PACKED_PS_18BIT_RGB666 (1 << 16)
+#define LOOSELY_PS_24BIT_RGB666 (2 << 16)
#define PACKED_PS_24BIT_RGB888 (3 << 16)
#define DSI_VSA_NL 0x20
@@ -321,10 +321,10 @@ static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
break;
case MIPI_DSI_FMT_RGB666:
- ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
+ ps_bpp_mode |= LOOSELY_PS_24BIT_RGB666;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
- ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
+ ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
break;
case MIPI_DSI_FMT_RGB565:
ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
@@ -375,7 +375,7 @@ static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
dsi_tmp_buf_bpp = 3;
break;
case MIPI_DSI_FMT_RGB666:
- tmp_reg = LOOSELY_PS_18BIT_RGB666;
+ tmp_reg = LOOSELY_PS_24BIT_RGB666;
dsi_tmp_buf_bpp = 3;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
@@ -645,6 +645,8 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
mtk_dsi_reset_engine(dsi);
mtk_dsi_lane0_ulp_mode_enter(dsi);
mtk_dsi_clk_ulp_mode_enter(dsi);
+ /* set the lane number to 0 to pull down the mipi lanes */
+ writel(0, dsi->regs + DSI_TXRX_CTRL);
mtk_dsi_disable(dsi);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 6b22fd63c3f5..74a54a9e3533 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1237,17 +1237,19 @@ static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
struct drm_display_mode *mode)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+ struct drm_bridge *next_bridge;
dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
mode->hdisplay, mode->vdisplay, mode->vrefresh,
!!(mode->flags & DRM_MODE_FLAG_INTERLACE), mode->clock * 1000);
- if (hdmi->bridge.next) {
+ next_bridge = drm_bridge_get_next_bridge(&hdmi->bridge);
+ if (next_bridge) {
struct drm_display_mode adjusted_mode;
drm_mode_copy(&adjusted_mode, mode);
- if (!drm_bridge_mode_fixup(hdmi->bridge.next, mode,
- &adjusted_mode))
+ if (!drm_bridge_chain_mode_fixup(next_bridge, mode,
+ &adjusted_mode))
return MODE_BAD;
}
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 61a6536e7e61..9a39afc3939b 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -124,8 +124,11 @@ static bool meson_vpu_has_available_connectors(struct device *dev)
for_each_endpoint_of_node(dev->of_node, ep) {
/* If the endpoint node exists, consider it enabled */
remote = of_graph_get_remote_port(ep);
- if (remote)
+ if (remote) {
+ of_node_put(remote);
+ of_node_put(ep);
return true;
+ }
}
return false;
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index ed543227b00d..53f5d0581c35 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -128,7 +128,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
/* Enable OSD and BLK0, set max global alpha */
priv->viu.osd1_ctrl_stat = OSD_ENABLE |
- (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
+ (0x100 << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
canvas_id_osd1 = priv->canvas_id_osd1;
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 33698814c022..8d0938525978 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -91,7 +91,7 @@ static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv,
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
- writel((m[11] & 0x1fff) << 16,
+ writel((m[11] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22));
writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
@@ -400,17 +400,17 @@ void meson_viu_init(struct meson_drm *priv)
priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
- writel_relaxed(VIU_OSD_BLEND_REORDER(0, 1) |
- VIU_OSD_BLEND_REORDER(1, 0) |
- VIU_OSD_BLEND_REORDER(2, 0) |
- VIU_OSD_BLEND_REORDER(3, 0) |
- VIU_OSD_BLEND_DIN_EN(1) |
- VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
- VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
- VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
- VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
- VIU_OSD_BLEND_HOLD_LINES(4),
- priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
+ u32 val = (u32)VIU_OSD_BLEND_REORDER(0, 1) |
+ (u32)VIU_OSD_BLEND_REORDER(1, 0) |
+ (u32)VIU_OSD_BLEND_REORDER(2, 0) |
+ (u32)VIU_OSD_BLEND_REORDER(3, 0) |
+ (u32)VIU_OSD_BLEND_DIN_EN(1) |
+ (u32)VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
+ (u32)VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
+ (u32)VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
+ (u32)VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
+ (u32)VIU_OSD_BLEND_HOLD_LINES(4);
+ writel_relaxed(val, priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
index 154837688ab0..5df1957c8e41 100644
--- a/drivers/gpu/drm/meson/meson_vpp.c
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -100,6 +100,8 @@ void meson_vpp_init(struct meson_drm *priv)
priv->io_base + _REG(VPP_DOLBY_CTRL));
writel_relaxed(0x1020080,
priv->io_base + _REG(VPP_DUMMY_DATA1));
+ writel_relaxed(0x42020,
+ priv->io_base + _REG(VPP_DUMMY_DATA));
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index e3579e5ffa14..65c2c5361e5f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -71,7 +71,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
* since we've already mapped it once in
* submit_reloc()
*/
- if (WARN_ON(!ptr))
+ if (WARN_ON(IS_ERR_OR_NULL(ptr)))
return;
for (i = 0; i < dwords; i++) {
@@ -135,8 +135,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_RING(ring, 1);
/* Enable local preemption for finegrain preemption */
- OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
- OUT_RING(ring, 0x02);
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x1);
/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 68cccfa2870a..9c8eb1ae4acf 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -200,7 +200,7 @@ static const struct a6xx_shader_block {
SHADER(A6XX_SP_LB_3_DATA, 0x800),
SHADER(A6XX_SP_LB_4_DATA, 0x800),
SHADER(A6XX_SP_LB_5_DATA, 0x200),
- SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000),
+ SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800),
SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
SHADER(A6XX_SP_UAV_DATA, 0x80),
SHADER(A6XX_SP_INST_TAG, 0x80),
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 0888e0df660d..2a727ab0faf7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -233,8 +233,11 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
if (ret)
return NULL;
- /* Make sure pm runtime is active and reset any previous errors */
- pm_runtime_set_active(&pdev->dev);
+ /*
+ * Now that we have firmware loaded, and are ready to begin
+ * booting the gpu, go ahead and enable runpm:
+ */
+ pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 3802ad38c519..6f83253a8c58 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -899,7 +899,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
pm_runtime_set_autosuspend_delay(&pdev->dev,
adreno_gpu->info->inactive_period);
pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, &adreno_gpu_config);
@@ -908,11 +907,15 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
struct msm_gpu *gpu = &adreno_gpu->base;
+ struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
release_firmware(adreno_gpu->fw[i]);
+ if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev))
+ pm_runtime_disable(&priv->gpu_pdev->dev);
+
icc_put(gpu->icc_path);
msm_gpu_cleanup(&adreno_gpu->base);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index c7441fb8313e..e1a8989b7835 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -47,11 +47,9 @@ enum {
ADRENO_FW_MAX,
};
-enum adreno_quirks {
- ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
- ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
- ADRENO_QUIRK_LMLOADKILL_DISABLE = 3,
-};
+#define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0)
+#define ADRENO_QUIRK_FAULT_DETECT_MASK BIT(1)
+#define ADRENO_QUIRK_LMLOADKILL_DISABLE BIT(2)
struct adreno_rev {
uint8_t core;
@@ -74,7 +72,7 @@ struct adreno_info {
const char *name;
const char *fw[ADRENO_FW_MAX];
uint32_t gmem;
- enum adreno_quirks quirks;
+ u64 quirks;
struct msm_gpu *(*init)(struct drm_device *dev);
const char *zapfw;
u32 inactive_period;
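The quirks change matters because the removed enum used the consecutive values 1, 2, 3, which are not disjoint bit positions: OR-ing the first two quirks is indistinguishable from the third. BIT(n)-style values compose safely. A standalone demonstration:

#include <stdio.h>

enum old_quirks {			/* the removed definition, in miniature */
	OLD_TWO_PASS_USE_WFI = 1,
	OLD_FAULT_DETECT_MASK = 2,
	OLD_LMLOADKILL_DISABLE = 3,
};

#define NEW_TWO_PASS_USE_WFI	(1u << 0)
#define NEW_FAULT_DETECT_MASK	(1u << 1)
#define NEW_LMLOADKILL_DISABLE	(1u << 2)

int main(void)
{
	/* old scheme: combining two quirks fabricates a third */
	printf("%d\n", (OLD_TWO_PASS_USE_WFI | OLD_FAULT_DETECT_MASK)
		       == OLD_LMLOADKILL_DISABLE);	/* prints 1 */

	/* new scheme: every combination stays distinct */
	printf("%d\n", (NEW_TWO_PASS_USE_WFI | NEW_FAULT_DETECT_MASK)
		       == NEW_LMLOADKILL_DISABLE);	/* prints 0 */
	return 0;
}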
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index cf4b9b5964c6..cd6c3518ba02 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -15,19 +15,6 @@
#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
/**
- * enum dpu_core_perf_data_bus_id - data bus identifier
- * @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus
- * @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus
- * @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus
- */
-enum dpu_core_perf_data_bus_id {
- DPU_CORE_PERF_DATA_BUS_ID_MNOC,
- DPU_CORE_PERF_DATA_BUS_ID_LLCC,
- DPU_CORE_PERF_DATA_BUS_ID_EBI,
- DPU_CORE_PERF_DATA_BUS_ID_MAX,
-};
-
-/**
* struct dpu_core_perf_params - definition of performance parameters
* @max_per_pipe_ib: maximum instantaneous bandwidth request
* @bw_ctl: arbitrated bandwidth request
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 4aed5e9a84a4..2e28db60f4d2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -651,7 +651,10 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
dpu_crtc_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &cstate->base);
+ if (cstate)
+ __drm_atomic_helper_crtc_reset(crtc, &cstate->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
/**
@@ -833,6 +836,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
}
pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
+ if (!pstates)
+ return -ENOMEM;
dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(state);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 99d449ce4a07..03d671d23bf7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -44,6 +44,9 @@
(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
##__VA_ARGS__)
+#define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
/*
* Two to anticipate panels that can do cmd/vid dynamic switching
* plan is to create all possible physical encoder types, and switch between
@@ -2151,7 +2154,7 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
return;
}
- DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
+ DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n");
event = DPU_ENCODER_FRAME_EVENT_ERROR;
trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
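A frame-done timeout can fire on every frame while the underlying condition persists, so the message is routed through a rate-limited variant to keep the log usable; the backing DPU_ERROR_RATELIMITED macro is added in the dpu_kms.h hunk further down. The same pattern in isolation, built on the kernel's pr_err_ratelimited() (MY_ERROR_RATELIMITED and enc_id are illustrative):

    #define MY_ERROR_RATELIMITED(fmt, ...) \
    	pr_err_ratelimited("[my error]" fmt, ##__VA_ARGS__)

    /* emitted at most at the ratelimit interval, not once per frame */
    MY_ERROR_RATELIMITED("enc%d frame done timeout\n", enc_id);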
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index b9c84fb4d4a1..311863a05a6f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -258,12 +258,14 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
mode.htotal >>= 1;
mode.hsync_start >>= 1;
mode.hsync_end >>= 1;
+ mode.hskew >>= 1;
DPU_DEBUG_VIDENC(phys_enc,
- "split_role %d, halve horizontal %d %d %d %d\n",
+ "split_role %d, halve horizontal %d %d %d %d %d\n",
phys_enc->split_role,
mode.hdisplay, mode.htotal,
- mode.hsync_start, mode.hsync_end);
+ mode.hsync_start, mode.hsync_end,
+ mode.hskew);
}
drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index c08c67338d73..a74f8ae1a894 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -596,12 +596,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
_dpu_kms_mmu_destroy(dpu_kms);
if (dpu_kms->catalog) {
- for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
- u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
-
- if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) {
- dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
- dpu_kms->hw_vbif[vbif_idx] = NULL;
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i]) {
+ dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]);
+ dpu_kms->hw_vbif[i] = NULL;
}
}
}
@@ -899,7 +897,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
- dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx,
+ dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
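The two dpu_kms.c hunks fix an indexing mismatch: hw_vbif was populated at the loop index i while every lookup, including the destroy path, addresses the array by the hardware id vbif_idx. Storing at vbif_idx turns the array into a direct id-to-handle map, which is what lets _dpu_kms_hw_destroy() simply walk ARRAY_SIZE(hw_vbif). A compressed sketch of the invariant (example_init()/example_destroy() are illustrative):

    struct dpu_hw_vbif *hw_vbif[VBIF_MAX];

    hw_vbif[vbif_idx] = example_init(vbif_idx);	/* store by hw id */

    for (i = 0; i < ARRAY_SIZE(hw_vbif); i++)	/* tear down by slot */
    	if (hw_vbif[i])
    		example_destroy(hw_vbif[i]);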
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 4c889aabdaf9..6a4813505c33 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -50,6 +50,7 @@
} while (0)
#define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
+#define DPU_ERROR_RATELIMITED(fmt, ...) pr_err_ratelimited("[dpu error]" fmt, ##__VA_ARGS__)
/**
* ktime_compare_safe - compare two ktime structures
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 8d24b79fd400..5e6bb2f306be 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -11,6 +11,14 @@
#include "dpu_hw_vbif.h"
#include "dpu_trace.h"
+static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx)
+{
+ if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif))
+ return dpu_kms->hw_vbif[vbif_idx];
+
+ return NULL;
+}
+
/**
* _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
* @vbif: Pointer to hardware vbif driver
@@ -148,11 +156,11 @@ exit:
void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_ot_params *params)
{
- struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
u32 ot_lim;
- int ret, i;
+ int ret;
if (!dpu_kms) {
DPU_ERROR("invalid arguments\n");
@@ -160,12 +168,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
}
mdp = dpu_kms->hw_mdp;
- for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
- if (dpu_kms->hw_vbif[i] &&
- dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
- vbif = dpu_kms->hw_vbif[i];
- }
-
+ vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
if (!vbif || !mdp) {
DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
vbif != 0, mdp != 0);
@@ -208,7 +211,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_qos_params *params)
{
- struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
const struct dpu_vbif_qos_tbl *qos_tbl;
@@ -220,13 +223,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
}
mdp = dpu_kms->hw_mdp;
- for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
- if (dpu_kms->hw_vbif[i] &&
- dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
- vbif = dpu_kms->hw_vbif[i];
- break;
- }
- }
+ vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
if (!vbif || !vbif->cap) {
DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index f34dca5d4532..38274227f2d5 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -268,6 +268,7 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ unsigned long flags;
DBG("%s", mdp4_crtc->name);
@@ -280,6 +281,14 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
mdp4_disable(mdp4_kms);
+ if (crtc->state->event && !crtc->state->active) {
+ WARN_ON(mdp4_crtc->event);
+ spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags);
+ }
+
mdp4_crtc->enabled = false;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index 9262ed2dc8c3..bdcc3e91bf88 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -61,8 +61,9 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 03d60eb09257..cc60842b47e9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -1050,7 +1050,10 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
mdp5_crtc_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+ if (mdp5_cstate)
+ __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
drm_crtc_vblank_reset(crtc);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
index a4f5cb90f3e8..e4b8a789835a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
@@ -123,12 +123,13 @@ int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
- struct mdp5_global_state *state = mdp5_get_global_state(s);
+ struct mdp5_global_state *state;
struct mdp5_hw_pipe_state *new_state;
if (!hwpipe)
return 0;
+ state = mdp5_get_global_state(s);
if (IS_ERR(state))
return PTR_ERR(state);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 0dc23c86747e..e1c1b4ad5ed0 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -221,8 +221,7 @@ static void mdp5_plane_destroy_state(struct drm_plane *plane,
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
- if (state->fb)
- drm_framebuffer_put(state->fb);
+ __drm_atomic_helper_plane_destroy_state(state);
kfree(pstate);
}
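Putting only the framebuffer reference leaked whatever else the duplicated base state carried, such as the pending fence reference; __drm_atomic_helper_plane_destroy_state() releases everything the core took when the state was duplicated. The canonical shape for a driver wrapping drm_plane_state looks like this (my_plane_state/to_my_plane_state are illustrative; the helper is the real API):

    static void my_plane_destroy_state(struct drm_plane *plane,
    				       struct drm_plane_state *state)
    {
    	/* drops state->fb and other base-state references */
    	__drm_atomic_helper_plane_destroy_state(state);
    	kfree(to_my_plane_state(state));
    }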
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 16194971a99f..fedade8069fd 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -203,6 +203,12 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
return -EINVAL;
priv = dev->dev_private;
+
+ if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
+ DRM_DEV_ERROR(dev->dev, "too many bridges\n");
+ return -ENOSPC;
+ }
+
msm_dsi->dev = dev;
ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index b7b7c1a9164a..726c88139457 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -97,7 +97,7 @@ static const char * const dsi_8996_bus_clk_names[] = {
static const struct msm_dsi_config msm8996_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 2,
+ .num = 3,
.regs = {
{"vdda", 18160, 1 }, /* 1.25 V */
{"vcca", 17000, 32 }, /* 0.925 V */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 743142e15b4c..419cad31830e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1051,9 +1051,21 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
{
+ u32 data;
+
if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
return;
+ data = dsi_read(msm_host, REG_DSI_STATUS0);
+
+ /* if the video mode engine is not busy, it's because
+ * either the timing engine was not turned on or the
+ * DSI controller has already finished transmitting the
+ * video data, so there is no need to wait in those cases
+ */
+ if (!(data & DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY))
+ return;
+
if (msm_host->power_on && msm_host->enabled) {
dsi_wait4video_done(msm_host);
/* delay 4 ms to skip BLLP */
@@ -1877,6 +1889,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
/* setup workqueue */
msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
+ if (!msm_host->workqueue)
+ return -ENOMEM;
+
INIT_WORK(&msm_host->err_work, dsi_err_worker);
INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 60d50643d0b5..925262ea6f14 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -347,7 +347,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
} else {
timing->shared_timings.clk_pre =
linear_inter(tmax, tmin, pcnt2, 0, false);
- timing->shared_timings.clk_pre_inc_by_2 = 0;
+ timing->shared_timings.clk_pre_inc_by_2 = 0;
}
timing->ta_go = 3;
@@ -464,7 +464,9 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
struct device *dev = &phy->pdev->dev;
int ret;
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
ret = clk_prepare_enable(phy->ahb_clk);
if (ret) {
@@ -604,6 +606,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail;
}
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
/* PLL init will call into clk_register which requires
* register access, so we need to enable power and ahb clock.
*/
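pm_runtime_get_sync() raises the usage count even when the resume fails, so error paths that simply return used to leak a reference; pm_runtime_resume_and_get() drops the count on failure, letting the caller bail out directly, while devm_pm_runtime_enable() ties the matching disable to driver teardown. The acquire pattern in isolation (both calls are the real API; dev is the usual struct device pointer):

    ret = pm_runtime_resume_and_get(dev);
    if (ret)
    	return ret;	/* usage count already dropped on failure */

    /* ... hardware access ... */

    pm_runtime_put(dev);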
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index e4c9ff934e5b..74b806b3e65f 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -248,6 +248,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
+ if (!hdmi->workq) {
+ ret = -ENOMEM;
+ goto fail;
+ }
hdmi->i2c = msm_hdmi_i2c_init(hdmi);
if (IS_ERR(hdmi->i2c)) {
@@ -293,6 +297,11 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
struct platform_device *pdev = hdmi->pdev;
int ret;
+ if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
+ DRM_DEV_ERROR(dev->dev, "too many bridges\n");
+ return -ENOSPC;
+ }
+
hdmi->dev = dev;
hdmi->encoder = encoder;
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index cd59a5918038..50a25c119f4d 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -20,7 +20,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
return ERR_PTR(-ENOMEM);
fctx->dev = dev;
- strncpy(fctx->name, name, sizeof(fctx->name));
+ strscpy(fctx->name, name, sizeof(fctx->name));
fctx->context = dma_fence_context_alloc(1);
init_waitqueue_head(&fctx->event);
spin_lock_init(&fctx->spinlock);
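strncpy() neither guarantees NUL termination when the source fills the buffer nor reports truncation; strscpy() always terminates the destination and returns -E2BIG if the source did not fit. A self-contained sketch (name and src are illustrative):

    char name[32];
    ssize_t len = strscpy(name, src, sizeof(name));

    if (len == -E2BIG)
    	pr_warn("name truncated to %zu bytes\n", sizeof(name) - 1);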
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index c7832a951039..a6b024b06b36 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -191,6 +191,9 @@ static int rd_open(struct inode *inode, struct file *file)
file->private_data = rd;
rd->open = true;
+ /* Reset fifo to clear any previously unread data: */
+ rd->fifo.head = rd->fifo.tail = 0;
+
/* the parsing tools need to know gpu-id to know which
* register database to load.
*/
diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
index 33916b7b2c50..e8cb02803d9b 100644
--- a/drivers/gpu/drm/mxsfb/Kconfig
+++ b/drivers/gpu/drm/mxsfb/Kconfig
@@ -8,6 +8,7 @@ config DRM_MXSFB
tristate "i.MX23/i.MX28/i.MX6SX MXSFB LCD controller"
depends on DRM && OF
depends on COMMON_CLK
+ depends on ARCH_MXS || ARCH_MXC || COMPILE_TEST
select DRM_MXS
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index f7603be569fc..9f9c70734180 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -276,8 +276,10 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
break;
}
- if (WARN_ON(pi < 0))
+ if (WARN_ON(pi < 0)) {
+ kfree(nvbo);
return ERR_PTR(-EINVAL);
+ }
/* Disable compression if suitable settings couldn't be found. */
if (nvbo->comp && !vmm->page[pi].comp) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 496e7dcd6b7d..4e28e89b51dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -538,6 +538,19 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
}
}
+static void
+nouveau_connector_set_edid(struct nouveau_connector *nv_connector,
+ struct edid *edid)
+{
+ if (nv_connector->edid != edid) {
+ struct edid *old_edid = nv_connector->edid;
+
+ drm_connector_update_edid_property(&nv_connector->base, edid);
+ kfree(old_edid);
+ nv_connector->edid = edid;
+ }
+}
+
static enum drm_connector_status
nouveau_connector_detect(struct drm_connector *connector, bool force)
{
@@ -551,13 +564,6 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
int ret;
enum drm_connector_status conn_status = connector_status_disconnected;
- /* Cleanup the previous EDID block. */
- if (nv_connector->edid) {
- drm_connector_update_edid_property(connector, NULL);
- kfree(nv_connector->edid);
- nv_connector->edid = NULL;
- }
-
/* Outputs are only polled while runtime active, so resuming the
* device here is unnecessary (and would deadlock upon runtime suspend
* because it waits for polling to finish). We do however, want to
@@ -570,22 +576,23 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_autosuspend(dev->dev);
+ nouveau_connector_set_edid(nv_connector, NULL);
return conn_status;
}
}
nv_encoder = nouveau_connector_ddc_detect(connector);
if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
+ struct edid *new_edid;
+
if ((vga_switcheroo_handler_flags() &
VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
nv_connector->type == DCB_CONNECTOR_LVDS)
- nv_connector->edid = drm_get_edid_switcheroo(connector,
- i2c);
+ new_edid = drm_get_edid_switcheroo(connector, i2c);
else
- nv_connector->edid = drm_get_edid(connector, i2c);
+ new_edid = drm_get_edid(connector, i2c);
- drm_connector_update_edid_property(connector,
- nv_connector->edid);
+ nouveau_connector_set_edid(nv_connector, new_edid);
if (!nv_connector->edid) {
NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
connector->name);
@@ -619,6 +626,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
conn_status = connector_status_connected;
drm_dp_cec_set_edid(&nv_connector->aux, nv_connector->edid);
goto out;
+ } else {
+ nouveau_connector_set_edid(nv_connector, NULL);
}
nv_encoder = nouveau_connector_of_detect(connector);
@@ -661,24 +670,20 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = NULL;
+ struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
- /* Cleanup the previous EDID block. */
- if (nv_connector->edid) {
- drm_connector_update_edid_property(connector, NULL);
- kfree(nv_connector->edid);
- nv_connector->edid = NULL;
- }
-
nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
if (!nv_encoder)
- return connector_status_disconnected;
+ goto out;
/* Try retrieving EDID via DDC */
if (!drm->vbios.fp_no_ddc) {
status = nouveau_connector_detect(connector, force);
- if (status == connector_status_connected)
+ if (status == connector_status_connected) {
+ edid = nv_connector->edid;
goto out;
+ }
}
/* On some laptops (Sony, I'm looking at you) there appears to
@@ -691,7 +696,8 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
* valid - it's not (rh#613284)
*/
if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
- if ((nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
+ edid = nouveau_acpi_edid(dev, connector);
+ if (edid) {
status = connector_status_connected;
goto out;
}
@@ -711,12 +717,10 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
* stored for the panel stored in them.
*/
if (!drm->vbios.fp_no_ddc) {
- struct edid *edid =
- (struct edid *)nouveau_bios_embedded_edid(dev);
+ edid = (struct edid *)nouveau_bios_embedded_edid(dev);
if (edid) {
- nv_connector->edid =
- kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
- if (nv_connector->edid)
+ edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+ if (edid)
status = connector_status_connected;
}
}
@@ -729,8 +733,9 @@ out:
status = connector_status_unknown;
#endif
- drm_connector_update_edid_property(connector, nv_connector->edid);
- nouveau_connector_set_encoder(connector, nv_encoder);
+ nouveau_connector_set_edid(nv_connector, edid);
+ if (nv_encoder)
+ nouveau_connector_set_encoder(connector, nv_encoder);
return status;
}
@@ -971,7 +976,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
* "native" mode as some VBIOS tables require us to use the
* pixel clock as part of the lookup...
*/
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
nouveau_connector_detect_depth(connector);
if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index e18938972a89..a3ca5a38203f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -123,10 +123,16 @@ nouveau_name(struct drm_device *dev)
static inline bool
nouveau_cli_work_ready(struct dma_fence *fence)
{
- if (!dma_fence_is_signaled(fence))
- return false;
- dma_fence_put(fence);
- return true;
+ bool ret = true;
+
+ spin_lock_irq(fence->lock);
+ if (!dma_fence_is_signaled_locked(fence))
+ ret = false;
+ spin_unlock_irq(fence->lock);
+
+ if (ret)
+ dma_fence_put(fence);
+ return ret;
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index f9ee562f72d3..25cbe4ef383b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -90,7 +90,6 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
if (ret) {
- nouveau_bo_ref(NULL, &nvbo);
obj = ERR_PTR(ret);
goto unlock;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index 77061182a1cf..a95999393c84 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -108,6 +108,9 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
} else {
ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
mem->mem.size, &tmp);
+ if (ret)
+ goto done;
+
vma->addr = tmp.addr;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 478b4723d0f9..c88ceb486f3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -121,6 +121,7 @@ void gk104_grctx_generate_r418800(struct gf100_gr *);
extern const struct gf100_grctx_func gk110_grctx;
void gk110_grctx_generate_r419eb0(struct gf100_gr *);
+void gk110_grctx_generate_r419f78(struct gf100_gr *);
extern const struct gf100_grctx_func gk110b_grctx;
extern const struct gf100_grctx_func gk208_grctx;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 304e9d268bad..f894f8254824 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -916,7 +916,9 @@ static void
gk104_grctx_generate_r419f78(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
- nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
+
+ /* bit 3, when set, disables loads in fp helper invocations; we need it enabled */
+ nvkm_mask(device, 0x419f78, 0x00000009, 0x00000000);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index 86547cfc38dc..e88740d4e54d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -820,6 +820,15 @@ gk110_grctx_generate_r419eb0(struct gf100_gr *gr)
nvkm_mask(device, 0x419eb0, 0x00001000, 0x00001000);
}
+void
+gk110_grctx_generate_r419f78(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ /* bit 3, when set, disables loads in fp helper invocations; we need it enabled */
+ nvkm_mask(device, 0x419f78, 0x00000008, 0x00000000);
+}
+
const struct gf100_grctx_func
gk110_grctx = {
.main = gf100_grctx_generate_main,
@@ -852,4 +861,5 @@ gk110_grctx = {
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
.r418800 = gk104_grctx_generate_r418800,
.r419eb0 = gk110_grctx_generate_r419eb0,
+ .r419f78 = gk110_grctx_generate_r419f78,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index ebb947bd1446..086e4d49e112 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -101,4 +101,5 @@ gk110b_grctx = {
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
.r418800 = gk104_grctx_generate_r418800,
.r419eb0 = gk110_grctx_generate_r419eb0,
+ .r419f78 = gk110_grctx_generate_r419f78,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index 4d40512b5c99..0bf438c3f7cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -566,4 +566,5 @@ gk208_grctx = {
.dist_skip_table = gf117_grctx_generate_dist_skip_table,
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
.r418800 = gk104_grctx_generate_r418800,
+ .r419f78 = gk110_grctx_generate_r419f78,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 0b3964e6b36e..acdf0932a99e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -991,4 +991,5 @@ gm107_grctx = {
.r406500 = gm107_grctx_generate_r406500,
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
.r419e00 = gm107_grctx_generate_r419e00,
+ .r419f78 = gk110_grctx_generate_r419f78,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index 8bff14ae16b0..f0368d9a0154 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -33,7 +33,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
{
u32 p = *addr;
- if (*addr > bios->image0_size && bios->imaged_addr) {
+ if (*addr >= bios->image0_size && bios->imaged_addr) {
*addr -= bios->image0_size;
*addr += bios->imaged_addr;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 4b571cc6bc70..6597def18627 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name)
return (void *)fw;
}
+static void
+shadow_fw_release(void *fw)
+{
+ release_firmware(fw);
+}
+
static const struct nvbios_source
shadow_fw = {
.name = "firmware",
.init = shadow_fw_init,
- .fini = (void(*)(void *))release_firmware,
+ .fini = shadow_fw_release,
.read = shadow_fw_read,
.rw = false,
};
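Casting release_firmware() to void (*)(void *) makes every .fini invocation an indirect call through a mismatched prototype, which is undefined behavior and is rejected outright by kernels built with control-flow-integrity checking; a trivial wrapper with the exact callback signature keeps the call type-correct. The shape of the fix, generalized (struct my_ops and my_fini are illustrative; release_firmware() is real):

    struct my_ops {
    	void (*fini)(void *data);
    };

    static void my_fini(void *data)
    {
    	release_firmware(data);	/* real argument type: const struct firmware * */
    }

    static const struct my_ops ops = {
    	.fini = my_fini,	/* no function-pointer cast needed */
    };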
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index be91cffc3b52..315000b2f8e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -32,7 +32,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
type = 0x00000001; /* PAGE_ALL */
if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
- type |= 0x00000004; /* HUB_ONLY */
+ type |= 0x00000006; /* HUB_ONLY | ALL PDB (hack) */
mutex_lock(&subdev->mutex);
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index b30fcaa2d0f5..993d48fb8064 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1444,22 +1444,26 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
{
struct dsi_data *dsi = s->private;
unsigned long flags;
- struct dsi_irq_stats stats;
+ struct dsi_irq_stats *stats;
+
+ stats = kmalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
spin_lock_irqsave(&dsi->irq_stats_lock, flags);
- stats = dsi->irq_stats;
+ *stats = dsi->irq_stats;
memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
dsi->irq_stats.last_reset = jiffies;
spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
seq_printf(s, "period %u ms\n",
- jiffies_to_msecs(jiffies - stats.last_reset));
+ jiffies_to_msecs(jiffies - stats->last_reset));
- seq_printf(s, "irqs %d\n", stats.irq_count);
+ seq_printf(s, "irqs %d\n", stats->irq_count);
#define PIS(x) \
- seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
+ seq_printf(s, "%-20s %10d\n", #x, stats->dsi_irqs[ffs(DSI_IRQ_##x)-1]);
seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
PIS(VC0);
@@ -1483,10 +1487,10 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
#define PIS(x) \
seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
- stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
- stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
- stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
- stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
+ stats->vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
+ stats->vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
+ stats->vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
+ stats->vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
seq_printf(s, "-- VC interrupts --\n");
PIS(CS);
@@ -1502,7 +1506,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
#define PIS(x) \
seq_printf(s, "%-20s %10d\n", #x, \
- stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
+ stats->cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
seq_printf(s, "-- CIO interrupts --\n");
PIS(ERRSYNCESC1);
@@ -1527,6 +1531,8 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
PIS(ULPSACTIVENOT_ALL1);
#undef PIS
+ kfree(stats);
+
return 0;
}
#endif
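struct dsi_irq_stats is large enough that an on-stack copy pushed this function past the kernel's frame-size warning; snapshotting into a kmalloc'd buffer keeps the spinlocked section short without the stack footprint. The general pattern (big_stats, live_stats, and lock are illustrative):

    struct big_stats *snap;
    unsigned long flags;

    snap = kmalloc(sizeof(*snap), GFP_KERNEL);
    if (!snap)
    	return -ENOMEM;

    spin_lock_irqsave(&lock, flags);
    *snap = live_stats;			/* copy under the lock */
    memset(&live_stats, 0, sizeof(live_stats));
    spin_unlock_irqrestore(&lock, flags);

    seq_printf(s, "irqs %d\n", snap->irq_count);	/* print unlocked */
    kfree(snap);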
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index ac93dae2a9c8..7f1d0a9afafb 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1173,6 +1173,7 @@ static void __dss_uninit_ports(struct dss_device *dss, unsigned int num_ports)
default:
break;
}
+ of_node_put(port);
}
}
@@ -1205,11 +1206,13 @@ static int dss_init_ports(struct dss_device *dss)
default:
break;
}
+ of_node_put(port);
}
return 0;
error:
+ of_node_put(port);
__dss_uninit_ports(dss, i);
return r;
}
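Each port node these loops obtain comes back with an elevated refcount, so both the normal iteration and the error path must drop it with of_node_put() or the node leaks. The same discipline in a generic OF loop (example_init_port() is illustrative; the iterator releases the node between iterations but not on an early exit):

    struct device_node *port;
    int ret;

    for_each_child_of_node(parent, port) {
    	ret = example_init_port(port);
    	if (ret) {
    		of_node_put(port);	/* early exit still holds a ref */
    		return ret;
    	}
    }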
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 2983c003698e..a4645b78f737 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -216,8 +216,8 @@ static int omap_display_id(struct omap_dss_device *output)
} else if (output->bridge) {
struct drm_bridge *bridge = output->bridge;
- while (bridge->next)
- bridge = bridge->next;
+ while (drm_bridge_get_next_bridge(bridge))
+ bridge = drm_bridge_get_next_bridge(bridge);
node = bridge->of_node;
} else if (output->panel) {
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 6fe14111cd95..b626b543a992 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -125,7 +125,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
for (dssdev = output; dssdev; dssdev = dssdev->next)
omap_encoder_update_videomode_flags(&vm, dssdev->bus_flags);
- for (bridge = output->bridge; bridge; bridge = bridge->next) {
+ for (bridge = output->bridge; bridge;
+ bridge = drm_bridge_get_next_bridge(bridge)) {
if (!bridge->timings)
continue;
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index 5f72c922a04b..a0574dc03e16 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -350,9 +350,8 @@ static int versatile_panel_probe(struct platform_device *pdev)
dev_info(dev, "panel mounted on IB2 daughterboard\n");
}
- drm_panel_init(&vpanel->panel);
- vpanel->panel.dev = dev;
- vpanel->panel.funcs = &versatile_panel_drm_funcs;
+ drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&vpanel->panel);
}
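This conversion, and the long run of panel conversions that follow, all move to the four-argument drm_panel_init(), which takes the device, the funcs table, and the connector type in one call instead of three open-coded field assignments, and records the connector type for the panel. The consolidated probe tail in general form (my_panel and my_funcs are illustrative; drm_panel_init() and drm_panel_add() are the real API):

    drm_panel_init(&my_panel->base, dev, &my_funcs,
    	       DRM_MODE_CONNECTOR_DPI);

    return drm_panel_add(&my_panel->base);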
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index dabf59e0f56f..98f184b81187 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -204,9 +204,8 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &dsi->dev;
- ctx->panel.funcs = &feiyang_funcs;
+ drm_panel_init(&ctx->panel, &dsi->dev, &feiyang_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
if (IS_ERR(ctx->dvdd)) {
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 3c58f63adbf7..24955bec1958 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -895,9 +895,8 @@ static int ili9322_probe(struct spi_device *spi)
ili->input = ili->conf->input;
}
- drm_panel_init(&ili->panel);
- ili->panel.dev = dev;
- ili->panel.funcs = &ili9322_drm_funcs;
+ drm_panel_init(&ili->panel, dev, &ili9322_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&ili->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 3ad4a46c4e94..e8789e460a16 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -433,9 +433,8 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &dsi->dev;
- ctx->panel.funcs = &ili9881c_funcs;
+ drm_panel_init(&ctx->panel, &dsi->dev, &ili9881c_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->power = devm_regulator_get(&dsi->dev, "power");
if (IS_ERR(ctx->power)) {
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index df90b6607981..327fca97977e 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -487,9 +487,8 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
if (IS_ERR(innolux->backlight))
return PTR_ERR(innolux->backlight);
- drm_panel_init(&innolux->base);
- innolux->base.funcs = &innolux_panel_funcs;
- innolux->base.dev = dev;
+ drm_panel_init(&innolux->base, dev, &innolux_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
err = drm_panel_add(&innolux->base);
if (err < 0)
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index ff3e89e61e3f..56364a93f0b8 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -437,9 +437,8 @@ static int jdi_panel_add(struct jdi_panel *jdi)
return ret;
}
- drm_panel_init(&jdi->base);
- jdi->base.funcs = &jdi_panel_funcs;
- jdi->base.dev = &jdi->dsi->dev;
+ drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&jdi->base);
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index 1e7fecab72a9..2c576e7eee72 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -391,9 +391,8 @@ static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay)
if (IS_ERR(kingdisplay->backlight))
return PTR_ERR(kingdisplay->backlight);
- drm_panel_init(&kingdisplay->base);
- kingdisplay->base.funcs = &kingdisplay_panel_funcs;
- kingdisplay->base.dev = &kingdisplay->link->dev;
+ drm_panel_init(&kingdisplay->base, &kingdisplay->link->dev,
+ &kingdisplay_panel_funcs, DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&kingdisplay->base);
}
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index ee4379729a5b..7a1385e834f0 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -196,9 +196,8 @@ static int lb035q02_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &lb035q02_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &lb035q02_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index 41bf02d122a1..db4865a4c2b9 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -259,9 +259,8 @@ static int lg4573_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &spi->dev;
- ctx->panel.funcs = &lg4573_drm_funcs;
+ drm_panel_init(&ctx->panel, &spi->dev, &lg4573_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index bf5fcc3e5379..2405f26e5d31 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -254,9 +254,8 @@ static int panel_lvds_probe(struct platform_device *pdev)
*/
/* Register the panel. */
- drm_panel_init(&lvds->panel);
- lvds->panel.dev = lvds->dev;
- lvds->panel.funcs = &panel_lvds_funcs;
+ drm_panel_init(&lvds->panel, lvds->dev, &panel_lvds_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
ret = drm_panel_add(&lvds->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index 20f17e46e65d..fd593532ab23 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -205,9 +205,8 @@ static int nl8048_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &nl8048_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &nl8048_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index 2ad1063b068d..60ccedce530c 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -292,9 +292,8 @@ static int nt39016_probe(struct spi_device *spi)
return err;
}
- drm_panel_init(&panel->drm_panel);
- panel->drm_panel.dev = dev;
- panel->drm_panel.funcs = &nt39016_funcs;
+ drm_panel_init(&panel->drm_panel, dev, &nt39016_funcs,
+ DRM_MODE_CONNECTOR_DPI);
err = drm_panel_add(&panel->drm_panel);
if (err < 0) {
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index 2bae1db3ff34..f2a72ee6ee07 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -288,9 +288,8 @@ static int lcd_olinuxino_probe(struct i2c_client *client,
if (IS_ERR(lcd->backlight))
return PTR_ERR(lcd->backlight);
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = dev;
- lcd->panel.funcs = &lcd_olinuxino_funcs;
+ drm_panel_init(&lcd->panel, dev, &lcd_olinuxino_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index c7b48df8869a..938826f32665 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -455,12 +455,11 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &otm8009a_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
- dsi->host->dev, ctx,
+ dev, ctx,
&otm8009a_backlight_ops,
NULL);
if (IS_ERR(ctx->bl_dev)) {
diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
index e0e20ecff916..2b40913899d8 100644
--- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
+++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
@@ -166,9 +166,8 @@ static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587)
if (IS_ERR(osd101t2587->backlight))
return PTR_ERR(osd101t2587->backlight);
- drm_panel_init(&osd101t2587->base);
- osd101t2587->base.funcs = &osd101t2587_panel_funcs;
- osd101t2587->base.dev = &osd101t2587->dsi->dev;
+ drm_panel_init(&osd101t2587->base, &osd101t2587->dsi->dev,
+ &osd101t2587_panel_funcs, DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&osd101t2587->base);
}
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 3dff0b3f73c2..664605071d34 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -223,9 +223,8 @@ static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
return -EPROBE_DEFER;
}
- drm_panel_init(&wuxga_nt->base);
- wuxga_nt->base.funcs = &wuxga_nt_panel_funcs;
- wuxga_nt->base.dev = &wuxga_nt->dsi->dev;
+ drm_panel_init(&wuxga_nt->base, &wuxga_nt->dsi->dev,
+ &wuxga_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&wuxga_nt->base);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index a621dd28ff70..2ccb74debc8a 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -433,9 +433,8 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
return PTR_ERR(ts->dsi);
}
- drm_panel_init(&ts->base);
- ts->base.dev = dev;
- ts->base.funcs = &rpi_touchscreen_funcs;
+ drm_panel_init(&ts->base, dev, &rpi_touchscreen_funcs,
+ DRM_MODE_CONNECTOR_DSI);
/* This appears last, as it's what will unblock the DSI host
* driver's component bind function.
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
index 6a5d37006103..fd67fc6185c4 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -606,9 +606,8 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
- drm_panel_init(&panel->panel);
- panel->panel.funcs = &rad_panel_funcs;
- panel->panel.dev = dev;
+ drm_panel_init(&panel->panel, dev, &rad_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
dev_set_drvdata(dev, panel);
ret = drm_panel_add(&panel->panel);
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index ba889625ad43..994e855721f4 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -404,9 +404,8 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &rm68200_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
index b9109922397f..31234b79d3b1 100644
--- a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
+++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
@@ -343,9 +343,8 @@ static int jh057n_probe(struct mipi_dsi_device *dsi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &jh057n_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &jh057n_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index 3c15764f0c03..170a5cda21b9 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -173,9 +173,8 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &dsi->dev;
- ctx->panel.funcs = &rb070d30_panel_funcs;
+ drm_panel_init(&ctx->panel, &dsi->dev, &rb070d30_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->gpios.reset)) {
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 3be902dcedc0..250809ba37c7 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -351,9 +351,8 @@ static int ld9040_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &ld9040_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index f75bef24e050..e3a0397e953e 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -215,9 +215,8 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
return ret;
}
- drm_panel_init(&s6->panel);
- s6->panel.dev = dev;
- s6->panel.funcs = &s6d16d0_drm_funcs;
+ drm_panel_init(&s6->panel, dev, &s6d16d0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&s6->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index b923de23ed65..938ab72c5540 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -732,9 +732,8 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS;
ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e3ha2_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index cd90fa700c49..a60635e9226d 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -466,9 +466,8 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset_gpio);
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e63j0x03_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx,
&s6e63j0x03_bl_ops, NULL);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 142d395ea512..ba01af0b14fd 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -473,9 +473,8 @@ static int s6e63m0_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e63m0_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e63m0_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ret = s6e63m0_backlight_register(ctx);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index 81858267723a..dbced6501204 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -1017,9 +1017,8 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
ctx->brightness = GAMMA_LEVEL_NUM - 1;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e8aa0_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e8aa0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 18b22b1294fb..b3619ba443bd 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -274,9 +274,8 @@ static int seiko_panel_probe(struct device *dev,
return -EPROBE_DEFER;
}
- drm_panel_init(&panel->base);
- panel->base.dev = dev;
- panel->base.funcs = &seiko_panel_funcs;
+ drm_panel_init(&panel->base, dev, &seiko_panel_funcs,
+ DRM_MODE_CONNECTOR_DPI);
err = drm_panel_add(&panel->base);
if (err < 0)
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index e910b4ad1310..5e136c3ba185 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -329,9 +329,8 @@ static int sharp_panel_add(struct sharp_panel *sharp)
if (IS_ERR(sharp->backlight))
return PTR_ERR(sharp->backlight);
- drm_panel_init(&sharp->base);
- sharp->base.funcs = &sharp_panel_funcs;
- sharp->base.dev = &sharp->link1->dev;
+ drm_panel_init(&sharp->base, &sharp->link1->dev, &sharp_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&sharp->base);
}
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
index 46cd9a250129..eeab7998c7de 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -185,9 +185,8 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
return PTR_ERR(lcd->ud_gpio);
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &pdev->dev;
- lcd->panel.funcs = &ls037v7dw01_funcs;
+ drm_panel_init(&lcd->panel, &pdev->dev, &ls037v7dw01_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index c39abde9f9f1..b963ba4ab589 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -264,9 +264,8 @@ static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
if (IS_ERR(sharp_nt->backlight))
return PTR_ERR(sharp_nt->backlight);
- drm_panel_init(&sharp_nt->base);
- sharp_nt->base.funcs = &sharp_nt_panel_funcs;
- sharp_nt->base.dev = &sharp_nt->dsi->dev;
+ drm_panel_init(&sharp_nt->base, &sharp_nt->dsi->dev,
+ &sharp_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&sharp_nt->base);
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 312a3c4e2331..7efd7f76c6f5 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -94,6 +94,7 @@ struct panel_desc {
u32 bus_format;
u32 bus_flags;
+ int connector_type;
};
struct panel_simple {
@@ -464,9 +465,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
panel_simple_parse_panel_timing_node(dev, panel, &dt);
- drm_panel_init(&panel->base);
- panel->base.dev = dev;
- panel->base.funcs = &panel_simple_funcs;
+ drm_panel_init(&panel->base, dev, &panel_simple_funcs,
+ desc->connector_type);
err = drm_panel_add(&panel->base);
if (err < 0)
@@ -531,8 +531,8 @@ static const struct panel_desc ampire_am_480272h3tmqw_t01h = {
.num_modes = 1,
.bpc = 8,
.size = {
- .width = 105,
- .height = 67,
+ .width = 99,
+ .height = 58,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
@@ -833,6 +833,7 @@ static const struct panel_desc auo_g133han01 = {
.unprepare = 1000,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing auo_g185han01_timings = {
@@ -862,6 +863,7 @@ static const struct panel_desc auo_g185han01 = {
.unprepare = 1000,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing auo_p320hvn03_timings = {
@@ -890,6 +892,7 @@ static const struct panel_desc auo_p320hvn03 = {
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode auo_t215hvn01_mode = {
@@ -916,7 +919,9 @@ static const struct panel_desc auo_t215hvn01 = {
.delay = {
.disable = 5,
.unprepare = 1000,
- }
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode avic_tm070ddh03_mode = {
@@ -1205,6 +1210,7 @@ static const struct panel_desc dlc_dlc0700yzg_1 = {
.disable = 200,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing dlc_dlc1010gig_timing = {
@@ -1235,6 +1241,7 @@ static const struct panel_desc dlc_dlc1010gig = {
.unprepare = 60,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode edt_et035012dm6_mode = {
@@ -1501,6 +1508,7 @@ static const struct panel_desc hannstar_hsd070pww1 = {
.height = 94,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing hannstar_hsd100pxn1_timing = {
@@ -1525,6 +1533,7 @@ static const struct panel_desc hannstar_hsd100pxn1 = {
.height = 152,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = {
@@ -1577,6 +1586,7 @@ static const struct panel_desc innolux_at043tn24 = {
.height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
@@ -1631,18 +1641,19 @@ static const struct panel_desc innolux_g070y2_l01 = {
.unprepare = 800,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing innolux_g101ice_l01_timing = {
.pixelclock = { 60400000, 71100000, 74700000 },
.hactive = { 1280, 1280, 1280 },
- .hfront_porch = { 41, 80, 100 },
- .hback_porch = { 40, 79, 99 },
- .hsync_len = { 1, 1, 1 },
+ .hfront_porch = { 30, 60, 70 },
+ .hback_porch = { 30, 60, 70 },
+ .hsync_len = { 22, 40, 60 },
.vactive = { 800, 800, 800 },
- .vfront_porch = { 5, 11, 14 },
- .vback_porch = { 4, 11, 14 },
- .vsync_len = { 1, 1, 1 },
+ .vfront_porch = { 3, 8, 14 },
+ .vback_porch = { 3, 8, 14 },
+ .vsync_len = { 4, 7, 12 },
.flags = DISPLAY_FLAGS_DE_HIGH,
};
@@ -1659,6 +1670,8 @@ static const struct panel_desc innolux_g101ice_l01 = {
.disable = 200,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing innolux_g121i1_l01_timing = {
@@ -1686,6 +1699,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
.disable = 20,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode innolux_g121x1_l03_mode = {
@@ -1869,6 +1883,7 @@ static const struct panel_desc koe_tx31d200vm0baa = {
.height = 109,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing kyo_tcg121xglp_timing = {
@@ -1893,6 +1908,7 @@ static const struct panel_desc kyo_tcg121xglp = {
.height = 184,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode lemaker_bl035_rgb_002_mode = {
@@ -1941,6 +1957,7 @@ static const struct panel_desc lg_lb070wv8 = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
@@ -2097,6 +2114,7 @@ static const struct panel_desc mitsubishi_aa070mc01 = {
.disable = 400,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
@@ -2125,6 +2143,7 @@ static const struct panel_desc nec_nl12880bc20_05 = {
.disable = 50,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode nec_nl4827hc19_05b_mode = {
@@ -2227,6 +2246,7 @@ static const struct panel_desc nlt_nl192108ac18_02d = {
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode nvd_9128_mode = {
@@ -2250,6 +2270,7 @@ static const struct panel_desc nvd_9128 = {
.height = 88,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing okaya_rs800480t_7x0gp_timing = {
@@ -2662,6 +2683,7 @@ static const struct panel_desc sharp_lq101k1ly04 = {
.height = 136,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing sharp_lq123p1jx31_timing = {
@@ -2841,6 +2863,7 @@ static const struct panel_desc tianma_tm070jdhg30 = {
.height = 95,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing tianma_tm070rvhg71_timing = {
@@ -2865,6 +2888,7 @@ static const struct panel_desc tianma_tm070rvhg71 = {
.height = 86,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode ti_nspire_cx_lcd_mode[] = {
@@ -2947,6 +2971,7 @@ static const struct panel_desc toshiba_lt089ac29000 = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode tpk_f07a_0102_mode = {
@@ -3017,6 +3042,7 @@ static const struct panel_desc urt_umsh_8596md_lvds = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct panel_desc urt_umsh_8596md_parallel = {
@@ -3509,6 +3535,34 @@ static const struct panel_desc_dsi auo_b080uan01 = {
.lanes = 4,
};
+static const struct drm_display_mode auo_b101uan01_mode = {
+ .clock = 154500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 185,
+ .hsync_end = 1920 + 185,
+ .htotal = 1920 + 185 + 925,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 3,
+ .vsync_end = 1200 + 3 + 5,
+ .vtotal = 1200 + 3 + 5 + 4,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc_dsi auo_b101uan01 = {
+ .desc = {
+ .modes = &auo_b101uan01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 108,
+ .height = 272,
+ },
+ },
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+};
+
static const struct drm_display_mode boe_tv080wum_nl0_mode = {
.clock = 160000,
.hdisplay = 1200,
@@ -3688,6 +3742,9 @@ static const struct of_device_id dsi_of_match[] = {
.compatible = "auo,b080uan01",
.data = &auo_b080uan01
}, {
+ .compatible = "auo,b101uan01",
+ .data = &auo_b101uan01
+ }, {
.compatible = "boe,tv080wum-nl0",
.data = &boe_tv080wum_nl0
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 09c5d9a6f9fa..1d2fd6cc6674 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -369,7 +369,8 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(st7701->backlight))
return PTR_ERR(st7701->backlight);
- drm_panel_init(&st7701->panel);
+ drm_panel_init(&st7701->panel, &dsi->dev, &st7701_funcs,
+ DRM_MODE_CONNECTOR_DSI);
/**
* Once sleep out has been issued, ST7701 IC required to wait 120ms
@@ -381,8 +382,6 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
* ts8550b and there is no valid documentation for that.
*/
st7701->sleep_delay = 120 + desc->panel_sleep_delay;
- st7701->panel.funcs = &st7701_funcs;
- st7701->panel.dev = &dsi->dev;
ret = drm_panel_add(&st7701->panel);
if (ret < 0)
@@ -392,7 +391,15 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
st7701->dsi = dsi;
st7701->desc = desc;
- return mipi_dsi_attach(dsi);
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ goto err_attach;
+
+ return 0;
+
+err_attach:
+ drm_panel_remove(&st7701->panel);
+ return ret;
}
static int st7701_dsi_remove(struct mipi_dsi_device *dsi)
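[Aside: the st7701 hunk shows two recurring themes of this series: drm_panel_init() now takes the device, funcs and connector type in one call, and a failed mipi_dsi_attach() must unregister the panel again. A minimal probe sketch of that shape, with hypothetical acme_* names:]

    static int acme_dsi_probe(struct mipi_dsi_device *dsi)
    {
    	struct acme_panel *ctx;
    	int ret;

    	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
    	if (!ctx)
    		return -ENOMEM;

    	mipi_dsi_set_drvdata(dsi, ctx);

    	/* One call now sets dev, funcs and the connector type. */
    	drm_panel_init(&ctx->panel, &dsi->dev, &acme_panel_funcs,
    		       DRM_MODE_CONNECTOR_DSI);

    	ret = drm_panel_add(&ctx->panel);
    	if (ret < 0)
    		return ret;

    	ret = mipi_dsi_attach(dsi);
    	if (ret) {
    		/* Unregister the panel again or it leaks on failure. */
    		drm_panel_remove(&ctx->panel);
    		return ret;
    	}

    	return 0;
    }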
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 3b2612ae931e..108a85bb6667 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -381,9 +381,8 @@ static int st7789v_probe(struct spi_device *spi)
spi_set_drvdata(spi, ctx);
ctx->spi = spi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &spi->dev;
- ctx->panel.funcs = &st7789v_drm_funcs;
+ drm_panel_init(&ctx->panel, &spi->dev, &st7789v_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ctx->power = devm_regulator_get(&spi->dev, "power");
if (IS_ERR(ctx->power))
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index 3d5b9c4f68d9..d6387d8f88a3 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -648,9 +648,8 @@ static int acx565akm_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &acx565akm_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &acx565akm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ret = drm_panel_add(&lcd->panel);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index f2baff827f50..c44d6a65c0aa 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -347,9 +347,8 @@ static int td028ttec1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &td028ttec1_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &td028ttec1_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index ba163c779084..621b65feec07 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -458,9 +458,8 @@ static int td043mtea1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &td043mtea1_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &td043mtea1_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ret = drm_panel_add(&lcd->panel);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index 71591e5f5938..1a5418ae2ccf 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -457,9 +457,8 @@ static int tpg110_probe(struct spi_device *spi)
if (ret)
return ret;
- drm_panel_init(&tpg->panel);
- tpg->panel.dev = dev;
- tpg->panel.funcs = &tpg110_drm_funcs;
+ drm_panel_init(&tpg->panel, dev, &tpg110_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
spi_set_drvdata(spi, tpg);
return drm_panel_add(&tpg->panel);
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index 77e1311b7c69..0feea2456e14 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -518,9 +518,8 @@ static int truly_nt35597_panel_add(struct truly_nt35597 *ctx)
/* dual port */
gpiod_set_value(ctx->mode_gpio, 0);
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &truly_nt35597_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &truly_nt35597_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
return 0;
diff --git a/drivers/gpu/drm/panfrost/Kconfig b/drivers/gpu/drm/panfrost/Kconfig
index 86cdc0ce79e6..77f4d32e5204 100644
--- a/drivers/gpu/drm/panfrost/Kconfig
+++ b/drivers/gpu/drm/panfrost/Kconfig
@@ -3,7 +3,8 @@
config DRM_PANFROST
tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)"
depends on DRM
- depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
depends on MMU
select DRM_SCHED
select IOMMU_SUPPORT
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 8a014dc11571..b17f3022db5a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -233,7 +233,7 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
if (pm_runtime_active(pfdev->dev))
mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
- pm_runtime_put_sync_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
}
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
@@ -502,6 +502,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
if (IS_ERR(pages[i])) {
mutex_unlock(&bo->base.pages_lock);
ret = PTR_ERR(pages[i]);
+ pages[i] = NULL;
goto err_pages;
}
}
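[Aside: the panfrost fix resets the failed array slot to NULL because the error path walks the whole pages[] array; leaving a stale ERR_PTR there would be dereferenced as a valid page. A sketch of the pattern, with a hypothetical get_one_page() helper:]

    static int fill_page_array(struct page **pages, unsigned int count)
    {
    	unsigned int i;
    	int ret;

    	for (i = 0; i < count; i++) {
    		pages[i] = get_one_page();	/* hypothetical helper */
    		if (IS_ERR(pages[i])) {
    			ret = PTR_ERR(pages[i]);
    			/* Cleanup walks the full array: it must see
    			 * NULL here, never a leftover ERR_PTR. */
    			pages[i] = NULL;
    			goto err;
    		}
    	}
    	return 0;

    err:
    	for (i = 0; i < count; i++)
    		if (pages[i])
    			put_page(pages[i]);
    	return ret;
    }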
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 92ffed5c1d69..857fdf7b314e 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2192,11 +2192,12 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
/*
* On DCE32 any encoder can drive any block so usually just use crtc id,
- * but Apple thinks different at least on iMac10,1, so there use linkb,
+ * but Apple thinks different at least on iMac10,1 and iMac11,2, so use linkb there,
* otherwise the internal eDP panel will stay dark.
*/
if (ASIC_IS_DCE32(rdev)) {
- if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
+ if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1") ||
+ dmi_match(DMI_PRODUCT_NAME, "iMac11,2"))
enc_idx = (dig->linkb) ? 1 : 0;
else
enc_idx = radeon_crtc->crtc_id;
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 1e62e7bbf1b1..5403f4c902b6 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5556,6 +5556,7 @@ static int ci_parse_power_table(struct radeon_device *rdev)
u8 frev, crev;
u8 *power_state_offset;
struct ci_ps *ps;
+ int ret;
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
@@ -5585,11 +5586,15 @@ static int ci_parse_power_table(struct radeon_device *rdev)
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
- if (!rdev->pm.power_state[i].clock_info)
- return -EINVAL;
+ if (!rdev->pm.power_state[i].clock_info) {
+ ret = -EINVAL;
+ goto err_free_ps;
+ }
ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
- if (ps == NULL)
- return -ENOMEM;
+ if (ps == NULL) {
+ ret = -ENOMEM;
+ goto err_free_ps;
+ }
rdev->pm.dpm.ps[i].ps_priv = ps;
ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
non_clock_info,
@@ -5629,6 +5634,12 @@ static int ci_parse_power_table(struct radeon_device *rdev)
}
return 0;
+
+err_free_ps:
+ for (i = 0; i < rdev->pm.dpm.num_ps; i++)
+ kfree(rdev->pm.dpm.ps[i].ps_priv);
+ kfree(rdev->pm.dpm.ps);
+ return ret;
}
static int ci_get_vbios_boot_values(struct radeon_device *rdev,
@@ -5717,25 +5728,26 @@ int ci_dpm_init(struct radeon_device *rdev)
ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
if (ret) {
- ci_dpm_fini(rdev);
+ kfree(rdev->pm.dpm.priv);
return ret;
}
ret = r600_get_platform_caps(rdev);
if (ret) {
- ci_dpm_fini(rdev);
+ kfree(rdev->pm.dpm.priv);
return ret;
}
ret = r600_parse_extended_power_table(rdev);
if (ret) {
- ci_dpm_fini(rdev);
+ kfree(rdev->pm.dpm.priv);
return ret;
}
ret = ci_parse_power_table(rdev);
if (ret) {
- ci_dpm_fini(rdev);
+ kfree(rdev->pm.dpm.priv);
+ r600_free_extended_power_table(rdev);
return ret;
}
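[Aside: the ci_dpm hunks replace early returns (which leaked every ps allocated so far) and calls to ci_dpm_fini() (which touches state not yet initialized at these points) with targeted unwinding of exactly what has been set up. A sketch of that unwind shape, hypothetical acme_* names:]

    static int acme_parse_power_table(struct acme_dev *adev, unsigned int n)
    {
    	unsigned int i;
    	int ret;

    	adev->ps = kcalloc(n, sizeof(*adev->ps), GFP_KERNEL);
    	if (!adev->ps)
    		return -ENOMEM;

    	for (i = 0; i < n; i++) {
    		adev->ps[i].priv = kzalloc(sizeof(struct acme_ps), GFP_KERNEL);
    		if (!adev->ps[i].priv) {
    			ret = -ENOMEM;
    			goto err_free_ps;	/* unwind, don't just return */
    		}
    		adev->num_ps = i + 1;	/* track how far we got */
    	}
    	return 0;

    err_free_ps:
    	for (i = 0; i < adev->num_ps; i++)
    		kfree(adev->ps[i].priv);
    	kfree(adev->ps);
    	adev->ps = NULL;
    	return ret;
    }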
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 897442754fd0..c338bb82a122 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -9504,7 +9504,6 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
enum pci_bus_speed speed_cap;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -9546,12 +9545,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(rdev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
return;
if (speed_cap == PCIE_SPEED_8_0GT) {
@@ -9561,14 +9555,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
-
- tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
-
- tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -9586,15 +9574,23 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
@@ -9607,26 +9603,38 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
-
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ bridge_cfg &
+ PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ gpu_cfg &
+ PCI_EXP_LNKCTL_HAWD);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -9640,15 +9648,15 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
if (speed_cap == PCIE_SPEED_8_0GT)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (speed_cap == PCIE_SPEED_5_0GT)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
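[Aside: the cik.c conversion swaps hand-rolled pci_read_config_word()/pci_write_config_word() at a cached capability offset for the pcie_capability_*() accessors, which locate the PCIe capability themselves. A sketch of the read-modify-write idiom these helpers give you (the function name is illustrative):]

    #include <linux/pci.h>

    static void set_hw_autonomous_width(struct pci_dev *pdev, bool on)
    {
    	if (!pci_is_pcie(pdev))
    		return;

    	/* No pci_pcie_cap() offset bookkeeping needed. */
    	if (on)
    		pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
    					 PCI_EXP_LNKCTL_HAWD);
    	else
    		pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
    					   PCI_EXP_LNKCTL_HAWD);
    }

The same conversion also replaces the magic (1 << 4) | (7 << 9) and ~0xf masks with the named PCI_EXP_LNKCTL2_* definitions, which is why the LNKCTL2 hunks look larger than a mechanical rename.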
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 32ed60f1048b..b31d65a6752f 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -559,8 +559,12 @@ static int cypress_populate_mclk_value(struct radeon_device *rdev,
ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
u32 reference_clock = rdev->clock.mpll.reference_freq;
u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
- u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
- u32 clk_v = ss.percentage *
+ u32 clk_s, clk_v;
+
+ if (!decoded_ref)
+ return -EINVAL;
+ clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
+ clk_v = ss.percentage *
(0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);
mpll_ss1 &= ~CLKV_MASK;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1d978a3d9c82..2156590c3d15 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4819,14 +4819,15 @@ restart_ih:
break;
case 44: /* hdmi */
afmt_idx = src_data;
- if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
if (afmt_idx > 5) {
DRM_ERROR("Unhandled interrupt: %d %d\n",
src_id, src_data);
break;
}
+
+ if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 410f626a39d4..1fa48853b54e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -826,7 +826,7 @@ int ni_init_microcode(struct radeon_device *rdev)
err = 0;
} else if (rdev->smc_fw->size != smc_req_size) {
pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n",
- rdev->mc_fw->size, fw_name);
+ rdev->smc_fw->size, fw_name);
err = -EINVAL;
}
}
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index bd2e577c701f..cad7a73a551f 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -2241,8 +2241,12 @@ static int ni_populate_mclk_value(struct radeon_device *rdev,
ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
u32 reference_clock = rdev->clock.mpll.reference_freq;
u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
- u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
- u32 clk_v = ss.percentage *
+ u32 clk_s, clk_v;
+
+ if (!decoded_ref)
+ return -EINVAL;
+ clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
+ clk_v = ss.percentage *
(0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);
mpll_ss1 &= ~CLKV_MASK;
@@ -2740,10 +2744,10 @@ static int ni_set_mc_special_registers(struct radeon_device *rdev,
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
}
j++;
- if (j > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
break;
case MC_SEQ_RESERVE_M >> 2:
+ if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
temp_reg = RREG32(MC_PMG_CMD_MRS1);
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
@@ -2752,8 +2756,6 @@ static int ni_set_mc_special_registers(struct radeon_device *rdev,
(temp_reg & 0xffff0000) |
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
j++;
- if (j > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
break;
default:
break;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 110fb38004b1..9d2e6112f70a 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2313,7 +2313,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
switch (prim_walk) {
case 1:
for (i = 0; i < track->num_arrays; i++) {
- size = track->arrays[i].esize * track->max_indx * 4;
+ size = track->arrays[i].esize * track->max_indx * 4UL;
if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i);
@@ -2332,7 +2332,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
break;
case 2:
for (i = 0; i < track->num_arrays; i++) {
- size = track->arrays[i].esize * (nverts - 1) * 4;
+ size = track->arrays[i].esize * (nverts - 1) * 4UL;
if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index d6c28a5d77ab..19c9e86b2aaf 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1278,7 +1278,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
tmp = (reg - CB_COLOR0_BASE) / 4;
- track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+ track->cb_color_bo_offset[tmp] = (u64)radeon_get_ib_value(p, idx) << 8;
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj;
@@ -1305,7 +1305,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- track->htile_offset = radeon_get_ib_value(p, idx) << 8;
+ track->htile_offset = (u64)radeon_get_ib_value(p, idx) << 8;
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->htile_bo = reloc->robj;
track->db_dirty = true;
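[Aside: the r100.c and r600_cs.c hunks fix the same class of bug: a product of 32-bit values is computed in 32 bits and can wrap before being assigned to a wider type. Promoting one operand (a cast, or a UL suffix on a 64-bit kernel) forces the multiply itself to be done in 64 bits. A minimal sketch:]

    #include <linux/types.h>

    static u64 vertex_array_bytes(u32 esize, u32 max_indx)
    {
    	/*
    	 * "esize * max_indx * 4" would be evaluated in 32 bits and
    	 * can silently wrap; casting first makes the whole product
    	 * 64-bit, matching the fixes above.
    	 */
    	return (u64)esize * max_indx * 4;
    }

The "(u64)radeon_get_ib_value(p, idx) << 8" changes are the shift-flavoured variant of the same fix: shifting a 32-bit value left by 8 can drop the top bits of a GPU offset before it lands in a 64-bit field.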
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 756a50e8aff2..8c8e13ec3cd6 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -227,6 +227,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
if (!found)
return false;
+ pci_dev_put(pdev);
rdev->bios = kmalloc(size, GFP_KERNEL);
if (!rdev->bios) {
@@ -612,13 +613,14 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
acpi_size tbl_size;
UEFI_ACPI_VFCT *vfct;
unsigned offset;
+ bool r = false;
if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
return false;
tbl_size = hdr->length;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
- return false;
+ goto out;
}
vfct = (UEFI_ACPI_VFCT *)hdr;
@@ -631,13 +633,13 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
offset += sizeof(VFCT_IMAGE_HEADER);
if (offset > tbl_size) {
DRM_ERROR("ACPI VFCT image header truncated\n");
- return false;
+ goto out;
}
offset += vhdr->ImageLength;
if (offset > tbl_size) {
DRM_ERROR("ACPI VFCT image truncated\n");
- return false;
+ goto out;
}
if (vhdr->ImageLength &&
@@ -649,15 +651,18 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
rdev->bios = kmemdup(&vbios->VbiosContent,
vhdr->ImageLength,
GFP_KERNEL);
+ if (rdev->bios)
+ r = true;
- if (!rdev->bios)
- return false;
- return true;
+ goto out;
}
}
DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
- return false;
+
+out:
+ acpi_put_table(hdr);
+ return r;
}
#else
static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
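[Aside: the radeon_bios.c hunk converts the VFCT parser to a single-exit shape so that acpi_get_table() is always balanced by acpi_put_table(), instead of leaking the table mapping on every early return. The skeleton of that pattern, stripped of the driver specifics:]

    #include <linux/acpi.h>

    static bool read_vbios_from_vfct(void)	/* sketch, not the driver code */
    {
    	struct acpi_table_header *hdr;
    	bool ok = false;

    	if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
    		return false;

    	/* ... validate and parse hdr, set ok = true on success ... */

    	acpi_put_table(hdr);	/* balance the get on every exit path */
    	return ok;
    }

The companion one-liner in radeon_atrm_get_bios() is the PCI analogue: pci_dev_put() drops the reference taken while iterating devices once the right one has been found.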
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 7b5460678382..ba64dad1d7c9 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -271,7 +271,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
uint64_t *chunk_array_ptr;
- unsigned size, i;
+ u64 size;
+ unsigned i;
u32 ring = RADEON_CS_RING_GFX;
s32 priority = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5d017f0aec66..0d0ae89a8568 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1022,6 +1022,7 @@ void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
kfree(rdev->mode_info.atom_context->scratch);
+ kfree(rdev->mode_info.atom_context->iio);
}
kfree(rdev->mode_info.atom_context);
rdev->mode_info.atom_context = NULL;
@@ -1623,6 +1624,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
if (r) {
/* delay GPU reset to resume */
radeon_fence_driver_force_completion(rdev, i);
+ } else {
+ /* finish executing delayed work */
+ flush_delayed_work(&rdev->fence_drv[i].lockup_work);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 27b168936b2a..c7f50d9f7e37 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -682,11 +682,16 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
if (radeon_crtc == NULL)
return;
+ radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
+ if (!radeon_crtc->flip_queue) {
+ kfree(radeon_crtc);
+ return;
+ }
+
drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
radeon_crtc->crtc_id = index;
- radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
rdev->mode_info.crtcs[index] = radeon_crtc;
if (rdev->family >= CHIP_BONAIRE) {
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index b2b076606f54..e164b3c7a234 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -384,7 +384,6 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
- struct radeon_bo *robj;
int r;
/* for now if someone requests domain CPU -
@@ -397,13 +396,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
up_read(&rdev->exclusive_lock);
return -ENOENT;
}
- robj = gem_to_radeon_bo(gobj);
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
drm_gem_object_put_unlocked(gobj);
up_read(&rdev->exclusive_lock);
- r = radeon_gem_handle_lockup(robj->rdev, r);
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index e0ad547786e8..ef20c1f9b895 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -1206,13 +1206,17 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
r = radeon_bo_create(rdev, pd_size, align, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
NULL, &vm->page_directory);
- if (r)
+ if (r) {
+ kfree(vm->page_tables);
+ vm->page_tables = NULL;
return r;
-
+ }
r = radeon_vm_clear_bo(rdev, vm->page_directory);
if (r) {
radeon_bo_unref(&vm->page_directory);
vm->page_directory = NULL;
+ kfree(vm->page_tables);
+ vm->page_tables = NULL;
return r;
}
diff --git a/drivers/gpu/drm/radeon/rv740_dpm.c b/drivers/gpu/drm/radeon/rv740_dpm.c
index 327d65a76e1f..79b2de65e905 100644
--- a/drivers/gpu/drm/radeon/rv740_dpm.c
+++ b/drivers/gpu/drm/radeon/rv740_dpm.c
@@ -250,8 +250,12 @@ int rv740_populate_mclk_value(struct radeon_device *rdev,
ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
u32 reference_clock = rdev->clock.mpll.reference_freq;
u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
- u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
- u32 clk_v = 0x40000 * ss.percentage *
+ u32 clk_s, clk_v;
+
+ if (!decoded_ref)
+ return -EINVAL;
+ clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
+ clk_v = 0x40000 * ss.percentage *
(dividers.whole_fb_div + (dividers.frac_fb_div / 8)) / (clk_s * 10000);
mpll_ss1 &= ~CLKV_MASK;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 1d8efb0eefdb..ae74e8b01ee0 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3257,7 +3257,7 @@ static void si_gpu_init(struct radeon_device *rdev)
/* XXX what about 12? */
rdev->config.si.tile_config |= (3 << 0);
break;
- }
+ }
switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
case 0: /* four banks */
rdev->config.si.tile_config |= 0 << 4;
@@ -3616,6 +3616,10 @@ static int si_cp_start(struct radeon_device *rdev)
for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
ring = &rdev->ring[i];
r = radeon_ring_lock(rdev, ring, 2);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
/* clear the compute context state */
radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
@@ -7087,7 +7091,6 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
enum pci_bus_speed speed_cap;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -7129,12 +7132,7 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(rdev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
return;
if (speed_cap == PCIE_SPEED_8_0GT) {
@@ -7144,14 +7142,8 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
-
- tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
-
- tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
tmp = RREG32_PCIE(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -7169,15 +7161,23 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
@@ -7190,26 +7190,38 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
-
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ bridge_cfg &
+ PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ gpu_cfg &
+ PCI_EXP_LNKCTL_HAWD);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -7223,15 +7235,15 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
if (speed_cap == PCIE_SPEED_8_0GT)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (speed_cap == PCIE_SPEED_5_0GT)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index b95d5d390caf..45d04996adf5 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1493,8 +1493,10 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
- if (!rdev->pm.power_state[i].clock_info)
+ if (!rdev->pm.power_state[i].clock_info) {
+ kfree(rdev->pm.dpm.ps);
return -EINVAL;
+ }
ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 65302f9d025e..fbb93d0feb71 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1771,8 +1771,10 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
- if (!rdev->pm.power_state[i].clock_info)
+ if (!rdev->pm.power_state[i].clock_info) {
+ kfree(rdev->pm.dpm.ps);
return -EINVAL;
+ }
ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index ce98c08aa8b4..48281e29b5d4 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -401,7 +401,15 @@ static int rockchip_dp_probe(struct platform_device *pdev)
if (IS_ERR(dp->adp))
return PTR_ERR(dp->adp);
- return component_add(dev, &rockchip_dp_component_ops);
+ ret = component_add(dev, &rockchip_dp_component_ops);
+ if (ret)
+ goto err_dp_remove;
+
+ return 0;
+
+err_dp_remove:
+ analogix_dp_remove(dp->adp);
+ return ret;
}
static int rockchip_dp_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 8f299d76b69b..df2656471e31 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -275,8 +275,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+cdn_dp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct cdn_dp_device *dp = connector_to_dp(connector);
struct drm_display_info *display_info = &dp->connector.display_info;
@@ -562,7 +563,7 @@ static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
- memcpy(&dp->mode, adjusted, sizeof(*mode));
+ drm_mode_copy(&dp->mode, adjusted);
}
static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
@@ -1146,6 +1147,7 @@ static int cdn_dp_probe(struct platform_device *pdev)
struct cdn_dp_device *dp;
struct extcon_dev *extcon;
struct phy *phy;
+ int ret;
int i;
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
@@ -1186,9 +1188,19 @@ static int cdn_dp_probe(struct platform_device *pdev)
mutex_init(&dp->lock);
dev_set_drvdata(dev, dp);
- cdn_dp_audio_codec_init(dp, dev);
+ ret = cdn_dp_audio_codec_init(dp, dev);
+ if (ret)
+ return ret;
+
+ ret = component_add(dev, &cdn_dp_component_ops);
+ if (ret)
+ goto err_audio_deinit;
- return component_add(dev, &cdn_dp_component_ops);
+ return 0;
+
+err_audio_deinit:
+ platform_device_unregister(dp->audio_pdev);
+ return ret;
}
static int cdn_dp_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index f7191ae2266f..2dfaa9b73c28 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -481,8 +481,8 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
unsigned long best_freq = 0;
unsigned long fvco_min, fvco_max, fin, fout;
unsigned int min_prediv, max_prediv;
- unsigned int _prediv, uninitialized_var(best_prediv);
- unsigned long _fbdiv, uninitialized_var(best_fbdiv);
+ unsigned int _prediv, best_prediv;
+ unsigned long _fbdiv, best_fbdiv;
unsigned long min_delta = ULONG_MAX;
dsi->format = format;
@@ -1123,5 +1123,11 @@ struct platform_driver dw_mipi_dsi_rockchip_driver = {
.of_match_table = dw_mipi_dsi_rockchip_dt_ids,
.pm = &dw_mipi_dsi_rockchip_pm_ops,
.name = "dw-mipi-dsi-rockchip",
+ /*
+ * For dual-DSI display, one DSI pokes at the other DSI's
+ * drvdata in dw_mipi_dsi_rockchip_find_second(). This is not
+ * safe for asynchronous probe.
+ */
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
},
};
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index ed344a795b4d..95888e64bd8b 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -401,7 +401,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_L, value & 0xFF);
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_H, (value >> 8) & 0xFF);
- value = mode->hsync_start - mode->hdisplay;
+ value = mode->htotal - mode->hsync_start;
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_L, value & 0xFF);
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_H, (value >> 8) & 0xFF);
@@ -416,7 +416,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
value = mode->vtotal - mode->vdisplay;
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VBLANK, value & 0xFF);
- value = mode->vsync_start - mode->vdisplay;
+ value = mode->vtotal - mode->vsync_start;
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDELAY, value & 0xFF);
value = mode->vsync_end - mode->vsync_start;
@@ -487,7 +487,7 @@ static void inno_hdmi_encoder_mode_set(struct drm_encoder *encoder,
inno_hdmi_setup(hdmi, adj_mode);
/* Store the display mode for plugin/DPMS poweron events */
- memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode));
+ drm_mode_copy(&hdmi->previous_mode, adj_mode);
}
static void inno_hdmi_encoder_enable(struct drm_encoder *encoder)
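[Aside: the inno_hdmi timing fix hinges on how drm_display_mode encodes a scanline. hdisplay/hsync_start/hsync_end/htotal are cumulative positions, so the "delay" from sync to active video (sync width plus back porch) is htotal - hsync_start, not the front porch hsync_start - hdisplay that the old code wrote. A sketch of the arithmetic:]

    static void show_h_timings(const struct drm_display_mode *m)
    {
    	unsigned int hfp   = m->hsync_start - m->hdisplay;	/* front porch */
    	unsigned int hsync = m->hsync_end - m->hsync_start;	/* sync width */
    	unsigned int hbp   = m->htotal - m->hsync_end;		/* back porch */
    	/* HDELAY = sync + back porch = htotal - hsync_start */
    	unsigned int hdelay = m->htotal - m->hsync_start;

    	WARN_ON(hdelay != hsync + hbp);
    	(void)hfp;
    }

The VDELAY hunk is the identical correction on the vertical axis (vtotal - vsync_start instead of vsync_start - vdisplay).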
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index 85fc5f01f761..4a81c5c8a550 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -382,7 +382,7 @@ rk3066_hdmi_encoder_mode_set(struct drm_encoder *encoder,
struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder);
/* Store the display mode for plugin/DPMS poweron events. */
- memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode));
+ drm_mode_copy(&hdmi->previous_mode, adj_mode);
}
static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 291e89b4045f..96cc794147b5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -249,9 +249,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
else
ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
- if (ret)
- drm_gem_vm_close(vma);
-
return ret;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 2e4e1933a43c..f2edb9421476 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -234,14 +234,22 @@ static inline void vop_cfg_done(struct vop *vop)
VOP_REG_SET(vop, common, cfg_done, 1);
}
-static bool has_rb_swapped(uint32_t format)
+static bool has_rb_swapped(uint32_t version, uint32_t format)
{
switch (format) {
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_BGR888:
case DRM_FORMAT_BGR565:
return true;
+ /*
+ * The full framework (IP version 3.x) only needs rb swapped for RGB888,
+ * and the little framework (IP version 2.x) only needs it for BGR888;
+ * checking for 3.x means unknown VOP versions also only swap BGR888.
+ */
+ case DRM_FORMAT_RGB888:
+ return VOP_MAJOR(version) == 3;
+ case DRM_FORMAT_BGR888:
+ return VOP_MAJOR(version) != 3;
default:
return false;
}
@@ -654,13 +662,13 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
if (crtc->state->self_refresh_active)
rockchip_drm_set_win_enabled(crtc, false);
+ if (crtc->state->self_refresh_active)
+ goto out;
+
mutex_lock(&vop->vop_lock);
drm_crtc_vblank_off(crtc);
- if (crtc->state->self_refresh_active)
- goto out;
-
/*
* Vop standby will take effect at end of current frame,
* if dsp hold valid irq happen, it means standby complete.
@@ -692,9 +700,9 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
vop_core_clks_disable(vop);
pm_runtime_put(vop->dev);
-out:
mutex_unlock(&vop->vop_lock);
+out:
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
@@ -886,7 +894,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
VOP_WIN_SET(vop, win, dsp_info, dsp_info);
VOP_WIN_SET(vop, win, dsp_st, dsp_st);
- rb_swap = has_rb_swapped(fb->format->format);
+ rb_swap = has_rb_swapped(vop->data->version, fb->format->format);
VOP_WIN_SET(vop, win, rb_swap, rb_swap);
/*
@@ -1288,7 +1296,11 @@ static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct rockchip_crtc_state *rockchip_state;
- rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ rockchip_state = kmemdup(to_rockchip_crtc_state(crtc->state),
+ sizeof(*rockchip_state), GFP_KERNEL);
if (!rockchip_state)
return NULL;
@@ -1313,7 +1325,10 @@ static void vop_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
vop_crtc_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
+ if (crtc_state)
+ __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
#ifdef CONFIG_DRM_ANALOGIX_DP
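[Aside: the vop_crtc_duplicate_state() change is worth generalizing: when a driver subclasses drm_crtc_state, duplicating with kzalloc() silently drops every driver-private field. kmemdup() of the current state plus a re-init of the core part is the usual shape. A sketch with a hypothetical acme subclass:]

    #include <drm/drm_atomic_state_helper.h>

    struct acme_crtc_state {
    	struct drm_crtc_state base;
    	int output_bpc;		/* example driver-private field */
    };

    #define to_acme_crtc_state(s) \
    	container_of(s, struct acme_crtc_state, base)

    static struct drm_crtc_state *
    acme_crtc_duplicate_state(struct drm_crtc *crtc)
    {
    	struct acme_crtc_state *state;

    	if (WARN_ON(!crtc->state))
    		return NULL;

    	/* kmemdup, not kzalloc: carries the private fields over */
    	state = kmemdup(to_acme_crtc_state(crtc->state),
    			sizeof(*state), GFP_KERNEL);
    	if (!state)
    		return NULL;

    	/* re-initialize the core bookkeeping on the copy */
    	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

    	return &state->base;
    }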
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 64aefa856896..aa8212c721b1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -366,7 +366,6 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
goto err_put_port;
} else if (ret) {
DRM_DEV_ERROR(dev, "failed to find panel and bridge node\n");
- ret = -EPROBE_DEFER;
goto err_put_port;
}
if (lvds->panel)
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index e55870190bf5..d0f1384d0fba 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -287,7 +287,7 @@ static void sti_dvo_set_mode(struct drm_bridge *bridge,
DRM_DEBUG_DRIVER("\n");
- memcpy(&dvo->mode, mode, sizeof(struct drm_display_mode));
+ drm_mode_copy(&dvo->mode, mode);
/* According to the path used (main or aux), the dvo clocks should
* have a different parent clock. */
@@ -345,8 +345,9 @@ static int sti_dvo_connector_get_modes(struct drm_connector *connector)
#define CLK_TOLERANCE_HZ 50
-static int sti_dvo_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+sti_dvo_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
int target = mode->clock * 1000;
int target_min = target - CLK_TOLERANCE_HZ;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 94e404f13234..b321e5525771 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -522,7 +522,7 @@ static void sti_hda_set_mode(struct drm_bridge *bridge,
DRM_DEBUG_DRIVER("\n");
- memcpy(&hda->mode, mode, sizeof(struct drm_display_mode));
+ drm_mode_copy(&hda->mode, mode);
if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
DRM_ERROR("Undefined mode\n");
@@ -600,8 +600,9 @@ static int sti_hda_connector_get_modes(struct drm_connector *connector)
#define CLK_TOLERANCE_HZ 50
-static int sti_hda_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+sti_hda_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
int target = mode->clock * 1000;
int target_min = target - CLK_TOLERANCE_HZ;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 9862c322f0c4..c5547fedebe3 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -933,7 +933,7 @@ static void sti_hdmi_set_mode(struct drm_bridge *bridge,
DRM_DEBUG_DRIVER("\n");
/* Copy the drm display mode in the connector local structure */
- memcpy(&hdmi->mode, mode, sizeof(struct drm_display_mode));
+ drm_mode_copy(&hdmi->mode, mode);
/* Update clock framerate according to the selected mode */
ret = clk_set_rate(hdmi->clk_pix, mode->clock * 1000);
@@ -996,8 +996,9 @@ fail:
#define CLK_TOLERANCE_HZ 50
-static int sti_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+sti_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
int target = mode->clock * 1000;
int target_min = target - CLK_TOLERANCE_HZ;
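[Aside: two small idioms repeat across the sti hunks. First, connector .mode_valid callbacks are changed to return enum drm_mode_status, matching the drm_connector_helper_funcs prototype instead of a bare int. Second, stored modes are copied with drm_mode_copy() rather than memcpy(): a drm_display_mode embeds list bookkeeping, and a raw memcpy() clobbers it in the destination. A sketch of the copy helper in use:]

    #include <drm/drm_modes.h>

    static void store_mode(struct drm_display_mode *dst,
    		       const struct drm_display_mode *src)
    {
    	/*
    	 * memcpy(dst, src, sizeof(*src)) would also overwrite dst's
    	 * embedded list head; drm_mode_copy() copies the timing and
    	 * flags but preserves that bookkeeping.
    	 */
    	drm_mode_copy(dst, src);
    }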
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index eb3b2350687f..193c7f979bca 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -753,21 +753,19 @@ static irqreturn_t sun4i_tcon_handler(int irq, void *private)
static int sun4i_tcon_init_clocks(struct device *dev,
struct sun4i_tcon *tcon)
{
- tcon->clk = devm_clk_get(dev, "ahb");
+ tcon->clk = devm_clk_get_enabled(dev, "ahb");
if (IS_ERR(tcon->clk)) {
dev_err(dev, "Couldn't get the TCON bus clock\n");
return PTR_ERR(tcon->clk);
}
- clk_prepare_enable(tcon->clk);
if (tcon->quirks->has_channel_0) {
- tcon->sclk0 = devm_clk_get(dev, "tcon-ch0");
+ tcon->sclk0 = devm_clk_get_enabled(dev, "tcon-ch0");
if (IS_ERR(tcon->sclk0)) {
dev_err(dev, "Couldn't get the TCON channel 0 clock\n");
return PTR_ERR(tcon->sclk0);
}
}
- clk_prepare_enable(tcon->sclk0);
if (tcon->quirks->has_channel_1) {
tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
@@ -780,12 +778,6 @@ static int sun4i_tcon_init_clocks(struct device *dev,
return 0;
}
-static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
-{
- clk_disable_unprepare(tcon->sclk0);
- clk_disable_unprepare(tcon->clk);
-}
-
static int sun4i_tcon_init_irq(struct device *dev,
struct sun4i_tcon *tcon)
{
@@ -1202,14 +1194,14 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
ret = sun4i_tcon_init_regmap(dev, tcon);
if (ret) {
dev_err(dev, "Couldn't init our TCON regmap\n");
- goto err_free_clocks;
+ goto err_assert_reset;
}
if (tcon->quirks->has_channel_0) {
ret = sun4i_dclk_create(dev, tcon);
if (ret) {
dev_err(dev, "Couldn't create our TCON dot clock\n");
- goto err_free_clocks;
+ goto err_assert_reset;
}
}
@@ -1272,8 +1264,6 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
err_free_dotclock:
if (tcon->quirks->has_channel_0)
sun4i_dclk_free(tcon);
-err_free_clocks:
- sun4i_tcon_free_clocks(tcon);
err_assert_reset:
reset_control_assert(tcon->lcd_rst);
return ret;
@@ -1287,7 +1277,6 @@ static void sun4i_tcon_unbind(struct device *dev, struct device *master,
list_del(&tcon->list);
if (tcon->quirks->has_channel_0)
sun4i_dclk_free(tcon);
- sun4i_tcon_free_clocks(tcon);
}
static const struct component_ops sun4i_tcon_ops = {
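[Aside: the sun4i_tcon conversion to devm_clk_get_enabled() collapses get + clk_prepare_enable() into one managed call, which is why sun4i_tcon_free_clocks() and the err_free_clocks label can be deleted outright: devres disables and puts the clock when the device detaches or probe fails. A sketch of the idiom, hypothetical acme_* names:]

    static int acme_get_clocks(struct device *dev, struct acme *priv)
    {
    	/* got back prepared and enabled; unwound automatically */
    	priv->bus_clk = devm_clk_get_enabled(dev, "ahb");
    	if (IS_ERR(priv->bus_clk)) {
    		dev_err(dev, "Couldn't get the bus clock\n");
    		return PTR_ERR(priv->bus_clk);
    	}

    	return 0;
    }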
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index f2b288037b90..a18efd305519 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -365,8 +365,7 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
- u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100);
- u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start;
+ u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1;
if (delay > mode->vtotal)
delay = delay % mode->vtotal;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index c410221824c1..923899b95c88 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2458,8 +2458,10 @@ static int tegra_dc_probe(struct platform_device *pdev)
usleep_range(2000, 4000);
err = reset_control_assert(dc->rst);
- if (err < 0)
+ if (err < 0) {
+ clk_disable_unprepare(dc->clk);
return err;
+ }
usleep_range(2000, 4000);
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index a0f6f9b0d258..a84d19087d09 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -447,10 +447,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return PTR_ERR(dpaux->regs);
dpaux->irq = platform_get_irq(pdev, 0);
- if (dpaux->irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ\n");
- return -ENXIO;
- }
+ if (dpaux->irq < 0)
+ return dpaux->irq;
if (!pdev->dev.pm_domain) {
dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 13413d2b2602..03ccfb6ce8a8 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1451,9 +1451,11 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0);
if (np) {
struct platform_device *gangster = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!gangster)
+ return -EPROBE_DEFER;
dsi->slave = platform_get_drvdata(gangster);
- of_node_put(np);
if (!dsi->slave) {
put_device(&gangster->dev);
@@ -1501,48 +1503,58 @@ static int tegra_dsi_probe(struct platform_device *pdev)
if (!pdev->dev.pm_domain) {
dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
- if (IS_ERR(dsi->rst))
- return PTR_ERR(dsi->rst);
+ if (IS_ERR(dsi->rst)) {
+ err = PTR_ERR(dsi->rst);
+ goto remove;
+ }
}
dsi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dsi->clk)) {
- dev_err(&pdev->dev, "cannot get DSI clock\n");
- return PTR_ERR(dsi->clk);
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk),
+ "cannot get DSI clock\n");
+ goto remove;
}
dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
if (IS_ERR(dsi->clk_lp)) {
- dev_err(&pdev->dev, "cannot get low-power clock\n");
- return PTR_ERR(dsi->clk_lp);
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_lp),
+ "cannot get low-power clock\n");
+ goto remove;
}
dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(dsi->clk_parent)) {
- dev_err(&pdev->dev, "cannot get parent clock\n");
- return PTR_ERR(dsi->clk_parent);
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_parent),
+ "cannot get parent clock\n");
+ goto remove;
}
dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
if (IS_ERR(dsi->vdd)) {
- dev_err(&pdev->dev, "cannot get VDD supply\n");
- return PTR_ERR(dsi->vdd);
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->vdd),
+ "cannot get VDD supply\n");
+ goto remove;
}
err = tegra_dsi_setup_clocks(dsi);
if (err < 0) {
dev_err(&pdev->dev, "cannot setup clocks\n");
- return err;
+ goto remove;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(dsi->regs))
- return PTR_ERR(dsi->regs);
+ if (IS_ERR(dsi->regs)) {
+ err = PTR_ERR(dsi->regs);
+ goto remove;
+ }
- dsi->mipi = tegra_mipi_request(&pdev->dev);
- if (IS_ERR(dsi->mipi))
- return PTR_ERR(dsi->mipi);
+ dsi->mipi = tegra_mipi_request(&pdev->dev, pdev->dev.of_node);
+ if (IS_ERR(dsi->mipi)) {
+ err = PTR_ERR(dsi->mipi);
+ goto remove;
+ }
dsi->host.ops = &tegra_dsi_host_ops;
dsi->host.dev = &pdev->dev;
@@ -1570,9 +1582,12 @@ static int tegra_dsi_probe(struct platform_device *pdev)
return 0;
unregister:
+ pm_runtime_disable(&pdev->dev);
mipi_dsi_host_unregister(&dsi->host);
mipi_free:
tegra_mipi_free(dsi->mipi);
+remove:
+ tegra_output_remove(&dsi->output);
return err;
}
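[Aside: the tegra DSI probe rework leans on dev_err_probe(), which logs the message and returns the error it was given, while routing -EPROBE_DEFER to the deferred-probe log instead of spamming dmesg. The basic idiom, independent of this driver:]

    clk = devm_clk_get(dev, "lp");
    if (IS_ERR(clk))
    	return dev_err_probe(dev, PTR_ERR(clk),
    			     "cannot get low-power clock\n");

In the patch itself the call sites additionally "goto remove" so that tegra_output_remove() unwinds the output registered earlier in probe; the other new unwind, pm_runtime_disable() before mipi_dsi_host_unregister(), balances the pm_runtime_enable() done on the success path.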
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index e34325c83d28..361e67e4cd98 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -155,6 +155,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
if (gem->size < size) {
err = -EINVAL;
+ drm_gem_object_put(gem);
goto unreference;
}
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index bdcaa4c7168c..806249c0b7e8 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -127,8 +127,10 @@ int tegra_output_probe(struct tegra_output *output)
GPIOD_IN,
"HDMI hotplug detect");
if (IS_ERR(output->hpd_gpio)) {
- if (PTR_ERR(output->hpd_gpio) != -ENOENT)
- return PTR_ERR(output->hpd_gpio);
+ if (PTR_ERR(output->hpd_gpio) != -ENOENT) {
+ err = PTR_ERR(output->hpd_gpio);
+ goto put_i2c;
+ }
output->hpd_gpio = NULL;
}
@@ -137,7 +139,7 @@ int tegra_output_probe(struct tegra_output *output)
err = gpiod_to_irq(output->hpd_gpio);
if (err < 0) {
dev_err(output->dev, "gpiod_to_irq(): %d\n", err);
- return err;
+ goto put_i2c;
}
output->hpd_irq = err;
@@ -150,7 +152,7 @@ int tegra_output_probe(struct tegra_output *output)
if (err < 0) {
dev_err(output->dev, "failed to request IRQ#%u: %d\n",
output->hpd_irq, err);
- return err;
+ goto put_i2c;
}
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
@@ -168,6 +170,12 @@ int tegra_output_probe(struct tegra_output *output)
return -ENOMEM;
return 0;
+
+put_i2c:
+ if (output->ddc)
+ i2c_put_adapter(output->ddc);
+
+ return err;
}
void tegra_output_remove(struct tegra_output *output)
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 0419b6105c8a..ccd084abc8c9 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -906,7 +906,7 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
struct drm_dp_link *link)
{
const u64 f = 100000, link_rate = link->rate * 1000;
- const u64 pclk = mode->clock * 1000;
+ const u64 pclk = (u64)mode->clock * 1000;
u64 input, output, watermark, num;
struct tegra_sor_params params;
u32 num_syms_per_line;
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 8a27a6acee61..9ea2e7beef0c 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -151,35 +151,45 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
}
drm_connector_list_iter_end(&conn_iter);
- if (connector && connector->display_info.num_bus_formats) {
- u32 bus_format = connector->display_info.bus_formats[0];
-
- switch (bus_format) {
- case MEDIA_BUS_FMT_RGB888_1X24:
- dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
- DPI_FORMAT);
- break;
- case MEDIA_BUS_FMT_BGR888_1X24:
- dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
- DPI_FORMAT);
- dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR, DPI_ORDER);
- break;
- case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
- dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_2,
- DPI_FORMAT);
- break;
- case MEDIA_BUS_FMT_RGB666_1X18:
- dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_1,
- DPI_FORMAT);
- break;
- case MEDIA_BUS_FMT_RGB565_1X16:
- dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_3,
- DPI_FORMAT);
- break;
- default:
- DRM_ERROR("Unknown media bus format %d\n", bus_format);
- break;
+ if (connector) {
+ if (connector->display_info.num_bus_formats) {
+ u32 bus_format = connector->display_info.bus_formats[0];
+
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
+ DPI_FORMAT);
+ break;
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
+ DPI_FORMAT);
+ dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR,
+ DPI_ORDER);
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_2,
+ DPI_FORMAT);
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_1,
+ DPI_FORMAT);
+ break;
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_1,
+ DPI_FORMAT);
+ break;
+ default:
+ DRM_ERROR("Unknown media bus format %d\n",
+ bus_format);
+ break;
+ }
}
+
+ if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ dpi_c |= DPI_PIXEL_CLK_INVERT;
+
+ if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
+ dpi_c |= DPI_OUTPUT_ENABLE_INVERT;
} else {
/* Default to 24bit if no connector found. */
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, DPI_FORMAT);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 0d78ba017a29..36688a56c91c 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -392,7 +392,12 @@ static int __init vc4_drm_register(void)
if (ret)
return ret;
- return platform_driver_register(&vc4_platform_driver);
+ ret = platform_driver_register(&vc4_platform_driver);
+ if (ret)
+ platform_unregister_drivers(component_drivers,
+ ARRAY_SIZE(component_drivers));
+
+ return ret;
}
static void __exit vc4_drm_unregister(void)
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 0983949cc8c9..67bfbffdb65c 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -752,9 +752,9 @@ static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
struct vc4_dsi *dsi = vc4_encoder->dsi;
struct device *dev = &dsi->pdev->dev;
- drm_bridge_disable(dsi->bridge);
+ drm_bridge_chain_disable(dsi->bridge);
vc4_dsi_ulps(dsi, true);
- drm_bridge_post_disable(dsi->bridge);
+ drm_bridge_chain_post_disable(dsi->bridge);
clk_disable_unprepare(dsi->pll_phy_clock);
clk_disable_unprepare(dsi->escape_clock);
@@ -791,11 +791,9 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
/* Find what divider gets us a faster clock than the requested
* pixel clock.
*/
- for (divider = 1; divider < 8; divider++) {
- if (parent_rate / divider < pll_clock) {
- divider--;
+ for (divider = 1; divider < 255; divider++) {
+ if (parent_rate / (divider + 1) < pll_clock)
break;
- }
}
/* Now that we've picked a PLL divider, calculate back to its
@@ -1054,7 +1052,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
vc4_dsi_ulps(dsi, false);
- drm_bridge_pre_enable(dsi->bridge);
+ drm_bridge_chain_pre_enable(dsi->bridge);
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
DSI_PORT_WRITE(DISP0_CTRL,
@@ -1071,7 +1069,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_DISP0_ENABLE);
}
- drm_bridge_enable(dsi->bridge);
+ drm_bridge_chain_enable(dsi->bridge);
if (debug_dump_regs) {
struct drm_printer p = drm_info_printer(&dsi->pdev->dev);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 363f456ea713..cdcd19698b3c 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -288,16 +288,16 @@ static int vc4_plane_margins_adj(struct drm_plane_state *pstate)
adjhdisplay,
crtc_state->mode.hdisplay);
vc4_pstate->crtc_x += left;
- if (vc4_pstate->crtc_x > crtc_state->mode.hdisplay - left)
- vc4_pstate->crtc_x = crtc_state->mode.hdisplay - left;
+ if (vc4_pstate->crtc_x > crtc_state->mode.hdisplay - right)
+ vc4_pstate->crtc_x = crtc_state->mode.hdisplay - right;
adjvdisplay = crtc_state->mode.vdisplay - (top + bottom);
vc4_pstate->crtc_y = DIV_ROUND_CLOSEST(vc4_pstate->crtc_y *
adjvdisplay,
crtc_state->mode.vdisplay);
vc4_pstate->crtc_y += top;
- if (vc4_pstate->crtc_y > crtc_state->mode.vdisplay - top)
- vc4_pstate->crtc_y = crtc_state->mode.vdisplay - top;
+ if (vc4_pstate->crtc_y > crtc_state->mode.vdisplay - bottom)
+ vc4_pstate->crtc_y = crtc_state->mode.vdisplay - bottom;
vc4_pstate->crtc_w = DIV_ROUND_CLOSEST(vc4_pstate->crtc_w *
adjhdisplay,
@@ -317,7 +317,6 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
- u32 subpixel_src_mask = (1 << 16) - 1;
int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state;
u32 h_subsample = fb->format->hsub;
@@ -339,18 +338,15 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
for (i = 0; i < num_planes; i++)
vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
- /* We don't support subpixel source positioning for scaling. */
- if ((state->src.x1 & subpixel_src_mask) ||
- (state->src.x2 & subpixel_src_mask) ||
- (state->src.y1 & subpixel_src_mask) ||
- (state->src.y2 & subpixel_src_mask)) {
- return -EINVAL;
- }
-
- vc4_state->src_x = state->src.x1 >> 16;
- vc4_state->src_y = state->src.y1 >> 16;
- vc4_state->src_w[0] = (state->src.x2 - state->src.x1) >> 16;
- vc4_state->src_h[0] = (state->src.y2 - state->src.y1) >> 16;
+ /*
+ * We don't support subpixel source positioning for scaling,
+ * but fractional coordinates can be generated by clipping,
+ * so just round for now.
+ */
+ vc4_state->src_x = DIV_ROUND_CLOSEST(state->src.x1, 1 << 16);
+ vc4_state->src_y = DIV_ROUND_CLOSEST(state->src.y1, 1 << 16);
+ vc4_state->src_w[0] = DIV_ROUND_CLOSEST(state->src.x2, 1 << 16) - vc4_state->src_x;
+ vc4_state->src_h[0] = DIV_ROUND_CLOSEST(state->src.y2, 1 << 16) - vc4_state->src_y;
vc4_state->crtc_x = state->dst.x1;
vc4_state->crtc_y = state->dst.y1;
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 7402bc768664..0c764fd8399a 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -256,7 +256,7 @@ static void vc4_vec_ntsc_j_mode_set(struct vc4_vec *vec)
static const struct drm_display_mode ntsc_mode = {
DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 13500,
720, 720 + 14, 720 + 14 + 64, 720 + 14 + 64 + 60, 0,
- 480, 480 + 3, 480 + 3 + 3, 480 + 3 + 3 + 16, 0,
+ 480, 480 + 7, 480 + 7 + 6, 525, 0,
DRM_MODE_FLAG_INTERLACE)
};
@@ -278,7 +278,7 @@ static void vc4_vec_pal_m_mode_set(struct vc4_vec *vec)
static const struct drm_display_mode pal_mode = {
DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 13500,
720, 720 + 20, 720 + 20 + 64, 720 + 20 + 64 + 60, 0,
- 576, 576 + 2, 576 + 2 + 3, 576 + 2 + 3 + 20, 0,
+ 576, 576 + 4, 576 + 4 + 6, 625, 0,
DRM_MODE_FLAG_INTERLACE)
};
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 9268f6fc3f66..92895937aba9 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -249,4 +249,5 @@ void vgem_fence_close(struct vgem_file *vfile)
{
idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
idr_destroy(&vfile->fence_idr);
+ mutex_destroy(&vfile->fence_mutex);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 0a88ef11b9d3..5ae132e37277 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -327,10 +327,18 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
drm_gem_object_release(obj);
return ret;
}
- drm_gem_object_put_unlocked(obj);
rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
rc->bo_handle = handle;
+
+ /*
+ * The handle owns the reference now. But we must drop our
+ * remaining reference *after* we no longer need to dereference
+ * the obj. Otherwise userspace could guess the handle and
+ * race closing it from another thread.
+ */
+ drm_gem_object_put_unlocked(obj);
+
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index e3d20048075b..8db3b3ddbb64 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1623,7 +1623,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
SVGA3dTextureState *last_state = (SVGA3dTextureState *)
- ((unsigned long) header + header->size + sizeof(header));
+ ((unsigned long) header + header->size + sizeof(*header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
((unsigned long) header + sizeof(*cmd));
struct vmw_resource *ctx;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 0b800c354049..2492d3a19f36 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -182,7 +182,8 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
box->x != 0 || box->y != 0 || box->z != 0 ||
box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
- box->d != 1 || box_count != 1) {
+ box->d != 1 || box_count != 1 ||
+ box->w > 64 || box->h > 64) {
/* TODO handle non-page-aligned offsets */
/* TODO handle more dst & src != 0 */
/* TODO handle more than one copy */
diff --git a/drivers/gpu/drm/xilinx/Kconfig b/drivers/gpu/drm/xilinx/Kconfig
new file mode 100644
index 000000000000..23e78d6d4a4b
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/Kconfig
@@ -0,0 +1,59 @@
+config DRM_XILINX
+ tristate "Xilinx DRM"
+ depends on DRM && HAVE_CLK
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ select DMADEVICES
+ select DRM_XILINX_DP_SUB
+ select XILINX_FRMBUF
+ help
+ DRM display driver for Xilinx IP based pipelines.
+
+config DRM_XILINX_DP
+ tristate "Xilinx DRM Display Port Driver"
+ depends on DRM_XILINX
+ help
+ DRM driver for Xilinx Display Port IP.
+
+config DRM_XILINX_DP_DEBUG_FS
+ bool "Xilinx DRM DP debugfs"
+ depends on DEBUG_FS && DRM_XILINX_DP
+ help
+ Enable the debugfs code for the DP driver. The debugfs code
+ enables debugging and testing related features. It exposes
+ low-level controls to user space to help with test
+ automation, and can enable additional diagnostic or
+ statistical information.
+
+config DRM_XILINX_DP_SUB
+ tristate "Xilinx DRM Display Port Subsystem Driver"
+ depends on DRM_XILINX
+ select DRM_XILINX_DP
+ help
+ DRM driver for Xilinx Display Port Subsystem.
+
+config DRM_XILINX_DP_SUB_DEBUG_FS
+ bool "Xilinx DRM DPSUB debugfs"
+ depends on DEBUG_FS && DRM_XILINX_DP_SUB
+ select DRM_XILINX_DP_DEBUG_FS
+ help
+ Enable the debugfs code for the DP subsystem driver. The
+ debugfs code enables debugging and testing related features.
+ It exposes low-level controls to user space to help with
+ test automation, and can enable additional diagnostic or
+ statistical information.
+
+config DRM_XILINX_MIPI_DSI
+ tristate "Xilinx DRM MIPI DSI Driver"
+ depends on DRM_XILINX
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ help
+ DRM driver for Xilinx MIPI DSI IP.
+
+config DRM_XILINX_SDI
+ tristate "Xilinx DRM SDI Subsystem Driver"
+ depends on DRM_XILINX
+ help
+ DRM driver for Xilinx SDI Tx Subsystem.
diff --git a/drivers/gpu/drm/xilinx/Makefile b/drivers/gpu/drm/xilinx/Makefile
new file mode 100644
index 000000000000..19bc1541ca17
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the Xilinx DRM device driver.
+#
+
+xilinx_drm-y := xilinx_drm_crtc.o xilinx_drm_connector.o xilinx_drm_drv.o \
+ xilinx_drm_encoder.o xilinx_drm_fb.o xilinx_drm_gem.o \
+ xilinx_drm_plane.o
+xilinx_drm-y += xilinx_cresample.o xilinx_osd.o xilinx_rgb2yuv.o xilinx_vtc.o
+
+obj-$(CONFIG_DRM_XILINX) += xilinx_drm.o
+obj-$(CONFIG_DRM_XILINX_DP) += xilinx_drm_dp.o
+obj-$(CONFIG_DRM_XILINX_DP_SUB) += xilinx_drm_dp_sub.o
+obj-$(CONFIG_DRM_XILINX_MIPI_DSI) += xilinx_drm_dsi.o
+obj-$(CONFIG_DRM_XILINX_SDI) += xilinx_drm_sdi.o
diff --git a/drivers/gpu/drm/xilinx/xilinx_cresample.c b/drivers/gpu/drm/xilinx/xilinx_cresample.c
new file mode 100644
index 000000000000..6ddad66913ae
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_cresample.c
@@ -0,0 +1,154 @@
+/*
+ * Xilinx Chroma Resampler support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "xilinx_drm_drv.h"
+
+#include "xilinx_cresample.h"
+
+/* registers */
+/* general control registers */
+#define CRESAMPLE_CONTROL 0x0000
+
+/* horizontal and vertical active frame size */
+#define CRESAMPLE_ACTIVE_SIZE 0x0020
+
+/* control register bit definition */
+#define CRESAMPLE_CTL_EN (1 << 0) /* enable */
+#define CRESAMPLE_CTL_RU (1 << 1) /* reg update */
+#define CRESAMPLE_CTL_RESET (1 << 31) /* instant reset */
+
+struct xilinx_cresample {
+ void __iomem *base;
+ const char *input_format_name;
+ const char *output_format_name;
+};
+
+/* enable cresample */
+void xilinx_cresample_enable(struct xilinx_cresample *cresample)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(cresample->base, CRESAMPLE_CONTROL);
+ xilinx_drm_writel(cresample->base, CRESAMPLE_CONTROL,
+ reg | CRESAMPLE_CTL_EN);
+}
+
+/* disable cresample */
+void xilinx_cresample_disable(struct xilinx_cresample *cresample)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(cresample->base, CRESAMPLE_CONTROL);
+ xilinx_drm_writel(cresample->base, CRESAMPLE_CONTROL,
+ reg & ~CRESAMPLE_CTL_EN);
+}
+
+/* configure cresample */
+void xilinx_cresample_configure(struct xilinx_cresample *cresample,
+ int hactive, int vactive)
+{
+ /* ACTIVE_SIZE packs vactive in bits [31:16] and hactive in bits [15:0] */
+ xilinx_drm_writel(cresample->base, CRESAMPLE_ACTIVE_SIZE,
+ (vactive << 16) | hactive);
+}
+
+/* reset cresample */
+void xilinx_cresample_reset(struct xilinx_cresample *cresample)
+{
+ u32 reg;
+
+ xilinx_drm_writel(cresample->base, CRESAMPLE_CONTROL,
+ CRESAMPLE_CTL_RESET);
+
+ /* enable register update */
+ reg = xilinx_drm_readl(cresample->base, CRESAMPLE_CONTROL);
+ xilinx_drm_writel(cresample->base, CRESAMPLE_CONTROL,
+ reg | CRESAMPLE_CTL_RU);
+}
+
+/* get an input format */
+const char *
+xilinx_cresample_get_input_format_name(struct xilinx_cresample *cresample)
+{
+ return cresample->input_format_name;
+}
+
+/* get an output format */
+const char *
+xilinx_cresample_get_output_format_name(struct xilinx_cresample *cresample)
+{
+ return cresample->output_format_name;
+}
+
+static const struct of_device_id xilinx_cresample_of_match[] = {
+ { .compatible = "xlnx,v-cresample-3.01.a" },
+ { /* end of table */ },
+};
+
+struct xilinx_cresample *xilinx_cresample_probe(struct device *dev,
+ struct device_node *node)
+{
+ struct xilinx_cresample *cresample;
+ const struct of_device_id *match;
+ struct resource res;
+ int ret;
+
+ match = of_match_node(xilinx_cresample_of_match, node);
+ if (!match) {
+ dev_err(dev, "failed to match the device node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ cresample = devm_kzalloc(dev, sizeof(*cresample), GFP_KERNEL);
+ if (!cresample)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to of_address_to_resource\n");
+ return ERR_PTR(ret);
+ }
+
+ cresample->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(cresample->base))
+ return ERR_CAST(cresample->base);
+
+ ret = of_property_read_string(node, "xlnx,input-format",
+ &cresample->input_format_name);
+ if (ret) {
+ dev_warn(dev, "failed to get an input format prop\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = of_property_read_string(node, "xlnx,output-format",
+ &cresample->output_format_name);
+ if (ret) {
+ dev_warn(dev, "failed to get an output format prop\n");
+ return ERR_PTR(ret);
+ }
+
+ xilinx_cresample_reset(cresample);
+
+ return cresample;
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_cresample.h b/drivers/gpu/drm/xilinx/xilinx_cresample.h
new file mode 100644
index 000000000000..34323c722881
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_cresample.h
@@ -0,0 +1,40 @@
+/*
+ * Xilinx Chroma Resampler Header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_CRESAMPLE_H_
+#define _XILINX_CRESAMPLE_H_
+
+struct xilinx_cresample;
+
+void xilinx_cresample_configure(struct xilinx_cresample *cresample,
+ int hactive, int vactive);
+void xilinx_cresample_reset(struct xilinx_cresample *cresample);
+void xilinx_cresample_enable(struct xilinx_cresample *cresample);
+void xilinx_cresample_disable(struct xilinx_cresample *cresample);
+
+const char *
+xilinx_cresample_get_input_format_name(struct xilinx_cresample *cresample);
+const char *
+xilinx_cresample_get_output_format_name(struct xilinx_cresample *cresample);
+
+struct device;
+struct device_node;
+
+struct xilinx_cresample *xilinx_cresample_probe(struct device *dev,
+ struct device_node *node);
+
+#endif /* _XILINX_CRESAMPLE_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_connector.c b/drivers/gpu/drm/xilinx/xilinx_drm_connector.c
new file mode 100644
index 000000000000..b37bb50108da
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_connector.c
@@ -0,0 +1,204 @@
+/*
+ * Xilinx DRM connector driver for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+
+#include <linux/device.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_connector.h"
+
+struct xilinx_drm_connector {
+ struct drm_connector base;
+ struct drm_encoder *encoder;
+};
+
+struct xilinx_drm_connector_type {
+ const char *name;
+ const int type;
+};
+
+#define to_xilinx_connector(x) \
+ container_of(x, struct xilinx_drm_connector, base)
+
+/* get mode list */
+static int xilinx_drm_connector_get_modes(struct drm_connector *base_connector)
+{
+ struct xilinx_drm_connector *connector =
+ to_xilinx_connector(base_connector);
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_encoder_slave *encoder_slave = to_encoder_slave(encoder);
+ const struct drm_encoder_slave_funcs *encoder_sfuncs =
+ encoder_slave->slave_funcs;
+ int count = 0;
+
+ if (encoder_sfuncs->get_modes)
+ count = encoder_sfuncs->get_modes(encoder, base_connector);
+
+ return count;
+}
+
+/* check if mode is valid */
+static int xilinx_drm_connector_mode_valid(struct drm_connector *base_connector,
+ struct drm_display_mode *mode)
+{
+ struct xilinx_drm_connector *connector =
+ to_xilinx_connector(base_connector);
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_encoder_slave *encoder_slave = to_encoder_slave(encoder);
+ const struct drm_encoder_slave_funcs *encoder_sfuncs =
+ encoder_slave->slave_funcs;
+ int ret = MODE_OK;
+
+ if (encoder_sfuncs->mode_valid)
+ ret = encoder_sfuncs->mode_valid(encoder, mode);
+
+ return ret;
+}
+
+/* find best encoder: return stored encoder */
+static struct drm_encoder *
+xilinx_drm_connector_best_encoder(struct drm_connector *base_connector)
+{
+ struct xilinx_drm_connector *connector =
+ to_xilinx_connector(base_connector);
+
+ return connector->encoder;
+}
+
+static struct drm_connector_helper_funcs xilinx_drm_connector_helper_funcs = {
+ .get_modes = xilinx_drm_connector_get_modes,
+ .mode_valid = xilinx_drm_connector_mode_valid,
+ .best_encoder = xilinx_drm_connector_best_encoder,
+};
+
+static enum drm_connector_status
+xilinx_drm_connector_detect(struct drm_connector *base_connector, bool force)
+{
+ struct xilinx_drm_connector *connector =
+ to_xilinx_connector(base_connector);
+ enum drm_connector_status status = connector_status_unknown;
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_encoder_slave *encoder_slave = to_encoder_slave(encoder);
+ const struct drm_encoder_slave_funcs *encoder_sfuncs =
+ encoder_slave->slave_funcs;
+
+ if (encoder_sfuncs->detect) {
+ status = encoder_sfuncs->detect(encoder, base_connector);
+
+ /* some connectors ignore the first hpd, so retry once if forced */
+ if (force && (status != connector_status_connected))
+ status = encoder_sfuncs->detect(encoder, base_connector);
+ }
+
+ DRM_DEBUG_KMS("status: %d\n", status);
+
+ return status;
+}
+
+/* destroy connector */
+void xilinx_drm_connector_destroy(struct drm_connector *base_connector)
+{
+ drm_connector_unregister(base_connector);
+ drm_connector_cleanup(base_connector);
+}
+
+static const struct drm_connector_funcs xilinx_drm_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = xilinx_drm_connector_detect,
+ .destroy = xilinx_drm_connector_destroy,
+};
+
+static const struct xilinx_drm_connector_type connector_types[] = {
+ { "HDMIA", DRM_MODE_CONNECTOR_HDMIA },
+ { "DisplayPort", DRM_MODE_CONNECTOR_DisplayPort },
+};
+
+/* create connector */
+struct drm_connector *
+xilinx_drm_connector_create(struct drm_device *drm,
+ struct drm_encoder *base_encoder, int id)
+{
+ struct xilinx_drm_connector *connector;
+ const char *string;
+ int type = DRM_MODE_CONNECTOR_Unknown;
+ int i, ret;
+
+ connector = devm_kzalloc(drm->dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return ERR_PTR(-ENOMEM);
+
+ connector->base.polled = DRM_CONNECTOR_POLL_HPD |
+ DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ ret = of_property_read_string_index(drm->dev->of_node,
+ "xlnx,connector-type", id, &string);
+ if (ret < 0) {
+ dev_err(drm->dev, "No connector type in DT\n");
+ return ERR_PTR(ret);
+ }
+
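+ /* map the DT "xlnx,connector-type" string to a DRM connector type */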
+ for (i = 0; i < ARRAY_SIZE(connector_types); i++)
+ if (strcmp(connector_types[i].name, string) == 0) {
+ type = connector_types[i].type;
+ break;
+ }
+
+ if (type == DRM_MODE_CONNECTOR_Unknown) {
+ dev_err(drm->dev, "Unknown connector type in DT\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = drm_connector_init(drm, &connector->base,
+ &xilinx_drm_connector_funcs, type);
+ if (ret) {
+ DRM_ERROR("failed to initialize connector\n");
+ return ERR_PTR(ret);
+ }
+
+ drm_connector_helper_add(&connector->base,
+ &xilinx_drm_connector_helper_funcs);
+
+ /* add entry for connector */
+ ret = drm_connector_register(&connector->base);
+ if (ret) {
+ DRM_ERROR("failed to register a connector\n");
+ goto err_register;
+ }
+
+ /* connect connector and encoder */
+ ret = drm_connector_attach_encoder(&connector->base, base_encoder);
+ if (ret) {
+ DRM_ERROR("failed to attach connector to encoder\n");
+ goto err_attach;
+ }
+ connector->encoder = base_encoder;
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+
+ return &connector->base;
+
+err_attach:
+ drm_connector_unregister(&connector->base);
+err_register:
+ drm_connector_cleanup(&connector->base);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_connector.h b/drivers/gpu/drm/xilinx/xilinx_drm_connector.h
new file mode 100644
index 000000000000..750bfd8d1e86
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_connector.h
@@ -0,0 +1,29 @@
+/*
+ * Xilinx DRM connector header for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_CONNECTOR_H_
+#define _XILINX_DRM_CONNECTOR_H_
+
+struct drm_device;
+struct drm_connector;
+
+struct drm_connector *
+xilinx_drm_connector_create(struct drm_device *drm,
+ struct drm_encoder *base_encoder, int id);
+void xilinx_drm_connector_destroy(struct drm_connector *base_connector);
+
+#endif /* _XILINX_DRM_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_crtc.c b/drivers/gpu/drm/xilinx/xilinx_drm_crtc.c
new file mode 100644
index 000000000000..66513b13b045
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_crtc.c
@@ -0,0 +1,595 @@
+/*
+ * Xilinx DRM crtc driver for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+
+#include <video/videomode.h>
+
+#include "xilinx_drm_crtc.h"
+#include "xilinx_drm_dp_sub.h"
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_plane.h"
+
+#include "xilinx_cresample.h"
+#include "xilinx_rgb2yuv.h"
+#include "xilinx_vtc.h"
+#include "xilinx_drm_sdi.h"
+
+struct xilinx_drm_crtc {
+ struct drm_crtc base;
+ struct xilinx_cresample *cresample;
+ struct xilinx_rgb2yuv *rgb2yuv;
+ struct clk *pixel_clock;
+ bool pixel_clock_enabled;
+ struct xilinx_vtc *vtc;
+ struct xilinx_drm_plane_manager *plane_manager;
+ int dpms;
+ unsigned int alpha;
+ struct drm_pending_vblank_event *event;
+ struct xilinx_drm_dp_sub *dp_sub;
+ struct xilinx_sdi *sdi;
+};
+
+#define to_xilinx_crtc(x) container_of(x, struct xilinx_drm_crtc, base)
+
+/* set crtc dpms */
+static void xilinx_drm_crtc_dpms(struct drm_crtc *base_crtc, int dpms)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ int ret;
+
+ DRM_DEBUG_KMS("dpms: %d -> %d\n", crtc->dpms, dpms);
+
+ if (crtc->dpms == dpms)
+ return;
+
+ crtc->dpms = dpms;
+
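+ /* bring the pipeline up clock-first; tear it down in the reverse order */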
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ if (!crtc->pixel_clock_enabled) {
+ ret = clk_prepare_enable(crtc->pixel_clock);
+ if (ret)
+ DRM_ERROR("failed to enable a pixel clock\n");
+ else
+ crtc->pixel_clock_enabled = true;
+ }
+
+ xilinx_drm_plane_manager_dpms(crtc->plane_manager, dpms);
+ xilinx_drm_plane_dpms(base_crtc->primary, dpms);
+ if (crtc->rgb2yuv)
+ xilinx_rgb2yuv_enable(crtc->rgb2yuv);
+ if (crtc->cresample)
+ xilinx_cresample_enable(crtc->cresample);
+ if (crtc->vtc)
+ xilinx_vtc_enable(crtc->vtc);
+ break;
+ default:
+ if (crtc->vtc) {
+ xilinx_vtc_disable(crtc->vtc);
+ xilinx_vtc_reset(crtc->vtc);
+ }
+ if (crtc->cresample) {
+ xilinx_cresample_disable(crtc->cresample);
+ xilinx_cresample_reset(crtc->cresample);
+ }
+ if (crtc->rgb2yuv) {
+ xilinx_rgb2yuv_disable(crtc->rgb2yuv);
+ xilinx_rgb2yuv_reset(crtc->rgb2yuv);
+ }
+ xilinx_drm_plane_dpms(base_crtc->primary, dpms);
+ xilinx_drm_plane_manager_dpms(crtc->plane_manager, dpms);
+ if (crtc->pixel_clock_enabled) {
+ clk_disable_unprepare(crtc->pixel_clock);
+ crtc->pixel_clock_enabled = false;
+ }
+ break;
+ }
+}
+
+/* prepare crtc */
+static void xilinx_drm_crtc_prepare(struct drm_crtc *base_crtc)
+{
+ xilinx_drm_crtc_dpms(base_crtc, DRM_MODE_DPMS_OFF);
+}
+
+/* apply mode to crtc pipe */
+static void xilinx_drm_crtc_commit(struct drm_crtc *base_crtc)
+{
+ xilinx_drm_crtc_dpms(base_crtc, DRM_MODE_DPMS_ON);
+ xilinx_drm_plane_commit(base_crtc->primary);
+}
+
+/* fix mode */
+static bool xilinx_drm_crtc_mode_fixup(struct drm_crtc *base_crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* no op */
+ return true;
+}
+
+/* set new mode in crtc pipe */
+static int xilinx_drm_crtc_mode_set(struct drm_crtc *base_crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ struct videomode vm;
+ long diff;
+ int ret;
+
+ if (crtc->pixel_clock_enabled) {
+ clk_disable_unprepare(crtc->pixel_clock);
+ crtc->pixel_clock_enabled = false;
+ }
+
+ /* set pixel clock */
+ ret = clk_set_rate(crtc->pixel_clock, adjusted_mode->clock * 1000);
+ if (ret) {
+ DRM_ERROR("failed to set a pixel clock\n");
+ return ret;
+ }
+
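+ /* warn (at debug level) when the achieved rate deviates by more than 5% */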
+ diff = clk_get_rate(crtc->pixel_clock) - adjusted_mode->clock * 1000;
+ if (abs(diff) > (adjusted_mode->clock * 1000) / 20)
+ DRM_DEBUG_KMS("actual pixel clock rate(%d) is off by %ld\n",
+ adjusted_mode->clock, diff);
+
+ if (crtc->vtc) {
+ /* set video timing */
+ vm.hactive = adjusted_mode->hdisplay;
+ vm.hfront_porch = adjusted_mode->hsync_start -
+ adjusted_mode->hdisplay;
+ vm.hback_porch = adjusted_mode->htotal -
+ adjusted_mode->hsync_end;
+ vm.hsync_len = adjusted_mode->hsync_end -
+ adjusted_mode->hsync_start;
+
+ vm.vactive = adjusted_mode->vdisplay;
+ vm.vfront_porch = adjusted_mode->vsync_start -
+ adjusted_mode->vdisplay;
+ vm.vback_porch = adjusted_mode->vtotal -
+ adjusted_mode->vsync_end;
+ vm.vsync_len = adjusted_mode->vsync_end -
+ adjusted_mode->vsync_start;
+
+ vm.flags = 0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vm.flags |= DISPLAY_FLAGS_INTERLACED;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ vm.flags |= DISPLAY_FLAGS_HSYNC_LOW;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ vm.flags |= DISPLAY_FLAGS_VSYNC_LOW;
+
+ xilinx_vtc_config_sig(crtc->vtc, &vm);
+ }
+
+ /* configure cresample and rgb2yuv */
+ if (crtc->cresample)
+ xilinx_cresample_configure(crtc->cresample,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+ if (crtc->rgb2yuv)
+ xilinx_rgb2yuv_configure(crtc->rgb2yuv,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+
+ /* configure a plane: vdma and osd layer */
+ xilinx_drm_plane_manager_mode_set(crtc->plane_manager,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+ ret = xilinx_drm_plane_mode_set(base_crtc->primary,
+ base_crtc->primary->fb, 0, 0,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay,
+ x, y,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+ if (ret) {
+ DRM_ERROR("failed to mode set a plane\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int _xilinx_drm_crtc_mode_set_base(struct drm_crtc *base_crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ int ret;
+
+ /* configure a plane */
+ xilinx_drm_plane_manager_mode_set(crtc->plane_manager,
+ base_crtc->hwmode.hdisplay,
+ base_crtc->hwmode.vdisplay);
+ ret = xilinx_drm_plane_mode_set(base_crtc->primary,
+ fb, 0, 0,
+ base_crtc->hwmode.hdisplay,
+ base_crtc->hwmode.vdisplay,
+ x, y,
+ base_crtc->hwmode.hdisplay,
+ base_crtc->hwmode.vdisplay);
+ if (ret) {
+ DRM_ERROR("failed to mode set a plane\n");
+ return ret;
+ }
+
+ /* apply the new fb addr */
+ xilinx_drm_crtc_commit(base_crtc);
+
+ return 0;
+}
+
+/* update address and information from fb */
+static int xilinx_drm_crtc_mode_set_base(struct drm_crtc *base_crtc,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ /* configure a plane */
+ return _xilinx_drm_crtc_mode_set_base(base_crtc, base_crtc->primary->fb,
+ x, y);
+}
+
+static struct drm_crtc_helper_funcs xilinx_drm_crtc_helper_funcs = {
+ .dpms = xilinx_drm_crtc_dpms,
+ .prepare = xilinx_drm_crtc_prepare,
+ .commit = xilinx_drm_crtc_commit,
+ .mode_fixup = xilinx_drm_crtc_mode_fixup,
+ .mode_set = xilinx_drm_crtc_mode_set,
+ .mode_set_base = xilinx_drm_crtc_mode_set_base,
+};
+
+/* destroy crtc */
+void xilinx_drm_crtc_destroy(struct drm_crtc *base_crtc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+
+ /* make sure crtc is off */
+ xilinx_drm_crtc_dpms(base_crtc, DRM_MODE_DPMS_OFF);
+
+ drm_crtc_cleanup(base_crtc);
+
+ if (crtc->dp_sub)
+ xilinx_drm_dp_sub_put(crtc->dp_sub);
+
+ if (crtc->pixel_clock_enabled) {
+ clk_disable_unprepare(crtc->pixel_clock);
+ crtc->pixel_clock_enabled = false;
+ }
+
+ xilinx_drm_plane_remove_manager(crtc->plane_manager);
+}
+
+/* cancel page flip functions */
+void xilinx_drm_crtc_cancel_page_flip(struct drm_crtc *base_crtc,
+ struct drm_file *file)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ struct drm_device *drm = base_crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ event = crtc->event;
+ if (event && (event->base.file_priv == file)) {
+ crtc->event = NULL;
+ kfree(&event->base);
+ drm_crtc_vblank_put(base_crtc);
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
+/* finish page flip functions */
+static void xilinx_drm_crtc_finish_page_flip(struct drm_crtc *base_crtc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ struct drm_device *drm = base_crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ event = crtc->event;
+ crtc->event = NULL;
+ if (event) {
+ drm_crtc_send_vblank_event(base_crtc, event);
+ drm_crtc_vblank_put(base_crtc);
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
+/* page flip functions */
+static int xilinx_drm_crtc_page_flip(struct drm_crtc *base_crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ struct drm_device *drm = base_crtc->dev;
+ unsigned long flags;
+ int ret;
+
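+ /* reject a new flip while a previous one is still pending */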
+ spin_lock_irqsave(&drm->event_lock, flags);
+ if (crtc->event) {
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+
+ /* configure a plane */
+ ret = _xilinx_drm_crtc_mode_set_base(base_crtc, fb,
+ base_crtc->x, base_crtc->y);
+ if (ret) {
+ DRM_ERROR("failed to mode set a plane\n");
+ return ret;
+ }
+
+ base_crtc->primary->fb = fb;
+
+ if (event) {
+ event->pipe = 0;
+ drm_crtc_vblank_get(base_crtc);
+ spin_lock_irqsave(&drm->event_lock, flags);
+ crtc->event = event;
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ }
+
+ return 0;
+}
+
+/* vblank interrupt handler */
+static void xilinx_drm_crtc_vblank_handler(void *data)
+{
+ struct drm_crtc *base_crtc = data;
+ struct drm_device *drm;
+
+ if (!base_crtc)
+ return;
+
+ drm = base_crtc->dev;
+
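+ /* there is only a single CRTC, so this is always pipe 0 */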
+ drm_handle_vblank(drm, 0);
+ xilinx_drm_crtc_finish_page_flip(base_crtc);
+}
+
+/* enable vblank interrupt */
+void xilinx_drm_crtc_enable_vblank(struct drm_crtc *base_crtc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+
+ if (crtc->vtc)
+ xilinx_vtc_enable_vblank_intr(crtc->vtc,
+ xilinx_drm_crtc_vblank_handler,
+ base_crtc);
+ if (crtc->dp_sub)
+ xilinx_drm_dp_sub_enable_vblank(crtc->dp_sub,
+ xilinx_drm_crtc_vblank_handler,
+ base_crtc);
+#ifdef CONFIG_DRM_XILINX_SDI
+ if (crtc->sdi)
+ xilinx_drm_sdi_enable_vblank(crtc->sdi,
+ xilinx_drm_crtc_vblank_handler,
+ base_crtc);
+#endif
+}
+
+/* disable vblank interrupt */
+void xilinx_drm_crtc_disable_vblank(struct drm_crtc *base_crtc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+
+ if (crtc->dp_sub)
+ xilinx_drm_dp_sub_disable_vblank(crtc->dp_sub);
+ if (crtc->vtc)
+ xilinx_vtc_disable_vblank_intr(crtc->vtc);
+#ifdef CONFIG_DRM_XILINX_SDI
+ if (crtc->sdi)
+ xilinx_drm_sdi_disable_vblank(crtc->sdi);
+#endif
+}
+
+/**
+ * xilinx_drm_crtc_restore - Restore the crtc states
+ * @base_crtc: base crtc object
+ *
+ * Restore the crtc states to the default ones. The request is propagated
+ * to the plane driver.
+ */
+void xilinx_drm_crtc_restore(struct drm_crtc *base_crtc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+
+ xilinx_drm_plane_restore(crtc->plane_manager);
+}
+
+/* check max width */
+unsigned int xilinx_drm_crtc_get_max_width(struct drm_crtc *base_crtc)
+{
+ return xilinx_drm_plane_get_max_width(base_crtc->primary);
+}
+
+/* check format */
+bool xilinx_drm_crtc_check_format(struct drm_crtc *base_crtc, uint32_t fourcc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+
+ return xilinx_drm_plane_check_format(crtc->plane_manager, fourcc);
+}
+
+/* get format */
+uint32_t xilinx_drm_crtc_get_format(struct drm_crtc *base_crtc)
+{
+ return xilinx_drm_plane_get_format(base_crtc->primary);
+}
+
+/**
+ * xilinx_drm_crtc_get_align - Get the alignment value for pitch
+ * @base_crtc: Base crtc object
+ *
+ * Get the alignment value for pitch from the plane
+ *
+ * Return: The alignment value if successful, or the error code.
+ */
+unsigned int xilinx_drm_crtc_get_align(struct drm_crtc *base_crtc)
+{
+ return xilinx_drm_plane_get_align(base_crtc->primary);
+}
+
+static struct drm_crtc_funcs xilinx_drm_crtc_funcs = {
+ .destroy = xilinx_drm_crtc_destroy,
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = xilinx_drm_crtc_page_flip,
+};
+
+/* create crtc */
+struct drm_crtc *xilinx_drm_crtc_create(struct drm_device *drm)
+{
+ struct xilinx_drm_crtc *crtc;
+ struct drm_plane *primary_plane;
+ struct device_node *sub_node;
+ int possible_crtcs = 1;
+ int ret;
+
+ crtc = devm_kzalloc(drm->dev, sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return ERR_PTR(-ENOMEM);
+
+ /* probe chroma resampler and enable */
+ sub_node = of_parse_phandle(drm->dev->of_node, "xlnx,cresample", 0);
+ if (sub_node) {
+ crtc->cresample = xilinx_cresample_probe(drm->dev, sub_node);
+ of_node_put(sub_node);
+ if (IS_ERR(crtc->cresample)) {
+ DRM_ERROR("failed to probe a cresample\n");
+ return ERR_CAST(crtc->cresample);
+ }
+ }
+
+ /* probe color space converter and enable */
+ sub_node = of_parse_phandle(drm->dev->of_node, "xlnx,rgb2yuv", 0);
+ if (sub_node) {
+ crtc->rgb2yuv = xilinx_rgb2yuv_probe(drm->dev, sub_node);
+ of_node_put(sub_node);
+ if (IS_ERR(crtc->rgb2yuv)) {
+ DRM_ERROR("failed to probe a rgb2yuv\n");
+ return ERR_CAST(crtc->rgb2yuv);
+ }
+ }
+
+ /* probe a plane manager */
+ crtc->plane_manager = xilinx_drm_plane_probe_manager(drm);
+ if (IS_ERR(crtc->plane_manager)) {
+ if (PTR_ERR(crtc->plane_manager) != -EPROBE_DEFER)
+ DRM_ERROR("failed to probe a plane manager\n");
+ return ERR_CAST(crtc->plane_manager);
+ }
+
+ /* create a primary plane. there's only one crtc now */
+ primary_plane = xilinx_drm_plane_create_primary(crtc->plane_manager,
+ possible_crtcs);
+ if (IS_ERR(primary_plane)) {
+ DRM_ERROR("failed to create a primary plane for crtc\n");
+ ret = PTR_ERR(primary_plane);
+ goto err_plane;
+ }
+
+ /* create extra planes */
+ xilinx_drm_plane_create_planes(crtc->plane_manager, possible_crtcs);
+
+ crtc->pixel_clock = devm_clk_get(drm->dev, NULL);
+ if (IS_ERR(crtc->pixel_clock)) {
+ if (PTR_ERR(crtc->pixel_clock) == -EPROBE_DEFER) {
+ ret = PTR_ERR(crtc->pixel_clock);
+ goto err_plane;
+ } else {
+ DRM_DEBUG_KMS("failed to get pixel clock\n");
+ crtc->pixel_clock = NULL;
+ }
+ }
+
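+ /* cycle the pixel clock once as a sanity check; dpms ON enables it for real */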
+ ret = clk_prepare_enable(crtc->pixel_clock);
+ if (ret) {
+ DRM_ERROR("failed to enable a pixel clock\n");
+ crtc->pixel_clock_enabled = false;
+ goto err_plane;
+ }
+ clk_disable_unprepare(crtc->pixel_clock);
+
+ sub_node = of_parse_phandle(drm->dev->of_node, "xlnx,vtc", 0);
+ if (sub_node) {
+ crtc->vtc = xilinx_vtc_probe(drm->dev, sub_node);
+ of_node_put(sub_node);
+ if (IS_ERR(crtc->vtc)) {
+ DRM_ERROR("failed to probe video timing controller\n");
+ ret = PTR_ERR(crtc->vtc);
+ goto err_pixel_clk;
+ }
+ }
+
+ crtc->dp_sub = xilinx_drm_dp_sub_of_get(drm->dev->of_node);
+ if (IS_ERR(crtc->dp_sub)) {
+ ret = PTR_ERR(crtc->dp_sub);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("failed to get a dp_sub\n");
+ goto err_pixel_clk;
+ }
+
+#ifdef CONFIG_DRM_XILINX_SDI
+ crtc->sdi = xilinx_drm_sdi_of_get(drm->dev->of_node);
+ if (IS_ERR(crtc->sdi)) {
+ ret = PTR_ERR(crtc->sdi);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("failed to get a sdi\n");
+ goto err_pixel_clk;
+ }
+#endif
+ crtc->dpms = DRM_MODE_DPMS_OFF;
+
+ /* initialize drm crtc */
+ ret = drm_crtc_init_with_planes(drm, &crtc->base, primary_plane,
+ NULL, &xilinx_drm_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("failed to initialize crtc\n");
+ goto err_pixel_clk;
+ }
+ drm_crtc_helper_add(&crtc->base, &xilinx_drm_crtc_helper_funcs);
+
+ return &crtc->base;
+
+err_pixel_clk:
+ if (crtc->pixel_clock_enabled) {
+ clk_disable_unprepare(crtc->pixel_clock);
+ crtc->pixel_clock_enabled = false;
+ }
+err_plane:
+ xilinx_drm_plane_remove_manager(crtc->plane_manager);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_crtc.h b/drivers/gpu/drm/xilinx/xilinx_drm_crtc.h
new file mode 100644
index 000000000000..3566e0eba036
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_crtc.h
@@ -0,0 +1,39 @@
+/*
+ * Xilinx DRM crtc header for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_CRTC_H_
+#define _XILINX_DRM_CRTC_H_
+
+struct drm_device;
+struct drm_crtc;
+
+void xilinx_drm_crtc_enable_vblank(struct drm_crtc *base_crtc);
+void xilinx_drm_crtc_disable_vblank(struct drm_crtc *base_crtc);
+void xilinx_drm_crtc_cancel_page_flip(struct drm_crtc *base_crtc,
+ struct drm_file *file);
+
+void xilinx_drm_crtc_restore(struct drm_crtc *base_crtc);
+
+unsigned int xilinx_drm_crtc_get_max_width(struct drm_crtc *base_crtc);
+bool xilinx_drm_crtc_check_format(struct drm_crtc *base_crtc, uint32_t fourcc);
+uint32_t xilinx_drm_crtc_get_format(struct drm_crtc *base_crtc);
+unsigned int xilinx_drm_crtc_get_align(struct drm_crtc *base_crtc);
+
+struct drm_crtc *xilinx_drm_crtc_create(struct drm_device *drm);
+void xilinx_drm_crtc_destroy(struct drm_crtc *base_crtc);
+
+#endif /* _XILINX_DRM_CRTC_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_dp.c b/drivers/gpu/drm/xilinx/xilinx_drm_dp.c
new file mode 100644
index 000000000000..fdb5e74cc96c
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_dp.c
@@ -0,0 +1,2186 @@
+/*
+ * Xilinx DRM DisplayPort encoder driver for Xilinx
+ *
+ * Copyright (C) 2014 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_encoder_slave.h>
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-zynqmp.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+
+#include "xilinx_drm_dp_sub.h"
+#include "xilinx_drm_drv.h"
+
+static uint xilinx_drm_dp_aux_timeout_ms = 50;
+module_param_named(aux_timeout_ms, xilinx_drm_dp_aux_timeout_ms, uint, 0444);
+MODULE_PARM_DESC(aux_timeout_ms,
+ "DP aux timeout value in msec (default: 50)");
+
+static uint xilinx_drm_dp_power_on_delay_ms = 4;
+module_param_named(power_on_delay_ms, xilinx_drm_dp_power_on_delay_ms, uint,
+ 0644);
+MODULE_PARM_DESC(power_on_delay_ms,
+ "Delay after power on request in msec (default: 4)");
+
+/* Link configuration registers */
+#define XILINX_DP_TX_LINK_BW_SET 0x0
+#define XILINX_DP_TX_LANE_CNT_SET 0x4
+#define XILINX_DP_TX_ENHANCED_FRAME_EN 0x8
+#define XILINX_DP_TX_TRAINING_PATTERN_SET 0xc
+#define XILINX_DP_TX_SCRAMBLING_DISABLE 0x14
+#define XILINX_DP_TX_DOWNSPREAD_CTL 0x18
+#define XILINX_DP_TX_SW_RESET 0x1c
+#define XILINX_DP_TX_SW_RESET_STREAM1 BIT(0)
+#define XILINX_DP_TX_SW_RESET_STREAM2 BIT(1)
+#define XILINX_DP_TX_SW_RESET_STREAM3 BIT(2)
+#define XILINX_DP_TX_SW_RESET_STREAM4 BIT(3)
+#define XILINX_DP_TX_SW_RESET_AUX BIT(7)
+#define XILINX_DP_TX_SW_RESET_ALL (XILINX_DP_TX_SW_RESET_STREAM1 | \
+ XILINX_DP_TX_SW_RESET_STREAM2 | \
+ XILINX_DP_TX_SW_RESET_STREAM3 | \
+ XILINX_DP_TX_SW_RESET_STREAM4 | \
+ XILINX_DP_TX_SW_RESET_AUX)
+
+/* Core enable registers */
+#define XILINX_DP_TX_ENABLE 0x80
+#define XILINX_DP_TX_ENABLE_MAIN_STREAM 0x84
+#define XILINX_DP_TX_FORCE_SCRAMBLER_RESET 0xc0
+#define XILINX_DP_TX_VERSION 0xf8
+#define XILINX_DP_TX_VERSION_MAJOR_MASK (0xff << 24)
+#define XILINX_DP_TX_VERSION_MAJOR_SHIFT 24
+#define XILINX_DP_TX_VERSION_MINOR_MASK (0xff << 16)
+#define XILINX_DP_TX_VERSION_MINOR_SHIFT 16
+#define XILINX_DP_TX_VERSION_REVISION_MASK (0xf << 12)
+#define XILINX_DP_TX_VERSION_REVISION_SHIFT 12
+#define XILINX_DP_TX_VERSION_PATCH_MASK (0xf << 8)
+#define XILINX_DP_TX_VERSION_PATCH_SHIFT 8
+#define XILINX_DP_TX_VERSION_INTERNAL_MASK (0xff << 0)
+#define XILINX_DP_TX_VERSION_INTERNAL_SHIFT 0
+
+/* Core ID registers */
+#define XILINX_DP_TX_CORE_ID 0xfc
+#define XILINX_DP_TX_CORE_ID_MAJOR_MASK (0xff << 24)
+#define XILINX_DP_TX_CORE_ID_MAJOR_SHIFT 24
+#define XILINX_DP_TX_CORE_ID_MINOR_MASK (0xff << 16)
+#define XILINX_DP_TX_CORE_ID_MINOR_SHIFT 16
+#define XILINX_DP_TX_CORE_ID_REVISION_MASK (0xff << 8)
+#define XILINX_DP_TX_CORE_ID_REVISION_SHIFT 8
+#define XILINX_DP_TX_CORE_ID_DIRECTION BIT(0)
+
+/* AUX channel interface registers */
+#define XILINX_DP_TX_AUX_COMMAND 0x100
+#define XILINX_DP_TX_AUX_COMMAND_CMD_SHIFT 8
+#define XILINX_DP_TX_AUX_COMMAND_ADDRESS_ONLY BIT(12)
+#define XILINX_DP_TX_AUX_COMMAND_BYTES_SHIFT 0
+#define XILINX_DP_TX_AUX_WRITE_FIFO 0x104
+#define XILINX_DP_TX_AUX_ADDRESS 0x108
+#define XILINX_DP_TX_CLK_DIVIDER 0x10c
+#define XILINX_DP_TX_CLK_DIVIDER_MHZ 1000000
+#define XILINX_DP_TX_CLK_DIVIDER_AUX_FILTER_SHIFT 8
+#define XILINX_DP_TX_INTR_SIGNAL_STATE 0x130
+#define XILINX_DP_TX_INTR_SIGNAL_STATE_HPD BIT(0)
+#define XILINX_DP_TX_INTR_SIGNAL_STATE_REQUEST BIT(1)
+#define XILINX_DP_TX_INTR_SIGNAL_STATE_REPLY BIT(2)
+#define XILINX_DP_TX_INTR_SIGNAL_STATE_REPLY_TIMEOUT BIT(3)
+#define XILINX_DP_TX_AUX_REPLY_DATA 0x134
+#define XILINX_DP_TX_AUX_REPLY_CODE 0x138
+#define XILINX_DP_TX_AUX_REPLY_CODE_AUX_ACK (0)
+#define XILINX_DP_TX_AUX_REPLY_CODE_AUX_NACK BIT(0)
+#define XILINX_DP_TX_AUX_REPLY_CODE_AUX_DEFER BIT(1)
+#define XILINX_DP_TX_AUX_REPLY_CODE_I2C_ACK (0)
+#define XILINX_DP_TX_AUX_REPLY_CODE_I2C_NACK BIT(2)
+#define XILINX_DP_TX_AUX_REPLY_CODE_I2C_DEFER BIT(3)
+#define XILINX_DP_TX_AUX_REPLY_CNT 0x13c
+#define XILINX_DP_TX_AUX_REPLY_CNT_MASK 0xff
+#define XILINX_DP_TX_INTR_STATUS 0x140
+#define XILINX_DP_TX_INTR_MASK 0x144
+#define XILINX_DP_TX_INTR_HPD_IRQ BIT(0)
+#define XILINX_DP_TX_INTR_HPD_EVENT BIT(1)
+#define XILINX_DP_TX_INTR_REPLY_RECV BIT(2)
+#define XILINX_DP_TX_INTR_REPLY_TIMEOUT BIT(3)
+#define XILINX_DP_TX_INTR_HPD_PULSE BIT(4)
+#define XILINX_DP_TX_INTR_EXT_PKT_TXD BIT(5)
+#define XILINX_DP_TX_INTR_LIV_ABUF_UNDRFLW BIT(12)
+#define XILINX_DP_TX_INTR_VBLANK_START BIT(13)
+#define XILINX_DP_TX_INTR_PIXEL0_MATCH BIT(14)
+#define XILINX_DP_TX_INTR_PIXEL1_MATCH BIT(15)
+#define XILINX_DP_TX_INTR_CHBUF_UNDERFLW_MASK 0x3f0000
+#define XILINX_DP_TX_INTR_CHBUF_OVERFLW_MASK 0xfc00000
+#define XILINX_DP_TX_INTR_CUST_TS_2 BIT(28)
+#define XILINX_DP_TX_INTR_CUST_TS BIT(29)
+#define XILINX_DP_TX_INTR_EXT_VSYNC_TS BIT(30)
+#define XILINX_DP_TX_INTR_VSYNC_TS BIT(31)
+#define XILINX_DP_TX_INTR_ALL (XILINX_DP_TX_INTR_HPD_IRQ | \
+ XILINX_DP_TX_INTR_HPD_EVENT | \
+ XILINX_DP_TX_INTR_REPLY_RECV | \
+ XILINX_DP_TX_INTR_REPLY_TIMEOUT | \
+ XILINX_DP_TX_INTR_HPD_PULSE | \
+ XILINX_DP_TX_INTR_EXT_PKT_TXD | \
+ XILINX_DP_TX_INTR_LIV_ABUF_UNDRFLW | \
+ XILINX_DP_TX_INTR_VBLANK_START | \
+ XILINX_DP_TX_INTR_CHBUF_UNDERFLW_MASK | \
+ XILINX_DP_TX_INTR_CHBUF_OVERFLW_MASK)
+#define XILINX_DP_TX_REPLY_DATA_CNT 0x148
+#define XILINX_DP_SUB_TX_INTR_STATUS 0x3a0
+#define XILINX_DP_SUB_TX_INTR_MASK 0x3a4
+#define XILINX_DP_SUB_TX_INTR_EN 0x3a8
+#define XILINX_DP_SUB_TX_INTR_DS 0x3ac
+
+/* Main stream attribute registers */
+#define XILINX_DP_TX_MAIN_STREAM_HTOTAL 0x180
+#define XILINX_DP_TX_MAIN_STREAM_VTOTAL 0x184
+#define XILINX_DP_TX_MAIN_STREAM_POLARITY 0x188
+#define XILINX_DP_TX_MAIN_STREAM_POLARITY_HSYNC_SHIFT 0
+#define XILINX_DP_TX_MAIN_STREAM_POLARITY_VSYNC_SHIFT 1
+#define XILINX_DP_TX_MAIN_STREAM_HSWIDTH 0x18c
+#define XILINX_DP_TX_MAIN_STREAM_VSWIDTH 0x190
+#define XILINX_DP_TX_MAIN_STREAM_HRES 0x194
+#define XILINX_DP_TX_MAIN_STREAM_VRES 0x198
+#define XILINX_DP_TX_MAIN_STREAM_HSTART 0x19c
+#define XILINX_DP_TX_MAIN_STREAM_VSTART 0x1a0
+#define XILINX_DP_TX_MAIN_STREAM_MISC0 0x1a4
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_SYNC BIT(0)
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_FORMAT_SHIFT 1
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_DYNAMIC_RANGE BIT(3)
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_YCBCR_COLRIMETRY BIT(4)
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_BPC_SHIFT 5
+#define XILINX_DP_TX_MAIN_STREAM_MISC1 0x1a8
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_INTERLACED_VERT BIT(0)
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_STEREO_VID_SHIFT 1
+#define XILINX_DP_TX_M_VID 0x1ac
+#define XILINX_DP_TX_TRANSFER_UNIT_SIZE 0x1b0
+#define XILINX_DP_TX_DEF_TRANSFER_UNIT_SIZE 64
+#define XILINX_DP_TX_N_VID 0x1b4
+#define XILINX_DP_TX_USER_PIXEL_WIDTH 0x1b8
+#define XILINX_DP_TX_USER_DATA_CNT_PER_LANE 0x1bc
+#define XILINX_DP_TX_MIN_BYTES_PER_TU 0x1c4
+#define XILINX_DP_TX_FRAC_BYTES_PER_TU 0x1c8
+#define XILINX_DP_TX_INIT_WAIT 0x1cc
+
+/* PHY configuration and status registers */
+#define XILINX_DP_TX_PHY_CONFIG 0x200
+#define XILINX_DP_TX_PHY_CONFIG_PHY_RESET BIT(0)
+#define XILINX_DP_TX_PHY_CONFIG_GTTX_RESET BIT(1)
+#define XILINX_DP_TX_PHY_CONFIG_PHY_PMA_RESET BIT(8)
+#define XILINX_DP_TX_PHY_CONFIG_PHY_PCS_RESET BIT(9)
+#define XILINX_DP_TX_PHY_CONFIG_ALL_RESET (XILINX_DP_TX_PHY_CONFIG_PHY_RESET | \
+ XILINX_DP_TX_PHY_CONFIG_GTTX_RESET | \
+ XILINX_DP_TX_PHY_CONFIG_PHY_PMA_RESET | \
+ XILINX_DP_TX_PHY_CONFIG_PHY_PCS_RESET)
+#define XILINX_DP_TX_PHY_PREEMPHASIS_LANE_0 0x210
+#define XILINX_DP_TX_PHY_PREEMPHASIS_LANE_1 0x214
+#define XILINX_DP_TX_PHY_PREEMPHASIS_LANE_2 0x218
+#define XILINX_DP_TX_PHY_PREEMPHASIS_LANE_3 0x21c
+#define XILINX_DP_TX_PHY_VOLTAGE_DIFF_LANE_0 0x220
+#define XILINX_DP_TX_PHY_VOLTAGE_DIFF_LANE_1 0x224
+#define XILINX_DP_TX_PHY_VOLTAGE_DIFF_LANE_2 0x228
+#define XILINX_DP_TX_PHY_VOLTAGE_DIFF_LANE_3 0x22c
+#define XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING 0x234
+#define XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_162 0x1
+#define XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_270 0x3
+#define XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_540 0x5
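+/* the _162/_270/_540 settings select the 1.62, 2.70 and 5.40 Gb/s link rates */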
+#define XILINX_DP_TX_PHY_POWER_DOWN 0x238
+#define XILINX_DP_TX_PHY_POWER_DOWN_LANE_0 BIT(0)
+#define XILINX_DP_TX_PHY_POWER_DOWN_LANE_1 BIT(1)
+#define XILINX_DP_TX_PHY_POWER_DOWN_LANE_2 BIT(2)
+#define XILINX_DP_TX_PHY_POWER_DOWN_LANE_3 BIT(3)
+#define XILINX_DP_TX_PHY_POWER_DOWN_ALL 0xf
+#define XILINX_DP_TX_PHY_PRECURSOR_LANE_0 0x23c
+#define XILINX_DP_TX_PHY_PRECURSOR_LANE_1 0x240
+#define XILINX_DP_TX_PHY_PRECURSOR_LANE_2 0x244
+#define XILINX_DP_TX_PHY_PRECURSOR_LANE_3 0x248
+#define XILINX_DP_TX_PHY_POSTCURSOR_LANE_0 0x24c
+#define XILINX_DP_TX_PHY_POSTCURSOR_LANE_1 0x250
+#define XILINX_DP_TX_PHY_POSTCURSOR_LANE_2 0x254
+#define XILINX_DP_TX_PHY_POSTCURSOR_LANE_3 0x258
+#define XILINX_DP_SUB_TX_PHY_PRECURSOR_LANE_0 0x24c
+#define XILINX_DP_SUB_TX_PHY_PRECURSOR_LANE_1 0x250
+#define XILINX_DP_TX_PHY_STATUS 0x280
+#define XILINX_DP_TX_PHY_STATUS_PLL_LOCKED_SHIFT 4
+#define XILINX_DP_TX_PHY_STATUS_FPGA_PLL_LOCKED BIT(6)
+
+/* Audio registers */
+#define XILINX_DP_TX_AUDIO_CONTROL 0x300
+#define XILINX_DP_TX_AUDIO_CHANNELS 0x304
+#define XILINX_DP_TX_AUDIO_INFO_DATA 0x308
+#define XILINX_DP_TX_AUDIO_M_AUD 0x328
+#define XILINX_DP_TX_AUDIO_N_AUD 0x32c
+#define XILINX_DP_TX_AUDIO_EXT_DATA 0x330
+
+#define XILINX_DP_MISC0_RGB (0)
+#define XILINX_DP_MISC0_YCRCB_422 (5 << 1)
+#define XILINX_DP_MISC0_YCRCB_444 (6 << 1)
+#define XILINX_DP_MISC0_FORMAT_MASK 0xe
+#define XILINX_DP_MISC0_BPC_6 (0 << 5)
+#define XILINX_DP_MISC0_BPC_8 (1 << 5)
+#define XILINX_DP_MISC0_BPC_10 (2 << 5)
+#define XILINX_DP_MISC0_BPC_12 (3 << 5)
+#define XILINX_DP_MISC0_BPC_16 (4 << 5)
+#define XILINX_DP_MISC0_BPC_MASK 0xe0
+#define XILINX_DP_MISC1_Y_ONLY (1 << 7)
+
+#define DP_REDUCED_BIT_RATE 162000
+#define DP_HIGH_BIT_RATE 270000
+#define DP_HIGH_BIT_RATE2 540000
+#define DP_MAX_TRAINING_TRIES 5
+#define DP_MAX_LANES 4
+
+enum dp_version {
+ DP_V1_1A = 0x11,
+ DP_V1_2 = 0x12
+};
+
+/**
+ * struct xilinx_drm_dp_link_config - Common link config between source and sink
+ * @max_rate: maximum link rate
+ * @max_lanes: maximum number of lanes
+ */
+struct xilinx_drm_dp_link_config {
+ int max_rate;
+ u8 max_lanes;
+};
+
+/**
+ * struct xilinx_drm_dp_mode - Configured mode of DisplayPort
+ * @bw_code: code for bandwidth(link rate)
+ * @lane_cnt: number of lanes
+ * @pclock: pixel clock frequency of current mode
+ */
+struct xilinx_drm_dp_mode {
+ u8 bw_code;
+ u8 lane_cnt;
+ int pclock;
+};
+
+/**
+ * struct xilinx_drm_dp_config - Configuration of DisplayPort from DTS
+ * @dp_version: DisplayPort protocol version
+ * @max_lanes: max number of lanes
+ * @max_link_rate: max link rate
+ * @max_bpc: maximum bits-per-color
+ * @max_pclock: maximum pixel clock rate
+ * @enable_yonly: enable yonly color space logic
+ * @enable_ycrcb: enable ycrcb color space logic
+ * @misc0: misc0 configuration (per DP v1.2 spec)
+ * @misc1: misc1 configuration (per DP v1.2 spec)
+ * @bpp: bits per pixel
+ */
+struct xilinx_drm_dp_config {
+ enum dp_version dp_version;
+ u32 max_lanes;
+ u32 max_link_rate;
+ u32 max_bpc;
+ u32 max_pclock;
+ bool enable_yonly;
+ bool enable_ycrcb;
+
+ u8 misc0;
+ u8 misc1;
+ u8 bpp;
+};
+
+/**
+ * struct xilinx_drm_dp - Xilinx DisplayPort core
+ * @encoder: pointer to the drm encoder structure
+ * @dev: device structure
+ * @iomem: device I/O memory for register access
+ * @config: IP core configuration from DTS
+ * @aux: aux channel
+ * @dp_sub: DisplayPort subsystem
+ * @phy: PHY handles for DP lanes
+ * @aclk: clock source device for internal axi4-lite clock
+ * @aud_clk: clock source device for audio clock
+ * @aud_clk_enabled: if audio clock is enabled
+ * @dpms: current dpms state
+ * @status: the connection status
+ * @dpcd: DP configuration data from currently connected sink device
+ * @link_config: common link configuration between IP core and sink device
+ * @mode: current mode between IP core and sink device
+ * @train_set: set of training data
+ */
+struct xilinx_drm_dp {
+ struct drm_encoder *encoder;
+ struct device *dev;
+ void __iomem *iomem;
+
+ struct xilinx_drm_dp_config config;
+ struct drm_dp_aux aux;
+ struct xilinx_drm_dp_sub *dp_sub;
+ struct phy *phy[DP_MAX_LANES];
+ struct clk *aclk;
+ struct clk *aud_clk;
+ bool aud_clk_enabled;
+
+ int dpms;
+ enum drm_connector_status status;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ struct xilinx_drm_dp_link_config link_config;
+ struct xilinx_drm_dp_mode mode;
+ u8 train_set[DP_MAX_LANES];
+};
+
+static inline struct xilinx_drm_dp *to_dp(struct drm_encoder *encoder)
+{
+ return to_encoder_slave(encoder)->slave_priv;
+}
+
+#define AUX_READ_BIT 0x1
+
+#ifdef CONFIG_DRM_XILINX_DP_DEBUG_FS
+#define XILINX_DP_DEBUGFS_READ_MAX_SIZE 32
+#define XILINX_DP_DEBUGFS_UINT8_MAX_STR "255"
+#define IN_RANGE(x, min, max) ({ \
+ typeof(x) _x = (x); \
+ _x >= (min) && _x <= (max); })
+
+static inline int xilinx_drm_dp_max_rate(int link_rate, u8 lane_num, u8 bpp);
+/* Match xilinx_dp_testcases vs dp_debugfs_reqs[] entry */
+enum xilinx_dp_testcases {
+ DP_TC_LINK_RATE,
+ DP_TC_LANE_COUNT,
+ DP_TC_OUTPUT_FMT,
+ DP_TC_NONE
+};
+
+struct xilinx_dp_debugfs {
+ enum xilinx_dp_testcases testcase;
+ u8 link_rate;
+ u8 lane_cnt;
+ u8 old_output_fmt;
+ struct xilinx_drm_dp *dp;
+};
+
+static struct xilinx_dp_debugfs dp_debugfs;
+struct xilinx_dp_debugfs_request {
+ const char *req;
+ enum xilinx_dp_testcases tc;
+ ssize_t (*read_handler)(char **kern_buff);
+ ssize_t (*write_handler)(char **cmd);
+};
+
+static s64 xilinx_dp_debugfs_argument_value(char *arg)
+{
+ s64 value;
+
+ if (!arg)
+ return -1;
+
+ if (!kstrtos64(arg, 0, &value))
+ return value;
+
+ return -1;
+}
+
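+/*
+ * Update the MISC0/MISC1 main stream attributes and bpp for the requested
+ * output format. Per the register defines above, MISC0 carries the color
+ * format (XILINX_DP_MISC0_FORMAT_MASK) and the bits-per-color
+ * (XILINX_DP_MISC0_BPC_MASK), while Y-only output is signalled via MISC1
+ * (XILINX_DP_MISC1_Y_ONLY).
+ */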
+static int xilinx_dp_update_output_format(u8 output_fmt, u32 num_colors)
+{
+ struct xilinx_drm_dp *dp = dp_debugfs.dp;
+ struct xilinx_drm_dp_config *config = &dp->config;
+ u32 bpc;
+ u8 bpc_bits = (config->misc0 & XILINX_DP_MISC0_BPC_MASK);
+ bool misc1 = output_fmt & XILINX_DP_MISC1_Y_ONLY ? true : false;
+
+ switch (bpc_bits) {
+ case XILINX_DP_MISC0_BPC_6:
+ bpc = 6;
+ break;
+ case XILINX_DP_MISC0_BPC_8:
+ bpc = 8;
+ break;
+ case XILINX_DP_MISC0_BPC_10:
+ bpc = 10;
+ break;
+ case XILINX_DP_MISC0_BPC_12:
+ bpc = 12;
+ break;
+ case XILINX_DP_MISC0_BPC_16:
+ bpc = 16;
+ break;
+ default:
+ dev_err(dp->dev, "Invalid bpc count for misc0\n");
+ return -EINVAL;
+ }
+
+ /* clear old format */
+ config->misc0 &= ~XILINX_DP_MISC0_FORMAT_MASK;
+ config->misc1 &= ~XILINX_DP_MISC1_Y_ONLY;
+
+ if (misc1) {
+ config->misc1 |= output_fmt;
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_MISC1,
+ config->misc1);
+ } else {
+ config->misc0 |= output_fmt;
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_MISC0,
+ config->misc0);
+ }
+ config->bpp = num_colors * bpc;
+
+ return 0;
+}
+
+static ssize_t xilinx_dp_debugfs_max_linkrate_write(char **dp_test_arg)
+{
+ char *link_rate_arg;
+ s64 link_rate;
+
+ link_rate_arg = strsep(dp_test_arg, " ");
+ link_rate = xilinx_dp_debugfs_argument_value(link_rate_arg);
+ if (link_rate < 0 || (link_rate != DP_HIGH_BIT_RATE2 &&
+ link_rate != DP_HIGH_BIT_RATE &&
+ link_rate != DP_REDUCED_BIT_RATE))
+ return -EINVAL;
+
+ dp_debugfs.link_rate = drm_dp_link_rate_to_bw_code(link_rate);
+ dp_debugfs.testcase = DP_TC_LINK_RATE;
+
+ return 0;
+}
+
+static ssize_t xilinx_dp_debugfs_max_lanecnt_write(char **dp_test_arg)
+{
+ char *lane_cnt_arg;
+ s64 lane_count;
+
+ lane_cnt_arg = strsep(dp_test_arg, " ");
+ lane_count = xilinx_dp_debugfs_argument_value(lane_cnt_arg);
+ if (lane_count < 0 || !IN_RANGE(lane_count, 1,
+ dp_debugfs.dp->config.max_lanes))
+ return -EINVAL;
+
+ dp_debugfs.lane_cnt = lane_count;
+ dp_debugfs.testcase = DP_TC_LANE_COUNT;
+
+ return 0;
+}
+
+static ssize_t xilinx_dp_debugfs_output_display_format_write(char **dp_test_arg)
+{
+ int ret;
+ struct xilinx_drm_dp *dp = dp_debugfs.dp;
+ char *output_format;
+ u8 output_fmt;
+ u32 num_colors;
+
+ /* Read the output format from the user argument */
+ output_format = strsep(dp_test_arg, " ");
+
+ if (strncmp(output_format, "rgb", 3) == 0) {
+ output_fmt = XILINX_DP_MISC0_RGB;
+ num_colors = 3;
+ } else if (strncmp(output_format, "ycbcr422", 8) == 0) {
+ output_fmt = XILINX_DP_MISC0_YCRCB_422;
+ num_colors = 2;
+ } else if (strncmp(output_format, "ycbcr444", 8) == 0) {
+ output_fmt = XILINX_DP_MISC0_YCRCB_444;
+ num_colors = 3;
+ } else if (strncmp(output_format, "yonly", 5) == 0) {
+ output_fmt = XILINX_DP_MISC1_Y_ONLY;
+ num_colors = 1;
+ } else {
+ dev_err(dp->dev, "Invalid output format\n");
+ return -EINVAL;
+ }
+
+ if (dp->config.misc1 & XILINX_DP_MISC1_Y_ONLY)
+ dp_debugfs.old_output_fmt = XILINX_DP_MISC1_Y_ONLY;
+ else
+ dp_debugfs.old_output_fmt = dp->config.misc0 &
+ XILINX_DP_MISC0_FORMAT_MASK;
+
+ ret = xilinx_dp_update_output_format(output_fmt, num_colors);
+ if (!ret)
+ dp_debugfs.testcase = DP_TC_OUTPUT_FMT;
+ return ret;
+}
+
+static ssize_t xilinx_dp_debugfs_max_linkrate_read(char **kern_buff)
+{
+ struct xilinx_drm_dp *dp = dp_debugfs.dp;
+ size_t output_str_len;
+ u8 dpcd_link_bw;
+ int ret;
+
+ dp_debugfs.testcase = DP_TC_NONE;
+ dp_debugfs.link_rate = 0;
+
+ /* Get the sink-side link rate */
+ ret = drm_dp_dpcd_readb(&dp->aux, DP_LINK_BW_SET, &dpcd_link_bw);
+ if (ret < 0) {
+ dev_err(dp->dev, "Failed to read link rate via AUX.\n");
+ kfree(*kern_buff);
+ return ret;
+ }
+
+ output_str_len = strlen(XILINX_DP_DEBUGFS_UINT8_MAX_STR);
+ output_str_len = min_t(size_t, XILINX_DP_DEBUGFS_READ_MAX_SIZE,
+ output_str_len);
+ snprintf(*kern_buff, output_str_len + 1, "%u", dpcd_link_bw);
+
+ return 0;
+}
+
+static ssize_t xilinx_dp_debugfs_max_lanecnt_read(char **kern_buff)
+{
+ struct xilinx_drm_dp *dp = dp_debugfs.dp;
+ size_t output_str_len;
+ u8 dpcd_lane_cnt;
+ int ret;
+
+ dp_debugfs.testcase = DP_TC_NONE;
+ dp_debugfs.lane_cnt = 0;
+
+ /* Get the sink-side lane count */
+ ret = drm_dp_dpcd_readb(&dp->aux, DP_LANE_COUNT_SET, &dpcd_lane_cnt);
+ if (ret < 0) {
+ dev_err(dp->dev, "Failed to read link rate via AUX.\n");
+ kfree(*kern_buff);
+ return ret;
+ }
+
+ dpcd_lane_cnt &= DP_LANE_COUNT_MASK;
+ output_str_len = strlen(XILINX_DP_DEBUGFS_UINT8_MAX_STR);
+ output_str_len = min_t(size_t, XILINX_DP_DEBUGFS_READ_MAX_SIZE,
+ output_str_len);
+ snprintf(*kern_buff, output_str_len + 1, "%u", dpcd_lane_cnt);
+
+ return 0;
+}
+
+static ssize_t
+xilinx_dp_debugfs_output_display_format_read(char **kern_buff)
+{
+ int ret;
+ struct xilinx_drm_dp *dp = dp_debugfs.dp;
+ u8 old_output_fmt = dp_debugfs.old_output_fmt;
+ size_t output_str_len;
+ u32 num_colors;
+
+ dp_debugfs.testcase = DP_TC_NONE;
+
+ if (old_output_fmt == XILINX_DP_MISC0_RGB) {
+ num_colors = 3;
+ } else if (old_output_fmt == XILINX_DP_MISC0_YCRCB_422) {
+ num_colors = 2;
+ } else if (old_output_fmt == XILINX_DP_MISC0_YCRCB_444) {
+ num_colors = 3;
+ } else if (old_output_fmt == XILINX_DP_MISC1_Y_ONLY) {
+ num_colors = 1;
+ } else {
+ dev_err(dp->dev, "Invalid output format in misc0\n");
+ return -EINVAL;
+ }
+
+ ret = xilinx_dp_update_output_format(old_output_fmt, num_colors);
+ if (ret)
+ return ret;
+
+ output_str_len = strlen("Success");
+ output_str_len = min_t(size_t, XILINX_DP_DEBUGFS_READ_MAX_SIZE,
+ output_str_len);
+ snprintf(*kern_buff, output_str_len + 1, "%s", "Success");
+
+ return 0;
+}
+
+/* Match xilinx_dp_testcases vs dp_debugfs_reqs[] entry */
+static struct xilinx_dp_debugfs_request dp_debugfs_reqs[] = {
+ {"LINK_RATE", DP_TC_LINK_RATE,
+ xilinx_dp_debugfs_max_linkrate_read,
+ xilinx_dp_debugfs_max_linkrate_write},
+ {"LANE_COUNT", DP_TC_LANE_COUNT,
+ xilinx_dp_debugfs_max_lanecnt_read,
+ xilinx_dp_debugfs_max_lanecnt_write},
+ {"OUTPUT_DISPLAY_FORMAT", DP_TC_OUTPUT_FMT,
+ xilinx_dp_debugfs_output_display_format_read,
+ xilinx_dp_debugfs_output_display_format_write},
+};
+
+static ssize_t xilinx_dp_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *kern_buff = NULL;
+ size_t kern_buff_len, out_str_len;
+ int ret;
+
+ if (!size)
+ return -EINVAL;
+
+ if (*pos != 0)
+ return 0;
+
+ kern_buff = kzalloc(XILINX_DP_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
+ if (!kern_buff) {
+ dp_debugfs.testcase = DP_TC_NONE;
+ return -ENOMEM;
+ }
+
+ if (dp_debugfs.testcase == DP_TC_NONE) {
+ out_str_len = strlen("No testcase executed");
+ out_str_len = min_t(size_t, XILINX_DP_DEBUGFS_READ_MAX_SIZE,
+ out_str_len);
+ snprintf(kern_buff, out_str_len + 1, "%s", "No testcase executed");
+ } else {
+ ret = dp_debugfs_reqs[dp_debugfs.testcase].read_handler(
+ &kern_buff);
+ if (ret) {
+ kfree(kern_buff);
+ return ret;
+ }
+ }
+
+ kern_buff_len = strlen(kern_buff);
+ size = min(size, kern_buff_len);
+
+ ret = copy_to_user(buf, kern_buff, size);
+
+ kfree(kern_buff);
+ if (ret)
+ return -EFAULT;
+
+ *pos = size + 1;
+ return size;
+}
+
+static ssize_t
+xilinx_dp_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *kern_buff, *kern_buff_start;
+ char *dp_test_req;
+ int ret;
+ int i;
+
+ if (*pos != 0 || !size)
+ return -EINVAL;
+
+ if (dp_debugfs.testcase != DP_TC_NONE)
+ return -EBUSY;
+
+ kern_buff = kzalloc(size, GFP_KERNEL);
+ if (!kern_buff)
+ return -ENOMEM;
+ kern_buff_start = kern_buff;
+
+ ret = strncpy_from_user(kern_buff, buf, size);
+ if (ret < 0) {
+ kfree(kern_buff_start);
+ return ret;
+ }
+
+ /* Read the testcase name and argument from the user request */
+ dp_test_req = strsep(&kern_buff, " ");
+
+ for (i = 0; i < ARRAY_SIZE(dp_debugfs_reqs); i++) {
+ if (!strcasecmp(dp_test_req, dp_debugfs_reqs[i].req))
+ if (!dp_debugfs_reqs[i].write_handler(&kern_buff)) {
+ kfree(kern_buff_start);
+ return size;
+ }
+ }
+
+ kfree(kern_buff_start);
+ return -EINVAL;
+}
+
+static const struct file_operations fops_xilinx_dp_dbgfs = {
+ .owner = THIS_MODULE,
+ .read = xilinx_dp_debugfs_read,
+ .write = xilinx_dp_debugfs_write,
+};
+
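+/*
+ * Example usage, assuming debugfs is mounted at /sys/kernel/debug:
+ *
+ *   echo "LINK_RATE 270000" > /sys/kernel/debug/dp/testcase
+ *   cat /sys/kernel/debug/dp/testcase
+ *
+ * The write selects a testcase and stores the requested value; the
+ * subsequent read reports the value read back from the sink via AUX and
+ * resets the testcase to DP_TC_NONE.
+ */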
+static int xilinx_dp_debugfs_init(struct xilinx_drm_dp *dp)
+{
+ int err;
+ struct dentry *xilinx_dp_debugfs_dir, *xilinx_dp_debugfs_file;
+
+ dp_debugfs.testcase = DP_TC_NONE;
+ dp_debugfs.dp = dp;
+
+ xilinx_dp_debugfs_dir = debugfs_create_dir("dp", NULL);
+ if (!xilinx_dp_debugfs_dir) {
+ dev_err(dp->dev, "debugfs_create_dir failed\n");
+ return -ENODEV;
+ }
+
+ xilinx_dp_debugfs_file =
+ debugfs_create_file("testcase", 0444, xilinx_dp_debugfs_dir,
+ NULL, &fops_xilinx_dp_dbgfs);
+ if (!xilinx_dp_debugfs_file) {
+ dev_err(dp->dev, "debugfs_create_file testcase failed\n");
+ err = -ENODEV;
+ goto err_dbgfs;
+ }
+ return 0;
+
+err_dbgfs:
+ debugfs_remove_recursive(xilinx_dp_debugfs_dir);
+ xilinx_dp_debugfs_dir = NULL;
+ return err;
+}
+
+static void xilinx_dp_debugfs_mode_config(struct xilinx_drm_dp *dp, u8 *lanes,
+ u8 *rate_code, int pclock)
+{
+ int debugfs_rate = 0;
+ u8 bpp = dp->config.bpp;
+
+ if (!dp_debugfs.link_rate && !dp_debugfs.lane_cnt)
+ return;
+
+ if (dp_debugfs.link_rate) {
+ debugfs_rate = min(dp_debugfs.link_rate, *rate_code);
+ debugfs_rate =
+ drm_dp_bw_code_to_link_rate(debugfs_rate);
+ debugfs_rate =
+ xilinx_drm_dp_max_rate(debugfs_rate, *lanes, bpp);
+ }
+
+ if (dp_debugfs.lane_cnt) {
+ u8 lane;
+
+ lane = min(dp_debugfs.lane_cnt, *lanes);
+ debugfs_rate =
+ xilinx_drm_dp_max_rate(debugfs_rate, lane, bpp);
+ }
+
+ if (pclock > debugfs_rate) {
+ dev_dbg(dp->dev, "debugfs could't configure link values\n");
+ return;
+ }
+
+ if (dp_debugfs.link_rate)
+ *rate_code = dp_debugfs.link_rate;
+ if (dp_debugfs.lane_cnt)
+ *lanes = dp_debugfs.lane_cnt;
+
+}
+#else
+static int xilinx_dp_debugfs_init(struct xilinx_drm_dp *dp)
+{
+ return 0;
+}
+
+static void xilinx_dp_debugfs_mode_config(struct xilinx_drm_dp *dp, u8 *lanes,
+ u8 *rate_code, int pclock)
+{
+}
+#endif /* DRM_XILINX_DP_DEBUG_FS */
+
+/**
+ * xilinx_drm_dp_aux_cmd_submit - Submit aux command
+ * @dp: DisplayPort IP core structure
+ * @cmd: aux command
+ * @addr: aux address
+ * @buf: buffer for command data
+ * @bytes: number of bytes for @buf
+ * @reply: reply code to be returned
+ *
+ * Submit an aux command. All aux related commands, native or i2c aux
+ * read/write, are submitted through this function. The function is mapped to
+ * the transfer function of struct drm_dp_aux. This function involves
+ * multiple register reads/writes, so synchronization is needed; it is
+ * handled by drm_dp_helper using @hw_mutex. The calling thread sleeps
+ * if there's no immediate reply to the command submission. The reply code is
+ * returned at @reply if @reply != NULL.
+ *
+ * Return: 0 if the command is submitted properly, or corresponding error code:
+ * -EBUSY when there is any request already being processed
+ * -ETIMEDOUT when receiving reply is timed out
+ * -EIO when received bytes are less than requested
+ */
+static int xilinx_drm_dp_aux_cmd_submit(struct xilinx_drm_dp *dp, u32 cmd,
+ u16 addr, u8 *buf, u8 bytes, u8 *reply)
+{
+ bool is_read = (cmd & AUX_READ_BIT) ? true : false;
+ void __iomem *iomem = dp->iomem;
+ u32 reg, i;
+
+ reg = xilinx_drm_readl(iomem, XILINX_DP_TX_INTR_SIGNAL_STATE);
+ if (reg & XILINX_DP_TX_INTR_SIGNAL_STATE_REQUEST)
+ return -EBUSY;
+
+ xilinx_drm_writel(iomem, XILINX_DP_TX_AUX_ADDRESS, addr);
+
+ if (!is_read)
+ for (i = 0; i < bytes; i++)
+ xilinx_drm_writel(iomem, XILINX_DP_TX_AUX_WRITE_FIFO,
+ buf[i]);
+
+ reg = cmd << XILINX_DP_TX_AUX_COMMAND_CMD_SHIFT;
+ if (!buf || !bytes)
+ reg |= XILINX_DP_TX_AUX_COMMAND_ADDRESS_ONLY;
+ else
+ reg |= (bytes - 1) << XILINX_DP_TX_AUX_COMMAND_BYTES_SHIFT;
+ xilinx_drm_writel(iomem, XILINX_DP_TX_AUX_COMMAND, reg);
+
+ /* Wait up to 2 ms for the reply to be delivered */
+ for (i = 0; ; i++) {
+ reg = xilinx_drm_readl(iomem, XILINX_DP_TX_INTR_SIGNAL_STATE);
+
+ if (reg & XILINX_DP_TX_INTR_SIGNAL_STATE_REPLY)
+ break;
+
+ if (reg & XILINX_DP_TX_INTR_SIGNAL_STATE_REPLY_TIMEOUT ||
+ i == 2)
+ return -ETIMEDOUT;
+
+ usleep_range(1000, 1100);
+ }
+
+ reg = xilinx_drm_readl(iomem, XILINX_DP_TX_AUX_REPLY_CODE);
+ if (reply)
+ *reply = reg;
+
+ if (is_read &&
+ (reg == XILINX_DP_TX_AUX_REPLY_CODE_AUX_ACK ||
+ reg == XILINX_DP_TX_AUX_REPLY_CODE_I2C_ACK)) {
+ reg = xilinx_drm_readl(iomem, XILINX_DP_TX_REPLY_DATA_CNT);
+ if ((reg & XILINX_DP_TX_AUX_REPLY_CNT_MASK) != bytes)
+ return -EIO;
+
+ for (i = 0; i < bytes; i++)
+ buf[i] = xilinx_drm_readl(iomem,
+ XILINX_DP_TX_AUX_REPLY_DATA);
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_phy_ready - Check if PHY is ready
+ * @dp: DisplayPort IP core structure
+ *
+ * Check if the PHY is ready. If not, poll every 1 ms, up to 100 times.
+ * This amount of delay was suggested by the IP designer.
+ *
+ * Return: 0 if PHY is ready, or -ENODEV if PHY is not ready.
+ */
+static int xilinx_drm_dp_phy_ready(struct xilinx_drm_dp *dp)
+{
+ u32 i, reg, ready, lane;
+
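+ /* One ready bit per lane; non-subsystem cores also wait for the FPGA PLL */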
+ lane = dp->config.max_lanes;
+ ready = (1 << lane) - 1;
+ if (!dp->dp_sub)
+ ready |= XILINX_DP_TX_PHY_STATUS_FPGA_PLL_LOCKED;
+
+ /* Wait for 100 * 1 ms; this should be enough time for the PHY to be ready */
+ for (i = 0; ; i++) {
+ reg = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_PHY_STATUS);
+ if ((reg & ready) == ready)
+ return 0;
+
+ if (i == 100) {
+ DRM_ERROR("PHY isn't ready\n");
+ return -ENODEV;
+ }
+
+ usleep_range(1000, 1100);
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_max_rate - Calculate and return available max pixel clock
+ * @link_rate: link rate (Kilo-bytes / sec)
+ * @lane_num: number of lanes
+ * @bpp: bits per pixel
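+ *
+ * For example, a 2.7 Gbps link (270000 KB/s per lane) with 4 lanes at
+ * 24 bpp allows a pixel clock of up to 270000 * 4 * 8 / 24 = 360000 KHz.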
+ *
+ * Return: max pixel clock (KHz) supported by current link config.
+ */
+static inline int xilinx_drm_dp_max_rate(int link_rate, u8 lane_num, u8 bpp)
+{
+ return link_rate * lane_num * 8 / bpp;
+}
+
+/**
+ * xilinx_drm_dp_mode_configure - Configure the link values
+ * @dp: DisplayPort IP core structure
+ * @pclock: pixel clock for requested display mode
+ * @current_bw: current link rate
+ *
+ * Find the link configuration values, rate and lane count for requested pixel
+ * clock @pclock. The @pclock is stored in the mode to be used in other
+ * functions later. The returned rate is downshifted from the current rate
+ * @current_bw.
+ *
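+ * For example, if training previously ran at 5.4 Gbps (@current_bw ==
+ * DP_LINK_BW_5_4), the next candidate rate tried is 2.7 Gbps.
+ *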
+ * Return: Current link rate code, or -EINVAL.
+ */
+static int xilinx_drm_dp_mode_configure(struct xilinx_drm_dp *dp, int pclock,
+ u8 current_bw)
+{
+ int max_rate = dp->link_config.max_rate;
+ u8 bws[3] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 max_link_rate_code = drm_dp_link_rate_to_bw_code(max_rate);
+ u8 bpp = dp->config.bpp;
+ u8 lane_cnt;
+ s8 i;
+
+ if (current_bw == DP_LINK_BW_1_62)
+ return -EINVAL;
+
+ xilinx_dp_debugfs_mode_config(dp, &max_lanes, &max_link_rate_code,
+ pclock);
+
+ for (i = ARRAY_SIZE(bws) - 1; i >= 0; i--) {
+ if (current_bw && bws[i] >= current_bw)
+ continue;
+
+ if (bws[i] <= max_link_rate_code)
+ break;
+ }
+
+ for (lane_cnt = 1; lane_cnt <= max_lanes; lane_cnt <<= 1) {
+ int bw;
+ u32 rate;
+
+ bw = drm_dp_bw_code_to_link_rate(bws[i]);
+ rate = xilinx_drm_dp_max_rate(bw, lane_cnt, bpp);
+ if (pclock <= rate) {
+ dp->mode.bw_code = bws[i];
+ dp->mode.lane_cnt = lane_cnt;
+ dp->mode.pclock = pclock;
+ return dp->mode.bw_code;
+ }
+ }
+
+ dev_dbg(dp->dev, "failed to configure link values\n");
+
+ return -EINVAL;
+}
+
+/**
+ * xilinx_drm_dp_adjust_train - Adjust train values
+ * @dp: DisplayPort IP core structure
+ * @link_status: link status from sink which contains requested training values
+ */
+static void xilinx_drm_dp_adjust_train(struct xilinx_drm_dp *dp,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 *train_set = dp->train_set;
+ u8 voltage = 0, preemphasis = 0;
+ u8 max_preemphasis;
+ u8 i;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++) {
+ u8 v = drm_dp_get_adjust_request_voltage(link_status, i);
+ u8 p = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+
+ if (v > voltage)
+ voltage = v;
+
+ if (p > preemphasis)
+ preemphasis = p;
+ }
+
+ if (voltage >= DP_TRAIN_VOLTAGE_SWING_LEVEL_3)
+ voltage |= DP_TRAIN_MAX_SWING_REACHED;
+
+ max_preemphasis = (dp->dp_sub) ? DP_TRAIN_PRE_EMPH_LEVEL_2 :
+ DP_TRAIN_PRE_EMPH_LEVEL_3;
+
+ if (preemphasis >= max_preemphasis)
+ preemphasis |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++)
+ train_set[i] = voltage | preemphasis;
+}
+
+/**
+ * xilinx_drm_dp_update_vs_emph - Update the training values
+ * @dp: DisplayPort IP core structure
+ *
+ * Update the training values based on the request from the sink. The mapped
+ * values are predefined, and the values (vs, pe, pc) are from the device
+ * manual.
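+ *
+ * Entries of 0xff in the vs[] and pe[] tables below mark pre-emphasis and
+ * voltage-swing combinations that are not expected to be requested.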
+ *
+ * Return: 0 if vs and emph are updated successfully, or the error code returned
+ * by drm_dp_dpcd_write().
+ */
+static int xilinx_drm_dp_update_vs_emph(struct xilinx_drm_dp *dp)
+{
+ u8 *train_set = dp->train_set;
+ u8 i, v_level, p_level;
+ int ret;
+ static u8 vs[4][4] = { { 0x2a, 0x27, 0x24, 0x20 },
+ { 0x27, 0x23, 0x20, 0xff },
+ { 0x24, 0x20, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0xff } };
+ static u8 pe[4][4] = { { 0x2, 0x2, 0x2, 0x2 },
+ { 0x1, 0x1, 0x1, 0xff },
+ { 0x0, 0x0, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0xff } };
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set,
+ dp->mode.lane_cnt);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++) {
+ v_level = (train_set[i] & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ p_level = (train_set[i] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if (dp->phy[i]) {
+ u32 reg = XILINX_DP_SUB_TX_PHY_PRECURSOR_LANE_0 + i * 4;
+
+ xpsgtr_margining_factor(dp->phy[i], p_level, v_level);
+ xpsgtr_override_deemph(dp->phy[i], p_level, v_level);
+ xilinx_drm_writel(dp->iomem, reg, 0x2);
+ } else {
+ u32 reg;
+
+ reg = XILINX_DP_TX_PHY_VOLTAGE_DIFF_LANE_0 + i * 4;
+ xilinx_drm_writel(dp->iomem, reg, vs[p_level][v_level]);
+ reg = XILINX_DP_TX_PHY_PRECURSOR_LANE_0 + i * 4;
+ xilinx_drm_writel(dp->iomem, reg, pe[p_level][v_level]);
+ reg = XILINX_DP_TX_PHY_POSTCURSOR_LANE_0 + i * 4;
+ xilinx_drm_writel(dp->iomem, reg, 0);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_link_train_cr - Train clock recovery
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if clock recovery train is done successfully, or corresponding
+ * error code.
+ */
+static int xilinx_drm_dp_link_train_cr(struct xilinx_drm_dp *dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u8 vs = 0, tries = 0;
+ u16 max_tries, i;
+ bool cr_done;
+ int ret;
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1);
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ /* With 4 lanes and 4 voltage-swing values there are at most 256
+ * combinations to try, so this loop should exit well before 512
+ * iterations.
+ */
+ for (max_tries = 0; max_tries < 512; max_tries++) {
+ ret = xilinx_drm_dp_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_clock_recovery_delay(dp->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
+ if (ret < 0)
+ return ret;
+
+ cr_done = drm_dp_clock_recovery_ok(link_status, lane_cnt);
+ if (cr_done)
+ break;
+
+ for (i = 0; i < lane_cnt; i++)
+ if (!(dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED))
+ break;
+
+ if (i == lane_cnt)
+ break;
+
+ if ((dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == vs)
+ tries++;
+ else
+ tries = 0;
+
+ if (tries == DP_MAX_TRAINING_TRIES)
+ break;
+
+ vs = dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ xilinx_drm_dp_adjust_train(dp, link_status);
+ }
+
+ if (!cr_done)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_link_train_ce - Train channel equalization
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if channel equalization train is done successfully, or
+ * corresponding error code.
+ */
+static int xilinx_drm_dp_link_train_ce(struct xilinx_drm_dp *dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u32 pat, tries;
+ int ret;
+ bool ce_done;
+
+ if (dp->config.dp_version == DP_V1_2 &&
+ dp->dpcd[DP_DPCD_REV] >= DP_V1_2 &&
+ dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED)
+ pat = DP_TRAINING_PATTERN_3;
+ else
+ pat = DP_TRAINING_PATTERN_2;
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_TRAINING_PATTERN_SET, pat);
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ pat | DP_LINK_SCRAMBLING_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ for (tries = 0; tries < DP_MAX_TRAINING_TRIES; tries++) {
+ ret = xilinx_drm_dp_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_channel_eq_delay(dp->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
+ if (ret < 0)
+ return ret;
+
+ ce_done = drm_dp_channel_eq_ok(link_status, lane_cnt);
+ if (ce_done)
+ break;
+
+ xilinx_drm_dp_adjust_train(dp, link_status);
+ }
+
+ if (!ce_done)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_train - Train the link
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if all trains are done successfully, or corresponding error code.
+ */
+static int xilinx_drm_dp_train(struct xilinx_drm_dp *dp)
+{
+ u32 reg;
+ u8 bw_code = dp->mode.bw_code;
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u8 aux_lane_cnt = lane_cnt;
+ bool enhanced;
+ int ret;
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_LANE_CNT_SET, lane_cnt);
+
+ enhanced = drm_dp_enhanced_frame_cap(dp->dpcd);
+ if (enhanced) {
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_ENHANCED_FRAME_EN, 1);
+ aux_lane_cnt |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ }
+
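+ /* dpcd[3] is DP_MAX_DOWNSPREAD; bit 0 means the sink supports 0.5% downspread */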
+ if (dp->dpcd[3] & 0x1) {
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_DOWNSPREAD_CTL, 1);
+ drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL,
+ DP_SPREAD_AMP_0_5);
+ } else {
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_DOWNSPREAD_CTL, 0);
+ drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL, 0);
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET, aux_lane_cnt);
+ if (ret < 0) {
+ DRM_ERROR("failed to set lane count\n");
+ return ret;
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
+ DP_SET_ANSI_8B10B);
+ if (ret < 0) {
+ DRM_ERROR("failed to set ANSI 8B/10B encoding\n");
+ return ret;
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_LINK_BW_SET, bw_code);
+ if (ret < 0) {
+ DRM_ERROR("failed to set DP bandwidth\n");
+ return ret;
+ }
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_LINK_BW_SET, bw_code);
+
+ switch (bw_code) {
+ case DP_LINK_BW_1_62:
+ reg = XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_162;
+ break;
+ case DP_LINK_BW_2_7:
+ reg = XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_270;
+ break;
+ case DP_LINK_BW_5_4:
+ default:
+ reg = XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_540;
+ break;
+ }
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING,
+ reg);
+ ret = xilinx_drm_dp_phy_ready(dp);
+ if (ret < 0)
+ return ret;
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_SCRAMBLING_DISABLE, 1);
+
+ memset(dp->train_set, 0, 4);
+
+ ret = xilinx_drm_dp_link_train_cr(dp);
+ if (ret)
+ return ret;
+
+ ret = xilinx_drm_dp_link_train_ce(dp);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+ if (ret < 0) {
+ DRM_ERROR("failed to disable training pattern\n");
+ return ret;
+ }
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_SCRAMBLING_DISABLE, 0);
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_train_loop - Downshift the link rate during training
+ * @dp: DisplayPort IP core structure
+ *
+ * Train the link by downshifting the link rate if training is not successful.
+ */
+static void xilinx_drm_dp_train_loop(struct xilinx_drm_dp *dp)
+{
+ struct xilinx_drm_dp_mode *mode = &dp->mode;
+ u8 bw = mode->bw_code;
+ int ret;
+
+ do {
+ if (dp->status == connector_status_disconnected)
+ return;
+
+ ret = xilinx_drm_dp_train(dp);
+ if (!ret)
+ return;
+
+ ret = xilinx_drm_dp_mode_configure(dp, mode->pclock, bw);
+ if (ret < 0)
+ return;
+ bw = ret;
+ } while (bw >= DP_LINK_BW_1_62);
+
+ DRM_ERROR("failed to train the DP link\n");
+}
+
+/**
+ * xilinx_drm_dp_init_aux - Initialize the DP aux
+ * @dp: DisplayPort IP core structure
+ *
+ * Initialize the DP aux. The aux clock is derived from the axi clock, so
+ * this function gets the axi clock frequency and calculates the filter
+ * value. Additionally, the interrupts and transmitter are enabled.
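+ *
+ * For example, with a 100 MHz axi clock the loop below selects an AUX
+ * filter width of 40 clock cycles, i.e. a 0.4 usec pulse.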
+ *
+ * Return: 0 on success, error value otherwise
+ */
+static int xilinx_drm_dp_init_aux(struct xilinx_drm_dp *dp)
+{
+ int clock_rate;
+ u32 reg, w;
+
+ clock_rate = clk_get_rate(dp->aclk);
+ if (clock_rate < XILINX_DP_TX_CLK_DIVIDER_MHZ) {
+ DRM_ERROR("aclk should be higher than 1MHz\n");
+ return -EINVAL;
+ }
+
+ /* Allowable values for this register are: 8, 16, 24, 32, 40, 48 */
+ for (w = 8; w <= 48; w += 8) {
+ /* The AUX pulse width should be between 0.4 and 0.6 usec */
+ if (w >= (4 * clock_rate / 10000000) &&
+ w <= (6 * clock_rate / 10000000))
+ break;
+ }
+
+ if (w > 48) {
+ DRM_ERROR("aclk frequency too high\n");
+ return -EINVAL;
+ }
+ reg = w << XILINX_DP_TX_CLK_DIVIDER_AUX_FILTER_SHIFT;
+
+ reg |= clock_rate / XILINX_DP_TX_CLK_DIVIDER_MHZ;
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_CLK_DIVIDER, reg);
+
+ if (dp->dp_sub)
+ xilinx_drm_writel(dp->iomem, XILINX_DP_SUB_TX_INTR_EN,
+ XILINX_DP_TX_INTR_ALL);
+ else
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_INTR_MASK,
+ (u32)~XILINX_DP_TX_INTR_ALL);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_ENABLE, 1);
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_init_phy - Initialize the phy
+ * @dp: DisplayPort IP core structure
+ *
+ * Initialize the phy.
+ *
+ * Return: 0 if the phy instances are initialized correctly, or the error code
+ * returned from the callee functions.
+ */
+static int xilinx_drm_dp_init_phy(struct xilinx_drm_dp *dp)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < dp->config.max_lanes; i++) {
+ ret = phy_init(dp->phy[i]);
+ if (ret) {
+ dev_err(dp->dev, "failed to init phy lane %d\n", i);
+ return ret;
+ }
+ }
+
+ if (dp->dp_sub)
+ xilinx_drm_writel(dp->iomem, XILINX_DP_SUB_TX_INTR_DS,
+ XILINX_DP_TX_INTR_ALL);
+ else
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_INTR_MASK,
+ XILINX_DP_TX_INTR_ALL);
+
+ xilinx_drm_clr(dp->iomem, XILINX_DP_TX_PHY_CONFIG,
+ XILINX_DP_TX_PHY_CONFIG_ALL_RESET);
+
+ /* Wait for PLL to be locked for the primary (1st) */
+ if (dp->phy[0]) {
+ ret = xpsgtr_wait_pll_lock(dp->phy[0]);
+ if (ret) {
+ dev_err(dp->dev, "failed to lock pll\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_exit_phy - Exit the phy
+ * @dp: DisplayPort IP core structure
+ *
+ * Exit the phy.
+ */
+static void xilinx_drm_dp_exit_phy(struct xilinx_drm_dp *dp)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < dp->config.max_lanes; i++) {
+ ret = phy_exit(dp->phy[i]);
+ if (ret) {
+ dev_err(dp->dev,
+ "failed to exit phy (%d) %d\n", i, ret);
+ }
+ }
+}
+
+static void xilinx_drm_dp_dpms(struct drm_encoder *encoder, int dpms)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ void __iomem *iomem = dp->iomem;
+ unsigned int i;
+ int ret;
+
+ if (dp->dpms == dpms)
+ return;
+
+ dp->dpms = dpms;
+
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ pm_runtime_get_sync(dp->dev);
+
+ if (dp->aud_clk && !dp->aud_clk_enabled) {
+ ret = clk_prepare_enable(dp->aud_clk);
+ if (ret) {
+ dev_err(dp->dev, "failed to enable aud_clk\n");
+ } else {
+ xilinx_drm_writel(iomem,
+ XILINX_DP_TX_AUDIO_CONTROL,
+ 1);
+ dp->aud_clk_enabled = true;
+ }
+ }
+ xilinx_drm_writel(iomem, XILINX_DP_TX_PHY_POWER_DOWN, 0);
+
+ if (dp->status == connector_status_connected) {
+ for (i = 0; i < 3; i++) {
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ usleep_range(300, 500);
+ }
+ /* Some monitors take time to wake up properly */
+ msleep(xilinx_drm_dp_power_on_delay_ms);
+ if (ret != 1)
+ dev_dbg(dp->dev, "DP aux failed\n");
+ else
+ xilinx_drm_dp_train_loop(dp);
+ }
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_SW_RESET,
+ XILINX_DP_TX_SW_RESET_ALL);
+ xilinx_drm_writel(iomem, XILINX_DP_TX_ENABLE_MAIN_STREAM, 1);
+
+ return;
+ default:
+ xilinx_drm_writel(iomem, XILINX_DP_TX_ENABLE_MAIN_STREAM, 0);
+ if (dp->status == connector_status_connected) {
+ drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D3);
+ }
+ xilinx_drm_writel(iomem, XILINX_DP_TX_PHY_POWER_DOWN,
+ XILINX_DP_TX_PHY_POWER_DOWN_ALL);
+ if (dp->aud_clk && dp->aud_clk_enabled) {
+ xilinx_drm_writel(iomem, XILINX_DP_TX_AUDIO_CONTROL, 0);
+ clk_disable_unprepare(dp->aud_clk);
+ dp->aud_clk_enabled = false;
+ }
+ pm_runtime_put_sync(dp->dev);
+
+ return;
+ }
+}
+
+static void xilinx_drm_dp_save(struct drm_encoder *encoder)
+{
+ /* no op */
+}
+
+static void xilinx_drm_dp_restore(struct drm_encoder *encoder)
+{
+ /* no op */
+}
+
+#define XILINX_DP_SUB_TX_MIN_H_BACKPORCH 20
+
+static bool xilinx_drm_dp_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ int diff = mode->htotal - mode->hsync_end;
+
+ /*
+ * ZynqMP DP requires the horizontal back porch to be greater than 12.
+ * This limitation may conflict with modes requested by the sink device.
+ */
+ if (dp->dp_sub && diff < XILINX_DP_SUB_TX_MIN_H_BACKPORCH) {
+ int vrefresh = (adjusted_mode->clock * 1000) /
+ (adjusted_mode->vtotal * adjusted_mode->htotal);
+
+ diff = XILINX_DP_SUB_TX_MIN_H_BACKPORCH - diff;
+ adjusted_mode->htotal += diff;
+ adjusted_mode->clock = adjusted_mode->vtotal *
+ adjusted_mode->htotal * vrefresh / 1000;
+ }
+
+ return true;
+}
+
+static int xilinx_drm_dp_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 bpp = dp->config.bpp;
+ u32 max_pclock = dp->config.max_pclock;
+ int max_rate = dp->link_config.max_rate;
+ int rate;
+
+ if (max_pclock && mode->clock > max_pclock)
+ return MODE_CLOCK_HIGH;
+
+ rate = xilinx_drm_dp_max_rate(max_rate, max_lanes, bpp);
+ if (mode->clock > rate)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+/**
+ * xilinx_drm_dp_mode_set_transfer_unit - Set the transfer unit values
+ * @dp: DisplayPort IP core structure
+ * @mode: requested display mode
+ *
+ * Set the transfer unit and calculate all transfer unit size related values.
+ * Calculation is based on DP and IP core specification.
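+ *
+ * For example, a 148500 KHz pixel clock at 24 bpp on a 4-lane 5.4 Gbps link,
+ * with the maximum transfer unit size of 64, gives avg_bytes_per_tu =
+ * 148500 * 3 * 64 / (4 * 540000 / 1000) = 13200, i.e. 13 whole bytes per
+ * transfer unit with a fractional part of 200.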
+ */
+static void xilinx_drm_dp_mode_set_transfer_unit(struct xilinx_drm_dp *dp,
+ struct drm_display_mode *mode)
+{
+ u32 tu = XILINX_DP_TX_DEF_TRANSFER_UNIT_SIZE;
+ u32 bw, vid_kbytes, avg_bytes_per_tu, init_wait;
+
+ /* Use the max transfer unit size (default) */
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_TRANSFER_UNIT_SIZE, tu);
+
+ vid_kbytes = mode->clock * (dp->config.bpp / 8);
+ bw = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
+ avg_bytes_per_tu = vid_kbytes * tu / (dp->mode.lane_cnt * bw / 1000);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MIN_BYTES_PER_TU,
+ avg_bytes_per_tu / 1000);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_FRAC_BYTES_PER_TU,
+ avg_bytes_per_tu % 1000);
+
+ /* Configure the initial wait cycle based on transfer unit size */
+ if (tu < (avg_bytes_per_tu / 1000))
+ init_wait = 0;
+ else if ((avg_bytes_per_tu / 1000) <= 4)
+ init_wait = tu;
+ else
+ init_wait = tu - avg_bytes_per_tu / 1000;
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_INIT_WAIT, init_wait);
+}
+
+/**
+ * xilinx_drm_dp_mode_set_stream - Configure the main stream
+ * @dp: DisplayPort IP core structure
+ * @mode: requested display mode
+ *
+ * Configure the main stream based on the requested mode @mode. Calculation is
+ * based on IP core specification.
+ */
+static void xilinx_drm_dp_mode_set_stream(struct xilinx_drm_dp *dp,
+ struct drm_display_mode *mode)
+{
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u32 reg, wpl;
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_HTOTAL,
+ mode->htotal);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_VTOTAL,
+ mode->vtotal);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_POLARITY,
+ (!!(mode->flags & DRM_MODE_FLAG_PVSYNC) <<
+ XILINX_DP_TX_MAIN_STREAM_POLARITY_VSYNC_SHIFT) |
+ (!!(mode->flags & DRM_MODE_FLAG_PHSYNC) <<
+ XILINX_DP_TX_MAIN_STREAM_POLARITY_HSYNC_SHIFT));
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_HSWIDTH,
+ mode->hsync_end - mode->hsync_start);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_VSWIDTH,
+ mode->vsync_end - mode->vsync_start);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_HRES,
+ mode->hdisplay);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_VRES,
+ mode->vdisplay);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_HSTART,
+ mode->htotal - mode->hsync_start);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_VSTART,
+ mode->vtotal - mode->vsync_start);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_MISC0,
+ dp->config.misc0);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_MISC1,
+ dp->config.misc1);
+
+ /* In synchronous mode, set the dividers */
+ if (dp->config.misc0 & XILINX_DP_TX_MAIN_STREAM_MISC0_SYNC) {
+ reg = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_N_VID, reg);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_M_VID, mode->clock);
+ if (dp->aud_clk) {
+ int aud_rate = clk_get_rate(dp->aud_clk);
+
+ dev_dbg(dp->dev, "Audio rate: %d\n", aud_rate / 512);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_AUDIO_N_AUD,
+ reg);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_AUDIO_M_AUD,
+ aud_rate / 1000);
+ }
+ }
+
+ /* Only 2 channels are supported for now */
+ if (dp->aud_clk)
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_AUDIO_CHANNELS, 1);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_USER_PIXEL_WIDTH, 1);
+
+ /* Translate to the native 16 bit datapath based on IP core spec */
+ wpl = (mode->hdisplay * dp->config.bpp + 15) / 16;
+ reg = wpl + wpl % lane_cnt - lane_cnt;
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_USER_DATA_CNT_PER_LANE, reg);
+}
+
+static void xilinx_drm_dp_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ int ret;
+
+ ret = xilinx_drm_dp_mode_configure(dp, adjusted_mode->clock, 0);
+ if (ret < 0)
+ return;
+
+ xilinx_drm_dp_mode_set_stream(dp, adjusted_mode);
+ xilinx_drm_dp_mode_set_transfer_unit(dp, adjusted_mode);
+}
+
+static enum drm_connector_status
+xilinx_drm_dp_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ struct xilinx_drm_dp_link_config *link_config = &dp->link_config;
+ u32 state;
+ int ret;
+
+ state = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_INTR_SIGNAL_STATE);
+ if (state & XILINX_DP_TX_INTR_SIGNAL_STATE_HPD) {
+ dp->status = connector_status_connected;
+ ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
+ sizeof(dp->dpcd));
+ if (ret < 0)
+ goto disconnected;
+
+ link_config->max_rate = min_t(int,
+ drm_dp_max_link_rate(dp->dpcd),
+ dp->config.max_link_rate);
+ link_config->max_lanes = min_t(u8,
+ drm_dp_max_lane_count(dp->dpcd),
+ dp->config.max_lanes);
+ return dp->status;
+ }
+
+disconnected:
+ dp->status = connector_status_disconnected;
+ return dp->status;
+}
+
+static int xilinx_drm_dp_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, &dp->aux.ddc);
+ if (!edid)
+ return 0;
+
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+
+ return ret;
+}
+
+static struct drm_encoder_slave_funcs xilinx_drm_dp_encoder_funcs = {
+ .dpms = xilinx_drm_dp_dpms,
+ .save = xilinx_drm_dp_save,
+ .restore = xilinx_drm_dp_restore,
+ .mode_fixup = xilinx_drm_dp_mode_fixup,
+ .mode_valid = xilinx_drm_dp_mode_valid,
+ .mode_set = xilinx_drm_dp_mode_set,
+ .detect = xilinx_drm_dp_detect,
+ .get_modes = xilinx_drm_dp_get_modes,
+};
+
+static int xilinx_drm_dp_encoder_init(struct platform_device *pdev,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder)
+{
+ struct xilinx_drm_dp *dp = platform_get_drvdata(pdev);
+
+ encoder->slave_priv = dp;
+ encoder->slave_funcs = &xilinx_drm_dp_encoder_funcs;
+
+ dp->encoder = &encoder->base;
+
+ return xilinx_drm_dp_init_aux(dp);
+}
+
+static irqreturn_t xilinx_drm_dp_irq_handler(int irq, void *data)
+{
+ struct xilinx_drm_dp *dp = (struct xilinx_drm_dp *)data;
+ u32 reg, status;
+
+ reg = dp->dp_sub ?
+ XILINX_DP_SUB_TX_INTR_STATUS : XILINX_DP_TX_INTR_STATUS;
+ status = xilinx_drm_readl(dp->iomem, reg);
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & XILINX_DP_TX_INTR_CHBUF_UNDERFLW_MASK)
+ dev_dbg(dp->dev, "underflow interrupt\n");
+ if (status & XILINX_DP_TX_INTR_CHBUF_OVERFLW_MASK)
+ dev_dbg(dp->dev, "overflow interrupt\n");
+
+ xilinx_drm_writel(dp->iomem, reg, status);
+
+ if (status & XILINX_DP_TX_INTR_VBLANK_START)
+ xilinx_drm_dp_sub_handle_vblank(dp->dp_sub);
+
+ if (status & XILINX_DP_TX_INTR_HPD_EVENT)
+ drm_helper_hpd_irq_event(dp->encoder->dev);
+
+ if (status & XILINX_DP_TX_INTR_HPD_IRQ) {
+ u8 status[DP_LINK_STATUS_SIZE + 2];
+ int ret;
+
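+ /*
+ * Read DPCD 0x200-0x207 in one shot: status[2..7] hold the link
+ * status registers (DP_LANE0_1_STATUS onwards) and status[4] is
+ * DP_LANE_ALIGN_STATUS_UPDATED.
+ */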
+ ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
+ DP_LINK_STATUS_SIZE + 2);
+ if (ret < 0)
+ goto handled;
+
+ if (status[4] & DP_LINK_STATUS_UPDATED ||
+ !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) ||
+ !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt))
+ xilinx_drm_dp_train_loop(dp);
+ }
+
+handled:
+ return IRQ_HANDLED;
+}
+
+static ssize_t
+xilinx_drm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct xilinx_drm_dp *dp = container_of(aux, struct xilinx_drm_dp, aux);
+ int ret;
+ unsigned int i, iter;
+
+ /* Number of loops = timeout in msec / aux delay (400 usec) */
+ iter = xilinx_drm_dp_aux_timeout_ms * 1000 / 400;
+ iter = iter ? iter : 1;
+
+ for (i = 0; i < iter; i++) {
+ ret = xilinx_drm_dp_aux_cmd_submit(dp, msg->request,
+ msg->address, msg->buffer,
+ msg->size, &msg->reply);
+ if (!ret) {
+ dev_dbg(dp->dev, "aux %d retries\n", i);
+ return msg->size;
+ }
+
+ if (dp->status == connector_status_disconnected) {
+ dev_dbg(dp->dev, "no aux dev\n");
+ return -ENODEV;
+ }
+
+ usleep_range(400, 500);
+ }
+
+ dev_dbg(dp->dev, "failed to do aux transfer (%d)\n", ret);
+
+ return ret;
+}
+
+static int xilinx_drm_dp_parse_of(struct xilinx_drm_dp *dp)
+{
+ struct device_node *node = dp->dev->of_node;
+ struct xilinx_drm_dp_config *config = &dp->config;
+ const char *string;
+ u32 num_colors, bpc;
+ bool sync;
+ int ret;
+
+ ret = of_property_read_string(node, "xlnx,dp-version", &string);
+ if (ret < 0) {
+ dev_err(dp->dev, "No DP version in DT\n");
+ return ret;
+ }
+
+ if (strcmp(string, "v1.1a") == 0) {
+ config->dp_version = DP_V1_1A;
+ } else if (strcmp(string, "v1.2") == 0) {
+ config->dp_version = DP_V1_2;
+ } else {
+ dev_err(dp->dev, "Invalid DP version in DT\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-lanes", &config->max_lanes);
+ if (ret < 0) {
+ dev_err(dp->dev, "No lane count in DT\n");
+ return ret;
+ }
+
+ if (config->max_lanes != 1 && config->max_lanes != 2 &&
+ config->max_lanes != 4) {
+ dev_err(dp->dev, "Invalid max lanes in DT\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-link-rate",
+ &config->max_link_rate);
+ if (ret < 0) {
+ dev_err(dp->dev, "No link rate in DT\n");
+ return ret;
+ }
+
+ if (config->max_link_rate != DP_REDUCED_BIT_RATE &&
+ config->max_link_rate != DP_HIGH_BIT_RATE &&
+ config->max_link_rate != DP_HIGH_BIT_RATE2) {
+ dev_err(dp->dev, "Invalid link rate in DT\n");
+ return -EINVAL;
+ }
+
+ config->enable_yonly = of_property_read_bool(node, "xlnx,enable-yonly");
+ config->enable_ycrcb = of_property_read_bool(node, "xlnx,enable-ycrcb");
+
+ sync = of_property_read_bool(node, "xlnx,sync");
+ if (sync)
+ config->misc0 |= XILINX_DP_TX_MAIN_STREAM_MISC0_SYNC;
+
+ ret = of_property_read_string(node, "xlnx,colormetry", &string);
+ if (ret < 0) {
+ dev_err(dp->dev, "No colormetry in DT\n");
+ return ret;
+ }
+
+ if (strcmp(string, "rgb") == 0) {
+ config->misc0 |= XILINX_DP_MISC0_RGB;
+ num_colors = 3;
+ } else if (config->enable_ycrcb && strcmp(string, "ycrcb422") == 0) {
+ config->misc0 |= XILINX_DP_MISC0_YCRCB_422;
+ num_colors = 2;
+ } else if (config->enable_ycrcb && strcmp(string, "ycrcb444") == 0) {
+ config->misc0 |= XILINX_DP_MISC0_YCRCB_444;
+ num_colors = 3;
+ } else if (config->enable_yonly && strcmp(string, "yonly") == 0) {
+ config->misc1 |= XILINX_DP_MISC1_Y_ONLY;
+ num_colors = 1;
+ } else {
+ dev_err(dp->dev, "Invalid colormetry in DT\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-bpc", &config->max_bpc);
+ if (ret < 0) {
+ dev_err(dp->dev, "No max bpc in DT\n");
+ return ret;
+ }
+
+ if (config->max_bpc != 8 && config->max_bpc != 10 &&
+ config->max_bpc != 12 && config->max_bpc != 16) {
+ dev_err(dp->dev, "Invalid max bpc in DT\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,bpc", &bpc);
+ if (ret < 0) {
+ dev_err(dp->dev, "No color depth(bpc) in DT\n");
+ return ret;
+ }
+
+ if (bpc > config->max_bpc) {
+ dev_err(dp->dev, "Invalid color depth(bpc) in DT\n");
+ return -EINVAL;
+ }
+
+ switch (bpc) {
+ case 6:
+ config->misc0 |= XILINX_DP_MISC0_BPC_6;
+ break;
+ case 8:
+ config->misc0 |= XILINX_DP_MISC0_BPC_8;
+ break;
+ case 10:
+ config->misc0 |= XILINX_DP_MISC0_BPC_10;
+ break;
+ case 12:
+ config->misc0 |= XILINX_DP_MISC0_BPC_12;
+ break;
+ case 16:
+ config->misc0 |= XILINX_DP_MISC0_BPC_16;
+ break;
+ default:
+ dev_err(dp->dev, "Not supported color depth in DT\n");
+ return -EINVAL;
+ }
+
+ config->bpp = num_colors * bpc;
+
+ of_property_read_u32(node, "xlnx,max-pclock-frequency",
+ &config->max_pclock);
+
+ return 0;
+}
+
+static int __maybe_unused xilinx_drm_dp_pm_suspend(struct device *dev)
+{
+ struct xilinx_drm_dp *dp = dev_get_drvdata(dev);
+
+ xilinx_drm_dp_exit_phy(dp);
+
+ return 0;
+}
+
+static int __maybe_unused xilinx_drm_dp_pm_resume(struct device *dev)
+{
+ struct xilinx_drm_dp *dp = dev_get_drvdata(dev);
+
+ xilinx_drm_dp_init_phy(dp);
+ xilinx_drm_dp_init_aux(dp);
+ drm_helper_hpd_irq_event(dp->encoder->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops xilinx_drm_dp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xilinx_drm_dp_pm_suspend,
+ xilinx_drm_dp_pm_resume)
+};
+
+static int xilinx_drm_dp_probe(struct platform_device *pdev)
+{
+ struct xilinx_drm_dp *dp;
+ struct resource *res;
+ u32 version, i;
+ int irq, ret;
+
+ dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->dpms = DRM_MODE_DPMS_OFF;
+ dp->status = connector_status_disconnected;
+ dp->dev = &pdev->dev;
+
+ ret = xilinx_drm_dp_parse_of(dp);
+ if (ret < 0)
+ return ret;
+
+ dp->aclk = devm_clk_get(dp->dev, "aclk");
+ if (IS_ERR(dp->aclk))
+ return PTR_ERR(dp->aclk);
+
+ ret = clk_prepare_enable(dp->aclk);
+ if (ret) {
+ dev_err(dp->dev, "failed to enable the aclk\n");
+ return ret;
+ }
+
+ dp->aud_clk = devm_clk_get(dp->dev, "aud_clk");
+ if (IS_ERR(dp->aud_clk)) {
+ ret = PTR_ERR(dp->aud_clk);
+ if (ret == -EPROBE_DEFER)
+ goto error_aclk;
+ dp->aud_clk = NULL;
+ dev_dbg(dp->dev, "failed to get the aud_clk:\n");
+ }
+
+ dp->dp_sub = xilinx_drm_dp_sub_of_get(pdev->dev.of_node);
+ if (IS_ERR(dp->dp_sub)) {
+ ret = PTR_ERR(dp->dp_sub);
+ goto error_aclk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dp->iomem = devm_ioremap_resource(dp->dev, res);
+ if (IS_ERR(dp->iomem)) {
+ ret = PTR_ERR(dp->iomem);
+ goto error_dp_sub;
+ }
+
+ platform_set_drvdata(pdev, dp);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_PHY_POWER_DOWN,
+ XILINX_DP_TX_PHY_POWER_DOWN_ALL);
+ xilinx_drm_set(dp->iomem, XILINX_DP_TX_PHY_CONFIG,
+ XILINX_DP_TX_PHY_CONFIG_ALL_RESET);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_FORCE_SCRAMBLER_RESET, 1);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_ENABLE, 0);
+
+ if (dp->dp_sub) {
+ for (i = 0; i < dp->config.max_lanes; i++) {
+ char phy_name[16];
+
+ snprintf(phy_name, sizeof(phy_name), "dp-phy%d", i);
+ dp->phy[i] = devm_phy_get(dp->dev, phy_name);
+ if (IS_ERR(dp->phy[i])) {
+ dev_err(dp->dev, "failed to get phy lane\n");
+ ret = PTR_ERR(dp->phy[i]);
+ dp->phy[i] = NULL;
+ goto error_dp_sub;
+ }
+ }
+ }
+
+ ret = xilinx_drm_dp_init_phy(dp);
+ if (ret)
+ goto error_phy;
+
+ dp->aux.name = "Xilinx DP AUX";
+ dp->aux.dev = dp->dev;
+ dp->aux.transfer = xilinx_drm_dp_aux_transfer;
+ ret = drm_dp_aux_register(&dp->aux);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to initialize DP aux\n");
+ goto error;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto error;
+ }
+
+ ret = devm_request_threaded_irq(dp->dev, irq, NULL,
+ xilinx_drm_dp_irq_handler, IRQF_ONESHOT,
+ dev_name(dp->dev), dp);
+ if (ret < 0)
+ goto error;
+
+ version = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_VERSION);
+
+ dev_info(dp->dev, "device found, version %u.%02x%x\n",
+ ((version & XILINX_DP_TX_VERSION_MAJOR_MASK) >>
+ XILINX_DP_TX_VERSION_MAJOR_SHIFT),
+ ((version & XILINX_DP_TX_VERSION_MINOR_MASK) >>
+ XILINX_DP_TX_VERSION_MINOR_SHIFT),
+ ((version & XILINX_DP_TX_VERSION_REVISION_MASK) >>
+ XILINX_DP_TX_VERSION_REVISION_SHIFT));
+
+ version = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_CORE_ID);
+ if (version & XILINX_DP_TX_CORE_ID_DIRECTION) {
+ dev_err(dp->dev, "Receiver is not supported\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ dev_info(dp->dev, "Display Port, version %u.%02x%02x (tx)\n",
+ ((version & XILINX_DP_TX_CORE_ID_MAJOR_MASK) >>
+ XILINX_DP_TX_CORE_ID_MAJOR_SHIFT),
+ ((version & XILINX_DP_TX_CORE_ID_MINOR_MASK) >>
+ XILINX_DP_TX_CORE_ID_MINOR_SHIFT),
+ ((version & XILINX_DP_TX_CORE_ID_REVISION_MASK) >>
+ XILINX_DP_TX_CORE_ID_REVISION_SHIFT));
+
+ pm_runtime_enable(dp->dev);
+
+ xilinx_dp_debugfs_init(dp);
+
+ return 0;
+
+error:
+ drm_dp_aux_unregister(&dp->aux);
+error_dp_sub:
+ xilinx_drm_dp_sub_put(dp->dp_sub);
+error_phy:
+ xilinx_drm_dp_exit_phy(dp);
+error_aclk:
+ clk_disable_unprepare(dp->aclk);
+ return ret;
+}
+
+static int xilinx_drm_dp_remove(struct platform_device *pdev)
+{
+ struct xilinx_drm_dp *dp = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(dp->dev);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_ENABLE, 0);
+
+ drm_dp_aux_unregister(&dp->aux);
+ xilinx_drm_dp_exit_phy(dp);
+ xilinx_drm_dp_sub_put(dp->dp_sub);
+
+ if (dp->aud_clk && dp->aud_clk_enabled)
+ clk_disable_unprepare(dp->aud_clk);
+ clk_disable_unprepare(dp->aclk);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_drm_dp_of_match[] = {
+ { .compatible = "xlnx,v-dp", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_drm_dp_of_match);
+
+static struct drm_platform_encoder_driver xilinx_drm_dp_driver = {
+ .platform_driver = {
+ .probe = xilinx_drm_dp_probe,
+ .remove = xilinx_drm_dp_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "xilinx-drm-dp",
+ .of_match_table = xilinx_drm_dp_of_match,
+ .pm = &xilinx_drm_dp_pm_ops,
+ },
+ },
+
+ .encoder_init = xilinx_drm_dp_encoder_init,
+};
+
+static int __init xilinx_drm_dp_init(void)
+{
+ return platform_driver_register(&xilinx_drm_dp_driver.platform_driver);
+}
+
+static void __exit xilinx_drm_dp_exit(void)
+{
+ platform_driver_unregister(&xilinx_drm_dp_driver.platform_driver);
+}
+
+module_init(xilinx_drm_dp_init);
+module_exit(xilinx_drm_dp_exit);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx DRM KMS DiplayPort Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.c b/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.c
new file mode 100644
index 000000000000..e3a68b36fafb
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.c
@@ -0,0 +1,2265 @@
+/*
+ * DisplayPort subsystem support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+
+#include "xilinx_drm_dp_sub.h"
+#include "xilinx_drm_drv.h"
+
+/* Blender registers */
+#define XILINX_DP_SUB_V_BLEND_BG_CLR_0 0x0
+#define XILINX_DP_SUB_V_BLEND_BG_CLR_1 0x4
+#define XILINX_DP_SUB_V_BLEND_BG_CLR_2 0x8
+#define XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA 0xc
+#define XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA_MASK 0x1fe
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT 0x14
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB 0x0
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR444 0x1
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR422 0x2
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YONLY 0x3
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_XVYCC 0x4
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_EN_DOWNSAMPLE BIT(4)
+#define XILINX_DP_SUB_V_BLEND_LAYER_CONTROL 0x18
+#define XILINX_DP_SUB_V_BLEND_LAYER_CONTROL_EN_US BIT(0)
+#define XILINX_DP_SUB_V_BLEND_LAYER_CONTROL_RGB BIT(1)
+#define XILINX_DP_SUB_V_BLEND_LAYER_CONTROL_BYPASS BIT(8)
+#define XILINX_DP_SUB_V_BLEND_NUM_COEFF 9
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF0 0x20
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF1 0x24
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF2 0x28
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF3 0x2c
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF4 0x30
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF5 0x34
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF6 0x38
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF7 0x3c
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF8 0x40
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF0 0x44
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF1 0x48
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF2 0x4c
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF3 0x50
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF4 0x54
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF5 0x58
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF6 0x5c
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF7 0x60
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF8 0x64
+#define XILINX_DP_SUB_V_BLEND_NUM_OFFSET 3
+#define XILINX_DP_SUB_V_BLEND_LUMA_IN1CSC_OFFSET 0x68
+#define XILINX_DP_SUB_V_BLEND_CR_IN1CSC_OFFSET 0x6c
+#define XILINX_DP_SUB_V_BLEND_CB_IN1CSC_OFFSET 0x70
+#define XILINX_DP_SUB_V_BLEND_LUMA_OUTCSC_OFFSET 0x74
+#define XILINX_DP_SUB_V_BLEND_CR_OUTCSC_OFFSET 0x78
+#define XILINX_DP_SUB_V_BLEND_CB_OUTCSC_OFFSET 0x7c
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF0 0x80
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF1 0x84
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF2 0x88
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF3 0x8c
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF4 0x90
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF5 0x94
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF6 0x98
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF7 0x9c
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF8 0xa0
+#define XILINX_DP_SUB_V_BLEND_LUMA_IN2CSC_OFFSET 0xa4
+#define XILINX_DP_SUB_V_BLEND_CR_IN2CSC_OFFSET 0xa8
+#define XILINX_DP_SUB_V_BLEND_CB_IN2CSC_OFFSET 0xac
+#define XILINX_DP_SUB_V_BLEND_CHROMA_KEY_ENABLE 0x1d0
+#define XILINX_DP_SUB_V_BLEND_CHROMA_KEY_COMP1 0x1d4
+#define XILINX_DP_SUB_V_BLEND_CHROMA_KEY_COMP2 0x1d8
+#define XILINX_DP_SUB_V_BLEND_CHROMA_KEY_COMP3 0x1dc
+
+/* AV buffer manager registers */
+#define XILINX_DP_SUB_AV_BUF_FMT 0x0
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_SHIFT 0
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_MASK (0x1f << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_UYVY (0 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_VYUY (1 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YVYU (2 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YUYV (3 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16 (4 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV24 (5 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI (6 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_MONO (7 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI2 (8 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YUV444 (9 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888 (10 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGBA8880 (11 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888_10 (12 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YUV444_10 (13 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI2_10 (14 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_10 (15 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16_10 (16 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV24_10 (17 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YONLY_10 (18 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16_420 (19 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_420 (20 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI2_420 (21 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16_420_10 (22 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_420_10 (23 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI2_420_10 (24 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_SHIFT 8
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_MASK (0xf << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA8888 (0 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_ABGR8888 (1 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGB888 (2 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_BGR888 (3 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA5551 (4 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA4444 (5 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGB565 (6 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_8BPP (7 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_4BPP (8 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_2BPP (9 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_1BPP (10 << 8)
+#define XILINX_DP_SUB_AV_BUF_NON_LIVE_LATENCY 0x8
+#define XILINX_DP_SUB_AV_BUF_CHBUF 0x10
+#define XILINX_DP_SUB_AV_BUF_CHBUF_EN BIT(0)
+#define XILINX_DP_SUB_AV_BUF_CHBUF_FLUSH BIT(1)
+#define XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_SHIFT 2
+#define XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_MASK (0xf << 2)
+#define XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_MAX 0xf
+#define XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_AUD_MAX 0x3
+#define XILINX_DP_SUB_AV_BUF_STATUS 0x28
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL 0x2c
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EN BIT(0)
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EVENT_SHIFT 1
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EVENT_EX_VSYNC 0
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EVENT_EX_VID 1
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EVENT_EX_AUD 2
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EVENT_INT_VSYNC 3
+#define XILINX_DP_SUB_AV_BUF_STC_INIT_VALUE0 0x30
+#define XILINX_DP_SUB_AV_BUF_STC_INIT_VALUE1 0x34
+#define XILINX_DP_SUB_AV_BUF_STC_ADJ 0x38
+#define XILINX_DP_SUB_AV_BUF_STC_VID_VSYNC_TS0 0x3c
+#define XILINX_DP_SUB_AV_BUF_STC_VID_VSYNC_TS1 0x40
+#define XILINX_DP_SUB_AV_BUF_STC_EXT_VSYNC_TS0 0x44
+#define XILINX_DP_SUB_AV_BUF_STC_EXT_VSYNC_TS1 0x48
+#define XILINX_DP_SUB_AV_BUF_STC_CUSTOM_EVENT_TS0 0x4c
+#define XILINX_DP_SUB_AV_BUF_STC_CUSTOM_EVENT_TS1 0x50
+#define XILINX_DP_SUB_AV_BUF_STC_CUSTOM_EVENT2_TS0 0x54
+#define XILINX_DP_SUB_AV_BUF_STC_CUSTOM_EVENT2_TS1 0x58
+#define XILINX_DP_SUB_AV_BUF_STC_SNAPSHOT0 0x60
+#define XILINX_DP_SUB_AV_BUF_STC_SNAPSHOT1 0x64
+#define XILINX_DP_SUB_AV_BUF_OUTPUT 0x70
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_SHIFT 0
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_MASK (0x3 << 0)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_PL (0 << 0)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_MEM (1 << 0)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_PATTERN (2 << 0)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_NONE (3 << 0)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_SHIFT 2
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_MASK (0x3 << 2)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_DISABLE (0 << 2)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_MEM (1 << 2)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_LIVE (2 << 2)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_NONE (3 << 2)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_SHIFT 4
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_MASK (0x3 << 4)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_PL (0 << 4)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_MEM (1 << 4)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_PATTERN (2 << 4)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_DISABLE (3 << 4)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD2_EN BIT(6)
+#define XILINX_DP_SUB_AV_BUF_HCOUNT_VCOUNT_INT0 0x74
+#define XILINX_DP_SUB_AV_BUF_HCOUNT_VCOUNT_INT1 0x78
+#define XILINX_DP_SUB_AV_BUF_PATTERN_GEN_SELECT 0x100
+#define XILINX_DP_SUB_AV_BUF_CLK_SRC 0x120
+#define XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_FROM_PS BIT(0)
+#define XILINX_DP_SUB_AV_BUF_CLK_SRC_AUD_FROM_PS BIT(1)
+#define XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING BIT(2)
+#define XILINX_DP_SUB_AV_BUF_SRST_REG 0x124
+#define XILINX_DP_SUB_AV_BUF_SRST_REG_VID_RST BIT(1)
+#define XILINX_DP_SUB_AV_BUF_AUDIO_CH_CONFIG 0x12c
+#define XILINX_DP_SUB_AV_BUF_GFX_COMP0_SF 0x200
+#define XILINX_DP_SUB_AV_BUF_GFX_COMP1_SF 0x204
+#define XILINX_DP_SUB_AV_BUF_GFX_COMP2_SF 0x208
+#define XILINX_DP_SUB_AV_BUF_VID_COMP0_SF 0x20c
+#define XILINX_DP_SUB_AV_BUF_VID_COMP1_SF 0x210
+#define XILINX_DP_SUB_AV_BUF_VID_COMP2_SF 0x214
+#define XILINX_DP_SUB_AV_BUF_LIVE_VID_COMP0_SF 0x218
+#define XILINX_DP_SUB_AV_BUF_LIVE_VID_COMP1_SF 0x21c
+#define XILINX_DP_SUB_AV_BUF_LIVE_VID_COMP2_SF 0x220
+#define XILINX_DP_SUB_AV_BUF_4BIT_SF 0x11111
+#define XILINX_DP_SUB_AV_BUF_5BIT_SF 0x10842
+#define XILINX_DP_SUB_AV_BUF_6BIT_SF 0x10410
+#define XILINX_DP_SUB_AV_BUF_8BIT_SF 0x10101
+#define XILINX_DP_SUB_AV_BUF_10BIT_SF 0x10040
+#define XILINX_DP_SUB_AV_BUF_NULL_SF 0
+#define XILINX_DP_SUB_AV_BUF_NUM_SF 3
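+/*
+ * The *_SF values above appear to be per-component fixed-point scaling
+ * factors that expand an N-bit component to full scale, e.g.
+ * 0xff * XILINX_DP_SUB_AV_BUF_8BIT_SF (0x10101) == 0xffffff.
+ */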
+#define XILINX_DP_SUB_AV_BUF_LIVE_CB_CR_SWAP 0x224
+#define XILINX_DP_SUB_AV_BUF_PALETTE_MEMORY 0x400
+
+/* Audio registers */
+#define XILINX_DP_SUB_AUD_MIXER_VOLUME 0x0
+#define XILINX_DP_SUB_AUD_MIXER_VOLUME_NO_SCALE 0x20002000
+#define XILINX_DP_SUB_AUD_MIXER_META_DATA 0x4
+#define XILINX_DP_SUB_AUD_CH_STATUS0 0x8
+#define XILINX_DP_SUB_AUD_CH_STATUS1 0xc
+#define XILINX_DP_SUB_AUD_CH_STATUS2 0x10
+#define XILINX_DP_SUB_AUD_CH_STATUS3 0x14
+#define XILINX_DP_SUB_AUD_CH_STATUS4 0x18
+#define XILINX_DP_SUB_AUD_CH_STATUS5 0x1c
+#define XILINX_DP_SUB_AUD_CH_A_DATA0 0x20
+#define XILINX_DP_SUB_AUD_CH_A_DATA1 0x24
+#define XILINX_DP_SUB_AUD_CH_A_DATA2 0x28
+#define XILINX_DP_SUB_AUD_CH_A_DATA3 0x2c
+#define XILINX_DP_SUB_AUD_CH_A_DATA4 0x30
+#define XILINX_DP_SUB_AUD_CH_A_DATA5 0x34
+#define XILINX_DP_SUB_AUD_CH_B_DATA0 0x38
+#define XILINX_DP_SUB_AUD_CH_B_DATA1 0x3c
+#define XILINX_DP_SUB_AUD_CH_B_DATA2 0x40
+#define XILINX_DP_SUB_AUD_CH_B_DATA3 0x44
+#define XILINX_DP_SUB_AUD_CH_B_DATA4 0x48
+#define XILINX_DP_SUB_AUD_CH_B_DATA5 0x4c
+#define XILINX_DP_SUB_AUD_SOFT_RESET 0xc00
+#define XILINX_DP_SUB_AUD_SOFT_RESET_AUD_SRST BIT(0)
+
+#define XILINX_DP_SUB_AV_BUF_NUM_VID_GFX_BUFFERS 4
+#define XILINX_DP_SUB_AV_BUF_NUM_BUFFERS 6
+
+/**
+ * enum xilinx_drm_dp_sub_layer_type - Layer type
+ * @XILINX_DRM_DP_SUB_LAYER_VID: video layer
+ * @XILINX_DRM_DP_SUB_LAYER_GFX: graphics layer
+ */
+enum xilinx_drm_dp_sub_layer_type {
+ XILINX_DRM_DP_SUB_LAYER_VID,
+ XILINX_DRM_DP_SUB_LAYER_GFX
+};
+
+/**
+ * struct xilinx_drm_dp_sub_layer - DP subsystem layer
+ * @id: layer ID
+ * @offset: layer offset in the register space
+ * @avail: flag if layer is available
+ * @primary: flag for primary plane
+ * @enabled: flag if the layer is enabled
+ * @fmt: format descriptor
+ * @drm_fmts: array of supported DRM formats
+ * @num_fmts: number of supported DRM formats
+ * @w: width
+ * @h: height
+ * @other: other layer
+ */
+struct xilinx_drm_dp_sub_layer {
+ enum xilinx_drm_dp_sub_layer_type id;
+ u32 offset;
+ bool avail;
+ bool primary;
+ bool enabled;
+ const struct xilinx_drm_dp_sub_fmt *fmt;
+ u32 *drm_fmts;
+ unsigned int num_fmts;
+ u32 w;
+ u32 h;
+ struct xilinx_drm_dp_sub_layer *other;
+};
+
+/**
+ * struct xilinx_drm_dp_sub_blend - DP subsystem blender
+ * @base: pre-calculated base address
+ */
+struct xilinx_drm_dp_sub_blend {
+ void __iomem *base;
+};
+
+/**
+ * struct xilinx_drm_dp_sub_av_buf - DP subsystem av buffer manager
+ * @base: pre-calculated base address
+ */
+struct xilinx_drm_dp_sub_av_buf {
+ void __iomem *base;
+};
+
+/**
+ * struct xilinx_drm_dp_sub_aud - DP subsystem audio
+ * @base: pre-calculated base address
+ */
+struct xilinx_drm_dp_sub_aud {
+ void __iomem *base;
+};
+
+/**
+ * struct xilinx_drm_dp_sub - DP subsystem
+ * @dev: device structure
+ * @blend: blender device
+ * @av_buf: av buffer manager device
+ * @aud: audio device
+ * @layers: layers
+ * @list: entry in the global DP subsystem list
+ * @vblank_fn: vblank handler
+ * @vblank_data: vblank data to be used in vblank_fn
+ * @vid_clk_pl: flag if the clock is from PL
+ * @alpha: stored global alpha value
+ * @alpha_en: flag if the global alpha is enabled
+ */
+struct xilinx_drm_dp_sub {
+ struct device *dev;
+ struct xilinx_drm_dp_sub_blend blend;
+ struct xilinx_drm_dp_sub_av_buf av_buf;
+ struct xilinx_drm_dp_sub_aud aud;
+ struct xilinx_drm_dp_sub_layer layers[XILINX_DRM_DP_SUB_NUM_LAYERS];
+ struct list_head list;
+ void (*vblank_fn)(void *);
+ void *vblank_data;
+ bool vid_clk_pl;
+ u32 alpha;
+ bool alpha_en;
+};
+
+/**
+ * struct xilinx_drm_dp_sub_fmt - DP subsystem format mapping
+ * @drm_fmt: drm format
+ * @dp_sub_fmt: DP subsystem format
+ * @rgb: flag for RGB formats
+ * @swap: flag to swap r & b for rgb formats, and u & v for yuv formats
+ * @chroma_sub: flag for chroma subsampled formats
+ * @sf: scaling factors for up to 3 color components
+ * @name: format name
+ */
+struct xilinx_drm_dp_sub_fmt {
+ u32 drm_fmt;
+ u32 dp_sub_fmt;
+ bool rgb;
+ bool swap;
+ bool chroma_sub;
+ u32 sf[3];
+ const char *name;
+};
+
+static LIST_HEAD(xilinx_drm_dp_sub_list);
+static DEFINE_MUTEX(xilinx_drm_dp_sub_lock);
+
+#ifdef CONFIG_DRM_XILINX_DP_SUB_DEBUG_FS
+#define XILINX_DP_SUB_DEBUGFS_READ_MAX_SIZE 32
+#define XILINX_DP_SUB_DEBUGFS_MAX_BG_COLOR_VAL 0xFFF
+#define IN_RANGE(x, min, max) ({ \
+ typeof(x) _x = (x); \
+ _x >= (min) && _x <= (max); })
+
+/* Match enum xilinx_dp_sub_testcases entries with dp_sub_debugfs_reqs[] */
+enum xilinx_dp_sub_testcases {
+ DP_SUB_TC_BG_COLOR,
+ DP_SUB_TC_OUTPUT_FMT,
+ DP_SUB_TC_NONE
+};
+
+struct xilinx_dp_sub_debugfs {
+ enum xilinx_dp_sub_testcases testcase;
+ u16 r_value;
+ u16 g_value;
+ u16 b_value;
+ u32 output_fmt;
+ struct xilinx_drm_dp_sub *xilinx_dp_sub;
+};
+
+static struct xilinx_dp_sub_debugfs dp_sub_debugfs;
+
+struct xilinx_dp_sub_debugfs_request {
+ const char *req;
+ enum xilinx_dp_sub_testcases tc;
+ ssize_t (*read_handler)(char **kern_buff);
+ ssize_t (*write_handler)(char **cmd);
+};
+
+static s64 xilinx_dp_sub_debugfs_argument_value(char *arg)
+{
+ s64 value;
+
+ if (!arg)
+ return -1;
+
+ if (!kstrtos64(arg, 0, &value))
+ return value;
+
+ return -1;
+}
+
+static void
+xilinx_dp_sub_debugfs_update_v_blend(u16 *sdtv_coeffs, u32 *full_range_offsets)
+{
+ struct xilinx_drm_dp_sub *dp_sub = dp_sub_debugfs.xilinx_dp_sub;
+ u32 offset, i;
+
+ /* Hardcode SDTV coefficients. Can be runtime configurable */
+ offset = XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF0;
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_COEFF; i++)
+ xilinx_drm_writel(dp_sub->blend.base, offset + i * 4,
+ sdtv_coeffs[i]);
+
+ offset = XILINX_DP_SUB_V_BLEND_LUMA_OUTCSC_OFFSET;
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_OFFSET; i++)
+ xilinx_drm_writel(dp_sub->blend.base, offset + i * 4,
+ full_range_offsets[i]);
+}
+
+static void xilinx_dp_sub_debugfs_output_format(u32 fmt)
+{
+ struct xilinx_drm_dp_sub *dp_sub = dp_sub_debugfs.xilinx_dp_sub;
+
+ xilinx_drm_writel(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT, fmt);
+
+ if (fmt != XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB) {
+ u16 sdtv_coeffs[] = { 0x4c9, 0x864, 0x1d3,
+ 0x7d4d, 0x7ab3, 0x800,
+ 0x800, 0x794d, 0x7eb3 };
+ u32 full_range_offsets[] = { 0x0, 0x8000000, 0x8000000 };
+
+ xilinx_dp_sub_debugfs_update_v_blend(sdtv_coeffs,
+ full_range_offsets);
+ } else {
+ /* In case of RGB, set the reset values */
+ u16 sdtv_coeffs[] = { 0x1000, 0x0, 0x0,
+ 0x0, 0x1000, 0x0,
+ 0x0, 0x0, 0x1000 };
+ u32 full_range_offsets[] = { 0x0, 0x0, 0x0 };
+
+ xilinx_dp_sub_debugfs_update_v_blend(sdtv_coeffs,
+ full_range_offsets);
+ }
+}
+
+static ssize_t
+xilinx_dp_sub_debugfs_background_color_write(char **dp_sub_test_arg)
+{
+ char *r_color, *g_color, *b_color;
+ s64 r_val, g_val, b_val;
+
+ r_color = strsep(dp_sub_test_arg, " ");
+ g_color = strsep(dp_sub_test_arg, " ");
+ b_color = strsep(dp_sub_test_arg, " ");
+
+ /* char * to int conversion */
+ r_val = xilinx_dp_sub_debugfs_argument_value(r_color);
+ g_val = xilinx_dp_sub_debugfs_argument_value(g_color);
+ b_val = xilinx_dp_sub_debugfs_argument_value(b_color);
+
+ if (!(IN_RANGE(r_val, 0, XILINX_DP_SUB_DEBUGFS_MAX_BG_COLOR_VAL) &&
+ IN_RANGE(g_val, 0, XILINX_DP_SUB_DEBUGFS_MAX_BG_COLOR_VAL) &&
+ IN_RANGE(b_val, 0, XILINX_DP_SUB_DEBUGFS_MAX_BG_COLOR_VAL)))
+ return -EINVAL;
+
+ dp_sub_debugfs.r_value = r_val;
+ dp_sub_debugfs.g_value = g_val;
+ dp_sub_debugfs.b_value = b_val;
+
+ dp_sub_debugfs.testcase = DP_SUB_TC_BG_COLOR;
+
+ return 0;
+}
+
+static ssize_t
+xilinx_dp_sub_debugfs_output_display_format_write(char **dp_sub_test_arg)
+{
+ char *output_format;
+ struct xilinx_drm_dp_sub *dp_sub = dp_sub_debugfs.xilinx_dp_sub;
+ u32 fmt;
+
+ /* Read the output format from the user argument */
+ output_format = strsep(dp_sub_test_arg, " ");
+ if (strncmp(output_format, "rgb", 3) == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB;
+ } else if (strncmp(output_format, "ycbcr444", 8) == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR444;
+ } else if (strncmp(output_format, "ycbcr422", 8) == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR422;
+ fmt |= XILINX_DP_SUB_V_BLEND_OUTPUT_EN_DOWNSAMPLE;
+ } else if (strncmp(output_format, "yonly", 5) == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YONLY;
+ } else {
+ dev_err(dp_sub->dev, "Invalid output format\n");
+ return -EINVAL;
+ }
+
+ dp_sub_debugfs.output_fmt =
+ xilinx_drm_readl(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT);
+
+ xilinx_dp_sub_debugfs_output_format(fmt);
+ dp_sub_debugfs.testcase = DP_SUB_TC_OUTPUT_FMT;
+
+ return 0;
+}
+
+static ssize_t
+xilinx_dp_sub_debugfs_output_display_format_read(char **kern_buff)
+{
+ size_t out_str_len;
+
+ dp_sub_debugfs.testcase = DP_SUB_TC_NONE;
+ xilinx_dp_sub_debugfs_output_format(dp_sub_debugfs.output_fmt);
+
+ /* Reserve room for the terminating NUL that snprintf() writes */
+ out_str_len = strlen("Success") + 1;
+ out_str_len = min_t(size_t, XILINX_DP_SUB_DEBUGFS_READ_MAX_SIZE,
+ out_str_len);
+ snprintf(*kern_buff, out_str_len, "%s", "Success");
+
+ return 0;
+}
+
+static ssize_t
+xilinx_dp_sub_debugfs_background_color_read(char **kern_buff)
+{
+ size_t out_str_len;
+
+ dp_sub_debugfs.testcase = DP_SUB_TC_NONE;
+ dp_sub_debugfs.r_value = 0;
+ dp_sub_debugfs.g_value = 0;
+ dp_sub_debugfs.b_value = 0;
+
+ /* Reserve room for the terminating NUL that snprintf() writes */
+ out_str_len = strlen("Success") + 1;
+ out_str_len = min_t(size_t, XILINX_DP_SUB_DEBUGFS_READ_MAX_SIZE,
+ out_str_len);
+ snprintf(*kern_buff, out_str_len, "%s", "Success");
+
+ return 0;
+}
+
+/* Match enum xilinx_dp_sub_testcases entries with dp_sub_debugfs_reqs[] */
+static struct xilinx_dp_sub_debugfs_request dp_sub_debugfs_reqs[] = {
+ {"BACKGROUND_COLOR", DP_SUB_TC_BG_COLOR,
+ xilinx_dp_sub_debugfs_background_color_read,
+ xilinx_dp_sub_debugfs_background_color_write},
+ {"OUTPUT_DISPLAY_FORMAT", DP_SUB_TC_OUTPUT_FMT,
+ xilinx_dp_sub_debugfs_output_display_format_read,
+ xilinx_dp_sub_debugfs_output_display_format_write},
+};
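+
+/*
+ * Illustrative user space usage of the "testcase" debugfs file created in
+ * xilinx_dp_sub_debugfs_init() below (assuming debugfs is mounted at
+ * /sys/kernel/debug):
+ *
+ *   echo "BACKGROUND_COLOR 0xfff 0x0 0x0" > /sys/kernel/debug/dp_sub/testcase
+ *   echo "OUTPUT_DISPLAY_FORMAT ycbcr444" > /sys/kernel/debug/dp_sub/testcase
+ *   cat /sys/kernel/debug/dp_sub/testcase
+ */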
+
+static ssize_t
+xilinx_dp_sub_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *kern_buff, *dp_sub_test_req, *kern_buff_start;
+ int ret;
+ unsigned int i;
+
+ if (*pos != 0 || size <= 0)
+ return -EINVAL;
+
+ if (dp_sub_debugfs.testcase != DP_SUB_TC_NONE)
+ return -EBUSY;
+
+ kern_buff = kzalloc(size + 1, GFP_KERNEL); /* +1 guarantees NUL termination */
+ if (!kern_buff)
+ return -ENOMEM;
+ kern_buff_start = kern_buff;
+
+ ret = strncpy_from_user(kern_buff, buf, size);
+ if (ret < 0) {
+ kfree(kern_buff_start);
+ return ret;
+ }
+
+ /* Read the testcase name and argument from the user request */
+ dp_sub_test_req = strsep(&kern_buff, " ");
+
+ for (i = 0; i < ARRAY_SIZE(dp_sub_debugfs_reqs); i++) {
+ if (!strcasecmp(dp_sub_test_req, dp_sub_debugfs_reqs[i].req))
+ if (!dp_sub_debugfs_reqs[i].write_handler(&kern_buff)) {
+ kfree(kern_buff_start);
+ return size;
+ }
+ }
+ kfree(kern_buff_start);
+ return -EINVAL;
+}
+
+static ssize_t xilinx_dp_sub_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *kern_buff = NULL;
+ size_t kern_buff_len, out_str_len;
+ int ret;
+
+ if (size <= 0)
+ return -EINVAL;
+
+ if (*pos != 0)
+ return 0;
+
+ kern_buff = kzalloc(XILINX_DP_SUB_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
+ if (!kern_buff) {
+ dp_sub_debugfs.testcase = DP_SUB_TC_NONE;
+ return -ENOMEM;
+ }
+
+ if (dp_sub_debugfs.testcase == DP_SUB_TC_NONE) {
+ /* Reserve room for the terminating NUL that snprintf() writes */
+ out_str_len = strlen("No testcase executed") + 1;
+ out_str_len = min_t(size_t, XILINX_DP_SUB_DEBUGFS_READ_MAX_SIZE,
+ out_str_len);
+ snprintf(kern_buff, out_str_len, "%s", "No testcase executed");
+ } else {
+ ret = dp_sub_debugfs_reqs[dp_sub_debugfs.testcase].read_handler(
+ &kern_buff);
+ if (ret) {
+ kfree(kern_buff);
+ return ret;
+ }
+ }
+
+ kern_buff_len = strlen(kern_buff);
+ size = min(size, kern_buff_len);
+
+ ret = copy_to_user(buf, kern_buff, size);
+
+ kfree(kern_buff);
+ if (ret)
+ return -EFAULT;
+
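+ /* Park *pos past the data so a subsequent read returns 0 (EOF) */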
+ *pos = size + 1;
+ return size;
+}
+
+static const struct file_operations fops_xilinx_dp_sub_dbgfs = {
+ .owner = THIS_MODULE,
+ .read = xilinx_dp_sub_debugfs_read,
+ .write = xilinx_dp_sub_debugfs_write,
+};
+
+static int xilinx_dp_sub_debugfs_init(struct xilinx_drm_dp_sub *dp_sub)
+{
+ int err;
+ struct dentry *xilinx_dp_sub_debugfs_dir, *xilinx_dp_sub_debugfs_file;
+
+ dp_sub_debugfs.testcase = DP_SUB_TC_NONE;
+ dp_sub_debugfs.xilinx_dp_sub = dp_sub;
+
+ xilinx_dp_sub_debugfs_dir = debugfs_create_dir("dp_sub", NULL);
+ if (!xilinx_dp_sub_debugfs_dir) {
+ dev_err(dp_sub->dev, "debugfs_create_dir failed\n");
+ return -ENODEV;
+ }
+
+ xilinx_dp_sub_debugfs_file =
+ debugfs_create_file("testcase", 0444,
+ xilinx_dp_sub_debugfs_dir, NULL,
+ &fops_xilinx_dp_sub_dbgfs);
+ if (!xilinx_dp_sub_debugfs_file) {
+ dev_err(dp_sub->dev, "debugfs_create_file testcase failed\n");
+ err = -ENODEV;
+ goto err_dbgfs;
+ }
+ return 0;
+
+err_dbgfs:
+ debugfs_remove_recursive(xilinx_dp_sub_debugfs_dir);
+ xilinx_dp_sub_debugfs_dir = NULL;
+ return err;
+}
+
+static void xilinx_drm_dp_sub_debugfs_bg_color(struct xilinx_drm_dp_sub *dp_sub)
+{
+ if (dp_sub_debugfs.testcase == DP_SUB_TC_BG_COLOR) {
+ xilinx_drm_writel(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_BG_CLR_0,
+ dp_sub_debugfs.r_value);
+ xilinx_drm_writel(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_BG_CLR_1,
+ dp_sub_debugfs.g_value);
+ xilinx_drm_writel(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_BG_CLR_2,
+ dp_sub_debugfs.b_value);
+ }
+}
+#else
+static int xilinx_dp_sub_debugfs_init(struct xilinx_drm_dp_sub *dp_sub)
+{
+ return 0;
+}
+
+static void xilinx_drm_dp_sub_debugfs_bg_color(struct xilinx_drm_dp_sub *dp_sub)
+{
+}
+#endif /* CONFIG_DRM_XILINX_DP_SUB_DEBUG_FS */
+
+/* Blender functions */
+
+/**
+ * xilinx_drm_dp_sub_blend_layer_enable - Enable a layer
+ * @blend: blend object
+ * @layer: layer to enable
+ *
+ * Enable a layer @layer.
+ */
+static void
+xilinx_drm_dp_sub_blend_layer_enable(struct xilinx_drm_dp_sub_blend *blend,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ u32 reg, offset, i, s0, s1;
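+ /*
+ * 3x3 CSC matrices in the blender's fixed-point format, where 0x1000
+ * represents 1.0: sdtv_coeffs appears to be a BT.601-style YCbCr to RGB
+ * conversion and swap_coeffs is the identity matrix.
+ */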
+ u16 sdtv_coeffs[] = { 0x1000, 0x166f, 0x0,
+ 0x1000, 0x7483, 0x7a7f,
+ 0x1000, 0x0, 0x1c5a };
+ u16 swap_coeffs[] = { 0x1000, 0x0, 0x0,
+ 0x0, 0x1000, 0x0,
+ 0x0, 0x0, 0x1000 };
+ u16 *coeffs;
+ u32 offsets[] = { 0x0, 0x1800, 0x1800 };
+
+ reg = layer->fmt->rgb ? XILINX_DP_SUB_V_BLEND_LAYER_CONTROL_RGB : 0;
+ reg |= layer->fmt->chroma_sub ?
+ XILINX_DP_SUB_V_BLEND_LAYER_CONTROL_EN_US : 0;
+
+ xilinx_drm_writel(blend->base,
+ XILINX_DP_SUB_V_BLEND_LAYER_CONTROL + layer->offset,
+ reg);
+
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID)
+ offset = XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF0;
+ else
+ offset = XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF0;
+
+ if (!layer->fmt->rgb) {
+ coeffs = sdtv_coeffs;
+ s0 = 1;
+ s1 = 2;
+ } else {
+ coeffs = swap_coeffs;
+ s0 = 0;
+ s1 = 2;
+
+ /* No offset for RGB formats */
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_OFFSET; i++)
+ offsets[i] = 0;
+ }
+
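+ /*
+ * For swapped formats, exchange CSC matrix columns s0 and s1 (U/V for
+ * YUV formats, R/B for RGB formats) using XOR swaps, with no temporary.
+ */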
+ if (layer->fmt->swap) {
+ for (i = 0; i < 3; i++) {
+ coeffs[i * 3 + s0] ^= coeffs[i * 3 + s1];
+ coeffs[i * 3 + s1] ^= coeffs[i * 3 + s0];
+ coeffs[i * 3 + s0] ^= coeffs[i * 3 + s1];
+ }
+ }
+
+ /* Program coefficients. Can be runtime configurable */
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_COEFF; i++)
+ xilinx_drm_writel(blend->base, offset + i * 4, coeffs[i]);
+
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID)
+ offset = XILINX_DP_SUB_V_BLEND_LUMA_IN1CSC_OFFSET;
+ else
+ offset = XILINX_DP_SUB_V_BLEND_LUMA_IN2CSC_OFFSET;
+
+ /* Program offsets. Can be runtime configurable */
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_OFFSET; i++)
+ xilinx_drm_writel(blend->base, offset + i * 4, offsets[i]);
+}
+
+/**
+ * xilinx_drm_dp_sub_blend_layer_disable - Disable a layer
+ * @blend: blend object
+ * @layer: layer to disable
+ *
+ * Disable a layer @layer.
+ */
+static void
+xilinx_drm_dp_sub_blend_layer_disable(struct xilinx_drm_dp_sub_blend *blend,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ xilinx_drm_writel(blend->base,
+ XILINX_DP_SUB_V_BLEND_LAYER_CONTROL + layer->offset,
+ 0);
+}
+
+/**
+ * xilinx_drm_dp_sub_blend_set_bg_color - Set the background color
+ * @blend: blend object
+ * @c0: color component 0
+ * @c1: color component 1
+ * @c2: color component 2
+ *
+ * Set the background color.
+ */
+static void
+xilinx_drm_dp_sub_blend_set_bg_color(struct xilinx_drm_dp_sub_blend *blend,
+ u32 c0, u32 c1, u32 c2)
+{
+ xilinx_drm_writel(blend->base, XILINX_DP_SUB_V_BLEND_BG_CLR_0, c0);
+ xilinx_drm_writel(blend->base, XILINX_DP_SUB_V_BLEND_BG_CLR_1, c1);
+ xilinx_drm_writel(blend->base, XILINX_DP_SUB_V_BLEND_BG_CLR_2, c2);
+}
+
+/**
+ * xilinx_drm_dp_sub_blend_set_alpha - Set the alpha for blending
+ * @blend: blend object
+ * @alpha: alpha value to be used
+ *
+ * Set the alpha for blending.
+ */
+static void
+xilinx_drm_dp_sub_blend_set_alpha(struct xilinx_drm_dp_sub_blend *blend,
+ u32 alpha)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(blend->base,
+ XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA);
+ reg &= ~XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA_MASK;
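+ /* Bit 0 is the global alpha enable flag; the alpha value starts at bit 1 */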
+ reg |= alpha << 1;
+ xilinx_drm_writel(blend->base, XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA,
+ reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_blend_enable_alpha - Enable/disable the global alpha
+ * @blend: blend object
+ * @enable: flag to enable or disable alpha blending
+ *
+ * Enable/disable the global alpha blending based on @enable.
+ */
+static void
+xilinx_drm_dp_sub_blend_enable_alpha(struct xilinx_drm_dp_sub_blend *blend,
+ bool enable)
+{
+ if (enable)
+ xilinx_drm_set(blend->base,
+ XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA, BIT(0));
+ else
+ xilinx_drm_clr(blend->base,
+ XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA, BIT(0));
+}
+
+static const struct xilinx_drm_dp_sub_fmt blend_output_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .dp_sub_fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "rgb888",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV444,
+ .dp_sub_fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR444,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yuv444",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV422,
+ .dp_sub_fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR422,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .name = "yuv422",
+ }, {
+ }
+};
+
+/**
+ * xilinx_drm_dp_sub_blend_set_output_fmt - Set the output format
+ * @blend: blend object
+ * @fmt: output format
+ *
+ * Set the output format to @fmt.
+ */
+static void
+xilinx_drm_dp_sub_blend_set_output_fmt(struct xilinx_drm_dp_sub_blend *blend,
+ u32 fmt)
+{
+ xilinx_drm_writel(blend->base, XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT,
+ fmt);
+}
+
+/* AV buffer manager functions */
+
+static const struct xilinx_drm_dp_sub_fmt av_buf_vid_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_VYUY,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_VYUY,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "vyuy",
+ }, {
+ .drm_fmt = DRM_FORMAT_UYVY,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_VYUY,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "uyvy",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUYV,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YUYV,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yuyv",
+ }, {
+ .drm_fmt = DRM_FORMAT_YVYU,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YUYV,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yvyu",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV422,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yuv422",
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU422,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yvu422",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV444,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV24,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yuv444",
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU444,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV24,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yvu444",
+ }, {
+ .drm_fmt = DRM_FORMAT_NV16,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "nv16",
+ }, {
+ .drm_fmt = DRM_FORMAT_NV61,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "nv61",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "bgr888",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "rgb888",
+ }, {
+ .drm_fmt = DRM_FORMAT_XBGR8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGBA8880,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "xbgr8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_XRGB8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGBA8880,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "xrgb8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_XBGR2101010,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888_10,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .name = "xbgr2101010",
+ }, {
+ .drm_fmt = DRM_FORMAT_XRGB2101010,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888_10,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .name = "xrgb2101010",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV420,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16_420,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yuv420",
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU420,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16_420,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yvu420",
+ }, {
+ .drm_fmt = DRM_FORMAT_NV12,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_420,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "nv12",
+ }, {
+ .drm_fmt = DRM_FORMAT_NV21,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_420,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "nv21",
+ }, {
+ .drm_fmt = DRM_FORMAT_XV15,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_420_10,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .name = "yuv42010b",
+ }, {
+ .drm_fmt = DRM_FORMAT_XV20,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_10,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .name = "yuv42210b",
+ }
+};
+
+static const struct xilinx_drm_dp_sub_fmt av_buf_gfx_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_ABGR8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA8888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "abgr8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_ARGB8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA8888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "argb8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_ABGR8888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "rgba8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_ABGR8888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "bgra8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGB888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "bgr888",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_BGR888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "rgb888",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA5551,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA5551,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .name = "rgba5551",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA5551,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA5551,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .name = "bgra5551",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA4444,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA4444,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .name = "rgba4444",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA4444,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA4444,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .name = "bgra4444",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB565,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGB565,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_6BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .name = "rgb565",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR565,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGB565,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_6BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .name = "bgr565",
+ }
+};
+
+/**
+ * xilinx_drm_dp_sub_av_buf_set_fmt - Set the input formats
+ * @av_buf: av buffer manager
+ * @fmt: formats
+ *
+ * Set the av buffer manager format to @fmt. @fmt should have valid values
+ * for both the video and graphics layers.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_set_fmt(struct xilinx_drm_dp_sub_av_buf *av_buf,
+ u32 fmt)
+{
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_FMT, fmt);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_get_fmt - Get the input formats
+ * @av_buf: av buffer manager
+ *
+ * Get the input formats (which include video and graphics) of
+ * the av buffer manager.
+ *
+ * Return: value of XILINX_DP_SUB_AV_BUF_FMT register.
+ */
+static u32
+xilinx_drm_dp_sub_av_buf_get_fmt(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ return xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_FMT);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_set_vid_clock_src - Set the video clock source
+ * @av_buf: av buffer manager
+ * @from_ps: flag if the video clock comes from the PS
+ *
+ * Set the video clock source based on @from_ps. It can come from either PS or
+ * PL.
+ */
+static void xilinx_drm_dp_sub_av_buf_set_vid_clock_src(
+ struct xilinx_drm_dp_sub_av_buf *av_buf, bool from_ps)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC);
+ if (from_ps)
+ reg |= XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_FROM_PS;
+ else
+ reg &= ~XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_FROM_PS;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_set_vid_timing_src - Set the video timing source
+ * @av_buf: av buffer manager
+ * @internal: flag if the video timing is generated internally
+ *
+ * Set the video timing source based on @internal. The timing can be sourced
+ * externally or generated internally.
+ */
+static void xilinx_drm_dp_sub_av_buf_set_vid_timing_src(
+ struct xilinx_drm_dp_sub_av_buf *av_buf, bool internal)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC);
+ if (internal)
+ reg |= XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING;
+ else
+ reg &= ~XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_set_aud_clock_src - Set the audio clock source
+ * @av_buf: av buffer manager
+ * @from_ps: flag if the audio clock comes from the PS
+ *
+ * Set the audio clock source based on @from_ps. It can come from either PS or
+ * PL.
+ */
+static void xilinx_drm_dp_sub_av_buf_set_aud_clock_src(
+ struct xilinx_drm_dp_sub_av_buf *av_buf, bool from_ps)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC);
+ if (from_ps)
+ reg |= XILINX_DP_SUB_AV_BUF_CLK_SRC_AUD_FROM_PS;
+ else
+ reg &= ~XILINX_DP_SUB_AV_BUF_CLK_SRC_AUD_FROM_PS;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_enable_buf - Enable buffers
+ * @av_buf: av buffer manager
+ *
+ * Enable all (video and audio) buffers.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_enable_buf(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ u32 reg, i;
+
+ reg = XILINX_DP_SUB_AV_BUF_CHBUF_EN;
+ reg |= XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_MAX <<
+ XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_SHIFT;
+
+ for (i = 0; i < XILINX_DP_SUB_AV_BUF_NUM_VID_GFX_BUFFERS; i++)
+ xilinx_drm_writel(av_buf->base,
+ XILINX_DP_SUB_AV_BUF_CHBUF + i * 4, reg);
+
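+ /* The remaining channel buffers carry audio and use a shorter burst length */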
+ reg = XILINX_DP_SUB_AV_BUF_CHBUF_EN;
+ reg |= XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_AUD_MAX <<
+ XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_SHIFT;
+
+ for (; i < XILINX_DP_SUB_AV_BUF_NUM_BUFFERS; i++)
+ xilinx_drm_writel(av_buf->base,
+ XILINX_DP_SUB_AV_BUF_CHBUF + i * 4, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_disable_buf - Disable buffers
+ * @av_buf: av buffer manager
+ *
+ * Disable all (video and audio) buffers.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_disable_buf(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ u32 reg, i;
+
+ reg = XILINX_DP_SUB_AV_BUF_CHBUF_FLUSH & ~XILINX_DP_SUB_AV_BUF_CHBUF_EN;
+ for (i = 0; i < XILINX_DP_SUB_AV_BUF_NUM_BUFFERS; i++)
+ xilinx_drm_writel(av_buf->base,
+ XILINX_DP_SUB_AV_BUF_CHBUF + i * 4, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_enable_aud - Enable audio
+ * @av_buf: av buffer manager
+ *
+ * Route audio stream 1 from memory and enable audio stream 2.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_enable_aud(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT);
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_MEM;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_AUD2_EN;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_enable - Enable the video pipe
+ * @av_buf: av buffer manager
+ *
+ * De-assert the video pipe reset
+ */
+static void
+xilinx_drm_dp_sub_av_buf_enable(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_SRST_REG, 0);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_disable - Disable the video pipe
+ * @av_buf: av buffer manager
+ *
+ * Assert the video pipe reset
+ */
+static void
+xilinx_drm_dp_sub_av_buf_disable(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_SRST_REG,
+ XILINX_DP_SUB_AV_BUF_SRST_REG_VID_RST);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_disable_aud - Disable audio
+ * @av_buf: av buffer manager
+ *
+ * Disable both audio outputs (stream 1 and stream 2).
+ */
+static void
+xilinx_drm_dp_sub_av_buf_disable_aud(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT);
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_DISABLE;
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_AUD2_EN;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_enable_vid - Enable the video layer buffer
+ * @av_buf: av buffer manager
+ * @layer: layer to enable
+ *
+ * Enable the video/graphics buffer for @layer.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_enable_vid(struct xilinx_drm_dp_sub_av_buf *av_buf,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT);
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID) {
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_MEM;
+ } else {
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_MEM;
+ }
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_disable_vid - Disable the video layer buffer
+ * @av_buf: av buffer manager
+ * @layer: layer to disable
+ *
+ * Disable the video/graphics buffer for @layer.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_disable_vid(struct xilinx_drm_dp_sub_av_buf *av_buf,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT);
+
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID) {
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_NONE;
+ } else {
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_DISABLE;
+ }
+
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_init_fmts - Initialize the layer formats
+ * @av_buf: av buffer manager
+ * @vid_fmt: video format descriptor
+ * @gfx_fmt: graphics format descriptor
+ *
+ * Initialize formats of both video and graphics layers.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_init_fmts(struct xilinx_drm_dp_sub_av_buf *av_buf,
+ const struct xilinx_drm_dp_sub_fmt *vid_fmt,
+ const struct xilinx_drm_dp_sub_fmt *gfx_fmt)
+{
+ u32 reg;
+
+ reg = vid_fmt->dp_sub_fmt;
+ reg |= gfx_fmt->dp_sub_fmt;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_FMT, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_init_sf - Initialize scaling factors
+ * @av_buf: av buffer manager
+ * @vid_fmt: video format descriptor
+ * @gfx_fmt: graphics format descriptor
+ *
+ * Initialize scaling factors for both video and graphics layers.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_init_sf(struct xilinx_drm_dp_sub_av_buf *av_buf,
+ const struct xilinx_drm_dp_sub_fmt *vid_fmt,
+ const struct xilinx_drm_dp_sub_fmt *gfx_fmt)
+{
+ unsigned int i;
+ int offset;
+
+ if (gfx_fmt) {
+ offset = XILINX_DP_SUB_AV_BUF_GFX_COMP0_SF;
+ for (i = 0; i < XILINX_DP_SUB_AV_BUF_NUM_SF; i++)
+ xilinx_drm_writel(av_buf->base, offset + i * 4,
+ gfx_fmt->sf[i]);
+ }
+
+ if (vid_fmt) {
+ offset = XILINX_DP_SUB_AV_BUF_VID_COMP0_SF;
+ for (i = 0; i < XILINX_DP_SUB_AV_BUF_NUM_SF; i++)
+ xilinx_drm_writel(av_buf->base, offset + i * 4,
+ vid_fmt->sf[i]);
+ }
+}
+
+/* Audio functions */
+
+/**
+ * xilinx_drm_dp_sub_aud_init - Initialize the audio
+ * @aud: audio
+ *
+ * Initialize the audio with the default mixer volume. De-asserting the soft
+ * reset initializes the audio state.
+ */
+static void xilinx_drm_dp_sub_aud_init(struct xilinx_drm_dp_sub_aud *aud)
+{
+ /* Clear the audio soft reset register as it's a non-reset flop */
+ xilinx_drm_writel(aud->base, XILINX_DP_SUB_AUD_SOFT_RESET, 0);
+ xilinx_drm_writel(aud->base, XILINX_DP_SUB_AUD_MIXER_VOLUME,
+ XILINX_DP_SUB_AUD_MIXER_VOLUME_NO_SCALE);
+}
+
+/**
+ * xilinx_drm_dp_sub_aud_deinit - De-initialize the audio
+ * @aud: audio
+ *
+ * Put the audio in reset.
+ */
+static void xilinx_drm_dp_sub_aud_deinit(struct xilinx_drm_dp_sub_aud *aud)
+{
+ xilinx_drm_set(aud->base, XILINX_DP_SUB_AUD_SOFT_RESET,
+ XILINX_DP_SUB_AUD_SOFT_RESET_AUD_SRST);
+}
+
+/* DP subsystem layer functions */
+
+/**
+ * xilinx_drm_dp_sub_layer_check_size - Verify width and height for the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer
+ * @width: width
+ * @height: height
+ *
+ * The DP subsystem requires that both layers have identical dimensions. This
+ * function verifies the given width and height against the other enabled
+ * layer and, when they are valid, stores them in @layer.
+ *
+ * Return: 0 on success, or -EINVAL if width or/and height is invalid.
+ */
+int xilinx_drm_dp_sub_layer_check_size(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer,
+ u32 width, u32 height)
+{
+ struct xilinx_drm_dp_sub_layer *other = layer->other;
+
+ if (other->enabled && (other->w != width || other->h != height)) {
+ dev_err(dp_sub->dev, "Layer width:height must be %d:%d\n",
+ other->w, other->h);
+ return -EINVAL;
+ }
+
+ layer->w = width;
+ layer->h = height;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_check_size);
+
+/**
+ * xilinx_drm_dp_sub_map_fmt - Find the DP subsystem format for a given DRM format
+ * @fmts: format table to look up
+ * @size: size of the table @fmts
+ * @drm_fmt: DRM format to search
+ *
+ * Search a DP subsystem format corresponding to the given DRM format @drm_fmt,
+ * and return the format descriptor which contains the DP subsystem format
+ * value.
+ *
+ * Return: a DP subsystem format descriptor on success, or NULL.
+ */
+static const struct xilinx_drm_dp_sub_fmt *
+xilinx_drm_dp_sub_map_fmt(const struct xilinx_drm_dp_sub_fmt fmts[],
+ unsigned int size, u32 drm_fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ if (fmts[i].drm_fmt == drm_fmt)
+ return &fmts[i];
+
+ return NULL;
+}
+
+/**
+ * xilinx_drm_dp_sub_layer_set_fmt - Set the format of the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer to set the format
+ * @drm_fmt: DRM format to set
+ *
+ * Set the format of the given layer to @drm_fmt.
+ *
+ * Return: 0 on success. -EINVAL if @drm_fmt is not supported by the layer.
+ */
+int xilinx_drm_dp_sub_layer_set_fmt(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer,
+ u32 drm_fmt)
+{
+ const struct xilinx_drm_dp_sub_fmt *fmt;
+ const struct xilinx_drm_dp_sub_fmt *vid_fmt = NULL, *gfx_fmt = NULL;
+ u32 size, fmts, mask;
+
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID) {
+ size = ARRAY_SIZE(av_buf_vid_fmts);
+ mask = ~XILINX_DP_SUB_AV_BUF_FMT_NL_VID_MASK;
+ fmt = xilinx_drm_dp_sub_map_fmt(av_buf_vid_fmts, size, drm_fmt);
+ vid_fmt = fmt;
+ } else {
+ size = ARRAY_SIZE(av_buf_gfx_fmts);
+ mask = ~XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_MASK;
+ fmt = xilinx_drm_dp_sub_map_fmt(av_buf_gfx_fmts, size, drm_fmt);
+ gfx_fmt = fmt;
+ }
+
+ if (!fmt)
+ return -EINVAL;
+
+ fmts = xilinx_drm_dp_sub_av_buf_get_fmt(&dp_sub->av_buf);
+ fmts &= mask;
+ fmts |= fmt->dp_sub_fmt;
+ xilinx_drm_dp_sub_av_buf_set_fmt(&dp_sub->av_buf, fmts);
+ xilinx_drm_dp_sub_av_buf_init_sf(&dp_sub->av_buf, vid_fmt, gfx_fmt);
+
+ layer->fmt = fmt;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_set_fmt);
+
+/**
+ * xilinx_drm_dp_sub_layer_get_fmt - Get the format of the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer to get the format from
+ *
+ * Get the format of the given layer.
+ *
+ * Return: DRM format of the layer
+ */
+u32 xilinx_drm_dp_sub_layer_get_fmt(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ return layer->fmt->drm_fmt;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_get_fmt);
+
+/**
+ * xilinx_drm_dp_sub_layer_get_fmts - Get the supported DRM formats of the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer to get the formats from
+ * @drm_fmts: pointer to return the array of supported DRM formats
+ * @num_fmts: pointer to number of returned DRM formats
+ *
+ * Get the supported DRM formats of the given layer.
+ */
+void xilinx_drm_dp_sub_layer_get_fmts(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer,
+ u32 **drm_fmts,
+ unsigned int *num_fmts)
+{
+ *drm_fmts = layer->drm_fmts;
+ *num_fmts = layer->num_fmts;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_get_fmts);
+
+/**
+ * xilinx_drm_dp_sub_layer_enable - Enable the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer to enable
+ *
+ * Enable the layer @layer.
+ */
+void xilinx_drm_dp_sub_layer_enable(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ xilinx_drm_dp_sub_av_buf_enable_vid(&dp_sub->av_buf, layer);
+ xilinx_drm_dp_sub_blend_layer_enable(&dp_sub->blend, layer);
+ layer->enabled = true;
+ if (layer->other->enabled) {
+ xilinx_drm_dp_sub_blend_set_alpha(&dp_sub->blend,
+ dp_sub->alpha);
+ xilinx_drm_dp_sub_blend_enable_alpha(&dp_sub->blend,
+ dp_sub->alpha_en);
+ } else {
+ u32 alpha;
+
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID)
+ alpha = 0;
+ else
+ alpha = XILINX_DRM_DP_SUB_MAX_ALPHA;
+ xilinx_drm_dp_sub_blend_set_alpha(&dp_sub->blend, alpha);
+ xilinx_drm_dp_sub_blend_enable_alpha(&dp_sub->blend, true);
+ }
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_enable);
+
+/**
+ * xilinx_drm_dp_sub_layer_disable - Disable the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer to disable
+ *
+ * Disable the layer @layer.
+ */
+void xilinx_drm_dp_sub_layer_disable(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ xilinx_drm_dp_sub_av_buf_disable_vid(&dp_sub->av_buf, layer);
+ xilinx_drm_dp_sub_blend_layer_disable(&dp_sub->blend, layer);
+ layer->enabled = false;
+ if (layer->other->enabled) {
+ u32 alpha;
+
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID)
+ alpha = XILINX_DRM_DP_SUB_MAX_ALPHA;
+ else
+ alpha = 0;
+ xilinx_drm_dp_sub_blend_set_alpha(&dp_sub->blend, alpha);
+ xilinx_drm_dp_sub_blend_enable_alpha(&dp_sub->blend, true);
+ }
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_disable);
+
+/**
+ * xilinx_drm_dp_sub_layer_get - Get the DP subsystem layer
+ * @dp_sub: DP subsystem
+ * @primary: flag to indicate the primary plane
+ *
+ * Check if there's any available layer based on the flag @primary, and return
+ * the found layer.
+ *
+ * Return: a DP subsystem layer on success, or -ENODEV error pointer.
+ */
+struct xilinx_drm_dp_sub_layer *
+xilinx_drm_dp_sub_layer_get(struct xilinx_drm_dp_sub *dp_sub, bool primary)
+{
+ struct xilinx_drm_dp_sub_layer *layer = NULL;
+ unsigned int i;
+
+ for (i = 0; i < XILINX_DRM_DP_SUB_NUM_LAYERS; i++) {
+ if (dp_sub->layers[i].primary == primary) {
+ layer = &dp_sub->layers[i];
+ break;
+ }
+ }
+
+ if (!layer || !layer->avail)
+ return ERR_PTR(-ENODEV);
+
+ /* Mark the layer as claimed until xilinx_drm_dp_sub_layer_put() */
+ layer->avail = false;
+
+ return layer;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_get);
+
+/**
+ * xilinx_drm_dp_sub_layer_put - Put the DP subsystem layer
+ * @dp_sub: DP subsystem
+ * @layer: DP subsystem layer
+ *
+ * Return the DP subsystem layer @layer when it's no longer used.
+ */
+void xilinx_drm_dp_sub_layer_put(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ layer->avail = true;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_put);
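+
+/*
+ * A minimal usage sketch of the layer API (error handling elided; the
+ * format and size are only examples):
+ *
+ *   layer = xilinx_drm_dp_sub_layer_get(dp_sub, true);
+ *   if (IS_ERR(layer))
+ *           return PTR_ERR(layer);
+ *   xilinx_drm_dp_sub_layer_set_fmt(dp_sub, layer, DRM_FORMAT_YUYV);
+ *   xilinx_drm_dp_sub_layer_check_size(dp_sub, layer, 1920, 1080);
+ *   xilinx_drm_dp_sub_layer_enable(dp_sub, layer);
+ *   ...
+ *   xilinx_drm_dp_sub_layer_disable(dp_sub, layer);
+ *   xilinx_drm_dp_sub_layer_put(dp_sub, layer);
+ */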
+
+/* DP subsystem functions */
+
+/**
+ * xilinx_drm_dp_sub_set_output_fmt - Set the output format
+ * @dp_sub: DP subsystem
+ * @drm_fmt: DRM format to set
+ *
+ * Set the output format of the DP subsystem to the format matching @drm_fmt.
+ *
+ * Return: 0 on success, or -EINVAL if @drm_fmt is not supported for output.
+ */
+int xilinx_drm_dp_sub_set_output_fmt(struct xilinx_drm_dp_sub *dp_sub,
+ u32 drm_fmt)
+{
+ const struct xilinx_drm_dp_sub_fmt *fmt;
+
+ fmt = xilinx_drm_dp_sub_map_fmt(blend_output_fmts,
+ ARRAY_SIZE(blend_output_fmts), drm_fmt);
+ if (!fmt)
+ return -EINVAL;
+
+ xilinx_drm_dp_sub_blend_set_output_fmt(&dp_sub->blend, fmt->dp_sub_fmt);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_set_output_fmt);
+
+/**
+ * xilinx_drm_dp_sub_set_bg_color - Set the background color
+ * @dp_sub: DP subsystem
+ * @c0: color component 0
+ * @c1: color component 1
+ * @c2: color component 2
+ *
+ * Set the background color with given color components (@c0, @c1, @c2).
+ */
+void xilinx_drm_dp_sub_set_bg_color(struct xilinx_drm_dp_sub *dp_sub,
+ u32 c0, u32 c1, u32 c2)
+{
+ xilinx_drm_dp_sub_blend_set_bg_color(&dp_sub->blend, c0, c1, c2);
+ xilinx_drm_dp_sub_debugfs_bg_color(dp_sub);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_set_bg_color);
+
+/**
+ * xilinx_drm_dp_sub_set_alpha - Set the alpha value
+ * @dp_sub: DP subsystem
+ * @alpha: alpha value to set
+ *
+ * Set the alpha value for blending.
+ */
+void xilinx_drm_dp_sub_set_alpha(struct xilinx_drm_dp_sub *dp_sub, u32 alpha)
+{
+ dp_sub->alpha = alpha;
+ if (dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_VID].enabled &&
+ dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_GFX].enabled)
+ xilinx_drm_dp_sub_blend_set_alpha(&dp_sub->blend, alpha);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_set_alpha);
+
+/**
+ * xilinx_drm_dp_sub_enable_alpha - Enable/disable the global alpha blending
+ * @dp_sub: DP subsystem
+ * @enable: flag to enable or disable alpha blending
+ *
+ * Enable or disable the global alpha blending based on @enable.
+ */
+void
+xilinx_drm_dp_sub_enable_alpha(struct xilinx_drm_dp_sub *dp_sub, bool enable)
+{
+ dp_sub->alpha_en = enable;
+ if (dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_VID].enabled &&
+ dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_GFX].enabled)
+ xilinx_drm_dp_sub_blend_enable_alpha(&dp_sub->blend, enable);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_enable_alpha);
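+
+/*
+ * Usage sketch (the value is illustrative): blend the two layers at half
+ * opacity once both are enabled.
+ *
+ *   xilinx_drm_dp_sub_set_alpha(dp_sub, XILINX_DRM_DP_SUB_MAX_ALPHA / 2);
+ *   xilinx_drm_dp_sub_enable_alpha(dp_sub, true);
+ */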
+
+/**
+ * xilinx_drm_dp_sub_handle_vblank - Vblank handling wrapper
+ * @dp_sub: DP subsystem
+ *
+ * Trigger the registered vblank handler. This function is supposed to be
+ * called in the actual vblank handler.
+ */
+void xilinx_drm_dp_sub_handle_vblank(struct xilinx_drm_dp_sub *dp_sub)
+{
+ if (dp_sub->vblank_fn)
+ dp_sub->vblank_fn(dp_sub->vblank_data);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_handle_vblank);
+
+/**
+ * xilinx_drm_dp_sub_enable_vblank - Enable the vblank handling
+ * @dp_sub: DP subsystem
+ * @vblank_fn: callback to be called on vblank event
+ * @vblank_data: data to be used in @vblank_fn
+ *
+ * This function registers the vblank handler; once registered, the handler
+ * is triggered on each vblank event.
+ */
+void xilinx_drm_dp_sub_enable_vblank(struct xilinx_drm_dp_sub *dp_sub,
+ void (*vblank_fn)(void *),
+ void *vblank_data)
+{
+ dp_sub->vblank_fn = vblank_fn;
+ dp_sub->vblank_data = vblank_data;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_enable_vblank);
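+
+/*
+ * A hypothetical consumer registers its handler once and forwards the
+ * hardware interrupt to it:
+ *
+ *   xilinx_drm_dp_sub_enable_vblank(dp_sub, my_vblank_fn, my_data);
+ *   ...
+ *   (from the DisplayPort vblank interrupt handler)
+ *   xilinx_drm_dp_sub_handle_vblank(dp_sub);
+ */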
+
+/**
+ * xilinx_drm_dp_sub_disable_vblank - Disable the vblank handling
+ * @dp_sub: DP subsystem
+ *
+ * Disable the vblank handler. The vblank handler and data are unregistered.
+ */
+void xilinx_drm_dp_sub_disable_vblank(struct xilinx_drm_dp_sub *dp_sub)
+{
+ dp_sub->vblank_fn = NULL;
+ dp_sub->vblank_data = NULL;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_disable_vblank);
+
+/**
+ * xilinx_drm_dp_sub_enable - Enable the DP subsystem
+ * @dp_sub: DP subsystem
+ *
+ * Enable the DP subsystem.
+ */
+void xilinx_drm_dp_sub_enable(struct xilinx_drm_dp_sub *dp_sub)
+{
+ const struct xilinx_drm_dp_sub_fmt *vid_fmt;
+ const struct xilinx_drm_dp_sub_fmt *gfx_fmt;
+
+ vid_fmt = dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_VID].fmt;
+ gfx_fmt = dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_GFX].fmt;
+ xilinx_drm_dp_sub_av_buf_enable(&dp_sub->av_buf);
+ xilinx_drm_dp_sub_av_buf_init_fmts(&dp_sub->av_buf, vid_fmt, gfx_fmt);
+ xilinx_drm_dp_sub_av_buf_init_sf(&dp_sub->av_buf, vid_fmt, gfx_fmt);
+ xilinx_drm_dp_sub_av_buf_set_vid_clock_src(&dp_sub->av_buf,
+ !dp_sub->vid_clk_pl);
+ xilinx_drm_dp_sub_av_buf_set_vid_timing_src(&dp_sub->av_buf, true);
+ xilinx_drm_dp_sub_av_buf_set_aud_clock_src(&dp_sub->av_buf, true);
+ xilinx_drm_dp_sub_av_buf_enable_buf(&dp_sub->av_buf);
+ xilinx_drm_dp_sub_av_buf_enable_aud(&dp_sub->av_buf);
+ xilinx_drm_dp_sub_aud_init(&dp_sub->aud);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_enable);
+
+/**
+ * xilinx_drm_dp_sub_disable - Disable the DP subsystem
+ * @dp_sub: DP subsystem
+ *
+ * Disable the DP subsystem.
+ */
+void xilinx_drm_dp_sub_disable(struct xilinx_drm_dp_sub *dp_sub)
+{
+ xilinx_drm_dp_sub_aud_deinit(&dp_sub->aud);
+ xilinx_drm_dp_sub_av_buf_disable_aud(&dp_sub->av_buf);
+ xilinx_drm_dp_sub_av_buf_disable_buf(&dp_sub->av_buf);
+ xilinx_drm_dp_sub_av_buf_disable(&dp_sub->av_buf);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_disable);
+
+/* DP subsystem initialization functions */
+
+/**
+ * xilinx_drm_dp_sub_of_get - Get the DP subsystem instance
+ * @np: parent device node
+ *
+ * This function searches and returns a DP subsystem structure for
+ * the parent device node, @np. The DP subsystem node should be a child node of
+ * @np, with 'xlnx,dp-sub' property pointing to the DP device node. An instance
+ * can be shared by multiple users.
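+ *
+ * A hypothetical device tree fragment illustrating the lookup:
+ *
+ *   dp_sub: dp_sub@fd4aa000 { ... };
+ *   encoder {
+ *           xlnx,dp-sub = <&dp_sub>;
+ *   };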
+ *
+ * Return: corresponding DP subsystem structure if found. NULL if
+ * the device node doesn't have 'xlnx,dp-sub' property, or -EPROBE_DEFER error
+ * pointer if the DP subsystem isn't found.
+ */
+struct xilinx_drm_dp_sub *xilinx_drm_dp_sub_of_get(struct device_node *np)
+{
+ struct device_node *xilinx_drm_dp_sub_node;
+ struct xilinx_drm_dp_sub *found = NULL;
+ struct xilinx_drm_dp_sub *dp_sub;
+
+ if (!of_find_property(np, "xlnx,dp-sub", NULL))
+ return NULL;
+
+ xilinx_drm_dp_sub_node = of_parse_phandle(np, "xlnx,dp-sub", 0);
+ if (!xilinx_drm_dp_sub_node)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&xilinx_drm_dp_sub_lock);
+ list_for_each_entry(dp_sub, &xilinx_drm_dp_sub_list, list) {
+ if (dp_sub->dev->of_node == xilinx_drm_dp_sub_node) {
+ found = dp_sub;
+ break;
+ }
+ }
+ mutex_unlock(&xilinx_drm_dp_sub_lock);
+
+ of_node_put(xilinx_drm_dp_sub_node);
+
+ if (!found)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return found;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_of_get);
+
+/**
+ * xilinx_drm_dp_sub_put - Put the DP subsystem instance
+ * @dp_sub: DP subsystem
+ *
+ * Put the DP subsystem instance @dp_sub.
+ */
+void xilinx_drm_dp_sub_put(struct xilinx_drm_dp_sub *dp_sub)
+{
+ /* no-op */
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_put);
+
+/**
+ * xilinx_drm_dp_sub_register_device - Register the DP subsystem to the global list
+ * @dp_sub: DP subsystem
+ *
+ * Register the DP subsystem instance to the global list
+ */
+static void xilinx_drm_dp_sub_register_device(struct xilinx_drm_dp_sub *dp_sub)
+{
+ mutex_lock(&xilinx_drm_dp_sub_lock);
+ list_add_tail(&dp_sub->list, &xilinx_drm_dp_sub_list);
+ mutex_unlock(&xilinx_drm_dp_sub_lock);
+}
+
+/**
+ * xilinx_drm_dp_sub_unregister_device - Unregister the DP subsystem instance
+ * @dp_sub: DP subsystem
+ *
+ * Unregister the DP subsystem instance from the global list
+ */
+static void
+xilinx_drm_dp_sub_unregister_device(struct xilinx_drm_dp_sub *dp_sub)
+{
+ mutex_lock(&xilinx_drm_dp_sub_lock);
+ list_del(&dp_sub->list);
+ mutex_unlock(&xilinx_drm_dp_sub_lock);
+}
+
+/**
+ * xilinx_drm_dp_sub_parse_of - Parse the DP subsystem device tree node
+ * @dp_sub: DP subsystem
+ *
+ * Parse the DP subsystem device tree node.
+ *
+ * Return: 0 on success, or the corresponding error code.
+ */
+static int xilinx_drm_dp_sub_parse_of(struct xilinx_drm_dp_sub *dp_sub)
+{
+ struct device_node *node = dp_sub->dev->of_node;
+ struct xilinx_drm_dp_sub_layer *layer;
+ const char *string;
+ u32 fmt, i, size;
+ int ret;
+
+ ret = of_property_read_string(node, "xlnx,output-fmt", &string);
+ if (ret < 0) {
+ dev_err(dp_sub->dev, "No colormetry in DT\n");
+ return ret;
+ }
+
+ if (strcmp(string, "rgb") == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB;
+ } else if (strcmp(string, "ycrcb444") == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR444;
+ } else if (strcmp(string, "ycrcb422") == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR422;
+ fmt |= XILINX_DP_SUB_V_BLEND_OUTPUT_EN_DOWNSAMPLE;
+ } else if (strcmp(string, "yonly") == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YONLY;
+ } else {
+ dev_err(dp_sub->dev, "Invalid output format in DT\n");
+ return -EINVAL;
+ }
+
+ xilinx_drm_writel(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT, fmt);
+
+ if (fmt != XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB) {
+ u16 sdtv_coeffs[] = { 0x4c9, 0x864, 0x1d3,
+ 0x7d4d, 0x7ab3, 0x800,
+ 0x800, 0x794d, 0x7eb3 };
+ u32 full_range_offsets[] = { 0x0, 0x8000000, 0x8000000 };
+ u32 offset, i;
+
+ /* Hardcode SDTV coefficients. Can be runtime configurable */
+ offset = XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF0;
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_COEFF; i++)
+ xilinx_drm_writel(dp_sub->blend.base, offset + i * 4,
+ sdtv_coeffs[i]);
+
+ offset = XILINX_DP_SUB_V_BLEND_LUMA_OUTCSC_OFFSET;
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_OFFSET; i++)
+ xilinx_drm_writel(dp_sub->blend.base, offset + i * 4,
+ full_range_offsets[i]);
+ }
+
+ if (of_property_read_bool(node, "xlnx,vid-primary"))
+ dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_VID].primary = true;
+ else
+ dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_GFX].primary = true;
+
+ ret = of_property_read_string(node, "xlnx,vid-fmt", &string);
+ if (!ret) {
+ layer = &dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_VID];
+ size = ARRAY_SIZE(av_buf_vid_fmts);
+ layer->num_fmts = size;
+ layer->drm_fmts = devm_kzalloc(dp_sub->dev,
+ sizeof(*layer->drm_fmts) * size,
+ GFP_KERNEL);
+ if (!layer->drm_fmts)
+ return -ENOMEM;
+
+ for (i = 0; i < layer->num_fmts; i++) {
+ const struct xilinx_drm_dp_sub_fmt *fmt =
+ &av_buf_vid_fmts[i];
+
+ if (strcmp(string, fmt->name) == 0)
+ layer->fmt = fmt;
+
+ layer->drm_fmts[i] = fmt->drm_fmt;
+ }
+
+ if (!layer->fmt) {
+ dev_info(dp_sub->dev, "Invalid vid-fmt in DT\n");
+ layer->fmt = &av_buf_vid_fmts[0];
+ }
+ }
+
+ ret = of_property_read_string(node, "xlnx,gfx-fmt", &string);
+ if (!ret) {
+ layer = &dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_GFX];
+ size = ARRAY_SIZE(av_buf_gfx_fmts);
+ layer->num_fmts = size;
+ layer->drm_fmts = devm_kzalloc(dp_sub->dev,
+ sizeof(*layer->drm_fmts) * size,
+ GFP_KERNEL);
+ if (!layer->drm_fmts)
+ return -ENOMEM;
+
+ for (i = 0; i < layer->num_fmts; i++) {
+ const struct xilinx_drm_dp_sub_fmt *fmt =
+ &av_buf_gfx_fmts[i];
+
+ if (strcmp(string, fmt->name) == 0)
+ layer->fmt = fmt;
+
+ layer->drm_fmts[i] = fmt->drm_fmt;
+ }
+
+ if (!layer->fmt) {
+ dev_info(dp_sub->dev, "Invalid vid-fmt in DT\n");
+ layer->fmt = &av_buf_gfx_fmts[0];
+ }
+ }
+
+ dp_sub->vid_clk_pl = of_property_read_bool(node, "xlnx,vid-clk-pl");
+
+ return 0;
+}
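+
+/*
+ * Illustrative DT fragment matching the properties parsed above. The
+ * property names and the compatible string come from this driver; the node
+ * name and unit address are examples only, and the format strings must be
+ * names from av_buf_vid_fmts / av_buf_gfx_fmts (elided here):
+ *
+ *	dp_sub: dp-sub@fd4aa000 {
+ *		compatible = "xlnx,dp-sub";
+ *		xlnx,output-fmt = "rgb";
+ *		xlnx,vid-fmt = ...;
+ *		xlnx,gfx-fmt = ...;
+ *		xlnx,vid-primary;
+ *	};
+ */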
+
+static int xilinx_drm_dp_sub_probe(struct platform_device *pdev)
+{
+ struct xilinx_drm_dp_sub *dp_sub;
+ struct resource *res;
+ int ret;
+
+ dp_sub = devm_kzalloc(&pdev->dev, sizeof(*dp_sub), GFP_KERNEL);
+ if (!dp_sub)
+ return -ENOMEM;
+
+ dp_sub->dev = &pdev->dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "blend");
+ dp_sub->blend.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dp_sub->blend.base))
+ return PTR_ERR(dp_sub->blend.base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "av_buf");
+ dp_sub->av_buf.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dp_sub->av_buf.base))
+ return PTR_ERR(dp_sub->av_buf.base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aud");
+ dp_sub->aud.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dp_sub->aud.base))
+ return PTR_ERR(dp_sub->aud.base);
+
+ dp_sub->layers[0].id = XILINX_DRM_DP_SUB_LAYER_VID;
+ dp_sub->layers[0].offset = 0;
+ dp_sub->layers[0].avail = true;
+ dp_sub->layers[0].other = &dp_sub->layers[1];
+
+ dp_sub->layers[1].id = XILINX_DRM_DP_SUB_LAYER_GFX;
+ dp_sub->layers[1].offset = 4;
+ dp_sub->layers[1].avail = true;
+ dp_sub->layers[1].other = &dp_sub->layers[0];
+
+ ret = xilinx_drm_dp_sub_parse_of(dp_sub);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, dp_sub);
+
+ xilinx_drm_dp_sub_register_device(dp_sub);
+
+ xilinx_dp_sub_debugfs_init(dp_sub);
+
+ dev_info(dp_sub->dev, "Xilinx DisplayPort Subsystem is probed\n");
+
+ return 0;
+}
+
+static int xilinx_drm_dp_sub_remove(struct platform_device *pdev)
+{
+ struct xilinx_drm_dp_sub *dp_sub = platform_get_drvdata(pdev);
+
+ xilinx_drm_dp_sub_unregister_device(dp_sub);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_drm_dp_sub_of_id_table[] = {
+ { .compatible = "xlnx,dp-sub" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xilinx_drm_dp_sub_of_id_table);
+
+static struct platform_driver xilinx_drm_dp_sub_driver = {
+ .driver = {
+ .name = "xilinx-drm-dp-sub",
+ .of_match_table = xilinx_drm_dp_sub_of_id_table,
+ },
+ .probe = xilinx_drm_dp_sub_probe,
+ .remove = xilinx_drm_dp_sub_remove,
+};
+
+module_platform_driver(xilinx_drm_dp_sub_driver);
+
+MODULE_DESCRIPTION("Xilinx DisplayPort Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.h b/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.h
new file mode 100644
index 000000000000..b86e74622541
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.h
@@ -0,0 +1,69 @@
+/*
+ * DisplayPort subsystem header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2014 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_DP_SUB_H_
+#define _XILINX_DRM_DP_SUB_H_
+
+#define XILINX_DRM_DP_SUB_NUM_LAYERS 2
+#define XILINX_DRM_DP_SUB_MAX_WIDTH 4096
+#define XILINX_DRM_DP_SUB_MAX_ALPHA 255
+
+struct drm_device;
+struct xilinx_drm_dp_sub;
+struct xilinx_drm_dp_sub_layer;
+
+int xilinx_drm_dp_sub_layer_check_size(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer,
+ u32 width, u32 height);
+int xilinx_drm_dp_sub_layer_set_fmt(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer,
+ u32 drm_fmt);
+u32 xilinx_drm_dp_sub_layer_get_fmt(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer);
+void xilinx_drm_dp_sub_layer_get_fmts(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer,
+ u32 **drm_fmts,
+ unsigned int *num_fmts);
+void xilinx_drm_dp_sub_layer_enable(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer);
+void xilinx_drm_dp_sub_layer_disable(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer);
+struct xilinx_drm_dp_sub_layer *
+xilinx_drm_dp_sub_layer_get(struct xilinx_drm_dp_sub *dp_sub, bool priv);
+void xilinx_drm_dp_sub_layer_put(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer);
+
+int xilinx_drm_dp_sub_set_output_fmt(struct xilinx_drm_dp_sub *dp_sub,
+ u32 drm_fmt);
+void xilinx_drm_dp_sub_set_bg_color(struct xilinx_drm_dp_sub *dp_sub,
+ u32 c0, u32 c1, u32 c2);
+void xilinx_drm_dp_sub_set_alpha(struct xilinx_drm_dp_sub *dp_sub, u32 alpha);
+void
+xilinx_drm_dp_sub_enable_alpha(struct xilinx_drm_dp_sub *dp_sub, bool enable);
+
+void xilinx_drm_dp_sub_enable_vblank(struct xilinx_drm_dp_sub *dp_sub,
+ void (*vblank_fn)(void *),
+ void *vblank_data);
+void xilinx_drm_dp_sub_disable_vblank(struct xilinx_drm_dp_sub *dp_sub);
+void xilinx_drm_dp_sub_handle_vblank(struct xilinx_drm_dp_sub *dp_sub);
+void xilinx_drm_dp_sub_enable(struct xilinx_drm_dp_sub *dp_sub);
+void xilinx_drm_dp_sub_disable(struct xilinx_drm_dp_sub *dp_sub);
+
+struct xilinx_drm_dp_sub *xilinx_drm_dp_sub_of_get(struct device_node *np);
+void xilinx_drm_dp_sub_put(struct xilinx_drm_dp_sub *dp_sub);
+
+#endif /* _XILINX_DRM_DP_SUB_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_drv.c b/drivers/gpu/drm/xilinx/xilinx_drm_drv.c
new file mode 100644
index 000000000000..8298ad2caa0b
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_drv.c
@@ -0,0 +1,614 @@
+/*
+ * Xilinx DRM KMS support for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include "xilinx_drm_connector.h"
+#include "xilinx_drm_crtc.h"
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_encoder.h"
+#include "xilinx_drm_fb.h"
+#include "xilinx_drm_gem.h"
+
+#define DRIVER_NAME "xilinx_drm"
+#define DRIVER_DESC "Xilinx DRM KMS support for Xilinx"
+#define DRIVER_DATE "20130509"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static uint xilinx_drm_fbdev_vres = 2;
+module_param_named(fbdev_vres, xilinx_drm_fbdev_vres, uint, 0444);
+MODULE_PARM_DESC(fbdev_vres,
+ "fbdev virtual resolution multiplier for fb (default: 2)");
+
+/*
+ * TODO: The possible pipeline configurations are numerous with Xilinx soft IPs.
+ * It's not too bad for now, but a more proper approach (Common Display
+ * Framework, or some internal abstraction) should be considered when things
+ * reach the point where such an abstraction is required.
+ */
+
+struct xilinx_drm_private {
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ struct drm_fb_helper *fb;
+ struct platform_device *pdev;
+ bool is_master;
+};
+
+/**
+ * struct xilinx_video_format_desc - Xilinx Video IP video format description
+ * @name: Xilinx video format name
+ * @depth: color depth
+ * @bpp: bits per pixel
+ * @xilinx_format: xilinx format code
+ * @drm_format: drm format code
+ */
+struct xilinx_video_format_desc {
+ const char *name;
+ unsigned int depth;
+ unsigned int bpp;
+ unsigned int xilinx_format;
+ u32 drm_format;
+};
+
+static const struct xilinx_video_format_desc xilinx_video_formats[] = {
+ { "yuv420", 16, 16, XILINX_VIDEO_FORMAT_YUV420, DRM_FORMAT_YUV420 },
+ { "uvy422", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_UYVY },
+ { "vuy422", 16, 16, XILINX_VIDEO_FORMAT_YUV422, DRM_FORMAT_VYUY },
+ { "yuv422", 16, 16, XILINX_VIDEO_FORMAT_YUV422, DRM_FORMAT_YUYV },
+ { "yvu422", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_YVYU },
+ { "yuv444", 24, 24, XILINX_VIDEO_FORMAT_YUV444, DRM_FORMAT_YUV444 },
+ { "nv12", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV12 },
+ { "nv21", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV21 },
+ { "nv16", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV16 },
+ { "nv61", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV61 },
+ { "abgr1555", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ABGR1555 },
+ { "argb1555", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ARGB1555 },
+ { "rgba4444", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_RGBA4444 },
+ { "bgra4444", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_BGRA4444 },
+ { "bgr565", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_BGR565 },
+ { "rgb565", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_RGB565 },
+ { "bgr888", 24, 24, XILINX_VIDEO_FORMAT_RGB, DRM_FORMAT_BGR888 },
+ { "rgb888", 24, 24, XILINX_VIDEO_FORMAT_RGB, DRM_FORMAT_RGB888 },
+ { "xbgr8888", 24, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_XBGR8888 },
+ { "xrgb8888", 24, 32, XILINX_VIDEO_FORMAT_XRGB, DRM_FORMAT_XRGB8888 },
+ { "abgr8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ABGR8888 },
+ { "argb8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ARGB8888 },
+ { "bgra8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_BGRA8888 },
+ { "rgba8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_RGBA8888 },
+};
+
+/**
+ * xilinx_drm_check_format - Check if the given format is supported
+ * @drm: DRM device
+ * @fourcc: format fourcc
+ *
+ * Check if the given format @fourcc is supported by the current pipeline
+ *
+ * Return: true if the format is supported, false otherwise
+ */
+bool xilinx_drm_check_format(struct drm_device *drm, u32 fourcc)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ return xilinx_drm_crtc_check_format(private->crtc, fourcc);
+}
+
+/**
+ * xilinx_drm_get_format - Get the current device format
+ * @drm: DRM device
+ *
+ * Get the current format of pipeline
+ *
+ * Return: the corresponding DRM_FORMAT_XXX
+ */
+u32 xilinx_drm_get_format(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ return xilinx_drm_crtc_get_format(private->crtc);
+}
+
+/**
+ * xilinx_drm_get_align - Get the alignment value for pitch
+ * @drm: DRM object
+ *
+ * Get the alignment value for pitch from the plane
+ *
+ * Return: The alignment value if successful, or the error code.
+ */
+unsigned int xilinx_drm_get_align(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ return xilinx_drm_crtc_get_align(private->crtc);
+}
+
+/* poll changed handler */
+static void xilinx_drm_output_poll_changed(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ xilinx_drm_fb_hotplug_event(private->fb);
+}
+
+static const struct drm_mode_config_funcs xilinx_drm_mode_config_funcs = {
+ .fb_create = xilinx_drm_fb_create,
+ .output_poll_changed = xilinx_drm_output_poll_changed,
+};
+
+/* enable vblank */
+static int xilinx_drm_enable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ xilinx_drm_crtc_enable_vblank(private->crtc);
+
+ return 0;
+}
+
+/* disable vblank */
+static void xilinx_drm_disable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ xilinx_drm_crtc_disable_vblank(private->crtc);
+}
+
+/* initialize mode config */
+static void xilinx_drm_mode_config_init(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+
+ drm->mode_config.max_width =
+ xilinx_drm_crtc_get_max_width(private->crtc);
+ drm->mode_config.max_height = 4096;
+
+ drm->mode_config.funcs = &xilinx_drm_mode_config_funcs;
+}
+
+/* convert xilinx format to drm format by code */
+int xilinx_drm_format_by_code(unsigned int xilinx_format, u32 *drm_format)
+{
+ const struct xilinx_video_format_desc *format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_video_formats); i++) {
+ format = &xilinx_video_formats[i];
+ if (format->xilinx_format == xilinx_format) {
+ *drm_format = format->drm_format;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("Unknown Xilinx video format: %d\n", xilinx_format);
+
+ return -EINVAL;
+}
+
+/* convert xilinx format to drm format by name */
+int xilinx_drm_format_by_name(const char *name, u32 *drm_format)
+{
+ const struct xilinx_video_format_desc *format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_video_formats); i++) {
+ format = &xilinx_video_formats[i];
+ if (strcmp(format->name, name) == 0) {
+ *drm_format = format->drm_format;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("Unknown Xilinx video format: %s\n", name);
+
+ return -EINVAL;
+}
+
+/* get bpp of given format */
+unsigned int xilinx_drm_format_bpp(u32 drm_format)
+{
+ const struct xilinx_video_format_desc *format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_video_formats); i++) {
+ format = &xilinx_video_formats[i];
+ if (format->drm_format == drm_format)
+ return format->bpp;
+ }
+
+ return 0;
+}
+
+/* get color depth of given format */
+unsigned int xilinx_drm_format_depth(u32 drm_format)
+{
+ const struct xilinx_video_format_desc *format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_video_formats); i++) {
+ format = &xilinx_video_formats[i];
+ if (format->drm_format == drm_format)
+ return format->depth;
+ }
+
+ return 0;
+}
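+
+/*
+ * For illustration (a sketch, not a helper provided by this driver): a
+ * framebuffer path would typically combine the helpers above to derive a
+ * CMA buffer pitch, e.g.
+ *
+ *	pitch = ALIGN(mode->hdisplay * xilinx_drm_format_bpp(fourcc) / 8,
+ *		      xilinx_drm_get_align(drm));
+ */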
+
+static int xilinx_drm_bind(struct device *dev)
+{
+ struct xilinx_drm_private *private = dev_get_drvdata(dev);
+ struct drm_device *drm = private->drm;
+
+ return component_bind_all(dev, drm);
+}
+
+static void xilinx_drm_unbind(struct device *dev)
+{
+ dev_set_drvdata(dev, NULL);
+}
+
+static const struct component_master_ops xilinx_drm_ops = {
+ .bind = xilinx_drm_bind,
+ .unbind = xilinx_drm_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ struct device_node *np = data;
+
+ return dev->of_node == np;
+}
+
+static int xilinx_drm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct xilinx_drm_private *private = dev->dev_private;
+
+ /* This is a hack to allow the root user to run as a master */
+ if (!(drm_is_primary_client(file) && !dev->master) &&
+ !file->is_master && capable(CAP_SYS_ADMIN)) {
+ file->is_master = 1;
+ private->is_master = true;
+ }
+
+ return 0;
+}
+
+static int xilinx_drm_release(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file = filp->private_data;
+ struct drm_minor *minor = file->minor;
+ struct drm_device *drm = minor->dev;
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ if (private->is_master) {
+ private->is_master = false;
+ file->is_master = 0;
+ }
+
+ return drm_release(inode, filp);
+}
+
+/* restore the default mode when xilinx drm is released */
+static void xilinx_drm_lastclose(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ xilinx_drm_crtc_restore(private->crtc);
+
+ xilinx_drm_fb_restore_mode(private->fb);
+}
+
+static const struct file_operations xilinx_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = xilinx_drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_cma_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+static struct drm_driver xilinx_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM |
+ DRIVER_LEGACY,
+ .open = xilinx_drm_open,
+ .lastclose = xilinx_drm_lastclose,
+
+ .enable_vblank = xilinx_drm_enable_vblank,
+ .disable_vblank = xilinx_drm_disable_vblank,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = xilinx_drm_gem_cma_dumb_create,
+
+ .fops = &xilinx_drm_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+#if defined(CONFIG_PM_SLEEP)
+/* suspend xilinx drm */
+static int xilinx_drm_pm_suspend(struct device *dev)
+{
+ struct xilinx_drm_private *private = dev_get_drvdata(dev);
+ struct drm_device *drm = private->drm;
+ struct drm_connector *connector;
+
+ drm_kms_helper_poll_disable(drm);
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ int old_dpms = connector->dpms;
+
+ if (connector->funcs->dpms)
+ connector->funcs->dpms(connector,
+ DRM_MODE_DPMS_SUSPEND);
+
+ connector->dpms = old_dpms;
+ }
+ drm_modeset_unlock_all(drm);
+
+ return 0;
+}
+
+/* resume xilinx drm */
+static int xilinx_drm_pm_resume(struct device *dev)
+{
+ struct xilinx_drm_private *private = dev_get_drvdata(dev);
+ struct drm_device *drm = private->drm;
+ struct drm_connector *connector;
+
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ if (connector->funcs->dpms) {
+ int dpms = connector->dpms;
+
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ connector->funcs->dpms(connector, dpms);
+ }
+ }
+ drm_modeset_unlock_all(drm);
+
+ drm_helper_resume_force_mode(drm);
+
+ drm_modeset_lock_all(drm);
+ drm_kms_helper_poll_enable(drm);
+ drm_modeset_unlock_all(drm);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops xilinx_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xilinx_drm_pm_suspend, xilinx_drm_pm_resume)
+};
+
+/* init xilinx drm platform */
+static int xilinx_drm_platform_probe(struct platform_device *pdev)
+{
+ struct xilinx_drm_private *private;
+ struct drm_device *drm;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ const struct drm_format_info *info;
+ struct device_node *encoder_node, *ep = NULL, *remote;
+ struct component_match *match = NULL;
+ unsigned int align, i = 0;
+ int ret;
+ u32 format;
+
+ drm = drm_dev_alloc(&xilinx_drm_driver, &pdev->dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ private = devm_kzalloc(drm->dev, sizeof(*private), GFP_KERNEL);
+ if (!private) {
+ ret = -ENOMEM;
+ goto err_drm;
+ }
+
+ drm_mode_config_init(drm);
+
+ /* create a xilinx crtc */
+ private->crtc = xilinx_drm_crtc_create(drm);
+ if (IS_ERR(private->crtc)) {
+ DRM_DEBUG_DRIVER("failed to create xilinx crtc\n");
+ ret = PTR_ERR(private->crtc);
+ goto err_config;
+ }
+
+ while ((encoder_node = of_parse_phandle(drm->dev->of_node,
+ "xlnx,encoder-slave", i))) {
+ encoder = xilinx_drm_encoder_create(drm, encoder_node);
+ of_node_put(encoder_node);
+ if (IS_ERR(encoder)) {
+ DRM_DEBUG_DRIVER("failed to create xilinx encoder\n");
+ ret = PTR_ERR(encoder);
+ goto err_config;
+ }
+
+ connector = xilinx_drm_connector_create(drm, encoder, i);
+ if (IS_ERR(connector)) {
+ DRM_DEBUG_DRIVER("failed to create xilinx connector\n");
+ ret = PTR_ERR(connector);
+ goto err_config;
+ }
+
+ i++;
+ }
+
+ while (1) {
+ ep = of_graph_get_next_endpoint(drm->dev->of_node, ep);
+ if (!ep)
+ break;
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
+ continue;
+ }
+
+ component_match_add(drm->dev, &match, compare_of, remote);
+ of_node_put(remote);
+ i++;
+ }
+
+ if (i == 0) {
+ DRM_ERROR("failed to get an encoder slave node\n");
+ ret = -ENODEV;
+ goto err_config;
+ }
+
+ ret = drm_vblank_init(drm, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize vblank\n");
+ goto err_master;
+ }
+
+ /* enable irq to enable vblank feature */
+ drm->irq_enabled = 1;
+
+ drm->dev_private = private;
+ private->drm = drm;
+ xilinx_drm_mode_config_init(drm);
+
+ format = xilinx_drm_crtc_get_format(private->crtc);
+ info = drm_format_info(format);
+ if (info && info->depth && info->cpp[0]) {
+ align = xilinx_drm_crtc_get_align(private->crtc);
+ private->fb = xilinx_drm_fb_init(drm, info->cpp[0] * 8, 1,
+ align, xilinx_drm_fbdev_vres);
+ if (IS_ERR(private->fb)) {
+ DRM_ERROR("failed to initialize drm fb\n");
+ private->fb = NULL;
+ }
+ } else {
+ dev_info(&pdev->dev, "fbdev is not initialized\n");
+ }
+
+ drm_kms_helper_poll_init(drm);
+
+ drm_helper_disable_unused_functions(drm);
+
+ platform_set_drvdata(pdev, private);
+
+ if (match) {
+ ret = component_master_add_with_match(drm->dev,
+ &xilinx_drm_ops, match);
+ if (ret)
+ goto err_master;
+ }
+
+ ret = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(sizeof(dma_addr_t) * 8));
+ if (ret) {
+ dev_info(&pdev->dev, "failed to set coherent mask (%zu)\n",
+ sizeof(dma_addr_t));
+ }
+
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto err_master;
+
+ return 0;
+
+err_master:
+ component_master_del(drm->dev, &xilinx_drm_ops);
+err_config:
+ drm_mode_config_cleanup(drm);
+ if (ret == -EPROBE_DEFER)
+ DRM_INFO("load() is defered & will be called again\n");
+err_drm:
+ drm_dev_put(drm);
+ return ret;
+}
+
+/* exit xilinx drm platform */
+static int xilinx_drm_platform_remove(struct platform_device *pdev)
+{
+ struct xilinx_drm_private *private = platform_get_drvdata(pdev);
+ struct drm_device *drm = private->drm;
+
+ component_master_del(drm->dev, &xilinx_drm_ops);
+ drm_kms_helper_poll_fini(drm);
+ xilinx_drm_fb_fini(private->fb);
+ drm_mode_config_cleanup(drm);
+ drm->dev_private = NULL;
+ drm_dev_put(private->drm);
+
+ return 0;
+}
+
+static void xilinx_drm_platform_shutdown(struct platform_device *pdev)
+{
+ struct xilinx_drm_private *private = platform_get_drvdata(pdev);
+
+ drm_put_dev(private->drm);
+}
+
+static const struct of_device_id xilinx_drm_of_match[] = {
+ { .compatible = "xlnx,drm", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_drm_of_match);
+
+static struct platform_driver xilinx_drm_private_driver = {
+ .probe = xilinx_drm_platform_probe,
+ .remove = xilinx_drm_platform_remove,
+ .shutdown = xilinx_drm_platform_shutdown,
+ .driver = {
+ .name = "xilinx-drm",
+ .pm = &xilinx_drm_pm_ops,
+ .of_match_table = xilinx_drm_of_match,
+ },
+};
+
+module_platform_driver(xilinx_drm_private_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx DRM KMS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_drv.h b/drivers/gpu/drm/xilinx/xilinx_drm_drv.h
new file mode 100644
index 000000000000..b871d421df84
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_drv.h
@@ -0,0 +1,65 @@
+/*
+ * Xilinx DRM KMS Header for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_H_
+#define _XILINX_DRM_H_
+
+enum xilinx_video_format {
+ XILINX_VIDEO_FORMAT_YUV422 = 0,
+ XILINX_VIDEO_FORMAT_YUV444 = 1,
+ XILINX_VIDEO_FORMAT_RGB = 2,
+ XILINX_VIDEO_FORMAT_YUV420 = 3,
+ XILINX_VIDEO_FORMAT_XRGB = 16,
+ XILINX_VIDEO_FORMAT_NONE = 32,
+};
+
+/* convert the xilinx format to the drm format */
+int xilinx_drm_format_by_code(unsigned int xilinx_format, u32 *drm_format);
+int xilinx_drm_format_by_name(const char *name, u32 *drm_format);
+
+unsigned int xilinx_drm_format_bpp(u32 drm_format);
+unsigned int xilinx_drm_format_depth(u32 drm_format);
+
+/* io write operations */
+static inline void xilinx_drm_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+/* io read operations */
+static inline u32 xilinx_drm_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+static inline void xilinx_drm_clr(void __iomem *base, int offset, u32 clr)
+{
+ xilinx_drm_writel(base, offset, xilinx_drm_readl(base, offset) & ~clr);
+}
+
+static inline void xilinx_drm_set(void __iomem *base, int offset, u32 set)
+{
+ xilinx_drm_writel(base, offset, xilinx_drm_readl(base, offset) | set);
+}
+
+struct drm_device;
+
+bool xilinx_drm_check_format(struct drm_device *drm, uint32_t fourcc);
+uint32_t xilinx_drm_get_format(struct drm_device *drm);
+unsigned int xilinx_drm_get_align(struct drm_device *drm);
+
+#endif /* _XILINX_DRM_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_dsi.c b/drivers/gpu/drm/xilinx/xilinx_drm_dsi.c
new file mode 100644
index 000000000000..b168ee26a6fe
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_dsi.c
@@ -0,0 +1,808 @@
+/*
+ * Xilinx FPGA MIPI DSI Tx Controller driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Siva Rajesh J <siva.rajesh.jarugula@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <video/mipi_display.h>
+#include <video/videomode.h>
+
+/* DSI Tx IP registers */
+#define XDSI_CCR 0x00
+#define XDSI_CCR_COREENB BIT(0)
+#define XDSI_CCR_CRREADY BIT(2)
+#define XDSI_PCR 0x04
+#define XDSI_PCR_VIDEOMODE(x) (((x) & 0x3) << 3)
+#define XDSI_PCR_VIDEOMODE_MASK (0x3 << 3)
+#define XDSI_PCR_VIDEOMODE_SHIFT 3
+#define XDSI_PCR_BLLPTYPE(x) ((x) << 5)
+#define XDSI_PCR_BLLPMODE(x) ((x) << 6)
+#define XDSI_PCR_EOTPENABLE(x) ((x) << 13)
+#define XDSI_GIER 0x20
+#define XDSI_ISR 0x24
+#define XDSI_IER 0x28
+#define XDSI_CMD 0x30
+#define XDSI_CMD_QUEUE_PACKET(x) (((x) & 0xffffff) << 0)
+#define XDSI_TIME1 0x50
+#define XDSI_TIME1_BLLP_BURST(x) (((x) & 0xffff) << 0)
+#define XDSI_TIME1_HSA(x) (((x) & 0xffff) << 16)
+#define XDSI_TIME2 0x54
+#define XDSI_TIME2_VACT(x) (((x) & 0xffff) << 0)
+#define XDSI_TIME2_HACT(x) (((x) & 0xffff) << 16)
+#define XDSI_HACT_MULTIPLIER GENMASK(1, 0)
+#define XDSI_TIME3 0x58
+#define XDSI_TIME3_HFP(x) (((x) & 0xffff) << 0)
+#define XDSI_TIME3_HBP(x) (((x) & 0xffff) << 16)
+#define XDSI_TIME4 0x5c
+#define XDSI_TIME4_VFP(x) (((x) & 0xff) << 0)
+#define XDSI_TIME4_VBP(x) (((x) & 0xff) << 8)
+#define XDSI_TIME4_VSA(x) (((x) & 0xff) << 16)
+#define XDSI_LTIME 0x60
+#define XDSI_BLLP_TIME 0x64
+#define XDSI_NUM_DATA_TYPES 4
+#define XDSI_VIDEO_MODE_SYNC_PULSE 0x0
+#define XDSI_VIDEO_MODE_SYNC_EVENT 0x1
+#define XDSI_VIDEO_MODE_BURST 0x2
+
+/*
+ * Used as a multiplication factor for HACT based on used
+ * DSI data type.
+ *
+ * e.g. for RGB666_L datatype and 1920x1080 resolution,
+ * the Hact (WC) would be as follows -
+ * 1920 pixels * 18 bits per pixel / 8 bits per byte
+ * = 1920 pixels * 2.25 bytes per pixel = 4320 bytes.
+ *
+ * Data Type - Multiplication factor
+ * RGB888 - 3
+ * RGB666_L - 2.25
+ * RGB666_P - 2.25
+ * RGB565 - 2
+ *
+ * Since the multiplication factor may be a fractional number,
+ * a 100x multiplication factor is used.
+ *
+ * XDSI_NUM_DATA_TYPES represents number of data types in the
+ * enum mipi_dsi_pixel_format in the MIPI DSI part of DRM framework.
+ */
+static const int xdsi_mul_factor[XDSI_NUM_DATA_TYPES] = {300, 225, 225, 200};
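+
+/*
+ * Worked example (for clarity; matches the table above): the word count
+ * programmed into XDSI_TIME2 is hactive * xdsi_mul_factor[format] / 100,
+ * e.g. 1920 * 225 / 100 = 4320 bytes for RGB666_L at 1080p.
+ */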
+
+/*
+ * struct xilinx_dsi - Core configuration DSI Tx subsystem device structure
+ * @drm_encoder: DRM encoder structure
+ * @dsi_host: DSI host device
+ * @connector: DRM connector structure
+ * @panel_node: MIPI DSI device panel node
+ * @panel: DRM panel structure
+ * @dev: device structure
+ * @iomem: Base address of DSI subsystem
+ * @lanes: number of active data lanes supported by DSI controller
+ * @mode_flags: DSI operation mode related flags
+ * @format: pixel format for video mode of DSI controller
+ * @vm: videomode data structure
+ * @mul_factor: multiplication factor for HACT timing parameter
+ * @eotp_prop: configurable EoTP DSI parameter
+ * @bllp_mode_prop: configurable BLLP mode DSI parameter
+ * @bllp_type_prop: configurable BLLP type DSI parameter
+ * @video_mode_prop: configurable Video mode DSI parameter
+ * @bllp_burst_time_prop: Configurable BLLP time for burst mode
+ * @cmd_queue_prop: configurable command queue
+ * @eotp_prop_val: configurable EoTP DSI parameter value
+ * @bllp_mode_prop_val: configurable BLLP mode DSI parameter value
+ * @bllp_type_prop_val: configurable BLLP type DSI parameter value
+ * @video_mode_prop_val: configurable Video mode DSI parameter value
+ * @bllp_burst_time_prop_val: Configurable BLLP time for burst mode value
+ * @cmd_queue_prop_val: configurable command queue value
+ */
+struct xilinx_dsi {
+ struct drm_encoder encoder;
+ struct mipi_dsi_host dsi_host;
+ struct drm_connector connector;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+ struct device *dev;
+ void __iomem *iomem;
+ u32 lanes;
+ u32 mode_flags;
+ enum mipi_dsi_pixel_format format;
+ struct videomode vm;
+ u32 mul_factor;
+ struct drm_property *eotp_prop;
+ struct drm_property *bllp_mode_prop;
+ struct drm_property *bllp_type_prop;
+ struct drm_property *video_mode_prop;
+ struct drm_property *bllp_burst_time_prop;
+ struct drm_property *cmd_queue_prop;
+ bool eotp_prop_val;
+ bool bllp_mode_prop_val;
+ bool bllp_type_prop_val;
+ u32 video_mode_prop_val;
+ u32 bllp_burst_time_prop_val;
+ u32 cmd_queue_prop_val;
+};
+
+#define host_to_dsi(host) container_of(host, struct xilinx_dsi, dsi_host)
+#define connector_to_dsi(c) container_of(c, struct xilinx_dsi, connector)
+#define encoder_to_dsi(e) container_of(e, struct xilinx_dsi, encoder)
+
+static inline void xilinx_dsi_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xilinx_dsi_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * xilinx_dsi_set_default_drm_properties - Configure DSI DRM
+ * properties with their default values
+ * @dsi: DSI structure having the updated user parameters
+ */
+static void
+xilinx_dsi_set_default_drm_properties(struct xilinx_dsi *dsi)
+{
+ drm_object_property_set_value(&dsi->connector.base, dsi->eotp_prop, 1);
+ drm_object_property_set_value(&dsi->connector.base,
+ dsi->bllp_mode_prop, 0);
+ drm_object_property_set_value(&dsi->connector.base,
+ dsi->bllp_type_prop, 0);
+ drm_object_property_set_value(&dsi->connector.base,
+ dsi->video_mode_prop, 0);
+ drm_object_property_set_value(&dsi->connector.base,
+ dsi->bllp_burst_time_prop, 0);
+ drm_object_property_set_value(&dsi->connector.base,
+ dsi->cmd_queue_prop, 0);
+}
+
+/**
+ * xilinx_dsi_set_config_parameters - Configure DSI Tx registers with parameters
+ * provided by the user application
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure having drm_property parameters
+ * configured by the user application and writes them into DSI IP registers.
+ */
+static void xilinx_dsi_set_config_parameters(struct xilinx_dsi *dsi)
+{
+ u32 reg = 0;
+
+ reg |= XDSI_PCR_EOTPENABLE(dsi->eotp_prop_val);
+ reg |= XDSI_PCR_VIDEOMODE(dsi->video_mode_prop_val);
+ reg |= XDSI_PCR_BLLPTYPE(dsi->bllp_type_prop_val);
+ reg |= XDSI_PCR_BLLPMODE(dsi->bllp_mode_prop_val);
+
+ xilinx_dsi_writel(dsi->iomem, XDSI_PCR, reg);
+
+ /*
+ * Configure the burst time if video mode is burst.
+ * HSA of TIME1 register is ignored in this mode.
+ */
+ if (dsi->video_mode_prop_val == XDSI_VIDEO_MODE_BURST) {
+ reg = XDSI_TIME1_BLLP_BURST(dsi->bllp_burst_time_prop_val);
+ xilinx_dsi_writel(dsi->iomem, XDSI_TIME1, reg);
+ }
+
+ reg = XDSI_CMD_QUEUE_PACKET(dsi->cmd_queue_prop_val);
+ xilinx_dsi_writel(dsi->iomem, XDSI_CMD, reg);
+
+ dev_dbg(dsi->dev, "PCR register value is = %x\n",
+ xilinx_dsi_readl(dsi->iomem, XDSI_PCR));
+}
+
+/**
+ * xilinx_dsi_set_display_mode - Configure DSI timing registers
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function writes the timing parameters of DSI IP which are
+ * retrieved from panel timing values.
+ */
+static void xilinx_dsi_set_display_mode(struct xilinx_dsi *dsi)
+{
+ struct videomode *vm = &dsi->vm;
+ u32 reg, video_mode;
+
+ reg = xilinx_dsi_readl(dsi->iomem, XDSI_PCR);
+ video_mode = ((reg & XDSI_PCR_VIDEOMODE_MASK) >>
+ XDSI_PCR_VIDEOMODE_SHIFT);
+
+ /* configure the HSA value only in non-burst sync pulse video mode */
+ if ((!video_mode) &&
+ (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)) {
+ reg = XDSI_TIME1_HSA(vm->hsync_len);
+ xilinx_dsi_writel(dsi->iomem, XDSI_TIME1, reg);
+ }
+
+ reg = XDSI_TIME4_VFP(vm->vfront_porch) |
+ XDSI_TIME4_VBP(vm->vback_porch) |
+ XDSI_TIME4_VSA(vm->vsync_len);
+ xilinx_dsi_writel(dsi->iomem, XDSI_TIME4, reg);
+
+ reg = XDSI_TIME3_HFP(vm->hfront_porch) |
+ XDSI_TIME3_HBP(vm->hback_porch);
+ xilinx_dsi_writel(dsi->iomem, XDSI_TIME3, reg);
+
+ dev_dbg(dsi->dev, "mul factor for parsed datatype is = %d\n",
+ (dsi->mul_factor) / 100);
+
+ /*
+ * The HACT parameter received from panel timing values should be
+ * divisible by 4. The reason is that the word count given as input
+ * to the DSI controller is HACT * mul_factor. The mul_factor is
+ * 3, 2.25, 2.25, 2 respectively for RGB888, RGB666_L, RGB666_P and
+ * RGB565.
+ * e.g. for RGB666_L color format and 1080p, the word count is
+ * 1920 * 2.25 = 4320, which is divisible by 4 and is a valid input
+ * to the DSI controller. Based on this 2.25 mul factor, we use a
+ * division factor (XDSI_HACT_MULTIPLIER) of 4 for the check below.
+ */
+ if (((vm->hactive) & XDSI_HACT_MULTIPLIER) != 0)
+ dev_alert(dsi->dev, "Incorrect HACT will be programmed\n");
+
+ reg = XDSI_TIME2_HACT((vm->hactive) * (dsi->mul_factor) / 100) |
+ XDSI_TIME2_VACT(vm->vactive);
+ xilinx_dsi_writel(dsi->iomem, XDSI_TIME2, reg);
+
+ dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive);
+}
+
+/**
+ * xilinx_dsi_set_display_enable - Enables the DSI Tx IP core enable
+ * register bit
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure and enables the core enable bit
+ * of core configuration register.
+ */
+static void xilinx_dsi_set_display_enable(struct xilinx_dsi *dsi)
+{
+ u32 reg;
+
+ reg = xilinx_dsi_readl(dsi->iomem, XDSI_CCR);
+ reg |= XDSI_CCR_COREENB;
+
+ xilinx_dsi_writel(dsi->iomem, XDSI_CCR, reg);
+ dev_dbg(dsi->dev, "MIPI DSI Tx controller is enabled.\n");
+}
+
+/**
+ * xilinx_dsi_set_display_disable - Disable the DSI Tx IP core enable
+ * register bit
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure and disables the core enable bit
+ * of core configuration register.
+ */
+static void xilinx_dsi_set_display_disable(struct xilinx_dsi *dsi)
+{
+ u32 reg;
+
+ reg = xilinx_dsi_readl(dsi->iomem, XDSI_CCR);
+ reg &= ~XDSI_CCR_COREENB;
+
+ xilinx_dsi_writel(dsi->iomem, XDSI_CCR, reg);
+ dev_dbg(dsi->dev, "DSI Tx is disabled. reset regs to default values\n");
+}
+
+static void xilinx_dsi_encoder_dpms(struct drm_encoder *encoder,
+ int mode)
+{
+ struct xilinx_dsi *dsi = encoder_to_dsi(encoder);
+
+ dev_dbg(dsi->dev, "encoder dpms state: %d\n", mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ xilinx_dsi_set_display_enable(dsi);
+ break;
+ default:
+ xilinx_dsi_set_display_disable(dsi);
+ xilinx_dsi_set_default_drm_properties(dsi);
+ break;
+ }
+}
+
+/**
+ * xilinx_dsi_connector_set_property - implementation of drm_connector_funcs
+ * set_property invoked by IOCTL call to DRM_IOCTL_MODE_OBJ_SETPROPERTY
+ *
+ * @base_connector: pointer Xilinx DSI connector
+ * @property: pointer to the drm_property structure
+ * @value: DSI parameter value that is configured from user application
+ *
+ * This function takes a drm_property name and value provided by the user
+ * application and updates the DSI structure property variables with the
+ * values. These values are later used to configure the DSI Tx IP.
+ *
+ * Return: 0 on success OR -EINVAL if setting property fails
+ */
+static int
+xilinx_dsi_connector_set_property(struct drm_connector *base_connector,
+ struct drm_property *property,
+ u64 value)
+{
+ struct xilinx_dsi *dsi = connector_to_dsi(base_connector);
+
+ dev_dbg(dsi->dev, "property name = %s, value = %lld\n",
+ property->name, value);
+
+ if (property == dsi->eotp_prop)
+ dsi->eotp_prop_val = !!value;
+ else if (property == dsi->bllp_mode_prop)
+ dsi->bllp_mode_prop_val = !!value;
+ else if (property == dsi->bllp_type_prop)
+ dsi->bllp_type_prop_val = !!value;
+ else if (property == dsi->video_mode_prop)
+ dsi->video_mode_prop_val = (unsigned int)value;
+ else if (property == dsi->bllp_burst_time_prop)
+ dsi->bllp_burst_time_prop_val = (unsigned int)value;
+ else if (property == dsi->cmd_queue_prop)
+ dsi->cmd_queue_prop_val = (unsigned int)value;
+ else
+ return -EINVAL;
+
+ xilinx_dsi_set_config_parameters(dsi);
+
+ return 0;
+}
+
+static int xilinx_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ u32 panel_lanes;
+ struct xilinx_dsi *dsi = host_to_dsi(host);
+
+ panel_lanes = device->lanes;
+ dsi->mode_flags = device->mode_flags;
+ dsi->panel_node = device->dev.of_node;
+
+ if (panel_lanes != dsi->lanes) {
+ dev_err(dsi->dev, "Mismatch of lanes. panel = %d, DSI = %d\n",
+ panel_lanes, dsi->lanes);
+ return -EINVAL;
+ }
+
+ if ((dsi->lanes > 4) || (dsi->lanes < 1)) {
+ dev_err(dsi->dev, "%d lanes : invalid xlnx,dsi-num-lanes\n",
+ dsi->lanes);
+ return -EINVAL;
+ }
+
+ if (device->format != dsi->format) {
+ dev_err(dsi->dev, "Mismatch of format. panel = %d, DSI = %d\n",
+ device->format, dsi->format);
+ return -EINVAL;
+ }
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+static int xilinx_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct xilinx_dsi *dsi = host_to_dsi(host);
+
+ dsi->panel_node = NULL;
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops xilinx_dsi_ops = {
+ .attach = xilinx_dsi_host_attach,
+ .detach = xilinx_dsi_host_detach,
+};
+
+static int xilinx_dsi_connector_dpms(struct drm_connector *connector,
+ int mode)
+{
+ struct xilinx_dsi *dsi = connector_to_dsi(connector);
+ int ret;
+
+ dev_dbg(dsi->dev, "connector dpms state: %d\n", mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ ret = drm_panel_prepare(dsi->panel);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_panel_enable(dsi->panel);
+ if (ret < 0) {
+ drm_panel_unprepare(dsi->panel);
+ dev_err(dsi->dev, "DRM panel not enabled. power off DSI\n");
+ return ret;
+ }
+ break;
+ default:
+ drm_panel_disable(dsi->panel);
+ drm_panel_unprepare(dsi->panel);
+ break;
+ }
+
+ return drm_helper_connector_dpms(connector, mode);
+}
+
+static enum drm_connector_status
+xilinx_dsi_detect(struct drm_connector *connector, bool force)
+{
+ struct xilinx_dsi *dsi = connector_to_dsi(connector);
+
+ if (!dsi->panel) {
+ dsi->panel = of_drm_find_panel(dsi->panel_node);
+ if (dsi->panel)
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ } else if (!dsi->panel_node) {
+ xilinx_dsi_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_panel_detach(dsi->panel);
+ dsi->panel = NULL;
+ }
+
+ if (dsi->panel)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
+static void xilinx_dsi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ connector->dev = NULL;
+}
+
+static const struct drm_connector_funcs xilinx_dsi_connector_funcs = {
+ .dpms = xilinx_dsi_connector_dpms,
+ .detect = xilinx_dsi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = xilinx_dsi_connector_destroy,
+ .set_property = xilinx_dsi_connector_set_property,
+};
+
+static int xilinx_dsi_get_modes(struct drm_connector *connector)
+{
+ struct xilinx_dsi *dsi = connector_to_dsi(connector);
+
+ if (dsi->panel)
+ return dsi->panel->funcs->get_modes(dsi->panel);
+
+ return 0;
+}
+
+static struct drm_encoder *
+xilinx_dsi_best_encoder(struct drm_connector *connector)
+{
+ return &(connector_to_dsi(connector)->encoder);
+}
+
+static struct drm_connector_helper_funcs xilinx_dsi_connector_helper_funcs = {
+ .get_modes = xilinx_dsi_get_modes,
+ .best_encoder = xilinx_dsi_best_encoder,
+};
+
+/**
+ * xilinx_drm_dsi_connector_create_property - create DSI connector properties
+ *
+ * @base_connector: pointer to Xilinx DSI connector
+ *
+ * This function takes the xilinx DSI connector component and defines
+ * the drm_property variables with their default values.
+ */
+static void
+xilinx_drm_dsi_connector_create_property(struct drm_connector *base_connector)
+{
+ struct drm_device *dev = base_connector->dev;
+ struct xilinx_dsi *dsi = connector_to_dsi(base_connector);
+
+ dsi->eotp_prop = drm_property_create_bool(dev, 1, "eotp");
+ dsi->video_mode_prop = drm_property_create_range(dev, 0,
+ "video_mode", 0, 2);
+ dsi->bllp_mode_prop = drm_property_create_bool(dev, 0, "bllp_mode");
+ dsi->bllp_type_prop = drm_property_create_bool(dev, 0, "bllp_type");
+ dsi->bllp_burst_time_prop = drm_property_create_range(dev, 0,
+ "bllp_burst_time", 0, 0xFFFF);
+ dsi->cmd_queue_prop = drm_property_create_range(dev, 0,
+ "cmd_queue", 0, 0xFFFFFF);
+}
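+
+/*
+ * From userspace these appear as ordinary DRM connector properties and can
+ * be set through DRM_IOCTL_MODE_OBJ_SETPROPERTY, e.g. (illustrative only;
+ * object IDs are system dependent):
+ *
+ *	modetest -M xilinx_drm -w <connector_id>:video_mode:2
+ */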
+
+/**
+ * xilinx_drm_dsi_connector_attach_property - attach DSI connector
+ * properties
+ *
+ * @base_connector: pointer to Xilinx DSI connector
+ */
+static void
+xilinx_drm_dsi_connector_attach_property(struct drm_connector *base_connector)
+{
+ struct xilinx_dsi *dsi = connector_to_dsi(base_connector);
+ struct drm_mode_object *obj = &base_connector->base;
+
+ if (dsi->eotp_prop)
+ drm_object_attach_property(obj, dsi->eotp_prop, 1);
+
+ if (dsi->video_mode_prop)
+ drm_object_attach_property(obj, dsi->video_mode_prop, 0);
+
+ if (dsi->bllp_burst_time_prop)
+ drm_object_attach_property(&base_connector->base,
+ dsi->bllp_burst_time_prop, 0);
+
+ if (dsi->bllp_mode_prop)
+ drm_object_attach_property(&base_connector->base,
+ dsi->bllp_mode_prop, 0);
+
+ if (dsi->bllp_type_prop)
+ drm_object_attach_property(&base_connector->base,
+ dsi->bllp_type_prop, 0);
+
+ if (dsi->cmd_queue_prop)
+ drm_object_attach_property(&base_connector->base,
+ dsi->cmd_queue_prop, 0);
+}
+
+static int xilinx_dsi_create_connector(struct drm_encoder *encoder)
+{
+ struct xilinx_dsi *dsi = encoder_to_dsi(encoder);
+ struct drm_connector *connector = &dsi->connector;
+ int ret;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &xilinx_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (ret) {
+ dev_err(dsi->dev, "Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &xilinx_dsi_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ xilinx_drm_dsi_connector_create_property(connector);
+ xilinx_drm_dsi_connector_attach_property(connector);
+
+ return 0;
+}
+
+static bool xilinx_dsi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/**
+ * xilinx_dsi_mode_set - derive the DSI timing parameters
+ *
+ * @encoder: pointer to Xilinx DRM encoder
+ * @mode: DRM kernel-internal display mode structure
+ * @adjusted_mode: DSI panel timing parameters
+ *
+ * This function derives the DSI IP timing parameters from the timing
+ * values given in the attached panel driver.
+ */
+static void xilinx_dsi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct xilinx_dsi *dsi = encoder_to_dsi(encoder);
+ struct videomode *vm = &dsi->vm;
+ struct drm_display_mode *m = adjusted_mode;
+
+ vm->hactive = m->hdisplay;
+ vm->vactive = m->vdisplay;
+ vm->vfront_porch = m->vsync_start - m->vdisplay;
+ vm->vback_porch = m->vtotal - m->vsync_end;
+ vm->vsync_len = m->vsync_end - m->vsync_start;
+ vm->hfront_porch = m->hsync_start - m->hdisplay;
+ vm->hback_porch = m->htotal - m->hsync_end;
+ vm->hsync_len = m->hsync_end - m->hsync_start;
+ xilinx_dsi_set_display_mode(dsi);
+}
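+
+/*
+ * Worked example (for clarity, using the standard CEA 1080p timings of
+ * hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200):
+ * hfront_porch = 2008 - 1920 = 88, hsync_len = 2052 - 2008 = 44 and
+ * hback_porch = 2200 - 2052 = 148.
+ */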
+
+static void xilinx_dsi_prepare(struct drm_encoder *encoder)
+{
+ struct xilinx_dsi *dsi = encoder_to_dsi(encoder);
+
+ dev_dbg(dsi->dev, "%s %d\n", __func__, __LINE__);
+ xilinx_dsi_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void xilinx_dsi_commit(struct drm_encoder *encoder)
+{
+ struct xilinx_dsi *dsi = encoder_to_dsi(encoder);
+
+ dev_dbg(dsi->dev, "config and enable the DSI: %s %d\n",
+ __func__, __LINE__);
+
+ xilinx_dsi_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_helper_funcs xilinx_dsi_encoder_helper_funcs = {
+ .dpms = xilinx_dsi_encoder_dpms,
+ .mode_fixup = xilinx_dsi_mode_fixup,
+ .mode_set = xilinx_dsi_mode_set,
+ .prepare = xilinx_dsi_prepare,
+ .commit = xilinx_dsi_commit,
+};
+
+static const struct drm_encoder_funcs xilinx_dsi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int xilinx_dsi_parse_dt(struct xilinx_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+ u32 datatype;
+
+ ret = of_property_read_u32(node, "xlnx,dsi-num-lanes",
+ &dsi->lanes);
+ if (ret < 0) {
+ dev_err(dsi->dev, "missing xlnx,dsi-num-lanes property\n");
+ return ret;
+ }
+
+ if ((dsi->lanes > 4) || (dsi->lanes < 1)) {
+ dev_err(dsi->dev, "%d lanes : invalid xlnx,dsi-num-lanes\n",
+ dsi->lanes);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,dsi-data-type", &datatype);
+
+ if (ret < 0) {
+ dev_err(dsi->dev, "missing xlnx,dsi-data-type property\n");
+ return ret;
+ }
+
+ if (datatype > MIPI_DSI_FMT_RGB565) {
+ dev_err(dsi->dev, "invalid xlnx,dsi-data-type value\n");
+ return -EINVAL;
+ }
+
+ dsi->format = datatype;
+
+ dsi->mul_factor = xdsi_mul_factor[datatype];
+
+ dev_dbg(dsi->dev, "DSI controller num lanes = %d", dsi->lanes);
+
+ dev_dbg(dsi->dev, "DSI controller datatype = %d\n", datatype);
+
+ return 0;
+}
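+
+/*
+ * Illustrative DT fragment (values are examples only; xlnx,dsi-data-type
+ * follows enum mipi_dsi_pixel_format, so 0 selects RGB888):
+ *
+ *	dsi_tx: dsi@80000000 {
+ *		compatible = "xlnx,mipi-dsi-tx-subsystem";
+ *		xlnx,dsi-num-lanes = <4>;
+ *		xlnx,dsi-data-type = <0>;
+ *	};
+ */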
+
+static int xilinx_dsi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xilinx_dsi *dsi = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &dsi->encoder;
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ /*
+ * TODO: The possible CRTCs are 1 now as per current implementation of
+ * DSI tx drivers. DRM framework can support more than one CRTCs and
+ * DSI driver can be enhanced for that.
+ */
+ encoder->possible_crtcs = 1;
+
+ ret = drm_encoder_init(drm_dev, encoder, &xilinx_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ if (ret)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &xilinx_dsi_encoder_helper_funcs);
+
+ ret = xilinx_dsi_create_connector(encoder);
+ if (ret) {
+ dev_err(dsi->dev, "fail creating connector, ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+
+ ret = mipi_dsi_host_register(&dsi->dsi_host);
+ if (ret) {
+ xilinx_dsi_connector_destroy(&dsi->connector);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void xilinx_dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xilinx_dsi *dsi = dev_get_drvdata(dev);
+
+ xilinx_dsi_encoder_dpms(&dsi->encoder, DRM_MODE_DPMS_OFF);
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+}
+
+static const struct component_ops xilinx_dsi_component_ops = {
+ .bind = xilinx_dsi_bind,
+ .unbind = xilinx_dsi_unbind,
+};
+
+static int xilinx_dsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xilinx_dsi *dsi;
+ int ret;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dsi->dsi_host.ops = &xilinx_dsi_ops;
+ dsi->dsi_host.dev = dev;
+ dsi->dev = dev;
+
+ ret = xilinx_dsi_parse_dt(dsi);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->iomem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dsi->iomem)) {
+ dev_err(dev, "failed to remap io region\n");
+ return PTR_ERR(dsi->iomem);
+ }
+
+ dev_dbg(dsi->dev, "dsi virtual address = %p %s %d\n",
+ dsi->iomem, __func__, __LINE__);
+
+ platform_set_drvdata(pdev, dsi);
+
+ return component_add(dev, &xilinx_dsi_component_ops);
+}
+
+static int xilinx_dsi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &xilinx_dsi_component_ops);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_dsi_of_match[] = {
+ { .compatible = "xlnx,mipi-dsi-tx-subsystem"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xilinx_dsi_of_match);
+
+static struct platform_driver dsi_driver = {
+ .probe = xilinx_dsi_probe,
+ .remove = xilinx_dsi_remove,
+ .driver = {
+ .name = "xilinx-mipi-dsi",
+ .owner = THIS_MODULE,
+ .of_match_table = xilinx_dsi_of_match,
+ },
+};
+
+module_platform_driver(dsi_driver);
+
+MODULE_AUTHOR("Siva Rajesh <sivaraj@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA MIPI DSI Tx Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_encoder.c b/drivers/gpu/drm/xilinx/xilinx_drm_encoder.c
new file mode 100644
index 000000000000..ca3f9f112162
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_encoder.c
@@ -0,0 +1,240 @@
+/*
+ * Xilinx DRM encoder driver for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_encoder.h"
+
+struct xilinx_drm_encoder {
+ struct drm_encoder_slave slave;
+ struct device *dev;
+ int dpms;
+};
+
+#define to_xilinx_encoder(x) \
+ container_of(x, struct xilinx_drm_encoder, slave)
+
+/* set encoder dpms */
+static void xilinx_drm_encoder_dpms(struct drm_encoder *base_encoder, int dpms)
+{
+ struct xilinx_drm_encoder *encoder;
+ struct drm_encoder_slave *encoder_slave;
+ const struct drm_encoder_slave_funcs *encoder_sfuncs;
+
+ encoder_slave = to_encoder_slave(base_encoder);
+ encoder_sfuncs = encoder_slave->slave_funcs;
+ encoder = to_xilinx_encoder(encoder_slave);
+
+ DRM_DEBUG_KMS("dpms: %d -> %d\n", encoder->dpms, dpms);
+
+ if (encoder->dpms == dpms)
+ return;
+
+ encoder->dpms = dpms;
+ if (encoder_sfuncs->dpms)
+ encoder_sfuncs->dpms(base_encoder, dpms);
+}
+
+/* adjust a mode if needed */
+static bool
+xilinx_drm_encoder_mode_fixup(struct drm_encoder *base_encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_encoder_slave *encoder_slave;
+ const struct drm_encoder_slave_funcs *encoder_sfuncs = NULL;
+ bool ret = true;
+
+ encoder_slave = to_encoder_slave(base_encoder);
+ encoder_sfuncs = encoder_slave->slave_funcs;
+ if (encoder_sfuncs->mode_fixup)
+ ret = encoder_sfuncs->mode_fixup(base_encoder, mode,
+ adjusted_mode);
+
+ return ret;
+}
+
+/* set mode to xilinx encoder */
+static void xilinx_drm_encoder_mode_set(struct drm_encoder *base_encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_encoder_slave *encoder_slave;
+ const struct drm_encoder_slave_funcs *encoder_sfuncs;
+
+ DRM_DEBUG_KMS("h: %d, v: %d\n",
+ adjusted_mode->hdisplay, adjusted_mode->vdisplay);
+ DRM_DEBUG_KMS("refresh: %d, pclock: %d khz\n",
+ adjusted_mode->vrefresh, adjusted_mode->clock);
+
+ encoder_slave = to_encoder_slave(base_encoder);
+ encoder_sfuncs = encoder_slave->slave_funcs;
+ if (encoder_sfuncs->mode_set)
+ encoder_sfuncs->mode_set(base_encoder, mode, adjusted_mode);
+}
+
+/* apply mode to encoder pipe */
+static void xilinx_drm_encoder_commit(struct drm_encoder *base_encoder)
+{
+ /* start encoder with new mode */
+ xilinx_drm_encoder_dpms(base_encoder, DRM_MODE_DPMS_ON);
+}
+
+/* prepare encoder */
+static void xilinx_drm_encoder_prepare(struct drm_encoder *base_encoder)
+{
+ xilinx_drm_encoder_dpms(base_encoder, DRM_MODE_DPMS_OFF);
+}
+
+/* get crtc */
+static struct drm_crtc *
+xilinx_drm_encoder_get_crtc(struct drm_encoder *base_encoder)
+{
+ return base_encoder->crtc;
+}
+
+static const struct drm_encoder_helper_funcs xilinx_drm_encoder_helper_funcs = {
+ .dpms = xilinx_drm_encoder_dpms,
+ .mode_fixup = xilinx_drm_encoder_mode_fixup,
+ .mode_set = xilinx_drm_encoder_mode_set,
+ .prepare = xilinx_drm_encoder_prepare,
+ .commit = xilinx_drm_encoder_commit,
+ .get_crtc = xilinx_drm_encoder_get_crtc,
+};
+
+/* destroy encoder */
+void xilinx_drm_encoder_destroy(struct drm_encoder *base_encoder)
+{
+ struct xilinx_drm_encoder *encoder;
+ struct drm_encoder_slave *encoder_slave;
+
+ encoder_slave = to_encoder_slave(base_encoder);
+ encoder = to_xilinx_encoder(encoder_slave);
+
+ /* make sure encoder is off */
+ xilinx_drm_encoder_dpms(base_encoder, DRM_MODE_DPMS_OFF);
+
+ drm_encoder_cleanup(base_encoder);
+ put_device(encoder->dev);
+}
+
+static const struct drm_encoder_funcs xilinx_drm_encoder_funcs = {
+ .destroy = xilinx_drm_encoder_destroy,
+};
+
+/* create encoder */
+struct drm_encoder *xilinx_drm_encoder_create(struct drm_device *drm,
+ struct device_node *node)
+{
+ struct xilinx_drm_encoder *encoder;
+ struct i2c_client *i2c_slv;
+ struct i2c_driver *i2c_driver;
+ struct drm_i2c_encoder_driver *drm_i2c_driver;
+ struct device_driver *device_driver;
+ struct platform_device *platform_slv;
+ struct platform_driver *platform_driver;
+ struct drm_platform_encoder_driver *drm_platform_driver;
+ int ret = 0;
+
+ encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder)
+ return ERR_PTR(-ENOMEM);
+
+ encoder->dpms = DRM_MODE_DPMS_OFF;
+
+ /* FIXME: Use DT to figure out crtcs / clones */
+ encoder->slave.base.possible_crtcs = 1;
+ encoder->slave.base.possible_clones = ~0;
+ ret = drm_encoder_init(drm, &encoder->slave.base,
+ &xilinx_drm_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret) {
+ DRM_ERROR("failed to initialize drm encoder\n");
+ return ERR_PTR(ret);
+ }
+
+ drm_encoder_helper_add(&encoder->slave.base,
+ &xilinx_drm_encoder_helper_funcs);
+
+ /* initialize slave encoder */
+ i2c_slv = of_find_i2c_device_by_node(node);
+ if (i2c_slv && i2c_slv->dev.driver) {
+ i2c_driver = to_i2c_driver(i2c_slv->dev.driver);
+ drm_i2c_driver = to_drm_i2c_encoder_driver(i2c_driver);
+ if (!drm_i2c_driver || !drm_i2c_driver->encoder_init) {
+ DRM_DEBUG_KMS("failed to initialize i2c slave\n");
+ ret = -EPROBE_DEFER;
+ goto err_out;
+ }
+
+ encoder->dev = &i2c_slv->dev;
+ ret = drm_i2c_driver->encoder_init(i2c_slv, drm,
+ &encoder->slave);
+ } else {
+ platform_slv = of_find_device_by_node(node);
+ if (!platform_slv) {
+ DRM_DEBUG_KMS("failed to get an encoder slv\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ device_driver = platform_slv->dev.driver;
+ if (!device_driver) {
+ DRM_DEBUG_KMS("failed to get device driver\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ platform_driver = to_platform_driver(device_driver);
+ drm_platform_driver =
+ to_drm_platform_encoder_driver(platform_driver);
+ if (!drm_platform_driver ||
+ !drm_platform_driver->encoder_init) {
+ DRM_DEBUG_KMS("failed to initialize platform slave\n");
+ ret = -EPROBE_DEFER;
+ goto err_out;
+ }
+
+ encoder->dev = &platform_slv->dev;
+ ret = drm_platform_driver->encoder_init(platform_slv, drm,
+ &encoder->slave);
+ }
+
+ if (ret) {
+ DRM_ERROR("failed to initialize encoder slave\n");
+ goto err_out;
+ }
+
+ if (!encoder->slave.slave_funcs) {
+ DRM_ERROR("there's no encoder slave function\n");
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+ return &encoder->slave.base;
+
+err_out:
+ return ERR_PTR(ret);
+}
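+
+/*
+ * Usage sketch (hypothetical caller): the slave lookup above supports both
+ * I2C-attached and platform-device encoders, so a caller would typically
+ * resolve a phandle from its own node and hand it in:
+ *
+ * sub_node = of_parse_phandle(drm->dev->of_node, "xlnx,encoder-slave", 0);
+ * encoder = xilinx_drm_encoder_create(drm, sub_node);
+ * of_node_put(sub_node);
+ * if (IS_ERR(encoder))
+ * return PTR_ERR(encoder);
+ *
+ * The "xlnx,encoder-slave" property name is illustrative here; the binding
+ * documentation defines the actual property.
+ */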
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_encoder.h b/drivers/gpu/drm/xilinx/xilinx_drm_encoder.h
new file mode 100644
index 000000000000..7707f14db499
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_encoder.h
@@ -0,0 +1,28 @@
+/*
+ * Xilinx DRM encoder header
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_ENCODER_H_
+#define _XILINX_DRM_ENCODER_H_
+
+struct drm_device;
+struct drm_encoder;
+
+struct drm_encoder *xilinx_drm_encoder_create(struct drm_device *drm,
+ struct device_node *node);
+void xilinx_drm_encoder_destroy(struct drm_encoder *base_encoder);
+
+#endif /* _XILINX_DRM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_fb.c b/drivers/gpu/drm/xilinx/xilinx_drm_fb.c
new file mode 100644
index 000000000000..5830d7ff6e0f
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_fb.c
@@ -0,0 +1,511 @@
+/*
+ * Xilinx DRM KMS Framebuffer helper
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * Based on drm_fb_cma_helper.c
+ *
+ * Copyright (C) 2012 Analog Device Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_fb.h"
+
+struct xilinx_drm_fb {
+ struct drm_framebuffer base;
+ struct drm_gem_cma_object *obj[4];
+};
+
+struct xilinx_drm_fbdev {
+ struct drm_fb_helper fb_helper;
+ struct xilinx_drm_fb *fb;
+ unsigned int align;
+ unsigned int vres_mult;
+};
+
+static inline struct xilinx_drm_fbdev *to_fbdev(struct drm_fb_helper *fb_helper)
+{
+ return container_of(fb_helper, struct xilinx_drm_fbdev, fb_helper);
+}
+
+static inline struct xilinx_drm_fb *to_fb(struct drm_framebuffer *base_fb)
+{
+ return container_of(base_fb, struct xilinx_drm_fb, base);
+}
+
+static void xilinx_drm_fb_destroy(struct drm_framebuffer *base_fb)
+{
+ struct xilinx_drm_fb *fb = to_fb(base_fb);
+ int i;
+
+ for (i = 0; i < 4; i++)
+ if (fb->obj[i])
+ drm_gem_object_put_unlocked(&fb->obj[i]->base);
+
+ drm_framebuffer_cleanup(base_fb);
+ kfree(fb);
+}
+
+static int xilinx_drm_fb_create_handle(struct drm_framebuffer *base_fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct xilinx_drm_fb *fb = to_fb(base_fb);
+
+ return drm_gem_handle_create(file_priv, &fb->obj[0]->base, handle);
+}
+
+static const struct drm_framebuffer_funcs xilinx_drm_fb_funcs = {
+ .destroy = xilinx_drm_fb_destroy,
+ .create_handle = xilinx_drm_fb_create_handle,
+};
+
+/**
+ * xilinx_drm_fb_alloc - Allocate a xilinx_drm_fb
+ * @drm: DRM object
+ * @mode_cmd: drm_mode_fb_cmd2 struct
+ * @obj: pointers for returned drm_gem_cma_objects
+ * @num_planes: number of planes to be allocated
+ *
+ * This function is based on drm_fb_cma_alloc().
+ *
+ * Return: a xilinx_drm_fb object, or ERR_PTR.
+ */
+static struct xilinx_drm_fb *
+xilinx_drm_fb_alloc(struct drm_device *drm,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_cma_object **obj, unsigned int num_planes)
+{
+ struct xilinx_drm_fb *fb;
+ int ret;
+ int i;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ drm_helper_mode_fill_fb_struct(drm, &fb->base, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ fb->obj[i] = obj[i];
+
+ ret = drm_framebuffer_init(drm, &fb->base, &xilinx_drm_fb_funcs);
+ if (ret) {
+ DRM_ERROR("Failed to initialize framebuffer: %d\n", ret);
+ kfree(fb);
+ return ERR_PTR(ret);
+ }
+
+ return fb;
+}
+
+/**
+ * xilinx_drm_fb_get_gem_obj - Get CMA GEM object for framebuffer
+ * @base_fb: the framebuffer
+ * @plane: which plane
+ *
+ * This function is based on drm_fb_cma_get_gem_obj().
+ *
+ * Return: a CMA GEM object for given framebuffer, or NULL if not available.
+ */
+struct drm_gem_cma_object *
+xilinx_drm_fb_get_gem_obj(struct drm_framebuffer *base_fb, unsigned int plane)
+{
+ struct xilinx_drm_fb *fb = to_fb(base_fb);
+
+ if (plane >= 4)
+ return NULL;
+
+ return fb->obj[plane];
+}
+
+static int xilinx_drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_mode_set *modeset;
+ int ret = 0;
+
+ if (oops_in_progress)
+ return -EBUSY;
+
+ mutex_lock(&fb_helper->client.modeset_mutex);
+ drm_modeset_lock_all(dev);
+ drm_client_for_each_modeset(modeset, &fb_helper->client) {
+ modeset->x = var->xoffset;
+ modeset->y = var->yoffset;
+
+ if (modeset->num_connectors) {
+ ret = drm_mode_set_config_internal(modeset);
+ if (!ret) {
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+ }
+ }
+ }
+ drm_modeset_unlock_all(dev);
+ mutex_unlock(&fb_helper->client.modeset_mutex);
+ return ret;
+}
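+
+/*
+ * When the fbdev is created with vres_mult = 2, the virtual yres is twice
+ * the visible one, so userspace can page-flip by panning between the two
+ * halves of the buffer, e.g. (sketch, error handling omitted):
+ *
+ * vinfo.yoffset = (frame & 1) ? vinfo.yres : 0;
+ * ioctl(fbdev_fd, FBIOPAN_DISPLAY, &vinfo);
+ */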
+
+static int
+xilinx_drm_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_mode_set *mode_set;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ switch (cmd) {
+ case FBIO_WAITFORVSYNC:
+ mutex_lock(&fb_helper->client.modeset_mutex);
+ drm_client_for_each_modeset(mode_set, &fb_helper->client) {
+ crtc = mode_set->crtc;
+ ret = drm_crtc_vblank_get(crtc);
+ if (!ret) {
+ drm_crtc_wait_one_vblank(crtc);
+ drm_crtc_vblank_put(crtc);
+ }
+ }
+ mutex_unlock(&fb_helper->client.modeset_mutex);
+ return ret;
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
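+
+/*
+ * Userspace can use the ioctl above to synchronize with the display, e.g.
+ * (sketch, error handling omitted):
+ *
+ * __u32 screen = 0;
+ * ioctl(fbdev_fd, FBIO_WAITFORVSYNC, &screen);
+ *
+ * which blocks until one vertical blanking period has passed on each CRTC
+ * bound to the fbdev emulation.
+ */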
+
+static struct fb_ops xilinx_drm_fbdev_ops = {
+ .owner = THIS_MODULE,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = xilinx_drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_ioctl = xilinx_drm_fb_ioctl,
+};
+
+/**
+ * xilinx_drm_fbdev_create - Create the fbdev with a framebuffer
+ * @fb_helper: fb helper structure
+ * @sizes: framebuffer size info
+ *
+ * This function is based on drm_fbdev_cma_create().
+ *
+ * Return: 0 if successful, or the error code.
+ */
+static int xilinx_drm_fbdev_create(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct xilinx_drm_fbdev *fbdev = to_fbdev(fb_helper);
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_device *drm = fb_helper->dev;
+ struct drm_gem_cma_object *obj;
+ struct drm_framebuffer *base_fb;
+ unsigned int bytes_per_pixel;
+ unsigned long offset;
+ struct fb_info *fbi;
+ size_t size;
+ int ret;
+
+ DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = ALIGN(sizes->surface_width * bytes_per_pixel,
+ fbdev->align);
+ mode_cmd.pixel_format = xilinx_drm_get_format(drm);
+
+ mode_cmd.height *= fbdev->vres_mult;
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ obj = drm_gem_cma_create(drm, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ fbi = framebuffer_alloc(0, drm->dev);
+ if (!fbi) {
+ DRM_ERROR("Failed to allocate framebuffer info.\n");
+ ret = -ENOMEM;
+ goto err_drm_gem_cma_free_object;
+ }
+
+ fbdev->fb = xilinx_drm_fb_alloc(drm, &mode_cmd, &obj, 1);
+ if (IS_ERR(fbdev->fb)) {
+ DRM_ERROR("Failed to allocate DRM framebuffer.\n");
+ ret = PTR_ERR(fbdev->fb);
+ goto err_framebuffer_release;
+ }
+
+ base_fb = &fbdev->fb->base;
+ fb_helper->fb = base_fb;
+ fb_helper->fbdev = fbi;
+
+ fbi->par = fb_helper;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &xilinx_drm_fbdev_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("Failed to allocate color map.\n");
+ goto err_xilinx_drm_fb_destroy;
+ }
+
+ drm_fb_helper_fill_info(fbi, fb_helper, sizes);
+ fbi->var.yres = base_fb->height / fbdev->vres_mult;
+
+ offset = fbi->var.xoffset * bytes_per_pixel;
+ offset += fbi->var.yoffset * base_fb->pitches[0];
+
+ drm->mode_config.fb_base = (resource_size_t)obj->paddr;
+ fbi->screen_base = (char __iomem *)(obj->vaddr + offset);
+ fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+ fbi->screen_size = size;
+ fbi->fix.smem_len = size;
+
+ return 0;
+
+err_xilinx_drm_fb_destroy:
+ drm_framebuffer_unregister_private(base_fb);
+ xilinx_drm_fb_destroy(base_fb);
+err_framebuffer_release:
+ framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+ drm_gem_cma_free_object(&obj->base);
+ return ret;
+}
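+
+/*
+ * Sizing sketch for the allocation above (hypothetical values): a 1920x1080
+ * surface at 32 bpp with a 64-byte pitch alignment and vres_mult = 2 (to
+ * leave room for panning) allocates
+ *
+ * pitch = ALIGN(1920 * 4, 64) = 7680 bytes
+ * size = 7680 * (1080 * 2) = 16588800 bytes
+ *
+ * from CMA, while the visible yres is set back to 1080 after
+ * drm_fb_helper_fill_info().
+ */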
+
+static const struct drm_fb_helper_funcs xilinx_drm_fb_helper_funcs = {
+ .fb_probe = xilinx_drm_fbdev_create,
+};
+
+/**
+ * xilinx_drm_fb_init - Allocate and initialize the Xilinx framebuffer helper
+ * @drm: DRM device
+ * @preferred_bpp: preferred bits per pixel for the device
+ * @max_conn_count: maximum number of connectors
+ * @align: alignment value for pitch
+ * @vres_mult: multiplier for virtual resolution
+ *
+ * This function is based on drm_fbdev_cma_init().
+ *
+ * Return: a newly allocated drm_fb_helper struct or an ERR_PTR.
+ */
+struct drm_fb_helper *
+xilinx_drm_fb_init(struct drm_device *drm, int preferred_bpp,
+ unsigned int max_conn_count, unsigned int align,
+ unsigned int vres_mult)
+{
+ struct xilinx_drm_fbdev *fbdev;
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev) {
+ DRM_ERROR("Failed to allocate drm fbdev.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fbdev->vres_mult = vres_mult;
+
+ fbdev->align = align;
+ fb_helper = &fbdev->fb_helper;
+ drm_fb_helper_prepare(drm, fb_helper, &xilinx_drm_fb_helper_funcs);
+
+ ret = drm_fb_helper_init(drm, fb_helper, max_conn_count);
+ if (ret < 0) {
+ DRM_ERROR("Failed to initialize drm fb helper.\n");
+ goto err_free;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+ if (ret < 0) {
+ DRM_ERROR("Failed to add connectors.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ drm_helper_disable_unused_functions(drm);
+
+ ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
+ if (ret < 0) {
+ DRM_ERROR("Failed to set initial hw configuration.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ return fb_helper;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_free:
+ kfree(fbdev);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * xilinx_drm_fb_fini - Free the Xilinx framebuffer helper
+ * @fb_helper: drm_fb_helper struct
+ *
+ * This function is based on drm_fbdev_cma_fini().
+ */
+void xilinx_drm_fb_fini(struct drm_fb_helper *fb_helper)
+{
+ struct xilinx_drm_fbdev *fbdev;
+
+ if (!fb_helper)
+ return;
+
+ fbdev = to_fbdev(fb_helper);
+ if (fbdev->fb_helper.fbdev) {
+ struct fb_info *info;
+
+ info = fbdev->fb_helper.fbdev;
+ unregister_framebuffer(info);
+
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
+ }
+
+ if (fbdev->fb) {
+ drm_framebuffer_unregister_private(&fbdev->fb->base);
+ xilinx_drm_fb_destroy(&fbdev->fb->base);
+ }
+
+ drm_fb_helper_fini(&fbdev->fb_helper);
+ kfree(fbdev);
+}
+
+/**
+ * xilinx_drm_fb_restore_mode - Restores initial framebuffer mode
+ * @fb_helper: drm_fb_helper struct, may be NULL
+ *
+ * This function is based on drm_fbdev_cma_restore_mode() and usually called
+ * from the Xilinx DRM driver's lastclose callback.
+ */
+void xilinx_drm_fb_restore_mode(struct drm_fb_helper *fb_helper)
+{
+ if (!fb_helper)
+ return;
+
+ drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+}
+
+/**
+ * xilinx_drm_fb_create - (struct drm_mode_config_funcs *)->fb_create callback
+ * @drm: DRM device
+ * @file_priv: drm file private data
+ * @mode_cmd: mode command for fb creation
+ *
+ * This function creates a drm_framebuffer for the given mode @mode_cmd. It
+ * is intended to be used as the fb_create callback of drm_mode_config_funcs.
+ *
+ * Return: a drm_framebuffer object if successful, or ERR_PTR.
+ */
+struct drm_framebuffer *
+xilinx_drm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct xilinx_drm_fb *fb;
+ struct drm_gem_cma_object *objs[4];
+ struct drm_gem_object *obj;
+ const struct drm_format_info *info;
+ struct drm_format_name_buf format_name;
+ unsigned int hsub;
+ unsigned int vsub;
+ int ret;
+ int i;
+
+ info = drm_format_info(mode_cmd->pixel_format);
+ if (!info) {
+ DRM_ERROR("Unsupported framebuffer format %s\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name));
+ return ERR_PTR(-EINVAL);
+ }
+
+ hsub = info->hsub;
+ vsub = info->vsub;
+
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = mode_cmd->width / (i ? hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int min_size;
+
+ obj = drm_gem_object_lookup(file_priv,
+ mode_cmd->handles[i]);
+ if (!obj) {
+ DRM_ERROR("Failed to lookup GEM object\n");
+ ret = -ENXIO;
+ goto err_gem_object_unreference;
+ }
+
+ min_size = (height - 1) * mode_cmd->pitches[i] + width *
+ info->cpp[i] + mode_cmd->offsets[i];
+
+ if (obj->size < min_size) {
+ drm_gem_object_put_unlocked(obj);
+ ret = -EINVAL;
+ goto err_gem_object_unreference;
+ }
+ objs[i] = to_drm_gem_cma_obj(obj);
+ }
+
+ fb = xilinx_drm_fb_alloc(drm, mode_cmd, objs, i);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
+ goto err_gem_object_unreference;
+ }
+
+ fb->base.format = info;
+
+ return &fb->base;
+
+err_gem_object_unreference:
+ for (i--; i >= 0; i--)
+ drm_gem_object_put_unlocked(&objs[i]->base);
+ return ERR_PTR(ret);
+}
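+
+/*
+ * Illustration of the size check above (hypothetical values): a 1280x720
+ * NV12 buffer has two planes; plane 0 must hold at least
+ * 719 * pitches[0] + 1280 + offsets[0] bytes, and the 2x2-subsampled plane 1
+ * at least 359 * pitches[1] + 640 * 2 + offsets[1] bytes (cpp of 2 for the
+ * interleaved CbCr samples at half width).
+ */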
+
+/**
+ * xilinx_drm_fb_hotplug_event - Handle hotplug events
+ * @fb_helper: drm_fb_helper struct, may be NULL
+ *
+ * This function is based on drm_fbdev_cma_hotplug_event() and usually called
+ * from the Xilinx DRM driver's output_poll_changed callback.
+ */
+void xilinx_drm_fb_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+ if (!fb_helper)
+ return;
+
+ drm_fb_helper_hotplug_event(fb_helper);
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_fb.h b/drivers/gpu/drm/xilinx/xilinx_drm_fb.h
new file mode 100644
index 000000000000..c8b436edd08d
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_fb.h
@@ -0,0 +1,38 @@
+/*
+ * Xilinx DRM KMS Framebuffer helper header
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_FB_H_
+#define _XILINX_DRM_FB_H_
+
+struct drm_fb_helper;
+
+struct drm_gem_cma_object *
+xilinx_drm_fb_get_gem_obj(struct drm_framebuffer *base_fb, unsigned int plane);
+
+struct drm_fb_helper *
+xilinx_drm_fb_init(struct drm_device *drm, int preferred_bpp,
+ unsigned int max_conn_count, unsigned int align,
+ unsigned int vres_mult);
+void xilinx_drm_fb_fini(struct drm_fb_helper *fb_helper);
+
+void xilinx_drm_fb_restore_mode(struct drm_fb_helper *fb_helper);
+struct drm_framebuffer *
+xilinx_drm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+void xilinx_drm_fb_hotplug_event(struct drm_fb_helper *fb_helper);
+
+#endif /* _XILINX_DRM_FB_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_gem.c b/drivers/gpu/drm/xilinx/xilinx_drm_gem.c
new file mode 100644
index 000000000000..b554c200ca09
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_gem.c
@@ -0,0 +1,45 @@
+/*
+ * Xilinx DRM KMS GEM helper
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_gem.h"
+
+/**
+ * xilinx_drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+ * @file_priv: drm_file object
+ * @drm: DRM object
+ * @args: info for dumb scanout buffer creation
+ *
+ * This function is used as the dumb_create callback of the drm_driver
+ * struct. It simply wraps drm_gem_cma_dumb_create_internal(), overriding the
+ * pitch with a value aligned to the requirement retrieved from the device.
+ *
+ * Return: The return value from drm_gem_cma_dumb_create_internal()
+ */
+int xilinx_drm_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+ args->pitch = ALIGN(pitch, xilinx_drm_get_align(drm));
+
+ return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+}
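+
+/*
+ * For example (hypothetical values), a 1024x768 dumb buffer at 32 bpp on a
+ * device reporting a 64-byte DMA alignment ends up with
+ *
+ * args->pitch = ALIGN(DIV_ROUND_UP(1024 * 32, 8), 64) = 4096 bytes
+ *
+ * before the CMA helper allocates pitch * height bytes for the object.
+ */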
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_gem.h b/drivers/gpu/drm/xilinx/xilinx_drm_gem.h
new file mode 100644
index 000000000000..9e05e78cb766
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_gem.h
@@ -0,0 +1,25 @@
+/*
+ * Xilinx DRM KMS GEM helper header
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_GEM_H_
+#define _XILINX_DRM_GEM_H_
+
+int xilinx_drm_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+#endif /* _XILINX_DRM_GEM_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_plane.c b/drivers/gpu/drm/xilinx/xilinx_drm_plane.c
new file mode 100644
index 000000000000..8467f22f86af
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_plane.c
@@ -0,0 +1,1098 @@
+/*
+ * Xilinx DRM plane driver
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+
+#include "xilinx_drm_dp_sub.h"
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_fb.h"
+#include "xilinx_drm_plane.h"
+
+#include "xilinx_cresample.h"
+#include "xilinx_osd.h"
+#include "xilinx_rgb2yuv.h"
+
+#define MAX_NUM_SUB_PLANES 4
+
+/**
+ * struct xilinx_drm_plane_dma - Xilinx drm plane VDMA object
+ *
+ * @chan: dma channel
+ * @xt: dma interleaved configuration template
+ * @sgl: data chunk for dma_interleaved_template
+ * @is_active: flag if the DMA is active
+ */
+struct xilinx_drm_plane_dma {
+ struct dma_chan *chan;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+ bool is_active;
+};
+
+/**
+ * struct xilinx_drm_plane - Xilinx drm plane object
+ *
+ * @base: base drm plane object
+ * @id: plane id
+ * @dpms: current dpms level
+ * @zpos: user requested z-position value
+ * @prio: actual layer priority
+ * @alpha: alpha value
+ * @alpha_enable: alpha enable value
+ * @primary: flag for primary plane
+ * @format: pixel format
+ * @dma: dma object
+ * @rgb2yuv: rgb2yuv instance
+ * @cresample: cresample instance
+ * @osd_layer: osd layer
+ * @dp_layer: DisplayPort subsystem layer
+ * @manager: plane manager
+ */
+struct xilinx_drm_plane {
+ struct drm_plane base;
+ int id;
+ int dpms;
+ unsigned int zpos;
+ unsigned int prio;
+ unsigned int alpha;
+ unsigned int alpha_enable;
+ bool primary;
+ u32 format;
+ struct xilinx_drm_plane_dma dma[MAX_NUM_SUB_PLANES];
+ struct xilinx_rgb2yuv *rgb2yuv;
+ struct xilinx_cresample *cresample;
+ struct xilinx_osd_layer *osd_layer;
+ struct xilinx_drm_dp_sub_layer *dp_layer;
+ struct xilinx_drm_plane_manager *manager;
+};
+
+#define MAX_PLANES 8
+
+/**
+ * struct xilinx_drm_plane_manager - Xilinx drm plane manager object
+ *
+ * @drm: drm device
+ * @node: plane device node
+ * @osd: osd instance
+ * @dp_sub: DisplayPort subsystem instance
+ * @num_planes: number of available planes
+ * @format: video format
+ * @max_width: maximum width
+ * @zpos_prop: z-position (priority) property
+ * @alpha_prop: alpha value property
+ * @alpha_enable_prop: alpha enable property
+ * @default_alpha: default alpha value
+ * @planes: xilinx drm planes
+ */
+struct xilinx_drm_plane_manager {
+ struct drm_device *drm;
+ struct device_node *node;
+ struct xilinx_osd *osd;
+ struct xilinx_drm_dp_sub *dp_sub;
+ int num_planes;
+ u32 format;
+ int max_width;
+ struct drm_property *zpos_prop;
+ struct drm_property *alpha_prop;
+ struct drm_property *alpha_enable_prop;
+ unsigned int default_alpha;
+ struct xilinx_drm_plane *planes[MAX_PLANES];
+};
+
+#define to_xilinx_plane(x) container_of(x, struct xilinx_drm_plane, base)
+
+void xilinx_drm_plane_commit(struct drm_plane *base_plane)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct dma_async_tx_descriptor *desc;
+ enum dma_ctrl_flags flags;
+ unsigned int i;
+
+ /* for xilinx video framebuffer dma, if used */
+ xilinx_xdma_drm_config(plane->dma[0].chan, plane->format);
+
+ DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+
+ for (i = 0; i < MAX_NUM_SUB_PLANES; i++) {
+ struct xilinx_drm_plane_dma *dma = &plane->dma[i];
+
+ if (dma->chan && dma->is_active) {
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ desc = dmaengine_prep_interleaved_dma(dma->chan,
+ &dma->xt,
+ flags);
+ if (!desc) {
+ DRM_ERROR("failed to prepare DMA descriptor\n");
+ return;
+ }
+
+ dmaengine_submit(desc);
+
+ dma_async_issue_pending(dma->chan);
+ }
+ }
+}
+
+/* set plane dpms */
+void xilinx_drm_plane_dpms(struct drm_plane *base_plane, int dpms)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct xilinx_drm_plane_manager *manager = plane->manager;
+ unsigned int i;
+
+ DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+ DRM_DEBUG_KMS("dpms: %d -> %d\n", plane->dpms, dpms);
+
+ if (plane->dpms == dpms)
+ return;
+
+ plane->dpms = dpms;
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ if (manager->dp_sub) {
+ if (plane->primary) {
+ xilinx_drm_dp_sub_enable_alpha(manager->dp_sub,
+ plane->alpha_enable);
+ xilinx_drm_dp_sub_set_alpha(manager->dp_sub,
+ plane->alpha);
+ }
+ xilinx_drm_dp_sub_layer_enable(manager->dp_sub,
+ plane->dp_layer);
+ }
+
+ if (plane->rgb2yuv)
+ xilinx_rgb2yuv_enable(plane->rgb2yuv);
+
+ if (plane->cresample)
+ xilinx_cresample_enable(plane->cresample);
+
+ /* enable osd */
+ if (manager->osd) {
+ xilinx_osd_disable_rue(manager->osd);
+
+ xilinx_osd_layer_set_priority(plane->osd_layer,
+ plane->prio);
+ xilinx_osd_layer_enable_alpha(plane->osd_layer,
+ plane->alpha_enable);
+ xilinx_osd_layer_set_alpha(plane->osd_layer,
+ plane->alpha);
+ xilinx_osd_layer_enable(plane->osd_layer);
+
+ xilinx_osd_enable_rue(manager->osd);
+ }
+
+ xilinx_drm_plane_commit(base_plane);
+ break;
+ default:
+ /* disable/reset osd */
+ if (manager->osd) {
+ xilinx_osd_disable_rue(manager->osd);
+
+ xilinx_osd_layer_set_dimension(plane->osd_layer,
+ 0, 0, 0, 0);
+ xilinx_osd_layer_disable(plane->osd_layer);
+
+ xilinx_osd_enable_rue(manager->osd);
+ }
+
+ if (plane->cresample) {
+ xilinx_cresample_disable(plane->cresample);
+ xilinx_cresample_reset(plane->cresample);
+ }
+
+ if (plane->rgb2yuv) {
+ xilinx_rgb2yuv_disable(plane->rgb2yuv);
+ xilinx_rgb2yuv_reset(plane->rgb2yuv);
+ }
+
+ /* stop dma engine and release descriptors */
+ for (i = 0; i < MAX_NUM_SUB_PLANES; i++) {
+ if (plane->dma[i].chan && plane->dma[i].is_active)
+ dmaengine_terminate_all(plane->dma[i].chan);
+ }
+
+ if (manager->dp_sub)
+ xilinx_drm_dp_sub_layer_disable(manager->dp_sub,
+ plane->dp_layer);
+
+ break;
+ }
+}
+
+/* mode set a plane */
+int xilinx_drm_plane_mode_set(struct drm_plane *base_plane,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct drm_gem_cma_object *obj;
+ const struct drm_format_info *info;
+ size_t offset;
+ unsigned int hsub, vsub, i;
+
+ DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+
+ /* configure cresample */
+ if (plane->cresample)
+ xilinx_cresample_configure(plane->cresample, crtc_w, crtc_h);
+
+ /* configure rgb2yuv */
+ if (plane->rgb2yuv)
+ xilinx_rgb2yuv_configure(plane->rgb2yuv, crtc_w, crtc_h);
+
+ DRM_DEBUG_KMS("h: %d(%d), v: %d(%d)\n",
+ src_w, crtc_x, src_h, crtc_y);
+ DRM_DEBUG_KMS("bpp: %d\n", fb->format->cpp[0] * 8);
+
+ info = fb->format;
+ if (!info) {
+ DRM_ERROR("framebuffer has no format info\n");
+ return -EINVAL;
+ }
+
+ hsub = info->hsub;
+ vsub = info->vsub;
+
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = src_w / (i ? hsub : 1);
+ unsigned int height = src_h / (i ? vsub : 1);
+ unsigned int cpp = info->cpp[i];
+
+ if (!cpp)
+ cpp = xilinx_drm_format_bpp(fb->format->format) >> 3;
+
+ obj = xilinx_drm_fb_get_gem_obj(fb, i);
+ if (!obj) {
+ DRM_ERROR("failed to get a gem obj for fb\n");
+ return -EINVAL;
+ }
+
+ plane->dma[i].xt.numf = height;
+ plane->dma[i].sgl[0].size = drm_format_plane_width_bytes(info,
+ i,
+ width);
+ plane->dma[i].sgl[0].icg = fb->pitches[i] -
+ plane->dma[i].sgl[0].size;
+ offset = drm_format_plane_width_bytes(info, i, src_x);
+ offset += src_y * fb->pitches[i];
+ offset += fb->offsets[i];
+ plane->dma[i].xt.src_start = obj->paddr + offset;
+ plane->dma[i].xt.frame_size = 1;
+ plane->dma[i].xt.dir = DMA_MEM_TO_DEV;
+ plane->dma[i].xt.src_sgl = true;
+ plane->dma[i].xt.dst_sgl = false;
+ plane->dma[i].is_active = true;
+ }
+
+ for (; i < MAX_NUM_SUB_PLANES; i++)
+ plane->dma[i].is_active = false;
+
+ /* set OSD dimensions */
+ if (plane->manager->osd) {
+ xilinx_osd_disable_rue(plane->manager->osd);
+
+ xilinx_osd_layer_set_dimension(plane->osd_layer, crtc_x, crtc_y,
+ src_w, src_h);
+
+ xilinx_osd_enable_rue(plane->manager->osd);
+ }
+
+ if (plane->manager->dp_sub) {
+ int ret;
+
+ ret = xilinx_drm_dp_sub_layer_check_size(plane->manager->dp_sub,
+ plane->dp_layer,
+ src_w, src_h);
+ if (ret)
+ return ret;
+
+ ret = xilinx_drm_dp_sub_layer_set_fmt(plane->manager->dp_sub,
+ plane->dp_layer,
+ fb->format->format);
+ if (ret) {
+ DRM_ERROR("failed to set dp_sub layer fmt\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
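+
+/*
+ * The interleaved template filled in above describes one frame of xt.numf
+ * lines, each a contiguous chunk of sgl[0].size bytes followed by an
+ * inter-chunk gap of sgl[0].icg bytes. For a hypothetical 800x600 XRGB8888
+ * plane scanned out of a framebuffer with a 4096-byte pitch:
+ *
+ * xt.numf = 600
+ * sgl[0].size = 800 * 4 = 3200 bytes
+ * sgl[0].icg = 4096 - 3200 = 896 bytes
+ * xt.src_start = paddr + src_x * 4 + src_y * 4096 + offsets[0]
+ */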
+
+/* update a plane: call mode_set() with the 16.16 fixed-point values converted to integers */
+static int xilinx_drm_plane_update(struct drm_plane *base_plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ int ret;
+
+ ret = xilinx_drm_plane_mode_set(base_plane, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x >> 16, src_y >> 16,
+ src_w >> 16, src_h >> 16);
+ if (ret) {
+ DRM_ERROR("failed to mode-set a plane\n");
+ return ret;
+ }
+
+ /* make sure a plane is on */
+ if (plane->dpms != DRM_MODE_DPMS_ON)
+ xilinx_drm_plane_dpms(base_plane, DRM_MODE_DPMS_ON);
+ else
+ xilinx_drm_plane_commit(base_plane);
+
+ return 0;
+}
+
+/* disable a plane */
+static int xilinx_drm_plane_disable(struct drm_plane *base_plane,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ xilinx_drm_plane_dpms(base_plane, DRM_MODE_DPMS_OFF);
+
+ return 0;
+}
+
+/* destroy a plane */
+static void xilinx_drm_plane_destroy(struct drm_plane *base_plane)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ unsigned int i;
+
+ xilinx_drm_plane_dpms(base_plane, DRM_MODE_DPMS_OFF);
+
+ plane->manager->planes[plane->id] = NULL;
+
+ drm_plane_cleanup(base_plane);
+
+ for (i = 0; i < MAX_NUM_SUB_PLANES; i++)
+ if (plane->dma[i].chan)
+ dma_release_channel(plane->dma[i].chan);
+
+ if (plane->manager->osd) {
+ xilinx_osd_layer_disable(plane->osd_layer);
+ xilinx_osd_layer_put(plane->osd_layer);
+ }
+
+ if (plane->manager->dp_sub) {
+ xilinx_drm_dp_sub_layer_disable(plane->manager->dp_sub,
+ plane->dp_layer);
+ xilinx_drm_dp_sub_layer_put(plane->manager->dp_sub,
+ plane->dp_layer);
+ }
+}
+
+/**
+ * xilinx_drm_plane_update_prio - Configure plane priorities based on zpos
+ * @manager: the plane manager
+ *
+ * Z-position values are the user-requested positions of the planes, while
+ * the priority is the actual position of each plane in hardware. Some
+ * hardware doesn't allow duplicate priorities, so this function needs to be
+ * called when a duplicate priority is found. The planes are then sorted by
+ * zpos value and the priorities are reconfigured. When planes share the same
+ * zpos value, the plane with the lower plane ID gets the lower priority.
+ */
+static void
+xilinx_drm_plane_update_prio(struct xilinx_drm_plane_manager *manager)
+{
+ struct xilinx_drm_plane *planes[MAX_PLANES];
+ struct xilinx_drm_plane *plane;
+ unsigned int i, j;
+
+ /* sort planes by zpos */
+ for (i = 0; i < manager->num_planes; i++) {
+ plane = manager->planes[i];
+
+ for (j = i; j > 0; --j) {
+ if (planes[j - 1]->zpos <= plane->zpos)
+ break;
+ planes[j] = planes[j - 1];
+ }
+
+ planes[j] = plane;
+ }
+
+ xilinx_osd_disable_rue(manager->osd);
+
+ /* remove duplicates by reassigning priority */
+ for (i = 0; i < manager->num_planes; i++) {
+ planes[i]->prio = i;
+ xilinx_osd_layer_set_priority(planes[i]->osd_layer,
+ planes[i]->prio);
+ }
+
+ xilinx_osd_enable_rue(manager->osd);
+}
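+
+/*
+ * For example (hypothetical state): three planes requesting
+ * zpos = {plane0: 1, plane1: 1, plane2: 0} are ordered
+ * {plane2, plane0, plane1} by the stable insertion sort above and get the
+ * unique priorities {0, 1, 2}, so plane0 ends up below plane1 because it
+ * has the lower plane ID.
+ */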
+
+static void xilinx_drm_plane_set_zpos(struct drm_plane *base_plane,
+ unsigned int zpos)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct xilinx_drm_plane_manager *manager = plane->manager;
+ bool update = false;
+ int i;
+
+ for (i = 0; i < manager->num_planes; i++) {
+ if (manager->planes[i] != plane &&
+ manager->planes[i]->prio == zpos) {
+ update = true;
+ break;
+ }
+ }
+
+ plane->zpos = zpos;
+
+ if (update) {
+ xilinx_drm_plane_update_prio(manager);
+ } else {
+ plane->prio = zpos;
+ xilinx_osd_layer_set_priority(plane->osd_layer, plane->prio);
+ }
+}
+
+static void xilinx_drm_plane_set_alpha(struct drm_plane *base_plane,
+ unsigned int alpha)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct xilinx_drm_plane_manager *manager = plane->manager;
+
+ plane->alpha = alpha;
+
+ if (plane->osd_layer)
+ xilinx_osd_layer_set_alpha(plane->osd_layer, plane->alpha);
+ else if (manager->dp_sub)
+ xilinx_drm_dp_sub_set_alpha(manager->dp_sub, plane->alpha);
+}
+
+static void xilinx_drm_plane_enable_alpha(struct drm_plane *base_plane,
+ bool enable)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct xilinx_drm_plane_manager *manager = plane->manager;
+
+ plane->alpha_enable = enable;
+
+ if (plane->osd_layer)
+ xilinx_osd_layer_enable_alpha(plane->osd_layer, enable);
+ else if (manager->dp_sub)
+ xilinx_drm_dp_sub_enable_alpha(manager->dp_sub, enable);
+}
+
+/* set property of a plane */
+static int xilinx_drm_plane_set_property(struct drm_plane *base_plane,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct xilinx_drm_plane_manager *manager = plane->manager;
+
+ if (property == manager->zpos_prop)
+ xilinx_drm_plane_set_zpos(base_plane, val);
+ else if (property == manager->alpha_prop)
+ xilinx_drm_plane_set_alpha(base_plane, val);
+ else if (property == manager->alpha_enable_prop)
+ xilinx_drm_plane_enable_alpha(base_plane, val);
+ else
+ return -EINVAL;
+
+ drm_object_property_set_value(&base_plane->base, property, val);
+
+ return 0;
+}
+
+static const struct drm_plane_funcs xilinx_drm_plane_funcs = {
+ .update_plane = xilinx_drm_plane_update,
+ .disable_plane = xilinx_drm_plane_disable,
+ .destroy = xilinx_drm_plane_destroy,
+ .set_property = xilinx_drm_plane_set_property,
+};
+
+/* get a plane max width */
+int xilinx_drm_plane_get_max_width(struct drm_plane *base_plane)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+
+ return plane->manager->max_width;
+}
+
+/* check if format is supported */
+bool xilinx_drm_plane_check_format(struct xilinx_drm_plane_manager *manager,
+ u32 format)
+{
+ int i;
+
+ for (i = 0; i < MAX_PLANES; i++)
+ if (manager->planes[i] &&
+ (manager->planes[i]->format == format))
+ return true;
+
+ return false;
+}
+
+/* get the number of planes */
+int xilinx_drm_plane_get_num_planes(struct xilinx_drm_plane_manager *manager)
+{
+ return manager->num_planes;
+}
+
+/**
+ * xilinx_drm_plane_restore - Restore the plane states
+ * @manager: the plane manager
+ *
+ * Restore the plane states to their defaults. Any state that needs to be
+ * restored should be handled here. This improves consistency, as applications
+ * always see the same default values, and it avoids mismatches between
+ * software and hardware state: the software values are updated whenever the
+ * hardware values are reset.
+ */
+void xilinx_drm_plane_restore(struct xilinx_drm_plane_manager *manager)
+{
+ struct xilinx_drm_plane *plane;
+ unsigned int i;
+
+ /*
+ * Reinitialize the property values to their defaults, as they get reset
+ * by the DPMS OFF operation. Userspace will then read the correct default
+ * values, and the planes will be reinitialized with these defaults.
+ */
+ for (i = 0; i < manager->num_planes; i++) {
+ plane = manager->planes[i];
+
+ plane->prio = plane->id;
+ plane->zpos = plane->id;
+ if (manager->zpos_prop)
+ drm_object_property_set_value(&plane->base.base,
+ manager->zpos_prop,
+ plane->prio);
+
+ plane->alpha = manager->default_alpha;
+ if (manager->alpha_prop)
+ drm_object_property_set_value(&plane->base.base,
+ manager->alpha_prop,
+ plane->alpha);
+
+ plane->alpha_enable = true;
+ if (manager->alpha_enable_prop)
+ drm_object_property_set_value(&plane->base.base,
+ manager->alpha_enable_prop, true);
+ }
+}
+
+/* get the plane format */
+u32 xilinx_drm_plane_get_format(struct drm_plane *base_plane)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+
+ return plane->format;
+}
+
+/**
+ * xilinx_drm_plane_get_align - Get the alignment value for pitch
+ * @base_plane: Base drm plane object
+ *
+ * Get the alignment value for pitch from the dma device
+ *
+ * Return: The byte alignment value for the pitch.
+ */
+unsigned int xilinx_drm_plane_get_align(struct drm_plane *base_plane)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+
+ return 1 << plane->dma[0].chan->device->copy_align;
+}
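+
+/*
+ * Note: copy_align is a power-of-two exponent in the dmaengine API, so a
+ * channel reporting DMAENGINE_ALIGN_64_BYTES (value 6) yields 1 << 6 = 64
+ * here, which the dumb_create path then uses to round up the pitch.
+ */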
+
+/* create plane properties */
+static void
+xilinx_drm_plane_create_property(struct xilinx_drm_plane_manager *manager)
+{
+ if (manager->osd)
+ manager->zpos_prop = drm_property_create_range(manager->drm, 0,
+ "zpos", 0, manager->num_planes - 1);
+
+ if (manager->osd || manager->dp_sub) {
+ manager->alpha_prop = drm_property_create_range(manager->drm, 0,
+ "alpha", 0, manager->default_alpha);
+ manager->alpha_enable_prop =
+ drm_property_create_bool(manager->drm, 0,
+ "global alpha enable");
+ }
+}
+
+/* attach plane properties */
+static void xilinx_drm_plane_attach_property(struct drm_plane *base_plane)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct xilinx_drm_plane_manager *manager = plane->manager;
+
+ if (manager->zpos_prop)
+ drm_object_attach_property(&base_plane->base,
+ manager->zpos_prop,
+ plane->id);
+
+ if (manager->alpha_prop) {
+ if (manager->dp_sub && !plane->primary)
+ return;
+
+ drm_object_attach_property(&base_plane->base,
+ manager->alpha_prop,
+ manager->default_alpha);
+ drm_object_attach_property(&base_plane->base,
+ manager->alpha_enable_prop, false);
+ }
+
+ plane->alpha_enable = true;
+}
+
+/**
+ * xilinx_drm_plane_manager_dpms - Set DPMS for the Xilinx plane manager
+ * @manager: Xilinx plane manager object
+ * @dpms: requested DPMS
+ *
+ * Set the Xilinx plane manager to the given DPMS state. This function is
+ * usually called from the CRTC driver together with xilinx_drm_plane_dpms().
+ */
+void xilinx_drm_plane_manager_dpms(struct xilinx_drm_plane_manager *manager,
+ int dpms)
+{
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ if (manager->dp_sub) {
+ xilinx_drm_dp_sub_set_bg_color(manager->dp_sub,
+ 0, 0, 0);
+ xilinx_drm_dp_sub_enable(manager->dp_sub);
+ }
+
+ if (manager->osd) {
+ xilinx_osd_disable_rue(manager->osd);
+ xilinx_osd_enable(manager->osd);
+ xilinx_osd_enable_rue(manager->osd);
+ }
+
+ break;
+ default:
+ if (manager->osd)
+ xilinx_osd_reset(manager->osd);
+
+ if (manager->dp_sub)
+ xilinx_drm_dp_sub_disable(manager->dp_sub);
+
+ break;
+ }
+}
+
+/**
+ * xilinx_drm_plane_manager_mode_set - Set the mode to the Xilinx plane manager
+ * @manager: Xilinx plane manager object
+ * @crtc_w: CRTC width
+ * @crtc_h: CRTC height
+ *
+ * Set the width and height of the Xilinx plane manager. This function is
+ * usually called from the CRTC driver before calling
+ * xilinx_drm_plane_mode_set().
+ */
+void xilinx_drm_plane_manager_mode_set(struct xilinx_drm_plane_manager *manager,
+ unsigned int crtc_w, unsigned int crtc_h)
+{
+ if (manager->osd)
+ xilinx_osd_set_dimension(manager->osd, crtc_w, crtc_h);
+}
+
+/* create a plane */
+static struct xilinx_drm_plane *
+xilinx_drm_plane_create(struct xilinx_drm_plane_manager *manager,
+ unsigned int possible_crtcs, bool primary)
+{
+ struct xilinx_drm_plane *plane;
+ struct device *dev = manager->drm->dev;
+ char plane_name[16];
+ struct device_node *plane_node;
+ struct device_node *sub_node;
+ struct property *prop;
+ const char *dma_name;
+ enum drm_plane_type type;
+ u32 fmt_in = 0;
+ u32 fmt_out = 0;
+ const char *fmt;
+ int i;
+ int ret;
+ u32 *fmts = NULL;
+ unsigned int num_fmts = 0;
+
+ for (i = 0; i < manager->num_planes; i++)
+ if (!manager->planes[i])
+ break;
+
+ if (i >= manager->num_planes) {
+ DRM_ERROR("failed to allocate plane\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ snprintf(plane_name, sizeof(plane_name), "plane%d", i);
+ plane_node = of_get_child_by_name(manager->node, plane_name);
+ if (!plane_node) {
+ DRM_ERROR("failed to find a plane node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ plane = devm_kzalloc(dev, sizeof(*plane), GFP_KERNEL);
+ if (!plane) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ plane->primary = primary;
+ plane->id = i;
+ plane->prio = i;
+ plane->zpos = i;
+ plane->alpha = manager->default_alpha;
+ plane->dpms = DRM_MODE_DPMS_OFF;
+ plane->format = 0;
+ DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+
+ i = 0;
+ of_property_for_each_string(plane_node, "dma-names", prop, dma_name) {
+ if (i >= MAX_NUM_SUB_PLANES) {
+ DRM_WARN("%s contains too many sub-planes (dma-names), indexes %d and above ignored\n",
+ of_node_full_name(plane_node),
+ MAX_NUM_SUB_PLANES);
+ break;
+ }
+ plane->dma[i].chan = of_dma_request_slave_channel(plane_node,
+ dma_name);
+ if (IS_ERR(plane->dma[i].chan)) {
+ ret = PTR_ERR(plane->dma[i].chan);
+ DRM_ERROR("failed to request dma channel \"%s\" for plane %s (err:%d)\n",
+ dma_name, of_node_full_name(plane_node), ret);
+ plane->dma[i].chan = NULL;
+ goto err_dma;
+ }
+ ++i;
+ }
+
+ if (i == 0) {
+ DRM_ERROR("plane \"%s\" doesn't have any dma channels (dma-names)\n",
+ of_node_full_name(plane_node));
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ /* probe color space converter */
+ sub_node = of_parse_phandle(plane_node, "xlnx,rgb2yuv", i);
+ if (sub_node) {
+ plane->rgb2yuv = xilinx_rgb2yuv_probe(dev, sub_node);
+ of_node_put(sub_node);
+ if (IS_ERR(plane->rgb2yuv)) {
+ DRM_ERROR("failed to probe a rgb2yuv\n");
+ ret = PTR_ERR(plane->rgb2yuv);
+ goto err_dma;
+ }
+
+ /* rgb2yuv input format */
+ plane->format = DRM_FORMAT_XRGB8888;
+
+ /* rgb2yuv output format */
+ fmt_out = DRM_FORMAT_YUV444;
+ }
+
+ /* probe chroma resampler */
+ sub_node = of_parse_phandle(plane_node, "xlnx,cresample", i);
+ if (sub_node) {
+ plane->cresample = xilinx_cresample_probe(dev, sub_node);
+ of_node_put(sub_node);
+ if (IS_ERR(plane->cresample)) {
+ DRM_ERROR("failed to probe a cresample\n");
+ ret = PTR_ERR(plane->cresample);
+ goto err_dma;
+ }
+
+ /* cresample input format */
+ fmt = xilinx_cresample_get_input_format_name(plane->cresample);
+ ret = xilinx_drm_format_by_name(fmt, &fmt_in);
+ if (ret)
+ goto err_dma;
+
+ /* format sanity check */
+ if ((fmt_out != 0) && (fmt_out != fmt_in)) {
+ DRM_ERROR("input/output format mismatch\n");
+ ret = -EINVAL;
+ goto err_dma;
+ }
+
+ if (plane->format == 0)
+ plane->format = fmt_in;
+
+ /* cresample output format */
+ fmt = xilinx_cresample_get_output_format_name(plane->cresample);
+ ret = xilinx_drm_format_by_name(fmt, &fmt_out);
+ if (ret)
+ goto err_dma;
+ }
+
+ /* create an OSD layer when OSD is available */
+ if (manager->osd) {
+ /* format sanity check */
+ if ((fmt_out != 0) && (fmt_out != manager->format)) {
+ DRM_ERROR("input/output format mismatch\n");
+ ret = -EINVAL;
+ goto err_dma;
+ }
+
+ /* create an osd layer */
+ plane->osd_layer = xilinx_osd_layer_get(manager->osd);
+ if (IS_ERR(plane->osd_layer)) {
+ DRM_ERROR("failed to create a osd layer\n");
+ ret = PTR_ERR(plane->osd_layer);
+ plane->osd_layer = NULL;
+ goto err_dma;
+ }
+
+ if (plane->format == 0)
+ plane->format = manager->format;
+ }
+
+ if (manager->dp_sub) {
+ plane->dp_layer = xilinx_drm_dp_sub_layer_get(manager->dp_sub,
+ primary);
+ if (IS_ERR(plane->dp_layer)) {
+ DRM_ERROR("failed to create a dp_sub layer\n");
+ ret = PTR_ERR(plane->dp_layer);
+ plane->dp_layer = NULL;
+ goto err_dma;
+ }
+
+ if (primary) {
+ ret = xilinx_drm_dp_sub_layer_set_fmt(manager->dp_sub,
+ plane->dp_layer,
+ manager->format);
+ if (ret) {
+ DRM_ERROR("failed to set dp_sub layer fmt\n");
+ goto err_dma;
+ }
+ }
+
+ plane->format =
+ xilinx_drm_dp_sub_layer_get_fmt(manager->dp_sub,
+ plane->dp_layer);
+ xilinx_drm_dp_sub_layer_get_fmts(manager->dp_sub,
+ plane->dp_layer, &fmts,
+ &num_fmts);
+ }
+
+ /* If there's no IP other than VDMA, pick the manager's format */
+ if (plane->format == 0)
+ plane->format = manager->format;
+
+ /* initialize drm plane */
+ type = primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+ ret = drm_universal_plane_init(manager->drm, &plane->base,
+ possible_crtcs, &xilinx_drm_plane_funcs,
+ fmts ? fmts : &plane->format,
+ num_fmts ? num_fmts : 1, NULL, type,
+ NULL);
+ if (ret) {
+ DRM_ERROR("failed to initialize plane\n");
+ goto err_init;
+ }
+ plane->manager = manager;
+ manager->planes[plane->id] = plane;
+
+ xilinx_drm_plane_attach_property(&plane->base);
+
+ of_node_put(plane_node);
+
+ return plane;
+
+err_init:
+ if (manager->dp_sub) {
+ xilinx_drm_dp_sub_layer_disable(manager->dp_sub,
+ plane->dp_layer);
+ xilinx_drm_dp_sub_layer_put(plane->manager->dp_sub,
+ plane->dp_layer);
+ }
+ if (manager->osd) {
+ xilinx_osd_layer_disable(plane->osd_layer);
+ xilinx_osd_layer_put(plane->osd_layer);
+ }
+err_dma:
+ for (i = 0; i < MAX_NUM_SUB_PLANES; i++)
+ if (plane->dma[i].chan)
+ dma_release_channel(plane->dma[i].chan);
+err_out:
+ of_node_put(plane_node);
+ return ERR_PTR(ret);
+}
+
+/* create a primary plane */
+struct drm_plane *
+xilinx_drm_plane_create_primary(struct xilinx_drm_plane_manager *manager,
+ unsigned int possible_crtcs)
+{
+ struct xilinx_drm_plane *plane;
+
+ plane = xilinx_drm_plane_create(manager, possible_crtcs, true);
+ if (IS_ERR(plane)) {
+ DRM_ERROR("failed to allocate a primary plane\n");
+ return ERR_CAST(plane);
+ }
+
+ return &plane->base;
+}
+
+/* create extra planes */
+int xilinx_drm_plane_create_planes(struct xilinx_drm_plane_manager *manager,
+ unsigned int possible_crtcs)
+{
+ struct xilinx_drm_plane *plane;
+ int i;
+
+ /* find the available plane slots and create a plane for each */
+ for (i = 0; i < manager->num_planes; i++) {
+ if (manager->planes[i])
+ continue;
+
+ plane = xilinx_drm_plane_create(manager, possible_crtcs, false);
+ if (IS_ERR(plane)) {
+ DRM_ERROR("failed to allocate a plane\n");
+ return PTR_ERR(plane);
+ }
+
+ manager->planes[i] = plane;
+ }
+
+ return 0;
+}
+
+/* initialize a plane manager: num_planes, format, max_width */
+static int
+xilinx_drm_plane_init_manager(struct xilinx_drm_plane_manager *manager)
+{
+ unsigned int format;
+ u32 drm_format;
+ int ret = 0;
+
+ if (manager->osd) {
+ manager->num_planes = xilinx_osd_get_num_layers(manager->osd);
+ manager->max_width = xilinx_osd_get_max_width(manager->osd);
+
+ format = xilinx_osd_get_format(manager->osd);
+ ret = xilinx_drm_format_by_code(format, &drm_format);
+ if (drm_format != manager->format)
+ ret = -EINVAL;
+ } else if (manager->dp_sub) {
+ manager->num_planes = XILINX_DRM_DP_SUB_NUM_LAYERS;
+ manager->max_width = XILINX_DRM_DP_SUB_MAX_WIDTH;
+ } else {
+ /* without osd, only one plane is supported */
+ manager->num_planes = 1;
+ manager->max_width = 4096;
+ }
+
+ return ret;
+}
+
+struct xilinx_drm_plane_manager *
+xilinx_drm_plane_probe_manager(struct drm_device *drm)
+{
+ struct xilinx_drm_plane_manager *manager;
+ struct device *dev = drm->dev;
+ struct device_node *sub_node;
+ const char *format;
+ int ret;
+
+ manager = devm_kzalloc(dev, sizeof(*manager), GFP_KERNEL);
+ if (!manager)
+ return ERR_PTR(-ENOMEM);
+
+ /* this node is used to create a plane */
+ manager->node = of_get_child_by_name(dev->of_node, "planes");
+ if (!manager->node) {
+ DRM_ERROR("failed to get a planes node\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* check the base pixel format of plane manager */
+ ret = of_property_read_string(manager->node, "xlnx,pixel-format",
+ &format);
+ if (ret < 0) {
+ DRM_ERROR("failed to get a plane manager format\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = xilinx_drm_format_by_name(format, &manager->format);
+ if (ret < 0) {
+ DRM_ERROR("invalid plane manager format\n");
+ return ERR_PTR(ret);
+ }
+
+ manager->drm = drm;
+
+ /* probe an OSD. proceed even if there's no OSD */
+ sub_node = of_parse_phandle(dev->of_node, "xlnx,osd", 0);
+ if (sub_node) {
+ manager->osd = xilinx_osd_probe(dev, sub_node);
+ of_node_put(sub_node);
+ if (IS_ERR(manager->osd)) {
+ of_node_put(manager->node);
+ DRM_ERROR("failed to probe an osd\n");
+ return ERR_CAST(manager->osd);
+ }
+ manager->default_alpha = OSD_MAX_ALPHA;
+ }
+
+ manager->dp_sub = xilinx_drm_dp_sub_of_get(drm->dev->of_node);
+ if (IS_ERR(manager->dp_sub)) {
+ DRM_DEBUG_KMS("failed to get a dp_sub\n");
+ return ERR_CAST(manager->dp_sub);
+ } else if (manager->dp_sub) {
+ manager->default_alpha = XILINX_DRM_DP_SUB_MAX_ALPHA;
+ }
+
+ ret = xilinx_drm_plane_init_manager(manager);
+ if (ret) {
+ DRM_ERROR("failed to init a plane manager\n");
+ return ERR_PTR(ret);
+ }
+
+ xilinx_drm_plane_create_property(manager);
+
+ return manager;
+}
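+
+/*
+ * A hypothetical device tree fragment matching the probe logic above
+ * ("planes", "plane0", "xlnx,pixel-format" and "xlnx,osd" are the names
+ * actually parsed; the other names and the format value are illustrative):
+ *
+ * drm {
+ *   xlnx,osd = <&v_osd_0>;
+ *   planes {
+ *     xlnx,pixel-format = "yuv422";
+ *     plane0 {
+ *       dmas = <&axi_vdma_0 0>;
+ *       dma-names = "dma0";
+ *     };
+ *   };
+ * };
+ */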
+
+void xilinx_drm_plane_remove_manager(struct xilinx_drm_plane_manager *manager)
+{
+ xilinx_drm_dp_sub_put(manager->dp_sub);
+ of_node_put(manager->node);
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_plane.h b/drivers/gpu/drm/xilinx/xilinx_drm_plane.h
new file mode 100644
index 000000000000..3d3616b5a9d1
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_plane.h
@@ -0,0 +1,61 @@
+/*
+ * Xilinx DRM plane header
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_PLANE_H_
+#define _XILINX_DRM_PLANE_H_
+
+struct drm_crtc;
+struct drm_plane;
+
+/* plane operations */
+void xilinx_drm_plane_dpms(struct drm_plane *base_plane, int dpms);
+void xilinx_drm_plane_commit(struct drm_plane *base_plane);
+int xilinx_drm_plane_mode_set(struct drm_plane *base_plane,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h);
+int xilinx_drm_plane_get_max_width(struct drm_plane *base_plane);
+u32 xilinx_drm_plane_get_format(struct drm_plane *base_plane);
+unsigned int xilinx_drm_plane_get_align(struct drm_plane *base_plane);
+
+/* plane manager operations */
+struct xilinx_drm_plane_manager;
+
+void
+xilinx_drm_plane_manager_mode_set(struct xilinx_drm_plane_manager *manager,
+ unsigned int crtc_w, unsigned int crtc_h);
+void xilinx_drm_plane_manager_dpms(struct xilinx_drm_plane_manager *manager,
+ int dpms);
+struct drm_plane *
+xilinx_drm_plane_create_primary(struct xilinx_drm_plane_manager *manager,
+ unsigned int possible_crtcs);
+int xilinx_drm_plane_create_planes(struct xilinx_drm_plane_manager *manager,
+ unsigned int possible_crtcs);
+
+bool xilinx_drm_plane_check_format(struct xilinx_drm_plane_manager *manager,
+ u32 format);
+int xilinx_drm_plane_get_num_planes(struct xilinx_drm_plane_manager *manager);
+
+void xilinx_drm_plane_restore(struct xilinx_drm_plane_manager *manager);
+
+struct xilinx_drm_plane_manager *
+xilinx_drm_plane_probe_manager(struct drm_device *drm);
+void xilinx_drm_plane_remove_manager(struct xilinx_drm_plane_manager *manager);
+
+#endif /* _XILINX_DRM_PLANE_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_sdi.c b/drivers/gpu/drm/xilinx/xilinx_drm_sdi.c
new file mode 100644
index 000000000000..c33b3dfb6809
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_sdi.c
@@ -0,0 +1,1452 @@
+/*
+ * Xilinx FPGA SDI Tx Controller driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <video/videomode.h>
+#include "xilinx_drm_sdi.h"
+#include "xilinx_vtc.h"
+
+/* SDI register offsets */
+#define XSDI_TX_RST_CTRL 0x00
+#define XSDI_TX_MDL_CTRL 0x04
+#define XSDI_TX_GLBL_IER 0x0C
+#define XSDI_TX_ISR_STAT 0x10
+#define XSDI_TX_IER_STAT 0x14
+#define XSDI_TX_ST352_LINE 0x18
+#define XSDI_TX_ST352_DATA_CH0 0x1C
+#define XSDI_TX_VER 0x3C
+#define XSDI_TX_SYS_CFG 0x40
+#define XSDI_TX_STS_SB_TDATA 0x60
+#define XSDI_TX_AXI4S_STS1 0x68
+#define XSDI_TX_AXI4S_STS2 0x6C
+
+/* MODULE_CTRL register masks */
+#define XSDI_TX_CTRL_MDL_EN_MASK BIT(0)
+#define XSDI_TX_CTRL_OUT_EN_MASK BIT(1)
+#define XSDI_TX_CTRL_M_MASK BIT(7)
+#define XSDI_TX_CTRL_INS_CRC_MASK BIT(12)
+#define XSDI_TX_CTRL_INS_ST352_MASK BIT(13)
+#define XSDI_TX_CTRL_OVR_ST352_MASK BIT(14)
+#define XSDI_TX_CTRL_INS_SYNC_BIT_MASK BIT(16)
+#define XSDI_TX_CTRL_SD_BITREP_BYPASS_MASK BIT(17)
+#define XSDI_TX_CTRL_USE_ANC_IN_MASK BIT(18)
+#define XSDI_TX_CTRL_INS_LN_MASK BIT(19)
+#define XSDI_TX_CTRL_INS_EDH_MASK BIT(20)
+#define XSDI_TX_CTRL_MODE_MASK 0x7
+#define XSDI_TX_CTRL_MUX_MASK 0x7
+#define XSDI_TX_CTRL_MODE_SHIFT 4
+#define XSDI_TX_CTRL_M_SHIFT 7
+#define XSDI_TX_CTRL_MUX_SHIFT 8
+#define XSDI_TX_CTRL_INS_CRC_SHIFT 12
+#define XSDI_TX_CTRL_INS_ST352_SHIFT 13
+#define XSDI_TX_CTRL_OVR_ST352_SHIFT 14
+#define XSDI_TX_CTRL_ST352_F2_EN_SHIFT 15
+#define XSDI_TX_CTRL_INS_SYNC_BIT_SHIFT 16
+#define XSDI_TX_CTRL_SD_BITREP_BYPASS_SHIFT 17
+#define XSDI_TX_CTRL_USE_ANC_IN_SHIFT 18
+#define XSDI_TX_CTRL_INS_LN_SHIFT 19
+#define XSDI_TX_CTRL_INS_EDH_SHIFT 20
+
+/* TX_ST352_LINE register masks */
+#define XSDI_TX_ST352_LINE_MASK GENMASK(10, 0)
+#define XSDI_TX_ST352_LINE_F2_SHIFT 16
+
+/* ISR STAT register masks */
+#define XSDI_GTTX_RSTDONE_INTR_MASK BIT(0)
+#define XSDI_TX_CE_ALIGN_ERR_INTR_MASK BIT(1)
+#define XSDI_AXI4S_VID_LOCK_INTR_MASK BIT(8)
+#define XSDI_OVERFLOW_INTR_MASK BIT(9)
+#define XSDI_UNDERFLOW_INTR_MASK BIT(10)
+#define XSDI_IER_EN_MASK (XSDI_GTTX_RSTDONE_INTR_MASK | \
+ XSDI_TX_CE_ALIGN_ERR_INTR_MASK | \
+ XSDI_OVERFLOW_INTR_MASK | \
+ XSDI_UNDERFLOW_INTR_MASK)
+
+/* RST_CTRL_OFFSET masks */
+#define XSDI_TX_BRIDGE_CTRL_EN_MASK BIT(8)
+#define XSDI_TX_AXI4S_CTRL_EN_MASK BIT(9)
+#define XSDI_TX_CTRL_EN_MASK BIT(0)
+
+/* STS_SB_TX_TDATA masks */
+#define XSDI_TX_TDATA_DONE_MASK BIT(0)
+#define XSDI_TX_TDATA_FAIL_MASK BIT(1)
+#define XSDI_TX_TDATA_GT_RESETDONE_MASK BIT(2)
+#define XSDI_TX_TDATA_SLEW_RATE_MASK BIT(3)
+#define XSDI_TX_TDATA_TXPLLCLKSEL_MASK GENMASK(5, 4)
+#define XSDI_TX_TDATA_GT_SYSCLKSEL_MASK GENMASK(7, 6)
+#define XSDI_TX_TDATA_FABRIC_RST_MASK BIT(8)
+#define XSDI_TX_TDATA_DRP_FAIL_MASK BIT(9)
+#define XSDI_TX_TDATA_FAIL_CODE_MASK GENMASK(14, 12)
+#define XSDI_TX_TDATA_DRP_FAIL_CNT_MASK 0xFF0000
+#define XSDI_TX_TDATA_GT_QPLL0LOCK_MASK BIT(24)
+#define XSDI_TX_TDATA_GT_QPLL1LOCK_MASK BIT(25)
+
+#define SDI_MAX_DATASTREAM 8
+
+#define XSDI_TX_MUX_SD_HD_3GA 0
+#define XSDI_TX_MUX_3GB 1
+#define XSDI_TX_MUX_8STREAM_6G_12G 2
+#define XSDI_TX_MUX_4STREAM_6G 3
+#define XSDI_TX_MUX_16STREAM_12G 4
+
+#define PIXELS_PER_CLK 2
+#define XSDI_CH_SHIFT 29
+#define XST352_PROG_PIC_MASK BIT(6)
+#define XST352_PROG_TRANS_MASK BIT(7)
+#define XST352_2048_SHIFT BIT(6)
+#define ST352_BYTE3 0x00
+#define ST352_BYTE4 0x01
+#define INVALID_VALUE -1
+#define GT_TIMEOUT 500
+
+static LIST_HEAD(xilinx_sdi_list);
+static DEFINE_MUTEX(xilinx_sdi_lock);
+/**
+ * enum payload_line_1 - ST352 payload ID line 1 numbers
+ * @PAYLD_LN1_HD_3_6_12G: line 1 HD, 3G, 6G or 12G mode value
+ * @PAYLD_LN1_SDPAL: line 1 SD PAL mode value
+ * @PAYLD_LN1_SDNTSC: line 1 SD NTSC mode value
+ */
+enum payload_line_1 {
+ PAYLD_LN1_HD_3_6_12G = 10,
+ PAYLD_LN1_SDPAL = 9,
+ PAYLD_LN1_SDNTSC = 13
+};
+
+/**
+ * enum payload_line_2 - ST352 payload ID line 2 numbers
+ * @PAYLD_LN2_HD_3_6_12G: line 2 HD, 3G, 6G or 12G mode value
+ * @PAYLD_LN2_SDPAL: line 2 SD PAL mode value
+ * @PAYLD_LN2_SDNTSC: line 2 SD NTSC mode value
+ */
+enum payload_line_2 {
+ PAYLD_LN2_HD_3_6_12G = 572,
+ PAYLD_LN2_SDPAL = 322,
+ PAYLD_LN2_SDNTSC = 276
+};
+
+/**
+ * enum sdi_modes - SDI modes
+ * @XSDI_MODE_HD: HD mode
+ * @XSDI_MODE_SD: SD mode
+ * @XSDI_MODE_3GA: 3GA mode
+ * @XSDI_MODE_3GB: 3GB mode
+ * @XSDI_MODE_6G: 6G mode
+ * @XSDI_MODE_12G: 12G mode
+ */
+enum sdi_modes {
+ XSDI_MODE_HD = 0,
+ XSDI_MODE_SD,
+ XSDI_MODE_3GA,
+ XSDI_MODE_3GB,
+ XSDI_MODE_6G,
+ XSDI_MODE_12G
+};
+
+/**
+ * struct xilinx_sdi - Core configuration SDI Tx subsystem device structure
+ * @encoder: DRM encoder structure
+ * @connector: DRM connector structure
+ * @vtc: Pointer to VTC structure
+ * @dev: device structure
+ * @base: Base address of SDI subsystem
+ * @mode_flags: SDI operation mode related flags
+ * @wait_event: wait event
+ * @event_received: wait event status
+ * @list: entry in the global SDI subsystem list
+ * @vblank_fn: vblank handler
+ * @vblank_data: vblank data to be used in vblank_fn
+ * @sdi_mode: configurable SDI mode parameter, supported values are:
+ * 0 - HD
+ * 1 - SD
+ * 2 - 3GA
+ * 3 - 3GB
+ * 4 - 6G
+ * 5 - 12G
+ * @sdi_mod_prop_val: configurable SDI mode parameter value
+ * @sdi_data_strm: configurable SDI data stream parameter
+ * @sdi_data_strm_prop_val: configurable number of SDI data streams;
+ *		values currently supported are 2, 4 and 8
+ * @is_frac_prop: configurable SDI fractional fps parameter
+ * @is_frac_prop_val: configurable SDI fractional fps parameter value
+ */
+struct xilinx_sdi {
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct xilinx_vtc *vtc;
+ struct device *dev;
+ void __iomem *base;
+ u32 mode_flags;
+ wait_queue_head_t wait_event;
+ bool event_received;
+ struct list_head list;
+ void (*vblank_fn)(void *);
+ void *vblank_data;
+ struct drm_property *sdi_mode;
+ u32 sdi_mod_prop_val;
+ struct drm_property *sdi_data_strm;
+ u32 sdi_data_strm_prop_val;
+ struct drm_property *is_frac_prop;
+ bool is_frac_prop_val;
+};
+
+/**
+ * struct xlnx_sdi_display_config - SDI supported modes structure
+ * @mode: drm display mode
+ * @st352_byt2: st352 byte 2 value
+ * index 0 : value for integral fps
+ * index 1 : value for fractional fps
+ * @st352_byt1: st352 byte 1 value
+ * index 0 : value for HD mode
+ * index 1 : value for SD mode
+ * index 2 : value for 3GA
+ * index 3 : value for 3GB
+ * index 4 : value for 6G
+ * index 5 : value for 12G
+ */
+struct xlnx_sdi_display_config {
+ struct drm_display_mode mode;
+ u8 st352_byt2[2];
+ u8 st352_byt1[6];
+};
+
+/*
+ * xlnx_sdi_modes - SDI DRM modes
+ */
+static const struct xlnx_sdi_display_config xlnx_sdi_modes[] = {
+ /* 0 - dummy, VICs start at 1 */
+ { },
+ /* SD: 720x480i@60Hz */
+ {{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
+ 801, 858, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x81, 0x81, 0x81, 0x81, 0x81, 0x81} },
+ /* SD: 720x576i@50Hz */
+ {{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
+ 795, 864, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x81, 0x81, 0x81, 0x81, 0x81, 0x81} },
+ /* HD: 1280x720@25Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 2990, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@24Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 3155, 4125, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@30Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 2330, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@50Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@60Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1920x1080@24Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080@25Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080@30Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@48Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2291,
+ 2379, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@50Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@60Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@24Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2291,
+ 2379, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@25Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@30Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@48Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2377,
+ 2421, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@50Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2322,
+ 2366, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@60Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@24Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2377,
+ 2421, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@25Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2322,
+ 2366, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@30Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@30Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@25Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@24Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@48Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@50Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@60Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@60Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2136,
+ 2180, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@50Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@48Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@96Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2291,
+ 2379, 2750, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 96, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@100Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@120Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@96Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2377,
+ 2421, 2750, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 96, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@100Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2322,
+ 2366, 2640, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@120Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2114,
+ 2134, 2200, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 6G: 3840x2160@30Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 3840x2160@25Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 3840x2160@24Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@24Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 296704, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@25Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@30Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 296704, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@48Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@50Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@60Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@48Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@50Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@60Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 593408, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+};
+
+#define connector_to_sdi(c) container_of(c, struct xilinx_sdi, connector)
+#define encoder_to_sdi(e) container_of(e, struct xilinx_sdi, encoder)
+
+/**
+ * xilinx_sdi_writel - Memory mapped SDI Tx register write
+ * @base: Pointer to SDI Tx registers base
+ * @offset: Register offset
+ * @val: value to be written
+ *
+ * This function writes the value to the specified SDI Tx register.
+ */
+static inline void xilinx_sdi_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+/**
+ * xilinx_sdi_readl - Memory mapped SDI Tx register read
+ * @base: Pointer to SDI Tx registers base
+ * @offset: Register offset
+ *
+ * Return: The contents of the SDI Tx register
+ *
+ * This function returns the contents of the corresponding SDI Tx register.
+ */
+static inline u32 xilinx_sdi_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * xilinx_en_axi4s - Enable SDI Tx AXI4S-to-Video core
+ * @sdi: Pointer to SDI Tx structure
+ *
+ * This function enables the SDI Tx AXI4S-to-Video core.
+ */
+static void xilinx_en_axi4s(struct xilinx_sdi *sdi)
+{
+ u32 data;
+
+ data = xilinx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_AXI4S_CTRL_EN_MASK;
+ xilinx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+/**
+ * xilinx_en_bridge - Enable SDI Tx bridge
+ * @sdi: Pointer to SDI Tx structure
+ *
+ * This function enables the SDI Tx bridge.
+ */
+static void xilinx_en_bridge(struct xilinx_sdi *sdi)
+{
+ u32 data;
+
+ data = xilinx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_BRIDGE_CTRL_EN_MASK;
+ xilinx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+/**
+ * xilinx_sdi_set_default_drm_properties - Configure SDI DRM
+ * properties with their default values
+ * @sdi: SDI structure having the updated user parameters
+ */
+static void
+xilinx_sdi_set_default_drm_properties(struct xilinx_sdi *sdi)
+{
+ drm_object_property_set_value(&sdi->connector.base,
+ sdi->sdi_mode, 0);
+ drm_object_property_set_value(&sdi->connector.base,
+ sdi->sdi_data_strm, 0);
+ drm_object_property_set_value(&sdi->connector.base,
+ sdi->is_frac_prop, 0);
+}
+
+/**
+ * xilinx_sdi_irq_handler - SDI Tx interrupt
+ * @irq: irq number
+ * @data: irq data
+ *
+ * Return: IRQ_HANDLED for all cases.
+ *
+ * This handler services the SDI Tx interrupts and wakes up any waiter
+ * once the GT reset-done status is set.
+ */
+static irqreturn_t xilinx_sdi_irq_handler(int irq, void *data)
+{
+ struct xilinx_sdi *sdi = (struct xilinx_sdi *)data;
+ u32 reg;
+
+ reg = xilinx_sdi_readl(sdi->base, XSDI_TX_ISR_STAT);
+
+ if (reg & XSDI_GTTX_RSTDONE_INTR_MASK)
+ dev_dbg(sdi->dev, "GT reset interrupt received\n");
+ if (reg & XSDI_TX_CE_ALIGN_ERR_INTR_MASK)
+ dev_err_ratelimited(sdi->dev, "SDI SD CE align error\n");
+ if (reg & XSDI_OVERFLOW_INTR_MASK)
+ dev_err_ratelimited(sdi->dev, "AXI-4 Stream Overflow error\n");
+ if (reg & XSDI_UNDERFLOW_INTR_MASK)
+ dev_err_ratelimited(sdi->dev, "AXI-4 Stream Underflow error\n");
+ xilinx_sdi_writel(sdi->base, XSDI_TX_ISR_STAT,
+ reg & ~(XSDI_AXI4S_VID_LOCK_INTR_MASK));
+
+ reg = xilinx_sdi_readl(sdi->base, XSDI_TX_STS_SB_TDATA);
+ if (reg & XSDI_TX_TDATA_GT_RESETDONE_MASK) {
+ sdi->event_received = true;
+ wake_up_interruptible(&sdi->wait_event);
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_sdi_set_payload_line - set ST352 packet line number
+ * @sdi: Pointer to SDI Tx structure
+ * @line_1: line number used to insert st352 packet for field 1.
+ * @line_2: line number used to insert st352 packet for field 2.
+ *
+ * This function sets the ST352 packet line numbers for both fields.
+ */
+static void xilinx_sdi_set_payload_line(struct xilinx_sdi *sdi,
+ u32 line_1, u32 line_2)
+{
+ u32 data;
+
+ data = ((line_1 & XSDI_TX_ST352_LINE_MASK) |
+ ((line_2 & XSDI_TX_ST352_LINE_MASK) <<
+ XSDI_TX_ST352_LINE_F2_SHIFT));
+
+ xilinx_sdi_writel(sdi->base, XSDI_TX_ST352_LINE, data);
+
+ data = xilinx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ data |= (1 << XSDI_TX_CTRL_ST352_F2_EN_SHIFT);
+
+ xilinx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, data);
+}
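+
+/*
+ * Worked example (illustration only, using the enum values above): for an
+ * HD mode, line_1 = PAYLD_LN1_HD_3_6_12G (10) and
+ * line_2 = PAYLD_LN2_HD_3_6_12G (572), so the packed XSDI_TX_ST352_LINE
+ * value is
+ *
+ *   (10 & 0x7FF) | ((572 & 0x7FF) << 16) == 0x023C000A
+ */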
+
+/**
+ * xilinx_sdi_set_payload_data - set ST352 packet payload
+ * @sdi: Pointer to SDI Tx structure
+ * @data_strm: data stream number
+ * @payload: st352 packet payload
+ *
+ * This function sets the ST352 payload data for the corresponding stream.
+ */
+static void xilinx_sdi_set_payload_data(struct xilinx_sdi *sdi,
+ u32 data_strm, u32 payload)
+{
+ xilinx_sdi_writel(sdi->base,
+ (XSDI_TX_ST352_DATA_CH0 + (data_strm * 4)), payload);
+}
+
+/**
+ * xilinx_sdi_set_display_disable - Disable the SDI Tx IP core enable
+ * register bit
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure and disables the core enable bit
+ * of the core configuration register.
+ */
+static void xilinx_sdi_set_display_disable(struct xilinx_sdi *sdi)
+{
+ u32 i;
+
+ for (i = 0; i < SDI_MAX_DATASTREAM; i++)
+ xilinx_sdi_set_payload_data(sdi, i, 0);
+
+ xilinx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 0);
+ xilinx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, 0);
+}
+
+/**
+ * xilinx_sdi_payload_config - configure the SDI payload parameters
+ * @sdi: pointer Xilinx SDI Tx structure
+ * @mode: display mode
+ *
+ * This function configures the SDI ST352 payload parameters.
+ */
+static void xilinx_sdi_payload_config(struct xilinx_sdi *sdi, u32 mode)
+{
+ u32 payload_1, payload_2;
+
+ switch (mode) {
+ case XSDI_MODE_SD:
+ payload_1 = PAYLD_LN1_SDPAL;
+ payload_2 = PAYLD_LN2_SDPAL;
+ break;
+ case XSDI_MODE_HD:
+ case XSDI_MODE_3GA:
+ case XSDI_MODE_3GB:
+ case XSDI_MODE_6G:
+ case XSDI_MODE_12G:
+ payload_1 = PAYLD_LN1_HD_3_6_12G;
+ payload_2 = PAYLD_LN2_HD_3_6_12G;
+ break;
+ default:
+ payload_1 = 0;
+ payload_2 = 0;
+ break;
+ }
+
+ xilinx_sdi_set_payload_line(sdi, payload_1, payload_2);
+}
+
+/**
+ * xilinx_set_sdi_mode - Set mode parameters in SDI Tx
+ * @sdi: pointer Xilinx SDI Tx structure
+ * @mode: SDI Tx display mode
+ * @is_frac: 0 - integer, 1 - fractional
+ * @mux_ptrn: specify the data stream interleaving pattern to be used
+ *
+ * This function sets the SDI mode, the fractional rate flag and the data
+ * stream interleaving pattern in the SDI Tx mode control register.
+ */
+static void xilinx_set_sdi_mode(struct xilinx_sdi *sdi, u32 mode,
+ bool is_frac, u32 mux_ptrn)
+{
+ u32 data;
+
+ xilinx_sdi_payload_config(sdi, mode);
+
+ data = xilinx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ data &= ~((XSDI_TX_CTRL_MODE_MASK << XSDI_TX_CTRL_MODE_SHIFT) |
+ (XSDI_TX_CTRL_M_MASK) | (XSDI_TX_CTRL_MUX_MASK
+ << XSDI_TX_CTRL_MUX_SHIFT));
+
+ data |= (((mode & XSDI_TX_CTRL_MODE_MASK)
+ << XSDI_TX_CTRL_MODE_SHIFT) |
+ (is_frac << XSDI_TX_CTRL_M_SHIFT) |
+ ((mux_ptrn & XSDI_TX_CTRL_MUX_MASK) << XSDI_TX_CTRL_MUX_SHIFT));
+
+ xilinx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, data);
+}
+
+/**
+ * xilinx_sdi_set_config_parameters - Configure SDI Tx registers with
+ * user parameters
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure holding the drm_property parameters
+ * configured by the user application and writes them into the SDI IP
+ * registers.
+ */
+static void xilinx_sdi_set_config_parameters(struct xilinx_sdi *sdi)
+{
+ u32 mode;
+ int mux_ptrn = INVALID_VALUE;
+ bool is_frac;
+
+ mode = sdi->sdi_mod_prop_val;
+ is_frac = sdi->is_frac_prop_val;
+
+ switch (mode) {
+ case XSDI_MODE_3GA:
+ mux_ptrn = XSDI_TX_MUX_SD_HD_3GA;
+ break;
+ case XSDI_MODE_3GB:
+ mux_ptrn = XSDI_TX_MUX_3GB;
+ break;
+ case XSDI_MODE_6G:
+ if (sdi->sdi_data_strm_prop_val == 4)
+ mux_ptrn = XSDI_TX_MUX_4STREAM_6G;
+ else if (sdi->sdi_data_strm_prop_val == 8)
+ mux_ptrn = XSDI_TX_MUX_8STREAM_6G_12G;
+ break;
+ case XSDI_MODE_12G:
+ if (sdi->sdi_data_strm_prop_val == 8)
+ mux_ptrn = XSDI_TX_MUX_8STREAM_6G_12G;
+ break;
+ default:
+ mux_ptrn = 0;
+ break;
+ }
+ if (mux_ptrn == INVALID_VALUE) {
+		dev_err(sdi->dev, "%d data streams not supported for mode %d\n",
+ sdi->sdi_data_strm_prop_val, mode);
+ return;
+ }
+ xilinx_set_sdi_mode(sdi, mode, is_frac, mux_ptrn);
+}
+
+/**
+ * xilinx_sdi_connector_set_property - implementation of drm_connector_funcs
+ * set_property invoked by IOCTL call to DRM_IOCTL_MODE_OBJ_SETPROPERTY
+ *
+ * @base_connector: pointer Xilinx SDI connector
+ * @property: pointer to the drm_property structure
+ * @value: SDI parameter value that is configured from user application
+ *
+ * This function takes a drm_property name and value given from the user
+ * application and updates the corresponding SDI structure property
+ * variables. These values are later used to configure the SDI Tx IP.
+ *
+ * Return: 0 on success OR -EINVAL if setting property fails
+ */
+static int
+xilinx_sdi_connector_set_property(struct drm_connector *base_connector,
+ struct drm_property *property,
+ u64 value)
+{
+ struct xilinx_sdi *sdi = connector_to_sdi(base_connector);
+
+ if (property == sdi->sdi_mode)
+ sdi->sdi_mod_prop_val = (unsigned int)value;
+ else if (property == sdi->sdi_data_strm)
+ sdi->sdi_data_strm_prop_val = (unsigned int)value;
+ else if (property == sdi->is_frac_prop)
+ sdi->is_frac_prop_val = !!value;
+ else
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * xilinx_sdi_get_mode_id - Search for a video mode in the supported modes table
+ *
+ * @mode: mode being searched
+ *
+ * Return: index of the mode in xlnx_sdi_modes on success, or -EINVAL
+ * if the mode is not found
+ */
+static int xilinx_sdi_get_mode_id(struct drm_display_mode *mode)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xlnx_sdi_modes); i++)
+ if (drm_mode_equal(&xlnx_sdi_modes[i].mode, mode))
+ return i;
+ return -EINVAL;
+}
+
+/**
+ * xilinx_sdi_drm_add_modes - Adds SDI supported modes
+ * @connector: pointer Xilinx SDI connector
+ *
+ * Return: Count of modes added
+ *
+ * This function adds the supported SDI modes and returns their count.
+ */
+static int xilinx_sdi_drm_add_modes(struct drm_connector *connector)
+{
+ int i, num_modes = 0;
+ struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+
+	/* skip the dummy entry at index 0 */
+	for (i = 1; i < ARRAY_SIZE(xlnx_sdi_modes); i++) {
+ const struct drm_display_mode *ptr = &xlnx_sdi_modes[i].mode;
+
+ mode = drm_mode_duplicate(dev, ptr);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num_modes++;
+ }
+ }
+ return num_modes;
+}
+
+static int xilinx_sdi_connector_dpms(struct drm_connector *connector,
+ int mode)
+{
+ return drm_helper_connector_dpms(connector, mode);
+}
+
+static enum drm_connector_status
+xilinx_sdi_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void xilinx_sdi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ connector->dev = NULL;
+}
+
+static const struct drm_connector_funcs xilinx_sdi_connector_funcs = {
+ .dpms = xilinx_sdi_connector_dpms,
+ .detect = xilinx_sdi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = xilinx_sdi_connector_destroy,
+ .set_property = xilinx_sdi_connector_set_property,
+};
+
+static struct drm_encoder *
+xilinx_sdi_best_encoder(struct drm_connector *connector)
+{
+ return &(connector_to_sdi(connector)->encoder);
+}
+
+static int xilinx_sdi_get_modes(struct drm_connector *connector)
+{
+ return xilinx_sdi_drm_add_modes(connector);
+}
+
+static struct drm_connector_helper_funcs xilinx_sdi_connector_helper_funcs = {
+ .get_modes = xilinx_sdi_get_modes,
+ .best_encoder = xilinx_sdi_best_encoder,
+};
+
+/**
+ * xilinx_sdi_drm_connector_create_property - create SDI connector properties
+ *
+ * @base_connector: pointer to Xilinx SDI connector
+ *
+ * This function takes the xilinx SDI connector component and defines
+ * the drm_property variables with their default values.
+ */
+static void
+xilinx_sdi_drm_connector_create_property(struct drm_connector *base_connector)
+{
+ struct drm_device *dev = base_connector->dev;
+ struct xilinx_sdi *sdi = connector_to_sdi(base_connector);
+
+ sdi->is_frac_prop = drm_property_create_bool(dev, 1, "is_frac");
+ sdi->sdi_mode = drm_property_create_range(dev, 0,
+ "sdi_mode", 0, 5);
+ sdi->sdi_data_strm = drm_property_create_range(dev, 0,
+ "sdi_data_stream", 2, 8);
+}
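+
+/*
+ * Minimal userspace sketch (illustration only, not part of this patch) of
+ * setting these properties through libdrm; fd, conn_id and the property
+ * IDs (looked up by name via drmModeObjectGetProperties()) are assumed to
+ * come from the application's DRM setup:
+ *
+ *   // 3GA mode (2), 2 data streams, integer frame rate
+ *   drmModeObjectSetProperty(fd, conn_id, DRM_MODE_OBJECT_CONNECTOR,
+ *                            sdi_mode_prop_id, 2);
+ *   drmModeObjectSetProperty(fd, conn_id, DRM_MODE_OBJECT_CONNECTOR,
+ *                            sdi_data_stream_prop_id, 2);
+ *   drmModeObjectSetProperty(fd, conn_id, DRM_MODE_OBJECT_CONNECTOR,
+ *                            is_frac_prop_id, 0);
+ */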
+
+/**
+ * xilinx_sdi_drm_connector_attach_property - attach SDI connector
+ * properties
+ *
+ * @base_connector: pointer to Xilinx SDI connector
+ */
+static void
+xilinx_sdi_drm_connector_attach_property(struct drm_connector *base_connector)
+{
+ struct xilinx_sdi *sdi = connector_to_sdi(base_connector);
+ struct drm_mode_object *obj = &base_connector->base;
+
+ if (sdi->sdi_mode)
+ drm_object_attach_property(obj, sdi->sdi_mode, 0);
+
+ if (sdi->sdi_data_strm)
+ drm_object_attach_property(obj, sdi->sdi_data_strm, 0);
+
+ if (sdi->is_frac_prop)
+ drm_object_attach_property(obj, sdi->is_frac_prop, 0);
+}
+
+static int xilinx_sdi_create_connector(struct drm_encoder *encoder)
+{
+ struct xilinx_sdi *sdi = encoder_to_sdi(encoder);
+ struct drm_connector *connector = &sdi->connector;
+ int ret;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &xilinx_sdi_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret) {
+ dev_err(sdi->dev, "Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &xilinx_sdi_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ xilinx_sdi_drm_connector_create_property(connector);
+ xilinx_sdi_drm_connector_attach_property(connector);
+
+ return 0;
+}
+
+static bool xilinx_sdi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/**
+ * xilinx_sdi_set_display_enable - Enable the SDI Tx IP core enable
+ * register bit
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure and enables the core enable bit
+ * of the core configuration register.
+ */
+static void xilinx_sdi_set_display_enable(struct xilinx_sdi *sdi)
+{
+ u32 data;
+
+ data = xilinx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_CTRL_EN_MASK;
+ /* start sdi stream */
+ xilinx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+static void xilinx_sdi_encoder_dpms(struct drm_encoder *encoder,
+ int mode)
+{
+ struct xilinx_sdi *sdi = encoder_to_sdi(encoder);
+
+ dev_dbg(sdi->dev, "encoder dpms state: %d\n", mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ xilinx_sdi_set_display_enable(sdi);
+ return;
+ default:
+ xilinx_sdi_set_display_disable(sdi);
+ xilinx_sdi_set_default_drm_properties(sdi);
+ }
+}
+
+/**
+ * xilinx_sdi_calc_st352_payld - calculate the st352 payload
+ *
+ * @sdi: pointer to SDI Tx structure
+ * @mode: DRM display mode
+ *
+ * This function calculates the st352 payload to be configured.
+ * Refer to the SMPTE ST 352 standard for the payload format.
+ *
+ * Return: the calculated ST352 payload
+ */
+static u32 xilinx_sdi_calc_st352_payld(struct xilinx_sdi *sdi,
+ struct drm_display_mode *mode)
+{
+ u8 byt1, byt2;
+ u16 is_p;
+	int id;
+	u32 sdi_mode = sdi->sdi_mod_prop_val;
+ bool is_frac = sdi->is_frac_prop_val;
+ u32 byt3 = ST352_BYTE3;
+
+ id = xilinx_sdi_get_mode_id(mode);
+ dev_dbg(sdi->dev, "mode id: %d\n", id);
+ if (mode->hdisplay == 2048 || mode->hdisplay == 4096)
+ byt3 |= XST352_2048_SHIFT;
+ /* byte 2 calculation */
+ is_p = !(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ byt2 = xlnx_sdi_modes[id].st352_byt2[is_frac];
+ if ((sdi_mode == XSDI_MODE_3GB) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN) || is_p)
+ byt2 |= XST352_PROG_PIC_MASK;
+ if (is_p && (mode->vtotal >= 1125))
+ byt2 |= XST352_PROG_TRANS_MASK;
+
+ /* byte 1 calculation */
+ byt1 = xlnx_sdi_modes[id].st352_byt1[sdi_mode];
+
+ return (ST352_BYTE4 << 24 | byt3 << 16 | byt2 << 8 | byt1);
+}
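+
+/*
+ * Worked example from the tables above: 3GA 1920x1080@60 with integer
+ * frame rate gives byt1 = 0x89 (st352_byt1[XSDI_MODE_3GA]) and
+ * byt2 = 0xB | XST352_PROG_PIC_MASK | XST352_PROG_TRANS_MASK = 0xCB, so
+ * the returned payload is 0x01 << 24 | 0x00 << 16 | 0xCB << 8 | 0x89,
+ * i.e. 0x0100CB89.
+ */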
+
+/**
+ * xilinx_sdi_mode_set - configure the SDI timing parameters
+ *
+ * @encoder: pointer to Xilinx DRM encoder
+ * @mode: DRM kernel-internal display mode structure
+ * @adjusted_mode: SDI panel timing parameters
+ *
+ * This function derives the SDI IP timing parameters from the adjusted
+ * mode and programs the resulting video timing into the VTC.
+ */
+static void xilinx_sdi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct xilinx_sdi *sdi = encoder_to_sdi(encoder);
+ struct videomode vm;
+ u32 payload, i;
+
+ xilinx_sdi_set_config_parameters(sdi);
+
+ /* set st352 payloads */
+ payload = xilinx_sdi_calc_st352_payld(sdi, adjusted_mode);
+ dev_dbg(sdi->dev, "payload : %0x\n", payload);
+
+ for (i = 0; i < sdi->sdi_data_strm_prop_val / 2; i++) {
+ if (sdi->sdi_mod_prop_val == XSDI_MODE_3GB)
+ payload |= (i << 1) << XSDI_CH_SHIFT;
+ xilinx_sdi_set_payload_data(sdi, i, payload);
+ }
+
+ /* UHDSDI is fixed 2 pixels per clock, horizontal timings div by 2 */
+ vm.hactive = adjusted_mode->hdisplay / PIXELS_PER_CLK;
+ vm.hfront_porch = (adjusted_mode->hsync_start -
+ adjusted_mode->hdisplay) / PIXELS_PER_CLK;
+ vm.hback_porch = (adjusted_mode->htotal -
+ adjusted_mode->hsync_end) / PIXELS_PER_CLK;
+ vm.hsync_len = (adjusted_mode->hsync_end -
+ adjusted_mode->hsync_start) / PIXELS_PER_CLK;
+
+ vm.vactive = adjusted_mode->vdisplay;
+ vm.vfront_porch = adjusted_mode->vsync_start -
+ adjusted_mode->vdisplay;
+ vm.vback_porch = adjusted_mode->vtotal -
+ adjusted_mode->vsync_end;
+ vm.vsync_len = adjusted_mode->vsync_end -
+ adjusted_mode->vsync_start;
+ vm.flags = 0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vm.flags |= DISPLAY_FLAGS_INTERLACED;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ vm.flags |= DISPLAY_FLAGS_HSYNC_LOW;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ vm.flags |= DISPLAY_FLAGS_VSYNC_LOW;
+
+ xilinx_vtc_config_sig(sdi->vtc, &vm);
+}
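+
+/*
+ * Worked example for 1920x1080@60 (hsync 2008-2052, htotal 2200) with the
+ * fixed 2 pixels per clock:
+ *
+ *   vm.hactive      = 1920 / 2          = 960
+ *   vm.hfront_porch = (2008 - 1920) / 2 = 44
+ *   vm.hsync_len    = (2052 - 2008) / 2 = 22
+ *   vm.hback_porch  = (2200 - 2052) / 2 = 74
+ */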
+
+static void xilinx_sdi_prepare(struct drm_encoder *encoder)
+{
+ struct xilinx_sdi *sdi = encoder_to_sdi(encoder);
+ u32 reg;
+
+ dev_dbg(sdi->dev, "%s\n", __func__);
+
+ reg = xilinx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ reg |= XSDI_TX_CTRL_INS_CRC_MASK | XSDI_TX_CTRL_INS_ST352_MASK |
+ XSDI_TX_CTRL_OVR_ST352_MASK | XSDI_TX_CTRL_INS_SYNC_BIT_MASK |
+ XSDI_TX_CTRL_INS_EDH_MASK;
+ xilinx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, reg);
+ xilinx_sdi_writel(sdi->base, XSDI_TX_IER_STAT, XSDI_IER_EN_MASK);
+ xilinx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 1);
+ xilinx_vtc_reset(sdi->vtc);
+}
+
+static void xilinx_sdi_commit(struct drm_encoder *encoder)
+{
+ struct xilinx_sdi *sdi = encoder_to_sdi(encoder);
+	long ret;
+
+ dev_dbg(sdi->dev, "%s\n", __func__);
+ xilinx_sdi_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ ret = wait_event_interruptible_timeout(sdi->wait_event,
+ sdi->event_received,
+ usecs_to_jiffies(GT_TIMEOUT));
+ if (!ret) {
+ dev_err(sdi->dev, "Timeout: GT interrupt not received\n");
+ return;
+ }
+ sdi->event_received = false;
+ /* enable sdi bridge, vtc and Axi4s_vid_out_ctrl */
+ xilinx_en_bridge(sdi);
+ xilinx_vtc_enable(sdi->vtc);
+ xilinx_en_axi4s(sdi);
+}
+
+static const struct drm_encoder_helper_funcs xilinx_sdi_encoder_helper_funcs = {
+ .dpms = xilinx_sdi_encoder_dpms,
+ .mode_fixup = xilinx_sdi_mode_fixup,
+ .mode_set = xilinx_sdi_mode_set,
+ .prepare = xilinx_sdi_prepare,
+ .commit = xilinx_sdi_commit,
+};
+
+static const struct drm_encoder_funcs xilinx_sdi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int xilinx_sdi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xilinx_sdi *sdi = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &sdi->encoder;
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ /*
+	 * TODO: Only one possible CRTC is exposed for now, as per the current
+	 * implementation of the SDI Tx driver. The DRM framework supports
+	 * multiple CRTCs and the SDI driver can be enhanced for that.
+ */
+ encoder->possible_crtcs = 1;
+
+ drm_encoder_init(drm_dev, encoder, &xilinx_sdi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+
+ drm_encoder_helper_add(encoder, &xilinx_sdi_encoder_helper_funcs);
+
+ ret = xilinx_sdi_create_connector(encoder);
+ if (ret) {
+ dev_err(sdi->dev, "fail creating connector, ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ }
+ return ret;
+}
+
+static void xilinx_sdi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xilinx_sdi *sdi = dev_get_drvdata(dev);
+
+ xilinx_sdi_encoder_dpms(&sdi->encoder, DRM_MODE_DPMS_OFF);
+ drm_encoder_cleanup(&sdi->encoder);
+ drm_connector_cleanup(&sdi->connector);
+}
+
+static const struct component_ops xilinx_sdi_component_ops = {
+ .bind = xilinx_sdi_bind,
+ .unbind = xilinx_sdi_unbind,
+};
+
+static irqreturn_t xilinx_sdi_vblank_handler(int irq, void *data)
+{
+ struct xilinx_sdi *sdi = (struct xilinx_sdi *)data;
+ u32 intr = xilinx_vtc_intr_get(sdi->vtc);
+
+ if (!intr)
+ return IRQ_NONE;
+
+ if (sdi->vblank_fn)
+ sdi->vblank_fn(sdi->vblank_data);
+
+ xilinx_vtc_intr_clear(sdi->vtc, intr);
+ return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_drm_sdi_enable_vblank - Enable the vblank handling
+ * @sdi: SDI subsystem
+ * @vblank_fn: callback to be called on vblank event
+ * @vblank_data: data to be used in @vblank_fn
+ *
+ * This function registers the vblank handler; the handler is then invoked
+ * on each vblank event.
+ */
+void xilinx_drm_sdi_enable_vblank(struct xilinx_sdi *sdi,
+ void (*vblank_fn)(void *),
+ void *vblank_data)
+{
+ sdi->vblank_fn = vblank_fn;
+ sdi->vblank_data = vblank_data;
+ xilinx_vtc_vblank_enable(sdi->vtc);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_sdi_enable_vblank);
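+
+/*
+ * Minimal sketch of a consumer (hypothetical CRTC driver, not part of
+ * this patch) routing vblank events into the DRM core:
+ *
+ *   static void my_crtc_vblank(void *data)
+ *   {
+ *           struct my_crtc *crtc = data;
+ *
+ *           drm_crtc_handle_vblank(&crtc->base);
+ *   }
+ *
+ *   xilinx_drm_sdi_enable_vblank(sdi, my_crtc_vblank, crtc);
+ */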
+
+/**
+ * xilinx_drm_sdi_disable_vblank - Disable the vblank handling
+ * @sdi: SDI subsystem
+ *
+ * Disable the vblank handler. The vblank handler and data are unregistered.
+ */
+void xilinx_drm_sdi_disable_vblank(struct xilinx_sdi *sdi)
+{
+ sdi->vblank_fn = NULL;
+ sdi->vblank_data = NULL;
+ xilinx_vtc_vblank_disable(sdi->vtc);
+}
+
+/**
+ * xilinx_sdi_register_device - Register the SDI subsystem to the global list
+ * @sdi: SDI subsystem
+ *
+ * Register the SDI subsystem instance to the global list
+ */
+static void xilinx_sdi_register_device(struct xilinx_sdi *sdi)
+{
+ mutex_lock(&xilinx_sdi_lock);
+ list_add_tail(&sdi->list, &xilinx_sdi_list);
+ mutex_unlock(&xilinx_sdi_lock);
+}
+
+/**
+ * xilinx_drm_sdi_of_get - Get the SDI subsystem instance
+ * @np: parent device node
+ *
+ * This function searches and returns an SDI subsystem structure for
+ * the parent device node, @np. The SDI subsystem node should be a child node
+ * of @np, with 'xlnx,sdi' property pointing to the SDI device node.
+ * An instance can be shared by multiple users.
+ *
+ * Return: corresponding SDI subsystem structure if found. NULL if
+ * the device node doesn't have the 'xlnx,sdi' property, or an -EPROBE_DEFER
+ * error pointer if the SDI subsystem isn't found.
+ */
+struct xilinx_sdi *xilinx_drm_sdi_of_get(struct device_node *np)
+{
+ struct xilinx_sdi *found = NULL;
+ struct xilinx_sdi *sdi;
+ struct device_node *sdi_node;
+
+ if (!of_find_property(np, "xlnx,sdi", NULL))
+ return NULL;
+
+ sdi_node = of_parse_phandle(np, "xlnx,sdi", 0);
+ if (!sdi_node)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&xilinx_sdi_lock);
+ list_for_each_entry(sdi, &xilinx_sdi_list, list) {
+ if (sdi->dev->of_node == sdi_node) {
+ found = sdi;
+ break;
+ }
+ }
+ mutex_unlock(&xilinx_sdi_lock);
+
+ of_node_put(sdi_node);
+ if (!found)
+ return ERR_PTR(-EPROBE_DEFER);
+ return found;
+}
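+
+/*
+ * A consumer's probe would typically use the helper above as follows
+ * (sketch; NULL means no "xlnx,sdi" phandle, so the consumer proceeds
+ * without an SDI output):
+ *
+ *   sdi = xilinx_drm_sdi_of_get(pdev->dev.of_node);
+ *   if (IS_ERR(sdi))
+ *           return PTR_ERR(sdi);    // usually -EPROBE_DEFER
+ */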
+
+/**
+ * xilinx_sdi_unregister_device - Unregister the SDI subsystem instance
+ * @sdi: SDI subsystem
+ *
+ * Unregister the SDI subsystem instance from the global list
+ */
+static void xilinx_sdi_unregister_device(struct xilinx_sdi *sdi)
+{
+ mutex_lock(&xilinx_sdi_lock);
+ list_del(&sdi->list);
+ mutex_unlock(&xilinx_sdi_lock);
+}
+
+static int xilinx_sdi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xilinx_sdi *sdi;
+ struct device_node *vtc_node;
+ int ret, irq;
+
+ sdi = devm_kzalloc(dev, sizeof(*sdi), GFP_KERNEL);
+ if (!sdi)
+ return -ENOMEM;
+
+ sdi->dev = dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sdi->base = devm_ioremap_resource(dev, res);
+
+ if (IS_ERR(sdi->base)) {
+ dev_err(dev, "failed to remap io region\n");
+ return PTR_ERR(sdi->base);
+ }
+ platform_set_drvdata(pdev, sdi);
+
+ vtc_node = of_parse_phandle(sdi->dev->of_node, "xlnx,vtc", 0);
+ if (!vtc_node) {
+ dev_err(dev, "vtc node not present\n");
+		return -ENODEV;
+ }
+ sdi->vtc = xilinx_vtc_probe(sdi->dev, vtc_node);
+ of_node_put(vtc_node);
+ if (IS_ERR(sdi->vtc)) {
+ dev_err(dev, "failed to probe VTC\n");
+ return PTR_ERR(sdi->vtc);
+ }
+
+ /* disable interrupt */
+ xilinx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(sdi->dev, irq, NULL,
+ xilinx_sdi_irq_handler, IRQF_ONESHOT,
+ dev_name(sdi->dev), sdi);
+ if (ret < 0)
+ return ret;
+
+ irq = platform_get_irq(pdev, 1); /* vblank interrupt */
+ if (irq < 0)
+ return irq;
+ ret = devm_request_threaded_irq(sdi->dev, irq, NULL,
+ xilinx_sdi_vblank_handler, IRQF_ONESHOT,
+ "sdiTx-vblank", sdi);
+ if (ret < 0)
+ return ret;
+
+ init_waitqueue_head(&sdi->wait_event);
+ sdi->event_received = false;
+
+ xilinx_sdi_register_device(sdi);
+ return component_add(dev, &xilinx_sdi_component_ops);
+}
+
+static int xilinx_sdi_remove(struct platform_device *pdev)
+{
+ struct xilinx_sdi *sdi = platform_get_drvdata(pdev);
+
+ xilinx_sdi_unregister_device(sdi);
+ component_del(&pdev->dev, &xilinx_sdi_component_ops);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_sdi_of_match[] = {
+ { .compatible = "xlnx,v-smpte-uhdsdi-tx-ss"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xilinx_sdi_of_match);
+
+static struct platform_driver sdi_tx_driver = {
+ .probe = xilinx_sdi_probe,
+ .remove = xilinx_sdi_remove,
+ .driver = {
+ .name = "xlnx,uhdsdi-tx",
+ .of_match_table = xilinx_sdi_of_match,
+ },
+};
+
+module_platform_driver(sdi_tx_driver);
+
+MODULE_AUTHOR("Saurabh Sengar <saurabhs@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA SDI Tx Driver");
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_sdi.h b/drivers/gpu/drm/xilinx/xilinx_drm_sdi.h
new file mode 100644
index 000000000000..b9a773eef094
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_sdi.h
@@ -0,0 +1,29 @@
+/*
+ * SDI subsystem header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_SDI_H_
+#define _XILINX_DRM_SDI_H_
+
+struct xilinx_sdi;
+struct device_node;
+
+struct xilinx_sdi *xilinx_drm_sdi_of_get(struct device_node *np);
+void xilinx_drm_sdi_enable_vblank(struct xilinx_sdi *sdi,
+ void (*vblank_fn)(void *),
+ void *vblank_data);
+void xilinx_drm_sdi_disable_vblank(struct xilinx_sdi *sdi);
+#endif /* _XILINX_DRM_SDI_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_osd.c b/drivers/gpu/drm/xilinx/xilinx_osd.c
new file mode 100644
index 000000000000..b777fbbed5b8
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_osd.c
@@ -0,0 +1,382 @@
+/*
+ * Xilinx OSD support
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#include "xilinx_drm_drv.h"
+
+#include "xilinx_osd.h"
+
+/* registers */
+#define OSD_CTL 0x000 /* control */
+#define OSD_SS 0x020 /* screen size */
+#define OSD_ENC 0x028 /* encoding register */
+#define OSD_BC0 0x100 /* background color channel 0 */
+#define OSD_BC1 0x104 /* background color channel 1 */
+#define OSD_BC2 0x108 /* background color channel 2 */
+
+#define OSD_L0C 0x110 /* layer 0 control */
+
+/* register offset of layers */
+#define OSD_LAYER_SIZE 0x10
+#define OSD_LXC 0x00 /* layer control */
+#define OSD_LXP 0x04 /* layer position */
+#define OSD_LXS 0x08 /* layer size */
+
+/* osd control register bit definition */
+#define OSD_CTL_RUE (1 << 1) /* osd reg update enable */
+#define OSD_CTL_EN (1 << 0) /* osd enable */
+
+/* osd screen size register bit definition */
+#define OSD_SS_YSIZE_MASK 0x0fff0000 /* vertical height of OSD output */
+#define OSD_SS_YSIZE_SHIFT 16 /* bit shift of OSD_SS_YSIZE_MASK */
+#define OSD_SS_XSIZE_MASK 0x00000fff /* horizontal width of OSD output */
+
+/* osd video format mask */
+#define OSD_VIDEO_FORMAT_MASK 0x0000000f /* video format */
+
+/* osd background color channel 0 */
+#define OSD_BC0_YG_MASK 0x000000ff /* Y (luma) or Green */
+
+/* osd background color channel 1 */
+#define OSD_BC1_UCBB_MASK 0x000000ff /* U (Cb) or Blue */
+
+/* osd background color channel 2 */
+#define OSD_BC2_VCRR_MASK 0x000000ff /* V(Cr) or Red */
+
+/* maximum number of the layers */
+#define OSD_MAX_NUM_OF_LAYERS 8
+
+/* osd layer control (layer 0 through (OSD_MAX_NUM_OF_LAYERS - 1)) */
+#define OSD_LXC_ALPHA_MASK 0x0fff0000 /* global alpha value */
+#define OSD_LXC_ALPHA_SHIFT 16 /* bit shift of alpha value */
+#define OSD_LXC_PRIORITY_MASK 0x00000700 /* layer priority */
+#define OSD_LXC_PRIORITY_SHIFT 8 /* bit shift of priority */
+#define OSD_LXC_GALPHAEN (1 << 1) /* global alpha enable */
+#define OSD_LXC_EN (1 << 0) /* layer enable */
+
+/* osd layer position (layer 0 through (OSD_MAX_NUM_OF_LAYERS - 1)) */
+#define OSD_LXP_YSTART_MASK 0x0fff0000 /* vert start line */
+#define OSD_LXP_YSTART_SHIFT 16 /* vert start line bit shift */
+#define OSD_LXP_XSTART_MASK 0x00000fff /* horizontal start pixel */
+
+/* osd layer size (layer 0 through (OSD_MAX_NUM_OF_LAYERS - 1)) */
+#define OSD_LXS_YSIZE_MASK 0x0fff0000 /* vert size */
+#define OSD_LXS_YSIZE_SHIFT 16 /* vertical size bit shift */
+#define OSD_LXS_XSIZE_MASK 0x00000fff /* horizontal size of layer */
+
+/* osd software reset */
+#define OSD_RST_RESET (1 << 31)
+
+/**
+ * struct xilinx_osd_layer - Xilinx OSD layer object
+ *
+ * @base: base address
+ * @id: id
+ * @avail: available flag
+ * @osd: osd
+ */
+struct xilinx_osd_layer {
+ void __iomem *base;
+ int id;
+ bool avail;
+ struct xilinx_osd *osd;
+};
+
+/**
+ * struct xilinx_osd - Xilinx OSD object
+ *
+ * @base: base address
+ * @layers: layers
+ * @num_layers: number of layers
+ * @max_width: maximum width
+ * @format: video format
+ */
+struct xilinx_osd {
+ void __iomem *base;
+ struct xilinx_osd_layer *layers[OSD_MAX_NUM_OF_LAYERS];
+ unsigned int num_layers;
+ unsigned int max_width;
+ unsigned int format;
+};
+
+/* osd layer operation */
+/* set layer alpha */
+void xilinx_osd_layer_set_alpha(struct xilinx_osd_layer *layer, u32 alpha)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+ DRM_DEBUG_DRIVER("alpha: 0x%08x\n", alpha);
+
+ value = xilinx_drm_readl(layer->base, OSD_LXC);
+ value &= ~OSD_LXC_ALPHA_MASK;
+ value |= (alpha << OSD_LXC_ALPHA_SHIFT) & OSD_LXC_ALPHA_MASK;
+ xilinx_drm_writel(layer->base, OSD_LXC, value);
+}
+
+void xilinx_osd_layer_enable_alpha(struct xilinx_osd_layer *layer, bool enable)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+ DRM_DEBUG_DRIVER("enable: %d\n", enable);
+
+ value = xilinx_drm_readl(layer->base, OSD_LXC);
+ value = enable ? (value | OSD_LXC_GALPHAEN) :
+ (value & ~OSD_LXC_GALPHAEN);
+ xilinx_drm_writel(layer->base, OSD_LXC, value);
+}
+
+/* set layer priority */
+void xilinx_osd_layer_set_priority(struct xilinx_osd_layer *layer, u32 prio)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+ DRM_DEBUG_DRIVER("prio: %d\n", prio);
+
+ value = xilinx_drm_readl(layer->base, OSD_LXC);
+ value &= ~OSD_LXC_PRIORITY_MASK;
+ value |= (prio << OSD_LXC_PRIORITY_SHIFT) & OSD_LXC_PRIORITY_MASK;
+ xilinx_drm_writel(layer->base, OSD_LXC, value);
+}
+
+/* set layer dimension */
+void xilinx_osd_layer_set_dimension(struct xilinx_osd_layer *layer,
+ u16 xstart, u16 ystart,
+ u16 xsize, u16 ysize)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+ DRM_DEBUG_DRIVER("w: %d(%d), h: %d(%d)\n",
+ xsize, xstart, ysize, ystart);
+
+ value = xstart & OSD_LXP_XSTART_MASK;
+ value |= (ystart << OSD_LXP_YSTART_SHIFT) & OSD_LXP_YSTART_MASK;
+
+ xilinx_drm_writel(layer->base, OSD_LXP, value);
+
+ value = xsize & OSD_LXS_XSIZE_MASK;
+ value |= (ysize << OSD_LXS_YSIZE_SHIFT) & OSD_LXS_YSIZE_MASK;
+
+ xilinx_drm_writel(layer->base, OSD_LXS, value);
+}
+
+/* enable layer */
+void xilinx_osd_layer_enable(struct xilinx_osd_layer *layer)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+
+ value = xilinx_drm_readl(layer->base, OSD_LXC);
+ value |= OSD_LXC_EN;
+ xilinx_drm_writel(layer->base, OSD_LXC, value);
+}
+
+/* disable layer */
+void xilinx_osd_layer_disable(struct xilinx_osd_layer *layer)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+
+ value = xilinx_drm_readl(layer->base, OSD_LXC);
+ value &= ~OSD_LXC_EN;
+ xilinx_drm_writel(layer->base, OSD_LXC, value);
+}
+
+/* get an available layer */
+struct xilinx_osd_layer *xilinx_osd_layer_get(struct xilinx_osd *osd)
+{
+ struct xilinx_osd_layer *layer = NULL;
+ int i;
+
+ for (i = 0; i < osd->num_layers; i++) {
+ if (osd->layers[i]->avail) {
+ layer = osd->layers[i];
+ layer->avail = false;
+ break;
+ }
+ }
+
+ if (!layer)
+ return ERR_PTR(-ENODEV);
+
+ DRM_DEBUG_DRIVER("layer id: %d\n", i);
+
+ return layer;
+}
+
+/* put a layer */
+void xilinx_osd_layer_put(struct xilinx_osd_layer *layer)
+{
+ layer->avail = true;
+}
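+
+/*
+ * Typical layer lifecycle (sketch of a hypothetical plane driver;
+ * OSD_MAX_ALPHA is defined in xilinx_osd.h):
+ *
+ *   layer = xilinx_osd_layer_get(osd);
+ *   if (IS_ERR(layer))
+ *           return PTR_ERR(layer);
+ *
+ *   xilinx_osd_layer_set_dimension(layer, 0, 0, 1920, 1080);
+ *   xilinx_osd_layer_set_alpha(layer, OSD_MAX_ALPHA);
+ *   xilinx_osd_layer_enable(layer);
+ *   ...
+ *   xilinx_osd_layer_disable(layer);
+ *   xilinx_osd_layer_put(layer);
+ */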
+
+/* osd operations */
+/* set osd color */
+void xilinx_osd_set_color(struct xilinx_osd *osd, u8 r, u8 g, u8 b)
+{
+ u32 value;
+
+ value = g;
+ xilinx_drm_writel(osd->base, OSD_BC0, value);
+ value = b;
+ xilinx_drm_writel(osd->base, OSD_BC1, value);
+ value = r;
+ xilinx_drm_writel(osd->base, OSD_BC2, value);
+}
+
+/* set osd dimension */
+void xilinx_osd_set_dimension(struct xilinx_osd *osd, u32 width, u32 height)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("w: %d, h: %d\n", width, height);
+
+ value = width | ((height << OSD_SS_YSIZE_SHIFT) & OSD_SS_YSIZE_MASK);
+ xilinx_drm_writel(osd->base, OSD_SS, value);
+}
+
+/* get osd number of layers */
+unsigned int xilinx_osd_get_num_layers(struct xilinx_osd *osd)
+{
+ return osd->num_layers;
+}
+
+/* get osd max width */
+unsigned int xilinx_osd_get_max_width(struct xilinx_osd *osd)
+{
+ return osd->max_width;
+}
+
+/* get osd color format */
+unsigned int xilinx_osd_get_format(struct xilinx_osd *osd)
+{
+ return osd->format;
+}
+
+/* reset osd */
+void xilinx_osd_reset(struct xilinx_osd *osd)
+{
+ xilinx_drm_writel(osd->base, OSD_CTL, OSD_RST_RESET);
+}
+
+/* enable osd */
+void xilinx_osd_enable(struct xilinx_osd *osd)
+{
+ xilinx_drm_writel(osd->base, OSD_CTL,
+ xilinx_drm_readl(osd->base, OSD_CTL) | OSD_CTL_EN);
+}
+
+/* disable osd */
+void xilinx_osd_disable(struct xilinx_osd *osd)
+{
+ xilinx_drm_writel(osd->base, OSD_CTL,
+ xilinx_drm_readl(osd->base, OSD_CTL) & ~OSD_CTL_EN);
+}
+
+/* register-update-enable osd */
+void xilinx_osd_enable_rue(struct xilinx_osd *osd)
+{
+ xilinx_drm_writel(osd->base, OSD_CTL,
+ xilinx_drm_readl(osd->base, OSD_CTL) | OSD_CTL_RUE);
+}
+
+/* register-update-enable osd */
+void xilinx_osd_disable_rue(struct xilinx_osd *osd)
+{
+ xilinx_drm_writel(osd->base, OSD_CTL,
+ xilinx_drm_readl(osd->base, OSD_CTL) & ~OSD_CTL_RUE);
+}
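+
+/*
+ * The RUE bit gates when register writes take effect; a typical pattern
+ * (sketch) is to batch updates and then latch them in one go:
+ *
+ *   xilinx_osd_disable_rue(osd);
+ *   xilinx_osd_set_dimension(osd, width, height);
+ *   // ... configure layers ...
+ *   xilinx_osd_enable_rue(osd);  // the IP latches the shadow registers
+ */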
+
+static const struct of_device_id xilinx_osd_of_match[] = {
+ { .compatible = "xlnx,v-osd-5.01.a" },
+ { /* end of table */ },
+};
+
+struct xilinx_osd *xilinx_osd_probe(struct device *dev,
+ struct device_node *node)
+{
+ struct xilinx_osd *osd;
+ struct xilinx_osd_layer *layer;
+ const struct of_device_id *match;
+ struct resource res;
+ int i;
+ int ret;
+
+ match = of_match_node(xilinx_osd_of_match, node);
+ if (!match) {
+ dev_err(dev, "failed to match the device node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ osd = devm_kzalloc(dev, sizeof(*osd), GFP_KERNEL);
+ if (!osd)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+		dev_err(dev, "failed to get the resource from the device node\n");
+ return ERR_PTR(ret);
+ }
+
+ osd->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(osd->base))
+ return ERR_CAST(osd->base);
+
+ ret = of_property_read_u32(node, "xlnx,num-layers", &osd->num_layers);
+ if (ret) {
+ dev_warn(dev, "failed to get num of layers prop\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = of_property_read_u32(node, "xlnx,screen-width", &osd->max_width);
+ if (ret) {
+ dev_warn(dev, "failed to get screen width prop\n");
+ return ERR_PTR(ret);
+ }
+
+ /* read the video format set by a user */
+ osd->format = xilinx_drm_readl(osd->base, OSD_ENC) &
+ OSD_VIDEO_FORMAT_MASK;
+
+ for (i = 0; i < osd->num_layers; i++) {
+ layer = devm_kzalloc(dev, sizeof(*layer), GFP_KERNEL);
+ if (!layer)
+ return ERR_PTR(-ENOMEM);
+
+ layer->base = osd->base + OSD_L0C + OSD_LAYER_SIZE * i;
+ layer->id = i;
+ layer->osd = osd;
+ layer->avail = true;
+ osd->layers[i] = layer;
+ }
+
+ return osd;
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_osd.h b/drivers/gpu/drm/xilinx/xilinx_osd.h
new file mode 100644
index 000000000000..d84ee9117419
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_osd.h
@@ -0,0 +1,62 @@
+/*
+ * Xilinx OSD Header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_OSD_H_
+#define _XILINX_OSD_H_
+
+/* TODO: use the fixed max alpha value for 8 bit component width for now. */
+#define OSD_MAX_ALPHA 0x100
+
+struct xilinx_osd;
+struct xilinx_osd_layer;
+
+/* osd layer configuration */
+void xilinx_osd_layer_set_alpha(struct xilinx_osd_layer *layer, u32 alpha);
+void xilinx_osd_layer_enable_alpha(struct xilinx_osd_layer *layer, bool enable);
+void xilinx_osd_layer_set_priority(struct xilinx_osd_layer *layer, u32 prio);
+void xilinx_osd_layer_set_dimension(struct xilinx_osd_layer *layer,
+ u16 xstart, u16 ystart,
+ u16 xsize, u16 ysize);
+
+/* osd layer operation */
+void xilinx_osd_layer_enable(struct xilinx_osd_layer *layer);
+void xilinx_osd_layer_disable(struct xilinx_osd_layer *layer);
+struct xilinx_osd_layer *xilinx_osd_layer_get(struct xilinx_osd *osd);
+void xilinx_osd_layer_put(struct xilinx_osd_layer *layer);
+
+/* osd configuration */
+void xilinx_osd_set_color(struct xilinx_osd *osd, u8 r, u8 g, u8 b);
+void xilinx_osd_set_dimension(struct xilinx_osd *osd, u32 width, u32 height);
+
+unsigned int xilinx_osd_get_num_layers(struct xilinx_osd *osd);
+unsigned int xilinx_osd_get_max_width(struct xilinx_osd *osd);
+unsigned int xilinx_osd_get_format(struct xilinx_osd *osd);
+
+/* osd operation */
+void xilinx_osd_reset(struct xilinx_osd *osd);
+void xilinx_osd_enable(struct xilinx_osd *osd);
+void xilinx_osd_disable(struct xilinx_osd *osd);
+void xilinx_osd_enable_rue(struct xilinx_osd *osd);
+void xilinx_osd_disable_rue(struct xilinx_osd *osd);
+
+struct device;
+struct device_node;
+
+struct xilinx_osd *xilinx_osd_probe(struct device *dev,
+ struct device_node *node);
+
+#endif /* _XILINX_OSD_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.c b/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.c
new file mode 100644
index 000000000000..2d3400456cb0
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.c
@@ -0,0 +1,119 @@
+/*
+ * Xilinx rgb to yuv converter support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "xilinx_drm_drv.h"
+
+#include "xilinx_rgb2yuv.h"
+
+/* registers */
+/* control register */
+#define RGB_CONTROL 0x000
+/* active size v,h */
+#define RGB_ACTIVE_SIZE 0x020
+
+/* control register bit definition */
+#define RGB_CTL_EN (1 << 0) /* enable */
+#define RGB_CTL_RUE (1 << 1) /* register update enable */
+#define RGB_RST_RESET BIT(31) /* instant reset */
+
+struct xilinx_rgb2yuv {
+ void __iomem *base;
+};
+
+/* enable rgb2yuv */
+void xilinx_rgb2yuv_enable(struct xilinx_rgb2yuv *rgb2yuv)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(rgb2yuv->base, RGB_CONTROL);
+ xilinx_drm_writel(rgb2yuv->base, RGB_CONTROL, reg | RGB_CTL_EN);
+}
+
+/* disable rgb2yuv */
+void xilinx_rgb2yuv_disable(struct xilinx_rgb2yuv *rgb2yuv)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(rgb2yuv->base, RGB_CONTROL);
+ xilinx_drm_writel(rgb2yuv->base, RGB_CONTROL, reg & ~RGB_CTL_EN);
+}
+
+/* configure rgb2yuv */
+void xilinx_rgb2yuv_configure(struct xilinx_rgb2yuv *rgb2yuv,
+ int hactive, int vactive)
+{
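+ /* ACTIVE_SIZE packs the vertical size in bits [31:16] and the horizontal size in bits [15:0] */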
+ xilinx_drm_writel(rgb2yuv->base, RGB_ACTIVE_SIZE,
+ (vactive << 16) | hactive);
+}
+
+/* reset rgb2yuv */
+void xilinx_rgb2yuv_reset(struct xilinx_rgb2yuv *rgb2yuv)
+{
+ u32 reg;
+
+ xilinx_drm_writel(rgb2yuv->base, RGB_CONTROL, RGB_RST_RESET);
+
+ /* enable register update */
+ reg = xilinx_drm_readl(rgb2yuv->base, RGB_CONTROL);
+ xilinx_drm_writel(rgb2yuv->base, RGB_CONTROL, reg | RGB_CTL_RUE);
+}
+
+static const struct of_device_id xilinx_rgb2yuv_of_match[] = {
+ { .compatible = "xlnx,v-rgb2ycrcb-6.01.a" },
+ { /* end of table */ },
+};
+
+/* probe rgb2yuv */
+struct xilinx_rgb2yuv *xilinx_rgb2yuv_probe(struct device *dev,
+ struct device_node *node)
+{
+ struct xilinx_rgb2yuv *rgb2yuv;
+ const struct of_device_id *match;
+ struct resource res;
+ int ret;
+
+ match = of_match_node(xilinx_rgb2yuv_of_match, node);
+ if (!match) {
+ dev_err(dev, "failed to match the device node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ rgb2yuv = devm_kzalloc(dev, sizeof(*rgb2yuv), GFP_KERNEL);
+ if (!rgb2yuv)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to of_address_to_resource\n");
+ return ERR_PTR(ret);
+ }
+
+ rgb2yuv->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(rgb2yuv->base))
+ return ERR_CAST(rgb2yuv->base);
+
+ xilinx_rgb2yuv_reset(rgb2yuv);
+
+ return rgb2yuv;
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.h b/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.h
new file mode 100644
index 000000000000..d1e544ac336b
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.h
@@ -0,0 +1,35 @@
+/*
+ * Color Space Converter Header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_RGB2YUV_H_
+#define _XILINX_RGB2YUV_H_
+
+struct xilinx_rgb2yuv;
+
+void xilinx_rgb2yuv_configure(struct xilinx_rgb2yuv *rgb2yuv,
+ int hactive, int vactive);
+void xilinx_rgb2yuv_reset(struct xilinx_rgb2yuv *rgb2yuv);
+void xilinx_rgb2yuv_enable(struct xilinx_rgb2yuv *rgb2yuv);
+void xilinx_rgb2yuv_disable(struct xilinx_rgb2yuv *rgb2yuv);
+
+struct device;
+struct device_node;
+
+struct xilinx_rgb2yuv *xilinx_rgb2yuv_probe(struct device *dev,
+ struct device_node *node);
+
+#endif /* _XILINX_RGB2YUV_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_vtc.c b/drivers/gpu/drm/xilinx/xilinx_vtc.c
new file mode 100644
index 000000000000..67a7e4fa14bf
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_vtc.c
@@ -0,0 +1,645 @@
+/*
+ * Video Timing Controller support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#include <video/videomode.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_vtc.h"
+
+/* register offsets */
+#define VTC_CTL 0x000 /* control */
+#define VTC_STATS 0x004 /* status */
+#define VTC_ERROR 0x008 /* error */
+
+#define VTC_GASIZE 0x060 /* generator active size */
+#define VTC_GENC 0x068 /* generator encoding */
+#define VTC_GPOL 0x06c /* generator polarity */
+#define VTC_GHSIZE 0x070 /* generator frame horizontal size */
+#define VTC_GVSIZE 0x074 /* generator frame vertical size */
+#define VTC_GHSYNC 0x078 /* generator horizontal sync */
+#define VTC_GVBHOFF_F0 0x07c /* generator Field 0 vblank horizontal offset */
+#define VTC_GVSYNC_F0 0x080 /* generator Field 0 vertical sync */
+#define VTC_GVSHOFF_F0 0x084 /* generator Field 0 vsync horizontal offset */
+#define VTC_GVBHOFF_F1 0x088 /* generator Field 1 vblank horizontal offset */
+#define VTC_GVSYNC_F1 0x08C /* generator Field 1 vertical sync */
+#define VTC_GVSHOFF_F1 0x090 /* generator Field 1 vsync horizontal offset */
+
+#define VTC_RESET 0x000 /* reset register */
+#define VTC_ISR 0x004 /* interrupt status register */
+#define VTC_IER 0x00c /* interrupt enable register */
+
+/* control register bit */
+#define VTC_CTL_FIP (1 << 6) /* field id output polarity */
+#define VTC_CTL_ACP (1 << 5) /* active chroma output polarity */
+#define VTC_CTL_AVP (1 << 4) /* active video output polarity */
+#define VTC_CTL_HSP (1 << 3) /* hori sync output polarity */
+#define VTC_CTL_VSP (1 << 2) /* vert sync output polarity */
+#define VTC_CTL_HBP (1 << 1) /* hori blank output polarity */
+#define VTC_CTL_VBP (1 << 0) /* vert blank output polarity */
+
+#define VTC_CTL_FIPSS (1 << 26) /* field id output polarity source */
+#define VTC_CTL_ACPSS (1 << 25) /* active chroma out polarity source */
+#define VTC_CTL_AVPSS (1 << 24) /* active video out polarity source */
+#define VTC_CTL_HSPSS (1 << 23) /* hori sync out polarity source */
+#define VTC_CTL_VSPSS (1 << 22) /* vert sync out polarity source */
+#define VTC_CTL_HBPSS (1 << 21) /* hori blank out polarity source */
+#define VTC_CTL_VBPSS (1 << 20) /* vert blank out polarity source */
+
+#define VTC_CTL_VCSS (1 << 18) /* chroma source select */
+#define VTC_CTL_VASS (1 << 17) /* vertical offset source select */
+#define VTC_CTL_VBSS (1 << 16) /* vertical sync end source select */
+#define VTC_CTL_VSSS (1 << 15) /* vertical sync start source select */
+#define VTC_CTL_VFSS (1 << 14) /* vertical active size source select */
+#define VTC_CTL_VTSS (1 << 13) /* vertical frame size source select */
+
+#define VTC_CTL_HBSS (1 << 11) /* horiz sync end source select */
+#define VTC_CTL_HSSS (1 << 10) /* horiz sync start source select */
+#define VTC_CTL_HFSS (1 << 9) /* horiz active size source select */
+#define VTC_CTL_HTSS (1 << 8) /* horiz frame size source select */
+
+#define VTC_CTL_GE (1 << 2) /* vtc generator enable */
+#define VTC_CTL_RU (1 << 1) /* vtc register update */
+
+/* vtc generator horizontal 1 */
+#define VTC_GH1_BPSTART_MASK 0x1fff0000 /* horiz back porch start */
+#define VTC_GH1_BPSTART_SHIFT 16
+#define VTC_GH1_SYNCSTART_MASK 0x00001fff
+
+/* vtc generator vertical 1 (field 0) */
+#define VTC_GV1_BPSTART_MASK 0x1fff0000 /* vertical back porch start */
+#define VTC_GV1_BPSTART_SHIFT 16
+#define VTC_GV1_SYNCSTART_MASK 0x00001fff
+
+/* vtc generator/detector vblank/vsync horizontal offset registers */
+#define VTC_XVXHOX_HEND_MASK 0x1fff0000 /* horiz offset end */
+#define VTC_XVXHOX_HEND_SHIFT 16 /* horiz offset end shift */
+#define VTC_XVXHOX_HSTART_MASK 0x00001fff /* horiz offset start */
+
+/* reset register bit definition */
+#define VTC_RESET_RESET BIT(31) /* Software Reset */
+
+/* interrupt status/enable register bit definition */
+#define VTC_IXR_FSYNC15 BIT(31) /* frame sync interrupt 15 */
+#define VTC_IXR_FSYNC14 (1 << 30) /* frame sync interrupt 14 */
+#define VTC_IXR_FSYNC13 (1 << 29) /* frame sync interrupt 13 */
+#define VTC_IXR_FSYNC12 (1 << 28) /* frame sync interrupt 12 */
+#define VTC_IXR_FSYNC11 (1 << 27) /* frame sync interrupt 11 */
+#define VTC_IXR_FSYNC10 (1 << 26) /* frame sync interrupt 10 */
+#define VTC_IXR_FSYNC09 (1 << 25) /* frame sync interrupt 09 */
+#define VTC_IXR_FSYNC08 (1 << 24) /* frame sync interrupt 08 */
+#define VTC_IXR_FSYNC07 (1 << 23) /* frame sync interrupt 07 */
+#define VTC_IXR_FSYNC06 (1 << 22) /* frame sync interrupt 06 */
+#define VTC_IXR_FSYNC05 (1 << 21) /* frame sync interrupt 05 */
+#define VTC_IXR_FSYNC04 (1 << 20) /* frame sync interrupt 04 */
+#define VTC_IXR_FSYNC03 (1 << 19) /* frame sync interrupt 03 */
+#define VTC_IXR_FSYNC02 (1 << 18) /* frame sync interrupt 02 */
+#define VTC_IXR_FSYNC01 (1 << 17) /* frame sync interrupt 01 */
+#define VTC_IXR_FSYNC00 (1 << 16) /* frame sync interrupt 00 */
+#define VTC_IXR_FSYNCALL_MASK (VTC_IXR_FSYNC00 | \
+ VTC_IXR_FSYNC01 | \
+ VTC_IXR_FSYNC02 | \
+ VTC_IXR_FSYNC03 | \
+ VTC_IXR_FSYNC04 | \
+ VTC_IXR_FSYNC05 | \
+ VTC_IXR_FSYNC06 | \
+ VTC_IXR_FSYNC07 | \
+ VTC_IXR_FSYNC08 | \
+ VTC_IXR_FSYNC09 | \
+ VTC_IXR_FSYNC10 | \
+ VTC_IXR_FSYNC11 | \
+ VTC_IXR_FSYNC12 | \
+ VTC_IXR_FSYNC13 | \
+ VTC_IXR_FSYNC14 | \
+ VTC_IXR_FSYNC15)
+
+#define VTC_IXR_G_AV (1 << 13) /* generator actv video intr */
+#define VTC_IXR_G_VBLANK (1 << 12) /* generator vblank interrupt */
+#define VTC_IXR_G_ALL_MASK (VTC_IXR_G_AV | \
+ VTC_IXR_G_VBLANK) /* all generator intr */
+
+#define VTC_IXR_D_AV (1 << 11) /* detector active video intr */
+#define VTC_IXR_D_VBLANK (1 << 10) /* detector vblank interrupt */
+#define VTC_IXR_D_ALL_MASK (VTC_IXR_D_AV | \
+ VTC_IXR_D_VBLANK) /* all detector intr */
+
+#define VTC_IXR_LOL (1 << 9) /* lock loss */
+#define VTC_IXR_LO (1 << 8) /* lock */
+#define VTC_IXR_LOCKALL_MASK (VTC_IXR_LOL | \
+ VTC_IXR_LO) /* all signal lock intr */
+
+#define VTC_IXR_ACL (1 << 21) /* active chroma signal lock */
+#define VTC_IXR_AVL (1 << 20) /* active video signal lock */
+#define VTC_IXR_HSL (1 << 19) /* horizontal sync signal lock */
+#define VTC_IXR_VSL (1 << 18) /* vertical sync signal lock */
+#define VTC_IXR_HBL (1 << 17) /* horizontal blank signal lock */
+#define VTC_IXR_VBL (1 << 16) /* vertical blank signal lock */
+
+#define VTC_GENC_INTERL BIT(6) /* Interlaced bit in VTC_GENC */
+/* mask for all interrupts */
+#define VTC_IXR_ALLINTR_MASK (VTC_IXR_FSYNCALL_MASK | \
+ VTC_IXR_G_ALL_MASK | \
+ VTC_IXR_D_ALL_MASK | \
+ VTC_IXR_LOCKALL_MASK)
+/**
+ * struct xilinx_vtc - Xilinx VTC object
+ *
+ * @base: base addr
+ * @irq: irq
+ * @vblank_fn: vblank handler func
+ * @vblank_data: vblank handler private data
+ */
+struct xilinx_vtc {
+ void __iomem *base;
+ int irq;
+ void (*vblank_fn)(void *);
+ void *vblank_data;
+};
+
+/**
+ * struct xilinx_vtc_polarity - vtc polarity config
+ *
+ * @active_chroma: active chroma polarity
+ * @active_video: active video polarity
+ * @field_id: field ID polarity
+ * @vblank: vblank polarity
+ * @vsync: vsync polarity
+ * @hblank: hblank polarity
+ * @hsync: hsync polarity
+ */
+struct xilinx_vtc_polarity {
+ u8 active_chroma;
+ u8 active_video;
+ u8 field_id;
+ u8 vblank;
+ u8 vsync;
+ u8 hblank;
+ u8 hsync;
+};
+
+/**
+ * struct xilinx_vtc_hori_offset - vtc horizontal offset config
+ *
+ * @v0blank_hori_start: vblank horizontal start (field 0)
+ * @v0blank_hori_end: vblank horizontal end (field 0)
+ * @v0sync_hori_start: vsync horizontal start (field 0)
+ * @v0sync_hori_end: vsync horizontal end (field 0)
+ * @v1blank_hori_start: vblank horizontal start (field 1)
+ * @v1blank_hori_end: vblank horizontal end (field 1)
+ * @v1sync_hori_start: vsync horizontal start (field 1)
+ * @v1sync_hori_end: vsync horizontal end (field 1)
+ */
+struct xilinx_vtc_hori_offset {
+ u16 v0blank_hori_start;
+ u16 v0blank_hori_end;
+ u16 v0sync_hori_start;
+ u16 v0sync_hori_end;
+ u16 v1blank_hori_start;
+ u16 v1blank_hori_end;
+ u16 v1sync_hori_start;
+ u16 v1sync_hori_end;
+};
+
+/**
+ * struct xilinx_vtc_src_config - vtc source config
+ *
+ * @field_id_pol: field id polarity source
+ * @active_chroma_pol: active chroma polarity source
+ * @active_video_pol: active video polarity source
+ * @hsync_pol: hsync polarity source
+ * @vsync_pol: vsync polarity source
+ * @hblank_pol: hblank polarity source
+ * @vblank_pol: vblank polarity source
+ * @vchroma: vchroma polarity start source
+ * @vactive: vactive size source
+ * @vbackporch: vbackporch start source
+ * @vsync: vsync start source
+ * @vfrontporch: vfrontporch start source
+ * @vtotal: vtotal size source
+ * @hactive: hactive start source
+ * @hbackporch: hbackporch start source
+ * @hsync: hsync start source
+ * @hfrontporch: hfrontporch start source
+ * @htotal: htotal size source
+ */
+struct xilinx_vtc_src_config {
+ u8 field_id_pol;
+ u8 active_chroma_pol;
+ u8 active_video_pol;
+ u8 hsync_pol;
+ u8 vsync_pol;
+ u8 hblank_pol;
+ u8 vblank_pol;
+
+ u8 vchroma;
+ u8 vactive;
+ u8 vbackporch;
+ u8 vsync;
+ u8 vfrontporch;
+ u8 vtotal;
+
+ u8 hactive;
+ u8 hbackporch;
+ u8 hsync;
+ u8 hfrontporch;
+ u8 htotal;
+};
+
+/* configure polarity of signals */
+static void xilinx_vtc_config_polarity(struct xilinx_vtc *vtc,
+ struct xilinx_vtc_polarity *polarity)
+{
+ u32 reg = 0;
+
+ if (polarity->active_chroma)
+ reg |= VTC_CTL_ACP;
+ if (polarity->active_video)
+ reg |= VTC_CTL_AVP;
+ if (polarity->field_id)
+ reg |= VTC_CTL_FIP;
+ if (polarity->vblank)
+ reg |= VTC_CTL_VBP;
+ if (polarity->vsync)
+ reg |= VTC_CTL_VSP;
+ if (polarity->hblank)
+ reg |= VTC_CTL_HBP;
+ if (polarity->hsync)
+ reg |= VTC_CTL_HSP;
+
+ xilinx_drm_writel(vtc->base, VTC_GPOL, reg);
+}
+
+/* configure horizontal offset */
+static void
+xilinx_vtc_config_hori_offset(struct xilinx_vtc *vtc,
+ struct xilinx_vtc_hori_offset *hori_offset)
+{
+ u32 reg;
+
+ /* Calculate and update Generator VBlank Hori field 0 */
+ reg = hori_offset->v0blank_hori_start & VTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_offset->v0blank_hori_end << VTC_XVXHOX_HEND_SHIFT) &
+ VTC_XVXHOX_HEND_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GVBHOFF_F0, reg);
+
+ /* Calculate and update Generator VSync Hori field 0 */
+ reg = hori_offset->v0sync_hori_start & VTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_offset->v0sync_hori_end << VTC_XVXHOX_HEND_SHIFT) &
+ VTC_XVXHOX_HEND_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GVSHOFF_F0, reg);
+
+ /* Calculate and update Generator VBlank Hori field 1 */
+ reg = hori_offset->v1blank_hori_start & VTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_offset->v1blank_hori_end << VTC_XVXHOX_HEND_SHIFT) &
+ VTC_XVXHOX_HEND_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GVBHOFF_F1, reg);
+
+ /* Calculate and update Generator VSync Hori field 1 */
+ reg = hori_offset->v1sync_hori_start & VTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_offset->v1sync_hori_end << VTC_XVXHOX_HEND_SHIFT) &
+ VTC_XVXHOX_HEND_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GVSHOFF_F1, reg);
+}
+
+/* configure source */
+static void xilinx_vtc_config_src(struct xilinx_vtc *vtc,
+ struct xilinx_vtc_src_config *src_config)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+
+ if (src_config->field_id_pol)
+ reg |= VTC_CTL_FIPSS;
+ if (src_config->active_chroma_pol)
+ reg |= VTC_CTL_ACPSS;
+ if (src_config->active_video_pol)
+ reg |= VTC_CTL_AVPSS;
+ if (src_config->hsync_pol)
+ reg |= VTC_CTL_HSPSS;
+ if (src_config->vsync_pol)
+ reg |= VTC_CTL_VSPSS;
+ if (src_config->hblank_pol)
+ reg |= VTC_CTL_HBPSS;
+ if (src_config->vblank_pol)
+ reg |= VTC_CTL_VBPSS;
+
+ if (src_config->vchroma)
+ reg |= VTC_CTL_VCSS;
+ if (src_config->vactive)
+ reg |= VTC_CTL_VASS;
+ if (src_config->vbackporch)
+ reg |= VTC_CTL_VBSS;
+ if (src_config->vsync)
+ reg |= VTC_CTL_VSSS;
+ if (src_config->vfrontporch)
+ reg |= VTC_CTL_VFSS;
+ if (src_config->vtotal)
+ reg |= VTC_CTL_VTSS;
+
+ if (src_config->hbackporch)
+ reg |= VTC_CTL_HBSS;
+ if (src_config->hsync)
+ reg |= VTC_CTL_HSSS;
+ if (src_config->hfrontporch)
+ reg |= VTC_CTL_HFSS;
+ if (src_config->htotal)
+ reg |= VTC_CTL_HTSS;
+
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg);
+}
+
+/* enable vtc */
+void xilinx_vtc_enable(struct xilinx_vtc *vtc)
+{
+ u32 reg;
+
+ /* enable a generator only for now */
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg | VTC_CTL_GE);
+}
+
+/* disable vtc */
+void xilinx_vtc_disable(struct xilinx_vtc *vtc)
+{
+ u32 reg;
+
+ /* disable a generator only for now */
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg & ~VTC_CTL_GE);
+}
+
+/* configure vtc signals */
+void xilinx_vtc_config_sig(struct xilinx_vtc *vtc,
+ struct videomode *vm)
+{
+ u32 reg;
+ u32 htotal, hactive, hsync_start, hbackporch_start;
+ u32 vtotal, vactive, vsync_start, vbackporch_start;
+ struct xilinx_vtc_hori_offset hori_offset;
+ struct xilinx_vtc_polarity polarity;
+ struct xilinx_vtc_src_config src;
+
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg & ~VTC_CTL_RU);
+
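+ /*
+ * Derive the generator timing from the videomode. As a worked
+ * example, the standard 1920x1080@60 mode (hfront_porch = 88,
+ * hsync_len = 44, hback_porch = 148) gives htotal = 2200,
+ * hsync_start = 2008, and hbackporch_start = 2052.
+ */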
+ htotal = vm->hactive + vm->hfront_porch + vm->hsync_len +
+ vm->hback_porch;
+ vtotal = vm->vactive + vm->vfront_porch + vm->vsync_len +
+ vm->vback_porch;
+
+ hactive = vm->hactive;
+ vactive = vm->vactive;
+
+ hsync_start = vm->hactive + vm->hfront_porch;
+ vsync_start = vm->vactive + vm->vfront_porch;
+
+ hbackporch_start = hsync_start + vm->hsync_len;
+ vbackporch_start = vsync_start + vm->vsync_len;
+
+ reg = htotal & 0x1fff;
+ xilinx_drm_writel(vtc->base, VTC_GHSIZE, reg);
+
+ reg = vtotal & 0x1fff;
+ reg |= reg << VTC_GV1_BPSTART_SHIFT;
+ xilinx_drm_writel(vtc->base, VTC_GVSIZE, reg);
+
+ DRM_DEBUG_DRIVER("ht: %d, vt: %d\n", htotal, vtotal);
+
+ reg = hactive & 0x1fff;
+ reg |= (vactive & 0x1fff) << 16;
+ xilinx_drm_writel(vtc->base, VTC_GASIZE, reg);
+
+ DRM_DEBUG_DRIVER("ha: %d, va: %d\n", hactive, vactive);
+
+ reg = hsync_start & VTC_GH1_SYNCSTART_MASK;
+ reg |= (hbackporch_start << VTC_GH1_BPSTART_SHIFT) &
+ VTC_GH1_BPSTART_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GHSYNC, reg);
+
+ DRM_DEBUG_DRIVER("hs: %d, hb: %d\n", hsync_start, hbackporch_start);
+
+ reg = vsync_start & VTC_GV1_SYNCSTART_MASK;
+ reg |= (vbackporch_start << VTC_GV1_BPSTART_SHIFT) &
+ VTC_GV1_BPSTART_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GVSYNC_F0, reg);
+ DRM_DEBUG_DRIVER("vs: %d, vb: %d\n", vsync_start, vbackporch_start);
+
+ hori_offset.v0blank_hori_start = hactive;
+ hori_offset.v0blank_hori_end = hactive;
+ hori_offset.v0sync_hori_start = hsync_start;
+ hori_offset.v0sync_hori_end = hsync_start;
+
+ hori_offset.v1blank_hori_start = hactive;
+ hori_offset.v1blank_hori_end = hactive;
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ hori_offset.v1sync_hori_start = hsync_start - (htotal / 2);
+ hori_offset.v1sync_hori_end = hsync_start - (htotal / 2);
+ xilinx_drm_writel(vtc->base, VTC_GVSYNC_F1, reg);
+ reg = xilinx_drm_readl(vtc->base, VTC_GENC) | VTC_GENC_INTERL;
+ xilinx_drm_writel(vtc->base, VTC_GENC, reg);
+ } else {
+ hori_offset.v1sync_hori_start = hsync_start;
+ hori_offset.v1sync_hori_end = hsync_start;
+ reg = xilinx_drm_readl(vtc->base, VTC_GENC) & ~VTC_GENC_INTERL;
+ xilinx_drm_writel(vtc->base, VTC_GENC, reg);
+ }
+
+ xilinx_vtc_config_hori_offset(vtc, &hori_offset);
+ /* set up polarity */
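+ /*
+ * struct videomode carries no blank polarity flags, so the
+ * hblank/vblank polarities follow the corresponding sync flags.
+ */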
+ memset(&polarity, 0x0, sizeof(polarity));
+ polarity.hsync = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+ polarity.vsync = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+ polarity.hblank = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+ polarity.vblank = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+ polarity.active_video = 1;
+ polarity.active_chroma = 1;
+ polarity.field_id = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
+ xilinx_vtc_config_polarity(vtc, &polarity);
+
+ /* set up src config */
+ memset(&src, 0x0, sizeof(src));
+ src.vchroma = 1;
+ src.vactive = 1;
+ src.vbackporch = 1;
+ src.vsync = 1;
+ src.vfrontporch = 1;
+ src.vtotal = 1;
+ src.hactive = 1;
+ src.hbackporch = 1;
+ src.hsync = 1;
+ src.hfrontporch = 1;
+ src.htotal = 1;
+ xilinx_vtc_config_src(vtc, &src);
+
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg | VTC_CTL_RU);
+}
+
+/* reset vtc */
+void xilinx_vtc_reset(struct xilinx_vtc *vtc)
+{
+ u32 reg;
+
+ xilinx_drm_writel(vtc->base, VTC_RESET, VTC_RESET_RESET);
+
+ /* enable register update */
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg | VTC_CTL_RU);
+}
+
+/* enable vblank interrupt */
+void xilinx_vtc_vblank_enable(struct xilinx_vtc *vtc)
+{
+ xilinx_drm_writel(vtc->base, VTC_IER, VTC_IXR_G_VBLANK |
+ xilinx_drm_readl(vtc->base, VTC_IER));
+}
+EXPORT_SYMBOL_GPL(xilinx_vtc_vblank_enable);
+
+/* enable interrupt */
+static inline void xilinx_vtc_intr_enable(struct xilinx_vtc *vtc, u32 intr)
+{
+ xilinx_drm_writel(vtc->base, VTC_IER, (intr & VTC_IXR_ALLINTR_MASK) |
+ xilinx_drm_readl(vtc->base, VTC_IER));
+}
+
+/* disable interrupt */
+static inline void xilinx_vtc_intr_disable(struct xilinx_vtc *vtc, u32 intr)
+{
+ xilinx_drm_writel(vtc->base, VTC_IER, ~(intr & VTC_IXR_ALLINTR_MASK) &
+ xilinx_drm_readl(vtc->base, VTC_IER));
+}
+
+/* disable vblank interrupt */
+void xilinx_vtc_vblank_disable(struct xilinx_vtc *vtc)
+{
+ xilinx_drm_writel(vtc->base, VTC_IER, ~(VTC_IXR_G_VBLANK) &
+ xilinx_drm_readl(vtc->base, VTC_IER));
+}
+EXPORT_SYMBOL_GPL(xilinx_vtc_vblank_disable);
+
+/* get pending interrupts that are enabled */
+u32 xilinx_vtc_intr_get(struct xilinx_vtc *vtc)
+{
+ return xilinx_drm_readl(vtc->base, VTC_IER) &
+ xilinx_drm_readl(vtc->base, VTC_ISR) & VTC_IXR_ALLINTR_MASK;
+}
+EXPORT_SYMBOL_GPL(xilinx_vtc_intr_get);
+
+/* clear interrupt */
+void xilinx_vtc_intr_clear(struct xilinx_vtc *vtc, u32 intr)
+{
+ xilinx_drm_writel(vtc->base, VTC_ISR, intr & VTC_IXR_ALLINTR_MASK);
+}
+EXPORT_SYMBOL_GPL(xilinx_vtc_intr_clear);
+
+/* interrupt handler */
+static irqreturn_t xilinx_vtc_intr_handler(int irq, void *data)
+{
+ struct xilinx_vtc *vtc = data;
+
+ u32 intr = xilinx_vtc_intr_get(vtc);
+
+ if (!intr)
+ return IRQ_NONE;
+
+ if ((intr & VTC_IXR_G_VBLANK) && (vtc->vblank_fn))
+ vtc->vblank_fn(vtc->vblank_data);
+
+ xilinx_vtc_intr_clear(vtc, intr);
+
+ return IRQ_HANDLED;
+}
+
+/* enable vblank interrupt */
+void xilinx_vtc_enable_vblank_intr(struct xilinx_vtc *vtc,
+ void (*vblank_fn)(void *),
+ void *vblank_priv)
+{
+ vtc->vblank_fn = vblank_fn;
+ vtc->vblank_data = vblank_priv;
+ xilinx_vtc_intr_enable(vtc, VTC_IXR_G_VBLANK);
+}
+
+/* disable vblank interrupt */
+void xilinx_vtc_disable_vblank_intr(struct xilinx_vtc *vtc)
+{
+ xilinx_vtc_intr_disable(vtc, VTC_IXR_G_VBLANK);
+ vtc->vblank_data = NULL;
+ vtc->vblank_fn = NULL;
+}
+
+static const struct of_device_id xilinx_vtc_of_match[] = {
+ { .compatible = "xlnx,v-tc-5.01.a" },
+ { /* end of table */ },
+};
+
+/* probe vtc */
+struct xilinx_vtc *xilinx_vtc_probe(struct device *dev,
+ struct device_node *node)
+{
+ struct xilinx_vtc *vtc;
+ const struct of_device_id *match;
+ struct resource res;
+ int ret;
+
+ match = of_match_node(xilinx_vtc_of_match, node);
+ if (!match) {
+ dev_err(dev, "failed to match the device node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ vtc = devm_kzalloc(dev, sizeof(*vtc), GFP_KERNEL);
+ if (!vtc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to of_address_to_resource\n");
+ return ERR_PTR(ret);
+ }
+
+ vtc->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(vtc->base))
+ return ERR_CAST(vtc->base);
+
+ xilinx_vtc_intr_disable(vtc, VTC_IXR_ALLINTR_MASK);
+ vtc->irq = irq_of_parse_and_map(node, 0);
+ if (vtc->irq > 0) {
+ ret = devm_request_irq(dev, vtc->irq, xilinx_vtc_intr_handler,
+ IRQF_SHARED, "xilinx_vtc", vtc);
+ if (ret) {
+ dev_warn(dev, "failed to requet_irq() for vtc\n");
+ return ERR_PTR(ret);
+ }
+ }
+
+ xilinx_vtc_reset(vtc);
+
+ return vtc;
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_vtc.h b/drivers/gpu/drm/xilinx/xilinx_vtc.h
new file mode 100644
index 000000000000..33b4eb43513d
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_vtc.h
@@ -0,0 +1,44 @@
+/*
+ * Video Timing Controller Header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_VTC_H_
+#define _XILINX_VTC_H_
+
+struct xilinx_vtc;
+
+struct videomode;
+
+void xilinx_vtc_config_sig(struct xilinx_vtc *vtc,
+ struct videomode *vm);
+void xilinx_vtc_enable_vblank_intr(struct xilinx_vtc *vtc,
+ void (*fn)(void *), void *data);
+void xilinx_vtc_disable_vblank_intr(struct xilinx_vtc *vtc);
+void xilinx_vtc_reset(struct xilinx_vtc *vtc);
+void xilinx_vtc_enable(struct xilinx_vtc *vtc);
+void xilinx_vtc_disable(struct xilinx_vtc *vtc);
+
+struct device;
+struct device_node;
+
+struct xilinx_vtc *xilinx_vtc_probe(struct device *dev,
+ struct device_node *node);
+void xilinx_vtc_vblank_enable(struct xilinx_vtc *vtc);
+void xilinx_vtc_vblank_disable(struct xilinx_vtc *vtc);
+u32 xilinx_vtc_intr_get(struct xilinx_vtc *vtc);
+void xilinx_vtc_intr_clear(struct xilinx_vtc *vtc, u32 intr);
+
+#endif /* _XILINX_VTC_H_ */
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
new file mode 100644
index 000000000000..ecc5f90caf61
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -0,0 +1,106 @@
+config DRM_XLNX
+ tristate "Xilinx DRM KMS Driver"
+ depends on DRM && OF
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ help
+ Xilinx DRM KMS driver. Choose this option if you have
+ a Xilinx SoC with a hardened display pipeline or a soft
+ display pipeline built from Xilinx IPs in the FPGA. This
+ module provides the kernel mode setting functionality
+ for Xilinx display drivers.
+
+config DRM_XLNX_BRIDGE
+ tristate "Xilinx DRM KMS bridge"
+ depends on DRM_XLNX
+ help
+ Xilinx DRM KMS bridge. This module provides interfaces that
+ enable inter-module communication. Choose this option when a
+ provider driver needs the Xilinx bridge interface.
+
+config DRM_XLNX_BRIDGE_DEBUG_FS
+ bool "Xilinx DRM KMS bridge debugfs"
+ depends on DEBUG_FS && DRM_XLNX_BRIDGE
+ help
+ Enable the debugfs code for the Xilinx bridge. The debugfs code
+ enables debugging and testing related features. It exposes some
+ low level controls to user space to help with test automation,
+ and it can also provide additional diagnostic or statistical
+ information.
+
+config DRM_ZYNQMP_DPSUB
+ tristate "ZynqMP DP Subsystem Driver"
+ depends on ARCH_ZYNQMP && OF && DRM_XLNX && COMMON_CLK
+ select DMADEVICES
+ select XILINX_DMA_ENGINES
+ select XILINX_DPDMA
+ select PHY_XILINX_ZYNQMP
+ help
+ DRM KMS driver for the ZynqMP DP Subsystem controller. Choose
+ this option if you have a Xilinx ZynqMP SoC with the DisplayPort
+ subsystem. The driver provides the kernel mode setting
+ functionality for the ZynqMP DP subsystem.
+
+config DRM_XLNX_DSI
+ tristate "Xilinx DRM DSI Subsystem Driver"
+ depends on DRM_XLNX
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ select BACKLIGHT_LCD_SUPPORT
+ select BACKLIGHT_CLASS_DEVICE
+ select DRM_PANEL_SIMPLE
+ help
+ DRM driver for Xilinx MIPI-DSI.
+
+config DRM_XLNX_MIXER
+ tristate "Xilinx DRM Mixer Driver"
+ depends on DRM_XLNX
+ select VIDEOMODE_HELPERS
+ help
+ DRM driver for the Xilinx Video Mixer.
+
+config DRM_XLNX_PL_DISP
+ tristate "Xilinx DRM PL display driver"
+ depends on DRM_XLNX
+ select VIDEOMODE_HELPERS
+ help
+ DRM driver for the Xilinx PL display pipeline. It provides
+ DRM CRTC and plane objects to the display pipeline. Choose
+ this option if your display pipeline needs one CRTC and one
+ plane object with a single DMA engine connected.
+
+config DRM_XLNX_SDI
+ tristate "Xilinx DRM SDI Subsystem Driver"
+ depends on DRM_XLNX
+ help
+ DRM driver for Xilinx SDI Tx Subsystem.
+
+config DRM_XLNX_BRIDGE_CSC
+ tristate "Xilinx DRM CSC Driver"
+ depends on DRM_XLNX_BRIDGE
+ help
+ DRM bridge driver for the color space converter of the VPSS.
+ Choose this option if the color space converter is connected
+ to an encoder. The driver provides set/get resolution and
+ color format functionality through the bridge layer.
+
+config DRM_XLNX_BRIDGE_SCALER
+ tristate "Xilinx DRM Scaler Driver"
+ depends on DRM_XLNX_BRIDGE
+ help
+ DRM bridge driver for the scaler of the VPSS. Choose this
+ option if the scaler is connected to an encoder. The driver
+ provides upscaling, downscaling, and pass-through (no scaling)
+ functionality through the bridge layer.
+
+config DRM_XLNX_BRIDGE_VTC
+ tristate "Xilinx DRM VTC Driver"
+ depends on DRM_XLNX_BRIDGE
+ help
+ DRM bridge driver for the Xilinx Video Timing Controller. Choose
+ this option to make the VTC part of the CRTC in the display
+ pipeline. Currently the Xilinx Video Mixer and Xilinx PL display
+ CRTC drivers support it. This driver provides the ability
+ to generate timings through the bridge layer.
diff --git a/drivers/gpu/drm/xlnx/Makefile b/drivers/gpu/drm/xlnx/Makefile
new file mode 100644
index 000000000000..1d80be7d3e70
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/Makefile
@@ -0,0 +1,21 @@
+xlnx_drm-objs += xlnx_crtc.o xlnx_drv.o xlnx_fb.o xlnx_gem.o
+xlnx_drm-$(CONFIG_DRM_XLNX_BRIDGE) += xlnx_bridge.o
+obj-$(CONFIG_DRM_XLNX) += xlnx_drm.o
+
+obj-$(CONFIG_DRM_XLNX_BRIDGE_CSC) += xlnx_csc.o
+
+obj-$(CONFIG_DRM_XLNX_BRIDGE_SCALER) += xlnx_scaler.o
+
+obj-$(CONFIG_DRM_XLNX_BRIDGE_VTC) += xlnx_vtc.o
+
+obj-$(CONFIG_DRM_XLNX_DSI) += xlnx_dsi.o
+
+obj-$(CONFIG_DRM_XLNX_MIXER) += xlnx_mixer.o
+
+obj-$(CONFIG_DRM_XLNX_PL_DISP) += xlnx_pl_disp.o
+
+xlnx-sdi-objs += xlnx_sdi.o xlnx_sdi_timing.o
+obj-$(CONFIG_DRM_XLNX_SDI) += xlnx-sdi.o
+
+zynqmp-dpsub-objs += zynqmp_disp.o zynqmp_dpsub.o zynqmp_dp.o
+obj-$(CONFIG_DRM_ZYNQMP_DPSUB) += zynqmp-dpsub.o
diff --git a/drivers/gpu/drm/xlnx/xlnx_bridge.c b/drivers/gpu/drm/xlnx/xlnx_bridge.c
new file mode 100644
index 000000000000..6ee462ada676
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_bridge.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM bridge driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/list.h>
+
+#include "xlnx_bridge.h"
+#include "xlnx_drv.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * Similar to a drm bridge, but this can be used by any DRM driver, and
+ * non-DRM drivers may use it as well. No complex topology is modeled,
+ * so the Xilinx bridge device is assumed to be directly attached to its
+ * client. The client calls the Xilinx bridge functions explicitly where
+ * needed, as opposed to drm bridge functions, which the DRM core calls
+ * implicitly.
+ * One Xlnx bridge can be owned by only one driver at a time.
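+ *
+ * An illustrative client sequence (error handling omitted):
+ *
+ *	bridge = of_xlnx_bridge_get(bridge_np);
+ *	xlnx_bridge_set_input(bridge, width, height, bus_fmt);
+ *	xlnx_bridge_enable(bridge);
+ *	...
+ *	xlnx_bridge_disable(bridge);
+ *	of_xlnx_bridge_put(bridge);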
+ */
+
+/**
+ * struct xlnx_bridge_helper - Xilinx bridge helper
+ * @xlnx_bridges: list of Xilinx bridges
+ * @lock: lock to protect @xlnx_bridges
+ * @refcnt: reference count
+ * @error: flag if in error state
+ */
+struct xlnx_bridge_helper {
+ struct list_head xlnx_bridges;
+ struct mutex lock; /* lock for @xlnx_bridges */
+ unsigned int refcnt;
+ bool error;
+};
+
+static struct xlnx_bridge_helper helper;
+
+struct videomode;
+/*
+ * Client functions
+ */
+
+/**
+ * xlnx_bridge_enable - Enable the bridge
+ * @bridge: bridge to enable
+ *
+ * Enable bridge.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_enable(struct xlnx_bridge *bridge)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->enable)
+ return bridge->enable(bridge);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_enable);
+
+/**
+ * xlnx_bridge_disable - Disable the bridge
+ * @bridge: bridge to disable
+ *
+ * Disable bridge.
+ */
+void xlnx_bridge_disable(struct xlnx_bridge *bridge)
+{
+ if (!bridge)
+ return;
+
+ if (helper.error)
+ return;
+
+ if (bridge->disable)
+ bridge->disable(bridge);
+}
+EXPORT_SYMBOL(xlnx_bridge_disable);
+
+/**
+ * xlnx_bridge_set_input - Set the input of @bridge
+ * @bridge: bridge to set
+ * @width: width
+ * @height: height
+ * @bus_fmt: bus format (e.g., MEDIA_BUS_FMT_*)
+ *
+ * Set the bridge input with height / width / format.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->set_input)
+ return bridge->set_input(bridge, width, height, bus_fmt);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_set_input);
+
+/**
+ * xlnx_bridge_get_input_fmts - Get the supported input formats
+ * @bridge: bridge to set
+ * @fmts: pointer to formats
+ * @count: pointer to format count
+ *
+ * Get the list of supported input bus formats.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->get_input_fmts)
+ return bridge->get_input_fmts(bridge, fmts, count);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_get_input_fmts);
+
+/**
+ * xlnx_bridge_set_output - Set the output of @bridge
+ * @bridge: bridge to set
+ * @width: width
+ * @height: height
+ * @bus_fmt: bus format (e.g., MEDIA_BUS_FMT_*)
+ *
+ * Set the bridge output with height / width / format.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_set_output(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->set_output)
+ return bridge->set_output(bridge, width, height, bus_fmt);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_set_output);
+
+/**
+ * xlnx_bridge_get_output_fmts - Get the supported output formats
+ * @bridge: bridge to set
+ * @fmts: pointer to formats
+ * @count: pointer to format count
+ *
+ * Get the list of supported output bus formats.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->get_output_fmts)
+ return bridge->get_output_fmts(bridge, fmts, count);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_get_output_fmts);
+
+/**
+ * xlnx_bridge_set_timing - Set the video timing
+ * @bridge: bridge to set
+ * @vm: Videomode
+ *
+ * Set the video mode so that timing can be generated using this
+ * by the video timing controller.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_set_timing(struct xlnx_bridge *bridge, struct videomode *vm)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->set_timing)
+ return bridge->set_timing(bridge, vm);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_set_timing);
+
+/**
+ * of_xlnx_bridge_get - Get the corresponding Xlnx bridge instance
+ * @bridge_np: The device node of the bridge device
+ *
+ * The function walks through the Xlnx bridge list and returns the
+ * registered bridge, if any, that matches the device node. The returned
+ * bridge is marked as owned and will not be accessible to others.
+ *
+ * Return: the matching Xlnx bridge instance, or NULL
+ */
+struct xlnx_bridge *of_xlnx_bridge_get(struct device_node *bridge_np)
+{
+ struct xlnx_bridge *found = NULL;
+ struct xlnx_bridge *bridge;
+
+ if (helper.error)
+ return NULL;
+
+ mutex_lock(&helper.lock);
+ list_for_each_entry(bridge, &helper.xlnx_bridges, list) {
+ if (bridge->of_node == bridge_np && !bridge->owned) {
+ found = bridge;
+ bridge->owned = true;
+ break;
+ }
+ }
+ mutex_unlock(&helper.lock);
+
+ return found;
+}
+EXPORT_SYMBOL_GPL(of_xlnx_bridge_get);
+
+/**
+ * of_xlnx_bridge_put - Put the Xlnx bridge instance
+ * @bridge: Xlnx bridge instance to release
+ *
+ * Return the @bridge. After this, the bridge will be available for
+ * other drivers to use.
+ */
+void of_xlnx_bridge_put(struct xlnx_bridge *bridge)
+{
+ if (WARN_ON(helper.error))
+ return;
+
+ mutex_lock(&helper.lock);
+ WARN_ON(!bridge->owned);
+ bridge->owned = false;
+ mutex_unlock(&helper.lock);
+}
+EXPORT_SYMBOL_GPL(of_xlnx_bridge_put);
+
+#ifdef CONFIG_DRM_XLNX_BRIDGE_DEBUG_FS
+
+#include <linux/debugfs.h>
+
+struct xlnx_bridge_debugfs_dir {
+ struct dentry *dir;
+ int ref_cnt;
+};
+
+static struct xlnx_bridge_debugfs_dir *dir;
+
+struct xlnx_bridge_debugfs_file {
+ struct dentry *file;
+ const char *status;
+};
+
+#define XLNX_BRIDGE_DEBUGFS_MAX_BYTES 16
+
+static ssize_t xlnx_bridge_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct xlnx_bridge *bridge = f->f_inode->i_private;
+ int ret;
+
+ if (size == 0)
+ return -EINVAL;
+
+ if (*pos != 0)
+ return 0;
+
+ if (!bridge->debugfs_file->status)
+ return 0;
+
+ size = min(size, strlen(bridge->debugfs_file->status));
+ ret = copy_to_user(buf, bridge->debugfs_file->status, size);
+ if (ret)
+ return -EFAULT;
+
+ *pos = size + 1;
+ return size;
+}
+
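+/*
+ * The file accepts the textual commands "enable", "disable", and
+ * "set_input <width> <height> <bus_fmt>", where <bus_fmt> is a
+ * numeric MEDIA_BUS_FMT_* bus code.
+ */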
+static ssize_t xlnx_bridge_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct xlnx_bridge *bridge = f->f_inode->i_private;
+ char *cmd;
+ int ret;
+
+ if (*pos != 0 || size == 0)
+ return -EINVAL;
+
+ /* copy the whole command before parsing it in kernel space */
+ cmd = kzalloc(size + 1, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ ret = strncpy_from_user(cmd, buf, size);
+ if (ret < 0) {
+ pr_err("%s %d failed to copy the command\n", __func__, __LINE__);
+ kfree(cmd);
+ return ret;
+ }
+
+ if (!strncmp(cmd, "enable", 6)) {
+ xlnx_bridge_enable(bridge);
+ } else if (!strncmp(cmd, "disable", 7)) {
+ xlnx_bridge_disable(bridge);
+ } else if (!strncmp(cmd, "set_input", 9)) {
+ char *args = cmd;
+ char *w, *h, *fmt_str;
+ u32 width, height, fmt;
+
+ ret = -EINVAL;
+ strsep(&args, " ");
+ w = strsep(&args, " ");
+ h = strsep(&args, " ");
+ fmt_str = strsep(&args, " ");
+ if (w && h && fmt_str) {
+ ret = kstrtouint(w, 0, &width);
+ ret |= kstrtouint(h, 0, &height);
+ ret |= kstrtouint(fmt_str, 0, &fmt);
+ }
+ if (ret) {
+ pr_err("%s %d invalid set_input command\n",
+ __func__, __LINE__);
+ kfree(cmd);
+ return -EINVAL;
+ }
+ xlnx_bridge_set_input(bridge, width, height, fmt);
+ }
+
+ kfree(cmd);
+ return size;
+}
+
+static const struct file_operations xlnx_bridge_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = xlnx_bridge_debugfs_read,
+ .write = xlnx_bridge_debugfs_write,
+};
+
+static int xlnx_bridge_debugfs_register(struct xlnx_bridge *bridge)
+{
+ struct xlnx_bridge_debugfs_file *file;
+ char file_name[32];
+
+ file = kzalloc(sizeof(*file), GFP_KERNEL);
+ if (!file)
+ return -ENOMEM;
+
+ snprintf(file_name, sizeof(file_name), "xlnx_bridge-%s",
+ bridge->of_node->name);
+ file->file = debugfs_create_file(file_name, 0644, dir->dir, bridge,
+ &xlnx_bridge_debugfs_fops);
+ bridge->debugfs_file = file;
+
+ return 0;
+}
+
+static void xlnx_bridge_debugfs_unregister(struct xlnx_bridge *bridge)
+{
+ debugfs_remove(bridge->debugfs_file->file);
+ kfree(bridge->debugfs_file);
+}
+
+static int xlnx_bridge_debugfs_init(void)
+{
+ if (dir) {
+ dir->ref_cnt++;
+ return 0;
+ }
+
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ return -ENOMEM;
+
+ dir->dir = debugfs_create_dir("xlnx-bridge", NULL);
+ if (!dir->dir) {
+ kfree(dir);
+ dir = NULL;
+ return -ENODEV;
+ }
+ dir->ref_cnt++;
+
+ return 0;
+}
+
+static void xlnx_bridge_debugfs_fini(void)
+{
+ if (--dir->ref_cnt)
+ return;
+
+ debugfs_remove_recursive(dir->dir);
+ dir = NULL;
+}
+
+#else
+
+static int xlnx_bridge_debugfs_register(struct xlnx_bridge *bridge)
+{
+ return 0;
+}
+
+static void xlnx_bridge_debugfs_unregister(struct xlnx_bridge *bridge)
+{
+}
+
+static int xlnx_bridge_debugfs_init(void)
+{
+ return 0;
+}
+
+static void xlnx_bridge_debugfs_fini(void)
+{
+}
+
+#endif
+
+/*
+ * Provider functions
+ */
+
+/**
+ * xlnx_bridge_register - Register the bridge instance
+ * @bridge: Xlnx bridge instance to register
+ *
+ * Register @bridge to be available for clients.
+ *
+ * Return: 0 on success. -EPROBE_DEFER if helper is not initialized, or
+ * -EFAULT if in error state.
+ */
+int xlnx_bridge_register(struct xlnx_bridge *bridge)
+{
+ if (!helper.refcnt)
+ return -EPROBE_DEFER;
+
+ if (helper.error)
+ return -EFAULT;
+
+ mutex_lock(&helper.lock);
+ WARN_ON(!bridge->of_node);
+ bridge->owned = false;
+ xlnx_bridge_debugfs_register(bridge);
+ list_add_tail(&bridge->list, &helper.xlnx_bridges);
+ mutex_unlock(&helper.lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xlnx_bridge_register);
+
+/**
+ * xlnx_bridge_unregister - Unregister the bridge instance
+ * @bridge: Xlnx bridge instance to unregister
+ *
+ * Unregister @bridge. The bridge shouldn't be owned by any client
+ * at this point.
+ */
+void xlnx_bridge_unregister(struct xlnx_bridge *bridge)
+{
+ if (helper.error)
+ return;
+
+ mutex_lock(&helper.lock);
+ WARN_ON(bridge->owned);
+ xlnx_bridge_debugfs_unregister(bridge);
+ list_del(&bridge->list);
+ mutex_unlock(&helper.lock);
+}
+EXPORT_SYMBOL_GPL(xlnx_bridge_unregister);
+
+/*
+ * Internal functions: used by Xlnx DRM
+ */
+
+/**
+ * xlnx_bridge_helper_init - Initialize the bridge helper
+ *
+ * Initialize the bridge helper or increment the reference count
+ * if already initialized.
+ *
+ * Return: 0 on success, or -EFAULT if in error state.
+ */
+int xlnx_bridge_helper_init(void)
+{
+ if (helper.refcnt++ > 0) {
+ if (helper.error)
+ return -EFAULT;
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&helper.xlnx_bridges);
+ mutex_init(&helper.lock);
+ helper.error = false;
+
+ if (xlnx_bridge_debugfs_init())
+ pr_err("failed to init xlnx bridge debugfs\n");
+
+ return 0;
+}
+
+/**
+ * xlnx_bridge_helper_fini - Release the bridge helper
+ *
+ * Clean up or decrement the reference of the bridge helper.
+ */
+void xlnx_bridge_helper_fini(void)
+{
+ if (--helper.refcnt > 0)
+ return;
+
+ xlnx_bridge_debugfs_fini();
+
+ if (WARN_ON(!list_empty(&helper.xlnx_bridges))) {
+ helper.error = true;
+ pr_err("any further xlnx bridge call will fail\n");
+ }
+
+ mutex_destroy(&helper.lock);
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_bridge.h b/drivers/gpu/drm/xlnx/xlnx_bridge.h
new file mode 100644
index 000000000000..64330169bd22
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_bridge.h
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM bridge header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_BRIDGE_H_
+#define _XLNX_BRIDGE_H_
+
+struct videomode;
+
+struct xlnx_bridge_debugfs_file;
+
+/**
+ * struct xlnx_bridge - Xilinx bridge device
+ * @list: list node for Xilinx bridge device list
+ * @of_node: OF node for the bridge
+ * @owned: flag if the bridge is owned
+ * @enable: callback to enable the bridge
+ * @disable: callback to disable the bridge
+ * @set_input: callback to set the input
+ * @get_input_fmts: callback to get supported input formats.
+ * @set_output: callback to set the output
+ * @get_output_fmts: callback to get supported output formats.
+ * @set_timing: callback to set timing in connected video timing controller.
+ * @debugfs_file: for debugfs support
+ */
+struct xlnx_bridge {
+ struct list_head list;
+ struct device_node *of_node;
+ bool owned;
+ int (*enable)(struct xlnx_bridge *bridge);
+ void (*disable)(struct xlnx_bridge *bridge);
+ int (*set_input)(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+ int (*get_input_fmts)(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+ int (*set_output)(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+ int (*get_output_fmts)(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+ int (*set_timing)(struct xlnx_bridge *bridge, struct videomode *vm);
+ struct xlnx_bridge_debugfs_file *debugfs_file;
+};
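+
+/*
+ * A minimal provider sketch (illustrative only; the my_* names are
+ * hypothetical): embed struct xlnx_bridge in the driver state, fill
+ * in the callbacks, and register it:
+ *
+ *	bridge->of_node = pdev->dev.of_node;
+ *	bridge->enable = my_enable;
+ *	bridge->set_input = my_set_input;
+ *	ret = xlnx_bridge_register(bridge);
+ */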
+
+#if IS_ENABLED(CONFIG_DRM_XLNX_BRIDGE)
+/*
+ * Helper functions: used within Xlnx DRM
+ */
+
+struct xlnx_bridge_helper;
+
+int xlnx_bridge_helper_init(void);
+void xlnx_bridge_helper_fini(void);
+
+/*
+ * Helper functions: used by client driver
+ */
+
+int xlnx_bridge_enable(struct xlnx_bridge *bridge);
+void xlnx_bridge_disable(struct xlnx_bridge *bridge);
+int xlnx_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+int xlnx_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+int xlnx_bridge_set_output(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+int xlnx_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+int xlnx_bridge_set_timing(struct xlnx_bridge *bridge, struct videomode *vm);
+struct xlnx_bridge *of_xlnx_bridge_get(struct device_node *bridge_np);
+void of_xlnx_bridge_put(struct xlnx_bridge *bridge);
+
+/*
+ * Bridge registration: used by bridge driver
+ */
+
+int xlnx_bridge_register(struct xlnx_bridge *bridge);
+void xlnx_bridge_unregister(struct xlnx_bridge *bridge);
+
+#else /* CONFIG_DRM_XLNX_BRIDGE */
+
+struct xlnx_bridge_helper;
+
+static inline int xlnx_bridge_helper_init(void)
+{
+ return 0;
+}
+
+static inline void xlnx_bridge_helper_fini(void)
+{
+}
+
+static inline int xlnx_bridge_enable(struct xlnx_bridge *bridge)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline void xlnx_bridge_disable(struct xlnx_bridge *bridge)
+{
+}
+
+static inline int xlnx_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline int xlnx_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline int xlnx_bridge_set_output(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline int xlnx_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline int xlnx_bridge_set_timing(struct xlnx_bridge *bridge,
+ struct videomode *vm)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline struct xlnx_bridge *
+of_xlnx_bridge_get(struct device_node *bridge_np)
+{
+ return NULL;
+}
+
+static inline void of_xlnx_bridge_put(struct xlnx_bridge *bridge)
+{
+}
+
+static inline int xlnx_bridge_register(struct xlnx_bridge *bridge)
+{
+ return 0;
+}
+
+static inline void xlnx_bridge_unregister(struct xlnx_bridge *bridge)
+{
+}
+
+#endif /* CONFIG_DRM_XLNX_BRIDGE */
+
+#endif /* _XLNX_BRIDGE_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_crtc.c b/drivers/gpu/drm/xlnx/xlnx_crtc.c
new file mode 100644
index 000000000000..d5805c923675
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_crtc.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM crtc driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/list.h>
+
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * The Xilinx CRTC layer enables a custom interface to CRTC drivers.
+ * The Xilinx DRM driver uses the interface wherever it needs CRTC
+ * functionality. CRTC drivers should attach the desired callbacks
+ * to struct xlnx_crtc and register the xlnx_crtc with the corresponding
+ * drm_device. It's highly recommended that CRTC drivers register all
+ * callbacks even though many of them are optional.
+ * The CRTC helper simply walks through the registered CRTC devices
+ * and calls the callbacks.
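+ *
+ * An illustrative registration from a CRTC driver (the my_* names are
+ * hypothetical):
+ *
+ *	my_state->xlnx_crtc.get_align = my_get_align;
+ *	my_state->xlnx_crtc.get_format = my_get_format;
+ *	xlnx_crtc_register(drm, &my_state->xlnx_crtc);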
+ */
+
+/**
+ * struct xlnx_crtc_helper - Xilinx CRTC helper
+ * @xlnx_crtcs: list of Xilinx CRTC devices
+ * @lock: lock to protect @xlnx_crtcs
+ * @drm: back pointer to DRM core
+ */
+struct xlnx_crtc_helper {
+ struct list_head xlnx_crtcs;
+ struct mutex lock; /* lock for @xlnx_crtcs */
+ struct drm_device *drm;
+};
+
+#define XLNX_CRTC_MAX_HEIGHT_WIDTH INT_MAX
+
+unsigned int xlnx_crtc_helper_get_align(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ unsigned int align = 1, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_align) {
+ tmp = crtc->get_align(crtc);
+ align = ALIGN(align, tmp);
+ }
+ }
+
+ return align;
+}
+
+u64 xlnx_crtc_helper_get_dma_mask(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u64 mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8), tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_dma_mask) {
+ tmp = crtc->get_dma_mask(crtc);
+ mask = min(mask, tmp);
+ }
+ }
+
+ return mask;
+}
+
+int xlnx_crtc_helper_get_max_width(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ int width = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_max_width) {
+ tmp = crtc->get_max_width(crtc);
+ width = min(width, tmp);
+ }
+ }
+
+ return width;
+}
+
+int xlnx_crtc_helper_get_max_height(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ int height = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_max_height) {
+ tmp = crtc->get_max_height(crtc);
+ height = min(height, tmp);
+ }
+ }
+
+ return height;
+}
+
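+/*
+ * Return the format common to all registered CRTCs, or 0 when the
+ * registered CRTCs report conflicting formats.
+ */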
+uint32_t xlnx_crtc_helper_get_format(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u32 format = 0, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_format) {
+ tmp = crtc->get_format(crtc);
+ if (format && format != tmp)
+ return 0;
+ format = tmp;
+ }
+ }
+
+ return format;
+}
+
+u32 xlnx_crtc_helper_get_cursor_width(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u32 width = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_cursor_width) {
+ tmp = crtc->get_cursor_width(crtc);
+ width = min(width, tmp);
+ }
+ }
+
+ return width;
+}
+
+u32 xlnx_crtc_helper_get_cursor_height(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u32 height = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_cursor_height) {
+ tmp = crtc->get_cursor_height(crtc);
+ height = min(height, tmp);
+ }
+ }
+
+ return height;
+}
+
+struct xlnx_crtc_helper *xlnx_crtc_helper_init(struct drm_device *drm)
+{
+ struct xlnx_crtc_helper *helper;
+
+ helper = devm_kzalloc(drm->dev, sizeof(*helper), GFP_KERNEL);
+ if (!helper)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&helper->xlnx_crtcs);
+ mutex_init(&helper->lock);
+ helper->drm = drm;
+
+ return helper;
+}
+
+void xlnx_crtc_helper_fini(struct drm_device *drm,
+ struct xlnx_crtc_helper *helper)
+{
+ if (WARN_ON(helper->drm != drm))
+ return;
+
+ if (WARN_ON(!list_empty(&helper->xlnx_crtcs)))
+ return;
+
+ mutex_destroy(&helper->lock);
+ devm_kfree(drm->dev, helper);
+}
+
+void xlnx_crtc_register(struct drm_device *drm, struct xlnx_crtc *crtc)
+{
+ struct xlnx_crtc_helper *helper = xlnx_get_crtc_helper(drm);
+
+ mutex_lock(&helper->lock);
+ list_add_tail(&crtc->list, &helper->xlnx_crtcs);
+ mutex_unlock(&helper->lock);
+}
+EXPORT_SYMBOL_GPL(xlnx_crtc_register);
+
+void xlnx_crtc_unregister(struct drm_device *drm, struct xlnx_crtc *crtc)
+{
+ struct xlnx_crtc_helper *helper = xlnx_get_crtc_helper(drm);
+
+ mutex_lock(&helper->lock);
+ list_del(&crtc->list);
+ mutex_unlock(&helper->lock);
+}
+EXPORT_SYMBOL_GPL(xlnx_crtc_unregister);
diff --git a/drivers/gpu/drm/xlnx/xlnx_crtc.h b/drivers/gpu/drm/xlnx/xlnx_crtc.h
new file mode 100644
index 000000000000..9ab57594aba8
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_crtc.h
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM crtc header
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_CRTC_H_
+#define _XLNX_CRTC_H_
+
+/**
+ * struct xlnx_crtc - Xilinx CRTC device
+ * @crtc: DRM CRTC device
+ * @list: list node for Xilinx CRTC device list
+ * @get_align: Get the alignment requirement of CRTC device
+ * @get_dma_mask: Get the dma mask of CRTC device
+ * @get_max_width: Get the maximum supported width
+ * @get_max_height: Get the maximum supported height
+ * @get_format: Get the current format of CRTC device
+ * @get_cursor_width: Get the cursor width
+ * @get_cursor_height: Get the cursor height
+ */
+struct xlnx_crtc {
+ struct drm_crtc crtc;
+ struct list_head list;
+ unsigned int (*get_align)(struct xlnx_crtc *crtc);
+ u64 (*get_dma_mask)(struct xlnx_crtc *crtc);
+ int (*get_max_width)(struct xlnx_crtc *crtc);
+ int (*get_max_height)(struct xlnx_crtc *crtc);
+ uint32_t (*get_format)(struct xlnx_crtc *crtc);
+ uint32_t (*get_cursor_width)(struct xlnx_crtc *crtc);
+ uint32_t (*get_cursor_height)(struct xlnx_crtc *crtc);
+};
+
+/*
+ * Helper functions: used within Xlnx DRM
+ */
+
+struct xlnx_crtc_helper;
+
+unsigned int xlnx_crtc_helper_get_align(struct xlnx_crtc_helper *helper);
+u64 xlnx_crtc_helper_get_dma_mask(struct xlnx_crtc_helper *helper);
+int xlnx_crtc_helper_get_max_width(struct xlnx_crtc_helper *helper);
+int xlnx_crtc_helper_get_max_height(struct xlnx_crtc_helper *helper);
+uint32_t xlnx_crtc_helper_get_format(struct xlnx_crtc_helper *helper);
+u32 xlnx_crtc_helper_get_cursor_width(struct xlnx_crtc_helper *helper);
+u32 xlnx_crtc_helper_get_cursor_height(struct xlnx_crtc_helper *helper);
+
+struct xlnx_crtc_helper *xlnx_crtc_helper_init(struct drm_device *drm);
+void xlnx_crtc_helper_fini(struct drm_device *drm,
+ struct xlnx_crtc_helper *helper);
+
+/*
+ * CRTC registration: used by other sub-driver modules
+ */
+
+static inline struct xlnx_crtc *to_xlnx_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct xlnx_crtc, crtc);
+}
+
+void xlnx_crtc_register(struct drm_device *drm, struct xlnx_crtc *crtc);
+void xlnx_crtc_unregister(struct drm_device *drm, struct xlnx_crtc *crtc);
+
+#endif /* _XLNX_CRTC_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_csc.c b/drivers/gpu/drm/xlnx/xlnx_csc.c
new file mode 100644
index 000000000000..1d4341dce570
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_csc.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VPSS CSC DRM bridge driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Venkateshwar rao G <vgannava@xilinx.com>
+ */
+
+/*
+ * Overview:
+ * This experimental driver works as a bridge driver and
+ * reuses code from V4L2.
+ * TODO:
+ * Need to implement in a modular approach to share driver code between
+ * V4L2 and DRM frameworks.
+ * Should be integrated with plane
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <uapi/linux/media-bus-format.h>
+
+#include "xlnx_bridge.h"
+
+/* Register offset */
+#define XV_CSC_AP_CTRL (0x000)
+#define XV_CSC_INVIDEOFORMAT (0x010)
+#define XV_CSC_OUTVIDEOFORMAT (0x018)
+#define XV_CSC_WIDTH (0x020)
+#define XV_CSC_HEIGHT (0x028)
+#define XV_CSC_K11 (0x050)
+#define XV_CSC_K12 (0x058)
+#define XV_CSC_K13 (0x060)
+#define XV_CSC_K21 (0x068)
+#define XV_CSC_K22 (0x070)
+#define XV_CSC_K23 (0x078)
+#define XV_CSC_K31 (0x080)
+#define XV_CSC_K32 (0x088)
+#define XV_CSC_K33 (0x090)
+#define XV_CSC_ROFFSET (0x098)
+#define XV_CSC_GOFFSET (0x0a0)
+#define XV_CSC_BOFFSET (0x0a8)
+#define XV_CSC_CLAMPMIN (0x0b0)
+#define XV_CSC_CLIPMAX (0x0b8)
+#define XV_CSC_SCALE_FACTOR (4096)
+#define XV_CSC_DIVISOR (10000)
+/* Streaming Macros */
+#define XCSC_CLAMP_MIN_ZERO (0)
+#define XCSC_AP_START BIT(0)
+#define XCSC_AP_AUTO_RESTART BIT(7)
+#define XCSC_STREAM_ON (XCSC_AP_START | XCSC_AP_AUTO_RESTART)
+#define XCSC_STREAM_OFF (0)
+/* GPIO Reset Assert/De-assert */
+#define XCSC_RESET_ASSERT (1)
+#define XCSC_RESET_DEASSERT (0)
+
+#define XCSC_MIN_WIDTH (64)
+#define XCSC_MAX_WIDTH (8192)
+#define XCSC_MIN_HEIGHT (64)
+#define XCSC_MAX_HEIGHT (4320)
+
+static const u32 xilinx_csc_video_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_VUY8_1X24,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_VYYUYY8_1X24,
+};
+
+/* vpss_csc_color_fmt - Color format type */
+enum vpss_csc_color_fmt {
+ XVIDC_CSF_RGB = 0,
+ XVIDC_CSF_YCRCB_444,
+ XVIDC_CSF_YCRCB_422,
+ XVIDC_CSF_YCRCB_420,
+};
+
+/**
+ * struct xilinx_csc - Core configuration of csc device structure
+ * @base: pointer to register base address
+ * @dev: device structure
+ * @bridge: xilinx bridge
+ * @cft_in: input color format
+ * @cft_out: output color format
+ * @color_depth: color depth
+ * @k_hw: array of hardware values
+ * @clip_max: clipping maximum value
+ * @width: width of the video
+ * @height: height of video
+ * @max_width: maximum number of pixels in a line
+ * @max_height: maximum number of lines per frame
+ * @rst_gpio: Handle to GPIO specifier to assert/de-assert the reset line
+ * @aclk: IP clock struct
+ */
+struct xilinx_csc {
+ void __iomem *base;
+ struct device *dev;
+ struct xlnx_bridge bridge;
+ enum vpss_csc_color_fmt cft_in;
+ enum vpss_csc_color_fmt cft_out;
+ u32 color_depth;
+ s32 k_hw[3][4];
+ s32 clip_max;
+ u32 width;
+ u32 height;
+ u32 max_width;
+ u32 max_height;
+ struct gpio_desc *rst_gpio;
+ struct clk *aclk;
+};
+
+static inline void xilinx_csc_write(void __iomem *base, u32 offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xilinx_csc_read(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * bridge_to_layer - Gets the parent structure
+ * @bridge: pointer to the member.
+ *
+ * Return: parent structure pointer
+ */
+static inline struct xilinx_csc *bridge_to_layer(struct xlnx_bridge *bridge)
+{
+ return container_of(bridge, struct xilinx_csc, bridge);
+}
+
+static void xilinx_csc_write_rgb_3x3(struct xilinx_csc *csc)
+{
+ xilinx_csc_write(csc->base, XV_CSC_K11, csc->k_hw[0][0]);
+ xilinx_csc_write(csc->base, XV_CSC_K12, csc->k_hw[0][1]);
+ xilinx_csc_write(csc->base, XV_CSC_K13, csc->k_hw[0][2]);
+ xilinx_csc_write(csc->base, XV_CSC_K21, csc->k_hw[1][0]);
+ xilinx_csc_write(csc->base, XV_CSC_K22, csc->k_hw[1][1]);
+ xilinx_csc_write(csc->base, XV_CSC_K23, csc->k_hw[1][2]);
+ xilinx_csc_write(csc->base, XV_CSC_K31, csc->k_hw[2][0]);
+ xilinx_csc_write(csc->base, XV_CSC_K32, csc->k_hw[2][1]);
+ xilinx_csc_write(csc->base, XV_CSC_K33, csc->k_hw[2][2]);
+}
+
+static void xilinx_csc_write_rgb_offset(struct xilinx_csc *csc)
+{
+ xilinx_csc_write(csc->base, XV_CSC_ROFFSET, csc->k_hw[0][3]);
+ xilinx_csc_write(csc->base, XV_CSC_GOFFSET, csc->k_hw[1][3]);
+ xilinx_csc_write(csc->base, XV_CSC_BOFFSET, csc->k_hw[2][3]);
+}
+
+static void xilinx_csc_write_coeff(struct xilinx_csc *csc)
+{
+ xilinx_csc_write_rgb_3x3(csc);
+ xilinx_csc_write_rgb_offset(csc);
+}
+
+static void xcsc_set_default_state(struct xilinx_csc *csc)
+{
+ csc->cft_in = XVIDC_CSF_YCRCB_422;
+ csc->cft_out = XVIDC_CSF_YCRCB_422;
+
+ /* This represents an identity matrix multiplied by 2^12 */
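+ /*
+ * With XV_CSC_SCALE_FACTOR = 4096 (2^12), a coefficient of 1.0 is
+ * stored as 4096: the hardware effectively computes
+ * out = (coeff * in) >> 12, so this matrix passes samples through
+ * unchanged.
+ */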
+ csc->k_hw[0][0] = XV_CSC_SCALE_FACTOR;
+ csc->k_hw[0][1] = 0;
+ csc->k_hw[0][2] = 0;
+ csc->k_hw[1][0] = 0;
+ csc->k_hw[1][1] = XV_CSC_SCALE_FACTOR;
+ csc->k_hw[1][2] = 0;
+ csc->k_hw[2][0] = 0;
+ csc->k_hw[2][1] = 0;
+ csc->k_hw[2][2] = XV_CSC_SCALE_FACTOR;
+ csc->k_hw[0][3] = 0;
+ csc->k_hw[1][3] = 0;
+ csc->k_hw[2][3] = 0;
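+ /* e.g. a color_depth of 8 gives clip_max = (1 << 8) - 1 = 255 */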
+ csc->clip_max = ((1 << csc->color_depth) - 1);
+ xilinx_csc_write(csc->base, XV_CSC_INVIDEOFORMAT, csc->cft_in);
+ xilinx_csc_write(csc->base, XV_CSC_OUTVIDEOFORMAT, csc->cft_out);
+ xilinx_csc_write_coeff(csc);
+ xilinx_csc_write(csc->base, XV_CSC_CLIPMAX, csc->clip_max);
+ xilinx_csc_write(csc->base, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+}
+
+static void xcsc_ycrcb_to_rgb(struct xilinx_csc *csc, s32 *clip_max)
+{
+ u16 bpc_scale = (1 << (csc->color_depth - 8));
+ /*
+ * See http://graficaobscura.com/matrix/index.html for
+ * how these numbers are derived. The VPSS CSC IP implements
+ * this matrix-style algorithm, and the 'magic' numbers here
+ * come from it.
+ *
+ * XV_CSC_DIVISOR is used to help with floating constants
+ * while performing multiplicative operations.
+ *
+ * Coefficients valid only for BT 709
+ */
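+ /*
+ * e.g. the 1.1644 Y coefficient below is programmed as
+ * 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR = 4769.
+ */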
+ csc->k_hw[0][0] = 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[0][1] = 0;
+ csc->k_hw[0][2] = 17927 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[1][0] = 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[1][1] = -2132 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[1][2] = -5329 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[2][0] = 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[2][1] = 21124 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[2][2] = 0;
+ csc->k_hw[0][3] = -248 * bpc_scale;
+ csc->k_hw[1][3] = 77 * bpc_scale;
+ csc->k_hw[2][3] = -289 * bpc_scale;
+ *clip_max = ((1 << csc->color_depth) - 1);
+}
+
+static void xcsc_rgb_to_ycrcb(struct xilinx_csc *csc, s32 *clip_max)
+{
+ u16 bpc_scale = (1 << (csc->color_depth - 8));
+ /*
+ * See http://graficaobscura.com/matrix/index.html for
+ * how these numbers are derived. The VPSS CSC IP implements
+ * this matrix-style algorithm, and the 'magic' numbers here
+ * come from it.
+ *
+ * XV_CSC_DIVISOR is used to help with floating constants
+ * while performing multiplicative operations.
+ *
+ * Coefficients valid only for BT 709
+ */
+ csc->k_hw[0][0] = 1826 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[0][1] = 6142 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[0][2] = 620 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[1][0] = -1006 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[1][1] = -3386 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[1][2] = 4392 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[2][0] = 4392 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[2][1] = -3989 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[2][2] = -403 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[0][3] = 16 * bpc_scale;
+ csc->k_hw[1][3] = 128 * bpc_scale;
+ csc->k_hw[2][3] = 128 * bpc_scale;
+ *clip_max = ((1 << csc->color_depth) - 1);
+}
+
+/**
+ * xcsc_set_coeff - Set the coefficients
+ * @csc: Pointer to csc device structure
+ *
+ * This function sets the conversion coefficients.
+ *
+ */
+static void xcsc_set_coeff(struct xilinx_csc *csc)
+{
+ xilinx_csc_write(csc->base, XV_CSC_INVIDEOFORMAT, csc->cft_in);
+ xilinx_csc_write(csc->base, XV_CSC_OUTVIDEOFORMAT, csc->cft_out);
+ xilinx_csc_write_coeff(csc);
+ xilinx_csc_write(csc->base, XV_CSC_CLIPMAX, csc->clip_max);
+ xilinx_csc_write(csc->base, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+}
+
+/**
+ * xilinx_csc_bridge_enable - enables csc core
+ * @bridge: bridge instance
+ *
+ * This function enables the csc core
+ *
+ * Return: 0 on success.
+ *
+ */
+static int xilinx_csc_bridge_enable(struct xlnx_bridge *bridge)
+{
+ struct xilinx_csc *csc = bridge_to_layer(bridge);
+
+ xilinx_csc_write(csc->base, XV_CSC_AP_CTRL, XCSC_STREAM_ON);
+
+ return 0;
+}
+
+/**
+ * xilinx_csc_bridge_disable - disables csc core
+ * @bridge: bridge instance
+ *
+ * This function disables the csc core
+ */
+static void xilinx_csc_bridge_disable(struct xlnx_bridge *bridge)
+{
+ struct xilinx_csc *csc = bridge_to_layer(bridge);
+
+ xilinx_csc_write(csc->base, XV_CSC_AP_CTRL, XCSC_STREAM_OFF);
+ /* Reset the Global IP Reset through GPIO */
+ gpiod_set_value_cansleep(csc->rst_gpio, XCSC_RESET_ASSERT);
+ gpiod_set_value_cansleep(csc->rst_gpio, XCSC_RESET_DEASSERT);
+}
+
+/**
+ * xilinx_csc_bridge_set_input - Sets the input parameters of csc
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * This function sets the input parameters of csc.
+ *
+ * Return: 0 on success, or -EINVAL for invalid parameters.
+ */
+static int xilinx_csc_bridge_set_input(struct xlnx_bridge *bridge, u32 width,
+ u32 height, u32 bus_fmt)
+{
+ struct xilinx_csc *csc = bridge_to_layer(bridge);
+
+ xcsc_set_default_state(csc);
+
+ if (height > csc->max_height || height < XCSC_MIN_HEIGHT)
+ return -EINVAL;
+
+ if (width > csc->max_width || width < XCSC_MIN_WIDTH)
+ return -EINVAL;
+
+ csc->height = height;
+ csc->width = width;
+
+ switch (bus_fmt) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ csc->cft_in = XVIDC_CSF_RGB;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ csc->cft_in = XVIDC_CSF_YCRCB_444;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ csc->cft_in = XVIDC_CSF_YCRCB_422;
+ break;
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ csc->cft_in = XVIDC_CSF_YCRCB_420;
+ break;
+ default:
+ dev_dbg(csc->dev, "unsupported input video format\n");
+ return -EINVAL;
+ }
+
+ xilinx_csc_write(csc->base, XV_CSC_WIDTH, width);
+ xilinx_csc_write(csc->base, XV_CSC_HEIGHT, height);
+
+ return 0;
+}
+
+/**
+ * xilinx_csc_bridge_get_input_fmts - input formats supported by csc
+ * @bridge: bridge instance
+ * @fmts: Pointer to be updated with formats information
+ * @count: count of video bus formats
+ *
+ * This function provides the input video format information of csc.
+ *
+ * Return: 0 on success.
+ */
+static int xilinx_csc_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ *fmts = xilinx_csc_video_fmts;
+ *count = ARRAY_SIZE(xilinx_csc_video_fmts);
+
+ return 0;
+}
+
+/**
+ * xilinx_csc_bridge_set_output - Sets the output parameters of csc
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * This function sets the output parameters of csc.
+ *
+ * Return: 0 on success, or -EINVAL for invalid parameters.
+ */
+static int xilinx_csc_bridge_set_output(struct xlnx_bridge *bridge, u32 width,
+ u32 height, u32 bus_fmt)
+{
+ struct xilinx_csc *csc = bridge_to_layer(bridge);
+
+ if (width != csc->width || height != csc->height)
+ return -EINVAL;
+
+ switch (bus_fmt) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ csc->cft_out = XVIDC_CSF_RGB;
+ dev_dbg(csc->dev, "Media Format Out : RGB");
+ if (csc->cft_in != XVIDC_CSF_RGB)
+ xcsc_ycrcb_to_rgb(csc, &csc->clip_max);
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ csc->cft_out = XVIDC_CSF_YCRCB_444;
+ dev_dbg(csc->dev, "Media Format Out : YUV 444");
+ if (csc->cft_in == XVIDC_CSF_RGB)
+ xcsc_rgb_to_ycrcb(csc, &csc->clip_max);
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ csc->cft_out = XVIDC_CSF_YCRCB_422;
+ dev_dbg(csc->dev, "Media Format Out : YUV 422");
+ if (csc->cft_in == XVIDC_CSF_RGB)
+ xcsc_rgb_to_ycrcb(csc, &csc->clip_max);
+ break;
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ csc->cft_out = XVIDC_CSF_YCRCB_420;
+ dev_dbg(csc->dev, "Media Format Out : YUV 420");
+ if (csc->cft_in == XVIDC_CSF_RGB)
+ xcsc_rgb_to_ycrcb(csc, &csc->clip_max);
+ break;
+ default:
+ dev_info(csc->dev, "unsupported output video format\n");
+ return -EINVAL;
+ }
+ xcsc_set_coeff(csc);
+
+ return 0;
+}
+
+/**
+ * xilinx_csc_bridge_get_output_fmts - output formats supported by csc
+ * @bridge: bridge instance
+ * @fmts: Pointer to be updated with formats information
+ * @count: count of video bus formats
+ *
+ * This function provides the output video format information of csc.
+ *
+ * Return: 0 on success.
+ */
+static int xilinx_csc_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ *fmts = xilinx_csc_video_fmts;
+ *count = ARRAY_SIZE(xilinx_csc_video_fmts);
+ return 0;
+}
+
+static int xcsc_parse_of(struct xilinx_csc *csc)
+{
+ int ret;
+ struct device_node *node = csc->dev->of_node;
+
+ csc->aclk = devm_clk_get(csc->dev, NULL);
+ if (IS_ERR(csc->aclk)) {
+ ret = PTR_ERR(csc->aclk);
+ dev_err(csc->dev, "failed to get aclk %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,video-width",
+ &csc->color_depth);
+ if (ret < 0) {
+ dev_info(csc->dev, "video width not present in DT\n");
+ return ret;
+ }
+ if (csc->color_depth != 8 && csc->color_depth != 10 &&
+ csc->color_depth != 12 && csc->color_depth != 16) {
+ dev_err(csc->dev, "Invalid video width in DT\n");
+ return -EINVAL;
+ }
+ /* Reset GPIO */
+ csc->rst_gpio = devm_gpiod_get(csc->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(csc->rst_gpio)) {
+ if (PTR_ERR(csc->rst_gpio) != -EPROBE_DEFER)
+ dev_err(csc->dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(csc->rst_gpio);
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-height", &csc->max_height);
+ if (ret < 0) {
+ dev_err(csc->dev, "xlnx,max-height is missing!");
+ return -EINVAL;
+ } else if (csc->max_height > XCSC_MAX_HEIGHT ||
+ csc->max_height < XCSC_MIN_HEIGHT) {
+ dev_err(csc->dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-width", &csc->max_width);
+ if (ret < 0) {
+ dev_err(csc->dev, "xlnx,max-width is missing!");
+ return -EINVAL;
+ } else if (csc->max_width > XCSC_MAX_WIDTH ||
+ csc->max_width < XCSC_MIN_WIDTH) {
+ dev_err(csc->dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int xilinx_csc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xilinx_csc *csc;
+ int ret;
+
+ csc = devm_kzalloc(dev, sizeof(*csc), GFP_KERNEL);
+ if (!csc)
+ return -ENOMEM;
+
+ csc->dev = dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ csc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(csc->base))
+ return PTR_ERR(csc->base);
+
+ platform_set_drvdata(pdev, csc);
+ ret = xcsc_parse_of(csc);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(csc->aclk);
+ if (ret) {
+ dev_err(csc->dev, "failed to enable clock %d\n", ret);
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(csc->rst_gpio, XCSC_RESET_DEASSERT);
+ csc->bridge.enable = &xilinx_csc_bridge_enable;
+ csc->bridge.disable = &xilinx_csc_bridge_disable;
+ csc->bridge.set_input = &xilinx_csc_bridge_set_input;
+ csc->bridge.get_input_fmts = &xilinx_csc_bridge_get_input_fmts;
+ csc->bridge.set_output = &xilinx_csc_bridge_set_output;
+ csc->bridge.get_output_fmts = &xilinx_csc_bridge_get_output_fmts;
+ csc->bridge.of_node = dev->of_node;
+
+ ret = xlnx_bridge_register(&csc->bridge);
+ if (ret) {
+ dev_info(csc->dev, "Bridge registration failed\n");
+ goto err_clk;
+ }
+
+ dev_info(csc->dev, "Xilinx VPSS CSC DRM experimental driver probed\n");
+
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(csc->aclk);
+ return ret;
+}
+
+static int xilinx_csc_remove(struct platform_device *pdev)
+{
+ struct xilinx_csc *csc = platform_get_drvdata(pdev);
+
+ xlnx_bridge_unregister(&csc->bridge);
+ clk_disable_unprepare(csc->aclk);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_csc_of_match[] = {
+ { .compatible = "xlnx,vpss-csc"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xilinx_csc_of_match);
+
+static struct platform_driver csc_bridge_driver = {
+ .probe = xilinx_csc_probe,
+ .remove = xilinx_csc_remove,
+ .driver = {
+ .name = "xlnx,csc-bridge",
+ .of_match_table = xilinx_csc_of_match,
+ },
+};
+
+module_platform_driver(csc_bridge_driver);
+
+MODULE_AUTHOR("Venkateshwar Rao <vgannava@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA CSC Bridge Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_drv.c b/drivers/gpu/drm/xlnx/xlnx_drv.c
new file mode 100644
index 000000000000..7bcb3f40202d
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_drv.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS Driver
+ *
+ * Copyright (C) 2013 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+#include "xlnx_fb.h"
+#include "xlnx_gem.h"
+
+#define DRIVER_NAME "xlnx"
+#define DRIVER_DESC "Xilinx DRM KMS Driver"
+#define DRIVER_DATE "20130509"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static uint xlnx_fbdev_vres = 2;
+module_param_named(fbdev_vres, xlnx_fbdev_vres, uint, 0444);
+MODULE_PARM_DESC(fbdev_vres,
+ "fbdev virtual resolution multiplier for fb (default: 2)");
+
+/**
+ * struct xlnx_drm - Xilinx DRM private data
+ * @drm: DRM core
+ * @crtc: Xilinx DRM CRTC helper
+ * @fb: DRM fb helper
+ * @master: logical master device for pipeline
+ * @suspend_state: atomic state for suspend / resume
+ * @is_master: A flag to indicate if this instance is a fake master
+ */
+struct xlnx_drm {
+ struct drm_device *drm;
+ struct xlnx_crtc_helper *crtc;
+ struct drm_fb_helper *fb;
+ struct platform_device *master;
+ struct drm_atomic_state *suspend_state;
+ bool is_master;
+};
+
+/**
+ * xlnx_get_crtc_helper - Return the crtc helper instance
+ * @drm: DRM device
+ *
+ * Return: the crtc helper instance
+ */
+struct xlnx_crtc_helper *xlnx_get_crtc_helper(struct drm_device *drm)
+{
+ struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+ return xlnx_drm->crtc;
+}
+
+/**
+ * xlnx_get_align - Return the align requirement through CRTC helper
+ * @drm: DRM device
+ *
+ * Return: the alignment requirement
+ */
+unsigned int xlnx_get_align(struct drm_device *drm)
+{
+ struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+ return xlnx_crtc_helper_get_align(xlnx_drm->crtc);
+}
+
+/**
+ * xlnx_get_format - Return the current format of CRTC
+ * @drm: DRM device
+ *
+ * Return: the current CRTC format
+ */
+uint32_t xlnx_get_format(struct drm_device *drm)
+{
+ struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+ return xlnx_crtc_helper_get_format(xlnx_drm->crtc);
+}
+
+static void xlnx_output_poll_changed(struct drm_device *drm)
+{
+ struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+ if (xlnx_drm->fb)
+ drm_fb_helper_hotplug_event(xlnx_drm->fb);
+}
+
+static const struct drm_mode_config_funcs xlnx_mode_config_funcs = {
+ .fb_create = xlnx_fb_create,
+ .output_poll_changed = xlnx_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void xlnx_mode_config_init(struct drm_device *drm)
+{
+ struct xlnx_drm *xlnx_drm = drm->dev_private;
+ struct xlnx_crtc_helper *crtc = xlnx_drm->crtc;
+
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+ drm->mode_config.max_width = xlnx_crtc_helper_get_max_width(crtc);
+ drm->mode_config.max_height = xlnx_crtc_helper_get_max_height(crtc);
+ drm->mode_config.cursor_width =
+ xlnx_crtc_helper_get_cursor_width(crtc);
+ drm->mode_config.cursor_height =
+ xlnx_crtc_helper_get_cursor_height(crtc);
+}
+
+static int xlnx_drm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct xlnx_drm *xlnx_drm = dev->dev_private;
+
+ /* This is a hacky way to allow the root user to run as a master */
+ if (!(drm_is_primary_client(file) && !dev->master) &&
+ !file->is_master && capable(CAP_SYS_ADMIN)) {
+ file->is_master = 1;
+ xlnx_drm->is_master = true;
+ }
+
+ return 0;
+}
+
+static int xlnx_drm_release(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file = filp->private_data;
+ struct drm_minor *minor = file->minor;
+ struct drm_device *drm = minor->dev;
+ struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+ if (xlnx_drm->is_master) {
+ xlnx_drm->is_master = false;
+ file->is_master = 0;
+ }
+
+ return drm_release(inode, filp);
+}
+
+static void xlnx_lastclose(struct drm_device *drm)
+{
+ struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+ if (xlnx_drm->fb)
+ drm_fb_helper_restore_fbdev_mode_unlocked(xlnx_drm->fb);
+}
+
+static const struct file_operations xlnx_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = xlnx_drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_cma_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+static struct drm_driver xlnx_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM |
+ DRIVER_ATOMIC,
+ .open = xlnx_drm_open,
+ .lastclose = xlnx_lastclose,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = xlnx_gem_cma_dumb_create,
+ .dumb_destroy = drm_gem_dumb_destroy,
+
+ .fops = &xlnx_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+static int xlnx_bind(struct device *dev)
+{
+ struct xlnx_drm *xlnx_drm;
+ struct drm_device *drm;
+ const struct drm_format_info *info;
+ struct platform_device *master = to_platform_device(dev);
+ struct platform_device *pdev = to_platform_device(dev->parent);
+ int ret;
+ u32 format;
+
+ drm = drm_dev_alloc(&xlnx_drm_driver, &pdev->dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ xlnx_drm = devm_kzalloc(drm->dev, sizeof(*xlnx_drm), GFP_KERNEL);
+ if (!xlnx_drm) {
+ ret = -ENOMEM;
+ goto err_drm;
+ }
+
+ drm_mode_config_init(drm);
+ drm->mode_config.funcs = &xlnx_mode_config_funcs;
+
+ ret = drm_vblank_init(drm, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize vblank\n");
+ goto err_xlnx_drm;
+ }
+
+ drm->irq_enabled = 1;
+ drm->dev_private = xlnx_drm;
+ xlnx_drm->drm = drm;
+ xlnx_drm->master = master;
+ drm_kms_helper_poll_init(drm);
+ platform_set_drvdata(master, xlnx_drm);
+
+ xlnx_drm->crtc = xlnx_crtc_helper_init(drm);
+ if (IS_ERR(xlnx_drm->crtc)) {
+ ret = PTR_ERR(xlnx_drm->crtc);
+ goto err_xlnx_drm;
+ }
+
+ ret = component_bind_all(&master->dev, drm);
+ if (ret)
+ goto err_crtc;
+
+ xlnx_mode_config_init(drm);
+ drm_mode_config_reset(drm);
+ dma_set_mask(drm->dev, xlnx_crtc_helper_get_dma_mask(xlnx_drm->crtc));
+
+ format = xlnx_crtc_helper_get_format(xlnx_drm->crtc);
+ info = drm_format_info(format);
+ if (info && info->depth && info->cpp[0]) {
+ unsigned int align;
+
+ align = xlnx_crtc_helper_get_align(xlnx_drm->crtc);
+ xlnx_drm->fb = xlnx_fb_init(drm, info->cpp[0] * 8, 1, align,
+ xlnx_fbdev_vres);
+ if (IS_ERR(xlnx_drm->fb)) {
+ dev_err(&pdev->dev,
+ "failed to initialize drm fb\n");
+ xlnx_drm->fb = NULL;
+ }
+ } else {
+ /* fbdev emulation is optional */
+ dev_info(&pdev->dev, "fbdev is not initialized\n");
+ }
+
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto err_fb;
+
+ return 0;
+
+err_fb:
+ if (xlnx_drm->fb)
+ xlnx_fb_fini(xlnx_drm->fb);
+ component_unbind_all(drm->dev, drm);
+err_crtc:
+ xlnx_crtc_helper_fini(drm, xlnx_drm->crtc);
+err_xlnx_drm:
+ drm_mode_config_cleanup(drm);
+err_drm:
+ drm_dev_put(drm);
+ return ret;
+}
+
+static void xlnx_unbind(struct device *dev)
+{
+ struct xlnx_drm *xlnx_drm = dev_get_drvdata(dev);
+ struct drm_device *drm = xlnx_drm->drm;
+
+ drm_dev_unregister(drm);
+ if (xlnx_drm->fb)
+ xlnx_fb_fini(xlnx_drm->fb);
+ component_unbind_all(&xlnx_drm->master->dev, drm);
+ xlnx_crtc_helper_fini(drm, xlnx_drm->crtc);
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+ drm_dev_put(drm);
+}
+
+static const struct component_master_ops xlnx_master_ops = {
+ .bind = xlnx_bind,
+ .unbind = xlnx_unbind,
+};
+
+static int xlnx_of_component_probe(struct device *master_dev,
+ int (*compare_of)(struct device *, void *),
+ const struct component_master_ops *m_ops)
+{
+ struct device *dev = master_dev->parent;
+ struct device_node *ep, *port, *remote, *parent;
+ struct component_match *match = NULL;
+ int i;
+
+ if (!dev->of_node)
+ return -EINVAL;
+
+ component_match_add(master_dev, &match, compare_of, dev->of_node);
+
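+ /*
+ * First pass: add every device referenced by the "ports" phandle
+ * list to the component match list.
+ */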
+ for (i = 0; ; i++) {
+ port = of_parse_phandle(dev->of_node, "ports", i);
+ if (!port)
+ break;
+
+ parent = port->parent;
+ if (!of_node_cmp(parent->name, "ports"))
+ parent = parent->parent;
+ parent = of_node_get(parent);
+
+ if (!of_device_is_available(parent)) {
+ of_node_put(parent);
+ of_node_put(port);
+ continue;
+ }
+
+ component_match_add(master_dev, &match, compare_of, parent);
+ of_node_put(parent);
+ of_node_put(port);
+ }
+
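+ /*
+ * Second pass: for each available port provider, follow the OF graph
+ * endpoints and add the remote endpoint parents (encoders, bridges)
+ * to the match list as well.
+ */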
+ parent = dev->of_node;
+ for (i = 0; ; i++) {
+ parent = of_node_get(parent);
+ if (!of_device_is_available(parent)) {
+ of_node_put(parent);
+ continue;
+ }
+
+ for_each_endpoint_of_node(parent, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote || !of_device_is_available(remote) ||
+ remote == dev->of_node) {
+ of_node_put(remote);
+ continue;
+ } else if (!of_device_is_available(remote->parent)) {
+ dev_warn(dev, "parent dev of %s unavailable\n",
+ remote->full_name);
+ of_node_put(remote);
+ continue;
+ }
+ component_match_add(master_dev, &match, compare_of,
+ remote);
+ of_node_put(remote);
+ }
+ of_node_put(parent);
+
+ port = of_parse_phandle(dev->of_node, "ports", i);
+ if (!port)
+ break;
+
+ parent = port->parent;
+ if (!of_node_cmp(parent->name, "ports"))
+ parent = parent->parent;
+ of_node_put(port);
+ }
+
+ return component_master_add_with_match(master_dev, m_ops, match);
+}
+
+static int xlnx_compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int xlnx_platform_probe(struct platform_device *pdev)
+{
+ return xlnx_of_component_probe(&pdev->dev, xlnx_compare_of,
+ &xlnx_master_ops);
+}
+
+static int xlnx_platform_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &xlnx_master_ops);
+ return 0;
+}
+
+static void xlnx_platform_shutdown(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &xlnx_master_ops);
+}
+
+static int __maybe_unused xlnx_pm_suspend(struct device *dev)
+{
+ struct xlnx_drm *xlnx_drm = dev_get_drvdata(dev);
+ struct drm_device *drm = xlnx_drm->drm;
+
+ drm_kms_helper_poll_disable(drm);
+
+ xlnx_drm->suspend_state = drm_atomic_helper_suspend(drm);
+ if (IS_ERR(xlnx_drm->suspend_state)) {
+ drm_kms_helper_poll_enable(drm);
+ return PTR_ERR(xlnx_drm->suspend_state);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused xlnx_pm_resume(struct device *dev)
+{
+ struct xlnx_drm *xlnx_drm = dev_get_drvdata(dev);
+ struct drm_device *drm = xlnx_drm->drm;
+
+ drm_atomic_helper_resume(drm, xlnx_drm->suspend_state);
+ drm_kms_helper_poll_enable(drm);
+
+ return 0;
+}
+
+static const struct dev_pm_ops xlnx_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xlnx_pm_suspend, xlnx_pm_resume)
+};
+
+static struct platform_driver xlnx_driver = {
+ .probe = xlnx_platform_probe,
+ .remove = xlnx_platform_remove,
+ .shutdown = xlnx_platform_shutdown,
+ .driver = {
+ .name = "xlnx-drm",
+ .pm = &xlnx_pm_ops,
+ },
+};
+
+/* bitmap for master id */
+static u32 xlnx_master_ids = GENMASK(31, 0);
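+/*
+ * Each set bit marks a free id: xlnx_drm_pipeline_init() hands out the
+ * lowest free one via ffs(), clears the bit while the pipeline exists,
+ * and xlnx_drm_pipeline_exit() sets it again.
+ */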
+
+/**
+ * xlnx_drm_pipeline_init - Initialize the drm pipeline for the device
+ * @pdev: The platform device to initialize the drm pipeline device
+ *
+ * This function initializes the drm pipeline device, struct drm_device,
+ * on @pdev by creating a logical master platform device. The logical platform
+ * device acts as a master device to bind slave devices and represents
+ * the entire pipeline.
+ * The logical master uses the port bindings of the calling device to
+ * figure out the pipeline topology.
+ *
+ * Return: the logical master platform device if the drm device is initialized
+ * on @pdev. Error code otherwise.
+ */
+struct platform_device *xlnx_drm_pipeline_init(struct platform_device *pdev)
+{
+ struct platform_device *master;
+ int id, ret;
+
+ id = ffs(xlnx_master_ids);
+ if (!id)
+ return ERR_PTR(-ENOSPC);
+
+ master = platform_device_alloc("xlnx-drm", id - 1);
+ if (!master)
+ return ERR_PTR(-ENOMEM);
+
+ master->dev.parent = &pdev->dev;
+ ret = platform_device_add(master);
+ if (ret)
+ goto err_out;
+
+ WARN_ON(master->id != id - 1);
+ xlnx_master_ids &= ~BIT(master->id);
+ return master;
+
+err_out:
+ platform_device_unregister(master);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(xlnx_drm_pipeline_init);
+
+/**
+ * xlnx_drm_pipeline_exit - Release the drm pipeline for the device
+ * @master: The master pipeline device to release
+ *
+ * Release the logical pipeline device returned by xlnx_drm_pipeline_init().
+ */
+void xlnx_drm_pipeline_exit(struct platform_device *master)
+{
+ xlnx_master_ids |= BIT(master->id);
+ platform_device_unregister(master);
+}
+EXPORT_SYMBOL_GPL(xlnx_drm_pipeline_exit);
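+
+/*
+ * A pipeline sub-driver (e.g. a CRTC driver) is expected to pair
+ * these calls from its own probe/remove paths, along the lines of:
+ *
+ *	master = xlnx_drm_pipeline_init(pdev);
+ *	if (IS_ERR(master))
+ *		return PTR_ERR(master);
+ *	...
+ *	xlnx_drm_pipeline_exit(master);
+ */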
+
+static int __init xlnx_drm_drv_init(void)
+{
+ xlnx_bridge_helper_init();
+ platform_driver_register(&xlnx_driver);
+ return 0;
+}
+
+static void __exit xlnx_drm_drv_exit(void)
+{
+ platform_driver_unregister(&xlnx_driver);
+ xlnx_bridge_helper_fini();
+}
+
+module_init(xlnx_drm_drv_init);
+module_exit(xlnx_drm_drv_exit);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx DRM KMS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_drv.h b/drivers/gpu/drm/xlnx/xlnx_drv.h
new file mode 100644
index 000000000000..0f6595f1bd85
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_drv.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx DRM KMS Header
+ *
+ * Copyright (C) 2013 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_DRV_H_
+#define _XLNX_DRV_H_
+
+struct drm_device;
+struct xlnx_crtc_helper;
+
+struct platform_device *xlnx_drm_pipeline_init(struct platform_device *parent);
+void xlnx_drm_pipeline_exit(struct platform_device *pipeline);
+
+uint32_t xlnx_get_format(struct drm_device *drm);
+unsigned int xlnx_get_align(struct drm_device *drm);
+struct xlnx_crtc_helper *xlnx_get_crtc_helper(struct drm_device *drm);
+struct xlnx_bridge_helper *xlnx_get_bridge_helper(struct drm_device *drm);
+
+#endif /* _XLNX_DRV_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_dsi.c b/drivers/gpu/drm/xlnx/xlnx_dsi.c
new file mode 100644
index 000000000000..c785b941b037
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_dsi.c
@@ -0,0 +1,1011 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA MIPI DSI Tx Controller driver.
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author : Saurabh Sengar <saurabhs@xilinx.com>
+ * : Siva Rajesh J <siva.rajesh.jarugula@xilinx.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <video/mipi_display.h>
+#include <video/videomode.h>
+
+#include "xlnx_bridge.h"
+
+/* DSI Tx IP registers */
+#define XDSI_CCR 0x00
+#define XDSI_CCR_COREENB BIT(0)
+#define XDSI_CCR_SOFTRST BIT(1)
+#define XDSI_CCR_CRREADY BIT(2)
+#define XDSI_CCR_CMDMODE BIT(3)
+#define XDSI_CCR_DFIFORST BIT(4)
+#define XDSI_CCR_CMDFIFORST BIT(5)
+#define XDSI_PCR 0x04
+#define XDSI_PCR_VIDEOMODE(x) (((x) & 0x3) << 3)
+#define XDSI_PCR_VIDEOMODE_MASK (0x3 << 3)
+#define XDSI_PCR_VIDEOMODE_SHIFT 3
+#define XDSI_PCR_BLLPTYPE(x) ((x) << 5)
+#define XDSI_PCR_BLLPMODE(x) ((x) << 6)
+#define XDSI_PCR_EOTPENABLE(x) ((x) << 13)
+#define XDSI_GIER 0x20
+#define XDSI_ISR 0x24
+#define XDSI_IER 0x28
+#define XDSI_STR 0x2C
+#define XDSI_STR_RDY_SHPKT BIT(6)
+#define XDSI_STR_RDY_LNGPKT BIT(7)
+#define XDSI_STR_DFIFO_FULL BIT(8)
+#define XDSI_STR_DFIFO_EMPTY BIT(9)
+#define XDSI_STR_WAITFR_DATA BIT(10)
+#define XDSI_STR_CMD_EXE_PGS BIT(11)
+#define XDSI_STR_CCMD_PROC BIT(12)
+#define XDSI_STR_LPKT_MASK (0x5 << 7)
+#define XDSI_CMD 0x30
+#define XDSI_CMD_QUEUE_PACKET(x) ((x) & GENMASK(23, 0))
+#define XDSI_DFR 0x34
+#define XDSI_TIME1 0x50
+#define XDSI_TIME1_BLLP_BURST(x) ((x) & GENMASK(15, 0))
+#define XDSI_TIME1_HSA(x) (((x) & GENMASK(15, 0)) << 16)
+#define XDSI_TIME2 0x54
+#define XDSI_TIME2_VACT(x) ((x) & GENMASK(15, 0))
+#define XDSI_TIME2_HACT(x) (((x) & GENMASK(15, 0)) << 16)
+#define XDSI_HACT_MULTIPLIER GENMASK(1, 0)
+#define XDSI_TIME3 0x58
+#define XDSI_TIME3_HFP(x) ((x) & GENMASK(15, 0))
+#define XDSI_TIME3_HBP(x) (((x) & GENMASK(15, 0)) << 16)
+#define XDSI_TIME4 0x5c
+#define XDSI_TIME4_VFP(x) ((x) & GENMASK(7, 0))
+#define XDSI_TIME4_VBP(x) (((x) & GENMASK(7, 0)) << 8)
+#define XDSI_TIME4_VSA(x) (((x) & GENMASK(7, 0)) << 16)
+#define XDSI_LTIME 0x60
+#define XDSI_BLLP_TIME 0x64
+/*
+ * XDSI_NUM_DATA_T represents the number of data types in
+ * enum mipi_dsi_pixel_format in the MIPI DSI part of the DRM framework.
+ */
+#define XDSI_NUM_DATA_T 4
+#define XDSI_VIDEO_MODE_SYNC_PULSE 0x0
+#define XDSI_VIDEO_MODE_SYNC_EVENT 0x1
+#define XDSI_VIDEO_MODE_BURST 0x2
+
+#define XDSI_DPHY_CLK_MIN 197000000000UL
+#define XDSI_DPHY_CLK_MAX 203000000000UL
+#define XDSI_DPHY_CLK_REQ 200000000000UL
+
+/* command timeout in usec */
+#define XDSI_CMD_TIMEOUT_VAL (3000)
+
+/**
+ * struct xlnx_dsi - Core configuration DSI Tx subsystem device structure
+ * @encoder: DRM encoder structure
+ * @dsi_host: DSI host device
+ * @connector: DRM connector structure
+ * @panel_node: MIPI DSI device panel node
+ * @panel: DRM panel structure
+ * @dev: device structure
+ * @iomem: Base address of DSI subsystem
+ * @lanes: number of active data lanes supported by DSI controller
+ * @cmdmode: command mode support
+ * @mode_flags: DSI operation mode related flags
+ * @format: pixel format for video mode of DSI controller
+ * @vm: videomode data structure
+ * @mul_factor: multiplication factor for HACT timing parameter
+ * @eotp_prop: configurable EoTP DSI parameter
+ * @bllp_mode_prop: configurable BLLP mode DSI parameter
+ * @bllp_type_prop: configurable BLLP type DSI parameter
+ * @video_mode_prop: configurable Video mode DSI parameter
+ * @bllp_burst_time_prop: Configurable BLLP time for burst mode
+ * @cmd_queue_prop: configurable command queue
+ * @eotp_prop_val: configurable EoTP DSI parameter value
+ * @bllp_mode_prop_val: configurable BLLP mode DSI parameter value
+ * @bllp_type_prop_val: configurable BLLP type DSI parameter value
+ * @video_mode_prop_val: configurable Video mode DSI parameter value
+ * @bllp_burst_time_prop_val: Configurable BLLP time for burst mode value
+ * @cmd_queue_prop_val: configurable command queue value
+ * @bridge: bridge structure
+ * @height_out: configurable bridge output height parameter
+ * @height_out_prop_val: configurable bridge output height parameter value
+ * @width_out: configurable bridge output width parameter
+ * @width_out_prop_val: configurable bridge output width parameter value
+ * @in_fmt: configurable bridge input media format
+ * @in_fmt_prop_val: configurable media bus format value
+ * @out_fmt: configurable bridge output media format
+ * @out_fmt_prop_val: configurable media bus format value
+ * @video_aclk: Video clock
+ * @dphy_clk_200M: 200MHz DPHY clock and AXI Lite clock
+ */
+struct xlnx_dsi {
+ struct drm_encoder encoder;
+ struct mipi_dsi_host dsi_host;
+ struct drm_connector connector;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+ struct device *dev;
+ void __iomem *iomem;
+ u32 lanes;
+ bool cmdmode;
+ u32 mode_flags;
+ enum mipi_dsi_pixel_format format;
+ struct videomode vm;
+ u32 mul_factor;
+ struct drm_property *eotp_prop;
+ struct drm_property *bllp_mode_prop;
+ struct drm_property *bllp_type_prop;
+ struct drm_property *video_mode_prop;
+ struct drm_property *bllp_burst_time_prop;
+ struct drm_property *cmd_queue_prop;
+ bool eotp_prop_val;
+ bool bllp_mode_prop_val;
+ bool bllp_type_prop_val;
+ u32 video_mode_prop_val;
+ u32 bllp_burst_time_prop_val;
+ u32 cmd_queue_prop_val;
+ struct xlnx_bridge *bridge;
+ struct drm_property *height_out;
+ u32 height_out_prop_val;
+ struct drm_property *width_out;
+ u32 width_out_prop_val;
+ struct drm_property *in_fmt;
+ u32 in_fmt_prop_val;
+ struct drm_property *out_fmt;
+ u32 out_fmt_prop_val;
+ struct clk *video_aclk;
+ struct clk *dphy_clk_200M;
+};
+
+#define host_to_dsi(host) container_of(host, struct xlnx_dsi, dsi_host)
+#define connector_to_dsi(c) container_of(c, struct xlnx_dsi, connector)
+#define encoder_to_dsi(e) container_of(e, struct xlnx_dsi, encoder)
+
+static inline void xlnx_dsi_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xlnx_dsi_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * xlnx_dsi_set_config_parameters - Configure DSI Tx registers with parameters
+ * given from user application.
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure having drm_property parameters
+ * configured from user application and writes them into DSI IP registers.
+ */
+static void xlnx_dsi_set_config_parameters(struct xlnx_dsi *dsi)
+{
+ u32 reg;
+
+ reg = XDSI_PCR_EOTPENABLE(dsi->eotp_prop_val);
+ reg |= XDSI_PCR_VIDEOMODE(dsi->video_mode_prop_val);
+ reg |= XDSI_PCR_BLLPTYPE(dsi->bllp_type_prop_val);
+ reg |= XDSI_PCR_BLLPMODE(dsi->bllp_mode_prop_val);
+
+ xlnx_dsi_writel(dsi->iomem, XDSI_PCR, reg);
+ /*
+ * Configure the burst time if video mode is burst.
+ * HSA of TIME1 register is ignored in this mode.
+ */
+ if (dsi->video_mode_prop_val == XDSI_VIDEO_MODE_BURST) {
+ reg = XDSI_TIME1_BLLP_BURST(dsi->bllp_burst_time_prop_val);
+ xlnx_dsi_writel(dsi->iomem, XDSI_TIME1, reg);
+ }
+
+ reg = XDSI_CMD_QUEUE_PACKET(dsi->cmd_queue_prop_val);
+ xlnx_dsi_writel(dsi->iomem, XDSI_CMD, reg);
+
+ dev_dbg(dsi->dev, "PCR register value is = %x\n",
+ xlnx_dsi_readl(dsi->iomem, XDSI_PCR));
+}
+
+/**
+ * xlnx_dsi_set_display_mode - Configure DSI timing registers
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function writes the timing parameters of DSI IP which are
+ * retrieved from panel timing values.
+ */
+static void xlnx_dsi_set_display_mode(struct xlnx_dsi *dsi)
+{
+ struct videomode *vm = &dsi->vm;
+ u32 reg, video_mode;
+
+ reg = xlnx_dsi_readl(dsi->iomem, XDSI_PCR);
+ video_mode = (reg & XDSI_PCR_VIDEOMODE_MASK) >>
+ XDSI_PCR_VIDEOMODE_SHIFT;
+
+ /* configure the HSA value only in non-burst sync pulse video mode */
+ if (!video_mode &&
+ (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)) {
+ reg = XDSI_TIME1_HSA(vm->hsync_len);
+ xlnx_dsi_writel(dsi->iomem, XDSI_TIME1, reg);
+ }
+
+ reg = XDSI_TIME4_VFP(vm->vfront_porch) |
+ XDSI_TIME4_VBP(vm->vback_porch) |
+ XDSI_TIME4_VSA(vm->vsync_len);
+ xlnx_dsi_writel(dsi->iomem, XDSI_TIME4, reg);
+
+ reg = XDSI_TIME3_HFP(vm->hfront_porch) |
+ XDSI_TIME3_HBP(vm->hback_porch);
+ xlnx_dsi_writel(dsi->iomem, XDSI_TIME3, reg);
+
+ dev_dbg(dsi->dev, "mul factor for parsed datatype is = %d\n",
+ (dsi->mul_factor) / 100);
+ /*
+ * The HACT parameter received from panel timing values should be
+ * divisible by 4, because the word count given as input to the
+ * DSI controller is HACT * mul_factor, where mul_factor is
+ * 3, 2.25, 2.25 and 2 for RGB888, RGB666_L, RGB666_P and
+ * RGB565 respectively.
+ * e.g. for the RGB666_L color format at 1080p, the word count is
+ * 1920 * 2.25 = 4320 bytes, which is divisible by 4 and hence a
+ * valid input to the DSI controller. Based on this 2.25 factor,
+ * the check below masks with XDSI_HACT_MULTIPLIER (0x3) to test
+ * divisibility by 4.
+ */
+ if ((vm->hactive & XDSI_HACT_MULTIPLIER) != 0)
+ dev_warn(dsi->dev, "Incorrect HACT will be programmed\n");
+
+ reg = XDSI_TIME2_HACT((vm->hactive) * (dsi->mul_factor) / 100) |
+ XDSI_TIME2_VACT(vm->vactive);
+ xlnx_dsi_writel(dsi->iomem, XDSI_TIME2, reg);
+
+ dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive);
+}
+
+/**
+ * xlnx_dsi_set_display_enable - Enables the DSI Tx IP core enable
+ * register bit
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure and enables the core enable bit
+ * of core configuration register.
+ */
+static void xlnx_dsi_set_display_enable(struct xlnx_dsi *dsi)
+{
+ u32 reg;
+
+ reg = xlnx_dsi_readl(dsi->iomem, XDSI_CCR);
+ reg |= XDSI_CCR_COREENB;
+
+ xlnx_dsi_writel(dsi->iomem, XDSI_CCR, reg);
+ dev_dbg(dsi->dev, "MIPI DSI Tx controller is enabled.\n");
+}
+
+/**
+ * xlnx_dsi_set_display_disable - Disable the DSI Tx IP core enable
+ * register bit
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure and disables the core enable bit
+ * of core configuration register.
+ */
+static void xlnx_dsi_set_display_disable(struct xlnx_dsi *dsi)
+{
+ u32 reg;
+
+ reg = xlnx_dsi_readl(dsi->iomem, XDSI_CCR);
+ reg &= ~XDSI_CCR_COREENB;
+
+ xlnx_dsi_writel(dsi->iomem, XDSI_CCR, reg);
+ dev_dbg(dsi->dev, "DSI Tx is disabled. reset regs to default values\n");
+}
+
+/**
+ * xlnx_dsi_atomic_set_property - implementation of drm_connector_funcs
+ * set_property invoked by IOCTL call to DRM_IOCTL_MODE_OBJ_SETPROPERTY
+ *
+ * @connector: pointer Xilinx DSI connector
+ * @state: DRM connector state
+ * @prop: pointer to the drm_property structure
+ * @val: DSI parameter value that is configured from user application
+ *
+ * This function takes a drm_property and its value given from a user
+ * application and updates the corresponding DSI structure property
+ * variables. These values are later used to configure the DSI Tx IP.
+ *
+ * Return: 0 on success or -EINVAL if setting the property fails
+ */
+static int xlnx_dsi_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *prop, u64 val)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+ dev_dbg(dsi->dev, "property name = %s, value = %lld\n",
+ prop->name, val);
+
+ if (prop == dsi->eotp_prop)
+ dsi->eotp_prop_val = !!val;
+ else if (prop == dsi->bllp_mode_prop)
+ dsi->bllp_mode_prop_val = !!val;
+ else if (prop == dsi->bllp_type_prop)
+ dsi->bllp_type_prop_val = !!val;
+ else if (prop == dsi->video_mode_prop)
+ dsi->video_mode_prop_val = (unsigned int)val;
+ else if (prop == dsi->bllp_burst_time_prop)
+ dsi->bllp_burst_time_prop_val = (unsigned int)val;
+ else if (prop == dsi->cmd_queue_prop)
+ dsi->cmd_queue_prop_val = (unsigned int)val;
+ else if (prop == dsi->height_out)
+ dsi->height_out_prop_val = (u32)val;
+ else if (prop == dsi->width_out)
+ dsi->width_out_prop_val = (u32)val;
+ else if (prop == dsi->in_fmt)
+ dsi->in_fmt_prop_val = (u32)val;
+ else if (prop == dsi->out_fmt)
+ dsi->out_fmt_prop_val = (u32)val;
+ else
+ return -EINVAL;
+
+ xlnx_dsi_set_config_parameters(dsi);
+
+ return 0;
+}
+
+static int
+xlnx_dsi_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *prop, uint64_t *val)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+ if (prop == dsi->eotp_prop)
+ *val = dsi->eotp_prop_val;
+ else if (prop == dsi->bllp_mode_prop)
+ *val = dsi->bllp_mode_prop_val;
+ else if (prop == dsi->bllp_type_prop)
+ *val = dsi->bllp_type_prop_val;
+ else if (prop == dsi->video_mode_prop)
+ *val = dsi->video_mode_prop_val;
+ else if (prop == dsi->bllp_burst_time_prop)
+ *val = dsi->bllp_burst_time_prop_val;
+ else if (prop == dsi->cmd_queue_prop)
+ *val = dsi->cmd_queue_prop_val;
+ else if (prop == dsi->height_out)
+ *val = dsi->height_out_prop_val;
+ else if (prop == dsi->width_out)
+ *val = dsi->width_out_prop_val;
+ else if (prop == dsi->in_fmt)
+ *val = dsi->in_fmt_prop_val;
+ else if (prop == dsi->out_fmt)
+ *val = dsi->out_fmt_prop_val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * xlnx_dsi_host_transfer - transfer command to panel
+ * @host: mipi dsi host structure
+ * @msg: mipi dsi msg with type, length and data
+ *
+ * This function is valid only in command mode.
+ * It checks the command fifo empty status and writes into
+ * data or cmd register and waits for the completion status.
+ *
+ * Return: number of bytes transferred on success, or a negative error
+ * code on failure
+ */
+static ssize_t xlnx_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct xlnx_dsi *dsi = host_to_dsi(host);
+ u32 data0, data1, cmd0, status, val;
+ const char *tx_buf = msg->tx_buf;
+
+ if (!(xlnx_dsi_readl(dsi->iomem, XDSI_CCR) & (XDSI_CCR_COREENB |
+ XDSI_CCR_CMDMODE))) {
+ dev_err(dsi->dev, "dsi command mode not enabled\n");
+ return -EINVAL;
+ }
+
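+ /*
+ * Long DCS writes stream the payload through the data FIFO
+ * (XDSI_DFR) before queueing the command word in XDSI_CMD; short
+ * writes pack the command and its parameters into a single
+ * XDSI_CMD write.
+ */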
+ if (msg->type == MIPI_DSI_DCS_LONG_WRITE) {
+ status = readl_poll_timeout(dsi->iomem + XDSI_STR, val,
+ ((val & XDSI_STR_LPKT_MASK) ==
+ XDSI_STR_LPKT_MASK), 1,
+ XDSI_CMD_TIMEOUT_VAL);
+ if (status) {
+ dev_err(dsi->dev, "long cmd fifo not empty!\n");
+ return -ETIMEDOUT;
+ }
+ data0 = tx_buf[0] | (tx_buf[1] << 8) | (tx_buf[2] << 16) |
+ (tx_buf[3] << 24);
+ data1 = tx_buf[4] | (tx_buf[5] << 8);
+ cmd0 = msg->type | (MIPI_DSI_DCS_READ << 8);
+
+ xlnx_dsi_writel(dsi->iomem, XDSI_DFR, data0);
+ xlnx_dsi_writel(dsi->iomem, XDSI_DFR, data1);
+ xlnx_dsi_writel(dsi->iomem, XDSI_CMD, cmd0);
+ } else {
+ data0 = tx_buf[0];
+ if (msg->type == MIPI_DSI_DCS_SHORT_WRITE_PARAM)
+ data0 = MIPI_DSI_DCS_SHORT_WRITE_PARAM |
+ (tx_buf[0] << 8) | (tx_buf[1] << 16);
+ else
+ data0 = MIPI_DSI_DCS_SHORT_WRITE | (tx_buf[0] << 8);
+
+ status = readl_poll_timeout(dsi->iomem + XDSI_STR, val,
+ ((val & XDSI_STR_RDY_SHPKT) ==
+ XDSI_STR_RDY_SHPKT), 1,
+ XDSI_CMD_TIMEOUT_VAL);
+ if (status) {
+ dev_err(dsi->dev, "short cmd fifo not empty\n");
+ return -ETIMEDOUT;
+ }
+ xlnx_dsi_writel(dsi->iomem, XDSI_CMD, data0);
+ }
+
+ status = readl_poll_timeout(dsi->iomem + XDSI_STR, val,
+ (!(val & XDSI_STR_CMD_EXE_PGS)), 1,
+ XDSI_CMD_TIMEOUT_VAL);
+ if (status) {
+ dev_err(dsi->dev, "cmd time out\n");
+ return -ETIMEDOUT;
+ }
+
+ return msg->tx_len;
+}
+
+static int xlnx_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ u32 panel_lanes;
+ struct xlnx_dsi *dsi = host_to_dsi(host);
+
+ panel_lanes = device->lanes;
+ dsi->mode_flags = device->mode_flags;
+ dsi->panel_node = device->dev.of_node;
+
+ if (panel_lanes != dsi->lanes) {
+ dev_err(dsi->dev, "Mismatch of lanes. panel = %d, DSI = %d\n",
+ panel_lanes, dsi->lanes);
+ return -EINVAL;
+ }
+
+ if (dsi->lanes > 4 || dsi->lanes < 1) {
+ dev_err(dsi->dev, "%d lanes : invalid xlnx,dsi-num-lanes\n",
+ dsi->lanes);
+ return -EINVAL;
+ }
+
+ if (device->format != dsi->format) {
+ dev_err(dsi->dev, "Mismatch of format. panel = %d, DSI = %d\n",
+ device->format, dsi->format);
+ return -EINVAL;
+ }
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+static int xlnx_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct xlnx_dsi *dsi = host_to_dsi(host);
+
+ dsi->panel = NULL;
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops xlnx_dsi_ops = {
+ .attach = xlnx_dsi_host_attach,
+ .detach = xlnx_dsi_host_detach,
+ .transfer = xlnx_dsi_host_transfer,
+};
+
+static int xlnx_dsi_connector_dpms(struct drm_connector *connector, int mode)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+ int ret;
+
+ dev_dbg(dsi->dev, "connector dpms state: %d\n", mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ ret = drm_panel_prepare(dsi->panel);
+ if (ret < 0) {
+ dev_err(dsi->dev, "DRM panel not found\n");
+ return ret;
+ }
+
+ ret = drm_panel_enable(dsi->panel);
+ if (ret < 0) {
+ drm_panel_unprepare(dsi->panel);
+ dev_err(dsi->dev, "DRM panel not enabled\n");
+ return ret;
+ }
+ break;
+ default:
+ drm_panel_disable(dsi->panel);
+ drm_panel_unprepare(dsi->panel);
+ break;
+ }
+
+ return drm_helper_connector_dpms(connector, mode);
+}
+
+static enum drm_connector_status
+xlnx_dsi_detect(struct drm_connector *connector, bool force)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+ if (!dsi->panel) {
+ dsi->panel = of_drm_find_panel(dsi->panel_node);
+ if (dsi->panel) {
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ if (dsi->cmdmode) {
+ xlnx_dsi_writel(dsi->iomem, XDSI_CCR,
+ XDSI_CCR_CMDMODE |
+ XDSI_CCR_COREENB);
+ drm_panel_prepare(dsi->panel);
+ xlnx_dsi_writel(dsi->iomem, XDSI_CCR, 0);
+ }
+ }
+ } else if (!dsi->panel_node) {
+ xlnx_dsi_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_panel_detach(dsi->panel);
+ dsi->panel = NULL;
+ }
+
+ if (dsi->panel)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
+static void xlnx_dsi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ connector->dev = NULL;
+}
+
+static const struct drm_connector_funcs xlnx_dsi_connector_funcs = {
+ .dpms = xlnx_dsi_connector_dpms,
+ .detect = xlnx_dsi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = xlnx_dsi_connector_destroy,
+ .atomic_set_property = xlnx_dsi_atomic_set_property,
+ .atomic_get_property = xlnx_dsi_atomic_get_property,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
+static int xlnx_dsi_get_modes(struct drm_connector *connector)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+ if (dsi->panel)
+ return dsi->panel->funcs->get_modes(dsi->panel);
+
+ return 0;
+}
+
+static struct drm_encoder *
+xlnx_dsi_best_encoder(struct drm_connector *connector)
+{
+ return &(connector_to_dsi(connector)->encoder);
+}
+
+static struct drm_connector_helper_funcs xlnx_dsi_connector_helper_funcs = {
+ .get_modes = xlnx_dsi_get_modes,
+ .best_encoder = xlnx_dsi_best_encoder,
+};
+
+/**
+ * xlnx_dsi_connector_create_property - create DSI connector properties
+ *
+ * @connector: pointer to Xilinx DSI connector
+ *
+ * This function takes the xilinx DSI connector component and defines
+ * the drm_property variables with their default values.
+ */
+static void xlnx_dsi_connector_create_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+ dsi->eotp_prop = drm_property_create_bool(dev, 0, "eotp");
+ dsi->video_mode_prop = drm_property_create_range(dev, 0, "video_mode",
+ 0, 2);
+ dsi->bllp_mode_prop = drm_property_create_bool(dev, 0, "bllp_mode");
+ dsi->bllp_type_prop = drm_property_create_bool(dev, 0, "bllp_type");
+ dsi->bllp_burst_time_prop =
+ drm_property_create_range(dev, 0, "bllp_burst_time", 0, 0xFFFF);
+ dsi->cmd_queue_prop = drm_property_create_range(dev, 0, "cmd_queue", 0,
+ 0xffffff);
+ dsi->height_out = drm_property_create_range(dev, 0, "height_out",
+ 2, 4096);
+ dsi->width_out = drm_property_create_range(dev, 0, "width_out",
+ 2, 4096);
+ dsi->in_fmt = drm_property_create_range(dev, 0, "in_fmt", 0, 16384);
+ dsi->out_fmt = drm_property_create_range(dev, 0, "out_fmt", 0, 16384);
+}
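+
+/*
+ * These are regular KMS connector properties, so they can be set from
+ * userspace via DRM_IOCTL_MODE_OBJ_SETPROPERTY, e.g. with libdrm's
+ * modetest tool: modetest -w <connector_id>:video_mode:2
+ */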
+
+/**
+ * xlnx_dsi_connector_attach_property - attach DSI connector
+ * properties
+ *
+ * @connector: pointer to Xilinx DSI connector
+ */
+static void xlnx_dsi_connector_attach_property(struct drm_connector *connector)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+ struct drm_mode_object *obj = &connector->base;
+
+ if (dsi->eotp_prop)
+ drm_object_attach_property(obj, dsi->eotp_prop, 1);
+
+ if (dsi->video_mode_prop)
+ drm_object_attach_property(obj, dsi->video_mode_prop, 0);
+
+ if (dsi->bllp_burst_time_prop)
+ drm_object_attach_property(&connector->base,
+ dsi->bllp_burst_time_prop, 0);
+
+ if (dsi->bllp_mode_prop)
+ drm_object_attach_property(&connector->base,
+ dsi->bllp_mode_prop, 0);
+
+ if (dsi->bllp_type_prop)
+ drm_object_attach_property(&connector->base,
+ dsi->bllp_type_prop, 0);
+
+ if (dsi->cmd_queue_prop)
+ drm_object_attach_property(&connector->base,
+ dsi->cmd_queue_prop, 0);
+
+ if (dsi->height_out)
+ drm_object_attach_property(obj, dsi->height_out, 0);
+
+ if (dsi->width_out)
+ drm_object_attach_property(obj, dsi->width_out, 0);
+
+ if (dsi->in_fmt)
+ drm_object_attach_property(obj, dsi->in_fmt, 0);
+
+ if (dsi->out_fmt)
+ drm_object_attach_property(obj, dsi->out_fmt, 0);
+}
+
+static int xlnx_dsi_create_connector(struct drm_encoder *encoder)
+{
+ struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+ struct drm_connector *connector = &dsi->connector;
+ int ret;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &xlnx_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (ret) {
+ dev_err(dsi->dev, "Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &xlnx_dsi_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ xlnx_dsi_connector_create_property(connector);
+ xlnx_dsi_connector_attach_property(connector);
+
+ return 0;
+}
+
+/**
+ * xlnx_dsi_atomic_mode_set - derive the DSI timing parameters
+ *
+ * @encoder: pointer to Xilinx DRM encoder
+ * @crtc_state: Pointer to drm core crtc state
+ * @connector_state: DSI connector drm state
+ *
+ * This function derives the DSI IP timing parameters from the adjusted
+ * display mode provided by the CRTC state.
+ */
+static void
+xlnx_dsi_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *connector_state)
+{
+ struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+ struct videomode *vm = &dsi->vm;
+ struct drm_display_mode *m = &crtc_state->adjusted_mode;
+
+ /* Set bridge input and output parameters */
+ xlnx_bridge_set_input(dsi->bridge, m->hdisplay, m->vdisplay,
+ dsi->in_fmt_prop_val);
+ xlnx_bridge_set_output(dsi->bridge, dsi->width_out_prop_val,
+ dsi->height_out_prop_val,
+ dsi->out_fmt_prop_val);
+ xlnx_bridge_enable(dsi->bridge);
+
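+ /* Derive porch and sync lengths from the adjusted mode for the DSI IP */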
+ vm->hactive = m->hdisplay;
+ vm->vactive = m->vdisplay;
+ vm->vfront_porch = m->vsync_start - m->vdisplay;
+ vm->vback_porch = m->vtotal - m->vsync_end;
+ vm->vsync_len = m->vsync_end - m->vsync_start;
+ vm->hfront_porch = m->hsync_start - m->hdisplay;
+ vm->hback_porch = m->htotal - m->hsync_end;
+ vm->hsync_len = m->hsync_end - m->hsync_start;
+ xlnx_dsi_set_display_mode(dsi);
+}
+
+static void xlnx_dsi_disable(struct drm_encoder *encoder)
+{
+ struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+
+ if (dsi->bridge)
+ xlnx_bridge_disable(dsi->bridge);
+
+ xlnx_dsi_set_display_disable(dsi);
+}
+
+static void xlnx_dsi_enable(struct drm_encoder *encoder)
+{
+ struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+
+ xlnx_dsi_set_display_enable(dsi);
+}
+
+static const struct drm_encoder_helper_funcs xlnx_dsi_encoder_helper_funcs = {
+ .atomic_mode_set = xlnx_dsi_atomic_mode_set,
+ .enable = xlnx_dsi_enable,
+ .disable = xlnx_dsi_disable,
+};
+
+static const struct drm_encoder_funcs xlnx_dsi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int xlnx_dsi_parse_dt(struct xlnx_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+ u32 datatype;
+ static const int xdsi_mul_fact[XDSI_NUM_DATA_T] = {300, 225, 225, 200};
+
+ dsi->dphy_clk_200M = devm_clk_get(dev, "dphy_clk_200M");
+ if (IS_ERR(dsi->dphy_clk_200M)) {
+ ret = PTR_ERR(dsi->dphy_clk_200M);
+ dev_err(dev, "failed to get dphy_clk_200M %d\n", ret);
+ return ret;
+ }
+
+ dsi->video_aclk = devm_clk_get(dev, "s_axis_aclk");
+ if (IS_ERR(dsi->video_aclk)) {
+ ret = PTR_ERR(dsi->video_aclk);
+ dev_err(dev, "failed to get video_clk %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Used as a multiplication factor for HACT based on used
+ * DSI data type.
+ *
+ * e.g. for RGB666_L datatype and 1920x1080 resolution,
+ * the Hact (WC) would be as follows -
+ * 1920 pixels * 18 bits per pixel / 8 bits per byte
+ * = 1920 pixels * 2.25 bytes per pixel = 4320 bytes.
+ *
+ * Data Type - Multiplication factor
+ * RGB888 - 3
+ * RGB666_L - 2.25
+ * RGB666_P - 2.25
+ * RGB565 - 2
+ *
+ * Since the multiplication factor may be a fractional number,
+ * a 100x-scaled integer factor is used.
+ */
+ ret = of_property_read_u32(node, "xlnx,dsi-num-lanes", &dsi->lanes);
+ if (ret < 0) {
+ dev_err(dsi->dev, "missing xlnx,dsi-num-lanes property\n");
+ return ret;
+ }
+ if (dsi->lanes > 4 || dsi->lanes < 1) {
+ dev_err(dsi->dev, "%d lanes : invalid lanes\n", dsi->lanes);
+ return -EINVAL;
+ }
+ ret = of_property_read_u32(node, "xlnx,dsi-data-type", &datatype);
+ if (ret < 0) {
+ dev_err(dsi->dev, "missing xlnx,dsi-data-type property\n");
+ return ret;
+ }
+ if (datatype > MIPI_DSI_FMT_RGB565) {
+ dev_err(dsi->dev, "Invalid xlnx,dsi-data-type value\n");
+ return -EINVAL;
+ }
+ dsi->format = datatype;
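+ /* Store the 100x-scaled bytes-per-pixel factor for this data type */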
+ dsi->mul_factor = xdsi_mul_fact[datatype];
+
+ dsi->cmdmode = of_property_read_bool(node, "xlnx,dsi-cmd-mode");
+
+ dev_dbg(dsi->dev, "DSI controller num lanes = %d\n", dsi->lanes);
+ dev_dbg(dsi->dev, "DSI controller datatype = %d\n", datatype);
+ dev_dbg(dsi->dev, "DSI controller cmd mode = %d\n", dsi->cmdmode);
+
+ return 0;
+}
+
+static int xlnx_dsi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_dsi *dsi = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &dsi->encoder;
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ /*
+ * TODO: Only one possible CRTC is supported for now, as per the
+ * current DSI Tx driver implementation. The DRM framework can
+ * support more than one CRTC, and the DSI driver can be enhanced
+ * for that.
+ */
+ encoder->possible_crtcs = 1;
+ drm_encoder_init(drm_dev, encoder, &xlnx_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ drm_encoder_helper_add(encoder, &xlnx_dsi_encoder_helper_funcs);
+ ret = xlnx_dsi_create_connector(encoder);
+ if (ret) {
+ dev_err(dsi->dev, "fail creating connector, ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+ ret = mipi_dsi_host_register(&dsi->dsi_host);
+ if (ret) {
+ xlnx_dsi_connector_destroy(&dsi->connector);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+ return 0;
+}
+
+static void xlnx_dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_dsi *dsi = dev_get_drvdata(dev);
+
+ xlnx_dsi_disable(&dsi->encoder);
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+ xlnx_bridge_disable(dsi->bridge);
+}
+
+static const struct component_ops xlnx_dsi_component_ops = {
+ .bind = xlnx_dsi_bind,
+ .unbind = xlnx_dsi_unbind,
+};
+
+static int xlnx_dsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xlnx_dsi *dsi;
+ struct device_node *vpss_node;
+ int ret;
+ unsigned long rate;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dsi->dsi_host.ops = &xlnx_dsi_ops;
+ dsi->dsi_host.dev = dev;
+ dsi->dev = dev;
+
+ ret = xlnx_dsi_parse_dt(dsi);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->iomem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dsi->iomem))
+ return PTR_ERR(dsi->iomem);
+
+ platform_set_drvdata(pdev, dsi);
+
+ /* Bridge support */
+ vpss_node = of_parse_phandle(dsi->dev->of_node, "xlnx,vpss", 0);
+ if (vpss_node) {
+ dsi->bridge = of_xlnx_bridge_get(vpss_node);
+ if (!dsi->bridge) {
+ dev_info(dsi->dev, "Didn't get bridge instance\n");
+ return -EPROBE_DEFER;
+ }
+ }
+
+ ret = clk_set_rate(dsi->dphy_clk_200M, XDSI_DPHY_CLK_REQ);
+ if (ret) {
+ dev_err(dev, "failed to set dphy clk rate %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dsi->dphy_clk_200M);
+ if (ret) {
+ dev_err(dev, "failed to enable dphy clk %d\n", ret);
+ return ret;
+ }
+
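+ /* Verify the DPHY clock actually settled within the supported range */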
+ rate = clk_get_rate(dsi->dphy_clk_200M);
+ if (rate < XDSI_DPHY_CLK_MIN || rate > XDSI_DPHY_CLK_MAX) {
+ dev_err(dev, "Error DPHY clock = %lu\n", rate);
+ ret = -EINVAL;
+ goto err_disable_dphy_clk;
+ }
+
+ ret = clk_prepare_enable(dsi->video_aclk);
+ if (ret) {
+ dev_err(dev, "failed to enable video clk %d\n", ret);
+ goto err_disable_dphy_clk;
+ }
+
+ ret = component_add(dev, &xlnx_dsi_component_ops);
+ if (ret < 0)
+ goto err_disable_video_clk;
+
+ return ret;
+
+err_disable_video_clk:
+ clk_disable_unprepare(dsi->video_aclk);
+err_disable_dphy_clk:
+ clk_disable_unprepare(dsi->dphy_clk_200M);
+ return ret;
+}
+
+static int xlnx_dsi_remove(struct platform_device *pdev)
+{
+ struct xlnx_dsi *dsi = platform_get_drvdata(pdev);
+
+ component_del(&pdev->dev, &xlnx_dsi_component_ops);
+ clk_disable_unprepare(dsi->video_aclk);
+ clk_disable_unprepare(dsi->dphy_clk_200M);
+
+ return 0;
+}
+
+static const struct of_device_id xlnx_dsi_of_match[] = {
+ { .compatible = "xlnx,dsi"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xlnx_dsi_of_match);
+
+static struct platform_driver dsi_driver = {
+ .probe = xlnx_dsi_probe,
+ .remove = xlnx_dsi_remove,
+ .driver = {
+ .name = "xlnx-dsi",
+ .of_match_table = xlnx_dsi_of_match,
+ },
+};
+
+module_platform_driver(dsi_driver);
+
+MODULE_AUTHOR("Siva Rajesh <sivaraj@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA MIPI DSI Tx Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_fb.c b/drivers/gpu/drm/xlnx/xlnx_fb.c
new file mode 100644
index 000000000000..58c5d1d17ae9
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_fb.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS Framebuffer helper
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * Based on drm_fb_cma_helper.c
+ *
+ * Copyright (C) 2012 Analog Device Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+#include "xlnx_fb.h"
+
+#define XLNX_MAX_PLANES 4
+
+struct xlnx_fbdev {
+ struct drm_fb_helper fb_helper;
+ struct drm_framebuffer *fb;
+ unsigned int align;
+ unsigned int vres_mult;
+};
+
+static inline struct xlnx_fbdev *to_fbdev(struct drm_fb_helper *fb_helper)
+{
+ return container_of(fb_helper, struct xlnx_fbdev, fb_helper);
+}
+
+static const struct drm_framebuffer_funcs xlnx_fb_funcs = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+};
+
+static int
+xlnx_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_mode_set *mode_set;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ switch (cmd) {
+ case FBIO_WAITFORVSYNC:
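+ /* Wait for one vblank on each CRTC in this client's modeset */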
+ mutex_lock(&fb_helper->client.modeset_mutex);
+ drm_client_for_each_modeset(mode_set, &fb_helper->client) {
+ crtc = mode_set->crtc;
+ ret = drm_crtc_vblank_get(crtc);
+ if (!ret) {
+ drm_crtc_wait_one_vblank(crtc);
+ drm_crtc_vblank_put(crtc);
+ }
+ }
+ mutex_unlock(&fb_helper->client.modeset_mutex);
+ return ret;
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static struct fb_ops xlnx_fbdev_ops = {
+ .owner = THIS_MODULE,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_ioctl = xlnx_fb_ioctl,
+};
+
+static struct drm_framebuffer *
+xlnx_fb_gem_fb_alloc(struct drm_device *drm,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **obj, unsigned int num_planes,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ struct drm_framebuffer *fb;
+ int ret, i;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ drm_helper_mode_fill_fb_struct(drm, fb, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ fb->obj[i] = obj[i];
+
+ ret = drm_framebuffer_init(drm, fb, funcs);
+ if (ret) {
+ dev_err(drm->dev, "Failed to init framebuffer: %d\n", ret);
+ kfree(fb);
+ return ERR_PTR(ret);
+ }
+
+ return fb;
+}
+
+static struct drm_framebuffer *
+xlnx_fb_gem_fbdev_fb_create(struct drm_device *drm,
+ struct drm_fb_helper_surface_size *size,
+ unsigned int pitch_align, struct drm_gem_object *obj,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+
+ mode_cmd.width = size->surface_width;
+ mode_cmd.height = size->surface_height;
+ mode_cmd.pitches[0] = size->surface_width *
+ DIV_ROUND_UP(size->surface_bpp, 8);
+ if (pitch_align)
+ mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0],
+ pitch_align);
+ mode_cmd.pixel_format = drm_driver_legacy_fb_format(drm,
+ size->surface_bpp,
+ size->surface_depth);
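+ /* Reject GEM objects too small to back the requested mode */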
+ if (obj->size < mode_cmd.pitches[0] * mode_cmd.height)
+ return ERR_PTR(-EINVAL);
+
+ return xlnx_fb_gem_fb_alloc(drm, &mode_cmd, &obj, 1, funcs);
+}
+
+/**
+ * xlnx_fbdev_create - Create the fbdev with a framebuffer
+ * @fb_helper: fb helper structure
+ * @size: framebuffer size info
+ *
+ * This function is based on drm_fbdev_cma_create().
+ *
+ * Return: 0 if successful, or the error code.
+ */
+static int xlnx_fbdev_create(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *size)
+{
+ struct xlnx_fbdev *fbdev = to_fbdev(fb_helper);
+ struct drm_device *drm = fb_helper->dev;
+ struct drm_gem_cma_object *obj;
+ struct drm_framebuffer *fb;
+ unsigned int bytes_per_pixel;
+ unsigned long offset;
+ struct fb_info *fbi;
+ u32 format;
+ const struct drm_format_info *info;
+ size_t bytes;
+ int ret;
+
+ dev_dbg(drm->dev, "surface width(%d), height(%d) and bpp(%d)\n",
+ size->surface_width, size->surface_height, size->surface_bpp);
+
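+ /* Size the backing buffer from the pitch-aligned line length */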
+ size->surface_height *= fbdev->vres_mult;
+ bytes_per_pixel = DIV_ROUND_UP(size->surface_bpp, 8);
+ bytes = ALIGN(size->surface_width * bytes_per_pixel, fbdev->align);
+ bytes *= size->surface_height;
+
+ obj = drm_gem_cma_create(drm, bytes);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ fbi = framebuffer_alloc(0, drm->dev);
+ if (!fbi) {
+ dev_err(drm->dev, "Failed to allocate framebuffer info.\n");
+ ret = -ENOMEM;
+ goto err_drm_gem_cma_free_object;
+ }
+
+ /* Override the depth given by fb helper with current format value */
+ format = xlnx_get_format(drm);
+ info = drm_format_info(format);
+ if (size->surface_bpp == info->cpp[0] * 8)
+ size->surface_depth = info->depth;
+
+ fbdev->fb = xlnx_fb_gem_fbdev_fb_create(drm, size, fbdev->align,
+ &obj->base, &xlnx_fb_funcs);
+ if (IS_ERR(fbdev->fb)) {
+ dev_err(drm->dev, "Failed to allocate DRM framebuffer.\n");
+ ret = PTR_ERR(fbdev->fb);
+ goto err_framebuffer_release;
+ }
+
+ fb = fbdev->fb;
+ fb_helper->fb = fb;
+ fb_helper->fbdev = fbi;
+ fbi->par = fb_helper;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &xlnx_fbdev_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ dev_err(drm->dev, "Failed to allocate color map.\n");
+ goto err_fb_destroy;
+ }
+
+ drm_fb_helper_fill_info(fbi, fb_helper, size);
+ fbi->var.yres = fb->height / fbdev->vres_mult;
+
+ offset = fbi->var.xoffset * bytes_per_pixel;
+ offset += fbi->var.yoffset * fb->pitches[0];
+
+ drm->mode_config.fb_base = (resource_size_t)obj->paddr;
+ fbi->screen_base = (char __iomem *)(obj->vaddr + offset);
+ fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+ fbi->screen_size = bytes;
+ fbi->fix.smem_len = bytes;
+
+ return 0;
+
+err_fb_destroy:
+ drm_framebuffer_unregister_private(fb);
+ drm_gem_fb_destroy(fb);
+err_framebuffer_release:
+ framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+ drm_gem_cma_free_object(&obj->base);
+ return ret;
+}
+
+static const struct drm_fb_helper_funcs xlnx_fb_helper_funcs = {
+ .fb_probe = xlnx_fbdev_create,
+};
+
+/**
+ * xlnx_fb_init - Allocate and initialize the Xilinx framebuffer
+ * @drm: DRM device
+ * @preferred_bpp: preferred bits per pixel for the device
+ * @max_conn_count: maximum number of connectors
+ * @align: alignment value for pitch
+ * @vres_mult: multiplier for virtual resolution
+ *
+ * This function is based on drm_fbdev_cma_init().
+ *
+ * Return: a newly allocated drm_fb_helper struct or an ERR_PTR.
+ */
+struct drm_fb_helper *
+xlnx_fb_init(struct drm_device *drm, int preferred_bpp,
+ unsigned int max_conn_count, unsigned int align,
+ unsigned int vres_mult)
+{
+ struct xlnx_fbdev *fbdev;
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev)
+ return ERR_PTR(-ENOMEM);
+
+ fbdev->vres_mult = vres_mult;
+ fbdev->align = align;
+ fb_helper = &fbdev->fb_helper;
+ drm_fb_helper_prepare(drm, fb_helper, &xlnx_fb_helper_funcs);
+
+ ret = drm_fb_helper_init(drm, fb_helper, max_conn_count);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to initialize drm fb helper.\n");
+ goto err_free;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to add connectors.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to set initial hw configuration.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ return fb_helper;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_free:
+ kfree(fbdev);
+ return ERR_PTR(ret);
+}
+
+/**
+ * xlnx_fbdev_defio_fini - Free the defio fb
+ * @fbi: fb_info struct
+ *
+ * This function is based on drm_fbdev_cma_defio_fini().
+ */
+static void xlnx_fbdev_defio_fini(struct fb_info *fbi)
+{
+ if (!fbi->fbdefio)
+ return;
+
+ fb_deferred_io_cleanup(fbi);
+ kfree(fbi->fbdefio);
+ kfree(fbi->fbops);
+}
+
+/**
+ * xlnx_fb_fini - Free the Xilinx framebuffer
+ * @fb_helper: drm_fb_helper struct
+ *
+ * This function is based on drm_fbdev_cma_fini().
+ */
+void xlnx_fb_fini(struct drm_fb_helper *fb_helper)
+{
+ struct xlnx_fbdev *fbdev = to_fbdev(fb_helper);
+
+ drm_fb_helper_unregister_fbi(&fbdev->fb_helper);
+ if (fbdev->fb_helper.fbdev)
+ xlnx_fbdev_defio_fini(fbdev->fb_helper.fbdev);
+
+ if (fbdev->fb_helper.fb)
+ drm_framebuffer_remove(fbdev->fb_helper.fb);
+
+ drm_fb_helper_fini(&fbdev->fb_helper);
+ kfree(fbdev);
+}
+
+/**
+ * xlnx_fb_create - (struct drm_mode_config_funcs *)->fb_create callback
+ * @drm: DRM device
+ * @file_priv: drm file private data
+ * @mode_cmd: mode command for fb creation
+ *
+ * This function creates a drm_framebuffer with xlnx_fb_funcs for the given
+ * mode @mode_cmd. It is intended to be used as the fb_create callback of
+ * drm_mode_config_funcs.
+ *
+ * Return: a drm_framebuffer object if successful, or
+ * ERR_PTR from drm_gem_fb_create_with_funcs().
+ */
+struct drm_framebuffer *
+xlnx_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ return drm_gem_fb_create_with_funcs(drm, file_priv, mode_cmd,
+ &xlnx_fb_funcs);
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_fb.h b/drivers/gpu/drm/xlnx/xlnx_fb.h
new file mode 100644
index 000000000000..6efc985f2fb3
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_fb.h
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS Framebuffer helper header
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_FB_H_
+#define _XLNX_FB_H_
+
+struct drm_fb_helper;
+
+struct drm_framebuffer *
+xlnx_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_fb_helper *
+xlnx_fb_init(struct drm_device *drm, int preferred_bpp,
+ unsigned int max_conn_count, unsigned int align,
+ unsigned int vres_mult);
+void xlnx_fb_fini(struct drm_fb_helper *fb_helper);
+
+#endif /* _XLNX_FB_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_gem.c b/drivers/gpu/drm/xlnx/xlnx_gem.c
new file mode 100644
index 000000000000..4a5d533ec72e
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_gem.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS GEM helper
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "xlnx_drv.h"
+#include "xlnx_gem.h"
+
+/**
+ * xlnx_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+ * @file_priv: drm_file object
+ * @drm: DRM object
+ * @args: info for dumb scanout buffer creation
+ *
+ * This function implements the dumb_create callback of struct drm_driver.
+ * It wraps drm_gem_cma_dumb_create_internal() and overrides the pitch with
+ * a value aligned to the device's requirement.
+ *
+ * Return: The return value from drm_gem_cma_dumb_create_internal()
+ */
+int xlnx_gem_cma_dumb_create(struct drm_file *file_priv, struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ unsigned int align = xlnx_get_align(drm);
+
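+ /* Keep a caller-supplied pitch only if it meets the device alignment */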
+ if (!args->pitch || !IS_ALIGNED(args->pitch, align))
+ args->pitch = ALIGN(pitch, align);
+
+ return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_gem.h b/drivers/gpu/drm/xlnx/xlnx_gem.h
new file mode 100644
index 000000000000..f380de916379
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_gem.h
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS GEM helper header
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_GEM_H_
+#define _XLNX_GEM_H_
+
+int xlnx_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+#endif /* _XLNX_GEM_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_mixer.c b/drivers/gpu/drm/xlnx/xlnx_mixer.c
new file mode 100644
index 000000000000..82b3f151c93c
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_mixer.c
@@ -0,0 +1,2830 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx logicore video mixer driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ * : Jeffrey Mouroux <jmouroux@xilinx.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/dmaengine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <video/videomode.h>
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+
+/**************************** Register Data **********************************/
+#define XVMIX_AP_CTRL 0x00000
+#define XVMIX_GIE 0x00004
+#define XVMIX_IER 0x00008
+#define XVMIX_ISR 0x0000c
+#define XVMIX_WIDTH_DATA 0x00010
+#define XVMIX_HEIGHT_DATA 0x00018
+#define XVMIX_BACKGROUND_Y_R_DATA 0x00028
+#define XVMIX_BACKGROUND_U_G_DATA 0x00030
+#define XVMIX_BACKGROUND_V_B_DATA 0x00038
+#define XVMIX_LAYERENABLE_DATA 0x00040
+#define XVMIX_LAYERALPHA_0_DATA 0x00100
+#define XVMIX_LAYERSTARTX_0_DATA 0x00108
+#define XVMIX_LAYERSTARTY_0_DATA 0x00110
+#define XVMIX_LAYERWIDTH_0_DATA 0x00118
+#define XVMIX_LAYERSTRIDE_0_DATA 0x00120
+#define XVMIX_LAYERHEIGHT_0_DATA 0x00128
+#define XVMIX_LAYERSCALE_0_DATA 0x00130
+#define XVMIX_LAYERVIDEOFORMAT_0_DATA 0x00138
+#define XVMIX_LAYER1_BUF1_V_DATA 0x00240
+#define XVMIX_LAYER1_BUF2_V_DATA 0x0024c
+#define XVMIX_LOGOSTARTX_DATA 0x01000
+#define XVMIX_LOGOSTARTY_DATA 0x01008
+#define XVMIX_LOGOWIDTH_DATA 0x01010
+#define XVMIX_LOGOHEIGHT_DATA 0x01018
+#define XVMIX_LOGOSCALEFACTOR_DATA 0x01020
+#define XVMIX_LOGOALPHA_DATA 0x01028
+#define XVMIX_LOGOCLRKEYMIN_R_DATA 0x01030
+#define XVMIX_LOGOCLRKEYMIN_G_DATA 0x01038
+#define XVMIX_LOGOCLRKEYMIN_B_DATA 0x01040
+#define XVMIX_LOGOCLRKEYMAX_R_DATA 0x01048
+#define XVMIX_LOGOCLRKEYMAX_G_DATA 0x01050
+#define XVMIX_LOGOCLRKEYMAX_B_DATA 0x01058
+#define XVMIX_LOGOR_V_BASE 0x10000
+#define XVMIX_LOGOR_V_HIGH 0x10fff
+#define XVMIX_LOGOG_V_BASE 0x20000
+#define XVMIX_LOGOG_V_HIGH 0x20fff
+#define XVMIX_LOGOB_V_BASE 0x30000
+#define XVMIX_LOGOB_V_HIGH 0x30fff
+#define XVMIX_LOGOA_V_BASE 0x40000
+#define XVMIX_LOGOA_V_HIGH 0x40fff
+
+/************************** Constant Definitions *****************************/
+#define XVMIX_LOGO_OFFSET 0x1000
+#define XVMIX_MASK_DISABLE_ALL_LAYERS 0x0
+#define XVMIX_REG_OFFSET 0x100
+#define XVMIX_MASTER_LAYER_IDX 0x0
+#define XVMIX_LOGO_LAYER_IDX 0x1
+#define XVMIX_DISP_MAX_WIDTH 4096
+#define XVMIX_DISP_MAX_HEIGHT 2160
+#define XVMIX_MAX_OVERLAY_LAYERS 16
+#define XVMIX_MAX_BPC 16
+#define XVMIX_ALPHA_MIN 0
+#define XVMIX_ALPHA_MAX 256
+#define XVMIX_LAYER_WIDTH_MIN 64
+#define XVMIX_LAYER_HEIGHT_MIN 64
+#define XVMIX_LOGO_LAYER_WIDTH_MIN 32
+#define XVMIX_LOGO_LAYER_HEIGHT_MIN 32
+#define XVMIX_LOGO_LAYER_WIDTH_MAX 256
+#define XVMIX_LOGO_LAYER_HEIGHT_MAX 256
+#define XVMIX_IRQ_DONE_MASK BIT(0)
+#define XVMIX_GIE_EN_MASK BIT(0)
+#define XVMIX_AP_EN_MASK BIT(0)
+#define XVMIX_AP_RST_MASK BIT(7)
+#define XVMIX_MAX_NUM_SUB_PLANES 4
+#define XVMIX_SCALE_FACTOR_1X 0
+#define XVMIX_SCALE_FACTOR_2X 1
+#define XVMIX_SCALE_FACTOR_4X 2
+#define XVMIX_SCALE_FACTOR_INVALID 3
+#define XVMIX_BASE_ALIGN 8
+
+/*************************** STATIC DATA ************************************/
+static const u32 color_table[] = {
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_AYUV,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_Y8,
+ DRM_FORMAT_Y10,
+ DRM_FORMAT_XVUY2101010,
+ DRM_FORMAT_VUY888,
+ DRM_FORMAT_XVUY8888,
+ DRM_FORMAT_XV15,
+ DRM_FORMAT_XV20,
+};
+
+/*********************** Inline Functions/Macros *****************************/
+#define to_mixer_hw(p) (&((p)->mixer->mixer_hw))
+#define to_xlnx_crtc(x) container_of(x, struct xlnx_crtc, crtc)
+#define to_xlnx_plane(x) container_of(x, struct xlnx_mix_plane, base)
+#define to_xlnx_mixer(x) container_of(x, struct xlnx_mix, crtc)
+
+/**
+ * enum xlnx_mix_layer_id - Describes the layer by index to be acted upon
+ * @XVMIX_LAYER_MASTER: Master layer
+ * @XVMIX_LAYER_1: Layer 1
+ * @XVMIX_LAYER_2: Layer 2
+ * @XVMIX_LAYER_3: Layer 3
+ * @XVMIX_LAYER_4: Layer 4
+ * @XVMIX_LAYER_5: Layer 5
+ * @XVMIX_LAYER_6: Layer 6
+ * @XVMIX_LAYER_7: Layer 7
+ * @XVMIX_LAYER_8: Layer 8
+ * @XVMIX_LAYER_9: Layer 9
+ * @XVMIX_LAYER_10: Layer 10
+ * @XVMIX_LAYER_11: Layer 11
+ * @XVMIX_LAYER_12: Layer 12
+ * @XVMIX_LAYER_13: Layer 13
+ * @XVMIX_LAYER_14: Layer 14
+ * @XVMIX_LAYER_15: Layer 15
+ * @XVMIX_LAYER_16: Layer 16
+ */
+enum xlnx_mix_layer_id {
+ XVMIX_LAYER_MASTER = 0,
+ XVMIX_LAYER_1,
+ XVMIX_LAYER_2,
+ XVMIX_LAYER_3,
+ XVMIX_LAYER_4,
+ XVMIX_LAYER_5,
+ XVMIX_LAYER_6,
+ XVMIX_LAYER_7,
+ XVMIX_LAYER_8,
+ XVMIX_LAYER_9,
+ XVMIX_LAYER_10,
+ XVMIX_LAYER_11,
+ XVMIX_LAYER_12,
+ XVMIX_LAYER_13,
+ XVMIX_LAYER_14,
+ XVMIX_LAYER_15,
+ XVMIX_LAYER_16
+};
+
+/**
+ * struct xlnx_mix_layer_data - Describes the hardware configuration of a given
+ * mixer layer
+ * @hw_config: struct specifying the IP hardware constraints for this layer
+ * @vid_fmt: DRM format for this layer
+ * @can_alpha: Indicates that layer alpha is enabled for this layer
+ * @can_scale: Indicates that layer scaling is enabled for this layer
+ * @is_streaming: Indicates layer is not using mixer DMA but streaming from
+ * external DMA
+ * @max_width: Max possible pixel width
+ * @max_height: Max possible pixel height
+ * @min_width: Min possible pixel width
+ * @min_height: Min possible pixel height
+ * @layer_regs: struct containing current cached register values
+ * @buff_addr: Current physical address of image buffer
+ * @x_pos: Current CRTC x offset
+ * @y_pos: Current CRTC y offset
+ * @width: Current width in pixels
+ * @height: Current height in pixels
+ * @stride: Current stride (when Mixer is performing DMA)
+ * @alpha: Current alpha setting
+ * @is_active: Logical flag indicating layer in use. If false, calls to
+ * enable layer will be ignored.
+ * @scale_fact: Current scaling factor applied to layer
+ * @id: The logical layer id identifies which layer this struct describes
+ * (e.g. 0 = master, 1-15 = overlay).
+ *
+ * All mixer layers are represented by an instance of this struct:
+ * output streaming, overlay, logo.
+ * Current layer-specific register state is stored in the layer_regs struct.
+ * The hardware configuration is stored in struct hw_config.
+ *
+ * Note:
+ * Some properties of the logo layer are unique and not described in this
+ * struct. Those properties are part of the xlnx_mix struct as global
+ * properties.
+ */
+struct xlnx_mix_layer_data {
+ struct {
+ u32 vid_fmt;
+ bool can_alpha;
+ bool can_scale;
+ bool is_streaming;
+ u32 max_width;
+ u32 max_height;
+ u32 min_width;
+ u32 min_height;
+ } hw_config;
+
+ struct {
+ u64 buff_addr1;
+ u64 buff_addr2;
+ u32 x_pos;
+ u32 y_pos;
+ u32 width;
+ u32 height;
+ u32 stride;
+ u32 alpha;
+ bool is_active;
+ u32 scale_fact;
+ } layer_regs;
+
+ enum xlnx_mix_layer_id id;
+};
+
+/**
+ * struct xlnx_mix_hw - Describes a mixer IP block instance within the design
+ * @base: Base physical address of Mixer IP in memory map
+ * @logo_layer_en: Indicates logo layer is enabled in hardware
+ * @logo_pixel_alpha_enabled: Indicates that per-pixel alpha supported for logo
+ * layer
+ * @max_layer_width: Max possible width for any layer on this Mixer
+ * @max_layer_height: Max possible height for any layer on this Mixer
+ * @max_logo_layer_width: Max possible width for the logo layer on this Mixer
+ * @max_logo_layer_height: Max possible height for the logo layer on this Mixer
+ * @num_layers: Max number of layers (excl: logo)
+ * @bg_layer_bpc: Bits per component for the background streaming layer
+ * @dma_addr_size: dma address size in bits
+ * @ppc: Pixels per component
+ * @irq: Interrupt request number assigned
+ * @bg_color: Current RGB color value for internal background color generator
+ * @layer_data: Array of layer data
+ * @layer_cnt: Layer data array count
+ * @max_layers: Maximum number of layers supported by hardware
+ * @logo_layer_id: Index of logo layer
+ * @logo_en_mask: Mask used to enable logo layer
+ * @enable_all_mask: Mask used to enable all layers
+ * @reset_gpio: GPIO line used to reset IP between modesetting operations
+ * @intrpt_handler_fn: Interrupt handler function called when frame is completed
+ * @intrpt_data: Data pointer passed to interrupt handler
+ *
+ * Used as the primary data structure for many L2 driver functions. Logo layer
+ * data, if enabled within the IP, is described in this structure. All other
+ * layers are described by an instance of xlnx_mix_layer_data referenced by this
+ * struct.
+ *
+ */
+struct xlnx_mix_hw {
+ void __iomem *base;
+ bool logo_layer_en;
+ bool logo_pixel_alpha_enabled;
+ u32 max_layer_width;
+ u32 max_layer_height;
+ u32 max_logo_layer_width;
+ u32 max_logo_layer_height;
+ u32 num_layers;
+ u32 bg_layer_bpc;
+ u32 dma_addr_size;
+ u32 ppc;
+ int irq;
+ u64 bg_color;
+ struct xlnx_mix_layer_data *layer_data;
+ u32 layer_cnt;
+ u32 max_layers;
+ u32 logo_layer_id;
+ u32 logo_en_mask;
+ u32 enable_all_mask;
+ struct gpio_desc *reset_gpio;
+ void (*intrpt_handler_fn)(void *);
+ void *intrpt_data;
+};
+
+/**
+ * struct xlnx_mix - Container for interfacing DRM driver to mixer
+ * @mixer_hw: Object representing actual hardware state of mixer
+ * @master: Logical master device from xlnx drm
+ * @crtc: Xilinx DRM driver crtc object
+ * @drm_primary_layer: Hardware layer serving as logical DRM primary layer
+ * @hw_master_layer: Base video streaming layer
+ * @hw_logo_layer: Hardware logo layer
+ * @planes: Mixer overlay layers
+ * @num_planes: number of planes
+ * @max_width: maximum width of plane
+ * @max_height: maximum height of plane
+ * @max_cursor_width: maximum cursor width
+ * @max_cursor_height: maximum cursor height
+ * @alpha_prop: Global layer alpha property
+ * @scale_prop: Layer scale property (1x, 2x or 4x)
+ * @bg_color: Background color property for primary layer
+ * @drm: core drm object
+ * @pixel_clock: pixel clock for mixer
+ * @pixel_clock_enabled: pixel clock status
+ * @dpms: mixer drm state
+ * @event: vblank pending event
+ * @vtc_bridge: vtc_bridge structure
+ *
+ * Contains pointers to logical constructions such as the DRM plane manager as
+ * well as pointers to distinguish the mixer layer serving as the DRM "primary"
+ * plane from the actual mixer layer which serves as the background layer in
+ * hardware.
+ *
+ */
+struct xlnx_mix {
+ struct xlnx_mix_hw mixer_hw;
+ struct platform_device *master;
+ struct xlnx_crtc crtc;
+ struct xlnx_mix_plane *drm_primary_layer;
+ struct xlnx_mix_plane *hw_master_layer;
+ struct xlnx_mix_plane *hw_logo_layer;
+ struct xlnx_mix_plane *planes;
+ u32 num_planes;
+ u32 max_width;
+ u32 max_height;
+ u32 max_cursor_width;
+ u32 max_cursor_height;
+ struct drm_property *alpha_prop;
+ struct drm_property *scale_prop;
+ struct drm_property *bg_color;
+ struct drm_device *drm;
+ struct clk *pixel_clock;
+ bool pixel_clock_enabled;
+ int dpms;
+ struct drm_pending_vblank_event *event;
+ struct xlnx_bridge *vtc_bridge;
+};
+
+/**
+ * struct xlnx_mix_plane_dma - Xilinx drm plane VDMA object
+ *
+ * @chan: dma channel
+ * @xt: dma interleaved configuration template
+ * @sgl: data chunk for dma_interleaved_template
+ * @is_active: flag if the DMA is active
+ */
+struct xlnx_mix_plane_dma {
+ struct dma_chan *chan;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+ bool is_active;
+};
+
+/**
+ * struct xlnx_mix_plane - Xilinx drm plane object
+ *
+ * @base: base drm plane object
+ * @mixer_layer: video mixer hardware layer data instance
+ * @mixer: mixer DRM object
+ * @dma: dma object
+ * @id: plane id
+ * @dpms: current dpms level
+ * @format: pixel format
+ */
+struct xlnx_mix_plane {
+ struct drm_plane base;
+ struct xlnx_mix_layer_data *mixer_layer;
+ struct xlnx_mix *mixer;
+ struct xlnx_mix_plane_dma dma[XVMIX_MAX_NUM_SUB_PLANES];
+ int id;
+ int dpms;
+ u32 format;
+};
+
+static inline void reg_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline void reg_writeq(void __iomem *base, int offset, u64 val)
+{
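+ /* 64-bit IP registers are written as two 32-bit words, low word first */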
+ writel(lower_32_bits(val), base + offset);
+ writel(upper_32_bits(val), base + offset + 4);
+}
+
+static inline u32 reg_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * xlnx_mix_intrpt_enable_done - Enables interrupts
+ * @mixer: instance of mixer IP core
+ *
+ * Enables interrupts in the mixer core
+ */
+static void xlnx_mix_intrpt_enable_done(struct xlnx_mix_hw *mixer)
+{
+ u32 curr_val = reg_readl(mixer->base, XVMIX_IER);
+
+ /* Enable Interrupts */
+ reg_writel(mixer->base, XVMIX_IER, curr_val | XVMIX_IRQ_DONE_MASK);
+ reg_writel(mixer->base, XVMIX_GIE, XVMIX_GIE_EN_MASK);
+}
+
+/**
+ * xlnx_mix_intrpt_disable - Disable interrupts
+ * @mixer: instance of mixer IP core
+ *
+ * Disables interrupts in the mixer core
+ */
+static void xlnx_mix_intrpt_disable(struct xlnx_mix_hw *mixer)
+{
+ u32 curr_val = reg_readl(mixer->base, XVMIX_IER);
+
+ reg_writel(mixer->base, XVMIX_IER, curr_val & (~XVMIX_IRQ_DONE_MASK));
+ reg_writel(mixer->base, XVMIX_GIE, 0);
+}
+
+/**
+ * xlnx_mix_start - Start the mixer core video generator
+ * @mixer: Mixer core instance for which to start video output
+ *
+ * Starts the core to generate a video frame.
+ */
+static void xlnx_mix_start(struct xlnx_mix_hw *mixer)
+{
+ u32 val;
+
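+ /* Write the enable and restart control bits to begin frame generation */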
+ val = XVMIX_AP_RST_MASK | XVMIX_AP_EN_MASK;
+ reg_writel(mixer->base, XVMIX_AP_CTRL, val);
+}
+
+/**
+ * xlnx_mix_stop - Stop the mixer core video generator
+ * @mixer: Mixer core instance for which to stop video output
+ *
+ * Stops the core from generating video frames.
+ */
+static void xlnx_mix_stop(struct xlnx_mix_hw *mixer)
+{
+ reg_writel(mixer->base, XVMIX_AP_CTRL, 0);
+}
+
+static inline uint32_t xlnx_mix_get_intr_status(struct xlnx_mix_hw *mixer)
+{
+ return reg_readl(mixer->base, XVMIX_ISR) & XVMIX_IRQ_DONE_MASK;
+}
+
+static inline void xlnx_mix_clear_intr_status(struct xlnx_mix_hw *mixer,
+ uint32_t intr)
+{
+ reg_writel(mixer->base, XVMIX_ISR, intr);
+}
+
+/**
+ * xlnx_mix_get_layer_data - Retrieve current hardware and register
+ * values for a logical video layer
+ * @mixer: Mixer instance to interrogate
+ * @id: Id of layer for which data is requested
+ *
+ * Return:
+ * Structure containing layer-specific data; NULL upon failure
+ */
+static struct xlnx_mix_layer_data *
+xlnx_mix_get_layer_data(struct xlnx_mix_hw *mixer, enum xlnx_mix_layer_id id)
+{
+ u32 i;
+ struct xlnx_mix_layer_data *layer_data;
+
+ for (i = 0; i < mixer->layer_cnt; i++) {
+ layer_data = &mixer->layer_data[i];
+ if (layer_data->id == id)
+ return layer_data;
+ }
+ return NULL;
+}
+
+/**
+ * xlnx_mix_set_active_area - Sets the number of active horizontal and
+ * vertical scan lines for the mixer background layer.
+ * @mixer: Mixer instance for which to set a new viewable area
+ * @hactive: Width of new background image dimension
+ * @vactive: Height of new background image dimension
+ *
+ * Minimum values are 64x64 with maximum values determined by the IP hardware
+ * design.
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_set_active_area(struct xlnx_mix_hw *mixer,
+ u32 hactive, u32 vactive)
+{
+ struct xlnx_mix_layer_data *ld =
+ xlnx_mix_get_layer_data(mixer, XVMIX_LAYER_MASTER);
+
+ if (hactive > ld->hw_config.max_width ||
+ vactive > ld->hw_config.max_height) {
+ DRM_ERROR("Invalid layer dimention\n");
+ return -EINVAL;
+ }
+ /* set resolution */
+ reg_writel(mixer->base, XVMIX_HEIGHT_DATA, vactive);
+ reg_writel(mixer->base, XVMIX_WIDTH_DATA, hactive);
+ ld->layer_regs.width = hactive;
+ ld->layer_regs.height = vactive;
+
+ return 0;
+}
+
+/**
+ * is_window_valid - Validate requested plane dimensions
+ * @mixer: Mixer instance against which to validate the window
+ * @x_pos: x position requested for start of plane
+ * @y_pos: y position requested for start of plane
+ * @width: width of plane
+ * @height: height of plane
+ * @scale: scale factor of plane
+ *
+ * Validates if the requested window is within the frame boundary
+ *
+ * Return:
+ * true on success, false on failure
+ */
+static bool is_window_valid(struct xlnx_mix_hw *mixer, u32 x_pos, u32 y_pos,
+ u32 width, u32 height, u32 scale)
+{
+ struct xlnx_mix_layer_data *master_layer;
+ int scale_factor[3] = {1, 2, 4};
+
+ master_layer = xlnx_mix_get_layer_data(mixer, XVMIX_LAYER_MASTER);
+
+ /* Check if window scale factor is set */
+ if (scale < XVMIX_SCALE_FACTOR_INVALID) {
+ width *= scale_factor[scale];
+ height *= scale_factor[scale];
+ }
+
+ /* verify overlay falls within currently active background area */
+ if (((x_pos + width) <= master_layer->layer_regs.width) &&
+ ((y_pos + height) <= master_layer->layer_regs.height))
+ return true;
+
+ DRM_ERROR("Requested plane dimensions can't be set\n");
+ return false;
+}
+
+/**
+ * xlnx_mix_layer_enable - Enables the requested layers
+ * @mixer: Mixer instance in which to enable a video layer
+ * @id: Logical id (e.g. 16 = logo layer) to enable
+ *
+ * Enables (permits video output for) the layer denoted by id in the IP core.
+ * Layer 0 indicates the background layer and layer 16 the logo layer.
+ * Passing the max layers value will enable all layers.
+ */
+static void xlnx_mix_layer_enable(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id)
+{
+ struct xlnx_mix_layer_data *layer_data;
+ u32 curr_state;
+
+ /* Ensure layer is marked as 'active' by application before
+ * turning on in hardware. In some cases, layer register data
+ * may be written to otherwise inactive layers in lieu of, eventually,
+ * turning them on.
+ */
+ layer_data = xlnx_mix_get_layer_data(mixer, id);
+ if (!layer_data) {
+ DRM_ERROR("Invalid layer id %d\n", id);
+ return;
+ }
+ if (!layer_data->layer_regs.is_active)
+ return; /* for inactive layers silently return */
+
+ /* Check if request is to enable all layers or single layer */
+ if (id == mixer->max_layers) {
+ reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA,
+ mixer->enable_all_mask);
+
+ } else if ((id < mixer->layer_cnt) || ((id == mixer->logo_layer_id) &&
+ mixer->logo_layer_en)) {
+ curr_state = reg_readl(mixer->base, XVMIX_LAYERENABLE_DATA);
+ if (id == mixer->logo_layer_id)
+ curr_state |= mixer->logo_en_mask;
+ else
+ curr_state |= BIT(id);
+ reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA, curr_state);
+ } else {
+ DRM_ERROR("Can't enable requested layer %d\n", id);
+ }
+}
+
+/**
+ * xlnx_mix_disp_layer_enable - Enables video output represented by the
+ * plane object
+ * @plane: Drm plane object describing video layer to enable
+ *
+ */
+static void xlnx_mix_disp_layer_enable(struct xlnx_mix_plane *plane)
+{
+ struct xlnx_mix_hw *mixer_hw;
+ struct xlnx_mix_layer_data *l_data;
+ u32 id;
+
+ if (!plane)
+ return;
+ mixer_hw = to_mixer_hw(plane);
+ l_data = plane->mixer_layer;
+ id = l_data->id;
+ if (id < XVMIX_LAYER_MASTER || id > mixer_hw->logo_layer_id) {
+ DRM_DEBUG_KMS("Attempt to activate invalid layer: %d\n", id);
+ return;
+ }
+ if (id == XVMIX_LAYER_MASTER && !l_data->hw_config.is_streaming)
+ return;
+
+ xlnx_mix_layer_enable(mixer_hw, id);
+}
+
+/**
+ * xlnx_mix_layer_disable - Disables the requested layer
+ * @mixer: Mixer for which the layer will be disabled
+ * @id: Logical id of the layer to be disabled (0-16)
+ *
+ * Disables the layer denoted by layer_id in the IP core.
+ * Layer 0 will indicate the background layer and layer 16 the logo
+ * layer. Passing the value of max layers will disable all
+ * layers.
+ */
+static void xlnx_mix_layer_disable(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id)
+{
+ u32 num_layers, curr_state;
+
+ num_layers = mixer->layer_cnt;
+
+ if (id == mixer->max_layers) {
+ reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA,
+ XVMIX_MASK_DISABLE_ALL_LAYERS);
+ } else if ((id < num_layers) ||
+ ((id == mixer->logo_layer_id) && (mixer->logo_layer_en))) {
+ curr_state = reg_readl(mixer->base, XVMIX_LAYERENABLE_DATA);
+ if (id == mixer->logo_layer_id)
+ curr_state &= ~(mixer->logo_en_mask);
+ else
+ curr_state &= ~(BIT(id));
+ reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA, curr_state);
+ } else {
+ DRM_ERROR("Can't disable requested layer %d\n", id);
+ }
+}
+
+/**
+ * xlnx_mix_disp_layer_disable - Disables video output represented by the
+ * plane object
+ * @plane: Drm plane object describing video layer to disable
+ *
+ */
+static void xlnx_mix_disp_layer_disable(struct xlnx_mix_plane *plane)
+{
+ struct xlnx_mix_hw *mixer_hw;
+ u32 layer_id;
+
+ if (!plane)
+ return;
+ mixer_hw = to_mixer_hw(plane);
+ layer_id = plane->mixer_layer->id;
+ if (layer_id < XVMIX_LAYER_MASTER ||
+ layer_id > mixer_hw->logo_layer_id)
+ return;
+
+ xlnx_mix_layer_disable(mixer_hw, layer_id);
+}
+
+static int xlnx_mix_mark_layer_inactive(struct xlnx_mix_plane *plane)
+{
+ if (!plane || !plane->mixer_layer)
+ return -ENODEV;
+
+ plane->mixer_layer->layer_regs.is_active = false;
+
+ return 0;
+}
+
+/* apply mode to plane pipe */
+static void xlnx_mix_plane_commit(struct drm_plane *base_plane)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+ struct dma_async_tx_descriptor *desc;
+ enum dma_ctrl_flags flags;
+ unsigned int i;
+
+ /* for xlnx video framebuffer dma, if used */
+ xilinx_xdma_drm_config(plane->dma[0].chan, plane->format);
+ for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++) {
+ struct xlnx_mix_plane_dma *dma = &plane->dma[i];
+
+ if (dma->chan && dma->is_active) {
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ desc = dmaengine_prep_interleaved_dma(dma->chan,
+ &dma->xt,
+ flags);
+ if (!desc) {
+ DRM_ERROR("failed to prepare DMA descriptor\n");
+ return;
+ }
+ dmaengine_submit(desc);
+ dma_async_issue_pending(dma->chan);
+ }
+ }
+}
+
+static int xlnx_mix_plane_get_max_width(struct drm_plane *base_plane)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+
+ return plane->mixer->max_width;
+}
+
+static int xlnx_mix_plane_get_max_height(struct drm_plane *base_plane)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+
+ return plane->mixer->max_height;
+}
+
+static int xlnx_mix_plane_get_max_cursor_width(struct drm_plane *base_plane)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+
+ return plane->mixer->max_cursor_width;
+}
+
+static int xlnx_mix_plane_get_max_cursor_height(struct drm_plane *base_plane)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+
+ return plane->mixer->max_cursor_height;
+}
+
+static int xlnx_mix_crtc_get_max_width(struct xlnx_crtc *crtc)
+{
+ return xlnx_mix_plane_get_max_width(crtc->crtc.primary);
+}
+
+static int xlnx_mix_crtc_get_max_height(struct xlnx_crtc *crtc)
+{
+ return xlnx_mix_plane_get_max_height(crtc->crtc.primary);
+}
+
+static unsigned int xlnx_mix_crtc_get_max_cursor_width(struct xlnx_crtc *crtc)
+{
+ return xlnx_mix_plane_get_max_cursor_width(crtc->crtc.primary);
+}
+
+static unsigned int xlnx_mix_crtc_get_max_cursor_height(struct xlnx_crtc *crtc)
+{
+ return xlnx_mix_plane_get_max_cursor_height(crtc->crtc.primary);
+}
+
+/**
+ * xlnx_mix_crtc_get_format - Get the current device format
+ * @crtc: xlnx crtc object
+ *
+ * Get the current format of pipeline
+ *
+ * Return: the corresponding DRM_FORMAT_XXX
+ */
+static uint32_t xlnx_mix_crtc_get_format(struct xlnx_crtc *crtc)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(crtc->crtc.primary);
+
+ return plane->format;
+}
+
+/**
+ * xlnx_mix_crtc_get_align - Get the alignment value for pitch
+ * @crtc: xlnx crtc object
+ *
+ * Get the alignment value for pitch from the plane
+ *
+ * Return: The alignment value for the pitch.
+ */
+static unsigned int xlnx_mix_crtc_get_align(struct xlnx_crtc *crtc)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(crtc->crtc.primary);
+ struct xlnx_mix *m = plane->mixer;
+
+ return XVMIX_BASE_ALIGN * m->mixer_hw.ppc;
+}
+
+/**
+ * xlnx_mix_attach_plane_prop - Attach mixer-specific drm property to
+ * the given plane
+ * @plane: Xilinx drm plane object to inspect and attach appropriate
+ * properties to
+ *
+ * The linked mixer layer will be inspected to see what capabilities it offers
+ * (e.g. global layer alpha; scaling) and drm property objects that indicate
+ * those capabilities will then be attached and initialized to default values.
+ */
+static void xlnx_mix_attach_plane_prop(struct xlnx_mix_plane *plane)
+{
+ struct drm_mode_object *base = &plane->base.base;
+ struct xlnx_mix *mixer = plane->mixer;
+
+ if (plane->mixer_layer->hw_config.can_scale)
+ drm_object_attach_property(base, mixer->scale_prop,
+ XVMIX_SCALE_FACTOR_1X);
+ if (plane->mixer_layer->hw_config.can_alpha)
+ drm_object_attach_property(base, mixer->alpha_prop,
+ XVMIX_ALPHA_MAX);
+}
+
+static int xlnx_mix_mark_layer_active(struct xlnx_mix_plane *plane)
+{
+ if (!plane->mixer_layer)
+ return -ENODEV;
+ plane->mixer_layer->layer_regs.is_active = true;
+
+ return 0;
+}
+
+static bool xlnx_mix_isfmt_support(u32 format)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(color_table); i++) {
+ if (format == color_table[i])
+ return true;
+ }
+ return false;
+}
+
+/*************** DISPLAY ************/
+
+/**
+ * xlnx_mix_get_layer_scaling - Get layer scaling factor
+ * @mixer: Mixer instance to interrogate
+ * @id: Plane id
+ *
+ * Applicable only for overlay layers
+ *
+ * Return:
+ * scaling factor of the specified layer
+ */
+static int xlnx_mix_get_layer_scaling(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id)
+{
+ int scale_factor = 0;
+ u32 reg;
+ struct xlnx_mix_layer_data *l_data = xlnx_mix_get_layer_data(mixer, id);
+
+ if (id == mixer->logo_layer_id) {
+ if (mixer->logo_layer_en) {
+ if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS)
+ reg = XVMIX_LOGOSCALEFACTOR_DATA +
+ XVMIX_LOGO_OFFSET;
+ else
+ reg = XVMIX_LOGOSCALEFACTOR_DATA;
+ scale_factor = reg_readl(mixer->base, reg);
+ l_data->layer_regs.scale_fact = scale_factor;
+ }
+ } else {
+ /*Layer0-Layer15*/
+ if (id < mixer->logo_layer_id && l_data->hw_config.can_scale) {
+ reg = XVMIX_LAYERSCALE_0_DATA + (id * XVMIX_REG_OFFSET);
+ scale_factor = reg_readl(mixer->base, reg);
+ l_data->layer_regs.scale_fact = scale_factor;
+ }
+ }
+ return scale_factor;
+}
+
+/**
+ * xlnx_mix_set_layer_window - Sets the position of an overlay layer
+ * @mixer: Specific mixer object instance controlling the video
+ * @id: Logical layer id (1-15) to be positioned
+ * @x_pos: New column at which to start display of overlay layer
+ * @y_pos: New row at which to start display of overlay layer
+ * @width: Number of active columns to display for overlay layer
+ * @height: Number of active rows to display for overlay layer
+ * @stride: Width in bytes of overlay memory buffer (memory layer only)
+ *
+ * Sets the position of an overlay layer over the background layer (layer 0)
+ * Applicable only for layers 1-15 or the logo layer
+ *
+ * Return:
+ * Zero on success, -EINVAL if the position is invalid or the layer data
+ * cannot be found
+ */
+static int xlnx_mix_set_layer_window(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id, u32 x_pos,
+ u32 y_pos, u32 width, u32 height,
+ u32 stride)
+{
+ struct xlnx_mix_layer_data *l_data;
+ u32 scale = 0;
+ int status = -EINVAL;
+ u32 x_reg, y_reg, w_reg, h_reg, s_reg;
+ u32 off;
+
+ l_data = xlnx_mix_get_layer_data(mixer, id);
+ if (!l_data)
+ return status;
+
+ scale = xlnx_mix_get_layer_scaling(mixer, id);
+ if (!is_window_valid(mixer, x_pos, y_pos, width, height, scale))
+ return status;
+
+ if (id == mixer->logo_layer_id) {
+ if (!(mixer->logo_layer_en &&
+ width <= l_data->hw_config.max_width &&
+ height <= l_data->hw_config.max_height &&
+ height >= l_data->hw_config.min_height &&
+ width >= l_data->hw_config.min_width))
+ return status;
+
+ if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS) {
+ x_reg = XVMIX_LOGOSTARTX_DATA + XVMIX_LOGO_OFFSET;
+ y_reg = XVMIX_LOGOSTARTY_DATA + XVMIX_LOGO_OFFSET;
+ w_reg = XVMIX_LOGOWIDTH_DATA + XVMIX_LOGO_OFFSET;
+ h_reg = XVMIX_LOGOHEIGHT_DATA + XVMIX_LOGO_OFFSET;
+ } else {
+ x_reg = XVMIX_LOGOSTARTX_DATA;
+ y_reg = XVMIX_LOGOSTARTY_DATA;
+ w_reg = XVMIX_LOGOWIDTH_DATA;
+ h_reg = XVMIX_LOGOHEIGHT_DATA;
+ }
+ reg_writel(mixer->base, x_reg, x_pos);
+ reg_writel(mixer->base, y_reg, y_pos);
+ reg_writel(mixer->base, w_reg, width);
+ reg_writel(mixer->base, h_reg, height);
+ l_data->layer_regs.x_pos = x_pos;
+ l_data->layer_regs.y_pos = y_pos;
+ l_data->layer_regs.width = width;
+ l_data->layer_regs.height = height;
+ status = 0;
+ } else {
+ /*Layer1-Layer15*/
+
+ if (!(id < mixer->layer_cnt &&
+ width <= l_data->hw_config.max_width &&
+ width >= l_data->hw_config.min_width))
+ return status;
+ x_reg = XVMIX_LAYERSTARTX_0_DATA;
+ y_reg = XVMIX_LAYERSTARTY_0_DATA;
+ w_reg = XVMIX_LAYERWIDTH_0_DATA;
+ h_reg = XVMIX_LAYERHEIGHT_0_DATA;
+ s_reg = XVMIX_LAYERSTRIDE_0_DATA;
+
+ off = id * XVMIX_REG_OFFSET;
+ reg_writel(mixer->base, (x_reg + off), x_pos);
+ reg_writel(mixer->base, (y_reg + off), y_pos);
+ reg_writel(mixer->base, (w_reg + off), width);
+ reg_writel(mixer->base, (h_reg + off), height);
+ l_data->layer_regs.x_pos = x_pos;
+ l_data->layer_regs.y_pos = y_pos;
+ l_data->layer_regs.width = width;
+ l_data->layer_regs.height = height;
+
+ if (!l_data->hw_config.is_streaming)
+ reg_writel(mixer->base, (s_reg + off), stride);
+ status = 0;
+ }
+ return status;
+}
+
+/**
+ * xlnx_mix_set_layer_dimensions - Set layer dimensions
+ * @plane: Drm plane object describing video layer to reposition
+ * @crtc_x: New horizontal anchor position from which to begin rendering
+ * @crtc_y: New vertical anchor position from which to begin rendering
+ * @width: Width, in pixels, to render from stream or memory buffer
+ * @height: Height, in pixels, to render from stream or memory buffer
+ * @stride: Width, in bytes, of a memory buffer. Used only for
+ * memory layers. Use 0 for streaming layers.
+ *
+ * Establishes new coordinates and dimensions for a video plane layer
+ * New size and coordinates of window must fit within the currently active
+ * area of the crtc (e.g. the background resolution)
+ *
+ * Return: 0 if successful; -EINVAL if coordinate data is invalid,
+ * or -ENODEV if layer data is not present
+ */
+static int xlnx_mix_set_layer_dimensions(struct xlnx_mix_plane *plane,
+ u32 crtc_x, u32 crtc_y,
+ u32 width, u32 height, u32 stride)
+{
+ struct xlnx_mix *mixer = plane->mixer;
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
+ struct xlnx_mix_layer_data *layer_data;
+ enum xlnx_mix_layer_id layer_id;
+ int ret = 0;
+
+ layer_data = plane->mixer_layer;
+ layer_id = layer_data->id;
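+ /* Disable the layer (and the master, for the primary plane) on resize */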
+ if (layer_data->layer_regs.height != height ||
+ layer_data->layer_regs.width != width) {
+ if (mixer->drm_primary_layer == plane)
+ xlnx_mix_layer_disable(mixer_hw, XVMIX_LAYER_MASTER);
+
+ xlnx_mix_layer_disable(mixer_hw, layer_id);
+ }
+ if (mixer->drm_primary_layer == plane) {
+ crtc_x = 0;
+ crtc_y = 0;
+ ret = xlnx_mix_set_active_area(mixer_hw, width, height);
+ if (ret)
+ return ret;
+ xlnx_mix_layer_enable(mixer_hw, XVMIX_LAYER_MASTER);
+ }
+ if (layer_id != XVMIX_LAYER_MASTER && layer_id < mixer_hw->max_layers) {
+ ret = xlnx_mix_set_layer_window(mixer_hw, layer_id, crtc_x,
+ crtc_y, width, height, stride);
+ if (ret)
+ return ret;
+ xlnx_mix_disp_layer_enable(plane);
+ }
+ return ret;
+}
+
+/**
+ * xlnx_mix_set_layer_scaling - Sets scaling factor
+ * @mixer: Instance of mixer to be subject of scaling request
+ * @id: Logical id of video layer subject to new scale setting
+ * @scale: Scale factor (1x, 2x or 4x) for horiz. and vert. dimensions
+ *
+ * Sets the scaling factor for the specified video layer
+ * Not applicable to background stream layer (layer 0)
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure to set scale for layer (likely
+ * returned if the resulting size of the layer exceeds the dimensions of
+ * the active display area)
+ */
+static int xlnx_mix_set_layer_scaling(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id, u32 scale)
+{
+ void __iomem *reg = mixer->base;
+ struct xlnx_mix_layer_data *l_data;
+ int status = 0;
+ u32 x_pos, y_pos, width, height, offset;
+
+ l_data = xlnx_mix_get_layer_data(mixer, id);
+ x_pos = l_data->layer_regs.x_pos;
+ y_pos = l_data->layer_regs.y_pos;
+ width = l_data->layer_regs.width;
+ height = l_data->layer_regs.height;
+
+ if (!is_window_valid(mixer, x_pos, y_pos, width, height, scale))
+ return -EINVAL;
+
+ if (id == mixer->logo_layer_id) {
+ if (mixer->logo_layer_en) {
+ if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS)
+ reg_writel(reg, XVMIX_LOGOSCALEFACTOR_DATA +
+ XVMIX_LOGO_OFFSET, scale);
+ else
+ reg_writel(reg, XVMIX_LOGOSCALEFACTOR_DATA,
+ scale);
+ l_data->layer_regs.scale_fact = scale;
+ status = 0;
+ }
+ } else {
+ /* Layer0-Layer15 */
+ if (id < mixer->layer_cnt && l_data->hw_config.can_scale) {
+ offset = id * XVMIX_REG_OFFSET;
+
+ reg_writel(reg, (XVMIX_LAYERSCALE_0_DATA + offset),
+ scale);
+ l_data->layer_regs.scale_fact = scale;
+ status = 0;
+ }
+ }
+ return status;
+}
+
+/**
+ * xlnx_mix_set_layer_scale - Change video scale factor for video plane
+ * @plane: Drm plane object describing layer to be modified
+ * @val: Index of scale factor to use:
+ * 0 = 1x
+ * 1 = 2x
+ * 2 = 4x
+ *
+ * Return:
+ * Zero on success, either -EINVAL if scale value is illegal or
+ * -ENODEV if layer does not exist (null)
+ */
+static int xlnx_mix_set_layer_scale(struct xlnx_mix_plane *plane,
+ uint64_t val)
+{
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
+ struct xlnx_mix_layer_data *layer = plane->mixer_layer;
+ int ret;
+
+ if (!layer || !layer->hw_config.can_scale)
+ return -ENODEV;
+ if (val > XVMIX_SCALE_FACTOR_4X || val < XVMIX_SCALE_FACTOR_1X) {
+ DRM_ERROR("Mixer layer scale value illegal.\n");
+ return -EINVAL;
+ }
+ xlnx_mix_disp_layer_disable(plane);
+ msleep(50);
+ ret = xlnx_mix_set_layer_scaling(mixer_hw, layer->id, val);
+ xlnx_mix_disp_layer_enable(plane);
+
+ return ret;
+}
+
+/**
+ * xlnx_mix_set_layer_alpha - Set the alpha value
+ * @mixer: Instance of mixer controlling layer to modify
+ * @layer_id: Logical id of video overlay to adjust alpha setting
+ * @alpha: Desired alpha setting (0-255) for layer specified
+ * 255 = completely opaque
+ * 0 = fully transparent
+ *
+ * Set the layer global transparency for a video overlay
+ * Not applicable to background streaming layer
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_set_layer_alpha(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id layer_id, u32 alpha)
+{
+ struct xlnx_mix_layer_data *layer_data;
+ u32 reg;
+ int status = -EINVAL;
+
+ layer_data = xlnx_mix_get_layer_data(mixer, layer_id);
+
+ if (layer_id == mixer->logo_layer_id) {
+ if (mixer->logo_layer_en) {
+ if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS)
+ reg = XVMIX_LOGOALPHA_DATA + XVMIX_LOGO_OFFSET;
+ else
+ reg = XVMIX_LOGOALPHA_DATA;
+ reg_writel(mixer->base, reg, alpha);
+ layer_data->layer_regs.alpha = alpha;
+ status = 0;
+ }
+ } else {
+ /* Layer1-Layer15 */
+ if (layer_id < mixer->layer_cnt &&
+ layer_data->hw_config.can_alpha) {
+ u32 offset = layer_id * XVMIX_REG_OFFSET;
+
+ reg = XVMIX_LAYERALPHA_0_DATA;
+ reg_writel(mixer->base, (reg + offset), alpha);
+ layer_data->layer_regs.alpha = alpha;
+ status = 0;
+ }
+ }
+ return status;
+}
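+
+/*
+ * Illustrative call: making overlay layer 1 half-transparent, assuming
+ * the XVMIX_LAYER_1 enumerator and the 0-255 alpha range documented
+ * above:
+ *
+ * ret = xlnx_mix_set_layer_alpha(mixer, XVMIX_LAYER_1, 128);
+ */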
+
+/**
+ * xlnx_mix_disp_set_layer_alpha - Change the transparency of an entire plane
+ * @plane: Video layer affected by new alpha setting
+ * @val: Value of transparency setting (0-255) with 255 being opaque
+ * 0 being fully transparent
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_disp_set_layer_alpha(struct xlnx_mix_plane *plane,
+ uint64_t val)
+{
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
+ struct xlnx_mix_layer_data *layer = plane->mixer_layer;
+
+ if (!layer || !layer->hw_config.can_alpha)
+ return -ENODEV;
+ if (val > XVMIX_ALPHA_MAX || val < XVMIX_ALPHA_MIN) {
+ DRM_ERROR("Mixer layer alpha dts value illegal.\n");
+ return -EINVAL;
+ }
+ return xlnx_mix_set_layer_alpha(mixer_hw, layer->id, val);
+}
+
+/**
+ * xlnx_mix_set_layer_buff_addr - Set buffer addresses for a layer
+ * @mixer: Instance of mixer controlling layer to modify
+ * @id: Logical id of video overlay whose buffer addresses are to be set
+ * @luma_addr: Start address of plane 1 of the layer's frame buffer
+ * @chroma_addr: Start address of plane 2 of the layer's frame buffer
+ *
+ * Sets the buffer addresses of the specified layer.
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_set_layer_buff_addr(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id,
+ dma_addr_t luma_addr,
+ dma_addr_t chroma_addr)
+{
+ struct xlnx_mix_layer_data *layer_data;
+ u32 align, offset;
+ u32 reg1, reg2;
+
+ if (id >= mixer->layer_cnt)
+ return -EINVAL;
+
+ /* Check if addr is aligned to aximm width (PPC * 64-bits) */
+ align = mixer->ppc * 8;
+ if ((luma_addr % align) != 0 || (chroma_addr % align) != 0)
+ return -EINVAL;
+
+ offset = (id - 1) * XVMIX_REG_OFFSET;
+ reg1 = XVMIX_LAYER1_BUF1_V_DATA + offset;
+ reg2 = XVMIX_LAYER1_BUF2_V_DATA + offset;
+ layer_data = &mixer->layer_data[id];
+ if (mixer->dma_addr_size == 64 && sizeof(dma_addr_t) == 8) {
+ reg_writeq(mixer->base, reg1, luma_addr);
+ reg_writeq(mixer->base, reg2, chroma_addr);
+ } else {
+ reg_writel(mixer->base, reg1, (u32)luma_addr);
+ reg_writel(mixer->base, reg2, (u32)chroma_addr);
+ }
+ layer_data->layer_regs.buff_addr1 = luma_addr;
+ layer_data->layer_regs.buff_addr2 = chroma_addr;
+
+ return 0;
+}
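+
+/*
+ * Worked example for the alignment check above: with xlnx,ppc = <2> the
+ * AXI-MM width is 2 * 64 bits, so align = 16 and both luma_addr and
+ * chroma_addr must be 16-byte aligned or the call returns -EINVAL.
+ */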
+
+/**
+ * xlnx_mix_hw_plane_dpms - Implementation of display power management
+ * system call (dpms).
+ * @plane: Plane/mixer layer to enable/disable (based on dpms value)
+ * @dpms: Display power management state to act upon
+ *
+ * Designed to disable and turn off a plane and restore all attached drm
+ * properties to their initial values. Alternatively, if dpms is "on", it
+ * will enable a layer.
+ */
+static void
+xlnx_mix_hw_plane_dpms(struct xlnx_mix_plane *plane, int dpms)
+{
+ struct xlnx_mix *mixer;
+
+ if (!plane->mixer)
+ return;
+ mixer = plane->mixer;
+ plane->dpms = dpms;
+
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ xlnx_mix_disp_layer_enable(plane);
+ break;
+ default:
+ xlnx_mix_mark_layer_inactive(plane);
+ xlnx_mix_disp_layer_disable(plane);
+ /* restore to default property values */
+ if (mixer->alpha_prop)
+ xlnx_mix_disp_set_layer_alpha(plane, XVMIX_ALPHA_MAX);
+ if (mixer->scale_prop)
+ xlnx_mix_set_layer_scale(plane, XVMIX_SCALE_FACTOR_1X);
+ }
+}
+
+static void xlnx_mix_plane_dpms(struct drm_plane *base_plane, int dpms)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+ unsigned int i;
+
+ DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+ DRM_DEBUG_KMS("dpms: %d -> %d\n", plane->dpms, dpms);
+
+ if (plane->dpms == dpms)
+ return;
+ plane->dpms = dpms;
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ /* start dma engine */
+ for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++)
+ if (plane->dma[i].chan && plane->dma[i].is_active)
+ dma_async_issue_pending(plane->dma[i].chan);
+ xlnx_mix_hw_plane_dpms(plane, dpms);
+ break;
+ default:
+ xlnx_mix_hw_plane_dpms(plane, dpms);
+ /* stop dma engine and release descriptors */
+ for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++) {
+ if (plane->dma[i].chan && plane->dma[i].is_active) {
+ dmaengine_terminate_sync(plane->dma[i].chan);
+ plane->dma[i].is_active = false;
+ }
+ }
+ break;
+ }
+}
+
+static int
+xlnx_mix_disp_plane_atomic_set_property(struct drm_plane *base_plane,
+ struct drm_plane_state *state,
+ struct drm_property *property, u64 val)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+ struct xlnx_mix *mixer = plane->mixer;
+
+ if (property == mixer->alpha_prop)
+ return xlnx_mix_disp_set_layer_alpha(plane, val);
+ else if (property == mixer->scale_prop)
+ return xlnx_mix_set_layer_scale(plane, val);
+ else
+ return -EINVAL;
+}
+
+static int
+xlnx_mix_disp_plane_atomic_get_property(struct drm_plane *base_plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+ struct xlnx_mix *mixer = plane->mixer;
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
+ u32 layer_id = plane->mixer_layer->id;
+
+ if (property == mixer->alpha_prop)
+ *val = mixer_hw->layer_data[layer_id].layer_regs.alpha;
+ else if (property == mixer->scale_prop)
+ *val = mixer_hw->layer_data[layer_id].layer_regs.scale_fact;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * xlnx_mix_disp_plane_atomic_update_plane - plane update using atomic
+ * @plane: plane object to update
+ * @crtc: owning CRTC of owning plane
+ * @fb: framebuffer to flip onto plane
+ * @crtc_x: x offset of primary plane on crtc
+ * @crtc_y: y offset of primary plane on crtc
+ * @crtc_w: width of primary plane rectangle on crtc
+ * @crtc_h: height of primary plane rectangle on crtc
+ * @src_x: x offset of @fb for panning
+ * @src_y: y offset of @fb for panning
+ * @src_w: width of source rectangle in @fb
+ * @src_h: height of source rectangle in @fb
+ * @ctx: lock acquire context
+ *
+ * Provides a default plane update handler using the atomic driver interface.
+ *
+ * Return:
+ * Zero on success, error code on failure
+ */
+static int
+xlnx_mix_disp_plane_atomic_update_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w,
+ unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret != 0)
+ goto fail;
+
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+ plane_state->crtc_w = crtc_w;
+ plane_state->crtc_h = crtc_h;
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+ plane_state->src_w = src_w;
+ plane_state->src_h = src_h;
+
+ if (plane == crtc->cursor)
+ state->legacy_cursor_update = true;
+
+ /* Do async-update if possible */
+ state->async_update = !drm_atomic_helper_async_check(plane->dev, state);
+
+ ret = drm_atomic_commit(state);
+
+fail:
+ drm_atomic_state_put(state);
+ return ret;
+}
+
+static const struct drm_plane_funcs xlnx_mix_plane_funcs = {
+ .update_plane = xlnx_mix_disp_plane_atomic_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .atomic_set_property = xlnx_mix_disp_plane_atomic_set_property,
+ .atomic_get_property = xlnx_mix_disp_plane_atomic_get_property,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/**
+ * xlnx_mix_logo_load - Loads mixer's internal bram
+ * @mixer: Mixer instance to act upon
+ * @logo_w: Width of logo in pixels
+ * @logo_h: Height of logo in pixels
+ * @r_buf: Pointer to byte buffer array of R data values
+ * @g_buf: Pointer to byte buffer array of G data values
+ * @b_buf: Pointer to byte buffer array of B data values
+ * @a_buf: Pointer to byte buffer array of A data values
+ *
+ * Loads mixer's internal bram with planar R, G, B and A data
+ *
+ * Return:
+ * Zero on success, -ENODEV if no layer data exists for the logo layer,
+ * -EINVAL otherwise
+ */
+static int xlnx_mix_logo_load(struct xlnx_mix_hw *mixer, u32 logo_w, u32 logo_h,
+ u8 *r_buf, u8 *g_buf, u8 *b_buf, u8 *a_buf)
+{
+ void __iomem *reg = mixer->base;
+ struct xlnx_mix_layer_data *layer_data;
+
+ int x;
+ u32 shift;
+ u32 rword, gword, bword, aword;
+ u32 pixel_cnt = logo_w * logo_h;
+ u32 unaligned_pix_cnt = pixel_cnt % 4;
+ u32 width, height, curr_x_pos, curr_y_pos;
+ u32 rbase_addr, gbase_addr, bbase_addr, abase_addr;
+
+ layer_data = xlnx_mix_get_layer_data(mixer, mixer->logo_layer_id);
+ rword = 0;
+ gword = 0;
+ bword = 0;
+ aword = 0;
+
+ if (!layer_data)
+ return -ENODEV;
+
+ /* RGBA data should be 32-bit word aligned */
+ if (unaligned_pix_cnt && mixer->logo_pixel_alpha_enabled)
+ return -EINVAL;
+
+ if (!(mixer->logo_layer_en &&
+ logo_w <= layer_data->hw_config.max_width &&
+ logo_h <= layer_data->hw_config.max_height))
+ return -EINVAL;
+
+ width = logo_w;
+ height = logo_h;
+ rbase_addr = XVMIX_LOGOR_V_BASE;
+ gbase_addr = XVMIX_LOGOG_V_BASE;
+ bbase_addr = XVMIX_LOGOB_V_BASE;
+ abase_addr = XVMIX_LOGOA_V_BASE;
+
+ for (x = 0; x < pixel_cnt; x++) {
+ shift = (x % 4) * 8;
+ rword |= r_buf[x] << shift;
+ gword |= g_buf[x] << shift;
+ bword |= b_buf[x] << shift;
+ if (mixer->logo_pixel_alpha_enabled)
+ aword |= a_buf[x] << shift;
+
+ if (x % 4 == 3) {
+ reg_writel(reg, (rbase_addr + (x - 3)), rword);
+ reg_writel(reg, (gbase_addr + (x - 3)), gword);
+ reg_writel(reg, (bbase_addr + (x - 3)), bword);
+ if (mixer->logo_pixel_alpha_enabled)
+ reg_writel(reg, (abase_addr + (x - 3)), aword);
+ /* start fresh words for the next four samples */
+ rword = 0;
+ gword = 0;
+ bword = 0;
+ aword = 0;
+ }
+ }
+
+ curr_x_pos = layer_data->layer_regs.x_pos;
+ curr_y_pos = layer_data->layer_regs.y_pos;
+ return xlnx_mix_set_layer_window(mixer, mixer->logo_layer_id,
+ curr_x_pos, curr_y_pos,
+ logo_w, logo_h, 0);
+}
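+
+/*
+ * Packing sketch for the loop above: four consecutive 8-bit samples are
+ * packed little-endian into one 32-bit BRAM word, e.g. for red samples
+ * r0..r3:
+ *
+ * rword = r0 | (r1 << 8) | (r2 << 16) | (r3 << 24);
+ *
+ * and the word is written at byte offset (x - 3) of the R base address.
+ */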
+
+static int xlnx_mix_update_logo_img(struct xlnx_mix_plane *plane,
+ struct drm_gem_cma_object *buffer,
+ u32 src_w, u32 src_h)
+{
+ struct xlnx_mix_layer_data *logo_layer = plane->mixer_layer;
+ struct xlnx_mix_hw *mixer = to_mixer_hw(plane);
+ size_t pixel_cnt = src_h * src_w;
+ /* color comp defaults to offset in RG24 buffer */
+ u32 pix_cmp_cnt;
+ u32 logo_cmp_cnt;
+ bool per_pixel_alpha = false;
+ u32 max_width = logo_layer->hw_config.max_width;
+ u32 max_height = logo_layer->hw_config.max_height;
+ u32 min_width = logo_layer->hw_config.min_width;
+ u32 min_height = logo_layer->hw_config.min_height;
+ u8 *r_data = NULL;
+ u8 *g_data = NULL;
+ u8 *b_data = NULL;
+ u8 *a_data = NULL;
+ size_t el_size = sizeof(u8);
+ u8 *pixel_mem_data;
+ int ret, i, j;
+
+ /* ensure valid conditions for update */
+ if (logo_layer->id != mixer->logo_layer_id)
+ return 0;
+
+ if (src_h > max_height || src_w > max_width ||
+ src_h < min_height || src_w < min_width) {
+ DRM_ERROR("Mixer logo/cursor layer dimensions illegal.\n");
+ return -EINVAL;
+ }
+
+ if (!xlnx_mix_isfmt_support(plane->mixer_layer->hw_config.vid_fmt)) {
+ DRM_ERROR("DRM color format not supported for logo layer\n");
+ return -EINVAL;
+ }
+ per_pixel_alpha =
+ logo_layer->hw_config.vid_fmt == DRM_FORMAT_RGBA8888;
+ r_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+ g_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+ b_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+ if (per_pixel_alpha)
+ a_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+
+ if (!r_data || !g_data || !b_data || (per_pixel_alpha && !a_data)) {
+ DRM_ERROR("Unable to allocate memory for logo layer data\n");
+ ret = -ENOMEM;
+ goto free;
+ }
+ pix_cmp_cnt = per_pixel_alpha ? 4 : 3;
+ logo_cmp_cnt = pixel_cnt * pix_cmp_cnt;
+ /* ensure buffer attributes have changed to indicate new logo
+ * has been created
+ */
+ if ((phys_addr_t)buffer->vaddr == logo_layer->layer_regs.buff_addr1 &&
+ src_w == logo_layer->layer_regs.width &&
+ src_h == logo_layer->layer_regs.height) {
+ ret = 0;
+ goto free;
+ }
+
+ /* cache buffer address for future comparison */
+ logo_layer->layer_regs.buff_addr1 = (phys_addr_t)buffer->vaddr;
+ pixel_mem_data = (u8 *)(buffer->vaddr);
+ for (i = 0, j = 0; j < pixel_cnt; j++) {
+ if (per_pixel_alpha && a_data)
+ a_data[j] = pixel_mem_data[i++];
+
+ b_data[j] = pixel_mem_data[i++];
+ g_data[j] = pixel_mem_data[i++];
+ r_data[j] = pixel_mem_data[i++];
+ }
+ ret = xlnx_mix_logo_load(to_mixer_hw(plane), src_w, src_h, r_data,
+ g_data, b_data,
+ per_pixel_alpha ? a_data : NULL);
+free:
+ kfree(r_data);
+ kfree(g_data);
+ kfree(b_data);
+ kfree(a_data);
+
+ return ret;
+}
+
+/**
+ * xlnx_mix_set_plane - Implementation of DRM plane_update callback
+ * @plane: xlnx_mix_plane object containing references to
+ * the base plane and mixer
+ * @fb: Framebuffer descriptor
+ * @crtc_x: X position of layer on crtc. Note: if the plane represents either
+ * the master hardware layer (video0) or the layer acting as the DRM primary
+ * layer, the crtc x/y coordinates are ignored and forced to 0,0.
+ * @crtc_y: Y position of layer. See description of crtc_x handling
+ * for more information.
+ * @src_x: x-offset in memory buffer from which to start reading
+ * @src_y: y-offset in memory buffer from which to start reading
+ * @src_w: Number of horizontal pixels to read from memory per row
+ * @src_h: Number of rows of video data to read from memory
+ *
+ * Configures a mixer layer to comply with the user space SET_PLANE ioctl
+ * call.
+ *
+ * Return:
+ * Zero on success, non-zero linux error code otherwise.
+ */
+static int xlnx_mix_set_plane(struct xlnx_mix_plane *plane,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h)
+{
+ struct xlnx_mix_hw *mixer_hw;
+ struct xlnx_mix *mixer;
+ struct drm_gem_cma_object *luma_buffer;
+ u32 luma_stride = fb->pitches[0];
+ dma_addr_t luma_addr, chroma_addr = 0;
+ u32 active_area_width;
+ u32 active_area_height;
+ enum xlnx_mix_layer_id layer_id;
+ int ret;
+ const struct drm_format_info *info = fb->format;
+
+ mixer = plane->mixer;
+ mixer_hw = &mixer->mixer_hw;
+ layer_id = plane->mixer_layer->id;
+ active_area_width =
+ mixer->drm_primary_layer->mixer_layer->layer_regs.width;
+ active_area_height =
+ mixer->drm_primary_layer->mixer_layer->layer_regs.height;
+ /* compute memory data */
+ luma_buffer = drm_fb_cma_get_gem_obj(fb, 0);
+ luma_addr = drm_fb_cma_get_gem_addr(fb, plane->base.state, 0);
+ if (!luma_addr) {
+ DRM_ERROR("%s failed to get luma paddr\n", __func__);
+ return -EINVAL;
+ }
+
+ if (info->num_planes > 1) {
+ chroma_addr = drm_fb_cma_get_gem_addr(fb, plane->base.state, 1);
+ if (!chroma_addr) {
+ DRM_ERROR("failed to get chroma paddr\n");
+ return -EINVAL;
+ }
+ }
+ ret = xlnx_mix_mark_layer_active(plane);
+ if (ret)
+ return ret;
+
+ switch (layer_id) {
+ case XVMIX_LAYER_MASTER:
+ if (!plane->mixer_layer->hw_config.is_streaming)
+ xlnx_mix_mark_layer_inactive(plane);
+ if (mixer->drm_primary_layer == mixer->hw_master_layer) {
+ xlnx_mix_layer_disable(mixer_hw, layer_id);
+ ret = xlnx_mix_set_active_area(mixer_hw, src_w, src_h);
+ if (ret)
+ return ret;
+ xlnx_mix_layer_enable(mixer_hw, layer_id);
+
+ } else if (src_w != active_area_width ||
+ src_h != active_area_height) {
+ DRM_ERROR("Invalid dimensions for mixer layer 0.\n");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ ret = xlnx_mix_set_layer_dimensions(plane, crtc_x, crtc_y,
+ src_w, src_h, luma_stride);
+ if (ret)
+ break;
+ if (layer_id == mixer_hw->logo_layer_id) {
+ ret = xlnx_mix_update_logo_img(plane, luma_buffer,
+ src_w, src_h);
+ } else {
+ if (!plane->mixer_layer->hw_config.is_streaming)
+ ret = xlnx_mix_set_layer_buff_addr
+ (mixer_hw, plane->mixer_layer->id,
+ luma_addr, chroma_addr);
+ }
+ }
+ return ret;
+}
+
+/* mode set a plane */
+static int xlnx_mix_plane_mode_set(struct drm_plane *base_plane,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+ const struct drm_format_info *info = fb->format;
+ size_t i = 0;
+ dma_addr_t luma_paddr;
+ int ret;
+ u32 stride;
+
+ /* JPM TODO: begin code to extract into prep-interleaved */
+ DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+ DRM_DEBUG_KMS("h: %d(%d), v: %d(%d)\n", src_w, crtc_x, src_h, crtc_y);
+
+ /* We have multiple dma channels. Set each per video plane */
+ for (; i < info->num_planes; i++) {
+ unsigned int width = src_w / (i ? info->hsub : 1);
+ unsigned int height = src_h / (i ? info->vsub : 1);
+
+ luma_paddr = drm_fb_cma_get_gem_addr(fb, base_plane->state, i);
+ if (!luma_paddr) {
+ DRM_ERROR("%s failed to get luma paddr\n", __func__);
+ return -EINVAL;
+ }
+
+ plane->dma[i].xt.numf = height;
+ plane->dma[i].sgl[0].size =
+ drm_format_plane_width_bytes(info, 0, width);
+ plane->dma[i].sgl[0].icg = fb->pitches[0] -
+ plane->dma[i].sgl[0].size;
+ plane->dma[i].xt.src_start = luma_paddr;
+ plane->dma[i].xt.frame_size = info->num_planes;
+ plane->dma[i].xt.dir = DMA_MEM_TO_DEV;
+ plane->dma[i].xt.src_sgl = true;
+ plane->dma[i].xt.dst_sgl = false;
+ plane->dma[i].is_active = true;
+ }
+
+ for (; i < XVMIX_MAX_NUM_SUB_PLANES; i++)
+ plane->dma[i].is_active = false;
+ /* Do we have a video format aware dma channel?
+ * If so, modify descriptor accordingly
+ */
+ if (plane->dma[0].chan && !plane->dma[1].chan && info->num_planes > 1) {
+ stride = plane->dma[0].sgl[0].size + plane->dma[0].sgl[0].icg;
+ plane->dma[0].sgl[0].src_icg = plane->dma[1].xt.src_start -
+ plane->dma[0].xt.src_start -
+ (plane->dma[0].xt.numf * stride);
+ }
+
+ ret = xlnx_mix_set_plane(plane, fb, crtc_x, crtc_y, src_x, src_y,
+ src_w, src_h);
+ return ret;
+}
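+
+/*
+ * Descriptor arithmetic sketch for the mode-set above: for a 1920-pixel
+ * RGB888 plane with a 6144-byte pitch, sgl[0].size = 1920 * 3 = 5760
+ * bytes and sgl[0].icg = 6144 - 5760 = 384 bytes of per-line padding
+ * that the DMA engine skips between rows.
+ */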
+
+static int xlnx_mix_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ return 0;
+}
+
+static void xlnx_mix_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+}
+
+static int xlnx_mix_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ int scale;
+ struct xlnx_mix_plane *mix_plane = to_xlnx_plane(plane);
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(mix_plane);
+ struct xlnx_mix *mix;
+
+ /* No check required for the drm_primary_layer */
+ mix = container_of(mixer_hw, struct xlnx_mix, mixer_hw);
+ if (mix->drm_primary_layer == mix_plane)
+ return 0;
+
+ scale = xlnx_mix_get_layer_scaling(mixer_hw,
+ mix_plane->mixer_layer->id);
+ if (is_window_valid(mixer_hw, state->crtc_x, state->crtc_y,
+ state->src_w >> 16, state->src_h >> 16, scale))
+ return 0;
+
+ return -EINVAL;
+}
+
+static void xlnx_mix_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ int ret;
+
+ if (!plane->state->crtc || !plane->state->fb)
+ return;
+
+ if (old_state->fb &&
+ old_state->fb->format->format != plane->state->fb->format->format)
+ xlnx_mix_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+
+ ret = xlnx_mix_plane_mode_set(plane, plane->state->fb,
+ plane->state->crtc_x,
+ plane->state->crtc_y,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
+ if (ret) {
+ DRM_ERROR("failed to mode-set a plane\n");
+ return;
+ }
+ /* apply the new fb addr */
+ xlnx_mix_plane_commit(plane);
+ /* make sure a plane is on */
+ xlnx_mix_plane_dpms(plane, DRM_MODE_DPMS_ON);
+}
+
+static void xlnx_mix_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ xlnx_mix_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+}
+
+static int xlnx_mix_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ return 0;
+}
+
+static void
+xlnx_mix_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_plane_state *old_state =
+ drm_atomic_get_old_plane_state(new_state->state, plane);
+
+ /* Update the current state with new configurations */
+ drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+ plane->state->crtc = new_state->crtc;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->crtc_h = new_state->crtc_h;
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_w = new_state->src_w;
+ plane->state->src_h = new_state->src_h;
+ plane->state->state = new_state->state;
+
+ xlnx_mix_plane_atomic_update(plane, old_state);
+}
+
+static const struct drm_plane_helper_funcs xlnx_mix_plane_helper_funcs = {
+ .prepare_fb = xlnx_mix_plane_prepare_fb,
+ .cleanup_fb = xlnx_mix_plane_cleanup_fb,
+ .atomic_check = xlnx_mix_plane_atomic_check,
+ .atomic_update = xlnx_mix_plane_atomic_update,
+ .atomic_disable = xlnx_mix_plane_atomic_disable,
+ .atomic_async_check = xlnx_mix_plane_atomic_async_check,
+ .atomic_async_update = xlnx_mix_plane_atomic_async_update,
+};
+
+static int xlnx_mix_init_plane(struct xlnx_mix_plane *plane,
+ unsigned int poss_crtcs,
+ struct device_node *layer_node)
+{
+ struct xlnx_mix *mixer = plane->mixer;
+ char name[16];
+ enum drm_plane_type type;
+ int ret, i;
+
+ plane->dpms = DRM_MODE_DPMS_OFF;
+ type = DRM_PLANE_TYPE_OVERLAY;
+
+ for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++) {
+ snprintf(name, sizeof(name), "dma%d", i);
+ plane->dma[i].chan = of_dma_request_slave_channel(layer_node,
+ name);
+ if (PTR_ERR(plane->dma[i].chan) == -ENODEV) {
+ plane->dma[i].chan = NULL;
+ continue;
+ }
+ if (IS_ERR(plane->dma[i].chan)) {
+ DRM_ERROR("failed to request dma channel\n");
+ ret = PTR_ERR(plane->dma[i].chan);
+ plane->dma[i].chan = NULL;
+ goto err_dma;
+ }
+ }
+ if (!xlnx_mix_isfmt_support(plane->mixer_layer->hw_config.vid_fmt)) {
+ DRM_ERROR("DRM color format not supported by mixer\n");
+ ret = -ENODEV;
+ goto err_init;
+ }
+ plane->format = plane->mixer_layer->hw_config.vid_fmt;
+ if (plane == mixer->hw_logo_layer)
+ type = DRM_PLANE_TYPE_CURSOR;
+ if (plane == mixer->drm_primary_layer)
+ type = DRM_PLANE_TYPE_PRIMARY;
+
+ /* initialize drm plane */
+ ret = drm_universal_plane_init(mixer->drm, &plane->base,
+ poss_crtcs, &xlnx_mix_plane_funcs,
+ &plane->format,
+ 1, NULL, type, NULL);
+
+ if (ret) {
+ DRM_ERROR("failed to initialize plane\n");
+ goto err_init;
+ }
+ drm_plane_helper_add(&plane->base, &xlnx_mix_plane_helper_funcs);
+ of_node_put(layer_node);
+
+ return 0;
+
+err_init:
+ xlnx_mix_disp_layer_disable(plane);
+err_dma:
+ for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++)
+ if (plane->dma[i].chan)
+ dma_release_channel(plane->dma[i].chan);
+
+ of_node_put(layer_node);
+ return ret;
+}
+
+static int xlnx_mix_parse_dt_bg_video_fmt(struct device_node *node,
+ struct xlnx_mix_hw *mixer_hw)
+{
+ struct device_node *layer_node;
+ struct xlnx_mix_layer_data *layer;
+ const char *vformat;
+
+ layer_node = of_get_child_by_name(node, "layer_0");
+ layer = &mixer_hw->layer_data[XVMIX_MASTER_LAYER_IDX];
+
+ /* Set default values */
+ layer->hw_config.can_alpha = false;
+ layer->hw_config.can_scale = false;
+ layer->hw_config.min_width = XVMIX_LAYER_WIDTH_MIN;
+ layer->hw_config.min_height = XVMIX_LAYER_HEIGHT_MIN;
+
+ if (of_property_read_string(layer_node, "xlnx,vformat",
+ &vformat)) {
+ DRM_ERROR("No xlnx,vformat value for layer 0 in dts\n");
+ return -EINVAL;
+ }
+ /* vid_fmt holds a fourcc; bound the copy to its size */
+ memcpy(&layer->hw_config.vid_fmt, vformat,
+ min_t(size_t, strlen(vformat), sizeof(layer->hw_config.vid_fmt)));
+ layer->hw_config.is_streaming =
+ of_property_read_bool(layer_node, "xlnx,layer-streaming");
+ if (of_property_read_u32(node, "xlnx,bpc", &mixer_hw->bg_layer_bpc)) {
+ DRM_ERROR("Failed to get bits per component (bpc) prop\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32(layer_node, "xlnx,layer-max-width",
+ &layer->hw_config.max_width)) {
+ DRM_ERROR("Failed to get screen width prop\n");
+ return -EINVAL;
+ }
+ mixer_hw->max_layer_width = layer->hw_config.max_width;
+ if (of_property_read_u32(layer_node, "xlnx,layer-max-height",
+ &layer->hw_config.max_height)) {
+ DRM_ERROR("Failed to get screen height prop\n");
+ return -EINVAL;
+ }
+ mixer_hw->max_layer_height = layer->hw_config.max_height;
+ layer->id = XVMIX_LAYER_MASTER;
+
+ return 0;
+}
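+
+/*
+ * Illustrative layer_0 device tree fragment matching the properties
+ * parsed above; values are placeholders and xlnx,bpc lives on the
+ * parent mixer node:
+ *
+ * layer_0 {
+ *     xlnx,vformat = "BG24";
+ *     xlnx,layer-streaming;
+ *     xlnx,layer-max-width = <3840>;
+ *     xlnx,layer-max-height = <2160>;
+ * };
+ */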
+
+static int xlnx_mix_parse_dt_logo_data(struct device_node *node,
+ struct xlnx_mix_hw *mixer_hw)
+{
+ struct xlnx_mix_layer_data *layer_data;
+ struct device_node *logo_node;
+ u32 max_width, max_height;
+
+ logo_node = of_get_child_by_name(node, "logo");
+ if (!logo_node) {
+ DRM_ERROR("No logo node specified in device tree.\n");
+ return -EINVAL;
+ }
+
+ layer_data = &mixer_hw->layer_data[XVMIX_LOGO_LAYER_IDX];
+
+ /* set defaults for logo layer */
+ layer_data->hw_config.min_height = XVMIX_LOGO_LAYER_HEIGHT_MIN;
+ layer_data->hw_config.min_width = XVMIX_LOGO_LAYER_WIDTH_MIN;
+ layer_data->hw_config.is_streaming = false;
+ layer_data->hw_config.vid_fmt = DRM_FORMAT_RGB888;
+ layer_data->hw_config.can_alpha = true;
+ layer_data->hw_config.can_scale = true;
+ layer_data->layer_regs.buff_addr1 = 0;
+ layer_data->layer_regs.buff_addr2 = 0;
+ layer_data->id = mixer_hw->logo_layer_id;
+
+ if (of_property_read_u32(logo_node, "xlnx,logo-width", &max_width)) {
+ DRM_ERROR("Failed to get logo width prop\n");
+ return -EINVAL;
+ }
+ if (max_width > XVMIX_LOGO_LAYER_WIDTH_MAX ||
+ max_width < XVMIX_LOGO_LAYER_WIDTH_MIN) {
+ DRM_ERROR("Illegal mixer logo layer width.\n");
+ return -EINVAL;
+ }
+ layer_data->hw_config.max_width = max_width;
+ mixer_hw->max_logo_layer_width = layer_data->hw_config.max_width;
+
+ if (of_property_read_u32(logo_node, "xlnx,logo-height", &max_height)) {
+ DRM_ERROR("Failed to get logo height prop\n");
+ return -EINVAL;
+ }
+ if (max_height > XVMIX_LOGO_LAYER_HEIGHT_MAX ||
+ max_height < XVMIX_LOGO_LAYER_HEIGHT_MIN) {
+ DRM_ERROR("Illegal mixer logo layer height.\n");
+ return -EINVAL;
+ }
+ layer_data->hw_config.max_height = max_height;
+ mixer_hw->max_logo_layer_height = layer_data->hw_config.max_height;
+ mixer_hw->logo_pixel_alpha_enabled =
+ of_property_read_bool(logo_node, "xlnx,logo-pixel-alpha");
+ if (mixer_hw->logo_pixel_alpha_enabled)
+ layer_data->hw_config.vid_fmt = DRM_FORMAT_RGBA8888;
+
+ return 0;
+}
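+
+/*
+ * Illustrative logo node matching the properties parsed above (values
+ * are placeholders):
+ *
+ * logo {
+ *     xlnx,logo-width = <64>;
+ *     xlnx,logo-height = <64>;
+ *     xlnx,logo-pixel-alpha;
+ * };
+ */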
+
+static int xlnx_mix_dt_parse(struct device *dev, struct xlnx_mix *mixer)
+{
+ struct xlnx_mix_plane *planes;
+ struct xlnx_mix_hw *mixer_hw;
+ struct device_node *node, *vtc_node;
+ struct xlnx_mix_layer_data *l_data;
+ struct resource res;
+ int ret, l_cnt, i;
+
+ node = dev->of_node;
+ mixer_hw = &mixer->mixer_hw;
+ mixer->dpms = DRM_MODE_DPMS_OFF;
+
+ mixer_hw->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(mixer_hw->reset_gpio)) {
+ ret = PTR_ERR(mixer_hw->reset_gpio);
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(dev, "No gpio probed for mixer. Deferring\n");
+ else
+ dev_err(dev, "No reset gpio info from dts for mixer\n");
+ return ret;
+ }
+ gpiod_set_raw_value(mixer_hw->reset_gpio, 0);
+ gpiod_set_raw_value(mixer_hw->reset_gpio, 1);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "Invalid memory address for mixer %d\n", ret);
+ return ret;
+ }
+ /* Read in mandatory global dts properties */
+ mixer_hw->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(mixer_hw->base)) {
+ dev_err(dev, "Failed to map io mem space for mixer\n");
+ return PTR_ERR(mixer_hw->base);
+ }
+ if (of_device_is_compatible(dev->of_node, "xlnx,mixer-4.0")) {
+ mixer_hw->max_layers = 18;
+ mixer_hw->logo_en_mask = BIT(23);
+ mixer_hw->enable_all_mask = (GENMASK(16, 0) |
+ mixer_hw->logo_en_mask);
+ } else {
+ mixer_hw->max_layers = 10;
+ mixer_hw->logo_en_mask = BIT(15);
+ mixer_hw->enable_all_mask = (GENMASK(8, 0) |
+ mixer_hw->logo_en_mask);
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-layers",
+ &mixer_hw->num_layers);
+ if (ret) {
+ dev_err(dev, "No xlnx,num-layers dts prop for mixer node\n");
+ return ret;
+ }
+ mixer_hw->logo_layer_id = mixer_hw->max_layers - 1;
+ if (mixer_hw->num_layers > mixer_hw->max_layers) {
+ dev_err(dev, "Num layer nodes in device tree > mixer max\n");
+ return -EINVAL;
+ }
+ ret = of_property_read_u32(node, "xlnx,dma-addr-width",
+ &mixer_hw->dma_addr_size);
+ if (ret) {
+ dev_err(dev, "missing addr-width dts prop\n");
+ return ret;
+ }
+ if (mixer_hw->dma_addr_size != 32 && mixer_hw->dma_addr_size != 64) {
+ dev_err(dev, "invalid addr-width dts prop\n");
+ return -EINVAL;
+ }
+
+ /* VTC Bridge support */
+ vtc_node = of_parse_phandle(node, "xlnx,bridge", 0);
+ if (vtc_node) {
+ mixer->vtc_bridge = of_xlnx_bridge_get(vtc_node);
+ if (!mixer->vtc_bridge) {
+ dev_info(dev, "Didn't get vtc bridge instance\n");
+ return -EPROBE_DEFER;
+ }
+ } else {
+ dev_info(dev, "vtc bridge property not present\n");
+ }
+
+ mixer_hw->logo_layer_en = of_property_read_bool(node,
+ "xlnx,logo-layer");
+ l_cnt = mixer_hw->num_layers + (mixer_hw->logo_layer_en ? 1 : 0);
+ mixer_hw->layer_cnt = l_cnt;
+
+ l_data = devm_kcalloc(dev, l_cnt, sizeof(*l_data), GFP_KERNEL);
+ if (!l_data)
+ return -ENOMEM;
+ mixer_hw->layer_data = l_data;
+ /* init DRM planes */
+ planes = devm_kcalloc(dev, l_cnt, sizeof(*planes), GFP_KERNEL);
+ if (!planes)
+ return -ENOMEM;
+ mixer->planes = planes;
+ mixer->num_planes = l_cnt;
+ for (i = 0; i < mixer->num_planes; i++)
+ mixer->planes[i].mixer = mixer;
+
+ /* establish background layer video properties from dts */
+ ret = xlnx_mix_parse_dt_bg_video_fmt(node, mixer_hw);
+ if (ret)
+ return ret;
+ if (mixer_hw->logo_layer_en) {
+ /* read logo data from dts */
+ ret = xlnx_mix_parse_dt_logo_data(node, mixer_hw);
+ return ret;
+ }
+ return 0;
+}
+
+static int xlnx_mix_of_init_layer(struct device *dev, struct device_node *node,
+ char *name, struct xlnx_mix_layer_data *layer,
+ u32 max_width, struct xlnx_mix *mixer, int id)
+{
+ struct device_node *layer_node;
+ const char *vformat;
+ int ret;
+
+ layer_node = of_get_child_by_name(node, name);
+ if (!layer_node)
+ return -EINVAL;
+
+ /* Set default values */
+ layer->hw_config.can_alpha = false;
+ layer->hw_config.can_scale = false;
+ layer->hw_config.is_streaming = false;
+ layer->hw_config.max_width = max_width;
+ layer->hw_config.min_width = XVMIX_LAYER_WIDTH_MIN;
+ layer->hw_config.min_height = XVMIX_LAYER_HEIGHT_MIN;
+ layer->hw_config.vid_fmt = 0;
+ layer->id = 0;
+ mixer->planes[id].mixer_layer = layer;
+
+ ret = of_property_read_u32(layer_node, "xlnx,layer-id", &layer->id);
+ if (ret) {
+ dev_err(dev, "xlnx,layer-id property not found\n");
+ return ret;
+ }
+ if (layer->id < 1 || layer->id >= mixer->mixer_hw.max_layers) {
+ dev_err(dev, "Mixer layer id %u in dts is out of legal range\n",
+ layer->id);
+ return -EINVAL;
+ }
+ ret = of_property_read_string(layer_node, "xlnx,vformat", &vformat);
+ if (ret) {
+ dev_err(dev, "No mixer layer vformat in dts for layer id %d\n",
+ layer->id);
+ return ret;
+ }
+
+ /* vid_fmt holds a fourcc; bound the copy to its size */
+ memcpy(&layer->hw_config.vid_fmt, vformat,
+ min_t(size_t, strlen(vformat), sizeof(layer->hw_config.vid_fmt)));
+ layer->hw_config.can_scale =
+ of_property_read_bool(layer_node, "xlnx,layer-scale");
+ if (layer->hw_config.can_scale) {
+ ret = of_property_read_u32(layer_node, "xlnx,layer-max-width",
+ &layer->hw_config.max_width);
+ if (ret) {
+ dev_err(dev, "Mixer layer %d dts missing width prop.\n",
+ layer->id);
+ return ret;
+ }
+
+ if (layer->hw_config.max_width > max_width) {
+ dev_err(dev, "Illlegal Mixer layer %d width %d\n",
+ layer->id, layer->hw_config.max_width);
+ return -EINVAL;
+ }
+ }
+ layer->hw_config.can_alpha =
+ of_property_read_bool(layer_node, "xlnx,layer-alpha");
+ layer->hw_config.is_streaming =
+ of_property_read_bool(layer_node, "xlnx,layer-streaming");
+ if (of_property_read_bool(layer_node, "xlnx,layer-primary")) {
+ if (mixer->drm_primary_layer) {
+ dev_err(dev,
+ "More than one primary layer in mixer dts\n");
+ return -EINVAL;
+ }
+ mixer->drm_primary_layer = &mixer->planes[id];
+ }
+ ret = xlnx_mix_init_plane(&mixer->planes[id], 1, layer_node);
+ if (ret)
+ dev_err(dev, "Unable to init drm mixer plane id = %u", id);
+
+ return ret;
+}
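+
+/*
+ * Illustrative overlay layer node consuming the properties parsed
+ * above; values and the DMA phandle are placeholders:
+ *
+ * layer_1 {
+ *     xlnx,layer-id = <1>;
+ *     xlnx,vformat = "NV12";
+ *     xlnx,layer-alpha;
+ *     xlnx,layer-streaming;
+ *     dmas = <&frmbuf_rd0 0>;
+ *     dma-names = "dma0";
+ * };
+ */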
+
+static irqreturn_t xlnx_mix_intr_handler(int irq, void *data)
+{
+ struct xlnx_mix_hw *mixer = data;
+ u32 intr = xlnx_mix_get_intr_status(mixer);
+
+ if (!intr)
+ return IRQ_NONE;
+ if (mixer->intrpt_handler_fn)
+ mixer->intrpt_handler_fn(mixer->intrpt_data);
+ xlnx_mix_clear_intr_status(mixer, intr);
+
+ return IRQ_HANDLED;
+}
+
+static void xlnx_mix_create_plane_properties(struct xlnx_mix *mixer)
+{
+ mixer->scale_prop = drm_property_create_range(mixer->drm, 0, "scale",
+ XVMIX_SCALE_FACTOR_1X,
+ XVMIX_SCALE_FACTOR_4X);
+ mixer->alpha_prop = drm_property_create_range(mixer->drm, 0, "alpha",
+ XVMIX_ALPHA_MIN,
+ XVMIX_ALPHA_MAX);
+}
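+
+/*
+ * The "scale" and "alpha" ranges created above surface as per-plane DRM
+ * properties, so they can be exercised from user space with a generic
+ * tool, e.g. (illustrative invocation):
+ *
+ * modetest -w <plane_id>:alpha:128
+ */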
+
+static int xlnx_mix_plane_create(struct device *dev, struct xlnx_mix *mixer)
+{
+ struct xlnx_mix_hw *mixer_hw;
+ struct device_node *node, *layer_node;
+ char name[20];
+ struct xlnx_mix_layer_data *layer_data;
+ int ret, i;
+ int layer_idx;
+
+ node = dev->of_node;
+ mixer_hw = &mixer->mixer_hw;
+ xlnx_mix_create_plane_properties(mixer);
+
+ mixer->planes[XVMIX_MASTER_LAYER_IDX].mixer_layer =
+ &mixer_hw->layer_data[XVMIX_MASTER_LAYER_IDX];
+ mixer->planes[XVMIX_MASTER_LAYER_IDX].id = XVMIX_MASTER_LAYER_IDX;
+ mixer->hw_master_layer = &mixer->planes[XVMIX_MASTER_LAYER_IDX];
+
+ if (mixer_hw->logo_layer_en) {
+ mixer->planes[XVMIX_LOGO_LAYER_IDX].mixer_layer =
+ &mixer_hw->layer_data[XVMIX_LOGO_LAYER_IDX];
+ mixer->planes[XVMIX_LOGO_LAYER_IDX].id = XVMIX_LOGO_LAYER_IDX;
+ mixer->hw_logo_layer = &mixer->planes[XVMIX_LOGO_LAYER_IDX];
+ layer_node = of_get_child_by_name(node, "logo");
+ ret = xlnx_mix_init_plane(&mixer->planes[XVMIX_LOGO_LAYER_IDX],
+ 1, layer_node);
+ if (ret)
+ return ret;
+ }
+ layer_idx = mixer_hw->logo_layer_en ? 2 : 1;
+ for (i = 1; i < mixer_hw->num_layers; i++, layer_idx++) {
+ snprintf(name, sizeof(name), "layer_%d", i);
+ ret = xlnx_mix_of_init_layer(dev, node, name,
+ &mixer_hw->layer_data[layer_idx],
+ mixer_hw->max_layer_width,
+ mixer, layer_idx);
+ if (ret)
+ return ret;
+ }
+ /* If none of the overlay layers were designated as the drm
+ * primary layer, default to the mixer's video0 layer as drm primary
+ */
+ if (!mixer->drm_primary_layer)
+ mixer->drm_primary_layer = mixer->hw_master_layer;
+ layer_node = of_get_child_by_name(node, "layer_0");
+ ret = xlnx_mix_init_plane(&mixer->planes[XVMIX_MASTER_LAYER_IDX], 1,
+ layer_node);
+ if (ret)
+ return ret;
+ /* request irq and obtain pixels-per-clock (ppc) property */
+ mixer_hw->irq = irq_of_parse_and_map(node, 0);
+ if (mixer_hw->irq > 0) {
+ ret = devm_request_irq(dev, mixer_hw->irq,
+ xlnx_mix_intr_handler,
+ IRQF_SHARED, "xlnx-mixer", mixer_hw);
+ if (ret) {
+ dev_err(dev, "Failed to request irq\n");
+ return ret;
+ }
+ }
+ ret = of_property_read_u32(node, "xlnx,ppc", &mixer_hw->ppc);
+ if (ret) {
+ dev_err(dev, "No xlnx,ppc property for mixer dts\n");
+ return ret;
+ }
+
+ mixer->max_width = XVMIX_DISP_MAX_WIDTH;
+ mixer->max_height = XVMIX_DISP_MAX_HEIGHT;
+ if (mixer->hw_logo_layer) {
+ layer_data = &mixer_hw->layer_data[XVMIX_LOGO_LAYER_IDX];
+ mixer->max_cursor_width = layer_data->hw_config.max_width;
+ mixer->max_cursor_height = layer_data->hw_config.max_height;
+ }
+ return 0;
+}
+
+/**
+ * xlnx_mix_plane_restore - Restore the plane states
+ * @mixer: mixer device core structure
+ *
+ * Restore the plane states to the default ones. Any state that needs to be
+ * restored should be here. This improves consistency as applications see
+ * the same default values, and removes mismatches between software and
+ * hardware state, since the software values are updated whenever the
+ * hardware values are reset.
+ */
+static void xlnx_mix_plane_restore(struct xlnx_mix *mixer)
+{
+ struct xlnx_mix_plane *plane;
+ unsigned int i;
+
+ if (!mixer)
+ return;
+ /*
+ * Reinitialize property default values as they get reset by DPMS OFF
+ * operation. User will read the correct default values later, and
+ * planes will be initialized with default values.
+ */
+ for (i = 0; i < mixer->num_planes; i++) {
+ plane = &mixer->planes[i];
+ xlnx_mix_hw_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+ }
+}
+
+/**
+ * xlnx_mix_set_bkg_col - Set background color
+ * @mixer: Mixer instance to program with new background color
+ * @rgb_value: RGB color encoded with bg_layer_bpc bits per component,
+ * red in the least significant bits
+ *
+ * Set the color to be output as the background color when the background
+ * stream layer is disabled.
+ */
+static void xlnx_mix_set_bkg_col(struct xlnx_mix_hw *mixer, u64 rgb_value)
+{
+ u32 bg_bpc = mixer->bg_layer_bpc;
+ u32 bpc_mask_shift = XVMIX_MAX_BPC - bg_bpc;
+ u32 val_mask = (GENMASK(15, 0) >> bpc_mask_shift);
+ u16 b_val = (rgb_value >> (bg_bpc * 2)) & val_mask;
+ u16 g_val = (rgb_value >> bg_bpc) & val_mask;
+ u16 r_val = (rgb_value >> 0) & val_mask;
+
+ /* Set Background Color */
+ reg_writel(mixer->base, XVMIX_BACKGROUND_Y_R_DATA, r_val);
+ reg_writel(mixer->base, XVMIX_BACKGROUND_U_G_DATA, g_val);
+ reg_writel(mixer->base, XVMIX_BACKGROUND_V_B_DATA, b_val);
+ mixer->bg_color = rgb_value;
+}
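+
+/*
+ * Bit-layout sketch for the extraction above with bg_layer_bpc == 8:
+ * val_mask is 0xFF, red sits in bits [7:0] of rgb_value, green in
+ * [15:8] and blue in [23:16], so pure red is 0x0000FF and pure blue is
+ * 0xFF0000.
+ */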
+
+/**
+ * xlnx_mix_reset - Reset the mixer core video generator
+ * @mixer: Mixer core instance for which to start video output
+ *
+ * Toggles the reset gpio and restores the bg color, planes and interrupt mask.
+ */
+static void xlnx_mix_reset(struct xlnx_mix *mixer)
+{
+ struct xlnx_mix_hw *mixer_hw = &mixer->mixer_hw;
+
+ gpiod_set_raw_value(mixer_hw->reset_gpio, 0);
+ gpiod_set_raw_value(mixer_hw->reset_gpio, 1);
+ /* restore layer properties and bg color after reset */
+ xlnx_mix_set_bkg_col(mixer_hw, mixer_hw->bg_color);
+ xlnx_mix_plane_restore(mixer);
+ xlnx_mix_intrpt_enable_done(&mixer->mixer_hw);
+}
+
+static void xlnx_mix_dpms(struct xlnx_mix *mixer, int dpms)
+{
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ xlnx_mix_start(&mixer->mixer_hw);
+ break;
+ default:
+ xlnx_mix_stop(&mixer->mixer_hw);
+ msleep(50); /* let the IP shut down */
+ xlnx_mix_reset(mixer);
+ }
+}
+
+/* set crtc dpms */
+static void xlnx_mix_crtc_dpms(struct drm_crtc *base_crtc, int dpms)
+{
+ struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+ int ret;
+ struct videomode vm;
+ struct drm_display_mode *mode = &base_crtc->mode;
+
+ DRM_DEBUG_KMS("dpms: %d\n", dpms);
+ if (mixer->dpms == dpms)
+ return;
+ mixer->dpms = dpms;
+
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ if (!mixer->pixel_clock_enabled) {
+ ret = clk_prepare_enable(mixer->pixel_clock);
+ if (ret)
+ DRM_ERROR("failed to enable a pixel clock\n");
+ else
+ mixer->pixel_clock_enabled = true;
+ }
+
+ if (mixer->vtc_bridge) {
+ drm_display_mode_to_videomode(mode, &vm);
+ xlnx_bridge_set_timing(mixer->vtc_bridge, &vm);
+ xlnx_bridge_enable(mixer->vtc_bridge);
+ }
+
+ xlnx_mix_dpms(mixer, dpms);
+ xlnx_mix_plane_dpms(base_crtc->primary, dpms);
+ break;
+ default:
+ xlnx_mix_plane_dpms(base_crtc->primary, dpms);
+ xlnx_mix_dpms(mixer, dpms);
+ xlnx_bridge_disable(mixer->vtc_bridge);
+ if (mixer->pixel_clock_enabled) {
+ clk_disable_unprepare(mixer->pixel_clock);
+ mixer->pixel_clock_enabled = false;
+ }
+ break;
+ }
+}
+
+static void xlnx_mix_set_intr_handler(struct xlnx_mix *mixer,
+ void (*intr_handler_fn)(void *),
+ void *data)
+{
+ mixer->mixer_hw.intrpt_handler_fn = intr_handler_fn;
+ mixer->mixer_hw.intrpt_data = data;
+}
+
+static void xlnx_mix_crtc_vblank_handler(void *data)
+{
+ struct drm_crtc *base_crtc = data;
+ struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+ struct drm_device *drm = base_crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ drm_crtc_handle_vblank(base_crtc);
+ /* Finish page flip */
+ spin_lock_irqsave(&drm->event_lock, flags);
+ event = mixer->event;
+ mixer->event = NULL;
+ if (event) {
+ drm_crtc_send_vblank_event(base_crtc, event);
+ drm_crtc_vblank_put(base_crtc);
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
+static int xlnx_mix_crtc_enable_vblank(struct drm_crtc *base_crtc)
+{
+ struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+
+ xlnx_mix_set_intr_handler(mixer, xlnx_mix_crtc_vblank_handler,
+ base_crtc);
+ return 0;
+}
+
+static void xlnx_mix_crtc_disable_vblank(struct drm_crtc *base_crtc)
+{
+ struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+
+ mixer->mixer_hw.intrpt_handler_fn = NULL;
+ mixer->mixer_hw.intrpt_data = NULL;
+}
+
+static void xlnx_mix_crtc_destroy(struct drm_crtc *base_crtc)
+{
+ struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+
+ mixer->alpha_prop = NULL;
+ mixer->scale_prop = NULL;
+ mixer->bg_color = 0;
+ /* make sure crtc is off */
+ xlnx_mix_crtc_dpms(base_crtc, DRM_MODE_DPMS_OFF);
+
+ if (mixer->pixel_clock_enabled) {
+ clk_disable_unprepare(mixer->pixel_clock);
+ mixer->pixel_clock_enabled = false;
+ }
+ drm_crtc_cleanup(base_crtc);
+}
+
+static int
+xlnx_mix_disp_crtc_atomic_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static int
+xlnx_mix_disp_crtc_atomic_get_property(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ return 0;
+}
+
+static const struct drm_crtc_funcs xlnx_mix_crtc_funcs = {
+ .destroy = xlnx_mix_crtc_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_set_property = xlnx_mix_disp_crtc_atomic_set_property,
+ .atomic_get_property = xlnx_mix_disp_crtc_atomic_get_property,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = xlnx_mix_crtc_enable_vblank,
+ .disable_vblank = xlnx_mix_crtc_disable_vblank,
+};
+
+static void
+xlnx_mix_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+ int vrefresh;
+
+ xlnx_mix_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+
+ /* Delay of 3 vblank intervals for timing gen to be stable */
+ vrefresh = ((adjusted_mode->clock * 1000) /
+ (adjusted_mode->vtotal * adjusted_mode->htotal));
+ msleep(3 * 1000 / vrefresh);
+}
+
+/**
+ * xlnx_mix_clear_event - Clear any event if pending
+ * @crtc: DRM crtc object
+ */
+static void xlnx_mix_clear_event(struct drm_crtc *crtc)
+{
+ if (crtc->state->event) {
+ complete_all(crtc->state->event->base.completion);
+ crtc->state->event = NULL;
+ }
+}
+
+static void
+xlnx_mix_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ xlnx_mix_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ xlnx_mix_clear_event(crtc);
+}
+
+static void xlnx_mix_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+}
+
+static int xlnx_mix_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
+static void
+xlnx_mix_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ /* Don't rely on vblank when disabling crtc */
+ if (crtc->state->event) {
+ struct xlnx_crtc *xcrtc = to_xlnx_crtc(crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(xcrtc);
+
+ /* Consume the flip_done event from atomic helper */
+ crtc->state->event->pipe = drm_crtc_index(crtc);
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ mixer->event = crtc->state->event;
+ crtc->state->event = NULL;
+ }
+}
+
+static const struct drm_crtc_helper_funcs xlnx_mix_crtc_helper_funcs = {
+ .atomic_enable = xlnx_mix_crtc_atomic_enable,
+ .atomic_disable = xlnx_mix_crtc_atomic_disable,
+ .mode_set_nofb = xlnx_mix_crtc_mode_set_nofb,
+ .atomic_check = xlnx_mix_crtc_atomic_check,
+ .atomic_begin = xlnx_mix_crtc_atomic_begin,
+};
+
+/**
+ * xlnx_mix_crtc_create - create crtc for mixer
+ * @mixer: xilinx video mixer object
+ *
+ * Return:
+ * Zero on success, error on failure
+ */
+static int xlnx_mix_crtc_create(struct xlnx_mix *mixer)
+{
+ struct xlnx_crtc *crtc;
+ struct drm_plane *primary_plane = NULL;
+ struct drm_plane *cursor_plane = NULL;
+ int ret, i;
+
+ crtc = &mixer->crtc;
+ primary_plane = &mixer->drm_primary_layer->base;
+ cursor_plane = mixer->hw_logo_layer ? &mixer->hw_logo_layer->base : NULL;
+
+ for (i = 0; i < mixer->num_planes; i++)
+ xlnx_mix_attach_plane_prop(&mixer->planes[i]);
+ mixer->pixel_clock = devm_clk_get(mixer->drm->dev, NULL);
+ if (IS_ERR(mixer->pixel_clock)) {
+ DRM_DEBUG_KMS("failed to get pixel clock\n");
+ mixer->pixel_clock = NULL;
+ }
+ ret = clk_prepare_enable(mixer->pixel_clock);
+ if (ret) {
+ DRM_ERROR("failed to enable a pixel clock\n");
+ mixer->pixel_clock_enabled = false;
+ goto err_plane;
+ }
+ mixer->pixel_clock_enabled = true;
+ /* initialize drm crtc */
+ ret = drm_crtc_init_with_planes(mixer->drm, &crtc->crtc,
+ primary_plane, cursor_plane,
+ &xlnx_mix_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("failed to initialize mixer crtc\n");
+ goto err_pixel_clk;
+ }
+ drm_crtc_helper_add(&crtc->crtc, &xlnx_mix_crtc_helper_funcs);
+ crtc->get_max_width = &xlnx_mix_crtc_get_max_width;
+ crtc->get_max_height = &xlnx_mix_crtc_get_max_height;
+ crtc->get_align = &xlnx_mix_crtc_get_align;
+ crtc->get_format = &xlnx_mix_crtc_get_format;
+ crtc->get_cursor_height = &xlnx_mix_crtc_get_max_cursor_height;
+ crtc->get_cursor_width = &xlnx_mix_crtc_get_max_cursor_width;
+ xlnx_crtc_register(mixer->drm, crtc);
+
+ return 0;
+
+err_pixel_clk:
+ if (mixer->pixel_clock_enabled) {
+ clk_disable_unprepare(mixer->pixel_clock);
+ mixer->pixel_clock_enabled = false;
+ }
+err_plane:
+ return ret;
+}
+
+/**
+ * xlnx_mix_init - Establishes a default power-on state for the mixer IP
+ * core
+ * @mixer: instance of IP core to initialize to a default state
+ *
+ * The background layer is initialized to the maximum height and width based
+ * on device tree properties, and all overlay layers are set to their minimum
+ * sizes and positioned at 0,0 on the crtc. All layers are inactive (resulting
+ * in video output being generated by the background color generator).
+ * Interrupts are disabled and the IP is started (with auto-restart enabled).
+ */
+static void xlnx_mix_init(struct xlnx_mix_hw *mixer)
+{
+ u32 i;
+ u32 bg_bpc = mixer->bg_layer_bpc;
+ u64 rgb_bg_clr = (0xFFFF >> (XVMIX_MAX_BPC - bg_bpc)) << (bg_bpc * 2);
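+ /*
+ * Worked example, assuming XVMIX_MAX_BPC is 16 as the
+ * GENMASK(15, 0) usage elsewhere suggests: bg_layer_bpc == 8
+ * gives (0xFFFF >> 8) << 16 == 0xFF0000, i.e. blue at full
+ * scale with red and green zero, hence the blue default below.
+ */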
+ enum xlnx_mix_layer_id layer_id;
+ struct xlnx_mix_layer_data *layer_data;
+
+ layer_data = xlnx_mix_get_layer_data(mixer, XVMIX_LAYER_MASTER);
+ xlnx_mix_layer_disable(mixer, mixer->max_layers);
+ xlnx_mix_set_active_area(mixer, layer_data->hw_config.max_width,
+ layer_data->hw_config.max_height);
+ /* default to blue */
+ xlnx_mix_set_bkg_col(mixer, rgb_bg_clr);
+
+ for (i = 0; i < mixer->layer_cnt; i++) {
+ layer_id = mixer->layer_data[i].id;
+ layer_data = &mixer->layer_data[i];
+ if (layer_id == XVMIX_LAYER_MASTER)
+ continue;
+ xlnx_mix_set_layer_window(mixer, layer_id, 0, 0,
+ XVMIX_LAYER_WIDTH_MIN,
+ XVMIX_LAYER_HEIGHT_MIN, 0);
+ if (layer_data->hw_config.can_scale)
+ xlnx_mix_set_layer_scaling(mixer, layer_id, 0);
+ if (layer_data->hw_config.can_alpha)
+ xlnx_mix_set_layer_alpha(mixer, layer_id,
+ XVMIX_ALPHA_MAX);
+ }
+ xlnx_mix_intrpt_enable_done(mixer);
+}
+
+static int xlnx_mix_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_mix *mixer = dev_get_drvdata(dev);
+ struct drm_device *drm = data;
+ int ret;
+
+ mixer->drm = drm;
+ ret = xlnx_mix_plane_create(dev, mixer);
+ if (ret)
+ return ret;
+ ret = xlnx_mix_crtc_create(mixer);
+ if (ret)
+ return ret;
+ xlnx_mix_init(&mixer->mixer_hw);
+
+ return ret;
+}
+
+static void xlnx_mix_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_mix *mixer = dev_get_drvdata(dev);
+
+ dev_set_drvdata(dev, NULL);
+ xlnx_mix_intrpt_disable(&mixer->mixer_hw);
+ xlnx_crtc_unregister(mixer->drm, &mixer->crtc);
+}
+
+static const struct component_ops xlnx_mix_component_ops = {
+ .bind = xlnx_mix_bind,
+ .unbind = xlnx_mix_unbind,
+};
+
+static int xlnx_mix_probe(struct platform_device *pdev)
+{
+ struct xlnx_mix *mixer;
+ int ret;
+
+ mixer = devm_kzalloc(&pdev->dev, sizeof(*mixer), GFP_KERNEL);
+ if (!mixer)
+ return -ENOMEM;
+
+ /* Sub-driver will access mixer from drvdata */
+ platform_set_drvdata(pdev, mixer);
+ ret = xlnx_mix_dt_parse(&pdev->dev, mixer);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to probe mixer\n");
+ return ret;
+ }
+
+ ret = component_add(&pdev->dev, &xlnx_mix_component_ops);
+ if (ret)
+ goto err;
+
+ mixer->master = xlnx_drm_pipeline_init(pdev);
+ if (IS_ERR(mixer->master)) {
+ ret = PTR_ERR(mixer->master);
+ dev_err(&pdev->dev, "Failed to initialize the drm pipeline\n");
+ goto err_component;
+ }
+
+ dev_info(&pdev->dev, "Xilinx Mixer driver probed success\n");
+ return ret;
+
+err_component:
+ component_del(&pdev->dev, &xlnx_mix_component_ops);
+err:
+ return ret;
+}
+
+static int xlnx_mix_remove(struct platform_device *pdev)
+{
+ struct xlnx_mix *mixer = platform_get_drvdata(pdev);
+
+ if (mixer->vtc_bridge)
+ of_xlnx_bridge_put(mixer->vtc_bridge);
+ xlnx_drm_pipeline_exit(mixer->master);
+ component_del(&pdev->dev, &xlnx_mix_component_ops);
+ return 0;
+}
+
+/*
+ * TODO:
+ * In Mixer IP core version 4.0, layer enable bits and logo layer offsets
+ * have changed. To provide backward compatibility, the number of max
+ * layers is currently used to differentiate IP versions.
+ * This logic should be reworked to key off the IP core version directly.
+ */
+
+static const struct of_device_id xlnx_mix_of_match[] = {
+ { .compatible = "xlnx,mixer-3.0", },
+ { .compatible = "xlnx,mixer-4.0", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xlnx_mix_of_match);
+
+static struct platform_driver xlnx_mix_driver = {
+ .probe = xlnx_mix_probe,
+ .remove = xlnx_mix_remove,
+ .driver = {
+ .name = "xlnx-mixer",
+ .of_match_table = xlnx_mix_of_match,
+ },
+};
+
+module_platform_driver(xlnx_mix_driver);
+
+MODULE_AUTHOR("Saurabh Sengar");
+MODULE_DESCRIPTION("Xilinx Mixer Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_pl_disp.c b/drivers/gpu/drm/xlnx/xlnx_pl_disp.c
new file mode 100644
index 000000000000..a4de9b31a717
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_pl_disp.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM CRTC DMA engine driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author : Saurabh Sengar <saurabhs@xilinx.com>
+ * : Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <video/videomode.h>
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * This driver intends to support the display pipeline with DMA engine
+ * driver by initializing DRM crtc and plane objects. The driver makes
+ * the assumption that it is a single-plane pipeline, as a multi-plane
+ * pipeline would require programming beyond the DMA engine interface.
+ */
+
+/**
+ * struct xlnx_dma_chan - struct for DMA engine
+ * @dma_chan: DMA channel
+ * @xt: Interleaved desc config container
+ * @sgl: Data chunk for dma_interleaved_template
+ */
+struct xlnx_dma_chan {
+ struct dma_chan *dma_chan;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+};
+
+/**
+ * struct xlnx_pl_disp - struct for display subsystem
+ * @dev: device structure
+ * @master: logical master device from xlnx drm
+ * @xlnx_crtc: Xilinx DRM driver crtc object
+ * @plane: base drm plane object
+ * @chan: struct for DMA engine
+ * @event: vblank pending event
+ * @callback: callback for registering DMA callback function
+ * @callback_param: parameter for passing to DMA callback function
+ * @drm: core drm object
+ * @fmt: drm color format
+ * @vtc_bridge: vtc_bridge structure
+ * @fid: field id
+ */
+struct xlnx_pl_disp {
+ struct device *dev;
+ struct platform_device *master;
+ struct xlnx_crtc xlnx_crtc;
+ struct drm_plane plane;
+ struct xlnx_dma_chan *chan;
+ struct drm_pending_vblank_event *event;
+ dma_async_tx_callback callback;
+ void *callback_param;
+ struct drm_device *drm;
+ u32 fmt;
+ struct xlnx_bridge *vtc_bridge;
+ u32 fid;
+};
+
+/*
+ * Xlnx crtc functions
+ */
+static inline struct xlnx_pl_disp *crtc_to_dma(struct xlnx_crtc *xlnx_crtc)
+{
+ return container_of(xlnx_crtc, struct xlnx_pl_disp, xlnx_crtc);
+}
+
+/**
+ * xlnx_pl_disp_complete - vblank handler
+ * @param: parameter to vblank handler
+ *
+ * This function handles the vblank interrupt, and sends an event to
+ * CRTC object.
+ */
+static void xlnx_pl_disp_complete(void *param)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = param;
+ struct drm_device *drm = xlnx_pl_disp->drm;
+
+ drm_handle_vblank(drm, 0);
+}
+
+/**
+ * xlnx_pl_disp_get_format - Get the current display pipeline format
+ * @xlnx_crtc: xlnx crtc object
+ *
+ * Get the current format of pipeline
+ *
+ * Return: the corresponding DRM_FORMAT_XXX
+ */
+static uint32_t xlnx_pl_disp_get_format(struct xlnx_crtc *xlnx_crtc)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+
+ return xlnx_pl_disp->fmt;
+}
+
+/**
+ * xlnx_pl_disp_get_align - Get the alignment value for pitch
+ * @xlnx_crtc: xlnx crtc object
+ *
+ * Get the alignment value for pitch from the plane
+ *
+ * Return: The alignment value for pitch.
+ */
+static unsigned int xlnx_pl_disp_get_align(struct xlnx_crtc *xlnx_crtc)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+
+ return 1 << xlnx_pl_disp->chan->dma_chan->device->copy_align;
+}
+
+/*
+ * DRM plane functions
+ */
+static inline struct xlnx_pl_disp *plane_to_dma(struct drm_plane *plane)
+{
+ return container_of(plane, struct xlnx_pl_disp, plane);
+}
+
+/**
+ * xlnx_pl_disp_plane_disable - Disables DRM plane
+ * @plane: DRM plane object
+ *
+ * Disable the DRM plane by stopping the corresponding DMA
+ */
+static void xlnx_pl_disp_plane_disable(struct drm_plane *plane)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = plane_to_dma(plane);
+ struct xlnx_dma_chan *xlnx_dma_chan = xlnx_pl_disp->chan;
+
+ dmaengine_terminate_sync(xlnx_dma_chan->dma_chan);
+}
+
+/**
+ * xlnx_pl_disp_plane_enable - Enables DRM plane
+ * @plane: DRM plane object
+ *
+ * Enable the DRM plane, by enabling the corresponding DMA
+ */
+static void xlnx_pl_disp_plane_enable(struct drm_plane *plane)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = plane_to_dma(plane);
+ struct dma_async_tx_descriptor *desc;
+ enum dma_ctrl_flags flags;
+ struct xlnx_dma_chan *xlnx_dma_chan = xlnx_pl_disp->chan;
+ struct dma_chan *dma_chan = xlnx_dma_chan->dma_chan;
+ struct dma_interleaved_template *xt = &xlnx_dma_chan->xt;
+
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ desc = dmaengine_prep_interleaved_dma(dma_chan, xt, flags);
+ if (!desc) {
+ dev_err(xlnx_pl_disp->dev,
+ "failed to prepare DMA descriptor\n");
+ return;
+ }
+ desc->callback = xlnx_pl_disp->callback;
+ desc->callback_param = xlnx_pl_disp->callback_param;
+ xilinx_xdma_set_earlycb(xlnx_dma_chan->dma_chan, desc, EARLY_CALLBACK);
+
+ if (plane->state->fb->flags == DRM_MODE_FB_ALTERNATE_TOP ||
+ plane->state->fb->flags == DRM_MODE_FB_ALTERNATE_BOTTOM) {
+ if (plane->state->fb->flags == DRM_MODE_FB_ALTERNATE_TOP)
+ xlnx_pl_disp->fid = 1;
+ else
+ xlnx_pl_disp->fid = 0;
+
+ xilinx_xdma_set_fid(xlnx_dma_chan->dma_chan, desc,
+ xlnx_pl_disp->fid);
+ }
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(xlnx_dma_chan->dma_chan);
+}
+
+static void xlnx_pl_disp_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ xlnx_pl_disp_plane_disable(plane);
+}
+
+static int xlnx_pl_disp_plane_mode_set(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, uint32_t src_y,
+ u32 src_w, uint32_t src_h)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = plane_to_dma(plane);
+ const struct drm_format_info *info = fb->format;
+ dma_addr_t luma_paddr, chroma_paddr;
+ size_t stride;
+ struct xlnx_dma_chan *xlnx_dma_chan = xlnx_pl_disp->chan;
+
+ if (info->num_planes > 2) {
+ dev_err(xlnx_pl_disp->dev, "Color format not supported\n");
+ return -EINVAL;
+ }
+ luma_paddr = drm_fb_cma_get_gem_addr(fb, plane->state, 0);
+ if (!luma_paddr) {
+ dev_err(xlnx_pl_disp->dev, "failed to get luma paddr\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(xlnx_pl_disp->dev, "num planes = %d\n", info->num_planes);
+ xlnx_dma_chan->xt.numf = src_h;
+ xlnx_dma_chan->sgl[0].size = drm_format_plane_width_bytes(info,
+ 0, src_w);
+ xlnx_dma_chan->sgl[0].icg = fb->pitches[0] - xlnx_dma_chan->sgl[0].size;
+ xlnx_dma_chan->xt.src_start = luma_paddr;
+ xlnx_dma_chan->xt.frame_size = info->num_planes;
+ xlnx_dma_chan->xt.dir = DMA_MEM_TO_DEV;
+ xlnx_dma_chan->xt.src_sgl = true;
+ xlnx_dma_chan->xt.dst_sgl = false;
+
+	/*
+	 * Do we have a video format aware dma channel? If so, modify the
+	 * descriptor accordingly. Heuristic test: we have a multi-plane
+	 * format but only one dma channel.
+	 */
+ if (info->num_planes > 1) {
+ chroma_paddr = drm_fb_cma_get_gem_addr(fb, plane->state, 1);
+ if (!chroma_paddr) {
+ dev_err(xlnx_pl_disp->dev,
+ "failed to get chroma paddr\n");
+ return -EINVAL;
+ }
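+		/*
+		 * src_icg is the gap between the end of the luma plane and
+		 * the start of the chroma plane; for a contiguous NV12
+		 * buffer (chroma at luma + numf * stride) it works out to 0.
+		 */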
+ stride = xlnx_dma_chan->sgl[0].size +
+ xlnx_dma_chan->sgl[0].icg;
+ xlnx_dma_chan->sgl[0].src_icg = chroma_paddr -
+ xlnx_dma_chan->xt.src_start -
+ (xlnx_dma_chan->xt.numf * stride);
+ }
+
+ return 0;
+}
+
+static void xlnx_pl_disp_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ int ret;
+ struct xlnx_pl_disp *xlnx_pl_disp = plane_to_dma(plane);
+
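+	/* src_* plane coordinates are in 16.16 fixed point; convert to pixels */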
+ ret = xlnx_pl_disp_plane_mode_set(plane,
+ plane->state->fb,
+ plane->state->crtc_x,
+ plane->state->crtc_y,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
+ if (ret) {
+ dev_err(xlnx_pl_disp->dev, "failed to mode set a plane\n");
+ return;
+ }
+	/* in case a frame buffer IP is used, set the color format */
+ xilinx_xdma_drm_config(xlnx_pl_disp->chan->dma_chan,
+ xlnx_pl_disp->plane.state->fb->format->format);
+ /* apply the new fb addr and enable */
+ xlnx_pl_disp_plane_enable(plane);
+}
+
+static int
+xlnx_pl_disp_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *new_plane_state)
+{
+ struct drm_atomic_state *state = new_plane_state->state;
+ const struct drm_plane_state *old_plane_state =
+ drm_atomic_get_old_plane_state(state, plane);
+ struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
+ const struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc_state *new_crtc_state;
+
+ if (!crtc)
+ return 0;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ /* plane must be enabled when state is active */
+ if (new_crtc_state->active && !new_plane_state->crtc)
+ return -EINVAL;
+
+ /*
+ * This check is required to call modeset if there is a change in color
+ * format
+ */
+ if (new_plane_state->fb && old_plane_state->fb &&
+ new_plane_state->fb->format->format !=
+ old_plane_state->fb->format->format)
+ new_crtc_state->mode_changed = true;
+
+ return 0;
+}
+
+static const struct drm_plane_helper_funcs xlnx_pl_disp_plane_helper_funcs = {
+ .atomic_update = xlnx_pl_disp_plane_atomic_update,
+ .atomic_disable = xlnx_pl_disp_plane_atomic_disable,
+ .atomic_check = xlnx_pl_disp_plane_atomic_check,
+};
+
+static struct drm_plane_funcs xlnx_pl_disp_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static inline struct xlnx_pl_disp *drm_crtc_to_dma(struct drm_crtc *crtc)
+{
+ struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+
+ return crtc_to_dma(xlnx_crtc);
+}
+
+static void xlnx_pl_disp_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ drm_crtc_vblank_on(crtc);
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ /* Consume the flip_done event from atomic helper */
+ crtc->state->event->pipe = drm_crtc_index(crtc);
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static void xlnx_pl_disp_clear_event(struct drm_crtc *crtc)
+{
+ if (crtc->state->event) {
+ complete_all(crtc->state->event->base.completion);
+ crtc->state->event = NULL;
+ }
+}
+
+static void xlnx_pl_disp_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+ int vrefresh;
+ struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+ struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+ struct videomode vm;
+
+ if (xlnx_pl_disp->vtc_bridge) {
+ /* set video timing */
+ drm_display_mode_to_videomode(adjusted_mode, &vm);
+ xlnx_bridge_set_timing(xlnx_pl_disp->vtc_bridge, &vm);
+ xlnx_bridge_enable(xlnx_pl_disp->vtc_bridge);
+ }
+
+ xlnx_pl_disp_plane_enable(crtc->primary);
+
+ /* Delay of 1 vblank interval for timing gen to be stable */
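+	/*
+	 * adjusted_mode->clock is in kHz, so clock * 1000 is the pixel rate
+	 * in Hz; dividing by htotal * vtotal (pixels per frame) yields the
+	 * refresh rate, and 1000 / vrefresh is one frame period in ms.
+	 */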
+ vrefresh = (adjusted_mode->clock * 1000) /
+ (adjusted_mode->vtotal * adjusted_mode->htotal);
+ msleep(1 * 1000 / vrefresh);
+}
+
+static void xlnx_pl_disp_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+ struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+
+ xlnx_pl_disp_plane_disable(crtc->primary);
+ xlnx_pl_disp_clear_event(crtc);
+ drm_crtc_vblank_off(crtc);
+ xlnx_bridge_disable(xlnx_pl_disp->vtc_bridge);
+}
+
+static int xlnx_pl_disp_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
+static struct drm_crtc_helper_funcs xlnx_pl_disp_crtc_helper_funcs = {
+ .atomic_enable = xlnx_pl_disp_crtc_atomic_enable,
+ .atomic_disable = xlnx_pl_disp_crtc_atomic_disable,
+ .atomic_check = xlnx_pl_disp_crtc_atomic_check,
+ .atomic_begin = xlnx_pl_disp_crtc_atomic_begin,
+};
+
+static void xlnx_pl_disp_crtc_destroy(struct drm_crtc *crtc)
+{
+ xlnx_pl_disp_plane_disable(crtc->primary);
+ drm_crtc_cleanup(crtc);
+}
+
+static int xlnx_pl_disp_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+ struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+
+ /*
+ * Use the complete callback for vblank event assuming the dma engine
+ * starts on the next descriptor upon this event. This may not be safe
+ * assumption for some dma engines.
+ */
+ xlnx_pl_disp->callback = xlnx_pl_disp_complete;
+ xlnx_pl_disp->callback_param = xlnx_pl_disp;
+
+ return 0;
+}
+
+static void xlnx_pl_disp_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+ struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+
+ xlnx_pl_disp->callback = NULL;
+ xlnx_pl_disp->callback_param = NULL;
+}
+
+static struct drm_crtc_funcs xlnx_pl_disp_crtc_funcs = {
+ .destroy = xlnx_pl_disp_crtc_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = xlnx_pl_disp_crtc_enable_vblank,
+ .disable_vblank = xlnx_pl_disp_crtc_disable_vblank,
+};
+
+static int xlnx_pl_disp_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = data;
+ struct xlnx_pl_disp *xlnx_pl_disp = dev_get_drvdata(dev);
+ int ret;
+ u32 *fmts = NULL;
+ unsigned int num_fmts = 0;
+
+	/*
+	 * in case of a frame buffer IP, query the supported formats and
+	 * their count
+	 */
+ xilinx_xdma_get_drm_vid_fmts(xlnx_pl_disp->chan->dma_chan,
+ &num_fmts, &fmts);
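+	/*
+	 * A DMA engine that is not video format aware reports no formats;
+	 * fall back to the single format parsed from the device tree.
+	 */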
+ ret = drm_universal_plane_init(drm, &xlnx_pl_disp->plane, 0,
+ &xlnx_pl_disp_plane_funcs,
+ fmts ? fmts : &xlnx_pl_disp->fmt,
+ num_fmts ? num_fmts : 1,
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(&xlnx_pl_disp->plane,
+ &xlnx_pl_disp_plane_helper_funcs);
+
+ ret = drm_crtc_init_with_planes(drm, &xlnx_pl_disp->xlnx_crtc.crtc,
+ &xlnx_pl_disp->plane, NULL,
+ &xlnx_pl_disp_crtc_funcs, NULL);
+ if (ret) {
+ drm_plane_cleanup(&xlnx_pl_disp->plane);
+ return ret;
+ }
+
+ drm_crtc_helper_add(&xlnx_pl_disp->xlnx_crtc.crtc,
+ &xlnx_pl_disp_crtc_helper_funcs);
+ xlnx_pl_disp->xlnx_crtc.get_format = &xlnx_pl_disp_get_format;
+ xlnx_pl_disp->xlnx_crtc.get_align = &xlnx_pl_disp_get_align;
+ xlnx_pl_disp->drm = drm;
+ xlnx_crtc_register(xlnx_pl_disp->drm, &xlnx_pl_disp->xlnx_crtc);
+
+ return 0;
+}
+
+static void xlnx_pl_disp_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = dev_get_drvdata(dev);
+
+ drm_plane_cleanup(&xlnx_pl_disp->plane);
+ drm_crtc_cleanup(&xlnx_pl_disp->xlnx_crtc.crtc);
+}
+
+static const struct component_ops xlnx_pl_disp_component_ops = {
+ .bind = xlnx_pl_disp_bind,
+ .unbind = xlnx_pl_disp_unbind,
+};
+
+static int xlnx_pl_disp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *vtc_node;
+ struct xlnx_pl_disp *xlnx_pl_disp;
+ int ret;
+ const char *vformat;
+ struct dma_chan *dma_chan;
+ struct xlnx_dma_chan *xlnx_dma_chan;
+
+ xlnx_pl_disp = devm_kzalloc(dev, sizeof(*xlnx_pl_disp), GFP_KERNEL);
+ if (!xlnx_pl_disp)
+ return -ENOMEM;
+
+ dma_chan = of_dma_request_slave_channel(dev->of_node, "dma0");
+	if (IS_ERR_OR_NULL(dma_chan)) {
+		dev_err(dev, "failed to request dma channel\n");
+		return dma_chan ? PTR_ERR(dma_chan) : -ENODEV;
+	}
+
+	xlnx_dma_chan = devm_kzalloc(dev, sizeof(*xlnx_dma_chan), GFP_KERNEL);
+	if (!xlnx_dma_chan) {
+		dma_release_channel(dma_chan);
+		return -ENOMEM;
+	}
+
+ xlnx_dma_chan->dma_chan = dma_chan;
+ xlnx_pl_disp->chan = xlnx_dma_chan;
+ ret = of_property_read_string(dev->of_node, "xlnx,vformat", &vformat);
+ if (ret) {
+ dev_err(dev, "No xlnx,vformat value in dts\n");
+ goto err_dma;
+ }
+
+	/*
+	 * fmt holds a fourcc code; bound the copy so the string's NUL
+	 * terminator cannot overflow the u32.
+	 */
+	memcpy(&xlnx_pl_disp->fmt, vformat,
+	       min(strlen(vformat), sizeof(xlnx_pl_disp->fmt)));
+
+ /* VTC Bridge support */
+ vtc_node = of_parse_phandle(dev->of_node, "xlnx,bridge", 0);
+	if (vtc_node) {
+		xlnx_pl_disp->vtc_bridge = of_xlnx_bridge_get(vtc_node);
+		if (!xlnx_pl_disp->vtc_bridge) {
+			dev_info(dev, "Didn't get vtc bridge instance\n");
+			ret = -EPROBE_DEFER;
+			goto err_dma;
+		}
+	} else {
+		dev_info(dev, "vtc bridge property not present\n");
+	}
+
+ xlnx_pl_disp->dev = dev;
+ platform_set_drvdata(pdev, xlnx_pl_disp);
+
+ ret = component_add(dev, &xlnx_pl_disp_component_ops);
+ if (ret)
+ goto err_dma;
+
+ xlnx_pl_disp->master = xlnx_drm_pipeline_init(pdev);
+ if (IS_ERR(xlnx_pl_disp->master)) {
+ ret = PTR_ERR(xlnx_pl_disp->master);
+ dev_err(dev, "failed to initialize the drm pipeline\n");
+ goto err_component;
+ }
+
+ dev_info(&pdev->dev, "Xlnx PL display driver probed\n");
+
+ return 0;
+
+err_component:
+ component_del(dev, &xlnx_pl_disp_component_ops);
+err_dma:
+ dma_release_channel(xlnx_pl_disp->chan->dma_chan);
+
+ return ret;
+}
+
+static int xlnx_pl_disp_remove(struct platform_device *pdev)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = platform_get_drvdata(pdev);
+ struct xlnx_dma_chan *xlnx_dma_chan = xlnx_pl_disp->chan;
+
+ of_xlnx_bridge_put(xlnx_pl_disp->vtc_bridge);
+ xlnx_drm_pipeline_exit(xlnx_pl_disp->master);
+ component_del(&pdev->dev, &xlnx_pl_disp_component_ops);
+
+ /* Make sure the channel is terminated before release */
+ dmaengine_terminate_sync(xlnx_dma_chan->dma_chan);
+ dma_release_channel(xlnx_dma_chan->dma_chan);
+
+ return 0;
+}
+
+static const struct of_device_id xlnx_pl_disp_of_match[] = {
+ { .compatible = "xlnx,pl-disp"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xlnx_pl_disp_of_match);
+
+static struct platform_driver xlnx_pl_disp_driver = {
+ .probe = xlnx_pl_disp_probe,
+ .remove = xlnx_pl_disp_remove,
+ .driver = {
+ .name = "xlnx-pl-disp",
+ .of_match_table = xlnx_pl_disp_of_match,
+ },
+};
+
+module_platform_driver(xlnx_pl_disp_driver);
+
+MODULE_AUTHOR("Saurabh Sengar");
+MODULE_DESCRIPTION("Xilinx DRM Display Driver for PL IPs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_scaler.c b/drivers/gpu/drm/xlnx/xlnx_scaler.c
new file mode 100644
index 000000000000..f239d33c7182
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_scaler.c
@@ -0,0 +1,1789 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VPSS SCALER DRM bridge driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Venkateshwar rao G <vgannava@xilinx.com>
+ * Rohit Athavale <rathavale@xilinx.com>
+ */
+
+/*
+ * Overview:
+ * This experimental driver works as a bridge driver and
+ * reuses code from the V4L2 driver.
+ * TODO:
+ * Rework into a modular approach so that driver code is shared between
+ * the V4L2 and DRM frameworks.
+ * Should be integrated with the plane.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <uapi/linux/media-bus-format.h>
+
+#include "xlnx_bridge.h"
+
+#define XSCALER_MAX_WIDTH (3840)
+#define XSCALER_MAX_HEIGHT (2160)
+#define XSCALER_MAX_PHASES (64)
+#define XSCALER_MIN_WIDTH (64)
+#define XSCALER_MIN_HEIGHT (64)
+
+/* Video subsystem block offsets */
+#define S_AXIS_RESET_OFF (0x00010000)
+#define V_HSCALER_OFF (0x00000000)
+#define V_VSCALER_OFF (0x00020000)
+
+/* HW Reset Network GPIO Channel */
+#define XGPIO_CH_RESET_SEL (1)
+#define XGPIO_RESET_MASK_VIDEO_IN BIT(0)
+#define XGPIO_RESET_MASK_IP_AXIS BIT(1)
+#define XGPIO_RESET_MASK_ALL_BLOCKS (XGPIO_RESET_MASK_VIDEO_IN | \
+ XGPIO_RESET_MASK_IP_AXIS)
+#define XGPIO_DATA_OFFSET (0x0)
+#define XGPIO_DATA2_OFFSET (0x8)
+#define XGPIO_TRI2_OFFSET (0xc)
+
+#define XGPIO_ISR_OFFSET (0x120)
+#define XGPIO_IER_OFFSET (0x128)
+#define XGPIO_CHAN_OFFSET (8)
+#define STEP_PRECISION (65536)
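+/* Scaling steps are computed in 16.16 fixed point: 65536 represents 1.0 */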
+
+/* Scaler reset macros */
+#define XSCALER_RESET_ASSERT (0x1)
+#define XSCALER_RESET_DEASSERT (0x0)
+
+/* Video IP PPC */
+#define XSCALER_PPC_1 (1)
+#define XSCALER_PPC_2 (2)
+
+#define XV_HSCALER_MAX_H_TAPS (12)
+#define XV_HSCALER_MAX_H_PHASES (64)
+#define XV_HSCALER_MAX_LINE_WIDTH (3840)
+#define XV_VSCALER_MAX_V_TAPS (12)
+#define XV_VSCALER_MAX_V_PHASES (64)
+
+#define XV_HSCALER_TAPS_2 (2)
+#define XV_HSCALER_TAPS_4 (4)
+#define XV_HSCALER_TAPS_6 (6)
+#define XV_HSCALER_TAPS_8 (8)
+#define XV_HSCALER_TAPS_10 (10)
+#define XV_HSCALER_TAPS_12 (12)
+#define XV_VSCALER_TAPS_2 (2)
+#define XV_VSCALER_TAPS_4 (4)
+#define XV_VSCALER_TAPS_6 (6)
+#define XV_VSCALER_TAPS_8 (8)
+#define XV_VSCALER_TAPS_10 (10)
+#define XV_VSCALER_TAPS_12 (12)
+
+/* Mask definitions for Low and high 16 bits in a 32 bit number */
+#define XHSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XHSC_MASK_HIGH_16BITS GENMASK(31, 16)
+#define XHSC_MASK_LOW_32BITS GENMASK(31, 0)
+#define XHSC_STEP_PRECISION_SHIFT (16)
+#define XHSC_HPHASE_SHIFT_BY_6 (6)
+#define XHSC_HPHASE_MULTIPLIER (9)
+#define XSCALER_BITSHIFT_16 (16)
+
+/* Mask definitions for Low and high 16 bits in a 32 bit number */
+#define XVSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XVSC_MASK_HIGH_16BITS GENMASK(31, 16)
+
+/* Scaler AP Control Registers */
+#define XSCALER_START BIT(0)
+#define XSCALER_AUTO_RESTART BIT(7)
+#define XSCALER_STREAM_ON (XSCALER_START | XSCALER_AUTO_RESTART)
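+/*
+ * Writing START together with AUTO_RESTART keeps the core running frame
+ * after frame without software re-arming it.
+ */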
+
+/* H-scaler registers */
+#define XV_HSCALER_CTRL_ADDR_AP_CTRL (0x0000)
+#define XV_HSCALER_CTRL_ADDR_GIE (0x0004)
+#define XV_HSCALER_CTRL_ADDR_IER (0x0008)
+#define XV_HSCALER_CTRL_ADDR_ISR (0x000c)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HEIGHT_DATA (0x0010)
+#define XV_HSCALER_CTRL_ADDR_HWREG_WIDTHIN_DATA (0x0018)
+#define XV_HSCALER_CTRL_ADDR_HWREG_WIDTHOUT_DATA (0x0020)
+#define XV_HSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA (0x0028)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PIXELRATE_DATA (0x0030)
+#define XV_HSCALER_CTRL_ADDR_HWREG_COLORMODEOUT_DATA	(0x0038)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_BASE (0x0800)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_HIGH (0x0bff)
+
+#define XV_HSCALER_CTRL_WIDTH_HWREG_HFLTCOEFF (16)
+#define XV_HSCALER_CTRL_DEPTH_HWREG_HFLTCOEFF (384)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_BASE (0x2000)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_HIGH (0x3fff)
+#define XV_HSCALER_CTRL_WIDTH_HWREG_PHASESH_V (18)
+#define XV_HSCALER_CTRL_DEPTH_HWREG_PHASESH_V (1920)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASEH_FIX (0x4000)
+
+/* H-scaler masks */
+#define XV_HSCALER_PHASESH_V_OUTPUT_WR_EN BIT(8)
+
+/* V-scaler registers */
+#define XV_VSCALER_CTRL_ADDR_AP_CTRL (0x000)
+#define XV_VSCALER_CTRL_ADDR_GIE (0x004)
+#define XV_VSCALER_CTRL_ADDR_IER (0x008)
+#define XV_VSCALER_CTRL_ADDR_ISR (0x00c)
+#define XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTIN_DATA (0x010)
+#define XV_VSCALER_CTRL_ADDR_HWREG_WIDTH_DATA (0x018)
+#define XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTOUT_DATA (0x020)
+#define XV_VSCALER_CTRL_ADDR_HWREG_LINERATE_DATA (0x028)
+#define XV_VSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA (0x030)
+#define XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_BASE (0x800)
+#define XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_HIGH (0xbff)
+
+/* H-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const u16
+xhsc_coeff_taps6[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_6] = {
+ { -132, 236, 3824, 236, -132, 64, },
+ { -116, 184, 3816, 292, -144, 64, },
+ { -100, 132, 3812, 348, -160, 64, },
+ { -88, 84, 3808, 404, -176, 64, },
+ { -72, 36, 3796, 464, -192, 64, },
+ { -60, -8, 3780, 524, -208, 68, },
+ { -48, -52, 3768, 588, -228, 68, },
+ { -32, -96, 3748, 652, -244, 68, },
+ { -20, -136, 3724, 716, -260, 72, },
+ { -8, -172, 3696, 784, -276, 72, },
+ { 0, -208, 3676, 848, -292, 72, },
+ { 12, -244, 3640, 920, -308, 76, },
+ { 20, -276, 3612, 988, -324, 76, },
+ { 32, -304, 3568, 1060, -340, 80, },
+ { 40, -332, 3532, 1132, -356, 80, },
+ { 48, -360, 3492, 1204, -372, 84, },
+ { 56, -384, 3448, 1276, -388, 88, },
+ { 64, -408, 3404, 1352, -404, 88, },
+ { 72, -428, 3348, 1428, -416, 92, },
+ { 76, -448, 3308, 1500, -432, 92, },
+ { 84, -464, 3248, 1576, -444, 96, },
+ { 88, -480, 3200, 1652, -460, 96, },
+ { 92, -492, 3140, 1728, -472, 100, },
+ { 96, -504, 3080, 1804, -484, 104, },
+ { 100, -516, 3020, 1880, -492, 104, },
+ { 104, -524, 2956, 1960, -504, 104, },
+ { 104, -532, 2892, 2036, -512, 108, },
+ { 108, -540, 2832, 2108, -520, 108, },
+ { 108, -544, 2764, 2184, -528, 112, },
+ { 112, -544, 2688, 2260, -532, 112, },
+ { 112, -548, 2624, 2336, -540, 112, },
+ { 112, -548, 2556, 2408, -544, 112, },
+ { 112, -544, 2480, 2480, -544, 112, },
+ { 112, -544, 2408, 2556, -548, 112, },
+ { 112, -540, 2336, 2624, -548, 112, },
+ { 112, -532, 2260, 2688, -544, 112, },
+ { 112, -528, 2184, 2764, -544, 108, },
+ { 108, -520, 2108, 2832, -540, 108, },
+ { 108, -512, 2036, 2892, -532, 104, },
+ { 104, -504, 1960, 2956, -524, 104, },
+ { 104, -492, 1880, 3020, -516, 100, },
+ { 104, -484, 1804, 3080, -504, 96, },
+ { 100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const u16
+xhsc_coeff_taps8[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const u16
+xhsc_coeff_taps10[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const u16
+xhsc_coeff_taps12[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
+
+/* V-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const u16
+xvsc_coeff_taps6[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_6] = {
+ {-132, 236, 3824, 236, -132, 64, },
+ {-116, 184, 3816, 292, -144, 64, },
+ {-100, 132, 3812, 348, -160, 64, },
+ {-88, 84, 3808, 404, -176, 64, },
+ {-72, 36, 3796, 464, -192, 64, },
+ {-60, -8, 3780, 524, -208, 68, },
+ {-48, -52, 3768, 588, -228, 68, },
+ {-32, -96, 3748, 652, -244, 68, },
+ {-20, -136, 3724, 716, -260, 72, },
+ {-8, -172, 3696, 784, -276, 72, },
+ {0, -208, 3676, 848, -292, 72, },
+ {12, -244, 3640, 920, -308, 76, },
+ {20, -276, 3612, 988, -324, 76, },
+ {32, -304, 3568, 1060, -340, 80, },
+ {40, -332, 3532, 1132, -356, 80, },
+ {48, -360, 3492, 1204, -372, 84, },
+ {56, -384, 3448, 1276, -388, 88, },
+ {64, -408, 3404, 1352, -404, 88, },
+ {72, -428, 3348, 1428, -416, 92, },
+ {76, -448, 3308, 1500, -432, 92, },
+ {84, -464, 3248, 1576, -444, 96, },
+ {88, -480, 3200, 1652, -460, 96, },
+ {92, -492, 3140, 1728, -472, 100, },
+ {96, -504, 3080, 1804, -484, 104, },
+ {100, -516, 3020, 1880, -492, 104, },
+ {104, -524, 2956, 1960, -504, 104, },
+ {104, -532, 2892, 2036, -512, 108, },
+ {108, -540, 2832, 2108, -520, 108, },
+ {108, -544, 2764, 2184, -528, 112, },
+ {112, -544, 2688, 2260, -532, 112, },
+ {112, -548, 2624, 2336, -540, 112, },
+ {112, -548, 2556, 2408, -544, 112, },
+ {112, -544, 2480, 2480, -544, 112, },
+ {112, -544, 2408, 2556, -548, 112, },
+ {112, -540, 2336, 2624, -548, 112, },
+ {112, -532, 2260, 2688, -544, 112, },
+ {112, -528, 2184, 2764, -544, 108, },
+ {108, -520, 2108, 2832, -540, 108, },
+ {108, -512, 2036, 2892, -532, 104, },
+ {104, -504, 1960, 2956, -524, 104, },
+ {104, -492, 1880, 3020, -516, 100, },
+ {104, -484, 1804, 3080, -504, 96, },
+ {100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const u16
+xvsc_coeff_taps8[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const u16
+xvsc_coeff_taps10[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const u16
+xvsc_coeff_taps12[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
+
+enum xilinx_scaler_vid_reg_fmts {
+ XVIDC_CSF_RGB = 0,
+ XVIDC_CSF_YCRCB_444,
+ XVIDC_CSF_YCRCB_422,
+ XVIDC_CSF_YCRCB_420,
+};
+
+static const u32 xilinx_scaler_video_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_VUY8_1X24,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_VYYUYY8_1X24,
+};
+
+/* Feature flag bit for the xscaler */
+#define XSCALER_HPHASE_FIX BIT(0)
+
+/**
+ * struct xscaler_feature - dt or IP property structure
+ * @flags: Bitmask of properties enabled in IP or dt
+ */
+struct xscaler_feature {
+ u32 flags;
+};
+
+/**
+ * struct xilinx_scaler - Core configuration of scaler device structure
+ * @base: pointer to register base address
+ * @dev: device structure
+ * @bridge: xilinx bridge
+ * @width_in: input width
+ * @height_in: input height
+ * @width_out: output width
+ * @height_out: output height
+ * @fmt_in: input format
+ * @fmt_out: output format
+ * @num_hori_taps: number of horizontal taps
+ * @num_vert_taps: number of vertical taps
+ * @max_num_phases: maximum number of phases
+ * @pix_per_clk: Pixels per Clock cycle the IP operates upon
+ * @max_pixels: The maximum number of pixels that the H-scaler examines
+ * @max_lines: The maximum number of lines that the V-scaler examines
+ * @H_phases: The phases needed to program the H-scaler for different taps
+ * @hscaler_coeff: The complete array of H-scaler coefficients
+ * @vscaler_coeff: The complete array of V-scaler coefficients
+ * @is_polyphase: Track if scaling algorithm is polyphase or not
+ * @rst_gpio: GPIO reset line to bring VPSS Scaler out of reset
+ * @ctrl_clk: AXI Lite clock
+ * @axis_clk: Video Clock
+ * @cfg: Pointer to scaler config structure
+ */
+struct xilinx_scaler {
+ void __iomem *base;
+ struct device *dev;
+ struct xlnx_bridge bridge;
+ u32 width_in;
+ u32 height_in;
+ u32 width_out;
+ u32 height_out;
+ u32 fmt_in;
+ u32 fmt_out;
+ u32 num_hori_taps;
+ u32 num_vert_taps;
+ u32 max_num_phases;
+ u32 pix_per_clk;
+ u32 max_pixels;
+ u32 max_lines;
+ u32 H_phases[XV_HSCALER_MAX_LINE_WIDTH];
+ short hscaler_coeff[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_MAX_H_TAPS];
+ short vscaler_coeff[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_MAX_V_TAPS];
+ bool is_polyphase;
+ struct gpio_desc *rst_gpio;
+ struct clk *ctrl_clk;
+ struct clk *axis_clk;
+ const struct xscaler_feature *cfg;
+};
+
+static inline void xilinx_scaler_write(void __iomem *base, u32 offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xilinx_scaler_read(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+static inline void xilinx_scaler_clr(void __iomem *base, u32 offset, u32 clr)
+{
+ xilinx_scaler_write(base, offset,
+ xilinx_scaler_read(base, offset) & ~clr);
+}
+
+static inline void xilinx_scaler_set(void __iomem *base, u32 offset, u32 set)
+{
+ xilinx_scaler_write(base, offset,
+ xilinx_scaler_read(base, offset) | set);
+}
+
+static inline void
+xilinx_scaler_disable_block(struct xilinx_scaler *scaler, u32 channel,
+ u32 ip_block)
+{
+ xilinx_scaler_clr(scaler->base, ((channel - 1) * XGPIO_CHAN_OFFSET) +
+ XGPIO_DATA_OFFSET + S_AXIS_RESET_OFF, ip_block);
+}
+
+static inline void
+xilinx_scaler_enable_block(struct xilinx_scaler *scaler, u32 channel,
+ u32 ip_block)
+{
+ xilinx_scaler_set(scaler->base, ((channel - 1) * XGPIO_CHAN_OFFSET) +
+ XGPIO_DATA_OFFSET + S_AXIS_RESET_OFF, ip_block);
+}
+
+/**
+ * bridge_to_layer - Gets the parent structure
+ * @bridge: pointer to the member.
+ *
+ * Return: parent structure pointer
+ */
+static inline struct xilinx_scaler *bridge_to_layer(struct xlnx_bridge *bridge)
+{
+ return container_of(bridge, struct xilinx_scaler, bridge);
+}
+
+/**
+ * xilinx_scaler_reset - Resets scaler block
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function resets the scaler block.
+ */
+static void xilinx_scaler_reset(struct xilinx_scaler *scaler)
+{
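+	/*
+	 * Assert reset on all blocks behind the GPIO reset network, then
+	 * release only the IP/AXIS reset line.
+	 */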
+ xilinx_scaler_disable_block(scaler, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_ALL_BLOCKS);
+ xilinx_scaler_enable_block(scaler, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_IP_AXIS);
+}
+
+/**
+ * xv_hscaler_calculate_phases - Calculates h-scaler phases
+ * @scaler: Pointer to scaler device structure
+ * @width_in: input width
+ * @width_out: output width
+ * @pixel_rate: pixel rate
+ */
+static void
+xv_hscaler_calculate_phases(struct xilinx_scaler *scaler,
+ u32 width_in, u32 width_out, u32 pixel_rate)
+{
+ unsigned int loop_width;
+ unsigned int x, s;
+ int offset = 0;
+ int xwrite_pos = 0;
+ bool output_write_en;
+ bool get_new_pix;
+ u64 phaseH;
+ u32 array_idx = 0;
+	int nr_rds = 0;
+ int nr_rds_clck;
+ unsigned int nphases = scaler->max_num_phases;
+ unsigned int nppc = scaler->pix_per_clk;
+ unsigned int shift = XHSC_STEP_PRECISION_SHIFT - ilog2(nphases);
+
+	loop_width = max_t(u32, width_in, width_out);
+	loop_width = ALIGN(loop_width + nppc - 1, nppc);
+
+	/* the loop below ORs phase data into H_phases; clear stale words */
+	memset(scaler->H_phases, 0, sizeof(scaler->H_phases));
+
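+	/*
+	 * The 16.16 fixed-point offset is mapped onto nphases bins: with
+	 * the default 64 phases, shift = 16 - log2(64) = 10, so the top
+	 * six fractional bits select the filter phase per output pixel.
+	 */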
+ for (x = 0; x < loop_width; x++) {
+ nr_rds_clck = 0;
+ for (s = 0; s < nppc; s++) {
+ phaseH = (offset >> shift) & (nphases - 1);
+ get_new_pix = false;
+ output_write_en = false;
+ if ((offset >> XHSC_STEP_PRECISION_SHIFT) != 0) {
+ get_new_pix = true;
+ offset -= (1 << XHSC_STEP_PRECISION_SHIFT);
+ array_idx++;
+ }
+
+ if (((offset >> XHSC_STEP_PRECISION_SHIFT) == 0) &&
+ xwrite_pos < width_out) {
+ offset += pixel_rate;
+ output_write_en = true;
+ xwrite_pos++;
+ }
+
+ scaler->H_phases[x] |= (phaseH <<
+ (s * XHSC_HPHASE_MULTIPLIER));
+ scaler->H_phases[x] |= (array_idx <<
+ (XHSC_HPHASE_SHIFT_BY_6 +
+ (s * XHSC_HPHASE_MULTIPLIER)));
+ if (output_write_en) {
+ scaler->H_phases[x] |=
+ (XV_HSCALER_PHASESH_V_OUTPUT_WR_EN <<
+ (s * XHSC_HPHASE_MULTIPLIER));
+ }
+
+ if (get_new_pix)
+ nr_rds_clck++;
+ }
+ if (array_idx >= nppc)
+ array_idx &= (nppc - 1);
+
+ nr_rds += nr_rds_clck;
+ if (nr_rds >= nppc)
+ nr_rds -= nppc;
+ }
+}
+
+/**
+ * xv_hscaler_load_ext_coeff - Loads external coefficients of h-scaler
+ * @scaler: Pointer to scaler device structure
+ * @coeff: Pointer to coeff array
+ * @ntaps: number of taps
+ *
+ * This function loads h-scaler coefficients.
+ */
+static void
+xv_hscaler_load_ext_coeff(struct xilinx_scaler *scaler,
+ const short *coeff, u32 ntaps)
+{
+ unsigned int i, j, pad, offset;
+ u32 nphases = scaler->max_num_phases;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XV_HSCALER_MAX_H_TAPS - ntaps;
+ offset = pad >> 1;
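+	/*
+	 * Split the padding evenly on both sides so the effective filter
+	 * stays centered within the max-tap window.
+	 */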
+ /* Load coefficients into scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ scaler->hscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+
+ if (pad) {
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < offset; j++)
+ scaler->hscaler_coeff[i][j] = 0;
+ j = ntaps + offset;
+ for (; j < XV_HSCALER_MAX_H_TAPS; j++)
+ scaler->hscaler_coeff[i][j] = 0;
+ }
+ }
+}
+
+/**
+ * xv_hscaler_coeff_select - Selection of H-Scaler coefficients of operation
+ * @scaler: Pointer to Scaler device structure
+ * @width_in: Width of input video
+ * @width_out: Width of desired output video
+ *
+ * There are instances when an N-tap filter might operate in an M-tap
+ * configuration where N > M.
+ *
+ * For example :
+ * Depending on the ratio of scaling (while downscaling), a 12-tap
+ * filter may operate with 10 tap coefficients and zero-pads the remaining
+ * coefficients.
+ *
+ * While upscaling the driver will program 6-tap filter coefficients
+ * in any N-tap configurations (for N >= 6).
+ *
+ * This selection is adopted by the driver as it gives optimal
+ * video output, as determined by repeated testing of the IP.
+ *
+ * Return: Will return 0 if successful. Returns -EINVAL on an unsupported
+ * H-scaler number of taps.
+ */
+static int
+xv_hscaler_select_coeff(struct xilinx_scaler *scaler,
+ u32 width_in, u32 width_out)
+{
+ const short *coeff;
+ u16 hscale_ratio;
+ u32 ntaps = scaler->num_hori_taps;
+
+ /*
+ * Scale Down Mode will use dynamic filter selection logic
+ * Scale Up Mode (including 1:1) will always use 6 tap filter
+ */
+ if (width_out < width_in) {
+ hscale_ratio = ((width_in * 10) / width_out);
+
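+		/*
+		 * The ratio is kept scaled by 10, so the thresholds 15, 25
+		 * and 35 below correspond to downscaling factors of 1.5x,
+		 * 2.5x and 3.5x.
+		 */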
+ switch (scaler->num_hori_taps) {
+ case XV_HSCALER_TAPS_6:
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ break;
+ case XV_HSCALER_TAPS_8:
+ if (hscale_ratio > 15) {
+ coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ case XV_HSCALER_TAPS_10:
+ if (hscale_ratio > 25) {
+ coeff = &xhsc_coeff_taps10[0][0];
+ ntaps = XV_HSCALER_TAPS_10;
+ } else if (hscale_ratio > 15) {
+ coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ case XV_HSCALER_TAPS_12:
+ if (hscale_ratio > 35) {
+ coeff = &xhsc_coeff_taps12[0][0];
+ ntaps = XV_HSCALER_TAPS_12;
+ } else if (hscale_ratio > 25) {
+ coeff = &xhsc_coeff_taps10[0][0];
+ ntaps = XV_HSCALER_TAPS_10;
+ } else if (hscale_ratio > 15) {
+ coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ default:
+			dev_err(scaler->dev, "Unsupported H-scaler number of taps\n");
+ return -EINVAL;
+ }
+ } else {
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ xv_hscaler_load_ext_coeff(scaler, coeff, ntaps);
+ return 0;
+}
+
+/**
+ * xv_hscaler_set_coeff - Sets h-scaler coefficients
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function sets coefficients of h-scaler.
+ */
+static void xv_hscaler_set_coeff(struct xilinx_scaler *scaler)
+{
+ int val, i, j, offset, rd_indx;
+ u32 ntaps = scaler->num_hori_taps;
+ u32 nphases = scaler->max_num_phases;
+ u32 base_addr;
+
+ offset = (XV_HSCALER_MAX_H_TAPS - ntaps) / 2;
+ base_addr = V_HSCALER_OFF + XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_BASE;
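+	/*
+	 * Pack two 16-bit coefficients per 32-bit register write: the
+	 * even-indexed tap in the low half-word and the odd-indexed tap
+	 * in the high half-word.
+	 */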
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (scaler->hscaler_coeff[i][rd_indx + 1] <<
+ XSCALER_BITSHIFT_16) |
+ (scaler->hscaler_coeff[i][rd_indx] &
+ XHSC_MASK_LOW_16BITS);
+ xilinx_scaler_write(scaler->base, base_addr +
+ ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+/**
+ * xv_vscaler_load_ext_coeff - Loads external coefficients of v-scaler
+ * @scaler: Pointer to scaler device structure
+ * @coeff: Pointer to coeff array
+ * @ntaps: number of taps
+ *
+ * This function loads v-scaler coefficients.
+ */
+static void
+xv_vscaler_load_ext_coeff(struct xilinx_scaler *scaler,
+ const short *coeff, u32 ntaps)
+{
+ int i, j, pad, offset;
+ u32 nphases = scaler->max_num_phases;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XV_VSCALER_MAX_V_TAPS - ntaps;
+ offset = pad ? (pad >> 1) : 0;
+ /* Load User defined coefficients into scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ scaler->vscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+ if (pad) {
+ /* effective taps < max_taps */
+ for (i = 0; i < nphases; i++) {
+ /* pad left */
+ for (j = 0; j < offset; j++)
+ scaler->vscaler_coeff[i][j] = 0;
+ /* pad right */
+ j = ntaps + offset;
+ for (; j < XV_VSCALER_MAX_V_TAPS; j++)
+ scaler->vscaler_coeff[i][j] = 0;
+ }
+ }
+}
+
+/**
+ * xv_vscaler_set_coeff - Sets v-scaler coefficients
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function sets coefficients of v-scaler.
+ */
+static void xv_vscaler_set_coeff(struct xilinx_scaler *scaler)
+{
+ u32 nphases = scaler->max_num_phases;
+ u32 ntaps = scaler->num_vert_taps;
+ int val, i, j, offset, rd_indx;
+ u32 base_addr;
+
+ offset = (XV_VSCALER_MAX_V_TAPS - ntaps) / 2;
+ base_addr = V_VSCALER_OFF + XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_BASE;
+
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (scaler->vscaler_coeff[i][rd_indx + 1] <<
+ XSCALER_BITSHIFT_16) |
+ (scaler->vscaler_coeff[i][rd_indx] &
+ XVSC_MASK_LOW_16BITS);
+ xilinx_scaler_write(scaler->base, base_addr +
+ ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+/**
+ * xv_vscaler_coeff_select - Selection of V-Scaler coefficients of operation
+ * @scaler: Pointer to Scaler device structure
+ * @height_in: Height of input video
+ * @height_out: Height of desired output video
+ *
+ * There are instances when an N-tap filter might operate in an M-tap
+ * configuration where N > M.
+ *
+ * For example :
+ * Depending on the ratio of scaling (while downscaling), a 10-tap
+ * filter may operate with 6 tap coefficients and zero-pads the remaining
+ * coefficients.
+ *
+ * While upscaling the driver will program 6-tap filter coefficients
+ * in any N-tap configurations (for N >= 6).
+ *
+ * This selection is adopted by the as it gives optimal
+ * video output determined by repeated testing of the IP
+ *
+ * Return: Will return 0 if successful. Returns -EINVAL on an unsupported
+ * V-scaler number of taps.
+ */
+static int
+xv_vscaler_select_coeff(struct xilinx_scaler *scaler,
+ u32 height_in, u32 height_out)
+{
+ const short *coeff;
+ u16 vscale_ratio;
+ u32 ntaps = scaler->num_vert_taps;
+
+ /*
+ * Scale Down Mode will use dynamic filter selection logic
+ * Scale Up Mode (including 1:1) will always use 6 tap filter
+ */
+
+ if (height_out < height_in) {
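+ /* ratio in units of 0.1, e.g. 2160 -> 1080 gives 20 (2.0x downscale) */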
+ vscale_ratio = ((height_in * 10) / height_out);
+
+ switch (scaler->num_vert_taps) {
+ case XV_VSCALER_TAPS_6:
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ break;
+ case XV_VSCALER_TAPS_8:
+ if (vscale_ratio > 15) {
+ coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XV_VSCALER_TAPS_8;
+ } else {
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+ break;
+ case XV_VSCALER_TAPS_10:
+ if (vscale_ratio > 25) {
+ coeff = &xvsc_coeff_taps10[0][0];
+ ntaps = XV_VSCALER_TAPS_10;
+ } else if (vscale_ratio > 15) {
+ coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XV_VSCALER_TAPS_8;
+ } else {
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+ break;
+ case XV_VSCALER_TAPS_12:
+ if (vscale_ratio > 35) {
+ coeff = &xvsc_coeff_taps12[0][0];
+ ntaps = XV_VSCALER_TAPS_12;
+ } else if (vscale_ratio > 25) {
+ coeff = &xvsc_coeff_taps10[0][0];
+ ntaps = XV_VSCALER_TAPS_10;
+ } else if (vscale_ratio > 15) {
+ coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XV_VSCALER_TAPS_8;
+ } else {
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+
+ xv_vscaler_load_ext_coeff(scaler, coeff, ntaps);
+ return 0;
+}
+
+/**
+ * xv_hscaler_set_phases - Sets phases of h-scaler
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function sets phases of h-scaler.
+ */
+static void
+xv_hscaler_set_phases(struct xilinx_scaler *scaler)
+{
+ u32 loop_width;
+ u32 index, val;
+ u32 offset, i, lsb, msb;
+
+ loop_width = scaler->max_pixels / scaler->pix_per_clk;
+ if (scaler->cfg->flags & XSCALER_HPHASE_FIX) {
+ offset = V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_PHASEH_FIX;
+ } else {
+ offset = V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_BASE;
+ }
+
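+ /*
+ * 1 ppc: two 16-bit phase values are packed per 32-bit register write;
+ * 2 ppc: one masked 32-bit phase value is written per register.
+ */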
+ switch (scaler->pix_per_clk) {
+ case XSCALER_PPC_1:
+ index = 0;
+ for (i = 0; i < loop_width; i += 2) {
+ lsb = scaler->H_phases[i] & XHSC_MASK_LOW_16BITS;
+ msb = scaler->H_phases[i + 1] & XHSC_MASK_LOW_16BITS;
+ val = (msb << 16 | lsb);
+ xilinx_scaler_write(scaler->base, offset +
+ (index * 4), val);
+ ++index;
+ }
+ return;
+ case XSCALER_PPC_2:
+ for (i = 0; i < loop_width; i++) {
+ val = (scaler->H_phases[i] & XHSC_MASK_LOW_32BITS);
+ xilinx_scaler_write(scaler->base, offset +
+ (i * 4), val);
+ }
+ return;
+ }
+}
+
+/**
+ * xv_vscaler_setup_video_fmt - Sets video format of v-scaler
+ * @scaler: Pointer to scaler device structure
+ * @code_in: format to be set
+ *
+ * This function sets the given media bus format on the v-scaler.
+ *
+ * Return: format value on success. -EINVAL for invalid format.
+ */
+static int
+xv_vscaler_setup_video_fmt(struct xilinx_scaler *scaler, u32 code_in)
+{
+ u32 video_in;
+
+ switch (code_in) {
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ video_in = XVIDC_CSF_YCRCB_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ video_in = XVIDC_CSF_YCRCB_444;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ video_in = XVIDC_CSF_RGB;
+ break;
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ video_in = XVIDC_CSF_YCRCB_420;
+ break;
+ default:
+ dev_info(scaler->dev, "Vscaler Unsupported media fmt\n");
+ return -EINVAL;
+ }
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA,
+ video_in);
+ return video_in;
+}
+
+/**
+ * xv_hscaler_setup_video_fmt - Sets video format of h-scaler
+ * @scaler: Pointer to scaler device structure
+ * @code_out: bus format to be set
+ * @vsc_out: return value of vscaler
+ *
+ * This function sets the given video format on the h-scaler.
+ *
+ * Return: 0 on success. -EINVAL for invalid format.
+ */
+static int xv_hscaler_setup_video_fmt(struct xilinx_scaler *scaler,
+ u32 code_out, u32 vsc_out)
+{
+ u32 video_out;
+
+ switch (vsc_out) {
+ case XVIDC_CSF_YCRCB_422:
+ case XVIDC_CSF_YCRCB_444:
+ case XVIDC_CSF_RGB:
+ case XVIDC_CSF_YCRCB_420:
+ break;
+ default:
+ dev_info(scaler->dev, "unsupported format from Vscaler\n");
+ return -EINVAL;
+ }
+
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA,
+ vsc_out);
+
+ switch (code_out) {
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ video_out = XVIDC_CSF_YCRCB_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ video_out = XVIDC_CSF_YCRCB_444;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ video_out = XVIDC_CSF_RGB;
+ break;
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ video_out = XVIDC_CSF_YCRCB_420;
+ break;
+ default:
+ dev_info(scaler->dev, "Hscaler Unsupported Out media fmt\n");
+ return -EINVAL;
+ }
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_COLORMODEOUT_DATA,
+ video_out);
+ return 0;
+}
+
+/**
+ * xilinx_scaler_parse_of - Parse device tree information
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function reads the device tree contents
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int xilinx_scaler_parse_of(struct xilinx_scaler *scaler)
+{
+ int ret;
+ u32 dt_ppc;
+ struct device_node *node = scaler->dev->of_node;
+
+ scaler->ctrl_clk = devm_clk_get(scaler->dev, "aclk_ctrl");
+ if (IS_ERR(scaler->ctrl_clk)) {
+ ret = PTR_ERR(scaler->ctrl_clk);
+ dev_err(scaler->dev, "failed to get axi lite clk %d\n", ret);
+ return ret;
+ }
+
+ scaler->axis_clk = devm_clk_get(scaler->dev, "aclk_axis");
+ if (IS_ERR(scaler->axis_clk)) {
+ ret = PTR_ERR(scaler->axis_clk);
+ dev_err(scaler->dev, "failed to get video clk %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,h-scaler-taps",
+ &scaler->num_hori_taps);
+ if (ret < 0) {
+ dev_info(scaler->dev, "h-scaler-taps not present in DT\n");
+ return ret;
+ }
+ switch (scaler->num_hori_taps) {
+ case XV_HSCALER_TAPS_2:
+ case XV_HSCALER_TAPS_4:
+ scaler->is_polyphase = false;
+ break;
+ case XV_HSCALER_TAPS_6:
+ case XV_HSCALER_TAPS_8:
+ case XV_HSCALER_TAPS_10:
+ case XV_HSCALER_TAPS_12:
+ scaler->is_polyphase = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,v-scaler-taps",
+ &scaler->num_vert_taps);
+ if (ret < 0) {
+ dev_info(scaler->dev, "v-scaler-taps not present in DT\n");
+ return ret;
+ }
+
+ switch (scaler->num_vert_taps) {
+ case XV_HSCALER_TAPS_2:
+ case XV_VSCALER_TAPS_4:
+ if (scaler->num_vert_taps != scaler->num_hori_taps)
+ return -EINVAL;
+ break;
+ case XV_VSCALER_TAPS_6:
+ case XV_VSCALER_TAPS_8:
+ case XV_VSCALER_TAPS_10:
+ case XV_VSCALER_TAPS_12:
+ scaler->is_polyphase = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,samples-per-clk", &dt_ppc);
+ if (ret < 0) {
+ dev_info(scaler->dev, "PPC is missing in DT\n");
+ return ret;
+ }
+ if (dt_ppc != XSCALER_PPC_1 && dt_ppc != XSCALER_PPC_2) {
+ dev_info(scaler->dev, "Unsupported ppc: %d", dt_ppc);
+ return -EINVAL;
+ }
+ scaler->pix_per_clk = dt_ppc;
+
+ /* Reset GPIO */
+ scaler->rst_gpio = devm_gpiod_get(scaler->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(scaler->rst_gpio)) {
+ if (PTR_ERR(scaler->rst_gpio) != -EPROBE_DEFER)
+ dev_err(scaler->dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(scaler->rst_gpio);
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-height",
+ &scaler->max_lines);
+ if (ret < 0) {
+ dev_err(scaler->dev, "xlnx,max-height is missing!");
+ return -EINVAL;
+ } else if (scaler->max_lines > XSCALER_MAX_HEIGHT ||
+ scaler->max_lines < XSCALER_MIN_HEIGHT) {
+ dev_err(scaler->dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-width",
+ &scaler->max_pixels);
+ if (ret < 0) {
+ dev_err(scaler->dev, "xlnx,max-width is missing!");
+ return -EINVAL;
+ } else if (scaler->max_pixels > XSCALER_MAX_WIDTH ||
+ scaler->max_pixels < XSCALER_MIN_WIDTH) {
+ dev_err(scaler->dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_scaler_stream - Set up v-scaler and h-scaler for streaming
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function sets up the required configuration of v-scaler and h-scaler
+ *
+ * Return: 0 on success. Returns -EINVAL on failure conditions.
+ */
+static int xilinx_scaler_stream(struct xilinx_scaler *scaler)
+{
+ u32 fmt_in, fmt_out;
+ u32 pixel_rate;
+ u32 line_rate;
+ int ret;
+
+ fmt_in = scaler->fmt_in;
+ fmt_out = scaler->fmt_out;
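+ /* in/out height ratio in STEP_PRECISION fixed point */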
+ line_rate = (scaler->height_in * STEP_PRECISION) / scaler->height_out;
+
+ if (scaler->is_polyphase) {
+ ret = xv_vscaler_select_coeff(scaler, scaler->height_in,
+ scaler->height_out);
+ if (ret < 0) {
+ dev_info(scaler->dev, "Failed: vscaler select coeff\n");
+ return ret;
+ }
+ xv_vscaler_set_coeff(scaler);
+ }
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_LINERATE_DATA,
+ line_rate);
+ ret = xv_vscaler_setup_video_fmt(scaler, scaler->fmt_in);
+ if (ret < 0) {
+ dev_info(scaler->dev, "Failed: vscaler setup video format\n");
+ return ret;
+ }
+ pixel_rate = (scaler->width_in * STEP_PRECISION) / scaler->width_out;
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_PIXELRATE_DATA,
+ pixel_rate);
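+ /* 'ret' carries the v-scaler output color format into the h-scaler */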
+ ret = xv_hscaler_setup_video_fmt(scaler, scaler->fmt_out, ret);
+ if (ret < 0) {
+ dev_info(scaler->dev, "Failed: vscaler setup video format\n");
+ return ret;
+ }
+ if (scaler->is_polyphase) {
+ ret = xv_hscaler_select_coeff(scaler, scaler->width_in,
+ scaler->width_out);
+ if (ret < 0) {
+ dev_info(scaler->dev, "Failed: hscaler select coeff\n");
+ return ret;
+ }
+ xv_hscaler_set_coeff(scaler);
+ }
+ xv_hscaler_calculate_phases(scaler, scaler->width_in,
+ scaler->width_out, pixel_rate);
+ xv_hscaler_set_phases(scaler);
+ return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_enable - enables scaler sub-cores
+ * @bridge: bridge instance
+ *
+ * This function enables the scaler sub-cores
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int xilinx_scaler_bridge_enable(struct xlnx_bridge *bridge)
+{
+ int ret;
+ struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+
+ ret = xilinx_scaler_stream(scaler);
+ if (ret)
+ return ret;
+
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_AP_CTRL, XSCALER_STREAM_ON);
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_AP_CTRL, XSCALER_STREAM_ON);
+ xilinx_scaler_enable_block(scaler, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_IP_AXIS);
+ return ret;
+}
+
+/**
+ * xilinx_scaler_bridge_disable - disables scaler sub-cores
+ * @bridge: bridge instance
+ *
+ * This function disables the scaler sub-cores
+ */
+static void xilinx_scaler_bridge_disable(struct xlnx_bridge *bridge)
+{
+ struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+
+ xilinx_scaler_disable_block(scaler, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_ALL_BLOCKS);
+}
+
+/**
+ * xilinx_scaler_bridge_set_input - Sets the input parameters of scaler
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * This function sets the input parameters of the scaler.
+ *
+ * Return: 0 on success. -EINVAL for invalid parameters.
+ */
+static int xilinx_scaler_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+
+ if (width > scaler->max_pixels || height > scaler->max_lines)
+ return -EINVAL;
+
+ scaler->height_in = height;
+ scaler->width_in = width;
+ scaler->fmt_in = bus_fmt;
+
+ /* IP Reset through GPIO */
+ gpiod_set_value_cansleep(scaler->rst_gpio, XSCALER_RESET_ASSERT);
+ gpiod_set_value_cansleep(scaler->rst_gpio, XSCALER_RESET_DEASSERT);
+ xilinx_scaler_reset(scaler);
+ memset(scaler->H_phases, 0, sizeof(scaler->H_phases));
+
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTIN_DATA, height);
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_WIDTH_DATA, width);
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_WIDTHIN_DATA, width);
+
+ return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_get_input_fmts - input formats supported by scaler
+ * @bridge: bridge instance
+ * @fmts: Pointer to be updated with formats information
+ * @count: count of video bus formats
+ *
+ * This function provides the input video formats information of the scaler.
+ *
+ * Return: 0 on success.
+ */
+static int xilinx_scaler_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ *fmts = xilinx_scaler_video_fmts;
+ *count = ARRAY_SIZE(xilinx_scaler_video_fmts);
+ return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_set_output - Sets the output parameters of scaler
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * This function sets the output parameters of the scaler.
+ *
+ * Return: 0 on success. -EINVAL for invalid parameters.
+ */
+static int xilinx_scaler_bridge_set_output(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+
+ if (width > scaler->max_pixels || height > scaler->max_lines)
+ return -EINVAL;
+
+ scaler->height_out = height;
+ scaler->width_out = width;
+ scaler->fmt_out = bus_fmt;
+
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTOUT_DATA, height);
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_HEIGHT_DATA, height);
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_WIDTHOUT_DATA, width);
+ return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_get_output_fmts - output formats supported by scaler
+ * @bridge: bridge instance
+ * @fmts: Pointer to be updated with formats information
+ * @count: count of video bus formats
+ *
+ * This function provides the output video formats information of the scaler.
+ *
+ * Return: 0 on success.
+ */
+static int xilinx_scaler_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ *fmts = xilinx_scaler_video_fmts;
+ *count = ARRAY_SIZE(xilinx_scaler_video_fmts);
+ return 0;
+}
+
+static const struct xscaler_feature xlnx_scaler_v2_2 = {
+ .flags = XSCALER_HPHASE_FIX,
+};
+
+static const struct xscaler_feature xlnx_scaler = {
+ .flags = 0,
+};
+
+static const struct of_device_id xilinx_scaler_of_match[] = {
+ { .compatible = "xlnx,vpss-scaler",
+ .data = &xlnx_scaler},
+ { .compatible = "xlnx,vpss-scaler-2.2",
+ .data = &xlnx_scaler_v2_2},
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, xilinx_scaler_of_match);
+
+static int xilinx_scaler_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xilinx_scaler *scaler;
+ const struct of_device_id *match;
+ struct device_node *node = pdev->dev.of_node;
+ int ret;
+
+ scaler = devm_kzalloc(dev, sizeof(*scaler), GFP_KERNEL);
+ if (!scaler)
+ return -ENOMEM;
+ scaler->dev = dev;
+
+ match = of_match_node(xilinx_scaler_of_match, node);
+ if (!match)
+ return -ENODEV;
+
+ scaler->cfg = match->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ scaler->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(scaler->base)) {
+ dev_err(dev, "failed to remap io region\n");
+ return PTR_ERR(scaler->base);
+ }
+ platform_set_drvdata(pdev, scaler);
+
+ ret = xilinx_scaler_parse_of(scaler);
+ if (ret < 0) {
+ dev_info(scaler->dev, "parse_of failed\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(scaler->ctrl_clk);
+ if (ret) {
+ dev_err(scaler->dev, "unable to enable axi lite clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(scaler->axis_clk);
+ if (ret) {
+ dev_err(scaler->dev, "unable to enable video clk %d\n", ret);
+ goto err_ctrl_clk;
+ }
+
+ scaler->max_num_phases = XSCALER_MAX_PHASES;
+
+ /* Deassert the global IP reset through GPIO */
+ gpiod_set_value_cansleep(scaler->rst_gpio, XSCALER_RESET_DEASSERT);
+ xilinx_scaler_reset(scaler);
+
+ scaler->bridge.enable = &xilinx_scaler_bridge_enable;
+ scaler->bridge.disable = &xilinx_scaler_bridge_disable;
+ scaler->bridge.set_input = &xilinx_scaler_bridge_set_input;
+ scaler->bridge.get_input_fmts = &xilinx_scaler_bridge_get_input_fmts;
+ scaler->bridge.set_output = &xilinx_scaler_bridge_set_output;
+ scaler->bridge.get_output_fmts = &xilinx_scaler_bridge_get_output_fmts;
+ scaler->bridge.of_node = dev->of_node;
+
+ ret = xlnx_bridge_register(&scaler->bridge);
+ if (ret) {
+ dev_info(scaler->dev, "Bridge registration failed\n");
+ goto err_axis_clk;
+ }
+ dev_info(scaler->dev, "xlnx drm scaler experimental driver probed\n");
+
+ return 0;
+
+err_axis_clk:
+ clk_disable_unprepare(scaler->axis_clk);
+err_ctrl_clk:
+ clk_disable_unprepare(scaler->ctrl_clk);
+ return ret;
+}
+
+static int xilinx_scaler_remove(struct platform_device *pdev)
+{
+ struct xilinx_scaler *scaler = platform_get_drvdata(pdev);
+
+ xlnx_bridge_unregister(&scaler->bridge);
+ clk_disable_unprepare(scaler->axis_clk);
+ clk_disable_unprepare(scaler->ctrl_clk);
+ return 0;
+}
+
+static struct platform_driver scaler_bridge_driver = {
+ .probe = xilinx_scaler_probe,
+ .remove = xilinx_scaler_remove,
+ .driver = {
+ .name = "xlnx,scaler-bridge",
+ .of_match_table = xilinx_scaler_of_match,
+ },
+};
+
+module_platform_driver(scaler_bridge_driver);
+
+MODULE_AUTHOR("Venkateshwar Rao <vgannava@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA SCALER Bridge Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi.c b/drivers/gpu/drm/xlnx/xlnx_sdi.c
new file mode 100644
index 000000000000..9fb7b5db5589
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi.c
@@ -0,0 +1,1227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA SDI Tx Subsystem driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <video/videomode.h>
+#include "xlnx_sdi_modes.h"
+#include "xlnx_sdi_timing.h"
+
+#include "xlnx_bridge.h"
+
+/* SDI register offsets */
+#define XSDI_TX_RST_CTRL 0x00
+#define XSDI_TX_MDL_CTRL 0x04
+#define XSDI_TX_GLBL_IER 0x0C
+#define XSDI_TX_ISR_STAT 0x10
+#define XSDI_TX_IER_STAT 0x14
+#define XSDI_TX_ST352_LINE 0x18
+#define XSDI_TX_ST352_DATA_CH0 0x1C
+#define XSDI_TX_VER 0x3C
+#define XSDI_TX_SYS_CFG 0x40
+#define XSDI_TX_STS_SB_TDATA 0x60
+#define XSDI_TX_AXI4S_STS1 0x68
+#define XSDI_TX_AXI4S_STS2 0x6C
+#define XSDI_TX_ST352_DATA_DS2 0x70
+
+/* MODULE_CTRL register masks */
+#define XSDI_TX_CTRL_M BIT(7)
+#define XSDI_TX_CTRL_INS_CRC BIT(12)
+#define XSDI_TX_CTRL_INS_ST352 BIT(13)
+#define XSDI_TX_CTRL_OVR_ST352 BIT(14)
+#define XSDI_TX_CTRL_INS_SYNC_BIT BIT(16)
+#define XSDI_TX_CTRL_USE_ANC_IN BIT(18)
+#define XSDI_TX_CTRL_INS_LN BIT(19)
+#define XSDI_TX_CTRL_INS_EDH BIT(20)
+#define XSDI_TX_CTRL_MODE 0x7
+#define XSDI_TX_CTRL_MUX 0x7
+#define XSDI_TX_CTRL_MODE_SHIFT 4
+#define XSDI_TX_CTRL_M_SHIFT 7
+#define XSDI_TX_CTRL_MUX_SHIFT 8
+#define XSDI_TX_CTRL_ST352_F2_EN_SHIFT 15
+#define XSDI_TX_CTRL_420_BIT BIT(21)
+#define XSDI_TX_CTRL_INS_ST352_CHROMA BIT(23)
+#define XSDI_TX_CTRL_USE_DS2_3GA BIT(24)
+
+/* TX_ST352_LINE register masks */
+#define XSDI_TX_ST352_LINE_MASK GENMASK(10, 0)
+#define XSDI_TX_ST352_LINE_F2_SHIFT 16
+
+/* ISR STAT register masks */
+#define XSDI_GTTX_RSTDONE_INTR BIT(0)
+#define XSDI_TX_CE_ALIGN_ERR_INTR BIT(1)
+#define XSDI_AXI4S_VID_LOCK_INTR BIT(8)
+#define XSDI_OVERFLOW_INTR BIT(9)
+#define XSDI_UNDERFLOW_INTR BIT(10)
+#define XSDI_IER_EN_MASK (XSDI_GTTX_RSTDONE_INTR | \
+ XSDI_TX_CE_ALIGN_ERR_INTR | \
+ XSDI_OVERFLOW_INTR | \
+ XSDI_UNDERFLOW_INTR)
+
+/* RST_CTRL_OFFSET masks */
+#define XSDI_TX_CTRL_EN BIT(0)
+#define XSDI_TX_BRIDGE_CTRL_EN BIT(8)
+#define XSDI_TX_AXI4S_CTRL_EN BIT(9)
+/* STS_SB_TX_TDATA masks */
+#define XSDI_TX_TDATA_GT_RESETDONE BIT(2)
+
+#define XSDI_TX_MUX_SD_HD_3GA 0
+#define XSDI_TX_MUX_3GB 1
+#define XSDI_TX_MUX_8STREAM_6G_12G 2
+#define XSDI_TX_MUX_4STREAM_6G 3
+#define XSDI_TX_MUX_16STREAM_12G 4
+
+#define SDI_MAX_DATASTREAM 8
+#define PIXELS_PER_CLK 2
+#define XSDI_CH_SHIFT 29
+#define XST352_PROG_PIC BIT(6)
+#define XST352_PROG_TRANS BIT(7)
+#define XST352_2048_SHIFT BIT(6)
+#define XST352_YUV420_MASK 0x03
+#define ST352_BYTE3 0x00
+#define ST352_BYTE4 0x01
+#define GT_TIMEOUT 50
+/* SDI modes */
+#define XSDI_MODE_HD 0
+#define XSDI_MODE_SD 1
+#define XSDI_MODE_3GA 2
+#define XSDI_MODE_3GB 3
+#define XSDI_MODE_6G 4
+#define XSDI_MODE_12G 5
+
+#define SDI_TIMING_PARAMS_SIZE 48
+
+/**
+ * enum payload_line_1 - Payload Ids Line 1 number
+ * @PAYLD_LN1_HD_3_6_12G: line 1 HD,3G,6G or 12G mode value
+ * @PAYLD_LN1_SDPAL: line 1 SD PAL mode value
+ * @PAYLD_LN1_SDNTSC: line 1 SD NTSC mode value
+ */
+enum payload_line_1 {
+ PAYLD_LN1_HD_3_6_12G = 10,
+ PAYLD_LN1_SDPAL = 9,
+ PAYLD_LN1_SDNTSC = 13
+};
+
+/**
+ * enum payload_line_2 - Payload Ids Line 2 number
+ * @PAYLD_LN2_HD_3_6_12G: line 2 HD,3G,6G or 12G mode value
+ * @PAYLD_LN2_SDPAL: line 2 SD PAL mode value
+ * @PAYLD_LN2_SDNTSC: line 2 SD NTSC mode value
+ */
+enum payload_line_2 {
+ PAYLD_LN2_HD_3_6_12G = 572,
+ PAYLD_LN2_SDPAL = 322,
+ PAYLD_LN2_SDNTSC = 276
+};
+
+/**
+ * struct xlnx_sdi - Core configuration SDI Tx subsystem device structure
+ * @encoder: DRM encoder structure
+ * @connector: DRM connector structure
+ * @dev: device structure
+ * @base: Base address of SDI subsystem
+ * @mode_flags: SDI operation mode related flags
+ * @wait_event: wait event
+ * @event_received: wait event status
+ * @enable_st352_chroma: Able to send ST352 packets in Chroma stream.
+ * @enable_anc_data: Enable/Disable Ancillary Data insertion for Audio
+ * @sdi_mode: configurable SDI mode parameter, supported values are:
+ * 0 - HD
+ * 1 - SD
+ * 2 - 3GA
+ * 3 - 3GB
+ * 4 - 6G
+ * 5 - 12G
+ * @sdi_mod_prop_val: configurable SDI mode parameter value
+ * @sdi_data_strm: configurable SDI data stream parameter
+ * @sdi_data_strm_prop_val: configurable number of SDI data streams
+ * value currently supported are 2, 4 and 8
+ * @sdi_420_in: Specifying input bus color format parameter to SDI
+ * @sdi_420_in_val: 1 for yuv420 and 0 for yuv422
+ * @sdi_420_out: configurable SDI out color format parameter
+ * @sdi_420_out_val: 1 for yuv420 and 0 for yuv422
+ * @is_frac_prop: configurable SDI fractional fps parameter
+ * @is_frac_prop_val: configurable SDI fractional fps parameter value
+ * @bridge: bridge structure
+ * @height_out: configurable bridge output height parameter
+ * @height_out_prop_val: configurable bridge output height parameter value
+ * @width_out: configurable bridge output width parameter
+ * @width_out_prop_val: configurable bridge output width parameter value
+ * @in_fmt: configurable bridge input media format
+ * @in_fmt_prop_val: configurable media bus format value
+ * @out_fmt: configurable bridge output media format
+ * @out_fmt_prop_val: configurable media bus format value
+ * @en_st352_c_prop: configurable ST352 payload on Chroma stream parameter
+ * @en_st352_c_val: configurable ST352 payload on Chroma parameter value
+ * @use_ds2_3ga_prop: Use DS2 instead of DS3 in 3GA mode parameter
+ * @use_ds2_3ga_val: Use DS2 instead of DS3 in 3GA mode parameter value
+ * @video_mode: current display mode
+ * @axi_clk: AXI Lite interface clock
+ * @sditx_clk: SDI Tx Clock
+ * @vidin_clk: Video Clock
+ */
+struct xlnx_sdi {
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct device *dev;
+ void __iomem *base;
+ u32 mode_flags;
+ wait_queue_head_t wait_event;
+ bool event_received;
+ bool enable_st352_chroma;
+ bool enable_anc_data;
+ struct drm_property *sdi_mode;
+ u32 sdi_mod_prop_val;
+ struct drm_property *sdi_data_strm;
+ u32 sdi_data_strm_prop_val;
+ struct drm_property *sdi_420_in;
+ bool sdi_420_in_val;
+ struct drm_property *sdi_420_out;
+ bool sdi_420_out_val;
+ struct drm_property *is_frac_prop;
+ bool is_frac_prop_val;
+ struct xlnx_bridge *bridge;
+ struct drm_property *height_out;
+ u32 height_out_prop_val;
+ struct drm_property *width_out;
+ u32 width_out_prop_val;
+ struct drm_property *in_fmt;
+ u32 in_fmt_prop_val;
+ struct drm_property *out_fmt;
+ u32 out_fmt_prop_val;
+ struct drm_property *en_st352_c_prop;
+ bool en_st352_c_val;
+ struct drm_property *use_ds2_3ga_prop;
+ bool use_ds2_3ga_val;
+ struct drm_display_mode video_mode;
+ struct clk *axi_clk;
+ struct clk *sditx_clk;
+ struct clk *vidin_clk;
+};
+
+#define connector_to_sdi(c) container_of(c, struct xlnx_sdi, connector)
+#define encoder_to_sdi(e) container_of(e, struct xlnx_sdi, encoder)
+
+static inline void xlnx_sdi_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xlnx_sdi_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * xlnx_sdi_en_axi4s - Enable SDI Tx AXI4S-to-Video core
+ * @sdi: Pointer to SDI Tx structure
+ *
+ * This function enables the SDI Tx AXI4S-to-Video core.
+ */
+static void xlnx_sdi_en_axi4s(struct xlnx_sdi *sdi)
+{
+ u32 data;
+
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_AXI4S_CTRL_EN;
+ xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_en_bridge - Enable SDI Tx bridge
+ * @sdi: Pointer to SDI Tx structure
+ *
+ * This function enables the SDI Tx bridge.
+ */
+static void xlnx_sdi_en_bridge(struct xlnx_sdi *sdi)
+{
+ u32 data;
+
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_BRIDGE_CTRL_EN;
+ xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_irq_handler - SDI Tx interrupt
+ * @irq: irq number
+ * @data: irq data
+ *
+ * This handler services the GT reset-done and data-path error
+ * interrupts.
+ *
+ * Return: IRQ_HANDLED for all cases.
+ */
+static irqreturn_t xlnx_sdi_irq_handler(int irq, void *data)
+{
+ struct xlnx_sdi *sdi = (struct xlnx_sdi *)data;
+ u32 reg;
+
+ reg = xlnx_sdi_readl(sdi->base, XSDI_TX_ISR_STAT);
+
+ if (reg & XSDI_GTTX_RSTDONE_INTR)
+ dev_dbg(sdi->dev, "GT reset interrupt received\n");
+ if (reg & XSDI_TX_CE_ALIGN_ERR_INTR)
+ dev_err_ratelimited(sdi->dev, "SDI SD CE align error\n");
+ if (reg & XSDI_OVERFLOW_INTR)
+ dev_err_ratelimited(sdi->dev, "AXI-4 Stream Overflow error\n");
+ if (reg & XSDI_UNDERFLOW_INTR)
+ dev_err_ratelimited(sdi->dev, "AXI-4 Stream Underflow error\n");
+ xlnx_sdi_writel(sdi->base, XSDI_TX_ISR_STAT,
+ reg & ~(XSDI_AXI4S_VID_LOCK_INTR));
+
+ reg = xlnx_sdi_readl(sdi->base, XSDI_TX_STS_SB_TDATA);
+ if (reg & XSDI_TX_TDATA_GT_RESETDONE) {
+ sdi->event_received = true;
+ wake_up_interruptible(&sdi->wait_event);
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * xlnx_sdi_set_payload_line - set ST352 packet line number
+ * @sdi: Pointer to SDI Tx structure
+ * @line_1: line number used to insert st352 packet for field 1.
+ * @line_2: line number used to insert st352 packet for field 2.
+ *
+ * This function sets the ST352 packet line numbers.
+ */
+static void xlnx_sdi_set_payload_line(struct xlnx_sdi *sdi,
+ u32 line_1, u32 line_2)
+{
+ u32 data;
+
+ data = ((line_1 & XSDI_TX_ST352_LINE_MASK) |
+ ((line_2 & XSDI_TX_ST352_LINE_MASK) <<
+ XSDI_TX_ST352_LINE_F2_SHIFT));
+
+ xlnx_sdi_writel(sdi->base, XSDI_TX_ST352_LINE, data);
+
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ data |= (1 << XSDI_TX_CTRL_ST352_F2_EN_SHIFT);
+
+ xlnx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_set_payload_data - set ST352 packet payload
+ * @sdi: Pointer to SDI Tx structure
+ * @data_strm: data stream number
+ * @payload: st352 packet payload
+ *
+ * This function sets the ST352 payload data for the corresponding stream.
+ */
+static void xlnx_sdi_set_payload_data(struct xlnx_sdi *sdi,
+ u32 data_strm, u32 payload)
+{
+ xlnx_sdi_writel(sdi->base,
+ (XSDI_TX_ST352_DATA_CH0 + (data_strm * 4)), payload);
+
+ dev_dbg(sdi->dev, "enable_st352_chroma = %d and en_st352_c_val = %d\n",
+ sdi->enable_st352_chroma, sdi->en_st352_c_val);
+ if (sdi->enable_st352_chroma && sdi->en_st352_c_val) {
+ xlnx_sdi_writel(sdi->base,
+ (XSDI_TX_ST352_DATA_DS2 + (data_strm * 4)),
+ payload);
+ }
+}
+
+/**
+ * xlnx_sdi_set_display_disable - Disable the SDI Tx IP core enable
+ * register bit
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure and disables the core enable bit
+ * of the core configuration register.
+ */
+static void xlnx_sdi_set_display_disable(struct xlnx_sdi *sdi)
+{
+ u32 i;
+
+ for (i = 0; i < SDI_MAX_DATASTREAM; i++)
+ xlnx_sdi_set_payload_data(sdi, i, 0);
+
+ xlnx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 0);
+ xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, 0);
+}
+
+/**
+ * xlnx_sdi_payload_config - config the SDI payload parameters
+ * @sdi: pointer Xilinx SDI Tx structure
+ * @mode: display mode
+ *
+ * This function configures the SDI ST352 payload parameters.
+ */
+static void xlnx_sdi_payload_config(struct xlnx_sdi *sdi, u32 mode)
+{
+ u32 payload_1, payload_2;
+
+ switch (mode) {
+ case XSDI_MODE_SD:
+ payload_1 = PAYLD_LN1_SDPAL;
+ payload_2 = PAYLD_LN2_SDPAL;
+ break;
+ case XSDI_MODE_HD:
+ case XSDI_MODE_3GA:
+ case XSDI_MODE_3GB:
+ case XSDI_MODE_6G:
+ case XSDI_MODE_12G:
+ payload_1 = PAYLD_LN1_HD_3_6_12G;
+ payload_2 = PAYLD_LN2_HD_3_6_12G;
+ break;
+ default:
+ payload_1 = 0;
+ payload_2 = 0;
+ break;
+ }
+
+ xlnx_sdi_set_payload_line(sdi, payload_1, payload_2);
+}
+
+/**
+ * xlnx_sdi_set_mode - Set mode parameters in SDI Tx
+ * @sdi: pointer Xilinx SDI Tx structure
+ * @mode: SDI Tx display mode
+ * @is_frac: 0 - integer 1 - fractional
+ * @mux_ptrn: specify the data stream interleaving pattern to be used
+ *
+ * This function sets the SDI Tx mode, fractional rate and mux pattern.
+ */
+static void xlnx_sdi_set_mode(struct xlnx_sdi *sdi, u32 mode,
+ bool is_frac, u32 mux_ptrn)
+{
+ u32 data;
+
+ xlnx_sdi_payload_config(sdi, mode);
+
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ data &= ~(XSDI_TX_CTRL_MODE << XSDI_TX_CTRL_MODE_SHIFT);
+ data &= ~(XSDI_TX_CTRL_M);
+ data &= ~(XSDI_TX_CTRL_MUX << XSDI_TX_CTRL_MUX_SHIFT);
+ data &= ~XSDI_TX_CTRL_420_BIT;
+
+ data |= (((mode & XSDI_TX_CTRL_MODE) << XSDI_TX_CTRL_MODE_SHIFT) |
+ (is_frac << XSDI_TX_CTRL_M_SHIFT) |
+ ((mux_ptrn & XSDI_TX_CTRL_MUX) << XSDI_TX_CTRL_MUX_SHIFT));
+
+ if (sdi->sdi_420_out_val)
+ data |= XSDI_TX_CTRL_420_BIT;
+ xlnx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_set_config_parameters - Configure SDI Tx registers with parameters
+ * given from user application.
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure having drm_property parameters
+ * configured from user application and writes them into SDI IP registers.
+ */
+static void xlnx_sdi_set_config_parameters(struct xlnx_sdi *sdi)
+{
+ int mux_ptrn = -EINVAL;
+
+ switch (sdi->sdi_mod_prop_val) {
+ case XSDI_MODE_3GA:
+ mux_ptrn = XSDI_TX_MUX_SD_HD_3GA;
+ break;
+ case XSDI_MODE_3GB:
+ mux_ptrn = XSDI_TX_MUX_3GB;
+ break;
+ case XSDI_MODE_6G:
+ if (sdi->sdi_data_strm_prop_val == 4)
+ mux_ptrn = XSDI_TX_MUX_4STREAM_6G;
+ else if (sdi->sdi_data_strm_prop_val == 8)
+ mux_ptrn = XSDI_TX_MUX_8STREAM_6G_12G;
+ break;
+ case XSDI_MODE_12G:
+ if (sdi->sdi_data_strm_prop_val == 8)
+ mux_ptrn = XSDI_TX_MUX_8STREAM_6G_12G;
+ break;
+ default:
+ mux_ptrn = 0;
+ break;
+ }
+ if (mux_ptrn == -EINVAL) {
+ dev_err(sdi->dev, "%d data stream not supported for %d mode",
+ sdi->sdi_data_strm_prop_val, sdi->sdi_mod_prop_val);
+ return;
+ }
+ xlnx_sdi_set_mode(sdi, sdi->sdi_mod_prop_val, sdi->is_frac_prop_val,
+ mux_ptrn);
+}
+
+/**
+ * xlnx_sdi_atomic_set_property - implementation of drm_connector_funcs
+ * set_property invoked by IOCTL call to DRM_IOCTL_MODE_OBJ_SETPROPERTY
+ *
+ * @connector: pointer Xilinx SDI connector
+ * @state: DRM connector state
+ * @property: pointer to the drm_property structure
+ * @val: SDI parameter value that is configured from user application
+ *
+ * This function takes a drm_property name and value given from the user
+ * application and updates the SDI structure property variables with the
+ * values. These values are later used to configure the SDI Tx IP.
+ *
+ * Return: 0 on success OR -EINVAL if setting property fails
+ */
+static int
+xlnx_sdi_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property, uint64_t val)
+{
+ struct xlnx_sdi *sdi = connector_to_sdi(connector);
+
+ if (property == sdi->sdi_mode)
+ sdi->sdi_mod_prop_val = (unsigned int)val;
+ else if (property == sdi->sdi_data_strm)
+ sdi->sdi_data_strm_prop_val = (unsigned int)val;
+ else if (property == sdi->sdi_420_in)
+ sdi->sdi_420_in_val = val;
+ else if (property == sdi->sdi_420_out)
+ sdi->sdi_420_out_val = val;
+ else if (property == sdi->is_frac_prop)
+ sdi->is_frac_prop_val = !!val;
+ else if (property == sdi->height_out)
+ sdi->height_out_prop_val = (unsigned int)val;
+ else if (property == sdi->width_out)
+ sdi->width_out_prop_val = (unsigned int)val;
+ else if (property == sdi->in_fmt)
+ sdi->in_fmt_prop_val = (unsigned int)val;
+ else if (property == sdi->out_fmt)
+ sdi->out_fmt_prop_val = (unsigned int)val;
+ else if (property == sdi->en_st352_c_prop)
+ sdi->en_st352_c_val = !!val;
+ else if (property == sdi->use_ds2_3ga_prop)
+ sdi->use_ds2_3ga_val = !!val;
+ else
+ return -EINVAL;
+ return 0;
+}
+
+static int
+xlnx_sdi_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ struct xlnx_sdi *sdi = connector_to_sdi(connector);
+
+ if (property == sdi->sdi_mode)
+ *val = sdi->sdi_mod_prop_val;
+ else if (property == sdi->sdi_data_strm)
+ *val = sdi->sdi_data_strm_prop_val;
+ else if (property == sdi->sdi_420_in)
+ *val = sdi->sdi_420_in_val;
+ else if (property == sdi->sdi_420_out)
+ *val = sdi->sdi_420_out_val;
+ else if (property == sdi->is_frac_prop)
+ *val = sdi->is_frac_prop_val;
+ else if (property == sdi->height_out)
+ *val = sdi->height_out_prop_val;
+ else if (property == sdi->width_out)
+ *val = sdi->width_out_prop_val;
+ else if (property == sdi->in_fmt)
+ *val = sdi->in_fmt_prop_val;
+ else if (property == sdi->out_fmt)
+ *val = sdi->out_fmt_prop_val;
+ else if (property == sdi->en_st352_c_prop)
+ *val = sdi->en_st352_c_val;
+ else if (property == sdi->use_ds2_3ga_prop)
+ *val = sdi->use_ds2_3ga_val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * xlnx_sdi_get_mode_id - Search for a video mode in the supported modes table
+ *
+ * @mode: mode being searched
+ *
+ * Return: mode id if mode is found OR -EINVAL otherwise
+ */
+static int xlnx_sdi_get_mode_id(struct drm_display_mode *mode)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xlnx_sdi_modes); i++)
+ if (drm_mode_equal(&xlnx_sdi_modes[i].mode, mode))
+ return i;
+ return -EINVAL;
+}
+
+/**
+ * xlnx_sdi_drm_add_modes - Adds SDI supported modes
+ * @connector: pointer Xilinx SDI connector
+ *
+ * This function adds the supported SDI modes and returns their count.
+ *
+ * Return: count of modes added
+ */
+static int xlnx_sdi_drm_add_modes(struct drm_connector *connector)
+{
+ int num_modes = 0;
+ u32 i;
+ struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < ARRAY_SIZE(xlnx_sdi_modes); i++) {
+ const struct drm_display_mode *ptr = &xlnx_sdi_modes[i].mode;
+
+ mode = drm_mode_duplicate(dev, ptr);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num_modes++;
+ }
+ }
+ return num_modes;
+}
+
+static enum drm_connector_status
+xlnx_sdi_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void xlnx_sdi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ connector->dev = NULL;
+}
+
+static const struct drm_connector_funcs xlnx_sdi_connector_funcs = {
+ .detect = xlnx_sdi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = xlnx_sdi_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_set_property = xlnx_sdi_atomic_set_property,
+ .atomic_get_property = xlnx_sdi_atomic_get_property,
+};
+
+static struct drm_encoder *
+xlnx_sdi_best_encoder(struct drm_connector *connector)
+{
+ return &(connector_to_sdi(connector)->encoder);
+}
+
+static int xlnx_sdi_get_modes(struct drm_connector *connector)
+{
+ return xlnx_sdi_drm_add_modes(connector);
+}
+
+static struct drm_connector_helper_funcs xlnx_sdi_connector_helper_funcs = {
+ .get_modes = xlnx_sdi_get_modes,
+ .best_encoder = xlnx_sdi_best_encoder,
+};
+
+/**
+ * xlnx_sdi_drm_connector_create_property - create SDI connector properties
+ *
+ * @base_connector: pointer to Xilinx SDI connector
+ *
+ * This function takes the Xilinx SDI connector component and defines
+ * the drm_property variables with their default values.
+ */
+static void
+xlnx_sdi_drm_connector_create_property(struct drm_connector *base_connector)
+{
+ struct drm_device *dev = base_connector->dev;
+ struct xlnx_sdi *sdi = connector_to_sdi(base_connector);
+
+ sdi->is_frac_prop = drm_property_create_bool(dev, 0, "is_frac");
+ sdi->sdi_mode = drm_property_create_range(dev, 0,
+ "sdi_mode", 0, 5);
+ sdi->sdi_data_strm = drm_property_create_range(dev, 0,
+ "sdi_data_stream", 2, 8);
+ sdi->sdi_420_in = drm_property_create_bool(dev, 0, "sdi_420_in");
+ sdi->sdi_420_out = drm_property_create_bool(dev, 0, "sdi_420_out");
+ sdi->height_out = drm_property_create_range(dev, 0,
+ "height_out", 2, 4096);
+ sdi->width_out = drm_property_create_range(dev, 0,
+ "width_out", 2, 4096);
+ sdi->in_fmt = drm_property_create_range(dev, 0,
+ "in_fmt", 0, 16384);
+ sdi->out_fmt = drm_property_create_range(dev, 0,
+ "out_fmt", 0, 16384);
+ if (sdi->enable_st352_chroma) {
+ sdi->en_st352_c_prop = drm_property_create_bool(dev, 0,
+ "en_st352_c");
+ sdi->use_ds2_3ga_prop = drm_property_create_bool(dev, 0,
+ "use_ds2_3ga");
+ }
+}
+
+/**
+ * xlnx_sdi_drm_connector_attach_property - attach SDI connector
+ * properties
+ *
+ * @base_connector: pointer to Xilinx SDI connector
+ */
+static void
+xlnx_sdi_drm_connector_attach_property(struct drm_connector *base_connector)
+{
+ struct xlnx_sdi *sdi = connector_to_sdi(base_connector);
+ struct drm_mode_object *obj = &base_connector->base;
+
+ if (sdi->sdi_mode)
+ drm_object_attach_property(obj, sdi->sdi_mode, 0);
+
+ if (sdi->sdi_data_strm)
+ drm_object_attach_property(obj, sdi->sdi_data_strm, 0);
+
+ if (sdi->sdi_420_in)
+ drm_object_attach_property(obj, sdi->sdi_420_in, 0);
+
+ if (sdi->sdi_420_out)
+ drm_object_attach_property(obj, sdi->sdi_420_out, 0);
+
+ if (sdi->is_frac_prop)
+ drm_object_attach_property(obj, sdi->is_frac_prop, 0);
+
+ if (sdi->height_out)
+ drm_object_attach_property(obj, sdi->height_out, 0);
+
+ if (sdi->width_out)
+ drm_object_attach_property(obj, sdi->width_out, 0);
+
+ if (sdi->in_fmt)
+ drm_object_attach_property(obj, sdi->in_fmt, 0);
+
+ if (sdi->out_fmt)
+ drm_object_attach_property(obj, sdi->out_fmt, 0);
+
+ if (sdi->en_st352_c_prop)
+ drm_object_attach_property(obj, sdi->en_st352_c_prop, 0);
+
+ if (sdi->use_ds2_3ga_prop)
+ drm_object_attach_property(obj, sdi->use_ds2_3ga_prop, 0);
+}
+
+static int xlnx_sdi_create_connector(struct drm_encoder *encoder)
+{
+ struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+ struct drm_connector *connector = &sdi->connector;
+ int ret;
+
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &xlnx_sdi_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret) {
+ dev_err(sdi->dev, "Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &xlnx_sdi_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ xlnx_sdi_drm_connector_create_property(connector);
+ xlnx_sdi_drm_connector_attach_property(connector);
+
+ return 0;
+}
+
+/**
+ * xlnx_sdi_set_display_enable - Enables the SDI Tx IP core enable
+ * register bit
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure and enables the core enable bit
+ * of the core configuration register.
+ */
+static void xlnx_sdi_set_display_enable(struct xlnx_sdi *sdi)
+{
+ u32 data;
+
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_CTRL_EN;
+ xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_calc_st352_payld - calculate the st352 payload
+ *
+ * @sdi: pointer to SDI Tx structure
+ * @mode: DRM display mode
+ *
+ * This function calculates the st352 payload to be configured.
+ * Please refer to the SMPTE ST 352 standard for details.
+ *
+ * Return: the ST352 payload value
+ */
+static u32 xlnx_sdi_calc_st352_payld(struct xlnx_sdi *sdi,
+ struct drm_display_mode *mode)
+{
+ u8 byt1, byt2;
+ u16 is_p;
+ int id;
+ u32 sdi_mode = sdi->sdi_mod_prop_val;
+ bool is_frac = sdi->is_frac_prop_val;
+ u32 byt3 = ST352_BYTE3;
+
+ id = xlnx_sdi_get_mode_id(mode);
+ dev_dbg(sdi->dev, "mode id: %d\n", id);
+ if (mode->hdisplay == 2048 || mode->hdisplay == 4096)
+ byt3 |= XST352_2048_SHIFT;
+ if (sdi->sdi_420_in_val)
+ byt3 |= XST352_YUV420_MASK;
+
+ /* byte 2 calculation */
+ is_p = !(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ byt2 = xlnx_sdi_modes[id].st352_byt2[is_frac];
+ if (sdi_mode == XSDI_MODE_3GB ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN) || is_p)
+ byt2 |= XST352_PROG_PIC;
+ if (is_p && mode->vtotal >= 1125)
+ byt2 |= XST352_PROG_TRANS;
+
+ /* byte 1 calculation */
+ byt1 = xlnx_sdi_modes[id].st352_byt1[sdi_mode];
+
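+ /* assemble the 4-byte payload: byte 4 (MSB), byte 3, byte 2, byte 1 */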
+ return (ST352_BYTE4 << 24 | byt3 << 16 | byt2 << 8 | byt1);
+}
+
+static void xlnx_sdi_setup(struct xlnx_sdi *sdi)
+{
+ u32 reg;
+
+ dev_dbg(sdi->dev, "%s\n", __func__);
+
+ reg = xlnx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ reg |= XSDI_TX_CTRL_INS_CRC | XSDI_TX_CTRL_INS_ST352 |
+ XSDI_TX_CTRL_OVR_ST352 | XSDI_TX_CTRL_INS_SYNC_BIT |
+ XSDI_TX_CTRL_INS_EDH;
+
+ if (sdi->enable_anc_data)
+ reg |= XSDI_TX_CTRL_USE_ANC_IN;
+
+ if (sdi->enable_st352_chroma) {
+ if (sdi->en_st352_c_val) {
+ reg |= XSDI_TX_CTRL_INS_ST352_CHROMA;
+ if (sdi->use_ds2_3ga_val)
+ reg |= XSDI_TX_CTRL_USE_DS2_3GA;
+ else
+ reg &= ~XSDI_TX_CTRL_USE_DS2_3GA;
+ } else {
+ reg &= ~XSDI_TX_CTRL_INS_ST352_CHROMA;
+ reg &= ~XSDI_TX_CTRL_USE_DS2_3GA;
+ }
+ }
+
+ xlnx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, reg);
+ xlnx_sdi_writel(sdi->base, XSDI_TX_IER_STAT, XSDI_IER_EN_MASK);
+ xlnx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 1);
+ xlnx_stc_reset(sdi->base);
+}
+
+/**
+ * xlnx_sdi_encoder_atomic_mode_set - derive the SDI timing parameters
+ *
+ * @encoder: pointer to Xilinx DRM encoder
+ * @crtc_state: DRM crtc state
+ * @connector_state: DRM connector state
+ *
+ * This function derives the SDI IP timing parameters from the adjusted
+ * display mode and programs the timing module.
+ */
+static void xlnx_sdi_encoder_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *connector_state)
+{
+ struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct videomode vm;
+ u32 payload, i;
+ u32 sditx_blank, vtc_blank;
+
+ /* Set timing parameters as per bridge output parameters */
+ xlnx_bridge_set_input(sdi->bridge, adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay, sdi->in_fmt_prop_val);
+ xlnx_bridge_set_output(sdi->bridge, sdi->width_out_prop_val,
+ sdi->height_out_prop_val, sdi->out_fmt_prop_val);
+ xlnx_bridge_enable(sdi->bridge);
+
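+ /*
+ * When a bridge rescales the stream, overwrite the timing fields of
+ * the adjusted mode (SDI_TIMING_PARAMS_SIZE bytes starting at 'clock')
+ * with the fixed SDI mode matching the bridge output resolution and
+ * refresh rate.
+ */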
+ if (sdi->bridge) {
+ for (i = 0; i < ARRAY_SIZE(xlnx_sdi_modes); i++) {
+ if (xlnx_sdi_modes[i].mode.hdisplay ==
+ sdi->width_out_prop_val &&
+ xlnx_sdi_modes[i].mode.vdisplay ==
+ sdi->height_out_prop_val &&
+ xlnx_sdi_modes[i].mode.vrefresh ==
+ adjusted_mode->vrefresh) {
+ memcpy((char *)adjusted_mode +
+ offsetof(struct drm_display_mode,
+ clock),
+ &xlnx_sdi_modes[i].mode.clock,
+ SDI_TIMING_PARAMS_SIZE);
+ break;
+ }
+ }
+ }
+
+ xlnx_sdi_setup(sdi);
+ xlnx_sdi_set_config_parameters(sdi);
+
+ /* set st352 payloads */
+ payload = xlnx_sdi_calc_st352_payld(sdi, adjusted_mode);
+ dev_dbg(sdi->dev, "payload : %0x\n", payload);
+
+ for (i = 0; i < sdi->sdi_data_strm_prop_val / 2; i++) {
+ if (sdi->sdi_mod_prop_val == XSDI_MODE_3GB)
+ payload |= (i << 1) << XSDI_CH_SHIFT;
+ xlnx_sdi_set_payload_data(sdi, i, payload);
+ }
+
+ /* UHDSDI is fixed 2 pixels per clock, horizontal timings div by 2 */
+ vm.hactive = adjusted_mode->hdisplay / PIXELS_PER_CLK;
+ vm.hfront_porch = (adjusted_mode->hsync_start -
+ adjusted_mode->hdisplay) / PIXELS_PER_CLK;
+ vm.hback_porch = (adjusted_mode->htotal -
+ adjusted_mode->hsync_end) / PIXELS_PER_CLK;
+ vm.hsync_len = (adjusted_mode->hsync_end -
+ adjusted_mode->hsync_start) / PIXELS_PER_CLK;
+
+ vm.vactive = adjusted_mode->vdisplay;
+ vm.vfront_porch = adjusted_mode->vsync_start -
+ adjusted_mode->vdisplay;
+ vm.vback_porch = adjusted_mode->vtotal -
+ adjusted_mode->vsync_end;
+ vm.vsync_len = adjusted_mode->vsync_end -
+ adjusted_mode->vsync_start;
+ vm.flags = 0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vm.flags |= DISPLAY_FLAGS_INTERLACED;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ vm.flags |= DISPLAY_FLAGS_HSYNC_LOW;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ vm.flags |= DISPLAY_FLAGS_VSYNC_LOW;
+
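+ /*
+ * Widen the front porch until the VTC blanking (in pixels) matches
+ * the SDI Tx blanking computed from the adjusted mode.
+ */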
+ do {
+ sditx_blank = (adjusted_mode->hsync_start -
+ adjusted_mode->hdisplay) +
+ (adjusted_mode->hsync_end -
+ adjusted_mode->hsync_start) +
+ (adjusted_mode->htotal -
+ adjusted_mode->hsync_end);
+
+ vtc_blank = (vm.hfront_porch + vm.hback_porch +
+ vm.hsync_len) * PIXELS_PER_CLK;
+
+ if (vtc_blank != sditx_blank)
+ vm.hfront_porch++;
+ } while (vtc_blank < sditx_blank);
+
+ vm.pixelclock = adjusted_mode->clock * 1000;
+
+ /* parameters for sdi audio */
+ sdi->video_mode.vdisplay = adjusted_mode->vdisplay;
+ sdi->video_mode.hdisplay = adjusted_mode->hdisplay;
+ sdi->video_mode.vrefresh = adjusted_mode->vrefresh;
+ sdi->video_mode.flags = adjusted_mode->flags;
+
+ xlnx_stc_sig(sdi->base, &vm);
+}
+
+static void xlnx_sdi_commit(struct drm_encoder *encoder)
+{
+ struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+ long ret;
+
+ dev_dbg(sdi->dev, "%s\n", __func__);
+ xlnx_sdi_set_display_enable(sdi);
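+ /* wait for the GT reset-done event flagged by the interrupt handler */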
+ ret = wait_event_interruptible_timeout(sdi->wait_event,
+ sdi->event_received,
+ usecs_to_jiffies(GT_TIMEOUT));
+ if (!ret) {
+ dev_err(sdi->dev, "Timeout: GT interrupt not received\n");
+ return;
+ }
+ sdi->event_received = false;
+ /* enable sdi bridge, timing controller and Axi4s_vid_out_ctrl */
+ xlnx_sdi_en_bridge(sdi);
+ xlnx_stc_enable(sdi->base);
+ xlnx_sdi_en_axi4s(sdi);
+}
+
+static void xlnx_sdi_disable(struct drm_encoder *encoder)
+{
+ struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+
+ if (sdi->bridge)
+ xlnx_bridge_disable(sdi->bridge);
+
+ xlnx_sdi_set_display_disable(sdi);
+ xlnx_stc_disable(sdi->base);
+}
+
+static const struct drm_encoder_helper_funcs xlnx_sdi_encoder_helper_funcs = {
+ .atomic_mode_set = xlnx_sdi_encoder_atomic_mode_set,
+ .enable = xlnx_sdi_commit,
+ .disable = xlnx_sdi_disable,
+};
+
+static const struct drm_encoder_funcs xlnx_sdi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int xlnx_sdi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_sdi *sdi = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &sdi->encoder;
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ /*
+ * TODO: Only one CRTC is possible now, as per the current SDI Tx
+ * driver implementation. The DRM framework can support multiple CRTCs
+ * and the driver can be enhanced for that.
+ */
+ encoder->possible_crtcs = 1;
+
+ drm_encoder_init(drm_dev, encoder, &xlnx_sdi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+
+ drm_encoder_helper_add(encoder, &xlnx_sdi_encoder_helper_funcs);
+
+ ret = xlnx_sdi_create_connector(encoder);
+ if (ret) {
+ dev_err(sdi->dev, "fail creating connector, ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ }
+ return ret;
+}
+
+static void xlnx_sdi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_sdi *sdi = dev_get_drvdata(dev);
+
+ xlnx_sdi_set_display_disable(sdi);
+ xlnx_stc_disable(sdi->base);
+ drm_encoder_cleanup(&sdi->encoder);
+ drm_connector_cleanup(&sdi->connector);
+ xlnx_bridge_disable(sdi->bridge);
+}
+
+static const struct component_ops xlnx_sdi_component_ops = {
+ .bind = xlnx_sdi_bind,
+ .unbind = xlnx_sdi_unbind,
+};
+
+static int xlnx_sdi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xlnx_sdi *sdi;
+ struct device_node *vpss_node;
+ int ret, irq;
+ struct device_node *ports, *port;
+ u32 nports = 0, portmask = 0;
+
+ sdi = devm_kzalloc(dev, sizeof(*sdi), GFP_KERNEL);
+ if (!sdi)
+ return -ENOMEM;
+
+ sdi->dev = dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sdi->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(sdi->base)) {
+ dev_err(dev, "failed to remap io region\n");
+ return PTR_ERR(sdi->base);
+ }
+ platform_set_drvdata(pdev, sdi);
+
+ sdi->axi_clk = devm_clk_get(dev, "s_axi_aclk");
+ if (IS_ERR(sdi->axi_clk)) {
+ ret = PTR_ERR(sdi->axi_clk);
+ dev_err(dev, "failed to get s_axi_aclk %d\n", ret);
+ return ret;
+ }
+
+ sdi->sditx_clk = devm_clk_get(dev, "sdi_tx_clk");
+ if (IS_ERR(sdi->sditx_clk)) {
+ ret = PTR_ERR(sdi->sditx_clk);
+ dev_err(dev, "failed to get sdi_tx_clk %d\n", ret);
+ return ret;
+ }
+
+ sdi->vidin_clk = devm_clk_get(dev, "video_in_clk");
+ if (IS_ERR(sdi->vidin_clk)) {
+ ret = PTR_ERR(sdi->vidin_clk);
+ dev_err(dev, "failed to get video_in_clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(sdi->axi_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable axi_clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(sdi->sditx_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable sditx_clk %d\n", ret);
+ goto err_disable_axi_clk;
+ }
+
+ ret = clk_prepare_enable(sdi->vidin_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable vidin_clk %d\n", ret);
+ goto err_disable_sditx_clk;
+ }
+
+ /* in case all "port" nodes are grouped under a "ports" node */
+ ports = of_get_child_by_name(sdi->dev->of_node, "ports");
+ if (!ports) {
+ dev_dbg(dev, "Searching for port nodes in device node.\n");
+ ports = sdi->dev->of_node;
+ }
+
+ for_each_child_of_node(ports, port) {
+ struct device_node *endpoint;
+ u32 index;
+
+ if (!port->name || of_node_cmp(port->name, "port")) {
+ dev_dbg(dev, "port name is null or node name is not port!\n");
+ continue;
+ }
+
+ endpoint = of_get_next_child(port, NULL);
+ if (!endpoint) {
+ dev_err(dev, "No remote port at %s\n", port->name);
+ ret = -EINVAL;
+ goto err_disable_vidin_clk;
+ }
+
+ of_node_put(endpoint);
+
+ ret = of_property_read_u32(port, "reg", &index);
+ if (ret) {
+ dev_err(dev, "reg property not present - %d\n", ret);
+ goto err_disable_vidin_clk;
+ }
+
+ portmask |= (1 << index);
+
+ nports++;
+ }
+
+ if (nports == 2 && portmask & 0x3) {
+ dev_dbg(dev, "enable ancillary port\n");
+ sdi->enable_anc_data = true;
+ } else if (nports == 1 && portmask & 0x1) {
+ dev_dbg(dev, "no ancillary port\n");
+ sdi->enable_anc_data = false;
+ } else {
+ dev_err(dev, "Incorrect dt node!\n");
+ ret = -EINVAL;
+ goto err_disable_vidin_clk;
+ }
+
+ sdi->enable_st352_chroma = of_property_read_bool(sdi->dev->of_node,
+ "xlnx,tx-insert-c-str-st352");
+
+ /* disable interrupt */
+ xlnx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_disable_vidin_clk;
+ }
+
+ ret = devm_request_threaded_irq(sdi->dev, irq, NULL,
+ xlnx_sdi_irq_handler, IRQF_ONESHOT,
+ dev_name(sdi->dev), sdi);
+ if (ret < 0)
+ goto err_disable_vidin_clk;
+
+ /* initialize the wait queue for GT reset event */
+ init_waitqueue_head(&sdi->wait_event);
+
+ /* Bridge support */
+ vpss_node = of_parse_phandle(sdi->dev->of_node, "xlnx,vpss", 0);
+ if (vpss_node) {
+ sdi->bridge = of_xlnx_bridge_get(vpss_node);
+ if (!sdi->bridge) {
+ dev_info(sdi->dev, "Didn't get bridge instance\n");
+ ret = -EPROBE_DEFER;
+ goto err_disable_vidin_clk;
+ }
+ }
+
+ /*
+ * Video mode properties needed by the audio driver are shared with it
+ * through a pointer in platform data. This solution may need to be
+ * modified/extended to avoid probable error scenarios.
+ */
+ pdev->dev.platform_data = &sdi->video_mode;
+
+ ret = component_add(dev, &xlnx_sdi_component_ops);
+ if (ret < 0)
+ goto err_disable_vidin_clk;
+
+ return ret;
+
+err_disable_vidin_clk:
+ clk_disable_unprepare(sdi->vidin_clk);
+err_disable_sditx_clk:
+ clk_disable_unprepare(sdi->sditx_clk);
+err_disable_axi_clk:
+ clk_disable_unprepare(sdi->axi_clk);
+
+ return ret;
+}
+
+static int xlnx_sdi_remove(struct platform_device *pdev)
+{
+ struct xlnx_sdi *sdi = platform_get_drvdata(pdev);
+
+ component_del(&pdev->dev, &xlnx_sdi_component_ops);
+ clk_disable_unprepare(sdi->vidin_clk);
+ clk_disable_unprepare(sdi->sditx_clk);
+ clk_disable_unprepare(sdi->axi_clk);
+
+ return 0;
+}
+
+static const struct of_device_id xlnx_sdi_of_match[] = {
+ { .compatible = "xlnx,sdi-tx"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xlnx_sdi_of_match);
+
+static struct platform_driver sdi_tx_driver = {
+ .probe = xlnx_sdi_probe,
+ .remove = xlnx_sdi_remove,
+ .driver = {
+ .name = "xlnx-sdi-tx",
+ .of_match_table = xlnx_sdi_of_match,
+ },
+};
+
+module_platform_driver(sdi_tx_driver);
+
+MODULE_AUTHOR("Saurabh Sengar <saurabhs@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA SDI Tx Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi_modes.h b/drivers/gpu/drm/xlnx/xlnx_sdi_modes.h
new file mode 100644
index 000000000000..534f7d80f29c
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi_modes.h
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA SDI modes timing values for various
+ * resolutions
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#ifndef _XLNX_SDI_MODES_H_
+#define _XLNX_SDI_MODES_H_
+
+/**
+ * struct xlnx_sdi_display_config - SDI supported modes structure
+ * @mode: drm display mode
+ * @st352_byt2: st352 byte 2 value
+ * index 0 : value for integral fps
+ * index 1 : value for fractional fps
+ * @st352_byt1: st352 byte 1 value
+ * index 0 : value for HD mode
+ * index 1 : value for SD mode
+ * index 2 : value for 3GA
+ * index 3 : value for 3GB
+ * index 4 : value for 6G
+ * index 5 : value for 12G
+ */
+struct xlnx_sdi_display_config {
+ struct drm_display_mode mode;
+ u8 st352_byt2[2];
+ u8 st352_byt1[6];
+};
+
+/*
+ * xlnx_sdi_modes - SDI DRM modes
+ */
+static const struct xlnx_sdi_display_config xlnx_sdi_modes[] = {
+ /* 0 - dummy, VICs start at 1 */
+ { },
+ /* SD: 720x486i@60Hz */
+ {{ DRM_MODE("720x486i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
+ 801, 858, 0, 243, 247, 250, 262, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x81, 0x81, 0x81, 0x81, 0x81, 0x81} },
+ /* SD: 720x576i@50Hz */
+ {{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
+ 795, 864, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x81, 0x81, 0x81, 0x81, 0x81, 0x81} },
+ /* HD: 1280x720@25Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 2990, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@24Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 3155, 4125, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@30Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 2330, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@50Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@60Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1920x1080@24Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080@25Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080@30Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@48Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2291,
+ 2379, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@50Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@60Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@24Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2291,
+ 2379, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@25Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@30Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@48Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2377,
+ 2421, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@50Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2322,
+ 2366, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@60Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@24Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2377,
+ 2421, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@25Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2322,
+ 2366, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@30Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@30Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@25Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@24Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@48Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@50Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@60Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@60Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2136,
+ 2180, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@50Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@48Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@96Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2291,
+ 2379, 2750, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 96, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@100Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@120Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@96Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2377,
+ 2421, 2750, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 96, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@100Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2322,
+ 2366, 2640, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@120Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2114,
+ 2134, 2200, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 6G: 3840x2160@30Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 3840x2160@25Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 3840x2160@24Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@24Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 296704, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@25Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@30Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 296704, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@48Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@50Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@60Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@48Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@50Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@60Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 593408, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+};
+
+#endif /* _XLNX_SDI_MODES_H_ */
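
A consumer of this table typically scans it for the entry matching the
requested DRM mode, then picks the ST 352 payload bytes by link type and
frame-rate class: st352_byt1[] is indexed 0 = HD, 1 = SD, 2 = 3GA, 3 = 3GB,
4 = 6G, 5 = 12G, and st352_byt2[] by integral (0) versus fractional (1)
frame rate, per the struct documentation above. A hedged sketch of such a
lookup (the helper below is illustrative and not part of this patch):

    /* Illustrative only: find the table entry matching a requested mode. */
    static const struct xlnx_sdi_display_config *
    xlnx_sdi_find_cfg(const struct drm_display_mode *mode)
    {
        unsigned int i;

        for (i = 1; i < ARRAY_SIZE(xlnx_sdi_modes); i++) {
            const struct drm_display_mode *m = &xlnx_sdi_modes[i].mode;

            if (m->hdisplay == mode->hdisplay &&
                m->vdisplay == mode->vdisplay &&
                m->vrefresh == mode->vrefresh &&
                !((m->flags ^ mode->flags) & DRM_MODE_FLAG_INTERLACE))
                return &xlnx_sdi_modes[i];
        }

        return NULL;
    }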
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi_timing.c b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.c
new file mode 100644
index 000000000000..61ee98e87fdc
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA SDI Tx timing controller driver
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#include <drm/drmP.h>
+#include <linux/device.h>
+#include <video/videomode.h>
+#include "xlnx_sdi_timing.h"
+
+/* timing controller register offsets */
+#define XSTC_CTL 0x00
+#define XSTC_STATS 0x04
+#define XSTC_ERROR 0x08
+#define XSTC_GASIZE 0x60
+#define XSTC_GENC 0x68
+#define XSTC_GPOL 0x6c
+#define XSTC_GHSIZE 0x70
+#define XSTC_GVSIZE 0x74
+#define XSTC_GHSYNC 0x78
+#define XSTC_GVBH_F0 0x7c
+#define XSTC_GVSYNC_F0 0x80
+#define XSTC_GVSH_F0 0x84
+#define XSTC_GVBH_F1 0x88
+#define XSTC_GVSYNC_F1 0x8C
+#define XSTC_GVSH_F1 0x90
+#define XSTC_GASIZE_F1 0x94
+#define XSTC_OFFSET 0x10000
+
+/* timing controller register bit */
+#define XSTC_CTL_FIP BIT(6) /* field id polarity */
+#define XSTC_CTL_ACP BIT(5) /* active chroma polarity */
+#define XSTC_CTL_AVP BIT(4) /* active video polarity */
+#define XSTC_CTL_HSP BIT(3) /* hori sync polarity */
+#define XSTC_CTL_VSP BIT(2) /* vert sync polarity */
+#define XSTC_CTL_HBP BIT(1) /* hori blank polarity */
+#define XSTC_CTL_VBP BIT(0) /* vert blank polarity */
+#define XSTC_CTL_FIPSS BIT(26) /* field id polarity source */
+#define XSTC_CTL_ACPSS BIT(25) /* active chroma polarity src */
+#define XSTC_CTL_AVPSS BIT(24) /* active video polarity src */
+#define XSTC_CTL_HSPSS BIT(23) /* hori sync polarity src */
+#define XSTC_CTL_VSPSS BIT(22) /* vert sync polarity src */
+#define XSTC_CTL_HBPSS BIT(21) /* hori blank polarity src */
+#define XSTC_CTL_VBPSS BIT(20) /* vert blank polarity src */
+#define XSTC_CTL_VCSS BIT(18) /* chroma src */
+#define XSTC_CTL_VASS BIT(17) /* vertical offset src */
+#define XSTC_CTL_VBSS BIT(16) /* vertical sync end src */
+#define XSTC_CTL_VSSS BIT(15) /* vertical sync start src */
+#define XSTC_CTL_VFSS BIT(14) /* vertical active size src */
+#define XSTC_CTL_VTSS BIT(13) /* vertical frame size src */
+#define XSTC_CTL_HBSS BIT(11) /* horiz sync end src */
+#define XSTC_CTL_HSSS BIT(10) /* horiz sync start src */
+#define XSTC_CTL_HFSS BIT(9) /* horiz active size src */
+#define XSTC_CTL_HTSS BIT(8) /* horiz frame size src */
+#define XSTC_CTL_GE BIT(2) /* timing generator enable */
+#define XSTC_CTL_RU BIT(1) /* timing register update */
+
+/* timing generator horizontal 1 */
+#define XSTC_GH1_BPSTART_MASK GENMASK(28, 16)
+#define XSTC_GH1_BPSTART_SHIFT 16
+#define XSTC_GH1_SYNCSTART_MASK GENMASK(12, 0)
+/* timing generator vertical 1 (field 0) */
+#define XSTC_GV1_BPSTART_MASK GENMASK(28, 16)
+#define XSTC_GV1_BPSTART_SHIFT 16
+#define XSTC_GV1_SYNCSTART_MASK GENMASK(12, 0)
+/* timing generator/detector vblank/vsync horizontal offset registers */
+#define XSTC_XVXHOX_HEND_MASK GENMASK(28, 16)
+#define XSTC_XVXHOX_HEND_SHIFT 16
+#define XSTC_XVXHOX_HSTART_MASK GENMASK(12, 0)
+
+#define XSTC_GHFRAME_HSIZE GENMASK(12, 0)
+#define XSTC_GVFRAME_HSIZE_F1 GENMASK(12, 0)
+#define XSTC_GA_ACTSIZE_MASK GENMASK(12, 0)
+/* reset register bit definition */
+#define XSTC_RST BIT(31)
+/* Interlaced bit in XSTC_GENC */
+#define XSTC_GENC_INTERL BIT(6)
+
+/**
+ * struct xlnx_stc_polarity - timing signal polarity
+ *
+ * @field_id: field ID polarity
+ * @vblank: vblank polarity
+ * @vsync: vsync polarity
+ * @hblank: hblank polarity
+ * @hsync: hsync polarity
+ */
+struct xlnx_stc_polarity {
+ u8 field_id;
+ u8 vblank;
+ u8 vsync;
+ u8 hblank;
+ u8 hsync;
+};
+
+/**
+ * struct xlnx_stc_hori_off - timing signal horizontal offset
+ *
+ * @v0blank_hori_start: vblank horizontal start (field 0)
+ * @v0blank_hori_end: vblank horizontal end (field 0)
+ * @v0sync_hori_start: vsync horizontal start (field 0)
+ * @v0sync_hori_end: vsync horizontal end (field 0)
+ * @v1blank_hori_start: vblank horizontal start (field 1)
+ * @v1blank_hori_end: vblank horizontal end (field 1)
+ * @v1sync_hori_start: vsync horizontal start (field 1)
+ * @v1sync_hori_end: vsync horizontal end (field 1)
+ */
+struct xlnx_stc_hori_off {
+ u16 v0blank_hori_start;
+ u16 v0blank_hori_end;
+ u16 v0sync_hori_start;
+ u16 v0sync_hori_end;
+ u16 v1blank_hori_start;
+ u16 v1blank_hori_end;
+ u16 v1sync_hori_start;
+ u16 v1sync_hori_end;
+};
+
+/**
+ * xlnx_stc_writel - Memory mapped SDI Tx timing controller write
+ * @base: Pointer to SDI Tx registers base
+ * @offset: Register offset
+ * @val: value to be written
+ *
+ * This function writes the value to the SDI Tx timing controller register.
+ */
+static inline void xlnx_stc_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + XSTC_OFFSET + offset);
+}
+
+/**
+ * xlnx_stc_readl - Memory mapped timing controller register read
+ * @base: Pointer to SDI Tx registers base
+ * @offset: Register offset
+ *
+ * Return: The contents of the SDI Tx timing controller register
+ *
+ * This function returns the contents of the corresponding SDI Tx register.
+ */
+static inline u32 xlnx_stc_readl(void __iomem *base, int offset)
+{
+ return readl(base + XSTC_OFFSET + offset);
+}
+
+/**
+ * xlnx_stc_enable - Enable timing controller
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function enables the SDI Tx subsystem's timing controller
+ */
+void xlnx_stc_enable(void __iomem *base)
+{
+ u32 reg;
+
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ xlnx_stc_writel(base, XSTC_CTL, reg | XSTC_CTL_GE);
+}
+
+/**
+ * xlnx_stc_disable - Disable timing controller
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function disables the SDI Tx subsystem's timing controller
+ */
+void xlnx_stc_disable(void __iomem *base)
+{
+ u32 reg;
+
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ xlnx_stc_writel(base, XSTC_CTL, reg & ~XSTC_CTL_GE);
+}
+
+/**
+ * xlnx_stc_reset - Reset timing controller
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function resets the SDI Tx subsystem's timing controller
+ */
+void xlnx_stc_reset(void __iomem *base)
+{
+ u32 reg;
+
+ xlnx_stc_writel(base, XSTC_CTL, XSTC_RST);
+
+ /* enable register update */
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ xlnx_stc_writel(base, XSTC_CTL, reg | XSTC_CTL_RU);
+}
+
+/**
+ * xlnx_stc_polarity - Configure timing signal polarity
+ * @base: Base address of SDI Tx subsystem
+ * @polarity: timing signal polarity data
+ *
+ * This function configures the timing signal polarity.
+ */
+static void xlnx_stc_polarity(void __iomem *base,
+ struct xlnx_stc_polarity *polarity)
+{
+ u32 reg = 0;
+
+ reg = XSTC_CTL_ACP;
+ reg |= XSTC_CTL_AVP;
+ if (polarity->field_id)
+ reg |= XSTC_CTL_FIP;
+ if (polarity->vblank)
+ reg |= XSTC_CTL_VBP;
+ if (polarity->vsync)
+ reg |= XSTC_CTL_VSP;
+ if (polarity->hblank)
+ reg |= XSTC_CTL_HBP;
+ if (polarity->hsync)
+ reg |= XSTC_CTL_HSP;
+
+ xlnx_stc_writel(base, XSTC_GPOL, reg);
+}
+
+/**
+ * xlnx_stc_hori_off - Configure horizontal timing offset
+ * @base: Base address of SDI Tx subsystem
+ * @hori_off: horizontal offset configuration data
+ * @flags: Display flags
+ *
+ * This function configures the horizontal offset.
+ */
+static void xlnx_stc_hori_off(void __iomem *base,
+ struct xlnx_stc_hori_off *hori_off,
+ enum display_flags flags)
+{
+ u32 reg;
+
+ /* Calculate and update Generator VBlank Hori field 0 */
+ reg = hori_off->v0blank_hori_start & XSTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_off->v0blank_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+ XSTC_XVXHOX_HEND_MASK;
+ xlnx_stc_writel(base, XSTC_GVBH_F0, reg);
+
+ /* Calculate and update Generator VSync Hori field 0 */
+ reg = hori_off->v0sync_hori_start & XSTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_off->v0sync_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+ XSTC_XVXHOX_HEND_MASK;
+ xlnx_stc_writel(base, XSTC_GVSH_F0, reg);
+
+ /* Calculate and update Generator VBlank Hori field 1 */
+ if (flags & DISPLAY_FLAGS_INTERLACED) {
+ reg = hori_off->v1blank_hori_start & XSTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_off->v1blank_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+ XSTC_XVXHOX_HEND_MASK;
+ xlnx_stc_writel(base, XSTC_GVBH_F1, reg);
+ }
+
+ /* Calculate and update Generator VSync Hori field 1 */
+ if (flags & DISPLAY_FLAGS_INTERLACED) {
+ reg = hori_off->v1sync_hori_start & XSTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_off->v1sync_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+ XSTC_XVXHOX_HEND_MASK;
+ xlnx_stc_writel(base, XSTC_GVSH_F1, reg);
+ }
+}
+
+/**
+ * xlnx_stc_src - Configure timing source
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function configures the timing source.
+ */
+static void xlnx_stc_src(void __iomem *base)
+{
+ u32 reg;
+
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ reg |= XSTC_CTL_VCSS;
+ reg |= XSTC_CTL_VASS;
+ reg |= XSTC_CTL_VBSS;
+ reg |= XSTC_CTL_VSSS;
+ reg |= XSTC_CTL_VFSS;
+ reg |= XSTC_CTL_VTSS;
+ reg |= XSTC_CTL_HBSS;
+ reg |= XSTC_CTL_HSSS;
+ reg |= XSTC_CTL_HFSS;
+ reg |= XSTC_CTL_HTSS;
+ xlnx_stc_writel(base, XSTC_CTL, reg);
+}
+
+/**
+ * xlnx_stc_sig - Generate timing signals
+ * @base: Base address of SDI Tx subsystem
+ * @vm: video mode
+ *
+ * This function generates the timing signals for the given video mode.
+ */
+void xlnx_stc_sig(void __iomem *base, struct videomode *vm)
+{
+ u32 reg;
+ u32 htotal, hactive, hsync_start, hbackporch_start;
+ u32 vtotal, vactive, vsync_start, vbackporch_start;
+ struct xlnx_stc_hori_off hori_off;
+ struct xlnx_stc_polarity polarity;
+
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ xlnx_stc_writel(base, XSTC_CTL, reg & ~XSTC_CTL_RU);
+
+ htotal = vm->hactive + vm->hfront_porch + vm->hsync_len +
+ vm->hback_porch;
+ vtotal = vm->vactive + vm->vfront_porch + vm->vsync_len +
+ vm->vback_porch;
+ hactive = vm->hactive;
+ vactive = vm->vactive;
+ hsync_start = vm->hactive + vm->hfront_porch;
+ vsync_start = vm->vactive + vm->vfront_porch;
+ hbackporch_start = hsync_start + vm->hsync_len;
+ vbackporch_start = vsync_start + vm->vsync_len;
+
+ DRM_DEBUG_DRIVER("ha: %d, va: %d\n", hactive, vactive);
+ DRM_DEBUG_DRIVER("hs: %d, hb: %d\n", hsync_start, hbackporch_start);
+ DRM_DEBUG_DRIVER("vs: %d, vb: %d\n", vsync_start, vbackporch_start);
+ DRM_DEBUG_DRIVER("ht: %d, vt: %d\n", htotal, vtotal);
+
+ reg = htotal & XSTC_GHFRAME_HSIZE;
+ xlnx_stc_writel(base, XSTC_GHSIZE, reg);
+ reg = vtotal & XSTC_GVFRAME_HSIZE_F1;
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ if (vm->pixelclock == 148500000)
+ reg |= (reg + 2) <<
+ XSTC_GV1_BPSTART_SHIFT;
+ else
+ reg |= (reg + 1) <<
+ XSTC_GV1_BPSTART_SHIFT;
+ } else {
+ reg |= reg << XSTC_GV1_BPSTART_SHIFT;
+ }
+ xlnx_stc_writel(base, XSTC_GVSIZE, reg);
+ reg = hactive & XSTC_GA_ACTSIZE_MASK;
+ reg |= (vactive & XSTC_GA_ACTSIZE_MASK) << 16;
+ xlnx_stc_writel(base, XSTC_GASIZE, reg);
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ if (vactive == 243)
+ reg = ((vactive + 1) & XSTC_GA_ACTSIZE_MASK) << 16;
+ else
+ reg = (vactive & XSTC_GA_ACTSIZE_MASK) << 16;
+ xlnx_stc_writel(base, XSTC_GASIZE_F1, reg);
+ }
+
+ reg = hsync_start & XSTC_GH1_SYNCSTART_MASK;
+ reg |= (hbackporch_start << XSTC_GH1_BPSTART_SHIFT) &
+ XSTC_GH1_BPSTART_MASK;
+ xlnx_stc_writel(base, XSTC_GHSYNC, reg);
+ reg = vsync_start & XSTC_GV1_SYNCSTART_MASK;
+ reg |= (vbackporch_start << XSTC_GV1_BPSTART_SHIFT) &
+ XSTC_GV1_BPSTART_MASK;
+
+ /*
+ * Fix the Vsync_vstart and vsync_vend of Field 0
+ * for all interlaced modes including 3GB.
+ */
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ reg = ((((reg & XSTC_GV1_BPSTART_MASK) >>
+ XSTC_GV1_BPSTART_SHIFT) - 1) <<
+ XSTC_GV1_BPSTART_SHIFT) |
+ ((reg & XSTC_GV1_SYNCSTART_MASK) - 1);
+
+ xlnx_stc_writel(base, XSTC_GVSYNC_F0, reg);
+
+ /*
+ * Fix the Vsync_vstart and vsync_vend of Field 1
+ * for interlaced and 3GB modes.
+ */
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ if (vm->pixelclock == 148500000)
+ /* Revert and increase by 1 for 3GB mode */
+ reg = ((((reg & XSTC_GV1_BPSTART_MASK) >>
+ XSTC_GV1_BPSTART_SHIFT) + 2) <<
+ XSTC_GV1_BPSTART_SHIFT) |
+ ((reg & XSTC_GV1_SYNCSTART_MASK) + 2);
+ else
+ /* Only revert the reduction */
+ reg = ((((reg & XSTC_GV1_BPSTART_MASK) >>
+ XSTC_GV1_BPSTART_SHIFT) + 1) <<
+ XSTC_GV1_BPSTART_SHIFT) |
+ ((reg & XSTC_GV1_SYNCSTART_MASK) + 1);
+ }
+
+ hori_off.v0blank_hori_start = hactive;
+ hori_off.v0blank_hori_end = hactive;
+ hori_off.v0sync_hori_start = hsync_start;
+ hori_off.v0sync_hori_end = hsync_start;
+ hori_off.v1blank_hori_start = hactive;
+ hori_off.v1blank_hori_end = hactive;
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ hori_off.v1sync_hori_start = hsync_start - (htotal / 2);
+ hori_off.v1sync_hori_end = hsync_start - (htotal / 2);
+ xlnx_stc_writel(base, XSTC_GVSYNC_F1, reg);
+ reg = xlnx_stc_readl(base, XSTC_GENC)
+ | XSTC_GENC_INTERL;
+ xlnx_stc_writel(base, XSTC_GENC, reg);
+ } else {
+ hori_off.v1sync_hori_start = hsync_start;
+ hori_off.v1sync_hori_end = hsync_start;
+ reg = xlnx_stc_readl(base, XSTC_GENC)
+ & ~XSTC_GENC_INTERL;
+ xlnx_stc_writel(base, XSTC_GENC, reg);
+ }
+
+ xlnx_stc_hori_off(base, &hori_off, vm->flags);
+ /* set up polarity */
+ memset(&polarity, 0x0, sizeof(polarity));
+ polarity.hsync = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+ polarity.vsync = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+ polarity.hblank = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+ polarity.vblank = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+ polarity.field_id = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
+ xlnx_stc_polarity(base, &polarity);
+
+ xlnx_stc_src(base);
+
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ xlnx_stc_writel(base, XSTC_CTL, reg | XSTC_CTL_RU);
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi_timing.h b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.h
new file mode 100644
index 000000000000..4ca9f8972e0a
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA SDI Tx timing controller driver
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#ifndef _XLNX_SDI_TIMING_H_
+#define _XLNX_SDI_TIMING_H_
+
+struct videomode;
+
+void xlnx_stc_enable(void __iomem *base);
+void xlnx_stc_disable(void __iomem *base);
+void xlnx_stc_reset(void __iomem *base);
+void xlnx_stc_sig(void __iomem *base, struct videomode *vm);
+
+#endif /* _XLNX_SDI_TIMING_H_ */
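
Taken together, the API above implies a simple programming sequence for the
SDI Tx driver's mode-set path: reset the generator, program the timing for
the current mode, then enable it. A hedged sketch of that sequence
(drm_display_mode_to_videomode() is the standard DRM helper; the function
wrapping it here is hypothetical):

    /* Illustrative mode-set path for the SDI Tx timing controller. */
    static void example_sdi_mode_set(void __iomem *base,
                                     const struct drm_display_mode *mode)
    {
        struct videomode vm;

        drm_display_mode_to_videomode(mode, &vm);

        xlnx_stc_reset(base);     /* reset and re-arm register updates */
        xlnx_stc_sig(base, &vm);  /* program timing for this mode */
        xlnx_stc_enable(base);    /* start the generator */
    }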
diff --git a/drivers/gpu/drm/xlnx/xlnx_vtc.c b/drivers/gpu/drm/xlnx/xlnx_vtc.c
new file mode 100644
index 000000000000..427b35b84e16
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_vtc.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Video Timing Controller support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ * Saurabh Sengar <saurabhs@xilinx.com>
+ * Vishal Sagar <vishal.sagar@xilinx.com>
+ *
+ * This driver adds support to control the Xilinx Video Timing
+ * Controller connected to the CRTC.
+ */
+
+#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <video/videomode.h>
+#include "xlnx_bridge.h"
+
+/* register offsets */
+#define XVTC_CTL 0x000
+#define XVTC_VER 0x010
+#define XVTC_GASIZE 0x060
+#define XVTC_GENC 0x068
+#define XVTC_GPOL 0x06c
+#define XVTC_GHSIZE 0x070
+#define XVTC_GVSIZE 0x074
+#define XVTC_GHSYNC 0x078
+#define XVTC_GVBHOFF_F0 0x07c
+#define XVTC_GVSYNC_F0 0x080
+#define XVTC_GVSHOFF_F0 0x084
+#define XVTC_GVBHOFF_F1 0x088
+#define XVTC_GVSYNC_F1 0x08C
+#define XVTC_GVSHOFF_F1 0x090
+#define XVTC_GASIZE_F1 0x094
+
+/* vtc control register bits */
+#define XVTC_CTL_SWRESET BIT(31)
+#define XVTC_CTL_FIPSS BIT(26)
+#define XVTC_CTL_ACPSS BIT(25)
+#define XVTC_CTL_AVPSS BIT(24)
+#define XVTC_CTL_HSPSS BIT(23)
+#define XVTC_CTL_VSPSS BIT(22)
+#define XVTC_CTL_HBPSS BIT(21)
+#define XVTC_CTL_VBPSS BIT(20)
+#define XVTC_CTL_VCSS BIT(18)
+#define XVTC_CTL_VASS BIT(17)
+#define XVTC_CTL_VBSS BIT(16)
+#define XVTC_CTL_VSSS BIT(15)
+#define XVTC_CTL_VFSS BIT(14)
+#define XVTC_CTL_VTSS BIT(13)
+#define XVTC_CTL_HBSS BIT(11)
+#define XVTC_CTL_HSSS BIT(10)
+#define XVTC_CTL_HFSS BIT(9)
+#define XVTC_CTL_HTSS BIT(8)
+#define XVTC_CTL_GE BIT(2)
+#define XVTC_CTL_RU BIT(1)
+
+/* vtc generator polarity register bits */
+#define XVTC_GPOL_FIP BIT(6)
+#define XVTC_GPOL_ACP BIT(5)
+#define XVTC_GPOL_AVP BIT(4)
+#define XVTC_GPOL_HSP BIT(3)
+#define XVTC_GPOL_VSP BIT(2)
+#define XVTC_GPOL_HBP BIT(1)
+#define XVTC_GPOL_VBP BIT(0)
+
+/* vtc generator horizontal 1 */
+#define XVTC_GH1_BPSTART_MASK GENMASK(28, 16)
+#define XVTC_GH1_BPSTART_SHIFT 16
+#define XVTC_GH1_SYNCSTART_MASK GENMASK(12, 0)
+/* vtc generator vertical 1 (field 0) */
+#define XVTC_GV1_BPSTART_MASK GENMASK(28, 16)
+#define XVTC_GV1_BPSTART_SHIFT 16
+#define XVTC_GV1_SYNCSTART_MASK GENMASK(12, 0)
+/* vtc generator/detector vblank/vsync horizontal offset registers */
+#define XVTC_XVXHOX_HEND_MASK GENMASK(28, 16)
+#define XVTC_XVXHOX_HEND_SHIFT 16
+#define XVTC_XVXHOX_HSTART_MASK GENMASK(12, 0)
+
+#define XVTC_GHFRAME_HSIZE GENMASK(12, 0)
+#define XVTC_GVFRAME_HSIZE_F1 GENMASK(12, 0)
+#define XVTC_GA_ACTSIZE_MASK GENMASK(12, 0)
+
+/* vtc generator encoding register bits */
+#define XVTC_GENC_INTERL BIT(6)
+
+/**
+ * struct xlnx_vtc - Xilinx VTC object
+ *
+ * @bridge: xilinx bridge structure
+ * @dev: device structure
+ * @base: base addr
+ * @ppc: pixels per clock
+ * @axi_clk: AXI Lite clock
+ * @vid_clk: Video clock
+ */
+struct xlnx_vtc {
+ struct xlnx_bridge bridge;
+ struct device *dev;
+ void __iomem *base;
+ u32 ppc;
+ struct clk *axi_clk;
+ struct clk *vid_clk;
+};
+
+static inline void xlnx_vtc_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xlnx_vtc_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+static inline struct xlnx_vtc *bridge_to_vtc(struct xlnx_bridge *bridge)
+{
+ return container_of(bridge, struct xlnx_vtc, bridge);
+}
+
+static void xlnx_vtc_reset(struct xlnx_vtc *vtc)
+{
+ u32 reg;
+
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, XVTC_CTL_SWRESET);
+
+ /* enable register update */
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg | XVTC_CTL_RU);
+}
+
+/**
+ * xlnx_vtc_enable - Enable the VTC
+ * @bridge: xilinx bridge structure pointer
+ *
+ * Return:
+ * Zero on success.
+ *
+ * This function enables the VTC
+ */
+static int xlnx_vtc_enable(struct xlnx_bridge *bridge)
+{
+ u32 reg;
+ struct xlnx_vtc *vtc = bridge_to_vtc(bridge);
+
+ /* enable generator */
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg | XVTC_CTL_GE);
+ dev_dbg(vtc->dev, "enabled\n");
+ return 0;
+}
+
+/**
+ * xlnx_vtc_disable - Disable the VTC
+ * @bridge: xilinx bridge structure pointer
+ *
+ * This function disables and resets the VTC.
+ */
+static void xlnx_vtc_disable(struct xlnx_bridge *bridge)
+{
+ u32 reg;
+ struct xlnx_vtc *vtc = bridge_to_vtc(bridge);
+
+ /* disable generator and reset */
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg & ~XVTC_CTL_GE);
+ xlnx_vtc_reset(vtc);
+ dev_dbg(vtc->dev, "disabled\n");
+}
+
+/**
+ * xlnx_vtc_set_timing - Configures the VTC
+ * @bridge: xilinx bridge structure pointer
+ * @vm: video mode requested
+ *
+ * Return:
+ * Zero on success.
+ *
+ * This function calculates the timing values from the video mode
+ * structure passed from the CRTC and configures the VTC.
+ */
+static int xlnx_vtc_set_timing(struct xlnx_bridge *bridge,
+ struct videomode *vm)
+{
+ u32 reg;
+ u32 htotal, hactive, hsync_start, hbackporch_start;
+ u32 vtotal, vactive, vsync_start, vbackporch_start;
+ struct xlnx_vtc *vtc = bridge_to_vtc(bridge);
+
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg & ~XVTC_CTL_RU);
+
+ vm->hactive /= vtc->ppc;
+ vm->hfront_porch /= vtc->ppc;
+ vm->hback_porch /= vtc->ppc;
+ vm->hsync_len /= vtc->ppc;
+
+ htotal = vm->hactive + vm->hfront_porch + vm->hsync_len +
+ vm->hback_porch;
+ vtotal = vm->vactive + vm->vfront_porch + vm->vsync_len +
+ vm->vback_porch;
+
+ hactive = vm->hactive;
+ vactive = vm->vactive;
+
+ hsync_start = vm->hactive + vm->hfront_porch;
+ vsync_start = vm->vactive + vm->vfront_porch;
+
+ hbackporch_start = hsync_start + vm->hsync_len;
+ vbackporch_start = vsync_start + vm->vsync_len;
+
+ dev_dbg(vtc->dev, "ha: %d, va: %d\n", hactive, vactive);
+ dev_dbg(vtc->dev, "ht: %d, vt: %d\n", htotal, vtotal);
+ dev_dbg(vtc->dev, "hs: %d, hb: %d\n", hsync_start, hbackporch_start);
+ dev_dbg(vtc->dev, "vs: %d, vb: %d\n", vsync_start, vbackporch_start);
+
+ reg = htotal & XVTC_GHFRAME_HSIZE;
+ xlnx_vtc_writel(vtc->base, XVTC_GHSIZE, reg);
+
+ reg = vtotal & XVTC_GVFRAME_HSIZE_F1;
+ reg |= reg << XVTC_GV1_BPSTART_SHIFT;
+ xlnx_vtc_writel(vtc->base, XVTC_GVSIZE, reg);
+
+ reg = hactive & XVTC_GA_ACTSIZE_MASK;
+ reg |= (vactive & XVTC_GA_ACTSIZE_MASK) << 16;
+ xlnx_vtc_writel(vtc->base, XVTC_GASIZE, reg);
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ xlnx_vtc_writel(vtc->base, XVTC_GASIZE_F1, reg);
+
+ reg = hsync_start & XVTC_GH1_SYNCSTART_MASK;
+ reg |= (hbackporch_start << XVTC_GH1_BPSTART_SHIFT) &
+ XVTC_GH1_BPSTART_MASK;
+ xlnx_vtc_writel(vtc->base, XVTC_GHSYNC, reg);
+
+ reg = vsync_start & XVTC_GV1_SYNCSTART_MASK;
+ reg |= (vbackporch_start << XVTC_GV1_BPSTART_SHIFT) &
+ XVTC_GV1_BPSTART_MASK;
+ xlnx_vtc_writel(vtc->base, XVTC_GVSYNC_F0, reg);
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ xlnx_vtc_writel(vtc->base, XVTC_GVSYNC_F1, reg);
+ reg = xlnx_vtc_readl(vtc->base, XVTC_GENC) | XVTC_GENC_INTERL;
+ xlnx_vtc_writel(vtc->base, XVTC_GENC, reg);
+ } else {
+ reg = xlnx_vtc_readl(vtc->base, XVTC_GENC) & ~XVTC_GENC_INTERL;
+ xlnx_vtc_writel(vtc->base, XVTC_GENC, reg);
+ }
+
+ /* configure horizontal offset */
+ /* Calculate and update Generator VBlank Hori field 0 */
+ reg = hactive & XVTC_XVXHOX_HSTART_MASK;
+ reg |= (hactive << XVTC_XVXHOX_HEND_SHIFT) &
+ XVTC_XVXHOX_HEND_MASK;
+ xlnx_vtc_writel(vtc->base, XVTC_GVBHOFF_F0, reg);
+
+ /* Calculate and update Generator VSync Hori field 0 */
+ reg = hsync_start & XVTC_XVXHOX_HSTART_MASK;
+ reg |= (hsync_start << XVTC_XVXHOX_HEND_SHIFT) &
+ XVTC_XVXHOX_HEND_MASK;
+ xlnx_vtc_writel(vtc->base, XVTC_GVSHOFF_F0, reg);
+
+ /* Calculate and update Generator VBlank Hori field 1 */
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ reg = hactive & XVTC_XVXHOX_HSTART_MASK;
+ reg |= (hactive << XVTC_XVXHOX_HEND_SHIFT) &
+ XVTC_XVXHOX_HEND_MASK;
+ xlnx_vtc_writel(vtc->base, XVTC_GVBHOFF_F1, reg);
+ }
+
+ /* Calculate and update Generator VSync Hori field 1 */
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ reg = (hsync_start - (htotal / 2)) & XVTC_XVXHOX_HSTART_MASK;
+ reg |= ((hsync_start - (htotal / 2)) <<
+ XVTC_XVXHOX_HEND_SHIFT) & XVTC_XVXHOX_HEND_MASK;
+ } else {
+ reg = hsync_start & XVTC_XVXHOX_HSTART_MASK;
+ reg |= (hsync_start << XVTC_XVXHOX_HEND_SHIFT) &
+ XVTC_XVXHOX_HEND_MASK;
+ }
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ xlnx_vtc_writel(vtc->base, XVTC_GVSHOFF_F1, reg);
+
+ /* configure polarity of signals */
+ reg = 0;
+ reg |= XVTC_GPOL_ACP;
+ reg |= XVTC_GPOL_AVP;
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ reg |= XVTC_GPOL_FIP;
+ if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH) {
+ reg |= XVTC_GPOL_VBP;
+ reg |= XVTC_GPOL_VSP;
+ }
+ if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH) {
+ reg |= XVTC_GPOL_HBP;
+ reg |= XVTC_GPOL_HSP;
+ }
+ xlnx_vtc_writel(vtc->base, XVTC_GPOL, reg);
+
+ /* configure timing source */
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ reg |= XVTC_CTL_VCSS;
+ reg |= XVTC_CTL_VASS;
+ reg |= XVTC_CTL_VBSS;
+ reg |= XVTC_CTL_VSSS;
+ reg |= XVTC_CTL_VFSS;
+ reg |= XVTC_CTL_VTSS;
+ reg |= XVTC_CTL_HBSS;
+ reg |= XVTC_CTL_HSSS;
+ reg |= XVTC_CTL_HFSS;
+ reg |= XVTC_CTL_HTSS;
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg);
+
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg | XVTC_CTL_RU);
+ dev_dbg(vtc->dev, "set timing done\n");
+
+ return 0;
+}
+
+static int xlnx_vtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct xlnx_vtc *vtc;
+ struct resource *res;
+ int ret;
+
+ vtc = devm_kzalloc(dev, sizeof(*vtc), GFP_KERNEL);
+ if (!vtc)
+ return -ENOMEM;
+
+ vtc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get resource for device\n");
+ return -EFAULT;
+ }
+
+ vtc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vtc->base)) {
+ dev_err(dev, "failed to remap io region\n");
+ return PTR_ERR(vtc->base);
+ }
+
+ platform_set_drvdata(pdev, vtc);
+
+ ret = of_property_read_u32(dev->of_node, "xlnx,pixels-per-clock",
+ &vtc->ppc);
+ if (ret || (vtc->ppc != 1 && vtc->ppc != 2 && vtc->ppc != 4)) {
+ dev_err(dev, "failed to get a valid ppc value\n");
+ return ret ? ret : -EINVAL;
+ }
+ dev_info(dev, "vtc ppc = %u\n", vtc->ppc);
+
+ vtc->axi_clk = devm_clk_get(vtc->dev, "s_axi_aclk");
+ if (IS_ERR(vtc->axi_clk)) {
+ ret = PTR_ERR(vtc->axi_clk);
+ dev_err(dev, "failed to get axi lite clk %d\n", ret);
+ return ret;
+ }
+
+ vtc->vid_clk = devm_clk_get(vtc->dev, "clk");
+ if (IS_ERR(vtc->vid_clk)) {
+ ret = PTR_ERR(vtc->vid_clk);
+ dev_err(dev, "failed to get video clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(vtc->axi_clk);
+ if (ret) {
+ dev_err(vtc->dev, "unable to enable axilite clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(vtc->vid_clk);
+ if (ret) {
+ dev_err(vtc->dev, "unable to enable video clk %d\n", ret);
+ goto err_axi_clk;
+ }
+
+ xlnx_vtc_reset(vtc);
+
+ vtc->bridge.enable = &xlnx_vtc_enable;
+ vtc->bridge.disable = &xlnx_vtc_disable;
+ vtc->bridge.set_timing = &xlnx_vtc_set_timing;
+ vtc->bridge.of_node = dev->of_node;
+ ret = xlnx_bridge_register(&vtc->bridge);
+ if (ret) {
+ dev_err(dev, "Bridge registration failed\n");
+ goto err_vid_clk;
+ }
+
+ dev_info(dev, "Xilinx VTC IP version : 0x%08x\n",
+ xlnx_vtc_readl(vtc->base, XVTC_VER));
+ dev_info(dev, "Xilinx VTC DRM Bridge driver probed\n");
+ return 0;
+
+err_vid_clk:
+ clk_disable_unprepare(vtc->vid_clk);
+err_axi_clk:
+ clk_disable_unprepare(vtc->axi_clk);
+ return ret;
+}
+
+static int xlnx_vtc_remove(struct platform_device *pdev)
+{
+ struct xlnx_vtc *vtc = platform_get_drvdata(pdev);
+
+ xlnx_bridge_unregister(&vtc->bridge);
+ clk_disable_unprepare(vtc->vid_clk);
+ clk_disable_unprepare(vtc->axi_clk);
+
+ return 0;
+}
+
+static const struct of_device_id xlnx_vtc_of_match[] = {
+ { .compatible = "xlnx,bridge-v-tc-6.1" },
+ { /* end of table */ },
+};
+
+MODULE_DEVICE_TABLE(of, xlnx_vtc_of_match);
+
+static struct platform_driver xlnx_vtc_bridge_driver = {
+ .probe = xlnx_vtc_probe,
+ .remove = xlnx_vtc_remove,
+ .driver = {
+ .name = "xlnx,bridge-vtc",
+ .of_match_table = xlnx_vtc_of_match,
+ },
+};
+
+module_platform_driver(xlnx_vtc_bridge_driver);
+
+MODULE_AUTHOR("Vishal Sagar");
+MODULE_DESCRIPTION("Xilinx VTC Bridge Driver");
+MODULE_LICENSE("GPL v2");
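
Because the VTC registers itself as an xlnx_bridge with enable, disable and
set_timing callbacks, a CRTC driver programs it entirely through that
interface rather than touching VTC registers. A hedged sketch invoking the
callbacks directly (illustrative only; real consumers would go through the
wrapper helpers declared in xlnx_bridge.h, which is outside this hunk):

    /* Illustrative CRTC enable path driving the VTC via its bridge ops. */
    static int example_crtc_enable_vtc(struct xlnx_bridge *vtc_bridge,
                                       struct videomode *vm)
    {
        int ret;

        /* Program totals, sync positions and polarities (set_timing). */
        ret = vtc_bridge->set_timing(vtc_bridge, vm);
        if (ret)
            return ret;

        /* Start the timing generator (enable). */
        return vtc_bridge->enable(vtc_bridge);
    }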
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
new file mode 100644
index 000000000000..1786a70897b5
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -0,0 +1,3333 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP Display Controller Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_fb.h"
+#include "zynqmp_disp.h"
+#include "zynqmp_dp.h"
+#include "zynqmp_dpsub.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * The display part of the ZynqMP DP subsystem. Internally, the device
+ * is partitioned into 3 blocks: AV buffer manager, Blender, and Audio.
+ * The driver creates the DRM CRTC and plane objects and maps the DRM
+ * interface onto those 3 blocks. At a high level, the driver is layered
+ * in the following way:
+ *
+ * zynqmp_disp_crtc & zynqmp_disp_plane
+ * |->zynqmp_disp
+ * |->zynqmp_disp_aud
+ * |->zynqmp_disp_blend
+ * |->zynqmp_disp_av_buf
+ *
+ * The driver APIs are used externally by
+ * - zynqmp_dpsub: Top level ZynqMP DP subsystem driver
+ * - zynqmp_dp: ZynqMP DP driver
+ * - xlnx_crtc: Xilinx DRM specific crtc functions
+ */
+
+/* The default value is ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565 */
+static uint zynqmp_disp_gfx_init_fmt;
+module_param_named(gfx_init_fmt, zynqmp_disp_gfx_init_fmt, uint, 0444);
+MODULE_PARM_DESC(gfx_init_fmt, "The initial format of the graphics layer\n"
+ "\t\t0 = rgb565 (default)\n"
+ "\t\t1 = rgb888\n"
+ "\t\t2 = argb8888\n");
+/* These values map to indices of av_buf_gfx_fmts[] */
+#define ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565 10
+#define ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB888 5
+#define ZYNQMP_DISP_AV_BUF_GFX_FMT_ARGB8888 1
+static const u32 zynqmp_disp_gfx_init_fmts[] = {
+ ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565,
+ ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB888,
+ ZYNQMP_DISP_AV_BUF_GFX_FMT_ARGB8888,
+};
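+/*
+ * Illustrative use (hypothetical, for documentation only): assuming the
+ * module parameter has been range-checked against this array, the initial
+ * AV buffer graphics format is selected as
+ * zynqmp_disp_gfx_init_fmts[zynqmp_disp_gfx_init_fmt], which in turn is an
+ * index into av_buf_gfx_fmts[] as noted above.
+ */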
+
+/* Blender registers */
+#define ZYNQMP_DISP_V_BLEND_BG_CLR_0 0x0
+#define ZYNQMP_DISP_V_BLEND_BG_CLR_1 0x4
+#define ZYNQMP_DISP_V_BLEND_BG_CLR_2 0x8
+#define ZYNQMP_DISP_V_BLEND_BG_MAX 0xfff
+#define ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA 0xc
+#define ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MASK 0x1fe
+#define ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX 0xff
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT 0x14
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_RGB 0x0
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR444 0x1
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR422 0x2
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YONLY 0x3
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_XVYCC 0x4
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_EN_DOWNSAMPLE BIT(4)
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL 0x18
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_EN_US BIT(0)
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_RGB BIT(1)
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_BYPASS BIT(8)
+#define ZYNQMP_DISP_V_BLEND_NUM_COEFF 9
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF0 0x20
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF1 0x24
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF2 0x28
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF3 0x2c
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF4 0x30
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF5 0x34
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF6 0x38
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF7 0x3c
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF8 0x40
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF0 0x44
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF1 0x48
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF2 0x4c
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF3 0x50
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF4 0x54
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF5 0x58
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF6 0x5c
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF7 0x60
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF8 0x64
+#define ZYNQMP_DISP_V_BLEND_NUM_OFFSET 3
+#define ZYNQMP_DISP_V_BLEND_LUMA_IN1CSC_OFFSET 0x68
+#define ZYNQMP_DISP_V_BLEND_CR_IN1CSC_OFFSET 0x6c
+#define ZYNQMP_DISP_V_BLEND_CB_IN1CSC_OFFSET 0x70
+#define ZYNQMP_DISP_V_BLEND_LUMA_OUTCSC_OFFSET 0x74
+#define ZYNQMP_DISP_V_BLEND_CR_OUTCSC_OFFSET 0x78
+#define ZYNQMP_DISP_V_BLEND_CB_OUTCSC_OFFSET 0x7c
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF0 0x80
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF1 0x84
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF2 0x88
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF3 0x8c
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF4 0x90
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF5 0x94
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF6 0x98
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF7 0x9c
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF8 0xa0
+#define ZYNQMP_DISP_V_BLEND_LUMA_IN2CSC_OFFSET 0xa4
+#define ZYNQMP_DISP_V_BLEND_CR_IN2CSC_OFFSET 0xa8
+#define ZYNQMP_DISP_V_BLEND_CB_IN2CSC_OFFSET 0xac
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_ENABLE 0x1d0
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_COMP1 0x1d4
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_COMP2 0x1d8
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_COMP3 0x1dc
+
+/* AV buffer manager registers */
+#define ZYNQMP_DISP_AV_BUF_FMT 0x0
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_SHIFT 0
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MASK (0x1f << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_UYVY (0 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_VYUY (1 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YVYU (2 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUYV (3 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16 (4 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24 (5 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI (6 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MONO (7 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2 (8 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUV444 (9 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888 (10 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGBA8880 (11 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888_10 (12 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUV444_10 (13 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2_10 (14 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_10 (15 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_10 (16 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24_10 (17 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YONLY_10 (18 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420 (19 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420 (20 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2_420 (21 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420_10 (22 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420_10 (23 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2_420_10 (24 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_SHIFT 8
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_MASK (0xf << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA8888 (0 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_ABGR8888 (1 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB888 (2 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_BGR888 (3 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA5551 (4 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA4444 (5 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB565 (6 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_8BPP (7 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_4BPP (8 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_2BPP (9 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_1BPP (10 << 8)
+#define ZYNQMP_DISP_AV_BUF_NON_LIVE_LATENCY 0x8
+#define ZYNQMP_DISP_AV_BUF_CHBUF 0x10
+#define ZYNQMP_DISP_AV_BUF_CHBUF_EN BIT(0)
+#define ZYNQMP_DISP_AV_BUF_CHBUF_FLUSH BIT(1)
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT 2
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_MASK (0xf << 2)
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_MAX 0xf
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_AUD_MAX 0x3
+#define ZYNQMP_DISP_AV_BUF_STATUS 0x28
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL 0x2c
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EN BIT(0)
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_SHIFT 1
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_EX_VSYNC 0
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_EX_VID 1
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_EX_AUD 2
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_INT_VSYNC 3
+#define ZYNQMP_DISP_AV_BUF_STC_INIT_VALUE0 0x30
+#define ZYNQMP_DISP_AV_BUF_STC_INIT_VALUE1 0x34
+#define ZYNQMP_DISP_AV_BUF_STC_ADJ 0x38
+#define ZYNQMP_DISP_AV_BUF_STC_VID_VSYNC_TS0 0x3c
+#define ZYNQMP_DISP_AV_BUF_STC_VID_VSYNC_TS1 0x40
+#define ZYNQMP_DISP_AV_BUF_STC_EXT_VSYNC_TS0 0x44
+#define ZYNQMP_DISP_AV_BUF_STC_EXT_VSYNC_TS1 0x48
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT_TS0 0x4c
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT_TS1 0x50
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT2_TS0 0x54
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT2_TS1 0x58
+#define ZYNQMP_DISP_AV_BUF_STC_SNAPSHOT0 0x60
+#define ZYNQMP_DISP_AV_BUF_STC_SNAPSHOT1 0x64
+#define ZYNQMP_DISP_AV_BUF_OUTPUT 0x70
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_SHIFT 0
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK (0x3 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_LIVE (0 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MEM (1 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_PATTERN (2 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_NONE (3 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_SHIFT 2
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK (0x3 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_DISABLE (0 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MEM (1 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_LIVE (2 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_NONE (3 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_SHIFT 4
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK (0x3 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_PL (0 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MEM (1 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_PATTERN (2 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_DISABLE (3 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN BIT(6)
+#define ZYNQMP_DISP_AV_BUF_HCOUNT_VCOUNT_INT0 0x74
+#define ZYNQMP_DISP_AV_BUF_HCOUNT_VCOUNT_INT1 0x78
+#define ZYNQMP_DISP_AV_BUF_PATTERN_GEN_SELECT 0x100
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC 0x120
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS BIT(0)
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC_AUD_FROM_PS BIT(1)
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING BIT(2)
+#define ZYNQMP_DISP_AV_BUF_SRST_REG 0x124
+#define ZYNQMP_DISP_AV_BUF_SRST_REG_VID_RST BIT(1)
+#define ZYNQMP_DISP_AV_BUF_AUDIO_CH_CONFIG 0x12c
+#define ZYNQMP_DISP_AV_BUF_GFX_COMP0_SF 0x200
+#define ZYNQMP_DISP_AV_BUF_GFX_COMP1_SF 0x204
+#define ZYNQMP_DISP_AV_BUF_GFX_COMP2_SF 0x208
+#define ZYNQMP_DISP_AV_BUF_VID_COMP0_SF 0x20c
+#define ZYNQMP_DISP_AV_BUF_VID_COMP1_SF 0x210
+#define ZYNQMP_DISP_AV_BUF_VID_COMP2_SF 0x214
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP0_SF 0x218
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP1_SF 0x21c
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP2_SF 0x220
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_CONFIG 0x224
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP0_SF 0x228
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP1_SF 0x22c
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP2_SF 0x230
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_CONFIG 0x234
+#define ZYNQMP_DISP_AV_BUF_4BIT_SF 0x11111
+#define ZYNQMP_DISP_AV_BUF_5BIT_SF 0x10842
+#define ZYNQMP_DISP_AV_BUF_6BIT_SF 0x10410
+#define ZYNQMP_DISP_AV_BUF_8BIT_SF 0x10101
+#define ZYNQMP_DISP_AV_BUF_10BIT_SF 0x10040
+#define ZYNQMP_DISP_AV_BUF_NULL_SF 0
+#define ZYNQMP_DISP_AV_BUF_NUM_SF 3
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_6 0x0
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 0x1
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_10 0x2
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_12 0x3
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_MASK GENMASK(2, 0)
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB 0x0
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV444 0x1
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422 0x2
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YONLY 0x3
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_MASK GENMASK(5, 4)
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_CB_FIRST BIT(8)
+#define ZYNQMP_DISP_AV_BUF_PALETTE_MEMORY 0x400
+
+/* Audio registers */
+#define ZYNQMP_DISP_AUD_MIXER_VOLUME 0x0
+#define ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE 0x20002000
+#define ZYNQMP_DISP_AUD_MIXER_META_DATA 0x4
+#define ZYNQMP_DISP_AUD_CH_STATUS0 0x8
+#define ZYNQMP_DISP_AUD_CH_STATUS1 0xc
+#define ZYNQMP_DISP_AUD_CH_STATUS2 0x10
+#define ZYNQMP_DISP_AUD_CH_STATUS3 0x14
+#define ZYNQMP_DISP_AUD_CH_STATUS4 0x18
+#define ZYNQMP_DISP_AUD_CH_STATUS5 0x1c
+#define ZYNQMP_DISP_AUD_CH_A_DATA0 0x20
+#define ZYNQMP_DISP_AUD_CH_A_DATA1 0x24
+#define ZYNQMP_DISP_AUD_CH_A_DATA2 0x28
+#define ZYNQMP_DISP_AUD_CH_A_DATA3 0x2c
+#define ZYNQMP_DISP_AUD_CH_A_DATA4 0x30
+#define ZYNQMP_DISP_AUD_CH_A_DATA5 0x34
+#define ZYNQMP_DISP_AUD_CH_B_DATA0 0x38
+#define ZYNQMP_DISP_AUD_CH_B_DATA1 0x3c
+#define ZYNQMP_DISP_AUD_CH_B_DATA2 0x40
+#define ZYNQMP_DISP_AUD_CH_B_DATA3 0x44
+#define ZYNQMP_DISP_AUD_CH_B_DATA4 0x48
+#define ZYNQMP_DISP_AUD_CH_B_DATA5 0x4c
+#define ZYNQMP_DISP_AUD_SOFT_RESET 0xc00
+#define ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST BIT(0)
+
+#define ZYNQMP_DISP_AV_BUF_NUM_VID_GFX_BUFFERS 4
+#define ZYNQMP_DISP_AV_BUF_NUM_BUFFERS 6
+
+#define ZYNQMP_DISP_NUM_LAYERS 2
+#define ZYNQMP_DISP_MAX_NUM_SUB_PLANES 3
+/*
+ * 3840x2160 is the advertised maximum resolution, but almost any resolution
+ * with a pixel rate under 300 MHz works. Thus use 4096 as the maximum width
+ * and height.
+ */
+#define ZYNQMP_DISP_MAX_WIDTH 4096
+#define ZYNQMP_DISP_MAX_HEIGHT 4096
+/* 44-bit addressing. This is actually a DPDMA limitation */
+#define ZYNQMP_DISP_MAX_DMA_BIT 44
+
+/**
+ * enum zynqmp_disp_layer_type - Layer type (can be used for hw ID)
+ * @ZYNQMP_DISP_LAYER_VID: Video layer
+ * @ZYNQMP_DISP_LAYER_GFX: Graphics layer
+ */
+enum zynqmp_disp_layer_type {
+ ZYNQMP_DISP_LAYER_VID,
+ ZYNQMP_DISP_LAYER_GFX
+};
+
+/**
+ * enum zynqmp_disp_layer_mode - Layer mode
+ * @ZYNQMP_DISP_LAYER_NONLIVE: non-live (memory) mode
+ * @ZYNQMP_DISP_LAYER_LIVE: live (stream) mode
+ */
+enum zynqmp_disp_layer_mode {
+ ZYNQMP_DISP_LAYER_NONLIVE,
+ ZYNQMP_DISP_LAYER_LIVE
+};
+
+/**
+ * struct zynqmp_disp_layer_dma - struct for DMA engine
+ * @chan: DMA channel
+ * @is_active: flag if the DMA is active
+ * @xt: Interleaved desc config container
+ * @sgl: Data chunk for dma_interleaved_template
+ */
+struct zynqmp_disp_layer_dma {
+ struct dma_chan *chan;
+ bool is_active;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+};
+
+/**
+ * struct zynqmp_disp_layer - Display subsystem layer
+ * @plane: DRM plane
+ * @bridge: Xlnx bridge
+ * @of_node: device node
+ * @dma: struct for DMA engine
+ * @num_chan: Number of DMA channel
+ * @id: Layer ID
+ * @offset: Layer offset in the register space
+ * @enabled: flag if enabled
+ * @fmt: Current format descriptor
+ * @drm_fmts: Array of supported DRM formats
+ * @num_fmts: Number of supported DRM formats
+ * @bus_fmts: Array of supported bus formats
+ * @num_bus_fmts: Number of supported bus formats
+ * @w: Width
+ * @h: Height
+ * @mode: the operation mode
+ * @other: other layer
+ * @disp: back pointer to struct zynqmp_disp
+ */
+struct zynqmp_disp_layer {
+ struct drm_plane plane;
+ struct xlnx_bridge bridge;
+ struct device_node *of_node;
+ struct zynqmp_disp_layer_dma dma[ZYNQMP_DISP_MAX_NUM_SUB_PLANES];
+ unsigned int num_chan;
+ enum zynqmp_disp_layer_type id;
+ u32 offset;
+ u8 enabled;
+ const struct zynqmp_disp_fmt *fmt;
+ u32 *drm_fmts;
+ unsigned int num_fmts;
+ u32 *bus_fmts;
+ unsigned int num_bus_fmts;
+ u32 w;
+ u32 h;
+ enum zynqmp_disp_layer_mode mode;
+ struct zynqmp_disp_layer *other;
+ struct zynqmp_disp *disp;
+};
+
+/**
+ * struct zynqmp_disp_blend - Blender
+ * @base: Base address offset
+ */
+struct zynqmp_disp_blend {
+ void __iomem *base;
+};
+
+/**
+ * struct zynqmp_disp_av_buf - AV buffer manager
+ * @base: Base address offset
+ */
+struct zynqmp_disp_av_buf {
+ void __iomem *base;
+};
+
+/**
+ * struct zynqmp_disp_aud - Audio
+ * @base: Base address offset
+ */
+struct zynqmp_disp_aud {
+ void __iomem *base;
+};
+
+/**
+ * struct zynqmp_disp - Display subsystem
+ * @xlnx_crtc: Xilinx DRM crtc
+ * @dev: device structure
+ * @dpsub: Display subsystem
+ * @drm: DRM core
+ * @enabled: flag if enabled
+ * @blend: Blender block
+ * @av_buf: AV buffer manager block
+ * @aud: Audio block
+ * @layers: layers
+ * @g_alpha_prop: global alpha property
+ * @alpha: current global alpha value
+ * @g_alpha_en_prop: the global alpha enable property
+ * @alpha_en: flag if the global alpha is enabled
+ * @color_prop: output color format property
+ * @color: current output color value
+ * @bg_c0_prop: 1st component of background color property
+ * @bg_c0: current value of 1st background color component
+ * @bg_c1_prop: 2nd component of background color property
+ * @bg_c1: current value of 2nd background color component
+ * @bg_c2_prop: 3rd component of background color property
+ * @bg_c2: current value of 3rd background color component
+ * @tpg_prop: Test Pattern Generation mode property
+ * @tpg_on: current TPG mode state
+ * @event: pending vblank event request
+ * @_ps_pclk: Pixel clock from PS
+ * @_pl_pclk: Pixel clock from PL
+ * @pclk: Pixel clock
+ * @pclk_en: Flag if the pixel clock is enabled
+ * @_ps_audclk: Audio clock from PS
+ * @_pl_audclk: Audio clock from PL
+ * @audclk: Audio clock
+ * @audclk_en: Flag if the audio clock is enabled
+ * @aclk: APB clock
+ * @aclk_en: Flag if the APB clock is enabled
+ */
+struct zynqmp_disp {
+ struct xlnx_crtc xlnx_crtc;
+ struct device *dev;
+ struct zynqmp_dpsub *dpsub;
+ struct drm_device *drm;
+ bool enabled;
+ struct zynqmp_disp_blend blend;
+ struct zynqmp_disp_av_buf av_buf;
+ struct zynqmp_disp_aud aud;
+ struct zynqmp_disp_layer layers[ZYNQMP_DISP_NUM_LAYERS];
+ struct drm_property *g_alpha_prop;
+ u32 alpha;
+ struct drm_property *g_alpha_en_prop;
+ bool alpha_en;
+ struct drm_property *color_prop;
+ unsigned int color;
+ struct drm_property *bg_c0_prop;
+ u32 bg_c0;
+ struct drm_property *bg_c1_prop;
+ u32 bg_c1;
+ struct drm_property *bg_c2_prop;
+ u32 bg_c2;
+ struct drm_property *tpg_prop;
+ bool tpg_on;
+ struct drm_pending_vblank_event *event;
+ /* Don't operate directly on the _ps_/_pl_ clocks; use pclk/audclk instead */
+ struct clk *_ps_pclk;
+ struct clk *_pl_pclk;
+ struct clk *pclk;
+ bool pclk_en;
+ struct clk *_ps_audclk;
+ struct clk *_pl_audclk;
+ struct clk *audclk;
+ bool audclk_en;
+ struct clk *aclk;
+ bool aclk_en;
+};
+
+/**
+ * struct zynqmp_disp_fmt - Display subsystem format mapping
+ * @drm_fmt: drm format
+ * @disp_fmt: Display subsystem format
+ * @bus_fmt: Bus formats (live formats)
+ * @rgb: flag for RGB formats
+ * @swap: flag to swap r & b for rgb formats, and u & v for yuv formats
+ * @chroma_sub: flag for chroma subsampled formats
+ * @sf: scaling factors for upto 3 color components
+ */
+struct zynqmp_disp_fmt {
+ u32 drm_fmt;
+ u32 disp_fmt;
+ u32 bus_fmt;
+ bool rgb;
+ bool swap;
+ bool chroma_sub;
+ u32 sf[3];
+};
+
+static void zynqmp_disp_write(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static u32 zynqmp_disp_read(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+static void zynqmp_disp_clr(void __iomem *base, int offset, u32 clr)
+{
+ zynqmp_disp_write(base, offset, zynqmp_disp_read(base, offset) & ~clr);
+}
+
+static void zynqmp_disp_set(void __iomem *base, int offset, u32 set)
+{
+ zynqmp_disp_write(base, offset, zynqmp_disp_read(base, offset) | set);
+}
+
+/*
+ * Clock functions
+ */
+
+/**
+ * zynqmp_disp_clk_enable - Enable the clock if needed
+ * @clk: clk device
+ * @flag: flag if the clock is enabled
+ *
+ * Enable the clock only if it's not already enabled, as tracked by @flag.
+ *
+ * Return: value from clk_prepare_enable().
+ */
+static int zynqmp_disp_clk_enable(struct clk *clk, bool *flag)
+{
+ int ret = 0;
+
+ if (!*flag) {
+ ret = clk_prepare_enable(clk);
+ if (!ret)
+ *flag = true;
+ }
+
+ return ret;
+}
+
+/**
+ * zynqmp_disp_clk_disable - Disable the clock if needed
+ * @clk: clk device
+ * @flag: flag if the clock is enabled
+ *
+ * Disable the clock only if it's currently enabled, as tracked by @flag.
+ */
+static void zynqmp_disp_clk_disable(struct clk *clk, bool *flag)
+{
+ if (*flag) {
+ clk_disable_unprepare(clk);
+ *flag = false;
+ }
+}
+
+/**
+ * zynqmp_disp_clk_enable_disable - Enable and disable the clock
+ * @clk: clk device
+ * @flag: flag if the clock is enabled
+ *
+ * Briefly enable and then disable the clock. The initial hardware state is
+ * unknown, and this ensures the clock ends up disabled.
+ *
+ * Return: value from clk_prepare_enable().
+ */
+static int zynqmp_disp_clk_enable_disable(struct clk *clk, bool *flag)
+{
+ int ret = 0;
+
+ if (!*flag) {
+ ret = clk_prepare_enable(clk);
+ if (!ret)
+ clk_disable_unprepare(clk);
+ }
+
+ return ret;
+}
+
+/*
+ * Blender functions
+ */
+
+/**
+ * zynqmp_disp_blend_set_output_fmt - Set the output format of the blend
+ * @blend: blend object
+ * @fmt: output format
+ *
+ * Set the output format to @fmt.
+ */
+static void
+zynqmp_disp_blend_set_output_fmt(struct zynqmp_disp_blend *blend, u32 fmt)
+{
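+ /*
+ * Coefficients use a hardware fixed-point format where 0x1000 represents
+ * 1.0 (an assumption inferred from the identity matrix below).
+ */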
+ static const u16 reset_coeffs[] = { 0x1000, 0x0, 0x0,
+ 0x0, 0x1000, 0x0,
+ 0x0, 0x0, 0x1000 };
+ static const u32 reset_offsets[] = { 0x0, 0x0, 0x0 };
+ static const u16 sdtv_coeffs[] = { 0x4c9, 0x864, 0x1d3,
+ 0x7d4d, 0x7ab3, 0x800,
+ 0x800, 0x794d, 0x7eb3 };
+ static const u32 full_range_offsets[] = { 0x0, 0x8000000, 0x8000000 };
+ const u16 *coeffs;
+ const u32 *offsets;
+ u32 offset, i;
+
+ zynqmp_disp_write(blend->base, ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT, fmt);
+ if (fmt == ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_RGB) {
+ coeffs = reset_coeffs;
+ offsets = reset_offsets;
+ } else {
+ /* Hardcoded full-range SDTV values; could be made runtime configurable */
+ coeffs = sdtv_coeffs;
+ offsets = full_range_offsets;
+ }
+
+ offset = ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF0;
+ for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_COEFF; i++)
+ zynqmp_disp_write(blend->base, offset + i * 4, coeffs[i]);
+
+ offset = ZYNQMP_DISP_V_BLEND_LUMA_OUTCSC_OFFSET;
+ for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_OFFSET; i++)
+ zynqmp_disp_write(blend->base, offset + i * 4, offsets[i]);
+}
+
+/**
+ * zynqmp_disp_blend_layer_coeff - Set the coefficients for @layer
+ * @blend: blend object
+ * @layer: layer to set the coefficients for
+ * @on: true to enable the layer, false to disable it
+ *
+ * Depending on the layer format (RGB / YUV, with optional component swap)
+ * and on whether the layer is enabled (@on), set the CSC coefficients for
+ * the given layer @layer accordingly.
+ */
+static void zynqmp_disp_blend_layer_coeff(struct zynqmp_disp_blend *blend,
+ struct zynqmp_disp_layer *layer,
+ bool on)
+{
+ u32 offset, i, s0, s1;
+ u16 sdtv_coeffs[] = { 0x1000, 0x166f, 0x0,
+ 0x1000, 0x7483, 0x7a7f,
+ 0x1000, 0x0, 0x1c5a };
+ u16 sdtv_coeffs_yonly[] = { 0x0, 0x0, 0x1000,
+ 0x0, 0x0, 0x1000,
+ 0x0, 0x0, 0x1000 };
+ u16 swap_coeffs[] = { 0x1000, 0x0, 0x0,
+ 0x0, 0x1000, 0x0,
+ 0x0, 0x0, 0x1000 };
+ u16 null_coeffs[] = { 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0 };
+ u16 *coeffs;
+ u32 sdtv_offsets[] = { 0x0, 0x1800, 0x1800 };
+ u32 sdtv_offsets_yonly[] = { 0x1800, 0x1800, 0x0 };
+ u32 null_offsets[] = { 0x0, 0x0, 0x0 };
+ u32 *offsets;
+
+ if (layer->id == ZYNQMP_DISP_LAYER_VID)
+ offset = ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF0;
+ else
+ offset = ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF0;
+
+ if (!on) {
+ coeffs = null_coeffs;
+ offsets = null_offsets;
+ } else {
+ if (!layer->fmt->rgb) {
+ /*
+ * In case of Y_ONLY formats, pixels are unpacked
+ * differently compared to YCbCr
+ */
+ if (layer->fmt->drm_fmt == DRM_FORMAT_Y8 ||
+ layer->fmt->drm_fmt == DRM_FORMAT_Y10) {
+ coeffs = sdtv_coeffs_yonly;
+ offsets = sdtv_offsets_yonly;
+ } else {
+ coeffs = sdtv_coeffs;
+ offsets = sdtv_offsets;
+ }
+
+ s0 = 1;
+ s1 = 2;
+ } else {
+ coeffs = swap_coeffs;
+ s0 = 0;
+ s1 = 2;
+
+ /* No offset for RGB formats */
+ offsets = null_offsets;
+ }
+
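+ /*
+ * XOR-swap coefficient columns s0 and s1 in place to exchange
+ * R/B for RGB formats, or Cb/Cr for YUV formats.
+ */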
+ if (layer->fmt->swap) {
+ for (i = 0; i < 3; i++) {
+ coeffs[i * 3 + s0] ^= coeffs[i * 3 + s1];
+ coeffs[i * 3 + s1] ^= coeffs[i * 3 + s0];
+ coeffs[i * 3 + s0] ^= coeffs[i * 3 + s1];
+ }
+ }
+ }
+
+ /* Program coefficients. Can be runtime configurable */
+ for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_COEFF; i++)
+ zynqmp_disp_write(blend->base, offset + i * 4, coeffs[i]);
+
+ if (layer->id == ZYNQMP_DISP_LAYER_VID)
+ offset = ZYNQMP_DISP_V_BLEND_LUMA_IN1CSC_OFFSET;
+ else
+ offset = ZYNQMP_DISP_V_BLEND_LUMA_IN2CSC_OFFSET;
+
+ /* Program offsets. Can be runtime configurable */
+ for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_OFFSET; i++)
+ zynqmp_disp_write(blend->base, offset + i * 4, offsets[i]);
+}
+
+/**
+ * zynqmp_disp_blend_layer_enable - Enable a layer
+ * @blend: blend object
+ * @layer: layer to enable
+ *
+ * Enable a layer @layer.
+ */
+static void zynqmp_disp_blend_layer_enable(struct zynqmp_disp_blend *blend,
+ struct zynqmp_disp_layer *layer)
+{
+ u32 reg;
+
+ reg = layer->fmt->rgb ? ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_RGB : 0;
+ reg |= layer->fmt->chroma_sub ?
+ ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_EN_US : 0;
+
+ zynqmp_disp_write(blend->base,
+ ZYNQMP_DISP_V_BLEND_LAYER_CONTROL + layer->offset,
+ reg);
+
+ zynqmp_disp_blend_layer_coeff(blend, layer, true);
+}
+
+/**
+ * zynqmp_disp_blend_layer_disable - Disable a layer
+ * @blend: blend object
+ * @layer: layer to disable
+ *
+ * Disable a layer @layer.
+ */
+static void zynqmp_disp_blend_layer_disable(struct zynqmp_disp_blend *blend,
+ struct zynqmp_disp_layer *layer)
+{
+ zynqmp_disp_write(blend->base,
+ ZYNQMP_DISP_V_BLEND_LAYER_CONTROL + layer->offset, 0);
+
+ zynqmp_disp_blend_layer_coeff(blend, layer, false);
+}
+
+/**
+ * zynqmp_disp_blend_set_bg_color - Set the background color
+ * @blend: blend object
+ * @c0: color component 0
+ * @c1: color component 1
+ * @c2: color component 2
+ *
+ * Set the background color.
+ */
+static void zynqmp_disp_blend_set_bg_color(struct zynqmp_disp_blend *blend,
+ u32 c0, u32 c1, u32 c2)
+{
+ zynqmp_disp_write(blend->base, ZYNQMP_DISP_V_BLEND_BG_CLR_0, c0);
+ zynqmp_disp_write(blend->base, ZYNQMP_DISP_V_BLEND_BG_CLR_1, c1);
+ zynqmp_disp_write(blend->base, ZYNQMP_DISP_V_BLEND_BG_CLR_2, c2);
+}
+
+/**
+ * zynqmp_disp_blend_set_alpha - Set the alpha for blending
+ * @blend: blend object
+ * @alpha: alpha value to be used
+ *
+ * Set the alpha for blending.
+ */
+static void
+zynqmp_disp_blend_set_alpha(struct zynqmp_disp_blend *blend, u32 alpha)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(blend->base,
+ ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA);
+ reg &= ~ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MASK;
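+ /* Bit 0 of this register is the alpha enable bit, so alpha starts at bit 1 */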
+ reg |= alpha << 1;
+ zynqmp_disp_write(blend->base, ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA,
+ reg);
+}
+
+/**
+ * zynqmp_disp_blend_enable_alpha - Enable/disable the global alpha
+ * @blend: blend object
+ * @enable: flag to enable or disable alpha blending
+ *
+ * Enable/disable the global alpha blending based on @enable.
+ */
+static void
+zynqmp_disp_blend_enable_alpha(struct zynqmp_disp_blend *blend, bool enable)
+{
+ if (enable)
+ zynqmp_disp_set(blend->base,
+ ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA, BIT(0));
+ else
+ zynqmp_disp_clr(blend->base,
+ ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA, BIT(0));
+}
+
+/*
+ * List of blend output formats.
+ * The ID / order must be aligned with zynqmp_disp_color_enum.
+ */
+static const struct zynqmp_disp_fmt blend_output_fmts[] = {
+ {
+ .disp_fmt = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_RGB,
+ }, {
+ .disp_fmt = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR444,
+ }, {
+ .disp_fmt = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR422,
+ }, {
+ .disp_fmt = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YONLY,
+ }
+};
+
+/*
+ * AV buffer manager functions
+ */
+
+/* List of video layer formats */
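+/* Index of the YUYV entry in av_buf_vid_fmts[] below */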
+#define ZYNQMP_DISP_AV_BUF_VID_FMT_YUYV 2
+static const struct zynqmp_disp_fmt av_buf_vid_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_VYUY,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_VYUY,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_UYVY,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_VYUY,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUYV,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUYV,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YVYU,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUYV,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV422,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU422,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV444,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU444,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_NV16,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_NV61,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_Y8,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MONO,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_Y10,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YONLY_10,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XBGR8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGBA8880,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XRGB8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGBA8880,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XBGR2101010,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888_10,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XRGB2101010,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888_10,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV420,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU420,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_NV12,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_NV21,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XV15,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420_10,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XV20,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_10,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }
+};
+
+/* List of graphics layer formats */
+static const struct zynqmp_disp_fmt av_buf_gfx_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_ABGR8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA8888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_ARGB8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA8888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_ABGR8888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_ABGR8888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_BGR888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA5551,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA5551,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA5551,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA5551,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA4444,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA4444,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA4444,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA4444,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB565,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB565,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR565,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB565,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ }
+};
+
+/*
+ * List of live formats.
+ * A format is a combination of color, bpc, and Cb-Cr order:
+ * - Color: RGB / YUV444 / YUV422 / Y only
+ * - BPC: 6, 8, 10, 12
+ * - Swap: Cb and Cr swapped
+ * which yields up to 32 bus formats. Only a subset is listed for now.
+ */
+static const struct zynqmp_disp_fmt av_buf_live_fmts[] = {
+ {
+ .bus_fmt = MEDIA_BUS_FMT_RGB666_1X18,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_6 |
+ ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ }, {
+ .bus_fmt = MEDIA_BUS_FMT_RBG888_1X24,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+ ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .bus_fmt = MEDIA_BUS_FMT_UYVY8_1X16,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+ ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .bus_fmt = MEDIA_BUS_FMT_VUY8_1X24,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+ ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV444,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .bus_fmt = MEDIA_BUS_FMT_UYVY10_1X20,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_10 |
+ ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }
+};
+
+/**
+ * zynqmp_disp_av_buf_set_fmt - Set the input formats
+ * @av_buf: av buffer manager
+ * @fmt: formats
+ *
+ * Set the av buffer manager format to @fmt. @fmt should have valid values
+ * for both the video and graphics layers.
+ */
+static void
+zynqmp_disp_av_buf_set_fmt(struct zynqmp_disp_av_buf *av_buf, u32 fmt)
+{
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_FMT, fmt);
+}
+
+/**
+ * zynqmp_disp_av_buf_get_fmt - Get the input formats
+ * @av_buf: av buffer manager
+ *
+ * Get the input formats (which include video and graphics) of the
+ * av buffer manager.
+ *
+ * Return: value of ZYNQMP_DISP_AV_BUF_FMT register.
+ */
+static u32
+zynqmp_disp_av_buf_get_fmt(struct zynqmp_disp_av_buf *av_buf)
+{
+ return zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_FMT);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_live_fmt - Set the live input format
+ * @av_buf: av buffer manager
+ * @fmt: format
+ * @is_vid: flag if it's for the video layer
+ *
+ * Set the live input format to @fmt, which should be a valid hardware value.
+ * @is_vid determines whether it applies to the video or the graphics layer.
+ */
+static void zynqmp_disp_av_buf_set_live_fmt(struct zynqmp_disp_av_buf *av_buf,
+ u32 fmt, bool is_vid)
+{
+ u32 offset;
+
+ if (is_vid)
+ offset = ZYNQMP_DISP_AV_BUF_LIVE_VID_CONFIG;
+ else
+ offset = ZYNQMP_DISP_AV_BUF_LIVE_GFX_CONFIG;
+
+ zynqmp_disp_write(av_buf->base, offset, fmt);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_vid_clock_src - Set the video clock source
+ * @av_buf: av buffer manager
+ * @from_ps: flag if the video clock is from ps
+ *
+ * Set the video clock source based on @from_ps. It can come from either PS or
+ * PL.
+ */
+static void
+zynqmp_disp_av_buf_set_vid_clock_src(struct zynqmp_disp_av_buf *av_buf,
+ bool from_ps)
+{
+ u32 reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC);
+
+ if (from_ps)
+ reg |= ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS;
+ else
+ reg &= ~ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS;
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_vid_clock_src_is_ps - If the PS clock is used
+ * @av_buf: av buffer manager
+ *
+ * Return: true if the video clock comes from the PS, or false.
+ */
+static bool
+zynqmp_disp_av_buf_vid_clock_src_is_ps(struct zynqmp_disp_av_buf *av_buf)
+{
+ u32 reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC);
+
+ return !!(reg & ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_vid_timing_src - Set the video timing source
+ * @av_buf: av buffer manager
+ * @internal: flag if the video timing is generated internally
+ *
+ * Set the video timing source based on @internal. It can come externally or
+ * be generated internally.
+ */
+static void
+zynqmp_disp_av_buf_set_vid_timing_src(struct zynqmp_disp_av_buf *av_buf,
+ bool internal)
+{
+ u32 reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC);
+
+ if (internal)
+ reg |= ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING;
+ else
+ reg &= ~ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING;
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_vid_timing_src_is_int - If internal timing is used
+ * @av_buf: av buffer manager
+ *
+ * Return: true if the internal video timing is used, or false.
+ */
+static bool
+zynqmp_disp_av_buf_vid_timing_src_is_int(struct zynqmp_disp_av_buf *av_buf)
+{
+ u32 reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC);
+
+ return !!(reg & ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_aud_clock_src - Set the audio clock source
+ * @av_buf: av buffer manager
+ * @from_ps: flag if the audio clock is from the PS
+ *
+ * Set the audio clock source based on @from_ps. It can come from either PS or
+ * PL.
+ */
+static void
+zynqmp_disp_av_buf_set_aud_clock_src(struct zynqmp_disp_av_buf *av_buf,
+ bool from_ps)
+{
+ u32 reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC);
+
+ if (from_ps)
+ reg |= ZYNQMP_DISP_AV_BUF_CLK_SRC_AUD_FROM_PS;
+ else
+ reg &= ~ZYNQMP_DISP_AV_BUF_CLK_SRC_AUD_FROM_PS;
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable_buf - Enable buffers
+ * @av_buf: av buffer manager
+ *
+ * Enable all (video and audio) buffers.
+ */
+static void
+zynqmp_disp_av_buf_enable_buf(struct zynqmp_disp_av_buf *av_buf)
+{
+ u32 reg, i;
+
+ reg = ZYNQMP_DISP_AV_BUF_CHBUF_EN;
+ reg |= ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_MAX <<
+ ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT;
+
+ for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_VID_GFX_BUFFERS; i++)
+ zynqmp_disp_write(av_buf->base,
+ ZYNQMP_DISP_AV_BUF_CHBUF + i * 4, reg);
+
+ reg = ZYNQMP_DISP_AV_BUF_CHBUF_EN;
+ reg |= ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_AUD_MAX <<
+ ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT;
+
+ for (; i < ZYNQMP_DISP_AV_BUF_NUM_BUFFERS; i++)
+ zynqmp_disp_write(av_buf->base,
+ ZYNQMP_DISP_AV_BUF_CHBUF + i * 4, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable_buf - Disable buffers
+ * @av_buf: av buffer manager
+ *
+ * Disable all (video and audio) buffers.
+ */
+static void
+zynqmp_disp_av_buf_disable_buf(struct zynqmp_disp_av_buf *av_buf)
+{
+ u32 reg, i;
+
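+ /* Writing FLUSH with the EN bit cleared flushes and disables each buffer */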
+ reg = ZYNQMP_DISP_AV_BUF_CHBUF_FLUSH & ~ZYNQMP_DISP_AV_BUF_CHBUF_EN;
+ for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_BUFFERS; i++)
+ zynqmp_disp_write(av_buf->base,
+ ZYNQMP_DISP_AV_BUF_CHBUF + i * 4, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable_aud - Enable audio
+ * @av_buf: av buffer manager
+ *
+ * Enable all audio buffers.
+ */
+static void
+zynqmp_disp_av_buf_enable_aud(struct zynqmp_disp_av_buf *av_buf)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK;
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MEM;
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN;
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable - Enable the video pipe
+ * @av_buf: av buffer manager
+ *
+ * De-assert the video pipe reset.
+ */
+static void
+zynqmp_disp_av_buf_enable(struct zynqmp_disp_av_buf *av_buf)
+{
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_SRST_REG, 0);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable - Disable the video pipe
+ * @av_buf: av buffer manager
+ *
+ * Assert the video pipe reset.
+ */
+static void
+zynqmp_disp_av_buf_disable(struct zynqmp_disp_av_buf *av_buf)
+{
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_SRST_REG,
+ ZYNQMP_DISP_AV_BUF_SRST_REG_VID_RST);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable_aud - Disable audio
+ * @av_buf: av buffer manager
+ *
+ * Disable all audio buffers.
+ */
+static void
+zynqmp_disp_av_buf_disable_aud(struct zynqmp_disp_av_buf *av_buf)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK;
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_DISABLE;
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN;
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_tpg - Set TPG mode
+ * @av_buf: av buffer manager
+ * @tpg_on: if TPG should be on
+ *
+ * Set the TPG mode based on @tpg_on.
+ */
+static void zynqmp_disp_av_buf_set_tpg(struct zynqmp_disp_av_buf *av_buf,
+ bool tpg_on)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
+ if (tpg_on)
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_PATTERN;
+ else
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_PATTERN;
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable_vid - Enable the video layer buffer
+ * @av_buf: av buffer manager
+ * @layer: layer to enable
+ * @mode: operation mode of layer
+ *
+ * Enable the video/graphics buffer for @layer.
+ */
+static void zynqmp_disp_av_buf_enable_vid(struct zynqmp_disp_av_buf *av_buf,
+ struct zynqmp_disp_layer *layer,
+ enum zynqmp_disp_layer_mode mode)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+ if (layer->id == ZYNQMP_DISP_LAYER_VID) {
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
+ if (mode == ZYNQMP_DISP_LAYER_NONLIVE)
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MEM;
+ else
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_LIVE;
+ } else {
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK;
+ if (mode == ZYNQMP_DISP_LAYER_NONLIVE)
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MEM;
+ else
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_LIVE;
+ }
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable_vid - Disable the video layer buffer
+ * @av_buf: av buffer manager
+ * @layer: layer to disable
+ *
+ * Disable the video/graphics buffer for @layer.
+ */
+static void
+zynqmp_disp_av_buf_disable_vid(struct zynqmp_disp_av_buf *av_buf,
+ struct zynqmp_disp_layer *layer)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+ if (layer->id == ZYNQMP_DISP_LAYER_VID) {
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_NONE;
+ } else {
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK;
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_DISABLE;
+ }
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_init_sf - Initialize scaling factors
+ * @av_buf: av buffer manager
+ * @vid_fmt: video format descriptor
+ * @gfx_fmt: graphics format descriptor
+ *
+ * Initialize scaling factors for both video and graphics layers.
+ * If the format descriptor is NULL, the function skips the programming.
+ */
+static void zynqmp_disp_av_buf_init_sf(struct zynqmp_disp_av_buf *av_buf,
+ const struct zynqmp_disp_fmt *vid_fmt,
+ const struct zynqmp_disp_fmt *gfx_fmt)
+{
+ unsigned int i;
+ u32 offset;
+
+ if (gfx_fmt) {
+ offset = ZYNQMP_DISP_AV_BUF_GFX_COMP0_SF;
+ for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_SF; i++)
+ zynqmp_disp_write(av_buf->base, offset + i * 4,
+ gfx_fmt->sf[i]);
+ }
+
+ if (vid_fmt) {
+ offset = ZYNQMP_DISP_AV_BUF_VID_COMP0_SF;
+ for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_SF; i++)
+ zynqmp_disp_write(av_buf->base, offset + i * 4,
+ vid_fmt->sf[i]);
+ }
+}
+
+/**
+ * zynqmp_disp_av_buf_init_live_sf - Initialize scaling factors for live source
+ * @av_buf: av buffer manager
+ * @fmt: format descriptor
+ * @is_vid: flag if this is for video layer
+ *
+ * Initialize scaling factors for live source.
+ */
+static void zynqmp_disp_av_buf_init_live_sf(struct zynqmp_disp_av_buf *av_buf,
+ const struct zynqmp_disp_fmt *fmt,
+ bool is_vid)
+{
+ unsigned int i;
+ u32 offset;
+
+ if (is_vid)
+ offset = ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP0_SF;
+ else
+ offset = ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP0_SF;
+
+ for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_SF; i++)
+ zynqmp_disp_write(av_buf->base, offset + i * 4,
+ fmt->sf[i]);
+}
+
+/*
+ * Audio functions
+ */
+
+/**
+ * zynqmp_disp_aud_init - Initialize the audio
+ * @aud: audio
+ *
+ * Initialize the audio with the default mixer volume. De-asserting the soft
+ * reset initializes the audio state.
+ */
+static void zynqmp_disp_aud_init(struct zynqmp_disp_aud *aud)
+{
+ /* Clear the audio soft reset register as it's a non-reset flop */
+ zynqmp_disp_write(aud->base, ZYNQMP_DISP_AUD_SOFT_RESET, 0);
+ zynqmp_disp_write(aud->base, ZYNQMP_DISP_AUD_MIXER_VOLUME,
+ ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE);
+}
+
+/**
+ * zynqmp_disp_aud_deinit - De-initialize the audio
+ * @aud: audio
+ *
+ * Put the audio in reset.
+ */
+static void zynqmp_disp_aud_deinit(struct zynqmp_disp_aud *aud)
+{
+ zynqmp_disp_set(aud->base, ZYNQMP_DISP_AUD_SOFT_RESET,
+ ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST);
+}
+
+/*
+ * ZynqMP Display layer functions
+ */
+
+/**
+ * zynqmp_disp_layer_check_size - Verify width and height for the layer
+ * @disp: Display subsystem
+ * @layer: layer
+ * @width: width
+ * @height: height
+ *
+ * The Display subsystem has the limitation that both layers must have
+ * identical dimensions. This function verifies that @width and @height match
+ * the other enabled layer, and stores them in @layer.
+ *
+ * Return: 0 on success, or -EINVAL if the width and/or height is invalid.
+ */
+static int zynqmp_disp_layer_check_size(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ u32 width, u32 height)
+{
+ struct zynqmp_disp_layer *other = layer->other;
+
+ if (other->enabled && (other->w != width || other->h != height)) {
+ dev_err(disp->dev, "Layer width:height must be %d:%d\n",
+ other->w, other->h);
+ return -EINVAL;
+ }
+
+ layer->w = width;
+ layer->h = height;
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_map_fmt - Find the Display subsystem format for a given DRM format
+ * @fmts: format table to look up
+ * @size: size of the table @fmts
+ * @drm_fmt: DRM format to search
+ *
+ * Search a Display subsystem format corresponding to the given DRM format
+ * @drm_fmt, and return the format descriptor which contains the Display
+ * subsystem format value.
+ *
+ * Return: a Display subsystem format descriptor on success, or NULL.
+ */
+static const struct zynqmp_disp_fmt *
+zynqmp_disp_map_fmt(const struct zynqmp_disp_fmt fmts[],
+ unsigned int size, uint32_t drm_fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ if (fmts[i].drm_fmt == drm_fmt)
+ return &fmts[i];
+
+ return NULL;
+}
+
+/**
+ * zynqmp_disp_layer_set_fmt - Set the format of the layer
+ * @disp: Display subsystem
+ * @layer: layer to set the format
+ * @drm_fmt: DRM format to set
+ *
+ * Set the format of the given layer to @drm_fmt.
+ *
+ * Return: 0 on success. -EINVAL if @drm_fmt is not supported by the layer.
+ */
+static int zynqmp_disp_layer_set_fmt(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ uint32_t drm_fmt)
+{
+ const struct zynqmp_disp_fmt *fmt;
+ const struct zynqmp_disp_fmt *vid_fmt = NULL, *gfx_fmt = NULL;
+ u32 size, fmts, mask;
+
+ if (layer->id == ZYNQMP_DISP_LAYER_VID) {
+ size = ARRAY_SIZE(av_buf_vid_fmts);
+ mask = ~ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MASK;
+ fmt = zynqmp_disp_map_fmt(av_buf_vid_fmts, size, drm_fmt);
+ vid_fmt = fmt;
+ } else {
+ size = ARRAY_SIZE(av_buf_gfx_fmts);
+ mask = ~ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_MASK;
+ fmt = zynqmp_disp_map_fmt(av_buf_gfx_fmts, size, drm_fmt);
+ gfx_fmt = fmt;
+ }
+
+ if (!fmt)
+ return -EINVAL;
+
+ fmts = zynqmp_disp_av_buf_get_fmt(&disp->av_buf);
+ fmts &= mask;
+ fmts |= fmt->disp_fmt;
+ zynqmp_disp_av_buf_set_fmt(&disp->av_buf, fmts);
+ zynqmp_disp_av_buf_init_sf(&disp->av_buf, vid_fmt, gfx_fmt);
+ layer->fmt = fmt;
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_map_live_fmt - Find the hardware format for a given bus format
+ * @fmts: format table to look up
+ * @size: size of the table @fmts
+ * @bus_fmt: bus format to search
+ *
+ * Search a Display subsystem format corresponding to the given bus format
+ * @bus_fmt, and return the format descriptor which contains the Display
+ * subsystem format value.
+ *
+ * Return: a Display subsystem format descriptor on success, or NULL.
+ */
+static const struct zynqmp_disp_fmt *
+zynqmp_disp_map_live_fmt(const struct zynqmp_disp_fmt fmts[],
+ unsigned int size, uint32_t bus_fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ if (fmts[i].bus_fmt == bus_fmt)
+ return &fmts[i];
+
+ return NULL;
+}
+
+/**
+ * zynqmp_disp_layer_set_live_fmt - Set the live format of the layer
+ * @disp: Display subsystem
+ * @layer: layer to set the format
+ * @bus_fmt: bus format to set
+ *
+ * Set the live format of the given layer to @bus_fmt.
+ *
+ * Return: 0 on success. -EINVAL if @bus_fmt is not supported by the layer.
+ */
+static int zynqmp_disp_layer_set_live_fmt(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ uint32_t bus_fmt)
+{
+ const struct zynqmp_disp_fmt *fmt;
+ u32 size;
+ bool is_vid = layer->id == ZYNQMP_DISP_LAYER_VID;
+
+ size = ARRAY_SIZE(av_buf_live_fmts);
+ fmt = zynqmp_disp_map_live_fmt(av_buf_live_fmts, size, bus_fmt);
+ if (!fmt)
+ return -EINVAL;
+
+ zynqmp_disp_av_buf_set_live_fmt(&disp->av_buf, fmt->disp_fmt, is_vid);
+ zynqmp_disp_av_buf_init_live_sf(&disp->av_buf, fmt, is_vid);
+ layer->fmt = fmt;
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_layer_set_tpg - Enable or disable TPG
+ * @disp: Display subsystem
+ * @layer: Video layer
+ * @tpg_on: flag if TPG needs to be enabled or disabled
+ *
+ * Enable / disable the TPG mode on the video layer @layer depending on
+ * @tpg_on. The video layer must be disabled before TPG can be enabled.
+ *
+ * Return: 0 on success. -ENODEV if @layer is not the video layer. -EIO if
+ * the video layer is enabled.
+ */
+static int zynqmp_disp_layer_set_tpg(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ bool tpg_on)
+{
+ if (layer->id != ZYNQMP_DISP_LAYER_VID) {
+ dev_err(disp->dev,
+ "only the video layer has the tpg mode\n");
+ return -ENODEV;
+ }
+
+ if (layer->enabled) {
+ dev_err(disp->dev,
+ "the video layer should be disabled for tpg mode\n");
+ return -EIO;
+ }
+
+ zynqmp_disp_blend_layer_coeff(&disp->blend, layer, tpg_on);
+ zynqmp_disp_av_buf_set_tpg(&disp->av_buf, tpg_on);
+ disp->tpg_on = tpg_on;
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_layer_get_tpg - Get the TPG mode status
+ * @disp: Display subsystem
+ * @layer: Video layer
+ *
+ * Return whether the TPG is enabled.
+ *
+ * Return: true if TPG is on, otherwise false
+ */
+static bool zynqmp_disp_layer_get_tpg(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer)
+{
+ return disp->tpg_on;
+}
+
+/**
+ * zynqmp_disp_layer_get_fmts - Get the supported DRM formats of the layer
+ * @disp: Display subsystem
+ * @layer: layer to get the formats
+ * @drm_fmts: pointer to the returned array of DRM format codes
+ * @num_fmts: pointer to number of returned DRM formats
+ *
+ * Get the supported DRM formats of the given layer.
+ */
+static void zynqmp_disp_layer_get_fmts(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ u32 **drm_fmts, unsigned int *num_fmts)
+{
+ *drm_fmts = layer->drm_fmts;
+ *num_fmts = layer->num_fmts;
+}
+
+/**
+ * zynqmp_disp_layer_enable - Enable the layer
+ * @disp: Display subsystem
+ * @layer: layer to enable
+ * @mode: operation mode
+ *
+ * Enable the layer @layer.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+static int zynqmp_disp_layer_enable(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ enum zynqmp_disp_layer_mode mode)
+{
+ struct device *dev = disp->dev;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_ctrl_flags flags;
+ unsigned int i;
+
+ if (layer->enabled && layer->mode != mode) {
+ dev_err(dev, "layer is already enabled in different mode\n");
+ return -EBUSY;
+ }
+
+ zynqmp_disp_av_buf_enable_vid(&disp->av_buf, layer, mode);
+ zynqmp_disp_blend_layer_enable(&disp->blend, layer);
+
+ layer->enabled = true;
+ layer->mode = mode;
+
+ if (mode == ZYNQMP_DISP_LAYER_LIVE)
+ return 0;
+
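+ /* Non-live mode: submit one interleaved transfer per active channel */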
+ for (i = 0; i < ZYNQMP_DISP_MAX_NUM_SUB_PLANES; i++) {
+ struct zynqmp_disp_layer_dma *dma = &layer->dma[i];
+
+ if (dma->chan && dma->is_active) {
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ desc = dmaengine_prep_interleaved_dma(dma->chan,
+ &dma->xt, flags);
+ if (!desc) {
+ dev_err(dev, "failed to prep DMA descriptor\n");
+ return -ENOMEM;
+ }
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(dma->chan);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_layer_disable - Disable the layer
+ * @disp: Display subsystem
+ * @layer: layer to disable
+ * @mode: operation mode
+ *
+ * Disable the layer @layer.
+ *
+ * Return: 0 on success, or -EBUSY if the layer is in a different mode.
+ */
+static int zynqmp_disp_layer_disable(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ enum zynqmp_disp_layer_mode mode)
+{
+ struct device *dev = disp->dev;
+ unsigned int i;
+
+ if (layer->mode != mode) {
+ dev_err(dev, "the layer is operating in different mode\n");
+ return -EBUSY;
+ }
+
+ for (i = 0; i < ZYNQMP_DISP_MAX_NUM_SUB_PLANES; i++)
+ if (layer->dma[i].chan && layer->dma[i].is_active)
+ dmaengine_terminate_sync(layer->dma[i].chan);
+
+ zynqmp_disp_av_buf_disable_vid(&disp->av_buf, layer);
+ zynqmp_disp_blend_layer_disable(&disp->blend, layer);
+ layer->enabled = false;
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_layer_request_dma - Request DMA channels for a layer
+ * @disp: Display subsystem
+ * @layer: layer to request DMA channels
+ * @name: identifier string for layer type
+ *
+ * Request DMA engine channels for corresponding layer.
+ *
+ * Return: 0 on success, or the error value from of_dma_request_slave_channel().
+ */
+static int
+zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer, const char *name)
+{
+ struct zynqmp_disp_layer_dma *dma;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < layer->num_chan; i++) {
+ char temp[16];
+
+ dma = &layer->dma[i];
+ snprintf(temp, sizeof(temp), "%s%d", name, i);
+ dma->chan = of_dma_request_slave_channel(layer->of_node,
+ temp);
+ if (IS_ERR(dma->chan)) {
+ dev_err(disp->dev, "failed to request dma channel\n");
+ ret = PTR_ERR(dma->chan);
+ dma->chan = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_layer_release_dma - Release DMA channels for a layer
+ * @disp: Display subsystem
+ * @layer: layer to release DMA channels
+ *
+ * Release the dma channels associated with @layer.
+ */
+static void zynqmp_disp_layer_release_dma(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer)
+{
+ unsigned int i;
+
+ for (i = 0; i < layer->num_chan; i++) {
+ if (layer->dma[i].chan) {
+ /* Make sure the channel is terminated before release */
+ dmaengine_terminate_all(layer->dma[i].chan);
+ dma_release_channel(layer->dma[i].chan);
+ }
+ }
+}
+
+/**
+ * zynqmp_disp_layer_is_live - if any layer is live
+ * @disp: Display subsystem
+ *
+ * Return: true if any layer is live
+ */
+static bool zynqmp_disp_layer_is_live(struct zynqmp_disp *disp)
+{
+ unsigned int i;
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+ if (disp->layers[i].enabled &&
+ disp->layers[i].mode == ZYNQMP_DISP_LAYER_LIVE)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * zynqmp_disp_layer_is_enabled - if any layer is enabled
+ * @disp: Display subsystem
+ *
+ * Return: true if any layer is enabled
+ */
+static bool zynqmp_disp_layer_is_enabled(struct zynqmp_disp *disp)
+{
+ unsigned int i;
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++)
+ if (disp->layers[i].enabled)
+ return true;
+
+ return false;
+}
+
+/**
+ * zynqmp_disp_layer_destroy - Destroy all layers
+ * @disp: Display subsystem
+ *
+ * Destroy all layers.
+ */
+static void zynqmp_disp_layer_destroy(struct zynqmp_disp *disp)
+{
+ unsigned int i;
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+ zynqmp_disp_layer_release_dma(disp, &disp->layers[i]);
+ if (disp->layers[i].of_node)
+ of_node_put(disp->layers[i].of_node);
+ }
+}
+
+/**
+ * zynqmp_disp_layer_create - Create all layers
+ * @disp: Display subsystem
+ *
+ * Create all layers.
+ *
+ * Return: 0 on success, otherwise the error code from the failing call
+ */
+static int zynqmp_disp_layer_create(struct zynqmp_disp *disp)
+{
+ struct zynqmp_disp_layer *layer;
+ unsigned int i;
+ static const int num_chans[ZYNQMP_DISP_NUM_LAYERS] = { 3, 1 };
+ const char * const dma_name[] = { "vid", "gfx" };
+ int ret;
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+ char temp[16];
+
+ layer = &disp->layers[i];
+ layer->id = i;
+ layer->offset = i * 4;
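+ /* With exactly two layers, !i selects the peer layer */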
+ layer->other = &disp->layers[!i];
+ layer->num_chan = num_chans[i];
+ snprintf(temp, sizeof(temp), "%s-layer", dma_name[i]);
+ layer->of_node = of_get_child_by_name(disp->dev->of_node, temp);
+ if (!layer->of_node) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = zynqmp_disp_layer_request_dma(disp, layer, dma_name[i]);
+ if (ret)
+ goto err;
+ layer->disp = disp;
+ }
+
+ return 0;
+
+err:
+ zynqmp_disp_layer_destroy(disp);
+ return ret;
+}
+
+/*
+ * ZynqMP Display internal functions
+ */
+
+/*
+ * Output format enumeration.
+ * The IDs must stay aligned with blend_output_fmts[], and the strings with
+ * what zynqmp_dp_set_color() decodes.
+ */
+static struct drm_prop_enum_list zynqmp_disp_color_enum[] = {
+ { 0, "rgb" },
+ { 1, "ycrcb444" },
+ { 2, "ycrcb422" },
+ { 3, "yonly" },
+};
+
+/**
+ * zynqmp_disp_set_output_fmt - Set the output format
+ * @disp: Display subsystem
+ * @id: the format ID. Refer to zynqmp_disp_color_enum[].
+ *
+ * This function sets the output format of the display / blender as well as
+ * the format of DP controller. The @id should be aligned with
+ * zynqmp_disp_color_enum.
+ */
+static void
+zynqmp_disp_set_output_fmt(struct zynqmp_disp *disp, unsigned int id)
+{
+ const struct zynqmp_disp_fmt *fmt = &blend_output_fmts[id];
+
+ zynqmp_dp_set_color(disp->dpsub->dp, zynqmp_disp_color_enum[id].name);
+ zynqmp_disp_blend_set_output_fmt(&disp->blend, fmt->disp_fmt);
+}
+
+/**
+ * zynqmp_disp_set_bg_color - Set the background color
+ * @disp: Display subsystem
+ * @c0: color component 0
+ * @c1: color component 1
+ * @c2: color component 2
+ *
+ * Set the background color with given color components (@c0, @c1, @c2).
+ */
+static void zynqmp_disp_set_bg_color(struct zynqmp_disp *disp,
+ u32 c0, u32 c1, u32 c2)
+{
+ zynqmp_disp_blend_set_bg_color(&disp->blend, c0, c1, c2);
+}
+
+/**
+ * zynqmp_disp_set_alpha - Set the alpha value
+ * @disp: Display subsystem
+ * @alpha: alpha value to set
+ *
+ * Set the alpha value for blending.
+ */
+static void zynqmp_disp_set_alpha(struct zynqmp_disp *disp, u32 alpha)
+{
+ disp->alpha = alpha;
+ zynqmp_disp_blend_set_alpha(&disp->blend, alpha);
+}
+
+/**
+ * zynqmp_disp_get_alpha - Get the alpha value
+ * @disp: Display subsystem
+ *
+ * Get the alpha value for blending.
+ *
+ * Return: current alpha value.
+ */
+static u32 zynqmp_disp_get_alpha(struct zynqmp_disp *disp)
+{
+ return disp->alpha;
+}
+
+/**
+ * zynqmp_disp_set_g_alpha - Enable/disable the global alpha blending
+ * @disp: Display subsystem
+ * @enable: flag to enable or disable alpha blending
+ *
+ * Enable or disable the global alpha blending based on @enable.
+ */
+static void zynqmp_disp_set_g_alpha(struct zynqmp_disp *disp, bool enable)
+{
+ disp->alpha_en = enable;
+ zynqmp_disp_blend_enable_alpha(&disp->blend, enable);
+}
+
+/**
+ * zynqmp_disp_get_g_alpha - Get the global alpha status
+ * @disp: Display subsystem
+ *
+ * Get the global alpha status.
+ *
+ * Return: true if global alpha is enabled, or false.
+ */
+static bool zynqmp_disp_get_g_alpha(struct zynqmp_disp *disp)
+{
+ return disp->alpha_en;
+}
+
+/**
+ * zynqmp_disp_enable - Enable the Display subsystem
+ * @disp: Display subsystem
+ *
+ * Enable the Display subsystem.
+ */
+static void zynqmp_disp_enable(struct zynqmp_disp *disp)
+{
+ bool live;
+
+ if (disp->enabled)
+ return;
+
+ zynqmp_disp_av_buf_enable(&disp->av_buf);
+ /* Choose clock source based on the DT clock handle */
+ zynqmp_disp_av_buf_set_vid_clock_src(&disp->av_buf, !!disp->_ps_pclk);
+ zynqmp_disp_av_buf_set_aud_clock_src(&disp->av_buf, !!disp->_ps_audclk);
+ live = zynqmp_disp_layer_is_live(disp);
+ zynqmp_disp_av_buf_set_vid_timing_src(&disp->av_buf, !live);
+ zynqmp_disp_av_buf_enable_buf(&disp->av_buf);
+ zynqmp_disp_av_buf_enable_aud(&disp->av_buf);
+ zynqmp_disp_aud_init(&disp->aud);
+ disp->enabled = true;
+}
+
+/**
+ * zynqmp_disp_disable - Disable the Display subsystem
+ * @disp: Display subsystem
+ * @force: flag to disable forcefully
+ *
+ * Disable the Display subsystem.
+ */
+static void zynqmp_disp_disable(struct zynqmp_disp *disp, bool force)
+{
+ struct drm_crtc *crtc = &disp->xlnx_crtc.crtc;
+
+ if (!force && (!disp->enabled || zynqmp_disp_layer_is_enabled(disp)))
+ return;
+
+ zynqmp_disp_aud_deinit(&disp->aud);
+ zynqmp_disp_av_buf_disable_aud(&disp->av_buf);
+ zynqmp_disp_av_buf_disable_buf(&disp->av_buf);
+ zynqmp_disp_av_buf_disable(&disp->av_buf);
+
+ /* Mark the flip as done since the CRTC is disabled anyway */
+ if (crtc->state->event) {
+ complete_all(crtc->state->event->base.completion);
+ crtc->state->event = NULL;
+ }
+
+ disp->enabled = false;
+}
+
+/**
+ * zynqmp_disp_init - Initialize the Display subsystem states
+ * @disp: Display subsystem
+ *
+ * Some hardware states do not reset to the desired values. For example, the
+ * output select register resets to the live source. This function programs
+ * such register states to the desired defaults.
+ */
+static void zynqmp_disp_init(struct zynqmp_disp *disp)
+{
+ struct zynqmp_disp_layer *layer;
+ unsigned int i;
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+ layer = &disp->layers[i];
+ zynqmp_disp_av_buf_disable_vid(&disp->av_buf, layer);
+ }
+}
+
+/*
+ * ZynqMP Display external functions for zynqmp_dp
+ */
+
+/**
+ * zynqmp_disp_handle_vblank - Handle the vblank event
+ * @disp: Display subsystem
+ *
+ * This function handles the vblank interrupt, and sends an event to the
+ * CRTC object. It is called by the DP vblank interrupt handler.
+ */
+void zynqmp_disp_handle_vblank(struct zynqmp_disp *disp)
+{
+ struct drm_crtc *crtc = &disp->xlnx_crtc.crtc;
+
+ drm_crtc_handle_vblank(crtc);
+}
+
+/**
+ * zynqmp_disp_get_apb_clk_rate - Get the current APB clock rate
+ * @disp: Display subsystem
+ *
+ * Return: the current APB clock rate.
+ */
+unsigned int zynqmp_disp_get_apb_clk_rate(struct zynqmp_disp *disp)
+{
+ return clk_get_rate(disp->aclk);
+}
+
+/**
+ * zynqmp_disp_aud_enabled - If the audio is enabled
+ * @disp: Display subsystem
+ *
+ * Report whether audio is enabled, based on the presence of the audio clock.
+ *
+ * Return: true if audio is enabled, or false.
+ */
+bool zynqmp_disp_aud_enabled(struct zynqmp_disp *disp)
+{
+ return !!disp->audclk;
+}
+
+/**
+ * zynqmp_disp_get_aud_clk_rate - Get the current audio clock rate
+ * @disp: Display subsystem
+ *
+ * Return: the current audio clock rate, or 0 if audio is disabled.
+ */
+unsigned int zynqmp_disp_get_aud_clk_rate(struct zynqmp_disp *disp)
+{
+ if (!zynqmp_disp_aud_enabled(disp))
+ return 0;
+ return clk_get_rate(disp->audclk);
+}
+
+/**
+ * zynqmp_disp_get_crtc_mask - Return the CRTC bit mask
+ * @disp: Display subsystem
+ *
+ * Return: the crtc mask of the zynqmp_disp CRTC.
+ */
+uint32_t zynqmp_disp_get_crtc_mask(struct zynqmp_disp *disp)
+{
+ return drm_crtc_mask(&disp->xlnx_crtc.crtc);
+}
+
+/*
+ * Xlnx bridge functions
+ */
+
+static inline struct zynqmp_disp_layer *
+bridge_to_layer(struct xlnx_bridge *bridge)
+{
+ return container_of(bridge, struct zynqmp_disp_layer, bridge);
+}
+
+static int zynqmp_disp_bridge_enable(struct xlnx_bridge *bridge)
+{
+ struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+ struct zynqmp_disp *disp = layer->disp;
+ int ret;
+
+ if (!disp->_pl_pclk) {
+ dev_err(disp->dev, "PL clock is required for live\n");
+ return -ENODEV;
+ }
+
+ ret = zynqmp_disp_layer_check_size(disp, layer, layer->w, layer->h);
+ if (ret)
+ return ret;
+
+ zynqmp_disp_set_g_alpha(disp, disp->alpha_en);
+ zynqmp_disp_set_alpha(disp, disp->alpha);
+ ret = zynqmp_disp_layer_enable(layer->disp, layer,
+ ZYNQMP_DISP_LAYER_LIVE);
+ if (ret)
+ return ret;
+
+ if (layer->id == ZYNQMP_DISP_LAYER_GFX && disp->tpg_on) {
+ layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+ zynqmp_disp_layer_set_tpg(disp, layer, disp->tpg_on);
+ }
+
+ if (zynqmp_disp_av_buf_vid_timing_src_is_int(&disp->av_buf) ||
+ zynqmp_disp_av_buf_vid_clock_src_is_ps(&disp->av_buf)) {
+ dev_info(disp->dev,
+ "Disabling the pipeline to change the clk/timing src");
+ zynqmp_disp_disable(disp, true);
+ zynqmp_disp_av_buf_set_vid_clock_src(&disp->av_buf, false);
+ zynqmp_disp_av_buf_set_vid_timing_src(&disp->av_buf, false);
+ }
+
+ zynqmp_disp_enable(disp);
+
+ return 0;
+}
+
+static void zynqmp_disp_bridge_disable(struct xlnx_bridge *bridge)
+{
+ struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+ struct zynqmp_disp *disp = layer->disp;
+
+ zynqmp_disp_disable(disp, false);
+
+ zynqmp_disp_layer_disable(disp, layer, ZYNQMP_DISP_LAYER_LIVE);
+ if (layer->id == ZYNQMP_DISP_LAYER_VID && disp->tpg_on)
+ zynqmp_disp_layer_set_tpg(disp, layer, disp->tpg_on);
+
+ if (!zynqmp_disp_layer_is_live(disp)) {
+ dev_info(disp->dev,
+ "Disabling the pipeline to change the clk/timing src");
+ zynqmp_disp_disable(disp, true);
+ zynqmp_disp_av_buf_set_vid_clock_src(&disp->av_buf, true);
+ zynqmp_disp_av_buf_set_vid_timing_src(&disp->av_buf, true);
+ if (zynqmp_disp_layer_is_enabled(disp))
+ zynqmp_disp_enable(disp);
+ }
+}
+
+static int zynqmp_disp_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+ int ret;
+
+ ret = zynqmp_disp_layer_check_size(layer->disp, layer, width, height);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_disp_layer_set_live_fmt(layer->disp, layer, bus_fmt);
+ if (ret)
+ dev_err(layer->disp->dev, "failed to set live fmt\n");
+
+ return ret;
+}
+
+static int zynqmp_disp_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+
+ *fmts = layer->bus_fmts;
+ *count = layer->num_bus_fmts;
+
+ return 0;
+}
+
+/*
+ * DRM plane functions
+ */
+
+static inline struct zynqmp_disp_layer *plane_to_layer(struct drm_plane *plane)
+{
+ return container_of(plane, struct zynqmp_disp_layer, plane);
+}
+
+static int zynqmp_disp_plane_enable(struct drm_plane *plane)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+ struct zynqmp_disp *disp = layer->disp;
+ int ret;
+
+ zynqmp_disp_set_g_alpha(disp, disp->alpha_en);
+ zynqmp_disp_set_alpha(disp, disp->alpha);
+ ret = zynqmp_disp_layer_enable(layer->disp, layer,
+ ZYNQMP_DISP_LAYER_NONLIVE);
+ if (ret)
+ return ret;
+
+ if (layer->id == ZYNQMP_DISP_LAYER_GFX && disp->tpg_on) {
+ layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+ zynqmp_disp_layer_set_tpg(disp, layer, disp->tpg_on);
+ }
+
+ return 0;
+}
+
+static int zynqmp_disp_plane_disable(struct drm_plane *plane)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+ struct zynqmp_disp *disp = layer->disp;
+
+ zynqmp_disp_layer_disable(disp, layer, ZYNQMP_DISP_LAYER_NONLIVE);
+ if (layer->id == ZYNQMP_DISP_LAYER_VID && disp->tpg_on)
+ zynqmp_disp_layer_set_tpg(disp, layer, disp->tpg_on);
+
+ return 0;
+}
+
+static int zynqmp_disp_plane_mode_set(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+ const struct drm_format_info *info = fb->format;
+ struct device *dev = layer->disp->dev;
+ dma_addr_t paddr;
+ unsigned int i;
+ int ret;
+
+ if (!info) {
+ dev_err(dev, "No format info found\n");
+ return -EINVAL;
+ }
+
+ ret = zynqmp_disp_layer_check_size(layer->disp, layer, src_w, src_h);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = src_w / (i ? info->hsub : 1);
+ unsigned int height = src_h / (i ? info->vsub : 1);
+ int width_bytes;
+
+ paddr = drm_fb_cma_get_gem_addr(fb, plane->state, i);
+ if (!paddr) {
+ dev_err(dev, "failed to get a paddr\n");
+ return -EINVAL;
+ }
+
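+ /*
+ * Program the interleaved DMA template: one frame of 'height'
+ * lines, each 'width_bytes' of payload, with the inter-chunk
+ * gap (icg) skipping the pitch padding at the end of each line.
+ */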
+ layer->dma[i].xt.numf = height;
+ width_bytes = drm_format_plane_width_bytes(info, i, width);
+ layer->dma[i].sgl[0].size = width_bytes;
+ layer->dma[i].sgl[0].icg = fb->pitches[i] -
+ layer->dma[i].sgl[0].size;
+ layer->dma[i].xt.src_start = paddr;
+ layer->dma[i].xt.frame_size = 1;
+ layer->dma[i].xt.dir = DMA_MEM_TO_DEV;
+ layer->dma[i].xt.src_sgl = true;
+ layer->dma[i].xt.dst_sgl = false;
+ layer->dma[i].is_active = true;
+ }
+
+ for (; i < ZYNQMP_DISP_MAX_NUM_SUB_PLANES; i++)
+ layer->dma[i].is_active = false;
+
+ ret = zynqmp_disp_layer_set_fmt(layer->disp, layer, info->format);
+ if (ret)
+ dev_err(dev, "failed to set dp_sub layer fmt\n");
+
+ return ret;
+}
+
+static void zynqmp_disp_plane_destroy(struct drm_plane *plane)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+
+ xlnx_bridge_unregister(&layer->bridge);
+ drm_plane_cleanup(plane);
+}
+
+static int
+zynqmp_disp_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property, u64 val)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+ struct zynqmp_disp *disp = layer->disp;
+ int ret = 0;
+
+ if (property == disp->g_alpha_prop)
+ zynqmp_disp_set_alpha(disp, val);
+ else if (property == disp->g_alpha_en_prop)
+ zynqmp_disp_set_g_alpha(disp, val);
+ else if (property == disp->tpg_prop)
+ ret = zynqmp_disp_layer_set_tpg(disp, layer, val);
+ else
+ return -EINVAL;
+
+ return ret;
+}
+
+static int
+zynqmp_disp_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+ struct zynqmp_disp *disp = layer->disp;
+ int ret = 0;
+
+ if (property == disp->g_alpha_prop)
+ *val = zynqmp_disp_get_alpha(disp);
+ else if (property == disp->g_alpha_en_prop)
+ *val = zynqmp_disp_get_g_alpha(disp);
+ else if (property == disp->tpg_prop)
+ *val = zynqmp_disp_layer_get_tpg(disp, layer);
+ else
+ return -EINVAL;
+
+ return ret;
+}
+
+static int
+zynqmp_disp_plane_atomic_update_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret;
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret)
+ goto fail;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+ plane_state->crtc_w = crtc_w;
+ plane_state->crtc_h = crtc_h;
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+ plane_state->src_w = src_w;
+ plane_state->src_h = src_h;
+
+ if (plane == crtc->cursor)
+ state->legacy_cursor_update = true;
+
+ /* Do async-update if possible */
+ state->async_update = !drm_atomic_helper_async_check(plane->dev, state);
+ ret = drm_atomic_commit(state);
+fail:
+ drm_atomic_state_put(state);
+ return ret;
+}
+
+static struct drm_plane_funcs zynqmp_disp_plane_funcs = {
+ .update_plane = zynqmp_disp_plane_atomic_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .atomic_set_property = zynqmp_disp_plane_atomic_set_property,
+ .atomic_get_property = zynqmp_disp_plane_atomic_get_property,
+ .destroy = zynqmp_disp_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static void
+zynqmp_disp_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ int ret;
+
+ if (!plane->state->crtc || !plane->state->fb)
+ return;
+
+ if (plane->state->fb == old_state->fb)
+ return;
+
+ if (old_state->fb &&
+ old_state->fb->format->format != plane->state->fb->format->format)
+ zynqmp_disp_plane_disable(plane);
+
+ ret = zynqmp_disp_plane_mode_set(plane, plane->state->fb,
+ plane->state->crtc_x,
+ plane->state->crtc_y,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
+ if (ret)
+ return;
+
+ zynqmp_disp_plane_enable(plane);
+}
+
+static void
+zynqmp_disp_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ zynqmp_disp_plane_disable(plane);
+}
+
+static int zynqmp_disp_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ return 0;
+}
+
+static void
+zynqmp_disp_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ int ret;
+
+ if (plane->state->fb == new_state->fb)
+ return;
+
+ if (plane->state->fb &&
+ plane->state->fb->format->format != new_state->fb->format->format)
+ zynqmp_disp_plane_disable(plane);
+
+ /* Update the current state with new configurations */
+ drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+ plane->state->crtc = new_state->crtc;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->crtc_h = new_state->crtc_h;
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_w = new_state->src_w;
+ plane->state->src_h = new_state->src_h;
+ plane->state->state = new_state->state;
+
+ ret = zynqmp_disp_plane_mode_set(plane, plane->state->fb,
+ plane->state->crtc_x,
+ plane->state->crtc_y,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
+ if (ret)
+ return;
+
+ zynqmp_disp_plane_enable(plane);
+}
+
+static const struct drm_plane_helper_funcs zynqmp_disp_plane_helper_funcs = {
+ .atomic_update = zynqmp_disp_plane_atomic_update,
+ .atomic_disable = zynqmp_disp_plane_atomic_disable,
+ .atomic_async_check = zynqmp_disp_plane_atomic_async_check,
+ .atomic_async_update = zynqmp_disp_plane_atomic_async_update,
+};
+
+static int zynqmp_disp_create_plane(struct zynqmp_disp *disp)
+{
+ struct zynqmp_disp_layer *layer;
+ unsigned int i;
+ u32 *fmts = NULL;
+ unsigned int num_fmts = 0;
+ enum drm_plane_type type;
+ int ret;
+
+ /* The graphics layer is primary, and the video layer is overlay */
+ type = DRM_PLANE_TYPE_OVERLAY;
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+ layer = &disp->layers[i];
+ zynqmp_disp_layer_get_fmts(disp, layer, &fmts, &num_fmts);
+ ret = drm_universal_plane_init(disp->drm, &layer->plane, 0,
+ &zynqmp_disp_plane_funcs, fmts,
+ num_fmts, NULL, type, NULL);
+ if (ret)
+ goto err_plane;
+ drm_plane_helper_add(&layer->plane,
+ &zynqmp_disp_plane_helper_funcs);
+ type = DRM_PLANE_TYPE_PRIMARY;
+ }
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+ layer = &disp->layers[i];
+ layer->bridge.enable = &zynqmp_disp_bridge_enable;
+ layer->bridge.disable = &zynqmp_disp_bridge_disable;
+ layer->bridge.set_input = &zynqmp_disp_bridge_set_input;
+ layer->bridge.get_input_fmts =
+ &zynqmp_disp_bridge_get_input_fmts;
+ layer->bridge.of_node = layer->of_node;
+ xlnx_bridge_register(&layer->bridge);
+ }
+
+ /* Attach the global alpha properties to the last (graphics) layer */
+ drm_object_attach_property(&layer->plane.base, disp->g_alpha_prop,
+ ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX);
+ disp->alpha = ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX;
+ /* Enable the global alpha as default */
+ drm_object_attach_property(&layer->plane.base, disp->g_alpha_en_prop,
+ true);
+ disp->alpha_en = true;
+
+ layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+ drm_object_attach_property(&layer->plane.base, disp->tpg_prop, false);
+
+ return ret;
+
+err_plane:
+ if (i)
+ drm_plane_cleanup(&disp->layers[0].plane);
+ return ret;
+}
+
+static void zynqmp_disp_destroy_plane(struct zynqmp_disp *disp)
+{
+ unsigned int i;
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++)
+ zynqmp_disp_plane_destroy(&disp->layers[i].plane);
+}
+
+/*
+ * Xlnx crtc functions
+ */
+
+static inline struct zynqmp_disp *xlnx_crtc_to_disp(struct xlnx_crtc *xlnx_crtc)
+{
+ return container_of(xlnx_crtc, struct zynqmp_disp, xlnx_crtc);
+}
+
+static int zynqmp_disp_get_max_width(struct xlnx_crtc *xlnx_crtc)
+{
+ return ZYNQMP_DISP_MAX_WIDTH;
+}
+
+static int zynqmp_disp_get_max_height(struct xlnx_crtc *xlnx_crtc)
+{
+ return ZYNQMP_DISP_MAX_HEIGHT;
+}
+
+static uint32_t zynqmp_disp_get_format(struct xlnx_crtc *xlnx_crtc)
+{
+ struct zynqmp_disp *disp = xlnx_crtc_to_disp(xlnx_crtc);
+
+ return disp->layers[ZYNQMP_DISP_LAYER_GFX].fmt->drm_fmt;
+}
+
+static unsigned int zynqmp_disp_get_align(struct xlnx_crtc *xlnx_crtc)
+{
+ struct zynqmp_disp *disp = xlnx_crtc_to_disp(xlnx_crtc);
+ struct zynqmp_disp_layer *layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+
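+ /* e.g. a DMA engine copy_align of 4 yields a 16-byte alignment */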
+ return 1 << layer->dma->chan->device->copy_align;
+}
+
+static u64 zynqmp_disp_get_dma_mask(struct xlnx_crtc *xlnx_crtc)
+{
+ return DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT);
+}
+
+/*
+ * DRM crtc functions
+ */
+
+static inline struct zynqmp_disp *crtc_to_disp(struct drm_crtc *crtc)
+{
+ struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+
+ return xlnx_crtc_to_disp(xlnx_crtc);
+}
+
+static int zynqmp_disp_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct zynqmp_disp *disp = crtc_to_disp(crtc);
+ unsigned long rate;
+ long diff;
+ int ret;
+
+ zynqmp_disp_clk_disable(disp->pclk, &disp->pclk_en);
+ ret = clk_set_rate(disp->pclk, adjusted_mode->clock * 1000);
+ if (ret) {
+ dev_err(disp->dev, "failed to set a pixel clock\n");
+ return ret;
+ }
+
+ rate = clk_get_rate(disp->pclk);
+ diff = rate - adjusted_mode->clock * 1000;
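+ /* Flag at info level when the actual rate deviates by more than 5% */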
+ if (abs(diff) > (adjusted_mode->clock * 1000) / 20) {
+ dev_info(disp->dev, "request pixel rate: %d actual rate: %lu\n",
+ adjusted_mode->clock, rate);
+ } else {
+ dev_dbg(disp->dev, "request pixel rate: %d actual rate: %lu\n",
+ adjusted_mode->clock, rate);
+ }
+
+ /* The timing registers must always be programmed */
+ zynqmp_dp_encoder_mode_set_stream(disp->dpsub->dp, adjusted_mode);
+
+ return 0;
+}
+
+static void
+zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct zynqmp_disp *disp = crtc_to_disp(crtc);
+ struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+ int ret, vrefresh;
+
+ zynqmp_disp_crtc_mode_set(crtc, &crtc->state->mode,
+ adjusted_mode, crtc->x, crtc->y, NULL);
+
+ pm_runtime_get_sync(disp->dev);
+ ret = zynqmp_disp_clk_enable(disp->pclk, &disp->pclk_en);
+ if (ret) {
+ dev_err(disp->dev, "failed to enable a pixel clock\n");
+ return;
+ }
+ zynqmp_disp_set_output_fmt(disp, disp->color);
+ zynqmp_disp_set_bg_color(disp, disp->bg_c0, disp->bg_c1, disp->bg_c2);
+ zynqmp_disp_enable(disp);
+ /* Delay for 3 vblank intervals so the timing generator can stabilize */
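+ /*
+ * For example, a 1080p60 mode (clock 148500 kHz, htotal 2200,
+ * vtotal 1125) gives vrefresh = 60, i.e. a ~50 ms settle delay.
+ */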
+ vrefresh = (adjusted_mode->clock * 1000) /
+ (adjusted_mode->vtotal * adjusted_mode->htotal);
+ msleep(3 * 1000 / vrefresh);
+}
+
+static void
+zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+ zynqmp_disp_clk_disable(disp->pclk, &disp->pclk_en);
+ zynqmp_disp_plane_disable(crtc->primary);
+ zynqmp_disp_disable(disp, true);
+ drm_crtc_vblank_off(crtc);
+ pm_runtime_put_sync(disp->dev);
+}
+
+static int zynqmp_disp_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
+static void
+zynqmp_disp_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ drm_crtc_vblank_on(crtc);
+ /* Don't rely on vblank when disabling crtc */
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ /* Consume the flip_done event from atomic helper */
+ crtc->state->event->pipe = drm_crtc_index(crtc);
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static struct drm_crtc_helper_funcs zynqmp_disp_crtc_helper_funcs = {
+ .atomic_enable = zynqmp_disp_crtc_atomic_enable,
+ .atomic_disable = zynqmp_disp_crtc_atomic_disable,
+ .atomic_check = zynqmp_disp_crtc_atomic_check,
+ .atomic_begin = zynqmp_disp_crtc_atomic_begin,
+};
+
+static void zynqmp_disp_crtc_destroy(struct drm_crtc *crtc)
+{
+ zynqmp_disp_crtc_atomic_disable(crtc, NULL);
+ drm_crtc_cleanup(crtc);
+}
+
+static int zynqmp_disp_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+ zynqmp_dp_enable_vblank(disp->dpsub->dp);
+
+ return 0;
+}
+
+static void zynqmp_disp_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+ zynqmp_dp_disable_vblank(disp->dpsub->dp);
+}
+
+static int
+zynqmp_disp_crtc_atomic_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+ /*
+ * CRTC property values are just stored here and applied when the
+ * CRTC is enabled
+ */
+ if (property == disp->color_prop)
+ disp->color = val;
+ else if (property == disp->bg_c0_prop)
+ disp->bg_c0 = val;
+ else if (property == disp->bg_c1_prop)
+ disp->bg_c1 = val;
+ else if (property == disp->bg_c2_prop)
+ disp->bg_c2 = val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+zynqmp_disp_crtc_atomic_get_property(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+ if (property == disp->color_prop)
+ *val = disp->color;
+ else if (property == disp->bg_c0_prop)
+ *val = disp->bg_c0;
+ else if (property == disp->bg_c1_prop)
+ *val = disp->bg_c1;
+ else if (property == disp->bg_c2_prop)
+ *val = disp->bg_c2;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct drm_crtc_funcs zynqmp_disp_crtc_funcs = {
+ .destroy = zynqmp_disp_crtc_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_set_property = zynqmp_disp_crtc_atomic_set_property,
+ .atomic_get_property = zynqmp_disp_crtc_atomic_get_property,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = zynqmp_disp_crtc_enable_vblank,
+ .disable_vblank = zynqmp_disp_crtc_disable_vblank,
+};
+
+static void zynqmp_disp_create_crtc(struct zynqmp_disp *disp)
+{
+ struct drm_plane *plane = &disp->layers[ZYNQMP_DISP_LAYER_GFX].plane;
+ struct drm_mode_object *obj = &disp->xlnx_crtc.crtc.base;
+ int ret;
+
+ ret = drm_crtc_init_with_planes(disp->drm, &disp->xlnx_crtc.crtc, plane,
+ NULL, &zynqmp_disp_crtc_funcs, NULL);
+ drm_crtc_helper_add(&disp->xlnx_crtc.crtc,
+ &zynqmp_disp_crtc_helper_funcs);
+ drm_object_attach_property(obj, disp->color_prop, 0);
+ zynqmp_dp_set_color(disp->dpsub->dp, zynqmp_disp_color_enum[0].name);
+ drm_object_attach_property(obj, disp->bg_c0_prop, 0);
+ drm_object_attach_property(obj, disp->bg_c1_prop, 0);
+ drm_object_attach_property(obj, disp->bg_c2_prop, 0);
+
+ disp->xlnx_crtc.get_max_width = &zynqmp_disp_get_max_width;
+ disp->xlnx_crtc.get_max_height = &zynqmp_disp_get_max_height;
+ disp->xlnx_crtc.get_format = &zynqmp_disp_get_format;
+ disp->xlnx_crtc.get_align = &zynqmp_disp_get_align;
+ disp->xlnx_crtc.get_dma_mask = &zynqmp_disp_get_dma_mask;
+ xlnx_crtc_register(disp->drm, &disp->xlnx_crtc);
+}
+
+static void zynqmp_disp_destroy_crtc(struct zynqmp_disp *disp)
+{
+ xlnx_crtc_unregister(disp->drm, &disp->xlnx_crtc);
+ zynqmp_disp_crtc_destroy(&disp->xlnx_crtc.crtc);
+}
+
+static void zynqmp_disp_map_crtc_to_plane(struct zynqmp_disp *disp)
+{
+ u32 possible_crtcs = drm_crtc_mask(&disp->xlnx_crtc.crtc);
+ unsigned int i;
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++)
+ disp->layers[i].plane.possible_crtcs = possible_crtcs;
+}
+
+/*
+ * Component functions
+ */
+
+int zynqmp_disp_bind(struct device *dev, struct device *master, void *data)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+ struct zynqmp_disp *disp = dpsub->disp;
+ struct drm_device *drm = data;
+ int num;
+ u64 max;
+ int ret;
+
+ disp->drm = drm;
+
+ max = ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX;
+ disp->g_alpha_prop = drm_property_create_range(drm, 0, "alpha", 0, max);
+ disp->g_alpha_en_prop = drm_property_create_bool(drm, 0,
+ "g_alpha_en");
+ num = ARRAY_SIZE(zynqmp_disp_color_enum);
+ disp->color_prop = drm_property_create_enum(drm, 0,
+ "output_color",
+ zynqmp_disp_color_enum,
+ num);
+ max = ZYNQMP_DISP_V_BLEND_BG_MAX;
+ disp->bg_c0_prop = drm_property_create_range(drm, 0, "bg_c0", 0, max);
+ disp->bg_c1_prop = drm_property_create_range(drm, 0, "bg_c1", 0, max);
+ disp->bg_c2_prop = drm_property_create_range(drm, 0, "bg_c2", 0, max);
+ disp->tpg_prop = drm_property_create_bool(drm, 0, "tpg");
+
+ ret = zynqmp_disp_create_plane(disp);
+ if (ret)
+ return ret;
+ zynqmp_disp_create_crtc(disp);
+ zynqmp_disp_map_crtc_to_plane(disp);
+
+ return 0;
+}
+
+void zynqmp_disp_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+ struct zynqmp_disp *disp = dpsub->disp;
+
+ zynqmp_disp_destroy_crtc(disp);
+ zynqmp_disp_destroy_plane(disp);
+ drm_property_destroy(disp->drm, disp->bg_c2_prop);
+ drm_property_destroy(disp->drm, disp->bg_c1_prop);
+ drm_property_destroy(disp->drm, disp->bg_c0_prop);
+ drm_property_destroy(disp->drm, disp->color_prop);
+ drm_property_destroy(disp->drm, disp->g_alpha_en_prop);
+ drm_property_destroy(disp->drm, disp->g_alpha_prop);
+}
+
+/*
+ * Platform initialization functions
+ */
+
+static int zynqmp_disp_enumerate_fmts(struct zynqmp_disp *disp)
+{
+ struct zynqmp_disp_layer *layer;
+ u32 *bus_fmts;
+ u32 i, size, num_bus_fmts;
+ u32 gfx_fmt = ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565;
+
+ num_bus_fmts = ARRAY_SIZE(av_buf_live_fmts);
+ bus_fmts = devm_kcalloc(disp->dev, num_bus_fmts, sizeof(*bus_fmts),
+ GFP_KERNEL);
+ if (!bus_fmts)
+ return -ENOMEM;
+ for (i = 0; i < num_bus_fmts; i++)
+ bus_fmts[i] = av_buf_live_fmts[i].bus_fmt;
+
+ layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+ layer->num_bus_fmts = num_bus_fmts;
+ layer->bus_fmts = bus_fmts;
+ size = ARRAY_SIZE(av_buf_vid_fmts);
+ layer->num_fmts = size;
+ layer->drm_fmts = devm_kcalloc(disp->dev, size,
+ sizeof(*layer->drm_fmts),
+ GFP_KERNEL);
+ if (!layer->drm_fmts)
+ return -ENOMEM;
+ for (i = 0; i < layer->num_fmts; i++)
+ layer->drm_fmts[i] = av_buf_vid_fmts[i].drm_fmt;
+ layer->fmt = &av_buf_vid_fmts[ZYNQMP_DISP_AV_BUF_VID_FMT_YUYV];
+
+ layer = &disp->layers[ZYNQMP_DISP_LAYER_GFX];
+ layer->num_bus_fmts = num_bus_fmts;
+ layer->bus_fmts = bus_fmts;
+ size = ARRAY_SIZE(av_buf_gfx_fmts);
+ layer->num_fmts = size;
+ layer->drm_fmts = devm_kcalloc(disp->dev, size,
+ sizeof(*layer->drm_fmts),
+ GFP_KERNEL);
+ if (!layer->drm_fmts)
+ return -ENOMEM;
+
+ for (i = 0; i < layer->num_fmts; i++)
+ layer->drm_fmts[i] = av_buf_gfx_fmts[i].drm_fmt;
+ if (zynqmp_disp_gfx_init_fmt < ARRAY_SIZE(zynqmp_disp_gfx_init_fmts))
+ gfx_fmt = zynqmp_disp_gfx_init_fmts[zynqmp_disp_gfx_init_fmt];
+ layer->fmt = &av_buf_gfx_fmts[gfx_fmt];
+
+ return 0;
+}
+
+int zynqmp_disp_probe(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub;
+ struct zynqmp_disp *disp;
+ struct resource *res;
+ int ret;
+
+ disp = devm_kzalloc(&pdev->dev, sizeof(*disp), GFP_KERNEL);
+ if (!disp)
+ return -ENOMEM;
+ disp->dev = &pdev->dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "blend");
+ disp->blend.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(disp->blend.base))
+ return PTR_ERR(disp->blend.base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "av_buf");
+ disp->av_buf.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(disp->av_buf.base))
+ return PTR_ERR(disp->av_buf.base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aud");
+ disp->aud.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(disp->aud.base))
+ return PTR_ERR(disp->aud.base);
+
+ dpsub = platform_get_drvdata(pdev);
+ dpsub->disp = disp;
+ disp->dpsub = dpsub;
+
+ ret = zynqmp_disp_enumerate_fmts(disp);
+ if (ret)
+ return ret;
+
+ /* Try the live PL video clock */
+ disp->_pl_pclk = devm_clk_get(disp->dev, "dp_live_video_in_clk");
+ if (!IS_ERR(disp->_pl_pclk)) {
+ disp->pclk = disp->_pl_pclk;
+ ret = zynqmp_disp_clk_enable_disable(disp->pclk,
+ &disp->pclk_en);
+ if (ret)
+ disp->pclk = NULL;
+ } else if (PTR_ERR(disp->_pl_pclk) == -EPROBE_DEFER) {
+ return PTR_ERR(disp->_pl_pclk);
+ }
+
+ /* If the live PL video clock is not valid, fall back to PS clock */
+ if (!disp->pclk) {
+ disp->_ps_pclk = devm_clk_get(disp->dev, "dp_vtc_pixel_clk_in");
+ if (IS_ERR(disp->_ps_pclk)) {
+ dev_err(disp->dev, "failed to init any video clock\n");
+ return PTR_ERR(disp->_ps_pclk);
+ }
+ disp->pclk = disp->_ps_pclk;
+ ret = zynqmp_disp_clk_enable_disable(disp->pclk,
+ &disp->pclk_en);
+ if (ret) {
+ dev_err(disp->dev, "failed to init any video clock\n");
+ return ret;
+ }
+ }
+
+ disp->aclk = devm_clk_get(disp->dev, "dp_apb_clk");
+ if (IS_ERR(disp->aclk))
+ return PTR_ERR(disp->aclk);
+ ret = zynqmp_disp_clk_enable(disp->aclk, &disp->aclk_en);
+ if (ret) {
+ dev_err(disp->dev, "failed to enable the APB clk\n");
+ return ret;
+ }
+
+ /* Try the live PL audio clock */
+ disp->_pl_audclk = devm_clk_get(disp->dev, "dp_live_audio_aclk");
+ if (!IS_ERR(disp->_pl_audclk)) {
+ disp->audclk = disp->_pl_audclk;
+ ret = zynqmp_disp_clk_enable_disable(disp->audclk,
+ &disp->audclk_en);
+ if (ret)
+ disp->audclk = NULL;
+ }
+
+ /* If the live PL audio clock is not valid, fall back to PS clock */
+ if (!disp->audclk) {
+ disp->_ps_audclk = devm_clk_get(disp->dev, "dp_aud_clk");
+ if (!IS_ERR(disp->_ps_audclk)) {
+ disp->audclk = disp->_ps_audclk;
+ ret = zynqmp_disp_clk_enable_disable(disp->audclk,
+ &disp->audclk_en);
+ if (ret)
+ disp->audclk = NULL;
+ }
+
+ if (!disp->audclk) {
+ dev_err(disp->dev,
+ "audio is disabled due to clock failure\n");
+ }
+ }
+
+ ret = zynqmp_disp_layer_create(disp);
+ if (ret)
+ goto error_aclk;
+
+ zynqmp_disp_init(disp);
+
+ return 0;
+
+error_aclk:
+ zynqmp_disp_clk_disable(disp->aclk, &disp->aclk_en);
+ return ret;
+}
+
+int zynqmp_disp_remove(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+ struct zynqmp_disp *disp = dpsub->disp;
+
+ zynqmp_disp_layer_destroy(disp);
+ if (disp->audclk)
+ zynqmp_disp_clk_disable(disp->audclk, &disp->audclk_en);
+ zynqmp_disp_clk_disable(disp->aclk, &disp->aclk_en);
+ zynqmp_disp_clk_disable(disp->pclk, &disp->pclk_en);
+ dpsub->disp = NULL;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.h b/drivers/gpu/drm/xlnx/zynqmp_disp.h
new file mode 100644
index 000000000000..28d8188f8f5e
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ZynqMP Display Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ZYNQMP_DISP_H_
+#define _ZYNQMP_DISP_H_
+
+struct zynqmp_disp;
+
+void zynqmp_disp_handle_vblank(struct zynqmp_disp *disp);
+unsigned int zynqmp_disp_get_apb_clk_rate(struct zynqmp_disp *disp);
+bool zynqmp_disp_aud_enabled(struct zynqmp_disp *disp);
+unsigned int zynqmp_disp_get_aud_clk_rate(struct zynqmp_disp *disp);
+uint32_t zynqmp_disp_get_crtc_mask(struct zynqmp_disp *disp);
+
+int zynqmp_disp_bind(struct device *dev, struct device *master, void *data);
+void zynqmp_disp_unbind(struct device *dev, struct device *master, void *data);
+
+int zynqmp_disp_probe(struct platform_device *pdev);
+int zynqmp_disp_remove(struct platform_device *pdev);
+
+#endif /* _ZYNQMP_DISP_H_ */
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
new file mode 100644
index 000000000000..c3c86dacac97
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -0,0 +1,1917 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP DisplayPort Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_of.h>
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-zynqmp.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+
+#include "zynqmp_disp.h"
+#include "zynqmp_dpsub.h"
+
+static uint zynqmp_dp_aux_timeout_ms = 50;
+module_param_named(aux_timeout_ms, zynqmp_dp_aux_timeout_ms, uint, 0444);
+MODULE_PARM_DESC(aux_timeout_ms, "DP aux timeout value in msec (default: 50)");
+
+/*
+ * Some sinks require a delay after a power-on request
+ */
+static uint zynqmp_dp_power_on_delay_ms = 4;
+module_param_named(power_on_delay_ms, zynqmp_dp_power_on_delay_ms, uint, 0444);
+MODULE_PARM_DESC(aux_timeout_ms, "DP power on delay in msec (default: 4)");
+
+/* Link configuration registers */
+#define ZYNQMP_DP_TX_LINK_BW_SET 0x0
+#define ZYNQMP_DP_TX_LANE_CNT_SET 0x4
+#define ZYNQMP_DP_TX_ENHANCED_FRAME_EN 0x8
+#define ZYNQMP_DP_TX_TRAINING_PATTERN_SET 0xc
+#define ZYNQMP_DP_TX_SCRAMBLING_DISABLE 0x14
+#define ZYNQMP_DP_TX_DOWNSPREAD_CTL 0x18
+#define ZYNQMP_DP_TX_SW_RESET 0x1c
+#define ZYNQMP_DP_TX_SW_RESET_STREAM1 BIT(0)
+#define ZYNQMP_DP_TX_SW_RESET_STREAM2 BIT(1)
+#define ZYNQMP_DP_TX_SW_RESET_STREAM3 BIT(2)
+#define ZYNQMP_DP_TX_SW_RESET_STREAM4 BIT(3)
+#define ZYNQMP_DP_TX_SW_RESET_AUX BIT(7)
+#define ZYNQMP_DP_TX_SW_RESET_ALL (ZYNQMP_DP_TX_SW_RESET_STREAM1 | \
+ ZYNQMP_DP_TX_SW_RESET_STREAM2 | \
+ ZYNQMP_DP_TX_SW_RESET_STREAM3 | \
+ ZYNQMP_DP_TX_SW_RESET_STREAM4 | \
+ ZYNQMP_DP_TX_SW_RESET_AUX)
+
+/* Core enable registers */
+#define ZYNQMP_DP_TX_ENABLE 0x80
+#define ZYNQMP_DP_TX_ENABLE_MAIN_STREAM 0x84
+#define ZYNQMP_DP_TX_FORCE_SCRAMBLER_RESET 0xc0
+#define ZYNQMP_DP_TX_VERSION 0xf8
+#define ZYNQMP_DP_TX_VERSION_MAJOR_MASK GENMASK(31, 24)
+#define ZYNQMP_DP_TX_VERSION_MAJOR_SHIFT 24
+#define ZYNQMP_DP_TX_VERSION_MINOR_MASK GENMASK(23, 16)
+#define ZYNQMP_DP_TX_VERSION_MINOR_SHIFT 16
+#define ZYNQMP_DP_TX_VERSION_REVISION_MASK GENMASK(15, 12)
+#define ZYNQMP_DP_TX_VERSION_REVISION_SHIFT 12
+#define ZYNQMP_DP_TX_VERSION_PATCH_MASK GENMASK(11, 8)
+#define ZYNQMP_DP_TX_VERSION_PATCH_SHIFT 8
+#define ZYNQMP_DP_TX_VERSION_INTERNAL_MASK GENMASK(7, 0)
+#define ZYNQMP_DP_TX_VERSION_INTERNAL_SHIFT 0
+
+/* Core ID registers */
+#define ZYNQMP_DP_TX_CORE_ID 0xfc
+#define ZYNQMP_DP_TX_CORE_ID_MAJOR_MASK GENMASK(31, 24)
+#define ZYNQMP_DP_TX_CORE_ID_MAJOR_SHIFT 24
+#define ZYNQMP_DP_TX_CORE_ID_MINOR_MASK GENMASK(23, 16)
+#define ZYNQMP_DP_TX_CORE_ID_MINOR_SHIFT 16
+#define ZYNQMP_DP_TX_CORE_ID_REVISION_MASK GENMASK(15, 8)
+#define ZYNQMP_DP_TX_CORE_ID_REVISION_SHIFT 8
+#define ZYNQMP_DP_TX_CORE_ID_DIRECTION GENMASK(1, 0)
+
+/* AUX channel interface registers */
+#define ZYNQMP_DP_TX_AUX_COMMAND 0x100
+#define ZYNQMP_DP_TX_AUX_COMMAND_CMD_SHIFT 8
+#define ZYNQMP_DP_TX_AUX_COMMAND_ADDRESS_ONLY BIT(12)
+#define ZYNQMP_DP_TX_AUX_COMMAND_BYTES_SHIFT 0
+#define ZYNQMP_DP_TX_AUX_WRITE_FIFO 0x104
+#define ZYNQMP_DP_TX_AUX_ADDRESS 0x108
+#define ZYNQMP_DP_TX_CLK_DIVIDER 0x10c
+#define ZYNQMP_DP_TX_CLK_DIVIDER_MHZ 1000000
+#define ZYNQMP_DP_TX_CLK_DIVIDER_AUX_FILTER_SHIFT 8
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE 0x130
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_HPD BIT(0)
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REQUEST BIT(1)
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY BIT(2)
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY_TIMEOUT BIT(3)
+#define ZYNQMP_DP_TX_AUX_REPLY_DATA 0x134
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE 0x138
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_ACK (0)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_NACK BIT(0)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_DEFER BIT(1)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_ACK (0)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_NACK BIT(2)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_DEFER BIT(3)
+#define ZYNQMP_DP_TX_AUX_REPLY_CNT 0x13c
+#define ZYNQMP_DP_TX_AUX_REPLY_CNT_MASK 0xff
+#define ZYNQMP_DP_TX_INTR_STATUS 0x140
+#define ZYNQMP_DP_TX_INTR_MASK 0x144
+#define ZYNQMP_DP_TX_INTR_HPD_IRQ BIT(0)
+#define ZYNQMP_DP_TX_INTR_HPD_EVENT BIT(1)
+#define ZYNQMP_DP_TX_INTR_REPLY_RECV BIT(2)
+#define ZYNQMP_DP_TX_INTR_REPLY_TIMEOUT BIT(3)
+#define ZYNQMP_DP_TX_INTR_HPD_PULSE BIT(4)
+#define ZYNQMP_DP_TX_INTR_EXT_PKT_TXD BIT(5)
+#define ZYNQMP_DP_TX_INTR_LIV_ABUF_UNDRFLW BIT(12)
+#define ZYNQMP_DP_TX_INTR_VBLANK_START BIT(13)
+#define ZYNQMP_DP_TX_INTR_PIXEL0_MATCH BIT(14)
+#define ZYNQMP_DP_TX_INTR_PIXEL1_MATCH BIT(15)
+#define ZYNQMP_DP_TX_INTR_CHBUF_UNDERFLW_MASK 0x3f0000
+#define ZYNQMP_DP_TX_INTR_CHBUF_OVERFLW_MASK 0xfc00000
+#define ZYNQMP_DP_TX_INTR_CUST_TS_2 BIT(28)
+#define ZYNQMP_DP_TX_INTR_CUST_TS BIT(29)
+#define ZYNQMP_DP_TX_INTR_EXT_VSYNC_TS BIT(30)
+#define ZYNQMP_DP_TX_INTR_VSYNC_TS BIT(31)
+#define ZYNQMP_DP_TX_INTR_ALL (ZYNQMP_DP_TX_INTR_HPD_IRQ | \
+ ZYNQMP_DP_TX_INTR_HPD_EVENT | \
+ ZYNQMP_DP_TX_INTR_REPLY_RECV | \
+ ZYNQMP_DP_TX_INTR_REPLY_TIMEOUT | \
+ ZYNQMP_DP_TX_INTR_HPD_PULSE | \
+ ZYNQMP_DP_TX_INTR_EXT_PKT_TXD | \
+ ZYNQMP_DP_TX_INTR_LIV_ABUF_UNDRFLW | \
+ ZYNQMP_DP_TX_INTR_CHBUF_UNDERFLW_MASK | \
+ ZYNQMP_DP_TX_INTR_CHBUF_OVERFLW_MASK)
+#define ZYNQMP_DP_TX_NO_INTR_ALL (ZYNQMP_DP_TX_INTR_PIXEL0_MATCH | \
+ ZYNQMP_DP_TX_INTR_PIXEL1_MATCH | \
+ ZYNQMP_DP_TX_INTR_CUST_TS_2 | \
+ ZYNQMP_DP_TX_INTR_CUST_TS | \
+ ZYNQMP_DP_TX_INTR_EXT_VSYNC_TS | \
+ ZYNQMP_DP_TX_INTR_VSYNC_TS)
+#define ZYNQMP_DP_TX_REPLY_DATA_CNT 0x148
+#define ZYNQMP_DP_SUB_TX_INTR_STATUS 0x3a0
+#define ZYNQMP_DP_SUB_TX_INTR_MASK 0x3a4
+#define ZYNQMP_DP_SUB_TX_INTR_EN 0x3a8
+#define ZYNQMP_DP_SUB_TX_INTR_DS 0x3ac
+
+/* Main stream attribute registers */
+#define ZYNQMP_DP_TX_MAIN_STREAM_HTOTAL 0x180
+#define ZYNQMP_DP_TX_MAIN_STREAM_VTOTAL 0x184
+#define ZYNQMP_DP_TX_MAIN_STREAM_POLARITY 0x188
+#define ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_HSYNC_SHIFT 0
+#define ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_VSYNC_SHIFT 1
+#define ZYNQMP_DP_TX_MAIN_STREAM_HSWIDTH 0x18c
+#define ZYNQMP_DP_TX_MAIN_STREAM_VSWIDTH 0x190
+#define ZYNQMP_DP_TX_MAIN_STREAM_HRES 0x194
+#define ZYNQMP_DP_TX_MAIN_STREAM_VRES 0x198
+#define ZYNQMP_DP_TX_MAIN_STREAM_HSTART 0x19c
+#define ZYNQMP_DP_TX_MAIN_STREAM_VSTART 0x1a0
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0 0x1a4
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC BIT(0)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_FORMAT_SHIFT 1
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_DYNAMIC_RANGE BIT(3)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_YCBCR_COLRIMETRY BIT(4)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_BPC_SHIFT 5
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC1 0x1a8
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_INTERLACED_VERT BIT(0)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_STEREO_VID_SHIFT 1
+#define ZYNQMP_DP_TX_M_VID 0x1ac
+#define ZYNQMP_DP_TX_TRANSFER_UNIT_SIZE 0x1b0
+#define ZYNQMP_DP_TX_DEF_TRANSFER_UNIT_SIZE 64
+#define ZYNQMP_DP_TX_N_VID 0x1b4
+#define ZYNQMP_DP_TX_USER_PIXEL_WIDTH 0x1b8
+#define ZYNQMP_DP_TX_USER_DATA_CNT_PER_LANE 0x1bc
+#define ZYNQMP_DP_TX_MIN_BYTES_PER_TU 0x1c4
+#define ZYNQMP_DP_TX_FRAC_BYTES_PER_TU 0x1c8
+#define ZYNQMP_DP_TX_INIT_WAIT 0x1cc
+
+/* PHY configuration and status registers */
+#define ZYNQMP_DP_TX_PHY_CONFIG 0x200
+#define ZYNQMP_DP_TX_PHY_CONFIG_PHY_RESET BIT(0)
+#define ZYNQMP_DP_TX_PHY_CONFIG_GTTX_RESET BIT(1)
+#define ZYNQMP_DP_TX_PHY_CONFIG_PHY_PMA_RESET BIT(8)
+#define ZYNQMP_DP_TX_PHY_CONFIG_PHY_PCS_RESET BIT(9)
+#define ZYNQMP_DP_TX_PHY_CONFIG_ALL_RESET (ZYNQMP_DP_TX_PHY_CONFIG_PHY_RESET | \
+ ZYNQMP_DP_TX_PHY_CONFIG_GTTX_RESET | \
+ ZYNQMP_DP_TX_PHY_CONFIG_PHY_PMA_RESET | \
+ ZYNQMP_DP_TX_PHY_CONFIG_PHY_PCS_RESET)
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_0 0x210
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_1 0x214
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_2 0x218
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_3 0x21c
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_0 0x220
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_1 0x224
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_2 0x228
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_3 0x22c
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING 0x234
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_162 0x1
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_270 0x3
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_540 0x5
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN 0x238
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_0 BIT(0)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_1 BIT(1)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_2 BIT(2)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_3 BIT(3)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL 0xf
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_0 0x23c
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_1 0x240
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_2 0x244
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_3 0x248
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_0 0x24c
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_1 0x250
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_2 0x254
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_3 0x258
+#define ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_0 0x24c
+#define ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_1 0x250
+#define ZYNQMP_DP_TX_PHY_STATUS 0x280
+#define ZYNQMP_DP_TX_PHY_STATUS_PLL_LOCKED_SHIFT 4
+#define ZYNQMP_DP_TX_PHY_STATUS_FPGA_PLL_LOCKED BIT(6)
+
+/* Audio registers */
+#define ZYNQMP_DP_TX_AUDIO_CONTROL 0x300
+#define ZYNQMP_DP_TX_AUDIO_CHANNELS 0x304
+#define ZYNQMP_DP_TX_AUDIO_INFO_DATA 0x308
+#define ZYNQMP_DP_TX_AUDIO_M_AUD 0x328
+#define ZYNQMP_DP_TX_AUDIO_N_AUD 0x32c
+#define ZYNQMP_DP_TX_AUDIO_EXT_DATA 0x330
+
+#define ZYNQMP_DP_MISC0_RGB (0)
+#define ZYNQMP_DP_MISC0_YCRCB_422 (5 << 1)
+#define ZYNQMP_DP_MISC0_YCRCB_444 (6 << 1)
+#define ZYNQMP_DP_MISC0_FORMAT_MASK 0xe
+#define ZYNQMP_DP_MISC0_BPC_6 (0 << 5)
+#define ZYNQMP_DP_MISC0_BPC_8 (1 << 5)
+#define ZYNQMP_DP_MISC0_BPC_10 (2 << 5)
+#define ZYNQMP_DP_MISC0_BPC_12 (3 << 5)
+#define ZYNQMP_DP_MISC0_BPC_16 (4 << 5)
+#define ZYNQMP_DP_MISC0_BPC_MASK 0xe0
+#define ZYNQMP_DP_MISC1_Y_ONLY (1 << 7)
+
+#define ZYNQMP_DP_MAX_LANES 2
+#define ZYNQMP_MAX_FREQ 3000000
+
+#define DP_REDUCED_BIT_RATE 162000
+#define DP_HIGH_BIT_RATE 270000
+#define DP_HIGH_BIT_RATE2 540000
+#define DP_MAX_TRAINING_TRIES 5
+#define DP_V1_2 0x12
+
+/**
+ * struct zynqmp_dp_link_config - Common link config between source and sink
+ * @max_rate: maximum link rate
+ * @max_lanes: maximum number of lanes
+ */
+struct zynqmp_dp_link_config {
+ int max_rate;
+ u8 max_lanes;
+};
+
+/**
+ * struct zynqmp_dp_mode - Configured mode of DisplayPort
+ * @bw_code: code for bandwidth(link rate)
+ * @lane_cnt: number of lanes
+ * @pclock: pixel clock frequency of current mode
+ * @fmt: format identifier string
+ */
+struct zynqmp_dp_mode {
+ u8 bw_code;
+ u8 lane_cnt;
+ int pclock;
+ const char *fmt;
+};
+
+/**
+ * struct zynqmp_dp_config - Configuration of DisplayPort from DTS
+ * @misc0: misc0 configuration (per DP v1.2 spec)
+ * @misc1: misc1 configuration (per DP v1.2 spec)
+ * @bpp: bits per pixel
+ * @bpc: bits per component
+ * @num_colors: number of color components
+ */
+struct zynqmp_dp_config {
+ u8 misc0;
+ u8 misc1;
+ u8 bpp;
+ u8 bpc;
+ u8 num_colors;
+};
+
+/**
+ * struct zynqmp_dp - Xilinx DisplayPort core
+ * @encoder: the drm encoder structure
+ * @connector: the drm connector structure
+ * @sync_prop: synchronous mode property
+ * @bpc_prop: bpc mode property
+ * @dev: device structure
+ * @dpsub: Display subsystem
+ * @drm: DRM core
+ * @iomem: device I/O memory for register access
+ * @irq: irq
+ * @config: IP core configuration from DTS
+ * @aux: aux channel
+ * @phy: PHY handles for DP lanes
+ * @num_lanes: number of enabled phy lanes
+ * @hpd_work: hot plug detection worker
+ * @status: connection status
+ * @enabled: flag to indicate if the device is enabled
+ * @dpms: current dpms state
+ * @dpcd: DP configuration data from currently connected sink device
+ * @link_config: common link configuration between IP core and sink device
+ * @mode: current mode between IP core and sink device
+ * @train_set: set of training data
+ */
+struct zynqmp_dp {
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct drm_property *sync_prop;
+ struct drm_property *bpc_prop;
+ struct device *dev;
+ struct zynqmp_dpsub *dpsub;
+ struct drm_device *drm;
+ void __iomem *iomem;
+ int irq;
+
+ struct zynqmp_dp_config config;
+ struct drm_dp_aux aux;
+ struct phy *phy[ZYNQMP_DP_MAX_LANES];
+ u8 num_lanes;
+ struct delayed_work hpd_work;
+ enum drm_connector_status status;
+ bool enabled;
+
+ int dpms;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ struct zynqmp_dp_link_config link_config;
+ struct zynqmp_dp_mode mode;
+ u8 train_set[ZYNQMP_DP_MAX_LANES];
+};
+
+static inline struct zynqmp_dp *encoder_to_dp(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct zynqmp_dp, encoder);
+}
+
+static inline struct zynqmp_dp *connector_to_dp(struct drm_connector *connector)
+{
+ return container_of(connector, struct zynqmp_dp, connector);
+}
+
+static void zynqmp_dp_write(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static u32 zynqmp_dp_read(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+static void zynqmp_dp_clr(void __iomem *base, int offset, u32 clr)
+{
+ zynqmp_dp_write(base, offset, zynqmp_dp_read(base, offset) & ~clr);
+}
+
+static void zynqmp_dp_set(void __iomem *base, int offset, u32 set)
+{
+ zynqmp_dp_write(base, offset, zynqmp_dp_read(base, offset) | set);
+}
+
+/*
+ * Internal functions: used by zynqmp_disp.c
+ */
+
+/**
+ * zynqmp_dp_update_bpp - Update the current bpp config
+ * @dp: DisplayPort IP core structure
+ *
+ * Update the current bpp based on the color format: bpc & num_colors.
+ * Any function that changes bpc or num_colors should call this
+ * to keep the bpp value in sync.
+ */
+static void zynqmp_dp_update_bpp(struct zynqmp_dp *dp)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
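+ /* e.g. 8 bpc RGB (three color components) yields bpp = 24 */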
+ config->bpp = dp->config.bpc * dp->config.num_colors;
+}
+
+/**
+ * zynqmp_dp_set_color - Set the color
+ * @dp: DisplayPort IP core structure
+ * @color: color string, from zynqmp_disp_color_enum
+ *
+ * Update misc register values based on @color string.
+ *
+ * Return: 0 on success, or -EINVAL.
+ */
+int zynqmp_dp_set_color(struct zynqmp_dp *dp, const char *color)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
+ config->misc0 &= ~ZYNQMP_DP_MISC0_FORMAT_MASK;
+ config->misc1 &= ~ZYNQMP_DP_MISC1_Y_ONLY;
+ if (strcmp(color, "rgb") == 0) {
+ config->misc0 |= ZYNQMP_DP_MISC0_RGB;
+ config->num_colors = 3;
+ } else if (strcmp(color, "ycrcb422") == 0) {
+ config->misc0 |= ZYNQMP_DP_MISC0_YCRCB_422;
+ config->num_colors = 2;
+ } else if (strcmp(color, "ycrcb444") == 0) {
+ config->misc0 |= ZYNQMP_DP_MISC0_YCRCB_444;
+ config->num_colors = 3;
+ } else if (strcmp(color, "yonly") == 0) {
+ config->misc1 |= ZYNQMP_DP_MISC1_Y_ONLY;
+ config->num_colors = 1;
+ } else {
+ dev_err(dp->dev, "Invalid colormetry in DT\n");
+ return -EINVAL;
+ }
+ zynqmp_dp_update_bpp(dp);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_enable_vblank - Enable vblank
+ * @dp: DisplayPort IP core structure
+ *
+ * Enable vblank interrupt
+ */
+void zynqmp_dp_enable_vblank(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_EN,
+ ZYNQMP_DP_TX_INTR_VBLANK_START);
+}
+
+/**
+ * zynqmp_dp_disable_vblank - Disable vblank
+ * @dp: DisplayPort IP core structure
+ *
+ * Disable vblank interrupt
+ */
+void zynqmp_dp_disable_vblank(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS,
+ ZYNQMP_DP_TX_INTR_VBLANK_START);
+}
+
+/*
+ * DP PHY functions
+ */
+
+/**
+ * zynqmp_dp_init_phy - Initialize the phy
+ * @dp: DisplayPort IP core structure
+ *
+ * Initialize the phy.
+ *
+ * Return: 0 if the phy instances are initialized correctly, or the error code
+ * returned from the callee functions.
+ * Note: this function can be called without any PHY lanes assigned to the DP.
+ */
+static int zynqmp_dp_init_phy(struct zynqmp_dp *dp)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < dp->num_lanes; i++) {
+ ret = phy_init(dp->phy[i]);
+ if (ret) {
+ dev_err(dp->dev, "failed to init phy lane %d\n", i);
+ return ret;
+ }
+ }
+ /* Wait for PLL to be locked for the primary (1st) lane */
+ if (dp->phy[0]) {
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS,
+ ZYNQMP_DP_TX_INTR_ALL);
+ zynqmp_dp_clr(dp->iomem, ZYNQMP_DP_TX_PHY_CONFIG,
+ ZYNQMP_DP_TX_PHY_CONFIG_ALL_RESET);
+ ret = xpsgtr_wait_pll_lock(dp->phy[0]);
+ if (ret) {
+ dev_err(dp->dev, "failed to lock pll\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_exit_phy - Exit the phy
+ * @dp: DisplayPort IP core structure
+ *
+ * Exit the phy.
+ */
+static void zynqmp_dp_exit_phy(struct zynqmp_dp *dp)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < dp->num_lanes; i++) {
+ ret = phy_exit(dp->phy[i]);
+ if (ret)
+ dev_err(dp->dev, "failed to exit phy(%d) %d\n", i, ret);
+ }
+}
+
+/**
+ * zynqmp_dp_phy_ready - Check if PHY is ready
+ * @dp: DisplayPort IP core structure
+ *
+ * Check if the PHY is ready, polling every 1 ms for up to 100 iterations.
+ * This amount of delay was suggested by the IP designer.
+ *
+ * Return: 0 if PHY is ready, or -ENODEV if PHY is not ready.
+ */
+static int zynqmp_dp_phy_ready(struct zynqmp_dp *dp)
+{
+ u32 i, reg, ready;
+
+ ready = (1 << dp->num_lanes) - 1;
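+ /* e.g. two enabled lanes give a ready mask of 0x3 */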
+
+ /* Wait for 100 * 1ms. This should be enough time for PHY to be ready */
+ for (i = 0; ; i++) {
+ reg = zynqmp_dp_read(dp->iomem, ZYNQMP_DP_TX_PHY_STATUS);
+ if ((reg & ready) == ready)
+ return 0;
+
+ if (i == 100) {
+ dev_err(dp->dev, "PHY isn't ready\n");
+ return -ENODEV;
+ }
+
+ usleep_range(1000, 1100);
+ }
+
+ return 0;
+}
+
+/*
+ * Power Management functions
+ */
+/**
+ * zynqmp_dp_pm_resume - Resume DP IP
+ * @dp: DisplayPort IP core structure
+ *
+ * Resume the DP IP including PHY and pipeline.
+ */
+void zynqmp_dp_pm_resume(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_init_phy(dp);
+}
+/**
+ * zynqmp_dp_pm_suspend - Suspend DP IP
+ * @dp: DisplayPort IP core structure
+ *
+ * Suspend the DP IP including PHY and pipeline.
+ */
+void zynqmp_dp_pm_suspend(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_exit_phy(dp);
+}
+/*
+ * DP functions
+ */
+
+/**
+ * zynqmp_dp_max_rate - Calculate and return available max pixel clock
+ * @link_rate: link rate (kilobytes/sec)
+ * @lane_num: number of lanes
+ * @bpp: bits per pixel
+ *
+ * Return: max pixel clock (KHz) supported by current link config.
+ */
+static inline int zynqmp_dp_max_rate(int link_rate, u8 lane_num, u8 bpp)
+{
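+ /*
+ * Illustrative example: a 2.7 Gbps link carries an effective
+ * 270000 kilobytes/sec per lane; with 2 lanes and 24 bpp this
+ * supports pixel clocks up to 270000 * 2 * 8 / 24 = 180000 kHz,
+ * enough for a 148500 kHz 1080p60 mode.
+ */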
+ return link_rate * lane_num * 8 / bpp;
+}
+
+/**
+ * zynqmp_dp_mode_configure - Configure the link values
+ * @dp: DisplayPort IP core structure
+ * @pclock: pixel clock for requested display mode
+ * @current_bw: current link rate
+ *
+ * Find the link configuration values, rate and lane count for requested pixel
+ * clock @pclock. The @pclock is stored in the mode to be used in other
+ * functions later. The returned rate is downshifted from the current rate
+ * @current_bw.
+ *
+ * Return: Current link rate code, or -EINVAL.
+ */
+static int zynqmp_dp_mode_configure(struct zynqmp_dp *dp, int pclock,
+ u8 current_bw)
+{
+ int max_rate = dp->link_config.max_rate;
+ u8 bws[3] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 max_link_rate_code = drm_dp_link_rate_to_bw_code(max_rate);
+ u8 bpp = dp->config.bpp;
+ u8 lane_cnt;
+ s8 i;
+
+ if (current_bw == DP_LINK_BW_1_62) {
+ dev_err(dp->dev, "can't downshift. already lowest link rate\n");
+ return -EINVAL;
+ }
+
+ for (i = ARRAY_SIZE(bws) - 1; i >= 0; i--) {
+ if (current_bw && bws[i] >= current_bw)
+ continue;
+
+ if (bws[i] <= max_link_rate_code)
+ break;
+ }
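+ /*
+ * At this point bws[i] is the fastest rate below both current_bw
+ * and the sink's maximum, e.g. DP_LINK_BW_2_7 when downshifting
+ * from DP_LINK_BW_5_4.
+ */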
+
+ for (lane_cnt = 1; lane_cnt <= max_lanes; lane_cnt <<= 1) {
+ int bw;
+ u32 rate;
+
+ bw = drm_dp_bw_code_to_link_rate(bws[i]);
+ rate = zynqmp_dp_max_rate(bw, lane_cnt, bpp);
+ if (pclock <= rate) {
+ dp->mode.bw_code = bws[i];
+ dp->mode.lane_cnt = lane_cnt;
+ dp->mode.pclock = pclock;
+ return dp->mode.bw_code;
+ }
+ }
+
+ dev_err(dp->dev, "failed to configure link values\n");
+
+ return -EINVAL;
+}
+
+/**
+ * zynqmp_dp_adjust_train - Adjust train values
+ * @dp: DisplayPort IP core structure
+ * @link_status: link status from sink which contains requested training values
+ */
+static void zynqmp_dp_adjust_train(struct zynqmp_dp *dp,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 *train_set = dp->train_set;
+ u8 voltage = 0, preemphasis = 0;
+ u8 i;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++) {
+ u8 v = drm_dp_get_adjust_request_voltage(link_status, i);
+ u8 p = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+
+ if (v > voltage)
+ voltage = v;
+
+ if (p > preemphasis)
+ preemphasis = p;
+ }
+
+ if (voltage >= DP_TRAIN_VOLTAGE_SWING_LEVEL_3)
+ voltage |= DP_TRAIN_MAX_SWING_REACHED;
+
+ if (preemphasis >= DP_TRAIN_PRE_EMPH_LEVEL_2)
+ preemphasis |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
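+ /*
+ * All lanes are driven with the highest requested values, e.g. if
+ * lane 0 asks for swing level 1 and lane 1 for level 2, both lanes
+ * are programmed with level 2.
+ */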
+ for (i = 0; i < dp->mode.lane_cnt; i++)
+ train_set[i] = voltage | preemphasis;
+}
+
+/**
+ * zynqmp_dp_update_vs_emph - Update the training values
+ * @dp: DisplayPort IP core structure
+ *
+ * Update the training values based on the request from sink. The mapped values
+ * are predefined, and values(vs, pe, pc) are from the device manual.
+ *
+ * Return: 0 if vs and emph are updated successfully, or the error code returned
+ * by drm_dp_dpcd_write().
+ */
+static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp)
+{
+ u8 *train_set = dp->train_set;
+ u8 i, v_level, p_level;
+ int ret;
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set,
+ dp->mode.lane_cnt);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++) {
+ u32 reg = ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_0 + i * 4;
+
+ v_level = (train_set[i] & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ p_level = (train_set[i] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ xpsgtr_margining_factor(dp->phy[i], p_level, v_level);
+ xpsgtr_override_deemph(dp->phy[i], p_level, v_level);
+ zynqmp_dp_write(dp->iomem, reg, 0x2);
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_link_train_cr - Train clock recovery
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if clock recovery train is done successfully, or corresponding
+ * error code.
+ */
+static int zynqmp_dp_link_train_cr(struct zynqmp_dp *dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u8 vs = 0, tries = 0;
+ u16 max_tries, i;
+ bool cr_done;
+ int ret;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1);
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * 256 iterations is the maximum for 4 lanes and 4 values, so this
+ * loop should exit before 512 iterations.
+ */
+ for (max_tries = 0; max_tries < 512; max_tries++) {
+ ret = zynqmp_dp_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_clock_recovery_delay(dp->dpcd);
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
+ if (ret < 0)
+ return ret;
+
+ cr_done = drm_dp_clock_recovery_ok(link_status, lane_cnt);
+ if (cr_done)
+ break;
+
+ for (i = 0; i < lane_cnt; i++)
+ if (!(dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED))
+ break;
+ if (i == lane_cnt)
+ break;
+
+ if ((dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == vs)
+ tries++;
+ else
+ tries = 0;
+
+ if (tries == DP_MAX_TRAINING_TRIES)
+ break;
+
+ vs = dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ zynqmp_dp_adjust_train(dp, link_status);
+ }
+
+ if (!cr_done)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_link_train_ce - Train channel equalization
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if channel equalization train is done successfully, or
+ * corresponding error code.
+ */
+static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u32 pat, tries;
+ int ret;
+ bool ce_done;
+
+ if (dp->dpcd[DP_DPCD_REV] >= DP_V1_2 &&
+ dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED)
+ pat = DP_TRAINING_PATTERN_3;
+ else
+ pat = DP_TRAINING_PATTERN_2;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRAINING_PATTERN_SET, pat);
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ pat | DP_LINK_SCRAMBLING_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ for (tries = 0; tries < DP_MAX_TRAINING_TRIES; tries++) {
+ ret = zynqmp_dp_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_channel_eq_delay(dp->dpcd);
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
+ if (ret < 0)
+ return ret;
+
+ ce_done = drm_dp_channel_eq_ok(link_status, lane_cnt);
+ if (ce_done)
+ break;
+
+ zynqmp_dp_adjust_train(dp, link_status);
+ }
+
+ if (!ce_done)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_link_train - Train the link
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if all trains are done successfully, or corresponding error code.
+ */
+static int zynqmp_dp_train(struct zynqmp_dp *dp)
+{
+ u32 reg;
+ u8 bw_code = dp->mode.bw_code;
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u8 aux_lane_cnt = lane_cnt;
+ bool enhanced;
+ int ret;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_LANE_CNT_SET, lane_cnt);
+ enhanced = drm_dp_enhanced_frame_cap(dp->dpcd);
+ if (enhanced) {
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENHANCED_FRAME_EN, 1);
+ aux_lane_cnt |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ }
+
+ if (dp->dpcd[3] & 0x1) {
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_DOWNSPREAD_CTL, 1);
+ drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL,
+ DP_SPREAD_AMP_0_5);
+ } else {
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_DOWNSPREAD_CTL, 0);
+ drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL, 0);
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET, aux_lane_cnt);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to set lane count\n");
+ return ret;
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
+ DP_SET_ANSI_8B10B);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to set ANSI 8B/10B encoding\n");
+ return ret;
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_LINK_BW_SET, bw_code);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to set DP bandwidth\n");
+ return ret;
+ }
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_LINK_BW_SET, bw_code);
+ switch (bw_code) {
+ case DP_LINK_BW_1_62:
+ reg = ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_162;
+ break;
+ case DP_LINK_BW_2_7:
+ reg = ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_270;
+ break;
+ case DP_LINK_BW_5_4:
+ default:
+ reg = ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_540;
+ break;
+ }
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING,
+ reg);
+ ret = zynqmp_dp_phy_ready(dp);
+ if (ret < 0)
+ return ret;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_SCRAMBLING_DISABLE, 1);
+ memset(dp->train_set, 0, sizeof(dp->train_set));
+ ret = zynqmp_dp_link_train_cr(dp);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_dp_link_train_ce(dp);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to disable training pattern\n");
+ return ret;
+ }
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_SCRAMBLING_DISABLE, 0);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_train_loop - Downshift the link rate during training
+ * @dp: DisplayPort IP core structure
+ *
+ * Train the link by downshifting the link rate if training is not successful.
+ */
+static void zynqmp_dp_train_loop(struct zynqmp_dp *dp)
+{
+ struct zynqmp_dp_mode *mode = &dp->mode;
+ u8 bw = mode->bw_code;
+ int ret;
+
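+ /*
+ * e.g. a link that fails to train at DP_LINK_BW_5_4 is retried
+ * at 2.7 Gbps and then at 1.62 Gbps before giving up.
+ */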
+ do {
+ if (dp->status == connector_status_disconnected ||
+ !dp->enabled)
+ return;
+
+ ret = zynqmp_dp_train(dp);
+ if (!ret)
+ return;
+
+ ret = zynqmp_dp_mode_configure(dp, mode->pclock, bw);
+ if (ret < 0)
+ goto err_out;
+
+ bw = ret;
+ } while (bw >= DP_LINK_BW_1_62);
+
+err_out:
+ dev_err(dp->dev, "failed to train the DP link\n");
+}
+
+/*
+ * DP Aux functions
+ */
+
+#define AUX_READ_BIT 0x1
+
+/**
+ * zynqmp_dp_aux_cmd_submit - Submit aux command
+ * @dp: DisplayPort IP core structure
+ * @cmd: aux command
+ * @addr: aux address
+ * @buf: buffer for command data
+ * @bytes: number of bytes for @buf
+ * @reply: reply code to be returned
+ *
+ * Submit an aux command. All aux-related commands, native or i2c aux
+ * read/write, are submitted through this function. The function is mapped to
+ * the transfer function of struct drm_dp_aux. It involves multiple register
+ * reads/writes, so synchronization is needed, and it is handled by
+ * drm_dp_helper using @hw_mutex. The calling thread goes to sleep
+ * if there's no immediate reply to the command submission. The reply code is
+ * returned at @reply if @reply != NULL.
+ *
+ * Return: 0 if the command is submitted properly, or corresponding error code:
+ * -EBUSY when there is any request already being processed
+ * -ETIMEDOUT when receiving reply is timed out
+ * -EIO when received bytes are less than requested
+ */
+static int zynqmp_dp_aux_cmd_submit(struct zynqmp_dp *dp, u32 cmd, u16 addr,
+ u8 *buf, u8 bytes, u8 *reply)
+{
+ bool is_read = (cmd & AUX_READ_BIT) ? true : false;
+ void __iomem *iomem = dp->iomem;
+ u32 reg, i;
+
+ reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_INTR_SIGNAL_STATE);
+ if (reg & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REQUEST)
+ return -EBUSY;
+
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUX_ADDRESS, addr);
+ if (!is_read)
+ for (i = 0; i < bytes; i++)
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUX_WRITE_FIFO,
+ buf[i]);
+
+ reg = cmd << ZYNQMP_DP_TX_AUX_COMMAND_CMD_SHIFT;
+ if (!buf || !bytes)
+ reg |= ZYNQMP_DP_TX_AUX_COMMAND_ADDRESS_ONLY;
+ else
+ reg |= (bytes - 1) << ZYNQMP_DP_TX_AUX_COMMAND_BYTES_SHIFT;
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUX_COMMAND, reg);
+
+ /* Wait up to 2 ms for the reply to be delivered */
+ for (i = 0; ; i++) {
+ reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_INTR_SIGNAL_STATE);
+ if (reg & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY)
+ break;
+
+ if (reg & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY_TIMEOUT ||
+ i == 2)
+ return -ETIMEDOUT;
+
+ usleep_range(1000, 1100);
+ }
+
+ reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_AUX_REPLY_CODE);
+ if (reply)
+ *reply = reg;
+
+ if (is_read &&
+ (reg == ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_ACK ||
+ reg == ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_ACK)) {
+ reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_REPLY_DATA_CNT);
+ if ((reg & ZYNQMP_DP_TX_AUX_REPLY_CNT_MASK) != bytes)
+ return -EIO;
+
+ for (i = 0; i < bytes; i++) {
+ buf[i] = zynqmp_dp_read(iomem,
+ ZYNQMP_DP_TX_AUX_REPLY_DATA);
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t
+zynqmp_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct zynqmp_dp *dp = container_of(aux, struct zynqmp_dp, aux);
+ int ret;
+ unsigned int i, iter;
+
+ /* Number of loops = timeout in msec / aux delay (400 usec) */
+ iter = zynqmp_dp_aux_timeout_ms * 1000 / 400;
+ iter = iter ? iter : 1;
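+ /* e.g. the default 50 ms timeout gives 50000 / 400 = 125 attempts */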
+
+ for (i = 0; i < iter; i++) {
+ ret = zynqmp_dp_aux_cmd_submit(dp, msg->request, msg->address,
+ msg->buffer, msg->size,
+ &msg->reply);
+ if (!ret) {
+ dev_dbg(dp->dev, "aux %d retries\n", i);
+ return msg->size;
+ }
+
+ if (dp->status == connector_status_disconnected) {
+ dev_dbg(dp->dev, "no connected aux device\n");
+ return -ENODEV;
+ }
+
+ usleep_range(400, 500);
+ }
+
+ dev_dbg(dp->dev, "failed to do aux transfer (%d)\n", ret);
+
+ return ret;
+}
+
+/**
+ * zynqmp_dp_init_aux - Initialize the DP aux
+ * @dp: DisplayPort IP core structure
+ *
+ * Initialize the DP aux. The aux clock is derived from the APB clock, so
+ * this function gets the APB clock frequency and calculates the filter
+ * value. Additionally, the interrupts and transmitter are enabled.
+ *
+ * Return: 0 on success, error value otherwise
+ */
+static int zynqmp_dp_init_aux(struct zynqmp_dp *dp)
+{
+ unsigned int rate;
+ u32 reg, w;
+
+ rate = zynqmp_disp_get_apb_clk_rate(dp->dpsub->disp);
+ if (rate < ZYNQMP_DP_TX_CLK_DIVIDER_MHZ) {
+ dev_err(dp->dev, "aclk should be higher than 1MHz\n");
+ return -EINVAL;
+ }
+
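+ /*
+ * The filter width is in aclk cycles, so the pulse width is w / rate;
+ * e.g. a 100 MHz aclk selects w = 40 for a 0.4 usec pulse.
+ */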
+ /* Allowable values for this register are: 8, 16, 24, 32, 40, 48 */
+ for (w = 8; w <= 48; w += 8) {
+ /* AUX pulse width should be between 0.4 to 0.6 usec */
+ if (w >= (4 * rate / 10000000) &&
+ w <= (6 * rate / 10000000))
+ break;
+ }
+
+ if (w > 48) {
+ dev_err(dp->dev, "aclk frequency too high\n");
+ return -EINVAL;
+ }
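+ /* Program the aux filter width and the aclk divider (aclk rate in MHz) */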
+ reg = w << ZYNQMP_DP_TX_CLK_DIVIDER_AUX_FILTER_SHIFT;
+ reg |= rate / ZYNQMP_DP_TX_CLK_DIVIDER_MHZ;
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_CLK_DIVIDER, reg);
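+ /* Unmask the interrupts in ZYNQMP_DP_TX_INTR_ALL and mask the rest */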
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_EN,
+ ZYNQMP_DP_TX_INTR_ALL);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS,
+ ZYNQMP_DP_TX_NO_INTR_ALL);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 1);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_exit_aux - De-initialize the DP aux
+ * @dp: DisplayPort IP core structure
+ *
+ * De-initialize the DP aux. Disable all interrupts which are enabled
+ * through aux initialization, as well as the transmitter.
+ */
+static void zynqmp_dp_exit_aux(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 0);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS, 0xffffffff);
+}
+
+/*
+ * Generic DP functions
+ */
+
+/**
+ * zynqmp_dp_update_misc - Write the misc registers
+ * @dp: DisplayPort IP core structure
+ *
+ * The misc register values are stored in the structure, and this
+ * function applies those values to the registers.
+ */
+static void zynqmp_dp_update_misc(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_MAIN_STREAM_MISC0,
+ dp->config.misc0);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_MAIN_STREAM_MISC1,
+ dp->config.misc1);
+}
+
+/**
+ * zynqmp_dp_set_sync_mode - Set the sync mode bit in the software misc state
+ * @dp: DisplayPort IP core structure
+ * @mode: flag if the sync mode should be on or off
+ *
+ * Set the bit in software misc state. To apply to hardware,
+ * zynqmp_dp_update_misc() should be called.
+ */
+static void zynqmp_dp_set_sync_mode(struct zynqmp_dp *dp, bool mode)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
+ if (mode)
+ config->misc0 |= ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC;
+ else
+ config->misc0 &= ~ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC;
+}
+
+/**
+ * zynqmp_dp_get_sync_mode - Get the sync mode state
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: true if the sync mode is on, false otherwise
+ */
+static bool zynqmp_dp_get_sync_mode(struct zynqmp_dp *dp)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
+ return !!(config->misc0 & ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC);
+}
+
+/**
+ * zynqmp_dp_set_bpc - Set bpc value in software misc state
+ * @dp: DisplayPort IP core structure
+ * @bpc: bits per component
+ *
+ * Return: 0 on success, or the fallback bpc value
+ */
+static u8 zynqmp_dp_set_bpc(struct zynqmp_dp *dp, u8 bpc)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+ u8 ret = 0;
+
+ if (dp->connector.display_info.bpc &&
+ dp->connector.display_info.bpc != bpc) {
+ dev_err(dp->dev, "requested bpc (%u) != display info (%u)\n",
+ bpc, dp->connector.display_info.bpc);
+ bpc = dp->connector.display_info.bpc;
+ }
+
+ config->misc0 &= ~ZYNQMP_DP_MISC0_BPC_MASK;
+ switch (bpc) {
+ case 6:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_6;
+ break;
+ case 8:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_8;
+ break;
+ case 10:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_10;
+ break;
+ case 12:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_12;
+ break;
+ case 16:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_16;
+ break;
+ default:
+ dev_err(dp->dev, "Not supported bpc (%u). fall back to 8bpc\n",
+ bpc);
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_8;
+ ret = 8;
+ break;
+ }
+ config->bpc = bpc;
+ zynqmp_dp_update_bpp(dp);
+
+ return ret;
+}
+
+/**
+ * zynqmp_dp_get_bpc - Get the bpc value from the software state
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: current bpc value
+ */
+static u8 zynqmp_dp_get_bpc(struct zynqmp_dp *dp)
+{
+ return dp->config.bpc;
+}
+
+/**
+ * zynqmp_dp_encoder_mode_set_transfer_unit - Set the transfer unit values
+ * @dp: DisplayPort IP core structure
+ * @mode: requested display mode
+ *
+ * Set the transfer unit and calculate all transfer-unit-size-related values.
+ * Calculation is based on DP and IP core specification.
+ */
+static void
+zynqmp_dp_encoder_mode_set_transfer_unit(struct zynqmp_dp *dp,
+ struct drm_display_mode *mode)
+{
+ u32 tu = ZYNQMP_DP_TX_DEF_TRANSFER_UNIT_SIZE;
+ u32 bw, vid_kbytes, avg_bytes_per_tu, init_wait;
+
+ /* Use the max transfer unit size (default) */
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRANSFER_UNIT_SIZE, tu);
+
+ vid_kbytes = mode->clock * (dp->config.bpp / 8);
+ bw = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
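+ /*
+ * vid_kbytes is in kbytes/s and the total link bandwidth in Mbytes/s,
+ * so avg_bytes_per_tu is in units of 1/1000 byte per transfer unit;
+ * the integer and fractional parts go to separate registers below.
+ */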
+ avg_bytes_per_tu = vid_kbytes * tu / (dp->mode.lane_cnt * bw / 1000);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_MIN_BYTES_PER_TU,
+ avg_bytes_per_tu / 1000);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_FRAC_BYTES_PER_TU,
+ avg_bytes_per_tu % 1000);
+
+ /* Configure the initial wait cycle based on transfer unit size */
+ if (tu < (avg_bytes_per_tu / 1000))
+ init_wait = 0;
+ else if ((avg_bytes_per_tu / 1000) <= 4)
+ init_wait = tu;
+ else
+ init_wait = tu - avg_bytes_per_tu / 1000;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_INIT_WAIT, init_wait);
+}
+
+/**
+ * zynqmp_dp_encoder_mode_set_stream - Configure the main stream
+ * @dp: DisplayPort IP core structure
+ * @mode: requested display mode
+ *
+ * Configure the main stream based on the requested mode @mode. Calculation is
+ * based on IP core specification.
+ */
+void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
+ struct drm_display_mode *mode)
+{
+ void __iomem *iomem = dp->iomem;
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u32 reg, wpl;
+ unsigned int rate;
+
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HTOTAL, mode->htotal);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VTOTAL, mode->vtotal);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_POLARITY,
+ (!!(mode->flags & DRM_MODE_FLAG_PVSYNC) <<
+ ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_VSYNC_SHIFT) |
+ (!!(mode->flags & DRM_MODE_FLAG_PHSYNC) <<
+ ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_HSYNC_SHIFT));
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HSWIDTH,
+ mode->hsync_end - mode->hsync_start);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VSWIDTH,
+ mode->vsync_end - mode->vsync_start);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HRES, mode->hdisplay);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VRES, mode->vdisplay);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HSTART,
+ mode->htotal - mode->hsync_start);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VSTART,
+ mode->vtotal - mode->vsync_start);
+
+ /* In synchronous mode, set the dividers */
+ if (dp->config.misc0 & ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC) {
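+ /*
+ * With a synchronous stream clock, exact M/N values can be
+ * programmed: Mvid/Nvid = pixel clock / link clock, both in kHz.
+ */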
+ reg = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_N_VID, reg);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_M_VID, mode->clock);
+ rate = zynqmp_disp_get_aud_clk_rate(dp->dpsub->disp);
+ if (rate) {
+ dev_dbg(dp->dev, "Audio rate: %d\n", rate / 512);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_N_AUD, reg);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_M_AUD,
+ rate / 1000);
+ }
+ }
+
+ /* Only 2-channel audio is supported for now */
+ if (zynqmp_disp_aud_enabled(dp->dpsub->disp))
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_CHANNELS, 1);
+
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_USER_PIXEL_WIDTH, 1);
+
+ /* Translate to the native 16 bit datapath based on IP core spec */
+ wpl = (mode->hdisplay * dp->config.bpp + 15) / 16;
+ reg = wpl + wpl % lane_cnt - lane_cnt;
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_USER_DATA_CNT_PER_LANE, reg);
+}
+
+/*
+ * DRM connector functions
+ */
+
+static enum drm_connector_status
+zynqmp_dp_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+ struct zynqmp_dp_link_config *link_config = &dp->link_config;
+ u32 state, i;
+ int ret;
+
+ /*
+ * This is a heuristic: some monitors take 100 to 500 ms to assert
+ * the HPD signal, so poll for up to one second.
+ */
+ for (i = 0; i < 10; i++) {
+ state = zynqmp_dp_read(dp->iomem,
+ ZYNQMP_DP_TX_INTR_SIGNAL_STATE);
+ if (state & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_HPD)
+ break;
+ msleep(100);
+ }
+
+ if (state & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_HPD) {
+ dp->status = connector_status_connected;
+ ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
+ sizeof(dp->dpcd));
+ if (ret < 0) {
+ dev_dbg(dp->dev, "DPCD read first try fails");
+ ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
+ sizeof(dp->dpcd));
+ if (ret < 0) {
+ dev_dbg(dp->dev, "DPCD read retry fails");
+ goto disconnected;
+ }
+ }
+
+ link_config->max_rate = min_t(int,
+ drm_dp_max_link_rate(dp->dpcd),
+ DP_HIGH_BIT_RATE2);
+ link_config->max_lanes = min_t(u8,
+ drm_dp_max_lane_count(dp->dpcd),
+ dp->num_lanes);
+
+ return connector_status_connected;
+ }
+
+disconnected:
+ dp->status = connector_status_disconnected;
+ return connector_status_disconnected;
+}
+
+static int zynqmp_dp_connector_get_modes(struct drm_connector *connector)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, &dp->aux.ddc);
+ if (!edid)
+ return 0;
+
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
+
+static struct drm_encoder *
+zynqmp_dp_connector_best_encoder(struct drm_connector *connector)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+
+ return &dp->encoder;
+}
+
+static int zynqmp_dp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 bpp = dp->config.bpp;
+ int max_rate = dp->link_config.max_rate;
+ int rate;
+
+ if (mode->clock > ZYNQMP_MAX_FREQ) {
+ dev_dbg(dp->dev, "filtered the mode, %s,for high pixel rate\n",
+ mode->name);
+ drm_mode_debug_printmodeline(mode);
+ return MODE_CLOCK_HIGH;
+ }
+
+ /* Check with link rate and lane count */
+ rate = zynqmp_dp_max_rate(max_rate, max_lanes, bpp);
+ if (mode->clock > rate) {
+ dev_dbg(dp->dev, "filtered the mode, %s,for high pixel rate\n",
+ mode->name);
+ drm_mode_debug_printmodeline(mode);
+ return MODE_CLOCK_HIGH;
+ }
+
+ return MODE_OK;
+}
+
+static void zynqmp_dp_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static int
+zynqmp_dp_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+ int ret = 0;
+
+ if (property == dp->sync_prop) {
+ zynqmp_dp_set_sync_mode(dp, val);
+ } else if (property == dp->bpc_prop) {
+ u8 bpc;
+
+ bpc = zynqmp_dp_set_bpc(dp, val);
+ if (bpc) {
+ drm_object_property_set_value(&connector->base,
+ property, bpc);
+ ret = -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int
+zynqmp_dp_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+
+ if (property == dp->sync_prop)
+ *val = zynqmp_dp_get_sync_mode(dp);
+ else if (property == dp->bpc_prop)
+ *val = zynqmp_dp_get_bpc(dp);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct drm_connector_funcs zynqmp_dp_connector_funcs = {
+ .detect = zynqmp_dp_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = zynqmp_dp_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_set_property = zynqmp_dp_connector_atomic_set_property,
+ .atomic_get_property = zynqmp_dp_connector_atomic_get_property,
+};
+
+static const struct drm_connector_helper_funcs zynqmp_dp_connector_helper_funcs = {
+ .get_modes = zynqmp_dp_connector_get_modes,
+ .best_encoder = zynqmp_dp_connector_best_encoder,
+ .mode_valid = zynqmp_dp_connector_mode_valid,
+};
+
+/*
+ * DRM encoder functions
+ */
+
+static void zynqmp_dp_encoder_enable(struct drm_encoder *encoder)
+{
+ struct zynqmp_dp *dp = encoder_to_dp(encoder);
+ void __iomem *iomem = dp->iomem;
+ unsigned int i;
+ int ret = 0;
+
+ pm_runtime_get_sync(dp->dev);
+ dp->enabled = true;
+ zynqmp_dp_init_aux(dp);
+ zynqmp_dp_update_misc(dp);
+ if (zynqmp_disp_aud_enabled(dp->dpsub->disp))
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_CONTROL, 1);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_PHY_POWER_DOWN, 0);
+ if (dp->status == connector_status_connected) {
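+ /* drm_dp_dpcd_writeb() returns 1, the number of bytes written, on success */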
+ for (i = 0; i < 3; i++) {
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ usleep_range(300, 500);
+ }
+ /* Some monitors take time to wake up properly */
+ msleep(zynqmp_dp_power_on_delay_ms);
+ }
+ if (ret != 1)
+ dev_dbg(dp->dev, "DP aux failed\n");
+ else
+ zynqmp_dp_train_loop(dp);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_SW_RESET,
+ ZYNQMP_DP_TX_SW_RESET_ALL);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_ENABLE_MAIN_STREAM, 1);
+}
+
+static void zynqmp_dp_encoder_disable(struct drm_encoder *encoder)
+{
+ struct zynqmp_dp *dp = encoder_to_dp(encoder);
+ void __iomem *iomem = dp->iomem;
+
+ dp->enabled = false;
+ cancel_delayed_work(&dp->hpd_work);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_ENABLE_MAIN_STREAM, 0);
+ drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_PHY_POWER_DOWN,
+ ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
+ if (zynqmp_disp_aud_enabled(dp->dpsub->disp))
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_CONTROL, 0);
+ pm_runtime_put_sync(dp->dev);
+}
+
+static void
+zynqmp_dp_encoder_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *connector_state)
+{
+ struct zynqmp_dp *dp = encoder_to_dp(encoder);
+ struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 bpp = dp->config.bpp;
+ int rate, max_rate = dp->link_config.max_rate;
+ int ret;
+
+ /* Check again as bpp or format might have been changed */
+ rate = zynqmp_dp_max_rate(max_rate, max_lanes, bpp);
+ if (mode->clock > rate) {
+ dev_err(dp->dev, "the mode, %s,has too high pixel rate\n",
+ mode->name);
+ drm_mode_debug_printmodeline(mode);
+ }
+
+ ret = zynqmp_dp_mode_configure(dp, adjusted_mode->clock, 0);
+ if (ret < 0)
+ return;
+
+ zynqmp_dp_encoder_mode_set_transfer_unit(dp, adjusted_mode);
+}
+
+#define ZYNQMP_DP_MIN_H_BACKPORCH 20
+
+static int
+zynqmp_dp_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ int diff = mode->htotal - mode->hsync_end;
+
+ /*
+ * The ZynqMP DP requires a horizontal backporch of at least
+ * ZYNQMP_DP_MIN_H_BACKPORCH (20). This limitation may not be
+ * compatible with the sink device.
+ */
+ if (diff < ZYNQMP_DP_MIN_H_BACKPORCH) {
+ int vrefresh = (adjusted_mode->clock * 1000) /
+ (adjusted_mode->vtotal * adjusted_mode->htotal);
+
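+ /*
+ * Stretch htotal to reach the minimum backporch and scale the
+ * pixel clock to preserve the refresh rate.
+ */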
+ dev_dbg(encoder->dev->dev, "hbackporch adjusted: %d to %d\n",
+ diff, ZYNQMP_DP_MIN_H_BACKPORCH);
+ diff = ZYNQMP_DP_MIN_H_BACKPORCH - diff;
+ adjusted_mode->htotal += diff;
+ adjusted_mode->clock = adjusted_mode->vtotal *
+ adjusted_mode->htotal * vrefresh / 1000;
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs zynqmp_dp_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_encoder_helper_funcs zynqmp_dp_encoder_helper_funcs = {
+ .enable = zynqmp_dp_encoder_enable,
+ .disable = zynqmp_dp_encoder_disable,
+ .atomic_mode_set = zynqmp_dp_encoder_atomic_mode_set,
+ .atomic_check = zynqmp_dp_encoder_atomic_check,
+};
+
+/*
+ * Component functions
+ */
+
+static void zynqmp_dp_hpd_work_func(struct work_struct *work)
+{
+ struct zynqmp_dp *dp;
+
+ dp = container_of(work, struct zynqmp_dp, hpd_work.work);
+
+ if (dp->drm)
+ drm_helper_hpd_irq_event(dp->drm);
+}
+
+static struct drm_prop_enum_list zynqmp_dp_bpc_enum[] = {
+ { 6, "6BPC" },
+ { 8, "8BPC" },
+ { 10, "10BPC" },
+ { 12, "12BPC" },
+};
+
+int zynqmp_dp_bind(struct device *dev, struct device *master, void *data)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+ struct zynqmp_dp *dp = dpsub->dp;
+ struct drm_encoder *encoder = &dp->encoder;
+ struct drm_connector *connector = &dp->connector;
+ struct drm_device *drm = data;
+ struct device_node *port;
+ int ret;
+
+ if (!dp->num_lanes)
+ return 0;
+
+ encoder->possible_crtcs |= zynqmp_disp_get_crtc_mask(dpsub->disp);
+ for_each_child_of_node(dev->of_node, port) {
+ if (!port->name || of_node_cmp(port->name, "port"))
+ continue;
+ encoder->possible_crtcs |= drm_of_find_possible_crtcs(drm,
+ port);
+ }
+ drm_encoder_init(drm, encoder, &zynqmp_dp_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ drm_encoder_helper_add(encoder, &zynqmp_dp_encoder_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ ret = drm_connector_init(encoder->dev, connector,
+ &zynqmp_dp_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ dev_err(dp->dev, "failed to initialize the drm connector");
+ goto error_encoder;
+ }
+
+ drm_connector_helper_add(connector, &zynqmp_dp_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ connector->dpms = DRM_MODE_DPMS_OFF;
+
+ dp->drm = drm;
+ dp->sync_prop = drm_property_create_bool(drm, 0, "sync");
+ dp->bpc_prop = drm_property_create_enum(drm, 0, "bpc",
+ zynqmp_dp_bpc_enum,
+ ARRAY_SIZE(zynqmp_dp_bpc_enum));
+
+ dp->config.misc0 &= ~ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC;
+ drm_object_attach_property(&connector->base, dp->sync_prop, false);
+ ret = zynqmp_dp_set_bpc(dp, 8);
+ drm_object_attach_property(&connector->base, dp->bpc_prop,
+ ret ? ret : 8);
+ zynqmp_dp_update_bpp(dp);
+
+ INIT_DELAYED_WORK(&dp->hpd_work, zynqmp_dp_hpd_work_func);
+
+ /* This enables interrupts, so should be called after DRM init */
+ ret = zynqmp_dp_init_aux(dp);
+ if (ret) {
+ dev_err(dp->dev, "failed to initialize DP aux");
+ goto error_prop;
+ }
+
+ return 0;
+
+error_prop:
+ drm_property_destroy(dp->drm, dp->bpc_prop);
+ drm_property_destroy(dp->drm, dp->sync_prop);
+ zynqmp_dp_connector_destroy(&dp->connector);
+error_encoder:
+ drm_encoder_cleanup(&dp->encoder);
+ return ret;
+}
+
+void zynqmp_dp_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+ struct zynqmp_dp *dp = dpsub->dp;
+
+ disable_irq(dp->irq);
+ if (!dp->num_lanes)
+ return;
+
+ cancel_delayed_work_sync(&dp->hpd_work);
+ zynqmp_dp_exit_aux(dp);
+ drm_property_destroy(dp->drm, dp->bpc_prop);
+ drm_property_destroy(dp->drm, dp->sync_prop);
+ zynqmp_dp_connector_destroy(&dp->connector);
+ drm_encoder_cleanup(&dp->encoder);
+}
+
+/*
+ * Platform functions
+ */
+
+static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
+{
+ struct zynqmp_dp *dp = (struct zynqmp_dp *)data;
+ u32 status, mask;
+
+ status = zynqmp_dp_read(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_STATUS);
+ mask = zynqmp_dp_read(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_MASK);
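+ /* Only consider interrupt bits that are both raised and unmasked */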
+ if (!(status & ~mask))
+ return IRQ_NONE;
+
+ /* Log for diagnostics; there is not much the driver can do */
+ if (status & ZYNQMP_DP_TX_INTR_CHBUF_UNDERFLW_MASK)
+ dev_dbg_ratelimited(dp->dev, "underflow interrupt\n");
+ if (status & ZYNQMP_DP_TX_INTR_CHBUF_OVERFLW_MASK)
+ dev_dbg_ratelimited(dp->dev, "overflow interrupt\n");
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_STATUS, status);
+
+ /* The DP vblank interrupt is not enabled when a remote CRTC device is used */
+ if (status & ZYNQMP_DP_TX_INTR_VBLANK_START)
+ zynqmp_disp_handle_vblank(dp->dpsub->disp);
+
+ if (status & ZYNQMP_DP_TX_INTR_HPD_EVENT)
+ schedule_delayed_work(&dp->hpd_work, 0);
+
+ if (status & ZYNQMP_DP_TX_INTR_HPD_IRQ) {
+ int ret;
+ u8 link_status[DP_LINK_STATUS_SIZE + 2];
+
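+ /*
+ * Reading DP_LINK_STATUS_SIZE + 2 bytes starting at DP_SINK_COUNT
+ * (0x200) also covers the link status registers, so link_status[2]
+ * is the first lane status byte and link_status[4] holds
+ * DP_LINK_STATUS_UPDATED.
+ */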
+ ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
+ DP_LINK_STATUS_SIZE + 2);
+ if (ret < 0)
+ goto handled;
+
+ if (link_status[4] & DP_LINK_STATUS_UPDATED ||
+ !drm_dp_clock_recovery_ok(&link_status[2], dp->mode.lane_cnt) ||
+ !drm_dp_channel_eq_ok(&link_status[2], dp->mode.lane_cnt)) {
+ zynqmp_dp_train_loop(dp);
+ }
+ }
+
+handled:
+ return IRQ_HANDLED;
+}
+
+int zynqmp_dp_probe(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub;
+ struct zynqmp_dp *dp;
+ struct resource *res;
+ unsigned int i;
+ int irq, ret;
+
+ dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->dpms = DRM_MODE_DPMS_OFF;
+ dp->status = connector_status_disconnected;
+ dp->dev = &pdev->dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp");
+ dp->iomem = devm_ioremap_resource(dp->dev, res);
+ if (IS_ERR(dp->iomem))
+ return PTR_ERR(dp->iomem);
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_PHY_POWER_DOWN,
+ ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
+ zynqmp_dp_set(dp->iomem, ZYNQMP_DP_TX_PHY_CONFIG,
+ ZYNQMP_DP_TX_PHY_CONFIG_ALL_RESET);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_FORCE_SCRAMBLER_RESET, 1);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 0);
+
+ dp->num_lanes = 2;
+ for (i = 0; i < ZYNQMP_DP_MAX_LANES; i++) {
+ char phy_name[16];
+
+ snprintf(phy_name, sizeof(phy_name), "dp-phy%d", i);
+ dp->phy[i] = devm_phy_get(dp->dev, phy_name);
+ if (IS_ERR(dp->phy[i])) {
+ ret = PTR_ERR(dp->phy[i]);
+ dp->phy[i] = NULL;
+
+ /* 2nd lane is optional */
+ if (i == 1 && ret == -ENODEV) {
+ dp->num_lanes = 1;
+ break;
+ }
+
+ /*
+ * If no phy lane is assigned, the DP Tx gets disabled. The display
+ * part of the DP subsystem can still be used to drive the output
+ * to the FPGA, so let the DP subsystem driver proceed without
+ * this DP Tx.
+ */
+ if (i == 0 && ret == -ENODEV) {
+ dp->num_lanes = 0;
+ goto out;
+ }
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dp->dev, "failed to get phy lane\n");
+
+ return ret;
+ }
+ }
+
+ ret = zynqmp_dp_init_phy(dp);
+ if (ret)
+ goto error_phy;
+
+ dp->aux.name = "ZynqMP DP AUX";
+ dp->aux.dev = dp->dev;
+ dp->aux.transfer = zynqmp_dp_aux_transfer;
+ ret = drm_dp_aux_register(&dp->aux);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to initialize DP aux\n");
+ goto error;
+ }
+
+out:
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto error;
+ }
+
+ ret = devm_request_threaded_irq(dp->dev, irq, NULL,
+ zynqmp_dp_irq_handler, IRQF_ONESHOT,
+ dev_name(dp->dev), dp);
+ if (ret < 0)
+ goto error;
+ dp->irq = irq;
+
+ dpsub = platform_get_drvdata(pdev);
+ dpsub->dp = dp;
+ dp->dpsub = dpsub;
+
+ dev_dbg(dp->dev,
+ "ZynqMP DisplayPort Tx driver probed with %u phy lanes\n",
+ dp->num_lanes);
+
+ return 0;
+
+error:
+ drm_dp_aux_unregister(&dp->aux);
+error_phy:
+ zynqmp_dp_exit_phy(dp);
+ return ret;
+}
+
+int zynqmp_dp_remove(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+ struct zynqmp_dp *dp = dpsub->dp;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 0);
+ drm_dp_aux_unregister(&dp->aux);
+ zynqmp_dp_exit_phy(dp);
+ dpsub->dp = NULL;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.h b/drivers/gpu/drm/xlnx/zynqmp_dp.h
new file mode 100644
index 000000000000..2f6ce3f3e8cf
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ZynqMP DisplayPort Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ZYNQMP_DP_H_
+#define _ZYNQMP_DP_H_
+
+struct device;
+struct drm_display_mode;
+struct platform_device;
+struct zynqmp_dp;
+
+const int zynqmp_dp_set_color(struct zynqmp_dp *dp, const char *color);
+void zynqmp_dp_enable_vblank(struct zynqmp_dp *dp);
+void zynqmp_dp_disable_vblank(struct zynqmp_dp *dp);
+void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
+ struct drm_display_mode *mode);
+void __maybe_unused zynqmp_dp_pm_suspend(struct zynqmp_dp *dp);
+void __maybe_unused zynqmp_dp_pm_resume(struct zynqmp_dp *dp);
+int zynqmp_dp_bind(struct device *dev, struct device *master, void *data);
+void zynqmp_dp_unbind(struct device *dev, struct device *master, void *data);
+
+int zynqmp_dp_probe(struct platform_device *pdev);
+int zynqmp_dp_remove(struct platform_device *pdev);
+
+#endif /* _ZYNQMP_DP_H_ */
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
new file mode 100644
index 000000000000..9b3545348f7b
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP DP Subsystem Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "xlnx_drv.h"
+
+#include "zynqmp_disp.h"
+#include "zynqmp_dp.h"
+#include "zynqmp_dpsub.h"
+
+static int
+zynqmp_dpsub_bind(struct device *dev, struct device *master, void *data)
+{
+ int ret;
+
+ ret = zynqmp_disp_bind(dev, master, data);
+ if (ret)
+ return ret;
+
+ /* zynqmp_disp must bind first so that the zynqmp_dp encoder can find its CRTC */
+ ret = zynqmp_dp_bind(dev, master, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+zynqmp_dpsub_unbind(struct device *dev, struct device *master, void *data)
+{
+ zynqmp_dp_unbind(dev, master, data);
+ zynqmp_disp_unbind(dev, master, data);
+}
+
+static const struct component_ops zynqmp_dpsub_component_ops = {
+ .bind = zynqmp_dpsub_bind,
+ .unbind = zynqmp_dpsub_unbind,
+};
+
+static int zynqmp_dpsub_probe(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub;
+ int ret;
+
+ dpsub = devm_kzalloc(&pdev->dev, sizeof(*dpsub), GFP_KERNEL);
+ if (!dpsub)
+ return -ENOMEM;
+
+ /* Sub-drivers access dpsub through drvdata */
+ platform_set_drvdata(pdev, dpsub);
+ pm_runtime_enable(&pdev->dev);
+
+ /*
+ * DP should be probed first so that the zynqmp_disp can set the output
+ * format accordingly.
+ */
+ ret = zynqmp_dp_probe(pdev);
+ if (ret)
+ goto err_pm;
+
+ ret = zynqmp_disp_probe(pdev);
+ if (ret)
+ goto err_dp;
+
+ ret = component_add(&pdev->dev, &zynqmp_dpsub_component_ops);
+ if (ret)
+ goto err_disp;
+
+ /* Try the reserved memory. Proceed if there's none */
+ of_reserved_mem_device_init(&pdev->dev);
+
+ /* Populate the sound child nodes */
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to populate child nodes\n");
+ goto err_rmem;
+ }
+
+ dpsub->master = xlnx_drm_pipeline_init(pdev);
+ if (IS_ERR(dpsub->master)) {
+ ret = PTR_ERR(dpsub->master);
+ dev_err(&pdev->dev, "failed to initialize the drm pipeline\n");
+ goto err_populate;
+ }
+
+ dev_info(&pdev->dev, "ZynqMP DisplayPort Subsystem driver probed");
+
+ return 0;
+
+err_populate:
+ of_platform_depopulate(&pdev->dev);
+err_rmem:
+ of_reserved_mem_device_release(&pdev->dev);
+ component_del(&pdev->dev, &zynqmp_dpsub_component_ops);
+err_disp:
+ zynqmp_disp_remove(pdev);
+err_dp:
+ zynqmp_dp_remove(pdev);
+err_pm:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int zynqmp_dpsub_remove(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+ int err, ret = 0;
+
+ xlnx_drm_pipeline_exit(dpsub->master);
+ of_platform_depopulate(&pdev->dev);
+ of_reserved_mem_device_release(&pdev->dev);
+ component_del(&pdev->dev, &zynqmp_dpsub_component_ops);
+
+ err = zynqmp_disp_remove(pdev);
+ if (err)
+ ret = -EIO;
+
+ err = zynqmp_dp_remove(pdev);
+ if (err)
+ ret = -EIO;
+
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int __maybe_unused zynqmp_dpsub_pm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+
+ zynqmp_dp_pm_suspend(dpsub->dp);
+
+ return 0;
+}
+
+static int __maybe_unused zynqmp_dpsub_pm_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+
+ zynqmp_dp_pm_resume(dpsub->dp);
+
+ return 0;
+}
+
+static const struct dev_pm_ops zynqmp_dpsub_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dpsub_pm_suspend,
+ zynqmp_dpsub_pm_resume)
+};
+
+static const struct of_device_id zynqmp_dpsub_of_match[] = {
+ { .compatible = "xlnx,zynqmp-dpsub-1.7", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, zynqmp_dpsub_of_match);
+
+static struct platform_driver zynqmp_dpsub_driver = {
+ .probe = zynqmp_dpsub_probe,
+ .remove = zynqmp_dpsub_remove,
+ .driver = {
+ .name = "zynqmp-display",
+ .of_match_table = zynqmp_dpsub_of_match,
+ .pm = &zynqmp_dpsub_pm_ops,
+ },
+};
+
+module_platform_driver(zynqmp_dpsub_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("ZynqMP DP Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.h b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
new file mode 100644
index 000000000000..6606beffee15
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ZynqMP DPSUB Subsystem Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ZYNQMP_DPSUB_H_
+#define _ZYNQMP_DPSUB_H_
+
+struct zynqmp_dpsub {
+ struct zynqmp_dp *dp;
+ struct zynqmp_disp *disp;
+ struct platform_device *master;
+};
+
+#endif /* _ZYNQMP_DPSUB_H_ */
diff --git a/drivers/gpu/drm/zocl/Kconfig b/drivers/gpu/drm/zocl/Kconfig
new file mode 100644
index 000000000000..6a54d01cccd1
--- /dev/null
+++ b/drivers/gpu/drm/zocl/Kconfig
@@ -0,0 +1,8 @@
+config DRM_ZOCL
+ tristate "Xilinx Zynq OpenCL"
+ depends on DRM
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ help
+ Xilinx Zynq OpenCL Manager
diff --git a/drivers/gpu/drm/zocl/Makefile b/drivers/gpu/drm/zocl/Makefile
new file mode 100644
index 000000000000..da58e5084f9d
--- /dev/null
+++ b/drivers/gpu/drm/zocl/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+zocl-y := zocl_drv.o zocl_bo.o
+
+obj-$(CONFIG_DRM_ZOCL) += zocl.o
diff --git a/drivers/gpu/drm/zocl/zocl_bo.c b/drivers/gpu/drm/zocl/zocl_bo.c
new file mode 100644
index 000000000000..123a37842ad4
--- /dev/null
+++ b/drivers/gpu/drm/zocl/zocl_bo.c
@@ -0,0 +1,271 @@
+/*
+ * A GEM style CMA-backed memory manager for Zynq-based OpenCL accelerators.
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Authors:
+ * Sonal Santan <sonal.santan@xilinx.com>
+ * Umang Parekh <umang.parekh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/ramfs.h>
+#include <linux/shmem_fs.h>
+#include "zocl_drv.h"
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <linux/zocl_ioctl.h>
+
+static inline void __user *to_user_ptr(u64 address)
+{
+ return (void __user *)(uintptr_t)address;
+}
+
+void zocl_describe(const struct drm_zocl_bo *obj)
+{
+ size_t size_in_kb = obj->base.base.size / 1024;
+ size_t physical_addr = obj->base.paddr;
+
+ DRM_INFO("%p: H[0x%zxKB] D[0x%zx]\n",
+ obj,
+ size_in_kb,
+ physical_addr);
+}
+
+static struct drm_zocl_bo *zocl_create_bo(struct drm_device *dev,
+ uint64_t unaligned_size)
+{
+ size_t size = PAGE_ALIGN(unaligned_size);
+ struct drm_gem_cma_object *cma_obj;
+
+ DRM_DEBUG("%s:%s:%d: %zd\n", __FILE__, __func__, __LINE__, size);
+
+ if (!size)
+ return ERR_PTR(-EINVAL);
+
+ cma_obj = drm_gem_cma_create(dev, size);
+ if (IS_ERR(cma_obj))
+ return ERR_CAST(cma_obj);
+
+ return to_zocl_bo(&cma_obj->base);
+}
+
+int zocl_create_bo_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *filp)
+{
+ int ret;
+ struct drm_zocl_create_bo *args = data;
+ struct drm_zocl_bo *bo;
+
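+ /* Only coherent, CMA-backed buffer objects are supported */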
+ if (((args->flags & DRM_ZOCL_BO_FLAGS_COHERENT) == 0) ||
+ ((args->flags & DRM_ZOCL_BO_FLAGS_CMA) == 0))
+ return -EINVAL;
+
+ bo = zocl_create_bo(dev, args->size);
+ if (IS_ERR(bo)) {
+ DRM_DEBUG("object creation failed\n");
+ return PTR_ERR(bo);
+ }
+ bo->flags |= DRM_ZOCL_BO_FLAGS_COHERENT;
+ bo->flags |= DRM_ZOCL_BO_FLAGS_CMA;
+
+ DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, bo);
+ ret = drm_gem_handle_create(filp, &bo->base.base, &args->handle);
+ if (ret) {
+ drm_gem_cma_free_object(&bo->base.base);
+ DRM_DEBUG("handle creation failed\n");
+ return ret;
+ }
+
+ zocl_describe(bo);
+ drm_gem_object_put_unlocked(&bo->base.base);
+
+ return ret;
+}
+
+int zocl_map_bo_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *filp)
+{
+ struct drm_zocl_map_bo *args = data;
+ struct drm_gem_object *gem_obj;
+
+ DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+ gem_obj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (!gem_obj) {
+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ return -EINVAL;
+ }
+
+ /* The mmap offset was set up at BO allocation time. */
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+ zocl_describe(to_zocl_bo(gem_obj));
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return 0;
+}
+
+int zocl_sync_bo_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *filp)
+{
+ const struct drm_zocl_sync_bo *args = data;
+ struct drm_gem_object *gem_obj = drm_gem_object_lookup(dev, filp,
+ args->handle);
+ void *kaddr;
+ int ret = 0;
+
+ DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+ if (!gem_obj) {
+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ return -EINVAL;
+ }
+
+ if ((args->offset > gem_obj->size) || (args->size > gem_obj->size) ||
+ ((args->offset + args->size) > gem_obj->size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
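+ /* CMA buffers stay kernel-mapped, so this simply returns the existing vaddr */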
+ kaddr = drm_gem_cma_prime_vmap(gem_obj);
+
+ /* only invalidate the range of addresses requested by the user */
+ kaddr += args->offset;
+
+ if (args->dir == DRM_ZOCL_SYNC_BO_TO_DEVICE)
+ flush_kernel_vmap_range(kaddr, args->size);
+ else if (args->dir == DRM_ZOCL_SYNC_BO_FROM_DEVICE)
+ invalidate_kernel_vmap_range(kaddr, args->size);
+ else
+ ret = -EINVAL;
+
+out:
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return ret;
+}
+
+int zocl_info_bo_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *filp)
+{
+ const struct drm_zocl_bo *bo;
+ struct drm_zocl_info_bo *args = data;
+ struct drm_gem_object *gem_obj = drm_gem_object_lookup(dev, filp,
+ args->handle);
+
+ DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+ if (!gem_obj) {
+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ return -EINVAL;
+ }
+
+ bo = to_zocl_bo(gem_obj);
+
+ args->size = bo->base.base.size;
+ args->paddr = bo->base.paddr;
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return 0;
+}
+
+int zocl_pwrite_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ const struct drm_zocl_pwrite_bo *args = data;
+ struct drm_gem_object *gem_obj = drm_gem_object_lookup(dev, filp,
+ args->handle);
+ char __user *user_data = to_user_ptr(args->data_ptr);
+ int ret = 0;
+ void *kaddr;
+
+ DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+ if (!gem_obj) {
+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ return -EINVAL;
+ }
+
+ if ((args->offset > gem_obj->size) || (args->size > gem_obj->size)
+ || ((args->offset + args->size) > gem_obj->size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (args->size == 0) {
+ ret = 0;
+ goto out;
+ }
+
+ if (!access_ok(user_data, args->size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ kaddr = drm_gem_cma_prime_vmap(gem_obj);
+ kaddr += args->offset;
+
+ if (copy_from_user(kaddr, user_data, args->size))
+ ret = -EFAULT;
+out:
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return ret;
+}
+
+int zocl_pread_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ const struct drm_zocl_pread_bo *args = data;
+ struct drm_gem_object *gem_obj = drm_gem_object_lookup(dev, filp,
+ args->handle);
+ char __user *user_data = to_user_ptr(args->data_ptr);
+ int ret = 0;
+ void *kaddr;
+
+ DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+ if (!gem_obj) {
+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ return -EINVAL;
+ }
+
+ if ((args->offset > gem_obj->size) || (args->size > gem_obj->size)
+ || ((args->offset + args->size) > gem_obj->size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (args->size == 0) {
+ ret = 0;
+ goto out;
+ }
+
+ if (!access_ok(user_data, args->size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ kaddr = drm_gem_cma_prime_vmap(gem_obj);
+ kaddr += args->offset;
+
+ if (copy_to_user(user_data, kaddr, args->size))
+ ret = -EFAULT;
+
+out:
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/zocl/zocl_drv.c b/drivers/gpu/drm/zocl/zocl_drv.c
new file mode 100644
index 000000000000..c14cce5b245c
--- /dev/null
+++ b/drivers/gpu/drm/zocl/zocl_drv.c
@@ -0,0 +1,216 @@
+/*
+ * A GEM style CMA-backed memory manager for Zynq-based OpenCL accelerators.
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Authors:
+ * Sonal Santan <sonal.santan@xilinx.com>
+ * Umang Parekh <umang.parekh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/ramfs.h>
+#include <linux/shmem_fs.h>
+#include "zocl_drv.h"
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <linux/zocl_ioctl.h>
+
+#define ZOCL_DRIVER_NAME "zocl"
+#define ZOCL_DRIVER_DESC "Zynq BO manager"
+#define ZOCL_DRIVER_DATE "20161024"
+#define ZOCL_DRIVER_MAJOR 2016
+#define ZOCL_DRIVER_MINOR 3
+#define ZOCL_DRIVER_PATCHLEVEL 1
+#define ZOCL_FILE_PAGE_OFFSET 0x00100000
+
+#ifndef VM_RESERVED
+#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
+#endif
+
+static const struct vm_operations_struct reg_physical_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys,
+#endif
+};
+
+static int zocl_drm_load(struct drm_device *drm, unsigned long flags)
+{
+ struct platform_device *pdev;
+ struct resource *res;
+ struct drm_zocl_dev *zdev;
+ void __iomem *map;
+
+ pdev = to_platform_device(drm->dev);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ map = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(map)) {
+ DRM_ERROR("Failed to map registers: %ld\n", PTR_ERR(map));
+ return PTR_ERR(map);
+ }
+
+ zdev = devm_kzalloc(drm->dev, sizeof(*zdev), GFP_KERNEL);
+ if (!zdev)
+ return -ENOMEM;
+
+ zdev->ddev = drm;
+ drm->dev_private = zdev;
+ zdev->regs = map;
+ zdev->res_start = res->start;
+ zdev->res_len = resource_size(res);
+ platform_set_drvdata(pdev, zdev);
+
+ return 0;
+}
+
+static int zocl_drm_unload(struct drm_device *drm)
+{
+ return 0;
+}
+
+static void zocl_free_object(struct drm_gem_object *obj)
+{
+ struct drm_zocl_bo *zocl_obj = to_zocl_bo(obj);
+
+ DRM_INFO("Freeing BO\n");
+ zocl_describe(zocl_obj);
+ drm_gem_cma_free_object(obj);
+}
+
+static int zocl_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_zocl_dev *zdev = dev->dev_private;
+ unsigned long vsize;
+ int rc;
+
+ /*
+ * Page offsets at or above ZOCL_FILE_PAGE_OFFSET (4 GiB with 4 KiB
+ * pages) are GEM mmap offsets and are handled by GEM; only offsets
+ * below that map the register space.
+ */
+ if (likely(vma->vm_pgoff >= ZOCL_FILE_PAGE_OFFSET))
+ return drm_gem_cma_mmap(filp, vma);
+
+ if (vma->vm_pgoff != 0)
+ return -EINVAL;
+
+ vsize = vma->vm_end - vma->vm_start;
+ if (vsize > zdev->res_len)
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_IO;
+ vma->vm_flags |= VM_RESERVED;
+
+ vma->vm_ops = &reg_physical_vm_ops;
+ rc = io_remap_pfn_range(vma, vma->vm_start,
+ zdev->res_start >> PAGE_SHIFT,
+ vsize, vma->vm_page_prot);
+
+ return rc;
+}
+
+static const struct drm_ioctl_desc zocl_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(ZOCL_CREATE_BO, zocl_create_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_MAP_BO, zocl_map_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_SYNC_BO, zocl_sync_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_INFO_BO, zocl_info_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_PWRITE_BO, zocl_pwrite_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_PREAD_BO, zocl_pread_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+};
+
+static const struct file_operations zocl_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = zocl_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+};
+
+static struct drm_driver zocl_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_RENDER,
+ .load = zocl_drm_load,
+ .unload = zocl_drm_unload,
+ .gem_free_object = zocl_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .ioctls = zocl_ioctls,
+ .num_ioctls = ARRAY_SIZE(zocl_ioctls),
+ .fops = &zocl_driver_fops,
+ .name = ZOCL_DRIVER_NAME,
+ .desc = ZOCL_DRIVER_DESC,
+ .date = ZOCL_DRIVER_DATE,
+ .major = ZOCL_DRIVER_MAJOR,
+ .minor = ZOCL_DRIVER_MINOR,
+ .patchlevel = ZOCL_DRIVER_PATCHLEVEL,
+};
+
+/* init xilinx opencl drm platform */
+static int zocl_drm_platform_probe(struct platform_device *pdev)
+{
+ return drm_platform_init(&zocl_driver, pdev);
+}
+
+/* exit xilinx opencl drm platform */
+static int zocl_drm_platform_remove(struct platform_device *pdev)
+{
+ struct drm_zocl_dev *zdev = platform_get_drvdata(pdev);
+
+ if (zdev->ddev) {
+ drm_dev_unregister(zdev->ddev);
+ drm_dev_put(zdev->ddev);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id zocl_drm_of_match[] = {
+ { .compatible = "xlnx,zocl", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, zocl_drm_of_match);
+
+static struct platform_driver zocl_drm_private_driver = {
+ .probe = zocl_drm_platform_probe,
+ .remove = zocl_drm_platform_remove,
+ .driver = {
+ .name = "zocl-drm",
+ .of_match_table = zocl_drm_of_match,
+ },
+};
+
+module_platform_driver(zocl_drm_private_driver);
+
+MODULE_VERSION(__stringify(ZOCL_DRIVER_MAJOR) "."
+ __stringify(ZOCL_DRIVER_MINOR) "."
+ __stringify(ZOCL_DRIVER_PATCHLEVEL));
+
+MODULE_DESCRIPTION(ZOCL_DRIVER_DESC);
+MODULE_AUTHOR("Sonal Santan <sonal.santan@xilinx.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/zocl/zocl_drv.h b/drivers/gpu/drm/zocl/zocl_drv.h
new file mode 100644
index 000000000000..ef6a9acadfc1
--- /dev/null
+++ b/drivers/gpu/drm/zocl/zocl_drv.h
@@ -0,0 +1,59 @@
+/*
+ * A GEM style CMA-backed memory manager for Zynq-based OpenCL accelerators.
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Authors:
+ * Sonal Santan <sonal.santan@xilinx.com>
+ * Umang Parekh <umang.parekh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ZOCL_DRV_H_
+#define _ZOCL_DRV_H_
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_gem_cma_helper.h>
+
+struct drm_zocl_bo {
+ struct drm_gem_cma_object base;
+ uint32_t flags;
+};
+
+struct drm_zocl_dev {
+ struct drm_device *ddev;
+ void __iomem *regs;
+ phys_addr_t res_start;
+ resource_size_t res_len;
+ unsigned int irq;
+};
+
+static inline struct drm_zocl_bo *to_zocl_bo(struct drm_gem_object *bo)
+{
+ return (struct drm_zocl_bo *) bo;
+}
+
+int zocl_create_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int zocl_sync_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int zocl_map_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int zocl_info_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int zocl_pwrite_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int zocl_pread_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+void zocl_describe(const struct drm_zocl_bo *obj);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index dd39d67ccec3..8cf35b2eff3d 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -106,9 +106,6 @@ static void syncpt_assign_to_channel(struct host1x_syncpt *sp,
#if HOST1X_HW >= 6
struct host1x *host = sp->host;
- if (!host->hv_regs)
- return;
-
host1x_sync_writel(host,
HOST1X_SYNC_SYNCPT_CH_APP_CH(ch ? ch->id : 0xff),
HOST1X_SYNC_SYNCPT_CH_APP(sp->id));
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index e00809d996a2..762d349ad00f 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -206,9 +206,9 @@ static int tegra_mipi_power_down(struct tegra_mipi *mipi)
return 0;
}
-struct tegra_mipi_device *tegra_mipi_request(struct device *device)
+struct tegra_mipi_device *tegra_mipi_request(struct device *device,
+ struct device_node *np)
{
- struct device_node *np = device->of_node;
struct tegra_mipi_device *dev;
struct of_phandle_args args;
int err;
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index b3dae9ec1a38..528812bf84da 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1235,6 +1235,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
pdev = platform_device_alloc(reg->name, id++);
if (!pdev) {
ret = -ENOMEM;
+ of_node_put(of_node);
goto err_register;
}
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index 2477b2a3f7c3..464a48906d01 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -832,6 +832,8 @@ static const struct hid_device_id alps_id[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) },
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
+ USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) },
{ }
};
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index efce31d035ef..05d79b01d459 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -51,6 +51,12 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
"(For people who want to keep Windows PC keyboard muscle memory. "
"[0] = as-is, Mac layout. 1 = swapped, Windows layout.)");
+static unsigned int swap_fn_leftctrl;
+module_param(swap_fn_leftctrl, uint, 0644);
+MODULE_PARM_DESC(swap_fn_leftctrl, "Swap the Fn and left Control keys. "
+ "(For people who want to keep PC keyboard muscle memory. "
+ "[0] = as-is, Mac layout, 1 = swapped, PC layout)");
+
struct apple_sc {
unsigned long quirks;
unsigned int fn_on;
@@ -64,6 +70,28 @@ struct apple_key_translation {
u8 flags;
};
+static const struct apple_key_translation apple2021_fn_keys[] = {
+ { KEY_BACKSPACE, KEY_DELETE },
+ { KEY_ENTER, KEY_INSERT },
+ { KEY_F1, KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY },
+ { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY },
+ { KEY_F3, KEY_SCALE, APPLE_FLAG_FKEY },
+ { KEY_F4, KEY_SEARCH, APPLE_FLAG_FKEY },
+ { KEY_F5, KEY_MICMUTE, APPLE_FLAG_FKEY },
+ { KEY_F6, KEY_SLEEP, APPLE_FLAG_FKEY },
+ { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY },
+ { KEY_F8, KEY_PLAYPAUSE, APPLE_FLAG_FKEY },
+ { KEY_F9, KEY_NEXTSONG, APPLE_FLAG_FKEY },
+ { KEY_F10, KEY_MUTE, APPLE_FLAG_FKEY },
+ { KEY_F11, KEY_VOLUMEDOWN, APPLE_FLAG_FKEY },
+ { KEY_F12, KEY_VOLUMEUP, APPLE_FLAG_FKEY },
+ { KEY_UP, KEY_PAGEUP },
+ { KEY_DOWN, KEY_PAGEDOWN },
+ { KEY_LEFT, KEY_HOME },
+ { KEY_RIGHT, KEY_END },
+ { }
+};
+
static const struct apple_key_translation macbookair_fn_keys[] = {
{ KEY_BACKSPACE, KEY_DELETE },
{ KEY_ENTER, KEY_INSERT },
@@ -163,6 +191,11 @@ static const struct apple_key_translation swapped_option_cmd_keys[] = {
{ }
};
+static const struct apple_key_translation swapped_fn_leftctrl_keys[] = {
+ { KEY_FN, KEY_LEFTCTRL },
+ { }
+};
+
static const struct apple_key_translation *apple_find_translation(
const struct apple_key_translation *table, u16 from)
{
@@ -184,14 +217,18 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
bool do_translate;
u16 code = 0;
- if (usage->code == KEY_FN) {
+ u16 fn_keycode = swap_fn_leftctrl ? KEY_LEFTCTRL : KEY_FN;
+
+ if (usage->code == fn_keycode) {
asc->fn_on = !!value;
- input_event(input, usage->type, usage->code, value);
+ input_event(input, usage->type, KEY_FN, value);
return 1;
}
if (fnmode) {
- if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
+ if (hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021)
+ table = apple2021_fn_keys;
+ else if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
table = macbookair_fn_keys;
else if (hid->product < 0x21d || hid->product >= 0x300)
@@ -271,6 +308,14 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
}
}
+ if (swap_fn_leftctrl) {
+ trans = apple_find_translation(swapped_fn_leftctrl_keys, usage->code);
+ if (trans) {
+ input_event(input, usage->type, trans->to, value);
+ return 1;
+ }
+ }
+
return 0;
}
@@ -341,6 +386,14 @@ static void apple_setup_input(struct input_dev *input)
for (trans = apple_iso_keyboard; trans->from; trans++)
set_bit(trans->to, input->keybit);
+
+ for (trans = apple2021_fn_keys; trans->from; trans++)
+ set_bit(trans->to, input->keybit);
+
+ if (swap_fn_leftctrl) {
+ for (trans = swapped_fn_leftctrl_keys; trans->from; trans++)
+ set_bit(trans->to, input->keybit);
+ }
}
static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -596,6 +649,10 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021),
+ .driver_data = APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021),
+ .driver_data = APPLE_HAS_FN },
{ }
};
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 7f84ed0afdfe..dea926862905 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -40,7 +40,9 @@ MODULE_AUTHOR("Frederik Wenigwieser <frederik.wenigwieser@gmail.com>");
MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define T100_TPAD_INTF 2
+#define MEDION_E1239T_TPAD_INTF 1
+#define E1239T_TP_TOGGLE_REPORT_ID 0x05
#define T100CHI_MOUSE_REPORT_ID 0x06
#define FEATURE_REPORT_ID 0x0d
#define INPUT_REPORT_ID 0x5d
@@ -77,6 +79,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_G752_KEYBOARD BIT(8)
#define QUIRK_T101HA_DOCK BIT(9)
#define QUIRK_T90CHI BIT(10)
+#define QUIRK_MEDION_E1239T BIT(11)
#define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
QUIRK_NO_INIT_REPORTS | \
@@ -92,6 +95,7 @@ struct asus_kbd_leds {
struct hid_device *hdev;
struct work_struct work;
unsigned int brightness;
+ spinlock_t lock;
bool removed;
};
@@ -102,12 +106,14 @@ struct asus_touchpad_info {
int res_y;
int contact_size;
int max_contacts;
+ int report_size;
};
struct asus_drvdata {
unsigned long quirks;
struct hid_device *hdev;
struct input_dev *input;
+ struct input_dev *tp_kbd_input;
struct asus_kbd_leds *kbd_backlight;
const struct asus_touchpad_info *tp;
bool enable_backlight;
@@ -126,6 +132,7 @@ static const struct asus_touchpad_info asus_i2c_tp = {
.max_y = 1758,
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t100ta_tp = {
@@ -135,6 +142,7 @@ static const struct asus_touchpad_info asus_t100ta_tp = {
.res_y = 27, /* units/mm */
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t100ha_tp = {
@@ -144,6 +152,7 @@ static const struct asus_touchpad_info asus_t100ha_tp = {
.res_y = 29, /* units/mm */
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t200ta_tp = {
@@ -153,6 +162,7 @@ static const struct asus_touchpad_info asus_t200ta_tp = {
.res_y = 28, /* units/mm */
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t100chi_tp = {
@@ -162,6 +172,17 @@ static const struct asus_touchpad_info asus_t100chi_tp = {
.res_y = 29, /* units/mm */
.contact_size = 3,
.max_contacts = 4,
+ .report_size = 15 /* 2 byte header + 3 * 4 + 1 byte footer */,
+};
+
+static const struct asus_touchpad_info medion_e1239t_tp = {
+ .max_x = 2640,
+ .max_y = 1380,
+ .res_x = 29, /* units/mm */
+ .res_y = 28, /* units/mm */
+ .contact_size = 5,
+ .max_contacts = 5,
+ .report_size = 32 /* 2 byte header + 5 * 5 + 5 byte footer */,
};
static void asus_report_contact_down(struct asus_drvdata *drvdat,
@@ -229,7 +250,7 @@ static int asus_report_input(struct asus_drvdata *drvdat, u8 *data, int size)
int i, toolType = MT_TOOL_FINGER;
u8 *contactData = data + 2;
- if (size != 3 + drvdat->tp->contact_size * drvdat->tp->max_contacts)
+ if (size != drvdat->tp->report_size)
return 0;
for (i = 0; i < drvdat->tp->max_contacts; i++) {
@@ -257,6 +278,34 @@ static int asus_report_input(struct asus_drvdata *drvdat, u8 *data, int size)
return 1;
}
+static int asus_e1239t_event(struct asus_drvdata *drvdat, u8 *data, int size)
+{
+ if (size != 3)
+ return 0;
+
+ /* Handle broken mute key which only sends press events */
+ if (!drvdat->tp &&
+ data[0] == 0x02 && data[1] == 0xe2 && data[2] == 0x00) {
+ input_report_key(drvdat->input, KEY_MUTE, 1);
+ input_sync(drvdat->input);
+ input_report_key(drvdat->input, KEY_MUTE, 0);
+ input_sync(drvdat->input);
+ return 1;
+ }
+
+ /* Handle custom touchpad toggle key which only sends press events */
+ if (drvdat->tp_kbd_input &&
+ data[0] == 0x05 && data[1] == 0x02 && data[2] == 0x28) {
+ input_report_key(drvdat->tp_kbd_input, KEY_F21, 1);
+ input_sync(drvdat->tp_kbd_input);
+ input_report_key(drvdat->tp_kbd_input, KEY_F21, 0);
+ input_sync(drvdat->tp_kbd_input);
+ return 1;
+ }
+
+ return 0;
+}
+
static int asus_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
@@ -281,10 +330,13 @@ static int asus_raw_event(struct hid_device *hdev,
if (drvdata->tp && data[0] == INPUT_REPORT_ID)
return asus_report_input(drvdata, data, size);
+ if (drvdata->quirks & QUIRK_MEDION_E1239T)
+ return asus_e1239t_event(drvdata, data, size);
+
return 0;
}
-static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size)
+static int asus_kbd_set_report(struct hid_device *hdev, const u8 *buf, size_t buf_size)
{
unsigned char *dmabuf;
int ret;
@@ -303,7 +355,7 @@ static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size
static int asus_kbd_init(struct hid_device *hdev)
{
- u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54,
+ const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54,
0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 };
int ret;
@@ -317,7 +369,7 @@ static int asus_kbd_init(struct hid_device *hdev)
static int asus_kbd_get_functions(struct hid_device *hdev,
unsigned char *kbd_func)
{
- u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 };
+ const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 };
u8 *readbuf;
int ret;
@@ -346,24 +398,42 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
return ret;
}
+static void asus_schedule_work(struct asus_kbd_leds *led)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&led->lock, flags);
+ if (!led->removed)
+ schedule_work(&led->work);
+ spin_unlock_irqrestore(&led->lock, flags);
+}
+
static void asus_kbd_backlight_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
cdev);
- if (led->brightness == brightness)
- return;
+ unsigned long flags;
+ spin_lock_irqsave(&led->lock, flags);
led->brightness = brightness;
- schedule_work(&led->work);
+ spin_unlock_irqrestore(&led->lock, flags);
+
+ asus_schedule_work(led);
}
static enum led_brightness asus_kbd_backlight_get(struct led_classdev *led_cdev)
{
struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
cdev);
+ enum led_brightness brightness;
+ unsigned long flags;
+
+ spin_lock_irqsave(&led->lock, flags);
+ brightness = led->brightness;
+ spin_unlock_irqrestore(&led->lock, flags);
- return led->brightness;
+ return brightness;
}
static void asus_kbd_backlight_work(struct work_struct *work)
@@ -371,11 +441,11 @@ static void asus_kbd_backlight_work(struct work_struct *work)
struct asus_kbd_leds *led = container_of(work, struct asus_kbd_leds, work);
u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4, 0x00 };
int ret;
+ unsigned long flags;
- if (led->removed)
- return;
-
+ spin_lock_irqsave(&led->lock, flags);
buf[4] = led->brightness;
+ spin_unlock_irqrestore(&led->lock, flags);
ret = asus_kbd_set_report(led->hdev, buf, sizeof(buf));
if (ret < 0)
@@ -437,6 +507,7 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
drvdata->kbd_backlight->cdev.brightness_set = asus_kbd_backlight_set;
drvdata->kbd_backlight->cdev.brightness_get = asus_kbd_backlight_get;
INIT_WORK(&drvdata->kbd_backlight->work, asus_kbd_backlight_work);
+ spin_lock_init(&drvdata->kbd_backlight->lock);
ret = devm_led_classdev_register(&hdev->dev, &drvdata->kbd_backlight->cdev);
if (ret < 0) {
@@ -615,6 +686,21 @@ static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
hi->report->id != T100CHI_MOUSE_REPORT_ID)
return 0;
+ /* Handle MULTI_INPUT on E1239T mouse/touchpad USB interface */
+ if (drvdata->tp && (drvdata->quirks & QUIRK_MEDION_E1239T)) {
+ switch (hi->report->id) {
+ case E1239T_TP_TOGGLE_REPORT_ID:
+ input_set_capability(input, EV_KEY, KEY_F21);
+ input->name = "Asus Touchpad Keys";
+ drvdata->tp_kbd_input = input;
+ return 0;
+ case INPUT_REPORT_ID:
+ break; /* Touchpad report, handled below */
+ default:
+ return 0; /* Ignore other reports */
+ }
+ }
+
if (drvdata->tp) {
int ret;
@@ -694,7 +780,6 @@ static int asus_input_mapping(struct hid_device *hdev,
/* ASUS-specific keyboard hotkeys */
if ((usage->hid & HID_USAGE_PAGE) == 0xff310000) {
- set_bit(EV_REP, hi->input->evbit);
switch (usage->hid & HID_USAGE) {
case 0x10: asus_map_key_clear(KEY_BRIGHTNESSDOWN); break;
case 0x20: asus_map_key_clear(KEY_BRIGHTNESSUP); break;
@@ -737,11 +822,11 @@ static int asus_input_mapping(struct hid_device *hdev,
if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT)
drvdata->enable_backlight = true;
+ set_bit(EV_REP, hi->input->evbit);
return 1;
}
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR) {
- set_bit(EV_REP, hi->input->evbit);
switch (usage->hid & HID_USAGE) {
case 0xff01: asus_map_key_clear(BTN_1); break;
case 0xff02: asus_map_key_clear(BTN_2); break;
@@ -764,6 +849,7 @@ static int asus_input_mapping(struct hid_device *hdev,
return 0;
}
+ set_bit(EV_REP, hi->input->evbit);
return 1;
}
@@ -782,6 +868,16 @@ static int asus_input_mapping(struct hid_device *hdev,
}
}
+ /*
+ * The mute button is broken and only sends press events, we
+ * deal with this in our raw_event handler, so do not map it.
+ */
+ if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
+ usage->hid == (HID_UP_CONSUMER | 0xe2)) {
+ input_set_capability(hi->input, EV_KEY, KEY_MUTE);
+ return -1;
+ }
+
return 0;
}
@@ -812,6 +908,24 @@ static int asus_start_multitouch(struct hid_device *hdev)
return 0;
}
+static int __maybe_unused asus_resume(struct hid_device *hdev)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+ int ret = 0;
+
+ if (drvdata->kbd_backlight) {
+ const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4,
+ drvdata->kbd_backlight->cdev.brightness };
+ ret = asus_kbd_set_report(hdev, buf, sizeof(buf));
+ if (ret < 0) {
+ hid_err(hdev, "Asus failed to set keyboard backlight: %d\n", ret);
+ goto asus_resume_err;
+ }
+ }
+
+asus_resume_err:
+ return ret;
+}
+
static int __maybe_unused asus_reset_resume(struct hid_device *hdev)
{
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
@@ -877,6 +991,19 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
drvdata->tp = &asus_t100chi_tp;
}
+ if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
+ hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+ struct usb_host_interface *alt =
+ to_usb_interface(hdev->dev.parent)->altsetting;
+
+ if (alt->desc.bInterfaceNumber == MEDION_E1239T_TPAD_INTF) {
+ /* For separate input-devs for tp and tp toggle key */
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ drvdata->quirks |= QUIRK_SKIP_INPUT_MAPPING;
+ drvdata->tp = &medion_e1239t_tp;
+ }
+ }
+
if (drvdata->quirks & QUIRK_NO_INIT_REPORTS)
hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
@@ -935,9 +1062,13 @@ err_stop_hw:
static void asus_remove(struct hid_device *hdev)
{
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+ unsigned long flags;
if (drvdata->kbd_backlight) {
+ spin_lock_irqsave(&drvdata->kbd_backlight->lock, flags);
drvdata->kbd_backlight->removed = true;
+ spin_unlock_irqrestore(&drvdata->kbd_backlight->lock, flags);
+
cancel_work_sync(&drvdata->kbd_backlight->work);
}
@@ -1056,7 +1187,8 @@ static const struct hid_device_id asus_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), QUIRK_T100CHI },
-
+ { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE_MEDION_E1239T),
+ QUIRK_MEDION_E1239T },
{ }
};
MODULE_DEVICE_TABLE(hid, asus_devices);
@@ -1071,6 +1203,7 @@ static struct hid_driver asus_driver = {
.input_configured = asus_input_configured,
#ifdef CONFIG_PM
.reset_resume = asus_reset_resume,
+ .resume = asus_resume,
#endif
.event = asus_event,
.raw_event = asus_raw_event
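
The spinlock added to struct asus_kbd_leds closes a race between the LED worker and driver removal: asus_remove() sets the removed flag and calls cancel_work_sync(), while asus_schedule_work() only queues work when the flag is still clear, under the same lock. A reduced sketch of the pattern, with placeholder names:

    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct led_ctx {
    	spinlock_t lock;
    	bool removed;
    	struct work_struct work;
    };

    static void led_ctx_schedule(struct led_ctx *ctx)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&ctx->lock, flags);
    	if (!ctx->removed)	/* no new work once removal has started */
    		schedule_work(&ctx->work);
    	spin_unlock_irqrestore(&ctx->lock, flags);
    }

    static void led_ctx_remove(struct led_ctx *ctx)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&ctx->lock, flags);
    	ctx->removed = true;
    	spin_unlock_irqrestore(&ctx->lock, flags);
    	/* anything queued before the flag flipped is flushed here */
    	cancel_work_sync(&ctx->work);
    }

Without the lock, a schedule_work() racing with removal could re-queue the work item after cancel_work_sync() returned, leaving it to run on freed memory.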
diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c
index 467d789f9bc2..25ed7b9a917e 100644
--- a/drivers/hid/hid-betopff.c
+++ b/drivers/hid/hid-betopff.c
@@ -60,7 +60,6 @@ static int betopff_init(struct hid_device *hid)
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev;
- int field_count = 0;
int error;
int i, j;
@@ -86,19 +85,21 @@ static int betopff_init(struct hid_device *hid)
* -----------------------------------------
* Do init them with default value.
*/
+ if (report->maxfield < 4) {
+ hid_err(hid, "not enough fields in the report: %d\n",
+ report->maxfield);
+ return -ENODEV;
+ }
for (i = 0; i < report->maxfield; i++) {
+ if (report->field[i]->report_count < 1) {
+ hid_err(hid, "no values in the field\n");
+ return -ENODEV;
+ }
for (j = 0; j < report->field[i]->report_count; j++) {
report->field[i]->value[j] = 0x00;
- field_count++;
}
}
- if (field_count < 4) {
- hid_err(hid, "not enough fields in the report: %d\n",
- field_count);
- return -ENODEV;
- }
-
betopff = kzalloc(sizeof(*betopff), GFP_KERNEL);
if (!betopff)
return -ENOMEM;
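
The reordering in betopff_init() matters: the old code only counted values while it was already writing report->field[i]->value[j], so a malformed descriptor could be dereferenced before the count was validated. A sketch of the validate-first shape, assuming a report that needs at least four fields:

    #include <linux/hid.h>

    static int validate_output_report(struct hid_report *report)
    {
    	int i;

    	if (report->maxfield < 4)
    		return -ENODEV;

    	for (i = 0; i < report->maxfield; i++)
    		if (report->field[i]->report_count < 1)
    			return -ENODEV;

    	return 0;	/* only now is field[i]->value[j] safe to touch */
    }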
diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
index e8c5e3ac9fff..a02cb517b4c4 100644
--- a/drivers/hid/hid-bigbenff.c
+++ b/drivers/hid/hid-bigbenff.c
@@ -174,6 +174,7 @@ static __u8 pid0902_rdesc_fixed[] = {
struct bigben_device {
struct hid_device *hid;
struct hid_report *report;
+ spinlock_t lock;
bool removed;
u8 led_state; /* LED1 = 1 .. LED4 = 8 */
u8 right_motor_on; /* right motor off/on 0/1 */
@@ -184,18 +185,39 @@ struct bigben_device {
struct work_struct worker;
};
+static inline void bigben_schedule_work(struct bigben_device *bigben)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bigben->lock, flags);
+ if (!bigben->removed)
+ schedule_work(&bigben->worker);
+ spin_unlock_irqrestore(&bigben->lock, flags);
+}
static void bigben_worker(struct work_struct *work)
{
struct bigben_device *bigben = container_of(work,
struct bigben_device, worker);
struct hid_field *report_field = bigben->report->field[0];
-
- if (bigben->removed || !report_field)
+ bool do_work_led = false;
+ bool do_work_ff = false;
+ u8 *buf;
+ u32 len;
+ unsigned long flags;
+
+ buf = hid_alloc_report_buf(bigben->report, GFP_KERNEL);
+ if (!buf)
return;
+ len = hid_report_len(bigben->report);
+
+ /* LED work */
+ spin_lock_irqsave(&bigben->lock, flags);
+
if (bigben->work_led) {
bigben->work_led = false;
+ do_work_led = true;
report_field->value[0] = 0x01; /* 1 = led message */
report_field->value[1] = 0x08; /* reserved value, always 8 */
report_field->value[2] = bigben->led_state;
@@ -204,11 +226,22 @@ static void bigben_worker(struct work_struct *work)
report_field->value[5] = 0x00; /* padding */
report_field->value[6] = 0x00; /* padding */
report_field->value[7] = 0x00; /* padding */
- hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT);
+ hid_output_report(bigben->report, buf);
}
+ spin_unlock_irqrestore(&bigben->lock, flags);
+
+ if (do_work_led) {
+ hid_hw_raw_request(bigben->hid, bigben->report->id, buf, len,
+ bigben->report->type, HID_REQ_SET_REPORT);
+ }
+
+ /* FF work */
+ spin_lock_irqsave(&bigben->lock, flags);
+
if (bigben->work_ff) {
bigben->work_ff = false;
+ do_work_ff = true;
report_field->value[0] = 0x02; /* 2 = rumble effect message */
report_field->value[1] = 0x08; /* reserved value, always 8 */
report_field->value[2] = bigben->right_motor_on;
@@ -217,8 +250,17 @@ static void bigben_worker(struct work_struct *work)
report_field->value[5] = 0x00; /* padding */
report_field->value[6] = 0x00; /* padding */
report_field->value[7] = 0x00; /* padding */
- hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT);
+ hid_output_report(bigben->report, buf);
}
+
+ spin_unlock_irqrestore(&bigben->lock, flags);
+
+ if (do_work_ff) {
+ hid_hw_raw_request(bigben->hid, bigben->report->id, buf, len,
+ bigben->report->type, HID_REQ_SET_REPORT);
+ }
+
+ kfree(buf);
}
static int hid_bigben_play_effect(struct input_dev *dev, void *data,
@@ -228,6 +270,7 @@ static int hid_bigben_play_effect(struct input_dev *dev, void *data,
struct bigben_device *bigben = hid_get_drvdata(hid);
u8 right_motor_on;
u8 left_motor_force;
+ unsigned long flags;
if (!bigben) {
hid_err(hid, "no device data\n");
@@ -242,10 +285,13 @@ static int hid_bigben_play_effect(struct input_dev *dev, void *data,
if (right_motor_on != bigben->right_motor_on ||
left_motor_force != bigben->left_motor_force) {
+ spin_lock_irqsave(&bigben->lock, flags);
bigben->right_motor_on = right_motor_on;
bigben->left_motor_force = left_motor_force;
bigben->work_ff = true;
- schedule_work(&bigben->worker);
+ spin_unlock_irqrestore(&bigben->lock, flags);
+
+ bigben_schedule_work(bigben);
}
return 0;
@@ -259,6 +305,7 @@ static void bigben_set_led(struct led_classdev *led,
struct bigben_device *bigben = hid_get_drvdata(hid);
int n;
bool work;
+ unsigned long flags;
if (!bigben) {
hid_err(hid, "no device data\n");
@@ -267,6 +314,7 @@ static void bigben_set_led(struct led_classdev *led,
for (n = 0; n < NUM_LEDS; n++) {
if (led == bigben->leds[n]) {
+ spin_lock_irqsave(&bigben->lock, flags);
if (value == LED_OFF) {
work = (bigben->led_state & BIT(n));
bigben->led_state &= ~BIT(n);
@@ -274,10 +322,11 @@ static void bigben_set_led(struct led_classdev *led,
work = !(bigben->led_state & BIT(n));
bigben->led_state |= BIT(n);
}
+ spin_unlock_irqrestore(&bigben->lock, flags);
if (work) {
bigben->work_led = true;
- schedule_work(&bigben->worker);
+ bigben_schedule_work(bigben);
}
return;
}
@@ -307,8 +356,12 @@ static enum led_brightness bigben_get_led(struct led_classdev *led)
static void bigben_remove(struct hid_device *hid)
{
struct bigben_device *bigben = hid_get_drvdata(hid);
+ unsigned long flags;
+ spin_lock_irqsave(&bigben->lock, flags);
bigben->removed = true;
+ spin_unlock_irqrestore(&bigben->lock, flags);
+
cancel_work_sync(&bigben->worker);
hid_hw_stop(hid);
}
@@ -318,7 +371,6 @@ static int bigben_probe(struct hid_device *hid,
{
struct bigben_device *bigben;
struct hid_input *hidinput;
- struct list_head *report_list;
struct led_classdev *led;
char *name;
size_t name_sz;
@@ -343,9 +395,12 @@ static int bigben_probe(struct hid_device *hid,
return error;
}
- report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
- bigben->report = list_entry(report_list->next,
- struct hid_report, list);
+ bigben->report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 8);
+ if (!bigben->report) {
+ hid_err(hid, "no output report found\n");
+ error = -ENODEV;
+ goto error_hw_stop;
+ }
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
@@ -357,6 +412,7 @@ static int bigben_probe(struct hid_device *hid,
set_bit(FF_RUMBLE, hidinput->input->ffbit);
INIT_WORK(&bigben->worker, bigben_worker);
+ spin_lock_init(&bigben->lock);
error = input_ff_create_memless(hidinput->input, NULL,
hid_bigben_play_effect);
@@ -397,7 +453,7 @@ static int bigben_probe(struct hid_device *hid,
bigben->left_motor_force = 0;
bigben->work_led = true;
bigben->work_ff = true;
- schedule_work(&bigben->worker);
+ bigben_schedule_work(bigben);
hid_info(hid, "LED and force feedback support for BigBen gamepad\n");
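
The reworked bigben_worker() follows a stage-under-lock, send-outside-lock shape: the spinlock protects the shared report fields while hid_output_report() serializes them into a private buffer, and the potentially sleeping hid_hw_raw_request() runs unlocked. A condensed sketch, with a placeholder context struct standing in for struct bigben_device:

    #include <linux/hid.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct ff_ctx {
    	struct hid_device *hid;
    	struct hid_report *report;
    	spinlock_t lock;
    };

    static void ff_send_locked_state(struct ff_ctx *ctx)
    {
    	unsigned long flags;
    	u8 *buf;
    	u32 len;

    	buf = hid_alloc_report_buf(ctx->report, GFP_KERNEL);
    	if (!buf)
    		return;
    	len = hid_report_len(ctx->report);

    	spin_lock_irqsave(&ctx->lock, flags);
    	/* ... copy driver state into ctx->report->field[0]->value[] ... */
    	hid_output_report(ctx->report, buf);	/* serialize under the lock */
    	spin_unlock_irqrestore(&ctx->lock, flags);

    	/* may sleep, so it must run outside the spinlock */
    	hid_hw_raw_request(ctx->hid, ctx->report->id, buf, len,
    			   ctx->report->type, HID_REQ_SET_REPORT);
    	kfree(buf);
    }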
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index b6740ad773ee..e0820feb7e19 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -258,6 +258,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
{
struct hid_report *report;
struct hid_field *field;
+ unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
unsigned int usages;
unsigned int offset;
unsigned int i;
@@ -288,8 +289,11 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
offset = report->size;
report->size += parser->global.report_size * parser->global.report_count;
+ if (parser->device->ll_driver->max_buffer_size)
+ max_buffer_size = parser->device->ll_driver->max_buffer_size;
+
/* Total size check: Allow for possible report index byte */
- if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+ if (report->size > (max_buffer_size - 1) << 3) {
hid_err(parser->device, "report is too long\n");
return -1;
}
@@ -698,15 +702,22 @@ static void hid_close_report(struct hid_device *device)
* Free a device structure, all reports, and all fields.
*/
-static void hid_device_release(struct device *dev)
+void hiddev_free(struct kref *ref)
{
- struct hid_device *hid = to_hid_device(dev);
+ struct hid_device *hid = container_of(ref, struct hid_device, ref);
hid_close_report(hid);
kfree(hid->dev_rdesc);
kfree(hid);
}
+static void hid_device_release(struct device *dev)
+{
+ struct hid_device *hid = to_hid_device(dev);
+
+ kref_put(&hid->ref, hiddev_free);
+}
+
/*
* Fetch a report description item from the data stream. We support long
* items, though they are not used yet.
@@ -981,8 +992,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
* Validating on id 0 means we should examine the first
* report in the list.
*/
- report = list_entry(
- hid->report_enum[type].report_list.next,
+ report = list_first_entry_or_null(
+ &hid->report_enum[type].report_list,
struct hid_report, list);
} else {
report = hid->report_enum[type].report_id_hash[id];
@@ -1190,6 +1201,7 @@ int hid_open_report(struct hid_device *device)
__u8 *end;
__u8 *next;
int ret;
+ int i;
static int (*dispatch_type[])(struct hid_parser *parser,
struct hid_item *item) = {
hid_parser_main,
@@ -1240,6 +1252,8 @@ int hid_open_report(struct hid_device *device)
goto err;
}
device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
+ for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
+ device->collection[i].parent_idx = -1;
ret = -EINVAL;
while ((next = fetch_item(start, end, &item)) != NULL) {
@@ -1303,6 +1317,9 @@ static s32 snto32(__u32 value, unsigned n)
if (!value || !n)
return 0;
+ if (n > 32)
+ n = 32;
+
switch (n) {
case 8: return ((__s8)value);
case 16: return ((__s16)value);
@@ -1739,6 +1756,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_report *report;
struct hid_driver *hdrv;
+ int max_buffer_size = HID_MAX_BUFFER_SIZE;
unsigned int a;
u32 rsize, csize = size;
u8 *cdata = data;
@@ -1755,10 +1773,13 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
rsize = hid_compute_report_size(report);
- if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
- rsize = HID_MAX_BUFFER_SIZE - 1;
- else if (rsize > HID_MAX_BUFFER_SIZE)
- rsize = HID_MAX_BUFFER_SIZE;
+ if (hid->ll_driver->max_buffer_size)
+ max_buffer_size = hid->ll_driver->max_buffer_size;
+
+ if (report_enum->numbered && rsize >= max_buffer_size)
+ rsize = max_buffer_size - 1;
+ else if (rsize > max_buffer_size)
+ rsize = max_buffer_size;
if (csize < rsize) {
dbg_hid("report %d is too short, (%d < %d)\n", report->id,
@@ -2423,10 +2444,12 @@ int hid_add_device(struct hid_device *hdev)
hid_warn(hdev, "bad device descriptor (%d)\n", ret);
}
+ hdev->id = atomic_inc_return(&id);
+
/* XXX hack, any other cleaner solution after the driver core
* is converted to allow more than 20 bytes as the device name? */
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
- hdev->vendor, hdev->product, atomic_inc_return(&id));
+ hdev->vendor, hdev->product, hdev->id);
hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
@@ -2469,6 +2492,7 @@ struct hid_device *hid_allocate_device(void)
spin_lock_init(&hdev->debug_list_lock);
sema_init(&hdev->driver_input_lock, 1);
mutex_init(&hdev->ll_open_lock);
+ kref_init(&hdev->ref);
return hdev;
}
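
hid-core now refcounts the device object itself: hid_allocate_device() holds the initial reference, hid-debug's open/release take and drop extra ones, and hiddev_free() runs only on the final kref_put(). A minimal model of that lifecycle (names here are illustrative):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct dev_model {
    	struct kref ref;
    	/* ... payload ... */
    };

    static void dev_model_free(struct kref *ref)
    {
    	kfree(container_of(ref, struct dev_model, ref));
    }

    static struct dev_model *dev_model_alloc(void)
    {
    	struct dev_model *d = kzalloc(sizeof(*d), GFP_KERNEL);

    	if (d)
    		kref_init(&d->ref);	/* refcount starts at 1 */
    	return d;
    }

    /*
     * open():    kref_get(&d->ref);
     * release(): kref_put(&d->ref, dev_model_free);
     * The object outlives device release as long as a reader holds it.
     */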
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index db1b55df0d13..648201b6c624 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -787,6 +787,11 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
data->word = le16_to_cpup((__le16 *)buf);
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
+ if (read_length > I2C_SMBUS_BLOCK_MAX) {
+ ret = -EINVAL;
+ goto power_normal;
+ }
+
memcpy(data->block + 1, buf, read_length);
break;
case I2C_SMBUS_BLOCK_DATA:
@@ -1151,8 +1156,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct cp2112_device *dev = gpiochip_get_data(gc);
- INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
-
if (!dev->gpio_poll) {
dev->gpio_poll = true;
schedule_delayed_work(&dev->gpio_poll_worker, 0);
@@ -1235,6 +1238,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct cp2112_device *dev;
u8 buf[3];
struct cp2112_smbus_config_report config;
+ struct gpio_irq_chip *girq;
int ret;
dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
@@ -1338,6 +1342,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->gc.can_sleep = 1;
dev->gc.parent = &hdev->dev;
+ girq = &dev->gc.irq;
+ girq->chip = &cp2112_gpio_irqchip;
+ /* The event comes from the outside so no parent handler */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+
+ INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
+
ret = gpiochip_add_data(&dev->gc, dev);
if (ret < 0) {
hid_err(hdev, "error registering gpio chip\n");
@@ -1353,17 +1368,8 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
chmod_sysfs_attrs(hdev);
hid_hw_power(hdev, PM_HINT_NORMAL);
- ret = gpiochip_irqchip_add(&dev->gc, &cp2112_gpio_irqchip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
- if (ret) {
- dev_err(dev->gc.parent, "failed to add IRQ chip\n");
- goto err_sysfs_remove;
- }
-
return ret;
-err_sysfs_remove:
- sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
err_gpiochip_remove:
gpiochip_remove(&dev->gc);
err_free_i2c:
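
The cp2112 change moves IRQ-chip setup to the template-based API: the gpio_irq_chip embedded in gpio_chip is filled in before gpiochip_add_data(), replacing the separate gpiochip_irqchip_add() call that previously ran after registration. A sketch of the registration shape (my_gpio_register and its arguments are placeholders):

    #include <linux/gpio/driver.h>
    #include <linux/irq.h>

    static int my_gpio_register(struct gpio_chip *gc, struct irq_chip *irqchip,
    			    void *data)
    {
    	struct gpio_irq_chip *girq = &gc->irq;

    	girq->chip = irqchip;
    	girq->parent_handler = NULL;	/* events come from outside */
    	girq->num_parents = 0;
    	girq->parents = NULL;
    	girq->default_type = IRQ_TYPE_NONE;
    	girq->handler = handle_simple_irq;

    	/* registration now wires up the irqchip in the same call */
    	return gpiochip_add_data(gc, data);
    }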
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 419d8dec7e49..caeb2b21cd99 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -933,6 +933,7 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_VOICECOMMAND] = "VoiceCommand",
[KEY_EMOJI_PICKER] = "EmojiPicker",
[KEY_DICTATE] = "Dictate",
+ [KEY_MICMUTE] = "MicrophoneMute",
[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
[KEY_BRIGHTNESS_MAX] = "BrightnessMax",
[KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
@@ -1081,6 +1082,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
goto out;
}
list->hdev = (struct hid_device *) inode->i_private;
+ kref_get(&list->hdev->ref);
file->private_data = list;
mutex_init(&list->read_mutex);
@@ -1173,6 +1175,8 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
list_del(&list->node);
spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
kfifo_free(&list->hid_debug_fifo);
+
+ kref_put(&list->hdev->ref, hiddev_free);
kfree(list);
return 0;
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index b6947d757347..2ebad3ed4e3a 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -474,6 +474,8 @@ static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_JEWEL) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) },
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index 403506b9697e..b346d68a06f5 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -130,6 +130,10 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
return -ENODEV;
boot_hid = usb_get_intfdata(boot_interface);
+ if (list_empty(&boot_hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
boot_hid_input = list_first_entry(&boot_hid->inputs,
struct hid_input, list);
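
list_first_entry() on an empty list decodes the list head itself as an entry and returns a bogus pointer, which is what the added list_empty() guard prevents. The safe idiom, sketched with a hypothetical entry type:

    #include <linux/list.h>

    struct item {
    	struct list_head list;
    };

    static struct item *first_item_or_null(struct list_head *head)
    {
    	if (list_empty(head))
    		return NULL;
    	/* equivalently: return list_first_entry_or_null(head, struct item, list); */
    	return list_first_entry(head, struct item, list);
    }

The hid-core hunk earlier in this series uses the list_first_entry_or_null() form of the same fix.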
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 79a28fc91521..5928e934d734 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -492,7 +492,7 @@ static int mousevsc_probe(struct hv_device *device,
ret = hid_add_device(hid_dev);
if (ret)
- goto probe_err1;
+ goto probe_err2;
ret = hid_parse(hid_dev);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c587a77d493c..839430b48755 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -175,6 +175,7 @@
#define USB_DEVICE_ID_APPLE_IRCONTROL3 0x8241
#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
#define USB_DEVICE_ID_APPLE_IRCONTROL5 0x8243
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 0x029c
#define USB_VENDOR_ID_ASUS 0x0486
#define USB_DEVICE_ID_ASUS_T91MT 0x0185
@@ -350,6 +351,7 @@
#define USB_VENDOR_ID_DELL 0x413c
#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a
+#define USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W 0x4503
#define USB_VENDOR_ID_DELORME 0x1163
#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
@@ -490,6 +492,7 @@
#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
#define USB_DEVICE_ID_GOOGLE_DON 0x5050
#define USB_DEVICE_ID_GOOGLE_EEL 0x5057
+#define USB_DEVICE_ID_GOOGLE_JEWEL 0x5061
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f
@@ -580,6 +583,7 @@
#define USB_DEVICE_ID_UGCI_FIGHTING 0x0030
#define USB_VENDOR_ID_HP 0x03f0
+#define USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A 0x464a
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
@@ -650,6 +654,7 @@
#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a
#define USB_DEVICE_ID_ITE_LENOVO_YOGA900 0x8396
#define USB_DEVICE_ID_ITE8595 0x8595
+#define USB_DEVICE_ID_ITE_MEDION_E1239T 0xce50
#define USB_VENDOR_ID_JABRA 0x0b0e
#define USB_DEVICE_ID_JABRA_SPEAK_410 0x0412
@@ -818,6 +823,7 @@
#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
#define USB_DEVICE_ID_MADCATZ_RAT5 0x1705
#define USB_DEVICE_ID_MADCATZ_RAT9 0x1709
+#define USB_DEVICE_ID_MADCATZ_MMO7 0x1713
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
@@ -863,6 +869,7 @@
#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
#define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb
#define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0
+#define USB_DEVICE_ID_MS_MOUSE_0783 0x0783
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -935,7 +942,10 @@
#define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003
#define USB_VENDOR_ID_PLANTRONICS 0x047f
+#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES 0xc055
#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056
+#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES 0xc057
+#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES 0xc058
#define USB_VENDOR_ID_PANASONIC 0x04da
#define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
@@ -1137,7 +1147,9 @@
#define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
+#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002 0x73f4
#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
+#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_017 0x73f6
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
@@ -1291,6 +1303,7 @@
#define USB_VENDOR_ID_PRIMAX 0x0461
#define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22
+#define USB_DEVICE_ID_PRIMAX_MOUSE_4E2A 0x4e2a
#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
#define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72
#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F 0x4d0f
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index d1ba6fafe960..004aa3cdeacc 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -671,6 +671,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
}
+ if ((usage->hid & 0xf0) == 0xa0) { /* SystemControl */
+ switch (usage->hid & 0xf) {
+ case 0x9: map_key_clear(KEY_MICMUTE); break;
+ default: goto ignore;
+ }
+ break;
+ }
+
if ((usage->hid & 0xf0) == 0xb0) { /* SC - Display */
switch (usage->hid & 0xf) {
case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break;
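
The new branch decodes Generic Desktop System Control usages by nibble: 0xa0-0xaf selects the sub-group and the low nibble picks the function, with 0x9 (usage 0xA9, System Microphone Mute) mapped to KEY_MICMUTE. A reduced sketch of that decode, outside the hidinput machinery:

    #include <linux/input.h>

    /* Returns a keycode for a System Control usage byte, or -1 to ignore.
     * Only the microphone-mute case from the hunk above is modeled.
     */
    static int map_system_control(unsigned int usage)
    {
    	if ((usage & 0xf0) == 0xa0) {
    		switch (usage & 0x0f) {
    		case 0x9:
    			return KEY_MICMUTE;
    		}
    	}
    	return -1;
    }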
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index 742c052b0110..b8cce9c196d8 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -18,10 +18,21 @@ static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int
unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
if (quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) {
+ /* For Acer Aspire Switch 10 SW5-012 keyboard-dock */
if (*rsize == 188 && rdesc[162] == 0x81 && rdesc[163] == 0x02) {
- hid_info(hdev, "Fixing up ITE keyboard report descriptor\n");
+ hid_info(hdev, "Fixing up Acer Sw5-012 ITE keyboard report descriptor\n");
rdesc[163] = HID_MAIN_ITEM_RELATIVE;
}
+ /* For Acer One S1002/S1003 keyboard-dock */
+ if (*rsize == 188 && rdesc[185] == 0x81 && rdesc[186] == 0x02) {
+ hid_info(hdev, "Fixing up Acer S1002/S1003 ITE keyboard report descriptor\n");
+ rdesc[186] = HID_MAIN_ITEM_RELATIVE;
+ }
+ /* For Acer Aspire Switch 10E (SW3-016) keyboard-dock */
+ if (*rsize == 210 && rdesc[184] == 0x81 && rdesc[185] == 0x02) {
+ hid_info(hdev, "Fixing up Acer Aspire Switch 10E (SW3-016) ITE keyboard report descriptor\n");
+ rdesc[185] = HID_MAIN_ITEM_RELATIVE;
+ }
}
return rdesc;
@@ -103,7 +114,18 @@ static const struct hid_device_id ite_devices[] = {
/* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_SYNAPTICS,
- USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) },
+ USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002),
+ .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
+ /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_SYNAPTICS,
+ USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003),
+ .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
+ /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_SYNAPTICS,
+ USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_017),
+ .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
{ }
};
MODULE_DEVICE_TABLE(hid, ite_devices);
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 5e6a0cef2a06..e3fcf1353fb3 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -872,6 +872,12 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att
return -ENOMEM;
i = strlen(lbuf);
+
+ if (i == 0) {
+ kfree(lbuf);
+ return -EINVAL;
+ }
+
if (lbuf[i-1] == '\n') {
if (i == 1) {
kfree(lbuf);
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index a663cbb7b683..0c2aa9024b87 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1212,6 +1212,9 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
* 50 msec should give the receiver enough time to be ready.
*/
msleep(50);
+
+ if (retval)
+ return retval;
}
/*
@@ -1233,7 +1236,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
buf[5] = 0x09;
buf[6] = 0x00;
- hid_hw_raw_request(hdev, REPORT_ID_HIDPP_SHORT, buf,
+ retval = hid_hw_raw_request(hdev, REPORT_ID_HIDPP_SHORT, buf,
HIDPP_REPORT_SHORT_LENGTH, HID_OUTPUT_REPORT,
HID_REQ_SET_REPORT);
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 919551ed5809..477b082aa6a8 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -809,8 +809,7 @@ static int hidpp_unifying_init(struct hidpp_device *hidpp)
if (ret)
return ret;
- snprintf(hdev->uniq, sizeof(hdev->uniq), "%04x-%4phD",
- hdev->product, &serial);
+ snprintf(hdev->uniq, sizeof(hdev->uniq), "%4phD", &serial);
dbg_hid("HID++ Unifying: Got serial: %s\n", hdev->uniq);
name = hidpp_unifying_get_name(hidpp);
@@ -904,6 +903,54 @@ print_version:
}
/* -------------------------------------------------------------------------- */
+/* 0x0003: Device Information */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_DEVICE_INFORMATION 0x0003
+
+#define CMD_GET_DEVICE_INFO 0x00
+
+static int hidpp_get_serial(struct hidpp_device *hidpp, u32 *serial)
+{
+ struct hidpp_report response;
+ u8 feature_type;
+ u8 feature_index;
+ int ret;
+
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_DEVICE_INFORMATION,
+ &feature_index,
+ &feature_type);
+ if (ret)
+ return ret;
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_GET_DEVICE_INFO,
+ NULL, 0, &response);
+ if (ret)
+ return ret;
+
+ /* See hidpp_unifying_get_serial() */
+ *serial = *((u32 *)&response.rap.params[1]);
+ return 0;
+}
+
+static int hidpp_serial_init(struct hidpp_device *hidpp)
+{
+ struct hid_device *hdev = hidpp->hid_dev;
+ u32 serial;
+ int ret;
+
+ ret = hidpp_get_serial(hidpp, &serial);
+ if (ret)
+ return ret;
+
+ snprintf(hdev->uniq, sizeof(hdev->uniq), "%4phD", &serial);
+ dbg_hid("HID++ DeviceInformation: Got serial: %s\n", hdev->uniq);
+
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
/* 0x0005: GetDeviceNameType */
/* -------------------------------------------------------------------------- */
@@ -3651,6 +3698,8 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (hidpp->quirks & HIDPP_QUIRK_UNIFYING)
hidpp_unifying_init(hidpp);
+ else if (hid_is_usb(hidpp->hid_dev))
+ hidpp_serial_init(hidpp);
connected = hidpp_root_get_protocol_version(hidpp) == 0;
atomic_set(&hidpp->connected, connected);
@@ -3674,7 +3723,8 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto hid_hw_init_fail;
}
- hidpp_connect_event(hidpp);
+ schedule_work(&hidpp->work);
+ flush_work(&hidpp->work);
/* Reset the HID node state */
hid_device_io_stop(hdev);
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index fc4c07459753..28158d2f2352 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -387,7 +387,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
magicmouse_raw_event(hdev, report, data + 2, data[1]);
magicmouse_raw_event(hdev, report, data + 2 + data[1],
size - 2 - data[1]);
- break;
+ return 0;
default:
return 0;
}
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 653f436aa459..c3810e7140a5 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1162,7 +1162,7 @@ static void mt_touch_report(struct hid_device *hid,
int contact_count = -1;
/* sticky fingers release in progress, abort */
- if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
+ if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
return;
scantime = *app->scantime;
@@ -1243,7 +1243,7 @@ static void mt_touch_report(struct hid_device *hid,
del_timer(&td->release_timer);
}
- clear_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
+ clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
}
static int mt_touch_input_configured(struct hid_device *hdev,
@@ -1548,7 +1548,6 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app)
static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
struct mt_device *td = hid_get_drvdata(hdev);
- char *name;
const char *suffix = NULL;
struct mt_report_data *rdata;
struct mt_application *mt_application = NULL;
@@ -1602,15 +1601,9 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
break;
}
- if (suffix) {
- name = devm_kzalloc(&hi->input->dev,
- strlen(hdev->name) + strlen(suffix) + 2,
- GFP_KERNEL);
- if (name) {
- sprintf(name, "%s %s", hdev->name, suffix);
- hi->input->name = name;
- }
- }
+ if (suffix)
+ hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ "%s %s", hdev->name, suffix);
return 0;
}
@@ -1680,11 +1673,11 @@ static void mt_expired_timeout(struct timer_list *t)
* An input report came in just before we release the sticky fingers,
* it will take care of the sticky fingers.
*/
- if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
+ if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
return;
if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags))
mt_release_contacts(hdev);
- clear_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
+ clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
}
static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
@@ -1953,6 +1946,10 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_ELAN, 0x313a) },
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_ELAN, 0x3148) },
+
/* Elitegroup panel */
{ .driver_data = MT_CLS_SERIAL,
MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
@@ -2006,6 +2003,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
+ /* HONOR GLO-GXXX panel */
+ { .driver_data = MT_CLS_VTL,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ 0x347d, 0x7853) },
+
/* Ilitek dual touch panel */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
@@ -2086,6 +2088,14 @@ static const struct hid_device_id mt_devices[] = {
/* Synaptics devices */
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xcd7e) },
+
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xcddc) },
+
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_SYNAPTICS, 0xce08) },
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
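
The multi-touch hunks swap test_and_set_bit()/clear_bit() for test_and_set_bit_lock()/clear_bit_unlock(). The _lock variant has acquire semantics and the _unlock variant release semantics, so the MT_IO_FLAGS_RUNNING bit behaves like a real bit spinlock: state written while the bit was held is guaranteed visible to the next owner, which plain clear_bit() does not promise. A sketch of the pair:

    #include <linux/bitops.h>
    #include <linux/errno.h>

    static int try_enter(unsigned long *word, int bit)
    {
    	if (test_and_set_bit_lock(bit, word))
    		return -EBUSY;	/* another path is already running */
    	return 0;
    }

    static void leave(unsigned long *word, int bit)
    {
    	/* release: prior writes are visible to the next try_enter() winner */
    	clear_bit_unlock(bit, word);
    }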
diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
index e81b7cec2d12..3d414ae194ac 100644
--- a/drivers/hid/hid-plantronics.c
+++ b/drivers/hid/hid-plantronics.c
@@ -199,8 +199,17 @@ err:
static const struct hid_device_id plantronics_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES),
+ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES),
+ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES),
+ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
{ }
};
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 45eba224cdc7..99009fda7b80 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -33,6 +33,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM), HID_QUIRK_NOGET },
@@ -66,6 +67,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
@@ -96,6 +98,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
@@ -122,6 +125,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_MOUSE_0783), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS },
@@ -146,6 +150,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4E2A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D65), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22), HID_QUIRK_ALWAYS_POLL },
@@ -303,6 +308,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021) },
#endif
#if IS_ENABLED(CONFIG_HID_APPLEIR)
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
@@ -615,6 +621,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7) },
#endif
#if IS_ENABLED(CONFIG_HID_SAMSUNG)
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index 26373b82fe81..6da80e442fdd 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -257,6 +257,8 @@ int roccat_report_event(int minor, u8 const *data)
if (!new_value)
return -ENOMEM;
+ mutex_lock(&device->cbuf_lock);
+
report = &device->cbuf[device->cbuf_end];
/* passing NULL is safe */
@@ -276,6 +278,8 @@ int roccat_report_event(int minor, u8 const *data)
reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE;
}
+ mutex_unlock(&device->cbuf_lock);
+
wake_up_interruptible(&device->wait);
return 0;
}
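
Claiming a cbuf slot and advancing cbuf_end must be one atomic step; the mutex added above keeps two concurrent roccat_report_event() calls from picking the same slot. A reduced model with placeholder names:

    #include <linux/mutex.h>

    #define CBUF_SIZE 16

    struct report_ring {
    	struct mutex lock;
    	int end;
    	const void *slot[CBUF_SIZE];
    };

    static void ring_push(struct report_ring *r, const void *value)
    {
    	mutex_lock(&r->lock);
    	r->slot[r->end] = value;
    	r->end = (r->end + 1) % CBUF_SIZE;	/* claim + advance, atomically */
    	mutex_unlock(&r->lock);
    }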
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index c7bf14c01960..b84e975977c4 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -187,6 +187,8 @@ static const struct hid_device_id saitek_devices[] = {
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7),
.driver_data = SAITEK_RELEASE_MODE_MMO7 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7),
+ .driver_data = SAITEK_RELEASE_MODE_MMO7 },
{ }
};
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index fb827c295842..825f011c7901 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -59,7 +59,7 @@ struct hid_sensor_sample {
u32 raw_len;
} __packed;
-static struct attribute hid_custom_attrs[] = {
+static struct attribute hid_custom_attrs[HID_CUSTOM_TOTAL_ATTRS] = {
{.name = "name", .mode = S_IRUGO},
{.name = "units", .mode = S_IRUGO},
{.name = "unit-expo", .mode = S_IRUGO},
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index a3b151b29bd7..fc616db4231b 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -134,6 +134,11 @@ static int steam_recv_report(struct steam_device *steam,
int ret;
r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+ if (!r) {
+ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
+ return -EINVAL;
+ }
+
if (hid_report_len(r) < 64)
return -EINVAL;
@@ -165,6 +170,11 @@ static int steam_send_report(struct steam_device *steam,
int ret;
r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+ if (!r) {
+ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
+ return -EINVAL;
+ }
+
if (hid_report_len(r) < 64)
return -EINVAL;
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index 4edb24195704..e4811d37ca77 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -172,6 +172,7 @@ static int uclogic_probe(struct hid_device *hdev,
* than the pen, so use QUIRK_MULTI_INPUT for all tablets.
*/
hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ hdev->quirks |= HID_QUIRK_HIDINPUT_FORCE;
/* Allocate and assign driver data */
drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index b382c6bf2c5c..f8ef6268f3f2 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -346,10 +346,13 @@ static int hidraw_release(struct inode * inode, struct file * file)
unsigned int minor = iminor(inode);
struct hidraw_list *list = file->private_data;
unsigned long flags;
+ int i;
mutex_lock(&minors_lock);
spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
+ for (i = list->tail; i < list->head; i++)
+ kfree(list->buffer[i].value);
list_del(&list->node);
spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
kfree(list);
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
index 5ffd0da3cf1f..65af0ebef79f 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.h
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -110,7 +110,7 @@ struct report_list {
* @multi_packet_cnt: Count of fragmented packet count
*
* This structure is used to store completion flags and per client data like
- * like report description, number of HID devices etc.
+ * report description, number of HID devices etc.
*/
struct ishtp_cl_data {
/* completion flags */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 1cc157126fce..c0d69303e3b0 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -626,13 +626,14 @@ static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
}
/**
- * ipc_tx_callback() - IPC tx callback function
+ * ipc_tx_send() - IPC tx send function
* @prm: Pointer to client device instance
*
- * Send message over IPC either first time or on callback on previous message
- * completion
+ * Send a message over IPC. The message is split into fragments
+ * if it is bigger than the IPC FIFO size, and all fragments
+ * are sent one by one.
*/
-static void ipc_tx_callback(void *prm)
+static void ipc_tx_send(void *prm)
{
struct ishtp_cl *cl = prm;
struct ishtp_cl_tx_ring *cl_msg;
@@ -677,32 +678,41 @@ static void ipc_tx_callback(void *prm)
list);
rem = cl_msg->send_buf.size - cl->tx_offs;
- ishtp_hdr.host_addr = cl->host_client_id;
- ishtp_hdr.fw_addr = cl->fw_client_id;
- ishtp_hdr.reserved = 0;
- pmsg = cl_msg->send_buf.data + cl->tx_offs;
+ while (rem > 0) {
+ ishtp_hdr.host_addr = cl->host_client_id;
+ ishtp_hdr.fw_addr = cl->fw_client_id;
+ ishtp_hdr.reserved = 0;
+ pmsg = cl_msg->send_buf.data + cl->tx_offs;
+
+ if (rem <= dev->mtu) {
+ /* Last fragment or only one packet */
+ ishtp_hdr.length = rem;
+ ishtp_hdr.msg_complete = 1;
+ /* Submit to IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs = 0;
+ cl->sending = 0;
- if (rem <= dev->mtu) {
- ishtp_hdr.length = rem;
- ishtp_hdr.msg_complete = 1;
- cl->sending = 0;
- list_del_init(&cl_msg->list); /* Must be before write */
- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
- /* Submit to IPC queue with no callback */
- ishtp_write_message(dev, &ishtp_hdr, pmsg);
- spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
- list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
- ++cl->tx_ring_free_size;
- spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
- tx_free_flags);
- } else {
- /* Send IPC fragment */
- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
- cl->tx_offs += dev->mtu;
- ishtp_hdr.length = dev->mtu;
- ishtp_hdr.msg_complete = 0;
- ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
+ break;
+ } else {
+ /* Send IPC fragment */
+ ishtp_hdr.length = dev->mtu;
+ ishtp_hdr.msg_complete = 0;
+ /* All fragments are submitted to the IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs += dev->mtu;
+ rem = cl_msg->send_buf.size - cl->tx_offs;
+ }
}
+
+ list_del_init(&cl_msg->list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
}
/**
@@ -720,7 +730,7 @@ static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
return;
cl->tx_offs = 0;
- ipc_tx_callback(cl);
+ ipc_tx_send(cl);
++cl->send_msg_cnt_ipc;
}
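
The rewritten ipc_tx_send() pushes every fragment of a message in one pass instead of re-arming a per-fragment completion callback. Stripped of the ring-list bookkeeping, the MTU split reduces to the loop below (send_frag() stands in for ishtp_write_message(); the last flag models ishtp_hdr.msg_complete):

    #include <linux/types.h>

    static void send_all(const u8 *buf, size_t size, size_t mtu,
    		     void (*send_frag)(const u8 *frag, size_t len, bool last))
    {
    	size_t off = 0;

    	while (off < size) {
    		size_t rem = size - off;
    		size_t len = rem <= mtu ? rem : mtu;

    		/* only the final fragment is marked message-complete */
    		send_frag(buf + off, len, rem <= mtu);
    		off += len;
    	}
    }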
diff --git a/drivers/hid/intel-ish-hid/ishtp/dma-if.c b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
index 40554c8daca0..00046cbfd4ed 100644
--- a/drivers/hid/intel-ish-hid/ishtp/dma-if.c
+++ b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
@@ -104,6 +104,11 @@ void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
int required_slots = (size / DMA_SLOT_SIZE)
+ 1 * (size % DMA_SLOT_SIZE != 0);
+ if (!dev->ishtp_dma_tx_map) {
+ dev_err(dev->devc, "Fail to allocate Tx map\n");
+ return NULL;
+ }
+
spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
free = 1;
@@ -150,6 +155,11 @@ void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
return;
}
+ if (!dev->ishtp_dma_tx_map) {
+ dev_err(dev->devc, "Fail to allocate Tx map\n");
+ return;
+ }
+
i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
for (j = 0; j < acked_slots; j++) {
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index fc06d8bb42e0..ba0ca652b9da 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -395,6 +395,7 @@ struct hid_ll_driver uhid_hid_driver = {
.parse = uhid_hid_parse,
.raw_request = uhid_hid_raw_request,
.output_report = uhid_hid_output_report,
+ .max_buffer_size = UHID_DATA_MAX,
};
EXPORT_SYMBOL_GPL(uhid_hid_driver);
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index 203d27d198b8..c034a1e850e4 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -91,6 +91,7 @@
#include <linux/leds.h>
#include <linux/usb/input.h>
#include <linux/power_supply.h>
+#include <linux/timer.h>
#include <asm/unaligned.h>
/*
@@ -152,6 +153,7 @@ struct wacom_remote {
struct input_dev *input;
bool registered;
struct wacom_battery battery;
+ ktime_t active_time;
} remotes[WACOM_MAX_REMOTES];
};
@@ -167,6 +169,7 @@ struct wacom {
struct delayed_work init_work;
struct wacom_remote *remote;
struct work_struct mode_change_work;
+ struct timer_list idleprox_timer;
bool generic_has_leds;
struct wacom_leds {
struct wacom_group_leds *groups;
@@ -239,4 +242,5 @@ struct wacom_led *wacom_led_find(struct wacom *wacom, unsigned int group,
struct wacom_led *wacom_led_next(struct wacom *wacom, struct wacom_led *cur);
int wacom_equivalent_usage(int usage);
int wacom_initialize_leds(struct wacom *wacom);
+void wacom_idleprox_timeout(struct timer_list *list);
#endif
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 329bb1a46f90..eacbd7eae2e6 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -160,6 +160,9 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
{
struct wacom *wacom = hid_get_drvdata(hdev);
+ if (wacom->wacom_wac.features.type == BOOTLOADER)
+ return 0;
+
if (size > WACOM_PKGLEN_MAX)
return 1;
@@ -2085,7 +2088,7 @@ static int wacom_allocate_inputs(struct wacom *wacom)
return 0;
}
-static int wacom_register_inputs(struct wacom *wacom)
+static int wacom_setup_inputs(struct wacom *wacom)
{
struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
@@ -2104,10 +2107,6 @@ static int wacom_register_inputs(struct wacom *wacom)
input_free_device(pen_input_dev);
wacom_wac->pen_input = NULL;
pen_input_dev = NULL;
- } else {
- error = input_register_device(pen_input_dev);
- if (error)
- goto fail;
}
error = wacom_setup_touch_input_capabilities(touch_input_dev, wacom_wac);
@@ -2116,19 +2115,42 @@ static int wacom_register_inputs(struct wacom *wacom)
input_free_device(touch_input_dev);
wacom_wac->touch_input = NULL;
touch_input_dev = NULL;
- } else {
- error = input_register_device(touch_input_dev);
- if (error)
- goto fail;
}
error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac);
if (error) {
- /* no pad in use on this interface */
+ /* no pad events using this interface */
input_free_device(pad_input_dev);
wacom_wac->pad_input = NULL;
pad_input_dev = NULL;
- } else {
+ }
+
+ return 0;
+}
+
+static int wacom_register_inputs(struct wacom *wacom)
+{
+ struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
+ struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+ int error = 0;
+
+ pen_input_dev = wacom_wac->pen_input;
+ touch_input_dev = wacom_wac->touch_input;
+ pad_input_dev = wacom_wac->pad_input;
+
+ if (pen_input_dev) {
+ error = input_register_device(pen_input_dev);
+ if (error)
+ goto fail;
+ }
+
+ if (touch_input_dev) {
+ error = input_register_device(touch_input_dev);
+ if (error)
+ goto fail;
+ }
+
+ if (pad_input_dev) {
error = input_register_device(pad_input_dev);
if (error)
goto fail;
@@ -2378,6 +2400,20 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
goto fail;
}
+ error = wacom_setup_inputs(wacom);
+ if (error)
+ goto fail;
+
+ if (features->type == HID_GENERIC)
+ connect_mask |= HID_CONNECT_DRIVER;
+
+ /* Regular HID work starts now */
+ error = hid_hw_start(hdev, connect_mask);
+ if (error) {
+ hid_err(hdev, "hw start failed\n");
+ goto fail;
+ }
+
error = wacom_register_inputs(wacom);
if (error)
goto fail;
@@ -2392,16 +2428,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
goto fail;
}
- if (features->type == HID_GENERIC)
- connect_mask |= HID_CONNECT_DRIVER;
-
- /* Regular HID work starts now */
- error = hid_hw_start(hdev, connect_mask);
- if (error) {
- hid_err(hdev, "hw start failed\n");
- goto fail;
- }
-
if (!wireless) {
/* Note that if query fails it is not a hard failure */
wacom_query_tablet_data(wacom);
@@ -2416,8 +2442,13 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
goto fail_quirks;
}
- if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
+ if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR) {
error = hid_hw_open(hdev);
+ if (error) {
+ hid_err(hdev, "hw open failed\n");
+ goto fail_quirks;
+ }
+ }
wacom_set_shared_values(wacom_wac);
devres_close_group(&hdev->dev, wacom);
@@ -2521,6 +2552,18 @@ fail:
return;
}
+static void wacom_remote_destroy_battery(struct wacom *wacom, int index)
+{
+ struct wacom_remote *remote = wacom->remote;
+
+ if (remote->remotes[index].battery.battery) {
+ devres_release_group(&wacom->hdev->dev,
+ &remote->remotes[index].battery.bat_desc);
+ remote->remotes[index].battery.battery = NULL;
+ remote->remotes[index].active_time = 0;
+ }
+}
+
static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
{
struct wacom_remote *remote = wacom->remote;
@@ -2535,9 +2578,7 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
remote->remotes[i].registered = false;
spin_unlock_irqrestore(&remote->remote_lock, flags);
- if (remote->remotes[i].battery.battery)
- devres_release_group(&wacom->hdev->dev,
- &remote->remotes[i].battery.bat_desc);
+ wacom_remote_destroy_battery(wacom, i);
if (remote->remotes[i].group.name)
devres_release_group(&wacom->hdev->dev,
@@ -2545,7 +2586,6 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
remote->remotes[i].serial = 0;
remote->remotes[i].group.name = NULL;
- remote->remotes[i].battery.battery = NULL;
wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN;
}
}
@@ -2630,6 +2670,9 @@ static int wacom_remote_attach_battery(struct wacom *wacom, int index)
if (remote->remotes[index].battery.battery)
return 0;
+ if (!remote->remotes[index].active_time)
+ return 0;
+
if (wacom->led.groups[index].select == WACOM_STATUS_UNKNOWN)
return 0;
@@ -2645,6 +2688,7 @@ static void wacom_remote_work(struct work_struct *work)
{
struct wacom *wacom = container_of(work, struct wacom, remote_work);
struct wacom_remote *remote = wacom->remote;
+ ktime_t kt = ktime_get();
struct wacom_remote_data data;
unsigned long flags;
unsigned int count;
@@ -2671,6 +2715,10 @@ static void wacom_remote_work(struct work_struct *work)
serial = data.remote[i].serial;
if (data.remote[i].connected) {
+ if (kt - remote->remotes[i].active_time > WACOM_REMOTE_BATTERY_TIMEOUT
+ && remote->remotes[i].active_time != 0)
+ wacom_remote_destroy_battery(wacom, i);
+
if (remote->remotes[i].serial == serial) {
wacom_remote_attach_battery(wacom, i);
continue;
@@ -2778,6 +2826,7 @@ static int wacom_probe(struct hid_device *hdev,
INIT_WORK(&wacom->battery_work, wacom_battery_work);
INIT_WORK(&wacom->remote_work, wacom_remote_work);
INIT_WORK(&wacom->mode_change_work, wacom_mode_change_work);
+ timer_setup(&wacom->idleprox_timer, &wacom_idleprox_timeout, TIMER_DEFERRABLE);
/* ask for the report descriptor to be loaded by HID */
error = hid_parse(hdev);
@@ -2786,6 +2835,11 @@ static int wacom_probe(struct hid_device *hdev,
return error;
}
+ if (features->type == BOOTLOADER) {
+ hid_warn(hdev, "Using device in hidraw-only mode");
+ return hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ }
+
error = wacom_parse_and_register(wacom, false);
if (error)
return error;
@@ -2817,6 +2871,7 @@ static void wacom_remove(struct hid_device *hdev)
cancel_work_sync(&wacom->battery_work);
cancel_work_sync(&wacom->remote_work);
cancel_work_sync(&wacom->mode_change_work);
+ del_timer_sync(&wacom->idleprox_timer);
if (hdev->bus == BUS_BLUETOOTH)
device_remove_file(&hdev->dev, &dev_attr_speed);
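The remote-battery teardown added above compares monotonic timestamps directly: ktime_t is a signed 64-bit nanosecond count, so subtraction and comparison against the 21-second WACOM_REMOTE_BATTERY_TIMEOUT constant are plain integer operations. A hedged sketch of the same test in isolation (demo_* names are illustrative):

    #include <linux/ktime.h>

    #define DEMO_TIMEOUT_NS 21000000000ll	/* 21 s, as in the patch */

    static bool demo_battery_expired(ktime_t last_active)
    {
    	ktime_t now = ktime_get();	/* monotonic clock, ns resolution */

    	/*
    	 * last_active == 0 means the remote has not been seen yet;
    	 * the driver skips teardown in that case.
    	 */
    	return last_active != 0 && ktime_sub(now, last_active) > DEMO_TIMEOUT_NS;
    }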
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index de69ea5f5a4b..18f3d220aaad 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -11,6 +11,7 @@
#include "wacom_wac.h"
#include "wacom.h"
#include <linux/input/mt.h>
+#include <linux/jiffies.h>
/* resolution for penabled devices */
#define WACOM_PL_RES 20
@@ -41,6 +42,43 @@ static int wacom_numbered_button_to_key(int n);
static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
int group);
+
+static void wacom_force_proxout(struct wacom_wac *wacom_wac)
+{
+ struct input_dev *input = wacom_wac->pen_input;
+
+ wacom_wac->shared->stylus_in_proximity = 0;
+
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_key(input, BTN_STYLUS, 0);
+ input_report_key(input, BTN_STYLUS2, 0);
+ input_report_key(input, BTN_STYLUS3, 0);
+ input_report_key(input, wacom_wac->tool[0], 0);
+ if (wacom_wac->serial[0]) {
+ input_report_abs(input, ABS_MISC, 0);
+ }
+ input_report_abs(input, ABS_PRESSURE, 0);
+
+ wacom_wac->tool[0] = 0;
+ wacom_wac->id[0] = 0;
+ wacom_wac->serial[0] = 0;
+
+ input_sync(input);
+}
+
+void wacom_idleprox_timeout(struct timer_list *list)
+{
+ struct wacom *wacom = from_timer(wacom, list, idleprox_timer);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
+ if (!wacom_wac->hid_data.sense_state) {
+ return;
+ }
+
+ hid_warn(wacom->hdev, "%s: tool appears to be hung in-prox. forcing it out.\n", __func__);
+ wacom_force_proxout(wacom_wac);
+}
+
/*
* Percent of battery capacity for Graphire.
* 8th value means AC online and show 100% capacity.
@@ -638,9 +676,26 @@ static int wacom_intuos_id_mangle(int tool_id)
return (tool_id & ~0xFFF) << 4 | (tool_id & 0xFFF);
}
+static bool wacom_is_art_pen(int tool_id)
+{
+ bool is_art_pen = false;
+
+ switch (tool_id) {
+ case 0x885: /* Intuos3 Marker Pen */
+ case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
+ case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */
+ is_art_pen = true;
+ break;
+ }
+ return is_art_pen;
+}
+
static int wacom_intuos_get_tool_type(int tool_id)
{
- int tool_type;
+ int tool_type = BTN_TOOL_PEN;
+
+ if (wacom_is_art_pen(tool_id))
+ return tool_type;
switch (tool_id) {
case 0x812: /* Inking pen */
@@ -655,17 +710,17 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x852:
case 0x823: /* Intuos3 Grip Pen */
case 0x813: /* Intuos3 Classic Pen */
- case 0x885: /* Intuos3 Marker Pen */
case 0x802: /* Intuos4/5 13HD/24HD General Pen */
- case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
case 0x8e2: /* IntuosHT2 pen */
case 0x022:
- case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */
+ case 0x200: /* Pro Pen 3 */
+ case 0x04200: /* Pro Pen 3 */
case 0x10842: /* MobileStudio Pro Pro Pen slim */
case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
case 0x16802: /* Cintiq 13HD Pro Pen */
case 0x18802: /* DTH2242 Pen */
case 0x10802: /* Intuos4/5 13HD/24HD General Pen */
+ case 0x80842: /* Intuos Pro and Cintiq Pro 3D Pen */
tool_type = BTN_TOOL_PEN;
break;
@@ -718,10 +773,6 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x10902: /* Intuos4/5 13HD/24HD Airbrush */
tool_type = BTN_TOOL_AIRBRUSH;
break;
-
- default: /* Unknown tool */
- tool_type = BTN_TOOL_PEN;
- break;
}
return tool_type;
}
@@ -780,7 +831,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
/* Enter report */
if ((data[1] & 0xfc) == 0xc0) {
/* serial number of the tool */
- wacom->serial[idx] = ((data[3] & 0x0f) << 28) +
+ wacom->serial[idx] = ((__u64)(data[3] & 0x0f) << 28) +
(data[4] << 20) + (data[5] << 12) +
(data[6] << 4) + (data[7] >> 4);
@@ -1076,6 +1127,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
if (index < 0 || !remote->remotes[index].registered)
goto out;
+ remote->remotes[i].active_time = ktime_get();
input = remote->remotes[index].input;
input_report_key(input, BTN_0, (data[9] & 0x01));
@@ -1255,6 +1307,9 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
struct input_dev *pen_input = wacom->pen_input;
unsigned char *data = wacom->data;
+ int number_of_valid_frames = 0;
+ ktime_t time_interval = 15000000;
+ ktime_t time_packet_received = ktime_get();
int i;
if (wacom->features.type == INTUOSP2_BT ||
@@ -1275,12 +1330,30 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
}
+ /* number of valid frames */
for (i = 0; i < pen_frames; i++) {
unsigned char *frame = &data[i*pen_frame_len + 1];
bool valid = frame[0] & 0x80;
+
+ if (valid)
+ number_of_valid_frames++;
+ }
+
+ if (number_of_valid_frames) {
+ if (wacom->hid_data.time_delayed)
+ time_interval = ktime_get() - wacom->hid_data.time_delayed;
+ time_interval = div_u64(time_interval, number_of_valid_frames);
+ wacom->hid_data.time_delayed = time_packet_received;
+ }
+
+ for (i = 0; i < number_of_valid_frames; i++) {
+ unsigned char *frame = &data[i*pen_frame_len + 1];
+ bool valid = frame[0] & 0x80;
bool prox = frame[0] & 0x40;
bool range = frame[0] & 0x20;
bool invert = frame[0] & 0x10;
+ int frames_number_reversed = number_of_valid_frames - i - 1;
+ ktime_t event_timestamp = time_packet_received - frames_number_reversed * time_interval;
if (!valid)
continue;
@@ -1293,6 +1366,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
wacom->tool[0] = 0;
wacom->id[0] = 0;
wacom->serial[0] = 0;
+ wacom->hid_data.time_delayed = 0;
return;
}
@@ -1329,6 +1403,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
get_unaligned_le16(&frame[11]));
}
}
+
if (wacom->tool[0]) {
input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
if (wacom->features.type == INTUOSP2_BT ||
@@ -1352,6 +1427,9 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
wacom->shared->stylus_in_proximity = prox;
+ /* add timestamp to unpack the frames */
+ input_set_timestamp(pen_input, event_timestamp);
+
input_sync(pen_input);
}
}
@@ -1843,6 +1921,7 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
int fmax = field->logical_maximum;
unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
int resolution_code = code;
+ int resolution = hidinput_calc_abs_res(field, resolution_code);
if (equivalent_usage == HID_DG_TWIST) {
resolution_code = ABS_RZ;
@@ -1865,8 +1944,15 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
switch (type) {
case EV_ABS:
input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
- input_abs_set_res(input, code,
- hidinput_calc_abs_res(field, resolution_code));
+
+ /* older tablets may be missing the physical usage */
+ if ((code == ABS_X || code == ABS_Y) && !resolution) {
+ resolution = WACOM_INTUOS_RES;
+ hid_warn(input,
+ "Wacom usage (%d) missing resolution\n",
+ code);
+ }
+ input_abs_set_res(input, code, resolution);
break;
case EV_KEY:
input_set_capability(input, EV_KEY, code);
@@ -1883,18 +1969,7 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
static void wacom_wac_battery_usage_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
- struct wacom *wacom = hid_get_drvdata(hdev);
- struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct wacom_features *features = &wacom_wac->features;
- unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
-
- switch (equivalent_usage) {
- case HID_DG_BATTERYSTRENGTH:
- case WACOM_HID_WD_BATTERY_LEVEL:
- case WACOM_HID_WD_BATTERY_CHARGING:
- features->quirks |= WACOM_QUIRK_BATTERY;
- break;
- }
+ return;
}
static void wacom_wac_battery_event(struct hid_device *hdev, struct hid_field *field,
@@ -1915,18 +1990,21 @@ static void wacom_wac_battery_event(struct hid_device *hdev, struct hid_field *f
wacom_wac->hid_data.bat_connected = 1;
wacom_wac->hid_data.bat_status = WACOM_POWER_SUPPLY_STATUS_AUTO;
}
+ wacom_wac->features.quirks |= WACOM_QUIRK_BATTERY;
break;
case WACOM_HID_WD_BATTERY_LEVEL:
value = value * 100 / (field->logical_maximum - field->logical_minimum);
wacom_wac->hid_data.battery_capacity = value;
wacom_wac->hid_data.bat_connected = 1;
wacom_wac->hid_data.bat_status = WACOM_POWER_SUPPLY_STATUS_AUTO;
+ wacom_wac->features.quirks |= WACOM_QUIRK_BATTERY;
break;
case WACOM_HID_WD_BATTERY_CHARGING:
wacom_wac->hid_data.bat_charging = value;
wacom_wac->hid_data.ps_connected = value;
wacom_wac->hid_data.bat_connected = 1;
wacom_wac->hid_data.bat_status = WACOM_POWER_SUPPLY_STATUS_AUTO;
+ wacom_wac->features.quirks |= WACOM_QUIRK_BATTERY;
break;
}
}
@@ -1942,18 +2020,15 @@ static void wacom_wac_battery_report(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct wacom_features *features = &wacom_wac->features;
- if (features->quirks & WACOM_QUIRK_BATTERY) {
- int status = wacom_wac->hid_data.bat_status;
- int capacity = wacom_wac->hid_data.battery_capacity;
- bool charging = wacom_wac->hid_data.bat_charging;
- bool connected = wacom_wac->hid_data.bat_connected;
- bool powered = wacom_wac->hid_data.ps_connected;
+ int status = wacom_wac->hid_data.bat_status;
+ int capacity = wacom_wac->hid_data.battery_capacity;
+ bool charging = wacom_wac->hid_data.bat_charging;
+ bool connected = wacom_wac->hid_data.bat_connected;
+ bool powered = wacom_wac->hid_data.ps_connected;
- wacom_notify_battery(wacom_wac, status, capacity, charging,
- connected, powered);
- }
+ wacom_notify_battery(wacom_wac, status, capacity, charging,
+ connected, powered);
}
static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
@@ -2006,7 +2081,6 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
wacom_wac->has_mute_touch_switch = true;
usage->type = EV_SW;
usage->code = SW_MUTE_DEVICE;
- features->device_type |= WACOM_DEVICETYPE_PAD;
break;
case WACOM_HID_WD_TOUCHSTRIP:
wacom_map_usage(input, usage, field, EV_ABS, ABS_RX, 0);
@@ -2086,6 +2160,30 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
wacom_wac->hid_data.inrange_state |= value;
}
+ /* Process touch switch state first since it is reported through touch interface,
+ * which is independent of the pad interface. In the case when there are no other pad
+ * events, the pad interface will not even be created.
+ */
+ if ((equivalent_usage == WACOM_HID_WD_MUTE_DEVICE) ||
+ (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)) {
+ if (wacom_wac->shared->touch_input) {
+ bool *is_touch_on = &wacom_wac->shared->is_touch_on;
+
+ if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value)
+ *is_touch_on = !(*is_touch_on);
+ else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)
+ *is_touch_on = value;
+
+ input_report_switch(wacom_wac->shared->touch_input,
+ SW_MUTE_DEVICE, !(*is_touch_on));
+ input_sync(wacom_wac->shared->touch_input);
+ }
+ return;
+ }
+
+ if (!input)
+ return;
+
switch (equivalent_usage) {
case WACOM_HID_WD_TOUCHRING:
/*
@@ -2121,22 +2219,6 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
input_event(input, usage->type, usage->code, 0);
break;
- case WACOM_HID_WD_MUTE_DEVICE:
- case WACOM_HID_WD_TOUCHONOFF:
- if (wacom_wac->shared->touch_input) {
- bool *is_touch_on = &wacom_wac->shared->is_touch_on;
-
- if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value)
- *is_touch_on = !(*is_touch_on);
- else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)
- *is_touch_on = value;
-
- input_report_switch(wacom_wac->shared->touch_input,
- SW_MUTE_DEVICE, !(*is_touch_on));
- input_sync(wacom_wac->shared->touch_input);
- }
- break;
-
case WACOM_HID_WD_MODE_CHANGE:
if (wacom_wac->is_direct_mode != value) {
wacom_wac->is_direct_mode = value;
@@ -2288,6 +2370,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
value = field->logical_maximum - value;
break;
case HID_DG_INRANGE:
+ mod_timer(&wacom->idleprox_timer, jiffies + msecs_to_jiffies(100));
wacom_wac->hid_data.inrange_state = value;
if (!(features->quirks & WACOM_QUIRK_SENSE))
wacom_wac->hid_data.sense_state = value;
@@ -2312,6 +2395,9 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
}
return;
case HID_DG_TWIST:
+ /* don't modify the value if the pen doesn't support the feature */
+ if (!wacom_is_art_pen(wacom_wac->id[0])) return;
+
/*
* Userspace expects pen twist to have its zero point when
* the buttons/finger is on the tablet's left. HID values
@@ -2454,7 +2540,14 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
wacom_wac->hid_data.tipswitch);
input_report_key(input, wacom_wac->tool[0], sense);
if (wacom_wac->serial[0]) {
- input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
+ /*
+ * xf86-input-wacom does not accept a serial number
+ * of '0'. Report the low 32 bits if possible, but
+ * if they are zero, report the upper ones instead.
+ */
+ __u32 serial_lo = wacom_wac->serial[0] & 0xFFFFFFFFu;
+ __u32 serial_hi = wacom_wac->serial[0] >> 32;
+ input_event(input, EV_MSC, MSC_SERIAL, (int)(serial_lo ? serial_lo : serial_hi));
input_report_abs(input, ABS_MISC, sense ? id : 0);
}
@@ -2526,8 +2619,8 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
{
struct hid_data *hid_data = &wacom_wac->hid_data;
bool mt = wacom_wac->features.touch_max > 1;
- bool prox = hid_data->tipswitch &&
- report_touch_events(wacom_wac);
+ bool touch_down = hid_data->tipswitch && hid_data->confidence;
+ bool prox = touch_down && report_touch_events(wacom_wac);
if (wacom_wac->shared->has_mute_touch_switch &&
!wacom_wac->shared->is_touch_on) {
@@ -2566,24 +2659,6 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
}
}
-static bool wacom_wac_slot_is_active(struct input_dev *dev, int key)
-{
- struct input_mt *mt = dev->mt;
- struct input_mt_slot *s;
-
- if (!mt)
- return false;
-
- for (s = mt->slots; s != mt->slots + mt->num_slots; s++) {
- if (s->key == key &&
- input_mt_get_value(s, ABS_MT_TRACKING_ID) >= 0) {
- return true;
- }
- }
-
- return false;
-}
-
static void wacom_wac_finger_event(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage, __s32 value)
{
@@ -2631,14 +2706,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
}
if (usage->usage_index + 1 == field->report_count) {
- if (equivalent_usage == wacom_wac->hid_data.last_slot_field) {
- bool touch_removed = wacom_wac_slot_is_active(wacom_wac->touch_input,
- wacom_wac->hid_data.id) && !wacom_wac->hid_data.tipswitch;
-
- if (wacom_wac->hid_data.confidence || touch_removed) {
- wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
- }
- }
+ if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
+ wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
}
}
@@ -2763,7 +2832,7 @@ void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
/* usage tests must precede field tests */
if (WACOM_BATTERY_USAGE(usage))
wacom_wac_battery_event(hdev, field, usage, value);
- else if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input)
+ else if (WACOM_PAD_FIELD(field))
wacom_wac_pad_event(hdev, field, usage, value);
else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
wacom_wac_pen_event(hdev, field, usage, value);
@@ -4758,10 +4827,17 @@ static const struct wacom_features wacom_features_0x3c6 =
static const struct wacom_features wacom_features_0x3c8 =
{ "Wacom Intuos BT M", 21600, 13500, 4095, 63,
INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
+static const struct wacom_features wacom_features_0x3dd =
+ { "Wacom Intuos Pro S", 31920, 19950, 8191, 63,
+ INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7,
+ .touch_max = 10 };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
+static const struct wacom_features wacom_features_0x94 =
+ { "Wacom Bootloader", .type = BOOTLOADER };
+
#define USB_DEVICE_WACOM(prod) \
HID_DEVICE(BUS_USB, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
.driver_data = (kernel_ulong_t)&wacom_features_##prod
@@ -4835,6 +4911,7 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x84) },
{ USB_DEVICE_WACOM(0x90) },
{ USB_DEVICE_WACOM(0x93) },
+ { USB_DEVICE_WACOM(0x94) },
{ USB_DEVICE_WACOM(0x97) },
{ USB_DEVICE_WACOM(0x9A) },
{ USB_DEVICE_WACOM(0x9F) },
@@ -4933,6 +5010,7 @@ const struct hid_device_id wacom_ids[] = {
{ BT_DEVICE_WACOM(0x393) },
{ BT_DEVICE_WACOM(0x3c6) },
{ BT_DEVICE_WACOM(0x3c8) },
+ { BT_DEVICE_WACOM(0x3dd) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
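The Bluetooth pen path above batches several pen frames into one packet, so the patch spreads the frames evenly across the measured interval between packet arrivals and stamps each input event accordingly, with the newest frame landing on the arrival time. A simplified sketch of that interpolation (the standalone helper and demo_* name are illustrative):

    #include <linux/ktime.h>
    #include <linux/math64.h>

    static ktime_t demo_frame_timestamp(ktime_t now, ktime_t prev_packet,
    				    int n_frames, int frame_idx)
    {
    	ktime_t interval = 15000000;	/* 15 ms fallback, as in the patch */

    	if (prev_packet)
    		interval = div_u64(ktime_sub(now, prev_packet), n_frames);

    	/* frame_idx == n_frames - 1 (the newest frame) lands on "now" */
    	return now - (n_frames - frame_idx - 1) * interval;
    }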
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 8dea7cb298e6..d393f96626fb 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -15,6 +15,7 @@
#define WACOM_NAME_MAX 64
#define WACOM_MAX_REMOTES 5
#define WACOM_STATUS_UNKNOWN 255
+#define WACOM_REMOTE_BATTERY_TIMEOUT 21000000000ll
/* packet length for individual models */
#define WACOM_PKGLEN_BBFUN 9
@@ -242,6 +243,7 @@ enum {
MTTPC,
MTTPC_B,
HID_GENERIC,
+ BOOTLOADER,
MAX_TYPE
};
@@ -319,6 +321,7 @@ struct hid_data {
int bat_connected;
int ps_connected;
bool pad_input_event_flag;
+ ktime_t time_delayed;
};
struct wacom_remote_data {
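The BOOTLOADER type added to the enum above pairs with the probe-time short circuit in wacom_sys.c: a device stuck in bootloader mode gets only the raw hidraw interface, so flashing tools can reach it while no input nodes are created. The connect-mask idiom, in a hypothetical minimal driver:

    #include <linux/hid.h>

    static int demo_probe(struct hid_device *hdev, const struct hid_device_id *id)
    {
    	int ret;

    	ret = hid_parse(hdev);
    	if (ret)
    		return ret;

    	/* expose only /dev/hidraw*; skip hid-input and the rest */
    	return hid_hw_start(hdev, HID_CONNECT_HIDRAW);
    }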
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index f36036be7f03..329889bf42f9 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -502,8 +502,10 @@ static int ssi_probe(struct platform_device *pd)
platform_set_drvdata(pd, ssi);
err = ssi_add_controller(ssi, pd);
- if (err < 0)
+ if (err < 0) {
+ hsi_put_controller(ssi);
goto out1;
+ }
pm_runtime_enable(&pd->dev);
@@ -524,6 +526,7 @@ static int ssi_probe(struct platform_device *pd)
if (!childpdev) {
err = -ENODEV;
dev_err(&pd->dev, "failed to create ssi controller port\n");
+ of_node_put(child);
goto out3;
}
}
@@ -535,9 +538,9 @@ out3:
device_for_each_child(&pd->dev, NULL, ssi_remove_ports);
out2:
ssi_remove_controller(ssi);
+ pm_runtime_disable(&pd->dev);
out1:
platform_set_drvdata(pd, NULL);
- pm_runtime_disable(&pd->dev);
return err;
}
@@ -628,7 +631,13 @@ static int __init ssi_init(void) {
if (ret)
return ret;
- return platform_driver_register(&ssi_port_pdriver);
+ ret = platform_driver_register(&ssi_port_pdriver);
+ if (ret) {
+ platform_driver_unregister(&ssi_pdriver);
+ return ret;
+ }
+
+ return 0;
}
module_init(ssi_init);
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index a0cb5be246e1..b9495b720f1b 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -230,10 +230,10 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch)
if (msg->ttype == HSI_MSG_READ) {
err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
DMA_FROM_DEVICE);
- if (err < 0) {
+ if (!err) {
dev_dbg(&ssi->device, "DMA map SG failed !\n");
pm_runtime_put_autosuspend(omap_port->pdev);
- return err;
+ return -EIO;
}
csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
@@ -247,10 +247,10 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch)
} else {
err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
DMA_TO_DEVICE);
- if (err < 0) {
+ if (!err) {
dev_dbg(&ssi->device, "DMA map SG failed !\n");
pm_runtime_put_autosuspend(omap_port->pdev);
- return err;
+ return -EIO;
}
csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
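The two omap_ssi_port.c hunks above correct a common mistake: dma_map_sg() returns the number of entries mapped and 0 on failure, never a negative errno, so the old `err < 0` test could not fire and a mapping failure was silently treated as success. A sketch of the correct calling convention (demo_* names are illustrative; -EIO mirrors the patch's choice):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int demo_map_and_go(struct device *dev, struct sg_table *sgt)
    {
    	int nents;

    	nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
    	if (!nents)		/* zero entries mapped means failure */
    		return -EIO;

    	/* ... program the DMA engine with the nents mapped entries ... */

    	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
    	return 0;
    }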
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 9260ad47350f..3eb27f33e1fb 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -493,13 +493,17 @@ static void vmbus_add_channel_work(struct work_struct *work)
* Add the new device to the bus. This will kick off device-driver
* binding which eventually invokes the device driver's AddDevice()
* method.
+ *
+ * If vmbus_device_register() fails, the 'device_obj' is freed in
+ * vmbus_device_release() as called by device_unregister() in the
+ * error path of vmbus_device_register(). In the outside error
+ * path, there's no need to free it.
*/
ret = vmbus_device_register(newchannel->device_obj);
if (ret != 0) {
pr_err("unable to add child device object (relid %d)\n",
newchannel->offermsg.child_relid);
- kfree(newchannel->device_obj);
goto err_deq_chan;
}
@@ -798,11 +802,22 @@ static void vmbus_wait_for_unload(void)
if (completion_done(&vmbus_connection.unload_event))
goto completed;
- for_each_online_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
+ /*
+ * In a CoCo VM the synic_message_page is not allocated
+ * in hv_synic_alloc(). Instead it is set/cleared in
+ * hv_synic_enable_regs() and hv_synic_disable_regs()
+ * such that it is set only when the CPU is online. If
+ * not all present CPUs are online, the message page
+ * might be NULL, so skip such CPUs.
+ */
page_addr = hv_cpu->synic_message_page;
+ if (!page_addr)
+ continue;
+
msg = (struct hv_message *)page_addr
+ VMBUS_MESSAGE_SINT;
@@ -836,11 +851,14 @@ completed:
* maybe-pending messages on all CPUs to be able to receive new
* messages after we reconnect.
*/
- for_each_online_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
page_addr = hv_cpu->synic_message_page;
+ if (!page_addr)
+ continue;
+
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
msg->header.message_type = HVMSG_NONE;
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 56918274c48c..a1cfa7596853 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1863,6 +1863,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
ret = device_register(&child_device_obj->device);
if (ret) {
pr_err("Unable to register child device\n");
+ put_device(&child_device_obj->device);
return ret;
}
@@ -2075,7 +2076,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
bool fb_overlap_ok)
{
struct resource *iter, *shadow;
- resource_size_t range_min, range_max, start;
+ resource_size_t range_min, range_max, start, end;
const char *dev_n = dev_name(&device_obj->device);
int retval;
@@ -2110,6 +2111,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
range_max = iter->end;
start = (range_min + align - 1) & ~(align - 1);
for (; start + size - 1 <= range_max; start += align) {
+ end = start + size - 1;
+
+ /* Skip the whole fb_mmio region if not fb_overlap_ok */
+ if (!fb_overlap_ok && fb_mmio &&
+ (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
+ ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
+ continue;
+
shadow = __request_region(iter, start, size, NULL,
IORESOURCE_BUSY);
if (!shadow)
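The vmbus_device_register() change follows the driver-core contract: once device_register() has been attempted, the embedded kobject owns the object's lifetime, so a failure must be answered with put_device(), which ends in the release callback, rather than a direct kfree() (the matching kfree() removal is in the channel_mgmt.c hunk above). A hedged sketch of the pattern, assuming a release callback that frees the container:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct demo_child {
    	struct device dev;
    };

    static void demo_release(struct device *dev)
    {
    	kfree(container_of(dev, struct demo_child, dev));
    }

    static int demo_add_child(struct device *parent)
    {
    	struct demo_child *c = kzalloc(sizeof(*c), GFP_KERNEL);
    	int ret;

    	if (!c)
    		return -ENOMEM;

    	c->dev.parent = parent;
    	c->dev.release = demo_release;
    	dev_set_name(&c->dev, "demo-child");

    	ret = device_register(&c->dev);
    	if (ret) {
    		/* not kfree(): the kobject may already be visible */
    		put_device(&c->dev);
    		return ret;
    	}
    	return 0;
    }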
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 740ac0a1b726..549c8f30acce 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -32,6 +32,7 @@ ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
#define POWER_METER_CAN_NOTIFY (1 << 3)
#define POWER_METER_IS_BATTERY (1 << 8)
#define UNKNOWN_HYSTERESIS 0xFFFFFFFF
+#define UNKNOWN_POWER 0xFFFFFFFF
#define METER_NOTIFY_CONFIG 0x80
#define METER_NOTIFY_TRIP 0x81
@@ -343,6 +344,9 @@ static ssize_t show_power(struct device *dev,
update_meter(resource);
mutex_unlock(&resource->lock);
+ if (resource->power == UNKNOWN_POWER)
+ return -ENODATA;
+
return sprintf(buf, "%llu\n", resource->power * 1000);
}
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 01c2eeb02aa9..5af7226657ab 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -484,10 +484,10 @@ static ssize_t temp_store(struct device *dev, struct device_attribute *attr,
val = (temp - val) / 1000;
if (sattr->index != 1) {
- data->temp[HYSTERSIS][sattr->index] &= 0xF0;
+ data->temp[HYSTERSIS][sattr->index] &= 0x0F;
data->temp[HYSTERSIS][sattr->index] |= (val & 0xF) << 4;
} else {
- data->temp[HYSTERSIS][sattr->index] &= 0x0F;
+ data->temp[HYSTERSIS][sattr->index] &= 0xF0;
data->temp[HYSTERSIS][sattr->index] |= (val & 0xF);
}
@@ -552,11 +552,11 @@ static ssize_t temp_st_show(struct device *dev, struct device_attribute *attr,
val = data->enh_acoustics[0] & 0xf;
break;
case 1:
- val = (data->enh_acoustics[1] >> 4) & 0xf;
+ val = data->enh_acoustics[1] & 0xf;
break;
case 2:
default:
- val = data->enh_acoustics[1] & 0xf;
+ val = (data->enh_acoustics[1] >> 4) & 0xf;
break;
}
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 40f3139f1e02..dca5d3bf0629 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -194,6 +194,8 @@ struct aspeed_pwm_tacho_data {
u8 fan_tach_ch_source[16];
struct aspeed_cooling_device *cdev[8];
const struct attribute_group *groups[3];
+ /* protects access to shared ASPEED_PTCR_RESULT */
+ struct mutex tach_lock;
};
enum type { TYPEM, TYPEN, TYPEO };
@@ -528,6 +530,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
u8 fan_tach_ch_source, type, mode, both;
int ret;
+ mutex_lock(&priv->tach_lock);
+
regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0);
regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0x1 << fan_tach_ch);
@@ -545,6 +549,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
ASPEED_RPM_STATUS_SLEEP_USEC,
usec);
+ mutex_unlock(&priv->tach_lock);
+
/* return -ETIMEDOUT if we didn't get an answer. */
if (ret)
return ret;
@@ -909,6 +915,7 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ mutex_init(&priv->tach_lock);
priv->regmap = devm_regmap_init(dev, NULL, (__force void *)regs,
&aspeed_pwm_tacho_regmap_config);
if (IS_ERR(priv->regmap))
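The new tach_lock closes a race on the shared ASPEED_PTCR_RESULT register: the trigger, poll and result read must happen as one unit, or two concurrent readers can trigger different channels and read each other's measurements. The shape of the sequence, sketched with illustrative demo_* names:

    #include <linux/mutex.h>

    struct demo_tach {
    	struct mutex tach_lock;	/* guards the shared RESULT register */
    };

    static int demo_read_rpm(struct demo_tach *t, int channel)
    {
    	int value;

    	mutex_lock(&t->tach_lock);
    	/* 1. trigger a measurement on "channel"
    	 * 2. poll until the controller reports completion
    	 * 3. read the single shared RESULT register */
    	value = 0;	/* stands in for the step-3 read */
    	mutex_unlock(&t->tach_lock);

    	return value;
    }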
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index d855c78fb8be..04acb8274fdf 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -40,15 +40,12 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
-#define NUM_REAL_CORES 128 /* Number of Real cores per cpu */
-#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
+#define NUM_REAL_CORES 512 /* Number of Real cores per cpu */
+#define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
-#define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id)
-#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
-
#ifdef CONFIG_SMP
#define for_each_sibling(i, cpu) \
for_each_cpu(i, topology_sibling_cpumask(cpu))
@@ -91,6 +88,8 @@ struct temp_data {
struct platform_data {
struct device *hwmon_dev;
u16 pkg_id;
+ u16 cpu_map[NUM_REAL_CORES];
+ struct ida ida;
struct cpumask cpumask;
struct temp_data *core_data[MAX_CORE_DATA];
struct device_attribute name_attr;
@@ -243,10 +242,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
*/
if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) {
for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) {
- if (host_bridge->device == tjmax_pci_table[i].device)
+ if (host_bridge->device == tjmax_pci_table[i].device) {
+ pci_dev_put(host_bridge);
return tjmax_pci_table[i].tjmax;
+ }
}
}
+ pci_dev_put(host_bridge);
for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
if (strstr(c->x86_model_id, tjmax_table[i].id))
@@ -378,7 +380,7 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
}
static int create_core_attrs(struct temp_data *tdata, struct device *dev,
- int attr_no)
+ int index)
{
int i;
static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
@@ -390,13 +392,20 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
};
for (i = 0; i < tdata->attr_size; i++) {
+ /*
+ * We map the attr number to core id of the CPU
+ * The attr number is always core id + 2
+ * The Pkgtemp will always show up as temp1_*, if available
+ */
+ int attr_no = tdata->is_pkg_data ? 1 : tdata->cpu_core_id + 2;
+
snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH,
"temp%d_%s", attr_no, suffixes[i]);
sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
tdata->sd_attrs[i].dev_attr.attr.mode = 0444;
tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
- tdata->sd_attrs[i].index = attr_no;
+ tdata->sd_attrs[i].index = index;
tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr;
}
tdata->attr_group.attrs = tdata->attrs;
@@ -441,7 +450,7 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
MSR_IA32_THERM_STATUS;
tdata->is_pkg_data = pkg_flag;
tdata->cpu = cpu;
- tdata->cpu_core_id = TO_CORE_ID(cpu);
+ tdata->cpu_core_id = topology_core_id(cpu);
tdata->attr_size = MAX_CORE_ATTRS;
mutex_init(&tdata->update_lock);
return tdata;
@@ -454,22 +463,29 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
struct platform_data *pdata = platform_get_drvdata(pdev);
struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 eax, edx;
- int err, attr_no;
+ int err, index;
/*
- * Find attr number for sysfs:
- * We map the attr number to core id of the CPU
- * The attr number is always core id + 2
- * The Pkgtemp will always show up as temp1_*, if available
+ * Get the index of tdata in pdata->core_data[]
+ * tdata for package: pdata->core_data[1]
+ * tdata for core: pdata->core_data[2] .. pdata->core_data[NUM_REAL_CORES + 1]
*/
- attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu);
+ if (pkg_flag) {
+ index = PKG_SYSFS_ATTR_NO;
+ } else {
+ index = ida_alloc_max(&pdata->ida, NUM_REAL_CORES - 1, GFP_KERNEL);
+ if (index < 0)
+ return index;
- if (attr_no > MAX_CORE_DATA - 1)
- return -ERANGE;
+ pdata->cpu_map[index] = topology_core_id(cpu);
+ index += BASE_SYSFS_ATTR_NO;
+ }
tdata = init_temp_data(cpu, pkg_flag);
- if (!tdata)
- return -ENOMEM;
+ if (!tdata) {
+ err = -ENOMEM;
+ goto ida_free;
+ }
/* Test if we can access the status register */
err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
@@ -494,17 +510,20 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
}
}
- pdata->core_data[attr_no] = tdata;
+ pdata->core_data[index] = tdata;
/* Create sysfs interfaces */
- err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no);
+ err = create_core_attrs(tdata, pdata->hwmon_dev, index);
if (err)
goto exit_free;
return 0;
exit_free:
- pdata->core_data[attr_no] = NULL;
+ pdata->core_data[index] = NULL;
kfree(tdata);
+ida_free:
+ if (!pkg_flag)
+ ida_free(&pdata->ida, index - BASE_SYSFS_ATTR_NO);
return err;
}
@@ -519,71 +538,63 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx)
{
struct temp_data *tdata = pdata->core_data[indx];
+ /* if we errored on add then this is already gone */
+ if (!tdata)
+ return;
+
/* Remove the sysfs attributes */
sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group);
kfree(pdata->core_data[indx]);
pdata->core_data[indx] = NULL;
+
+ if (indx >= BASE_SYSFS_ATTR_NO)
+ ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO);
}
-static int coretemp_probe(struct platform_device *pdev)
+static int coretemp_device_add(int zoneid)
{
- struct device *dev = &pdev->dev;
+ struct platform_device *pdev;
struct platform_data *pdata;
+ int err;
/* Initialize the per-zone data structures */
- pdata = devm_kzalloc(dev, sizeof(struct platform_data), GFP_KERNEL);
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- pdata->pkg_id = pdev->id;
- platform_set_drvdata(pdev, pdata);
+ pdata->pkg_id = zoneid;
+ ida_init(&pdata->ida);
- pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
- pdata, NULL);
- return PTR_ERR_OR_ZERO(pdata->hwmon_dev);
-}
-
-static int coretemp_remove(struct platform_device *pdev)
-{
- struct platform_data *pdata = platform_get_drvdata(pdev);
- int i;
+ pdev = platform_device_alloc(DRVNAME, zoneid);
+ if (!pdev) {
+ err = -ENOMEM;
+ goto err_free_pdata;
+ }
- for (i = MAX_CORE_DATA - 1; i >= 0; --i)
- if (pdata->core_data[i])
- coretemp_remove_core(pdata, i);
+ err = platform_device_add(pdev);
+ if (err)
+ goto err_put_dev;
+ platform_set_drvdata(pdev, pdata);
+ zone_devices[zoneid] = pdev;
return 0;
-}
-static struct platform_driver coretemp_driver = {
- .driver = {
- .name = DRVNAME,
- },
- .probe = coretemp_probe,
- .remove = coretemp_remove,
-};
+err_put_dev:
+ platform_device_put(pdev);
+err_free_pdata:
+ kfree(pdata);
+ return err;
+}
-static struct platform_device *coretemp_device_add(unsigned int cpu)
+static void coretemp_device_remove(int zoneid)
{
- int err, zoneid = topology_logical_die_id(cpu);
- struct platform_device *pdev;
-
- if (zoneid < 0)
- return ERR_PTR(-ENOMEM);
-
- pdev = platform_device_alloc(DRVNAME, zoneid);
- if (!pdev)
- return ERR_PTR(-ENOMEM);
-
- err = platform_device_add(pdev);
- if (err) {
- platform_device_put(pdev);
- return ERR_PTR(err);
- }
+ struct platform_device *pdev = zone_devices[zoneid];
+ struct platform_data *pdata = platform_get_drvdata(pdev);
- zone_devices[zoneid] = pdev;
- return pdev;
+ ida_destroy(&pdata->ida);
+ kfree(pdata);
+ platform_device_unregister(pdev);
}
static int coretemp_cpu_online(unsigned int cpu)
@@ -607,7 +618,10 @@ static int coretemp_cpu_online(unsigned int cpu)
if (!cpu_has(c, X86_FEATURE_DTHERM))
return -ENODEV;
- if (!pdev) {
+ pdata = platform_get_drvdata(pdev);
+ if (!pdata->hwmon_dev) {
+ struct device *hwmon;
+
/* Check the microcode version of the CPU */
if (chk_ucode_version(cpu))
return -EINVAL;
@@ -618,9 +632,11 @@ static int coretemp_cpu_online(unsigned int cpu)
* online. So, initialize per-pkg data structures and
* then bring this core online.
*/
- pdev = coretemp_device_add(cpu);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ hwmon = hwmon_device_register_with_groups(&pdev->dev, DRVNAME,
+ pdata, NULL);
+ if (IS_ERR(hwmon))
+ return PTR_ERR(hwmon);
+ pdata->hwmon_dev = hwmon;
/*
* Check whether pkgtemp support is available.
@@ -630,7 +646,6 @@ static int coretemp_cpu_online(unsigned int cpu)
coretemp_add_core(pdev, cpu, 1);
}
- pdata = platform_get_drvdata(pdev);
/*
* Check whether a thread sibling is already online. If not add the
* interface for this CPU core.
@@ -647,25 +662,28 @@ static int coretemp_cpu_offline(unsigned int cpu)
struct platform_device *pdev = coretemp_get_pdev(cpu);
struct platform_data *pd;
struct temp_data *tdata;
- int indx, target;
+ int i, indx = -1, target;
- /*
- * Don't execute this on suspend as the device remove locks
- * up the machine.
- */
+ /* No need to tear down any interfaces for suspend */
if (cpuhp_tasks_frozen)
return 0;
/* If the physical CPU device does not exist, just return */
- if (!pdev)
+ pd = platform_get_drvdata(pdev);
+ if (!pd->hwmon_dev)
return 0;
- /* The core id is too big, just return */
- indx = TO_ATTR_NO(cpu);
- if (indx > MAX_CORE_DATA - 1)
+ for (i = 0; i < NUM_REAL_CORES; i++) {
+ if (pd->cpu_map[i] == topology_core_id(cpu)) {
+ indx = i + BASE_SYSFS_ATTR_NO;
+ break;
+ }
+ }
+
+ /* Too many cores and this core is not populated, just return */
+ if (indx < 0)
return 0;
- pd = platform_get_drvdata(pdev);
tdata = pd->core_data[indx];
cpumask_clear_cpu(cpu, &pd->cpumask);
@@ -685,13 +703,14 @@ static int coretemp_cpu_offline(unsigned int cpu)
}
/*
- * If all cores in this pkg are offline, remove the device. This
- * will invoke the platform driver remove function, which cleans up
- * the rest.
+ * If all cores in this pkg are offline, remove the interface.
*/
+ tdata = pd->core_data[PKG_SYSFS_ATTR_NO];
if (cpumask_empty(&pd->cpumask)) {
- zone_devices[topology_logical_die_id(cpu)] = NULL;
- platform_device_unregister(pdev);
+ if (tdata)
+ coretemp_remove_core(pd, PKG_SYSFS_ATTR_NO);
+ hwmon_device_unregister(pd->hwmon_dev);
+ pd->hwmon_dev = NULL;
return 0;
}
@@ -699,7 +718,6 @@ static int coretemp_cpu_offline(unsigned int cpu)
* Check whether this core is the target for the package
* interface. We need to assign it to some other cpu.
*/
- tdata = pd->core_data[PKG_SYSFS_ATTR_NO];
if (tdata && tdata->cpu == cpu) {
target = cpumask_first(&pd->cpumask);
mutex_lock(&tdata->update_lock);
@@ -718,7 +736,7 @@ static enum cpuhp_state coretemp_hp_online;
static int __init coretemp_init(void)
{
- int err;
+ int i, err;
/*
* CPUID.06H.EAX[0] indicates whether the CPU has thermal
@@ -734,20 +752,22 @@ static int __init coretemp_init(void)
if (!zone_devices)
return -ENOMEM;
- err = platform_driver_register(&coretemp_driver);
- if (err)
- goto outzone;
+ for (i = 0; i < max_zones; i++) {
+ err = coretemp_device_add(i);
+ if (err)
+ goto outzone;
+ }
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
coretemp_cpu_online, coretemp_cpu_offline);
if (err < 0)
- goto outdrv;
+ goto outzone;
coretemp_hp_online = err;
return 0;
-outdrv:
- platform_driver_unregister(&coretemp_driver);
outzone:
+ while (i--)
+ coretemp_device_remove(i);
kfree(zone_devices);
return err;
}
@@ -755,8 +775,11 @@ module_init(coretemp_init)
static void __exit coretemp_exit(void)
{
+ int i;
+
cpuhp_remove_state(coretemp_hp_online);
- platform_driver_unregister(&coretemp_driver);
+ for (i = 0; i < max_zones; i++)
+ coretemp_device_remove(i);
kfree(zone_devices);
}
module_exit(coretemp_exit)
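The coretemp rework replaces the fixed core-id-to-attribute mapping, which broke once core IDs exceeded the old NUM_REAL_CORES limit of 128, with a per-package IDA plus a cpu_map[] lookup table, so sparse or large core IDs are packed into the available slots. The IDA idiom itself, in minimal hedged form (demo_* names are illustrative):

    #include <linux/idr.h>

    static struct ida demo_ida;	/* ida_init(&demo_ida) at setup */

    static int demo_alloc_slot(void)
    {
    	/* smallest free id in [0, 511], or -ENOMEM/-ENOSPC */
    	return ida_alloc_max(&demo_ida, 511, GFP_KERNEL);
    }

    static void demo_free_slot(int id)
    {
    	ida_free(&demo_ida, id);
    }

    /* ida_destroy(&demo_ida) on teardown, as coretemp_device_remove() does */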
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 3ea4021f267c..d96e435cc42b 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -391,6 +391,9 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
if (!fan_data)
return -EINVAL;
+ if (state >= fan_data->num_speed)
+ return -EINVAL;
+
set_fan_speed(fan_data, state);
return 0;
}
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c
index 360f5aee1394..d4be03f43fb4 100644
--- a/drivers/hwmon/i5500_temp.c
+++ b/drivers/hwmon/i5500_temp.c
@@ -108,7 +108,7 @@ static int i5500_temp_probe(struct pci_dev *pdev,
u32 tstimer;
s8 tsfsc;
- err = pci_enable_device(pdev);
+ err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Failed to enable device\n");
return err;
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index b2ab83c9fd9a..fe90f0536d76 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -502,6 +502,7 @@ static void ibmpex_register_bmc(int iface, struct device *dev)
return;
out_register:
+ list_del(&data->list);
hwmon_device_unregister(data->hwmon_dev);
out_user:
ipmi_destroy_user(data->user);
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 026f70d7c5a4..1b7f92f23530 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -672,7 +672,7 @@ static int ina3221_probe_child_from_dt(struct device *dev,
return ret;
} else if (val > INA3221_CHANNEL3) {
dev_err(dev, "invalid reg %d of %pOFn\n", val, child);
- return ret;
+ return -EINVAL;
}
input = &ina->inputs[val];
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index fac9b5c68a6a..85413d3dc394 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -486,6 +486,8 @@ static const struct it87_devices it87_devices[] = {
#define has_pwm_freq2(data) ((data)->features & FEAT_PWM_FREQ2)
#define has_six_temp(data) ((data)->features & FEAT_SIX_TEMP)
#define has_vin3_5v(data) ((data)->features & FEAT_VIN3_5V)
+#define has_scaling(data) ((data)->features & (FEAT_12MV_ADC | \
+ FEAT_10_9MV_ADC))
struct it87_sio_data {
int sioaddr;
@@ -3098,7 +3100,7 @@ static int it87_probe(struct platform_device *pdev)
"Detected broken BIOS defaults, disabling PWM interface\n");
/* Starting with IT8721F, we handle scaling of internal voltages */
- if (has_12mv_adc(data)) {
+ if (has_scaling(data)) {
if (sio_data->internal & BIT(0))
data->in_scaled |= BIT(3); /* in3 is AVCC */
if (sio_data->internal & BIT(1))
diff --git a/drivers/hwmon/ltc2945.c b/drivers/hwmon/ltc2945.c
index 2818276ed3d6..a1dd1ef40b56 100644
--- a/drivers/hwmon/ltc2945.c
+++ b/drivers/hwmon/ltc2945.c
@@ -248,6 +248,8 @@ static ssize_t ltc2945_value_store(struct device *dev,
/* convert to register value, then clamp and write result */
regval = ltc2945_val_to_reg(dev, reg, val);
+ if (regval < 0)
+ return regval;
if (is_power_reg(reg)) {
regval = clamp_val(regval, 0, 0xffffff);
regbuf[0] = regval >> 16;
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index bd8f5a3aaad9..052c897a635d 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -127,6 +127,12 @@ mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
if (err)
return err;
+ if (MLXREG_FAN_GET_FAULT(regval, tacho->mask)) {
+ /* FAN is broken - return zero for FAN speed. */
+ *val = 0;
+ return 0;
+ }
+
*val = MLXREG_FAN_GET_RPM(regval, fan->divider,
fan->samples);
break;
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 2e97e56c72c7..5cdc60717006 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -708,7 +708,7 @@ static umode_t nct7802_temp_is_visible(struct kobject *kobj,
if (index >= 38 && index < 46 && !(reg & 0x01)) /* PECI 0 */
return 0;
- if (index >= 0x46 && (!(reg & 0x02))) /* PECI 1 */
+ if (index >= 46 && !(reg & 0x02)) /* PECI 1 */
return 0;
return attr->mode;
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index d62d69bb7e49..1643382c3c88 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -199,6 +199,26 @@ config SENSORS_TPS53679
This driver can also be built as a module. If so, the module will
be called tps53679.
+config SENSORS_TPS544
+ tristate "TI TPS544"
+ help
+ If you say yes here you get hardware monitoring support for Texas
+ Instruments TPS544.
+
+ This driver can also be built as a module. If so, the module will
+ be called tps544.
+
+config SENSORS_TPS544_REGULATOR
+ bool "Regulator support for TPS544"
+ depends on SENSORS_TPS544
+ select REGULATOR
+ help
+ If you say yes here you get regulator support for Texas Instruments
+ TPS544.
+
+ This driver depends on the TPS544 sensor driver and can also be built
+ as a module.
+
config SENSORS_UCD9000
tristate "TI UCD90120, UCD90124, UCD90160, UCD9090, UCD90910"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 03bacfcfd660..647c92a06ed8 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_PXE1610) += pxe1610.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
+obj-$(CONFIG_SENSORS_TPS544) += tps544.o
obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index c0bc43d01018..e32b98e11869 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -229,10 +229,17 @@ static const struct i2c_device_id pmbus_id[] = {
MODULE_DEVICE_TABLE(i2c, pmbus_id);
+static const struct of_device_id pmbus_of_match[] = {
+ {.compatible = "ti,tps544b25"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, pmbus_of_match);
+
/* This is the driver that will be inserted */
static struct i2c_driver pmbus_driver = {
.driver = {
.name = "pmbus",
+ .of_match_table = of_match_ptr(pmbus_of_match),
},
.probe = pmbus_probe,
.remove = pmbus_do_remove,
diff --git a/drivers/hwmon/pmbus/tps544.c b/drivers/hwmon/pmbus/tps544.c
new file mode 100644
index 000000000000..ad5082efcccd
--- /dev/null
+++ b/drivers/hwmon/pmbus/tps544.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TPS544B25 power regulator driver
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/regulator/driver.h>
+#include "pmbus.h"
+
+#define TPS544_NUM_PAGES 1
+
+struct tps544_data {
+ struct device *dev;
+ u16 vout_min[TPS544_NUM_PAGES], vout_max[TPS544_NUM_PAGES];
+ struct pmbus_driver_info info;
+};
+
+struct vlut {
+ int vol;
+ u16 vloop;
+ u16 v_ovfault;
+ u16 v_ovwarn;
+ u16 vmax;
+ u16 mfr_vmin;
+ u16 v_uvwarn;
+ u16 v_uvfault;
+};
+
+#if IS_ENABLED(CONFIG_SENSORS_TPS544_REGULATOR)
+#define TPS544_MFR_VOUT_MIN 0xA4
+#define TPS544_MFR_RESTORE_DEF_ALL 0x12
+#define TPS544_MFR_IOUT_CAL_OFFSET 0x39
+
+#define TPS544_VOUTREAD_MULTIPLIER 1950
+#define TPS544_IOUTREAD_MULTIPLIER 62500
+#define TPS544_IOUTREAD_MASK GENMASK(9, 0)
+
+#define TPS544_VOUT_LIMIT 5300000
+
+#define to_tps544_data(x) container_of(x, struct tps544_data, info)
+
+/*
+ * This currently supports three output voltage buckets:
+ * 0.5V to 1.3V
+ * 1.3V to 2.6V
+ * 2.6V to 5.3V
+ * Any requested voltage will be mapped to one of these buckets and
+ * VOUT will be programmed with 0.1V granularity.
+ */
+static const struct vlut tps544_vout[3] = {
+ {500000, 0xF004, 0x0290, 0x0285, 0x0300, 0x0100, 0x00CD, 0x009A},
+ {1300000, 0xF002, 0x059A, 0x0566, 0x0600, 0x0100, 0x0143, 0x0130},
+ {2600000, 0xF001, 0x0B00, 0x0A9A, 0x0A00, 0x0100, 0x0143, 0x0130}
+};
+#endif
+
+static int tps544_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ return pmbus_read_word_data(client, page, reg);
+}
+
+static int tps544_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ return pmbus_read_byte_data(client, page, reg);
+}
+
+static int tps544_write_byte(struct i2c_client *client, int page, u8 byte)
+{
+ return pmbus_write_byte(client, page, byte);
+}
+
+static int tps544_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ int ret;
+
+ ret = pmbus_write_word_data(client, page, reg, word);
+ /* TODO - Define new PMBUS virtual register entries for these */
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_SENSORS_TPS544_REGULATOR)
+static int tps544_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct device *dev = rdev_get_dev(rdev);
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ int page = 0;
+
+ return pmbus_read_word_data(client, page, PMBUS_READ_VOUT);
+}
+
+static int tps544_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned int *selector)
+{
+ struct device *dev = rdev_get_dev(rdev);
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct tps544_data *data = to_tps544_data(info);
+ int index, page = 0;
+ u16 vout;
+
+ /* voltage will be set close to min value requested */
+ vout = (u16)((min_uV * 512) / 1000000);
+
+ /* Check voltage bucket */
+ if (min_uV >= tps544_vout[2].vol)
+ index = 2;
+ else if (min_uV >= tps544_vout[1].vol)
+ index = 1;
+ else if (min_uV >= tps544_vout[0].vol)
+ index = 0;
+ else
+ return -EINVAL;
+
+ pmbus_write_word_data(client, page, PMBUS_VOUT_SCALE_LOOP,
+ tps544_vout[index].vloop);
+ /* Use delay after setting scale loop; this is derived from testing */
+ msleep(2000);
+ pmbus_write_word_data(client, page, PMBUS_VOUT_OV_FAULT_LIMIT,
+ tps544_vout[index].v_ovfault);
+ pmbus_write_word_data(client, page, PMBUS_VOUT_OV_WARN_LIMIT,
+ tps544_vout[index].v_ovwarn);
+ pmbus_write_word_data(client, page, PMBUS_VOUT_MAX,
+ tps544_vout[index].vmax);
+ pmbus_write_word_data(client, page, PMBUS_VOUT_COMMAND, vout);
+ tps544_write_word_data(client, page, TPS544_MFR_VOUT_MIN,
+ tps544_vout[index].mfr_vmin);
+ pmbus_write_word_data(client, page, PMBUS_VOUT_UV_WARN_LIMIT,
+ tps544_vout[index].v_uvwarn);
+ pmbus_write_word_data(client, page, PMBUS_VOUT_UV_FAULT_LIMIT,
+ tps544_vout[index].v_uvfault);
+
+ data->vout_min[page] = min_uV;
+ data->vout_max[page] = max_uV;
+
+ return 0;
+}
+
+static ssize_t tps544_setv_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+ int vout;
+
+ vout = tps544_regulator_get_voltage(rdev) * TPS544_VOUTREAD_MULTIPLIER;
+ return sprintf(buf, "%d\n", vout);
+}
+
+static ssize_t tps544_setv_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+ int val;
+ int err;
+
+ err = kstrtoint(buf, 0, &val);
+ if (err)
+ return err;
+ if (val > TPS544_VOUT_LIMIT)
+ return -EINVAL;
+
+ err = tps544_regulator_set_voltage(rdev, val, val, NULL);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(tps544_setv);
+
+static ssize_t tps544_restorev_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ int err;
+
+ err = pmbus_write_byte(client, 0, TPS544_MFR_RESTORE_DEF_ALL);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(tps544_restorev);
+
+static ssize_t tps544_geti_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ u16 reg_iout;
+
+ reg_iout = pmbus_read_word_data(client, 0, PMBUS_READ_IOUT) &
+ TPS544_IOUTREAD_MASK;
+
+ return sprintf(buf, "%d\n", reg_iout * TPS544_IOUTREAD_MULTIPLIER);
+}
+
+static DEVICE_ATTR_RO(tps544_geti);
+
+static ssize_t tps544_setcali_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ int reg_cali;
+
+ reg_cali = pmbus_read_word_data(client, 0, TPS544_MFR_IOUT_CAL_OFFSET);
+
+ return sprintf(buf, "Current: 0x%x; Set value in hex to calibrate\n",
+ reg_cali);
+}
+
+static ssize_t tps544_setcali_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ u16 val;
+ int err;
+
+ err = kstrtou16(buf, 0x0, &val);
+ if (err)
+ return err;
+
+ err = pmbus_write_word_data(client, 0, TPS544_MFR_IOUT_CAL_OFFSET, val);
+ if (err)
+ return err;
+
+ return (ssize_t)count;
+}
+
+static DEVICE_ATTR_RW(tps544_setcali);
+
+static struct attribute *reg_attrs[] = {
+ &dev_attr_tps544_setv.attr,
+ &dev_attr_tps544_restorev.attr,
+ &dev_attr_tps544_geti.attr,
+ &dev_attr_tps544_setcali.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(reg);
+
+static const struct regulator_desc tps544_reg_desc[] = {
+ PMBUS_REGULATOR("vout", 0),
+};
+#endif /* CONFIG_SENSORS_TPS544_REGULATOR */
+
+static int tps544_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ unsigned int i;
+ struct device *dev = &client->dev;
+ struct tps544_data *data;
+ struct pmbus_driver_info *info;
+#if IS_ENABLED(CONFIG_SENSORS_TPS544_REGULATOR)
+ int ret;
+ struct regulator_dev *rdev;
+ struct regulator_config rconfig = { };
+#endif
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(struct tps544_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev;
+
+ info = &data->info;
+ info->write_word_data = tps544_write_word_data;
+ /* TODO - these three pass-through hooks may be unnecessary; remove them if so */
+ info->write_byte = tps544_write_byte;
+ info->read_word_data = tps544_read_word_data;
+ info->read_byte_data = tps544_read_byte_data;
+
+ for (i = 0; i < ARRAY_SIZE(data->vout_min); i++)
+ data->vout_min[i] = 0xffff;
+
+ info->pages = TPS544_NUM_PAGES;
+ info->func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+
+#if IS_ENABLED(CONFIG_SENSORS_TPS544_REGULATOR)
+ rconfig.dev = dev;
+ rconfig.driver_data = data;
+ info->num_regulators = info->pages;
+ info->reg_desc = tps544_reg_desc;
+ if (info->num_regulators > (int)ARRAY_SIZE(tps544_reg_desc)) {
+ dev_err(&client->dev, "num_regulators too large!");
+ info->num_regulators = ARRAY_SIZE(tps544_reg_desc);
+ }
+
+ rdev = devm_regulator_register(dev, tps544_reg_desc, &rconfig);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "Failed to register %s regulator\n",
+ info->reg_desc[0].name);
+ return (int)PTR_ERR(rdev);
+ }
+
+ ret = sysfs_create_groups(&rdev->dev.kobj, reg_groups);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, rdev);
+#endif
+
+ return pmbus_do_probe(client, id, info);
+}
+
+static int tps544_remove(struct i2c_client *client)
+{
+#if IS_ENABLED(CONFIG_SENSORS_TPS544_REGULATOR)
+ struct device *dev = &client->dev;
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+
+ sysfs_remove_groups(&rdev->dev.kobj, reg_groups);
+#endif
+ pmbus_do_remove(client);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id tps544_of_match[] = {
+ { .compatible = "ti,tps544" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tps544_of_match);
+#endif
+
+static const struct i2c_device_id tps544_id[] = {
+ {"tps544", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tps544_id);
+
+static struct i2c_driver tps544_driver = {
+ .driver = {
+ .name = "tps544",
+ .of_match_table = of_match_ptr(tps544_of_match),
+ },
+ .probe = tps544_probe,
+ .remove = tps544_remove,
+ .id_table = tps544_id,
+};
+
+module_i2c_driver(tps544_driver);
+
+MODULE_AUTHOR("Harini Katakam");
+MODULE_DESCRIPTION("PMBus regulator driver for TPS544");
+MODULE_LICENSE("GPL v2");
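
For reference, the reg_groups identifier consumed by sysfs_create_groups() and sysfs_remove_groups() in this driver is generated by the ATTRIBUTE_GROUPS(reg) invocation above. A simplified sketch of what that macro expands to (paraphrased from <linux/sysfs.h>; not part of the patch):

static const struct attribute_group reg_group = {
        .attrs = reg_attrs,
};

static const struct attribute_group *reg_groups[] = {
        &reg_group,
        NULL,
};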
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index f2a5af239c95..f5d3cf86753f 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -768,6 +768,7 @@ static int xgene_hwmon_remove(struct platform_device *pdev)
{
struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev);
+ cancel_work_sync(&ctx->workq);
hwmon_device_unregister(ctx->hwmon_dev);
kfifo_free(&ctx->async_msg_fifo);
if (acpi_disabled)
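
The cancel_work_sync() call added above enforces a standard teardown ordering: deferred work must be cancelled synchronously before the data it dereferences is unregistered or freed. A minimal sketch of the rule, with hypothetical demo_* names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_ctx {
        struct work_struct work;
        int *buffer;                    /* touched by the work function */
};

static void demo_teardown(struct demo_ctx *ctx)
{
        /* Stop any queued or running work first ... */
        cancel_work_sync(&ctx->work);

        /* ... then it is safe to free what the work function uses. */
        kfree(ctx->buffer);
        kfree(ctx);
}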
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 83dccdeef906..da63e09fb0d5 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -156,7 +156,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
writel_relaxed(config->ss_pe_cmp[i],
drvdata->base + TRCSSPCICRn(i));
}
- for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
writeq_relaxed(config->addr_val[i],
drvdata->base + TRCACVRn(i));
writeq_relaxed(config->addr_acc[i],
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 4523f10ddd0f..334bb56cc8a9 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -361,7 +361,7 @@ struct etmv4_drvdata {
u8 ctxid_size;
u8 vmid_size;
u8 ccsize;
- u8 ccitmin;
+ u16 ccitmin;
u8 s_ex_level;
u8 ns_ex_level;
u8 q_support;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 75dfa1e2f3f2..e15726611c64 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -426,7 +426,7 @@ static int tmc_set_etf_buffer(struct coresight_device *csdev,
return -EINVAL;
/* wrap head around to the amount of space we have */
- head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
+ head = handle->head & (((unsigned long)buf->nr_pages << PAGE_SHIFT) - 1);
/* find the page to write to */
buf->cur = head / PAGE_SIZE;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index ed77c7f7b344..2a0a12c194c0 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -47,7 +47,8 @@ struct etr_perf_buffer {
};
/* Convert the perf index to an offset within the ETR buffer */
-#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+#define PERF_IDX2OFF(idx, buf) \
+ ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
/* Lower limit for ETR hardware buffer */
#define TMC_ETR_PERF_MIN_BUF_SIZE SZ_1M
@@ -909,7 +910,7 @@ tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
len = tmc_etr_buf_get_data(etr_buf, offset,
CORESIGHT_BARRIER_PKT_SIZE, &bufp);
- if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
+ if (WARN_ON(len < 0 || len < CORESIGHT_BARRIER_PKT_SIZE))
return -EINVAL;
coresight_insert_barrier_packet(bufp);
return offset + CORESIGHT_BARRIER_PKT_SIZE;
@@ -1215,7 +1216,7 @@ alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
* than the size requested via sysfs.
*/
if ((nr_pages << PAGE_SHIFT) > drvdata->size) {
- etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT),
+ etr_buf = tmc_alloc_etr_buf(drvdata, ((ssize_t)nr_pages << PAGE_SHIFT),
0, node, NULL);
if (!IS_ERR(etr_buf))
goto done;
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 71de978575f3..1fcf6e29e8d3 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -320,7 +320,7 @@ ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
static inline unsigned long
tmc_sg_table_buf_size(struct tmc_sg_table *sg_table)
{
- return sg_table->data_pages.nr_pages << PAGE_SHIFT;
+ return (unsigned long)sg_table->data_pages.nr_pages << PAGE_SHIFT;
}
struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata);
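
The (unsigned long) and (ssize_t) casts in the coresight hunks above all address the same hazard: nr_pages is a 32-bit quantity, so nr_pages << PAGE_SHIFT is evaluated in 32-bit arithmetic and wraps before being widened to the destination type. A small sketch of the failure mode, assuming 4 KiB pages (PAGE_SHIFT == 12):

#include <linux/types.h>

static void demo_shift_overflow(void)
{
        u32 nr_pages = 0x100000;        /* 1M pages == 4 GiB of buffer */

        /* The shift is done in 32 bits, wraps to 0, then widens: */
        unsigned long bad = nr_pages << 12;

        /* Widening the operand first keeps the full 64-bit result: */
        unsigned long good = (unsigned long)nr_pages << 12;

        /* On a 64-bit kernel: bad == 0, good == 0x100000000 */
        (void)bad;
        (void)good;
}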
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 0bbce0d29158..7d64d77d5b7a 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -1073,6 +1073,7 @@ static int coresight_remove_match(struct device *dev, void *data)
* platform data.
*/
fwnode_handle_put(conn->child_fwnode);
+ conn->child_fwnode = NULL;
/* No need to continue */
break;
}
diff --git a/drivers/hwtracing/intel_th/msu-sink.c b/drivers/hwtracing/intel_th/msu-sink.c
index 2c7f5116be12..891b28ea25fe 100644
--- a/drivers/hwtracing/intel_th/msu-sink.c
+++ b/drivers/hwtracing/intel_th/msu-sink.c
@@ -71,6 +71,9 @@ static int msu_sink_alloc_window(void *data, struct sg_table **sgt, size_t size)
block = dma_alloc_coherent(priv->dev->parent->parent,
PAGE_SIZE, &sg_dma_address(sg_ptr),
GFP_KERNEL);
+ if (!block)
+ return -ENOMEM;
+
sg_set_buf(sg_ptr, block, PAGE_SIZE);
}
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 3cd2489d398c..640b0aae7eb4 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1050,6 +1050,16 @@ msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {}
static inline void msc_buffer_set_wb(struct msc_window *win) {}
#endif /* CONFIG_X86 */
+static struct page *msc_sg_page(struct scatterlist *sg)
+{
+ void *addr = sg_virt(sg);
+
+ if (is_vmalloc_addr(addr))
+ return vmalloc_to_page(addr);
+
+ return sg_page(sg);
+}
+
/**
* msc_buffer_win_alloc() - alloc a window for a multiblock mode
* @msc: MSC device
@@ -1122,7 +1132,7 @@ static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
int i;
for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
- struct page *page = sg_page(sg);
+ struct page *page = msc_sg_page(sg);
page->mapping = NULL;
dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
@@ -1384,7 +1394,7 @@ found:
pgoff -= win->pgoff;
for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
- struct page *page = sg_page(sg);
+ struct page *page = msc_sg_page(sg);
size_t pgsz = PFN_DOWN(sg->length);
if (pgoff < pgsz)
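
The msc_sg_page() helper introduced above is needed because the buffer behind a scatterlist entry may live either in the kernel's linear mapping or in vmalloc space, and the two require different page lookups. A generic sketch of the same dispatch (not driver code):

#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct page *demo_addr_to_page(void *addr)
{
        /* vmalloc addresses are not covered by virt_to_page() */
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);

        return virt_to_page(addr);
}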
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index a723c8c33087..6fcc20e0bc7b 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -100,8 +100,10 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
}
th = intel_th_alloc(&pdev->dev, drvdata, resource, r);
- if (IS_ERR(th))
- return PTR_ERR(th);
+ if (IS_ERR(th)) {
+ err = PTR_ERR(th);
+ goto err_free_irq;
+ }
th->activate = intel_th_pci_activate;
th->deactivate = intel_th_pci_deactivate;
@@ -109,6 +111,10 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
return 0;
+
+err_free_irq:
+ pci_free_irq_vectors(pdev);
+ return err;
}
static void intel_th_pci_remove(struct pci_dev *pdev)
@@ -274,6 +280,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Raptor Lake-S CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa76f),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Meteor Lake-P */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7e24),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Raptor Lake-S */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7a26),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Rocket Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c19),
.driver_data = (kernel_ulong_t)&intel_th_2x,
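
The intel_th probe fix above converts an early return into a goto so that IRQ vectors allocated earlier in the function are released on failure. The same unwind shape, reduced to a sketch (demo_setup() is a hypothetical stand-in for any later probe step):

#include <linux/pci.h>

static int demo_setup(struct pci_dev *pdev)
{
        return 0;                       /* placeholder for later work */
}

static int demo_probe(struct pci_dev *pdev)
{
        int err;

        err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
        if (err < 0)
                return err;

        err = demo_setup(pdev);
        if (err)
                goto err_free_irq;      /* must undo the allocation */

        return 0;

err_free_irq:
        pci_free_irq_vectors(pdev);
        return err;
}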
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index bdcc3c9d0abe..3f0b072a4cc8 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -250,18 +250,46 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
if (!slave)
return 0;
- command = readl(bus->base + ASPEED_I2C_CMD_REG);
+ /*
+ * Handle stop conditions early, prior to SLAVE_MATCH. Some masters may drive
+ * transfers with low enough latency between the nak/stop phase of the current
+ * command and the start/address phase of the following command that the
+ * interrupts are coalesced by the time we process them.
+ */
+ if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
+ irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
+ bus->slave_state = ASPEED_I2C_SLAVE_STOP;
+ }
- /* Slave was requested, restart state machine. */
+ if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
+ bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
+ irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
+ bus->slave_state = ASPEED_I2C_SLAVE_STOP;
+ }
+
+ /* Propagate any stop conditions to the slave implementation. */
+ if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) {
+ i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
+ bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
+ }
+
+ /*
+ * Now that we've dealt with any potentially coalesced stop conditions,
+ * address any start conditions.
+ */
if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
bus->slave_state = ASPEED_I2C_SLAVE_START;
}
- /* Slave is not currently active, irq was for someone else. */
+ /*
+ * If the slave has been stopped and not started then slave interrupt
+ * handling is complete.
+ */
if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
return irq_handled;
+ command = readl(bus->base + ASPEED_I2C_CMD_REG);
dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
irq_status, command);
@@ -280,17 +308,6 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
}
- /* Slave was asked to stop. */
- if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
- irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
- bus->slave_state = ASPEED_I2C_SLAVE_STOP;
- }
- if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
- bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
- irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
- bus->slave_state = ASPEED_I2C_SLAVE_STOP;
- }
-
switch (bus->slave_state) {
case ASPEED_I2C_SLAVE_READ_REQUESTED:
if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
@@ -319,8 +336,7 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
break;
case ASPEED_I2C_SLAVE_STOP:
- i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
- bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
+ /* Stop events are handled early above, so this case is unreachable. */
break;
case ASPEED_I2C_SLAVE_START:
/* Slave was just started. Waiting for the next event. */;
@@ -693,13 +709,16 @@ static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
if (time_left == 0) {
/*
- * If timed out and bus is still busy in a multi master
- * environment, attempt recovery at here.
+ * In a multi-master setup, if a timeout occurs, attempt
+ * recovery. But if the bus is idle, we still need to reset the
+ * i2c controller to clear the remaining interrupts.
*/
if (bus->multi_master &&
(readl(bus->base + ASPEED_I2C_CMD_REG) &
ASPEED_I2CD_BUS_BUSY_STS))
aspeed_i2c_recover_bus(bus);
+ else
+ aspeed_i2c_reset(bus);
/*
* If timed out and the state is still pending, drop the pending
@@ -737,6 +756,8 @@ static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN;
writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);
+
+ bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
}
static int aspeed_i2c_reg_slave(struct i2c_client *client)
@@ -753,7 +774,6 @@ static int aspeed_i2c_reg_slave(struct i2c_client *client)
__aspeed_i2c_reg_slave(bus, client->addr);
bus->slave = client;
- bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
spin_unlock_irqrestore(&bus->lock, flags);
return 0;
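
The aspeed rework above boils down to an ordering rule for coalesced events: when a single status read reports both the stop of the previous transfer and the start of the next, the stop must be delivered to the slave callback before the start is processed. A reduced sketch of that rule (demo_* names and bit positions are illustrative):

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_IRQ_STOP   BIT(0)
#define DEMO_IRQ_START  BIT(1)

enum demo_state { DEMO_IDLE, DEMO_ACTIVE };

static enum demo_state demo_handle_events(enum demo_state s, u32 status)
{
        /* Finish the old transfer first ... */
        if (status & DEMO_IRQ_STOP)
                s = DEMO_IDLE;

        /* ... so a coalesced start is applied to a clean state. */
        if (status & DEMO_IRQ_START)
                s = DEMO_ACTIVE;

        return s;
}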
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 70cd9fc7fb86..cae34c55ae08 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -240,13 +240,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
u32 offset)
{
u32 val;
+ unsigned long flags;
if (iproc_i2c->idm_base) {
- spin_lock(&iproc_i2c->idm_lock);
+ spin_lock_irqsave(&iproc_i2c->idm_lock, flags);
writel(iproc_i2c->ape_addr_mask,
iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
val = readl(iproc_i2c->base + offset);
- spin_unlock(&iproc_i2c->idm_lock);
+ spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags);
} else {
val = readl(iproc_i2c->base + offset);
}
@@ -257,12 +258,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
u32 offset, u32 val)
{
+ unsigned long flags;
+
if (iproc_i2c->idm_base) {
- spin_lock(&iproc_i2c->idm_lock);
+ spin_lock_irqsave(&iproc_i2c->idm_lock, flags);
writel(iproc_i2c->ape_addr_mask,
iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
writel(val, iproc_i2c->base + offset);
- spin_unlock(&iproc_i2c->idm_lock);
+ spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags);
} else {
writel(val, iproc_i2c->base + offset);
}
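
The bcm-iproc change above is the textbook fix for a lock shared between process context and an interrupt handler: if the ISR can fire on the CPU that already holds the lock, it deadlocks unless local interrupts are masked while the lock is held. Minimal sketch (names illustrative):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_process_side(void)
{
        unsigned long flags;

        /* Mask local IRQs so the ISR cannot preempt the holder */
        spin_lock_irqsave(&demo_lock, flags);
        /* ... critical section also entered from demo_isr() ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}

static irqreturn_t demo_isr(int irq, void *data)
{
        spin_lock(&demo_lock);          /* IRQs are already off here */
        /* ... */
        spin_unlock(&demo_lock);
        return IRQ_HANDLED;
}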
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 8750e444f449..3e0846a17beb 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -13,7 +13,9 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
/* Register offsets for the I2C device. */
#define CDNS_I2C_CR_OFFSET 0x00 /* Control Register, RW */
@@ -22,11 +24,14 @@
#define CDNS_I2C_DATA_OFFSET 0x0C /* I2C Data Register, RW */
#define CDNS_I2C_ISR_OFFSET 0x10 /* IRQ Status Register, RW */
#define CDNS_I2C_XFER_SIZE_OFFSET 0x14 /* Transfer Size Register, RW */
+#define CDNS_I2C_SLV_PAUSE_OFFSET 0x18 /* Slave Monitor Pause Register, RW */
#define CDNS_I2C_TIME_OUT_OFFSET 0x1C /* Time Out Register, RW */
+#define CDNS_I2C_IMR_OFFSET 0x20 /* IRQ Mask Register, RO */
#define CDNS_I2C_IER_OFFSET 0x24 /* IRQ Enable Register, WO */
#define CDNS_I2C_IDR_OFFSET 0x28 /* IRQ Disable Register, WO */
/* Control Register Bit mask definitions */
+#define CDNS_I2C_CR_SLVMON BIT(5) /* Slave monitor mode bit */
#define CDNS_I2C_CR_HOLD BIT(4) /* Hold Bus bit */
#define CDNS_I2C_CR_ACK_EN BIT(3)
#define CDNS_I2C_CR_NEA BIT(2)
@@ -40,9 +45,16 @@
#define CDNS_I2C_CR_DIVB_SHIFT 8
#define CDNS_I2C_CR_DIVB_MASK (0x3f << CDNS_I2C_CR_DIVB_SHIFT)
+#define CDNS_I2C_CR_SLAVE_EN_MASK (CDNS_I2C_CR_CLR_FIFO | \
+ CDNS_I2C_CR_NEA | \
+ CDNS_I2C_CR_ACK_EN | \
+ CDNS_I2C_CR_MS)
+
/* Status Register Bit mask definitions */
#define CDNS_I2C_SR_BA BIT(8)
+#define CDNS_I2C_SR_TXDV BIT(6)
#define CDNS_I2C_SR_RXDV BIT(5)
+#define CDNS_I2C_SR_RXRW BIT(3)
/*
* I2C Address Register Bit mask definitions
@@ -91,6 +103,14 @@
CDNS_I2C_IXR_DATA | \
CDNS_I2C_IXR_COMP)
+#define CDNS_I2C_IXR_SLAVE_INTR_MASK (CDNS_I2C_IXR_RX_UNF | \
+ CDNS_I2C_IXR_TX_OVF | \
+ CDNS_I2C_IXR_RX_OVF | \
+ CDNS_I2C_IXR_TO | \
+ CDNS_I2C_IXR_NACK | \
+ CDNS_I2C_IXR_DATA | \
+ CDNS_I2C_IXR_COMP)
+
#define CDNS_I2C_TIMEOUT msecs_to_jiffies(1000)
/* timeout for pm runtime autosuspend */
#define CNDS_I2C_PM_TIMEOUT 1000 /* ms */
@@ -117,6 +137,32 @@
#define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset)
#define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+/**
+ * enum cdns_i2c_mode - I2C Controller current operating mode
+ *
+ * @CDNS_I2C_MODE_SLAVE: I2C controller operating in slave mode
+ * @CDNS_I2C_MODE_MASTER: I2C Controller operating in master mode
+ */
+enum cdns_i2c_mode {
+ CDNS_I2C_MODE_SLAVE,
+ CDNS_I2C_MODE_MASTER,
+};
+
+/**
+ * enum cdns_i2c_slave_state - Slave state when I2C is operating in slave mode
+ *
+ * @CDNS_I2C_SLAVE_STATE_IDLE: I2C slave idle
+ * @CDNS_I2C_SLAVE_STATE_SEND: I2C slave sending data to master
+ * @CDNS_I2C_SLAVE_STATE_RECV: I2C slave receiving data from master
+ */
+enum cdns_i2c_slave_state {
+ CDNS_I2C_SLAVE_STATE_IDLE,
+ CDNS_I2C_SLAVE_STATE_SEND,
+ CDNS_I2C_SLAVE_STATE_RECV,
+};
+#endif
+
/**
* struct cdns_i2c - I2C device private data structure
*
@@ -138,6 +184,14 @@
* @clk: Pointer to struct clk
* @clk_rate_change_nb: Notifier block for clock rate changes
* @quirks: flag for broken hold bit usage in r1p10
+ * @ctrl_reg: Cached value of the control register.
+ * @rinfo: Structure holding recovery information.
+ * @pinctrl: Pin control state holder.
+ * @pinctrl_pins_default: Default pin control state.
+ * @pinctrl_pins_gpio: GPIO pin control state.
+ * @slave: Registered slave instance.
+ * @dev_mode: I2C operating role (master/slave).
+ * @slave_state: I2C slave state (idle/read/write).
*/
struct cdns_i2c {
struct device *dev;
@@ -158,6 +212,16 @@ struct cdns_i2c {
struct clk *clk;
struct notifier_block clk_rate_change_nb;
u32 quirks;
+ u32 ctrl_reg;
+ struct i2c_bus_recovery_info rinfo;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pinctrl_pins_default;
+ struct pinctrl_state *pinctrl_pins_gpio;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ struct i2c_client *slave;
+ enum cdns_i2c_mode dev_mode;
+ enum cdns_i2c_slave_state slave_state;
+#endif
};
struct cdns_platform_data {
@@ -186,17 +250,148 @@ static inline bool cdns_is_holdquirk(struct cdns_i2c *id, bool hold_wrkaround)
(id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1));
}
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+static void cdns_i2c_set_mode(enum cdns_i2c_mode mode, struct cdns_i2c *id)
+{
+ u32 reg;
+
+ /* Disable all interrupts */
+ cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK, CDNS_I2C_IDR_OFFSET);
+
+ /* Clear FIFO and transfer size */
+ cdns_i2c_writereg(CDNS_I2C_CR_CLR_FIFO, CDNS_I2C_CR_OFFSET);
+
+ /* Update device mode and state */
+ id->dev_mode = mode;
+ id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE;
+
+ switch (mode) {
+ case CDNS_I2C_MODE_MASTER:
+ /* Enable i2c master */
+ cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET);
+ break;
+ case CDNS_I2C_MODE_SLAVE:
+ /* Enable i2c slave */
+ reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+ reg &= ~CDNS_I2C_CR_SLAVE_EN_MASK;
+ cdns_i2c_writereg(reg, CDNS_I2C_CR_OFFSET);
+
+ /* Setting slave address */
+ cdns_i2c_writereg(id->slave->addr & CDNS_I2C_ADDR_MASK,
+ CDNS_I2C_ADDR_OFFSET);
+
+ /* Enable slave send/receive interrupts */
+ cdns_i2c_writereg(CDNS_I2C_IXR_SLAVE_INTR_MASK,
+ CDNS_I2C_IER_OFFSET);
+ break;
+ }
+}
+
+static void cdns_i2c_slave_rcv_data(struct cdns_i2c *id)
+{
+ u8 bytes;
+ unsigned char data;
+
+ /* Prepare backend for data reception */
+ if (id->slave_state == CDNS_I2C_SLAVE_STATE_IDLE) {
+ id->slave_state = CDNS_I2C_SLAVE_STATE_RECV;
+ i2c_slave_event(id->slave, I2C_SLAVE_WRITE_REQUESTED, NULL);
+ }
+
+ /* Fetch number of bytes to receive */
+ bytes = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
+
+ /* Read data and send to backend */
+ while (bytes--) {
+ data = cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
+ i2c_slave_event(id->slave, I2C_SLAVE_WRITE_RECEIVED, &data);
+ }
+}
+
+static void cdns_i2c_slave_send_data(struct cdns_i2c *id)
+{
+ u8 data;
+
+ /* Prepare backend for data transmission */
+ if (id->slave_state == CDNS_I2C_SLAVE_STATE_IDLE) {
+ id->slave_state = CDNS_I2C_SLAVE_STATE_SEND;
+ i2c_slave_event(id->slave, I2C_SLAVE_READ_REQUESTED, &data);
+ } else {
+ i2c_slave_event(id->slave, I2C_SLAVE_READ_PROCESSED, &data);
+ }
+
+ /* Send data over bus */
+ cdns_i2c_writereg(data, CDNS_I2C_DATA_OFFSET);
+}
+
/**
- * cdns_i2c_isr - Interrupt handler for the I2C device
- * @irq: irq number for the I2C device
- * @ptr: void pointer to cdns_i2c structure
+ * cdns_i2c_slave_isr - Interrupt handler for the I2C device in slave role
+ * @ptr: Pointer to I2C device private data
+ *
+ * This function handles the data interrupt and transfer complete interrupt of
+ * the I2C device in slave role.
+ *
+ * Return: IRQ_HANDLED always
+ */
+static irqreturn_t cdns_i2c_slave_isr(void *ptr)
+{
+ struct cdns_i2c *id = ptr;
+ unsigned int isr_status, i2c_status;
+
+ /* Fetch the interrupt status */
+ isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
+ cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
+
+ /* Ignore masked interrupts */
+ isr_status &= ~cdns_i2c_readreg(CDNS_I2C_IMR_OFFSET);
+
+ /* Fetch transfer mode (send/receive) */
+ i2c_status = cdns_i2c_readreg(CDNS_I2C_SR_OFFSET);
+
+ /* Handle data send/receive */
+ if (i2c_status & CDNS_I2C_SR_RXRW) {
+ /* Send data to master */
+ if (isr_status & CDNS_I2C_IXR_DATA)
+ cdns_i2c_slave_send_data(id);
+
+ if (isr_status & CDNS_I2C_IXR_COMP) {
+ id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE;
+ i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL);
+ }
+ } else {
+ /* Receive data from master */
+ if (isr_status & CDNS_I2C_IXR_DATA)
+ cdns_i2c_slave_rcv_data(id);
+
+ if (isr_status & CDNS_I2C_IXR_COMP) {
+ cdns_i2c_slave_rcv_data(id);
+ id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE;
+ i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL);
+ }
+ }
+
+ /* Master indicated xfer stop or fifo underflow/overflow */
+ if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_RX_OVF |
+ CDNS_I2C_IXR_RX_UNF | CDNS_I2C_IXR_TX_OVF)) {
+ id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE;
+ i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL);
+ cdns_i2c_writereg(CDNS_I2C_CR_CLR_FIFO, CDNS_I2C_CR_OFFSET);
+ }
+
+ return IRQ_HANDLED;
+}
+#endif
+
+/**
+ * cdns_i2c_master_isr - Interrupt handler for the I2C device in master role
+ * @ptr: Pointer to I2C device private data
*
* This function handles the data interrupt, transfer complete interrupt and
- * the error interrupts of the I2C device.
+ * the error interrupts of the I2C device in master role.
*
* Return: IRQ_HANDLED always
*/
-static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
+static irqreturn_t cdns_i2c_master_isr(void *ptr)
{
unsigned int isr_status, avail_bytes;
unsigned int bytes_to_send;
@@ -321,6 +516,23 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
status = IRQ_HANDLED;
}
+ /* Handle the slave monitor mode interrupt */
+ if (isr_status & CDNS_I2C_IXR_SLV_RDY) {
+ unsigned int ctrl_reg;
+ /* Read control register */
+ ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+
+ /* Disable slave monitor mode */
+ ctrl_reg &= ~CDNS_I2C_CR_SLVMON;
+ cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
+
+ /* Clear interrupt flag for slvmon mode */
+ cdns_i2c_writereg(CDNS_I2C_IXR_SLV_RDY, CDNS_I2C_IDR_OFFSET);
+
+ done_flag = 1;
+ status = IRQ_HANDLED;
+ }
+
/* Update the status for errors */
id->err_status = isr_status & CDNS_I2C_IXR_ERR_INTR_MASK;
if (id->err_status)
@@ -333,6 +545,40 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
}
/**
+ * cdns_i2c_isr - Interrupt handler for the I2C device
+ * @irq: irq number for the I2C device
+ * @ptr: void pointer to cdns_i2c structure
+ *
+ * This function passes the control to slave/master based on current role of
+ * i2c controller.
+ *
+ * Return: IRQ_HANDLED always
+ */
+static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
+{
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ struct cdns_i2c *id = ptr;
+
+ switch (id->dev_mode) {
+ case CDNS_I2C_MODE_SLAVE:
+ dev_dbg(&id->adap.dev, "slave interrupt\n");
+ cdns_i2c_slave_isr(ptr);
+ break;
+ case CDNS_I2C_MODE_MASTER:
+ dev_dbg(&id->adap.dev, "master interrupt\n");
+ cdns_i2c_master_isr(ptr);
+ break;
+ default:
+ dev_dbg(&id->adap.dev, "undefined interrupt\n");
+ break;
+ }
+#else
+ cdns_i2c_master_isr(ptr);
+#endif
+ return IRQ_HANDLED;
+}
+
+/**
* cdns_i2c_mrecv - Prepare and start a master receive operation
* @id: pointer to the i2c device structure
*/
@@ -340,6 +586,7 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
{
unsigned int ctrl_reg;
unsigned int isr_status;
+ unsigned long flags;
id->p_recv_buf = id->p_msg->buf;
id->recv_count = id->p_msg->len;
@@ -348,8 +595,13 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
ctrl_reg |= CDNS_I2C_CR_RW | CDNS_I2C_CR_CLR_FIFO;
+ /*
+ * Receive up to I2C_SMBUS_BLOCK_MAX data bytes, plus one message length
+ * byte, plus one checksum byte if PEC is enabled. p_msg->len will be 2 if
+ * PEC is enabled, otherwise 1.
+ */
if (id->p_msg->flags & I2C_M_RECV_LEN)
- id->recv_count = I2C_SMBUS_BLOCK_MAX + 1;
+ id->recv_count = I2C_SMBUS_BLOCK_MAX + id->p_msg->len;
id->curr_recv_count = id->recv_count;
@@ -357,8 +609,12 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
- if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
+ if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) {
ctrl_reg |= CDNS_I2C_CR_HOLD;
+ } else {
+ if (id->p_msg->flags & I2C_M_NOSTART)
+ ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
+ }
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
@@ -381,6 +637,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
}
/* Set the slave address in address register - triggers operation */
+ cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
+ local_irq_save(flags);
cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
CDNS_I2C_ADDR_OFFSET);
/* Clear the bus hold flag if bytes to receive is less than FIFO size */
@@ -388,7 +646,7 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) &&
(id->recv_count <= CDNS_I2C_FIFO_DEPTH))
cdns_i2c_clear_bus_hold(id);
- cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
+ local_irq_restore(flags);
}
/**
@@ -448,10 +706,43 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
if (!id->bus_hold_flag && !id->send_count)
cdns_i2c_clear_bus_hold(id);
/* Set the slave address in address register - triggers operation. */
+ cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
CDNS_I2C_ADDR_OFFSET);
+}
- cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
+/**
+ * cdns_i2c_slvmon - Handle the slave monitor mode feature
+ * @id: pointer to the i2c device
+ */
+static void cdns_i2c_slvmon(struct cdns_i2c *id)
+{
+ unsigned int ctrl_reg;
+ unsigned int isr_status;
+
+ id->p_recv_buf = NULL;
+ id->p_send_buf = id->p_msg->buf;
+ id->send_count = id->p_msg->len;
+
+ /* Clear the interrupts in interrupt status register. */
+ isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
+ cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
+
+ /* Enable slvmon control reg */
+ ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+ ctrl_reg |= CDNS_I2C_CR_MS | CDNS_I2C_CR_NEA | CDNS_I2C_CR_SLVMON
+ | CDNS_I2C_CR_CLR_FIFO;
+ ctrl_reg &= ~(CDNS_I2C_CR_RW);
+ cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
+
+ /* Initialize slvmon reg */
+ cdns_i2c_writereg(0xF, CDNS_I2C_SLV_PAUSE_OFFSET);
+
+ /* Set the slave address to start the slave address transmission */
+ cdns_i2c_writereg(id->p_msg->addr, CDNS_I2C_ADDR_OFFSET);
+
+ /* Setup slvmon interrupt flag */
+ cdns_i2c_writereg(CDNS_I2C_IXR_SLV_RDY, CDNS_I2C_IER_OFFSET);
}
/**
@@ -470,12 +761,12 @@ static void cdns_i2c_master_reset(struct i2c_adapter *adap)
cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK, CDNS_I2C_IDR_OFFSET);
/* Clear the hold bit and fifos */
regval = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
- regval &= ~CDNS_I2C_CR_HOLD;
+ regval &= ~(CDNS_I2C_CR_HOLD | CDNS_I2C_CR_SLVMON);
regval |= CDNS_I2C_CR_CLR_FIFO;
cdns_i2c_writereg(regval, CDNS_I2C_CR_OFFSET);
/* Update the transfercount register to zero */
cdns_i2c_writereg(0, CDNS_I2C_XFER_SIZE_OFFSET);
- /* Clear the interupt status register */
+ /* Clear the interrupt status register */
regval = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
cdns_i2c_writereg(regval, CDNS_I2C_ISR_OFFSET);
/* Clear the status register */
@@ -504,9 +795,11 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
cdns_i2c_writereg(reg | CDNS_I2C_CR_NEA,
CDNS_I2C_CR_OFFSET);
}
-
- /* Check for the R/W flag on each msg */
- if (msg->flags & I2C_M_RD)
+ /* Check for zero length - Slave monitor mode */
+ if (msg->len == 0)
+ cdns_i2c_slvmon(id);
+ /* Check for the R/W flag on each msg */
+ else if (msg->flags & I2C_M_RD)
cdns_i2c_mrecv(id);
else
cdns_i2c_msend(id);
@@ -522,6 +815,7 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
/* Wait for the signal of completion */
time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout);
if (time_left == 0) {
+ i2c_recover_bus(adap);
cdns_i2c_master_reset(adap);
dev_err(id->adap.dev.parent,
"timeout waiting on completion\n");
@@ -535,6 +829,9 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
if (id->err_status & CDNS_I2C_IXR_ARB_LOST)
return -EAGAIN;
+ if (msg->flags & I2C_M_RECV_LEN)
+ msg->len += min_t(unsigned int, msg->buf[0], I2C_SMBUS_BLOCK_MAX);
+
return 0;
}
@@ -555,15 +852,34 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
u32 reg;
struct cdns_i2c *id = adap->algo_data;
bool hold_quirk;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ bool change_role = false;
+#endif
ret = pm_runtime_get_sync(id->dev);
if (ret < 0)
return ret;
- /* Check if the bus is free */
- if (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_BA) {
- ret = -EAGAIN;
- goto out;
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ /* Check i2c operating mode and switch if possible */
+ if (id->dev_mode == CDNS_I2C_MODE_SLAVE) {
+ if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE)
+ return -EAGAIN;
+
+ /* Set mode to master */
+ cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id);
+
+ /* Mark flag to change role once xfer is completed */
+ change_role = true;
}
+#endif
+
+ /* Check if the bus is free */
+ if (msgs->len)
+ if (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_BA) {
+ ret = -EAGAIN;
+ goto out;
+ }
hold_quirk = !!(id->quirks & CDNS_I2C_BROKEN_HOLD_BIT);
/*
@@ -617,6 +933,13 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
ret = num;
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ /* Switch i2c mode to slave */
+ if (change_role)
+ cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id);
+#endif
+
out:
pm_runtime_mark_last_busy(id->dev);
pm_runtime_put_autosuspend(id->dev);
@@ -631,14 +954,67 @@ out:
*/
static u32 cdns_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR |
- (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
- I2C_FUNC_SMBUS_BLOCK_DATA;
+ u32 func = I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR |
+ (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
+ I2C_FUNC_SMBUS_BLOCK_DATA;
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ func |= I2C_FUNC_SLAVE;
+#endif
+
+ return func;
}
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+static int cdns_reg_slave(struct i2c_client *slave)
+{
+ int ret;
+ struct cdns_i2c *id = container_of(slave->adapter, struct cdns_i2c,
+ adap);
+
+ if (id->slave)
+ return -EBUSY;
+
+ if (slave->flags & I2C_CLIENT_TEN)
+ return -EAFNOSUPPORT;
+
+ ret = pm_runtime_get_sync(id->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Store slave information */
+ id->slave = slave;
+
+ /* Enable I2C slave */
+ cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id);
+
+ return 0;
+}
+
+static int cdns_unreg_slave(struct i2c_client *slave)
+{
+ struct cdns_i2c *id = container_of(slave->adapter, struct cdns_i2c,
+ adap);
+
+ pm_runtime_put(id->dev);
+
+ /* Remove slave information */
+ id->slave = NULL;
+
+ /* Enable I2C master */
+ cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id);
+
+ return 0;
+}
+#endif
+
static const struct i2c_algorithm cdns_i2c_algo = {
.master_xfer = cdns_i2c_master_xfer,
.functionality = cdns_i2c_func,
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ .reg_slave = cdns_reg_slave,
+ .unreg_slave = cdns_unreg_slave,
+#endif
};
/**
@@ -728,12 +1104,11 @@ static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id)
if (ret)
return ret;
- ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+ ctrl_reg = id->ctrl_reg;
ctrl_reg &= ~(CDNS_I2C_CR_DIVA_MASK | CDNS_I2C_CR_DIVB_MASK);
ctrl_reg |= ((div_a << CDNS_I2C_CR_DIVA_SHIFT) |
(div_b << CDNS_I2C_CR_DIVB_SHIFT));
- cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
-
+ id->ctrl_reg = ctrl_reg;
return 0;
}
@@ -817,6 +1192,26 @@ static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev)
}
/**
+ * cdns_i2c_init - Controller initialisation
+ * @id: Device private data structure
+ *
+ * Initialise the i2c controller.
+ */
+static void cdns_i2c_init(struct cdns_i2c *id)
+{
+ cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET);
+ /*
+ * Cadence I2C controller has a bug wherein it generates
+ * invalid read transaction after HW timeout in master receiver mode.
+ * HW timeout is not used by this driver and the interrupt is disabled.
+ * But the feature itself cannot be disabled. Hence maximum value
+ * is written to this register to reduce the chances of error.
+ */
+ cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+}
+
+/**
* cdns_i2c_runtime_resume - Runtime resume
* @dev: Address of the platform_device structure
*
@@ -834,6 +1229,89 @@ static int __maybe_unused cdns_i2c_runtime_resume(struct device *dev)
dev_err(dev, "Cannot enable clock.\n");
return ret;
}
+ cdns_i2c_init(xi2c);
+
+ return 0;
+}
+
+/**
+ * cdns_i2c_prepare_recovery - Prepare for bus recovery
+ * @adapter: Pointer to i2c adapter
+ *
+ * This function is called to prepare for recovery.
+ * It changes the state of pins from SCL/SDA to GPIO.
+ */
+static void cdns_i2c_prepare_recovery(struct i2c_adapter *adapter)
+{
+ struct cdns_i2c *p_cdns_i2c;
+
+ p_cdns_i2c = container_of(adapter, struct cdns_i2c, adap);
+
+ /* Setting pin state as gpio */
+ pinctrl_select_state(p_cdns_i2c->pinctrl,
+ p_cdns_i2c->pinctrl_pins_gpio);
+}
+
+/**
+ * cdns_i2c_unprepare_recovery - Undo bus recovery preparation
+ * @adapter: Pointer to i2c adapter
+ *
+ * This function is called on exiting recovery. It reverts
+ * the state of pins from GPIO to SCL/SDA.
+ */
+static void cdns_i2c_unprepare_recovery(struct i2c_adapter *adapter)
+{
+ struct cdns_i2c *p_cdns_i2c;
+
+ p_cdns_i2c = container_of(adapter, struct cdns_i2c, adap);
+
+ /* Setting pin state back to default (i2c) */
+ pinctrl_select_state(p_cdns_i2c->pinctrl,
+ p_cdns_i2c->pinctrl_pins_default);
+}
+
+/**
+ * cdns_i2c_init_recovery_info - Initialize I2C bus recovery
+ * @pid: Pointer to cdns i2c structure
+ * @pdev: Handle to the platform device structure
+ *
+ * This function performs the initialization required for i2c bus
+ * recovery. It registers three callbacks: prepare, recover and
+ * unprepare.
+ *
+ * Return: 0 on success, negative error otherwise.
+ */
+static int cdns_i2c_init_recovery_info(struct cdns_i2c *pid,
+ struct platform_device *pdev)
+{
+ struct i2c_bus_recovery_info *rinfo = &pid->rinfo;
+
+ pid->pinctrl_pins_default = pinctrl_lookup_state(pid->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ pid->pinctrl_pins_gpio = pinctrl_lookup_state(pid->pinctrl, "gpio");
+
+ /* Fetches GPIO pins */
+ rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda-gpios", 0);
+ rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl-gpios", 0);
+
+ /* If the GPIO driver isn't ready yet, defer the probe */
+ if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
+ PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ /* Validates fetched information */
+ if (IS_ERR(rinfo->sda_gpiod) ||
+ IS_ERR(rinfo->scl_gpiod) ||
+ IS_ERR(pid->pinctrl_pins_default) ||
+ IS_ERR(pid->pinctrl_pins_gpio)) {
+ dev_dbg(&pdev->dev, "recovery information incomplete\n");
+ return 0;
+ }
+
+ rinfo->prepare_recovery = cdns_i2c_prepare_recovery;
+ rinfo->unprepare_recovery = cdns_i2c_unprepare_recovery;
+ rinfo->recover_bus = i2c_generic_scl_recovery;
+ pid->adap.bus_recovery_info = rinfo;
return 0;
}
@@ -884,6 +1362,13 @@ static int cdns_i2c_probe(struct platform_device *pdev)
id->quirks = data->quirks;
}
+ id->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (!IS_ERR(id->pinctrl)) {
+ ret = cdns_i2c_init_recovery_info(id, pdev);
+ if (ret)
+ return ret;
+ }
+
r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
id->membase = devm_ioremap_resource(&pdev->dev, r_mem);
if (IS_ERR(id->membase))
@@ -914,10 +1399,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
if (ret)
dev_err(&pdev->dev, "Unable to enable clock.\n");
- pm_runtime_enable(id->dev);
pm_runtime_set_autosuspend_delay(id->dev, CNDS_I2C_PM_TIMEOUT);
pm_runtime_use_autosuspend(id->dev);
pm_runtime_set_active(id->dev);
+ pm_runtime_enable(id->dev);
id->clk_rate_change_nb.notifier_call = cdns_i2c_clk_notifier_cb;
if (clk_notifier_register(id->clk, &id->clk_rate_change_nb))
@@ -929,8 +1414,12 @@ static int cdns_i2c_probe(struct platform_device *pdev)
if (ret || (id->i2c_clk > CDNS_I2C_SPEED_MAX))
id->i2c_clk = CDNS_I2C_SPEED_DEFAULT;
- cdns_i2c_writereg(CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS,
- CDNS_I2C_CR_OFFSET);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ /* Set initial mode to master */
+ id->dev_mode = CDNS_I2C_MODE_MASTER;
+ id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE;
+#endif
+ id->ctrl_reg = CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS;
ret = cdns_i2c_setclk(id->input_clk, id);
if (ret) {
@@ -946,14 +1435,7 @@ static int cdns_i2c_probe(struct platform_device *pdev)
goto err_clk_dis;
}
- /*
- * Cadence I2C controller has a bug wherein it generates
- * invalid read transaction after HW timeout in master receiver mode.
- * HW timeout is not used by this driver and the interrupt is disabled.
- * But the feature itself cannot be disabled. Hence maximum value
- * is written to this register to reduce the chances of error.
- */
- cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+ cdns_i2c_init(id);
ret = i2c_add_adapter(&id->adap);
if (ret < 0)
@@ -967,8 +1449,8 @@ static int cdns_i2c_probe(struct platform_device *pdev)
err_clk_dis:
clk_notifier_unregister(id->clk, &id->clk_rate_change_nb);
clk_disable_unprepare(id->clk);
- pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
return ret;
}
@@ -984,10 +1466,13 @@ static int cdns_i2c_remove(struct platform_device *pdev)
{
struct cdns_i2c *id = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+
i2c_del_adapter(&id->adap);
clk_notifier_unregister(id->clk, &id->clk_rate_change_nb);
clk_disable_unprepare(id->clk);
- pm_runtime_disable(&pdev->dev);
return 0;
}
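
Among the cadence changes above, the I2C_M_RECV_LEN handling is the subtle one: for an SMBus block read the first byte on the wire is the payload length, so the receive budget must cover the worst case on top of msg->len, which is 1 (length byte) or 2 (length byte plus PEC). A sketch of that accounting (demo_recv_budget() is illustrative, not a driver function):

#include <linux/i2c.h>

static u16 demo_recv_budget(const struct i2c_msg *msg)
{
        if (msg->flags & I2C_M_RECV_LEN)
                return I2C_SMBUS_BLOCK_MAX + msg->len;

        return msg->len;
}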
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 4e6d0b722ddc..18489940a947 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -528,12 +528,13 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
return -EOPNOTSUPP;
}
- inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
+ /* Set block buffer mode */
+ outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
- /* Use 32-byte buffer to process this transaction */
if (read_write == I2C_SMBUS_WRITE) {
len = data->block[0];
outb_p(len, SMBHSTDAT0(priv));
+ inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
for (i = 0; i < len; i++)
outb_p(data->block[i+1], SMBBLKDAT(priv));
}
@@ -549,6 +550,7 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
return -EPROTO;
data->block[0] = len;
+ inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
for (i = 0; i < len; i++)
data->block[i + 1] = inb_p(SMBBLKDAT(priv));
}
@@ -723,15 +725,11 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
return i801_check_post(priv, status);
}
- for (i = 1; i <= len; i++) {
- if (i == len && read_write == I2C_SMBUS_READ)
- smbcmd |= SMBHSTCNT_LAST_BYTE;
- outb_p(smbcmd, SMBHSTCNT(priv));
-
- if (i == 1)
- outb_p(inb(SMBHSTCNT(priv)) | SMBHSTCNT_START,
- SMBHSTCNT(priv));
+ if (len == 1 && read_write == I2C_SMBUS_READ)
+ smbcmd |= SMBHSTCNT_LAST_BYTE;
+ outb_p(smbcmd | SMBHSTCNT_START, SMBHSTCNT(priv));
+ for (i = 1; i <= len; i++) {
status = i801_wait_byte_done(priv);
if (status)
goto exit;
@@ -754,9 +752,12 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
data->block[0] = len;
}
- /* Retrieve/store value in SMBBLKDAT */
- if (read_write == I2C_SMBUS_READ)
+ if (read_write == I2C_SMBUS_READ) {
data->block[i] = inb_p(SMBBLKDAT(priv));
+ if (i == len - 1)
+ outb_p(smbcmd | SMBHSTCNT_LAST_BYTE, SMBHSTCNT(priv));
+ }
+
if (read_write == I2C_SMBUS_WRITE && i+1 <= len)
outb_p(data->block[i+1], SMBBLKDAT(priv));
@@ -769,14 +770,6 @@ exit:
return i801_check_post(priv, status);
}
-static int i801_set_block_buffer_mode(struct i801_priv *priv)
-{
- outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
- if ((inb_p(SMBAUXCTL(priv)) & SMBAUXCTL_E32B) == 0)
- return -EIO;
- return 0;
-}
-
/* Block transaction function */
static int i801_block_transaction(struct i801_priv *priv,
union i2c_smbus_data *data, char read_write,
@@ -806,9 +799,8 @@ static int i801_block_transaction(struct i801_priv *priv,
/* Experience has shown that the block buffer can only be used for
SMBus (not I2C) block transactions, even though the datasheet
doesn't mention this limitation. */
- if ((priv->features & FEATURE_BLOCK_BUFFER)
- && command != I2C_SMBUS_I2C_BLOCK_DATA
- && i801_set_block_buffer_mode(priv) == 0)
+ if ((priv->features & FEATURE_BLOCK_BUFFER) &&
+ command != I2C_SMBUS_I2C_BLOCK_DATA)
result = i801_block_transaction_by_block(priv, data,
read_write,
command, hwpec);
@@ -1253,6 +1245,7 @@ static const struct {
* Additional individual entries were added after verification.
*/
{ "Vostro V131", 0x1d },
+ { "Vostro 5568", 0x29 },
};
static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
@@ -1872,6 +1865,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
"SMBus I801 adapter at %04lx", priv->smba);
err = i2c_add_adapter(&priv->adapter);
if (err) {
+ platform_device_unregister(priv->tco_pdev);
i801_acpi_remove(priv);
return err;
}
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index a0d045c1bc9e..89faef6f013b 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -206,8 +206,8 @@ static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx)
/* CLKLO = I2C_CLK_RATIO * CLKHI, SETHOLD = CLKHI, DATAVD = CLKHI/2 */
static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
{
- u8 prescale, filt, sethold, clkhi, clklo, datavd;
- unsigned int clk_rate, clk_cycle;
+ u8 prescale, filt, sethold, datavd;
+ unsigned int clk_rate, clk_cycle, clkhi, clklo;
enum lpi2c_imx_pincfg pincfg;
unsigned int temp;
@@ -468,6 +468,8 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
if (num == 1 && msgs[0].len == 0)
goto stop;
+ lpi2c_imx->rx_buf = NULL;
+ lpi2c_imx->tx_buf = NULL;
lpi2c_imx->delivered = 0;
lpi2c_imx->msglen = msgs[i].len;
init_completion(&lpi2c_imx->complete);
@@ -508,10 +510,14 @@ disable:
static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
{
struct lpi2c_imx_struct *lpi2c_imx = dev_id;
+ unsigned int enabled;
unsigned int temp;
+ enabled = readl(lpi2c_imx->base + LPI2C_MIER);
+
lpi2c_imx_intctrl(lpi2c_imx, 0);
temp = readl(lpi2c_imx->base + LPI2C_MSR);
+ temp &= enabled;
if (temp & MSR_RDF)
lpi2c_imx_read_rxfifo(lpi2c_imx);
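
The lpi2c ISR change above applies a common defensive pattern: AND the raw status word with the set of currently enabled interrupts, so that status bits which latched while disabled are ignored. The same pattern in isolation, with hypothetical register offsets:

#include <linux/io.h>
#include <linux/types.h>

#define DEMO_ISR_OFF    0x04            /* hypothetical status reg */
#define DEMO_IER_OFF    0x08            /* hypothetical enable reg */

static u32 demo_pending(void __iomem *base)
{
        u32 enabled = readl(base + DEMO_IER_OFF);
        u32 status = readl(base + DEMO_ISR_OFF);

        /* Act only on events that were actually requested */
        return status & enabled;
}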
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 53325419ec13..e69f79246606 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -506,6 +506,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
if (read_write == I2C_SMBUS_WRITE) {
/* Block Write */
dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: WRITE\n");
+ if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
+ return -EINVAL;
+
dma_size = data->block[0] + 1;
dma_direction = DMA_TO_DEVICE;
desc->wr_len_cmd = dma_size;
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index ca8b3ecfa93d..1c3595c8a761 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -343,18 +343,18 @@ static int ocores_poll_wait(struct ocores_i2c *i2c)
* ocores_isr(), we just add our polling code around it.
*
* It can run in atomic context
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout
*/
-static void ocores_process_polling(struct ocores_i2c *i2c)
+static int ocores_process_polling(struct ocores_i2c *i2c)
{
- while (1) {
- irqreturn_t ret;
- int err;
+ irqreturn_t ret;
+ int err = 0;
+ while (1) {
err = ocores_poll_wait(i2c);
- if (err) {
- i2c->state = STATE_ERROR;
+ if (err)
break; /* timeout */
- }
ret = ocores_isr(-1, i2c);
if (ret == IRQ_NONE)
@@ -365,13 +365,15 @@ static void ocores_process_polling(struct ocores_i2c *i2c)
break;
}
}
+
+ return err;
}
static int ocores_xfer_core(struct ocores_i2c *i2c,
struct i2c_msg *msgs, int num,
bool polling)
{
- int ret;
+ int ret = 0;
u8 ctrl;
ctrl = oc_getreg(i2c, OCI2C_CONTROL);
@@ -389,15 +391,16 @@ static int ocores_xfer_core(struct ocores_i2c *i2c,
oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_START);
if (polling) {
- ocores_process_polling(i2c);
+ ret = ocores_process_polling(i2c);
} else {
- ret = wait_event_timeout(i2c->wait,
- (i2c->state == STATE_ERROR) ||
- (i2c->state == STATE_DONE), HZ);
- if (ret == 0) {
- ocores_process_timeout(i2c);
- return -ETIMEDOUT;
- }
+ if (wait_event_timeout(i2c->wait,
+ (i2c->state == STATE_ERROR) ||
+ (i2c->state == STATE_DONE), HZ) == 0)
+ ret = -ETIMEDOUT;
+ }
+ if (ret) {
+ ocores_process_timeout(i2c);
+ return ret;
}
return (i2c->state == STATE_DONE) ? num : -EIO;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index c36522849325..a788f2e70677 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1058,7 +1058,7 @@ omap_i2c_isr(int irq, void *dev_id)
u16 stat;
stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
- mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
+ mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK;
if (stat & mask)
ret = IRQ_WAKE_THREAD;
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index f614cade432b..30e38bc8b6db 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -105,7 +105,7 @@ static int ce4100_i2c_probe(struct pci_dev *dev,
int i;
struct ce4100_devices *sds;
- ret = pci_enable_device_mem(dev);
+ ret = pcim_enable_device(dev);
if (ret)
return ret;
@@ -114,10 +114,8 @@ static int ce4100_i2c_probe(struct pci_dev *dev,
return -EINVAL;
}
sds = kzalloc(sizeof(*sds), GFP_KERNEL);
- if (!sds) {
- ret = -ENOMEM;
- goto err_mem;
- }
+ if (!sds)
+ return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
sds->pdev[i] = add_i2c_device(dev, i);
@@ -133,8 +131,6 @@ static int ce4100_i2c_probe(struct pci_dev *dev,
err_dev_add:
kfree(sds);
-err_mem:
- pci_disable_device(dev);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 1107a5e7229e..74dd378446c1 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -79,7 +79,7 @@ enum {
#define DEFAULT_SCL_RATE (100 * 1000) /* Hz */
/**
- * struct i2c_spec_values:
+ * struct i2c_spec_values - I2C specification values for various modes
* @min_hold_start_ns: min hold time (repeated) START condition
* @min_low_ns: min LOW period of the SCL clock
* @min_high_ns: min HIGH period of the SCL clock
@@ -135,7 +135,7 @@ static const struct i2c_spec_values fast_mode_plus_spec = {
};
/**
- * struct rk3x_i2c_calced_timings:
+ * struct rk3x_i2c_calced_timings - calculated V1 timings
* @div_low: Divider output for low
* @div_high: Divider output for high
* @tuning: Used to adjust setup/hold data time,
@@ -158,7 +158,7 @@ enum rk3x_i2c_state {
};
/**
- * struct rk3x_i2c_soc_data:
+ * struct rk3x_i2c_soc_data - SOC-specific data
* @grf_offset: offset inside the grf regmap for setting the i2c type
* @calc_timings: Callback function for i2c timing information calculated
*/
@@ -238,7 +238,8 @@ static inline void rk3x_i2c_clean_ipd(struct rk3x_i2c *i2c)
}
/**
- * Generate a START condition, which triggers a REG_INT_START interrupt.
+ * rk3x_i2c_start - Generate a START condition, which triggers a REG_INT_START interrupt.
+ * @i2c: target controller data
*/
static void rk3x_i2c_start(struct rk3x_i2c *i2c)
{
@@ -257,8 +258,8 @@ static void rk3x_i2c_start(struct rk3x_i2c *i2c)
}
/**
- * Generate a STOP condition, which triggers a REG_INT_STOP interrupt.
- *
+ * rk3x_i2c_stop - Generate a STOP condition, which triggers a REG_INT_STOP interrupt.
+ * @i2c: target controller data
* @error: Error code to return in rk3x_i2c_xfer
*/
static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
@@ -297,7 +298,8 @@ static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
}
/**
- * Setup a read according to i2c->msg
+ * rk3x_i2c_prepare_read - Setup a read according to i2c->msg
+ * @i2c: target controller data
*/
static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c)
{
@@ -328,7 +330,8 @@ static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c)
}
/**
- * Fill the transmit buffer with data from i2c->msg
+ * rk3x_i2c_fill_transmit_buf - Fill the transmit buffer with data from i2c->msg
+ * @i2c: target controller data
*/
static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c)
{
@@ -415,7 +418,7 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
{
unsigned int i;
unsigned int len = i2c->msg->len - i2c->processed;
- u32 uninitialized_var(val);
+ u32 val;
u8 byte;
/* we only care for MBRF here. */
@@ -531,11 +534,10 @@ out:
}
/**
- * Get timing values of I2C specification
- *
+ * rk3x_i2c_get_spec - Get timing values of I2C specification
* @speed: Desired SCL frequency
*
- * Returns: Matched i2c spec values.
+ * Return: Matched i2c_spec_values.
*/
static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed)
{
@@ -548,13 +550,12 @@ static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed)
}
/**
- * Calculate divider values for desired SCL frequency
- *
+ * rk3x_i2c_v0_calc_timings - Calculate divider values for desired SCL frequency
* @clk_rate: I2C input clock rate
* @t: Known I2C timing information
* @t_calc: Calculated rk3x private timings that would be written into regs
*
- * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case
+ * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case
* a best-effort divider value is returned in divs. If the target rate is
* too high, we silently use the highest possible rate.
*/
@@ -709,13 +710,12 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
}
/**
- * Calculate timing values for desired SCL frequency
- *
+ * rk3x_i2c_v1_calc_timings - Calculate timing values for desired SCL frequency
* @clk_rate: I2C input clock rate
* @t: Known I2C timing information
* @t_calc: Calculated rk3x private timings that would be written into regs
*
- * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case
+ * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case
* a best-effort divider value is returned in divs. If the target rate is
* too high, we silently use the highest possible rate.
* The following formulas are v1's method to calculate timings.
@@ -959,14 +959,14 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
}
/**
- * Setup I2C registers for an I2C operation specified by msgs, num.
- *
- * Must be called with i2c->lock held.
- *
+ * rk3x_i2c_setup - Setup I2C registers for an I2C operation specified by msgs, num.
+ * @i2c: target controller data
* @msgs: I2C msgs to process
* @num: Number of msgs
*
- * returns: Number of I2C msgs processed or negative in case of error
+ * Must be called with i2c->lock held.
+ *
+ * Return: Number of I2C msgs processed or negative in case of error
*/
static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num)
{
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index e6f927c6f8af..7402f71dd24d 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -223,8 +223,17 @@ static bool is_ack(struct s3c24xx_i2c *i2c)
int tries;
for (tries = 50; tries; --tries) {
- if (readl(i2c->regs + S3C2410_IICCON)
- & S3C2410_IICCON_IRQPEND) {
+ unsigned long tmp = readl(i2c->regs + S3C2410_IICCON);
+
+ if (!(tmp & S3C2410_IICCON_ACKEN)) {
+ /*
+ * Wait a bit for the bus to stabilize,
+ * delay estimated experimentally.
+ */
+ usleep_range(100, 200);
+ return true;
+ }
+ if (tmp & S3C2410_IICCON_IRQPEND) {
if (!(readl(i2c->regs + S3C2410_IICSTAT)
& S3C2410_IICSTAT_LASTBIT))
return true;
@@ -277,16 +286,6 @@ static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c,
stat |= S3C2410_IICSTAT_START;
writel(stat, i2c->regs + S3C2410_IICSTAT);
-
- if (i2c->quirks & QUIRK_POLL) {
- while ((i2c->msg_num != 0) && is_ack(i2c)) {
- i2c_s3c_irq_nextbyte(i2c, stat);
- stat = readl(i2c->regs + S3C2410_IICSTAT);
-
- if (stat & S3C2410_IICSTAT_ARBITR)
- dev_err(i2c->dev, "deal with arbitration loss\n");
- }
- }
}
static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
@@ -694,7 +693,7 @@ static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c)
static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
struct i2c_msg *msgs, int num)
{
- unsigned long timeout;
+ unsigned long timeout = 0;
int ret;
ret = s3c24xx_i2c_set_master(i2c);
@@ -714,16 +713,19 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
s3c24xx_i2c_message_start(i2c, msgs);
if (i2c->quirks & QUIRK_POLL) {
- ret = i2c->msg_idx;
+ while ((i2c->msg_num != 0) && is_ack(i2c)) {
+ unsigned long stat = readl(i2c->regs + S3C2410_IICSTAT);
- if (ret != num)
- dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
+ i2c_s3c_irq_nextbyte(i2c, stat);
- goto out;
+ stat = readl(i2c->regs + S3C2410_IICSTAT);
+ if (stat & S3C2410_IICSTAT_ARBITR)
+ dev_err(i2c->dev, "deal with arbitration loss\n");
+ }
+ } else {
+ timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
}
- timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
-
ret = i2c->msg_idx;
/*
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 92ba0183fd8a..ef0dc06a3778 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -577,12 +577,14 @@ static int sprd_i2c_remove(struct platform_device *pdev)
struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev);
int ret;
- ret = pm_runtime_resume_and_get(i2c_dev->dev);
+ ret = pm_runtime_get_sync(i2c_dev->dev);
if (ret < 0)
- return ret;
+ dev_err(&pdev->dev, "Failed to resume device (%pe)\n", ERR_PTR(ret));
i2c_del_adapter(&i2c_dev->adap);
- clk_disable_unprepare(i2c_dev->clk);
+
+ if (ret >= 0)
+ clk_disable_unprepare(i2c_dev->clk);
pm_runtime_put_noidle(i2c_dev->dev);
pm_runtime_disable(i2c_dev->dev);
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 0f0c41174ddd..74dd2c15e34c 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -974,9 +974,10 @@ static int stm32f7_i2c_smbus_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
/* Configure PEC */
if ((flags & I2C_CLIENT_PEC) && f7_msg->size != I2C_SMBUS_QUICK) {
cr1 |= STM32F7_I2C_CR1_PECEN;
- cr2 |= STM32F7_I2C_CR2_PECBYTE;
- if (!f7_msg->read_write)
+ if (!f7_msg->read_write) {
+ cr2 |= STM32F7_I2C_CR2_PECBYTE;
f7_msg->count++;
+ }
} else {
cr1 &= ~STM32F7_I2C_CR1_PECEN;
cr2 &= ~STM32F7_I2C_CR2_PECBYTE;
@@ -1064,8 +1065,10 @@ static void stm32f7_i2c_smbus_rep_start(struct stm32f7_i2c_dev *i2c_dev)
f7_msg->stop = true;
/* Add one byte for PEC if needed */
- if (cr1 & STM32F7_I2C_CR1_PECEN)
+ if (cr1 & STM32F7_I2C_CR1_PECEN) {
+ cr2 |= STM32F7_I2C_CR2_PECBYTE;
f7_msg->count++;
+ }
/* Set number of bytes to be transferred */
cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK);
diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
index 7c07ce116e38..540c33f4e350 100644
--- a/drivers/i2c/busses/i2c-sun6i-p2wi.c
+++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
@@ -202,6 +202,11 @@ static int p2wi_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (clk_freq == 0) {
+ dev_err(dev, "clock-frequency is set to 0 in DT\n");
+ return -EINVAL;
+ }
+
if (of_get_child_count(np) > 1) {
dev_err(dev, "P2WI only supports one slave device\n");
return -EINVAL;
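
The clk_freq check added above guards the divider math later in probe: a clock-frequency of zero coming from the device tree would otherwise end up as a divisor. A reduced sketch of the hazard (illustrative, not driver code):

#include <linux/errno.h>
#include <linux/kernel.h>

static int demo_calc_divider(unsigned long parent_rate,
                             unsigned long bus_rate)
{
        /* Reject a zero rate before it is used as a divisor */
        if (bus_rate == 0)
                return -EINVAL;

        return DIV_ROUND_UP(parent_rate, bus_rate);
}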
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 63cbb9c7c1b0..76e9dcd63856 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -308,6 +308,9 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip,
u32 msg[3];
int rc;
+ if (writelen > I2C_SMBUS_BLOCK_MAX)
+ return -EINVAL;
+
memcpy(ctx->dma_buffer, data, writelen);
paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen,
DMA_TO_DEVICE);
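
The writelen check added above closes a buffer overflow: the length is caller-controlled while the DMA bounce buffer behind ctx->dma_buffer has a fixed size, so the copy must be bounded first. The same guard in a minimal sketch (sizes illustrative):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_BUF_MAX    32              /* cf. I2C_SMBUS_BLOCK_MAX */

static int demo_blkwr(u8 *dma_buffer, const u8 *data, size_t writelen)
{
        /* A longer writelen would overrun the fixed-size buffer */
        if (writelen > DEMO_BUF_MAX)
                return -EINVAL;

        memcpy(dma_buffer, data, writelen);
        return 0;
}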
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index a48bee59dcde..6ce041ed693e 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -46,6 +46,7 @@ enum xiic_endian {
/**
* struct xiic_i2c - Internal representation of the XIIC I2C bus
+ * @dev: Pointer to device structure
* @base: Memory base of the HW registers
* @wait: Wait queue for callers
* @adap: Kernel adapter representation
@@ -57,6 +58,7 @@ enum xiic_endian {
* @rx_msg: Current RX message
* @rx_pos: Position within current RX message
* @endianness: big/little-endian byte order
+ * @clk: Pointer to struct clk
*/
struct xiic_i2c {
struct device *dev;
@@ -154,6 +156,8 @@ struct xiic_i2c {
#define XIIC_RESET_MASK 0xAUL
#define XIIC_PM_TIMEOUT 1000 /* ms */
+/* timeout waiting for the controller to respond */
+#define XIIC_I2C_TIMEOUT (msecs_to_jiffies(1000))
/*
* The following constant is used for the device global interrupt enable
* register, to enable all interrupts for the device, this is the only bit
@@ -164,7 +168,7 @@ struct xiic_i2c {
#define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos)
#define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos)
-static void xiic_start_xfer(struct xiic_i2c *i2c);
+static int xiic_start_xfer(struct xiic_i2c *i2c);
static void __xiic_start_xfer(struct xiic_i2c *i2c);
/*
@@ -245,17 +249,29 @@ static inline void xiic_irq_clr_en(struct xiic_i2c *i2c, u32 mask)
xiic_irq_en(i2c, mask);
}
-static void xiic_clear_rx_fifo(struct xiic_i2c *i2c)
+static int xiic_clear_rx_fifo(struct xiic_i2c *i2c)
{
u8 sr;
+ unsigned long timeout;
+
+ timeout = jiffies + XIIC_I2C_TIMEOUT;
for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
!(sr & XIIC_SR_RX_FIFO_EMPTY_MASK);
- sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET))
+ sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET)) {
xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
+ if (time_after(jiffies, timeout)) {
+ dev_err(i2c->dev, "Failed to clear rx fifo\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
}
-static void xiic_reinit(struct xiic_i2c *i2c)
+static int xiic_reinit(struct xiic_i2c *i2c)
{
+ int ret;
+
xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);
/* Set receive Fifo depth to maximum (zero based). */
@@ -268,12 +284,16 @@ static void xiic_reinit(struct xiic_i2c *i2c)
xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK);
/* make sure RX fifo is empty */
- xiic_clear_rx_fifo(i2c);
+ ret = xiic_clear_rx_fifo(i2c);
+ if (ret)
+ return ret;
/* Enable interrupts */
xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK);
+
+ return 0;
}
static void xiic_deinit(struct xiic_i2c *i2c)
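
xiic_clear_rx_fifo() now bounds its drain loop with a jiffies deadline and propagates -ETIMEDOUT, and xiic_reinit() forwards that error instead of returning void. The underlying idiom, sketched with illustrative helpers:

    unsigned long deadline = jiffies + msecs_to_jiffies(1000);

    while (rx_fifo_not_empty()) {           /* illustrative condition */
            drain_one_byte();
            if (time_after(jiffies, deadline))
                    return -ETIMEDOUT;      /* hardware never settled */
    }

When the condition is a plain register read, readl_poll_timeout() from <linux/iopoll.h> is the usual helper for the same thing.
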
@@ -353,6 +373,9 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
struct xiic_i2c *i2c = dev_id;
u32 pend, isr, ier;
u32 clr = 0;
+ int xfer_more = 0;
+ int wakeup_req = 0;
+ int wakeup_code = 0;
/* Get the interrupt Status from the IPIF. There is no clearing of
* interrupts in the IPIF. Interrupts must be cleared at the source.
@@ -389,10 +412,16 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
*/
xiic_reinit(i2c);
- if (i2c->rx_msg)
- xiic_wakeup(i2c, STATE_ERROR);
- if (i2c->tx_msg)
- xiic_wakeup(i2c, STATE_ERROR);
+ if (i2c->rx_msg) {
+ wakeup_req = 1;
+ wakeup_code = STATE_ERROR;
+ }
+ if (i2c->tx_msg) {
+ wakeup_req = 1;
+ wakeup_code = STATE_ERROR;
+ }
+ /* don't try to handle other events */
+ goto out;
}
if (pend & XIIC_INTR_RX_FULL_MASK) {
/* Receive register/FIFO is full */
@@ -426,8 +455,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
i2c->tx_msg++;
dev_dbg(i2c->adap.dev.parent,
"%s will start next...\n", __func__);
-
- __xiic_start_xfer(i2c);
+ xfer_more = 1;
}
}
}
@@ -441,11 +469,13 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
if (!i2c->tx_msg)
goto out;
- if ((i2c->nmsgs == 1) && !i2c->rx_msg &&
- xiic_tx_space(i2c) == 0)
- xiic_wakeup(i2c, STATE_DONE);
+ wakeup_req = 1;
+
+ if (i2c->nmsgs == 1 && !i2c->rx_msg &&
+ xiic_tx_space(i2c) == 0)
+ wakeup_code = STATE_DONE;
else
- xiic_wakeup(i2c, STATE_ERROR);
+ wakeup_code = STATE_ERROR;
}
if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
/* Transmit register/FIFO is empty or ½ empty */
@@ -469,7 +499,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
if (i2c->nmsgs > 1) {
i2c->nmsgs--;
i2c->tx_msg++;
- __xiic_start_xfer(i2c);
+ xfer_more = 1;
} else {
xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
@@ -487,6 +517,13 @@ out:
dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr);
xiic_setreg32(i2c, XIIC_IISR_OFFSET, clr);
+ if (xfer_more)
+ __xiic_start_xfer(i2c);
+ if (wakeup_req)
+ xiic_wakeup(i2c, wakeup_code);
+
+ WARN_ON(xfer_more && wakeup_req);
+
mutex_unlock(&i2c->lock);
return IRQ_HANDLED;
}
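
The rework above records what needs to happen (xfer_more, wakeup_req/wakeup_code) while walking the interrupt sources, then performs the actions once, after the interrupt status has been acknowledged, so a wakeup cannot race with restarting the next message. A hedged sketch of the "record now, act at the end" pattern with illustrative helpers:

    static irqreturn_t my_irq_thread(int irq, void *dev_id)
    {
            struct my_dev *d = dev_id;
            bool start_next = false, wake = false;
            int wake_code = 0;

            mutex_lock(&d->lock);   /* threaded handler, so sleeping is fine */

            /* ... inspect status bits, set start_next / wake / wake_code ... */

            ack_interrupts(d);      /* clear the sources first */
            if (start_next)
                    start_next_message(d);
            if (wake)
                    wake_up_waiter(d, wake_code);

            mutex_unlock(&d->lock);
            return IRQ_HANDLED;
    }
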
@@ -653,12 +690,18 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c)
}
-static void xiic_start_xfer(struct xiic_i2c *i2c)
+static int xiic_start_xfer(struct xiic_i2c *i2c)
{
+ int ret;
mutex_lock(&i2c->lock);
- xiic_reinit(i2c);
- __xiic_start_xfer(i2c);
+
+ ret = xiic_reinit(i2c);
+ if (!ret)
+ __xiic_start_xfer(i2c);
+
mutex_unlock(&i2c->lock);
+
+ return ret;
}
static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
@@ -680,7 +723,11 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
i2c->tx_msg = msgs;
i2c->nmsgs = num;
- xiic_start_xfer(i2c);
+ err = xiic_start_xfer(i2c);
+ if (err < 0) {
+ dev_err(adap->dev.parent, "Error xiic_start_xfer\n");
+ goto out;
+ }
if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
(i2c->state == STATE_DONE), HZ)) {
@@ -759,7 +806,8 @@ static int xiic_i2c_probe(struct platform_device *pdev)
i2c->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(i2c->clk)) {
- dev_err(&pdev->dev, "input clock not found.\n");
+ if (PTR_ERR(i2c->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "input clock not found.\n");
return PTR_ERR(i2c->clk);
}
ret = clk_prepare_enable(i2c->clk);
@@ -768,10 +816,10 @@ static int xiic_i2c_probe(struct platform_device *pdev)
return ret;
}
i2c->dev = &pdev->dev;
- pm_runtime_enable(i2c->dev);
pm_runtime_set_autosuspend_delay(i2c->dev, XIIC_PM_TIMEOUT);
pm_runtime_use_autosuspend(i2c->dev);
pm_runtime_set_active(i2c->dev);
+ pm_runtime_enable(i2c->dev);
ret = devm_request_threaded_irq(&pdev->dev, irq, xiic_isr,
xiic_process, IRQF_ONESHOT,
pdev->name, i2c);
@@ -793,7 +841,11 @@ static int xiic_i2c_probe(struct platform_device *pdev)
if (!(sr & XIIC_SR_TX_FIFO_EMPTY_MASK))
i2c->endianness = BIG;
- xiic_reinit(i2c);
+ ret = xiic_reinit(i2c);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot xiic_reinit\n");
+ goto err_clk_dis;
+ }
/* add i2c adapter to i2c tree */
ret = i2c_add_adapter(&i2c->adap);
@@ -825,14 +877,16 @@ static int xiic_i2c_remove(struct platform_device *pdev)
/* remove adapter & data */
i2c_del_adapter(&i2c->adap);
- ret = clk_prepare_enable(i2c->clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable clock.\n");
+ ret = pm_runtime_get_sync(i2c->dev);
+ if (ret < 0)
return ret;
- }
+
xiic_deinit(i2c);
+ pm_runtime_put_sync(i2c->dev);
clk_disable_unprepare(i2c->clk);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
return 0;
}
@@ -884,6 +938,7 @@ static struct platform_driver xiic_i2c_driver = {
module_platform_driver(xiic_i2c_driver);
+MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("info@mocean-labs.com");
MODULE_DESCRIPTION("Xilinx I2C bus driver");
MODULE_LICENSE("GPL v2");
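
The probe-time reordering above (moving pm_runtime_enable() after pm_runtime_set_active()) follows the canonical sequence for a device that probe() leaves powered on: the PM core must learn the real state before it is allowed to act on it. Sketch:

    pm_runtime_set_autosuspend_delay(dev, 1000);    /* ms, illustrative */
    pm_runtime_use_autosuspend(dev);
    pm_runtime_set_active(dev);     /* record the real state first... */
    pm_runtime_enable(dev);         /* ...then let the core act on it */
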
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 1b93fae58ec7..964e8a29b27b 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2358,8 +2358,9 @@ void i2c_put_adapter(struct i2c_adapter *adap)
if (!adap)
return;
- put_device(&adap->dev);
module_put(adap->owner);
+ /* Should be last, otherwise we risk use-after-free with 'adap' */
+ put_device(&adap->dev);
}
EXPORT_SYMBOL(i2c_put_adapter);
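
The ordering fix exists because put_device() may drop the last reference and free the memory containing the adapter, while module_put() still needs to read adap->owner. A hedged sketch of the invariant:

    void put_adapter(struct i2c_adapter *adap)
    {
            if (!adap)
                    return;
            module_put(adap->owner);  /* reads *adap, so must come first */
            put_device(&adap->dev);   /* may free adap; nothing after this */
    }
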
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 517d98be68d2..1226617a27e1 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -3,6 +3,7 @@
* i2c-core.h - interfaces internal to the I2C framework
*/
+#include <linux/kconfig.h>
#include <linux/rwsem.h>
struct i2c_devinfo {
@@ -29,7 +30,8 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
*/
static inline bool i2c_in_atomic_xfer_mode(void)
{
- return system_state > SYSTEM_RUNNING && irqs_disabled();
+ return system_state > SYSTEM_RUNNING &&
+ (IS_ENABLED(CONFIG_PREEMPT_COUNT) ? !preemptible() : irqs_disabled());
}
static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap)
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 774507b54b57..c90cec8d9656 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -340,7 +340,7 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
priv->adap.lock_ops = &i2c_parent_lock_ops;
/* Sanity check on class */
- if (i2c_mux_parent_classes(parent) & class)
+ if (i2c_mux_parent_classes(parent) & class & ~I2C_CLASS_DEPRECATED)
dev_err(&parent->dev,
"Segment %d behind mux can't share classes with ancestors\n",
chan_id);
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index f7a7405d4350..45a3f7e7b3f6 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -61,7 +61,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
if (ret)
goto err;
- adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
+ adap = of_get_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
if (!adap) {
ret = -ENODEV;
goto err_with_revert;
@@ -243,6 +243,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL);
props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL);
+ if (!props[i].name || !props[i].value) {
+ err = -ENOMEM;
+ goto err_rollback;
+ }
props[i].length = 3;
of_changeset_init(&priv->chan[i].chgset);
diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c
index f830535cff12..fed7d17e0190 100644
--- a/drivers/i2c/muxes/i2c-mux-gpmux.c
+++ b/drivers/i2c/muxes/i2c-mux-gpmux.c
@@ -52,7 +52,7 @@ static struct i2c_adapter *mux_parent_adapter(struct device *dev)
dev_err(dev, "Cannot parse i2c-parent\n");
return ERR_PTR(-ENODEV);
}
- parent = of_find_i2c_adapter_by_node(parent_np);
+ parent = of_get_i2c_adapter_by_node(parent_np);
of_node_put(parent_np);
if (!parent)
return ERR_PTR(-EPROBE_DEFER);
@@ -138,6 +138,7 @@ static int i2c_mux_probe(struct platform_device *pdev)
return 0;
err_children:
+ of_node_put(child);
i2c_mux_del_adapters(muxc);
err_parent:
i2c_put_adapter(parent);
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index f1bb00a11ad6..fc991cf002af 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -62,7 +62,7 @@ static struct i2c_adapter *i2c_mux_pinctrl_parent_adapter(struct device *dev)
dev_err(dev, "Cannot parse i2c-parent\n");
return ERR_PTR(-ENODEV);
}
- parent = of_find_i2c_adapter_by_node(parent_np);
+ parent = of_get_i2c_adapter_by_node(parent_np);
of_node_put(parent_np);
if (!parent)
return ERR_PTR(-EPROBE_DEFER);
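
These conversions matter for reference counting: of_get_i2c_adapter_by_node() takes both a device reference and a module reference on the adapter's owner, which is exactly what the paired i2c_put_adapter() releases (see the i2c-core-base.c hunk above). The of_find_ variant takes no module reference, so pairing it with i2c_put_adapter() underflows the module refcount. A hedged usage sketch:

    struct i2c_adapter *adap = of_get_i2c_adapter_by_node(parent_np);

    of_node_put(parent_np);         /* done with the OF node either way */
    if (!adap)
            return ERR_PTR(-EPROBE_DEFER);
    /* ... use adap ... */
    i2c_put_adapter(adap);          /* drops module ref, then device ref */
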
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 6cc71c90f85e..02dc8e29c828 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1469,9 +1469,11 @@ i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
desc->dev->dev.of_node = desc->boardinfo->of_node;
ret = device_register(&desc->dev->dev);
- if (ret)
+ if (ret) {
dev_err(&master->dev,
"Failed to add I3C device (err = %d)\n", ret);
+ put_device(&desc->dev->dev);
+ }
}
}
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 6d5719cea9f5..6e0621e730fa 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -74,7 +74,8 @@
#define PRESCL_CTRL0 0x14
#define PRESCL_CTRL0_I2C(x) ((x) << 16)
#define PRESCL_CTRL0_I3C(x) (x)
-#define PRESCL_CTRL0_MAX GENMASK(9, 0)
+#define PRESCL_CTRL0_I3C_MAX GENMASK(9, 0)
+#define PRESCL_CTRL0_I2C_MAX GENMASK(15, 0)
#define PRESCL_CTRL1 0x18
#define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
@@ -189,7 +190,7 @@
#define SLV_STATUS1_HJ_DIS BIT(18)
#define SLV_STATUS1_MR_DIS BIT(17)
#define SLV_STATUS1_PROT_ERR BIT(16)
-#define SLV_STATUS1_DA(x) (((s) & GENMASK(15, 9)) >> 9)
+#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
#define SLV_STATUS1_HAS_DA BIT(8)
#define SLV_STATUS1_DDR_RX_FULL BIT(7)
#define SLV_STATUS1_DDR_TX_FULL BIT(6)
@@ -1212,7 +1213,7 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
return -EINVAL;
pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
- if (pres > PRESCL_CTRL0_MAX)
+ if (pres > PRESCL_CTRL0_I3C_MAX)
return -ERANGE;
bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);
@@ -1225,7 +1226,7 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
max_i2cfreq = bus->scl_rate.i2c;
pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
- if (pres > PRESCL_CTRL0_MAX)
+ if (pres > PRESCL_CTRL0_I2C_MAX)
return -ERANGE;
bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);
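
The split of PRESCL_CTRL0_MAX reflects that the I3C prescaler field is only 10 bits wide (GENMASK(9, 0)) while the I2C field is 16 bits, so slow I2C rates needing a large divider were wrongly rejected. Worked numbers, assuming an illustrative 100 MHz sysclk:

    unsigned long sysclk_rate = 100000000UL;

    /* I3C: target 12.5 MHz push-pull SCL */
    unsigned long pres_i3c = DIV_ROUND_UP(sysclk_rate, 12500000UL * 4) - 1; /* = 1 */
    unsigned long i3c_rate = sysclk_rate / ((pres_i3c + 1) * 4);            /* = 12.5 MHz */

    /* I2C: target 100 kHz SCL; pres = 199 fits either field, but a
     * 1 kHz bus needs pres = 19999, which fits 16 bits yet not 10 --
     * exactly the case the separate I2C_MAX limit now permits.
     */
    unsigned long pres_i2c = sysclk_rate / (100000UL * 5) - 1;              /* = 199 */
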
@@ -1580,13 +1581,13 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
/* Device ID0 is reserved to describe this master. */
master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
master->free_rr_slots = GENMASK(master->maxdevs, 1);
+ master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
+ master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
val = readl(master->regs + CONF_STATUS1);
master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
- master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
- master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
spin_lock_init(&master->ibi.lock);
master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 7d4e5c08f133..05e18d658141 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -180,7 +180,7 @@ err:
static acpi_handle ide_acpi_hwif_get_handle(ide_hwif_t *hwif)
{
struct device *dev = hwif->gendev.parent;
- acpi_handle uninitialized_var(dev_handle);
+ acpi_handle dev_handle;
u64 pcidevfn;
acpi_handle chan_handle;
int err;
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 775fd34132ab..013ad33fbbc8 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -608,7 +608,7 @@ static int ide_delayed_transfer_pc(ide_drive_t *drive)
static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
{
- struct ide_atapi_pc *uninitialized_var(pc);
+ struct ide_atapi_pc *pc;
ide_hwif_t *hwif = drive->hwif;
struct request *rq = hwif->rq;
ide_expiry_t *expiry;
diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c
index 18c20a7aa0ce..94bdcf1ea186 100644
--- a/drivers/ide/ide-io-std.c
+++ b/drivers/ide/ide-io-std.c
@@ -173,7 +173,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
if (io_32bit) {
- unsigned long uninitialized_var(flags);
+ unsigned long flags;
if ((io_32bit & 2) && !mmio) {
local_irq_save(flags);
@@ -217,7 +217,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
if (io_32bit) {
- unsigned long uninitialized_var(flags);
+ unsigned long flags;
if ((io_32bit & 2) && !mmio) {
local_irq_save(flags);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index b32a013d827a..fefe95079e95 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -614,12 +614,12 @@ static int drive_is_ready(ide_drive_t *drive)
void ide_timer_expiry (struct timer_list *t)
{
ide_hwif_t *hwif = from_timer(hwif, t, timer);
- ide_drive_t *uninitialized_var(drive);
+ ide_drive_t *drive;
ide_handler_t *handler;
unsigned long flags;
int wait = -1;
int plug_device = 0;
- struct request *uninitialized_var(rq_in_flight);
+ struct request *rq_in_flight;
spin_lock_irqsave(&hwif->lock, flags);
@@ -772,13 +772,13 @@ irqreturn_t ide_intr (int irq, void *dev_id)
{
ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
struct ide_host *host = hwif->host;
- ide_drive_t *uninitialized_var(drive);
+ ide_drive_t *drive;
ide_handler_t *handler;
unsigned long flags;
ide_startstop_t startstop;
irqreturn_t irq_ret = IRQ_NONE;
int plug_device = 0;
- struct request *uninitialized_var(rq_in_flight);
+ struct request *rq_in_flight;
if (host->host_flags & IDE_HFLAG_SERIALIZE) {
if (hwif != host->cur_port)
diff --git a/drivers/ide/ide-sysfs.c b/drivers/ide/ide-sysfs.c
index b9dfeb2e8bd6..c08a8a0916e2 100644
--- a/drivers/ide/ide-sysfs.c
+++ b/drivers/ide/ide-sysfs.c
@@ -131,7 +131,7 @@ static struct device_attribute *ide_port_attrs[] = {
int ide_sysfs_register_port(ide_hwif_t *hwif)
{
- int i, uninitialized_var(rc);
+ int i, rc;
for (i = 0; ide_port_attrs[i]; i++) {
rc = device_create_file(hwif->portdev, ide_port_attrs[i]);
diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c
index 870e235e30af..cf996f788292 100644
--- a/drivers/ide/umc8672.c
+++ b/drivers/ide/umc8672.c
@@ -108,7 +108,7 @@ static void umc_set_speeds(u8 speeds[])
static void umc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
ide_hwif_t *mate = hwif->mate;
- unsigned long uninitialized_var(flags);
+ unsigned long flags;
const u8 pio = drive->pio_mode - XFER_PIO_0;
printk("%s: setting umc8672 to PIO mode%d (speed %d)\n",
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 347b08b56042..63b221226261 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -46,11 +46,13 @@
#include <linux/tick.h>
#include <trace/events/power.h>
#include <linux/sched.h>
+#include <linux/sched/smt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/nospec-branch.h>
#include <asm/mwait.h>
#include <asm/msr.h>
@@ -98,6 +100,12 @@ static struct cpuidle_state *cpuidle_state_table;
#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000
/*
+ * Disable IBRS across idle (when KERNEL_IBRS); this is mutually exclusive
+ * with the IRQ_ENABLE flag above.
+ */
+#define CPUIDLE_FLAG_IBRS BIT(16)
+
+/*
* MWAIT takes an 8-bit "hint" in EAX "suggesting"
* the C-state (top nibble) and sub-state (bottom nibble)
* 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
@@ -107,6 +115,24 @@ static struct cpuidle_state *cpuidle_state_table;
#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
#define MWAIT2flg(eax) ((eax & 0xFF) << 24)
+static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ bool smt_active = sched_smt_active();
+ u64 spec_ctrl = spec_ctrl_current();
+ int ret;
+
+ if (smt_active)
+ wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+
+ ret = intel_idle(dev, drv, index);
+
+ if (smt_active)
+ wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
+
+ return ret;
+}
+
/*
* States are indexed by the cstate number,
* which is also the index into the MWAIT hint array.
@@ -605,7 +631,7 @@ static struct cpuidle_state skl_cstates[] = {
{
.name = "C6",
.desc = "MWAIT 0x20",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 85,
.target_residency = 200,
.enter = &intel_idle,
@@ -613,7 +639,7 @@ static struct cpuidle_state skl_cstates[] = {
{
.name = "C7s",
.desc = "MWAIT 0x33",
- .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 124,
.target_residency = 800,
.enter = &intel_idle,
@@ -621,7 +647,7 @@ static struct cpuidle_state skl_cstates[] = {
{
.name = "C8",
.desc = "MWAIT 0x40",
- .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 200,
.target_residency = 800,
.enter = &intel_idle,
@@ -629,7 +655,7 @@ static struct cpuidle_state skl_cstates[] = {
{
.name = "C9",
.desc = "MWAIT 0x50",
- .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 480,
.target_residency = 5000,
.enter = &intel_idle,
@@ -637,7 +663,7 @@ static struct cpuidle_state skl_cstates[] = {
{
.name = "C10",
.desc = "MWAIT 0x60",
- .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 890,
.target_residency = 5000,
.enter = &intel_idle,
@@ -666,7 +692,7 @@ static struct cpuidle_state skx_cstates[] = {
{
.name = "C6",
.desc = "MWAIT 0x20",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 133,
.target_residency = 600,
.enter = &intel_idle,
@@ -1370,6 +1396,11 @@ static void __init intel_idle_cpuidle_driver_init(void)
drv->states[drv->state_count] = /* structure copy */
cpuidle_state_table[cstate];
+ if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
+ cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IBRS) {
+ drv->states[drv->state_count].enter = intel_idle_ibrs;
+ }
+
drv->state_count += 1;
}
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 5bd51853b15e..3c0da322ece7 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -70,6 +70,7 @@ config IIO_TRIGGERED_EVENT
source "drivers/iio/accel/Kconfig"
source "drivers/iio/adc/Kconfig"
+source "drivers/iio/addac/Kconfig"
source "drivers/iio/afe/Kconfig"
source "drivers/iio/amplifiers/Kconfig"
source "drivers/iio/chemical/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index bff682ad1cfb..96fd43b2ef7c 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o
obj-y += accel/
obj-y += adc/
+obj-y += addac/
obj-y += afe/
obj-y += amplifiers/
obj-y += buffer/
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index f908476fa095..a4e0e2b129bc 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -279,6 +279,7 @@ static int accel_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
hid_sensor_convert_timestamp(
&accel_state->common_attributes,
*(int64_t *)raw_data);
+ ret = 0;
break;
default:
break;
diff --git a/drivers/iio/accel/mma9551_core.c b/drivers/iio/accel/mma9551_core.c
index 666e7a04a7d7..9bb5c2fea08c 100644
--- a/drivers/iio/accel/mma9551_core.c
+++ b/drivers/iio/accel/mma9551_core.c
@@ -296,9 +296,12 @@ int mma9551_read_config_word(struct i2c_client *client, u8 app_id,
ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_CONFIG,
reg, NULL, 0, (u8 *)&v, 2);
+ if (ret < 0)
+ return ret;
+
*val = be16_to_cpu(v);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(mma9551_read_config_word);
@@ -354,9 +357,12 @@ int mma9551_read_status_word(struct i2c_client *client, u8 app_id,
ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_STATUS,
reg, NULL, 0, (u8 *)&v, 2);
+ if (ret < 0)
+ return ret;
+
*val = be16_to_cpu(v);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(mma9551_read_status_word);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index cb5788084299..2a2aa17d230c 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -840,22 +840,6 @@ config STMPE_ADC
Say yes here to build support for ST Microelectronics STMPE
built-in ADC block (stmpe811).
-config STX104
- tristate "Apex Embedded Systems STX104 driver"
- depends on PC104 && X86
- select ISA_BUS_API
- select GPIOLIB
- help
- Say yes here to build support for the Apex Embedded Systems STX104
- integrated analog PC/104 card.
-
- This driver supports the 16 channels of single-ended (8 channels of
- differential) analog inputs, 2 channels of analog output, 4 digital
- inputs, and 4 digital outputs provided by the STX104.
-
- The base port addresses for the devices may be configured via the base
- array module parameter.
-
config SUN4I_GPADC
tristate "Support for the Allwinner SoCs GPADC"
depends on IIO
@@ -1096,4 +1080,14 @@ config XILINX_XADC
The driver can also be build as a module. If so, the module will be called
xilinx-xadc.
+config XILINX_AMS
+ tristate "Xilinx AMS driver"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say yes here to have support for the Xilinx AMS.
+
+ The driver can also be built as a module. If so, the module will be called
+ xilinx-ams.
+
endmenu
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index ef9cc485fb67..30cbad22b38a 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -72,7 +72,6 @@ obj-$(CONFIG_RCAR_GYRO_ADC) += rcar-gyroadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o
obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
-obj-$(CONFIG_STX104) += stx104.o
obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
obj-$(CONFIG_STM32_ADC) += stm32-adc.o
@@ -99,4 +98,5 @@ obj-$(CONFIG_VF610_ADC) += vf610_adc.o
obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o
+obj-$(CONFIG_XILINX_AMS) += xilinx-ams.o
obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 2640b75fb774..a2171c887561 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -283,10 +283,10 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
unsigned int data_reg;
int ret = 0;
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
- mutex_lock(&indio_dev->mlock);
ad_sigma_delta_set_channel(sigma_delta, chan->address);
spi_bus_lock(sigma_delta->spi->master);
@@ -325,7 +325,7 @@ out:
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
sigma_delta->bus_locked = false;
spi_bus_unlock(sigma_delta->spi->master);
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
if (ret)
return ret;
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 8854da453669..e84a4c5150e2 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -73,7 +73,7 @@
#define AT91_SAMA5D2_MR_ANACH BIT(23)
/* Tracking Time */
#define AT91_SAMA5D2_MR_TRACKTIM(v) ((v) << 24)
-#define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xff
+#define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xf
/* Transfer Time */
#define AT91_SAMA5D2_MR_TRANSFER(v) ((v) << 28)
#define AT91_SAMA5D2_MR_TRANSFER_MAX 0x3
@@ -983,7 +983,7 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
trig = devm_iio_trigger_alloc(&indio->dev, "%s-dev%d-%s", indio->name,
indio->id, trigger_name);
if (!trig)
- return NULL;
+ return ERR_PTR(-ENOMEM);
trig->dev.parent = indio->dev.parent;
iio_trigger_set_drvdata(trig, indio);
@@ -1321,10 +1321,12 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
ret = at91_adc_read_position(st, chan->channel,
&tmp_val);
*val = tmp_val;
+ if (ret > 0)
+ ret = at91_adc_adjust_val_osr(st, val);
mutex_unlock(&st->lock);
iio_device_release_direct_mode(indio_dev);
- return at91_adc_adjust_val_osr(st, val);
+ return ret;
}
if (chan->type == IIO_PRESSURE) {
ret = iio_device_claim_direct_mode(indio_dev);
@@ -1335,10 +1337,12 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
ret = at91_adc_read_pressure(st, chan->channel,
&tmp_val);
*val = tmp_val;
+ if (ret > 0)
+ ret = at91_adc_adjust_val_osr(st, val);
mutex_unlock(&st->lock);
iio_device_release_direct_mode(indio_dev);
- return at91_adc_adjust_val_osr(st, val);
+ return ret;
}
/* in this case we have a voltage channel */
@@ -1429,16 +1433,20 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev,
/* if no change, optimize out */
if (val == st->oversampling_ratio)
return 0;
+ mutex_lock(&st->lock);
st->oversampling_ratio = val;
/* update ratio */
at91_adc_config_emr(st);
+ mutex_unlock(&st->lock);
return 0;
case IIO_CHAN_INFO_SAMP_FREQ:
if (val < st->soc_info.min_sample_rate ||
val > st->soc_info.max_sample_rate)
return -EINVAL;
+ mutex_lock(&st->lock);
at91_adc_setup_samp_freq(indio_dev, val);
+ mutex_unlock(&st->lock);
return 0;
default:
return -EINVAL;
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index abe99856c823..cc81b35ca47a 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -616,8 +616,10 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *idev,
trig->ops = &at91_adc_trigger_ops;
ret = iio_trigger_register(trig);
- if (ret)
+ if (ret) {
+ iio_trigger_free(trig);
return NULL;
+ }
return trig;
}
diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c
index 72d8fa94ab31..86fdfea32979 100644
--- a/drivers/iio/adc/berlin2-adc.c
+++ b/drivers/iio/adc/berlin2-adc.c
@@ -289,8 +289,10 @@ static int berlin2_adc_probe(struct platform_device *pdev)
int ret;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
- if (!indio_dev)
+ if (!indio_dev) {
+ of_node_put(parent_np);
return -ENOMEM;
+ }
priv = iio_priv(indio_dev);
platform_set_drvdata(pdev, indio_dev);
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 42a3ced11fbd..234f9c37aa10 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -804,16 +804,26 @@ static int exynos_adc_probe(struct platform_device *pdev)
}
}
+ /* leave out any TS related code if unreachable */
+ if (IS_REACHABLE(CONFIG_INPUT)) {
+ has_ts = of_property_read_bool(pdev->dev.of_node,
+ "has-touchscreen") || pdata;
+ }
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
info->irq = irq;
- irq = platform_get_irq(pdev, 1);
- if (irq == -EPROBE_DEFER)
- return irq;
+ if (has_ts) {
+ irq = platform_get_irq(pdev, 1);
+ if (irq == -EPROBE_DEFER)
+ return irq;
- info->tsirq = irq;
+ info->tsirq = irq;
+ } else {
+ info->tsirq = -1;
+ }
info->dev = &pdev->dev;
@@ -880,12 +890,6 @@ static int exynos_adc_probe(struct platform_device *pdev)
if (info->data->init_hw)
info->data->init_hw(info);
- /* leave out any TS related code if unreachable */
- if (IS_REACHABLE(CONFIG_INPUT)) {
- has_ts = of_property_read_bool(pdev->dev.of_node,
- "has-touchscreen") || pdata;
- }
-
if (pdata)
info->delay = pdata->delay;
else
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index dd52f08ec82e..4e2e8e819b1e 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -38,8 +38,8 @@
#define MCP3911_CHANNEL(x) (MCP3911_REG_CHANNEL0 + x * 3)
#define MCP3911_OFFCAL(x) (MCP3911_REG_OFFCAL_CH0 + x * 6)
-/* Internal voltage reference in uV */
-#define MCP3911_INT_VREF_UV 1200000
+/* Internal voltage reference in mV */
+#define MCP3911_INT_VREF_MV 1200
#define MCP3911_REG_READ(reg, id) ((((reg) << 1) | ((id) << 5) | (1 << 0)) & 0xff)
#define MCP3911_REG_WRITE(reg, id) ((((reg) << 1) | ((id) << 5) | (0 << 0)) & 0xff)
@@ -111,6 +111,8 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
if (ret)
goto out;
+ *val = sign_extend32(*val, 23);
+
ret = IIO_VAL_INT;
break;
@@ -135,11 +137,18 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
*val = ret / 1000;
} else {
- *val = MCP3911_INT_VREF_UV;
+ *val = MCP3911_INT_VREF_MV;
}
- *val2 = 24;
- ret = IIO_VAL_FRACTIONAL_LOG2;
+ /*
+ * For 24-bit conversion:
+ * Raw = (Voltage / Vref) * 2^23 * Gain * 1.5
+ * Voltage = Raw * Vref / (2^23 * Gain * 1.5)
+ */
+
+ /* val2 = (2^23 * 1.5) */
+ *val2 = 12582912;
+ ret = IIO_VAL_FRACTIONAL;
break;
}
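
With IIO_VAL_FRACTIONAL the reported scale is val/val2, so together with the sign extension added above, userspace converts a reading as voltage = raw * Vref / (2^23 * 1.5). A hedged arithmetic sketch, assuming the internal 1200 mV reference and gain 1:

    /* scale = 1200 / 12582912 mV per LSB, since 12582912 = 2^23 * 1.5.
     * A raw code of 2^23 (8388608) therefore corresponds to
     *     8388608 * 1200 / 12582912 = 800 mV,
     * i.e. full scale is Vref / 1.5.
     */
    int raw = 8388608;                                   /* 2^23, illustrative */
    s64 uv = div_s64((s64)raw * 1200 * 1000, 12582912);  /* = 800000 uV */
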
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 7b27306330a3..1a82be03624c 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -71,7 +71,7 @@
#define MESON_SAR_ADC_REG3_PANEL_DETECT_COUNT_MASK GENMASK(20, 18)
#define MESON_SAR_ADC_REG3_PANEL_DETECT_FILTER_TB_MASK GENMASK(17, 16)
#define MESON_SAR_ADC_REG3_ADC_CLK_DIV_SHIFT 10
- #define MESON_SAR_ADC_REG3_ADC_CLK_DIV_WIDTH 5
+ #define MESON_SAR_ADC_REG3_ADC_CLK_DIV_WIDTH 6
#define MESON_SAR_ADC_REG3_BLOCK_DLY_SEL_MASK GENMASK(9, 8)
#define MESON_SAR_ADC_REG3_BLOCK_DLY_MASK GENMASK(7, 0)
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index 01f85bbe6a2e..e681adaedcf6 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -760,13 +760,13 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
ret = mxs_lradc_adc_trigger_init(iio);
if (ret)
- goto err_trig;
+ return ret;
ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time,
&mxs_lradc_adc_trigger_handler,
&mxs_lradc_adc_buffer_ops);
if (ret)
- return ret;
+ goto err_trig;
adc->vref_mv = mxs_lradc_adc_vref_mv[lradc->soc];
@@ -804,9 +804,9 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
err_dev:
mxs_lradc_adc_hw_stop(adc);
- mxs_lradc_adc_trigger_remove(iio);
-err_trig:
iio_triggered_buffer_cleanup(iio);
+err_trig:
+ mxs_lradc_adc_trigger_remove(iio);
return ret;
}
@@ -817,8 +817,8 @@ static int mxs_lradc_adc_remove(struct platform_device *pdev)
iio_device_unregister(iio);
mxs_lradc_adc_hw_stop(adc);
- mxs_lradc_adc_trigger_remove(iio);
iio_triggered_buffer_cleanup(iio);
+ mxs_lradc_adc_trigger_remove(iio);
return 0;
}
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 2bd785e9e42a..4861093b0764 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -630,7 +630,7 @@ out:
static int palmas_gpadc_remove(struct platform_device *pdev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(&pdev->dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(&pdev->dev);
struct palmas_gpadc *adc = iio_priv(indio_dev);
if (adc->wakeup1_enable || adc->wakeup2_enable)
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index c2948defa785..0c92eb30f0d8 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -1544,6 +1544,7 @@ static const struct of_device_id stm32_dfsdm_adc_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, stm32_dfsdm_adc_match);
static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
{
diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
index 4267d756cd50..c1de80bd82f9 100644
--- a/drivers/iio/adc/ti-adc128s052.c
+++ b/drivers/iio/adc/ti-adc128s052.c
@@ -194,13 +194,13 @@ static int adc128_remove(struct spi_device *spi)
}
static const struct of_device_id adc128_of_match[] = {
- { .compatible = "ti,adc128s052", },
- { .compatible = "ti,adc122s021", },
- { .compatible = "ti,adc122s051", },
- { .compatible = "ti,adc122s101", },
- { .compatible = "ti,adc124s021", },
- { .compatible = "ti,adc124s051", },
- { .compatible = "ti,adc124s101", },
+ { .compatible = "ti,adc128s052", .data = (void*)0L, },
+ { .compatible = "ti,adc122s021", .data = (void*)1L, },
+ { .compatible = "ti,adc122s051", .data = (void*)1L, },
+ { .compatible = "ti,adc122s101", .data = (void*)1L, },
+ { .compatible = "ti,adc124s021", .data = (void*)2L, },
+ { .compatible = "ti,adc124s051", .data = (void*)2L, },
+ { .compatible = "ti,adc124s101", .data = (void*)2L, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, adc128_of_match);
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 7a1a9fe47072..1dee372e4199 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -635,6 +635,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
st->chip.label = dev_name(&st->spi->dev);
st->chip.parent = &st->spi->dev;
st->chip.owner = THIS_MODULE;
+ st->chip.can_sleep = true;
st->chip.base = -1;
st->chip.ngpio = TI_ADS7950_NUM_GPIOS;
st->chip.get_direction = ti_ads7950_get_direction;
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 9d984f2a8ba7..38d8a01d9f95 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -656,8 +656,10 @@ static int tiadc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
err = tiadc_request_dma(pdev, adc_dev);
- if (err && err == -EPROBE_DEFER)
+ if (err && err != -ENODEV) {
+ dev_err_probe(&pdev->dev, err, "DMA request failed\n");
goto err_dma;
+ }
return 0;
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index fb77c3ff5a3e..0a7608174758 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -57,6 +57,18 @@
#define TWL6030_GPADCS BIT(1)
#define TWL6030_GPADCR BIT(0)
+#define USB_VBUS_CTRL_SET 0x04
+#define USB_ID_CTRL_SET 0x06
+
+#define TWL6030_MISC1 0xE4
+#define VBUS_MEAS 0x01
+#define ID_MEAS 0x01
+
+#define VAC_MEAS 0x04
+#define VBAT_MEAS 0x02
+#define BB_MEAS 0x01
+
+
/**
* struct twl6030_chnl_calib - channel calibration
* @gain: slope coefficient for ideal curve
@@ -927,6 +939,26 @@ static int twl6030_gpadc_probe(struct platform_device *pdev)
return ret;
}
+ ret = twl_i2c_write_u8(TWL_MODULE_USB, VBUS_MEAS, USB_VBUS_CTRL_SET);
+ if (ret < 0) {
+ dev_err(dev, "failed to wire up inputs\n");
+ return ret;
+ }
+
+ ret = twl_i2c_write_u8(TWL_MODULE_USB, ID_MEAS, USB_ID_CTRL_SET);
+ if (ret < 0) {
+ dev_err(dev, "failed to wire up inputs\n");
+ return ret;
+ }
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID0,
+ VBAT_MEAS | BB_MEAS | VAC_MEAS,
+ TWL6030_MISC1);
+ if (ret < 0) {
+ dev_err(dev, "failed to wire up inputs\n");
+ return ret;
+ }
+
indio_dev->name = DRIVER_NAME;
indio_dev->dev.parent = dev;
indio_dev->info = &twl6030_gpadc_iio_info;
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
new file mode 100644
index 000000000000..c21c46512d5f
--- /dev/null
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -0,0 +1,1109 @@
+/*
+ * Xilinx AMS driver
+ *
+ * Licensed under the GPL-2
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/iopoll.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/buffer.h>
+#include <linux/io.h>
+
+#include "xilinx-ams.h"
+#include <linux/delay.h>
+
+static const unsigned int AMS_UNMASK_TIMEOUT = 500;
+
+static inline void ams_read_reg(struct ams *ams, unsigned int offset, u32 *data)
+{
+ *data = readl(ams->base + offset);
+}
+
+static inline void ams_write_reg(struct ams *ams, unsigned int offset, u32 data)
+{
+ writel(data, ams->base + offset);
+}
+
+static inline void ams_update_reg(struct ams *ams, unsigned int offset,
+ u32 mask, u32 data)
+{
+ u32 val;
+
+ ams_read_reg(ams, offset, &val);
+ ams_write_reg(ams, offset, (val & ~mask) | (mask & data));
+}
+
+static inline void ams_ps_read_reg(struct ams *ams, unsigned int offset,
+ u32 *data)
+{
+ *data = readl(ams->ps_base + offset);
+}
+
+static inline void ams_ps_write_reg(struct ams *ams, unsigned int offset,
+ u32 data)
+{
+ writel(data, ams->ps_base + offset);
+}
+
+static inline void ams_ps_update_reg(struct ams *ams, unsigned int offset,
+ u32 mask, u32 data)
+{
+ u32 val;
+
+ ams_ps_read_reg(ams, offset, &val);
+ ams_ps_write_reg(ams, offset, (val & ~mask) | (data & mask));
+}
+
+static inline void ams_apb_pl_read_reg(struct ams *ams, unsigned int offset,
+ u32 *data)
+{
+ *data = readl(ams->pl_base + offset);
+}
+
+static inline void ams_apb_pl_write_reg(struct ams *ams, unsigned int offset,
+ u32 data)
+{
+ writel(data, ams->pl_base + offset);
+}
+
+static inline void ams_apb_pl_update_reg(struct ams *ams, unsigned int offset,
+ u32 mask, u32 data)
+{
+ u32 val;
+
+ ams_apb_pl_read_reg(ams, offset, &val);
+ ams_apb_pl_write_reg(ams, offset, (val & ~mask) | (data & mask));
+}
+
+static void ams_update_intrmask(struct ams *ams, u64 mask, u64 val)
+{
+ /* The intr_mask field in struct ams represents bits in the AMS
+ * registers IDR0 and IDR1: the first 32 bits map to IDR0, the
+ * remaining bits to IDR1.
+ */
+ ams->intr_mask &= ~mask;
+ ams->intr_mask |= (val & mask);
+
+ ams_write_reg(ams, AMS_IER_0, ~(ams->intr_mask | ams->masked_alarm));
+ ams_write_reg(ams, AMS_IER_1,
+ ~(ams->intr_mask >> AMS_ISR1_INTR_MASK_SHIFT));
+ ams_write_reg(ams, AMS_IDR_0, ams->intr_mask | ams->masked_alarm);
+ ams_write_reg(ams, AMS_IDR_1,
+ ams->intr_mask >> AMS_ISR1_INTR_MASK_SHIFT);
+}
+
+static void iio_ams_disable_all_alarm(struct ams *ams)
+{
+ /* disable PS module alarm */
+ if (ams->ps_base) {
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
+ AMS_REGCFG1_ALARM_MASK);
+ ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
+ AMS_REGCFG3_ALARM_MASK);
+ }
+
+ /* disable PL module alarm */
+ if (ams->pl_base) {
+ ams->pl_bus->update(ams, AMS_REG_CONFIG1,
+ AMS_REGCFG1_ALARM_MASK,
+ AMS_REGCFG1_ALARM_MASK);
+ ams->pl_bus->update(ams, AMS_REG_CONFIG3,
+ AMS_REGCFG3_ALARM_MASK,
+ AMS_REGCFG3_ALARM_MASK);
+ }
+}
+
+static void iio_ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
+{
+ u32 cfg;
+ unsigned long flags;
+ unsigned long pl_alarm_mask;
+
+ if (ams->ps_base) {
+ /* Configuring PS alarm enable */
+ cfg = ~((alarm_mask & AMS_ISR0_ALARM_2_TO_0_MASK) <<
+ AMS_CONF1_ALARM_2_TO_0_SHIFT);
+ cfg &= ~((alarm_mask & AMS_ISR0_ALARM_6_TO_3_MASK) <<
+ AMS_CONF1_ALARM_6_TO_3_SHIFT);
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
+ cfg);
+
+ cfg = ~((alarm_mask >> AMS_CONF3_ALARM_12_TO_7_SHIFT) &
+ AMS_ISR0_ALARM_12_TO_7_MASK);
+ ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
+ cfg);
+ }
+
+ if (ams->pl_base) {
+ pl_alarm_mask = (alarm_mask >> AMS_PL_ALARM_START);
+ /* Configuring PL alarm enable */
+ cfg = ~((pl_alarm_mask & AMS_ISR0_ALARM_2_TO_0_MASK) <<
+ AMS_CONF1_ALARM_2_TO_0_SHIFT);
+ cfg &= ~((pl_alarm_mask & AMS_ISR0_ALARM_6_TO_3_MASK) <<
+ AMS_CONF1_ALARM_6_TO_3_SHIFT);
+ ams->pl_bus->update(ams, AMS_REG_CONFIG1,
+ AMS_REGCFG1_ALARM_MASK, cfg);
+
+ cfg = ~((pl_alarm_mask >> AMS_CONF3_ALARM_12_TO_7_SHIFT) &
+ AMS_ISR0_ALARM_12_TO_7_MASK);
+ ams->pl_bus->update(ams, AMS_REG_CONFIG3,
+ AMS_REGCFG3_ALARM_MASK, cfg);
+ }
+
+ spin_lock_irqsave(&ams->lock, flags);
+ ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask);
+ spin_unlock_irqrestore(&ams->lock, flags);
+}
+
+static void ams_enable_channel_sequence(struct ams *ams)
+{
+ int i;
+ unsigned long long scan_mask;
+ struct iio_dev *indio_dev = iio_priv_to_dev(ams);
+
+ /* Enable the channel sequence. The first 22 bits of scan_mask
+ * represent PS channels; the remaining bits represent PL channels.
+ */
+
+ /* Run calibration of PS & PL as part of the sequence */
+ scan_mask = 1 | (1 << PS_SEQ_MAX);
+ for (i = 0; i < indio_dev->num_channels; i++)
+ scan_mask |= BIT(indio_dev->channels[i].scan_index);
+
+ if (ams->ps_base) {
+ /* put sysmon in a soft reset to change the sequence */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+
+ /* configure basic channels */
+ ams_ps_write_reg(ams, AMS_REG_SEQ_CH0,
+ scan_mask & AMS_REG_SEQ0_MASK);
+ ams_ps_write_reg(ams, AMS_REG_SEQ_CH2, AMS_REG_SEQ2_MASK &
+ (scan_mask >> AMS_REG_SEQ2_MASK_SHIFT));
+
+ /* set continuous sequence mode */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_CONTINUOUS);
+ }
+
+ if (ams->pl_base) {
+ /* put sysmon in a soft reset to change the sequence */
+ ams->pl_bus->update(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+
+ /* configure basic channels */
+ scan_mask = (scan_mask >> PS_SEQ_MAX);
+ ams->pl_bus->write(ams, AMS_REG_SEQ_CH0,
+ scan_mask & AMS_REG_SEQ0_MASK);
+ ams->pl_bus->write(ams, AMS_REG_SEQ_CH2, AMS_REG_SEQ2_MASK &
+ (scan_mask >> AMS_REG_SEQ2_MASK_SHIFT));
+ ams->pl_bus->write(ams, AMS_REG_SEQ_CH1, AMS_REG_SEQ1_MASK &
+ (scan_mask >> AMS_REG_SEQ1_MASK_SHIFT));
+
+ /* set continuous sequence mode */
+ ams->pl_bus->update(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_CONTINUOUS);
+ }
+}
+
+static int iio_ams_init_device(struct ams *ams)
+{
+ int ret = 0;
+ u32 reg;
+
+ /* reset AMS */
+ if (ams->ps_base) {
+ ams_ps_write_reg(ams, AMS_VP_VN, AMS_PS_RESET_VALUE);
+
+ ret = readl_poll_timeout(ams->base + AMS_PS_CSTS, reg,
+ (reg & AMS_PS_CSTS_PS_READY) ==
+ AMS_PS_CSTS_PS_READY, 0,
+ AMS_INIT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* put sysmon in a default state */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+ }
+
+ if (ams->pl_base) {
+ ams->pl_bus->write(ams, AMS_VP_VN, AMS_PL_RESET_VALUE);
+
+ ret = readl_poll_timeout(ams->base + AMS_PL_CSTS, reg,
+ (reg & AMS_PL_CSTS_ACCESS_MASK) ==
+ AMS_PL_CSTS_ACCESS_MASK, 0,
+ AMS_INIT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* put sysmon in a default state */
+ ams->pl_bus->update(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+ }
+
+ iio_ams_disable_all_alarm(ams);
+
+ /* Disable interrupt */
+ ams_update_intrmask(ams, ~0, ~0);
+
+ /* Clear any pending interrupt */
+ ams_write_reg(ams, AMS_ISR_0, AMS_ISR0_ALARM_MASK);
+ ams_write_reg(ams, AMS_ISR_1, AMS_ISR1_ALARM_MASK);
+
+ return ret;
+}
+
+static void ams_enable_single_channel(struct ams *ams, unsigned int offset)
+{
+ u8 channel_num = 0;
+
+ switch (offset) {
+ case AMS_VCC_PSPLL0:
+ channel_num = AMS_VCC_PSPLL0_CH;
+ break;
+ case AMS_VCC_PSPLL3:
+ channel_num = AMS_VCC_PSPLL3_CH;
+ break;
+ case AMS_VCCINT:
+ channel_num = AMS_VCCINT_CH;
+ break;
+ case AMS_VCCBRAM:
+ channel_num = AMS_VCCBRAM_CH;
+ break;
+ case AMS_VCCAUX:
+ channel_num = AMS_VCCAUX_CH;
+ break;
+ case AMS_PSDDRPLL:
+ channel_num = AMS_PSDDRPLL_CH;
+ break;
+ case AMS_PSINTFPDDR:
+ channel_num = AMS_PSINTFPDDR_CH;
+ break;
+ default:
+ break;
+ }
+
+ /* set single channel, sequencer off mode */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_SINGLE_CHANNEL);
+
+ /* write the channel number */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG0, AMS_CONF0_CHANNEL_NUM_MASK,
+ channel_num);
+ mdelay(1);
+}
+
+static void ams_read_vcc_reg(struct ams *ams, unsigned int offset, u32 *data)
+{
+ ams_enable_single_channel(ams, offset);
+ ams_read_reg(ams, offset, data);
+ ams_enable_channel_sequence(ams);
+}
+
+static int ams_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ams *ams = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&ams->mutex);
+ if (chan->scan_index >= (PS_SEQ_MAX * 3))
+ ams_read_vcc_reg(ams, chan->address, val);
+ else if (chan->scan_index >= PS_SEQ_MAX)
+ ams->pl_bus->read(ams, chan->address, val);
+ else
+ ams_ps_read_reg(ams, chan->address, val);
+ mutex_unlock(&ams->mutex);
+
+ *val2 = 0;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ switch (chan->address) {
+ case AMS_SUPPLY1:
+ case AMS_SUPPLY2:
+ case AMS_SUPPLY3:
+ case AMS_SUPPLY4:
+ *val = AMS_SUPPLY_SCALE_3VOLT;
+ break;
+ case AMS_SUPPLY5:
+ case AMS_SUPPLY6:
+ if (chan->scan_index < PS_SEQ_MAX)
+ *val = AMS_SUPPLY_SCALE_6VOLT;
+ else
+ *val = AMS_SUPPLY_SCALE_3VOLT;
+ break;
+ case AMS_SUPPLY7:
+ case AMS_SUPPLY8:
+ *val = AMS_SUPPLY_SCALE_6VOLT;
+ break;
+ case AMS_SUPPLY9:
+ case AMS_SUPPLY10:
+ if (chan->scan_index < PS_SEQ_MAX)
+ *val = AMS_SUPPLY_SCALE_3VOLT;
+ else
+ *val = AMS_SUPPLY_SCALE_6VOLT;
+ break;
+ default:
+ if (chan->scan_index >= (PS_SEQ_MAX * 3))
+ *val = AMS_SUPPLY_SCALE_3VOLT;
+ else
+ *val = AMS_SUPPLY_SCALE_1VOLT;
+ break;
+ }
+ *val2 = AMS_SUPPLY_SCALE_DIV_BIT;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
+ *val = AMS_TEMP_SCALE;
+ *val2 = AMS_TEMP_SCALE_DIV_BIT;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ /* Only the temperature channel has an offset */
+ *val = AMS_TEMP_OFFSET;
+ *val2 = 0;
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static int ams_get_alarm_offset(int scan_index, enum iio_event_direction dir)
+{
+ int offset = 0;
+
+ if (scan_index >= PS_SEQ_MAX)
+ scan_index -= PS_SEQ_MAX;
+
+ if (dir == IIO_EV_DIR_FALLING) {
+ if (scan_index < AMS_SEQ_SUPPLY7)
+ offset = AMS_ALARM_THRESOLD_OFF_10;
+ else
+ offset = AMS_ALARM_THRESOLD_OFF_20;
+ }
+
+ switch (scan_index) {
+ case AMS_SEQ_TEMP:
+ return (AMS_ALARM_TEMP + offset);
+ case AMS_SEQ_SUPPLY1:
+ return (AMS_ALARM_SUPPLY1 + offset);
+ case AMS_SEQ_SUPPLY2:
+ return (AMS_ALARM_SUPPLY2 + offset);
+ case AMS_SEQ_SUPPLY3:
+ return (AMS_ALARM_SUPPLY3 + offset);
+ case AMS_SEQ_SUPPLY4:
+ return (AMS_ALARM_SUPPLY4 + offset);
+ case AMS_SEQ_SUPPLY5:
+ return (AMS_ALARM_SUPPLY5 + offset);
+ case AMS_SEQ_SUPPLY6:
+ return (AMS_ALARM_SUPPLY6 + offset);
+ case AMS_SEQ_SUPPLY7:
+ return (AMS_ALARM_SUPPLY7 + offset);
+ case AMS_SEQ_SUPPLY8:
+ return (AMS_ALARM_SUPPLY8 + offset);
+ case AMS_SEQ_SUPPLY9:
+ return (AMS_ALARM_SUPPLY9 + offset);
+ case AMS_SEQ_SUPPLY10:
+ return (AMS_ALARM_SUPPLY10 + offset);
+ case AMS_SEQ_VCCAMS:
+ return (AMS_ALARM_VCCAMS + offset);
+ case AMS_SEQ_TEMP_REMOTE:
+ return (AMS_ALARM_TEMP_REMOTE + offset);
+ }
+
+ return 0;
+}
+
+static const struct iio_chan_spec *ams_event_to_channel(
+ struct iio_dev *indio_dev, u32 event)
+{
+ int scan_index = 0, i;
+
+ if (event >= AMS_PL_ALARM_START) {
+ event -= AMS_PL_ALARM_START;
+ scan_index = PS_SEQ_MAX;
+ }
+
+ switch (event) {
+ case AMS_ALARM_BIT_TEMP:
+ scan_index += AMS_SEQ_TEMP;
+ break;
+ case AMS_ALARM_BIT_SUPPLY1:
+ scan_index += AMS_SEQ_SUPPLY1;
+ break;
+ case AMS_ALARM_BIT_SUPPLY2:
+ scan_index += AMS_SEQ_SUPPLY2;
+ break;
+ case AMS_ALARM_BIT_SUPPLY3:
+ scan_index += AMS_SEQ_SUPPLY3;
+ break;
+ case AMS_ALARM_BIT_SUPPLY4:
+ scan_index += AMS_SEQ_SUPPLY4;
+ break;
+ case AMS_ALARM_BIT_SUPPLY5:
+ scan_index += AMS_SEQ_SUPPLY5;
+ break;
+ case AMS_ALARM_BIT_SUPPLY6:
+ scan_index += AMS_SEQ_SUPPLY6;
+ break;
+ case AMS_ALARM_BIT_SUPPLY7:
+ scan_index += AMS_SEQ_SUPPLY7;
+ break;
+ case AMS_ALARM_BIT_SUPPLY8:
+ scan_index += AMS_SEQ_SUPPLY8;
+ break;
+ case AMS_ALARM_BIT_SUPPLY9:
+ scan_index += AMS_SEQ_SUPPLY9;
+ break;
+ case AMS_ALARM_BIT_SUPPLY10:
+ scan_index += AMS_SEQ_SUPPLY10;
+ break;
+ case AMS_ALARM_BIT_VCCAMS:
+ scan_index += AMS_SEQ_VCCAMS;
+ break;
+ case AMS_ALARM_BIT_TEMP_REMOTE:
+ scan_index += AMS_SEQ_TEMP_REMOTE;
+ break;
+ }
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ if (indio_dev->channels[i].scan_index == scan_index)
+ break;
+
+ return &indio_dev->channels[i];
+}
+
+static int ams_get_alarm_mask(int scan_index)
+{
+ int bit = 0;
+
+ if (scan_index >= PS_SEQ_MAX) {
+ bit = AMS_PL_ALARM_START;
+ scan_index -= PS_SEQ_MAX;
+ }
+
+ switch (scan_index) {
+ case AMS_SEQ_TEMP:
+ return BIT(AMS_ALARM_BIT_TEMP + bit);
+ case AMS_SEQ_SUPPLY1:
+ return BIT(AMS_ALARM_BIT_SUPPLY1 + bit);
+ case AMS_SEQ_SUPPLY2:
+ return BIT(AMS_ALARM_BIT_SUPPLY2 + bit);
+ case AMS_SEQ_SUPPLY3:
+ return BIT(AMS_ALARM_BIT_SUPPLY3 + bit);
+ case AMS_SEQ_SUPPLY4:
+ return BIT(AMS_ALARM_BIT_SUPPLY4 + bit);
+ case AMS_SEQ_SUPPLY5:
+ return BIT(AMS_ALARM_BIT_SUPPLY5 + bit);
+ case AMS_SEQ_SUPPLY6:
+ return BIT(AMS_ALARM_BIT_SUPPLY6 + bit);
+ case AMS_SEQ_SUPPLY7:
+ return BIT(AMS_ALARM_BIT_SUPPLY7 + bit);
+ case AMS_SEQ_SUPPLY8:
+ return BIT(AMS_ALARM_BIT_SUPPLY8 + bit);
+ case AMS_SEQ_SUPPLY9:
+ return BIT(AMS_ALARM_BIT_SUPPLY9 + bit);
+ case AMS_SEQ_SUPPLY10:
+ return BIT(AMS_ALARM_BIT_SUPPLY10 + bit);
+ case AMS_SEQ_VCCAMS:
+ return BIT(AMS_ALARM_BIT_VCCAMS + bit);
+ case AMS_SEQ_TEMP_REMOTE:
+ return BIT(AMS_ALARM_BIT_TEMP_REMOTE + bit);
+ }
+
+ return 0;
+}
+
+static int ams_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct ams *ams = iio_priv(indio_dev);
+
+ return (ams->alarm_mask & ams_get_alarm_mask(chan->scan_index)) ? 1 : 0;
+}
+
+static int ams_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned int alarm;
+
+ alarm = ams_get_alarm_mask(chan->scan_index);
+
+ mutex_lock(&ams->mutex);
+
+ if (state)
+ ams->alarm_mask |= alarm;
+ else
+ ams->alarm_mask &= ~alarm;
+
+ iio_ams_update_alarm(ams, ams->alarm_mask);
+
+ mutex_unlock(&ams->mutex);
+
+ return 0;
+}
+
+static int ams_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned int offset = ams_get_alarm_offset(chan->scan_index, dir);
+
+ mutex_lock(&ams->mutex);
+
+ if (chan->scan_index >= PS_SEQ_MAX)
+ ams->pl_bus->read(ams, offset, val);
+ else
+ ams_ps_read_reg(ams, offset, val);
+
+ mutex_unlock(&ams->mutex);
+
+ *val2 = 0;
+ return IIO_VAL_INT;
+}
+
+static int ams_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned int offset;
+
+ mutex_lock(&ams->mutex);
+
+ /* Set temperature channel threshold to direct threshold */
+ if (chan->type == IIO_TEMP) {
+ offset = ams_get_alarm_offset(chan->scan_index,
+ IIO_EV_DIR_FALLING);
+
+ if (chan->scan_index >= PS_SEQ_MAX)
+ ams->pl_bus->update(ams, offset,
+ AMS_ALARM_THR_DIRECT_MASK,
+ AMS_ALARM_THR_DIRECT_MASK);
+ else
+ ams_ps_update_reg(ams, offset,
+ AMS_ALARM_THR_DIRECT_MASK,
+ AMS_ALARM_THR_DIRECT_MASK);
+ }
+
+ offset = ams_get_alarm_offset(chan->scan_index, dir);
+ if (chan->scan_index >= PS_SEQ_MAX)
+ ams->pl_bus->write(ams, offset, val);
+ else
+ ams_ps_write_reg(ams, offset, val);
+
+ mutex_unlock(&ams->mutex);
+
+ return 0;
+}
+
+static void ams_handle_event(struct iio_dev *indio_dev, u32 event)
+{
+ const struct iio_chan_spec *chan;
+
+ chan = ams_event_to_channel(indio_dev, event);
+
+ if (chan->type == IIO_TEMP) {
+ /* The temperature channel only supports over-temperature
+ * events
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ } else {
+ /* For other channels we don't know whether it is an upper or
+ * lower threshold event. Userspace will have to check the
+ * channel value if it wants to know.
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(indio_dev));
+ }
+}
+
+static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events)
+{
+ unsigned int bit;
+
+ for_each_set_bit(bit, &events, AMS_NO_OF_ALARMS)
+ ams_handle_event(indio_dev, bit);
+}
+
+/**
+ * ams_unmask_worker - ams alarm interrupt unmask worker
+ * @work : work to be done
+ *
+ * The ZynqMP threshold interrupts are level sensitive. Since we can't make the
+ * threshold condition go away from within the interrupt handler, we would
+ * otherwise re-enter the interrupt handler again and again for as long as a
+ * threshold condition is present. To work around this we mask all active
+ * threshold interrupts in the interrupt handler and schedule delayed work.
+ * In this worker we poll the interrupt status and only unmask an interrupt
+ * again once it is inactive.
+ */
+static void ams_unmask_worker(struct work_struct *work)
+{
+ struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
+ unsigned int status, unmask;
+
+ spin_lock_irq(&ams->lock);
+
+ ams_read_reg(ams, AMS_ISR_0, &status);
+
+ /* Clear those bits which are not active anymore */
+ unmask = (ams->masked_alarm ^ status) & ams->masked_alarm;
+
+ /* clear status of disabled alarm */
+ unmask |= ams->intr_mask;
+
+ ams->masked_alarm &= status;
+
+ /* Also clear those which are masked out anyway */
+ ams->masked_alarm &= ~ams->intr_mask;
+
+ /* Clear the interrupts before we unmask them */
+ ams_write_reg(ams, AMS_ISR_0, unmask);
+
+ ams_update_intrmask(ams, 0, 0);
+
+ spin_unlock_irq(&ams->lock);
+
+ /* if still pending some alarm re-trigger the timer */
+ if (ams->masked_alarm)
+ schedule_delayed_work(&ams->ams_unmask_work,
+ msecs_to_jiffies(AMS_UNMASK_TIMEOUT));
+}
+
+static irqreturn_t ams_iio_irq(int irq, void *data)
+{
+ unsigned int isr0, isr1;
+ struct iio_dev *indio_dev = data;
+ struct ams *ams = iio_priv(indio_dev);
+
+ spin_lock(&ams->lock);
+
+ ams_read_reg(ams, AMS_ISR_0, &isr0);
+ ams_read_reg(ams, AMS_ISR_1, &isr1);
+
+ /* only process alarm that are not masked */
+ isr0 &= ~((ams->intr_mask & AMS_ISR0_ALARM_MASK) | ams->masked_alarm);
+ isr1 &= ~(ams->intr_mask >> AMS_ISR1_INTR_MASK_SHIFT);
+
+ /* clear interrupt */
+ ams_write_reg(ams, AMS_ISR_0, isr0);
+ ams_write_reg(ams, AMS_ISR_1, isr1);
+
+ if (isr0) {
+ /* Once the alarm interrupt occurred, mask until get cleared */
+ ams->masked_alarm |= isr0;
+ ams_update_intrmask(ams, 0, 0);
+
+ ams_handle_events(indio_dev, isr0);
+
+ schedule_delayed_work(&ams->ams_unmask_work,
+ msecs_to_jiffies(AMS_UNMASK_TIMEOUT));
+ }
+
+ spin_unlock(&ams->lock);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_event_spec ams_temp_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE),
+ },
+};
+
+static const struct iio_event_spec ams_voltage_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec ams_ps_channels[] = {
+ AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP, "ps_temp"),
+ AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP_REMOTE, AMS_TEMP_REMOTE, "remote_temp"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, "vccpsintlp"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, "vccpsintfp"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, "vccpsaux"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, "vccpsddr"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, "vccpsio3"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, "vccpsio0"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, "vccpsio1"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, "vccpsio2"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, "psmgtravcc"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, "psmgtravtt"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, "vccams"),
+};
+
+static const struct iio_chan_spec ams_pl_channels[] = {
+ AMS_PL_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP, "pl_temp"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, "vccint", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, "vccaux", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFP, AMS_VREFP, "vccvrefp", false),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFN, AMS_VREFN, "vccvrefn", false),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, "vccbram", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, "vccplintlp", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, "vccplintfp", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, "vccplaux", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, "vccams", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VP_VN, AMS_VP_VN, "vccvpvn", false),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, "vuser0", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, "vuser1", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, "vuser2", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, "vuser3", true),
+ AMS_PL_AUX_CHAN_VOLTAGE(0, "vccaux0"),
+ AMS_PL_AUX_CHAN_VOLTAGE(1, "vccaux1"),
+ AMS_PL_AUX_CHAN_VOLTAGE(2, "vccaux2"),
+ AMS_PL_AUX_CHAN_VOLTAGE(3, "vccaux3"),
+ AMS_PL_AUX_CHAN_VOLTAGE(4, "vccaux4"),
+ AMS_PL_AUX_CHAN_VOLTAGE(5, "vccaux5"),
+ AMS_PL_AUX_CHAN_VOLTAGE(6, "vccaux6"),
+ AMS_PL_AUX_CHAN_VOLTAGE(7, "vccaux7"),
+ AMS_PL_AUX_CHAN_VOLTAGE(8, "vccaux8"),
+ AMS_PL_AUX_CHAN_VOLTAGE(9, "vccaux9"),
+ AMS_PL_AUX_CHAN_VOLTAGE(10, "vccaux10"),
+ AMS_PL_AUX_CHAN_VOLTAGE(11, "vccaux11"),
+ AMS_PL_AUX_CHAN_VOLTAGE(12, "vccaux12"),
+ AMS_PL_AUX_CHAN_VOLTAGE(13, "vccaux13"),
+ AMS_PL_AUX_CHAN_VOLTAGE(14, "vccaux14"),
+ AMS_PL_AUX_CHAN_VOLTAGE(15, "vccaux15"),
+};
+
+static const struct iio_chan_spec ams_ctrl_channels[] = {
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSPLL, AMS_VCC_PSPLL0, "vcc_pspll0"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSBATT, AMS_VCC_PSPLL3, "vcc_psbatt"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCINT, AMS_VCCINT, "vccint"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCBRAM, AMS_VCCBRAM, "vccbram"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCAUX, AMS_VCCAUX, "vccaux"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_PSDDRPLL, AMS_PSDDRPLL, "vcc_psddrpll"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_INTDDR, AMS_PSINTFPDDR, "vccpsintfpddr"),
+};
+
+static int ams_init_module(struct iio_dev *indio_dev, struct device_node *np,
+ struct iio_chan_spec *channels)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ struct device_node *chan_node, *child;
+ int ret, num_channels = 0;
+ unsigned int reg;
+
+ if (of_device_is_compatible(np, "xlnx,zynqmp-ams-ps")) {
+ ams->ps_base = of_iomap(np, 0);
+ if (!ams->ps_base)
+ return -ENXIO;
+
+ /* add PS channels to iio device channels */
+ memcpy(channels + num_channels, ams_ps_channels,
+ sizeof(ams_ps_channels));
+ num_channels += ARRAY_SIZE(ams_ps_channels);
+ } else if (of_device_is_compatible(np, "xlnx,zynqmp-ams-pl")) {
+ ams->pl_base = of_iomap(np, 0);
+ if (!ams->pl_base)
+ return -ENXIO;
+
+ /* Copy only the first 10 fixed channels */
+ memcpy(channels + num_channels, ams_pl_channels,
+ AMS_PL_MAX_FIXED_CHANNEL * sizeof(*channels));
+ num_channels += AMS_PL_MAX_FIXED_CHANNEL;
+
+ chan_node = of_get_child_by_name(np, "xlnx,ext-channels");
+ if (chan_node) {
+ for_each_child_of_node(chan_node, child) {
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret || reg > AMS_PL_MAX_EXT_CHANNEL)
+ continue;
+
+ memcpy(&channels[num_channels],
+ &ams_pl_channels[reg +
+ AMS_PL_MAX_FIXED_CHANNEL],
+ sizeof(*channels));
+
+ if (of_property_read_bool(child, "xlnx,bipolar"))
+ channels[num_channels].scan_type.sign = 's';
+
+ num_channels += 1;
+ }
+ }
+ of_node_put(chan_node);
+ } else if (of_device_is_compatible(np, "xlnx,zynqmp-ams")) {
+ /* add AMS control channels to iio device channels */
+ memcpy(channels + num_channels, ams_ctrl_channels,
+ sizeof(ams_ctrl_channels));
+ num_channels += ARRAY_SIZE(ams_ctrl_channels);
+ } else {
+ return -EINVAL;
+ }
+
+ return num_channels;
+}
+
+static int ams_parse_dt(struct iio_dev *indio_dev, struct platform_device *pdev)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ struct iio_chan_spec *ams_channels, *dev_channels;
+ struct device_node *child_node = NULL, *np = pdev->dev.of_node;
+ int ret, chan_vol = 0, chan_temp = 0, i, rising_off, falling_off;
+ unsigned int num_channels = 0;
+
+ /* Initialize buffer for channel specification */
+ ams_channels = kzalloc(sizeof(ams_ps_channels) +
+ sizeof(ams_pl_channels) +
+ sizeof(ams_ctrl_channels), GFP_KERNEL);
+ if (!ams_channels)
+ return -ENOMEM;
+
+ if (of_device_is_available(np)) {
+ ret = ams_init_module(indio_dev, np, ams_channels);
+ if (ret < 0) {
+ kfree(ams_channels);
+ return ret;
+ }
+
+ num_channels += ret;
+ }
+
+ for_each_child_of_node(np, child_node) {
+ if (of_device_is_available(child_node)) {
+ ret = ams_init_module(indio_dev, child_node,
+ ams_channels + num_channels);
+ if (ret < 0) {
+ of_node_put(child_node);
+ kfree(ams_channels);
+ return ret;
+ }
+
+ num_channels += ret;
+ }
+ }
+
+ for (i = 0; i < num_channels; i++) {
+ if (ams_channels[i].type == IIO_VOLTAGE)
+ ams_channels[i].channel = chan_vol++;
+ else
+ ams_channels[i].channel = chan_temp++;
+
+ if (ams_channels[i].scan_index < (PS_SEQ_MAX * 3)) {
+ /* Set the falling and rising thresholds to min and max */
+ falling_off = ams_get_alarm_offset(
+ ams_channels[i].scan_index,
+ IIO_EV_DIR_FALLING);
+ rising_off = ams_get_alarm_offset(
+ ams_channels[i].scan_index,
+ IIO_EV_DIR_RISING);
+ if (ams_channels[i].scan_index >= PS_SEQ_MAX) {
+ ams->pl_bus->write(ams, falling_off,
+ AMS_ALARM_THR_MIN);
+ ams->pl_bus->write(ams, rising_off,
+ AMS_ALARM_THR_MAX);
+ } else {
+ ams_ps_write_reg(ams, falling_off,
+ AMS_ALARM_THR_MIN);
+ ams_ps_write_reg(ams, rising_off,
+ AMS_ALARM_THR_MAX);
+ }
+ }
+ }
+
+ dev_channels = devm_kzalloc(&pdev->dev, sizeof(*dev_channels) *
+ num_channels, GFP_KERNEL);
+ if (!dev_channels) {
+ kfree(ams_channels);
+ return -ENOMEM;
+ }
+
+ memcpy(dev_channels, ams_channels,
+ sizeof(*ams_channels) * num_channels);
+ kfree(ams_channels);
+ indio_dev->channels = dev_channels;
+ indio_dev->num_channels = num_channels;
+
+ return 0;
+}
+
+static const struct iio_info iio_pl_info = {
+ .read_raw = &ams_read_raw,
+ .read_event_config = &ams_read_event_config,
+ .write_event_config = &ams_write_event_config,
+ .read_event_value = &ams_read_event_value,
+ .write_event_value = &ams_write_event_value,
+};
+
+static const struct ams_pl_bus_ops ams_pl_apb = {
+ .read = ams_apb_pl_read_reg,
+ .write = ams_apb_pl_write_reg,
+ .update = ams_apb_pl_update_reg,
+};
+
+static const struct of_device_id ams_of_match_table[] = {
+ { .compatible = "xlnx,zynqmp-ams", &ams_pl_apb },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ams_of_match_table);
+
+static int ams_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct ams *ams;
+ struct resource *res;
+ const struct of_device_id *id;
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ id = of_match_node(ams_of_match_table, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*ams));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ams = iio_priv(indio_dev);
+ ams->pl_bus = id->data;
+ mutex_init(&ams->mutex);
+ spin_lock_init(&ams->lock);
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->name = "ams";
+
+ indio_dev->info = &iio_pl_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ams-base");
+ ams->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ams->base))
+ return PTR_ERR(ams->base);
+
+ INIT_DELAYED_WORK(&ams->ams_unmask_work, ams_unmask_worker);
+
+ ams->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ams->clk))
+ return PTR_ERR(ams->clk);
+ clk_prepare_enable(ams->clk);
+
+ ret = iio_ams_init_device(ams);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize AMS\n");
+ goto clk_disable;
+ }
+
+ ret = ams_parse_dt(indio_dev, pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failure in parsing DT\n");
+ goto clk_disable;
+ }
+
+ ams_enable_channel_sequence(ams);
+
+ ams->irq = platform_get_irq_byname(pdev, "ams-irq");
+ if (ams->irq < 0) {
+ ret = ams->irq;
+ goto clk_disable;
+ }
+ ret = devm_request_irq(&pdev->dev, ams->irq, &ams_iio_irq, 0, "ams-irq",
+ indio_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register interrupt\n");
+ goto clk_disable;
+ }
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return iio_device_register(indio_dev);
+
+clk_disable:
+ clk_disable_unprepare(ams->clk);
+ return ret;
+}
+
+static int ams_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct ams *ams = iio_priv(indio_dev);
+
+ cancel_delayed_work_sync(&ams->ams_unmask_work);
+
+ /* Unregister the device */
+ iio_device_unregister(indio_dev);
+ clk_disable_unprepare(ams->clk);
+ return 0;
+}
+
+static int __maybe_unused ams_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ams *ams = iio_priv(indio_dev);
+
+ clk_disable_unprepare(ams->clk);
+
+ return 0;
+}
+
+static int __maybe_unused ams_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ams *ams = iio_priv(indio_dev);
+
+ clk_prepare_enable(ams->clk);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ams_pm_ops, ams_suspend, ams_resume);
+
+static struct platform_driver ams_driver = {
+ .probe = ams_probe,
+ .remove = ams_remove,
+ .driver = {
+ .name = "ams",
+ .pm = &ams_pm_ops,
+ .of_match_table = ams_of_match_table,
+ },
+};
+module_platform_driver(ams_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rajnikant Bhojani <rajnikant.bhojani@xilinx.com>");
diff --git a/drivers/iio/adc/xilinx-ams.h b/drivers/iio/adc/xilinx-ams.h
new file mode 100644
index 000000000000..3d900a9df82f
--- /dev/null
+++ b/drivers/iio/adc/xilinx-ams.h
@@ -0,0 +1,278 @@
+#ifndef __XILINX_AMS_H__
+#define __XILINX_AMS_H__
+
+#define AMS_MISC_CTRL 0x000
+#define AMS_ISR_0 0x010
+#define AMS_ISR_1 0x014
+#define AMS_IMR_0 0x018
+#define AMS_IMR_1 0x01c
+#define AMS_IER_0 0x020
+#define AMS_IER_1 0x024
+#define AMS_IDR_0 0x028
+#define AMS_IDR_1 0x02c
+#define AMS_PS_CSTS 0x040
+#define AMS_PL_CSTS 0x044
+#define AMS_MON_CSTS 0x050
+
+#define AMS_VCC_PSPLL0 0x060
+#define AMS_VCC_PSPLL3 0x06C
+#define AMS_VCCINT 0x078
+#define AMS_VCCBRAM 0x07C
+#define AMS_VCCAUX 0x080
+#define AMS_PSDDRPLL 0x084
+#define AMS_PSINTFPDDR 0x09C
+
+#define AMS_VCC_PSPLL0_CH 48
+#define AMS_VCC_PSPLL3_CH 51
+#define AMS_VCCINT_CH 54
+#define AMS_VCCBRAM_CH 55
+#define AMS_VCCAUX_CH 56
+#define AMS_PSDDRPLL_CH 57
+#define AMS_PSINTFPDDR_CH 63
+
+#define AMS_REG_CONFIG0 0x100
+#define AMS_REG_CONFIG1 0x104
+#define AMS_REG_CONFIG2 0x108
+#define AMS_REG_CONFIG3 0x10C
+#define AMS_REG_CONFIG4 0x110
+#define AMS_REG_SEQ_CH0 0x120
+#define AMS_REG_SEQ_CH1 0x124
+#define AMS_REG_SEQ_CH2 0x118
+
+#define AMS_TEMP 0x000
+#define AMS_SUPPLY1 0x004
+#define AMS_SUPPLY2 0x008
+#define AMS_VP_VN 0x00c
+#define AMS_VREFP 0x010
+#define AMS_VREFN 0x014
+#define AMS_SUPPLY3 0x018
+#define AMS_SUPPLY4 0x034
+#define AMS_SUPPLY5 0x038
+#define AMS_SUPPLY6 0x03c
+#define AMS_SUPPLY7 0x200
+#define AMS_SUPPLY8 0x204
+#define AMS_SUPPLY9 0x208
+#define AMS_SUPPLY10 0x20c
+#define AMS_VCCAMS 0x210
+#define AMS_TEMP_REMOTE 0x214
+
+#define AMS_REG_VAUX(x) (0x40 + (4*(x)))
+#define AMS_REG_VUSER(x) (0x200 + (4*(x)))
+
+#define AMS_PS_RESET_VALUE 0xFFFFU
+#define AMS_PL_RESET_VALUE 0xFFFFU
+
+#define AMS_CONF0_CHANNEL_NUM_MASK (0x3f << 0)
+
+#define AMS_CONF1_SEQ_MASK (0xf << 12)
+#define AMS_CONF1_SEQ_DEFAULT (0 << 12)
+#define AMS_CONF1_SEQ_SINGLE_PASS (1 << 12)
+#define AMS_CONF1_SEQ_CONTINUOUS (2 << 12)
+#define AMS_CONF1_SEQ_SINGLE_CHANNEL (3 << 12)
+
+#define AMS_REG_SEQ0_MASK 0xFFFF
+#define AMS_REG_SEQ2_MASK 0x3F
+#define AMS_REG_SEQ1_MASK 0xFFFF
+#define AMS_REG_SEQ2_MASK_SHIFT 16
+#define AMS_REG_SEQ1_MASK_SHIFT 22
+
+#define AMS_REGCFG1_ALARM_MASK 0xF0F
+#define AMS_REGCFG3_ALARM_MASK 0x3F
+
+#define AMS_ALARM_TEMP 0x140
+#define AMS_ALARM_SUPPLY1 0x144
+#define AMS_ALARM_SUPPLY2 0x148
+#define AMS_ALARM_OT 0x14c
+
+#define AMS_ALARM_SUPPLY3 0x160
+#define AMS_ALARM_SUPPLY4 0x164
+#define AMS_ALARM_SUPPLY5 0x168
+#define AMS_ALARM_SUPPLY6 0x16c
+#define AMS_ALARM_SUPPLY7 0x180
+#define AMS_ALARM_SUPPLY8 0x184
+#define AMS_ALARM_SUPPLY9 0x188
+#define AMS_ALARM_SUPPLY10 0x18c
+#define AMS_ALARM_VCCAMS 0x190
+#define AMS_ALARM_TEMP_REMOTE 0x194
+#define AMS_ALARM_THRESOLD_OFF_10 0x10
+#define AMS_ALARM_THRESOLD_OFF_20 0x20
+
+#define AMS_ALARM_THR_DIRECT_MASK 0x01
+#define AMS_ALARM_THR_MIN 0x0000
+#define AMS_ALARM_THR_MAX 0xffff
+
+#define AMS_NO_OF_ALARMS 32
+#define AMS_PL_ALARM_START 16
+#define AMS_ISR0_ALARM_MASK 0xFFFFFFFFU
+#define AMS_ISR1_ALARM_MASK 0xE000001FU
+#define AMS_ISR1_INTR_MASK_SHIFT 32
+#define AMS_ISR0_ALARM_2_TO_0_MASK 0x07
+#define AMS_ISR0_ALARM_6_TO_3_MASK 0x78
+#define AMS_ISR0_ALARM_12_TO_7_MASK 0x3F
+#define AMS_CONF1_ALARM_2_TO_0_SHIFT 1
+#define AMS_CONF1_ALARM_6_TO_3_SHIFT 5
+#define AMS_CONF3_ALARM_12_TO_7_SHIFT 8
+
+#define AMS_PS_CSTS_PS_READY 0x08010000U
+#define AMS_PL_CSTS_ACCESS_MASK 0x00000001U
+
+#define AMS_PL_MAX_FIXED_CHANNEL 10
+#define AMS_PL_MAX_EXT_CHANNEL 20
+
+#define AMS_INIT_TIMEOUT 10000
+
+/* The following scale and offset values are derived from
+ * UG580 (v1.7), December 20, 2016
+ */
+#define AMS_SUPPLY_SCALE_1VOLT 1000
+#define AMS_SUPPLY_SCALE_3VOLT 3000
+#define AMS_SUPPLY_SCALE_6VOLT 6000
+#define AMS_SUPPLY_SCALE_DIV_BIT 16
+
+#define AMS_TEMP_SCALE 509314
+#define AMS_TEMP_SCALE_DIV_BIT 16
+#define AMS_TEMP_OFFSET -((280230L << 16) / 509314)
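+/* With IIO's (raw + offset) * scale convention this yields roughly
+ * raw * 509314 / 2^16 - 280230 milli-degrees Celsius.
+ */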
+
+enum ams_alarm_bit {
+ AMS_ALARM_BIT_TEMP,
+ AMS_ALARM_BIT_SUPPLY1,
+ AMS_ALARM_BIT_SUPPLY2,
+ AMS_ALARM_BIT_SUPPLY3,
+ AMS_ALARM_BIT_SUPPLY4,
+ AMS_ALARM_BIT_SUPPLY5,
+ AMS_ALARM_BIT_SUPPLY6,
+ AMS_ALARM_BIT_RESERVED,
+ AMS_ALARM_BIT_SUPPLY7,
+ AMS_ALARM_BIT_SUPPLY8,
+ AMS_ALARM_BIT_SUPPLY9,
+ AMS_ALARM_BIT_SUPPLY10,
+ AMS_ALARM_BIT_VCCAMS,
+ AMS_ALARM_BIT_TEMP_REMOTE
+};
+
+enum ams_seq {
+ AMS_SEQ_VCC_PSPLL,
+ AMS_SEQ_VCC_PSBATT,
+ AMS_SEQ_VCCINT,
+ AMS_SEQ_VCCBRAM,
+ AMS_SEQ_VCCAUX,
+ AMS_SEQ_PSDDRPLL,
+ AMS_SEQ_INTDDR
+};
+
+enum ams_ps_pl_seq {
+ AMS_SEQ_CALIB,
+ AMS_SEQ_RSVD_1,
+ AMS_SEQ_RSVD_2,
+ AMS_SEQ_TEST,
+ AMS_SEQ_RSVD_4,
+ AMS_SEQ_SUPPLY4,
+ AMS_SEQ_SUPPLY5,
+ AMS_SEQ_SUPPLY6,
+ AMS_SEQ_TEMP,
+ AMS_SEQ_SUPPLY2,
+ AMS_SEQ_SUPPLY1,
+ AMS_SEQ_VP_VN,
+ AMS_SEQ_VREFP,
+ AMS_SEQ_VREFN,
+ AMS_SEQ_SUPPLY3,
+ AMS_SEQ_CURRENT_MON,
+ AMS_SEQ_SUPPLY7,
+ AMS_SEQ_SUPPLY8,
+ AMS_SEQ_SUPPLY9,
+ AMS_SEQ_SUPPLY10,
+ AMS_SEQ_VCCAMS,
+ AMS_SEQ_TEMP_REMOTE,
+ AMS_SEQ_MAX
+};
+
+#define AMS_SEQ(x) (AMS_SEQ_MAX + (x))
+#define AMS_VAUX_SEQ(x) (AMS_SEQ_MAX + (x))
+
+#define PS_SEQ_MAX AMS_SEQ_MAX
+#define PS_SEQ(x) (x)
+#define PL_SEQ(x) (PS_SEQ_MAX + x)
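+/* PS channels use scan indices 0..PS_SEQ_MAX-1; PL channel indices are
+ * offset by PS_SEQ_MAX so both banks share one linear scan index space.
+ */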
+
+#define AMS_CHAN_TEMP(_scan_index, _addr, _ext) { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .event_spec = ams_temp_events, \
+ .num_event_specs = ARRAY_SIZE(ams_temp_events), \
+ .scan_index = (_scan_index), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_CPU, \
+ }, \
+ .extend_name = _ext, \
+}
+
+#define AMS_CHAN_VOLTAGE(_scan_index, _addr, _ext, _alarm) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .event_spec = (_alarm) ? ams_voltage_events : NULL, \
+ .num_event_specs = (_alarm) ? ARRAY_SIZE(ams_voltage_events) : 0, \
+ .scan_index = (_scan_index), \
+ .scan_type = { \
+ .realbits = 10, \
+ .storagebits = 16, \
+ .shift = 6, \
+ .endianness = IIO_CPU, \
+ }, \
+ .extend_name = _ext, \
+}
+
+#define AMS_PS_CHAN_TEMP(_scan_index, _addr, _ext) \
+ AMS_CHAN_TEMP(PS_SEQ(_scan_index), _addr, _ext)
+#define AMS_PS_CHAN_VOLTAGE(_scan_index, _addr, _ext) \
+ AMS_CHAN_VOLTAGE(PS_SEQ(_scan_index), _addr, _ext, true)
+
+#define AMS_PL_CHAN_TEMP(_scan_index, _addr, _ext) \
+ AMS_CHAN_TEMP(PL_SEQ(_scan_index), _addr, _ext)
+#define AMS_PL_CHAN_VOLTAGE(_scan_index, _addr, _ext, _alarm) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(_scan_index), _addr, _ext, _alarm)
+#define AMS_PL_AUX_CHAN_VOLTAGE(_auxno, _ext) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(AMS_VAUX_SEQ(_auxno)), \
+ AMS_REG_VAUX(_auxno), _ext, false)
+#define AMS_CTRL_CHAN_VOLTAGE(_scan_index, _addr, _ext) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(AMS_VAUX_SEQ(AMS_SEQ(_scan_index))), \
+ _addr, _ext, false)
+
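+/**
+ * struct ams - driver private data
+ * @base: mapped AMS control register block
+ * @ps_base: mapped PS sysmon register block, if present
+ * @pl_base: mapped PL sysmon register block, if present
+ * @clk: AMS reference clock
+ * @dev: parent device
+ * @mutex: serializes threshold and configuration register updates
+ * @lock: protects the interrupt mask and masked alarm state
+ * @alarm_mask: currently enabled alarm bits
+ * @masked_alarm: alarms temporarily masked while their condition persists
+ * @intr_mask: 64-bit interrupt mask, ISR0 bits in the low word, ISR1 high
+ * @irq: AMS interrupt line
+ * @ams_unmask_work: delayed work that re-enables masked alarm interrupts
+ * @pl_bus: register accessors for the PL bank
+ */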
+struct ams {
+ void __iomem *base;
+ void __iomem *ps_base;
+ void __iomem *pl_base;
+ struct clk *clk;
+ struct device *dev;
+
+ struct mutex mutex;
+ spinlock_t lock;
+
+ unsigned int alarm_mask;
+ unsigned int masked_alarm;
+ u64 intr_mask;
+ int irq;
+
+ struct delayed_work ams_unmask_work;
+ const struct ams_pl_bus_ops *pl_bus;
+};
+
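+/* Accessors for the PL (programmable logic) AMS register bank */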
+struct ams_pl_bus_ops {
+ void (*read)(struct ams *ams, unsigned int offset, unsigned int *data);
+ void (*write)(struct ams *ams, unsigned int offset, unsigned int data);
+ void (*update)(struct ams *ams, unsigned int offset, u32 mask,
+ u32 data);
+};
+
+#endif /* __XILINX_AMS_H__ */
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 3f0b88b13dd3..a8571806ccc2 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -94,6 +94,9 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
#define XADC_AXI_REG_IPIER 0x68
#define XADC_AXI_ADC_REG_OFFSET 0x200
+/* AXI sysmon offset */
+#define XADC_AXI_SYSMON_REG_OFFSET 0x400
+
#define XADC_AXI_RESET_MAGIC 0xa
#define XADC_AXI_GIER_ENABLE BIT(31)
@@ -468,6 +471,26 @@ static int xadc_axi_write_adc_reg(struct xadc *xadc, unsigned int reg,
return 0;
}
+/* AXI sysmon read/write methods */
+static int xadc_axi_read_sysmon_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t *val)
+{
+ uint32_t val32;
+
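+ /* Sysmon registers are 32-bit word addressed on the AXI bus, hence reg * 4 */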
+ xadc_read_reg(xadc, XADC_AXI_SYSMON_REG_OFFSET + reg * 4, &val32);
+ *val = val32 & 0xffff;
+
+ return 0;
+}
+
+static int xadc_axi_write_sysmon_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t val)
+{
+ xadc_write_reg(xadc, XADC_AXI_SYSMON_REG_OFFSET + reg * 4, val);
+
+ return 0;
+}
+
static int xadc_axi_setup(struct platform_device *pdev,
struct iio_dev *indio_dev, int irq)
{
@@ -551,6 +574,17 @@ static const struct xadc_ops xadc_axi_ops = {
.flags = XADC_FLAGS_BUFFERED,
};
+/* AXI sysmon */
+static const struct xadc_ops sysmon_axi_ops = {
+ .read = xadc_axi_read_sysmon_reg,
+ .write = xadc_axi_write_sysmon_reg,
+ .setup = xadc_axi_setup,
+ .get_dclk_rate = xadc_axi_get_dclk,
+ .update_alarm = xadc_axi_update_alarm,
+ .interrupt_handler = xadc_axi_interrupt_handler,
+ .flags = XADC_FLAGS_BUFFERED,
+};
+
static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
uint16_t mask, uint16_t val)
{
@@ -1055,23 +1089,23 @@ static const struct iio_chan_spec xadc_channels[] = {
XADC_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccoddr", true),
XADC_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
XADC_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
- XADC_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
- XADC_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
- XADC_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
- XADC_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
- XADC_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
- XADC_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
- XADC_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
- XADC_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
- XADC_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
- XADC_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
- XADC_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
- XADC_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
- XADC_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
- XADC_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
- XADC_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
- XADC_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
- XADC_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
+ XADC_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, "vpvn", false),
+ XADC_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), "vaux0", false),
+ XADC_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), "vaux1", false),
+ XADC_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), "vaux2", false),
+ XADC_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), "vaux3", false),
+ XADC_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), "vaux4", false),
+ XADC_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), "vaux5", false),
+ XADC_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), "vaux6", false),
+ XADC_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), "vaux7", false),
+ XADC_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), "vaux8", false),
+ XADC_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), "vaux9", false),
+ XADC_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), "vaux10", false),
+ XADC_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), "vaux11", false),
+ XADC_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), "vaux12", false),
+ XADC_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), "vaux13", false),
+ XADC_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), "vaux14", false),
+ XADC_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), "vaux15", false),
};
static const struct iio_info xadc_info = {
@@ -1087,6 +1121,7 @@ static const struct iio_info xadc_info = {
static const struct of_device_id xadc_of_match_table[] = {
{ .compatible = "xlnx,zynq-xadc-1.00.a", (void *)&xadc_zynq_ops },
{ .compatible = "xlnx,axi-xadc-1.00.a", (void *)&xadc_axi_ops },
+ { .compatible = "xlnx,axi-sysmon-1.3", (void *)&sysmon_axi_ops},
{ },
};
MODULE_DEVICE_TABLE(of, xadc_of_match_table);
@@ -1095,7 +1130,7 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
unsigned int *conf)
{
struct xadc *xadc = iio_priv(indio_dev);
- struct iio_chan_spec *channels, *chan;
+ struct iio_chan_spec *iio_xadc_channels;
struct device_node *chan_node, *child;
unsigned int num_channels;
const char *external_mux;
@@ -1138,12 +1173,12 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
*conf |= XADC_CONF0_MUX | XADC_CONF0_CHAN(ext_mux_chan);
}
- channels = kmemdup(xadc_channels, sizeof(xadc_channels), GFP_KERNEL);
- if (!channels)
+ iio_xadc_channels = kmemdup(xadc_channels, sizeof(xadc_channels),
+ GFP_KERNEL);
+ if (!iio_xadc_channels)
return -ENOMEM;
num_channels = 9;
- chan = &channels[9];
chan_node = of_get_child_by_name(np, "xlnx,channels");
if (chan_node) {
@@ -1157,28 +1192,24 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
if (ret || reg > 16)
continue;
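+ /* The template entry already carries the correct scan index and address */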
+ iio_xadc_channels[num_channels] = xadc_channels[reg + 9];
+ iio_xadc_channels[num_channels].channel = num_channels - 1;
+
if (of_property_read_bool(child, "xlnx,bipolar"))
- chan->scan_type.sign = 's';
-
- if (reg == 0) {
- chan->scan_index = 11;
- chan->address = XADC_REG_VPVN;
- } else {
- chan->scan_index = 15 + reg;
- chan->address = XADC_REG_VAUX(reg - 1);
- }
+ iio_xadc_channels[num_channels].scan_type.sign = 's';
+
num_channels++;
- chan++;
}
}
of_node_put(chan_node);
indio_dev->num_channels = num_channels;
- indio_dev->channels = krealloc(channels, sizeof(*channels) *
- num_channels, GFP_KERNEL);
+ indio_dev->channels = krealloc(iio_xadc_channels,
+ sizeof(*iio_xadc_channels) *
+ num_channels, GFP_KERNEL);
/* If we can't resize the channels array, just use the original */
if (!indio_dev->channels)
- indio_dev->channels = channels;
+ indio_dev->channels = iio_xadc_channels;
return 0;
}
@@ -1278,14 +1309,14 @@ static int xadc_probe(struct platform_device *pdev)
}
}
- ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0,
- dev_name(&pdev->dev), indio_dev);
+ ret = devm_request_irq(&pdev->dev, irq, xadc->ops->interrupt_handler, 0,
+ dev_name(&pdev->dev), indio_dev);
if (ret)
goto err_clk_disable_unprepare;
ret = xadc->ops->setup(pdev, indio_dev, xadc->irq);
if (ret)
- goto err_free_irq;
+ goto err_clk_disable_unprepare;
for (i = 0; i < 16; i++)
xadc_read_adc_reg(xadc, XADC_REG_THRESHOLD(i),
@@ -1293,7 +1324,7 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc_write_adc_reg(xadc, XADC_REG_CONF0, conf0);
if (ret)
- goto err_free_irq;
+ goto err_clk_disable_unprepare;
bipolar_mask = 0;
for (i = 0; i < indio_dev->num_channels; i++) {
@@ -1303,17 +1334,17 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(0), bipolar_mask);
if (ret)
- goto err_free_irq;
+ goto err_clk_disable_unprepare;
ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(1),
bipolar_mask >> 16);
if (ret)
- goto err_free_irq;
+ goto err_clk_disable_unprepare;
/* Disable all alarms */
ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
XADC_CONF1_ALARM_MASK);
if (ret)
- goto err_free_irq;
+ goto err_clk_disable_unprepare;
/* Set thresholds to min/max */
for (i = 0; i < 16; i++) {
@@ -1328,7 +1359,7 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
xadc->threshold[i]);
if (ret)
- goto err_free_irq;
+ goto err_clk_disable_unprepare;
}
/* Go to non-buffered mode */
@@ -1336,16 +1367,14 @@ static int xadc_probe(struct platform_device *pdev)
ret = iio_device_register(indio_dev);
if (ret)
- goto err_free_irq;
+ goto err_clk_disable_unprepare;
platform_set_drvdata(pdev, indio_dev);
return 0;
-err_free_irq:
- free_irq(xadc->irq, indio_dev);
- cancel_delayed_work_sync(&xadc->zynq_unmask_work);
err_clk_disable_unprepare:
+ cancel_delayed_work_sync(&xadc->zynq_unmask_work);
clk_disable_unprepare(xadc->clk);
err_free_samplerate_trigger:
if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
@@ -1373,7 +1402,6 @@ static int xadc_remove(struct platform_device *pdev)
iio_trigger_free(xadc->convst_trigger);
iio_triggered_buffer_cleanup(indio_dev);
}
- free_irq(xadc->irq, indio_dev);
cancel_delayed_work_sync(&xadc->zynq_unmask_work);
clk_disable_unprepare(xadc->clk);
kfree(xadc->data);
diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig
new file mode 100644
index 000000000000..1f598670e84f
--- /dev/null
+++ b/drivers/iio/addac/Kconfig
@@ -0,0 +1,24 @@
+#
+# ADC DAC drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Analog to digital and digital to analog converters"
+
+config STX104
+ tristate "Apex Embedded Systems STX104 driver"
+ depends on PC104 && X86
+ select ISA_BUS_API
+ select GPIOLIB
+ help
+ Say yes here to build support for the Apex Embedded Systems STX104
+ integrated analog PC/104 card.
+
+ This driver supports the 16 channels of single-ended (8 channels of
+ differential) analog inputs, 2 channels of analog output, 4 digital
+ inputs, and 4 digital outputs provided by the STX104.
+
+ The base port addresses for the devices may be configured via the base
+ array module parameter.
+
+endmenu
diff --git a/drivers/iio/addac/Makefile b/drivers/iio/addac/Makefile
new file mode 100644
index 000000000000..862914523354
--- /dev/null
+++ b/drivers/iio/addac/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for industrial I/O ADDAC drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_STX104) += stx104.o
diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/addac/stx104.c
index f87bbc711ccc..8237ae4263cb 100644
--- a/drivers/iio/adc/stx104.c
+++ b/drivers/iio/addac/stx104.c
@@ -15,7 +15,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
#define STX104_OUT_CHAN(chan) { \
.type = IIO_VOLTAGE, \
@@ -45,13 +47,37 @@ module_param_hw_array(base, uint, ioport, &num_stx104, 0);
MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses");
/**
+ * struct stx104_reg - device register structure
+ * @ssr_ad: Software Strobe Register and ADC Data
+ * @achan: ADC Channel
+ * @dio: Digital I/O
+ * @dac: DAC Channels
+ * @cir_asr: Clear Interrupts and ADC Status
+ * @acr: ADC Control
+ * @pccr_fsh: Pacer Clock Control and FIFO Status MSB
+ * @acfg: ADC Configuration
+ */
+struct stx104_reg {
+ u16 ssr_ad;
+ u8 achan;
+ u8 dio;
+ u16 dac[2];
+ u8 cir_asr;
+ u8 acr;
+ u8 pccr_fsh;
+ u8 acfg;
+};
+
+/**
* struct stx104_iio - IIO device private data structure
+ * @lock: synchronization lock to prevent I/O race conditions
* @chan_out_states: channels' output states
- * @base: base port address of the IIO device
+ * @reg: pointer to the mapped device registers
*/
struct stx104_iio {
+ struct mutex lock;
unsigned int chan_out_states[STX104_NUM_OUT_CHAN];
- unsigned int base;
+ struct stx104_reg __iomem *reg;
};
/**
@@ -64,7 +90,7 @@ struct stx104_iio {
struct stx104_gpio {
struct gpio_chip chip;
spinlock_t lock;
- unsigned int base;
+ u8 __iomem *base;
unsigned int out_state;
};
@@ -72,6 +98,7 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long mask)
{
struct stx104_iio *const priv = iio_priv(indio_dev);
+ struct stx104_reg __iomem *const reg = priv->reg;
unsigned int adc_config;
int adbu;
int gain;
@@ -79,7 +106,7 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_HARDWAREGAIN:
/* get gain configuration */
- adc_config = inb(priv->base + 11);
+ adc_config = ioread8(&reg->acfg);
gain = adc_config & 0x3;
*val = 1 << gain;
@@ -90,25 +117,31 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
}
+ mutex_lock(&priv->lock);
+
/* select ADC channel */
- outb(chan->channel | (chan->channel << 4), priv->base + 2);
+ iowrite8(chan->channel | (chan->channel << 4), &reg->achan);
+
+ /* trigger an ADC sample capture by writing to the 8-bit
+ * Software Strobe Register, then wait for completion
+ */
+ iowrite8(0, &reg->ssr_ad);
+ while (ioread8(&reg->cir_asr) & BIT(7));
- /* trigger ADC sample capture and wait for completion */
- outb(0, priv->base);
- while (inb(priv->base + 8) & BIT(7));
+ *val = ioread16(&reg->ssr_ad);
- *val = inw(priv->base);
+ mutex_unlock(&priv->lock);
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
/* get ADC bipolar/unipolar configuration */
- adc_config = inb(priv->base + 11);
+ adc_config = ioread8(&reg->acfg);
adbu = !(adc_config & BIT(2));
*val = -32768 * adbu;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
/* get ADC bipolar/unipolar and gain configuration */
- adc_config = inb(priv->base + 11);
+ adc_config = ioread8(&reg->acfg);
adbu = !(adc_config & BIT(2));
gain = adc_config & 0x3;
@@ -130,16 +163,16 @@ static int stx104_write_raw(struct iio_dev *indio_dev,
/* Only four gain states (x1, x2, x4, x8) */
switch (val) {
case 1:
- outb(0, priv->base + 11);
+ iowrite8(0, &priv->reg->acfg);
break;
case 2:
- outb(1, priv->base + 11);
+ iowrite8(1, &priv->reg->acfg);
break;
case 4:
- outb(2, priv->base + 11);
+ iowrite8(2, &priv->reg->acfg);
break;
case 8:
- outb(3, priv->base + 11);
+ iowrite8(3, &priv->reg->acfg);
break;
default:
return -EINVAL;
@@ -152,9 +185,12 @@ static int stx104_write_raw(struct iio_dev *indio_dev,
if ((unsigned int)val > 65535)
return -EINVAL;
+ mutex_lock(&priv->lock);
+
priv->chan_out_states[chan->channel] = val;
- outw(val, priv->base + 4 + 2 * chan->channel);
+ iowrite16(val, &priv->reg->dac[chan->channel]);
+ mutex_unlock(&priv->lock);
return 0;
}
return -EINVAL;
@@ -222,7 +258,7 @@ static int stx104_gpio_get(struct gpio_chip *chip, unsigned int offset)
if (offset >= 4)
return -EINVAL;
- return !!(inb(stx104gpio->base) & BIT(offset));
+ return !!(ioread8(stx104gpio->base) & BIT(offset));
}
static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
@@ -230,7 +266,7 @@ static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
{
struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
- *bits = inb(stx104gpio->base);
+ *bits = ioread8(stx104gpio->base);
return 0;
}
@@ -252,7 +288,7 @@ static void stx104_gpio_set(struct gpio_chip *chip, unsigned int offset,
else
stx104gpio->out_state &= ~mask;
- outb(stx104gpio->out_state, stx104gpio->base);
+ iowrite8(stx104gpio->out_state, stx104gpio->base);
spin_unlock_irqrestore(&stx104gpio->lock, flags);
}
@@ -279,7 +315,7 @@ static void stx104_gpio_set_multiple(struct gpio_chip *chip,
stx104gpio->out_state &= ~*mask;
stx104gpio->out_state |= *mask & *bits;
- outb(stx104gpio->out_state, stx104gpio->base);
+ iowrite8(stx104gpio->out_state, stx104gpio->base);
spin_unlock_irqrestore(&stx104gpio->lock, flags);
}
@@ -306,11 +342,16 @@ static int stx104_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
+ priv = iio_priv(indio_dev);
+ priv->reg = devm_ioport_map(dev, base[id], STX104_EXTENT);
+ if (!priv->reg)
+ return -ENOMEM;
+
indio_dev->info = &stx104_info;
indio_dev->modes = INDIO_DIRECT_MODE;
/* determine if differential inputs */
- if (inb(base[id] + 8) & BIT(5)) {
+ if (ioread8(&priv->reg->cir_asr) & BIT(5)) {
indio_dev->num_channels = ARRAY_SIZE(stx104_channels_diff);
indio_dev->channels = stx104_channels_diff;
} else {
@@ -321,18 +362,17 @@ static int stx104_probe(struct device *dev, unsigned int id)
indio_dev->name = dev_name(dev);
indio_dev->dev.parent = dev;
- priv = iio_priv(indio_dev);
- priv->base = base[id];
+ mutex_init(&priv->lock);
/* configure device for software trigger operation */
- outb(0, base[id] + 9);
+ iowrite8(0, &priv->reg->acr);
/* initialize gain setting to x1 */
- outb(0, base[id] + 11);
+ iowrite8(0, &priv->reg->acfg);
/* initialize DAC output to 0V */
- outw(0, base[id] + 4);
- outw(0, base[id] + 6);
+ iowrite16(0, &priv->reg->dac[0]);
+ iowrite16(0, &priv->reg->dac[1]);
stx104gpio->chip.label = dev_name(dev);
stx104gpio->chip.parent = dev;
@@ -347,7 +387,7 @@ static int stx104_probe(struct device *dev, unsigned int id)
stx104gpio->chip.get_multiple = stx104_gpio_get_multiple;
stx104gpio->chip.set = stx104_gpio_set;
stx104gpio->chip.set_multiple = stx104_gpio_set_multiple;
- stx104gpio->base = base[id] + 3;
+ stx104gpio->base = &priv->reg->dio;
stx104gpio->out_state = 0x0;
spin_lock_init(&stx104gpio->lock);
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index b4f394f05863..44553eccb30d 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -99,7 +99,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
platform_set_drvdata(pdev, indio_dev);
state->ec = ec->ec_dev;
- state->msg = devm_kzalloc(&pdev->dev,
+ state->msg = devm_kzalloc(&pdev->dev, sizeof(*state->msg) +
max((u16)sizeof(struct ec_params_motion_sense),
state->ec->max_response), GFP_KERNEL);
if (!state->msg)
diff --git a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
index b52cba1b3c83..3f01ebeda242 100644
--- a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
+++ b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
@@ -15,8 +15,8 @@
/* Conversion times in us */
static const u16 ms_sensors_ht_t_conversion_time[] = { 50000, 25000,
13000, 7000 };
-static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 3000,
- 5000, 8000 };
+static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 5000,
+ 3000, 8000 };
static const u16 ms_sensors_tp_conversion_time[] = { 500, 1100, 2100,
4100, 8220, 16440 };
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 1369fa1d2f0e..cd5c30d03d48 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -16,7 +16,7 @@ obj-$(CONFIG_AD5592R_BASE) += ad5592r-base.o
obj-$(CONFIG_AD5592R) += ad5592r.o
obj-$(CONFIG_AD5593R) += ad5593r.o
obj-$(CONFIG_AD5755) += ad5755.o
-obj-$(CONFIG_AD5755) += ad5758.o
+obj-$(CONFIG_AD5758) += ad5758.o
obj-$(CONFIG_AD5761) += ad5761.o
obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5791) += ad5791.o
diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c
index 44ea3b8117d0..cd5d9ce5fcae 100644
--- a/drivers/iio/dac/ad5593r.c
+++ b/drivers/iio/dac/ad5593r.c
@@ -14,6 +14,8 @@
#include <linux/of.h>
#include <linux/acpi.h>
+#include <asm/unaligned.h>
+
#define AD5593R_MODE_CONF (0 << 4)
#define AD5593R_MODE_DAC_WRITE (1 << 4)
#define AD5593R_MODE_ADC_READBACK (4 << 4)
@@ -21,6 +23,24 @@
#define AD5593R_MODE_GPIO_READBACK (6 << 4)
#define AD5593R_MODE_REG_READBACK (7 << 4)
+static int ad5593r_read_word(struct i2c_client *i2c, u8 reg, u16 *value)
+{
+ int ret;
+ u8 buf[2];
+
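+ /* Select the register, then read back two big-endian data bytes */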
+ ret = i2c_smbus_write_byte(i2c, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_master_recv(i2c, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ *value = get_unaligned_be16(buf);
+
+ return 0;
+}
+
static int ad5593r_write_dac(struct ad5592r_state *st, unsigned chan, u16 value)
{
struct i2c_client *i2c = to_i2c_client(st->dev);
@@ -39,13 +59,7 @@ static int ad5593r_read_adc(struct ad5592r_state *st, unsigned chan, u16 *value)
if (val < 0)
return (int) val;
- val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_ADC_READBACK);
- if (val < 0)
- return (int) val;
-
- *value = (u16) val;
-
- return 0;
+ return ad5593r_read_word(i2c, AD5593R_MODE_ADC_READBACK, value);
}
static int ad5593r_reg_write(struct ad5592r_state *st, u8 reg, u16 value)
@@ -59,25 +73,19 @@ static int ad5593r_reg_write(struct ad5592r_state *st, u8 reg, u16 value)
static int ad5593r_reg_read(struct ad5592r_state *st, u8 reg, u16 *value)
{
struct i2c_client *i2c = to_i2c_client(st->dev);
- s32 val;
-
- val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_REG_READBACK | reg);
- if (val < 0)
- return (int) val;
- *value = (u16) val;
-
- return 0;
+ return ad5593r_read_word(i2c, AD5593R_MODE_REG_READBACK | reg, value);
}
static int ad5593r_gpio_read(struct ad5592r_state *st, u8 *value)
{
struct i2c_client *i2c = to_i2c_client(st->dev);
- s32 val;
+ u16 val;
+ int ret;
- val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_GPIO_READBACK);
- if (val < 0)
- return (int) val;
+ ret = ad5593r_read_word(i2c, AD5593R_MODE_GPIO_READBACK, &val);
+ if (ret)
+ return ret;
*value = (u8) val;
diff --git a/drivers/iio/dac/cio-dac.c b/drivers/iio/dac/cio-dac.c
index 81677795e57a..080a3721874d 100644
--- a/drivers/iio/dac/cio-dac.c
+++ b/drivers/iio/dac/cio-dac.c
@@ -66,8 +66,8 @@ static int cio_dac_write_raw(struct iio_dev *indio_dev,
if (mask != IIO_CHAN_INFO_RAW)
return -EINVAL;
- /* DAC can only accept up to a 16-bit value */
- if ((unsigned int)val > 65535)
+ /* DAC can only accept up to a 12-bit value */
+ if ((unsigned int)val > 4095)
return -EINVAL;
priv->chan_out_states[chan->channel] = val;
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index ed455e801e80..4ed7e90d37f9 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -47,12 +47,18 @@ static int __maybe_unused mcp4725_suspend(struct device *dev)
struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
to_i2c_client(dev)));
u8 outbuf[2];
+ int ret;
outbuf[0] = (data->powerdown_mode + 1) << 4;
outbuf[1] = 0;
data->powerdown = true;
- return i2c_master_send(data->client, outbuf, 2);
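+ /* i2c_master_send() returns the number of bytes sent on success */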
+ ret = i2c_master_send(data->client, outbuf, 2);
+ if (ret < 0)
+ return ret;
+ else if (ret != 2)
+ return -EIO;
+ return 0;
}
static int __maybe_unused mcp4725_resume(struct device *dev)
@@ -60,13 +66,19 @@ static int __maybe_unused mcp4725_resume(struct device *dev)
struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
to_i2c_client(dev)));
u8 outbuf[2];
+ int ret;
/* restore previous DAC value */
outbuf[0] = (data->dac_value >> 8) & 0xf;
outbuf[1] = data->dac_value & 0xff;
data->powerdown = false;
- return i2c_master_send(data->client, outbuf, 2);
+ ret = i2c_master_send(data->client, outbuf, 2);
+ if (ret < 0)
+ return ret;
+ else if (ret != 2)
+ return -EIO;
+ return 0;
}
static SIMPLE_DEV_PM_OPS(mcp4725_pm_ops, mcp4725_suspend, mcp4725_resume);
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 29104656a537..3fdaf3104cce 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -245,14 +245,14 @@ static int afe4403_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct afe4403_data *afe = iio_priv(indio_dev);
- unsigned int reg = afe4403_channel_values[chan->address];
- unsigned int field = afe4403_channel_leds[chan->address];
+ unsigned int reg, field;
int ret;
switch (chan->type) {
case IIO_INTENSITY:
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ reg = afe4403_channel_values[chan->address];
ret = afe4403_read(afe, reg, val);
if (ret)
return ret;
@@ -262,6 +262,7 @@ static int afe4403_read_raw(struct iio_dev *indio_dev,
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ field = afe4403_channel_leds[chan->address];
ret = regmap_field_read(afe->fields[field], val);
if (ret)
return ret;
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index cebb1fd4d0b1..7780e9b312b3 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -250,20 +250,20 @@ static int afe4404_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct afe4404_data *afe = iio_priv(indio_dev);
- unsigned int value_reg = afe4404_channel_values[chan->address];
- unsigned int led_field = afe4404_channel_leds[chan->address];
- unsigned int offdac_field = afe4404_channel_offdacs[chan->address];
+ unsigned int value_reg, led_field, offdac_field;
int ret;
switch (chan->type) {
case IIO_INTENSITY:
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ value_reg = afe4404_channel_values[chan->address];
ret = regmap_read(afe->regmap, value_reg, val);
if (ret)
return ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
+ offdac_field = afe4404_channel_offdacs[chan->address];
ret = regmap_field_read(afe->fields[offdac_field], val);
if (ret)
return ret;
@@ -273,6 +273,7 @@ static int afe4404_read_raw(struct iio_dev *indio_dev,
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ led_field = afe4404_channel_leds[chan->address];
ret = regmap_field_read(afe->fields[led_field], val);
if (ret)
return ret;
@@ -295,19 +296,20 @@ static int afe4404_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct afe4404_data *afe = iio_priv(indio_dev);
- unsigned int led_field = afe4404_channel_leds[chan->address];
- unsigned int offdac_field = afe4404_channel_offdacs[chan->address];
+ unsigned int led_field, offdac_field;
switch (chan->type) {
case IIO_INTENSITY:
switch (mask) {
case IIO_CHAN_INFO_OFFSET:
+ offdac_field = afe4404_channel_offdacs[chan->address];
return regmap_field_write(afe->fields[offdac_field], val);
}
break;
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ led_field = afe4404_channel_leds[chan->address];
return regmap_field_write(afe->fields[led_field], val);
}
break;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 2261c6c4ac65..87de2a05c711 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -501,13 +501,13 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
ret = inv_mpu6050_sensor_show(st, st->reg->gyro_offset,
chan->channel2, val);
mutex_unlock(&st->lock);
- return IIO_VAL_INT;
+ return ret;
case IIO_ACCEL:
mutex_lock(&st->lock);
ret = inv_mpu6050_sensor_show(st, st->reg->accl_offset,
chan->channel2, val);
mutex_unlock(&st->lock);
- return IIO_VAL_INT;
+ return ret;
default:
return -EINVAL;
diff --git a/drivers/iio/industrialio-sw-trigger.c b/drivers/iio/industrialio-sw-trigger.c
index 9ae793a70b8b..a7714d32a641 100644
--- a/drivers/iio/industrialio-sw-trigger.c
+++ b/drivers/iio/industrialio-sw-trigger.c
@@ -58,8 +58,12 @@ int iio_register_sw_trigger_type(struct iio_sw_trigger_type *t)
t->group = configfs_register_default_group(iio_triggers_group, t->name,
&iio_trigger_type_group_type);
- if (IS_ERR(t->group))
+ if (IS_ERR(t->group)) {
+ mutex_lock(&iio_trigger_types_lock);
+ list_del(&t->list);
+ mutex_unlock(&iio_trigger_types_lock);
ret = PTR_ERR(t->group);
+ }
return ret;
}
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index ca0fe902a7db..d00f3045557c 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -136,9 +136,10 @@ static int __of_iio_channel_get(struct iio_channel *channel,
idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
iio_dev_node_match);
- of_node_put(iiospec.np);
- if (idev == NULL)
+ if (idev == NULL) {
+ of_node_put(iiospec.np);
return -EPROBE_DEFER;
+ }
indio_dev = dev_to_iio_dev(idev);
channel->indio_dev = indio_dev;
@@ -146,6 +147,7 @@ static int __of_iio_channel_get(struct iio_channel *channel,
index = indio_dev->info->of_xlate(indio_dev, &iiospec);
else
index = __of_iio_simple_xlate(indio_dev, &iiospec);
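+ /* Both xlate paths above use iiospec, so drop the node reference only now */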
+ of_node_put(iiospec.np);
if (index < 0)
goto err_put;
channel->channel = &indio_dev->channels[index];
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 42b64b85d06c..83885f6c93cd 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -239,6 +239,8 @@ config RPR0521
tristate "ROHM RPR0521 ALS and proximity sensor driver"
depends on I2C
select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say Y here if you want to build support for ROHM's RPR0521
ambient light and proximity sensor device.
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index c5dfb9a6b5a1..2403911fdbd9 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -55,9 +55,6 @@
#define APDS9960_REG_CONTROL_PGAIN_MASK_SHIFT 2
#define APDS9960_REG_CONFIG_2 0x90
-#define APDS9960_REG_CONFIG_2_GGAIN_MASK 0x60
-#define APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT 5
-
#define APDS9960_REG_ID 0x92
#define APDS9960_REG_STATUS 0x93
@@ -78,6 +75,9 @@
#define APDS9960_REG_GCONF_1_GFIFO_THRES_MASK_SHIFT 6
#define APDS9960_REG_GCONF_2 0xa3
+#define APDS9960_REG_GCONF_2_GGAIN_MASK 0x60
+#define APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT 5
+
#define APDS9960_REG_GOFFSET_U 0xa4
#define APDS9960_REG_GOFFSET_D 0xa5
#define APDS9960_REG_GPULSE 0xa6
@@ -397,9 +397,9 @@ static int apds9960_set_pxs_gain(struct apds9960_data *data, int val)
}
ret = regmap_update_bits(data->regmap,
- APDS9960_REG_CONFIG_2,
- APDS9960_REG_CONFIG_2_GGAIN_MASK,
- idx << APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT);
+ APDS9960_REG_GCONF_2,
+ APDS9960_REG_GCONF_2_GGAIN_MASK,
+ idx << APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT);
if (!ret)
data->pxs_gain = idx;
mutex_unlock(&data->lock);
diff --git a/drivers/iio/light/isl29028.c b/drivers/iio/light/isl29028.c
index 4d220c835c75..14e150c52dfa 100644
--- a/drivers/iio/light/isl29028.c
+++ b/drivers/iio/light/isl29028.c
@@ -628,7 +628,7 @@ static int isl29028_probe(struct i2c_client *client,
ISL29028_POWER_OFF_DELAY_MS);
pm_runtime_use_autosuspend(&client->dev);
- ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev,
"%s(): iio registration failed with error %d\n",
diff --git a/drivers/iio/light/max44009.c b/drivers/iio/light/max44009.c
index 00ba15499638..5103b1061a77 100644
--- a/drivers/iio/light/max44009.c
+++ b/drivers/iio/light/max44009.c
@@ -529,6 +529,12 @@ static int max44009_probe(struct i2c_client *client,
return devm_iio_device_register(&client->dev, indio_dev);
}
+static const struct of_device_id max44009_of_match[] = {
+ { .compatible = "maxim,max44009" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max44009_of_match);
+
static const struct i2c_device_id max44009_id[] = {
{ "max44009", 0 },
{ }
@@ -538,18 +544,13 @@ MODULE_DEVICE_TABLE(i2c, max44009_id);
static struct i2c_driver max44009_driver = {
.driver = {
.name = MAX44009_DRV_NAME,
+ .of_match_table = max44009_of_match,
},
.probe = max44009_probe,
.id_table = max44009_id,
};
module_i2c_driver(max44009_driver);
-static const struct of_device_id max44009_of_match[] = {
- { .compatible = "maxim,max44009" },
- { }
-};
-MODULE_DEVICE_TABLE(of, max44009_of_match);
-
MODULE_AUTHOR("Robert Eshleman <bobbyeshleman@gmail.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MAX44009 ambient light sensor driver");
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index fe6001afb7b4..06b98a2e174f 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -857,7 +857,7 @@ static int tsl2583_probe(struct i2c_client *clientp,
TSL2583_POWER_OFF_DELAY_MS);
pm_runtime_use_autosuspend(&clientp->dev);
- ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret) {
dev_err(&clientp->dev, "%s: iio registration failed\n",
__func__);
diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
index be37fcbd4654..665a24f63044 100644
--- a/drivers/iio/light/tsl2772.c
+++ b/drivers/iio/light/tsl2772.c
@@ -606,6 +606,7 @@ static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
return -EINVAL;
}
}
+ chip->settings.prox_diode = prox_diode_mask;
return 0;
}
diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
index 234623ab3817..6c83f9e54e2e 100644
--- a/drivers/iio/light/vcnl4035.c
+++ b/drivers/iio/light/vcnl4035.c
@@ -8,6 +8,7 @@
* TODO: Proximity
*/
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -42,6 +43,7 @@
#define VCNL4035_ALS_PERS_MASK GENMASK(3, 2)
#define VCNL4035_INT_ALS_IF_H_MASK BIT(12)
#define VCNL4035_INT_ALS_IF_L_MASK BIT(13)
+#define VCNL4035_DEV_ID_MASK GENMASK(7, 0)
/* Default values */
#define VCNL4035_MODE_ALS_ENABLE BIT(0)
@@ -415,6 +417,7 @@ static int vcnl4035_init(struct vcnl4035_data *data)
return ret;
}
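+ /* The device ID occupies only the low byte of the ID register */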
+ id = FIELD_GET(VCNL4035_DEV_ID_MASK, id);
if (id != VCNL4035_DEV_ID_VAL) {
dev_err(&data->client->dev, "Wrong id, got %x, expected %x\n",
id, VCNL4035_DEV_ID_VAL);
diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
index f31ff225fe61..489e17e83a4d 100644
--- a/drivers/iio/magnetometer/rm3100-core.c
+++ b/drivers/iio/magnetometer/rm3100-core.c
@@ -539,6 +539,7 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
struct rm3100_data *data;
unsigned int tmp;
int ret;
+ int samp_rate_index;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
@@ -598,9 +599,14 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
ret = regmap_read(regmap, RM3100_REG_TMRC, &tmp);
if (ret < 0)
return ret;
+
+ samp_rate_index = tmp - RM3100_TMRC_OFFSET;
+ if (samp_rate_index < 0 || samp_rate_index >= RM3100_SAMP_NUM) {
+ dev_err(dev, "The value read from RM3100_REG_TMRC is invalid!\n");
+ return -EINVAL;
+ }
/* Initializing max wait time, which is double conversion time. */
- data->conversion_time = rm3100_samp_rates[tmp - RM3100_TMRC_OFFSET][2]
- * 2;
+ data->conversion_time = rm3100_samp_rates[samp_rate_index][2] * 2;
/* Cycle count values may not be what we want. */
if ((tmp - RM3100_TMRC_OFFSET) == 0)
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 0a95afaa48fe..6d1e7c1deea0 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -1113,7 +1113,7 @@ int bmp280_common_probe(struct device *dev,
* however as it happens, the BMP085 shares the chip ID of BMP180
* so we look for an IRQ if we have that.
*/
- if (irq > 0 || (chip_id == BMP180_CHIP_ID)) {
+ if (irq > 0 && (chip_id == BMP180_CHIP_ID)) {
ret = bmp085_fetch_eoc_irq(dev, name, irq, data);
if (ret)
goto out_disable_vdda;
diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c
index 2c1943bbc433..2b2203eea3e9 100644
--- a/drivers/iio/pressure/dps310.c
+++ b/drivers/iio/pressure/dps310.c
@@ -57,8 +57,8 @@
#define DPS310_RESET_MAGIC 0x09
#define DPS310_COEF_BASE 0x10
-/* Make sure sleep time is <= 20ms for usleep_range */
-#define DPS310_POLL_SLEEP_US(t) min(20000, (t) / 8)
+/* Make sure sleep time is <= 30ms for usleep_range */
+#define DPS310_POLL_SLEEP_US(t) min(30000, (t) / 8)
/* Silently handle error in rate value here */
#define DPS310_POLL_TIMEOUT_US(rc) ((rc) <= 0 ? 1000000 : 1000000 / (rc))
@@ -89,6 +89,7 @@ struct dps310_data {
s32 c00, c10, c20, c30, c01, c11, c21;
s32 pressure_raw;
s32 temp_raw;
+ bool timeout_recovery_failed;
};
static const struct iio_chan_spec dps310_channels[] = {
@@ -159,6 +160,102 @@ static int dps310_get_coefs(struct dps310_data *data)
return 0;
}
+/*
+ * Some versions of the chip will read temperatures in the ~60C range when
+ * it's actually ~20C. This is the manufacturer recommended workaround
+ * to correct the issue. The registers used below are undocumented.
+ */
+static int dps310_temp_workaround(struct dps310_data *data)
+{
+ int rc;
+ int reg;
+
+ rc = regmap_read(data->regmap, 0x32, &reg);
+ if (rc)
+ return rc;
+
+ /*
+ * If bit 1 is set then the device is okay, and the workaround does not
+ * need to be applied
+ */
+ if (reg & BIT(1))
+ return 0;
+
+ rc = regmap_write(data->regmap, 0x0e, 0xA5);
+ if (rc)
+ return rc;
+
+ rc = regmap_write(data->regmap, 0x0f, 0x96);
+ if (rc)
+ return rc;
+
+ rc = regmap_write(data->regmap, 0x62, 0x02);
+ if (rc)
+ return rc;
+
+ rc = regmap_write(data->regmap, 0x0e, 0x00);
+ if (rc)
+ return rc;
+
+ return regmap_write(data->regmap, 0x0f, 0x00);
+}
+
+static int dps310_startup(struct dps310_data *data)
+{
+ int rc;
+ int ready;
+
+ /*
+ * Set up pressure sensor in single sample, one measurement per second
+ * mode
+ */
+ rc = regmap_write(data->regmap, DPS310_PRS_CFG, 0);
+ if (rc)
+ return rc;
+
+ /*
+ * Set up external (MEMS) temperature sensor in single sample, one
+ * measurement per second mode
+ */
+ rc = regmap_write(data->regmap, DPS310_TMP_CFG, DPS310_TMP_EXT);
+ if (rc)
+ return rc;
+
+ /* Temp and pressure shifts are disabled when PRC <= 8 */
+ rc = regmap_write_bits(data->regmap, DPS310_CFG_REG,
+ DPS310_PRS_SHIFT_EN | DPS310_TMP_SHIFT_EN, 0);
+ if (rc)
+ return rc;
+
+ /* MEAS_CFG doesn't update correctly unless first written with 0 */
+ rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
+ DPS310_MEAS_CTRL_BITS, 0);
+ if (rc)
+ return rc;
+
+ /* Turn on temperature and pressure measurement in the background */
+ rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
+ DPS310_MEAS_CTRL_BITS, DPS310_PRS_EN |
+ DPS310_TEMP_EN | DPS310_BACKGROUND);
+ if (rc)
+ return rc;
+
+ /*
+ * Calibration coefficients required for reporting temperature.
+ * They are available 40ms after the device has started
+ */
+ rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
+ ready & DPS310_COEF_RDY, 10000, 40000);
+ if (rc)
+ return rc;
+
+ rc = dps310_get_coefs(data);
+ if (rc)
+ return rc;
+
+ return dps310_temp_workaround(data);
+}
+
static int dps310_get_pres_precision(struct dps310_data *data)
{
int rc;
@@ -297,11 +394,69 @@ static int dps310_get_temp_k(struct dps310_data *data)
return scale_factors[ilog2(rc)];
}
+static int dps310_reset_wait(struct dps310_data *data)
+{
+ int rc;
+
+ rc = regmap_write(data->regmap, DPS310_RESET, DPS310_RESET_MAGIC);
+ if (rc)
+ return rc;
+
+ /* Wait for the chip to become accessible again: 15 ms per the specification */
+ usleep_range(15000, 55000);
+ return 0;
+}
+
+static int dps310_reset_reinit(struct dps310_data *data)
+{
+ int rc;
+
+ rc = dps310_reset_wait(data);
+ if (rc)
+ return rc;
+
+ return dps310_startup(data);
+}
+
+static int dps310_ready_status(struct dps310_data *data, int ready_bit, int timeout)
+{
+ int sleep = DPS310_POLL_SLEEP_US(timeout);
+ int ready;
+
+ return regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready, ready & ready_bit,
+ sleep, timeout);
+}
+
+static int dps310_ready(struct dps310_data *data, int ready_bit, int timeout)
+{
+ int rc;
+
+ rc = dps310_ready_status(data, ready_bit, timeout);
+ if (rc) {
+ if (rc == -ETIMEDOUT && !data->timeout_recovery_failed) {
+ /* Reset and reinitialize the chip. */
+ if (dps310_reset_reinit(data)) {
+ data->timeout_recovery_failed = true;
+ } else {
+ /* Try again to get sensor ready status. */
+ if (dps310_ready_status(data, ready_bit, timeout))
+ data->timeout_recovery_failed = true;
+ else
+ return 0;
+ }
+ }
+
+ return rc;
+ }
+
+ data->timeout_recovery_failed = false;
+ return 0;
+}
+
static int dps310_read_pres_raw(struct dps310_data *data)
{
int rc;
int rate;
- int ready;
int timeout;
s32 raw;
u8 val[3];
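
The timeout_recovery_failed flag in dps310_ready() above is a one-shot latch: a timeout triggers at most one reset-and-reinit attempt, a second failure latches the error so a dead sensor cannot cause a reset storm, and any successful wait re-arms recovery. A simplified userspace model of that state machine (any non-zero rc is treated as a timeout here):

    #include <stdbool.h>
    #include <stdio.h>

    static bool recovery_failed;           /* mirrors timeout_recovery_failed */

    static int wait_ready(void) { return -1; } /* pretend it keeps timing out */
    static int reinit(void)     { return 0; }

    static int ready(void)
    {
        int rc = wait_ready();

        if (rc) {
            if (!recovery_failed) {
                if (reinit() || wait_ready())
                    recovery_failed = true; /* latch: no further resets */
                else
                    return 0;
            }
            return rc;
        }
        recovery_failed = false;            /* a good read re-arms recovery */
        return 0;
    }

    int main(void)
    {
        int a = ready();   /* attempts one reset-and-reinit, then latches */
        int b = ready();   /* latch set: returns the timeout immediately  */

        printf("%d %d\n", a, b);
        return 0;
    }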
@@ -313,9 +468,7 @@ static int dps310_read_pres_raw(struct dps310_data *data)
timeout = DPS310_POLL_TIMEOUT_US(rate);
/* Poll for sensor readiness; base the timeout upon the sample rate. */
- rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
- ready & DPS310_PRS_RDY,
- DPS310_POLL_SLEEP_US(timeout), timeout);
+ rc = dps310_ready(data, DPS310_PRS_RDY, timeout);
if (rc)
goto done;
@@ -352,7 +505,6 @@ static int dps310_read_temp_raw(struct dps310_data *data)
{
int rc;
int rate;
- int ready;
int timeout;
if (mutex_lock_interruptible(&data->lock))
@@ -362,10 +514,8 @@ static int dps310_read_temp_raw(struct dps310_data *data)
timeout = DPS310_POLL_TIMEOUT_US(rate);
/* Poll for sensor readiness; base the timeout upon the sample rate. */
- rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
- ready & DPS310_TMP_RDY,
- DPS310_POLL_SLEEP_US(timeout), timeout);
- if (rc < 0)
+ rc = dps310_ready(data, DPS310_TMP_RDY, timeout);
+ if (rc)
goto done;
rc = dps310_read_temp_ready(data);
@@ -660,7 +810,7 @@ static void dps310_reset(void *action_data)
{
struct dps310_data *data = action_data;
- regmap_write(data->regmap, DPS310_RESET, DPS310_RESET_MAGIC);
+ dps310_reset_wait(data);
}
static const struct regmap_config dps310_regmap_config = {
@@ -677,52 +827,12 @@ static const struct iio_info dps310_info = {
.write_raw = dps310_write_raw,
};
-/*
- * Some verions of chip will read temperatures in the ~60C range when
- * its actually ~20C. This is the manufacturer recommended workaround
- * to correct the issue. The registers used below are undocumented.
- */
-static int dps310_temp_workaround(struct dps310_data *data)
-{
- int rc;
- int reg;
-
- rc = regmap_read(data->regmap, 0x32, &reg);
- if (rc < 0)
- return rc;
-
- /*
- * If bit 1 is set then the device is okay, and the workaround does not
- * need to be applied
- */
- if (reg & BIT(1))
- return 0;
-
- rc = regmap_write(data->regmap, 0x0e, 0xA5);
- if (rc < 0)
- return rc;
-
- rc = regmap_write(data->regmap, 0x0f, 0x96);
- if (rc < 0)
- return rc;
-
- rc = regmap_write(data->regmap, 0x62, 0x02);
- if (rc < 0)
- return rc;
-
- rc = regmap_write(data->regmap, 0x0e, 0x00);
- if (rc < 0)
- return rc;
-
- return regmap_write(data->regmap, 0x0f, 0x00);
-}
-
static int dps310_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct dps310_data *data;
struct iio_dev *iio;
- int rc, ready;
+ int rc;
iio = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!iio)
@@ -748,54 +858,8 @@ static int dps310_probe(struct i2c_client *client,
if (rc)
return rc;
- /*
- * Set up pressure sensor in single sample, one measurement per second
- * mode
- */
- rc = regmap_write(data->regmap, DPS310_PRS_CFG, 0);
-
- /*
- * Set up external (MEMS) temperature sensor in single sample, one
- * measurement per second mode
- */
- rc = regmap_write(data->regmap, DPS310_TMP_CFG, DPS310_TMP_EXT);
- if (rc < 0)
- return rc;
-
- /* Temp and pressure shifts are disabled when PRC <= 8 */
- rc = regmap_write_bits(data->regmap, DPS310_CFG_REG,
- DPS310_PRS_SHIFT_EN | DPS310_TMP_SHIFT_EN, 0);
- if (rc < 0)
- return rc;
-
- /* MEAS_CFG doesn't update correctly unless first written with 0 */
- rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
- DPS310_MEAS_CTRL_BITS, 0);
- if (rc < 0)
- return rc;
-
- /* Turn on temperature and pressure measurement in the background */
- rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
- DPS310_MEAS_CTRL_BITS, DPS310_PRS_EN |
- DPS310_TEMP_EN | DPS310_BACKGROUND);
- if (rc < 0)
- return rc;
-
- /*
- * Calibration coefficients required for reporting temperature.
- * They are available 40ms after the device has started
- */
- rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
- ready & DPS310_COEF_RDY, 10000, 40000);
- if (rc < 0)
- return rc;
-
- rc = dps310_get_coefs(data);
- if (rc < 0)
- return rc;
-
- rc = dps310_temp_workaround(data);
- if (rc < 0)
+ rc = dps310_startup(data);
+ if (rc)
return rc;
rc = devm_iio_device_register(&client->dev, iio);
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index bc06271fa38b..5e2d2d4d87b5 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -25,13 +25,6 @@ enum {
MS5607,
};
-struct ms5611_chip_info {
- u16 prom[MS5611_PROM_WORDS_NB];
-
- int (*temp_and_pressure_compensate)(struct ms5611_chip_info *chip_info,
- s32 *temp, s32 *pressure);
-};
-
/*
* OverSampling Rate descriptor.
* Warning: cmd MUST be kept aligned on a word boundary (see
@@ -50,12 +43,15 @@ struct ms5611_state {
const struct ms5611_osr *pressure_osr;
const struct ms5611_osr *temp_osr;
- int (*reset)(struct device *dev);
- int (*read_prom_word)(struct device *dev, int index, u16 *word);
- int (*read_adc_temp_and_pressure)(struct device *dev,
+ u16 prom[MS5611_PROM_WORDS_NB];
+
+ int (*reset)(struct ms5611_state *st);
+ int (*read_prom_word)(struct ms5611_state *st, int index, u16 *word);
+ int (*read_adc_temp_and_pressure)(struct ms5611_state *st,
s32 *temp, s32 *pressure);
- struct ms5611_chip_info *chip_info;
+ int (*compensate_temp_and_pressure)(struct ms5611_state *st, s32 *temp,
+ s32 *pressure);
struct regulator *vdd;
};
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index f5db9fa086f3..6943d118752e 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -76,7 +76,7 @@ static bool ms5611_prom_is_valid(u16 *prom, size_t len)
crc = (crc >> 12) & 0x000F;
- return crc_orig != 0x0000 && crc == crc_orig;
+ return crc == crc_orig;
}
static int ms5611_read_prom(struct iio_dev *indio_dev)
@@ -85,8 +85,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev)
struct ms5611_state *st = iio_priv(indio_dev);
for (i = 0; i < MS5611_PROM_WORDS_NB; i++) {
- ret = st->read_prom_word(&indio_dev->dev,
- i, &st->chip_info->prom[i]);
+ ret = st->read_prom_word(st, i, &st->prom[i]);
if (ret < 0) {
dev_err(&indio_dev->dev,
"failed to read prom at %d\n", i);
@@ -94,7 +93,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev)
}
}
- if (!ms5611_prom_is_valid(st->chip_info->prom, MS5611_PROM_WORDS_NB)) {
+ if (!ms5611_prom_is_valid(st->prom, MS5611_PROM_WORDS_NB)) {
dev_err(&indio_dev->dev, "PROM integrity check failed\n");
return -ENODEV;
}
@@ -108,28 +107,27 @@ static int ms5611_read_temp_and_pressure(struct iio_dev *indio_dev,
int ret;
struct ms5611_state *st = iio_priv(indio_dev);
- ret = st->read_adc_temp_and_pressure(&indio_dev->dev, temp, pressure);
+ ret = st->read_adc_temp_and_pressure(st, temp, pressure);
if (ret < 0) {
dev_err(&indio_dev->dev,
"failed to read temperature and pressure\n");
return ret;
}
- return st->chip_info->temp_and_pressure_compensate(st->chip_info,
- temp, pressure);
+ return st->compensate_temp_and_pressure(st, temp, pressure);
}
-static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info,
+static int ms5611_temp_and_pressure_compensate(struct ms5611_state *st,
s32 *temp, s32 *pressure)
{
s32 t = *temp, p = *pressure;
s64 off, sens, dt;
- dt = t - (chip_info->prom[5] << 8);
- off = ((s64)chip_info->prom[2] << 16) + ((chip_info->prom[4] * dt) >> 7);
- sens = ((s64)chip_info->prom[1] << 15) + ((chip_info->prom[3] * dt) >> 8);
+ dt = t - (st->prom[5] << 8);
+ off = ((s64)st->prom[2] << 16) + ((st->prom[4] * dt) >> 7);
+ sens = ((s64)st->prom[1] << 15) + ((st->prom[3] * dt) >> 8);
- t = 2000 + ((chip_info->prom[6] * dt) >> 23);
+ t = 2000 + ((st->prom[6] * dt) >> 23);
if (t < 2000) {
s64 off2, sens2, t2;
@@ -155,17 +153,17 @@ static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_inf
return 0;
}
-static int ms5607_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info,
+static int ms5607_temp_and_pressure_compensate(struct ms5611_state *st,
s32 *temp, s32 *pressure)
{
s32 t = *temp, p = *pressure;
s64 off, sens, dt;
- dt = t - (chip_info->prom[5] << 8);
- off = ((s64)chip_info->prom[2] << 17) + ((chip_info->prom[4] * dt) >> 6);
- sens = ((s64)chip_info->prom[1] << 16) + ((chip_info->prom[3] * dt) >> 7);
+ dt = t - (st->prom[5] << 8);
+ off = ((s64)st->prom[2] << 17) + ((st->prom[4] * dt) >> 6);
+ sens = ((s64)st->prom[1] << 16) + ((st->prom[3] * dt) >> 7);
- t = 2000 + ((chip_info->prom[6] * dt) >> 23);
+ t = 2000 + ((st->prom[6] * dt) >> 23);
if (t < 2000) {
s64 off2, sens2, t2, tmp;
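
Both variants share the first-order temperature term; only the OFF/SENS scaling differs (<<16/<<15 with >>7/>>8 for the MS5611 versus <<17/<<16 with >>6/>>7 for the MS5607). A worked example of that shared term, using values recalled from the MS5611 datasheet's example (treat them as illustrative, not authoritative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t c5 = 33464, c6 = 28312;      /* PROM calibration words */
        int32_t  d2 = 8569150;                /* raw temperature sample */

        int64_t dt = d2 - ((int64_t)c5 << 8); /* same math as the driver */
        int32_t t  = 2000 + (int32_t)((c6 * dt) >> 23);

        /* dT=2366, TEMP=2007 -> 20.07 degrees C */
        printf("dT=%lld TEMP=%d.%02d C\n", (long long)dt, t / 100, t % 100);
        return 0;
    }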
@@ -196,7 +194,7 @@ static int ms5611_reset(struct iio_dev *indio_dev)
int ret;
struct ms5611_state *st = iio_priv(indio_dev);
- ret = st->reset(&indio_dev->dev);
+ ret = st->reset(st);
if (ret < 0) {
dev_err(&indio_dev->dev, "failed to reset device\n");
return ret;
@@ -343,15 +341,6 @@ static int ms5611_write_raw(struct iio_dev *indio_dev,
static const unsigned long ms5611_scan_masks[] = {0x3, 0};
-static struct ms5611_chip_info chip_info_tbl[] = {
- [MS5611] = {
- .temp_and_pressure_compensate = ms5611_temp_and_pressure_compensate,
- },
- [MS5607] = {
- .temp_and_pressure_compensate = ms5607_temp_and_pressure_compensate,
- }
-};
-
static const struct iio_chan_spec ms5611_channels[] = {
{
.type = IIO_PRESSURE,
@@ -434,7 +423,20 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
struct ms5611_state *st = iio_priv(indio_dev);
mutex_init(&st->lock);
- st->chip_info = &chip_info_tbl[type];
+
+ switch (type) {
+ case MS5611:
+ st->compensate_temp_and_pressure =
+ ms5611_temp_and_pressure_compensate;
+ break;
+ case MS5607:
+ st->compensate_temp_and_pressure =
+ ms5607_temp_and_pressure_compensate;
+ break;
+ default:
+ return -EINVAL;
+ }
+
st->temp_osr =
&ms5611_avail_temp_osr[ARRAY_SIZE(ms5611_avail_temp_osr) - 1];
st->pressure_osr =
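
The refactor above matters because the old chip_info_tbl[] was a single static table that also held the PROM words, so two probed sensors would have shared (and clobbered) one calibration array. Moving prom[] into ms5611_state and selecting a compensation callback per instance fixes that. A compact sketch of the resulting pattern, with hypothetical names:

    #include <stdio.h>

    struct state;
    typedef int (*compensate_fn)(struct state *st, int *temp, int *press);

    struct state {
        unsigned short prom[8];    /* per-instance calibration, not shared */
        compensate_fn compensate;  /* chosen once at probe time */
    };

    static int ms5611_comp(struct state *st, int *t, int *p) { return 0; }
    static int ms5607_comp(struct state *st, int *t, int *p) { return 0; }

    static int probe(struct state *st, int type)
    {
        switch (type) {            /* mirrors the switch added above */
        case 0: st->compensate = ms5611_comp; break;
        case 1: st->compensate = ms5607_comp; break;
        default: return -1;        /* -EINVAL in the driver */
        }
        return 0;
    }

    int main(void)
    {
        struct state st = { 0 };
        int t = 0, p = 0;

        if (!probe(&st, 0))
            printf("compensate rc=%d\n", st.compensate(&st, &t, &p));
        return 0;
    }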
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 8089c59adce5..3175816e657f 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -18,17 +18,15 @@
#include "ms5611.h"
-static int ms5611_i2c_reset(struct device *dev)
+static int ms5611_i2c_reset(struct ms5611_state *st)
{
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
-
return i2c_smbus_write_byte(st->client, MS5611_RESET);
}
-static int ms5611_i2c_read_prom_word(struct device *dev, int index, u16 *word)
+static int ms5611_i2c_read_prom_word(struct ms5611_state *st, int index,
+ u16 *word)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
ret = i2c_smbus_read_word_swapped(st->client,
MS5611_READ_PROM_WORD + (index << 1));
@@ -55,11 +53,10 @@ static int ms5611_i2c_read_adc(struct ms5611_state *st, s32 *val)
return 0;
}
-static int ms5611_i2c_read_adc_temp_and_pressure(struct device *dev,
+static int ms5611_i2c_read_adc_temp_and_pressure(struct ms5611_state *st,
s32 *temp, s32 *pressure)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
const struct ms5611_osr *osr = st->temp_osr;
ret = i2c_smbus_write_byte(st->client, osr->cmd);
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index b463eaa799ab..6e9cab61bc32 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -13,18 +13,17 @@
#include "ms5611.h"
-static int ms5611_spi_reset(struct device *dev)
+static int ms5611_spi_reset(struct ms5611_state *st)
{
u8 cmd = MS5611_RESET;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
return spi_write_then_read(st->client, &cmd, 1, NULL, 0);
}
-static int ms5611_spi_read_prom_word(struct device *dev, int index, u16 *word)
+static int ms5611_spi_read_prom_word(struct ms5611_state *st, int index,
+ u16 *word)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
ret = spi_w8r16be(st->client, MS5611_READ_PROM_WORD + (index << 1));
if (ret < 0)
@@ -35,11 +34,10 @@ static int ms5611_spi_read_prom_word(struct device *dev, int index, u16 *word)
return 0;
}
-static int ms5611_spi_read_adc(struct device *dev, s32 *val)
+static int ms5611_spi_read_adc(struct ms5611_state *st, s32 *val)
{
int ret;
u8 buf[3] = { MS5611_READ_ADC };
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
ret = spi_write_then_read(st->client, buf, 1, buf, 3);
if (ret < 0)
@@ -50,11 +48,10 @@ static int ms5611_spi_read_adc(struct device *dev, s32 *val)
return 0;
}
-static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
+static int ms5611_spi_read_adc_temp_and_pressure(struct ms5611_state *st,
s32 *temp, s32 *pressure)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
const struct ms5611_osr *osr = st->temp_osr;
/*
@@ -66,7 +63,7 @@ static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
return ret;
usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
- ret = ms5611_spi_read_adc(dev, temp);
+ ret = ms5611_spi_read_adc(st, temp);
if (ret < 0)
return ret;
@@ -76,7 +73,7 @@ static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
return ret;
usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
- return ms5611_spi_read_adc(dev, pressure);
+ return ms5611_spi_read_adc(st, pressure);
}
static int ms5611_spi_probe(struct spi_device *spi)
@@ -92,7 +89,7 @@ static int ms5611_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
spi->mode = SPI_MODE_0;
- spi->max_speed_hz = 20000000;
+ spi->max_speed_hz = min(spi->max_speed_hz, 20000000U);
spi->bits_per_word = 8;
ret = spi_setup(spi);
if (ret < 0)
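
The max_speed_hz change is the interesting one in this hunk: the old code overwrote whatever limit the board's device tree had imposed with a flat 20 MHz. Clamping instead honors a slower board-level limit while still capping at the chip's maximum. The arithmetic, as a standalone sketch:

    #include <stdio.h>

    static unsigned int clamp_speed(unsigned int board_hz)
    {
        /* keep the slower of the board limit and the 20 MHz chip cap */
        return board_hz < 20000000U ? board_hz : 20000000U;
    }

    int main(void)
    {
        printf("%u\n", clamp_speed(1000000U));  /* board wins: 1 MHz     */
        printf("%u\n", clamp_speed(50000000U)); /* chip cap wins: 20 MHz */
        return 0;
    }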
diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
index 2277d6336ac0..9ed5b9405ade 100644
--- a/drivers/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/iio/trigger/iio-trig-sysfs.c
@@ -209,9 +209,13 @@ static int iio_sysfs_trigger_remove(int id)
static int __init iio_sysfs_trig_init(void)
{
+ int ret;
device_initialize(&iio_sysfs_trig_dev);
dev_set_name(&iio_sysfs_trig_dev, "iio_sysfs_trigger");
- return device_add(&iio_sysfs_trig_dev);
+ ret = device_add(&iio_sysfs_trig_dev);
+ if (ret)
+ put_device(&iio_sysfs_trig_dev);
+ return ret;
}
module_init(iio_sysfs_trig_init);
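
The fix encodes a standard driver-core rule: once device_initialize() has run, the embedded kobject holds a reference, so a failed device_add() must be unwound with put_device() (which invokes the release callback) rather than silently leaked. A userspace analogue of that refcount contract:

    #include <stdio.h>

    struct object { int refs; };

    static void object_initialize(struct object *o) { o->refs = 1; }

    static void put_object(struct object *o)
    {
        if (--o->refs == 0)
            puts("released");      /* the release() callback would run here */
    }

    static int object_add(struct object *o) { return -1; /* forced failure */ }

    int main(void)
    {
        struct object obj;

        object_initialize(&obj);   /* takes the initial reference */
        if (object_add(&obj))
            put_object(&obj);      /* error path must drop it, not leak it */
        return 0;
    }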
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index de7df5ab06f3..052d15629153 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1434,7 +1434,7 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
return false;
memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_iif = net_dev->ifindex;
+ fl4.flowi4_oif = net_dev->ifindex;
fl4.daddr = daddr;
fl4.saddr = saddr;
@@ -1719,8 +1719,8 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
}
if (!validate_net_dev(*net_dev,
- (struct sockaddr *)&req->listen_addr_storage,
- (struct sockaddr *)&req->src_addr_storage)) {
+ (struct sockaddr *)&req->src_addr_storage,
+ (struct sockaddr *)&req->listen_addr_storage)) {
id_priv = ERR_PTR(-EHOSTUNREACH);
goto err;
}
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 726e70b68249..4348adc570a0 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -218,7 +218,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
}
for (i = 0; i < ports_num; i++) {
- char port_str[10];
+ char port_str[11];
ports[i].port_num = i + 1;
snprintf(port_str, sizeof(port_str), "%u", i + 1);
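
The one-byte bump is exact: a u32 printed with %u needs up to ten digits plus the terminating NUL, so char[10] truncates ten-digit values while char[11] always fits. Demonstrably:

    #include <stdio.h>

    int main(void)
    {
        char small[10], right[11];
        unsigned int v = 4294967295U;            /* UINT_MAX: 10 digits */

        int n = snprintf(small, sizeof(small), "%u", v);
        printf("wanted %d chars, char[10] kept \"%s\"\n", n, small);

        snprintf(right, sizeof(right), "%u", v); /* 10 digits + NUL fit */
        printf("char[11] kept \"%s\"\n", right);
        return 0;
    }

Port numbers here are small, but the compiler's format-truncation warning reasons about the type's full range.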
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index de66d7da1bf6..c7c3b9bae938 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1700,7 +1700,7 @@ static int assign_client_id(struct ib_client *client)
{
int ret;
- down_write(&clients_rwsem);
+ lockdep_assert_held(&clients_rwsem);
/*
* The add/remove callbacks must be called in FIFO/LIFO order. To
* achieve this we assign client_ids so they are sorted in
@@ -1709,14 +1709,11 @@ static int assign_client_id(struct ib_client *client)
client->client_id = highest_client_id;
ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
if (ret)
- goto out;
+ return ret;
highest_client_id++;
xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
-
-out:
- up_write(&clients_rwsem);
- return ret;
+ return 0;
}
static void remove_client_id(struct ib_client *client)
@@ -1746,25 +1743,35 @@ int ib_register_client(struct ib_client *client)
{
struct ib_device *device;
unsigned long index;
+ bool need_unreg = false;
int ret;
refcount_set(&client->uses, 1);
init_completion(&client->uses_zero);
+
+ /*
+ * The devices_rwsem is held in write mode to ensure that a racing
+ * ib_register_device() sees a consistent view of clients and devices.
+ */
+ down_write(&devices_rwsem);
+ down_write(&clients_rwsem);
ret = assign_client_id(client);
if (ret)
- return ret;
+ goto out;
- down_read(&devices_rwsem);
+ need_unreg = true;
xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
ret = add_client_context(device, client);
- if (ret) {
- up_read(&devices_rwsem);
- ib_unregister_client(client);
- return ret;
- }
+ if (ret)
+ goto out;
}
- up_read(&devices_rwsem);
- return 0;
+ ret = 0;
+out:
+ up_write(&clients_rwsem);
+ up_write(&devices_rwsem);
+ if (need_unreg && ret)
+ ib_unregister_client(client);
+ return ret;
}
EXPORT_SYMBOL(ib_register_client);
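
Note the locking shape this hunk establishes: devices_rwsem is always taken before clients_rwsem, both for write, and assign_client_id() now asserts the lock is held instead of acquiring it. A userspace sketch of that convention, with pthread rwlocks standing in for the kernel's rw_semaphores and a flag standing in for lockdep:

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t devices = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_rwlock_t clients = PTHREAD_RWLOCK_INITIALIZER;
    static int clients_write_held;          /* poor man's lockdep state */

    static int assign_id(void)
    {
        assert(clients_write_held);         /* lockdep_assert_held() */
        return 42;
    }

    int main(void)
    {
        /* fixed order: devices first, then clients */
        pthread_rwlock_wrlock(&devices);
        pthread_rwlock_wrlock(&clients);
        clients_write_held = 1;
        printf("id=%d\n", assign_id());
        clients_write_held = 0;
        pthread_rwlock_unlock(&clients);
        pthread_rwlock_unlock(&devices);
        return 0;
    }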
@@ -2762,10 +2769,18 @@ static int __init ib_core_init(void)
nldev_init();
rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
- roce_gid_mgmt_init();
+ ret = roce_gid_mgmt_init();
+ if (ret) {
+ pr_warn("Couldn't init RoCE GID management\n");
+ goto err_parent;
+ }
return 0;
+err_parent:
+ rdma_nl_unregister(RDMA_NL_LS);
+ nldev_exit();
+ unregister_pernet_device(&rdma_dev_net_ops);
err_compat:
unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
err_sa:
@@ -2788,8 +2803,8 @@ err:
static void __exit ib_core_cleanup(void)
{
roce_gid_mgmt_cleanup();
- nldev_exit();
rdma_nl_unregister(RDMA_NL_LS);
+ nldev_exit();
unregister_pernet_device(&rdma_dev_net_ops);
unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
ib_sa_cleanup();
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index e4905d9fecb0..a19e2104ffe9 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -493,7 +493,7 @@ static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
struct rdma_cm_id *cm_id = &id_priv->id;
if (port && port != cm_id->port_num)
- return 0;
+ return -EAGAIN;
if (cm_id->port_num &&
nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
@@ -694,6 +694,8 @@ static int fill_stat_counter_qps(struct sk_buff *msg,
int ret = 0;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
+ if (!table_attr)
+ return -EMSGSIZE;
rt = &counter->device->res[RDMA_RESTRACK_QP];
xa_lock(&rt->xa);
@@ -2078,6 +2080,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
},
[RDMA_NLDEV_CMD_SYS_SET] = {
.doit = nldev_set_sys_set_doit,
+ .flags = RDMA_NL_ADMIN_PERM,
},
[RDMA_NLDEV_CMD_STAT_SET] = {
.doit = nldev_stat_set_doit,
@@ -2098,7 +2101,7 @@ void __init nldev_init(void)
rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}
-void __exit nldev_exit(void)
+void nldev_exit(void)
{
rdma_nl_unregister(RDMA_NL_NLDEV);
}
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index ad3a092b8b5c..390123f87658 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -131,6 +131,11 @@ struct ib_umad_packet {
struct ib_user_mad mad;
};
+struct ib_rmpp_mad_hdr {
+ struct ib_mad_hdr mad_hdr;
+ struct ib_rmpp_hdr rmpp_hdr;
+} __packed;
+
#define CREATE_TRACE_POINTS
#include <trace/events/ib_umad.h>
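
The header-only struct exists because ib_umad_write() only dereferences the MAD and RMPP headers, never the RMPP payload, and the buffer it casts may be no larger than hdr_size(file) + IB_MGMT_RMPP_HDR. A userspace size check of that assumption, with 24- and 12-byte stand-ins for the two wire headers (IB_MGMT_RMPP_HDR is 36 in the kernel):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mad_hdr  { uint8_t b[24]; } __attribute__((packed));
    struct rmpp_hdr { uint8_t b[12]; } __attribute__((packed));

    struct rmpp_mad_hdr {               /* mirrors ib_rmpp_mad_hdr above */
        struct mad_hdr  mad_hdr;
        struct rmpp_hdr rmpp_hdr;
    } __attribute__((packed));

    int main(void)
    {
        static_assert(sizeof(struct rmpp_mad_hdr) == 36,
                      "header-only view: both headers, no payload");
        printf("%zu\n", sizeof(struct rmpp_mad_hdr));
        return 0;
    }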
@@ -494,11 +499,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct ib_umad_file *file = filp->private_data;
+ struct ib_rmpp_mad_hdr *rmpp_mad_hdr;
struct ib_umad_packet *packet;
struct ib_mad_agent *agent;
struct rdma_ah_attr ah_attr;
struct ib_ah *ah;
- struct ib_rmpp_mad *rmpp_mad;
__be64 *tid;
int ret, data_len, hdr_len, copy_offset, rmpp_active;
u8 base_version;
@@ -506,7 +511,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
return -EINVAL;
- packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
+ packet = kzalloc(sizeof(*packet) + IB_MGMT_RMPP_HDR, GFP_KERNEL);
if (!packet)
return -ENOMEM;
@@ -560,13 +565,13 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
goto err_up;
}
- rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
- hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
+ rmpp_mad_hdr = (struct ib_rmpp_mad_hdr *)packet->mad.data;
+ hdr_len = ib_get_mad_data_offset(rmpp_mad_hdr->mad_hdr.mgmt_class);
- if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
+ if (ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
&& ib_mad_kernel_rmpp_agent(agent)) {
copy_offset = IB_MGMT_RMPP_HDR;
- rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+ rmpp_active = ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) &
IB_MGMT_RMPP_FLAG_ACTIVE;
} else {
copy_offset = IB_MGMT_MAD_HDR;
@@ -615,12 +620,12 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
(be64_to_cpup(tid) & 0xffffffff));
- rmpp_mad->mad_hdr.tid = *tid;
+ rmpp_mad_hdr->mad_hdr.tid = *tid;
}
if (!ib_mad_kernel_rmpp_agent(agent)
- && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
- && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
+ && ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
+ && (ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
spin_lock_irq(&file->send_lock);
list_add_tail(&packet->list, &file->send_list);
spin_unlock_irq(&file->send_lock);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index d413dafb9211..80c76b62b12b 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1557,7 +1557,7 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
struct ib_uverbs_create_qp_resp resp;
struct ib_uqp_object *obj;
struct ib_xrcd *xrcd;
- struct ib_uobject *uninitialized_var(xrcd_uobj);
+ struct ib_uobject *xrcd_uobj;
struct ib_qp *qp;
struct ib_qp_open_attr attr;
int ret;
@@ -1863,8 +1863,13 @@ static int modify_qp(struct uverbs_attr_bundle *attrs,
attr->path_mtu = cmd->base.path_mtu;
if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
attr->path_mig_state = cmd->base.path_mig_state;
- if (cmd->base.attr_mask & IB_QP_QKEY)
+ if (cmd->base.attr_mask & IB_QP_QKEY) {
+ if (cmd->base.qkey & IB_QP_SET_QKEY && !capable(CAP_NET_RAW)) {
+ ret = -EPERM;
+ goto release_qp;
+ }
attr->qkey = cmd->base.qkey;
+ }
if (cmd->base.attr_mask & IB_QP_RQ_PSN)
attr->rq_psn = cmd->base.rq_psn;
if (cmd->base.attr_mask & IB_QP_SQ_PSN)
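
The new QKEY check above closes a privilege gap: controlled QKeys, those with the most significant bit set (which IB_QP_SET_QKEY appears to denote, though its value is not shown in this hunk), may now only be installed by a task with CAP_NET_RAW. The gate, reduced to a sketch:

    #include <stdbool.h>
    #include <stdio.h>

    #define PRIV_QKEY_BIT 0x80000000u   /* assumed value of IB_QP_SET_QKEY */

    static bool capable_net_raw(void) { return false; } /* capable() stand-in */

    static int check_qkey(unsigned int qkey)
    {
        if ((qkey & PRIV_QKEY_BIT) && !capable_net_raw())
            return -1;                  /* -EPERM in the driver */
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", check_qkey(0x1234u), check_qkey(0x80010000u));
        return 0;
    }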
@@ -1952,7 +1957,7 @@ static int ib_uverbs_ex_modify_qp(struct uverbs_attr_bundle *attrs)
* Last bit is reserved for extending the attr_mask by
* using another field.
*/
- BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));
+ BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1ULL << 31));
if (cmd.base.attr_mask &
~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
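
The 1ULL change in this hunk is a strict-correctness fix: with a 32-bit int, 1 << 31 shifts into the sign bit, which is undefined behavior (UBSan flags it), while 1ULL << 31 is well-defined and still compares equal where it should. Minimal illustration:

    #include <stdio.h>

    int main(void)
    {
        /* 1 << 31 would overflow signed int: undefined behavior */
        unsigned long long ok = 1ULL << 31;

        printf("%llu\n", ok);           /* 2147483648 */
        return 0;
    }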
@@ -3378,7 +3383,7 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
struct ib_usrq_object *obj;
struct ib_pd *pd;
struct ib_srq *srq;
- struct ib_uobject *uninitialized_var(xrcd_uobj);
+ struct ib_uobject *xrcd_uobj;
struct ib_srq_init_attr attr;
int ret;
struct ib_device *ib_dev;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index adb08c3fc085..af6dedabd6a5 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -230,8 +230,12 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
spin_lock_irq(&ev_queue->lock);
while (list_empty(&ev_queue->event_list)) {
- spin_unlock_irq(&ev_queue->lock);
+ if (ev_queue->is_closed) {
+ spin_unlock_irq(&ev_queue->lock);
+ return -EIO;
+ }
+ spin_unlock_irq(&ev_queue->lock);
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
@@ -241,12 +245,6 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
return -ERESTARTSYS;
spin_lock_irq(&ev_queue->lock);
-
- /* If device was disassociated and no event exists set an error */
- if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
- spin_unlock_irq(&ev_queue->lock);
- return -EIO;
- }
}
event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);
@@ -635,7 +633,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
if (hdr->in_words * 4 != count)
return -EINVAL;
- if (count < method_elm->req_size + sizeof(hdr)) {
+ if (count < method_elm->req_size + sizeof(*hdr)) {
/*
* rdma-core v18 and v19 have a bug where they send DESTROY_CQ
* with a 16 byte write instead of 24. Old kernels didn't
diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
index 9f013304e677..35e41c5ca1bb 100644
--- a/drivers/infiniband/core/uverbs_std_types_counters.c
+++ b/drivers/infiniband/core/uverbs_std_types_counters.c
@@ -105,6 +105,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
return ret;
uattr = uverbs_attr_get(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF);
+ if (IS_ERR(uattr))
+ return PTR_ERR(uattr);
read_attr.ncounters = uattr->ptr_attr.len / sizeof(u64);
read_attr.counters_buff = uverbs_zalloc(
attrs, array_size(read_attr.ncounters, sizeof(u64)));
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 5d896f6b2b61..f61933dacbfd 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -521,6 +521,8 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
ret = device->ops.create_ah(ah, ah_attr, flags, udata);
if (ret) {
+ if (ah->sgid_attr)
+ rdma_put_gid_attr(ah->sgid_attr);
kfree(ah);
return ERR_PTR(ret);
}
@@ -2840,15 +2842,18 @@ EXPORT_SYMBOL(__rdma_block_iter_start);
bool __rdma_block_iter_next(struct ib_block_iter *biter)
{
unsigned int block_offset;
+ unsigned int sg_delta;
if (!biter->__sg_nents || !biter->__sg)
return false;
biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
- biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
+ sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
- if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
+ if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
+ biter->__sg_advance += sg_delta;
+ } else {
biter->__sg_advance = 0;
biter->__sg = sg_next(biter->__sg);
biter->__sg_nents--;
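
The rewritten iterator compares the bytes remaining in the current segment against the step instead of accumulating into __sg_advance first, so the 32-bit advance can no longer be pushed past the segment (and wrap) on huge or unaligned entries. A userspace simulation of the corrected walk, with a second segment that starts mid-block:

    #include <stdio.h>

    struct seg { unsigned long long addr; unsigned int len; };

    int main(void)
    {
        struct seg sg[] = { { 0x10000, 8192 }, { 0x23800, 4096 } };
        unsigned int pg_bit = 12, advance = 0, i = 0;

        while (i < 2) {
            unsigned long long dma = sg[i].addr + advance;
            unsigned int off   = dma & ((1U << pg_bit) - 1);
            unsigned int delta = (1U << pg_bit) - off;

            printf("block at 0x%llx\n", dma);
            if (sg[i].len - advance > delta) {  /* room left in segment */
                advance += delta;
            } else {                            /* exhausted: next entry */
                advance = 0;
                i++;
            }
        }
        return 0;
    }

Expected blocks: 0x10000, 0x11000, 0x23800, 0x24000; the second segment's first block is short because it starts 0x800 bytes into a page.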
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index e55a1666c0cd..c2805384e832 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -104,10 +104,19 @@ struct bnxt_re_sqp_entries {
struct bnxt_re_qp *qp1_qp;
};
+#define BNXT_RE_MAX_GSI_SQP_ENTRIES 1024
+struct bnxt_re_gsi_context {
+ struct bnxt_re_qp *gsi_qp;
+ struct bnxt_re_qp *gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
+ struct bnxt_re_sqp_entries *sqp_tbl;
+};
+
#define BNXT_RE_MIN_MSIX 2
#define BNXT_RE_MAX_MSIX 9
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
+#define BNXT_RE_GEN_P5_MAX_VF 64
struct bnxt_re_dev {
struct ib_device ibdev;
@@ -164,10 +173,7 @@ struct bnxt_re_dev {
u16 cosq[2];
/* QP for handling QP1 packets */
- u32 sqp_id;
- struct bnxt_re_qp *qp1_sqp;
- struct bnxt_re_ah *sqp_ah;
- struct bnxt_re_sqp_entries sqp_tbl[1024];
+ struct bnxt_re_gsi_context gsi_ctx;
atomic_t nq_alloc_cnt;
u32 is_virtfn;
u32 num_vfs;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index dd006b177b54..c9a7c03403ac 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -330,7 +330,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
*/
if (ctx->idx == 0 &&
rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
- ctx->refcnt == 1 && rdev->qp1_sqp) {
+ ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
dev_dbg(rdev_to_dev(rdev),
"Trying to delete GID0 while QP1 is alive\n");
return -EFAULT;
@@ -760,6 +760,49 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}
+static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
+{
+ struct bnxt_re_qp *gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
+ struct bnxt_re_dev *rdev;
+ int rc = 0;
+
+ rdev = qp->rdev;
+ gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+ gsi_sah = rdev->gsi_ctx.gsi_sah;
+
+ dev_dbg(rdev_to_dev(rdev), "Destroy the shadow AH\n");
+ bnxt_qplib_destroy_ah(&rdev->qplib_res,
+ &gsi_sah->qplib_ah,
+ true);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
+
+ dev_dbg(rdev_to_dev(rdev), "Destroy the shadow QP\n");
+ rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Destroy Shadow QP failed");
+ goto fail;
+ }
+ bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+
+ /* remove from active qp list */
+ mutex_lock(&rdev->qp_lock);
+ list_del(&gsi_sqp->list);
+ mutex_unlock(&rdev->qp_lock);
+ atomic_dec(&rdev->qp_count);
+
+ kfree(rdev->gsi_ctx.sqp_tbl);
+ kfree(gsi_sah);
+ kfree(gsi_sqp);
+ rdev->gsi_ctx.gsi_sqp = NULL;
+ rdev->gsi_ctx.gsi_sah = NULL;
+ rdev->gsi_ctx.sqp_tbl = NULL;
+
+ return 0;
+fail:
+ return rc;
+}
+
/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
@@ -769,6 +812,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
int rc;
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
+
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
@@ -783,40 +827,24 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
- if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
- bnxt_qplib_destroy_ah(&rdev->qplib_res, &rdev->sqp_ah->qplib_ah,
- false);
-
- bnxt_qplib_clean_qp(&qp->qplib_qp);
- rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
- &rdev->qp1_sqp->qplib_qp);
- if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to destroy Shadow QP");
- return rc;
- }
- bnxt_qplib_free_qp_res(&rdev->qplib_res,
- &rdev->qp1_sqp->qplib_qp);
- mutex_lock(&rdev->qp_lock);
- list_del(&rdev->qp1_sqp->list);
- atomic_dec(&rdev->qp_count);
- mutex_unlock(&rdev->qp_lock);
-
- kfree(rdev->sqp_ah);
- kfree(rdev->qp1_sqp);
- rdev->qp1_sqp = NULL;
- rdev->sqp_ah = NULL;
+ if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
+ rc = bnxt_re_destroy_gsi_sqp(qp);
+ if (rc)
+ goto sh_fail;
}
- ib_umem_release(qp->rumem);
- ib_umem_release(qp->sumem);
-
mutex_lock(&rdev->qp_lock);
list_del(&qp->list);
- atomic_dec(&rdev->qp_count);
mutex_unlock(&rdev->qp_lock);
+ atomic_dec(&rdev->qp_count);
+
+ ib_umem_release(qp->rumem);
+ ib_umem_release(qp->sumem);
+
kfree(qp);
return 0;
+sh_fail:
+ return rc;
}
static u8 __from_ib_qp_type(enum ib_qp_type type)
@@ -984,8 +1012,6 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
if (rc)
goto fail;
- rdev->sqp_id = qp->qplib_qp.id;
-
spin_lock_init(&qp->sq_lock);
INIT_LIST_HEAD(&qp->list);
mutex_lock(&rdev->qp_lock);
@@ -998,205 +1024,375 @@ fail:
return NULL;
}
-struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata)
+static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr)
{
- struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
- struct bnxt_re_dev *rdev = pd->rdev;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- struct bnxt_re_qp *qp;
- struct bnxt_re_cq *cq;
- struct bnxt_re_srq *srq;
- int rc, entries;
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ int entries;
- if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
- (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
- (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
- (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
- (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
- return ERR_PTR(-EINVAL);
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ if (init_attr->srq) {
+ struct bnxt_re_srq *srq;
- qp->rdev = rdev;
- ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
- qp->qplib_qp.pd = &pd->qplib_pd;
- qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
- qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
+ srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
+ if (!srq) {
+ dev_err(rdev_to_dev(rdev), "SRQ not found");
+ return -EINVAL;
+ }
+ qplqp->srq = &srq->qplib_srq;
+ qplqp->rq.max_wqe = 0;
+ } else {
+ /* Allocate 1 more than what's provided so posting max doesn't
+ * mean empty.
+ */
+ entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
+ qplqp->rq.max_wqe = min_t(u32, entries,
+ dev_attr->max_qp_wqes + 1);
- if (qp_init_attr->qp_type == IB_QPT_GSI &&
- bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
- qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_GSI;
- if (qp->qplib_qp.type == IB_QPT_MAX) {
+ qplqp->rq.q_full_delta = qplqp->rq.max_wqe -
+ init_attr->cap.max_recv_wr;
+ qplqp->rq.max_sge = init_attr->cap.max_recv_sge;
+ if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ }
+
+ return 0;
+}
+
+static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+}
+
+static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ int entries;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ qplqp->sq.max_sge = init_attr->cap.max_send_sge;
+ if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
+ qplqp->sq.max_sge = dev_attr->max_qp_sges;
+ /*
+ * Change the SQ depth if the user has requested a minimum via
+ * configfs. Only supported for kernel consumers.
+ */
+ entries = init_attr->cap.max_send_wr;
+ /* Allocate 128 + 1 more than what's provided */
+ entries = roundup_pow_of_two(entries + BNXT_QPLIB_RESERVED_QP_WRS + 1);
+ qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes +
+ BNXT_QPLIB_RESERVED_QP_WRS + 1);
+ qplqp->sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+ /*
+ * Reserve one slot for the phantom WQE. The application can then
+ * post one extra entry, but allowing that avoids an unexpected
+ * queue-full condition.
+ */
+ qplqp->sq.q_full_delta -= 1;
+}
+
+static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ int entries;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
+ qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
+ init_attr->cap.max_send_wr;
+ qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
+ if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
+ qplqp->sq.max_sge = dev_attr->max_qp_sges;
+}
+
+static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_qplib_chip_ctx *chip_ctx;
+ int qptype;
+
+ chip_ctx = &rdev->chip_ctx;
+
+ qptype = __from_ib_qp_type(init_attr->qp_type);
+ if (qptype == IB_QPT_MAX) {
dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
- qp->qplib_qp.type);
- rc = -EINVAL;
- goto fail;
+ qptype);
+ qptype = -EINVAL;
+ goto out;
}
- qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
- qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
- IB_SIGNAL_ALL_WR) ? true : false);
+ if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
+ init_attr->qp_type == IB_QPT_GSI)
+ qptype = CMDQ_CREATE_QP_TYPE_GSI;
+out:
+ return qptype;
+}
- qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
- if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
+static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
+ int rc = 0, qptype;
- if (qp_init_attr->send_cq) {
- cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
- ib_cq);
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ /* Setup misc params */
+ ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
+ qplqp->pd = &pd->qplib_pd;
+ qplqp->qp_handle = (u64)qplqp;
+ qplqp->max_inline_data = init_attr->cap.max_inline_data;
+ qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
+ true : false);
+ qptype = bnxt_re_init_qp_type(rdev, init_attr);
+ if (qptype < 0) {
+ rc = qptype;
+ goto out;
+ }
+ qplqp->type = (u8)qptype;
+
+ if (init_attr->qp_type == IB_QPT_RC) {
+ qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
+ qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
+ }
+ qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
+ qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
+ if (init_attr->create_flags)
+ dev_dbg(rdev_to_dev(rdev),
+ "QP create flags 0x%x not supported",
+ init_attr->create_flags);
+
+ /* Setup CQs */
+ if (init_attr->send_cq) {
+ cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
if (!cq) {
dev_err(rdev_to_dev(rdev), "Send CQ not found");
rc = -EINVAL;
- goto fail;
+ goto out;
}
- qp->qplib_qp.scq = &cq->qplib_cq;
+ qplqp->scq = &cq->qplib_cq;
qp->scq = cq;
}
- if (qp_init_attr->recv_cq) {
- cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
- ib_cq);
+ if (init_attr->recv_cq) {
+ cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
if (!cq) {
dev_err(rdev_to_dev(rdev), "Receive CQ not found");
rc = -EINVAL;
- goto fail;
+ goto out;
}
- qp->qplib_qp.rcq = &cq->qplib_cq;
+ qplqp->rcq = &cq->qplib_cq;
qp->rcq = cq;
}
- if (qp_init_attr->srq) {
- srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
- ib_srq);
- if (!srq) {
- dev_err(rdev_to_dev(rdev), "SRQ not found");
- rc = -EINVAL;
- goto fail;
- }
- qp->qplib_qp.srq = &srq->qplib_srq;
- qp->qplib_qp.rq.max_wqe = 0;
- } else {
- /* Allocate 1 more than what's provided so posting max doesn't
- * mean empty
- */
- entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
- qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes + 1);
+ /* Setup RQ/SRQ */
+ rc = bnxt_re_init_rq_attr(qp, init_attr);
+ if (rc)
+ goto out;
+ if (init_attr->qp_type == IB_QPT_GSI)
+ bnxt_re_adjust_gsi_rq_attr(qp);
+
+ /* Setup SQ */
+ bnxt_re_init_sq_attr(qp, init_attr, udata);
+ if (init_attr->qp_type == IB_QPT_GSI)
+ bnxt_re_adjust_gsi_sq_attr(qp, init_attr);
+
+ if (udata) /* This will update DPI and qp_handle */
+ rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
+out:
+ return rc;
+}
- qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
- qp_init_attr->cap.max_recv_wr;
+static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
+ struct bnxt_re_pd *pd)
+{
+ struct bnxt_re_sqp_entries *sqp_tbl = NULL;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_qp *sqp;
+ struct bnxt_re_ah *sah;
+ int rc = 0;
+
+ rdev = qp->rdev;
+ /* Create a shadow QP to handle the QP1 traffic */
+ sqp_tbl = kzalloc(sizeof(*sqp_tbl) * BNXT_RE_MAX_GSI_SQP_ENTRIES,
+ GFP_KERNEL);
+ if (!sqp_tbl)
+ return -ENOMEM;
+ rdev->gsi_ctx.sqp_tbl = sqp_tbl;
- qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
- if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
+ sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
+ if (!sqp) {
+ rc = -ENODEV;
+ dev_err(rdev_to_dev(rdev),
+ "Failed to create Shadow QP for QP1");
+ goto out;
}
+ rdev->gsi_ctx.gsi_sqp = sqp;
- qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
+ sqp->rcq = qp->rcq;
+ sqp->scq = qp->scq;
+ sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
+ &qp->qplib_qp);
+ if (!sah) {
+ bnxt_qplib_destroy_qp(&rdev->qplib_res,
+ &sqp->qplib_qp);
+ rc = -ENODEV;
+ dev_err(rdev_to_dev(rdev),
+ "Failed to create AH entry for ShadowQP");
+ goto out;
+ }
+ rdev->gsi_ctx.gsi_sah = sah;
- if (qp_init_attr->qp_type == IB_QPT_GSI &&
- !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) {
- /* Allocate 1 more than what's provided */
- entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
- qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes + 1);
- qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
- qp_init_attr->cap.max_send_wr;
- qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
- if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
- qp->qplib_qp.sq.max_sge++;
- if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
-
- qp->qplib_qp.rq_hdr_buf_size =
- BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
-
- qp->qplib_qp.sq_hdr_buf_size =
- BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
- qp->qplib_qp.dpi = &rdev->dpi_privileged;
- rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
- goto fail;
- }
- /* Create a shadow QP to handle the QP1 traffic */
- rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
- &qp->qplib_qp);
- if (!rdev->qp1_sqp) {
- rc = -EINVAL;
- dev_err(rdev_to_dev(rdev),
- "Failed to create Shadow QP for QP1");
- goto qp_destroy;
- }
- rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
- &qp->qplib_qp);
- if (!rdev->sqp_ah) {
- bnxt_qplib_destroy_qp(&rdev->qplib_res,
- &rdev->qp1_sqp->qplib_qp);
- rc = -EINVAL;
- dev_err(rdev_to_dev(rdev),
- "Failed to create AH entry for ShadowQP");
- goto qp_destroy;
- }
+ return 0;
+out:
+ kfree(sqp_tbl);
+ return rc;
+}
- } else {
- /* Allocate 128 + 1 more than what's provided */
- entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
- BNXT_QPLIB_RESERVED_QP_WRS + 1);
- qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes +
- BNXT_QPLIB_RESERVED_QP_WRS + 1);
- qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_qp *qplqp;
+ int rc = 0;
- /*
- * Reserving one slot for Phantom WQE. Application can
- * post one extra entry in this case. But allowing this to avoid
- * unexpected Queue full condition
- */
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
- qp->qplib_qp.sq.q_full_delta -= 1;
+ qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
+ qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
- qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
- qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
- if (udata) {
- rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
- if (rc)
- goto fail;
- } else {
- qp->qplib_qp.dpi = &rdev->dpi_privileged;
- }
+ rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "create HW QP1 failed!");
+ goto out;
+ }
+
+ rc = bnxt_re_create_shadow_gsi(qp, pd);
+out:
+ return rc;
+}
+
+static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
+ struct ib_qp_init_attr *init_attr,
+ struct bnxt_qplib_dev_attr *dev_attr)
+{
+ bool rc = true;
+ if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
+ init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
+ init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
+ init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
+ init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
+ dev_err(rdev_to_dev(rdev),
+ "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
+ init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
+ init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
+ init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
+ init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
+ init_attr->cap.max_inline_data,
+ dev_attr->max_inline_data);
+ rc = false;
+ }
+ return rc;
+}
+
+struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_re_qp *qp;
+ int rc;
+
+ rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
+ if (!rc) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ qp->rdev = rdev;
+ rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
+ if (rc)
+ goto fail;
+
+ if (qp_init_attr->qp_type == IB_QPT_GSI &&
+ !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) {
+ rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
+ if (rc == -ENODEV)
+ goto qp_destroy;
+ if (rc)
+ goto fail;
+ } else {
rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
goto free_umem;
}
+ if (udata) {
+ struct bnxt_re_qp_resp resp;
+
+ resp.qpid = qp->qplib_qp.id;
+ resp.rsvd = 0;
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
+ goto qp_destroy;
+ }
+ }
}
qp->ib_qp.qp_num = qp->qplib_qp.id;
+ if (qp_init_attr->qp_type == IB_QPT_GSI)
+ rdev->gsi_ctx.gsi_qp = qp;
spin_lock_init(&qp->sq_lock);
spin_lock_init(&qp->rq_lock);
-
- if (udata) {
- struct bnxt_re_qp_resp resp;
-
- resp.qpid = qp->ib_qp.qp_num;
- resp.rsvd = 0;
- rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
- goto qp_destroy;
- }
- }
INIT_LIST_HEAD(&qp->list);
mutex_lock(&rdev->qp_lock);
list_add_tail(&qp->list, &rdev->qp_list);
- atomic_inc(&rdev->qp_count);
mutex_unlock(&rdev->qp_lock);
+ atomic_inc(&rdev->qp_count);
return &qp->ib_qp;
qp_destroy:
@@ -1206,6 +1402,7 @@ free_umem:
ib_umem_release(qp->sumem);
fail:
kfree(qp);
+exit:
return ERR_PTR(rc);
}
@@ -1426,7 +1623,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
switch (srq_attr_mask) {
case IB_SRQ_MAX_WR:
/* SRQ resize is not supported */
- break;
+ return -EINVAL;
case IB_SRQ_LIMIT:
/* Change the SRQ threshold */
if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
@@ -1441,13 +1638,12 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
/* On success, update the shadow */
srq->srq_limit = srq_attr->srq_limit;
/* No need to build and send response back to udata */
- break;
+ return 0;
default:
dev_err(rdev_to_dev(rdev),
"Unsupported srq_attr_mask 0x%x", srq_attr_mask);
return -EINVAL;
}
- return 0;
}
int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
@@ -1504,7 +1700,7 @@ static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp1_qp,
int qp_attr_mask)
{
- struct bnxt_re_qp *qp = rdev->qp1_sqp;
+ struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
int rc = 0;
if (qp_attr_mask & IB_QP_STATE) {
@@ -1768,7 +1964,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
return rc;
}
- if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
+ if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
return rc;
}
@@ -2013,9 +2209,12 @@ static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
struct bnxt_qplib_swqe *wqe,
int payload_size)
{
+ struct bnxt_re_sqp_entries *sqp_entry;
struct bnxt_qplib_sge ref, sge;
+ struct bnxt_re_dev *rdev;
u32 rq_prod_index;
- struct bnxt_re_sqp_entries *sqp_entry;
+
+ rdev = qp->rdev;
rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
@@ -2030,7 +2229,7 @@ static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
ref.lkey = wqe->sg_list[0].lkey;
ref.size = wqe->sg_list[0].size;
- sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
/* SGE 1 */
wqe->sg_list[0].addr = sge.addr;
@@ -2850,12 +3049,13 @@ static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
return rc;
}
-static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
+static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
struct bnxt_qplib_cqe *cqe)
{
- struct bnxt_re_dev *rdev = qp1_qp->rdev;
+ struct bnxt_re_dev *rdev = gsi_qp->rdev;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
- struct bnxt_re_qp *qp = rdev->qp1_sqp;
+ struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
struct ib_send_wr *swr;
struct ib_ud_wr udwr;
struct ib_recv_wr rwr;
@@ -2878,19 +3078,19 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
swr = &udwr.wr;
tbl_idx = cqe->wr_id;
- rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
- (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
- rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
+ rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
+ (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
+ rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
tbl_idx);
/* Shadow QP header buffer */
- shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
+ shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
tbl_idx);
- sqp_entry = &rdev->sqp_tbl[tbl_idx];
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
/* Store this cqe */
memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
- sqp_entry->qp1_qp = qp1_qp;
+ sqp_entry->qp1_qp = gsi_qp;
/* Find packet type from the cqe */
@@ -2944,7 +3144,7 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
rwr.wr_id = tbl_idx;
rwr.next = NULL;
- rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
+ rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to post Rx buffers to shadow QP");
@@ -2956,15 +3156,13 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
swr->wr_id = tbl_idx;
swr->opcode = IB_WR_SEND;
swr->next = NULL;
-
- udwr.ah = &rdev->sqp_ah->ib_ah;
- udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
- udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
+ gsi_sah = rdev->gsi_ctx.gsi_sah;
+ udwr.ah = &gsi_sah->ib_ah;
+ udwr.remote_qpn = gsi_sqp->qplib_qp.id;
+ udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
/* post data received in the send queue */
- rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
-
- return 0;
+ return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
}
static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
@@ -3029,12 +3227,12 @@ static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}
-static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
+static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
struct ib_wc *wc,
struct bnxt_qplib_cqe *cqe)
{
- struct bnxt_re_dev *rdev = qp->rdev;
- struct bnxt_re_qp *qp1_qp = NULL;
+ struct bnxt_re_dev *rdev = gsi_sqp->rdev;
+ struct bnxt_re_qp *gsi_qp = NULL;
struct bnxt_qplib_cqe *orig_cqe = NULL;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
int nw_type;
@@ -3044,13 +3242,13 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
tbl_idx = cqe->wr_id;
- sqp_entry = &rdev->sqp_tbl[tbl_idx];
- qp1_qp = sqp_entry->qp1_qp;
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
+ gsi_qp = sqp_entry->qp1_qp;
orig_cqe = &sqp_entry->cqe;
wc->wr_id = sqp_entry->wrid;
wc->byte_len = orig_cqe->length;
- wc->qp = &qp1_qp->ib_qp;
+ wc->qp = &gsi_qp->ib_qp;
wc->ex.imm_data = orig_cqe->immdata;
wc->src_qp = orig_cqe->src_qp;
@@ -3137,7 +3335,7 @@ static int send_phantom_wqe(struct bnxt_re_qp *qp)
int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
- struct bnxt_re_qp *qp;
+ struct bnxt_re_qp *qp, *sh_qp;
struct bnxt_qplib_cqe *cqe;
int i, ncqe, budget;
struct bnxt_qplib_q *sq;
@@ -3201,8 +3399,9 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
switch (cqe->opcode) {
case CQ_BASE_CQE_TYPE_REQ:
- if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
- qp->rdev->qp1_sqp->qplib_qp.id) {
+ sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
+ if (sh_qp &&
+ qp->qplib_qp.id == sh_qp->qplib_qp.id) {
/* Handle this completion with
* the stored completion
*/
@@ -3228,7 +3427,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
* stored in the table
*/
tbl_idx = cqe->wr_id;
- sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
+ sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
wc->wr_id = sqp_entry->wrid;
bnxt_re_process_res_rawqp1_wc(wc, cqe);
break;
@@ -3236,8 +3435,9 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
bnxt_re_process_res_rc_wc(wc, cqe);
break;
case CQ_BASE_CQE_TYPE_RES_UD:
- if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
- qp->rdev->qp1_sqp->qplib_qp.id) {
+ sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
+ if (sh_qp &&
+ qp->qplib_qp.id == sh_qp->qplib_qp.id) {
/* Handle this completion with
* the stored completion
*/
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index cfe5f47d9890..34d1c4264602 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -70,7 +70,7 @@ static char version[] =
BNXT_RE_DESC "\n";
MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
-MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
+MODULE_DESCRIPTION(BNXT_RE_DESC);
MODULE_LICENSE("Dual BSD/GPL");
/* globals */
@@ -119,61 +119,76 @@ static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
* reserved for the function. The driver may choose to allocate fewer
* resources than the firmware maximum.
*/
-static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
- u32 vf_qps = 0, vf_srqs = 0, vf_cqs = 0, vf_mrws = 0, vf_gids = 0;
- u32 i;
- u32 vf_pct;
- u32 num_vfs;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *attr;
+ struct bnxt_qplib_ctx *ctx;
+ int i;
- rdev->qplib_ctx.qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
- dev_attr->max_qp);
+ attr = &rdev->dev_attr;
+ ctx = &rdev->qplib_ctx;
- rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
+ ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
+ attr->max_qp);
+ ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
/* Use max_mr from fw since max_mrw does not get set */
- rdev->qplib_ctx.mrw_count = min_t(u32, rdev->qplib_ctx.mrw_count,
- dev_attr->max_mr);
- rdev->qplib_ctx.srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
- dev_attr->max_srq);
- rdev->qplib_ctx.cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT,
- dev_attr->max_cq);
-
- for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
- rdev->qplib_ctx.tqm_count[i] =
- rdev->dev_attr.tqm_alloc_reqs[i];
-
- if (rdev->num_vfs) {
- /*
- * Reserve a set of resources for the PF. Divide the remaining
- * resources among the VFs
- */
- vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
- num_vfs = 100 * rdev->num_vfs;
- vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs;
- vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs;
- vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs;
- /*
- * The driver allows many more MRs than other resources. If the
- * firmware does also, then reserve a fixed amount for the PF
- * and divide the rest among VFs. VFs may use many MRs for NFS
- * mounts, ISER, NVME applications, etc. If the firmware
- * severely restricts the number of MRs, then let PF have
- * half and divide the rest among VFs, as for the other
- * resource types.
- */
- if (rdev->qplib_ctx.mrw_count < BNXT_RE_MAX_MRW_COUNT_64K)
- vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs;
- else
- vf_mrws = (rdev->qplib_ctx.mrw_count -
- BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs;
- vf_gids = BNXT_RE_MAX_GID_PER_VF;
+ ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
+ ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
+ attr->max_srq);
+ ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
+ if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ rdev->qplib_ctx.tqm_count[i] =
+ rdev->dev_attr.tqm_alloc_reqs[i];
+}
+
+static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
+{
+ struct bnxt_qplib_vf_res *vf_res;
+ u32 mrws = 0;
+ u32 vf_pct;
+ u32 nvfs;
+
+ vf_res = &qplib_ctx->vf_res;
+ /*
+ * Reserve a set of resources for the PF. Divide the remaining
+ * resources among the VFs
+ */
+ vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
+ nvfs = num_vf;
+ num_vf = 100 * num_vf;
+ vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
+ vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
+ vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
+ /*
+ * The driver allows many more MRs than other resources. If the
+ * firmware does also, then reserve a fixed amount for the PF and
+ * divide the rest among VFs. VFs may use many MRs for NFS
+ * mounts, ISER, NVME applications, etc. If the firmware severely
+ * restricts the number of MRs, then let PF have half and divide
+ * the rest among VFs, as for the other resource types.
+ */
+ if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
+ mrws = qplib_ctx->mrw_count * vf_pct;
+ nvfs = num_vf;
+ } else {
+ mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
}
- rdev->qplib_ctx.vf_res.max_mrw_per_vf = vf_mrws;
- rdev->qplib_ctx.vf_res.max_gid_per_vf = vf_gids;
- rdev->qplib_ctx.vf_res.max_qp_per_vf = vf_qps;
- rdev->qplib_ctx.vf_res.max_srq_per_vf = vf_srqs;
- rdev->qplib_ctx.vf_res.max_cq_per_vf = vf_cqs;
+ vf_res->max_mrw_per_vf = (mrws / nvfs);
+ vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
+}
+
+static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+{
+ u32 num_vfs;
+
+ memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
+ bnxt_re_limit_pf_res(rdev);
+
+ num_vfs = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
+ BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
+ if (num_vfs)
+ bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}
/* for handling bnxt_en callbacks later */
@@ -193,9 +208,11 @@ static void bnxt_re_sriov_config(void *p, int num_vfs)
return;
rdev->num_vfs = num_vfs;
- bnxt_re_set_resource_limits(rdev);
- bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
- &rdev->qplib_ctx);
+ if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)) {
+ bnxt_re_set_resource_limits(rdev);
+ bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
+ &rdev->qplib_ctx);
+ }
}
static void bnxt_re_shutdown(void *p)
@@ -897,10 +914,14 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
return 0;
}
+#define BNXT_RE_GEN_P5_PF_NQ_DB 0x10000
+#define BNXT_RE_GEN_P5_VF_NQ_DB 0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
return bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
- 0x10000 : rdev->msix_entries[indx].db_offset;
+ (rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
+ BNXT_RE_GEN_P5_PF_NQ_DB) :
+ rdev->msix_entries[indx].db_offset;
}
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
@@ -1106,7 +1127,8 @@ static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp)
{
- return (qp->ib_qp.qp_type == IB_QPT_GSI) || (qp == rdev->qp1_sqp);
+ return (qp->ib_qp.qp_type == IB_QPT_GSI) ||
+ (qp == rdev->gsi_ctx.gsi_sqp);
}
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
@@ -1410,8 +1432,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
rdev->is_virtfn);
if (rc)
goto disable_rcfw;
- if (!rdev->is_virtfn)
- bnxt_re_set_resource_limits(rdev);
+
+ bnxt_re_set_resource_limits(rdev);
rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0,
bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx));
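
Note: the bnxt_re_limit_vf_res() hunk above is plain integer-percentage arithmetic: BNXT_RE_PCT_RSVD_FOR_PF percent of each pool is held back for the PF, and the remainder is divided evenly among the VFs; multiplying num_vf by 100 folds the percentage and the per-VF division into one step. A minimal user-space sketch of the same math (the 20% PF reservation here is an assumed value, not the driver's constant):

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch of the split in bnxt_re_limit_vf_res(); the PF percentage
     * is hypothetical. */
    static uint32_t per_vf_share(uint32_t total, uint32_t num_vf)
    {
            const uint32_t pct_rsvd_pf = 20;  /* assumed, not the driver's value */
            uint32_t vf_pct = 100 - pct_rsvd_pf;

            /* equals (total * vf_pct / 100) / num_vf, in one division */
            return (total * vf_pct) / (100 * num_vf);
    }

    int main(void)
    {
            printf("%u QPs per VF\n", per_vf_share(65536, 8));
            return 0;
    }
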
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 5fc5ab7813c0..18b579c8a8c5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -2606,11 +2606,8 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
- if (!qp) {
- dev_err(&cq->hwq.pdev->dev,
- "FP: CQ Process terminal qp is NULL\n");
+ if (!qp)
return -EINVAL;
- }
/* Must block new posting of SQ and RQ */
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 60c8f76aab33..5cdfa84faf85 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -494,8 +494,10 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
* shall setup this area for VF. Skipping the
* HW programming
*/
- if (is_virtfn || bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+ if (is_virtfn)
goto skip_ctx_setup;
+ if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+ goto config_vf_res;
level = ctx->qpc_tbl.level;
req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
@@ -540,6 +542,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);
+config_vf_res:
req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 535ee41ee421..c7214c49f202 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1965,6 +1965,9 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
int win;
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
req = __skb_put_zero(skb, sizeof(*req));
req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
@@ -3282,7 +3285,7 @@ static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
- struct in6_addr uninitialized_var(addr);
+ struct in6_addr addr;
struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 16b74591a68d..a3595b8a4bcf 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -754,7 +754,7 @@ skip_cqe:
static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp,
struct ib_wc *wc, struct c4iw_srq *srq)
{
- struct t4_cqe uninitialized_var(cqe);
+ struct t4_cqe cqe;
struct t4_wq *wq = qhp ? &qhp->wq : NULL;
u32 credit = 0;
u8 cqe_flushed;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 17f1e59ab12e..559b24ca5205 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1231,7 +1231,7 @@ static int pbl_continuous_initialize(struct efa_dev *dev,
*/
static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
{
- u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
+ u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE);
struct scatterlist *sgl;
int sg_dma_cnt, err;
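
Note: the one-line EFA fix above matters because the indirect PBL is carved into chunks whose payload size need not match PAGE_SIZE; dividing by the wrong unit miscounts the chunks needed. DIV_ROUND_UP semantics, demonstrated with an assumed payload size (the real EFA_CHUNK_PAYLOAD_SIZE is defined elsewhere in the driver):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long pbl_bytes = 1UL << 20;    /* 1 MiB of PBL entries */

            /* 4064 is a stand-in chunk payload, deliberately < 4096 */
            printf("pages:  %lu\n", DIV_ROUND_UP(pbl_bytes, 4096UL));
            printf("chunks: %lu\n", DIV_ROUND_UP(pbl_bytes, 4064UL));
            return 0;
    }
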
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 1aeea5d65c01..832b878fa67e 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -218,6 +218,8 @@ out:
for (node = 0; node < node_affinity.num_possible_nodes; node++)
hfi1_per_node_cntr[node] = 1;
+ pci_dev_put(dev);
+
return 0;
}
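
Note: the affinity.c hunk pairs an existing pci_get_device() with the pci_dev_put() it was missing; pci_get_device() returns a referenced device, and every successful lookup must be balanced or the refcount leaks. A minimal kernel-style sketch (the IDs are placeholders, not from this driver):

    #include <linux/pci.h>
    #include <linux/errno.h>

    static int probe_once(void)
    {
            /* 0x8086/0x24f0 are placeholder vendor/device IDs */
            struct pci_dev *dev = pci_get_device(0x8086, 0x24f0, NULL);

            if (!dev)
                    return -ENODEV;

            /* ... inspect dev ... */

            pci_dev_put(dev);  /* balance the reference pci_get_device() took */
            return 0;
    }
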
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 10924f122072..65d6bf34614c 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12191,6 +12191,7 @@ static void free_cntrs(struct hfi1_devdata *dd)
if (dd->synth_stats_timer.function)
del_timer_sync(&dd->synth_stats_timer);
+ cancel_work_sync(&dd->update_cntr_work);
ppd = (struct hfi1_pportdata *)(dd + 1);
for (i = 0; i < dd->num_pports; i++, ppd++) {
kfree(ppd->cntrs);
diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
index d106d23016ba..75e39e403a58 100644
--- a/drivers/infiniband/hw/hfi1/efivar.c
+++ b/drivers/infiniband/hw/hfi1/efivar.c
@@ -152,7 +152,7 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
unsigned long *size, void **return_data)
{
char prefix_name[64];
- char name[64];
+ char name[128];
int result;
int i;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 8c7ba7bad42b..607e2636a6d1 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1224,8 +1224,10 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
goto done;
ret = init_user_ctxt(fd, uctxt);
- if (ret)
+ if (ret) {
+ hfi1_free_ctxt_rcv_groups(uctxt);
goto done;
+ }
user_init(uctxt);
@@ -1361,12 +1363,15 @@ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
- return -EFAULT;
+ ret = -EFAULT;
addr = arg + offsetof(struct hfi1_tid_info, length);
- if (copy_to_user((void __user *)addr, &tinfo.length,
+ if (!ret && copy_to_user((void __user *)addr, &tinfo.length,
sizeof(tinfo.length)))
ret = -EFAULT;
+
+ if (ret)
+ hfi1_user_exp_rcv_invalid(fd, &tinfo);
}
return ret;
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index c09080712485..747ec08dec0d 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -1786,6 +1786,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
if (!dd->platform_config.data) {
dd_dev_err(dd, "%s: Missing config file\n", __func__);
+ ret = -EINVAL;
goto bail;
}
ptr = (u32 *)dd->platform_config.data;
@@ -1794,6 +1795,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
ptr++;
if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) {
dd_dev_err(dd, "%s: Bad config file\n", __func__);
+ ret = -EINVAL;
goto bail;
}
@@ -1817,6 +1819,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
if (file_length > dd->platform_config.size) {
dd_dev_info(dd, "%s:File claims to be larger than read size\n",
__func__);
+ ret = -EINVAL;
goto bail;
} else if (file_length < dd->platform_config.size) {
dd_dev_info(dd,
@@ -1837,6 +1840,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
dd_dev_err(dd, "%s: Failed validation at offset %ld\n",
__func__, (ptr - (u32 *)
dd->platform_config.data));
+ ret = -EINVAL;
goto bail;
}
@@ -1883,6 +1887,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
__func__, table_type,
(ptr - (u32 *)
dd->platform_config.data));
+ ret = -EINVAL;
goto bail; /* We don't trust this file now */
}
pcfgcache->config_tables[table_type].table = ptr;
@@ -1907,6 +1912,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
__func__, table_type,
(ptr -
(u32 *)dd->platform_config.data));
+ ret = -EINVAL;
goto bail; /* We don't trust this file now */
}
pcfgcache->config_tables[table_type].table_metadata =
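
Note: every firmware.c hunk above fixes the same bug class: an early `goto bail` that left `ret` at its last (successful) value, so a failed parse returned 0 to the caller. The shape of the fix, sketched:

    #include <linux/errno.h>

    /* Each failure path must set ret before jumping to the unwind
     * label; otherwise bail returns whatever ret last held. */
    static int parse_example(const void *cfg, unsigned long len)
    {
            int ret = 0;

            if (!cfg || !len) {
                    ret = -EINVAL;  /* the missing assignment the patch adds */
                    goto bail;
            }
            /* ... further checks, each setting ret on failure ... */
    bail:
            return ret;
    }
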
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 14d2a90964c3..a5631286c8e0 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -173,7 +173,7 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
goto unlock;
}
__mmu_int_rb_insert(mnode, &handler->root);
- list_add(&mnode->list, &handler->lru_list);
+ list_add_tail(&mnode->list, &handler->lru_list);
ret = handler->ops->insert(handler->ops_arg, mnode);
if (ret) {
@@ -220,8 +220,10 @@ bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
spin_lock_irqsave(&handler->lock, flags);
node = __mmu_rb_search(handler, addr, len);
if (node) {
- if (node->addr == addr && node->len == len)
+ if (node->addr == addr && node->len == len) {
+ list_move_tail(&node->list, &handler->lru_list);
goto unlock;
+ }
__mmu_int_rb_remove(node, &handler->root);
list_del(&node->list); /* remove from LRU list */
ret = true;
@@ -242,8 +244,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
INIT_LIST_HEAD(&del_list);
spin_lock_irqsave(&handler->lock, flags);
- list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
- list) {
+ list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
&stop)) {
__mmu_int_rb_remove(rbnode, &handler->root);
@@ -255,9 +256,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
}
spin_unlock_irqrestore(&handler->lock, flags);
- while (!list_empty(&del_list)) {
- rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
- list_del(&rbnode->list);
+ list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
handler->ops->remove(handler->ops_arg, rbnode);
}
}
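
Note: taken together, the mmu_rb.c hunks make lru_list a real LRU: new nodes go to the tail, an exact-match hit is refreshed with list_move_tail(), and eviction walks from the head (oldest first), which is why the reverse-order traversal can go. The discipline in isolation, as a kernel-style sketch:

    #include <linux/list.h>

    struct node {
            struct list_head list;
    };

    static void lru_insert(struct list_head *lru, struct node *n)
    {
            list_add_tail(&n->list, lru);           /* newest at the tail */
    }

    static void lru_touch(struct list_head *lru, struct node *n)
    {
            list_move_tail(&n->list, lru);          /* refresh on cache hit */
    }

    static struct node *lru_oldest(struct list_head *lru)
    {
            /* eviction starts at the head, the least recently used */
            return list_first_entry_or_null(lru, struct node, list);
    }
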
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 61362bd6d3ce..111705e6609c 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -45,6 +45,7 @@
*
*/
+#include <linux/bitfield.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
@@ -261,12 +262,6 @@ static u32 extract_speed(u16 linkstat)
return speed;
}
-/* return the PCIe link speed from the given link status */
-static u32 extract_width(u16 linkstat)
-{
- return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
-}
-
/* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */
static void update_lbus_info(struct hfi1_devdata *dd)
{
@@ -279,7 +274,7 @@ static void update_lbus_info(struct hfi1_devdata *dd)
return;
}
- dd->lbus_width = extract_width(linkstat);
+ dd->lbus_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat);
dd->lbus_speed = extract_speed(linkstat);
snprintf(dd->lbus_info, sizeof(dd->lbus_info),
"PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width);
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 1a82ea73a0fc..8303c506733c 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -955,8 +955,7 @@ void sc_disable(struct send_context *sc)
spin_unlock(&sc->release_lock);
write_seqlock(&sc->waitlock);
- if (!list_empty(&sc->piowait))
- list_move(&sc->piowait, &wake_list);
+ list_splice_init(&sc->piowait, &wake_list);
write_sequnlock(&sc->waitlock);
while (!list_empty(&wake_list)) {
struct iowait *wait;
@@ -2132,7 +2131,7 @@ int init_credit_return(struct hfi1_devdata *dd)
"Unable to allocate credit return DMA range for NUMA %d\n",
i);
ret = -ENOMEM;
- goto done;
+ goto free_cr_base;
}
}
set_dev_node(&dd->pcidev->dev, dd->node);
@@ -2140,6 +2139,10 @@ int init_credit_return(struct hfi1_devdata *dd)
ret = 0;
done:
return ret;
+
+free_cr_base:
+ free_credit_return(dd);
+ goto done;
}
void free_credit_return(struct hfi1_devdata *dd)
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 2a684fc6056e..e4d5c33baece 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -3203,7 +3203,6 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
int rval = 0;
- tx->num_desc++;
if ((unlikely(tx->num_desc == tx->desc_limit))) {
rval = _extend_sdma_tx_descs(dd, tx);
if (rval) {
@@ -3217,6 +3216,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
SDMA_MAP_NONE,
dd->sdma_pad_phys,
sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
+ tx->num_desc++;
_sdma_close_tx(dd, tx);
return rval;
}
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 1e2e40f79cb2..6ac00755848d 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -672,14 +672,13 @@ static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
struct sdma_txreq *tx)
{
- tx->descp[tx->num_desc].qw[0] |=
- SDMA_DESC0_LAST_DESC_FLAG;
- tx->descp[tx->num_desc].qw[1] |=
- dd->default_desc1;
+ u16 last_desc = tx->num_desc - 1;
+
+ tx->descp[last_desc].qw[0] |= SDMA_DESC0_LAST_DESC_FLAG;
+ tx->descp[last_desc].qw[1] |= dd->default_desc1;
if (tx->flags & SDMA_TXREQ_F_URGENT)
- tx->descp[tx->num_desc].qw[1] |=
- (SDMA_DESC1_HEAD_TO_HOST_FLAG |
- SDMA_DESC1_INT_REQ_FLAG);
+ tx->descp[last_desc].qw[1] |= (SDMA_DESC1_HEAD_TO_HOST_FLAG |
+ SDMA_DESC1_INT_REQ_FLAG);
}
static inline int _sdma_txadd_daddr(
@@ -696,6 +695,7 @@ static inline int _sdma_txadd_daddr(
type,
addr, len);
WARN_ON(len > tx->tlen);
+ tx->num_desc++;
tx->tlen -= len;
/* special cases for last */
if (!tx->tlen) {
@@ -707,7 +707,6 @@ static inline int _sdma_txadd_daddr(
_sdma_close_tx(dd, tx);
}
}
- tx->num_desc++;
return rval;
}
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 4d732353379d..6c1d36b2e2a7 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -215,16 +215,11 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
int pinned;
- unsigned int npages;
+ unsigned int npages = tidbuf->npages;
unsigned long vaddr = tidbuf->vaddr;
struct page **pages = NULL;
struct hfi1_devdata *dd = fd->uctxt->dd;
- /* Get the number of pages the user buffer spans */
- npages = num_user_pages(vaddr, tidbuf->length);
- if (!npages)
- return -EINVAL;
-
if (npages > fd->uctxt->expected_count) {
dd_dev_err(dd, "Expected buffer too big\n");
return -EINVAL;
@@ -258,7 +253,6 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
return pinned;
}
tidbuf->pages = pages;
- tidbuf->npages = npages;
fd->tid_n_pinned += pinned;
return pinned;
}
@@ -325,6 +319,8 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
if (!PAGE_ALIGNED(tinfo->vaddr))
return -EINVAL;
+ if (tinfo->length == 0)
+ return -EINVAL;
tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
if (!tidbuf)
@@ -332,43 +328,42 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
tidbuf->vaddr = tinfo->vaddr;
tidbuf->length = tinfo->length;
+ tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length);
tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
GFP_KERNEL);
if (!tidbuf->psets) {
- kfree(tidbuf);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail_release_mem;
}
pinned = pin_rcv_pages(fd, tidbuf);
if (pinned <= 0) {
- kfree(tidbuf->psets);
- kfree(tidbuf);
- return pinned;
+ ret = (pinned < 0) ? pinned : -ENOSPC;
+ goto fail_unpin;
}
/* Find sets of physically contiguous pages */
tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);
- /*
- * We don't need to access this under a lock since tid_used is per
- * process and the same process cannot be in hfi1_user_exp_rcv_clear()
- * and hfi1_user_exp_rcv_setup() at the same time.
- */
+ /* Reserve the number of expected tids to be used. */
spin_lock(&fd->tid_lock);
if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
pageset_count = fd->tid_limit - fd->tid_used;
else
pageset_count = tidbuf->n_psets;
+ fd->tid_used += pageset_count;
spin_unlock(&fd->tid_lock);
- if (!pageset_count)
- goto bail;
+ if (!pageset_count) {
+ ret = -ENOSPC;
+ goto fail_unreserve;
+ }
ngroups = pageset_count / dd->rcv_entries.group_size;
tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
if (!tidlist) {
ret = -ENOMEM;
- goto nomem;
+ goto fail_unreserve;
}
tididx = 0;
@@ -464,43 +459,60 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
}
unlock:
mutex_unlock(&uctxt->exp_mutex);
-nomem:
hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
mapped_pages, ret);
- if (tididx) {
- spin_lock(&fd->tid_lock);
- fd->tid_used += tididx;
- spin_unlock(&fd->tid_lock);
- tinfo->tidcnt = tididx;
- tinfo->length = mapped_pages * PAGE_SIZE;
-
- if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
- tidlist, sizeof(tidlist[0]) * tididx)) {
- /*
- * On failure to copy to the user level, we need to undo
- * everything done so far so we don't leak resources.
- */
- tinfo->tidlist = (unsigned long)&tidlist;
- hfi1_user_exp_rcv_clear(fd, tinfo);
- tinfo->tidlist = 0;
- ret = -EFAULT;
- goto bail;
- }
+
+ /* fail if nothing was programmed, set error if none provided */
+ if (tididx == 0) {
+ if (ret >= 0)
+ ret = -ENOSPC;
+ goto fail_unreserve;
}
- /*
- * If not everything was mapped (due to insufficient RcvArray entries,
- * for example), unpin all unmapped pages so we can pin them next time.
- */
- if (mapped_pages != pinned)
- unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
- (pinned - mapped_pages), false);
-bail:
+ /* adjust reserved tid_used to actual count */
+ spin_lock(&fd->tid_lock);
+ fd->tid_used -= pageset_count - tididx;
+ spin_unlock(&fd->tid_lock);
+
+ /* unpin all pages not covered by a TID */
+ unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages,
+ false);
+
+ tinfo->tidcnt = tididx;
+ tinfo->length = mapped_pages * PAGE_SIZE;
+
+ if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
+ tidlist, sizeof(tidlist[0]) * tididx)) {
+ ret = -EFAULT;
+ goto fail_unprogram;
+ }
+
+ kfree(tidbuf->pages);
kfree(tidbuf->psets);
+ kfree(tidbuf);
kfree(tidlist);
+ return 0;
+
+fail_unprogram:
+ /* unprogram, unmap, and unpin all allocated TIDs */
+ tinfo->tidlist = (unsigned long)tidlist;
+ hfi1_user_exp_rcv_clear(fd, tinfo);
+ tinfo->tidlist = 0;
+ pinned = 0; /* nothing left to unpin */
+ pageset_count = 0; /* nothing left reserved */
+fail_unreserve:
+ spin_lock(&fd->tid_lock);
+ fd->tid_used -= pageset_count;
+ spin_unlock(&fd->tid_lock);
+fail_unpin:
+ if (pinned > 0)
+ unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false);
+fail_release_mem:
kfree(tidbuf->pages);
+ kfree(tidbuf->psets);
kfree(tidbuf);
- return ret > 0 ? 0 : ret;
+ kfree(tidlist);
+ return ret;
}
int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
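
Note: the user_exp_rcv.c rewrite replaces scattered kfree() calls with the kernel's label-ladder idiom: resources are acquired top-down, each fail_* label undoes exactly one acquisition, and control falls through the labels in reverse acquisition order. The skeleton, reduced to two resources:

    #include <linux/slab.h>
    #include <linux/errno.h>

    static int setup_example(void)
    {
            void *bufs, *sets;
            int ret;

            bufs = kzalloc(64, GFP_KERNEL);
            if (!bufs)
                    return -ENOMEM;

            sets = kzalloc(64, GFP_KERNEL);
            if (!sets) {
                    ret = -ENOMEM;
                    goto fail_free_bufs;    /* undo only what exists so far */
            }

            /* ... later failures would unwind through more labels ... */
            return 0;

    fail_free_bufs:
            kfree(bufs);
            return ret;
    }
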
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index d01e3222c00c..28bbc4708fd4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5216,8 +5216,8 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
dev_err(dev, "AEQ overflow!\n");
- int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
- roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
+ 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);
/* Set reset level for reset_event() */
if (ops->set_default_reset_request)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 76a14db7028d..b9ab3ca3079c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -89,7 +89,7 @@
#define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
-#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x100
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 6d6719fa7e46..44d65b11a36e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -411,9 +411,8 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
bool ipv4,
u32 action);
-int i40iw_manage_apbvt(struct i40iw_device *iwdev,
- u16 accel_local_port,
- bool add_port);
+enum i40iw_status_code i40iw_manage_apbvt(struct i40iw_device *iwdev,
+ u16 accel_local_port, bool add_port);
struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait);
void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 4d841a3c68f3..026557aa2307 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -2945,6 +2945,9 @@ static enum i40iw_status_code i40iw_sc_alloc_stag(
u64 header;
enum i40iw_page_size page_size;
+ if (!info->total_len && !info->all_memory)
+ return -EINVAL;
+
page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
cqp = dev->cqp;
wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
@@ -3003,6 +3006,9 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
u8 addr_type;
enum i40iw_page_size page_size;
+ if (!info->total_len && !info->all_memory)
+ return -EINVAL;
+
page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index adc8d2ec523d..5c4e2f206105 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -779,6 +779,7 @@ struct i40iw_allocate_stag_info {
bool use_hmc_fcn_index;
u8 hmc_fcn_index;
bool use_pf_rid;
+ bool all_memory;
};
struct i40iw_reg_ns_stag_info {
@@ -797,6 +798,7 @@ struct i40iw_reg_ns_stag_info {
bool use_hmc_fcn_index;
u8 hmc_fcn_index;
bool use_pf_rid;
+ bool all_memory;
};
struct i40iw_fast_reg_stag_info {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 7e9c1a40f040..98f779c82ea0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1500,7 +1500,8 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
{
struct i40iw_allocate_stag_info *info;
- struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ struct ib_pd *pd = iwmr->ibmr.pd;
+ struct i40iw_pd *iwpd = to_iwpd(pd);
enum i40iw_status_code status;
int err = 0;
struct i40iw_cqp_request *cqp_request;
@@ -1517,6 +1518,7 @@ static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr
info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
info->pd_id = iwpd->sc_pd.pd_id;
info->total_len = iwmr->length;
+ info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
info->remote_access = true;
cqp_info->cqp_cmd = OP_ALLOC_STAG;
cqp_info->post_sq = 1;
@@ -1570,6 +1572,8 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
iwmr->type = IW_MEMREG_TYPE_MEM;
palloc = &iwpbl->pble_alloc;
iwmr->page_cnt = max_num_sg;
+ /* Use system PAGE_SIZE as the sg page sizes are unknown at this point */
+ iwmr->length = max_num_sg * PAGE_SIZE;
mutex_lock(&iwdev->pbl_mutex);
status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
mutex_unlock(&iwdev->pbl_mutex);
@@ -1666,7 +1670,8 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
{
struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
struct i40iw_reg_ns_stag_info *stag_info;
- struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ struct ib_pd *pd = iwmr->ibmr.pd;
+ struct i40iw_pd *iwpd = to_iwpd(pd);
struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
enum i40iw_status_code status;
int err = 0;
@@ -1686,6 +1691,7 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
stag_info->total_len = iwmr->length;
stag_info->access_rights = access;
stag_info->pd_id = iwpd->sc_pd.pd_id;
+ stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
stag_info->page_size = iwmr->page_size;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index bca5358f3ef2..395d8a99b12e 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -438,9 +438,13 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev,
struct mlx4_ib_qp *qp,
struct mlx4_ib_create_qp *ucmd)
{
+ u32 cnt;
+
/* Sanity check SQ size before proceeding */
- if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||
- ucmd->log_sq_stride >
+ if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
+ cnt > dev->dev->caps.max_wqes)
+ return -EINVAL;
+ if (ucmd->log_sq_stride >
ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
return -EINVAL;
@@ -552,15 +556,15 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP);
}
- if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
- MLX4_IB_RX_HASH_DST_IPV4 |
- MLX4_IB_RX_HASH_SRC_IPV6 |
- MLX4_IB_RX_HASH_DST_IPV6 |
- MLX4_IB_RX_HASH_SRC_PORT_TCP |
- MLX4_IB_RX_HASH_DST_PORT_TCP |
- MLX4_IB_RX_HASH_SRC_PORT_UDP |
- MLX4_IB_RX_HASH_DST_PORT_UDP |
- MLX4_IB_RX_HASH_INNER)) {
+ if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 |
+ MLX4_IB_RX_HASH_DST_IPV4 |
+ MLX4_IB_RX_HASH_SRC_IPV6 |
+ MLX4_IB_RX_HASH_DST_IPV6 |
+ MLX4_IB_RX_HASH_SRC_PORT_TCP |
+ MLX4_IB_RX_HASH_DST_PORT_TCP |
+ MLX4_IB_RX_HASH_SRC_PORT_UDP |
+ MLX4_IB_RX_HASH_DST_PORT_UDP |
+ MLX4_IB_RX_HASH_INNER)) {
pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
ucmd->rx_hash_fields_mask);
return (-EOPNOTSUPP);
@@ -3543,11 +3547,11 @@ static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
int nreq;
int err = 0;
unsigned ind;
- int uninitialized_var(size);
- unsigned uninitialized_var(seglen);
+ int size;
+ unsigned seglen;
__be32 dummy;
__be32 *lso_wqe;
- __be32 uninitialized_var(lso_hdr_sz);
+ __be32 lso_hdr_sz;
__be32 blh;
int i;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
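
Note: `1 << ucmd->log_sq_bb_count` with a user-supplied shift of 32 or more is undefined behavior in C; check_shl_overflow() performs the shift into the destination and returns true if anything overflowed, making the bound check above it safe. A sketch of the same validation:

    #include <linux/overflow.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    /* Validate an untrusted log2-encoded count against a cap. */
    static int validate_log_count(u8 log_count, u32 max)
    {
            u32 cnt;

            if (check_shl_overflow(1, log_count, &cnt) || cnt > max)
                    return -EINVAL;  /* shift overflowed or exceeds cap */
            return 0;
    }
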
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index ea1f3a081b05..6c3a23ee3bc7 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -221,7 +221,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
{
int i;
- char buff[11];
+ char buff[12];
struct mlx4_ib_iov_port *port = NULL;
int ret = 0 ;
struct ib_port_attr attr;
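
Note: the sysfs.c buffer bump from 11 to 12 bytes covers the worst case for printing a signed 32-bit integer: a minus sign, ten digits, and the terminating NUL. Quick demonstration:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
            char buff[12];

            /* INT_MIN renders as "-2147483648": 11 chars + NUL = 12 bytes */
            snprintf(buff, sizeof(buff), "%d", INT_MIN);
            printf("%s needs %zu bytes\n", buff, sizeof(buff));
            return 0;
    }
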
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 23ce0126b268..7f659c240c99 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -916,8 +916,8 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_cq *cq = to_mcq(ibcq);
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
- int uninitialized_var(index);
- int uninitialized_var(inlen);
+ int index;
+ int inlen;
u32 *cqb = NULL;
void *cqc;
int cqe_size;
@@ -1237,7 +1237,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
__be64 *pas;
int page_shift;
int inlen;
- int uninitialized_var(cqe_size);
+ int cqe_size;
unsigned long flags;
if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 747f42855b7b..7a3b56c15079 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2556,7 +2556,7 @@ static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
{
struct devx_async_event_file *ev_file = filp->private_data;
struct devx_event_subscription *event_sub;
- struct devx_async_event_data *uninitialized_var(event);
+ struct devx_async_event_data *event;
int ret = 0;
size_t eventsz;
bool omit_data;
@@ -2811,7 +2811,7 @@ DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
UVERBS_IDR_ANY_OBJECT,
- UVERBS_ACCESS_WRITE,
+ UVERBS_ACCESS_READ,
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(
MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 348c1df69cdc..3897a3ce02ad 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -219,6 +219,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+ /* set local port to one for Function-Per-Port HCA. */
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+
/* Declaring support of extended counters */
if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
struct ib_class_port_info cpi = {};
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9025086a8932..6698032af87d 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2053,7 +2053,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
case MLX5_IB_MMAP_DEVICE_MEM:
return "Device Memory";
default:
- return NULL;
+ return "Unknown";
}
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 634f29cb7395..51623431b879 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3821,7 +3821,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return -EINVAL;
if (attr->port_num == 0 ||
- attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
+ attr->port_num > dev->num_ports) {
mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
attr->port_num, dev->num_ports);
return -EINVAL;
@@ -3890,6 +3890,40 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return err;
}
+static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_type qp_type)
+{
+ int log_max_ra_res;
+ int log_max_ra_req;
+
+ if (qp_type == MLX5_IB_QPT_DCI) {
+ log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_res_dc);
+ log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_req_dc);
+ } else {
+ log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_res_qp);
+ log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_req_qp);
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+ attr->max_rd_atomic > log_max_ra_res) {
+ mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
+ attr->max_rd_atomic);
+ return false;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+ attr->max_dest_rd_atomic > log_max_ra_req) {
+ mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
+ attr->max_dest_rd_atomic);
+ return false;
+ }
+ return true;
+}
+
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
@@ -3986,21 +4020,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
}
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
- attr->max_rd_atomic >
- (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
- mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
- attr->max_rd_atomic);
- goto out;
- }
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
- attr->max_dest_rd_atomic >
- (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
- mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
- attr->max_dest_rd_atomic);
+ if (!validate_rd_atomic(dev, attr, attr_mask, qp_type))
goto out;
- }
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
err = 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index bdf5ed38de22..0307c45aa6d3 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -635,7 +635,7 @@ void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
int mthca_SYS_EN(struct mthca_dev *dev)
{
- u64 out;
+ u64 out = 0;
int ret;
ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D);
@@ -1955,7 +1955,7 @@ int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
u16 *hash)
{
- u64 imm;
+ u64 imm = 0;
int err;
err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index fe9654a7af71..3acd1372c814 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -382,7 +382,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
struct mthca_init_hca_param *init_hca,
u64 icm_size)
{
- u64 aux_pages;
+ u64 aux_pages = 0;
int err;
err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index d04c245359eb..c6e95d0d760a 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1639,8 +1639,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
* without initializing f0 and size0, and they are in fact
* never used uninitialized.
*/
- int uninitialized_var(size0);
- u32 uninitialized_var(f0);
+ int size0;
+ u32 f0;
int ind;
u8 op0 = 0;
@@ -1835,7 +1835,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
* without initializing size0, and it is in fact never used
* uninitialized.
*/
- int uninitialized_var(size0);
+ int size0;
int ind;
void *wqe;
void *prev_wqe;
@@ -1943,8 +1943,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
* without initializing f0 and size0, and they are in fact
* never used uninitialized.
*/
- int uninitialized_var(size0);
- u32 uninitialized_var(f0);
+ int size0;
+ u32 f0;
int ind;
u8 op0 = 0;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 93040c994e2e..50b75bd4633c 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -362,6 +362,10 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
if (IS_IWARP(dev)) {
xa_init(&dev->qps);
dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
+ if (!dev->iwarp_wq) {
+ rc = -ENOMEM;
+ goto err1;
+ }
}
/* Allocate Status blocks for CNQ */
@@ -369,7 +373,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
GFP_KERNEL);
if (!dev->sb_array) {
rc = -ENOMEM;
- goto err1;
+ goto err_destroy_wq;
}
dev->cnq_array = kcalloc(dev->num_cnq,
@@ -423,6 +427,9 @@ err3:
kfree(dev->cnq_array);
err2:
kfree(dev->sb_array);
+err_destroy_wq:
+ if (IS_IWARP(dev))
+ destroy_workqueue(dev->iwarp_wq);
err1:
kfree(dev->sgid_tbl);
return rc;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 62e6ffa9ad78..1d060ae47933 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -281,8 +281,8 @@ iter_chunk:
size = pa_end - pa_start + PAGE_SIZE;
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
va_start, &pa_start, size, flags);
- err = iommu_map(pd->domain, va_start, pa_start,
- size, flags);
+ err = iommu_map_atomic(pd->domain, va_start,
+ pa_start, size, flags);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);
@@ -298,8 +298,8 @@ iter_chunk:
size = pa - pa_start + PAGE_SIZE;
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
va_start, &pa_start, size, flags);
- err = iommu_map(pd->domain, va_start, pa_start,
- size, flags);
+ err = iommu_map_atomic(pd->domain, va_start,
+ pa_start, size, flags);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index e97c13967174..905e2eaed095 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -505,8 +505,6 @@ void rvt_qp_exit(struct rvt_dev_info *rdi)
if (qps_inuse)
rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
qps_inuse);
- if (!rdi->qp_dev)
- return;
kfree(rdi->qp_dev->qp_table);
free_qpn_table(&rdi->qp_dev->qpn_table);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 53166b9ae67e..d93ed04276ac 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -211,6 +211,17 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
spin_lock_init(&qp->grp_lock);
spin_lock_init(&qp->state_lock);
+ spin_lock_init(&qp->req.task.state_lock);
+ spin_lock_init(&qp->resp.task.state_lock);
+ spin_lock_init(&qp->comp.task.state_lock);
+
+ spin_lock_init(&qp->sq.sq_lock);
+ spin_lock_init(&qp->rq.producer_lock);
+ spin_lock_init(&qp->rq.consumer_lock);
+
+ skb_queue_head_init(&qp->req_pkts);
+ skb_queue_head_init(&qp->resp_pkts);
+
atomic_set(&qp->ssn, 0);
atomic_set(&qp->skb_out, 0);
}
@@ -268,13 +279,8 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->req.opcode = -1;
qp->comp.opcode = -1;
- spin_lock_init(&qp->sq.sq_lock);
- skb_queue_head_init(&qp->req_pkts);
-
- rxe_init_task(rxe, &qp->req.task, qp,
- rxe_requester, "req");
- rxe_init_task(rxe, &qp->comp.task, qp,
- rxe_completer, "comp");
+ rxe_init_task(&qp->req.task, qp, rxe_requester);
+ rxe_init_task(&qp->comp.task, qp, rxe_completer);
qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
if (init->qp_type == IB_QPT_RC) {
@@ -318,13 +324,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
}
}
- spin_lock_init(&qp->rq.producer_lock);
- spin_lock_init(&qp->rq.consumer_lock);
-
- skb_queue_head_init(&qp->resp_pkts);
-
- rxe_init_task(rxe, &qp->resp.task, qp,
- rxe_responder, "resp");
+ rxe_init_task(&qp->resp.task, qp, rxe_responder);
qp->resp.opcode = OPCODE_NONE;
qp->resp.msn = 0;
@@ -801,7 +801,9 @@ void rxe_qp_destroy(struct rxe_qp *qp)
rxe_cleanup_task(&qp->comp.task);
/* flush out any receive wr's or pending requests */
- __rxe_do_task(&qp->req.task);
+ if (qp->req.task.func)
+ __rxe_do_task(&qp->req.task);
+
if (qp->sq.queue) {
__rxe_do_task(&qp->comp.task);
__rxe_do_task(&qp->req.task);
@@ -836,13 +838,15 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
qp->resp.mr = NULL;
}
- if (qp_type(qp) == IB_QPT_RC)
- sk_dst_reset(qp->sk->sk);
-
free_rd_atomic_resources(qp);
- kernel_sock_shutdown(qp->sk, SHUT_RDWR);
- sock_release(qp->sk);
+ if (qp->sk) {
+ if (qp_type(qp) == IB_QPT_RC)
+ sk_dst_reset(qp->sk->sk);
+
+ kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+ sock_release(qp->sk);
+ }
}
/* called when the last reference to the qp is dropped */
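
Note: hoisting the lock and queue initialization into rxe_qp_init_misc() means a QP whose creation fails partway still has valid locks and skb queues, so the cleanup path can run unconditionally; the `qp->req.task.func` and `qp->sk` checks guard the same partially-constructed case. The pattern in miniature, with illustrative types:

    #include <linux/spinlock.h>
    #include <linux/skbuff.h>
    #include <linux/net.h>

    struct obj {
            spinlock_t lock;
            struct sk_buff_head pkts;
            struct socket *sk;      /* NULL until a later init step */
    };

    static void obj_init_misc(struct obj *o)
    {
            /* init everything cleanup touches before any failure point */
            spin_lock_init(&o->lock);
            skb_queue_head_init(&o->pkts);
    }

    static void obj_cleanup(struct obj *o)
    {
            skb_queue_purge(&o->pkts);  /* safe even after early failure */
            if (o->sk)                  /* tolerate partial construction */
                    sock_release(o->sk);
    }
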
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 08f05ac5f5d5..39b3adda1655 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -114,13 +114,10 @@ void rxe_do_task(unsigned long data)
task->ret = ret;
}
-int rxe_init_task(void *obj, struct rxe_task *task,
- void *arg, int (*func)(void *), char *name)
+int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *))
{
- task->obj = obj;
task->arg = arg;
task->func = func;
- snprintf(task->name, sizeof(task->name), "%s", name);
task->destroyed = false;
tasklet_init(&task->tasklet, rxe_do_task, (unsigned long)task);
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
index 08ff42d451c6..ecd81b1d1a8c 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -46,14 +46,12 @@ enum {
* called again.
*/
struct rxe_task {
- void *obj;
struct tasklet_struct tasklet;
int state;
spinlock_t state_lock; /* spinlock for task state */
void *arg;
int (*func)(void *arg);
int ret;
- char name[16];
bool destroyed;
};
@@ -62,8 +60,7 @@ struct rxe_task {
* arg => parameter to pass to fcn
* fcn => function to call until it returns != 0
*/
-int rxe_init_task(void *obj, struct rxe_task *task,
- void *arg, int (*func)(void *), char *name);
+int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *));
/* cleanup task */
void rxe_cleanup_task(struct rxe_task *task);
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 3aed597103d3..8c4d868dd154 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -725,11 +725,11 @@ static int siw_proc_mpareply(struct siw_cep *cep)
enum mpa_v2_ctrl mpa_p2p_mode = MPA_V2_RDMA_NO_RTR;
rv = siw_recv_mpa_rr(cep);
- if (rv != -EAGAIN)
- siw_cancel_mpatimer(cep);
if (rv)
goto out_err;
+ siw_cancel_mpatimer(cep);
+
rep = &cep->mpa.hdr;
if (__mpa_rr_revision(rep->params.bits) > MPA_REVISION_2) {
@@ -895,7 +895,8 @@ static int siw_proc_mpareply(struct siw_cep *cep)
}
out_err:
- siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
+ if (rv != -EAGAIN)
+ siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
return rv;
}
@@ -980,6 +981,7 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_put(cep);
new_cep->listen_cep = NULL;
if (rv) {
+ siw_cancel_mpatimer(new_cep);
siw_cep_set_free(new_cep);
goto error;
}
@@ -1104,9 +1106,12 @@ static void siw_cm_work_handler(struct work_struct *w)
/*
* Socket close before MPA request received.
*/
- siw_dbg_cep(cep, "no mpareq: drop listener\n");
- siw_cep_put(cep->listen_cep);
- cep->listen_cep = NULL;
+ if (cep->listen_cep) {
+ siw_dbg_cep(cep,
+ "no mpareq: drop listener\n");
+ siw_cep_put(cep->listen_cep);
+ cep->listen_cep = NULL;
+ }
}
}
release_cep = 1;
@@ -1229,7 +1234,11 @@ static void siw_cm_llp_data_ready(struct sock *sk)
if (!cep)
goto out;
- siw_dbg_cep(cep, "state: %d\n", cep->state);
+ siw_dbg_cep(cep, "cep state: %d, socket state %d\n",
+ cep->state, sk->sk_state);
+
+ if (sk->sk_state != TCP_ESTABLISHED)
+ goto out;
switch (cep->state) {
case SIW_EPSTATE_RDMA_MODE:
@@ -1524,7 +1533,6 @@ error:
cep->cm_id = NULL;
id->rem_ref(id);
- siw_cep_put(cep);
qp->cep = NULL;
siw_cep_put(cep);
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index d8db3bee9da7..214714afacb7 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -56,8 +56,6 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {
memset(wc, 0, sizeof(*wc));
wc->wr_id = cqe->id;
- wc->status = map_cqe_status[cqe->status].ib;
- wc->opcode = map_wc_opcode[cqe->opcode];
wc->byte_len = cqe->bytes;
/*
@@ -71,10 +69,32 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
wc->wc_flags = IB_WC_WITH_INVALIDATE;
}
wc->qp = cqe->base_qp;
+ wc->opcode = map_wc_opcode[cqe->opcode];
+ wc->status = map_cqe_status[cqe->status].ib;
siw_dbg_cq(cq,
"idx %u, type %d, flags %2x, id 0x%pK\n",
cq->cq_get % cq->num_cqe, cqe->opcode,
cqe->flags, (void *)(uintptr_t)cqe->id);
+ } else {
+ /*
+ * A malicious user may set invalid opcode or
+ * status in the user mmapped CQE array.
+ * Sanity check and correct values in that case
+ * to avoid out-of-bounds access to global arrays
+ * for opcode and status mapping.
+ */
+ u8 opcode = cqe->opcode;
+ u16 status = cqe->status;
+
+ if (opcode >= SIW_NUM_OPCODES) {
+ opcode = 0;
+ status = SIW_WC_GENERAL_ERR;
+ } else if (status >= SIW_NUM_WC_STATUS) {
+ status = SIW_WC_GENERAL_ERR;
+ }
+ wc->opcode = map_wc_opcode[opcode];
+ wc->status = map_cqe_status[status].ib;
+
}
WRITE_ONCE(cqe->flags, 0);
cq->cq_get++;
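
Note: the siw_cq.c hunk treats the opcode and status read back from the user-mmapped CQE array as untrusted and clamps them before indexing the global mapping tables; otherwise a malicious value reads out of bounds. The check in isolation:

    /* Clamp untrusted indices before table lookups; the table and
     * fallback value are illustrative only. */
    static const char *const op_name[] = { "send", "write", "read" };

    static const char *lookup_op(unsigned int untrusted)
    {
            if (untrusted >= sizeof(op_name) / sizeof(op_name[0]))
                    untrusted = 0;  /* safe default, as the patch does */
            return op_name[untrusted];
    }
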
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index dbbf8c6c16d3..a462c2fc6f31 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -472,9 +472,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
dev_dbg(&netdev->dev, "siw: event %lu\n", event);
- if (dev_net(netdev) != &init_net)
- return NOTIFY_OK;
-
base_dev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_SIW);
if (!base_dev)
return NOTIFY_OK;
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 5f94c716301f..e8a1aa07f058 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -961,27 +961,28 @@ out:
static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx)
{
struct sk_buff *skb = srx->skb;
+ int avail = min(srx->skb_new, srx->fpdu_part_rem);
u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad;
__wsum crc_in, crc_own = 0;
siw_dbg_qp(qp, "expected %d, available %d, pad %u\n",
srx->fpdu_part_rem, srx->skb_new, srx->pad);
- if (srx->skb_new < srx->fpdu_part_rem)
- return -EAGAIN;
-
- skb_copy_bits(skb, srx->skb_offset, tbuf, srx->fpdu_part_rem);
+ skb_copy_bits(skb, srx->skb_offset, tbuf, avail);
- if (srx->mpa_crc_hd && srx->pad)
- crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
+ srx->skb_new -= avail;
+ srx->skb_offset += avail;
+ srx->skb_copied += avail;
+ srx->fpdu_part_rem -= avail;
- srx->skb_new -= srx->fpdu_part_rem;
- srx->skb_offset += srx->fpdu_part_rem;
- srx->skb_copied += srx->fpdu_part_rem;
+ if (srx->fpdu_part_rem)
+ return -EAGAIN;
if (!srx->mpa_crc_hd)
return 0;
+ if (srx->pad)
+ crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
/*
* CRC32 is computed, transmitted and received directly in NBO,
* so there's never a reason to convert byte order.
@@ -1083,10 +1084,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
* completely received.
*/
if (iwarp_pktinfo[opcode].hdr_len > sizeof(struct iwarp_ctrl_tagged)) {
- bytes = iwarp_pktinfo[opcode].hdr_len - MIN_DDP_HDR;
+ int hdrlen = iwarp_pktinfo[opcode].hdr_len;
- if (srx->skb_new < bytes)
- return -EAGAIN;
+ bytes = min_t(int, hdrlen - MIN_DDP_HDR, srx->skb_new);
skb_copy_bits(skb, srx->skb_offset,
(char *)c_hdr + srx->fpdu_part_rcvd, bytes);
@@ -1096,6 +1096,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
srx->skb_new -= bytes;
srx->skb_offset += bytes;
srx->skb_copied += bytes;
+
+ if (srx->fpdu_part_rcvd < hdrlen)
+ return -EAGAIN;
}
/*
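
Note: both siw_qp_rx.c hunks replace "return -EAGAIN unless the whole piece is here" with a resumable copy: take min(available, remaining), advance the cursors, and only return -EAGAIN while a remainder is outstanding, so a trailer or header split across TCP segments no longer stalls. The shape of it, as a self-contained sketch:

    #include <string.h>
    #include <errno.h>

    struct rx_state {
            char buf[64];
            size_t rcvd;    /* bytes already copied into buf */
            size_t need;    /* total bytes expected */
    };

    /* Consume what the socket has now; resume on the next call. */
    static int rx_consume(struct rx_state *s, const char *data, size_t avail)
    {
            size_t take = s->need - s->rcvd;

            if (take > avail)
                    take = avail;
            memcpy(s->buf + s->rcvd, data, take);
            s->rcvd += take;

            return (s->rcvd < s->need) ? -EAGAIN : 0;
    }
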
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 424918eb1cd4..42d03bd1622d 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
if (paddr)
- return virt_to_page(paddr);
+ return virt_to_page((void *)(uintptr_t)paddr);
return NULL;
}
@@ -523,13 +523,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
kunmap(p);
}
} else {
- u64 va = sge->laddr + sge_off;
+ /*
+ * Cast to an uintptr_t to preserve all 64 bits
+ * in sge->laddr.
+ */
+ uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
- page_array[seg] = virt_to_page(va & PAGE_MASK);
+ /*
+ * virt_to_page() takes a (void *) pointer
+ * so cast to a (void *) meaning it will be 64
+ * bits on a 64 bit platform and 32 bits on a
+ * 32 bit platform.
+ */
+ page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
if (do_crc)
crypto_shash_update(
c_tx->mpa_crc_hd,
- (void *)(uintptr_t)va,
+ (void *)va,
plen);
}
@@ -538,7 +548,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
data_len -= plen;
fp_off = 0;
- if (++seg > (int)MAX_ARRAY) {
+ if (++seg >= (int)MAX_ARRAY) {
siw_dbg_qp(tx_qp(c_tx), "to many fragments\n");
siw_unmap_pages(page_array, kmap_mask);
wqe->processed -= c_tx->bytes_unsent;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index b9ca54e372b4..236f9efaa75c 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -694,13 +694,45 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
{
- struct siw_sqe sqe = {};
int rv = 0;
while (wr) {
- sqe.id = wr->wr_id;
- sqe.opcode = wr->opcode;
- rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
+ struct siw_sqe sqe = {};
+
+ switch (wr->opcode) {
+ case IB_WR_RDMA_WRITE:
+ sqe.opcode = SIW_OP_WRITE;
+ break;
+ case IB_WR_RDMA_READ:
+ sqe.opcode = SIW_OP_READ;
+ break;
+ case IB_WR_RDMA_READ_WITH_INV:
+ sqe.opcode = SIW_OP_READ_LOCAL_INV;
+ break;
+ case IB_WR_SEND:
+ sqe.opcode = SIW_OP_SEND;
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ sqe.opcode = SIW_OP_SEND_WITH_IMM;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ sqe.opcode = SIW_OP_SEND_REMOTE_INV;
+ break;
+ case IB_WR_LOCAL_INV:
+ sqe.opcode = SIW_OP_INVAL_STAG;
+ break;
+ case IB_WR_REG_MR:
+ sqe.opcode = SIW_OP_REG_MR;
+ break;
+ default:
+ rv = -EINVAL;
+ break;
+ }
+ if (!rv) {
+ sqe.id = wr->wr_id;
+ rv = siw_sqe_complete(qp, &sqe, 0,
+ SIW_WC_WR_FLUSH_ERR);
+ }
if (rv) {
if (bad_wr)
*bad_wr = wr;
@@ -1477,7 +1509,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
if (pbl->max_buf < num_sle) {
siw_dbg_mem(mem, "too many SGE's: %d > %d\n",
- mem->pbl->max_buf, num_sle);
+ num_sle, pbl->max_buf);
return -ENOMEM;
}
for_each_sg(sl, slp, num_sle, i) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 69ecf37053a8..3c3cc6af0a1e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2171,6 +2171,14 @@ int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name,
rn->attach_mcast = ipoib_mcast_attach;
rn->detach_mcast = ipoib_mcast_detach;
rn->hca = hca;
+
+ rc = netif_set_real_num_tx_queues(dev, 1);
+ if (rc)
+ goto out;
+
+ rc = netif_set_real_num_rx_queues(dev, 1);
+ if (rc)
+ goto out;
}
priv->rn_ops = dev->netdev_ops;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index b9e9562f5034..de82fb0cb1d5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -543,21 +543,18 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
/* SM supports sendonly-fullmember, otherwise fallback to full-member */
rec.join_state = SENDONLY_FULLMEMBER_JOIN;
}
- spin_unlock_irq(&priv->lock);
multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
- &rec, comp_mask, GFP_KERNEL,
+ &rec, comp_mask, GFP_ATOMIC,
ipoib_mcast_join_complete, mcast);
- spin_lock_irq(&priv->lock);
if (IS_ERR(multicast)) {
ret = PTR_ERR(multicast);
ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
/* Requeue this join task with a backoff delay */
__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- spin_unlock_irq(&priv->lock);
complete(&mcast->done);
- spin_lock_irq(&priv->lock);
+ return ret;
}
return 0;
}
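
Note: the ipoib_mcast_join() change stops dropping priv->lock around ib_sa_join_multicast(); holding a spinlock forbids sleeping, so the allocation mode moves from GFP_KERNEL to GFP_ATOMIC, and the error path can return directly instead of re-taking the lock. The constraint, sketched:

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    /* GFP_KERNEL may sleep, so it is illegal under a spinlock;
     * GFP_ATOMIC allocates (or fails) without sleeping. */
    static void *alloc_under_lock(spinlock_t *lock, size_t sz)
    {
            void *p;

            spin_lock_irq(lock);
            p = kmalloc(sz, GFP_ATOMIC);    /* never GFP_KERNEL here */
            spin_unlock_irq(lock);
            return p;
    }
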
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 5b05cf3837da..28e9b70844e4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -42,6 +42,11 @@ static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = {
[IFLA_IPOIB_UMCAST] = { .type = NLA_U16 },
};
+static unsigned int ipoib_get_max_num_queues(void)
+{
+ return min_t(unsigned int, num_possible_cpus(), 128);
+}
+
static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -173,6 +178,8 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
.changelink = ipoib_changelink,
.get_size = ipoib_get_size,
.fill_info = ipoib_fill_info,
+ .get_num_rx_queues = ipoib_get_max_num_queues,
+ .get_num_tx_queues = ipoib_get_max_num_queues,
};
struct rtnl_link_ops *ipoib_get_link_ops(void)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 71268d61d2b8..6ff92dca2898 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -733,9 +733,13 @@ static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ struct isert_np *isert_np = cma_id->context;
ib_drain_qp(isert_conn->qp);
+
+ mutex_lock(&isert_np->mutex);
list_del_init(&isert_conn->node);
+ mutex_unlock(&isert_np->mutex);
isert_conn->cm_id = NULL;
isert_put_conn(isert_conn);
@@ -2507,6 +2511,7 @@ isert_free_np(struct iscsi_np *np)
{
struct isert_np *isert_np = np->np_context;
struct isert_conn *isert_conn, *n;
+ LIST_HEAD(drop_conn_list);
if (isert_np->cm_id)
rdma_destroy_id(isert_np->cm_id);
@@ -2526,7 +2531,7 @@ isert_free_np(struct iscsi_np *np)
node) {
isert_info("cleaning isert_conn %p state (%d)\n",
isert_conn, isert_conn->state);
- isert_connect_release(isert_conn);
+ list_move_tail(&isert_conn->node, &drop_conn_list);
}
}
@@ -2537,11 +2542,16 @@ isert_free_np(struct iscsi_np *np)
node) {
isert_info("cleaning isert_conn %p state (%d)\n",
isert_conn, isert_conn->state);
- isert_connect_release(isert_conn);
+ list_move_tail(&isert_conn->node, &drop_conn_list);
}
}
mutex_unlock(&isert_np->mutex);
+ list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) {
+ list_del_init(&isert_conn->node);
+ isert_connect_release(isert_conn);
+ }
+
np->np_context = NULL;
kfree(isert_np);
}
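
Note: the isert_free_np() change is the standard "collect, then release" pattern: entries are unlinked onto a private list while the mutex is held, and the potentially sleeping release runs only after the lock is dropped. A generic sketch; struct item and release_item() are placeholders.

#include <linux/list.h>
#include <linux/mutex.h>

struct item {
	struct list_head node;
};

static void release_item(struct item *it);	/* placeholder */

static void drain_and_release(struct mutex *lock, struct list_head *live)
{
	struct item *it, *n;
	LIST_HEAD(drop_list);

	mutex_lock(lock);
	list_for_each_entry_safe(it, n, live, node)
		list_move_tail(&it->node, &drop_list);
	mutex_unlock(lock);

	/* release_item() may sleep or retake 'lock'; run it unlocked */
	list_for_each_entry_safe(it, n, &drop_list, node) {
		list_del_init(&it->node);
		release_item(it);
	}
}
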
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index b2861cd2087a..3f17bfd33eb5 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -63,9 +63,6 @@ enum {
SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
SRP_TSK_MGMT_SQ_SIZE,
- SRP_TAG_NO_REQ = ~0U,
- SRP_TAG_TSK_MGMT = 1U << 31,
-
SRP_MAX_PAGES_PER_MR = 512,
SRP_MAX_ADD_CDB_LEN = 16,
@@ -80,6 +77,11 @@ enum {
sizeof(struct srp_imm_buf),
};
+enum {
+ SRP_TAG_NO_REQ = ~0U,
+ SRP_TAG_TSK_MGMT = BIT(31),
+};
+
enum srp_target_state {
SRP_TARGET_SCANNING,
SRP_TARGET_LIVE,
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 2822ca5e8277..d03a4f2e006f 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -213,12 +213,15 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
/**
* srpt_qp_event - QP event callback function
* @event: Description of the event that occurred.
- * @ch: SRPT RDMA channel.
+ * @ptr: SRPT RDMA channel.
*/
-static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
+static void srpt_qp_event(struct ib_event *event, void *ptr)
{
- pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
- event->event, ch, ch->sess_name, ch->state);
+ struct srpt_rdma_ch *ch = ptr;
+
+ pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
+ event->event, ch, ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
switch (event->event) {
case IB_EVENT_COMM_EST:
@@ -1801,8 +1804,7 @@ retry:
}
qp_init->qp_context = (void *)ch;
- qp_init->event_handler
- = (void(*)(struct ib_event *, void*))srpt_qp_event;
+ qp_init->event_handler = srpt_qp_event;
qp_init->send_cq = ch->cq;
qp_init->recv_cq = ch->cq;
qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
@@ -2005,8 +2007,8 @@ static void __srpt_close_all_ch(struct srpt_port *sport)
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
if (srpt_disconnect_ch(ch) >= 0)
- pr_info("Closing channel %s because target %s_%d has been disabled\n",
- ch->sess_name,
+ pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
+ ch->sess_name, ch->qp->qp_num,
dev_name(&sport->sdev->device->dev),
sport->port);
srpt_close_ch(ch);
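
Note: the srpt_qp_event() change replaces a function-pointer cast with a handler that matches the declared event_handler prototype and downcasts the void * context inside. Calling through a pointer cast to a different function type is undefined behaviour and trips control-flow-integrity checking. The safe shape, sketched with a hypothetical my_ch context:

#include <rdma/ib_verbs.h>

struct my_ch {
	int id;
};

/* Matches: void (*event_handler)(struct ib_event *, void *) */
static void my_qp_event(struct ib_event *event, void *ptr)
{
	struct my_ch *ch = ptr;	/* context registered at QP creation */

	pr_debug("QP event %d on ch %d\n", event->event, ch->id);
}

static void my_init_qp_attr(struct ib_qp_init_attr *qp_init,
			    struct my_ch *ch)
{
	qp_init->qp_context = ch;
	qp_init->event_handler = my_qp_event;	/* no cast needed */
}
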
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index b2a68bc9f0b4..84b87526b7ba 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -50,6 +50,7 @@ static struct iforce_device iforce_device[] = {
{ 0x046d, 0xc291, "Logitech WingMan Formula Force", btn_wheel, abs_wheel, ff_iforce },
{ 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_joystick_avb, abs_avb_pegasus, ff_iforce },
{ 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_wheel, abs_wheel, ff_iforce },
+ { 0x05ef, 0x8886, "Boeder Force Feedback Wheel", btn_wheel, abs_wheel, ff_iforce },
{ 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
@@ -272,22 +273,22 @@ int iforce_init_device(struct device *parent, u16 bustype,
* Get device info.
*/
- if (!iforce_get_id_packet(iforce, 'M', buf, &len) || len < 3)
+ if (!iforce_get_id_packet(iforce, 'M', buf, &len) && len >= 3)
input_dev->id.vendor = get_unaligned_le16(buf + 1);
else
dev_warn(&iforce->dev->dev, "Device does not respond to id packet M\n");
- if (!iforce_get_id_packet(iforce, 'P', buf, &len) || len < 3)
+ if (!iforce_get_id_packet(iforce, 'P', buf, &len) && len >= 3)
input_dev->id.product = get_unaligned_le16(buf + 1);
else
dev_warn(&iforce->dev->dev, "Device does not respond to id packet P\n");
- if (!iforce_get_id_packet(iforce, 'B', buf, &len) || len < 3)
+ if (!iforce_get_id_packet(iforce, 'B', buf, &len) && len >= 3)
iforce->device_memory.end = get_unaligned_le16(buf + 1);
else
dev_warn(&iforce->dev->dev, "Device does not respond to id packet B\n");
- if (!iforce_get_id_packet(iforce, 'N', buf, &len) || len < 2)
+ if (!iforce_get_id_packet(iforce, 'N', buf, &len) && len >= 2)
ff_effects = buf[1];
else
dev_warn(&iforce->dev->dev, "Device does not respond to id packet N\n");
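
Note: the iforce-main.c hunks above fix an inverted condition. iforce_get_id_packet() returns 0 on success, so the old test "!get() || len < 3" was also true when the command failed, and the stale buffer was parsed anyway. The fixed test requires success and a long-enough reply. A standalone demonstration in plain C; get_id() simulates a non-responding device.

#include <stdio.h>

/* Returns 0 on success, like iforce_get_id_packet(); here it fails. */
static int get_id(int *len)
{
	*len = 1;
	return -1;
}

int main(void)
{
	int len;

	if (!get_id(&len) || len < 3)	/* old test: true on failure too */
		puts("old: parses a stale buffer");

	if (!get_id(&len) && len >= 3)	/* fixed test */
		puts("new: parse the reply");
	else
		puts("new: warn - no usable reply");
	return 0;
}
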
diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c
index f95a81b9fac7..2380546d7978 100644
--- a/drivers/input/joystick/iforce/iforce-serio.c
+++ b/drivers/input/joystick/iforce/iforce-serio.c
@@ -39,7 +39,7 @@ static void iforce_serio_xmit(struct iforce *iforce)
again:
if (iforce->xmit.head == iforce->xmit.tail) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
return;
}
@@ -64,7 +64,7 @@ again:
if (test_and_clear_bit(IFORCE_XMIT_AGAIN, iforce->xmit_flags))
goto again;
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
}
@@ -169,7 +169,7 @@ static irqreturn_t iforce_serio_irq(struct serio *serio,
iforce_serio->cmd_response_len = iforce_serio->len;
/* Signal that command is done */
- wake_up(&iforce->wait);
+ wake_up_all(&iforce->wait);
} else if (likely(iforce->type)) {
iforce_process_packet(iforce, iforce_serio->id,
iforce_serio->data_in,
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index ea58805c480f..cba92bd590a8 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -30,7 +30,7 @@ static void __iforce_usb_xmit(struct iforce *iforce)
spin_lock_irqsave(&iforce->xmit_lock, flags);
if (iforce->xmit.head == iforce->xmit.tail) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
return;
}
@@ -58,9 +58,9 @@ static void __iforce_usb_xmit(struct iforce *iforce)
XMIT_INC(iforce->xmit.tail, n);
if ( (n=usb_submit_urb(iforce_usb->out, GFP_ATOMIC)) ) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
dev_warn(&iforce_usb->intf->dev,
"usb_submit_urb failed %d\n", n);
+ iforce_clear_xmit_and_wake(iforce);
}
/* The IFORCE_XMIT_RUNNING bit is not cleared here. That's intended.
@@ -175,15 +175,15 @@ static void iforce_usb_out(struct urb *urb)
struct iforce *iforce = &iforce_usb->iforce;
if (urb->status) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
dev_dbg(&iforce_usb->intf->dev, "urb->status %d, exiting\n",
urb->status);
+ iforce_clear_xmit_and_wake(iforce);
return;
}
__iforce_usb_xmit(iforce);
- wake_up(&iforce->wait);
+ wake_up_all(&iforce->wait);
}
static int iforce_usb_probe(struct usb_interface *intf,
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index 6aa761ebbdf7..9ccb9107ccbe 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -119,6 +119,12 @@ static inline int iforce_get_id_packet(struct iforce *iforce, u8 id,
response_data, response_len);
}
+static inline void iforce_clear_xmit_and_wake(struct iforce *iforce)
+{
+ clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ wake_up_all(&iforce->wait);
+}
+
/* Public functions */
/* iforce-main.c */
int iforce_init_device(struct device *parent, u16 bustype,
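
Note: iforce_clear_xmit_and_wake() pairs the bit clear with a wake-up so that anyone sleeping in wait_event*() on iforce->wait re-evaluates its condition once transmission stops. The generic producer/consumer shape, with my_flags and my_waitq as illustrative names:

#include <linux/bitops.h>
#include <linux/wait.h>

#define MY_BUSY 0

static unsigned long my_flags;
static DECLARE_WAIT_QUEUE_HEAD(my_waitq);

static void finish_and_wake(void)
{
	clear_bit(MY_BUSY, &my_flags);
	/* Without this wake-up, a sleeper that sampled the bit just
	 * before we cleared it would never be re-woken.
	 */
	wake_up_all(&my_waitq);
}

static void wait_until_idle(void)
{
	wait_event(my_waitq, !test_bit(MY_BUSY, &my_flags));
}
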
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index ba101afcfc27..239471cf7e4c 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -112,11 +112,14 @@ static const struct xpad_device {
u8 xtype;
} xpad_device[] = {
{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
+ { 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 },
+ { 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
{ 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
+ { 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
{ 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
{ 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
@@ -242,6 +245,7 @@ static const struct xpad_device {
{ 0x0f0d, 0x0063, "Hori Real Arcade Pro Hayabusa (USA) Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE },
{ 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
@@ -249,6 +253,7 @@ static const struct xpad_device {
{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
+ { 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
@@ -258,9 +263,10 @@ static const struct xpad_device {
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
- { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
+ { 0x1532, 0x0a29, "Razer Wolverine V2", 0, XTYPE_XBOXONE },
{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
@@ -322,6 +328,7 @@ static const struct xpad_device {
{ 0x24c6, 0x5502, "Hori Fighting Stick VX Alt", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5503, "Hori Fighting Edge", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x550d, "Hori GEM Xbox controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x550e, "Hori Real Arcade Pro V Kai 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE },
@@ -331,6 +338,14 @@ static const struct xpad_device {
{ 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller for Xbox", 0, XTYPE_XBOXONE },
+ { 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1220, "Wooting Two HE", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
@@ -416,7 +431,9 @@ static const signed short xpad_abs_triggers[] = {
static const struct usb_device_id xpad_table[] = {
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */
XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */
+ XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */
XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */
+ XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One Controllers */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
@@ -426,12 +443,14 @@ static const struct usb_device_id xpad_table[] = {
{ USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz GamePad */
+ XPAD_XBOX360_VENDOR(0x0c12), /* Zeroplus X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */
XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
+ XPAD_XBOX360_VENDOR(0x11ff), /* PXN V900 */
XPAD_XBOX360_VENDOR(0x1209), /* Ardwiino Controllers */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
@@ -446,8 +465,12 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */
+ XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */
+ XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller for Xbox */
XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
+ XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */
XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */
{ }
};
@@ -470,6 +493,9 @@ struct xboxone_init_packet {
}
+#define GIP_WIRED_INTF_DATA 0
+#define GIP_WIRED_INTF_AUDIO 1
+
/*
* This packet is required for all Xbox One pads with 2015
* or later firmware installed (or present from the factory).
@@ -1794,7 +1820,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
}
if (xpad->xtype == XTYPE_XBOXONE &&
- intf->cur_altsetting->desc.bInterfaceNumber != 0) {
+ intf->cur_altsetting->desc.bInterfaceNumber != GIP_WIRED_INTF_DATA) {
/*
* The Xbox One controller lists three interfaces all with the
* same interface class, subclass and protocol. Differentiate by
@@ -1964,7 +1990,6 @@ static struct usb_driver xpad_driver = {
.disconnect = xpad_disconnect,
.suspend = xpad_suspend,
.resume = xpad_resume,
- .reset_resume = xpad_resume,
.id_table = xpad_table,
};
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 7e3eae54c192..cb7f9b46ce65 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -715,6 +715,44 @@ static void atkbd_deactivate(struct atkbd *atkbd)
ps2dev->serio->phys);
}
+#ifdef CONFIG_X86
+static bool atkbd_is_portable_device(void)
+{
+ static const char * const chassis_types[] = {
+ "8", /* Portable */
+ "9", /* Laptop */
+ "10", /* Notebook */
+ "14", /* Sub-Notebook */
+ "31", /* Convertible */
+ "32", /* Detachable */
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(chassis_types); i++)
+ if (dmi_match(DMI_CHASSIS_TYPE, chassis_types[i]))
+ return true;
+
+ return false;
+}
+
+/*
+ * On many modern laptops ATKBD_CMD_GETID may cause problems; on these laptops
+ * the controller is always in translated mode. In this mode mice/touchpads will
+ * not work. So in this case simply assume a keyboard is connected to avoid
+ * confusing some laptop keyboards.
+ *
+ * Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using the standard
+ * 0xab83 id is ok in translated mode, only atkbd_select_set() checks atkbd->id
+ * and in translated mode that is a no-op.
+ */
+static bool atkbd_skip_getid(struct atkbd *atkbd)
+{
+ return atkbd->translated && atkbd_is_portable_device();
+}
+#else
+static inline bool atkbd_skip_getid(struct atkbd *atkbd) { return false; }
+#endif
+
/*
* atkbd_probe() probes for an AT keyboard on a serio port.
*/
@@ -736,6 +774,11 @@ static int atkbd_probe(struct atkbd *atkbd)
"keyboard reset failed on %s\n",
ps2dev->serio->phys);
+ if (atkbd_skip_getid(atkbd)) {
+ atkbd->id = 0xab83;
+ return 0;
+ }
+
/*
* Then we check the keyboard ID. We should get 0xab83 under normal conditions.
* Some keyboards report different values, but the first byte is always 0xab or
@@ -747,9 +790,9 @@ static int atkbd_probe(struct atkbd *atkbd)
if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
/*
- * If the get ID command failed, we check if we can at least set the LEDs on
- * the keyboard. This should work on every keyboard out there. It also turns
- * the LEDs off, which we want anyway.
+ * If the get ID command failed, we check if we can at least set
+ * the LEDs on the keyboard. This should work on every keyboard out there.
+ * It also turns the LEDs off, which we want anyway.
*/
param[0] = 0;
if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 465eecfa6b3f..825fff00e4f8 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -325,12 +325,10 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
error = devm_gpio_request_one(dev, button->gpio,
flags, button->desc ? : DRV_NAME);
- if (error) {
- dev_err(dev,
- "unable to claim gpio %u, err=%d\n",
- button->gpio, error);
- return error;
- }
+ if (error)
+ return dev_err_probe(dev, error,
+ "unable to claim gpio %u\n",
+ button->gpio);
bdata->gpiod = gpio_to_desc(button->gpio);
if (!bdata->gpiod) {
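
Note: dev_err_probe(), used above, folds the log-and-return idiom into one call and, for -EPROBE_DEFER, logs at debug level while recording the deferral reason for later inspection. A sketch with a hypothetical "power" GPIO; any resource lookup follows the same shape.

#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct gpio_desc *gpiod;

	/* "power" is an illustrative consumer name */
	gpiod = devm_gpiod_get(&pdev->dev, "power", GPIOD_OUT_LOW);
	if (IS_ERR(gpiod))
		return dev_err_probe(&pdev->dev, PTR_ERR(gpiod),
				     "unable to claim power GPIO\n");
	return 0;
}
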
diff --git a/drivers/input/keyboard/ipaq-micro-keys.c b/drivers/input/keyboard/ipaq-micro-keys.c
index e3f9e445e880..fe5a9c54ad58 100644
--- a/drivers/input/keyboard/ipaq-micro-keys.c
+++ b/drivers/input/keyboard/ipaq-micro-keys.c
@@ -105,6 +105,9 @@ static int micro_key_probe(struct platform_device *pdev)
keys->codes = devm_kmemdup(&pdev->dev, micro_keycodes,
keys->input->keycodesize * keys->input->keycodemax,
GFP_KERNEL);
+ if (!keys->codes)
+ return -ENOMEM;
+
keys->input->keycode = keys->codes;
__set_bit(EV_KEY, keys->input->evbit);
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 5fe7a5633e33..dbe836c7ff47 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -46,7 +46,7 @@ struct omap_kp {
unsigned short keymap[];
};
-static DECLARE_TASKLET_DISABLED(kp_tasklet, omap_kp_tasklet, 0);
+static DECLARE_TASKLET_DISABLED_OLD(kp_tasklet, omap_kp_tasklet);
static unsigned int *row_gpios;
static unsigned int *col_gpios;
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 4cc4e8ff42b3..ad035c342cd3 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -811,8 +811,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
AC_WRITE(ac, POWER_CTL, 0);
err = request_threaded_irq(ac->irq, NULL, adxl34x_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- dev_name(dev), ac);
+ IRQF_ONESHOT, dev_name(dev), ac);
if (err) {
dev_err(dev, "irq %d busy?\n", ac->irq);
goto err_free_mem;
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index 79d7fa710a71..54002d1a446b 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -435,6 +435,7 @@ static int drv260x_init(struct drv260x_data *haptics)
}
do {
+ usleep_range(15000, 15500);
error = regmap_read(haptics->regmap, DRV260X_GO, &cal_buf);
if (error) {
dev_err(&haptics->client->dev,
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index c4e0e1886061..6b1b95d58e6b 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -425,6 +425,7 @@ static void powermate_disconnect(struct usb_interface *intf)
pm->requires_update = 0;
usb_kill_urb(pm->irq);
input_unregister_device(pm->input);
+ usb_kill_urb(pm->config);
usb_free_urb(pm->irq);
usb_free_urb(pm->config);
powermate_free_buffers(interface_to_usbdev(intf), pm);
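
Note: the powermate fix orders teardown so that usb_kill_urb() runs on the config URB before usb_free_urb() drops it. Killing an URB cancels it and waits for its completion handler to finish; only then is freeing safe. The generic teardown shape:

#include <linux/usb.h>

static void example_teardown(struct urb *urb)
{
	usb_kill_urb(urb);	/* cancel and wait for the completion */
	usb_free_urb(urb);	/* only now is the URB safe to drop */
}
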
diff --git a/drivers/input/misc/rk805-pwrkey.c b/drivers/input/misc/rk805-pwrkey.c
index 3fb64dbda1a2..76873aa005b4 100644
--- a/drivers/input/misc/rk805-pwrkey.c
+++ b/drivers/input/misc/rk805-pwrkey.c
@@ -98,6 +98,7 @@ static struct platform_driver rk805_pwrkey_driver = {
};
module_platform_driver(rk805_pwrkey_driver);
+MODULE_ALIAS("platform:rk805-pwrkey");
MODULE_AUTHOR("Joseph Chen <chenjh@rock-chips.com>");
MODULE_DESCRIPTION("RK805 PMIC Power Key driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 34700eda0429..3aaebadb2e13 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -852,8 +852,8 @@ static void alps_process_packet_v6(struct psmouse *psmouse)
x = y = z = 0;
/* Divide 4 since trackpoint's speed is too fast */
- input_report_rel(dev2, REL_X, (char)x / 4);
- input_report_rel(dev2, REL_Y, -((char)y / 4));
+ input_report_rel(dev2, REL_X, (s8)x / 4);
+ input_report_rel(dev2, REL_Y, -((s8)y / 4));
psmouse_report_standard_buttons(dev2, packet[3]);
@@ -1104,8 +1104,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
((packet[3] & 0x20) << 1);
z = (packet[5] & 0x3f) | ((packet[3] & 0x80) >> 1);
- input_report_rel(dev2, REL_X, (char)x);
- input_report_rel(dev2, REL_Y, -((char)y));
+ input_report_rel(dev2, REL_X, (s8)x);
+ input_report_rel(dev2, REL_Y, -((s8)y));
input_report_abs(dev2, ABS_PRESSURE, z);
psmouse_report_standard_buttons(dev2, packet[1]);
@@ -2294,20 +2294,20 @@ static int alps_get_v3_v7_resolution(struct psmouse *psmouse, int reg_pitch)
if (reg < 0)
return reg;
- x_pitch = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
+ x_pitch = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_pitch = 50 + 2 * x_pitch; /* In 0.1 mm units */
- y_pitch = (char)reg >> 4; /* sign extend upper 4 bits */
+ y_pitch = (s8)reg >> 4; /* sign extend upper 4 bits */
y_pitch = 36 + 2 * y_pitch; /* In 0.1 mm units */
reg = alps_command_mode_read_reg(psmouse, reg_pitch + 1);
if (reg < 0)
return reg;
- x_electrode = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
+ x_electrode = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_electrode = 17 + x_electrode;
- y_electrode = (char)reg >> 4; /* sign extend upper 4 bits */
+ y_electrode = (s8)reg >> 4; /* sign extend upper 4 bits */
y_electrode = 13 + y_electrode;
x_phys = x_pitch * (x_electrode - 1); /* In 0.1 mm units */
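
Note: the (char) to (s8) conversions in alps.c matter because the signedness of plain char is implementation-defined; on arm, s390 and powerpc char is unsigned, so the old casts never sign-extended and negative deltas were mangled. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int reg = 0xf8;	/* raw byte from the device */

	int via_char = (char)reg;	/* 248 or -8, depends on the ABI */
	int via_s8 = (int8_t)reg;	/* always -8 (kernel s8) */

	printf("char: %d, s8: %d\n", via_char, via_s8);
	return 0;
}
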
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 3e78c2602581..9ff89bfda7a2 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -674,10 +674,11 @@ static void process_packet_head_v4(struct psmouse *psmouse)
struct input_dev *dev = psmouse->dev;
struct elantech_data *etd = psmouse->private;
unsigned char *packet = psmouse->packet;
- int id = ((packet[3] & 0xe0) >> 5) - 1;
+ int id;
int pres, traces;
- if (id < 0)
+ id = ((packet[3] & 0xe0) >> 5) - 1;
+ if (id < 0 || id >= ETP_MAX_FINGERS)
return;
etd->mt[id].x = ((packet[1] & 0x0f) << 8) | packet[2];
@@ -707,7 +708,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse)
int id, sid;
id = ((packet[0] & 0xe0) >> 5) - 1;
- if (id < 0)
+ if (id < 0 || id >= ETP_MAX_FINGERS)
return;
sid = ((packet[3] & 0xe0) >> 5) - 1;
@@ -728,7 +729,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse)
input_report_abs(dev, ABS_MT_POSITION_X, etd->mt[id].x);
input_report_abs(dev, ABS_MT_POSITION_Y, etd->mt[id].y);
- if (sid >= 0) {
+ if (sid >= 0 && sid < ETP_MAX_FINGERS) {
etd->mt[sid].x += delta_x2 * weight;
etd->mt[sid].y -= delta_y2 * weight;
input_mt_slot(dev, sid);
@@ -2113,6 +2114,7 @@ static int elantech_setup_ps2(struct psmouse *psmouse,
psmouse->protocol_handler = elantech_process_byte;
psmouse->disconnect = elantech_disconnect;
psmouse->reconnect = elantech_reconnect;
+ psmouse->fast_reconnect = NULL;
psmouse->pktsize = info->hw_version > 1 ? 6 : 4;
return 0;
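
Note: the elantech hunks bound the decoded finger id on both sides before it indexes etd->mt[]; a malformed packet could otherwise write past the array. The general shape for indices decoded from untrusted device data, with illustrative names (MAX_SLOTS stands in for ETP_MAX_FINGERS):

#include <stdbool.h>

#define MAX_SLOTS 5

struct slot {
	int x, y;
};

/* Returns false if the decoded index cannot safely address slots[]. */
static bool store_point(struct slot *slots, unsigned char raw, int x, int y)
{
	int id = ((raw & 0xe0) >> 5) - 1;

	if (id < 0 || id >= MAX_SLOTS)	/* reject malformed packets */
		return false;

	slots[id].x = x;
	slots[id].y = y;
	return true;
}
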
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index 6fd5fff0cbff..c74b99077d16 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -202,8 +202,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
state->pressed = packet[0] >> 7;
finger1 = ((packet[0] >> 4) & 0x7) - 1;
if (finger1 < FOC_MAX_FINGERS) {
- state->fingers[finger1].x += (char)packet[1];
- state->fingers[finger1].y += (char)packet[2];
+ state->fingers[finger1].x += (s8)packet[1];
+ state->fingers[finger1].y += (s8)packet[2];
} else {
psmouse_err(psmouse, "First finger in rel packet invalid: %d\n",
finger1);
@@ -218,8 +218,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
*/
finger2 = ((packet[3] >> 4) & 0x7) - 1;
if (finger2 < FOC_MAX_FINGERS) {
- state->fingers[finger2].x += (char)packet[4];
- state->fingers[finger2].y += (char)packet[5];
+ state->fingers[finger2].x += (s8)packet[4];
+ state->fingers[finger2].y += (s8)packet[5];
}
}
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 4b81b2d0fe06..9cfd2c1d4e3a 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1617,6 +1617,7 @@ static int synaptics_init_ps2(struct psmouse *psmouse,
psmouse->set_rate = synaptics_set_rate;
psmouse->disconnect = synaptics_disconnect;
psmouse->reconnect = synaptics_reconnect;
+ psmouse->fast_reconnect = NULL;
psmouse->cleanup = synaptics_reset;
/* Synaptics can usually stay in sync without extra help */
psmouse->resync_time = 0;
@@ -1746,6 +1747,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse,
psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
!SYN_CAP_EXT_BUTTONS_STICK(info->ext_cap_10);
const struct rmi_device_platform_data pdata = {
+ .reset_delay_ms = 30,
.sensor_pdata = {
.sensor_type = rmi_sensor_touchpad,
.axis_align.flip_y = true,
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index af706a583656..1e3aaff310c9 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -276,11 +276,11 @@ void rmi_unregister_function(struct rmi_function *fn)
device_del(&fn->dev);
of_node_put(fn->dev.of_node);
- put_device(&fn->dev);
for (i = 0; i < fn->num_of_irqs; i++)
irq_dispose_mapping(fn->irq[i]);
+ put_device(&fn->dev);
}
/**
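
Note: the rmi_bus.c reorder exists because put_device() can drop the final reference and free the containing structure, after which fn->irq[] is gone; every field access must precede it. A sketch under that assumption; struct my_fn is a hypothetical refcounted wrapper, not the RMI4 type.

#include <linux/device.h>
#include <linux/irqdomain.h>

struct my_fn {
	struct device dev;
	unsigned int num_irqs;
	int irq[8];
};

static void unregister_fn(struct my_fn *fn)
{
	int i;

	device_del(&fn->dev);

	for (i = 0; i < fn->num_irqs; i++)
		irq_dispose_mapping(fn->irq[i]);	/* needs fn alive */

	put_device(&fn->dev);	/* may free fn; last use of the pointer */
}
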
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
index 2407ea43de59..f38bf9a5f599 100644
--- a/drivers/input/rmi4/rmi_smbus.c
+++ b/drivers/input/rmi4/rmi_smbus.c
@@ -235,12 +235,29 @@ static void rmi_smb_clear_state(struct rmi_smb_xport *rmi_smb)
static int rmi_smb_enable_smbus_mode(struct rmi_smb_xport *rmi_smb)
{
- int retval;
+ struct i2c_client *client = rmi_smb->client;
+ int smbus_version;
+
+ /*
+ * The psmouse driver resets the controller; we only need to wait
+ * to give the firmware a chance to fully reinitialize.
+ */
+ if (rmi_smb->xport.pdata.reset_delay_ms)
+ msleep(rmi_smb->xport.pdata.reset_delay_ms);
/* we need to get the smbus version to activate the touchpad */
- retval = rmi_smb_get_version(rmi_smb);
- if (retval < 0)
- return retval;
+ smbus_version = rmi_smb_get_version(rmi_smb);
+ if (smbus_version < 0)
+ return smbus_version;
+
+ rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
+ smbus_version);
+
+ if (smbus_version != 2 && smbus_version != 3) {
+ dev_err(&client->dev, "Unrecognized SMB version %d\n",
+ smbus_version);
+ return -ENODEV;
+ }
return 0;
}
@@ -253,11 +270,10 @@ static int rmi_smb_reset(struct rmi_transport_dev *xport, u16 reset_addr)
rmi_smb_clear_state(rmi_smb);
/*
- * we do not call the actual reset command, it has to be handled in
- * PS/2 or there will be races between PS/2 and SMBus.
- * PS/2 should ensure that a psmouse_reset is called before
- * intializing the device and after it has been removed to be in a known
- * state.
+ * We do not call the actual reset command, it has to be handled in
+ * PS/2 or there will be races between PS/2 and SMBus. PS/2 should
+ * ensure that a psmouse_reset is called before initializing the
+ * device and after it has been removed to be in a known state.
*/
return rmi_smb_enable_smbus_mode(rmi_smb);
}
@@ -273,7 +289,6 @@ static int rmi_smb_probe(struct i2c_client *client,
{
struct rmi_device_platform_data *pdata = dev_get_platdata(&client->dev);
struct rmi_smb_xport *rmi_smb;
- int smbus_version;
int error;
if (!pdata) {
@@ -312,18 +327,9 @@ static int rmi_smb_probe(struct i2c_client *client,
rmi_smb->xport.proto_name = "smb";
rmi_smb->xport.ops = &rmi_smb_ops;
- smbus_version = rmi_smb_get_version(rmi_smb);
- if (smbus_version < 0)
- return smbus_version;
-
- rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
- smbus_version);
-
- if (smbus_version != 2 && smbus_version != 3) {
- dev_err(&client->dev, "Unrecognized SMB version %d\n",
- smbus_version);
- return -ENODEV;
- }
+ error = rmi_smb_enable_smbus_mode(rmi_smb);
+ if (error)
+ return error;
i2c_set_clientdata(client, rmi_smb);
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index 4c039e4125d9..d36e89d6fc54 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -77,7 +77,7 @@ static struct timer_list hil_mlcs_kicker;
static int hil_mlcs_probe, hil_mlc_stop;
static void hil_mlcs_process(unsigned long unused);
-static DECLARE_TASKLET_DISABLED(hil_mlcs_tasklet, hil_mlcs_process, 0);
+static DECLARE_TASKLET_DISABLED_OLD(hil_mlcs_tasklet, hil_mlcs_process);
/* #define HIL_MLC_DEBUG */
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 0282c4c55e9d..4ae96bd56253 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -67,25 +67,84 @@ static inline void i8042_write_command(int val)
#include <linux/dmi.h>
-static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+#define SERIO_QUIRK_NOKBD BIT(0)
+#define SERIO_QUIRK_NOAUX BIT(1)
+#define SERIO_QUIRK_NOMUX BIT(2)
+#define SERIO_QUIRK_FORCEMUX BIT(3)
+#define SERIO_QUIRK_UNLOCK BIT(4)
+#define SERIO_QUIRK_PROBE_DEFER BIT(5)
+#define SERIO_QUIRK_RESET_ALWAYS BIT(6)
+#define SERIO_QUIRK_RESET_NEVER BIT(7)
+#define SERIO_QUIRK_DIECT BIT(8)
+#define SERIO_QUIRK_DUMBKBD BIT(9)
+#define SERIO_QUIRK_NOLOOP BIT(10)
+#define SERIO_QUIRK_NOTIMEOUT BIT(11)
+#define SERIO_QUIRK_KBDRESET BIT(12)
+#define SERIO_QUIRK_DRITEK BIT(13)
+#define SERIO_QUIRK_NOPNP BIT(14)
+
+/* Quirk table for different mainboards. Options similar or identical to i8042
+ * module parameters.
+ * ORDERING IS IMPORTANT! The first match will be applied and the rest ignored.
+ * This allows entries to overwrite vendor-wide quirks on a per-device basis.
+ * Where this is irrelevant, entries are sorted case-sensitively by DMI_SYS_VENDOR
+ * and/or DMI_BOARD_VENDOR to make it easier to avoid duplicate entries.
+ */
+static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
{
- /*
- * Arima-Rioworks HDAMB -
- * AUX LOOP command does not raise AUX IRQ
- */
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"),
- DMI_MATCH(DMI_BOARD_NAME, "HDAMB"),
- DMI_MATCH(DMI_BOARD_VERSION, "Rev E"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* ASUS G1S */
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_BOARD_NAME, "G1S"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
+ },
+ {
+ /* Asus X450LCP */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+ /* ASUS ZenBook UX425UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
},
+ .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+ /* ASUS ZenBook UM325UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
+ },
+ /*
+ * On some Asus laptops, just running self tests causes problems.
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
+ .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+ },
+ .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER)
},
{
/* ASUS P65UP5 - AUX LOOP command does not raise AUX IRQ */
@@ -94,585 +153,705 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "P/I-P65UP5"),
DMI_MATCH(DMI_BOARD_VERSION, "REV 2.X"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
+ /* ASUS G1S */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "G1S"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
- DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Acer Aspire 5710 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
- DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Embedded Box PC 3000 */
+ /* Acer Aspire 7738 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* OQO Model 01 */
+ /* Acer Aspire 5536 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "00"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* ULI EV4873 - AUX LOOP does not work properly */
+ /*
+ * Acer Aspire 5738z
+ * Touchpad stops working in mux mode when dis- + re-enabled
+ * with the touchpad enable/disable toggle hotkey
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ULI"),
- DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Microsoft Virtual Machine */
+ /* Acer Aspire One 150 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Medion MAM 2070 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Medion Akoya E7225 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Blue FB5601 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "blue"),
- DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "M606"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte M912 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "M912"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "01"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte M1022M netbook */
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
- DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte Spring Peak - defines wrong chassis type */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte T1005 - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
+ /*
+ * Some Wistron based laptops need us to explicitly enable the 'Dritek
+ * keyboard extension' to make their extra keys start generating scancodes.
+ * Originally, this was just confined to older laptops, but a few Acer laptops
+ * have turned up in 2007 that also need this again.
+ */
{
- /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
+ /* Acer Aspire 5100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
+ /* Acer Aspire 5610 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
+ /* Acer Aspire 5630 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
- DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
+ /* Acer Aspire 5650 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
- { }
-};
-
-/*
- * Some Fujitsu notebooks are having trouble with touchpads if
- * active multiplexing mode is activated. Luckily they don't have
- * external PS/2 ports so we can safely disable it.
- * ... apparently some Toshibas don't like MUX mode either and
- * die horrible death on reboot.
- */
-static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
{
- /* Fujitsu Lifebook P7010/P7010D */
+ /* Acer Aspire 5680 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook P7010 */
+ /* Acer Aspire 5720 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook P5020D */
+ /* Acer Aspire 9110 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook S2000 */
+ /* Acer TravelMate 660 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook S6230 */
+ /* Acer TravelMate 2490 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook T725 laptop */
+ /* Acer TravelMate 4280 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook U745 */
+ /* Acer TravelMate P459-G2-M */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate P459-G2-M"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Fujitsu T70H */
+ /* Amoi M636/A737 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Fujitsu-Siemens Lifebook T3010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Fujitsu-Siemens Lifebook E4010 */
+ /* Compal HEL80I */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Fujitsu-Siemens Amilo Pro 2010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Fujitsu-Siemens Amilo Pro 2030 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /*
- * No data is coming from the touchscreen unless KBC
- * is in legacy mode.
- */
- /* Panasonic CF-29 */
+ /* Advent 4211 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
+ DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /*
- * HP Pavilion DV4017EA -
- * errors on MUX ports are reported without raising AUXDATA
- * causing "spurious NAK" messages.
- */
+ /* Dell Embedded Box PC 3000 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /*
- * HP Pavilion ZT1000 -
- * like DV4017EA does not raise AUXERR for errors on MUX ports.
- */
+ /* Dell XPS M1530 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /*
- * HP Pavilion DV4270ca -
- * like DV4017EA does not raise AUXERR for errors on MUX ports.
- */
+ /* Dell Vostro 1510 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Dell Vostro V13 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
},
{
+ /* Dell Vostro 1320 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
+ /* Dell Vostro 1520 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
+ /* Dell Vostro 1720 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Sharp Actius MM20 */
+ /* Entroware Proteus */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS)
},
+ /*
+ * Some Fujitsu notebooks are having trouble with touchpads if
+ * active multiplexing mode is activated. Luckily they don't have
+ * external PS/2 ports so we can safely disable it.
+ * ... apparently some Toshibas don't like MUX mode either and
+ * die horrible death on reboot.
+ */
{
- /* Sony Vaio FS-115b */
+ /* Fujitsu Lifebook P7010/P7010D */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /*
- * Sony Vaio FZ-240E -
- * reset and GET ID commands issued via KBD port are
- * sometimes being delivered to AUX3.
- */
+ /* Fujitsu Lifebook P5020D */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /*
- * Most (all?) VAIOs do not have external PS/2 ports nor
- * they implement active multiplexing properly, and
- * MUX discovery usually messes up keyboard/touchpad.
- */
+ /* Fujitsu Lifebook S2000 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Amoi M636/A737 */
+ /* Fujitsu Lifebook S6230 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Lenovo 3000 n100 */
+ /* Fujitsu Lifebook T725 laptop */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Lenovo XiaoXin Air 12 */
+ /* Fujitsu Lifebook U745 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Fujitsu T70H */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire 5710 */
+ /* Fujitsu A544 laptop */
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Acer Aspire 7738 */
+ /* Fujitsu AH544 laptop */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Gericom Bellagio */
+ /* Fujitsu U574 laptop */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
- DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* IBM 2656 */
+ /* Fujitsu UH554 laptop */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
- DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Dell XPS M1530 */
+ /* Fujitsu Lifebook P7010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Compal HEL80I */
+ /* Fujitsu-Siemens Lifebook T3010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Vostro 1510 */
+ /* Fujitsu-Siemens Lifebook E4010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire 5536 */
+ /* Fujitsu-Siemens Amilo Pro 2010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Vostro V13 */
+ /* Fujitsu-Siemens Amilo Pro 2030 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Newer HP Pavilion dv4 models */
+ /* Fujitsu Lifebook A574/H */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FMVA0501PZ"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Asus X450LCP */
+ /* Fujitsu Lifebook E5411 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU CLIENT COMPUTING LIMITED"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E5411"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
},
{
- /* Avatar AVIU-145A6 */
+ /* Gigabyte M912 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
- DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M912"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "01"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* TUXEDO BU1406 */
+ /* Gigabyte Spring Peak - defines wrong chassis type */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Lenovo LaVie Z */
+ /* Gigabyte T1005 - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /*
- * Acer Aspire 5738z
- * Touchpad stops working in mux mode when dis- + re-enabled
- * with the touchpad enable/disable toggle hotkey
- */
+ /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
+ /*
+ * Some laptops need a keyboard reset before probing for the trackpad to get
+ * it detected, initialised and finally working.
+ */
{
- /* Entroware Proteus */
+ /* Gigabyte P35 v2 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
+ },
+ {
+ /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
+ },
+ {
+ /* Gigabyte P34 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
+ },
+ {
+ /* Gigabyte P57 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
+ },
+ {
+ /* Gericom Bellagio */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+ /* Gigabyte M1022M netbook */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
- { }
-};
-
-static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = {
{
/*
- * Sony Vaio VGN-CS series require MUX or the touch sensor
- * buttons will disturb touchpad operation
+ * HP Pavilion DV4017EA -
+ * errors on MUX ports are reported without raising AUXDATA,
+ * causing "spurious NAK" messages.
*/
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-
-/*
- * On some Asus laptops, just running self tests cause problems.
- */
-static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
{
+ /*
+ * HP Pavilion ZT1000 -
+ * like DV4017EA does not raise AUXERR for errors on MUX ports.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"),
},
- }, {
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+ /*
+ * HP Pavilion DV4270ca -
+ * like DV4017EA does not raise AUXERR for errors on MUX ports.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
{
- /* MSI Wind U-100 */
+ /* Newer HP Pavilion dv4 models */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "U-100"),
- DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
},
{
- /* LG Electronics X110 */
+ /* IBM 2656 */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "X110"),
- DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire One 150 */
+ /* Avatar AVIU-145A6 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Intel MBO Desktop D845PESV */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "D845PESV"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
},
{
+ /*
+ * Intel NUC D54250WYK - does not have an i8042 controller but
+ * declares PS/2 devices in its DSDT.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
},
{
+ /* Lenovo 3000 n100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Lenovo XiaoXin Air 12 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Lenovo LaVie Z */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Lenovo Ideapad U455 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
+ /* Lenovo ThinkPad L460 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Advent 4211 */
+ /* Lenovo ThinkPad Twist S230u */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
+ },
+ {
+ /* LG Electronics X110 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "X110"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
/* Medion Akoya Mini E1210 */
@@ -680,6 +859,7 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_PRODUCT_NAME, "E1210"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
/* Medion Akoya E1222 */
@@ -687,48 +867,62 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_PRODUCT_NAME, "E122X"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Mivvy M310 */
+ /* MSI Wind U-100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_BOARD_NAME, "U-100"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOPNP)
},
{
- /* Dell Vostro 1320 */
+ /*
+ * No data comes from the touchscreen unless the KBC
+ * is in legacy mode.
+ */
+ /* Panasonic CF-29 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Vostro 1520 */
+ /* Medion Akoya E7225 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Dell Vostro 1720 */
+ /* Microsoft Virtual Machine */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Lenovo Ideapad U455 */
+ /* Medion MAM 2070 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Lenovo ThinkPad L460 */
+ /* TUXEDO BU1406 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
/* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
@@ -736,282 +930,331 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Lenovo ThinkPad Twist S230u */
+ /* OQO Model 01 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "00"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Entroware Proteus */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
+ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
- { }
-};
-
-#ifdef CONFIG_PNP
-static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
{
- /* Intel MBO Desktop D845PESV */
+ /* Acer Aspire 5 A515 */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "D845PESV"),
- DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
+ DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
+ },
+ {
+ /* ULI EV4873 - AUX LOOP does not work properly */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ULI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
/*
- * Intel NUC D54250WYK - does not have i8042 controller but
- * declares PS/2 devices in DSDT.
+ * Arima-Rioworks HDAMB -
+ * AUX LOOP command does not raise AUX IRQ
*/
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"),
- DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"),
+ DMI_MATCH(DMI_BOARD_NAME, "HDAMB"),
+ DMI_MATCH(DMI_BOARD_VERSION, "Rev E"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* MSI Wind U-100 */
+ /* Sharp Actius MM20 */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "U-100"),
- DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire 5 A515 */
+ /*
+ * Sony Vaio FZ-240E -
+ * reset and GET ID commands issued via the KBD port are
+ * sometimes delivered to AUX3.
+ */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
- DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-
-static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
{
+ /*
+ * Most (all?) VAIOs do not have external PS/2 ports, nor do
+ * they implement active multiplexing properly, and
+ * MUX discovery usually messes up keyboard/touchpad.
+ */
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Sony Vaio FS-115b */
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /*
+ * Sony Vaio VGN-CS series require MUX or the touch sensor
+ * buttons will disturb touchpad operation
+ */
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
},
+ .driver_data = (void *)(SERIO_QUIRK_FORCEMUX)
},
{
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-#endif
-
-static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
{
- /* Dell Vostro V13 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Newer HP Pavilion dv4 models */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
+ /*
+ * A lot of modern Clevo barebones have touchpad and/or keyboard issues
+ * after suspend which are fixable with nomux + reset + noloop + nopnp.
+ * Luckily, none of them have an external PS/2 port, so this can safely
+ * be set for all of them. These two are based on a Clevo design, but
+ * have the board_name changed.
+ */
{
- /* Fujitsu A544 laptop */
- /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Fujitsu AH544 laptop */
- /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Fujitsu Lifebook T725 laptop */
+ /* Mivvy M310 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
+ DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
+ /*
+ * Some laptops need a keyboard reset before probing for the trackpad to
+ * get it detected, initialised and finally working.
+ */
{
- /* Fujitsu U574 laptop */
- /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+ /* Schenker XMG C504 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
+ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
},
{
- /* Fujitsu UH554 laptop */
+ /* Blue FB5601 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
+ DMI_MATCH(DMI_SYS_VENDOR, "blue"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "M606"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
- { }
-};
-
-/*
- * Some Wistron based laptops need us to explicitly enable the 'Dritek
- * keyboard extension' to make their extra keys start generating scancodes.
- * Originally, this was just confined to older laptops, but a few Acer laptops
- * have turned up in 2007 that also need this again.
- */
-static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
+ /*
+ * A lot of modern Clevo barebones have touchpad and/or keyboard issues
+ * after suspend which are fixable with nomux + reset + noloop + nopnp.
+ * Luckily, none of them have an external PS/2 port, so this can safely
+ * be set for all of them.
+ * Clevo barebones ship with board_vendor and/or system_vendor set
+ * either to the very generic string "Notebook" or to a different value
+ * for each individual reseller. The only somewhat universal way to
+ * identify them is by board_name.
+ */
{
- /* Acer Aspire 5100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
+ DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5610 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
+ DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5630 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
+ DMI_MATCH(DMI_BOARD_NAME, "N140CU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5650 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
+ DMI_MATCH(DMI_BOARD_NAME, "N141CU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5680 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
+ DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5720 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
+ /*
+ * At least one modern Clevo barebone has the touchpad connected via
+ * both the PS/2 and the I2C interface. This causes a race condition
+ * between the psmouse and i2c-hid drivers. Since the touchpad's full
+ * capability is available via the I2C interface and the device has no
+ * external PS/2 port, it is safe to simply ignore all PS/2 mice here
+ * to avoid this issue. The known affected device is the
+ * TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU which comes with one of
+ * the two different dmi strings below. NS50MU is not a typo!
+ */
{
- /* Acer Aspire 9110 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
+ DMI_MATCH(DMI_BOARD_NAME, "NS50MU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
+ SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
+ SERIO_QUIRK_NOPNP)
},
{
- /* Acer TravelMate 660 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
+ DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
+ SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
+ SERIO_QUIRK_NOPNP)
},
{
- /* Acer TravelMate 2490 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
+ DMI_MATCH(DMI_BOARD_NAME, "NS5x_7xPU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
},
{
- /* Acer TravelMate 4280 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
+ DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
- { }
-};
-
-/*
- * Some laptops need keyboard reset before probing for the trackpad to get
- * it detected, initialised & finally work.
- */
-static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
{
- /* Gigabyte P35 v2 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
+ DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
- {
- /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+ {
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+ DMI_MATCH(DMI_BOARD_NAME, "PCX0DX"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
+ /* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */
{
- /* Gigabyte P34 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ DMI_MATCH(DMI_BOARD_NAME, "PD5x_7xPNP_PNR_PNN_PNT"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
},
{
- /* Gigabyte P57 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
+ DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Schenker XMG C504 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
- DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
+ DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{ }
};
-static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = {
+#ifdef CONFIG_PNP
+static const struct dmi_system_id i8042_dmi_laptop_table[] __initconst = {
{
- /* ASUS ZenBook UX425UA */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
},
},
{
- /* ASUS ZenBook UM325UA */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
},
},
{ }
};
+#endif
#endif /* CONFIG_X86 */
@@ -1166,11 +1409,6 @@ static int __init i8042_pnp_init(void)
bool pnp_data_busted = false;
int err;
-#ifdef CONFIG_X86
- if (dmi_check_system(i8042_dmi_nopnp_table))
- i8042_nopnp = true;
-#endif
-
if (i8042_nopnp) {
pr_info("PNP detection disabled\n");
return 0;
@@ -1274,6 +1512,59 @@ static inline int i8042_pnp_init(void) { return 0; }
static inline void i8042_pnp_exit(void) { }
#endif /* CONFIG_PNP */
+
+#ifdef CONFIG_X86
+static void __init i8042_check_quirks(void)
+{
+ const struct dmi_system_id *device_quirk_info;
+ uintptr_t quirks;
+
+ device_quirk_info = dmi_first_match(i8042_dmi_quirk_table);
+ if (!device_quirk_info)
+ return;
+
+ quirks = (uintptr_t)device_quirk_info->driver_data;
+
+ if (quirks & SERIO_QUIRK_NOKBD)
+ i8042_nokbd = true;
+ if (quirks & SERIO_QUIRK_NOAUX)
+ i8042_noaux = true;
+ if (quirks & SERIO_QUIRK_NOMUX)
+ i8042_nomux = true;
+ if (quirks & SERIO_QUIRK_FORCEMUX)
+ i8042_nomux = false;
+ if (quirks & SERIO_QUIRK_UNLOCK)
+ i8042_unlock = true;
+ if (quirks & SERIO_QUIRK_PROBE_DEFER)
+ i8042_probe_defer = true;
+ /* Honor module parameter when value is not default */
+ if (i8042_reset == I8042_RESET_DEFAULT) {
+ if (quirks & SERIO_QUIRK_RESET_ALWAYS)
+ i8042_reset = I8042_RESET_ALWAYS;
+ if (quirks & SERIO_QUIRK_RESET_NEVER)
+ i8042_reset = I8042_RESET_NEVER;
+ }
+	if (quirks & SERIO_QUIRK_DIRECT)
+ i8042_direct = true;
+ if (quirks & SERIO_QUIRK_DUMBKBD)
+ i8042_dumbkbd = true;
+ if (quirks & SERIO_QUIRK_NOLOOP)
+ i8042_noloop = true;
+ if (quirks & SERIO_QUIRK_NOTIMEOUT)
+ i8042_notimeout = true;
+ if (quirks & SERIO_QUIRK_KBDRESET)
+ i8042_kbdreset = true;
+ if (quirks & SERIO_QUIRK_DRITEK)
+ i8042_dritek = true;
+#ifdef CONFIG_PNP
+ if (quirks & SERIO_QUIRK_NOPNP)
+ i8042_nopnp = true;
+#endif
+}
+#else
+static inline void i8042_check_quirks(void) {}
+#endif
+
static int __init i8042_platform_init(void)
{
int retval;
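
The merged table and i8042_check_quirks() above replace the per-option DMI
tables with one list whose driver_data packs every applicable workaround into
a single bitmask, decoded once via dmi_first_match(). A minimal sketch of the
encode/decode pattern follows; the BIT() positions are assumptions here (the
real SERIO_QUIRK_* defines live earlier in i8042-x86ia64io.h, outside these
hunks), and the vendor string is hypothetical:

    #define SERIO_QUIRK_NOMUX           BIT(2)  /* assumed bit positions */
    #define SERIO_QUIRK_RESET_ALWAYS    BIT(6)

    static const struct dmi_system_id quirk_table[] __initconst = {
        {
            /* hypothetical machine needing two workarounds at once */
            .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
            },
            .driver_data = (void *)(SERIO_QUIRK_NOMUX |
                                    SERIO_QUIRK_RESET_ALWAYS),
        },
        { }
    };

    static void __init check_quirks(void)
    {
        /* dmi_first_match() stops at the first hit, so more specific
         * entries must precede broader ones in the table. */
        const struct dmi_system_id *m = dmi_first_match(quirk_table);
        uintptr_t quirks = m ? (uintptr_t)m->driver_data : 0;

        if (quirks & SERIO_QUIRK_NOMUX)
            i8042_nomux = true;
    }
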
@@ -1296,45 +1587,17 @@ static int __init i8042_platform_init(void)
i8042_kbd_irq = I8042_MAP_IRQ(1);
i8042_aux_irq = I8042_MAP_IRQ(12);
- retval = i8042_pnp_init();
- if (retval)
- return retval;
-
#if defined(__ia64__)
- i8042_reset = I8042_RESET_ALWAYS;
+ i8042_reset = I8042_RESET_ALWAYS;
#endif
-#ifdef CONFIG_X86
- /* Honor module parameter when value is not default */
- if (i8042_reset == I8042_RESET_DEFAULT) {
- if (dmi_check_system(i8042_dmi_reset_table))
- i8042_reset = I8042_RESET_ALWAYS;
-
- if (dmi_check_system(i8042_dmi_noselftest_table))
- i8042_reset = I8042_RESET_NEVER;
- }
-
- if (dmi_check_system(i8042_dmi_noloop_table))
- i8042_noloop = true;
-
- if (dmi_check_system(i8042_dmi_nomux_table))
- i8042_nomux = true;
-
- if (dmi_check_system(i8042_dmi_forcemux_table))
- i8042_nomux = false;
-
- if (dmi_check_system(i8042_dmi_notimeout_table))
- i8042_notimeout = true;
-
- if (dmi_check_system(i8042_dmi_dritek_table))
- i8042_dritek = true;
+ i8042_check_quirks();
- if (dmi_check_system(i8042_dmi_kbdreset_table))
- i8042_kbdreset = true;
-
- if (dmi_check_system(i8042_dmi_probe_defer_table))
- i8042_probe_defer = true;
+ retval = i8042_pnp_init();
+ if (retval)
+ return retval;
+#ifdef CONFIG_X86
/*
* A20 was already enabled during early kernel init. But some buggy
* BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index bb76ff2f6b1d..dc40f6099dcf 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1540,8 +1540,6 @@ static int i8042_probe(struct platform_device *dev)
{
int error;
- i8042_platform_device = dev;
-
if (i8042_reset == I8042_RESET_ALWAYS) {
error = i8042_controller_selftest();
if (error)
@@ -1579,7 +1577,6 @@ static int i8042_probe(struct platform_device *dev)
i8042_free_aux_ports(); /* in case KBD failed but AUX not */
i8042_free_irqs();
i8042_controller_reset(false);
- i8042_platform_device = NULL;
return error;
}
@@ -1589,7 +1586,6 @@ static int i8042_remove(struct platform_device *dev)
i8042_unregister_ports();
i8042_free_irqs();
i8042_controller_reset(false);
- i8042_platform_device = NULL;
return 0;
}
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index e9647ebff187..1e4770094415 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -159,7 +159,7 @@ static ssize_t serio_raw_read(struct file *file, char __user *buffer,
{
struct serio_raw_client *client = file->private_data;
struct serio_raw *serio_raw = client->serio_raw;
- char uninitialized_var(c);
+ char c;
ssize_t read = 0;
int error;
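
The serio_raw change is part of the tree-wide removal of uninitialized_var().
The old helper was defined roughly as below (the exact definition is recalled
from older kernels, so treat it as an assumption); its self-assignment
silenced "may be used uninitialized" warnings even when the variable really
was used before being set, so the plain declaration is strictly safer:

    #define uninitialized_var(x) x = x  /* old helper, removed tree-wide */

    char uninitialized_var(c);  /* expanded to "char c = c;", hiding real bugs */
    char c;                     /* preferred: the compiler can warn again */
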
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index d247d0ae82d2..0506115211dd 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -790,14 +790,8 @@ static void ads7846_report_state(struct ads7846 *ts)
if (x == MAX_12BIT)
x = 0;
- if (ts->model == 7843) {
+ if (ts->model == 7843 || ts->model == 7845) {
Rt = ts->pressure_max / 2;
- } else if (ts->model == 7845) {
- if (get_pendown_state(ts))
- Rt = ts->pressure_max / 2;
- else
- Rt = 0;
- dev_vdbg(&ts->spi->dev, "x/y: %d/%d, PD %d\n", x, y, Rt);
} else if (likely(x && z1)) {
/* compute touch pressure resistance using equation #2 */
Rt = z2;
@@ -1376,8 +1370,9 @@ static int ads7846_probe(struct spi_device *spi)
pdata->y_min ? : 0,
pdata->y_max ? : MAX_12BIT,
0, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE,
- pdata->pressure_min, pdata->pressure_max, 0, 0);
+ if (ts->model != 7845)
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ pdata->pressure_min, pdata->pressure_max, 0, 0);
/*
* Parse common framework properties. Must be done here to ensure the
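
Folding the 7845 into the 7843 branch works because neither part reports
usable pressure, so a constant half-scale value suffices. For parts that do,
the "equation #2" cited by the surrounding code is the standard four-wire
touch-resistance formula below, quoted from the TI datasheet as best recalled
here; the driver splits the divide by 4096 into two rounded steps:

    R_t = R_{x\text{-plate}} \cdot \frac{x}{4096}
          \cdot \left( \frac{z_2}{z_1} - 1 \right)
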
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index a51e7c85f581..4022816a4736 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1078,14 +1078,12 @@ static int elants_i2c_power_on(struct elants_data *ts)
if (IS_ERR_OR_NULL(ts->reset_gpio))
return 0;
- gpiod_set_value_cansleep(ts->reset_gpio, 1);
-
error = regulator_enable(ts->vcc33);
if (error) {
dev_err(&ts->client->dev,
"failed to enable vcc33 regulator: %d\n",
error);
- goto release_reset_gpio;
+ return error;
}
error = regulator_enable(ts->vccio);
@@ -1094,7 +1092,7 @@ static int elants_i2c_power_on(struct elants_data *ts)
"failed to enable vccio regulator: %d\n",
error);
regulator_disable(ts->vcc33);
- goto release_reset_gpio;
+ return error;
}
/*
@@ -1103,7 +1101,6 @@ static int elants_i2c_power_on(struct elants_data *ts)
*/
udelay(ELAN_POWERON_DELAY_USEC);
-release_reset_gpio:
gpiod_set_value_cansleep(ts->reset_gpio, 0);
if (error)
return error;
@@ -1211,7 +1208,7 @@ static int elants_i2c_probe(struct i2c_client *client,
return error;
}
- ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_LOW);
+ ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ts->reset_gpio)) {
error = PTR_ERR(ts->reset_gpio);
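
Requesting the descriptor with GPIOD_OUT_HIGH asserts the line logically at
request time, so a reset GPIO that the firmware description marks active-low
now holds the controller in reset from probe onwards, which is why the
explicit assert deleted from the power-on path above became redundant. A
sketch of the pattern, with the surrounding flow assumed:

    struct gpio_desc *reset;

    reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); /* asserted */
    if (IS_ERR(reset))
        return PTR_ERR(reset);

    /* ... enable supplies and wait for them to settle ... */

    gpiod_set_value_cansleep(reset, 0); /* logical deassert: leave reset */
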
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 3c9cdb87770f..f60466c27099 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -170,10 +170,18 @@ static const struct dmi_system_id rotated_screen[] = {
static const struct dmi_system_id nine_bytes_report[] = {
#if defined(CONFIG_DMI) && defined(CONFIG_X86)
{
- .ident = "Lenovo YogaBook",
- /* YB1-X91L/F and YB1-X90L/F */
+ /* Lenovo Yoga Book X90F / X90L */
.matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9")
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+ }
+ },
+ {
+ /* Lenovo Yoga Book X91F / X91L */
+ .matches = {
+ /* Non exact match to match F + L versions */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
}
},
#endif
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 247c3aaba2d8..0588b8f04dac 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -1453,7 +1453,7 @@ static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
"ce", GPIOD_OUT_LOW);
if (IS_ERR(ts->gpio_ce)) {
error = PTR_ERR(ts->gpio_ce);
- if (error != EPROBE_DEFER)
+ if (error != -EPROBE_DEFER)
dev_err(&client->dev,
"Failed to get gpio: %d\n", error);
return error;
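
The melfas_mip4 one-liner fixes a classic sign bug: kernel APIs return
negative errno values, so a comparison against the positive EPROBE_DEFER
constant can never match and the error message fired even on ordinary probe
deferral. A small user-space demonstration (EPROBE_DEFER's value is
kernel-internal; 517 is used here as an assumption):

    #include <stdio.h>

    #define EPROBE_DEFER 517    /* kernel-internal value, assumed here */

    int main(void)
    {
        int error = -EPROBE_DEFER;  /* drivers return negative errnos */

        if (error != EPROBE_DEFER)
            printf("buggy check: logs even on deferral\n");
        if (error != -EPROBE_DEFER)
            printf("fixed check: silent on deferral\n");
        return 0;
    }
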
diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c
index 69881265d121..8135a80226fd 100644
--- a/drivers/input/touchscreen/raspberrypi-ts.c
+++ b/drivers/input/touchscreen/raspberrypi-ts.c
@@ -137,7 +137,7 @@ static int rpi_ts_probe(struct platform_device *pdev)
return -ENOENT;
}
- fw = rpi_firmware_get(fw_node);
+ fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
of_node_put(fw_node);
if (!fw)
return -EPROBE_DEFER;
@@ -164,7 +164,6 @@ static int rpi_ts_probe(struct platform_device *pdev)
touchbuf = (u32)ts->fw_regs_phys;
error = rpi_firmware_property(fw, RPI_FIRMWARE_FRAMEBUFFER_SET_TOUCHBUF,
&touchbuf, sizeof(touchbuf));
-
if (error || touchbuf != 0) {
dev_warn(dev, "Failed to set touchbuf, %d\n", error);
return error;
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index e579b3633a84..8f6dfa6b6e4d 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -19,39 +19,13 @@
#include <linux/of.h>
#include <linux/overflow.h>
+#include "internal.h"
+
static DEFINE_IDR(icc_idr);
static LIST_HEAD(icc_providers);
static DEFINE_MUTEX(icc_lock);
static struct dentry *icc_debugfs_dir;
-/**
- * struct icc_req - constraints that are attached to each node
- * @req_node: entry in list of requests for the particular @node
- * @node: the interconnect node to which this constraint applies
- * @dev: reference to the device that sets the constraints
- * @tag: path tag (optional)
- * @avg_bw: an integer describing the average bandwidth in kBps
- * @peak_bw: an integer describing the peak bandwidth in kBps
- */
-struct icc_req {
- struct hlist_node req_node;
- struct icc_node *node;
- struct device *dev;
- u32 tag;
- u32 avg_bw;
- u32 peak_bw;
-};
-
-/**
- * struct icc_path - interconnect path structure
- * @num_nodes: number of hops (nodes)
- * @reqs: array of the requests applicable to this path of nodes
- */
-struct icc_path {
- size_t num_nodes;
- struct icc_req reqs[];
-};
-
static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
{
if (!n)
@@ -117,6 +91,7 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
hlist_add_head(&path->reqs[i].req_node, &node->req_list);
path->reqs[i].node = node;
path->reqs[i].dev = dev;
+ path->reqs[i].enabled = true;
/* reference to previous node was saved during path traversal */
node = node->reverse;
}
@@ -201,6 +176,7 @@ static int aggregate_requests(struct icc_node *node)
{
struct icc_provider *p = node->provider;
struct icc_req *r;
+ u32 avg_bw, peak_bw;
node->avg_bw = 0;
node->peak_bw = 0;
@@ -208,9 +184,17 @@ static int aggregate_requests(struct icc_node *node)
if (p->pre_aggregate)
p->pre_aggregate(node);
- hlist_for_each_entry(r, &node->req_list, req_node)
- p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
+ hlist_for_each_entry(r, &node->req_list, req_node) {
+ if (r->enabled) {
+ avg_bw = r->avg_bw;
+ peak_bw = r->peak_bw;
+ } else {
+ avg_bw = 0;
+ peak_bw = 0;
+ }
+ p->aggregate(node, r->tag, avg_bw, peak_bw,
&node->avg_bw, &node->peak_bw);
+ }
return 0;
}
@@ -296,6 +280,9 @@ static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
}
mutex_unlock(&icc_lock);
+ if (!node)
+ return ERR_PTR(-EINVAL);
+
return node;
}
@@ -475,6 +462,39 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
}
EXPORT_SYMBOL_GPL(icc_set_bw);
+static int __icc_enable(struct icc_path *path, bool enable)
+{
+ int i;
+
+ if (!path)
+ return 0;
+
+ if (WARN_ON(IS_ERR(path) || !path->num_nodes))
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ for (i = 0; i < path->num_nodes; i++)
+ path->reqs[i].enabled = enable;
+
+ mutex_unlock(&icc_lock);
+
+ return icc_set_bw(path, path->reqs[0].avg_bw,
+ path->reqs[0].peak_bw);
+}
+
+int icc_enable(struct icc_path *path)
+{
+ return __icc_enable(path, true);
+}
+EXPORT_SYMBOL_GPL(icc_enable);
+
+int icc_disable(struct icc_path *path)
+{
+ return __icc_enable(path, false);
+}
+EXPORT_SYMBOL_GPL(icc_disable);
+
/**
* icc_get() - return a handle for path between two endpoints
* @dev: the device requesting the path
@@ -612,6 +632,10 @@ void icc_node_destroy(int id)
mutex_unlock(&icc_lock);
+ if (!node)
+ return;
+
+ kfree(node->links);
kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);
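
With the per-request enabled flag, a consumer can park a path at zero
bandwidth across suspend without forgetting its requested values:
icc_disable() re-aggregates every hop as 0/0, and icc_enable() re-applies the
stored avg/peak. A hedged usage sketch, where "cpu-mem" is a hypothetical
interconnect path name:

    struct icc_path *path;
    int ret;

    path = of_icc_get(dev, "cpu-mem");  /* hypothetical path name */
    if (IS_ERR(path))
        return PTR_ERR(path);

    ret = icc_set_bw(path, 1000, 2000); /* avg/peak bandwidth in kBps */

    icc_disable(path);  /* suspend: aggregates as 0, values retained */
    icc_enable(path);   /* resume: stored avg/peak re-applied */

    icc_put(path);
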
diff --git a/drivers/interconnect/internal.h b/drivers/interconnect/internal.h
new file mode 100644
index 000000000000..5c923c444f44
--- /dev/null
+++ b/drivers/interconnect/internal.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework internal structs
+ *
+ * Copyright (c) 2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_INTERNAL_H
+#define __DRIVERS_INTERCONNECT_INTERNAL_H
+
+/**
+ * struct icc_req - constraints that are attached to each node
+ * @req_node: entry in list of requests for the particular @node
+ * @node: the interconnect node to which this constraint applies
+ * @dev: reference to the device that sets the constraints
+ * @enabled: indicates whether the path with this request is enabled
+ * @tag: path tag (optional)
+ * @avg_bw: an integer describing the average bandwidth in kBps
+ * @peak_bw: an integer describing the peak bandwidth in kBps
+ */
+struct icc_req {
+ struct hlist_node req_node;
+ struct icc_node *node;
+ struct device *dev;
+ bool enabled;
+ u32 tag;
+ u32 avg_bw;
+ u32 peak_bw;
+};
+
+/**
+ * struct icc_path - interconnect path structure
+ * @num_nodes: number of hops (nodes)
+ * @reqs: array of the requests applicable to this path of nodes
+ */
+struct icc_path {
+ size_t num_nodes;
+ struct icc_req reqs[];
+};
+
+#endif
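
Since struct icc_path ends in a flexible array of requests, a path covering N
hops is allocated in one shot; core.c already pulls in linux/overflow.h, whose
struct_size() helper guards the size multiplication. A minimal sketch, with
num_nodes assumed to come from graph traversal:

    struct icc_path *path;

    path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
    if (!path)
        return ERR_PTR(-ENOMEM);
    path->num_nodes = num_nodes;
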
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index c392930253a3..a30aac41af42 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3098,7 +3098,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
}
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
- phys_addr_t paddr, size_t page_size, int iommu_prot)
+ phys_addr_t paddr, size_t page_size, int iommu_prot,
+ gfp_t gfp)
{
struct protection_domain *domain = to_pdomain(dom);
int prot = 0;
@@ -3113,7 +3114,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
prot |= IOMMU_PROT_IW;
mutex_lock(&domain->api_lock);
- ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
+ ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
mutex_unlock(&domain->api_lock);
domain_flush_np_cache(domain, iova, page_size);
@@ -4419,8 +4420,7 @@ int amd_iommu_activate_guest_mode(void *data)
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
- if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
- !entry || entry->lo.fields_vapic.guest_mode)
+ if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry)
return 0;
entry->lo.val = 0;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 82d008310418..15e25f712f39 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -84,6 +84,10 @@
#define ACPI_DEVFLAG_ATSDIS 0x10000000
#define LOOP_TIMEOUT 2000000
+
+#define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
+ | ((dev & 0x1f) << 3) | (fn & 0x7))
+
/*
* ACPI table definitions
*
@@ -1741,6 +1745,9 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
/* Prevent binding other PCI device drivers to IOMMU devices */
iommu->dev->match_driver = false;
+ /* ACPI _PRT won't have an IRQ for IOMMU */
+ iommu->dev->irq_managed = 1;
+
pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
&iommu->cap);
pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
@@ -2971,24 +2978,32 @@ static int __init parse_amd_iommu_options(char *str)
static int __init parse_ivrs_ioapic(char *str)
{
- unsigned int bus, dev, fn;
- int ret, id, i;
- u16 devid;
+ u32 seg = 0, bus, dev, fn;
+ int id, i;
+ u32 devid;
- ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+ if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
+ sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
+ goto found;
- if (ret != 4) {
- pr_err("Invalid command line: ivrs_ioapic%s\n", str);
- return 1;
+ if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
+ sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
+ pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n",
+ str, id, seg, bus, dev, fn);
+ goto found;
}
+ pr_err("Invalid command line: ivrs_ioapic%s\n", str);
+ return 1;
+
+found:
if (early_ioapic_map_size == EARLY_MAP_SIZE) {
pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
str);
return 1;
}
- devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
cmdline_maps = true;
i = early_ioapic_map_size++;
@@ -3001,24 +3016,32 @@ static int __init parse_ivrs_ioapic(char *str)
static int __init parse_ivrs_hpet(char *str)
{
- unsigned int bus, dev, fn;
- int ret, id, i;
- u16 devid;
+ u32 seg = 0, bus, dev, fn;
+ int id, i;
+ u32 devid;
- ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+ if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
+ sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
+ goto found;
- if (ret != 4) {
- pr_err("Invalid command line: ivrs_hpet%s\n", str);
- return 1;
+ if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
+ sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
+ pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n",
+ str, id, seg, bus, dev, fn);
+ goto found;
}
+ pr_err("Invalid command line: ivrs_hpet%s\n", str);
+ return 1;
+
+found:
if (early_hpet_map_size == EARLY_MAP_SIZE) {
pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
str);
return 1;
}
- devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
cmdline_maps = true;
i = early_hpet_map_size++;
@@ -3029,19 +3052,53 @@ static int __init parse_ivrs_hpet(char *str)
return 1;
}
+#define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN)
+
static int __init parse_ivrs_acpihid(char *str)
{
- u32 bus, dev, fn;
- char *hid, *uid, *p;
- char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
- int ret, i;
+ u32 seg = 0, bus, dev, fn;
+ char *hid, *uid, *p, *addr;
+ char acpiid[ACPIID_LEN] = {0};
+ int i;
- ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
- if (ret != 4) {
- pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
- return 1;
+ addr = strchr(str, '@');
+ if (!addr) {
+ addr = strchr(str, '=');
+ if (!addr)
+ goto not_found;
+
+ ++addr;
+
+ if (strlen(addr) > ACPIID_LEN)
+ goto not_found;
+
+ if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 ||
+ sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) {
+ pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n",
+ str, acpiid, seg, bus, dev, fn);
+ goto found;
+ }
+ goto not_found;
}
+ /* We have the '@', make it the terminator to get just the acpiid */
+ *addr++ = 0;
+
+ if (strlen(str) > ACPIID_LEN + 1)
+ goto not_found;
+
+ if (sscanf(str, "=%s", acpiid) != 1)
+ goto not_found;
+
+ if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 ||
+ sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4)
+ goto found;
+
+not_found:
+ pr_err("Invalid command line: ivrs_acpihid%s\n", str);
+ return 1;
+
+found:
p = acpiid;
hid = strsep(&p, ":");
uid = p;
@@ -3051,11 +3108,17 @@ static int __init parse_ivrs_acpihid(char *str)
return 1;
}
+ /*
+ * Ignore leading zeroes after ':', so e.g., AMDI0095:00
+ * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match
+ */
+ while (*uid == '0' && *(uid + 1))
+ uid++;
+
i = early_acpihid_map_size++;
memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
- early_acpihid_map[i].devid =
- ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
early_acpihid_map[i].cmd_line = true;
return 1;
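
All three ivrs_* parsers now accept a PCI-segment-qualified location and pack
it into one 32-bit SBDF id. The same sscanf pattern and packing can be checked
in user space; the option string below is the new ivrs_ioapic format that the
deprecation warnings advertise:

    #include <stdio.h>

    #define IVRS_GET_SBDF_ID(seg, bus, dev, fn) \
        (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) | \
         ((dev & 0x1f) << 3) | (fn & 0x7))

    int main(void)
    {
        unsigned int seg = 0, bus, dev, fn;
        int id;
        const char *str = "=1@0001:00:14.0";   /* new format */

        if (sscanf(str, "=%d@%x:%x:%x.%x",
                   &id, &seg, &bus, &dev, &fn) == 5)
            printf("ioapic[%d] -> devid 0x%08x\n", id,
                   IVRS_GET_SBDF_ID(seg, bus, dev, fn));
        return 0;
    }
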
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 76e9d3e2f9f2..15eef44efd03 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -886,8 +886,8 @@ struct amd_ir_data {
*/
struct irq_cfg *cfg;
int ga_vector;
- int ga_root_ptr;
- int ga_tag;
+ u64 ga_root_ptr;
+ u32 ga_tag;
};
struct amd_irte_ops {
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 05f3d93cf480..db391dd779c0 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -591,6 +591,7 @@ out_drop_state:
put_device_state(dev_state);
out:
+ pci_dev_put(pdev);
return ret;
}
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 02c2fb551f38..b3c5d7b4547a 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -760,6 +760,18 @@ static void queue_inc_cons(struct arm_smmu_ll_queue *q)
q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
}
+static void queue_sync_cons_ovf(struct arm_smmu_queue *q)
+{
+ struct arm_smmu_ll_queue *llq = &q->llq;
+
+ if (likely(Q_OVF(llq->prod) == Q_OVF(llq->cons)))
+ return;
+
+ llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
+ Q_IDX(llq, llq->cons);
+ queue_sync_cons_out(q);
+}
+
static int queue_sync_prod_in(struct arm_smmu_queue *q)
{
int ret = 0;
@@ -1720,8 +1732,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
} while (!queue_empty(llq));
/* Sync our overflow flag, as we believe we're up to speed */
- llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
- Q_IDX(llq, llq->cons);
+ queue_sync_cons_ovf(q);
return IRQ_HANDLED;
}
@@ -1779,9 +1790,7 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
} while (!queue_empty(llq));
/* Sync our overflow flag, as we believe we're up to speed */
- llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
- Q_IDX(llq, llq->cons);
- queue_sync_cons_out(q);
+ queue_sync_cons_ovf(q);
return IRQ_HANDLED;
}
@@ -2451,7 +2460,7 @@ out_unlock:
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 2185ea5191c1..775acae84d7d 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1160,7 +1160,7 @@ rpm_put:
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 9c3e630c6c4c..651054aa8710 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -477,7 +477,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
if (!iova)
return DMA_MAPPING_ERROR;
- if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
+ if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
iommu_dma_free_iova(cookie, iova, size);
return DMA_MAPPING_ERROR;
}
@@ -612,7 +612,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
- if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
+ if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
< size)
goto out_free_sg;
@@ -660,7 +660,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
- arch_sync_dma_for_cpu(dev, phys, size, dir);
+ arch_sync_dma_for_cpu(phys, size, dir);
}
static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -672,7 +672,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
- arch_sync_dma_for_device(dev, phys, size, dir);
+ arch_sync_dma_for_device(phys, size, dir);
}
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
@@ -686,7 +686,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
return;
for_each_sg(sgl, sg, nelems, i)
- arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -700,7 +700,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
return;
for_each_sg(sgl, sg, nelems, i)
- arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -715,7 +715,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
dma_handle =__iommu_dma_map(dev, phys, size, prot);
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
dma_handle != DMA_MAPPING_ERROR)
- arch_sync_dma_for_device(dev, phys, size, dir);
+ arch_sync_dma_for_device(phys, size, dir);
return dma_handle;
}
@@ -872,7 +872,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* We'll leave any physical concatenation to the IOMMU driver's
* implementation - it knows better than we do.
*/
- if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
+ if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
goto out_free_iova;
return __finalise_sg(dev, sg, nents, iova);
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 9f7c1e29e86a..36900d65386f 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -475,7 +475,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
if (drhd->reg_base_addr == rhsa->base_address) {
int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
- if (!node_online(node))
+ if (node != NUMA_NO_NODE && !node_online(node))
node = NUMA_NO_NODE;
drhd->iommu->node = node;
return 0;
@@ -794,6 +794,7 @@ int __init dmar_dev_scope_init(void)
info = dmar_alloc_pci_notify_info(dev,
BUS_NOTIFY_ADD_DEVICE);
if (!info) {
+ pci_dev_put(dev);
return dmar_dev_scope_status;
} else {
dmar_pci_bus_add_dev(info);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 55ed857f804f..acf21e423794 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -635,7 +635,7 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
ret = iommu_device_register(&data->iommu);
if (ret)
- return ret;
+ goto err_iommu_register;
platform_set_drvdata(pdev, data);
@@ -662,6 +662,10 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
return 0;
+
+err_iommu_register:
+ iommu_device_sysfs_remove(&data->iommu);
+ return ret;
}
static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
@@ -1073,7 +1077,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
*/
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
unsigned long l_iova, phys_addr_t paddr, size_t size,
- int prot)
+ int prot, gfp_t gfp)
{
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
sysmmu_pte_t *entry;
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index cde281b97afa..4dbecd14034a 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -1122,7 +1122,7 @@ static int fsl_pamu_probe(struct platform_device *pdev)
ret = create_csd(ppaact_phys, mem_size, csd_port_id);
if (ret) {
dev_err(dev, "could not create coherence subdomain\n");
- return ret;
+ goto error;
}
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a2a03df97704..2a53fa7d2b47 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2751,6 +2751,7 @@ static int __init si_domain_init(int hw)
if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
domain_exit(si_domain);
+ si_domain = NULL;
return -EFAULT;
}
@@ -3371,6 +3372,10 @@ free_iommu:
disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
}
+ if (si_domain) {
+ domain_exit(si_domain);
+ si_domain = NULL;
+ }
kfree(g_iommus);
@@ -5453,7 +5458,7 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
- size_t size, int iommu_prot)
+ size_t size, int iommu_prot, gfp_t gfp)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
u64 max_addr;
diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
index e7cb0b8a7332..9641eaa19e08 100644
--- a/drivers/iommu/intel-pasid.c
+++ b/drivers/iommu/intel-pasid.c
@@ -166,6 +166,9 @@ int intel_pasid_alloc_table(struct device *dev)
attach_out:
device_attach_pasid_table(info, pasid_table);
+ if (!ecap_coherent(info->iommu->ecap))
+ clflush_cache_range(pasid_table->table, (1 << order) * PAGE_SIZE);
+
return 0;
}
@@ -250,6 +253,10 @@ struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid)
WRITE_ONCE(dir[dir_index].val,
(u64)virt_to_phys(entries) | PASID_PTE_PRESENT);
+ if (!ecap_coherent(info->iommu->ecap)) {
+ clflush_cache_range(entries, VTD_PAGE_SIZE);
+ clflush_cache_range(&dir[dir_index].val, sizeof(*dir));
+ }
}
spin_unlock(&pasid_lock);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index c5758fb696cc..ef8f0d1ac5ab 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1858,8 +1858,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
return pgsize;
}
-int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+int __iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
const struct iommu_ops *ops = domain->ops;
unsigned long orig_iova = iova;
@@ -1896,8 +1896,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
iova, &paddr, pgsize);
+ ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
- ret = ops->map(domain, iova, paddr, pgsize, prot);
if (ret)
break;
@@ -1917,8 +1917,22 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
return ret;
}
+
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ might_sleep();
+ return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
+}
EXPORT_SYMBOL_GPL(iommu_map);
+int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(iommu_map_atomic);
+
static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
@@ -1995,8 +2009,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
-size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot)
+size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot,
+ gfp_t gfp)
{
size_t len = 0, mapped = 0;
phys_addr_t start;
@@ -2007,7 +2022,9 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
phys_addr_t s_phys = sg_phys(sg);
if (len && s_phys != start + len) {
- ret = iommu_map(domain, iova + mapped, start, len, prot);
+ ret = __iommu_map(domain, iova + mapped, start,
+ len, prot, gfp);
+
if (ret)
goto out_err;
@@ -2035,8 +2052,22 @@ out_err:
return 0;
}
+
+size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
+{
+ might_sleep();
+ return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
+}
EXPORT_SYMBOL_GPL(iommu_map_sg);
+size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
+{
+ return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
+
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot)
{
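
The gfp plumbing above splits the map API by calling context: iommu_map()
keeps GFP_KERNEL and now asserts that it may sleep, while the new _atomic
variants let the DMA-IOMMU fast paths map under locks with GFP_ATOMIC
page-table allocations. A hedged caller-side sketch:

    /* Process context: page-table allocations may sleep. */
    ret = iommu_map(domain, iova, paddr, SZ_4K,
                    IOMMU_READ | IOMMU_WRITE);

    /* Atomic context, e.g. under a spinlock. */
    spin_lock_irqsave(&lock, flags);
    ret = iommu_map_atomic(domain, iova, paddr, SZ_4K,
                           IOMMU_READ | IOMMU_WRITE);
    spin_unlock_irqrestore(&lock, flags);
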
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 584eefab1dbc..33874c7aea42 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -724,7 +724,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
}
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index cba0097eba39..696a6c53a69f 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -504,7 +504,7 @@ fail:
}
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t pa, size_t len, int prot)
+ phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 18d7c818a174..dad44e10b331 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -427,7 +427,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
}
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index b5efd6dac953..fa602bfe6951 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -295,7 +295,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
}
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
@@ -626,18 +626,34 @@ static int mtk_iommu_probe(struct platform_device *pdev)
ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
dev_name(&pdev->dev));
if (ret)
- return ret;
+ goto out_clk_unprepare;
iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
ret = iommu_device_register(&data->iommu);
if (ret)
- return ret;
+ goto out_sysfs_remove;
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+ if (!iommu_present(&platform_bus_type)) {
+ ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+ if (ret)
+ goto out_dev_unreg;
+ }
- return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+ ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+ if (ret)
+ goto out_bus_set_null;
+ return ret;
+
+out_bus_set_null:
+ bus_set_iommu(&platform_bus_type, NULL);
+out_dev_unreg:
+ iommu_device_unregister(&data->iommu);
+out_sysfs_remove:
+ iommu_device_sysfs_remove(&data->iommu);
+out_clk_unprepare:
+ clk_disable_unprepare(data->bclk);
+ return ret;
}
static int mtk_iommu_remove(struct platform_device *pdev)
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 614a93aa5305..656939ed842e 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -121,7 +121,7 @@ static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
struct of_phandle_args iommu_spec = { .args_count = 1 };
int err;
- err = of_map_rid(info->np, alias, "iommu-map", "iommu-map-mask",
+ err = of_map_id(info->np, alias, "iommu-map", "iommu-map-mask",
&iommu_spec.np, iommu_spec.args);
if (err)
return err == -ENODEV ? NO_IOMMU : err;
@@ -137,7 +137,7 @@ static int of_fsl_mc_iommu_init(struct fsl_mc_device *mc_dev,
struct of_phandle_args iommu_spec = { .args_count = 1 };
int err;
- err = of_map_rid(master_np, mc_dev->icid, "iommu-map",
+ err = of_map_id(master_np, mc_dev->icid, "iommu-map",
"iommu-map-mask", &iommu_spec.np,
iommu_spec.args);
if (err)
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index a99afb5d9011..259f65291d90 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -32,12 +32,12 @@ static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
ssize_t bytes; \
const char *str = "%20s: %08x\n"; \
const int maxcol = 32; \
- bytes = snprintf(p, maxcol, str, __stringify(name), \
+ if (len < maxcol) \
+ goto out; \
+ bytes = scnprintf(p, maxcol, str, __stringify(name), \
iommu_read_reg(obj, MMU_##name)); \
p += bytes; \
len -= bytes; \
- if (len < maxcol) \
- goto out; \
} while (0)
static ssize_t
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 09c6e1c680db..be551cc34be4 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1339,7 +1339,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
- phys_addr_t pa, size_t bytes, int prot)
+ phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct device *dev = omap_domain->dev;
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 280de92b332e..e377f7771e30 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -419,7 +419,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
}
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
int ret;
unsigned long flags;
@@ -785,9 +785,12 @@ static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
struct device_node *child;
- for_each_child_of_node(qcom_iommu->dev->of_node, child)
- if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
+ for_each_child_of_node(qcom_iommu->dev->of_node, child) {
+ if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) {
+ of_node_put(child);
return true;
+ }
+ }
return false;
}
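
for_each_child_of_node() holds a reference on the current child and drops it when advancing, so any early exit from the loop leaks that reference unless the exit path calls of_node_put() itself, which is exactly what the added call does (compatible string illustrative):

	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, "vendor,example")) {
			of_node_put(child);	/* balance the iterator's get */
			return true;
		}
	}
	return false;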
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 0df091934361..67a8c08d8210 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -758,7 +758,7 @@ unwind:
}
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
@@ -1230,18 +1230,20 @@ static int rk_iommu_probe(struct platform_device *pdev)
for (i = 0; i < iommu->num_irq; i++) {
int irq = platform_get_irq(pdev, i);
- if (irq < 0)
- return irq;
+ if (irq < 0) {
+ err = irq;
+ goto err_pm_disable;
+ }
err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
IRQF_SHARED, dev_name(dev), iommu);
- if (err) {
- pm_runtime_disable(dev);
- goto err_remove_sysfs;
- }
+ if (err)
+ goto err_pm_disable;
}
return 0;
+err_pm_disable:
+ pm_runtime_disable(dev);
err_remove_sysfs:
iommu_device_sysfs_remove(&iommu->iommu);
err_put_group:
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 3b0b18e23187..1137f3ddcb85 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -265,7 +265,7 @@ undo_cpu_trans:
}
static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
int flags = ZPCI_PTE_VALID, rc = 0;
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 3924f7c05544..3fb7ba72507d 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -178,7 +178,7 @@ static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
}
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t pa, size_t bytes, int prot)
+ phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
struct gart_device *gart = gart_handle;
int ret;
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index dd486233e282..576be3f245da 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -651,7 +651,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
}
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 60e659a24f90..37e2267acf29 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -715,7 +715,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
static int viommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
int ret;
u32 flags;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 20f44ef9c4c9..bf3258f262af 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -286,8 +286,12 @@ config XTENSA_MX
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config XILINX_INTC
- bool
+ bool "Xilinx Interrupt Controller (IP core)"
select IRQ_DOMAIN
+ help
+ Support for the Xilinx Interrupt Controller which can be used
+ with MicroBlaze and Zynq. It is a secondary chained controller when
+ used with Zynq.
config IRQ_CROSSBAR
bool
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
index ede02dc2bcd0..1819bb1d2723 100644
--- a/drivers/irqchip/irq-alpine-msi.c
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -199,6 +199,7 @@ static int alpine_msix_init_domains(struct alpine_msix_data *priv,
}
gic_domain = irq_find_host(gic_node);
+ of_node_put(gic_node);
if (!gic_domain) {
pr_err("Failed to find the GIC domain\n");
return -ENXIO;
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index 1bd0621c4ce2..4827a1183247 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -82,6 +82,7 @@ struct bcm6345_l1_chip {
};
struct bcm6345_l1_cpu {
+ struct bcm6345_l1_chip *intc;
void __iomem *map_base;
unsigned int parent_irq;
u32 enable_cache[];
@@ -115,17 +116,11 @@ static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
static void bcm6345_l1_irq_handle(struct irq_desc *desc)
{
- struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc);
- struct bcm6345_l1_cpu *cpu;
+ struct bcm6345_l1_cpu *cpu = irq_desc_get_handler_data(desc);
+ struct bcm6345_l1_chip *intc = cpu->intc;
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int idx;
-#ifdef CONFIG_SMP
- cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
-#else
- cpu = intc->cpus[0];
-#endif
-
chained_irq_enter(chip, desc);
for (idx = 0; idx < intc->n_words; idx++) {
@@ -257,6 +252,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn,
if (!cpu)
return -ENOMEM;
+ cpu->intc = intc;
cpu->map_base = ioremap(res.start, sz);
if (!cpu->map_base)
return -ENOMEM;
@@ -272,7 +268,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn,
return -EINVAL;
}
irq_set_chained_handler_and_data(cpu->parent_irq,
- bcm6345_l1_irq_handle, intc);
+ bcm6345_l1_irq_handle, cpu);
return 0;
}
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 586df3587be0..d308bbe6f528 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -268,7 +268,8 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
flags |= IRQ_GC_BE_IO;
ret = irq_alloc_domain_generic_chips(data->domain, IRQS_PER_WORD, 1,
- dn->full_name, handle_level_irq, clr, 0, flags);
+ dn->full_name, handle_level_irq, clr,
+ IRQ_LEVEL, flags);
if (ret) {
pr_err("failed to allocate generic irq chip\n");
goto out_free_domain;
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 0298ede67e51..fea64bf960c2 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -2,7 +2,7 @@
/*
* Generic Broadcom Set Top Box Level 2 Interrupt controller driver
*
- * Copyright (C) 2014-2017 Broadcom
+ * Copyright (C) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -113,6 +113,9 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
generic_handle_irq(irq_linear_revmap(b->domain, irq));
} while (status);
out:
+ /* Don't ack parent before all device writes are done */
+ wmb();
+
chained_irq_exit(chip, desc);
}
@@ -161,6 +164,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
*init_params)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ unsigned int set = 0;
struct brcmstb_l2_intc_data *data;
struct irq_chip_type *ct;
int ret;
@@ -208,9 +212,12 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
flags |= IRQ_GC_BE_IO;
+ if (init_params->handler == handle_level_irq)
+ set |= IRQ_LEVEL;
+
/* Allocate a single Generic IRQ chip for this node */
ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
- np->full_name, init_params->handler, clr, 0, flags);
+ np->full_name, init_params->handler, clr, set, flags);
if (ret) {
pr_err("failed to allocate generic irq chip\n");
goto out_free_domain;
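
The wmb() added above ensures the level-2 register writes performed while handling the interrupt are no longer posted when the parent controller is acked; otherwise the parent could be re-enabled while a masking write is still in flight and fire again spuriously. The required ordering, with illustrative register names:

	writel(mask, b->base + CPU_MASK_SET);	/* L2 device write */
	wmb();					/* drain posted writes */
	chained_irq_exit(chip, desc);		/* only then ack the parent */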
diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c
index 1337ceceb59b..8be7d136c3bf 100644
--- a/drivers/irqchip/irq-gic-pm.c
+++ b/drivers/irqchip/irq-gic-pm.c
@@ -104,7 +104,7 @@ static int gic_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
goto rpm_disable;
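
pm_runtime_get_sync() increments the usage counter even when the resume fails, so the old error path leaked a reference. pm_runtime_resume_and_get() drops it on failure; its open-coded equivalent is roughly:

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* undo the get on failure */
		goto rpm_disable;
	}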
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 882204d1ef4f..fd0d65e9c53c 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -345,6 +345,17 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
return IRQ_SET_MASK_OK_DONE;
}
+
+void gic_set_cpu(unsigned int cpu, unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+ struct cpumask mask;
+
+ cpumask_clear(&mask);
+ cpumask_set_cpu(cpu, &mask);
+ gic_set_affinity(d, &mask, true);
+}
+EXPORT_SYMBOL(gic_set_cpu);
#endif
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
@@ -795,18 +806,19 @@ static int gic_pm_init(struct gic_chip_data *gic)
#endif
#ifdef CONFIG_SMP
-static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
int cpu;
unsigned long flags, map = 0;
+#if 0
if (unlikely(nr_cpu_ids == 1)) {
/* Only one CPU? let's do a self-IPI... */
writel_relaxed(2 << 24 | irq,
gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
return;
}
-
+#endif
gic_lock_irqsave(flags);
/* Convert our logical CPU mask into a physical one. */
@@ -824,6 +836,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
gic_unlock_irqrestore(flags);
}
+EXPORT_SYMBOL(gic_raise_softirq);
#endif
#ifdef CONFIG_BL_SWITCHER
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index 033bccb41455..b9dcc8e78c75 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -68,6 +68,7 @@ static int __init aic_irq_of_init(struct device_node *node,
unsigned min_irq = JCORE_AIC2_MIN_HWIRQ;
unsigned dom_sz = JCORE_AIC_MAX_HWIRQ+1;
struct irq_domain *domain;
+ int ret;
pr_info("Initializing J-Core AIC\n");
@@ -100,11 +101,17 @@ static int __init aic_irq_of_init(struct device_node *node,
jcore_aic.irq_unmask = noop;
jcore_aic.name = "AIC";
- domain = irq_domain_add_linear(node, dom_sz, &jcore_aic_irqdomain_ops,
+ ret = irq_alloc_descs(-1, min_irq, dom_sz - min_irq,
+ of_node_to_nid(node));
+
+ if (ret < 0)
+ return ret;
+
+ domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq,
+ &jcore_aic_irqdomain_ops,
&jcore_aic);
if (!domain)
return -ENOMEM;
- irq_create_strict_mappings(domain, min_irq, min_irq, dom_sz - min_irq);
return 0;
}
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 829084b568fa..ffd03d248737 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -70,7 +70,7 @@ static const struct meson_gpio_irq_params sm1_params = {
.support_edge_both = true,
};
-static const struct of_device_id meson_irq_gpio_matches[] = {
+static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
{ .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
{ .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index f3985469c221..caebafed49bb 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -48,7 +48,7 @@ void __iomem *mips_gic_base;
DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
-static DEFINE_SPINLOCK(gic_lock);
+static DEFINE_RAW_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
@@ -207,7 +207,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
- spin_lock_irqsave(&gic_lock, flags);
+ raw_spin_lock_irqsave(&gic_lock, flags);
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_FALLING:
pol = GIC_POL_FALLING_EDGE;
@@ -247,7 +247,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
else
irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
handle_level_irq, NULL);
- spin_unlock_irqrestore(&gic_lock, flags);
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
return 0;
}
@@ -265,7 +265,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
return -EINVAL;
/* Assumption : cpumask refers to a single CPU */
- spin_lock_irqsave(&gic_lock, flags);
+ raw_spin_lock_irqsave(&gic_lock, flags);
/* Re-route this IRQ */
write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
@@ -276,7 +276,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
irq_data_update_effective_affinity(d, cpumask_of(cpu));
- spin_unlock_irqrestore(&gic_lock, flags);
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
return IRQ_SET_MASK_OK;
}
@@ -354,12 +354,12 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
cd = irq_data_get_irq_chip_data(d);
cd->mask = false;
- spin_lock_irqsave(&gic_lock, flags);
+ raw_spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_rmask(BIT(intr));
}
- spin_unlock_irqrestore(&gic_lock, flags);
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
}
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
@@ -372,32 +372,45 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
cd = irq_data_get_irq_chip_data(d);
cd->mask = true;
- spin_lock_irqsave(&gic_lock, flags);
+ raw_spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_smask(BIT(intr));
}
- spin_unlock_irqrestore(&gic_lock, flags);
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
}
-static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
+static void gic_all_vpes_irq_cpu_online(void)
{
- struct gic_all_vpes_chip_data *cd;
- unsigned int intr;
+ static const unsigned int local_intrs[] = {
+ GIC_LOCAL_INT_TIMER,
+ GIC_LOCAL_INT_PERFCTR,
+ GIC_LOCAL_INT_FDC,
+ };
+ unsigned long flags;
+ int i;
- intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
- cd = irq_data_get_irq_chip_data(d);
+ raw_spin_lock_irqsave(&gic_lock, flags);
- write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
- if (cd->mask)
- write_gic_vl_smask(BIT(intr));
+ for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
+ unsigned int intr = local_intrs[i];
+ struct gic_all_vpes_chip_data *cd;
+
+ if (!gic_local_irq_is_routable(intr))
+ continue;
+ cd = &gic_all_vpes_chip_data[intr];
+ write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
+ if (cd->mask)
+ write_gic_vl_smask(BIT(intr));
+ }
+
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
}
static struct irq_chip gic_all_vpes_local_irq_controller = {
.name = "MIPS GIC Local",
.irq_mask = gic_mask_local_irq_all_vpes,
.irq_unmask = gic_unmask_local_irq_all_vpes,
- .irq_cpu_online = gic_all_vpes_irq_cpu_online,
};
static void __gic_irq_dispatch(void)
@@ -421,11 +434,11 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
data = irq_get_irq_data(virq);
- spin_lock_irqsave(&gic_lock, flags);
+ raw_spin_lock_irqsave(&gic_lock, flags);
write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
irq_data_update_effective_affinity(data, cpumask_of(cpu));
- spin_unlock_irqrestore(&gic_lock, flags);
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
return 0;
}
@@ -476,6 +489,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
intr = GIC_HWIRQ_TO_LOCAL(hwirq);
map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
+ /*
+ * If adding support for more per-cpu interrupts, keep the
+ * array in gic_all_vpes_irq_cpu_online() in sync.
+ */
switch (intr) {
case GIC_LOCAL_INT_TIMER:
/* CONFIG_MIPS_CMP workaround (see __gic_init) */
@@ -514,12 +531,12 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
if (!gic_local_irq_is_routable(intr))
return -EPERM;
- spin_lock_irqsave(&gic_lock, flags);
+ raw_spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
}
- spin_unlock_irqrestore(&gic_lock, flags);
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
return 0;
}
@@ -662,8 +679,8 @@ static int gic_cpu_startup(unsigned int cpu)
/* Clear all local IRQ masks (ie. disable all local interrupts) */
write_gic_vl_rmask(~0);
- /* Invoke irq_cpu_online callbacks to enable desired interrupts */
- irq_cpu_online();
+ /* Enable desired interrupts */
+ gic_all_vpes_irq_cpu_online();
return 0;
}
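
The lock conversion matters on PREEMPT_RT, where a plain spinlock_t becomes a sleeping rtmutex and must not be taken from the hard-IRQ and CPU-hotplug paths this driver uses; raw_spinlock_t keeps the busy-waiting semantics. The converted pattern:

	static DEFINE_RAW_SPINLOCK(gic_lock);

	raw_spin_lock_irqsave(&gic_lock, flags);
	/* redirect/mask shared interrupts for another VP */
	raw_spin_unlock_irqrestore(&gic_lock, flags);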
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index 3be5c5dba1da..5caec411059f 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -223,6 +223,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
}
parent_domain = irq_find_host(irq_parent_dn);
+ of_node_put(irq_parent_dn);
if (!parent_domain) {
dev_err(&pdev->dev, "failed to find parent IRQ domain\n");
return -ENODEV;
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index a8322a4e18d3..df18465a0985 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -414,6 +414,7 @@ static const struct irq_domain_ops irq_exti_domain_ops = {
.map = irq_map_generic_chip,
.alloc = stm32_exti_alloc,
.free = stm32_exti_free,
+ .xlate = irq_domain_xlate_twocell,
};
static void stm32_irq_ack(struct irq_data *d)
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index e1f771c72fc4..ad3e2c1b3c87 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -148,10 +148,10 @@ static int tegra_ictlr_suspend(void)
lic->cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS);
/* Disable COP interrupts */
- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
/* Disable CPU interrupts */
- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
/* Enable the wakeup sources of ictlr */
writel_relaxed(lic->ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET);
@@ -172,12 +172,12 @@ static void tegra_ictlr_resume(void)
writel_relaxed(lic->cpu_iep[i],
ictlr + ICTLR_CPU_IEP_CLASS);
- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
writel_relaxed(lic->cpu_ier[i],
ictlr + ICTLR_CPU_IER_SET);
writel_relaxed(lic->cop_iep[i],
ictlr + ICTLR_COP_IEP_CLASS);
- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
writel_relaxed(lic->cop_ier[i],
ictlr + ICTLR_COP_IER_SET);
}
@@ -312,7 +312,7 @@ static int __init tegra_ictlr_init(struct device_node *node,
lic->base[i] = base;
/* Disable all interrupts */
- writel_relaxed(~0UL, base + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), base + ICTLR_CPU_IER_CLR);
/* All interrupts target IRQ */
writel_relaxed(0, base + ICTLR_CPU_IEP_CLASS);
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 0a35499c4672..94cba5914788 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments' K3 Interrupt Aggregator irqchip driver
*
- * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
* Lokesh Vutla <lokeshvutla@ti.com>
*/
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
index 59d51a20bbd8..6b366d98fe3c 100644
--- a/drivers/irqchip/irq-ti-sci-intr.c
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments' K3 Interrupt Router irqchip driver
*
- * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
* Lokesh Vutla <lokeshvutla@ti.com>
*/
@@ -205,6 +205,7 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
}
parent_domain = irq_find_host(parent_node);
+ of_node_put(parent_node);
if (!parent_domain) {
dev_err(dev, "Failed to find IRQ parent domain\n");
return -ENODEV;
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index e3043ded8973..43d6e4e705f5 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -15,10 +15,11 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
-#include <linux/jump_label.h>
#include <linux/bug.h>
#include <linux/of_irq.h>
+static struct xintc_irq_chip *primary_intc;
+
/* No one else should require these constants, so define them locally here. */
#define ISR 0x00 /* Interrupt Status Register */
#define IPR 0x04 /* Interrupt Pending Register */
@@ -32,35 +33,40 @@
#define MER_ME (1<<0)
#define MER_HIE (1<<1)
-static DEFINE_STATIC_KEY_FALSE(xintc_is_be);
-
struct xintc_irq_chip {
void __iomem *base;
struct irq_domain *root_domain;
u32 intr_mask;
+ struct irq_chip *intc_dev;
+ u32 nr_irq;
+ unsigned int (*read_fn)(void __iomem *addr);
+ void (*write_fn)(void __iomem *addr, u32);
};
-static struct xintc_irq_chip *xintc_irqc;
+static void xintc_write(void __iomem *addr, u32 data)
+{
+ iowrite32(data, addr);
+}
+
+static unsigned int xintc_read(void __iomem *addr)
+{
+ return ioread32(addr);
+}
-static void xintc_write(int reg, u32 data)
+static void xintc_write_be(void __iomem *addr, u32 data)
{
- if (static_branch_unlikely(&xintc_is_be))
- iowrite32be(data, xintc_irqc->base + reg);
- else
- iowrite32(data, xintc_irqc->base + reg);
+ iowrite32be(data, addr);
}
-static unsigned int xintc_read(int reg)
+static unsigned int xintc_read_be(void __iomem *addr)
{
- if (static_branch_unlikely(&xintc_is_be))
- return ioread32be(xintc_irqc->base + reg);
- else
- return ioread32(xintc_irqc->base + reg);
+ return ioread32be(addr);
}
static void intc_enable_or_unmask(struct irq_data *d)
{
unsigned long mask = 1 << d->hwirq;
+ struct xintc_irq_chip *local_intc = irq_data_get_irq_chip_data(d);
pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
@@ -69,47 +75,57 @@ static void intc_enable_or_unmask(struct irq_data *d)
* acks the irq before calling the interrupt handler
*/
if (irqd_is_level_type(d))
- xintc_write(IAR, mask);
+ local_intc->write_fn(local_intc->base + IAR, mask);
- xintc_write(SIE, mask);
+ local_intc->write_fn(local_intc->base + SIE, mask);
}
static void intc_disable_or_mask(struct irq_data *d)
{
+ struct xintc_irq_chip *local_intc = irq_data_get_irq_chip_data(d);
+
pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
- xintc_write(CIE, 1 << d->hwirq);
+ local_intc->write_fn(local_intc->base + CIE, 1 << d->hwirq);
}
static void intc_ack(struct irq_data *d)
{
+ struct xintc_irq_chip *local_intc = irq_data_get_irq_chip_data(d);
+
pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
- xintc_write(IAR, 1 << d->hwirq);
+ local_intc->write_fn(local_intc->base + IAR, 1 << d->hwirq);
}
static void intc_mask_ack(struct irq_data *d)
{
unsigned long mask = 1 << d->hwirq;
+ struct xintc_irq_chip *local_intc = irq_data_get_irq_chip_data(d);
pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
- xintc_write(CIE, mask);
- xintc_write(IAR, mask);
+ local_intc->write_fn(local_intc->base + CIE, mask);
+ local_intc->write_fn(local_intc->base + IAR, mask);
}
-static struct irq_chip intc_dev = {
- .name = "Xilinx INTC",
- .irq_unmask = intc_enable_or_unmask,
- .irq_mask = intc_disable_or_mask,
- .irq_ack = intc_ack,
- .irq_mask_ack = intc_mask_ack,
-};
+static unsigned int xintc_get_irq_local(struct xintc_irq_chip *local_intc)
+{
+ int hwirq, irq = -1;
+
+ hwirq = local_intc->read_fn(local_intc->base + IVR);
+ if (hwirq != -1U)
+ irq = irq_find_mapping(local_intc->root_domain, hwirq);
+
+ pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
+
+ return irq;
+}
unsigned int xintc_get_irq(void)
{
- unsigned int hwirq, irq = -1;
+ int hwirq, irq = -1;
- hwirq = xintc_read(IVR);
+ hwirq = primary_intc->read_fn(primary_intc->base + IVR);
if (hwirq != -1U)
- irq = irq_find_mapping(xintc_irqc->root_domain, hwirq);
+ irq = irq_find_mapping(primary_intc->root_domain, hwirq);
pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
@@ -118,15 +134,18 @@ unsigned int xintc_get_irq(void)
static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
- if (xintc_irqc->intr_mask & (1 << hw)) {
- irq_set_chip_and_handler_name(irq, &intc_dev,
+ struct xintc_irq_chip *local_intc = d->host_data;
+
+ if (local_intc->intr_mask & (1 << hw)) {
+ irq_set_chip_and_handler_name(irq, local_intc->intc_dev,
handle_edge_irq, "edge");
irq_clear_status_flags(irq, IRQ_LEVEL);
} else {
- irq_set_chip_and_handler_name(irq, &intc_dev,
+ irq_set_chip_and_handler_name(irq, local_intc->intc_dev,
handle_level_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL);
}
+ irq_set_chip_data(irq, local_intc);
return 0;
}
@@ -138,11 +157,13 @@ static const struct irq_domain_ops xintc_irq_domain_ops = {
static void xil_intc_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct xintc_irq_chip *local_intc =
+ irq_data_get_irq_handler_data(&desc->irq_data);
u32 pending;
chained_irq_enter(chip, desc);
do {
- pending = xintc_get_irq();
+ pending = xintc_get_irq_local(local_intc);
if (pending == -1U)
break;
generic_handle_irq(pending);
@@ -153,28 +174,20 @@ static void xil_intc_irq_handler(struct irq_desc *desc)
static int __init xilinx_intc_of_init(struct device_node *intc,
struct device_node *parent)
{
- u32 nr_irq;
int ret, irq;
struct xintc_irq_chip *irqc;
-
- if (xintc_irqc) {
- pr_err("irq-xilinx: Multiple instances aren't supported\n");
- return -EINVAL;
- }
+ struct irq_chip *intc_dev;
irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
if (!irqc)
return -ENOMEM;
-
- xintc_irqc = irqc;
-
irqc->base = of_iomap(intc, 0);
BUG_ON(!irqc->base);
- ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
+ ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &irqc->nr_irq);
if (ret < 0) {
pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
- goto err_alloc;
+ goto error;
}
ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
@@ -183,30 +196,45 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
irqc->intr_mask = 0;
}
- if (irqc->intr_mask >> nr_irq)
+ if (irqc->intr_mask >> irqc->nr_irq)
pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
- intc, nr_irq, irqc->intr_mask);
+ intc, irqc->nr_irq, irqc->intr_mask);
+
+ intc_dev = kzalloc(sizeof(*intc_dev), GFP_KERNEL);
+ if (!intc_dev) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ intc_dev->name = intc->full_name;
+ intc_dev->irq_unmask = intc_enable_or_unmask;
+ intc_dev->irq_mask = intc_disable_or_mask;
+ intc_dev->irq_ack = intc_ack;
+ intc_dev->irq_mask_ack = intc_mask_ack;
+ irqc->intc_dev = intc_dev;
+ irqc->write_fn = xintc_write;
+ irqc->read_fn = xintc_read;
/*
* Disable all external interrupts until they are
* explicitly requested.
*/
- xintc_write(IER, 0);
+ irqc->write_fn(irqc->base + IER, 0);
/* Acknowledge any pending interrupts just in case. */
- xintc_write(IAR, 0xffffffff);
+ irqc->write_fn(irqc->base + IAR, 0xffffffff);
/* Turn on the Master Enable. */
- xintc_write(MER, MER_HIE | MER_ME);
- if (!(xintc_read(MER) & (MER_HIE | MER_ME))) {
- static_branch_enable(&xintc_is_be);
- xintc_write(MER, MER_HIE | MER_ME);
+ irqc->write_fn(irqc->base + MER, MER_HIE | MER_ME);
+ if (!(irqc->read_fn(irqc->base + MER) & (MER_HIE | MER_ME))) {
+ irqc->write_fn = xintc_write_be;
+ irqc->read_fn = xintc_read_be;
+ irqc->write_fn(irqc->base + MER, MER_HIE | MER_ME);
}
- irqc->root_domain = irq_domain_add_linear(intc, nr_irq,
+ irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq,
&xintc_irq_domain_ops, irqc);
if (!irqc->root_domain) {
pr_err("irq-xilinx: Unable to create IRQ domain\n");
@@ -225,13 +253,16 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
goto err_alloc;
}
} else {
- irq_set_default_host(irqc->root_domain);
+ primary_intc = irqc;
+ irq_set_default_host(primary_intc->root_domain);
}
return 0;
err_alloc:
- xintc_irqc = NULL;
+ kfree(intc_dev);
+error:
+ iounmap(irqc->base);
kfree(irqc);
return ret;
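
The rework above replaces the global static key with per-instance accessors so several controllers of mixed endianness can coexist. Endianness is probed at init: MER is written with the little-endian accessors, and if the readback does not show the enable bits the instance is switched to the big-endian pair before retrying; condensed:

	irqc->write_fn = xintc_write;		/* assume little-endian */
	irqc->read_fn = xintc_read;
	irqc->write_fn(irqc->base + MER, MER_HIE | MER_ME);
	if (!(irqc->read_fn(irqc->base + MER) & (MER_HIE | MER_ME))) {
		irqc->write_fn = xintc_write_be;	/* fall back to BE */
		irqc->read_fn = xintc_read_be;
		irqc->write_fn(irqc->base + MER, MER_HIE | MER_ME);
	}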
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 86669ec8b977..2c7406465233 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -3219,6 +3219,7 @@ static int
hfcm_l1callback(struct dchannel *dch, u_int cmd)
{
struct hfc_multi *hc = dch->hw;
+ struct sk_buff_head free_queue;
u_long flags;
switch (cmd) {
@@ -3247,6 +3248,7 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd)
l1_event(dch->l1, HW_POWERUP_IND);
break;
case HW_DEACT_REQ:
+ __skb_queue_head_init(&free_queue);
/* start deactivation */
spin_lock_irqsave(&hc->lock, flags);
if (hc->ctype == HFC_TYPE_E1) {
@@ -3266,20 +3268,21 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd)
plxsd_checksync(hc, 0);
}
}
- skb_queue_purge(&dch->squeue);
+ skb_queue_splice_init(&dch->squeue, &free_queue);
if (dch->tx_skb) {
- dev_kfree_skb(dch->tx_skb);
+ __skb_queue_tail(&free_queue, dch->tx_skb);
dch->tx_skb = NULL;
}
dch->tx_idx = 0;
if (dch->rx_skb) {
- dev_kfree_skb(dch->rx_skb);
+ __skb_queue_tail(&free_queue, dch->rx_skb);
dch->rx_skb = NULL;
}
test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
del_timer(&dch->timer);
spin_unlock_irqrestore(&hc->lock, flags);
+ __skb_queue_purge(&free_queue);
break;
case HW_POWERUP_REQ:
spin_lock_irqsave(&hc->lock, flags);
@@ -3386,6 +3389,9 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
case PH_DEACTIVATE_REQ:
test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
if (dch->dev.D.protocol != ISDN_P_TE_S0) {
+ struct sk_buff_head free_queue;
+
+ __skb_queue_head_init(&free_queue);
spin_lock_irqsave(&hc->lock, flags);
if (debug & DEBUG_HFCMULTI_MSG)
printk(KERN_DEBUG
@@ -3407,14 +3413,14 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
/* deactivate */
dch->state = 1;
}
- skb_queue_purge(&dch->squeue);
+ skb_queue_splice_init(&dch->squeue, &free_queue);
if (dch->tx_skb) {
- dev_kfree_skb(dch->tx_skb);
+ __skb_queue_tail(&free_queue, dch->tx_skb);
dch->tx_skb = NULL;
}
dch->tx_idx = 0;
if (dch->rx_skb) {
- dev_kfree_skb(dch->rx_skb);
+ __skb_queue_tail(&free_queue, dch->rx_skb);
dch->rx_skb = NULL;
}
test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
@@ -3426,6 +3432,7 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
#endif
ret = 0;
spin_unlock_irqrestore(&hc->lock, flags);
+ __skb_queue_purge(&free_queue);
} else
ret = l1_event(dch->l1, hh->prim);
break;
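
dev_kfree_skb() must not be called with hard IRQs disabled, which is the case inside spin_lock_irqsave(); the patch therefore splices the pending skbs onto a local queue under the lock and frees them only after unlocking. The idiom, reduced to its core:

	struct sk_buff_head free_queue;

	__skb_queue_head_init(&free_queue);
	spin_lock_irqsave(&hc->lock, flags);
	skb_queue_splice_init(&dch->squeue, &free_queue);
	if (dch->tx_skb) {
		__skb_queue_tail(&free_queue, dch->tx_skb);
		dch->tx_skb = NULL;
	}
	spin_unlock_irqrestore(&hc->lock, flags);
	__skb_queue_purge(&free_queue);		/* free outside the lock */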
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index a2b2ce1dfec8..0a683a66fc61 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -839,7 +839,7 @@ hfcpci_fill_fifo(struct bchannel *bch)
*z1t = cpu_to_le16(new_z1); /* now send data */
if (bch->tx_idx < bch->tx_skb->len)
return;
- dev_kfree_skb(bch->tx_skb);
+ dev_kfree_skb_any(bch->tx_skb);
if (get_next_bframe(bch))
goto next_t_frame;
return;
@@ -895,7 +895,7 @@ hfcpci_fill_fifo(struct bchannel *bch)
}
bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */
bz->f1 = new_f1; /* next frame */
- dev_kfree_skb(bch->tx_skb);
+ dev_kfree_skb_any(bch->tx_skb);
get_next_bframe(bch);
}
@@ -1119,7 +1119,7 @@ tx_birq(struct bchannel *bch)
if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
hfcpci_fill_fifo(bch);
else {
- dev_kfree_skb(bch->tx_skb);
+ dev_kfree_skb_any(bch->tx_skb);
if (get_next_bframe(bch))
hfcpci_fill_fifo(bch);
}
@@ -1617,16 +1617,19 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
spin_lock_irqsave(&hc->lock, flags);
if (hc->hw.protocol == ISDN_P_NT_S0) {
+ struct sk_buff_head free_queue;
+
+ __skb_queue_head_init(&free_queue);
/* prepare deactivation */
Write_hfc(hc, HFCPCI_STATES, 0x40);
- skb_queue_purge(&dch->squeue);
+ skb_queue_splice_init(&dch->squeue, &free_queue);
if (dch->tx_skb) {
- dev_kfree_skb(dch->tx_skb);
+ __skb_queue_tail(&free_queue, dch->tx_skb);
dch->tx_skb = NULL;
}
dch->tx_idx = 0;
if (dch->rx_skb) {
- dev_kfree_skb(dch->rx_skb);
+ __skb_queue_tail(&free_queue, dch->rx_skb);
dch->rx_skb = NULL;
}
test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
@@ -1639,10 +1642,12 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
hc->hw.mst_m &= ~HFCPCI_MASTER;
Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
ret = 0;
+ spin_unlock_irqrestore(&hc->lock, flags);
+ __skb_queue_purge(&free_queue);
} else {
ret = l1_event(dch->l1, hh->prim);
+ spin_unlock_irqrestore(&hc->lock, flags);
}
- spin_unlock_irqrestore(&hc->lock, flags);
break;
}
if (!ret)
@@ -2267,7 +2272,7 @@ _hfcpci_softirq(struct device *dev, void *unused)
return 0;
if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
- spin_lock(&hc->lock);
+ spin_lock_irq(&hc->lock);
bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
main_rec_hfcpci(bch);
@@ -2278,7 +2283,7 @@ _hfcpci_softirq(struct device *dev, void *unused)
main_rec_hfcpci(bch);
tx_birq(bch);
}
- spin_unlock(&hc->lock);
+ spin_unlock_irq(&hc->lock);
}
return 0;
}
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 1f89378b5623..111a597ef23c 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -327,20 +327,24 @@ hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
if (hw->protocol == ISDN_P_NT_S0) {
+ struct sk_buff_head free_queue;
+
+ __skb_queue_head_init(&free_queue);
hfcsusb_ph_command(hw, HFC_L1_DEACTIVATE_NT);
spin_lock_irqsave(&hw->lock, flags);
- skb_queue_purge(&dch->squeue);
+ skb_queue_splice_init(&dch->squeue, &free_queue);
if (dch->tx_skb) {
- dev_kfree_skb(dch->tx_skb);
+ __skb_queue_tail(&free_queue, dch->tx_skb);
dch->tx_skb = NULL;
}
dch->tx_idx = 0;
if (dch->rx_skb) {
- dev_kfree_skb(dch->rx_skb);
+ __skb_queue_tail(&free_queue, dch->rx_skb);
dch->rx_skb = NULL;
}
test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
spin_unlock_irqrestore(&hw->lock, flags);
+ __skb_queue_purge(&free_queue);
#ifdef FIXME
if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
dchannel_sched_event(&hc->dch, D_CLEARBUSY);
@@ -1331,7 +1335,7 @@ tx_iso_complete(struct urb *urb)
printk("\n");
}
- dev_kfree_skb(tx_skb);
+ dev_consume_skb_irq(tx_skb);
tx_skb = NULL;
if (fifo->dch && get_next_dframe(fifo->dch))
tx_skb = fifo->dch->tx_skb;
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 8299defff55a..6d818d5d1377 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -956,7 +956,7 @@ nj_release(struct tiger_hw *card)
}
if (card->irq > 0)
free_irq(card->irq, card);
- if (card->isac.dch.dev.dev.class)
+ if (device_is_registered(&card->isac.dch.dev.dev))
mISDN_unregister_device(&card->isac.dch.dev);
for (i = 0; i < 2; i++) {
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index a41b4b264594..90ee56d07a6e 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -222,7 +222,7 @@ mISDN_register_device(struct mISDNdevice *dev,
err = get_free_devid();
if (err < 0)
- goto error1;
+ return err;
dev->id = err;
device_initialize(&dev->dev);
@@ -233,11 +233,12 @@ mISDN_register_device(struct mISDNdevice *dev,
if (debug & DEBUG_CORE)
printk(KERN_DEBUG "mISDN_register %s %d\n",
dev_name(&dev->dev), dev->id);
+ dev->dev.class = &mISDN_class;
+
err = create_stack(dev);
if (err)
goto error1;
- dev->dev.class = &mISDN_class;
dev->dev.platform_data = dev;
dev->dev.parent = parent;
dev_set_drvdata(&dev->dev, dev);
@@ -249,8 +250,8 @@ mISDN_register_device(struct mISDNdevice *dev,
error3:
delete_stack(dev);
- return err;
error1:
+ put_device(&dev->dev);
return err;
}
diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h
index fa09d511a8ed..baf31258f5c9 100644
--- a/drivers/isdn/mISDN/dsp.h
+++ b/drivers/isdn/mISDN/dsp.h
@@ -247,7 +247,7 @@ extern void dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp);
extern int dsp_cmx_conf(struct dsp *dsp, u32 conf_id);
extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb);
extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb);
-extern void dsp_cmx_send(void *arg);
+extern void dsp_cmx_send(struct timer_list *arg);
extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb);
extern int dsp_cmx_del_conf_member(struct dsp *dsp);
extern int dsp_cmx_del_conf(struct dsp_conf *conf);
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 6d2088fbaf69..1b73af501397 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -1625,7 +1625,7 @@ static u16 dsp_count; /* last sample count */
static int dsp_count_valid; /* if we have last sample count */
void
-dsp_cmx_send(void *arg)
+dsp_cmx_send(struct timer_list *arg)
{
struct dsp_conf *conf;
struct dsp_conf_member *member;
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 038e72a84b33..5b954012e394 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -1200,7 +1200,7 @@ static int __init dsp_init(void)
}
/* set sample timer */
- timer_setup(&dsp_spl_tl, (void *)dsp_cmx_send, 0);
+ timer_setup(&dsp_spl_tl, dsp_cmx_send, 0);
dsp_spl_tl.expires = jiffies + dsp_tics;
dsp_spl_jiffies = dsp_spl_tl.expires;
add_timer(&dsp_spl_tl);
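
Since the timer API conversion, callbacks take a struct timer_list *, and the (void *) cast removed above hid the mismatched prototype from the compiler. A callback that needs context recovers it with from_timer(); a sketch with a hypothetical containing struct:

	struct my_ctx {
		struct timer_list timer;
		int state;
	};

	static void my_timeout(struct timer_list *t)
	{
		struct my_ctx *ctx = from_timer(ctx, t, timer);

		ctx->state++;	/* use the recovered context */
	}

	timer_setup(&ctx->timer, my_timeout, 0);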
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
index 40588692cec7..cd9bc11e8dfb 100644
--- a/drivers/isdn/mISDN/dsp_pipeline.c
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -80,6 +80,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
if (!entry)
return -ENOMEM;
+ INIT_LIST_HEAD(&entry->list);
entry->elem = elem;
entry->dev.class = elements_class;
@@ -114,7 +115,7 @@ err2:
device_unregister(&entry->dev);
return ret;
err1:
- kfree(entry);
+ put_device(&entry->dev);
return ret;
}
EXPORT_SYMBOL(mISDN_dsp_element_register);
diff --git a/drivers/isdn/mISDN/l1oip.h b/drivers/isdn/mISDN/l1oip.h
index 7ea10db20e3a..48133d022812 100644
--- a/drivers/isdn/mISDN/l1oip.h
+++ b/drivers/isdn/mISDN/l1oip.h
@@ -59,6 +59,7 @@ struct l1oip {
int bundle; /* bundle channels in one frm */
int codec; /* codec to use for transmis. */
int limit; /* limit number of bchannels */
+ bool shutdown; /* if card is released */
/* timer */
struct timer_list keep_tl;
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index b57dcb834594..aec4f2a69c3b 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -275,7 +275,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
p = frame;
/* restart timer */
- if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ))
+ if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ) && !hc->shutdown)
mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ);
else
hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
@@ -601,7 +601,9 @@ multiframe:
goto multiframe;
/* restart timer */
- if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) {
+ if ((time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) ||
+ !hc->timeout_on) &&
+ !hc->shutdown) {
hc->timeout_on = 1;
mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ);
} else /* only adjust timer */
@@ -1232,11 +1234,10 @@ release_card(struct l1oip *hc)
{
int ch;
- if (timer_pending(&hc->keep_tl))
- del_timer(&hc->keep_tl);
+ hc->shutdown = true;
- if (timer_pending(&hc->timeout_tl))
- del_timer(&hc->timeout_tl);
+ del_timer_sync(&hc->keep_tl);
+ del_timer_sync(&hc->timeout_tl);
cancel_work_sync(&hc->workq);
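
del_timer() only removes a pending timer; a handler already running, or the socket path re-arming the timer, could still fire after release. The fix is the usual two-step teardown: set a flag that every re-arm site checks (the new hc->shutdown tests above), then cancel synchronously:

	hc->shutdown = true;		/* stop every re-arm path first */
	del_timer_sync(&hc->keep_tl);	/* wait out running handlers */
	del_timer_sync(&hc->timeout_tl);
	cancel_work_sync(&hc->workq);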
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 2cbf66d1c300..34334adcad01 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -802,7 +802,7 @@ config LEDS_SPI_BYTE
config LEDS_TI_LMU_COMMON
tristate "LED driver for TI LMU"
depends on LEDS_CLASS
- depends on REGMAP
+ select REGMAP
help
Say Y to enable the LED driver for TI LMU devices.
This supports common features between the TI LM3532, LM3631, LM3632,
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 8b6965a563e9..acc99e01eb4a 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -22,9 +22,8 @@
struct led_pwm_data {
struct led_classdev cdev;
struct pwm_device *pwm;
+ struct pwm_state pwmstate;
unsigned int active_low;
- unsigned int period;
- int duty;
};
struct led_pwm_priv {
@@ -32,44 +31,29 @@ struct led_pwm_priv {
struct led_pwm_data leds[0];
};
-static void __led_pwm_set(struct led_pwm_data *led_dat)
-{
- int new_duty = led_dat->duty;
-
- pwm_config(led_dat->pwm, new_duty, led_dat->period);
-
- if (new_duty == 0)
- pwm_disable(led_dat->pwm);
- else
- pwm_enable(led_dat->pwm);
-}
-
static int led_pwm_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct led_pwm_data *led_dat =
container_of(led_cdev, struct led_pwm_data, cdev);
unsigned int max = led_dat->cdev.max_brightness;
- unsigned long long duty = led_dat->period;
+ unsigned long long duty = led_dat->pwmstate.period;
duty *= brightness;
do_div(duty, max);
if (led_dat->active_low)
- duty = led_dat->period - duty;
-
- led_dat->duty = duty;
-
- __led_pwm_set(led_dat);
+ duty = led_dat->pwmstate.period - duty;
- return 0;
+ led_dat->pwmstate.duty_cycle = duty;
+ led_dat->pwmstate.enabled = true;
+ return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate);
}
static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
struct led_pwm *led, struct fwnode_handle *fwnode)
{
struct led_pwm_data *led_data = &priv->leds[priv->num_leds];
- struct pwm_args pargs;
int ret;
led_data->active_low = led->active_low;
@@ -93,17 +77,10 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
led_data->cdev.brightness_set_blocking = led_pwm_set;
- /*
- * FIXME: pwm_apply_args() should be removed when switching to the
- * atomic PWM API.
- */
- pwm_apply_args(led_data->pwm);
-
- pwm_get_args(led_data->pwm, &pargs);
+ pwm_init_state(led_data->pwm, &led_data->pwmstate);
- led_data->period = pargs.period;
- if (!led_data->period && (led->pwm_period_ns > 0))
- led_data->period = led->pwm_period_ns;
+ if (!led_data->pwmstate.period)
+ led_data->pwmstate.period = led->pwm_period_ns;
ret = devm_led_classdev_register(dev, &led_data->cdev);
if (ret == 0) {
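
The driver now uses the atomic PWM API: a single pwm_state is seeded once from the device-tree arguments, and every brightness change fills it in and commits it in one call, instead of the pwm_config()/pwm_enable()/pwm_disable() dance. The essential sequence:

	struct pwm_state state;

	pwm_init_state(pwm, &state);		/* seed period from args */
	state.duty_cycle = duty;
	state.enabled = true;
	ret = pwm_apply_state(pwm, &state);	/* one atomic commit */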
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 869976d1b734..f19baed61502 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -2,14 +2,18 @@
/*
* ledtrig-cpu.c - LED trigger based on CPU activity
*
- * This LED trigger will be registered for each possible CPU and named as
- * cpu0, cpu1, cpu2, cpu3, etc.
+ * This LED trigger will be registered for the first 8 CPUs and named
+ * cpu0..cpu7. There is an additional trigger called cpu that is on
+ * when any CPU is active.
+ *
+ * If you want support for an arbitrary number of CPUs, make it one
+ * trigger, with an additional sysfs file selecting which CPU to watch.
*
* It can be bound to any LED just like other triggers using either a
* board file or via sysfs interface.
*
* An API named ledtrig_cpu is exported for any user, who want to add CPU
- * activity indication in their code
+ * activity indication in their code.
*
* Copyright 2011 Linus Walleij <linus.walleij@linaro.org>
* Copyright 2011 - 2012 Bryan Wu <bryan.wu@canonical.com>
@@ -126,7 +130,7 @@ static int ledtrig_prepare_down_cpu(unsigned int cpu)
static int __init ledtrig_cpu_init(void)
{
- int cpu;
+ unsigned int cpu;
int ret;
/* Supports up to 9999 cpu cores */
@@ -145,7 +149,10 @@ static int __init ledtrig_cpu_init(void)
for_each_possible_cpu(cpu) {
struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
- snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
+ if (cpu >= 8)
+ continue;
+
+ snprintf(trig->name, MAX_NAME_LEN, "cpu%u", cpu);
led_trigger_register_simple(trig->name, &trig->_trig);
}
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index d5e774d83021..f4d670ec30bc 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -318,6 +318,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
switch (evt) {
case NETDEV_CHANGENAME:
+ if (netif_carrier_ok(dev))
+ set_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+ fallthrough;
case NETDEV_REGISTER:
if (trigger_data->net_dev)
dev_put(trigger_data->net_dev);
diff --git a/drivers/leds/trigger/ledtrig-panic.c b/drivers/leds/trigger/ledtrig-panic.c
index 5751cd032f9d..4bf232465dfd 100644
--- a/drivers/leds/trigger/ledtrig-panic.c
+++ b/drivers/leds/trigger/ledtrig-panic.c
@@ -63,10 +63,13 @@ static long led_panic_blink(int state)
static int __init ledtrig_panic_init(void)
{
+ led_trigger_register_simple("panic", &trigger);
+ if (!trigger)
+ return -ENOMEM;
+
atomic_notifier_chain_register(&panic_notifier_list,
&led_trigger_panic_nb);
- led_trigger_register_simple("panic", &trigger);
panic_blink = led_panic_blink;
return 0;
}
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index b5a534206edd..1605dcaa89e3 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -86,6 +86,7 @@ config ADB_PMU_LED
config ADB_PMU_LED_DISK
bool "Use front LED as DISK LED by default"
+ depends on ATA
depends on ADB_PMU_LED
depends on LEDS_CLASS
select LEDS_TRIGGERS
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index e49d1f287a17..c37d5fce86f7 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -647,7 +647,7 @@ do_adb_query(struct adb_request *req)
switch(req->data[1]) {
case ADB_QUERY_GETDEVINFO:
- if (req->nbytes < 3)
+ if (req->nbytes < 3 || req->data[2] >= 16)
break;
mutex_lock(&adb_handler_mutex);
req->reply[0] = adb_handler[req->data[2]].original_address;
diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
index eb3adfb7f88d..172a8b18c579 100644
--- a/drivers/macintosh/macio-adb.c
+++ b/drivers/macintosh/macio-adb.c
@@ -106,6 +106,10 @@ int macio_init(void)
return -ENXIO;
}
adb = ioremap(r.start, sizeof(struct adb_regs));
+ if (!adb) {
+ of_node_put(adbs);
+ return -ENOMEM;
+ }
out_8(&adb->ctrl.r, 0);
out_8(&adb->intr.r, 0);
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 92d142d2b75f..176bbd062617 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -425,7 +425,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
if (of_device_register(&dev->ofdev) != 0) {
printk(KERN_DEBUG"macio: device registration error for %s!\n",
dev_name(&dev->ofdev.dev));
- kfree(dev);
+ put_device(&dev->ofdev.dev);
return NULL;
}
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index 1e5fa09845e7..8713e80201c0 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -34,8 +34,8 @@
#endif
struct wf_lm75_sensor {
- int ds1775 : 1;
- int inited : 1;
+ unsigned int ds1775 : 1;
+ unsigned int inited : 1;
struct i2c_client *i2c;
struct wf_sensor sens;
};
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index cb75dc035616..30d53a535d55 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -171,6 +171,7 @@ static void wf_sat_release(struct kref *ref)
if (sat->nr >= 0)
sats[sat->nr] = NULL;
+ of_node_put(sat->node);
kfree(sat);
}
diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c
index 3e6059eaa138..90823b428025 100644
--- a/drivers/macintosh/windfarm_smu_sensors.c
+++ b/drivers/macintosh/windfarm_smu_sensors.c
@@ -273,8 +273,8 @@ struct smu_cpu_power_sensor {
struct list_head link;
struct wf_sensor *volts;
struct wf_sensor *amps;
- int fake_volts : 1;
- int quadratic : 1;
+ unsigned int fake_volts : 1;
+ unsigned int quadratic : 1;
struct wf_sensor sens;
};
#define to_smu_cpu_power(c) container_of(c, struct smu_cpu_power_sensor, sens)
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index 8ee9db274802..f7191dbef6fa 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -632,15 +632,15 @@ static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
DMA_TO_DEVICE);
- if (rc < 0)
- return rc;
+ if (!rc)
+ return -EIO;
rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
DMA_FROM_DEVICE);
- if (rc < 0) {
+ if (!rc) {
dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
DMA_TO_DEVICE);
- return rc;
+ return -EIO;
}
return 0;
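
dma_map_sg() never returns a negative errno; it returns the number of entries mapped, with 0 meaning failure, so the old `rc < 0` checks could never trigger. The correct pattern:

	nents = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
			   DMA_TO_DEVICE);
	if (!nents)
		return -EIO;	/* zero entries mapped == failure */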
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 4555d678fadd..abcee58e851c 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
@@ -38,6 +39,7 @@ struct mbox_test_device {
char *signal;
char *message;
spinlock_t lock;
+ struct mutex mutex;
wait_queue_head_t waitq;
struct fasync_struct *async_queue;
struct dentry *root_debugfs_dir;
@@ -95,6 +97,7 @@ static ssize_t mbox_test_message_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct mbox_test_device *tdev = filp->private_data;
+ char *message;
void *data;
int ret;
@@ -110,10 +113,13 @@ static ssize_t mbox_test_message_write(struct file *filp,
return -EINVAL;
}
- tdev->message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
- if (!tdev->message)
+ message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
+ if (!message)
return -ENOMEM;
+ mutex_lock(&tdev->mutex);
+
+ tdev->message = message;
ret = copy_from_user(tdev->message, userbuf, count);
if (ret) {
ret = -EFAULT;
@@ -144,6 +150,8 @@ out:
kfree(tdev->message);
tdev->signal = NULL;
+ mutex_unlock(&tdev->mutex);
+
return ret < 0 ? ret : count;
}
@@ -392,6 +400,7 @@ static int mbox_test_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tdev);
spin_lock_init(&tdev->lock);
+ mutex_init(&tdev->mutex);
if (tdev->rx_channel) {
tdev->rx_buffer = devm_kzalloc(&pdev->dev,
diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
index 88047d835211..75f14b624ca2 100644
--- a/drivers/mailbox/ti-msgmgr.c
+++ b/drivers/mailbox/ti-msgmgr.c
@@ -385,14 +385,20 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
/* Ensure all unused data is 0 */
data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes));
writel(data_trail, data_reg);
- data_reg++;
+ data_reg += sizeof(u32);
}
+
/*
* 'data_reg' indicates next register to write. If we did not already
* write on tx complete reg(last reg), we must do so for transmit
+ * In addition, we also need to make sure that all intermediate data
+ * registers (if any are required) are reset to 0 so that TISCI
+ * backward compatibility is maintained.
*/
- if (data_reg <= qinst->queue_buff_end)
- writel(0, qinst->queue_buff_end);
+ while (data_reg <= qinst->queue_buff_end) {
+ writel(0, data_reg);
+ data_reg += sizeof(u32);
+ }
return 0;
}
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index f9cc674ba9b7..144a3229fdf1 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -110,7 +110,7 @@ struct zynqmp_ipi_pdata {
unsigned int method;
u32 local_id;
int num_mboxes;
- struct zynqmp_ipi_mbox *ipi_mboxes;
+ struct zynqmp_ipi_mbox ipi_mboxes[];
};
static struct device_driver zynqmp_ipi_mbox_driver = {
@@ -152,7 +152,7 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
struct zynqmp_ipi_message *msg;
u64 arg0, arg3;
struct arm_smccc_res res;
- int ret, i;
+ int ret, i, status = IRQ_NONE;
(void)irq;
arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
@@ -170,11 +170,11 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
memcpy_fromio(msg->data, mchan->req_buf,
msg->len);
mbox_chan_received_data(chan, (void *)msg);
- return IRQ_HANDLED;
+ status = IRQ_HANDLED;
}
}
}
- return IRQ_NONE;
+ return status;
}
/**
@@ -493,6 +493,7 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
ret = device_register(&ipi_mbox->dev);
if (ret) {
dev_err(dev, "Failed to register ipi mbox dev.\n");
+ put_device(&ipi_mbox->dev);
return ret;
}
mdev = &ipi_mbox->dev;
@@ -619,7 +620,8 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
ipi_mbox = &pdata->ipi_mboxes[i];
if (ipi_mbox->dev.parent) {
mbox_controller_unregister(&ipi_mbox->mbox);
- device_unregister(&ipi_mbox->dev);
+ if (device_is_registered(&ipi_mbox->dev))
+ device_unregister(&ipi_mbox->dev);
}
}
}
@@ -632,8 +634,13 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
struct zynqmp_ipi_mbox *mbox;
int num_mboxes, ret = -EINVAL;
- num_mboxes = of_get_child_count(np);
- pdata = devm_kzalloc(dev, sizeof(*pdata) + (num_mboxes * sizeof(*mbox)),
+ num_mboxes = of_get_available_child_count(np);
+ if (num_mboxes == 0) {
+ dev_err(dev, "mailbox nodes not available\n");
+ return -EINVAL;
+ }
+
+ pdata = devm_kzalloc(dev, struct_size(pdata, ipi_mboxes, num_mboxes),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@@ -647,8 +654,6 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
}
pdata->num_mboxes = num_mboxes;
- pdata->ipi_mboxes = (struct zynqmp_ipi_mbox *)
- ((char *)pdata + sizeof(*pdata));
mbox = pdata->ipi_mboxes;
for_each_available_child_of_node(np, nc) {
@@ -657,6 +662,7 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "failed to probe subdev.\n");
ret = -EINVAL;
+ of_node_put(nc);
goto free_mbox_dev;
}
mbox++;
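
Replacing the open-coded trailing allocation with a C99 flexible array member lets struct_size() compute sizeof(*pdata) plus num_mboxes elements with integer-overflow checking, and removes the manual pointer arithmetic:

	pdata = devm_kzalloc(dev,
			     struct_size(pdata, ipi_mboxes, num_mboxes),
			     GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;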
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index c799bb81ab03..e051e4dd3352 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -71,8 +71,10 @@ static int mcb_probe(struct device *dev)
get_device(dev);
ret = mdrv->probe(mdev, found_id);
- if (ret)
+ if (ret) {
module_put(carrier_mod);
+ put_device(dev);
+ }
return ret;
}
@@ -246,6 +248,7 @@ int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev)
return 0;
out:
+ put_device(&dev->dev);
return ret;
}
@@ -387,17 +390,13 @@ EXPORT_SYMBOL_GPL(mcb_free_dev);
static int __mcb_bus_add_devices(struct device *dev, void *data)
{
- struct mcb_device *mdev = to_mcb_device(dev);
int retval;
- if (mdev->is_added)
- return 0;
-
retval = device_attach(dev);
- if (retval < 0)
+ if (retval < 0) {
dev_err(dev, "Error adding device (%d)\n", retval);
-
- mdev->is_added = true;
+ return retval;
+ }
return 0;
}
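
mcb_probe() takes a device reference with get_device() before calling the driver's probe, so a failing probe must now drop that reference alongside the module reference. A toy model of the balanced error path (plain counters stand in for the kernel's refcounts; all names are illustrative):

#include <stdio.h>

static int module_refs, device_refs;

static void get_device(void)     { device_refs++; }
static void put_device(void)     { device_refs--; }
static int  try_module_get(void) { module_refs++; return 1; }
static void module_put(void)     { module_refs--; }

static int driver_probe(void) { return -1; }	/* simulate a failing probe */

static int bus_probe(void)
{
	int ret;

	if (!try_module_get())
		return -1;
	get_device();

	ret = driver_probe();
	if (ret) {
		/* Drop both references taken above so the counts balance. */
		module_put();
		put_device();
	}
	return ret;
}

int main(void)
{
	bus_probe();
	printf("module_refs=%d device_refs=%d\n", module_refs, device_refs);
	return 0;
}
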
diff --git a/drivers/mcb/mcb-lpc.c b/drivers/mcb/mcb-lpc.c
index 8f1bde437a7e..92915d2c2d46 100644
--- a/drivers/mcb/mcb-lpc.c
+++ b/drivers/mcb/mcb-lpc.c
@@ -23,7 +23,7 @@ static int mcb_lpc_probe(struct platform_device *pdev)
{
struct resource *res;
struct priv *priv;
- int ret = 0;
+ int ret = 0, table_size;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -58,16 +58,43 @@ static int mcb_lpc_probe(struct platform_device *pdev)
ret = chameleon_parse_cells(priv->bus, priv->mem->start, priv->base);
if (ret < 0) {
- mcb_release_bus(priv->bus);
- return ret;
+ goto out_mcb_bus;
}
- dev_dbg(&pdev->dev, "Found %d cells\n", ret);
+ table_size = ret;
+
+ if (table_size < CHAM_HEADER_SIZE) {
+ /* Release the previous resources */
+ devm_iounmap(&pdev->dev, priv->base);
+ devm_release_mem_region(&pdev->dev, priv->mem->start, resource_size(priv->mem));
+
+ /* Then, allocate it again with the actual chameleon table size */
+ res = devm_request_mem_region(&pdev->dev, priv->mem->start,
+ table_size,
+ KBUILD_MODNAME);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to request PCI memory\n");
+ ret = -EBUSY;
+ goto out_mcb_bus;
+ }
+
+ priv->base = devm_ioremap(&pdev->dev, priv->mem->start, table_size);
+ if (!priv->base) {
+ dev_err(&pdev->dev, "Cannot ioremap\n");
+ ret = -ENOMEM;
+ goto out_mcb_bus;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ }
mcb_bus_add_devices(priv->bus);
return 0;
+out_mcb_bus:
+ mcb_release_bus(priv->bus);
+ return ret;
}
static int mcb_lpc_remove(struct platform_device *pdev)
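
chameleon_parse_cells() now reports the actual table size, and the probe path uses it to shrink an oversized mapping: release the CHAM_HEADER_SIZE region requested up front, then re-request exactly table_size bytes. A userspace model of the shrink-on-discovery sequence (request_region/parse_table are stand-ins for the devm_* calls and the parser, not real API):

#include <stdio.h>
#include <stdlib.h>

#define CHAM_HEADER_SIZE 0x200

static void *request_region(size_t size) { return malloc(size); }
static void release_region(void *p)      { free(p); }

/* Pretend the parser found an 0x80-byte chameleon table. */
static long parse_table(void *base) { (void)base; return 0x80; }

int main(void)
{
	void *base = request_region(CHAM_HEADER_SIZE);
	long table_size;

	if (!base)
		return 1;
	table_size = parse_table(base);

	if (table_size < CHAM_HEADER_SIZE) {
		/* Shrink: drop the oversized region, take only what is needed. */
		release_region(base);
		base = request_region(table_size);
		if (!base)
			return 1;
	}
	printf("mapped %ld bytes\n", table_size);
	release_region(base);
	return 0;
}
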
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index 3b69e6aa3d88..268d5a482ab1 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -99,8 +99,6 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
mdev->mem.end = mdev->mem.start + size - 1;
mdev->mem.flags = IORESOURCE_MEM;
- mdev->is_added = false;
-
ret = mcb_device_register(bus, mdev);
if (ret < 0)
goto err;
@@ -130,7 +128,7 @@ static void chameleon_parse_bar(void __iomem *base,
}
}
-static int chameleon_get_bar(char __iomem **base, phys_addr_t mapbase,
+static int chameleon_get_bar(void __iomem **base, phys_addr_t mapbase,
struct chameleon_bar **cb)
{
struct chameleon_bar *c;
@@ -179,12 +177,13 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
{
struct chameleon_fpga_header *header;
struct chameleon_bar *cb;
- char __iomem *p = base;
+ void __iomem *p = base;
int num_cells = 0;
uint32_t dtype;
int bar_count;
int ret;
u32 hsize;
+ u32 table_size;
hsize = sizeof(struct chameleon_fpga_header);
@@ -239,12 +238,16 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
num_cells++;
}
- if (num_cells == 0)
- num_cells = -EINVAL;
+ if (num_cells == 0) {
+ ret = -EINVAL;
+ goto free_bar;
+ }
+ table_size = p - base;
+ pr_debug("%d cell(s) found. Chameleon table size: 0x%04x bytes\n", num_cells, table_size);
kfree(cb);
kfree(header);
- return num_cells;
+ return table_size;
free_bar:
kfree(cb);
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index 14866aa22f75..22927c80ff46 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -31,7 +31,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct resource *res;
struct priv *priv;
- int ret;
+ int ret, table_size;
unsigned long flags;
priv = devm_kzalloc(&pdev->dev, sizeof(struct priv), GFP_KERNEL);
@@ -90,7 +90,30 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret < 0)
goto out_mcb_bus;
- dev_dbg(&pdev->dev, "Found %d cells\n", ret);
+ table_size = ret;
+
+ if (table_size < CHAM_HEADER_SIZE) {
+ /* Release the previous resources */
+ devm_iounmap(&pdev->dev, priv->base);
+ devm_release_mem_region(&pdev->dev, priv->mapbase, CHAM_HEADER_SIZE);
+
+ /* Then, allocate it again with the actual chameleon table size */
+ res = devm_request_mem_region(&pdev->dev, priv->mapbase,
+ table_size,
+ KBUILD_MODNAME);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to request PCI memory\n");
+ ret = -EBUSY;
+ goto out_mcb_bus;
+ }
+
+ priv->base = devm_ioremap(&pdev->dev, priv->mapbase, table_size);
+ if (!priv->base) {
+ dev_err(&pdev->dev, "Cannot ioremap\n");
+ ret = -ENOMEM;
+ goto out_mcb_bus;
+ }
+ }
mcb_bus_add_devices(priv->bus);
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index a1df0d95151c..5310e1f4a282 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -49,7 +49,7 @@
*
* bch_bucket_alloc() allocates a single bucket from a specific cache.
*
- * bch_bucket_alloc_set() allocates one or more buckets from different caches
+ * bch_bucket_alloc_set() allocates one bucket from one of the caches
* out of a cache set.
*
* free_some_buckets() drives all the processes described above. It's called
@@ -488,34 +488,29 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
}
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
- struct bkey *k, int n, bool wait)
+ struct bkey *k, bool wait)
{
- int i;
+ struct cache *ca;
+ long b;
/* No allocation if CACHE_SET_IO_DISABLE bit is set */
if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
return -1;
lockdep_assert_held(&c->bucket_lock);
- BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
bkey_init(k);
- /* sort by free space/prio of oldest data in caches */
-
- for (i = 0; i < n; i++) {
- struct cache *ca = c->cache_by_alloc[i];
- long b = bch_bucket_alloc(ca, reserve, wait);
+ ca = c->cache_by_alloc[0];
+ b = bch_bucket_alloc(ca, reserve, wait);
+ if (b == -1)
+ goto err;
- if (b == -1)
- goto err;
+ k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
+ bucket_to_sector(c, b),
+ ca->sb.nr_this_dev);
- k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
- bucket_to_sector(c, b),
- ca->sb.nr_this_dev);
-
- SET_KEY_PTRS(k, i + 1);
- }
+ SET_KEY_PTRS(k, 1);
return 0;
err:
@@ -525,12 +520,12 @@ err:
}
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
- struct bkey *k, int n, bool wait)
+ struct bkey *k, bool wait)
{
int ret;
mutex_lock(&c->bucket_lock);
- ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
+ ret = __bch_bucket_alloc_set(c, reserve, k, wait);
mutex_unlock(&c->bucket_lock);
return ret;
}
@@ -638,7 +633,7 @@ bool bch_alloc_sectors(struct cache_set *c,
spin_unlock(&c->data_bucket_lock);
- if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
+ if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
return false;
spin_lock(&c->data_bucket_lock);
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 36de6f7ddf22..7bce58278845 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -265,6 +265,7 @@ struct bcache_device {
#define BCACHE_DEV_WB_RUNNING 3
#define BCACHE_DEV_RATE_DW_RUNNING 4
int nr_stripes;
+#define BCH_MIN_STRIPE_SZ ((4 << 20) >> SECTOR_SHIFT)
unsigned int stripe_size;
atomic_t *stripe_sectors_dirty;
unsigned long *full_dirty_stripes;
@@ -970,9 +971,9 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k);
long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
- struct bkey *k, int n, bool wait);
+ struct bkey *k, bool wait);
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
- struct bkey *k, int n, bool wait);
+ struct bkey *k, bool wait);
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
unsigned int sectors, unsigned int write_point,
unsigned int write_prio, bool wait);
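
The new BCH_MIN_STRIPE_SZ encodes 4 MiB in 512-byte sectors: (4 << 20) is 4,194,304 bytes, and shifting right by SECTOR_SHIFT (9) gives 8192 sectors. A compile-time check of that arithmetic (assuming the usual SECTOR_SHIFT of 9):

#include <assert.h>
#include <stdio.h>

#define SECTOR_SHIFT 9
#define BCH_MIN_STRIPE_SZ ((4 << 20) >> SECTOR_SHIFT)

static_assert(BCH_MIN_STRIPE_SZ == 8192, "4 MiB is 8192 512-byte sectors");

int main(void)
{
	printf("min stripe size: %d sectors (%d bytes)\n",
	       BCH_MIN_STRIPE_SZ, BCH_MIN_STRIPE_SZ << SECTOR_SHIFT);
	return 0;
}
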
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5a33910aea78..5db893d6a824 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1020,6 +1020,9 @@ err:
*
* The btree node will have either a read or a write lock held, depending on
* level and op->lock.
+ *
+ * Note: Only an error code or a btree pointer will be returned; it is
+ * unnecessary for callers to check for a NULL pointer.
*/
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
struct bkey *k, int level, bool write,
@@ -1132,16 +1135,22 @@ retry:
mutex_unlock(&b->c->bucket_lock);
}
+/*
+ * Only an error code or a btree pointer will be returned; it is unnecessary
+ * for callers to check for a NULL pointer.
+ */
struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
int level, bool wait,
struct btree *parent)
{
BKEY_PADDED(key) k;
- struct btree *b = ERR_PTR(-EAGAIN);
+ struct btree *b;
mutex_lock(&c->bucket_lock);
retry:
- if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
+ /* return ERR_PTR(-EAGAIN) when it fails */
+ b = ERR_PTR(-EAGAIN);
+ if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
goto err;
bkey_put(c, &k.key);
@@ -1186,7 +1195,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
{
struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
- if (!IS_ERR_OR_NULL(n)) {
+ if (!IS_ERR(n)) {
mutex_lock(&n->write_lock);
bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
bkey_copy_key(&n->key, &b->key);
@@ -1401,7 +1410,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
for (i = 0; i < nodes; i++) {
new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
- if (IS_ERR_OR_NULL(new_nodes[i]))
+ if (IS_ERR(new_nodes[i]))
goto out_nocoalesce;
}
@@ -1553,6 +1562,8 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
return 0;
n = btree_node_alloc_replacement(replace, NULL);
+ if (IS_ERR(n))
+ return 0;
/* recheck reserve after allocating replacement node */
if (btree_check_reserve(b, NULL)) {
@@ -1718,7 +1729,7 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
if (should_rewrite) {
n = btree_node_alloc_replacement(b, NULL);
- if (!IS_ERR_OR_NULL(n)) {
+ if (!IS_ERR(n)) {
bch_btree_node_write_sync(n);
bch_btree_set_root(n);
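
The btree hunks tighten a return contract: allocation paths return either a valid node or ERR_PTR(-E...), never NULL, so callers can switch from IS_ERR_OR_NULL() to the cheaper IS_ERR(). A userspace sketch of how the kernel encodes an errno in a pointer (constants simplified; node_alloc is illustrative):

#include <stdio.h>

#define MAX_ERRNO 4095
#define EAGAIN 11

static inline void *ERR_PTR(long error)    { return (void *)error; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Returns a valid pointer or ERR_PTR(-EAGAIN), never NULL. */
static void *node_alloc(int fail)
{
	static int node = 42;

	return fail ? ERR_PTR(-EAGAIN) : (void *)&node;
}

int main(void)
{
	void *n = node_alloc(1);

	if (IS_ERR(n))		/* IS_ERR_OR_NULL() would be redundant */
		printf("alloc failed: %ld\n", PTR_ERR(n));
	return 0;
}
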
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index efdf6ce0443e..5d1a4eb81669 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -428,7 +428,7 @@ static int __uuid_write(struct cache_set *c)
closure_init_stack(&cl);
lockdep_assert_held(&bch_register_lock);
- if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
+ if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
return 1;
SET_KEY_SIZE(&k.key, c->sb.bucket_size);
@@ -822,6 +822,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
if (!d->stripe_size)
d->stripe_size = 1 << 31;
+ else if (d->stripe_size < BCH_MIN_STRIPE_SZ)
+ d->stripe_size = roundup(BCH_MIN_STRIPE_SZ, d->stripe_size);
n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
if (!n || n > max_stripes) {
@@ -1633,7 +1635,7 @@ static void cache_set_flush(struct closure *cl)
if (!IS_ERR_OR_NULL(c->gc_thread))
kthread_stop(c->gc_thread);
- if (!IS_ERR_OR_NULL(c->root))
+ if (!IS_ERR(c->root))
list_add(&c->root->list, &c->btree_cache);
/*
@@ -1906,7 +1908,7 @@ static int run_cache_set(struct cache_set *c)
c->root = bch_btree_node_get(c, NULL, k,
j->btree_level,
true, NULL);
- if (IS_ERR_OR_NULL(c->root))
+ if (IS_ERR(c->root))
goto err;
list_del_init(&c->root->list);
@@ -2000,7 +2002,7 @@ static int run_cache_set(struct cache_set *c)
err = "cannot allocate new btree root";
c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
- if (IS_ERR_OR_NULL(c->root))
+ if (IS_ERR(c->root))
goto err;
mutex_lock(&c->root->write_lock);
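
bcache_device_init() now rounds a too-small stripe size up with roundup(BCH_MIN_STRIPE_SZ, d->stripe_size), i.e. the smallest multiple of the device's stripe size that is at least 4 MiB, so the result honours both the minimum and the original alignment. A worked example (the 3072-sector stripe is made up for illustration):

#include <stdio.h>

/* Kernel's roundup(): the smallest multiple of y that is >= x. */
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int min_sz = 8192;	/* BCH_MIN_STRIPE_SZ, in sectors */
	unsigned int stripe = 3072;	/* hypothetical device stripe size */

	if (stripe < min_sz)
		stripe = roundup(min_sz, stripe);

	/* 9216: at least 4 MiB and still a multiple of the original 3072. */
	printf("adjusted stripe size: %u sectors\n", stripe);
	return 0;
}
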
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 7f0fb4b5755a..30073e4074c1 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -1057,7 +1057,7 @@ SHOW(__bch_cache)
sum += INITIAL_PRIO - cached[i];
if (n)
- do_div(sum, n);
+ sum = div64_u64(sum, n);
for (i = 0; i < ARRAY_SIZE(q); i++)
q[i] = INITIAL_PRIO - cached[n * (i + 1) /
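
do_div() divides a u64 by a 32-bit divisor in place, so once the divisor n can itself exceed 32 bits the sysfs code must use div64_u64(), which keeps both operands at full width. A userspace sketch of the hazard the change avoids (div64_u64 reimplemented trivially here, since 64-bit userspace division just works):

#include <stdint.h>
#include <stdio.h>

/* div64_u64(): full 64-by-64 division, returning the quotient. */
static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	uint64_t sum = 1ULL << 40;	/* accumulated priorities */
	uint64_t n   = 5ULL << 32;	/* divisor wider than 32 bits */

	/*
	 * do_div(sum, n) takes a 32-bit divisor: n would be truncated
	 * to its low 32 bits (zero here) before dividing. div64_u64()
	 * keeps the full divisor.
	 */
	printf("quotient = %llu\n",
	       (unsigned long long)div64_u64(sum, n));
	return 0;
}
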
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 0b02210ab435..5767ff6c13e3 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -119,27 +119,61 @@ static void __update_writeback_rate(struct cached_dev *dc)
dc->writeback_rate_target = target;
}
+static bool idle_counter_exceeded(struct cache_set *c)
+{
+ int counter, dev_nr;
+
+ /*
+ * If c->idle_counter overflows (idle for a really long time),
+ * reset it to 0 and do not set the maximum rate this time, for
+ * code simplicity.
+ */
+ counter = atomic_inc_return(&c->idle_counter);
+ if (counter <= 0) {
+ atomic_set(&c->idle_counter, 0);
+ return false;
+ }
+
+ dev_nr = atomic_read(&c->attached_dev_nr);
+ if (dev_nr == 0)
+ return false;
+
+ /*
+ * c->idle_counter is increased by the writeback threads of all
+ * attached backing devices; to represent a rough time period,
+ * the counter should be divided by dev_nr. Otherwise the idle
+ * time could not grow as more backing devices are attached.
+ * The following calculation is equivalent to checking
+ * (counter / dev_nr) < (dev_nr * 6)
+ */
+ if (counter < (dev_nr * dev_nr * 6))
+ return false;
+
+ return true;
+}
+
+/*
+ * Idle_counter is increased every time update_writeback_rate() is
+ * called. If all backing devices attached to the same cache set have
+ * identical dc->writeback_rate_update_seconds values, it takes about 6
+ * rounds of update_writeback_rate() on each backing device before
+ * c->at_max_writeback_rate is set to 1, and then the max writeback rate
+ * is set for each dc->writeback_rate.rate.
+ * In order to avoid the extra locking cost of counting the exact number
+ * of dirty cached devices, c->attached_dev_nr is used to calculate the
+ * idle threshold. It might be bigger if not all cached devices are in
+ * writeback mode, but it still works well with limited extra rounds of
+ * update_writeback_rate().
+ */
static bool set_at_max_writeback_rate(struct cache_set *c,
struct cached_dev *dc)
{
/* Don't set max writeback rate if gc is running */
if (!c->gc_mark_valid)
return false;
- /*
- * Idle_counter is increased everytime when update_writeback_rate() is
- * called. If all backing devices attached to the same cache set have
- * identical dc->writeback_rate_update_seconds values, it is about 6
- * rounds of update_writeback_rate() on each backing device before
- * c->at_max_writeback_rate is set to 1, and then max wrteback rate set
- * to each dc->writeback_rate.rate.
- * In order to avoid extra locking cost for counting exact dirty cached
- * devices number, c->attached_dev_nr is used to calculate the idle
- * throushold. It might be bigger if not all cached device are in write-
- * back mode, but it still works well with limited extra rounds of
- * update_writeback_rate().
- */
- if (atomic_inc_return(&c->idle_counter) <
- atomic_read(&c->attached_dev_nr) * 6)
+
+ if (!idle_counter_exceeded(c))
return false;
if (atomic_read(&c->at_max_writeback_rate) != 1)
@@ -153,13 +187,10 @@ static bool set_at_max_writeback_rate(struct cache_set *c,
dc->writeback_rate_change = 0;
/*
- * Check c->idle_counter and c->at_max_writeback_rate agagain in case
- * new I/O arrives during before set_at_max_writeback_rate() returns.
- * Then the writeback rate is set to 1, and its new value should be
- * decided via __update_writeback_rate().
+ * Check again in case new I/O arrives before
+ * set_at_max_writeback_rate() returns.
*/
- if ((atomic_read(&c->idle_counter) <
- atomic_read(&c->attached_dev_nr) * 6) ||
+ if (!idle_counter_exceeded(c) ||
!atomic_read(&c->at_max_writeback_rate))
return false;
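
idle_counter_exceeded() first folds a wrapped (negative) counter back to zero, then avoids a division by rewriting (counter / dev_nr) < dev_nr * 6 as counter < dev_nr * dev_nr * 6. For non-negative integers and dev_nr > 0 the two forms are exactly equivalent under truncating division, which the following brute-force check confirms:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/*
	 * With truncating integer division, counter >= 0 and dev_nr > 0:
	 *   counter / dev_nr < dev_nr * 6  <=>  counter < dev_nr * dev_nr * 6
	 * so the kernel side can skip the division entirely.
	 */
	for (int dev_nr = 1; dev_nr <= 8; dev_nr++)
		for (int counter = 0; counter < 1024; counter++)
			assert((counter / dev_nr < dev_nr * 6) ==
			       (counter < dev_nr * dev_nr * 6));

	printf("equivalence holds for all tested values\n");
	return 0;
}
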
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index af6d4f898e4c..2ecd0db0f294 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -551,11 +551,13 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
return r;
}
-static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd)
+static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd,
+ bool destroy_bm)
{
dm_sm_destroy(cmd->metadata_sm);
dm_tm_destroy(cmd->tm);
- dm_block_manager_destroy(cmd->bm);
+ if (destroy_bm)
+ dm_block_manager_destroy(cmd->bm);
}
typedef unsigned long (*flags_mutator)(unsigned long);
@@ -826,7 +828,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
cmd2 = lookup(bdev);
if (cmd2) {
mutex_unlock(&table_lock);
- __destroy_persistent_data_objects(cmd);
+ __destroy_persistent_data_objects(cmd, true);
kfree(cmd);
return cmd2;
}
@@ -874,7 +876,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
mutex_unlock(&table_lock);
if (!cmd->fail_io)
- __destroy_persistent_data_objects(cmd);
+ __destroy_persistent_data_objects(cmd, true);
kfree(cmd);
}
}
@@ -1808,14 +1810,52 @@ int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
{
- int r;
+ int r = -EINVAL;
+ struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
+
+ /* fail_io is double-checked with cmd->root_lock held below */
+ if (unlikely(cmd->fail_io))
+ return r;
+
+ /*
+ * Replacement block manager (new_bm) is created and old_bm destroyed outside of
+ * cmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
+ * shrinker associated with the block manager's bufio client vs cmd root_lock).
+ * - must take shrinker_rwsem without holding cmd->root_lock
+ */
+ new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
+ CACHE_MAX_CONCURRENT_LOCKS);
WRITE_LOCK(cmd);
- __destroy_persistent_data_objects(cmd);
- r = __create_persistent_data_objects(cmd, false);
+ if (cmd->fail_io) {
+ WRITE_UNLOCK(cmd);
+ goto out;
+ }
+
+ __destroy_persistent_data_objects(cmd, false);
+ old_bm = cmd->bm;
+ if (IS_ERR(new_bm)) {
+ DMERR("could not create block manager during abort");
+ cmd->bm = NULL;
+ r = PTR_ERR(new_bm);
+ goto out_unlock;
+ }
+
+ cmd->bm = new_bm;
+ r = __open_or_format_metadata(cmd, false);
+ if (r) {
+ cmd->bm = NULL;
+ goto out_unlock;
+ }
+ new_bm = NULL;
+out_unlock:
if (r)
cmd->fail_io = true;
WRITE_UNLOCK(cmd);
+ dm_block_manager_destroy(old_bm);
+out:
+ if (new_bm && !IS_ERR(new_bm))
+ dm_block_manager_destroy(new_bm);
return r;
}
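
The abort path's shape is the interesting part: the replacement block manager is created before taking root_lock, and the old one is destroyed only after dropping it, because create/destroy can take shrinker_rwsem and nesting that inside root_lock would invert the lock order taken elsewhere. A pthread sketch of that create-outside, swap-under-lock, destroy-outside shape (bm_create/bm_destroy are stand-ins for the kernel calls):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;
static int *current_bm;

/* Creation/destruction may take other locks, so keep them outside. */
static int *bm_create(void)     { return malloc(sizeof(int)); }
static void bm_destroy(int *bm) { free(bm); }

static int abort_metadata(void)
{
	int *new_bm, *old_bm;

	new_bm = bm_create();			/* outside root_lock */
	if (!new_bm)
		return -1;

	pthread_mutex_lock(&root_lock);		/* only the swap is locked */
	old_bm = current_bm;
	current_bm = new_bm;
	pthread_mutex_unlock(&root_lock);

	bm_destroy(old_bm);			/* outside root_lock again */
	return 0;
}

int main(void)
{
	current_bm = bm_create();
	abort_metadata();
	bm_destroy(current_bm);
	printf("swapped without create/destroy under root_lock\n");
	return 0;
}
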
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index b61aac00ff40..859073193f5b 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -854,7 +854,13 @@ struct smq_policy {
struct background_tracker *bg_work;
- bool migrations_allowed;
+ bool migrations_allowed:1;
+
+ /*
+ * If this is set the policy will try and clean the whole cache
+ * even if the device is not idle.
+ */
+ bool cleaner:1;
};
/*----------------------------------------------------------------*/
@@ -1133,7 +1139,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
* Cache entries may not be populated. So we cannot rely on the
* size of the clean queue.
*/
- if (idle) {
+ if (idle || mq->cleaner) {
/*
* We'd like to clean everything.
*/
@@ -1716,11 +1722,9 @@ static void calc_hotspot_params(sector_t origin_size,
*hotspot_block_size /= 2u;
}
-static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
- sector_t origin_size,
- sector_t cache_block_size,
- bool mimic_mq,
- bool migrations_allowed)
+static struct dm_cache_policy *
+__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
+ bool mimic_mq, bool migrations_allowed, bool cleaner)
{
unsigned i;
unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
@@ -1807,6 +1811,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
goto bad_btracker;
mq->migrations_allowed = migrations_allowed;
+ mq->cleaner = cleaner;
return &mq->policy;
@@ -1830,21 +1835,24 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
sector_t origin_size,
sector_t cache_block_size)
{
- return __smq_create(cache_size, origin_size, cache_block_size, false, true);
+ return __smq_create(cache_size, origin_size, cache_block_size,
+ false, true, false);
}
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
sector_t origin_size,
sector_t cache_block_size)
{
- return __smq_create(cache_size, origin_size, cache_block_size, true, true);
+ return __smq_create(cache_size, origin_size, cache_block_size,
+ true, true, false);
}
static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
sector_t origin_size,
sector_t cache_block_size)
{
- return __smq_create(cache_size, origin_size, cache_block_size, false, false);
+ return __smq_create(cache_size, origin_size, cache_block_size,
+ false, false, true);
}
/*----------------------------------------------------------------*/
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index f595e9867cbe..08b5e44df324 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1011,16 +1011,16 @@ static void abort_transaction(struct cache *cache)
if (get_cache_mode(cache) >= CM_READ_ONLY)
return;
- if (dm_cache_metadata_set_needs_check(cache->cmd)) {
- DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
- set_cache_mode(cache, CM_FAIL);
- }
-
DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
if (dm_cache_metadata_abort(cache->cmd)) {
DMERR("%s: failed to abort metadata transaction", dev_name);
set_cache_mode(cache, CM_FAIL);
}
+
+ if (dm_cache_metadata_set_needs_check(cache->cmd)) {
+ DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
+ set_cache_mode(cache, CM_FAIL);
+ }
}
static void metadata_operation_failed(struct cache *cache, const char *op, int r)
@@ -1910,6 +1910,7 @@ static void process_deferred_bios(struct work_struct *ws)
else
commit_needed = process_bio(cache, bio) || commit_needed;
+ cond_resched();
}
if (commit_needed)
@@ -1932,6 +1933,7 @@ static void requeue_deferred_bios(struct cache *cache)
while ((bio = bio_list_pop(&bios))) {
bio->bi_status = BLK_STS_DM_REQUEUE;
bio_endio(bio);
+ cond_resched();
}
}
@@ -1972,6 +1974,8 @@ static void check_migrations(struct work_struct *ws)
r = mg_start(cache, op, NULL);
if (r)
break;
+
+ cond_resched();
}
}
@@ -1992,6 +1996,7 @@ static void destroy(struct cache *cache)
if (cache->prison)
dm_bio_prison_destroy_v2(cache->prison);
+ cancel_delayed_work_sync(&cache->waker);
if (cache->wq)
destroy_workqueue(cache->wq);
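
destroy() now cancels the self-rearming waker with cancel_delayed_work_sync() before destroying the workqueue, closing a window where a late requeue could land on freed infrastructure; the same ordering fix appears below in dm-clone, dm-thin and dm-integrity. A rough pthread model of why the synchronous cancel must come first (the thread loosely stands in for the delayed work):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool cancelled;

/* Stand-in for the self-rearming waker work item. */
static void *waker(void *arg)
{
	(void)arg;
	while (!atomic_load(&cancelled))
		usleep(1000);		/* "requeues itself" periodically */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waker, NULL);

	/*
	 * Teardown order from the fix: cancel the periodic work and
	 * wait for it to finish *before* tearing down the queue it
	 * requeues itself on, so no late requeue touches freed state.
	 */
	atomic_store(&cancelled, true);	/* cancel_delayed_work_sync()... */
	pthread_join(t, NULL);		/* ...including the wait part */
	/* Only now would destroy_workqueue() be safe. */

	printf("waker stopped before teardown\n");
	return 0;
}
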
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index eb7a5d3ba81a..355afca2969d 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -1977,6 +1977,7 @@ static void clone_dtr(struct dm_target *ti)
mempool_exit(&clone->hydration_pool);
dm_kcopyd_client_destroy(clone->kcopyd_client);
+ cancel_delayed_work_sync(&clone->waker);
destroy_workqueue(clone->wq);
hash_table_exit(clone);
dm_clone_metadata_close(clone->cmd);
@@ -2231,6 +2232,7 @@ static int __init dm_clone_init(void)
r = dm_register_target(&clone_target);
if (r < 0) {
DMERR("Failed to register clone target");
+ kmem_cache_destroy(_hydration_cache);
return r;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index fa674e9b6f23..6866aa2aade3 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -46,11 +46,11 @@
struct convert_context {
struct completion restart;
struct bio *bio_in;
- struct bio *bio_out;
struct bvec_iter iter_in;
+ struct bio *bio_out;
struct bvec_iter iter_out;
- u64 cc_sector;
atomic_t cc_pending;
+ u64 cc_sector;
union {
struct skcipher_request *req;
struct aead_request *req_aead;
@@ -1554,6 +1554,7 @@ pop_from_list:
io = crypt_io_from_node(rb_first(&write_tree));
rb_erase(&io->rb_node, &write_tree);
kcryptd_io_write(io);
+ cond_resched();
} while (!RB_EMPTY_ROOT(&write_tree));
blk_finish_plug(&plug);
}
@@ -1626,6 +1627,12 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
io->ctx.bio_out = clone;
io->ctx.iter_out = clone->bi_iter;
+ if (crypt_integrity_aead(cc)) {
+ bio_copy_data(clone, io->base_bio);
+ io->ctx.bio_in = clone;
+ io->ctx.iter_in = clone->bi_iter;
+ }
+
sector += bio_sectors(clone);
crypt_inc_pending(io);
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index f496213f8b67..7c0e7c662e07 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -30,7 +30,7 @@ struct delay_c {
struct workqueue_struct *kdelayd_wq;
struct work_struct flush_expired_bios;
struct list_head delayed_bios;
- atomic_t may_delay;
+ bool may_delay;
struct delay_class read;
struct delay_class write;
@@ -191,7 +191,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
INIT_LIST_HEAD(&dc->delayed_bios);
mutex_init(&dc->timer_lock);
- atomic_set(&dc->may_delay, 1);
+ dc->may_delay = true;
dc->argc = argc;
ret = delay_class_ctr(ti, &dc->read, argv);
@@ -245,7 +245,7 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
struct dm_delay_info *delayed;
unsigned long expires = 0;
- if (!c->delay || !atomic_read(&dc->may_delay))
+ if (!c->delay)
return DM_MAPIO_REMAPPED;
delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
@@ -254,6 +254,10 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);
mutex_lock(&delayed_bios_lock);
+ if (unlikely(!dc->may_delay)) {
+ mutex_unlock(&delayed_bios_lock);
+ return DM_MAPIO_REMAPPED;
+ }
c->ops++;
list_add_tail(&delayed->list, &dc->delayed_bios);
mutex_unlock(&delayed_bios_lock);
@@ -267,7 +271,10 @@ static void delay_presuspend(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
- atomic_set(&dc->may_delay, 0);
+ mutex_lock(&delayed_bios_lock);
+ dc->may_delay = false;
+ mutex_unlock(&delayed_bios_lock);
+
del_timer_sync(&dc->delay_timer);
flush_bios(flush_delayed_bios(dc, 1));
}
@@ -276,7 +283,7 @@ static void delay_resume(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
- atomic_set(&dc->may_delay, 1);
+ dc->may_delay = true;
}
static int delay_map(struct dm_target *ti, struct bio *bio)
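
The dm-delay change demotes may_delay from an atomic_t to a plain bool but moves its test under delayed_bios_lock: an atomic read outside the lock could pass just before presuspend clears the flag, letting a bio be queued after the final flush. A small pthread model of checking the flag and updating the list under one lock (names mirror the driver loosely):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t delayed_bios_lock = PTHREAD_MUTEX_INITIALIZER;
static int may_delay = 1;
static int queued;

/* delay_bio(): queue only while the flag is set, under the lock. */
static int delay_bio(void)
{
	pthread_mutex_lock(&delayed_bios_lock);
	if (!may_delay) {
		pthread_mutex_unlock(&delayed_bios_lock);
		return 0;			/* remap immediately instead */
	}
	queued++;				/* list_add_tail(...) */
	pthread_mutex_unlock(&delayed_bios_lock);
	return 1;
}

/* presuspend(): flip the flag under the same lock, then flush. */
static void presuspend(void)
{
	pthread_mutex_lock(&delayed_bios_lock);
	may_delay = 0;
	pthread_mutex_unlock(&delayed_bios_lock);
	queued = 0;				/* flush_delayed_bios() */
}

int main(void)
{
	delay_bio();
	presuspend();
	printf("queued after flush: %d\n", queued);	/* always 0 */
	return 0;
}
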
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 2900fbde89b3..967d4ad6d32c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -124,9 +124,9 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
* Direction r or w?
*/
arg_name = dm_shift_arg(as);
- if (!strcasecmp(arg_name, "w"))
+ if (arg_name && !strcasecmp(arg_name, "w"))
fc->corrupt_bio_rw = WRITE;
- else if (!strcasecmp(arg_name, "r"))
+ else if (arg_name && !strcasecmp(arg_name, "r"))
fc->corrupt_bio_rw = READ;
else {
ti->error = "Invalid corrupt bio direction (r or w)";
@@ -301,8 +301,11 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
*/
bio_for_each_segment(bvec, bio, iter) {
if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
- char *segment = (page_address(bio_iter_page(bio, iter))
- + bio_iter_offset(bio, iter));
+ char *segment;
+ struct page *page = bio_iter_page(bio, iter);
+ if (unlikely(page == ZERO_PAGE(0)))
+ break;
+ segment = (page_address(page) + bio_iter_offset(bio, iter));
segment[corrupt_bio_byte] = fc->corrupt_bio_value;
DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
@@ -360,9 +363,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
/*
* Corrupt matching writes.
*/
- if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
- if (all_corrupt_bio_flags_match(bio, fc))
- corrupt_bio_data(bio, fc);
+ if (fc->corrupt_bio_byte) {
+ if (fc->corrupt_bio_rw == WRITE) {
+ if (all_corrupt_bio_flags_match(bio, fc))
+ corrupt_bio_data(bio, fc);
+ }
goto map_bio;
}
@@ -388,13 +393,14 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
return DM_ENDIO_DONE;
if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
- if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
- all_corrupt_bio_flags_match(bio, fc)) {
- /*
- * Corrupt successful matching READs while in down state.
- */
- corrupt_bio_data(bio, fc);
-
+ if (fc->corrupt_bio_byte) {
+ if ((fc->corrupt_bio_rw == READ) &&
+ all_corrupt_bio_flags_match(bio, fc)) {
+ /*
+ * Corrupt successful matching READs while in down state.
+ */
+ corrupt_bio_data(bio, fc);
+ }
} else if (!test_bit(DROP_WRITES, &fc->flags) &&
!test_bit(ERROR_WRITES, &fc->flags)) {
/*
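
dm_shift_arg() returns NULL when the argument list is exhausted, and the old flakey code passed that straight to strcasecmp(); the fix guards both comparisons. A tiny model of the defensive parse (shift_arg and the argument table are made up for the demo):

#include <stdio.h>
#include <strings.h>

static const char *args[] = { "corrupt_bio_byte", "32", NULL };
static int argi;

/* Like dm_shift_arg(): NULL once the arguments run out. */
static const char *shift_arg(void)
{
	return args[argi] ? args[argi++] : NULL;
}

int main(void)
{
	const char *arg;

	shift_arg();			/* "corrupt_bio_byte" */
	shift_arg();			/* "32" */
	arg = shift_arg();		/* NULL: no direction given */

	if (arg && !strcasecmp(arg, "w"))
		printf("corrupt writes\n");
	else if (arg && !strcasecmp(arg, "r"))
		printf("corrupt reads\n");
	else
		printf("invalid or missing direction\n");
	return 0;
}
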
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index acbda91e7643..f3246f7407d6 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -31,11 +31,11 @@
#define DEFAULT_BUFFER_SECTORS 128
#define DEFAULT_JOURNAL_WATERMARK 50
#define DEFAULT_SYNC_MSEC 10000
-#define DEFAULT_MAX_JOURNAL_SECTORS 131072
+#define DEFAULT_MAX_JOURNAL_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192)
#define MIN_LOG2_INTERLEAVE_SECTORS 3
#define MAX_LOG2_INTERLEAVE_SECTORS 31
#define METADATA_WORKQUEUE_MAX_ACTIVE 16
-#define RECALC_SECTORS 8192
+#define RECALC_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048)
#define RECALC_WRITE_SUPER 16
#define BITMAP_BLOCK_SIZE 4096 /* don't change it */
#define BITMAP_FLUSH_INTERVAL (10 * HZ)
@@ -1582,11 +1582,12 @@ static void integrity_metadata(struct work_struct *w)
}
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
+ struct bio_vec bv_copy = bv;
unsigned pos;
char *mem, *checksums_ptr;
again:
- mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
+ mem = (char *)kmap_atomic(bv_copy.bv_page) + bv_copy.bv_offset;
pos = 0;
checksums_ptr = checksums;
do {
@@ -1595,7 +1596,7 @@ again:
sectors_to_process -= ic->sectors_per_block;
pos += ic->sectors_per_block << SECTOR_SHIFT;
sector += ic->sectors_per_block;
- } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
+ } while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
kunmap_atomic(mem);
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
@@ -1615,9 +1616,9 @@ again:
if (!sectors_to_process)
break;
- if (unlikely(pos < bv.bv_len)) {
- bv.bv_offset += pos;
- bv.bv_len -= pos;
+ if (unlikely(pos < bv_copy.bv_len)) {
+ bv_copy.bv_offset += pos;
+ bv_copy.bv_len -= pos;
goto again;
}
}
@@ -2346,10 +2347,6 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
- /* the following test is not needed, but it tests the replay code */
- if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
- return;
-
spin_lock_irq(&ic->endio_wait.lock);
write_start = ic->committed_section;
write_sections = ic->n_committed_sections;
@@ -2858,8 +2855,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
drain_workqueue(ic->commit_wq);
if (ic->mode == 'J') {
- if (ic->meta_dev)
- queue_work(ic->writer_wq, &ic->writer_work);
+ queue_work(ic->writer_wq, &ic->writer_work);
drain_workqueue(ic->writer_wq);
dm_integrity_flush_buffers(ic, true);
}
@@ -4200,6 +4196,8 @@ static void dm_integrity_dtr(struct dm_target *ti)
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
BUG_ON(!list_empty(&ic->wait_list));
+ if (ic->mode == 'B')
+ cancel_delayed_work_sync(&ic->bitmap_flush_work);
if (ic->metadata_wq)
destroy_workqueue(ic->metadata_wq);
if (ic->wait_wq)
@@ -4291,11 +4289,13 @@ static int __init dm_integrity_init(void)
}
r = dm_register_target(&integrity_target);
-
- if (r < 0)
+ if (r < 0) {
DMERR("register failed %d", r);
+ kmem_cache_destroy(journal_io_cache);
+ return r;
+ }
- return r;
+ return 0;
}
static void __exit dm_integrity_exit(void)
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 81ffc59d05c9..4312007d2d34 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -306,7 +306,7 @@ static void do_region(int op, int op_flags, unsigned region,
struct request_queue *q = bdev_get_queue(where->bdev);
unsigned short logical_block_size = queue_logical_block_size(q);
sector_t num_sectors;
- unsigned int uninitialized_var(special_cmd_max_sectors);
+ unsigned int special_cmd_max_sectors;
/*
* Reject unsupported discard and write same requests.
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 7a73f2fa0ad7..8e787677a810 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -573,7 +573,7 @@ static void list_version_get_needed(struct target_type *tt, void *needed_param)
size_t *needed = needed_param;
*needed += sizeof(struct dm_target_versions);
- *needed += strlen(tt->name);
+ *needed += strlen(tt->name) + 1;
*needed += ALIGN_MASK;
}
@@ -638,7 +638,7 @@ static int __list_versions(struct dm_ioctl *param, size_t param_size, const char
iter_info.old_vers = NULL;
iter_info.vers = vers;
iter_info.flags = 0;
- iter_info.end = (char *)vers+len;
+ iter_info.end = (char *)vers + needed;
/*
* Now loop through filling out the names & versions.
@@ -1435,11 +1435,12 @@ static int table_clear(struct file *filp, struct dm_ioctl *param, size_t param_s
hc->new_map = NULL;
}
- param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
-
- __dev_status(hc->md, param);
md = hc->md;
up_write(&_hash_lock);
+
+ param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
+ __dev_status(md, param);
+
if (old_map) {
dm_sync_table(md);
dm_table_destroy(old_map);
@@ -1847,7 +1848,7 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
int ioctl_flags;
int param_flags;
unsigned int cmd;
- struct dm_ioctl *uninitialized_var(param);
+ struct dm_ioctl *param;
ioctl_fn fn = NULL;
size_t input_param_size;
struct dm_ioctl param_kernel;
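
list_version_get_needed() under-counted each target name by one byte because strlen() excludes the terminating NUL; the companion change also bounds the fill loop by the computed size rather than the caller's buffer length. The accounting rule in a few lines:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *name = "thin-pool";

	/* strlen() does not count the trailing '\0'. */
	size_t needed = strlen(name) + 1;	/* bytes to reserve */
	char buf[16];

	memcpy(buf, name, needed);		/* copies the NUL too */
	printf("reserved %zu bytes for \"%s\"\n", needed, buf);
	return 0;
}
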
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 42151c9fc6e5..1ccd765fad93 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3284,15 +3284,19 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
if (rs_is_raid456(rs)) {
r = rs_set_raid456_stripe_cache(rs);
- if (r)
+ if (r) {
+ mddev_unlock(&rs->md);
goto bad_stripe_cache;
+ }
}
/* Now do an early reshape check */
if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
r = rs_check_reshape(rs);
- if (r)
+ if (r) {
+ mddev_unlock(&rs->md);
goto bad_check_reshape;
+ }
/* Restore new, ctr requested layout to perform check */
rs_config_restore(rs, &rs_layout);
@@ -3301,6 +3305,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
r = rs->md.pers->check_reshape(&rs->md);
if (r) {
ti->error = "Reshape check failed";
+ mddev_unlock(&rs->md);
goto bad_check_reshape;
}
}
@@ -3338,14 +3343,14 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
struct mddev *mddev = &rs->md;
/*
- * If we're reshaping to add disk(s)), ti->len and
+ * If we're reshaping to add disk(s), ti->len and
* mddev->array_sectors will differ during the process
* (ti->len > mddev->array_sectors), so we have to requeue
* bios with addresses > mddev->array_sectors here or
* there will occur accesses past EOD of the component
* data images thus erroring the raid set.
*/
- if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
+ if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors))
return DM_MAPIO_REQUEUE;
md_handle_request(mddev, bio);
@@ -3528,7 +3533,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
- struct r5conf *conf = mddev->private;
+ struct r5conf *conf = rs_is_raid456(rs) ? mddev->private : NULL;
int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
unsigned long recovery;
unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
@@ -3808,7 +3813,7 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
- for (i = 0; i < mddev->raid_disks; i++) {
+ for (i = 0; i < rs->raid_disks; i++) {
r = &rs->dev[i].rdev;
/* HM FIXME: enhance journal device recovery processing */
if (test_bit(Journal, &r->flags))
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 963d3774c93e..247089c2be25 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -613,7 +613,7 @@ static int persistent_read_metadata(struct dm_exception_store *store,
chunk_t old, chunk_t new),
void *callback_context)
{
- int r, uninitialized_var(new_snapshot);
+ int r, new_snapshot;
struct pstore *ps = get_info(store);
/*
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index ce6d3bce1b7b..0f42f4a7f382 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -188,7 +188,7 @@ static int dm_stat_in_flight(struct dm_stat_shared *shared)
atomic_read(&shared->in_flight[WRITE]);
}
-void dm_stats_init(struct dm_stats *stats)
+int dm_stats_init(struct dm_stats *stats)
{
int cpu;
struct dm_stats_last_position *last;
@@ -196,11 +196,16 @@ void dm_stats_init(struct dm_stats *stats)
mutex_init(&stats->mutex);
INIT_LIST_HEAD(&stats->list);
stats->last = alloc_percpu(struct dm_stats_last_position);
+ if (!stats->last)
+ return -ENOMEM;
+
for_each_possible_cpu(cpu) {
last = per_cpu_ptr(stats->last, cpu);
last->last_sector = (sector_t)ULLONG_MAX;
last->last_rw = UINT_MAX;
}
+
+ return 0;
}
void dm_stats_cleanup(struct dm_stats *stats)
diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
index 2ddfae678f32..dcac11fce03b 100644
--- a/drivers/md/dm-stats.h
+++ b/drivers/md/dm-stats.h
@@ -22,7 +22,7 @@ struct dm_stats_aux {
unsigned long long duration_ns;
};
-void dm_stats_init(struct dm_stats *st);
+int dm_stats_init(struct dm_stats *st);
void dm_stats_cleanup(struct dm_stats *st);
struct mapped_device;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 81bc36a43b32..8b05d938aa98 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -668,7 +668,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
*/
unsigned short remaining = 0;
- struct dm_target *uninitialized_var(ti);
+ struct dm_target *ti;
struct queue_limits ti_limits;
unsigned i;
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index a5ed59eafdc5..92e646d597ea 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -701,6 +701,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
goto bad_cleanup_data_sm;
}
+ /*
+ * For the pool metadata opening process, setting the root here is
+ * redundant because it will be set again in __begin_transaction().
+ * But the dm pool aborting process really needs the last
+ * transaction's root to avoid accessing a broken btree.
+ */
+ pmd->root = le64_to_cpu(disk_super->data_mapping_root);
+ pmd->details_root = le64_to_cpu(disk_super->device_details_root);
+
__setup_btree_details(pmd);
dm_bm_unlock(sblock);
@@ -753,13 +762,15 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
return r;
}
-static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd)
+static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd,
+ bool destroy_bm)
{
dm_sm_destroy(pmd->data_sm);
dm_sm_destroy(pmd->metadata_sm);
dm_tm_destroy(pmd->nb_tm);
dm_tm_destroy(pmd->tm);
- dm_block_manager_destroy(pmd->bm);
+ if (destroy_bm)
+ dm_block_manager_destroy(pmd->bm);
}
static int __begin_transaction(struct dm_pool_metadata *pmd)
@@ -966,7 +977,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
}
pmd_write_unlock(pmd);
if (!pmd->fail_io)
- __destroy_persistent_data_objects(pmd);
+ __destroy_persistent_data_objects(pmd, true);
kfree(pmd);
return 0;
@@ -1875,19 +1886,52 @@ static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
+ struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
+
+ /* fail_io is double-checked with pmd->root_lock held below */
+ if (unlikely(pmd->fail_io))
+ return r;
+
+ /*
+ * Replacement block manager (new_bm) is created and old_bm destroyed outside of
+ * pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
+ * shrinker associated with the block manager's bufio client vs pmd root_lock).
+ * - must take shrinker_rwsem without holding pmd->root_lock
+ */
+ new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
+ THIN_MAX_CONCURRENT_LOCKS);
pmd_write_lock(pmd);
- if (pmd->fail_io)
+ if (pmd->fail_io) {
+ pmd_write_unlock(pmd);
goto out;
+ }
__set_abort_with_changes_flags(pmd);
- __destroy_persistent_data_objects(pmd);
- r = __create_persistent_data_objects(pmd, false);
+ __destroy_persistent_data_objects(pmd, false);
+ old_bm = pmd->bm;
+ if (IS_ERR(new_bm)) {
+ DMERR("could not create block manager during abort");
+ pmd->bm = NULL;
+ r = PTR_ERR(new_bm);
+ goto out_unlock;
+ }
+
+ pmd->bm = new_bm;
+ r = __open_or_format_metadata(pmd, false);
+ if (r) {
+ pmd->bm = NULL;
+ goto out_unlock;
+ }
+ new_bm = NULL;
+out_unlock:
if (r)
pmd->fail_io = true;
-
-out:
pmd_write_unlock(pmd);
+ dm_block_manager_destroy(old_bm);
+out:
+ if (new_bm && !IS_ERR(new_bm))
+ dm_block_manager_destroy(new_bm);
return r;
}
@@ -2060,10 +2104,13 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
dm_sm_threshold_fn fn,
void *context)
{
- int r;
+ int r = -EINVAL;
pmd_write_lock_in_core(pmd);
- r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
+ if (!pmd->fail_io) {
+ r = dm_sm_register_threshold_callback(pmd->metadata_sm,
+ threshold, fn, context);
+ }
pmd_write_unlock(pmd);
return r;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1b2c98b43519..a1abdb3467a9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2224,6 +2224,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
throttle_work_update(&pool->throttle);
dm_pool_issue_prefetches(pool->pmd);
}
+ cond_resched();
}
blk_finish_plug(&plug);
}
@@ -2307,6 +2308,7 @@ static void process_thin_deferred_cells(struct thin_c *tc)
else
pool->process_cell(tc, cell);
}
+ cond_resched();
} while (!list_empty(&cells));
}
@@ -2931,6 +2933,8 @@ static void __pool_destroy(struct pool *pool)
dm_bio_prison_destroy(pool->prison);
dm_kcopyd_client_destroy(pool->copier);
+ cancel_delayed_work_sync(&pool->waker);
+ cancel_delayed_work_sync(&pool->no_space_timeout);
if (pool->wq)
destroy_workqueue(pool->wq);
@@ -3403,6 +3407,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
pt->adjusted_pf = pt->requested_pf = pf;
bio_init(&pt->flush_bio, NULL, 0);
ti->num_flush_bios = 1;
+ ti->limit_swap_bios = true;
/*
* Only need to enable discards if the pool should pass
@@ -3425,8 +3430,10 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
calc_metadata_threshold(pt),
metadata_low_callback,
pool);
- if (r)
+ if (r) {
+ ti->error = "Error registering metadata threshold";
goto out_flags_changed;
+ }
pt->callbacks.congested_fn = pool_is_congested;
dm_table_add_target_callbacks(ti->table, &pt->callbacks);
@@ -3589,23 +3596,31 @@ static int pool_preresume(struct dm_target *ti)
*/
r = bind_control_target(pool, ti);
if (r)
- return r;
+ goto out;
dm_pool_register_pre_commit_callback(pool->pmd,
metadata_pre_commit_callback, pt);
r = maybe_resize_data_dev(ti, &need_commit1);
if (r)
- return r;
+ goto out;
r = maybe_resize_metadata_dev(ti, &need_commit2);
if (r)
- return r;
+ goto out;
if (need_commit1 || need_commit2)
(void) commit(pool);
+out:
+ /*
+ * When a thin-pool is PM_FAIL, it cannot be rebuilt if
+ * bio is in deferred list. Therefore need to return 0
+ * to allow pool_resume() to flush IO.
+ */
+ if (r && get_pool_mode(pool) == PM_FAIL)
+ r = 0;
- return 0;
+ return r;
}
static void pool_suspend_active_thins(struct pool *pool)
@@ -4278,6 +4293,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
ti->num_flush_bios = 1;
+ ti->limit_swap_bios = true;
ti->flush_supported = true;
ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index cea2b3789736..442437e4e03b 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -24,7 +24,8 @@ bool verity_fec_is_enabled(struct dm_verity *v)
*/
static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
{
- return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
+ return (struct dm_verity_fec_io *)
+ ((char *)io + io->v->ti->per_io_data_size - sizeof(struct dm_verity_fec_io));
}
/*
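
fec_io() now locates the FEC state by subtracting its size from the end of the per-io data area instead of walking forward from a digest pointer, so the offset stays correct even if the layout in between changes; the now-unused verity_io_digest_end() is deleted below. A userspace sketch of tail placement inside a per-io blob (sizes and struct names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct verity_io { int block; };	/* leading per-io state */
struct fec_io    { int erasures; };	/* always placed at the tail */

int main(void)
{
	size_t per_io_data_size = sizeof(struct verity_io)
				  + 64			/* hash request area */
				  + sizeof(struct fec_io);
	char *io = calloc(1, per_io_data_size);
	struct fec_io *f;

	if (!io)
		return 1;
	/*
	 * Locate the FEC state from the end of the per-io area rather
	 * than from a pointer into the middle: the tail offset stays
	 * valid even if the layout in between changes.
	 */
	f = (struct fec_io *)(io + per_io_data_size - sizeof(struct fec_io));
	f->erasures = 3;
	printf("fec_io at offset %td\n", (char *)f - io);
	free(io);
	return 0;
}
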
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 9dcdf34b7e32..cebc7828ea8b 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -471,13 +471,14 @@ static int verity_verify_io(struct dm_verity_io *io)
struct bvec_iter start;
unsigned b;
struct crypto_wait wait;
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
for (b = 0; b < io->n_blocks; b++) {
int r;
sector_t cur_block = io->block + b;
struct ahash_request *req = verity_io_hash_req(v, io);
- if (v->validated_blocks &&
+ if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
likely(test_bit(cur_block, v->validated_blocks))) {
verity_bv_skip_block(v, io, &io->iter);
continue;
@@ -525,9 +526,17 @@ static int verity_verify_io(struct dm_verity_io *io)
else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
cur_block, NULL, &start) == 0)
continue;
- else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
- cur_block))
- return -EIO;
+ else {
+ if (bio->bi_status) {
+ /*
+ * Error correction failed; just return the error.
+ */
+ return -EIO;
+ }
+ if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
+ cur_block))
+ return -EIO;
+ }
}
return 0;
@@ -570,7 +579,9 @@ static void verity_end_io(struct bio *bio)
struct dm_verity_io *io = bio->bi_private;
if (bio->bi_status &&
- (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
+ (!verity_fec_is_enabled(io->v) ||
+ verity_is_system_shutting_down() ||
+ (bio->bi_opf & REQ_RAHEAD))) {
verity_finish_io(io, bio->bi_status);
return;
}
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 641b9e3a399b..fee7c7a81ce4 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -73,11 +73,11 @@ struct dm_verity_io {
/* original value of bio->bi_end_io */
bio_end_io_t *orig_bi_end_io;
+ struct bvec_iter iter;
+
sector_t block;
unsigned n_blocks;
- struct bvec_iter iter;
-
struct work_struct work;
/*
@@ -110,12 +110,6 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v,
return (u8 *)(io + 1) + v->ahash_reqsize + v->digest_size;
}
-static inline u8 *verity_io_digest_end(struct dm_verity *v,
- struct dm_verity_io *io)
-{
- return verity_io_want_digest(v, io) + v->digest_size;
-}
-
extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
struct bvec_iter *iter,
int (*process)(struct dm_verity *v,
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index ec10fda3f24f..961498d8f406 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -20,7 +20,7 @@
#define HIGH_WATERMARK 50
#define LOW_WATERMARK 45
-#define MAX_WRITEBACK_JOBS 0
+#define MAX_WRITEBACK_JOBS min(0x10000000 / PAGE_SIZE, totalram_pages() / 16)
#define ENDIO_LATENCY 16
#define WRITEBACK_LATENCY 64
#define AUTOCOMMIT_BLOCKS_SSD 65536
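
The new MAX_WRITEBACK_JOBS caps in-flight writeback at the smaller of 256 MiB worth of pages (0x10000000 / 4096 = 65536 with 4 KiB pages) and one sixteenth of total RAM, instead of the old unbounded 0. A quick arithmetic check (totalram_pages() faked for a 2 GiB machine):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long totalram_pages(void)
{
	return (2UL << 30) / PAGE_SIZE;		/* model a 2 GiB machine */
}

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX_WRITEBACK_JOBS MIN(0x10000000 / PAGE_SIZE, totalram_pages() / 16)

int main(void)
{
	/* 2 GiB / 4 KiB = 524288 pages; / 16 = 32768, below the 65536 cap. */
	printf("max writeback jobs: %lu pages\n", MAX_WRITEBACK_JOBS);
	return 0;
}
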
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 77e28f77c59f..a7724ba45b43 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -263,7 +263,6 @@ out_uevent_exit:
static void local_exit(void)
{
- flush_scheduled_work();
destroy_workqueue(deferred_remove_workqueue);
unregister_blkdev(_major, _name);
@@ -1995,7 +1994,9 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->bdev)
goto bad;
- dm_stats_init(&md->stats);
+ r = dm_stats_init(&md->stats);
+ if (r < 0)
+ goto bad;
/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);
@@ -2808,6 +2809,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
static void __dm_internal_resume(struct mapped_device *md)
{
+ int r;
+ struct dm_table *map;
+
BUG_ON(!md->internal_suspend_count);
if (--md->internal_suspend_count)
@@ -2816,12 +2820,23 @@ static void __dm_internal_resume(struct mapped_device *md)
if (dm_suspended_md(md))
goto done; /* resume from nested suspend */
- /*
- * NOTE: existing callers don't need to call dm_table_resume_targets
- * (which may fail -- so best to avoid it for now by passing NULL map)
- */
- (void) __dm_resume(md, NULL);
-
+ map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
+ r = __dm_resume(md, map);
+ if (r) {
+ /*
+ * If a preresume method of some target failed, we are in a
+ * tricky situation. We can't return an error to the caller. We
+ * can't fake success because then the "resume" and
+ * "postsuspend" methods would not be paired correctly, and it
+ * would break various targets, for example it would cause list
+ * corruption in the "origin" target.
+ *
+ * So, we fake normal suspend here, to make sure that the
+ * "resume" and "postsuspend" methods will be paired correctly.
+ */
+ DMERR("Preresume method failed: %d", r);
+ set_bit(DMF_SUSPENDED, &md->flags);
+ }
done:
clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
smp_mb__after_atomic();
@@ -3080,6 +3095,11 @@ static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
goto out;
ti = dm_table_get_target(table, 0);
+ if (dm_suspended_md(md)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
ret = -EINVAL;
if (!ti->type->iterate_devices)
goto out;
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index a95e20c3d0d4..843139447a96 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -54,14 +54,7 @@ __acquires(bitmap->lock)
{
unsigned char *mappage;
- if (page >= bitmap->pages) {
- /* This can happen if bitmap_start_sync goes beyond
- * End-of-device while looking for a whole page.
- * It is harmless.
- */
- return -EINVAL;
- }
-
+ WARN_ON_ONCE(page >= bitmap->pages);
if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
return 0;
@@ -489,7 +482,7 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
sb = kmap_atomic(bitmap->storage.sb_page);
pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
- pr_debug(" version: %d\n", le32_to_cpu(sb->version));
+ pr_debug(" version: %u\n", le32_to_cpu(sb->version));
pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
le32_to_cpu(*(__le32 *)(sb->uuid+0)),
le32_to_cpu(*(__le32 *)(sb->uuid+4)),
@@ -500,11 +493,11 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
pr_debug("events cleared: %llu\n",
(unsigned long long) le64_to_cpu(sb->events_cleared));
pr_debug(" state: %08x\n", le32_to_cpu(sb->state));
- pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize));
- pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
+ pr_debug(" chunksize: %u B\n", le32_to_cpu(sb->chunksize));
+ pr_debug(" daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep));
pr_debug(" sync size: %llu KB\n",
(unsigned long long)le64_to_cpu(sb->sync_size)/2);
- pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
+ pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
kunmap_atomic(sb);
}
@@ -1369,6 +1362,14 @@ __acquires(bitmap->lock)
sector_t csize;
int err;
+ if (page >= bitmap->pages) {
+ /*
+ * This can happen if bitmap_start_sync goes beyond
+ * End-of-device while looking for a whole page, or if the
+ * user set a huge number via sysfs bitmap_set_bits.
+ */
+ return NULL;
+ }
err = md_bitmap_checkpage(bitmap, page, create, 0);
if (bitmap->bp[page].hijacked ||
@@ -2110,7 +2111,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
bytes = DIV_ROUND_UP(chunks, 8);
if (!bitmap->mddev->bitmap_info.external)
bytes += sizeof(bitmap_super_t);
- } while (bytes > (space << 9));
+ } while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) <
+ (BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1));
} else
chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
@@ -2155,7 +2157,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
bitmap->counts.missing_pages = pages;
bitmap->counts.chunkshift = chunkshift;
bitmap->counts.chunks = chunks;
- bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
+ bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift +
BITMAP_BLOCK_SHIFT);
blocks = min(old_counts.chunks << old_counts.chunkshift,
@@ -2181,8 +2183,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
bitmap->counts.missing_pages = old_counts.pages;
bitmap->counts.chunkshift = old_counts.chunkshift;
bitmap->counts.chunks = old_counts.chunks;
- bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
- BITMAP_BLOCK_SHIFT);
+ bitmap->mddev->bitmap_info.chunksize =
+ 1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
blocks = old_counts.chunks << old_counts.chunkshift;
pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
break;
@@ -2200,20 +2202,23 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
if (set) {
bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
- if (*bmc_new == 0) {
- /* need to set on-disk bits too. */
- sector_t end = block + new_blocks;
- sector_t start = block >> chunkshift;
- start <<= chunkshift;
- while (start < end) {
- md_bitmap_file_set_bit(bitmap, block);
- start += 1 << chunkshift;
+ if (bmc_new) {
+ if (*bmc_new == 0) {
+ /* need to set on-disk bits too. */
+ sector_t end = block + new_blocks;
+ sector_t start = block >> chunkshift;
+
+ start <<= chunkshift;
+ while (start < end) {
+ md_bitmap_file_set_bit(bitmap, block);
+ start += 1 << chunkshift;
+ }
+ *bmc_new = 2;
+ md_bitmap_count_page(&bitmap->counts, block, 1);
+ md_bitmap_set_pending(&bitmap->counts, block);
}
- *bmc_new = 2;
- md_bitmap_count_page(&bitmap->counts, block, 1);
- md_bitmap_set_pending(&bitmap->counts, block);
+ *bmc_new |= NEEDED_MASK;
}
- *bmc_new |= NEEDED_MASK;
if (new_blocks < old_blocks)
old_blocks = new_blocks;
}
@@ -2475,11 +2480,35 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long backlog;
unsigned long old_mwb = mddev->bitmap_info.max_write_behind;
+ struct md_rdev *rdev;
+ bool has_write_mostly = false;
int rv = kstrtoul(buf, 10, &backlog);
if (rv)
return rv;
if (backlog > COUNTER_MAX)
return -EINVAL;
+
+ rv = mddev_lock(mddev);
+ if (rv)
+ return rv;
+
+ /*
+ * Without a write-mostly device, it doesn't make sense to set
+ * backlog for max_write_behind.
+ */
+ rdev_for_each(rdev, mddev) {
+ if (test_bit(WriteMostly, &rdev->flags)) {
+ has_write_mostly = true;
+ break;
+ }
+ }
+ if (!has_write_mostly) {
+ pr_warn_ratelimited("%s: can't set backlog, no write mostly device available\n",
+ mdname(mddev));
+ mddev_unlock(mddev);
+ return -EINVAL;
+ }
+
mddev->bitmap_info.max_write_behind = backlog;
if (!backlog && mddev->wb_info_pool) {
/* wb_info_pool is not needed if backlog is zero */
@@ -2494,6 +2523,8 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
}
if (old_mwb != backlog)
md_bitmap_update_sb(mddev->bitmap);
+
+ mddev_unlock(mddev);
return len;
}
@@ -2520,6 +2551,9 @@ chunksize_store(struct mddev *mddev, const char *buf, size_t len)
if (csize < 512 ||
!is_power_of_2(csize))
return -EINVAL;
+ if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE *
+ sizeof(((bitmap_super_t *)0)->chunksize))))
+ return -EOVERFLOW;
mddev->bitmap_info.chunksize = csize;
return len;
}
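
The md-bitmap fixes all guard the same 32-bit superblock chunksize field: chunksize_store() rejects values of 2^32 or more, and md_bitmap_resize() both widens the shift to 1UL << ... and stops growing chunkshift before it would exceed the field. A small demonstration of the widened shift (assuming a 64-bit unsigned long):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int shift = 33;	/* chunkshift + BITMAP_BLOCK_SHIFT */

	/*
	 * With a plain int, 1 << 33 is undefined; the fix computes the
	 * chunk size as 1UL << shift (64-bit long assumed here) and
	 * rejects anything the 32-bit superblock field cannot hold.
	 */
	unsigned long chunksize = 1UL << shift;

	if (chunksize > UINT32_MAX)
		printf("1 << %u overflows the 32-bit chunksize field\n", shift);
	else
		printf("chunksize = %lu\n", chunksize);
	return 0;
}
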
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 11fd3b32b562..61c3e8df1b55 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -481,13 +481,14 @@ static void md_end_flush(struct bio *bio)
struct md_rdev *rdev = bio->bi_private;
struct mddev *mddev = rdev->mddev;
+ bio_put(bio);
+
rdev_dec_pending(rdev, mddev);
if (atomic_dec_and_test(&mddev->flush_pending)) {
/* The pre-request flush has finished */
queue_work(md_wq, &mddev->flush_work);
}
- bio_put(bio);
}
static void md_submit_flush_data(struct work_struct *ws);
@@ -885,10 +886,12 @@ static void super_written(struct bio *bio)
} else
clear_bit(LastDev, &rdev->flags);
+ bio_put(bio);
+
+ rdev_dec_pending(rdev, mddev);
+
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
- rdev_dec_pending(rdev, mddev);
- bio_put(bio);
}
void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
@@ -1095,6 +1098,7 @@ struct super_type {
struct md_rdev *refdev,
int minor_version);
int (*validate_super)(struct mddev *mddev,
+ struct md_rdev *freshest,
struct md_rdev *rdev);
void (*sync_super)(struct mddev *mddev,
struct md_rdev *rdev);
@@ -1233,8 +1237,9 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
/*
* validate_super for 0.90.0
+ * note: we are not using "freshest" for 0.9 superblock
*/
-static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
+static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
{
mdp_disk_t *desc;
mdp_super_t *sb = page_address(rdev->sb_page);
@@ -1749,7 +1754,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
return ret;
}
-static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
+static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
{
struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
__u64 ev1 = le64_to_cpu(sb->events);
@@ -1845,13 +1850,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
}
} else if (mddev->pers == NULL) {
/* Insist on a good event counter while assembling, except for
- * spares (which don't need an event count) */
- ++ev1;
+ * spares (which don't need an event count).
+ * Similar to mdadm, we allow event counter difference of 1
+ * from the freshest device.
+ */
if (rdev->desc_nr >= 0 &&
rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
(le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
- if (ev1 < mddev->events)
+ if (ev1 + 1 < mddev->events)
return -EINVAL;
} else if (mddev->bitmap) {
/* If adding to array with a bitmap, then we can accept an
@@ -1872,8 +1879,38 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
role = MD_DISK_ROLE_SPARE;
rdev->desc_nr = -1;
- } else
+ } else if (mddev->pers == NULL && freshest && ev1 < mddev->events) {
+ /*
+ * If we are assembling, and our event counter is smaller than the
+ * highest event counter, we cannot trust our superblock about the role.
+ * It could happen that our rdev was marked as Faulty, and all other
+ * superblocks were updated with +1 event counter.
+ * Then, before the next superblock update, which typically happens when
+ * remove_and_add_spares() removes the device from the array, there was
+ * a crash or reboot.
+ * If we allow current rdev without consulting the freshest superblock,
+ * we could cause data corruption.
+ * Note that in this case our event counter is smaller by 1 than the
+ * highest; otherwise, this rdev would not be allowed into the array.
+ * Both the kernel and mdadm allow an event counter difference of 1.
+ */
+ struct mdp_superblock_1 *freshest_sb = page_address(freshest->sb_page);
+ u32 freshest_max_dev = le32_to_cpu(freshest_sb->max_dev);
+
+ if (rdev->desc_nr >= freshest_max_dev) {
+ /* this is unexpected, better not proceed */
+ pr_warn("md: %s: rdev[%pg]: desc_nr(%d) >= freshest(%pg)->sb->max_dev(%u)\n",
+ mdname(mddev), rdev->bdev, rdev->desc_nr,
+ freshest->bdev, freshest_max_dev);
+ return -EUCLEAN;
+ }
+
+ role = le16_to_cpu(freshest_sb->dev_roles[rdev->desc_nr]);
+ pr_debug("md: %s: rdev[%pg]: role=%d(0x%x) according to freshest %pg\n",
+ mdname(mddev), rdev->bdev, role, role, freshest->bdev);
+ } else {
role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
+ }
switch(role) {
case MD_DISK_ROLE_SPARE: /* spare */
break;
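The acceptance rule these hunks implement, condensed into a sketch: an assembling member may lag the freshest event count by at most 1, and a lagging member's role is then read from the freshest superblock rather than its own (hypothetical helper):

static int example_admit_member(u64 ev1, u64 freshest_events)
{
	if (ev1 + 1 < freshest_events)
		return -EINVAL;	/* lags by 2 or more: kick from the array */
	return 0;		/* lags by 0 or 1: admit, role from freshest */
}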
@@ -2777,7 +2814,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
* and should be added immediately.
*/
super_types[mddev->major_version].
- validate_super(mddev, rdev);
+ validate_super(mddev, NULL/*freshest*/, rdev);
if (add_journal)
mddev_suspend(mddev);
err = mddev->pers->hot_add_disk(mddev, rdev);
@@ -3079,6 +3116,9 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
err = kstrtouint(buf, 10, (unsigned int *)&slot);
if (err < 0)
return err;
+ if (slot < 0)
+ /* overflow */
+ return -ENOSPC;
}
if (rdev->mddev->pers && slot == -1) {
/* Setting 'slot' on an active array requires also
@@ -3684,7 +3724,7 @@ static int analyze_sbs(struct mddev *mddev)
}
super_types[mddev->major_version].
- validate_super(mddev, freshest);
+ validate_super(mddev, NULL/*freshest*/, freshest);
i = 0;
rdev_for_each_safe(rdev, tmp, mddev) {
@@ -3699,7 +3739,7 @@ static int analyze_sbs(struct mddev *mddev)
}
if (rdev != freshest) {
if (super_types[mddev->major_version].
- validate_super(mddev, rdev)) {
+ validate_super(mddev, freshest, rdev)) {
pr_warn("md: kicking non-fresh %s from array!\n",
bdevname(rdev->bdev,b));
md_kick_rdev_from_array(rdev);
@@ -3760,8 +3800,9 @@ int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
static ssize_t
safe_delay_show(struct mddev *mddev, char *page)
{
- int msec = (mddev->safemode_delay*1000)/HZ;
- return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
+ unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ;
+
+ return sprintf(page, "%u.%03u\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
@@ -3773,7 +3814,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
return -EINVAL;
}
- if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
+ if (strict_strtoul_scaled(cbuf, &msec, 3) < 0 || msec > UINT_MAX / HZ)
return -EINVAL;
if (msec == 0)
mddev->safemode_delay = 0;
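safemode_delay is kept in jiffies, so the parsed millisecond value is later multiplied by HZ; the added bound rejects inputs that would overflow that product. A sketch of the check, with a hypothetical helper name:

static int example_check_safe_delay(unsigned long msec)
{
	if (msec > UINT_MAX / HZ)	/* msec * HZ must fit */
		return -EINVAL;
	return 0;
}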
@@ -4434,6 +4475,8 @@ max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len
rv = kstrtouint(buf, 10, &n);
if (rv < 0)
return rv;
+ if (n > INT_MAX)
+ return -EINVAL;
atomic_set(&mddev->max_corr_read_errors, n);
return len;
}
@@ -4734,11 +4777,21 @@ action_store(struct mddev *mddev, const char *page, size_t len)
return -EINVAL;
err = mddev_lock(mddev);
if (!err) {
- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
err = -EBUSY;
- else {
+ } else if (mddev->reshape_position == MaxSector ||
+ mddev->pers->check_reshape == NULL ||
+ mddev->pers->check_reshape(mddev)) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
err = mddev->pers->start_reshape(mddev);
+ } else {
+ /*
+ * If reshape is still in progress, and
+ * md_check_recovery() can continue to reshape,
+ * don't restart reshape because data can be
+ * corrupted for raid456.
+ */
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
mddev_unlock(mddev);
}
@@ -6094,6 +6147,7 @@ void md_stop(struct mddev *mddev)
/* stop the array and free any attached data structures.
* This is called from dm-raid
*/
+ __md_stop_writes(mddev);
__md_stop(mddev);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
@@ -6590,7 +6644,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
rdev->saved_raid_disk = rdev->raid_disk;
} else
super_types[mddev->major_version].
- validate_super(mddev, rdev);
+ validate_super(mddev, NULL/*freshest*/, rdev);
if ((info->state & (1<<MD_DISK_SYNC)) &&
rdev->raid_disk != info->raid_disk) {
/* This was a hot-add request, but events doesn't
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 0ead5a7887f1..7f80e86459b1 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -63,8 +63,8 @@ static void dump_zones(struct mddev *mddev)
int len = 0;
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
- bdevname(conf->devlist[j*raid_disks
+ len += scnprintf(line+len, 200-len, "%s%s", k?"/":"",
+ bdevname(conf->devlist[j*raid_disks
+ k]->bdev, b));
pr_debug("md: zone%d=[%s]\n", j, line);
@@ -289,6 +289,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
goto abort;
}
+ if (conf->layout == RAID0_ORIG_LAYOUT) {
+ for (i = 1; i < conf->nr_strip_zones; i++) {
+ sector_t first_sector = conf->strip_zone[i-1].zone_end;
+
+ sector_div(first_sector, mddev->chunk_sectors);
+ zone = conf->strip_zone + i;
+ /* disk_shift is first disk index used in the zone */
+ zone->disk_shift = sector_div(first_sector,
+ zone->nb_dev);
+ }
+ }
+
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
*private_conf = conf;
@@ -475,6 +487,20 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
}
}
+/*
+ * Convert disk_index to the disk order in which it is read/written.
+ * For example, if we have 4 disks, numbered 0,1,2,3, and we write the
+ * disks starting at disk 3, the read/write order is disk 3, then 0,
+ * then 1, then 2. We want map_disk_shift() to map the disks as
+ * 0,1,2,3 => 1,2,3,0, i.e. disk 0 maps to 1, 1 to 2, 2 to 3, and
+ * 3 to 0. That way we can compare disks in that 'output' space to
+ * understand the read/write disk ordering.
+ */
+static int map_disk_shift(int disk_index, int num_disks, int disk_shift)
+{
+ return ((disk_index + num_disks - disk_shift) % num_disks);
+}
+
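A quick self-check of the worked example in the comment above (hypothetical test helper; 4 disks, zone starting at disk 3):

static void example_check_disk_shift(void)
{
	static const int expected[4] = { 1, 2, 3, 0 };
	int i;

	for (i = 0; i < 4; i++)
		WARN_ON(map_disk_shift(i, 4, 3) != expected[i]);
}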
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
struct r0conf *conf = mddev->private;
@@ -488,7 +514,9 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
sector_t end_disk_offset;
unsigned int end_disk_index;
unsigned int disk;
+ sector_t orig_start, orig_end;
+ orig_start = start;
zone = find_zone(conf, &start);
if (bio_end_sector(bio) > zone->zone_end) {
@@ -502,6 +530,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
} else
end = bio_end_sector(bio);
+ orig_end = end;
if (zone != conf->strip_zone)
end = end - zone[-1].zone_end;
@@ -513,13 +542,26 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
last_stripe_index = end;
sector_div(last_stripe_index, stripe_size);
- start_disk_index = (int)(start - first_stripe_index * stripe_size) /
- mddev->chunk_sectors;
+ /* In the first zone the original and alternate layouts are the same */
+ if ((conf->layout == RAID0_ORIG_LAYOUT) && (zone != conf->strip_zone)) {
+ sector_div(orig_start, mddev->chunk_sectors);
+ start_disk_index = sector_div(orig_start, zone->nb_dev);
+ start_disk_index = map_disk_shift(start_disk_index,
+ zone->nb_dev,
+ zone->disk_shift);
+ sector_div(orig_end, mddev->chunk_sectors);
+ end_disk_index = sector_div(orig_end, zone->nb_dev);
+ end_disk_index = map_disk_shift(end_disk_index,
+ zone->nb_dev, zone->disk_shift);
+ } else {
+ start_disk_index = (int)(start - first_stripe_index * stripe_size) /
+ mddev->chunk_sectors;
+ end_disk_index = (int)(end - last_stripe_index * stripe_size) /
+ mddev->chunk_sectors;
+ }
start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
mddev->chunk_sectors) +
first_stripe_index * mddev->chunk_sectors;
- end_disk_index = (int)(end - last_stripe_index * stripe_size) /
- mddev->chunk_sectors;
end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
mddev->chunk_sectors) +
last_stripe_index * mddev->chunk_sectors;
@@ -528,18 +570,22 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
sector_t dev_start, dev_end;
struct bio *discard_bio = NULL;
struct md_rdev *rdev;
+ int compare_disk;
+
+ compare_disk = map_disk_shift(disk, zone->nb_dev,
+ zone->disk_shift);
- if (disk < start_disk_index)
+ if (compare_disk < start_disk_index)
dev_start = (first_stripe_index + 1) *
mddev->chunk_sectors;
- else if (disk > start_disk_index)
+ else if (compare_disk > start_disk_index)
dev_start = first_stripe_index * mddev->chunk_sectors;
else
dev_start = start_disk_offset;
- if (disk < end_disk_index)
+ if (compare_disk < end_disk_index)
dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
- else if (disk > end_disk_index)
+ else if (compare_disk > end_disk_index)
dev_end = last_stripe_index * mddev->chunk_sectors;
else
dev_end = end_disk_offset;
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index 3816e5477db1..8cc761ca7423 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -6,6 +6,7 @@ struct strip_zone {
sector_t zone_end; /* Start of the next zone (in sectors) */
sector_t dev_start; /* Zone offset in real dev (in sectors) */
int nb_dev; /* # of devices attached to the zone */
+ int disk_shift; /* start disk for the original layout */
};
/* Linux 3.14 (20d0189b101) made an unintended change to
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e87184645c54..c40237cfdcb0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1810,6 +1810,9 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
int number = rdev->raid_disk;
struct raid1_info *p = conf->mirrors + number;
+ if (unlikely(number >= conf->raid_disks))
+ goto abort;
+
if (rdev != p->rdev)
p = conf->mirrors + conf->raid_disks + number;
@@ -3132,6 +3135,7 @@ static int raid1_run(struct mddev *mddev)
* RAID1 needs at least one disk in active
*/
if (conf->raid_disks - mddev->degraded < 1) {
+ md_unregister_thread(&conf->thread);
ret = -EINVAL;
goto abort;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index deddabfb07d7..3983d5c8b5cd 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -751,8 +751,16 @@ static struct md_rdev *read_balance(struct r10conf *conf,
disk = r10_bio->devs[slot].devnum;
rdev = rcu_dereference(conf->mirrors[disk].replacement);
if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
- r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
+ r10_bio->devs[slot].addr + sectors >
+ rdev->recovery_offset) {
+ /*
+ * Read the replacement first to prevent reading both
+ * rdev and replacement as NULL while the replacement
+ * replaces rdev.
+ */
+ smp_mb();
rdev = rcu_dereference(conf->mirrors[disk].rdev);
+ }
if (rdev == NULL ||
test_bit(Faulty, &rdev->flags))
continue;
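The barrier matters because the updater publishes the replacement into ->rdev and then clears ->replacement; without ordering, the two loads can both observe NULL. A sketch of the read side, assuming the caller holds rcu_read_lock():

static struct md_rdev *example_pick_rdev(struct raid10_info *mirror)
{
	struct md_rdev *rdev;

	rdev = rcu_dereference(mirror->replacement);
	if (!rdev || test_bit(Faulty, &rdev->flags)) {
		smp_mb();	/* order the two loads against the updater */
		rdev = rcu_dereference(mirror->rdev);
	}
	return rdev;	/* cannot see both slots as NULL mid-replacement */
}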
@@ -919,6 +927,7 @@ static void flush_pending_writes(struct r10conf *conf)
else
generic_make_request(bio);
bio = next;
+ cond_resched();
}
blk_finish_plug(&plug);
} else
@@ -1104,6 +1113,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
else
generic_make_request(bio);
bio = next;
+ cond_resched();
}
kfree(plug);
}
@@ -1363,9 +1373,15 @@ retry_write:
for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
- struct md_rdev *rrdev = rcu_dereference(
- conf->mirrors[d].replacement);
+ struct md_rdev *rdev, *rrdev;
+
+ rrdev = rcu_dereference(conf->mirrors[d].replacement);
+ /*
+ * Read the replacement first to prevent reading both rdev and
+ * replacement as NULL while the replacement replaces rdev.
+ */
+ smp_mb();
+ rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev == rrdev)
rrdev = NULL;
if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
@@ -1826,9 +1842,12 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
int err = 0;
int number = rdev->raid_disk;
struct md_rdev **rdevp;
- struct raid10_info *p = conf->mirrors + number;
+ struct raid10_info *p;
print_conf(conf);
+ if (unlikely(number >= mddev->raid_disks))
+ return 0;
+ p = conf->mirrors + number;
if (rdev == p->rdev)
rdevp = &p->rdev;
else if (rdev == p->replacement)
@@ -2226,11 +2245,22 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
{
struct r10conf *conf = mddev->private;
int d;
- struct bio *wbio, *wbio2;
+ struct bio *wbio = r10_bio->devs[1].bio;
+ struct bio *wbio2 = r10_bio->devs[1].repl_bio;
+
+ /* Need to test wbio2->bi_end_io before we call
+ * generic_make_request: if bi_end_io is NULL,
+ * generic_make_request is free to free wbio2.
+ */
+ if (wbio2 && !wbio2->bi_end_io)
+ wbio2 = NULL;
if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
fix_recovery_read_error(r10_bio);
- end_sync_request(r10_bio);
+ if (wbio->bi_end_io)
+ end_sync_request(r10_bio);
+ if (wbio2)
+ end_sync_request(r10_bio);
return;
}
@@ -2239,14 +2269,6 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* and submit the write request
*/
d = r10_bio->devs[1].devnum;
- wbio = r10_bio->devs[1].bio;
- wbio2 = r10_bio->devs[1].repl_bio;
- /* Need to test wbio2->bi_end_io before we call
- * generic_make_request as if the former is NULL,
- * the latter is free to free wbio2.
- */
- if (wbio2 && !wbio2->bi_end_io)
- wbio2 = NULL;
if (wbio->bi_end_io) {
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
@@ -2914,10 +2936,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t chunk_mask = conf->geo.chunk_mask;
int page_idx = 0;
- if (!mempool_initialized(&conf->r10buf_pool))
- if (init_resync(conf))
- return 0;
-
/*
* Allow skipping a full rebuild for incremental assembly
* of a clean array, like RAID1 does.
@@ -2933,6 +2951,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
return mddev->dev_sectors - sector_nr;
}
+ if (!mempool_initialized(&conf->r10buf_pool))
+ if (init_resync(conf))
+ return 0;
+
skipped:
max_sector = mddev->dev_sectors;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
@@ -3048,7 +3070,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
int must_sync;
int any_working;
int need_recover = 0;
- int need_replace = 0;
struct raid10_info *mirror = &conf->mirrors[i];
struct md_rdev *mrdev, *mreplace;
@@ -3060,11 +3081,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
!test_bit(Faulty, &mrdev->flags) &&
!test_bit(In_sync, &mrdev->flags))
need_recover = 1;
- if (mreplace != NULL &&
- !test_bit(Faulty, &mreplace->flags))
- need_replace = 1;
+ if (mreplace && test_bit(Faulty, &mreplace->flags))
+ mreplace = NULL;
- if (!need_recover && !need_replace) {
+ if (!need_recover && !mreplace) {
rcu_read_unlock();
continue;
}
@@ -3080,8 +3100,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
rcu_read_unlock();
continue;
}
- if (mreplace && test_bit(Faulty, &mreplace->flags))
- mreplace = NULL;
/* Unless we are doing a full sync, or a replacement
* we only need to recover the block if it is set in
* the bitmap
@@ -3204,11 +3222,11 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r10_bio->devs[1].repl_bio;
if (bio)
bio->bi_end_io = NULL;
- /* Note: if need_replace, then bio
+ /* Note: if mreplace is not NULL, then bio
* cannot be NULL as r10buf_pool_alloc will
* have allocated it.
*/
- if (!need_replace)
+ if (!mreplace)
break;
bio->bi_next = biolist;
biolist = bio;
@@ -3629,6 +3647,20 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
return nc*fc;
}
+static void raid10_free_conf(struct r10conf *conf)
+{
+ if (!conf)
+ return;
+
+ mempool_exit(&conf->r10bio_pool);
+ kfree(conf->mirrors);
+ kfree(conf->mirrors_old);
+ kfree(conf->mirrors_new);
+ safe_put_page(conf->tmppage);
+ bioset_exit(&conf->bio_split);
+ kfree(conf);
+}
+
static struct r10conf *setup_conf(struct mddev *mddev)
{
struct r10conf *conf = NULL;
@@ -3711,20 +3743,24 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return conf;
out:
- if (conf) {
- mempool_exit(&conf->r10bio_pool);
- kfree(conf->mirrors);
- safe_put_page(conf->tmppage);
- bioset_exit(&conf->bio_split);
- kfree(conf);
- }
+ raid10_free_conf(conf);
return ERR_PTR(err);
}
+static void raid10_set_io_opt(struct r10conf *conf)
+{
+ int raid_disks = conf->geo.raid_disks;
+
+ if (!(conf->geo.raid_disks % conf->geo.near_copies))
+ raid_disks /= conf->geo.near_copies;
+ blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
+ raid_disks);
+}
+
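Worked numbers for the helper above: with 6 disks, near_copies = 2 and 512 KiB chunks, 6 % 2 == 0, so raid_disks becomes 3 and io_opt is 3 chunks = 1.5 MiB, one full stripe of unique data. A sketch of the arithmetic:

static unsigned int example_io_opt_bytes(unsigned int chunk_sectors,
					 int raid_disks, int near_copies)
{
	if (!(raid_disks % near_copies))
		raid_disks /= near_copies;	/* count unique-data disks */
	return (chunk_sectors << 9) * raid_disks;
}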
static int raid10_run(struct mddev *mddev)
{
struct r10conf *conf;
- int i, disk_idx, chunk_size;
+ int i, disk_idx;
struct raid10_info *disk;
struct md_rdev *rdev;
sector_t size;
@@ -3745,6 +3781,9 @@ static int raid10_run(struct mddev *mddev)
if (!conf)
goto out;
+ mddev->thread = conf->thread;
+ conf->thread = NULL;
+
if (mddev_is_clustered(conf->mddev)) {
int fc, fo;
@@ -3757,21 +3796,13 @@ static int raid10_run(struct mddev *mddev)
}
}
- mddev->thread = conf->thread;
- conf->thread = NULL;
-
- chunk_size = mddev->chunk_sectors << 9;
if (mddev->queue) {
blk_queue_max_discard_sectors(mddev->queue,
mddev->chunk_sectors);
blk_queue_max_write_same_sectors(mddev->queue, 0);
blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
- blk_queue_io_min(mddev->queue, chunk_size);
- if (conf->geo.raid_disks % conf->geo.near_copies)
- blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
- else
- blk_queue_io_opt(mddev->queue, chunk_size *
- (conf->geo.raid_disks / conf->geo.near_copies));
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ raid10_set_io_opt(conf);
}
rdev_for_each(rdev, mddev) {
@@ -3931,10 +3962,7 @@ static int raid10_run(struct mddev *mddev)
out_free_conf:
md_unregister_thread(&mddev->thread);
- mempool_exit(&conf->r10bio_pool);
- safe_put_page(conf->tmppage);
- kfree(conf->mirrors);
- kfree(conf);
+ raid10_free_conf(conf);
mddev->private = NULL;
out:
return -EIO;
@@ -3942,15 +3970,7 @@ out:
static void raid10_free(struct mddev *mddev, void *priv)
{
- struct r10conf *conf = priv;
-
- mempool_exit(&conf->r10bio_pool);
- safe_put_page(conf->tmppage);
- kfree(conf->mirrors);
- kfree(conf->mirrors_old);
- kfree(conf->mirrors_new);
- bioset_exit(&conf->bio_split);
- kfree(conf);
+ raid10_free_conf(priv);
}
static void raid10_quiesce(struct mddev *mddev, int quiesce)
@@ -4745,6 +4765,7 @@ static void end_reshape(struct r10conf *conf)
stripe /= conf->geo.near_copies;
if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+ raid10_set_io_opt(conf);
}
conf->fullsync = 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 474cf6abefea..0bea103f63d5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2598,7 +2598,7 @@ static void raid5_end_write_request(struct bio *bi)
struct stripe_head *sh = bi->bi_private;
struct r5conf *conf = sh->raid_conf;
int disks = sh->disks, i;
- struct md_rdev *uninitialized_var(rdev);
+ struct md_rdev *rdev;
sector_t first_bad;
int bad_sectors;
int replacement = 0;
@@ -2666,10 +2666,10 @@ static void raid5_end_write_request(struct bio *bi)
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
- raid5_release_stripe(sh);
if (sh->batch_head && sh != sh->batch_head)
raid5_release_stripe(sh->batch_head);
+ raid5_release_stripe(sh);
}
static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
@@ -3728,7 +3728,7 @@ static void handle_stripe_fill(struct stripe_head *sh,
* back cache (prexor with orig_page, and then xor with
* page) in the read path
*/
- if (s->injournal && s->failed) {
+ if (s->to_read && s->injournal && s->failed) {
if (test_bit(STRIPE_R5C_CACHING, &sh->state))
r5c_make_stripe_write_out(sh);
goto out;
@@ -7147,6 +7147,12 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
return 0;
}
+static void raid5_set_io_opt(struct r5conf *conf)
+{
+ blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) *
+ (conf->raid_disks - conf->max_degraded));
+}
+
static int raid5_run(struct mddev *mddev)
{
struct r5conf *conf;
@@ -7436,8 +7442,7 @@ static int raid5_run(struct mddev *mddev)
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
- blk_queue_io_opt(mddev->queue, chunk_size *
- (conf->raid_disks - conf->max_degraded));
+ raid5_set_io_opt(conf);
mddev->queue->limits.raid_partial_stripes_expensive = 1;
/*
* We can only discard a whole stripe. It doesn't make sense to
@@ -8031,6 +8036,7 @@ static void end_reshape(struct r5conf *conf)
/ PAGE_SIZE);
if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+ raid5_set_io_opt(conf);
}
}
}
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index c665f7d20c44..4c1770b8128c 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -1077,7 +1077,8 @@ void cec_received_msg_ts(struct cec_adapter *adap,
mutex_lock(&adap->lock);
dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);
- adap->last_initiator = 0xff;
+ if (!adap->transmit_in_progress)
+ adap->last_initiator = 0xff;
/* Check if this message was for us (directed or broadcast). */
if (!cec_msg_is_broadcast(msg))
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index 50f1e0b28b25..a4d729a4f330 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -113,6 +113,7 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
{
unsigned pat;
unsigned plane;
+ int ret = 0;
tpg->max_line_width = max_w;
for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) {
@@ -121,14 +122,18 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
tpg->lines[pat][plane] =
vzalloc(array3_size(max_w, 2, pixelsz));
- if (!tpg->lines[pat][plane])
- return -ENOMEM;
+ if (!tpg->lines[pat][plane]) {
+ ret = -ENOMEM;
+ goto free_lines;
+ }
if (plane == 0)
continue;
tpg->downsampled_lines[pat][plane] =
vzalloc(array3_size(max_w, 2, pixelsz));
- if (!tpg->downsampled_lines[pat][plane])
- return -ENOMEM;
+ if (!tpg->downsampled_lines[pat][plane]) {
+ ret = -ENOMEM;
+ goto free_lines;
+ }
}
}
for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
@@ -136,18 +141,45 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
tpg->contrast_line[plane] =
vzalloc(array_size(pixelsz, max_w));
- if (!tpg->contrast_line[plane])
- return -ENOMEM;
+ if (!tpg->contrast_line[plane]) {
+ ret = -ENOMEM;
+ goto free_contrast_line;
+ }
tpg->black_line[plane] =
vzalloc(array_size(pixelsz, max_w));
- if (!tpg->black_line[plane])
- return -ENOMEM;
+ if (!tpg->black_line[plane]) {
+ ret = -ENOMEM;
+ goto free_contrast_line;
+ }
tpg->random_line[plane] =
vzalloc(array3_size(max_w, 2, pixelsz));
- if (!tpg->random_line[plane])
- return -ENOMEM;
+ if (!tpg->random_line[plane]) {
+ ret = -ENOMEM;
+ goto free_contrast_line;
+ }
}
return 0;
+
+free_contrast_line:
+ for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+ vfree(tpg->contrast_line[plane]);
+ vfree(tpg->black_line[plane]);
+ vfree(tpg->random_line[plane]);
+ tpg->contrast_line[plane] = NULL;
+ tpg->black_line[plane] = NULL;
+ tpg->random_line[plane] = NULL;
+ }
+free_lines:
+ for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++)
+ for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+ vfree(tpg->lines[pat][plane]);
+ tpg->lines[pat][plane] = NULL;
+ if (plane == 0)
+ continue;
+ vfree(tpg->downsampled_lines[pat][plane]);
+ tpg->downsampled_lines[pat][plane] = NULL;
+ }
+ return ret;
}
EXPORT_SYMBOL_GPL(tpg_alloc);
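The reworked tpg_alloc() follows the usual kernel goto-unwind idiom: each failure frees everything allocated so far and NULLs the pointers so a later tpg_free() cannot double-free. A reduced sketch of the pattern (hypothetical two-buffer version, needs <linux/vmalloc.h>):

static int example_alloc_pair(void **a, void **b, size_t n)
{
	*a = vzalloc(n);
	if (!*a)
		return -ENOMEM;

	*b = vzalloc(n);
	if (!*b)
		goto free_a;
	return 0;

free_a:
	vfree(*a);
	*a = NULL;	/* keep the struct safe for a later free pass */
	return -ENOMEM;
}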
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index e58cb8434daf..12b7f698f562 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -800,6 +800,11 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
if (mutex_lock_interruptible(&dmxdev->mutex))
return -ERESTARTSYS;
+ if (dmxdev->exit) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ENODEV;
+ }
+
for (i = 0; i < dmxdev->filternum; i++)
if (dmxdev->filter[i].state == DMXDEV_STATE_FREE)
break;
@@ -1458,7 +1463,10 @@ EXPORT_SYMBOL(dvb_dmxdev_init);
void dvb_dmxdev_release(struct dmxdev *dmxdev)
{
+ mutex_lock(&dmxdev->mutex);
dmxdev->exit = 1;
+ mutex_unlock(&dmxdev->mutex);
+
if (dmxdev->dvbdev->users > 1) {
wait_event(dmxdev->dvbdev->wait_queue,
dmxdev->dvbdev->users == 1);
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index cfc27629444f..dec036e0336c 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -151,13 +151,19 @@ struct dvb_ca_private {
/* mutex serializing ioctls */
struct mutex ioctl_mutex;
+
+ /* A mutex used when a device is disconnected */
+ struct mutex remove_mutex;
+
+ /* Whether the device is disconnected */
+ int exit;
};
static void dvb_ca_private_free(struct dvb_ca_private *ca)
{
unsigned int i;
- dvb_free_device(ca->dvbdev);
+ dvb_device_put(ca->dvbdev);
for (i = 0; i < ca->slot_count; i++)
vfree(ca->slot_info[i].rx_buffer.data);
@@ -187,7 +193,7 @@ static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca);
static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
u8 *ebuf, int ecount);
static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
- u8 *ebuf, int ecount);
+ u8 *ebuf, int ecount, int size_write_flag);
/**
* Safely find needle in haystack.
@@ -370,7 +376,7 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_FR, HZ / 10);
if (ret)
return ret;
- ret = dvb_ca_en50221_write_data(ca, slot, buf, 2);
+ ret = dvb_ca_en50221_write_data(ca, slot, buf, 2, CMDREG_SW);
if (ret != 2)
return -EIO;
ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN);
@@ -778,11 +784,13 @@ exit:
* @buf: The data in this buffer is treated as a complete link-level packet to
* be written.
* @bytes_write: Size of buf.
+ * @size_write_flag: A flag on the Command Register which says whether
+ * the link size information will be written or not.
*
* return: Number of bytes written, or < 0 on error.
*/
static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
- u8 *buf, int bytes_write)
+ u8 *buf, int bytes_write, int size_write_flag)
{
struct dvb_ca_slot *sl = &ca->slot_info[slot];
int status;
@@ -817,7 +825,7 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
/* OK, set HC bit */
status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND,
- IRQEN | CMDREG_HC);
+ IRQEN | CMDREG_HC | size_write_flag);
if (status)
goto exit;
@@ -1505,7 +1513,7 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
mutex_lock(&sl->slot_lock);
status = dvb_ca_en50221_write_data(ca, slot, fragbuf,
- fraglen + 2);
+ fraglen + 2, 0);
mutex_unlock(&sl->slot_lock);
if (status == (fraglen + 2)) {
written = 1;
@@ -1706,12 +1714,22 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
dprintk("%s\n", __func__);
- if (!try_module_get(ca->pub->owner))
+ mutex_lock(&ca->remove_mutex);
+
+ if (ca->exit) {
+ mutex_unlock(&ca->remove_mutex);
+ return -ENODEV;
+ }
+
+ if (!try_module_get(ca->pub->owner)) {
+ mutex_unlock(&ca->remove_mutex);
return -EIO;
+ }
err = dvb_generic_open(inode, file);
if (err < 0) {
module_put(ca->pub->owner);
+ mutex_unlock(&ca->remove_mutex);
return err;
}
@@ -1736,6 +1754,7 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
dvb_ca_private_get(ca);
+ mutex_unlock(&ca->remove_mutex);
return 0;
}
@@ -1755,6 +1774,8 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
dprintk("%s\n", __func__);
+ mutex_lock(&ca->remove_mutex);
+
/* mark the CA device as closed */
ca->open = 0;
dvb_ca_en50221_thread_update_delay(ca);
@@ -1765,6 +1786,13 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
dvb_ca_private_put(ca);
+ if (dvbdev->users == 1 && ca->exit == 1) {
+ mutex_unlock(&ca->remove_mutex);
+ wake_up(&dvbdev->wait_queue);
+ } else {
+ mutex_unlock(&ca->remove_mutex);
+ }
+
return err;
}
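The open/release pair above follows the disconnect pattern used throughout these dvb-core fixes: open() fails once ->exit is set, and the last release() wakes the waiting remover, all serialized by remove_mutex. A reduced sketch with a hypothetical device struct:

struct example_dev {
	struct mutex remove_mutex;
	int exit;		/* set once on disconnect */
};

static int example_guarded_open(struct example_dev *dev)
{
	int ret = 0;

	mutex_lock(&dev->remove_mutex);
	if (dev->exit)
		ret = -ENODEV;	/* device is already being removed */
	mutex_unlock(&dev->remove_mutex);
	return ret;
}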
@@ -1888,6 +1916,7 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
}
mutex_init(&ca->ioctl_mutex);
+ mutex_init(&ca->remove_mutex);
if (signal_pending(current)) {
ret = -EINTR;
@@ -1930,6 +1959,14 @@ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
dprintk("%s\n", __func__);
+ mutex_lock(&ca->remove_mutex);
+ ca->exit = 1;
+ mutex_unlock(&ca->remove_mutex);
+
+ if (ca->dvbdev->users < 1)
+ wait_event(ca->dvbdev->wait_queue,
+ ca->dvbdev->users == 1);
+
/* shutdown the thread if there was one */
kthread_stop(ca->thread);
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 39a2c6ccf31d..9904a170faef 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -125,12 +125,12 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
cc = buf[3] & 0x0f;
ccok = ((feed->cc + 1) & 0x0f) == cc;
- feed->cc = cc;
if (!ccok) {
set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
dprintk_sect_loss("missed packet: %d instead of %d!\n",
cc, (feed->cc + 1) & 0x0f);
}
+ feed->cc = cc;
if (buf[1] & 0x40) // PUSI ?
feed->peslen = 0xfffa;
@@ -310,7 +310,6 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
cc = buf[3] & 0x0f;
ccok = ((feed->cc + 1) & 0x0f) == cc;
- feed->cc = cc;
if (buf[3] & 0x20) {
/* adaptation field present, check for discontinuity_indicator */
@@ -346,6 +345,7 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
feed->pusi_seen = false;
dvb_dmx_swfilter_section_new(feed);
}
+ feed->cc = cc;
if (buf[1] & 0x40) {
/* PUSI=1 (is set), section boundary is here */
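Both hunks move the feed->cc update below the continuity test, since the expected counter must be derived from the old value before it is overwritten. A sketch of the corrected order (hypothetical helper):

static bool example_cc_ok(u8 *feed_cc, u8 pkt_cc)
{
	bool ok = ((*feed_cc + 1) & 0x0f) == pkt_cc;

	*feed_cc = pkt_cc;	/* update only after the comparison */
	return ok;
}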
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 06ea30a689d7..ad3e42a4eaf7 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -135,7 +135,7 @@ static void __dvb_frontend_free(struct dvb_frontend *fe)
struct dvb_frontend_private *fepriv = fe->frontend_priv;
if (fepriv)
- dvb_free_device(fepriv->dvbdev);
+ dvb_device_put(fepriv->dvbdev);
dvb_frontend_invoke_release(fe, fe->ops.release);
@@ -292,14 +292,22 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe,
}
if (events->eventw == events->eventr) {
- int ret;
+ struct wait_queue_entry wait;
+ int ret = 0;
if (flags & O_NONBLOCK)
return -EWOULDBLOCK;
- ret = wait_event_interruptible(events->wait_queue,
- dvb_frontend_test_event(fepriv, events));
-
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&events->wait_queue, &wait);
+ while (!dvb_frontend_test_event(fepriv, events)) {
+ wait_woken(&wait, TASK_INTERRUPTIBLE, 0);
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ remove_wait_queue(&events->wait_queue, &wait);
if (ret < 0)
return ret;
}
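The open-coded loop replaces wait_event_interruptible() so the wait entry is registered before the condition is re-tested, closing the window for a missed wakeup. A generic sketch of the wait_woken() idiom (hypothetical condition callback; MAX_SCHEDULE_TIMEOUT used here for an indefinite wait):

static int example_wait_for(struct wait_queue_head *wq,
			    bool (*cond)(void *), void *arg)
{
	struct wait_queue_entry wait;
	int ret = 0;

	init_waitqueue_entry(&wait, current);
	add_wait_queue(wq, &wait);
	while (!cond(arg)) {
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	remove_wait_queue(wq, &wait);
	return ret;
}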
@@ -2961,6 +2969,7 @@ int dvb_register_frontend(struct dvb_adapter *dvb,
.name = fe->ops.info.name,
#endif
};
+ int ret;
dev_dbg(dvb->device, "%s:\n", __func__);
@@ -2994,8 +3003,13 @@ int dvb_register_frontend(struct dvb_adapter *dvb,
"DVB: registering adapter %i frontend %i (%s)...\n",
fe->dvb->num, fe->id, fe->ops.info.name);
- dvb_register_device(fe->dvb, &fepriv->dvbdev, &dvbdev_template,
+ ret = dvb_register_device(fe->dvb, &fepriv->dvbdev, &dvbdev_template,
fe, DVB_DEVICE_FRONTEND, 0);
+ if (ret) {
+ dvb_frontend_put(fe);
+ mutex_unlock(&frontend_mutex);
+ return ret;
+ }
/*
* Initialize the cache to the proper values according with the
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 9fed06ba88ef..ccaaabaaeb57 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -1564,15 +1564,43 @@ static long dvb_net_ioctl(struct file *file,
return dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl);
}
+static int locked_dvb_net_open(struct inode *inode, struct file *file)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct dvb_net *dvbnet = dvbdev->priv;
+ int ret;
+
+ if (mutex_lock_interruptible(&dvbnet->remove_mutex))
+ return -ERESTARTSYS;
+
+ if (dvbnet->exit) {
+ mutex_unlock(&dvbnet->remove_mutex);
+ return -ENODEV;
+ }
+
+ ret = dvb_generic_open(inode, file);
+
+ mutex_unlock(&dvbnet->remove_mutex);
+
+ return ret;
+}
+
static int dvb_net_close(struct inode *inode, struct file *file)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_net *dvbnet = dvbdev->priv;
+ mutex_lock(&dvbnet->remove_mutex);
+
dvb_generic_release(inode, file);
- if(dvbdev->users == 1 && dvbnet->exit == 1)
+ if (dvbdev->users == 1 && dvbnet->exit == 1) {
+ mutex_unlock(&dvbnet->remove_mutex);
wake_up(&dvbdev->wait_queue);
+ } else {
+ mutex_unlock(&dvbnet->remove_mutex);
+ }
+
return 0;
}
@@ -1580,7 +1608,7 @@ static int dvb_net_close(struct inode *inode, struct file *file)
static const struct file_operations dvb_net_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = dvb_net_ioctl,
- .open = dvb_generic_open,
+ .open = locked_dvb_net_open,
.release = dvb_net_close,
.llseek = noop_llseek,
};
@@ -1599,10 +1627,13 @@ void dvb_net_release (struct dvb_net *dvbnet)
{
int i;
+ mutex_lock(&dvbnet->remove_mutex);
dvbnet->exit = 1;
+ mutex_unlock(&dvbnet->remove_mutex);
+
if (dvbnet->dvbdev->users < 1)
wait_event(dvbnet->dvbdev->wait_queue,
- dvbnet->dvbdev->users==1);
+ dvbnet->dvbdev->users == 1);
dvb_unregister_device(dvbnet->dvbdev);
@@ -1621,6 +1652,7 @@ int dvb_net_init (struct dvb_adapter *adap, struct dvb_net *dvbnet,
int i;
mutex_init(&dvbnet->ioctl_mutex);
+ mutex_init(&dvbnet->remove_mutex);
dvbnet->demux = dmx;
for (i=0; i<DVB_NET_DEVICES_MAX; i++)
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c
index 6974f1731529..1331f2c2237e 100644
--- a/drivers/media/dvb-core/dvb_vb2.c
+++ b/drivers/media/dvb-core/dvb_vb2.c
@@ -358,6 +358,12 @@ int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req)
int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
+ struct vb2_queue *q = &ctx->vb_q;
+
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "[%s] buffer index out of range\n", ctx->name);
+ return -EINVAL;
+ }
vb2_core_querybuf(&ctx->vb_q, b->index, b);
dprintk(3, "[%s] index=%d\n", ctx->name, b->index);
return 0;
@@ -382,8 +388,13 @@ int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp)
int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
+ struct vb2_queue *q = &ctx->vb_q;
int ret;
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "[%s] buffer index out of range\n", ctx->name);
+ return -EINVAL;
+ }
ret = vb2_core_qbuf(&ctx->vb_q, b->index, b, NULL);
if (ret) {
dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 197cf17b246f..d4d903a9dc31 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -37,6 +37,7 @@
#include <media/tuner.h>
static DEFINE_MUTEX(dvbdev_mutex);
+static LIST_HEAD(dvbdevfops_list);
static int dvbdev_debug;
module_param(dvbdev_debug, int, 0644);
@@ -107,12 +108,14 @@ static int dvb_device_open(struct inode *inode, struct file *file)
new_fops = fops_get(dvbdev->fops);
if (!new_fops)
goto fail;
- file->private_data = dvbdev;
+ file->private_data = dvb_device_get(dvbdev);
replace_fops(file, new_fops);
if (file->f_op->open)
err = file->f_op->open(inode, file);
up_read(&minor_rwsem);
mutex_unlock(&dvbdev_mutex);
+ if (err)
+ dvb_device_put(dvbdev);
return err;
}
fail:
@@ -171,6 +174,9 @@ int dvb_generic_release(struct inode *inode, struct file *file)
}
dvbdev->users++;
+
+ dvb_device_put(dvbdev);
+
return 0;
}
EXPORT_SYMBOL(dvb_generic_release);
@@ -342,6 +348,7 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
GFP_KERNEL);
if (!dvbdev->pads) {
kfree(dvbdev->entity);
+ dvbdev->entity = NULL;
return -ENOMEM;
}
}
@@ -458,14 +465,15 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
enum dvb_device_type type, int demux_sink_pads)
{
struct dvb_device *dvbdev;
- struct file_operations *dvbdevfops;
+ struct file_operations *dvbdevfops = NULL;
+ struct dvbdevfops_node *node = NULL, *new_node = NULL;
struct device *clsdev;
int minor;
int id, ret;
mutex_lock(&dvbdev_register_lock);
- if ((id = dvbdev_get_free_id (adap, type)) < 0){
+ if ((id = dvbdev_get_free_id (adap, type)) < 0) {
mutex_unlock(&dvbdev_register_lock);
*pdvbdev = NULL;
pr_err("%s: couldn't find free device id\n", __func__);
@@ -473,41 +481,73 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
}
*pdvbdev = dvbdev = kzalloc(sizeof(*dvbdev), GFP_KERNEL);
-
if (!dvbdev){
mutex_unlock(&dvbdev_register_lock);
return -ENOMEM;
}
- dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
+ /*
+ * When a device of the same type is probe()d more than once,
+ * the first allocated fops are used. This prevents memory leaks
+ * that can occur when the same device is probe()d repeatedly.
+ */
+ list_for_each_entry(node, &dvbdevfops_list, list_head) {
+ if (node->fops->owner == adap->module &&
+ node->type == type &&
+ node->template == template) {
+ dvbdevfops = node->fops;
+ break;
+ }
+ }
- if (!dvbdevfops){
- kfree (dvbdev);
- mutex_unlock(&dvbdev_register_lock);
- return -ENOMEM;
+ if (dvbdevfops == NULL) {
+ dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
+ if (!dvbdevfops) {
+ kfree(dvbdev);
+ *pdvbdev = NULL;
+ mutex_unlock(&dvbdev_register_lock);
+ return -ENOMEM;
+ }
+
+ new_node = kzalloc(sizeof(struct dvbdevfops_node), GFP_KERNEL);
+ if (!new_node) {
+ kfree(dvbdevfops);
+ kfree(dvbdev);
+ *pdvbdev = NULL;
+ mutex_unlock(&dvbdev_register_lock);
+ return -ENOMEM;
+ }
+
+ new_node->fops = dvbdevfops;
+ new_node->type = type;
+ new_node->template = template;
+ list_add_tail (&new_node->list_head, &dvbdevfops_list);
}
memcpy(dvbdev, template, sizeof(struct dvb_device));
+ kref_init(&dvbdev->ref);
dvbdev->type = type;
dvbdev->id = id;
dvbdev->adapter = adap;
dvbdev->priv = priv;
dvbdev->fops = dvbdevfops;
init_waitqueue_head (&dvbdev->wait_queue);
-
dvbdevfops->owner = adap->module;
-
list_add_tail (&dvbdev->list_head, &adap->device_list);
-
down_write(&minor_rwsem);
#ifdef CONFIG_DVB_DYNAMIC_MINORS
for (minor = 0; minor < MAX_DVB_MINORS; minor++)
if (dvb_minors[minor] == NULL)
break;
-
if (minor == MAX_DVB_MINORS) {
- kfree(dvbdevfops);
+ if (new_node) {
+ list_del (&new_node->list_head);
+ kfree(dvbdevfops);
+ kfree(new_node);
+ }
+ list_del (&dvbdev->list_head);
kfree(dvbdev);
+ *pdvbdev = NULL;
up_write(&minor_rwsem);
mutex_unlock(&dvbdev_register_lock);
return -EINVAL;
@@ -515,36 +555,49 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
#else
minor = nums2minor(adap->num, type, id);
#endif
-
dvbdev->minor = minor;
- dvb_minors[minor] = dvbdev;
+ dvb_minors[minor] = dvb_device_get(dvbdev);
up_write(&minor_rwsem);
-
ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads);
if (ret) {
pr_err("%s: dvb_register_media_device failed to create the mediagraph\n",
__func__);
-
+ if (new_node) {
+ list_del (&new_node->list_head);
+ kfree(dvbdevfops);
+ kfree(new_node);
+ }
dvb_media_device_free(dvbdev);
- kfree(dvbdevfops);
+ list_del (&dvbdev->list_head);
kfree(dvbdev);
+ *pdvbdev = NULL;
mutex_unlock(&dvbdev_register_lock);
return ret;
}
- mutex_unlock(&dvbdev_register_lock);
-
clsdev = device_create(dvb_class, adap->device,
MKDEV(DVB_MAJOR, minor),
dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id);
if (IS_ERR(clsdev)) {
pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
__func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
+ if (new_node) {
+ list_del (&new_node->list_head);
+ kfree(dvbdevfops);
+ kfree(new_node);
+ }
+ dvb_media_device_free(dvbdev);
+ list_del (&dvbdev->list_head);
+ kfree(dvbdev);
+ *pdvbdev = NULL;
+ mutex_unlock(&dvbdev_register_lock);
return PTR_ERR(clsdev);
}
+
dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
adap->num, dnames[type], id, minor, minor);
+ mutex_unlock(&dvbdev_register_lock);
return 0;
}
EXPORT_SYMBOL(dvb_register_device);
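The fops cache above keys on (owner, type, template), so a driver probed repeatedly reuses one kmemdup'ed file_operations instead of leaking a copy per probe. A sketch of the lookup, assuming the dvbdevfops_node layout implied by this hunk:

static struct file_operations *example_find_fops(struct list_head *cache,
						 struct module *owner, int type,
						 const struct dvb_device *tmpl)
{
	struct dvbdevfops_node *node;

	list_for_each_entry(node, cache, list_head)
		if (node->fops->owner == owner && node->type == type &&
		    node->template == tmpl)
			return node->fops;	/* reuse the first copy */
	return NULL;				/* caller kmemdup()s a new one */
}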
@@ -557,6 +610,7 @@ void dvb_remove_device(struct dvb_device *dvbdev)
down_write(&minor_rwsem);
dvb_minors[dvbdev->minor] = NULL;
+ dvb_device_put(dvbdev);
up_write(&minor_rwsem);
dvb_media_device_free(dvbdev);
@@ -568,21 +622,33 @@ void dvb_remove_device(struct dvb_device *dvbdev)
EXPORT_SYMBOL(dvb_remove_device);
-void dvb_free_device(struct dvb_device *dvbdev)
+static void dvb_free_device(struct kref *ref)
{
- if (!dvbdev)
- return;
+ struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref);
- kfree (dvbdev->fops);
kfree (dvbdev);
}
-EXPORT_SYMBOL(dvb_free_device);
+
+
+struct dvb_device *dvb_device_get(struct dvb_device *dvbdev)
+{
+ kref_get(&dvbdev->ref);
+ return dvbdev;
+}
+EXPORT_SYMBOL(dvb_device_get);
+
+
+void dvb_device_put(struct dvb_device *dvbdev)
+{
+ if (dvbdev)
+ kref_put(&dvbdev->ref, dvb_free_device);
+}
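With the kref conversion, every stored struct dvb_device pointer must hold its own reference; dvb_free_device() now runs only from the final put. A usage sketch (hypothetical caller):

static void example_use(struct dvb_device *dvbdev)
{
	struct dvb_device *mine = dvb_device_get(dvbdev);

	/* ... dereference mine safely here ... */

	dvb_device_put(mine);	/* frees the device on the last put */
}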
void dvb_unregister_device(struct dvb_device *dvbdev)
{
dvb_remove_device(dvbdev);
- dvb_free_device(dvbdev);
+ dvb_device_put(dvbdev);
}
EXPORT_SYMBOL(dvb_unregister_device);
@@ -1065,9 +1131,17 @@ error:
static void __exit exit_dvbdev(void)
{
+ struct dvbdevfops_node *node, *next;
+
class_destroy(dvb_class);
cdev_del(&dvb_device_cdev);
unregister_chrdev_region(MKDEV(DVB_MAJOR, 0), MAX_DVB_MINORS);
+
+ list_for_each_entry_safe(node, next, &dvbdevfops_list, list_head) {
+ list_del (&node->list_head);
+ kfree(node->fops);
+ kfree(node);
+ }
}
subsys_initcall(init_dvbdev);
diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
index 9b00b56230b6..cf8e5f1bd101 100644
--- a/drivers/media/dvb-frontends/ascot2e.c
+++ b/drivers/media/dvb-frontends/ascot2e.c
@@ -533,7 +533,7 @@ struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe,
priv->i2c_address, priv->i2c);
return fe;
}
-EXPORT_SYMBOL(ascot2e_attach);
+EXPORT_SYMBOL_GPL(ascot2e_attach);
MODULE_DESCRIPTION("Sony ASCOT2E terr/cab tuner driver");
MODULE_AUTHOR("info@netup.ru");
diff --git a/drivers/media/dvb-frontends/atbm8830.c b/drivers/media/dvb-frontends/atbm8830.c
index bdd16b9c5824..778c865085bf 100644
--- a/drivers/media/dvb-frontends/atbm8830.c
+++ b/drivers/media/dvb-frontends/atbm8830.c
@@ -489,7 +489,7 @@ error_out:
return NULL;
}
-EXPORT_SYMBOL(atbm8830_attach);
+EXPORT_SYMBOL_GPL(atbm8830_attach);
MODULE_DESCRIPTION("AltoBeam ATBM8830/8831 GB20600 demodulator driver");
MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
index 78cafdf27961..230436bf6cbd 100644
--- a/drivers/media/dvb-frontends/au8522_dig.c
+++ b/drivers/media/dvb-frontends/au8522_dig.c
@@ -879,7 +879,7 @@ error:
au8522_release_state(state);
return NULL;
}
-EXPORT_SYMBOL(au8522_attach);
+EXPORT_SYMBOL_GPL(au8522_attach);
static const struct dvb_frontend_ops au8522_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
index e92542b92d34..bc4cc8c24e1a 100644
--- a/drivers/media/dvb-frontends/bcm3510.c
+++ b/drivers/media/dvb-frontends/bcm3510.c
@@ -649,6 +649,7 @@ static int bcm3510_download_firmware(struct dvb_frontend* fe)
deb_info("firmware chunk, addr: 0x%04x, len: 0x%04x, total length: 0x%04zx\n",addr,len,fw->size);
if ((ret = bcm3510_write_ram(st,addr,&b[i+4],len)) < 0) {
err("firmware download failed: %d\n",ret);
+ release_firmware(fw);
return ret;
}
i += 4 + len;
@@ -834,7 +835,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(bcm3510_attach);
+EXPORT_SYMBOL_GPL(bcm3510_attach);
static const struct dvb_frontend_ops bcm3510_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
index b39ff516271b..1d04c0a652b2 100644
--- a/drivers/media/dvb-frontends/cx22700.c
+++ b/drivers/media/dvb-frontends/cx22700.c
@@ -432,4 +432,4 @@ MODULE_DESCRIPTION("Conexant CX22700 DVB-T Demodulator driver");
MODULE_AUTHOR("Holger Waechtler");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(cx22700_attach);
+EXPORT_SYMBOL_GPL(cx22700_attach);
diff --git a/drivers/media/dvb-frontends/cx22702.c b/drivers/media/dvb-frontends/cx22702.c
index cc6acbf6393d..61ad34b7004b 100644
--- a/drivers/media/dvb-frontends/cx22702.c
+++ b/drivers/media/dvb-frontends/cx22702.c
@@ -604,7 +604,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(cx22702_attach);
+EXPORT_SYMBOL_GPL(cx22702_attach);
static const struct dvb_frontend_ops cx22702_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
index 6f99d6a27be2..9aeea089756f 100644
--- a/drivers/media/dvb-frontends/cx24110.c
+++ b/drivers/media/dvb-frontends/cx24110.c
@@ -653,4 +653,4 @@ MODULE_DESCRIPTION("Conexant CX24110 DVB-S Demodulator driver");
MODULE_AUTHOR("Peter Hettkamp");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(cx24110_attach);
+EXPORT_SYMBOL_GPL(cx24110_attach);
diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c
index 60a9f70275f7..619df8329fbb 100644
--- a/drivers/media/dvb-frontends/cx24113.c
+++ b/drivers/media/dvb-frontends/cx24113.c
@@ -590,7 +590,7 @@ error:
return NULL;
}
-EXPORT_SYMBOL(cx24113_attach);
+EXPORT_SYMBOL_GPL(cx24113_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index ea8264ccbb4e..8b978a9f74a4 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -1133,7 +1133,7 @@ struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
state->frontend.demodulator_priv = state;
return &state->frontend;
}
-EXPORT_SYMBOL(cx24116_attach);
+EXPORT_SYMBOL_GPL(cx24116_attach);
/*
* Initialise or wake up device
diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
index 2464b63fe0cf..0fa033633f4c 100644
--- a/drivers/media/dvb-frontends/cx24120.c
+++ b/drivers/media/dvb-frontends/cx24120.c
@@ -305,7 +305,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(cx24120_attach);
+EXPORT_SYMBOL_GPL(cx24120_attach);
static int cx24120_test_rom(struct cx24120_state *state)
{
@@ -972,7 +972,9 @@ static void cx24120_set_clock_ratios(struct dvb_frontend *fe)
cmd.arg[8] = (clock_ratios_table[idx].rate >> 8) & 0xff;
cmd.arg[9] = (clock_ratios_table[idx].rate >> 0) & 0xff;
- cx24120_message_send(state, &cmd);
+ ret = cx24120_message_send(state, &cmd);
+ if (ret != 0)
+ return;
/* Calculate ber window rates for stat work */
cx24120_calculate_ber_window(state, clock_ratios_table[idx].rate);
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index 3d84ee17e54c..539889e638cc 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -1096,7 +1096,7 @@ error:
return NULL;
}
-EXPORT_SYMBOL(cx24123_attach);
+EXPORT_SYMBOL_GPL(cx24123_attach);
static const struct dvb_frontend_ops cx24123_ops = {
.delsys = { SYS_DVBS },
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index d137199e13e6..4c2d3dcfe8ea 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -536,7 +536,7 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *config,
return pdata.get_dvb_frontend(client);
}
-EXPORT_SYMBOL(cxd2820r_attach);
+EXPORT_SYMBOL_GPL(cxd2820r_attach);
static struct dvb_frontend *cxd2820r_get_dvb_frontend(struct i2c_client *client)
{
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 1b30cf570803..6b495fc36fd0 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3920,14 +3920,14 @@ struct dvb_frontend *cxd2841er_attach_s(struct cxd2841er_config *cfg,
{
return cxd2841er_attach(cfg, i2c, SYS_DVBS);
}
-EXPORT_SYMBOL(cxd2841er_attach_s);
+EXPORT_SYMBOL_GPL(cxd2841er_attach_s);
struct dvb_frontend *cxd2841er_attach_t_c(struct cxd2841er_config *cfg,
struct i2c_adapter *i2c)
{
return cxd2841er_attach(cfg, i2c, 0);
}
-EXPORT_SYMBOL(cxd2841er_attach_t_c);
+EXPORT_SYMBOL_GPL(cxd2841er_attach_t_c);
static const struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
diff --git a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
index f87e27481ea7..ea1bc9a35618 100644
--- a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
+++ b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
@@ -1950,7 +1950,7 @@ struct dvb_frontend *cxd2880_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(cxd2880_attach);
+EXPORT_SYMBOL_GPL(cxd2880_attach);
MODULE_DESCRIPTION("Sony CXD2880 DVB-T2/T tuner + demod driver");
MODULE_AUTHOR("Sony Semiconductor Solutions Corporation");
diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c
index 3b26f61785d8..c4dfcb005b59 100644
--- a/drivers/media/dvb-frontends/dib0070.c
+++ b/drivers/media/dvb-frontends/dib0070.c
@@ -755,7 +755,7 @@ free_mem:
fe->tuner_priv = NULL;
return NULL;
}
-EXPORT_SYMBOL(dib0070_attach);
+EXPORT_SYMBOL_GPL(dib0070_attach);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for the DiBcom 0070 base-band RF Tuner");
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index d13d2e81f8c9..35c5a48d8fc8 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -2631,7 +2631,7 @@ struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapte
return NULL;
}
-EXPORT_SYMBOL(dib0090_register);
+EXPORT_SYMBOL_GPL(dib0090_register);
struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config)
{
@@ -2657,7 +2657,7 @@ free_mem:
fe->tuner_priv = NULL;
return NULL;
}
-EXPORT_SYMBOL(dib0090_fw_register);
+EXPORT_SYMBOL_GPL(dib0090_fw_register);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_AUTHOR("Olivier Grenie <olivier.grenie@parrot.com>");
diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c
index 46ed0e20c8fa..282cdcf9f21b 100644
--- a/drivers/media/dvb-frontends/dib3000mb.c
+++ b/drivers/media/dvb-frontends/dib3000mb.c
@@ -815,4 +815,4 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(dib3000mb_attach);
+EXPORT_SYMBOL_GPL(dib3000mb_attach);
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
index 692600ce5f23..c69665024330 100644
--- a/drivers/media/dvb-frontends/dib3000mc.c
+++ b/drivers/media/dvb-frontends/dib3000mc.c
@@ -935,7 +935,7 @@ error:
kfree(st);
return NULL;
}
-EXPORT_SYMBOL(dib3000mc_attach);
+EXPORT_SYMBOL_GPL(dib3000mc_attach);
static const struct dvb_frontend_ops dib3000mc_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c
index e211830c9c99..7a0e06a420d9 100644
--- a/drivers/media/dvb-frontends/dib7000m.c
+++ b/drivers/media/dvb-frontends/dib7000m.c
@@ -1434,7 +1434,7 @@ error:
kfree(st);
return NULL;
}
-EXPORT_SYMBOL(dib7000m_attach);
+EXPORT_SYMBOL_GPL(dib7000m_attach);
static const struct dvb_frontend_ops dib7000m_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 0d22c700016d..fd08a851a452 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -497,7 +497,7 @@ static int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth
prediv = reg_1856 & 0x3f;
loopdiv = (reg_1856 >> 6) & 0x3f;
- if ((bw != NULL) && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) {
+ if (loopdiv && bw && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) {
dprintk("Updating pll (prediv: old = %d new = %d ; loopdiv : old = %d new = %d)\n", prediv, bw->pll_prediv, loopdiv, bw->pll_ratio);
reg_1856 &= 0xf000;
reg_1857 = dib7000p_read_word(state, 1857);
@@ -2822,7 +2822,7 @@ void *dib7000p_attach(struct dib7000p_ops *ops)
return ops;
}
-EXPORT_SYMBOL(dib7000p_attach);
+EXPORT_SYMBOL_GPL(dib7000p_attach);
static const struct dvb_frontend_ops dib7000p_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index d67f2dd997d0..02cb48223dc6 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -4527,7 +4527,7 @@ void *dib8000_attach(struct dib8000_ops *ops)
return ops;
}
-EXPORT_SYMBOL(dib8000_attach);
+EXPORT_SYMBOL_GPL(dib8000_attach);
MODULE_AUTHOR("Olivier Grenie <Olivier.Grenie@parrot.com, Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for the DiBcom 8000 ISDB-T demodulator");
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index 04d92d614279..24f7f7a7598d 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -2546,7 +2546,7 @@ error:
kfree(st);
return NULL;
}
-EXPORT_SYMBOL(dib9000_attach);
+EXPORT_SYMBOL_GPL(dib9000_attach);
static const struct dvb_frontend_ops dib9000_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 2f5af4813a74..d6ab8a79629c 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -12366,7 +12366,7 @@ error:
return NULL;
}
-EXPORT_SYMBOL(drx39xxj_attach);
+EXPORT_SYMBOL_GPL(drx39xxj_attach);
static const struct dvb_frontend_ops drx39xxj_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index fae6f3763364..4be69230dd57 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -2948,7 +2948,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(drxd_attach);
+EXPORT_SYMBOL_GPL(drxd_attach);
MODULE_DESCRIPTION("DRXD driver");
MODULE_AUTHOR("Micronas");
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 0a4875b391d9..d04ad020d6e1 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -6684,7 +6684,7 @@ static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr)
static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct drxk_state *state = fe->demodulator_priv;
- u16 err;
+ u16 err = 0;
dprintk(1, "\n");
@@ -6857,7 +6857,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(drxk_attach);
+EXPORT_SYMBOL_GPL(drxk_attach);
MODULE_DESCRIPTION("DRX-K driver");
MODULE_AUTHOR("Ralph Metzler");
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 20fcf31af165..515aa7c7baf2 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -859,7 +859,7 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
return &state->frontend;
}
-EXPORT_SYMBOL(ds3000_attach);
+EXPORT_SYMBOL_GPL(ds3000_attach);
static int ds3000_set_carrier_offset(struct dvb_frontend *fe,
s32 carrier_offset_khz)
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index d45b4ddc8f91..846bfe7ef30e 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -866,7 +866,7 @@ out:
return NULL;
}
-EXPORT_SYMBOL(dvb_pll_attach);
+EXPORT_SYMBOL_GPL(dvb_pll_attach);
static int
diff --git a/drivers/media/dvb-frontends/ec100.c b/drivers/media/dvb-frontends/ec100.c
index 03bd80666cf8..2ad0a3c2f756 100644
--- a/drivers/media/dvb-frontends/ec100.c
+++ b/drivers/media/dvb-frontends/ec100.c
@@ -299,7 +299,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(ec100_attach);
+EXPORT_SYMBOL_GPL(ec100_attach);
static const struct dvb_frontend_ops ec100_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
index 8c1310c6b0bc..c299d31dc7d2 100644
--- a/drivers/media/dvb-frontends/helene.c
+++ b/drivers/media/dvb-frontends/helene.c
@@ -1025,7 +1025,7 @@ struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
priv->i2c_address, priv->i2c);
return fe;
}
-EXPORT_SYMBOL(helene_attach_s);
+EXPORT_SYMBOL_GPL(helene_attach_s);
struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
const struct helene_config *config,
@@ -1061,7 +1061,7 @@ struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
priv->i2c_address, priv->i2c);
return fe;
}
-EXPORT_SYMBOL(helene_attach);
+EXPORT_SYMBOL_GPL(helene_attach);
static int helene_probe(struct i2c_client *client,
const struct i2c_device_id *id)
diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
index 24bf5cbcc184..0330b78a5b3f 100644
--- a/drivers/media/dvb-frontends/horus3a.c
+++ b/drivers/media/dvb-frontends/horus3a.c
@@ -395,7 +395,7 @@ struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
priv->i2c_address, priv->i2c);
return fe;
}
-EXPORT_SYMBOL(horus3a_attach);
+EXPORT_SYMBOL_GPL(horus3a_attach);
MODULE_DESCRIPTION("Sony HORUS3A satellite tuner driver");
MODULE_AUTHOR("Sergey Kozlov <serjk@netup.ru>");
diff --git a/drivers/media/dvb-frontends/isl6405.c b/drivers/media/dvb-frontends/isl6405.c
index 2cd69b4ff82c..7d28a743f97e 100644
--- a/drivers/media/dvb-frontends/isl6405.c
+++ b/drivers/media/dvb-frontends/isl6405.c
@@ -141,7 +141,7 @@ struct dvb_frontend *isl6405_attach(struct dvb_frontend *fe, struct i2c_adapter
return fe;
}
-EXPORT_SYMBOL(isl6405_attach);
+EXPORT_SYMBOL_GPL(isl6405_attach);
MODULE_DESCRIPTION("Driver for lnb supply and control ic isl6405");
MODULE_AUTHOR("Hartmut Hackmann & Oliver Endriss");
diff --git a/drivers/media/dvb-frontends/isl6421.c b/drivers/media/dvb-frontends/isl6421.c
index 43b0dfc6f453..2e9f6f12f849 100644
--- a/drivers/media/dvb-frontends/isl6421.c
+++ b/drivers/media/dvb-frontends/isl6421.c
@@ -213,7 +213,7 @@ struct dvb_frontend *isl6421_attach(struct dvb_frontend *fe, struct i2c_adapter
return fe;
}
-EXPORT_SYMBOL(isl6421_attach);
+EXPORT_SYMBOL_GPL(isl6421_attach);
MODULE_DESCRIPTION("Driver for lnb supply and control ic isl6421");
MODULE_AUTHOR("Andrew de Quincey & Oliver Endriss");
diff --git a/drivers/media/dvb-frontends/isl6423.c b/drivers/media/dvb-frontends/isl6423.c
index 8cd1bb88ce6e..a0d0a3834057 100644
--- a/drivers/media/dvb-frontends/isl6423.c
+++ b/drivers/media/dvb-frontends/isl6423.c
@@ -289,7 +289,7 @@ exit:
fe->sec_priv = NULL;
return NULL;
}
-EXPORT_SYMBOL(isl6423_attach);
+EXPORT_SYMBOL_GPL(isl6423_attach);
MODULE_DESCRIPTION("ISL6423 SEC");
MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
index 1b33478653d1..f8f362f50e78 100644
--- a/drivers/media/dvb-frontends/itd1000.c
+++ b/drivers/media/dvb-frontends/itd1000.c
@@ -389,7 +389,7 @@ struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter
return fe;
}
-EXPORT_SYMBOL(itd1000_attach);
+EXPORT_SYMBOL_GPL(itd1000_attach);
MODULE_AUTHOR("Patrick Boettcher <pb@linuxtv.org>");
MODULE_DESCRIPTION("Integrant ITD1000 driver");
diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
index 73f27105c139..3212e333d472 100644
--- a/drivers/media/dvb-frontends/ix2505v.c
+++ b/drivers/media/dvb-frontends/ix2505v.c
@@ -302,7 +302,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(ix2505v_attach);
+EXPORT_SYMBOL_GPL(ix2505v_attach);
module_param_named(debug, ix2505v_debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
index c5106a1ea1cd..fe5af2453d55 100644
--- a/drivers/media/dvb-frontends/l64781.c
+++ b/drivers/media/dvb-frontends/l64781.c
@@ -593,4 +593,4 @@ MODULE_DESCRIPTION("LSI L64781 DVB-T Demodulator driver");
MODULE_AUTHOR("Holger Waechtler, Marko Kohtala");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(l64781_attach);
+EXPORT_SYMBOL_GPL(l64781_attach);
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index 10c152f461dd..958c868132b3 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -1426,7 +1426,7 @@ struct dvb_frontend *lg2160_attach(const struct lg2160_config *config,
return &state->frontend;
}
-EXPORT_SYMBOL(lg2160_attach);
+EXPORT_SYMBOL_GPL(lg2160_attach);
MODULE_DESCRIPTION("LG Electronics LG216x ATSC/MH Demodulator Driver");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
index 62d743988919..60a97f1cc74e 100644
--- a/drivers/media/dvb-frontends/lgdt3305.c
+++ b/drivers/media/dvb-frontends/lgdt3305.c
@@ -1148,7 +1148,7 @@ fail:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(lgdt3305_attach);
+EXPORT_SYMBOL_GPL(lgdt3305_attach);
static const struct dvb_frontend_ops lgdt3304_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 6c4adec58174..0e7d97e7b0f5 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -1881,7 +1881,7 @@ fail:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(lgdt3306a_attach);
+EXPORT_SYMBOL_GPL(lgdt3306a_attach);
#ifdef DBG_DUMP
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index 651c8aa75e17..1416aedb1234 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -928,7 +928,7 @@ struct dvb_frontend *lgdt330x_attach(const struct lgdt330x_config *_config,
return lgdt330x_get_dvb_frontend(client);
}
-EXPORT_SYMBOL(lgdt330x_attach);
+EXPORT_SYMBOL_GPL(lgdt330x_attach);
static const struct dvb_frontend_ops lgdt3302_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/lgs8gxx.c b/drivers/media/dvb-frontends/lgs8gxx.c
index 30014979b985..ffaf60e16ecd 100644
--- a/drivers/media/dvb-frontends/lgs8gxx.c
+++ b/drivers/media/dvb-frontends/lgs8gxx.c
@@ -1043,7 +1043,7 @@ error_out:
return NULL;
}
-EXPORT_SYMBOL(lgs8gxx_attach);
+EXPORT_SYMBOL_GPL(lgs8gxx_attach);
MODULE_DESCRIPTION("Legend Silicon LGS8913/LGS8GXX DMB-TH demodulator driver");
MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
diff --git a/drivers/media/dvb-frontends/lnbh25.c b/drivers/media/dvb-frontends/lnbh25.c
index 9ffe06cd787d..41bec050642b 100644
--- a/drivers/media/dvb-frontends/lnbh25.c
+++ b/drivers/media/dvb-frontends/lnbh25.c
@@ -173,7 +173,7 @@ struct dvb_frontend *lnbh25_attach(struct dvb_frontend *fe,
__func__, priv->i2c_address);
return fe;
}
-EXPORT_SYMBOL(lnbh25_attach);
+EXPORT_SYMBOL_GPL(lnbh25_attach);
MODULE_DESCRIPTION("ST LNBH25 driver");
MODULE_AUTHOR("info@netup.ru");
diff --git a/drivers/media/dvb-frontends/lnbp21.c b/drivers/media/dvb-frontends/lnbp21.c
index e564974162d6..32593b1f75a3 100644
--- a/drivers/media/dvb-frontends/lnbp21.c
+++ b/drivers/media/dvb-frontends/lnbp21.c
@@ -155,7 +155,7 @@ struct dvb_frontend *lnbh24_attach(struct dvb_frontend *fe,
return lnbx2x_attach(fe, i2c, override_set, override_clear,
i2c_addr, LNBH24_TTX);
}
-EXPORT_SYMBOL(lnbh24_attach);
+EXPORT_SYMBOL_GPL(lnbh24_attach);
struct dvb_frontend *lnbp21_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, u8 override_set,
@@ -164,7 +164,7 @@ struct dvb_frontend *lnbp21_attach(struct dvb_frontend *fe,
return lnbx2x_attach(fe, i2c, override_set, override_clear,
0x08, LNBP21_ISEL);
}
-EXPORT_SYMBOL(lnbp21_attach);
+EXPORT_SYMBOL_GPL(lnbp21_attach);
MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp21, lnbh24");
MODULE_AUTHOR("Oliver Endriss, Igor M. Liplianin");
diff --git a/drivers/media/dvb-frontends/lnbp22.c b/drivers/media/dvb-frontends/lnbp22.c
index b8c7145d4cef..cb4ea5d3fad4 100644
--- a/drivers/media/dvb-frontends/lnbp22.c
+++ b/drivers/media/dvb-frontends/lnbp22.c
@@ -125,7 +125,7 @@ struct dvb_frontend *lnbp22_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(lnbp22_attach);
+EXPORT_SYMBOL_GPL(lnbp22_attach);
MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp22");
MODULE_AUTHOR("Dominik Kuhlen");
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index 3a367a585084..1f8f0e3bb195 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1284,7 +1284,7 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
*tuner_i2c_adapter = pdata.get_i2c_adapter(client);
return pdata.get_dvb_frontend(client);
}
-EXPORT_SYMBOL(m88ds3103_attach);
+EXPORT_SYMBOL_GPL(m88ds3103_attach);
static const struct dvb_frontend_ops m88ds3103_ops = {
.delsys = {SYS_DVBS, SYS_DVBS2},
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index 39cbb3ea1c9d..d1f235b472ce 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -807,7 +807,7 @@ error:
return NULL;
}
-EXPORT_SYMBOL(m88rs2000_attach);
+EXPORT_SYMBOL_GPL(m88rs2000_attach);
MODULE_DESCRIPTION("M88RS2000 DVB-S Demodulator driver");
MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c
index 3843181bba16..29d5d29d9f2c 100644
--- a/drivers/media/dvb-frontends/mb86a16.c
+++ b/drivers/media/dvb-frontends/mb86a16.c
@@ -1851,6 +1851,6 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(mb86a16_attach);
+EXPORT_SYMBOL_GPL(mb86a16_attach);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index 4e50441c247a..6bbf3fb44740 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -2089,7 +2089,7 @@ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
dev_info(&i2c->dev, "Detected a Fujitsu mb86a20s frontend\n");
return &state->frontend;
}
-EXPORT_SYMBOL(mb86a20s_attach);
+EXPORT_SYMBOL_GPL(mb86a20s_attach);
static const struct dvb_frontend_ops mb86a20s_ops = {
.delsys = { SYS_ISDBT },
diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c
index fff212c0bf3b..05894deb8a19 100644
--- a/drivers/media/dvb-frontends/mn88443x.c
+++ b/drivers/media/dvb-frontends/mn88443x.c
@@ -800,7 +800,7 @@ MODULE_DEVICE_TABLE(i2c, mn88443x_i2c_id);
static struct i2c_driver mn88443x_driver = {
.driver = {
.name = "mn88443x",
- .of_match_table = of_match_ptr(mn88443x_of_match),
+ .of_match_table = mn88443x_of_match,
},
.probe = mn88443x_probe,
.remove = mn88443x_remove,
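
Dropping of_match_ptr() is deliberate: with CONFIG_OF=n the macro expands to NULL, which disables devicetree matching and leaves mn88443x_of_match defined but unreferenced, triggering an unused-variable warning. Referencing the table directly fixes both; the alternative, sketched below with a hypothetical driver, is to keep the macro and compile the table out alongside it:

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/of.h>

#ifdef CONFIG_OF
static const struct of_device_id demo_of_match[] = {
        { .compatible = "vendor,demo-demod" },  /* illustrative string */
        { }
};
MODULE_DEVICE_TABLE(of, demo_of_match);
#endif

static struct i2c_driver demo_driver = {
        .driver = {
                .name = "demo",
                /* NULL when CONFIG_OF=n, and the table above is gone too */
                .of_match_table = of_match_ptr(demo_of_match),
        },
};
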
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index 7cae7d632030..2c01d912f1ce 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -832,7 +832,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(mt312_attach);
+EXPORT_SYMBOL_GPL(mt312_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c
index 881897583cf2..20add66b1466 100644
--- a/drivers/media/dvb-frontends/mt352.c
+++ b/drivers/media/dvb-frontends/mt352.c
@@ -593,4 +593,4 @@ MODULE_DESCRIPTION("Zarlink MT352 DVB-T Demodulator driver");
MODULE_AUTHOR("Holger Waechtler, Daniel Mack, Antonio Mancuso");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(mt352_attach);
+EXPORT_SYMBOL_GPL(mt352_attach);
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index 35b83b1dd82c..1124baf5baf4 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -1232,5 +1232,5 @@ MODULE_DESCRIPTION("NXT200X (ATSC 8VSB & ITU-T J.83 AnnexB 64/256 QAM) Demodulat
MODULE_AUTHOR("Kirk Lapray, Michael Krufky, Jean-Francois Thibert, and Taylor Jacob");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(nxt200x_attach);
+EXPORT_SYMBOL_GPL(nxt200x_attach);
diff --git a/drivers/media/dvb-frontends/nxt6000.c b/drivers/media/dvb-frontends/nxt6000.c
index 136918f82dda..e8d4940370dd 100644
--- a/drivers/media/dvb-frontends/nxt6000.c
+++ b/drivers/media/dvb-frontends/nxt6000.c
@@ -621,4 +621,4 @@ MODULE_DESCRIPTION("NxtWave NXT6000 DVB-T demodulator driver");
MODULE_AUTHOR("Florian Schirmer");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(nxt6000_attach);
+EXPORT_SYMBOL_GPL(nxt6000_attach);
diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c
index 35a3e47497c2..2ca01af5608c 100644
--- a/drivers/media/dvb-frontends/or51132.c
+++ b/drivers/media/dvb-frontends/or51132.c
@@ -605,4 +605,4 @@ MODULE_AUTHOR("Kirk Lapray");
MODULE_AUTHOR("Trent Piepho");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(or51132_attach);
+EXPORT_SYMBOL_GPL(or51132_attach);
diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c
index ddcaea5c9941..dc60482162c5 100644
--- a/drivers/media/dvb-frontends/or51211.c
+++ b/drivers/media/dvb-frontends/or51211.c
@@ -551,5 +551,5 @@ MODULE_DESCRIPTION("Oren OR51211 VSB [pcHDTV HD-2000] Demodulator Driver");
MODULE_AUTHOR("Kirk Lapray");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(or51211_attach);
+EXPORT_SYMBOL_GPL(or51211_attach);
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 6ec277421390..e5bffaaeed38 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -640,7 +640,7 @@ static int rtl2832_read_status(struct dvb_frontend *fe, enum fe_status *status)
struct i2c_client *client = dev->client;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- u32 uninitialized_var(tmp);
+ u32 tmp;
u8 u8tmp, buf[2];
u16 u16tmp;
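
The uninitialized_var() annotation was removed tree-wide because it only silences the compiler; it never makes the value defined. Here tmp is consumed solely on paths where the preceding read succeeded, so a plain declaration is correct. The safe shape of that idiom (reader is hypothetical):

#include <linux/types.h>

int demo_read_reg(u16 reg, u32 *val);  /* hypothetical, may fail */

static int demo_read_status(u32 *out)
{
        u32 tmp;        /* intentionally uninitialized */
        int ret;

        ret = demo_read_reg(0x0351, &tmp);
        if (ret)
                return ret;     /* tmp is never consumed on this path */

        *out = tmp;
        return 0;
}
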
diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c
index 3089cc174a6f..28b1dca077ea 100644
--- a/drivers/media/dvb-frontends/s5h1409.c
+++ b/drivers/media/dvb-frontends/s5h1409.c
@@ -981,7 +981,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(s5h1409_attach);
+EXPORT_SYMBOL_GPL(s5h1409_attach);
static const struct dvb_frontend_ops s5h1409_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c
index 89402916d301..7f5313bb9b43 100644
--- a/drivers/media/dvb-frontends/s5h1411.c
+++ b/drivers/media/dvb-frontends/s5h1411.c
@@ -900,7 +900,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(s5h1411_attach);
+EXPORT_SYMBOL_GPL(s5h1411_attach);
static const struct dvb_frontend_ops s5h1411_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
index 6bdec2898bc8..d700de1ea6c2 100644
--- a/drivers/media/dvb-frontends/s5h1420.c
+++ b/drivers/media/dvb-frontends/s5h1420.c
@@ -918,7 +918,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(s5h1420_attach);
+EXPORT_SYMBOL_GPL(s5h1420_attach);
static const struct dvb_frontend_ops s5h1420_ops = {
.delsys = { SYS_DVBS },
diff --git a/drivers/media/dvb-frontends/s5h1432.c b/drivers/media/dvb-frontends/s5h1432.c
index 956e8ee4b388..ff5d3bdf3bc6 100644
--- a/drivers/media/dvb-frontends/s5h1432.c
+++ b/drivers/media/dvb-frontends/s5h1432.c
@@ -355,7 +355,7 @@ struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
return &state->frontend;
}
-EXPORT_SYMBOL(s5h1432_attach);
+EXPORT_SYMBOL_GPL(s5h1432_attach);
static const struct dvb_frontend_ops s5h1432_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
index f118d8e64103..7e461ac159fc 100644
--- a/drivers/media/dvb-frontends/s921.c
+++ b/drivers/media/dvb-frontends/s921.c
@@ -495,7 +495,7 @@ struct dvb_frontend *s921_attach(const struct s921_config *config,
return &state->frontend;
}
-EXPORT_SYMBOL(s921_attach);
+EXPORT_SYMBOL_GPL(s921_attach);
static const struct dvb_frontend_ops s921_ops = {
.delsys = { SYS_ISDBT },
diff --git a/drivers/media/dvb-frontends/si21xx.c b/drivers/media/dvb-frontends/si21xx.c
index a116eff417f2..6d84a5534aba 100644
--- a/drivers/media/dvb-frontends/si21xx.c
+++ b/drivers/media/dvb-frontends/si21xx.c
@@ -938,7 +938,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(si21xx_attach);
+EXPORT_SYMBOL_GPL(si21xx_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/sp887x.c b/drivers/media/dvb-frontends/sp887x.c
index c89a91a3daf4..72f58626475c 100644
--- a/drivers/media/dvb-frontends/sp887x.c
+++ b/drivers/media/dvb-frontends/sp887x.c
@@ -626,4 +626,4 @@ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("Spase sp887x DVB-T demodulator driver");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(sp887x_attach);
+EXPORT_SYMBOL_GPL(sp887x_attach);
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 4ee6c1e1e9f7..2f4d8fb400cd 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -1638,7 +1638,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stb0899_attach);
+EXPORT_SYMBOL_GPL(stb0899_attach);
MODULE_PARM_DESC(verbose, "Set Verbosity level");
MODULE_AUTHOR("Manu Abraham");
MODULE_DESCRIPTION("STB0899 Multi-Std frontend");
diff --git a/drivers/media/dvb-frontends/stb6000.c b/drivers/media/dvb-frontends/stb6000.c
index 8c9800d577e0..d74e34677b92 100644
--- a/drivers/media/dvb-frontends/stb6000.c
+++ b/drivers/media/dvb-frontends/stb6000.c
@@ -232,7 +232,7 @@ struct dvb_frontend *stb6000_attach(struct dvb_frontend *fe, int addr,
return fe;
}
-EXPORT_SYMBOL(stb6000_attach);
+EXPORT_SYMBOL_GPL(stb6000_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index d541d6613610..9f92760256cf 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -557,7 +557,7 @@ static void stb6100_release(struct dvb_frontend *fe)
kfree(state);
}
-EXPORT_SYMBOL(stb6100_attach);
+EXPORT_SYMBOL_GPL(stb6100_attach);
MODULE_PARM_DESC(verbose, "Set Verbosity level");
MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c
index 3d54a0ec86af..a5581bd60f9e 100644
--- a/drivers/media/dvb-frontends/stv0288.c
+++ b/drivers/media/dvb-frontends/stv0288.c
@@ -440,9 +440,8 @@ static int stv0288_set_frontend(struct dvb_frontend *fe)
struct stv0288_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- char tm;
- unsigned char tda[3];
- u8 reg, time_out = 0;
+ u8 tda[3], reg, time_out = 0;
+ s8 tm;
dprintk("%s : FE_SET_FRONTEND\n", __func__);
@@ -591,7 +590,7 @@ error:
return NULL;
}
-EXPORT_SYMBOL(stv0288_attach);
+EXPORT_SYMBOL_GPL(stv0288_attach);
module_param(debug_legacy_dish_switch, int, 0444);
MODULE_PARM_DESC(debug_legacy_dish_switch,
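
Retyping tm from char to s8 matters because the signedness of plain char is implementation-defined: on arm and powerpc it is unsigned, so a tm that is meant to go negative never compares as such, and the timeout logic in stv0288_set_frontend() misbehaves there. A userspace C demonstration of the trap:

#include <stdio.h>

int main(void)
{
        char plain = -1;        /* unsigned on arm/ppc ABIs: holds 255 there */
        signed char s8_like = -1;       /* kernel s8: -1 on every arch */

        printf("plain < 0: %d, s8 < 0: %d\n", plain < 0, s8_like < 0);
        return 0;
}
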
diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c
index 6d5962d5697a..9d4dbd99a5a7 100644
--- a/drivers/media/dvb-frontends/stv0297.c
+++ b/drivers/media/dvb-frontends/stv0297.c
@@ -710,4 +710,4 @@ MODULE_DESCRIPTION("ST STV0297 DVB-C Demodulator driver");
MODULE_AUTHOR("Dennis Noermann and Andrew de Quincey");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(stv0297_attach);
+EXPORT_SYMBOL_GPL(stv0297_attach);
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index 421395ea3334..0a1b57e9e228 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -751,4 +751,4 @@ MODULE_DESCRIPTION("ST STV0299 DVB Demodulator driver");
MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Peter Schildmann, Felix Domke, Andreas Oberritter, Andrew de Quincey, Kenneth Aafly");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(stv0299_attach);
+EXPORT_SYMBOL_GPL(stv0299_attach);
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index 6c2b05fae1c5..8cbae8235b17 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -118,50 +118,32 @@ static const s32 stv0367cab_RF_LookUp2[RF_LOOKUP_TABLE2_SIZE][RF_LOOKUP_TABLE2_S
}
};
-static
-int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
+static noinline_for_stack
+int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
{
- u8 buf[MAX_XFER_SIZE];
+ u8 buf[3] = { MSB(reg), LSB(reg), data };
struct i2c_msg msg = {
.addr = state->config->demod_address,
.flags = 0,
.buf = buf,
- .len = len + 2
+ .len = 3,
};
int ret;
- if (2 + len > sizeof(buf)) {
- printk(KERN_WARNING
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
- return -EINVAL;
- }
-
-
- buf[0] = MSB(reg);
- buf[1] = LSB(reg);
- memcpy(buf + 2, data, len);
-
if (i2cdebug)
printk(KERN_DEBUG "%s: [%02x] %02x: %02x\n", __func__,
- state->config->demod_address, reg, buf[2]);
+ state->config->demod_address, reg, data);
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
printk(KERN_ERR "%s: i2c write error! ([%02x] %02x: %02x)\n",
- __func__, state->config->demod_address, reg, buf[2]);
+ __func__, state->config->demod_address, reg, data);
return (ret != 1) ? -EREMOTEIO : 0;
}
-static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
-{
- u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
-
- return stv0367_writeregs(state, reg, &tmp, 1);
-}
-
-static u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
+static noinline_for_stack
+u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
{
u8 b0[] = { 0, 0 };
u8 b1[] = { 0 };
@@ -1750,7 +1732,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stv0367ter_attach);
+EXPORT_SYMBOL_GPL(stv0367ter_attach);
static int stv0367cab_gate_ctrl(struct dvb_frontend *fe, int enable)
{
@@ -2923,7 +2905,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stv0367cab_attach);
+EXPORT_SYMBOL_GPL(stv0367cab_attach);
/*
* Functions for operation on Digital Devices hardware
@@ -3344,7 +3326,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stv0367ddb_attach);
+EXPORT_SYMBOL_GPL(stv0367ddb_attach);
MODULE_PARM_DESC(debug, "Set debug");
MODULE_PARM_DESC(i2c_debug, "Set i2c debug");
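
Collapsing stv0367_writeregs() into a fixed three-byte stv0367_writereg() and marking both register accessors noinline_for_stack keeps each call's I2C buffer out of the caller's frame: functions that perform long runs of register writes otherwise accumulate every inlined buffer at once and trip -Wframe-larger-than, especially with sanitizers enabled. The attribute in isolation (transport is hypothetical):

#include <linux/compiler.h>
#include <linux/types.h>

int demo_i2c_xfer(const u8 *buf, int len);      /* hypothetical transport */

/* noinline_for_stack: this frame, and its buffer, is never merged into
 * a caller that performs hundreds of such writes back to back. */
static noinline_for_stack int demo_writereg(u16 reg, u8 data)
{
        u8 buf[3] = { reg >> 8, reg & 0xff, data };

        return demo_i2c_xfer(buf, sizeof(buf));
}
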
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index 7d93a1617e86..eea22c1a9537 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -1957,7 +1957,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stv0900_attach);
+EXPORT_SYMBOL_GPL(stv0900_attach);
MODULE_PARM_DESC(debug, "Set debug");
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 90d24131d335..799dbefb9eef 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -5073,7 +5073,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stv090x_attach);
+EXPORT_SYMBOL_GPL(stv090x_attach);
static const struct i2c_device_id stv090x_id_table[] = {
{"stv090x", 0},
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index 963f6a896102..1cf9c095dbff 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -427,7 +427,7 @@ struct dvb_frontend *stv6110_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(stv6110_attach);
+EXPORT_SYMBOL_GPL(stv6110_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index 5012d0231652..b08c7536a69f 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -469,7 +469,7 @@ const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
dev_info(&stv6110x->i2c->dev, "Attaching STV6110x\n");
return stv6110x->devctl;
}
-EXPORT_SYMBOL(stv6110x_attach);
+EXPORT_SYMBOL_GPL(stv6110x_attach);
static const struct i2c_device_id stv6110x_id_table[] = {
{"stv6110x", 0},
diff --git a/drivers/media/dvb-frontends/tda10021.c b/drivers/media/dvb-frontends/tda10021.c
index 9fb207b41576..3bc4f0006659 100644
--- a/drivers/media/dvb-frontends/tda10021.c
+++ b/drivers/media/dvb-frontends/tda10021.c
@@ -513,4 +513,4 @@ MODULE_DESCRIPTION("Philips TDA10021 DVB-C demodulator driver");
MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Markus Schulz");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(tda10021_attach);
+EXPORT_SYMBOL_GPL(tda10021_attach);
diff --git a/drivers/media/dvb-frontends/tda10023.c b/drivers/media/dvb-frontends/tda10023.c
index 8f32edf6b700..4c2541ecd743 100644
--- a/drivers/media/dvb-frontends/tda10023.c
+++ b/drivers/media/dvb-frontends/tda10023.c
@@ -594,4 +594,4 @@ MODULE_DESCRIPTION("Philips TDA10023 DVB-C demodulator driver");
MODULE_AUTHOR("Georg Acher, Hartmut Birr");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(tda10023_attach);
+EXPORT_SYMBOL_GPL(tda10023_attach);
diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
index d1d206ebdedd..f1d5e77d5dcc 100644
--- a/drivers/media/dvb-frontends/tda10048.c
+++ b/drivers/media/dvb-frontends/tda10048.c
@@ -1138,7 +1138,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(tda10048_attach);
+EXPORT_SYMBOL_GPL(tda10048_attach);
static const struct dvb_frontend_ops tda10048_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
index 83a798ca9b00..6f306db6c615 100644
--- a/drivers/media/dvb-frontends/tda1004x.c
+++ b/drivers/media/dvb-frontends/tda1004x.c
@@ -1378,5 +1378,5 @@ MODULE_DESCRIPTION("Philips TDA10045H & TDA10046H DVB-T Demodulator");
MODULE_AUTHOR("Andrew de Quincey & Robert Schlabbach");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(tda10045_attach);
-EXPORT_SYMBOL(tda10046_attach);
+EXPORT_SYMBOL_GPL(tda10045_attach);
+EXPORT_SYMBOL_GPL(tda10046_attach);
diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c
index be6b40138f6e..3f51527c86b8 100644
--- a/drivers/media/dvb-frontends/tda10086.c
+++ b/drivers/media/dvb-frontends/tda10086.c
@@ -764,4 +764,4 @@ MODULE_DESCRIPTION("Philips TDA10086 DVB-S Demodulator");
MODULE_AUTHOR("Andrew de Quincey");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(tda10086_attach);
+EXPORT_SYMBOL_GPL(tda10086_attach);
diff --git a/drivers/media/dvb-frontends/tda665x.c b/drivers/media/dvb-frontends/tda665x.c
index 13e8969da7f8..346be5011fb7 100644
--- a/drivers/media/dvb-frontends/tda665x.c
+++ b/drivers/media/dvb-frontends/tda665x.c
@@ -227,7 +227,7 @@ struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(tda665x_attach);
+EXPORT_SYMBOL_GPL(tda665x_attach);
MODULE_DESCRIPTION("TDA665x driver");
MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c
index 5be11fd65e3b..9fc16e917f34 100644
--- a/drivers/media/dvb-frontends/tda8083.c
+++ b/drivers/media/dvb-frontends/tda8083.c
@@ -481,4 +481,4 @@ MODULE_DESCRIPTION("Philips TDA8083 DVB-S Demodulator");
MODULE_AUTHOR("Ralph Metzler, Holger Waechtler");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(tda8083_attach);
+EXPORT_SYMBOL_GPL(tda8083_attach);
diff --git a/drivers/media/dvb-frontends/tda8261.c b/drivers/media/dvb-frontends/tda8261.c
index 0d576d41c67d..8b06f92745dc 100644
--- a/drivers/media/dvb-frontends/tda8261.c
+++ b/drivers/media/dvb-frontends/tda8261.c
@@ -188,7 +188,7 @@ exit:
return NULL;
}
-EXPORT_SYMBOL(tda8261_attach);
+EXPORT_SYMBOL_GPL(tda8261_attach);
MODULE_AUTHOR("Manu Abraham");
MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner");
diff --git a/drivers/media/dvb-frontends/tda826x.c b/drivers/media/dvb-frontends/tda826x.c
index f9703a1dd758..eafcf5f7da3d 100644
--- a/drivers/media/dvb-frontends/tda826x.c
+++ b/drivers/media/dvb-frontends/tda826x.c
@@ -164,7 +164,7 @@ struct dvb_frontend *tda826x_attach(struct dvb_frontend *fe, int addr, struct i2
return fe;
}
-EXPORT_SYMBOL(tda826x_attach);
+EXPORT_SYMBOL_GPL(tda826x_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index 6c24d6d0d4c9..06dedd42e7b1 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -525,7 +525,7 @@ struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(ts2020_attach);
+EXPORT_SYMBOL_GPL(ts2020_attach);
/*
* We implement own regmap locking due to legacy DVB attach which uses frontend
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index 2483f614d0e7..41dd9b6d3190 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -186,7 +186,7 @@ struct dvb_frontend *tua6100_attach(struct dvb_frontend *fe, int addr, struct i2
fe->tuner_priv = priv;
return fe;
}
-EXPORT_SYMBOL(tua6100_attach);
+EXPORT_SYMBOL_GPL(tua6100_attach);
MODULE_DESCRIPTION("DVB tua6100 driver");
MODULE_AUTHOR("Andrew de Quincey");
diff --git a/drivers/media/dvb-frontends/ves1820.c b/drivers/media/dvb-frontends/ves1820.c
index 9df14d0be1c1..ee5620e731e9 100644
--- a/drivers/media/dvb-frontends/ves1820.c
+++ b/drivers/media/dvb-frontends/ves1820.c
@@ -434,4 +434,4 @@ MODULE_DESCRIPTION("VLSI VES1820 DVB-C Demodulator driver");
MODULE_AUTHOR("Ralph Metzler, Holger Waechtler");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(ves1820_attach);
+EXPORT_SYMBOL_GPL(ves1820_attach);
diff --git a/drivers/media/dvb-frontends/ves1x93.c b/drivers/media/dvb-frontends/ves1x93.c
index b74727286302..c60e21d26b88 100644
--- a/drivers/media/dvb-frontends/ves1x93.c
+++ b/drivers/media/dvb-frontends/ves1x93.c
@@ -540,4 +540,4 @@ MODULE_DESCRIPTION("VLSI VES1x93 DVB-S Demodulator driver");
MODULE_AUTHOR("Ralph Metzler");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(ves1x93_attach);
+EXPORT_SYMBOL_GPL(ves1x93_attach);
diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
index d392c7cce2ce..7ba575e9c55f 100644
--- a/drivers/media/dvb-frontends/zl10036.c
+++ b/drivers/media/dvb-frontends/zl10036.c
@@ -496,7 +496,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(zl10036_attach);
+EXPORT_SYMBOL_GPL(zl10036_attach);
module_param_named(debug, zl10036_debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
index 1335bf78d5b7..a3e4d219400c 100644
--- a/drivers/media/dvb-frontends/zl10039.c
+++ b/drivers/media/dvb-frontends/zl10039.c
@@ -295,7 +295,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(zl10039_attach);
+EXPORT_SYMBOL_GPL(zl10039_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
index 2fc6aea580f9..a889d1cfe3f4 100644
--- a/drivers/media/dvb-frontends/zl10353.c
+++ b/drivers/media/dvb-frontends/zl10353.c
@@ -665,4 +665,4 @@ MODULE_DESCRIPTION("Zarlink ZL10353 DVB-T demodulator driver");
MODULE_AUTHOR("Chris Pascoe");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(zl10353_attach);
+EXPORT_SYMBOL_GPL(zl10353_attach);
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index 7a49651f4d1f..d7d85edeedd5 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -314,18 +314,18 @@ static int ad5820_probe(struct i2c_client *client,
ret = media_entity_pads_init(&coil->subdev.entity, 0, NULL);
if (ret < 0)
- goto cleanup2;
+ goto clean_mutex;
ret = v4l2_async_register_subdev(&coil->subdev);
if (ret < 0)
- goto cleanup;
+ goto clean_entity;
return ret;
-cleanup2:
- mutex_destroy(&coil->power_lock);
-cleanup:
+clean_entity:
media_entity_cleanup(&coil->subdev.entity);
+clean_mutex:
+ mutex_destroy(&coil->power_lock);
return ret;
}
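
The relabeled ad5820 error path now unwinds in strict reverse order of initialization: a failed entity init only destroys the mutex, while a failed async registration cleans up the entity first and the mutex last. The canonical goto ladder, with hypothetical steps:

int step_a(void);       /* e.g. a mutex_init() equivalent */
int step_b(void);       /* e.g. media_entity_pads_init() */
int step_c(void);       /* e.g. v4l2_async_register_subdev() */
void undo_a(void);
void undo_b(void);

static int demo_probe(void)
{
        int ret;

        ret = step_a();
        if (ret)
                return ret;

        ret = step_b();
        if (ret)
                goto clean_a;

        ret = step_c();
        if (ret)
                goto clean_b;

        return 0;

clean_b:
        undo_b();
clean_a:
        undo_a();
        return ret;
}
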
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 86267e01c251..d9accfb84011 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -52,6 +52,7 @@ MODULE_LICENSE("GPL v2");
#define ADV7511_MAX_HEIGHT 1200
#define ADV7511_MIN_PIXELCLOCK 20000000
#define ADV7511_MAX_PIXELCLOCK 225000000
+#define XYLON_LOGICVC_INTG
#define ADV7511_MAX_ADDRS (3)
@@ -79,7 +80,50 @@ struct adv7511_state_edid {
bool complete;
};
+struct adv7511_in_params {
+ uint8_t input_id;
+ uint8_t input_style;
+ uint8_t input_color_depth;
+ uint8_t bit_justification;
+ uint8_t hsync_polarity;
+ uint8_t vsync_polarity;
+ uint8_t clock_delay;
+};
+
+struct adv7511_csc_coeff {
+ uint16_t a1;
+ uint16_t a2;
+ uint16_t a3;
+ uint16_t a4;
+ uint16_t b1;
+ uint16_t b2;
+ uint16_t b3;
+ uint16_t b4;
+ uint16_t c1;
+ uint16_t c2;
+ uint16_t c3;
+ uint16_t c4;
+};
+
+struct adv7511_out_params {
+ bool hdmi_mode;
+ uint8_t output_format;
+ uint8_t output_color_space;
+ uint8_t up_conversion;
+ uint8_t csc_enable;
+ uint8_t csc_scaling_factor;
+ struct adv7511_csc_coeff csc_coeff;
+};
+
+struct adv7511_config {
+ struct adv7511_in_params in_params;
+ struct adv7511_out_params out_params;
+ bool embedded_sync;
+ bool loaded;
+};
+
struct adv7511_state {
+ struct adv7511_config cfg;
struct adv7511_platform_data pdata;
struct v4l2_subdev sd;
struct media_pad pad;
@@ -379,6 +423,10 @@ static void adv7511_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l
{
struct adv7511_state *state = get_adv7511_state(sd);
+#ifdef XYLON_LOGICVC_INTG
+ return;
+#endif
+
/* Only makes sense for RGB formats */
if (state->fmt_code != MEDIA_BUS_FMT_RGB888_1X24) {
/* so just keep quantization */
@@ -1527,34 +1575,278 @@ static void adv7511_audio_setup(struct v4l2_subdev *sd)
adv7511_s_routing(sd, 0, 0, 0);
}
-/* Configure hdmi transmitter. */
-static void adv7511_setup(struct v4l2_subdev *sd)
+static void adv7511_set_ofdt_config(struct v4l2_subdev *sd)
{
struct adv7511_state *state = get_adv7511_state(sd);
+ struct adv7511_config *config = &state->cfg;
+ uint8_t val_mask, val;
v4l2_dbg(1, debug, sd, "%s\n", __func__);
- /* Input format: RGB 4:4:4 */
- adv7511_wr_and_or(sd, 0x15, 0xf0, 0x0);
- /* Output format: RGB 4:4:4 */
- adv7511_wr_and_or(sd, 0x16, 0x7f, 0x0);
- /* 1st order interpolation 4:2:2 -> 4:4:4 up conversion, Aspect ratio: 16:9 */
- adv7511_wr_and_or(sd, 0x17, 0xf9, 0x06);
- /* Disable pixel repetition */
- adv7511_wr_and_or(sd, 0x3b, 0x9f, 0x0);
- /* Disable CSC */
- adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
- /* Output format: RGB 4:4:4, Active Format Information is valid,
- * underscanned */
- adv7511_wr_and_or(sd, 0x55, 0x9c, 0x12);
- /* AVI Info frame packet enable, Audio Info frame disable */
+ /* Input format */
+ val_mask = 0;
+ switch (config->in_params.input_id) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ case 2:
+ val = 0x02;
+ config->embedded_sync = true;
+ break;
+ case 3:
+ val = 0x03;
+ break;
+ case 4:
+ val = 0x04;
+ config->embedded_sync = true;
+ break;
+ case 5:
+ val = 0x05;
+ break;
+ case 6:
+ val = 0x06;
+ break;
+ case 7:
+ val = 0x07;
+ break;
+ case 8:
+ val = 0x08;
+ config->embedded_sync = true;
+ break;
+ }
+ val_mask |= val;
+ adv7511_wr(sd, 0x15, val_mask);
+
+ /* Output format */
+ val_mask = 0;
+ switch (config->out_params.output_color_space) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ }
+ val_mask |= (val << 0);
+ switch (config->in_params.input_style) {
+ case 1:
+ val = 0x02;
+ break;
+ case 2:
+ val = 0x01;
+ break;
+ case 3:
+ val = 0x03;
+ break;
+ default:
+ val = 0x00;
+ break;
+ }
+ val_mask |= (val << 2);
+ switch (config->in_params.input_color_depth) {
+ case 8:
+ val = 0x03;
+ break;
+ case 10:
+ val = 0x01;
+ break;
+ case 12:
+ val = 0x02;
+ break;
+ default:
+ val = 0x00;
+ break;
+ }
+ val_mask |= (val << 4);
+ switch (config->out_params.output_format) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ }
+ val_mask |= (val << 7);
+ adv7511_wr(sd, 0x16, val_mask);
+
+ /* H, V sync polarity, interpolation style */
+ val_mask = 0;
+ switch (config->out_params.up_conversion) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ }
+ val_mask |= (val << 2);
+ switch (config->in_params.hsync_polarity) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ }
+ val_mask |= (val << 5);
+ switch (config->in_params.vsync_polarity) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ }
+ val_mask |= (val << 6);
+ adv7511_wr(sd, 0x17, val_mask);
+
+ /* CSC mode, CSC coefficients */
+ if (config->out_params.csc_enable) {
+ switch (config->out_params.csc_scaling_factor) {
+ case 1:
+ val = 0x00;
+ break;
+ case 2:
+ val = 0x01;
+ break;
+ case 4:
+ default:
+ val = 0x02;
+ break;
+ }
+ adv7511_csc_conversion_mode(sd, val);
+ adv7511_csc_coeff(sd,
+ config->out_params.csc_coeff.a1,
+ config->out_params.csc_coeff.a2,
+ config->out_params.csc_coeff.a3,
+ config->out_params.csc_coeff.a4,
+ config->out_params.csc_coeff.b1,
+ config->out_params.csc_coeff.b2,
+ config->out_params.csc_coeff.b3,
+ config->out_params.csc_coeff.b4,
+ config->out_params.csc_coeff.c1,
+ config->out_params.csc_coeff.c2,
+ config->out_params.csc_coeff.c3,
+ config->out_params.csc_coeff.c4);
+ /* enable CSC */
+ adv7511_wr_and_or(sd, 0x18, 0x7f, 0x80);
+ /* AVI infoframe: Limited range RGB (16-235) */
+ adv7511_wr_and_or(sd, 0x57, 0xf3, 0x04);
+ }
+
+ /* AVI Info, Audio Info */
adv7511_wr_and_or(sd, 0x44, 0xe7, 0x10);
- /* Colorimetry, Active format aspect ratio: same as picure. */
- adv7511_wr(sd, 0x56, 0xa8);
- /* No encryption */
- adv7511_wr_and_or(sd, 0xaf, 0xed, 0x0);
- /* Positive clk edge capture for input video clock */
- adv7511_wr_and_or(sd, 0xba, 0x1f, 0x60);
+ /* Video input justification */
+ val_mask = 0;
+ switch (config->in_params.bit_justification) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ case 2:
+ val = 0x02;
+ break;
+ }
+ val_mask |= (val << 3);
+ adv7511_wr(sd, 0x48, val_mask);
+
+ /* Output format */
+ val_mask = 0x00;
+ if (config->out_params.output_format == 1) {
+ if (config->out_params.output_color_space == 0)
+ val_mask = 0x02;
+ else if (config->out_params.output_color_space == 1)
+ val_mask = 0x01;
+ }
+ val_mask <<= 5;
+ adv7511_wr(sd, 0x55, val_mask);
+
+ /* Picture format aspect ratio */
+ adv7511_wr(sd, 0x56, 0x28);
+
+ /* HDCP, Frame encryption, HDMI/DVI */
+ val_mask = 0x04;
+ if (config->out_params.hdmi_mode)
+ val_mask |= 0x02;
+ adv7511_wr(sd, 0xaf, val_mask);
+
+ /* Capture for input video clock */
+ val_mask = 0;
+ switch (config->in_params.clock_delay) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ case 2:
+ val = 0x02;
+ break;
+ case 3:
+ val = 0x03;
+ break;
+ case 4:
+ val = 0x04;
+ break;
+ case 5:
+ val = 0x05;
+ break;
+ case 6:
+ val = 0x06;
+ break;
+ case 7:
+ val = 0x07;
+ break;
+ }
+ val_mask |= (val << 5);
+ adv7511_wr_and_or(sd, 0xba, 0x1f, val_mask);
+}
+
+/* Configure hdmi transmitter. */
+static void adv7511_setup(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ v4l2_dbg(1, debug, sd, "%s\n", __func__);
+
+ if (!state->cfg.loaded) {
+ /* Input format: RGB 4:4:4 */
+ adv7511_wr_and_or(sd, 0x15, 0xf0, 0x0);
+ /* Output format: RGB 4:4:4 */
+ adv7511_wr_and_or(sd, 0x16, 0x7f, 0x0);
+ /* 1st order interpolation 4:2:2 -> 4:4:4 up conversion, Aspect ratio: 16:9 */
+ adv7511_wr_and_or(sd, 0x17, 0xf9, 0x06);
+ /* Disable pixel repetition */
+ adv7511_wr_and_or(sd, 0x3b, 0x9f, 0x0);
+ /* Disable CSC */
+ adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
+ /* Output format: RGB 4:4:4, Active Format Information is valid,
+ * underscanned */
+ adv7511_wr_and_or(sd, 0x55, 0x9c, 0x12);
+ /* AVI Info frame packet enable, Audio Info frame disable */
+ adv7511_wr_and_or(sd, 0x44, 0xe7, 0x10);
+ /* Colorimetry, Active format aspect ratio: same as picture. */
+ adv7511_wr(sd, 0x56, 0xa8);
+ /* No encryption */
+ adv7511_wr_and_or(sd, 0xaf, 0xed, 0x0);
+
+ /* Positive clk edge capture for input video clock */
+ adv7511_wr_and_or(sd, 0xba, 0x1f, 0x60);
+ } else {
+ adv7511_set_ofdt_config(sd);
+ }
adv7511_audio_setup(sd);
@@ -1792,6 +2084,181 @@ static void adv7511_init_setup(struct v4l2_subdev *sd)
adv7511_cec_write(sd, 0x4e, ratio << 2);
}
+
+static void adv7511_get_ofdt_config(struct i2c_client *client,
+ struct adv7511_state *state)
+{
+ struct device_node *dn = client->dev.of_node;
+ struct device_node *np;
+ struct adv7511_config *config = &state->cfg;
+ u32 const *prop;
+ int size;
+ bool vin_loaded, vout_loaded;
+
+ vin_loaded = vout_loaded = false;
+
+ prop = of_get_property(dn, "edid-addr", &size);
+ if (prop)
+ state->pdata.i2c_edid = (uint8_t)be32_to_cpup(prop);
+
+ prop = of_get_property(dn, "pktmem-addr", &size);
+ if (prop)
+ state->pdata.i2c_pktmem = (uint8_t)be32_to_cpup(prop);
+
+ prop = of_get_property(dn, "cec-addr", &size);
+ if (prop)
+ state->pdata.i2c_cec = (uint8_t)be32_to_cpup(prop);
+
+ np = of_find_node_by_name(dn, "video-input");
+ if (np) {
+ prop = of_get_property(np, "input-id", &size);
+ if (prop)
+ config->in_params.input_id =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "input-style", &size);
+ if (prop)
+ config->in_params.input_style =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "input-color-depth", &size);
+ if (prop)
+ config->in_params.input_color_depth =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "bit-justification", &size);
+ if (prop)
+ config->in_params.bit_justification =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "hsync-polarity", &size);
+ if (prop)
+ config->in_params.hsync_polarity =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "vsync-polarity", &size);
+ if (prop)
+ config->in_params.vsync_polarity =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "clock-delay", &size);
+ if (prop)
+ config->in_params.clock_delay =
+ (uint8_t)be32_to_cpup(prop);
+ vin_loaded = true;
+ } else {
+ pr_info("No video input configuration, using device default\n");
+ }
+
+ np = of_find_node_by_name(dn, "video-output");
+ if (np) {
+ prop = of_get_property(np, "hdmi-mode", &size);
+ if (prop) {
+ if (be32_to_cpup(prop) == 1)
+ config->out_params.hdmi_mode = true;
+ }
+ prop = of_get_property(np, "output-format", &size);
+ if (prop)
+ config->out_params.output_format =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "output-color-space", &size);
+ if (prop)
+ config->out_params.output_color_space =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "up-conversion", &size);
+ if (prop)
+ config->out_params.up_conversion =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "csc-enable", &size);
+ if (prop)
+ config->out_params.csc_enable =
+ (uint8_t)be32_to_cpup(prop);
+ if (config->out_params.csc_enable) {
+ prop = of_get_property(np, "csc-scaling-factor", &size);
+ if (prop) {
+ config->out_params.csc_scaling_factor =
+ (uint8_t)be32_to_cpup(prop);
+ }
+ np = of_find_node_by_name(dn, "csc-coefficients");
+ if (np) {
+ prop = of_get_property(np, "a1", &size);
+ if (prop) {
+ config->out_params.csc_coeff.a1 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "a2", &size);
+ if (prop) {
+ config->out_params.csc_coeff.a2 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "a3", &size);
+ if (prop) {
+ config->out_params.csc_coeff.a3 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "a4", &size);
+ if (prop) {
+ config->out_params.csc_coeff.a4 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "b1", &size);
+ if (prop) {
+ config->out_params.csc_coeff.b1 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "b2", &size);
+ if (prop) {
+ config->out_params.csc_coeff.b2 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "b3", &size);
+ if (prop) {
+ config->out_params.csc_coeff.b3 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "b4", &size);
+ if (prop) {
+ config->out_params.csc_coeff.b4 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "c1", &size);
+ if (prop) {
+ config->out_params.csc_coeff.c1 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "c2", &size);
+ if (prop) {
+ config->out_params.csc_coeff.c2 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "c3", &size);
+ if (prop) {
+ config->out_params.csc_coeff.c3 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "c4", &size);
+ if (prop) {
+ config->out_params.csc_coeff.c4 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ } else {
+ pr_info("No CSC coefficients, using default\n");
+ }
+ }
+ vout_loaded = true;
+ } else {
+ pr_info("No video output configuration, using device default\n");
+ }
+
+ if (vin_loaded && vout_loaded)
+ config->loaded = true;
+}
+
+struct v4l2_subdev *adv7511_subdev(struct v4l2_subdev *sd)
+{
+ static struct v4l2_subdev *subdev;
+
+ if (sd)
+ subdev = sd;
+
+ return subdev;
+}
+EXPORT_SYMBOL(adv7511_subdev);
+
static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct adv7511_state *state;
@@ -1809,11 +2276,17 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
if (!state)
return -ENOMEM;
- /* Platform data */
- if (!pdata) {
- v4l_err(client, "No platform data!\n");
- return -ENODEV;
+ if (client->dev.of_node) {
+ adv7511_get_ofdt_config(client, state);
+ } else {
+ /* Platform data */
+ if (!pdata) {
+ v4l_err(client, "No platform data!\n");
+ return -ENODEV;
+ }
+ memcpy(&state->pdata, pdata, sizeof(state->pdata));
}
+
- memcpy(&state->pdata, pdata, sizeof(state->pdata));
state->fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
state->colorspace = V4L2_COLORSPACE_SRGB;
@@ -1824,6 +2297,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
client->addr << 1);
v4l2_i2c_subdev_init(sd, client, &adv7511_ops);
+ adv7511_subdev(sd);
sd->internal_ops = &adv7511_int_ops;
hdl = &state->hdl;
@@ -1917,7 +2391,9 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
INIT_DELAYED_WORK(&state->edid_handler, adv7511_edid_handler);
+#ifndef XYLON_LOGICVC_INTG
adv7511_init_setup(sd);
+#endif
#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
state->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
@@ -1931,8 +2407,9 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
#endif
adv7511_set_isr(sd, true);
+#ifndef XYLON_LOGICVC_INTG
adv7511_check_monitor_present_status(sd);
-
+#endif
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
client->addr << 1, client->adapter->name);
return 0;
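
The devicetree parsing added above leans on raw of_get_property()/be32_to_cpup() pairs for every scalar; an equivalent and terser shape for each optional property is of_property_read_u32() with a bounce variable, sketched here (the helper is illustrative, the property names follow the code above):

#include <linux/of.h>
#include <linux/types.h>

/* Read one optional u32 property into a u8 field, leaving the field
 * untouched when the property is absent. */
static void demo_read_u8_prop(struct device_node *np, const char *name,
                              u8 *field)
{
        u32 val;

        if (!of_property_read_u32(np, name, &val))
                *field = val;
}

With it, the video-input block reduces to calls such as demo_read_u8_prop(np, "input-id", &config->in_params.input_id).
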
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 21666d705e37..dcf9e4d4ee6b 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -488,7 +488,7 @@ static enum m5mols_restype __find_restype(u32 code)
do {
if (code == m5mols_default_ffmt[type].code)
return type;
- } while (type++ != SIZE_DEFAULT_FFMT);
+ } while (++type != SIZE_DEFAULT_FFMT);
return 0;
}
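
The m5mols change is a textbook off-by-one: with post-increment the loop dereferenced m5mols_default_ffmt[type] one final time after type had already reached SIZE_DEFAULT_FFMT, reading past the array; pre-increment tests the bound before the next access. Reduced to its core:

#define N 4

static const int table[N] = { 10, 20, 30, 40 };

/* Returns the index of 'code', or 0 as a fallback, mirroring the
 * driver's convention. Pre-increment keeps every table[i] in bounds. */
static int find_entry(int code)
{
        int i = 0;

        do {
                if (table[i] == code)
                        return i;
        } while (++i != N);     /* with i++ the body would read table[N] */

        return 0;
}
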
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
index 59cdbc33658c..731a60f6a59a 100644
--- a/drivers/media/i2c/ov2680.c
+++ b/drivers/media/i2c/ov2680.c
@@ -85,15 +85,8 @@ struct ov2680_mode_info {
struct ov2680_ctrls {
struct v4l2_ctrl_handler handler;
- struct {
- struct v4l2_ctrl *auto_exp;
- struct v4l2_ctrl *exposure;
- };
- struct {
- struct v4l2_ctrl *auto_gain;
- struct v4l2_ctrl *gain;
- };
-
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *gain;
struct v4l2_ctrl *hflip;
struct v4l2_ctrl *vflip;
struct v4l2_ctrl *test_pattern;
@@ -143,6 +136,7 @@ static const struct reg_value ov2680_setting_30fps_QUXGA_800_600[] = {
{0x380e, 0x02}, {0x380f, 0x84}, {0x3811, 0x04}, {0x3813, 0x04},
{0x3814, 0x31}, {0x3815, 0x31}, {0x3820, 0xc0}, {0x4008, 0x00},
{0x4009, 0x03}, {0x4837, 0x1e}, {0x3501, 0x4e}, {0x3502, 0xe0},
+ {0x3503, 0x03},
};
static const struct reg_value ov2680_setting_30fps_720P_1280_720[] = {
@@ -321,70 +315,49 @@ static void ov2680_power_down(struct ov2680_dev *sensor)
usleep_range(5000, 10000);
}
-static int ov2680_bayer_order(struct ov2680_dev *sensor)
+static void ov2680_set_bayer_order(struct ov2680_dev *sensor)
{
- u32 format1;
- u32 format2;
- u32 hv_flip;
- int ret;
-
- ret = ov2680_read_reg(sensor, OV2680_REG_FORMAT1, &format1);
- if (ret < 0)
- return ret;
+ int hv_flip = 0;
- ret = ov2680_read_reg(sensor, OV2680_REG_FORMAT2, &format2);
- if (ret < 0)
- return ret;
+ if (sensor->ctrls.vflip && sensor->ctrls.vflip->val)
+ hv_flip += 1;
- hv_flip = (format2 & BIT(2) << 1) | (format1 & BIT(2));
+ if (sensor->ctrls.hflip && sensor->ctrls.hflip->val)
+ hv_flip += 2;
sensor->fmt.code = ov2680_hv_flip_bayer_order[hv_flip];
-
- return 0;
}
-static int ov2680_vflip_enable(struct ov2680_dev *sensor)
+static int ov2680_set_vflip(struct ov2680_dev *sensor, s32 val)
{
int ret;
- ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, BIT(2), BIT(2));
- if (ret < 0)
- return ret;
-
- return ov2680_bayer_order(sensor);
-}
-
-static int ov2680_vflip_disable(struct ov2680_dev *sensor)
-{
- int ret;
+ if (sensor->is_streaming)
+ return -EBUSY;
- ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, BIT(2), BIT(0));
+ ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1,
+ BIT(2), val ? BIT(2) : 0);
if (ret < 0)
return ret;
- return ov2680_bayer_order(sensor);
+ ov2680_set_bayer_order(sensor);
+ return 0;
}
-static int ov2680_hflip_enable(struct ov2680_dev *sensor)
+static int ov2680_set_hflip(struct ov2680_dev *sensor, s32 val)
{
int ret;
- ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2, BIT(2), BIT(2));
- if (ret < 0)
- return ret;
-
- return ov2680_bayer_order(sensor);
-}
-
-static int ov2680_hflip_disable(struct ov2680_dev *sensor)
-{
- int ret;
+ if (sensor->is_streaming)
+ return -EBUSY;
- ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2, BIT(2), BIT(0));
+ ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2,
+ BIT(2), val ? BIT(2) : 0);
if (ret < 0)
return ret;
- return ov2680_bayer_order(sensor);
+ ov2680_set_bayer_order(sensor);
+ return 0;
}
static int ov2680_test_pattern_set(struct ov2680_dev *sensor, int value)
@@ -405,69 +378,15 @@ static int ov2680_test_pattern_set(struct ov2680_dev *sensor, int value)
return 0;
}
-static int ov2680_gain_set(struct ov2680_dev *sensor, bool auto_gain)
+static int ov2680_gain_set(struct ov2680_dev *sensor, u32 gain)
{
- struct ov2680_ctrls *ctrls = &sensor->ctrls;
- u32 gain;
- int ret;
-
- ret = ov2680_mod_reg(sensor, OV2680_REG_R_MANUAL, BIT(1),
- auto_gain ? 0 : BIT(1));
- if (ret < 0)
- return ret;
-
- if (auto_gain || !ctrls->gain->is_new)
- return 0;
-
- gain = ctrls->gain->val;
-
- ret = ov2680_write_reg16(sensor, OV2680_REG_GAIN_PK, gain);
-
- return 0;
+ return ov2680_write_reg16(sensor, OV2680_REG_GAIN_PK, gain);
}
-static int ov2680_gain_get(struct ov2680_dev *sensor)
+static int ov2680_exposure_set(struct ov2680_dev *sensor, u32 exp)
{
- u32 gain;
- int ret;
-
- ret = ov2680_read_reg16(sensor, OV2680_REG_GAIN_PK, &gain);
- if (ret)
- return ret;
-
- return gain;
-}
-
-static int ov2680_exposure_set(struct ov2680_dev *sensor, bool auto_exp)
-{
- struct ov2680_ctrls *ctrls = &sensor->ctrls;
- u32 exp;
- int ret;
-
- ret = ov2680_mod_reg(sensor, OV2680_REG_R_MANUAL, BIT(0),
- auto_exp ? 0 : BIT(0));
- if (ret < 0)
- return ret;
-
- if (auto_exp || !ctrls->exposure->is_new)
- return 0;
-
- exp = (u32)ctrls->exposure->val;
- exp <<= 4;
-
- return ov2680_write_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH, exp);
-}
-
-static int ov2680_exposure_get(struct ov2680_dev *sensor)
-{
- int ret;
- u32 exp;
-
- ret = ov2680_read_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH, &exp);
- if (ret)
- return ret;
-
- return exp >> 4;
+ return ov2680_write_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH,
+ exp << 4);
}
static int ov2680_stream_enable(struct ov2680_dev *sensor)
@@ -482,33 +401,17 @@ static int ov2680_stream_disable(struct ov2680_dev *sensor)
static int ov2680_mode_set(struct ov2680_dev *sensor)
{
- struct ov2680_ctrls *ctrls = &sensor->ctrls;
int ret;
- ret = ov2680_gain_set(sensor, false);
+ ret = ov2680_load_regs(sensor, sensor->current_mode);
if (ret < 0)
return ret;
- ret = ov2680_exposure_set(sensor, false);
+ /* Restore value of all ctrls */
+ ret = __v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
if (ret < 0)
return ret;
- ret = ov2680_load_regs(sensor, sensor->current_mode);
- if (ret < 0)
- return ret;
-
- if (ctrls->auto_gain->val) {
- ret = ov2680_gain_set(sensor, true);
- if (ret < 0)
- return ret;
- }
-
- if (ctrls->auto_exp->val == V4L2_EXPOSURE_AUTO) {
- ret = ov2680_exposure_set(sensor, true);
- if (ret < 0)
- return ret;
- }
-
sensor->mode_pending_changes = false;
return 0;
@@ -556,7 +459,7 @@ static int ov2680_power_on(struct ov2680_dev *sensor)
ret = ov2680_write_reg(sensor, OV2680_REG_SOFT_RESET, 0x01);
if (ret != 0) {
dev_err(dev, "sensor soft reset failed\n");
- return ret;
+ goto err_disable_regulators;
}
usleep_range(1000, 2000);
} else {
@@ -566,7 +469,7 @@ static int ov2680_power_on(struct ov2680_dev *sensor)
ret = clk_prepare_enable(sensor->xvclk);
if (ret < 0)
- return ret;
+ goto err_disable_regulators;
sensor->is_enabled = true;
@@ -576,6 +479,10 @@ static int ov2680_power_on(struct ov2680_dev *sensor)
ov2680_stream_disable(sensor);
return 0;
+
+err_disable_regulators:
+ regulator_bulk_disable(OV2680_NUM_SUPPLIES, sensor->supplies);
+ return ret;
}
static int ov2680_s_power(struct v4l2_subdev *sd, int on)
@@ -590,15 +497,10 @@ static int ov2680_s_power(struct v4l2_subdev *sd, int on)
else
ret = ov2680_power_off(sensor);
- mutex_unlock(&sensor->lock);
-
- if (on && ret == 0) {
- ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
- if (ret < 0)
- return ret;
-
+ if (on && ret == 0)
ret = ov2680_mode_restore(sensor);
- }
+
+ mutex_unlock(&sensor->lock);
return ret;
}
@@ -793,66 +695,23 @@ static int ov2680_enum_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-static int ov2680_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
- struct ov2680_dev *sensor = to_ov2680_dev(sd);
- struct ov2680_ctrls *ctrls = &sensor->ctrls;
- int val;
-
- if (!sensor->is_enabled)
- return 0;
-
- switch (ctrl->id) {
- case V4L2_CID_GAIN:
- val = ov2680_gain_get(sensor);
- if (val < 0)
- return val;
- ctrls->gain->val = val;
- break;
- case V4L2_CID_EXPOSURE:
- val = ov2680_exposure_get(sensor);
- if (val < 0)
- return val;
- ctrls->exposure->val = val;
- break;
- }
-
- return 0;
-}
-
static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
struct ov2680_dev *sensor = to_ov2680_dev(sd);
- struct ov2680_ctrls *ctrls = &sensor->ctrls;
if (!sensor->is_enabled)
return 0;
switch (ctrl->id) {
- case V4L2_CID_AUTOGAIN:
- return ov2680_gain_set(sensor, !!ctrl->val);
case V4L2_CID_GAIN:
- return ov2680_gain_set(sensor, !!ctrls->auto_gain->val);
- case V4L2_CID_EXPOSURE_AUTO:
- return ov2680_exposure_set(sensor, !!ctrl->val);
+ return ov2680_gain_set(sensor, ctrl->val);
case V4L2_CID_EXPOSURE:
- return ov2680_exposure_set(sensor, !!ctrls->auto_exp->val);
+ return ov2680_exposure_set(sensor, ctrl->val);
case V4L2_CID_VFLIP:
- if (sensor->is_streaming)
- return -EBUSY;
- if (ctrl->val)
- return ov2680_vflip_enable(sensor);
- else
- return ov2680_vflip_disable(sensor);
+ return ov2680_set_vflip(sensor, ctrl->val);
case V4L2_CID_HFLIP:
- if (sensor->is_streaming)
- return -EBUSY;
- if (ctrl->val)
- return ov2680_hflip_enable(sensor);
- else
- return ov2680_hflip_disable(sensor);
+ return ov2680_set_hflip(sensor, ctrl->val);
case V4L2_CID_TEST_PATTERN:
return ov2680_test_pattern_set(sensor, ctrl->val);
default:
@@ -863,7 +722,6 @@ static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl)
}
static const struct v4l2_ctrl_ops ov2680_ctrl_ops = {
- .g_volatile_ctrl = ov2680_g_volatile_ctrl,
.s_ctrl = ov2680_s_ctrl,
};
@@ -935,7 +793,7 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
if (ret < 0)
return ret;
- v4l2_ctrl_handler_init(hdl, 7);
+ v4l2_ctrl_handler_init(hdl, 5);
hdl->lock = &sensor->lock;
@@ -947,16 +805,9 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
ARRAY_SIZE(test_pattern_menu) - 1,
0, 0, test_pattern_menu);
- ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
- V4L2_CID_EXPOSURE_AUTO,
- V4L2_EXPOSURE_MANUAL, 0,
- V4L2_EXPOSURE_AUTO);
-
ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
0, 32767, 1, 0);
- ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
- 0, 1, 1, 1);
ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, 0, 2047, 1, 0);
if (hdl->error) {
@@ -964,11 +815,8 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
goto cleanup_entity;
}
- ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
- ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
-
- v4l2_ctrl_auto_cluster(2, &ctrls->auto_gain, 0, true);
- v4l2_ctrl_auto_cluster(2, &ctrls->auto_exp, 1, true);
+ ctrls->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+ ctrls->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
sensor->sd.ctrl_handler = hdl;
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index be6c882dd1d5..fb3c37feeef9 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -46,6 +46,7 @@
#define OV5640_REG_SC_PLL_CTRL1 0x3035
#define OV5640_REG_SC_PLL_CTRL2 0x3036
#define OV5640_REG_SC_PLL_CTRL3 0x3037
+#define OV5640_REG_SC_PLLS_CTRL3 0x303d
#define OV5640_REG_SLAVE_ID 0x3100
#define OV5640_REG_SCCB_SYS_CTRL1 0x3103
#define OV5640_REG_SYS_ROOT_DIVIDER 0x3108
@@ -87,6 +88,7 @@
#define OV5640_REG_POLARITY_CTRL00 0x4740
#define OV5640_REG_MIPI_CTRL00 0x4800
#define OV5640_REG_DEBUG_MODE 0x4814
+#define OV5640_REG_PCLK_PERIOD 0x4837
#define OV5640_REG_ISP_FORMAT_MUX_CTRL 0x501f
#define OV5640_REG_PRE_ISP_TEST_SET1 0x503d
#define OV5640_REG_SDE_CTRL0 0x5580
@@ -134,6 +136,8 @@ static const struct ov5640_pixfmt ov5640_formats[] = {
{ MEDIA_BUS_FMT_JPEG_1X8, V4L2_COLORSPACE_JPEG, },
{ MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_SRGB, },
{ MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_SRGB, },
+ { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_COLORSPACE_SRGB, },
+ { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_COLORSPACE_SRGB, },
{ MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB, },
{ MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB, },
{ MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB, },
@@ -185,6 +189,7 @@ struct reg_value {
struct ov5640_mode_info {
enum ov5640_mode_id id;
enum ov5640_downsize_mode dn_mode;
+ bool scaler; /* Mode uses ISP scaler (reg 0x5001, BIT(5) == 1) */
u32 hact;
u32 htot;
u32 vact;
@@ -303,7 +308,7 @@ static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
{0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0},
{0x501f, 0x00, 0, 0}, {0x4407, 0x04, 0, 0},
{0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x4837, 0x0a, 0, 0}, {0x3824, 0x02, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x482a, 0x06, 0, 0},
{0x5000, 0xa7, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0},
{0x5181, 0xf2, 0, 0}, {0x5182, 0x00, 0, 0}, {0x5183, 0x14, 0, 0},
{0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0}, {0x5186, 0x09, 0, 0},
@@ -542,7 +547,7 @@ static const struct reg_value ov5640_setting_QSXGA_2592_1944[] = {
/* power-on sensor init reg table */
static const struct ov5640_mode_info ov5640_mode_init_data = {
- 0, SUBSAMPLING, 640, 1896, 480, 984,
+ 0, SUBSAMPLING, 0, 640, 1896, 480, 984,
ov5640_init_setting_30fps_VGA,
ARRAY_SIZE(ov5640_init_setting_30fps_VGA),
};
@@ -1067,6 +1072,222 @@ static int ov5640_set_jpeg_timings(struct ov5640_dev *sensor,
return ov5640_write_reg16(sensor, OV5640_REG_VFIFO_VSIZE, mode->vact);
}
+/*
+ *
+ * The current best guess of the clock tree, as reverse engineered by several
+ * people on the media mailing list:
+ *
+ * +--------------+
+ * | Ext. Clock |
+ * +------+-------+
+ * |
+ * +------+-------+ - reg 0x3037[3:0] for the pre-divider
+ * | System PLL | - reg 0x3036 for the multiplier
+ * +--+-----------+ - reg 0x3035[7:4] for the system divider
+ * |
+ * | +--------------+
+ * |---+ MIPI Rate | - reg 0x3035[3:0] for the MIPI root divider
+ * | +--------------+
+ * |
+ * +--+-----------+
+ * | PLL Root Div | - (reg 0x3037[4])+1 for the root divider
+ * +--+-----------+
+ * |
+ * +------+-------+
+ * | MIPI Bit Div | - reg 0x3034[3:0]/4 for divider when in MIPI mode, else 1
+ * +--+-----------+
+ * |
+ * | +--------------+
+ * |---+ SCLK | - log2(reg 0x3108[1:0]) for the root divider
+ * | +--------------+
+ * |
+ * +--+-----------+ - reg 0x3035[3:0] for the MIPI root divider
+ * | PCLK | - log2(reg 0x3108[5:4]) for the DVP root divider
+ * +--------------+
+ *
+ * Not all limitations on the register values are documented above; see the
+ * ov5640 datasheet.
+ *
+ * For the sensor to operate correctly, the SCLK:PCLK:MIPI RATE ratio must be
+ * 1:2:8 when the scaler in the ISP is not enabled, and 1:1:4 when it is
+ * enabled (the MIPI rate doesn't matter in DVP mode). The ratio of these
+ * clocks is maintained by the constant divider values below, with the PCLK
+ * divider selected based on whether the mode uses the scaler.
+ */
+
+/*
+ * This is supposed to range from 1 to 16, but the value is always
+ * set to either 1 or 2 in the vendor kernels.
+ */
+#define OV5640_SYSDIV_MIN 1
+#define OV5640_SYSDIV_MAX 12
+
+/*
+ * This is supposed to range from 1 to 8, but the value is always
+ * set to 3 in the vendor kernels.
+ */
+#define OV5640_PLL_PREDIV 2
+
+/*
+ * This is supposed to range from 4 to 252, but must be even when >127.
+ */
+#define OV5640_PLL_MULT_MIN 4
+#define OV5640_PLL_MULT_MAX 252
+
+/*
+ * This is supposed to range from 1 to 2, but the value is always
+ * set to 1 in the vendor kernels.
+ */
+#define OV5640_PLL_DVP_ROOT_DIV 1
+#define OV5640_PLL_MIPI_ROOT_DIV 2
+
+/*
+ * This is supposed to range from 1 to 8, but the value is always
+ * set to 2 in the vendor kernels.
+ */
+#define OV5640_SCLK_ROOT_DIV 2
+
+/*
+ * This is equal to the MIPI bit rate divided by 4. Now it is hardcoded to
+ * only work with 8-bit formats, so this value will need to be set in
+ * software if support for 10-bit formats is added. The bit divider is
+ * only active when in MIPI mode (not DVP)
+ */
+#define OV5640_BIT_DIV 2
+
+static unsigned long ov5640_compute_sclk(struct ov5640_dev *sensor,
+ u8 sys_div, u8 pll_prediv,
+ u8 pll_mult, u8 pll_div,
+ u8 sclk_div)
+{
+ unsigned long rate = clk_get_rate(sensor->xclk);
+
+ rate = rate / pll_prediv * pll_mult / sys_div / pll_div;
+ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
+ rate = rate / OV5640_BIT_DIV;
+
+ return rate / sclk_div;
+}
+
+static unsigned long ov5640_calc_sclk(struct ov5640_dev *sensor,
+ unsigned long rate,
+ u8 *sysdiv, u8 *prediv, u8 pll_rdiv,
+ u8 *mult, u8 *sclk_rdiv)
+{
+ unsigned long best = ~0;
+ u8 best_sysdiv = 1, best_mult = 1;
+ u8 _sysdiv, _pll_mult;
+
+ for (_sysdiv = OV5640_SYSDIV_MIN;
+ _sysdiv <= OV5640_SYSDIV_MAX;
+ _sysdiv++) {
+ for (_pll_mult = OV5640_PLL_MULT_MIN;
+ _pll_mult <= OV5640_PLL_MULT_MAX;
+ _pll_mult++) {
+ unsigned long _rate;
+
+ /*
+ * The PLL multiplier cannot be odd if above
+ * 127.
+ */
+ if (_pll_mult > 127 && (_pll_mult % 2))
+ continue;
+
+ _rate = ov5640_compute_sclk(sensor, _sysdiv,
+ OV5640_PLL_PREDIV,
+ _pll_mult,
+ pll_rdiv,
+ OV5640_SCLK_ROOT_DIV);
+
+ if (abs(rate - _rate) < abs(rate - best)) {
+ best = _rate;
+ best_sysdiv = _sysdiv;
+ best_mult = _pll_mult;
+ }
+
+ if (_rate == rate)
+ goto out;
+ if (_rate > rate)
+ break;
+ }
+ }
+
+out:
+ *sysdiv = best_sysdiv;
+ *prediv = OV5640_PLL_PREDIV;
+ *mult = best_mult;
+ *sclk_rdiv = OV5640_SCLK_ROOT_DIV;
+ return best;
+}
+
+static int ov5640_set_sclk(struct ov5640_dev *sensor,
+ const struct ov5640_mode_info *mode)
+{
+ u8 sysdiv, prediv, mult, pll_rdiv, sclk_rdiv, mipi_div, pclk_div;
+ u8 pclk_period;
+ int ret;
+ unsigned long sclk, rate, pclk;
+ unsigned char bpp;
+
+ /*
+ * All the formats we support have 2 bytes per pixel, except for JPEG,
+ * which is 1 byte per pixel.
+ */
+ bpp = sensor->fmt.code == MEDIA_BUS_FMT_JPEG_1X8 ? 1 : 2;
+ rate = mode->vtot * mode->htot * bpp;
+ rate *= ov5640_framerates[sensor->current_fr];
+
+ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
+ rate = rate / sensor->ep.bus.mipi_csi2.num_data_lanes;
+
+ pll_rdiv = (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) ?
+ OV5640_PLL_MIPI_ROOT_DIV : OV5640_PLL_DVP_ROOT_DIV;
+
+ sclk = ov5640_calc_sclk(sensor, rate, &sysdiv, &prediv, pll_rdiv,
+ &mult, &sclk_rdiv);
+
+ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
+ mipi_div = (sensor->current_mode->scaler) ? 2 : 1;
+ pclk_div = 1;
+
+ /*
+ * Calculate the pclk period multiplied by the number of CSI-2
+ * lanes, in ns, for the MIPI timing register.
+ */
+ pclk = sclk * sclk_rdiv / mipi_div;
+ pclk_period = (u8)((1000000000UL + pclk / 2UL) / pclk);
+ pclk_period = pclk_period *
+ sensor->ep.bus.mipi_csi2.num_data_lanes;
+ ret = ov5640_write_reg(sensor, OV5640_REG_PCLK_PERIOD,
+ pclk_period);
+ if (ret)
+ return ret;
+ } else {
+ mipi_div = 1;
+ pclk_div = (sensor->current_mode->scaler) ? 2 : 1;
+ }
+
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL1,
+ 0xff, (sysdiv << 4) | (mipi_div & 0x0f));
+ if (ret)
+ return ret;
+
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL2,
+ 0xff, mult);
+ if (ret)
+ return ret;
+
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL3,
+ 0x1f, prediv | ((pll_rdiv - 1) << 4));
+ if (ret)
+ return ret;
+
+ return ov5640_mod_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER, 0x3F,
+ (ilog2(pclk_div) << 4) |
+ (ilog2(sclk_rdiv / 2) << 2) |
+ ilog2(sclk_rdiv));
+}
+
/* download ov5640 settings to sensor through i2c */
static int ov5640_set_timings(struct ov5640_dev *sensor,
const struct ov5640_mode_info *mode)
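A standalone sketch of the divider arithmetic above, useful for checking the
clock tree by hand. The 24 MHz external clock and the particular divider
picks are assumptions for illustration, not values taken from this patch:

	#include <stdio.h>

	int main(void)
	{
		unsigned long xclk = 24000000UL;	/* assumed external clock */
		unsigned long prediv = 2, mult = 84, sysdiv = 2, pll_rdiv = 2;
		unsigned long bit_div = 2, sclk_div = 2;	/* MIPI, 8-bit format */
		/* Same order of operations as ov5640_compute_sclk() above. */
		unsigned long rate = xclk / prediv * mult / sysdiv / pll_rdiv;

		rate /= bit_div;	/* MIPI bit divider, CSI-2 bus only */
		printf("sclk = %lu Hz\n", rate / sclk_div);	/* prints 63000000 */
		return 0;
	}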
@@ -1206,71 +1427,6 @@ static int ov5640_set_autogain(struct ov5640_dev *sensor, bool on)
static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on)
{
- int ret;
- unsigned int flags = sensor->ep.bus.parallel.flags;
- u8 pclk_pol = 0;
- u8 hsync_pol = 0;
- u8 vsync_pol = 0;
-
- /*
- * Note about parallel port configuration.
- *
- * When configured in parallel mode, the OV5640 will
- * output 10 bits data on DVP data lines [9:0].
- * If only 8 bits data are wanted, the 8 bits data lines
- * of the camera interface must be physically connected
- * on the DVP data lines [9:2].
- *
- * Control lines polarity can be configured through
- * devicetree endpoint control lines properties.
- * If no endpoint control lines properties are set,
- * polarity will be as below:
- * - VSYNC: active high
- * - HREF: active low
- * - PCLK: active low
- */
-
- if (on) {
- /*
- * configure parallel port control lines polarity
- *
- * POLARITY CTRL0
- * - [5]: PCLK polarity (0: active low, 1: active high)
- * - [1]: HREF polarity (0: active low, 1: active high)
- * - [0]: VSYNC polarity (mismatch here between
- * datasheet and hardware, 0 is active high
- * and 1 is active low...)
- */
- if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
- pclk_pol = 1;
- if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
- hsync_pol = 1;
- if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
- vsync_pol = 1;
-
- ret = ov5640_write_reg(sensor,
- OV5640_REG_POLARITY_CTRL00,
- (pclk_pol << 5) |
- (hsync_pol << 1) |
- vsync_pol);
-
- if (ret)
- return ret;
- }
-
- /*
- * powerdown MIPI TX/RX PHY & disable MIPI
- *
- * MIPI CONTROL 00
- * 4: PWDN PHY TX
- * 3: PWDN PHY RX
- * 2: MIPI enable
- */
- ret = ov5640_write_reg(sensor,
- OV5640_REG_IO_MIPI_CTRL00, on ? 0x18 : 0);
- if (ret)
- return ret;
-
return ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0, on ?
OV5640_REG_SYS_CTRL0_SW_PWUP :
OV5640_REG_SYS_CTRL0_SW_PWDN);
@@ -1433,6 +1589,7 @@ static int ov5640_get_light_freq(struct ov5640_dev *sensor)
light_freq = 50;
} else {
/* 60Hz */
+ light_freq = 60;
}
}
@@ -1650,6 +1807,11 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor,
if (ret < 0)
return ret;
+ /* Set PLL registers for new mode */
+ ret = ov5640_set_sclk(sensor, mode);
+ if (ret < 0)
+ return ret;
+
/* Write capture setting */
ret = ov5640_load_regs(sensor, mode);
if (ret < 0)
@@ -1771,9 +1933,16 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor,
static int ov5640_set_mode_direct(struct ov5640_dev *sensor,
const struct ov5640_mode_info *mode)
{
+ int ret;
+
if (!mode->reg_data)
return -EINVAL;
+ /* Set PLL registers for new mode */
+ ret = ov5640_set_sclk(sensor, mode);
+ if (ret < 0)
+ return ret;
+
/* Write capture setting */
return ov5640_load_regs(sensor, mode);
}
@@ -1992,9 +2161,9 @@ static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on)
* "ov5640_set_stream_mipi()")
* [4] = 0 : Power up MIPI HS Tx
* [3] = 0 : Power up MIPI LS Rx
- * [2] = 0 : MIPI interface disabled
+ * [2] = 1 : MIPI interface enabled
*/
- ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x40);
+ ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x44);
if (ret)
return ret;
@@ -2029,16 +2198,74 @@ static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on)
static int ov5640_set_power_dvp(struct ov5640_dev *sensor, bool on)
{
+ unsigned int flags = sensor->ep.bus.parallel.flags;
+ u8 pclk_pol = 0;
+ u8 hsync_pol = 0;
+ u8 vsync_pol = 0;
int ret;
if (!on) {
/* Reset settings to their default values. */
+ ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x58);
+ ov5640_write_reg(sensor, OV5640_REG_POLARITY_CTRL00, 0x20);
ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x00);
ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0x00);
return 0;
}
/*
+ * Note about parallel port configuration.
+ *
+ * When configured in parallel mode, the OV5640 will
+ * output 10 bits data on DVP data lines [9:0].
+ * If only 8 bits data are wanted, the 8 bits data lines
+ * of the camera interface must be physically connected
+ * on the DVP data lines [9:2].
+ *
+ * Control lines polarity can be configured through
+ * devicetree endpoint control lines properties.
+ * If no endpoint control lines properties are set,
+ * polarity will be as below:
+ * - VSYNC: active high
+ * - HREF: active low
+ * - PCLK: active low
+ */
+ /*
+ * configure parallel port control lines polarity
+ *
+ * POLARITY CTRL0
+ * - [5]: PCLK polarity (0: active low, 1: active high)
+ * - [1]: HREF polarity (0: active low, 1: active high)
+ * - [0]: VSYNC polarity (mismatch here between
+ * datasheet and hardware, 0 is active high
+ * and 1 is active low...)
+ */
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ pclk_pol = 1;
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ hsync_pol = 1;
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ vsync_pol = 1;
+
+ ret = ov5640_write_reg(sensor, OV5640_REG_POLARITY_CTRL00,
+ (pclk_pol << 5) | (hsync_pol << 1) | vsync_pol);
+
+ if (ret)
+ return ret;
+
+ /*
+ * powerdown MIPI TX/RX PHY & disable MIPI
+ *
+ * MIPI CONTROL 00
+ * 4: PWDN PHY TX
+ * 3: PWDN PHY RX
+ * 2: MIPI enable
+ */
+ ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x18);
+ if (ret)
+ return ret;
+
+ /*
* enable VSYNC/HREF/PCLK DVP control lines
* & D[9:6] DVP data lines
*
@@ -2276,11 +2503,13 @@ static int ov5640_set_framefmt(struct ov5640_dev *sensor,
switch (format->code) {
case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
/* YUV422, UYVY */
fmt = 0x3f;
mux = OV5640_FMT_MUX_YUV422;
break;
case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
/* YUV422, YUYV */
fmt = 0x30;
mux = OV5640_FMT_MUX_YUV422;
@@ -2596,6 +2825,13 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
/* v4l2_ctrl_lock() locks our own mutex */
+ /*
+ * If the sensor is not powered up by the host driver, do
+ * not try to access it to update the volatile controls.
+ */
+ if (sensor->power_count == 0)
+ return 0;
+
switch (ctrl->id) {
case V4L2_CID_AUTOGAIN:
val = ov5640_get_gain(sensor);
@@ -2704,7 +2940,7 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
/* Auto/manual gain */
ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
0, 1, 1, 1);
- ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN,
0, 1023, 1, 0);
ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION,
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index 1ae252378799..477f61b8559c 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -722,8 +722,10 @@ static int ov5675_init_controls(struct ov5675 *ov5675)
V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(ov5675_test_pattern_menu) - 1,
0, 0, ov5675_test_pattern_menu);
- if (ctrl_hdlr->error)
+ if (ctrl_hdlr->error) {
+ v4l2_ctrl_handler_free(ctrl_hdlr);
return ctrl_hdlr->error;
+ }
ov5675->sd.ctrl_handler = ctrl_hdlr;
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 154776d0069e..e47800cb6c0f 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -1824,7 +1824,7 @@ static int ov7670_parse_dt(struct device *dev,
if (bus_cfg.bus_type != V4L2_MBUS_PARALLEL) {
dev_err(dev, "Unsupported media bus type\n");
- return ret;
+ return -EINVAL;
}
info->mbus_config = bus_cfg.bus.parallel.flags;
diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
index 2cc6a678069a..5033950a48ab 100644
--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -1397,7 +1397,7 @@ static int ov772x_probe(struct i2c_client *client)
priv->subdev.ctrl_handler = &priv->hdl;
if (priv->hdl.error) {
ret = priv->hdl.error;
- goto error_mutex_destroy;
+ goto error_ctrl_free;
}
priv->clk = clk_get(&client->dev, NULL);
@@ -1446,7 +1446,6 @@ error_clk_put:
clk_put(priv->clk);
error_ctrl_free:
v4l2_ctrl_handler_free(&priv->hdl);
-error_mutex_destroy:
mutex_destroy(&priv->lock);
return ret;
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 76c443067ec2..ef1460c61bea 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -2107,9 +2107,6 @@ static int tc358743_probe(struct i2c_client *client)
state->mbus_fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
sd->dev = &client->dev;
- err = v4l2_async_register_subdev(sd);
- if (err < 0)
- goto err_hdl;
mutex_init(&state->confctl_mutex);
@@ -2167,6 +2164,10 @@ static int tc358743_probe(struct i2c_client *client)
if (err)
goto err_work_queues;
+ err = v4l2_async_register_subdev(sd);
+ if (err < 0)
+ goto err_work_queues;
+
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
client->addr << 1, client->adapter->name);
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 668770e9f609..457dc215a224 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -264,7 +264,36 @@ static struct media_entity *stack_pop(struct media_graph *graph)
#define stack_top(en) ((en)->stack[(en)->top].entity)
/**
- * media_graph_walk_init - Allocate resources for graph walk
+ * media_entity_has_route - Check if two entity pads are connected internally
+ * @entity: The entity
+ * @pad0: The first pad index
+ * @pad1: The second pad index
+ *
+ * This function can be used to check whether two pads of an entity are
+ * connected internally in the entity.
+ *
+ * The caller must hold entity->source->parent->mutex.
+ *
+ * Return: true if the pads are connected internally and false otherwise.
+ */
+bool media_entity_has_route(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1)
+{
+ if (pad0 >= entity->num_pads || pad1 >= entity->num_pads)
+ return false;
+
+ if (pad0 == pad1)
+ return true;
+
+ if (!entity->ops || !entity->ops->has_route)
+ return true;
+
+ return entity->ops->has_route(entity, pad0, pad1);
+}
+EXPORT_SYMBOL_GPL(media_entity_has_route);
+
+/**
+ * media_entity_graph_walk_init - Allocate resources for graph walk
* @graph: Media graph structure that will be used to walk the graph
* @mdev: Media device
*
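Since media_entity_has_route() treats entities without the hook as fully
routed, only drivers with restricted internal routing need to implement it.
A minimal sketch of such an implementation, assuming the has_route operation
this patch adds to media_entity_operations; the pad layout, where sink pad i
feeds only source pad i + 4, is hypothetical:

	static bool xdemux_has_route(struct media_entity *entity,
				     unsigned int pad0, unsigned int pad1)
	{
		unsigned int sink = min(pad0, pad1);
		unsigned int source = max(pad0, pad1);

		return source == sink + 4;
	}

	static const struct media_entity_operations xdemux_media_ops = {
		.has_route = xdemux_has_route,
	};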
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 6441e7d63d97..cdde56889fe2 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2964,7 +2964,7 @@ static int bttv_open(struct file *file)
dprintk("open dev=%s\n", video_device_node_name(vdev));
- if (vdev->vfl_type == VFL_TYPE_GRABBER) {
+ if (vdev->vfl_type == VFL_TYPE_VIDEO) {
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
} else if (vdev->vfl_type == VFL_TYPE_VBI) {
type = V4L2_BUF_TYPE_VBI_CAPTURE;
@@ -3905,7 +3905,7 @@ static int bttv_register_video(struct bttv *btv)
if (no_overlay <= 0)
btv->video_dev.device_caps |= V4L2_CAP_VIDEO_OVERLAY;
- if (video_register_device(&btv->video_dev, VFL_TYPE_GRABBER,
+ if (video_register_device(&btv->video_dev, VFL_TYPE_VIDEO,
video_nr[btv->c.nr]) < 0)
goto err;
pr_info("%d: registered device %s\n",
@@ -4258,6 +4258,7 @@ static void bttv_remove(struct pci_dev *pci_dev)
/* free resources */
free_irq(btv->c.pci->irq,btv);
+ del_timer_sync(&btv->timeout);
iounmap(btv->bt848_mmio);
release_mem_region(pci_resource_start(btv->c.pci,0),
pci_resource_len(btv->c.pci,0));
diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
index 3e52a51982d7..110651e47831 100644
--- a/drivers/media/pci/bt8xx/dst.c
+++ b/drivers/media/pci/bt8xx/dst.c
@@ -1722,7 +1722,7 @@ struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_ad
return state; /* Manu (DST is a card not a frontend) */
}
-EXPORT_SYMBOL(dst_attach);
+EXPORT_SYMBOL_GPL(dst_attach);
static const struct dvb_frontend_ops dst_dvbt_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c
index 85fcdc59f0d1..571392d80ccc 100644
--- a/drivers/media/pci/bt8xx/dst_ca.c
+++ b/drivers/media/pci/bt8xx/dst_ca.c
@@ -668,7 +668,7 @@ struct dvb_device *dst_ca_attach(struct dst_state *dst, struct dvb_adapter *dvb_
return NULL;
}
-EXPORT_SYMBOL(dst_ca_attach);
+EXPORT_SYMBOL_GPL(dst_ca_attach);
MODULE_DESCRIPTION("DST DVB-S/T/C Combo CA driver");
MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 1bd8bbe57a30..1f230b14cbfd 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -8,6 +8,7 @@
* All rights reserved.
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <media/i2c/adv7604.h>
#include <media/i2c/adv7842.h>
@@ -210,17 +211,17 @@ void cobalt_pcie_status_show(struct cobalt *cobalt)
pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &stat);
cobalt_info("PCIe link capability 0x%08x: %s per lane and %u lanes\n",
capa, get_link_speed(capa),
- (capa & PCI_EXP_LNKCAP_MLW) >> 4);
+ FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
cobalt_info("PCIe link control 0x%04x\n", ctrl);
cobalt_info("PCIe link status 0x%04x: %s per lane and %u lanes\n",
stat, get_link_speed(stat),
- (stat & PCI_EXP_LNKSTA_NLW) >> 4);
+ FIELD_GET(PCI_EXP_LNKSTA_NLW, stat));
/* Bus */
pcie_capability_read_dword(pci_bus_dev, PCI_EXP_LNKCAP, &capa);
cobalt_info("PCIe bus link capability 0x%08x: %s per lane and %u lanes\n",
capa, get_link_speed(capa),
- (capa & PCI_EXP_LNKCAP_MLW) >> 4);
+ FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
/* Slot */
pcie_capability_read_dword(pci_dev, PCI_EXP_SLTCAP, &capa);
@@ -239,7 +240,7 @@ static unsigned pcie_link_get_lanes(struct cobalt *cobalt)
if (!pci_is_pcie(pci_dev))
return 0;
pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &link);
- return (link & PCI_EXP_LNKSTA_NLW) >> 4;
+ return FIELD_GET(PCI_EXP_LNKSTA_NLW, link);
}
static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
@@ -250,7 +251,7 @@ static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
if (!pci_is_pcie(pci_dev))
return 0;
pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &link);
- return (link & PCI_EXP_LNKCAP_MLW) >> 4;
+ return FIELD_GET(PCI_EXP_LNKCAP_MLW, link);
}
static void msi_config_show(struct cobalt *cobalt, struct pci_dev *pci_dev)
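FIELD_GET() from <linux/bitfield.h> derives the shift from the mask's lowest
set bit at compile time; with PCI_EXP_LNKSTA_NLW == 0x03f0 it reduces to
(link & 0x03f0) >> 4, i.e. exactly the open-coded shift these hunks replace,
minus the magic number. A rough equivalent of the macro, for illustration
only:

	/* Dividing by the mask's lowest set bit performs the right shift. */
	#define MY_FIELD_GET(mask, reg)	(((reg) & (mask)) / ((mask) & -(mask)))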
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index c5207501d5e0..0ff37496c9ab 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -1272,7 +1272,7 @@ static int cobalt_node_register(struct cobalt *cobalt, int node)
video_set_drvdata(vdev, s);
ret = vb2_queue_init(q);
if (!s->is_audio && ret == 0)
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
else if (!s->is_dummy)
ret = cobalt_alsa_init(s);
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index b79718519b9b..3178df3c4922 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -48,19 +48,19 @@ static struct {
} cx18_stream_info[] = {
{ /* CX18_ENC_STREAM_TYPE_MPG */
"encoder MPEG",
- VFL_TYPE_GRABBER, 0,
+ VFL_TYPE_VIDEO, 0,
PCI_DMA_FROMDEVICE,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_AUDIO | V4L2_CAP_TUNER
},
{ /* CX18_ENC_STREAM_TYPE_TS */
"TS",
- VFL_TYPE_GRABBER, -1,
+ VFL_TYPE_VIDEO, -1,
PCI_DMA_FROMDEVICE,
},
{ /* CX18_ENC_STREAM_TYPE_YUV */
"encoder YUV",
- VFL_TYPE_GRABBER, CX18_V4L2_ENC_YUV_OFFSET,
+ VFL_TYPE_VIDEO, CX18_V4L2_ENC_YUV_OFFSET,
PCI_DMA_FROMDEVICE,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING | V4L2_CAP_AUDIO | V4L2_CAP_TUNER
@@ -74,13 +74,13 @@ static struct {
},
{ /* CX18_ENC_STREAM_TYPE_PCM */
"encoder PCM audio",
- VFL_TYPE_GRABBER, CX18_V4L2_ENC_PCM_OFFSET,
+ VFL_TYPE_VIDEO, CX18_V4L2_ENC_PCM_OFFSET,
PCI_DMA_FROMDEVICE,
V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
},
{ /* CX18_ENC_STREAM_TYPE_IDX */
"encoder IDX",
- VFL_TYPE_GRABBER, -1,
+ VFL_TYPE_VIDEO, -1,
PCI_DMA_FROMDEVICE,
},
{ /* CX18_ENC_STREAM_TYPE_RAD */
@@ -434,7 +434,7 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
name = video_device_node_name(&s->video_dev);
switch (vfl_type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
CX18_INFO("Registered device %s for %s (%d x %d.%02d kB)\n",
name, s->name, cx->stream_buffers[type],
cx->stream_buf_size[type] / 1024,
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index 2327fe612610..434677bd4ad1 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1545,7 +1545,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
if (dev->tuner_type != TUNER_ABSENT)
dev->v4l_device->device_caps |= V4L2_CAP_TUNER;
err = video_register_device(dev->v4l_device,
- VFL_TYPE_GRABBER, -1);
+ VFL_TYPE_VIDEO, -1);
if (err < 0) {
pr_info("%s: can't register mpeg device\n", dev->name);
return err;
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 7fc408ee4934..90224a994702 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -409,7 +409,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
dev->height >> 1);
break;
default:
- BUG();
+ return -EINVAL; /* should not happen */
}
dprintk(2, "[%p/%d] buffer_init - %dx%d %dbpp 0x%08x - dma=0x%08lx\n",
buf, buf->vb.vb2_buf.index,
@@ -1304,7 +1304,7 @@ int cx23885_video_register(struct cx23885_dev *dev)
V4L2_CAP_AUDIO | V4L2_CAP_VIDEO_CAPTURE;
if (dev->tuner_type != TUNER_ABSENT)
dev->video_dev->device_caps |= V4L2_CAP_TUNER;
- err = video_register_device(dev->video_dev, VFL_TYPE_GRABBER,
+ err = video_register_device(dev->video_dev, VFL_TYPE_VIDEO,
video_nr[dev->nr]);
if (err < 0) {
pr_info("%s: can't register video device\n",
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index a10261da0db6..1b80c990cb94 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -757,7 +757,7 @@ int cx25821_video_register(struct cx25821_dev *dev)
snprintf(vdev->name, sizeof(vdev->name), "%s #%d", dev->name, i);
video_set_drvdata(vdev, chan);
- err = video_register_device(vdev, VFL_TYPE_GRABBER,
+ err = video_register_device(vdev, VFL_TYPE_VIDEO,
video_nr[dev->nr]);
if (err < 0)
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index d3da7f4297af..fa4ca002ed19 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -1138,7 +1138,7 @@ static int blackbird_register_video(struct cx8802_dev *dev)
V4L2_CAP_VIDEO_CAPTURE;
if (dev->core->board.tuner_type != UNSET)
dev->mpeg_dev.device_caps |= V4L2_CAP_TUNER;
- err = video_register_device(&dev->mpeg_dev, VFL_TYPE_GRABBER, -1);
+ err = video_register_device(&dev->mpeg_dev, VFL_TYPE_VIDEO, -1);
if (err < 0) {
pr_info("can't register mpeg device\n");
return err;
diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c
index 58489ea0c1da..7cf2271866d0 100644
--- a/drivers/media/pci/cx88/cx88-vbi.c
+++ b/drivers/media/pci/cx88/cx88-vbi.c
@@ -144,11 +144,10 @@ static int buffer_prepare(struct vb2_buffer *vb)
return -EINVAL;
vb2_set_plane_payload(vb, 0, size);
- cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl,
- 0, VBI_LINE_LENGTH * lines,
- VBI_LINE_LENGTH, 0,
- lines);
- return 0;
+ return cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl,
+ 0, VBI_LINE_LENGTH * lines,
+ VBI_LINE_LENGTH, 0,
+ lines);
}
static void buffer_finish(struct vb2_buffer *vb)
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index b8abcd550604..151ffb5fd404 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -433,6 +433,7 @@ static int queue_setup(struct vb2_queue *q,
static int buffer_prepare(struct vb2_buffer *vb)
{
+ int ret;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
struct cx88_core *core = dev->core;
@@ -447,35 +448,35 @@ static int buffer_prepare(struct vb2_buffer *vb)
switch (core->field) {
case V4L2_FIELD_TOP:
- cx88_risc_buffer(dev->pci, &buf->risc,
- sgt->sgl, 0, UNSET,
- buf->bpl, 0, core->height);
+ ret = cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, 0, UNSET,
+ buf->bpl, 0, core->height);
break;
case V4L2_FIELD_BOTTOM:
- cx88_risc_buffer(dev->pci, &buf->risc,
- sgt->sgl, UNSET, 0,
- buf->bpl, 0, core->height);
+ ret = cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, UNSET, 0,
+ buf->bpl, 0, core->height);
break;
case V4L2_FIELD_SEQ_TB:
- cx88_risc_buffer(dev->pci, &buf->risc,
- sgt->sgl,
- 0, buf->bpl * (core->height >> 1),
- buf->bpl, 0,
- core->height >> 1);
+ ret = cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl,
+ 0, buf->bpl * (core->height >> 1),
+ buf->bpl, 0,
+ core->height >> 1);
break;
case V4L2_FIELD_SEQ_BT:
- cx88_risc_buffer(dev->pci, &buf->risc,
- sgt->sgl,
- buf->bpl * (core->height >> 1), 0,
- buf->bpl, 0,
- core->height >> 1);
+ ret = cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl,
+ buf->bpl * (core->height >> 1), 0,
+ buf->bpl, 0,
+ core->height >> 1);
break;
case V4L2_FIELD_INTERLACED:
default:
- cx88_risc_buffer(dev->pci, &buf->risc,
- sgt->sgl, 0, buf->bpl,
- buf->bpl, buf->bpl,
- core->height >> 1);
+ ret = cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, 0, buf->bpl,
+ buf->bpl, buf->bpl,
+ core->height >> 1);
break;
}
dprintk(2,
@@ -483,7 +484,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
buf, buf->vb.vb2_buf.index, __func__,
core->width, core->height, dev->fmt->depth, dev->fmt->fourcc,
(unsigned long)buf->risc.dma);
- return 0;
+ return ret;
}
static void buffer_finish(struct vb2_buffer *vb)
@@ -1451,7 +1452,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
V4L2_CAP_VIDEO_CAPTURE;
if (core->board.tuner_type != UNSET)
dev->video_dev.device_caps |= V4L2_CAP_TUNER;
- err = video_register_device(&dev->video_dev, VFL_TYPE_GRABBER,
+ err = video_register_device(&dev->video_dev, VFL_TYPE_VIDEO,
video_nr[core->nr]);
if (err < 0) {
pr_err("can't register video device\n");
diff --git a/drivers/media/pci/ddbridge/ddbridge-main.c b/drivers/media/pci/ddbridge/ddbridge-main.c
index 03dc9924fa2c..bb7fb6402d6e 100644
--- a/drivers/media/pci/ddbridge/ddbridge-main.c
+++ b/drivers/media/pci/ddbridge/ddbridge-main.c
@@ -247,7 +247,7 @@ fail:
ddb_unmap(dev);
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
- return -1;
+ return stat;
}
/****************************************************************************/
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index bb3a8cc9de0c..6dbd98a3e5b8 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -1179,6 +1179,7 @@ static void dm1105_remove(struct pci_dev *pdev)
struct dvb_demux *dvbdemux = &dev->demux;
struct dmx_demux *dmx = &dvbdemux->dmx;
+ cancel_work_sync(&dev->ir.work);
dm1105_ir_exit(dev);
dmx->close(dmx);
dvb_net_release(&dev->dvbnet);
diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c
index 7480f0d3ad0f..82581aa5a2a3 100644
--- a/drivers/media/pci/dt3155/dt3155.c
+++ b/drivers/media/pci/dt3155/dt3155.c
@@ -550,7 +550,7 @@ static int dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
IRQF_SHARED, DT3155_NAME, pd);
if (err)
goto err_iounmap;
- err = video_register_device(&pd->vdev, VFL_TYPE_GRABBER, -1);
+ err = video_register_device(&pd->vdev, VFL_TYPE_VIDEO, -1);
if (err)
goto err_free_irq;
dev_info(&pdev->dev, "/dev/video%i is ready\n", pd->vdev.minor);
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 7808ec1052bf..385b20b22ac3 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -359,7 +359,7 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
void __iomem *const base = cio2->base;
u8 lanes, csi2bus = q->csi2.port;
u8 sensor_vc = SENSOR_VIR_CH_DFLT;
- struct cio2_csi2_timing timing;
+ struct cio2_csi2_timing timing = { 0 };
int i, r;
fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
@@ -1651,7 +1651,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
vdev->queue = &q->vbq;
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
video_set_drvdata(vdev, cio2);
- r = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (r) {
dev_err(&cio2->pci_dev->dev,
"failed to register video device (%d)\n", r);
@@ -1871,6 +1871,9 @@ static void cio2_pci_remove(struct pci_dev *pci_dev)
v4l2_device_unregister(&cio2->v4l2_dev);
media_device_cleanup(&cio2->media_dev);
mutex_destroy(&cio2->lock);
+
+ pm_runtime_forbid(&pci_dev->dev);
+ pm_runtime_get_noresume(&pci_dev->dev);
}
static int __maybe_unused cio2_runtime_suspend(struct device *dev)
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
index 200d2100dbff..f9de5d1605fe 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.c
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
@@ -99,7 +99,7 @@ static struct {
} ivtv_stream_info[] = {
{ /* IVTV_ENC_STREAM_TYPE_MPG */
"encoder MPG",
- VFL_TYPE_GRABBER, 0,
+ VFL_TYPE_VIDEO, 0,
PCI_DMA_FROMDEVICE, 0,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
@@ -107,7 +107,7 @@ static struct {
},
{ /* IVTV_ENC_STREAM_TYPE_YUV */
"encoder YUV",
- VFL_TYPE_GRABBER, IVTV_V4L2_ENC_YUV_OFFSET,
+ VFL_TYPE_VIDEO, IVTV_V4L2_ENC_YUV_OFFSET,
PCI_DMA_FROMDEVICE, 0,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
@@ -123,7 +123,7 @@ static struct {
},
{ /* IVTV_ENC_STREAM_TYPE_PCM */
"encoder PCM",
- VFL_TYPE_GRABBER, IVTV_V4L2_ENC_PCM_OFFSET,
+ VFL_TYPE_VIDEO, IVTV_V4L2_ENC_PCM_OFFSET,
PCI_DMA_FROMDEVICE, 0,
V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
@@ -137,7 +137,7 @@ static struct {
},
{ /* IVTV_DEC_STREAM_TYPE_MPG */
"decoder MPG",
- VFL_TYPE_GRABBER, IVTV_V4L2_DEC_MPG_OFFSET,
+ VFL_TYPE_VIDEO, IVTV_V4L2_DEC_MPG_OFFSET,
PCI_DMA_TODEVICE, 0,
V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
@@ -158,7 +158,7 @@ static struct {
},
{ /* IVTV_DEC_STREAM_TYPE_YUV */
"decoder YUV",
- VFL_TYPE_GRABBER, IVTV_V4L2_DEC_YUV_OFFSET,
+ VFL_TYPE_VIDEO, IVTV_V4L2_DEC_YUV_OFFSET,
PCI_DMA_TODEVICE, 0,
V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
@@ -315,7 +315,7 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
name = video_device_node_name(&s->vdev);
switch (vfl_type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
IVTV_INFO("Registered device %s for %s (%d kB)\n",
name, s->name, itv->options.kilobytes[type]);
break;
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 0e61c81356ef..c42e48bc5c1c 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1711,7 +1711,7 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
v4l2_ctrl_handler_setup(&meye.hdl);
meye.vdev.ctrl_handler = &meye.hdl;
- if (video_register_device(&meye.vdev, VFL_TYPE_GRABBER,
+ if (video_register_device(&meye.vdev, VFL_TYPE_VIDEO,
video_nr) < 0) {
v4l2_err(v4l2_dev, "video_register_device failed\n");
goto outvideoreg;
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index eb5621c9ebf8..478247e13637 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -697,7 +697,7 @@ static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
netup_unidvb_dma_enable(dma, 0);
msleep(50);
cancel_work_sync(&dma->work);
- del_timer(&dma->timeout);
+ del_timer_sync(&dma->timeout);
}
static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
@@ -887,12 +887,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
pci_dev->irq);
- if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
- "netup_unidvb", pci_dev) < 0) {
- dev_err(&pci_dev->dev,
- "%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
- goto irq_request_err;
- }
+
ndev->dma_size = 2 * 188 *
NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
@@ -933,6 +928,14 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
goto dma_setup_err;
}
+
+ if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
+ "netup_unidvb", pci_dev) < 0) {
+ dev_err(&pci_dev->dev,
+ "%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
+ goto dma_setup_err;
+ }
+
dev_info(&pci_dev->dev,
"netup_unidvb: device has been initialized\n");
return 0;
@@ -951,8 +954,6 @@ spi_setup_err:
dma_free_coherent(&pci_dev->dev, ndev->dma_size,
ndev->dma_virt, ndev->dma_phys);
dma_alloc_err:
- free_irq(pci_dev->irq, pci_dev);
-irq_request_err:
iounmap(ndev->lmmio1);
pci_bar1_error:
iounmap(ndev->lmmio0);
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 2d582c02adbf..e4623ed2f831 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -1214,7 +1214,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
if (saa7134_no_overlay <= 0)
dev->video_dev->device_caps |= V4L2_CAP_VIDEO_OVERLAY;
- err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
+ err = video_register_device(dev->video_dev,VFL_TYPE_VIDEO,
video_nr[dev->nr]);
if (err < 0) {
pr_info("%s: can't register video device\n",
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index e2666d1c6896..141ee18ed827 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -294,7 +294,7 @@ static int empress_init(struct saa7134_dev *dev)
dev->empress_dev->device_caps |= V4L2_CAP_TUNER;
video_set_drvdata(dev->empress_dev, dev);
- err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER,
+ err = video_register_device(dev->empress_dev,VFL_TYPE_VIDEO,
empress_nr[dev->nr]);
if (err < 0) {
pr_info("%s: can't register video device\n",
diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
index 6a5053126237..437dbe5e75e2 100644
--- a/drivers/media/pci/saa7134/saa7134-ts.c
+++ b/drivers/media/pci/saa7134/saa7134-ts.c
@@ -300,6 +300,7 @@ int saa7134_ts_start(struct saa7134_dev *dev)
int saa7134_ts_fini(struct saa7134_dev *dev)
{
+ del_timer_sync(&dev->ts_q.timeout);
saa7134_pgtable_free(dev->pci, &dev->ts_q.pt);
return 0;
}
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index 3f0b0933eed6..3e773690468b 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -185,6 +185,7 @@ int saa7134_vbi_init1(struct saa7134_dev *dev)
int saa7134_vbi_fini(struct saa7134_dev *dev)
{
/* nothing */
+ del_timer_sync(&dev->vbi_q.timeout);
return 0;
}
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index e454a288229b..ea26c8c57494 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -2154,6 +2154,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
void saa7134_video_fini(struct saa7134_dev *dev)
{
+ del_timer_sync(&dev->video_q.timeout);
/* free stuff */
vb2_queue_release(&dev->video_vbq);
saa7134_pgtable_free(dev->pci, &dev->video_q.pt);
diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
index 86d4e2abed82..3947701cd6c7 100644
--- a/drivers/media/pci/saa7146/hexium_gemini.c
+++ b/drivers/media/pci/saa7146/hexium_gemini.c
@@ -294,7 +294,7 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
- ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER);
+ ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_VIDEO);
if (ret < 0) {
pr_err("cannot register capture v4l2 device. skipping.\n");
saa7146_vv_release(dev);
diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c
index 31388597386a..2eb4bee16b71 100644
--- a/drivers/media/pci/saa7146/hexium_orion.c
+++ b/drivers/media/pci/saa7146/hexium_orion.c
@@ -368,7 +368,7 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
- if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) {
+ if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_VIDEO)) {
pr_err("cannot register capture v4l2 device. skipping.\n");
return -1;
}
diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c
index 58fe4c1619ee..bf0b9b0914cd 100644
--- a/drivers/media/pci/saa7146/mxb.c
+++ b/drivers/media/pci/saa7146/mxb.c
@@ -714,7 +714,7 @@ static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data
vv_data.vid_ops.vidioc_g_register = vidioc_g_register;
vv_data.vid_ops.vidioc_s_register = vidioc_s_register;
#endif
- if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
+ if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_VIDEO)) {
ERR("cannot register capture v4l2 device. skipping.\n");
saa7146_vv_release(dev);
return -1;
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 9ae04e18e6c6..59b039b953bb 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1227,7 +1227,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
if (saa7164_dev_setup(dev) < 0) {
err = -EINVAL;
- goto fail_free;
+ goto fail_dev;
}
/* print pci info */
@@ -1395,6 +1395,8 @@ fail_fw:
fail_irq:
saa7164_dev_unregister(dev);
+fail_dev:
+ pci_disable_device(pci_dev);
fail_free:
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index df494644b5b6..1d1d32e043f1 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -1087,7 +1087,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
v4l2_ctrl_handler_setup(hdl);
video_set_drvdata(port->v4l_device, port);
result = video_register_device(port->v4l_device,
- VFL_TYPE_GRABBER, -1);
+ VFL_TYPE_VIDEO, -1);
if (result < 0) {
printk(KERN_INFO "%s: can't register mpeg device\n",
dev->name);
diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
index 6e1ba4846ea4..c52ee141b8cc 100644
--- a/drivers/media/pci/solo6x10/solo6x10-core.c
+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
@@ -420,6 +420,7 @@ static int solo_sysfs_init(struct solo_dev *solo_dev)
solo_dev->nr_chans);
if (device_register(dev)) {
+ put_device(dev);
dev->parent = NULL;
return -ENOMEM;
}
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 476d7f3b32d6..cbf85231b708 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -1304,7 +1304,7 @@ static struct solo_enc_dev *solo_enc_alloc(struct solo_dev *solo_dev,
solo_enc->vfd->queue = &solo_enc->vidq;
solo_enc->vfd->lock = &solo_enc->lock;
video_set_drvdata(solo_enc->vfd, solo_enc);
- ret = video_register_device(solo_enc->vfd, VFL_TYPE_GRABBER, nr);
+ ret = video_register_device(solo_enc->vfd, VFL_TYPE_VIDEO, nr);
if (ret < 0)
goto vdev_release;
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
index 78792067e920..54434f3c428d 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
@@ -692,7 +692,7 @@ int solo_v4l2_init(struct solo_dev *solo_dev, unsigned nr)
while (erase_off(solo_dev))
/* Do nothing */;
- ret = video_register_device(solo_dev->vfd, VFL_TYPE_GRABBER, nr);
+ ret = video_register_device(solo_dev->vfd, VFL_TYPE_VIDEO, nr);
if (ret < 0)
goto fail;
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index fd3de3bb0c89..798574cfad35 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -1069,7 +1069,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
vip->video_dev.lock = &vip->v4l_lock;
video_set_drvdata(&vip->video_dev, vip);
- ret = video_register_device(&vip->video_dev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(&vip->video_dev, VFL_TYPE_VIDEO, -1);
if (ret)
goto vrelease;
diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c
index ea9f7d0058a2..e201d5a56bc6 100644
--- a/drivers/media/pci/ttpci/av7110_av.c
+++ b/drivers/media/pci/ttpci/av7110_av.c
@@ -822,10 +822,10 @@ static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, s
av7110_ipack_flush(ipack);
if (buf[3] & ADAPT_FIELD) {
+ if (buf[4] > len - 1 - 4)
+ return 0;
len -= buf[4] + 1;
buf += buf[4] + 1;
- if (!len)
- return 0;
}
av7110_ipack_instant_repack(buf + 4, len - 4, ipack);
diff --git a/drivers/media/pci/ttpci/av7110_v4l.c b/drivers/media/pci/ttpci/av7110_v4l.c
index f3d6c3cdb872..cabe006658dd 100644
--- a/drivers/media/pci/ttpci/av7110_v4l.c
+++ b/drivers/media/pci/ttpci/av7110_v4l.c
@@ -831,7 +831,7 @@ int av7110_init_v4l(struct av7110 *av7110)
if (FW_VERSION(av7110->arm_app) < 0x2623)
vv_data->capabilities &= ~V4L2_CAP_SLICED_VBI_OUTPUT;
- if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) {
+ if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_VIDEO)) {
ERR("cannot register capture device. skipping\n");
saa7146_vv_release(dev);
return -ENODEV;
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index e2d482af2367..3766c7aa96f4 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -1462,7 +1462,8 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
budget_av->has_saa7113 = 1;
err = saa7146_vv_init(dev, &vv_data);
if (err != 0) {
- /* fixme: proper cleanup here */
+ ttpci_budget_deinit(&budget_av->budget);
+ kfree(budget_av);
ERR("cannot init vv subsystem\n");
return err;
}
@@ -1470,10 +1471,11 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
- if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) {
- /* fixme: proper cleanup here */
- ERR("cannot register capture v4l2 device\n");
+ if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_VIDEO))) {
saa7146_vv_release(dev);
+ ttpci_budget_deinit(&budget_av->budget);
+ kfree(budget_av);
+ ERR("cannot register capture v4l2 device\n");
return err;
}
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
index 656142c7a2cc..a65114e7ca34 100644
--- a/drivers/media/pci/tw5864/tw5864-video.c
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -1162,7 +1162,7 @@ static int tw5864_video_input_init(struct tw5864_input *input, int video_nr)
input->gop = GOP_SIZE;
input->frame_interval = 1;
- ret = video_register_device(&input->vdev, VFL_TYPE_GRABBER, video_nr);
+ ret = video_register_device(&input->vdev, VFL_TYPE_VIDEO, video_nr);
if (ret)
goto free_v4l2_hdl;
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 2fb82d50c53e..10986fcd66a5 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -962,7 +962,7 @@ int tw68_video_init2(struct tw68_dev *dev, int video_nr)
dev->vdev.lock = &dev->lock;
dev->vdev.queue = &dev->vidq;
video_set_drvdata(&dev->vdev, dev);
- return video_register_device(&dev->vdev, VFL_TYPE_GRABBER, video_nr);
+ return video_register_device(&dev->vdev, VFL_TYPE_VIDEO, video_nr);
}
/*
diff --git a/drivers/media/pci/tw686x/tw686x-core.c b/drivers/media/pci/tw686x/tw686x-core.c
index 74ae4f0dcee7..8a25a0dac4ae 100644
--- a/drivers/media/pci/tw686x/tw686x-core.c
+++ b/drivers/media/pci/tw686x/tw686x-core.c
@@ -315,13 +315,6 @@ static int tw686x_probe(struct pci_dev *pci_dev,
spin_lock_init(&dev->lock);
- err = request_irq(pci_dev->irq, tw686x_irq, IRQF_SHARED,
- dev->name, dev);
- if (err < 0) {
- dev_err(&pci_dev->dev, "unable to request interrupt\n");
- goto iounmap;
- }
-
timer_setup(&dev->dma_delay_timer, tw686x_dma_delay, 0);
/*
@@ -333,18 +326,23 @@ static int tw686x_probe(struct pci_dev *pci_dev,
err = tw686x_video_init(dev);
if (err) {
dev_err(&pci_dev->dev, "can't register video\n");
- goto free_irq;
+ goto iounmap;
}
err = tw686x_audio_init(dev);
if (err)
dev_warn(&pci_dev->dev, "can't register audio\n");
+ err = request_irq(pci_dev->irq, tw686x_irq, IRQF_SHARED,
+ dev->name, dev);
+ if (err < 0) {
+ dev_err(&pci_dev->dev, "unable to request interrupt\n");
+ goto iounmap;
+ }
+
pci_set_drvdata(pci_dev, dev);
return 0;
-free_irq:
- free_irq(pci_dev->irq, dev);
iounmap:
pci_iounmap(pci_dev, dev->mmio);
free_region:
diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c
index 9be8c6e4fb69..1ced2b0ddb24 100644
--- a/drivers/media/pci/tw686x/tw686x-video.c
+++ b/drivers/media/pci/tw686x/tw686x-video.c
@@ -1282,7 +1282,7 @@ int tw686x_video_init(struct tw686x_dev *dev)
vc->device = vdev;
video_set_drvdata(vdev, vc);
- err = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ err = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (err < 0)
goto error;
vc->num = vdev->num;
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index e6b68be09f8f..73023d34d920 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -852,7 +852,7 @@ static void coda_setup_iram(struct coda_ctx *ctx)
/* Only H.264BP and H.263P3 are considered */
iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w64);
iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w64);
- if (!iram_info->buf_dbk_c_use)
+ if (!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use)
goto out;
iram_info->axi_sram_use |= dbk_bits;
@@ -876,7 +876,7 @@ static void coda_setup_iram(struct coda_ctx *ctx)
iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w128);
iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w128);
- if (!iram_info->buf_dbk_c_use)
+ if (!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use)
goto out;
iram_info->axi_sram_use |= dbk_bits;
@@ -1082,10 +1082,16 @@ static int coda_start_encoding(struct coda_ctx *ctx)
}
if (dst_fourcc == V4L2_PIX_FMT_JPEG) {
- if (!ctx->params.jpeg_qmat_tab[0])
+ if (!ctx->params.jpeg_qmat_tab[0]) {
ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL);
- if (!ctx->params.jpeg_qmat_tab[1])
+ if (!ctx->params.jpeg_qmat_tab[0])
+ return -ENOMEM;
+ }
+ if (!ctx->params.jpeg_qmat_tab[1]) {
ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL);
+ if (!ctx->params.jpeg_qmat_tab[1])
+ return -ENOMEM;
+ }
coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality);
}
diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
index 31390ce2dbf2..ae274a7aa3a9 100644
--- a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
+++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
@@ -45,6 +45,8 @@ static void handle_cec_message(struct cros_ec_cec *cros_ec_cec)
uint8_t *cec_message = cros_ec->event_data.data.cec_message;
unsigned int len = cros_ec->event_size;
+ if (len > CEC_MAX_MSG_SIZE)
+ len = CEC_MAX_MSG_SIZE;
cros_ec_cec->rx_msg.len = len;
memcpy(cros_ec_cec->rx_msg.msg, cec_message, len);
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index cde60fbb23a8..5b06c83f5c99 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -1231,7 +1231,7 @@ int __init fimc_register_driver(void)
return platform_driver_register(&fimc_driver);
}
-void __exit fimc_unregister_driver(void)
+void fimc_unregister_driver(void)
{
platform_driver_unregister(&fimc_driver);
}
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 9bb14bb2e498..c78c2a7f03fa 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -214,6 +214,7 @@ static int fimc_is_register_subdevs(struct fimc_is *is)
if (ret < 0 || index >= FIMC_IS_SENSORS_NUM) {
of_node_put(child);
+ of_node_put(i2c_bus);
return ret;
}
index++;
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index a07d796f63df..707feb35a950 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1581,7 +1581,11 @@ static int __init fimc_md_init(void)
if (ret)
return ret;
- return platform_driver_register(&fimc_md_driver);
+ ret = platform_driver_register(&fimc_md_driver);
+ if (ret)
+ fimc_unregister_driver();
+
+ return ret;
}
static void __exit fimc_md_exit(void)
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
index 2cb8cecb3077..b810c96695c8 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
@@ -40,12 +40,14 @@ struct mdp_ipi_init {
* @ipi_id : IPI_MDP
* @ap_inst : AP mtk_mdp_vpu address
* @vpu_inst_addr : VPU MDP instance address
+ * @padding : Alignment padding
*/
struct mdp_ipi_comm {
uint32_t msg_id;
uint32_t ipi_id;
uint64_t ap_inst;
uint32_t vpu_inst_addr;
+ uint32_t padding;
};
/**
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index fd8de027e83e..6117efb425c7 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -759,6 +759,8 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
return -EINVAL;
if (*nplanes) {
+ if (*nplanes != q_data->fmt->num_planes)
+ return -EINVAL;
for (i = 0; i < *nplanes; i++)
if (sizes[i] < q_data->sizeimage[i])
return -EINVAL;
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
index 5066c283d86d..2fd7d913fd64 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -222,10 +222,11 @@ static struct vdec_fb *vp9_rm_from_fb_use_list(struct vdec_vp9_inst
if (fb->base_y.va == addr) {
list_move_tail(&node->list,
&inst->available_fb_node_list);
- break;
+ return fb;
}
}
- return fb;
+
+ return NULL;
}
static void vp9_add_to_fb_free_list(struct vdec_vp9_inst *inst,
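
The old loop broke out on a match and returned fb unconditionally, so a miss handed back the last list entry instead of failing. Returning the match from inside the loop and NULL after it removes that hazard; sketched with a hypothetical node type:

static struct vdec_fb *find_fb(struct list_head *head, void *addr)
{
	struct fb_node *node;	/* hypothetical list wrapper around vdec_fb */

	list_for_each_entry(node, head, list) {
		if (node->fb->base_y.va == addr)
			return node->fb;	/* hit: return immediately */
	}

	return NULL;		/* clean miss; the caller must check */
}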
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index acf64723f938..650e198a270e 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -529,15 +529,17 @@ static int load_requested_vpu(struct mtk_vpu *vpu,
int vpu_load_firmware(struct platform_device *pdev)
{
struct mtk_vpu *vpu;
- struct device *dev = &pdev->dev;
+ struct device *dev;
struct vpu_run *run;
int ret;
if (!pdev) {
- dev_err(dev, "VPU platform device is invalid\n");
+ pr_err("VPU platform device is invalid\n");
return -EINVAL;
}
+ dev = &pdev->dev;
+
vpu = platform_get_drvdata(pdev);
run = &vpu->run;
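
The old code initialised dev from &pdev->dev before the !pdev check, dereferencing a possibly NULL pointer on the very path meant to catch it. Condensed from the hunk above, the corrected shape is:

int vpu_load_firmware(struct platform_device *pdev)
{
	struct device *dev;

	if (!pdev) {
		pr_err("VPU platform device is invalid\n");
		return -EINVAL;	/* no struct device to log against yet */
	}

	dev = &pdev->dev;	/* safe: pdev is known non-NULL here */
	/* ... */
	return 0;
}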
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index dce6b3685e13..88d491a8e326 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -2312,7 +2312,16 @@ static int isp_probe(struct platform_device *pdev)
/* Regulators */
isp->isp_csiphy1.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy1");
+ if (IS_ERR(isp->isp_csiphy1.vdd)) {
+ ret = PTR_ERR(isp->isp_csiphy1.vdd);
+ goto error;
+ }
+
isp->isp_csiphy2.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy2");
+ if (IS_ERR(isp->isp_csiphy2.vdd)) {
+ ret = PTR_ERR(isp->isp_csiphy2.vdd);
+ goto error;
+ }
/* Clocks
*
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index 4c2675b43718..6a5ec133a957 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -438,7 +438,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count)
ret = media_pipeline_start(&vdev->entity, &video->pipe);
if (ret < 0)
- return ret;
+ goto flush_buffers;
ret = video_check_format(video);
if (ret < 0)
@@ -467,6 +467,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count)
error:
media_pipeline_stop(&vdev->entity);
+flush_buffers:
video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED);
return ret;
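
The new flush_buffers label sits below the existing error label, so the early media_pipeline_start() failure skips media_pipeline_stop() but still returns the queued buffers. The general rule is to order cleanup labels in reverse order of setup so that each failure point unwinds exactly what succeeded; a sketch with hypothetical helpers:

static int start_streaming(struct ctx *c)
{
	int ret;

	ret = pipeline_start(c);
	if (ret < 0)
		goto flush;	/* nothing started yet, only flush */

	ret = check_format(c);
	if (ret < 0)
		goto stop;	/* undo pipeline_start() first */

	return 0;

stop:
	pipeline_stop(c);
flush:
	flush_buffers(c);	/* runs on every failure path */
	return ret;
}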
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index 04ef2286efc6..5694d18b43d5 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -350,7 +350,7 @@ session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt,
memcpy(&bufreq[idx], buf_req, sizeof(*bufreq));
idx++;
- if (idx > HFI_BUFFER_TYPE_MAX)
+ if (idx >= HFI_BUFFER_TYPE_MAX)
return HFI_ERR_SESSION_INVALID_PARAMETER;
req_bytes -= sizeof(struct hfi_buffer_requirements);
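
Because idx has already been incremented past the slot just written, idx == HFI_BUFFER_TYPE_MAX means the array is exactly full and the next write would overflow; only >= catches that state. The invariant, sketched with hypothetical helpers:

/* arr has ARRAY_SIZE(arr) slots; reject before writing one too many. */
while (have_more_entries()) {
	if (idx >= ARRAY_SIZE(arr))	/* idx == size: already full */
		return -EINVAL;
	arr[idx++] = next_entry();
}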
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
index 7f515a4b9bd1..ad22b51765d4 100644
--- a/drivers/media/platform/qcom/venus/hfi_parser.c
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -19,6 +19,9 @@ static void init_codecs(struct venus_core *core)
struct venus_caps *caps = core->caps, *cap;
unsigned long bit;
+ if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM)
+ return;
+
for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) {
cap = &caps[core->codecs_count++];
cap->codec = BIT(bit);
@@ -86,6 +89,9 @@ static void fill_profile_level(struct venus_caps *cap, const void *data,
{
const struct hfi_profile_level *pl = data;
+ if (cap->num_pl + num >= HFI_MAX_PROFILE_COUNT)
+ return;
+
memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl));
cap->num_pl += num;
}
@@ -111,6 +117,9 @@ fill_caps(struct venus_caps *cap, const void *data, unsigned int num)
{
const struct hfi_capability *caps = data;
+ if (cap->num_caps + num >= MAX_CAP_ENTRIES)
+ return;
+
memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps));
cap->num_caps += num;
}
@@ -137,6 +146,9 @@ static void fill_raw_fmts(struct venus_caps *cap, const void *fmts,
{
const struct raw_formats *formats = fmts;
+ if (cap->num_fmts + num_fmts >= MAX_FMT_ENTRIES)
+ return;
+
memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats));
cap->num_fmts += num_fmts;
}
@@ -159,6 +171,9 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
rawfmts[i].buftype = fmt->buffer_type;
i++;
+ if (i >= MAX_FMT_ENTRIES)
+ return;
+
if (pinfo->num_planes > MAX_PLANES)
break;
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 0d8855014ab3..306082e25943 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -206,6 +206,11 @@ static int venus_write_queue(struct venus_hfi_device *hdev,
new_wr_idx = wr_idx + dwords;
wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
+
+ if (wr_ptr < (u32 *)queue->qmem.kva ||
+ wr_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*wr_ptr)))
+ return -EINVAL;
+
if (new_wr_idx < qsize) {
memcpy(wr_ptr, packet, dwords << 2);
} else {
@@ -273,6 +278,11 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
}
rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
+
+ if (rd_ptr < (u32 *)queue->qmem.kva ||
+ rd_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*rd_ptr)))
+ return -EINVAL;
+
dwords = *rd_ptr >> 2;
if (!dwords)
return -EINVAL;
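
The queue indices live in memory shared with (and writable by) the firmware, so the pointers derived from them are re-checked against the queue bounds before the first dereference. A condensed sketch with hypothetical field names:

static int validate_queue_ptr(void *kva, size_t size, u32 idx)
{
	u32 *ptr = (u32 *)(kva + (idx << 2));

	if (ptr < (u32 *)kva ||
	    ptr > (u32 *)(kva + size - sizeof(*ptr)))
		return -EINVAL;	/* index from shared memory is corrupt */

	return 0;
}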
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 658825b4c4e8..d1ac1d78c08f 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -157,6 +157,8 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
else
return NULL;
fmt = find_format(inst, pixmp->pixelformat, f->type);
+ if (!fmt)
+ return NULL;
}
pixmp->width = clamp(pixmp->width, frame_width_min(inst),
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index e5f636080108..5d6b7aaee295 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -638,6 +638,7 @@ static int rvin_setup(struct rvin_dev *vin)
vnmc = VNMC_IM_FULL | VNMC_FOC;
break;
case V4L2_FIELD_NONE:
+ case V4L2_FIELD_ALTERNATE:
vnmc = VNMC_IM_ODD_EVEN;
progressive = true;
break;
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 97bed45360f0..af2408b4856e 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -2121,9 +2121,7 @@ static int fdp1_open(struct file *file)
if (ctx->hdl.error) {
ret = ctx->hdl.error;
- v4l2_ctrl_handler_free(&ctx->hdl);
- kfree(ctx);
- goto done;
+ goto error_ctx;
}
ctx->fh.ctrl_handler = &ctx->hdl;
@@ -2137,20 +2135,27 @@ static int fdp1_open(struct file *file)
if (IS_ERR(ctx->fh.m2m_ctx)) {
ret = PTR_ERR(ctx->fh.m2m_ctx);
-
- v4l2_ctrl_handler_free(&ctx->hdl);
- kfree(ctx);
- goto done;
+ goto error_ctx;
}
/* Perform any power management required */
- pm_runtime_get_sync(fdp1->dev);
+ ret = pm_runtime_resume_and_get(fdp1->dev);
+ if (ret < 0)
+ goto error_pm;
v4l2_fh_add(&ctx->fh);
dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n",
ctx, ctx->fh.m2m_ctx);
+ mutex_unlock(&fdp1->dev_mutex);
+ return 0;
+
+error_pm:
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+error_ctx:
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ kfree(ctx);
done:
mutex_unlock(&fdp1->dev_mutex);
return ret;
@@ -2255,7 +2260,6 @@ static int fdp1_probe(struct platform_device *pdev)
struct fdp1_dev *fdp1;
struct video_device *vfd;
struct device_node *fcp_node;
- struct resource *res;
struct clk *clk;
unsigned int i;
@@ -2282,17 +2286,15 @@ static int fdp1_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fdp1);
/* Memory-mapped registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fdp1->regs = devm_ioremap_resource(&pdev->dev, res);
+ fdp1->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fdp1->regs))
return PTR_ERR(fdp1->regs);
/* Interrupt service routine registration */
- fdp1->irq = ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "cannot find IRQ\n");
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
return ret;
- }
+ fdp1->irq = ret;
ret = devm_request_irq(&pdev->dev, fdp1->irq, fdp1_irq_handler, 0,
dev_name(&pdev->dev), fdp1);
@@ -2315,8 +2317,10 @@ static int fdp1_probe(struct platform_device *pdev)
/* Determine our clock rate */
clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto put_dev;
+ }
fdp1->clk_rate = clk_get_rate(clk);
clk_put(clk);
@@ -2325,7 +2329,7 @@ static int fdp1_probe(struct platform_device *pdev)
ret = v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev);
if (ret) {
v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
- return ret;
+ goto put_dev;
}
/* M2M registration */
@@ -2355,7 +2359,9 @@ static int fdp1_probe(struct platform_device *pdev)
/* Power up the cells to read HW */
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(fdp1->dev);
+ ret = pm_runtime_resume_and_get(fdp1->dev);
+ if (ret < 0)
+ goto disable_pm;
hw_version = fdp1_read(fdp1, FD1_IP_INTDATA);
switch (hw_version) {
@@ -2384,12 +2390,17 @@ static int fdp1_probe(struct platform_device *pdev)
return 0;
+disable_pm:
+ pm_runtime_disable(fdp1->dev);
+
release_m2m:
v4l2_m2m_release(fdp1->m2m_dev);
unreg_dev:
v4l2_device_unregister(&fdp1->v4l2_dev);
+put_dev:
+ rcar_fcp_put(fdp1->fcp);
return ret;
}
@@ -2401,6 +2412,7 @@ static int fdp1_remove(struct platform_device *pdev)
video_unregister_device(&fdp1->vfd);
v4l2_device_unregister(&fdp1->v4l2_dev);
pm_runtime_disable(&pdev->dev);
+ rcar_fcp_put(fdp1->fcp);
return 0;
}
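
pm_runtime_get_sync() raises the usage count even when the resume fails, so error paths that simply returned used to leak a reference. pm_runtime_resume_and_get() drops the count itself on failure, which is why both hunks above only need a plain error check. The conversion, as a fragment:

/* Before (leak-prone): count stays raised even if the resume failed.
 *	pm_runtime_get_sync(dev);
 * After: the helper drops the count on error by itself.
 */
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
	goto err_unwind;	/* no pm_runtime_put() needed for this failure */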
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index e9ff12b6b5bb..302da68075b2 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -187,7 +187,7 @@ static int rga_setup_ctrls(struct rga_ctx *ctx)
static struct rga_fmt formats[] = {
{
.fourcc = V4L2_PIX_FMT_ARGB32,
- .color_swap = RGA_COLOR_RB_SWAP,
+ .color_swap = RGA_COLOR_ALPHA_SWAP,
.hw_format = RGA_COLOR_FMT_ABGR8888,
.depth = 32,
.uv_factor = 1,
@@ -195,17 +195,8 @@ static struct rga_fmt formats[] = {
.x_div = 1,
},
{
- .fourcc = V4L2_PIX_FMT_XRGB32,
- .color_swap = RGA_COLOR_RB_SWAP,
- .hw_format = RGA_COLOR_FMT_XBGR8888,
- .depth = 32,
- .uv_factor = 1,
- .y_div = 1,
- .x_div = 1,
- },
- {
.fourcc = V4L2_PIX_FMT_ABGR32,
- .color_swap = RGA_COLOR_ALPHA_SWAP,
+ .color_swap = RGA_COLOR_RB_SWAP,
.hw_format = RGA_COLOR_FMT_ABGR8888,
.depth = 32,
.uv_factor = 1,
@@ -214,7 +205,7 @@ static struct rga_fmt formats[] = {
},
{
.fourcc = V4L2_PIX_FMT_XBGR32,
- .color_swap = RGA_COLOR_ALPHA_SWAP,
+ .color_swap = RGA_COLOR_RB_SWAP,
.hw_format = RGA_COLOR_FMT_XBGR8888,
.depth = 32,
.uv_factor = 1,
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 2fb45db8e4ba..d24ef08633ab 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -1132,12 +1132,12 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
ret = vb2_queue_init(q);
if (ret)
- goto err_vd_rel;
+ return ret;
vp->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vfd->entity, 1, &vp->pad);
if (ret)
- goto err_vd_rel;
+ return ret;
video_set_drvdata(vfd, vp);
@@ -1170,8 +1170,6 @@ err_ctrlh_free:
v4l2_ctrl_handler_free(&vp->ctrl_handler);
err_me_cleanup:
media_entity_cleanup(&vfd->entity);
-err_vd_rel:
- video_device_release(vfd);
return ret;
}
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
index 828792b854f5..0c668d4a3daa 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.c
+++ b/drivers/media/platform/s5p-cec/s5p_cec.c
@@ -115,6 +115,8 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n");
cec->rx = STATE_BUSY;
cec->msg.len = status >> 24;
+ if (cec->msg.len > CEC_MAX_MSG_SIZE)
+ cec->msg.len = CEC_MAX_MSG_SIZE;
cec->msg.rx_status = CEC_RX_STATUS_OK;
s5p_cec_get_rx_buf(cec, cec->msg.len,
cec->msg.msg);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 9faecd049002..d3fd3375ce19 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1580,8 +1580,18 @@ static struct s5p_mfc_variant mfc_drvdata_v7 = {
.port_num = MFC_NUM_PORTS_V7,
.buf_size = &buf_size_v7,
.fw_name[0] = "s5p-mfc-v7.fw",
- .clk_names = {"mfc", "sclk_mfc"},
- .num_clocks = 2,
+ .clk_names = {"mfc"},
+ .num_clocks = 1,
+};
+
+static struct s5p_mfc_variant mfc_drvdata_v7_3250 = {
+ .version = MFC_VERSION_V7,
+ .version_bit = MFC_V7_BIT,
+ .port_num = MFC_NUM_PORTS_V7,
+ .buf_size = &buf_size_v7,
+ .fw_name[0] = "s5p-mfc-v7.fw",
+ .clk_names = {"mfc", "sclk_mfc"},
+ .num_clocks = 2,
};
static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
@@ -1652,6 +1662,9 @@ static const struct of_device_id exynos_mfc_match[] = {
.compatible = "samsung,mfc-v7",
.data = &mfc_drvdata_v7,
}, {
+ .compatible = "samsung,exynos3250-mfc",
+ .data = &mfc_drvdata_v7_3250,
+ }, {
.compatible = "samsung,mfc-v8",
.data = &mfc_drvdata_v8,
}, {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index da138c314963..58822ec5370e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -468,8 +468,10 @@ void s5p_mfc_close_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx)
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
/* Wait until instance is returned or timeout occurred */
if (s5p_mfc_wait_for_done_ctx(ctx,
- S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0))
+			S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) {
+ clear_work_bit_irqsave(ctx);
mfc_err("Err returning instance\n");
+ }
/* Free resources */
s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 912fe0c5ab18..6ed3df5ae5bb 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -1212,6 +1212,7 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
unsigned long mb_y_addr, mb_c_addr;
int slice_type;
unsigned int strm_size;
+ bool src_ready;
slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev);
strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev);
@@ -1251,7 +1252,8 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
}
}
}
- if ((ctx->src_queue_cnt > 0) && (ctx->state == MFCINST_RUNNING)) {
+ if (ctx->src_queue_cnt > 0 && (ctx->state == MFCINST_RUNNING ||
+ ctx->state == MFCINST_FINISHING)) {
mb_entry = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
list);
if (mb_entry->flags & MFC_BUF_FLAG_USED) {
@@ -1282,7 +1284,13 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, strm_size);
vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE);
}
- if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0))
+
+ src_ready = true;
+ if (ctx->state == MFCINST_RUNNING && ctx->src_queue_cnt == 0)
+ src_ready = false;
+ if (ctx->state == MFCINST_FINISHING && ctx->ref_queue_cnt == 0)
+ src_ready = false;
+ if (!src_ready || ctx->dst_queue_cnt == 0)
clear_work_bit(ctx);
return 0;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index a1453053e31a..ef8169f6c428 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -1060,7 +1060,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
}
/* aspect ratio VUI */
- readl(mfc_regs->e_h264_options);
+ reg = readl(mfc_regs->e_h264_options);
reg &= ~(0x1 << 5);
reg |= ((p_h264->vui_sar & 0x1) << 5);
writel(reg, mfc_regs->e_h264_options);
@@ -1083,7 +1083,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
/* intra picture period for H.264 open GOP */
/* control */
- readl(mfc_regs->e_h264_options);
+ reg = readl(mfc_regs->e_h264_options);
reg &= ~(0x1 << 4);
reg |= ((p_h264->open_gop & 0x1) << 4);
writel(reg, mfc_regs->e_h264_options);
@@ -1097,23 +1097,23 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
}
/* 'WEIGHTED_BI_PREDICTION' for B is disable */
- readl(mfc_regs->e_h264_options);
+ reg = readl(mfc_regs->e_h264_options);
reg &= ~(0x3 << 9);
writel(reg, mfc_regs->e_h264_options);
/* 'CONSTRAINED_INTRA_PRED_ENABLE' is disable */
- readl(mfc_regs->e_h264_options);
+ reg = readl(mfc_regs->e_h264_options);
reg &= ~(0x1 << 14);
writel(reg, mfc_regs->e_h264_options);
/* ASO */
- readl(mfc_regs->e_h264_options);
+ reg = readl(mfc_regs->e_h264_options);
reg &= ~(0x1 << 6);
reg |= ((p_h264->aso & 0x1) << 6);
writel(reg, mfc_regs->e_h264_options);
/* hier qp enable */
- readl(mfc_regs->e_h264_options);
+ reg = readl(mfc_regs->e_h264_options);
reg &= ~(0x1 << 8);
reg |= ((p_h264->open_gop & 0x1) << 8);
writel(reg, mfc_regs->e_h264_options);
@@ -1134,7 +1134,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
writel(reg, mfc_regs->e_h264_num_t_layer);
/* frame packing SEI generation */
- readl(mfc_regs->e_h264_options);
+ reg = readl(mfc_regs->e_h264_options);
reg &= ~(0x1 << 25);
reg |= ((p_h264->sei_frame_packing & 0x1) << 25);
writel(reg, mfc_regs->e_h264_options);
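
Every hunk in this file fixes the same bug: readl() was issued but its result discarded, so the mask-and-set that followed operated on whatever stale value reg happened to hold. The intended read-modify-write sequence, with val standing in for the control flag being programmed:

reg = readl(mfc_regs->e_h264_options);	/* read the current value */
reg &= ~(0x1 << 5);			/* clear the target field */
reg |= (val & 0x1) << 5;		/* insert the new value */
writel(reg, mfc_regs->e_h264_options);	/* write the result back */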
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 16a097f93b42..2485a3657c89 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -1308,6 +1308,8 @@ static int bdisp_probe(struct platform_device *pdev)
init_waitqueue_head(&bdisp->irq_queue);
INIT_DELAYED_WORK(&bdisp->timeout_work, bdisp_irq_timeout);
bdisp->work_queue = create_workqueue(BDISP_NAME);
+ if (!bdisp->work_queue)
+ return -ENOMEM;
spin_lock_init(&bdisp->slock);
mutex_init(&bdisp->lock);
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 5baada4f65e5..69070b706831 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -939,6 +939,7 @@ static int configure_channels(struct c8sectpfei *fei)
if (ret) {
dev_err(fei->dev,
"configure_memdma_and_inputblock failed\n");
+ of_node_put(child);
goto err_unmap;
}
index++;
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index cc71aa425597..f6dc2b69b62e 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -297,6 +297,28 @@ static int vidioc_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a
return vivid_vid_out_g_fbuf(file, fh, a);
}
+/*
+ * Only support the framebuffer of one of the vivid instances.
+ * Anything else is rejected.
+ */
+bool vivid_validate_fb(const struct v4l2_framebuffer *a)
+{
+ struct vivid_dev *dev;
+ int i;
+
+ for (i = 0; i < n_devs; i++) {
+ dev = vivid_devs[i];
+ if (!dev || !dev->video_pbase)
+ continue;
+ if ((unsigned long)a->base == dev->video_pbase &&
+ a->fmt.width <= dev->display_width &&
+ a->fmt.height <= dev->display_height &&
+ a->fmt.bytesperline <= dev->display_byte_stride)
+ return true;
+ }
+ return false;
+}
+
static int vidioc_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 7ebb14673c75..0ab4051327d6 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -564,4 +564,6 @@ static inline bool vivid_is_hdmi_out(const struct vivid_dev *dev)
return dev->output_type[dev->output] == HDMI;
}
+bool vivid_validate_fb(const struct v4l2_framebuffer *a);
+
#endif
diff --git a/drivers/media/platform/vivid/vivid-rds-gen.c b/drivers/media/platform/vivid/vivid-rds-gen.c
index b5b104ee64c9..c57771119a34 100644
--- a/drivers/media/platform/vivid/vivid-rds-gen.c
+++ b/drivers/media/platform/vivid/vivid-rds-gen.c
@@ -145,7 +145,7 @@ void vivid_rds_gen_fill(struct vivid_rds_gen *rds, unsigned freq,
rds->ta = alt;
rds->ms = true;
snprintf(rds->psname, sizeof(rds->psname), "%6d.%1d",
- freq / 16, ((freq & 0xf) * 10) / 16);
+ (freq / 16) % 1000000, (((freq & 0xf) * 10) / 16) % 10);
if (alt)
strscpy(rds->radiotext,
" The Radio Data System can switch between different Radio Texts ",
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 2d030732feac..842ebfe9b117 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -452,6 +452,12 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
dev->crop_cap = dev->src_rect;
dev->crop_bounds_cap = dev->src_rect;
+ if (dev->bitmap_cap &&
+ (dev->compose_cap.width != dev->crop_cap.width ||
+ dev->compose_cap.height != dev->crop_cap.height)) {
+ vfree(dev->bitmap_cap);
+ dev->bitmap_cap = NULL;
+ }
dev->compose_cap = dev->crop_cap;
if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
dev->compose_cap.height /= 2;
@@ -883,6 +889,8 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_rect *crop = &dev->crop_cap;
struct v4l2_rect *compose = &dev->compose_cap;
+ unsigned orig_compose_w = compose->width;
+ unsigned orig_compose_h = compose->height;
unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
int ret;
@@ -927,6 +935,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
if (dev->has_compose_cap) {
v4l2_rect_set_min_size(compose, &min_rect);
v4l2_rect_set_max_size(compose, &max_rect);
+ v4l2_rect_map_inside(compose, &fmt);
}
dev->fmt_cap_rect = fmt;
tpg_s_buf_height(&dev->tpg, fmt.height);
@@ -999,17 +1008,17 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
s->r.height /= factor;
}
v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
- if (dev->bitmap_cap && (compose->width != s->r.width ||
- compose->height != s->r.height)) {
- vfree(dev->bitmap_cap);
- dev->bitmap_cap = NULL;
- }
*compose = s->r;
break;
default:
return -EINVAL;
}
+ if (dev->bitmap_cap && (compose->width != orig_compose_w ||
+ compose->height != orig_compose_h)) {
+ vfree(dev->bitmap_cap);
+ dev->bitmap_cap = NULL;
+ }
tpg_s_crop_compose(&dev->tpg, crop, compose);
return 0;
}
@@ -1250,7 +1259,14 @@ int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
return -EINVAL;
if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
return -EINVAL;
- if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
+ if (a->fmt.bytesperline > a->fmt.sizeimage / a->fmt.height)
+ return -EINVAL;
+
+ /*
+ * Only support the framebuffer of one of the vivid instances.
+ * Anything else is rejected.
+ */
+ if (!vivid_validate_fb(a))
return -EINVAL;
dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);
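
The rewritten check requires sizeimage to cover height full lines of bytesperline and, because it divides instead of multiplying, cannot be defeated by a 32-bit overflow of height * bytesperline (e.g. height = bytesperline = 0x10000 wraps the product to 0). Integer division makes the test conservative rather than exact, which errs on the side of rejection. A sketch of the guard, written against struct v4l2_pix_format, which carries the same fields:

/* Overflow-safe variant of: height * bytesperline <= sizeimage */
static bool fb_geometry_ok(const struct v4l2_pix_format *fmt)
{
	if (!fmt->height)
		return false;	/* avoid division by zero */

	return fmt->bytesperline <= fmt->sizeimage / fmt->height;
}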
diff --git a/drivers/media/platform/xilinx/Kconfig b/drivers/media/platform/xilinx/Kconfig
index a2773ad7c185..4420e117b1f4 100644
--- a/drivers/media/platform/xilinx/Kconfig
+++ b/drivers/media/platform/xilinx/Kconfig
@@ -4,23 +4,154 @@ config VIDEO_XILINX
tristate "Xilinx Video IP (EXPERIMENTAL)"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && HAS_DMA
select VIDEOBUF2_DMA_CONTIG
+ select DMADEVICES
+ select XILINX_FRMBUF
select V4L2_FWNODE
help
Driver for Xilinx Video IP Pipelines
if VIDEO_XILINX
+config VIDEO_XILINX_AXI4S_SWITCH
+ tristate "Xilinx AXI4-Stream Video Switch"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx AXI4-Stream Video Switch. This is a
+	  V4L sub-device based driver. It supports fixed (TDEST based)
+ as well as dynamic (control register based) routing.
+ Say M to modularize. Say N if unsure.
+
+config VIDEO_XILINX_CFA
+ tristate "Xilinx Video Color Filter Array"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Color Filter Array
+
+config VIDEO_XILINX_CRESAMPLE
+ tristate "Xilinx Video Chroma Resampler"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Chroma Resampler
+
+config VIDEO_XILINX_DEMOSAIC
+ tristate "Xilinx Video Demosaic IP"
+ depends on VIDEO_XILINX
+ ---help---
+	  Driver for the Xilinx Video Demosaic IP. This is a V4L sub-device
+	  based driver for the Demosaic IP that takes a Bayer video stream
+	  as input and generates an RGB video stream as output.
+ Say M to modularize. Say N if unsure.
+
+config VIDEO_XILINX_GAMMA
+ tristate "Xilinx Gamma Correction LUT"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for Xilinx Gamma Correction LUT IP. This is a V4L sub-device
+ based driver that exposes V4L controls to adjust Red, Blue and Green
+ Gamma Correction.
+
+ Say M to modularize. Say N if unsure.
+
+config VIDEO_XILINX_HLS
+ tristate "Xilinx Video HLS Core"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video HLS Cores
+
+config VIDEO_XILINX_REMAPPER
+ tristate "Xilinx Video Remapper"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video Remapper
+
+config VIDEO_XILINX_RGB2YUV
+	tristate "Xilinx Video RGB to YUV Converter"
+	depends on VIDEO_XILINX
+	---help---
+	  Driver for the Xilinx Video RGB to YUV Converter
+
+config VIDEO_XILINX_SCALER
+ tristate "Xilinx Video Scaler"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video Scaler
+
+config VIDEO_XILINX_MULTISCALER
+ tristate "Xilinx Video Multiscaler"
+ depends on VIDEO_XILINX
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select V4L2_MEM2MEM_DEV
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+	  Driver for the Xilinx Video Multi-Scaler. This is a V4L2
+	  memory-to-memory based driver. The Multi-Scaler has up to 8
+	  channels, each of which can be programmed with a different
+	  scaling ratio.
+
+config VIDEO_XILINX_SDIRXSS
+ tristate "Xilinx SDI Rx Subsystem"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for Xilinx SDI Rx Subsystem
+
+config VIDEO_XILINX_SWITCH
+ tristate "Xilinx Video Switch"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video Switch
+
config VIDEO_XILINX_TPG
tristate "Xilinx Video Test Pattern Generator"
depends on VIDEO_XILINX
select VIDEO_XILINX_VTC
help
- Driver for the Xilinx Video Test Pattern Generator
+ Driver for the Xilinx Video Test Pattern Generator
+
+config VIDEO_XILINX_VPSS_CSC
+ tristate "Xilinx VPSS CSC"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video Processing Sub-System (VPSS)
+	  Color Space Conversion. The driver provides RGB to YUV444
+	  conversion and exposes video controls such as brightness,
+	  contrast and color gains that can be applied to the video.
+	  Say M to modularize. Say N if unsure.
+
+config VIDEO_XILINX_VPSS_SCALER
+ tristate "Xilinx Video VPSS Scaler"
+ depends on VIDEO_XILINX
+ ---help---
+	  Driver for the Xilinx Video Processing Sub-System (VPSS) Scaler.
+ It allows upscaling and downscaling of video. It also supports
+ limited Color Space Conversion.
+ Say N if unsure.
config VIDEO_XILINX_VTC
tristate "Xilinx Video Timing Controller"
depends on VIDEO_XILINX
help
- Driver for the Xilinx Video Timing Controller
+ Driver for the Xilinx Video Timing Controller
+
+config VIDEO_XILINX_CSI2RXSS
+ tristate "Xilinx CSI2 Rx Subsystem"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for Xilinx MIPI CSI2 Rx Subsystem
+
+config VIDEO_XILINX_SCD
+ tristate "Xilinx Scene Change Detect"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for Xilinx Scene Change Detection Controller.
+	  The driver lets applications queue video buffers and reports
+	  whether a scene change occurred between adjacent frames.
+
+config VIDEO_XILINX_M2M
+ tristate "Xilinx Video mem2mem"
+ depends on VIDEO_XILINX
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ Driver for Xilinx V4L2 mem2mem pipeline operation to achieve memory
+ copy between two different physical memories using DMA transfers.
endif #VIDEO_XILINX
diff --git a/drivers/media/platform/xilinx/Makefile b/drivers/media/platform/xilinx/Makefile
index 4cdc0b1ec7a5..f7838c307405 100644
--- a/drivers/media/platform/xilinx/Makefile
+++ b/drivers/media/platform/xilinx/Makefile
@@ -1,7 +1,26 @@
# SPDX-License-Identifier: GPL-2.0
xilinx-video-objs += xilinx-dma.o xilinx-vip.o xilinx-vipp.o
+xilinx-scd-objs += xilinx-scenechange.o xilinx-scenechange-channel.o \
+ xilinx-scenechange-dma.o
obj-$(CONFIG_VIDEO_XILINX) += xilinx-video.o
+obj-$(CONFIG_VIDEO_XILINX_AXI4S_SWITCH) += xilinx-axis-switch.o
+obj-$(CONFIG_VIDEO_XILINX_CFA) += xilinx-cfa.o
+obj-$(CONFIG_VIDEO_XILINX_CRESAMPLE) += xilinx-cresample.o
+obj-$(CONFIG_VIDEO_XILINX_CSI2RXSS) += xilinx-csi2rxss.o
+obj-$(CONFIG_VIDEO_XILINX_DEMOSAIC) += xilinx-demosaic.o
+obj-$(CONFIG_VIDEO_XILINX_GAMMA) += xilinx-gamma.o
+obj-$(CONFIG_VIDEO_XILINX_HLS) += xilinx-hls.o
+obj-$(CONFIG_VIDEO_XILINX_M2M) += xilinx-m2m.o
+obj-$(CONFIG_VIDEO_XILINX_MULTISCALER) += xilinx-multi-scaler.o
+obj-$(CONFIG_VIDEO_XILINX_REMAPPER) += xilinx-remapper.o
+obj-$(CONFIG_VIDEO_XILINX_RGB2YUV) += xilinx-rgb2yuv.o
+obj-$(CONFIG_VIDEO_XILINX_SCALER) += xilinx-scaler.o
+obj-$(CONFIG_VIDEO_XILINX_SCD) += xilinx-scd.o
+obj-$(CONFIG_VIDEO_XILINX_SDIRXSS) += xilinx-sdirxss.o
+obj-$(CONFIG_VIDEO_XILINX_SWITCH) += xilinx-switch.o
obj-$(CONFIG_VIDEO_XILINX_TPG) += xilinx-tpg.o
+obj-$(CONFIG_VIDEO_XILINX_VPSS_CSC) += xilinx-vpss-csc.o
+obj-$(CONFIG_VIDEO_XILINX_VPSS_SCALER) += xilinx-vpss-scaler.o
obj-$(CONFIG_VIDEO_XILINX_VTC) += xilinx-vtc.o
diff --git a/drivers/media/platform/xilinx/xilinx-axis-switch.c b/drivers/media/platform/xilinx/xilinx-axis-switch.c
new file mode 100644
index 000000000000..3963e364570a
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-axis-switch.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx AXI4-Stream Video Switch
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Author: Vishal Sagar <vishal.sagar@xilinx.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XVSW_CTRL_REG 0x00
+#define XVSW_CTRL_REG_UPDATE_MASK BIT(1)
+
+#define XVSW_MI_MUX_REG_BASE 0x40
+#define XVSW_MI_MUX_VAL_MASK 0xF
+#define XVSW_MI_MUX_DISABLE_MASK BIT(31)
+
+#define MIN_VSW_SINKS 1
+#define MAX_VSW_SINKS 16
+#define MIN_VSW_SRCS 1
+#define MAX_VSW_SRCS 16
+
+/**
+ * struct xvswitch_device - Xilinx AXI4-Stream Switch device structure
+ * @dev: Platform structure
+ * @iomem: Base address of IP
+ * @subdev: The v4l2 subdev structure
+ * @pads: media pads
+ * @routing: sink pad connected to each source pad (-1 if none)
+ * @formats: active V4L2 media bus formats on sink pads
+ * @nsinks: number of sink pads (1 to 16)
+ * @nsources: number of source pads (1 to 16)
+ * @tdest_routing: Whether TDEST routing is enabled
+ * @aclk: Video clock
+ * @saxi_ctlclk: AXI-Lite control clock
+ */
+struct xvswitch_device {
+ struct device *dev;
+ void __iomem *iomem;
+ struct v4l2_subdev subdev;
+ struct media_pad *pads;
+ int routing[MAX_VSW_SRCS];
+ struct v4l2_mbus_framefmt *formats;
+ u32 nsinks;
+ u32 nsources;
+ bool tdest_routing;
+ struct clk *aclk;
+ struct clk *saxi_ctlclk;
+};
+
+static inline struct xvswitch_device *to_xvsw(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xvswitch_device, subdev);
+}
+
+static inline u32 xvswitch_read(struct xvswitch_device *xvsw, u32 addr)
+{
+ return ioread32(xvsw->iomem + addr);
+}
+
+static inline void xvswitch_write(struct xvswitch_device *xvsw, u32 addr,
+ u32 value)
+{
+ iowrite32(value, xvsw->iomem + addr);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xvsw_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xvswitch_device *xvsw = to_xvsw(subdev);
+ unsigned int i;
+
+ /* Nothing to be done in case of TDEST routing */
+ if (xvsw->tdest_routing)
+ return 0;
+
+ if (!enable) {
+ /* In control reg routing, disable all master ports */
+ for (i = 0; i < xvsw->nsources; i++) {
+ xvswitch_write(xvsw, XVSW_MI_MUX_REG_BASE + (i * 4),
+ XVSW_MI_MUX_DISABLE_MASK);
+ }
+ xvswitch_write(xvsw, XVSW_CTRL_REG, XVSW_CTRL_REG_UPDATE_MASK);
+ return 0;
+ }
+
+ /*
+ * In case of control reg routing,
+ * from routing table write the values into respective reg
+ * and enable
+ */
+ for (i = 0; i < MAX_VSW_SRCS; i++) {
+ u32 val;
+
+ if (xvsw->routing[i] != -1)
+ val = xvsw->routing[i];
+ else
+ val = XVSW_MI_MUX_DISABLE_MASK;
+
+ xvswitch_write(xvsw, XVSW_MI_MUX_REG_BASE + (i * 4),
+ val);
+ }
+
+ xvswitch_write(xvsw, XVSW_CTRL_REG, XVSW_CTRL_REG_UPDATE_MASK);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+xvsw_get_pad_format(struct xvswitch_device *xvsw,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xvsw->subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xvsw->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xvsw_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xvswitch_device *xvsw = to_xvsw(subdev);
+ int pad = fmt->pad;
+
+ /*
+ * If control reg routing and pad is source pad then
+ * get corresponding sink pad. if no sink pad then
+ * clear the format and return
+ */
+
+ if (!xvsw->tdest_routing && pad >= xvsw->nsinks) {
+ pad = xvsw->routing[pad - xvsw->nsinks];
+ if (pad < 0) {
+ memset(&fmt->format, 0, sizeof(fmt->format));
+ return 0;
+ }
+ }
+
+ fmt->format = *xvsw_get_pad_format(xvsw, cfg, pad, fmt->which);
+
+ return 0;
+}
+
+static int xvsw_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xvswitch_device *xvsw = to_xvsw(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ if (!xvsw->tdest_routing && fmt->pad >= xvsw->nsinks) {
+ /*
+ * In case of control reg routing,
+ * get the corresponding sink pad to source pad passed.
+ *
+ * The source pad format is always identical to the
+ * sink pad format and can't be modified.
+ *
+ * If sink pad found then get_format for that pad
+ * else clear the fmt->format as the source pad
+ * isn't connected and return.
+ */
+ return xvsw_get_format(subdev, cfg, fmt);
+ }
+
+ /*
+ * In TDEST routing mode, one can set any format on the pad as
+	 * In TDEST routing mode, any format may be set on any pad, since
+	 * there is no way to tell which pad's data will travel to which
+	 * pad. E.g. in a system with 2 slaves and 4 masters, S0 or S1
+	 * data can reach M0 through M3 based on TDEST: S0 may carry RGB
+	 * and S1 YUV, with M0 and M1 streaming RGB and M2 and M3
+	 * streaming YUV, all selected by TDEST.
+ * In Control reg routing mode, set format only for sink pads.
+ */
+ format = xvsw_get_pad_format(xvsw, cfg, fmt->pad, fmt->which);
+
+ format->code = fmt->format.code;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XVIP_MIN_WIDTH, XVIP_MAX_WIDTH);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XVIP_MIN_HEIGHT, XVIP_MAX_HEIGHT);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+static int xvsw_get_routing(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_routing *route)
+{
+ struct xvswitch_device *xvsw = to_xvsw(subdev);
+ unsigned int i;
+ u32 min;
+
+ /* In case of tdest routing, we can't get routing */
+ if (xvsw->tdest_routing)
+ return -EINVAL;
+
+ mutex_lock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+ if (xvsw->nsources < route->num_routes)
+ min = xvsw->nsources;
+ else
+ min = route->num_routes;
+
+ for (i = 0; i < min; ++i) {
+ route->routes[i].sink = xvsw->routing[i];
+ route->routes[i].source = i;
+ }
+
+ route->num_routes = xvsw->nsources;
+
+ mutex_unlock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+ return 0;
+}
+
+static int xvsw_set_routing(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_routing *route)
+{
+ struct xvswitch_device *xvsw = to_xvsw(subdev);
+ unsigned int i;
+ int ret = 0;
+
+ /* In case of tdest routing, we can't set routing */
+ if (xvsw->tdest_routing)
+ return -EINVAL;
+
+ mutex_lock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+ if (subdev->entity.stream_count) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ for (i = 0; i < xvsw->nsources; ++i)
+ xvsw->routing[i] = -1;
+
+ for (i = 0; i < route->num_routes; ++i)
+ xvsw->routing[route->routes[i].source - xvsw->nsinks] =
+ route->routes[i].sink;
+
+done:
+ mutex_unlock(&subdev->entity.graph_obj.mdev->graph_mutex);
+ return ret;
+}
+
+static int xvsw_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static int xvsw_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops xvsw_video_ops = {
+ .s_stream = xvsw_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xvsw_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xvsw_get_format,
+ .set_fmt = xvsw_set_format,
+ .get_routing = xvsw_get_routing,
+ .set_routing = xvsw_set_routing,
+};
+
+static struct v4l2_subdev_ops xvsw_ops = {
+ .video = &xvsw_video_ops,
+ .pad = &xvsw_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xvsw_internal_ops = {
+ .open = xvsw_open,
+ .close = xvsw_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static bool xvsw_has_route(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1)
+{
+ struct xvswitch_device *xvsw =
+ container_of(entity, struct xvswitch_device, subdev.entity);
+ unsigned int sink0, sink1;
+
+ /* Two sinks are never connected together. */
+ if (pad0 < xvsw->nsinks && pad1 < xvsw->nsinks)
+ return false;
+
+ /* In TDEST routing, assume all sinks and sources are connected */
+ if (xvsw->tdest_routing)
+ return true;
+
+ sink0 = pad0 < xvsw->nsinks ? pad0 : xvsw->routing[pad0 - xvsw->nsinks];
+ sink1 = pad1 < xvsw->nsinks ? pad1 : xvsw->routing[pad1 - xvsw->nsinks];
+
+ return sink0 == sink1;
+}
+
+static const struct media_entity_operations xvsw_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+ .has_route = xvsw_has_route,
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xvsw_parse_of(struct xvswitch_device *xvsw)
+{
+ struct device_node *node = xvsw->dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ unsigned int nports = 0;
+ u32 routing_mode;
+ int ret;
+
+ ret = of_property_read_u32(node, "xlnx,num-si-slots", &xvsw->nsinks);
+ if (ret < 0 || xvsw->nsinks < MIN_VSW_SINKS ||
+ xvsw->nsinks > MAX_VSW_SINKS) {
+ dev_err(xvsw->dev, "missing or invalid xlnx,num-si-slots property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-mi-slots", &xvsw->nsources);
+ if (ret < 0 || xvsw->nsources < MIN_VSW_SRCS ||
+ xvsw->nsources > MAX_VSW_SRCS) {
+ dev_err(xvsw->dev, "missing or invalid xlnx,num-mi-slots property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,routing-mode", &routing_mode);
+	if (ret < 0 || routing_mode > 1) {
+		dev_err(xvsw->dev, "missing or invalid xlnx,routing-mode property\n");
+ return ret;
+ }
+
+ if (!routing_mode)
+ xvsw->tdest_routing = true;
+
+ xvsw->aclk = devm_clk_get(xvsw->dev, "aclk");
+ if (IS_ERR(xvsw->aclk)) {
+ ret = PTR_ERR(xvsw->aclk);
+		dev_err(xvsw->dev, "failed to get aclk (%d)\n", ret);
+ return ret;
+ }
+
+ if (!xvsw->tdest_routing) {
+ xvsw->saxi_ctlclk = devm_clk_get(xvsw->dev,
+ "s_axi_ctl_clk");
+ if (IS_ERR(xvsw->saxi_ctlclk)) {
+ ret = PTR_ERR(xvsw->saxi_ctlclk);
+ dev_err(xvsw->dev,
+ "failed to get s_axi_ctl_clk (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (xvsw->tdest_routing && xvsw->nsinks > 1) {
+		dev_err(xvsw->dev, "driver supports at most 1 sink in TDEST routing mode (got %d)\n",
+			xvsw->nsinks);
+ return -EINVAL;
+ }
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+
+ for_each_child_of_node(ports, port) {
+ struct device_node *endpoint;
+
+ if (!port->name || of_node_cmp(port->name, "port"))
+ continue;
+
+		endpoint = of_get_next_child(port, NULL);
+		if (!endpoint) {
+			dev_err(xvsw->dev, "port %pOF has no endpoint\n", port);
+			of_node_put(port);
+			return -EINVAL;
+		}
+		of_node_put(endpoint);
+
+ /* Count the number of ports. */
+ nports++;
+ }
+
+ /* validate number of ports */
+ if (nports != (xvsw->nsinks + xvsw->nsources)) {
+ dev_err(xvsw->dev, "invalid number of ports %u\n", nports);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int xvsw_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct xvswitch_device *xvsw;
+ struct resource *res;
+ unsigned int npads;
+ unsigned int i, padcount;
+ int ret;
+
+ xvsw = devm_kzalloc(&pdev->dev, sizeof(*xvsw), GFP_KERNEL);
+ if (!xvsw)
+ return -ENOMEM;
+
+ xvsw->dev = &pdev->dev;
+
+ ret = xvsw_parse_of(xvsw);
+ if (ret < 0)
+ return ret;
+
+ /* ioremap only if control reg based routing */
+ if (!xvsw->tdest_routing) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xvsw->iomem = devm_ioremap_resource(xvsw->dev, res);
+ if (IS_ERR(xvsw->iomem))
+ return PTR_ERR(xvsw->iomem);
+ }
+
+ /*
+	 * Initialize V4L2 subdevice and media entity. The pad array holds
+	 * all sink pads first, followed by all source pads.
+ */
+ npads = xvsw->nsinks + xvsw->nsources;
+ xvsw->pads = devm_kzalloc(&pdev->dev, npads * sizeof(*xvsw->pads),
+ GFP_KERNEL);
+ if (!xvsw->pads)
+ return -ENOMEM;
+
+ for (i = 0; i < xvsw->nsinks; ++i)
+ xvsw->pads[i].flags = MEDIA_PAD_FL_SINK;
+
+ for (; i < npads; ++i)
+ xvsw->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ padcount = xvsw->tdest_routing ? npads : xvsw->nsinks;
+
+ /*
+	 * In TDEST routing mode, allocate one format per pad, since a
+	 * source pad's format must match one of the sink pads.
+	 *
+	 * Otherwise allocate formats only for the sinks: each source
+	 * mirrors the format of the sink it is routed from, so get/set
+	 * format on a source pad operates on the corresponding sink.
+ */
+ xvsw->formats = devm_kzalloc(&pdev->dev,
+ padcount * sizeof(*xvsw->formats),
+ GFP_KERNEL);
+ if (!xvsw->formats) {
+ dev_err(xvsw->dev, "No memory to allocate formats!\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < padcount; i++) {
+ xvsw->formats[i].code = MEDIA_BUS_FMT_RGB888_1X24;
+ xvsw->formats[i].field = V4L2_FIELD_NONE;
+ xvsw->formats[i].colorspace = V4L2_COLORSPACE_SRGB;
+ xvsw->formats[i].width = XVIP_MAX_WIDTH;
+ xvsw->formats[i].height = XVIP_MAX_HEIGHT;
+ }
+
+ /*
+	 * Initialize the routing table with no connections. The table
+	 * is only used when routing is not TDEST based.
+ */
+ for (i = 0; i < MAX_VSW_SRCS; ++i)
+ xvsw->routing[i] = -1;
+
+ ret = clk_prepare_enable(xvsw->aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable aclk (%d)\n",
+ ret);
+ return ret;
+ }
+
+ if (!xvsw->tdest_routing) {
+ ret = clk_prepare_enable(xvsw->saxi_ctlclk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable s_axi_ctl_clk (%d)\n",
+ ret);
+ clk_disable_unprepare(xvsw->aclk);
+ return ret;
+ }
+ }
+
+ subdev = &xvsw->subdev;
+ v4l2_subdev_init(subdev, &xvsw_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xvsw_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xvsw);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ subdev->entity.ops = &xvsw_media_ops;
+
+ ret = media_entity_pads_init(&subdev->entity, npads, xvsw->pads);
+ if (ret < 0)
+ goto clk_error;
+
+ platform_set_drvdata(pdev, xvsw);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ dev_info(xvsw->dev, "Xilinx AXI4-Stream Switch found!\n");
+
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+clk_error:
+ if (!xvsw->tdest_routing)
+ clk_disable_unprepare(xvsw->saxi_ctlclk);
+ clk_disable_unprepare(xvsw->aclk);
+ return ret;
+}
+
+static int xvsw_remove(struct platform_device *pdev)
+{
+ struct xvswitch_device *xvsw = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xvsw->subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+ if (!xvsw->tdest_routing)
+ clk_disable_unprepare(xvsw->saxi_ctlclk);
+ clk_disable_unprepare(xvsw->aclk);
+ return 0;
+}
+
+static const struct of_device_id xvsw_of_id_table[] = {
+ { .compatible = "xlnx,axis-switch-1.1" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xvsw_of_id_table);
+
+static struct platform_driver xvsw_driver = {
+ .driver = {
+ .name = "xilinx-axis-switch",
+ .of_match_table = xvsw_of_id_table,
+ },
+ .probe = xvsw_probe,
+ .remove = xvsw_remove,
+};
+
+module_platform_driver(xvsw_driver);
+
+MODULE_AUTHOR("Vishal Sagar <vishal.sagar@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx AXI4-Stream Switch Driver");
+MODULE_LICENSE("GPL v2");
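
In control-register mode the switch exposes one mux register per master (source) port at XVSW_MI_MUX_REG_BASE + 4 * i, holding either the selected slave port or the disable bit, and the whole table is latched by writing the update bit to the control register. Condensed from xvsw_s_stream() above (base is the hypothetical __iomem mapping):

/* Route sink routing[i] to source i, or disable the port. */
for (i = 0; i < nsources; i++) {
	u32 val = routing[i] >= 0 ? routing[i] : XVSW_MI_MUX_DISABLE_MASK;

	iowrite32(val, base + XVSW_MI_MUX_REG_BASE + i * 4);
}
iowrite32(XVSW_CTRL_REG_UPDATE_MASK, base + XVSW_CTRL_REG);	/* latch */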
diff --git a/drivers/media/platform/xilinx/xilinx-cfa.c b/drivers/media/platform/xilinx/xilinx-cfa.c
new file mode 100644
index 000000000000..832fb7306563
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-cfa.c
@@ -0,0 +1,394 @@
+/*
+ * Xilinx Color Filter Array
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XCFA_BAYER_PHASE 0x100
+#define XCFA_BAYER_PHASE_RGGB 0
+#define XCFA_BAYER_PHASE_GRBG 1
+#define XCFA_BAYER_PHASE_GBRG 2
+#define XCFA_BAYER_PHASE_BGGR 3
+
+/**
+ * struct xcfa_device - Xilinx CFA device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @formats: V4L2 media bus formats
+ * @default_formats: default V4L2 media bus formats
+ * @vip_formats: Xilinx Video IP formats
+ */
+struct xcfa_device {
+ struct xvip_device xvip;
+
+ struct media_pad pads[2];
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+};
+
+static inline struct xcfa_device *to_cfa(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xcfa_device, xvip.subdev);
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xcfa_get_bayer_phase(const unsigned int code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ return XCFA_BAYER_PHASE_RGGB;
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ return XCFA_BAYER_PHASE_GRBG;
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ return XCFA_BAYER_PHASE_GBRG;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ return XCFA_BAYER_PHASE_BGGR;
+ }
+
+ return -EINVAL;
+}
+
+static int xcfa_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xcfa_device *xcfa = to_cfa(subdev);
+ const unsigned int code = xcfa->formats[XVIP_PAD_SINK].code;
+ u32 bayer_phase;
+
+ if (!enable) {
+ xvip_stop(&xcfa->xvip);
+ return 0;
+ }
+
+	/* The sink code was validated in set_fmt, so this cannot fail here */
+ bayer_phase = xcfa_get_bayer_phase(code);
+
+ xvip_write(&xcfa->xvip, XCFA_BAYER_PHASE, bayer_phase);
+
+ xvip_set_frame_size(&xcfa->xvip, &xcfa->formats[XVIP_PAD_SINK]);
+
+ xvip_start(&xcfa->xvip);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__xcfa_get_pad_format(struct xcfa_device *xcfa,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xcfa->xvip.subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xcfa->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xcfa_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcfa_device *xcfa = to_cfa(subdev);
+
+ fmt->format = *__xcfa_get_pad_format(xcfa, cfg, fmt->pad, fmt->which);
+
+ return 0;
+}
+
+static int xcfa_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcfa_device *xcfa = to_cfa(subdev);
+ struct v4l2_mbus_framefmt *format;
+ int bayer_phase;
+
+ format = __xcfa_get_pad_format(xcfa, cfg, fmt->pad, fmt->which);
+
+ if (fmt->pad == XVIP_PAD_SOURCE) {
+ fmt->format = *format;
+ return 0;
+ }
+
+ bayer_phase = xcfa_get_bayer_phase(fmt->format.code);
+ if (bayer_phase >= 0) {
+ xcfa->vip_formats[XVIP_PAD_SINK] =
+ xvip_get_format_by_code(fmt->format.code);
+ format->code = fmt->format.code;
+ }
+
+ xvip_set_format_size(format, fmt);
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad */
+ format = __xcfa_get_pad_format(xcfa, cfg, XVIP_PAD_SOURCE, fmt->which);
+
+ xvip_set_format_size(format, fmt);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+
+static int xcfa_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xcfa_device *xcfa = to_cfa(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xcfa->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xcfa->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int xcfa_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops xcfa_video_ops = {
+ .s_stream = xcfa_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xcfa_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xcfa_get_format,
+ .set_fmt = xcfa_set_format,
+};
+
+static struct v4l2_subdev_ops xcfa_ops = {
+ .video = &xcfa_video_ops,
+ .pad = &xcfa_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xcfa_internal_ops = {
+ .open = xcfa_open,
+ .close = xcfa_close,
+};
+
+/*
+ * Media Operations
+ */
+
+static const struct media_entity_operations xcfa_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * Power Management
+ */
+
+static int __maybe_unused xcfa_pm_suspend(struct device *dev)
+{
+ struct xcfa_device *xcfa = dev_get_drvdata(dev);
+
+ xvip_suspend(&xcfa->xvip);
+
+ return 0;
+}
+
+static int __maybe_unused xcfa_pm_resume(struct device *dev)
+{
+ struct xcfa_device *xcfa = dev_get_drvdata(dev);
+
+ xvip_resume(&xcfa->xvip);
+
+ return 0;
+}
+
+/*
+ * Platform Device Driver
+ */
+
+static int xcfa_parse_of(struct xcfa_device *xcfa)
+{
+ struct device *dev = xcfa->xvip.dev;
+ struct device_node *node = xcfa->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id;
+ int ret;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ const struct xvip_video_format *vip_format;
+
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ ret = of_property_read_u32(port, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "no reg in DT");
+ return ret;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "invalid reg in DT");
+ return -EINVAL;
+ }
+
+ xcfa->vip_formats[port_id] = vip_format;
+ }
+ }
+
+ return 0;
+}
+
+static int xcfa_probe(struct platform_device *pdev)
+{
+ struct xcfa_device *xcfa;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *default_format;
+ int ret;
+
+ xcfa = devm_kzalloc(&pdev->dev, sizeof(*xcfa), GFP_KERNEL);
+ if (!xcfa)
+ return -ENOMEM;
+
+ xcfa->xvip.dev = &pdev->dev;
+
+ ret = xcfa_parse_of(xcfa);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xcfa->xvip);
+ if (ret < 0)
+ return ret;
+
+ /* Reset and initialize the core */
+ xvip_reset(&xcfa->xvip);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xcfa->xvip.subdev;
+ v4l2_subdev_init(subdev, &xcfa_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xcfa_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xcfa);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize default and active formats */
+ default_format = &xcfa->default_formats[XVIP_PAD_SINK];
+ default_format->code = xcfa->vip_formats[XVIP_PAD_SINK]->code;
+ default_format->field = V4L2_FIELD_NONE;
+ default_format->colorspace = V4L2_COLORSPACE_SRGB;
+ xvip_get_frame_size(&xcfa->xvip, default_format);
+
+ xcfa->formats[XVIP_PAD_SINK] = *default_format;
+
+ default_format = &xcfa->default_formats[XVIP_PAD_SOURCE];
+ *default_format = xcfa->default_formats[XVIP_PAD_SINK];
+ default_format->code = xcfa->vip_formats[XVIP_PAD_SOURCE]->code;
+
+ xcfa->formats[XVIP_PAD_SOURCE] = *default_format;
+
+ xcfa->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xcfa->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xcfa_media_ops;
+ ret = media_entity_pads_init(&subdev->entity, 2, xcfa->pads);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xcfa);
+
+ xvip_print_version(&xcfa->xvip);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xcfa->xvip);
+ return ret;
+}
+
+static int xcfa_remove(struct platform_device *pdev)
+{
+ struct xcfa_device *xcfa = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xcfa->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xcfa->xvip);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xcfa_pm_ops, xcfa_pm_suspend, xcfa_pm_resume);
+
+static const struct of_device_id xcfa_of_id_table[] = {
+ { .compatible = "xlnx,v-cfa-7.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xcfa_of_id_table);
+
+static struct platform_driver xcfa_driver = {
+ .driver = {
+ .name = "xilinx-cfa",
+ .pm = &xcfa_pm_ops,
+ .of_match_table = xcfa_of_id_table,
+ },
+ .probe = xcfa_probe,
+ .remove = xcfa_remove,
+};
+
+module_platform_driver(xcfa_driver);
+
+MODULE_DESCRIPTION("Xilinx Color Filter Array Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-cresample.c b/drivers/media/platform/xilinx/xilinx-cresample.c
new file mode 100644
index 000000000000..05335c10a388
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-cresample.c
@@ -0,0 +1,447 @@
+/*
+ * Xilinx Chroma Resampler
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XCRESAMPLE_ENCODING 0x100
+#define XCRESAMPLE_ENCODING_FIELD (1 << 7)
+#define XCRESAMPLE_ENCODING_CHROMA (1 << 8)
+
+/**
+ * struct xcresample_device - Xilinx CRESAMPLE device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @vip_formats: Xilinx Video IP formats
+ * @ctrl_handler: control handler
+ */
+struct xcresample_device {
+ struct xvip_device xvip;
+
+ struct media_pad pads[2];
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+
+ struct v4l2_ctrl_handler ctrl_handler;
+};
+
+static inline struct xcresample_device *to_cresample(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xcresample_device, xvip.subdev);
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xcresample_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xcresample_device *xcresample = to_cresample(subdev);
+
+ if (!enable) {
+ xvip_stop(&xcresample->xvip);
+ return 0;
+ }
+
+ xvip_set_frame_size(&xcresample->xvip,
+ &xcresample->formats[XVIP_PAD_SINK]);
+
+ xvip_start(&xcresample->xvip);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__xcresample_get_pad_format(struct xcresample_device *xcresample,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xcresample->xvip.subdev, cfg,
+ pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xcresample->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xcresample_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcresample_device *xcresample = to_cresample(subdev);
+
+ fmt->format = *__xcresample_get_pad_format(xcresample, cfg, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static int xcresample_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcresample_device *xcresample = to_cresample(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xcresample_get_pad_format(xcresample, cfg, fmt->pad,
+ fmt->which);
+
+ if (fmt->pad == XVIP_PAD_SOURCE) {
+ fmt->format = *format;
+ return 0;
+ }
+
+ xvip_set_format_size(format, fmt);
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = __xcresample_get_pad_format(xcresample, cfg, XVIP_PAD_SOURCE,
+ fmt->which);
+
+ xvip_set_format_size(format, fmt);
+
+ return 0;
+}
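+
+/*
+ * Illustrative sketch, not part of the driver: because set_fmt on the
+ * sink pad propagates the frame size to the source pad, an application
+ * only needs to configure pad 0 (XVIP_PAD_SINK) and can read the result
+ * back from pad 1. The subdev node path is hypothetical.
+ *
+ * int fd = open("/dev/v4l-subdev0", O_RDWR);
+ * struct v4l2_subdev_format fmt = {
+ * .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ * .pad = 0,
+ * };
+ *
+ * ioctl(fd, VIDIOC_SUBDEV_G_FMT, &fmt);
+ * fmt.format.width = 1280;
+ * fmt.format.height = 720;
+ * ioctl(fd, VIDIOC_SUBDEV_S_FMT, &fmt);
+ */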
+
+/*
+ * V4L2 Subdevice Operations
+ */
+
+static int xcresample_open(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ struct xcresample_device *xcresample = to_cresample(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xcresample->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xcresample->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int xcresample_close(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static int xcresample_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct xcresample_device *xcresample =
+ container_of(ctrl->handler, struct xcresample_device,
+ ctrl_handler);
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_CRESAMPLE_FIELD_PARITY:
+ xvip_clr_or_set(&xcresample->xvip, XCRESAMPLE_ENCODING,
+ XCRESAMPLE_ENCODING_FIELD, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_CRESAMPLE_CHROMA_PARITY:
+ xvip_clr_or_set(&xcresample->xvip, XCRESAMPLE_ENCODING,
+ XCRESAMPLE_ENCODING_CHROMA, ctrl->val);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops xcresample_ctrl_ops = {
+ .s_ctrl = xcresample_s_ctrl,
+};
+
+static struct v4l2_subdev_video_ops xcresample_video_ops = {
+ .s_stream = xcresample_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xcresample_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xcresample_get_format,
+ .set_fmt = xcresample_set_format,
+};
+
+static struct v4l2_subdev_ops xcresample_ops = {
+ .video = &xcresample_video_ops,
+ .pad = &xcresample_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xcresample_internal_ops = {
+ .open = xcresample_open,
+ .close = xcresample_close,
+};
+
+/*
+ * Control Configs
+ */
+
+static const char *const xcresample_parity_string[] = {
+ "Even",
+ "Odd",
+};
+
+static struct v4l2_ctrl_config xcresample_field = {
+ .ops = &xcresample_ctrl_ops,
+ .id = V4L2_CID_XILINX_CRESAMPLE_FIELD_PARITY,
+ .name = "Chroma Resampler: Encoding Field Parity",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = 1,
+ .qmenu = xcresample_parity_string,
+};
+
+static struct v4l2_ctrl_config xcresample_chroma = {
+ .ops = &xcresample_ctrl_ops,
+ .id = V4L2_CID_XILINX_CRESAMPLE_CHROMA_PARITY,
+ .name = "Chroma Resampler: Encoding Chroma Parity",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = 1,
+ .qmenu = xcresample_parity_string,
+};
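+
+/*
+ * Illustrative sketch, not part of the driver: both custom parity
+ * controls are ordinary V4L2 menu controls ("Even" = 0, "Odd" = 1), so
+ * an application drives them through the standard control ioctls on the
+ * subdev node, e.g. to select odd chroma parity:
+ *
+ * struct v4l2_control ctrl = {
+ * .id = V4L2_CID_XILINX_CRESAMPLE_CHROMA_PARITY,
+ * .value = 1,
+ * };
+ *
+ * ioctl(fd, VIDIOC_S_CTRL, &ctrl);
+ */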
+
+/*
+ * Media Operations
+ */
+
+static const struct media_entity_operations xcresample_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * Power Management
+ */
+
+static int __maybe_unused xcresample_pm_suspend(struct device *dev)
+{
+ struct xcresample_device *xcresample = dev_get_drvdata(dev);
+
+ xvip_suspend(&xcresample->xvip);
+
+ return 0;
+}
+
+static int __maybe_unused xcresample_pm_resume(struct device *dev)
+{
+ struct xcresample_device *xcresample = dev_get_drvdata(dev);
+
+ xvip_resume(&xcresample->xvip);
+
+ return 0;
+}
+
+/*
+ * Platform Device Driver
+ */
+
+static int xcresample_parse_of(struct xcresample_device *xcresample)
+{
+ struct device *dev = xcresample->xvip.dev;
+ struct device_node *node = xcresample->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id;
+ int ret;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ const struct xvip_video_format *vip_format;
+
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ ret = of_property_read_u32(port, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "no reg in DT");
+ return ret;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "invalid reg in DT");
+ return -EINVAL;
+ }
+
+ xcresample->vip_formats[port_id] = vip_format;
+ }
+ }
+
+ return 0;
+}
+
+static int xcresample_probe(struct platform_device *pdev)
+{
+ struct xcresample_device *xcresample;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *default_format;
+ int ret;
+
+ xcresample = devm_kzalloc(&pdev->dev, sizeof(*xcresample), GFP_KERNEL);
+ if (!xcresample)
+ return -ENOMEM;
+
+ xcresample->xvip.dev = &pdev->dev;
+
+ ret = xcresample_parse_of(xcresample);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xcresample->xvip);
+ if (ret < 0)
+ return ret;
+
+ /* Reset and initialize the core */
+ xvip_reset(&xcresample->xvip);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xcresample->xvip.subdev;
+ v4l2_subdev_init(subdev, &xcresample_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xcresample_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xcresample);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize default and active formats */
+ default_format = &xcresample->default_formats[XVIP_PAD_SINK];
+ default_format->code = xcresample->vip_formats[XVIP_PAD_SINK]->code;
+ default_format->field = V4L2_FIELD_NONE;
+ default_format->colorspace = V4L2_COLORSPACE_SRGB;
+ xvip_get_frame_size(&xcresample->xvip, default_format);
+
+ xcresample->formats[XVIP_PAD_SINK] = *default_format;
+
+ default_format = &xcresample->default_formats[XVIP_PAD_SOURCE];
+ *default_format = xcresample->default_formats[XVIP_PAD_SINK];
+ default_format->code = xcresample->vip_formats[XVIP_PAD_SOURCE]->code;
+
+ xcresample->formats[XVIP_PAD_SOURCE] = *default_format;
+
+ xcresample->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xcresample->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xcresample_media_ops;
+ ret = media_entity_pads_init(&subdev->entity, 2, xcresample->pads);
+ if (ret < 0)
+ goto error;
+
+ v4l2_ctrl_handler_init(&xcresample->ctrl_handler, 2);
+ xcresample_field.def =
+ (xvip_read(&xcresample->xvip, XCRESAMPLE_ENCODING) &
+ XCRESAMPLE_ENCODING_FIELD) ? 1 : 0;
+ v4l2_ctrl_new_custom(&xcresample->ctrl_handler, &xcresample_field,
+ NULL);
+ xcresample_chroma.def =
+ (xvip_read(&xcresample->xvip, XCRESAMPLE_ENCODING) &
+ XCRESAMPLE_ENCODING_CHROMA) ? 1 : 0;
+ v4l2_ctrl_new_custom(&xcresample->ctrl_handler, &xcresample_chroma,
+ NULL);
+ if (xcresample->ctrl_handler.error) {
+ dev_err(&pdev->dev, "failed to add controls\n");
+ ret = xcresample->ctrl_handler.error;
+ goto error;
+ }
+ subdev->ctrl_handler = &xcresample->ctrl_handler;
+
+ platform_set_drvdata(pdev, xcresample);
+
+ xvip_print_version(&xcresample->xvip);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&xcresample->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xcresample->xvip);
+ return ret;
+}
+
+static int xcresample_remove(struct platform_device *pdev)
+{
+ struct xcresample_device *xcresample = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xcresample->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xcresample->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xcresample->xvip);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xcresample_pm_ops, xcresample_pm_suspend,
+ xcresample_pm_resume);
+
+static const struct of_device_id xcresample_of_id_table[] = {
+ { .compatible = "xlnx,v-cresample-4.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xcresample_of_id_table);
+
+static struct platform_driver xcresample_driver = {
+ .driver = {
+ .name = "xilinx-cresample",
+ .pm = &xcresample_pm_ops,
+ .of_match_table = xcresample_of_id_table,
+ },
+ .probe = xcresample_probe,
+ .remove = xcresample_remove,
+};
+
+module_platform_driver(xcresample_driver);
+
+MODULE_DESCRIPTION("Xilinx Chroma Resampler Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-csi2rxss.c b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
new file mode 100644
index 000000000000..69e57e951389
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
@@ -0,0 +1,2060 @@
+/*
+ * Xilinx MIPI CSI2 Subsystem
+ *
+ * Copyright (C) 2016 Xilinx, Inc.
+ *
+ * Contacts: Vishal Sagar <vsagar@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/media/xilinx-vip.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/v4l2-subdev.h>
+#include <linux/xilinx-csi2rxss.h>
+#include <linux/xilinx-v4l2-controls.h>
+#include <media/media-entity.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include "xilinx-vip.h"
+
+/*
+ * MIPI CSI2 Rx register map, bitmask and offsets
+ */
+#define XCSI_CCR_OFFSET 0x00000000
+#define XCSI_CCR_SOFTRESET_SHIFT 1
+#define XCSI_CCR_COREENB_SHIFT 0
+#define XCSI_CCR_SOFTRESET_MASK BIT(XCSI_CCR_SOFTRESET_SHIFT)
+#define XCSI_CCR_COREENB_MASK BIT(XCSI_CCR_COREENB_SHIFT)
+
+#define XCSI_PCR_OFFSET 0x00000004
+#define XCSI_PCR_MAXLANES_MASK 0x00000018
+#define XCSI_PCR_ACTLANES_MASK 0x00000003
+#define XCSI_PCR_MAXLANES_SHIFT 3
+#define XCSI_PCR_ACTLANES_SHIFT 0
+
+#define XCSI_CSR_OFFSET 0x00000010
+#define XCSI_CSR_PKTCOUNT_SHIFT 16
+#define XCSI_CSR_SPFIFOFULL_SHIFT 3
+#define XCSI_CSR_SPFIFONE_SHIFT 2
+#define XCSI_CSR_SLBF_SHIFT 1
+#define XCSI_CSR_RIPCD_SHIFT 0
+#define XCSI_CSR_PKTCOUNT_MASK 0xFFFF0000
+#define XCSI_CSR_SPFIFOFULL_MASK BIT(XCSI_CSR_SPFIFOFULL_SHIFT)
+#define XCSI_CSR_SPFIFONE_MASK BIT(XCSI_CSR_SPFIFONE_SHIFT)
+#define XCSI_CSR_SLBF_MASK BIT(XCSI_CSR_SLBF_SHIFT)
+#define XCSI_CSR_RIPCD_MASK BIT(XCSI_CSR_RIPCD_SHIFT)
+
+#define XCSI_GIER_OFFSET 0x00000020
+#define XCSI_GIER_GIE_SHIFT 0
+#define XCSI_GIER_GIE_MASK BIT(XCSI_GIER_GIE_SHIFT)
+#define XCSI_GIER_SET 1
+#define XCSI_GIER_RESET 0
+
+#define XCSI_ISR_OFFSET 0x00000024
+#define XCSI_ISR_FR_SHIFT 31
+#define XCSI_ISR_VCX_SHIFT 30
+#define XCSI_ISR_ILC_SHIFT 21
+#define XCSI_ISR_SPFIFOF_SHIFT 20
+#define XCSI_ISR_SPFIFONE_SHIFT 19
+#define XCSI_ISR_SLBF_SHIFT 18
+#define XCSI_ISR_STOP_SHIFT 17
+#define XCSI_ISR_SOTERR_SHIFT 13
+#define XCSI_ISR_SOTSYNCERR_SHIFT 12
+#define XCSI_ISR_ECC2BERR_SHIFT 11
+#define XCSI_ISR_ECC1BERR_SHIFT 10
+#define XCSI_ISR_CRCERR_SHIFT 9
+#define XCSI_ISR_DATAIDERR_SHIFT 8
+#define XCSI_ISR_VC3FSYNCERR_SHIFT 7
+#define XCSI_ISR_VC3FLVLERR_SHIFT 6
+#define XCSI_ISR_VC2FSYNCERR_SHIFT 5
+#define XCSI_ISR_VC2FLVLERR_SHIFT 4
+#define XCSI_ISR_VC1FSYNCERR_SHIFT 3
+#define XCSI_ISR_VC1FLVLERR_SHIFT 2
+#define XCSI_ISR_VC0FSYNCERR_SHIFT 1
+#define XCSI_ISR_VC0FLVLERR_SHIFT 0
+#define XCSI_ISR_FR_MASK BIT(XCSI_ISR_FR_SHIFT)
+#define XCSI_ISR_VCX_MASK BIT(XCSI_ISR_VCX_SHIFT)
+#define XCSI_ISR_ILC_MASK BIT(XCSI_ISR_ILC_SHIFT)
+#define XCSI_ISR_SPFIFOF_MASK BIT(XCSI_ISR_SPFIFOF_SHIFT)
+#define XCSI_ISR_SPFIFONE_MASK BIT(XCSI_ISR_SPFIFONE_SHIFT)
+#define XCSI_ISR_SLBF_MASK BIT(XCSI_ISR_SLBF_SHIFT)
+#define XCSI_ISR_STOP_MASK BIT(XCSI_ISR_STOP_SHIFT)
+#define XCSI_ISR_SOTERR_MASK BIT(XCSI_ISR_SOTERR_SHIFT)
+#define XCSI_ISR_SOTSYNCERR_MASK BIT(XCSI_ISR_SOTSYNCERR_SHIFT)
+#define XCSI_ISR_ECC2BERR_MASK BIT(XCSI_ISR_ECC2BERR_SHIFT)
+#define XCSI_ISR_ECC1BERR_MASK BIT(XCSI_ISR_ECC1BERR_SHIFT)
+#define XCSI_ISR_CRCERR_MASK BIT(XCSI_ISR_CRCERR_SHIFT)
+#define XCSI_ISR_DATAIDERR_MASK BIT(XCSI_ISR_DATAIDERR_SHIFT)
+#define XCSI_ISR_VC3FSYNCERR_MASK BIT(XCSI_ISR_VC3FSYNCERR_SHIFT)
+#define XCSI_ISR_VC3FLVLERR_MASK BIT(XCSI_ISR_VC3FLVLERR_SHIFT)
+#define XCSI_ISR_VC2FSYNCERR_MASK BIT(XCSI_ISR_VC2FSYNCERR_SHIFT)
+#define XCSI_ISR_VC2FLVLERR_MASK BIT(XCSI_ISR_VC2FLVLERR_SHIFT)
+#define XCSI_ISR_VC1FSYNCERR_MASK BIT(XCSI_ISR_VC1FSYNCERR_SHIFT)
+#define XCSI_ISR_VC1FLVLERR_MASK BIT(XCSI_ISR_VC1FLVLERR_SHIFT)
+#define XCSI_ISR_VC0FSYNCERR_MASK BIT(XCSI_ISR_VC0FSYNCERR_SHIFT)
+#define XCSI_ISR_VC0FLVLERR_MASK BIT(XCSI_ISR_VC0FLVLERR_SHIFT)
+#define XCSI_ISR_ALLINTR_MASK 0xC03FFFFF
+
+#define XCSI_INTR_PROT_MASK (XCSI_ISR_VC3FSYNCERR_MASK | \
+ XCSI_ISR_VC3FLVLERR_MASK | \
+ XCSI_ISR_VC2FSYNCERR_MASK | \
+ XCSI_ISR_VC2FLVLERR_MASK | \
+ XCSI_ISR_VC1FSYNCERR_MASK | \
+ XCSI_ISR_VC1FLVLERR_MASK | \
+ XCSI_ISR_VC0FSYNCERR_MASK | \
+ XCSI_ISR_VC0FLVLERR_MASK | \
+ XCSI_ISR_VCX_MASK)
+
+#define XCSI_INTR_PKTLVL_MASK (XCSI_ISR_ECC2BERR_MASK | \
+ XCSI_ISR_ECC1BERR_MASK | \
+ XCSI_ISR_CRCERR_MASK | \
+ XCSI_ISR_DATAIDERR_MASK)
+
+#define XCSI_INTR_DPHY_MASK (XCSI_ISR_SOTERR_MASK | \
+ XCSI_ISR_SOTSYNCERR_MASK)
+
+#define XCSI_INTR_SPKT_MASK (XCSI_ISR_SPFIFOF_MASK | \
+ XCSI_ISR_SPFIFONE_MASK)
+
+#define XCSI_INTR_FRAMERCVD_MASK (XCSI_ISR_FR_MASK)
+
+#define XCSI_INTR_ERR_MASK (XCSI_ISR_ILC_MASK | \
+ XCSI_ISR_SLBF_MASK | \
+ XCSI_ISR_STOP_MASK)
+
+#define XCSI_IER_OFFSET 0x00000028
+#define XCSI_IER_FR_SHIFT 31
+#define XCSI_IER_VCX_SHIFT 30
+#define XCSI_IER_ILC_SHIFT 21
+#define XCSI_IER_SPFIFOF_SHIFT 20
+#define XCSI_IER_SPFIFONE_SHIFT 19
+#define XCSI_IER_SLBF_SHIFT 18
+#define XCSI_IER_STOP_SHIFT 17
+#define XCSI_IER_SOTERR_SHIFT 13
+#define XCSI_IER_SOTSYNCERR_SHIFT 12
+#define XCSI_IER_ECC2BERR_SHIFT 11
+#define XCSI_IER_ECC1BERR_SHIFT 10
+#define XCSI_IER_CRCERR_SHIFT 9
+#define XCSI_IER_DATAIDERR_SHIFT 8
+#define XCSI_IER_VC3FSYNCERR_SHIFT 7
+#define XCSI_IER_VC3FLVLERR_SHIFT 6
+#define XCSI_IER_VC2FSYNCERR_SHIFT 5
+#define XCSI_IER_VC2FLVLERR_SHIFT 4
+#define XCSI_IER_VC1FSYNCERR_SHIFT 3
+#define XCSI_IER_VC1FLVLERR_SHIFT 2
+#define XCSI_IER_VC0FSYNCERR_SHIFT 1
+#define XCSI_IER_VC0FLVLERR_SHIFT 0
+#define XCSI_IER_FR_MASK BIT(XCSI_IER_FR_SHIFT)
+#define XCSI_IER_VCX_MASK BIT(XCSI_IER_VCX_SHIFT)
+#define XCSI_IER_ILC_MASK BIT(XCSI_IER_ILC_SHIFT)
+#define XCSI_IER_SPFIFOF_MASK BIT(XCSI_IER_SPFIFOF_SHIFT)
+#define XCSI_IER_SPFIFONE_MASK BIT(XCSI_IER_SPFIFONE_SHIFT)
+#define XCSI_IER_SLBF_MASK BIT(XCSI_IER_SLBF_SHIFT)
+#define XCSI_IER_STOP_MASK BIT(XCSI_IER_STOP_SHIFT)
+#define XCSI_IER_SOTERR_MASK BIT(XCSI_IER_SOTERR_SHIFT)
+#define XCSI_IER_SOTSYNCERR_MASK BIT(XCSI_IER_SOTSYNCERR_SHIFT)
+#define XCSI_IER_ECC2BERR_MASK BIT(XCSI_IER_ECC2BERR_SHIFT)
+#define XCSI_IER_ECC1BERR_MASK BIT(XCSI_IER_ECC1BERR_SHIFT)
+#define XCSI_IER_CRCERR_MASK BIT(XCSI_IER_CRCERR_SHIFT)
+#define XCSI_IER_DATAIDERR_MASK BIT(XCSI_IER_DATAIDERR_SHIFT)
+#define XCSI_IER_VC3FSYNCERR_MASK BIT(XCSI_IER_VC3FSYNCERR_SHIFT)
+#define XCSI_IER_VC3FLVLERR_MASK BIT(XCSI_IER_VC3FLVLERR_SHIFT)
+#define XCSI_IER_VC2FSYNCERR_MASK BIT(XCSI_IER_VC2FSYNCERR_SHIFT)
+#define XCSI_IER_VC2FLVLERR_MASK BIT(XCSI_IER_VC2FLVLERR_SHIFT)
+#define XCSI_IER_VC1FSYNCERR_MASK BIT(XCSI_IER_VC1FSYNCERR_SHIFT)
+#define XCSI_IER_VC1FLVLERR_MASK BIT(XCSI_IER_VC1FLVLERR_SHIFT)
+#define XCSI_IER_VC0FSYNCERR_MASK BIT(XCSI_IER_VC0FSYNCERR_SHIFT)
+#define XCSI_IER_VC0FLVLERR_MASK BIT(XCSI_IER_VC0FLVLERR_SHIFT)
+#define XCSI_IER_ALLINTR_MASK 0xC03FFFFF
+
+#define XCSI_SPKTR_OFFSET 0x00000030
+#define XCSI_SPKTR_DATA_SHIFT 8
+#define XCSI_SPKTR_VC_SHIFT 6
+#define XCSI_SPKTR_DT_SHIFT 0
+#define XCSI_SPKTR_DATA_MASK 0x00FFFF00
+#define XCSI_SPKTR_VC_MASK 0x000000C0
+#define XCSI_SPKTR_DT_MASK 0x0000003F
+
+#define XCSI_VCXR_OFFSET 0x00000034
+#define XCSI_VCXR_VC15FSYNCERR_MASK BIT(23)
+#define XCSI_VCXR_VC15FLVLERR_MASK BIT(22)
+#define XCSI_VCXR_VC14FSYNCERR_MASK BIT(21)
+#define XCSI_VCXR_VC14FLVLERR_MASK BIT(20)
+#define XCSI_VCXR_VC13FSYNCERR_MASK BIT(19)
+#define XCSI_VCXR_VC13FLVLERR_MASK BIT(18)
+#define XCSI_VCXR_VC12FSYNCERR_MASK BIT(17)
+#define XCSI_VCXR_VC12FLVLERR_MASK BIT(16)
+#define XCSI_VCXR_VC11FSYNCERR_MASK BIT(15)
+#define XCSI_VCXR_VC11FLVLERR_MASK BIT(14)
+#define XCSI_VCXR_VC10FSYNCERR_MASK BIT(13)
+#define XCSI_VCXR_VC10FLVLERR_MASK BIT(12)
+#define XCSI_VCXR_VC9FSYNCERR_MASK BIT(11)
+#define XCSI_VCXR_VC9FLVLERR_MASK BIT(10)
+#define XCSI_VCXR_VC8FSYNCERR_MASK BIT(9)
+#define XCSI_VCXR_VC8FLVLERR_MASK BIT(8)
+#define XCSI_VCXR_VC7FSYNCERR_MASK BIT(7)
+#define XCSI_VCXR_VC7FLVLERR_MASK BIT(6)
+#define XCSI_VCXR_VC6FSYNCERR_MASK BIT(5)
+#define XCSI_VCXR_VC6FLVLERR_MASK BIT(4)
+#define XCSI_VCXR_VC5FSYNCERR_MASK BIT(3)
+#define XCSI_VCXR_VC5FLVLERR_MASK BIT(2)
+#define XCSI_VCXR_VC4FSYNCERR_MASK BIT(1)
+#define XCSI_VCXR_VC4FLVLERR_MASK BIT(0)
+#define XCSI_VCXR_MASK 0x00FFFFFF
+
+#define XCSI_CLKINFR_OFFSET 0x0000003C
+#define XCSI_CLKINFR_STOP_SHIFT 1
+#define XCSI_CLKINFR_STOP_MASK BIT(XCSI_CLKINFR_STOP_SHIFT)
+
+#define XCSI_L0INFR_OFFSET 0x00000040
+#define XCSI_L1INFR_OFFSET 0x00000044
+#define XCSI_L2INFR_OFFSET 0x00000048
+#define XCSI_L3INFR_OFFSET 0x0000004C
+#define XCSI_LXINFR_STOP_SHIFT 5
+#define XCSI_LXINFR_SOTERR_SHIFT 1
+#define XCSI_LXINFR_SOTSYNCERR_SHIFT 0
+#define XCSI_LXINFR_STOP_MASK BIT(XCSI_LXINFR_STOP_SHIFT)
+#define XCSI_LXINFR_SOTERR_MASK BIT(XCSI_LXINFR_SOTERR_SHIFT)
+#define XCSI_LXINFR_SOTSYNCERR_MASK BIT(XCSI_LXINFR_SOTSYNCERR_SHIFT)
+
+#define XCSI_VC0INF1R_OFFSET 0x00000060
+#define XCSI_VC1INF1R_OFFSET 0x00000068
+#define XCSI_VC2INF1R_OFFSET 0x00000070
+#define XCSI_VC3INF1R_OFFSET 0x00000078
+#define XCSI_VC4INF1R_OFFSET 0x00000080
+#define XCSI_VC5INF1R_OFFSET 0x00000088
+#define XCSI_VC6INF1R_OFFSET 0x00000090
+#define XCSI_VC7INF1R_OFFSET 0x00000098
+#define XCSI_VC8INF1R_OFFSET 0x000000A0
+#define XCSI_VC9INF1R_OFFSET 0x000000A8
+#define XCSI_VC10INF1R_OFFSET 0x000000B0
+#define XCSI_VC11INF1R_OFFSET 0x000000B8
+#define XCSI_VC12INF1R_OFFSET 0x000000C0
+#define XCSI_VC13INF1R_OFFSET 0x000000C8
+#define XCSI_VC14INF1R_OFFSET 0x000000D0
+#define XCSI_VC15INF1R_OFFSET 0x000000D8
+#define XCSI_VCXINF1R_LINECOUNT_SHIFT 16
+#define XCSI_VCXINF1R_BYTECOUNT_SHIFT 0
+#define XCSI_VCXINF1R_LINECOUNT_MASK 0xFFFF0000
+#define XCSI_VCXINF1R_BYTECOUNT_MASK 0x0000FFFF
+
+#define XCSI_VC0INF2R_OFFSET 0x00000064
+#define XCSI_VC1INF2R_OFFSET 0x0000006C
+#define XCSI_VC2INF2R_OFFSET 0x00000074
+#define XCSI_VC3INF2R_OFFSET 0x0000007C
+#define XCSI_VC4INF2R_OFFSET 0x00000084
+#define XCSI_VC5INF2R_OFFSET 0x0000008C
+#define XCSI_VC6INF2R_OFFSET 0x00000094
+#define XCSI_VC7INF2R_OFFSET 0x0000009C
+#define XCSI_VC8INF2R_OFFSET 0x000000A4
+#define XCSI_VC9INF2R_OFFSET 0x000000AC
+#define XCSI_VC10INF2R_OFFSET 0x000000B4
+#define XCSI_VC11INF2R_OFFSET 0x000000BC
+#define XCSI_VC12INF2R_OFFSET 0x000000C4
+#define XCSI_VC13INF2R_OFFSET 0x000000CC
+#define XCSI_VC14INF2R_OFFSET 0x000000D4
+#define XCSI_VC15INF2R_OFFSET 0x000000DC
+#define XCSI_VCXINF2R_DATATYPE_SHIFT 0
+#define XCSI_VCXINF2R_DATATYPE_MASK 0x0000003F
+
+#define XDPHY_CTRLREG_OFFSET 0x0
+#define XDPHY_CTRLREG_DPHYEN_SHIFT 1
+#define XDPHY_CTRLREG_DPHYEN_MASK BIT(XDPHY_CTRLREG_DPHYEN_SHIFT)
+
+#define XDPHY_CLKSTATREG_OFFSET 0x18
+#define XDPHY_CLKSTATREG_MODE_SHIFT 0
+#define XDPHY_CLKSTATREG_MODE_MASK 0x3
+#define XDPHY_LOW_POWER_MODE 0x0
+#define XDPHY_HI_SPEED_MODE 0x1
+#define XDPHY_ESC_MODE 0x2
+
+/*
+ * Interrupt mask
+ */
+#define XCSI_INTR_MASK (XCSI_ISR_ALLINTR_MASK & ~XCSI_ISR_STOP_MASK)
+/*
+ * Timeout for reset
+ */
+#define XCSI_TIMEOUT_VAL (1000) /* us */
+
+/*
+ * Max string length for CSI Data type string
+ */
+#define MAX_XIL_CSIDT_STR_LENGTH 64
+
+/*
+ * Maximum number of short packet events per file handle.
+ */
+#define XCSI_MAX_SPKT (512)
+
+/* Number of media pads */
+#define XILINX_CSI_MEDIA_PADS (2)
+
+#define XCSI_DEFAULT_WIDTH (1920)
+#define XCSI_DEFAULT_HEIGHT (1080)
+
+#define XCSI_DPHY_CLK_MIN 197000000UL
+#define XCSI_DPHY_CLK_MAX 203000000UL
+#define XCSI_DPHY_CLK_REQ 200000000UL
+
+/*
+ * Macro to return "true" or "false" string if bit is set
+ */
+#define XCSI_GET_BITSET_STR(val, mask) (((val) & (mask)) ? "true" : "false")
+
+#define XCSI_CLK_PROP BIT(0)
+
+/**
+ * struct xcsi2rxss_feature - dt or IP property structure
+ * @flags: Bitmask of properties enabled in IP or dt
+ */
+struct xcsi2rxss_feature {
+ u32 flags;
+};
+
+enum CSI_DataTypes {
+ MIPI_CSI_DT_FRAME_START_CODE = 0x00,
+ MIPI_CSI_DT_FRAME_END_CODE,
+ MIPI_CSI_DT_LINE_START_CODE,
+ MIPI_CSI_DT_LINE_END_CODE,
+ MIPI_CSI_DT_SYNC_RSVD_04,
+ MIPI_CSI_DT_SYNC_RSVD_05,
+ MIPI_CSI_DT_SYNC_RSVD_06,
+ MIPI_CSI_DT_SYNC_RSVD_07,
+ MIPI_CSI_DT_GSPKT_08,
+ MIPI_CSI_DT_GSPKT_09,
+ MIPI_CSI_DT_GSPKT_0A,
+ MIPI_CSI_DT_GSPKT_0B,
+ MIPI_CSI_DT_GSPKT_0C,
+ MIPI_CSI_DT_GSPKT_0D,
+ MIPI_CSI_DT_GSPKT_0E,
+ MIPI_CSI_DT_GSPKT_0F,
+ MIPI_CSI_DT_GLPKT_10,
+ MIPI_CSI_DT_GLPKT_11,
+ MIPI_CSI_DT_GLPKT_12,
+ MIPI_CSI_DT_GLPKT_13,
+ MIPI_CSI_DT_GLPKT_14,
+ MIPI_CSI_DT_GLPKT_15,
+ MIPI_CSI_DT_GLPKT_16,
+ MIPI_CSI_DT_GLPKT_17,
+ MIPI_CSI_DT_YUV_420_8B,
+ MIPI_CSI_DT_YUV_420_10B,
+ MIPI_CSI_DT_YUV_420_8B_LEGACY,
+ MIPI_CSI_DT_YUV_RSVD,
+ MIPI_CSI_DT_YUV_420_8B_CSPS,
+ MIPI_CSI_DT_YUV_420_10B_CSPS,
+ MIPI_CSI_DT_YUV_422_8B,
+ MIPI_CSI_DT_YUV_422_10B,
+ MIPI_CSI_DT_RGB_444,
+ MIPI_CSI_DT_RGB_555,
+ MIPI_CSI_DT_RGB_565,
+ MIPI_CSI_DT_RGB_666,
+ MIPI_CSI_DT_RGB_888,
+ MIPI_CSI_DT_RGB_RSVD_25,
+ MIPI_CSI_DT_RGB_RSVD_26,
+ MIPI_CSI_DT_RGB_RSVD_27,
+ MIPI_CSI_DT_RAW_6,
+ MIPI_CSI_DT_RAW_7,
+ MIPI_CSI_DT_RAW_8,
+ MIPI_CSI_DT_RAW_10,
+ MIPI_CSI_DT_RAW_12,
+ MIPI_CSI_DT_RAW_14,
+ MIPI_CSI_DT_RAW_16,
+ MIPI_CSI_DT_RAW_20,
+ MIPI_CSI_DT_USER_30,
+ MIPI_CSI_DT_USER_31,
+ MIPI_CSI_DT_USER_32,
+ MIPI_CSI_DT_USER_33,
+ MIPI_CSI_DT_USER_34,
+ MIPI_CSI_DT_USER_35,
+ MIPI_CSI_DT_USER_36,
+ MIPI_CSI_DT_USER_37,
+ MIPI_CSI_DT_RSVD_38,
+ MIPI_CSI_DT_RSVD_39,
+ MIPI_CSI_DT_RSVD_3A,
+ MIPI_CSI_DT_RSVD_3B,
+ MIPI_CSI_DT_RSVD_3C,
+ MIPI_CSI_DT_RSVD_3D,
+ MIPI_CSI_DT_RSVD_3E,
+ MIPI_CSI_DT_RSVD_3F
+};
+
+/**
+ * struct pixel_format - Data Type to string name structure
+ * @PixelFormat: MIPI CSI2 Data type
+ * @PixelFormatStr: String name of Data Type
+ */
+struct pixel_format {
+ enum CSI_DataTypes PixelFormat;
+ char PixelFormatStr[MAX_XIL_CSIDT_STR_LENGTH];
+};
+
+/**
+ * struct xcsi2rxss_event - Event log structure
+ * @mask: Event mask
+ * @name: Name of the event
+ * @counter: Count number of events
+ */
+struct xcsi2rxss_event {
+ u32 mask;
+ const char * const name;
+ unsigned int counter;
+};
+
+/**
+ * struct xcsi2rxss_core - Core configuration of the CSI2 Rx Subsystem
+ * @dev: Pointer to device structure
+ * @iomem: Base address of subsystem
+ * @irq: requested irq number
+ * @dphy_offset: DPHY registers offset
+ * @dphy_present: Flag for DPHY register interface presence
+ * @enable_active_lanes: If number of active lanes can be modified
+ * @max_num_lanes: Maximum number of lanes present
+ * @vfb: Video Format Bridge enabled or not
+ * @ppc: pixels per clock
+ * @vc: Virtual Channel
+ * @axis_tdata_width: AXI Stream data width
+ * @datatype: Data type filter
+ * @pxlformat: String with CSI pixel format from IP
+ * @num_lanes: Number of lanes requested from application
+ * @events: Structure to maintain event logs
+ * @vcx_events: Structure to maintain VCX event logs
+ * @en_vcx: If more than 4 VC are enabled.
+ * @cfg: Pointer to csi2rxss config structure
+ * @lite_aclk: AXI4-Lite interface clock
+ * @video_aclk: Video clock
+ * @dphy_clk_200M: 200MHz DPHY clock
+ * @rst_gpio: video_aresetn
+ */
+struct xcsi2rxss_core {
+ struct device *dev;
+ void __iomem *iomem;
+ int irq;
+ u32 dphy_offset;
+ bool dphy_present;
+ bool enable_active_lanes;
+ u32 max_num_lanes;
+ bool vfb;
+ u32 ppc;
+ u32 vc;
+ u32 axis_tdata_width;
+ u32 datatype;
+ const char *pxlformat;
+ u32 num_lanes;
+ struct xcsi2rxss_event *events;
+ struct xcsi2rxss_event *vcx_events;
+ bool en_vcx;
+ const struct xcsi2rxss_feature *cfg;
+ struct clk *lite_aclk;
+ struct clk *video_aclk;
+ struct clk *dphy_clk_200M;
+ struct gpio_desc *rst_gpio;
+};
+
+/**
+ * struct xcsi2rxss_state - CSI2 Rx Subsystem device structure
+ * @core: Core structure for MIPI CSI2 Rx Subsystem
+ * @subdev: The v4l2 subdev structure
+ * @ctrl_handler: control handler
+ * @formats: Active V4L2 formats on each pad
+ * @default_format: default V4L2 media bus format
+ * @vip_format: format information corresponding to the active format
+ * @event: Holds the short packet event
+ * @lock: mutex for serializing operations
+ * @pads: media pads
+ * @npads: number of pads
+ * @streaming: Flag for storing streaming state
+ * @suspended: Flag for storing suspended state
+ *
+ * This structure contains the device driver related parameters
+ */
+struct xcsi2rxss_state {
+ struct xcsi2rxss_core core;
+ struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_format;
+ const struct xvip_video_format *vip_format;
+ struct v4l2_event event;
+ struct mutex lock;
+ struct media_pad pads[XILINX_CSI_MEDIA_PADS];
+ unsigned int npads;
+ bool streaming;
+ bool suspended;
+};
+
+static const struct xcsi2rxss_feature xlnx_csi2rxss_v4_0 = {
+ .flags = XCSI_CLK_PROP,
+};
+
+static const struct xcsi2rxss_feature xlnx_csi2rxss_v2_0 = {
+ .flags = 0,
+};
+
+static const struct of_device_id xcsi2rxss_of_id_table[] = {
+ { .compatible = "xlnx,mipi-csi2-rx-subsystem-2.0",
+ .data = &xlnx_csi2rxss_v2_0 },
+ { .compatible = "xlnx,mipi-csi2-rx-subsystem-3.0",
+ .data = &xlnx_csi2rxss_v2_0 },
+ { .compatible = "xlnx,mipi-csi2-rx-subsystem-4.0",
+ .data = &xlnx_csi2rxss_v4_0 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xcsi2rxss_of_id_table);
+
+static inline struct xcsi2rxss_state *
+to_xcsi2rxssstate(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xcsi2rxss_state, subdev);
+}
+
+/*
+ * Register related operations
+ */
+static inline u32 xcsi2rxss_read(struct xcsi2rxss_core *xcsi2rxss,
+ u32 addr)
+{
+ return ioread32(xcsi2rxss->iomem + addr);
+}
+
+static inline void xcsi2rxss_write(struct xcsi2rxss_core *xcsi2rxss,
+ u32 addr,
+ u32 value)
+{
+ iowrite32(value, xcsi2rxss->iomem + addr);
+}
+
+static inline void xcsi2rxss_clr(struct xcsi2rxss_core *xcsi2rxss,
+ u32 addr,
+ u32 clr)
+{
+ xcsi2rxss_write(xcsi2rxss,
+ addr,
+ xcsi2rxss_read(xcsi2rxss, addr) & ~clr);
+}
+
+static inline void xcsi2rxss_set(struct xcsi2rxss_core *xcsi2rxss,
+ u32 addr,
+ u32 set)
+{
+ xcsi2rxss_write(xcsi2rxss,
+ addr,
+ xcsi2rxss_read(xcsi2rxss, addr) | set);
+}
+
+static const struct pixel_format pixel_formats[] = {
+ { MIPI_CSI_DT_YUV_420_8B, "YUV420_8bit" },
+ { MIPI_CSI_DT_YUV_420_10B, "YUV420_10bit" },
+ { MIPI_CSI_DT_YUV_420_8B_LEGACY, "Legacy_YUV420_8bit" },
+ { MIPI_CSI_DT_YUV_420_8B_CSPS, "YUV420_8bit_CSPS" },
+ { MIPI_CSI_DT_YUV_420_10B_CSPS, "YUV420_10bit_CSPS" },
+ { MIPI_CSI_DT_YUV_422_8B, "YUV422_8bit" },
+ { MIPI_CSI_DT_YUV_422_10B, "YUV422_10bit" },
+ { MIPI_CSI_DT_RGB_444, "RGB444" },
+ { MIPI_CSI_DT_RGB_555, "RGB555" },
+ { MIPI_CSI_DT_RGB_565, "RGB565" },
+ { MIPI_CSI_DT_RGB_666, "RGB666" },
+ { MIPI_CSI_DT_RGB_888, "RGB888" },
+ { MIPI_CSI_DT_RAW_6, "RAW6" },
+ { MIPI_CSI_DT_RAW_7, "RAW7" },
+ { MIPI_CSI_DT_RAW_8, "RAW8" },
+ { MIPI_CSI_DT_RAW_10, "RAW10" },
+ { MIPI_CSI_DT_RAW_12, "RAW12" },
+ { MIPI_CSI_DT_RAW_14, "RAW14" },
+ { MIPI_CSI_DT_RAW_16, "RAW16" },
+ { MIPI_CSI_DT_RAW_20, "RAW20" }
+};
+
+static struct xcsi2rxss_event xcsi2rxss_events[] = {
+ { XCSI_ISR_FR_MASK, "Frame Received", 0 },
+ { XCSI_ISR_VCX_MASK, "VCX Frame Errors", 0 },
+ { XCSI_ISR_ILC_MASK, "Invalid Lane Count Error", 0 },
+ { XCSI_ISR_SPFIFOF_MASK, "Short Packet FIFO Overflow Error", 0 },
+ { XCSI_ISR_SPFIFONE_MASK, "Short Packet FIFO Not Empty", 0 },
+ { XCSI_ISR_SLBF_MASK, "Stream Line Buffer Full Error", 0 },
+ { XCSI_ISR_STOP_MASK, "Lane Stop State", 0 },
+ { XCSI_ISR_SOTERR_MASK, "SOT Error", 0 },
+ { XCSI_ISR_SOTSYNCERR_MASK, "SOT Sync Error", 0 },
+ { XCSI_ISR_ECC2BERR_MASK, "2 Bit ECC Unrecoverable Error", 0 },
+ { XCSI_ISR_ECC1BERR_MASK, "1 Bit ECC Recoverable Error", 0 },
+ { XCSI_ISR_CRCERR_MASK, "CRC Error", 0 },
+ { XCSI_ISR_DATAIDERR_MASK, "Data Id Error", 0 },
+ { XCSI_ISR_VC3FSYNCERR_MASK, "Virtual Channel 3 Frame Sync Error", 0 },
+ { XCSI_ISR_VC3FLVLERR_MASK, "Virtual Channel 3 Frame Level Error", 0 },
+ { XCSI_ISR_VC2FSYNCERR_MASK, "Virtual Channel 2 Frame Sync Error", 0 },
+ { XCSI_ISR_VC2FLVLERR_MASK, "Virtual Channel 2 Frame Level Error", 0 },
+ { XCSI_ISR_VC1FSYNCERR_MASK, "Virtual Channel 1 Frame Sync Error", 0 },
+ { XCSI_ISR_VC1FLVLERR_MASK, "Virtual Channel 1 Frame Level Error", 0 },
+ { XCSI_ISR_VC0FSYNCERR_MASK, "Virtual Channel 0 Frame Sync Error", 0 },
+ { XCSI_ISR_VC0FLVLERR_MASK, "Virtual Channel 0 Frame Level Error", 0 }
+};
+
+#define XMIPICSISS_NUM_EVENTS ARRAY_SIZE(xcsi2rxss_events)
+
+#define XMIPICSISS_VCX_START (4)
+#define XMIPICSISS_MAX_VC (4)
+#define XMIPICSISS_MAX_VCX (16)
+
+/* There are two events per VC: frame sync error and frame level error */
+#define XMIPICSISS_VCX_NUM_EVENTS ((XMIPICSISS_MAX_VCX -\
+ XMIPICSISS_MAX_VC) * 2)
+
+/**
+ * xcsi2rxss_clr_and_set - Clear and set the register with a bitmask
+ * @xcsi2rxss: Xilinx MIPI CSI2 Rx Subsystem subdev core struct
+ * @addr: address of register
+ * @clr: bitmask to be cleared
+ * @set: bitmask to be set
+ *
+ * Clear a bit(s) of mask @clr in the register at address @addr, then set
+ * a bit(s) of mask @set in the register after.
+ */
+static void xcsi2rxss_clr_and_set(struct xcsi2rxss_core *xcsi2rxss,
+ u32 addr, u32 clr, u32 set)
+{
+ u32 reg;
+
+ reg = xcsi2rxss_read(xcsi2rxss, addr);
+ reg &= ~clr;
+ reg |= set;
+ xcsi2rxss_write(xcsi2rxss, addr, reg);
+}
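+
+/*
+ * Illustrative note: this helper is the usual read-modify-write for
+ * multi-bit register fields. For example, programming n active lanes
+ * into the protocol configuration register (as done in s_ctrl below):
+ *
+ * xcsi2rxss_clr_and_set(core, XCSI_PCR_OFFSET,
+ * XCSI_PCR_ACTLANES_MASK, n - 1);
+ */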
+
+/**
+ * xcsi2rxss_pxlfmtstrtodt - Convert pixel format string got from dts
+ * to data type.
+ * @pxlfmtstr: String obtained while parsing device node
+ *
+ * This function takes a CSI pixel format string obtained while parsing
+ * device tree node and converts it to data type.
+ *
+ * Eg. "RAW8" string is converted to 0x2A.
+ * Refer to MIPI CSI2 specification for details.
+ *
+ * Return: Equivalent pixel format value from table
+ */
+static u32 xcsi2rxss_pxlfmtstrtodt(const char *pxlfmtstr)
+{
+ u32 Index;
+ u32 MaxEntries = ARRAY_SIZE(pixel_formats);
+
+ for (Index = 0; Index < MaxEntries; Index++) {
+ if (!strncmp(pixel_formats[Index].PixelFormatStr,
+ pxlfmtstr, MAX_XIL_CSIDT_STR_LENGTH))
+ return pixel_formats[Index].PixelFormat;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * xcsi2rxss_pxlfmtdttostr - Convert pixel format data type to string.
+ * @datatype: MIPI CSI-2 Data Type
+ *
+ * This function takes a CSI pixel format data type and returns a
+ * pointer to the string name.
+ *
+ * E.g. 0x2A returns the "RAW8" string.
+ * Refer to the MIPI CSI2 specification for details.
+ *
+ * Return: Equivalent pixel format string from the table, or NULL if the
+ * data type does not match any supported format
+ */
+static const char *xcsi2rxss_pxlfmtdttostr(u32 datatype)
+{
+ u32 Index;
+ u32 MaxEntries = ARRAY_SIZE(pixel_formats);
+
+ for (Index = 0; Index < MaxEntries; Index++) {
+ if (pixel_formats[Index].PixelFormat == datatype)
+ return pixel_formats[Index].PixelFormatStr;
+ }
+
+ return NULL;
+}
+
+/**
+ * xcsi2rxss_enable - Enable or disable the CSI Core
+ * @core: Core Xilinx CSI2 Rx Subsystem structure pointer
+ * @flag: true for enabling, false for disabling
+ *
+ * This function enables/disables the MIPI CSI2 Rx Subsystem core.
+ * After enabling the CSI2 Rx core, the DPHY is enabled in case the
+ * register interface for it is present.
+ */
+static void xcsi2rxss_enable(struct xcsi2rxss_core *core, bool flag)
+{
+ u32 DphyCtrlRegOffset = core->dphy_offset + XDPHY_CTRLREG_OFFSET;
+
+ if (flag) {
+ xcsi2rxss_write(core, XCSI_CCR_OFFSET, XCSI_CCR_COREENB_MASK);
+ if (core->dphy_present)
+ xcsi2rxss_write(core, DphyCtrlRegOffset,
+ XDPHY_CTRLREG_DPHYEN_MASK);
+ } else {
+ xcsi2rxss_write(core, XCSI_CCR_OFFSET, 0);
+ if (core->dphy_present)
+ xcsi2rxss_write(core, DphyCtrlRegOffset, 0);
+ }
+}
+
+/**
+ * xcsi2rxss_interrupts_enable - Enable or disable CSI interrupts
+ * @core: Core Xilinx CSI2 Rx Subsystem structure pointer
+ * @flag: true for enabling, false for disabling
+ *
+ * This function enables/disables the interrupts for the MIPI CSI2
+ * Rx Subsystem.
+ */
+static void xcsi2rxss_interrupts_enable(struct xcsi2rxss_core *core, bool flag)
+{
+ if (flag) {
+ xcsi2rxss_clr(core, XCSI_GIER_OFFSET, XCSI_GIER_GIE_MASK);
+ xcsi2rxss_write(core, XCSI_IER_OFFSET, XCSI_INTR_MASK);
+ xcsi2rxss_set(core, XCSI_GIER_OFFSET, XCSI_GIER_GIE_MASK);
+ } else {
+ xcsi2rxss_clr(core, XCSI_IER_OFFSET, XCSI_INTR_MASK);
+ xcsi2rxss_clr(core, XCSI_GIER_OFFSET, XCSI_GIER_GIE_MASK);
+ }
+}
+
+/**
+ * xcsi2rxss_reset - Does a soft reset of the MIPI CSI2 Rx Subsystem
+ * @core: Core Xilinx CSI2 Rx Subsystem structure pointer
+ *
+ * Return: 0 - on success OR -ETIME if reset times out
+ */
+static int xcsi2rxss_reset(struct xcsi2rxss_core *core)
+{
+ u32 Timeout = XCSI_TIMEOUT_VAL;
+
+ xcsi2rxss_set(core, XCSI_CCR_OFFSET, XCSI_CCR_SOFTRESET_MASK);
+
+ while (xcsi2rxss_read(core, XCSI_CSR_OFFSET) & XCSI_CSR_RIPCD_MASK) {
+ if (Timeout == 0) {
+ dev_err(core->dev, "Xilinx CSI2 Rx Subsystem Soft Reset Timeout!\n");
+ return -ETIME;
+ }
+
+ Timeout--;
+ udelay(1);
+ }
+
+ xcsi2rxss_clr(core, XCSI_CCR_OFFSET, XCSI_CCR_SOFTRESET_MASK);
+ return 0;
+}
+
+static void xcsi2rxss_stop_stream(struct xcsi2rxss_state *xcsi2rxss)
+{
+ xcsi2rxss_interrupts_enable(&xcsi2rxss->core, false);
+ xcsi2rxss_enable(&xcsi2rxss->core, false);
+}
+
+/**
+ * xcsi2rxss_irq_handler - Interrupt handler for CSI-2
+ * @irq: IRQ number
+ * @dev_id: Pointer to device state
+ *
+ * In the interrupt handler, a set of event counters is updated for the
+ * corresponding interrupts. This is useful for status reporting and
+ * debugging. If a short packet FIFO not-empty or overflow interrupt is
+ * received, the short packet is captured and an event is generated to
+ * notify listeners of its occurrence.
+ *
+ * Return: IRQ_HANDLED after handling interrupts, IRQ_NONE if no
+ * interrupt status bits are set
+ */
+static irqreturn_t xcsi2rxss_irq_handler(int irq, void *dev_id)
+{
+ struct xcsi2rxss_state *state = (struct xcsi2rxss_state *)dev_id;
+ struct xcsi2rxss_core *core = &state->core;
+ u32 status;
+
+ status = xcsi2rxss_read(core, XCSI_ISR_OFFSET) & XCSI_INTR_MASK;
+ dev_dbg(core->dev, "interrupt status = 0x%08x\n", status);
+
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & XCSI_ISR_SPFIFONE_MASK) {
+
+ memset(&state->event, 0, sizeof(state->event));
+
+ state->event.type = V4L2_EVENT_XLNXCSIRX_SPKT;
+
+ *((u32 *)(&state->event.u.data)) =
+ xcsi2rxss_read(core, XCSI_SPKTR_OFFSET);
+
+ v4l2_subdev_notify_event(&state->subdev, &state->event);
+ }
+
+ if (status & XCSI_ISR_SPFIFOF_MASK) {
+ dev_alert(core->dev, "Short packet FIFO overflowed\n");
+
+ memset(&state->event, 0, sizeof(state->event));
+
+ state->event.type = V4L2_EVENT_XLNXCSIRX_SPKT_OVF;
+
+ v4l2_subdev_notify_event(&state->subdev, &state->event);
+ }
+
+ if (status & XCSI_ISR_SLBF_MASK) {
+ dev_alert(core->dev, "Stream Line Buffer Full!\n");
+ if (core->rst_gpio) {
+ gpiod_set_value(core->rst_gpio, 1);
+ /* minimum 40 dphy_clk_200M cycles */
+ ndelay(250);
+ gpiod_set_value(core->rst_gpio, 0);
+ }
+
+ xcsi2rxss_stop_stream(state);
+
+ memset(&state->event, 0, sizeof(state->event));
+
+ state->event.type = V4L2_EVENT_XLNXCSIRX_SLBF;
+
+ v4l2_subdev_notify_event(&state->subdev, &state->event);
+ }
+
+ if (status & XCSI_ISR_ALLINTR_MASK) {
+ unsigned int i;
+
+ for (i = 0; i < XMIPICSISS_NUM_EVENTS; i++) {
+ if (!(status & core->events[i].mask))
+ continue;
+ core->events[i].counter++;
+ dev_dbg(core->dev, "%s: %d\n", core->events[i].name,
+ core->events[i].counter);
+ }
+
+ if (status & XCSI_ISR_VCX_MASK && core->en_vcx) {
+ u32 vcxstatus;
+
+ vcxstatus = xcsi2rxss_read(core, XCSI_VCXR_OFFSET);
+ vcxstatus &= XCSI_VCXR_MASK;
+ for (i = 0; i < XMIPICSISS_VCX_NUM_EVENTS; i++) {
+ if (!(vcxstatus & core->vcx_events[i].mask))
+ continue;
+ core->vcx_events[i].counter++;
+ }
+ xcsi2rxss_write(core, XCSI_VCXR_OFFSET, vcxstatus);
+ }
+ }
+
+ xcsi2rxss_write(core, XCSI_ISR_OFFSET, status);
+
+ return IRQ_HANDLED;
+}
+
+static void xcsi2rxss_reset_event_counters(struct xcsi2rxss_state *state)
+{
+ int i;
+
+ for (i = 0; i < XMIPICSISS_NUM_EVENTS; i++)
+ state->core.events[i].counter = 0;
+
+ if (!state->core.en_vcx)
+ return;
+
+ for (i = 0; i < XMIPICSISS_VCX_NUM_EVENTS; i++)
+ state->core.vcx_events[i].counter = 0;
+}
+
+/**
+ * xcsi2rxss_log_counters - Print out the event counters.
+ * @state: Pointer to device state
+ */
+static void xcsi2rxss_log_counters(struct xcsi2rxss_state *state)
+{
+ int i;
+
+ for (i = 0; i < XMIPICSISS_NUM_EVENTS; i++) {
+ if (state->core.events[i].counter > 0)
+ v4l2_info(&state->subdev, "%s events: %d\n",
+ state->core.events[i].name,
+ state->core.events[i].counter);
+ }
+
+ if (!state->core.en_vcx)
+ return;
+
+ for (i = 0; i < XMIPICSISS_VCX_NUM_EVENTS; i++) {
+ if (state->core.vcx_events[i].counter > 0)
+ v4l2_info(&state->subdev,
+ "VC %d Frame %s error vcx events: %d\n",
+ (i / 2) + XMIPICSISS_VCX_START,
+ i & 1 ? "Sync" : "Level",
+ state->core.vcx_events[i].counter);
+ }
+}
+
+/**
+ * xcsi2rxss_log_status - Logs the status of the CSI-2 Receiver
+ * @sd: Pointer to V4L2 subdevice structure
+ *
+ * This function prints the current status of Xilinx MIPI CSI-2
+ *
+ * Return: 0 on success
+ */
+static int xcsi2rxss_log_status(struct v4l2_subdev *sd)
+{
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+ struct xcsi2rxss_core *core = &xcsi2rxss->core;
+ u32 reg, data, i, max_vc;
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ xcsi2rxss_log_counters(xcsi2rxss);
+
+ v4l2_info(sd, "***** Core Status *****\n");
+ data = xcsi2rxss_read(core, XCSI_CSR_OFFSET);
+ v4l2_info(sd, "Short Packet FIFO Full = %s\n",
+ XCSI_GET_BITSET_STR(data, XCSI_CSR_SPFIFOFULL_MASK));
+ v4l2_info(sd, "Short Packet FIFO Not Empty = %s\n",
+ XCSI_GET_BITSET_STR(data, XCSI_CSR_SPFIFONE_MASK));
+ v4l2_info(sd, "Stream line buffer full = %s\n",
+ XCSI_GET_BITSET_STR(data, XCSI_CSR_SLBF_MASK));
+ v4l2_info(sd, "Soft reset/Core disable in progress = %s\n",
+ XCSI_GET_BITSET_STR(data, XCSI_CSR_RIPCD_MASK));
+
+ /* Clk & Lane Info */
+ v4l2_info(sd, "******** Clock Lane Info *********\n");
+ data = xcsi2rxss_read(core, XCSI_CLKINFR_OFFSET);
+ v4l2_info(sd, "Clock Lane in Stop State = %s\n",
+ XCSI_GET_BITSET_STR(data, XCSI_CLKINFR_STOP_MASK));
+
+ v4l2_info(sd, "******** Data Lane Info *********\n");
+ v4l2_info(sd, "Lane\tSoT Error\tSoT Sync Error\tStop State\n");
+ reg = XCSI_L0INFR_OFFSET;
+ for (i = 0; i < 4; i++) {
+ data = xcsi2rxss_read(core, reg);
+
+ v4l2_info(sd, "%d\t%s\t\t%s\t\t%s\n",
+ i,
+ XCSI_GET_BITSET_STR(data, XCSI_LXINFR_SOTERR_MASK),
+ XCSI_GET_BITSET_STR(data, XCSI_LXINFR_SOTSYNCERR_MASK),
+ XCSI_GET_BITSET_STR(data, XCSI_LXINFR_STOP_MASK));
+
+ reg += 4;
+ }
+
+ /* Virtual Channel Image Information */
+ v4l2_info(sd, "********** Virtual Channel Info ************\n");
+ v4l2_info(sd, "VC\tLine Count\tByte Count\tData Type\n");
+ if (core->en_vcx)
+ max_vc = XMIPICSISS_MAX_VCX;
+ else
+ max_vc = XMIPICSISS_MAX_VC;
+
+ reg = XCSI_VC0INF1R_OFFSET;
+ for (i = 0; i < max_vc; i++) {
+ u32 line_count, byte_count, data_type;
+ char *datatypestr;
+
+ /* Get line and byte count from VCXINFR1 Register */
+ data = xcsi2rxss_read(core, reg);
+ byte_count = (data & XCSI_VCXINF1R_BYTECOUNT_MASK) >>
+ XCSI_VCXINF1R_BYTECOUNT_SHIFT;
+ line_count = (data & XCSI_VCXINF1R_LINECOUNT_MASK) >>
+ XCSI_VCXINF1R_LINECOUNT_SHIFT;
+
+ /* Get data type from VCXINFR2 Register */
+ reg += 4;
+ data = xcsi2rxss_read(core, reg);
+ data_type = (data & XCSI_VCXINF2R_DATATYPE_MASK) >>
+ XCSI_VCXINF2R_DATATYPE_SHIFT;
+ datatypestr = (char *)xcsi2rxss_pxlfmtdttostr(data_type);
+
+ v4l2_info(sd, "%d\t%d\t\t%d\t\t%s\n",
+ i, line_count, byte_count, datatypestr);
+
+ /* Move to next pair of VC Info registers */
+ reg += 4;
+ }
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return 0;
+}
+
+/**
+ * xcsi2rxss_subscribe_event - Subscribe to the custom short packet
+ * receive event.
+ * @sd: V4L2 Sub device
+ * @fh: V4L2 File Handle
+ * @sub: Subscribe event structure
+ *
+ * There are three types of events that can be subscribed.
+ *
+ * The first is to register for receiving a short packet.
+ * The short packets received are queued up in a FIFO.
+ * On reception of a short packet, an event will be generated
+ * with the short packet contents copied to its data area.
+ * An application subscribed to this event will poll for POLLPRI.
+ * On getting the event, the application dequeues the event to get the
+ * short packet data.
+ *
+ * The second is to register for the short packet FIFO overflow event.
+ * In case the rate of receiving short packets is high and
+ * the short packet FIFO overflows, this event will be triggered.
+ *
+ * The third is to register for the stream line buffer full event,
+ * generated when the core reports that its stream line buffer is full.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xcsi2rxss_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ int ret;
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ switch (sub->type) {
+ case V4L2_EVENT_XLNXCSIRX_SPKT:
+ case V4L2_EVENT_XLNXCSIRX_SPKT_OVF:
+ case V4L2_EVENT_XLNXCSIRX_SLBF:
+ ret = v4l2_event_subscribe(fh, sub, XCSI_MAX_SPKT, NULL);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return ret;
+}
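+
+/*
+ * Illustrative sketch, not part of the driver: an application consumes
+ * short packet events by subscribing, polling for POLLPRI and dequeuing
+ * the event; the packet contents land in ev.u.data.
+ *
+ * struct v4l2_event_subscription sub = {
+ * .type = V4L2_EVENT_XLNXCSIRX_SPKT,
+ * };
+ * struct v4l2_event ev;
+ * struct pollfd pfd = { .fd = fd, .events = POLLPRI };
+ *
+ * ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
+ * poll(&pfd, 1, -1);
+ * ioctl(fd, VIDIOC_DQEVENT, &ev);
+ */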
+
+/**
+ * xcsi2rxss_unsubscribe_event - Unsubscribe from all events registered
+ * @sd: V4L2 Sub device
+ * @fh: V4L2 file handle
+ * @sub: pointer to Event unsubscription structure
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static int xcsi2rxss_unsubscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ int ret = 0;
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+
+ mutex_lock(&xcsi2rxss->lock);
+ ret = v4l2_event_unsubscribe(fh, sub);
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return ret;
+}
+
+/**
+ * xcsi2rxss_s_ctrl - This is used to set the Xilinx MIPI CSI-2 V4L2 controls
+ * @ctrl: V4L2 control to be set
+ *
+ * This function is used to set the V4L2 controls for the Xilinx MIPI
+ * CSI-2 Rx Subsystem: it sets the number of active lanes in the system
+ * and can reset the event counters.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xcsi2rxss_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ int ret = 0;
+ u32 Timeout = XCSI_TIMEOUT_VAL;
+ u32 active_lanes = 1;
+
+ struct xcsi2rxss_state *xcsi2rxss =
+ container_of(ctrl->handler,
+ struct xcsi2rxss_state, ctrl_handler);
+ struct xcsi2rxss_core *core = &xcsi2rxss->core;
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_MIPICSISS_ACT_LANES:
+ /*
+ * This will be called only when "Enable Active Lanes" parameter
+ * is set in design
+ */
+ xcsi2rxss_clr_and_set(core, XCSI_PCR_OFFSET,
+ XCSI_PCR_ACTLANES_MASK, ctrl->val - 1);
+
+ /*
+ * If the core is enabled, wait for active lanes to be
+ * set.
+ *
+ * If core is disabled or there is no clock from DPHY Tx
+ * then the read back won't reflect the updated value
+ * as the PPI clock will not be present.
+ */
+
+ if (core->dphy_present) {
+ u32 dphyclkstatregoffset = core->dphy_offset +
+ XDPHY_CLKSTATREG_OFFSET;
+
+ u32 dphyclkstat =
+ xcsi2rxss_read(core, dphyclkstatregoffset) &
+ XDPHY_CLKSTATREG_MODE_MASK;
+
+ u32 coreenable =
+ xcsi2rxss_read(core, XCSI_CCR_OFFSET) &
+ XCSI_CCR_COREENB_MASK;
+
+ char lpmstr[] = "Low Power";
+ char hsmstr[] = "High Speed";
+ char esmstr[] = "Escape";
+ char *modestr;
+
+ switch (dphyclkstat) {
+ case 0:
+ modestr = lpmstr;
+ break;
+ case 1:
+ modestr = hsmstr;
+ break;
+ case 2:
+ modestr = esmstr;
+ break;
+ default:
+ modestr = NULL;
+ break;
+ }
+
+ dev_dbg(core->dev, "DPHY Clock Lane in %s mode\n",
+ modestr);
+
+ if ((dphyclkstat == XDPHY_HI_SPEED_MODE) &&
+ coreenable) {
+
+ /* Wait for core to apply new active lanes */
+ while (Timeout--)
+ udelay(1);
+
+ active_lanes =
+ xcsi2rxss_read(core, XCSI_PCR_OFFSET);
+ active_lanes &= XCSI_PCR_ACTLANES_MASK;
+ active_lanes++;
+
+ if (active_lanes != ctrl->val) {
+ dev_err(core->dev, "Failed to set active lanes!\n");
+ ret = -EAGAIN;
+ }
+ }
+ } else {
+ dev_dbg(core->dev, "No read back as no DPHY present.\n");
+ }
+
+ dev_dbg(core->dev, "Set active lanes: requested = %d, active = %d\n",
+ ctrl->val, active_lanes);
+ break;
+ case V4L2_CID_XILINX_MIPICSISS_RESET_COUNTERS:
+ xcsi2rxss_reset_event_counters(xcsi2rxss);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return ret;
+}
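+
+/*
+ * Illustrative sketch, not part of the driver: with the "Enable Active
+ * Lanes" parameter set in the IP, an application can reduce the link to
+ * two lanes through the standard control interface; the ioctl fails
+ * with EAGAIN if the core did not take the new value.
+ *
+ * struct v4l2_control ctrl = {
+ * .id = V4L2_CID_XILINX_MIPICSISS_ACT_LANES,
+ * .value = 2,
+ * };
+ *
+ * ioctl(fd, VIDIOC_S_CTRL, &ctrl);
+ */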
+
+/**
+ * xcsi2rxss_g_volatile_ctrl - get the Xilinx MIPI CSI-2 Rx controls
+ * @ctrl: Pointer to V4L2 control
+ *
+ * This is used to get the number of frames received by the Xilinx
+ * MIPI CSI-2 Rx.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xcsi2rxss_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ int ret = 0;
+ struct xcsi2rxss_state *xcsi2rxss =
+ container_of(ctrl->handler,
+ struct xcsi2rxss_state, ctrl_handler);
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_MIPICSISS_FRAME_COUNTER:
+ ctrl->val = xcsi2rxss->core.events[0].counter;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return ret;
+}
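+
+/*
+ * Illustrative sketch, not part of the driver: the frame counter is a
+ * volatile read-only control, so every read returns a fresh snapshot of
+ * the "Frame Received" event counter.
+ *
+ * struct v4l2_control ctrl = {
+ * .id = V4L2_CID_XILINX_MIPICSISS_FRAME_COUNTER,
+ * };
+ *
+ * ioctl(fd, VIDIOC_G_CTRL, &ctrl);
+ * printf("frames received: %d\n", ctrl.value);
+ */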
+
+static int xcsi2rxss_start_stream(struct xcsi2rxss_state *xcsi2rxss)
+{
+ int ret;
+
+ xcsi2rxss_enable(&xcsi2rxss->core, true);
+
+ ret = xcsi2rxss_reset(&xcsi2rxss->core);
+ if (ret < 0)
+ return ret;
+
+ xcsi2rxss_interrupts_enable(&xcsi2rxss->core, true);
+
+ return 0;
+}
+
+/**
+ * xcsi2rxss_s_stream - Start or stop streaming
+ * @sd: V4L2 Sub device
+ * @enable: Flag (True / False)
+ *
+ * This function controls the start or stop of streaming for the
+ * Xilinx MIPI CSI-2 Rx Subsystem provided the device isn't in
+ * suspended state.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xcsi2rxss_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ int ret = 0;
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ if (xcsi2rxss->suspended) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (enable) {
+ if (!xcsi2rxss->streaming) {
+ /* reset the event counters */
+ xcsi2rxss_reset_event_counters(xcsi2rxss);
+
+ ret = xcsi2rxss_start_stream(xcsi2rxss);
+ if (ret == 0)
+ xcsi2rxss->streaming = true;
+ }
+ } else {
+ if (xcsi2rxss->streaming) {
+ struct gpio_desc *rst = xcsi2rxss->core.rst_gpio;
+
+ if (rst) {
+ gpiod_set_value_cansleep(rst, 1);
+ usleep_range(1, 2);
+ gpiod_set_value_cansleep(rst, 0);
+ }
+
+ xcsi2rxss_stop_stream(xcsi2rxss);
+ xcsi2rxss->streaming = false;
+ }
+ }
+unlock:
+ mutex_unlock(&xcsi2rxss->lock);
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__xcsi2rxss_get_pad_format(struct xcsi2rxss_state *xcsi2rxss,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xcsi2rxss->subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xcsi2rxss->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * xcsi2rxss_get_format - Get the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @fmt: Pointer to pad level media bus format
+ *
+ * This function is used to get the pad format information.
+ *
+ * Return: 0 on success
+ */
+static int xcsi2rxss_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+
+ mutex_lock(&xcsi2rxss->lock);
+ fmt->format = *__xcsi2rxss_get_pad_format(xcsi2rxss, cfg,
+ fmt->pad, fmt->which);
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return 0;
+}
+
+/**
+ * xcsi2rxss_set_format - This is used to set the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @fmt: Pointer to pad level media bus format
+ *
+ * This function is used to set the pad format.
+ * Since the pad format is fixed in hardware, it can't be
+ * modified at run time. So when a format set is requested by an
+ * application, all parameters except the format code are
+ * saved for the pad and the original pad format is sent
+ * back to the application.
+ *
+ * Return: 0 on success
+ */
+static int xcsi2rxss_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *__format;
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+ struct xcsi2rxss_core *core = &xcsi2rxss->core;
+ u32 code;
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ /*
+ * Only the format->code parameter matters for CSI as the
+ * CSI format cannot be changed at runtime.
+ * Ensure that the format to set is copied over to the CSI pad format.
+ */
+ __format = __xcsi2rxss_get_pad_format(xcsi2rxss, cfg,
+ fmt->pad, fmt->which);
+
+ /* Save the pad format code */
+ code = __format->code;
+
+ /*
+ * If the bayer pattern to be set is SXXXX8 then only 1X8 types
+ * are supported and the core's data type doesn't matter.
+ * If the bayer pattern being set is SXXXX10 then only 1X10
+ * types are supported and the core should be configured for
+ * RAW10. Likewise SXXXX12 requires 1X12 types and RAW12, and
+ * SXXXX16 requires 1X16 types and RAW16.
+ *
+ * Otherwise don't allow the change.
+ */
+ if (((fmt->format.code == MEDIA_BUS_FMT_SBGGR8_1X8) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGBRG8_1X8) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGRBG8_1X8) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SRGGB8_1X8)) ||
+ ((core->datatype == MIPI_CSI_DT_RAW_10) &&
+ ((fmt->format.code == MEDIA_BUS_FMT_SBGGR10_1X10) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGBRG10_1X10) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGRBG10_1X10) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SRGGB10_1X10))) ||
+ ((core->datatype == MIPI_CSI_DT_RAW_12) &&
+ ((fmt->format.code == MEDIA_BUS_FMT_SBGGR12_1X12) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGBRG12_1X12) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGRBG12_1X12) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SRGGB12_1X12))) ||
+ ((core->datatype == MIPI_CSI_DT_RAW_16) &&
+ ((fmt->format.code == MEDIA_BUS_FMT_SBGGR16_1X16) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGBRG16_1X16) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGRBG16_1X16) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SRGGB16_1X16)))) {
+ /* Copy over the format to be set */
+ *__format = fmt->format;
+ } else {
+ /* Restore the original pad format code */
+ fmt->format.code = code;
+ __format->code = code;
+ __format->width = fmt->format.width;
+ __format->height = fmt->format.height;
+ }
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return 0;
+}
+
+/**
+ * xcsi2rxss_open - Called on v4l2_open()
+ * @sd: Pointer to V4L2 sub device structure
+ * @fh: Pointer to V4L2 File handle
+ *
+ * This function is called on v4l2_open(). It sets the default format
+ * for both pads.
+ *
+ * Return: 0 on success
+ */
+static int xcsi2rxss_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format;
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+
+ format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ *format = xcsi2rxss->default_format;
+
+ format = v4l2_subdev_get_try_format(sd, fh->pad, 1);
+ *format = xcsi2rxss->default_format;
+
+ return 0;
+}
+
+static int xcsi2rxss_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xcsi2rxss_media_ops = {
+ .link_validate = v4l2_subdev_link_validate
+};
+
+static const struct v4l2_ctrl_ops xcsi2rxss_ctrl_ops = {
+ .g_volatile_ctrl = xcsi2rxss_g_volatile_ctrl,
+ .s_ctrl = xcsi2rxss_s_ctrl
+};
+
+static struct v4l2_ctrl_config xcsi2rxss_ctrls[] = {
+ {
+ .ops = &xcsi2rxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_MIPICSISS_ACT_LANES,
+ .name = "MIPI CSI2 Rx Subsystem: Active Lanes",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 4,
+ .step = 1,
+ .def = 1,
+ }, {
+ .ops = &xcsi2rxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_MIPICSISS_FRAME_COUNTER,
+ .name = "MIPI CSI2 Rx Subsystem: Frames Received Counter",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0xFFFFFFFF,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ .ops = &xcsi2rxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_MIPICSISS_RESET_COUNTERS,
+ .name = "MIPI CSI2 Rx Subsystem: Reset Counters",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_WRITE_ONLY,
+ }
+};
+
+static const struct v4l2_subdev_core_ops xcsi2rxss_core_ops = {
+ .log_status = xcsi2rxss_log_status,
+ .subscribe_event = xcsi2rxss_subscribe_event,
+ .unsubscribe_event = xcsi2rxss_unsubscribe_event
+};
+
+static const struct v4l2_subdev_video_ops xcsi2rxss_video_ops = {
+	.s_stream = xcsi2rxss_s_stream
+};
+
+static const struct v4l2_subdev_pad_ops xcsi2rxss_pad_ops = {
+	.get_fmt = xcsi2rxss_get_format,
+	.set_fmt = xcsi2rxss_set_format,
+};
+
+static const struct v4l2_subdev_ops xcsi2rxss_ops = {
+	.core = &xcsi2rxss_core_ops,
+	.video = &xcsi2rxss_video_ops,
+	.pad = &xcsi2rxss_pad_ops
+};
+
+static const struct v4l2_subdev_internal_ops xcsi2rxss_internal_ops = {
+ .open = xcsi2rxss_open,
+ .close = xcsi2rxss_close
+};
+
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */
+
+/**
+ * xcsi2rxss_pm_suspend - Function called on Power Suspend
+ * @dev: Pointer to device structure
+ *
+ * On power suspend, the CSI-2 core is disabled if the device is
+ * streaming and not already suspended.
+ *
+ * Return: 0 on success
+ */
+static int __maybe_unused xcsi2rxss_pm_suspend(struct device *dev)
+{
+ struct xcsi2rxss_state *xcsi2rxss = dev_get_drvdata(dev);
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ if (!xcsi2rxss->suspended && xcsi2rxss->streaming)
+ xcsi2rxss_clr(&xcsi2rxss->core,
+ XCSI_CCR_OFFSET, XCSI_CCR_COREENB_MASK);
+
+ xcsi2rxss->suspended = true;
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return 0;
+}
+
+/**
+ * xcsi2rxss_pm_resume - Function called on Power Resume
+ * @dev: Pointer to device structure
+ *
+ * On power resume, the CSI-2 core is re-enabled if it was streaming
+ * before entering the suspended state.
+ *
+ * Return: 0 on success
+ */
+static int __maybe_unused xcsi2rxss_pm_resume(struct device *dev)
+{
+ struct xcsi2rxss_state *xcsi2rxss = dev_get_drvdata(dev);
+
+ mutex_lock(&xcsi2rxss->lock);
+
+	if (xcsi2rxss->suspended && xcsi2rxss->streaming)
+ xcsi2rxss_set(&xcsi2rxss->core,
+ XCSI_CCR_OFFSET, XCSI_CCR_COREENB_MASK);
+
+ xcsi2rxss->suspended = false;
+
+ mutex_unlock(&xcsi2rxss->lock);
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xcsi2rxss_parse_of(struct xcsi2rxss_state *xcsi2rxss)
+{
+ struct device_node *node = xcsi2rxss->core.dev->of_node;
+ struct device_node *ports = NULL;
+ struct device_node *port = NULL;
+ unsigned int nports = 0;
+ struct xcsi2rxss_core *core = &xcsi2rxss->core;
+ int ret;
+ bool iic_present;
+
+ if (core->cfg->flags & XCSI_CLK_PROP) {
+ core->lite_aclk = devm_clk_get(core->dev, "lite_aclk");
+ if (IS_ERR(core->lite_aclk)) {
+ ret = PTR_ERR(core->lite_aclk);
+ dev_err(core->dev, "failed to get lite_aclk (%d)\n",
+ ret);
+ return ret;
+ }
+
+ core->video_aclk = devm_clk_get(core->dev, "video_aclk");
+ if (IS_ERR(core->video_aclk)) {
+ ret = PTR_ERR(core->video_aclk);
+ dev_err(core->dev, "failed to get video_aclk (%d)\n",
+ ret);
+ return ret;
+ }
+
+ core->dphy_clk_200M = devm_clk_get(core->dev, "dphy_clk_200M");
+ if (IS_ERR(core->dphy_clk_200M)) {
+ ret = PTR_ERR(core->dphy_clk_200M);
+ dev_err(core->dev, "failed to get dphy_clk_200M (%d)\n",
+ ret);
+ return ret;
+ }
+ } else {
+ dev_info(core->dev, "assuming all required clocks are enabled!\n");
+ }
+
+ core->dphy_present = of_property_read_bool(node, "xlnx,dphy-present");
+ dev_dbg(core->dev, "DPHY present property = %s\n",
+ core->dphy_present ? "Present" : "Absent");
+
+ iic_present = of_property_read_bool(node, "xlnx,iic-present");
+ dev_dbg(core->dev, "IIC present property = %s\n",
+ iic_present ? "Present" : "Absent");
+
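+	/*
+	 * The D-PHY register bank sits behind the optional IIC controller
+	 * in the subsystem address map, so its offset depends on whether
+	 * the IIC is present in the design.
+	 */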
+ if (core->dphy_present) {
+ if (iic_present)
+ core->dphy_offset = 0x20000;
+ else
+ core->dphy_offset = 0x10000;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-lanes",
+ &core->max_num_lanes);
+ if (ret < 0) {
+ dev_err(core->dev, "missing xlnx,max-lanes property\n");
+ return ret;
+ }
+
+	if (core->max_num_lanes > 4 || core->max_num_lanes < 1) {
+		dev_err(core->dev, "invalid xlnx,max-lanes property (%d)\n",
+			core->max_num_lanes);
+ return -EINVAL;
+ }
+
+ core->en_vcx = of_property_read_bool(node, "xlnx,en-vcx");
+
+ ret = of_property_read_u32(node, "xlnx,vc", &core->vc);
+ if (ret < 0) {
+ dev_err(core->dev, "missing xlnx,vc property\n");
+ return ret;
+ }
+ if ((core->vc > XMIPICSISS_MAX_VC && !core->en_vcx) ||
+ (core->vc > XMIPICSISS_MAX_VCX && core->en_vcx)) {
+ dev_err(core->dev, "invalid virtual channel property value.\n");
+ return -EINVAL;
+ }
+
+ core->enable_active_lanes =
+ of_property_read_bool(node, "xlnx,en-active-lanes");
+ dev_dbg(core->dev, "Enable active lanes property = %s\n",
+ core->enable_active_lanes ? "Present" : "Absent");
+
+ ret = of_property_read_string(node, "xlnx,csi-pxl-format",
+ &core->pxlformat);
+ if (ret < 0) {
+ dev_err(core->dev, "missing xlnx,csi-pxl-format property\n");
+ return ret;
+ }
+
+ core->datatype = xcsi2rxss_pxlfmtstrtodt(core->pxlformat);
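+	/* any CSI-2 data type from YUV 4:2:0 8-bit up to RAW20 is valid */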
+ if ((core->datatype < MIPI_CSI_DT_YUV_420_8B) ||
+ (core->datatype > MIPI_CSI_DT_RAW_20)) {
+ dev_err(core->dev, "Invalid xlnx,csi-pxl-format string\n");
+ return -EINVAL;
+ }
+
+ core->vfb = of_property_read_bool(node, "xlnx,vfb");
+ dev_dbg(core->dev, "Video Format Bridge property = %s\n",
+ core->vfb ? "Present" : "Absent");
+
+ if (core->vfb) {
+ ret = of_property_read_u32(node, "xlnx,ppc", &core->ppc);
+		if (ret < 0 || (core->ppc != 1 && core->ppc != 2 &&
+				core->ppc != 4)) {
+ dev_err(core->dev, "Invalid xlnx,ppc property ret = %d ppc = %d\n",
+ ret, core->ppc);
+ return -EINVAL;
+ }
+ }
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ for_each_child_of_node(ports, port) {
+ int ret;
+ const struct xvip_video_format *format;
+ struct device_node *endpoint;
+ struct v4l2_fwnode_endpoint v4lendpoint;
+
+ if (!port->name || of_node_cmp(port->name, "port"))
+ continue;
+
+ /*
+ * Currently only a subset of VFB enabled formats present in
+ * xvip are supported in the driver.
+ *
+ * If the VFB is disabled, the pixels per clock don't matter.
+ * The data width is either 32 or 64 bit as selected in design.
+ *
+		 * For example, if the data type is RGB888, VFB is disabled
+		 * and the data width is 32 bits:
+ *
+ * Clk Cycle | Byte 0 | Byte 1 | Byte 2 | Byte 3
+ * -----------+----------+----------+----------+----------
+ * 1 | B0 | G0 | R0 | B1
+ * 2 | G1 | R1 | B2 | G2
+ * 3 | R2 | B3 | G3 | R3
+ */
+ format = xvip_of_get_format(port);
+ if (IS_ERR(format)) {
+ dev_err(core->dev, "invalid format in DT");
+ return PTR_ERR(format);
+ }
+
+ if (core->vfb &&
+ (format->vf_code != XVIP_VF_YUV_422) &&
+ (format->vf_code != XVIP_VF_RBG) &&
+ (format->vf_code != XVIP_VF_MONO_SENSOR)) {
+ dev_err(core->dev, "Invalid UG934 video format set.\n");
+ return -EINVAL;
+ }
+
+ /* Get and check the format description */
+ if (!xcsi2rxss->vip_format) {
+ xcsi2rxss->vip_format = format;
+ } else if (xcsi2rxss->vip_format != format) {
+ dev_err(core->dev, "in/out format mismatch in DT");
+ return -EINVAL;
+ }
+
+ endpoint = of_get_next_child(port, NULL);
+ if (!endpoint) {
+ dev_err(core->dev, "No port at\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
+ &v4lendpoint);
+ if (ret) {
+ of_node_put(endpoint);
+ return ret;
+ }
+
+ of_node_put(endpoint);
+ dev_dbg(core->dev, "%s : port %d bus type = %d\n",
+ __func__, nports, v4lendpoint.bus_type);
+
+ if (v4lendpoint.bus_type == V4L2_MBUS_CSI2_DPHY) {
+ dev_dbg(core->dev, "%s : base.port = %d base.id = %d\n",
+ __func__,
+ v4lendpoint.base.port,
+ v4lendpoint.base.id);
+
+ dev_dbg(core->dev, "%s : mipi number lanes = %d\n",
+ __func__,
+ v4lendpoint.bus.mipi_csi2.num_data_lanes);
+ } else {
+ dev_dbg(core->dev, "%s : Not a CSI2 bus\n", __func__);
+ }
+
+ /* Count the number of ports. */
+ nports++;
+ }
+
+ if (nports != 2) {
+ dev_err(core->dev, "invalid number of ports %u\n", nports);
+ return -EINVAL;
+ }
+ xcsi2rxss->npads = nports;
+
+	/* Register interrupt handler */
+	core->irq = irq_of_parse_and_map(node, 0);
+	if (!core->irq) {
+		dev_err(core->dev, "no IRQ found in DT\n");
+		return -ENODEV;
+	}
+
+ ret = devm_request_irq(core->dev, core->irq, xcsi2rxss_irq_handler,
+ IRQF_SHARED, "xilinx-csi2rxss", xcsi2rxss);
+ if (ret) {
+ dev_err(core->dev, "Err = %d Interrupt handler reg failed!\n",
+ ret);
+ return ret;
+ }
+
+ /* Reset GPIO */
+ core->rst_gpio = devm_gpiod_get_optional(core->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(core->rst_gpio)) {
+ if (PTR_ERR(core->rst_gpio) != -EPROBE_DEFER)
+ dev_err(core->dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(core->rst_gpio);
+ }
+
+ return 0;
+}
+
+static int xcsi2rxss_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct xcsi2rxss_state *xcsi2rxss;
+ struct resource *res;
+ const struct of_device_id *match;
+ struct device_node *node = pdev->dev.of_node;
+ u32 i;
+ int ret;
+ int num_ctrls;
+
+ xcsi2rxss = devm_kzalloc(&pdev->dev, sizeof(*xcsi2rxss), GFP_KERNEL);
+ if (!xcsi2rxss)
+ return -ENOMEM;
+
+ mutex_init(&xcsi2rxss->lock);
+
+ xcsi2rxss->core.dev = &pdev->dev;
+
+ match = of_match_node(xcsi2rxss_of_id_table, node);
+ if (!match)
+ return -ENODEV;
+
+ xcsi2rxss->core.cfg = match->data;
+
+ ret = xcsi2rxss_parse_of(xcsi2rxss);
+ if (ret < 0)
+ return ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xcsi2rxss->core.iomem = devm_ioremap_resource(xcsi2rxss->core.dev, res);
+ if (IS_ERR(xcsi2rxss->core.iomem))
+ return PTR_ERR(xcsi2rxss->core.iomem);
+
+ if (xcsi2rxss->core.cfg->flags & XCSI_CLK_PROP) {
+ unsigned long rate;
+
+ ret = clk_prepare_enable(xcsi2rxss->core.lite_aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable lite_aclk (%d)\n",
+ ret);
+ goto clk_err;
+ }
+
+ ret = clk_prepare_enable(xcsi2rxss->core.video_aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable video_aclk (%d)\n",
+ ret);
+ goto video_aclk_err;
+ }
+
+ ret = clk_prepare_enable(xcsi2rxss->core.dphy_clk_200M);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable dphy clk (%d)\n",
+ ret);
+ goto dphy_clk_err;
+ }
+
+ ret = clk_set_rate(xcsi2rxss->core.dphy_clk_200M,
+ XCSI_DPHY_CLK_REQ);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set dphy clk rate (%d)\n",
+ ret);
+
+ goto all_clk_err;
+ }
+
+ rate = clk_get_rate(xcsi2rxss->core.dphy_clk_200M);
+		if (rate < XCSI_DPHY_CLK_MIN || rate > XCSI_DPHY_CLK_MAX) {
+			dev_err(&pdev->dev, "DPHY clock rate %lu Hz out of range\n",
+				rate);
+ ret = -EINVAL;
+ goto all_clk_err;
+ }
+ }
+
+ /*
+ * Reset and initialize the core.
+ */
+
+ if (xcsi2rxss->core.rst_gpio) {
+ gpiod_set_value_cansleep(xcsi2rxss->core.rst_gpio, 1);
+		/*
+		 * Hold reset for a minimum of 40 dphy_clk_200M cycles
+		 * (40 / 200 MHz = 200 ns); a 1-2 us sleep comfortably
+		 * covers this.
+		 */
+ usleep_range(1, 2);
+ gpiod_set_value_cansleep(xcsi2rxss->core.rst_gpio, 0);
+ }
+
+ xcsi2rxss_reset(&xcsi2rxss->core);
+
+ xcsi2rxss->core.events = (struct xcsi2rxss_event *)&xcsi2rxss_events;
+
+ if (xcsi2rxss->core.en_vcx) {
+ u32 alloc_size;
+
+ alloc_size = sizeof(struct xcsi2rxss_event) *
+ XMIPICSISS_VCX_NUM_EVENTS;
+ xcsi2rxss->core.vcx_events = devm_kzalloc(&pdev->dev,
+ alloc_size,
+ GFP_KERNEL);
+ if (!xcsi2rxss->core.vcx_events) {
+ mutex_destroy(&xcsi2rxss->lock);
+ return -ENOMEM;
+ }
+
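+		/* assign one interrupt status bit to each extended-VC event */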
+ for (i = 0; i < XMIPICSISS_VCX_NUM_EVENTS; i++)
+ xcsi2rxss->core.vcx_events[i].mask = 1 << i;
+ }
+
+ /* Initialize V4L2 subdevice and media entity */
+ xcsi2rxss->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ xcsi2rxss->pads[1].flags = MEDIA_PAD_FL_SINK;
+
+ /* Initialize the default format */
+ memset(&xcsi2rxss->default_format, 0,
+ sizeof(xcsi2rxss->default_format));
+ xcsi2rxss->default_format.code = xcsi2rxss->vip_format->code;
+ xcsi2rxss->default_format.field = V4L2_FIELD_NONE;
+ xcsi2rxss->default_format.colorspace = V4L2_COLORSPACE_SRGB;
+ xcsi2rxss->default_format.width = XCSI_DEFAULT_WIDTH;
+ xcsi2rxss->default_format.height = XCSI_DEFAULT_HEIGHT;
+
+ xcsi2rxss->formats[0] = xcsi2rxss->default_format;
+ xcsi2rxss->formats[1] = xcsi2rxss->default_format;
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xcsi2rxss->subdev;
+ v4l2_subdev_init(subdev, &xcsi2rxss_ops);
+
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xcsi2rxss_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ subdev->entity.ops = &xcsi2rxss_media_ops;
+
+ v4l2_set_subdevdata(subdev, xcsi2rxss);
+
+ ret = media_entity_pads_init(&subdev->entity, 2, xcsi2rxss->pads);
+ if (ret < 0)
+ goto error;
+
+ /*
+ * In case the Enable Active Lanes config parameter is not set,
+ * dynamic lane reconfiguration is not allowed.
+ * So V4L2_CID_XILINX_MIPICSISS_ACT_LANES ctrl will not be registered.
+ * Accordingly allocate the number of controls
+ */
+ num_ctrls = ARRAY_SIZE(xcsi2rxss_ctrls);
+
+ if (!xcsi2rxss->core.enable_active_lanes)
+ num_ctrls--;
+
+ dev_dbg(xcsi2rxss->core.dev, "# of ctrls = %d\n", num_ctrls);
+
+ v4l2_ctrl_handler_init(&xcsi2rxss->ctrl_handler, num_ctrls);
+
+ for (i = 0; i < ARRAY_SIZE(xcsi2rxss_ctrls); i++) {
+ struct v4l2_ctrl *ctrl;
+
+ if (xcsi2rxss_ctrls[i].id ==
+ V4L2_CID_XILINX_MIPICSISS_ACT_LANES) {
+
+ if (xcsi2rxss->core.enable_active_lanes) {
+ xcsi2rxss_ctrls[i].max =
+ xcsi2rxss->core.max_num_lanes;
+ } else {
+ /* Don't register control */
+ dev_dbg(xcsi2rxss->core.dev,
+ "Skip active lane control\n");
+ continue;
+ }
+ }
+
+ dev_dbg(xcsi2rxss->core.dev, "%d ctrl = 0x%x\n",
+ i, xcsi2rxss_ctrls[i].id);
+ ctrl = v4l2_ctrl_new_custom(&xcsi2rxss->ctrl_handler,
+ &xcsi2rxss_ctrls[i], NULL);
+ if (!ctrl) {
+ dev_err(xcsi2rxss->core.dev, "Failed for %s ctrl\n",
+ xcsi2rxss_ctrls[i].name);
+			ret = -EINVAL;
+			goto error;
+ }
+ }
+
+ dev_dbg(xcsi2rxss->core.dev, "# v4l2 ctrls registered = %d\n", i - 1);
+
+ if (xcsi2rxss->ctrl_handler.error) {
+ dev_err(&pdev->dev, "failed to add controls\n");
+ ret = xcsi2rxss->ctrl_handler.error;
+ goto error;
+ }
+
+ subdev->ctrl_handler = &xcsi2rxss->ctrl_handler;
+
+ ret = v4l2_ctrl_handler_setup(&xcsi2rxss->ctrl_handler);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to set controls\n");
+ goto error;
+ }
+
+ platform_set_drvdata(pdev, xcsi2rxss);
+
+ dev_info(xcsi2rxss->core.dev, "Xilinx CSI2 Rx Subsystem device found!\n");
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ /* default states for streaming and suspend */
+ xcsi2rxss->streaming = false;
+ xcsi2rxss->suspended = false;
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&xcsi2rxss->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ mutex_destroy(&xcsi2rxss->lock);
+
+all_clk_err:
+ clk_disable_unprepare(xcsi2rxss->core.dphy_clk_200M);
+dphy_clk_err:
+ clk_disable_unprepare(xcsi2rxss->core.video_aclk);
+video_aclk_err:
+ clk_disable_unprepare(xcsi2rxss->core.lite_aclk);
+clk_err:
+ return ret;
+}
+
+static int xcsi2rxss_remove(struct platform_device *pdev)
+{
+ struct xcsi2rxss_state *xcsi2rxss = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xcsi2rxss->subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xcsi2rxss->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ mutex_destroy(&xcsi2rxss->lock);
+ clk_disable_unprepare(xcsi2rxss->core.dphy_clk_200M);
+ clk_disable_unprepare(xcsi2rxss->core.video_aclk);
+ clk_disable_unprepare(xcsi2rxss->core.lite_aclk);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xcsi2rxss_pm_ops,
+ xcsi2rxss_pm_suspend, xcsi2rxss_pm_resume);
+
+static struct platform_driver xcsi2rxss_driver = {
+ .driver = {
+ .name = "xilinx-csi2rxss",
+ .pm = &xcsi2rxss_pm_ops,
+ .of_match_table = xcsi2rxss_of_id_table,
+ },
+ .probe = xcsi2rxss_probe,
+ .remove = xcsi2rxss_remove,
+};
+
+module_platform_driver(xcsi2rxss_driver);
+
+MODULE_AUTHOR("Vishal Sagar <vsagar@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx MIPI CSI2 Rx Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-demosaic.c b/drivers/media/platform/xilinx/xilinx-demosaic.c
new file mode 100644
index 000000000000..a519c2c9719b
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-demosaic.c
@@ -0,0 +1,418 @@
+/*
+ * Xilinx Video Demosaic IP
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XDEMOSAIC_AP_CTRL (0x00)
+#define XDEMOSAIC_WIDTH (0x10)
+#define XDEMOSAIC_HEIGHT (0x18)
+#define XDEMOSAIC_INPUT_BAYER_FORMAT (0x28)
+
+#define XDEMOSAIC_MIN_HEIGHT (64)
+#define XDEMOSAIC_MAX_HEIGHT (4320)
+#define XDEMOSAIC_DEF_HEIGHT (720)
+#define XDEMOSAIC_MIN_WIDTH (64)
+#define XDEMOSAIC_MAX_WIDTH (8192)
+#define XDEMOSAIC_DEF_WIDTH (1280)
+
+#define XDEMOSAIC_RESET_DEASSERT (0)
+#define XDEMOSAIC_RESET_ASSERT (1)
+#define XDEMOSAIC_START BIT(0)
+#define XDEMOSAIC_AUTO_RESTART BIT(7)
+#define XDEMOSAIC_STREAM_ON (XDEMOSAIC_AUTO_RESTART | XDEMOSAIC_START)
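+/* AUTO_RESTART keeps the core running frame after frame once started */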
+
+enum xdmsc_bayer_format {
+ XDEMOSAIC_RGGB = 0,
+ XDEMOSAIC_GRBG,
+ XDEMOSAIC_GBRG,
+ XDEMOSAIC_BGGR,
+};
+
+struct xdmsc_dev {
+ struct xvip_device xvip;
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+
+ enum xdmsc_bayer_format bayer_fmt;
+ struct gpio_desc *rst_gpio;
+ u32 max_width;
+ u32 max_height;
+};
+
+static inline u32 xdmsc_read(struct xdmsc_dev *xdmsc, u32 reg)
+{
+ u32 data;
+
+ data = xvip_read(&xdmsc->xvip, reg);
+ dev_dbg(xdmsc->xvip.dev,
+ "Reading 0x%x from reg offset 0x%x", data, reg);
+ return data;
+}
+
+static inline void xdmsc_write(struct xdmsc_dev *xdmsc, u32 reg, u32 data)
+{
+ xvip_write(&xdmsc->xvip, reg, data);
+ dev_dbg(xdmsc->xvip.dev,
+ "Writing 0x%x to reg offset 0x%x", data, reg);
+#ifdef DEBUG
+ if (xdmsc_read(xdmsc, reg) != data)
+ dev_err(xdmsc->xvip.dev,
+ "Wrote 0x%x does not match read back", data);
+#endif
+}
+
+static inline struct xdmsc_dev *to_xdmsc(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xdmsc_dev, xvip.subdev);
+}
+
+static struct v4l2_mbus_framefmt
+*__xdmsc_get_pad_format(struct xdmsc_dev *xdmsc,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xdmsc->xvip.subdev,
+ cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xdmsc->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xdmsc_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xdmsc_dev *xdmsc = to_xdmsc(subdev);
+
+ if (!enable) {
+ dev_dbg(xdmsc->xvip.dev, "%s : Off", __func__);
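+		/* pulse the reset line to halt the core and clear its state */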
+ gpiod_set_value_cansleep(xdmsc->rst_gpio,
+ XDEMOSAIC_RESET_ASSERT);
+ gpiod_set_value_cansleep(xdmsc->rst_gpio,
+ XDEMOSAIC_RESET_DEASSERT);
+ return 0;
+ }
+
+ xdmsc_write(xdmsc, XDEMOSAIC_WIDTH,
+ xdmsc->formats[XVIP_PAD_SINK].width);
+ xdmsc_write(xdmsc, XDEMOSAIC_HEIGHT,
+ xdmsc->formats[XVIP_PAD_SINK].height);
+ xdmsc_write(xdmsc, XDEMOSAIC_INPUT_BAYER_FORMAT, xdmsc->bayer_fmt);
+
+ /* Start Demosaic Video IP */
+ xdmsc_write(xdmsc, XDEMOSAIC_AP_CTRL, XDEMOSAIC_STREAM_ON);
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops xdmsc_video_ops = {
+ .s_stream = xdmsc_s_stream,
+};
+
+static int xdmsc_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xdmsc_dev *xdmsc = to_xdmsc(subdev);
+
+ fmt->format = *__xdmsc_get_pad_format(xdmsc, cfg, fmt->pad, fmt->which);
+ return 0;
+}
+
+static bool
+xdmsc_is_format_bayer(struct xdmsc_dev *xdmsc, u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ case MEDIA_BUS_FMT_SRGGB16_1X16:
+ xdmsc->bayer_fmt = XDEMOSAIC_RGGB;
+ break;
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG16_1X16:
+ xdmsc->bayer_fmt = XDEMOSAIC_GRBG;
+ break;
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGBRG16_1X16:
+ xdmsc->bayer_fmt = XDEMOSAIC_GBRG;
+ break;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SBGGR16_1X16:
+ xdmsc->bayer_fmt = XDEMOSAIC_BGGR;
+ break;
+ default:
+ dev_dbg(xdmsc->xvip.dev, "Unsupported format for Sink Pad");
+ return false;
+ }
+ return true;
+}
+
+static int xdmsc_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xdmsc_dev *xdmsc = to_xdmsc(subdev);
+ struct v4l2_mbus_framefmt *__format;
+
+ __format = __xdmsc_get_pad_format(xdmsc, cfg, fmt->pad, fmt->which);
+ *__format = fmt->format;
+
+ __format->width = clamp_t(unsigned int, fmt->format.width,
+ XDEMOSAIC_MIN_WIDTH, xdmsc->max_width);
+ __format->height = clamp_t(unsigned int, fmt->format.height,
+ XDEMOSAIC_MIN_HEIGHT, xdmsc->max_height);
+
+ if (fmt->pad == XVIP_PAD_SOURCE) {
+ if (__format->code != MEDIA_BUS_FMT_RBG888_1X24 &&
+ __format->code != MEDIA_BUS_FMT_RBG101010_1X30 &&
+ __format->code != MEDIA_BUS_FMT_RBG121212_1X36 &&
+ __format->code != MEDIA_BUS_FMT_RBG161616_1X48) {
+ dev_dbg(xdmsc->xvip.dev,
+ "%s : Unsupported source media bus code format",
+ __func__);
+ __format->code = MEDIA_BUS_FMT_RBG888_1X24;
+ }
+ }
+
+ if (fmt->pad == XVIP_PAD_SINK) {
+ if (!xdmsc_is_format_bayer(xdmsc, __format->code)) {
+ dev_dbg(xdmsc->xvip.dev,
+ "Unsupported Sink Pad Media format, defaulting to RGGB");
+ __format->code = MEDIA_BUS_FMT_SRGGB8_1X8;
+ }
+ }
+
+ fmt->format = *__format;
+ return 0;
+}
+
+static int xdmsc_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xdmsc_dev *xdmsc = to_xdmsc(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xdmsc->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xdmsc->default_formats[XVIP_PAD_SOURCE];
+ return 0;
+}
+
+static int xdmsc_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops xdmsc_internal_ops = {
+ .open = xdmsc_open,
+ .close = xdmsc_close,
+};
+
+static const struct v4l2_subdev_pad_ops xdmsc_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xdmsc_get_format,
+ .set_fmt = xdmsc_set_format,
+};
+
+static const struct v4l2_subdev_ops xdmsc_ops = {
+ .video = &xdmsc_video_ops,
+ .pad = &xdmsc_pad_ops,
+};
+
+static const struct media_entity_operations xdmsc_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int xdmsc_parse_of(struct xdmsc_dev *xdmsc)
+{
+ struct device *dev = xdmsc->xvip.dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id = 0;
+ int rval;
+
+ rval = of_property_read_u32(node, "xlnx,max-height",
+ &xdmsc->max_height);
+ if (rval < 0) {
+ dev_err(dev, "missing xlnx,max-height property!");
+ return -EINVAL;
+ } else if (xdmsc->max_height > XDEMOSAIC_MAX_HEIGHT ||
+ xdmsc->max_height < XDEMOSAIC_MIN_HEIGHT) {
+ dev_err(dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(node, "xlnx,max-width",
+ &xdmsc->max_width);
+ if (rval < 0) {
+ dev_err(dev, "missing xlnx,max-width property!");
+ return -EINVAL;
+ } else if (xdmsc->max_width > XDEMOSAIC_MAX_WIDTH ||
+ xdmsc->max_width < XDEMOSAIC_MIN_WIDTH) {
+ dev_err(dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ rval = of_property_read_u32(port, "reg", &port_id);
+ if (rval < 0) {
+ dev_err(dev, "No reg in DT");
+ return rval;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "Invalid reg in DT");
+ return -EINVAL;
+ }
+ }
+ }
+
+ xdmsc->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(xdmsc->rst_gpio)) {
+ if (PTR_ERR(xdmsc->rst_gpio) != -EPROBE_DEFER)
+ dev_err(dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(xdmsc->rst_gpio);
+ }
+ return 0;
+}
+
+static int xdmsc_probe(struct platform_device *pdev)
+{
+ struct xdmsc_dev *xdmsc;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *def_fmt;
+ int rval;
+
+ xdmsc = devm_kzalloc(&pdev->dev, sizeof(*xdmsc), GFP_KERNEL);
+ if (!xdmsc)
+ return -ENOMEM;
+ xdmsc->xvip.dev = &pdev->dev;
+ rval = xdmsc_parse_of(xdmsc);
+ if (rval < 0)
+ return rval;
+	rval = xvip_init_resources(&xdmsc->xvip);
+	if (rval < 0)
+		return rval;
+
+	/* Deassert reset to bring the Demosaic IP out of reset */
+ gpiod_set_value_cansleep(xdmsc->rst_gpio,
+ XDEMOSAIC_RESET_DEASSERT);
+
+ /* Init V4L2 subdev */
+ subdev = &xdmsc->xvip.subdev;
+ v4l2_subdev_init(subdev, &xdmsc_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xdmsc_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Default Formats Initialization */
+ def_fmt = &xdmsc->default_formats[XVIP_PAD_SINK];
+ def_fmt->field = V4L2_FIELD_NONE;
+ def_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ def_fmt->width = XDEMOSAIC_DEF_WIDTH;
+ def_fmt->height = XDEMOSAIC_DEF_HEIGHT;
+
+ /*
+ * Sink Pad can be any Bayer format.
+ * Default Sink Pad format is RGGB.
+ */
+ def_fmt->code = MEDIA_BUS_FMT_SRGGB8_1X8;
+ xdmsc->formats[XVIP_PAD_SINK] = *def_fmt;
+
+ def_fmt = &xdmsc->default_formats[XVIP_PAD_SOURCE];
+ *def_fmt = xdmsc->default_formats[XVIP_PAD_SINK];
+
+ /* Source Pad has a fixed media bus format of RGB */
+ def_fmt->code = MEDIA_BUS_FMT_RBG888_1X24;
+ xdmsc->formats[XVIP_PAD_SOURCE] = *def_fmt;
+
+ xdmsc->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xdmsc->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ /* Init Media Entity */
+ subdev->entity.ops = &xdmsc_media_ops;
+ rval = media_entity_pads_init(&subdev->entity, 2, xdmsc->pads);
+ if (rval < 0)
+ goto media_error;
+
+ platform_set_drvdata(pdev, xdmsc);
+ rval = v4l2_async_register_subdev(subdev);
+ if (rval < 0) {
+ dev_err(&pdev->dev, "failed to register subdev");
+ goto v4l2_subdev_error;
+ }
+ dev_info(&pdev->dev,
+ "Xilinx Video Demosaic Probe Successful");
+ return 0;
+
+v4l2_subdev_error:
+ media_entity_cleanup(&subdev->entity);
+media_error:
+ xvip_cleanup_resources(&xdmsc->xvip);
+ return rval;
+}
+
+static int xdmsc_remove(struct platform_device *pdev)
+{
+ struct xdmsc_dev *xdmsc = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xdmsc->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xdmsc->xvip);
+ return 0;
+}
+
+static const struct of_device_id xdmsc_of_id_table[] = {
+ {.compatible = "xlnx,v-demosaic"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xdmsc_of_id_table);
+
+static struct platform_driver xdmsc_driver = {
+ .driver = {
+ .name = "xilinx-demosaic",
+ .of_match_table = xdmsc_of_id_table,
+ },
+ .probe = xdmsc_probe,
+ .remove = xdmsc_remove,
+
+};
+
+module_platform_driver(xdmsc_driver);
+MODULE_DESCRIPTION("Xilinx Demosaic IP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index b211380a11f2..168129dc4204 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -10,11 +10,13 @@
*/
#include <linux/dma/xilinx_dma.h>
+#include <linux/dma/xilinx_frmbuf.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/xilinx-v4l2-controls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
@@ -36,6 +38,11 @@
#define XVIP_DMA_MIN_HEIGHT 1U
#define XVIP_DMA_MAX_HEIGHT 8191U
+struct xventity_list {
+ struct list_head list;
+ struct media_entity *entity;
+};
+
/* -----------------------------------------------------------------------------
* Helper functions
*/
@@ -62,7 +69,7 @@ static int xvip_dma_verify_format(struct xvip_dma *dma)
int ret;
subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
- if (subdev == NULL)
+ if (!subdev)
return -EPIPE;
fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
@@ -70,10 +77,15 @@ static int xvip_dma_verify_format(struct xvip_dma *dma)
if (ret < 0)
return ret == -ENOIOCTLCMD ? -EINVAL : ret;
- if (dma->fmtinfo->code != fmt.format.code ||
- dma->format.height != fmt.format.height ||
- dma->format.width != fmt.format.width ||
- dma->format.colorspace != fmt.format.colorspace)
+ if (dma->fmtinfo->code != fmt.format.code)
+ return -EINVAL;
+
+	/*
+	 * The crop rectangle holds the format resolution by default and the
+	 * user-selected rectangle once s_selection has been executed.
+	 */
+ if (dma->r.width != fmt.format.width ||
+ dma->r.height != fmt.format.height)
return -EINVAL;
return 0;
@@ -83,44 +95,135 @@ static int xvip_dma_verify_format(struct xvip_dma *dma)
* Pipeline Stream Management
*/
+static int xvip_entity_start_stop(struct xvip_composite_device *xdev,
+ struct media_entity *entity, bool start)
+{
+ struct v4l2_subdev *subdev;
+ bool is_streaming;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "%s entity %s\n",
+ start ? "Starting" : "Stopping", entity->name);
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ /* This is to maintain list of stream on/off devices */
+ is_streaming = xvip_subdev_set_streaming(xdev, subdev, start);
+
+ /*
+	 * Start or stop the subdev only once in case it is shared
+	 * between sub-graphs.
+ */
+ if (start && !is_streaming) {
+ /* power-on subdevice */
+ ret = v4l2_subdev_call(subdev, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_power on failed on subdev\n");
+ xvip_subdev_set_streaming(xdev, subdev, 0);
+ return ret;
+ }
+
+ /* stream-on subdevice */
+ ret = v4l2_subdev_call(subdev, video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_stream on failed on subdev\n");
+ v4l2_subdev_call(subdev, core, s_power, 0);
+ xvip_subdev_set_streaming(xdev, subdev, 0);
+ }
+ } else if (!start && is_streaming) {
+ /* stream-off subdevice */
+ ret = v4l2_subdev_call(subdev, video, s_stream, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_stream off failed on subdev\n");
+ xvip_subdev_set_streaming(xdev, subdev, 1);
+ }
+
+ /* power-off subdevice */
+ ret = v4l2_subdev_call(subdev, core, s_power, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ dev_err(xdev->dev,
+ "s_power off failed on subdev\n");
+ }
+
+ return ret;
+}
+
/**
 * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
- * @pipe: The pipeline
+ * @xdev: Composite video device
+ * @dma: xvip dma
* @start: Start (when true) or stop (when false) the pipeline
*
- * Walk the entities chain starting at the pipeline output video node and start
- * or stop all of them.
+ * Walk the entities chain starting at @dma and start or stop all of them.
*
* Return: 0 if successful, or the return value of the failed video::s_stream
* operation otherwise.
*/
-static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
+static int xvip_pipeline_start_stop(struct xvip_composite_device *xdev,
+ struct xvip_dma *dma, bool start)
{
- struct xvip_dma *dma = pipe->output;
- struct media_entity *entity;
- struct media_pad *pad;
- struct v4l2_subdev *subdev;
- int ret;
+ struct media_graph graph;
+ struct media_entity *entity = &dma->video.entity;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct xventity_list *temp, *_temp;
+ LIST_HEAD(ent_list);
+ int ret = 0;
- entity = &dma->video.entity;
- while (1) {
- pad = &entity->pads[0];
- if (!(pad->flags & MEDIA_PAD_FL_SINK))
- break;
+ mutex_lock(&mdev->graph_mutex);
- pad = media_entity_remote_pad(pad);
- if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
- break;
+ /* Walk the graph to locate the subdev nodes */
+ ret = media_graph_walk_init(&graph, mdev);
+ if (ret)
+ goto error;
- entity = pad->entity;
- subdev = media_entity_to_v4l2_subdev(entity);
+ media_graph_walk_start(&graph, entity);
- ret = v4l2_subdev_call(subdev, video, s_stream, start);
- if (start && ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
+ /* get the list of entities */
+ while ((entity = media_graph_walk_next(&graph))) {
+ struct xventity_list *ele;
+
+ /* We want to stream on/off only subdevs */
+ if (!is_media_entity_v4l2_subdev(entity))
+ continue;
+
+ /* Maintain the pipeline sequence in a list */
+ ele = kzalloc(sizeof(*ele), GFP_KERNEL);
+ if (!ele) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ele->entity = entity;
+ list_add(&ele->list, &ent_list);
}
- return 0;
+ if (start) {
+ list_for_each_entry_safe(temp, _temp, &ent_list, list) {
+ /* Enable all subdevs from sink to source */
+ ret = xvip_entity_start_stop(xdev, temp->entity, start);
+ if (ret < 0) {
+ dev_err(xdev->dev, "ret = %d for entity %s\n",
+ ret, temp->entity->name);
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry_safe_reverse(temp, _temp, &ent_list, list)
+			/* Disable all subdevs from source to sink */
+ xvip_entity_start_stop(xdev, temp->entity, start);
+ }
+
+ list_for_each_entry_safe(temp, _temp, &ent_list, list) {
+ list_del(&temp->list);
+ kfree(temp);
+ }
+
+error:
+ mutex_unlock(&mdev->graph_mutex);
+ media_graph_walk_cleanup(&graph);
+ return ret;
}
/**
@@ -133,7 +236,8 @@ static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
* independently, pipelines have a shared stream state that enable or disable
* all entities in the pipeline. For this reason the pipeline uses a streaming
* counter that tracks the number of DMA engines that have requested the stream
- * to be enabled.
+ * to be enabled. This will walk the graph starting from each DMA and enable or
+ * disable the entities in the path.
*
* When called with the @on argument set to true, this function will increment
* the pipeline streaming count. If the streaming count reaches the number of
@@ -150,20 +254,31 @@ static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
*/
static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
{
+ struct xvip_composite_device *xdev;
+ struct xvip_dma *dma;
int ret = 0;
mutex_lock(&pipe->lock);
+ xdev = pipe->xdev;
if (on) {
if (pipe->stream_count == pipe->num_dmas - 1) {
- ret = xvip_pipeline_start_stop(pipe, true);
- if (ret < 0)
- goto done;
+ /*
+ * This will iterate the DMAs and the stream-on of
+ * subdevs may not be sequential due to multiple
+				 * sub-graph paths
+ */
+ list_for_each_entry(dma, &xdev->dmas, list) {
+ ret = xvip_pipeline_start_stop(xdev, dma, true);
+ if (ret < 0)
+ goto done;
+ }
}
pipe->stream_count++;
} else {
if (--pipe->stream_count == 0)
- xvip_pipeline_start_stop(pipe, false);
+ list_for_each_entry(dma, &xdev->dmas, list)
+ xvip_pipeline_start_stop(xdev, dma, false);
}
done:
@@ -200,23 +315,22 @@ static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
dma = to_xvip_dma(media_entity_to_video_device(entity));
- if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
- pipe->output = dma;
+ if (dma->pad.flags & MEDIA_PAD_FL_SINK)
num_outputs++;
- } else {
+ else
num_inputs++;
- }
}
mutex_unlock(&mdev->graph_mutex);
media_graph_walk_cleanup(&graph);
- /* We need exactly one output and zero or one input. */
- if (num_outputs != 1 || num_inputs > 1)
+ /* We need at least one DMA to proceed */
+ if (num_outputs == 0 && num_inputs == 0)
return -EPIPE;
pipe->num_dmas = num_inputs + num_outputs;
+ pipe->xdev = start->xdev;
return 0;
}
@@ -224,7 +338,6 @@ static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
pipe->num_dmas = 0;
- pipe->output = NULL;
}
/**
@@ -287,11 +400,13 @@ done:
* @buf: vb2 buffer base object
* @queue: buffer list entry in the DMA engine queued buffers list
* @dma: DMA channel that uses the buffer
+ * @desc: Descriptor associated with this structure
*/
struct xvip_dma_buffer {
struct vb2_v4l2_buffer buf;
struct list_head queue;
struct xvip_dma *dma;
+ struct dma_async_tx_descriptor *desc;
};
#define to_xvip_dma_buffer(vb) container_of(vb, struct xvip_dma_buffer, buf)
@@ -300,6 +415,9 @@ static void xvip_dma_complete(void *param)
{
struct xvip_dma_buffer *buf = param;
struct xvip_dma *dma = buf->dma;
+ int i, sizeimage;
+ u32 fid;
+ int status;
spin_lock(&dma->queued_lock);
list_del(&buf->queue);
@@ -308,7 +426,38 @@ static void xvip_dma_complete(void *param)
buf->buf.field = V4L2_FIELD_NONE;
buf->buf.sequence = dma->sequence++;
buf->buf.vb2_buf.timestamp = ktime_get_ns();
- vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
+
+ status = xilinx_xdma_get_fid(dma->dma, buf->desc, &fid);
+ if (!status) {
+ if (((V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) &&
+ dma->format.fmt.pix_mp.field == V4L2_FIELD_ALTERNATE) ||
+ dma->format.fmt.pix.field == V4L2_FIELD_ALTERNATE) {
+ /*
+ * fid = 1 is odd field i.e. V4L2_FIELD_TOP.
+ * fid = 0 is even field i.e. V4L2_FIELD_BOTTOM.
+ */
+ buf->buf.field = fid ?
+ V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+
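+			/*
+			 * Two fields form one frame: a repeated field ID
+			 * means a field was missed, so advance the count an
+			 * extra step, then halve the per-field counter into
+			 * a frame sequence number.
+			 */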
+ if (fid == dma->prev_fid)
+ buf->buf.sequence = dma->sequence++;
+
+ buf->buf.sequence >>= 1;
+ dma->prev_fid = fid;
+ }
+ }
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ for (i = 0; i < dma->fmtinfo->buffers; i++) {
+ sizeimage =
+ dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
+ vb2_set_plane_payload(&buf->buf.vb2_buf, i, sizeimage);
+ }
+ } else {
+ sizeimage = dma->format.fmt.pix.sizeimage;
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, sizeimage);
+ }
+
vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}
@@ -318,13 +467,39 @@ xvip_dma_queue_setup(struct vb2_queue *vq,
unsigned int sizes[], struct device *alloc_devs[])
{
struct xvip_dma *dma = vb2_get_drv_priv(vq);
+ u8 i;
+ int sizeimage;
+
+ /* Multi planar case: Make sure the image size is large enough */
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ if (*nplanes) {
+ if (*nplanes != dma->format.fmt.pix_mp.num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *nplanes; i++) {
+ sizeimage =
+ dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
+ if (sizes[i] < sizeimage)
+ return -EINVAL;
+ }
+ } else {
+ *nplanes = dma->fmtinfo->buffers;
+ for (i = 0; i < dma->fmtinfo->buffers; i++) {
+ sizeimage =
+ dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
+ sizes[i] = sizeimage;
+ }
+ }
+ return 0;
+ }
- /* Make sure the image size is large enough. */
- if (*nplanes)
- return sizes[0] < dma->format.sizeimage ? -EINVAL : 0;
+ /* Single planar case: Make sure the image size is large enough */
+ sizeimage = dma->format.fmt.pix.sizeimage;
+ if (*nplanes == 1)
+ return sizes[0] < sizeimage ? -EINVAL : 0;
*nplanes = 1;
- sizes[0] = dma->format.sizeimage;
+ sizes[0] = sizeimage;
return 0;
}
@@ -348,14 +523,20 @@ static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
struct dma_async_tx_descriptor *desc;
dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
u32 flags;
+ u32 luma_size;
+ u32 padding_factor_nume, padding_factor_deno, bpl_nume, bpl_deno;
+ u32 fid = ~0;
+ u32 bpl;
- if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
dma->xt.dir = DMA_DEV_TO_MEM;
dma->xt.src_sgl = false;
dma->xt.dst_sgl = true;
dma->xt.dst_start = addr;
- } else {
+ } else if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
dma->xt.dir = DMA_MEM_TO_DEV;
dma->xt.src_sgl = true;
@@ -363,10 +544,69 @@ static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
dma->xt.src_start = addr;
}
- dma->xt.frame_size = 1;
- dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
- dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
- dma->xt.numf = dma->format.height;
+ /*
+ * DMA IP supports only 2 planes, so one datachunk is sufficient
+ * to get start address of 2nd plane
+ */
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ struct v4l2_pix_format_mplane *pix_mp;
+
+ pix_mp = &dma->format.fmt.pix_mp;
+ bpl = pix_mp->plane_fmt[0].bytesperline;
+
+ xilinx_xdma_v4l2_config(dma->dma, pix_mp->pixelformat);
+ xvip_width_padding_factor(pix_mp->pixelformat,
+ &padding_factor_nume,
+ &padding_factor_deno);
+ xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume,
+ &bpl_deno);
+ dma->xt.frame_size = dma->fmtinfo->num_planes;
+ dma->sgl[0].size = (dma->r.width * dma->fmtinfo->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ dma->sgl[0].icg = bpl - dma->sgl[0].size;
+ dma->xt.numf = dma->r.height;
+
+ /*
+ * dst_icg is the number of bytes to jump after last luma addr
+ * and before first chroma addr
+ */
+
+ /* Handling contiguous data with mplanes */
+ if (dma->fmtinfo->buffers == 1) {
+ dma->sgl[0].dst_icg = bpl *
+ (pix_mp->height - dma->r.height);
+ } else {
+ /* Handling non-contiguous data with mplanes */
+ if (dma->fmtinfo->buffers == 2) {
+ dma_addr_t chroma_addr =
+ vb2_dma_contig_plane_dma_addr(vb, 1);
+ luma_size = bpl * dma->xt.numf;
+ if (chroma_addr > addr)
+ dma->sgl[0].dst_icg = chroma_addr -
+ addr - luma_size;
+ }
+ }
+ } else {
+ struct v4l2_pix_format *pix;
+
+ pix = &dma->format.fmt.pix;
+ bpl = pix->bytesperline;
+ xilinx_xdma_v4l2_config(dma->dma, pix->pixelformat);
+ xvip_width_padding_factor(pix->pixelformat,
+ &padding_factor_nume,
+ &padding_factor_deno);
+ xvip_bpl_scaling_factor(pix->pixelformat, &bpl_nume,
+ &bpl_deno);
+ dma->xt.frame_size = dma->fmtinfo->num_planes;
+ dma->sgl[0].size = (dma->r.width * dma->fmtinfo->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ dma->sgl[0].icg = bpl - dma->sgl[0].size;
+ dma->xt.numf = dma->r.height;
+		dma->sgl[0].dst_icg = bpl * (pix->height - dma->r.height);
+ }
desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
if (!desc) {
@@ -376,11 +616,28 @@ static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
}
desc->callback = xvip_dma_complete;
desc->callback_param = buf;
+ buf->desc = desc;
+
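+	/* field ID for the DMA engine: 1 = top/odd field, 0 = bottom/even */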
+ if (buf->buf.field == V4L2_FIELD_TOP)
+ fid = 1;
+ else if (buf->buf.field == V4L2_FIELD_BOTTOM)
+ fid = 0;
+ else if (buf->buf.field == V4L2_FIELD_NONE)
+ fid = 0;
+
+ xilinx_xdma_set_fid(dma->dma, desc, fid);
spin_lock_irq(&dma->queued_lock);
list_add_tail(&buf->queue, &dma->queued_bufs);
spin_unlock_irq(&dma->queued_lock);
+ /*
+ * Low latency capture: Give descriptor callback at start of
+ * processing the descriptor
+ */
+ if (dma->low_latency_cap)
+ xilinx_xdma_set_earlycb(dma->dma, desc,
+ EARLY_CALLBACK_START_DESC);
dmaengine_submit(desc);
if (vb2_is_streaming(&dma->queue))
@@ -395,6 +652,7 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
int ret;
dma->sequence = 0;
+ dma->prev_fid = ~0;
/*
* Start streaming on the pipeline. No link touching an entity in the
@@ -403,10 +661,12 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
* Use the pipeline object embedded in the first DMA object that starts
* streaming.
*/
+ mutex_lock(&dma->xdev->lock);
pipe = dma->video.entity.pipe
? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
+ mutex_unlock(&dma->xdev->lock);
if (ret < 0)
goto error;
@@ -423,11 +683,25 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
/* Start the DMA engine. This must be done before starting the blocks
* in the pipeline to avoid DMA synchronization issues.
+	 * We don't want to start DMA in case of low latency capture mode,
+ * applications will start DMA using S_CTRL at later point of time.
*/
- dma_async_issue_pending(dma->dma);
+ if (!dma->low_latency_cap) {
+ dma_async_issue_pending(dma->dma);
+ } else {
+ /* For low latency capture, return the first buffer early
+		 * so that the consumer can initialize before we start the DMA.
+ */
+ buf = list_first_entry(&dma->queued_bufs,
+ struct xvip_dma_buffer, queue);
+ xvip_dma_complete(buf);
+ buf->desc->callback = NULL;
+ }
/* Start the pipeline. */
- xvip_pipeline_set_stream(pipe, true);
+ ret = xvip_pipeline_set_stream(pipe, true);
+ if (ret < 0)
+ goto error_stop;
return 0;
@@ -435,6 +709,7 @@ error_stop:
media_pipeline_stop(&dma->video.entity);
error:
+ dmaengine_terminate_all(dma->dma);
/* Give back all queued buffers to videobuf2. */
spin_lock_irq(&dma->queued_lock);
list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
@@ -502,6 +777,100 @@ xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
return 0;
}
+static int xvip_xdma_enum_fmt(struct xvip_dma *dma, struct v4l2_fmtdesc *f,
+ struct v4l2_subdev_format *v4l_fmt)
+{
+ const struct xvip_video_format *fmt;
+ int ret;
+ u32 i, fmt_cnt, *fmts;
+
+ ret = xilinx_xdma_get_v4l2_vid_fmts(dma->dma, &fmt_cnt, &fmts);
+ if (ret)
+ return ret;
+
+ /* Has media pad value changed? */
+ if (v4l_fmt->format.code != dma->remote_subdev_med_bus ||
+ !dma->remote_subdev_med_bus) {
+ /* Re-generate legal list of fourcc codes */
+ dma->poss_v4l2_fmt_cnt = 0;
+ dma->remote_subdev_med_bus = v4l_fmt->format.code;
+
+ if (!dma->poss_v4l2_fmts) {
+ dma->poss_v4l2_fmts =
+ devm_kzalloc(&dma->video.dev,
+ sizeof(u32) * fmt_cnt,
+ GFP_KERNEL);
+ if (!dma->poss_v4l2_fmts)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < fmt_cnt; i++) {
+ fmt = xvip_get_format_by_fourcc(fmts[i]);
+ if (IS_ERR(fmt))
+ return PTR_ERR(fmt);
+
+ if (fmt->code != dma->remote_subdev_med_bus)
+ continue;
+
+ dma->poss_v4l2_fmts[dma->poss_v4l2_fmt_cnt++] = fmts[i];
+ }
+ }
+
+ /* Return err if index is greater than count of legal values */
+ if (f->index >= dma->poss_v4l2_fmt_cnt)
+ return -EINVAL;
+
+ /* Else return pix format in table */
+ fmt = xvip_get_format_by_fourcc(dma->poss_v4l2_fmts[f->index]);
+ if (IS_ERR(fmt))
+ return PTR_ERR(fmt);
+
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int
+xvip_dma_enum_input(struct file *file, void *priv, struct v4l2_input *i)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+ struct v4l2_subdev *subdev;
+
+ if (i->index > 0)
+ return -EINVAL;
+
+ subdev = xvip_dma_remote_subdev(&dma->pad, NULL);
+ if (!subdev)
+ return -EPIPE;
+
+ /*
+ * FIXME: right now only camera input type is handled.
+ * There should be mechanism to distinguish other types of
+ * input like V4L2_INPUT_TYPE_TUNER and V4L2_INPUT_TYPE_TOUCH.
+ */
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ strlcpy(i->name, subdev->name, sizeof(i->name));
+
+ return 0;
+}
+
+static int
+xvip_dma_get_input(struct file *file, void *fh, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int
+xvip_dma_set_input(struct file *file, void *fh, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
/* FIXME: without this callback function, some applications are not configured
* with correct formats, and it results in frames in wrong format. Whether this
* callback needs to be required is not clearly defined, so it should be
@@ -512,11 +881,48 @@ xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
struct v4l2_fh *vfh = file->private_data;
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_format v4l_fmt;
+ const struct xvip_video_format *fmt;
+ int err, ret;
+
+ /* Establish media pad format */
+ subdev = xvip_dma_remote_subdev(&dma->pad, &v4l_fmt.pad);
+ if (!subdev)
+ return -EPIPE;
+ v4l_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &v4l_fmt);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+ /*
+	 * In case of frmbuf DMA, this will invoke frmbuf driver specific APIs
+	 * to enumerate formats; otherwise it returns the pix format corresponding
+ * to subdev's media bus format. This kind of separation would be
+ * helpful for clean up and upstreaming.
+ */
+ err = xvip_xdma_enum_fmt(dma, f, &v4l_fmt);
+ if (!err)
+ return err;
+
+ /*
+ * This logic will just return one pix format based on subdev's
+ * media bus format
+ */
if (f->index > 0)
return -EINVAL;
- f->pixelformat = dma->format.pixelformat;
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
+ f->pixelformat = dma->format.fmt.pix_mp.pixelformat;
+ else
+ f->pixelformat = dma->format.fmt.pix.pixelformat;
+
+ fmt = xvip_get_format_by_code(v4l_fmt.format.code);
+ if (IS_ERR(fmt))
+ return PTR_ERR(fmt);
+
+ f->pixelformat = fmt->fourcc;
return 0;
}
@@ -527,13 +933,17 @@ xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
struct v4l2_fh *vfh = file->private_data;
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
- format->fmt.pix = dma->format;
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
+ format->fmt.pix_mp = dma->format.fmt.pix_mp;
+ else
+ format->fmt.pix = dma->format.fmt.pix;
return 0;
}
static void
-__xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
+__xvip_dma_try_format(struct xvip_dma *dma,
+ struct v4l2_format *format,
const struct xvip_video_format **fmtinfo)
{
const struct xvip_video_format *info;
@@ -544,40 +954,144 @@ __xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
unsigned int width;
unsigned int align;
unsigned int bpl;
+ unsigned int i, hsub, vsub, plane_width, plane_height;
+ unsigned int fourcc;
+ unsigned int padding_factor_nume, padding_factor_deno;
+ unsigned int bpl_nume, bpl_deno;
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
+ if (!subdev)
+ return;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return;
+
+ if (fmt.format.field == V4L2_FIELD_ALTERNATE) {
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
+ dma->format.fmt.pix_mp.field = V4L2_FIELD_ALTERNATE;
+ else
+ dma->format.fmt.pix.field = V4L2_FIELD_ALTERNATE;
+ } else {
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
+ dma->format.fmt.pix_mp.field = V4L2_FIELD_NONE;
+ else
+ dma->format.fmt.pix.field = V4L2_FIELD_NONE;
+ }
/* Retrieve format information and select the default format if the
* requested format isn't supported.
*/
- info = xvip_get_format_by_fourcc(pix->pixelformat);
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
+ fourcc = format->fmt.pix_mp.pixelformat;
+ else
+ fourcc = format->fmt.pix.pixelformat;
+
+ info = xvip_get_format_by_fourcc(fourcc);
+
if (IS_ERR(info))
info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
- pix->pixelformat = info->fourcc;
- pix->field = V4L2_FIELD_NONE;
+ xvip_width_padding_factor(info->fourcc, &padding_factor_nume,
+ &padding_factor_deno);
+ xvip_bpl_scaling_factor(info->fourcc, &bpl_nume, &bpl_deno);
/* The transfer alignment requirements are expressed in bytes. Compute
* the minimum and maximum values, clamp the requested width and convert
* it back to pixels.
*/
- align = lcm(dma->align, info->bpp);
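+	/* info->bpp is in bits; convert to bytes for byte-based alignment */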
+ align = lcm(dma->align, info->bpp >> 3);
min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
- width = rounddown(pix->width * info->bpp, align);
-
- pix->width = clamp(width, min_width, max_width) / info->bpp;
- pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
- XVIP_DMA_MAX_HEIGHT);
- /* Clamp the requested bytes per line value. If the maximum bytes per
- * line value is zero, the module doesn't support user configurable line
- * sizes. Override the requested value with the minimum in that case.
- */
- min_bpl = pix->width * info->bpp;
- max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
- bpl = rounddown(pix->bytesperline, dma->align);
-
- pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
- pix->sizeimage = pix->bytesperline * pix->height;
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt;
+
+ pix_mp = &format->fmt.pix_mp;
+ plane_fmt = pix_mp->plane_fmt;
+ pix_mp->field = dma->format.fmt.pix_mp.field;
+ width = rounddown(pix_mp->width * info->bpl_factor, align);
+ pix_mp->width = clamp(width, min_width, max_width) /
+ info->bpl_factor;
+ pix_mp->height = clamp(pix_mp->height, XVIP_DMA_MIN_HEIGHT,
+ XVIP_DMA_MAX_HEIGHT);
+
+ /*
+ * Clamp the requested bytes per line value. If the maximum
+ * bytes per line value is zero, the module doesn't support
+ * user configurable line sizes. Override the requested value
+ * with the minimum in that case.
+ */
+
+ max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
+
+ /* Handling contiguous data with mplanes */
+ if (info->buffers == 1) {
+ min_bpl = (pix_mp->width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ min_bpl = roundup(min_bpl, dma->align);
+ bpl = roundup(plane_fmt[0].bytesperline, dma->align);
+ plane_fmt[0].bytesperline = clamp(bpl, min_bpl,
+ max_bpl);
+
+ if (info->num_planes == 1) {
+ /* Single plane formats */
+ plane_fmt[0].sizeimage =
+ plane_fmt[0].bytesperline *
+ pix_mp->height;
+ } else {
+ /* Multi plane formats */
+ plane_fmt[0].sizeimage =
+ DIV_ROUND_UP(plane_fmt[0].bytesperline *
+ pix_mp->height *
+ info->bpp, 8);
+ }
+ } else {
+ /* Handling non-contiguous data with mplanes */
+ hsub = info->hsub;
+ vsub = info->vsub;
+ for (i = 0; i < info->num_planes; i++) {
+ plane_width = pix_mp->width / (i ? hsub : 1);
+ plane_height = pix_mp->height / (i ? vsub : 1);
+ min_bpl = (plane_width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ min_bpl = roundup(min_bpl, dma->align);
+ bpl = rounddown(plane_fmt[i].bytesperline,
+ dma->align);
+ plane_fmt[i].bytesperline =
+ clamp(bpl, min_bpl, max_bpl);
+ plane_fmt[i].sizeimage =
+ plane_fmt[i].bytesperline *
+ plane_height;
+ }
+ }
+ } else {
+ struct v4l2_pix_format *pix;
+
+ pix = &format->fmt.pix;
+ pix->field = dma->format.fmt.pix.field;
+ width = rounddown(pix->width * info->bpl_factor, align);
+ pix->width = clamp(width, min_width, max_width) /
+ info->bpl_factor;
+ pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
+ XVIP_DMA_MAX_HEIGHT);
+
+ min_bpl = (pix->width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ min_bpl = roundup(min_bpl, dma->align);
+ max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
+ bpl = rounddown(pix->bytesperline, dma->align);
+ pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
+ pix->sizeimage = pix->width * pix->height * info->bpp / 8;
+ }
if (fmtinfo)
*fmtinfo = info;
@@ -589,7 +1103,7 @@ xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
struct v4l2_fh *vfh = file->private_data;
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
- __xvip_dma_try_format(dma, &format->fmt.pix, NULL);
+ __xvip_dma_try_format(dma, format, NULL);
return 0;
}
@@ -600,26 +1114,149 @@ xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
const struct xvip_video_format *info;
- __xvip_dma_try_format(dma, &format->fmt.pix, &info);
+ __xvip_dma_try_format(dma, format, &info);
if (vb2_is_busy(&dma->queue))
return -EBUSY;
- dma->format = format->fmt.pix;
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ dma->format.fmt.pix_mp = format->fmt.pix_mp;
+
+ /*
+		 * Save the format resolution in the crop rectangle. This
+		 * will be updated when s_selection is called.
+ */
+ dma->r.width = format->fmt.pix_mp.width;
+ dma->r.height = format->fmt.pix_mp.height;
+ } else {
+ dma->format.fmt.pix = format->fmt.pix;
+ dma->r.width = format->fmt.pix.width;
+ dma->r.height = format->fmt.pix.height;
+ }
+
dma->fmtinfo = info;
return 0;
}
+static int
+xvip_dma_g_selection(struct file *file, void *fh, struct v4l2_selection *sel)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+ u32 width, height;
+ bool crop_frame = false;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ crop_frame = true;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ crop_frame = true;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sel->r.left = 0;
+ sel->r.top = 0;
+
+ if (crop_frame) {
+ sel->r.width = dma->r.width;
+ sel->r.height = dma->r.height;
+ } else {
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ width = dma->format.fmt.pix_mp.width;
+ height = dma->format.fmt.pix_mp.height;
+ } else {
+ width = dma->format.fmt.pix.width;
+ height = dma->format.fmt.pix.height;
+ }
+
+ sel->r.width = width;
+ sel->r.height = height;
+ }
+
+ return 0;
+}
+
+static int
+xvip_dma_s_selection(struct file *file, void *fh, struct v4l2_selection *sel)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+ u32 width, height;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ /* COMPOSE target is only valid for capture buftype */
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ /* CROP target is only valid for output buftype */
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ width = dma->format.fmt.pix_mp.width;
+ height = dma->format.fmt.pix_mp.height;
+ } else {
+ width = dma->format.fmt.pix.width;
+ height = dma->format.fmt.pix.height;
+ }
+
+ if (sel->r.width > width || sel->r.height > height ||
+ sel->r.top != 0 || sel->r.left != 0)
+ return -EINVAL;
+
+ sel->r.width = roundup(max(XVIP_DMA_MIN_WIDTH, sel->r.width),
+ dma->align);
+ sel->r.height = max(XVIP_DMA_MIN_HEIGHT, sel->r.height);
+ dma->r.width = sel->r.width;
+ dma->r.height = sel->r.height;
+
+ return 0;
+}
+
static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
.vidioc_querycap = xvip_dma_querycap,
.vidioc_enum_fmt_vid_cap = xvip_dma_enum_format,
+ .vidioc_enum_fmt_vid_out = xvip_dma_enum_format,
.vidioc_g_fmt_vid_cap = xvip_dma_get_format,
+ .vidioc_g_fmt_vid_cap_mplane = xvip_dma_get_format,
.vidioc_g_fmt_vid_out = xvip_dma_get_format,
+ .vidioc_g_fmt_vid_out_mplane = xvip_dma_get_format,
.vidioc_s_fmt_vid_cap = xvip_dma_set_format,
+ .vidioc_s_fmt_vid_cap_mplane = xvip_dma_set_format,
.vidioc_s_fmt_vid_out = xvip_dma_set_format,
+ .vidioc_s_fmt_vid_out_mplane = xvip_dma_set_format,
.vidioc_try_fmt_vid_cap = xvip_dma_try_format,
+ .vidioc_try_fmt_vid_cap_mplane = xvip_dma_try_format,
.vidioc_try_fmt_vid_out = xvip_dma_try_format,
+ .vidioc_try_fmt_vid_out_mplane = xvip_dma_try_format,
+ .vidioc_s_selection = xvip_dma_s_selection,
+ .vidioc_g_selection = xvip_dma_g_selection,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
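The xvip_dma_g_selection()/xvip_dma_s_selection() handlers registered above only accept rectangles anchored at (0, 0) and no larger than the active format, and they round the width up to the DMA alignment. A minimal userspace sketch of shrinking the capture compose rectangle (the /dev/video0 path and the 1280x720 rectangle are assumptions):

/* Userspace sketch: shrink the capture compose rectangle. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_selection sel;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&sel, 0, sizeof(sel));
	sel.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	sel.target = V4L2_SEL_TGT_COMPOSE;
	sel.r.left = 0;		/* the driver rejects non-zero offsets */
	sel.r.top = 0;
	sel.r.width = 1280;	/* must not exceed the active format width */
	sel.r.height = 720;

	if (ioctl(fd, VIDIOC_S_SELECTION, &sel) < 0) {
		perror("VIDIOC_S_SELECTION");
		return 1;
	}

	/* The driver may have rounded the width up to the DMA alignment. */
	printf("compose: %ux%u\n", sel.r.width, sel.r.height);
	return 0;
}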
@@ -628,6 +1265,99 @@ static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_enum_input = xvip_dma_enum_input,
+ .vidioc_g_input = xvip_dma_get_input,
+ .vidioc_s_input = xvip_dma_set_input,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 controls
+ */
+
+static int xvip_dma_s_ctrl(struct v4l2_ctrl *ctl)
+{
+ struct xvip_dma *dma = container_of(ctl->handler, struct xvip_dma,
+ ctrl_handler);
+ int ret = 0;
+
+ switch (ctl->id) {
+ case V4L2_CID_XILINX_LOW_LATENCY:
+ if (ctl->val == XVIP_LOW_LATENCY_ENABLE) {
+ if (vb2_is_busy(&dma->queue))
+ return -EBUSY;
+
+ dma->low_latency_cap = true;
+ /*
+ * Don't use auto-restart in low-latency mode,
+ * to avoid an extra one-frame delay between
+ * programming and the actual writing of data.
+ */
+ xilinx_xdma_set_mode(dma->dma, DEFAULT);
+ } else if (ctl->val == XVIP_LOW_LATENCY_DISABLE) {
+ if (vb2_is_busy(&dma->queue))
+ return -EBUSY;
+
+ dma->low_latency_cap = false;
+ xilinx_xdma_set_mode(dma->dma, AUTO_RESTART);
+ } else if (ctl->val == XVIP_START_DMA) {
+ /*
+ * In low-latency capture, the driver allows the application
+ * to start the DMA once the queue has buffers, which is why
+ * vb2_is_busy() is not checked here.
+ */
+ if (dma->low_latency_cap &&
+ vb2_is_streaming(&dma->queue))
+ dma_async_issue_pending(dma->dma);
+ else
+ ret = -EINVAL;
+ } else {
+ ret = -EINVAL;
+ }
+
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int xvip_dma_open(struct file *file)
+{
+ int ret;
+
+ ret = v4l2_fh_open(file);
+ if (ret)
+ return ret;
+
+ /* Disable low-latency mode by default */
+ if (v4l2_fh_is_singular_file(file)) {
+ struct xvip_dma *dma = video_drvdata(file);
+
+ mutex_lock(&dma->lock);
+ dma->low_latency_cap = false;
+ xilinx_xdma_set_mode(dma->dma, AUTO_RESTART);
+ mutex_unlock(&dma->lock);
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops xvip_dma_ctrl_ops = {
+ .s_ctrl = xvip_dma_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config xvip_dma_ctrls[] = {
+ {
+ .ops = &xvip_dma_ctrl_ops,
+ .id = V4L2_CID_XILINX_LOW_LATENCY,
+ .name = "Low Latency Controls",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = XVIP_LOW_LATENCY_ENABLE,
+ .max = XVIP_START_DMA,
+ .step = 1,
+ .def = XVIP_LOW_LATENCY_DISABLE,
+ }
};
/* -----------------------------------------------------------------------------
@@ -637,7 +1367,7 @@ static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
static const struct v4l2_file_operations xvip_dma_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = video_ioctl2,
- .open = v4l2_fh_open,
+ .open = xvip_dma_open,
.release = vb2_fop_release,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
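The custom control above gives applications a three-state knob: enable low-latency capture, disable it, or kick off the DMA manually once buffers are queued. A hedged userspace sketch follows; V4L2_CID_XILINX_LOW_LATENCY and the XVIP_* values normally come from a Xilinx UAPI header, so the numeric values stubbed here are assumptions for illustration only:

/* Userspace sketch: enable low-latency capture via the custom control. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

#define V4L2_CID_XILINX_LOW_LATENCY (V4L2_CID_USER_BASE + 0x100)	/* assumed */
#define XVIP_LOW_LATENCY_ENABLE 1					/* assumed */

int main(void)
{
	struct v4l2_control ctrl;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_XILINX_LOW_LATENCY;
	ctrl.value = XVIP_LOW_LATENCY_ENABLE;

	/*
	 * Must be done before buffers are queued: the driver returns
	 * -EBUSY once the vb2 queue is in use.
	 */
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
		perror("VIDIOC_S_CTRL");

	return 0;
}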
@@ -652,6 +1382,7 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
{
char name[16];
int ret;
+ u32 i, hsub, vsub, width, height;
dma->xdev = xdev;
dma->port = port;
@@ -661,41 +1392,131 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
spin_lock_init(&dma->queued_lock);
dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
- dma->format.pixelformat = dma->fmtinfo->fourcc;
- dma->format.colorspace = V4L2_COLORSPACE_SRGB;
- dma->format.field = V4L2_FIELD_NONE;
- dma->format.width = XVIP_DMA_DEF_WIDTH;
- dma->format.height = XVIP_DMA_DEF_HEIGHT;
- dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
- dma->format.sizeimage = dma->format.bytesperline * dma->format.height;
+ dma->format.type = type;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+ struct v4l2_pix_format_mplane *pix_mp;
+
+ pix_mp = &dma->format.fmt.pix_mp;
+ pix_mp->pixelformat = dma->fmtinfo->fourcc;
+ pix_mp->colorspace = V4L2_COLORSPACE_SRGB;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->width = XVIP_DMA_DEF_WIDTH;
+ pix_mp->height = XVIP_DMA_DEF_HEIGHT;
+
+ /* Handling contiguous data with mplanes */
+ if (dma->fmtinfo->buffers == 1) {
+ pix_mp->plane_fmt[0].bytesperline =
+ pix_mp->width * dma->fmtinfo->bpl_factor;
+ pix_mp->plane_fmt[0].sizeimage =
+ pix_mp->width * pix_mp->height *
+ dma->fmtinfo->bpp / 8;
+ } else {
+ /* Handling non-contiguous data with mplanes */
+ hsub = dma->fmtinfo->hsub;
+ vsub = dma->fmtinfo->vsub;
+ for (i = 0; i < dma->fmtinfo->buffers; i++) {
+ width = pix_mp->width / (i ? hsub : 1);
+ height = pix_mp->height / (i ? vsub : 1);
+ pix_mp->plane_fmt[i].bytesperline =
+ width * dma->fmtinfo->bpl_factor;
+ pix_mp->plane_fmt[i].sizeimage =
+ pix_mp->plane_fmt[i].bytesperline * height;
+ }
+ }
+ } else {
+ struct v4l2_pix_format *pix;
+
+ pix = &dma->format.fmt.pix;
+ pix->pixelformat = dma->fmtinfo->fourcc;
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ pix->field = V4L2_FIELD_NONE;
+ pix->width = XVIP_DMA_DEF_WIDTH;
+ pix->height = XVIP_DMA_DEF_HEIGHT;
+ pix->bytesperline = pix->width * dma->fmtinfo->bpl_factor;
+ pix->sizeimage =
+ pix->width * pix->height * dma->fmtinfo->bpp / 8;
+ }
/* Initialize the media entity... */
- dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
- ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ dma->pad.flags = MEDIA_PAD_FL_SINK;
+ else
+ dma->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
if (ret < 0)
goto error;
+ ret = v4l2_ctrl_handler_init(&dma->ctrl_handler,
+ ARRAY_SIZE(xvip_dma_ctrls));
+ if (ret < 0) {
+ dev_err(dma->xdev->dev, "failed to initialize V4L2 ctrl\n");
+ goto error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(xvip_dma_ctrls); i++) {
+ struct v4l2_ctrl *ctrl;
+
+ dev_dbg(dma->xdev->dev, "%d ctrl = 0x%x\n", i,
+ xvip_dma_ctrls[i].id);
+ ctrl = v4l2_ctrl_new_custom(&dma->ctrl_handler,
+ &xvip_dma_ctrls[i], NULL);
+ if (!ctrl) {
+ dev_err(dma->xdev->dev, "failed to create %s ctrl\n",
+ xvip_dma_ctrls[i].name);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+
+ if (dma->ctrl_handler.error) {
+ dev_err(dma->xdev->dev, "failed to add controls\n");
+ ret = dma->ctrl_handler.error;
+ goto error;
+ }
+
+ dma->video.ctrl_handler = &dma->ctrl_handler;
+ ret = v4l2_ctrl_handler_setup(&dma->ctrl_handler);
+ if (ret < 0) {
+ dev_err(dma->xdev->dev, "failed to set controls\n");
+ goto error;
+ }
+
/* ... and the video node... */
dma->video.fops = &xvip_dma_fops;
dma->video.v4l2_dev = &xdev->v4l2_dev;
dma->video.queue = &dma->queue;
snprintf(dma->video.name, sizeof(dma->video.name), "%pOFn %s %u",
xdev->dev->of_node,
- type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
+ (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ ? "output" : "input",
port);
+
dma->video.vfl_type = VFL_TYPE_GRABBER;
- dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
- ? VFL_DIR_RX : VFL_DIR_TX;
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ dma->video.vfl_dir = VFL_DIR_RX;
+ else
+ dma->video.vfl_dir = VFL_DIR_TX;
+
dma->video.release = video_device_release_empty;
dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
dma->video.lock = &dma->lock;
dma->video.device_caps = V4L2_CAP_STREAMING;
- if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ dma->video.device_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
dma->video.device_caps |= V4L2_CAP_VIDEO_CAPTURE;
- else
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ dma->video.device_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
dma->video.device_caps |= V4L2_CAP_VIDEO_OUTPUT;
+ break;
+ }
video_set_drvdata(&dma->video, dma);
@@ -725,10 +1546,12 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
/* ... and the DMA channel. */
snprintf(name, sizeof(name), "port%u", port);
- dma->dma = dma_request_slave_channel(dma->xdev->dev, name);
- if (dma->dma == NULL) {
- dev_err(dma->xdev->dev, "no VDMA channel found\n");
- ret = -ENODEV;
+ dma->dma = dma_request_chan(dma->xdev->dev, name);
+ if (IS_ERR(dma->dma)) {
+ ret = PTR_ERR(dma->dma);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dma->xdev->dev,
+ "No Video DMA channel found");
goto error;
}
@@ -752,9 +1575,10 @@ void xvip_dma_cleanup(struct xvip_dma *dma)
if (video_is_registered(&dma->video))
video_unregister_device(&dma->video);
- if (dma->dma)
+ if (!IS_ERR(dma->dma))
dma_release_channel(dma->dma);
+ v4l2_ctrl_handler_free(&dma->ctrl_handler);
media_entity_cleanup(&dma->video.entity);
mutex_destroy(&dma->lock);
diff --git a/drivers/media/platform/xilinx/xilinx-dma.h b/drivers/media/platform/xilinx/xilinx-dma.h
index 5aec4d17eb21..61b05557d7f6 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.h
+++ b/drivers/media/platform/xilinx/xilinx-dma.h
@@ -18,6 +18,7 @@
#include <linux/videodev2.h>
#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/videobuf2-v4l2.h>
@@ -32,7 +33,7 @@ struct xvip_video_format;
* @use_count: number of DMA engines using the pipeline
* @stream_count: number of DMA engines currently streaming
* @num_dmas: number of DMA engines in the pipeline
- * @output: DMA engine at the output of the pipeline
+ * @xdev: Composite device the pipe belongs to
*/
struct xvip_pipeline {
struct media_pipeline pipe;
@@ -42,7 +43,7 @@ struct xvip_pipeline {
unsigned int stream_count;
unsigned int num_dmas;
- struct xvip_dma *output;
+ struct xvip_composite_device *xdev;
};
static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
@@ -55,12 +56,17 @@ static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
* @list: list entry in a composite device dmas list
* @video: V4L2 video device associated with the DMA channel
* @pad: media pad for the video device entity
+ * @remote_subdev_med_bus: media bus format of the remote sub-device
+ * @ctrl_handler: V4L2 control handler for controls inherited from the subdev
* @xdev: composite device the DMA channel belongs to
* @pipe: pipeline belonging to the DMA channel
* @port: composite device DT node port number for the DMA channel
* @lock: protects the @format, @fmtinfo and @queue fields
* @format: active V4L2 pixel format
+ * @r: crop rectangle parameters
* @fmtinfo: format information corresponding to the active @format
+ * @poss_v4l2_fmts: all possible V4L2 formats supported
+ * @poss_v4l2_fmt_cnt: number of supported V4L2 formats
* @queue: vb2 buffers queue
* @sequence: V4L2 buffers sequence number
* @queued_bufs: list of queued buffers
@@ -69,19 +75,27 @@ static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
* @align: transfer alignment required by the DMA channel (in bytes)
* @xt: dma interleaved template for dma configuration
* @sgl: data chunk structure for dma_interleaved_template
+ * @prev_fid: Previous Field ID
+ * @low_latency_cap: Low latency capture mode
*/
struct xvip_dma {
struct list_head list;
struct video_device video;
struct media_pad pad;
+ u32 remote_subdev_med_bus;
+
+ struct v4l2_ctrl_handler ctrl_handler;
struct xvip_composite_device *xdev;
struct xvip_pipeline pipe;
unsigned int port;
struct mutex lock;
- struct v4l2_pix_format format;
+ struct v4l2_format format;
+ struct v4l2_rect r;
const struct xvip_video_format *fmtinfo;
+ u32 *poss_v4l2_fmts;
+ u32 poss_v4l2_fmt_cnt;
struct vb2_queue queue;
unsigned int sequence;
@@ -93,6 +107,9 @@ struct xvip_dma {
unsigned int align;
struct dma_interleaved_template xt;
struct data_chunk sgl[1];
+
+ u32 prev_fid;
+ u32 low_latency_cap;
};
#define to_xvip_dma(vdev) container_of(vdev, struct xvip_dma, video)
diff --git a/drivers/media/platform/xilinx/xilinx-gamma-coeff.h b/drivers/media/platform/xilinx/xilinx-gamma-coeff.h
new file mode 100644
index 000000000000..344260008a47
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-gamma-coeff.h
@@ -0,0 +1,5385 @@
+/*
+ * Xilinx Gamma Correction IP
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __XILINX_GAMMA_COEFF_H__
+#define __XILINX_GAMMA_COEFF_H__
+
+#define GAMMA_CURVE_LENGTH (40)
+
+#define GAMMA_BPC_8 (8)
+#define GAMMA8_TABLE_LENGTH BIT(GAMMA_BPC_8)
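The xgamma8_NN tables that follow appear to be 256-entry gamma look-up tables, one per gamma value in steps of 0.1 (so xgamma8_01 would be γ = 0.1, xgamma8_22 the common γ = 2.2, and xgamma8_10 is indeed the identity mapping expected for γ = 1.0). Under that assumption, out = round(255 * (in/255)^γ) reproduces the entries; a generator sketch, not part of the driver:

/* Generator sketch for the tables below, assuming
 * out = round(255 * (in/255)^gamma). Spot-checking xgamma8_01
 * (gamma = 0.1) and xgamma8_22 (gamma = 2.2) matches the entries.
 * Build with: cc gen.c -lm
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	double gamma = 2.2;	/* would regenerate xgamma8_22 */
	int i;

	for (i = 0; i < 256; i++) {
		unsigned int v =
			(unsigned int)round(255.0 * pow(i / 255.0, gamma));
		printf("%u,%c", v, (i % 8 == 7) ? '\n' : ' ');
	}
	return 0;
}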
+static const u16 xgamma8_01[GAMMA8_TABLE_LENGTH] = {
+ 0, 147, 157, 164, 168, 172, 175, 178,
+ 180, 183, 184, 186, 188, 189, 191, 192,
+ 193, 195, 196, 197, 198, 199, 200, 200,
+ 201, 202, 203, 204, 204, 205, 206, 207,
+ 207, 208, 208, 209, 210, 210, 211, 211,
+ 212, 212, 213, 213, 214, 214, 215, 215,
+ 216, 216, 217, 217, 218, 218, 218, 219,
+ 219, 220, 220, 220, 221, 221, 221, 222,
+ 222, 222, 223, 223, 223, 224, 224, 224,
+ 225, 225, 225, 226, 226, 226, 227, 227,
+ 227, 227, 228, 228, 228, 228, 229, 229,
+ 229, 230, 230, 230, 230, 231, 231, 231,
+ 231, 232, 232, 232, 232, 232, 233, 233,
+ 233, 233, 234, 234, 234, 234, 234, 235,
+ 235, 235, 235, 235, 236, 236, 236, 236,
+ 236, 237, 237, 237, 237, 237, 238, 238,
+ 238, 238, 238, 239, 239, 239, 239, 239,
+ 239, 240, 240, 240, 240, 240, 240, 241,
+ 241, 241, 241, 241, 241, 242, 242, 242,
+ 242, 242, 242, 243, 243, 243, 243, 243,
+ 243, 244, 244, 244, 244, 244, 244, 244,
+ 245, 245, 245, 245, 245, 245, 245, 246,
+ 246, 246, 246, 246, 246, 246, 247, 247,
+ 247, 247, 247, 247, 247, 247, 248, 248,
+ 248, 248, 248, 248, 248, 249, 249, 249,
+ 249, 249, 249, 249, 249, 249, 250, 250,
+ 250, 250, 250, 250, 250, 250, 251, 251,
+ 251, 251, 251, 251, 251, 251, 251, 252,
+ 252, 252, 252, 252, 252, 252, 252, 252,
+ 253, 253, 253, 253, 253, 253, 253, 253,
+ 253, 254, 254, 254, 254, 254, 254, 254,
+ 254, 254, 254, 255, 255, 255, 255, 255,
+};
+
+static const u16 xgamma8_02[GAMMA8_TABLE_LENGTH] = {
+ 0, 84, 97, 105, 111, 116, 120, 124,
+ 128, 131, 133, 136, 138, 141, 143, 145,
+ 147, 148, 150, 152, 153, 155, 156, 158,
+ 159, 160, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 179, 180, 181, 182,
+ 183, 183, 184, 185, 186, 186, 187, 188,
+ 188, 189, 190, 190, 191, 192, 192, 193,
+ 193, 194, 195, 195, 196, 196, 197, 197,
+ 198, 199, 199, 200, 200, 201, 201, 202,
+ 202, 203, 203, 204, 204, 205, 205, 206,
+ 206, 207, 207, 208, 208, 208, 209, 209,
+ 210, 210, 211, 211, 211, 212, 212, 213,
+ 213, 214, 214, 214, 215, 215, 216, 216,
+ 216, 217, 217, 217, 218, 218, 219, 219,
+ 219, 220, 220, 220, 221, 221, 221, 222,
+ 222, 223, 223, 223, 224, 224, 224, 225,
+ 225, 225, 226, 226, 226, 227, 227, 227,
+ 227, 228, 228, 228, 229, 229, 229, 230,
+ 230, 230, 231, 231, 231, 231, 232, 232,
+ 232, 233, 233, 233, 233, 234, 234, 234,
+ 235, 235, 235, 235, 236, 236, 236, 237,
+ 237, 237, 237, 238, 238, 238, 238, 239,
+ 239, 239, 239, 240, 240, 240, 240, 241,
+ 241, 241, 241, 242, 242, 242, 242, 243,
+ 243, 243, 243, 244, 244, 244, 244, 245,
+ 245, 245, 245, 246, 246, 246, 246, 246,
+ 247, 247, 247, 247, 248, 248, 248, 248,
+ 248, 249, 249, 249, 249, 250, 250, 250,
+ 250, 250, 251, 251, 251, 251, 252, 252,
+ 252, 252, 252, 253, 253, 253, 253, 253,
+ 254, 254, 254, 254, 254, 255, 255, 255,
+};
+
+static const u16 xgamma8_03[GAMMA8_TABLE_LENGTH] = {
+ 0, 48, 60, 67, 73, 78, 83, 87,
+ 90, 94, 97, 99, 102, 104, 107, 109,
+ 111, 113, 115, 117, 119, 121, 122, 124,
+ 125, 127, 129, 130, 131, 133, 134, 136,
+ 137, 138, 139, 141, 142, 143, 144, 145,
+ 146, 147, 148, 149, 151, 152, 153, 154,
+ 155, 155, 156, 157, 158, 159, 160, 161,
+ 162, 163, 164, 164, 165, 166, 167, 168,
+ 168, 169, 170, 171, 172, 172, 173, 174,
+ 174, 175, 176, 177, 177, 178, 179, 179,
+ 180, 181, 181, 182, 183, 183, 184, 185,
+ 185, 186, 187, 187, 188, 188, 189, 190,
+ 190, 191, 191, 192, 193, 193, 194, 194,
+ 195, 195, 196, 197, 197, 198, 198, 199,
+ 199, 200, 200, 201, 201, 202, 202, 203,
+ 203, 204, 204, 205, 205, 206, 206, 207,
+ 207, 208, 208, 209, 209, 210, 210, 211,
+ 211, 212, 212, 213, 213, 213, 214, 214,
+ 215, 215, 216, 216, 217, 217, 217, 218,
+ 218, 219, 219, 220, 220, 220, 221, 221,
+ 222, 222, 223, 223, 223, 224, 224, 225,
+ 225, 225, 226, 226, 227, 227, 227, 228,
+ 228, 229, 229, 229, 230, 230, 230, 231,
+ 231, 232, 232, 232, 233, 233, 233, 234,
+ 234, 235, 235, 235, 236, 236, 236, 237,
+ 237, 237, 238, 238, 238, 239, 239, 240,
+ 240, 240, 241, 241, 241, 242, 242, 242,
+ 243, 243, 243, 244, 244, 244, 245, 245,
+ 245, 246, 246, 246, 247, 247, 247, 248,
+ 248, 248, 249, 249, 249, 249, 250, 250,
+ 250, 251, 251, 251, 252, 252, 252, 253,
+ 253, 253, 253, 254, 254, 254, 255, 255,
+};
+
+static const u16 xgamma8_04[GAMMA8_TABLE_LENGTH] = {
+ 0, 28, 37, 43, 48, 53, 57, 61,
+ 64, 67, 70, 73, 75, 78, 80, 82,
+ 84, 86, 88, 90, 92, 94, 96, 97,
+ 99, 101, 102, 104, 105, 107, 108, 110,
+ 111, 113, 114, 115, 117, 118, 119, 120,
+ 122, 123, 124, 125, 126, 127, 129, 130,
+ 131, 132, 133, 134, 135, 136, 137, 138,
+ 139, 140, 141, 142, 143, 144, 145, 146,
+ 147, 148, 149, 149, 150, 151, 152, 153,
+ 154, 155, 155, 156, 157, 158, 159, 160,
+ 160, 161, 162, 163, 164, 164, 165, 166,
+ 167, 167, 168, 169, 170, 170, 171, 172,
+ 173, 173, 174, 175, 175, 176, 177, 177,
+ 178, 179, 179, 180, 181, 182, 182, 183,
+ 183, 184, 185, 185, 186, 187, 187, 188,
+ 189, 189, 190, 190, 191, 192, 192, 193,
+ 194, 194, 195, 195, 196, 197, 197, 198,
+ 198, 199, 199, 200, 201, 201, 202, 202,
+ 203, 203, 204, 205, 205, 206, 206, 207,
+ 207, 208, 208, 209, 209, 210, 211, 211,
+ 212, 212, 213, 213, 214, 214, 215, 215,
+ 216, 216, 217, 217, 218, 218, 219, 219,
+ 220, 220, 221, 221, 222, 222, 223, 223,
+ 224, 224, 225, 225, 226, 226, 227, 227,
+ 228, 228, 229, 229, 230, 230, 230, 231,
+ 231, 232, 232, 233, 233, 234, 234, 235,
+ 235, 235, 236, 236, 237, 237, 238, 238,
+ 239, 239, 240, 240, 240, 241, 241, 242,
+ 242, 243, 243, 243, 244, 244, 245, 245,
+ 246, 246, 246, 247, 247, 248, 248, 248,
+ 249, 249, 250, 250, 251, 251, 251, 252,
+ 252, 253, 253, 253, 254, 254, 255, 255,
+};
+
+static const u16 xgamma8_05[GAMMA8_TABLE_LENGTH] = {
+ 0, 16, 23, 28, 32, 36, 39, 42,
+ 45, 48, 50, 53, 55, 58, 60, 62,
+ 64, 66, 68, 70, 71, 73, 75, 77,
+ 78, 80, 81, 83, 84, 86, 87, 89,
+ 90, 92, 93, 94, 96, 97, 98, 100,
+ 101, 102, 103, 105, 106, 107, 108, 109,
+ 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 145, 146, 147, 148, 149,
+ 150, 151, 151, 152, 153, 154, 155, 156,
+ 156, 157, 158, 159, 160, 160, 161, 162,
+ 163, 164, 164, 165, 166, 167, 167, 168,
+ 169, 170, 170, 171, 172, 173, 173, 174,
+ 175, 176, 176, 177, 178, 179, 179, 180,
+ 181, 181, 182, 183, 183, 184, 185, 186,
+ 186, 187, 188, 188, 189, 190, 190, 191,
+ 192, 192, 193, 194, 194, 195, 196, 196,
+ 197, 198, 198, 199, 199, 200, 201, 201,
+ 202, 203, 203, 204, 204, 205, 206, 206,
+ 207, 208, 208, 209, 209, 210, 211, 211,
+ 212, 212, 213, 214, 214, 215, 215, 216,
+ 217, 217, 218, 218, 219, 220, 220, 221,
+ 221, 222, 222, 223, 224, 224, 225, 225,
+ 226, 226, 227, 228, 228, 229, 229, 230,
+ 230, 231, 231, 232, 233, 233, 234, 234,
+ 235, 235, 236, 236, 237, 237, 238, 238,
+ 239, 240, 240, 241, 241, 242, 242, 243,
+ 243, 244, 244, 245, 245, 246, 246, 247,
+ 247, 248, 248, 249, 249, 250, 250, 251,
+ 251, 252, 252, 253, 253, 254, 254, 255,
+};
+
+static const u16 xgamma8_06[GAMMA8_TABLE_LENGTH] = {
+ 0, 9, 14, 18, 21, 24, 27, 29,
+ 32, 34, 37, 39, 41, 43, 45, 47,
+ 48, 50, 52, 54, 55, 57, 59, 60,
+ 62, 63, 65, 66, 68, 69, 71, 72,
+ 73, 75, 76, 77, 79, 80, 81, 83,
+ 84, 85, 86, 88, 89, 90, 91, 92,
+ 94, 95, 96, 97, 98, 99, 100, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110,
+ 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 120, 121, 122, 123, 124, 125, 126,
+ 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 137, 138, 139, 140, 141,
+ 142, 143, 144, 145, 145, 146, 147, 148,
+ 149, 150, 151, 151, 152, 153, 154, 155,
+ 156, 156, 157, 158, 159, 160, 161, 161,
+ 162, 163, 164, 165, 165, 166, 167, 168,
+ 169, 169, 170, 171, 172, 173, 173, 174,
+ 175, 176, 176, 177, 178, 179, 179, 180,
+ 181, 182, 182, 183, 184, 185, 185, 186,
+ 187, 188, 188, 189, 190, 191, 191, 192,
+ 193, 194, 194, 195, 196, 196, 197, 198,
+ 199, 199, 200, 201, 201, 202, 203, 203,
+ 204, 205, 206, 206, 207, 208, 208, 209,
+ 210, 210, 211, 212, 212, 213, 214, 214,
+ 215, 216, 216, 217, 218, 218, 219, 220,
+ 220, 221, 222, 222, 223, 224, 224, 225,
+ 226, 226, 227, 228, 228, 229, 230, 230,
+ 231, 231, 232, 233, 233, 234, 235, 235,
+ 236, 237, 237, 238, 238, 239, 240, 240,
+ 241, 242, 242, 243, 243, 244, 245, 245,
+ 246, 247, 247, 248, 248, 249, 250, 250,
+ 251, 251, 252, 253, 253, 254, 254, 255,
+};
+
+static const u16 xgamma8_07[GAMMA8_TABLE_LENGTH] = {
+ 0, 5, 9, 11, 14, 16, 18, 21,
+ 23, 25, 26, 28, 30, 32, 33, 35,
+ 37, 38, 40, 41, 43, 44, 46, 47,
+ 49, 50, 52, 53, 54, 56, 57, 58,
+ 60, 61, 62, 64, 65, 66, 67, 69,
+ 70, 71, 72, 73, 75, 76, 77, 78,
+ 79, 80, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 92, 93, 94, 95, 96,
+ 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150,
+ 150, 151, 152, 153, 154, 155, 156, 157,
+ 157, 158, 159, 160, 161, 162, 163, 163,
+ 164, 165, 166, 167, 168, 168, 169, 170,
+ 171, 172, 173, 173, 174, 175, 176, 177,
+ 178, 178, 179, 180, 181, 182, 182, 183,
+ 184, 185, 186, 186, 187, 188, 189, 190,
+ 190, 191, 192, 193, 194, 194, 195, 196,
+ 197, 197, 198, 199, 200, 201, 201, 202,
+ 203, 204, 204, 205, 206, 207, 208, 208,
+ 209, 210, 211, 211, 212, 213, 214, 214,
+ 215, 216, 217, 217, 218, 219, 220, 220,
+ 221, 222, 223, 223, 224, 225, 226, 226,
+ 227, 228, 228, 229, 230, 231, 231, 232,
+ 233, 234, 234, 235, 236, 237, 237, 238,
+ 239, 239, 240, 241, 242, 242, 243, 244,
+ 244, 245, 246, 247, 247, 248, 249, 249,
+ 250, 251, 251, 252, 253, 254, 254, 255,
+};
+
+static const u16 xgamma8_08[GAMMA8_TABLE_LENGTH] = {
+ 0, 3, 5, 7, 9, 11, 13, 14,
+ 16, 18, 19, 21, 22, 24, 25, 26,
+ 28, 29, 31, 32, 33, 35, 36, 37,
+ 39, 40, 41, 42, 44, 45, 46, 47,
+ 48, 50, 51, 52, 53, 54, 56, 57,
+ 58, 59, 60, 61, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 73, 74, 75,
+ 76, 77, 78, 79, 80, 81, 82, 83,
+ 84, 85, 86, 88, 89, 90, 91, 92,
+ 93, 94, 95, 96, 97, 98, 99, 100,
+ 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 114, 115, 116,
+ 117, 118, 119, 120, 121, 122, 123, 123,
+ 124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 140, 141, 142, 143, 144, 145, 146,
+ 147, 148, 149, 150, 151, 151, 152, 153,
+ 154, 155, 156, 157, 158, 159, 160, 161,
+ 161, 162, 163, 164, 165, 166, 167, 168,
+ 169, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 177, 178, 179, 180, 181, 182,
+ 183, 183, 184, 185, 186, 187, 188, 189,
+ 190, 190, 191, 192, 193, 194, 195, 196,
+ 196, 197, 198, 199, 200, 201, 202, 202,
+ 203, 204, 205, 206, 207, 207, 208, 209,
+ 210, 211, 212, 212, 213, 214, 215, 216,
+ 217, 217, 218, 219, 220, 221, 222, 222,
+ 223, 224, 225, 226, 227, 227, 228, 229,
+ 230, 231, 232, 232, 233, 234, 235, 236,
+ 236, 237, 238, 239, 240, 240, 241, 242,
+ 243, 244, 245, 245, 246, 247, 248, 249,
+ 249, 250, 251, 252, 253, 253, 254, 255,
+};
+
+static const u16 xgamma8_09[GAMMA8_TABLE_LENGTH] = {
+ 0, 2, 3, 5, 6, 7, 9, 10,
+ 11, 13, 14, 15, 16, 18, 19, 20,
+ 21, 22, 23, 25, 26, 27, 28, 29,
+ 30, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105,
+ 106, 107, 108, 109, 110, 111, 112, 113,
+ 114, 115, 116, 117, 118, 119, 120, 121,
+ 122, 123, 124, 125, 126, 126, 127, 128,
+ 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152,
+ 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 210, 211,
+ 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 221, 222, 223, 224, 225, 226,
+ 227, 228, 229, 230, 231, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241,
+ 241, 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 250, 251, 252, 253, 254, 255,
+};
+
+static const u16 xgamma8_10[GAMMA8_TABLE_LENGTH] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255,
+};
+
+static const u16 xgamma8_11[GAMMA8_TABLE_LENGTH] = {
+ 0, 1, 1, 2, 3, 3, 4, 5,
+ 6, 6, 7, 8, 9, 10, 10, 11,
+ 12, 13, 14, 15, 16, 16, 17, 18,
+ 19, 20, 21, 22, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110,
+ 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160,
+ 161, 162, 163, 164, 165, 166, 167, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 201, 202, 203,
+ 204, 205, 206, 207, 208, 209, 210, 211,
+ 212, 214, 215, 216, 217, 218, 219, 220,
+ 221, 222, 223, 224, 225, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237,
+ 239, 240, 241, 242, 243, 244, 245, 246,
+ 247, 248, 250, 251, 252, 253, 254, 255,
+};
+
+static const u16 xgamma8_12[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 1, 1, 2, 2, 3, 3,
+ 4, 5, 5, 6, 7, 7, 8, 9,
+ 9, 10, 11, 11, 12, 13, 13, 14,
+ 15, 16, 16, 17, 18, 19, 20, 20,
+ 21, 22, 23, 24, 24, 25, 26, 27,
+ 28, 28, 29, 30, 31, 32, 33, 34,
+ 34, 35, 36, 37, 38, 39, 40, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 144, 145,
+ 146, 147, 148, 149, 150, 151, 152, 153,
+ 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 165, 166, 167, 168, 169, 170, 171,
+ 172, 173, 175, 176, 177, 178, 179, 180,
+ 181, 183, 184, 185, 186, 187, 188, 189,
+ 191, 192, 193, 194, 195, 196, 197, 199,
+ 200, 201, 202, 203, 204, 205, 207, 208,
+ 209, 210, 211, 212, 214, 215, 216, 217,
+ 218, 219, 221, 222, 223, 224, 225, 226,
+ 228, 229, 230, 231, 232, 234, 235, 236,
+ 237, 238, 239, 241, 242, 243, 244, 245,
+ 247, 248, 249, 250, 251, 253, 254, 255,
+};
+
+static const u16 xgamma8_13[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 1, 1, 2, 2, 2,
+ 3, 3, 4, 4, 5, 5, 6, 6,
+ 7, 8, 8, 9, 9, 10, 11, 11,
+ 12, 12, 13, 14, 14, 15, 16, 16,
+ 17, 18, 19, 19, 20, 21, 21, 22,
+ 23, 24, 24, 25, 26, 27, 28, 28,
+ 29, 30, 31, 31, 32, 33, 34, 35,
+ 36, 36, 37, 38, 39, 40, 41, 41,
+ 42, 43, 44, 45, 46, 47, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 135, 136, 137, 138,
+ 139, 140, 141, 143, 144, 145, 146, 147,
+ 148, 149, 151, 152, 153, 154, 155, 156,
+ 157, 159, 160, 161, 162, 163, 164, 166,
+ 167, 168, 169, 170, 172, 173, 174, 175,
+ 176, 178, 179, 180, 181, 182, 184, 185,
+ 186, 187, 188, 190, 191, 192, 193, 194,
+ 196, 197, 198, 199, 201, 202, 203, 204,
+ 206, 207, 208, 209, 210, 212, 213, 214,
+ 215, 217, 218, 219, 220, 222, 223, 224,
+ 226, 227, 228, 229, 231, 232, 233, 234,
+ 236, 237, 238, 240, 241, 242, 243, 245,
+ 246, 247, 249, 250, 251, 252, 254, 255,
+};
+
+static const u16 xgamma8_14[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 1, 1, 1, 1, 2,
+ 2, 2, 3, 3, 4, 4, 4, 5,
+ 5, 6, 6, 7, 7, 8, 8, 9,
+ 9, 10, 10, 11, 12, 12, 13, 13,
+ 14, 15, 15, 16, 16, 17, 18, 18,
+ 19, 20, 20, 21, 22, 22, 23, 24,
+ 25, 25, 26, 27, 28, 28, 29, 30,
+ 31, 31, 32, 33, 34, 34, 35, 36,
+ 37, 38, 38, 39, 40, 41, 42, 43,
+ 43, 44, 45, 46, 47, 48, 49, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57,
+ 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96,
+ 97, 98, 99, 100, 101, 103, 104, 105,
+ 106, 107, 108, 109, 110, 111, 112, 113,
+ 115, 116, 117, 118, 119, 120, 121, 122,
+ 124, 125, 126, 127, 128, 129, 130, 132,
+ 133, 134, 135, 136, 137, 139, 140, 141,
+ 142, 143, 145, 146, 147, 148, 149, 151,
+ 152, 153, 154, 155, 157, 158, 159, 160,
+ 161, 163, 164, 165, 166, 168, 169, 170,
+ 171, 173, 174, 175, 176, 178, 179, 180,
+ 181, 183, 184, 185, 187, 188, 189, 190,
+ 192, 193, 194, 196, 197, 198, 200, 201,
+ 202, 203, 205, 206, 207, 209, 210, 211,
+ 213, 214, 215, 217, 218, 219, 221, 222,
+ 223, 225, 226, 227, 229, 230, 232, 233,
+ 234, 236, 237, 238, 240, 241, 242, 244,
+ 245, 247, 248, 249, 251, 252, 254, 255,
+};
+
+static const u16 xgamma8_15[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 2, 2, 2, 3, 3, 3, 4,
+ 4, 4, 5, 5, 6, 6, 6, 7,
+ 7, 8, 8, 9, 9, 10, 10, 11,
+ 11, 12, 12, 13, 14, 14, 15, 15,
+ 16, 16, 17, 18, 18, 19, 20, 20,
+ 21, 21, 22, 23, 23, 24, 25, 26,
+ 26, 27, 28, 28, 29, 30, 31, 31,
+ 32, 33, 34, 34, 35, 36, 37, 37,
+ 38, 39, 40, 41, 41, 42, 43, 44,
+ 45, 46, 46, 47, 48, 49, 50, 51,
+ 52, 53, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 88, 89, 90,
+ 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 112, 113, 114, 115, 116,
+ 117, 119, 120, 121, 122, 123, 124, 126,
+ 127, 128, 129, 130, 132, 133, 134, 135,
+ 136, 138, 139, 140, 141, 142, 144, 145,
+ 146, 147, 149, 150, 151, 152, 154, 155,
+ 156, 158, 159, 160, 161, 163, 164, 165,
+ 167, 168, 169, 171, 172, 173, 174, 176,
+ 177, 178, 180, 181, 182, 184, 185, 187,
+ 188, 189, 191, 192, 193, 195, 196, 197,
+ 199, 200, 202, 203, 204, 206, 207, 209,
+ 210, 211, 213, 214, 216, 217, 218, 220,
+ 221, 223, 224, 226, 227, 228, 230, 231,
+ 233, 234, 236, 237, 239, 240, 242, 243,
+ 245, 246, 248, 249, 251, 252, 254, 255,
+};
+
+static const u16 xgamma8_16[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 3,
+ 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 7, 7, 7, 8, 8, 9,
+ 9, 10, 10, 11, 11, 12, 12, 13,
+ 13, 14, 14, 15, 15, 16, 16, 17,
+ 18, 18, 19, 19, 20, 21, 21, 22,
+ 23, 23, 24, 25, 25, 26, 27, 27,
+ 28, 29, 29, 30, 31, 31, 32, 33,
+ 34, 34, 35, 36, 37, 38, 38, 39,
+ 40, 41, 42, 42, 43, 44, 45, 46,
+ 46, 47, 48, 49, 50, 51, 52, 53,
+ 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 75,
+ 76, 77, 78, 79, 80, 81, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92,
+ 93, 94, 95, 97, 98, 99, 100, 101,
+ 102, 103, 104, 106, 107, 108, 109, 110,
+ 111, 113, 114, 115, 116, 117, 119, 120,
+ 121, 122, 123, 125, 126, 127, 128, 130,
+ 131, 132, 133, 135, 136, 137, 138, 140,
+ 141, 142, 143, 145, 146, 147, 149, 150,
+ 151, 153, 154, 155, 157, 158, 159, 161,
+ 162, 163, 165, 166, 167, 169, 170, 171,
+ 173, 174, 176, 177, 178, 180, 181, 183,
+ 184, 185, 187, 188, 190, 191, 193, 194,
+ 196, 197, 198, 200, 201, 203, 204, 206,
+ 207, 209, 210, 212, 213, 215, 216, 218,
+ 219, 221, 222, 224, 225, 227, 228, 230,
+ 231, 233, 235, 236, 238, 239, 241, 242,
+ 244, 245, 247, 249, 250, 252, 253, 255,
+};
+
+static const u16 xgamma8_17[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 3, 3, 3, 3, 4, 4, 4,
+ 5, 5, 5, 6, 6, 6, 7, 7,
+ 7, 8, 8, 9, 9, 10, 10, 10,
+ 11, 11, 12, 12, 13, 13, 14, 14,
+ 15, 15, 16, 17, 17, 18, 18, 19,
+ 19, 20, 21, 21, 22, 22, 23, 24,
+ 24, 25, 26, 26, 27, 28, 28, 29,
+ 30, 30, 31, 32, 33, 33, 34, 35,
+ 36, 36, 37, 38, 39, 39, 40, 41,
+ 42, 43, 43, 44, 45, 46, 47, 48,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 97, 98, 99, 100, 101, 102, 103, 105,
+ 106, 107, 108, 109, 111, 112, 113, 114,
+ 115, 117, 118, 119, 120, 122, 123, 124,
+ 125, 127, 128, 129, 131, 132, 133, 134,
+ 136, 137, 138, 140, 141, 142, 144, 145,
+ 146, 148, 149, 151, 152, 153, 155, 156,
+ 157, 159, 160, 162, 163, 164, 166, 167,
+ 169, 170, 172, 173, 174, 176, 177, 179,
+ 180, 182, 183, 185, 186, 188, 189, 191,
+ 192, 194, 195, 197, 198, 200, 201, 203,
+ 205, 206, 208, 209, 211, 212, 214, 216,
+ 217, 219, 220, 222, 224, 225, 227, 228,
+ 230, 232, 233, 235, 237, 238, 240, 242,
+ 243, 245, 247, 248, 250, 252, 253, 255,
+};
+
+static const u16 xgamma8_18[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 6,
+ 6, 6, 7, 7, 8, 8, 8, 9,
+ 9, 10, 10, 10, 11, 11, 12, 12,
+ 13, 13, 14, 14, 15, 15, 16, 16,
+ 17, 17, 18, 18, 19, 19, 20, 21,
+ 21, 22, 22, 23, 24, 24, 25, 26,
+ 26, 27, 28, 28, 29, 30, 30, 31,
+ 32, 32, 33, 34, 35, 35, 36, 37,
+ 38, 38, 39, 40, 41, 41, 42, 43,
+ 44, 45, 46, 46, 47, 48, 49, 50,
+ 51, 52, 53, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 86, 87, 88, 89, 90,
+ 91, 92, 93, 95, 96, 97, 98, 99,
+ 100, 102, 103, 104, 105, 107, 108, 109,
+ 110, 111, 113, 114, 115, 116, 118, 119,
+ 120, 122, 123, 124, 126, 127, 128, 129,
+ 131, 132, 134, 135, 136, 138, 139, 140,
+ 142, 143, 145, 146, 147, 149, 150, 152,
+ 153, 154, 156, 157, 159, 160, 162, 163,
+ 165, 166, 168, 169, 171, 172, 174, 175,
+ 177, 178, 180, 181, 183, 184, 186, 188,
+ 189, 191, 192, 194, 195, 197, 199, 200,
+ 202, 204, 205, 207, 208, 210, 212, 213,
+ 215, 217, 218, 220, 222, 224, 225, 227,
+ 229, 230, 232, 234, 236, 237, 239, 241,
+ 243, 244, 246, 248, 250, 251, 253, 255,
+};
+
+static const u16 xgamma8_19[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 4, 4, 4, 4, 5,
+ 5, 5, 6, 6, 6, 7, 7, 7,
+ 8, 8, 8, 9, 9, 9, 10, 10,
+ 11, 11, 12, 12, 12, 13, 13, 14,
+ 14, 15, 15, 16, 16, 17, 17, 18,
+ 18, 19, 20, 20, 21, 21, 22, 22,
+ 23, 24, 24, 25, 26, 26, 27, 28,
+ 28, 29, 30, 30, 31, 32, 32, 33,
+ 34, 35, 35, 36, 37, 38, 38, 39,
+ 40, 41, 41, 42, 43, 44, 45, 46,
+ 46, 47, 48, 49, 50, 51, 52, 53,
+ 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 81, 82, 83, 84, 85,
+ 86, 87, 88, 90, 91, 92, 93, 94,
+ 95, 97, 98, 99, 100, 101, 103, 104,
+ 105, 106, 108, 109, 110, 112, 113, 114,
+ 115, 117, 118, 119, 121, 122, 123, 125,
+ 126, 127, 129, 130, 132, 133, 134, 136,
+ 137, 139, 140, 141, 143, 144, 146, 147,
+ 149, 150, 152, 153, 155, 156, 158, 159,
+ 161, 162, 164, 165, 167, 168, 170, 172,
+ 173, 175, 176, 178, 180, 181, 183, 184,
+ 186, 188, 189, 191, 193, 194, 196, 198,
+ 199, 201, 203, 204, 206, 208, 210, 211,
+ 213, 215, 217, 218, 220, 222, 224, 225,
+ 227, 229, 231, 233, 235, 236, 238, 240,
+ 242, 244, 246, 247, 249, 251, 253, 255,
+};
+
+static const u16 xgamma8_20[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 4, 4,
+ 4, 4, 5, 5, 5, 5, 6, 6,
+ 6, 7, 7, 7, 8, 8, 8, 9,
+ 9, 9, 10, 10, 11, 11, 11, 12,
+ 12, 13, 13, 14, 14, 15, 15, 16,
+ 16, 17, 17, 18, 18, 19, 19, 20,
+ 20, 21, 21, 22, 23, 23, 24, 24,
+ 25, 26, 26, 27, 28, 28, 29, 30,
+ 30, 31, 32, 32, 33, 34, 35, 35,
+ 36, 37, 38, 38, 39, 40, 41, 42,
+ 42, 43, 44, 45, 46, 47, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 84, 85, 86, 87, 88, 89,
+ 91, 92, 93, 94, 95, 97, 98, 99,
+ 100, 102, 103, 104, 105, 107, 108, 109,
+ 111, 112, 113, 115, 116, 117, 119, 120,
+ 121, 123, 124, 126, 127, 128, 130, 131,
+ 133, 134, 136, 137, 139, 140, 142, 143,
+ 145, 146, 148, 149, 151, 152, 154, 155,
+ 157, 158, 160, 162, 163, 165, 166, 168,
+ 170, 171, 173, 175, 176, 178, 180, 181,
+ 183, 185, 186, 188, 190, 192, 193, 195,
+ 197, 199, 200, 202, 204, 206, 207, 209,
+ 211, 213, 215, 217, 218, 220, 222, 224,
+ 226, 228, 230, 232, 233, 235, 237, 239,
+ 241, 243, 245, 247, 249, 251, 253, 255,
+};
+
+static const u16 xgamma8_21[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 4, 4, 4, 4, 5, 5,
+ 5, 5, 6, 6, 6, 7, 7, 7,
+ 8, 8, 8, 9, 9, 9, 10, 10,
+ 11, 11, 11, 12, 12, 13, 13, 14,
+ 14, 14, 15, 15, 16, 16, 17, 17,
+ 18, 18, 19, 20, 20, 21, 21, 22,
+ 22, 23, 24, 24, 25, 25, 26, 27,
+ 27, 28, 29, 29, 30, 31, 31, 32,
+ 33, 33, 34, 35, 36, 36, 37, 38,
+ 39, 40, 40, 41, 42, 43, 44, 44,
+ 45, 46, 47, 48, 49, 50, 51, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 75, 76,
+ 77, 78, 79, 80, 81, 83, 84, 85,
+ 86, 87, 88, 90, 91, 92, 93, 95,
+ 96, 97, 98, 100, 101, 102, 104, 105,
+ 106, 107, 109, 110, 112, 113, 114, 116,
+ 117, 118, 120, 121, 123, 124, 126, 127,
+ 129, 130, 131, 133, 134, 136, 137, 139,
+ 141, 142, 144, 145, 147, 148, 150, 151,
+ 153, 155, 156, 158, 160, 161, 163, 165,
+ 166, 168, 170, 171, 173, 175, 176, 178,
+ 180, 182, 183, 185, 187, 189, 191, 192,
+ 194, 196, 198, 200, 202, 203, 205, 207,
+ 209, 211, 213, 215, 217, 219, 221, 223,
+ 225, 226, 228, 230, 232, 234, 236, 238,
+ 241, 243, 245, 247, 249, 251, 253, 255,
+};
+
+static const u16 xgamma8_22[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 5, 5, 5, 5, 6, 6, 6,
+ 6, 7, 7, 7, 8, 8, 8, 9,
+ 9, 9, 10, 10, 11, 11, 11, 12,
+ 12, 13, 13, 13, 14, 14, 15, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 21, 22, 22, 23, 23, 24,
+ 25, 25, 26, 26, 27, 28, 28, 29,
+ 30, 30, 31, 32, 33, 33, 34, 35,
+ 35, 36, 37, 38, 39, 39, 40, 41,
+ 42, 43, 43, 44, 45, 46, 47, 48,
+ 49, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 73, 74, 75, 76, 77, 78, 79, 81,
+ 82, 83, 84, 85, 87, 88, 89, 90,
+ 91, 93, 94, 95, 97, 98, 99, 100,
+ 102, 103, 105, 106, 107, 109, 110, 111,
+ 113, 114, 116, 117, 119, 120, 121, 123,
+ 124, 126, 127, 129, 130, 132, 133, 135,
+ 137, 138, 140, 141, 143, 145, 146, 148,
+ 149, 151, 153, 154, 156, 158, 159, 161,
+ 163, 165, 166, 168, 170, 172, 173, 175,
+ 177, 179, 181, 182, 184, 186, 188, 190,
+ 192, 194, 196, 197, 199, 201, 203, 205,
+ 207, 209, 211, 213, 215, 217, 219, 221,
+ 223, 225, 227, 229, 231, 234, 236, 238,
+ 240, 242, 244, 246, 248, 251, 253, 255,
+};
+
+static const u16 xgamma8_23[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 5, 5, 5,
+ 5, 6, 6, 6, 7, 7, 7, 7,
+ 8, 8, 8, 9, 9, 10, 10, 10,
+ 11, 11, 11, 12, 12, 13, 13, 13,
+ 14, 14, 15, 15, 16, 16, 17, 17,
+ 18, 18, 19, 19, 20, 20, 21, 21,
+ 22, 23, 23, 24, 24, 25, 26, 26,
+ 27, 28, 28, 29, 30, 30, 31, 32,
+ 32, 33, 34, 35, 35, 36, 37, 38,
+ 38, 39, 40, 41, 42, 42, 43, 44,
+ 45, 46, 47, 48, 49, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67,
+ 69, 70, 71, 72, 73, 74, 75, 76,
+ 78, 79, 80, 81, 82, 84, 85, 86,
+ 87, 89, 90, 91, 92, 94, 95, 96,
+ 98, 99, 100, 102, 103, 104, 106, 107,
+ 109, 110, 112, 113, 114, 116, 117, 119,
+ 120, 122, 123, 125, 126, 128, 130, 131,
+ 133, 134, 136, 138, 139, 141, 143, 144,
+ 146, 148, 149, 151, 153, 154, 156, 158,
+ 160, 161, 163, 165, 167, 169, 170, 172,
+ 174, 176, 178, 180, 182, 183, 185, 187,
+ 189, 191, 193, 195, 197, 199, 201, 203,
+ 205, 207, 209, 211, 213, 215, 218, 220,
+ 222, 224, 226, 228, 230, 233, 235, 237,
+ 239, 241, 244, 246, 248, 250, 253, 255,
+};
+
+static const u16 xgamma8_24[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 6,
+ 7, 7, 7, 8, 8, 8, 9, 9,
+ 9, 10, 10, 10, 11, 11, 11, 12,
+ 12, 13, 13, 14, 14, 14, 15, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 21, 22, 22, 23, 23, 24,
+ 24, 25, 26, 26, 27, 28, 28, 29,
+ 30, 30, 31, 32, 32, 33, 34, 35,
+ 35, 36, 37, 38, 39, 39, 40, 41,
+ 42, 43, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 53, 54, 55,
+ 56, 57, 58, 59, 60, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 73,
+ 74, 75, 76, 77, 78, 80, 81, 82,
+ 83, 85, 86, 87, 88, 90, 91, 92,
+ 94, 95, 96, 98, 99, 100, 102, 103,
+ 105, 106, 108, 109, 111, 112, 114, 115,
+ 117, 118, 120, 121, 123, 124, 126, 127,
+ 129, 131, 132, 134, 136, 137, 139, 141,
+ 142, 144, 146, 148, 149, 151, 153, 155,
+ 156, 158, 160, 162, 164, 166, 167, 169,
+ 171, 173, 175, 177, 179, 181, 183, 185,
+ 187, 189, 191, 193, 195, 197, 199, 201,
+ 203, 205, 207, 210, 212, 214, 216, 218,
+ 220, 223, 225, 227, 229, 232, 234, 236,
+ 239, 241, 243, 246, 248, 250, 253, 255,
+};
+
+static const u16 xgamma8_25[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 5, 5, 5, 5, 6,
+ 6, 6, 6, 7, 7, 7, 7, 8,
+ 8, 8, 9, 9, 9, 10, 10, 10,
+ 11, 11, 12, 12, 12, 13, 13, 14,
+ 14, 15, 15, 15, 16, 16, 17, 17,
+ 18, 18, 19, 19, 20, 20, 21, 22,
+ 22, 23, 23, 24, 25, 25, 26, 26,
+ 27, 28, 28, 29, 30, 30, 31, 32,
+ 33, 33, 34, 35, 36, 36, 37, 38,
+ 39, 40, 40, 41, 42, 43, 44, 45,
+ 46, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 67, 68, 69,
+ 70, 71, 72, 73, 75, 76, 77, 78,
+ 80, 81, 82, 83, 85, 86, 87, 89,
+ 90, 91, 93, 94, 95, 97, 98, 99,
+ 101, 102, 104, 105, 107, 108, 110, 111,
+ 113, 114, 116, 117, 119, 121, 122, 124,
+ 125, 127, 129, 130, 132, 134, 135, 137,
+ 139, 141, 142, 144, 146, 148, 150, 151,
+ 153, 155, 157, 159, 161, 163, 165, 166,
+ 168, 170, 172, 174, 176, 178, 180, 182,
+ 184, 186, 189, 191, 193, 195, 197, 199,
+ 201, 204, 206, 208, 210, 212, 215, 217,
+ 219, 221, 224, 226, 228, 231, 233, 235,
+ 238, 240, 243, 245, 248, 250, 253, 255,
+};
+
+static const u16 xgamma8_26[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 7,
+ 7, 7, 8, 8, 8, 9, 9, 9,
+ 10, 10, 10, 11, 11, 11, 12, 12,
+ 13, 13, 13, 14, 14, 15, 15, 16,
+ 16, 17, 17, 18, 18, 19, 19, 20,
+ 20, 21, 21, 22, 22, 23, 24, 24,
+ 25, 25, 26, 27, 27, 28, 29, 29,
+ 30, 31, 31, 32, 33, 34, 34, 35,
+ 36, 37, 38, 38, 39, 40, 41, 42,
+ 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 68, 69, 70, 71, 72, 73, 75,
+ 76, 77, 78, 80, 81, 82, 84, 85,
+ 86, 88, 89, 90, 92, 93, 94, 96,
+ 97, 99, 100, 102, 103, 105, 106, 108,
+ 109, 111, 112, 114, 115, 117, 119, 120,
+ 122, 124, 125, 127, 129, 130, 132, 134,
+ 136, 137, 139, 141, 143, 145, 146, 148,
+ 150, 152, 154, 156, 158, 160, 162, 164,
+ 166, 168, 170, 172, 174, 176, 178, 180,
+ 182, 184, 186, 188, 191, 193, 195, 197,
+ 199, 202, 204, 206, 209, 211, 213, 215,
+ 218, 220, 223, 225, 227, 230, 232, 235,
+ 237, 240, 242, 245, 247, 250, 252, 255,
+};
+
+static const u16 xgamma8_27[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 6, 6,
+ 6, 6, 7, 7, 7, 7, 8, 8,
+ 8, 9, 9, 9, 10, 10, 10, 11,
+ 11, 12, 12, 12, 13, 13, 14, 14,
+ 14, 15, 15, 16, 16, 17, 17, 18,
+ 18, 19, 19, 20, 20, 21, 21, 22,
+ 23, 23, 24, 24, 25, 26, 26, 27,
+ 28, 28, 29, 30, 30, 31, 32, 33,
+ 33, 34, 35, 36, 36, 37, 38, 39,
+ 40, 41, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 51, 52, 53,
+ 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 68, 69, 70, 71,
+ 72, 74, 75, 76, 77, 79, 80, 81,
+ 83, 84, 85, 87, 88, 89, 91, 92,
+ 94, 95, 97, 98, 100, 101, 103, 104,
+ 106, 107, 109, 110, 112, 114, 115, 117,
+ 119, 120, 122, 124, 125, 127, 129, 131,
+ 132, 134, 136, 138, 140, 141, 143, 145,
+ 147, 149, 151, 153, 155, 157, 159, 161,
+ 163, 165, 167, 169, 171, 173, 175, 178,
+ 180, 182, 184, 186, 188, 191, 193, 195,
+ 198, 200, 202, 205, 207, 209, 212, 214,
+ 216, 219, 221, 224, 226, 229, 231, 234,
+ 237, 239, 242, 244, 247, 250, 252, 255,
+};
+
+static const u16 xgamma8_28[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 5, 5, 5,
+ 5, 6, 6, 6, 6, 7, 7, 7,
+ 7, 8, 8, 8, 9, 9, 9, 10,
+ 10, 10, 11, 11, 11, 12, 12, 13,
+ 13, 13, 14, 14, 15, 15, 16, 16,
+ 17, 17, 18, 18, 19, 19, 20, 20,
+ 21, 21, 22, 22, 23, 24, 24, 25,
+ 25, 26, 27, 27, 28, 29, 29, 30,
+ 31, 32, 32, 33, 34, 35, 35, 36,
+ 37, 38, 39, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 50,
+ 51, 52, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 66, 67, 68,
+ 69, 70, 72, 73, 74, 75, 77, 78,
+ 79, 81, 82, 83, 85, 86, 87, 89,
+ 90, 92, 93, 95, 96, 98, 99, 101,
+ 102, 104, 105, 107, 109, 110, 112, 114,
+ 115, 117, 119, 120, 122, 124, 126, 127,
+ 129, 131, 133, 135, 137, 138, 140, 142,
+ 144, 146, 148, 150, 152, 154, 156, 158,
+ 160, 162, 164, 167, 169, 171, 173, 175,
+ 177, 180, 182, 184, 186, 189, 191, 193,
+ 196, 198, 200, 203, 205, 208, 210, 213,
+ 215, 218, 220, 223, 225, 228, 231, 233,
+ 236, 239, 241, 244, 247, 249, 252, 255,
+};
+
+static const u16 xgamma8_29[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 6,
+ 7, 7, 7, 7, 8, 8, 8, 9,
+ 9, 9, 9, 10, 10, 11, 11, 11,
+ 12, 12, 12, 13, 13, 14, 14, 15,
+ 15, 15, 16, 16, 17, 17, 18, 18,
+ 19, 19, 20, 21, 21, 22, 22, 23,
+ 23, 24, 25, 25, 26, 27, 27, 28,
+ 29, 29, 30, 31, 32, 32, 33, 34,
+ 35, 35, 36, 37, 38, 39, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 64, 65,
+ 66, 67, 68, 70, 71, 72, 73, 75,
+ 76, 77, 79, 80, 81, 83, 84, 86,
+ 87, 88, 90, 91, 93, 94, 96, 97,
+ 99, 101, 102, 104, 105, 107, 109, 110,
+ 112, 114, 115, 117, 119, 121, 122, 124,
+ 126, 128, 130, 132, 134, 135, 137, 139,
+ 141, 143, 145, 147, 149, 151, 153, 155,
+ 158, 160, 162, 164, 166, 168, 171, 173,
+ 175, 177, 180, 182, 184, 187, 189, 191,
+ 194, 196, 199, 201, 204, 206, 209, 211,
+ 214, 216, 219, 222, 224, 227, 230, 232,
+ 235, 238, 241, 244, 246, 249, 252, 255,
+};
+
+static const u16 xgamma8_30[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 5, 5, 5, 5, 6,
+ 6, 6, 6, 6, 7, 7, 7, 8,
+ 8, 8, 8, 9, 9, 9, 10, 10,
+ 10, 11, 11, 12, 12, 12, 13, 13,
+ 14, 14, 14, 15, 15, 16, 16, 17,
+ 17, 18, 18, 19, 19, 20, 20, 21,
+ 22, 22, 23, 23, 24, 25, 25, 26,
+ 27, 27, 28, 29, 29, 30, 31, 32,
+ 32, 33, 34, 35, 35, 36, 37, 38,
+ 39, 40, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 60, 61, 62,
+ 63, 64, 65, 67, 68, 69, 70, 72,
+ 73, 74, 76, 77, 78, 80, 81, 82,
+ 84, 85, 87, 88, 90, 91, 93, 94,
+ 96, 97, 99, 101, 102, 104, 105, 107,
+ 109, 111, 112, 114, 116, 118, 119, 121,
+ 123, 125, 127, 129, 131, 132, 134, 136,
+ 138, 140, 142, 144, 147, 149, 151, 153,
+ 155, 157, 159, 162, 164, 166, 168, 171,
+ 173, 175, 178, 180, 182, 185, 187, 190,
+ 192, 195, 197, 200, 202, 205, 207, 210,
+ 213, 215, 218, 221, 223, 226, 229, 232,
+ 235, 237, 240, 243, 246, 249, 252, 255,
+};
+
+static const u16 xgamma8_31[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 6, 6, 6, 6, 6, 7,
+ 7, 7, 8, 8, 8, 8, 9, 9,
+ 9, 10, 10, 10, 11, 11, 12, 12,
+ 12, 13, 13, 14, 14, 14, 15, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 21, 22, 22, 23, 23, 24,
+ 25, 25, 26, 27, 27, 28, 29, 29,
+ 30, 31, 32, 32, 33, 34, 35, 36,
+ 36, 37, 38, 39, 40, 41, 42, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 56, 57, 58, 59,
+ 60, 61, 62, 64, 65, 66, 67, 69,
+ 70, 71, 73, 74, 75, 77, 78, 79,
+ 81, 82, 84, 85, 87, 88, 90, 91,
+ 93, 94, 96, 97, 99, 101, 102, 104,
+ 106, 108, 109, 111, 113, 115, 116, 118,
+ 120, 122, 124, 126, 128, 130, 132, 134,
+ 136, 138, 140, 142, 144, 146, 148, 150,
+ 152, 155, 157, 159, 161, 164, 166, 168,
+ 171, 173, 175, 178, 180, 183, 185, 188,
+ 190, 193, 195, 198, 201, 203, 206, 209,
+ 211, 214, 217, 220, 222, 225, 228, 231,
+ 234, 237, 240, 243, 246, 249, 252, 255,
+};
+
+static const u16 xgamma8_32[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 7, 7, 7, 8, 8, 8,
+ 8, 9, 9, 9, 10, 10, 10, 11,
+ 11, 12, 12, 12, 13, 13, 14, 14,
+ 14, 15, 15, 16, 16, 17, 17, 18,
+ 18, 19, 19, 20, 21, 21, 22, 22,
+ 23, 23, 24, 25, 25, 26, 27, 27,
+ 28, 29, 30, 30, 31, 32, 33, 33,
+ 34, 35, 36, 37, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 59, 60, 61, 62, 63, 65, 66,
+ 67, 68, 70, 71, 72, 74, 75, 76,
+ 78, 79, 81, 82, 84, 85, 87, 88,
+ 90, 91, 93, 95, 96, 98, 99, 101,
+ 103, 105, 106, 108, 110, 112, 113, 115,
+ 117, 119, 121, 123, 125, 127, 129, 131,
+ 133, 135, 137, 139, 141, 143, 146, 148,
+ 150, 152, 154, 157, 159, 161, 164, 166,
+ 168, 171, 173, 176, 178, 181, 183, 186,
+ 188, 191, 194, 196, 199, 202, 204, 207,
+ 210, 213, 216, 219, 221, 224, 227, 230,
+ 233, 236, 239, 242, 246, 249, 252, 255,
+};
+
+static const u16 xgamma8_33[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 4, 5, 5, 5, 5,
+ 6, 6, 6, 6, 7, 7, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 10,
+ 10, 11, 11, 11, 12, 12, 12, 13,
+ 13, 14, 14, 15, 15, 15, 16, 16,
+ 17, 17, 18, 18, 19, 19, 20, 21,
+ 21, 22, 22, 23, 24, 24, 25, 26,
+ 26, 27, 28, 28, 29, 30, 31, 31,
+ 32, 33, 34, 34, 35, 36, 37, 38,
+ 39, 40, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 53, 54,
+ 55, 56, 57, 58, 59, 61, 62, 63,
+ 64, 66, 67, 68, 70, 71, 72, 74,
+ 75, 76, 78, 79, 81, 82, 84, 85,
+ 87, 88, 90, 92, 93, 95, 97, 98,
+ 100, 102, 103, 105, 107, 109, 111, 113,
+ 114, 116, 118, 120, 122, 124, 126, 128,
+ 130, 132, 134, 136, 139, 141, 143, 145,
+ 147, 150, 152, 154, 157, 159, 161, 164,
+ 166, 169, 171, 174, 176, 179, 181, 184,
+ 187, 189, 192, 195, 198, 200, 203, 206,
+ 209, 212, 215, 217, 220, 223, 226, 230,
+ 233, 236, 239, 242, 245, 248, 252, 255,
+};
+
+static const u16 xgamma8_34[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 7,
+ 7, 7, 7, 8, 8, 8, 9, 9,
+ 9, 10, 10, 10, 11, 11, 11, 12,
+ 12, 12, 13, 13, 14, 14, 15, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 21, 21, 22, 23, 23, 24,
+ 24, 25, 26, 26, 27, 28, 29, 29,
+ 30, 31, 32, 32, 33, 34, 35, 36,
+ 37, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 55, 56, 57, 58, 59, 60,
+ 62, 63, 64, 66, 67, 68, 70, 71,
+ 72, 74, 75, 77, 78, 80, 81, 83,
+ 84, 86, 87, 89, 90, 92, 94, 95,
+ 97, 99, 101, 102, 104, 106, 108, 110,
+ 112, 114, 115, 117, 119, 121, 123, 125,
+ 128, 130, 132, 134, 136, 138, 141, 143,
+ 145, 147, 150, 152, 154, 157, 159, 162,
+ 164, 167, 169, 172, 174, 177, 180, 182,
+ 185, 188, 190, 193, 196, 199, 202, 205,
+ 208, 210, 213, 216, 219, 223, 226, 229,
+ 232, 235, 238, 242, 245, 248, 252, 255,
+};
+
+static const u16 xgamma8_35[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 5, 5, 5, 5, 5, 6, 6,
+ 6, 6, 7, 7, 7, 7, 8, 8,
+ 8, 9, 9, 9, 10, 10, 10, 11,
+ 11, 11, 12, 12, 13, 13, 13, 14,
+ 14, 15, 15, 16, 16, 17, 17, 18,
+ 18, 19, 19, 20, 20, 21, 22, 22,
+ 23, 23, 24, 25, 25, 26, 27, 28,
+ 28, 29, 30, 30, 31, 32, 33, 34,
+ 35, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 56, 57, 58,
+ 59, 60, 62, 63, 64, 66, 67, 68,
+ 70, 71, 72, 74, 75, 77, 78, 80,
+ 81, 83, 85, 86, 88, 89, 91, 93,
+ 94, 96, 98, 100, 102, 103, 105, 107,
+ 109, 111, 113, 115, 117, 119, 121, 123,
+ 125, 127, 129, 131, 134, 136, 138, 140,
+ 143, 145, 147, 150, 152, 155, 157, 159,
+ 162, 165, 167, 170, 172, 175, 178, 180,
+ 183, 186, 189, 192, 194, 197, 200, 203,
+ 206, 209, 212, 215, 219, 222, 225, 228,
+ 231, 235, 238, 241, 245, 248, 252, 255,
+};
+
+static const u16 xgamma8_36[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 4, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 7, 7, 7,
+ 8, 8, 8, 8, 9, 9, 9, 10,
+ 10, 10, 11, 11, 12, 12, 12, 13,
+ 13, 14, 14, 15, 15, 15, 16, 16,
+ 17, 17, 18, 18, 19, 20, 20, 21,
+ 21, 22, 23, 23, 24, 24, 25, 26,
+ 27, 27, 28, 29, 29, 30, 31, 32,
+ 33, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 42, 43, 44, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 56,
+ 57, 58, 59, 61, 62, 63, 64, 66,
+ 67, 69, 70, 71, 73, 74, 76, 77,
+ 79, 80, 82, 83, 85, 87, 88, 90,
+ 92, 94, 95, 97, 99, 101, 103, 104,
+ 106, 108, 110, 112, 114, 116, 118, 120,
+ 122, 125, 127, 129, 131, 133, 136, 138,
+ 140, 143, 145, 147, 150, 152, 155, 157,
+ 160, 162, 165, 168, 170, 173, 176, 179,
+ 181, 184, 187, 190, 193, 196, 199, 202,
+ 205, 208, 211, 214, 218, 221, 224, 227,
+ 231, 234, 237, 241, 244, 248, 251, 255,
+};
+
+static const u16 xgamma8_37[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 7,
+ 7, 7, 7, 8, 8, 8, 9, 9,
+ 9, 10, 10, 10, 11, 11, 11, 12,
+ 12, 13, 13, 13, 14, 14, 15, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 21, 22, 22, 23, 24, 24,
+ 25, 26, 26, 27, 28, 28, 29, 30,
+ 31, 32, 32, 33, 34, 35, 36, 37,
+ 38, 39, 39, 40, 41, 42, 43, 44,
+ 45, 47, 48, 49, 50, 51, 52, 53,
+ 54, 56, 57, 58, 59, 61, 62, 63,
+ 65, 66, 67, 69, 70, 72, 73, 75,
+ 76, 78, 79, 81, 83, 84, 86, 88,
+ 89, 91, 93, 95, 96, 98, 100, 102,
+ 104, 106, 108, 110, 112, 114, 116, 118,
+ 120, 122, 124, 127, 129, 131, 133, 136,
+ 138, 140, 143, 145, 148, 150, 153, 155,
+ 158, 160, 163, 166, 169, 171, 174, 177,
+ 180, 183, 186, 188, 191, 194, 198, 201,
+ 204, 207, 210, 213, 217, 220, 223, 227,
+ 230, 233, 237, 241, 244, 248, 251, 255,
+};
+
+static const u16 xgamma8_38[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 7, 7, 7, 8, 8, 8,
+ 8, 9, 9, 9, 10, 10, 10, 11,
+ 11, 12, 12, 12, 13, 13, 14, 14,
+ 15, 15, 15, 16, 16, 17, 18, 18,
+ 19, 19, 20, 20, 21, 21, 22, 23,
+ 23, 24, 25, 25, 26, 27, 28, 28,
+ 29, 30, 31, 31, 32, 33, 34, 35,
+ 36, 37, 38, 38, 39, 40, 41, 42,
+ 43, 44, 45, 47, 48, 49, 50, 51,
+ 52, 53, 55, 56, 57, 58, 60, 61,
+ 62, 64, 65, 66, 68, 69, 71, 72,
+ 74, 75, 77, 78, 80, 82, 83, 85,
+ 87, 88, 90, 92, 94, 96, 98, 99,
+ 101, 103, 105, 107, 109, 111, 113, 115,
+ 118, 120, 122, 124, 126, 129, 131, 133,
+ 136, 138, 141, 143, 146, 148, 151, 153,
+ 156, 158, 161, 164, 167, 169, 172, 175,
+ 178, 181, 184, 187, 190, 193, 196, 199,
+ 203, 206, 209, 212, 216, 219, 222, 226,
+ 229, 233, 237, 240, 244, 247, 251, 255,
+};
+
+static const u16 xgamma8_39[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 4, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 7, 7, 7, 7,
+ 8, 8, 8, 9, 9, 9, 10, 10,
+ 10, 11, 11, 11, 12, 12, 13, 13,
+ 13, 14, 14, 15, 15, 16, 16, 17,
+ 17, 18, 18, 19, 20, 20, 21, 21,
+ 22, 23, 23, 24, 25, 25, 26, 27,
+ 27, 28, 29, 30, 31, 31, 32, 33,
+ 34, 35, 36, 37, 38, 38, 39, 40,
+ 41, 42, 43, 45, 46, 47, 48, 49,
+ 50, 51, 52, 54, 55, 56, 57, 59,
+ 60, 61, 63, 64, 66, 67, 68, 70,
+ 71, 73, 74, 76, 78, 79, 81, 83,
+ 84, 86, 88, 90, 91, 93, 95, 97,
+ 99, 101, 103, 105, 107, 109, 111, 113,
+ 115, 117, 120, 122, 124, 126, 129, 131,
+ 133, 136, 138, 141, 143, 146, 149, 151,
+ 154, 157, 159, 162, 165, 168, 171, 173,
+ 176, 179, 182, 185, 189, 192, 195, 198,
+ 201, 205, 208, 211, 215, 218, 222, 225,
+ 229, 232, 236, 240, 243, 247, 251, 255,
+};
+
+static const u16 xgamma8_40[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 5, 5, 5,
+ 5, 5, 6, 6, 6, 6, 7, 7,
+ 7, 7, 8, 8, 8, 9, 9, 9,
+ 9, 10, 10, 11, 11, 11, 12, 12,
+ 13, 13, 13, 14, 14, 15, 15, 16,
+ 16, 17, 17, 18, 18, 19, 19, 20,
+ 21, 21, 22, 23, 23, 24, 25, 25,
+ 26, 27, 27, 28, 29, 30, 31, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 52, 53, 54, 55, 57,
+ 58, 59, 61, 62, 63, 65, 66, 68,
+ 69, 71, 72, 74, 75, 77, 79, 80,
+ 82, 84, 85, 87, 89, 91, 93, 95,
+ 96, 98, 100, 102, 104, 107, 109, 111,
+ 113, 115, 117, 120, 122, 124, 126, 129,
+ 131, 134, 136, 139, 141, 144, 146, 149,
+ 152, 155, 157, 160, 163, 166, 169, 172,
+ 175, 178, 181, 184, 187, 190, 194, 197,
+ 200, 203, 207, 210, 214, 217, 221, 224,
+ 228, 232, 236, 239, 243, 247, 251, 255,
+};
+
+static const u16 *xgamma8_curves[GAMMA_CURVE_LENGTH] = {
+ &xgamma8_01[0],
+ &xgamma8_02[0],
+ &xgamma8_03[0],
+ &xgamma8_04[0],
+ &xgamma8_05[0],
+ &xgamma8_06[0],
+ &xgamma8_07[0],
+ &xgamma8_08[0],
+ &xgamma8_09[0],
+ &xgamma8_10[0],
+ &xgamma8_11[0],
+ &xgamma8_12[0],
+ &xgamma8_13[0],
+ &xgamma8_14[0],
+ &xgamma8_15[0],
+ &xgamma8_16[0],
+ &xgamma8_17[0],
+ &xgamma8_18[0],
+ &xgamma8_19[0],
+ &xgamma8_20[0],
+ &xgamma8_21[0],
+ &xgamma8_22[0],
+ &xgamma8_23[0],
+ &xgamma8_24[0],
+ &xgamma8_25[0],
+ &xgamma8_26[0],
+ &xgamma8_27[0],
+ &xgamma8_28[0],
+ &xgamma8_29[0],
+ &xgamma8_30[0],
+ &xgamma8_31[0],
+ &xgamma8_32[0],
+ &xgamma8_33[0],
+ &xgamma8_34[0],
+ &xgamma8_35[0],
+ &xgamma8_36[0],
+ &xgamma8_37[0],
+ &xgamma8_38[0],
+ &xgamma8_39[0],
+ &xgamma8_40[0],
+};
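+
+/*
+ * NOTE (added annotation, inferred from the table values rather than
+ * stated by the original submission): the NN suffix in xgamma8_NN
+ * appears to encode the gamma value in tenths, so xgamma8_33 is
+ * gamma 3.3 and xgamma8_10 would be the identity curve for gamma 1.0.
+ * Each entry matches the power-law transfer function
+ *
+ *	out = round(255 * (in / 255) ^ gamma)
+ *
+ * sampled at every 8-bit input code (e.g. in = 128 at gamma 4.0 gives
+ * round(255 * 0.5^4) = 16, as in xgamma8_40 above).  Kernel code
+ * avoids floating point, which is presumably why the curves are
+ * precomputed; a table could be regenerated offline with a
+ * hypothetical userspace helper along the lines of:
+ *
+ *	for (i = 0; i < 256; i++)
+ *		tbl[i] = (u16)(255.0 * pow(i / 255.0, gamma) + 0.5);
+ *
+ * A caller would then presumably select a curve as
+ * xgamma8_curves[gamma_in_tenths - 1], since index 0 holds xgamma8_01.
+ */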
+
+#define GAMMA_BPC_10 (10)
+#define GAMMA10_TABLE_LENGTH BIT(GAMMA_BPC_10)
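+/*
+ * NOTE (added annotation, an assumption mirroring the 8-bit set):
+ * BIT(10) yields 1024 entries, one per 10-bit input code, and the
+ * tables below are normalized to a full-scale output of 1023, i.e.
+ * out = round(1023 * (in / 1023) ^ gamma); xgamma10_10 (gamma 1.0)
+ * is accordingly the identity map.
+ */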
+static const u16 xgamma10_01[GAMMA10_TABLE_LENGTH] = {
+ 0, 512, 548, 571, 588, 601, 612, 621, 630, 637, 644,
+ 650, 656, 661, 666, 671, 675, 679, 683, 687, 690, 694,
+ 697, 700, 703, 706, 709, 711, 714, 716, 719, 721, 723,
+ 726, 728, 730, 732, 734, 736, 738, 740, 742, 743, 745,
+ 747, 749, 750, 752, 753, 755, 756, 758, 759, 761, 762,
+ 764, 765, 766, 768, 769, 770, 772, 773, 774, 775, 777,
+ 778, 779, 780, 781, 782, 783, 785, 786, 787, 788, 789,
+ 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800,
+ 800, 801, 802, 803, 804, 805, 806, 807, 807, 808, 809,
+ 810, 811, 812, 812, 813, 814, 815, 815, 816, 817, 818,
+ 819, 819, 820, 821, 821, 822, 823, 824, 824, 825, 826,
+ 826, 827, 828, 828, 829, 830, 830, 831, 832, 832, 833,
+ 834, 834, 835, 835, 836, 837, 837, 838, 838, 839, 840,
+ 840, 841, 841, 842, 843, 843, 844, 844, 845, 845, 846,
+ 847, 847, 848, 848, 849, 849, 850, 850, 851, 851, 852,
+ 852, 853, 853, 854, 854, 855, 855, 856, 856, 857, 857,
+ 858, 858, 859, 859, 860, 860, 861, 861, 862, 862, 863,
+ 863, 864, 864, 864, 865, 865, 866, 866, 867, 867, 868,
+ 868, 869, 869, 869, 870, 870, 871, 871, 872, 872, 872,
+ 873, 873, 874, 874, 874, 875, 875, 876, 876, 876, 877,
+ 877, 878, 878, 878, 879, 879, 880, 880, 880, 881, 881,
+ 882, 882, 882, 883, 883, 883, 884, 884, 885, 885, 885,
+ 886, 886, 886, 887, 887, 887, 888, 888, 889, 889, 889,
+ 890, 890, 890, 891, 891, 891, 892, 892, 892, 893, 893,
+ 893, 894, 894, 894, 895, 895, 895, 896, 896, 896, 897,
+ 897, 897, 898, 898, 898, 899, 899, 899, 900, 900, 900,
+ 901, 901, 901, 902, 902, 902, 902, 903, 903, 903, 904,
+ 904, 904, 905, 905, 905, 906, 906, 906, 906, 907, 907,
+ 907, 908, 908, 908, 908, 909, 909, 909, 910, 910, 910,
+ 910, 911, 911, 911, 912, 912, 912, 912, 913, 913, 913,
+ 914, 914, 914, 914, 915, 915, 915, 915, 916, 916, 916,
+ 917, 917, 917, 917, 918, 918, 918, 918, 919, 919, 919,
+ 919, 920, 920, 920, 921, 921, 921, 921, 922, 922, 922,
+ 922, 923, 923, 923, 923, 924, 924, 924, 924, 925, 925,
+ 925, 925, 926, 926, 926, 926, 927, 927, 927, 927, 928,
+ 928, 928, 928, 928, 929, 929, 929, 929, 930, 930, 930,
+ 930, 931, 931, 931, 931, 932, 932, 932, 932, 932, 933,
+ 933, 933, 933, 934, 934, 934, 934, 935, 935, 935, 935,
+ 935, 936, 936, 936, 936, 937, 937, 937, 937, 937, 938,
+ 938, 938, 938, 939, 939, 939, 939, 939, 940, 940, 940,
+ 940, 940, 941, 941, 941, 941, 942, 942, 942, 942, 942,
+ 943, 943, 943, 943, 943, 944, 944, 944, 944, 944, 945,
+ 945, 945, 945, 945, 946, 946, 946, 946, 946, 947, 947,
+ 947, 947, 947, 948, 948, 948, 948, 948, 949, 949, 949,
+ 949, 949, 950, 950, 950, 950, 950, 951, 951, 951, 951,
+ 951, 952, 952, 952, 952, 952, 953, 953, 953, 953, 953,
+ 953, 954, 954, 954, 954, 954, 955, 955, 955, 955, 955,
+ 956, 956, 956, 956, 956, 956, 957, 957, 957, 957, 957,
+ 958, 958, 958, 958, 958, 958, 959, 959, 959, 959, 959,
+ 960, 960, 960, 960, 960, 960, 961, 961, 961, 961, 961,
+ 961, 962, 962, 962, 962, 962, 962, 963, 963, 963, 963,
+ 963, 964, 964, 964, 964, 964, 964, 965, 965, 965, 965,
+ 965, 965, 966, 966, 966, 966, 966, 966, 967, 967, 967,
+ 967, 967, 967, 968, 968, 968, 968, 968, 968, 969, 969,
+ 969, 969, 969, 969, 970, 970, 970, 970, 970, 970, 970,
+ 971, 971, 971, 971, 971, 971, 972, 972, 972, 972, 972,
+ 972, 973, 973, 973, 973, 973, 973, 974, 974, 974, 974,
+ 974, 974, 974, 975, 975, 975, 975, 975, 975, 976, 976,
+ 976, 976, 976, 976, 976, 977, 977, 977, 977, 977, 977,
+ 977, 978, 978, 978, 978, 978, 978, 979, 979, 979, 979,
+ 979, 979, 979, 980, 980, 980, 980, 980, 980, 980, 981,
+ 981, 981, 981, 981, 981, 981, 982, 982, 982, 982, 982,
+ 982, 982, 983, 983, 983, 983, 983, 983, 983, 984, 984,
+ 984, 984, 984, 984, 984, 985, 985, 985, 985, 985, 985,
+ 985, 986, 986, 986, 986, 986, 986, 986, 987, 987, 987,
+ 987, 987, 987, 987, 988, 988, 988, 988, 988, 988, 988,
+ 989, 989, 989, 989, 989, 989, 989, 989, 990, 990, 990,
+ 990, 990, 990, 990, 991, 991, 991, 991, 991, 991, 991,
+ 991, 992, 992, 992, 992, 992, 992, 992, 993, 993, 993,
+ 993, 993, 993, 993, 993, 994, 994, 994, 994, 994, 994,
+ 994, 994, 995, 995, 995, 995, 995, 995, 995, 996, 996,
+ 996, 996, 996, 996, 996, 996, 997, 997, 997, 997, 997,
+ 997, 997, 997, 998, 998, 998, 998, 998, 998, 998, 998,
+ 999, 999, 999, 999, 999, 999, 999, 999, 1000, 1000, 1000,
+ 1000, 1000, 1000, 1000, 1000, 1000, 1001, 1001, 1001, 1001, 1001,
+ 1001, 1001, 1001, 1002, 1002, 1002, 1002, 1002, 1002, 1002, 1002,
+ 1003, 1003, 1003, 1003, 1003, 1003, 1003, 1003, 1004, 1004, 1004,
+ 1004, 1004, 1004, 1004, 1004, 1004, 1005, 1005, 1005, 1005, 1005,
+ 1005, 1005, 1005, 1006, 1006, 1006, 1006, 1006, 1006, 1006, 1006,
+ 1006, 1007, 1007, 1007, 1007, 1007, 1007, 1007, 1007, 1007, 1008,
+ 1008, 1008, 1008, 1008, 1008, 1008, 1008, 1009, 1009, 1009, 1009,
+ 1009, 1009, 1009, 1009, 1009, 1010, 1010, 1010, 1010, 1010, 1010,
+ 1010, 1010, 1010, 1011, 1011, 1011, 1011, 1011, 1011, 1011, 1011,
+ 1011, 1012, 1012, 1012, 1012, 1012, 1012, 1012, 1012, 1012, 1013,
+ 1013, 1013, 1013, 1013, 1013, 1013, 1013, 1013, 1014, 1014, 1014,
+ 1014, 1014, 1014, 1014, 1014, 1014, 1014, 1015, 1015, 1015, 1015,
+ 1015, 1015, 1015, 1015, 1015, 1016, 1016, 1016, 1016, 1016, 1016,
+ 1016, 1016, 1016, 1017, 1017, 1017, 1017, 1017, 1017, 1017, 1017,
+ 1017, 1017, 1018, 1018, 1018, 1018, 1018, 1018, 1018, 1018, 1018,
+ 1019, 1019, 1019, 1019, 1019, 1019, 1019, 1019, 1019, 1019, 1020,
+ 1020, 1020, 1020, 1020, 1020, 1020, 1020, 1020, 1020, 1021, 1021,
+ 1021, 1021, 1021, 1021, 1021, 1021, 1021, 1021, 1022, 1022, 1022,
+ 1022, 1022, 1022, 1022, 1022, 1022, 1022, 1023, 1023, 1023, 1023,
+ 1023,
+};
+
+static const u16 xgamma10_02[GAMMA10_TABLE_LENGTH] = {
+ 0, 256, 294, 319, 338, 353, 366, 378, 388, 397, 405,
+ 413, 420, 427, 434, 440, 445, 451, 456, 461, 466, 470,
+ 475, 479, 483, 487, 491, 495, 498, 502, 505, 508, 512,
+ 515, 518, 521, 524, 527, 529, 532, 535, 538, 540, 543,
+ 545, 548, 550, 552, 555, 557, 559, 562, 564, 566, 568,
+ 570, 572, 574, 576, 578, 580, 582, 584, 586, 588, 589,
+ 591, 593, 595, 597, 598, 600, 602, 603, 605, 607, 608,
+ 610, 611, 613, 614, 616, 618, 619, 621, 622, 623, 625,
+ 626, 628, 629, 631, 632, 633, 635, 636, 637, 639, 640,
+ 641, 643, 644, 645, 646, 648, 649, 650, 651, 653, 654,
+ 655, 656, 657, 658, 660, 661, 662, 663, 664, 665, 666,
+ 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678,
+ 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689,
+ 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700,
+ 700, 701, 702, 703, 704, 705, 706, 707, 708, 708, 709,
+ 710, 711, 712, 713, 714, 714, 715, 716, 717, 718, 719,
+ 719, 720, 721, 722, 723, 723, 724, 725, 726, 727, 727,
+ 728, 729, 730, 731, 731, 732, 733, 734, 734, 735, 736,
+ 737, 737, 738, 739, 740, 740, 741, 742, 742, 743, 744,
+ 745, 745, 746, 747, 747, 748, 749, 750, 750, 751, 752,
+ 752, 753, 754, 754, 755, 756, 756, 757, 758, 758, 759,
+ 760, 760, 761, 762, 762, 763, 764, 764, 765, 765, 766,
+ 767, 767, 768, 769, 769, 770, 771, 771, 772, 772, 773,
+ 774, 774, 775, 775, 776, 777, 777, 778, 778, 779, 780,
+ 780, 781, 781, 782, 783, 783, 784, 784, 785, 785, 786,
+ 787, 787, 788, 788, 789, 789, 790, 791, 791, 792, 792,
+ 793, 793, 794, 794, 795, 796, 796, 797, 797, 798, 798,
+ 799, 799, 800, 800, 801, 801, 802, 803, 803, 804, 804,
+ 805, 805, 806, 806, 807, 807, 808, 808, 809, 809, 810,
+ 810, 811, 811, 812, 812, 813, 813, 814, 814, 815, 815,
+ 816, 816, 817, 817, 818, 818, 819, 819, 820, 820, 821,
+ 821, 822, 822, 823, 823, 824, 824, 825, 825, 825, 826,
+ 826, 827, 827, 828, 828, 829, 829, 830, 830, 831, 831,
+ 832, 832, 832, 833, 833, 834, 834, 835, 835, 836, 836,
+ 837, 837, 837, 838, 838, 839, 839, 840, 840, 841, 841,
+ 841, 842, 842, 843, 843, 844, 844, 844, 845, 845, 846,
+ 846, 847, 847, 847, 848, 848, 849, 849, 850, 850, 850,
+ 851, 851, 852, 852, 852, 853, 853, 854, 854, 855, 855,
+ 855, 856, 856, 857, 857, 857, 858, 858, 859, 859, 859,
+ 860, 860, 861, 861, 861, 862, 862, 863, 863, 863, 864,
+ 864, 865, 865, 865, 866, 866, 866, 867, 867, 868, 868,
+ 868, 869, 869, 870, 870, 870, 871, 871, 871, 872, 872,
+ 873, 873, 873, 874, 874, 875, 875, 875, 876, 876, 876,
+ 877, 877, 877, 878, 878, 879, 879, 879, 880, 880, 880,
+ 881, 881, 882, 882, 882, 883, 883, 883, 884, 884, 884,
+ 885, 885, 885, 886, 886, 887, 887, 887, 888, 888, 888,
+ 889, 889, 889, 890, 890, 890, 891, 891, 891, 892, 892,
+ 892, 893, 893, 894, 894, 894, 895, 895, 895, 896, 896,
+ 896, 897, 897, 897, 898, 898, 898, 899, 899, 899, 900,
+ 900, 900, 901, 901, 901, 902, 902, 902, 903, 903, 903,
+ 904, 904, 904, 905, 905, 905, 906, 906, 906, 907, 907,
+ 907, 908, 908, 908, 908, 909, 909, 909, 910, 910, 910,
+ 911, 911, 911, 912, 912, 912, 913, 913, 913, 914, 914,
+ 914, 914, 915, 915, 915, 916, 916, 916, 917, 917, 917,
+ 918, 918, 918, 919, 919, 919, 919, 920, 920, 920, 921,
+ 921, 921, 922, 922, 922, 923, 923, 923, 923, 924, 924,
+ 924, 925, 925, 925, 926, 926, 926, 926, 927, 927, 927,
+ 928, 928, 928, 928, 929, 929, 929, 930, 930, 930, 931,
+ 931, 931, 931, 932, 932, 932, 933, 933, 933, 933, 934,
+ 934, 934, 935, 935, 935, 935, 936, 936, 936, 937, 937,
+ 937, 937, 938, 938, 938, 939, 939, 939, 939, 940, 940,
+ 940, 941, 941, 941, 941, 942, 942, 942, 942, 943, 943,
+ 943, 944, 944, 944, 944, 945, 945, 945, 946, 946, 946,
+ 946, 947, 947, 947, 947, 948, 948, 948, 949, 949, 949,
+ 949, 950, 950, 950, 950, 951, 951, 951, 951, 952, 952,
+ 952, 953, 953, 953, 953, 954, 954, 954, 954, 955, 955,
+ 955, 955, 956, 956, 956, 956, 957, 957, 957, 958, 958,
+ 958, 958, 959, 959, 959, 959, 960, 960, 960, 960, 961,
+ 961, 961, 961, 962, 962, 962, 962, 963, 963, 963, 963,
+ 964, 964, 964, 964, 965, 965, 965, 965, 966, 966, 966,
+ 966, 967, 967, 967, 967, 968, 968, 968, 968, 969, 969,
+ 969, 969, 970, 970, 970, 970, 971, 971, 971, 971, 972,
+ 972, 972, 972, 973, 973, 973, 973, 974, 974, 974, 974,
+ 975, 975, 975, 975, 976, 976, 976, 976, 977, 977, 977,
+ 977, 978, 978, 978, 978, 978, 979, 979, 979, 979, 980,
+ 980, 980, 980, 981, 981, 981, 981, 982, 982, 982, 982,
+ 983, 983, 983, 983, 983, 984, 984, 984, 984, 985, 985,
+ 985, 985, 986, 986, 986, 986, 986, 987, 987, 987, 987,
+ 988, 988, 988, 988, 989, 989, 989, 989, 989, 990, 990,
+ 990, 990, 991, 991, 991, 991, 992, 992, 992, 992, 992,
+ 993, 993, 993, 993, 994, 994, 994, 994, 994, 995, 995,
+ 995, 995, 996, 996, 996, 996, 996, 997, 997, 997, 997,
+ 998, 998, 998, 998, 998, 999, 999, 999, 999, 1000, 1000,
+ 1000, 1000, 1000, 1001, 1001, 1001, 1001, 1002, 1002, 1002, 1002,
+ 1002, 1003, 1003, 1003, 1003, 1003, 1004, 1004, 1004, 1004, 1005,
+ 1005, 1005, 1005, 1005, 1006, 1006, 1006, 1006, 1006, 1007, 1007,
+ 1007, 1007, 1008, 1008, 1008, 1008, 1008, 1009, 1009, 1009, 1009,
+ 1009, 1010, 1010, 1010, 1010, 1010, 1011, 1011, 1011, 1011, 1012,
+ 1012, 1012, 1012, 1012, 1013, 1013, 1013, 1013, 1013, 1014, 1014,
+ 1014, 1014, 1014, 1015, 1015, 1015, 1015, 1015, 1016, 1016, 1016,
+ 1016, 1017, 1017, 1017, 1017, 1017, 1018, 1018, 1018, 1018, 1018,
+ 1019, 1019, 1019, 1019, 1019, 1020, 1020, 1020, 1020, 1020, 1021,
+ 1021, 1021, 1021, 1021, 1022, 1022, 1022, 1022, 1022, 1023, 1023,
+ 1023,
+};
+
+static const u16 xgamma10_03[GAMMA10_TABLE_LENGTH] = {
+ 0, 128, 157, 178, 194, 207, 219, 229, 239, 247, 255,
+ 263, 270, 276, 282, 288, 294, 299, 304, 309, 314, 319,
+ 323, 328, 332, 336, 340, 344, 348, 351, 355, 358, 362,
+ 365, 368, 372, 375, 378, 381, 384, 387, 390, 393, 395,
+ 398, 401, 403, 406, 409, 411, 414, 416, 419, 421, 423,
+ 426, 428, 430, 432, 435, 437, 439, 441, 443, 445, 447,
+ 450, 452, 454, 456, 458, 460, 461, 463, 465, 467, 469,
+ 471, 473, 474, 476, 478, 480, 482, 483, 485, 487, 488,
+ 490, 492, 493, 495, 497, 498, 500, 501, 503, 505, 506,
+ 508, 509, 511, 512, 514, 515, 517, 518, 520, 521, 523,
+ 524, 525, 527, 528, 530, 531, 532, 534, 535, 537, 538,
+ 539, 541, 542, 543, 544, 546, 547, 548, 550, 551, 552,
+ 553, 555, 556, 557, 558, 560, 561, 562, 563, 565, 566,
+ 567, 568, 569, 570, 572, 573, 574, 575, 576, 577, 579,
+ 580, 581, 582, 583, 584, 585, 586, 587, 589, 590, 591,
+ 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602,
+ 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613,
+ 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624,
+ 625, 626, 627, 628, 629, 630, 631, 632, 633, 633, 634,
+ 635, 636, 637, 638, 639, 640, 641, 642, 642, 643, 644,
+ 645, 646, 647, 648, 649, 649, 650, 651, 652, 653, 654,
+ 655, 655, 656, 657, 658, 659, 660, 661, 661, 662, 663,
+ 664, 665, 665, 666, 667, 668, 669, 670, 670, 671, 672,
+ 673, 674, 674, 675, 676, 677, 677, 678, 679, 680, 681,
+ 681, 682, 683, 684, 684, 685, 686, 687, 688, 688, 689,
+ 690, 691, 691, 692, 693, 694, 694, 695, 696, 696, 697,
+ 698, 699, 699, 700, 701, 702, 702, 703, 704, 704, 705,
+ 706, 707, 707, 708, 709, 709, 710, 711, 712, 712, 713,
+ 714, 714, 715, 716, 716, 717, 718, 718, 719, 720, 721,
+ 721, 722, 723, 723, 724, 725, 725, 726, 727, 727, 728,
+ 729, 729, 730, 731, 731, 732, 733, 733, 734, 734, 735,
+ 736, 736, 737, 738, 738, 739, 740, 740, 741, 742, 742,
+ 743, 743, 744, 745, 745, 746, 747, 747, 748, 748, 749,
+ 750, 750, 751, 752, 752, 753, 753, 754, 755, 755, 756,
+ 756, 757, 758, 758, 759, 759, 760, 761, 761, 762, 762,
+ 763, 764, 764, 765, 765, 766, 767, 767, 768, 768, 769,
+ 770, 770, 771, 771, 772, 772, 773, 774, 774, 775, 775,
+ 776, 776, 777, 778, 778, 779, 779, 780, 780, 781, 782,
+ 782, 783, 783, 784, 784, 785, 785, 786, 787, 787, 788,
+ 788, 789, 789, 790, 790, 791, 792, 792, 793, 793, 794,
+ 794, 795, 795, 796, 796, 797, 797, 798, 799, 799, 800,
+ 800, 801, 801, 802, 802, 803, 803, 804, 804, 805, 805,
+ 806, 806, 807, 808, 808, 809, 809, 810, 810, 811, 811,
+ 812, 812, 813, 813, 814, 814, 815, 815, 816, 816, 817,
+ 817, 818, 818, 819, 819, 820, 820, 821, 821, 822, 822,
+ 823, 823, 824, 824, 825, 825, 826, 826, 827, 827, 828,
+ 828, 829, 829, 830, 830, 831, 831, 832, 832, 833, 833,
+ 834, 834, 835, 835, 836, 836, 836, 837, 837, 838, 838,
+ 839, 839, 840, 840, 841, 841, 842, 842, 843, 843, 844,
+ 844, 845, 845, 845, 846, 846, 847, 847, 848, 848, 849,
+ 849, 850, 850, 851, 851, 852, 852, 852, 853, 853, 854,
+ 854, 855, 855, 856, 856, 857, 857, 857, 858, 858, 859,
+ 859, 860, 860, 861, 861, 862, 862, 862, 863, 863, 864,
+ 864, 865, 865, 866, 866, 866, 867, 867, 868, 868, 869,
+ 869, 869, 870, 870, 871, 871, 872, 872, 873, 873, 873,
+ 874, 874, 875, 875, 876, 876, 876, 877, 877, 878, 878,
+ 879, 879, 879, 880, 880, 881, 881, 882, 882, 882, 883,
+ 883, 884, 884, 885, 885, 885, 886, 886, 887, 887, 887,
+ 888, 888, 889, 889, 890, 890, 890, 891, 891, 892, 892,
+ 892, 893, 893, 894, 894, 895, 895, 895, 896, 896, 897,
+ 897, 897, 898, 898, 899, 899, 899, 900, 900, 901, 901,
+ 901, 902, 902, 903, 903, 903, 904, 904, 905, 905, 905,
+ 906, 906, 907, 907, 907, 908, 908, 909, 909, 909, 910,
+ 910, 911, 911, 911, 912, 912, 913, 913, 913, 914, 914,
+ 915, 915, 915, 916, 916, 916, 917, 917, 918, 918, 918,
+ 919, 919, 920, 920, 920, 921, 921, 921, 922, 922, 923,
+ 923, 923, 924, 924, 925, 925, 925, 926, 926, 926, 927,
+ 927, 928, 928, 928, 929, 929, 929, 930, 930, 931, 931,
+ 931, 932, 932, 932, 933, 933, 934, 934, 934, 935, 935,
+ 935, 936, 936, 936, 937, 937, 938, 938, 938, 939, 939,
+ 939, 940, 940, 941, 941, 941, 942, 942, 942, 943, 943,
+ 943, 944, 944, 945, 945, 945, 946, 946, 946, 947, 947,
+ 947, 948, 948, 948, 949, 949, 950, 950, 950, 951, 951,
+ 951, 952, 952, 952, 953, 953, 953, 954, 954, 955, 955,
+ 955, 956, 956, 956, 957, 957, 957, 958, 958, 958, 959,
+ 959, 959, 960, 960, 960, 961, 961, 962, 962, 962, 963,
+ 963, 963, 964, 964, 964, 965, 965, 965, 966, 966, 966,
+ 967, 967, 967, 968, 968, 968, 969, 969, 969, 970, 970,
+ 970, 971, 971, 971, 972, 972, 972, 973, 973, 973, 974,
+ 974, 974, 975, 975, 975, 976, 976, 976, 977, 977, 977,
+ 978, 978, 978, 979, 979, 979, 980, 980, 980, 981, 981,
+ 981, 982, 982, 982, 983, 983, 983, 984, 984, 984, 985,
+ 985, 985, 986, 986, 986, 987, 987, 987, 988, 988, 988,
+ 989, 989, 989, 990, 990, 990, 991, 991, 991, 992, 992,
+ 992, 993, 993, 993, 994, 994, 994, 994, 995, 995, 995,
+ 996, 996, 996, 997, 997, 997, 998, 998, 998, 999, 999,
+ 999, 1000, 1000, 1000, 1001, 1001, 1001, 1001, 1002, 1002, 1002,
+ 1003, 1003, 1003, 1004, 1004, 1004, 1005, 1005, 1005, 1006, 1006,
+ 1006, 1006, 1007, 1007, 1007, 1008, 1008, 1008, 1009, 1009, 1009,
+ 1010, 1010, 1010, 1011, 1011, 1011, 1011, 1012, 1012, 1012, 1013,
+ 1013, 1013, 1014, 1014, 1014, 1015, 1015, 1015, 1015, 1016, 1016,
+ 1016, 1017, 1017, 1017, 1018, 1018, 1018, 1018, 1019, 1019, 1019,
+ 1020, 1020, 1020, 1021, 1021, 1021, 1021, 1022, 1022, 1022, 1023,
+ 1023,
+};
+
+static const u16 xgamma10_04[GAMMA10_TABLE_LENGTH] = {
+ 0, 64, 84, 99, 111, 122, 131, 139, 147, 154, 161,
+ 167, 173, 178, 184, 189, 194, 199, 203, 208, 212, 216,
+ 220, 224, 228, 232, 235, 239, 243, 246, 249, 253, 256,
+ 259, 262, 265, 268, 271, 274, 277, 280, 283, 285, 288,
+ 291, 293, 296, 298, 301, 303, 306, 308, 311, 313, 315,
+ 318, 320, 322, 325, 327, 329, 331, 333, 335, 338, 340,
+ 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362,
+ 364, 365, 367, 369, 371, 373, 375, 376, 378, 380, 382,
+ 383, 385, 387, 389, 390, 392, 394, 395, 397, 399, 400,
+ 402, 404, 405, 407, 408, 410, 412, 413, 415, 416, 418,
+ 419, 421, 422, 424, 425, 427, 428, 430, 431, 433, 434,
+ 436, 437, 438, 440, 441, 443, 444, 445, 447, 448, 450,
+ 451, 452, 454, 455, 456, 458, 459, 460, 462, 463, 464,
+ 466, 467, 468, 470, 471, 472, 473, 475, 476, 477, 478,
+ 480, 481, 482, 483, 485, 486, 487, 488, 489, 491, 492,
+ 493, 494, 495, 497, 498, 499, 500, 501, 503, 504, 505,
+ 506, 507, 508, 509, 511, 512, 513, 514, 515, 516, 517,
+ 518, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529,
+ 530, 531, 533, 534, 535, 536, 537, 538, 539, 540, 541,
+ 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552,
+ 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563,
+ 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
+ 575, 576, 577, 578, 578, 579, 580, 581, 582, 583, 584,
+ 585, 586, 587, 588, 589, 590, 591, 591, 592, 593, 594,
+ 595, 596, 597, 598, 599, 600, 600, 601, 602, 603, 604,
+ 605, 606, 607, 607, 608, 609, 610, 611, 612, 613, 614,
+ 614, 615, 616, 617, 618, 619, 620, 620, 621, 622, 623,
+ 624, 625, 625, 626, 627, 628, 629, 630, 630, 631, 632,
+ 633, 634, 635, 635, 636, 637, 638, 639, 639, 640, 641,
+ 642, 643, 643, 644, 645, 646, 647, 647, 648, 649, 650,
+ 651, 651, 652, 653, 654, 655, 655, 656, 657, 658, 658,
+ 659, 660, 661, 662, 662, 663, 664, 665, 665, 666, 667,
+ 668, 668, 669, 670, 671, 671, 672, 673, 674, 674, 675,
+ 676, 677, 677, 678, 679, 680, 680, 681, 682, 683, 683,
+ 684, 685, 685, 686, 687, 688, 688, 689, 690, 691, 691,
+ 692, 693, 693, 694, 695, 696, 696, 697, 698, 698, 699,
+ 700, 701, 701, 702, 703, 703, 704, 705, 705, 706, 707,
+ 708, 708, 709, 710, 710, 711, 712, 712, 713, 714, 714,
+ 715, 716, 717, 717, 718, 719, 719, 720, 721, 721, 722,
+ 723, 723, 724, 725, 725, 726, 727, 727, 728, 729, 729,
+ 730, 731, 731, 732, 733, 733, 734, 735, 735, 736, 737,
+ 737, 738, 739, 739, 740, 740, 741, 742, 742, 743, 744,
+ 744, 745, 746, 746, 747, 748, 748, 749, 749, 750, 751,
+ 751, 752, 753, 753, 754, 755, 755, 756, 756, 757, 758,
+ 758, 759, 760, 760, 761, 761, 762, 763, 763, 764, 765,
+ 765, 766, 766, 767, 768, 768, 769, 769, 770, 771, 771,
+ 772, 773, 773, 774, 774, 775, 776, 776, 777, 777, 778,
+ 779, 779, 780, 780, 781, 782, 782, 783, 783, 784, 785,
+ 785, 786, 786, 787, 788, 788, 789, 789, 790, 791, 791,
+ 792, 792, 793, 793, 794, 795, 795, 796, 796, 797, 798,
+ 798, 799, 799, 800, 800, 801, 802, 802, 803, 803, 804,
+ 804, 805, 806, 806, 807, 807, 808, 808, 809, 810, 810,
+ 811, 811, 812, 812, 813, 814, 814, 815, 815, 816, 816,
+ 817, 818, 818, 819, 819, 820, 820, 821, 821, 822, 823,
+ 823, 824, 824, 825, 825, 826, 826, 827, 827, 828, 829,
+ 829, 830, 830, 831, 831, 832, 832, 833, 834, 834, 835,
+ 835, 836, 836, 837, 837, 838, 838, 839, 839, 840, 841,
+ 841, 842, 842, 843, 843, 844, 844, 845, 845, 846, 846,
+ 847, 847, 848, 849, 849, 850, 850, 851, 851, 852, 852,
+ 853, 853, 854, 854, 855, 855, 856, 856, 857, 857, 858,
+ 859, 859, 860, 860, 861, 861, 862, 862, 863, 863, 864,
+ 864, 865, 865, 866, 866, 867, 867, 868, 868, 869, 869,
+ 870, 870, 871, 871, 872, 872, 873, 873, 874, 874, 875,
+ 875, 876, 876, 877, 877, 878, 878, 879, 879, 880, 880,
+ 881, 881, 882, 882, 883, 883, 884, 884, 885, 885, 886,
+ 886, 887, 887, 888, 888, 889, 889, 890, 890, 891, 891,
+ 892, 892, 893, 893, 894, 894, 895, 895, 896, 896, 897,
+ 897, 898, 898, 899, 899, 900, 900, 901, 901, 902, 902,
+ 903, 903, 904, 904, 905, 905, 905, 906, 906, 907, 907,
+ 908, 908, 909, 909, 910, 910, 911, 911, 912, 912, 913,
+ 913, 914, 914, 915, 915, 915, 916, 916, 917, 917, 918,
+ 918, 919, 919, 920, 920, 921, 921, 922, 922, 923, 923,
+ 923, 924, 924, 925, 925, 926, 926, 927, 927, 928, 928,
+ 929, 929, 929, 930, 930, 931, 931, 932, 932, 933, 933,
+ 934, 934, 935, 935, 935, 936, 936, 937, 937, 938, 938,
+ 939, 939, 940, 940, 940, 941, 941, 942, 942, 943, 943,
+ 944, 944, 945, 945, 945, 946, 946, 947, 947, 948, 948,
+ 949, 949, 949, 950, 950, 951, 951, 952, 952, 953, 953,
+ 953, 954, 954, 955, 955, 956, 956, 957, 957, 957, 958,
+ 958, 959, 959, 960, 960, 961, 961, 961, 962, 962, 963,
+ 963, 964, 964, 965, 965, 965, 966, 966, 967, 967, 968,
+ 968, 968, 969, 969, 970, 970, 971, 971, 971, 972, 972,
+ 973, 973, 974, 974, 974, 975, 975, 976, 976, 977, 977,
+ 977, 978, 978, 979, 979, 980, 980, 980, 981, 981, 982,
+ 982, 983, 983, 983, 984, 984, 985, 985, 986, 986, 986,
+ 987, 987, 988, 988, 989, 989, 989, 990, 990, 991, 991,
+ 991, 992, 992, 993, 993, 994, 994, 994, 995, 995, 996,
+ 996, 996, 997, 997, 998, 998, 999, 999, 999, 1000, 1000,
+ 1001, 1001, 1001, 1002, 1002, 1003, 1003, 1004, 1004, 1004, 1005,
+ 1005, 1006, 1006, 1006, 1007, 1007, 1008, 1008, 1008, 1009, 1009,
+ 1010, 1010, 1010, 1011, 1011, 1012, 1012, 1013, 1013, 1013, 1014,
+ 1014, 1015, 1015, 1015, 1016, 1016, 1017, 1017, 1017, 1018, 1018,
+ 1019, 1019, 1019, 1020, 1020, 1021, 1021, 1021, 1022, 1022, 1023,
+ 1023,
+};
+
+static const u16 xgamma10_05[GAMMA10_TABLE_LENGTH] = {
+ 0, 32, 45, 55, 64, 72, 78, 85, 90, 96, 101,
+ 106, 111, 115, 120, 124, 128, 132, 136, 139, 143, 147,
+ 150, 153, 157, 160, 163, 166, 169, 172, 175, 178, 181,
+ 184, 186, 189, 192, 195, 197, 200, 202, 205, 207, 210,
+ 212, 215, 217, 219, 222, 224, 226, 228, 231, 233, 235,
+ 237, 239, 241, 244, 246, 248, 250, 252, 254, 256, 258,
+ 260, 262, 264, 266, 268, 270, 271, 273, 275, 277, 279,
+ 281, 282, 284, 286, 288, 290, 291, 293, 295, 297, 298,
+ 300, 302, 303, 305, 307, 308, 310, 312, 313, 315, 317,
+ 318, 320, 321, 323, 325, 326, 328, 329, 331, 332, 334,
+ 335, 337, 338, 340, 341, 343, 344, 346, 347, 349, 350,
+ 352, 353, 355, 356, 358, 359, 360, 362, 363, 365, 366,
+ 367, 369, 370, 372, 373, 374, 376, 377, 378, 380, 381,
+ 382, 384, 385, 386, 388, 389, 390, 392, 393, 394, 396,
+ 397, 398, 399, 401, 402, 403, 405, 406, 407, 408, 410,
+ 411, 412, 413, 415, 416, 417, 418, 419, 421, 422, 423,
+ 424, 426, 427, 428, 429, 430, 431, 433, 434, 435, 436,
+ 437, 439, 440, 441, 442, 443, 444, 445, 447, 448, 449,
+ 450, 451, 452, 453, 455, 456, 457, 458, 459, 460, 461,
+ 462, 463, 465, 466, 467, 468, 469, 470, 471, 472, 473,
+ 474, 475, 477, 478, 479, 480, 481, 482, 483, 484, 485,
+ 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 497,
+ 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508,
+ 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519,
+ 520, 521, 522, 523, 524, 525, 526, 527, 527, 528, 529,
+ 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540,
+ 541, 542, 543, 544, 545, 546, 547, 547, 548, 549, 550,
+ 551, 552, 553, 554, 555, 556, 557, 558, 559, 559, 560,
+ 561, 562, 563, 564, 565, 566, 567, 568, 569, 569, 570,
+ 571, 572, 573, 574, 575, 576, 577, 577, 578, 579, 580,
+ 581, 582, 583, 584, 585, 585, 586, 587, 588, 589, 590,
+ 591, 591, 592, 593, 594, 595, 596, 597, 598, 598, 599,
+ 600, 601, 602, 603, 603, 604, 605, 606, 607, 608, 609,
+ 609, 610, 611, 612, 613, 614, 614, 615, 616, 617, 618,
+ 619, 619, 620, 621, 622, 623, 623, 624, 625, 626, 627,
+ 628, 628, 629, 630, 631, 632, 632, 633, 634, 635, 636,
+ 636, 637, 638, 639, 640, 640, 641, 642, 643, 644, 644,
+ 645, 646, 647, 648, 648, 649, 650, 651, 652, 652, 653,
+ 654, 655, 655, 656, 657, 658, 659, 659, 660, 661, 662,
+ 662, 663, 664, 665, 666, 666, 667, 668, 669, 669, 670,
+ 671, 672, 672, 673, 674, 675, 675, 676, 677, 678, 678,
+ 679, 680, 681, 681, 682, 683, 684, 684, 685, 686, 687,
+ 687, 688, 689, 690, 690, 691, 692, 693, 693, 694, 695,
+ 696, 696, 697, 698, 699, 699, 700, 701, 701, 702, 703,
+ 704, 704, 705, 706, 707, 707, 708, 709, 709, 710, 711,
+ 712, 712, 713, 714, 714, 715, 716, 717, 717, 718, 719,
+ 719, 720, 721, 722, 722, 723, 724, 724, 725, 726, 727,
+ 727, 728, 729, 729, 730, 731, 731, 732, 733, 734, 734,
+ 735, 736, 736, 737, 738, 738, 739, 740, 740, 741, 742,
+ 743, 743, 744, 745, 745, 746, 747, 747, 748, 749, 749,
+ 750, 751, 751, 752, 753, 754, 754, 755, 756, 756, 757,
+ 758, 758, 759, 760, 760, 761, 762, 762, 763, 764, 764,
+ 765, 766, 766, 767, 768, 768, 769, 770, 770, 771, 772,
+ 772, 773, 774, 774, 775, 776, 776, 777, 778, 778, 779,
+ 780, 780, 781, 781, 782, 783, 783, 784, 785, 785, 786,
+ 787, 787, 788, 789, 789, 790, 791, 791, 792, 793, 793,
+ 794, 794, 795, 796, 796, 797, 798, 798, 799, 800, 800,
+ 801, 802, 802, 803, 803, 804, 805, 805, 806, 807, 807,
+ 808, 809, 809, 810, 810, 811, 812, 812, 813, 814, 814,
+ 815, 815, 816, 817, 817, 818, 819, 819, 820, 820, 821,
+ 822, 822, 823, 824, 824, 825, 825, 826, 827, 827, 828,
+ 829, 829, 830, 830, 831, 832, 832, 833, 833, 834, 835,
+ 835, 836, 836, 837, 838, 838, 839, 840, 840, 841, 841,
+ 842, 843, 843, 844, 844, 845, 846, 846, 847, 847, 848,
+ 849, 849, 850, 850, 851, 852, 852, 853, 853, 854, 855,
+ 855, 856, 856, 857, 858, 858, 859, 859, 860, 861, 861,
+ 862, 862, 863, 864, 864, 865, 865, 866, 867, 867, 868,
+ 868, 869, 869, 870, 871, 871, 872, 872, 873, 874, 874,
+ 875, 875, 876, 877, 877, 878, 878, 879, 879, 880, 881,
+ 881, 882, 882, 883, 883, 884, 885, 885, 886, 886, 887,
+ 888, 888, 889, 889, 890, 890, 891, 892, 892, 893, 893,
+ 894, 894, 895, 896, 896, 897, 897, 898, 898, 899, 900,
+ 900, 901, 901, 902, 902, 903, 904, 904, 905, 905, 906,
+ 906, 907, 907, 908, 909, 909, 910, 910, 911, 911, 912,
+ 913, 913, 914, 914, 915, 915, 916, 916, 917, 918, 918,
+ 919, 919, 920, 920, 921, 921, 922, 923, 923, 924, 924,
+ 925, 925, 926, 926, 927, 928, 928, 929, 929, 930, 930,
+ 931, 931, 932, 932, 933, 934, 934, 935, 935, 936, 936,
+ 937, 937, 938, 939, 939, 940, 940, 941, 941, 942, 942,
+ 943, 943, 944, 944, 945, 946, 946, 947, 947, 948, 948,
+ 949, 949, 950, 950, 951, 952, 952, 953, 953, 954, 954,
+ 955, 955, 956, 956, 957, 957, 958, 958, 959, 960, 960,
+ 961, 961, 962, 962, 963, 963, 964, 964, 965, 965, 966,
+ 966, 967, 967, 968, 969, 969, 970, 970, 971, 971, 972,
+ 972, 973, 973, 974, 974, 975, 975, 976, 976, 977, 977,
+ 978, 979, 979, 980, 980, 981, 981, 982, 982, 983, 983,
+ 984, 984, 985, 985, 986, 986, 987, 987, 988, 988, 989,
+ 989, 990, 990, 991, 992, 992, 993, 993, 994, 994, 995,
+ 995, 996, 996, 997, 997, 998, 998, 999, 999, 1000, 1000,
+ 1001, 1001, 1002, 1002, 1003, 1003, 1004, 1004, 1005, 1005, 1006,
+ 1006, 1007, 1007, 1008, 1008, 1009, 1009, 1010, 1010, 1011, 1011,
+ 1012, 1012, 1013, 1013, 1014, 1014, 1015, 1015, 1016, 1016, 1017,
+ 1017, 1018, 1018, 1019, 1019, 1020, 1020, 1021, 1021, 1022, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_06[GAMMA10_TABLE_LENGTH] = {
+ 0, 16, 24, 31, 37, 42, 47, 51, 56, 60, 64,
+ 67, 71, 75, 78, 81, 84, 88, 91, 94, 97, 99,
+ 102, 105, 108, 110, 113, 116, 118, 121, 123, 126, 128,
+ 130, 133, 135, 137, 140, 142, 144, 146, 148, 151, 153,
+ 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175,
+ 177, 179, 181, 183, 185, 187, 188, 190, 192, 194, 196,
+ 198, 199, 201, 203, 205, 206, 208, 210, 212, 213, 215,
+ 217, 218, 220, 222, 223, 225, 227, 228, 230, 232, 233,
+ 235, 236, 238, 240, 241, 243, 244, 246, 247, 249, 250,
+ 252, 253, 255, 257, 258, 260, 261, 263, 264, 265, 267,
+ 268, 270, 271, 273, 274, 276, 277, 279, 280, 281, 283,
+ 284, 286, 287, 288, 290, 291, 293, 294, 295, 297, 298,
+ 299, 301, 302, 303, 305, 306, 308, 309, 310, 312, 313,
+ 314, 315, 317, 318, 319, 321, 322, 323, 325, 326, 327,
+ 328, 330, 331, 332, 334, 335, 336, 337, 339, 340, 341,
+ 342, 344, 345, 346, 347, 349, 350, 351, 352, 353, 355,
+ 356, 357, 358, 359, 361, 362, 363, 364, 365, 367, 368,
+ 369, 370, 371, 373, 374, 375, 376, 377, 378, 380, 381,
+ 382, 383, 384, 385, 387, 388, 389, 390, 391, 392, 393,
+ 394, 396, 397, 398, 399, 400, 401, 402, 403, 405, 406,
+ 407, 408, 409, 410, 411, 412, 413, 415, 416, 417, 418,
+ 419, 420, 421, 422, 423, 424, 425, 426, 428, 429, 430,
+ 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441,
+ 442, 443, 445, 446, 447, 448, 449, 450, 451, 452, 453,
+ 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464,
+ 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475,
+ 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486,
+ 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497,
+ 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 507,
+ 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518,
+ 519, 520, 521, 522, 523, 524, 525, 525, 526, 527, 528,
+ 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 538,
+ 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 548,
+ 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 558,
+ 559, 560, 561, 562, 563, 564, 565, 566, 566, 567, 568,
+ 569, 570, 571, 572, 573, 574, 574, 575, 576, 577, 578,
+ 579, 580, 581, 581, 582, 583, 584, 585, 586, 587, 588,
+ 588, 589, 590, 591, 592, 593, 594, 594, 595, 596, 597,
+ 598, 599, 600, 601, 601, 602, 603, 604, 605, 606, 606,
+ 607, 608, 609, 610, 611, 612, 612, 613, 614, 615, 616,
+ 617, 617, 618, 619, 620, 621, 622, 622, 623, 624, 625,
+ 626, 627, 627, 628, 629, 630, 631, 632, 632, 633, 634,
+ 635, 636, 637, 637, 638, 639, 640, 641, 642, 642, 643,
+ 644, 645, 646, 646, 647, 648, 649, 650, 650, 651, 652,
+ 653, 654, 655, 655, 656, 657, 658, 659, 659, 660, 661,
+ 662, 663, 663, 664, 665, 666, 667, 667, 668, 669, 670,
+ 671, 671, 672, 673, 674, 675, 675, 676, 677, 678, 678,
+ 679, 680, 681, 682, 682, 683, 684, 685, 686, 686, 687,
+ 688, 689, 689, 690, 691, 692, 693, 693, 694, 695, 696,
+ 696, 697, 698, 699, 700, 700, 701, 702, 703, 703, 704,
+ 705, 706, 707, 707, 708, 709, 710, 710, 711, 712, 713,
+ 713, 714, 715, 716, 716, 717, 718, 719, 719, 720, 721,
+ 722, 723, 723, 724, 725, 726, 726, 727, 728, 729, 729,
+ 730, 731, 732, 732, 733, 734, 735, 735, 736, 737, 738,
+ 738, 739, 740, 741, 741, 742, 743, 743, 744, 745, 746,
+ 746, 747, 748, 749, 749, 750, 751, 752, 752, 753, 754,
+ 755, 755, 756, 757, 758, 758, 759, 760, 760, 761, 762,
+ 763, 763, 764, 765, 766, 766, 767, 768, 768, 769, 770,
+ 771, 771, 772, 773, 774, 774, 775, 776, 776, 777, 778,
+ 779, 779, 780, 781, 781, 782, 783, 784, 784, 785, 786,
+ 786, 787, 788, 789, 789, 790, 791, 791, 792, 793, 794,
+ 794, 795, 796, 796, 797, 798, 799, 799, 800, 801, 801,
+ 802, 803, 803, 804, 805, 806, 806, 807, 808, 808, 809,
+ 810, 811, 811, 812, 813, 813, 814, 815, 815, 816, 817,
+ 818, 818, 819, 820, 820, 821, 822, 822, 823, 824, 824,
+ 825, 826, 827, 827, 828, 829, 829, 830, 831, 831, 832,
+ 833, 833, 834, 835, 835, 836, 837, 838, 838, 839, 840,
+ 840, 841, 842, 842, 843, 844, 844, 845, 846, 846, 847,
+ 848, 848, 849, 850, 851, 851, 852, 853, 853, 854, 855,
+ 855, 856, 857, 857, 858, 859, 859, 860, 861, 861, 862,
+ 863, 863, 864, 865, 865, 866, 867, 867, 868, 869, 869,
+ 870, 871, 871, 872, 873, 873, 874, 875, 875, 876, 877,
+ 877, 878, 879, 879, 880, 881, 881, 882, 883, 883, 884,
+ 885, 885, 886, 887, 887, 888, 889, 889, 890, 891, 891,
+ 892, 893, 893, 894, 895, 895, 896, 897, 897, 898, 898,
+ 899, 900, 900, 901, 902, 902, 903, 904, 904, 905, 906,
+ 906, 907, 908, 908, 909, 910, 910, 911, 911, 912, 913,
+ 913, 914, 915, 915, 916, 917, 917, 918, 919, 919, 920,
+ 921, 921, 922, 922, 923, 924, 924, 925, 926, 926, 927,
+ 928, 928, 929, 930, 930, 931, 931, 932, 933, 933, 934,
+ 935, 935, 936, 937, 937, 938, 938, 939, 940, 940, 941,
+ 942, 942, 943, 944, 944, 945, 945, 946, 947, 947, 948,
+ 949, 949, 950, 950, 951, 952, 952, 953, 954, 954, 955,
+ 956, 956, 957, 957, 958, 959, 959, 960, 961, 961, 962,
+ 962, 963, 964, 964, 965, 966, 966, 967, 967, 968, 969,
+ 969, 970, 970, 971, 972, 972, 973, 974, 974, 975, 975,
+ 976, 977, 977, 978, 979, 979, 980, 980, 981, 982, 982,
+ 983, 983, 984, 985, 985, 986, 987, 987, 988, 988, 989,
+ 990, 990, 991, 991, 992, 993, 993, 994, 995, 995, 996,
+ 996, 997, 998, 998, 999, 999, 1000, 1001, 1001, 1002, 1002,
+ 1003, 1004, 1004, 1005, 1006, 1006, 1007, 1007, 1008, 1009, 1009,
+ 1010, 1010, 1011, 1012, 1012, 1013, 1013, 1014, 1015, 1015, 1016,
+ 1016, 1017, 1018, 1018, 1019, 1019, 1020, 1021, 1021, 1022, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_07[GAMMA10_TABLE_LENGTH] = {
+ 0, 8, 13, 17, 21, 25, 28, 31, 34, 37, 40,
+ 43, 46, 48, 51, 53, 56, 58, 60, 63, 65, 67,
+ 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90,
+ 92, 94, 96, 98, 100, 102, 104, 106, 108, 109, 111,
+ 113, 115, 117, 118, 120, 122, 124, 125, 127, 129, 131,
+ 132, 134, 136, 137, 139, 140, 142, 144, 145, 147, 149,
+ 150, 152, 153, 155, 157, 158, 160, 161, 163, 164, 166,
+ 167, 169, 170, 172, 173, 175, 176, 178, 179, 181, 182,
+ 184, 185, 187, 188, 190, 191, 192, 194, 195, 197, 198,
+ 199, 201, 202, 204, 205, 206, 208, 209, 211, 212, 213,
+ 215, 216, 217, 219, 220, 222, 223, 224, 226, 227, 228,
+ 230, 231, 232, 234, 235, 236, 237, 239, 240, 241, 243,
+ 244, 245, 247, 248, 249, 250, 252, 253, 254, 256, 257,
+ 258, 259, 261, 262, 263, 264, 266, 267, 268, 269, 271,
+ 272, 273, 274, 275, 277, 278, 279, 280, 282, 283, 284,
+ 285, 286, 288, 289, 290, 291, 292, 294, 295, 296, 297,
+ 298, 300, 301, 302, 303, 304, 306, 307, 308, 309, 310,
+ 311, 313, 314, 315, 316, 317, 318, 319, 321, 322, 323,
+ 324, 325, 326, 327, 329, 330, 331, 332, 333, 334, 335,
+ 337, 338, 339, 340, 341, 342, 343, 344, 346, 347, 348,
+ 349, 350, 351, 352, 353, 354, 355, 357, 358, 359, 360,
+ 361, 362, 363, 364, 365, 366, 368, 369, 370, 371, 372,
+ 373, 374, 375, 376, 377, 378, 379, 380, 382, 383, 384,
+ 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395,
+ 396, 397, 398, 400, 401, 402, 403, 404, 405, 406, 407,
+ 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418,
+ 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429,
+ 430, 431, 432, 433, 434, 435, 436, 437, 439, 440, 441,
+ 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452,
+ 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462,
+ 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473,
+ 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484,
+ 485, 486, 487, 488, 489, 490, 491, 492, 492, 493, 494,
+ 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505,
+ 506, 507, 508, 509, 510, 511, 511, 512, 513, 514, 515,
+ 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526,
+ 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536,
+ 537, 538, 538, 539, 540, 541, 542, 543, 544, 545, 546,
+ 547, 548, 549, 549, 550, 551, 552, 553, 554, 555, 556,
+ 557, 558, 559, 560, 560, 561, 562, 563, 564, 565, 566,
+ 567, 568, 569, 569, 570, 571, 572, 573, 574, 575, 576,
+ 577, 578, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 586, 587, 588, 589, 590, 591, 592, 593, 594, 594, 595,
+ 596, 597, 598, 599, 600, 601, 601, 602, 603, 604, 605,
+ 606, 607, 608, 608, 609, 610, 611, 612, 613, 614, 615,
+ 615, 616, 617, 618, 619, 620, 621, 622, 622, 623, 624,
+ 625, 626, 627, 628, 628, 629, 630, 631, 632, 633, 634,
+ 634, 635, 636, 637, 638, 639, 640, 640, 641, 642, 643,
+ 644, 645, 646, 646, 647, 648, 649, 650, 651, 652, 652,
+ 653, 654, 655, 656, 657, 657, 658, 659, 660, 661, 662,
+ 663, 663, 664, 665, 666, 667, 668, 668, 669, 670, 671,
+ 672, 673, 673, 674, 675, 676, 677, 678, 678, 679, 680,
+ 681, 682, 683, 683, 684, 685, 686, 687, 688, 688, 689,
+ 690, 691, 692, 693, 693, 694, 695, 696, 697, 698, 698,
+ 699, 700, 701, 702, 703, 703, 704, 705, 706, 707, 707,
+ 708, 709, 710, 711, 712, 712, 713, 714, 715, 716, 716,
+ 717, 718, 719, 720, 721, 721, 722, 723, 724, 725, 725,
+ 726, 727, 728, 729, 729, 730, 731, 732, 733, 733, 734,
+ 735, 736, 737, 738, 738, 739, 740, 741, 742, 742, 743,
+ 744, 745, 746, 746, 747, 748, 749, 750, 750, 751, 752,
+ 753, 754, 754, 755, 756, 757, 758, 758, 759, 760, 761,
+ 761, 762, 763, 764, 765, 765, 766, 767, 768, 769, 769,
+ 770, 771, 772, 773, 773, 774, 775, 776, 777, 777, 778,
+ 779, 780, 780, 781, 782, 783, 784, 784, 785, 786, 787,
+ 788, 788, 789, 790, 791, 791, 792, 793, 794, 795, 795,
+ 796, 797, 798, 798, 799, 800, 801, 802, 802, 803, 804,
+ 805, 805, 806, 807, 808, 809, 809, 810, 811, 812, 812,
+ 813, 814, 815, 816, 816, 817, 818, 819, 819, 820, 821,
+ 822, 822, 823, 824, 825, 826, 826, 827, 828, 829, 829,
+ 830, 831, 832, 832, 833, 834, 835, 835, 836, 837, 838,
+ 839, 839, 840, 841, 842, 842, 843, 844, 845, 845, 846,
+ 847, 848, 848, 849, 850, 851, 851, 852, 853, 854, 854,
+ 855, 856, 857, 857, 858, 859, 860, 860, 861, 862, 863,
+ 864, 864, 865, 866, 867, 867, 868, 869, 870, 870, 871,
+ 872, 873, 873, 874, 875, 876, 876, 877, 878, 879, 879,
+ 880, 881, 881, 882, 883, 884, 884, 885, 886, 887, 887,
+ 888, 889, 890, 890, 891, 892, 893, 893, 894, 895, 896,
+ 896, 897, 898, 899, 899, 900, 901, 902, 902, 903, 904,
+ 904, 905, 906, 907, 907, 908, 909, 910, 910, 911, 912,
+ 913, 913, 914, 915, 916, 916, 917, 918, 918, 919, 920,
+ 921, 921, 922, 923, 924, 924, 925, 926, 927, 927, 928,
+ 929, 929, 930, 931, 932, 932, 933, 934, 935, 935, 936,
+ 937, 937, 938, 939, 940, 940, 941, 942, 943, 943, 944,
+ 945, 945, 946, 947, 948, 948, 949, 950, 950, 951, 952,
+ 953, 953, 954, 955, 956, 956, 957, 958, 958, 959, 960,
+ 961, 961, 962, 963, 963, 964, 965, 966, 966, 967, 968,
+ 968, 969, 970, 971, 971, 972, 973, 973, 974, 975, 976,
+ 976, 977, 978, 978, 979, 980, 981, 981, 982, 983, 983,
+ 984, 985, 986, 986, 987, 988, 988, 989, 990, 991, 991,
+ 992, 993, 993, 994, 995, 996, 996, 997, 998, 998, 999,
+ 1000, 1000, 1001, 1002, 1003, 1003, 1004, 1005, 1005, 1006, 1007,
+ 1008, 1008, 1009, 1010, 1010, 1011, 1012, 1012, 1013, 1014, 1015,
+ 1015, 1016, 1017, 1017, 1018, 1019, 1019, 1020, 1021, 1022, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_08[GAMMA10_TABLE_LENGTH] = {
+ 0, 4, 7, 10, 12, 14, 17, 19, 21, 23, 25,
+ 27, 29, 31, 33, 35, 37, 39, 40, 42, 44, 46,
+ 47, 49, 51, 53, 54, 56, 58, 59, 61, 62, 64,
+ 66, 67, 69, 70, 72, 73, 75, 76, 78, 80, 81,
+ 83, 84, 86, 87, 89, 90, 91, 93, 94, 96, 97,
+ 99, 100, 102, 103, 104, 106, 107, 109, 110, 111, 113,
+ 114, 116, 117, 118, 120, 121, 122, 124, 125, 126, 128,
+ 129, 131, 132, 133, 135, 136, 137, 138, 140, 141, 142,
+ 144, 145, 146, 148, 149, 150, 152, 153, 154, 155, 157,
+ 158, 159, 160, 162, 163, 164, 166, 167, 168, 169, 171,
+ 172, 173, 174, 176, 177, 178, 179, 181, 182, 183, 184,
+ 185, 187, 188, 189, 190, 192, 193, 194, 195, 196, 198,
+ 199, 200, 201, 202, 204, 205, 206, 207, 208, 210, 211,
+ 212, 213, 214, 216, 217, 218, 219, 220, 221, 223, 224,
+ 225, 226, 227, 228, 230, 231, 232, 233, 234, 235, 237,
+ 238, 239, 240, 241, 242, 243, 245, 246, 247, 248, 249,
+ 250, 251, 253, 254, 255, 256, 257, 258, 259, 260, 262,
+ 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 292, 293, 294, 295, 296, 297, 298,
+ 299, 300, 301, 302, 304, 305, 306, 307, 308, 309, 310,
+ 311, 312, 313, 314, 315, 316, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333,
+ 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345,
+ 346, 347, 348, 349, 350, 351, 352, 353, 355, 356, 357,
+ 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368,
+ 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379,
+ 380, 381, 382, 383, 384, 385, 386, 388, 389, 390, 391,
+ 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402,
+ 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424,
+ 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435,
+ 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446,
+ 447, 448, 449, 450, 451, 451, 452, 453, 454, 455, 456,
+ 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467,
+ 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478,
+ 479, 480, 481, 482, 483, 484, 485, 486, 486, 487, 488,
+ 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499,
+ 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 509,
+ 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520,
+ 521, 522, 523, 524, 525, 526, 527, 527, 528, 529, 530,
+ 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541,
+ 542, 543, 543, 544, 545, 546, 547, 548, 549, 550, 551,
+ 552, 553, 554, 555, 556, 557, 557, 558, 559, 560, 561,
+ 562, 563, 564, 565, 566, 567, 568, 569, 570, 570, 571,
+ 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592,
+ 593, 594, 594, 595, 596, 597, 598, 599, 600, 601, 602,
+ 603, 604, 604, 605, 606, 607, 608, 609, 610, 611, 612,
+ 613, 614, 615, 615, 616, 617, 618, 619, 620, 621, 622,
+ 623, 624, 624, 625, 626, 627, 628, 629, 630, 631, 632,
+ 633, 634, 634, 635, 636, 637, 638, 639, 640, 641, 642,
+ 643, 643, 644, 645, 646, 647, 648, 649, 650, 651, 651,
+ 652, 653, 654, 655, 656, 657, 658, 659, 660, 660, 661,
+ 662, 663, 664, 665, 666, 667, 668, 668, 669, 670, 671,
+ 672, 673, 674, 675, 676, 676, 677, 678, 679, 680, 681,
+ 682, 683, 684, 684, 685, 686, 687, 688, 689, 690, 691,
+ 691, 692, 693, 694, 695, 696, 697, 698, 699, 699, 700,
+ 701, 702, 703, 704, 705, 706, 706, 707, 708, 709, 710,
+ 711, 712, 713, 713, 714, 715, 716, 717, 718, 719, 720,
+ 720, 721, 722, 723, 724, 725, 726, 727, 727, 728, 729,
+ 730, 731, 732, 733, 734, 734, 735, 736, 737, 738, 739,
+ 740, 740, 741, 742, 743, 744, 745, 746, 747, 747, 748,
+ 749, 750, 751, 752, 753, 753, 754, 755, 756, 757, 758,
+ 759, 759, 760, 761, 762, 763, 764, 765, 766, 766, 767,
+ 768, 769, 770, 771, 772, 772, 773, 774, 775, 776, 777,
+ 778, 778, 779, 780, 781, 782, 783, 784, 784, 785, 786,
+ 787, 788, 789, 790, 790, 791, 792, 793, 794, 795, 795,
+ 796, 797, 798, 799, 800, 801, 801, 802, 803, 804, 805,
+ 806, 807, 807, 808, 809, 810, 811, 812, 812, 813, 814,
+ 815, 816, 817, 818, 818, 819, 820, 821, 822, 823, 823,
+ 824, 825, 826, 827, 828, 829, 829, 830, 831, 832, 833,
+ 834, 834, 835, 836, 837, 838, 839, 839, 840, 841, 842,
+ 843, 844, 845, 845, 846, 847, 848, 849, 850, 850, 851,
+ 852, 853, 854, 855, 855, 856, 857, 858, 859, 860, 860,
+ 861, 862, 863, 864, 865, 865, 866, 867, 868, 869, 870,
+ 870, 871, 872, 873, 874, 875, 875, 876, 877, 878, 879,
+ 880, 880, 881, 882, 883, 884, 885, 885, 886, 887, 888,
+ 889, 890, 890, 891, 892, 893, 894, 895, 895, 896, 897,
+ 898, 899, 899, 900, 901, 902, 903, 904, 904, 905, 906,
+ 907, 908, 909, 909, 910, 911, 912, 913, 913, 914, 915,
+ 916, 917, 918, 918, 919, 920, 921, 922, 923, 923, 924,
+ 925, 926, 927, 927, 928, 929, 930, 931, 932, 932, 933,
+ 934, 935, 936, 936, 937, 938, 939, 940, 941, 941, 942,
+ 943, 944, 945, 945, 946, 947, 948, 949, 950, 950, 951,
+ 952, 953, 954, 954, 955, 956, 957, 958, 958, 959, 960,
+ 961, 962, 963, 963, 964, 965, 966, 967, 967, 968, 969,
+ 970, 971, 971, 972, 973, 974, 975, 976, 976, 977, 978,
+ 979, 980, 980, 981, 982, 983, 984, 984, 985, 986, 987,
+ 988, 988, 989, 990, 991, 992, 992, 993, 994, 995, 996,
+ 997, 997, 998, 999, 1000, 1001, 1001, 1002, 1003, 1004, 1005,
+ 1005, 1006, 1007, 1008, 1009, 1009, 1010, 1011, 1012, 1013, 1013,
+ 1014, 1015, 1016, 1017, 1017, 1018, 1019, 1020, 1021, 1021, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_09[GAMMA10_TABLE_LENGTH] = {
+ 0, 2, 4, 5, 7, 9, 10, 12, 13, 14, 16,
+ 17, 19, 20, 22, 23, 24, 26, 27, 28, 30, 31,
+ 32, 34, 35, 36, 38, 39, 40, 41, 43, 44, 45,
+ 47, 48, 49, 50, 52, 53, 54, 55, 57, 58, 59,
+ 60, 62, 63, 64, 65, 66, 68, 69, 70, 71, 72,
+ 74, 75, 76, 77, 78, 80, 81, 82, 83, 84, 86,
+ 87, 88, 89, 90, 92, 93, 94, 95, 96, 97, 99,
+ 100, 101, 102, 103, 104, 106, 107, 108, 109, 110, 111,
+ 112, 114, 115, 116, 117, 118, 119, 120, 122, 123, 124,
+ 125, 126, 127, 128, 130, 131, 132, 133, 134, 135, 136,
+ 137, 139, 140, 141, 142, 143, 144, 145, 146, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 158, 159, 160, 161,
+ 162, 163, 164, 165, 166, 168, 169, 170, 171, 172, 173,
+ 174, 175, 176, 177, 178, 180, 181, 182, 183, 184, 185,
+ 186, 187, 188, 189, 190, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221,
+ 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290,
+ 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301,
+ 302, 303, 304, 305, 306, 307, 308, 309, 311, 312, 313,
+ 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335,
+ 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346,
+ 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357,
+ 358, 359, 360, 361, 362, 363, 364, 365, 367, 368, 369,
+ 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380,
+ 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391,
+ 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402,
+ 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424,
+ 425, 426, 427, 427, 428, 429, 430, 431, 432, 433, 434,
+ 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445,
+ 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456,
+ 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467,
+ 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478,
+ 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489,
+ 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499,
+ 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510,
+ 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521,
+ 522, 523, 524, 525, 525, 526, 527, 528, 529, 530, 531,
+ 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542,
+ 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553,
+ 554, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563,
+ 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
+ 575, 576, 577, 578, 579, 579, 580, 581, 582, 583, 584,
+ 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595,
+ 596, 597, 598, 599, 600, 601, 601, 602, 603, 604, 605,
+ 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616,
+ 617, 618, 619, 620, 621, 621, 622, 623, 624, 625, 626,
+ 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637,
+ 638, 639, 640, 640, 641, 642, 643, 644, 645, 646, 647,
+ 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658,
+ 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668,
+ 669, 670, 671, 672, 673, 674, 675, 675, 676, 677, 678,
+ 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689,
+ 690, 691, 691, 692, 693, 694, 695, 696, 697, 698, 699,
+ 700, 701, 702, 703, 704, 705, 706, 706, 707, 708, 709,
+ 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720,
+ 721, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730,
+ 731, 732, 733, 734, 735, 735, 736, 737, 738, 739, 740,
+ 741, 742, 743, 744, 745, 746, 747, 748, 749, 749, 750,
+ 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761,
+ 762, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771,
+ 772, 773, 774, 775, 776, 776, 777, 778, 779, 780, 781,
+ 782, 783, 784, 785, 786, 787, 788, 788, 789, 790, 791,
+ 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 801,
+ 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812,
+ 813, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822,
+ 823, 824, 825, 825, 826, 827, 828, 829, 830, 831, 832,
+ 833, 834, 835, 836, 836, 837, 838, 839, 840, 841, 842,
+ 843, 844, 845, 846, 847, 848, 848, 849, 850, 851, 852,
+ 853, 854, 855, 856, 857, 858, 859, 859, 860, 861, 862,
+ 863, 864, 865, 866, 867, 868, 869, 870, 870, 871, 872,
+ 873, 874, 875, 876, 877, 878, 879, 880, 881, 881, 882,
+ 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 892,
+ 893, 894, 895, 896, 897, 898, 899, 900, 901, 902, 902,
+ 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913,
+ 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923,
+ 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933,
+ 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 943,
+ 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953,
+ 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 962,
+ 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 972,
+ 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 982,
+ 983, 984, 985, 986, 987, 988, 989, 990, 991, 991, 992,
+ 993, 994, 995, 996, 997, 998, 999, 1000, 1000, 1001, 1002,
+ 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1009, 1010, 1011, 1012,
+ 1013, 1014, 1015, 1016, 1017, 1018, 1018, 1019, 1020, 1021, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_10[GAMMA10_TABLE_LENGTH] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153,
+ 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230,
+ 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263,
+ 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285,
+ 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
+ 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318,
+ 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329,
+ 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340,
+ 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351,
+ 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362,
+ 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373,
+ 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384,
+ 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395,
+ 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406,
+ 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417,
+ 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428,
+ 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439,
+ 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450,
+ 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461,
+ 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472,
+ 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483,
+ 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494,
+ 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505,
+ 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516,
+ 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527,
+ 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538,
+ 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549,
+ 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560,
+ 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571,
+ 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582,
+ 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593,
+ 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604,
+ 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615,
+ 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626,
+ 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637,
+ 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648,
+ 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659,
+ 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670,
+ 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681,
+ 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692,
+ 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703,
+ 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714,
+ 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725,
+ 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736,
+ 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747,
+ 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758,
+ 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769,
+ 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780,
+ 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791,
+ 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802,
+ 803, 804, 805, 806, 807, 808, 809, 810, 811, 812, 813,
+ 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824,
+ 825, 826, 827, 828, 829, 830, 831, 832, 833, 834, 835,
+ 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846,
+ 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857,
+ 858, 859, 860, 861, 862, 863, 864, 865, 866, 867, 868,
+ 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879,
+ 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 890,
+ 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 901,
+ 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912,
+ 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923,
+ 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934,
+ 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945,
+ 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956,
+ 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967,
+ 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978,
+ 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989,
+ 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000,
+ 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011,
+ 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022,
+ 1023,
+};
+
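+/* gamma = 1.1 */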
+static const u16 xgamma10_11[GAMMA10_TABLE_LENGTH] = {
+ 0, 1, 1, 2, 2, 3, 4, 4, 5, 6, 6,
+ 7, 8, 8, 9, 10, 11, 11, 12, 13, 13, 14,
+ 15, 16, 16, 17, 18, 19, 20, 20, 21, 22, 23,
+ 23, 24, 25, 26, 27, 27, 28, 29, 30, 31, 31,
+ 32, 33, 34, 35, 35, 36, 37, 38, 39, 39, 40,
+ 41, 42, 43, 44, 44, 45, 46, 47, 48, 49, 49,
+ 50, 51, 52, 53, 54, 54, 55, 56, 57, 58, 59,
+ 59, 60, 61, 62, 63, 64, 65, 65, 66, 67, 68,
+ 69, 70, 71, 71, 72, 73, 74, 75, 76, 77, 78,
+ 78, 79, 80, 81, 82, 83, 84, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137,
+ 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188,
+ 189, 190, 191, 191, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230,
+ 231, 232, 232, 233, 234, 235, 236, 237, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
+ 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 261,
+ 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283,
+ 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
+ 306, 307, 307, 308, 309, 310, 311, 312, 313, 314, 315,
+ 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326,
+ 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337,
+ 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348,
+ 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359,
+ 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370,
+ 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381,
+ 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392,
+ 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403,
+ 404, 405, 406, 407, 408, 409, 410, 411, 412, 414, 415,
+ 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426,
+ 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437,
+ 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448,
+ 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459,
+ 460, 461, 462, 463, 464, 465, 466, 468, 469, 470, 471,
+ 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482,
+ 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493,
+ 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 505,
+ 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516,
+ 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527,
+ 528, 529, 530, 531, 532, 533, 535, 536, 537, 538, 539,
+ 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550,
+ 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 562,
+ 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573,
+ 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584,
+ 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596,
+ 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 608,
+ 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619,
+ 620, 621, 622, 623, 624, 625, 626, 627, 629, 630, 631,
+ 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642,
+ 643, 644, 645, 646, 648, 649, 650, 651, 652, 653, 654,
+ 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665,
+ 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677,
+ 678, 679, 680, 681, 682, 683, 685, 686, 687, 688, 689,
+ 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700,
+ 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712,
+ 713, 714, 715, 716, 717, 719, 720, 721, 722, 723, 724,
+ 725, 726, 727, 728, 729, 730, 731, 732, 733, 735, 736,
+ 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747,
+ 748, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759,
+ 760, 761, 762, 763, 764, 766, 767, 768, 769, 770, 771,
+ 772, 773, 774, 775, 776, 777, 778, 779, 781, 782, 783,
+ 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 795,
+ 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806,
+ 807, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818,
+ 819, 820, 821, 823, 824, 825, 826, 827, 828, 829, 830,
+ 831, 832, 833, 834, 835, 837, 838, 839, 840, 841, 842,
+ 843, 844, 845, 846, 847, 848, 850, 851, 852, 853, 854,
+ 855, 856, 857, 858, 859, 860, 861, 863, 864, 865, 866,
+ 867, 868, 869, 870, 871, 872, 873, 874, 876, 877, 878,
+ 879, 880, 881, 882, 883, 884, 885, 886, 887, 889, 890,
+ 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 902,
+ 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 914,
+ 915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 926,
+ 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 938,
+ 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 950,
+ 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 962,
+ 963, 964, 965, 966, 967, 968, 969, 970, 971, 973, 974,
+ 975, 976, 977, 978, 979, 980, 981, 982, 983, 985, 986,
+ 987, 988, 989, 990, 991, 992, 993, 994, 996, 997, 998,
+ 999, 1000, 1001, 1002, 1003, 1004, 1005, 1007, 1008, 1009, 1010,
+ 1011, 1012, 1013, 1014, 1015, 1016, 1018, 1019, 1020, 1021, 1022,
+ 1023,
+};
+
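+/* gamma = 1.2 */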
+static const u16 xgamma10_12[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4,
+ 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 14, 14, 15, 15, 16,
+ 17, 17, 18, 18, 19, 20, 20, 21, 22, 22, 23,
+ 23, 24, 25, 25, 26, 27, 27, 28, 29, 29, 30,
+ 31, 31, 32, 33, 33, 34, 35, 35, 36, 37, 37,
+ 38, 39, 40, 40, 41, 42, 42, 43, 44, 44, 45,
+ 46, 47, 47, 48, 49, 49, 50, 51, 52, 52, 53,
+ 54, 55, 55, 56, 57, 58, 58, 59, 60, 61, 61,
+ 62, 63, 64, 64, 65, 66, 67, 67, 68, 69, 70,
+ 70, 71, 72, 73, 74, 74, 75, 76, 77, 77, 78,
+ 79, 80, 81, 81, 82, 83, 84, 84, 85, 86, 87,
+ 88, 88, 89, 90, 91, 92, 92, 93, 94, 95, 96,
+ 96, 97, 98, 99, 100, 101, 101, 102, 103, 104, 105,
+ 105, 106, 107, 108, 109, 110, 110, 111, 112, 113, 114,
+ 115, 115, 116, 117, 118, 119, 120, 120, 121, 122, 123,
+ 124, 125, 125, 126, 127, 128, 129, 130, 131, 131, 132,
+ 133, 134, 135, 136, 137, 137, 138, 139, 140, 141, 142,
+ 143, 143, 144, 145, 146, 147, 148, 149, 150, 150, 151,
+ 152, 153, 154, 155, 156, 157, 157, 158, 159, 160, 161,
+ 162, 163, 164, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172, 172, 173, 174, 175, 176, 177, 178, 179, 180, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190,
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221,
+ 222, 223, 224, 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 238, 239, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273,
+ 274, 275, 276, 277, 278, 279, 280, 280, 281, 282, 283,
+ 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
+ 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316,
+ 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327,
+ 328, 329, 330, 331, 332, 333, 334, 334, 335, 336, 337,
+ 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348,
+ 349, 350, 351, 353, 354, 355, 356, 357, 358, 359, 360,
+ 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371,
+ 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382,
+ 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393,
+ 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404,
+ 405, 406, 407, 408, 409, 411, 412, 413, 414, 415, 416,
+ 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427,
+ 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 439,
+ 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450,
+ 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 462,
+ 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473,
+ 474, 475, 476, 477, 478, 479, 481, 482, 483, 484, 485,
+ 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496,
+ 497, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508,
+ 509, 510, 511, 512, 513, 515, 516, 517, 518, 519, 520,
+ 521, 522, 523, 524, 525, 526, 527, 529, 530, 531, 532,
+ 533, 534, 535, 536, 537, 538, 539, 540, 541, 543, 544,
+ 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555,
+ 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567,
+ 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579,
+ 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591,
+ 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 604,
+ 605, 606, 607, 608, 609, 610, 611, 612, 613, 615, 616,
+ 617, 618, 619, 620, 621, 622, 623, 624, 626, 627, 628,
+ 629, 630, 631, 632, 633, 634, 636, 637, 638, 639, 640,
+ 641, 642, 643, 644, 646, 647, 648, 649, 650, 651, 652,
+ 653, 654, 656, 657, 658, 659, 660, 661, 662, 663, 664,
+ 666, 667, 668, 669, 670, 671, 672, 673, 675, 676, 677,
+ 678, 679, 680, 681, 682, 683, 685, 686, 687, 688, 689,
+ 690, 691, 692, 694, 695, 696, 697, 698, 699, 700, 701,
+ 703, 704, 705, 706, 707, 708, 709, 710, 712, 713, 714,
+ 715, 716, 717, 718, 720, 721, 722, 723, 724, 725, 726,
+ 727, 729, 730, 731, 732, 733, 734, 735, 737, 738, 739,
+ 740, 741, 742, 743, 745, 746, 747, 748, 749, 750, 751,
+ 752, 754, 755, 756, 757, 758, 759, 760, 762, 763, 764,
+ 765, 766, 767, 768, 770, 771, 772, 773, 774, 775, 776,
+ 778, 779, 780, 781, 782, 783, 785, 786, 787, 788, 789,
+ 790, 791, 793, 794, 795, 796, 797, 798, 799, 801, 802,
+ 803, 804, 805, 806, 808, 809, 810, 811, 812, 813, 814,
+ 816, 817, 818, 819, 820, 821, 823, 824, 825, 826, 827,
+ 828, 830, 831, 832, 833, 834, 835, 836, 838, 839, 840,
+ 841, 842, 843, 845, 846, 847, 848, 849, 850, 852, 853,
+ 854, 855, 856, 857, 859, 860, 861, 862, 863, 864, 866,
+ 867, 868, 869, 870, 871, 873, 874, 875, 876, 877, 878,
+ 880, 881, 882, 883, 884, 885, 887, 888, 889, 890, 891,
+ 892, 894, 895, 896, 897, 898, 900, 901, 902, 903, 904,
+ 905, 907, 908, 909, 910, 911, 912, 914, 915, 916, 917,
+ 918, 920, 921, 922, 923, 924, 925, 927, 928, 929, 930,
+ 931, 932, 934, 935, 936, 937, 938, 940, 941, 942, 943,
+ 944, 946, 947, 948, 949, 950, 951, 953, 954, 955, 956,
+ 957, 959, 960, 961, 962, 963, 964, 966, 967, 968, 969,
+ 970, 972, 973, 974, 975, 976, 978, 979, 980, 981, 982,
+ 984, 985, 986, 987, 988, 989, 991, 992, 993, 994, 995,
+ 997, 998, 999, 1000, 1001, 1003, 1004, 1005, 1006, 1007, 1009,
+ 1010, 1011, 1012, 1013, 1015, 1016, 1017, 1018, 1019, 1021, 1022,
+ 1023,
+};
+
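+/* gamma = 1.3 */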
+static const u16 xgamma10_13[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7,
+ 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11,
+ 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17,
+ 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,
+ 23, 23, 24, 25, 25, 26, 26, 27, 27, 28, 28,
+ 29, 30, 30, 31, 31, 32, 32, 33, 34, 34, 35,
+ 35, 36, 37, 37, 38, 38, 39, 40, 40, 41, 42,
+ 42, 43, 43, 44, 45, 45, 46, 47, 47, 48, 48,
+ 49, 50, 50, 51, 52, 52, 53, 54, 54, 55, 56,
+ 56, 57, 58, 58, 59, 60, 60, 61, 62, 62, 63,
+ 64, 64, 65, 66, 67, 67, 68, 69, 69, 70, 71,
+ 71, 72, 73, 74, 74, 75, 76, 76, 77, 78, 79,
+ 79, 80, 81, 81, 82, 83, 84, 84, 85, 86, 87,
+ 87, 88, 89, 89, 90, 91, 92, 92, 93, 94, 95,
+ 95, 96, 97, 98, 98, 99, 100, 101, 102, 102, 103,
+ 104, 105, 105, 106, 107, 108, 108, 109, 110, 111, 112,
+ 112, 113, 114, 115, 115, 116, 117, 118, 119, 119, 120,
+ 121, 122, 123, 123, 124, 125, 126, 127, 127, 128, 129,
+ 130, 131, 131, 132, 133, 134, 135, 135, 136, 137, 138,
+ 139, 140, 140, 141, 142, 143, 144, 145, 145, 146, 147,
+ 148, 149, 149, 150, 151, 152, 153, 154, 155, 155, 156,
+ 157, 158, 159, 160, 160, 161, 162, 163, 164, 165, 166,
+ 166, 167, 168, 169, 170, 171, 172, 172, 173, 174, 175,
+ 176, 177, 178, 178, 179, 180, 181, 182, 183, 184, 185,
+ 185, 186, 187, 188, 189, 190, 191, 192, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257, 257, 258, 259, 260, 261, 262, 263, 264, 265,
+ 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 278, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330,
+ 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341,
+ 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
+ 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363,
+ 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374,
+ 375, 376, 377, 378, 379, 380, 381, 383, 384, 385, 386,
+ 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397,
+ 398, 399, 400, 401, 402, 403, 404, 405, 407, 408, 409,
+ 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420,
+ 421, 422, 423, 424, 426, 427, 428, 429, 430, 431, 432,
+ 433, 434, 435, 436, 437, 438, 439, 440, 442, 443, 444,
+ 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455,
+ 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467,
+ 468, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479,
+ 480, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491,
+ 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 504,
+ 505, 506, 507, 508, 509, 510, 511, 512, 513, 515, 516,
+ 517, 518, 519, 520, 521, 522, 523, 525, 526, 527, 528,
+ 529, 530, 531, 532, 534, 535, 536, 537, 538, 539, 540,
+ 541, 542, 544, 545, 546, 547, 548, 549, 550, 551, 553,
+ 554, 555, 556, 557, 558, 559, 561, 562, 563, 564, 565,
+ 566, 567, 568, 570, 571, 572, 573, 574, 575, 576, 578,
+ 579, 580, 581, 582, 583, 584, 586, 587, 588, 589, 590,
+ 591, 592, 594, 595, 596, 597, 598, 599, 600, 602, 603,
+ 604, 605, 606, 607, 608, 610, 611, 612, 613, 614, 615,
+ 617, 618, 619, 620, 621, 622, 624, 625, 626, 627, 628,
+ 629, 630, 632, 633, 634, 635, 636, 637, 639, 640, 641,
+ 642, 643, 644, 646, 647, 648, 649, 650, 652, 653, 654,
+ 655, 656, 657, 659, 660, 661, 662, 663, 664, 666, 667,
+ 668, 669, 670, 671, 673, 674, 675, 676, 677, 679, 680,
+ 681, 682, 683, 684, 686, 687, 688, 689, 690, 692, 693,
+ 694, 695, 696, 698, 699, 700, 701, 702, 704, 705, 706,
+ 707, 708, 709, 711, 712, 713, 714, 715, 717, 718, 719,
+ 720, 721, 723, 724, 725, 726, 727, 729, 730, 731, 732,
+ 733, 735, 736, 737, 738, 739, 741, 742, 743, 744, 746,
+ 747, 748, 749, 750, 752, 753, 754, 755, 756, 758, 759,
+ 760, 761, 762, 764, 765, 766, 767, 769, 770, 771, 772,
+ 773, 775, 776, 777, 778, 780, 781, 782, 783, 784, 786,
+ 787, 788, 789, 791, 792, 793, 794, 795, 797, 798, 799,
+ 800, 802, 803, 804, 805, 807, 808, 809, 810, 811, 813,
+ 814, 815, 816, 818, 819, 820, 821, 823, 824, 825, 826,
+ 827, 829, 830, 831, 832, 834, 835, 836, 837, 839, 840,
+ 841, 842, 844, 845, 846, 847, 849, 850, 851, 852, 854,
+ 855, 856, 857, 859, 860, 861, 862, 864, 865, 866, 867,
+ 869, 870, 871, 872, 874, 875, 876, 877, 879, 880, 881,
+ 882, 884, 885, 886, 887, 889, 890, 891, 892, 894, 895,
+ 896, 897, 899, 900, 901, 903, 904, 905, 906, 908, 909,
+ 910, 911, 913, 914, 915, 916, 918, 919, 920, 922, 923,
+ 924, 925, 927, 928, 929, 930, 932, 933, 934, 935, 937,
+ 938, 939, 941, 942, 943, 944, 946, 947, 948, 950, 951,
+ 952, 953, 955, 956, 957, 958, 960, 961, 962, 964, 965,
+ 966, 967, 969, 970, 971, 973, 974, 975, 976, 978, 979,
+ 980, 982, 983, 984, 985, 987, 988, 989, 991, 992, 993,
+ 994, 996, 997, 998, 1000, 1001, 1002, 1004, 1005, 1006, 1007,
+ 1009, 1010, 1011, 1013, 1014, 1015, 1017, 1018, 1019, 1020, 1022,
+ 1023,
+};
+
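+/* gamma = 1.4 */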
+static const u16 xgamma10_14[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4,
+ 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 11, 12, 12,
+ 12, 13, 13, 14, 14, 15, 15, 15, 16, 16, 17,
+ 17, 18, 18, 18, 19, 19, 20, 20, 21, 21, 22,
+ 22, 23, 23, 23, 24, 24, 25, 25, 26, 26, 27,
+ 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32,
+ 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38,
+ 39, 39, 40, 41, 41, 42, 42, 43, 43, 44, 45,
+ 45, 46, 46, 47, 47, 48, 49, 49, 50, 50, 51,
+ 52, 52, 53, 53, 54, 55, 55, 56, 56, 57, 58,
+ 58, 59, 59, 60, 61, 61, 62, 63, 63, 64, 64,
+ 65, 66, 66, 67, 68, 68, 69, 70, 70, 71, 72,
+ 72, 73, 74, 74, 75, 76, 76, 77, 78, 78, 79,
+ 80, 80, 81, 82, 82, 83, 84, 84, 85, 86, 86,
+ 87, 88, 88, 89, 90, 91, 91, 92, 93, 93, 94,
+ 95, 95, 96, 97, 98, 98, 99, 100, 100, 101, 102,
+ 103, 103, 104, 105, 106, 106, 107, 108, 109, 109, 110,
+ 111, 111, 112, 113, 114, 114, 115, 116, 117, 117, 118,
+ 119, 120, 120, 121, 122, 123, 124, 124, 125, 126, 127,
+ 127, 128, 129, 130, 130, 131, 132, 133, 134, 134, 135,
+ 136, 137, 138, 138, 139, 140, 141, 141, 142, 143, 144,
+ 145, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153,
+ 154, 154, 155, 156, 157, 158, 158, 159, 160, 161, 162,
+ 163, 163, 164, 165, 166, 167, 168, 168, 169, 170, 171,
+ 172, 173, 173, 174, 175, 176, 177, 178, 179, 179, 180,
+ 181, 182, 183, 184, 185, 185, 186, 187, 188, 189, 190,
+ 191, 191, 192, 193, 194, 195, 196, 197, 198, 198, 199,
+ 200, 201, 202, 203, 204, 205, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270,
+ 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281,
+ 282, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291,
+ 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313,
+ 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335,
+ 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346,
+ 347, 348, 349, 351, 352, 353, 354, 355, 356, 357, 358,
+ 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369,
+ 370, 371, 372, 373, 374, 375, 377, 378, 379, 380, 381,
+ 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392,
+ 393, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404,
+ 405, 406, 407, 408, 410, 411, 412, 413, 414, 415, 416,
+ 417, 418, 419, 420, 421, 423, 424, 425, 426, 427, 428,
+ 429, 430, 431, 432, 433, 435, 436, 437, 438, 439, 440,
+ 441, 442, 443, 444, 446, 447, 448, 449, 450, 451, 452,
+ 453, 454, 456, 457, 458, 459, 460, 461, 462, 463, 464,
+ 466, 467, 468, 469, 470, 471, 472, 473, 475, 476, 477,
+ 478, 479, 480, 481, 482, 484, 485, 486, 487, 488, 489,
+ 490, 491, 493, 494, 495, 496, 497, 498, 499, 501, 502,
+ 503, 504, 505, 506, 507, 509, 510, 511, 512, 513, 514,
+ 515, 517, 518, 519, 520, 521, 522, 524, 525, 526, 527,
+ 528, 529, 531, 532, 533, 534, 535, 536, 537, 539, 540,
+ 541, 542, 543, 544, 546, 547, 548, 549, 550, 552, 553,
+ 554, 555, 556, 557, 559, 560, 561, 562, 563, 564, 566,
+ 567, 568, 569, 570, 572, 573, 574, 575, 576, 578, 579,
+ 580, 581, 582, 583, 585, 586, 587, 588, 589, 591, 592,
+ 593, 594, 595, 597, 598, 599, 600, 601, 603, 604, 605,
+ 606, 607, 609, 610, 611, 612, 613, 615, 616, 617, 618,
+ 620, 621, 622, 623, 624, 626, 627, 628, 629, 630, 632,
+ 633, 634, 635, 637, 638, 639, 640, 641, 643, 644, 645,
+ 646, 648, 649, 650, 651, 653, 654, 655, 656, 657, 659,
+ 660, 661, 662, 664, 665, 666, 667, 669, 670, 671, 672,
+ 674, 675, 676, 677, 679, 680, 681, 682, 684, 685, 686,
+ 687, 689, 690, 691, 692, 694, 695, 696, 697, 699, 700,
+ 701, 702, 704, 705, 706, 707, 709, 710, 711, 712, 714,
+ 715, 716, 717, 719, 720, 721, 723, 724, 725, 726, 728,
+ 729, 730, 731, 733, 734, 735, 737, 738, 739, 740, 742,
+ 743, 744, 745, 747, 748, 749, 751, 752, 753, 754, 756,
+ 757, 758, 760, 761, 762, 763, 765, 766, 767, 769, 770,
+ 771, 772, 774, 775, 776, 778, 779, 780, 782, 783, 784,
+ 785, 787, 788, 789, 791, 792, 793, 794, 796, 797, 798,
+ 800, 801, 802, 804, 805, 806, 808, 809, 810, 811, 813,
+ 814, 815, 817, 818, 819, 821, 822, 823, 825, 826, 827,
+ 829, 830, 831, 833, 834, 835, 836, 838, 839, 840, 842,
+ 843, 844, 846, 847, 848, 850, 851, 852, 854, 855, 856,
+ 858, 859, 860, 862, 863, 864, 866, 867, 868, 870, 871,
+ 872, 874, 875, 876, 878, 879, 880, 882, 883, 884, 886,
+ 887, 888, 890, 891, 893, 894, 895, 897, 898, 899, 901,
+ 902, 903, 905, 906, 907, 909, 910, 911, 913, 914, 915,
+ 917, 918, 920, 921, 922, 924, 925, 926, 928, 929, 930,
+ 932, 933, 935, 936, 937, 939, 940, 941, 943, 944, 945,
+ 947, 948, 950, 951, 952, 954, 955, 956, 958, 959, 961,
+ 962, 963, 965, 966, 967, 969, 970, 972, 973, 974, 976,
+ 977, 978, 980, 981, 983, 984, 985, 987, 988, 990, 991,
+ 992, 994, 995, 996, 998, 999, 1001, 1002, 1003, 1005, 1006,
+ 1008, 1009, 1010, 1012, 1013, 1015, 1016, 1017, 1019, 1020, 1022,
+ 1023,
+};
+
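+/* gamma = 1.5 */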
+static const u16 xgamma10_15[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6,
+ 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9,
+ 9, 9, 10, 10, 10, 11, 11, 11, 12, 12, 12,
+ 13, 13, 13, 14, 14, 15, 15, 15, 16, 16, 16,
+ 17, 17, 18, 18, 18, 19, 19, 20, 20, 20, 21,
+ 21, 22, 22, 22, 23, 23, 24, 24, 25, 25, 25,
+ 26, 26, 27, 27, 28, 28, 28, 29, 29, 30, 30,
+ 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36,
+ 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
+ 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47,
+ 47, 48, 48, 49, 50, 50, 51, 51, 52, 52, 53,
+ 53, 54, 55, 55, 56, 56, 57, 57, 58, 59, 59,
+ 60, 60, 61, 62, 62, 63, 63, 64, 64, 65, 66,
+ 66, 67, 67, 68, 69, 69, 70, 71, 71, 72, 72,
+ 73, 74, 74, 75, 76, 76, 77, 77, 78, 79, 79,
+ 80, 81, 81, 82, 83, 83, 84, 84, 85, 86, 86,
+ 87, 88, 88, 89, 90, 90, 91, 92, 92, 93, 94,
+ 94, 95, 96, 97, 97, 98, 99, 99, 100, 101, 101,
+ 102, 103, 103, 104, 105, 106, 106, 107, 108, 108, 109,
+ 110, 110, 111, 112, 113, 113, 114, 115, 116, 116, 117,
+ 118, 118, 119, 120, 121, 121, 122, 123, 124, 124, 125,
+ 126, 127, 127, 128, 129, 130, 130, 131, 132, 133, 133,
+ 134, 135, 136, 136, 137, 138, 139, 139, 140, 141, 142,
+ 143, 143, 144, 145, 146, 146, 147, 148, 149, 150, 150,
+ 151, 152, 153, 154, 154, 155, 156, 157, 158, 158, 159,
+ 160, 161, 162, 162, 163, 164, 165, 166, 167, 167, 168,
+ 169, 170, 171, 171, 172, 173, 174, 175, 176, 176, 177,
+ 178, 179, 180, 181, 181, 182, 183, 184, 185, 186, 187,
+ 187, 188, 189, 190, 191, 192, 193, 193, 194, 195, 196,
+ 197, 198, 199, 199, 200, 201, 202, 203, 204, 205, 206,
+ 206, 207, 208, 209, 210, 211, 212, 213, 214, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223, 223, 224, 225,
+ 226, 227, 228, 229, 230, 231, 232, 233, 233, 234, 235,
+ 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 245,
+ 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256,
+ 257, 258, 259, 260, 261, 261, 262, 263, 264, 265, 266,
+ 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288,
+ 289, 290, 291, 292, 293, 293, 294, 295, 296, 297, 298,
+ 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 314, 315, 316, 317, 318, 319, 320, 321,
+ 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343,
+ 344, 345, 346, 347, 349, 350, 351, 352, 353, 354, 355,
+ 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366,
+ 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378,
+ 379, 380, 381, 383, 384, 385, 386, 387, 388, 389, 390,
+ 391, 392, 393, 395, 396, 397, 398, 399, 400, 401, 402,
+ 403, 404, 405, 407, 408, 409, 410, 411, 412, 413, 414,
+ 415, 417, 418, 419, 420, 421, 422, 423, 424, 425, 427,
+ 428, 429, 430, 431, 432, 433, 434, 436, 437, 438, 439,
+ 440, 441, 442, 444, 445, 446, 447, 448, 449, 450, 451,
+ 453, 454, 455, 456, 457, 458, 460, 461, 462, 463, 464,
+ 465, 466, 468, 469, 470, 471, 472, 473, 475, 476, 477,
+ 478, 479, 480, 482, 483, 484, 485, 486, 487, 489, 490,
+ 491, 492, 493, 494, 496, 497, 498, 499, 500, 501, 503,
+ 504, 505, 506, 507, 509, 510, 511, 512, 513, 515, 516,
+ 517, 518, 519, 521, 522, 523, 524, 525, 527, 528, 529,
+ 530, 531, 533, 534, 535, 536, 537, 539, 540, 541, 542,
+ 543, 545, 546, 547, 548, 550, 551, 552, 553, 554, 556,
+ 557, 558, 559, 561, 562, 563, 564, 565, 567, 568, 569,
+ 570, 572, 573, 574, 575, 577, 578, 579, 580, 582, 583,
+ 584, 585, 587, 588, 589, 590, 591, 593, 594, 595, 596,
+ 598, 599, 600, 602, 603, 604, 605, 607, 608, 609, 610,
+ 612, 613, 614, 615, 617, 618, 619, 620, 622, 623, 624,
+ 626, 627, 628, 629, 631, 632, 633, 634, 636, 637, 638,
+ 640, 641, 642, 643, 645, 646, 647, 649, 650, 651, 652,
+ 654, 655, 656, 658, 659, 660, 662, 663, 664, 665, 667,
+ 668, 669, 671, 672, 673, 675, 676, 677, 678, 680, 681,
+ 682, 684, 685, 686, 688, 689, 690, 692, 693, 694, 696,
+ 697, 698, 700, 701, 702, 703, 705, 706, 707, 709, 710,
+ 711, 713, 714, 715, 717, 718, 719, 721, 722, 723, 725,
+ 726, 727, 729, 730, 731, 733, 734, 735, 737, 738, 740,
+ 741, 742, 744, 745, 746, 748, 749, 750, 752, 753, 754,
+ 756, 757, 758, 760, 761, 763, 764, 765, 767, 768, 769,
+ 771, 772, 773, 775, 776, 778, 779, 780, 782, 783, 784,
+ 786, 787, 789, 790, 791, 793, 794, 795, 797, 798, 800,
+ 801, 802, 804, 805, 806, 808, 809, 811, 812, 813, 815,
+ 816, 818, 819, 820, 822, 823, 825, 826, 827, 829, 830,
+ 832, 833, 834, 836, 837, 839, 840, 841, 843, 844, 846,
+ 847, 848, 850, 851, 853, 854, 855, 857, 858, 860, 861,
+ 863, 864, 865, 867, 868, 870, 871, 872, 874, 875, 877,
+ 878, 880, 881, 882, 884, 885, 887, 888, 890, 891, 892,
+ 894, 895, 897, 898, 900, 901, 902, 904, 905, 907, 908,
+ 910, 911, 913, 914, 915, 917, 918, 920, 921, 923, 924,
+ 926, 927, 929, 930, 931, 933, 934, 936, 937, 939, 940,
+ 942, 943, 945, 946, 947, 949, 950, 952, 953, 955, 956,
+ 958, 959, 961, 962, 964, 965, 967, 968, 969, 971, 972,
+ 974, 975, 977, 978, 980, 981, 983, 984, 986, 987, 989,
+ 990, 992, 993, 995, 996, 998, 999, 1001, 1002, 1004, 1005,
+ 1007, 1008, 1010, 1011, 1013, 1014, 1016, 1017, 1019, 1020, 1022,
+ 1023,
+};
+
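+/* gamma = 1.6 */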
+static const u16 xgamma10_16[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6,
+ 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9,
+ 10, 10, 10, 10, 11, 11, 11, 12, 12, 12, 12,
+ 13, 13, 13, 14, 14, 14, 15, 15, 15, 16, 16,
+ 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 20,
+ 20, 21, 21, 21, 22, 22, 22, 23, 23, 24, 24,
+ 24, 25, 25, 26, 26, 26, 27, 27, 28, 28, 28,
+ 29, 29, 30, 30, 31, 31, 31, 32, 32, 33, 33,
+ 34, 34, 35, 35, 35, 36, 36, 37, 37, 38, 38,
+ 39, 39, 40, 40, 41, 41, 41, 42, 42, 43, 43,
+ 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49,
+ 49, 50, 50, 51, 52, 52, 53, 53, 54, 54, 55,
+ 55, 56, 56, 57, 57, 58, 58, 59, 60, 60, 61,
+ 61, 62, 62, 63, 63, 64, 65, 65, 66, 66, 67,
+ 67, 68, 69, 69, 70, 70, 71, 72, 72, 73, 73,
+ 74, 75, 75, 76, 76, 77, 78, 78, 79, 79, 80,
+ 81, 81, 82, 82, 83, 84, 84, 85, 86, 86, 87,
+ 87, 88, 89, 89, 90, 91, 91, 92, 93, 93, 94,
+ 95, 95, 96, 97, 97, 98, 99, 99, 100, 101, 101,
+ 102, 103, 103, 104, 105, 105, 106, 107, 107, 108, 109,
+ 109, 110, 111, 111, 112, 113, 114, 114, 115, 116, 116,
+ 117, 118, 119, 119, 120, 121, 121, 122, 123, 124, 124,
+ 125, 126, 126, 127, 128, 129, 129, 130, 131, 132, 132,
+ 133, 134, 135, 135, 136, 137, 138, 138, 139, 140, 141,
+ 141, 142, 143, 144, 144, 145, 146, 147, 148, 148, 149,
+ 150, 151, 151, 152, 153, 154, 155, 155, 156, 157, 158,
+ 159, 159, 160, 161, 162, 163, 163, 164, 165, 166, 167,
+ 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176,
+ 176, 177, 178, 179, 180, 181, 181, 182, 183, 184, 185,
+ 186, 186, 187, 188, 189, 190, 191, 192, 192, 193, 194,
+ 195, 196, 197, 198, 198, 199, 200, 201, 202, 203, 204,
+ 204, 205, 206, 207, 208, 209, 210, 211, 212, 212, 213,
+ 214, 215, 216, 217, 218, 219, 220, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 230, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241, 242, 242, 243,
+ 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
+ 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 326, 327, 329, 330, 331,
+ 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
+ 343, 344, 345, 346, 348, 349, 350, 351, 352, 353, 354,
+ 355, 356, 357, 358, 359, 360, 362, 363, 364, 365, 366,
+ 367, 368, 369, 370, 371, 372, 374, 375, 376, 377, 378,
+ 379, 380, 381, 382, 383, 385, 386, 387, 388, 389, 390,
+ 391, 392, 393, 395, 396, 397, 398, 399, 400, 401, 402,
+ 404, 405, 406, 407, 408, 409, 410, 411, 413, 414, 415,
+ 416, 417, 418, 419, 421, 422, 423, 424, 425, 426, 428,
+ 429, 430, 431, 432, 433, 434, 436, 437, 438, 439, 440,
+ 441, 443, 444, 445, 446, 447, 448, 450, 451, 452, 453,
+ 454, 456, 457, 458, 459, 460, 461, 463, 464, 465, 466,
+ 467, 469, 470, 471, 472, 473, 475, 476, 477, 478, 479,
+ 481, 482, 483, 484, 485, 487, 488, 489, 490, 491, 493,
+ 494, 495, 496, 498, 499, 500, 501, 502, 504, 505, 506,
+ 507, 509, 510, 511, 512, 514, 515, 516, 517, 519, 520,
+ 521, 522, 523, 525, 526, 527, 528, 530, 531, 532, 533,
+ 535, 536, 537, 538, 540, 541, 542, 544, 545, 546, 547,
+ 549, 550, 551, 552, 554, 555, 556, 557, 559, 560, 561,
+ 563, 564, 565, 566, 568, 569, 570, 572, 573, 574, 575,
+ 577, 578, 579, 581, 582, 583, 584, 586, 587, 588, 590,
+ 591, 592, 594, 595, 596, 598, 599, 600, 601, 603, 604,
+ 605, 607, 608, 609, 611, 612, 613, 615, 616, 617, 619,
+ 620, 621, 623, 624, 625, 627, 628, 629, 631, 632, 633,
+ 635, 636, 637, 639, 640, 641, 643, 644, 645, 647, 648,
+ 649, 651, 652, 653, 655, 656, 657, 659, 660, 662, 663,
+ 664, 666, 667, 668, 670, 671, 672, 674, 675, 677, 678,
+ 679, 681, 682, 683, 685, 686, 688, 689, 690, 692, 693,
+ 694, 696, 697, 699, 700, 701, 703, 704, 706, 707, 708,
+ 710, 711, 712, 714, 715, 717, 718, 719, 721, 722, 724,
+ 725, 727, 728, 729, 731, 732, 734, 735, 736, 738, 739,
+ 741, 742, 743, 745, 746, 748, 749, 751, 752, 753, 755,
+ 756, 758, 759, 761, 762, 763, 765, 766, 768, 769, 771,
+ 772, 774, 775, 776, 778, 779, 781, 782, 784, 785, 787,
+ 788, 789, 791, 792, 794, 795, 797, 798, 800, 801, 803,
+ 804, 805, 807, 808, 810, 811, 813, 814, 816, 817, 819,
+ 820, 822, 823, 825, 826, 827, 829, 830, 832, 833, 835,
+ 836, 838, 839, 841, 842, 844, 845, 847, 848, 850, 851,
+ 853, 854, 856, 857, 859, 860, 862, 863, 865, 866, 868,
+ 869, 871, 872, 874, 875, 877, 878, 880, 881, 883, 884,
+ 886, 887, 889, 890, 892, 893, 895, 897, 898, 900, 901,
+ 903, 904, 906, 907, 909, 910, 912, 913, 915, 916, 918,
+ 919, 921, 923, 924, 926, 927, 929, 930, 932, 933, 935,
+ 936, 938, 940, 941, 943, 944, 946, 947, 949, 950, 952,
+ 954, 955, 957, 958, 960, 961, 963, 964, 966, 968, 969,
+ 971, 972, 974, 975, 977, 979, 980, 982, 983, 985, 986,
+ 988, 990, 991, 993, 994, 996, 998, 999, 1001, 1002, 1004,
+ 1005, 1007, 1009, 1010, 1012, 1013, 1015, 1017, 1018, 1020, 1021,
+ 1023,
+};
+
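+/* gamma = 1.7 */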
+static const u16 xgamma10_17[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5,
+ 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7,
+ 7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9,
+ 10, 10, 10, 10, 11, 11, 11, 12, 12, 12, 12,
+ 13, 13, 13, 13, 14, 14, 14, 15, 15, 15, 15,
+ 16, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19,
+ 19, 20, 20, 20, 21, 21, 21, 22, 22, 22, 23,
+ 23, 23, 24, 24, 25, 25, 25, 26, 26, 26, 27,
+ 27, 28, 28, 28, 29, 29, 29, 30, 30, 31, 31,
+ 31, 32, 32, 33, 33, 34, 34, 34, 35, 35, 36,
+ 36, 37, 37, 37, 38, 38, 39, 39, 40, 40, 40,
+ 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46,
+ 46, 46, 47, 47, 48, 48, 49, 49, 50, 50, 51,
+ 51, 52, 52, 53, 53, 54, 54, 55, 55, 56, 56,
+ 57, 57, 58, 58, 59, 60, 60, 61, 61, 62, 62,
+ 63, 63, 64, 64, 65, 65, 66, 67, 67, 68, 68,
+ 69, 69, 70, 70, 71, 72, 72, 73, 73, 74, 74,
+ 75, 76, 76, 77, 77, 78, 79, 79, 80, 80, 81,
+ 82, 82, 83, 83, 84, 85, 85, 86, 86, 87, 88,
+ 88, 89, 89, 90, 91, 91, 92, 93, 93, 94, 95,
+ 95, 96, 96, 97, 98, 98, 99, 100, 100, 101, 102,
+ 102, 103, 104, 104, 105, 106, 106, 107, 108, 108, 109,
+ 110, 110, 111, 112, 112, 113, 114, 114, 115, 116, 116,
+ 117, 118, 119, 119, 120, 121, 121, 122, 123, 124, 124,
+ 125, 126, 126, 127, 128, 129, 129, 130, 131, 131, 132,
+ 133, 134, 134, 135, 136, 137, 137, 138, 139, 140, 140,
+ 141, 142, 143, 143, 144, 145, 146, 146, 147, 148, 149,
+ 149, 150, 151, 152, 153, 153, 154, 155, 156, 156, 157,
+ 158, 159, 160, 160, 161, 162, 163, 164, 164, 165, 166,
+ 167, 168, 168, 169, 170, 171, 172, 172, 173, 174, 175,
+ 176, 177, 177, 178, 179, 180, 181, 182, 182, 183, 184,
+ 185, 186, 187, 187, 188, 189, 190, 191, 192, 193, 193,
+ 194, 195, 196, 197, 198, 199, 199, 200, 201, 202, 203,
+ 204, 205, 206, 206, 207, 208, 209, 210, 211, 212, 213,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243,
+ 244, 245, 246, 247, 248, 248, 249, 250, 251, 252, 253,
+ 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
+ 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 319, 320,
+ 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331,
+ 332, 333, 334, 336, 337, 338, 339, 340, 341, 342, 343,
+ 344, 345, 346, 347, 349, 350, 351, 352, 353, 354, 355,
+ 356, 357, 358, 360, 361, 362, 363, 364, 365, 366, 367,
+ 368, 370, 371, 372, 373, 374, 375, 376, 377, 379, 380,
+ 381, 382, 383, 384, 385, 386, 388, 389, 390, 391, 392,
+ 393, 394, 396, 397, 398, 399, 400, 401, 403, 404, 405,
+ 406, 407, 408, 409, 411, 412, 413, 414, 415, 417, 418,
+ 419, 420, 421, 422, 424, 425, 426, 427, 428, 430, 431,
+ 432, 433, 434, 435, 437, 438, 439, 440, 441, 443, 444,
+ 445, 446, 448, 449, 450, 451, 452, 454, 455, 456, 457,
+ 458, 460, 461, 462, 463, 465, 466, 467, 468, 469, 471,
+ 472, 473, 474, 476, 477, 478, 479, 481, 482, 483, 484,
+ 486, 487, 488, 489, 491, 492, 493, 494, 496, 497, 498,
+ 499, 501, 502, 503, 505, 506, 507, 508, 510, 511, 512,
+ 513, 515, 516, 517, 519, 520, 521, 522, 524, 525, 526,
+ 528, 529, 530, 532, 533, 534, 535, 537, 538, 539, 541,
+ 542, 543, 545, 546, 547, 549, 550, 551, 552, 554, 555,
+ 556, 558, 559, 560, 562, 563, 564, 566, 567, 568, 570,
+ 571, 572, 574, 575, 576, 578, 579, 580, 582, 583, 584,
+ 586, 587, 589, 590, 591, 593, 594, 595, 597, 598, 599,
+ 601, 602, 604, 605, 606, 608, 609, 610, 612, 613, 615,
+ 616, 617, 619, 620, 621, 623, 624, 626, 627, 628, 630,
+ 631, 633, 634, 635, 637, 638, 640, 641, 642, 644, 645,
+ 647, 648, 649, 651, 652, 654, 655, 656, 658, 659, 661,
+ 662, 664, 665, 666, 668, 669, 671, 672, 674, 675, 676,
+ 678, 679, 681, 682, 684, 685, 686, 688, 689, 691, 692,
+ 694, 695, 697, 698, 699, 701, 702, 704, 705, 707, 708,
+ 710, 711, 713, 714, 716, 717, 718, 720, 721, 723, 724,
+ 726, 727, 729, 730, 732, 733, 735, 736, 738, 739, 741,
+ 742, 744, 745, 747, 748, 750, 751, 753, 754, 756, 757,
+ 759, 760, 762, 763, 765, 766, 768, 769, 771, 772, 774,
+ 775, 777, 778, 780, 781, 783, 784, 786, 787, 789, 790,
+ 792, 793, 795, 797, 798, 800, 801, 803, 804, 806, 807,
+ 809, 810, 812, 814, 815, 817, 818, 820, 821, 823, 824,
+ 826, 827, 829, 831, 832, 834, 835, 837, 838, 840, 842,
+ 843, 845, 846, 848, 849, 851, 853, 854, 856, 857, 859,
+ 860, 862, 864, 865, 867, 868, 870, 872, 873, 875, 876,
+ 878, 880, 881, 883, 884, 886, 888, 889, 891, 892, 894,
+ 896, 897, 899, 900, 902, 904, 905, 907, 908, 910, 912,
+ 913, 915, 917, 918, 920, 921, 923, 925, 926, 928, 930,
+ 931, 933, 935, 936, 938, 939, 941, 943, 944, 946, 948,
+ 949, 951, 953, 954, 956, 958, 959, 961, 963, 964, 966,
+ 968, 969, 971, 973, 974, 976, 978, 979, 981, 983, 984,
+ 986, 988, 989, 991, 993, 994, 996, 998, 999, 1001, 1003,
+ 1004, 1006, 1008, 1009, 1011, 1013, 1015, 1016, 1018, 1020, 1021,
+ 1023,
+};
+
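+/* gamma = 1.8 */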
+static const u16 xgamma10_18[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5,
+ 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,
+ 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
+ 15, 16, 16, 16, 16, 17, 17, 17, 18, 18, 18,
+ 18, 19, 19, 19, 20, 20, 20, 21, 21, 21, 22,
+ 22, 22, 23, 23, 23, 24, 24, 24, 25, 25, 25,
+ 26, 26, 26, 27, 27, 27, 28, 28, 29, 29, 29,
+ 30, 30, 30, 31, 31, 32, 32, 32, 33, 33, 33,
+ 34, 34, 35, 35, 35, 36, 36, 37, 37, 38, 38,
+ 38, 39, 39, 40, 40, 40, 41, 41, 42, 42, 43,
+ 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48,
+ 48, 48, 49, 49, 50, 50, 51, 51, 52, 52, 53,
+ 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58,
+ 59, 59, 60, 60, 61, 61, 62, 62, 63, 63, 64,
+ 64, 65, 65, 66, 66, 67, 68, 68, 69, 69, 70,
+ 70, 71, 71, 72, 72, 73, 74, 74, 75, 75, 76,
+ 76, 77, 78, 78, 79, 79, 80, 80, 81, 82, 82,
+ 83, 83, 84, 85, 85, 86, 86, 87, 88, 88, 89,
+ 89, 90, 91, 91, 92, 92, 93, 94, 94, 95, 96,
+ 96, 97, 97, 98, 99, 99, 100, 101, 101, 102, 103,
+ 103, 104, 104, 105, 106, 106, 107, 108, 108, 109, 110,
+ 110, 111, 112, 112, 113, 114, 114, 115, 116, 117, 117,
+ 118, 119, 119, 120, 121, 121, 122, 123, 123, 124, 125,
+ 126, 126, 127, 128, 128, 129, 130, 131, 131, 132, 133,
+ 133, 134, 135, 136, 136, 137, 138, 139, 139, 140, 141,
+ 142, 142, 143, 144, 145, 145, 146, 147, 148, 148, 149,
+ 150, 151, 151, 152, 153, 154, 155, 155, 156, 157, 158,
+ 158, 159, 160, 161, 162, 162, 163, 164, 165, 166, 166,
+ 167, 168, 169, 170, 170, 171, 172, 173, 174, 175, 175,
+ 176, 177, 178, 179, 179, 180, 181, 182, 183, 184, 184,
+ 185, 186, 187, 188, 189, 190, 190, 191, 192, 193, 194,
+ 195, 196, 196, 197, 198, 199, 200, 201, 202, 203, 203,
+ 204, 205, 206, 207, 208, 209, 210, 210, 211, 212, 213,
+ 214, 215, 216, 217, 218, 219, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 230, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265,
+ 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298,
+ 299, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310,
+ 311, 312, 313, 314, 315, 316, 317, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 333, 334,
+ 335, 336, 337, 338, 339, 340, 341, 342, 344, 345, 346,
+ 347, 348, 349, 350, 351, 353, 354, 355, 356, 357, 358,
+ 359, 360, 362, 363, 364, 365, 366, 367, 368, 370, 371,
+ 372, 373, 374, 375, 376, 378, 379, 380, 381, 382, 383,
+ 385, 386, 387, 388, 389, 390, 392, 393, 394, 395, 396,
+ 397, 399, 400, 401, 402, 403, 405, 406, 407, 408, 409,
+ 411, 412, 413, 414, 415, 417, 418, 419, 420, 421, 423,
+ 424, 425, 426, 427, 429, 430, 431, 432, 434, 435, 436,
+ 437, 439, 440, 441, 442, 443, 445, 446, 447, 448, 450,
+ 451, 452, 453, 455, 456, 457, 458, 460, 461, 462, 464,
+ 465, 466, 467, 469, 470, 471, 472, 474, 475, 476, 478,
+ 479, 480, 481, 483, 484, 485, 487, 488, 489, 490, 492,
+ 493, 494, 496, 497, 498, 500, 501, 502, 504, 505, 506,
+ 507, 509, 510, 511, 513, 514, 515, 517, 518, 519, 521,
+ 522, 523, 525, 526, 527, 529, 530, 531, 533, 534, 535,
+ 537, 538, 540, 541, 542, 544, 545, 546, 548, 549, 550,
+ 552, 553, 555, 556, 557, 559, 560, 561, 563, 564, 566,
+ 567, 568, 570, 571, 572, 574, 575, 577, 578, 579, 581,
+ 582, 584, 585, 586, 588, 589, 591, 592, 594, 595, 596,
+ 598, 599, 601, 602, 603, 605, 606, 608, 609, 611, 612,
+ 613, 615, 616, 618, 619, 621, 622, 624, 625, 626, 628,
+ 629, 631, 632, 634, 635, 637, 638, 640, 641, 642, 644,
+ 645, 647, 648, 650, 651, 653, 654, 656, 657, 659, 660,
+ 662, 663, 665, 666, 668, 669, 671, 672, 673, 675, 676,
+ 678, 679, 681, 682, 684, 686, 687, 689, 690, 692, 693,
+ 695, 696, 698, 699, 701, 702, 704, 705, 707, 708, 710,
+ 711, 713, 714, 716, 717, 719, 721, 722, 724, 725, 727,
+ 728, 730, 731, 733, 734, 736, 738, 739, 741, 742, 744,
+ 745, 747, 749, 750, 752, 753, 755, 756, 758, 760, 761,
+ 763, 764, 766, 767, 769, 771, 772, 774, 775, 777, 779,
+ 780, 782, 783, 785, 787, 788, 790, 791, 793, 795, 796,
+ 798, 799, 801, 803, 804, 806, 807, 809, 811, 812, 814,
+ 816, 817, 819, 820, 822, 824, 825, 827, 829, 830, 832,
+ 834, 835, 837, 839, 840, 842, 843, 845, 847, 848, 850,
+ 852, 853, 855, 857, 858, 860, 862, 863, 865, 867, 868,
+ 870, 872, 873, 875, 877, 878, 880, 882, 884, 885, 887,
+ 889, 890, 892, 894, 895, 897, 899, 900, 902, 904, 906,
+ 907, 909, 911, 912, 914, 916, 918, 919, 921, 923, 924,
+ 926, 928, 930, 931, 933, 935, 936, 938, 940, 942, 943,
+ 945, 947, 949, 950, 952, 954, 956, 957, 959, 961, 963,
+ 964, 966, 968, 970, 971, 973, 975, 977, 978, 980, 982,
+ 984, 986, 987, 989, 991, 993, 994, 996, 998, 1000, 1002,
+ 1003, 1005, 1007, 1009, 1010, 1012, 1014, 1016, 1018, 1019, 1021,
+ 1023,
+};
+
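+/* gamma = 1.9 */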
+static const u16 xgamma10_19[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 11, 11, 11, 11, 11, 12, 12,
+ 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15,
+ 15, 15, 15, 16, 16, 16, 16, 17, 17, 17, 17,
+ 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 21,
+ 21, 21, 22, 22, 22, 22, 23, 23, 23, 24, 24,
+ 24, 25, 25, 25, 26, 26, 26, 27, 27, 27, 28,
+ 28, 28, 29, 29, 29, 30, 30, 30, 31, 31, 32,
+ 32, 32, 33, 33, 33, 34, 34, 35, 35, 35, 36,
+ 36, 36, 37, 37, 38, 38, 38, 39, 39, 40, 40,
+ 41, 41, 41, 42, 42, 43, 43, 43, 44, 44, 45,
+ 45, 46, 46, 46, 47, 47, 48, 48, 49, 49, 50,
+ 50, 51, 51, 51, 52, 52, 53, 53, 54, 54, 55,
+ 55, 56, 56, 57, 57, 58, 58, 59, 59, 60, 60,
+ 61, 61, 62, 62, 63, 63, 64, 64, 65, 65, 66,
+ 66, 67, 67, 68, 68, 69, 69, 70, 70, 71, 71,
+ 72, 72, 73, 74, 74, 75, 75, 76, 76, 77, 77,
+ 78, 79, 79, 80, 80, 81, 81, 82, 83, 83, 84,
+ 84, 85, 85, 86, 87, 87, 88, 88, 89, 90, 90,
+ 91, 91, 92, 93, 93, 94, 94, 95, 96, 96, 97,
+ 98, 98, 99, 99, 100, 101, 101, 102, 103, 103, 104,
+ 105, 105, 106, 107, 107, 108, 108, 109, 110, 110, 111,
+ 112, 112, 113, 114, 114, 115, 116, 116, 117, 118, 119,
+ 119, 120, 121, 121, 122, 123, 123, 124, 125, 125, 126,
+ 127, 128, 128, 129, 130, 130, 131, 132, 133, 133, 134,
+ 135, 135, 136, 137, 138, 138, 139, 140, 141, 141, 142,
+ 143, 144, 144, 145, 146, 147, 147, 148, 149, 150, 150,
+ 151, 152, 153, 154, 154, 155, 156, 157, 157, 158, 159,
+ 160, 161, 161, 162, 163, 164, 165, 165, 166, 167, 168,
+ 169, 169, 170, 171, 172, 173, 173, 174, 175, 176, 177,
+ 178, 178, 179, 180, 181, 182, 183, 183, 184, 185, 186,
+ 187, 188, 188, 189, 190, 191, 192, 193, 194, 195, 195,
+ 196, 197, 198, 199, 200, 201, 201, 202, 203, 204, 205,
+ 206, 207, 208, 209, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 219, 220, 221, 222, 223, 224, 225,
+ 226, 227, 228, 229, 230, 231, 232, 232, 233, 234, 235,
+ 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246,
+ 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268,
+ 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290,
+ 291, 292, 293, 294, 295, 296, 297, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 326,
+ 327, 328, 329, 330, 331, 332, 333, 334, 336, 337, 338,
+ 339, 340, 341, 342, 343, 345, 346, 347, 348, 349, 350,
+ 351, 353, 354, 355, 356, 357, 358, 360, 361, 362, 363,
+ 364, 365, 367, 368, 369, 370, 371, 372, 374, 375, 376,
+ 377, 378, 379, 381, 382, 383, 384, 385, 387, 388, 389,
+ 390, 391, 393, 394, 395, 396, 397, 399, 400, 401, 402,
+ 404, 405, 406, 407, 408, 410, 411, 412, 413, 415, 416,
+ 417, 418, 420, 421, 422, 423, 425, 426, 427, 428, 430,
+ 431, 432, 433, 435, 436, 437, 439, 440, 441, 442, 444,
+ 445, 446, 447, 449, 450, 451, 453, 454, 455, 456, 458,
+ 459, 460, 462, 463, 464, 466, 467, 468, 470, 471, 472,
+ 473, 475, 476, 477, 479, 480, 481, 483, 484, 485, 487,
+ 488, 489, 491, 492, 493, 495, 496, 498, 499, 500, 502,
+ 503, 504, 506, 507, 508, 510, 511, 512, 514, 515, 517,
+ 518, 519, 521, 522, 523, 525, 526, 528, 529, 530, 532,
+ 533, 535, 536, 537, 539, 540, 542, 543, 544, 546, 547,
+ 549, 550, 551, 553, 554, 556, 557, 559, 560, 561, 563,
+ 564, 566, 567, 569, 570, 572, 573, 574, 576, 577, 579,
+ 580, 582, 583, 585, 586, 587, 589, 590, 592, 593, 595,
+ 596, 598, 599, 601, 602, 604, 605, 607, 608, 610, 611,
+ 613, 614, 616, 617, 619, 620, 622, 623, 625, 626, 628,
+ 629, 631, 632, 634, 635, 637, 638, 640, 641, 643, 644,
+ 646, 647, 649, 650, 652, 653, 655, 656, 658, 660, 661,
+ 663, 664, 666, 667, 669, 670, 672, 674, 675, 677, 678,
+ 680, 681, 683, 684, 686, 688, 689, 691, 692, 694, 696,
+ 697, 699, 700, 702, 703, 705, 707, 708, 710, 711, 713,
+ 715, 716, 718, 719, 721, 723, 724, 726, 728, 729, 731,
+ 732, 734, 736, 737, 739, 741, 742, 744, 745, 747, 749,
+ 750, 752, 754, 755, 757, 759, 760, 762, 764, 765, 767,
+ 768, 770, 772, 773, 775, 777, 778, 780, 782, 783, 785,
+ 787, 789, 790, 792, 794, 795, 797, 799, 800, 802, 804,
+ 805, 807, 809, 810, 812, 814, 816, 817, 819, 821, 822,
+ 824, 826, 828, 829, 831, 833, 834, 836, 838, 840, 841,
+ 843, 845, 847, 848, 850, 852, 854, 855, 857, 859, 861,
+ 862, 864, 866, 868, 869, 871, 873, 875, 876, 878, 880,
+ 882, 883, 885, 887, 889, 891, 892, 894, 896, 898, 899,
+ 901, 903, 905, 907, 908, 910, 912, 914, 916, 917, 919,
+ 921, 923, 925, 926, 928, 930, 932, 934, 936, 937, 939,
+ 941, 943, 945, 947, 948, 950, 952, 954, 956, 958, 959,
+ 961, 963, 965, 967, 969, 970, 972, 974, 976, 978, 980,
+ 982, 983, 985, 987, 989, 991, 993, 995, 997, 998, 1000,
+ 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1015, 1017, 1019, 1021,
+ 1023,
+};
+
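+/* gamma = 2.0 */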
+static const u16 xgamma10_20[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 6,
+ 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
+ 14, 15, 15, 15, 15, 16, 16, 16, 16, 17, 17,
+ 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20,
+ 20, 20, 21, 21, 21, 21, 22, 22, 22, 23, 23,
+ 23, 23, 24, 24, 24, 25, 25, 25, 26, 26, 26,
+ 27, 27, 27, 28, 28, 28, 29, 29, 29, 30, 30,
+ 30, 31, 31, 31, 32, 32, 32, 33, 33, 33, 34,
+ 34, 35, 35, 35, 36, 36, 36, 37, 37, 38, 38,
+ 38, 39, 39, 39, 40, 40, 41, 41, 41, 42, 42,
+ 43, 43, 44, 44, 44, 45, 45, 46, 46, 46, 47,
+ 47, 48, 48, 49, 49, 49, 50, 50, 51, 51, 52,
+ 52, 53, 53, 54, 54, 54, 55, 55, 56, 56, 57,
+ 57, 58, 58, 59, 59, 60, 60, 61, 61, 62, 62,
+ 63, 63, 64, 64, 65, 65, 66, 66, 67, 67, 68,
+ 68, 69, 69, 70, 70, 71, 71, 72, 72, 73, 73,
+ 74, 74, 75, 76, 76, 77, 77, 78, 78, 79, 79,
+ 80, 81, 81, 82, 82, 83, 83, 84, 84, 85, 86,
+ 86, 87, 87, 88, 89, 89, 90, 90, 91, 92, 92,
+ 93, 93, 94, 95, 95, 96, 96, 97, 98, 98, 99,
+ 99, 100, 101, 101, 102, 103, 103, 104, 105, 105, 106,
+ 106, 107, 108, 108, 109, 110, 110, 111, 112, 112, 113,
+ 114, 114, 115, 116, 116, 117, 118, 118, 119, 120, 120,
+ 121, 122, 122, 123, 124, 125, 125, 126, 127, 127, 128,
+ 129, 130, 130, 131, 132, 132, 133, 134, 135, 135, 136,
+ 137, 137, 138, 139, 140, 140, 141, 142, 143, 143, 144,
+ 145, 146, 146, 147, 148, 149, 149, 150, 151, 152, 153,
+ 153, 154, 155, 156, 156, 157, 158, 159, 160, 160, 161,
+ 162, 163, 164, 164, 165, 166, 167, 168, 168, 169, 170,
+ 171, 172, 172, 173, 174, 175, 176, 177, 177, 178, 179,
+ 180, 181, 182, 182, 183, 184, 185, 186, 187, 188, 188,
+ 189, 190, 191, 192, 193, 194, 194, 195, 196, 197, 198,
+ 199, 200, 201, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 210, 211, 212, 213, 214, 215, 216, 217, 218,
+ 219, 220, 221, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260,
+ 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283,
+ 284, 285, 286, 287, 288, 289, 290, 291, 292, 294, 295,
+ 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 318, 319,
+ 320, 321, 322, 323, 324, 325, 327, 328, 329, 330, 331,
+ 332, 333, 335, 336, 337, 338, 339, 340, 341, 343, 344,
+ 345, 346, 347, 348, 350, 351, 352, 353, 354, 355, 357,
+ 358, 359, 360, 361, 363, 364, 365, 366, 367, 369, 370,
+ 371, 372, 373, 375, 376, 377, 378, 379, 381, 382, 383,
+ 384, 386, 387, 388, 389, 390, 392, 393, 394, 395, 397,
+ 398, 399, 400, 402, 403, 404, 405, 407, 408, 409, 410,
+ 412, 413, 414, 416, 417, 418, 419, 421, 422, 423, 425,
+ 426, 427, 428, 430, 431, 432, 434, 435, 436, 437, 439,
+ 440, 441, 443, 444, 445, 447, 448, 449, 451, 452, 453,
+ 455, 456, 457, 459, 460, 461, 463, 464, 465, 467, 468,
+ 469, 471, 472, 474, 475, 476, 478, 479, 480, 482, 483,
+ 484, 486, 487, 489, 490, 491, 493, 494, 496, 497, 498,
+ 500, 501, 503, 504, 505, 507, 508, 510, 511, 512, 514,
+ 515, 517, 518, 519, 521, 522, 524, 525, 527, 528, 530,
+ 531, 532, 534, 535, 537, 538, 540, 541, 543, 544, 545,
+ 547, 548, 550, 551, 553, 554, 556, 557, 559, 560, 562,
+ 563, 565, 566, 568, 569, 571, 572, 574, 575, 577, 578,
+ 580, 581, 583, 584, 586, 587, 589, 590, 592, 593, 595,
+ 596, 598, 599, 601, 602, 604, 605, 607, 609, 610, 612,
+ 613, 615, 616, 618, 619, 621, 622, 624, 626, 627, 629,
+ 630, 632, 633, 635, 637, 638, 640, 641, 643, 645, 646,
+ 648, 649, 651, 652, 654, 656, 657, 659, 660, 662, 664,
+ 665, 667, 669, 670, 672, 673, 675, 677, 678, 680, 682,
+ 683, 685, 686, 688, 690, 691, 693, 695, 696, 698, 700,
+ 701, 703, 705, 706, 708, 710, 711, 713, 715, 716, 718,
+ 720, 721, 723, 725, 726, 728, 730, 731, 733, 735, 736,
+ 738, 740, 742, 743, 745, 747, 748, 750, 752, 754, 755,
+ 757, 759, 760, 762, 764, 766, 767, 769, 771, 773, 774,
+ 776, 778, 780, 781, 783, 785, 787, 788, 790, 792, 794,
+ 795, 797, 799, 801, 802, 804, 806, 808, 809, 811, 813,
+ 815, 817, 818, 820, 822, 824, 826, 827, 829, 831, 833,
+ 835, 836, 838, 840, 842, 844, 845, 847, 849, 851, 853,
+ 855, 856, 858, 860, 862, 864, 866, 867, 869, 871, 873,
+ 875, 877, 878, 880, 882, 884, 886, 888, 890, 892, 893,
+ 895, 897, 899, 901, 903, 905, 907, 908, 910, 912, 914,
+ 916, 918, 920, 922, 924, 925, 927, 929, 931, 933, 935,
+ 937, 939, 941, 943, 945, 946, 948, 950, 952, 954, 956,
+ 958, 960, 962, 964, 966, 968, 970, 972, 974, 976, 978,
+ 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999,
+ 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1021,
+ 1023,
+};
+
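+/* gamma = 2.1 */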
+static const u16 xgamma10_21[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11,
+ 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 14,
+ 14, 14, 14, 15, 15, 15, 15, 15, 16, 16, 16,
+ 16, 17, 17, 17, 17, 18, 18, 18, 18, 19, 19,
+ 19, 19, 20, 20, 20, 21, 21, 21, 21, 22, 22,
+ 22, 22, 23, 23, 23, 24, 24, 24, 24, 25, 25,
+ 25, 26, 26, 26, 27, 27, 27, 28, 28, 28, 29,
+ 29, 29, 29, 30, 30, 30, 31, 31, 31, 32, 32,
+ 33, 33, 33, 34, 34, 34, 35, 35, 35, 36, 36,
+ 36, 37, 37, 38, 38, 38, 39, 39, 39, 40, 40,
+ 41, 41, 41, 42, 42, 43, 43, 43, 44, 44, 45,
+ 45, 45, 46, 46, 47, 47, 47, 48, 48, 49, 49,
+ 50, 50, 50, 51, 51, 52, 52, 53, 53, 54, 54,
+ 54, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59,
+ 59, 60, 60, 61, 61, 62, 62, 63, 63, 64, 64,
+ 65, 65, 66, 66, 67, 67, 68, 68, 69, 69, 70,
+ 70, 71, 71, 72, 72, 73, 74, 74, 75, 75, 76,
+ 76, 77, 77, 78, 78, 79, 79, 80, 81, 81, 82,
+ 82, 83, 83, 84, 85, 85, 86, 86, 87, 87, 88,
+ 89, 89, 90, 90, 91, 91, 92, 93, 93, 94, 94,
+ 95, 96, 96, 97, 97, 98, 99, 99, 100, 101, 101,
+ 102, 102, 103, 104, 104, 105, 106, 106, 107, 108, 108,
+ 109, 110, 110, 111, 111, 112, 113, 113, 114, 115, 115,
+ 116, 117, 117, 118, 119, 120, 120, 121, 122, 122, 123,
+ 124, 124, 125, 126, 126, 127, 128, 129, 129, 130, 131,
+ 131, 132, 133, 134, 134, 135, 136, 136, 137, 138, 139,
+ 139, 140, 141, 142, 142, 143, 144, 145, 145, 146, 147,
+ 148, 148, 149, 150, 151, 152, 152, 153, 154, 155, 155,
+ 156, 157, 158, 159, 159, 160, 161, 162, 163, 163, 164,
+ 165, 166, 167, 167, 168, 169, 170, 171, 171, 172, 173,
+ 174, 175, 176, 176, 177, 178, 179, 180, 181, 181, 182,
+ 183, 184, 185, 186, 187, 187, 188, 189, 190, 191, 192,
+ 193, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243,
+ 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265,
+ 266, 267, 268, 269, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 300, 301,
+ 302, 303, 304, 305, 306, 307, 308, 310, 311, 312, 313,
+ 314, 315, 316, 317, 319, 320, 321, 322, 323, 324, 326,
+ 327, 328, 329, 330, 331, 332, 334, 335, 336, 337, 338,
+ 339, 341, 342, 343, 344, 345, 347, 348, 349, 350, 351,
+ 353, 354, 355, 356, 357, 359, 360, 361, 362, 363, 365,
+ 366, 367, 368, 370, 371, 372, 373, 375, 376, 377, 378,
+ 380, 381, 382, 383, 385, 386, 387, 388, 390, 391, 392,
+ 393, 395, 396, 397, 399, 400, 401, 402, 404, 405, 406,
+ 408, 409, 410, 411, 413, 414, 415, 417, 418, 419, 421,
+ 422, 423, 425, 426, 427, 429, 430, 431, 433, 434, 435,
+ 437, 438, 439, 441, 442, 443, 445, 446, 447, 449, 450,
+ 452, 453, 454, 456, 457, 458, 460, 461, 463, 464, 465,
+ 467, 468, 469, 471, 472, 474, 475, 477, 478, 479, 481,
+ 482, 484, 485, 486, 488, 489, 491, 492, 494, 495, 496,
+ 498, 499, 501, 502, 504, 505, 507, 508, 509, 511, 512,
+ 514, 515, 517, 518, 520, 521, 523, 524, 526, 527, 529,
+ 530, 532, 533, 535, 536, 538, 539, 541, 542, 544, 545,
+ 547, 548, 550, 551, 553, 554, 556, 557, 559, 560, 562,
+ 563, 565, 566, 568, 569, 571, 573, 574, 576, 577, 579,
+ 580, 582, 583, 585, 587, 588, 590, 591, 593, 595, 596,
+ 598, 599, 601, 602, 604, 606, 607, 609, 610, 612, 614,
+ 615, 617, 618, 620, 622, 623, 625, 627, 628, 630, 631,
+ 633, 635, 636, 638, 640, 641, 643, 645, 646, 648, 650,
+ 651, 653, 654, 656, 658, 659, 661, 663, 664, 666, 668,
+ 670, 671, 673, 675, 676, 678, 680, 681, 683, 685, 686,
+ 688, 690, 692, 693, 695, 697, 698, 700, 702, 704, 705,
+ 707, 709, 711, 712, 714, 716, 717, 719, 721, 723, 724,
+ 726, 728, 730, 732, 733, 735, 737, 739, 740, 742, 744,
+ 746, 747, 749, 751, 753, 755, 756, 758, 760, 762, 764,
+ 765, 767, 769, 771, 773, 774, 776, 778, 780, 782, 784,
+ 785, 787, 789, 791, 793, 795, 796, 798, 800, 802, 804,
+ 806, 807, 809, 811, 813, 815, 817, 819, 821, 822, 824,
+ 826, 828, 830, 832, 834, 836, 837, 839, 841, 843, 845,
+ 847, 849, 851, 853, 855, 856, 858, 860, 862, 864, 866,
+ 868, 870, 872, 874, 876, 878, 880, 882, 883, 885, 887,
+ 889, 891, 893, 895, 897, 899, 901, 903, 905, 907, 909,
+ 911, 913, 915, 917, 919, 921, 923, 925, 927, 929, 931,
+ 933, 935, 937, 939, 941, 943, 945, 947, 949, 951, 953,
+ 955, 957, 959, 961, 963, 965, 967, 969, 971, 973, 975,
+ 977, 979, 981, 984, 986, 988, 990, 992, 994, 996, 998,
+ 1000, 1002, 1004, 1006, 1008, 1010, 1013, 1015, 1017, 1019, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_22[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 9, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11,
+ 11, 11, 12, 12, 12, 12, 12, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 15, 15, 15, 15, 15, 16,
+ 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
+ 18, 19, 19, 19, 19, 20, 20, 20, 21, 21, 21,
+ 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24,
+ 24, 25, 25, 25, 25, 26, 26, 26, 27, 27, 27,
+ 28, 28, 28, 29, 29, 29, 29, 30, 30, 30, 31,
+ 31, 31, 32, 32, 32, 33, 33, 33, 34, 34, 34,
+ 35, 35, 35, 36, 36, 37, 37, 37, 38, 38, 38,
+ 39, 39, 39, 40, 40, 41, 41, 41, 42, 42, 43,
+ 43, 43, 44, 44, 44, 45, 45, 46, 46, 46, 47,
+ 47, 48, 48, 49, 49, 49, 50, 50, 51, 51, 52,
+ 52, 52, 53, 53, 54, 54, 55, 55, 55, 56, 56,
+ 57, 57, 58, 58, 59, 59, 60, 60, 61, 61, 61,
+ 62, 62, 63, 63, 64, 64, 65, 65, 66, 66, 67,
+ 67, 68, 68, 69, 69, 70, 70, 71, 71, 72, 72,
+ 73, 73, 74, 75, 75, 76, 76, 77, 77, 78, 78,
+ 79, 79, 80, 80, 81, 82, 82, 83, 83, 84, 84,
+ 85, 85, 86, 87, 87, 88, 88, 89, 89, 90, 91,
+ 91, 92, 92, 93, 94, 94, 95, 95, 96, 97, 97,
+ 98, 98, 99, 100, 100, 101, 102, 102, 103, 103, 104,
+ 105, 105, 106, 107, 107, 108, 109, 109, 110, 110, 111,
+ 112, 112, 113, 114, 114, 115, 116, 116, 117, 118, 118,
+ 119, 120, 121, 121, 122, 123, 123, 124, 125, 125, 126,
+ 127, 127, 128, 129, 130, 130, 131, 132, 132, 133, 134,
+ 135, 135, 136, 137, 138, 138, 139, 140, 141, 141, 142,
+ 143, 144, 144, 145, 146, 147, 147, 148, 149, 150, 150,
+ 151, 152, 153, 154, 154, 155, 156, 157, 157, 158, 159,
+ 160, 161, 161, 162, 163, 164, 165, 166, 166, 167, 168,
+ 169, 170, 170, 171, 172, 173, 174, 175, 175, 176, 177,
+ 178, 179, 180, 181, 181, 182, 183, 184, 185, 186, 187,
+ 187, 188, 189, 190, 191, 192, 193, 194, 194, 195, 196,
+ 197, 198, 199, 200, 201, 202, 203, 203, 204, 205, 206,
+ 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
+ 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260,
+ 261, 262, 263, 264, 265, 266, 267, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 294, 295, 296,
+ 297, 298, 299, 300, 301, 303, 304, 305, 306, 307, 308,
+ 309, 311, 312, 313, 314, 315, 316, 317, 319, 320, 321,
+ 322, 323, 324, 326, 327, 328, 329, 330, 332, 333, 334,
+ 335, 336, 338, 339, 340, 341, 342, 344, 345, 346, 347,
+ 348, 350, 351, 352, 353, 355, 356, 357, 358, 360, 361,
+ 362, 363, 365, 366, 367, 368, 370, 371, 372, 373, 375,
+ 376, 377, 378, 380, 381, 382, 384, 385, 386, 387, 389,
+ 390, 391, 393, 394, 395, 397, 398, 399, 401, 402, 403,
+ 405, 406, 407, 409, 410, 411, 413, 414, 415, 417, 418,
+ 419, 421, 422, 423, 425, 426, 427, 429, 430, 432, 433,
+ 434, 436, 437, 438, 440, 441, 443, 444, 445, 447, 448,
+ 450, 451, 452, 454, 455, 457, 458, 459, 461, 462, 464,
+ 465, 467, 468, 469, 471, 472, 474, 475, 477, 478, 480,
+ 481, 483, 484, 485, 487, 488, 490, 491, 493, 494, 496,
+ 497, 499, 500, 502, 503, 505, 506, 508, 509, 511, 512,
+ 514, 515, 517, 518, 520, 521, 523, 524, 526, 527, 529,
+ 530, 532, 534, 535, 537, 538, 540, 541, 543, 544, 546,
+ 548, 549, 551, 552, 554, 555, 557, 559, 560, 562, 563,
+ 565, 567, 568, 570, 571, 573, 575, 576, 578, 579, 581,
+ 583, 584, 586, 587, 589, 591, 592, 594, 596, 597, 599,
+ 601, 602, 604, 605, 607, 609, 610, 612, 614, 615, 617,
+ 619, 620, 622, 624, 625, 627, 629, 631, 632, 634, 636,
+ 637, 639, 641, 642, 644, 646, 648, 649, 651, 653, 654,
+ 656, 658, 660, 661, 663, 665, 667, 668, 670, 672, 674,
+ 675, 677, 679, 681, 682, 684, 686, 688, 689, 691, 693,
+ 695, 697, 698, 700, 702, 704, 705, 707, 709, 711, 713,
+ 714, 716, 718, 720, 722, 724, 725, 727, 729, 731, 733,
+ 735, 736, 738, 740, 742, 744, 746, 747, 749, 751, 753,
+ 755, 757, 759, 760, 762, 764, 766, 768, 770, 772, 774,
+ 776, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795,
+ 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816,
+ 818, 820, 822, 824, 826, 828, 829, 831, 833, 835, 837,
+ 839, 841, 843, 845, 847, 849, 851, 853, 855, 857, 859,
+ 861, 863, 865, 867, 869, 871, 873, 875, 877, 879, 881,
+ 883, 885, 887, 889, 892, 894, 896, 898, 900, 902, 904,
+ 906, 908, 910, 912, 914, 916, 918, 920, 922, 925, 927,
+ 929, 931, 933, 935, 937, 939, 941, 943, 945, 948, 950,
+ 952, 954, 956, 958, 960, 962, 965, 967, 969, 971, 973,
+ 975, 977, 980, 982, 984, 986, 988, 990, 992, 995, 997,
+ 999, 1001, 1003, 1005, 1008, 1010, 1012, 1014, 1016, 1019, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_23[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9,
+ 9, 9, 10, 10, 10, 10, 10, 10, 11, 11, 11,
+ 11, 11, 11, 12, 12, 12, 12, 12, 13, 13, 13,
+ 13, 13, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18,
+ 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20,
+ 21, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23,
+ 23, 24, 24, 24, 25, 25, 25, 25, 26, 26, 26,
+ 27, 27, 27, 27, 28, 28, 28, 29, 29, 29, 30,
+ 30, 30, 30, 31, 31, 31, 32, 32, 32, 33, 33,
+ 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37,
+ 37, 38, 38, 38, 39, 39, 39, 40, 40, 40, 41,
+ 41, 42, 42, 42, 43, 43, 43, 44, 44, 45, 45,
+ 45, 46, 46, 47, 47, 47, 48, 48, 49, 49, 49,
+ 50, 50, 51, 51, 52, 52, 52, 53, 53, 54, 54,
+ 55, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59,
+ 59, 60, 60, 61, 61, 62, 62, 63, 63, 64, 64,
+ 65, 65, 66, 66, 67, 67, 68, 68, 69, 69, 70,
+ 70, 71, 71, 72, 72, 73, 73, 74, 74, 75, 75,
+ 76, 76, 77, 77, 78, 78, 79, 80, 80, 81, 81,
+ 82, 82, 83, 83, 84, 85, 85, 86, 86, 87, 87,
+ 88, 89, 89, 90, 90, 91, 91, 92, 93, 93, 94,
+ 94, 95, 96, 96, 97, 97, 98, 99, 99, 100, 100,
+ 101, 102, 102, 103, 104, 104, 105, 106, 106, 107, 107,
+ 108, 109, 109, 110, 111, 111, 112, 113, 113, 114, 115,
+ 115, 116, 117, 117, 118, 119, 119, 120, 121, 121, 122,
+ 123, 124, 124, 125, 126, 126, 127, 128, 128, 129, 130,
+ 131, 131, 132, 133, 133, 134, 135, 136, 136, 137, 138,
+ 139, 139, 140, 141, 142, 142, 143, 144, 145, 145, 146,
+ 147, 148, 148, 149, 150, 151, 152, 152, 153, 154, 155,
+ 156, 156, 157, 158, 159, 160, 160, 161, 162, 163, 164,
+ 164, 165, 166, 167, 168, 168, 169, 170, 171, 172, 173,
+ 174, 174, 175, 176, 177, 178, 179, 179, 180, 181, 182,
+ 183, 184, 185, 186, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 248, 249, 250, 251, 252, 253, 254, 255, 256,
+ 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 268,
+ 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 280,
+ 281, 282, 283, 284, 285, 286, 287, 288, 290, 291, 292,
+ 293, 294, 295, 296, 298, 299, 300, 301, 302, 303, 304,
+ 306, 307, 308, 309, 310, 311, 313, 314, 315, 316, 317,
+ 319, 320, 321, 322, 323, 325, 326, 327, 328, 329, 331,
+ 332, 333, 334, 335, 337, 338, 339, 340, 342, 343, 344,
+ 345, 347, 348, 349, 350, 352, 353, 354, 355, 357, 358,
+ 359, 360, 362, 363, 364, 366, 367, 368, 369, 371, 372,
+ 373, 375, 376, 377, 379, 380, 381, 383, 384, 385, 386,
+ 388, 389, 390, 392, 393, 394, 396, 397, 399, 400, 401,
+ 403, 404, 405, 407, 408, 409, 411, 412, 414, 415, 416,
+ 418, 419, 420, 422, 423, 425, 426, 427, 429, 430, 432,
+ 433, 435, 436, 437, 439, 440, 442, 443, 444, 446, 447,
+ 449, 450, 452, 453, 455, 456, 458, 459, 460, 462, 463,
+ 465, 466, 468, 469, 471, 472, 474, 475, 477, 478, 480,
+ 481, 483, 484, 486, 487, 489, 490, 492, 493, 495, 496,
+ 498, 499, 501, 502, 504, 506, 507, 509, 510, 512, 513,
+ 515, 516, 518, 520, 521, 523, 524, 526, 527, 529, 531,
+ 532, 534, 535, 537, 539, 540, 542, 543, 545, 547, 548,
+ 550, 551, 553, 555, 556, 558, 560, 561, 563, 565, 566,
+ 568, 569, 571, 573, 574, 576, 578, 579, 581, 583, 584,
+ 586, 588, 590, 591, 593, 595, 596, 598, 600, 601, 603,
+ 605, 606, 608, 610, 612, 613, 615, 617, 619, 620, 622,
+ 624, 625, 627, 629, 631, 632, 634, 636, 638, 640, 641,
+ 643, 645, 647, 648, 650, 652, 654, 655, 657, 659, 661,
+ 663, 664, 666, 668, 670, 672, 674, 675, 677, 679, 681,
+ 683, 684, 686, 688, 690, 692, 694, 696, 697, 699, 701,
+ 703, 705, 707, 709, 710, 712, 714, 716, 718, 720, 722,
+ 724, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743,
+ 745, 746, 748, 750, 752, 754, 756, 758, 760, 762, 764,
+ 766, 768, 770, 772, 774, 776, 778, 780, 782, 784, 786,
+ 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807,
+ 809, 811, 814, 816, 818, 820, 822, 824, 826, 828, 830,
+ 832, 834, 836, 838, 840, 842, 844, 846, 848, 850, 852,
+ 854, 857, 859, 861, 863, 865, 867, 869, 871, 873, 875,
+ 878, 880, 882, 884, 886, 888, 890, 892, 894, 897, 899,
+ 901, 903, 905, 907, 909, 912, 914, 916, 918, 920, 922,
+ 925, 927, 929, 931, 933, 936, 938, 940, 942, 944, 946,
+ 949, 951, 953, 955, 958, 960, 962, 964, 966, 969, 971,
+ 973, 975, 978, 980, 982, 984, 987, 989, 991, 993, 996,
+ 998, 1000, 1002, 1005, 1007, 1009, 1012, 1014, 1016, 1018, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_24[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9,
+ 9, 9, 9, 10, 10, 10, 10, 10, 10, 11, 11,
+ 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 13,
+ 13, 13, 13, 13, 14, 14, 14, 14, 14, 15, 15,
+ 15, 15, 15, 16, 16, 16, 16, 16, 17, 17, 17,
+ 17, 18, 18, 18, 18, 18, 19, 19, 19, 19, 20,
+ 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22,
+ 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25,
+ 26, 26, 26, 26, 27, 27, 27, 28, 28, 28, 28,
+ 29, 29, 29, 30, 30, 30, 31, 31, 31, 32, 32,
+ 32, 32, 33, 33, 33, 34, 34, 34, 35, 35, 35,
+ 36, 36, 36, 37, 37, 38, 38, 38, 39, 39, 39,
+ 40, 40, 40, 41, 41, 41, 42, 42, 43, 43, 43,
+ 44, 44, 44, 45, 45, 46, 46, 46, 47, 47, 48,
+ 48, 48, 49, 49, 50, 50, 50, 51, 51, 52, 52,
+ 53, 53, 53, 54, 54, 55, 55, 56, 56, 56, 57,
+ 57, 58, 58, 59, 59, 60, 60, 61, 61, 61, 62,
+ 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67,
+ 68, 68, 69, 69, 70, 70, 71, 71, 72, 72, 73,
+ 73, 74, 74, 75, 75, 76, 76, 77, 77, 78, 79,
+ 79, 80, 80, 81, 81, 82, 82, 83, 83, 84, 85,
+ 85, 86, 86, 87, 87, 88, 89, 89, 90, 90, 91,
+ 91, 92, 93, 93, 94, 94, 95, 96, 96, 97, 97,
+ 98, 99, 99, 100, 100, 101, 102, 102, 103, 104, 104,
+ 105, 106, 106, 107, 107, 108, 109, 109, 110, 111, 111,
+ 112, 113, 113, 114, 115, 115, 116, 117, 117, 118, 119,
+ 119, 120, 121, 121, 122, 123, 124, 124, 125, 126, 126,
+ 127, 128, 129, 129, 130, 131, 131, 132, 133, 134, 134,
+ 135, 136, 137, 137, 138, 139, 140, 140, 141, 142, 143,
+ 143, 144, 145, 146, 146, 147, 148, 149, 149, 150, 151,
+ 152, 153, 153, 154, 155, 156, 157, 157, 158, 159, 160,
+ 161, 161, 162, 163, 164, 165, 166, 166, 167, 168, 169,
+ 170, 171, 171, 172, 173, 174, 175, 176, 177, 177, 178,
+ 179, 180, 181, 182, 183, 184, 184, 185, 186, 187, 188,
+ 189, 190, 191, 192, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230,
+ 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 268, 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 279, 280, 281, 282, 283, 284, 285, 287, 288, 289,
+ 290, 291, 292, 293, 295, 296, 297, 298, 299, 300, 302,
+ 303, 304, 305, 306, 308, 309, 310, 311, 312, 314, 315,
+ 316, 317, 318, 320, 321, 322, 323, 324, 326, 327, 328,
+ 329, 331, 332, 333, 334, 336, 337, 338, 339, 341, 342,
+ 343, 344, 346, 347, 348, 350, 351, 352, 353, 355, 356,
+ 357, 359, 360, 361, 363, 364, 365, 367, 368, 369, 370,
+ 372, 373, 374, 376, 377, 378, 380, 381, 383, 384, 385,
+ 387, 388, 389, 391, 392, 393, 395, 396, 398, 399, 400,
+ 402, 403, 405, 406, 407, 409, 410, 412, 413, 414, 416,
+ 417, 419, 420, 421, 423, 424, 426, 427, 429, 430, 432,
+ 433, 434, 436, 437, 439, 440, 442, 443, 445, 446, 448,
+ 449, 451, 452, 454, 455, 457, 458, 460, 461, 463, 464,
+ 466, 467, 469, 470, 472, 473, 475, 476, 478, 479, 481,
+ 483, 484, 486, 487, 489, 490, 492, 493, 495, 497, 498,
+ 500, 501, 503, 505, 506, 508, 509, 511, 512, 514, 516,
+ 517, 519, 521, 522, 524, 525, 527, 529, 530, 532, 534,
+ 535, 537, 539, 540, 542, 543, 545, 547, 548, 550, 552,
+ 553, 555, 557, 559, 560, 562, 564, 565, 567, 569, 570,
+ 572, 574, 576, 577, 579, 581, 582, 584, 586, 588, 589,
+ 591, 593, 595, 596, 598, 600, 602, 603, 605, 607, 609,
+ 610, 612, 614, 616, 618, 619, 621, 623, 625, 627, 628,
+ 630, 632, 634, 636, 637, 639, 641, 643, 645, 647, 648,
+ 650, 652, 654, 656, 658, 660, 661, 663, 665, 667, 669,
+ 671, 673, 674, 676, 678, 680, 682, 684, 686, 688, 690,
+ 692, 693, 695, 697, 699, 701, 703, 705, 707, 709, 711,
+ 713, 715, 717, 719, 721, 723, 724, 726, 728, 730, 732,
+ 734, 736, 738, 740, 742, 744, 746, 748, 750, 752, 754,
+ 756, 758, 760, 762, 764, 766, 768, 770, 772, 774, 777,
+ 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799,
+ 801, 803, 805, 808, 810, 812, 814, 816, 818, 820, 822,
+ 824, 826, 829, 831, 833, 835, 837, 839, 841, 844, 846,
+ 848, 850, 852, 854, 856, 859, 861, 863, 865, 867, 870,
+ 872, 874, 876, 878, 880, 883, 885, 887, 889, 891, 894,
+ 896, 898, 900, 903, 905, 907, 909, 912, 914, 916, 918,
+ 921, 923, 925, 927, 930, 932, 934, 936, 939, 941, 943,
+ 946, 948, 950, 952, 955, 957, 959, 962, 964, 966, 969,
+ 971, 973, 976, 978, 980, 983, 985, 987, 990, 992, 994,
+ 997, 999, 1002, 1004, 1006, 1009, 1011, 1013, 1016, 1018, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_25[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9,
+ 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 11,
+ 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
+ 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
+ 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17,
+ 17, 17, 17, 18, 18, 18, 18, 18, 19, 19, 19,
+ 19, 20, 20, 20, 20, 20, 21, 21, 21, 21, 22,
+ 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 25,
+ 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 28,
+ 28, 28, 28, 29, 29, 29, 30, 30, 30, 31, 31,
+ 31, 31, 32, 32, 32, 33, 33, 33, 34, 34, 34,
+ 35, 35, 35, 36, 36, 36, 37, 37, 37, 38, 38,
+ 38, 39, 39, 39, 40, 40, 40, 41, 41, 42, 42,
+ 42, 43, 43, 43, 44, 44, 45, 45, 45, 46, 46,
+ 46, 47, 47, 48, 48, 48, 49, 49, 50, 50, 50,
+ 51, 51, 52, 52, 53, 53, 53, 54, 54, 55, 55,
+ 56, 56, 56, 57, 57, 58, 58, 59, 59, 60, 60,
+ 60, 61, 61, 62, 62, 63, 63, 64, 64, 65, 65,
+ 66, 66, 67, 67, 68, 68, 69, 69, 70, 70, 71,
+ 71, 72, 72, 73, 73, 74, 74, 75, 75, 76, 76,
+ 77, 77, 78, 78, 79, 79, 80, 80, 81, 82, 82,
+ 83, 83, 84, 84, 85, 85, 86, 87, 87, 88, 88,
+ 89, 89, 90, 91, 91, 92, 92, 93, 94, 94, 95,
+ 95, 96, 97, 97, 98, 98, 99, 100, 100, 101, 102,
+ 102, 103, 103, 104, 105, 105, 106, 107, 107, 108, 109,
+ 109, 110, 110, 111, 112, 112, 113, 114, 114, 115, 116,
+ 117, 117, 118, 119, 119, 120, 121, 121, 122, 123, 123,
+ 124, 125, 126, 126, 127, 128, 128, 129, 130, 131, 131,
+ 132, 133, 133, 134, 135, 136, 136, 137, 138, 139, 139,
+ 140, 141, 142, 143, 143, 144, 145, 146, 146, 147, 148,
+ 149, 149, 150, 151, 152, 153, 153, 154, 155, 156, 157,
+ 158, 158, 159, 160, 161, 162, 162, 163, 164, 165, 166,
+ 167, 167, 168, 169, 170, 171, 172, 173, 173, 174, 175,
+ 176, 177, 178, 179, 180, 180, 181, 182, 183, 184, 185,
+ 186, 187, 188, 188, 189, 190, 191, 192, 193, 194, 195,
+ 196, 197, 198, 199, 200, 200, 201, 202, 203, 204, 205,
+ 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
+ 239, 240, 241, 242, 243, 244, 245, 247, 248, 249, 250,
+ 251, 252, 253, 254, 255, 256, 257, 258, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 270, 271, 272, 273, 274,
+ 275, 276, 277, 279, 280, 281, 282, 283, 284, 286, 287,
+ 288, 289, 290, 291, 293, 294, 295, 296, 297, 298, 300,
+ 301, 302, 303, 304, 306, 307, 308, 309, 311, 312, 313,
+ 314, 315, 317, 318, 319, 320, 322, 323, 324, 325, 327,
+ 328, 329, 330, 332, 333, 334, 336, 337, 338, 339, 341,
+ 342, 343, 345, 346, 347, 349, 350, 351, 352, 354, 355,
+ 356, 358, 359, 360, 362, 363, 364, 366, 367, 369, 370,
+ 371, 373, 374, 375, 377, 378, 379, 381, 382, 384, 385,
+ 386, 388, 389, 391, 392, 393, 395, 396, 398, 399, 400,
+ 402, 403, 405, 406, 408, 409, 411, 412, 413, 415, 416,
+ 418, 419, 421, 422, 424, 425, 427, 428, 430, 431, 433,
+ 434, 436, 437, 439, 440, 442, 443, 445, 446, 448, 449,
+ 451, 452, 454, 455, 457, 458, 460, 461, 463, 465, 466,
+ 468, 469, 471, 472, 474, 476, 477, 479, 480, 482, 483,
+ 485, 487, 488, 490, 491, 493, 495, 496, 498, 500, 501,
+ 503, 504, 506, 508, 509, 511, 513, 514, 516, 518, 519,
+ 521, 523, 524, 526, 528, 529, 531, 533, 534, 536, 538,
+ 540, 541, 543, 545, 546, 548, 550, 552, 553, 555, 557,
+ 558, 560, 562, 564, 565, 567, 569, 571, 572, 574, 576,
+ 578, 580, 581, 583, 585, 587, 588, 590, 592, 594, 596,
+ 597, 599, 601, 603, 605, 607, 608, 610, 612, 614, 616,
+ 618, 619, 621, 623, 625, 627, 629, 631, 632, 634, 636,
+ 638, 640, 642, 644, 646, 648, 649, 651, 653, 655, 657,
+ 659, 661, 663, 665, 667, 669, 671, 673, 674, 676, 678,
+ 680, 682, 684, 686, 688, 690, 692, 694, 696, 698, 700,
+ 702, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722,
+ 724, 726, 728, 730, 732, 734, 736, 739, 741, 743, 745,
+ 747, 749, 751, 753, 755, 757, 759, 761, 763, 766, 768,
+ 770, 772, 774, 776, 778, 780, 782, 785, 787, 789, 791,
+ 793, 795, 797, 800, 802, 804, 806, 808, 810, 813, 815,
+ 817, 819, 821, 824, 826, 828, 830, 832, 835, 837, 839,
+ 841, 843, 846, 848, 850, 852, 855, 857, 859, 861, 864,
+ 866, 868, 870, 873, 875, 877, 880, 882, 884, 886, 889,
+ 891, 893, 896, 898, 900, 903, 905, 907, 910, 912, 914,
+ 917, 919, 921, 924, 926, 928, 931, 933, 935, 938, 940,
+ 942, 945, 947, 950, 952, 954, 957, 959, 962, 964, 966,
+ 969, 971, 974, 976, 979, 981, 983, 986, 988, 991, 993,
+ 996, 998, 1001, 1003, 1006, 1008, 1011, 1013, 1016, 1018, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_26[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12,
+ 12, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14,
+ 14, 14, 15, 15, 15, 15, 15, 16, 16, 16, 16,
+ 16, 17, 17, 17, 17, 18, 18, 18, 18, 18, 19,
+ 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21,
+ 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24,
+ 24, 24, 25, 25, 25, 25, 26, 26, 26, 27, 27,
+ 27, 27, 28, 28, 28, 28, 29, 29, 29, 30, 30,
+ 30, 31, 31, 31, 31, 32, 32, 32, 33, 33, 33,
+ 34, 34, 34, 35, 35, 35, 36, 36, 36, 37, 37,
+ 37, 38, 38, 38, 39, 39, 39, 40, 40, 40, 41,
+ 41, 41, 42, 42, 43, 43, 43, 44, 44, 44, 45,
+ 45, 46, 46, 46, 47, 47, 47, 48, 48, 49, 49,
+ 49, 50, 50, 51, 51, 51, 52, 52, 53, 53, 54,
+ 54, 54, 55, 55, 56, 56, 57, 57, 57, 58, 58,
+ 59, 59, 60, 60, 61, 61, 62, 62, 62, 63, 63,
+ 64, 64, 65, 65, 66, 66, 67, 67, 68, 68, 69,
+ 69, 70, 70, 71, 71, 72, 72, 73, 73, 74, 74,
+ 75, 75, 76, 76, 77, 77, 78, 78, 79, 80, 80,
+ 81, 81, 82, 82, 83, 83, 84, 84, 85, 86, 86,
+ 87, 87, 88, 88, 89, 90, 90, 91, 91, 92, 93,
+ 93, 94, 94, 95, 96, 96, 97, 97, 98, 99, 99,
+ 100, 100, 101, 102, 102, 103, 104, 104, 105, 106, 106,
+ 107, 107, 108, 109, 109, 110, 111, 111, 112, 113, 113,
+ 114, 115, 115, 116, 117, 117, 118, 119, 120, 120, 121,
+ 122, 122, 123, 124, 124, 125, 126, 127, 127, 128, 129,
+ 129, 130, 131, 132, 132, 133, 134, 135, 135, 136, 137,
+ 138, 138, 139, 140, 141, 141, 142, 143, 144, 145, 145,
+ 146, 147, 148, 149, 149, 150, 151, 152, 153, 153, 154,
+ 155, 156, 157, 157, 158, 159, 160, 161, 162, 162, 163,
+ 164, 165, 166, 167, 167, 168, 169, 170, 171, 172, 173,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203,
+ 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225,
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236,
+ 237, 238, 239, 240, 241, 242, 243, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254, 256, 257, 258, 259, 260,
+ 261, 262, 263, 264, 266, 267, 268, 269, 270, 271, 272,
+ 274, 275, 276, 277, 278, 279, 281, 282, 283, 284, 285,
+ 286, 288, 289, 290, 291, 292, 294, 295, 296, 297, 299,
+ 300, 301, 302, 303, 305, 306, 307, 308, 310, 311, 312,
+ 313, 315, 316, 317, 318, 320, 321, 322, 323, 325, 326,
+ 327, 329, 330, 331, 333, 334, 335, 336, 338, 339, 340,
+ 342, 343, 344, 346, 347, 348, 350, 351, 352, 354, 355,
+ 356, 358, 359, 361, 362, 363, 365, 366, 367, 369, 370,
+ 372, 373, 374, 376, 377, 379, 380, 381, 383, 384, 386,
+ 387, 389, 390, 391, 393, 394, 396, 397, 399, 400, 402,
+ 403, 405, 406, 407, 409, 410, 412, 413, 415, 416, 418,
+ 419, 421, 422, 424, 425, 427, 428, 430, 432, 433, 435,
+ 436, 438, 439, 441, 442, 444, 445, 447, 449, 450, 452,
+ 453, 455, 456, 458, 460, 461, 463, 464, 466, 468, 469,
+ 471, 472, 474, 476, 477, 479, 481, 482, 484, 485, 487,
+ 489, 490, 492, 494, 495, 497, 499, 500, 502, 504, 505,
+ 507, 509, 510, 512, 514, 516, 517, 519, 521, 522, 524,
+ 526, 528, 529, 531, 533, 535, 536, 538, 540, 542, 543,
+ 545, 547, 549, 550, 552, 554, 556, 558, 559, 561, 563,
+ 565, 567, 568, 570, 572, 574, 576, 577, 579, 581, 583,
+ 585, 587, 588, 590, 592, 594, 596, 598, 600, 601, 603,
+ 605, 607, 609, 611, 613, 615, 617, 619, 620, 622, 624,
+ 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 646,
+ 648, 650, 651, 653, 655, 657, 659, 661, 663, 665, 667,
+ 669, 671, 673, 675, 677, 679, 681, 683, 685, 688, 690,
+ 692, 694, 696, 698, 700, 702, 704, 706, 708, 710, 712,
+ 714, 716, 718, 721, 723, 725, 727, 729, 731, 733, 735,
+ 737, 740, 742, 744, 746, 748, 750, 752, 755, 757, 759,
+ 761, 763, 765, 768, 770, 772, 774, 776, 779, 781, 783,
+ 785, 787, 790, 792, 794, 796, 798, 801, 803, 805, 807,
+ 810, 812, 814, 816, 819, 821, 823, 826, 828, 830, 832,
+ 835, 837, 839, 842, 844, 846, 849, 851, 853, 855, 858,
+ 860, 862, 865, 867, 870, 872, 874, 877, 879, 881, 884,
+ 886, 888, 891, 893, 896, 898, 900, 903, 905, 908, 910,
+ 913, 915, 917, 920, 922, 925, 927, 930, 932, 934, 937,
+ 939, 942, 944, 947, 949, 952, 954, 957, 959, 962, 964,
+ 967, 969, 972, 974, 977, 979, 982, 984, 987, 990, 992,
+ 995, 997, 1000, 1002, 1005, 1007, 1010, 1013, 1015, 1018, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_27[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
+ 10, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12,
+ 12, 12, 12, 13, 13, 13, 13, 13, 14, 14, 14,
+ 14, 14, 14, 15, 15, 15, 15, 15, 16, 16, 16,
+ 16, 16, 17, 17, 17, 17, 17, 18, 18, 18, 18,
+ 18, 19, 19, 19, 19, 20, 20, 20, 20, 20, 21,
+ 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23,
+ 24, 24, 24, 24, 25, 25, 25, 25, 26, 26, 26,
+ 26, 27, 27, 27, 27, 28, 28, 28, 29, 29, 29,
+ 29, 30, 30, 30, 31, 31, 31, 32, 32, 32, 32,
+ 33, 33, 33, 34, 34, 34, 35, 35, 35, 36, 36,
+ 36, 37, 37, 37, 38, 38, 38, 39, 39, 39, 40,
+ 40, 40, 41, 41, 41, 42, 42, 43, 43, 43, 44,
+ 44, 44, 45, 45, 46, 46, 46, 47, 47, 47, 48,
+ 48, 49, 49, 49, 50, 50, 51, 51, 51, 52, 52,
+ 53, 53, 54, 54, 54, 55, 55, 56, 56, 57, 57,
+ 57, 58, 58, 59, 59, 60, 60, 61, 61, 61, 62,
+ 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67,
+ 68, 68, 69, 69, 70, 70, 71, 71, 72, 72, 73,
+ 73, 74, 74, 75, 75, 76, 76, 77, 77, 78, 78,
+ 79, 79, 80, 81, 81, 82, 82, 83, 83, 84, 84,
+ 85, 86, 86, 87, 87, 88, 88, 89, 90, 90, 91,
+ 91, 92, 92, 93, 94, 94, 95, 95, 96, 97, 97,
+ 98, 99, 99, 100, 100, 101, 102, 102, 103, 104, 104,
+ 105, 105, 106, 107, 107, 108, 109, 109, 110, 111, 111,
+ 112, 113, 113, 114, 115, 115, 116, 117, 118, 118, 119,
+ 120, 120, 121, 122, 122, 123, 124, 125, 125, 126, 127,
+ 127, 128, 129, 130, 130, 131, 132, 133, 133, 134, 135,
+ 136, 136, 137, 138, 139, 139, 140, 141, 142, 143, 143,
+ 144, 145, 146, 146, 147, 148, 149, 150, 150, 151, 152,
+ 153, 154, 155, 155, 156, 157, 158, 159, 160, 160, 161,
+ 162, 163, 164, 165, 165, 166, 167, 168, 169, 170, 171,
+ 172, 172, 173, 174, 175, 176, 177, 178, 179, 180, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231, 233, 234, 235,
+ 236, 237, 238, 239, 240, 241, 242, 243, 244, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 256, 257, 258, 259,
+ 260, 261, 262, 263, 265, 266, 267, 268, 269, 270, 272,
+ 273, 274, 275, 276, 278, 279, 280, 281, 282, 283, 285,
+ 286, 287, 288, 290, 291, 292, 293, 294, 296, 297, 298,
+ 299, 301, 302, 303, 304, 306, 307, 308, 309, 311, 312,
+ 313, 315, 316, 317, 318, 320, 321, 322, 324, 325, 326,
+ 328, 329, 330, 332, 333, 334, 336, 337, 338, 340, 341,
+ 342, 344, 345, 346, 348, 349, 351, 352, 353, 355, 356,
+ 357, 359, 360, 362, 363, 364, 366, 367, 369, 370, 372,
+ 373, 374, 376, 377, 379, 380, 382, 383, 385, 386, 387,
+ 389, 390, 392, 393, 395, 396, 398, 399, 401, 402, 404,
+ 405, 407, 408, 410, 411, 413, 414, 416, 417, 419, 421,
+ 422, 424, 425, 427, 428, 430, 431, 433, 435, 436, 438,
+ 439, 441, 442, 444, 446, 447, 449, 450, 452, 454, 455,
+ 457, 459, 460, 462, 463, 465, 467, 468, 470, 472, 473,
+ 475, 477, 478, 480, 482, 483, 485, 487, 488, 490, 492,
+ 494, 495, 497, 499, 500, 502, 504, 506, 507, 509, 511,
+ 513, 514, 516, 518, 520, 521, 523, 525, 527, 528, 530,
+ 532, 534, 536, 537, 539, 541, 543, 545, 546, 548, 550,
+ 552, 554, 556, 557, 559, 561, 563, 565, 567, 569, 570,
+ 572, 574, 576, 578, 580, 582, 584, 586, 587, 589, 591,
+ 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 613,
+ 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634,
+ 636, 638, 640, 642, 644, 646, 648, 650, 652, 654, 656,
+ 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679,
+ 681, 683, 685, 688, 690, 692, 694, 696, 698, 700, 702,
+ 705, 707, 709, 711, 713, 715, 717, 720, 722, 724, 726,
+ 728, 730, 733, 735, 737, 739, 741, 744, 746, 748, 750,
+ 752, 755, 757, 759, 761, 764, 766, 768, 770, 773, 775,
+ 777, 779, 782, 784, 786, 789, 791, 793, 795, 798, 800,
+ 802, 805, 807, 809, 812, 814, 816, 819, 821, 823, 826,
+ 828, 831, 833, 835, 838, 840, 842, 845, 847, 850, 852,
+ 854, 857, 859, 862, 864, 867, 869, 871, 874, 876, 879,
+ 881, 884, 886, 889, 891, 894, 896, 899, 901, 903, 906,
+ 908, 911, 914, 916, 919, 921, 924, 926, 929, 931, 934,
+ 936, 939, 941, 944, 947, 949, 952, 954, 957, 959, 962,
+ 965, 967, 970, 973, 975, 978, 980, 983, 986, 988, 991,
+ 994, 996, 999, 1002, 1004, 1007, 1010, 1012, 1015, 1018, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_28[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10,
+ 10, 10, 11, 11, 11, 11, 11, 11, 12, 12, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 14,
+ 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 16,
+ 16, 16, 16, 16, 17, 17, 17, 17, 17, 18, 18,
+ 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20,
+ 20, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23,
+ 23, 23, 24, 24, 24, 24, 25, 25, 25, 25, 26,
+ 26, 26, 26, 27, 27, 27, 27, 28, 28, 28, 29,
+ 29, 29, 29, 30, 30, 30, 31, 31, 31, 31, 32,
+ 32, 32, 33, 33, 33, 34, 34, 34, 35, 35, 35,
+ 35, 36, 36, 36, 37, 37, 37, 38, 38, 38, 39,
+ 39, 40, 40, 40, 41, 41, 41, 42, 42, 42, 43,
+ 43, 43, 44, 44, 45, 45, 45, 46, 46, 46, 47,
+ 47, 48, 48, 48, 49, 49, 50, 50, 50, 51, 51,
+ 52, 52, 52, 53, 53, 54, 54, 55, 55, 55, 56,
+ 56, 57, 57, 58, 58, 58, 59, 59, 60, 60, 61,
+ 61, 62, 62, 63, 63, 63, 64, 64, 65, 65, 66,
+ 66, 67, 67, 68, 68, 69, 69, 70, 70, 71, 71,
+ 72, 72, 73, 73, 74, 74, 75, 75, 76, 76, 77,
+ 77, 78, 79, 79, 80, 80, 81, 81, 82, 82, 83,
+ 83, 84, 85, 85, 86, 86, 87, 87, 88, 89, 89,
+ 90, 90, 91, 92, 92, 93, 93, 94, 95, 95, 96,
+ 96, 97, 98, 98, 99, 99, 100, 101, 101, 102, 103,
+ 103, 104, 105, 105, 106, 106, 107, 108, 108, 109, 110,
+ 110, 111, 112, 112, 113, 114, 115, 115, 116, 117, 117,
+ 118, 119, 119, 120, 121, 122, 122, 123, 124, 124, 125,
+ 126, 127, 127, 128, 129, 130, 130, 131, 132, 132, 133,
+ 134, 135, 136, 136, 137, 138, 139, 139, 140, 141, 142,
+ 143, 143, 144, 145, 146, 146, 147, 148, 149, 150, 151,
+ 151, 152, 153, 154, 155, 155, 156, 157, 158, 159, 160,
+ 161, 161, 162, 163, 164, 165, 166, 167, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 241, 242, 243, 244, 245, 246,
+ 247, 248, 249, 251, 252, 253, 254, 255, 256, 257, 259,
+ 260, 261, 262, 263, 264, 266, 267, 268, 269, 270, 272,
+ 273, 274, 275, 276, 278, 279, 280, 281, 282, 284, 285,
+ 286, 287, 289, 290, 291, 292, 294, 295, 296, 297, 299,
+ 300, 301, 302, 304, 305, 306, 308, 309, 310, 311, 313,
+ 314, 315, 317, 318, 319, 321, 322, 323, 325, 326, 327,
+ 329, 330, 331, 333, 334, 336, 337, 338, 340, 341, 342,
+ 344, 345, 347, 348, 349, 351, 352, 354, 355, 356, 358,
+ 359, 361, 362, 364, 365, 366, 368, 369, 371, 372, 374,
+ 375, 377, 378, 380, 381, 383, 384, 386, 387, 389, 390,
+ 392, 393, 395, 396, 398, 399, 401, 402, 404, 405, 407,
+ 408, 410, 412, 413, 415, 416, 418, 419, 421, 423, 424,
+ 426, 427, 429, 431, 432, 434, 435, 437, 439, 440, 442,
+ 444, 445, 447, 448, 450, 452, 453, 455, 457, 458, 460,
+ 462, 463, 465, 467, 468, 470, 472, 474, 475, 477, 479,
+ 480, 482, 484, 486, 487, 489, 491, 493, 494, 496, 498,
+ 500, 501, 503, 505, 507, 509, 510, 512, 514, 516, 518,
+ 519, 521, 523, 525, 527, 528, 530, 532, 534, 536, 538,
+ 539, 541, 543, 545, 547, 549, 551, 553, 554, 556, 558,
+ 560, 562, 564, 566, 568, 570, 572, 574, 575, 577, 579,
+ 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601,
+ 603, 605, 607, 609, 611, 613, 615, 617, 619, 621, 623,
+ 625, 627, 629, 631, 633, 635, 637, 640, 642, 644, 646,
+ 648, 650, 652, 654, 656, 658, 660, 663, 665, 667, 669,
+ 671, 673, 675, 678, 680, 682, 684, 686, 688, 690, 693,
+ 695, 697, 699, 701, 704, 706, 708, 710, 712, 715, 717,
+ 719, 721, 724, 726, 728, 730, 733, 735, 737, 739, 742,
+ 744, 746, 749, 751, 753, 755, 758, 760, 762, 765, 767,
+ 769, 772, 774, 776, 779, 781, 783, 786, 788, 790, 793,
+ 795, 798, 800, 802, 805, 807, 810, 812, 814, 817, 819,
+ 822, 824, 827, 829, 831, 834, 836, 839, 841, 844, 846,
+ 849, 851, 854, 856, 859, 861, 864, 866, 869, 871, 874,
+ 876, 879, 881, 884, 887, 889, 892, 894, 897, 899, 902,
+ 905, 907, 910, 912, 915, 918, 920, 923, 925, 928, 931,
+ 933, 936, 939, 941, 944, 947, 949, 952, 955, 957, 960,
+ 963, 965, 968, 971, 973, 976, 979, 982, 984, 987, 990,
+ 992, 995, 998, 1001, 1004, 1006, 1009, 1012, 1015, 1017, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_29[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9,
+ 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10,
+ 10, 10, 11, 11, 11, 11, 11, 11, 11, 12, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13,
+ 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15,
+ 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 18,
+ 18, 18, 18, 18, 19, 19, 19, 19, 19, 20, 20,
+ 20, 20, 21, 21, 21, 21, 21, 22, 22, 22, 22,
+ 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25,
+ 25, 26, 26, 26, 26, 27, 27, 27, 28, 28, 28,
+ 28, 29, 29, 29, 29, 30, 30, 30, 31, 31, 31,
+ 31, 32, 32, 32, 33, 33, 33, 34, 34, 34, 35,
+ 35, 35, 35, 36, 36, 36, 37, 37, 37, 38, 38,
+ 38, 39, 39, 39, 40, 40, 41, 41, 41, 42, 42,
+ 42, 43, 43, 43, 44, 44, 44, 45, 45, 46, 46,
+ 46, 47, 47, 48, 48, 48, 49, 49, 49, 50, 50,
+ 51, 51, 52, 52, 52, 53, 53, 54, 54, 54, 55,
+ 55, 56, 56, 57, 57, 57, 58, 58, 59, 59, 60,
+ 60, 61, 61, 61, 62, 62, 63, 63, 64, 64, 65,
+ 65, 66, 66, 67, 67, 68, 68, 69, 69, 70, 70,
+ 71, 71, 72, 72, 73, 73, 74, 74, 75, 75, 76,
+ 76, 77, 77, 78, 78, 79, 80, 80, 81, 81, 82,
+ 82, 83, 83, 84, 85, 85, 86, 86, 87, 87, 88,
+ 89, 89, 90, 90, 91, 92, 92, 93, 93, 94, 95,
+ 95, 96, 96, 97, 98, 98, 99, 99, 100, 101, 101,
+ 102, 103, 103, 104, 105, 105, 106, 107, 107, 108, 109,
+ 109, 110, 111, 111, 112, 113, 113, 114, 115, 115, 116,
+ 117, 117, 118, 119, 120, 120, 121, 122, 122, 123, 124,
+ 125, 125, 126, 127, 128, 128, 129, 130, 131, 131, 132,
+ 133, 134, 134, 135, 136, 137, 137, 138, 139, 140, 141,
+ 141, 142, 143, 144, 145, 145, 146, 147, 148, 149, 149,
+ 150, 151, 152, 153, 154, 154, 155, 156, 157, 158, 159,
+ 160, 160, 161, 162, 163, 164, 165, 166, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210,
+ 211, 212, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 241, 242, 243, 244, 245, 246,
+ 247, 248, 250, 251, 252, 253, 254, 255, 257, 258, 259,
+ 260, 261, 263, 264, 265, 266, 267, 269, 270, 271, 272,
+ 273, 275, 276, 277, 278, 280, 281, 282, 283, 285, 286,
+ 287, 288, 290, 291, 292, 293, 295, 296, 297, 299, 300,
+ 301, 302, 304, 305, 306, 308, 309, 310, 312, 313, 314,
+ 316, 317, 318, 320, 321, 322, 324, 325, 327, 328, 329,
+ 331, 332, 333, 335, 336, 338, 339, 340, 342, 343, 345,
+ 346, 348, 349, 350, 352, 353, 355, 356, 358, 359, 361,
+ 362, 363, 365, 366, 368, 369, 371, 372, 374, 375, 377,
+ 378, 380, 381, 383, 384, 386, 388, 389, 391, 392, 394,
+ 395, 397, 398, 400, 402, 403, 405, 406, 408, 409, 411,
+ 413, 414, 416, 417, 419, 421, 422, 424, 426, 427, 429,
+ 430, 432, 434, 435, 437, 439, 440, 442, 444, 445, 447,
+ 449, 450, 452, 454, 456, 457, 459, 461, 462, 464, 466,
+ 468, 469, 471, 473, 475, 476, 478, 480, 482, 483, 485,
+ 487, 489, 491, 492, 494, 496, 498, 500, 501, 503, 505,
+ 507, 509, 511, 512, 514, 516, 518, 520, 522, 524, 525,
+ 527, 529, 531, 533, 535, 537, 539, 541, 542, 544, 546,
+ 548, 550, 552, 554, 556, 558, 560, 562, 564, 566, 568,
+ 570, 572, 574, 576, 578, 580, 582, 584, 586, 588, 590,
+ 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612,
+ 614, 616, 618, 621, 623, 625, 627, 629, 631, 633, 635,
+ 637, 640, 642, 644, 646, 648, 650, 652, 655, 657, 659,
+ 661, 663, 665, 668, 670, 672, 674, 676, 679, 681, 683,
+ 685, 688, 690, 692, 694, 697, 699, 701, 703, 706, 708,
+ 710, 712, 715, 717, 719, 722, 724, 726, 729, 731, 733,
+ 736, 738, 740, 743, 745, 747, 750, 752, 754, 757, 759,
+ 762, 764, 766, 769, 771, 774, 776, 778, 781, 783, 786,
+ 788, 791, 793, 795, 798, 800, 803, 805, 808, 810, 813,
+ 815, 818, 820, 823, 825, 828, 830, 833, 835, 838, 841,
+ 843, 846, 848, 851, 853, 856, 859, 861, 864, 866, 869,
+ 872, 874, 877, 879, 882, 885, 887, 890, 893, 895, 898,
+ 901, 903, 906, 909, 911, 914, 917, 919, 922, 925, 927,
+ 930, 933, 936, 938, 941, 944, 947, 949, 952, 955, 958,
+ 960, 963, 966, 969, 972, 974, 977, 980, 983, 986, 989,
+ 991, 994, 997, 1000, 1003, 1006, 1009, 1011, 1014, 1017, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_30[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9,
+ 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13,
+ 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15,
+ 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17,
+ 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 20,
+ 20, 20, 20, 21, 21, 21, 21, 21, 22, 22, 22,
+ 22, 23, 23, 23, 23, 24, 24, 24, 24, 25, 25,
+ 25, 25, 26, 26, 26, 26, 27, 27, 27, 27, 28,
+ 28, 28, 28, 29, 29, 29, 30, 30, 30, 30, 31,
+ 31, 31, 32, 32, 32, 33, 33, 33, 33, 34, 34,
+ 34, 35, 35, 35, 36, 36, 36, 37, 37, 37, 38,
+ 38, 38, 39, 39, 39, 40, 40, 40, 41, 41, 41,
+ 42, 42, 42, 43, 43, 43, 44, 44, 45, 45, 45,
+ 46, 46, 46, 47, 47, 48, 48, 48, 49, 49, 50,
+ 50, 50, 51, 51, 52, 52, 52, 53, 53, 54, 54,
+ 55, 55, 55, 56, 56, 57, 57, 58, 58, 58, 59,
+ 59, 60, 60, 61, 61, 62, 62, 63, 63, 63, 64,
+ 64, 65, 65, 66, 66, 67, 67, 68, 68, 69, 69,
+ 70, 70, 71, 71, 72, 72, 73, 73, 74, 74, 75,
+ 75, 76, 77, 77, 78, 78, 79, 79, 80, 80, 81,
+ 81, 82, 83, 83, 84, 84, 85, 85, 86, 86, 87,
+ 88, 88, 89, 89, 90, 91, 91, 92, 92, 93, 94,
+ 94, 95, 95, 96, 97, 97, 98, 99, 99, 100, 100,
+ 101, 102, 102, 103, 104, 104, 105, 106, 106, 107, 108,
+ 108, 109, 110, 110, 111, 112, 112, 113, 114, 114, 115,
+ 116, 117, 117, 118, 119, 119, 120, 121, 122, 122, 123,
+ 124, 125, 125, 126, 127, 128, 128, 129, 130, 131, 131,
+ 132, 133, 134, 134, 135, 136, 137, 137, 138, 139, 140,
+ 141, 141, 142, 143, 144, 145, 146, 146, 147, 148, 149,
+ 150, 150, 151, 152, 153, 154, 155, 156, 156, 157, 158,
+ 159, 160, 161, 162, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 185, 186, 187, 188,
+ 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 210, 211,
+ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 236, 237, 238, 239, 240, 241, 242, 244, 245, 246, 247,
+ 248, 249, 250, 252, 253, 254, 255, 256, 258, 259, 260,
+ 261, 262, 264, 265, 266, 267, 269, 270, 271, 272, 273,
+ 275, 276, 277, 278, 280, 281, 282, 284, 285, 286, 287,
+ 289, 290, 291, 293, 294, 295, 296, 298, 299, 300, 302,
+ 303, 304, 306, 307, 308, 310, 311, 313, 314, 315, 317,
+ 318, 319, 321, 322, 324, 325, 326, 328, 329, 331, 332,
+ 333, 335, 336, 338, 339, 341, 342, 343, 345, 346, 348,
+ 349, 351, 352, 354, 355, 357, 358, 360, 361, 363, 364,
+ 366, 367, 369, 370, 372, 373, 375, 376, 378, 379, 381,
+ 383, 384, 386, 387, 389, 390, 392, 394, 395, 397, 398,
+ 400, 402, 403, 405, 406, 408, 410, 411, 413, 415, 416,
+ 418, 419, 421, 423, 424, 426, 428, 429, 431, 433, 435,
+ 436, 438, 440, 441, 443, 445, 447, 448, 450, 452, 453,
+ 455, 457, 459, 460, 462, 464, 466, 468, 469, 471, 473,
+ 475, 477, 478, 480, 482, 484, 486, 487, 489, 491, 493,
+ 495, 497, 498, 500, 502, 504, 506, 508, 510, 512, 513,
+ 515, 517, 519, 521, 523, 525, 527, 529, 531, 533, 535,
+ 537, 539, 540, 542, 544, 546, 548, 550, 552, 554, 556,
+ 558, 560, 562, 564, 566, 568, 570, 572, 574, 577, 579,
+ 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601,
+ 604, 606, 608, 610, 612, 614, 616, 618, 621, 623, 625,
+ 627, 629, 631, 634, 636, 638, 640, 642, 645, 647, 649,
+ 651, 653, 656, 658, 660, 662, 665, 667, 669, 671, 674,
+ 676, 678, 680, 683, 685, 687, 690, 692, 694, 697, 699,
+ 701, 704, 706, 708, 711, 713, 715, 718, 720, 722, 725,
+ 727, 730, 732, 734, 737, 739, 742, 744, 746, 749, 751,
+ 754, 756, 759, 761, 764, 766, 769, 771, 774, 776, 779,
+ 781, 784, 786, 789, 791, 794, 796, 799, 801, 804, 806,
+ 809, 812, 814, 817, 819, 822, 824, 827, 830, 832, 835,
+ 837, 840, 843, 845, 848, 851, 853, 856, 859, 861, 864,
+ 867, 869, 872, 875, 878, 880, 883, 886, 888, 891, 894,
+ 897, 899, 902, 905, 908, 910, 913, 916, 919, 922, 924,
+ 927, 930, 933, 936, 938, 941, 944, 947, 950, 953, 956,
+ 958, 961, 964, 967, 970, 973, 976, 979, 982, 984, 987,
+ 990, 993, 996, 999, 1002, 1005, 1008, 1011, 1014, 1017, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_31[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9,
+ 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21, 21, 22, 22,
+ 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 25,
+ 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 27,
+ 28, 28, 28, 28, 29, 29, 29, 30, 30, 30, 30,
+ 31, 31, 31, 32, 32, 32, 32, 33, 33, 33, 34,
+ 34, 34, 35, 35, 35, 36, 36, 36, 36, 37, 37,
+ 37, 38, 38, 38, 39, 39, 39, 40, 40, 41, 41,
+ 41, 42, 42, 42, 43, 43, 43, 44, 44, 44, 45,
+ 45, 46, 46, 46, 47, 47, 47, 48, 48, 49, 49,
+ 49, 50, 50, 51, 51, 51, 52, 52, 53, 53, 54,
+ 54, 54, 55, 55, 56, 56, 57, 57, 57, 58, 58,
+ 59, 59, 60, 60, 61, 61, 61, 62, 62, 63, 63,
+ 64, 64, 65, 65, 66, 66, 67, 67, 68, 68, 69,
+ 69, 70, 70, 71, 71, 72, 72, 73, 73, 74, 74,
+ 75, 75, 76, 76, 77, 77, 78, 79, 79, 80, 80,
+ 81, 81, 82, 82, 83, 84, 84, 85, 85, 86, 86,
+ 87, 88, 88, 89, 89, 90, 91, 91, 92, 92, 93,
+ 94, 94, 95, 95, 96, 97, 97, 98, 99, 99, 100,
+ 101, 101, 102, 102, 103, 104, 104, 105, 106, 106, 107,
+ 108, 108, 109, 110, 111, 111, 112, 113, 113, 114, 115,
+ 115, 116, 117, 118, 118, 119, 120, 120, 121, 122, 123,
+ 123, 124, 125, 126, 126, 127, 128, 129, 129, 130, 131,
+ 132, 132, 133, 134, 135, 136, 136, 137, 138, 139, 140,
+ 140, 141, 142, 143, 144, 144, 145, 146, 147, 148, 149,
+ 149, 150, 151, 152, 153, 154, 155, 155, 156, 157, 158,
+ 159, 160, 161, 162, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 212, 213, 214, 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 227, 228, 229, 230, 231, 232, 233, 234, 236,
+ 237, 238, 239, 240, 241, 243, 244, 245, 246, 247, 248,
+ 250, 251, 252, 253, 254, 256, 257, 258, 259, 260, 262,
+ 263, 264, 265, 267, 268, 269, 270, 272, 273, 274, 275,
+ 277, 278, 279, 281, 282, 283, 285, 286, 287, 288, 290,
+ 291, 292, 294, 295, 296, 298, 299, 300, 302, 303, 305,
+ 306, 307, 309, 310, 311, 313, 314, 316, 317, 318, 320,
+ 321, 323, 324, 325, 327, 328, 330, 331, 333, 334, 336,
+ 337, 338, 340, 341, 343, 344, 346, 347, 349, 350, 352,
+ 353, 355, 356, 358, 359, 361, 362, 364, 366, 367, 369,
+ 370, 372, 373, 375, 376, 378, 380, 381, 383, 384, 386,
+ 388, 389, 391, 392, 394, 396, 397, 399, 401, 402, 404,
+ 406, 407, 409, 411, 412, 414, 416, 417, 419, 421, 422,
+ 424, 426, 427, 429, 431, 433, 434, 436, 438, 440, 441,
+ 443, 445, 447, 448, 450, 452, 454, 456, 457, 459, 461,
+ 463, 465, 466, 468, 470, 472, 474, 476, 477, 479, 481,
+ 483, 485, 487, 489, 490, 492, 494, 496, 498, 500, 502,
+ 504, 506, 508, 510, 511, 513, 515, 517, 519, 521, 523,
+ 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545,
+ 547, 549, 551, 553, 555, 557, 559, 561, 564, 566, 568,
+ 570, 572, 574, 576, 578, 580, 582, 584, 587, 589, 591,
+ 593, 595, 597, 599, 602, 604, 606, 608, 610, 613, 615,
+ 617, 619, 621, 624, 626, 628, 630, 632, 635, 637, 639,
+ 641, 644, 646, 648, 651, 653, 655, 657, 660, 662, 664,
+ 667, 669, 671, 674, 676, 678, 681, 683, 685, 688, 690,
+ 692, 695, 697, 700, 702, 704, 707, 709, 712, 714, 717,
+ 719, 721, 724, 726, 729, 731, 734, 736, 739, 741, 744,
+ 746, 749, 751, 754, 756, 759, 761, 764, 766, 769, 772,
+ 774, 777, 779, 782, 784, 787, 790, 792, 795, 797, 800,
+ 803, 805, 808, 811, 813, 816, 819, 821, 824, 827, 829,
+ 832, 835, 837, 840, 843, 845, 848, 851, 854, 856, 859,
+ 862, 865, 867, 870, 873, 876, 879, 881, 884, 887, 890,
+ 893, 895, 898, 901, 904, 907, 910, 913, 915, 918, 921,
+ 924, 927, 930, 933, 936, 939, 942, 945, 947, 950, 953,
+ 956, 959, 962, 965, 968, 971, 974, 977, 980, 983, 986,
+ 989, 992, 995, 998, 1001, 1005, 1008, 1011, 1014, 1017, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_32[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19,
+ 20, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22,
+ 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24,
+ 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27,
+ 27, 28, 28, 28, 28, 29, 29, 29, 30, 30, 30,
+ 30, 31, 31, 31, 32, 32, 32, 32, 33, 33, 33,
+ 34, 34, 34, 35, 35, 35, 36, 36, 36, 37, 37,
+ 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 41,
+ 41, 41, 42, 42, 42, 43, 43, 43, 44, 44, 44,
+ 45, 45, 46, 46, 46, 47, 47, 48, 48, 48, 49,
+ 49, 49, 50, 50, 51, 51, 51, 52, 52, 53, 53,
+ 54, 54, 54, 55, 55, 56, 56, 57, 57, 57, 58,
+ 58, 59, 59, 60, 60, 61, 61, 62, 62, 62, 63,
+ 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
+ 69, 69, 70, 70, 71, 71, 72, 72, 73, 73, 74,
+ 74, 75, 75, 76, 77, 77, 78, 78, 79, 79, 80,
+ 80, 81, 81, 82, 83, 83, 84, 84, 85, 85, 86,
+ 87, 87, 88, 88, 89, 90, 90, 91, 91, 92, 93,
+ 93, 94, 95, 95, 96, 96, 97, 98, 98, 99, 100,
+ 100, 101, 102, 102, 103, 104, 104, 105, 106, 106, 107,
+ 108, 108, 109, 110, 110, 111, 112, 112, 113, 114, 114,
+ 115, 116, 117, 117, 118, 119, 120, 120, 121, 122, 122,
+ 123, 124, 125, 125, 126, 127, 128, 129, 129, 130, 131,
+ 132, 132, 133, 134, 135, 136, 136, 137, 138, 139, 140,
+ 140, 141, 142, 143, 144, 145, 145, 146, 147, 148, 149,
+ 150, 150, 151, 152, 153, 154, 155, 156, 157, 157, 158,
+ 159, 160, 161, 162, 163, 164, 165, 166, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 186, 187, 188, 189,
+ 190, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 214, 215, 216, 217, 218, 219, 220, 221, 222, 224, 225,
+ 226, 227, 228, 229, 230, 231, 233, 234, 235, 236, 237,
+ 238, 240, 241, 242, 243, 244, 246, 247, 248, 249, 250,
+ 252, 253, 254, 255, 257, 258, 259, 260, 262, 263, 264,
+ 265, 267, 268, 269, 270, 272, 273, 274, 276, 277, 278,
+ 280, 281, 282, 283, 285, 286, 287, 289, 290, 291, 293,
+ 294, 296, 297, 298, 300, 301, 302, 304, 305, 307, 308,
+ 309, 311, 312, 314, 315, 316, 318, 319, 321, 322, 324,
+ 325, 327, 328, 330, 331, 332, 334, 335, 337, 338, 340,
+ 341, 343, 344, 346, 347, 349, 351, 352, 354, 355, 357,
+ 358, 360, 361, 363, 364, 366, 368, 369, 371, 372, 374,
+ 376, 377, 379, 380, 382, 384, 385, 387, 389, 390, 392,
+ 394, 395, 397, 399, 400, 402, 404, 405, 407, 409, 410,
+ 412, 414, 416, 417, 419, 421, 423, 424, 426, 428, 430,
+ 431, 433, 435, 437, 438, 440, 442, 444, 446, 447, 449,
+ 451, 453, 455, 457, 458, 460, 462, 464, 466, 468, 469,
+ 471, 473, 475, 477, 479, 481, 483, 485, 487, 488, 490,
+ 492, 494, 496, 498, 500, 502, 504, 506, 508, 510, 512,
+ 514, 516, 518, 520, 522, 524, 526, 528, 530, 532, 534,
+ 536, 538, 540, 542, 544, 547, 549, 551, 553, 555, 557,
+ 559, 561, 563, 565, 568, 570, 572, 574, 576, 578, 581,
+ 583, 585, 587, 589, 591, 594, 596, 598, 600, 602, 605,
+ 607, 609, 611, 614, 616, 618, 620, 623, 625, 627, 630,
+ 632, 634, 636, 639, 641, 643, 646, 648, 650, 653, 655,
+ 657, 660, 662, 665, 667, 669, 672, 674, 677, 679, 681,
+ 684, 686, 689, 691, 694, 696, 698, 701, 703, 706, 708,
+ 711, 713, 716, 718, 721, 723, 726, 728, 731, 734, 736,
+ 739, 741, 744, 746, 749, 751, 754, 757, 759, 762, 765,
+ 767, 770, 772, 775, 778, 780, 783, 786, 788, 791, 794,
+ 796, 799, 802, 804, 807, 810, 813, 815, 818, 821, 824,
+ 826, 829, 832, 835, 838, 840, 843, 846, 849, 852, 854,
+ 857, 860, 863, 866, 869, 871, 874, 877, 880, 883, 886,
+ 889, 892, 895, 897, 900, 903, 906, 909, 912, 915, 918,
+ 921, 924, 927, 930, 933, 936, 939, 942, 945, 948, 951,
+ 954, 957, 960, 963, 967, 970, 973, 976, 979, 982, 985,
+ 988, 991, 994, 998, 1001, 1004, 1007, 1010, 1013, 1017, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_33[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 17, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+ 19, 20, 20, 20, 20, 21, 21, 21, 21, 21, 22,
+ 22, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24,
+ 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27,
+ 27, 28, 28, 28, 28, 29, 29, 29, 29, 30, 30,
+ 30, 31, 31, 31, 31, 32, 32, 32, 33, 33, 33,
+ 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37,
+ 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40,
+ 41, 41, 41, 42, 42, 42, 43, 43, 44, 44, 44,
+ 45, 45, 45, 46, 46, 47, 47, 47, 48, 48, 48,
+ 49, 49, 50, 50, 50, 51, 51, 52, 52, 53, 53,
+ 53, 54, 54, 55, 55, 55, 56, 56, 57, 57, 58,
+ 58, 59, 59, 59, 60, 60, 61, 61, 62, 62, 63,
+ 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
+ 69, 69, 70, 70, 71, 71, 72, 72, 73, 73, 74,
+ 74, 75, 75, 76, 76, 77, 77, 78, 79, 79, 80,
+ 80, 81, 81, 82, 82, 83, 84, 84, 85, 85, 86,
+ 87, 87, 88, 88, 89, 90, 90, 91, 91, 92, 93,
+ 93, 94, 94, 95, 96, 96, 97, 98, 98, 99, 100,
+ 100, 101, 102, 102, 103, 104, 104, 105, 106, 106, 107,
+ 108, 108, 109, 110, 110, 111, 112, 112, 113, 114, 115,
+ 115, 116, 117, 118, 118, 119, 120, 120, 121, 122, 123,
+ 123, 124, 125, 126, 127, 127, 128, 129, 130, 130, 131,
+ 132, 133, 134, 134, 135, 136, 137, 138, 138, 139, 140,
+ 141, 142, 143, 143, 144, 145, 146, 147, 148, 148, 149,
+ 150, 151, 152, 153, 154, 155, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 218, 219, 220, 221, 222, 223, 224, 226, 227,
+ 228, 229, 230, 231, 233, 234, 235, 236, 237, 238, 240,
+ 241, 242, 243, 245, 246, 247, 248, 249, 251, 252, 253,
+ 254, 256, 257, 258, 259, 261, 262, 263, 265, 266, 267,
+ 268, 270, 271, 272, 274, 275, 276, 278, 279, 280, 282,
+ 283, 284, 286, 287, 288, 290, 291, 292, 294, 295, 297,
+ 298, 299, 301, 302, 304, 305, 307, 308, 309, 311, 312,
+ 314, 315, 317, 318, 320, 321, 322, 324, 325, 327, 328,
+ 330, 331, 333, 334, 336, 337, 339, 341, 342, 344, 345,
+ 347, 348, 350, 351, 353, 355, 356, 358, 359, 361, 362,
+ 364, 366, 367, 369, 371, 372, 374, 375, 377, 379, 380,
+ 382, 384, 385, 387, 389, 390, 392, 394, 395, 397, 399,
+ 401, 402, 404, 406, 408, 409, 411, 413, 414, 416, 418,
+ 420, 422, 423, 425, 427, 429, 431, 432, 434, 436, 438,
+ 440, 441, 443, 445, 447, 449, 451, 453, 454, 456, 458,
+ 460, 462, 464, 466, 468, 470, 472, 473, 475, 477, 479,
+ 481, 483, 485, 487, 489, 491, 493, 495, 497, 499, 501,
+ 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523,
+ 525, 528, 530, 532, 534, 536, 538, 540, 542, 544, 547,
+ 549, 551, 553, 555, 557, 559, 562, 564, 566, 568, 570,
+ 573, 575, 577, 579, 581, 584, 586, 588, 590, 593, 595,
+ 597, 599, 602, 604, 606, 609, 611, 613, 615, 618, 620,
+ 622, 625, 627, 629, 632, 634, 637, 639, 641, 644, 646,
+ 648, 651, 653, 656, 658, 661, 663, 665, 668, 670, 673,
+ 675, 678, 680, 683, 685, 688, 690, 693, 695, 698, 700,
+ 703, 705, 708, 710, 713, 716, 718, 721, 723, 726, 729,
+ 731, 734, 736, 739, 742, 744, 747, 750, 752, 755, 758,
+ 760, 763, 766, 768, 771, 774, 776, 779, 782, 785, 787,
+ 790, 793, 796, 798, 801, 804, 807, 810, 812, 815, 818,
+ 821, 824, 827, 829, 832, 835, 838, 841, 844, 847, 850,
+ 852, 855, 858, 861, 864, 867, 870, 873, 876, 879, 882,
+ 885, 888, 891, 894, 897, 900, 903, 906, 909, 912, 915,
+ 918, 921, 924, 927, 930, 933, 937, 940, 943, 946, 949,
+ 952, 955, 958, 962, 965, 968, 971, 974, 978, 981, 984,
+ 987, 990, 994, 997, 1000, 1003, 1007, 1010, 1013, 1016, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_34[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 17, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+ 19, 20, 20, 20, 20, 21, 21, 21, 21, 21, 22,
+ 22, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24,
+ 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27,
+ 27, 27, 28, 28, 28, 29, 29, 29, 29, 30, 30,
+ 30, 30, 31, 31, 31, 32, 32, 32, 33, 33, 33,
+ 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37,
+ 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40,
+ 41, 41, 41, 42, 42, 42, 43, 43, 43, 44, 44,
+ 45, 45, 45, 46, 46, 46, 47, 47, 48, 48, 48,
+ 49, 49, 50, 50, 50, 51, 51, 52, 52, 52, 53,
+ 53, 54, 54, 55, 55, 55, 56, 56, 57, 57, 58,
+ 58, 59, 59, 59, 60, 60, 61, 61, 62, 62, 63,
+ 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
+ 69, 69, 70, 70, 71, 71, 72, 72, 73, 73, 74,
+ 74, 75, 75, 76, 76, 77, 78, 78, 79, 79, 80,
+ 80, 81, 81, 82, 83, 83, 84, 84, 85, 86, 86,
+ 87, 87, 88, 88, 89, 90, 90, 91, 92, 92, 93,
+ 93, 94, 95, 95, 96, 97, 97, 98, 99, 99, 100,
+ 101, 101, 102, 102, 103, 104, 105, 105, 106, 107, 107,
+ 108, 109, 109, 110, 111, 111, 112, 113, 114, 114, 115,
+ 116, 117, 117, 118, 119, 119, 120, 121, 122, 123, 123,
+ 124, 125, 126, 126, 127, 128, 129, 129, 130, 131, 132,
+ 133, 133, 134, 135, 136, 137, 138, 138, 139, 140, 141,
+ 142, 143, 143, 144, 145, 146, 147, 148, 149, 149, 150,
+ 151, 152, 153, 154, 155, 156, 157, 157, 158, 159, 160,
+ 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181,
+ 182, 183, 184, 185, 186, 187, 188, 189, 191, 192, 193,
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 207, 208, 209, 210, 211, 212, 213, 214, 215, 217,
+ 218, 219, 220, 221, 222, 223, 225, 226, 227, 228, 229,
+ 231, 232, 233, 234, 235, 237, 238, 239, 240, 241, 243,
+ 244, 245, 246, 248, 249, 250, 251, 253, 254, 255, 256,
+ 258, 259, 260, 262, 263, 264, 266, 267, 268, 269, 271,
+ 272, 273, 275, 276, 278, 279, 280, 282, 283, 284, 286,
+ 287, 288, 290, 291, 293, 294, 296, 297, 298, 300, 301,
+ 303, 304, 306, 307, 308, 310, 311, 313, 314, 316, 317,
+ 319, 320, 322, 323, 325, 326, 328, 329, 331, 332, 334,
+ 335, 337, 339, 340, 342, 343, 345, 346, 348, 350, 351,
+ 353, 354, 356, 358, 359, 361, 363, 364, 366, 367, 369,
+ 371, 372, 374, 376, 377, 379, 381, 383, 384, 386, 388,
+ 389, 391, 393, 395, 396, 398, 400, 402, 403, 405, 407,
+ 409, 410, 412, 414, 416, 418, 419, 421, 423, 425, 427,
+ 429, 430, 432, 434, 436, 438, 440, 442, 443, 445, 447,
+ 449, 451, 453, 455, 457, 459, 461, 463, 464, 466, 468,
+ 470, 472, 474, 476, 478, 480, 482, 484, 486, 488, 490,
+ 492, 494, 496, 498, 500, 503, 505, 507, 509, 511, 513,
+ 515, 517, 519, 521, 523, 526, 528, 530, 532, 534, 536,
+ 538, 541, 543, 545, 547, 549, 551, 554, 556, 558, 560,
+ 563, 565, 567, 569, 572, 574, 576, 578, 581, 583, 585,
+ 587, 590, 592, 594, 597, 599, 601, 604, 606, 608, 611,
+ 613, 615, 618, 620, 623, 625, 627, 630, 632, 635, 637,
+ 640, 642, 644, 647, 649, 652, 654, 657, 659, 662, 664,
+ 667, 669, 672, 674, 677, 679, 682, 685, 687, 690, 692,
+ 695, 697, 700, 703, 705, 708, 711, 713, 716, 718, 721,
+ 724, 726, 729, 732, 734, 737, 740, 743, 745, 748, 751,
+ 753, 756, 759, 762, 764, 767, 770, 773, 776, 778, 781,
+ 784, 787, 790, 793, 795, 798, 801, 804, 807, 810, 813,
+ 815, 818, 821, 824, 827, 830, 833, 836, 839, 842, 845,
+ 848, 851, 854, 857, 860, 863, 866, 869, 872, 875, 878,
+ 881, 884, 887, 890, 893, 896, 899, 903, 906, 909, 912,
+ 915, 918, 921, 925, 928, 931, 934, 937, 940, 944, 947,
+ 950, 953, 957, 960, 963, 966, 970, 973, 976, 979, 983,
+ 986, 989, 993, 996, 999, 1003, 1006, 1009, 1013, 1016, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_35[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9,
+ 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
+ 10, 10, 11, 11, 11, 11, 11, 11, 11, 12, 12,
+ 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+ 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 22,
+ 22, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24,
+ 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27,
+ 27, 27, 28, 28, 28, 29, 29, 29, 29, 30, 30,
+ 30, 31, 31, 31, 31, 32, 32, 32, 33, 33, 33,
+ 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37,
+ 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40,
+ 41, 41, 41, 42, 42, 42, 43, 43, 43, 44, 44,
+ 45, 45, 45, 46, 46, 47, 47, 47, 48, 48, 48,
+ 49, 49, 50, 50, 50, 51, 51, 52, 52, 53, 53,
+ 53, 54, 54, 55, 55, 56, 56, 56, 57, 57, 58,
+ 58, 59, 59, 60, 60, 60, 61, 61, 62, 62, 63,
+ 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
+ 69, 69, 70, 70, 71, 71, 72, 72, 73, 73, 74,
+ 75, 75, 76, 76, 77, 77, 78, 78, 79, 79, 80,
+ 81, 81, 82, 82, 83, 84, 84, 85, 85, 86, 86,
+ 87, 88, 88, 89, 89, 90, 91, 91, 92, 93, 93,
+ 94, 95, 95, 96, 96, 97, 98, 98, 99, 100, 100,
+ 101, 102, 102, 103, 104, 104, 105, 106, 107, 107, 108,
+ 109, 109, 110, 111, 111, 112, 113, 114, 114, 115, 116,
+ 117, 117, 118, 119, 120, 120, 121, 122, 123, 123, 124,
+ 125, 126, 126, 127, 128, 129, 130, 130, 131, 132, 133,
+ 134, 135, 135, 136, 137, 138, 139, 140, 140, 141, 142,
+ 143, 144, 145, 146, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 188, 189, 190, 191, 192, 193, 194, 195,
+ 196, 197, 198, 199, 200, 201, 202, 204, 205, 206, 207,
+ 208, 209, 210, 211, 213, 214, 215, 216, 217, 218, 219,
+ 221, 222, 223, 224, 225, 227, 228, 229, 230, 231, 233,
+ 234, 235, 236, 237, 239, 240, 241, 242, 244, 245, 246,
+ 247, 249, 250, 251, 253, 254, 255, 256, 258, 259, 260,
+ 262, 263, 264, 266, 267, 268, 270, 271, 272, 274, 275,
+ 277, 278, 279, 281, 282, 284, 285, 286, 288, 289, 291,
+ 292, 293, 295, 296, 298, 299, 301, 302, 304, 305, 307,
+ 308, 310, 311, 313, 314, 316, 317, 319, 320, 322, 323,
+ 325, 326, 328, 329, 331, 332, 334, 336, 337, 339, 340,
+ 342, 344, 345, 347, 348, 350, 352, 353, 355, 357, 358,
+ 360, 362, 363, 365, 367, 368, 370, 372, 373, 375, 377,
+ 378, 380, 382, 384, 385, 387, 389, 391, 392, 394, 396,
+ 398, 400, 401, 403, 405, 407, 409, 410, 412, 414, 416,
+ 418, 420, 421, 423, 425, 427, 429, 431, 433, 435, 436,
+ 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458,
+ 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480,
+ 482, 484, 486, 488, 490, 492, 494, 496, 498, 500, 503,
+ 505, 507, 509, 511, 513, 515, 517, 520, 522, 524, 526,
+ 528, 531, 533, 535, 537, 539, 542, 544, 546, 548, 550,
+ 553, 555, 557, 560, 562, 564, 566, 569, 571, 573, 576,
+ 578, 580, 583, 585, 587, 590, 592, 594, 597, 599, 602,
+ 604, 606, 609, 611, 614, 616, 618, 621, 623, 626, 628,
+ 631, 633, 636, 638, 641, 643, 646, 648, 651, 653, 656,
+ 658, 661, 664, 666, 669, 671, 674, 677, 679, 682, 684,
+ 687, 690, 692, 695, 698, 700, 703, 706, 708, 711, 714,
+ 716, 719, 722, 725, 727, 730, 733, 736, 738, 741, 744,
+ 747, 750, 752, 755, 758, 761, 764, 766, 769, 772, 775,
+ 778, 781, 784, 787, 789, 792, 795, 798, 801, 804, 807,
+ 810, 813, 816, 819, 822, 825, 828, 831, 834, 837, 840,
+ 843, 846, 849, 852, 855, 858, 862, 865, 868, 871, 874,
+ 877, 880, 883, 887, 890, 893, 896, 899, 902, 906, 909,
+ 912, 915, 919, 922, 925, 928, 932, 935, 938, 941, 945,
+ 948, 951, 955, 958, 961, 965, 968, 971, 975, 978, 982,
+ 985, 988, 992, 995, 999, 1002, 1006, 1009, 1013, 1016, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_36[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9,
+ 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10,
+ 10, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13,
+ 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15,
+ 15, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19,
+ 20, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22,
+ 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24,
+ 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27,
+ 27, 28, 28, 28, 28, 29, 29, 29, 29, 30, 30,
+ 30, 31, 31, 31, 31, 32, 32, 32, 33, 33, 33,
+ 34, 34, 34, 35, 35, 35, 35, 36, 36, 36, 37,
+ 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40,
+ 41, 41, 41, 42, 42, 43, 43, 43, 44, 44, 44,
+ 45, 45, 46, 46, 46, 47, 47, 47, 48, 48, 49,
+ 49, 49, 50, 50, 51, 51, 52, 52, 52, 53, 53,
+ 54, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58,
+ 58, 59, 59, 60, 60, 61, 61, 62, 62, 63, 63,
+ 64, 64, 65, 65, 66, 66, 67, 67, 68, 68, 69,
+ 69, 70, 70, 71, 71, 72, 72, 73, 73, 74, 74,
+ 75, 76, 76, 77, 77, 78, 78, 79, 79, 80, 81,
+ 81, 82, 82, 83, 83, 84, 85, 85, 86, 86, 87,
+ 88, 88, 89, 90, 90, 91, 91, 92, 93, 93, 94,
+ 95, 95, 96, 97, 97, 98, 99, 99, 100, 101, 101,
+ 102, 103, 103, 104, 105, 105, 106, 107, 107, 108, 109,
+ 110, 110, 111, 112, 112, 113, 114, 115, 115, 116, 117,
+ 118, 118, 119, 120, 121, 121, 122, 123, 124, 125, 125,
+ 126, 127, 128, 129, 129, 130, 131, 132, 133, 133, 134,
+ 135, 136, 137, 138, 138, 139, 140, 141, 142, 143, 144,
+ 145, 145, 146, 147, 148, 149, 150, 151, 152, 153, 153,
+ 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 207, 208, 209, 210,
+ 211, 212, 214, 215, 216, 217, 218, 219, 221, 222, 223,
+ 224, 225, 227, 228, 229, 230, 231, 233, 234, 235, 236,
+ 238, 239, 240, 241, 243, 244, 245, 247, 248, 249, 250,
+ 252, 253, 254, 256, 257, 258, 260, 261, 262, 264, 265,
+ 266, 268, 269, 271, 272, 273, 275, 276, 277, 279, 280,
+ 282, 283, 285, 286, 287, 289, 290, 292, 293, 295, 296,
+ 298, 299, 301, 302, 304, 305, 307, 308, 310, 311, 313,
+ 314, 316, 317, 319, 320, 322, 324, 325, 327, 328, 330,
+ 331, 333, 335, 336, 338, 339, 341, 343, 344, 346, 348,
+ 349, 351, 353, 354, 356, 358, 359, 361, 363, 364, 366,
+ 368, 370, 371, 373, 375, 377, 378, 380, 382, 384, 385,
+ 387, 389, 391, 393, 394, 396, 398, 400, 402, 403, 405,
+ 407, 409, 411, 413, 415, 416, 418, 420, 422, 424, 426,
+ 428, 430, 432, 434, 436, 438, 439, 441, 443, 445, 447,
+ 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 470,
+ 472, 474, 476, 478, 480, 482, 484, 486, 488, 490, 492,
+ 495, 497, 499, 501, 503, 505, 508, 510, 512, 514, 516,
+ 518, 521, 523, 525, 527, 530, 532, 534, 536, 539, 541,
+ 543, 545, 548, 550, 552, 555, 557, 559, 562, 564, 566,
+ 569, 571, 573, 576, 578, 580, 583, 585, 588, 590, 592,
+ 595, 597, 600, 602, 605, 607, 610, 612, 615, 617, 620,
+ 622, 625, 627, 630, 632, 635, 637, 640, 642, 645, 648,
+ 650, 653, 655, 658, 661, 663, 666, 669, 671, 674, 677,
+ 679, 682, 685, 687, 690, 693, 695, 698, 701, 704, 706,
+ 709, 712, 715, 717, 720, 723, 726, 729, 732, 734, 737,
+ 740, 743, 746, 749, 751, 754, 757, 760, 763, 766, 769,
+ 772, 775, 778, 781, 784, 787, 790, 793, 796, 799, 802,
+ 805, 808, 811, 814, 817, 820, 823, 826, 829, 832, 835,
+ 838, 842, 845, 848, 851, 854, 857, 860, 864, 867, 870,
+ 873, 876, 880, 883, 886, 889, 893, 896, 899, 903, 906,
+ 909, 912, 916, 919, 922, 926, 929, 932, 936, 939, 943,
+ 946, 949, 953, 956, 960, 963, 967, 970, 973, 977, 980,
+ 984, 987, 991, 994, 998, 1002, 1005, 1009, 1012, 1016, 1019,
+ 1023,
+};
+
+static const u16 xgamma10_37[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 14,
+ 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15,
+ 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17,
+ 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 20,
+ 20, 20, 20, 20, 21, 21, 21, 21, 21, 22, 22,
+ 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24,
+ 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27,
+ 28, 28, 28, 28, 29, 29, 29, 29, 30, 30, 30,
+ 31, 31, 31, 31, 32, 32, 32, 33, 33, 33, 33,
+ 34, 34, 34, 35, 35, 35, 36, 36, 36, 37, 37,
+ 37, 38, 38, 38, 39, 39, 39, 40, 40, 40, 41,
+ 41, 41, 42, 42, 42, 43, 43, 44, 44, 44, 45,
+ 45, 45, 46, 46, 47, 47, 47, 48, 48, 49, 49,
+ 49, 50, 50, 51, 51, 51, 52, 52, 53, 53, 54,
+ 54, 54, 55, 55, 56, 56, 57, 57, 58, 58, 58,
+ 59, 59, 60, 60, 61, 61, 62, 62, 63, 63, 64,
+ 64, 65, 65, 66, 66, 67, 67, 68, 68, 69, 69,
+ 70, 70, 71, 71, 72, 72, 73, 73, 74, 75, 75,
+ 76, 76, 77, 77, 78, 78, 79, 80, 80, 81, 81,
+ 82, 82, 83, 84, 84, 85, 85, 86, 87, 87, 88,
+ 89, 89, 90, 90, 91, 92, 92, 93, 94, 94, 95,
+ 96, 96, 97, 98, 98, 99, 100, 100, 101, 102, 102,
+ 103, 104, 104, 105, 106, 106, 107, 108, 109, 109, 110,
+ 111, 112, 112, 113, 114, 114, 115, 116, 117, 118, 118,
+ 119, 120, 121, 121, 122, 123, 124, 125, 125, 126, 127,
+ 128, 129, 129, 130, 131, 132, 133, 134, 134, 135, 136,
+ 137, 138, 139, 139, 140, 141, 142, 143, 144, 145, 146,
+ 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156,
+ 157, 158, 158, 159, 160, 161, 162, 163, 164, 165, 166,
+ 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 197, 198, 199, 200, 201,
+ 202, 203, 204, 206, 207, 208, 209, 210, 211, 213, 214,
+ 215, 216, 217, 218, 220, 221, 222, 223, 225, 226, 227,
+ 228, 229, 231, 232, 233, 234, 236, 237, 238, 240, 241,
+ 242, 243, 245, 246, 247, 249, 250, 251, 253, 254, 255,
+ 257, 258, 259, 261, 262, 263, 265, 266, 268, 269, 270,
+ 272, 273, 275, 276, 277, 279, 280, 282, 283, 285, 286,
+ 288, 289, 291, 292, 294, 295, 297, 298, 300, 301, 303,
+ 304, 306, 307, 309, 310, 312, 313, 315, 316, 318, 320,
+ 321, 323, 324, 326, 328, 329, 331, 332, 334, 336, 337,
+ 339, 341, 342, 344, 346, 347, 349, 351, 352, 354, 356,
+ 358, 359, 361, 363, 364, 366, 368, 370, 372, 373, 375,
+ 377, 379, 380, 382, 384, 386, 388, 389, 391, 393, 395,
+ 397, 399, 401, 402, 404, 406, 408, 410, 412, 414, 416,
+ 418, 420, 421, 423, 425, 427, 429, 431, 433, 435, 437,
+ 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459,
+ 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 483,
+ 485, 487, 489, 491, 493, 496, 498, 500, 502, 504, 507,
+ 509, 511, 513, 515, 518, 520, 522, 524, 527, 529, 531,
+ 534, 536, 538, 541, 543, 545, 548, 550, 552, 555, 557,
+ 559, 562, 564, 567, 569, 571, 574, 576, 579, 581, 584,
+ 586, 589, 591, 593, 596, 598, 601, 603, 606, 609, 611,
+ 614, 616, 619, 621, 624, 626, 629, 632, 634, 637, 639,
+ 642, 645, 647, 650, 653, 655, 658, 661, 663, 666, 669,
+ 672, 674, 677, 680, 682, 685, 688, 691, 694, 696, 699,
+ 702, 705, 708, 710, 713, 716, 719, 722, 725, 728, 730,
+ 733, 736, 739, 742, 745, 748, 751, 754, 757, 760, 763,
+ 766, 769, 772, 775, 778, 781, 784, 787, 790, 793, 796,
+ 799, 802, 805, 809, 812, 815, 818, 821, 824, 827, 831,
+ 834, 837, 840, 843, 847, 850, 853, 856, 860, 863, 866,
+ 869, 873, 876, 879, 883, 886, 889, 893, 896, 899, 903,
+ 906, 910, 913, 916, 920, 923, 927, 930, 934, 937, 940,
+ 944, 947, 951, 954, 958, 961, 965, 969, 972, 976, 979,
+ 983, 986, 990, 994, 997, 1001, 1005, 1008, 1012, 1016, 1019,
+ 1023,
+};
+
+static const u16 xgamma10_38[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11,
+ 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12,
+ 12, 12, 13, 13, 13, 13, 13, 13, 13, 14, 14,
+ 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 16,
+ 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 18,
+ 18, 18, 18, 18, 19, 19, 19, 19, 19, 20, 20,
+ 20, 20, 20, 21, 21, 21, 21, 21, 22, 22, 22,
+ 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 25,
+ 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 28,
+ 28, 28, 28, 29, 29, 29, 29, 30, 30, 30, 31,
+ 31, 31, 31, 32, 32, 32, 33, 33, 33, 33, 34,
+ 34, 34, 35, 35, 35, 36, 36, 36, 37, 37, 37,
+ 38, 38, 38, 39, 39, 39, 40, 40, 40, 41, 41,
+ 41, 42, 42, 43, 43, 43, 44, 44, 44, 45, 45,
+ 46, 46, 46, 47, 47, 47, 48, 48, 49, 49, 49,
+ 50, 50, 51, 51, 52, 52, 52, 53, 53, 54, 54,
+ 55, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59,
+ 60, 60, 60, 61, 61, 62, 62, 63, 63, 64, 64,
+ 65, 65, 66, 66, 67, 67, 68, 68, 69, 69, 70,
+ 70, 71, 72, 72, 73, 73, 74, 74, 75, 75, 76,
+ 76, 77, 78, 78, 79, 79, 80, 81, 81, 82, 82,
+ 83, 83, 84, 85, 85, 86, 86, 87, 88, 88, 89,
+ 90, 90, 91, 92, 92, 93, 93, 94, 95, 95, 96,
+ 97, 97, 98, 99, 99, 100, 101, 102, 102, 103, 104,
+ 104, 105, 106, 106, 107, 108, 109, 109, 110, 111, 112,
+ 112, 113, 114, 115, 115, 116, 117, 118, 118, 119, 120,
+ 121, 122, 122, 123, 124, 125, 126, 126, 127, 128, 129,
+ 130, 130, 131, 132, 133, 134, 135, 136, 136, 137, 138,
+ 139, 140, 141, 142, 143, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151, 152, 153, 153, 154, 155, 156, 157, 158,
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
+ 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205,
+ 206, 207, 208, 210, 211, 212, 213, 214, 216, 217, 218,
+ 219, 220, 222, 223, 224, 225, 227, 228, 229, 230, 232,
+ 233, 234, 235, 237, 238, 239, 241, 242, 243, 245, 246,
+ 247, 249, 250, 251, 253, 254, 255, 257, 258, 259, 261,
+ 262, 264, 265, 266, 268, 269, 271, 272, 274, 275, 276,
+ 278, 279, 281, 282, 284, 285, 287, 288, 290, 291, 293,
+ 294, 296, 297, 299, 300, 302, 303, 305, 307, 308, 310,
+ 311, 313, 314, 316, 318, 319, 321, 323, 324, 326, 327,
+ 329, 331, 332, 334, 336, 337, 339, 341, 342, 344, 346,
+ 348, 349, 351, 353, 354, 356, 358, 360, 361, 363, 365,
+ 367, 369, 370, 372, 374, 376, 378, 379, 381, 383, 385,
+ 387, 389, 391, 392, 394, 396, 398, 400, 402, 404, 406,
+ 408, 410, 412, 413, 415, 417, 419, 421, 423, 425, 427,
+ 429, 431, 433, 435, 437, 439, 441, 443, 446, 448, 450,
+ 452, 454, 456, 458, 460, 462, 464, 466, 469, 471, 473,
+ 475, 477, 479, 482, 484, 486, 488, 490, 493, 495, 497,
+ 499, 501, 504, 506, 508, 511, 513, 515, 517, 520, 522,
+ 524, 527, 529, 531, 534, 536, 538, 541, 543, 546, 548,
+ 550, 553, 555, 558, 560, 562, 565, 567, 570, 572, 575,
+ 577, 580, 582, 585, 587, 590, 592, 595, 597, 600, 603,
+ 605, 608, 610, 613, 616, 618, 621, 623, 626, 629, 631,
+ 634, 637, 639, 642, 645, 648, 650, 653, 656, 658, 661,
+ 664, 667, 669, 672, 675, 678, 681, 684, 686, 689, 692,
+ 695, 698, 701, 703, 706, 709, 712, 715, 718, 721, 724,
+ 727, 730, 733, 736, 739, 742, 745, 748, 751, 754, 757,
+ 760, 763, 766, 769, 772, 775, 778, 781, 785, 788, 791,
+ 794, 797, 800, 803, 807, 810, 813, 816, 820, 823, 826,
+ 829, 832, 836, 839, 842, 846, 849, 852, 856, 859, 862,
+ 866, 869, 872, 876, 879, 883, 886, 889, 893, 896, 900,
+ 903, 907, 910, 914, 917, 921, 924, 928, 931, 935, 938,
+ 942, 945, 949, 953, 956, 960, 964, 967, 971, 974, 978,
+ 982, 986, 989, 993, 997, 1000, 1004, 1008, 1012, 1015, 1019,
+ 1023,
+};
+
+static const u16 xgamma10_39[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11,
+ 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
+ 12, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14,
+ 14, 14, 14, 15, 15, 15, 15, 15, 15, 16, 16,
+ 16, 16, 16, 16, 17, 17, 17, 17, 17, 18, 18,
+ 18, 18, 18, 19, 19, 19, 19, 19, 20, 20, 20,
+ 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25,
+ 25, 26, 26, 26, 26, 27, 27, 27, 27, 28, 28,
+ 28, 28, 29, 29, 29, 29, 30, 30, 30, 31, 31,
+ 31, 31, 32, 32, 32, 33, 33, 33, 34, 34, 34,
+ 35, 35, 35, 35, 36, 36, 36, 37, 37, 37, 38,
+ 38, 38, 39, 39, 39, 40, 40, 41, 41, 41, 42,
+ 42, 42, 43, 43, 43, 44, 44, 45, 45, 45, 46,
+ 46, 46, 47, 47, 48, 48, 48, 49, 49, 50, 50,
+ 51, 51, 51, 52, 52, 53, 53, 53, 54, 54, 55,
+ 55, 56, 56, 57, 57, 57, 58, 58, 59, 59, 60,
+ 60, 61, 61, 62, 62, 63, 63, 64, 64, 65, 65,
+ 66, 66, 67, 67, 68, 68, 69, 69, 70, 70, 71,
+ 71, 72, 73, 73, 74, 74, 75, 75, 76, 76, 77,
+ 78, 78, 79, 79, 80, 80, 81, 82, 82, 83, 83,
+ 84, 85, 85, 86, 87, 87, 88, 88, 89, 90, 90,
+ 91, 92, 92, 93, 94, 94, 95, 96, 96, 97, 98,
+ 98, 99, 100, 100, 101, 102, 102, 103, 104, 105, 105,
+ 106, 107, 107, 108, 109, 110, 110, 111, 112, 113, 113,
+ 114, 115, 116, 116, 117, 118, 119, 120, 120, 121, 122,
+ 123, 124, 124, 125, 126, 127, 128, 129, 129, 130, 131,
+ 132, 133, 134, 134, 135, 136, 137, 138, 139, 140, 141,
+ 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 153, 154, 155, 156, 157, 158, 159, 160, 161,
+ 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 190, 191, 192, 193, 194, 195, 196,
+ 198, 199, 200, 201, 202, 203, 204, 206, 207, 208, 209,
+ 210, 212, 213, 214, 215, 217, 218, 219, 220, 221, 223,
+ 224, 225, 227, 228, 229, 230, 232, 233, 234, 236, 237,
+ 238, 239, 241, 242, 243, 245, 246, 248, 249, 250, 252,
+ 253, 254, 256, 257, 259, 260, 261, 263, 264, 266, 267,
+ 269, 270, 271, 273, 274, 276, 277, 279, 280, 282, 283,
+ 285, 286, 288, 289, 291, 292, 294, 295, 297, 299, 300,
+ 302, 303, 305, 306, 308, 310, 311, 313, 314, 316, 318,
+ 319, 321, 323, 324, 326, 328, 329, 331, 333, 334, 336,
+ 338, 340, 341, 343, 345, 346, 348, 350, 352, 353, 355,
+ 357, 359, 361, 362, 364, 366, 368, 370, 372, 373, 375,
+ 377, 379, 381, 383, 385, 386, 388, 390, 392, 394, 396,
+ 398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418,
+ 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440,
+ 442, 444, 446, 448, 451, 453, 455, 457, 459, 461, 463,
+ 466, 468, 470, 472, 474, 477, 479, 481, 483, 485, 488,
+ 490, 492, 494, 497, 499, 501, 504, 506, 508, 511, 513,
+ 515, 518, 520, 522, 525, 527, 529, 532, 534, 537, 539,
+ 541, 544, 546, 549, 551, 554, 556, 559, 561, 564, 566,
+ 569, 571, 574, 576, 579, 581, 584, 586, 589, 592, 594,
+ 597, 599, 602, 605, 607, 610, 613, 615, 618, 621, 623,
+ 626, 629, 632, 634, 637, 640, 643, 645, 648, 651, 654,
+ 656, 659, 662, 665, 668, 671, 673, 676, 679, 682, 685,
+ 688, 691, 694, 697, 700, 702, 705, 708, 711, 714, 717,
+ 720, 723, 726, 729, 732, 735, 739, 742, 745, 748, 751,
+ 754, 757, 760, 763, 766, 770, 773, 776, 779, 782, 786,
+ 789, 792, 795, 798, 802, 805, 808, 811, 815, 818, 821,
+ 825, 828, 831, 835, 838, 841, 845, 848, 852, 855, 858,
+ 862, 865, 869, 872, 876, 879, 883, 886, 890, 893, 897,
+ 900, 904, 907, 911, 914, 918, 922, 925, 929, 933, 936,
+ 940, 944, 947, 951, 955, 958, 962, 966, 969, 973, 977,
+ 981, 985, 988, 992, 996, 1000, 1004, 1007, 1011, 1015, 1019,
+ 1023,
+};
+
+static const u16 xgamma10_40[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10,
+ 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11,
+ 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12,
+ 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
+ 14, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16,
+ 16, 16, 17, 17, 17, 17, 17, 18, 18, 18, 18,
+ 18, 18, 19, 19, 19, 19, 19, 20, 20, 20, 20,
+ 21, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23,
+ 23, 23, 23, 24, 24, 24, 24, 25, 25, 25, 25,
+ 26, 26, 26, 26, 27, 27, 27, 27, 28, 28, 28,
+ 29, 29, 29, 29, 30, 30, 30, 30, 31, 31, 31,
+ 32, 32, 32, 33, 33, 33, 33, 34, 34, 34, 35,
+ 35, 35, 36, 36, 36, 37, 37, 37, 38, 38, 38,
+ 39, 39, 39, 40, 40, 40, 41, 41, 41, 42, 42,
+ 43, 43, 43, 44, 44, 44, 45, 45, 46, 46, 46,
+ 47, 47, 48, 48, 48, 49, 49, 50, 50, 50, 51,
+ 51, 52, 52, 53, 53, 53, 54, 54, 55, 55, 56,
+ 56, 57, 57, 57, 58, 58, 59, 59, 60, 60, 61,
+ 61, 62, 62, 63, 63, 64, 64, 65, 65, 66, 66,
+ 67, 67, 68, 68, 69, 69, 70, 70, 71, 72, 72,
+ 73, 73, 74, 74, 75, 75, 76, 77, 77, 78, 78,
+ 79, 79, 80, 81, 81, 82, 82, 83, 84, 84, 85,
+ 85, 86, 87, 87, 88, 89, 89, 90, 91, 91, 92,
+ 93, 93, 94, 95, 95, 96, 97, 97, 98, 99, 99,
+ 100, 101, 101, 102, 103, 104, 104, 105, 106, 106, 107,
+ 108, 109, 109, 110, 111, 112, 112, 113, 114, 115, 116,
+ 116, 117, 118, 119, 119, 120, 121, 122, 123, 123, 124,
+ 125, 126, 127, 128, 128, 129, 130, 131, 132, 133, 134,
+ 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165,
+ 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
+ 177, 178, 179, 180, 182, 183, 184, 185, 186, 187, 188,
+ 189, 190, 192, 193, 194, 195, 196, 197, 199, 200, 201,
+ 202, 203, 204, 206, 207, 208, 209, 210, 212, 213, 214,
+ 215, 217, 218, 219, 220, 222, 223, 224, 226, 227, 228,
+ 229, 231, 232, 233, 235, 236, 237, 239, 240, 241, 243,
+ 244, 245, 247, 248, 250, 251, 252, 254, 255, 257, 258,
+ 259, 261, 262, 264, 265, 267, 268, 270, 271, 273, 274,
+ 276, 277, 279, 280, 282, 283, 285, 286, 288, 289, 291,
+ 292, 294, 296, 297, 299, 300, 302, 304, 305, 307, 308,
+ 310, 312, 313, 315, 317, 318, 320, 322, 323, 325, 327,
+ 328, 330, 332, 333, 335, 337, 339, 340, 342, 344, 346,
+ 348, 349, 351, 353, 355, 357, 358, 360, 362, 364, 366,
+ 368, 369, 371, 373, 375, 377, 379, 381, 383, 385, 386,
+ 388, 390, 392, 394, 396, 398, 400, 402, 404, 406, 408,
+ 410, 412, 414, 416, 418, 420, 422, 424, 426, 429, 431,
+ 433, 435, 437, 439, 441, 443, 445, 448, 450, 452, 454,
+ 456, 458, 461, 463, 465, 467, 469, 472, 474, 476, 478,
+ 481, 483, 485, 488, 490, 492, 495, 497, 499, 501, 504,
+ 506, 509, 511, 513, 516, 518, 521, 523, 525, 528, 530,
+ 533, 535, 538, 540, 543, 545, 548, 550, 553, 555, 558,
+ 560, 563, 565, 568, 570, 573, 576, 578, 581, 583, 586,
+ 589, 591, 594, 597, 599, 602, 605, 607, 610, 613, 616,
+ 618, 621, 624, 627, 629, 632, 635, 638, 641, 643, 646,
+ 649, 652, 655, 658, 660, 663, 666, 669, 672, 675, 678,
+ 681, 684, 687, 690, 693, 696, 699, 702, 705, 708, 711,
+ 714, 717, 720, 723, 726, 729, 732, 735, 739, 742, 745,
+ 748, 751, 754, 758, 761, 764, 767, 770, 774, 777, 780,
+ 783, 787, 790, 793, 797, 800, 803, 807, 810, 813, 817,
+ 820, 824, 827, 830, 834, 837, 841, 844, 848, 851, 855,
+ 858, 862, 865, 869, 872, 876, 879, 883, 886, 890, 894,
+ 897, 901, 905, 908, 912, 916, 919, 923, 927, 930, 934,
+ 938, 942, 945, 949, 953, 957, 960, 964, 968, 972, 976,
+ 980, 984, 987, 991, 995, 999, 1003, 1007, 1011, 1015, 1019,
+ 1023,
+};
+
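+/*
+ * Gamma curve lookup for the 10-bit tables above: entry N holds the LUT
+ * for gamma (N + 1) / 10, covering gamma 0.1 through 4.0 in 0.1 steps.
+ */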
+static const u16 *xgamma10_curves[GAMMA_CURVE_LENGTH] = {
+ &xgamma10_01[0],
+ &xgamma10_02[0],
+ &xgamma10_03[0],
+ &xgamma10_04[0],
+ &xgamma10_05[0],
+ &xgamma10_06[0],
+ &xgamma10_07[0],
+ &xgamma10_08[0],
+ &xgamma10_09[0],
+ &xgamma10_10[0],
+ &xgamma10_11[0],
+ &xgamma10_12[0],
+ &xgamma10_13[0],
+ &xgamma10_14[0],
+ &xgamma10_15[0],
+ &xgamma10_16[0],
+ &xgamma10_17[0],
+ &xgamma10_18[0],
+ &xgamma10_19[0],
+ &xgamma10_20[0],
+ &xgamma10_21[0],
+ &xgamma10_22[0],
+ &xgamma10_23[0],
+ &xgamma10_24[0],
+ &xgamma10_25[0],
+ &xgamma10_26[0],
+ &xgamma10_27[0],
+ &xgamma10_28[0],
+ &xgamma10_29[0],
+ &xgamma10_30[0],
+ &xgamma10_31[0],
+ &xgamma10_32[0],
+ &xgamma10_33[0],
+ &xgamma10_34[0],
+ &xgamma10_35[0],
+ &xgamma10_36[0],
+ &xgamma10_37[0],
+ &xgamma10_38[0],
+ &xgamma10_39[0],
+ &xgamma10_40[0],
+};
+#endif /* __XILINX_GAMMA_COEFF_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-gamma.c b/drivers/media/platform/xilinx/xilinx-gamma.c
new file mode 100644
index 000000000000..d7996d0f34fa
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-gamma.c
@@ -0,0 +1,543 @@
+/*
+ * Xilinx Gamma Correction IP
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-gamma-coeff.h"
+#include "xilinx-vip.h"
+
+#define XGAMMA_MIN_HEIGHT (64)
+#define XGAMMA_MAX_HEIGHT (4320)
+#define XGAMMA_DEF_HEIGHT (720)
+#define XGAMMA_MIN_WIDTH (64)
+#define XGAMMA_MAX_WIDTH (8192)
+#define XGAMMA_DEF_WIDTH (1280)
+
+#define XGAMMA_AP_CTRL (0x0000)
+#define XGAMMA_GIE (0x0004)
+#define XGAMMA_IER (0x0008)
+#define XGAMMA_ISR (0x000c)
+#define XGAMMA_WIDTH (0x0010)
+#define XGAMMA_HEIGHT (0x0018)
+#define XGAMMA_VIDEO_FORMAT (0x0020)
+#define XGAMMA_GAMMA_LUT_0_BASE (0x0800)
+#define XGAMMA_GAMMA_LUT_1_BASE (0x1000)
+#define XGAMMA_GAMMA_LUT_2_BASE (0x1800)
+
+#define XGAMMA_RESET_DEASSERT (0)
+#define XGAMMA_RESET_ASSERT (1)
+#define XGAMMA_START BIT(0)
+#define XGAMMA_AUTO_RESTART BIT(7)
+#define XGAMMA_STREAM_ON (XGAMMA_START | XGAMMA_AUTO_RESTART)
+
+enum xgamma_video_format {
+ XGAMMA_RGB = 0,
+};
+
+/**
+ * struct xgamma_dev - Xilinx Video Gamma LUT device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: Gamma sub-device media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @ctrl_handler: V4L2 Control Handler for R,G,B Gamma Controls
+ * @red_lut: Pointer to the gamma coefficient table selected by the Red Gamma control
+ * @green_lut: Pointer to the gamma coefficient table selected by the Green Gamma control
+ * @blue_lut: Pointer to the gamma coefficient table selected by the Blue Gamma control
+ * @color_depth: Color depth of the Video Gamma IP
+ * @gamma_table: Pointer to the table containing various gamma values
+ * @rst_gpio: GPIO reset line to bring the Gamma IP out of reset
+ * @max_width: Maximum width supported by this instance.
+ * @max_height: Maximum height supported by this instance.
+ */
+struct xgamma_dev {
+ struct xvip_device xvip;
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ const u16 *red_lut;
+ const u16 *green_lut;
+ const u16 *blue_lut;
+ u32 color_depth;
+ const u16 **gamma_table;
+ struct gpio_desc *rst_gpio;
+ u32 max_width;
+ u32 max_height;
+};
+
+static inline u32 xg_read(struct xgamma_dev *xg, u32 reg)
+{
+ u32 data;
+
+ data = xvip_read(&xg->xvip, reg);
+ dev_dbg(xg->xvip.dev,
+ "Reading 0x%x from reg offset 0x%x", data, reg);
+ return data;
+}
+
+static inline void xg_write(struct xgamma_dev *xg, u32 reg, u32 data)
+{
+ dev_dbg(xg->xvip.dev,
+ "Writing 0x%x to reg offset 0x%x", data, reg);
+ xvip_write(&xg->xvip, reg, data);
+#ifdef DEBUG
+ if (xg_read(xg, reg) != data)
+ dev_err(xg->xvip.dev,
+ "Write 0x%x does not match read back", data);
+#endif
+}
+
+static inline struct xgamma_dev *to_xg(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xgamma_dev, xvip.subdev);
+}
+
+static struct v4l2_mbus_framefmt *
+__xg_get_pad_format(struct xgamma_dev *xg,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(
+ &xg->xvip.subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xg->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static void xg_set_lut_entries(struct xgamma_dev *xg,
+ const u16 *lut, const u32 lut_base)
+{
+ int itr;
+ u32 lut_offset, lut_data;
+
+ lut_offset = lut_base;
+ /* Write LUT Entries */
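+ /*
+ * Each 32-bit LUT word packs two consecutive u16 entries, so
+ * BIT(color_depth - 1) writes cover all BIT(color_depth) table
+ * entries (512 writes for the 1024-entry 10-bit table).
+ */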
+ for (itr = 0; itr < BIT(xg->color_depth - 1); itr++) {
+ lut_data = (lut[2 * itr + 1] << 16) | lut[2 * itr];
+ xg_write(xg, lut_offset, lut_data);
+ lut_offset += 4;
+ }
+}
+
+static int xg_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xgamma_dev *xg = to_xg(subdev);
+
+ if (!enable) {
+ dev_dbg(xg->xvip.dev, "%s : Off", __func__);
+ gpiod_set_value_cansleep(xg->rst_gpio, XGAMMA_RESET_ASSERT);
+ gpiod_set_value_cansleep(xg->rst_gpio, XGAMMA_RESET_DEASSERT);
+ return 0;
+ }
+ dev_dbg(xg->xvip.dev, "%s : Started", __func__);
+
+ dev_dbg(xg->xvip.dev, "%s : Setting width %d and height %d",
+ __func__, xg->formats[XVIP_PAD_SINK].width,
+ xg->formats[XVIP_PAD_SINK].height);
+ xg_write(xg, XGAMMA_WIDTH, xg->formats[XVIP_PAD_SINK].width);
+ xg_write(xg, XGAMMA_HEIGHT, xg->formats[XVIP_PAD_SINK].height);
+ xg_write(xg, XGAMMA_VIDEO_FORMAT, XGAMMA_RGB);
+ xg_set_lut_entries(xg, xg->red_lut, XGAMMA_GAMMA_LUT_0_BASE);
+ xg_set_lut_entries(xg, xg->green_lut, XGAMMA_GAMMA_LUT_1_BASE);
+ xg_set_lut_entries(xg, xg->blue_lut, XGAMMA_GAMMA_LUT_2_BASE);
+
+ /* Start GAMMA Correction LUT Video IP */
+ xg_write(xg, XGAMMA_AP_CTRL, XGAMMA_STREAM_ON);
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops xg_video_ops = {
+ .s_stream = xg_s_stream,
+};
+
+static int xg_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xgamma_dev *xg = to_xg(subdev);
+
+ fmt->format = *__xg_get_pad_format(xg, cfg, fmt->pad, fmt->which);
+ return 0;
+}
+
+static int xg_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xgamma_dev *xg = to_xg(subdev);
+ struct v4l2_mbus_framefmt *__format;
+
+ __format = __xg_get_pad_format(xg, cfg, fmt->pad, fmt->which);
+ *__format = fmt->format;
+
+ if (fmt->pad == XVIP_PAD_SINK) {
+ if (__format->code != MEDIA_BUS_FMT_RBG888_1X24) {
+ dev_dbg(xg->xvip.dev,
+ "Unsupported sink media bus code format");
+ __format->code = MEDIA_BUS_FMT_RBG888_1X24;
+ }
+ }
+ __format->width = clamp_t(unsigned int, fmt->format.width,
+ XGAMMA_MIN_WIDTH, xg->max_width);
+ __format->height = clamp_t(unsigned int, fmt->format.height,
+ XGAMMA_MIN_HEIGHT, xg->max_height);
+
+ fmt->format = *__format;
+ /* Propagate to Source Pad */
+ __format = __xg_get_pad_format(xg, cfg, XVIP_PAD_SOURCE, fmt->which);
+ *__format = fmt->format;
+ return 0;
+}
+
+static int xg_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xgamma_dev *xg = to_xg(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xg->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xg->default_formats[XVIP_PAD_SOURCE];
+ return 0;
+}
+
+static int xg_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops xg_internal_ops = {
+ .open = xg_open,
+ .close = xg_close,
+};
+
+static const struct v4l2_subdev_pad_ops xg_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xg_get_format,
+ .set_fmt = xg_set_format,
+};
+
+static const struct v4l2_subdev_ops xg_ops = {
+ .video = &xg_video_ops,
+ .pad = &xg_pad_ops,
+};
+
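+/*
+ * Map a gamma control value (1..GAMMA_CURVE_LENGTH) to its precomputed
+ * curve: value N selects the table for gamma N / 10.
+ */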
+static int
+select_gamma(s32 value, const u16 **coeff, const u16 **xgamma_curves)
+{
+ if (!coeff)
+ return -EINVAL;
+ if (value <= 0 || value > GAMMA_CURVE_LENGTH)
+ return -EINVAL;
+
+ *coeff = *(xgamma_curves + value - 1);
+ return 0;
+}
+
+static int xg_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ int rval;
+ struct xgamma_dev *xg =
+ container_of(ctrl->handler,
+ struct xgamma_dev, ctrl_handler);
+ dev_dbg(xg->xvip.dev, "%s called", __func__);
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_GAMMA_CORR_RED_GAMMA:
+ rval = select_gamma(ctrl->val, &xg->red_lut, xg->gamma_table);
+ if (rval < 0) {
+ dev_err(xg->xvip.dev, "Invalid Red Gamma");
+ return rval;
+ }
+ dev_dbg(xg->xvip.dev, "%s: Setting Red Gamma to %d.%d",
+ __func__, ctrl->val / 10, ctrl->val % 10);
+ xg_set_lut_entries(xg, xg->red_lut, XGAMMA_GAMMA_LUT_0_BASE);
+ break;
+ case V4L2_CID_XILINX_GAMMA_CORR_BLUE_GAMMA:
+ rval = select_gamma(ctrl->val, &xg->blue_lut, xg->gamma_table);
+ if (rval < 0) {
+ dev_err(xg->xvip.dev, "Invalid Blue Gamma");
+ return rval;
+ }
+ dev_dbg(xg->xvip.dev, "%s: Setting Blue Gamma to %d.%d",
+ __func__, ctrl->val / 10, ctrl->val % 10);
+ xg_set_lut_entries(xg, xg->blue_lut, XGAMMA_GAMMA_LUT_1_BASE);
+ break;
+ case V4L2_CID_XILINX_GAMMA_CORR_GREEN_GAMMA:
+ rval = select_gamma(ctrl->val, &xg->green_lut, xg->gamma_table);
+ if (rval < 0) {
+ dev_err(xg->xvip.dev, "Invalid Green Gamma");
+ return rval;
+ }
+ dev_dbg(xg->xvip.dev, "%s: Setting Green Gamma to %d.%d",
+ __func__, ctrl->val / 10, ctrl->val % 10);
+ xg_set_lut_entries(xg, xg->green_lut, XGAMMA_GAMMA_LUT_2_BASE);
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops xg_ctrl_ops = {
+ .s_ctrl = xg_s_ctrl,
+};
+
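+/*
+ * The control names encode the value-to-gamma mapping: integer values
+ * 1..40 correspond to gamma 0.1..4.0 in steps of 0.1, with 10 (gamma
+ * 1.0) as the linear default.
+ */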
+static struct v4l2_ctrl_config xg_ctrls[] = {
+ /* Red Gamma */
+ {
+ .ops = &xg_ctrl_ops,
+ .id = V4L2_CID_XILINX_GAMMA_CORR_RED_GAMMA,
+ .name = "Red Gamma Correction|1->0.1|10->1.0",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 40,
+ .step = 1,
+ .def = 10,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Blue Gamma */
+ {
+ .ops = &xg_ctrl_ops,
+ .id = V4L2_CID_XILINX_GAMMA_CORR_BLUE_GAMMA,
+ .name = "Blue Gamma Correction|1->0.1|10->1.0",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 40,
+ .step = 1,
+ .def = 10,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Green Gamma */
+ {
+ .ops = &xg_ctrl_ops,
+ .id = V4L2_CID_XILINX_GAMMA_CORR_GREEN_GAMMA,
+ .name = "Green Gamma Correction|1->0.1|10->1.0)",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 40,
+ .step = 1,
+ .def = 10,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+};
+
+static const struct media_entity_operations xg_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int xg_parse_of(struct xgamma_dev *xg)
+{
+ struct device *dev = xg->xvip.dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id = 0;
+ int rval;
+
+ rval = of_property_read_u32(node, "xlnx,max-height", &xg->max_height);
+ if (rval < 0) {
+ dev_err(dev, "xlnx,max-height is missing!");
+ return -EINVAL;
+ } else if (xg->max_height > XGAMMA_MAX_HEIGHT ||
+ xg->max_height < XGAMMA_MIN_HEIGHT) {
+ dev_err(dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(node, "xlnx,max-width", &xg->max_width);
+ if (rval < 0) {
+ dev_err(dev, "xlnx,max-width is missing!");
+ return -EINVAL;
+ } else if (xg->max_width > XGAMMA_MAX_WIDTH ||
+ xg->max_width < XGAMMA_MIN_WIDTH) {
+ dev_err(dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
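+ /* Bindings may wrap the port nodes in a "ports" container node. */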
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ rval = of_property_read_u32(port, "reg", &port_id);
+ if (rval < 0) {
+ dev_err(dev, "No reg in DT");
+ return rval;
+ }
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "Invalid reg in DT");
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(port, "xlnx,video-width",
+ &xg->color_depth);
+ if (rval < 0) {
+ dev_err(dev, "Missing xlnx-video-width in DT");
+ return rval;
+ }
+ switch (xg->color_depth) {
+ case GAMMA_BPC_8:
+ xg->gamma_table = xgamma8_curves;
+ break;
+ case GAMMA_BPC_10:
+ xg->gamma_table = xgamma10_curves;
+ break;
+ default:
+ dev_err(dev, "Unsupported color depth %d",
+ xg->color_depth);
+ return -EINVAL;
+ }
+ }
+ }
+
+ xg->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(xg->rst_gpio)) {
+ if (PTR_ERR(xg->rst_gpio) != -EPROBE_DEFER)
+ dev_err(dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(xg->rst_gpio);
+ }
+ return 0;
+}
+
+static int xg_probe(struct platform_device *pdev)
+{
+ struct xgamma_dev *xg;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *def_fmt;
+ int rval, itr;
+
+ dev_dbg(&pdev->dev, "Gamma LUT Probe Started");
+ xg = devm_kzalloc(&pdev->dev, sizeof(*xg), GFP_KERNEL);
+ if (!xg)
+ return -ENOMEM;
+ xg->xvip.dev = &pdev->dev;
+ rval = xg_parse_of(xg);
+ if (rval < 0)
+ return rval;
+ rval = xvip_init_resources(&xg->xvip);
+ if (rval < 0)
+ return rval;
+
+ dev_dbg(xg->xvip.dev, "Reset Xilinx Video Gamma Corrrection");
+ gpiod_set_value_cansleep(xg->rst_gpio, XGAMMA_RESET_DEASSERT);
+
+ /* Init V4L2 subdev */
+ subdev = &xg->xvip.subdev;
+ v4l2_subdev_init(subdev, &xg_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xg_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Default Formats Initialization */
+ def_fmt = &xg->default_formats[XVIP_PAD_SINK];
+ /* GAMMA LUT IP only to be supported for RGB */
+ def_fmt->code = MEDIA_BUS_FMT_RBG888_1X24;
+ def_fmt->field = V4L2_FIELD_NONE;
+ def_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ def_fmt->width = XGAMMA_DEF_WIDTH;
+ def_fmt->height = XGAMMA_DEF_HEIGHT;
+ xg->formats[XVIP_PAD_SINK] = *def_fmt;
+
+ def_fmt = &xg->default_formats[XVIP_PAD_SOURCE];
+ *def_fmt = xg->default_formats[XVIP_PAD_SINK];
+ xg->formats[XVIP_PAD_SOURCE] = *def_fmt;
+
+ xg->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xg->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ /* Init Media Entity */
+ subdev->entity.ops = &xg_media_ops;
+ rval = media_entity_pads_init(&subdev->entity, 2, xg->pads);
+ if (rval < 0)
+ goto media_error;
+
+ /* V4L2 Controls */
+ v4l2_ctrl_handler_init(&xg->ctrl_handler, ARRAY_SIZE(xg_ctrls));
+ for (itr = 0; itr < ARRAY_SIZE(xg_ctrls); itr++) {
+ v4l2_ctrl_new_custom(&xg->ctrl_handler,
+ &xg_ctrls[itr], NULL);
+ }
+ if (xg->ctrl_handler.error) {
+ dev_err(&pdev->dev, "Failed to add V4L2 controls");
+ rval = xg->ctrl_handler.error;
+ goto ctrl_error;
+ }
+ subdev->ctrl_handler = &xg->ctrl_handler;
+ rval = v4l2_ctrl_handler_setup(&xg->ctrl_handler);
+ if (rval < 0) {
+ dev_err(&pdev->dev, "Failed to setup control handler");
+ goto ctrl_error;
+ }
+
+ platform_set_drvdata(pdev, xg);
+ rval = v4l2_async_register_subdev(subdev);
+ if (rval < 0) {
+ dev_err(&pdev->dev, "failed to register subdev");
+ goto ctrl_error;
+ }
+ dev_info(&pdev->dev,
+ "Xilinx %d-bit Video Gamma Correction LUT registered",
+ xg->color_depth);
+ return 0;
+ctrl_error:
+ v4l2_ctrl_handler_free(&xg->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+media_error:
+ xvip_cleanup_resources(&xg->xvip);
+ return rval;
+}
+
+static int xg_remove(struct platform_device *pdev)
+{
+ struct xgamma_dev *xg = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xg->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xg->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xg->xvip);
+ return 0;
+}
+
+static const struct of_device_id xg_of_id_table[] = {
+ {.compatible = "xlnx,v-gamma-lut"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xg_of_id_table);
+
+static struct platform_driver xg_driver = {
+ .driver = {
+ .name = "xilinx-gamma-lut",
+ .of_match_table = xg_of_id_table,
+ },
+ .probe = xg_probe,
+ .remove = xg_remove,
+};
+
+module_platform_driver(xg_driver);
+MODULE_DESCRIPTION("Xilinx Video Gamma Correction LUT Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-hls-common.h b/drivers/media/platform/xilinx/xilinx-hls-common.h
new file mode 100644
index 000000000000..8ecc3cfb8a83
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-hls-common.h
@@ -0,0 +1,36 @@
+/*
+ * Xilinx HLS common header
+ *
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Radhey Shyam Pandey <radheys@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __XILINX_HLS_COMMON_H__
+#define __XILINX_HLS_COMMON_H__
+
+#include <linux/bitops.h>
+
+#define XHLS_DEF_WIDTH 1920
+#define XHLS_DEF_HEIGHT 1080
+
+#define XHLS_REG_CTRL_DONE BIT(1)
+#define XHLS_REG_CTRL_IDLE BIT(2)
+#define XHLS_REG_CTRL_READY BIT(3)
+#define XHLS_REG_CTRL_AUTO_RESTART BIT(7)
+#define XHLS_REG_GIE 0x04
+#define XHLS_REG_GIE_GIE BIT(0)
+#define XHLS_REG_IER 0x08
+#define XHLS_REG_IER_DONE BIT(0)
+#define XHLS_REG_IER_READY BIT(1)
+#define XHLS_REG_ISR 0x0c
+#define XHLS_REG_ISR_DONE BIT(0)
+#define XHLS_REG_ISR_READY BIT(1)
+#define XHLS_REG_ROWS 0x10
+#define XHLS_REG_COLS 0x18
+
+#endif /* __XILINX_HLS_COMMON_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-hls.c b/drivers/media/platform/xilinx/xilinx-hls.c
new file mode 100644
index 000000000000..fc42977440a9
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-hls.c
@@ -0,0 +1,481 @@
+/*
+ * Xilinx HLS Core
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/xilinx-hls.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-hls-common.h"
+#include "xilinx-vip.h"
+
+/**
+ * struct xhls_device - Xilinx HLS Core device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @compatible: first DT compatible string for the device
+ * @formats: active V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @vip_formats: format information corresponding to the pads active formats
+ * @model: read-only control carrying the IP model (the DT compatible string)
+ * @ctrl_handler: control handler
+ * @user_mem: user portion of the register space
+ * @user_mem_size: size of the user portion of the register space
+ */
+struct xhls_device {
+ struct xvip_device xvip;
+ struct media_pad pads[2];
+
+ const char *compatible;
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *model;
+
+ void __iomem *user_mem;
+ size_t user_mem_size;
+};
+
+static inline struct xhls_device *to_hls(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xhls_device, xvip.subdev);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+static const struct v4l2_ctrl_config xhls_model_ctrl = {
+ .id = V4L2_CID_XILINX_HLS_MODEL,
+ .name = "HLS Model",
+ .type = V4L2_CTRL_TYPE_STRING,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY,
+};
+
+static int xhls_create_controls(struct xhls_device *xhls)
+{
+ struct v4l2_ctrl_config model = xhls_model_ctrl;
+ struct v4l2_ctrl *ctrl;
+
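+ /* Pin the string control length to the compatible string's length. */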
+ model.max = strlen(xhls->compatible);
+ model.min = model.max;
+
+ v4l2_ctrl_handler_init(&xhls->ctrl_handler, 1);
+
+ ctrl = v4l2_ctrl_new_custom(&xhls->ctrl_handler, &model, NULL);
+
+ if (xhls->ctrl_handler.error) {
+ dev_err(xhls->xvip.dev, "failed to add controls\n");
+ return xhls->ctrl_handler.error;
+ }
+
+ v4l2_ctrl_s_ctrl_string(ctrl, xhls->compatible);
+
+ xhls->xvip.subdev.ctrl_handler = &xhls->ctrl_handler;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+static int xhls_user_read(struct xhls_device *xhls,
+ struct xilinx_axi_hls_registers *regs)
+{
+ unsigned int i;
+ u32 offset;
+ u32 value;
+
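+ /* The user window holds one 32-bit register per 4 bytes. */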
+ if (regs->num_regs >= xhls->user_mem_size / 4)
+ return -EINVAL;
+
+ for (i = 0; i < regs->num_regs; ++i) {
+ if (copy_from_user(&offset, &regs->regs[i].offset,
+ sizeof(offset)))
+ return -EFAULT;
+
+ if (offset >= xhls->user_mem_size || offset & 3)
+ return -EINVAL;
+
+ value = ioread32(xhls->user_mem + offset);
+
+ if (copy_to_user(&regs->regs[i].value, &value, sizeof(value)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int xhls_user_write(struct xhls_device *xhls,
+ struct xilinx_axi_hls_registers *regs)
+{
+ struct xilinx_axi_hls_register reg;
+ unsigned int i;
+
+ if (regs->num_regs >= xhls->user_mem_size / 4)
+ return -EINVAL;
+
+ for (i = 0; i < regs->num_regs; ++i) {
+ if (copy_from_user(&reg, &regs->regs[i], sizeof(reg)))
+ return -EFAULT;
+
+ if (reg.offset >= xhls->user_mem_size || reg.offset & 3)
+ return -EINVAL;
+
+ iowrite32(reg.value, xhls->user_mem + reg.offset);
+ }
+
+ return 0;
+}
+
+static long xhls_ioctl(struct v4l2_subdev *subdev, unsigned int cmd, void *arg)
+{
+ struct xhls_device *xhls = to_hls(subdev);
+
+ switch (cmd) {
+ case XILINX_AXI_HLS_READ:
+ return xhls_user_read(xhls, arg);
+ case XILINX_AXI_HLS_WRITE:
+ return xhls_user_write(xhls, arg);
+ }
+
+ return -ENOTTY;
+}
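+
+/*
+ * Illustrative usage (not part of the driver): userspace can access the HLS
+ * core's user register space through the subdev node with the custom ioctls,
+ * e.g.:
+ *
+ *	struct xilinx_axi_hls_register reg = { .offset = 0x10 };
+ *	struct xilinx_axi_hls_registers regs = { .num_regs = 1, .regs = &reg };
+ *
+ *	ioctl(subdev_fd, XILINX_AXI_HLS_READ, &regs);
+ *
+ * The 0x10 offset is a made-up example; valid offsets depend on the HLS
+ * core's register map. Structure layout follows <linux/xilinx-hls.h>.
+ */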
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xhls_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xhls_device *xhls = to_hls(subdev);
+ struct v4l2_mbus_framefmt *format = &xhls->formats[XVIP_PAD_SINK];
+
+ if (!enable) {
+ xvip_write(&xhls->xvip, XVIP_CTRL_CONTROL, 0);
+ return 0;
+ }
+
+ xvip_write(&xhls->xvip, XHLS_REG_COLS, format->width);
+ xvip_write(&xhls->xvip, XHLS_REG_ROWS, format->height);
+
+ xvip_write(&xhls->xvip, XVIP_CTRL_CONTROL,
+ XHLS_REG_CTRL_AUTO_RESTART | XVIP_CTRL_CONTROL_SW_ENABLE);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__xhls_get_pad_format(struct xhls_device *xhls,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xhls->xvip.subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xhls->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xhls_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xhls_device *xhls = to_hls(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xhls_get_pad_format(xhls, cfg, fmt->pad, fmt->which);
+ if (!format)
+ return -EINVAL;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+static int xhls_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xhls_device *xhls = to_hls(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xhls_get_pad_format(xhls, cfg, fmt->pad, fmt->which);
+ if (!format)
+ return -EINVAL;
+
+ if (fmt->pad == XVIP_PAD_SOURCE) {
+ fmt->format = *format;
+ return 0;
+ }
+
+ xvip_set_format_size(format, fmt);
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = __xhls_get_pad_format(xhls, cfg, XVIP_PAD_SOURCE,
+ fmt->which);
+
+ xvip_set_format_size(format, fmt);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static int xhls_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xhls_device *xhls = to_hls(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xhls->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xhls->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int xhls_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_core_ops xhls_core_ops = {
+ .ioctl = xhls_ioctl,
+};
+
+static struct v4l2_subdev_video_ops xhls_video_ops = {
+ .s_stream = xhls_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xhls_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xhls_get_format,
+ .set_fmt = xhls_set_format,
+};
+
+static struct v4l2_subdev_ops xhls_ops = {
+ .core = &xhls_core_ops,
+ .video = &xhls_video_ops,
+ .pad = &xhls_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xhls_internal_ops = {
+ .open = xhls_open,
+ .close = xhls_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xhls_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static void xhls_init_formats(struct xhls_device *xhls)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize default and active formats */
+ format = &xhls->default_formats[XVIP_PAD_SINK];
+ format->code = xhls->vip_formats[XVIP_PAD_SINK]->code;
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ format->width = xvip_read(&xhls->xvip, XHLS_REG_COLS);
+ format->height = xvip_read(&xhls->xvip, XHLS_REG_ROWS);
+
+ xhls->formats[XVIP_PAD_SINK] = *format;
+
+ format = &xhls->default_formats[XVIP_PAD_SOURCE];
+ *format = xhls->default_formats[XVIP_PAD_SINK];
+ format->code = xhls->vip_formats[XVIP_PAD_SOURCE]->code;
+
+ xhls->formats[XVIP_PAD_SOURCE] = *format;
+}
+
+static int xhls_parse_of(struct xhls_device *xhls)
+{
+ struct device *dev = xhls->xvip.dev;
+ struct device_node *node = xhls->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_string(node, "compatible", &xhls->compatible);
+ if (ret < 0)
+ return -EINVAL;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ const struct xvip_video_format *vip_format;
+
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ ret = of_property_read_u32(port, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "no reg in DT");
+ return ret;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "invalid reg in DT");
+ return -EINVAL;
+ }
+
+ xhls->vip_formats[port_id] = vip_format;
+ }
+ }
+
+ return 0;
+}
+
+static int xhls_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct xhls_device *xhls;
+ struct resource *mem;
+ int ret;
+
+ xhls = devm_kzalloc(&pdev->dev, sizeof(*xhls), GFP_KERNEL);
+ if (!xhls)
+ return -ENOMEM;
+
+ xhls->xvip.dev = &pdev->dev;
+
+ ret = xhls_parse_of(xhls);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xhls->xvip);
+ if (ret < 0)
+ return ret;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ xhls->user_mem = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(xhls->user_mem))
+ return PTR_ERR(xhls->user_mem);
+ xhls->user_mem_size = resource_size(mem);
+
+ /* Reset and initialize the core */
+ xvip_reset(&xhls->xvip);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xhls->xvip.subdev;
+ v4l2_subdev_init(subdev, &xhls_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xhls_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xhls);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ xhls_init_formats(xhls);
+
+ xhls->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xhls->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xhls_media_ops;
+ ret = media_entity_pads_init(&subdev->entity, 2, xhls->pads);
+ if (ret < 0)
+ goto error;
+
+ ret = xhls_create_controls(xhls);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xhls);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ dev_info(xhls->xvip.dev, "device %s found\n", xhls->compatible);
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&xhls->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xhls->xvip);
+ return ret;
+}
+
+static int xhls_remove(struct platform_device *pdev)
+{
+ struct xhls_device *xhls = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xhls->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xhls->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xhls->xvip);
+
+ return 0;
+}
+
+static const struct of_device_id xhls_of_id_table[] = {
+ { .compatible = "xlnx,v-hls" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xhls_of_id_table);
+
+static struct platform_driver xhls_driver = {
+ .driver = {
+ .name = "xilinx-hls",
+ .of_match_table = xhls_of_id_table,
+ },
+ .probe = xhls_probe,
+ .remove = xhls_remove,
+};
+
+module_platform_driver(xhls_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Xilinx HLS Core Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-m2m.c b/drivers/media/platform/xilinx/xilinx-m2m.c
new file mode 100644
index 000000000000..b6dff18bc66a
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-m2m.c
@@ -0,0 +1,2116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx V4L2 mem2mem driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
+ */
+
+#include <drm/drm_fourcc.h>
+#include <linux/delay.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/dmaengine.h>
+#include <linux/lcm.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "xilinx-vip.h"
+
+#define XVIP_M2M_NAME "xilinx-mem2mem"
+#define XVIP_M2M_DEFAULT_FMT V4L2_PIX_FMT_RGB24
+
+/* Minimum and maximum widths are expressed in bytes */
+#define XVIP_M2M_MIN_WIDTH 1U
+#define XVIP_M2M_MAX_WIDTH 65535U
+#define XVIP_M2M_MIN_HEIGHT 1U
+#define XVIP_M2M_MAX_HEIGHT 8191U
+
+#define XVIP_M2M_DEF_WIDTH 1920
+#define XVIP_M2M_DEF_HEIGHT 1080
+
+#define XVIP_M2M_PAD_SINK 1
+#define XVIP_M2M_PAD_SOURCE 0
+
+/**
+ * struct xvip_graph_entity - Entity in the video graph
+ * @list: list entry in a graph entities list
+ * @node: the entity's DT node
+ * @entity: media entity, from the corresponding V4L2 subdev
+ * @asd: subdev asynchronous registration information
+ * @subdev: V4L2 subdev
+ * @streaming: whether the entity's V4L2 subdev is currently streaming
+ */
+struct xvip_graph_entity {
+ struct list_head list;
+ struct device_node *node;
+ struct media_entity *entity;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *subdev;
+ bool streaming;
+};
+
+/**
+ * struct xvip_pipeline - Xilinx Video IP pipeline structure
+ * @pipe: media pipeline
+ * @lock: protects the pipeline @stream_count
+ * @use_count: number of DMA engines using the pipeline
+ * @stream_count: number of DMA engines currently streaming
+ * @num_dmas: number of DMA engines in the pipeline
+ * @xdev: Composite device the pipe belongs to
+ */
+struct xvip_pipeline {
+ struct media_pipeline pipe;
+
+ /* protects the pipeline @stream_count */
+ struct mutex lock;
+ unsigned int use_count;
+ unsigned int stream_count;
+
+ unsigned int num_dmas;
+ struct xvip_m2m_dev *xdev;
+};
+
+struct xventity_list {
+ struct list_head list;
+ struct media_entity *entity;
+};
+
+/**
+ * struct xvip_m2m_dev - Xilinx Video mem2mem device structure
+ * @v4l2_dev: V4L2 device
+ * @dev: (OF) device
+ * @media_dev: media device
+ * @notifier: V4L2 asynchronous subdevs notifier
+ * @entities: entities in the graph as a list of xvip_graph_entity
+ * @num_subdevs: number of subdevs in the pipeline
+ * @lock: protects the mem2mem context structure data
+ * @queued_lock: protects the queued video buffer information
+ * @dma: Video DMA channels
+ * @m2m_dev: V4L2 mem2mem device structure
+ * @v4l2_caps: V4L2 capabilities of the whole device
+ */
+struct xvip_m2m_dev {
+ struct v4l2_device v4l2_dev;
+ struct device *dev;
+
+ struct media_device media_dev;
+ struct v4l2_async_notifier notifier;
+ struct list_head entities;
+ unsigned int num_subdevs;
+
+ /* Protects the mem2mem context data */
+ struct mutex lock;
+
+ /* Protects vb2_v4l2_buffer data */
+ spinlock_t queued_lock;
+ struct xvip_m2m_dma *dma;
+ struct v4l2_m2m_dev *m2m_dev;
+ u32 v4l2_caps;
+};
+
+static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
+{
+ return container_of(e->pipe, struct xvip_pipeline, pipe);
+}
+
+/**
+ * struct xvip_m2m_dma - Video DMA channel
+ * @video: V4L2 video device associated with the DMA channel
+ * @xdev: composite mem2mem device the DMA channel belongs to
+ * @chan_tx: DMA engine channel for MEM2DEV transfer
+ * @chan_rx: DMA engine channel for DEV2MEM transfer
+ * @outfmt: active V4L2 OUTPUT port pixel format
+ * @capfmt: active V4L2 CAPTURE port pixel format
+ * @r: crop rectangle parameters
+ * @outinfo: format information corresponding to the active @outfmt
+ * @capinfo: format information corresponding to the active @capfmt
+ * @align: transfer alignment required by the DMA channel (in bytes)
+ * @crop: boolean flag to indicate if crop is requested
+ * @pads: media pads for the video M2M device entity
+ * @pipe: pipeline belonging to the DMA channel
+ */
+struct xvip_m2m_dma {
+ struct video_device video;
+ struct xvip_m2m_dev *xdev;
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+ struct v4l2_format outfmt;
+ struct v4l2_format capfmt;
+ struct v4l2_rect r;
+ const struct xvip_video_format *outinfo;
+ const struct xvip_video_format *capinfo;
+ u32 align;
+ bool crop;
+
+ struct media_pad pads[2];
+ struct xvip_pipeline pipe;
+};
+
+/**
+ * struct xvip_m2m_ctx - VIPP mem2mem context
+ * @fh: V4L2 file handler
+ * @xdev: composite mem2mem device the context belongs to
+ * @xt: dma interleaved template for dma configuration
+ * @sgl: data chunk structure for dma_interleaved_template
+ */
+struct xvip_m2m_ctx {
+ struct v4l2_fh fh;
+ struct xvip_m2m_dev *xdev;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+};
+
+static inline struct xvip_m2m_ctx *file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct xvip_m2m_ctx, fh);
+}
+
+static struct v4l2_subdev *
+xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
+{
+ struct media_pad *remote;
+
+ remote = media_entity_remote_pad(local);
+ if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
+ return NULL;
+
+ if (pad)
+ *pad = remote->index;
+
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+static int xvip_dma_verify_format(struct xvip_m2m_dma *dma)
+{
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ int ret;
+ int width, height;
+
+ subdev = xvip_dma_remote_subdev(&dma->pads[XVIP_PAD_SOURCE], &fmt.pad);
+ if (!subdev)
+ return -EPIPE;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+ if (dma->outinfo->code != fmt.format.code)
+ return -EINVAL;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->outfmt.type)) {
+ width = dma->outfmt.fmt.pix_mp.width;
+ height = dma->outfmt.fmt.pix_mp.height;
+ } else {
+ width = dma->outfmt.fmt.pix.width;
+ height = dma->outfmt.fmt.pix.height;
+ }
+
+ if (width != fmt.format.width || height != fmt.format.height)
+ return -EINVAL;
+
+ return 0;
+}
+
+#define to_xvip_dma(vdev) container_of(vdev, struct xvip_m2m_dma, video)
+/* -----------------------------------------------------------------------------
+ * Pipeline Stream Management
+ */
+
+/**
+ * xvip_subdev_set_streaming - Find and update streaming status of subdev
+ * @xdev: Composite video device
+ * @subdev: V4L2 sub-device
+ * @enable: enable/disable streaming status
+ *
+ * Walk the xvip graph entities list to find the subdev and update its
+ * streaming status to the requested value.
+ *
+ * Return: the previous streaming status (true or false) of the subdev. If the
+ * subdev is not present in the entities list, warn and return false.
+ */
+static bool xvip_subdev_set_streaming(struct xvip_m2m_dev *xdev,
+ struct v4l2_subdev *subdev, bool enable)
+{
+ struct xvip_graph_entity *entity;
+
+ list_for_each_entry(entity, &xdev->entities, list)
+ if (entity->node == subdev->dev->of_node) {
+ bool status = entity->streaming;
+
+ entity->streaming = enable;
+ return status;
+ }
+
+ WARN(1, "Should never get here\n");
+ return false;
+}
+
+static int xvip_entity_start_stop(struct xvip_m2m_dev *xdev,
+ struct media_entity *entity, bool start)
+{
+ struct v4l2_subdev *subdev;
+ bool is_streaming;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "%s entity %s\n",
+ start ? "Starting" : "Stopping", entity->name);
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ /* This is to maintain list of stream on/off devices */
+ is_streaming = xvip_subdev_set_streaming(xdev, subdev, start);
+
+ /*
+ * start or stop the subdev only once, in case it is shared
+ * between sub-graphs
+ */
+ if (start && !is_streaming) {
+ /* power-on subdevice */
+ ret = v4l2_subdev_call(subdev, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_power on failed on subdev\n");
+ xvip_subdev_set_streaming(xdev, subdev, 0);
+ return ret;
+ }
+
+ /* stream-on subdevice */
+ ret = v4l2_subdev_call(subdev, video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_stream on failed on subdev\n");
+ v4l2_subdev_call(subdev, core, s_power, 0);
+ xvip_subdev_set_streaming(xdev, subdev, 0);
+ }
+ } else if (!start && is_streaming) {
+ /* stream-off subdevice */
+ ret = v4l2_subdev_call(subdev, video, s_stream, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_stream off failed on subdev\n");
+ xvip_subdev_set_streaming(xdev, subdev, 1);
+ }
+
+ /* power-off subdevice */
+ ret = v4l2_subdev_call(subdev, core, s_power, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ dev_err(xdev->dev,
+ "s_power off failed on subdev\n");
+ }
+
+ return ret;
+}
+
+/**
+ * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
+ * @xdev: Composite video device
+ * @dma: xvip dma
+ * @start: Start (when true) or stop (when false) the pipeline
+ *
+ * Walk the entities chain starting at @dma and start or stop all of them.
+ *
+ * Return: 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
+ */
+static int xvip_pipeline_start_stop(struct xvip_m2m_dev *xdev,
+ struct xvip_m2m_dma *dma, bool start)
+{
+ struct media_graph graph;
+ struct media_entity *entity = &dma->video.entity;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct xventity_list *temp, *_temp;
+ LIST_HEAD(ent_list);
+ int ret = 0;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ /* Walk the graph to locate the subdev nodes */
+ ret = media_graph_walk_init(&graph, mdev);
+ if (ret)
+ goto error;
+
+ media_graph_walk_start(&graph, entity);
+
+ /* get the list of entities */
+ while ((entity = media_graph_walk_next(&graph))) {
+ struct xventity_list *ele;
+
+ /* We want to stream on/off only subdevs */
+ if (!is_media_entity_v4l2_subdev(entity))
+ continue;
+
+ /* Maintain the pipeline sequence in a list */
+ ele = kzalloc(sizeof(*ele), GFP_KERNEL);
+ if (!ele) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ele->entity = entity;
+ list_add(&ele->list, &ent_list);
+ }
+
+ if (start) {
+ list_for_each_entry_safe(temp, _temp, &ent_list, list) {
+ /* Enable all subdevs from sink to source */
+ ret = xvip_entity_start_stop(xdev, temp->entity, start);
+ if (ret < 0) {
+ dev_err(xdev->dev, "ret = %d for entity %s\n",
+ ret, temp->entity->name);
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry_safe_reverse(temp, _temp, &ent_list, list)
+ /* Disable all subdevs from source to sink */
+ xvip_entity_start_stop(xdev, temp->entity, start);
+ }
+
+ list_for_each_entry_safe(temp, _temp, &ent_list, list) {
+ list_del(&temp->list);
+ kfree(temp);
+ }
+
+error:
+ mutex_unlock(&mdev->graph_mutex);
+ media_graph_walk_cleanup(&graph);
+ return ret;
+}
+
+/**
+ * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
+ * @pipe: The pipeline
+ * @on: Turn the stream on when true or off when false
+ *
+ * The pipeline is shared between all DMA engines connected at its input and
+ * output. While the stream state of DMA engines can be controlled
+ * independently, pipelines have a shared stream state that enables or disables
+ * all entities in the pipeline. For this reason the pipeline uses a streaming
+ * counter that tracks the number of DMA engines that have requested the stream
+ * to be enabled. This will walk the graph starting from each DMA and enable or
+ * disable the entities in the path.
+ *
+ * When called with the @on argument set to true, this function will increment
+ * the pipeline streaming count. If the streaming count reaches the number of
+ * DMA engines in the pipeline it will enable all entities that belong to the
+ * pipeline.
+ *
+ * Similarly, when called with the @on argument set to false, this function will
+ * decrement the pipeline streaming count and disable all entities in the
+ * pipeline when the streaming count reaches zero.
+ *
+ * Return: 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise. Stopping the pipeline never fails. The pipeline state is
+ * not updated when the operation fails.
+ */
+static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
+{
+ struct xvip_m2m_dev *xdev;
+ struct xvip_m2m_dma *dma;
+ int ret = 0;
+
+ mutex_lock(&pipe->lock);
+ xdev = pipe->xdev;
+ dma = xdev->dma;
+
+ if (on) {
+ ret = xvip_pipeline_start_stop(xdev, dma, true);
+ if (ret < 0)
+ goto done;
+ pipe->stream_count++;
+ } else {
+ if (--pipe->stream_count == 0)
+ xvip_pipeline_start_stop(xdev, dma, false);
+ }
+
+done:
+ mutex_unlock(&pipe->lock);
+ return ret;
+}
+
+static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
+ struct xvip_m2m_dma *start)
+{
+ struct media_graph graph;
+ struct media_entity *entity = &start->video.entity;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ unsigned int num_inputs = 0;
+ unsigned int num_outputs = 0;
+ int ret;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ /* Walk the graph to locate the video nodes. */
+ ret = media_graph_walk_init(&graph, mdev);
+ if (ret) {
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+ }
+
+ media_graph_walk_start(&graph, entity);
+
+ while ((entity = media_graph_walk_next(&graph))) {
+ struct xvip_m2m_dma *dma;
+
+ if (entity->function != MEDIA_ENT_F_IO_V4L)
+ continue;
+
+ dma = to_xvip_dma(media_entity_to_video_device(entity));
+
+ num_outputs++;
+ num_inputs++;
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ media_graph_walk_cleanup(&graph);
+
+ /* We need at least one DMA to proceed */
+ if (num_outputs == 0 && num_inputs == 0)
+ return -EPIPE;
+
+ pipe->num_dmas = num_inputs + num_outputs;
+ pipe->xdev = start->xdev;
+
+ return 0;
+}
+
+static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
+{
+ pipe->num_dmas = 0;
+}
+
+/**
+ * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
+ * @pipe: the pipeline
+ *
+ * Decrease the pipeline use count and clean it up if we were the last user.
+ */
+static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
+{
+ mutex_lock(&pipe->lock);
+
+ /* If we're the last user clean up the pipeline. */
+ if (--pipe->use_count == 0)
+ __xvip_pipeline_cleanup(pipe);
+
+ mutex_unlock(&pipe->lock);
+}
+
+/**
+ * xvip_pipeline_prepare - Prepare the pipeline for streaming
+ * @pipe: the pipeline
+ * @dma: DMA engine at one end of the pipeline
+ *
+ * Validate the pipeline if no user exists yet, otherwise just increase the use
+ * count.
+ *
+ * Return: 0 if successful or -EPIPE if the pipeline is not valid.
+ */
+static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
+ struct xvip_m2m_dma *dma)
+{
+ int ret;
+
+ mutex_lock(&pipe->lock);
+
+ /* If we're the first user validate and initialize the pipeline. */
+ if (pipe->use_count == 0) {
+ ret = xvip_pipeline_validate(pipe, dma);
+ if (ret < 0) {
+ __xvip_pipeline_cleanup(pipe);
+ goto done;
+ }
+ }
+
+ pipe->use_count++;
+ ret = 0;
+
+done:
+ mutex_unlock(&pipe->lock);
+ return ret;
+}
+
+static void xvip_m2m_dma_callback_mem2dev(void *data)
+{
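+ /*
+ * Nothing to do on the mem2dev (read) side: xvip_m2m_dma_callback()
+ * on the dev2mem side removes and finishes both the source and
+ * destination buffers once the write-back completes.
+ */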
+}
+
+static void xvip_m2m_dma_callback(void *data)
+{
+ struct xvip_m2m_ctx *ctx = data;
+ struct xvip_m2m_dev *xdev = ctx->xdev;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+
+ spin_lock(&xdev->queued_lock);
+ src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |=
+ src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->timecode = src_vb->timecode;
+
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(xdev->m2m_dev, ctx->fh.m2m_ctx);
+ spin_unlock(&xdev->queued_lock);
+}
+
+/*
+ * Queue operations
+ */
+
+static int xvip_m2m_queue_setup(struct vb2_queue *vq,
+ u32 *nbuffers, u32 *nplanes,
+ u32 sizes[], struct device *alloc_devs[])
+{
+ struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(vq);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct v4l2_format *f;
+ const struct xvip_video_format *info;
+ u32 i;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ f = &dma->outfmt;
+ info = dma->outinfo;
+ } else {
+ f = &dma->capfmt;
+ info = dma->capinfo;
+ }
+
+ if (*nplanes) {
+ if (*nplanes != f->fmt.pix_mp.num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *nplanes; i++) {
+ if (sizes[i] < f->fmt.pix_mp.plane_fmt[i].sizeimage)
+ return -EINVAL;
+ }
+ } else {
+ *nplanes = info->buffers;
+ for (i = 0; i < info->buffers; i++)
+ sizes[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ }
+
+ return 0;
+}
+
+static int xvip_m2m_buf_prepare(struct vb2_buffer *vb)
+{
+ struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct v4l2_format *f;
+ const struct xvip_video_format *info;
+ u32 i;
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ f = &dma->outfmt;
+ info = dma->outinfo;
+ } else {
+ f = &dma->capfmt;
+ info = dma->capinfo;
+ }
+
+ for (i = 0; i < info->buffers; i++) {
+ if (vb2_plane_size(vb, i) <
+ f->fmt.pix_mp.plane_fmt[i].sizeimage) {
+ dev_err(ctx->xdev->dev,
+ "insufficient plane size (%u < %u)\n",
+ (u32)vb2_plane_size(vb, i),
+ f->fmt.pix_mp.plane_fmt[i].sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, i,
+ f->fmt.pix_mp.plane_fmt[i].sizeimage);
+ }
+
+ return 0;
+}
+
+static void xvip_m2m_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void xvip_m2m_stop_streaming(struct vb2_queue *q)
+{
+ struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
+ struct vb2_v4l2_buffer *vbuf;
+
+ dma->crop = false;
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ dmaengine_terminate_sync(dma->chan_tx);
+ else
+ dmaengine_terminate_sync(dma->chan_rx);
+
+ if (ctx->xdev->num_subdevs) {
+ /* Stop the pipeline. */
+ xvip_pipeline_set_stream(pipe, false);
+
+ /* Cleanup the pipeline and mark it as being stopped. */
+ xvip_pipeline_cleanup(pipe);
+ media_pipeline_stop(&dma->video.entity);
+ }
+
+ for (;;) {
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (!vbuf)
+ return;
+
+ spin_lock(&ctx->xdev->queued_lock);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ spin_unlock(&ctx->xdev->queued_lock);
+ }
+}
+
+static int xvip_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct xvip_m2m_dev *xdev = ctx->xdev;
+ struct xvip_pipeline *pipe;
+ int ret;
+
+ if (!xdev->num_subdevs)
+ return 0;
+
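+ /*
+ * Reuse the pipeline the entity is already part of, if any; otherwise
+ * use the pipeline object embedded in this DMA channel.
+ */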
+ pipe = dma->video.entity.pipe
+ ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
+
+ ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
+ if (ret < 0)
+ goto error;
+
+ /*
+ * Verify that the configured format matches the output of the
+ * connected subdev.
+ */
+ ret = xvip_dma_verify_format(dma);
+ if (ret < 0)
+ goto error_stop;
+
+ ret = xvip_pipeline_prepare(pipe, dma);
+ if (ret < 0)
+ goto error_stop;
+
+ /* Start the pipeline. */
+ ret = xvip_pipeline_set_stream(pipe, true);
+ if (ret < 0)
+ goto error_stop;
+
+ return 0;
+error_stop:
+ media_pipeline_stop(&dma->video.entity);
+
+error:
+ xvip_m2m_stop_streaming(q);
+
+ return ret;
+}
+
+static const struct vb2_ops m2m_vb2_ops = {
+ .queue_setup = xvip_m2m_queue_setup,
+ .buf_prepare = xvip_m2m_buf_prepare,
+ .buf_queue = xvip_m2m_buf_queue,
+ .start_streaming = xvip_m2m_start_streaming,
+ .stop_streaming = xvip_m2m_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int xvip_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct xvip_m2m_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &m2m_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->dev = ctx->xdev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &m2m_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->dev = ctx->xdev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int
+xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ strlcpy(cap->driver, XVIP_M2M_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, XVIP_M2M_NAME, sizeof(cap->card));
+ strlcpy(cap->bus_info, XVIP_M2M_NAME, sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int
+xvip_m2m_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ const struct xvip_video_format *fmtinfo;
+ const struct xvip_video_format *fmt;
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_format v4l_fmt;
+ struct xvip_m2m_dev *xdev = ctx->xdev;
+ u32 i, fmt_cnt, *fmts;
+ int ret;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ ret = xilinx_xdma_get_v4l2_vid_fmts(dma->chan_rx,
+ &fmt_cnt, &fmts);
+ else
+ ret = xilinx_xdma_get_v4l2_vid_fmts(dma->chan_tx,
+ &fmt_cnt, &fmts);
+ if (ret)
+ return ret;
+
+ if (f->index >= fmt_cnt)
+ return -EINVAL;
+
+ if (!xdev->num_subdevs) {
+ fmt = xvip_get_format_by_fourcc(fmts[f->index]);
+ if (IS_ERR(fmt))
+ return PTR_ERR(fmt);
+
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+
+ if (f->index > 0)
+ return -EINVAL;
+
+ /* Establish media pad format */
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ subdev = xvip_dma_remote_subdev(&dma->pads[XVIP_PAD_SOURCE],
+ &v4l_fmt.pad);
+ else
+ subdev = xvip_dma_remote_subdev(&dma->pads[XVIP_PAD_SINK],
+ &v4l_fmt.pad);
+ if (!subdev)
+ return -EPIPE;
+
+ v4l_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &v4l_fmt);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+ for (i = 0; i < fmt_cnt; i++) {
+ fmt = xvip_get_format_by_fourcc(fmts[i]);
+ if (IS_ERR(fmt))
+ return PTR_ERR(fmt);
+
+ if (fmt->code == v4l_fmt.format.code)
+ break;
+ }
+
+ if (i >= fmt_cnt)
+ return -EINVAL;
+
+ fmtinfo = xvip_get_format_by_fourcc(fmts[i]);
+ f->pixelformat = fmtinfo->fourcc;
+
+ return 0;
+}
+
+static int xvip_m2m_get_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ f->fmt.pix_mp = dma->outfmt.fmt.pix_mp;
+ else
+ f->fmt.pix_mp = dma->capfmt.fmt.pix_mp;
+
+ return 0;
+}
+
+static int __xvip_m2m_try_fmt(struct xvip_m2m_ctx *ctx, struct v4l2_format *f)
+{
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ const struct xvip_video_format *info;
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt;
+ u32 align, min_width, max_width;
+ u32 bpl, min_bpl, max_bpl;
+ u32 padding_factor_nume, padding_factor_deno;
+ u32 bpl_nume, bpl_deno;
+ u32 i, plane_width, plane_height;
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ struct xvip_m2m_dev *xdev = ctx->xdev;
+ int ret;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ if (xdev->num_subdevs) {
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ subdev = xvip_dma_remote_subdev
+ (&dma->pads[XVIP_PAD_SOURCE], &fmt.pad);
+ else
+ subdev = xvip_dma_remote_subdev
+ (&dma->pads[XVIP_PAD_SINK], &fmt.pad);
+
+ if (!subdev)
+ return -EPIPE;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return -EINVAL;
+ }
+
+ pix_mp = &f->fmt.pix_mp;
+ plane_fmt = pix_mp->plane_fmt;
+ info = xvip_get_format_by_fourcc(f->fmt.pix_mp.pixelformat);
+ if (!IS_ERR(info)) {
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ dma->outinfo = info;
+ else
+ dma->capinfo = info;
+ } else {
+ info = xvip_get_format_by_fourcc(XVIP_M2M_DEFAULT_FMT);
+ }
+
+ if (xdev->num_subdevs) {
+ if (info->code != fmt.format.code ||
+ fmt.format.width != pix_mp->width ||
+ fmt.format.height != pix_mp->height) {
+ dev_err(xdev->dev, "Failed to set format\n");
+ dev_info(xdev->dev,
+ "Requested code = %d, width = %d, height = %d\n",
+ info->code, pix_mp->width, pix_mp->height);
+ dev_info(xdev->dev,
+ "Subdev code = %d, width = %d, height = %d\n",
+ fmt.format.code, fmt.format.width,
+ fmt.format.height);
+ return -EINVAL;
+ }
+ }
+
+ xvip_width_padding_factor(info->fourcc, &padding_factor_nume,
+ &padding_factor_deno);
+ xvip_bpl_scaling_factor(info->fourcc, &bpl_nume, &bpl_deno);
+
+ /*
+ * V4L2 specification suggests the driver corrects the format struct
+ * if any of the dimensions is unsupported
+ */
+ align = lcm(dma->align, info->bpp >> 3);
+ min_width = roundup(XVIP_M2M_MIN_WIDTH, align);
+ max_width = rounddown(XVIP_M2M_MAX_WIDTH, align);
+ pix_mp->width = clamp(pix_mp->width, min_width, max_width);
+ pix_mp->height = clamp(pix_mp->height, XVIP_M2M_MIN_HEIGHT,
+ XVIP_M2M_MAX_HEIGHT);
+
+ /*
+ * Clamp the requested bytes per line value to the supported
+ * range, rounded to the transfer alignment.
+ */
+ max_bpl = rounddown(XVIP_M2M_MAX_WIDTH, align);
+
+ if (info->buffers == 1) {
+ /* Handling contiguous data with mplanes */
+ min_bpl = (pix_mp->width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ min_bpl = roundup(min_bpl, align);
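+ /*
+ * Illustrative arithmetic: for a 1920-pixel-wide single-plane
+ * format with an assumed bpl_factor of 3 (24 bits per pixel)
+ * and unity padding/scaling factors, min_bpl = 1920 * 3 = 5760
+ * bytes, rounded up to the DMA alignment.
+ */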
+ bpl = roundup(plane_fmt[0].bytesperline, align);
+ plane_fmt[0].bytesperline = clamp(bpl, min_bpl, max_bpl);
+
+ if (info->num_planes == 1) {
+ /* Single plane formats */
+ plane_fmt[0].sizeimage = plane_fmt[0].bytesperline *
+ pix_mp->height;
+ } else {
+ /* Multi plane formats in contiguous buffer*/
+ plane_fmt[0].sizeimage =
+ DIV_ROUND_UP(plane_fmt[0].bytesperline *
+ pix_mp->height *
+ info->bpp, 8);
+ }
+ } else {
+ /* Handling non-contiguous data with mplanes */
+ for (i = 0; i < info->num_planes; i++) {
+ plane_width = pix_mp->width / (i ? info->hsub : 1);
+ plane_height = pix_mp->height / (i ? info->vsub : 1);
+ min_bpl = (plane_width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ min_bpl = roundup(min_bpl, align);
+ bpl = rounddown(plane_fmt[i].bytesperline, align);
+ plane_fmt[i].bytesperline = clamp(bpl, min_bpl,
+ max_bpl);
+ plane_fmt[i].sizeimage = plane_fmt[i].bytesperline *
+ plane_height;
+ }
+ }
+
+ return 0;
+}
+
+static int xvip_m2m_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+
+ return __xvip_m2m_try_fmt(ctx, f);
+}
+
+static int xvip_m2m_set_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->xdev->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = __xvip_m2m_try_fmt(ctx, f);
+ if (ret < 0)
+ return ret;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ dma->outfmt.fmt.pix_mp = f->fmt.pix_mp;
+ else
+ dma->capfmt.fmt.pix_mp = f->fmt.pix_mp;
+
+ return 0;
+}
+
+static int
+xvip_m2m_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ int ret = 0;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ ret = -ENOTTY;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = dma->r.width;
+ s->r.height = dma->r.height;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int
+xvip_m2m_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ u32 min_width, max_width;
+ int ret = 0;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ ret = -ENOTTY;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ if (s->r.width > dma->outfmt.fmt.pix_mp.width ||
+ s->r.height > dma->outfmt.fmt.pix_mp.height ||
+ s->r.top != 0 || s->r.left != 0)
+ return -EINVAL;
+
+ dma->crop = true;
+ min_width = roundup(XVIP_M2M_MIN_WIDTH, dma->align);
+ max_width = rounddown(XVIP_M2M_MAX_WIDTH, dma->align);
+ dma->r.width = clamp(s->r.width, min_width, max_width);
+ dma->r.height = s->r.height;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
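+
+/*
+ * Illustrative usage (not part of the driver): cropping is requested on the
+ * OUTPUT side before streaming, e.g.:
+ *
+ *	struct v4l2_selection sel = {
+ *		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
+ *		.target = V4L2_SEL_TGT_CROP,
+ *		.r = { .left = 0, .top = 0, .width = 1280, .height = 720 },
+ *	};
+ *	ioctl(m2m_fd, VIDIOC_S_SELECTION, &sel);
+ *
+ * The 1280x720 rectangle is a made-up example; only rectangles anchored at
+ * (0, 0) and no larger than the active OUTPUT format are accepted, as
+ * enforced above.
+ */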
+
+static const struct v4l2_ioctl_ops xvip_m2m_ioctl_ops = {
+ .vidioc_querycap = xvip_dma_querycap,
+
+ .vidioc_enum_fmt_vid_cap = xvip_m2m_enum_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = xvip_m2m_get_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = xvip_m2m_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = xvip_m2m_set_fmt,
+
+ .vidioc_enum_fmt_vid_out = xvip_m2m_enum_fmt,
+ .vidioc_g_fmt_vid_out_mplane = xvip_m2m_get_fmt,
+ .vidioc_try_fmt_vid_out_mplane = xvip_m2m_try_fmt,
+ .vidioc_s_fmt_vid_out_mplane = xvip_m2m_set_fmt,
+ .vidioc_s_selection = xvip_m2m_s_selection,
+ .vidioc_g_selection = xvip_m2m_g_selection,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+};
+
+/*
+ * File operations
+ */
+static int xvip_m2m_open(struct file *file)
+{
+ struct xvip_m2m_dev *xdev = video_drvdata(file);
+ struct xvip_m2m_ctx *ctx = NULL;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->xdev = xdev;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(xdev->m2m_dev, ctx,
+ &xvip_m2m_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+ dev_info(xdev->dev, "Created instance %p, m2m_ctx: %p\n", ctx,
+ ctx->fh.m2m_ctx);
+ return 0;
+}
+
+static int xvip_m2m_release(struct file *file)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return 0;
+}
+
+static u32 xvip_m2m_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+ int ret;
+
+ mutex_lock(&ctx->xdev->lock);
+ ret = v4l2_m2m_poll(file, ctx->fh.m2m_ctx, wait);
+ mutex_unlock(&ctx->xdev->lock);
+
+ return ret;
+}
+
+static int xvip_m2m_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+
+ return v4l2_m2m_mmap(file, ctx->fh.m2m_ctx, vma);
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+static int xvip_m2m_job_ready(void *priv)
+{
+ struct xvip_m2m_ctx *ctx = priv;
+
+ if ((v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) &&
+ (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) > 0))
+ return 1;
+
+ return 0;
+}
+
+static void xvip_m2m_job_abort(void *priv)
+{
+ struct xvip_m2m_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ v4l2_m2m_job_finish(ctx->xdev->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static void xvip_m2m_prep_submit_dev2mem_desc(struct xvip_m2m_ctx *ctx,
+ struct vb2_v4l2_buffer *dst_buf)
+{
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct xvip_m2m_dev *xdev = ctx->xdev;
+ struct dma_async_tx_descriptor *desc;
+ dma_addr_t p_out;
+ const struct xvip_video_format *info;
+ struct v4l2_pix_format_mplane *pix_mp;
+ u32 padding_factor_nume, padding_factor_deno;
+ u32 bpl_nume, bpl_deno;
+ u32 luma_size;
+ u32 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ enum operation_mode mode = DEFAULT;
+ u32 bpl, dst_width, dst_height;
+
+ p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+
+ if (!p_out) {
+ dev_err(xdev->dev,
+ "Acquiring kernel pointer to buffer failed\n");
+ return;
+ }
+
+ ctx->xt.dir = DMA_DEV_TO_MEM;
+ ctx->xt.src_sgl = false;
+ ctx->xt.dst_sgl = true;
+ ctx->xt.dst_start = p_out;
+
+ pix_mp = &dma->capfmt.fmt.pix_mp;
+ bpl = pix_mp->plane_fmt[0].bytesperline;
+ if (dma->crop) {
+ dst_width = dma->r.width;
+ dst_height = dma->r.height;
+ } else {
+ dst_width = pix_mp->width;
+ dst_height = pix_mp->height;
+ }
+
+ info = dma->capinfo;
+ xilinx_xdma_set_mode(dma->chan_rx, mode);
+ xilinx_xdma_v4l2_config(dma->chan_rx, pix_mp->pixelformat);
+ xvip_width_padding_factor(pix_mp->pixelformat, &padding_factor_nume,
+ &padding_factor_deno);
+ xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume, &bpl_deno);
+
+ ctx->xt.frame_size = info->num_planes;
+ ctx->sgl[0].size = (dst_width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ ctx->sgl[0].icg = bpl - ctx->sgl[0].size;
+ ctx->xt.numf = dst_height;
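+
+ /*
+ * In dmaengine interleaved-template terms, sgl[0].size is the payload
+ * in bytes per line, sgl[0].icg is the gap to the next line (stride
+ * minus payload), and numf is the number of lines in the frame.
+ */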
+
+ /*
+ * dst_icg is the number of bytes to jump after last luma addr
+ * and before first chroma addr
+ */
+ ctx->sgl[0].src_icg = 0;
+
+ if (info->buffers == 1) {
+ /* Handling contiguous data with mplanes */
+ ctx->sgl[0].dst_icg = 0;
+ if (dma->crop)
+ ctx->sgl[0].dst_icg = bpl *
+ (pix_mp->height - dst_height);
+ } else {
+ /* Handling non-contiguous data with mplanes */
+ if (info->buffers == 2) {
+ dma_addr_t chroma_cap =
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1);
+ luma_size = pix_mp->plane_fmt[0].bytesperline *
+ ctx->xt.numf;
+ if (chroma_cap > p_out)
+ ctx->sgl[0].dst_icg = chroma_cap - p_out -
+ luma_size;
+ }
+ }
+
+ desc = dmaengine_prep_interleaved_dma(dma->chan_rx, &ctx->xt, flags);
+ if (!desc) {
+ dev_err(xdev->dev, "Failed to prepare DMA rx transfer\n");
+ return;
+ }
+
+ desc->callback = xvip_m2m_dma_callback;
+ desc->callback_param = ctx;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(dma->chan_rx);
+}
+
+static void xvip_m2m_prep_submit_mem2dev_desc(struct xvip_m2m_ctx *ctx,
+ struct vb2_v4l2_buffer *src_buf)
+{
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct xvip_m2m_dev *xdev = ctx->xdev;
+ struct dma_async_tx_descriptor *desc;
+ dma_addr_t p_in;
+ const struct xvip_video_format *info;
+ struct v4l2_pix_format_mplane *pix_mp;
+ u32 padding_factor_nume, padding_factor_deno;
+ u32 bpl_nume, bpl_deno;
+ u32 luma_size;
+ u32 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ enum operation_mode mode = DEFAULT;
+ u32 bpl, src_width, src_height;
+
+ p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+
+ if (!p_in) {
+ dev_err(xdev->dev,
+ "Acquiring kernel pointer to buffer failed\n");
+ return;
+ }
+
+ ctx->xt.dir = DMA_MEM_TO_DEV;
+ ctx->xt.src_sgl = true;
+ ctx->xt.dst_sgl = false;
+ ctx->xt.src_start = p_in;
+
+ pix_mp = &dma->outfmt.fmt.pix_mp;
+ bpl = pix_mp->plane_fmt[0].bytesperline;
+ if (dma->crop) {
+ src_width = dma->r.width;
+ src_height = dma->r.height;
+ } else {
+ src_width = pix_mp->width;
+ src_height = pix_mp->height;
+ }
+
+ info = dma->outinfo;
+ xilinx_xdma_set_mode(dma->chan_tx, mode);
+ xilinx_xdma_v4l2_config(dma->chan_tx, pix_mp->pixelformat);
+ xvip_width_padding_factor(pix_mp->pixelformat, &padding_factor_nume,
+ &padding_factor_deno);
+ xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume, &bpl_deno);
+
+ ctx->xt.frame_size = info->num_planes;
+ ctx->sgl[0].size = (src_width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ ctx->sgl[0].icg = bpl - ctx->sgl[0].size;
+ ctx->xt.numf = src_height;
+
+ /*
+ * src_icg is the number of bytes to jump after last luma addr
+ * and before first chroma addr
+ */
+ ctx->sgl[0].dst_icg = 0;
+
+ if (info->buffers == 1) {
+ /* Handling contiguous data with mplanes */
+ ctx->sgl[0].src_icg = 0;
+ if (dma->crop)
+ ctx->sgl[0].src_icg = bpl *
+ (pix_mp->height - src_height);
+ } else {
+ /* Handling non-contiguous data with mplanes */
+ if (info->buffers == 2) {
+ dma_addr_t chroma_out =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 1);
+ luma_size = bpl * ctx->xt.numf;
+ if (chroma_out > p_in)
+ ctx->sgl[0].src_icg = chroma_out - p_in -
+ luma_size;
+ }
+ }
+
+ desc = dmaengine_prep_interleaved_dma(dma->chan_tx, &ctx->xt, flags);
+ if (!desc) {
+ dev_err(xdev->dev, "Failed to prepare DMA tx transfer\n");
+ return;
+ }
+
+ desc->callback = xvip_m2m_dma_callback_mem2dev;
+ desc->callback_param = ctx;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(dma->chan_tx);
+}
+
+/**
+ * xvip_m2m_device_run - prepares and starts the device
+ *
+ * @priv: Instance private data
+ *
+ * This performs the immediate preparations required before starting a
+ * transfer and submits the DMA descriptors. It is called by the mem2mem
+ * framework when it decides to schedule a particular instance.
+ */
+static void xvip_m2m_device_run(void *priv)
+{
+ struct xvip_m2m_ctx *ctx = priv;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ /* Prepare and submit mem2dev transaction */
+ xvip_m2m_prep_submit_mem2dev_desc(ctx, src_buf);
+
+ /* Prepare and submit dev2mem transaction */
+ xvip_m2m_prep_submit_dev2mem_desc(ctx, dst_buf);
+}
+
+static const struct v4l2_file_operations xvip_m2m_fops = {
+ .owner = THIS_MODULE,
+ .open = xvip_m2m_open,
+ .release = xvip_m2m_release,
+ .poll = xvip_m2m_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = xvip_m2m_mmap,
+};
+
+static struct video_device xvip_m2m_videodev = {
+ .name = XVIP_M2M_NAME,
+ .fops = &xvip_m2m_fops,
+ .ioctl_ops = &xvip_m2m_ioctl_ops,
+ .release = video_device_release_empty,
+ .vfl_dir = VFL_DIR_M2M,
+ .device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
+ .vfl_type = VFL_TYPE_GRABBER,
+};
+
+static const struct v4l2_m2m_ops xvip_m2m_ops = {
+ .device_run = xvip_m2m_device_run,
+ .job_ready = xvip_m2m_job_ready,
+ .job_abort = xvip_m2m_job_abort,
+};
+
+static int xvip_m2m_dma_init(struct xvip_m2m_dma *dma)
+{
+ struct xvip_m2m_dev *xdev;
+ struct v4l2_pix_format_mplane *pix_mp;
+ int ret;
+
+ xdev = dma->xdev;
+ mutex_init(&xdev->lock);
+ mutex_init(&dma->pipe.lock);
+ spin_lock_init(&xdev->queued_lock);
+
+ /* Format info on capture port - RGB24 is the default format */
+ dma->capinfo = xvip_get_format_by_fourcc(XVIP_M2M_DEFAULT_FMT);
+ pix_mp = &dma->capfmt.fmt.pix_mp;
+ pix_mp->pixelformat = dma->capinfo->fourcc;
+
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->width = XVIP_M2M_DEF_WIDTH;
+ pix_mp->height = XVIP_M2M_DEF_HEIGHT;
+ pix_mp->plane_fmt[0].bytesperline = pix_mp->width *
+ dma->capinfo->bpl_factor;
+ pix_mp->plane_fmt[0].sizeimage =
+ DIV_ROUND_UP(pix_mp->plane_fmt[0].bytesperline *
+ pix_mp->height * dma->capinfo->bpp, 8);
+
+ /* Format info on output port - RGB24 is the default format */
+ dma->outinfo = xvip_get_format_by_fourcc(XVIP_M2M_DEFAULT_FMT);
+ pix_mp = &dma->outfmt.fmt.pix_mp;
+ pix_mp->pixelformat = dma->outinfo->fourcc;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->width = XVIP_M2M_DEF_WIDTH;
+ pix_mp->height = XVIP_M2M_DEF_HEIGHT;
+ pix_mp->plane_fmt[0].bytesperline = pix_mp->width *
+ dma->outinfo->bpl_factor;
+ pix_mp->plane_fmt[0].sizeimage =
+ DIV_ROUND_UP(pix_mp->plane_fmt[0].bytesperline *
+ pix_mp->height * dma->outinfo->bpp, 8);
+
+ /* DMA channels for mem2mem */
+ dma->chan_tx = dma_request_chan(xdev->dev, "tx");
+ if (IS_ERR(dma->chan_tx)) {
+ ret = PTR_ERR(dma->chan_tx);
+ if (ret != -EPROBE_DEFER)
+ dev_err(xdev->dev, "mem2mem DMA tx channel not found");
+
+ return ret;
+ }
+
+ dma->chan_rx = dma_request_chan(xdev->dev, "rx");
+ if (IS_ERR(dma->chan_rx)) {
+ ret = PTR_ERR(dma->chan_rx);
+ if (ret != -EPROBE_DEFER)
+ dev_err(xdev->dev, "mem2mem DMA rx channel not found");
+
+ goto tx;
+ }
+
+ dma->align = BIT(dma->chan_tx->device->copy_align);
+
+ /* Video node */
+ dma->video = xvip_m2m_videodev;
+ dma->video.v4l2_dev = &xdev->v4l2_dev;
+ dma->video.lock = &xdev->lock;
+
+ dma->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ dma->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&dma->video.entity, 2, dma->pads);
+ if (ret < 0)
+ goto tx_rx;
+
+ ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ dev_err(xdev->dev, "Failed to register mem2mem video device\n");
+ goto tx_rx;
+ }
+
+ video_set_drvdata(&dma->video, dma->xdev);
+ return 0;
+
+tx_rx:
+ dma_release_channel(dma->chan_rx);
+tx:
+ dma_release_channel(dma->chan_tx);
+ return ret;
+}
+
+static void xvip_m2m_dma_deinit(struct xvip_m2m_dma *dma)
+{
+ if (video_is_registered(&dma->video))
+ video_unregister_device(&dma->video);
+
+ mutex_destroy(&dma->pipe.lock);
+ mutex_destroy(&dma->xdev->lock);
+ dma_release_channel(dma->chan_tx);
+ dma_release_channel(dma->chan_rx);
+}
+
+static int xvip_m2m_dma_alloc_init(struct xvip_m2m_dev *xdev)
+{
+ struct xvip_m2m_dma *dma = NULL;
+ int ret;
+
+ dma = devm_kzalloc(xdev->dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->xdev = xdev;
+ xdev->dma = dma;
+
+ ret = xvip_m2m_dma_init(xdev->dma);
+ if (ret) {
+ dev_err(xdev->dev, "DMA initialization failed\n");
+ return ret;
+ }
+
+ xdev->v4l2_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+static void xvip_composite_v4l2_cleanup(struct xvip_m2m_dev *xdev)
+{
+ v4l2_device_unregister(&xdev->v4l2_dev);
+ media_device_unregister(&xdev->media_dev);
+ media_device_cleanup(&xdev->media_dev);
+}
+
+static int xvip_composite_v4l2_init(struct xvip_m2m_dev *xdev)
+{
+ int ret;
+
+ xdev->media_dev.dev = xdev->dev;
+ strlcpy(xdev->media_dev.model, "Xilinx Video M2M Composite Device",
+ sizeof(xdev->media_dev.model));
+ xdev->media_dev.hw_revision = 0;
+
+ media_device_init(&xdev->media_dev);
+
+ xdev->v4l2_dev.mdev = &xdev->media_dev;
+ ret = v4l2_device_register(xdev->dev, &xdev->v4l2_dev);
+ if (ret < 0) {
+ dev_err(xdev->dev, "V4L2 device registration failed (%d)\n",
+ ret);
+ media_device_cleanup(&xdev->media_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct xvip_graph_entity *
+xvip_graph_find_entity(struct xvip_m2m_dev *xdev,
+ const struct device_node *node)
+{
+ struct xvip_graph_entity *entity;
+
+ list_for_each_entry(entity, &xdev->entities, list) {
+ if (entity->node == node)
+ return entity;
+ }
+
+ return NULL;
+}
+
+static int xvip_graph_build_one(struct xvip_m2m_dev *xdev,
+ struct xvip_graph_entity *entity)
+{
+ u32 link_flags = MEDIA_LNK_FL_ENABLED;
+ struct media_entity *local = entity->entity;
+ struct media_entity *remote;
+ struct media_pad *local_pad;
+ struct media_pad *remote_pad;
+ struct xvip_graph_entity *ent;
+ struct v4l2_fwnode_link link;
+ struct device_node *ep = NULL;
+ struct device_node *next;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "creating links for entity %s\n", local->name);
+
+ while (1) {
+ /* Get the next endpoint and parse its link. */
+ next = of_graph_get_next_endpoint(entity->node, ep);
+ if (!next)
+ break;
+
+ ep = next;
+
+ dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep);
+
+ ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
+ if (ret < 0) {
+ dev_err(xdev->dev, "failed to parse link for %pOF\n",
+ ep);
+ continue;
+ }
+
+ /*
+ * Skip sink ports, they will be processed from the other end of
+ * the link.
+ */
+ if (link.local_port >= local->num_pads) {
+ dev_err(xdev->dev, "invalid port number %u for %pOF\n",
+ link.local_port,
+ to_of_node(link.local_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ local_pad = &local->pads[link.local_port];
+
+ if (local_pad->flags & MEDIA_PAD_FL_SINK) {
+ dev_dbg(xdev->dev, "skipping sink port %pOF:%u\n",
+ to_of_node(link.local_node),
+ link.local_port);
+ v4l2_fwnode_put_link(&link);
+ continue;
+ }
+
+ /* Skip DMA engines, they will be processed separately. */
+ if (link.remote_node == of_fwnode_handle(xdev->dev->of_node)) {
+ dev_dbg(xdev->dev, "skipping DMA port %pOF:%u\n",
+ to_of_node(link.local_node),
+ link.local_port);
+ v4l2_fwnode_put_link(&link);
+ continue;
+ }
+
+ /* Find the remote entity. */
+ ent = xvip_graph_find_entity(xdev,
+ to_of_node(link.remote_node));
+ if (!ent) {
+ dev_err(xdev->dev, "no entity found for %pOF\n",
+ to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -ENODEV;
+ break;
+ }
+
+ remote = ent->entity;
+
+ if (link.remote_port >= remote->num_pads) {
+ dev_err(xdev->dev, "invalid port number %u on %pOF\n",
+ link.remote_port, to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ remote_pad = &remote->pads[link.remote_port];
+
+ v4l2_fwnode_put_link(&link);
+
+ /* Create the media link. */
+ dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
+ local->name, local_pad->index,
+ remote->name, remote_pad->index);
+
+ ret = media_create_pad_link(local, local_pad->index,
+ remote, remote_pad->index,
+ link_flags);
+ if (ret < 0) {
+ dev_err(xdev->dev,
+ "failed to create %s:%u -> %s:%u link\n",
+ local->name, local_pad->index,
+ remote->name, remote_pad->index);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int xvip_graph_parse_one(struct xvip_m2m_dev *xdev,
+ struct device_node *node)
+{
+ struct xvip_graph_entity *entity;
+ struct device_node *remote;
+ struct device_node *ep = NULL;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "parsing node %pOF\n", node);
+
+ while (1) {
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (!ep)
+ break;
+
+ dev_dbg(xdev->dev, "handling endpoint %pOF %s\n",
+ ep, ep->name);
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ ret = -EINVAL;
+ break;
+ }
+ dev_dbg(xdev->dev, "Remote endpoint %pOF %s\n",
+ remote, remote->name);
+
+ /* Skip entities that we have already processed. */
+ if (remote == xdev->dev->of_node ||
+ xvip_graph_find_entity(xdev, remote)) {
+ of_node_put(remote);
+ continue;
+ }
+
+ entity = devm_kzalloc(xdev->dev, sizeof(*entity), GFP_KERNEL);
+ if (!entity) {
+ of_node_put(remote);
+ ret = -ENOMEM;
+ break;
+ }
+
+ entity->node = remote;
+ entity->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ entity->asd.match.fwnode = of_fwnode_handle(remote);
+ list_add_tail(&entity->list, &xdev->entities);
+ xdev->num_subdevs++;
+ }
+
+ of_node_put(ep);
+ return ret;
+}
+
+static int xvip_graph_parse(struct xvip_m2m_dev *xdev)
+{
+ struct xvip_graph_entity *entity;
+ int ret;
+
+ /*
+ * Walk the links to parse the full graph. Start by parsing the
+ * composite node and then parse entities in turn. The list_for_each
+ * loop will handle entities added at the end of the list while walking
+ * the links.
+ */
+ ret = xvip_graph_parse_one(xdev, xdev->dev->of_node);
+ if (ret < 0)
+ return ret;
+
+ list_for_each_entry(entity, &xdev->entities, list) {
+ ret = xvip_graph_parse_one(xdev, entity->node);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int xvip_graph_build_dma(struct xvip_m2m_dev *xdev)
+{
+ u32 link_flags = MEDIA_LNK_FL_ENABLED;
+ struct device_node *node = xdev->dev->of_node;
+ struct media_entity *source;
+ struct media_entity *sink;
+ struct media_pad *source_pad;
+ struct media_pad *sink_pad;
+ struct xvip_graph_entity *ent;
+ struct v4l2_fwnode_link link;
+ struct device_node *ep = NULL;
+ struct device_node *next;
+ struct xvip_m2m_dma *dma = xdev->dma;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "creating links for DMA engines\n");
+
+ while (1) {
+ /* Get the next endpoint and parse its link. */
+ next = of_graph_get_next_endpoint(node, ep);
+ if (!next)
+ break;
+
+ ep = next;
+
+ dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep);
+
+ ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
+ if (ret < 0) {
+ dev_err(xdev->dev, "failed to parse link for %pOF\n",
+ ep);
+ continue;
+ }
+
+ dev_dbg(xdev->dev, "creating link for DMA engine %s\n",
+ dma->video.name);
+
+ /* Find the remote entity. */
+ ent = xvip_graph_find_entity(xdev,
+ to_of_node(link.remote_node));
+ if (!ent) {
+ dev_err(xdev->dev, "no entity found for %pOF\n",
+ to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -ENODEV;
+ break;
+ }
+ if (link.remote_port >= ent->entity->num_pads) {
+ dev_err(xdev->dev, "invalid port number %u on %pOF\n",
+ link.remote_port,
+ to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ dev_dbg(xdev->dev, "Entity %s %s\n", ent->node->name,
+ ent->node->full_name);
+ dev_dbg(xdev->dev, "port number %u on %pOF\n",
+ link.remote_port, to_of_node(link.remote_node));
+ dev_dbg(xdev->dev, "local port number %u on %pOF\n",
+ link.local_port, to_of_node(link.local_node));
+
+ if (link.local_port == XVIP_PAD_SOURCE) {
+ source = &dma->video.entity;
+ source_pad = &dma->pads[XVIP_PAD_SOURCE];
+ sink = ent->entity;
+ sink_pad = &sink->pads[XVIP_PAD_SINK];
+
+ } else {
+ source = ent->entity;
+ source_pad = &source->pads[XVIP_PAD_SOURCE];
+ sink = &dma->video.entity;
+ sink_pad = &dma->pads[XVIP_PAD_SINK];
+ }
+
+ v4l2_fwnode_put_link(&link);
+
+ /* Create the media link. */
+ dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
+ source->name, source_pad->index,
+ sink->name, sink_pad->index);
+
+ ret = media_create_pad_link(source, source_pad->index,
+ sink, sink_pad->index,
+ link_flags);
+ if (ret < 0) {
+ dev_err(xdev->dev,
+ "failed to create %s:%u -> %s:%u link\n",
+ source->name, source_pad->index,
+ sink->name, sink_pad->index);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int xvip_graph_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct xvip_m2m_dev *xdev =
+ container_of(notifier, struct xvip_m2m_dev, notifier);
+ struct xvip_graph_entity *entity;
+ int ret;
+
+ dev_dbg(xdev->dev, "notify complete, all subdevs registered\n");
+
+ /* Create links for every entity. */
+ list_for_each_entry(entity, &xdev->entities, list) {
+ ret = xvip_graph_build_one(xdev, entity);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Create links for DMA channels. */
+ ret = xvip_graph_build_dma(xdev);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_device_register_subdev_nodes(&xdev->v4l2_dev);
+ if (ret < 0)
+ dev_err(xdev->dev, "failed to register subdev nodes\n");
+
+ return media_device_register(&xdev->media_dev);
+}
+
+static int xvip_graph_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct xvip_m2m_dev *xdev =
+ container_of(notifier, struct xvip_m2m_dev, notifier);
+ struct xvip_graph_entity *entity;
+
+ /* Locate the entity corresponding to the bound subdev and store the
+ * subdev pointer.
+ */
+ list_for_each_entry(entity, &xdev->entities, list) {
+ if (entity->node != subdev->dev->of_node)
+ continue;
+
+ if (entity->subdev) {
+ dev_err(xdev->dev, "duplicate subdev for node %pOF\n",
+ entity->node);
+ return -EINVAL;
+ }
+
+ dev_dbg(xdev->dev, "subdev %s bound\n", subdev->name);
+ entity->entity = &subdev->entity;
+ entity->subdev = subdev;
+ return 0;
+ }
+
+ dev_err(xdev->dev, "no entity for subdev %s\n", subdev->name);
+ return -EINVAL;
+}
+
+static const struct v4l2_async_notifier_operations xvip_graph_notify_ops = {
+ .bound = xvip_graph_notify_bound,
+ .complete = xvip_graph_notify_complete,
+};
+
+static void xvip_graph_cleanup(struct xvip_m2m_dev *xdev)
+{
+ struct xvip_graph_entity *entityp;
+ struct xvip_graph_entity *entity;
+
+ v4l2_async_notifier_unregister(&xdev->notifier);
+
+ list_for_each_entry_safe(entity, entityp, &xdev->entities, list) {
+ of_node_put(entity->node);
+ list_del(&entity->list);
+ }
+}
+
+static int xvip_graph_init(struct xvip_m2m_dev *xdev)
+{
+ struct xvip_graph_entity *entity;
+ int ret;
+
+ /* Init the DMA channels. */
+ ret = xvip_m2m_dma_alloc_init(xdev);
+ if (ret < 0) {
+ dev_err(xdev->dev, "DMA initialization failed\n");
+ goto done;
+ }
+
+ /* Parse the graph to extract a list of subdevice DT nodes. */
+ ret = xvip_graph_parse(xdev);
+ if (ret < 0) {
+ dev_err(xdev->dev, "graph parsing failed\n");
+ goto done;
+ }
+ dev_dbg(xdev->dev, "Number of subdev = %d\n", xdev->num_subdevs);
+
+	if (!xdev->num_subdevs) {
+		dev_err(xdev->dev, "no subdev found in graph\n");
+		ret = -ENOENT;
+		goto done;
+	}
+
+	/* Register the subdevices notifier. */
+
+	v4l2_async_notifier_init(&xdev->notifier);
+	xdev->notifier.ops = &xvip_graph_notify_ops;
+
+	list_for_each_entry(entity, &xdev->entities, list) {
+		ret = v4l2_async_notifier_add_subdev(&xdev->notifier,
+						     &entity->asd);
+		if (ret)
+			goto done;
+	}
+
+ ret = v4l2_async_notifier_register(&xdev->v4l2_dev, &xdev->notifier);
+ if (ret < 0) {
+ dev_err(xdev->dev, "notifier registration failed\n");
+ goto done;
+ }
+
+ ret = 0;
+
+done:
+ if (ret < 0)
+ xvip_graph_cleanup(xdev);
+
+ return ret;
+}
+
+static int xvip_composite_remove(struct platform_device *pdev)
+{
+ struct xvip_m2m_dev *xdev = platform_get_drvdata(pdev);
+
+ xvip_graph_cleanup(xdev);
+ xvip_composite_v4l2_cleanup(xdev);
+
+ return 0;
+}
+
+static int xvip_m2m_probe(struct platform_device *pdev)
+{
+ struct xvip_m2m_dev *xdev = NULL;
+ int ret;
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xdev->dev = &pdev->dev;
+ INIT_LIST_HEAD(&xdev->entities);
+
+ ret = xvip_composite_v4l2_init(xdev);
+	if (ret)
+		return ret;
+
+ ret = xvip_graph_init(xdev);
+ if (ret < 0)
+ goto error;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
+ goto dma_cleanup;
+ }
+
+ platform_set_drvdata(pdev, xdev);
+
+ xdev->m2m_dev = v4l2_m2m_init(&xvip_m2m_ops);
+ if (IS_ERR(xdev->m2m_dev)) {
+ dev_err(xdev->dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(xdev->m2m_dev);
+ goto dma_cleanup;
+ }
+
+ dev_info(xdev->dev, "mem2mem device registered\n");
+ return 0;
+
+dma_cleanup:
+ xvip_m2m_dma_deinit(xdev->dma);
+
+error:
+ v4l2_device_unregister(&xdev->v4l2_dev);
+ return ret;
+}
+
+static int xvip_m2m_remove(struct platform_device *pdev)
+{
+ xvip_composite_remove(pdev);
+ return 0;
+}
+
+static const struct of_device_id xvip_m2m_of_id_table[] = {
+ { .compatible = "xlnx,mem2mem" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xvip_m2m_of_id_table);
+
+static struct platform_driver xvip_m2m_driver = {
+ .driver = {
+ .name = XVIP_M2M_NAME,
+ .of_match_table = xvip_m2m_of_id_table,
+ },
+ .probe = xvip_m2m_probe,
+ .remove = xvip_m2m_remove,
+};
+
+module_platform_driver(xvip_m2m_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx V4L2 mem2mem driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-multi-scaler-coeff.h b/drivers/media/platform/xilinx/xilinx-multi-scaler-coeff.h
new file mode 100644
index 000000000000..65a3482aa249
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-multi-scaler-coeff.h
@@ -0,0 +1,574 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Memory-to-Memory Video Multi-Scaler IP
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Author: Suresh Gupta <sureshg@xilinx.com>
+ *
+ * This file contains the coefficients used by the Xilinx
+ * Video Multi-Scaler Controller driver (xm2msc).
+ *
+ */
+
+#define XSCALER_MAX_PHASES (64)
+#define XSCALER_MAX_TAPS (12)
+
+#define XSCALER_TAPS_6 (6)
+#define XSCALER_TAPS_8 (8)
+#define XSCALER_TAPS_10 (10)
+#define XSCALER_TAPS_12 (12)
+
+/* Filter bank ID for various filter tap configurations */
+enum xm2mvsc_filter_bank_id {
+ FILTER_BANK_TAPS_6 = 0,
+ FILTER_BANK_TAPS_8,
+ FILTER_BANK_TAPS_10,
+ FILTER_BANK_TAPS_12,
+};
+
+/* H-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const short
+xhsc_coeff_taps6[XSCALER_MAX_PHASES][XSCALER_TAPS_6] = {
+ { -132, 236, 3824, 236, -132, 64, },
+ { -116, 184, 3816, 292, -144, 64, },
+ { -100, 132, 3812, 348, -160, 64, },
+ { -88, 84, 3808, 404, -176, 64, },
+ { -72, 36, 3796, 464, -192, 64, },
+ { -60, -8, 3780, 524, -208, 68, },
+ { -48, -52, 3768, 588, -228, 68, },
+ { -32, -96, 3748, 652, -244, 68, },
+ { -20, -136, 3724, 716, -260, 72, },
+ { -8, -172, 3696, 784, -276, 72, },
+ { 0, -208, 3676, 848, -292, 72, },
+ { 12, -244, 3640, 920, -308, 76, },
+ { 20, -276, 3612, 988, -324, 76, },
+ { 32, -304, 3568, 1060, -340, 80, },
+ { 40, -332, 3532, 1132, -356, 80, },
+ { 48, -360, 3492, 1204, -372, 84, },
+ { 56, -384, 3448, 1276, -388, 88, },
+ { 64, -408, 3404, 1352, -404, 88, },
+ { 72, -428, 3348, 1428, -416, 92, },
+ { 76, -448, 3308, 1500, -432, 92, },
+ { 84, -464, 3248, 1576, -444, 96, },
+ { 88, -480, 3200, 1652, -460, 96, },
+ { 92, -492, 3140, 1728, -472, 100, },
+ { 96, -504, 3080, 1804, -484, 104, },
+ { 100, -516, 3020, 1880, -492, 104, },
+ { 104, -524, 2956, 1960, -504, 104, },
+ { 104, -532, 2892, 2036, -512, 108, },
+ { 108, -540, 2832, 2108, -520, 108, },
+ { 108, -544, 2764, 2184, -528, 112, },
+ { 112, -544, 2688, 2260, -532, 112, },
+ { 112, -548, 2624, 2336, -540, 112, },
+ { 112, -548, 2556, 2408, -544, 112, },
+ { 112, -544, 2480, 2480, -544, 112, },
+ { 112, -544, 2408, 2556, -548, 112, },
+ { 112, -540, 2336, 2624, -548, 112, },
+ { 112, -532, 2260, 2688, -544, 112, },
+ { 112, -528, 2184, 2764, -544, 108, },
+ { 108, -520, 2108, 2832, -540, 108, },
+ { 108, -512, 2036, 2892, -532, 104, },
+ { 104, -504, 1960, 2956, -524, 104, },
+ { 104, -492, 1880, 3020, -516, 100, },
+ { 104, -484, 1804, 3080, -504, 96, },
+ { 100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const short
+xhsc_coeff_taps8[XSCALER_MAX_PHASES][XSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const short
+xhsc_coeff_taps10[XSCALER_MAX_PHASES][XSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const short
+xhsc_coeff_taps12[XSCALER_MAX_PHASES][XSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
+
+/* V-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const short
+xvsc_coeff_taps6[XSCALER_MAX_PHASES][XSCALER_TAPS_6] = {
+ {-132, 236, 3824, 236, -132, 64, },
+ {-116, 184, 3816, 292, -144, 64, },
+ {-100, 132, 3812, 348, -160, 64, },
+ {-88, 84, 3808, 404, -176, 64, },
+ {-72, 36, 3796, 464, -192, 64, },
+ {-60, -8, 3780, 524, -208, 68, },
+ {-48, -52, 3768, 588, -228, 68, },
+ {-32, -96, 3748, 652, -244, 68, },
+ {-20, -136, 3724, 716, -260, 72, },
+ {-8, -172, 3696, 784, -276, 72, },
+ {0, -208, 3676, 848, -292, 72, },
+ {12, -244, 3640, 920, -308, 76, },
+ {20, -276, 3612, 988, -324, 76, },
+ {32, -304, 3568, 1060, -340, 80, },
+ {40, -332, 3532, 1132, -356, 80, },
+ {48, -360, 3492, 1204, -372, 84, },
+ {56, -384, 3448, 1276, -388, 88, },
+ {64, -408, 3404, 1352, -404, 88, },
+ {72, -428, 3348, 1428, -416, 92, },
+ {76, -448, 3308, 1500, -432, 92, },
+ {84, -464, 3248, 1576, -444, 96, },
+ {88, -480, 3200, 1652, -460, 96, },
+ {92, -492, 3140, 1728, -472, 100, },
+ {96, -504, 3080, 1804, -484, 104, },
+ {100, -516, 3020, 1880, -492, 104, },
+ {104, -524, 2956, 1960, -504, 104, },
+ {104, -532, 2892, 2036, -512, 108, },
+ {108, -540, 2832, 2108, -520, 108, },
+ {108, -544, 2764, 2184, -528, 112, },
+ {112, -544, 2688, 2260, -532, 112, },
+ {112, -548, 2624, 2336, -540, 112, },
+ {112, -548, 2556, 2408, -544, 112, },
+ {112, -544, 2480, 2480, -544, 112, },
+ {112, -544, 2408, 2556, -548, 112, },
+ {112, -540, 2336, 2624, -548, 112, },
+ {112, -532, 2260, 2688, -544, 112, },
+ {112, -528, 2184, 2764, -544, 108, },
+ {108, -520, 2108, 2832, -540, 108, },
+ {108, -512, 2036, 2892, -532, 104, },
+ {104, -504, 1960, 2956, -524, 104, },
+ {104, -492, 1880, 3020, -516, 100, },
+ {104, -484, 1804, 3080, -504, 96, },
+ {100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const short
+xvsc_coeff_taps8[XSCALER_MAX_PHASES][XSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const short
+xvsc_coeff_taps10[XSCALER_MAX_PHASES][XSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const short
+xvsc_coeff_taps12[XSCALER_MAX_PHASES][XSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
diff --git a/drivers/media/platform/xilinx/xilinx-multi-scaler.c b/drivers/media/platform/xilinx/xilinx-multi-scaler.c
new file mode 100644
index 000000000000..fe53a1d193f9
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-multi-scaler.c
@@ -0,0 +1,2449 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Memory-to-Memory Video Multi-Scaler IP
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Author: Suresh Gupta <suresh.gupta@xilinx.com>
+ *
+ * Based on the virtual v4l2-mem2mem example device
+ *
+ * This driver adds support to control the Xilinx Video Multi
+ * Scaler Controller
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "xilinx-multi-scaler-coeff.h"
+
+/* 0x0000 : Control signals */
+#define XM2MSC_AP_CTRL 0x0000
+#define XM2MSC_AP_CTRL_START BIT(0)
+#define XM2MSC_AP_CTRL_DONE BIT(1)
+#define XM2MSC_AP_CTRL_IDEL BIT(2)
+#define XM2MSC_AP_CTRL_READY BIT(3)
+#define XM2MSC_AP_CTRL_AUTO_RESTART BIT(7)
+
+/* 0x0004 : Global Interrupt Enable Register */
+#define XM2MSC_GIE 0x0004
+#define XM2MSC_GIE_EN BIT(0)
+
+/* 0x0008 : IP Interrupt Enable Register (Read/Write) */
+#define XM2MSC_IER 0x0008
+#define XM2MSC_ISR 0x000c
+#define XM2MSC_ISR_DONE BIT(0)
+#define XM2MSC_ISR_READY BIT(1)
+
+#define XM2MSC_NUM_OUTS 0x0010
+
+#define XM2MSC_WIDTHIN 0x000
+#define XM2MSC_WIDTHOUT 0x008
+#define XM2MSC_HEIGHTIN 0x010
+#define XM2MSC_HEIGHTOUT 0x018
+#define XM2MSC_LINERATE 0x020
+#define XM2MSC_PIXELRATE 0x028
+#define XM2MSC_INPIXELFMT 0x030
+#define XM2MSC_OUTPIXELFMT 0x038
+#define XM2MSC_INSTRIDE 0x050
+#define XM2MSC_OUTSTRIDE 0x058
+#define XM2MSC_SRCIMGBUF0 0x060
+#define XM2MSC_SRCIMGBUF1 0x070
+#define XM2MSC_DSTIMGBUF0 0x090
+#define XM2MSC_DSTIMGBUF1 0x0100
+
+#define XM2MVSC_VFLTCOEFF_L 0x2000
+#define XM2MVSC_VFLTCOEFF(x) (XM2MVSC_VFLTCOEFF_L + 0x2000 * (x))
+#define XM2MVSC_HFLTCOEFF_L 0x2800
+#define XM2MVSC_HFLTCOEFF(x) (XM2MVSC_HFLTCOEFF_L + 0x2000 * (x))
+
+#define XM2MSC_CHAN_REGS_START(x) (0x100 + 0x200 * (x))
+
+/*
+ * IP has reserved area between XM2MSC_DSTIMGBUF0 and
+ * XM2MSC_DSTIMGBUF1 registers of channel 4
+ */
+#define XM2MSC_RESERVED_AREA 0x600
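The address map above is plain strided arithmetic: each channel owns a
0x200-byte control window starting at 0x100, plus a 0x2000-byte coefficient
region whose vertical bank sits at offset 0x0 and horizontal bank at 0x800.
A standalone sketch of the address computation (offsets copied from the
macros above; the loop bound is arbitrary):

#include <stdio.h>

#define CHAN_REGS_START(x)	(0x100 + 0x200 * (x))
#define VFLTCOEFF(x)		(0x2000 + 0x2000 * (x))
#define HFLTCOEFF(x)		(0x2800 + 0x2000 * (x))

int main(void)
{
	unsigned int chan;

	/* Per-channel control window and V/H coefficient bank offsets. */
	for (chan = 0; chan < 4; chan++)
		printf("chan %u: regs 0x%03x vcoeff 0x%04x hcoeff 0x%04x\n",
		       chan, CHAN_REGS_START(chan),
		       VFLTCOEFF(chan), HFLTCOEFF(chan));
	return 0;
}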
+
+/* GPIO RESET MACROS */
+#define XM2MSC_RESET_ASSERT (0x1)
+#define XM2MSC_RESET_DEASSERT (0x0)
+
+#define XM2MSC_MIN_CHAN 1
+#define XM2MSC_MAX_CHAN 8
+
+#define XM2MSC_MAX_WIDTH (8192)
+#define XM2MSC_MAX_HEIGHT (4320)
+#define XM2MSC_MIN_WIDTH (64)
+#define XM2MSC_MIN_HEIGHT (64)
+#define XM2MSC_STEP_PRECISION (65536)
+/* Mask definitions for Low 16 bits in a 32 bit number */
+#define XM2MSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XM2MSC_BITSHIFT_16 (16)
+
+#define XM2MSC_DRIVER_NAME "xm2msc"
+
+#define CHAN_ATTACHED BIT(0)
+#define CHAN_OPENED BIT(1)
+
+#define XM2MSC_CHAN_OUT 0
+#define XM2MSC_CHAN_CAP 1
+
+#define NUM_STREAM(_x) \
+ ({ typeof(_x) (x) = (_x); \
+ min(ffz(x->out_streamed_chan), \
+ ffz(x->cap_streamed_chan)); })
+
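NUM_STREAM() counts how many channels have both their OUTPUT and CAPTURE
queues streaming, relying on channels streaming in order from bit 0: ffz()
returns the index of the first zero bit, i.e. the length of the contiguous
run of streamed channels, and the minimum over the two bitmaps is the number
of complete pairs. A minimal userspace sketch of the same logic, with ffz()
modeled on a GCC builtin and made-up bitmap values:

#include <stdio.h>

/* Userspace stand-in for the kernel's ffz(): index of the first zero bit. */
static unsigned int ffz_u32(unsigned int x)
{
	return __builtin_ctz(~x);	/* caller must ensure x != ~0U */
}

int main(void)
{
	unsigned int out_streamed = 0x7; /* channels 0-2 streaming on OUTPUT */
	unsigned int cap_streamed = 0x3; /* channels 0-1 streaming on CAPTURE */
	unsigned int out = ffz_u32(out_streamed);	/* 3 */
	unsigned int cap = ffz_u32(cap_streamed);	/* 2 */

	printf("complete stream pairs: %u\n", out < cap ? out : cap); /* 2 */
	return 0;
}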
+#define XM2MSC_ALIGN_MUL 8
+
+/*
+ * These are temporary variables. Once stride and height alignment
+ * support is added to the plugin, these variables will be removed.
+ */
+static unsigned int output_stride_align[XM2MSC_MAX_CHAN] = {
+ 1, 1, 1, 1, 1, 1, 1, 1 };
+module_param_array(output_stride_align, uint, NULL, 0644);
+MODULE_PARM_DESC(output_stride_align,
+ "Per Cahnnel stride alignment requied at output.");
+
+static unsigned int capture_stride_align[XM2MSC_MAX_CHAN] = {
+ 1, 1, 1, 1, 1, 1, 1, 1 };
+module_param_array(capture_stride_align, uint, NULL, 0644);
+MODULE_PARM_DESC(capture_stride_align,
+ "Per channel stride alignment requied at capture.");
+
+static unsigned int output_height_align[XM2MSC_MAX_CHAN] = {
+ 1, 1, 1, 1, 1, 1, 1, 1 };
+module_param_array(output_height_align, uint, NULL, 0644);
+MODULE_PARM_DESC(output_height_align,
+ "Per Channel height alignment requied at output.");
+
+static unsigned int capture_height_align[XM2MSC_MAX_CHAN] = {
+ 1, 1, 1, 1, 1, 1, 1, 1 };
+module_param_array(capture_height_align, uint, NULL, 0644);
+MODULE_PARM_DESC(capture_height_align,
+ "Per channel height alignment requied at capture.");
+
+/* Xilinx Video Specific Color/Pixel Formats */
+enum xm2msc_pix_fmt {
+ XILINX_M2MSC_FMT_RGBX8 = 10,
+ XILINX_M2MSC_FMT_YUVX8 = 11,
+ XILINX_M2MSC_FMT_YUYV8 = 12,
+ XILINX_M2MSC_FMT_RGBX10 = 15,
+ XILINX_M2MSC_FMT_YUVX10 = 16,
+ XILINX_M2MSC_FMT_Y_UV8 = 18,
+ XILINX_M2MSC_FMT_Y_UV8_420 = 19,
+ XILINX_M2MSC_FMT_RGB8 = 20,
+ XILINX_M2MSC_FMT_YUV8 = 21,
+ XILINX_M2MSC_FMT_Y_UV10 = 22,
+ XILINX_M2MSC_FMT_Y_UV10_420 = 23,
+ XILINX_M2MSC_FMT_Y8 = 24,
+ XILINX_M2MSC_FMT_Y10 = 25,
+ XILINX_M2MSC_FMT_BGRX8 = 27,
+ XILINX_M2MSC_FMT_UYVY8 = 28,
+ XILINX_M2MSC_FMT_BGR8 = 29,
+};
+
+/**
+ * struct xm2msc_fmt - driver info for each of the supported video formats
+ * @name: human-readable device tree name for this entry
+ * @fourcc: standard format identifier
+ * @xm2msc_fmt: Xilinx Video Specific Color/Pixel Formats
+ * @num_buffs: number of physically non-contiguous data planes/buffs
+ */
+struct xm2msc_fmt {
+ char *name;
+ u32 fourcc;
+ enum xm2msc_pix_fmt xm2msc_fmt;
+ u32 num_buffs;
+};
+
+static const struct xm2msc_fmt formats[] = {
+ {
+ .name = "xbgr8888",
+ .fourcc = V4L2_PIX_FMT_BGRX32,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_RGBX8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "xvuy8888",
+ .fourcc = V4L2_PIX_FMT_XVUY32,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_YUVX8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "yuyv",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_YUYV8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "xbgr2101010",
+ .fourcc = V4L2_PIX_FMT_XBGR30,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_RGBX10,
+ .num_buffs = 1,
+ },
+ {
+ .name = "yuvx2101010",
+ .fourcc = V4L2_PIX_FMT_XVUY10,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_YUVX10,
+ .num_buffs = 1,
+ },
+ {
+ .name = "nv16",
+ .fourcc = V4L2_PIX_FMT_NV16M,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8,
+ .num_buffs = 2,
+ },
+ {
+ .name = "nv16",
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "nv12",
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8_420,
+ .num_buffs = 2,
+ },
+ {
+ .name = "nv12",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8_420,
+ .num_buffs = 1,
+ },
+ {
+ .name = "bgr888",
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_RGB8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "vuy888",
+ .fourcc = V4L2_PIX_FMT_VUY24,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_YUV8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "xv20",
+ .fourcc = V4L2_PIX_FMT_XV20M,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10,
+ .num_buffs = 2,
+ },
+ {
+ .name = "xv20",
+ .fourcc = V4L2_PIX_FMT_XV20,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10,
+ .num_buffs = 1,
+ },
+ {
+ .name = "xv15",
+ .fourcc = V4L2_PIX_FMT_XV15M,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10_420,
+ .num_buffs = 2,
+ },
+ {
+ .name = "xv15",
+ .fourcc = V4L2_PIX_FMT_XV15,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10_420,
+ .num_buffs = 1,
+ },
+ {
+ .name = "y8",
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "y10",
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y10,
+ .num_buffs = 1,
+ },
+ {
+ .name = "xrgb8888",
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_BGRX8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "uyvy",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_UYVY8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "rgb888",
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_BGR8,
+ .num_buffs = 1,
+ },
+};
+
+/**
+ * struct xm2msc_q_data - Per-queue, driver-specific private data
+ * There is one source queue and one destination queue for each m2m context.
+ * @width: frame width
+ * @height: frame height
+ * @stride: bytes per line
+ * @nbuffs: current number of buffers
+ * @bytesperline: bytes per line per plane
+ * @sizeimage: image size per plane
+ * @colorspace: supported colorspace
+ * @field: supported field value
+ * @fmt: format info
+ */
+struct xm2msc_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int stride;
+ unsigned int nbuffs;
+ unsigned int bytesperline[2];
+ unsigned int sizeimage[2];
+ enum v4l2_colorspace colorspace;
+ enum v4l2_field field;
+ const struct xm2msc_fmt *fmt;
+};
+
+/**
+ * struct xm2msc_chan_ctx - Scaler Channel Info, Per-Channel context
+ * @regs: IO mapped base address of the Channel
+ * @xm2msc_dev: Pointer to struct xm2m_msc_dev
+ * @num: HW Scaling Channel number
+ * @minor: Minor number of the video device
+ * @output_stride_align: required stride alignment at the output pad
+ * @capture_stride_align: required stride alignment at the capture pad
+ * @output_height_align: required height alignment at the output pad
+ * @capture_height_align: required height alignment at the capture pad
+ * @status: channel status, CHAN_ATTACHED or CHAN_OPENED
+ * @frames: number of frames processed
+ * @vfd: V4L2 device
+ * @fh: v4l2 file handle
+ * @m2m_dev: m2m device
+ * @m2m_ctx: memory to memory context structure
+ * @q_data: src & dst queue data
+ */
+struct xm2msc_chan_ctx {
+ void __iomem *regs;
+ struct xm2m_msc_dev *xm2msc_dev;
+ u32 num;
+ u32 minor;
+ u32 output_stride_align;
+ u32 capture_stride_align;
+ u32 output_height_align;
+ u32 capture_height_align;
+ u8 status;
+ unsigned long frames;
+
+ struct video_device vfd;
+ struct v4l2_fh fh;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+
+ struct xm2msc_q_data q_data[2];
+};
+
+/**
+ * struct xm2m_msc_dev - Xilinx M2M Multi-scaler Device
+ * @dev: pointer to struct device instance used by the driver
+ * @regs: IO mapped base address of the HW/IP
+ * @irq: interrupt number
+ * @clk: video core clock
+ * @max_chan: maximum number of Scaling Channels
+ * @max_ht: maximum number of rows in a plane
+ * @max_wd: maximum number of columns in a plane
+ * @taps: number of taps set in HW
+ * @supported_fmt: bitmap for all supported fmts by HW
+ * @dma_addr_size: Size of dma address pointer in IP (either 32 or 64)
+ * @ppc: Pixels per clock set in IP (1, 2 or 4)
+ * @rst_gpio: reset gpio handler
+ * @opened_chan: bitmap for all open channel
+ * @out_streamed_chan: bitmap for all out streamed channel
+ * @cap_streamed_chan: bitmap for all capture streamed channel
+ * @running_chan: currently running channels
+ * @device_busy: whether the HW device is currently busy
+ * @isr_wait: flag to track whether an ISR completion is being waited for
+ * @isr_finished: Wait queue used to wait for IP to complete processing
+ * @v4l2_dev: main struct for V4L2 device drivers
+ * @dev_mutex: lock for V4L2 device
+ * @mutex: lock for channel ctx
+ * @lock: lock used in IRQ
+ * @xm2msc_chan: array of channel contexts
+ * @hscaler_coeff: Array of filter coefficients for the Horizontal Scaler
+ * @vscaler_coeff: Array of filter coefficients for the Vertical Scaler
+ */
+struct xm2m_msc_dev {
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ u32 max_chan;
+ u32 max_ht;
+ u32 max_wd;
+ u32 taps;
+ u32 supported_fmt;
+ u32 dma_addr_size;
+ u8 ppc;
+ struct gpio_desc *rst_gpio;
+
+ u32 opened_chan;
+ u32 out_streamed_chan;
+ u32 cap_streamed_chan;
+ u32 running_chan;
+ bool device_busy;
+ bool isr_wait;
+ wait_queue_head_t isr_finished;
+
+ struct v4l2_device v4l2_dev;
+
+	struct mutex dev_mutex; /* the mutex for v4l2 */
+	struct mutex mutex; /* lock for bitmap reg */
+	spinlock_t lock; /* IRQ lock */
+
+ struct xm2msc_chan_ctx xm2msc_chan[XM2MSC_MAX_CHAN];
+ short hscaler_coeff[XSCALER_MAX_PHASES][XSCALER_MAX_TAPS];
+ short vscaler_coeff[XSCALER_MAX_PHASES][XSCALER_MAX_TAPS];
+};
+
+#define fh_to_chanctx(__fh) container_of(__fh, struct xm2msc_chan_ctx, fh)
+
+static inline u32 xm2msc_readreg(const void __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static inline void xm2msc_write64reg(void __iomem *addr, u64 value)
+{
+ iowrite32(lower_32_bits(value), addr);
+ iowrite32(upper_32_bits(value), (void __iomem *)(addr + 4));
+}
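xm2msc_write64reg() programs a 64-bit buffer address into two consecutive
32-bit registers, low word first, matching the SRCIMGBUF/DSTIMGBUF low/high
register pairs. The split is ordinary shift arithmetic; a userspace sketch
with a made-up address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x0000000876543210ULL;	/* example DMA address */
	uint32_t lo = (uint32_t)addr;		/* lower_32_bits(addr) */
	uint32_t hi = (uint32_t)(addr >> 32);	/* upper_32_bits(addr) */

	printf("reg+0 = 0x%08x, reg+4 = 0x%08x\n",
	       (unsigned int)lo, (unsigned int)hi);
	return 0;
}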
+
+static inline void xm2msc_writereg(void __iomem *addr, u32 value)
+{
+ iowrite32(value, addr);
+}
+
+static bool xm2msc_is_yuv_singlebuff(u32 fourcc)
+{
+ if (fourcc == V4L2_PIX_FMT_NV12 || fourcc == V4L2_PIX_FMT_XV15 ||
+ fourcc == V4L2_PIX_FMT_NV16 || fourcc == V4L2_PIX_FMT_XV20)
+ return true;
+
+ return false;
+}
+
+static inline u32 xm2msc_yuv_1stplane_size(struct xm2msc_q_data *q_data,
+ u32 row_align)
+{
+ return q_data->bytesperline[0] * ALIGN(q_data->height, row_align);
+}
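For the single-buffer semi-planar formats the first (luma) plane spans
bytesperline times the row-aligned height, which also fixes the offset at
which the chroma data starts in the same buffer. A sketch of the arithmetic,
assuming an illustrative 16-row alignment:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int bytesperline = 1920, height = 1080, row_align = 16;
	unsigned int plane0 = bytesperline * ALIGN_UP(height, row_align);

	/* 1920 * 1088 = 2088960: the chroma plane begins at this offset. */
	printf("first plane: %u bytes\n", plane0);
	return 0;
}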
+
+static struct xm2msc_q_data *get_q_data(struct xm2msc_chan_ctx *chan_ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &chan_ctx->q_data[XM2MSC_CHAN_OUT];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &chan_ctx->q_data[XM2MSC_CHAN_CAP];
+ default:
+ v4l2_err(&chan_ctx->xm2msc_dev->v4l2_dev,
+ "Not supported Q type %d\n", type);
+ }
+ return NULL;
+}
+
+static u32 find_format_index(struct v4l2_format *f)
+{
+ const struct xm2msc_fmt *fmt;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ fmt = &formats[i];
+ if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
+ break;
+ }
+
+ return i;
+}
+
+static const struct xm2msc_fmt *find_format(struct v4l2_format *f)
+{
+ const struct xm2msc_fmt *fmt;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ fmt = &formats[i];
+ if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(formats))
+ return NULL;
+
+ return &formats[i];
+}
+
+static void
+xm2msc_hscaler_load_ext_coeff(struct xm2m_msc_dev *xm2msc,
+ const short *coeff, u32 ntaps)
+{
+ unsigned int i, j, pad, offset;
+ const u32 nphases = XSCALER_MAX_PHASES;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XSCALER_MAX_TAPS - ntaps;
+ offset = pad >> 1;
+
+ memset(xm2msc->hscaler_coeff, 0, sizeof(xm2msc->hscaler_coeff));
+
+ /* Load coefficients into scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ xm2msc->hscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+}
+
+static void xm2msc_hscaler_set_coeff(struct xm2msc_chan_ctx *chan_ctx,
+ const u32 base_addr)
+{
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ int val, offset, rd_indx;
+ unsigned int i, j;
+ u32 ntaps = chan_ctx->xm2msc_dev->taps;
+ const u32 nphases = XSCALER_MAX_PHASES;
+
+ offset = (XSCALER_MAX_TAPS - ntaps) / 2;
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (xm2msc->hscaler_coeff[i][rd_indx + 1] <<
+ XM2MSC_BITSHIFT_16) |
+ (xm2msc->hscaler_coeff[i][rd_indx] &
+ XM2MSC_MASK_LOW_16BITS);
+ xm2msc_writereg((xm2msc->regs + base_addr) +
+ ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
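Each 32-bit word of a coefficient bank packs two adjacent 16-bit taps, with
the odd-indexed tap in the high half. A self-contained sketch of the packing
done in the inner loop above, using the first two 6-tap coefficients from
phase 0 as sample values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int16_t coeff[2] = { -132, 236 };	/* taps j and j + 1 */
	uint32_t val = ((uint16_t)coeff[1] << 16) |
		       ((uint16_t)coeff[0] & 0xffff);

	printf("packed word = 0x%08x\n", (unsigned int)val); /* 0x00ecff7c */
	return 0;
}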
+
+static void
+xm2msc_vscaler_load_ext_coeff(struct xm2m_msc_dev *xm2msc,
+ const short *coeff, const u32 ntaps)
+{
+ unsigned int i, j;
+ int pad, offset;
+ const u32 nphases = XSCALER_MAX_PHASES;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XSCALER_MAX_TAPS - ntaps;
+	offset = pad >> 1;
+
+ /* Zero Entire Array */
+ memset(xm2msc->vscaler_coeff, 0, sizeof(xm2msc->vscaler_coeff));
+
+ /* Load User defined coefficients into scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ xm2msc->vscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+}
+
+static void
+xm2msc_vscaler_set_coeff(struct xm2msc_chan_ctx *chan_ctx,
+ const u32 base_addr)
+{
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ u32 val, i, j, offset, rd_indx;
+ u32 ntaps = chan_ctx->xm2msc_dev->taps;
+ const u32 nphases = XSCALER_MAX_PHASES;
+
+ offset = (XSCALER_MAX_TAPS - ntaps) / 2;
+
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (xm2msc->vscaler_coeff[i][rd_indx + 1] <<
+ XM2MSC_BITSHIFT_16) |
+ (xm2msc->vscaler_coeff[i][rd_indx] &
+ XM2MSC_MASK_LOW_16BITS);
+ xm2msc_writereg((xm2msc->regs +
+ base_addr) + ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+static u32
+xm2msc_select_hcoeff(struct xm2msc_chan_ctx *chan_ctx, const short **coeff)
+{
+ u16 hscale_ratio;
+ u32 width_in = chan_ctx->q_data[XM2MSC_CHAN_OUT].width;
+ u32 width_out = chan_ctx->q_data[XM2MSC_CHAN_CAP].width;
+ u32 ntaps = chan_ctx->xm2msc_dev->taps;
+
+ if (width_out < width_in) {
+ hscale_ratio = (width_in * 10) / width_out;
+
+ switch (chan_ctx->xm2msc_dev->taps) {
+ case XSCALER_TAPS_12:
+ if (hscale_ratio > 35) {
+ *coeff = &xhsc_coeff_taps12[0][0];
+ ntaps = XSCALER_TAPS_12;
+ } else if (hscale_ratio > 25) {
+ *coeff = &xhsc_coeff_taps10[0][0];
+ ntaps = XSCALER_TAPS_10;
+ } else if (hscale_ratio > 15) {
+ *coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XSCALER_TAPS_8;
+ } else {
+ *coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ break;
+ case XSCALER_TAPS_10:
+ if (hscale_ratio > 25) {
+ *coeff = &xhsc_coeff_taps10[0][0];
+ ntaps = XSCALER_TAPS_10;
+ } else if (hscale_ratio > 15) {
+ *coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XSCALER_TAPS_8;
+ } else {
+ *coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ break;
+ case XSCALER_TAPS_8:
+ if (hscale_ratio > 15) {
+ *coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XSCALER_TAPS_8;
+ } else {
+ *coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ break;
+ default: /* or XSCALER_TAPS_6 */
+ *coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ } else {
+		/*
+		 * Scale-up mode always uses the 6-tap filter.
+		 * This also includes 1:1 scaling.
+		 */
+ *coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+
+ return ntaps;
+}
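The filter is picked from the downscale ratio expressed in tenths: above
3.5x the 12-tap bank is used, above 2.5x the 10-tap, above 1.5x the 8-tap,
otherwise 6 taps; upscaling and 1:1 always use 6 taps, and the choice is
capped by the tap count the IP was synthesized with. A runnable sketch of
the 12-tap-capable decision path, with example widths:

#include <stdio.h>

static unsigned int select_taps(unsigned int in, unsigned int out)
{
	unsigned int ratio;

	if (out >= in)		/* upscale and 1:1 always use 6 taps */
		return 6;

	ratio = in * 10 / out;	/* downscale ratio in tenths */
	if (ratio > 35)
		return 12;
	if (ratio > 25)
		return 10;
	if (ratio > 15)
		return 8;
	return 6;
}

int main(void)
{
	printf("%u\n", select_taps(3840, 1920));	/* 2.0x -> 8 */
	printf("%u\n", select_taps(3840, 960));		/* 4.0x -> 12 */
	printf("%u\n", select_taps(1280, 1920));	/* upscale -> 6 */
	return 0;
}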
+
+static u32
+xm2msc_select_vcoeff(struct xm2msc_chan_ctx *chan_ctx, const short **coeff)
+{
+ u16 vscale_ratio;
+ u32 height_in = chan_ctx->q_data[XM2MSC_CHAN_OUT].height;
+ u32 height_out = chan_ctx->q_data[XM2MSC_CHAN_CAP].height;
+ u32 ntaps = chan_ctx->xm2msc_dev->taps;
+
+ if (height_out < height_in) {
+ vscale_ratio = (height_in * 10) / height_out;
+
+ switch (chan_ctx->xm2msc_dev->taps) {
+ case XSCALER_TAPS_12:
+ if (vscale_ratio > 35) {
+ *coeff = &xvsc_coeff_taps12[0][0];
+ ntaps = XSCALER_TAPS_12;
+ } else if (vscale_ratio > 25) {
+ *coeff = &xvsc_coeff_taps10[0][0];
+ ntaps = XSCALER_TAPS_10;
+ } else if (vscale_ratio > 15) {
+ *coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XSCALER_TAPS_8;
+ } else {
+ *coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ break;
+ case XSCALER_TAPS_10:
+ if (vscale_ratio > 25) {
+ *coeff = &xvsc_coeff_taps10[0][0];
+ ntaps = XSCALER_TAPS_10;
+ } else if (vscale_ratio > 15) {
+ *coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XSCALER_TAPS_8;
+ } else {
+ *coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ break;
+ case XSCALER_TAPS_8:
+ if (vscale_ratio > 15) {
+ *coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XSCALER_TAPS_8;
+ } else {
+ *coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ break;
+ default: /* or XSCALER_TAPS_6 */
+ *coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ } else {
+		/*
+		 * Scale-up mode always uses the 6-tap filter.
+		 * This also includes 1:1 scaling.
+		 */
+ *coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+
+ return ntaps;
+}
+
+static void xm2mvsc_initialize_coeff_banks(struct xm2msc_chan_ctx *chan_ctx)
+{
+ const short *coeff = NULL;
+ u32 ntaps;
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+
+ ntaps = xm2msc_select_hcoeff(chan_ctx, &coeff);
+ xm2msc_hscaler_load_ext_coeff(xm2msc, coeff, ntaps);
+ xm2msc_hscaler_set_coeff(chan_ctx, XM2MVSC_HFLTCOEFF(chan_ctx->num));
+
+ dev_dbg(xm2msc->dev, "htaps %d selected for chan %d\n",
+ ntaps, chan_ctx->num);
+
+ ntaps = xm2msc_select_vcoeff(chan_ctx, &coeff);
+ xm2msc_vscaler_load_ext_coeff(xm2msc, coeff, ntaps);
+ xm2msc_vscaler_set_coeff(chan_ctx, XM2MVSC_VFLTCOEFF(chan_ctx->num));
+
+ dev_dbg(xm2msc->dev, "vtaps %d selected for chan %d\n",
+ ntaps, chan_ctx->num);
+}
+
+static void xm2msc_set_chan_params(struct xm2msc_chan_ctx *chan_ctx,
+ enum v4l2_buf_type type)
+{
+ struct xm2msc_q_data *q_data = get_q_data(chan_ctx, type);
+ const struct xm2msc_fmt *fmt = q_data->fmt;
+ void __iomem *base = chan_ctx->regs;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ xm2msc_writereg(base + XM2MSC_WIDTHIN, q_data->width);
+ xm2msc_writereg(base + XM2MSC_HEIGHTIN, q_data->height);
+ xm2msc_writereg(base + XM2MSC_INPIXELFMT, fmt->xm2msc_fmt);
+ xm2msc_writereg(base + XM2MSC_INSTRIDE, q_data->stride);
+ } else {
+ xm2msc_writereg(base + XM2MSC_WIDTHOUT, q_data->width);
+ xm2msc_writereg(base + XM2MSC_HEIGHTOUT, q_data->height);
+ xm2msc_writereg(base + XM2MSC_OUTPIXELFMT, fmt->xm2msc_fmt);
+ xm2msc_writereg(base + XM2MSC_OUTSTRIDE, q_data->stride);
+ }
+}
+
+static void xm2msc_set_chan_com_params(struct xm2msc_chan_ctx *chan_ctx)
+{
+ void __iomem *base = chan_ctx->regs;
+ struct xm2msc_q_data *out_q_data = &chan_ctx->q_data[XM2MSC_CHAN_OUT];
+ struct xm2msc_q_data *cap_q_data = &chan_ctx->q_data[XM2MSC_CHAN_CAP];
+ u32 pixel_rate;
+ u32 line_rate;
+
+ xm2mvsc_initialize_coeff_banks(chan_ctx);
+
+ pixel_rate = (out_q_data->width * XM2MSC_STEP_PRECISION) /
+ cap_q_data->width;
+ line_rate = (out_q_data->height * XM2MSC_STEP_PRECISION) /
+ cap_q_data->height;
+
+ xm2msc_writereg(base + XM2MSC_PIXELRATE, pixel_rate);
+ xm2msc_writereg(base + XM2MSC_LINERATE, line_rate);
+}
+
+static void xm2msc_program_allchan(struct xm2m_msc_dev *xm2msc)
+{
+ u32 chan;
+
+ for (chan = 0; chan < xm2msc->running_chan; chan++) {
+ struct xm2msc_chan_ctx *chan_ctx;
+
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+
+ xm2msc_set_chan_params(chan_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ xm2msc_set_chan_params(chan_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ xm2msc_set_chan_com_params(chan_ctx);
+ }
+}
+
+static void
+xm2msc_pr_q(struct device *dev, struct xm2msc_q_data *q, int chan,
+ int type, const char *fun_name)
+{
+ unsigned int i;
+ const struct xm2msc_fmt *fmt = q->fmt;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ dev_dbg(dev, "\n\nOUTPUT Q (%d) Context from [[ %s ]]",
+ chan, fun_name);
+ else
+ dev_dbg(dev, "\n\nCAPTURE Q (%d) Context from [[ %s ]]",
+ chan, fun_name);
+
+ dev_dbg(dev, "width height stride clrspace field planes\n");
+ dev_dbg(dev, " %d %d %d %d %d %d\n",
+ q->width, q->height, q->stride,
+ q->colorspace, q->field, q->nbuffs);
+
+ for (i = 0; i < q->nbuffs; i++) {
+ dev_dbg(dev, "[plane %d ] bytesperline sizeimage\n", i);
+ dev_dbg(dev, " %d %d\n",
+ q->bytesperline[i], q->sizeimage[i]);
+ }
+
+ dev_dbg(dev, "fmt_name 4cc xlnx-fmt\n");
+ dev_dbg(dev, "%s %d %d\n",
+ fmt->name, fmt->fourcc, fmt->xm2msc_fmt);
+ dev_dbg(dev, "\n\n");
+}
+
+static void
+xm2msc_pr_status(struct xm2m_msc_dev *xm2msc,
+ const char *fun_name)
+{
+ struct device *dev = xm2msc->dev;
+
+ dev_dbg(dev, "Status in %s\n", fun_name);
+ dev_dbg(dev, "opened_chan out_streamed_chan cap_streamed_chan\n");
+ dev_dbg(dev, "0x%x 0x%x 0x%x\n",
+ xm2msc->opened_chan, xm2msc->out_streamed_chan,
+ xm2msc->cap_streamed_chan);
+ dev_dbg(dev, "\n\n");
+}
+
+static void
+xm2msc_pr_chanctx(struct xm2msc_chan_ctx *ctx, const char *fun_name)
+{
+ struct device *dev = ctx->xm2msc_dev->dev;
+
+ dev_dbg(dev, "\n\n----- [[ %s ]]: Channel %d (0x%p) context -----\n",
+ fun_name, ctx->num, ctx);
+ dev_dbg(dev, "minor = %d\n", ctx->minor);
+ dev_dbg(dev, "reg mapped at %p\n", ctx->regs);
+ dev_dbg(dev, "xm2msc \tm2m_dev \tm2m_ctx\n");
+ dev_dbg(dev, "%p \t%p \t%p\n", ctx->xm2msc_dev,
+ ctx->m2m_dev, ctx->m2m_ctx);
+
+ if (ctx->status & CHAN_OPENED)
+ dev_dbg(dev, "Opened ");
+ if (ctx->status & CHAN_ATTACHED)
+ dev_dbg(dev, "and attached");
+ dev_dbg(dev, "\n");
+ dev_dbg(dev, "-----------------------------------\n");
+ dev_dbg(dev, "\n\n");
+}
+
+static void
+xm2msc_pr_screg(struct device *dev, const void __iomem *base)
+{
+ dev_dbg(dev, "Ctr, GIE, IE, IS OUT\n");
+ dev_dbg(dev, "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ xm2msc_readreg(base + XM2MSC_AP_CTRL),
+ xm2msc_readreg(base + XM2MSC_GIE),
+ xm2msc_readreg(base + XM2MSC_IER),
+ xm2msc_readreg(base + XM2MSC_ISR),
+ xm2msc_readreg(base + XM2MSC_NUM_OUTS));
+}
+
+static void
+xm2msc_pr_chanreg(struct device *dev, struct xm2msc_chan_ctx *chan)
+{
+ const void __iomem *base = chan->regs;
+
+ dev_dbg(dev, "WIN HIN INPIXELFMT INSTRIDE SRCB0L/H SRCB1L/H\n");
+ dev_dbg(dev, "%d %d %d %d 0x%x/0x%x 0x%x/0x%x\n",
+ xm2msc_readreg(base + XM2MSC_WIDTHIN),
+ xm2msc_readreg(base + XM2MSC_HEIGHTIN),
+ xm2msc_readreg(base + XM2MSC_INPIXELFMT),
+ xm2msc_readreg(base + XM2MSC_INSTRIDE),
+ xm2msc_readreg(base + XM2MSC_SRCIMGBUF0),
+ xm2msc_readreg(base + XM2MSC_SRCIMGBUF0 + 4),
+ xm2msc_readreg(base + XM2MSC_SRCIMGBUF1),
+ xm2msc_readreg(base + XM2MSC_SRCIMGBUF1 + 4));
+ dev_dbg(dev, "WOUT HOUT OUTPIXELFMT OUTSTRIDE DBUF0L/H DBUF1L/H\n");
+ dev_dbg(dev, "%d %d %d %d 0x%x/0x%x 0x%x/0x%x\n",
+ xm2msc_readreg(base + XM2MSC_WIDTHOUT),
+ xm2msc_readreg(base + XM2MSC_HEIGHTOUT),
+ xm2msc_readreg(base + XM2MSC_OUTPIXELFMT),
+ xm2msc_readreg(base + XM2MSC_OUTSTRIDE),
+ xm2msc_readreg(base + XM2MSC_DSTIMGBUF0),
+ xm2msc_readreg(base + XM2MSC_DSTIMGBUF0 + 4),
+ chan->num == 4 ?
+ xm2msc_readreg(base +
+ XM2MSC_DSTIMGBUF1 + XM2MSC_RESERVED_AREA) :
+ xm2msc_readreg(base + XM2MSC_DSTIMGBUF1),
+ chan->num == 4 ?
+ xm2msc_readreg(base +
+ XM2MSC_DSTIMGBUF1 + XM2MSC_RESERVED_AREA + 4) :
+ xm2msc_readreg(base + XM2MSC_DSTIMGBUF1 + 4));
+
+ dev_dbg(dev, "LINERATE PIXELRATE\n");
+ dev_dbg(dev, "0x%x 0x%x\n",
+ xm2msc_readreg(base + XM2MSC_LINERATE),
+ xm2msc_readreg(base + XM2MSC_PIXELRATE));
+}
+
+static void
+xm2msc_pr_allchanreg(struct xm2m_msc_dev *xm2msc)
+{
+ unsigned int i;
+ struct xm2msc_chan_ctx *chan_ctx;
+ struct device *dev = xm2msc->dev;
+
+ xm2msc_pr_screg(xm2msc->dev, xm2msc->regs);
+
+ for (i = 0; i < xm2msc->running_chan; i++) {
+ chan_ctx = &xm2msc->xm2msc_chan[i];
+ dev_dbg(dev, "Regs val for channel %d\n", i);
+ dev_dbg(dev, "______________________________________________\n");
+ xm2msc_pr_chanreg(dev, chan_ctx);
+ dev_dbg(dev, "processed frames = %lu\n", chan_ctx->frames);
+ dev_dbg(dev, "______________________________________________\n");
+ }
+}
+
+static inline bool xm2msc_testbit(int num, u32 *addr)
+{
+ return (*addr & BIT(num));
+}
+
+static inline void xm2msc_setbit(int num, u32 *addr)
+{
+ *addr |= BIT(num);
+}
+
+static inline void xm2msc_clrbit(int num, u32 *addr)
+{
+ *addr &= ~BIT(num);
+}
+
+static void xm2msc_stop(struct xm2m_msc_dev *xm2msc)
+{
+ void __iomem *base = xm2msc->regs;
+ u32 data = xm2msc_readreg(base + XM2MSC_AP_CTRL);
+
+ data &= ~XM2MSC_AP_CTRL_START;
+ xm2msc_writereg(base + XM2MSC_AP_CTRL, data);
+}
+
+static void xm2msc_start(struct xm2m_msc_dev *xm2msc)
+{
+ void __iomem *base = xm2msc->regs;
+ u32 data = xm2msc_readreg(base + XM2MSC_AP_CTRL);
+
+ data |= XM2MSC_AP_CTRL_START;
+ xm2msc_writereg(base + XM2MSC_AP_CTRL, data);
+}
+
+static void xm2msc_set_chan(struct xm2msc_chan_ctx *ctx, bool state)
+{
+ mutex_lock(&ctx->xm2msc_dev->mutex);
+ if (state)
+ xm2msc_setbit(ctx->num, &ctx->xm2msc_dev->opened_chan);
+ else
+ xm2msc_clrbit(ctx->num, &ctx->xm2msc_dev->opened_chan);
+ mutex_unlock(&ctx->xm2msc_dev->mutex);
+}
+
+static void
+xm2msc_set_chan_stream(struct xm2msc_chan_ctx *ctx, bool state, int type)
+{
+ u32 *ptr;
+
+ if (type == XM2MSC_CHAN_OUT)
+ ptr = &ctx->xm2msc_dev->out_streamed_chan;
+ else
+ ptr = &ctx->xm2msc_dev->cap_streamed_chan;
+
+ spin_lock(&ctx->xm2msc_dev->lock);
+ if (state)
+ xm2msc_setbit(ctx->num, ptr);
+ else
+ xm2msc_clrbit(ctx->num, ptr);
+
+ spin_unlock(&ctx->xm2msc_dev->lock);
+}
+
+static int
+xm2msc_chk_chan_stream(struct xm2msc_chan_ctx *ctx, int type)
+{
+ u32 *ptr;
+ int ret;
+
+ if (type == XM2MSC_CHAN_OUT)
+ ptr = &ctx->xm2msc_dev->out_streamed_chan;
+ else
+ ptr = &ctx->xm2msc_dev->cap_streamed_chan;
+
+ mutex_lock(&ctx->xm2msc_dev->mutex);
+ ret = xm2msc_testbit(ctx->num, ptr);
+ mutex_unlock(&ctx->xm2msc_dev->mutex);
+
+ return ret;
+}
+
+static void xm2msc_set_fmt(struct xm2m_msc_dev *xm2msc, u32 index)
+{
+ xm2msc_setbit(index, &xm2msc->supported_fmt);
+}
+
+static int xm2msc_chk_fmt(struct xm2m_msc_dev *xm2msc, u32 index)
+{
+ return xm2msc_testbit(index, &xm2msc->supported_fmt);
+}
+
+static void xm2msc_reset(struct xm2m_msc_dev *xm2msc)
+{
+ gpiod_set_value_cansleep(xm2msc->rst_gpio, XM2MSC_RESET_ASSERT);
+ gpiod_set_value_cansleep(xm2msc->rst_gpio, XM2MSC_RESET_DEASSERT);
+}
+
+/*
+ * mem2mem callbacks
+ */
+static int xm2msc_job_ready(void *priv)
+{
+ struct xm2msc_chan_ctx *chan_ctx = priv;
+
+ if ((v4l2_m2m_num_src_bufs_ready(chan_ctx->m2m_ctx) > 0) &&
+ (v4l2_m2m_num_dst_bufs_ready(chan_ctx->m2m_ctx) > 0))
+ return 1;
+ return 0;
+}
+
+static bool xm2msc_alljob_ready(struct xm2m_msc_dev *xm2msc)
+{
+ struct xm2msc_chan_ctx *chan_ctx;
+ unsigned int chan;
+
+ for (chan = 0; chan < xm2msc->running_chan; chan++) {
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+
+ if (!xm2msc_job_ready((void *)chan_ctx)) {
+ dev_dbg(xm2msc->dev, "chan %d not ready\n",
+ chan_ctx->num);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void xm2msc_chan_abort_bufs(struct xm2msc_chan_ctx *chan_ctx)
+{
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ struct vb2_v4l2_buffer *dst_vb, *src_vb;
+
+ spin_lock(&xm2msc->lock);
+ dev_dbg(xm2msc->dev, "aborting all buffers\n");
+
+ while (v4l2_m2m_num_src_bufs_ready(chan_ctx->m2m_ctx) > 0) {
+ src_vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+ }
+
+ while (v4l2_m2m_num_dst_bufs_ready(chan_ctx->m2m_ctx) > 0) {
+ dst_vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+ }
+
+ v4l2_m2m_job_finish(chan_ctx->m2m_dev, chan_ctx->m2m_ctx);
+ spin_unlock(&xm2msc->lock);
+}
+
+static void xm2msc_job_abort(void *priv)
+{
+ struct xm2msc_chan_ctx *chan_ctx = priv;
+
+ xm2msc_chan_abort_bufs(chan_ctx);
+
+ /*
+ * Stream off the channel here, as job_abort is not
+ * always paired with a streamoff call
+ */
+ xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_OUT);
+ xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_CAP);
+}
+
+static int xm2msc_set_bufaddr(struct xm2m_msc_dev *xm2msc)
+{
+ unsigned int chan;
+ u32 row_align;
+ struct xm2msc_chan_ctx *chan_ctx;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ void __iomem *base;
+ struct xm2msc_q_data *q_data;
+ dma_addr_t src_luma, dst_luma;
+ dma_addr_t src_chroma, dst_chroma;
+
+ if (!xm2msc_alljob_ready(xm2msc))
+ return -EINVAL;
+
+ for (chan = 0; chan < xm2msc->running_chan; chan++) {
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+ base = chan_ctx->regs;
+
+ src_vb = v4l2_m2m_next_src_buf(chan_ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_next_dst_buf(chan_ctx->m2m_ctx);
+
+ if (!src_vb || !dst_vb) {
+ v4l2_err(&xm2msc->v4l2_dev, "buffer not found chan = %d\n",
+ chan_ctx->num);
+ return -EINVAL;
+ }
+
+ src_luma = vb2_dma_contig_plane_dma_addr(&src_vb->vb2_buf, 0);
+ dst_luma = vb2_dma_contig_plane_dma_addr(&dst_vb->vb2_buf, 0);
+
+ q_data = &chan_ctx->q_data[XM2MSC_CHAN_OUT];
+ row_align = chan_ctx->output_height_align;
+ if (chan_ctx->q_data[XM2MSC_CHAN_OUT].nbuffs == 2)
+ /* formats with 2 planes in 2 buffers */
+ src_chroma =
+ vb2_dma_contig_plane_dma_addr(&src_vb->vb2_buf,
+ 1);
+ else if (xm2msc_is_yuv_singlebuff(q_data->fmt->fourcc))
+ /* formats with 2 planes in 1 contiguous buffer */
+ src_chroma = src_luma +
+ xm2msc_yuv_1stplane_size(q_data, row_align);
+ else /* formats with 1 plane in 1 contiguous buffer */
+ src_chroma = 0;
+
+ q_data = &chan_ctx->q_data[XM2MSC_CHAN_CAP];
+ row_align = chan_ctx->capture_height_align;
+ if (chan_ctx->q_data[XM2MSC_CHAN_CAP].nbuffs == 2)
+ dst_chroma =
+ vb2_dma_contig_plane_dma_addr(&dst_vb->vb2_buf,
+ 1);
+ else if (xm2msc_is_yuv_singlebuff(q_data->fmt->fourcc))
+ dst_chroma = dst_luma +
+ xm2msc_yuv_1stplane_size(q_data, row_align);
+ else
+ dst_chroma = 0;
+
+ if (xm2msc->dma_addr_size == 64 &&
+ sizeof(dma_addr_t) == sizeof(u64)) {
+ xm2msc_write64reg(base + XM2MSC_SRCIMGBUF0, src_luma);
+ xm2msc_write64reg(base + XM2MSC_SRCIMGBUF1, src_chroma);
+ xm2msc_write64reg(base + XM2MSC_DSTIMGBUF0, dst_luma);
+ if (chan_ctx->num == 4) /* TODO: To be fixed in HW */
+ xm2msc_write64reg(base + XM2MSC_DSTIMGBUF1 +
+ XM2MSC_RESERVED_AREA,
+ dst_chroma);
+ else
+ xm2msc_write64reg(base + XM2MSC_DSTIMGBUF1,
+ dst_chroma);
+ } else {
+ xm2msc_writereg(base + XM2MSC_SRCIMGBUF0, src_luma);
+ xm2msc_writereg(base + XM2MSC_SRCIMGBUF1, src_chroma);
+ xm2msc_writereg(base + XM2MSC_DSTIMGBUF0, dst_luma);
+ if (chan_ctx->num == 4) /* TODO: To be fixed in HW */
+ xm2msc_writereg(base + XM2MSC_DSTIMGBUF1 +
+ XM2MSC_RESERVED_AREA,
+ dst_chroma);
+ else
+ xm2msc_writereg(base + XM2MSC_DSTIMGBUF1,
+ dst_chroma);
+ }
+ }
+ return 0;
+}
+
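+/*
+ * Example of the chroma address derivation above (illustrative only,
+ * assuming xm2msc_yuv_1stplane_size() returns stride times the aligned
+ * height): for single-buffer NV12 at 1920x1080 with a 1920-byte stride
+ * and no extra height alignment, the chroma plane starts at
+ * luma + 1920 * 1080 = luma + 0x1fa400.
+ */
+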
+static void xm2msc_job_finish(struct xm2m_msc_dev *xm2msc)
+{
+ unsigned int chan;
+
+ for (chan = 0; chan < xm2msc->running_chan; chan++) {
+ struct xm2msc_chan_ctx *chan_ctx;
+
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+ v4l2_m2m_job_finish(chan_ctx->m2m_dev, chan_ctx->m2m_ctx);
+ }
+}
+
+static void xm2msc_job_done(struct xm2m_msc_dev *xm2msc)
+{
+ u32 chan;
+
+ for (chan = 0; chan < xm2msc->running_chan; chan++) {
+ struct xm2msc_chan_ctx *chan_ctx;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ unsigned long flags;
+
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+
+ src_vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
+
+ if (src_vb && dst_vb) {
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
+ dst_vb->timecode = src_vb->timecode;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |=
+ src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ spin_lock_irqsave(&xm2msc->lock, flags);
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+ spin_unlock_irqrestore(&xm2msc->lock, flags);
+ }
+ chan_ctx->frames++;
+ }
+}
+
+static void xm2msc_device_run(void *priv)
+{
+ struct xm2msc_chan_ctx *chan_ctx = priv;
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ void __iomem *base = xm2msc->regs;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&xm2msc->lock, flags);
+ if (xm2msc->device_busy) {
+ spin_unlock_irqrestore(&xm2msc->lock, flags);
+ return;
+ }
+ xm2msc->device_busy = true;
+
+ if (xm2msc->running_chan != NUM_STREAM(xm2msc)) {
+ dev_dbg(xm2msc->dev, "Running chan was %d\n",
+ xm2msc->running_chan);
+ xm2msc->running_chan = NUM_STREAM(xm2msc);
+
+ /* The IP needs a reset to latch the new XM2MSC_NUM_OUTS value */
+ xm2msc_reset(xm2msc);
+ xm2msc_writereg(base + XM2MSC_NUM_OUTS, xm2msc->running_chan);
+ xm2msc_program_allchan(xm2msc);
+ }
+ spin_unlock_irqrestore(&xm2msc->lock, flags);
+
+ dev_dbg(xm2msc->dev, "Running chan = %d\n", xm2msc->running_chan);
+ if (!xm2msc->running_chan) {
+ xm2msc->device_busy = false;
+ return;
+ }
+
+ ret = xm2msc_set_bufaddr(xm2msc);
+ if (ret) {
+ /*
+ * Not all channels have buffers queued.
+ * Removal of an intermediate channel while
+ * streaming is in progress is currently not handled.
+ */
+ if (xm2msc->out_streamed_chan || xm2msc->cap_streamed_chan)
+ dev_err(xm2msc->dev,
+ "Buffer not available, streaming chan 0x%x\n",
+ xm2msc->cap_streamed_chan);
+
+ xm2msc->device_busy = false;
+ return;
+ }
+
+ xm2msc_writereg(base + XM2MSC_GIE, XM2MSC_GIE_EN);
+ xm2msc_writereg(base + XM2MSC_IER, XM2MSC_ISR_DONE);
+
+ xm2msc_pr_status(xm2msc, __func__);
+ xm2msc_pr_screg(xm2msc->dev, base);
+ xm2msc_pr_allchanreg(xm2msc);
+
+ xm2msc_start(xm2msc);
+
+ xm2msc->isr_wait = true;
+ wait_event(xm2msc->isr_finished, !xm2msc->isr_wait);
+
+ xm2msc_job_done(xm2msc);
+
+ xm2msc->device_busy = false;
+
+ if (xm2msc_alljob_ready(xm2msc))
+ xm2msc_device_run(xm2msc->xm2msc_chan);
+
+ xm2msc_job_finish(xm2msc);
+}
+
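+/*
+ * Scheduling note: the hardware processes all enabled channels in
+ * lock-step, so device_run() only proceeds once every running channel
+ * has both a source and a destination buffer queued (see
+ * xm2msc_alljob_ready()). The device_busy flag serializes runs, and
+ * xm2msc_isr() below wakes the waiter once XM2MSC_ISR_DONE fires.
+ */
+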
+static irqreturn_t xm2msc_isr(int irq, void *data)
+{
+ struct xm2m_msc_dev *xm2msc = (struct xm2m_msc_dev *)data;
+ void __iomem *base = xm2msc->regs;
+ u32 status;
+
+ status = xm2msc_readreg(base + XM2MSC_ISR);
+ if (!(status & XM2MSC_ISR_DONE))
+ return IRQ_NONE;
+
+ xm2msc_writereg(base + XM2MSC_ISR, status & XM2MSC_ISR_DONE);
+
+ xm2msc_stop(xm2msc);
+
+ xm2msc->isr_wait = false;
+ wake_up(&xm2msc->isr_finished);
+
+ return IRQ_HANDLED;
+}
+
+static int xm2msc_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return v4l2_m2m_streamon(file, chan_ctx->m2m_ctx, type);
+}
+
+static int xm2msc_streamoff(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+ int ret;
+
+ ret = v4l2_m2m_streamoff(file, chan_ctx->m2m_ctx, type);
+
+ /* Kick the scheduler in case other channels still have work queued */
+ xm2msc_device_run(chan_ctx);
+ return ret;
+}
+
+static int xm2msc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return v4l2_m2m_qbuf(file, chan_ctx->m2m_ctx, buf);
+}
+
+static int xm2msc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return v4l2_m2m_dqbuf(file, chan_ctx->m2m_ctx, buf);
+}
+
+static int xm2msc_expbuf(struct file *file, void *fh,
+ struct v4l2_exportbuffer *eb)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return v4l2_m2m_expbuf(file, chan_ctx->m2m_ctx, eb);
+}
+
+static int xm2msc_createbufs(struct file *file, void *fh,
+ struct v4l2_create_buffers *cb)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return v4l2_m2m_create_bufs(file, chan_ctx->m2m_ctx, cb);
+}
+
+static int xm2msc_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return v4l2_m2m_reqbufs(file, chan_ctx->m2m_ctx, reqbufs);
+}
+
+static int xm2msc_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return v4l2_m2m_querybuf(file, chan_ctx->m2m_ctx, buf);
+}
+
+static void
+xm2msc_cal_imagesize(struct xm2msc_chan_ctx *chan_ctx,
+ struct xm2msc_q_data *q_data, u32 type)
+{
+ unsigned int i;
+ u32 fourcc = q_data->fmt->fourcc;
+ u32 height = q_data->height;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ height = ALIGN(height, chan_ctx->output_height_align);
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ height = ALIGN(height, chan_ctx->capture_height_align);
+
+ for (i = 0; i < q_data->nbuffs; i++) {
+ q_data->bytesperline[i] = q_data->stride;
+ q_data->sizeimage[i] = q_data->stride * height;
+ }
+
+ switch (fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_XV15:
+ /*
+ * Adding chroma plane size as NV12/XV15
+ * have a contiguous buffer for luma and chroma
+ */
+ q_data->sizeimage[0] +=
+ q_data->stride * (height / 2);
+ break;
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_XV15M:
+ q_data->sizeimage[1] =
+ q_data->stride * (height / 2);
+ break;
+ default:
+ break;
+ }
+}
+
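+/*
+ * Worked example of the image-size math above (illustrative only):
+ * single-buffer NV12 at 1920x1080 with a 1920-byte stride and a height
+ * alignment of 1 gives sizeimage[0] = 1920 * 1080 (luma) +
+ * 1920 * 540 (chroma) = 3110400 bytes. The two-buffer NV12M variant
+ * splits the same totals across sizeimage[0] and sizeimage[1].
+ */
+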
+static unsigned int
+xm2msc_cal_stride(unsigned int width, enum xm2msc_pix_fmt xfmt, u8 ppc)
+{
+ unsigned int stride;
+ u32 align;
+
+ /* Stride in bytes = width * bytes per pixel */
+ switch (xfmt) {
+ case XILINX_M2MSC_FMT_RGBX8:
+ case XILINX_M2MSC_FMT_YUVX8:
+ case XILINX_M2MSC_FMT_RGBX10:
+ case XILINX_M2MSC_FMT_YUVX10:
+ case XILINX_M2MSC_FMT_BGRX8:
+ stride = width * 4;
+ break;
+ case XILINX_M2MSC_FMT_YUYV8:
+ case XILINX_M2MSC_FMT_UYVY8:
+ stride = width * 2;
+ break;
+ case XILINX_M2MSC_FMT_Y_UV8:
+ case XILINX_M2MSC_FMT_Y_UV8_420:
+ case XILINX_M2MSC_FMT_Y8:
+ stride = width * 1;
+ break;
+ case XILINX_M2MSC_FMT_RGB8:
+ case XILINX_M2MSC_FMT_YUV8:
+ case XILINX_M2MSC_FMT_BGR8:
+ stride = width * 3;
+ break;
+ case XILINX_M2MSC_FMT_Y_UV10:
+ case XILINX_M2MSC_FMT_Y_UV10_420:
+ case XILINX_M2MSC_FMT_Y10:
+ /* 4 bytes per 3 pixels */
+ stride = DIV_ROUND_UP(width * 4, 3);
+ break;
+ default:
+ stride = 0;
+ }
+
+ /* The data bus is 64 bits per pixel per clock; align the stride accordingly */
+ align = ppc * XM2MSC_ALIGN_MUL;
+ stride = ALIGN(stride, align);
+
+ return stride;
+}
+
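+/*
+ * Worked example of the stride math above (illustrative only, assuming
+ * XM2MSC_ALIGN_MUL is 8, i.e. one 64-bit data beat per pixel per
+ * clock): a 10-bit semi-planar line of 1920 pixels packs 3 pixels into
+ * 4 bytes, so stride = DIV_ROUND_UP(1920 * 4, 3) = 2560 bytes, already
+ * a multiple of the 16-byte alignment at 2 pixels per clock.
+ */
+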
+static int
+vidioc_try_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
+{
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct xm2msc_q_data *q_data;
+ struct vb2_queue *vq;
+ int index;
+
+ if (pix->width < XM2MSC_MIN_WIDTH || pix->width > xm2msc->max_wd ||
+ pix->height < XM2MSC_MIN_HEIGHT || pix->height > xm2msc->max_ht)
+ dev_dbg(xm2msc->dev,
+ "Wrong input parameters %d, wxh: %dx%d.\n",
+ f->type, f->fmt.pix.width, f->fmt.pix.height);
+
+ /* The width value must be a multiple of pixels per clock */
+ if (pix->width % chan_ctx->xm2msc_dev->ppc) {
+ dev_info(xm2msc->dev,
+ "Wrong align parameters %d, wxh: %dx%d.\n",
+ f->type, f->fmt.pix.width, f->fmt.pix.height);
+ pix->width = ALIGN(pix->width, chan_ctx->xm2msc_dev->ppc);
+ }
+
+ /*
+ * V4L2 specification suggests the driver corrects the
+ * format struct if any of the dimensions is unsupported
+ */
+ if (pix->height < XM2MSC_MIN_HEIGHT)
+ pix->height = XM2MSC_MIN_HEIGHT;
+ else if (pix->height > xm2msc->max_ht)
+ pix->height = xm2msc->max_ht;
+
+ if (pix->width < XM2MSC_MIN_WIDTH)
+ pix->width = XM2MSC_MIN_WIDTH;
+ else if (pix->width > xm2msc->max_wd)
+ pix->width = xm2msc->max_wd;
+
+ vq = v4l2_m2m_get_vq(chan_ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(chan_ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&xm2msc->v4l2_dev,
+ "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ q_data->fmt = find_format(f);
+ index = find_format_index(f);
+ if (!q_data->fmt || index == ARRAY_SIZE(formats) ||
+ !xm2msc_chk_fmt(xm2msc, index)) {
+ v4l2_err(&xm2msc->v4l2_dev,
+ "Couldn't set format type %d, wxh: %dx%d. ",
+ f->type, f->fmt.pix.width, f->fmt.pix.height);
+ v4l2_err(&xm2msc->v4l2_dev,
+ "fmt: %d, field: %d\n",
+ f->fmt.pix.pixelformat, f->fmt.pix.field);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void xm2msc_get_align(struct xm2msc_chan_ctx *chan_ctx)
+{
+ /*
+ * TODO: This is a temporary solution, will be reverted once stride and
+ * height align value come from application.
+ */
+ chan_ctx->output_stride_align = output_stride_align[chan_ctx->num];
+ chan_ctx->capture_stride_align = capture_stride_align[chan_ctx->num];
+ chan_ctx->output_height_align = output_height_align[chan_ctx->num];
+ chan_ctx->capture_height_align = capture_height_align[chan_ctx->num];
+ if (output_stride_align[chan_ctx->num] != 1 ||
+ capture_stride_align[chan_ctx->num] != 1 ||
+ output_height_align[chan_ctx->num] != 1 ||
+ capture_height_align[chan_ctx->num] != 1) {
+ dev_info(chan_ctx->xm2msc_dev->dev,
+ "Alignment values other than the defaults were entered.\n");
+ dev_info(chan_ctx->xm2msc_dev->dev,
+ "Note this override may be removed in the future,\n");
+ dev_info(chan_ctx->xm2msc_dev->dev,
+ "once the alignment values come from the application.\n");
+ dev_info(chan_ctx->xm2msc_dev->dev,
+ "values entered are -\n"
+ "output_stride_align = %d\n"
+ "output_height_align = %d\n"
+ "capture_stride_align = %d\n"
+ "capture_height_align = %d\n",
+ chan_ctx->output_stride_align,
+ chan_ctx->output_height_align,
+ chan_ctx->capture_stride_align,
+ chan_ctx->capture_height_align);
+ }
+}
+
+static int
+vidioc_s_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct xm2msc_q_data *q_data = get_q_data(chan_ctx, f->type);
+ unsigned int i;
+ unsigned int align = 1;
+
+ q_data->width = pix->width;
+ q_data->height = pix->height;
+ q_data->stride = xm2msc_cal_stride(pix->width,
+ q_data->fmt->xm2msc_fmt,
+ chan_ctx->xm2msc_dev->ppc);
+
+ xm2msc_get_align(chan_ctx);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ align = chan_ctx->output_stride_align;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ align = chan_ctx->capture_stride_align;
+
+ q_data->stride = ALIGN(q_data->stride, align);
+
+ q_data->colorspace = pix->colorspace;
+ q_data->field = pix->field;
+ q_data->nbuffs = q_data->fmt->num_buffs;
+
+ xm2msc_cal_imagesize(chan_ctx, q_data, f->type);
+
+ for (i = 0; i < q_data->nbuffs; i++) {
+ pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
+ pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+ }
+
+ xm2msc_pr_q(chan_ctx->xm2msc_dev->dev, q_data,
+ chan_ctx->num, f->type, __func__);
+
+ return 0;
+}
+
+static int xm2msc_try_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return vidioc_try_fmt(chan_ctx, f);
+}
+
+static int xm2msc_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return vidioc_try_fmt(chan_ctx, f);
+}
+
+static int xm2msc_s_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret;
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ ret = xm2msc_try_fmt_vid_cap(file, fh, f);
+ if (ret)
+ return ret;
+ return vidioc_s_fmt(chan_ctx, f);
+}
+
+static int xm2msc_s_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret;
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ ret = xm2msc_try_fmt_vid_out(file, fh, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(chan_ctx, f);
+}
+
+static int vidioc_g_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct xm2msc_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ unsigned int i;
+
+ vq = v4l2_m2m_get_vq(chan_ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(chan_ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ pix->width = q_data->width;
+ pix->height = q_data->height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->pixelformat = q_data->fmt->fourcc;
+ pix->colorspace = q_data->colorspace;
+ pix->num_planes = q_data->nbuffs;
+
+ for (i = 0; i < pix->num_planes; i++) {
+ pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
+ pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+ }
+
+ return 0;
+}
+
+static int xm2msc_g_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return vidioc_g_fmt(chan_ctx, f);
+}
+
+static int xm2msc_g_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return vidioc_g_fmt(chan_ctx, f);
+}
+
+static int enum_fmt(struct xm2m_msc_dev *xm2msc, struct v4l2_fmtdesc *f)
+{
+ const struct xm2msc_fmt *fmt;
+ unsigned int i, enabled = 0;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (xm2msc_chk_fmt(xm2msc, i) && enabled++ == f->index)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(formats))
+ /* Format not found */
+ return -EINVAL;
+
+ /* Format found */
+ fmt = &formats[i];
+ strlcpy(f->description, fmt->name,
+ sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int xm2msc_enum_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ return enum_fmt(chan_ctx->xm2msc_dev, f);
+}
+
+static int xm2msc_enum_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ return enum_fmt(chan_ctx->xm2msc_dev, f);
+}
+
+static int xm2msc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, XM2MSC_DRIVER_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, XM2MSC_DRIVER_NAME, sizeof(cap->card) - 1);
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", XM2MSC_DRIVER_NAME);
+ /*
+ * This is a mem-to-mem only video device: only the
+ * V4L2_CAP_VIDEO_M2M_MPLANE capability is advertised.
+ */
+ cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int xm2msc_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ unsigned int i;
+ struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vq);
+ struct xm2msc_q_data *q_data;
+
+ q_data = get_q_data(chan_ctx, vq->type);
+ if (!q_data)
+ return -EINVAL;
+
+ *nplanes = q_data->nbuffs;
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->sizeimage[i];
+
+ dev_dbg(chan_ctx->xm2msc_dev->dev, "get %d buffer(s) of size %d",
+ *nbuffers, sizes[0]);
+ if (q_data->nbuffs == 2)
+ dev_dbg(chan_ctx->xm2msc_dev->dev, " and %d\n", sizes[1]);
+
+ return 0;
+}
+
+static int xm2msc_buf_prepare(struct vb2_buffer *vb)
+{
+ struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ struct xm2msc_q_data *q_data;
+ unsigned int i, num_buffs;
+
+ q_data = get_q_data(chan_ctx, vb->vb2_queue->type);
+ if (!q_data)
+ return -EINVAL;
+ num_buffs = q_data->nbuffs;
+
+ for (i = 0; i < num_buffs; i++) {
+ if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+ v4l2_err(&xm2msc->v4l2_dev, "data will not fit into plane ");
+ v4l2_err(&xm2msc->v4l2_dev, "(%lu < %lu)\n",
+ vb2_plane_size(vb, i),
+ (long)q_data->sizeimage[i]);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < num_buffs; i++)
+ vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
+
+ return 0;
+}
+
+static void xm2msc_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(chan_ctx->m2m_ctx, vbuf);
+}
+
+static void xm2msc_return_all_buffers(struct xm2msc_chan_ctx *chan_ctx,
+ struct vb2_queue *q,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *vb;
+ unsigned long flags;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
+ else
+ vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
+ if (!vb)
+ break;
+ spin_lock_irqsave(&chan_ctx->xm2msc_dev->lock, flags);
+ v4l2_m2m_buf_done(vb, state);
+ spin_unlock_irqrestore(&chan_ctx->xm2msc_dev->lock, flags);
+ }
+}
+
+static int xm2msc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(q);
+ struct xm2msc_q_data *q_data;
+ int type;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ xm2msc_set_chan_stream(chan_ctx, true, XM2MSC_CHAN_OUT);
+ else
+ xm2msc_set_chan_stream(chan_ctx, true, XM2MSC_CHAN_CAP);
+
+ xm2msc_set_chan_params(chan_ctx, q->type);
+
+ if (xm2msc_chk_chan_stream(chan_ctx, XM2MSC_CHAN_CAP) &&
+ xm2msc_chk_chan_stream(chan_ctx, XM2MSC_CHAN_OUT))
+ xm2msc_set_chan_com_params(chan_ctx);
+
+ type = V4L2_TYPE_IS_OUTPUT(q->type) ?
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q_data = get_q_data(chan_ctx, type);
+ xm2msc_pr_q(chan_ctx->xm2msc_dev->dev, q_data, chan_ctx->num,
+ type, __func__);
+ xm2msc_pr_status(chan_ctx->xm2msc_dev, __func__);
+
+ return 0;
+}
+
+static void xm2msc_stop_streaming(struct vb2_queue *q)
+{
+ struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(q);
+
+ xm2msc_return_all_buffers(chan_ctx, q, VB2_BUF_STATE_ERROR);
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_OUT);
+ else
+ xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_CAP);
+}
+
+static const struct vb2_ops xm2msc_qops = {
+ .queue_setup = xm2msc_queue_setup,
+ .buf_prepare = xm2msc_buf_prepare,
+ .buf_queue = xm2msc_buf_queue,
+ .start_streaming = xm2msc_start_streaming,
+ .stop_streaming = xm2msc_stop_streaming,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct xm2msc_chan_ctx *chan_ctx = priv;
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = chan_ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &xm2msc_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &xm2msc->dev_mutex;
+ src_vq->dev = xm2msc->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR;
+ dst_vq->drv_priv = chan_ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &xm2msc_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &xm2msc->dev_mutex;
+ dst_vq->dev = xm2msc->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static const struct v4l2_ioctl_ops xm2msc_ioctl_ops = {
+ .vidioc_querycap = xm2msc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = xm2msc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap_mplane = xm2msc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap_mplane = xm2msc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap_mplane = xm2msc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = xm2msc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out_mplane = xm2msc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out_mplane = xm2msc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out_mplane = xm2msc_s_fmt_vid_out,
+
+ .vidioc_reqbufs = xm2msc_reqbufs,
+ .vidioc_querybuf = xm2msc_querybuf,
+ .vidioc_expbuf = xm2msc_expbuf,
+ .vidioc_create_bufs = xm2msc_createbufs,
+
+ .vidioc_qbuf = xm2msc_qbuf,
+ .vidioc_dqbuf = xm2msc_dqbuf,
+
+ .vidioc_streamon = xm2msc_streamon,
+ .vidioc_streamoff = xm2msc_streamoff,
+};
+
+static void xm2msc_set_q_data(struct xm2msc_chan_ctx *chan_ctx,
+ const struct xm2msc_fmt *fmt,
+ enum v4l2_buf_type type)
+{
+ struct xm2msc_q_data *q_data;
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+
+ q_data = get_q_data(chan_ctx, type);
+
+ q_data->fmt = fmt;
+ q_data->width = xm2msc->max_wd;
+ q_data->height = xm2msc->max_ht;
+ q_data->field = V4L2_FIELD_NONE;
+ q_data->nbuffs = q_data->fmt->num_buffs;
+
+ q_data->stride = xm2msc_cal_stride(q_data->width,
+ q_data->fmt->xm2msc_fmt,
+ xm2msc->ppc);
+
+ xm2msc_cal_imagesize(chan_ctx, q_data, type);
+}
+
+static int xm2msc_set_chan_parm(struct xm2msc_chan_ctx *chan_ctx)
+{
+ int ret = 0;
+ unsigned int i;
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+
+ chan_ctx->output_stride_align = 1;
+ chan_ctx->output_height_align = 1;
+ chan_ctx->capture_stride_align = 1;
+ chan_ctx->capture_height_align = 1;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (xm2msc_chk_fmt(xm2msc, i))
+ break;
+ }
+
+ /* No supported format */
+ if (i == ARRAY_SIZE(formats)) {
+ dev_err(xm2msc->dev, "no supported format found\n");
+ return -EINVAL;
+ }
+
+ xm2msc_set_q_data(chan_ctx, &formats[i],
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ xm2msc_set_q_data(chan_ctx, &formats[i],
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ return ret;
+}
+
+static int xm2msc_open(struct file *file)
+{
+ struct xm2m_msc_dev *xm2msc = video_drvdata(file);
+ struct xm2msc_chan_ctx *chan_ctx = NULL;
+ u32 minor, chan;
+ int ret;
+
+ if (mutex_lock_interruptible(&xm2msc->dev_mutex))
+ return -ERESTARTSYS;
+
+ minor = iminor(file_inode(file));
+
+ for (chan = 0; chan < xm2msc->max_chan; chan++) {
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+
+ if ((chan_ctx->status & CHAN_ATTACHED) &&
+ chan_ctx->minor == minor)
+ break;
+ }
+
+ if (chan == xm2msc->max_chan) {
+ v4l2_err(&xm2msc->v4l2_dev,
+ "%s Chan not found with minor = %d\n",
+ __func__, minor);
+ ret = -EBADF;
+ goto unlock;
+ }
+
+ /*
+ * Already opened: do not allow the same channel
+ * to be opened more than once
+ */
+ if (chan_ctx->status & CHAN_OPENED) {
+ v4l2_warn(&xm2msc->v4l2_dev,
+ "%s Chan already opened for minor = %d\n",
+ __func__, minor);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ v4l2_fh_init(&chan_ctx->fh, &chan_ctx->vfd);
+ file->private_data = &chan_ctx->fh;
+ v4l2_fh_add(&chan_ctx->fh);
+
+ chan_ctx->m2m_ctx = v4l2_m2m_ctx_init(chan_ctx->m2m_dev,
+ chan_ctx, &queue_init);
+ if (IS_ERR(chan_ctx->m2m_ctx)) {
+ ret = PTR_ERR(chan_ctx->m2m_ctx);
+ v4l2_err(&xm2msc->v4l2_dev,
+ "%s Chan M2M CTX not creted for minor %d\n",
+ __func__, minor);
+ goto error_m2m;
+ }
+
+ chan_ctx->fh.m2m_ctx = chan_ctx->m2m_ctx;
+ chan_ctx->status |= CHAN_OPENED;
+ chan_ctx->xm2msc_dev = xm2msc;
+ chan_ctx->frames = 0;
+
+ xm2msc_set_chan(chan_ctx, true);
+
+ v4l2_info(&xm2msc->v4l2_dev, "Channel %d instance created\n", chan);
+
+ mutex_unlock(&xm2msc->dev_mutex);
+ xm2msc_pr_chanctx(chan_ctx, __func__);
+ xm2msc_pr_status(xm2msc, __func__);
+ return 0;
+
+error_m2m:
+ v4l2_fh_del(&chan_ctx->fh);
+ v4l2_fh_exit(&chan_ctx->fh);
+unlock:
+ mutex_unlock(&xm2msc->dev_mutex);
+ xm2msc_pr_chanctx(chan_ctx, __func__);
+ xm2msc_pr_status(xm2msc, __func__);
+ return ret;
+}
+
+static int xm2msc_release(struct file *file)
+{
+ struct xm2m_msc_dev *xm2msc = video_drvdata(file);
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(file->private_data);
+
+ if (mutex_lock_interruptible(&xm2msc->dev_mutex))
+ return -ERESTARTSYS;
+
+ v4l2_m2m_ctx_release(chan_ctx->m2m_ctx);
+ v4l2_fh_del(&chan_ctx->fh);
+ v4l2_fh_exit(&chan_ctx->fh);
+ chan_ctx->status &= ~CHAN_OPENED;
+ xm2msc_set_chan(chan_ctx, false);
+
+ v4l2_info(&xm2msc->v4l2_dev, "Channel %d instance released\n",
+ chan_ctx->num);
+
+ mutex_unlock(&xm2msc->dev_mutex);
+ return 0;
+}
+
+static unsigned int xm2msc_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(file->private_data);
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ int ret;
+
+ mutex_lock(&xm2msc->dev_mutex);
+ ret = v4l2_m2m_poll(file, chan_ctx->m2m_ctx, wait);
+ mutex_unlock(&xm2msc->dev_mutex);
+
+ return ret;
+}
+
+static int xm2msc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct xm2msc_chan_ctx *chan_ctx = file->private_data;
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ int ret;
+
+ mutex_lock(&xm2msc->dev_mutex);
+ ret = v4l2_m2m_mmap(file, chan_ctx->m2m_ctx, vma);
+
+ mutex_unlock(&xm2msc->dev_mutex);
+ return ret;
+}
+
+static const struct v4l2_file_operations xm2msc_fops = {
+ .owner = THIS_MODULE,
+ .open = xm2msc_open,
+ .release = xm2msc_release,
+ .poll = xm2msc_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = xm2msc_mmap,
+};
+
+static const struct video_device xm2msc_videodev = {
+ .name = XM2MSC_DRIVER_NAME,
+ .fops = &xm2msc_fops,
+ .ioctl_ops = &xm2msc_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+static const struct v4l2_m2m_ops xm2msc_m2m_ops = {
+ .device_run = xm2msc_device_run,
+ .job_ready = xm2msc_job_ready,
+ .job_abort = xm2msc_job_abort,
+};
+
+static int xm2msc_parse_of(struct platform_device *pdev,
+ struct xm2m_msc_dev *xm2msc)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ int hw_vid_fmt_cnt;
+ const char *vid_fmts[ARRAY_SIZE(formats)];
+ int ret;
+ u32 i, j;
+
+ xm2msc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(xm2msc->clk)) {
+ ret = PTR_ERR(xm2msc->clk);
+ dev_err(dev, "failed to get clk (%d)\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xm2msc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR((__force void *)xm2msc->regs))
+ return PTR_ERR((__force const void *)xm2msc->regs);
+
+ dev_dbg(dev, "IO Mem 0x%llx mapped at %p\n", res->start, xm2msc->regs);
+
+ ret = of_property_read_u32(node, "xlnx,max-chan",
+ &xm2msc->max_chan);
+ if (ret < 0)
+ return ret;
+
+ if (xm2msc->max_chan < XM2MSC_MIN_CHAN ||
+ xm2msc->max_chan > XM2MSC_MAX_CHAN) {
+ dev_err(dev,
+ "Invalid maximum scaler channels : %d",
+ xm2msc->max_chan);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-width",
+ &xm2msc->max_wd);
+ if (ret < 0) {
+ dev_err(dev,
+ "missing xlnx,max-width prop\n");
+ return ret;
+ }
+
+ if (xm2msc->max_wd < XM2MSC_MIN_WIDTH ||
+ xm2msc->max_wd > XM2MSC_MAX_WIDTH) {
+ dev_err(dev, "Invalid width : %d",
+ xm2msc->max_wd);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-height",
+ &xm2msc->max_ht);
+ if (ret < 0) {
+ dev_err(dev, "missing xlnx,max-height prop\n");
+ return ret;
+ }
+
+ if (xm2msc->max_ht < XM2MSC_MIN_HEIGHT ||
+ xm2msc->max_ht > XM2MSC_MAX_HEIGHT) {
+ dev_err(dev, "Invalid height : %d",
+ xm2msc->max_ht);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,dma-addr-width",
+ &xm2msc->dma_addr_size);
+ if (ret || (xm2msc->dma_addr_size != 32 &&
+ xm2msc->dma_addr_size != 64)) {
+ dev_err(dev, "missing/invalid addr width dts prop\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u8(node, "xlnx,pixels-per-clock",
+ &xm2msc->ppc);
+ if (ret || (xm2msc->ppc != 1 && xm2msc->ppc != 2 && xm2msc->ppc != 4)) {
+ dev_err(dev, "missing or invalid pixels per clock dts prop\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-taps",
+ &xm2msc->taps);
+ if (ret || (xm2msc->taps != XSCALER_TAPS_6 &&
+ xm2msc->taps != XSCALER_TAPS_8 &&
+ xm2msc->taps != XSCALER_TAPS_10 &&
+ xm2msc->taps != XSCALER_TAPS_12)) {
+ dev_err(dev, "missing/invalid taps in dts prop\n");
+ return -EINVAL;
+ }
+
+ /* irq_of_parse_and_map() returns 0 on failure, never negative */
+ xm2msc->irq = irq_of_parse_and_map(node, 0);
+ if (!xm2msc->irq) {
+ dev_err(dev, "Unable to get IRQ");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "Max Channel Supported = %d\n", xm2msc->max_chan);
+ dev_dbg(dev, "DMA Addr width Supported = %d\n", xm2msc->dma_addr_size);
+ dev_dbg(dev, "Max col/row Supported = (%d) / (%d)\n",
+ xm2msc->max_wd, xm2msc->max_ht);
+ dev_dbg(dev, "taps Supported = %d\n", xm2msc->taps);
+ /* read supported video formats and update internal table */
+ hw_vid_fmt_cnt = of_property_count_strings(node, "xlnx,vid-formats");
+
+ ret = of_property_read_string_array(node, "xlnx,vid-formats",
+ vid_fmts, hw_vid_fmt_cnt);
+ if (ret < 0) {
+ dev_err(dev,
+ "Missing or invalid xlnx,vid-formats dts prop\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "Supported format = ");
+ for (i = 0; i < hw_vid_fmt_cnt; i++) {
+ const char *vid_fmt_name = vid_fmts[i];
+
+ for (j = 0; j < ARRAY_SIZE(formats); j++) {
+ const char *dts_name = formats[j].name;
+
+ if (strcmp(vid_fmt_name, dts_name))
+ continue;
+ dev_dbg(dev, "%s ", dts_name);
+
+ xm2msc_set_fmt(xm2msc, j);
+ }
+ }
+ dev_dbg(dev, "\n");
+ xm2msc->rst_gpio = devm_gpiod_get(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(xm2msc->rst_gpio)) {
+ ret = PTR_ERR(xm2msc->rst_gpio);
+ if (ret == -EPROBE_DEFER)
+ dev_info(dev,
+ "Probe deferred due to GPIO reset defer\n");
+ else
+ dev_err(dev,
+ "Unable to locate reset property in dt\n");
+ return ret;
+ }
+
+ return 0;
+}
+
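+/*
+ * Illustrative device tree node for xm2msc_parse_of() (addresses and
+ * values are made up; the xlnx,vid-formats strings must match the
+ * names in the formats[] table):
+ *
+ *	scaler@a0000000 {
+ *		compatible = "xlnx,v-multi-scaler-v1.0";
+ *		reg = <0xa0000000 0x10000>;
+ *		interrupts = <0 89 4>;
+ *		clocks = <&clk 71>;
+ *		xlnx,max-chan = <4>;
+ *		xlnx,max-width = <3840>;
+ *		xlnx,max-height = <2160>;
+ *		xlnx,dma-addr-width = <64>;
+ *		xlnx,pixels-per-clock = /bits/ 8 <2>;
+ *		xlnx,num-taps = <6>;
+ *		xlnx,vid-formats = "yuyv", "nv12";
+ *		reset-gpios = <&gpio 78 1>;
+ *	};
+ */
+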
+static void xm2msc_unreg_video_n_m2m(struct xm2m_msc_dev *xm2msc)
+{
+ struct xm2msc_chan_ctx *chan_ctx;
+ unsigned int chan;
+
+ for (chan = 0; chan < xm2msc->max_chan; chan++) {
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+ if (!(chan_ctx->status & CHAN_ATTACHED))
+ break; /* video devices are registered sequentially */
+ video_unregister_device(&chan_ctx->vfd);
+ chan_ctx->status &= ~CHAN_ATTACHED;
+
+ if (!IS_ERR(chan_ctx->m2m_dev))
+ v4l2_m2m_release(chan_ctx->m2m_dev);
+ }
+}
+
+static int xm2m_msc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct xm2m_msc_dev *xm2msc;
+ struct xm2msc_chan_ctx *chan_ctx;
+ struct video_device *vfd;
+ unsigned int chan;
+
+ xm2msc = devm_kzalloc(&pdev->dev, sizeof(*xm2msc), GFP_KERNEL);
+ if (!xm2msc)
+ return -ENOMEM;
+
+ ret = xm2msc_parse_of(pdev, xm2msc);
+ if (ret < 0)
+ return ret;
+
+ xm2msc->dev = &pdev->dev;
+
+ ret = clk_prepare_enable(xm2msc->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clk (%d)\n", ret);
+ return ret;
+ }
+
+ xm2msc_reset(xm2msc);
+
+ spin_lock_init(&xm2msc->lock);
+
+ ret = v4l2_device_register(&pdev->dev, &xm2msc->v4l2_dev);
+ if (ret)
+ goto reg_dev_err;
+
+ for (chan = 0; chan < xm2msc->max_chan; chan++) {
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+
+ vfd = &chan_ctx->vfd;
+ *vfd = xm2msc_videodev;
+ vfd->lock = &xm2msc->dev_mutex;
+ vfd->v4l2_dev = &xm2msc->v4l2_dev;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, chan);
+ if (ret) {
+ v4l2_err(&xm2msc->v4l2_dev,
+ "Failed to register video dev for chan %d\n",
+ chan);
+ goto unreg_dev;
+ }
+
+ chan_ctx->status = CHAN_ATTACHED;
+
+ video_set_drvdata(vfd, xm2msc);
+ snprintf(vfd->name, sizeof(vfd->name),
+ "%s", xm2msc_videodev.name);
+ v4l2_info(&xm2msc->v4l2_dev,
+ " Device registered as /dev/video%d\n", vfd->num);
+
+ dev_dbg(xm2msc->dev, "%s Device registered as /dev/video%d\n",
+ __func__, vfd->num);
+
+ chan_ctx->m2m_dev = v4l2_m2m_init(&xm2msc_m2m_ops);
+ if (IS_ERR(chan_ctx->m2m_dev)) {
+ v4l2_err(&xm2msc->v4l2_dev,
+ "Failed to init mem2mem device for chan %d\n",
+ chan);
+ ret = PTR_ERR(chan_ctx->m2m_dev);
+ goto unreg_dev;
+ }
+ chan_ctx->xm2msc_dev = xm2msc;
+ chan_ctx->regs = xm2msc->regs + XM2MSC_CHAN_REGS_START(chan);
+ if (chan > 4) /* TODO: To be fixed in HW */
+ chan_ctx->regs += XM2MSC_RESERVED_AREA;
+ chan_ctx->num = chan;
+ chan_ctx->minor = vfd->minor;
+
+ /* Set channel parameters to default values */
+ ret = xm2msc_set_chan_parm(chan_ctx);
+ if (ret)
+ goto unreg_dev;
+
+ xm2msc_pr_chanctx(chan_ctx, __func__);
+ }
+
+ mutex_init(&xm2msc->dev_mutex);
+ mutex_init(&xm2msc->mutex);
+ init_waitqueue_head(&xm2msc->isr_finished);
+
+ ret = devm_request_irq(&pdev->dev, xm2msc->irq,
+ xm2msc_isr, IRQF_SHARED,
+ XM2MSC_DRIVER_NAME, xm2msc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to register IRQ\n");
+ goto unreg_dev;
+ }
+
+ platform_set_drvdata(pdev, xm2msc);
+
+ return 0;
+
+unreg_dev:
+ xm2msc_unreg_video_n_m2m(xm2msc);
+ v4l2_device_unregister(&xm2msc->v4l2_dev);
+reg_dev_err:
+ clk_disable_unprepare(xm2msc->clk);
+ return ret;
+}
+
+static int xm2m_msc_remove(struct platform_device *pdev)
+{
+ struct xm2m_msc_dev *xm2msc = platform_get_drvdata(pdev);
+
+ xm2msc_unreg_video_n_m2m(xm2msc);
+ v4l2_device_unregister(&xm2msc->v4l2_dev);
+ clk_disable_unprepare(xm2msc->clk);
+ return 0;
+}
+
+static const struct of_device_id xm2m_msc_of_id_table[] = {
+ {.compatible = "xlnx,v-multi-scaler-v1.0"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, xm2m_msc_of_id_table);
+
+static struct platform_driver xm2m_msc_driver = {
+ .driver = {
+ .name = "xilinx-multiscaler",
+ .of_match_table = xm2m_msc_of_id_table,
+ },
+ .probe = xm2m_msc_probe,
+ .remove = xm2m_msc_remove,
+};
+
+module_platform_driver(xm2m_msc_driver);
+
+MODULE_DESCRIPTION("Xilinx M2M Multi-Scaler Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("xlnx_m2m_multiscaler_dev");
diff --git a/drivers/media/platform/xilinx/xilinx-remapper.c b/drivers/media/platform/xilinx/xilinx-remapper.c
new file mode 100644
index 000000000000..d2e84ec1f2d6
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-remapper.c
@@ -0,0 +1,546 @@
+/*
+ * Xilinx Video Remapper
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XREMAP_MIN_WIDTH 1
+#define XREMAP_DEF_WIDTH 1920
+#define XREMAP_MAX_WIDTH 65535
+#define XREMAP_MIN_HEIGHT 1
+#define XREMAP_DEF_HEIGHT 1080
+#define XREMAP_MAX_HEIGHT 65535
+
+#define XREMAP_PAD_SINK 0
+#define XREMAP_PAD_SOURCE 1
+
+/**
+ * struct xremap_mapping_output - Output format description
+ * @code: media bus pixel code after remapping
+ * @num_components: number of pixel components after remapping
+ * @component_maps: configuration array corresponding to this output
+ */
+struct xremap_mapping_output {
+ u32 code;
+ unsigned int num_components;
+ unsigned int component_maps[4];
+};
+
+/**
+ * struct xremap_mapping - Input-output remapping description
+ * @code: media bus pixel code before remapping
+ * @width: video bus width in bits
+ * @num_components: number of pixel components before remapping
+ * @outputs: array of possible output formats
+ */
+struct xremap_mapping {
+ u32 code;
+ unsigned int width;
+ unsigned int num_components;
+ const struct xremap_mapping_output *outputs;
+};
+
+/**
+ * struct xremap_device - Xilinx Video Remapper device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @config: device configuration parsed from its DT node
+ * @config.width: video bus width in bits
+ * @config.num_s_components: number of pixel components at the input
+ * @config.num_m_components: number of pixel components at the output
+ * @config.component_maps: component remapping configuration
+ * @default_mapping: Default mapping compatible with the configuration
+ * @default_output: Default output format for the default mapping
+ */
+struct xremap_device {
+ struct xvip_device xvip;
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt formats[2];
+
+ struct {
+ unsigned int width;
+ unsigned int num_s_components;
+ unsigned int num_m_components;
+ unsigned int component_maps[4];
+ } config;
+
+ const struct xremap_mapping *default_mapping;
+ const struct xremap_mapping_output *default_output;
+};
+
+static inline struct xremap_device *to_remap(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xremap_device, xvip.subdev);
+}
+
+/* -----------------------------------------------------------------------------
+ * Mappings
+ */
+
+static const struct xremap_mapping xremap_mappings[] = {
+ {
+ .code = MEDIA_BUS_FMT_RBG888_1X24,
+ .width = 8,
+ .num_components = 3,
+ .outputs = (const struct xremap_mapping_output[]) {
+ { MEDIA_BUS_FMT_RGB888_1X32_PADHI, 4, { 1, 0, 2, 4 } },
+ { },
+ },
+ },
+};
+
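+/*
+ * Reading the table above: the single mapping converts 24-bit RBG
+ * (three 8-bit components) to 32-bit padded RGB. Each component_maps
+ * entry selects the source component driving that output slot; the
+ * value 4 presumably addresses the padding slot (the DT parser below
+ * accepts map values up to 4), though the exact IP semantics are an
+ * assumption here.
+ */
+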
+static const struct xremap_mapping_output *
+xremap_match_mapping(struct xremap_device *xremap,
+ const struct xremap_mapping *mapping)
+{
+ const struct xremap_mapping_output *output;
+
+ if (mapping->width != xremap->config.width ||
+ mapping->num_components != xremap->config.num_s_components)
+ return NULL;
+
+ for (output = mapping->outputs; output->code; ++output) {
+ unsigned int i;
+
+ if (output->num_components != xremap->config.num_m_components)
+ continue;
+
+ for (i = 0; i < output->num_components; ++i) {
+ if (output->component_maps[i] !=
+ xremap->config.component_maps[i])
+ break;
+ }
+
+ if (i == output->num_components)
+ return output;
+ }
+
+ return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int xremap_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct xremap_device *xremap = to_remap(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ if (code->pad == XREMAP_PAD_SINK) {
+ const struct xremap_mapping *mapping = NULL;
+ unsigned int index = code->index + 1;
+ unsigned int i;
+
+ /* Iterate through the mappings and skip the ones that don't
+ * match the remapper configuration until we reach the requested
+ * index.
+ */
+ for (i = 0; i < ARRAY_SIZE(xremap_mappings) && index; ++i) {
+ mapping = &xremap_mappings[i];
+
+ if (xremap_match_mapping(xremap, mapping))
+ index--;
+ }
+
+ /* If the index was larger than the number of supported mappings
+ * return -EINVAL.
+ */
+ if (index > 0)
+ return -EINVAL;
+
+ code->code = mapping->code;
+ } else {
+ if (code->index)
+ return -EINVAL;
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, code->pad);
+ code->code = format->code;
+ }
+
+ return 0;
+}
+
+static int xremap_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ if (fse->pad == XREMAP_PAD_SINK) {
+ /* The remapper doesn't restrict the size on the sink pad. */
+ fse->min_width = XREMAP_MIN_WIDTH;
+ fse->max_width = XREMAP_MAX_WIDTH;
+ fse->min_height = XREMAP_MIN_HEIGHT;
+ fse->max_height = XREMAP_MAX_HEIGHT;
+ } else {
+ /* The size on the source pad is fixed and always identical to
+ * the size on the sink pad.
+ */
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *
+xremap_get_pad_format(struct xremap_device *xremap,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xremap->xvip.subdev, cfg,
+ pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xremap->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xremap_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xremap_device *xremap = to_remap(subdev);
+
+ fmt->format = *xremap_get_pad_format(xremap, cfg, fmt->pad, fmt->which);
+
+ return 0;
+}
+
+static int xremap_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xremap_device *xremap = to_remap(subdev);
+ const struct xremap_mapping_output *output;
+ const struct xremap_mapping *mapping;
+ struct v4l2_mbus_framefmt *format;
+ unsigned int i;
+
+ format = xremap_get_pad_format(xremap, cfg, fmt->pad, fmt->which);
+
+ if (fmt->pad == XREMAP_PAD_SOURCE) {
+ fmt->format = *format;
+ return 0;
+ }
+
+ /* Find the mapping. If the requested format has no mapping, use the
+ * default.
+ */
+ for (i = 0; i < ARRAY_SIZE(xremap_mappings); ++i) {
+ mapping = &xremap_mappings[i];
+ if (mapping->code != fmt->format.code)
+ continue;
+
+ output = xremap_match_mapping(xremap, mapping);
+ if (output)
+ break;
+ }
+
+ if (!output) {
+ mapping = xremap->default_mapping;
+ output = xremap->default_output;
+ }
+
+ format->code = mapping->code;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XREMAP_MIN_WIDTH, XREMAP_MAX_WIDTH);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XREMAP_MIN_HEIGHT, XREMAP_MAX_HEIGHT);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = xremap_get_pad_format(xremap, cfg, XREMAP_PAD_SOURCE,
+ fmt->which);
+ *format = fmt->format;
+ format->code = output->code;
+
+ return 0;
+}
+
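+/*
+ * Example of the propagation above (illustrative only): setting the
+ * sink pad to MEDIA_BUS_FMT_RBG888_1X24 at 1920x1080 keeps the size
+ * but rewrites the source pad code to MEDIA_BUS_FMT_RGB888_1X32_PADHI
+ * per the mapping table; direct sets on the source pad are ignored and
+ * simply return the current format.
+ */
+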
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+/**
+ * xremap_init_formats - Initialize formats on all pads
+ * @subdev: remapper V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static void xremap_init_formats(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ struct xremap_device *xremap = to_remap(subdev);
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+
+ format.pad = XREMAP_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = xremap->default_mapping->code;
+ format.format.width = XREMAP_DEF_WIDTH;
+ format.format.height = XREMAP_DEF_HEIGHT;
+
+ xremap_set_format(subdev, fh ? fh->pad : NULL, &format);
+}
+
+static int xremap_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ xremap_init_formats(subdev, fh);
+
+ return 0;
+}
+
+static int xremap_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_core_ops xremap_core_ops = {
+};
+
+static struct v4l2_subdev_video_ops xremap_video_ops = {
+};
+
+static struct v4l2_subdev_pad_ops xremap_pad_ops = {
+ .enum_mbus_code = xremap_enum_mbus_code,
+ .enum_frame_size = xremap_enum_frame_size,
+ .get_fmt = xremap_get_format,
+ .set_fmt = xremap_set_format,
+};
+
+static struct v4l2_subdev_ops xremap_ops = {
+ .core = &xremap_core_ops,
+ .video = &xremap_video_ops,
+ .pad = &xremap_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xremap_internal_ops = {
+ .open = xremap_open,
+ .close = xremap_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xremap_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xremap_parse_of(struct xremap_device *xremap)
+{
+ struct device_node *node = xremap->xvip.dev->of_node;
+ unsigned int i;
+ int ret;
+
+ /* Parse the DT properties. */
+ ret = of_property_read_u32(node, "xlnx,video-width",
+ &xremap->config.width);
+ if (ret < 0) {
+ dev_dbg(xremap->xvip.dev, "unable to parse %s property\n",
+ "xlnx,video-width");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "#xlnx,s-components",
+ &xremap->config.num_s_components);
+ if (ret < 0) {
+ dev_dbg(xremap->xvip.dev, "unable to parse %s property\n",
+ "#xlnx,s-components");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "#xlnx,m-components",
+ &xremap->config.num_m_components);
+ if (ret < 0) {
+ dev_dbg(xremap->xvip.dev, "unable to parse %s property\n",
+ "#xlnx,m-components");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_array(node, "xlnx,component-maps",
+ xremap->config.component_maps,
+ xremap->config.num_m_components);
+ if (ret < 0) {
+ dev_dbg(xremap->xvip.dev, "unable to parse %s property\n",
+ "xlnx,component-maps");
+ return -EINVAL;
+ }
+
+ /* Validate the parsed values. */
+ if (xremap->config.num_s_components > 4 ||
+ xremap->config.num_m_components > 4) {
+ dev_dbg(xremap->xvip.dev,
+ "invalid number of components (s %u m %u)\n",
+ xremap->config.num_s_components,
+ xremap->config.num_m_components);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < xremap->config.num_m_components; ++i) {
+ if (xremap->config.component_maps[i] > 4) {
+ dev_dbg(xremap->xvip.dev, "invalid map %u @%u\n",
+ xremap->config.component_maps[i], i);
+ return -EINVAL;
+ }
+ }
+
+ /* Find the first mapping that matches the remapper configuration and
+ * store it as the default mapping.
+ */
+ for (i = 0; i < ARRAY_SIZE(xremap_mappings); ++i) {
+ const struct xremap_mapping_output *output;
+ const struct xremap_mapping *mapping;
+
+ mapping = &xremap_mappings[i];
+ output = xremap_match_mapping(xremap, mapping);
+
+ if (output) {
+ xremap->default_mapping = mapping;
+ xremap->default_output = output;
+ return 0;
+ }
+ }
+
+ dev_err(xremap->xvip.dev,
+ "No format compatible with device configuration\n");
+
+ return -EINVAL;
+}
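+
+/*
+ * Illustrative DT fragment (an assumption matching the properties parsed
+ * above, not taken from the bindings): a remapper that drops the alpha
+ * component from a 4-component input could be described as
+ *
+ *	xlnx,video-width = <8>;
+ *	#xlnx,s-components = <4>;
+ *	#xlnx,m-components = <3>;
+ *	xlnx,component-maps = <0 1 2>;
+ */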
+
+static int xremap_probe(struct platform_device *pdev)
+{
+ struct xremap_device *xremap;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ xremap = devm_kzalloc(&pdev->dev, sizeof(*xremap), GFP_KERNEL);
+ if (!xremap)
+ return -ENOMEM;
+
+ xremap->xvip.dev = &pdev->dev;
+
+ ret = xremap_parse_of(xremap);
+ if (ret < 0)
+ return ret;
+
+ xremap->xvip.clk = devm_clk_get(xremap->xvip.dev, NULL);
+ if (IS_ERR(xremap->xvip.clk))
+ return PTR_ERR(xremap->xvip.clk);
+
+ clk_prepare_enable(xremap->xvip.clk);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xremap->xvip.subdev;
+ v4l2_subdev_init(subdev, &xremap_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xremap_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xremap);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ xremap_init_formats(subdev, NULL);
+
+ xremap->pads[XREMAP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xremap->pads[XREMAP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xremap_media_ops;
+ ret = media_entity_pads_init(&subdev->entity, 2, xremap->pads);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xremap);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ dev_info(&pdev->dev, "device registered\n");
+
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+ clk_disable_unprepare(xremap->xvip.clk);
+ return ret;
+}
+
+static int xremap_remove(struct platform_device *pdev)
+{
+ struct xremap_device *xremap = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xremap->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+
+ clk_disable_unprepare(xremap->xvip.clk);
+
+ return 0;
+}
+
+static const struct of_device_id xremap_of_id_table[] = {
+ { .compatible = "xlnx,v-remapper" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xremap_of_id_table);
+
+static struct platform_driver xremap_driver = {
+ .driver = {
+ .name = "xilinx-remapper",
+ .of_match_table = xremap_of_id_table,
+ },
+ .probe = xremap_probe,
+ .remove = xremap_remove,
+};
+
+module_platform_driver(xremap_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Xilinx Video Remapper Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-rgb2yuv.c b/drivers/media/platform/xilinx/xilinx-rgb2yuv.c
new file mode 100644
index 000000000000..20ae95946ca3
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-rgb2yuv.c
@@ -0,0 +1,566 @@
+/*
+ * Xilinx RGB to YUV Converter
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XRGB2YUV_YMAX 0x100
+#define XRGB2YUV_YMIN 0x104
+#define XRGB2YUV_CBMAX 0x108
+#define XRGB2YUV_CBMIN 0x10c
+#define XRGB2YUV_CRMAX 0x110
+#define XRGB2YUV_CRMIN 0x114
+#define XRGB2YUV_YOFFSET 0x118
+#define XRGB2YUV_CBOFFSET 0x11c
+#define XRGB2YUV_CROFFSET 0x120
+#define XRGB2YUV_ACOEF 0x124
+#define XRGB2YUV_BCOEF 0x128
+#define XRGB2YUV_CCOEF 0x12c
+#define XRGB2YUV_DCOEF 0x130
+
+/**
+ * struct xrgb2yuv_device - Xilinx RGB2YUV device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @vip_formats: Xilinx Video IP formats
+ * @ctrl_handler: control handler
+ */
+struct xrgb2yuv_device {
+ struct xvip_device xvip;
+
+ struct media_pad pads[2];
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+
+ struct v4l2_ctrl_handler ctrl_handler;
+};
+
+static inline struct xrgb2yuv_device *to_rgb2yuv(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xrgb2yuv_device, xvip.subdev);
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xrgb2yuv_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xrgb2yuv_device *xrgb2yuv = to_rgb2yuv(subdev);
+
+ if (!enable) {
+ xvip_stop(&xrgb2yuv->xvip);
+ return 0;
+ }
+
+ xvip_set_frame_size(&xrgb2yuv->xvip, &xrgb2yuv->formats[XVIP_PAD_SINK]);
+
+ xvip_start(&xrgb2yuv->xvip);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__xrgb2yuv_get_pad_format(struct xrgb2yuv_device *xrgb2yuv,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xrgb2yuv->xvip.subdev, cfg,
+ pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xrgb2yuv->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xrgb2yuv_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xrgb2yuv_device *xrgb2yuv = to_rgb2yuv(subdev);
+
+ fmt->format = *__xrgb2yuv_get_pad_format(xrgb2yuv, cfg, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static int xrgb2yuv_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xrgb2yuv_device *xrgb2yuv = to_rgb2yuv(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xrgb2yuv_get_pad_format(xrgb2yuv, cfg, fmt->pad, fmt->which);
+
+ if (fmt->pad == XVIP_PAD_SOURCE) {
+ fmt->format = *format;
+ return 0;
+ }
+
+ xvip_set_format_size(format, fmt);
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = __xrgb2yuv_get_pad_format(xrgb2yuv, cfg, XVIP_PAD_SOURCE,
+ fmt->which);
+
+ xvip_set_format_size(format, fmt);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+
+static int xrgb2yuv_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xrgb2yuv_device *xrgb2yuv = to_rgb2yuv(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xrgb2yuv->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xrgb2yuv->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int xrgb2yuv_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static int xrgb2yuv_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct xrgb2yuv_device *xrgb2yuv =
+ container_of(ctrl->handler, struct xrgb2yuv_device,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_RGB2YUV_YMAX:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_YMAX, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_YMIN:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_YMIN, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CBMAX:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CBMAX, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CBMIN:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CBMIN, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CRMAX:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CRMAX, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CRMIN:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CRMIN, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_YOFFSET:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_YOFFSET, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CBOFFSET:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CBOFFSET, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CROFFSET:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CROFFSET, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_ACOEF:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_ACOEF, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_BCOEF:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_BCOEF, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CCOEF:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CCOEF, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_DCOEF:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_DCOEF, ctrl->val);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops xrgb2yuv_ctrl_ops = {
+ .s_ctrl = xrgb2yuv_s_ctrl,
+};
+
+static struct v4l2_subdev_video_ops xrgb2yuv_video_ops = {
+ .s_stream = xrgb2yuv_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xrgb2yuv_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xrgb2yuv_get_format,
+ .set_fmt = xrgb2yuv_set_format,
+};
+
+static struct v4l2_subdev_ops xrgb2yuv_ops = {
+ .video = &xrgb2yuv_video_ops,
+ .pad = &xrgb2yuv_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xrgb2yuv_internal_ops = {
+ .open = xrgb2yuv_open,
+ .close = xrgb2yuv_close,
+};
+
+/*
+ * Control Configs
+ */
+
+static struct v4l2_ctrl_config xrgb2yuv_ctrls[] = {
+ {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_YMAX,
+ .name = "RGB to YUV: Maximum Y value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_YMIN,
+ .name = "RGB to YUV: Minimum Y value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CBMAX,
+ .name = "RGB to YUV: Maximum Cb value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CBMIN,
+ .name = "RGB to YUV: Minimum Cb value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CRMAX,
+ .name = "RGB to YUV: Maximum Cr value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CRMIN,
+ .name = "RGB to YUV: Minimum Cr value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_YOFFSET,
+ .name = "RGB to YUV: Luma offset",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CBOFFSET,
+ .name = "RGB to YUV: Chroma Cb offset",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CROFFSET,
+ .name = "RGB to YUV: Chroma Cr offset",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_ACOEF,
+ .name = "RGB to YUV: CA coefficient",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = -((1 << 17) - 1),
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_BCOEF,
+ .name = "RGB to YUV: CB coefficient",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = -((1 << 17) - 1),
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CCOEF,
+ .name = "RGB to YUV: CC coefficient",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = -((1 << 17) - 1),
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_DCOEF,
+ .name = "RGB to YUV: CD coefficient",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = -((1 << 17) - 1),
+ .max = (1 << 17) - 1,
+ .step = 1,
+ },
+};
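+
+/*
+ * Note (illustrative, not from the original sources): the array order
+ * mirrors the register layout starting at XRGB2YUV_YMAX, which is what
+ * lets the probe routine read each control's hardware default with
+ * xvip_read(..., XRGB2YUV_YMAX + i * 4).
+ */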
+
+/*
+ * Media Operations
+ */
+
+static const struct media_entity_operations xrgb2yuv_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * Power Management
+ */
+
+static int __maybe_unused xrgb2yuv_pm_suspend(struct device *dev)
+{
+ struct xrgb2yuv_device *xrgb2yuv = dev_get_drvdata(dev);
+
+ xvip_suspend(&xrgb2yuv->xvip);
+
+ return 0;
+}
+
+static int __maybe_unused xrgb2yuv_pm_resume(struct device *dev)
+{
+ struct xrgb2yuv_device *xrgb2yuv = dev_get_drvdata(dev);
+
+ xvip_resume(&xrgb2yuv->xvip);
+
+ return 0;
+}
+
+/*
+ * Platform Device Driver
+ */
+
+static int xrgb2yuv_parse_of(struct xrgb2yuv_device *xrgb2yuv)
+{
+ struct device *dev = xrgb2yuv->xvip.dev;
+ struct device_node *node = xrgb2yuv->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id;
+ int ret;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ const struct xvip_video_format *vip_format;
+
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ ret = of_property_read_u32(port, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "no reg in DT");
+ return ret;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "invalid reg in DT");
+ return -EINVAL;
+ }
+
+ xrgb2yuv->vip_formats[port_id] = vip_format;
+ }
+ }
+
+ return 0;
+}
+
+static int xrgb2yuv_probe(struct platform_device *pdev)
+{
+ struct xrgb2yuv_device *xrgb2yuv;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *default_format;
+ unsigned int i;
+ int ret;
+
+ xrgb2yuv = devm_kzalloc(&pdev->dev, sizeof(*xrgb2yuv), GFP_KERNEL);
+ if (!xrgb2yuv)
+ return -ENOMEM;
+
+ xrgb2yuv->xvip.dev = &pdev->dev;
+
+ ret = xrgb2yuv_parse_of(xrgb2yuv);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xrgb2yuv->xvip);
+ if (ret < 0)
+ return ret;
+
+ /* Reset and initialize the core */
+ xvip_reset(&xrgb2yuv->xvip);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xrgb2yuv->xvip.subdev;
+ v4l2_subdev_init(subdev, &xrgb2yuv_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xrgb2yuv_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xrgb2yuv);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize default and active formats */
+ default_format = &xrgb2yuv->default_formats[XVIP_PAD_SINK];
+ default_format->code = xrgb2yuv->vip_formats[XVIP_PAD_SINK]->code;
+ default_format->field = V4L2_FIELD_NONE;
+ default_format->colorspace = V4L2_COLORSPACE_SRGB;
+ xvip_get_frame_size(&xrgb2yuv->xvip, default_format);
+
+ xrgb2yuv->formats[XVIP_PAD_SINK] = *default_format;
+
+ default_format = &xrgb2yuv->default_formats[XVIP_PAD_SOURCE];
+ *default_format = xrgb2yuv->default_formats[XVIP_PAD_SINK];
+ default_format->code = xrgb2yuv->vip_formats[XVIP_PAD_SOURCE]->code;
+
+ xrgb2yuv->formats[XVIP_PAD_SOURCE] = *default_format;
+
+ xrgb2yuv->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xrgb2yuv->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xrgb2yuv_media_ops;
+ ret = media_entity_pads_init(&subdev->entity, 2, xrgb2yuv->pads);
+ if (ret < 0)
+ goto error;
+
+ v4l2_ctrl_handler_init(&xrgb2yuv->ctrl_handler, 13);
+
+ for (i = 0; i < ARRAY_SIZE(xrgb2yuv_ctrls); i++) {
+ xrgb2yuv_ctrls[i].def = xvip_read(&xrgb2yuv->xvip,
+ XRGB2YUV_YMAX + i * 4);
+ v4l2_ctrl_new_custom(&xrgb2yuv->ctrl_handler,
+ &xrgb2yuv_ctrls[i], NULL);
+ }
+
+ if (xrgb2yuv->ctrl_handler.error) {
+ dev_err(&pdev->dev, "failed to add controls\n");
+ ret = xrgb2yuv->ctrl_handler.error;
+ goto error;
+ }
+ subdev->ctrl_handler = &xrgb2yuv->ctrl_handler;
+
+ platform_set_drvdata(pdev, xrgb2yuv);
+
+ xvip_print_version(&xrgb2yuv->xvip);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&xrgb2yuv->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xrgb2yuv->xvip);
+ return ret;
+}
+
+static int xrgb2yuv_remove(struct platform_device *pdev)
+{
+ struct xrgb2yuv_device *xrgb2yuv = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xrgb2yuv->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xrgb2yuv->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xrgb2yuv->xvip);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xrgb2yuv_pm_ops, xrgb2yuv_pm_suspend,
+ xrgb2yuv_pm_resume);
+
+static const struct of_device_id xrgb2yuv_of_id_table[] = {
+ { .compatible = "xlnx,v-rgb2yuv-7.1" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xrgb2yuv_of_id_table);
+
+static struct platform_driver xrgb2yuv_driver = {
+ .driver = {
+ .name = "xilinx-rgb2yuv",
+ .pm = &xrgb2yuv_pm_ops,
+ .of_match_table = xrgb2yuv_of_id_table,
+ },
+ .probe = xrgb2yuv_probe,
+ .remove = xrgb2yuv_remove,
+};
+
+module_platform_driver(xrgb2yuv_driver);
+
+MODULE_DESCRIPTION("Xilinx RGB to YUV Converter Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-scaler.c b/drivers/media/platform/xilinx/xilinx-scaler.c
new file mode 100644
index 000000000000..bb0d52627a50
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-scaler.c
@@ -0,0 +1,708 @@
+/*
+ * Xilinx Scaler
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/fixp-arith.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XSCALER_MIN_WIDTH 32
+#define XSCALER_MAX_WIDTH 4096
+#define XSCALER_MIN_HEIGHT 32
+#define XSCALER_MAX_HEIGHT 4096
+
+#define XSCALER_HSF 0x0100
+#define XSCALER_VSF 0x0104
+#define XSCALER_SF_SHIFT 20
+#define XSCALER_SF_MASK 0xffffff
+#define XSCALER_SOURCE_SIZE 0x0108
+#define XSCALER_SIZE_HORZ_SHIFT 0
+#define XSCALER_SIZE_VERT_SHIFT 16
+#define XSCALER_SIZE_MASK 0xfff
+#define XSCALER_HAPERTURE 0x010c
+#define XSCALER_VAPERTURE 0x0110
+#define XSCALER_APERTURE_START_SHIFT 0
+#define XSCALER_APERTURE_END_SHIFT 16
+#define XSCALER_OUTPUT_SIZE 0x0114
+#define XSCALER_COEF_DATA_IN 0x0134
+#define XSCALER_COEF_DATA_IN_SHIFT 16
+
+/* Fixed point operations */
+#define FRAC_N 8
+
+static inline s16 fixp_new(s16 a)
+{
+ return a << FRAC_N;
+}
+
+static inline s16 fixp_mult(s16 a, s16 b)
+{
+ return ((s32)(a * b)) >> FRAC_N;
+}
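+
+/*
+ * Illustrative note (not part of the original driver): with FRAC_N = 8
+ * these helpers implement Q8 fixed point. For example, fixp_new(3)
+ * yields 0x300 (3.0), and fixp_mult(0x180, 0x200) yields 0x300, i.e.
+ * 1.5 * 2.0 = 3.0. The product is evaluated in at least 32 bits before
+ * the right shift, so two in-range s16 operands cannot overflow.
+ */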
+
+/**
+ * struct xscaler_device - Xilinx Scaler device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @vip_format: Xilinx Video IP format
+ * @crop: Active crop rectangle for the sink pad
+ * @num_hori_taps: number of horizontal taps
+ * @num_vert_taps: number of vertical taps
+ * @max_num_phases: maximum number of phases
+ * @separate_yc_coef: separate coefficients for Luma(y) and Chroma(c)
+ * @separate_hv_coef: separate coefficients for Horizontal(h) and Vertical(v)
+ */
+struct xscaler_device {
+ struct xvip_device xvip;
+
+ struct media_pad pads[2];
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_format;
+ struct v4l2_rect crop;
+
+ u32 num_hori_taps;
+ u32 num_vert_taps;
+ u32 max_num_phases;
+ bool separate_yc_coef;
+ bool separate_hv_coef;
+};
+
+static inline struct xscaler_device *to_scaler(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xscaler_device, xvip.subdev);
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+
+/**
+ * lanczos - Compute a Lanczos resampling kernel coefficient
+ * @x: phase
+ * @a: Lanczos kernel size
+ *
+ * Return: the coefficient value in fixed point format.
+ */
+static s16 lanczos(s16 x, s16 a)
+{
+ s16 pi;
+ s16 numerator;
+ s16 denominator;
+ s16 temp;
+
+ if (x < -a || x > a)
+ return 0;
+ else if (x == 0)
+ return fixp_new(1);
+
+ /* a * sin(pi * x) * sin(pi * x / a) / (pi * pi * x * x) */
+
+ pi = (fixp_new(157) << FRAC_N) / fixp_new(50);
+
+ if (x < 0)
+ x = -x;
+
+ /* sin(pi * x) */
+ temp = fixp_mult(fixp_new(180), x);
+ temp = fixp_sin16(temp >> FRAC_N);
+
+ /* a * sin(pi * x) */
+ numerator = fixp_mult(temp, a);
+
+ /* sin(pi * x / a) */
+ temp = (fixp_mult(fixp_new(180), x) << FRAC_N) / a;
+ temp = fixp_sin16(temp >> FRAC_N);
+
+ /* a * sin(pi * x) * sin(pi * x / a) */
+ numerator = fixp_mult(temp, numerator);
+
+ /* pi * pi * x * x */
+ denominator = fixp_mult(pi, pi);
+ temp = fixp_mult(x, x);
+ denominator = fixp_mult(temp, denominator);
+
+ return (numerator << FRAC_N) / denominator;
+}
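+
+/*
+ * Worked example (illustrative, not from the original sources): for a
+ * 4-tap kernel, a = fixp_new(2), the centre phase x = 0 returns
+ * fixp_new(1) (unity gain) and any |x| >= a returns 0, so the window
+ * tapers smoothly from the centre tap to the kernel edges.
+ */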
+
+/**
+ * xscaler_set_coefs - generate and program the coefficient table
+ * @xscaler: scaler device
+ * @taps: maximum coefficient tap index
+ *
+ * Generate the coefficient table using Lanczos resampling, and program
+ * generated coefficients to the scaler. The generated coefficients are
+ * supposed to work regardless of resolutions.
+ *
+ * Return: 0 if the coefficient table is programmed, and -ENOMEM if memory
+ * allocation for the table fails.
+ */
+static int xscaler_set_coefs(struct xscaler_device *xscaler, s16 taps)
+{
+ s16 *coef;
+ s16 dy;
+ u32 coef_val;
+ u16 phases = xscaler->max_num_phases;
+ u16 i;
+ u16 j;
+
+ coef = kcalloc(phases, sizeof(*coef), GFP_KERNEL);
+ if (!coef)
+ return -ENOMEM;
+
+ for (i = 0; i < phases; i++) {
+ s16 sum = 0;
+
+ dy = ((fixp_new(i) << FRAC_N) / fixp_new(phases));
+
+ /* Generate Lanczos coefficients */
+ for (j = 0; j < taps; j++) {
+ coef[j] = lanczos(fixp_new(j - (taps >> 1)) + dy,
+ fixp_new(taps >> 1));
+ sum += coef[j];
+ }
+
+ /* Program coefficients */
+ for (j = 0; j < taps; j += 2) {
+ /* Normalize and multiply coefficients */
+ coef_val = (((coef[j] << FRAC_N) << (FRAC_N - 2)) /
+ sum) & 0xffff;
+ if (j + 1 < taps)
+ coef_val |= ((((coef[j + 1] << FRAC_N) <<
+ (FRAC_N - 2)) / sum) & 0xffff) <<
+ 16;
+
+ xvip_write(&xscaler->xvip, XSCALER_COEF_DATA_IN,
+ coef_val);
+ }
+ }
+
+ kfree(coef);
+
+ return 0;
+}
+
+static void xscaler_set_aperture(struct xscaler_device *xscaler)
+{
+ u16 start;
+ u16 end;
+ u32 scale_factor;
+
+ xvip_disable_reg_update(&xscaler->xvip);
+
+ /* set horizontal aperture */
+ start = xscaler->crop.left;
+ end = start + xscaler->crop.width - 1;
+ xvip_write(&xscaler->xvip, XSCALER_HAPERTURE,
+ (end << XSCALER_APERTURE_END_SHIFT) |
+ (start << XSCALER_APERTURE_START_SHIFT));
+
+ /* set vertical aperture */
+ start = xscaler->crop.top;
+ end = start + xscaler->crop.height - 1;
+ xvip_write(&xscaler->xvip, XSCALER_VAPERTURE,
+ (end << XSCALER_APERTURE_END_SHIFT) |
+ (start << XSCALER_APERTURE_START_SHIFT));
+
+ /* set scaling factors */
+ scale_factor = ((xscaler->crop.width << XSCALER_SF_SHIFT) /
+ xscaler->formats[XVIP_PAD_SOURCE].width) &
+ XSCALER_SF_MASK;
+ xvip_write(&xscaler->xvip, XSCALER_HSF, scale_factor);
+
+ scale_factor = ((xscaler->crop.height << XSCALER_SF_SHIFT) /
+ xscaler->formats[XVIP_PAD_SOURCE].height) &
+ XSCALER_SF_MASK;
+ xvip_write(&xscaler->xvip, XSCALER_VSF, scale_factor);
+
+ xvip_enable_reg_update(&xscaler->xvip);
+}
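+
+/*
+ * Example (illustrative): downscaling a 1920-pixel-wide crop to a
+ * 1280-pixel output programs XSCALER_HSF with
+ * (1920 << 20) / 1280 = 0x180000, i.e. a scale factor of 1.5 in the
+ * core's 20-bit fixed-point format.
+ */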
+
+static int xscaler_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ u32 width;
+ u32 height;
+
+ if (!enable) {
+ xvip_stop(&xscaler->xvip);
+ return 0;
+ }
+
+ /* set input width / height */
+ width = xscaler->formats[XVIP_PAD_SINK].width;
+ height = xscaler->formats[XVIP_PAD_SINK].height;
+ xvip_write(&xscaler->xvip, XSCALER_SOURCE_SIZE,
+ (height << XSCALER_SIZE_VERT_SHIFT) |
+ (width << XSCALER_SIZE_HORZ_SHIFT));
+
+ /* set output width / height */
+ width = xscaler->formats[XVIP_PAD_SOURCE].width;
+ height = xscaler->formats[XVIP_PAD_SOURCE].height;
+ xvip_write(&xscaler->xvip, XSCALER_OUTPUT_SIZE,
+ (height << XSCALER_SIZE_VERT_SHIFT) |
+ (width << XSCALER_SIZE_HORZ_SHIFT));
+
+ /* set aperture */
+ xscaler_set_aperture(xscaler);
+
+ xvip_start(&xscaler->xvip);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int xscaler_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ fse->min_width = XSCALER_MIN_WIDTH;
+ fse->max_width = XSCALER_MAX_WIDTH;
+ fse->min_height = XSCALER_MIN_HEIGHT;
+ fse->max_height = XSCALER_MAX_HEIGHT;
+
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *
+__xscaler_get_pad_format(struct xscaler_device *xscaler,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xscaler->xvip.subdev, cfg,
+ pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xscaler->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static struct v4l2_rect *__xscaler_get_crop(struct xscaler_device *xscaler,
+ struct v4l2_subdev_pad_config *cfg,
+ u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_crop(&xscaler->xvip.subdev, cfg,
+ XVIP_PAD_SINK);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xscaler->crop;
+ default:
+ return NULL;
+ }
+}
+
+static int xscaler_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+
+ fmt->format = *__xscaler_get_pad_format(xscaler, cfg, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static void xscaler_try_crop(const struct v4l2_mbus_framefmt *sink,
+ struct v4l2_rect *crop)
+{
+ crop->left = min_t(u32, crop->left, sink->width - XSCALER_MIN_WIDTH);
+ crop->top = min_t(u32, crop->top, sink->height - XSCALER_MIN_HEIGHT);
+ crop->width = clamp_t(u32, crop->width, XSCALER_MIN_WIDTH,
+ sink->width - crop->left);
+ crop->height = clamp_t(u32, crop->height, XSCALER_MIN_HEIGHT,
+ sink->height - crop->top);
+}
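+
+/*
+ * Example (illustrative): with a 1920x1080 sink format, a requested
+ * crop of 400x200 at (1800, 1000) is clamped to 120x80 at the same
+ * position, keeping the rectangle inside the frame and no smaller than
+ * the 32x32 minimum.
+ */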
+
+static int xscaler_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ format = __xscaler_get_pad_format(xscaler, cfg, fmt->pad, fmt->which);
+
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XSCALER_MIN_WIDTH, XSCALER_MAX_WIDTH);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XSCALER_MIN_HEIGHT, XSCALER_MAX_HEIGHT);
+
+ fmt->format = *format;
+
+ if (fmt->pad == XVIP_PAD_SINK) {
+ /* Set the crop rectangle to the full frame */
+ crop = __xscaler_get_crop(xscaler, cfg, fmt->which);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+ }
+
+ return 0;
+}
+
+static int xscaler_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ if (sel->pad != XVIP_PAD_SINK)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ format = __xscaler_get_pad_format(xscaler, cfg, XVIP_PAD_SINK,
+ sel->which);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format->width;
+ sel->r.height = format->height;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *__xscaler_get_crop(xscaler, cfg, sel->which);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int xscaler_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ if ((sel->target != V4L2_SEL_TGT_CROP) || (sel->pad != XVIP_PAD_SINK))
+ return -EINVAL;
+
+ format = __xscaler_get_pad_format(xscaler, cfg, XVIP_PAD_SINK,
+ sel->which);
+ xscaler_try_crop(format, &sel->r);
+ *__xscaler_get_crop(xscaler, cfg, sel->which) = sel->r;
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+
+static int xscaler_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xscaler->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xscaler->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int xscaler_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops xscaler_video_ops = {
+ .s_stream = xscaler_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xscaler_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xscaler_enum_frame_size,
+ .get_fmt = xscaler_get_format,
+ .set_fmt = xscaler_set_format,
+ .get_selection = xscaler_get_selection,
+ .set_selection = xscaler_set_selection,
+};
+
+static struct v4l2_subdev_ops xscaler_ops = {
+ .video = &xscaler_video_ops,
+ .pad = &xscaler_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xscaler_internal_ops = {
+ .open = xscaler_open,
+ .close = xscaler_close,
+};
+
+/*
+ * Media Operations
+ */
+
+static const struct media_entity_operations xscaler_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * Power Management
+ */
+
+static int __maybe_unused xscaler_pm_suspend(struct device *dev)
+{
+ struct xscaler_device *xscaler = dev_get_drvdata(dev);
+
+ xvip_suspend(&xscaler->xvip);
+
+ return 0;
+}
+
+static int __maybe_unused xscaler_pm_resume(struct device *dev)
+{
+ struct xscaler_device *xscaler = dev_get_drvdata(dev);
+
+ xvip_resume(&xscaler->xvip);
+
+ return 0;
+}
+
+/*
+ * Platform Device Driver
+ */
+
+static int xscaler_parse_of(struct xscaler_device *xscaler)
+{
+ struct device *dev = xscaler->xvip.dev;
+ struct device_node *node = xscaler->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ int ret;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ const struct xvip_video_format *vip_format;
+
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ if (!xscaler->vip_format) {
+ xscaler->vip_format = vip_format;
+ } else if (xscaler->vip_format != vip_format) {
+ dev_err(dev, "in/out format mismatch in DT");
+ return -EINVAL;
+ }
+ }
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-hori-taps",
+ &xscaler->num_hori_taps);
+ if (ret < 0)
+ return ret;
+
+ ret = of_property_read_u32(node, "xlnx,num-vert-taps",
+ &xscaler->num_vert_taps);
+ if (ret < 0)
+ return ret;
+
+ ret = of_property_read_u32(node, "xlnx,max-num-phases",
+ &xscaler->max_num_phases);
+ if (ret < 0)
+ return ret;
+
+ xscaler->separate_yc_coef =
+ of_property_read_bool(node, "xlnx,separate-yc-coef");
+
+ xscaler->separate_hv_coef =
+ of_property_read_bool(node, "xlnx,separate-hv-coef");
+
+ return 0;
+}
+
+static int xscaler_probe(struct platform_device *pdev)
+{
+ struct xscaler_device *xscaler;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *default_format;
+ u32 size;
+ int ret;
+
+ xscaler = devm_kzalloc(&pdev->dev, sizeof(*xscaler), GFP_KERNEL);
+ if (!xscaler)
+ return -ENOMEM;
+
+ xscaler->xvip.dev = &pdev->dev;
+
+ ret = xscaler_parse_of(xscaler);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xscaler->xvip);
+ if (ret < 0)
+ return ret;
+
+ /* Reset and initialize the core */
+ xvip_reset(&xscaler->xvip);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xscaler->xvip.subdev;
+ v4l2_subdev_init(subdev, &xscaler_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xscaler_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xscaler);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize default and active formats */
+ default_format = &xscaler->default_formats[XVIP_PAD_SINK];
+ default_format->code = xscaler->vip_format->code;
+ default_format->field = V4L2_FIELD_NONE;
+ default_format->colorspace = V4L2_COLORSPACE_SRGB;
+ size = xvip_read(&xscaler->xvip, XSCALER_SOURCE_SIZE);
+ default_format->width = (size >> XSCALER_SIZE_HORZ_SHIFT) &
+ XSCALER_SIZE_MASK;
+ default_format->height = (size >> XSCALER_SIZE_VERT_SHIFT) &
+ XSCALER_SIZE_MASK;
+
+ xscaler->formats[XVIP_PAD_SINK] = *default_format;
+
+ default_format = &xscaler->default_formats[XVIP_PAD_SOURCE];
+ *default_format = xscaler->default_formats[XVIP_PAD_SINK];
+ size = xvip_read(&xscaler->xvip, XSCALER_OUTPUT_SIZE);
+ default_format->width = (size >> XSCALER_SIZE_HORZ_SHIFT) &
+ XSCALER_SIZE_MASK;
+ default_format->height = (size >> XSCALER_SIZE_VERT_SHIFT) &
+ XSCALER_SIZE_MASK;
+
+ xscaler->formats[XVIP_PAD_SOURCE] = *default_format;
+
+ xscaler->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xscaler->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xscaler_media_ops;
+
+ ret = media_entity_pads_init(&subdev->entity, 2, xscaler->pads);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xscaler);
+
+ xvip_print_version(&xscaler->xvip);
+
+ ret = xscaler_set_coefs(xscaler, (s16)xscaler->num_hori_taps);
+ if (ret < 0)
+ goto error;
+
+ if (xscaler->separate_hv_coef) {
+ ret = xscaler_set_coefs(xscaler, (s16)xscaler->num_vert_taps);
+ if (ret < 0)
+ goto error;
+ }
+
+ if (xscaler->separate_yc_coef) {
+ ret = xscaler_set_coefs(xscaler, (s16)xscaler->num_hori_taps);
+ if (ret < 0)
+ goto error;
+
+ if (xscaler->separate_hv_coef) {
+ ret = xscaler_set_coefs(xscaler,
+ (s16)xscaler->num_vert_taps);
+ if (ret < 0)
+ goto error;
+ }
+ }
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xscaler->xvip);
+ return ret;
+}
+
+static int xscaler_remove(struct platform_device *pdev)
+{
+ struct xscaler_device *xscaler = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xscaler->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xscaler->xvip);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xscaler_pm_ops, xscaler_pm_suspend, xscaler_pm_resume);
+
+static const struct of_device_id xscaler_of_id_table[] = {
+ { .compatible = "xlnx,v-scaler-8.1" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xscaler_of_id_table);
+
+static struct platform_driver xscaler_driver = {
+ .driver = {
+ .name = "xilinx-scaler",
+ .of_match_table = xscaler_of_id_table,
+ },
+ .probe = xscaler_probe,
+ .remove = xscaler_remove,
+};
+
+module_platform_driver(xscaler_driver);
+
+MODULE_DESCRIPTION("Xilinx Scaler Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-scenechange-channel.c b/drivers/media/platform/xilinx/xilinx-scenechange-channel.c
new file mode 100644
index 000000000000..4f3a8d03d217
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-scenechange-channel.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Scene Change Detection driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
+ * Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/xilinx-v4l2-events.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-scenechange.h"
+#include "xilinx-vip.h"
+
+#define XSCD_MAX_WIDTH 3840
+#define XSCD_MAX_HEIGHT 2160
+#define XSCD_MIN_WIDTH 640
+#define XSCD_MIN_HEIGHT 480
+
+#define XSCD_V_SUBSAMPLING 16
+#define XSCD_BYTE_ALIGN 16
+#define MULTIPLICATION_FACTOR 100
+
+#define XSCD_SCENE_CHANGE 1
+#define XSCD_NO_SCENE_CHANGE 0
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int xscd_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ return 0;
+}
+
+static int xscd_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *
+__xscd_get_pad_format(struct xscd_chan *chan,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&chan->subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &chan->format;
+ default:
+ return NULL;
+ }
+}
+
+static int xscd_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscd_chan *chan = to_xscd_chan(subdev);
+
+ fmt->format = *__xscd_get_pad_format(chan, cfg, fmt->pad, fmt->which);
+ return 0;
+}
+
+static int xscd_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscd_chan *chan = to_xscd_chan(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xscd_get_pad_format(chan, cfg, fmt->pad, fmt->which);
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XSCD_MIN_WIDTH, XSCD_MAX_WIDTH);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XSCD_MIN_HEIGHT, XSCD_MAX_HEIGHT);
+ format->code = fmt->format.code;
+ fmt->format = *format;
+
+ return 0;
+}
+
+static int xscd_chan_get_vid_fmt(u32 media_bus_fmt, bool memory_based)
+{
+ u32 vid_fmt;
+
+ if (memory_based) {
+ switch (media_bus_fmt) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ vid_fmt = XSCD_VID_FMT_Y8;
+ break;
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ vid_fmt = XSCD_VID_FMT_Y10;
+ break;
+ default:
+ vid_fmt = XSCD_VID_FMT_Y8;
+ }
+
+ return vid_fmt;
+ }
+
+ /* Streaming based */
+ switch (media_bus_fmt) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ vid_fmt = XSCD_VID_FMT_YUV_420;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ vid_fmt = XSCD_VID_FMT_YUV_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ vid_fmt = XSCD_VID_FMT_YUV_444;
+ break;
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ vid_fmt = XSCD_VID_FMT_RGB;
+ break;
+ default:
+ vid_fmt = XSCD_VID_FMT_YUV_420;
+ }
+
+ return vid_fmt;
+}
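+
+/*
+ * Example (illustrative): the memory-based detector only reads the luma
+ * plane, so MEDIA_BUS_FMT_UYVY8_1X16 maps to XSCD_VID_FMT_Y8 there,
+ * while the same code maps to XSCD_VID_FMT_YUV_422 in streaming mode.
+ */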
+
+/**
+ * xscd_chan_configure_params - Program parameters to HW registers
+ * @chan: Driver specific channel struct pointer
+ */
+static void xscd_chan_configure_params(struct xscd_chan *chan)
+{
+ u32 vid_fmt, stride;
+
+ xscd_write(chan->iomem, XSCD_WIDTH_OFFSET, chan->format.width);
+
+ /* Stride is required only for memory based IP, not for streaming IP */
+ if (chan->xscd->memory_based) {
+ stride = roundup(chan->format.width, XSCD_BYTE_ALIGN);
+ xscd_write(chan->iomem, XSCD_STRIDE_OFFSET, stride);
+ }
+
+ xscd_write(chan->iomem, XSCD_HEIGHT_OFFSET, chan->format.height);
+
+ /* Hardware video format */
+ vid_fmt = xscd_chan_get_vid_fmt(chan->format.code,
+ chan->xscd->memory_based);
+ xscd_write(chan->iomem, XSCD_VID_FMT_OFFSET, vid_fmt);
+
+ /*
+ * This is the vertical subsampling factor of the input image. Instead
+ * of sampling every line to calculate the histogram, IP uses this
+ * register value to sample only specific lines of the frame.
+ */
+ xscd_write(chan->iomem, XSCD_SUBSAMPLE_OFFSET, XSCD_V_SUBSAMPLING);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static int xscd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ int ret = 0;
+ struct xscd_chan *chan = container_of(ctrl->handler, struct xscd_chan,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_SCD_THRESHOLD:
+ chan->threshold = ctrl->val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int xscd_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xscd_chan *chan = to_xscd_chan(subdev);
+ struct xscd_device *xscd = chan->xscd;
+
+ if (enable)
+ xscd_chan_configure_params(chan);
+
+ xscd_dma_enable_channel(&chan->dmachan, enable);
+
+ /*
+ * Resolution change doesn't work in stream based mode unless
+ * the device is reset.
+ */
+ if (!enable && !xscd->memory_based) {
+ gpiod_set_value_cansleep(xscd->rst_gpio, XSCD_RESET_ASSERT);
+ gpiod_set_value_cansleep(xscd->rst_gpio, XSCD_RESET_DEASSERT);
+ }
+
+ return 0;
+}
+
+static int xscd_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ int ret;
+ struct xscd_chan *chan = to_xscd_chan(sd);
+
+ mutex_lock(&chan->lock);
+
+ switch (sub->type) {
+ case V4L2_EVENT_XLNXSCD:
+ ret = v4l2_event_subscribe(fh, sub, 1, NULL);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&chan->lock);
+
+ return ret;
+}
+
+static int xscd_unsubscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ int ret;
+ struct xscd_chan *chan = to_xscd_chan(sd);
+
+ mutex_lock(&chan->lock);
+ ret = v4l2_event_unsubscribe(fh, sub);
+ mutex_unlock(&chan->lock);
+
+ return ret;
+}
+
+static int xscd_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static int xscd_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops xscd_ctrl_ops = {
+ .s_ctrl = xscd_s_ctrl
+};
+
+static const struct v4l2_ctrl_config xscd_ctrls[] = {
+ {
+ .ops = &xscd_ctrl_ops,
+ .id = V4L2_CID_XILINX_SCD_THRESHOLD,
+ .name = "Threshold Value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = 50,
+ }
+};
+
+static const struct v4l2_subdev_core_ops xscd_core_ops = {
+ .subscribe_event = xscd_subscribe_event,
+ .unsubscribe_event = xscd_unsubscribe_event
+};
+
+static struct v4l2_subdev_video_ops xscd_video_ops = {
+ .s_stream = xscd_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xscd_pad_ops = {
+ .enum_mbus_code = xscd_enum_mbus_code,
+ .enum_frame_size = xscd_enum_frame_size,
+ .get_fmt = xscd_get_format,
+ .set_fmt = xscd_set_format,
+};
+
+static struct v4l2_subdev_ops xscd_ops = {
+ .core = &xscd_core_ops,
+ .video = &xscd_video_ops,
+ .pad = &xscd_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xscd_internal_ops = {
+ .open = xscd_open,
+ .close = xscd_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xscd_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+void xscd_chan_event_notify(struct xscd_chan *chan)
+{
+ u32 *eventdata;
+ u32 sad;
+
+ sad = xscd_read(chan->iomem, XSCD_SAD_OFFSET);
+ sad = (sad * XSCD_V_SUBSAMPLING * MULTIPLICATION_FACTOR) /
+ (chan->format.width * chan->format.height);
+ eventdata = (u32 *)&chan->event.u.data;
+
+ if (sad > chan->threshold)
+ eventdata[0] = XSCD_SCENE_CHANGE;
+ else
+ eventdata[0] = XSCD_NO_SCENE_CHANGE;
+
+ chan->event.type = V4L2_EVENT_XLNXSCD;
+ v4l2_subdev_notify_event(&chan->subdev, &chan->event);
+}
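+
+/*
+ * Worked example (illustrative): for a 1920x1080 frame sampled every
+ * 16th line, a raw SAD of 129600 normalizes to
+ * 129600 * 16 * 100 / (1920 * 1080) = 100, the full-scale value of the
+ * 0-100 threshold control the score is compared against.
+ */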
+
+/**
+ * xscd_chan_init - Initialize the V4L2 subdev for a channel
+ * @xscd: Pointer to the SCD device structure
+ * @chan_id: Channel id
+ * @node: device node
+ *
+ * Return: '0' on success and failure value on error
+ */
+int xscd_chan_init(struct xscd_device *xscd, unsigned int chan_id,
+ struct device_node *node)
+{
+ struct xscd_chan *chan = &xscd->chans[chan_id];
+ struct v4l2_subdev *subdev;
+ unsigned int num_pads;
+ int ret;
+ unsigned int i;
+
+ mutex_init(&chan->lock);
+ chan->xscd = xscd;
+ chan->id = chan_id;
+ chan->iomem = chan->xscd->iomem + chan->id * XSCD_CHAN_OFFSET;
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &chan->subdev;
+ v4l2_subdev_init(subdev, &xscd_ops);
+ subdev->dev = chan->xscd->dev;
+ subdev->fwnode = of_fwnode_handle(node);
+ subdev->internal_ops = &xscd_internal_ops;
+ snprintf(subdev->name, sizeof(subdev->name), "xlnx-scdchan.%u",
+ chan_id);
+ v4l2_set_subdevdata(subdev, chan);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ /* Initialize default format */
+ chan->format.code = MEDIA_BUS_FMT_VYYUYY8_1X24;
+ chan->format.field = V4L2_FIELD_NONE;
+ chan->format.width = XSCD_MAX_WIDTH;
+ chan->format.height = XSCD_MAX_HEIGHT;
+
+ /* Initialize media pads */
+ num_pads = xscd->memory_based ? 1 : 2;
+
+ chan->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ if (!xscd->memory_based)
+ chan->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&subdev->entity, num_pads, chan->pads);
+ if (ret < 0)
+ goto media_init_error;
+
+ subdev->entity.ops = &xscd_media_ops;
+
+ /* Initialize V4L2 Control Handler */
+ v4l2_ctrl_handler_init(&chan->ctrl_handler, ARRAY_SIZE(xscd_ctrls));
+
+ for (i = 0; i < ARRAY_SIZE(xscd_ctrls); i++) {
+ struct v4l2_ctrl *ctrl;
+
+ dev_dbg(chan->xscd->dev, "%d ctrl = 0x%x\n", i,
+ xscd_ctrls[i].id);
+ ctrl = v4l2_ctrl_new_custom(&chan->ctrl_handler, &xscd_ctrls[i],
+ NULL);
+		if (!ctrl) {
+			dev_err(chan->xscd->dev, "Failed for %s ctrl\n",
+				xscd_ctrls[i].name);
+			ret = chan->ctrl_handler.error;
+			goto ctrl_handler_error;
+		}
+ }
+
+ if (chan->ctrl_handler.error) {
+ dev_err(chan->xscd->dev, "failed to add controls\n");
+ ret = chan->ctrl_handler.error;
+ goto ctrl_handler_error;
+ }
+
+ subdev->ctrl_handler = &chan->ctrl_handler;
+
+ ret = v4l2_ctrl_handler_setup(&chan->ctrl_handler);
+ if (ret < 0) {
+ dev_err(chan->xscd->dev, "failed to set controls\n");
+ goto ctrl_handler_error;
+ }
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(chan->xscd->dev, "failed to register subdev\n");
+ goto ctrl_handler_error;
+ }
+
+ dev_info(chan->xscd->dev, "Scene change detection channel found!\n");
+ return 0;
+
+ctrl_handler_error:
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+media_init_error:
+ media_entity_cleanup(&subdev->entity);
+ mutex_destroy(&chan->lock);
+ return ret;
+}
+
+/**
+ * xscd_chan_cleanup - Clean up the V4L2 subdev for a channel
+ * @xscd: Pointer to the SCD device structure
+ * @chan_id: Channel id
+ * @node: device node
+ */
+void xscd_chan_cleanup(struct xscd_device *xscd, unsigned int chan_id,
+ struct device_node *node)
+{
+ struct xscd_chan *chan = &xscd->chans[chan_id];
+ struct v4l2_subdev *subdev = &chan->subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ mutex_destroy(&chan->lock);
+}
diff --git a/drivers/media/platform/xilinx/xilinx-scenechange-dma.c b/drivers/media/platform/xilinx/xilinx-scenechange-dma.c
new file mode 100644
index 000000000000..58437a769605
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-scenechange-dma.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Scene Change Detection DMA driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
+ * Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/of_dma.h>
+#include <linux/slab.h>
+
+#include "../../../dma/dmaengine.h"
+
+#include "xilinx-scenechange.h"
+
+/**
+ * xscd_dma_start - Start the SCD core
+ * @xscd: The SCD device
+ * @channels: Bitmask of enabled channels
+ */
+static void xscd_dma_start(struct xscd_device *xscd, unsigned int channels)
+{
+ xscd_write(xscd->iomem, XSCD_IE_OFFSET, XSCD_IE_AP_DONE);
+ xscd_write(xscd->iomem, XSCD_GIE_OFFSET, XSCD_GIE_EN);
+ xscd_write(xscd->iomem, XSCD_CHAN_EN_OFFSET, channels);
+
+ xscd_set(xscd->iomem, XSCD_CTRL_OFFSET,
+ xscd->memory_based ? XSCD_CTRL_AP_START
+ : XSCD_CTRL_AP_START |
+ XSCD_CTRL_AUTO_RESTART);
+
+ xscd->running = true;
+}
+
+/**
+ * xscd_dma_stop - Stop the SCD core
+ * @xscd: The SCD device
+ */
+static void xscd_dma_stop(struct xscd_device *xscd)
+{
+ xscd_clr(xscd->iomem, XSCD_CTRL_OFFSET,
+ xscd->memory_based ? XSCD_CTRL_AP_START
+ : XSCD_CTRL_AP_START |
+ XSCD_CTRL_AUTO_RESTART);
+
+ xscd->running = false;
+}
+
+/**
+ * xscd_dma_setup_channel - Setup a channel for transfer
+ * @chan: Driver specific channel struct pointer
+ *
+ * Return: 1 if the channel starts to run for a new transfer. Otherwise, 0.
+ */
+static int xscd_dma_setup_channel(struct xscd_dma_chan *chan)
+{
+ struct xscd_dma_tx_descriptor *desc;
+
+ if (!chan->enabled)
+ return 0;
+
+ if (list_empty(&chan->pending_list))
+ return 0;
+
+ desc = list_first_entry(&chan->pending_list,
+ struct xscd_dma_tx_descriptor, node);
+ list_del(&desc->node);
+
+ xscd_write(chan->iomem, XSCD_ADDR_OFFSET, desc->sw.luma_plane_addr);
+ chan->active_desc = desc;
+
+ return 1;
+}
+
+/**
+ * xscd_dma_kick - Start a run of the SCD core if channels are ready
+ * @xscd: The SCD device
+ *
+ * This function starts a single run of the SCD core when all the following
+ * conditions are met:
+ *
+ * - The SCD is not currently running
+ * - At least one channel is enabled and has buffers available
+ *
+ * It can be used to start the SCD when a buffer is queued, when a channel
+ * starts streaming, or to start the next run. Calling this function is only
+ * valid for memory-based mode and is not permitted for stream-based mode.
+ *
+ * The running state for all channels is updated. Channels that are being
+ * stopped are signalled through the channel wait queue.
+ *
+ * The function must be called with the xscd_device lock held.
+ */
+static void xscd_dma_kick(struct xscd_device *xscd)
+{
+ unsigned int channels = 0;
+ unsigned int i;
+
+ lockdep_assert_held(&xscd->lock);
+
+ if (xscd->running)
+ return;
+
+ for (i = 0; i < xscd->num_streams; i++) {
+ struct xscd_dma_chan *chan = xscd->channels[i];
+ unsigned long flags;
+ unsigned int running;
+ bool stopped;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ running = xscd_dma_setup_channel(chan);
+ stopped = chan->running && !running;
+ chan->running = running;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ channels |= running << chan->id;
+ if (stopped)
+ wake_up(&chan->wait);
+ }
+
+ if (channels)
+ xscd_dma_start(xscd, channels);
+ else
+ xscd_dma_stop(xscd);
+}
+
+/**
+ * xscd_dma_enable_channel - Enable/disable a channel
+ * @chan: Driver specific channel struct pointer
+ * @enable: True to enable the channel, false to disable it
+ *
+ * This function enables or disables a channel. When operating in memory-based
+ * mode, enabling a channel kicks processing if buffers are available for any
+ * enabled channel and the SCD core is idle. When operating in stream-based
+ * mode, the SCD core is started or stopped synchronously when the channel is
+ * enabled or disabled.
+ *
+ * This function must be called in non-atomic, non-interrupt context.
+ */
+void xscd_dma_enable_channel(struct xscd_dma_chan *chan, bool enable)
+{
+ struct xscd_device *xscd = chan->xscd;
+
+ if (enable) {
+ /*
+ * FIXME: Don't set chan->enabled to false here, it will be
+ * done in xscd_dma_terminate_all(). This works around a bug
+ * introduced in commit 2e77607047c6 ("xilinx: v4l2: dma: Add
+ * multiple output support") that stops all channels when the
+ * first one is stopped, even though they are part of
+ * independent pipelines. This workaround should be safe as
+ * long as dmaengine_terminate_all() is called after
+ * xvip_pipeline_set_stream().
+ */
+ spin_lock_irq(&chan->lock);
+ chan->enabled = true;
+ spin_unlock_irq(&chan->lock);
+ }
+
+ if (xscd->memory_based) {
+ if (enable) {
+ spin_lock_irq(&xscd->lock);
+ xscd_dma_kick(xscd);
+ spin_unlock_irq(&xscd->lock);
+ }
+ } else {
+ if (enable)
+ xscd_dma_start(xscd, BIT(chan->id));
+ else
+ xscd_dma_stop(xscd);
+ }
+}
+
+/**
+ * xscd_dma_irq_handler - SCD DMA interrupt handler
+ * @xscd: Pointer to the SCD device structure
+ */
+void xscd_dma_irq_handler(struct xscd_device *xscd)
+{
+ unsigned int i;
+
+ /*
+ * Mark the active descriptors as complete, move them to the done list
+ * and schedule the tasklet to clean them up.
+ */
+ for (i = 0; i < xscd->num_streams; ++i) {
+ struct xscd_dma_chan *chan = xscd->channels[i];
+ struct xscd_dma_tx_descriptor *desc = chan->active_desc;
+
+ if (!desc)
+ continue;
+
+ dma_cookie_complete(&desc->async_tx);
+ xscd_chan_event_notify(&xscd->chans[i]);
+
+ spin_lock(&chan->lock);
+ list_add_tail(&desc->node, &chan->done_list);
+ chan->active_desc = NULL;
+ spin_unlock(&chan->lock);
+
+ tasklet_schedule(&chan->tasklet);
+ }
+
+ /* Start the next run, if any. */
+ spin_lock(&xscd->lock);
+ xscd->running = false;
+ xscd_dma_kick(xscd);
+ spin_unlock(&xscd->lock);
+}
+
+/* -----------------------------------------------------------------------------
+ * DMA Engine
+ */
+
+/**
+ * xscd_dma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor
+ *
+ * Return: cookie value on success and failure value on error
+ */
+static dma_cookie_t xscd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct xscd_dma_tx_descriptor *desc = to_xscd_dma_tx_descriptor(tx);
+ struct xscd_dma_chan *chan = to_xscd_dma_chan(tx->chan);
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ cookie = dma_cookie_assign(tx);
+ list_add_tail(&desc->node, &chan->pending_list);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return cookie;
+}
+
+/**
+ * xscd_dma_free_desc_list - Free descriptors list
+ * @chan: Driver specific dma channel
+ * @list: List of descriptors to parse and free
+ */
+static void xscd_dma_free_desc_list(struct xscd_dma_chan *chan,
+ struct list_head *list)
+{
+ struct xscd_dma_tx_descriptor *desc, *next;
+
+ list_for_each_entry_safe(desc, next, list, node) {
+ list_del(&desc->node);
+ kfree(desc);
+ }
+}
+
+/**
+ * xscd_dma_free_descriptors - Free channel descriptors
+ * @chan: Driver specific dma channel
+ */
+static void xscd_dma_free_descriptors(struct xscd_dma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ xscd_dma_free_desc_list(chan, &chan->pending_list);
+ xscd_dma_free_desc_list(chan, &chan->done_list);
+ kfree(chan->active_desc);
+
+ chan->active_desc = NULL;
+ INIT_LIST_HEAD(&chan->pending_list);
+ INIT_LIST_HEAD(&chan->done_list);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xscd_dma_chan_desc_cleanup - Clean up completed channel descriptors
+ * @chan: Driver specific dma channel
+ */
+static void xscd_dma_chan_desc_cleanup(struct xscd_dma_chan *chan)
+{
+ struct xscd_dma_tx_descriptor *desc, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ list_del(&desc->node);
+
+ /* Run the link descriptor callback function */
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ callback(callback_param);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ kfree(desc);
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xscd_dma_prep_interleaved - prepare a descriptor for a
+ * DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @xt: Interleaved template pointer
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xscd_dma_prep_interleaved(struct dma_chan *dchan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
+ struct xscd_dma_tx_descriptor *desc;
+ struct xscd_dma_desc *sw;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xscd_dma_tx_submit;
+ async_tx_ack(&desc->async_tx);
+
+ sw = &desc->sw;
+ sw->vsize = xt->numf;
+ sw->hsize = xt->sgl[0].size;
+ sw->stride = xt->sgl[0].size + xt->sgl[0].icg;
+ sw->luma_plane_addr = xt->src_start;
+
+ return &desc->async_tx;
+}
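For context, a minimal client-side sketch (not part of this patch) of driving this interleaved-only engine through the standard dmaengine API; the lookup name "scd", the helper name, and the transfer direction are assumptions:

/* Hypothetical client sketch -- channel name "scd" is an assumption. */
#include <linux/dmaengine.h>
#include <linux/slab.h>

static int scd_client_queue_frame(struct device *dev, dma_addr_t luma,
				  u32 width, u32 height, u32 stride)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "scd");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
	if (!xt) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	/* These fields are exactly what xscd_dma_prep_interleaved() reads. */
	xt->src_start = luma;			/* luma plane address */
	xt->numf = height;			/* becomes sw->vsize */
	xt->frame_size = 1;
	xt->sgl[0].size = width;		/* becomes sw->hsize */
	xt->sgl[0].icg = stride - width;	/* stride = size + icg */
	xt->dir = DMA_MEM_TO_DEV;		/* direction is an assumption */

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_CTRL_ACK);
	kfree(xt);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	dmaengine_submit(tx);		/* lands in xscd_dma_tx_submit() */
	dma_async_issue_pending(chan);	/* kicks xscd_dma_issue_pending() */
	return 0;
}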
+
+static bool xscd_dma_is_running(struct xscd_dma_chan *chan)
+{
+ bool running;
+
+ spin_lock_irq(&chan->lock);
+ running = chan->running;
+ spin_unlock_irq(&chan->lock);
+
+ return running;
+}
+
+/**
+ * xscd_dma_terminate_all - Halt the channel and free descriptors
+ * @dchan: Driver specific dma channel pointer
+ *
+ * Return: 0
+ */
+static int xscd_dma_terminate_all(struct dma_chan *dchan)
+{
+ struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
+ int ret;
+
+ spin_lock_irq(&chan->lock);
+ chan->enabled = false;
+ spin_unlock_irq(&chan->lock);
+
+ /* Wait for any on-going transfer to complete. */
+ ret = wait_event_timeout(chan->wait, !xscd_dma_is_running(chan),
+ msecs_to_jiffies(100));
+ WARN_ON(ret == 0);
+
+ xscd_dma_free_descriptors(chan);
+ return 0;
+}
+
+/**
+ * xscd_dma_issue_pending - Issue pending transactions
+ * @dchan: DMA channel
+ */
+static void xscd_dma_issue_pending(struct dma_chan *dchan)
+{
+ struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
+ struct xscd_device *xscd = chan->xscd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xscd->lock, flags);
+ xscd_dma_kick(xscd);
+ spin_unlock_irqrestore(&xscd->lock, flags);
+}
+
+static enum dma_status xscd_dma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(dchan, cookie, txstate);
+}
+
+/**
+ * xscd_dma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel
+ */
+static void xscd_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
+
+ xscd_dma_free_descriptors(chan);
+}
+
+/**
+ * xscd_dma_do_tasklet - Schedule completion tasklet
+ * @data: Pointer to the Xilinx scdma channel structure
+ */
+static void xscd_dma_do_tasklet(unsigned long data)
+{
+ struct xscd_dma_chan *chan = (struct xscd_dma_chan *)data;
+
+ xscd_dma_chan_desc_cleanup(chan);
+}
+
+/**
+ * xscd_dma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: always 0
+ */
+static int xscd_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ dma_cookie_init(dchan);
+ return 0;
+}
+
+/**
+ * of_scdma_xilinx_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success and NULL on error
+ */
+static struct dma_chan *of_scdma_xilinx_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct xscd_device *xscd = ofdma->of_dma_data;
+ u32 chan_id = dma_spec->args[0];
+
+ if (chan_id >= xscd->num_streams)
+ return NULL;
+
+ if (!xscd->channels[chan_id])
+ return NULL;
+
+ return dma_get_slave_channel(&xscd->channels[chan_id]->common);
+}
+
+static void xscd_dma_chan_init(struct xscd_device *xscd, int chan_id)
+{
+ struct xscd_dma_chan *chan = &xscd->chans[chan_id].dmachan;
+
+ chan->id = chan_id;
+ chan->iomem = xscd->iomem + chan->id * XSCD_CHAN_OFFSET;
+ chan->xscd = xscd;
+
+ xscd->channels[chan->id] = chan;
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->pending_list);
+ INIT_LIST_HEAD(&chan->done_list);
+ tasklet_init(&chan->tasklet, xscd_dma_do_tasklet,
+ (unsigned long)chan);
+ init_waitqueue_head(&chan->wait);
+
+ chan->common.device = &xscd->dma_device;
+ list_add_tail(&chan->common.device_node, &xscd->dma_device.channels);
+}
+
+/**
+ * xscd_dma_chan_remove - Per Channel remove function
+ * @chan: Driver specific DMA channel
+ */
+static void xscd_dma_chan_remove(struct xscd_dma_chan *chan)
+{
+ list_del(&chan->common.device_node);
+}
+
+/**
+ * xscd_dma_init - Initialize the SCD DMA engine
+ * @xscd: Pointer to the SCD device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+int xscd_dma_init(struct xscd_device *xscd)
+{
+ struct dma_device *ddev = &xscd->dma_device;
+ unsigned int chan_id;
+ int ret;
+
+ /* Initialize the DMA engine */
+ ddev->dev = xscd->dev;
+ dma_set_mask(xscd->dev, DMA_BIT_MASK(32));
+
+ INIT_LIST_HEAD(&ddev->channels);
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+ ddev->device_alloc_chan_resources = xscd_dma_alloc_chan_resources;
+ ddev->device_free_chan_resources = xscd_dma_free_chan_resources;
+ ddev->device_tx_status = xscd_dma_tx_status;
+ ddev->device_issue_pending = xscd_dma_issue_pending;
+ ddev->device_terminate_all = xscd_dma_terminate_all;
+ ddev->device_prep_interleaved_dma = xscd_dma_prep_interleaved;
+
+ for (chan_id = 0; chan_id < xscd->num_streams; chan_id++)
+ xscd_dma_chan_init(xscd, chan_id);
+
+ ret = dma_async_device_register(ddev);
+ if (ret) {
+ dev_err(xscd->dev, "failed to register the dma device\n");
+ goto error;
+ }
+
+ ret = of_dma_controller_register(xscd->dev->of_node,
+ of_scdma_xilinx_xlate, xscd);
+ if (ret) {
+ dev_err(xscd->dev, "failed to register DMA to DT DMA helper\n");
+ goto error_of_dma;
+ }
+
+ dev_info(xscd->dev, "Xilinx Scene Change DMA is initialized!\n");
+ return 0;
+
+error_of_dma:
+ dma_async_device_unregister(ddev);
+
+error:
+ for (chan_id = 0; chan_id < xscd->num_streams; chan_id++)
+ xscd_dma_chan_remove(xscd->channels[chan_id]);
+
+ return ret;
+}
+
+/**
+ * xscd_dma_cleanup - Clean up the SCD DMA engine
+ * @xscd: Pointer to the SCD device structure
+ *
+ * This function is the counterpart of xscd_dma_init() and cleans up the
+ * resources related to the DMA engine.
+ */
+void xscd_dma_cleanup(struct xscd_device *xscd)
+{
+ dma_async_device_unregister(&xscd->dma_device);
+ of_dma_controller_free(xscd->dev->of_node);
+}
diff --git a/drivers/media/platform/xilinx/xilinx-scenechange.c b/drivers/media/platform/xilinx/xilinx-scenechange.c
new file mode 100644
index 000000000000..38db9c7d37b2
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-scenechange.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Scene Change Detection driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
+ * Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "xilinx-scenechange.h"
+
+static irqreturn_t xscd_irq_handler(int irq, void *data)
+{
+ struct xscd_device *xscd = (struct xscd_device *)data;
+ u32 status;
+
+ status = xscd_read(xscd->iomem, XSCD_ISR_OFFSET);
+ if (!(status & XSCD_IE_AP_DONE))
+ return IRQ_NONE;
+
+ xscd_write(xscd->iomem, XSCD_ISR_OFFSET, XSCD_IE_AP_DONE);
+
+ if (xscd->memory_based)
+ xscd_dma_irq_handler(xscd);
+ else
+ xscd_chan_event_notify(&xscd->chans[0]);
+
+ return IRQ_HANDLED;
+}
+
+static int xscd_init_resources(struct xscd_device *xscd)
+{
+ struct platform_device *pdev = to_platform_device(xscd->dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xscd->iomem = devm_ioremap_resource(xscd->dev, res);
+ if (IS_ERR(xscd->iomem))
+ return PTR_ERR(xscd->iomem);
+
+ xscd->irq = platform_get_irq(pdev, 0);
+ if (xscd->irq < 0) {
+ dev_err(xscd->dev, "No valid irq found\n");
+		return xscd->irq;
+ }
+
+ xscd->clk = devm_clk_get(xscd->dev, NULL);
+ if (IS_ERR(xscd->clk))
+ return PTR_ERR(xscd->clk);
+
+	return clk_prepare_enable(xscd->clk);
+}
+
+static int xscd_parse_of(struct xscd_device *xscd)
+{
+ struct device *dev = xscd->dev;
+ struct device_node *node = xscd->dev->of_node;
+ int ret;
+
+ xscd->memory_based = of_property_read_bool(node, "xlnx,memorybased");
+ xscd->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(xscd->rst_gpio)) {
+ if (PTR_ERR(xscd->rst_gpio) != -EPROBE_DEFER)
+ dev_err(dev, "Reset GPIO not setup in DT\n");
+
+ return PTR_ERR(xscd->rst_gpio);
+ }
+
+ ret = of_property_read_u32(node, "xlnx,numstreams",
+ &xscd->num_streams);
+ if (ret < 0)
+ return ret;
+
+ if (!xscd->memory_based && xscd->num_streams != 1) {
+ dev_err(dev, "Stream-based mode only supports one stream\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int xscd_probe(struct platform_device *pdev)
+{
+ struct xscd_device *xscd;
+ struct device_node *subdev_node;
+ unsigned int id;
+ int ret;
+
+ xscd = devm_kzalloc(&pdev->dev, sizeof(*xscd), GFP_KERNEL);
+ if (!xscd)
+ return -ENOMEM;
+
+ spin_lock_init(&xscd->lock);
+
+ xscd->dev = &pdev->dev;
+ platform_set_drvdata(pdev, xscd);
+
+ ret = xscd_parse_of(xscd);
+ if (ret < 0)
+ return ret;
+
+ ret = xscd_init_resources(xscd);
+ if (ret < 0)
+ return ret;
+
+ /* Reset Scene Change Detection IP */
+ gpiod_set_value_cansleep(xscd->rst_gpio, XSCD_RESET_ASSERT);
+ gpiod_set_value_cansleep(xscd->rst_gpio, XSCD_RESET_DEASSERT);
+
+ /* Initialize the channels. */
+ xscd->chans = devm_kcalloc(xscd->dev, xscd->num_streams,
+ sizeof(*xscd->chans), GFP_KERNEL);
+ if (!xscd->chans)
+ return -ENOMEM;
+
+ id = 0;
+ for_each_child_of_node(xscd->dev->of_node, subdev_node) {
+ if (id >= xscd->num_streams) {
+ dev_warn(&pdev->dev,
+ "Too many channels, limiting to %u\n",
+ xscd->num_streams);
+ of_node_put(subdev_node);
+ break;
+ }
+
+ ret = xscd_chan_init(xscd, id, subdev_node);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Failed to initialize channel %u\n",
+				id);
+			of_node_put(subdev_node);
+			return ret;
+		}
+
+ id++;
+ }
+
+	/* Initialize the DMA engine. */
+	ret = xscd_dma_init(xscd);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to initialize the DMA\n");
+		return ret;
+	}
+
+	ret = devm_request_irq(xscd->dev, xscd->irq, xscd_irq_handler,
+			       IRQF_SHARED, dev_name(xscd->dev), xscd);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to request IRQ\n");
+		return ret;
+	}
+
+ dev_info(xscd->dev, "scene change detect device found!\n");
+ return 0;
+}
+
+static int xscd_remove(struct platform_device *pdev)
+{
+ struct xscd_device *xscd = platform_get_drvdata(pdev);
+ struct device_node *subdev_node;
+ unsigned int id = 0;
+
+ for_each_child_of_node(xscd->dev->of_node, subdev_node) {
+ xscd_chan_cleanup(xscd, id, subdev_node);
+ id++;
+ }
+
+ xscd_dma_cleanup(xscd);
+ clk_disable_unprepare(xscd->clk);
+
+ return 0;
+}
+
+static const struct of_device_id xscd_of_id_table[] = {
+ { .compatible = "xlnx,v-scd" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xscd_of_id_table);
+
+static struct platform_driver xscd_driver = {
+ .driver = {
+ .name = "xilinx-scd",
+ .of_match_table = xscd_of_id_table,
+ },
+ .probe = xscd_probe,
+ .remove = xscd_remove,
+};
+
+module_platform_driver(xscd_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx Scene Change Detection");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-scenechange.h b/drivers/media/platform/xilinx/xilinx-scenechange.h
new file mode 100644
index 000000000000..5cc9ce54bfc4
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-scenechange.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Scene Change Detection driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
+ * Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
+ */
+
+#ifndef _XILINX_SCENECHANGE_H_
+#define _XILINX_SCENECHANGE_H_
+
+#include <linux/bitops.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+struct clk;
+struct device;
+struct device_node;
+struct gpio_desc;
+
+/* Register/Descriptor Offsets */
+#define XSCD_CTRL_OFFSET 0x000
+#define XSCD_CTRL_AP_START BIT(0)
+#define XSCD_CTRL_AP_DONE BIT(1)
+#define XSCD_CTRL_AP_IDLE BIT(2)
+#define XSCD_CTRL_AP_READY BIT(3)
+#define XSCD_CTRL_AUTO_RESTART BIT(7)
+
+#define XSCD_GIE_OFFSET 0x004
+#define XSCD_GIE_EN BIT(0)
+
+#define XSCD_IE_OFFSET 0x008
+#define XSCD_IE_AP_DONE BIT(0)
+#define XSCD_IE_AP_READY BIT(1)
+
+#define XSCD_ISR_OFFSET 0x00c
+#define XSCD_WIDTH_OFFSET 0x010
+#define XSCD_HEIGHT_OFFSET 0x018
+#define XSCD_STRIDE_OFFSET 0x020
+#define XSCD_VID_FMT_OFFSET 0x028
+#define XSCD_VID_FMT_RGB 0
+#define XSCD_VID_FMT_YUV_444 1
+#define XSCD_VID_FMT_YUV_422 2
+#define XSCD_VID_FMT_YUV_420 3
+#define XSCD_VID_FMT_Y8 24
+#define XSCD_VID_FMT_Y10 25
+
+#define XSCD_SUBSAMPLE_OFFSET 0x030
+#define XSCD_SAD_OFFSET 0x038
+#define XSCD_ADDR_OFFSET 0x040
+#define XSCD_CHAN_OFFSET 0x100
+#define XSCD_CHAN_EN_OFFSET 0x780
+
+#define XSCD_MAX_CHANNELS 8
+
+#define XSCD_RESET_DEASSERT (0)
+#define XSCD_RESET_ASSERT (1)
+
+/****************************** PROTOTYPES ************************************/
+
+struct xscd_device;
+
+/**
+ * struct xscd_dma_desc - SCD DMA software descriptor
+ * @luma_plane_addr: Luma plane buffer address
+ * @vsize: height (vertical size) of the luma frame
+ * @hsize: width (horizontal size) of the luma frame
+ * @stride: stride of the luma frame
+ */
+struct xscd_dma_desc {
+ dma_addr_t luma_plane_addr;
+ u32 vsize;
+ u32 hsize;
+ u32 stride;
+};
+
+/**
+ * struct xscd_dma_tx_descriptor - Per Transaction structure
+ * @async_tx: Async transaction descriptor
+ * @sw: Software Descriptor
+ * @node: Node in the channel descriptor list
+ */
+struct xscd_dma_tx_descriptor {
+ struct dma_async_tx_descriptor async_tx;
+ struct xscd_dma_desc sw;
+ struct list_head node;
+};
+
+static inline struct xscd_dma_tx_descriptor *
+to_xscd_dma_tx_descriptor(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct xscd_dma_tx_descriptor, async_tx);
+}
+
+/**
+ * struct xscd_dma_chan - DMA Channel structure
+ * @xscd: SCD device
+ * @iomem: I/O memory address of the channel registers
+ * @id: scene change channel ID
+ * @common: DMA common channel
+ * @tasklet: Cleanup work after irq
+ * @lock: Protects pending_list, done_list, active_desc, enabled and running
+ * @pending_list: Descriptors waiting
+ * @done_list: Complete descriptors
+ * @active_desc: Currently active buffer being read/written to
+ * @enabled: Channel is enabled
+ * @running: Channel is running
+ * @wait: Wait queue to wait for the channel to stop
+ */
+struct xscd_dma_chan {
+ struct xscd_device *xscd;
+ void __iomem *iomem;
+ unsigned int id;
+
+ struct dma_chan common;
+ struct tasklet_struct tasklet;
+
+ spinlock_t lock;
+ struct list_head pending_list;
+ struct list_head done_list;
+ struct xscd_dma_tx_descriptor *active_desc;
+ unsigned int enabled;
+ unsigned int running;
+ wait_queue_head_t wait;
+};
+
+static inline struct xscd_dma_chan *to_xscd_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct xscd_dma_chan, common);
+}
+
+/**
+ * struct xscd_chan - Video Stream structure
+ * @id: scene change channel ID
+ * @threshold: scene change detection threshold
+ * @iomem: I/O memory address of the channel registers
+ * @xscd: SCD device
+ * @subdev: V4L2 subdevice
+ * @ctrl_handler: V4L2 control handler
+ * @pads: media pads
+ * @format: active V4L2 media bus format for the pad
+ * @event: scene change event
+ * @dmachan: dma channel part of the scenechange stream
+ * @lock: lock to protect active stream count variable
+ */
+struct xscd_chan {
+ int id;
+ int threshold;
+ void __iomem *iomem;
+ struct xscd_device *xscd;
+ struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_event event;
+ struct xscd_dma_chan dmachan;
+
+ /* Lock to protect active stream count */
+ struct mutex lock;
+};
+
+static inline struct xscd_chan *to_xscd_chan(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xscd_chan, subdev);
+}
+
+/**
+ * struct xscd_device - Xilinx Scene Change Detection device structure
+ * @dev: (OF) device
+ * @iomem: device I/O register space remapped to kernel virtual memory
+ * @rst_gpio: reset GPIO
+ * @clk: video core clock
+ * @irq: Device IRQ
+ * @memory_based: Flag to identify memory based mode
+ * @num_streams: Number of streams in the design
+ * @chans: video stream instances
+ * @dma_device: DMA device structure
+ * @channels: DMA channels
+ * @lock: Protects the running field
+ * @running: True when the SCD core is running
+ */
+struct xscd_device {
+ struct device *dev;
+ void __iomem *iomem;
+ struct gpio_desc *rst_gpio;
+ struct clk *clk;
+ int irq;
+
+ u8 memory_based;
+ int num_streams;
+
+ struct xscd_chan *chans;
+
+ struct dma_device dma_device;
+ struct xscd_dma_chan *channels[XSCD_MAX_CHANNELS];
+
+ /* This lock is to protect the running field */
+ spinlock_t lock;
+ u8 running;
+};
+
+/*
+ * Register related operations
+ */
+static inline u32 xscd_read(void __iomem *iomem, u32 addr)
+{
+ return ioread32(iomem + addr);
+}
+
+static inline void xscd_write(void __iomem *iomem, u32 addr, u32 value)
+{
+ iowrite32(value, iomem + addr);
+}
+
+static inline void xscd_clr(void __iomem *iomem, u32 addr, u32 clr)
+{
+ xscd_write(iomem, addr, xscd_read(iomem, addr) & ~clr);
+}
+
+static inline void xscd_set(void __iomem *iomem, u32 addr, u32 set)
+{
+ xscd_write(iomem, addr, xscd_read(iomem, addr) | set);
+}
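As a usage illustration, a hedged sketch of a per-channel enable helper built on these read-modify-write accessors; that XSCD_CHAN_EN_OFFSET holds one enable bit per channel is an assumption of the sketch, not something this patch states:

/* Hypothetical sketch: toggle one channel bit with the RMW helpers. */
static inline void xscd_chan_enable_bit(struct xscd_device *xscd,
					unsigned int id, bool enable)
{
	if (enable)
		xscd_set(xscd->iomem, XSCD_CHAN_EN_OFFSET, BIT(id));
	else
		xscd_clr(xscd->iomem, XSCD_CHAN_EN_OFFSET, BIT(id));
}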
+
+void xscd_dma_enable_channel(struct xscd_dma_chan *chan, bool enable);
+void xscd_dma_irq_handler(struct xscd_device *xscd);
+int xscd_dma_init(struct xscd_device *xscd);
+void xscd_dma_cleanup(struct xscd_device *xscd);
+
+void xscd_chan_event_notify(struct xscd_chan *chan);
+int xscd_chan_init(struct xscd_device *xscd, unsigned int chan_id,
+ struct device_node *node);
+void xscd_chan_cleanup(struct xscd_device *xscd, unsigned int chan_id,
+ struct device_node *node);
+#endif
diff --git a/drivers/media/platform/xilinx/xilinx-sdirxss.c b/drivers/media/platform/xilinx/xilinx-sdirxss.c
new file mode 100644
index 000000000000..4af98631a1e9
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-sdirxss.c
@@ -0,0 +1,2048 @@
+/*
+ * Xilinx SDI Rx Subsystem
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Contacts: Vishal Sagar <vsagar@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/media/xilinx-vip.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/v4l2-dv-timings.h>
+#include <linux/v4l2-subdev.h>
+#include <linux/xilinx-sdirxss.h>
+#include <linux/xilinx-v4l2-controls.h>
+#include <media/media-entity.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include "xilinx-vip.h"
+
+/*
+ * SDI Rx register map, bitmask and offsets
+ */
+#define XSDIRX_RST_CTRL_REG 0x00
+#define XSDIRX_MDL_CTRL_REG 0x04
+#define XSDIRX_GLBL_IER_REG 0x0C
+#define XSDIRX_ISR_REG 0x10
+#define XSDIRX_IER_REG 0x14
+#define XSDIRX_ST352_VALID_REG 0x18
+#define XSDIRX_ST352_DS1_REG 0x1C
+#define XSDIRX_ST352_DS3_REG 0x20
+#define XSDIRX_ST352_DS5_REG 0x24
+#define XSDIRX_ST352_DS7_REG 0x28
+#define XSDIRX_ST352_DS9_REG 0x2C
+#define XSDIRX_ST352_DS11_REG 0x30
+#define XSDIRX_ST352_DS13_REG 0x34
+#define XSDIRX_ST352_DS15_REG 0x38
+#define XSDIRX_VERSION_REG 0x3C
+#define XSDIRX_SS_CONFIG_REG 0x40
+#define XSDIRX_MODE_DET_STAT_REG 0x44
+#define XSDIRX_TS_DET_STAT_REG 0x48
+#define XSDIRX_EDH_STAT_REG 0x4C
+#define XSDIRX_EDH_ERRCNT_EN_REG 0x50
+#define XSDIRX_EDH_ERRCNT_REG 0x54
+#define XSDIRX_CRC_ERRCNT_REG 0x58
+#define XSDIRX_VID_LOCK_WINDOW_REG 0x5C
+#define XSDIRX_SB_RX_STS_REG 0x60
+
+#define XSDIRX_RST_CTRL_SS_EN_MASK BIT(0)
+#define XSDIRX_RST_CTRL_SRST_MASK BIT(1)
+#define XSDIRX_RST_CTRL_RST_CRC_ERRCNT_MASK BIT(2)
+#define XSDIRX_RST_CTRL_RST_EDH_ERRCNT_MASK BIT(3)
+#define XSDIRX_RST_CTRL_SDIRX_BRIDGE_ENB_MASK BIT(8)
+#define XSDIRX_RST_CTRL_VIDIN_AXI4S_MOD_ENB_MASK BIT(9)
+
+#define XSDIRX_MDL_CTRL_FRM_EN_MASK BIT(4)
+#define XSDIRX_MDL_CTRL_MODE_DET_EN_MASK BIT(5)
+#define XSDIRX_MDL_CTRL_MODE_HD_EN_MASK BIT(8)
+#define XSDIRX_MDL_CTRL_MODE_SD_EN_MASK BIT(9)
+#define XSDIRX_MDL_CTRL_MODE_3G_EN_MASK BIT(10)
+#define XSDIRX_MDL_CTRL_MODE_6G_EN_MASK BIT(11)
+#define XSDIRX_MDL_CTRL_MODE_12GI_EN_MASK BIT(12)
+#define XSDIRX_MDL_CTRL_MODE_12GF_EN_MASK BIT(13)
+#define XSDIRX_MDL_CTRL_MODE_AUTO_DET_MASK GENMASK(13, 8)
+
+#define XSDIRX_MDL_CTRL_FORCED_MODE_OFFSET 16
+#define XSDIRX_MDL_CTRL_FORCED_MODE_MASK GENMASK(18, 16)
+
+#define XSDIRX_GLBL_INTR_EN_MASK BIT(0)
+
+#define XSDIRX_INTR_VIDLOCK_MASK BIT(0)
+#define XSDIRX_INTR_VIDUNLOCK_MASK BIT(1)
+#define XSDIRX_INTR_OVERFLOW_MASK BIT(9)
+#define XSDIRX_INTR_UNDERFLOW_MASK BIT(10)
+
+#define XSDIRX_INTR_ALL_MASK (XSDIRX_INTR_VIDLOCK_MASK |\
+ XSDIRX_INTR_VIDUNLOCK_MASK |\
+ XSDIRX_INTR_OVERFLOW_MASK |\
+ XSDIRX_INTR_UNDERFLOW_MASK)
+
+#define XSDIRX_ST352_VALID_DS1_MASK BIT(0)
+#define XSDIRX_ST352_VALID_DS3_MASK BIT(1)
+#define XSDIRX_ST352_VALID_DS5_MASK BIT(2)
+#define XSDIRX_ST352_VALID_DS7_MASK BIT(3)
+#define XSDIRX_ST352_VALID_DS9_MASK BIT(4)
+#define XSDIRX_ST352_VALID_DS11_MASK BIT(5)
+#define XSDIRX_ST352_VALID_DS13_MASK BIT(6)
+#define XSDIRX_ST352_VALID_DS15_MASK BIT(7)
+
+#define XSDIRX_MODE_DET_STAT_RX_MODE_MASK GENMASK(2, 0)
+#define XSDIRX_MODE_DET_STAT_MODE_LOCK_MASK BIT(3)
+#define XSDIRX_MODE_DET_STAT_ACT_STREAM_MASK GENMASK(6, 4)
+#define XSDIRX_MODE_DET_STAT_ACT_STREAM_OFFSET 4
+#define XSDIRX_MODE_DET_STAT_LVLB_3G_MASK BIT(7)
+
+#define XSDIRX_ACTIVE_STREAMS_1 0x0
+#define XSDIRX_ACTIVE_STREAMS_2 0x1
+#define XSDIRX_ACTIVE_STREAMS_4 0x2
+#define XSDIRX_ACTIVE_STREAMS_8 0x3
+#define XSDIRX_ACTIVE_STREAMS_16 0x4
+
+#define XSDIRX_TS_DET_STAT_LOCKED_MASK BIT(0)
+#define XSDIRX_TS_DET_STAT_SCAN_MASK BIT(1)
+#define XSDIRX_TS_DET_STAT_SCAN_OFFSET (1)
+#define XSDIRX_TS_DET_STAT_FAMILY_MASK GENMASK(7, 4)
+#define XSDIRX_TS_DET_STAT_FAMILY_OFFSET (4)
+#define XSDIRX_TS_DET_STAT_RATE_MASK GENMASK(11, 8)
+#define XSDIRX_TS_DET_STAT_RATE_OFFSET (8)
+
+#define XSDIRX_TS_DET_STAT_RATE_NONE 0x0
+#define XSDIRX_TS_DET_STAT_RATE_23_98HZ 0x2
+#define XSDIRX_TS_DET_STAT_RATE_24HZ 0x3
+#define XSDIRX_TS_DET_STAT_RATE_47_95HZ 0x4
+#define XSDIRX_TS_DET_STAT_RATE_25HZ 0x5
+#define XSDIRX_TS_DET_STAT_RATE_29_97HZ 0x6
+#define XSDIRX_TS_DET_STAT_RATE_30HZ 0x7
+#define XSDIRX_TS_DET_STAT_RATE_48HZ 0x8
+#define XSDIRX_TS_DET_STAT_RATE_50HZ 0x9
+#define XSDIRX_TS_DET_STAT_RATE_59_94HZ 0xA
+#define XSDIRX_TS_DET_STAT_RATE_60HZ 0xB
+
+#define XSDIRX_EDH_STAT_EDH_AP_MASK BIT(0)
+#define XSDIRX_EDH_STAT_EDH_FF_MASK BIT(1)
+#define XSDIRX_EDH_STAT_EDH_ANC_MASK BIT(2)
+#define XSDIRX_EDH_STAT_AP_FLAG_MASK GENMASK(8, 4)
+#define XSDIRX_EDH_STAT_FF_FLAG_MASK GENMASK(13, 9)
+#define XSDIRX_EDH_STAT_ANC_FLAG_MASK GENMASK(18, 14)
+#define XSDIRX_EDH_STAT_PKT_FLAG_MASK GENMASK(22, 19)
+
+#define XSDIRX_EDH_ERRCNT_COUNT_MASK GENMASK(15, 0)
+
+#define XSDIRX_CRC_ERRCNT_COUNT_MASK GENMASK(31, 16)
+#define XSDIRX_CRC_ERRCNT_DS_CRC_MASK GENMASK(15, 0)
+
+#define XSDIRX_VERSION_REV_MASK GENMASK(7, 0)
+#define XSDIRX_VERSION_PATCHID_MASK GENMASK(11, 8)
+#define XSDIRX_VERSION_VER_REV_MASK GENMASK(15, 12)
+#define XSDIRX_VERSION_VER_MIN_MASK GENMASK(23, 16)
+#define XSDIRX_VERSION_VER_MAJ_MASK GENMASK(31, 24)
+
+#define XSDIRX_SS_CONFIG_EDH_INCLUDED_MASK BIT(1)
+
+#define XSDIRX_STAT_SB_RX_TDATA_CHANGE_DONE_MASK BIT(0)
+#define XSDIRX_STAT_SB_RX_TDATA_CHANGE_FAIL_MASK BIT(1)
+#define XSDIRX_STAT_SB_RX_TDATA_GT_RESETDONE_MASK BIT(2)
+#define XSDIRX_STAT_SB_RX_TDATA_GT_BITRATE_MASK BIT(3)
+
+#define XSDIRX_DEFAULT_WIDTH (1920)
+#define XSDIRX_DEFAULT_HEIGHT (1080)
+
+#define XSDIRX_MAX_STR_LENGTH 16
+
+#define XSDIRXSS_SDI_STD_3G 0
+#define XSDIRXSS_SDI_STD_6G 1
+#define XSDIRXSS_SDI_STD_12G_8DS 2
+
+#define XSDIRX_DEFAULT_VIDEO_LOCK_WINDOW 0x3000
+
+#define XSDIRX_MODE_HD_MASK 0x0
+#define XSDIRX_MODE_SD_MASK 0x1
+#define XSDIRX_MODE_3G_MASK 0x2
+#define XSDIRX_MODE_6G_MASK 0x4
+#define XSDIRX_MODE_12GI_MASK 0x5
+#define XSDIRX_MODE_12GF_MASK 0x6
+
+/* Maximum number of events per file handle. */
+#define XSDIRX_MAX_EVENTS (128)
+
+/* ST352 related macros */
+#define XST352_PAYLOAD_BYTE_MASK 0xFF
+#define XST352_PAYLOAD_BYTE1_SHIFT 0
+#define XST352_PAYLOAD_BYTE2_SHIFT 8
+#define XST352_PAYLOAD_BYTE3_SHIFT 16
+#define XST352_PAYLOAD_BYTE4_SHIFT 24
+
+#define XST352_BYTE1_ST292_1x720L_1_5G 0x84
+#define XST352_BYTE1_ST292_1x1080L_1_5G 0x85
+#define XST352_BYTE1_ST425_2008_750L_3GB 0x88
+#define XST352_BYTE1_ST425_2008_1125L_3GA 0x89
+#define XST352_BYTE1_ST372_DL_3GB 0x8A
+#define XST352_BYTE1_ST372_2x720L_3GB 0x8B
+#define XST352_BYTE1_ST372_2x1080L_3GB 0x8C
+#define XST352_BYTE1_ST2081_10_2160L_6G 0xC0
+#define XST352_BYTE1_ST2081_10_DL_2160L_6G 0xC2
+#define XST352_BYTE1_ST2082_10_2160L_12G 0xCE
+
+#define XST352_BYTE2_TS_TYPE_MASK BIT(15)
+#define XST352_BYTE2_TS_TYPE_OFFSET 15
+#define XST352_BYTE2_PIC_TYPE_MASK BIT(14)
+#define XST352_BYTE2_PIC_TYPE_OFFSET 14
+#define XST352_BYTE2_TS_PIC_TYPE_INTERLACED 0
+#define XST352_BYTE2_TS_PIC_TYPE_PROGRESSIVE 1
+
+#define XST352_BYTE2_FPS_MASK 0xF
+#define XST352_BYTE2_FPS_SHIFT 8
+#define XST352_BYTE2_FPS_24F 0x2
+#define XST352_BYTE2_FPS_24 0x3
+#define XST352_BYTE2_FPS_48F 0x4
+#define XST352_BYTE2_FPS_25 0x5
+#define XST352_BYTE2_FPS_30F 0x6
+#define XST352_BYTE2_FPS_30 0x7
+#define XST352_BYTE2_FPS_48 0x8
+#define XST352_BYTE2_FPS_50 0x9
+#define XST352_BYTE2_FPS_60F 0xA
+#define XST352_BYTE2_FPS_60 0xB
+/* Table 4 ST 2081-10:2015 */
+#define XST352_BYTE2_FPS_96 0xC
+#define XST352_BYTE2_FPS_100 0xD
+#define XST352_BYTE2_FPS_120 0xE
+#define XST352_BYTE2_FPS_120F 0xF
+
+#define XST352_BYTE3_ACT_LUMA_COUNT_MASK BIT(22)
+#define XST352_BYTE3_ACT_LUMA_COUNT_OFFSET 22
+
+#define XST352_BYTE3_COLOR_FORMAT_MASK GENMASK(19, 16)
+#define XST352_BYTE3_COLOR_FORMAT_OFFSET 16
+#define XST352_BYTE3_COLOR_FORMAT_422 0x0
+#define XST352_BYTE3_COLOR_FORMAT_420 0x3
+
+/**
+ * enum sdi_family_enc - SDI Transport Video Format Detected with Active Pixels
+ * @XSDIRX_SMPTE_ST_274: SMPTE ST 274 detected with AP 1920x1080
+ * @XSDIRX_SMPTE_ST_296: SMPTE ST 296 detected with AP 1280x720
+ * @XSDIRX_SMPTE_ST_2048_2: SMPTE ST 2048-2 detected with AP 2048x1080
+ * @XSDIRX_SMPTE_ST_295: SMPTE ST 295 detected with AP 1920x1080
+ * @XSDIRX_NTSC: NTSC encoding detected with AP 720x486
+ * @XSDIRX_PAL: PAL encoding detected with AP 720x576
+ * @XSDIRX_TS_UNKNOWN: Unknown SMPTE Transport family type
+ */
+enum sdi_family_enc {
+ XSDIRX_SMPTE_ST_274 = 0,
+ XSDIRX_SMPTE_ST_296 = 1,
+ XSDIRX_SMPTE_ST_2048_2 = 2,
+ XSDIRX_SMPTE_ST_295 = 3,
+ XSDIRX_NTSC = 8,
+ XSDIRX_PAL = 9,
+ XSDIRX_TS_UNKNOWN = 15
+};
+
+/**
+ * struct xsdirxss_core - Core configuration SDI Rx Subsystem device structure
+ * @dev: Platform structure
+ * @iomem: Base address of subsystem
+ * @irq: requested irq number
+ * @include_edh: EDH processor presence
+ * @mode: 3G/6G/12G mode
+ * @clks: array of clocks
+ * @num_clks: number of clocks
+ */
+struct xsdirxss_core {
+ struct device *dev;
+ void __iomem *iomem;
+ int irq;
+ bool include_edh;
+ int mode;
+ struct clk_bulk_data *clks;
+ int num_clks;
+};
+
+/**
+ * struct xsdirxss_state - SDI Rx Subsystem device structure
+ * @core: Core structure for the SDI Rx Subsystem
+ * @subdev: The v4l2 subdev structure
+ * @ctrl_handler: control handler
+ * @event: Holds the video unlock event
+ * @format: Active V4L2 format on source pad
+ * @default_format: default V4L2 media bus format
+ * @frame_interval: Captures the frame rate
+ * @vip_format: format information corresponding to the active format
+ * @pad: source media pad
+ * @streaming: Flag for storing streaming state
+ * @vidlocked: Flag indicating SDI Rx has locked onto video stream
+ * @ts_is_interlaced: Flag indicating Transport Stream is interlaced.
+ *
+ * This structure contains the device driver related parameters
+ */
+struct xsdirxss_state {
+ struct xsdirxss_core core;
+ struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_event event;
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_mbus_framefmt default_format;
+ struct v4l2_fract frame_interval;
+ const struct xvip_video_format *vip_format;
+ struct media_pad pad;
+ bool streaming;
+ bool vidlocked;
+ bool ts_is_interlaced;
+};
+
+/* List of clocks required by UHD-SDI Rx subsystem */
+static const char * const xsdirxss_clks[] = {
+ "s_axi_aclk", "sdi_rx_clk", "video_out_clk",
+};
+
+static const u32 xsdirxss_mbus_fmts[] = {
+ MEDIA_BUS_FMT_UYVY10_1X20,
+ MEDIA_BUS_FMT_VYYUYY10_4X20,
+};
+
+static const struct v4l2_dv_timings fmt_cap[] = {
+ V4L2_DV_BT_SDI_720X487I60,
+ V4L2_DV_BT_CEA_720X576I50,
+ V4L2_DV_BT_CEA_1280X720P24,
+ V4L2_DV_BT_CEA_1280X720P25,
+ V4L2_DV_BT_CEA_1280X720P30,
+ V4L2_DV_BT_CEA_1280X720P50,
+ V4L2_DV_BT_CEA_1280X720P60,
+ V4L2_DV_BT_CEA_1920X1080P24,
+ V4L2_DV_BT_CEA_1920X1080P30,
+ V4L2_DV_BT_CEA_1920X1080I50,
+ V4L2_DV_BT_CEA_1920X1080I60,
+ V4L2_DV_BT_CEA_1920X1080P50,
+ V4L2_DV_BT_CEA_1920X1080P60,
+ V4L2_DV_BT_CEA_3840X2160P24,
+ V4L2_DV_BT_CEA_3840X2160P30,
+ V4L2_DV_BT_CEA_3840X2160P50,
+ V4L2_DV_BT_CEA_3840X2160P60,
+ V4L2_DV_BT_CEA_4096X2160P24,
+ V4L2_DV_BT_CEA_4096X2160P25,
+ V4L2_DV_BT_CEA_4096X2160P30,
+ V4L2_DV_BT_CEA_4096X2160P50,
+ V4L2_DV_BT_CEA_4096X2160P60,
+};
+
+struct xsdirxss_dv_map {
+ u32 width;
+ u32 height;
+ u32 fps;
+ struct v4l2_dv_timings format;
+};
+
+static const struct xsdirxss_dv_map xsdirxss_dv_timings[] = {
+ /* SD - 720x487i60 */
+ { 720, 243, 30, V4L2_DV_BT_SDI_720X487I60 },
+ /* SD - 720x576i50 */
+ { 720, 288, 25, V4L2_DV_BT_CEA_720X576I50 },
+ /* HD - 1280x720p23.98 */
+ /* HD - 1280x720p24 */
+ { 1280, 720, 24, V4L2_DV_BT_CEA_1280X720P24 },
+ /* HD - 1280x720p25 */
+ { 1280, 720, 25, V4L2_DV_BT_CEA_1280X720P25 },
+ /* HD - 1280x720p29.97 */
+ /* HD - 1280x720p30 */
+ { 1280, 720, 30, V4L2_DV_BT_CEA_1280X720P30 },
+ /* HD - 1280x720p50 */
+ { 1280, 720, 50, V4L2_DV_BT_CEA_1280X720P50 },
+ /* HD - 1280x720p59.94 */
+ /* HD - 1280x720p60 */
+ { 1280, 720, 60, V4L2_DV_BT_CEA_1280X720P60 },
+ /* HD - 1920x1080p23.98 */
+ /* HD - 1920x1080p24 */
+ { 1920, 1080, 24, V4L2_DV_BT_CEA_1920X1080P24 },
+ /* HD - 1920x1080p25 */
+ { 1920, 1080, 25, V4L2_DV_BT_CEA_1920X1080P25 },
+ /* HD - 1920x1080p29.97 */
+ /* HD - 1920x1080p30 */
+ { 1920, 1080, 30, V4L2_DV_BT_CEA_1920X1080P30 },
+
+ /* TODO add below timings */
+ /* HD - 2048x1080p23.98 */
+ /* HD - 2048x1080p24 */
+ /* HD - 2048x1080p25 */
+ /* HD - 2048x1080p29.97 */
+ /* HD - 2048x1080p30 */
+ /* HD - 1920x1080i47.95 */
+ /* HD - 1920x1080i48 */
+
+ /* HD - 1920x1080i50 */
+ { 1920, 540, 25, V4L2_DV_BT_CEA_1920X1080I50 },
+ /* HD - 1920x1080i59.94 */
+ /* HD - 1920x1080i60 */
+ { 1920, 540, 30, V4L2_DV_BT_CEA_1920X1080I60 },
+
+ /* TODO add below timings */
+ /* HD - 2048x1080i47.95 */
+ /* HD - 2048x1080i48 */
+ /* HD - 2048x1080i50 */
+ /* HD - 2048x1080i59.94 */
+ /* HD - 2048x1080i60 */
+ /* 3G - 1920x1080p47.95 */
+ /* 3G - 1920x1080p48 */
+
+ /* 3G - 1920x1080p50 148.5 */
+ { 1920, 1080, 50, V4L2_DV_BT_CEA_1920X1080P50 },
+ /* 3G - 1920x1080p59.94 148.5/1.001 */
+ /* 3G - 1920x1080p60 148.5 */
+ { 1920, 1080, 60, V4L2_DV_BT_CEA_1920X1080P60 },
+
+ /* TODO add below timings */
+ /* 3G - 2048x1080p47.95 */
+ /* 3G - 2048x1080p48 */
+ /* 3G - 2048x1080p50 */
+ /* 3G - 2048x1080p59.94 */
+ /* 3G - 2048x1080p60 */
+
+ /* 6G - 3840X2160p23.98 */
+ /* 6G - 3840X2160p24 */
+ { 3840, 2160, 24, V4L2_DV_BT_CEA_3840X2160P24 },
+ /* 6G - 3840X2160p25 */
+ { 3840, 2160, 25, V4L2_DV_BT_CEA_3840X2160P25 },
+ /* 6G - 3840X2160p29.97 */
+ /* 6G - 3840X2160p30 */
+ { 3840, 2160, 30, V4L2_DV_BT_CEA_3840X2160P30 },
+ /* 6G - 4096X2160p23.98 */
+ /* 6G - 4096X2160p24 */
+ { 4096, 2160, 24, V4L2_DV_BT_CEA_4096X2160P24 },
+ /* 6G - 4096X2160p25 */
+ { 4096, 2160, 25, V4L2_DV_BT_CEA_4096X2160P25 },
+ /* 6G - 4096X2160p29.97 */
+ /* 6G - 4096X2160p30 */
+ { 4096, 2160, 30, V4L2_DV_BT_CEA_4096X2160P30 },
+ /* TODO add below timings */
+ /* 12G - 3840X2160p47.95 */
+ /* 12G - 3840X2160p48 */
+
+ /* 12G - 3840X2160p50 */
+ { 3840, 2160, 50, V4L2_DV_BT_CEA_3840X2160P50 },
+ /* 12G - 3840X2160p59.94 */
+ /* 12G - 3840X2160p60 */
+ { 3840, 2160, 60, V4L2_DV_BT_CEA_3840X2160P60 },
+
+ /* TODO add below timings */
+ /* 12G - 4096X2160p47.95 */
+ /* 12G - 4096X2160p48 */
+
+ /* 12G - 4096X2160p50 */
+ { 4096, 2160, 50, V4L2_DV_BT_CEA_4096X2160P50 },
+ /* 12G - 4096X2160p59.94 */
+ /* 12G - 4096X2160p60 */
+ { 4096, 2160, 60, V4L2_DV_BT_CEA_4096X2160P60 },
+};
+
+static inline struct xsdirxss_state *
+to_xsdirxssstate(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xsdirxss_state, subdev);
+}
+
+/*
+ * Register related operations
+ */
+static inline u32 xsdirxss_read(struct xsdirxss_core *xsdirxss, u32 addr)
+{
+ return ioread32(xsdirxss->iomem + addr);
+}
+
+static inline void xsdirxss_write(struct xsdirxss_core *xsdirxss, u32 addr,
+ u32 value)
+{
+ iowrite32(value, xsdirxss->iomem + addr);
+}
+
+static inline void xsdirxss_clr(struct xsdirxss_core *xsdirxss, u32 addr,
+ u32 clr)
+{
+ xsdirxss_write(xsdirxss, addr, xsdirxss_read(xsdirxss, addr) & ~clr);
+}
+
+static inline void xsdirxss_set(struct xsdirxss_core *xsdirxss, u32 addr,
+ u32 set)
+{
+ xsdirxss_write(xsdirxss, addr, xsdirxss_read(xsdirxss, addr) | set);
+}
+
+static inline void xsdirx_core_disable(struct xsdirxss_core *core)
+{
+ xsdirxss_clr(core, XSDIRX_RST_CTRL_REG, XSDIRX_RST_CTRL_SS_EN_MASK);
+}
+
+static inline void xsdirx_core_enable(struct xsdirxss_core *core)
+{
+ xsdirxss_set(core, XSDIRX_RST_CTRL_REG, XSDIRX_RST_CTRL_SS_EN_MASK);
+}
+
+static int xsdirx_set_modedetect(struct xsdirxss_core *core, u16 mask)
+{
+ u32 i, val;
+
+ mask &= XSDIRX_DETECT_ALL_MODES;
+ if (!mask) {
+ dev_err(core->dev, "Invalid bit mask = 0x%08x\n", mask);
+ return -EINVAL;
+ }
+
+ dev_dbg(core->dev, "mask = 0x%x\n", mask);
+
+ val = xsdirxss_read(core, XSDIRX_MDL_CTRL_REG);
+ val &= ~XSDIRX_MDL_CTRL_MODE_DET_EN_MASK;
+ val &= ~XSDIRX_MDL_CTRL_MODE_AUTO_DET_MASK;
+ val &= ~XSDIRX_MDL_CTRL_FORCED_MODE_MASK;
+
+ if (hweight16(mask) > 1) {
+ /* Multi mode detection as more than 1 bit set in mask */
+ dev_dbg(core->dev, "Detect multiple modes\n");
+ for (i = 0; i < XSDIRX_MODE_NUM_SUPPORTED; i++) {
+ switch (mask & (1 << i)) {
+ case BIT(XSDIRX_MODE_SD_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_SD_EN_MASK;
+ break;
+ case BIT(XSDIRX_MODE_HD_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_HD_EN_MASK;
+ break;
+ case BIT(XSDIRX_MODE_3G_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_3G_EN_MASK;
+ break;
+ case BIT(XSDIRX_MODE_6G_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_6G_EN_MASK;
+ break;
+ case BIT(XSDIRX_MODE_12GI_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_12GI_EN_MASK;
+ break;
+ case BIT(XSDIRX_MODE_12GF_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_12GF_EN_MASK;
+ break;
+ }
+ }
+ val |= XSDIRX_MDL_CTRL_MODE_DET_EN_MASK;
+ } else {
+ /* Fixed Mode */
+ u32 forced_mode_mask;
+
+ dev_dbg(core->dev, "Detect fixed mode\n");
+
+ /* Find offset of first bit set */
+ switch (__ffs(mask)) {
+ case XSDIRX_MODE_SD_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_SD_MASK;
+ break;
+ case XSDIRX_MODE_HD_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_HD_MASK;
+ break;
+ case XSDIRX_MODE_3G_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_3G_MASK;
+ break;
+ case XSDIRX_MODE_6G_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_6G_MASK;
+ break;
+ case XSDIRX_MODE_12GI_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_12GI_MASK;
+ break;
+ case XSDIRX_MODE_12GF_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_12GF_MASK;
+ break;
+ default:
+ forced_mode_mask = 0;
+ }
+ dev_dbg(core->dev, "Forced Mode Mask : 0x%x\n",
+ forced_mode_mask);
+ val |= forced_mode_mask << XSDIRX_MDL_CTRL_FORCED_MODE_OFFSET;
+ }
+
+ dev_dbg(core->dev, "Modes to be detected : sdi ctrl reg = 0x%08x\n",
+ val);
+ xsdirxss_write(core, XSDIRX_MDL_CTRL_REG, val);
+
+ return 0;
+}
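For illustration, a hedged userspace sketch that programs this control to search SD and HD only; it assumes the XSDIRX_MODE_*_OFFSET and V4L2_CID_XILINX_SDIRX_SEARCH_MODES macros are exported by the UAPI headers this driver includes:

/* Hypothetical userspace sketch: restrict mode detection to SD and HD. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/xilinx-sdirxss.h>
#include <linux/xilinx-v4l2-controls.h>

static int sdirx_search_sd_hd(const char *subdev_path)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_XILINX_SDIRX_SEARCH_MODES,
		.value = (1 << XSDIRX_MODE_SD_OFFSET) |
			 (1 << XSDIRX_MODE_HD_OFFSET),
	};
	int fd = open(subdev_path, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* caller keeps the subdev open */
}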
+
+static void xsdirx_framer(struct xsdirxss_core *core, bool flag)
+{
+ if (flag)
+ xsdirxss_set(core, XSDIRX_MDL_CTRL_REG,
+ XSDIRX_MDL_CTRL_FRM_EN_MASK);
+ else
+ xsdirxss_clr(core, XSDIRX_MDL_CTRL_REG,
+ XSDIRX_MDL_CTRL_FRM_EN_MASK);
+}
+
+static void xsdirx_setedherrcnttrigger(struct xsdirxss_core *core, u32 enable)
+{
+	u32 val = enable & XSDIRX_EDH_ALLERR_MASK;
+
+	xsdirxss_write(core, XSDIRX_EDH_ERRCNT_EN_REG, val);
+}
+
+static inline void xsdirx_setvidlockwindow(struct xsdirxss_core *core, u32 val)
+{
+ /*
+	 * The video lock window is the amount of time for which the
+	 * mode and transport stream must stay locked before the video
+	 * lock interrupt is raised.
+ */
+ xsdirxss_write(core, XSDIRX_VID_LOCK_WINDOW_REG, val);
+}
+
+static inline void xsdirx_disableintr(struct xsdirxss_core *core, u32 mask)
+{
+ xsdirxss_clr(core, XSDIRX_IER_REG, mask);
+}
+
+static inline void xsdirx_enableintr(struct xsdirxss_core *core, u32 mask)
+{
+ xsdirxss_set(core, XSDIRX_IER_REG, mask);
+}
+
+static void xsdirx_globalintr(struct xsdirxss_core *core, bool flag)
+{
+ if (flag)
+ xsdirxss_set(core, XSDIRX_GLBL_IER_REG,
+ XSDIRX_GLBL_INTR_EN_MASK);
+ else
+ xsdirxss_clr(core, XSDIRX_GLBL_IER_REG,
+ XSDIRX_GLBL_INTR_EN_MASK);
+}
+
+static inline void xsdirx_clearintr(struct xsdirxss_core *core, u32 mask)
+{
+ xsdirxss_set(core, XSDIRX_ISR_REG, mask);
+}
+
+static void xsdirx_vid_bridge_control(struct xsdirxss_core *core,
+ bool enable)
+{
+ if (enable)
+ xsdirxss_set(core, XSDIRX_RST_CTRL_REG,
+ XSDIRX_RST_CTRL_SDIRX_BRIDGE_ENB_MASK);
+ else
+ xsdirxss_clr(core, XSDIRX_RST_CTRL_REG,
+ XSDIRX_RST_CTRL_SDIRX_BRIDGE_ENB_MASK);
+}
+
+static void xsdirx_axis4_bridge_control(struct xsdirxss_core *core,
+ bool enable)
+{
+ if (enable)
+ xsdirxss_set(core, XSDIRX_RST_CTRL_REG,
+ XSDIRX_RST_CTRL_VIDIN_AXI4S_MOD_ENB_MASK);
+ else
+ xsdirxss_clr(core, XSDIRX_RST_CTRL_REG,
+ XSDIRX_RST_CTRL_VIDIN_AXI4S_MOD_ENB_MASK);
+}
+
+static void xsdirx_streamflow_control(struct xsdirxss_core *core, bool enable)
+{
+	/*
+	 * The SDI to native bridge is followed by the native to
+	 * AXI4-Stream bridge.
+	 */
+ if (enable) {
+ xsdirx_axis4_bridge_control(core, enable);
+ xsdirx_vid_bridge_control(core, enable);
+ } else {
+ xsdirx_vid_bridge_control(core, enable);
+ xsdirx_axis4_bridge_control(core, enable);
+ }
+}
+
+static void xsdirxss_get_framerate(struct v4l2_fract *frame_interval,
+ u32 framerate)
+{
+ switch (framerate) {
+ case XSDIRX_TS_DET_STAT_RATE_23_98HZ:
+ frame_interval->numerator = 1001;
+ frame_interval->denominator = 24000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_24HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 24000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_25HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 25000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_29_97HZ:
+ frame_interval->numerator = 1001;
+ frame_interval->denominator = 30000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_30HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 30000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_47_95HZ:
+ frame_interval->numerator = 1001;
+ frame_interval->denominator = 48000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_48HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 48000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_50HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 50000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_59_94HZ:
+ frame_interval->numerator = 1001;
+ frame_interval->denominator = 60000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_60HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 60000;
+ break;
+ default:
+ frame_interval->numerator = 1;
+ frame_interval->denominator = 1;
+ }
+}
+
+/**
+ * xsdirx_get_stream_properties - Get SDI Rx stream properties
+ * @state: pointer to driver state
+ *
+ * This function decodes the stream's ST352 payload (if available) to get
+ * stream properties like width, height, picture type (interlaced/progressive),
+ * etc.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int xsdirx_get_stream_properties(struct xsdirxss_state *state)
+{
+ struct xsdirxss_core *core = &state->core;
+ u32 mode, payload = 0, val, family, valid, tscan;
+ u8 byte1 = 0, active_luma = 0, pic_type = 0, framerate = 0;
+ u8 sampling = XST352_BYTE3_COLOR_FORMAT_422;
+ struct v4l2_mbus_framefmt *format = &state->format;
+
+ mode = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+ mode &= XSDIRX_MODE_DET_STAT_RX_MODE_MASK;
+
+ valid = xsdirxss_read(core, XSDIRX_ST352_VALID_REG);
+
+ if (mode >= XSDIRX_MODE_3G_MASK && !valid) {
+ dev_err(core->dev, "No valid ST352 payload present even for 3G mode and above\n");
+ return -EINVAL;
+ }
+
+ val = xsdirxss_read(core, XSDIRX_TS_DET_STAT_REG);
+ if (valid & XSDIRX_ST352_VALID_DS1_MASK) {
+ payload = xsdirxss_read(core, XSDIRX_ST352_DS1_REG);
+ byte1 = (payload >> XST352_PAYLOAD_BYTE1_SHIFT) &
+ XST352_PAYLOAD_BYTE_MASK;
+ active_luma = (payload & XST352_BYTE3_ACT_LUMA_COUNT_MASK) >>
+ XST352_BYTE3_ACT_LUMA_COUNT_OFFSET;
+ pic_type = (payload & XST352_BYTE2_PIC_TYPE_MASK) >>
+ XST352_BYTE2_PIC_TYPE_OFFSET;
+ framerate = (payload >> XST352_BYTE2_FPS_SHIFT) &
+ XST352_BYTE2_FPS_MASK;
+ tscan = (payload & XST352_BYTE2_TS_TYPE_MASK) >>
+ XST352_BYTE2_TS_TYPE_OFFSET;
+ sampling = (payload & XST352_BYTE3_COLOR_FORMAT_MASK) >>
+ XST352_BYTE3_COLOR_FORMAT_OFFSET;
+ } else {
+ dev_dbg(core->dev, "No ST352 payload available : Mode = %d\n",
+ mode);
+ framerate = (val & XSDIRX_TS_DET_STAT_RATE_MASK) >>
+ XSDIRX_TS_DET_STAT_RATE_OFFSET;
+ tscan = (val & XSDIRX_TS_DET_STAT_SCAN_MASK) >>
+ XSDIRX_TS_DET_STAT_SCAN_OFFSET;
+ }
+
+ family = (val & XSDIRX_TS_DET_STAT_FAMILY_MASK) >>
+ XSDIRX_TS_DET_STAT_FAMILY_OFFSET;
+	state->ts_is_interlaced = !tscan;
+
+ dev_dbg(core->dev, "ts_is_interlaced = %d, family = %d\n",
+ state->ts_is_interlaced, family);
+
+ switch (mode) {
+ case XSDIRX_MODE_HD_MASK:
+ if (!valid) {
+ /* No payload obtained */
+ dev_dbg(core->dev, "frame rate : %d, tscan = %d\n",
+ framerate, tscan);
+ /*
+			 * NOTE: A progressive segmented frame (pSF) will be
+			 * reported incorrectly as interlaced, as we rely on
+			 * the IP's transport scan locked bit.
+ */
+ dev_warn(core->dev, "pSF will be incorrectly reported as Interlaced\n");
+
+ switch (framerate) {
+ case XSDIRX_TS_DET_STAT_RATE_23_98HZ:
+ case XSDIRX_TS_DET_STAT_RATE_24HZ:
+ case XSDIRX_TS_DET_STAT_RATE_25HZ:
+ case XSDIRX_TS_DET_STAT_RATE_29_97HZ:
+ case XSDIRX_TS_DET_STAT_RATE_30HZ:
+ if (family == XSDIRX_SMPTE_ST_296) {
+ format->width = 1280;
+ format->height = 720;
+ format->field = V4L2_FIELD_NONE;
+ } else if (family == XSDIRX_SMPTE_ST_2048_2) {
+ format->width = 2048;
+ format->height = 1080;
+ if (tscan)
+ format->field = V4L2_FIELD_NONE;
+ else
+ format->field =
+ V4L2_FIELD_ALTERNATE;
+ } else {
+ format->width = 1920;
+ format->height = 1080;
+ if (tscan)
+ format->field = V4L2_FIELD_NONE;
+ else
+ format->field =
+ V4L2_FIELD_ALTERNATE;
+ }
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_50HZ:
+ case XSDIRX_TS_DET_STAT_RATE_59_94HZ:
+ case XSDIRX_TS_DET_STAT_RATE_60HZ:
+ if (family == XSDIRX_SMPTE_ST_274) {
+ format->width = 1920;
+ format->height = 1080;
+ } else {
+ format->width = 1280;
+ format->height = 720;
+ }
+ format->field = V4L2_FIELD_NONE;
+ break;
+ default:
+ format->width = 1920;
+ format->height = 1080;
+ format->field = V4L2_FIELD_NONE;
+ }
+ } else {
+ dev_dbg(core->dev, "Got the payload\n");
+ switch (byte1) {
+ case XST352_BYTE1_ST292_1x720L_1_5G:
+ /* SMPTE ST 292-1 for 720 line payloads */
+ format->width = 1280;
+ format->height = 720;
+ break;
+ case XST352_BYTE1_ST292_1x1080L_1_5G:
+ /* SMPTE ST 292-1 for 1080 line payloads */
+ format->height = 1080;
+ if (active_luma)
+ format->width = 2048;
+ else
+ format->width = 1920;
+ break;
+ default:
+ dev_dbg(core->dev, "Unknown HD Mode SMPTE standard\n");
+ return -EINVAL;
+ }
+ }
+ break;
+ case XSDIRX_MODE_SD_MASK:
+ format->field = V4L2_FIELD_ALTERNATE;
+
+ switch (family) {
+ case XSDIRX_NTSC:
+ format->width = 720;
+ format->height = 486;
+ break;
+ case XSDIRX_PAL:
+ format->width = 720;
+ format->height = 576;
+ break;
+ default:
+ dev_dbg(core->dev, "Unknown SD Mode SMPTE standard\n");
+ return -EINVAL;
+ }
+ break;
+ case XSDIRX_MODE_3G_MASK:
+ switch (byte1) {
+ case XST352_BYTE1_ST425_2008_750L_3GB:
+ /* Sec 4.1.6.1 SMPTE 425-2008 */
+ case XST352_BYTE1_ST372_2x720L_3GB:
+ /* Table 13 SMPTE 425-2008 */
+ format->width = 1280;
+ format->height = 720;
+ break;
+ case XST352_BYTE1_ST425_2008_1125L_3GA:
+ /* ST352 Table SMPTE 425-1 */
+ case XST352_BYTE1_ST372_DL_3GB:
+ /* Table 13 SMPTE 425-2008 */
+ case XST352_BYTE1_ST372_2x1080L_3GB:
+ /* Table 13 SMPTE 425-2008 */
+ format->height = 1080;
+ if (active_luma)
+ format->width = 2048;
+ else
+ format->width = 1920;
+ break;
+ default:
+ dev_dbg(core->dev, "Unknown 3G Mode SMPTE standard\n");
+ return -EINVAL;
+ }
+ break;
+ case XSDIRX_MODE_6G_MASK:
+ switch (byte1) {
+ case XST352_BYTE1_ST2081_10_DL_2160L_6G:
+ /* Dual link 6G */
+ case XST352_BYTE1_ST2081_10_2160L_6G:
+ /* Table 3 SMPTE ST 2081-10 */
+ format->height = 2160;
+ if (active_luma)
+ format->width = 4096;
+ else
+ format->width = 3840;
+ break;
+ default:
+ dev_dbg(core->dev, "Unknown 6G Mode SMPTE standard\n");
+ return -EINVAL;
+ }
+ break;
+ case XSDIRX_MODE_12GI_MASK:
+ case XSDIRX_MODE_12GF_MASK:
+ switch (byte1) {
+ case XST352_BYTE1_ST2082_10_2160L_12G:
+ /* Section 4.3.1 SMPTE ST 2082-10 */
+ format->height = 2160;
+ if (active_luma)
+ format->width = 4096;
+ else
+ format->width = 3840;
+ break;
+ default:
+ dev_dbg(core->dev, "Unknown 12G Mode SMPTE standard\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(core->dev, "Invalid Mode\n");
+ return -EINVAL;
+ }
+
+ if (valid) {
+ if (pic_type)
+ format->field = V4L2_FIELD_NONE;
+ else
+ format->field = V4L2_FIELD_ALTERNATE;
+ }
+
+ if (format->field == V4L2_FIELD_ALTERNATE)
+ format->height = format->height / 2;
+
+ switch (sampling) {
+ case XST352_BYTE3_COLOR_FORMAT_420:
+ format->code = MEDIA_BUS_FMT_VYYUYY10_4X20;
+ break;
+ case XST352_BYTE3_COLOR_FORMAT_422:
+ format->code = MEDIA_BUS_FMT_UYVY10_1X20;
+ break;
+ default:
+ dev_err(core->dev, "Unsupported color format : %d\n", sampling);
+ return -EINVAL;
+ }
+
+ xsdirxss_get_framerate(&state->frame_interval, framerate);
+
+ dev_dbg(core->dev, "Stream width = %d height = %d Field = %d payload = 0x%08x ts = 0x%08x\n",
+ format->width, format->height, format->field, payload, val);
+ dev_dbg(core->dev, "frame rate numerator = %d denominator = %d\n",
+ state->frame_interval.numerator,
+ state->frame_interval.denominator);
+ dev_dbg(core->dev, "Stream code = 0x%x\n", format->code);
+ return 0;
+}
+
+/**
+ * xsdirxss_irq_handler - Interrupt handler for SDI Rx
+ * @irq: IRQ number
+ * @dev_id: Pointer to device state
+ *
+ * The SDI Rx interrupts are cleared by writing 1 to the corresponding bit.
+ *
+ * Return: IRQ_HANDLED when an interrupt was handled, IRQ_NONE otherwise
+ */
+static irqreturn_t xsdirxss_irq_handler(int irq, void *dev_id)
+{
+ struct xsdirxss_state *state = (struct xsdirxss_state *)dev_id;
+ struct xsdirxss_core *core = &state->core;
+ u32 status;
+
+ status = xsdirxss_read(core, XSDIRX_ISR_REG);
+ dev_dbg(core->dev, "interrupt status = 0x%08x\n", status);
+
+ if (!status)
+ return IRQ_NONE;
+
+ xsdirxss_write(core, XSDIRX_ISR_REG, status);
+
+ if (status & XSDIRX_INTR_VIDLOCK_MASK ||
+ status & XSDIRX_INTR_VIDUNLOCK_MASK) {
+ u32 val1, val2;
+
+ dev_dbg(core->dev, "video lock/unlock interrupt\n");
+
+ xsdirx_streamflow_control(core, false);
+ state->streaming = false;
+
+ val1 = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+ val2 = xsdirxss_read(core, XSDIRX_TS_DET_STAT_REG);
+
+ if ((val1 & XSDIRX_MODE_DET_STAT_MODE_LOCK_MASK) &&
+ (val2 & XSDIRX_TS_DET_STAT_LOCKED_MASK)) {
+ u32 mask = XSDIRX_RST_CTRL_RST_CRC_ERRCNT_MASK |
+ XSDIRX_RST_CTRL_RST_EDH_ERRCNT_MASK;
+
+ dev_dbg(core->dev, "video lock interrupt\n");
+
+ xsdirxss_set(core, XSDIRX_RST_CTRL_REG, mask);
+ xsdirxss_clr(core, XSDIRX_RST_CTRL_REG, mask);
+
+ val1 = xsdirxss_read(core, XSDIRX_ST352_VALID_REG);
+ val2 = xsdirxss_read(core, XSDIRX_ST352_DS1_REG);
+
+ dev_dbg(core->dev, "valid st352 mask = 0x%08x\n", val1);
+ dev_dbg(core->dev, "st352 payload = 0x%08x\n", val2);
+
+ if (!xsdirx_get_stream_properties(state)) {
+ state->vidlocked = true;
+ } else {
+ dev_err(core->dev, "Unable to get stream properties!\n");
+ state->vidlocked = false;
+ }
+ } else {
+ dev_dbg(core->dev, "video unlock interrupt\n");
+ state->vidlocked = false;
+ }
+
+ memset(&state->event, 0, sizeof(state->event));
+ state->event.type = V4L2_EVENT_SOURCE_CHANGE;
+ state->event.u.src_change.changes =
+ V4L2_EVENT_SRC_CH_RESOLUTION;
+ v4l2_subdev_notify_event(&state->subdev, &state->event);
+ }
+
+ if (status & XSDIRX_INTR_UNDERFLOW_MASK) {
+ dev_dbg(core->dev, "Video in to AXI4 Stream core underflow interrupt\n");
+
+ memset(&state->event, 0, sizeof(state->event));
+ state->event.type = V4L2_EVENT_XLNXSDIRX_UNDERFLOW;
+ v4l2_subdev_notify_event(&state->subdev, &state->event);
+ }
+
+ if (status & XSDIRX_INTR_OVERFLOW_MASK) {
+ dev_dbg(core->dev, "Video in to AXI4 Stream core overflow interrupt\n");
+
+ memset(&state->event, 0, sizeof(state->event));
+ state->event.type = V4L2_EVENT_XLNXSDIRX_OVERFLOW;
+ v4l2_subdev_notify_event(&state->subdev, &state->event);
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * xsdirxss_subscribe_event - Subscribe to the SDI Rx source change and error events
+ * @sd: V4L2 Sub device
+ * @fh: V4L2 File Handle
+ * @sub: Subscribe event structure
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xsdirxss_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ int ret;
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+ struct xsdirxss_core *core = &xsdirxss->core;
+
+ switch (sub->type) {
+ case V4L2_EVENT_XLNXSDIRX_UNDERFLOW:
+ case V4L2_EVENT_XLNXSDIRX_OVERFLOW:
+ ret = v4l2_event_subscribe(fh, sub, XSDIRX_MAX_EVENTS, NULL);
+ break;
+ case V4L2_EVENT_SOURCE_CHANGE:
+ ret = v4l2_src_change_event_subscribe(fh, sub);
+ break;
+ default:
+ return -EINVAL;
+ }
+ dev_dbg(core->dev, "Event subscribed : 0x%08x\n", sub->type);
+ return ret;
+}
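For reference, a minimal userspace sketch of the consumer side: subscribe to the source-change event, then poll() for POLLPRI and dequeue the next lock/unlock notification (subdev_fd is an already-open subdevice node):

/* Hypothetical userspace sketch: wait for the next source change event. */
#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int sdirx_wait_source_change(int subdev_fd)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;
	struct pollfd pfd = { .fd = subdev_fd, .events = POLLPRI };

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_SOURCE_CHANGE;
	if (ioctl(subdev_fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return -1;

	if (poll(&pfd, 1, -1) <= 0)	/* V4L2 events arrive as POLLPRI */
		return -1;
	if (ioctl(subdev_fd, VIDIOC_DQEVENT, &ev) < 0)
		return -1;

	return ev.u.src_change.changes;	/* e.g. V4L2_EVENT_SRC_CH_RESOLUTION */
}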
+
+/**
+ * xsdirxss_unsubscribe_event - Unsubscribe from all events registered
+ * @sd: V4L2 Sub device
+ * @fh: V4L2 file handle
+ * @sub: pointer to Event unsubscription structure
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static int xsdirxss_unsubscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+ struct xsdirxss_core *core = &xsdirxss->core;
+
+ dev_dbg(core->dev, "Event unsubscribe : 0x%08x\n", sub->type);
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+/**
+ * xsdirxss_s_ctrl - This is used to set the Xilinx SDI Rx V4L2 controls
+ * @ctrl: V4L2 control to be set
+ *
+ * This function is used to set the V4L2 controls for the Xilinx SDI Rx
+ * Subsystem.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xsdirxss_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ int ret = 0;
+ struct xsdirxss_state *xsdirxss =
+ container_of(ctrl->handler,
+ struct xsdirxss_state, ctrl_handler);
+ struct xsdirxss_core *core = &xsdirxss->core;
+
+ dev_dbg(core->dev, "set ctrl id = 0x%08x val = 0x%08x\n",
+ ctrl->id, ctrl->val);
+
+ if (xsdirxss->streaming) {
+ dev_err(core->dev, "Cannot set controls while streaming\n");
+ return -EINVAL;
+ }
+
+ xsdirx_core_disable(core);
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_SDIRX_FRAMER:
+ xsdirx_framer(core, ctrl->val);
+ break;
+ case V4L2_CID_XILINX_SDIRX_VIDLOCK_WINDOW:
+ xsdirx_setvidlockwindow(core, ctrl->val);
+ break;
+ case V4L2_CID_XILINX_SDIRX_EDH_ERRCNT_ENABLE:
+ xsdirx_setedherrcnttrigger(core, ctrl->val);
+ break;
+ case V4L2_CID_XILINX_SDIRX_SEARCH_MODES:
+ if (ctrl->val) {
+ if (core->mode == XSDIRXSS_SDI_STD_3G) {
+ dev_dbg(core->dev, "Upto 3G supported\n");
+ ctrl->val &= ~(BIT(XSDIRX_MODE_6G_OFFSET) |
+ BIT(XSDIRX_MODE_12GI_OFFSET) |
+ BIT(XSDIRX_MODE_12GF_OFFSET));
+ }
+
+ if (core->mode == XSDIRXSS_SDI_STD_6G) {
+ dev_dbg(core->dev, "Upto 6G supported\n");
+ ctrl->val &= ~(BIT(XSDIRX_MODE_12GI_OFFSET) |
+ BIT(XSDIRX_MODE_12GF_OFFSET));
+ }
+
+ ret = xsdirx_set_modedetect(core, ctrl->val);
+ } else {
+ dev_err(core->dev, "Select at least one mode!\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ xsdirxss_set(core, XSDIRX_RST_CTRL_REG,
+ XSDIRX_RST_CTRL_SS_EN_MASK);
+ return -EINVAL;
+ }
+ xsdirx_core_enable(core);
+ return ret;
+}
+
+/**
+ * xsdirxss_g_volatile_ctrl - get the Xilinx SDI Rx controls
+ * @ctrl: Pointer to V4L2 control
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xsdirxss_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ u32 val;
+ struct xsdirxss_state *xsdirxss =
+ container_of(ctrl->handler,
+ struct xsdirxss_state, ctrl_handler);
+ struct xsdirxss_core *core = &xsdirxss->core;
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_SDIRX_MODE_DETECT:
+ if (!xsdirxss->vidlocked) {
+ dev_err(core->dev, "Can't get values when video not locked!\n");
+ return -EINVAL;
+ }
+ val = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+ val &= XSDIRX_MODE_DET_STAT_RX_MODE_MASK;
+
+ switch (val) {
+ case XSDIRX_MODE_SD_MASK:
+ ctrl->val = XSDIRX_MODE_SD_OFFSET;
+ break;
+ case XSDIRX_MODE_HD_MASK:
+ ctrl->val = XSDIRX_MODE_HD_OFFSET;
+ break;
+ case XSDIRX_MODE_3G_MASK:
+ ctrl->val = XSDIRX_MODE_3G_OFFSET;
+ break;
+ case XSDIRX_MODE_6G_MASK:
+ ctrl->val = XSDIRX_MODE_6G_OFFSET;
+ break;
+ case XSDIRX_MODE_12GI_MASK:
+ ctrl->val = XSDIRX_MODE_12GI_OFFSET;
+ break;
+ case XSDIRX_MODE_12GF_MASK:
+ ctrl->val = XSDIRX_MODE_12GF_OFFSET;
+ break;
+ }
+ break;
+ case V4L2_CID_XILINX_SDIRX_CRC:
+ ctrl->val = xsdirxss_read(core, XSDIRX_CRC_ERRCNT_REG);
+ xsdirxss_write(core, XSDIRX_CRC_ERRCNT_REG, 0xFFFF);
+ break;
+ case V4L2_CID_XILINX_SDIRX_EDH_ERRCNT:
+ val = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+ val &= XSDIRX_MODE_DET_STAT_RX_MODE_MASK;
+ if (val == XSDIRX_MODE_SD_MASK) {
+ ctrl->val = xsdirxss_read(core, XSDIRX_EDH_ERRCNT_REG);
+ } else {
+ dev_dbg(core->dev, "%d - not in SD mode\n", ctrl->id);
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_XILINX_SDIRX_EDH_STATUS:
+ val = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+ val &= XSDIRX_MODE_DET_STAT_RX_MODE_MASK;
+ if (val == XSDIRX_MODE_SD_MASK) {
+ ctrl->val = xsdirxss_read(core, XSDIRX_EDH_STAT_REG);
+ } else {
+ dev_dbg(core->dev, "%d - not in SD mode\n", ctrl->id);
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_XILINX_SDIRX_TS_IS_INTERLACED:
+ if (!xsdirxss->vidlocked) {
+ dev_err(core->dev, "Can't get values when video not locked!\n");
+ return -EINVAL;
+ }
+ ctrl->val = xsdirxss->ts_is_interlaced;
+ break;
+ case V4L2_CID_XILINX_SDIRX_ACTIVE_STREAMS:
+ if (!xsdirxss->vidlocked) {
+ dev_err(core->dev, "Can't get values when video not locked!\n");
+ return -EINVAL;
+ }
+ val = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+ val &= XSDIRX_MODE_DET_STAT_ACT_STREAM_MASK;
+ val >>= XSDIRX_MODE_DET_STAT_ACT_STREAM_OFFSET;
+ ctrl->val = 1 << val;
+ break;
+ case V4L2_CID_XILINX_SDIRX_IS_3GB:
+ if (!xsdirxss->vidlocked) {
+ dev_err(core->dev, "Can't get values when video not locked!\n");
+ return -EINVAL;
+ }
+ val = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+ val &= XSDIRX_MODE_DET_STAT_LVLB_3G_MASK;
+ ctrl->val = !!val;
+ break;
+ default:
+ dev_err(core->dev, "Get Invalid control id 0x%0x\n", ctrl->id);
+ return -EINVAL;
+ }
+ dev_dbg(core->dev, "Get ctrl id = 0x%08x val = 0x%08x\n",
+ ctrl->id, ctrl->val);
+ return 0;
+}
+
+/**
+ * xsdirxss_log_status - Logs the status of the SDI Rx Subsystem
+ * @sd: Pointer to V4L2 subdevice structure
+ *
+ * This function prints the current status of Xilinx SDI Rx Subsystem
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_log_status(struct v4l2_subdev *sd)
+{
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+ struct xsdirxss_core *core = &xsdirxss->core;
+ u32 i;
+
+ v4l2_info(sd, "***** SDI Rx subsystem reg dump start *****\n");
+ for (i = 0; i < 0x28; i++) {
+ u32 data;
+
+ data = xsdirxss_read(core, i * 4);
+ v4l2_info(sd, "offset 0x%08x data 0x%08x\n",
+ i * 4, data);
+ }
+ v4l2_info(sd, "***** SDI Rx subsystem reg dump end *****\n");
+ return 0;
+}
+
+/**
+ * xsdirxss_g_frame_interval - Get the frame interval
+ * @sd: V4L2 Sub device
+ * @fi: Pointer to V4l2 Sub device frame interval structure
+ *
+ * This function is used to get the frame interval.
+ * The frame rate can be integral or fractional.
+ * Integral frame rate, e.g. numerator = 1000, denominator = 24000 => 24 fps.
+ * Fractional frame rate, e.g. numerator = 1001, denominator = 24000 => 23.976 fps.
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+ struct xsdirxss_core *core = &xsdirxss->core;
+
+ if (!xsdirxss->vidlocked) {
+ dev_err(core->dev, "Video not locked!\n");
+ return -EINVAL;
+ }
+
+ fi->interval = xsdirxss->frame_interval;
+
+ dev_dbg(core->dev, "frame rate numerator = %d denominator = %d\n",
+ xsdirxss->frame_interval.numerator,
+ xsdirxss->frame_interval.denominator);
+ return 0;
+}
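+
+/*
+ * Example (informative): fps = denominator / numerator, so 1000/24000
+ * is an integral 24 fps while 1001/24000 is the fractional rate
+ * 24000/1001 ~= 23.976 fps.
+ */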
+
+/**
+ * xsdirxss_s_stream - Start or stop streaming
+ * @sd: V4L2 Sub device
+ * @enable: True to start streaming, false to stop
+ *
+ * This function controls the start or stop of streaming for the
+ * Xilinx SDI Rx Subsystem.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xsdirxss_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+ struct xsdirxss_core *core = &xsdirxss->core;
+
+ if (enable) {
+ if (!xsdirxss->vidlocked) {
+ dev_dbg(core->dev, "Video is not locked\n");
+ return -EINVAL;
+ }
+ if (xsdirxss->streaming) {
+ dev_dbg(core->dev, "Already streaming\n");
+ return -EINVAL;
+ }
+
+ xsdirx_streamflow_control(core, true);
+ xsdirxss->streaming = true;
+ dev_dbg(core->dev, "Streaming started\n");
+ } else {
+ if (!xsdirxss->streaming) {
+ dev_dbg(core->dev, "Stopped streaming already\n");
+ return -EINVAL;
+ }
+
+ xsdirx_streamflow_control(core, false);
+ xsdirxss->streaming = false;
+ dev_dbg(core->dev, "Streaming stopped\n");
+ }
+
+ return 0;
+}
+
+/**
+ * xsdirxss_g_input_status - Get the input signal status
+ * @sd: V4L2 Sub device
+ * @status: input status bitmask, non-zero when no signal is locked
+ *
+ * This reports whether the SDI Rx core has detected and locked onto a
+ * video signal, based on the vidlocked flag.
+ *
+ * Return: zero on success
+ */
+static int xsdirxss_g_input_status(struct v4l2_subdev *sd, u32 *status)
+{
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+
+ if (!xsdirxss->vidlocked)
+ *status = V4L2_IN_ST_NO_SYNC | V4L2_IN_ST_NO_SIGNAL;
+ else
+ *status = 0;
+
+ return 0;
+}
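+
+/*
+ * Bridge drivers typically query this via
+ * v4l2_subdev_call(sd, video, g_input_status, &status) and treat any
+ * non-zero status bits as "no usable signal".
+ */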
+
+static struct v4l2_mbus_framefmt *
+__xsdirxss_get_pad_format(struct xsdirxss_state *xsdirxss,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xsdirxss->subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xsdirxss->format;
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * xsdirxss_get_format - Get the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @fmt: Pointer to pad level media bus format
+ *
+ * This function is used to get the pad format information.
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+ struct xsdirxss_core *core = &xsdirxss->core;
+
+ if (!xsdirxss->vidlocked) {
+ dev_err(core->dev, "Video not locked!\n");
+ return -EINVAL;
+ }
+
+ fmt->format = *__xsdirxss_get_pad_format(xsdirxss, cfg,
+ fmt->pad, fmt->which);
+
+ dev_dbg(core->dev, "Stream width = %d height = %d Field = %d\n",
+ fmt->format.width, fmt->format.height, fmt->format.field);
+
+ return 0;
+}
+
+/**
+ * xsdirxss_set_format - This is used to set the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @fmt: Pointer to pad level media bus format
+ *
+ * This function is used to set the pad format.
+ * Since the pad format is fixed in hardware, it can't be
+ * modified at run time.
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *__format;
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+
+ dev_dbg(xsdirxss->core.dev,
+ "set width %d height %d code %d field %d colorspace %d\n",
+ fmt->format.width, fmt->format.height,
+ fmt->format.code, fmt->format.field,
+ fmt->format.colorspace);
+
+ __format = __xsdirxss_get_pad_format(xsdirxss, cfg,
+ fmt->pad, fmt->which);
+
+ /* Currently reset the code to the one fixed in hardware */
+ /* TODO: Add checks for width and height */
+ fmt->format.code = __format->code;
+
+ return 0;
+}
+
+/**
+ * xsdirxss_enum_mbus_code - Handle pixel format enumeration
+ * @sd: pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad configuration
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ *
+ * Return: -EINVAL or zero on success
+ */
+static int xsdirxss_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad || code->index >= ARRAY_SIZE(xsdirxss_mbus_fmts))
+ return -EINVAL;
+
+ code->code = xsdirxss_mbus_fmts[code->index];
+
+ return 0;
+}
+
+/**
+ * xsdirxss_enum_dv_timings - Enumerate all the supported DV timings
+ * @sd: pointer to v4l2 subdev structure
+ * @timings: DV timings structure to be returned.
+ *
+ * Return: -EINVAL in case of an invalid index or pad, zero on success
+ */
+static int xsdirxss_enum_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_enum_dv_timings *timings)
+{
+ if (timings->index >= ARRAY_SIZE(fmt_cap))
+ return -EINVAL;
+
+ if (timings->pad != 0)
+ return -EINVAL;
+
+ timings->timings = fmt_cap[timings->index];
+ return 0;
+}
+
+/**
+ * xsdirxss_query_dv_timings - Query for the current DV timings
+ * @sd: pointer to v4l2 subdev structure
+ * @timings: DV timings structure to be returned.
+ *
+ * Return: -ENOLCK when video is not locked, -ERANGE when the corresponding
+ * timing entry is not found, or zero on success.
+ */
+static int xsdirxss_query_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct xsdirxss_state *state = to_xsdirxssstate(sd);
+ unsigned int i;
+
+ if (!state->vidlocked)
+ return -ENOLCK;
+
+ for (i = 0; i < ARRAY_SIZE(xsdirxss_dv_timings); i++) {
+ if (state->format.width == xsdirxss_dv_timings[i].width &&
+ state->format.height == xsdirxss_dv_timings[i].height &&
+ state->frame_interval.denominator ==
+ (xsdirxss_dv_timings[i].fps * 1000)) {
+ *timings = xsdirxss_dv_timings[i].format;
+ return 0;
+ }
+ }
+
+ return -ERANGE;
+}
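+
+/*
+ * Note (informative): the match above compares only the interval
+ * denominator against fps * 1000, so for a 30 fps table entry both
+ * 1000/30000 (30.00 Hz) and 1001/30000 (~29.97 Hz) intervals match.
+ */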
+
+/**
+ * xsdirxss_open - Called on v4l2_open()
+ * @sd: Pointer to V4L2 sub device structure
+ * @fh: Pointer to V4L2 File handle
+ *
+ * This function is called on v4l2_open(). It sets the default format for pad.
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format;
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+
+ format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ *format = xsdirxss->default_format;
+
+ return 0;
+}
+
+/**
+ * xsdirxss_close - Called on v4l2_close()
+ * @sd: Pointer to V4L2 sub device structure
+ * @fh: Pointer to V4L2 File handle
+ *
+ * This function is called on v4l2_close().
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xsdirxss_media_ops = {
+ .link_validate = v4l2_subdev_link_validate
+};
+
+static const struct v4l2_ctrl_ops xsdirxss_ctrl_ops = {
+ .g_volatile_ctrl = xsdirxss_g_volatile_ctrl,
+ .s_ctrl = xsdirxss_s_ctrl
+};
+
+static const struct v4l2_ctrl_config xsdirxss_edh_ctrls[] = {
+ {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_EDH_ERRCNT_ENABLE,
+ .name = "SDI Rx : EDH Error Count Enable",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .min = 0,
+ .max = XSDIRX_EDH_ALLERR_MASK,
+ .def = 0,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_EDH_ERRCNT,
+ .name = "SDI Rx : EDH Error Count",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0xFFFF,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_EDH_STATUS,
+ .name = "SDI Rx : EDH Status",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0xFFFFFFFF,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }
+};
+
+static const struct v4l2_ctrl_config xsdirxss_ctrls[] = {
+ {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_FRAMER,
+ .name = "SDI Rx : Enable Framer",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = true,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_VIDLOCK_WINDOW,
+ .name = "SDI Rx : Video Lock Window",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0xFFFFFFFF,
+ .step = 1,
+ .def = XSDIRX_DEFAULT_VIDEO_LOCK_WINDOW,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_SEARCH_MODES,
+ .name = "SDI Rx : Modes search Mask",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .min = 0,
+ .max = XSDIRX_DETECT_ALL_MODES,
+ .def = XSDIRX_DETECT_ALL_MODES,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_MODE_DETECT,
+ .name = "SDI Rx : Mode Detect Status",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = XSDIRX_MODE_SD_OFFSET,
+ .max = XSDIRX_MODE_12GF_OFFSET,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_CRC,
+ .name = "SDI Rx : CRC Error status",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0xFFFFFFFF,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_TS_IS_INTERLACED,
+ .name = "SDI Rx : TS is Interlaced",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .def = false,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_ACTIVE_STREAMS,
+ .name = "SDI Rx : Active Streams",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 16,
+ .def = 1,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_IS_3GB,
+ .name = "SDI Rx : Is 3GB",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .def = false,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }
+};
+
+static const struct v4l2_subdev_core_ops xsdirxss_core_ops = {
+ .log_status = xsdirxss_log_status,
+ .subscribe_event = xsdirxss_subscribe_event,
+ .unsubscribe_event = xsdirxss_unsubscribe_event
+};
+
+static const struct v4l2_subdev_video_ops xsdirxss_video_ops = {
+ .g_frame_interval = xsdirxss_g_frame_interval,
+ .s_stream = xsdirxss_s_stream,
+ .g_input_status = xsdirxss_g_input_status,
+ .query_dv_timings = xsdirxss_query_dv_timings,
+};
+
+static const struct v4l2_subdev_pad_ops xsdirxss_pad_ops = {
+ .get_fmt = xsdirxss_get_format,
+ .set_fmt = xsdirxss_set_format,
+ .enum_mbus_code = xsdirxss_enum_mbus_code,
+ .enum_dv_timings = xsdirxss_enum_dv_timings,
+};
+
+static const struct v4l2_subdev_ops xsdirxss_ops = {
+ .core = &xsdirxss_core_ops,
+ .video = &xsdirxss_video_ops,
+ .pad = &xsdirxss_pad_ops
+};
+
+static const struct v4l2_subdev_internal_ops xsdirxss_internal_ops = {
+ .open = xsdirxss_open,
+ .close = xsdirxss_close
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xsdirxss_parse_of(struct xsdirxss_state *xsdirxss)
+{
+ struct device_node *node = xsdirxss->core.dev->of_node;
+ struct device_node *ports = NULL;
+ struct device_node *port = NULL;
+ unsigned int nports = 0;
+ struct xsdirxss_core *core = &xsdirxss->core;
+ int ret;
+ const char *sdi_std;
+
+ core->include_edh = of_property_read_bool(node, "xlnx,include-edh");
+ dev_dbg(core->dev, "EDH property = %s\n",
+ core->include_edh ? "Present" : "Absent");
+
+ ret = of_property_read_string(node, "xlnx,line-rate", &sdi_std);
+ if (ret < 0) {
+ dev_err(core->dev, "xlnx,line-rate property not found\n");
+ return ret;
+ }
+
+ if (!strncmp(sdi_std, "12G_SDI_8DS", XSDIRX_MAX_STR_LENGTH)) {
+ core->mode = XSDIRXSS_SDI_STD_12G_8DS;
+ } else if (!strncmp(sdi_std, "6G_SDI", XSDIRX_MAX_STR_LENGTH)) {
+ core->mode = XSDIRXSS_SDI_STD_6G;
+ } else if (!strncmp(sdi_std, "3G_SDI", XSDIRX_MAX_STR_LENGTH)) {
+ core->mode = XSDIRXSS_SDI_STD_3G;
+ } else {
+ dev_err(core->dev, "Invalid Line Rate\n");
+ return -EINVAL;
+ }
+ dev_dbg(core->dev, "SDI Rx Line Rate = %s, mode = %d\n", sdi_std,
+ core->mode);
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+
+ for_each_child_of_node(ports, port) {
+ const struct xvip_video_format *format;
+ struct device_node *endpoint;
+
+ if (!port->name || of_node_cmp(port->name, "port"))
+ continue;
+
+ format = xvip_of_get_format(port);
+ if (IS_ERR(format)) {
+ dev_err(core->dev, "invalid format in DT");
+ return PTR_ERR(format);
+ }
+
+ dev_dbg(core->dev, "vf_code = %d bpc = %d bpp = %d\n",
+ format->vf_code, format->width, format->bpp);
+
+ if ((format->vf_code != XVIP_VF_YUV_422 &&
+ format->vf_code != XVIP_VF_YUV_420) || format->width != 10) {
+ dev_err(core->dev,
+ "Incorrect UG934 video format set.\n");
+ return -EINVAL;
+ }
+ xsdirxss->vip_format = format;
+
+ endpoint = of_get_next_child(port, NULL);
+ if (!endpoint) {
+ dev_err(core->dev, "No port at\n");
+ return -EINVAL;
+ }
+
+ /* Count the number of ports. */
+ nports++;
+ }
+
+ if (nports != 1) {
+ dev_err(core->dev, "invalid number of ports %u\n", nports);
+ return -EINVAL;
+ }
+
+ /* Register interrupt handler */
+ core->irq = irq_of_parse_and_map(node, 0);
+ if (!core->irq) {
+ dev_err(core->dev, "No valid irq found\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(core->dev, core->irq, xsdirxss_irq_handler,
+ IRQF_SHARED, "xilinx-sdirxss", xsdirxss);
+ if (ret) {
+ dev_err(core->dev, "Err = %d Interrupt handler reg failed!\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
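+
+/*
+ * Device tree sketch (hypothetical values, for illustration):
+ *
+ *	sdi_rx: sdi-rx-subsystem@80000000 {
+ *		compatible = "xlnx,v-smpte-uhdsdi-rx-ss";
+ *		xlnx,line-rate = "6G_SDI";
+ *		xlnx,include-edh;
+ *		...
+ *	};
+ */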
+
+static int xsdirxss_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct xsdirxss_state *xsdirxss;
+ struct xsdirxss_core *core;
+ struct resource *res;
+ int ret;
+ unsigned int num_ctrls, num_edh_ctrls = 0, i;
+
+ xsdirxss = devm_kzalloc(&pdev->dev, sizeof(*xsdirxss), GFP_KERNEL);
+ if (!xsdirxss)
+ return -ENOMEM;
+
+ xsdirxss->core.dev = &pdev->dev;
+ core = &xsdirxss->core;
+
+ core->num_clks = ARRAY_SIZE(xsdirxss_clks);
+ core->clks = devm_kcalloc(&pdev->dev, core->num_clks,
+ sizeof(*core->clks), GFP_KERNEL);
+ if (!core->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < core->num_clks; i++)
+ core->clks[i].id = xsdirxss_clks[i];
+
+ ret = devm_clk_bulk_get(&pdev->dev, core->num_clks, core->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(core->num_clks, core->clks);
+ if (ret)
+ return ret;
+
+ ret = xsdirxss_parse_of(xsdirxss);
+ if (ret < 0)
+ goto clk_err;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xsdirxss->core.iomem = devm_ioremap_resource(xsdirxss->core.dev, res);
+ if (IS_ERR(xsdirxss->core.iomem)) {
+ ret = PTR_ERR(xsdirxss->core.iomem);
+ goto clk_err;
+ }
+
+ /* Reset the core */
+ xsdirx_streamflow_control(core, false);
+ xsdirx_core_disable(core);
+ xsdirx_clearintr(core, XSDIRX_INTR_ALL_MASK);
+ xsdirx_disableintr(core, XSDIRX_INTR_ALL_MASK);
+ xsdirx_enableintr(core, XSDIRX_INTR_ALL_MASK);
+ xsdirx_globalintr(core, true);
+ xsdirxss_write(core, XSDIRX_CRC_ERRCNT_REG, 0xFFFF);
+
+ /* Initialize V4L2 subdevice and media entity */
+ xsdirxss->pad.flags = MEDIA_PAD_FL_SOURCE;
+
+ /* Initialize the default format */
+ xsdirxss->default_format.code = xsdirxss->vip_format->code;
+ xsdirxss->default_format.field = V4L2_FIELD_NONE;
+ xsdirxss->default_format.colorspace = V4L2_COLORSPACE_DEFAULT;
+ xsdirxss->default_format.width = XSDIRX_DEFAULT_WIDTH;
+ xsdirxss->default_format.height = XSDIRX_DEFAULT_HEIGHT;
+
+ xsdirxss->format = xsdirxss->default_format;
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xsdirxss->subdev;
+ v4l2_subdev_init(subdev, &xsdirxss_ops);
+
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xsdirxss_internal_ops;
+ strscpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ subdev->entity.ops = &xsdirxss_media_ops;
+
+ v4l2_set_subdevdata(subdev, xsdirxss);
+
+ ret = media_entity_pads_init(&subdev->entity, 1, &xsdirxss->pad);
+ if (ret < 0)
+ goto error;
+
+ /* Initialise and register the controls */
+ num_ctrls = ARRAY_SIZE(xsdirxss_ctrls);
+
+ if (xsdirxss->core.include_edh)
+ num_edh_ctrls = ARRAY_SIZE(xsdirxss_edh_ctrls);
+
+ v4l2_ctrl_handler_init(&xsdirxss->ctrl_handler,
+ (num_ctrls + num_edh_ctrls));
+
+ for (i = 0; i < num_ctrls; i++) {
+ struct v4l2_ctrl *ctrl;
+
+ dev_dbg(xsdirxss->core.dev, "%d %s ctrl = 0x%x\n",
+ i, xsdirxss_ctrls[i].name, xsdirxss_ctrls[i].id);
+
+ ctrl = v4l2_ctrl_new_custom(&xsdirxss->ctrl_handler,
+ &xsdirxss_ctrls[i], NULL);
+ if (!ctrl) {
+ dev_dbg(xsdirxss->core.dev, "Failed to add %s ctrl\n",
+ xsdirxss_ctrls[i].name);
+ ret = xsdirxss->ctrl_handler.error;
+ goto error;
+ }
+ }
+
+ if (xsdirxss->core.include_edh) {
+ for (i = 0; i < num_edh_ctrls; i++) {
+ struct v4l2_ctrl *ctrl;
+
+ dev_dbg(xsdirxss->core.dev, "%d %s ctrl = 0x%x\n",
+ i, xsdirxss_edh_ctrls[i].name,
+ xsdirxss_edh_ctrls[i].id);
+
+ ctrl = v4l2_ctrl_new_custom(&xsdirxss->ctrl_handler,
+ &xsdirxss_edh_ctrls[i],
+ NULL);
+ if (!ctrl) {
+ dev_dbg(xsdirxss->core.dev, "Failed to add %s ctrl\n",
+ xsdirxss_edh_ctrls[i].name);
+ ret = xsdirxss->ctrl_handler.error;
+ goto error;
+ }
+ }
+ }
+
+ if (xsdirxss->ctrl_handler.error) {
+ dev_err(&pdev->dev, "failed to add controls\n");
+ ret = xsdirxss->ctrl_handler.error;
+ goto error;
+ }
+
+ subdev->ctrl_handler = &xsdirxss->ctrl_handler;
+
+ ret = v4l2_ctrl_handler_setup(&xsdirxss->ctrl_handler);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to set controls\n");
+ goto error;
+ }
+
+ platform_set_drvdata(pdev, xsdirxss);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ xsdirxss->streaming = false;
+
+ dev_info(xsdirxss->core.dev, "Xilinx SDI Rx Subsystem device found!\n");
+
+ xsdirx_core_enable(core);
+
+ return 0;
+error:
+ v4l2_ctrl_handler_free(&xsdirxss->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ xsdirx_globalintr(core, false);
+ xsdirx_disableintr(core, XSDIRX_INTR_ALL_MASK);
+clk_err:
+ clk_bulk_disable_unprepare(core->num_clks, core->clks);
+ return ret;
+}
+
+static int xsdirxss_remove(struct platform_device *pdev)
+{
+ struct xsdirxss_state *xsdirxss = platform_get_drvdata(pdev);
+ struct xsdirxss_core *core = &xsdirxss->core;
+ struct v4l2_subdev *subdev = &xsdirxss->subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xsdirxss->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+
+ xsdirx_globalintr(core, false);
+ xsdirx_disableintr(core, XSDIRX_INTR_ALL_MASK);
+ xsdirx_core_disable(core);
+ xsdirx_streamflow_control(core, false);
+
+ clk_bulk_disable_unprepare(core->num_clks, core->clks);
+
+ return 0;
+}
+
+static const struct of_device_id xsdirxss_of_id_table[] = {
+ { .compatible = "xlnx,v-smpte-uhdsdi-rx-ss" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xsdirxss_of_id_table);
+
+static struct platform_driver xsdirxss_driver = {
+ .driver = {
+ .name = "xilinx-sdirxss",
+ .of_match_table = xsdirxss_of_id_table,
+ },
+ .probe = xsdirxss_probe,
+ .remove = xsdirxss_remove,
+};
+
+module_platform_driver(xsdirxss_driver);
+
+MODULE_AUTHOR("Vishal Sagar <vsagar@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx SDI Rx Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-switch.c b/drivers/media/platform/xilinx/xilinx-switch.c
new file mode 100644
index 000000000000..b0052a76c65d
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-switch.c
@@ -0,0 +1,460 @@
+/*
+ * Xilinx Video Switch
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XSW_CORE_CH_CTRL 0x0100
+#define XSW_CORE_CH_CTRL_FORCE (1 << 3)
+
+#define XSW_SWITCH_STATUS 0x0104
+
+/**
+ * struct xswitch_device - Xilinx Video Switch device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @nsinks: number of sink pads (2 to 8)
+ * @nsources: number of source pads (1 to 8)
+ * @routing: sink pad connected to each source pad (-1 if none)
+ * @formats: active V4L2 media bus formats on sink pads
+ */
+struct xswitch_device {
+ struct xvip_device xvip;
+
+ struct media_pad *pads;
+ unsigned int nsinks;
+ unsigned int nsources;
+
+ int routing[8];
+
+ struct v4l2_mbus_framefmt *formats;
+};
+
+static inline struct xswitch_device *to_xsw(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xswitch_device, xvip.subdev);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xsw_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xswitch_device *xsw = to_xsw(subdev);
+ unsigned int unused_input;
+ unsigned int i;
+ u32 routing;
+
+ if (!enable) {
+ xvip_stop(&xsw->xvip);
+ return 0;
+ }
+
+ /*
+ * All outputs must be routed to an input. When fewer than 8 inputs are
+ * synthesized we can use input 7 for that purpose. Otherwise find an
+ * unused input to connect to unused outputs.
+ */
+ if (xsw->nsinks == 8) {
+ u32 mask;
+
+ for (i = 0, mask = 0xff; i < xsw->nsources; ++i) {
+ if (xsw->routing[i] != -1)
+ mask &= ~BIT(xsw->routing[i]);
+ }
+
+ /*
+ * If all inputs are used, all outputs are also used. We don't
+ * need an unused input in that case; use a zero value.
+ */
+ unused_input = mask ? ffs(mask) - 1 : 0;
+ } else {
+ unused_input = 7;
+ }
+
+ /* Configure routing. */
+ for (i = 0, routing = 0; i < xsw->nsources; ++i) {
+ unsigned int route;
+
+ route = xsw->routing[i] == -1 ? unused_input : xsw->routing[i];
+ routing |= (XSW_CORE_CH_CTRL_FORCE | route)
+ << (i * 4);
+ }
+
+ xvip_write(&xsw->xvip, XSW_CORE_CH_CTRL, routing);
+
+ xvip_write(&xsw->xvip, XVIP_CTRL_CONTROL,
+ (((1 << xsw->nsources) - 1) << 4) |
+ XVIP_CTRL_CONTROL_SW_ENABLE);
+
+ return 0;
+}
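+
+/*
+ * Worked example (informative): with FORCE = BIT(3) and 4 bits per
+ * source, routing source 0 from sink 2 and source 1 from sink 5 yields
+ * (0x8 | 0x2) << 0 | (0x8 | 0x5) << 4 = 0xda in XSW_CORE_CH_CTRL.
+ */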
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+xsw_get_pad_format(struct xswitch_device *xsw,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xsw->xvip.subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xsw->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xsw_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xswitch_device *xsw = to_xsw(subdev);
+ int pad = fmt->pad;
+
+ if (pad >= xsw->nsinks) {
+ pad = xsw->routing[pad - xsw->nsinks];
+ if (pad < 0) {
+ memset(&fmt->format, 0, sizeof(fmt->format));
+ return 0;
+ }
+ }
+
+ fmt->format = *xsw_get_pad_format(xsw, cfg, pad, fmt->which);
+
+ return 0;
+}
+
+static int xsw_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xswitch_device *xsw = to_xsw(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* The source pad format is always identical to the sink pad format and
+ * can't be modified.
+ */
+ if (fmt->pad >= xsw->nsinks)
+ return xsw_get_format(subdev, cfg, fmt);
+
+ format = xsw_get_pad_format(xsw, cfg, fmt->pad, fmt->which);
+
+ format->code = fmt->format.code;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XVIP_MIN_WIDTH, XVIP_MAX_WIDTH);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XVIP_MIN_HEIGHT, XVIP_MAX_HEIGHT);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+static int xsw_get_routing(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_routing *route)
+{
+ struct xswitch_device *xsw = to_xsw(subdev);
+ unsigned int i;
+
+ mutex_lock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+ for (i = 0; i < min(xsw->nsources, route->num_routes); ++i) {
+ route->routes[i].sink = xsw->routing[i];
+ route->routes[i].source = i;
+ }
+
+ route->num_routes = xsw->nsources;
+
+ mutex_unlock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+ return 0;
+}
+
+static int xsw_set_routing(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_routing *route)
+{
+ struct xswitch_device *xsw = to_xsw(subdev);
+ unsigned int i;
+ int ret = 0;
+
+ mutex_lock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+ if (subdev->entity.stream_count) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ for (i = 0; i < xsw->nsources; ++i)
+ xsw->routing[i] = -1;
+
+ for (i = 0; i < route->num_routes; ++i)
+ xsw->routing[route->routes[i].source - xsw->nsinks] =
+ route->routes[i].sink;
+
+done:
+ mutex_unlock(&subdev->entity.graph_obj.mdev->graph_mutex);
+ return ret;
+}
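+
+/*
+ * Sketch (Xilinx media framework extension, hypothetical values): user
+ * space fills an array of struct v4l2_subdev_route entries, e.g.
+ * { .sink = 0, .source = nsinks + 1 }, and hands it to the routing
+ * ioctl that ends up in the get/set handlers above.
+ */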
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+/**
+ * xsw_init_formats - Initialize formats on all pads
+ * @subdev: switch V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ *
+ * The function sets the format on every sink pad; the set format handler
+ * propagates each sink format to the corresponding source pads.
+ */
+static void xsw_init_formats(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ struct xswitch_device *xsw = to_xsw(subdev);
+ struct v4l2_subdev_format format;
+ unsigned int i;
+
+ for (i = 0; i < xsw->nsinks; ++i) {
+ memset(&format, 0, sizeof(format));
+
+ format.pad = i;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.width = 1920;
+ format.format.height = 1080;
+
+ xsw_set_format(subdev, fh ? fh->pad : NULL, &format);
+ }
+}
+
+static int xsw_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ xsw_init_formats(subdev, fh);
+
+ return 0;
+}
+
+static int xsw_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops xsw_video_ops = {
+ .s_stream = xsw_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xsw_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xsw_get_format,
+ .set_fmt = xsw_set_format,
+ .get_routing = xsw_get_routing,
+ .set_routing = xsw_set_routing,
+};
+
+static struct v4l2_subdev_ops xsw_ops = {
+ .video = &xsw_video_ops,
+ .pad = &xsw_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xsw_internal_ops = {
+ .open = xsw_open,
+ .close = xsw_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static bool xsw_has_route(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1)
+{
+ struct xswitch_device *xsw = container_of(entity, struct xswitch_device,
+ xvip.subdev.entity);
+ unsigned int sink0, sink1;
+
+ /* Two sinks are never connected together. */
+ if (pad0 < xsw->nsinks && pad1 < xsw->nsinks)
+ return false;
+
+ sink0 = pad0 < xsw->nsinks ? pad0 : xsw->routing[pad0 - xsw->nsinks];
+ sink1 = pad1 < xsw->nsinks ? pad1 : xsw->routing[pad1 - xsw->nsinks];
+
+ return sink0 == sink1;
+}
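+
+/*
+ * Example (informative): with nsinks = 4, has_route(1, 4) is true only
+ * when source pad 4 (routing index 0) is currently routed from sink
+ * pad 1; two sink pads never have a route between them.
+ */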
+
+static const struct media_entity_operations xsw_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+ .has_route = xsw_has_route,
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xsw_parse_of(struct xswitch_device *xsw)
+{
+ struct device_node *node = xsw->xvip.dev->of_node;
+ int ret;
+
+ ret = of_property_read_u32(node, "#xlnx,inputs", &xsw->nsinks);
+ if (ret < 0) {
+ dev_err(xsw->xvip.dev, "missing or invalid #xlnx,%s property\n",
+ "inputs");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "#xlnx,outputs", &xsw->nsources);
+ if (ret < 0) {
+ dev_err(xsw->xvip.dev, "missing or invalid #xlnx,%s property\n",
+ "outputs");
+ return ret;
+ }
+
+ return 0;
+}
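+
+/*
+ * Example (informative): a 4-in/2-out switch carries
+ * #xlnx,inputs = <4> and #xlnx,outputs = <2>, giving npads = 6 with
+ * pads 0-3 as sinks and pads 4-5 as sources.
+ */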
+
+static int xsw_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct xswitch_device *xsw;
+ unsigned int npads;
+ unsigned int i;
+ int ret;
+
+ xsw = devm_kzalloc(&pdev->dev, sizeof(*xsw), GFP_KERNEL);
+ if (!xsw)
+ return -ENOMEM;
+
+ xsw->xvip.dev = &pdev->dev;
+
+ ret = xsw_parse_of(xsw);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xsw->xvip);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize V4L2 subdevice and media entity. Pad numbers depend on the
+ * number of pads.
+ */
+ npads = xsw->nsinks + xsw->nsources;
+ xsw->pads = devm_kcalloc(&pdev->dev, npads, sizeof(*xsw->pads),
+ GFP_KERNEL);
+ if (!xsw->pads) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ for (i = 0; i < xsw->nsinks; ++i)
+ xsw->pads[i].flags = MEDIA_PAD_FL_SINK;
+ for (; i < npads; ++i)
+ xsw->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ xsw->formats = devm_kcalloc(&pdev->dev, xsw->nsinks,
+ sizeof(*xsw->formats), GFP_KERNEL);
+ if (!xsw->formats) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ for (i = 0; i < xsw->nsources; ++i)
+ xsw->routing[i] = i < xsw->nsinks ? i : -1;
+
+ subdev = &xsw->xvip.subdev;
+ v4l2_subdev_init(subdev, &xsw_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xsw_internal_ops;
+ strscpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xsw);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ subdev->entity.ops = &xsw_media_ops;
+
+ xsw_init_formats(subdev, NULL);
+
+ ret = media_entity_pads_init(&subdev->entity, npads, xsw->pads);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xsw);
+
+ xvip_print_version(&xsw->xvip);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xsw->xvip);
+ return ret;
+}
+
+static int xsw_remove(struct platform_device *pdev)
+{
+ struct xswitch_device *xsw = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xsw->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xsw->xvip);
+
+ return 0;
+}
+
+static const struct of_device_id xsw_of_id_table[] = {
+ { .compatible = "xlnx,v-switch-1.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xsw_of_id_table);
+
+static struct platform_driver xsw_driver = {
+ .driver = {
+ .name = "xilinx-switch",
+ .of_match_table = xsw_of_id_table,
+ },
+ .probe = xsw_probe,
+ .remove = xsw_remove,
+};
+
+module_platform_driver(xsw_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Xilinx Video Switch Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-tpg.c b/drivers/media/platform/xilinx/xilinx-tpg.c
index ed01bedb5db6..f840bc098d9e 100644
--- a/drivers/media/platform/xilinx/xilinx-tpg.c
+++ b/drivers/media/platform/xilinx/xilinx-tpg.c
@@ -20,6 +20,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>
+#include "xilinx-hls-common.h"
#include "xilinx-vip.h"
#include "xilinx-vtc.h"
@@ -58,6 +59,36 @@
#define XTPG_BAYER_PHASE_BGGR 3
#define XTPG_BAYER_PHASE_OFF 4
+/*
+ * TPG v7 is a completely redesigned IP core built with Vivado HLS and
+ * exposes a different AXI4-Lite interface.
+ */
+#define XTPG_HLS_BG_PATTERN 0x0020
+#define XTPG_HLS_FG_PATTERN 0x0028
+#define XTPG_HLS_FG_PATTERN_CROSS_HAIR (1 << 1)
+#define XTPG_HLS_MASK_ID 0x0030
+#define XTPG_HLS_MOTION_SPEED 0x0038
+#define XTPG_HLS_COLOR_FORMAT 0x0040
+#define XTPG_HLS_COLOR_FORMAT_RGB 0
+#define XTPG_HLS_COLOR_FORMAT_YUV_444 1
+#define XTPG_HLS_COLOR_FORMAT_YUV_422 2
+#define XTPG_HLS_COLOR_FORMAT_YUV_420 3
+#define XTPG_HLS_CROSS_HAIR_HOR 0x0048
+#define XTPG_HLS_CROSS_HAIR_VER 0x0050
+#define XTPG_HLS_ZPLATE_HOR_CNTL_START 0x0058
+#define XTPG_HLS_ZPLATE_HOR_CNTL_DELTA 0x0060
+#define XTPG_HLS_ZPLATE_VER_CNTL_START 0x0068
+#define XTPG_HLS_ZPLATE_VER_CNTL_DELTA 0x0070
+#define XTPG_HLS_BOX_SIZE 0x0078
+#define XTPG_HLS_BOX_COLOR_RED_CB 0x0080
+#define XTPG_HLS_BOX_COLOR_GREEN_CR 0x0088
+#define XTPG_HLS_BOX_COLOR_BLUE_Y 0x0090
+#define XTPG_HLS_ENABLE_INPUT 0x0098
+#define XTPG_HLS_USE_INPUT_VID_STREAM (1 << 0)
+#define XTPG_HLS_PASS_THRU_START_X 0x00a0
+#define XTPG_HLS_PASS_THRU_START_Y 0x00a8
+#define XTPG_HLS_PASS_THRU_END_X 0x00b0
+#define XTPG_HLS_PASS_THRU_END_Y 0x00b8
+
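+/*
+ * Note (informative): consecutive offsets above step by 0x8; the
+ * HLS-generated AXI4-Lite map presumably reserves a 64-bit slot per
+ * 32-bit register.
+ */
+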
/*
* The minimum blanking value is one clock cycle for the front porch, one clock
* cycle for the sync pulse and one clock cycle for the back porch.
@@ -67,6 +98,15 @@
#define XTPG_MIN_VBLANK 3
#define XTPG_MAX_VBLANK (XVTC_MAX_VSIZE - XVIP_MIN_HEIGHT)
+#define XTPG_MIN_WIDTH (64)
+#define XTPG_MIN_HEIGHT (64)
+#define XTPG_MAX_WIDTH (10328)
+#define XTPG_MAX_HEIGHT (7760)
+
+#define XTPG_MIN_PPC 1
+
+#define XTPG_MIN_FRM_INT 1
+
/**
* struct xtpg_device - Xilinx Test Pattern Generator device structure
* @xvip: Xilinx Video IP device
@@ -82,8 +122,15 @@
* @vblank: vertical blanking control
* @pattern: test pattern control
* @streaming: is the video stream active
+ * @is_hls: whether the IP core is HLS based
* @vtc: video timing controller
* @vtmux_gpio: video timing mux GPIO
+ * @rst_gpio: reset IP core GPIO
+ * @max_width: Maximum width supported by this instance
+ * @max_height: Maximum height supported by this instance
+ * @fi_d: frame interval denominator
+ * @fi_n: frame interval numerator
+ * @ppc: Pixels per clock control
*/
struct xtpg_device {
struct xvip_device xvip;
@@ -102,9 +149,17 @@ struct xtpg_device {
struct v4l2_ctrl *vblank;
struct v4l2_ctrl *pattern;
bool streaming;
+ bool is_hls;
struct xvtc_device *vtc;
struct gpio_desc *vtmux_gpio;
+ struct gpio_desc *rst_gpio;
+
+ u32 max_width;
+ u32 max_height;
+ u32 fi_d;
+ u32 fi_n;
+ u32 ppc;
};
static inline struct xtpg_device *to_tpg(struct v4l2_subdev *subdev)
@@ -128,6 +183,32 @@ static u32 xtpg_get_bayer_phase(unsigned int code)
}
}
+static void xtpg_config_vtc(struct xtpg_device *xtpg, int width, int height)
+{
+ struct xvtc_config config = {
+ .hblank_start = width / xtpg->ppc,
+ .hsync_start = width / xtpg->ppc + 1,
+ .vblank_start = height,
+ .vsync_start = height + 1,
+ .fps = xtpg->fi_d / xtpg->fi_n,
+ };
+ unsigned int htotal;
+ unsigned int vtotal;
+
+ htotal = min_t(unsigned int, XVTC_MAX_HSIZE,
+ (v4l2_ctrl_g_ctrl(xtpg->hblank) + width) / xtpg->ppc);
+ vtotal = min_t(unsigned int, XVTC_MAX_VSIZE,
+ v4l2_ctrl_g_ctrl(xtpg->vblank) + height);
+
+ config.hsync_end = htotal - 1;
+ config.hsize = htotal;
+ config.vsync_end = vtotal - 1;
+ config.vsize = vtotal;
+
+ xvtc_generator_start(xtpg->vtc, &config);
+}
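+
+/*
+ * Example (informative): with fi_n = 1 and fi_d = 30 the generator is
+ * started at 30 fps; a fractional interval such as 1001/30000
+ * truncates to 29 in the integer division above.
+ */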
+
static void __xtpg_update_pattern_control(struct xtpg_device *xtpg,
bool passthrough, bool pattern)
{
@@ -164,6 +245,33 @@ static void xtpg_update_pattern_control(struct xtpg_device *xtpg,
* V4L2 Subdevice Video Operations
*/
+static int xtpg_g_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct xtpg_device *xtpg = to_tpg(subdev);
+
+ fi->interval.numerator = xtpg->fi_n;
+ fi->interval.denominator = xtpg->fi_d;
+
+ return 0;
+}
+
+static int xtpg_s_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct xtpg_device *xtpg = to_tpg(subdev);
+
+ if (!fi->interval.numerator || !fi->interval.denominator) {
+ xtpg->fi_n = XTPG_MIN_FRM_INT;
+ xtpg->fi_d = XTPG_MIN_FRM_INT;
+ } else {
+ xtpg->fi_n = fi->interval.numerator;
+ xtpg->fi_d = fi->interval.denominator;
+ }
+
+ return 0;
+}
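+
+/*
+ * Example (informative): requesting an interval of 1001/60000 stores
+ * fi_n = 1001, fi_d = 60000 (~59.94 fps); a zero numerator or
+ * denominator falls back to the 1/1 default (XTPG_MIN_FRM_INT).
+ */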
+
static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
{
struct xtpg_device *xtpg = to_tpg(subdev);
@@ -173,7 +281,20 @@ static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
u32 bayer_phase;
if (!enable) {
- xvip_stop(&xtpg->xvip);
+ if (!xtpg->is_hls) {
+ xvip_stop(&xtpg->xvip);
+ } else {
+ /*
+ * There is a known issue in TPG v7.0: on a resolution
+ * change it does not generate the pattern correctly,
+ * i.e. some horizontal/vertical offset is added.
+ * As a workaround, issue a reset on stop.
+ */
+ gpiod_set_value_cansleep(xtpg->rst_gpio, 0x1);
+ gpiod_set_value_cansleep(xtpg->rst_gpio, 0x0);
+ v4l2_ctrl_handler_setup(&xtpg->ctrl_handler);
+ }
+
if (xtpg->vtc)
xvtc_generator_stop(xtpg->vtc);
@@ -182,31 +303,36 @@ static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
return 0;
}
- xvip_set_frame_size(&xtpg->xvip, &xtpg->formats[0]);
-
- if (xtpg->vtc) {
- struct xvtc_config config = {
- .hblank_start = width,
- .hsync_start = width + 1,
- .vblank_start = height,
- .vsync_start = height + 1,
- };
- unsigned int htotal;
- unsigned int vtotal;
-
- htotal = min_t(unsigned int, XVTC_MAX_HSIZE,
- v4l2_ctrl_g_ctrl(xtpg->hblank) + width);
- vtotal = min_t(unsigned int, XVTC_MAX_VSIZE,
- v4l2_ctrl_g_ctrl(xtpg->vblank) + height);
-
- config.hsync_end = htotal - 1;
- config.hsize = htotal;
- config.vsync_end = vtotal - 1;
- config.vsize = vtotal;
-
- xvtc_generator_start(xtpg->vtc, &config);
+ if (xtpg->is_hls) {
+ u32 fmt = 0;
+
+ switch (xtpg->formats[0].code) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ fmt = XTPG_HLS_COLOR_FORMAT_YUV_420;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ fmt = XTPG_HLS_COLOR_FORMAT_YUV_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ fmt = XTPG_HLS_COLOR_FORMAT_YUV_444;
+ break;
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ fmt = XTPG_HLS_COLOR_FORMAT_RGB;
+ break;
+ }
+ xvip_write(&xtpg->xvip, XTPG_HLS_COLOR_FORMAT, fmt);
+ xvip_write(&xtpg->xvip, XHLS_REG_COLS, width);
+ xvip_write(&xtpg->xvip, XHLS_REG_ROWS, height);
+ } else {
+ xvip_set_frame_size(&xtpg->xvip, &xtpg->formats[0]);
}
+ if (xtpg->vtc)
+ xtpg_config_vtc(xtpg, width, height);
/*
* Configure the bayer phase and video timing mux based on the
* operation mode (passthrough or test pattern generation). The test
@@ -215,7 +341,11 @@ static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
*/
mutex_lock(xtpg->ctrl_handler.lock);
- xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_BG_PATTERN,
+ xtpg->pattern->cur.val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
XTPG_PATTERN_MASK, xtpg->pattern->cur.val);
/*
@@ -229,18 +359,26 @@ static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
mutex_unlock(xtpg->ctrl_handler.lock);
- /*
- * For TPG v5.0, the bayer phase needs to be off for the pass through
- * mode, otherwise the external input would be subsampled.
- */
- bayer_phase = passthrough ? XTPG_BAYER_PHASE_OFF
- : xtpg_get_bayer_phase(xtpg->formats[0].code);
- xvip_write(&xtpg->xvip, XTPG_BAYER_PHASE, bayer_phase);
-
if (xtpg->vtmux_gpio)
gpiod_set_value_cansleep(xtpg->vtmux_gpio, !passthrough);
- xvip_start(&xtpg->xvip);
+ if (xtpg->is_hls) {
+ xvip_set(&xtpg->xvip, XTPG_HLS_ENABLE_INPUT,
+ XTPG_HLS_USE_INPUT_VID_STREAM);
+ xvip_set(&xtpg->xvip, XVIP_CTRL_CONTROL,
+ XHLS_REG_CTRL_AUTO_RESTART |
+ XVIP_CTRL_CONTROL_SW_ENABLE);
+ } else {
+ /*
+ * For TPG v5.0, the bayer phase needs to be off for the pass
+ * through mode, otherwise the external input would
+ * be subsampled.
+ */
+ bayer_phase = passthrough ? XTPG_BAYER_PHASE_OFF
+ : xtpg_get_bayer_phase(xtpg->formats[0].code);
+ xvip_write(&xtpg->xvip, XTPG_BAYER_PHASE, bayer_phase);
+ xvip_start(&xtpg->xvip);
+ }
return 0;
}
@@ -300,7 +438,27 @@ static int xtpg_set_format(struct v4l2_subdev *subdev,
__format->code = fmt->format.code;
}
- xvip_set_format_size(__format, fmt);
+ if (xtpg->is_hls) {
+ switch (fmt->format.code) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ __format->code = fmt->format.code;
+ break;
+ default:
+ __format->code = xtpg->default_format.code;
+ }
+ }
+
+ __format->width = clamp_t(unsigned int, fmt->format.width,
+ XTPG_MIN_WIDTH, xtpg->max_width);
+ __format->height = clamp_t(unsigned int, fmt->format.height,
+ XTPG_MIN_HEIGHT, xtpg->max_height);
fmt->format = *__format;
@@ -322,6 +480,7 @@ static int xtpg_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_frame_size_enum *fse)
{
struct v4l2_mbus_framefmt *format;
+ struct xtpg_device *xtpg = to_tpg(subdev);
format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
@@ -330,12 +489,13 @@ static int xtpg_enum_frame_size(struct v4l2_subdev *subdev,
/* Min / max values for pad 0 is always fixed in both one and two pads
* modes. In two pads mode, the source pad(= 1) size is identical to
- * the sink pad size */
+ * the sink pad size.
+ */
if (fse->pad == 0) {
- fse->min_width = XVIP_MIN_WIDTH;
- fse->max_width = XVIP_MAX_WIDTH;
- fse->min_height = XVIP_MIN_HEIGHT;
- fse->max_height = XVIP_MAX_HEIGHT;
+ fse->min_width = XTPG_MIN_WIDTH;
+ fse->max_width = xtpg->max_width;
+ fse->min_height = XTPG_MIN_HEIGHT;
+ fse->max_height = xtpg->max_height;
} else {
fse->min_width = format->width;
fse->max_width = format->width;
@@ -374,8 +534,12 @@ static int xtpg_s_ctrl(struct v4l2_ctrl *ctrl)
ctrl_handler);
switch (ctrl->id) {
case V4L2_CID_TEST_PATTERN:
- xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
- XTPG_PATTERN_MASK, ctrl->val);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_BG_PATTERN,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_MASK, ctrl->val);
return 0;
case V4L2_CID_XILINX_TPG_CROSS_HAIRS:
xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
@@ -386,10 +550,13 @@ static int xtpg_s_ctrl(struct v4l2_ctrl *ctrl)
XTPG_PATTERN_CONTROL_MOVING_BOX, ctrl->val);
return 0;
case V4L2_CID_XILINX_TPG_COLOR_MASK:
- xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
- XTPG_PATTERN_CONTROL_COLOR_MASK_MASK,
- ctrl->val <<
- XTPG_PATTERN_CONTROL_COLOR_MASK_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_MASK_ID, ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_CONTROL_COLOR_MASK_MASK,
+ ctrl->val <<
+ XTPG_PATTERN_CONTROL_COLOR_MASK_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_STUCK_PIXEL:
xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
@@ -404,43 +571,85 @@ static int xtpg_s_ctrl(struct v4l2_ctrl *ctrl)
XTPG_PATTERN_CONTROL_MOTION, ctrl->val);
return 0;
case V4L2_CID_XILINX_TPG_MOTION_SPEED:
- xvip_write(&xtpg->xvip, XTPG_MOTION_SPEED, ctrl->val);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_MOTION_SPEED,
+ ctrl->val);
+ else
+ xvip_write(&xtpg->xvip, XTPG_MOTION_SPEED, ctrl->val);
return 0;
case V4L2_CID_XILINX_TPG_CROSS_HAIR_ROW:
- xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
- XTPG_CROSS_HAIRS_ROW_MASK,
- ctrl->val << XTPG_CROSS_HAIRS_ROW_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_CROSS_HAIR_HOR,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
+ XTPG_CROSS_HAIRS_ROW_MASK,
+ ctrl->val <<
+ XTPG_CROSS_HAIRS_ROW_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_CROSS_HAIR_COLUMN:
- xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
- XTPG_CROSS_HAIRS_COLUMN_MASK,
- ctrl->val << XTPG_CROSS_HAIRS_COLUMN_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_CROSS_HAIR_VER,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
+ XTPG_CROSS_HAIRS_COLUMN_MASK,
+ ctrl->val <<
+ XTPG_CROSS_HAIRS_COLUMN_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_ZPLATE_HOR_START:
- xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
- XTPG_ZPLATE_START_MASK,
- ctrl->val << XTPG_ZPLATE_START_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_ZPLATE_HOR_CNTL_START,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
+ XTPG_ZPLATE_START_MASK,
+ ctrl->val << XTPG_ZPLATE_START_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_ZPLATE_HOR_SPEED:
- xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
- XTPG_ZPLATE_SPEED_MASK,
- ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_ZPLATE_HOR_CNTL_DELTA,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
+ XTPG_ZPLATE_SPEED_MASK,
+ ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_ZPLATE_VER_START:
- xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
- XTPG_ZPLATE_START_MASK,
- ctrl->val << XTPG_ZPLATE_START_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_ZPLATE_VER_CNTL_START,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
+ XTPG_ZPLATE_START_MASK,
+ ctrl->val << XTPG_ZPLATE_START_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_ZPLATE_VER_SPEED:
- xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
- XTPG_ZPLATE_SPEED_MASK,
- ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_ZPLATE_VER_CNTL_DELTA,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
+ XTPG_ZPLATE_SPEED_MASK,
+ ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_BOX_SIZE:
- xvip_write(&xtpg->xvip, XTPG_BOX_SIZE, ctrl->val);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_BOX_SIZE, ctrl->val);
+ else
+ xvip_write(&xtpg->xvip, XTPG_BOX_SIZE, ctrl->val);
return 0;
case V4L2_CID_XILINX_TPG_BOX_COLOR:
- xvip_write(&xtpg->xvip, XTPG_BOX_COLOR, ctrl->val);
+ if (xtpg->is_hls) {
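+ /*
+ * Informative: the 24-bit control value packs one component
+ * per byte, [23:16] red/Cb, [15:8] green/Cr, [7:0] blue/Y,
+ * so e.g. 0x4080c0 writes 0x40, 0x80 and 0xc0 below (the IP
+ * presumably latches only the low 8 bits of each register).
+ */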
+ xvip_write(&xtpg->xvip, XTPG_HLS_BOX_COLOR_RED_CB,
+ ctrl->val >> 16);
+ xvip_write(&xtpg->xvip, XTPG_HLS_BOX_COLOR_GREEN_CR,
+ ctrl->val >> 8);
+ xvip_write(&xtpg->xvip, XTPG_HLS_BOX_COLOR_BLUE_Y,
+ ctrl->val);
+ } else {
+ xvip_write(&xtpg->xvip, XTPG_BOX_COLOR, ctrl->val);
+ }
return 0;
case V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH:
xvip_write(&xtpg->xvip, XTPG_STUCK_PIXEL_THRESH, ctrl->val);
@@ -448,6 +657,9 @@ static int xtpg_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_XILINX_TPG_NOISE_GAIN:
xvip_write(&xtpg->xvip, XTPG_NOISE_GAIN, ctrl->val);
return 0;
+ case V4L2_CID_XILINX_TPG_HLS_FG_PATTERN:
+ xvip_write(&xtpg->xvip, XTPG_HLS_FG_PATTERN, ctrl->val);
+ return 0;
}
return 0;
@@ -461,6 +673,8 @@ static const struct v4l2_subdev_core_ops xtpg_core_ops = {
};
static const struct v4l2_subdev_video_ops xtpg_video_ops = {
+ .g_frame_interval = xtpg_g_frame_interval,
+ .s_frame_interval = xtpg_s_frame_interval,
.s_stream = xtpg_s_stream,
};
@@ -505,60 +719,51 @@ static const char *const xtpg_pattern_strings[] = {
"Black/White Checker Board",
};
-static struct v4l2_ctrl_config xtpg_ctrls[] = {
+static const char *const xtpg_hls_pattern_strings[] = {
+ "Passthrough",
+ "Horizontal Ramp",
+ "Vertical Ramp",
+ "Temporal Ramp",
+ "Solid Red",
+ "Solid Green",
+ "Solid Blue",
+ "Solid Black",
+ "Solid White",
+ "Color Bars",
+ "Zone Plate",
+ "Tartan Color Bars",
+ "Cross Hatch",
+ "Color Sweep",
+ "Vertical/Horizontal Ramps",
+ "Black/White Checker Board",
+ "PseudoRandom",
+};
+
+static const char *const xtpg_hls_fg_strings[] = {
+ "No Overlay",
+ "Moving Box",
+ "Cross Hairs",
+};
+
+static const struct v4l2_ctrl_config xtpg_hls_fg_ctrl = {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_HLS_FG_PATTERN,
+ .name = "Test Pattern: Foreground Pattern",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = ARRAY_SIZE(xtpg_hls_fg_strings) - 1,
+ .qmenu = xtpg_hls_fg_strings,
+};
+
+static struct v4l2_ctrl_config xtpg_common_ctrls[] = {
{
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_CROSS_HAIRS,
- .name = "Test Pattern: Cross Hairs",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = false,
- .max = true,
- .step = 1,
- .def = 0,
- }, {
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_MOVING_BOX,
- .name = "Test Pattern: Moving Box",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = false,
- .max = true,
- .step = 1,
- .def = 0,
- }, {
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_COLOR_MASK,
- .name = "Test Pattern: Color Mask",
- .type = V4L2_CTRL_TYPE_BITMASK,
- .min = 0,
- .max = 0xf,
- .def = 0,
- }, {
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_STUCK_PIXEL,
- .name = "Test Pattern: Stuck Pixel",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = false,
- .max = true,
- .step = 1,
- .def = 0,
- }, {
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_NOISE,
- .name = "Test Pattern: Noise",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = false,
- .max = true,
- .step = 1,
- .def = 0,
- }, {
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_MOTION,
- .name = "Test Pattern: Motion",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = false,
- .max = true,
- .step = 1,
- .def = 0,
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_COLOR_MASK,
+ .name = "Test Pattern: Color Mask",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .min = 0,
+ .max = 0x7,
+ .def = 0,
}, {
.ops = &xtpg_ctrl_ops,
.id = V4L2_CID_XILINX_TPG_MOTION_SPEED,
@@ -642,12 +847,61 @@ static struct v4l2_ctrl_config xtpg_ctrls[] = {
}, {
.ops = &xtpg_ctrl_ops,
.id = V4L2_CID_XILINX_TPG_BOX_COLOR,
- .name = "Test Pattern: Box Color(RGB)",
+ .name = "Test Pattern: Box Color(RGB/YCbCr)",
.type = V4L2_CTRL_TYPE_INTEGER,
.min = 0,
.max = (1 << 24) - 1,
.step = 1,
.def = 0,
+ },
+};
+
+static struct v4l2_ctrl_config xtpg_ctrls[] = {
+ {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_CROSS_HAIRS,
+ .name = "Test Pattern: Cross Hairs",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_MOVING_BOX,
+ .name = "Test Pattern: Moving Box",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_STUCK_PIXEL,
+ .name = "Test Pattern: Stuck Pixel",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_NOISE,
+ .name = "Test Pattern: Noise",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_MOTION,
+ .name = "Test Pattern: Motion",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
}, {
.ops = &xtpg_ctrl_ops,
.id = V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH,
@@ -713,6 +967,49 @@ static int xtpg_parse_of(struct xtpg_device *xtpg)
struct device_node *port;
unsigned int nports = 0;
bool has_endpoint = false;
+ int ret;
+
+ if (!of_device_is_compatible(dev->of_node, "xlnx,v-tpg-5.0"))
+ xtpg->is_hls = true;
+
+ ret = of_property_read_u32(node, "xlnx,max-height",
+ &xtpg->max_height);
+ if (ret < 0) {
+ if (of_device_is_compatible(dev->of_node, "xlnx,v-tpg-8.0")) {
+ dev_err(dev, "xlnx,max-height dt property is missing!");
+ return -EINVAL;
+ }
+ xtpg->max_height = XTPG_MAX_HEIGHT;
+ } else if (xtpg->max_height > XTPG_MAX_HEIGHT ||
+ xtpg->max_height < XTPG_MIN_HEIGHT) {
+ dev_err(dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-width",
+ &xtpg->max_width);
+ if (ret < 0) {
+ if (of_device_is_compatible(dev->of_node, "xlnx,v-tpg-8.0")) {
+ dev_err(dev, "xlnx,max-width dt property is missing!");
+ return -EINVAL;
+ }
+ xtpg->max_width = XTPG_MAX_WIDTH;
+ } else if (xtpg->max_width > XTPG_MAX_WIDTH ||
+ xtpg->max_width < XTPG_MIN_WIDTH) {
+ dev_err(dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,ppc",
+ &xtpg->ppc);
+ if (ret < 0) {
+ xtpg->ppc = XTPG_MIN_PPC;
+ dev_dbg(dev, "failed to read ppc in dt\n");
+ } else if ((xtpg->ppc != 1) && (xtpg->ppc != 2) &&
+ (xtpg->ppc != 4) && (xtpg->ppc != 8)) {
+ dev_err(dev, "Invalid ppc config in dt\n");
+ return -EINVAL;
+ }
ports = of_get_child_by_name(node, "ports");
if (ports == NULL)
@@ -769,6 +1066,7 @@ static int xtpg_probe(struct platform_device *pdev)
struct v4l2_subdev *subdev;
struct xtpg_device *xtpg;
u32 i, bayer_phase;
+ u32 npatterns;
int ret;
xtpg = devm_kzalloc(&pdev->dev, sizeof(*xtpg), GFP_KERNEL);
@@ -792,14 +1090,29 @@ static int xtpg_probe(struct platform_device *pdev)
goto error_resource;
}
+ if (xtpg->is_hls) {
+ xtpg->rst_gpio = devm_gpiod_get(&pdev->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(xtpg->rst_gpio)) {
+ ret = PTR_ERR(xtpg->rst_gpio);
+ goto error_resource;
+ }
+ }
+
xtpg->vtc = xvtc_of_get(pdev->dev.of_node);
if (IS_ERR(xtpg->vtc)) {
ret = PTR_ERR(xtpg->vtc);
goto error_resource;
}
- /* Reset and initialize the core */
- xvip_reset(&xtpg->xvip);
+ /*
+ * Reset and initialize the core. For TPG HLS version there
+ * is no SW_RESET bit hence using GPIO based reset.
+ */
+ if (xtpg->is_hls)
+ gpiod_set_value_cansleep(xtpg->rst_gpio, 0x0);
+ else
+ xvip_reset(&xtpg->xvip);
/* Initialize V4L2 subdevice and media entity. Pad numbers depend on the
* number of pads.
@@ -815,11 +1128,23 @@ static int xtpg_probe(struct platform_device *pdev)
xtpg->default_format.code = xtpg->vip_format->code;
xtpg->default_format.field = V4L2_FIELD_NONE;
xtpg->default_format.colorspace = V4L2_COLORSPACE_SRGB;
- xvip_get_frame_size(&xtpg->xvip, &xtpg->default_format);
- bayer_phase = xtpg_get_bayer_phase(xtpg->vip_format->code);
- if (bayer_phase != XTPG_BAYER_PHASE_OFF)
- xtpg->bayer = true;
+ if (xtpg->is_hls) {
+ npatterns = ARRAY_SIZE(xtpg_hls_pattern_strings);
+ xtpg->default_format.width = xvip_read(&xtpg->xvip,
+ XHLS_REG_COLS);
+ xtpg->default_format.height = xvip_read(&xtpg->xvip,
+ XHLS_REG_ROWS);
+ } else {
+ npatterns = ARRAY_SIZE(xtpg_pattern_strings);
+ xvip_get_frame_size(&xtpg->xvip, &xtpg->default_format);
+ }
+
+ if (!xtpg->is_hls) {
+ bayer_phase = xtpg_get_bayer_phase(xtpg->vip_format->code);
+ if (bayer_phase != XTPG_BAYER_PHASE_OFF)
+ xtpg->bayer = true;
+ }
xtpg->formats[0] = xtpg->default_format;
if (xtpg->npads == 2)
@@ -839,7 +1164,13 @@ static int xtpg_probe(struct platform_device *pdev)
if (ret < 0)
goto error;
- v4l2_ctrl_handler_init(&xtpg->ctrl_handler, 3 + ARRAY_SIZE(xtpg_ctrls));
+ if (xtpg->is_hls)
+ v4l2_ctrl_handler_init(&xtpg->ctrl_handler, 4 +
+ ARRAY_SIZE(xtpg_common_ctrls));
+ else
+ v4l2_ctrl_handler_init(&xtpg->ctrl_handler, 3 +
+ ARRAY_SIZE(xtpg_common_ctrls) +
+ ARRAY_SIZE(xtpg_ctrls));
xtpg->vblank = v4l2_ctrl_new_std(&xtpg->ctrl_handler, &xtpg_ctrl_ops,
V4L2_CID_VBLANK, XTPG_MIN_VBLANK,
@@ -847,19 +1178,41 @@ static int xtpg_probe(struct platform_device *pdev)
xtpg->hblank = v4l2_ctrl_new_std(&xtpg->ctrl_handler, &xtpg_ctrl_ops,
V4L2_CID_HBLANK, XTPG_MIN_HBLANK,
XTPG_MAX_HBLANK, 1, 100);
- xtpg->pattern = v4l2_ctrl_new_std_menu_items(&xtpg->ctrl_handler,
- &xtpg_ctrl_ops, V4L2_CID_TEST_PATTERN,
- ARRAY_SIZE(xtpg_pattern_strings) - 1,
- 1, 9, xtpg_pattern_strings);
- for (i = 0; i < ARRAY_SIZE(xtpg_ctrls); i++)
- v4l2_ctrl_new_custom(&xtpg->ctrl_handler, &xtpg_ctrls[i], NULL);
+ if (xtpg->is_hls) {
+ xtpg->pattern =
+ v4l2_ctrl_new_std_menu_items(&xtpg->ctrl_handler,
+ &xtpg_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ npatterns - 1,
+ 1, 9,
+ xtpg_hls_pattern_strings);
+ v4l2_ctrl_new_custom(&xtpg->ctrl_handler,
+ &xtpg_hls_fg_ctrl, NULL);
+ } else {
+ xtpg->pattern =
+ v4l2_ctrl_new_std_menu_items(&xtpg->ctrl_handler,
+ &xtpg_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ npatterns - 1,
+ 1, 9,
+ xtpg_pattern_strings);
+
+ for (i = 0; i < ARRAY_SIZE(xtpg_ctrls); i++)
+ v4l2_ctrl_new_custom(&xtpg->ctrl_handler,
+ &xtpg_ctrls[i], NULL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(xtpg_common_ctrls); i++)
+ v4l2_ctrl_new_custom(&xtpg->ctrl_handler,
+ &xtpg_common_ctrls[i], NULL);
if (xtpg->ctrl_handler.error) {
dev_err(&pdev->dev, "failed to add controls\n");
ret = xtpg->ctrl_handler.error;
goto error;
}
+
subdev->ctrl_handler = &xtpg->ctrl_handler;
xtpg_update_pattern_control(xtpg, true, true);
@@ -874,6 +1227,10 @@ static int xtpg_probe(struct platform_device *pdev)
xvip_print_version(&xtpg->xvip);
+ /* Initialize default frame interval */
+ xtpg->fi_n = 1;
+ xtpg->fi_d = 30;
+
ret = v4l2_async_register_subdev(subdev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register subdev\n");
@@ -909,6 +1266,8 @@ static SIMPLE_DEV_PM_OPS(xtpg_pm_ops, xtpg_pm_suspend, xtpg_pm_resume);
static const struct of_device_id xtpg_of_id_table[] = {
{ .compatible = "xlnx,v-tpg-5.0" },
+ { .compatible = "xlnx,v-tpg-7.0" },
+ { .compatible = "xlnx,v-tpg-8.0" },
{ }
};
MODULE_DEVICE_TABLE(of, xtpg_of_id_table);
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
index 6ad61b08a31a..a9559a518938 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.c
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -24,22 +24,102 @@
*/
static const struct xvip_video_format xvip_video_formats[] = {
+ { XVIP_VF_YUV_420, 8, NULL, MEDIA_BUS_FMT_VYYUYY8_1X24,
+ 1, 12, V4L2_PIX_FMT_NV12, 2, 1, 1, 2 },
+ { XVIP_VF_YUV_420, 8, NULL, MEDIA_BUS_FMT_VYYUYY8_1X24,
+ 1, 12, V4L2_PIX_FMT_NV12M, 2, 2, 1, 2 },
+ { XVIP_VF_YUV_420, 10, NULL, MEDIA_BUS_FMT_VYYUYY10_4X20,
+ 1, 12, V4L2_PIX_FMT_XV15, 2, 1, 2, 2 },
+ { XVIP_VF_YUV_420, 10, NULL, MEDIA_BUS_FMT_VYYUYY10_4X20,
+ 1, 12, V4L2_PIX_FMT_XV15M, 2, 2, 1, 2 },
+ { XVIP_VF_YUV_420, 12, NULL, MEDIA_BUS_FMT_UYYVYY12_4X24,
+ 1, 12, V4L2_PIX_FMT_X012, 2, 1, 2, 2 },
+ { XVIP_VF_YUV_420, 12, NULL, MEDIA_BUS_FMT_UYYVYY12_4X24,
+ 1, 12, V4L2_PIX_FMT_X012M, 2, 2, 1, 2 },
+ { XVIP_VF_YUV_420, 16, NULL, MEDIA_BUS_FMT_UYYVYY16_4X32,
+ 2, 12, V4L2_PIX_FMT_X016, 2, 1, 2, 2 },
+ { XVIP_VF_YUV_420, 16, NULL, MEDIA_BUS_FMT_UYYVYY16_4X32,
+ 2, 12, V4L2_PIX_FMT_X016M, 2, 2, 1, 2 },
{ XVIP_VF_YUV_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
- 2, V4L2_PIX_FMT_YUYV },
+ 1, 16, V4L2_PIX_FMT_NV16, 2, 1, 1, 1 },
+ { XVIP_VF_YUV_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
+ 1, 16, V4L2_PIX_FMT_NV16M, 2, 2, 1, 1 },
+ { XVIP_VF_YUV_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
+ 2, 16, V4L2_PIX_FMT_YUYV, 1, 1, 2, 1 },
+ { XVIP_VF_VUY_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
+ 2, 16, V4L2_PIX_FMT_UYVY, 1, 1, 2, 1 },
+ { XVIP_VF_YUV_422, 10, NULL, MEDIA_BUS_FMT_UYVY10_1X20,
+ 1, 16, V4L2_PIX_FMT_XV20, 2, 1, 2, 1 },
+ { XVIP_VF_YUV_422, 10, NULL, MEDIA_BUS_FMT_UYVY10_1X20,
+ 1, 16, V4L2_PIX_FMT_XV20M, 2, 2, 1, 1 },
+ { XVIP_VF_YUV_422, 12, NULL, MEDIA_BUS_FMT_UYVY12_1X24,
+ 1, 16, V4L2_PIX_FMT_X212, 2, 1, 2, 1 },
+ { XVIP_VF_YUV_422, 12, NULL, MEDIA_BUS_FMT_UYVY12_1X24,
+ 1, 16, V4L2_PIX_FMT_X212M, 2, 2, 1, 1 },
+ { XVIP_VF_YUV_422, 16, NULL, MEDIA_BUS_FMT_UYVY16_2X32,
+ 2, 16, V4L2_PIX_FMT_X216, 2, 1, 2, 1 },
+ { XVIP_VF_YUV_422, 16, NULL, MEDIA_BUS_FMT_UYVY16_2X32,
+ 2, 16, V4L2_PIX_FMT_X216M, 2, 2, 1, 1 },
{ XVIP_VF_YUV_444, 8, NULL, MEDIA_BUS_FMT_VUY8_1X24,
- 3, V4L2_PIX_FMT_YUV444 },
+ 3, 24, V4L2_PIX_FMT_VUY24, 1, 1, 1, 1 },
+ { XVIP_VF_YUVX, 8, NULL, MEDIA_BUS_FMT_VUY8_1X24,
+ 4, 32, V4L2_PIX_FMT_XVUY32, 1, 1, 1, 1 },
+ { XVIP_VF_YUVX, 10, NULL, MEDIA_BUS_FMT_VUY10_1X30,
+ 3, 32, V4L2_PIX_FMT_XVUY10, 1, 1, 1, 1 },
+ { XVIP_VF_YUV_444, 12, NULL, MEDIA_BUS_FMT_VUY12_1X36,
+ 1, 24, V4L2_PIX_FMT_X412, 1, 1, 1, 1 },
+ { XVIP_VF_YUV_444, 12, NULL, MEDIA_BUS_FMT_VUY12_1X36,
+ 1, 24, V4L2_PIX_FMT_X412M, 1, 1, 1, 1 },
+ { XVIP_VF_YUV_444, 16, NULL, MEDIA_BUS_FMT_VUY16_1X48,
+ 2, 24, V4L2_PIX_FMT_X416, 1, 1, 1, 1 },
+ { XVIP_VF_YUV_444, 16, NULL, MEDIA_BUS_FMT_VUY16_1X48,
+ 2, 24, V4L2_PIX_FMT_X416M, 1, 1, 1, 1 },
+ { XVIP_VF_RBG, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
+ 3, 24, V4L2_PIX_FMT_BGR24, 1, 1, 1, 1 },
{ XVIP_VF_RBG, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
- 3, 0 },
+ 3, 24, V4L2_PIX_FMT_RGB24, 1, 1, 1, 1 },
+ { XVIP_VF_BGRX, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
+ 4, 32, V4L2_PIX_FMT_BGRX32, 1, 1, 1, 1 },
+ { XVIP_VF_XRGB, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
+ 4, 32, V4L2_PIX_FMT_XBGR32, 1, 1, 1, 1 },
+ { XVIP_VF_XBGR, 10, NULL, MEDIA_BUS_FMT_RBG101010_1X30,
+ 3, 32, V4L2_PIX_FMT_XBGR30, 1, 1, 1, 1 },
+ { XVIP_VF_XBGR, 12, NULL, MEDIA_BUS_FMT_RBG121212_1X36,
+ 3, 40, V4L2_PIX_FMT_XBGR40, 1, 1, 1, 1 },
+ { XVIP_VF_RBG, 16, NULL, MEDIA_BUS_FMT_RBG161616_1X48,
+ 6, 48, V4L2_PIX_FMT_BGR48, 1, 1, 1, 1 },
{ XVIP_VF_MONO_SENSOR, 8, "mono", MEDIA_BUS_FMT_Y8_1X8,
- 1, V4L2_PIX_FMT_GREY },
+ 1, 8, V4L2_PIX_FMT_GREY, 1, 1, 1, 1 },
+ { XVIP_VF_Y_GREY, 10, NULL, MEDIA_BUS_FMT_Y10_1X10,
+ 4, 32, V4L2_PIX_FMT_XY10, 1, 1, 1, 1 },
+ { XVIP_VF_Y_GREY, 12, NULL, MEDIA_BUS_FMT_Y12_1X12,
+ 1, 12, V4L2_PIX_FMT_XY12, 1, 1, 1, 1 },
+ { XVIP_VF_Y_GREY, 16, NULL, MEDIA_BUS_FMT_Y16_1X16,
+ 2, 16, V4L2_PIX_FMT_Y16, 1, 1, 1, 1 },
{ XVIP_VF_MONO_SENSOR, 8, "rggb", MEDIA_BUS_FMT_SRGGB8_1X8,
- 1, V4L2_PIX_FMT_SRGGB8 },
+ 1, 8, V4L2_PIX_FMT_SRGGB8, 1, 1, 1, 1 },
{ XVIP_VF_MONO_SENSOR, 8, "grbg", MEDIA_BUS_FMT_SGRBG8_1X8,
- 1, V4L2_PIX_FMT_SGRBG8 },
+ 1, 8, V4L2_PIX_FMT_SGRBG8, 1, 1, 1, 1 },
{ XVIP_VF_MONO_SENSOR, 8, "gbrg", MEDIA_BUS_FMT_SGBRG8_1X8,
- 1, V4L2_PIX_FMT_SGBRG8 },
+ 1, 8, V4L2_PIX_FMT_SGBRG8, 1, 1, 1, 1 },
{ XVIP_VF_MONO_SENSOR, 8, "bggr", MEDIA_BUS_FMT_SBGGR8_1X8,
- 1, V4L2_PIX_FMT_SBGGR8 },
+ 1, 8, V4L2_PIX_FMT_SBGGR8, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 12, "rggb", MEDIA_BUS_FMT_SRGGB12_1X12,
+ 1, 12, V4L2_PIX_FMT_SRGGB12, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 12, "grbg", MEDIA_BUS_FMT_SGRBG12_1X12,
+ 1, 12, V4L2_PIX_FMT_SGRBG12, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 12, "gbrg", MEDIA_BUS_FMT_SGBRG12_1X12,
+ 1, 12, V4L2_PIX_FMT_SGBRG12, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 12, "bggr", MEDIA_BUS_FMT_SBGGR12_1X12,
+ 1, 12, V4L2_PIX_FMT_SBGGR12, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 16, "rggb", MEDIA_BUS_FMT_SRGGB16_1X16,
+ 1, 16, V4L2_PIX_FMT_SRGGB16, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 16, "grbg", MEDIA_BUS_FMT_SGRBG16_1X16,
+ 1, 12, V4L2_PIX_FMT_SGRBG16, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 16, "gbrg", MEDIA_BUS_FMT_SGBRG16_1X16,
+ 1, 12, V4L2_PIX_FMT_SGBRG16, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 16, "bggr", MEDIA_BUS_FMT_SBGGR12_1X12,
+ 1, 12, V4L2_PIX_FMT_SBGGR16, 1, 1, 1, 1 },
};
/**
@@ -89,6 +169,87 @@ const struct xvip_video_format *xvip_get_format_by_fourcc(u32 fourcc)
EXPORT_SYMBOL_GPL(xvip_get_format_by_fourcc);
/**
+ * xvip_bpl_scaling_factor - Retrieve bpl scaling factor for a 4CC
+ * @fourcc: the format 4CC
+ * @numerator: returned numerator of the scaling factor
+ * @denominator: returned denominator of the scaling factor
+ *
+ * Return: none; the scaling factor is returned through @numerator and
+ * @denominator.
+ */
+void xvip_bpl_scaling_factor(u32 fourcc, u32 *numerator, u32 *denominator)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_XY10:
+ case V4L2_PIX_FMT_XV15:
+ case V4L2_PIX_FMT_XV20:
+ case V4L2_PIX_FMT_XV15M:
+ case V4L2_PIX_FMT_XV20M:
+ case V4L2_PIX_FMT_XBGR30:
+ case V4L2_PIX_FMT_XVUY10:
+ *numerator = 10;
+ *denominator = 8;
+ break;
+ case V4L2_PIX_FMT_XBGR40:
+ case V4L2_PIX_FMT_XY12:
+ case V4L2_PIX_FMT_X012:
+ case V4L2_PIX_FMT_X012M:
+ case V4L2_PIX_FMT_X212:
+ case V4L2_PIX_FMT_X212M:
+ case V4L2_PIX_FMT_X412:
+ case V4L2_PIX_FMT_X412M:
+ *numerator = 12;
+ *denominator = 8;
+ break;
+ default:
+ *numerator = 1;
+ *denominator = 1;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(xvip_bpl_scaling_factor);
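
A brief usage sketch (the caller, format and width below are illustrative, not part of this patch): the returned ratio rescales a bytes-per-line value computed for 8-bit data to account for the wider storage of the 10- and 12-bit packed formats.

	u32 num, den;
	u32 bpl = 1920;	/* base bytes per line, illustrative */

	xvip_bpl_scaling_factor(V4L2_PIX_FMT_XV20, &num, &den);
	bpl = bpl * num / den;	/* 1920 * 10 / 8 = 2400 bytes per line */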
+
+/**
+ * xvip_width_padding_factor - Retrieve width's padding factor for a 4CC
+ * @fourcc: the format 4CC
+ * @numerator: returned numerator of the padding factor
+ * @denominator: returned denominator of the padding factor
+ *
+ * Return: none; the padding factor is returned through @numerator and
+ * @denominator.
+ */
+void xvip_width_padding_factor(u32 fourcc, u32 *numerator, u32 *denominator)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_XY10:
+ case V4L2_PIX_FMT_XV15:
+ case V4L2_PIX_FMT_XV20:
+ case V4L2_PIX_FMT_XV15M:
+ case V4L2_PIX_FMT_XV20M:
+ case V4L2_PIX_FMT_XBGR30:
+ case V4L2_PIX_FMT_XVUY10:
+ /* 32 bits are required per 30 bits of data */
+ *numerator = 32;
+ *denominator = 30;
+ break;
+ case V4L2_PIX_FMT_XBGR40:
+ case V4L2_PIX_FMT_XY12:
+ case V4L2_PIX_FMT_X012:
+ case V4L2_PIX_FMT_X012M:
+ case V4L2_PIX_FMT_X212:
+ case V4L2_PIX_FMT_X212M:
+ case V4L2_PIX_FMT_X412:
+ case V4L2_PIX_FMT_X412M:
+ *numerator = 40;
+ *denominator = 36;
+ break;
+ default:
+ *numerator = 1;
+ *denominator = 1;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(xvip_width_padding_factor);
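
The padding factor captures how the packed formats round up in memory: for the 10-bit formats every 30 bits of data occupy a 32-bit word, and for the 12-bit formats every 36 bits occupy 40. A worked example along the same lines as above (values illustrative):

	u32 num, den;
	u32 width = 1920;

	xvip_width_padding_factor(V4L2_PIX_FMT_XV15, &num, &den);
	width = width * num / den;	/* 1920 * 32 / 30 = 2048 padded pixels */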
+
+/**
* xvip_of_get_format - Parse a device tree node and return format information
* @node: the device tree node
*
diff --git a/drivers/media/platform/xilinx/xilinx-vip.h b/drivers/media/platform/xilinx/xilinx-vip.h
index f71e2b650453..07893a0acc2f 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.h
+++ b/drivers/media/platform/xilinx/xilinx-vip.h
@@ -107,21 +107,33 @@ struct xvip_device {
* @width: AXI4 format width in bits per component
* @pattern: CFA pattern for Mono/Sensor formats
* @code: media bus format code
- * @bpp: bytes per pixel (when stored in memory)
+ * @bpl_factor: Bytes per line factor
+ * @bpp: bits per pixel
* @fourcc: V4L2 pixel format FCC identifier
+ * @num_planes: number of planes of the color format
+ * @buffers: number of buffers per format
+ * @hsub: horizontal subsampling factor of the chroma planes
+ * @vsub: vertical subsampling factor of the chroma planes
*/
struct xvip_video_format {
unsigned int vf_code;
unsigned int width;
const char *pattern;
unsigned int code;
+ unsigned int bpl_factor;
unsigned int bpp;
u32 fourcc;
+ u8 num_planes;
+ u8 buffers;
+ u8 hsub;
+ u8 vsub;
};
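
For reference, the first NV12 row of the xvip_video_formats table earlier in this patch decodes against these fields as follows (a restating of data already shown above, not new driver code):

	static const struct xvip_video_format nv12_example = {
		.vf_code	= XVIP_VF_YUV_420,
		.width		= 8,	/* bits per component */
		.pattern	= NULL,
		.code		= MEDIA_BUS_FMT_VYYUYY8_1X24,
		.bpl_factor	= 1,
		.bpp		= 12,	/* bits per pixel */
		.fourcc		= V4L2_PIX_FMT_NV12,
		.num_planes	= 2,
		.buffers	= 1,	/* contiguous NV12: one buffer */
		.hsub		= 1,	/* values mirror the table entry */
		.vsub		= 2,
	};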
const struct xvip_video_format *xvip_get_format_by_code(unsigned int code);
const struct xvip_video_format *xvip_get_format_by_fourcc(u32 fourcc);
const struct xvip_video_format *xvip_of_get_format(struct device_node *node);
+void xvip_bpl_scaling_factor(u32 fourcc, u32 *numerator, u32 *denominator);
+void xvip_width_padding_factor(u32 fourcc, u32 *numerator, u32 *denominator);
void xvip_set_format_size(struct v4l2_mbus_framefmt *format,
const struct v4l2_subdev_format *fmt);
int xvip_enum_mbus_code(struct v4l2_subdev *subdev,
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index cc2856efea59..8c70ee5ef0c6 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -27,16 +27,27 @@
#define XVIPP_DMA_S2MM 0
#define XVIPP_DMA_MM2S 1
+/*
+ * Kept for backward compatibility with existing applications; planned
+ * to be deprecated.
+ */
+static bool xvip_is_mplane = true;
+MODULE_PARM_DESC(is_mplane,
+ "v4l2 device capability to handle multi planar formats");
+module_param_named(is_mplane, xvip_is_mplane, bool, 0444);
+
/**
* struct xvip_graph_entity - Entity in the video graph
* @asd: subdev asynchronous registration information
* @entity: media entity, from the corresponding V4L2 subdev
* @subdev: V4L2 subdev
+ * @streaming: status of the V4L2 subdev if streaming or not
*/
struct xvip_graph_entity {
struct v4l2_async_subdev asd; /* must be first */
struct media_entity *entity;
struct v4l2_subdev *subdev;
+ bool streaming;
};
static inline struct xvip_graph_entity *
@@ -182,6 +193,38 @@ xvip_graph_find_dma(struct xvip_composite_device *xdev, unsigned int port)
return NULL;
}
+/**
+ * xvip_subdev_set_streaming - Find and update streaming status of subdev
+ * @xdev: Composite video device
+ * @subdev: V4L2 sub-device
+ * @enable: enable/disable streaming status
+ *
+ * Walk the xvip graph entity list to find @subdev, return its current
+ * streaming status and update it to @enable.
+ *
+ * Return: the previous streaming status (true or false). If @subdev is not
+ * found, a warning is emitted and false is returned.
+ */
+bool xvip_subdev_set_streaming(struct xvip_composite_device *xdev,
+ struct v4l2_subdev *subdev, bool enable)
+{
+ struct xvip_graph_entity *entity;
+ struct v4l2_async_subdev *asd;
+
+ list_for_each_entry(asd, &xdev->notifier.asd_list, asd_list) {
+ entity = to_xvip_entity(asd);
+ if (entity->asd.match.fwnode == of_fwnode_handle(subdev->dev->of_node)) {
+ bool status = entity->streaming;
+
+ entity->streaming = enable;
+ return status;
+ }
+ }
+
+ WARN(1, "Should never get here\n");
+ return false;
+}
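
A minimal sketch of how a start path might consume this helper; the function below is hypothetical and only illustrates the intended idempotent start/stop bookkeeping:

	/* Start a subdev only if no other pipeline has already started it */
	static int xexample_start(struct xvip_composite_device *xdev,
				  struct v4l2_subdev *subdev)
	{
		bool was_streaming = xvip_subdev_set_streaming(xdev, subdev, true);

		if (was_streaming)
			return 0;

		return v4l2_subdev_call(subdev, video, s_stream, 1);
	}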
+
static int xvip_graph_build_dma(struct xvip_composite_device *xdev)
{
u32 link_flags = MEDIA_LNK_FL_ENABLED;
@@ -276,7 +319,6 @@ static int xvip_graph_build_dma(struct xvip_composite_device *xdev)
}
}
- of_node_put(ep);
return ret;
}
@@ -442,9 +484,11 @@ static int xvip_graph_dma_init_one(struct xvip_composite_device *xdev,
return ret;
if (strcmp(direction, "input") == 0)
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ type = xvip_is_mplane ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE;
else if (strcmp(direction, "output") == 0)
- type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ type = xvip_is_mplane ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_OUTPUT;
else
return -EINVAL;
@@ -462,8 +506,14 @@ static int xvip_graph_dma_init_one(struct xvip_composite_device *xdev,
list_add_tail(&dma->list, &xdev->dmas);
- xdev->v4l2_caps |= type == V4L2_BUF_TYPE_VIDEO_CAPTURE
- ? V4L2_CAP_VIDEO_CAPTURE : V4L2_CAP_VIDEO_OUTPUT;
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ xdev->v4l2_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ xdev->v4l2_caps |= V4L2_CAP_VIDEO_CAPTURE;
+ else if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ xdev->v4l2_caps |= V4L2_CAP_VIDEO_OUTPUT;
+ else if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ xdev->v4l2_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE;
return 0;
}
@@ -472,7 +522,7 @@ static int xvip_graph_dma_init(struct xvip_composite_device *xdev)
{
struct device_node *ports;
struct device_node *port;
- int ret;
+ int ret = 0;
ports = of_get_child_by_name(xdev->dev->of_node, "ports");
if (ports == NULL) {
@@ -482,13 +532,14 @@ static int xvip_graph_dma_init(struct xvip_composite_device *xdev)
for_each_child_of_node(ports, port) {
ret = xvip_graph_dma_init_one(xdev, port);
- if (ret < 0) {
+ if (ret) {
of_node_put(port);
- return ret;
+ break;
}
}
- return 0;
+ of_node_put(ports);
+ return ret;
}
static void xvip_graph_cleanup(struct xvip_composite_device *xdev)
@@ -594,6 +645,7 @@ static int xvip_composite_probe(struct platform_device *pdev)
return -ENOMEM;
xdev->dev = &pdev->dev;
+ mutex_init(&xdev->lock);
INIT_LIST_HEAD(&xdev->dmas);
v4l2_async_notifier_init(&xdev->notifier);
@@ -620,6 +672,7 @@ static int xvip_composite_remove(struct platform_device *pdev)
{
struct xvip_composite_device *xdev = platform_get_drvdata(pdev);
+ mutex_destroy(&xdev->lock);
xvip_graph_cleanup(xdev);
xvip_composite_v4l2_cleanup(xdev);
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.h b/drivers/media/platform/xilinx/xilinx-vipp.h
index e65fce9538f9..24934d57529b 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.h
+++ b/drivers/media/platform/xilinx/xilinx-vipp.h
@@ -27,6 +27,7 @@
* @notifier: V4L2 asynchronous subdevs notifier
* @dmas: list of DMA channels at the pipeline output and input
* @v4l2_caps: V4L2 capabilities of the whole device (see VIDIOC_QUERYCAP)
+ * @lock: lock to ensure all DMA path entities acquire the same pipeline object
*/
struct xvip_composite_device {
struct v4l2_device v4l2_dev;
@@ -37,6 +38,10 @@ struct xvip_composite_device {
struct list_head dmas;
u32 v4l2_caps;
+ struct mutex lock; /* lock to protect xvip pipeline instance */
};
+bool xvip_subdev_set_streaming(struct xvip_composite_device *xdev,
+ struct v4l2_subdev *subdev, bool enable);
+
#endif /* __XILINX_VIPP_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-vpss-csc.c b/drivers/media/platform/xilinx/xilinx-vpss-csc.c
new file mode 100644
index 000000000000..c628ee07a1e0
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vpss-csc.c
@@ -0,0 +1,1170 @@
+/*
+ * Xilinx VPSS Color Space Converter
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XV_CSC_AP_CTRL (0x000)
+#define XV_CSC_INVIDEOFORMAT (0x010)
+#define XV_CSC_OUTVIDEOFORMAT (0x018)
+#define XV_CSC_WIDTH (0x020)
+#define XV_CSC_HEIGHT (0x028)
+#define XV_CSC_K11 (0x050)
+#define XV_CSC_K12 (0x058)
+#define XV_CSC_K13 (0x060)
+#define XV_CSC_K21 (0x068)
+#define XV_CSC_K22 (0x070)
+#define XV_CSC_K23 (0x078)
+#define XV_CSC_K31 (0x080)
+#define XV_CSC_K32 (0x088)
+#define XV_CSC_K33 (0x090)
+#define XV_CSC_ROFFSET (0x098)
+#define XV_CSC_GOFFSET (0x0a0)
+#define XV_CSC_BOFFSET (0x0a8)
+#define XV_CSC_CLAMPMIN (0x0b0)
+#define XV_CSC_CLIPMAX (0x0b8)
+
+#define XV_CSC_FRACTIONAL_BITS (12)
+#define XV_CSC_SCALE_FACTOR (4096)
+/* This is a VPSS CSC specific macro used to calculate contrast */
+#define XV_CSC_DIVISOR (10000)
+#define XV_CSC_DEFAULT_HEIGHT (720)
+#define XV_CSC_DEFAULT_WIDTH (1280)
+#define XV_CSC_K_MAX_ROWS (3)
+#define XV_CSC_K_MAX_COLUMNS (3)
+#define XV_CSC_MIN_WIDTH (64)
+#define XV_CSC_MAX_WIDTH (8192)
+#define XV_CSC_MIN_HEIGHT (64)
+#define XV_CSC_MAX_HEIGHT (4320)
+
+/* GPIO Reset Assert/De-assert */
+#define XCSC_RESET_ASSERT (1)
+#define XCSC_RESET_DEASSERT (0)
+/* Streaming Macros */
+#define XCSC_CLAMP_MIN_ZERO (0)
+#define XCSC_AP_START BIT(0)
+#define XCSC_AP_AUTO_RESTART BIT(7)
+#define XCSC_STREAM_ON (XCSC_AP_START | XCSC_AP_AUTO_RESTART)
+/* Color Control Macros */
+#define XCSC_COLOR_CTRL_COUNT (5)
+#define XCSC_COLOR_CTRL_DEFAULT (50)
+
+enum xcsc_color_fmt {
+ XVIDC_CSF_RGB = 0,
+ XVIDC_CSF_YCRCB_444,
+ XVIDC_CSF_YCRCB_422,
+ XVIDC_CSF_YCRCB_420,
+};
+
+enum xcsc_output_range {
+ XVIDC_CR_0_255 = 1,
+ XVIDC_CR_16_240,
+ XVIDC_CR_16_235
+};
+
+enum xcsc_color_depth {
+ XVIDC_BPC_8 = 8,
+ XVIDC_BPC_10 = 10,
+};
+
+static const s32
+rgb_unity_matrix[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1] = {
+ {XV_CSC_SCALE_FACTOR, 0, 0, 0},
+ {0, XV_CSC_SCALE_FACTOR, 0, 0},
+ {0, 0, XV_CSC_SCALE_FACTOR, 0},
+};
+
+static const s32
+ycrcb_to_rgb_unity[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1] = {
+ {
+ 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0,
+ 17927 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0
+ },
+ {
+ 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ -2132 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ -5329 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0
+ },
+ {
+ 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 21124 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0,
+ 0
+ },
+};
+
+static const s32
+rgb_to_ycrcb_unity[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1] = {
+ {
+ 1826 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 6142 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 620 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0
+ },
+ {
+ -1006 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ -3386 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 4392 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0
+ },
+ {
+ 4392 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ -3989 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ -403 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0,
+ },
+};
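
Each entry stores a real BT.709 coefficient in Q12 fixed point: the coefficient is written as an integer scaled by 10000, then converted with XV_CSC_SCALE_FACTOR (4096) and XV_CSC_DIVISOR (10000). Worked arithmetic for the luma term:

	/* 1.1644 in Q12: 11644 * 4096 / 10000 = 4769, and 4769 / 4096 ~= 1.1644 */
	s32 k_luma = 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;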
+
+/**
+ * struct xcsc_dev - xilinx vpss csc device structure
+ * @xvip: Xilinx Video IP core struct
+ * @pads: Media bus pads for VPSS CSC
+ * @formats: Current media bus formats
+ * @default_formats: Default media bus formats for VPSS CSC
+ * @vip_formats: Pointer to DT specified media bus code info
+ * @ctrl_handler: V4L2 Control Handler struct
+ * @custom_ctrls: Array of pointers to various custom controls
+ * @cft_in: IP or Hardware specific input video format
+ * @cft_out: IP or Hardware specific output video format
+ * @output_range: Color range for Outgoing video
+ * @color_depth: Data width used to represent color
+ * @brightness: Expected brightness value
+ * @contrast: Expected contrast value
+ * @red_gain: Expected red gain
+ * @green_gain: Expected green gain
+ * @blue_gain: Expected blue gain
+ * @brightness_active: Current brightness value
+ * @contrast_active: Current contrast value
+ * @red_gain_active: Current red gain
+ * @green_gain_active: Current green gain
+ * @blue_gain_active: Current blue gain
+ * @k_hw: Coefficients to be written to the IP/hardware
+ * @shadow_coeff: Coefficients to track RGB equivalents for color controls
+ * @clip_max: Maximum value to clip output color range
+ * @rst_gpio: Handle to PS GPIO specifier to assert/de-assert the reset line
+ * @max_width: Maximum width supported by IP.
+ * @max_height: Maximum height supported by IP.
+ */
+struct xcsc_dev {
+ struct xvip_device xvip;
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *custom_ctrls[XCSC_COLOR_CTRL_COUNT];
+
+ enum xcsc_color_fmt cft_in;
+ enum xcsc_color_fmt cft_out;
+ enum xcsc_output_range output_range;
+ enum xcsc_color_depth color_depth;
+ s32 brightness;
+ s32 contrast;
+ s32 red_gain;
+ s32 green_gain;
+ s32 blue_gain;
+ s32 brightness_active;
+ s32 contrast_active;
+ s32 red_gain_active;
+ s32 green_gain_active;
+ s32 blue_gain_active;
+ s32 k_hw[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1];
+ s32 shadow_coeff[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1];
+ s32 clip_max;
+ struct gpio_desc *rst_gpio;
+ u32 max_width;
+ u32 max_height;
+};
+
+#ifdef DEBUG
+static u32 xcsc_read(struct xcsc_dev *xcsc, u32 reg)
+{
+ u32 data;
+
+ data = xvip_read(&xcsc->xvip, reg);
+ return data;
+}
+
+static void xcsc_get_coeff(struct xcsc_dev *xcsc, s32 C[3][4])
+{
+ C[0][0] = xcsc_read(xcsc, XV_CSC_K11);
+ C[0][1] = xcsc_read(xcsc, XV_CSC_K12);
+ C[0][2] = xcsc_read(xcsc, XV_CSC_K13);
+ C[1][0] = xcsc_read(xcsc, XV_CSC_K21);
+ C[1][1] = xcsc_read(xcsc, XV_CSC_K22);
+ C[1][2] = xcsc_read(xcsc, XV_CSC_K23);
+ C[2][0] = xcsc_read(xcsc, XV_CSC_K31);
+ C[2][1] = xcsc_read(xcsc, XV_CSC_K32);
+ C[2][2] = xcsc_read(xcsc, XV_CSC_K33);
+ C[0][3] = xcsc_read(xcsc, XV_CSC_ROFFSET);
+ C[1][3] = xcsc_read(xcsc, XV_CSC_GOFFSET);
+ C[2][3] = xcsc_read(xcsc, XV_CSC_BOFFSET);
+}
+
+static void xcsc_print_coeff(struct xcsc_dev *xcsc)
+{
+ s32 C[3][4];
+
+ xcsc_get_coeff(xcsc, C);
+
+ dev_info(xcsc->xvip.dev,
+ "-------------CSC Coeff Dump Start------\n");
+ dev_info(xcsc->xvip.dev,
+ " R row : %5d %5d %5d\n",
+ (s16)C[0][0], (s16)C[0][1], (s16)C[0][2]);
+ dev_info(xcsc->xvip.dev,
+ " G row : %5d %5d %5d\n",
+ (s16)C[1][0], (s16)C[1][1], (s16)C[1][2]);
+ dev_info(xcsc->xvip.dev,
+ " B row : %5d %5d %5d\n",
+ (s16)C[2][0], (s16)C[2][1], (s16)C[2][2]);
+ dev_info(xcsc->xvip.dev,
+ "Offset : %5d %5d %5d\n",
+ (s16)C[0][3], (s16)C[1][3], (s16)C[2][3]);
+ dev_info(xcsc->xvip.dev,
+ "ClampMin: %3d ClipMax %3d",
+ xcsc_read(xcsc, XV_CSC_CLAMPMIN),
+ xcsc_read(xcsc, XV_CSC_CLIPMAX));
+ dev_info(xcsc->xvip.dev,
+ "-------------CSC Coeff Dump Stop-------\n");
+}
+
+static void
+xcsc_log_coeff(struct device *dev,
+ s32 coeff[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ if (!dev)
+ return;
+ dev_dbg(dev, "--- %s : Start Coeff Log ---", __func__);
+ dev_dbg(dev, "R row : %5d %5d %5d\n",
+ coeff[0][0], coeff[0][1], coeff[0][2]);
+ dev_dbg(dev, "G row : %5d %5d %5d\n",
+ coeff[1][0], coeff[1][1], coeff[1][2]);
+ dev_dbg(dev, "B row : %5d %5d %5d\n",
+ coeff[2][0], coeff[2][1], coeff[2][2]);
+ dev_dbg(dev, "Offset: %5d %5d %5d\n",
+ coeff[0][3], coeff[1][3], coeff[2][3]);
+ dev_dbg(dev, "--- %s : Stop Coeff Log ---", __func__);
+}
+
+static void xcsc_print_k_hw(struct xcsc_dev *xcsc)
+{
+ dev_dbg(xcsc->xvip.dev,
+ "-------------CSC Driver k_hw[][] Dump------------\n");
+ xcsc_log_coeff(xcsc->xvip.dev, xcsc->k_hw);
+ dev_dbg(xcsc->xvip.dev,
+ "-------------------------------------------------\n");
+}
+#endif /* DEBUG */
+
+static void xcsc_write(struct xcsc_dev *xcsc, u32 reg, u32 data)
+{
+ xvip_write(&xcsc->xvip, reg, data);
+}
+
+static void xcsc_write_rgb_3x3(struct xcsc_dev *xcsc)
+{
+ /* Write Matrix Coefficients */
+ xcsc_write(xcsc, XV_CSC_K11, xcsc->k_hw[0][0]);
+ xcsc_write(xcsc, XV_CSC_K12, xcsc->k_hw[0][1]);
+ xcsc_write(xcsc, XV_CSC_K13, xcsc->k_hw[0][2]);
+ xcsc_write(xcsc, XV_CSC_K21, xcsc->k_hw[1][0]);
+ xcsc_write(xcsc, XV_CSC_K22, xcsc->k_hw[1][1]);
+ xcsc_write(xcsc, XV_CSC_K23, xcsc->k_hw[1][2]);
+ xcsc_write(xcsc, XV_CSC_K31, xcsc->k_hw[2][0]);
+ xcsc_write(xcsc, XV_CSC_K32, xcsc->k_hw[2][1]);
+ xcsc_write(xcsc, XV_CSC_K33, xcsc->k_hw[2][2]);
+}
+
+static void xcsc_write_rgb_offset(struct xcsc_dev *xcsc)
+{
+ /* Write RGB Offsets */
+ xcsc_write(xcsc, XV_CSC_ROFFSET, xcsc->k_hw[0][3]);
+ xcsc_write(xcsc, XV_CSC_GOFFSET, xcsc->k_hw[1][3]);
+ xcsc_write(xcsc, XV_CSC_BOFFSET, xcsc->k_hw[2][3]);
+}
+
+static void xcsc_write_coeff(struct xcsc_dev *xcsc)
+{
+ xcsc_write_rgb_3x3(xcsc);
+ xcsc_write_rgb_offset(xcsc);
+}
+
+static void xcsc_set_v4l2_ctrl_defaults(struct xcsc_dev *xcsc)
+{
+ unsigned int i;
+
+ mutex_lock(xcsc->ctrl_handler.lock);
+ for (i = 0; i < XCSC_COLOR_CTRL_COUNT; i++)
+ xcsc->custom_ctrls[i]->cur.val = XCSC_COLOR_CTRL_DEFAULT;
+ mutex_unlock(xcsc->ctrl_handler.lock);
+}
+
+static void xcsc_set_control_defaults(struct xcsc_dev *xcsc)
+{
+ /* These are VPSS CSC IP specific defaults */
+ xcsc->brightness = 120;
+ xcsc->contrast = 0;
+ xcsc->red_gain = 120;
+ xcsc->blue_gain = 120;
+ xcsc->green_gain = 120;
+ xcsc->brightness_active = 120;
+ xcsc->contrast_active = 0;
+ xcsc->red_gain_active = 120;
+ xcsc->blue_gain_active = 120;
+ xcsc->green_gain_active = 120;
+}
+
+static void xcsc_copy_coeff(
+ s32 dest[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1],
+ s32 const src[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ unsigned int i, j;
+
+ for (i = 0; i < XV_CSC_K_MAX_ROWS; i++)
+ for (j = 0; j < XV_CSC_K_MAX_COLUMNS + 1; j++)
+ dest[i][j] = src[i][j];
+}
+
+static void xcsc_set_unity_matrix(struct xcsc_dev *xcsc)
+{
+ xcsc_copy_coeff(xcsc->k_hw, rgb_unity_matrix);
+ xcsc_copy_coeff(xcsc->shadow_coeff, rgb_unity_matrix);
+}
+
+static void xcsc_set_default_state(struct xcsc_dev *xcsc)
+{
+ xcsc->cft_in = XVIDC_CSF_RGB;
+ xcsc->cft_out = XVIDC_CSF_RGB;
+ xcsc->output_range = XVIDC_CR_0_255;
+ /* Derive clip_max from the color depth to support higher bit depths */
+ xcsc->clip_max = BIT(xcsc->color_depth) - 1;
+ xcsc_set_control_defaults(xcsc);
+ xcsc_set_unity_matrix(xcsc);
+ xcsc_write(xcsc, XV_CSC_INVIDEOFORMAT, xcsc->cft_in);
+ xcsc_write(xcsc, XV_CSC_OUTVIDEOFORMAT, xcsc->cft_out);
+ xcsc_write_coeff(xcsc);
+ xcsc_write(xcsc, XV_CSC_CLIPMAX, xcsc->clip_max);
+ xcsc_write(xcsc, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+}
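
With clip_max computed as BIT(color_depth) - 1, the two depths this driver accepts clamp to the expected full-scale values:

	s32 clip8 = BIT(8) - 1;		/* 255 for XVIDC_BPC_8 */
	s32 clip10 = BIT(10) - 1;	/* 1023 for XVIDC_BPC_10 */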
+
+static void
+xcsc_ycrcb_to_rgb(struct xcsc_dev *xcsc, s32 *clip_max,
+ s32 temp[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ u16 bpc_scale = BIT(xcsc->color_depth - 8);
+
+ /*
+ * See http://graficaobscura.com/matrix/index.html for
+ * how these numbers are derived. The VPSS CSC IP is
+ * derived from this Matrix style algorithm. And the
+ * 'magic' numbers here are derived from the algorithm.
+ *
+ * XV_CSC_DIVISOR is used to help with floating constants
+ * while performing multiplicative operations
+ *
+ * Coefficients valid only for BT 709
+ */
+ dev_dbg(xcsc->xvip.dev, "Performing YCrCb to RGB BT 709");
+ xcsc_copy_coeff(temp, ycrcb_to_rgb_unity);
+ temp[0][3] = -248 * bpc_scale;
+ temp[1][3] = 77 * bpc_scale;
+ temp[2][3] = -289 * bpc_scale;
+ *clip_max = BIT(xcsc->color_depth) - 1;
+}
+
+static void
+xcsc_matrix_multiply(s32 K1[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1],
+ s32 K2[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1],
+ s32 kout[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ s32 A, B, C, D, E, F, G, H, I, J, K, L, M, N;
+ s32 O, P, Q, R, S, T, U, V, W, X;
+
+ A = K1[0][0]; B = K1[0][1]; C = K1[0][2]; J = K1[0][3];
+ D = K1[1][0]; E = K1[1][1]; F = K1[1][2]; K = K1[1][3];
+ G = K1[2][0]; H = K1[2][1]; I = K1[2][2]; L = K1[2][3];
+
+ M = K2[0][0]; N = K2[0][1]; O = K2[0][2]; V = K2[0][3];
+ P = K2[1][0]; Q = K2[1][1]; R = K2[1][2]; W = K2[1][3];
+ S = K2[2][0]; T = K2[2][1]; U = K2[2][2]; X = K2[2][3];
+
+ kout[0][0] = (M * A + N * D + O * G) / XV_CSC_SCALE_FACTOR;
+ kout[0][1] = (M * B + N * E + O * H) / XV_CSC_SCALE_FACTOR;
+ kout[0][2] = (M * C + N * F + O * I) / XV_CSC_SCALE_FACTOR;
+ kout[1][0] = (P * A + Q * D + R * G) / XV_CSC_SCALE_FACTOR;
+ kout[1][1] = (P * B + Q * E + R * H) / XV_CSC_SCALE_FACTOR;
+ kout[1][2] = (P * C + Q * F + R * I) / XV_CSC_SCALE_FACTOR;
+ kout[2][0] = (S * A + T * D + U * G) / XV_CSC_SCALE_FACTOR;
+ kout[2][1] = (S * B + T * E + U * H) / XV_CSC_SCALE_FACTOR;
+ kout[2][2] = (S * C + T * F + U * I) / XV_CSC_SCALE_FACTOR;
+ kout[0][3] = ((M * J + N * K + O * L) / XV_CSC_SCALE_FACTOR) + V;
+ kout[1][3] = ((P * J + Q * K + R * L) / XV_CSC_SCALE_FACTOR) + W;
+ kout[2][3] = ((S * J + T * K + U * L) / XV_CSC_SCALE_FACTOR) + X;
+}
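
Because each coefficient is Q12, the product of two coefficients is Q24, so one division by XV_CSC_SCALE_FACTOR returns the result to Q12; the offsets (plain pixel values) are added only after that rescale. A standalone numeric check:

	s32 a = XV_CSC_SCALE_FACTOR / 2;	/* 2048, i.e. 0.5 in Q12 */
	s32 b = XV_CSC_SCALE_FACTOR / 2;	/* 2048 */
	s32 p = a * b / XV_CSC_SCALE_FACTOR;	/* 1024, i.e. 0.25 in Q12 */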
+
+static void
+xcsc_rgb_to_ycrcb(struct xcsc_dev *xcsc, s32 *clip_max,
+ s32 temp[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ u16 bpc_scale = BIT(xcsc->color_depth - 8);
+
+ /*
+ * See http://graficaobscura.com/matrix/index.html for
+ * how these numbers are derived. The VPSS CSC IP is
+ * derived from this Matrix style algorithm. And the
+ * 'magic' numbers here are derived from the algorithm.
+ *
+ * XV_CSC_DIVISOR is used to help with floating constants
+ * while performing multiplicative operations
+ *
+ * Coefficients valid only for BT 709
+ */
+ dev_dbg(xcsc->xvip.dev, "Performing RGB to YCrCb BT 709");
+ xcsc_copy_coeff(temp, rgb_to_ycrcb_unity);
+ temp[0][3] = 16 * bpc_scale;
+ temp[1][3] = 128 * bpc_scale;
+ temp[2][3] = 128 * bpc_scale;
+ *clip_max = BIT(xcsc->color_depth) - 1;
+}
+
+static int xcsc_update_formats(struct xcsc_dev *xcsc)
+{
+ u32 color_in, color_out;
+
+ /* Write In and Out Video Formats */
+ color_in = xcsc->formats[XVIP_PAD_SINK].code;
+ color_out = xcsc->formats[XVIP_PAD_SOURCE].code;
+
+ switch (color_in) {
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ dev_dbg(xcsc->xvip.dev, "Media Format In : RGB");
+ xcsc->cft_in = XVIDC_CSF_RGB;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ dev_dbg(xcsc->xvip.dev, "Media Format In : YUV 444");
+ xcsc->cft_in = XVIDC_CSF_YCRCB_444;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ dev_dbg(xcsc->xvip.dev, "Media Format In : YUV 422");
+ xcsc->cft_in = XVIDC_CSF_YCRCB_422;
+ break;
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ dev_dbg(xcsc->xvip.dev, "Media Format In : YUV 420");
+ xcsc->cft_in = XVIDC_CSF_YCRCB_420;
+ break;
+ }
+
+ switch (color_out) {
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ xcsc->cft_out = XVIDC_CSF_RGB;
+ dev_dbg(xcsc->xvip.dev, "Media Format Out : RGB");
+ if (color_in != MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_ycrcb_to_rgb(xcsc, &xcsc->clip_max, xcsc->k_hw);
+ else
+ xcsc_set_unity_matrix(xcsc);
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ xcsc->cft_out = XVIDC_CSF_YCRCB_444;
+ dev_dbg(xcsc->xvip.dev, "Media Format Out : YUV 444");
+ if (color_in == MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_rgb_to_ycrcb(xcsc, &xcsc->clip_max, xcsc->k_hw);
+ else
+ xcsc_set_unity_matrix(xcsc);
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ xcsc->cft_out = XVIDC_CSF_YCRCB_422;
+ dev_dbg(xcsc->xvip.dev, "Media Format Out : YUV 422");
+ if (color_in == MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_rgb_to_ycrcb(xcsc, &xcsc->clip_max, xcsc->k_hw);
+ else
+ xcsc_set_unity_matrix(xcsc);
+ break;
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ xcsc->cft_out = XVIDC_CSF_YCRCB_420;
+ dev_dbg(xcsc->xvip.dev, "Media Format Out : YUV 420");
+ if (color_in == MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_rgb_to_ycrcb(xcsc, &xcsc->clip_max, xcsc->k_hw);
+ else
+ xcsc_set_unity_matrix(xcsc);
+ break;
+ }
+
+ xcsc_write(xcsc, XV_CSC_INVIDEOFORMAT, xcsc->cft_in);
+ xcsc_write(xcsc, XV_CSC_OUTVIDEOFORMAT, xcsc->cft_out);
+
+ xcsc_write_coeff(xcsc);
+
+ xcsc_write(xcsc, XV_CSC_CLIPMAX, xcsc->clip_max);
+ xcsc_write(xcsc, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+#ifdef DEBUG
+ xcsc_print_k_hw(xcsc);
+ xcsc_print_coeff(xcsc);
+#endif
+ return 0;
+}
+
+static inline struct xcsc_dev *to_csc(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xcsc_dev, xvip.subdev);
+}
+
+static struct v4l2_mbus_framefmt *
+__xcsc_get_pad_format(struct xcsc_dev *xcsc,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xcsc->xvip.subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xcsc->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static void
+xcsc_correct_coeff(struct xcsc_dev *xcsc,
+ s32 temp[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ s32 csc_change[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1] = { {0} };
+ s32 csc_extra[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1] = { {0} };
+ u32 mbus_in = xcsc->formats[XVIP_PAD_SINK].code;
+ u32 mbus_out = xcsc->formats[XVIP_PAD_SOURCE].code;
+
+#ifdef DEBUG
+ xcsc_log_coeff(xcsc->xvip.dev, temp);
+#endif
+ if (mbus_in == MEDIA_BUS_FMT_RBG888_1X24 && mbus_out == mbus_in) {
+ dev_dbg(xcsc->xvip.dev, "%s : RGB to RGB", __func__);
+ xcsc_copy_coeff(xcsc->k_hw,
+ (const s32 (*)[XV_CSC_K_MAX_COLUMNS + 1])temp);
+ } else if (mbus_in == MEDIA_BUS_FMT_RBG888_1X24 &&
+ mbus_out != MEDIA_BUS_FMT_RBG888_1X24) {
+ dev_dbg(xcsc->xvip.dev, "%s : RGB to YUV", __func__);
+ xcsc_rgb_to_ycrcb(xcsc, &xcsc->clip_max, csc_change);
+ xcsc_matrix_multiply(temp, csc_change, xcsc->k_hw);
+ } else if (mbus_in != MEDIA_BUS_FMT_RBG888_1X24 &&
+ mbus_out == MEDIA_BUS_FMT_RBG888_1X24) {
+ dev_dbg(xcsc->xvip.dev, "%s : YUV to RGB", __func__);
+ xcsc_ycrcb_to_rgb(xcsc, &xcsc->clip_max, csc_change);
+ xcsc_matrix_multiply(csc_change, temp, xcsc->k_hw);
+ } else if (mbus_in != MEDIA_BUS_FMT_RBG888_1X24 &&
+ mbus_out != MEDIA_BUS_FMT_RBG888_1X24) {
+ dev_dbg(xcsc->xvip.dev, "%s : YUV to YUV", __func__);
+ xcsc_ycrcb_to_rgb(xcsc, &xcsc->clip_max, csc_change);
+ xcsc_matrix_multiply(csc_change, temp, csc_extra);
+ xcsc_rgb_to_ycrcb(xcsc, &xcsc->clip_max, csc_change);
+ xcsc_matrix_multiply(csc_extra, csc_change, xcsc->k_hw);
+ } else {
+ /* Should never get here */
+ WARN_ON(1);
+ }
+}
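
Reading xcsc_matrix_multiply(k1, k2, kout) as kout = k2 x k1, the four branches above compose to the following (a summary of the code, not new behavior):

	/* RGB -> RGB : k_hw = temp
	 * RGB -> YUV : k_hw = rgb2yuv . temp
	 * YUV -> RGB : k_hw = temp . yuv2rgb
	 * YUV -> YUV : k_hw = rgb2yuv . temp . yuv2rgb
	 * i.e. the user color controls in temp are always applied in RGB space.
	 */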
+
+static void xcsc_set_brightness(struct xcsc_dev *xcsc)
+{
+ unsigned int i, j;
+
+ dev_dbg(xcsc->xvip.dev,
+ "%s : Brightness %d Brightness Active %d",
+ __func__,
+ ((xcsc->brightness - 20) / 2),
+ ((xcsc->brightness_active - 20) / 2));
+ if (xcsc->brightness == xcsc->brightness_active)
+ return;
+ for (i = 0; i < XV_CSC_K_MAX_ROWS; i++) {
+ for (j = 0; j < XV_CSC_K_MAX_COLUMNS; j++) {
+ xcsc->shadow_coeff[i][j] = (xcsc->shadow_coeff[i][j] *
+ xcsc->brightness) /
+ xcsc->brightness_active;
+ }
+ }
+ xcsc->brightness_active = xcsc->brightness;
+ xcsc_correct_coeff(xcsc, xcsc->shadow_coeff);
+ xcsc_write_coeff(xcsc);
+}
+
+static void xcsc_set_contrast(struct xcsc_dev *xcsc)
+{
+ s32 contrast;
+ u8 scale = BIT(xcsc->color_depth - 8);
+
+ contrast = xcsc->contrast - xcsc->contrast_active;
+ dev_dbg(xcsc->xvip.dev,
+ "%s : Contrast Difference %d scale = %d",
+ __func__, contrast, scale);
+ /* Avoid updates if same */
+ if (!contrast)
+ return;
+ /* Update RGB Offsets */
+ xcsc->shadow_coeff[0][3] += contrast * scale;
+ xcsc->shadow_coeff[1][3] += contrast * scale;
+ xcsc->shadow_coeff[2][3] += contrast * scale;
+ xcsc->contrast_active = xcsc->contrast;
+ xcsc_correct_coeff(xcsc, xcsc->shadow_coeff);
+ xcsc_write_coeff(xcsc);
+}
+
+static void xcsc_set_red_gain(struct xcsc_dev *xcsc)
+{
+ dev_dbg(xcsc->xvip.dev,
+ "%s: Red Gain %d Red Gain Active %d", __func__,
+ (xcsc->red_gain - 20) / 2,
+ (xcsc->red_gain_active - 20) / 2);
+
+ if (xcsc->red_gain != xcsc->red_gain_active) {
+ xcsc->shadow_coeff[0][0] = (xcsc->shadow_coeff[0][0] *
+ xcsc->red_gain) /
+ xcsc->red_gain_active;
+ xcsc->shadow_coeff[0][1] = (xcsc->shadow_coeff[0][1] *
+ xcsc->red_gain) /
+ xcsc->red_gain_active;
+ xcsc->shadow_coeff[0][2] = (xcsc->shadow_coeff[0][2] *
+ xcsc->red_gain) /
+ xcsc->red_gain_active;
+ xcsc->red_gain_active = xcsc->red_gain;
+ xcsc_correct_coeff(xcsc, xcsc->shadow_coeff);
+ xcsc_write_coeff(xcsc);
+ }
+}
+
+static void xcsc_set_green_gain(struct xcsc_dev *xcsc)
+{
+ dev_dbg(xcsc->xvip.dev,
+ "%s: Green Gain %d Green Gain Active %d", __func__,
+ (xcsc->green_gain - 20) / 2,
+ (xcsc->green_gain_active - 20) / 2);
+
+ if (xcsc->green_gain != xcsc->green_gain_active) {
+ xcsc->shadow_coeff[1][0] = (xcsc->shadow_coeff[1][0] *
+ xcsc->green_gain) /
+ xcsc->green_gain_active;
+ xcsc->shadow_coeff[1][1] = (xcsc->shadow_coeff[1][1] *
+ xcsc->green_gain) /
+ xcsc->green_gain_active;
+ xcsc->shadow_coeff[1][2] = (xcsc->shadow_coeff[1][2] *
+ xcsc->green_gain) /
+ xcsc->green_gain_active;
+ xcsc->green_gain_active = xcsc->green_gain;
+ xcsc_correct_coeff(xcsc, xcsc->shadow_coeff);
+ xcsc_write_coeff(xcsc);
+ }
+}
+
+static void xcsc_set_blue_gain(struct xcsc_dev *xcsc)
+{
+ dev_dbg(xcsc->xvip.dev,
+ "%s: Blue Gain %d Blue Gain Active %d", __func__,
+ (xcsc->blue_gain - 20) / 2,
+ (xcsc->blue_gain_active - 20) / 2);
+
+ if (xcsc->blue_gain != xcsc->blue_gain_active) {
+ xcsc->shadow_coeff[2][0] = (xcsc->shadow_coeff[2][0] *
+ xcsc->blue_gain) /
+ xcsc->blue_gain_active;
+ xcsc->shadow_coeff[2][1] = (xcsc->shadow_coeff[2][1] *
+ xcsc->blue_gain) /
+ xcsc->blue_gain_active;
+ xcsc->shadow_coeff[2][2] = (xcsc->shadow_coeff[2][2] *
+ xcsc->blue_gain) /
+ xcsc->blue_gain_active;
+ xcsc->blue_gain_active = xcsc->blue_gain;
+ xcsc_correct_coeff(xcsc, xcsc->shadow_coeff);
+ xcsc_write_coeff(xcsc);
+ }
+}
+
+static void xcsc_set_size(struct xcsc_dev *xcsc)
+{
+ u32 width, height;
+
+ width = xcsc->formats[XVIP_PAD_SINK].width;
+ height = xcsc->formats[XVIP_PAD_SINK].height;
+ dev_dbg(xcsc->xvip.dev, "%s : Setting width %d and height %d",
+ __func__, width, height);
+ xcsc_write(xcsc, XV_CSC_WIDTH, width);
+ xcsc_write(xcsc, XV_CSC_HEIGHT, height);
+}
+
+static int xcsc_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xcsc_dev *xcsc = to_csc(subdev);
+
+ dev_dbg(xcsc->xvip.dev, "%s : Stream %s", __func__,
+ enable ? "On" : "Off");
+ if (!enable) {
+ /* Assert and de-assert the global IP reset through the PS GPIO */
+ gpiod_set_value_cansleep(xcsc->rst_gpio, XCSC_RESET_ASSERT);
+ gpiod_set_value_cansleep(xcsc->rst_gpio, XCSC_RESET_DEASSERT);
+ return 0;
+ }
+ xcsc_write(xcsc, XV_CSC_INVIDEOFORMAT, xcsc->cft_in);
+ xcsc_write(xcsc, XV_CSC_OUTVIDEOFORMAT, xcsc->cft_out);
+ xcsc_write(xcsc, XV_CSC_CLIPMAX, xcsc->clip_max);
+ xcsc_write(xcsc, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+ xcsc_set_size(xcsc);
+ xcsc_write_coeff(xcsc);
+#ifdef DEBUG
+ xcsc_print_coeff(xcsc);
+ dev_dbg(xcsc->xvip.dev, "cft_in = %d cft_out = %d",
+ xcsc_read(xcsc, XV_CSC_INVIDEOFORMAT),
+ xcsc_read(xcsc, XV_CSC_OUTVIDEOFORMAT));
+ dev_dbg(xcsc->xvip.dev, "clipmax = %d clampmin = %d",
+ xcsc_read(xcsc, XV_CSC_CLIPMAX),
+ xcsc_read(xcsc, XV_CSC_CLAMPMIN));
+ dev_dbg(xcsc->xvip.dev, "height = %d width = %d",
+ xcsc_read(xcsc, XV_CSC_HEIGHT),
+ xcsc_read(xcsc, XV_CSC_WIDTH));
+#endif
+ /* Start VPSS CSC IP */
+ xcsc_write(xcsc, XV_CSC_AP_CTRL, XCSC_STREAM_ON);
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops xcsc_video_ops = {
+ .s_stream = xcsc_s_stream,
+};
+
+static int xcsc_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcsc_dev *xcsc = to_csc(subdev);
+
+ fmt->format = *__xcsc_get_pad_format(xcsc, cfg, fmt->pad, fmt->which);
+ return 0;
+}
+
+static int xcsc_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcsc_dev *xcsc = to_csc(subdev);
+ struct v4l2_mbus_framefmt *__format;
+ struct v4l2_mbus_framefmt *__propagate;
+
+ __format = __xcsc_get_pad_format(xcsc, cfg, fmt->pad, fmt->which);
+ /* Propagate to Source Pad */
+ __propagate = __xcsc_get_pad_format(xcsc, cfg,
+ XVIP_PAD_SOURCE, fmt->which);
+ *__format = fmt->format;
+
+ __format->width = clamp_t(unsigned int, fmt->format.width,
+ XV_CSC_MIN_WIDTH, xcsc->max_width);
+ __format->height = clamp_t(unsigned int, fmt->format.height,
+ XV_CSC_MIN_HEIGHT, xcsc->max_height);
+
+ switch (__format->code) {
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ break;
+ default:
+ /* Unsupported format: fall back to RGB and report the error */
+ __format->code = MEDIA_BUS_FMT_RBG888_1X24;
+ return -EINVAL;
+ }
+
+ /* Always propagate Sink image size to Source */
+ __propagate->width = __format->width;
+ __propagate->height = __format->height;
+
+ fmt->format = *__format;
+ xcsc_update_formats(xcsc);
+ xcsc_set_control_defaults(xcsc);
+ xcsc_set_v4l2_ctrl_defaults(xcsc);
+ dev_info(xcsc->xvip.dev, "VPSS CSC color controls reset to defaults");
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops xcsc_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xcsc_get_format,
+ .set_fmt = xcsc_set_format,
+};
+
+static const struct v4l2_subdev_ops xcsc_ops = {
+ .video = &xcsc_video_ops,
+ .pad = &xcsc_pad_ops
+};
+
+static int xcsc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct xcsc_dev *xcsc = container_of(ctrl->handler,
+ struct xcsc_dev,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_CSC_BRIGHTNESS:
+ xcsc->brightness = (2 * ctrl->val) + 20;
+ xcsc_set_brightness(xcsc);
+ break;
+ case V4L2_CID_XILINX_CSC_CONTRAST:
+ xcsc->contrast = (4 * ctrl->val) - 200;
+ xcsc_set_contrast(xcsc);
+ break;
+ case V4L2_CID_XILINX_CSC_RED_GAIN:
+ xcsc->red_gain = (2 * ctrl->val) + 20;
+ xcsc_set_red_gain(xcsc);
+ break;
+ case V4L2_CID_XILINX_CSC_BLUE_GAIN:
+ xcsc->blue_gain = (2 * ctrl->val) + 20;
+ xcsc_set_blue_gain(xcsc);
+ break;
+ case V4L2_CID_XILINX_CSC_GREEN_GAIN:
+ xcsc->green_gain = (2 * ctrl->val) + 20;
+ xcsc_set_green_gain(xcsc);
+ break;
+ }
+#ifdef DEBUG
+ xcsc_print_k_hw(xcsc);
+ xcsc_print_coeff(xcsc);
+#endif
+ return 0;
+}
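
The constants in the handler map each 0..100 V4L2 control range onto the internal ranges used by the shadow-coefficient math. Hypothetical helpers (names illustrative, not part of the driver) that make the mapping explicit:

	static inline s32 xcsc_map_gain(s32 val)	/* 0..100 -> 20..220 */
	{
		return 2 * val + 20;	/* also used for brightness */
	}

	static inline s32 xcsc_map_contrast(s32 val)	/* 0..100 -> -200..200 */
	{
		return 4 * val - 200;
	}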
+
+static const struct v4l2_ctrl_ops xcsc_ctrl_ops = {
+ .s_ctrl = xcsc_s_ctrl,
+};
+
+static struct v4l2_ctrl_config xcsc_color_ctrls[XCSC_COLOR_CTRL_COUNT] = {
+ /* Brightness */
+ {
+ .ops = &xcsc_ctrl_ops,
+ .id = V4L2_CID_XILINX_CSC_BRIGHTNESS,
+ .name = "CSC Brightness",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = XCSC_COLOR_CTRL_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Contrast */
+ {
+ .ops = &xcsc_ctrl_ops,
+ .id = V4L2_CID_XILINX_CSC_CONTRAST,
+ .name = "CSC Contrast",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = XCSC_COLOR_CTRL_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Red Gain */
+ {
+ .ops = &xcsc_ctrl_ops,
+ .id = V4L2_CID_XILINX_CSC_RED_GAIN,
+ .name = "CSC Red Gain",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = XCSC_COLOR_CTRL_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Blue Gain */
+ {
+ .ops = &xcsc_ctrl_ops,
+ .id = V4L2_CID_XILINX_CSC_BLUE_GAIN,
+ .name = "CSC Blue Gain",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = XCSC_COLOR_CTRL_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Green Gain */
+ {
+ .ops = &xcsc_ctrl_ops,
+ .id = V4L2_CID_XILINX_CSC_GREEN_GAIN,
+ .name = "CSC Green Gain",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = XCSC_COLOR_CTRL_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+};
+
+static int xcsc_open(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ struct xcsc_dev *xcsc = to_csc(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xcsc->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xcsc->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int xcsc_close(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops xcsc_internal_ops = {
+ .open = xcsc_open,
+ .close = xcsc_close,
+};
+
+static const struct media_entity_operations xcsc_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int xcsc_parse_of(struct xcsc_dev *xcsc)
+{
+ struct device *dev = xcsc->xvip.dev;
+ struct device_node *node = xcsc->xvip.dev->of_node;
+ const struct xvip_video_format *vip_format;
+ struct device_node *ports, *port;
+ int rval;
+ u32 port_id = 0;
+ u32 video_width[2] = { 0 };
+
+ rval = of_property_read_u32(node, "xlnx,max-height", &xcsc->max_height);
+ if (rval < 0) {
+ dev_err(dev, "xlnx,max-height is missing!");
+ return -EINVAL;
+ } else if (xcsc->max_height > XV_CSC_MAX_HEIGHT ||
+ xcsc->max_height < XV_CSC_MIN_HEIGHT) {
+ dev_err(dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(node, "xlnx,max-width", &xcsc->max_width);
+ if (rval < 0) {
+ dev_err(dev, "xlnx,max-width is missing!");
+ return -EINVAL;
+ } else if (xcsc->max_width > XV_CSC_MAX_WIDTH ||
+ xcsc->max_width < XV_CSC_MIN_WIDTH) {
+ dev_err(dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "Invalid media pad format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ rval = of_property_read_u32(port, "reg", &port_id);
+ if (rval < 0) {
+ dev_err(dev, "No reg in DT to specify pad");
+ return rval;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "Invalid reg in DT");
+ return -EINVAL;
+ }
+ xcsc->vip_formats[port_id] = vip_format;
+
+ rval = of_property_read_u32(port, "xlnx,video-width",
+ &video_width[port_id]);
+ if (rval < 0) {
+ dev_err(dev,
+ "DT Port%d xlnx,video-width not found",
+ port_id);
+ return rval;
+ }
+ }
+ }
+ if (video_width[0] != video_width[1]) {
+ dev_err(dev, "Changing video width in DT not supported");
+ return -EINVAL;
+ }
+ switch (video_width[0]) {
+ case XVIDC_BPC_8:
+ case XVIDC_BPC_10:
+ xcsc->color_depth = video_width[0];
+ break;
+ default:
+ dev_err(dev, "Unsupported color depth %d", video_width[0]);
+ return -EINVAL;
+ }
+ /* Reset GPIO */
+ xcsc->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(xcsc->rst_gpio)) {
+ if (PTR_ERR(xcsc->rst_gpio) != -EPROBE_DEFER)
+ dev_err(dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(xcsc->rst_gpio);
+ }
+ return 0;
+}
+
+static int xcsc_probe(struct platform_device *pdev)
+{
+ struct xcsc_dev *xcsc;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *def_fmt;
+ int rval, itr;
+
+ xcsc = devm_kzalloc(&pdev->dev, sizeof(*xcsc), GFP_KERNEL);
+ if (!xcsc)
+ return -ENOMEM;
+
+ xcsc->xvip.dev = &pdev->dev;
+
+ rval = xcsc_parse_of(xcsc);
+ if (rval < 0)
+ return rval;
+
+ /* Reset and initialize the core */
+ gpiod_set_value_cansleep(xcsc->rst_gpio, XCSC_RESET_DEASSERT);
+ rval = xvip_init_resources(&xcsc->xvip);
+ if (rval < 0)
+ return rval;
+
+ /* Init v4l2 subdev */
+ subdev = &xcsc->xvip.subdev;
+ v4l2_subdev_init(subdev, &xcsc_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xcsc_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xcsc);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Default Formats Initialization */
+ xcsc_set_default_state(xcsc);
+ def_fmt = &xcsc->default_formats[XVIP_PAD_SINK];
+ def_fmt->code = xcsc->vip_formats[XVIP_PAD_SINK]->code;
+ def_fmt->field = V4L2_FIELD_NONE;
+ def_fmt->colorspace = V4L2_COLORSPACE_REC709;
+ def_fmt->width = XV_CSC_DEFAULT_WIDTH;
+ def_fmt->height = XV_CSC_DEFAULT_HEIGHT;
+ xcsc->formats[XVIP_PAD_SINK] = *def_fmt;
+ /* Source supports only YUV 444, YUV 422, and RGB */
+ def_fmt = &xcsc->default_formats[XVIP_PAD_SOURCE];
+ *def_fmt = xcsc->default_formats[XVIP_PAD_SINK];
+ def_fmt->code = xcsc->vip_formats[XVIP_PAD_SOURCE]->code;
+ def_fmt->width = XV_CSC_DEFAULT_WIDTH;
+ def_fmt->height = XV_CSC_DEFAULT_HEIGHT;
+ xcsc->formats[XVIP_PAD_SOURCE] = *def_fmt;
+ xcsc->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xcsc->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ /* Init Media Entity */
+ subdev->entity.ops = &xcsc_media_ops;
+ rval = media_entity_pads_init(&subdev->entity, 2, xcsc->pads);
+ if (rval < 0)
+ goto media_error;
+ /* V4L2 Control Setup */
+ v4l2_ctrl_handler_init(&xcsc->ctrl_handler,
+ ARRAY_SIZE(xcsc_color_ctrls));
+ for (itr = 0; itr < ARRAY_SIZE(xcsc_color_ctrls); itr++) {
+ xcsc->custom_ctrls[itr] =
+ v4l2_ctrl_new_custom(&xcsc->ctrl_handler,
+ &xcsc_color_ctrls[itr], NULL);
+ }
+ if (xcsc->ctrl_handler.error) {
+ dev_err(&pdev->dev, "Failed to add v4l2 controls");
+ rval = xcsc->ctrl_handler.error;
+ goto ctrl_error;
+ }
+ subdev->ctrl_handler = &xcsc->ctrl_handler;
+ rval = v4l2_ctrl_handler_setup(&xcsc->ctrl_handler);
+ if (rval < 0) {
+ dev_err(xcsc->xvip.dev, "Failed to setup control handler");
+ goto ctrl_error;
+ }
+ platform_set_drvdata(pdev, xcsc);
+ rval = v4l2_async_register_subdev(subdev);
+ if (rval < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto ctrl_error;
+ }
+ dev_info(&pdev->dev, "VPSS CSC %d-bit Color Depth Probe Successful",
+ xcsc->color_depth);
+ return 0;
+ctrl_error:
+ v4l2_ctrl_handler_free(&xcsc->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+media_error:
+ xvip_cleanup_resources(&xcsc->xvip);
+ return rval;
+}
+
+static int xcsc_remove(struct platform_device *pdev)
+{
+ struct xcsc_dev *xcsc = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xcsc->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xcsc->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xcsc->xvip);
+ return 0;
+}
+
+static const struct of_device_id xcsc_of_id_table[] = {
+ {.compatible = "xlnx,v-vpss-csc"},
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, xcsc_of_id_table);
+
+static struct platform_driver xcsc_driver = {
+ .driver = {
+ .name = "xilinx-vpss-csc",
+ .of_match_table = xcsc_of_id_table,
+ },
+ .probe = xcsc_probe,
+ .remove = xcsc_remove,
+};
+
+module_platform_driver(xcsc_driver);
+MODULE_DESCRIPTION("Xilinx VPSS CSC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-vpss-scaler.c b/drivers/media/platform/xilinx/xilinx-vpss-scaler.c
new file mode 100644
index 000000000000..b29639dd8023
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vpss-scaler.c
@@ -0,0 +1,1936 @@
+/*
+ * Xilinx VPSS Scaler
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+#include "xilinx-vip.h"
+
+#define XSCALER_MIN_WIDTH (64)
+#define XSCALER_MAX_WIDTH (8192)
+#define XSCALER_MIN_HEIGHT (64)
+#define XSCALER_MAX_HEIGHT (4320)
+#define XSCALER_MAX_PHASES (64)
+
+/* Fall back to these defaults in case the application does not configure them */
+#define XSCALER_DEF_IN_HEIGHT (720)
+#define XSCALER_DEF_IN_WIDTH (1280)
+#define XSCALER_DEF_OUT_HEIGHT (1080)
+#define XSCALER_DEF_OUT_WIDTH (1920)
+
+#define XSCALER_HSF (0x0100)
+#define XSCALER_VSF (0x0104)
+#define XSCALER_SF_SHIFT (20)
+#define XSCALER_SF_MASK (0xffffff)
+#define XSCALER_SOURCE_SIZE (0x0108)
+#define XSCALER_SIZE_HORZ_SHIFT (0)
+#define XSCALER_SIZE_VERT_SHIFT (16)
+#define XSCALER_SIZE_MASK (0xfff)
+#define XSCALER_HAPERTURE (0x010c)
+#define XSCALER_VAPERTURE (0x0110)
+#define XSCALER_APERTURE_START_SHIFT (0)
+#define XSCALER_APERTURE_END_SHIFT (16)
+#define XSCALER_OUTPUT_SIZE (0x0114)
+#define XSCALER_COEF_DATA_IN (0x0134)
+#define XSCALER_BITSHIFT_16 (16)
+
+/* Video subsystem block offsets */
+#define S_AXIS_RESET_OFF (0x00010000)
+#define V_HSCALER_OFF (0x00000000)
+#define V_VSCALER_OFF (0x00020000)
+
+/* HW Reset Network GPIO Channel */
+#define XGPIO_CH_RESET_SEL (1)
+#define XGPIO_RESET_MASK_VIDEO_IN BIT(0)
+#define XGPIO_RESET_MASK_IP_AXIS BIT(1)
+#define XGPIO_RESET_MASK_IP_AXIMM BIT(0)
+#define XGPIO_RESET_MASK_ALL_BLOCKS (XGPIO_RESET_MASK_VIDEO_IN | \
+ XGPIO_RESET_MASK_IP_AXIS)
+#define XGPIO_DATA_OFFSET (0x0)
+#define XGPIO_TRI_OFFSET (0x4)
+#define XGPIO_DATA2_OFFSET (0x8)
+#define XGPIO_TRI2_OFFSET (0xc)
+
+#define XGPIO_GIE_OFFSET (0x11c)
+#define XGPIO_ISR_OFFSET (0x120)
+#define XGPIO_IER_OFFSET (0x128)
+#define XGPIO_CHAN_OFFSET (8)
+#define STEP_PRECISION (65536)
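
STEP_PRECISION (65536) is the usual 16.16 fixed-point unit for scale ratios in this scaler family; a hedged sketch of how such a ratio is typically formed (this helper is an assumption, the actual use is not in the hunks shown here):

	/* Hypothetical 16.16 scale factor from input/output sizes in pixels */
	static inline u32 xscaler_ratio(u32 in_size, u32 out_size)
	{
		return (u32)(((u64)in_size * STEP_PRECISION) / out_size);
	}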
+
+/* Video IP Formats */
+enum xscaler_vid_reg_fmts {
+ XVIDC_CSF_RGB = 0,
+ XVIDC_CSF_YCRCB_444,
+ XVIDC_CSF_YCRCB_422,
+ XVIDC_CSF_YCRCB_420,
+};
+
+/* Video IP PPC */
+#define XSCALER_PPC_1 (1)
+#define XSCALER_PPC_2 (2)
+#define XSCALER_PPC_4 (4)
+
+#define XV_HSCALER_MAX_H_TAPS (12)
+#define XV_HSCALER_MAX_H_PHASES (64)
+#define XV_HSCALER_MAX_LINE_WIDTH (3840)
+#define XV_VSCALER_MAX_V_TAPS (12)
+#define XV_VSCALER_MAX_V_PHASES (64)
+
+#define XV_HSCALER_TAPS_2 (2)
+#define XV_HSCALER_TAPS_4 (4)
+#define XV_HSCALER_TAPS_6 (6)
+#define XV_HSCALER_TAPS_8 (8)
+#define XV_HSCALER_TAPS_10 (10)
+#define XV_HSCALER_TAPS_12 (12)
+#define XV_VSCALER_TAPS_2 (2)
+#define XV_VSCALER_TAPS_4 (4)
+#define XV_VSCALER_TAPS_6 (6)
+#define XV_VSCALER_TAPS_8 (8)
+#define XV_VSCALER_TAPS_10 (10)
+#define XV_VSCALER_TAPS_12 (12)
+
+/* Mask definitions for Low and high 16 bits in a 32 bit number */
+#define XHSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XHSC_MASK_HIGH_16BITS GENMASK(31, 16)
+#define XHSC_MASK_LOW_32BITS GENMASK(31, 0)
+#define XHSC_STEP_PRECISION_SHIFT (16)
+#define XHSC_HPHASE_SHIFT_BY_6 (6)
+#define XHSC_HPHASE_MULTIPLIER (9)
+#define XHSC_HPHASE_MUL_4PPC (10)
+
+/* Mask definitions for Low and high 16 bits in a 32 bit number */
+#define XVSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XVSC_MASK_HIGH_16BITS GENMASK(31, 16)
+
+/* XSCALER POWER MACROS */
+#define XSCALER_RESET_ASSERT (0x1)
+#define XSCALER_RESET_DEASSERT (0x0)
+
+/* Scaler AP Control Registers */
+#define XSCALER_START BIT(0)
+#define XSCALER_AUTO_RESTART BIT(7)
+#define XSCALER_STREAM_ON (XSCALER_START | XSCALER_AUTO_RESTART)
+
+/* H-scaler registers */
+#define XV_HSCALER_CTRL_ADDR_AP_CTRL (0x0000)
+#define XV_HSCALER_CTRL_ADDR_GIE (0x0004)
+#define XV_HSCALER_CTRL_ADDR_IER (0x0008)
+#define XV_HSCALER_CTRL_ADDR_ISR (0x000c)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HEIGHT_DATA (0x0010)
+#define XV_HSCALER_CTRL_ADDR_HWREG_WIDTHIN_DATA (0x0018)
+#define XV_HSCALER_CTRL_ADDR_HWREG_WIDTHOUT_DATA (0x0020)
+#define XV_HSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA (0x0028)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PIXELRATE_DATA (0x0030)
+#define XV_HSCALER_CTRL_ADDR_HWREG_COLORMODEOUT_DATA (0x0038)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_BASE (0x0800)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_HIGH (0x0bff)
+
+/* H-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const u16
+xhsc_coeff_taps6[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_6] = {
+ { -132, 236, 3824, 236, -132, 64, },
+ { -116, 184, 3816, 292, -144, 64, },
+ { -100, 132, 3812, 348, -160, 64, },
+ { -88, 84, 3808, 404, -176, 64, },
+ { -72, 36, 3796, 464, -192, 64, },
+ { -60, -8, 3780, 524, -208, 68, },
+ { -48, -52, 3768, 588, -228, 68, },
+ { -32, -96, 3748, 652, -244, 68, },
+ { -20, -136, 3724, 716, -260, 72, },
+ { -8, -172, 3696, 784, -276, 72, },
+ { 0, -208, 3676, 848, -292, 72, },
+ { 12, -244, 3640, 920, -308, 76, },
+ { 20, -276, 3612, 988, -324, 76, },
+ { 32, -304, 3568, 1060, -340, 80, },
+ { 40, -332, 3532, 1132, -356, 80, },
+ { 48, -360, 3492, 1204, -372, 84, },
+ { 56, -384, 3448, 1276, -388, 88, },
+ { 64, -408, 3404, 1352, -404, 88, },
+ { 72, -428, 3348, 1428, -416, 92, },
+ { 76, -448, 3308, 1500, -432, 92, },
+ { 84, -464, 3248, 1576, -444, 96, },
+ { 88, -480, 3200, 1652, -460, 96, },
+ { 92, -492, 3140, 1728, -472, 100, },
+ { 96, -504, 3080, 1804, -484, 104, },
+ { 100, -516, 3020, 1880, -492, 104, },
+ { 104, -524, 2956, 1960, -504, 104, },
+ { 104, -532, 2892, 2036, -512, 108, },
+ { 108, -540, 2832, 2108, -520, 108, },
+ { 108, -544, 2764, 2184, -528, 112, },
+ { 112, -544, 2688, 2260, -532, 112, },
+ { 112, -548, 2624, 2336, -540, 112, },
+ { 112, -548, 2556, 2408, -544, 112, },
+ { 112, -544, 2480, 2480, -544, 112, },
+ { 112, -544, 2408, 2556, -548, 112, },
+ { 112, -540, 2336, 2624, -548, 112, },
+ { 112, -532, 2260, 2688, -544, 112, },
+ { 112, -528, 2184, 2764, -544, 108, },
+ { 108, -520, 2108, 2832, -540, 108, },
+ { 108, -512, 2036, 2892, -532, 104, },
+ { 104, -504, 1960, 2956, -524, 104, },
+ { 104, -492, 1880, 3020, -516, 100, },
+ { 104, -484, 1804, 3080, -504, 96, },
+ { 100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const u16
+xhsc_coeff_taps8[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const u16
+xhsc_coeff_taps10[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const u16
+xhsc_coeff_taps12[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
+
+#define XV_HSCALER_CTRL_WIDTH_HWREG_HFLTCOEFF (16)
+#define XV_HSCALER_CTRL_DEPTH_HWREG_HFLTCOEFF (384)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_BASE (0x2000)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_HIGH (0x3fff)
+#define XV_HSCALER_CTRL_WIDTH_HWREG_PHASESH_V (18)
+#define XV_HSCALER_CTRL_DEPTH_HWREG_PHASESH_V (1920)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASEH_FIX (0x4000)
+
+/* H-scaler masks */
+#define XV_HSCALER_PHASESH_V_OUTPUT_WR_EN BIT(8)
+
+/* V-scaler registers */
+#define XV_VSCALER_CTRL_ADDR_AP_CTRL (0x000)
+#define XV_VSCALER_CTRL_ADDR_GIE (0x004)
+#define XV_VSCALER_CTRL_ADDR_IER (0x008)
+#define XV_VSCALER_CTRL_ADDR_ISR (0x00c)
+#define XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTIN_DATA (0x010)
+#define XV_VSCALER_CTRL_ADDR_HWREG_WIDTH_DATA (0x018)
+#define XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTOUT_DATA (0x020)
+#define XV_VSCALER_CTRL_ADDR_HWREG_LINERATE_DATA (0x028)
+#define XV_VSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA (0x030)
+#define XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_BASE (0x800)
+#define XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_HIGH (0xbff)
+
+/* V-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const u16
+xvsc_coeff_taps6[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_6] = {
+ {-132, 236, 3824, 236, -132, 64, },
+ {-116, 184, 3816, 292, -144, 64, },
+ {-100, 132, 3812, 348, -160, 64, },
+ {-88, 84, 3808, 404, -176, 64, },
+ {-72, 36, 3796, 464, -192, 64, },
+ {-60, -8, 3780, 524, -208, 68, },
+ {-48, -52, 3768, 588, -228, 68, },
+ {-32, -96, 3748, 652, -244, 68, },
+ {-20, -136, 3724, 716, -260, 72, },
+ {-8, -172, 3696, 784, -276, 72, },
+ {0, -208, 3676, 848, -292, 72, },
+ {12, -244, 3640, 920, -308, 76, },
+ {20, -276, 3612, 988, -324, 76, },
+ {32, -304, 3568, 1060, -340, 80, },
+ {40, -332, 3532, 1132, -356, 80, },
+ {48, -360, 3492, 1204, -372, 84, },
+ {56, -384, 3448, 1276, -388, 88, },
+ {64, -408, 3404, 1352, -404, 88, },
+ {72, -428, 3348, 1428, -416, 92, },
+ {76, -448, 3308, 1500, -432, 92, },
+ {84, -464, 3248, 1576, -444, 96, },
+ {88, -480, 3200, 1652, -460, 96, },
+ {92, -492, 3140, 1728, -472, 100, },
+ {96, -504, 3080, 1804, -484, 104, },
+ {100, -516, 3020, 1880, -492, 104, },
+ {104, -524, 2956, 1960, -504, 104, },
+ {104, -532, 2892, 2036, -512, 108, },
+ {108, -540, 2832, 2108, -520, 108, },
+ {108, -544, 2764, 2184, -528, 112, },
+ {112, -544, 2688, 2260, -532, 112, },
+ {112, -548, 2624, 2336, -540, 112, },
+ {112, -548, 2556, 2408, -544, 112, },
+ {112, -544, 2480, 2480, -544, 112, },
+ {112, -544, 2408, 2556, -548, 112, },
+ {112, -540, 2336, 2624, -548, 112, },
+ {112, -532, 2260, 2688, -544, 112, },
+ {112, -528, 2184, 2764, -544, 108, },
+ {108, -520, 2108, 2832, -540, 108, },
+ {108, -512, 2036, 2892, -532, 104, },
+ {104, -504, 1960, 2956, -524, 104, },
+ {104, -492, 1880, 3020, -516, 100, },
+ {104, -484, 1804, 3080, -504, 96, },
+ {100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const u16
+xvsc_coeff_taps8[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const u16
+xvsc_coeff_taps10[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const u16
+xvsc_coeff_taps12[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
+
+#define XV_VSCALER_CTRL_WIDTH_HWREG_VFLTCOEFF (16)
+#define XV_VSCALER_CTRL_DEPTH_HWREG_VFLTCOEFF (384)
+
+/* These bits are for xscaler feature flags */
+#define XSCALER_CLK_PROP BIT(0)
+#define XSCALER_HPHASE_FIX BIT(1)
+
+/**
+ * struct xscaler_feature - DT or IP property structure
+ * @flags: Bitmask of properties enabled in the IP or DT
+ */
+struct xscaler_feature {
+ u32 flags;
+};
+
+/**
+ * struct xscaler_device - Xilinx Scaler device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: Scaler sub-device media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @vip_formats: Xilinx Video IP format retrieved from the DT
+ * @num_hori_taps: number of horizontal taps
+ * @num_vert_taps: number of vertical taps
+ * @max_num_phases: maximum number of phases
+ * @pix_per_clk: Pixels per clock cycle the IP operates on
+ * @max_pixels: Maximum line width (in pixels) supported by the H-scaler
+ * @max_lines: Maximum number of lines supported by the V-scaler
+ * @H_phases: The phases needed to program the H-scaler for different taps
+ * @hscaler_coeff: The complete array of H-scaler coefficients
+ * @vscaler_coeff: The complete array of V-scaler coefficients
+ * @is_polyphase: Track if scaling algorithm is polyphase or not
+ * @rst_gpio: GPIO reset line to bring VPSS Scaler out of reset
+ * @cfg: Pointer to scaler config structure
+ * @aclk_axis: AXI4-Stream video interface clock
+ * @aclk_ctrl: AXI4-Lite control interface clock
+ */
+struct xscaler_device {
+ struct xvip_device xvip;
+
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+
+ u32 num_hori_taps;
+ u32 num_vert_taps;
+ u32 max_num_phases;
+ u32 pix_per_clk;
+ u32 max_pixels;
+ u32 max_lines;
+ u64 H_phases[XV_HSCALER_MAX_LINE_WIDTH];
+ short hscaler_coeff[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_MAX_H_TAPS];
+ short vscaler_coeff[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_MAX_V_TAPS];
+ bool is_polyphase;
+
+ struct gpio_desc *rst_gpio;
+ const struct xscaler_feature *cfg;
+ struct clk *aclk_axis;
+ struct clk *aclk_ctrl;
+};
+
+static const struct xscaler_feature xlnx_scaler_v2_2 = {
+ .flags = XSCALER_CLK_PROP | XSCALER_HPHASE_FIX,
+};
+
+static const struct xscaler_feature xlnx_scaler_v1_0 = {
+ .flags = XSCALER_CLK_PROP,
+};
+
+static const struct xscaler_feature xlnx_scaler = {
+ .flags = 0,
+};
+
+static const struct of_device_id xscaler_of_id_table[] = {
+ { .compatible = "xlnx,v-vpss-scaler",
+ .data = &xlnx_scaler},
+ { .compatible = "xlnx,v-vpss-scaler-1.0",
+ .data = &xlnx_scaler_v1_0},
+ { .compatible = "xlnx,v-vpss-scaler-2.2",
+ .data = &xlnx_scaler_v2_2},
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, xscaler_of_id_table);
+
+static inline struct xscaler_device *to_scaler(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xscaler_device, xvip.subdev);
+}
+
+static void
+xv_hscaler_calculate_phases(struct xscaler_device *xscaler,
+ u32 width_in, u32 width_out, u32 pixel_rate)
+{
+ unsigned int loop_width;
+ unsigned int x, s;
+ int offset = 0;
+ int xwrite_pos = 0;
+ bool output_write_en;
+ bool get_new_pix;
+ u64 phaseH;
+ u64 array_idx = 0;
+ int nr_rds = 0;
+ int nr_rds_clck;
+ unsigned int nphases = xscaler->max_num_phases;
+ unsigned int nppc = xscaler->pix_per_clk;
+ unsigned int shift = XHSC_STEP_PRECISION_SHIFT - ilog2(nphases);
+
+ loop_width = max_t(u32, width_in, width_out);
+ loop_width = ALIGN(loop_width + nppc - 1, nppc);
+
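+ /*
+ * Walk the output line as a digital differential analyser:
+ * "offset" accumulates pixel_rate in 16.16 fixed point; a non-zero
+ * integer part consumes a new input sample, and a zero integer
+ * part produces a new output sample.
+ */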
+ for (x = 0; x < loop_width; x++) {
+ nr_rds_clck = 0;
+ for (s = 0; s < nppc; s++) {
+ phaseH = (offset >> shift) & (nphases - 1);
+ get_new_pix = false;
+ output_write_en = false;
+ if ((offset >> XHSC_STEP_PRECISION_SHIFT) != 0) {
+ /* read a new input sample */
+ get_new_pix = true;
+ offset -= (1 << XHSC_STEP_PRECISION_SHIFT);
+ array_idx++;
+ }
+
+ if (((offset >> XHSC_STEP_PRECISION_SHIFT) == 0) &&
+ (xwrite_pos < width_out)) {
+ /* produce a new output sample */
+ offset += pixel_rate;
+ output_write_en = true;
+ xwrite_pos++;
+ }
+
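+ /*
+ * Pack the 6-bit phase, the input sample index and an output
+ * write-enable bit for each of the nppc sub-pixels; fields are
+ * 10 bits wide in 4 PPC designs and 9 bits wide otherwise.
+ */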
+ if (nppc == XSCALER_PPC_4) {
+ xscaler->H_phases[x] |=
+ ((u64)phaseH <<
+ (s * XHSC_HPHASE_MUL_4PPC));
+ xscaler->H_phases[x] |=
+ ((u64)array_idx <<
+ (XHSC_HPHASE_SHIFT_BY_6 +
+ (s * XHSC_HPHASE_MUL_4PPC)));
+ if (output_write_en)
+ xscaler->H_phases[x] |=
+ ((u64)1 <<
+ (XHSC_HPHASE_MULTIPLIER + s *
+ XHSC_HPHASE_MUL_4PPC));
+ } else {
+ xscaler->H_phases[x] |=
+ (phaseH <<
+ (s * XHSC_HPHASE_MULTIPLIER));
+ xscaler->H_phases[x] |=
+ (array_idx <<
+ (XHSC_HPHASE_SHIFT_BY_6 +
+ (s * XHSC_HPHASE_MULTIPLIER)));
+
+ if (output_write_en)
+ xscaler->H_phases[x] |=
+ (XV_HSCALER_PHASESH_V_OUTPUT_WR_EN <<
+ (s * XHSC_HPHASE_MULTIPLIER));
+ }
+
+ if (get_new_pix)
+ nr_rds_clck++;
+ }
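+ /* nppc is a power of two (1, 2 or 4), so masking wraps array_idx */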
+ if (array_idx >= nppc)
+ array_idx &= (nppc - 1);
+
+ nr_rds += nr_rds_clck;
+ if (nr_rds >= nppc)
+ nr_rds -= nppc;
+ }
+}
+
+static void
+xv_hscaler_load_ext_coeff(struct xscaler_device *xscaler,
+ const short *coeff, u32 ntaps)
+{
+ unsigned int i, j, pad, offset;
+ u32 nphases = xscaler->max_num_phases;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XV_HSCALER_MAX_H_TAPS - ntaps;
+ offset = pad >> 1;
+ dev_dbg(xscaler->xvip.dev,
+ "%s : Pad = %d Offset = %d Nphases = %d ntaps = %d",
+ __func__, pad, offset, nphases, ntaps);
+
+ /* Load coefficients into scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ xscaler->hscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+
+ if (pad) { /* effective taps < max_taps */
+ for (i = 0; i < nphases; i++) {
+ /* pad left */
+ for (j = 0; j < offset; j++)
+ xscaler->hscaler_coeff[i][j] = 0;
+ /* pad right */
+ j = ntaps + offset;
+ for (; j < XV_HSCALER_MAX_H_TAPS; j++)
+ xscaler->hscaler_coeff[i][j] = 0;
+ }
+ }
+}
+
+/**
+ * xv_hscaler_select_coeff - Select the H-scaler coefficients for operation
+ * @xscaler: VPSS Scaler device information
+ * @width_in: Width of input video
+ * @width_out: Width of desired output video
+ *
+ * There are instances when an N-tap filter might operate in an M-tap
+ * configuration where N > M.
+ *
+ * For example:
+ * Depending on the scaling ratio (while downscaling), a 12-tap
+ * filter may operate with 10-tap coefficients and zero-pad the
+ * remaining coefficients.
+ *
+ * While upscaling the driver will program 6-tap filter coefficients
+ * in any N-tap configurations (for N >= 6).
+ *
+ * This selection is adopted by the driver as it gives optimal
+ * video output, as determined by repeated testing of the IP.
+ *
+ * Return: 0 on success, or -EINVAL for an unsupported number of
+ * H-scaler taps.
+ */
+static int
+xv_hscaler_select_coeff(struct xscaler_device *xscaler,
+ u32 width_in, u32 width_out)
+{
+ const short *coeff;
+ u16 hscale_ratio;
+ u32 ntaps = xscaler->num_hori_taps;
+
+ /*
+ * Scale Down Mode will use dynamic filter selection logic
+ * Scale Up Mode (including 1:1) will always use 6 tap filter
+ */
+ if (width_out < width_in) {
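+ /*
+ * hscale_ratio is the downscale factor in tenths, e.g.
+ * 3840 -> 1920 gives 20 (2.0x), which selects the 8-tap
+ * coefficients of a 12-tap filter below.
+ */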
+ hscale_ratio = ((width_in * 10) / width_out);
+
+ switch (xscaler->num_hori_taps) {
+ case XV_HSCALER_TAPS_6:
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ break;
+ case XV_HSCALER_TAPS_8:
+ if (hscale_ratio > 15) {
+ coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ case XV_HSCALER_TAPS_10:
+ if (hscale_ratio > 25) {
+ coeff = &xhsc_coeff_taps10[0][0];
+ ntaps = XV_HSCALER_TAPS_10;
+ } else if (hscale_ratio > 15) {
+ coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ case XV_HSCALER_TAPS_12:
+ if (hscale_ratio > 35) {
+ coeff = &xhsc_coeff_taps12[0][0];
+ ntaps = XV_HSCALER_TAPS_12;
+ } else if (hscale_ratio > 25) {
+ coeff = &xhsc_coeff_taps10[0][0];
+ ntaps = XV_HSCALER_TAPS_10;
+ } else if (hscale_ratio > 15) {
+ coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ default:
+ dev_err(xscaler->xvip.dev,
+ "Unsupported H-scaler number of taps = %d",
+ xscaler->num_hori_taps);
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(xscaler->xvip.dev, "H-scaler : scale up 6 tap");
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ xv_hscaler_load_ext_coeff(xscaler, coeff, ntaps);
+ return 0;
+}
+
+static void xv_hscaler_set_coeff(struct xscaler_device *xscaler)
+{
+ int val, i, j, offset, rd_indx;
+ u32 ntaps = xscaler->num_hori_taps;
+ u32 nphases = xscaler->max_num_phases;
+ u32 base_addr;
+
+ offset = (XV_HSCALER_MAX_H_TAPS - ntaps) / 2;
+ base_addr = V_HSCALER_OFF + XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_BASE;
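+ /* Pack two signed 16-bit coefficients into each 32-bit register write */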
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (xscaler->hscaler_coeff[i][rd_indx + 1] <<
+ XSCALER_BITSHIFT_16) |
+ (xscaler->hscaler_coeff[i][rd_indx] &
+ XHSC_MASK_LOW_16BITS);
+ xvip_write(&xscaler->xvip, base_addr +
+ ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+static void
+xv_vscaler_load_ext_coeff(struct xscaler_device *xscaler,
+ const short *coeff, u32 ntaps)
+{
+ int i, j, pad, offset;
+ u32 nphases = xscaler->max_num_phases;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XV_VSCALER_MAX_V_TAPS - ntaps;
+ offset = pad ? (pad >> 1) : 0;
+
+ dev_dbg(xscaler->xvip.dev,
+ "%s : Pad = %d Offset = %d Nphases = %d ntaps = %d",
+ __func__, pad, offset, nphases, ntaps);
+
+ /* Load coefficients into the scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ xscaler->vscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+
+ if (pad) { /* effective taps < max_taps */
+ for (i = 0; i < nphases; i++) {
+ /* pad left */
+ for (j = 0; j < offset; j++)
+ xscaler->vscaler_coeff[i][j] = 0;
+ /* pad right */
+ j = ntaps + offset;
+ for (; j < XV_VSCALER_MAX_V_TAPS; j++)
+ xscaler->vscaler_coeff[i][j] = 0;
+ }
+ }
+}
+
+static void xv_vscaler_set_coeff(struct xscaler_device *xscaler)
+{
+ u32 nphases = xscaler->max_num_phases;
+ u32 ntaps = xscaler->num_vert_taps;
+ int val, i, j, offset, rd_indx;
+ u32 base_addr;
+
+ offset = (XV_VSCALER_MAX_V_TAPS - ntaps) / 2;
+ base_addr = V_VSCALER_OFF + XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_BASE;
+
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (xscaler->vscaler_coeff[i][rd_indx + 1] <<
+ XSCALER_BITSHIFT_16) |
+ (xscaler->vscaler_coeff[i][rd_indx] &
+ XVSC_MASK_LOW_16BITS);
+ xvip_write(&xscaler->xvip,
+ base_addr + ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+/**
+ * xv_vscaler_select_coeff - Select the V-scaler coefficients for operation
+ * @xscaler: VPSS Scaler device information
+ * @height_in: Height of input video
+ * @height_out: Height of desired output video
+ *
+ * There are instances when an N-tap filter might operate in an M-tap
+ * configuration where N > M.
+ *
+ * For example:
+ * Depending on the scaling ratio (while downscaling), a 10-tap
+ * filter may operate with 6-tap coefficients and zero-pad the
+ * remaining coefficients.
+ *
+ * While upscaling the driver will program 6-tap filter coefficients
+ * in any N-tap configurations (for N >= 6).
+ *
+ * This selection is adopted by the driver as it gives optimal
+ * video output, as determined by repeated testing of the IP.
+ *
+ * Return: 0 on success, or -EINVAL for an unsupported number of
+ * V-scaler taps.
+ */
+static int
+xv_vscaler_select_coeff(struct xscaler_device *xscaler,
+ u32 height_in, u32 height_out)
+{
+ const short *coeff;
+ u16 vscale_ratio;
+ u32 ntaps = xscaler->num_vert_taps;
+
+ /*
+ * Scale Down Mode will use dynamic filter selection logic
+ * Scale Up Mode (including 1:1) will always use 6 tap filter
+ */
+
+ if (height_out < height_in) {
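+ /* vscale_ratio: downscale factor in tenths, as for the H-scaler */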
+ vscale_ratio = ((height_in * 10) / height_out);
+
+ switch (xscaler->num_vert_taps) {
+ case XV_VSCALER_TAPS_6:
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ break;
+ case XV_VSCALER_TAPS_8:
+ if (vscale_ratio > 15) {
+ coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XV_VSCALER_TAPS_8;
+ } else {
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+ break;
+ case XV_VSCALER_TAPS_10:
+ if (vscale_ratio > 25) {
+ coeff = &xvsc_coeff_taps10[0][0];
+ ntaps = XV_VSCALER_TAPS_10;
+ } else if (vscale_ratio > 15) {
+ coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XV_VSCALER_TAPS_8;
+ } else {
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+ break;
+ case XV_VSCALER_TAPS_12:
+ if (vscale_ratio > 35) {
+ coeff = &xvsc_coeff_taps12[0][0];
+ ntaps = XV_VSCALER_TAPS_12;
+ } else if (vscale_ratio > 25) {
+ coeff = &xvsc_coeff_taps10[0][0];
+ ntaps = XV_VSCALER_TAPS_10;
+ } else if (vscale_ratio > 15) {
+ coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XV_VSCALER_TAPS_8;
+ } else {
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+ break;
+ default:
+ dev_err(xscaler->xvip.dev,
+ "Unsupported V-scaler number of taps = %d",
+ xscaler->num_vert_taps);
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(xscaler->xvip.dev, "V-scaler : scale up 6 tap");
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+
+ xv_vscaler_load_ext_coeff(xscaler, coeff, ntaps);
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+
+static inline void
+xv_procss_disable_block(struct xvip_device *xvip, u32 channel, u32 ip_block)
+{
+ xvip_clr(xvip, ((channel - 1) * XGPIO_CHAN_OFFSET) +
+ XGPIO_DATA_OFFSET + S_AXIS_RESET_OFF,
+ ip_block);
+}
+
+static inline void
+xv_procss_enable_block(struct xvip_device *xvip, u32 channel, u32 ip_block)
+{
+ xvip_set(xvip, ((channel - 1) * XGPIO_CHAN_OFFSET) +
+ XGPIO_DATA_OFFSET + S_AXIS_RESET_OFF,
+ ip_block);
+}
+
+static void xscaler_reset(struct xscaler_device *xscaler)
+{
+ xv_procss_disable_block(&xscaler->xvip, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_ALL_BLOCKS);
+ xv_procss_enable_block(&xscaler->xvip, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_IP_AXIS);
+}
+
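+/*
+ * Program the V-scaler input color format and return the color format
+ * produced at the V-scaler output (the H-scaler input), or -EINVAL for
+ * an unsupported input media bus code.
+ */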
+static int
+xv_vscaler_setup_video_fmt(struct xscaler_device *xscaler, u32 code_in)
+{
+ u32 video_in;
+
+ switch (code_in) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ dev_dbg(xscaler->xvip.dev,
+ "Vscaler Input Media Format YUV 420");
+ video_in = XVIDC_CSF_YCRCB_420;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ dev_dbg(xscaler->xvip.dev,
+ "Vscaler Input Media Format YUV 422");
+ video_in = XVIDC_CSF_YCRCB_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ dev_dbg(xscaler->xvip.dev,
+ "Vscaler Input Media Format YUV 444");
+ video_in = XVIDC_CSF_YCRCB_444;
+ break;
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ dev_dbg(xscaler->xvip.dev,
+ "Vscaler Input Media Format RGB");
+ video_in = XVIDC_CSF_RGB;
+ break;
+ default:
+ dev_err(xscaler->xvip.dev,
+ "Vscaler Unsupported Input Media Format 0x%x",
+ code_in);
+ return -EINVAL;
+ }
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA,
+ video_in);
+ /*
+ * The V-scaler up-converts YUV 420 to YUV 422 before the
+ * H-scaler starts operation.
+ */
+ if (video_in == XVIDC_CSF_YCRCB_420)
+ return XVIDC_CSF_YCRCB_422;
+ return video_in;
+}
+
+static int xv_hscaler_setup_video_fmt(struct xscaler_device *xscaler,
+ u32 code_out, u32 vsc_out)
+{
+ u32 video_out;
+
+ switch (vsc_out) {
+ case XVIDC_CSF_YCRCB_422:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Input Media Format is YUV 422");
+ break;
+ case XVIDC_CSF_YCRCB_444:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Input Media Format is YUV 444");
+ break;
+ case XVIDC_CSF_RGB:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Input Media Format is RGB");
+ break;
+ default:
+ dev_err(xscaler->xvip.dev,
+ "Hscaler got unsupported format from Vscaler");
+ return -EINVAL;
+ }
+
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA,
+ vsc_out);
+
+ switch (code_out) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Output Media Format YUV 420\n");
+ video_out = XVIDC_CSF_YCRCB_420;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Output Media Format YUV 422\n");
+ video_out = XVIDC_CSF_YCRCB_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Output Media Format YUV 444\n");
+ video_out = XVIDC_CSF_YCRCB_444;
+ break;
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Output Media Format RGB\n");
+ video_out = XVIDC_CSF_RGB;
+ break;
+ default:
+ dev_err(xscaler->xvip.dev,
+ "Hscaler Unsupported Output Media Format 0x%x",
+ code_out);
+ return -EINVAL;
+ }
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_COLORMODEOUT_DATA,
+ video_out);
+ return 0;
+}
+
+static void
+xv_hscaler_set_phases(struct xscaler_device *xscaler)
+{
+ u32 loop_width;
+ u32 index = 0, val;
+ u32 offset, i, j = 0, lsb, msb;
+ u64 phasehdata;
+
+ loop_width = xscaler->max_pixels / xscaler->pix_per_clk;
+
+ if (xscaler->cfg->flags & XSCALER_HPHASE_FIX) {
+ offset = V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_PHASEH_FIX;
+ } else {
+ offset = V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_BASE;
+ }
+
+ switch (xscaler->pix_per_clk) {
+ case XSCALER_PPC_1:
+ /*
+ * phaseH is 64 bits but only the lower 16 bits of each entry
+ * are valid. Form a 32-bit word from the 16-bit LSBs of 2
+ * consecutive entries; 1 32-bit write loads 2 entries into the
+ * IP registers (i is the array location and index is the
+ * address offset).
+ */
+ for (i = 0; i < loop_width; i += 2) {
+ lsb = lower_32_bits(xscaler->H_phases[i] &
+ XHSC_MASK_LOW_16BITS);
+ msb = lower_32_bits(xscaler->H_phases[i + 1] &
+ XHSC_MASK_LOW_16BITS);
+ val = (msb << 16 | lsb);
+ xvip_write(&xscaler->xvip, offset + (index * 4), val);
+ ++index;
+ }
+ dev_dbg(xscaler->xvip.dev,
+ "%s : Operating in 1 PPC design", __func__);
+ return;
+ case XSCALER_PPC_2:
+ /*
+ * PhaseH is 64 bits but only the lower 32 bits of each entry
+ * are valid; 1 32-bit write loads each entry into the IP registers.
+ */
+ for (i = 0; i < loop_width; i++) {
+ val = lower_32_bits(xscaler->H_phases[i] &
+ XHSC_MASK_LOW_32BITS);
+ xvip_write(&xscaler->xvip, offset + (i * 4), val);
+ }
+ dev_dbg(xscaler->xvip.dev,
+ "%s : Operating in 2 PPC design", __func__);
+ return;
+ case XSCALER_PPC_4:
+ /*
+ * PhaseH is 64 bits and each entry has a valid 32-bit MSB and LSB;
+ * 2 32-bit writes load each entry into the IP registers
+ * (index is the array location and j is the address offset)
+ */
+ for (i = 0; i < loop_width; i++) {
+ phasehdata = xscaler->H_phases[index++];
+ lsb = (u32)(phasehdata & XHSC_MASK_LOW_32BITS);
+ msb = (u32)((phasehdata >> 32) & XHSC_MASK_LOW_32BITS);
+ xvip_write(&xscaler->xvip, offset + (j * 4), lsb);
+ xvip_write(&xscaler->xvip, offset + ((j + 1) * 4), msb);
+ j += 2;
+ }
+ dev_dbg(xscaler->xvip.dev,
+ "%s : Operating in 4 PPC design", __func__);
+ return;
+ default:
+ dev_warn(xscaler->xvip.dev, "%s: unsupported %d PPC design\n",
+ __func__, xscaler->pix_per_clk);
+
+ }
+}
+
+static int xscaler_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ u32 width_in, width_out;
+ u32 height_in, height_out;
+ u32 code_in, code_out;
+ u32 pixel_rate;
+ u32 line_rate;
+ int ret;
+
+ if (!enable) {
+ dev_dbg(xscaler->xvip.dev, "%s: Stream Off", __func__);
+ /* Toggle the global IP reset through the PS GPIO */
+ gpiod_set_value_cansleep(xscaler->rst_gpio,
+ XSCALER_RESET_ASSERT);
+ gpiod_set_value_cansleep(xscaler->rst_gpio,
+ XSCALER_RESET_DEASSERT);
+ xscaler_reset(xscaler);
+ memset(xscaler->H_phases, 0, sizeof(xscaler->H_phases));
+ return 0;
+ }
+
+ dev_dbg(xscaler->xvip.dev, "%s: Stream On", __func__);
+
+ /* Extract Sink Pad Information */
+ width_in = xscaler->formats[XVIP_PAD_SINK].width;
+ height_in = xscaler->formats[XVIP_PAD_SINK].height;
+ code_in = xscaler->formats[XVIP_PAD_SINK].code;
+
+ /* Extract Source Pad Information */
+ width_out = xscaler->formats[XVIP_PAD_SOURCE].width;
+ height_out = xscaler->formats[XVIP_PAD_SOURCE].height;
+ code_out = xscaler->formats[XVIP_PAD_SOURCE].code;
+
+ /*
+ * The V-scaler precedes the H-scaler in the pipeline, so set up
+ * the V-scaler first.
+ */
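+ /*
+ * line_rate is the vertical scaling step in 16.16 fixed point
+ * (STEP_PRECISION = 1 << 16), e.g. 2160 -> 1080 yields 0x20000.
+ */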
+ line_rate = (height_in * STEP_PRECISION) / height_out;
+
+ if (xscaler->is_polyphase) {
+ ret = xv_vscaler_select_coeff(xscaler, height_in, height_out);
+ if (ret < 0)
+ return ret;
+ xv_vscaler_set_coeff(xscaler);
+ }
+
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTIN_DATA, height_in);
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_WIDTH_DATA, width_in);
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTOUT_DATA, height_out);
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_LINERATE_DATA, line_rate);
+ ret = xv_vscaler_setup_video_fmt(xscaler, code_in);
+ if (ret < 0)
+ return ret;
+
+ /* H-scaler setup */
+ pixel_rate = (width_in * STEP_PRECISION) / width_out;
+
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_HEIGHT_DATA, height_out);
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_WIDTHIN_DATA, width_in);
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_WIDTHOUT_DATA, width_out);
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_PIXELRATE_DATA, pixel_rate);
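+ /* ret still holds the V-scaler output format returned above */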
+ ret = xv_hscaler_setup_video_fmt(xscaler, code_out, ret);
+ if (ret < 0)
+ return ret;
+
+ if (xscaler->is_polyphase) {
+ ret = xv_hscaler_select_coeff(xscaler, width_in, width_out);
+ if (ret < 0)
+ return ret;
+ xv_hscaler_set_coeff(xscaler);
+ }
+
+ xv_hscaler_calculate_phases(xscaler, width_in, width_out, pixel_rate);
+ xv_hscaler_set_phases(xscaler);
+
+ /* Start Scaler sub-cores */
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_AP_CTRL, XSCALER_STREAM_ON);
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_AP_CTRL, XSCALER_STREAM_ON);
+ xv_procss_enable_block(&xscaler->xvip, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_VIDEO_IN);
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int xscaler_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+ struct xscaler_device *xscaler = to_scaler(subdev);
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ fse->min_width = XSCALER_MIN_WIDTH;
+ fse->max_width = xscaler->max_pixels;
+ fse->min_height = XSCALER_MIN_HEIGHT;
+ fse->max_height = xscaler->max_lines;
+
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *
+__xscaler_get_pad_format(struct xscaler_device *xscaler,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xscaler->xvip.subdev, cfg,
+ pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xscaler->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xscaler_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+
+ fmt->format = *__xscaler_get_pad_format(xscaler, cfg, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static int xscaler_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xscaler_get_pad_format(xscaler, cfg, fmt->pad, fmt->which);
+ *format = fmt->format;
+
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XSCALER_MIN_WIDTH, xscaler->max_pixels);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XSCALER_MIN_HEIGHT, xscaler->max_lines);
+ format->code = fmt->format.code;
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+
+static int
+xscaler_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xscaler->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xscaler->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int
+xscaler_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops xscaler_video_ops = {
+ .s_stream = xscaler_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xscaler_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xscaler_enum_frame_size,
+ .get_fmt = xscaler_get_format,
+ .set_fmt = xscaler_set_format,
+};
+
+static struct v4l2_subdev_ops xscaler_ops = {
+ .video = &xscaler_video_ops,
+ .pad = &xscaler_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xscaler_internal_ops = {
+ .open = xscaler_open,
+ .close = xscaler_close,
+};
+
+/*
+ * Media Operations
+ */
+
+static const struct media_entity_operations xscaler_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * Platform Device Driver
+ */
+
+static int xscaler_parse_of(struct xscaler_device *xscaler)
+{
+ struct device *dev = xscaler->xvip.dev;
+ struct device_node *node = xscaler->xvip.dev->of_node;
+ const struct xvip_video_format *vip_format;
+ struct device_node *ports;
+ struct device_node *port;
+ int ret;
+ u32 port_id, dt_ppc;
+
+ if (xscaler->cfg->flags & XSCALER_CLK_PROP) {
+ xscaler->aclk_axis = devm_clk_get(dev, "aclk_axis");
+ if (IS_ERR(xscaler->aclk_axis)) {
+ ret = PTR_ERR(xscaler->aclk_axis);
+ dev_err(dev, "failed to get aclk_axis (%d)\n", ret);
+ return ret;
+ }
+ xscaler->aclk_ctrl = devm_clk_get(dev, "aclk_ctrl");
+ if (IS_ERR(xscaler->aclk_ctrl)) {
+ ret = PTR_ERR(xscaler->aclk_ctrl);
+ dev_err(dev, "failed to get aclk_ctrl (%d)\n", ret);
+ return ret;
+ }
+ } else {
+ dev_info(dev, "assuming all required clocks are enabled!\n");
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-height",
+ &xscaler->max_lines);
+ if (ret < 0) {
+ dev_err(dev, "xlnx,max-height is missing!");
+ return -EINVAL;
+ } else if (xscaler->max_lines > XSCALER_MAX_HEIGHT ||
+ xscaler->max_lines < XSCALER_MIN_HEIGHT) {
+ dev_err(dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-width",
+ &xscaler->max_pixels);
+ if (ret < 0) {
+ dev_err(dev, "xlnx,max-width is missing!");
+ return -EINVAL;
+ } else if (xscaler->max_pixels > XSCALER_MAX_WIDTH ||
+ xscaler->max_pixels < XSCALER_MIN_WIDTH) {
+ dev_err(dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ ret = of_property_read_u32(port, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "No reg in DT");
+ return ret;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "Invalid reg in DT");
+ return -EINVAL;
+ }
+ xscaler->vip_formats[port_id] = vip_format;
+ }
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-hori-taps",
+ &xscaler->num_hori_taps);
+ if (ret < 0)
+ return ret;
+
+ switch (xscaler->num_hori_taps) {
+ case XV_HSCALER_TAPS_2:
+ case XV_HSCALER_TAPS_4:
+ xscaler->is_polyphase = false;
+ break;
+ case XV_HSCALER_TAPS_6:
+ case XV_HSCALER_TAPS_8:
+ case XV_HSCALER_TAPS_10:
+ case XV_HSCALER_TAPS_12:
+ xscaler->is_polyphase = true;
+ break;
+ default:
+ dev_err(dev, "Unsupported num-hori-taps %d",
+ xscaler->num_hori_taps);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-vert-taps",
+ &xscaler->num_vert_taps);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * For the bilinear and bicubic cases, the number of
+ * vertical and horizontal taps must match.
+ */
+ switch (xscaler->num_vert_taps) {
+ case XV_VSCALER_TAPS_2:
+ case XV_VSCALER_TAPS_4:
+ if (xscaler->num_vert_taps != xscaler->num_hori_taps) {
+ dev_err(dev,
+ "H-scaler taps %d mismatches V-scaler taps %d",
+ xscaler->num_hori_taps,
+ xscaler->num_vert_taps);
+ return -EINVAL;
+ }
+ break;
+ case XV_VSCALER_TAPS_6:
+ case XV_VSCALER_TAPS_8:
+ case XV_VSCALER_TAPS_10:
+ case XV_VSCALER_TAPS_12:
+ xscaler->is_polyphase = true;
+ break;
+ default:
+ dev_err(dev, "Unsupported num-vert-taps %d",
+ xscaler->num_vert_taps);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,pix-per-clk", &dt_ppc);
+ if (ret < 0)
+ return ret;
+
+ /* Driver only supports 1, 2 and 4 PPC designs */
+ if (dt_ppc != XSCALER_PPC_1 && dt_ppc != XSCALER_PPC_2 &&
+ dt_ppc != XSCALER_PPC_4) {
+ dev_err(xscaler->xvip.dev,
+ "Unsupported xlnx,pix-per-clk(%d) value in DT", dt_ppc);
+ return -EINVAL;
+ }
+ xscaler->pix_per_clk = dt_ppc;
+
+ /* Reset GPIO */
+ xscaler->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(xscaler->rst_gpio)) {
+ if (PTR_ERR(xscaler->rst_gpio) != -EPROBE_DEFER)
+ dev_err(dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(xscaler->rst_gpio);
+ }
+
+ return 0;
+}
+
+static int xscaler_probe(struct platform_device *pdev)
+{
+ struct xscaler_device *xscaler;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *default_format;
+ int ret;
+ const struct of_device_id *match;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res;
+
+ xscaler = devm_kzalloc(&pdev->dev, sizeof(*xscaler), GFP_KERNEL);
+ if (!xscaler)
+ return -ENOMEM;
+
+ xscaler->xvip.dev = &pdev->dev;
+
+ match = of_match_node(xscaler_of_id_table, node);
+ if (!match)
+ return -ENODEV;
+
+ if (!strncmp(match->compatible, xscaler_of_id_table[0].compatible,
+ strlen(xscaler_of_id_table[0].compatible))) {
+ dev_warn(&pdev->dev,
+ "%s - compatible string is getting deprecated!\n",
+ match->compatible);
+ }
+
+ xscaler->cfg = match->data;
+
+ ret = xscaler_parse_of(xscaler);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize coefficient parameters */
+ xscaler->max_num_phases = XSCALER_MAX_PHASES;
+
+ if (xscaler->cfg->flags & XSCALER_CLK_PROP) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xscaler->xvip.iomem = devm_ioremap_resource(xscaler->xvip.dev,
+ res);
+ if (IS_ERR(xscaler->xvip.iomem))
+ return PTR_ERR(xscaler->xvip.iomem);
+
+ ret = clk_prepare_enable(xscaler->aclk_axis);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable aclk_axis (%d)\n",
+ ret);
+ goto res_cleanup;
+ }
+
+ ret = clk_prepare_enable(xscaler->aclk_ctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable aclk_ctrl (%d)\n",
+ ret);
+ goto axis_clk_cleanup;
+ }
+ } else {
+ ret = xvip_init_resources(&xscaler->xvip);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Deassert the global IP reset through a PS GPIO */
+ gpiod_set_value_cansleep(xscaler->rst_gpio, XSCALER_RESET_DEASSERT);
+ /* Reset internal GPIO within the IP */
+ xscaler_reset(xscaler);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xscaler->xvip.subdev;
+ v4l2_subdev_init(subdev, &xscaler_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xscaler_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xscaler);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize default and active formats */
+ default_format = &xscaler->default_formats[XVIP_PAD_SINK];
+ default_format->code = xscaler->vip_formats[XVIP_PAD_SINK]->code;
+ default_format->field = V4L2_FIELD_NONE;
+ default_format->colorspace = V4L2_COLORSPACE_SRGB;
+ default_format->width = XSCALER_DEF_IN_WIDTH;
+ default_format->height = XSCALER_DEF_IN_HEIGHT;
+ xscaler->formats[XVIP_PAD_SINK] = *default_format;
+
+ default_format = &xscaler->default_formats[XVIP_PAD_SOURCE];
+ *default_format = xscaler->default_formats[XVIP_PAD_SINK];
+ default_format->code = xscaler->vip_formats[XVIP_PAD_SOURCE]->code;
+ default_format->width = XSCALER_DEF_OUT_WIDTH;
+ default_format->height = XSCALER_DEF_OUT_HEIGHT;
+ xscaler->formats[XVIP_PAD_SOURCE] = *default_format;
+
+ xscaler->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xscaler->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xscaler_media_ops;
+
+ ret = media_entity_pads_init(&subdev->entity, 2, xscaler->pads);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xscaler);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev");
+ goto error;
+ }
+ dev_info(xscaler->xvip.dev, "Num Hori Taps %d",
+ xscaler->num_hori_taps);
+ dev_info(xscaler->xvip.dev, "Num Vert Taps %d",
+ xscaler->num_vert_taps);
+ dev_info(&pdev->dev, "VPSS Scaler Probe Successful");
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+ clk_disable_unprepare(xscaler->aclk_ctrl);
+axis_clk_cleanup:
+ clk_disable_unprepare(xscaler->aclk_axis);
+res_cleanup:
+ xvip_cleanup_resources(&xscaler->xvip);
+ return ret;
+}
+
+static int xscaler_remove(struct platform_device *pdev)
+{
+ struct xscaler_device *xscaler = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xscaler->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+ clk_disable_unprepare(xscaler->aclk_ctrl);
+ clk_disable_unprepare(xscaler->aclk_axis);
+ xvip_cleanup_resources(&xscaler->xvip);
+
+ return 0;
+}
+
+static struct platform_driver xscaler_driver = {
+ .driver = {
+ .name = "xilinx-vpss-scaler",
+ .of_match_table = xscaler_of_id_table,
+ },
+ .probe = xscaler_probe,
+ .remove = xscaler_remove,
+};
+
+module_platform_driver(xscaler_driver);
+MODULE_DESCRIPTION("Xilinx Scaler VPSS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.c b/drivers/media/platform/xilinx/xilinx-vtc.c
index 0ae0208d7529..d69f48dfdac4 100644
--- a/drivers/media/platform/xilinx/xilinx-vtc.c
+++ b/drivers/media/platform/xilinx/xilinx-vtc.c
@@ -141,6 +141,9 @@
#define XVTC_GENERATOR_GLOBAL_DELAY 0x0104
+/* Value of 1 = 0.01% */
+#define XVTC_CLK_MAX_PCT_ERR 1
+
/**
* struct xvtc_device - Xilinx Video Timing Controller device structure
* @xvip: Xilinx Video IP device
@@ -175,10 +178,25 @@ int xvtc_generator_start(struct xvtc_device *xvtc,
const struct xvtc_config *config)
{
int ret;
+ unsigned long s_rate;
+ unsigned long g_rate;
+ unsigned long clk_err;
if (!xvtc->has_generator)
return -ENXIO;
+ s_rate = config->fps * config->hsize * config->vsize;
+ ret = clk_set_rate(xvtc->xvip.clk, s_rate);
+ if (ret < 0)
+ return ret;
+
+ /* Verify that the clock is within a reasonable tolerance. */
+ g_rate = clk_get_rate(xvtc->xvip.clk);
+ clk_err = (abs(g_rate - s_rate) * 10000) / (s_rate);
+ if (clk_err > XVTC_CLK_MAX_PCT_ERR)
+ dev_warn(xvtc->xvip.dev, "Failed to set clk rate: %lu, actual rate: %lu\n",
+ s_rate, g_rate);
+
ret = clk_prepare_enable(xvtc->xvip.clk);
if (ret < 0)
return ret;
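
The tolerance check above works in hundredths of a percent: the rate deviation is scaled by 10000 before dividing, so XVTC_CLK_MAX_PCT_ERR = 1 allows at most 0.01% error. A minimal userspace sketch of the same arithmetic, using hypothetical 1080p60 timing numbers (148.5 MHz total pixel rate):

```c
#include <stdio.h>
#include <stdlib.h>

#define XVTC_CLK_MAX_PCT_ERR 1	/* one unit = 0.01% */

int main(void)
{
	long s_rate = 60L * 2200 * 1125;	/* fps * hsize * vsize */
	long g_rate = 148600000;		/* rate the clock actually locked to */
	long clk_err = labs(g_rate - s_rate) * 10000 / s_rate;

	if (clk_err > XVTC_CLK_MAX_PCT_ERR)
		printf("warn: wanted %ld Hz, got %ld Hz (error %ld x 0.01%%)\n",
		       s_rate, g_rate, clk_err);
	return 0;
}
```

Note the driver computes abs() on unsigned longs; the sketch uses signed math so labs() behaves as expected.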
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.h b/drivers/media/platform/xilinx/xilinx-vtc.h
index 90cf44245283..c2c744b03426 100644
--- a/drivers/media/platform/xilinx/xilinx-vtc.h
+++ b/drivers/media/platform/xilinx/xilinx-vtc.h
@@ -27,6 +27,7 @@ struct xvtc_config {
unsigned int vsync_start;
unsigned int vsync_end;
unsigned int vsize;
+ unsigned int fps;
};
struct xvtc_device *xvtc_of_get(struct device_node *np);
diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
index 8230da828d0e..127a3be0e0f0 100644
--- a/drivers/media/radio/radio-shark.c
+++ b/drivers/media/radio/radio-shark.c
@@ -316,6 +316,16 @@ static int usb_shark_probe(struct usb_interface *intf,
{
struct shark_device *shark;
int retval = -ENOMEM;
+ static const u8 ep_addresses[] = {
+ SHARK_IN_EP | USB_DIR_IN,
+ SHARK_OUT_EP | USB_DIR_OUT,
+ 0};
+
+ /* Are the expected endpoints present? */
+ if (!usb_check_int_endpoints(intf, ep_addresses)) {
+ dev_err(&intf->dev, "Invalid radioSHARK device\n");
+ return -EINVAL;
+ }
shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);
if (!shark)
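
usb_check_int_endpoints() takes a zero-terminated list of endpoint addresses and confirms each exists on the interface as an interrupt endpoint. A rough userspace sketch of just the list walk; the `present` array and the endpoint numbers are hypothetical stand-ins for the interface's real descriptors:

```c
#include <stdbool.h>
#include <stdio.h>

#define USB_DIR_IN  0x80
#define USB_DIR_OUT 0x00

static bool have_all(const unsigned char *wanted,
		     const unsigned char *present, int n_present)
{
	for (; *wanted; wanted++) {	/* list ends at the 0 terminator */
		bool found = false;

		for (int i = 0; i < n_present; i++)
			if (present[i] == *wanted)
				found = true;
		if (!found)
			return false;
	}
	return true;
}

int main(void)
{
	/* hypothetical addresses; SHARK_IN_EP/SHARK_OUT_EP are device specific */
	const unsigned char wanted[] = { 0x03 | USB_DIR_IN, 0x05 | USB_DIR_OUT, 0 };
	const unsigned char present[] = { 0x83, 0x05 };

	puts(have_all(wanted, present, 2) ? "endpoints ok" : "missing endpoint");
	return 0;
}
```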
diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
index d150f12382c6..f1c5c0a6a335 100644
--- a/drivers/media/radio/radio-shark2.c
+++ b/drivers/media/radio/radio-shark2.c
@@ -282,6 +282,16 @@ static int usb_shark_probe(struct usb_interface *intf,
{
struct shark_device *shark;
int retval = -ENOMEM;
+ static const u8 ep_addresses[] = {
+ SHARK_IN_EP | USB_DIR_IN,
+ SHARK_OUT_EP | USB_DIR_OUT,
+ 0};
+
+ /* Are the expected endpoints present? */
+ if (!usb_check_int_endpoints(intf, ep_addresses)) {
+ dev_err(&intf->dev, "Invalid radioSHARK2 device\n");
+ return -EINVAL;
+ }
shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);
if (!shark)
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 3f8634a46573..1365ae732b79 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -733,8 +733,10 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
/* start radio */
retval = si470x_start_usb(radio);
- if (retval < 0)
+ if (retval < 0 && !radio->int_in_running)
goto err_buf;
+ else if (retval < 0) /* in case of radio->int_in_running == 1 */
+ goto err_all;
/* set initial frequency */
si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index 82867a2a60b0..20cadff242cf 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -1106,6 +1106,8 @@ static void ene_remove(struct pnp_dev *pnp_dev)
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
unsigned long flags;
+ rc_unregister_device(dev->rdev);
+ del_timer_sync(&dev->tx_sim_timer);
spin_lock_irqsave(&dev->hw_lock, flags);
ene_rx_disable(dev);
ene_rx_restore_hw_buffer(dev);
@@ -1113,7 +1115,6 @@ static void ene_remove(struct pnp_dev *pnp_dev)
free_irq(dev->irq, dev);
release_region(dev->hw_io, ENE_IO_SIZE);
- rc_unregister_device(dev->rdev);
kfree(dev);
}
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index a20413008c3c..f50398b3ed5f 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -83,6 +83,8 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
rcdev->map_name = RC_MAP_EMPTY;
gpio_dev->rcdev = rcdev;
+ if (of_property_read_bool(np, "wakeup-source"))
+ device_init_wakeup(dev, true);
rc = devm_rc_register_device(dev, rcdev);
if (rc < 0) {
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index c683a244b9fa..d8401ef9b0a7 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -604,15 +604,14 @@ static int send_packet(struct imon_context *ictx)
pr_err_ratelimited("error submitting urb(%d)\n", retval);
} else {
/* Wait for transmission to complete (or abort) */
- mutex_unlock(&ictx->lock);
retval = wait_for_completion_interruptible(
&ictx->tx.finished);
if (retval) {
usb_kill_urb(ictx->tx_urb);
pr_err_ratelimited("task interrupted\n");
}
- mutex_lock(&ictx->lock);
+ ictx->tx.busy = false;
retval = ictx->tx.status;
if (retval)
pr_err_ratelimited("packet tx failed (%d)\n", retval);
@@ -919,7 +918,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
return -ENODEV;
}
- mutex_lock(&ictx->lock);
+ if (mutex_lock_interruptible(&ictx->lock))
+ return -ERESTARTSYS;
if (!ictx->dev_present_intf0) {
pr_err_ratelimited("no iMON device present\n");
diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
index 37fab0919131..e92bcd00e3a8 100644
--- a/drivers/media/rc/ir-sharp-decoder.c
+++ b/drivers/media/rc/ir-sharp-decoder.c
@@ -15,7 +15,9 @@
#define SHARP_UNIT 40000 /* ns */
#define SHARP_BIT_PULSE (8 * SHARP_UNIT) /* 320us */
#define SHARP_BIT_0_PERIOD (25 * SHARP_UNIT) /* 1ms (680us space) */
-#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680ms space) */
+#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680us space) */
+#define SHARP_BIT_0_SPACE (17 * SHARP_UNIT) /* 680us space */
+#define SHARP_BIT_1_SPACE (42 * SHARP_UNIT) /* 1680us space */
#define SHARP_ECHO_SPACE (1000 * SHARP_UNIT) /* 40 ms */
#define SHARP_TRAILER_SPACE (125 * SHARP_UNIT) /* 5 ms (even longer) */
@@ -168,8 +170,8 @@ static const struct ir_raw_timings_pd ir_sharp_timings = {
.header_pulse = 0,
.header_space = 0,
.bit_pulse = SHARP_BIT_PULSE,
- .bit_space[0] = SHARP_BIT_0_PERIOD,
- .bit_space[1] = SHARP_BIT_1_PERIOD,
+ .bit_space[0] = SHARP_BIT_0_SPACE,
+ .bit_space[1] = SHARP_BIT_1_SPACE,
.trailer_pulse = SHARP_BIT_PULSE,
.trailer_space = SHARP_ECHO_SPACE,
.msb_first = 1,
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index f078f8a3aec8..7de97c26b622 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -292,7 +292,11 @@ static ssize_t ir_lirc_transmit_ir(struct file *file, const char __user *buf,
if (ret < 0)
goto out_kfree_raw;
- count = ret;
+ /* drop trailing space */
+ if (!(ret % 2))
+ count = ret - 1;
+ else
+ count = ret;
txbuf = kmalloc_array(count, sizeof(unsigned int), GFP_KERNEL);
if (!txbuf) {
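
Raw IR transmit data alternates pulse/space durations and must end on a pulse, so an even sample count implies a trailing space that the hardware should not send. A tiny sketch of the count adjustment above, with a hypothetical sample count:

```c
#include <stdio.h>

int main(void)
{
	int ret = 6;	/* hypothetical number of samples from userspace */
	int count = (ret % 2) ? ret : ret - 1;	/* drop the trailing space */

	printf("transmitting %d of %d samples\n", count, ret);
	return 0;
}
```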
diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
index b7b5b33b11f4..4df06d40503f 100644
--- a/drivers/media/tuners/fc0011.c
+++ b/drivers/media/tuners/fc0011.c
@@ -499,7 +499,7 @@ struct dvb_frontend *fc0011_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(fc0011_attach);
+EXPORT_SYMBOL_GPL(fc0011_attach);
MODULE_DESCRIPTION("Fitipower FC0011 silicon tuner driver");
MODULE_AUTHOR("Michael Buesch <m@bues.ch>");
diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
index 4429d5e8c579..81e65acbdb17 100644
--- a/drivers/media/tuners/fc0012.c
+++ b/drivers/media/tuners/fc0012.c
@@ -495,7 +495,7 @@ err:
return fe;
}
-EXPORT_SYMBOL(fc0012_attach);
+EXPORT_SYMBOL_GPL(fc0012_attach);
MODULE_DESCRIPTION("Fitipower FC0012 silicon tuner driver");
MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
index 29dd9b55ff33..1006a2798eef 100644
--- a/drivers/media/tuners/fc0013.c
+++ b/drivers/media/tuners/fc0013.c
@@ -608,7 +608,7 @@ struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(fc0013_attach);
+EXPORT_SYMBOL_GPL(fc0013_attach);
MODULE_DESCRIPTION("Fitipower FC0013 silicon tuner driver");
MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c
index 1c746bed51fe..1575ab94e1c8 100644
--- a/drivers/media/tuners/max2165.c
+++ b/drivers/media/tuners/max2165.c
@@ -410,7 +410,7 @@ struct dvb_frontend *max2165_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(max2165_attach);
+EXPORT_SYMBOL_GPL(max2165_attach);
MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
MODULE_DESCRIPTION("Maxim MAX2165 silicon tuner driver");
diff --git a/drivers/media/tuners/mc44s803.c b/drivers/media/tuners/mc44s803.c
index 0c9161516abd..ed8bdf7ebd99 100644
--- a/drivers/media/tuners/mc44s803.c
+++ b/drivers/media/tuners/mc44s803.c
@@ -356,7 +356,7 @@ error:
kfree(priv);
return NULL;
}
-EXPORT_SYMBOL(mc44s803_attach);
+EXPORT_SYMBOL_GPL(mc44s803_attach);
MODULE_AUTHOR("Jochen Friedrich");
MODULE_DESCRIPTION("Freescale MC44S803 silicon tuner driver");
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 0e7ac2b49990..b59c5ba2ee58 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -440,7 +440,7 @@ struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter
return fe;
}
-EXPORT_SYMBOL(mt2060_attach);
+EXPORT_SYMBOL_GPL(mt2060_attach);
static int mt2060_probe(struct i2c_client *client,
const struct i2c_device_id *id)
diff --git a/drivers/media/tuners/mt2131.c b/drivers/media/tuners/mt2131.c
index 37f50ff6c0bd..eebc06088341 100644
--- a/drivers/media/tuners/mt2131.c
+++ b/drivers/media/tuners/mt2131.c
@@ -274,7 +274,7 @@ struct dvb_frontend * mt2131_attach(struct dvb_frontend *fe,
fe->tuner_priv = priv;
return fe;
}
-EXPORT_SYMBOL(mt2131_attach);
+EXPORT_SYMBOL_GPL(mt2131_attach);
MODULE_AUTHOR("Steven Toth");
MODULE_DESCRIPTION("Microtune MT2131 silicon tuner driver");
diff --git a/drivers/media/tuners/mt2266.c b/drivers/media/tuners/mt2266.c
index 6136f20fa9b7..2e92885a6bcb 100644
--- a/drivers/media/tuners/mt2266.c
+++ b/drivers/media/tuners/mt2266.c
@@ -336,7 +336,7 @@ struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter
mt2266_calibrate(priv);
return fe;
}
-EXPORT_SYMBOL(mt2266_attach);
+EXPORT_SYMBOL_GPL(mt2266_attach);
MODULE_AUTHOR("Olivier DANET");
MODULE_DESCRIPTION("Microtune MT2266 silicon tuner driver");
diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c
index 1c07e2225fb3..cae6ded10b12 100644
--- a/drivers/media/tuners/mxl5005s.c
+++ b/drivers/media/tuners/mxl5005s.c
@@ -4114,7 +4114,7 @@ struct dvb_frontend *mxl5005s_attach(struct dvb_frontend *fe,
fe->tuner_priv = state;
return fe;
}
-EXPORT_SYMBOL(mxl5005s_attach);
+EXPORT_SYMBOL_GPL(mxl5005s_attach);
MODULE_DESCRIPTION("MaxLinear MXL5005S silicon tuner driver");
MODULE_AUTHOR("Steven Toth");
diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
index 85bbdd4ecdbb..f7516cb52436 100644
--- a/drivers/media/tuners/qt1010.c
+++ b/drivers/media/tuners/qt1010.c
@@ -215,7 +215,7 @@ static int qt1010_set_params(struct dvb_frontend *fe)
static int qt1010_init_meas1(struct qt1010_priv *priv,
u8 oper, u8 reg, u8 reg_init_val, u8 *retval)
{
- u8 i, val1, uninitialized_var(val2);
+ u8 i, val1, val2;
int err;
qt1010_i2c_oper_t i2c_data[] = {
@@ -250,7 +250,7 @@ static int qt1010_init_meas1(struct qt1010_priv *priv,
static int qt1010_init_meas2(struct qt1010_priv *priv,
u8 reg_init_val, u8 *retval)
{
- u8 i, uninitialized_var(val);
+ u8 i, val;
int err;
qt1010_i2c_oper_t i2c_data[] = {
{ QT1010_WR, 0x07, reg_init_val },
@@ -342,11 +342,12 @@ static int qt1010_init(struct dvb_frontend *fe)
else
valptr = &tmpval;
- BUG_ON(i >= ARRAY_SIZE(i2c_data) - 1);
-
- err = qt1010_init_meas1(priv, i2c_data[i+1].reg,
- i2c_data[i].reg,
- i2c_data[i].val, valptr);
+ if (i >= ARRAY_SIZE(i2c_data) - 1)
+ err = -EIO;
+ else
+ err = qt1010_init_meas1(priv, i2c_data[i + 1].reg,
+ i2c_data[i].reg,
+ i2c_data[i].val, valptr);
i++;
break;
}
@@ -437,7 +438,7 @@ struct dvb_frontend * qt1010_attach(struct dvb_frontend *fe,
fe->tuner_priv = priv;
return fe;
}
-EXPORT_SYMBOL(qt1010_attach);
+EXPORT_SYMBOL_GPL(qt1010_attach);
MODULE_DESCRIPTION("Quantek QT1010 silicon tuner driver");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c
index 4ed94646116f..7d8d84dcb245 100644
--- a/drivers/media/tuners/tda18218.c
+++ b/drivers/media/tuners/tda18218.c
@@ -336,7 +336,7 @@ struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(tda18218_attach);
+EXPORT_SYMBOL_GPL(tda18218_attach);
MODULE_DESCRIPTION("NXP TDA18218HN silicon tuner driver");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index d9606738ce43..ef9af052007c 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -1744,7 +1744,7 @@ fail2:
xc4000_release(fe);
return NULL;
}
-EXPORT_SYMBOL(xc4000_attach);
+EXPORT_SYMBOL_GPL(xc4000_attach);
MODULE_AUTHOR("Steven Toth, Davide Ferri");
MODULE_DESCRIPTION("Xceive xc4000 silicon tuner driver");
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index 734a92caad8d..65b886338557 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -1460,7 +1460,7 @@ fail:
xc5000_release(fe);
return NULL;
}
-EXPORT_SYMBOL(xc5000_attach);
+EXPORT_SYMBOL_GPL(xc5000_attach);
MODULE_AUTHOR("Steven Toth");
MODULE_DESCRIPTION("Xceive xc5000 silicon tuner driver");
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 982cb56e97e9..0f11f50c0ae4 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -1028,6 +1028,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
if (!dev->video_mode.isoc_ctl.urb) {
dev_err(dev->dev,
"cannot alloc memory for usb buffers\n");
+ kfree(dma_q->p_left_data);
return -ENOMEM;
}
@@ -1037,6 +1038,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
dev_err(dev->dev,
"cannot allocate memory for usbtransfer\n");
kfree(dev->video_mode.isoc_ctl.urb);
+ kfree(dma_q->p_left_data);
return -ENOMEM;
}
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 3afd18733614..c5f6af5debaf 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -269,6 +269,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
struct dvb_usb_device *d = i2c_get_adapdata(adap);
struct state *state = d_to_priv(d);
int ret;
+ u32 reg;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
@@ -321,8 +322,12 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP;
} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
(msg[0].addr == state->af9033_i2c_addr[1])) {
+ if (msg[0].len < 3 || msg[1].len < 1) {
+ ret = -EOPNOTSUPP;
+ goto unlock;
+ }
/* demod access via firmware interface */
- u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
if (msg[0].addr == state->af9033_i2c_addr[1])
@@ -380,17 +385,18 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP;
} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
(msg[0].addr == state->af9033_i2c_addr[1])) {
+ if (msg[0].len < 3) {
+ ret = -EOPNOTSUPP;
+ goto unlock;
+ }
/* demod access via firmware interface */
- u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
if (msg[0].addr == state->af9033_i2c_addr[1])
reg |= 0x100000;
- ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
- &msg[0].buf[3],
- msg[0].len - 3)
- : -EOPNOTSUPP;
+ ret = af9035_wr_regs(d, reg, &msg[0].buf[3], msg[0].len - 3);
} else {
/* I2C write */
u8 buf[MAX_XFER_SIZE];
@@ -457,6 +463,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP;
}
+unlock:
mutex_unlock(&d->i2c_mutex);
if (ret < 0)
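
The af9035 checks reject messages too short to carry the 3-byte register address before any bytes are dereferenced, rather than validating after the fact. A self-contained sketch of the guarded assembly; struct msg and the sample buffer are hypothetical stand-ins for struct i2c_msg:

```c
#include <stdint.h>
#include <stdio.h>

#define EOPNOTSUPP 95

struct msg {		/* hypothetical stand-in for struct i2c_msg */
	const uint8_t *buf;
	int len;
};

static int reg_from_msg(const struct msg *m, uint32_t *reg)
{
	if (m->len < 3)
		return -EOPNOTSUPP;	/* would read past the buffer */
	*reg = m->buf[0] << 16 | m->buf[1] << 8 | m->buf[2];
	return 0;
}

int main(void)
{
	const uint8_t raw[] = { 0x01, 0x02, 0x03 };
	struct msg m = { raw, 3 };
	uint32_t reg;

	if (!reg_from_msg(&m, &reg))
		printf("reg=0x%06x\n", (unsigned int)reg);	/* reg=0x010203 */
	return 0;
}
```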
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index fb6d99dea31a..08fdb9e5e3a2 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -202,7 +202,7 @@ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
while (i < num) {
if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
- if (msg[i].len > 2 || msg[i+1].len > 60) {
+ if (msg[i].len != 2 || msg[i + 1].len > 60) {
ret = -EOPNOTSUPP;
break;
}
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index 62ee09f28a0b..6cbfe75791c2 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -202,7 +202,8 @@ static int az6007_rc_query(struct dvb_usb_device *d)
unsigned code;
enum rc_proto proto;
- az6007_read(d, AZ6007_READ_IR, 0, 0, st->data, 10);
+ if (az6007_read(d, AZ6007_READ_IR, 0, 0, st->data, 10) < 0)
+ return -EIO;
if (st->data[1] == 0x44)
return 0;
@@ -787,6 +788,10 @@ static int az6007_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (az6007_xfer_debug)
printk(KERN_DEBUG "az6007: I2C W addr=0x%x len=%d\n",
addr, msgs[i].len);
+ if (msgs[i].len < 1) {
+ ret = -EIO;
+ goto err;
+ }
req = AZ6007_I2C_WR;
index = msgs[i].buf[0];
value = addr | (1 << 8);
@@ -801,6 +806,10 @@ static int az6007_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (az6007_xfer_debug)
printk(KERN_DEBUG "az6007: I2C R addr=0x%x len=%d\n",
addr, msgs[i].len);
+ if (msgs[i].len < 1) {
+ ret = -EIO;
+ goto err;
+ }
req = AZ6007_I2C_RD;
index = msgs[i].buf[0];
value = addr;
diff --git a/drivers/media/usb/dvb-usb-v2/ce6230.c b/drivers/media/usb/dvb-usb-v2/ce6230.c
index 44540de1a206..d3b5cb4a24da 100644
--- a/drivers/media/usb/dvb-usb-v2/ce6230.c
+++ b/drivers/media/usb/dvb-usb-v2/ce6230.c
@@ -101,6 +101,10 @@ static int ce6230_i2c_master_xfer(struct i2c_adapter *adap,
if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
if (msg[i].addr ==
ce6230_zl10353_config.demod_address) {
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
req.cmd = DEMOD_READ;
req.value = msg[i].addr >> 1;
req.index = msg[i].buf[0];
@@ -117,6 +121,10 @@ static int ce6230_i2c_master_xfer(struct i2c_adapter *adap,
} else {
if (msg[i].addr ==
ce6230_zl10353_config.demod_address) {
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
req.cmd = DEMOD_WRITE;
req.value = msg[i].addr >> 1;
req.index = msg[i].buf[0];
diff --git a/drivers/media/usb/dvb-usb-v2/ec168.c b/drivers/media/usb/dvb-usb-v2/ec168.c
index e30305876840..1191e32407c7 100644
--- a/drivers/media/usb/dvb-usb-v2/ec168.c
+++ b/drivers/media/usb/dvb-usb-v2/ec168.c
@@ -115,6 +115,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
while (i < num) {
if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
if (msg[i].addr == ec168_ec100_config.demod_address) {
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
req.cmd = READ_DEMOD;
req.value = 0;
req.index = 0xff00 + msg[i].buf[0]; /* reg */
@@ -131,6 +135,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
} else {
if (msg[i].addr == ec168_ec100_config.demod_address) {
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
req.cmd = WRITE_DEMOD;
req.value = msg[i].buf[1]; /* val */
req.index = 0xff00 + msg[i].buf[0]; /* reg */
@@ -139,6 +147,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = ec168_ctrl_msg(d, &req);
i += 1;
} else {
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
req.cmd = WRITE_I2C;
req.value = msg[i].buf[0]; /* val */
req.index = 0x0100 + msg[i].addr; /* I2C addr */
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 0fe71437601e..ec9bbd8c89ad 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -176,6 +176,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = -EOPNOTSUPP;
goto err_mutex_unlock;
} else if (msg[0].addr == 0x10) {
+ if (msg[0].len < 1 || msg[1].len < 1) {
+ ret = -EOPNOTSUPP;
+ goto err_mutex_unlock;
+ }
/* method 1 - integrated demod */
if (msg[0].buf[0] == 0x00) {
/* return demod page from driver cache */
@@ -189,6 +193,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = rtl28xxu_ctrl_msg(d, &req);
}
} else if (msg[0].len < 2) {
+ if (msg[0].len < 1) {
+ ret = -EOPNOTSUPP;
+ goto err_mutex_unlock;
+ }
/* method 2 - old I2C */
req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
req.index = CMD_I2C_RD;
@@ -217,8 +225,16 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = -EOPNOTSUPP;
goto err_mutex_unlock;
} else if (msg[0].addr == 0x10) {
+ if (msg[0].len < 1) {
+ ret = -EOPNOTSUPP;
+ goto err_mutex_unlock;
+ }
/* method 1 - integrated demod */
if (msg[0].buf[0] == 0x00) {
+ if (msg[0].len < 2) {
+ ret = -EOPNOTSUPP;
+ goto err_mutex_unlock;
+ }
/* save demod page for later demod access */
dev->page = msg[0].buf[1];
ret = 0;
@@ -231,6 +247,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = rtl28xxu_ctrl_msg(d, &req);
}
} else if ((msg[0].len < 23) && (!dev->new_i2c_write)) {
+ if (msg[0].len < 1) {
+ ret = -EOPNOTSUPP;
+ goto err_mutex_unlock;
+ }
/* method 2 - old I2C */
req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
req.index = CMD_I2C_WR;
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 89b4b5d84cdf..827f9db16aa1 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -422,6 +422,10 @@ static int af9005_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (ret == 0)
ret = 2;
} else {
+ if (msg[0].len < 2) {
+ ret = -EOPNOTSUPP;
+ goto unlock;
+ }
/* write one or more registers */
reg = msg[0].buf[0];
addr = msg[0].addr;
@@ -431,6 +435,7 @@ static int af9005_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = 1;
}
+unlock:
mutex_unlock(&d->i2c_mutex);
return ret;
}
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index 5aa9c501ed9c..2b56393d1000 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -975,6 +975,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
if (msg[i].addr == 0x99) {
req = 0xBE;
index = 0;
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
value = msg[i].buf[0] & 0x00ff;
length = 1;
az6027_usb_out_op(d, req, value, index, data, length);
@@ -984,6 +988,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
/* write/read request */
if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD)) {
req = 0xB9;
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
value = msg[i].addr + (msg[i].len << 8);
length = msg[i + 1].len + 6;
@@ -997,6 +1005,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
/* demod 16bit addr */
req = 0xBD;
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
value = msg[i].addr + (2 << 8);
length = msg[i].len - 2;
@@ -1022,6 +1034,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
} else {
req = 0xBD;
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
index = msg[i].buf[0] & 0x00FF;
value = msg[i].addr + (1 << 8);
length = msg[i].len - 1;
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 99a39339d45d..cf5ec8c1f677 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -63,6 +63,10 @@ static int digitv_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
warn("more than 2 i2c messages at a time is not handled yet. TODO.");
for (i = 0; i < num; i++) {
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
/* write/read request */
if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) {
if (digitv_ctrl_msg(d, USB_READ_COFDM, msg[i].buf[0], NULL, 0,
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index e7720ff11d3d..cb5bf119df9f 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -81,7 +81,7 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
ret = dvb_usb_adapter_stream_init(adap);
if (ret)
- return ret;
+ goto stream_init_err;
ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs);
if (ret)
@@ -114,6 +114,8 @@ frontend_init_err:
dvb_usb_adapter_dvb_exit(adap);
dvb_init_err:
dvb_usb_adapter_stream_exit(adap);
+stream_init_err:
+ kfree(adap->priv);
return ret;
}
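
The dvb-usb-init fix follows the classic unwind-ladder rule: once adap->priv is allocated, every later failure must exit through a label that frees it. A generic sketch under that assumption, with malloc/free standing in for the kernel allocators:

```c
#include <stdio.h>
#include <stdlib.h>

static int init_stream(void)
{
	return -1;	/* pretend the stream setup fails */
}

int main(void)
{
	void *priv = malloc(64);	/* stands in for adap->priv */
	int ret;

	if (!priv)
		return -1;

	ret = init_stream();
	if (ret)
		goto stream_init_err;	/* was a bare 'return ret', leaking priv */

	/* ... further init steps, each failure exiting via a deeper label ... */
	free(priv);
	return 0;

stream_init_err:
	free(priv);
	fprintf(stderr, "init failed (%d), priv released\n", ret);
	return ret;
}
```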
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 8493ebb377c4..924a6478007a 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -128,6 +128,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
switch (num) {
case 2:
+ if (msg[0].len < 1) {
+ num = -EOPNOTSUPP;
+ break;
+ }
/* read stv0299 register */
value = msg[0].buf[0];/* register */
for (i = 0; i < msg[1].len; i++) {
@@ -139,6 +143,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
case 1:
switch (msg[0].addr) {
case 0x68:
+ if (msg[0].len < 2) {
+ num = -EOPNOTSUPP;
+ break;
+ }
/* write to stv0299 register */
buf6[0] = 0x2a;
buf6[1] = msg[0].buf[0];
@@ -148,6 +156,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
break;
case 0x60:
if (msg[0].flags == 0) {
+ if (msg[0].len < 4) {
+ num = -EOPNOTSUPP;
+ break;
+ }
/* write to tuner pll */
buf6[0] = 0x2c;
buf6[1] = 5;
@@ -159,6 +171,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
dw210x_op_rw(d->udev, 0xb2, 0, 0,
buf6, 7, DW210X_WRITE_MSG);
} else {
+ if (msg[0].len < 1) {
+ num = -EOPNOTSUPP;
+ break;
+ }
/* read from tuner */
dw210x_op_rw(d->udev, 0xb5, 0, 0,
buf6, 1, DW210X_READ_MSG);
@@ -166,12 +182,20 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
break;
case (DW2102_RC_QUERY):
+ if (msg[0].len < 2) {
+ num = -EOPNOTSUPP;
+ break;
+ }
dw210x_op_rw(d->udev, 0xb8, 0, 0,
buf6, 2, DW210X_READ_MSG);
msg[0].buf[0] = buf6[0];
msg[0].buf[1] = buf6[1];
break;
case (DW2102_VOLTAGE_CTRL):
+ if (msg[0].len < 1) {
+ num = -EOPNOTSUPP;
+ break;
+ }
buf6[0] = 0x30;
buf6[1] = msg[0].buf[0];
dw210x_op_rw(d->udev, 0xb2, 0, 0,
@@ -946,7 +970,7 @@ static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
for (i = 0; i < 6; i++) {
obuf[1] = 0xf0 + i;
if (i2c_transfer(&d->i2c_adap, msg, 2) != 2)
- break;
+ return -1;
else
mac[i] = ibuf[0];
}
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index 7282f6022655..8b19ae67f187 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -277,7 +277,6 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu
char *read = kmalloc(1, GFP_KERNEL);
if (!read) {
ret = -ENOMEM;
- kfree(read);
goto unlock;
}
@@ -288,8 +287,10 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu
if ((ret = m920x_read(d->udev, M9206_I2C, 0x0,
0x20 | stop,
- read, 1)) != 0)
+ read, 1)) != 0) {
+ kfree(read);
goto unlock;
+ }
msg[i].buf[j] = read[0];
}
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 5ae13ee9272d..252e463cc13b 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -3989,6 +3989,10 @@ static int em28xx_usb_probe(struct usb_interface *intf,
* topology will likely change after the load of the em28xx subdrivers.
*/
#ifdef CONFIG_MEDIA_CONTROLLER
+ /*
+ * No need to check the return value, the device will still be
+ * usable without media controller API.
+ */
retval = media_device_register(dev->media_dev);
#endif
diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
index b9302d77d6c8..8ee0d5ea8415 100644
--- a/drivers/media/usb/go7007/go7007-driver.c
+++ b/drivers/media/usb/go7007/go7007-driver.c
@@ -80,7 +80,7 @@ static int go7007_load_encoder(struct go7007 *go)
const struct firmware *fw_entry;
char fw_name[] = "go7007/go7007fw.bin";
void *bounce;
- int fw_len, rv = 0;
+ int fw_len;
u16 intr_val, intr_data;
if (go->boot_fw == NULL) {
@@ -109,9 +109,11 @@ static int go7007_load_encoder(struct go7007 *go)
go7007_read_interrupt(go, &intr_val, &intr_data) < 0 ||
(intr_val & ~0x1) != 0x5a5a) {
v4l2_err(go, "error transferring firmware\n");
- rv = -1;
+ kfree(go->boot_fw);
+ go->boot_fw = NULL;
+ return -1;
}
- return rv;
+ return 0;
}
MODULE_FIRMWARE("go7007/go7007fw.bin");
diff --git a/drivers/media/usb/go7007/go7007-i2c.c b/drivers/media/usb/go7007/go7007-i2c.c
index 38339dd2f83f..2880370e45c8 100644
--- a/drivers/media/usb/go7007/go7007-i2c.c
+++ b/drivers/media/usb/go7007/go7007-i2c.c
@@ -165,8 +165,6 @@ static int go7007_i2c_master_xfer(struct i2c_adapter *adapter,
} else if (msgs[i].len == 3) {
if (msgs[i].flags & I2C_M_RD)
return -EIO;
- if (msgs[i].len != 3)
- return -EIO;
if (go7007_i2c_xfer(go, msgs[i].addr, 0,
(msgs[i].buf[0] << 8) | msgs[i].buf[1],
0x01, &msgs[i].buf[2]) < 0)
diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
index f889c9d740cd..1cfe8371511d 100644
--- a/drivers/media/usb/go7007/go7007-usb.c
+++ b/drivers/media/usb/go7007/go7007-usb.c
@@ -1198,7 +1198,9 @@ static int go7007_usb_probe(struct usb_interface *intf,
u16 channel;
/* read channel number from GPIO[1:0] */
- go7007_read_addr(go, 0x3c81, &channel);
+ if (go7007_read_addr(go, 0x3c81, &channel))
+ goto allocfail;
+
channel &= 0x3;
go->board_id = GO7007_BOARDID_ADLINK_MPG24;
usb->board = board = &board_adlink_mpg24;
diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
index d93d384286c1..de945e13c7c6 100644
--- a/drivers/media/usb/gspca/cpia1.c
+++ b/drivers/media/usb/gspca/cpia1.c
@@ -18,6 +18,7 @@
#include <linux/input.h>
#include <linux/sched/signal.h>
+#include <linux/bitops.h>
#include "gspca.h"
@@ -1027,6 +1028,8 @@ static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)
sd->params.exposure.expMode = 2;
sd->exposure_status = EXPOSURE_NORMAL;
}
+ if (sd->params.exposure.gain >= BITS_PER_TYPE(currentexp))
+ return -EINVAL;
currentexp = currentexp << sd->params.exposure.gain;
sd->params.exposure.gain = 0;
/* round down current exposure to nearest value */
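
Shifting a value by at least the width of its type is undefined behaviour in C, which is what the new BITS_PER_TYPE() guard in cpia1 prevents. A small sketch of the same guard; apply_gain() and the sample values are hypothetical:

```c
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_TYPE(v) (sizeof(v) * 8)	/* mirrors the kernel macro */

static int apply_gain(uint32_t exp, unsigned int gain, uint32_t *out)
{
	if (gain >= BITS_PER_TYPE(exp))
		return -1;	/* shift would be undefined behaviour */
	*out = exp << gain;
	return 0;
}

int main(void)
{
	uint32_t out;

	printf("%d\n", apply_gain(100, 3, &out));	/* 0, out == 800 */
	printf("%d\n", apply_gain(100, 40, &out));	/* -1, rejected */
	return 0;
}
```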
diff --git a/drivers/media/usb/gspca/vicam.c b/drivers/media/usb/gspca/vicam.c
index 179b2ec3df57..d98343fd33fe 100644
--- a/drivers/media/usb/gspca/vicam.c
+++ b/drivers/media/usb/gspca/vicam.c
@@ -225,7 +225,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
{
int ret;
const struct ihex_binrec *rec;
- const struct firmware *uninitialized_var(fw);
+ const struct firmware *fw;
u8 *firmware_buf;
ret = request_ihex_firmware(&fw, VICAM_FIRMWARE,
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 7849f1fbbcc4..4f1505b94338 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -409,7 +409,7 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
struct hdpvr_device *dev = video_drvdata(file);
struct hdpvr_buffer *buf = NULL;
struct urb *urb;
- unsigned int ret = 0;
+ int ret = 0;
int rem, cnt;
if (*pos)
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c
index 14170a5d72b3..73c95ba2328a 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c
@@ -90,8 +90,10 @@ static void pvr2_context_destroy(struct pvr2_context *mp)
}
-static void pvr2_context_notify(struct pvr2_context *mp)
+static void pvr2_context_notify(void *ptr)
{
+ struct pvr2_context *mp = ptr;
+
pvr2_context_set_notify(mp,!0);
}
@@ -106,9 +108,7 @@ static void pvr2_context_check(struct pvr2_context *mp)
pvr2_trace(PVR2_TRACE_CTXT,
"pvr2_context %p (initialize)", mp);
/* Finish hardware initialization */
- if (pvr2_hdw_initialize(mp->hdw,
- (void (*)(void *))pvr2_context_notify,
- mp)) {
+ if (pvr2_hdw_initialize(mp->hdw, pvr2_context_notify, mp)) {
mp->video_stream.stream =
pvr2_hdw_get_video_stream(mp->hdw);
/* Trigger interface initialization. By doing this
@@ -267,8 +267,9 @@ static void pvr2_context_exit(struct pvr2_context *mp)
void pvr2_context_disconnect(struct pvr2_context *mp)
{
pvr2_hdw_disconnect(mp->hdw);
+ if (!pvr2_context_shutok())
+ pvr2_context_notify(mp);
mp->disconnect_flag = !0;
- pvr2_context_notify(mp);
}
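
The pvrusb2 changes remove casts of typed handlers to void (*)(void *), which are undefined behaviour and trip kernel control-flow-integrity checks; instead the handler itself takes void * and recovers the type internally. A minimal sketch of that adapter pattern:

```c
#include <stdio.h>

struct ctx {
	int id;
};

/* handler takes void * directly; no function-pointer cast anywhere */
static void notify(void *ptr)
{
	struct ctx *mp = ptr;	/* recover the real type inside */

	printf("notify ctx %d\n", mp->id);
}

static void set_callback(void (*cb)(void *), void *data)
{
	cb(data);	/* the core only ever deals in void * */
}

int main(void)
{
	struct ctx mp = { .id = 7 };

	set_callback(notify, &mp);
	return 0;
}
```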
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
index 6954584526a3..1b768e746672 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
@@ -88,8 +88,10 @@ static int pvr2_dvb_feed_thread(void *data)
return stat;
}
-static void pvr2_dvb_notify(struct pvr2_dvb_adapter *adap)
+static void pvr2_dvb_notify(void *ptr)
{
+ struct pvr2_dvb_adapter *adap = ptr;
+
wake_up(&adap->buffer_wait_data);
}
@@ -149,7 +151,7 @@ static int pvr2_dvb_stream_do_start(struct pvr2_dvb_adapter *adap)
}
pvr2_stream_set_callback(pvr->video_stream.stream,
- (pvr2_stream_callback) pvr2_dvb_notify, adap);
+ pvr2_dvb_notify, adap);
ret = pvr2_stream_set_buffer_count(stream, PVR2_DVB_BUFFER_COUNT);
if (ret < 0) return ret;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 11e7fcfc3f19..d101fa8d61bb 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -2611,6 +2611,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
del_timer_sync(&hdw->encoder_run_timer);
del_timer_sync(&hdw->encoder_wait_timer);
flush_work(&hdw->workpoll);
+ v4l2_device_unregister(&hdw->v4l2_dev);
usb_free_urb(hdw->ctl_read_urb);
usb_free_urb(hdw->ctl_write_urb);
kfree(hdw->ctl_read_buffer);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index eaa08c7999d4..f8da501689c3 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -1037,8 +1037,10 @@ static int pvr2_v4l2_open(struct file *file)
}
-static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp)
+static void pvr2_v4l2_notify(void *ptr)
{
+ struct pvr2_v4l2_fh *fhp = ptr;
+
wake_up(&fhp->wait_data);
}
@@ -1071,7 +1073,7 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
hdw = fh->channel.mc_head->hdw;
sp = fh->pdi->stream->stream;
- pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh);
+ pvr2_stream_set_callback(sp, pvr2_v4l2_notify, fh);
pvr2_hdw_set_stream_type(hdw,fh->pdi->config);
if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret;
return pvr2_ioread_set_enabled(fh->rhp,!0);
@@ -1202,11 +1204,6 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
dip->minor_type = pvr2_v4l_type_video;
nr_ptr = video_nr;
caps |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO;
- if (!dip->stream) {
- pr_err(KBUILD_MODNAME
- ": Failed to set up pvrusb2 v4l video dev due to missing stream instance\n");
- return;
- }
break;
case VFL_TYPE_VBI:
dip->config = pvr2_config_vbi;
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 9ba3a2ae36e5..598ad05f5bea 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -179,6 +179,8 @@ static void smsusb_stop_streaming(struct smsusb_device_t *dev)
for (i = 0; i < MAX_URBS; i++) {
usb_kill_urb(&dev->surbs[i].urb);
+ if (dev->surbs[i].wq.func)
+ cancel_work_sync(&dev->surbs[i].wq);
if (dev->surbs[i].cb) {
smscore_putbuffer(dev->coredev, dev->surbs[i].cb);
@@ -453,12 +455,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
rc = smscore_register_device(&params, &dev->coredev, 0, mdev);
if (rc < 0) {
pr_err("smscore_register_device(...) failed, rc %d\n", rc);
- smsusb_term_device(intf);
-#ifdef CONFIG_MEDIA_CONTROLLER_DVB
- media_device_unregister(mdev);
-#endif
- kfree(mdev);
- return rc;
+ goto err_unregister_device;
}
smscore_set_board_id(dev->coredev, board_id);
@@ -475,8 +472,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
rc = smsusb_start_streaming(dev);
if (rc < 0) {
pr_err("smsusb_start_streaming(...) failed\n");
- smsusb_term_device(intf);
- return rc;
+ goto err_unregister_device;
}
dev->state = SMSUSB_ACTIVE;
@@ -484,13 +480,20 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
rc = smscore_start_device(dev->coredev);
if (rc < 0) {
pr_err("smscore_start_device(...) failed\n");
- smsusb_term_device(intf);
- return rc;
+ goto err_unregister_device;
}
pr_debug("device 0x%p created\n", dev);
return rc;
+
+err_unregister_device:
+ smsusb_term_device(intf);
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ media_device_unregister(mdev);
+#endif
+ kfree(mdev);
+ return rc;
}
static int smsusb_probe(struct usb_interface *intf,
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index 202b084f65a2..4cf540d1b250 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -107,8 +107,7 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
/*
* TODO: These stk1160_dbg are very spammy!
- * We should 1) check why we are getting them
- * and 2) add ratelimit.
+ * We should check why we are getting them.
*
* UPDATE: One of the reasons (the only one?) for getting these
* is incorrect standard (mismatch between expected and configured).
@@ -151,7 +150,7 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
/* Let the bug hunt begin! sanity checks! */
if (lencopy < 0) {
- stk1160_dbg("copy skipped: negative lencopy\n");
+ printk_ratelimited(KERN_DEBUG "copy skipped: negative lencopy\n");
return;
}
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 3198f9624b7c..46bb0ccaafc1 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -1551,8 +1551,7 @@ static void ttusb_dec_exit_dvb(struct ttusb_dec *dec)
dvb_dmx_release(&dec->demux);
if (dec->fe) {
dvb_unregister_frontend(dec->fe);
- if (dec->fe->ops.release)
- dec->fe->ops.release(dec->fe);
+ dvb_frontend_detach(dec->fe);
}
dvb_unregister_adapter(&dec->adapter);
}
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 36abe47997b0..e0d894c61f4b 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -6,6 +6,7 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
+#include <asm/barrier.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -1275,17 +1276,12 @@ static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain,
uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
}
-static void uvc_ctrl_status_event_work(struct work_struct *work)
+void uvc_ctrl_status_event(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl, const u8 *data)
{
- struct uvc_device *dev = container_of(work, struct uvc_device,
- async_ctrl.work);
- struct uvc_ctrl_work *w = &dev->async_ctrl;
- struct uvc_video_chain *chain = w->chain;
struct uvc_control_mapping *mapping;
- struct uvc_control *ctrl = w->ctrl;
struct uvc_fh *handle;
unsigned int i;
- int ret;
mutex_lock(&chain->ctrl_mutex);
@@ -1293,7 +1289,7 @@ static void uvc_ctrl_status_event_work(struct work_struct *work)
ctrl->handle = NULL;
list_for_each_entry(mapping, &ctrl->info.mappings, list) {
- s32 value = __uvc_ctrl_get_value(mapping, w->data);
+ s32 value = __uvc_ctrl_get_value(mapping, data);
/*
* handle may be NULL here if the device sends auto-update
@@ -1312,6 +1308,20 @@ static void uvc_ctrl_status_event_work(struct work_struct *work)
}
mutex_unlock(&chain->ctrl_mutex);
+}
+
+static void uvc_ctrl_status_event_work(struct work_struct *work)
+{
+ struct uvc_device *dev = container_of(work, struct uvc_device,
+ async_ctrl.work);
+ struct uvc_ctrl_work *w = &dev->async_ctrl;
+ int ret;
+
+ uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
+
+ /* The barrier is needed to synchronize with uvc_status_stop(). */
+ if (smp_load_acquire(&dev->flush_status))
+ return;
/* Resubmit the URB. */
w->urb->interval = dev->int_ep->desc.bInterval;
@@ -1321,8 +1331,8 @@ static void uvc_ctrl_status_event_work(struct work_struct *work)
ret);
}
-bool uvc_ctrl_status_event(struct urb *urb, struct uvc_video_chain *chain,
- struct uvc_control *ctrl, const u8 *data)
+bool uvc_ctrl_status_event_async(struct urb *urb, struct uvc_video_chain *chain,
+ struct uvc_control *ctrl, const u8 *data)
{
struct uvc_device *dev = chain->dev;
struct uvc_ctrl_work *w = &dev->async_ctrl;
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 40ca1d4e0348..0caa57a6782a 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1060,10 +1060,8 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
+ n;
memcpy(unit->extension.bmControls, &buffer[23+p], 2*n);
- if (buffer[24+p+2*n] != 0)
- usb_string(udev, buffer[24+p+2*n], unit->name,
- sizeof(unit->name));
- else
+ if (buffer[24+p+2*n] == 0 ||
+ usb_string(udev, buffer[24+p+2*n], unit->name, sizeof(unit->name)) < 0)
sprintf(unit->name, "Extension %u", buffer[3]);
list_add_tail(&unit->list, &dev->entities);
@@ -1188,15 +1186,15 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
memcpy(term->media.bmTransportModes, &buffer[10+n], p);
}
- if (buffer[7] != 0)
- usb_string(udev, buffer[7], term->name,
- sizeof(term->name));
- else if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA)
- sprintf(term->name, "Camera %u", buffer[3]);
- else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT)
- sprintf(term->name, "Media %u", buffer[3]);
- else
- sprintf(term->name, "Input %u", buffer[3]);
+ if (buffer[7] == 0 ||
+ usb_string(udev, buffer[7], term->name, sizeof(term->name)) < 0) {
+ if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA)
+ sprintf(term->name, "Camera %u", buffer[3]);
+ else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT)
+ sprintf(term->name, "Media %u", buffer[3]);
+ else
+ sprintf(term->name, "Input %u", buffer[3]);
+ }
list_add_tail(&term->list, &dev->entities);
break;
@@ -1228,10 +1226,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
memcpy(term->baSourceID, &buffer[7], 1);
- if (buffer[8] != 0)
- usb_string(udev, buffer[8], term->name,
- sizeof(term->name));
- else
+ if (buffer[8] == 0 ||
+ usb_string(udev, buffer[8], term->name, sizeof(term->name)) < 0)
sprintf(term->name, "Output %u", buffer[3]);
list_add_tail(&term->list, &dev->entities);
@@ -1253,10 +1249,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
memcpy(unit->baSourceID, &buffer[5], p);
- if (buffer[5+p] != 0)
- usb_string(udev, buffer[5+p], unit->name,
- sizeof(unit->name));
- else
+ if (buffer[5+p] == 0 ||
+ usb_string(udev, buffer[5+p], unit->name, sizeof(unit->name)) < 0)
sprintf(unit->name, "Selector %u", buffer[3]);
list_add_tail(&unit->list, &dev->entities);
@@ -1286,10 +1280,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
if (dev->uvc_version >= 0x0110)
unit->processing.bmVideoStandards = buffer[9+n];
- if (buffer[8+n] != 0)
- usb_string(udev, buffer[8+n], unit->name,
- sizeof(unit->name));
- else
+ if (buffer[8+n] == 0 ||
+ usb_string(udev, buffer[8+n], unit->name, sizeof(unit->name)) < 0)
sprintf(unit->name, "Processing %u", buffer[3]);
list_add_tail(&unit->list, &dev->entities);
@@ -1317,10 +1309,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
unit->extension.bmControls = (u8 *)unit + sizeof(*unit);
memcpy(unit->extension.bmControls, &buffer[23+p], n);
- if (buffer[23+p+n] != 0)
- usb_string(udev, buffer[23+p+n], unit->name,
- sizeof(unit->name));
- else
+ if (buffer[23+p+n] == 0 ||
+ usb_string(udev, buffer[23+p+n], unit->name, sizeof(unit->name)) < 0)
sprintf(unit->name, "Extension %u", buffer[3]);
list_add_tail(&unit->list, &dev->entities);
@@ -2472,6 +2462,24 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax },
+ /* Logitech, Webcam C910 */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x046d,
+ .idProduct = 0x0821,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)},
+ /* Logitech, Webcam B910 */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x046d,
+ .idProduct = 0x0823,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)},
/* Logitech Quickcam Fusion */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
index ca3a9c2eec27..7c9895377118 100644
--- a/drivers/media/usb/uvc/uvc_entity.c
+++ b/drivers/media/usb/uvc/uvc_entity.c
@@ -37,7 +37,7 @@ static int uvc_mc_create_links(struct uvc_video_chain *chain,
continue;
remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]);
- if (remote == NULL)
+ if (remote == NULL || remote->num_pads == 0)
return -EINVAL;
source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index da72577c2998..caa1c48517ae 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -185,6 +185,18 @@ static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
static void uvc_stop_streaming(struct vb2_queue *vq)
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
+ struct uvc_streaming *stream = uvc_queue_to_stream(queue);
+
+ /* Prevent new buffers coming in. */
+ spin_lock_irq(&queue->irqlock);
+ queue->flags |= UVC_QUEUE_STOPPING;
+ spin_unlock_irq(&queue->irqlock);
+
+ /*
+ * All pending work should be completed before disabling the stream, as
+ * all URBs will be freed during uvc_video_enable(s, 0).
+ */
+ flush_workqueue(stream->async_wq);
lockdep_assert_irqs_enabled();
@@ -193,6 +205,7 @@ static void uvc_stop_streaming(struct vb2_queue *vq)
spin_lock_irq(&queue->irqlock);
uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
+ queue->flags &= ~UVC_QUEUE_STOPPING;
spin_unlock_irq(&queue->irqlock);
}
diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
index 2bdb0ff203f8..73725051cc16 100644
--- a/drivers/media/usb/uvc/uvc_status.c
+++ b/drivers/media/usb/uvc/uvc_status.c
@@ -6,6 +6,7 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
+#include <asm/barrier.h>
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/slab.h>
@@ -179,7 +180,8 @@ static bool uvc_event_control(struct urb *urb,
switch (status->bAttribute) {
case UVC_CTRL_VALUE_CHANGE:
- return uvc_ctrl_status_event(urb, chain, ctrl, status->bValue);
+ return uvc_ctrl_status_event_async(urb, chain, ctrl,
+ status->bValue);
case UVC_CTRL_INFO_CHANGE:
case UVC_CTRL_FAILURE_CHANGE:
@@ -309,5 +311,41 @@ int uvc_status_start(struct uvc_device *dev, gfp_t flags)
void uvc_status_stop(struct uvc_device *dev)
{
+ struct uvc_ctrl_work *w = &dev->async_ctrl;
+
+ /*
+ * Prevent the asynchronous control handler from requeueing the URB. The
+ * barrier is needed so the flush_status change is visible to other
+ * CPUs running the asynchronous handler before usb_kill_urb() is
+ * called below.
+ */
+ smp_store_release(&dev->flush_status, true);
+
+ /*
+ * Cancel any pending asynchronous work. If any status event was queued,
+ * process it synchronously.
+ */
+ if (cancel_work_sync(&w->work))
+ uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
+
+ /* Kill the urb. */
usb_kill_urb(dev->int_urb);
+
+ /*
+ * The URB completion handler may have queued asynchronous work. This
+ * won't resubmit the URB as flush_status is set, but it needs to be
+ * cancelled before returning or it could then race with a future
+ * uvc_status_start() call.
+ */
+ if (cancel_work_sync(&w->work))
+ uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
+
+ /*
+ * From this point, there are no events on the queue and the status URB
+ * is dead. No events will be queued until uvc_status_start() is called.
+ * The barrier is needed to make sure that flush_status is visible to
+ * uvc_ctrl_status_event_work() when uvc_status_start() will be called
+ * again.
+ */
+ smp_store_release(&dev->flush_status, false);
}
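
The stop path publishes flush_status with release semantics and the work handler reads it with acquire semantics, so the handler is guaranteed to observe the flag before deciding whether to resubmit the URB. A userspace analogue using C11 atomics in place of smp_store_release()/smp_load_acquire():

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool flush_status;

/* one pass of the deferred status work */
static void work_handler(void)
{
	/* ...process the queued event here... */
	if (atomic_load_explicit(&flush_status, memory_order_acquire)) {
		puts("worker: flushing, do not resubmit");
		return;
	}
	puts("worker: resubmit URB");
}

int main(void)
{
	/* stop path: publish the flag, then drain the worker */
	atomic_store_explicit(&flush_status, true, memory_order_release);
	work_handler();		/* kernel: cancel_work_sync() + sync replay */

	/* ready for a future start: clear the flag */
	atomic_store_explicit(&flush_status, false, memory_order_release);
	return 0;
}
```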
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index fe58723fc5ac..9d6feb95e5c6 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -797,9 +797,9 @@ static void uvc_video_stats_decode(struct uvc_streaming *stream,
unsigned int header_size;
bool has_pts = false;
bool has_scr = false;
- u16 uninitialized_var(scr_sof);
- u32 uninitialized_var(scr_stc);
- u32 uninitialized_var(pts);
+ u16 scr_sof;
+ u32 scr_stc;
+ u32 pts;
if (stream->stats.stream.nb_frames == 0 &&
stream->stats.frame.nb_packets == 0)
@@ -1129,6 +1129,20 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return data[0];
}
+static void uvc_video_copy_packets(struct uvc_urb *uvc_urb)
+{
+ unsigned int i;
+
+ for (i = 0; i < uvc_urb->async_operations; i++) {
+ struct uvc_copy_op *op = &uvc_urb->copy_operations[i];
+
+ memcpy(op->dst, op->src, op->len);
+
+ /* Release reference taken on this buffer. */
+ uvc_queue_buffer_release(op->buf);
+ }
+}
+
/*
* uvc_video_decode_data_work: Asynchronous memcpy processing
*
@@ -1138,22 +1152,26 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
static void uvc_video_copy_data_work(struct work_struct *work)
{
struct uvc_urb *uvc_urb = container_of(work, struct uvc_urb, work);
- unsigned int i;
+ struct uvc_streaming *stream = uvc_urb->stream;
+ struct uvc_video_queue *queue = &stream->queue;
int ret;
- for (i = 0; i < uvc_urb->async_operations; i++) {
- struct uvc_copy_op *op = &uvc_urb->copy_operations[i];
-
- memcpy(op->dst, op->src, op->len);
+ uvc_video_copy_packets(uvc_urb);
- /* Release reference taken on this buffer. */
- uvc_queue_buffer_release(op->buf);
+ /*
+ * Prevent resubmitting URBs when shutting down to ensure that no new
+ * work item will be scheduled after uvc_stop_streaming() flushes the
+ * work queue.
+ */
+ spin_lock_irq(&queue->irqlock);
+ if (!(queue->flags & UVC_QUEUE_STOPPING)) {
+ ret = usb_submit_urb(uvc_urb->urb, GFP_ATOMIC);
+ if (ret < 0)
+ uvc_printk(KERN_ERR,
+ "Failed to resubmit video URB (%d).\n",
+ ret);
}
-
- ret = usb_submit_urb(uvc_urb->urb, GFP_KERNEL);
- if (ret < 0)
- uvc_printk(KERN_ERR, "Failed to resubmit video URB (%d).\n",
- ret);
+ spin_unlock_irq(&queue->irqlock);
}
static void uvc_video_decode_data(struct uvc_urb *uvc_urb,
@@ -1308,7 +1326,9 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream,
if (has_scr)
memcpy(stream->clock.last_scr, scr, 6);
- memcpy(&meta->length, mem, length);
+ meta->length = mem[0];
+ meta->flags = mem[1];
+ memcpy(meta->buf, &mem[2], length - 2);
meta_buf->bytesused += length + sizeof(meta->ns) + sizeof(meta->sof);
uvc_trace(UVC_TRACE_FRAME,
@@ -1556,6 +1576,10 @@ static void uvc_video_complete(struct urb *urb)
buf = uvc_queue_get_current_buffer(queue);
+ /*
+ * Process the URB headers, and optionally queue expensive memcpy tasks
+ * to be deferred to a work queue.
+ */
if (vb2_qmeta) {
spin_lock_irqsave(&qmeta->irqlock, flags);
if (!list_empty(&qmeta->irqqueue))
@@ -1573,7 +1597,7 @@ static void uvc_video_complete(struct urb *urb)
*/
stream->decode(uvc_urb, buf, buf_meta);
- /* If no async work is needed, resubmit the URB immediately. */
+ /* Without any queued work, we must submit the URB. */
if (!uvc_urb->async_operations) {
ret = usb_submit_urb(uvc_urb->urb, GFP_ATOMIC);
if (ret < 0)
@@ -1583,7 +1607,21 @@ static void uvc_video_complete(struct urb *urb)
return;
}
- queue_work(stream->async_wq, &uvc_urb->work);
+ /*
+ * When the stream is stopped, all URBs are freed as part of the call to
+ * uvc_stop_streaming() and must not be handled asynchronously. In that
+ * event we can safely complete the packet work directly in this
+ * context, without resubmitting the URB.
+ */
+ spin_lock_irqsave(&queue->irqlock, flags);
+ if (!(queue->flags & UVC_QUEUE_STOPPING)) {
+ /* Handle any heavy lifting required */
+ INIT_WORK(&uvc_urb->work, uvc_video_copy_data_work);
+ queue_work(stream->async_wq, &uvc_urb->work);
+ } else {
+ uvc_video_copy_packets(uvc_urb);
+ }
+ spin_unlock_irqrestore(&queue->irqlock, flags);
}
/*
@@ -1860,7 +1898,7 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
struct usb_host_endpoint *best_ep = NULL;
unsigned int best_psize = UINT_MAX;
unsigned int bandwidth;
- unsigned int uninitialized_var(altsetting);
+ unsigned int altsetting;
int intfnum = stream->intfnum;
/* Isochronous endpoint, select the alternate setting. */
@@ -1903,6 +1941,17 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
uvc_trace(UVC_TRACE_VIDEO, "Selecting alternate setting %u "
"(%u B/frame bandwidth).\n", altsetting, best_psize);
+ /*
+ * Some devices, namely the Logitech C910 and B910, are unable
+ * to recover from a USB autosuspend, unless the alternate
+ * setting of the streaming interface is toggled.
+ */
+ if (stream->dev->quirks & UVC_QUIRK_WAKE_AUTOSUSPEND) {
+ usb_set_interface(stream->dev->udev, intfnum,
+ altsetting);
+ usb_set_interface(stream->dev->udev, intfnum, 0);
+ }
+
ret = usb_set_interface(stream->dev->udev, intfnum, altsetting);
if (ret < 0)
return ret;
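
The quirk only takes effect for devices flagged in the driver's device table. An illustrative uvc_ids[] entry — the VID/PID pair below is made up, and the UVC_INFO_QUIRK() wrapper is assumed from this tree's uvc_driver.c:

    /* Hypothetical entry; real devices would use their own VID/PID. */
    { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
                            | USB_DEVICE_ID_MATCH_INT_INFO,
      .idVendor             = 0x046d,
      .idProduct            = 0x0821,
      .bInterfaceClass      = USB_CLASS_VIDEO,
      .bInterfaceSubClass   = 1,
      .bInterfaceProtocol   = 0,
      .driver_info          = UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND) },

The same bit (0x00002000 per the uvcvideo.h hunk below) can also be forced at load time through uvcvideo's quirks module parameter.
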
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 5f137400bebd..977bcc9647fe 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -173,9 +173,9 @@
#define DRIVER_VERSION "1.1.1"
/* Number of isochronous URBs. */
-#define UVC_URBS 5
+#define UVC_URBS 50
/* Maximum number of packets per URB. */
-#define UVC_MAX_PACKETS 32
+#define UVC_MAX_PACKETS 48
/* Maximum status buffer size in bytes of interrupt URB. */
#define UVC_MAX_STATUS_SIZE 16
@@ -199,6 +199,7 @@
#define UVC_QUIRK_RESTORE_CTRLS_ON_INIT 0x00000400
#define UVC_QUIRK_FORCE_Y8 0x00000800
#define UVC_QUIRK_FORCE_BPP 0x00001000
+#define UVC_QUIRK_WAKE_AUTOSUSPEND 0x00002000
/* Format flags */
#define UVC_FMT_FLAG_COMPRESSED 0x00000001
@@ -421,6 +422,7 @@ struct uvc_buffer {
#define UVC_QUEUE_DISCONNECTED (1 << 0)
#define UVC_QUEUE_DROP_CORRUPTED (1 << 1)
+#define UVC_QUEUE_STOPPING (1 << 2)
struct uvc_video_queue {
struct vb2_queue queue;
@@ -663,6 +665,7 @@ struct uvc_device {
/* Status Interrupt Endpoint */
struct usb_host_endpoint *int_ep;
struct urb *int_urb;
+ bool flush_status;
u8 *status;
struct input_dev *input;
char input_phys[64];
@@ -832,7 +835,9 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
int uvc_ctrl_init_device(struct uvc_device *dev);
void uvc_ctrl_cleanup_device(struct uvc_device *dev);
int uvc_ctrl_restore_values(struct uvc_device *dev);
-bool uvc_ctrl_status_event(struct urb *urb, struct uvc_video_chain *chain,
+bool uvc_ctrl_status_event_async(struct urb *urb, struct uvc_video_chain *chain,
+ struct uvc_control *ctrl, const u8 *data);
+void uvc_ctrl_status_event(struct uvc_video_chain *chain,
struct uvc_control *ctrl, const u8 *data);
int uvc_ctrl_begin(struct uvc_video_chain *chain);
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 4037689a945a..ddc861868ce0 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -533,13 +533,23 @@ static int get_index(struct video_device *vdev)
*/
static void determine_valid_ioctls(struct video_device *vdev)
{
+ const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
+ const u32 meta_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_META_OUTPUT;
DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
- bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER;
+ bool is_vid = vdev->vfl_type == VFL_TYPE_VIDEO &&
+ (vdev->device_caps & vid_caps);
bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI;
bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO;
bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR;
bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH;
+ bool is_meta = vdev->vfl_type == VFL_TYPE_VIDEO &&
+ (vdev->device_caps & meta_caps);
bool is_rx = vdev->vfl_dir != VFL_DIR_TX;
bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
@@ -587,39 +597,31 @@ static void determine_valid_ioctls(struct video_device *vdev)
set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
if (is_vid || is_tch) {
- /* video and metadata specific ioctls */
+ /* video and touch specific ioctls */
if ((is_rx && (ops->vidioc_enum_fmt_vid_cap ||
- ops->vidioc_enum_fmt_vid_overlay ||
- ops->vidioc_enum_fmt_meta_cap)) ||
- (is_tx && (ops->vidioc_enum_fmt_vid_out ||
- ops->vidioc_enum_fmt_meta_out)))
+ ops->vidioc_enum_fmt_vid_overlay)) ||
+ (is_tx && ops->vidioc_enum_fmt_vid_out))
set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_g_fmt_vid_cap ||
ops->vidioc_g_fmt_vid_cap_mplane ||
- ops->vidioc_g_fmt_vid_overlay ||
- ops->vidioc_g_fmt_meta_cap)) ||
+ ops->vidioc_g_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_g_fmt_vid_out ||
ops->vidioc_g_fmt_vid_out_mplane ||
- ops->vidioc_g_fmt_vid_out_overlay ||
- ops->vidioc_g_fmt_meta_out)))
+ ops->vidioc_g_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_s_fmt_vid_cap ||
ops->vidioc_s_fmt_vid_cap_mplane ||
- ops->vidioc_s_fmt_vid_overlay ||
- ops->vidioc_s_fmt_meta_cap)) ||
+ ops->vidioc_s_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_s_fmt_vid_out ||
ops->vidioc_s_fmt_vid_out_mplane ||
- ops->vidioc_s_fmt_vid_out_overlay ||
- ops->vidioc_s_fmt_meta_out)))
+ ops->vidioc_s_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_try_fmt_vid_cap ||
ops->vidioc_try_fmt_vid_cap_mplane ||
- ops->vidioc_try_fmt_vid_overlay ||
- ops->vidioc_try_fmt_meta_cap)) ||
+ ops->vidioc_try_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_try_fmt_vid_out ||
ops->vidioc_try_fmt_vid_out_mplane ||
- ops->vidioc_try_fmt_vid_out_overlay ||
- ops->vidioc_try_fmt_meta_out)))
+ ops->vidioc_try_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay);
SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf);
@@ -641,7 +643,21 @@ static void determine_valid_ioctls(struct video_device *vdev)
set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
- } else if (is_vbi) {
+ }
+ if (is_meta && is_rx) {
+ /* metadata capture specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_cap);
+ } else if (is_meta && is_tx) {
+ /* metadata output specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_out);
+ }
+ if (is_vbi) {
/* vbi specific ioctls */
if ((is_rx && (ops->vidioc_g_fmt_vbi_cap ||
ops->vidioc_g_fmt_sliced_vbi_cap)) ||
@@ -681,8 +697,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
}
- if (is_vid || is_vbi || is_sdr || is_tch) {
- /* ioctls valid for video, metadata, vbi or sdr */
+ if (is_vid || is_vbi || is_sdr || is_tch || is_meta) {
+ /* ioctls valid for video, vbi, sdr, touch and metadata */
SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
@@ -694,8 +710,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
}
- if (is_vid || is_vbi || is_tch) {
- /* ioctls valid for video or vbi */
+ if (is_vid || is_vbi || is_tch || is_meta) {
+ /* ioctls valid for video, vbi, touch and metadata */
if (ops->vidioc_s_std)
set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std);
@@ -761,7 +777,7 @@ static int video_register_media_controller(struct video_device *vdev)
vdev->entity.function = MEDIA_ENT_F_UNKNOWN;
switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
intf_type = MEDIA_INTF_T_V4L_VIDEO;
vdev->entity.function = MEDIA_ENT_F_IO_V4L;
break;
@@ -869,7 +885,7 @@ int __video_register_device(struct video_device *vdev,
/* Part 1: check device type */
switch (type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
name_base = "video";
break;
case VFL_TYPE_VBI:
@@ -913,7 +929,7 @@ int __video_register_device(struct video_device *vdev,
* of 128-191 and just pick the first free minor there
* (new style). */
switch (type) {
- case VFL_TYPE_GRABBER:
+ case VFL_TYPE_VIDEO:
minor_offset = 0;
minor_cnt = 64;
break;
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 60454e1b727e..845ef4b2a4bf 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -145,6 +145,8 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
const struct v4l2_bt_timings *bt = &t->bt;
const struct v4l2_bt_timings_cap *cap = &dvcap->bt;
u32 caps = cap->capabilities;
+ const u32 max_vert = 10240;
+ u32 max_hor = 3 * bt->width;
if (t->type != V4L2_DV_BT_656_1120)
return false;
@@ -161,6 +163,26 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
(bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
(!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
return false;
+
+ /* sanity checks for the blanking timings */
+ if (!bt->interlaced &&
+ (bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch))
+ return false;
+ /*
+ * Some video receivers cannot properly separate the frontporch,
+ * backporch and sync values, and instead they only have the total
+ * blanking. That can be assigned to any of these three fields.
+ * So just check that none of these are way out of range.
+ */
+ if (bt->hfrontporch > max_hor ||
+ bt->hsync > max_hor || bt->hbackporch > max_hor)
+ return false;
+ if (bt->vfrontporch > max_vert ||
+ bt->vsync > max_vert || bt->vbackporch > max_vert)
+ return false;
+ if (bt->interlaced && (bt->il_vfrontporch > max_vert ||
+ bt->il_vsync > max_vert || bt->il_vbackporch > max_vert))
+ return false;
return fnc == NULL || fnc(t, fnc_handle);
}
EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings);
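
Concretely, with the limits above a 1920-wide mode accepts up to 3 * 1920 = 5760 pixels in each horizontal blanking field and up to 10240 lines in each vertical field. A rough usage sketch, assuming a driver-supplied cap of type struct v4l2_dv_timings_cap:

    struct v4l2_dv_timings t = V4L2_DV_BT_CEA_1920X1080P60;

    t.bt.hbackporch = 5000;         /* <= 5760: still accepted */
    WARN_ON(!v4l2_valid_dv_timings(&t, &cap, NULL, NULL));

    t.bt.hbackporch = 60000;        /* > 5760: now rejected */
    WARN_ON(v4l2_valid_dv_timings(&t, &cap, NULL, NULL));
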
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 48c3b9f72722..00d66495b47d 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -560,35 +560,38 @@ int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse);
-int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode,
+int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode,
struct v4l2_fwnode_link *link)
{
- const char *port_prop = is_of_node(__fwnode) ? "reg" : "port";
- struct fwnode_handle *fwnode;
+ struct fwnode_endpoint fwep;
memset(link, 0, sizeof(*link));
- fwnode = fwnode_get_parent(__fwnode);
- fwnode_property_read_u32(fwnode, port_prop, &link->local_port);
- fwnode = fwnode_get_next_parent(fwnode);
- if (is_of_node(fwnode) && of_node_name_eq(to_of_node(fwnode), "ports"))
- fwnode = fwnode_get_next_parent(fwnode);
- link->local_node = fwnode;
-
- fwnode = fwnode_graph_get_remote_endpoint(__fwnode);
- if (!fwnode) {
- fwnode_handle_put(fwnode);
+ fwnode_graph_parse_endpoint(fwnode, &fwep);
+ link->local_port = fwep.port;
+ link->local_node = fwnode_graph_get_port_parent(fwnode);
+ if (!link->local_node)
return -ENOLINK;
- }
- fwnode = fwnode_get_parent(fwnode);
- fwnode_property_read_u32(fwnode, port_prop, &link->remote_port);
- fwnode = fwnode_get_next_parent(fwnode);
- if (is_of_node(fwnode) && of_node_name_eq(to_of_node(fwnode), "ports"))
- fwnode = fwnode_get_next_parent(fwnode);
- link->remote_node = fwnode;
+ fwnode = fwnode_graph_get_remote_endpoint(fwnode);
+ if (!fwnode)
+ goto err_put_local_node;
+
+ fwnode_graph_parse_endpoint(fwnode, &fwep);
+ link->remote_port = fwep.port;
+ link->remote_node = fwnode_graph_get_port_parent(fwnode);
+ if (!link->remote_node)
+ goto err_put_remote_endpoint;
return 0;
+
+err_put_remote_endpoint:
+ fwnode_handle_put(fwnode);
+
+err_put_local_node:
+ fwnode_handle_put(link->local_node);
+
+ return -ENOLINK;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link);
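
On success the caller holds references on link.local_node and link.remote_node and must drop them, typically via v4l2_fwnode_put_link(). A usage sketch, where ep_fwnode and dev are assumed to exist in the caller:

    struct v4l2_fwnode_link link;
    int ret;

    ret = v4l2_fwnode_parse_link(ep_fwnode, &link);
    if (ret)
            return ret;     /* -ENOLINK: no references to drop */

    dev_dbg(dev, "local port %u -> remote port %u\n",
            link.local_port, link.remote_port);

    v4l2_fwnode_put_link(&link);    /* puts local_node and remote_node */
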
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 3012e8ecffb9..6b96c072777c 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/version.h>
+#include <linux/v4l2-subdev.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
@@ -950,12 +951,22 @@ static bool check_ext_ctrls(struct v4l2_ext_controls *c, unsigned long ioctl)
static int check_fmt(struct file *file, enum v4l2_buf_type type)
{
+ const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
+ const u32 meta_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_META_OUTPUT;
struct video_device *vfd = video_devdata(file);
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
- bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_vid = vfd->vfl_type == VFL_TYPE_VIDEO &&
+ (vfd->device_caps & vid_caps);
bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI;
bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
+ bool is_meta = vfd->vfl_type == VFL_TYPE_VIDEO &&
+ (vfd->device_caps & meta_caps);
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -1014,11 +1025,11 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
return 0;
break;
case V4L2_BUF_TYPE_META_CAPTURE:
- if (is_vid && is_rx && ops->vidioc_g_fmt_meta_cap)
+ if (is_meta && is_rx && ops->vidioc_g_fmt_meta_cap)
return 0;
break;
case V4L2_BUF_TYPE_META_OUTPUT:
- if (is_vid && is_tx && ops->vidioc_g_fmt_meta_out)
+ if (is_meta && is_tx && ops->vidioc_g_fmt_meta_out)
return 0;
break;
default:
@@ -1221,6 +1232,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_RGB32: descr = "32-bit A/XRGB 8-8-8-8"; break;
case V4L2_PIX_FMT_ARGB32: descr = "32-bit ARGB 8-8-8-8"; break;
case V4L2_PIX_FMT_XRGB32: descr = "32-bit XRGB 8-8-8-8"; break;
+ case V4L2_PIX_FMT_XBGR30: descr = "32-bit XBGR 2-10-10-10"; break;
+ case V4L2_PIX_FMT_XBGR40: descr = "40-bit XBGR 4-12-12-12"; break;
+ case V4L2_PIX_FMT_BGR48: descr = "48-bit BGR 16-16-16"; break;
case V4L2_PIX_FMT_BGRA32: descr = "32-bit ABGR 8-8-8-8"; break;
case V4L2_PIX_FMT_BGRX32: descr = "32-bit XBGR 8-8-8-8"; break;
case V4L2_PIX_FMT_RGBA32: descr = "32-bit RGBA 8-8-8-8"; break;
@@ -1252,6 +1266,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_YUV411P: descr = "Planar YUV 4:1:1"; break;
case V4L2_PIX_FMT_Y41P: descr = "YUV 4:1:1 (Packed)"; break;
case V4L2_PIX_FMT_YUV444: descr = "16-bit A/XYUV 4-4-4-4"; break;
+ case V4L2_PIX_FMT_XVUY32: descr = "32-bit packed XVUY 8-8-8-8"; break;
+ case V4L2_PIX_FMT_AVUY32: descr = "32-bit packed AVUY 8-8-8-8"; break;
+ case V4L2_PIX_FMT_VUY24: descr = "24-bit packed VUY 8-8-8"; break;
case V4L2_PIX_FMT_YUV555: descr = "16-bit A/XYUV 1-5-5-5"; break;
case V4L2_PIX_FMT_YUV565: descr = "16-bit YUV 5-6-5"; break;
case V4L2_PIX_FMT_YUV32: descr = "32-bit A/XYUV 8-8-8-8"; break;
@@ -1264,18 +1281,23 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break;
case V4L2_PIX_FMT_HM12: descr = "YUV 4:2:0 (16x16 Macroblocks)"; break;
case V4L2_PIX_FMT_M420: descr = "YUV 4:2:0 (M420)"; break;
+ case V4L2_PIX_FMT_XVUY10: descr = "XVUY 2-10-10-10"; break;
case V4L2_PIX_FMT_NV12: descr = "Y/CbCr 4:2:0"; break;
case V4L2_PIX_FMT_NV21: descr = "Y/CrCb 4:2:0"; break;
case V4L2_PIX_FMT_NV16: descr = "Y/CbCr 4:2:2"; break;
case V4L2_PIX_FMT_NV61: descr = "Y/CrCb 4:2:2"; break;
case V4L2_PIX_FMT_NV24: descr = "Y/CbCr 4:4:4"; break;
case V4L2_PIX_FMT_NV42: descr = "Y/CrCb 4:4:4"; break;
+ case V4L2_PIX_FMT_XV15: descr = "Y/CrCb 4:2:0 10-bit"; break;
+ case V4L2_PIX_FMT_XV20: descr = "Y/CrCb 4:2:2 10-bit"; break;
case V4L2_PIX_FMT_NV12M: descr = "Y/CbCr 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV21M: descr = "Y/CrCb 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV16M: descr = "Y/CbCr 4:2:2 (N-C)"; break;
case V4L2_PIX_FMT_NV61M: descr = "Y/CrCb 4:2:2 (N-C)"; break;
case V4L2_PIX_FMT_NV12MT: descr = "Y/CbCr 4:2:0 (64x32 MB, N-C)"; break;
case V4L2_PIX_FMT_NV12MT_16X16: descr = "Y/CbCr 4:2:0 (16x16 MB, N-C)"; break;
+ case V4L2_PIX_FMT_XV20M: descr = "Y/CrCb 4:2:2 10-bit (N-C)"; break;
+ case V4L2_PIX_FMT_XV15M: descr = "Y/CrCb 4:2:0 10-bit (N-C)"; break;
case V4L2_PIX_FMT_YUV420M: descr = "Planar YUV 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_YVU420M: descr = "Planar YVU 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_YUV422M: descr = "Planar YUV 4:2:2 (N-C)"; break;
@@ -3026,6 +3048,23 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
}
break;
}
+
+ case VIDIOC_SUBDEV_G_ROUTING:
+ case VIDIOC_SUBDEV_S_ROUTING: {
+ struct v4l2_subdev_routing *route = parg;
+
+ if (route->num_routes > 0) {
+ if (route->num_routes > 256)
+ return -EINVAL;
+
+ *user_ptr = (void __user *)route->routes;
+ *kernel_ptr = (void *)&route->routes;
+ *array_size = sizeof(struct v4l2_subdev_route)
+ * route->num_routes;
+ ret = 1;
+ }
+ break;
+ }
}
return ret;
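
This marshals the flat route array between user and kernel space for the two routing ioctls. A hypothetical userspace sketch, assuming the routing uAPI carried by this tree (struct v4l2_subdev_route, V4L2_SUBDEV_ROUTE_FL_ACTIVE):

    struct v4l2_subdev_route routes[1] = {
            { .sink = 0, .source = 1, .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE },
    };
    struct v4l2_subdev_routing routing = {
            .num_routes = 1,
            .routes = routes,
    };

    if (ioctl(subdev_fd, VIDIOC_SUBDEV_S_ROUTING, &routing) < 0)
            perror("VIDIOC_SUBDEV_S_ROUTING");
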
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 639dc8d45e60..b374c9fab4b9 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -460,19 +460,14 @@ int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
-int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
- struct v4l2_buffer *buf)
+static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
+ struct v4l2_buffer *buf)
{
- struct vb2_queue *vq;
- int ret = 0;
- unsigned int i;
-
- vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- ret = vb2_querybuf(vq, buf);
-
/* Adjust MMAP memory offsets for the CAPTURE queue */
if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
+ unsigned int i;
+
for (i = 0; i < buf->length; ++i)
buf->m.planes[i].m.mem_offset
+= DST_QUEUE_OFF_BASE;
@@ -480,8 +475,23 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
buf->m.offset += DST_QUEUE_OFF_BASE;
}
}
+}
- return ret;
+int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = vb2_querybuf(vq, buf);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
@@ -500,10 +510,16 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
return -EPERM;
}
ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
- if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
v4l2_m2m_try_schedule(m2m_ctx);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
@@ -511,9 +527,17 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
struct vb2_queue *vq;
+ int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+ ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
@@ -522,9 +546,17 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
{
struct video_device *vdev = video_devdata(file);
struct vb2_queue *vq;
+ int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
+ ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
@@ -767,11 +799,17 @@ static int v4l2_m2m_register_entity(struct media_device *mdev,
entity->function = function;
ret = media_entity_pads_init(entity, num_pads, pads);
- if (ret)
+ if (ret) {
+ kfree(entity->name);
+ entity->name = NULL;
return ret;
+ }
ret = media_device_register_entity(mdev, entity);
- if (ret)
+ if (ret) {
+ kfree(entity->name);
+ entity->name = NULL;
return ret;
+ }
return 0;
}
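
The offset shift exists because an m2m video node multiplexes OUTPUT and CAPTURE buffers through a single mmap space; CAPTURE offsets are biased by DST_QUEUE_OFF_BASE so the driver can tell the queues apart. Userspace passes the offset through unchanged — a sketch, assuming an open m2m_fd:

    struct v4l2_buffer buf = {
            .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
            .memory = V4L2_MEMORY_MMAP,
            .index  = 0,
    };

    if (ioctl(m2m_fd, VIDIOC_QUERYBUF, &buf) == 0) {
            /* buf.m.offset already includes DST_QUEUE_OFF_BASE. */
            void *mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
                             MAP_SHARED, m2m_fd, buf.m.offset);
    }
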
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index f725cd9b66b9..8a70fb3f9f01 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -582,6 +582,33 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_S_DV_TIMINGS:
return v4l2_subdev_call(sd, video, s_dv_timings, arg);
+ case VIDIOC_SUBDEV_G_ROUTING:
+ return v4l2_subdev_call(sd, pad, get_routing, arg);
+
+ case VIDIOC_SUBDEV_S_ROUTING: {
+ struct v4l2_subdev_routing *route = arg;
+ unsigned int i;
+
+ if (route->num_routes > sd->entity.num_pads)
+ return -EINVAL;
+
+ for (i = 0; i < route->num_routes; ++i) {
+ unsigned int sink = route->routes[i].sink;
+ unsigned int source = route->routes[i].source;
+ struct media_pad *pads = sd->entity.pads;
+
+ if (sink >= sd->entity.num_pads ||
+ source >= sd->entity.num_pads)
+ return -EINVAL;
+
+ if (!(pads[sink].flags & MEDIA_PAD_FL_SINK) ||
+ !(pads[source].flags & MEDIA_PAD_FL_SOURCE))
+ return -EINVAL;
+ }
+
+ return v4l2_subdev_call(sd, pad, set_routing, route);
+ }
+
case VIDIOC_SUBDEV_G_STD:
return v4l2_subdev_call(sd, video, g_std, arg);
@@ -604,6 +631,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_QUERYSTD:
return v4l2_subdev_call(sd, video, querystd, arg);
#endif
+
default:
return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
}
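
The new checks run before the driver's set_routing op sees the request: every route must reference in-range pads with matching direction flags. An illustrative two-pad layout that satisfies them:

    /* Sketch: pad 0 is a sink, pad 1 a source. */
    static struct media_pad pads[2] = {
            [0] = { .flags = MEDIA_PAD_FL_SINK },
            [1] = { .flags = MEDIA_PAD_FL_SOURCE },
    };

    /*
     * After media_entity_pads_init(&sd->entity, 2, pads), a route
     * { .sink = 0, .source = 1 } is accepted, while { .sink = 1,
     * .source = 0 } fails the pad-flag test with -EINVAL.
     */
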
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index aeb2f497c683..6a6cd046cefb 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -36,12 +36,11 @@ struct videobuf_dma_contig_memory {
static int __videobuf_dc_alloc(struct device *dev,
struct videobuf_dma_contig_memory *mem,
- unsigned long size, gfp_t flags)
+ unsigned long size)
{
mem->size = size;
- mem->vaddr = dma_alloc_coherent(dev, mem->size,
- &mem->dma_handle, flags);
-
+ mem->vaddr = dma_alloc_coherent(dev, mem->size, &mem->dma_handle,
+ GFP_KERNEL);
if (!mem->vaddr) {
dev_err(dev, "memory alloc size %ld failed\n", mem->size);
return -ENOMEM;
@@ -258,8 +257,7 @@ static int __videobuf_iolock(struct videobuf_queue *q,
return videobuf_dma_contig_user_get(mem, vb);
/* allocate memory for the read() method */
- if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
- GFP_KERNEL))
+ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size)))
return -ENOMEM;
break;
case V4L2_MEMORY_OVERLAY:
@@ -295,22 +293,18 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
- if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
- GFP_KERNEL | __GFP_COMP))
+ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize)))
goto error;
- /* Try to remap memory */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
/* the "vm_pgoff" is just used in v4l2 to find the
* corresponding buffer data structure which is allocated
* earlier and it does not mean the offset from the physical
* buffer start address as usual. So set it to 0 to pass
- * the sanity check in vm_iomap_memory().
+ * the sanity check in dma_mmap_coherent().
*/
vma->vm_pgoff = 0;
-
- retval = vm_iomap_memory(vma, mem->dma_handle, mem->size);
+ retval = dma_mmap_coherent(q->dev, vma, mem->vaddr, mem->dma_handle,
+ mem->size);
if (retval) {
dev_err(q->dev, "mmap: remap failed with error %d. ",
retval);
diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
index 9c49d00c2a96..ea6e9e1eaf04 100644
--- a/drivers/memory/atmel-sdramc.c
+++ b/drivers/memory/atmel-sdramc.c
@@ -47,19 +47,17 @@ static int atmel_ramc_probe(struct platform_device *pdev)
caps = of_device_get_match_data(&pdev->dev);
if (caps->has_ddrck) {
- clk = devm_clk_get(&pdev->dev, "ddrck");
+ clk = devm_clk_get_enabled(&pdev->dev, "ddrck");
if (IS_ERR(clk))
return PTR_ERR(clk);
- clk_prepare_enable(clk);
}
if (caps->has_mpddr_clk) {
- clk = devm_clk_get(&pdev->dev, "mpddr");
+ clk = devm_clk_get_enabled(&pdev->dev, "mpddr");
if (IS_ERR(clk)) {
pr_err("AT91 RAMC: couldn't get mpddr clock\n");
return PTR_ERR(clk);
}
- clk_prepare_enable(clk);
}
return 0;
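
devm_clk_get_enabled() bundles devm_clk_get() with clk_prepare_enable() and arranges for the disable at driver detach, which is what lets the explicit enable calls above go away. A roughly equivalent open-coded sketch, with my_clk_disable as a hypothetical helper:

    static void my_clk_disable(void *data)
    {
            clk_disable_unprepare(data);
    }

    /* In probe: */
    struct clk *clk = devm_clk_get(&pdev->dev, "ddrck");
    int ret;

    if (IS_ERR(clk))
            return PTR_ERR(clk);

    ret = clk_prepare_enable(clk);
    if (ret)
            return ret;

    ret = devm_add_action_or_reset(&pdev->dev, my_clk_disable, clk);
    if (ret)
            return ret;
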
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
index 6827ed484750..127a9bffdbca 100644
--- a/drivers/memory/brcmstb_dpfe.c
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -398,15 +398,17 @@ static void __finalize_command(struct private_data *priv)
static int __send_command(struct private_data *priv, unsigned int cmd,
u32 result[])
{
- const u32 *msg = priv->dpfe_api->command[cmd];
void __iomem *regs = priv->regs;
unsigned int i, chksum, chksum_idx;
+ const u32 *msg;
int ret = 0;
u32 resp;
if (cmd >= DPFE_CMD_MAX)
return -1;
+ msg = priv->dpfe_api->command[cmd];
+
mutex_lock(&priv->lock);
/* Wait for DCPU to become ready */
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
index 095f8a3b2cfc..9bf477b000c0 100644
--- a/drivers/memory/mvebu-devbus.c
+++ b/drivers/memory/mvebu-devbus.c
@@ -282,10 +282,9 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
if (IS_ERR(devbus->base))
return PTR_ERR(devbus->base);
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- clk_prepare_enable(clk);
/*
* Obtain clock period in picoseconds,
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index 46539b27a3fb..835754304a7f 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -132,6 +132,7 @@ const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
for_each_child_of_node(np_ddr, np_tim) {
if (of_device_is_compatible(np_tim, tim_compat)) {
if (of_do_get_timings(np_tim, &timings[i])) {
+ of_node_put(np_tim);
devm_kfree(dev, timings);
goto default_timings;
}
diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
index cc01979780d8..322d7ead0031 100644
--- a/drivers/memory/pl353-smc.c
+++ b/drivers/memory/pl353-smc.c
@@ -416,6 +416,7 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id)
if (init)
init(adev, child);
of_platform_device_create(child, NULL, &adev->dev);
+ of_node_put(child);
return 0;
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 12bc3f5a6cbb..1c7a9dcfed65 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -412,6 +412,7 @@ static struct memstick_dev *memstick_alloc_card(struct memstick_host *host)
return card;
err_out:
host->card = old_card;
+ kfree_const(card->dev.kobj.name);
kfree(card);
return NULL;
}
@@ -470,8 +471,10 @@ static void memstick_check(struct work_struct *work)
put_device(&card->dev);
host->card = NULL;
}
- } else
+ } else {
+ kfree_const(card->dev.kobj.name);
kfree(card);
+ }
}
out_power_off:
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 55907e4c36b1..6014fcb49d7e 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1335,17 +1335,17 @@ static int msb_ftl_initialize(struct msb_data *msb)
msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
msb->logical_block_count = msb->zone_count * 496 - 2;
- msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
- msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+ msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
+ msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
msb->lba_to_pba_table =
kmalloc_array(msb->logical_block_count, sizeof(u16),
GFP_KERNEL);
if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
!msb->erased_blocks_bitmap) {
- kfree(msb->used_blocks_bitmap);
+ bitmap_free(msb->used_blocks_bitmap);
+ bitmap_free(msb->erased_blocks_bitmap);
kfree(msb->lba_to_pba_table);
- kfree(msb->erased_blocks_bitmap);
return -ENOMEM;
}
@@ -1953,7 +1953,8 @@ static int msb_bd_open(struct block_device *bdev, fmode_t mode)
static void msb_data_clear(struct msb_data *msb)
{
kfree(msb->boot_page);
- kfree(msb->used_blocks_bitmap);
+ bitmap_free(msb->used_blocks_bitmap);
+ bitmap_free(msb->erased_blocks_bitmap);
kfree(msb->lba_to_pba_table);
kfree(msb->cache);
msb->card = NULL;
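
bitmap_zalloc() takes its size in bits and rounds the allocation up to whole unsigned longs, unlike the old block_count / 8 byte arithmetic, which under-allocated whenever block_count was not a multiple of 8. A sketch of the API pairing:

    unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);

    if (!map)
            return -ENOMEM;

    set_bit(12, map);
    if (test_bit(12, map))
            pr_debug("block 12 marked used\n");

    bitmap_free(map);       /* must pair with bitmap_zalloc() */
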
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 74d6686b35f7..672671a12215 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -314,7 +314,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
}
while (length) {
- unsigned int uninitialized_var(p_off);
+ unsigned int p_off;
if (host->req->long_data) {
pg = nth_page(sg_page(&host->req->sg),
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index eaa2a94d18be..0e37c6a5ee36 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -44,12 +44,10 @@ static const char *tpc_names[] = {
* memstick_debug_get_tpc_name - debug helper that returns string for
* a TPC number
*/
-const char *memstick_debug_get_tpc_name(int tpc)
+static __maybe_unused const char *memstick_debug_get_tpc_name(int tpc)
{
return tpc_names[tpc-1];
}
-EXPORT_SYMBOL(memstick_debug_get_tpc_name);
-
/* Read a register*/
static inline u32 r592_read_reg(struct r592_device *dev, int address)
@@ -828,7 +826,7 @@ static void r592_remove(struct pci_dev *pdev)
/* Stop the processing thread.
That ensures that we won't take any more requests */
kthread_stop(dev->io_thread);
-
+ del_timer_sync(&dev->detect_timer);
r592_enable_device(dev, false);
while (!error && dev->req) {
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index 5b966b54d6e9..fc35c7404429 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -198,7 +198,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
host->block_pos);
while (length) {
- unsigned int uninitialized_var(p_off);
+ unsigned int p_off;
if (host->req->long_data) {
pg = nth_page(sg_page(&host->req->sg),
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index ebc00d47abf5..624803a887d8 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -1430,7 +1430,9 @@ mptlan_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct net_device *dev = ioc->netdev;
+ struct mpt_lan_priv *priv = netdev_priv(dev);
+ cancel_delayed_work_sync(&priv->post_buckets_task);
if(dev != NULL) {
unregister_netdev(dev);
free_netdev(dev);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 43169f25da1f..385db201fe9a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1299,6 +1299,7 @@ config MFD_DAVINCI_VOICECODEC
config MFD_TI_AM335X_TSCADC
tristate "TI ADC / Touch Screen chip support"
+ depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
select MFD_CORE
select REGMAP
select REGMAP_MMIO
diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
index d2a13a547a3c..5610a5d9fad1 100644
--- a/drivers/mfd/altera-sysmgr.c
+++ b/drivers/mfd/altera-sysmgr.c
@@ -109,7 +109,9 @@ struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
dev = driver_find_device_by_of_node(&altr_sysmgr_driver.driver,
(void *)sysmgr_np);
- of_node_put(sysmgr_np);
+ if (property)
+ of_node_put(sysmgr_np);
+
if (!dev)
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 3ff872c205ee..07d256e6875c 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -45,7 +45,7 @@ int arizona_clk32k_enable(struct arizona *arizona)
if (arizona->clk32k_ref == 1) {
switch (arizona->pdata.clk32k_src) {
case ARIZONA_32KZ_MCLK1:
- ret = pm_runtime_get_sync(arizona->dev);
+ ret = pm_runtime_resume_and_get(arizona->dev);
if (ret != 0)
goto err_ref;
ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]);
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index a016b39fe9b0..5f1f6f3a0696 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -69,7 +69,7 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev,
int irq;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
+ if (irq < 0)
return irq;
tsadc->domain = irq_domain_add_simple(np, 2, 0, &mx25_tsadc_domain_ops,
@@ -84,6 +84,19 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev,
return 0;
}
+static int mx25_tsadc_unset_irq(struct platform_device *pdev)
+{
+ struct mx25_tsadc *tsadc = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq >= 0) {
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
+ irq_domain_remove(tsadc->domain);
+ }
+
+ return 0;
+}
+
static void mx25_tsadc_setup_clk(struct platform_device *pdev,
struct mx25_tsadc *tsadc)
{
@@ -171,18 +184,21 @@ static int mx25_tsadc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tsadc);
- return devm_of_platform_populate(dev);
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ goto err_irq;
+
+ return 0;
+
+err_irq:
+ mx25_tsadc_unset_irq(pdev);
+
+ return ret;
}
static int mx25_tsadc_remove(struct platform_device *pdev)
{
- struct mx25_tsadc *tsadc = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
- if (irq) {
- irq_set_chained_handler_and_data(irq, NULL, NULL);
- irq_domain_remove(tsadc->domain);
- }
+ mx25_tsadc_unset_irq(pdev);
return 0;
}
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
index 045cbf0cbe53..993e305a232c 100644
--- a/drivers/mfd/intel-lpss-acpi.c
+++ b/drivers/mfd/intel-lpss-acpi.c
@@ -114,6 +114,9 @@ static int intel_lpss_acpi_probe(struct platform_device *pdev)
return -ENOMEM;
info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!info->mem)
+ return -ENODEV;
+
info->irq = platform_get_irq(pdev, 0);
ret = intel_lpss_probe(&pdev->dev, info);
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index c9f35378d391..4d9b2ad9b086 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -111,6 +111,7 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
return 0;
err_del_irq_chip:
+ pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
return ret;
}
diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c
index 348439a3fbbd..39006297f3d2 100644
--- a/drivers/mfd/lp8788-irq.c
+++ b/drivers/mfd/lp8788-irq.c
@@ -175,6 +175,7 @@ int lp8788_irq_init(struct lp8788 *lp, int irq)
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"lp8788-irq", irqd);
if (ret) {
+ irq_domain_remove(lp->irqdm);
dev_err(lp->dev, "failed to create a thread for IRQ_N\n");
return ret;
}
@@ -188,4 +189,6 @@ void lp8788_irq_exit(struct lp8788 *lp)
{
if (lp->irq)
free_irq(lp->irq, lp->irqdm);
+ if (lp->irqdm)
+ irq_domain_remove(lp->irqdm);
}
diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c
index 768d556b3fe9..5c3d642c8e3a 100644
--- a/drivers/mfd/lp8788.c
+++ b/drivers/mfd/lp8788.c
@@ -195,8 +195,16 @@ static int lp8788_probe(struct i2c_client *cl, const struct i2c_device_id *id)
if (ret)
return ret;
- return mfd_add_devices(lp->dev, -1, lp8788_devs,
- ARRAY_SIZE(lp8788_devs), NULL, 0, NULL);
+ ret = mfd_add_devices(lp->dev, -1, lp8788_devs,
+ ARRAY_SIZE(lp8788_devs), NULL, 0, NULL);
+ if (ret)
+ goto err_exit_irq;
+
+ return 0;
+
+err_exit_irq:
+ lp8788_irq_exit(lp);
+ return ret;
}
static int lp8788_remove(struct i2c_client *cl)
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index a851ff473a44..2bf5bcbc8852 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -418,9 +418,11 @@ static int max77620_initialise_fps(struct max77620_chip *chip)
ret = max77620_config_fps(chip, fps_child);
if (ret < 0) {
of_node_put(fps_child);
+ of_node_put(fps_np);
return ret;
}
}
+ of_node_put(fps_np);
config = chip->enable_global_lpm ? MAX77620_ONOFFCNFG2_SLP_LPM_MSK : 0;
ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
index 5cd653e61512..191b1bc6141c 100644
--- a/drivers/mfd/pcf50633-adc.c
+++ b/drivers/mfd/pcf50633-adc.c
@@ -136,6 +136,7 @@ int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
void *callback_param)
{
struct pcf50633_adc_request *req;
+ int ret;
/* req is freed when the result is ready, in interrupt handler */
req = kmalloc(sizeof(*req), GFP_KERNEL);
@@ -147,7 +148,11 @@ int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
req->callback = callback;
req->callback_param = callback_param;
- return adc_enqueue_request(pcf, req);
+ ret = adc_enqueue_request(pcf, req);
+ if (ret)
+ kfree(req);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(pcf50633_adc_async_read);
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
index 48381d9bf740..302115dabff4 100644
--- a/drivers/mfd/rt5033.c
+++ b/drivers/mfd/rt5033.c
@@ -42,9 +42,6 @@ static const struct mfd_cell rt5033_devs[] = {
.name = "rt5033-charger",
.of_compatible = "richtek,rt5033-charger",
}, {
- .name = "rt5033-battery",
- .of_compatible = "richtek,rt5033-battery",
- }, {
.name = "rt5033-led",
.of_compatible = "richtek,rt5033-led",
},
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index bbcde58e2a11..aab8d8910319 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1733,7 +1733,12 @@ static struct platform_driver sm501_plat_driver = {
static int __init sm501_base_init(void)
{
- platform_driver_register(&sm501_plat_driver);
+ int ret;
+
+ ret = platform_driver_register(&sm501_plat_driver);
+ if (ret < 0)
+ return ret;
+
return pci_register_driver(&sm501_pci_driver);
}
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
index 711979afd90a..887c92342b7f 100644
--- a/drivers/mfd/stmfx.c
+++ b/drivers/mfd/stmfx.c
@@ -389,7 +389,7 @@ static int stmfx_chip_init(struct i2c_client *client)
err:
if (stmfx->vdd)
- return regulator_disable(stmfx->vdd);
+ regulator_disable(stmfx->vdd);
return ret;
}
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 508349399f8a..7f758fb60c1f 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -1494,9 +1494,9 @@ int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum)
int stmpe_remove(struct stmpe *stmpe)
{
- if (!IS_ERR(stmpe->vio))
+ if (!IS_ERR(stmpe->vio) && regulator_is_enabled(stmpe->vio))
regulator_disable(stmpe->vio);
- if (!IS_ERR(stmpe->vcc))
+ if (!IS_ERR(stmpe->vcc) && regulator_is_enabled(stmpe->vcc))
regulator_disable(stmpe->vcc);
__stmpe_disable(stmpe, STMPE_BLOCK_ADC);
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 660723276481..9ec90ce12f4d 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -219,7 +219,9 @@ struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np,
return ERR_PTR(-ENODEV);
regmap = syscon_node_to_regmap(syscon_np);
- of_node_put(syscon_np);
+
+ if (property)
+ of_node_put(syscon_np);
return regmap;
}
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 70da0c4ae457..58811c5ab564 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -405,11 +405,8 @@ err_noirq:
static int t7l66xb_remove(struct platform_device *dev)
{
- struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
- int ret;
- ret = pdata->disable(dev);
clk_disable_unprepare(t7l66xb->clk48m);
clk_put(t7l66xb->clk48m);
clk_disable_unprepare(t7l66xb->clk32k);
@@ -420,8 +417,7 @@ static int t7l66xb_remove(struct platform_device *dev)
mfd_remove_devices(&dev->dev);
kfree(t7l66xb);
- return ret;
-
+ return 0;
}
static struct platform_driver t7l66xb_platform_driver = {
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index c55b63750757..8a2c6726e2da 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -454,6 +454,27 @@ config XILINX_SDFEC
If unsure, say N.
+config XILINX_TRAFGEN
+ tristate "Xilinx Traffic Generator"
+ help
+ This option enables support for the Xilinx Traffic Generator driver.
+ It is designed to generate AXI4 traffic which can be used to stress
+ different modules/interconnects in the system. Various
+ configurable options, provided through sysfs entries, allow the
+ user to generate a wide variety of traffic based on their
+ requirements.
+
+ If unsure, say N.
+
+config XILINX_FLEX_PM
+ tristate "Xilinx Flexnoc Performance Monitor"
+ help
+ This option enables support for the Xilinx Flexnoc Performance
+ Monitor driver. It monitors read and write transactions and
+ provides counters for the LPD and FPD domains.
+
+ If unsure, say N.
+
config MISC_RTSX
tristate
default MISC_RTSX_PCI || MISC_RTSX_USB
@@ -466,6 +487,7 @@ config PVPANIC
a paravirtualized device provided by QEMU; it lets a virtual machine
(guest) communicate panic events to the host.
+source "drivers/misc/jesd204b/Kconfig"
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c1860d35dc7e..2dfb3226834c 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -46,6 +46,9 @@ obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
obj-$(CONFIG_SRAM_EXEC) += sram-exec.o
+obj-$(CONFIG_XILINX_TRAFGEN) += xilinx_trafgen.o
+obj-$(CONFIG_XILINX_FLEX_PM) += xilinx_flex_pm.o
+obj-$(CONFIG_XILINX_JESD204B) += jesd204b/
obj-y += mic/
obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 4c707d8dc3eb..5807aefd4c88 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1485,7 +1485,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pcr->remap_addr = ioremap_nocache(base, len);
if (!pcr->remap_addr) {
ret = -ENOMEM;
- goto free_handle;
+ goto free_idr;
}
pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
@@ -1547,6 +1547,10 @@ disable_msi:
pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
unmap:
iounmap(pcr->remap_addr);
+free_idr:
+ spin_lock(&rtsx_pci_lock);
+ idr_remove(&rtsx_pci_idr, pcr->id);
+ spin_unlock(&rtsx_pci_lock);
free_handle:
kfree(handle);
free_pcr:
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 186308f1f8eb..6334376826a9 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -959,10 +959,10 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
* if it returns an error!
*/
if ((rc = cxl_register_afu(afu)))
- goto err_put1;
+ goto err_put_dev;
if ((rc = cxl_sysfs_afu_add(afu)))
- goto err_put1;
+ goto err_del_dev;
/*
* pHyp doesn't expose the programming models supported by the
@@ -978,7 +978,7 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
afu->modes_supported = CXL_MODE_DIRECTED;
if ((rc = cxl_afu_select_best_mode(afu)))
- goto err_put2;
+ goto err_remove_sysfs;
adapter->afu[afu->slice] = afu;
@@ -998,10 +998,12 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
return 0;
-err_put2:
+err_remove_sysfs:
cxl_sysfs_afu_remove(afu);
-err_put1:
- device_unregister(&afu->dev);
+err_del_dev:
+ device_del(&afu->dev);
+err_put_dev:
+ put_device(&afu->dev);
free = false;
guest_release_serr_irq(afu);
err2:
@@ -1135,18 +1137,20 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic
* even if it returns an error!
*/
if ((rc = cxl_register_adapter(adapter)))
- goto err_put1;
+ goto err_put_dev;
if ((rc = cxl_sysfs_adapter_add(adapter)))
- goto err_put1;
+ goto err_del_dev;
/* release the context lock as the adapter is configured */
cxl_adapter_context_unlock(adapter);
return adapter;
-err_put1:
- device_unregister(&adapter->dev);
+err_del_dev:
+ device_del(&adapter->dev);
+err_put_dev:
+ put_device(&adapter->dev);
free = false;
cxl_guest_remove_chardev(adapter);
err1:
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 4cb829d5d873..2e4dcfebf19a 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -349,6 +349,7 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
out:
cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+ bitmap_free(ctx->irq_bitmap);
afu_irq_name_free(ctx);
return -ENOMEM;
}
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 2ba899f5659f..d183836d80e3 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -387,6 +387,7 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
rc = get_phb_index(np, phb_index);
if (rc) {
pr_err("cxl: invalid phb index\n");
+ of_node_put(np);
return rc;
}
@@ -1164,10 +1165,10 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
* if it returns an error!
*/
if ((rc = cxl_register_afu(afu)))
- goto err_put1;
+ goto err_put_dev;
if ((rc = cxl_sysfs_afu_add(afu)))
- goto err_put1;
+ goto err_del_dev;
adapter->afu[afu->slice] = afu;
@@ -1176,10 +1177,12 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
return 0;
-err_put1:
+err_del_dev:
+ device_del(&afu->dev);
+err_put_dev:
pci_deconfigure_afu(afu);
cxl_debugfs_afu_remove(afu);
- device_unregister(&afu->dev);
+ put_device(&afu->dev);
return rc;
err_free_native:
@@ -1667,23 +1670,25 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
* even if it returns an error!
*/
if ((rc = cxl_register_adapter(adapter)))
- goto err_put1;
+ goto err_put_dev;
if ((rc = cxl_sysfs_adapter_add(adapter)))
- goto err_put1;
+ goto err_del_dev;
/* Release the context lock as adapter is configured */
cxl_adapter_context_unlock(adapter);
return adapter;
-err_put1:
+err_del_dev:
+ device_del(&adapter->dev);
+err_put_dev:
/* This should mirror cxl_remove_adapter, except without the
* sysfs parts
*/
cxl_debugfs_adapter_remove(adapter);
cxl_deconfigure_adapter(adapter);
- device_unregister(&adapter->dev);
+ put_device(&adapter->dev);
return ERR_PTR(rc);
err_release:
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 0f791bfdc1f5..c92f2cdf4026 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -6,6 +6,7 @@ config EEPROM_AT24
depends on I2C && SYSFS
select NVMEM
select NVMEM_SYSFS
+ select REGMAP
select REGMAP_I2C
help
Enable this driver to get read/write support to most I2C EEPROMs
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index fb5ddf3864fd..6a2646c481ec 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -218,6 +218,13 @@ static void fastrpc_free_map(struct kref *ref)
dma_buf_put(map->buf);
}
+ if (map->fl) {
+ spin_lock(&map->fl->lock);
+ list_del(&map->node);
+ spin_unlock(&map->fl->lock);
+ map->fl = NULL;
+ }
+
kfree(map);
}
@@ -227,10 +234,12 @@ static void fastrpc_map_put(struct fastrpc_map *map)
kref_put(&map->refcount, fastrpc_free_map);
}
-static void fastrpc_map_get(struct fastrpc_map *map)
+static int fastrpc_map_get(struct fastrpc_map *map)
{
- if (map)
- kref_get(&map->refcount);
+ if (!map)
+ return -ENOENT;
+
+ return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
}
static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
@@ -1065,7 +1074,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
if (init.attrs)
- sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
sc, args);
@@ -1080,12 +1089,7 @@ err_invoke:
fl->init_mem = NULL;
fastrpc_buf_free(imem);
err_alloc:
- if (map) {
- spin_lock(&fl->lock);
- list_del(&map->node);
- spin_unlock(&fl->lock);
- fastrpc_map_put(map);
- }
+ fastrpc_map_put(map);
err:
kfree(args);
@@ -1161,10 +1165,8 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
fastrpc_context_put(ctx);
}
- list_for_each_entry_safe(map, m, &fl->maps, node) {
- list_del(&map->node);
+ list_for_each_entry_safe(map, m, &fl->maps, node)
fastrpc_map_put(map);
- }
fastrpc_session_free(cctx, fl->sctx);
fastrpc_channel_ctx_put(cctx);
@@ -1357,7 +1359,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
spin_lock_irqsave(&cctx->lock, flags);
- sess = &cctx->session[cctx->sesscount];
+ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
+ dev_err(&pdev->dev, "too many sessions\n");
+ spin_unlock_irqrestore(&cctx->lock, flags);
+ return -ENOSPC;
+ }
+ sess = &cctx->session[cctx->sesscount++];
sess->used = false;
sess->valid = true;
sess->dev = dev;
@@ -1370,13 +1377,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
struct fastrpc_session_ctx *dup_sess;
for (i = 1; i < sessions; i++) {
- if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
+ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
break;
- dup_sess = &cctx->session[cctx->sesscount];
+ dup_sess = &cctx->session[cctx->sesscount++];
memcpy(dup_sess, sess, sizeof(*dup_sess));
}
}
- cctx->sesscount++;
spin_unlock_irqrestore(&cctx->lock, flags);
rc = dma_set_mask(dev, DMA_BIT_MASK(32));
if (rc) {
@@ -1395,7 +1401,7 @@ static int fastrpc_cb_remove(struct platform_device *pdev)
int i;
spin_lock_irqsave(&cctx->lock, flags);
- for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
+ for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) {
if (cctx->session[i].sid == sess->sid) {
cctx->session[i].valid = false;
cctx->sesscount--;
@@ -1478,8 +1484,10 @@ static void fastrpc_notify_users(struct fastrpc_user *user)
struct fastrpc_invoke_ctx *ctx;
spin_lock(&user->lock);
- list_for_each_entry(ctx, &user->pending, node)
+ list_for_each_entry(ctx, &user->pending, node) {
+ ctx->retval = -EPIPE;
complete(&ctx->work);
+ }
spin_unlock(&user->lock);
}
@@ -1489,7 +1497,9 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
struct fastrpc_user *user;
unsigned long flags;
+ /* No invocations past this point */
spin_lock_irqsave(&cctx->lock, flags);
+ cctx->rpdev = NULL;
list_for_each_entry(user, &cctx->users, user)
fastrpc_notify_users(user);
spin_unlock_irqrestore(&cctx->lock, flags);
@@ -1497,7 +1507,6 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
misc_deregister(&cctx->miscdev);
of_platform_depopulate(&rpdev->dev);
- cctx->rpdev = NULL;
fastrpc_channel_ctx_put(cctx);
}
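
The kref_get_unless_zero() change above closes a window where a lookup could revive a map whose final reference was already being dropped. The general lookup-under-lock pattern, sketched with this driver's field names as assumptions:

    spin_lock(&fl->lock);
    list_for_each_entry(map, &fl->maps, node) {
            if (map->fd == fd &&
                kref_get_unless_zero(&map->refcount)) {
                    *ppmap = map;   /* caller now owns a reference */
                    spin_unlock(&fl->lock);
                    return 0;
            }
    }
    spin_unlock(&fl->lock);
    return -ENOENT;
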
diff --git a/drivers/misc/jesd204b/Kconfig b/drivers/misc/jesd204b/Kconfig
new file mode 100644
index 000000000000..aff08cfe8f82
--- /dev/null
+++ b/drivers/misc/jesd204b/Kconfig
@@ -0,0 +1,28 @@
+#
+# Jesd204b support
+#
+
+config XILINX_JESD204B
+ tristate "Xilinx JESD204B"
+ help
+ This option enables support for the Xilinx JESD204B driver.
+ It is designed to allow the user to access JESD204B IP registers
+ through sysfs entries. JESD204B is the protocol used by high-speed
+ data converters to transfer data to an FPGA/ASIC.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called jesd204b.
+
+config XILINX_JESD204B_PHY
+ tristate "JESD Phy Driver"
+ depends on XILINX_JESD204B
+ help
+ This is the JESD204B PHY interface. It enables support for the
+ Xilinx JESD204B PHY controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called jesd204b_phy.
diff --git a/drivers/misc/jesd204b/Makefile b/drivers/misc/jesd204b/Makefile
new file mode 100644
index 000000000000..7723fcb002c2
--- /dev/null
+++ b/drivers/misc/jesd204b/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_XILINX_JESD204B_PHY) += jesd204b_phy.o
+jesd204b_phy-y += jesd_phy.o gtx7s_cpll_bands.o \
+ gtx7s_qpll_bands.o
+obj-$(CONFIG_XILINX_JESD204B) += jesd204b.o
+jesd204b-y += xilinx_jesd204b.o
diff --git a/drivers/misc/jesd204b/gtx7s_cpll_bands.c b/drivers/misc/jesd204b/gtx7s_cpll_bands.c
new file mode 100644
index 000000000000..a9610f7ade67
--- /dev/null
+++ b/drivers/misc/jesd204b/gtx7s_cpll_bands.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+#include "s7_gtxe2_drp.h"
+#include "gtx7s_cpll_bands.h"
+
+static const u32 gtx7s_cpll_channel_address_lut
+ [GTX7S_CPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_ADDR,
+ RXCDR_CFG1_ADDR,
+ RXCDR_CFG2_ADDR,
+ RXCDR_CFG3_ADDR,
+ RXCDR_CFG4_ADDR,
+ RXOUT_DIV_ADDR,
+ TXOUT_DIV_ADDR,
+ RX_DFE_LPM_CFG_ADDR
+};
+
+static const u32 gtx7s_cpll_channel_offset_lut
+ [GTX7S_CPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_OFFSET,
+ RXCDR_CFG1_OFFSET,
+ RXCDR_CFG2_OFFSET,
+ RXCDR_CFG3_OFFSET,
+ RXCDR_CFG4_OFFSET,
+ RXOUT_DIV_OFFSET,
+ TXOUT_DIV_OFFSET,
+ RX_DFE_LPM_CFG_OFFSET
+};
+
+static const u32 gtx7s_cpll_channel_mask_lut
+ [GTX7S_CPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_MASK,
+ RXCDR_CFG1_MASK,
+ RXCDR_CFG2_MASK,
+ RXCDR_CFG3_MASK,
+ RXCDR_CFG4_MASK,
+ RXOUT_DIV_MASK,
+ TXOUT_DIV_MASK,
+ RX_DFE_LPM_CFG_MASK
+};
+
+/* Note bands run vertically from 1 to 4 */
+static const u16 gtx7s_cpll_channel_param_lut[GTX7S_CPLL_NUM_CHANNEL_DRP_REGS]
+ [GTX7S_CPLL_NUM_LINE_RATE_BANDS] = {
+ {0x20, 0x20, 0x20, 0x20 },/* RXCDR_CFG0 */
+ {0x1010, 0x1020, 0x1040, 0x1040 },/* RXCDR_CFG1 */
+ {0x23ff, 0x23ff, 0x23ff, 0x23ff },/* RXCDR_CFG2 */
+ {0x0, 0x0, 0x0, 0x0 },/* RXCDR_CFG3 */
+ {0x3, 0x3, 0x3, 0x3 },/* RXCDR_CFG4 */
+ {0x3, 0x2, 0x1, 0x1 },/* RXOUT_DIV */
+ {0x3, 0x2, 0x1, 0x1 },/* TXOUT_DIV */
+ {0x904, 0x904, 0x904, 0x104 } /* RX_DFE_LPM_CFG */
+};
+
+u32 get_gtx7s_cpll_address_lut(u32 lut_address)
+{
+ return gtx7s_cpll_channel_address_lut[lut_address];
+}
+
+u32 get_gtx7s_cpll_offset_lut(u32 lut_address)
+{
+ return gtx7s_cpll_channel_offset_lut[lut_address];
+}
+
+u32 get_gtx7s_cpll_mask_lut(u32 lut_address)
+{
+ return gtx7s_cpll_channel_mask_lut[lut_address];
+}
+
+u16 get_gtx7s_cpll_param_lut(u32 param_address, u32 band_address)
+{
+ return gtx7s_cpll_channel_param_lut[param_address][band_address];
+}
diff --git a/drivers/misc/jesd204b/gtx7s_cpll_bands.h b/drivers/misc/jesd204b/gtx7s_cpll_bands.h
new file mode 100644
index 000000000000..f53f20de2cda
--- /dev/null
+++ b/drivers/misc/jesd204b/gtx7s_cpll_bands.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/types.h>
+
+#ifndef GTX7S_CPLL_BANDS_H_
+#define GTX7S_CPLL_BANDS_H_
+
+#define GTX7S_CPLL_NUM_CHANNEL_DRP_REGS 8
+#define GTX7S_CPLL_NUM_LINE_RATE_BANDS 4
+
+u32 get_gtx7s_cpll_address_lut(u32);
+u32 get_gtx7s_cpll_offset_lut(u32);
+u32 get_gtx7s_cpll_mask_lut(u32);
+u16 get_gtx7s_cpll_param_lut(u32, u32);
+
+#endif /* GTX7S_CPLL_BANDS_H_ */
diff --git a/drivers/misc/jesd204b/gtx7s_qpll_bands.c b/drivers/misc/jesd204b/gtx7s_qpll_bands.c
new file mode 100644
index 000000000000..71e70a611bb7
--- /dev/null
+++ b/drivers/misc/jesd204b/gtx7s_qpll_bands.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+#include "s7_gtxe2_drp.h"
+#include "gtx7s_qpll_bands.h"
+
+static const u32 gtx7s_qpll_channel_address_lut
+ [GTX7S_QPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_ADDR,
+ RXCDR_CFG1_ADDR,
+ RXCDR_CFG2_ADDR,
+ RXCDR_CFG3_ADDR,
+ RXCDR_CFG4_ADDR,
+ RXOUT_DIV_ADDR,
+ TXOUT_DIV_ADDR,
+ RX_DFE_LPM_CFG_ADDR,
+ QPLL_CFG0_ADDR,
+ QPLL_CFG1_ADDR
+};
+
+static const u32 gtx7s_qpll_channel_offset_lut
+ [GTX7S_QPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_OFFSET,
+ RXCDR_CFG1_OFFSET,
+ RXCDR_CFG2_OFFSET,
+ RXCDR_CFG3_OFFSET,
+ RXCDR_CFG4_OFFSET,
+ RXOUT_DIV_OFFSET,
+ TXOUT_DIV_OFFSET,
+ RX_DFE_LPM_CFG_OFFSET,
+ QPLL_CFG0_OFFSET,
+ QPLL_CFG1_OFFSET
+};
+
+static const u32 gtx7s_qpll_channel_mask_lut
+ [GTX7S_QPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_MASK,
+ RXCDR_CFG1_MASK,
+ RXCDR_CFG2_MASK,
+ RXCDR_CFG3_MASK,
+ RXCDR_CFG4_MASK,
+ RXOUT_DIV_MASK,
+ TXOUT_DIV_MASK,
+ RX_DFE_LPM_CFG_MASK,
+ QPLL_CFG0_MASK,
+ QPLL_CFG1_MASK
+};
+
+/* Note bands run vertically from 1 to 10 */
+static const u16 gtx7s_qpll_channel_param_lut[GTX7S_QPLL_NUM_CHANNEL_DRP_REGS]
+ [GTX7S_QPLL_NUM_LINE_RATE_BANDS] = {
+{0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20},/* RXCDR_CFG0 */
+{0x1008, 0x1010, 0x1020, 0x1010, 0x1020, 0x1040, 0x1020, 0x1040, 0x1040, 0x1040},/* RXCDR_CFG1 */
+{0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff},/* RXCDR_CFG2 */
+{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, /* RXCDR_CFG3 */
+{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},/* RXCDR_CFG4 */
+{0x3e8, 0x4, 0x2, 0x3, 0x2, 0x1, 0x2, 0x1, 0x1, 0x1},/* RXOUT_DIV */
+{0x3e8, 0x4, 0x2, 0x3, 0x2, 0x1, 0x2, 0x1, 0x1, 0x1},/* TXOUT_DIV */
+{0x904, 0x904, 0x904, 0x904, 0x904, 0x904, 0x904, 0x904, 0x104, 0x104},/* RX_DFE_LPM_CFG */
+{0x1c1, 0x1c1, 0x1c1, 0x181, 0x1c1, 0x1c1, 0x181, 0x1c1, 0x1c1, 0x181},/* QPLL_CFG0 */
+{0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68} /* QPLL_CFG1 */
+};
+
+u32 get_gtx7s_qpll_address_lut(u32 lut_address)
+{
+ return gtx7s_qpll_channel_address_lut[lut_address];
+}
+
+u32 get_gtx7s_qpll_offset_lut(u32 lut_address)
+{
+ return gtx7s_qpll_channel_offset_lut[lut_address];
+}
+
+u32 get_gtx7s_qpll_mask_lut(u32 lut_address)
+{
+ return gtx7s_qpll_channel_mask_lut[lut_address];
+}
+
+u16 get_gtx7s_qpll_param_lut(u32 param_address, u32 band_address)
+{
+ return gtx7s_qpll_channel_param_lut[param_address][band_address];
+}
diff --git a/drivers/misc/jesd204b/gtx7s_qpll_bands.h b/drivers/misc/jesd204b/gtx7s_qpll_bands.h
new file mode 100644
index 000000000000..8b9f6c24efb4
--- /dev/null
+++ b/drivers/misc/jesd204b/gtx7s_qpll_bands.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+
+#ifndef GTX7S_QPLL_BANDS_H_
+#define GTX7S_QPLL_BANDS_H_
+
+#define GTX7S_QPLL_NUM_CHANNEL_DRP_REGS 10
+#define GTX7S_QPLL_NUM_LINE_RATE_BANDS 10
+
+u32 get_gtx7s_qpll_address_lut(u32);
+u32 get_gtx7s_qpll_offset_lut(u32);
+u32 get_gtx7s_qpll_mask_lut(u32);
+u16 get_gtx7s_qpll_param_lut(u32, u32);
+
+#endif /* GTX7S_QPLL_BANDS_H_ */
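
The QPLL tables cover ten line-rate bands against the CPLL's four, and none of these accessors bounds-check their indices, so a caller passing a stale band number would read past the LUTs. A bounds-checked wrapper is cheap; a purely illustrative sketch:

#include <linux/errno.h>
#include <linux/types.h>
#include "gtx7s_qpll_bands.h"

/* Illustrative only: reject out-of-range indices instead of indexing
 * past the end of the parameter LUT.
 */
static int qpll_param_checked(u32 reg, u32 band, u16 *out)
{
	if (reg >= GTX7S_QPLL_NUM_CHANNEL_DRP_REGS ||
	    band >= GTX7S_QPLL_NUM_LINE_RATE_BANDS)
		return -EINVAL;
	*out = get_gtx7s_qpll_param_lut(reg, band);
	return 0;
}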
diff --git a/drivers/misc/jesd204b/jesd_phy.c b/drivers/misc/jesd204b/jesd_phy.c
new file mode 100644
index 000000000000..c35d9433d0fc
--- /dev/null
+++ b/drivers/misc/jesd204b/jesd_phy.c
@@ -0,0 +1,384 @@
+/*
+ * Jesd204b phy support
+ *
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "jesd_phy.h"
+#include "gtx7s_cpll_bands.h"
+#include "gtx7s_qpll_bands.h"
+
+#define PLATFORM_JESD204_PHY_ADDR 0x41E10000
+#define JESD_PHY_LOOP_OFF 0
+#define JESD_PHY_LOOP_PCS 1
+#define JESD_PHY_LOOP_PMA 2
+#define JESD_PHY_LOOP_MAX 2
+
+static inline void jesd204b_phy_write(struct jesd204b_phy_state *st,
+ unsigned reg, unsigned val)
+{
+ iowrite32(val, st->phy + reg);
+}
+
+static inline unsigned int jesd204b_phy_read(struct jesd204b_phy_state *st,
+ unsigned reg)
+{
+ return ioread32(st->phy + reg);
+}
+
+#define NUM_GT_CHANNELS 8
+
+#define QPLL 0x3 /* QPLL (7 series) QPLL1 (UltraScale) */
+#define QPLL0 0x2 /* (UltraScale Only) */
+#define CPLL 0x0
+
+#define DRPREAD BIT(30)
+#define DRPWRITE BIT(31)
+
+#define NR_COMMON_DRP_INTERFACES 0x008
+#define NR_TRANS_DRP_INTERFACES 0x00C
+
+#define CHANNEL_DRP_BASE 0x200
+#define CHANNEL_DRP_ADDR 0x204
+#define CHANNEL_DRP_DREAD 0x20C
+#define CHANNEL_DRP_DWRITE 0x208
+#define CHANNEL_DRP_STAT 0x214
+
+#define CHANNEL_XCVR_SEL 0x400
+#define CHANNEL_XCVR_TXPLL 0x40C
+#define CHANNEL_XCVR_RXPLL 0x410
+#define CHANNEL_XCVR_LOOPB 0x41C
+
+static u32 read_channel_drp_reg(struct jesd204b_phy_state *st, u32 addr)
+{
+ u32 temp;
+
+ jesd204b_phy_write(st, CHANNEL_DRP_ADDR, (DRPREAD | addr));
+ temp = jesd204b_phy_read(st, CHANNEL_DRP_DREAD);
+ return temp;
+}
+
+static void write_channel_drp_reg(struct jesd204b_phy_state *st, u32 addr,
+ u32 data)
+{
+ int loop = 10;
+
+ jesd204b_phy_write(st, CHANNEL_DRP_DWRITE, data);
+ jesd204b_phy_write(st, CHANNEL_DRP_ADDR, (DRPWRITE | addr));
+
+ /* Poll until the DRP write completes, with a bounded timeout */
+ do {
+ if (!jesd204b_phy_read(st, CHANNEL_DRP_STAT))
+ return;
+ usleep_range(1000, 2000);
+ } while (--loop);
+
+ dev_err(st->dev, "DRP wait timeout\n");
+}
+
+static void read_plls(struct jesd204b_phy_state *st)
+{
+ int i;
+ int pll = st->pll;
+ u32 no_of_common_drp_interfaces = 1;
+
+ if (st->pll == CPLL)
+ no_of_common_drp_interfaces = jesd204b_phy_read(
+ st, NR_TRANS_DRP_INTERFACES);
+ else
+ no_of_common_drp_interfaces = jesd204b_phy_read(
+ st, NR_COMMON_DRP_INTERFACES);
+
+ for (i = 0; i < no_of_common_drp_interfaces; i++) {
+ jesd204b_phy_write(st, CHANNEL_XCVR_SEL, i);
+ pll = jesd204b_phy_read(st, CHANNEL_XCVR_TXPLL);
+ pll = jesd204b_phy_read(st, CHANNEL_XCVR_RXPLL);
+ }
+}
+
+static void configure_plls(struct jesd204b_phy_state *st, u32 pll)
+{
+ int i;
+ u32 no_of_common_drp_interfaces;
+
+ if (pll == CPLL)
+ no_of_common_drp_interfaces = jesd204b_phy_read(
+ st, NR_TRANS_DRP_INTERFACES);
+ else
+ no_of_common_drp_interfaces = jesd204b_phy_read(
+ st, NR_COMMON_DRP_INTERFACES);
+
+ for (i = 0; i < no_of_common_drp_interfaces; i++) {
+ jesd204b_phy_write(st, CHANNEL_XCVR_SEL, i);
+ jesd204b_phy_write(st, CHANNEL_XCVR_TXPLL, pll);
+ jesd204b_phy_write(st, CHANNEL_XCVR_RXPLL, pll);
+ }
+}
+
+static void configure_channel_drp(struct jesd204b_phy_state *st, u32 setting)
+{
+ u32 i, j, addr, temp, no_of_common_drp_interfaces;
+ u32 no_channel_drp_reg = GTX7S_QPLL_NUM_CHANNEL_DRP_REGS;
+
+ no_of_common_drp_interfaces = jesd204b_phy_read(
+ st, NR_TRANS_DRP_INTERFACES);
+
+ if (st->pll == CPLL)
+ no_channel_drp_reg = GTX7S_CPLL_NUM_CHANNEL_DRP_REGS;
+ for (i = 0; i < no_of_common_drp_interfaces; i++) {
+ jesd204b_phy_write(st, CHANNEL_DRP_BASE, i);
+ for (j = 0; j < no_channel_drp_reg; j++) {
+ /* Get the register address */
+ if (st->pll == QPLL) {
+ addr = get_gtx7s_qpll_address_lut(j);
+
+ /* Read the register */
+ temp = read_channel_drp_reg(st, addr);
+
+ temp &= (0xFFFF ^ (get_gtx7s_qpll_mask_lut(j)));
+ temp |= ((get_gtx7s_qpll_param_lut(j, setting)
+ << get_gtx7s_qpll_offset_lut(j))
+ & get_gtx7s_qpll_mask_lut(j));
+ } else {
+ addr = get_gtx7s_cpll_address_lut(j);
+
+ temp = read_channel_drp_reg(st, addr);
+
+ temp &= (0xFFFF ^ (get_gtx7s_cpll_mask_lut(j)));
+ temp |= ((get_gtx7s_cpll_param_lut(j, setting)
+ << get_gtx7s_cpll_offset_lut(j))
+ & get_gtx7s_cpll_mask_lut(j));
+ }
+ write_channel_drp_reg(st, addr, temp);
+ }
+ }
+}
+
+void jesd204_phy_set_speed(struct jesd204b_phy_state *st, u32 band)
+{
+ /* Make sure the correct PLLs are selected. */
+ configure_channel_drp(st, band);
+}
+
+static void jesd204_phy_init(struct jesd204b_phy_state *st, int line_rate)
+{
+ jesd204_phy_set_speed(st, line_rate);
+}
+
+int jesd204_phy_set_loop(struct jesd204b_phy_state *st, u32 loopval)
+{
+ int i;
+ u32 no_of_channels;
+
+ no_of_channels = jesd204b_phy_read(st, NR_COMMON_DRP_INTERFACES);
+
+ if (loopval > JESD_PHY_LOOP_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < no_of_channels ; i++) {
+ jesd204b_phy_write(st, CHANNEL_XCVR_SEL, i);
+ jesd204b_phy_write(st, CHANNEL_XCVR_LOOPB, loopval);
+ }
+ return 0;
+}
+
+static ssize_t jesd204b_pll_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct jesd204b_phy_state *st = dev_get_drvdata(dev);
+
+ read_plls(st);
+ if (st->pll == CPLL)
+ return sprintf(buf, "cpll\n");
+ return sprintf(buf, "qpll\n");
+}
+
+static ssize_t jesd204b_configure_pll(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct jesd204b_phy_state *st = dev_get_drvdata(dev);
+ unsigned val;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val > QPLL) {
+ dev_err(dev, "Invalid PLL selection %d; valid values:\n"
+ "0 = CPLL\n"
+ "2 = QPLL0 (UltraScale only)\n"
+ "3 = QPLL (7 series) / QPLL1 (UltraScale)\n", val);
+ return -EINVAL;
+ }
+ st->pll = val;
+ configure_plls(st, val);
+
+ return count;
+}
+
+static DEVICE_ATTR(configure_pll, S_IWUSR | S_IRUSR, jesd204b_pll_read,
+ jesd204b_configure_pll);
+
+static ssize_t jesd204b_linerate_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct jesd204b_phy_state *st = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%X\n", st->band);
+}
+
+static ssize_t jesd204b_linerate_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct jesd204b_phy_state *st = dev_get_drvdata(dev);
+ int ret;
+ /* Low frequencies are not supported by qpll */
+
+ ret = kstrtouint(buf, 0, &st->band);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "Setting the line rate band to %d\n", st->band);
+ /* QPLL - freq options in phy
+ * 62.5
+ * 78.125
+ * 94.697
+ * 97.656
+ * 125.000
+ * 156.25
+ * 187.5
+ * 189.394
+ * 195.313
+ * 234.375
+ * 250.000
+ * 284.091
+ * 292.969
+ */
+ if (st->band == 2)
+ clk_set_rate(st->clk, 62500000); /* 2.5G */
+ else if (st->band == 4)
+ clk_set_rate(st->clk, 97656000); /* 3.9G */
+ else if (st->band == 6)
+ clk_set_rate(st->clk, 125000000); /* 5G */
+ else if (st->band == 7)
+ clk_set_rate(st->clk, 156250000); /* 6.25G */
+ else if (st->band == 8)
+ clk_set_rate(st->clk, 195313000); /* 7.812G */
+ else if (st->band == 9)
+ clk_set_rate(st->clk, 250000000);/* 10G */
+
+ jesd204_phy_init(st, st->band);
+
+ return count;
+}
+
+static DEVICE_ATTR(line_rate_band, S_IWUSR | S_IRUSR, jesd204b_linerate_read,
+ jesd204b_linerate_write);
+
+/* Match table for of_platform binding */
+static const struct of_device_id jesd204b_phy_of_match[] = {
+ { .compatible = "xlnx,jesd204-phy-2.0", },
+ { /* end of list */ },
+};
+
+static int jesd204b_phy_probe(struct platform_device *pdev)
+{
+ struct jesd204b_phy_state *st;
+ struct resource *mem; /* IO mem resources */
+ int ret;
+ u32 ref_clk;
+
+ st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(st->clk))
+ return -EPROBE_DEFER;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ st->phy = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(st->phy)) {
+ dev_err(&pdev->dev, "Failed ioremap\n");
+ return PTR_ERR(st->phy);
+ }
+ st->dev = &pdev->dev;
+ platform_set_drvdata(pdev, st);
+
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,lanes",
+ &st->lanes);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read required dt property\n");
+ return ret;
+ }
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,pll-selection",
+ &st->pll);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read required dt property\n");
+ return ret;
+ }
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,gt-refclk-freq",
+ &ref_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read required dt property\n");
+ return ret;
+ }
+
+ clk_set_rate(st->clk, (unsigned long)ref_clk);
+ device_create_file(&pdev->dev, &dev_attr_configure_pll);
+ device_create_file(&pdev->dev, &dev_attr_line_rate_band);
+
+ ret = clk_prepare_enable(st->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int jesd204b_phy_remove(struct platform_device *pdev)
+{
+ struct jesd204b_phy_state *st = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(st->clk);
+ device_remove_file(&pdev->dev, &dev_attr_configure_pll);
+ device_remove_file(&pdev->dev, &dev_attr_line_rate_band);
+ return 0;
+}
+
+static struct platform_driver jesd204b_driver = {
+ .driver = {
+ .name = "jesd204b_phy",
+ .of_match_table = jesd204b_phy_of_match,
+ },
+ .probe = jesd204b_phy_probe,
+ .remove = jesd204b_phy_remove,
+};
+
+module_platform_driver(jesd204b_driver);
+
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhraj@xilinx.com>");
+MODULE_DESCRIPTION("AXI-JESD204B Phy Interface Module");
+MODULE_LICENSE("GPL");
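
The configure_pll and line_rate_band attributes registered by the probe are ordinary sysfs files, so band selection can be exercised from user space. A small user-space sketch; the sysfs path is an assumption, since the real path depends on where the platform device binds:

#include <stdio.h>

/* Hypothetical sysfs path for the bound jesd204b_phy device */
#define LINE_RATE_ATTR \
	"/sys/bus/platform/devices/jesd204b_phy/line_rate_band"

int main(void)
{
	FILE *f = fopen(LINE_RATE_ATTR, "w");

	if (!f)
		return 1;
	/* Band 7 selects a 156.25 MHz reference, i.e. a 6.25G line rate */
	fputs("7", f);
	fclose(f);
	return 0;
}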
diff --git a/drivers/misc/jesd204b/jesd_phy.h b/drivers/misc/jesd204b/jesd_phy.h
new file mode 100644
index 000000000000..c15328532c3f
--- /dev/null
+++ b/drivers/misc/jesd204b/jesd_phy.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef JESD_PHY_H_
+#define JESD_PHY_H_
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+struct jesd204b_phy_state {
+ struct device *dev;
+ void __iomem *phy;
+ struct clk *clk;
+ u32 vers_id;
+ u32 addr;
+ u32 lanes;
+ u32 band;
+ u32 pll;
+ unsigned long rate;
+};
+
+int jesd204_phy_set_loop(struct jesd204b_phy_state *st, u32 loopval);
+
+#endif /* JESD_PHY_H_ */
diff --git a/drivers/misc/jesd204b/s7_gtxe2_drp.h b/drivers/misc/jesd204b/s7_gtxe2_drp.h
new file mode 100644
index 000000000000..f08a211432bd
--- /dev/null
+++ b/drivers/misc/jesd204b/s7_gtxe2_drp.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#define TXOUT_DIV_ADDR 0x88
+#define TXOUT_DIV_MASK 0x70
+#define TXOUT_DIV_OFFSET 0x4
+#define TXOUT_DIV_WIDTH 0x3
+#define TXOUT_DIV_DEFAULT 0x0
+
+#define RXOUT_DIV_ADDR 0x88
+#define RXOUT_DIV_MASK 0x7
+#define RXOUT_DIV_OFFSET 0x0
+#define RXOUT_DIV_WIDTH 0x3
+#define RXOUT_DIV_DEFAULT 0x0
+
+#define RXCDR_CFG0_ADDR 0xa8
+#define RXCDR_CFG0_MASK 0xffff
+#define RXCDR_CFG0_OFFSET 0x0
+#define RXCDR_CFG0_WIDTH 0x10
+#define RXCDR_CFG0_DEFAULT 0x0
+
+#define RXCDR_CFG1_ADDR 0xa9
+#define RXCDR_CFG1_MASK 0xffff
+#define RXCDR_CFG1_OFFSET 0x0
+#define RXCDR_CFG1_WIDTH 0x10
+#define RXCDR_CFG1_DEFAULT 0x0
+
+#define RXCDR_CFG2_ADDR 0xaa
+#define RXCDR_CFG2_MASK 0xffff
+#define RXCDR_CFG2_OFFSET 0x0
+#define RXCDR_CFG2_WIDTH 0x10
+#define RXCDR_CFG2_DEFAULT 0x0
+
+#define RXCDR_CFG3_ADDR 0xab
+#define RXCDR_CFG3_MASK 0xffff
+#define RXCDR_CFG3_OFFSET 0x0
+#define RXCDR_CFG3_WIDTH 0x10
+#define RXCDR_CFG3_DEFAULT 0x0
+
+#define RXCDR_CFG4_ADDR 0xac
+#define RXCDR_CFG4_MASK 0xff
+#define RXCDR_CFG4_OFFSET 0x0
+#define RXCDR_CFG4_WIDTH 0x8
+#define RXCDR_CFG4_DEFAULT 0x0
+
+#define RX_DFE_LPM_CFG_ADDR 0x29
+#define RX_DFE_LPM_CFG_MASK 0xffff
+#define RX_DFE_LPM_CFG_OFFSET 0x0
+#define RX_DFE_LPM_CFG_WIDTH 0x10
+#define RX_DFE_LPM_CFG_DEFAULT 0x0
+
+#define QPLL_CFG0_ADDR 0x32
+#define QPLL_CFG0_MASK 0xffff
+#define QPLL_CFG0_OFFSET 0x0
+#define QPLL_CFG0_WIDTH 0x10
+#define QPLL_CFG0_DEFAULT 0x0
+
+#define QPLL_CFG1_ADDR 0x33
+#define QPLL_CFG1_MASK 0x7ff
+#define QPLL_CFG1_OFFSET 0x0
+#define QPLL_CFG1_WIDTH 0xb
+#define QPLL_CFG1_DEFAULT 0x0
+
+#define QPLL_REFCLK_DIV_M_ADDR 0x33
+#define QPLL_REFCLK_DIV_M_MASK 0xf800
+#define QPLL_REFCLK_DIV_M_OFFSET 0xb
+#define QPLL_REFCLK_DIV_M_WIDTH 0x5
+#define QPLL_REFCLK_DIV_M_DEFAULT 0x0
+
+#define QPLL_FBDIV_N_ADDR 0x36
+#define QPLL_FBDIV_N_MASK 0x3ff
+#define QPLL_FBDIV_N_OFFSET 0x0
+#define QPLL_FBDIV_N_WIDTH 0xa
+#define QPLL_FBDIV_N_DEFAULT 0x0
+
+#define QPLL_FBDIV_RATIO_ADDR 0x37
+#define QPLL_FBDIV_RATIO_MASK 0x40
+#define QPLL_FBDIV_RATIO_OFFSET 0x6
+#define QPLL_FBDIV_RATIO_WIDTH 0x1
+#define QPLL_FBDIV_RATIO_DEFAULT 0x0
+
+#define CPLL_CFG0_ADDR 0x5c
+#define CPLL_CFG0_MASK 0xff00
+#define CPLL_CFG0_OFFSET 0x8
+#define CPLL_CFG0_WIDTH 0x8
+#define CPLL_CFG0_DEFAULT 0x0
+
+#define CPLL_CFG1_ADDR 0x5d
+#define CPLL_CFG1_MASK 0xffff
+#define CPLL_CFG1_OFFSET 0x0
+#define CPLL_CFG1_WIDTH 0x10
+#define CPLL_CFG1_DEFAULT 0x0
+
+#define CPLL_REFCLK_DIV_M_ADDR 0x5e
+#define CPLL_REFCLK_DIV_M_MASK 0x1f00
+#define CPLL_REFCLK_DIV_M_OFFSET 0x8
+#define CPLL_REFCLK_DIV_M_WIDTH 0x5
+#define CPLL_REFCLK_DIV_M_DEFAULT 0x0
+
+#define CPLL_FB_DIV_45_N1_ADDR 0x5e
+#define CPLL_FB_DIV_45_N1_MASK 0x80
+#define CPLL_FB_DIV_45_N1_OFFSET 0x7
+#define CPLL_FB_DIV_45_N1_WIDTH 0x1
+#define CPLL_FB_DIV_45_N1_DEFAULT 0x0
+
+#define CPLL_FBDIV_N2_ADDR 0x5e
+#define CPLL_FBDIV_N2_MASK 0x7f
+#define CPLL_FBDIV_N2_OFFSET 0x0
+#define CPLL_FBDIV_N2_WIDTH 0x7
+#define CPLL_FBDIV_N2_DEFAULT 0x0
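
Every field above is described by an ADDR/MASK/OFFSET/WIDTH quadruple, which lends itself to generic extract/insert helpers rather than open-coded shifting at each call site. A minimal sketch of the idiom using these definitions:

#include <linux/types.h>
#include "s7_gtxe2_drp.h"

/* Extract a field from a raw DRP word */
static inline u32 drp_field_get(u32 reg, u32 mask, u32 offset)
{
	return (reg & mask) >> offset;
}

/* Return the DRP word with one field replaced */
static inline u32 drp_field_set(u32 reg, u32 mask, u32 offset, u32 val)
{
	return (reg & ~mask) | ((val << offset) & mask);
}

/* Example: pull the QPLL feedback divider out of the word at DRP 0x36 */
static u32 qpll_fbdiv_n(u32 raw)
{
	return drp_field_get(raw, QPLL_FBDIV_N_MASK, QPLL_FBDIV_N_OFFSET);
}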
diff --git a/drivers/misc/jesd204b/xilinx_jesd204b.c b/drivers/misc/jesd204b/xilinx_jesd204b.c
new file mode 100644
index 000000000000..304557c8978e
--- /dev/null
+++ b/drivers/misc/jesd204b/xilinx_jesd204b.c
@@ -0,0 +1,399 @@
+/*
+ * Xilinx AXI-JESD204B Interface Module
+ *
+ * Copyright 2014 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ *
+ * http://wiki.analog.com/resources/fpga/xilinx/
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "xilinx_jesd204b.h"
+
+struct child_clk {
+ struct clk_hw hw;
+ struct jesd204b_state *st;
+ unsigned long rate;
+ bool enabled;
+};
+
+#define to_clk_priv(_hw) container_of(_hw, struct child_clk, hw)
+
+static inline void jesd204b_write(struct jesd204b_state *st,
+ unsigned int reg, unsigned int val)
+{
+ iowrite32(val, st->regs + reg);
+}
+
+static inline unsigned int jesd204b_read(struct jesd204b_state *st,
+ unsigned int reg)
+{
+ return ioread32(st->regs + reg);
+}
+
+static ssize_t jesd204b_laneinfo_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, unsigned int lane)
+{
+ struct jesd204b_state *st = dev_get_drvdata(dev);
+ int ret;
+ unsigned int val1, val2, val3;
+
+ val1 = jesd204b_read(st, XLNX_JESD204_REG_ID_L(lane));
+ val2 = jesd204b_read(st, XLNX_JESD204_REG_LANE_F(lane));
+ val3 = jesd204b_read(st, XLNX_JESD204_REG_SCR_S_HD_CF(lane));
+ ret = sprintf(buf,
+ "DID: %d, BID: %d, LID: %d, L: %d, SCR: %d, F: %d\n",
+ XLNX_JESD204_LANE_DID(val1),
+ XLNX_JESD204_LANE_BID(val1),
+ XLNX_JESD204_LANE_LID(val1),
+ XLNX_JESD204_LANE_L(val1),
+ XLNX_JESD204_LANE_SCR(val3),
+ XLNX_JESD204_LANE_F(val2));
+
+ val1 = jesd204b_read(st, XLNX_JESD204_REG_LANE_K(lane));
+ val2 = jesd204b_read(st, XLNX_JESD204_REG_M_N_ND_CS(lane));
+
+ ret += sprintf(buf + ret,
+ "K: %d, M: %d, N: %d, CS: %d, S: %d, N': %d, HD: %d\n",
+ XLNX_JESD204_LANE_K(val1),
+ XLNX_JESD204_LANE_M(val2),
+ XLNX_JESD204_LANE_N(val2),
+ XLNX_JESD204_LANE_CS(val2),
+ XLNX_JESD204_LANE_S(val3),
+ XLNX_JESD204_LANE_ND(val2),
+ XLNX_JESD204_LANE_HD(val3));
+
+ val1 = jesd204b_read(st, XLNX_JESD204_REG_FCHK(lane));
+ ret += sprintf(buf + ret, "FCHK: 0x%X, CF: %d\n",
+ XLNX_JESD204_LANE_FCHK(val1),
+ XLNX_JESD204_LANE_CF(val3));
+
+ val1 = jesd204b_read(st, XLNX_JESD204_REG_SC2_ADJ_CTRL(lane));
+ val2 = jesd204b_read(st, XLNX_JESD204_REG_LANE_VERSION(lane));
+ ret += sprintf(buf + ret,
+ "ADJCNT: %d, PHYADJ: %d, ADJDIR: %d, JESDV: %d, SUBCLASS: %d\n",
+ XLNX_JESD204_LANE_ADJ_CNT(val1),
+ XLNX_JESD204_LANE_PHASE_ADJ_REQ(val1),
+ XLNX_JESD204_LANE_ADJ_CNT_DIR(val1),
+ XLNX_JESD204_LANE_JESDV(val2),
+ XLNX_JESD204_LANE_SUBCLASS(val2));
+
+ ret += sprintf(buf + ret, "MFCNT : 0x%X\n",
+ jesd204b_read(st, XLNX_JESD204_REG_TM_MFC_CNT(lane)));
+ ret += sprintf(buf + ret, "ILACNT: 0x%X\n",
+ jesd204b_read(st, XLNX_JESD204_REG_TM_ILA_CNT(lane)));
+ ret += sprintf(buf + ret, "ERRCNT: 0x%X\n",
+ jesd204b_read(st, XLNX_JESD204_REG_TM_ERR_CNT(lane)));
+ ret += sprintf(buf + ret, "BUFCNT: 0x%X\n",
+ jesd204b_read(st, XLNX_JESD204_REG_TM_BUF_ADJ(lane)));
+ ret += sprintf(buf + ret, "LECNT: 0x%X\n",
+ jesd204b_read(st,
+ XLNX_JESD204_REG_TM_LINK_ERR_CNT(lane)));
+
+ ret += sprintf(buf + ret, "FC: %lu\n", st->rate);
+
+ return ret;
+}
+
+#define JESD_LANE(_x) \
+static ssize_t jesd204b_lane##_x##_info_read(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return jesd204b_laneinfo_read(dev, attr, buf, _x); \
+} \
+static DEVICE_ATTR(lane##_x##_info, 0400, jesd204b_lane##_x##_info_read, \
+ NULL)
+
+JESD_LANE(0);
+JESD_LANE(1);
+JESD_LANE(2);
+JESD_LANE(3);
+JESD_LANE(4);
+JESD_LANE(5);
+JESD_LANE(6);
+JESD_LANE(7);
+
+static ssize_t jesd204b_lane_syncstat_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, unsigned int lane)
+{
+ unsigned int stat;
+ struct jesd204b_state *st = dev_get_drvdata(dev);
+
+ stat = jesd204b_read(st, XLNX_JESD204_REG_SYNC_ERR_STAT);
+
+ return sprintf(buf,
+ "NOT_IN_TAB: %d, DISPARITY: %d, UNEXPECTED_K: %d\n",
+ stat & XLNX_JESD204_SYNC_ERR_NOT_IN_TAB(lane),
+ stat & XLNX_JESD204_SYNC_ERR_DISPARITY(lane),
+ stat & XLNX_JESD204_SYNC_ERR_UNEXPECTED_K(lane));
+}
+
+#define JESD_SYNCSTAT_LANE(_x) \
+static ssize_t jesd204b_lane##_x##_syncstat_read(struct device *dev, \
+ struct device_attribute *attr,\
+ char *buf) \
+{ \
+ return jesd204b_lane_syncstat_read(dev, attr, buf, _x); \
+} \
+static DEVICE_ATTR(lane##_x##_syncstat, 0400, \
+ jesd204b_lane##_x##_syncstat_read, NULL)
+
+JESD_SYNCSTAT_LANE(0);
+JESD_SYNCSTAT_LANE(1);
+JESD_SYNCSTAT_LANE(2);
+JESD_SYNCSTAT_LANE(3);
+JESD_SYNCSTAT_LANE(4);
+JESD_SYNCSTAT_LANE(5);
+JESD_SYNCSTAT_LANE(6);
+JESD_SYNCSTAT_LANE(7);
+
+static ssize_t jesd204b_reg_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct jesd204b_state *st = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = sscanf(buf, "%i %i", &st->addr, &val);
+ if (ret == 2)
+ jesd204b_write(st, st->addr, val);
+
+ return count;
+}
+
+static ssize_t jesd204b_reg_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct jesd204b_state *st = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%X\n", jesd204b_read(st, st->addr));
+}
+
+static DEVICE_ATTR(reg_access, 0600, jesd204b_reg_read,
+ jesd204b_reg_write);
+
+static ssize_t jesd204b_syncreg_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct jesd204b_state *st = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%X\n", jesd204b_read(st,
+ XLNX_JESD204_REG_SYNC_STATUS));
+}
+
+static DEVICE_ATTR(sync_status, 0400, jesd204b_syncreg_read, NULL);
+
+static unsigned long jesd204b_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return parent_rate;
+}
+
+static int jesd204b_clk_enable(struct clk_hw *hw)
+{
+ to_clk_priv(hw)->enabled = true;
+
+ return 0;
+}
+
+static void jesd204b_clk_disable(struct clk_hw *hw)
+{
+ to_clk_priv(hw)->enabled = false;
+}
+
+static int jesd204b_clk_is_enabled(struct clk_hw *hw)
+{
+ return to_clk_priv(hw)->enabled;
+}
+
+static const struct clk_ops clkout_ops = {
+ .recalc_rate = jesd204b_clk_recalc_rate,
+ .enable = jesd204b_clk_enable,
+ .disable = jesd204b_clk_disable,
+ .is_enabled = jesd204b_clk_is_enabled,
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id jesd204b_of_match[] = {
+ { .compatible = "xlnx,jesd204-5.1",},
+ { .compatible = "xlnx,jesd204-5.2",},
+ { .compatible = "xlnx,jesd204-6.1",},
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, jesd204b_of_match);
+
+static int jesd204b_probe(struct platform_device *pdev)
+{
+ struct jesd204b_state *st;
+ struct resource *mem; /* IO mem resources */
+ struct clk *clk;
+ struct child_clk *clk_priv;
+ struct clk_init_data init;
+ unsigned int val;
+ int ret;
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return -EPROBE_DEFER;
+
+ st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ st->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(st->regs)) {
+ dev_err(&pdev->dev, "Failed ioremap\n");
+ return PTR_ERR(st->regs);
+ }
+
+ st->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, st);
+
+ st->clk = clk;
+ clk_set_rate(st->clk, 156250000);
+ st->rate = clk_get_rate(clk);
+
+ of_property_read_u32(pdev->dev.of_node, "xlnx,node-is-transmit",
+ &st->transmit);
+
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,lanes",
+ &st->lanes);
+ if (ret)
+ st->lanes = jesd204b_read(st, XLNX_JESD204_REG_LANES) + 1;
+
+ jesd204b_write(st, XLNX_JESD204_REG_RESET, XLNX_JESD204_RESET);
+ while (!jesd204b_read(st, XLNX_JESD204_REG_RESET))
+ msleep(20);
+
+ jesd204b_write(st, XLNX_JESD204_REG_ILA_CTRL,
+ (of_property_read_bool(pdev->dev.of_node,
+ "xlnx,lanesync-enable") ? XLNX_JESD204_ILA_EN : 0));
+
+ jesd204b_write(st, XLNX_JESD204_REG_SCR_CTRL,
+ (of_property_read_bool(pdev->dev.of_node,
+ "xlnx,scramble-enable") ? XLNX_JESD204_SCR_EN : 0));
+
+ jesd204b_write(st, XLNX_JESD204_REG_SYSREF_CTRL,
+ (of_property_read_bool(pdev->dev.of_node,
+ "xlnx,sysref-always-enable") ?
+ XLNX_JESD204_ALWAYS_SYSREF_EN : 0));
+
+ device_create_file(&pdev->dev, &dev_attr_reg_access);
+
+ device_create_file(&pdev->dev, &dev_attr_sync_status);
+ switch (st->lanes) {
+ case 8:
+ device_create_file(&pdev->dev, &dev_attr_lane4_info);
+ device_create_file(&pdev->dev, &dev_attr_lane5_info);
+ device_create_file(&pdev->dev, &dev_attr_lane6_info);
+ device_create_file(&pdev->dev, &dev_attr_lane7_info);
+ if (!st->transmit) {
+ device_create_file(&pdev->dev,
+ &dev_attr_lane4_syncstat);
+ device_create_file(&pdev->dev,
+ &dev_attr_lane5_syncstat);
+ device_create_file(&pdev->dev,
+ &dev_attr_lane6_syncstat);
+ device_create_file(&pdev->dev,
+ &dev_attr_lane7_syncstat);
+ }
+ /* fall through */
+ case 4:
+ device_create_file(&pdev->dev, &dev_attr_lane2_info);
+ device_create_file(&pdev->dev, &dev_attr_lane3_info);
+ if (!st->transmit) {
+ device_create_file(&pdev->dev,
+ &dev_attr_lane2_syncstat);
+ device_create_file(&pdev->dev,
+ &dev_attr_lane3_syncstat);
+ }
+ /* fall through */
+ case 2:
+ device_create_file(&pdev->dev, &dev_attr_lane1_info);
+ if (!st->transmit)
+ device_create_file(&pdev->dev,
+ &dev_attr_lane1_syncstat);
+ /* fall through */
+ case 1:
+ device_create_file(&pdev->dev, &dev_attr_lane0_info);
+ if (!st->transmit)
+ device_create_file(&pdev->dev,
+ &dev_attr_lane0_syncstat);
+ break;
+ default:
+ break;
+ }
+
+ clk_priv = devm_kzalloc(&pdev->dev, sizeof(*clk_priv), GFP_KERNEL);
+ if (!clk_priv)
+ return -ENOMEM;
+
+ /* struct child_clk assignments */
+ clk_priv->hw.init = &init;
+ clk_priv->rate = st->rate;
+ clk_priv->st = st;
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+ val = jesd204b_read(st, XLNX_JESD204_REG_VERSION);
+
+ dev_info(&pdev->dev,
+ "AXI-JESD204B %d.%d Rev %d, at 0x%08llX mapped to 0x%p",
+ XLNX_JESD204_VERSION_MAJOR(val),
+ XLNX_JESD204_VERSION_MINOR(val),
+ XLNX_JESD204_VERSION_REV(val),
+ (unsigned long long)mem->start, st->regs);
+
+ return 0;
+}
+
+static int jesd204b_remove(struct platform_device *pdev)
+{
+ struct jesd204b_state *st = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(st->clk);
+
+ return 0;
+}
+
+static struct platform_driver jesd204b_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = jesd204b_of_match,
+ },
+ .probe = jesd204b_probe,
+ .remove = jesd204b_remove,
+};
+
+module_platform_driver(jesd204b_driver);
+
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AXI-JESD204B Interface Module");
+MODULE_LICENSE("GPL v2");
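
The probe above allocates clk_priv and a clk_init_data and defines clkout_ops, but as posted it never fills the init fields or registers the child clock with the common clock framework. A sketch of how that registration would typically be completed inside jesd204b_probe(); the clock name is a hypothetical choice, not taken from the patch:

struct clk *clk_out;

/* Hypothetical output-clock name, purely illustrative */
init.name = "jesd204b_out";
init.ops = &clkout_ops;
init.parent_names = NULL;
init.num_parents = 0;
init.flags = 0;
clk_priv->hw.init = &init;

clk_out = devm_clk_register(&pdev->dev, &clk_priv->hw);
if (IS_ERR(clk_out))
	return PTR_ERR(clk_out);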
diff --git a/drivers/misc/jesd204b/xilinx_jesd204b.h b/drivers/misc/jesd204b/xilinx_jesd204b.h
new file mode 100644
index 000000000000..b9946a723a23
--- /dev/null
+++ b/drivers/misc/jesd204b/xilinx_jesd204b.h
@@ -0,0 +1,135 @@
+/*
+ * Xilinx AXI-JESD204B v5.1 Interface Module
+ *
+ * Copyright 2014 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ *
+ * http://wiki.analog.com/resources/fpga/xilinx/
+ */
+
+#ifndef XILINX_JESD204B_H_
+#define XILINX_JESD204B_H_
+
+struct jesd204b_state {
+ struct device *dev;
+ void __iomem *regs;
+ void __iomem *phy;
+ struct clk *clk;
+ u32 lanes;
+ u32 vers_id;
+ u32 addr;
+ u32 band;
+ u32 transmit;
+ u32 pll;
+ unsigned long rate;
+};
+
+#define XLNX_JESD204_REG_VERSION 0x000
+#define XLNX_JESD204_VERSION_MAJOR(x) (((x) >> 24) & 0xFF)
+#define XLNX_JESD204_VERSION_MINOR(x) (((x) >> 16) & 0xFF)
+#define XLNX_JESD204_VERSION_REV(x) (((x) >> 8) & 0xFF)
+
+#define XLNX_JESD204_REG_RESET 0x004
+#define XLNX_JESD204_RESET (1 << 0)
+
+#define XLNX_JESD204_REG_ILA_CTRL 0x008
+#define XLNX_JESD204_ILA_EN (1 << 0)
+
+#define XLNX_JESD204_REG_SCR_CTRL 0x00C
+#define XLNX_JESD204_SCR_EN (1 << 0)
+
+#define XLNX_JESD204_REG_SYSREF_CTRL 0x010
+#define XLNX_JESD204_ALWAYS_SYSREF_EN (1 << 0)
+
+#define XLNX_JESD204_REG_ILA_MFC 0x014
+#define XLNX_JESD204_ILA_MFC(x) (((x) - 1) & 0xFF)
+ /* TX only 4..256 */
+
+#define XLNX_JESD204_REG_TEST_MODE_SEL 0x018
+#define XLNX_JESD204_TEST_MODE_OFF 0 /* Normal operation */
+#define XLNX_JESD204_TEST_MODE_K28_5 1 /* Send/Receive /K28.5/
+ * indefinitely
+ */
+#define XLNX_JESD204_TEST_MODE_ILA 2 /* Synchronize as normal then
+ * send/receive repeated ILA
+ * sequences
+ */
+#define XLNX_JESD204_TEST_MODE_D21_5 3 /* Send/Receive /D21.5/
+ * indefinitely
+ */
+#define XLNX_JESD204_TEST_MODE_RPAT 5 /* Send/Receive modified
+ * random pattern (RPAT)
+ */
+#define XLNX_JESD204_TEST_MODE_JSPAT 7 /* Send/Receive a scrambled
+ * jitter pattern (JSPAT)
+ */
+
+#define XLNX_JESD204_REG_SYNC_STATUS 0x038 /* Link SYNC status */
+#define XLNX_JESD204_REG_SYNC_ERR_STAT 0x01C /* RX only */
+#define XLNX_JESD204_SYNC_ERR_NOT_IN_TAB(lane) (1 << (0 + (lane) * 3))
+#define XLNX_JESD204_SYNC_ERR_DISPARITY(lane) (1 << (1 + (lane) * 3))
+#define XLNX_JESD204_SYNC_ERR_UNEXPECTED_K(lane) (1 << (2 + (lane) * 3))
+
+#define XLNX_JESD204_REG_OCTETS_PER_FRAME 0x020
+#define XLNX_JESD204_OCTETS_PER_FRAME(x) (((x) - 1) & 0xFF) /* 1..256 */
+
+#define XLNX_JESD204_REG_FRAMES_PER_MFRAME 0x024
+#define XLNX_JESD204_FRAMES_PER_MFRAME(x) (((x) - 1) & 0x1F) /* 1..32 */
+
+#define XLNX_JESD204_REG_LANES 0x028
+#define XLNX_JESD204_LANES(x) (((x) - 1) & 0x1F) /* 1..32 */
+
+#define XLNX_JESD204_REG_SUBCLASS 0x02C
+
+#define XLNX_JESD204_REG_RX_BUF_DELAY 0x030 /* RX only */
+#define XLNX_JESD204_RX_BUF_DELAY(x) ((x) & 0x1FFF)
+
+#define XLNX_JESD204_REG_RX_LINK_CTRL 0x034 /* RX only */
+#define XLNX_JESD204_LINK_TEST_EN (1 << 0)
+#define XLNX_JESD204_SYNC_ERR_REP_DIS (1 << 8)
+
+/* Per LANE Registers */
+#define XLNX_JESD204_REG_LANE_VERSION(l) (0x800 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_SUBCLASS(x) (((x) >> 0) & 0x7)
+#define XLNX_JESD204_LANE_JESDV(x) (((x) >> 8) & 0x7)
+
+#define XLNX_JESD204_REG_LANE_F(l) (0x804 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_F(x) ((((x) >> 0) & 0xFF) + 1)
+
+#define XLNX_JESD204_REG_LANE_K(l) (0x808 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_K(x) ((((x) >> 0) & 0x1F) + 1)
+
+#define XLNX_JESD204_REG_ID_L(l) (0x80C + ((l) * 0x40))
+#define XLNX_JESD204_LANE_DID(x) (((x) >> 0) & 0xFF)
+#define XLNX_JESD204_LANE_BID(x) (((x) >> 8) & 0x1F)
+#define XLNX_JESD204_LANE_LID(x) (((x) >> 16) & 0x1F)
+#define XLNX_JESD204_LANE_L(x) ((((x) >> 24) & 0x1F) + 1)
+
+#define XLNX_JESD204_REG_M_N_ND_CS(l) (0x810 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_M(x) ((((x) >> 0) & 0xFF) + 1)
+#define XLNX_JESD204_LANE_N(x) ((((x) >> 8) & 0x1F) + 1)
+#define XLNX_JESD204_LANE_ND(x) ((((x) >> 16) & 0x1F) + 1)
+#define XLNX_JESD204_LANE_CS(x) (((x) >> 24) & 0x3)
+
+#define XLNX_JESD204_REG_SCR_S_HD_CF(l) (0x814 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_SCR(x) (((x) >> 0) & 0x1)
+#define XLNX_JESD204_LANE_S(x) ((((x) >> 8) & 0x1F) + 1)
+#define XLNX_JESD204_LANE_HD(x) (((x) >> 16) & 0x1)
+#define XLNX_JESD204_LANE_CF(x) (((x) >> 24) & 0x1F)
+
+#define XLNX_JESD204_REG_FCHK(l) (0x818 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_FCHK(x) (((x) >> 16) & 0xFF)
+
+#define XLNX_JESD204_REG_SC2_ADJ_CTRL(l) (0x81C + ((l) * 0x40))
+#define XLNX_JESD204_LANE_ADJ_CNT(x) (((x) >> 0) & 0xF)
+#define XLNX_JESD204_LANE_PHASE_ADJ_REQ(x) (((x) >> 8) & 0x1)
+#define XLNX_JESD204_LANE_ADJ_CNT_DIR(x) (((x) >> 16) & 0x1)
+
+#define XLNX_JESD204_REG_TM_ERR_CNT(l) (0x820 + ((l) * 0x40))
+#define XLNX_JESD204_REG_TM_LINK_ERR_CNT(l) (0x824 + ((l) * 0x40))
+#define XLNX_JESD204_REG_TM_ILA_CNT(l) (0x828 + ((l) * 0x40))
+#define XLNX_JESD204_REG_TM_MFC_CNT(l) (0x82C + ((l) * 0x40))
+#define XLNX_JESD204_REG_TM_BUF_ADJ(l) (0x830 + ((l) * 0x40))
+
+#endif /* XILINX_JESD204B_H_ */
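
The per-lane macros above decode fields out of packed ILA configuration words; link parameters such as F, K, L, M, N and S are stored minus one and the accessors add the one back. A short sketch of the intended decode pattern for the lane ID word:

#include <linux/device.h>
#include "xilinx_jesd204b.h"

/* Decode one lane's ID_L word; 'val' is the raw register value read
 * from XLNX_JESD204_REG_ID_L(lane).
 */
static void print_lane_ids(struct device *dev, u32 val)
{
	dev_info(dev, "DID %u BID %u LID %u L %u\n",
		 XLNX_JESD204_LANE_DID(val),
		 XLNX_JESD204_LANE_BID(val),
		 XLNX_JESD204_LANE_LID(val),
		 XLNX_JESD204_LANE_L(val));
}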
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 0a2b99e1af45..27262e701f30 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -178,7 +178,7 @@ static int mei_fwver(struct mei_cl_device *cldev)
ret = __mei_cl_send(cldev->cl, buf, sizeof(struct mkhi_msg_hdr),
MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
- dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n");
+ dev_err(&cldev->dev, "Could not send ReqFWVersion cmd ret = %d\n", ret);
return ret;
}
@@ -190,7 +190,7 @@ static int mei_fwver(struct mei_cl_device *cldev)
* Should be at least one version block,
* error out if nothing found
*/
- dev_err(&cldev->dev, "Could not read FW version\n");
+ dev_err(&cldev->dev, "Could not read FW version ret = %d\n", bytes_recv);
return -EIO;
}
@@ -339,7 +339,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd),
MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
- dev_err(bus->dev, "Could not send IF version cmd\n");
+ dev_err(bus->dev, "Could not send IF version cmd ret = %d\n", ret);
return ret;
}
@@ -354,7 +354,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
ret = 0;
bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0, 0);
if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) {
- dev_err(bus->dev, "Could not read IF version\n");
+ dev_err(bus->dev, "Could not read IF version ret = %d\n", bytes_recv);
ret = -EIO;
goto err;
}
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index c742ab02ae18..524ded87964d 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -259,6 +259,8 @@ static long afu_ioctl(struct file *file, unsigned int cmd,
if (IS_ERR(ev_ctx))
return PTR_ERR(ev_ctx);
rc = ocxl_irq_set_handler(ctx, irq_id, irq_handler, irq_free, ev_ctx);
+ if (rc)
+ eventfd_ctx_put(ev_ctx);
break;
case OCXL_IOCTL_GET_METADATA:
@@ -541,8 +543,11 @@ int ocxl_file_register_afu(struct ocxl_afu *afu)
goto err_put;
rc = device_register(&info->dev);
- if (rc)
- goto err_put;
+ if (rc) {
+ free_minor(info);
+ put_device(&info->dev);
+ return rc;
+ }
rc = ocxl_sysfs_register_afu(info);
if (rc)
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 1154f0435b0a..478d6118550e 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -590,6 +590,10 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
struct pci_dev *pdev = test->pdev;
mutex_lock(&test->mutex);
+
+ reinit_completion(&test->irq_raised);
+ test->last_irq = -ENODATA;
+
switch (cmd) {
case PCITEST_BAR:
bar = arg;
@@ -774,6 +778,9 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
if (id < 0)
return;
+ pci_endpoint_test_release_irq(test);
+ pci_endpoint_test_free_irq_vectors(test);
+
misc_deregister(&test->miscdev);
kfree(misc_device->name);
ida_simple_remove(&pci_endpoint_test_ida, id);
@@ -782,9 +789,6 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
pci_iounmap(pdev, test->bar[bar]);
}
- pci_endpoint_test_release_irq(test);
- pci_endpoint_test_free_irq_vectors(test);
-
pci_release_regions(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 4b713a80b572..7f26a78bb403 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -648,6 +648,7 @@ int gru_handle_user_call_os(unsigned long cb)
if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
return -EINVAL;
+again:
gts = gru_find_lock_gts(cb);
if (!gts)
return -EINVAL;
@@ -656,7 +657,11 @@ int gru_handle_user_call_os(unsigned long cb)
if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
goto exit;
- gru_check_context_placement(gts);
+ if (gru_check_context_placement(gts)) {
+ gru_unlock_gts(gts);
+ gru_unload_context(gts, 1);
+ goto again;
+ }
/*
* CCH may contain stale data if ts_force_cch_reload is set.
@@ -874,7 +879,11 @@ int gru_set_context_option(unsigned long arg)
} else {
gts->ts_user_blade_id = req.val1;
gts->ts_user_chiplet_id = req.val0;
- gru_check_context_placement(gts);
+ if (gru_check_context_placement(gts)) {
+ gru_unlock_gts(gts);
+ gru_unload_context(gts, 1);
+ return ret;
+ }
}
break;
case sco_gseg_owner:
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 40ac59dd018c..e2325e3d077e 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -716,9 +716,10 @@ static int gru_check_chiplet_assignment(struct gru_state *gru,
* chiplet. Misassignment can occur if the process migrates to a different
* blade or if the user changes the selected blade/chiplet.
*/
-void gru_check_context_placement(struct gru_thread_state *gts)
+int gru_check_context_placement(struct gru_thread_state *gts)
{
struct gru_state *gru;
+ int ret = 0;
/*
* If the current task is the context owner, verify that the
@@ -726,15 +727,23 @@ void gru_check_context_placement(struct gru_thread_state *gts)
* references. Pthread apps use non-owner references to the CBRs.
*/
gru = gts->ts_gru;
+ /*
+ * If gru or gts->ts_tgid_owner isn't initialized properly, return
+ * success to indicate that the caller does not need to unload the
+ * gru context. The caller is responsible for its inspection and
+ * reinitialization if needed.
+ */
if (!gru || gts->ts_tgid_owner != current->tgid)
- return;
+ return ret;
if (!gru_check_chiplet_assignment(gru, gts)) {
STAT(check_context_unload);
- gru_unload_context(gts, 1);
+ ret = -EINVAL;
} else if (gru_retarget_intr(gts)) {
STAT(check_context_retarget_intr);
}
+
+ return ret;
}
@@ -934,7 +943,12 @@ again:
mutex_lock(&gts->ts_ctxlock);
preempt_disable();
- gru_check_context_placement(gts);
+ if (gru_check_context_placement(gts)) {
+ preempt_enable();
+ mutex_unlock(&gts->ts_ctxlock);
+ gru_unload_context(gts, 1);
+ return VM_FAULT_NOPAGE;
+ }
if (!gts->ts_gru) {
STAT(load_user_context);
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index a7e44b2eb413..6cebec4dd316 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -637,7 +637,7 @@ extern int gru_user_flush_tlb(unsigned long arg);
extern int gru_user_unload_context(unsigned long arg);
extern int gru_get_exception_detail(unsigned long arg);
extern int gru_set_context_option(unsigned long address);
-extern void gru_check_context_placement(struct gru_thread_state *gts);
+extern int gru_check_context_placement(struct gru_thread_state *gts);
extern int gru_cpu_fault_map_id(void);
extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
extern void gru_flush_all_tlb(struct gru_state *gru);
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 7d9e23aa0b92..c19460e7f0f1 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -15,6 +15,7 @@
#include <linux/skbuff.h>
#include <linux/ti_wilink_st.h>
+#include <linux/netdevice.h>
extern void st_kim_recv(void *, const unsigned char *, long);
void st_int_recv(void *, const unsigned char *, long);
@@ -423,7 +424,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
case ST_LL_AWAKE_TO_ASLEEP:
pr_err("ST LL is illegal state(%ld),"
"purging received skb.", st_ll_getstate(st_gdata));
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
break;
case ST_LL_ASLEEP:
skb_queue_tail(&st_gdata->tx_waitq, skb);
@@ -432,7 +433,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
default:
pr_err("ST LL is illegal state(%ld),"
"purging received skb.", st_ll_getstate(st_gdata));
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
break;
}
@@ -486,7 +487,7 @@ void st_tx_wakeup(struct st_data_s *st_data)
spin_unlock_irqrestore(&st_data->lock, flags);
break;
}
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
spin_unlock_irqrestore(&st_data->lock, flags);
}
/* if wake-up is set in another context- restart sending */
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index e6b40aa8fb42..8f0ffb46bf15 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -190,7 +190,7 @@ static void tifm_7xx1_switch_media(struct work_struct *work)
spin_unlock_irqrestore(&fm->lock, flags);
}
if (sock)
- tifm_free_device(&sock->dev);
+ put_device(&sock->dev);
}
spin_lock_irqsave(&fm->lock, flags);
}
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 833e2bd248a5..8ff8d649d9b3 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -160,10 +160,16 @@ static int vmci_host_close(struct inode *inode, struct file *filp)
static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
struct vmci_host_dev *vmci_host_dev = filp->private_data;
- struct vmci_ctx *context = vmci_host_dev->context;
+ struct vmci_ctx *context;
__poll_t mask = 0;
if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+ /*
+ * Read context only if ct_type == VMCIOBJ_CONTEXT to make
+ * sure that context is initialized
+ */
+ context = vmci_host_dev->context;
+
/* Check for VMCI calls to this VM context. */
if (wait)
poll_wait(filp, &context->host_context.wait_queue,
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index a49782dd903c..d4d388f021cc 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -852,6 +852,7 @@ static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
u32 context_id = vmci_get_context_id();
struct vmci_event_qp ev;
+ memset(&ev, 0, sizeof(ev));
ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_CONTEXT_RESOURCE_ID);
@@ -1465,6 +1466,7 @@ static int qp_notify_peer(bool attach,
* kernel.
*/
+ memset(&ev, 0, sizeof(ev));
ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_CONTEXT_RESOURCE_ID);
diff --git a/drivers/misc/xilinx_flex_pm.c b/drivers/misc/xilinx_flex_pm.c
new file mode 100644
index 000000000000..87ef2a6116ea
--- /dev/null
+++ b/drivers/misc/xilinx_flex_pm.c
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Flex NoC Performance Monitor driver.
+ * Copyright (c) 2019 Xilinx Inc.
+ */
+
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define to_xflex_dev_info(n) ((struct xflex_dev_info *)dev_get_drvdata(n))
+
+#define FPM_LAR_OFFSET 0xFB0
+#define FPM_UNLOCK 0xC5ACCE55
+
+#define FPM_RD_REQ_OFFSET 0x1000
+#define FPM_RD_RES_OFFSET 0x2000
+#define FPM_WR_REQ_OFFSET 0x3000
+#define FPM_WR_RES_OFFSET 0x4000
+
+#define FPM_PORT_SEL_OFFSET 0x134
+#define FPM_MAIN_CTRL_OFFSET 0x008
+#define FPM_SRC_SEL_OFFSET 0x138
+#define FPM_STATPERIOD 0x24
+#define FPM_CFGCTRL 0x0C
+#define FPM_LPD 0x4210002
+#define FPM_FPD 0x420c003
+
+#define FPM_VAL 0x300
+#define FPM_SRC 0x200
+#define FPM_WRRSP_L 0x70000
+#define FPM_WRREQ_L 0x60000
+#define FPM_RDRSP_L 0x50000
+#define FPM_RDREQ_L 0x40000
+#define FPM_PROBE_SHIFT 16
+#define FPM_COUNTER_OFFSET 0x14
+#define FPM_GLOBALEN BIT(0)
+#define FPM_STATEN BIT(3)
+#define FPM_STATCOND_DUMP BIT(5)
+#define FPM_NUM_COUNTERS 4
+#define FPM_MAINCTL_DIS 0
+
+#define FPM_SRC_OFF 0x0
+#define FPM_SRC_CYCLE 0x1
+#define FPM_SRC_IDLE 0x2
+#define FPM_SRC_XFER 0x3
+#define FPM_SRC_BUSY 0x4
+#define FPM_SRC_WAIT 0x5
+#define FPM_SRC_PACKET 0x6
+
+/* Port values */
+#define FPM_PORT_LPD_AFIFS_AXI 0x0
+#define FPM_PORT_LPD_OCM 0x1
+#define FPM_PORT_LPD_OCMEXT 0x2
+#define FPM_PORT_PMC_RPU_AXI0 0x3
+
+#define FPM_PORT_FPDAXI 0x1
+#define FPM_PORT_PROTXPPU 0x2
+
+/**
+ * struct xflex_dev_info - Global Driver structure
+ * @dev: Device structure
+ * @baselpd: Iomapped LPD base address
+ * @basefpd: Iomapped FPD base address
+ * @funnel: Iomapped funnel register base address
+ * @counterid_lpd: LPD counter id
+ * @counterid_fpd: FPD counter id
+ */
+struct xflex_dev_info {
+ struct device *dev;
+ void __iomem *baselpd;
+ void __iomem *basefpd;
+ void __iomem *funnel;
+ u32 counterid_fpd;
+ u32 counterid_lpd;
+};
+
+/**
+ * enum xflex_sysfs_cmd_codes - sysfs command codes
+ * @XFLEX_GET_COUNTER_FPD: get the FPD counter value
+ * @XFLEX_SET_COUNTER_FPD: set the FPD counter value
+ * @XFLEX_GET_COUNTER_FPD_RDREQ: get the FPD read request count
+ * @XFLEX_GET_COUNTER_FPD_RDRSP: get the FPD read response count
+ * @XFLEX_GET_COUNTER_FPD_WRREQ: get the FPD write request count
+ * @XFLEX_GET_COUNTER_FPD_WRRSP: get the FPD write response count
+ * @XFLEX_GET_COUNTER_LPD_RDREQ: get the LPD read request count
+ * @XFLEX_GET_COUNTER_LPD_RDRSP: get the LPD read response count
+ * @XFLEX_GET_COUNTER_LPD_WRREQ: get the LPD write request count
+ * @XFLEX_GET_COUNTER_LPD_WRRSP: get the LPD write response count
+ * @XFLEX_SET_COUNTER_LPD: set the LPD counter value
+ * @XFLEX_SET_SRC_COUNTER_LPD: set the LPD source
+ * @XFLEX_SET_SRC_COUNTER_FPD: set the FPD source
+ * @XFLEX_SET_PORT_COUNTER_LPD: set the LPD port
+ * @XFLEX_SET_PORT_COUNTER_FPD: set the FPD port
+ */
+enum xflex_sysfs_cmd_codes {
+ XFLEX_GET_COUNTER_FPD = 0,
+ XFLEX_SET_COUNTER_FPD,
+ XFLEX_GET_COUNTER_FPD_RDREQ,
+ XFLEX_GET_COUNTER_FPD_RDRSP,
+ XFLEX_GET_COUNTER_FPD_WRREQ,
+ XFLEX_GET_COUNTER_FPD_WRRSP,
+ XFLEX_GET_COUNTER_LPD_RDREQ,
+ XFLEX_GET_COUNTER_LPD_RDRSP,
+ XFLEX_GET_COUNTER_LPD_WRREQ,
+ XFLEX_GET_COUNTER_LPD_WRRSP,
+ XFLEX_SET_COUNTER_LPD,
+ XFLEX_SET_SRC_COUNTER_LPD,
+ XFLEX_SET_SRC_COUNTER_FPD,
+ XFLEX_SET_PORT_COUNTER_LPD,
+ XFLEX_SET_PORT_COUNTER_FPD,
+};
+
+static inline void fpm_reg(void __iomem *base, u32 val, u32 offset)
+{
+ writel(val, base + FPM_RD_REQ_OFFSET + offset);
+ writel(val, base + FPM_RD_RES_OFFSET + offset);
+ writel(val, base + FPM_WR_REQ_OFFSET + offset);
+ writel(val, base + FPM_WR_RES_OFFSET + offset);
+}
+
+static void reset_default(struct device *dev, u32 counter, u32 domain)
+{
+ struct xflex_dev_info *flexpm = to_xflex_dev_info(dev);
+ void __iomem *base = flexpm->basefpd;
+ u32 offset;
+
+ if (domain == FPM_LPD)
+ base = flexpm->baselpd;
+
+ fpm_reg(base, FPM_MAINCTL_DIS, FPM_MAIN_CTRL_OFFSET);
+ fpm_reg(base, FPM_STATEN | FPM_STATCOND_DUMP, FPM_MAIN_CTRL_OFFSET);
+
+ offset = FPM_PORT_SEL_OFFSET + counter * FPM_COUNTER_OFFSET;
+ fpm_reg(base, FPM_PORT_LPD_OCM, offset);
+ offset = FPM_SRC_SEL_OFFSET + counter * FPM_COUNTER_OFFSET;
+ fpm_reg(base, FPM_SRC_PACKET, offset);
+
+ fpm_reg(base, 0, FPM_STATPERIOD);
+ fpm_reg(base, FPM_GLOBALEN, FPM_CFGCTRL);
+}
+
+/**
+ * xflex_sysfs_cmd - Implements sysfs operations
+ * @dev: Device structure
+ * @buf: Value to write
+ * @cmd: sysfs cmd
+ *
+ * Return: value read from the sysfs cmd on success and negative error code
+ * otherwise.
+ */
+static int xflex_sysfs_cmd(struct device *dev, const char *buf,
+ enum xflex_sysfs_cmd_codes cmd)
+{
+ struct xflex_dev_info *flexpm = to_xflex_dev_info(dev);
+ u32 domain, src, offset, reg, val, counter;
+ int ret;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ u32 rdval = 0;
+ u32 pm_api_ret[4] = {0, 0, 0, 0};
+
+ if (IS_ERR_OR_NULL(eemi_ops))
+ return eemi_ops ? PTR_ERR(eemi_ops) : -ENODEV;
+
+ if (!eemi_ops->ioctl)
+ return -ENOTSUPP;
+
+ switch (cmd) {
+ case XFLEX_GET_COUNTER_LPD_WRRSP:
+ reg = flexpm->counterid_lpd | FPM_WRRSP_L | FPM_VAL;
+ ret = eemi_ops->ioctl(FPM_LPD, IOCTL_PROBE_COUNTER_READ,
+ reg, 0, &pm_api_ret[0]);
+ if (ret < 0) {
+ dev_err(dev, "Counter read error %d\n", ret);
+ return ret;
+ }
+
+ rdval = pm_api_ret[1];
+ break;
+
+ case XFLEX_GET_COUNTER_LPD_WRREQ:
+ reg = flexpm->counterid_lpd | FPM_WRREQ_L | FPM_VAL;
+ ret = eemi_ops->ioctl(FPM_LPD, IOCTL_PROBE_COUNTER_READ,
+ reg, 0, &pm_api_ret[0]);
+ if (ret < 0) {
+ dev_err(dev, "Counter read error %d\n", ret);
+ return ret;
+ }
+
+ rdval = pm_api_ret[1];
+ break;
+
+ case XFLEX_GET_COUNTER_LPD_RDRSP:
+ reg = flexpm->counterid_lpd | FPM_RDRSP_L | FPM_VAL;
+ ret = eemi_ops->ioctl(FPM_LPD, IOCTL_PROBE_COUNTER_READ,
+ reg, 0, &pm_api_ret[0]);
+ if (ret < 0) {
+ dev_err(dev, "Counter read error %d\n", ret);
+ return ret;
+ }
+
+ rdval = pm_api_ret[1];
+ break;
+
+ case XFLEX_GET_COUNTER_LPD_RDREQ:
+ reg = flexpm->counterid_lpd | FPM_RDREQ_L | FPM_VAL;
+ ret = eemi_ops->ioctl(FPM_LPD, IOCTL_PROBE_COUNTER_READ,
+ reg, 0, &pm_api_ret[0]);
+ if (ret < 0) {
+ dev_err(dev, "Counter read error %d\n", ret);
+ return ret;
+ }
+
+ rdval = pm_api_ret[1];
+ break;
+
+ case XFLEX_SET_COUNTER_LPD:
+ ret = kstrtou32(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ flexpm->counterid_lpd = val;
+ reset_default(dev, val, FPM_LPD);
+ break;
+
+ case XFLEX_SET_PORT_COUNTER_FPD:
+ ret = kstrtou32(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ counter = flexpm->counterid_fpd;
+ offset = FPM_PORT_SEL_OFFSET + counter * FPM_COUNTER_OFFSET;
+ fpm_reg(flexpm->basefpd, val, offset);
+ break;
+
+ case XFLEX_SET_PORT_COUNTER_LPD:
+ ret = kstrtou32(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ counter = flexpm->counterid_lpd;
+ offset = FPM_PORT_SEL_OFFSET + counter * FPM_COUNTER_OFFSET;
+ fpm_reg(flexpm->baselpd, val, offset);
+ break;
+
+ case XFLEX_SET_SRC_COUNTER_LPD:
+ reg = flexpm->counterid_lpd;
+ domain = FPM_LPD;
+ ret = kstrtou32(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ for (src = 0; src < FPM_NUM_COUNTERS; src++) {
+ reg = reg | FPM_SRC | (src << FPM_PROBE_SHIFT);
+ ret = eemi_ops->ioctl(domain, IOCTL_PROBE_COUNTER_WRITE,
+ reg, val, NULL);
+ if (ret < 0) {
+ dev_err(dev, "Counter write error %d\n", ret);
+ return ret;
+ }
+ }
+ break;
+
+ case XFLEX_SET_SRC_COUNTER_FPD:
+ reg = flexpm->counterid_fpd;
+ domain = FPM_FPD;
+ ret = kstrtou32(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ for (src = 0; src < FPM_NUM_COUNTERS; src++) {
+ reg = reg | FPM_SRC | (src << FPM_PROBE_SHIFT);
+ ret = eemi_ops->ioctl(domain, IOCTL_PROBE_COUNTER_WRITE,
+ reg, val, NULL);
+ if (ret < 0) {
+ dev_err(dev, "Counter write error %d\n", ret);
+ return ret;
+ }
+ }
+ break;
+
+ case XFLEX_SET_COUNTER_FPD:
+ ret = kstrtou32(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ flexpm->counterid_fpd = val;
+ reset_default(dev, val, FPM_FPD);
+ break;
+
+ case XFLEX_GET_COUNTER_FPD_WRRSP:
+ reg = flexpm->counterid_fpd | FPM_WRRSP_L | FPM_VAL;
+ ret = eemi_ops->ioctl(FPM_FPD, IOCTL_PROBE_COUNTER_READ,
+ reg, 0, &pm_api_ret[0]);
+ if (ret < 0) {
+ dev_err(dev, "Counter read error %d\n", ret);
+ return ret;
+ }
+
+ rdval = pm_api_ret[1];
+ break;
+
+ case XFLEX_GET_COUNTER_FPD_WRREQ:
+ reg = flexpm->counterid_fpd | FPM_WRREQ_L | FPM_VAL;
+ ret = eemi_ops->ioctl(FPM_FPD, IOCTL_PROBE_COUNTER_READ,
+ reg, 0, &pm_api_ret[0]);
+ if (ret < 0) {
+ dev_err(dev, "Counter read error %d\n", ret);
+ return ret;
+ }
+
+ rdval = pm_api_ret[1];
+ break;
+
+ case XFLEX_GET_COUNTER_FPD_RDRSP:
+ reg = flexpm->counterid_fpd | FPM_RDRSP_L | FPM_VAL;
+ ret = eemi_ops->ioctl(FPM_FPD, IOCTL_PROBE_COUNTER_READ,
+ reg, 0, &pm_api_ret[0]);
+ if (ret < 0) {
+ dev_err(dev, "Counter read error %d\n", ret);
+ return ret;
+ }
+
+ rdval = pm_api_ret[1];
+ break;
+
+ case XFLEX_GET_COUNTER_FPD_RDREQ:
+ reg = flexpm->counterid_fpd | FPM_RDREQ_L | FPM_VAL;
+ ret = eemi_ops->ioctl(FPM_FPD, IOCTL_PROBE_COUNTER_READ,
+ reg, 0, &pm_api_ret[0]);
+ if (ret < 0) {
+ dev_err(dev, "Counter read error %d\n", ret);
+ return ret;
+ }
+
+ rdval = pm_api_ret[1];
+ break;
+
+ default:
+ dev_err(dev, "Invalid option\n");
+ break;
+ }
+
+ return rdval;
+}
+
+/* Sysfs functions */
+
+static ssize_t counterfpd_wrreq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_FPD_WRREQ);
+
+ if (rdval < 0)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterfpd_wrreq);
+
+static ssize_t counterfpd_wrrsp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_FPD_WRRSP);
+
+ if (rdval < 0)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterfpd_wrrsp);
+
+static ssize_t counterfpd_rdreq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_FPD_RDREQ);
+
+ if (rdval < 0)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterfpd_rdreq);
+
+static ssize_t counterfpd_rdrsp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_FPD_RDRSP);
+
+ if (rdval < 0)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterfpd_rdrsp);
+
+static ssize_t counterlpd_wrreq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_LPD_WRREQ);
+
+ if (rdval < 0)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterlpd_wrreq);
+
+static ssize_t counterlpd_wrrsp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_LPD_WRRSP);
+
+ if (rdval < 0)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterlpd_wrrsp);
+
+static ssize_t counterlpd_rdreq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_LPD_RDREQ);
+
+ if (rdval < 0)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterlpd_rdreq);
+
+static ssize_t counterlpd_rdrsp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_LPD_RDRSP);
+
+ if (rdval < 0)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterlpd_rdrsp);
+
+static ssize_t counterlpdsrc_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xflex_sysfs_cmd(dev, buf, XFLEX_SET_SRC_COUNTER_LPD);
+
+ return size;
+}
+static DEVICE_ATTR_WO(counterlpdsrc);
+
+static ssize_t counterfpdsrc_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xflex_sysfs_cmd(dev, buf, XFLEX_SET_SRC_COUNTER_FPD);
+
+ return size;
+}
+static DEVICE_ATTR_WO(counterfpdsrc);
+
+static ssize_t counterlpdport_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xflex_sysfs_cmd(dev, buf, XFLEX_SET_PORT_COUNTER_LPD);
+
+ return size;
+}
+static DEVICE_ATTR_WO(counterlpdport);
+
+static ssize_t counterfpdport_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xflex_sysfs_cmd(dev, buf, XFLEX_SET_PORT_COUNTER_FPD);
+
+ return size;
+}
+static DEVICE_ATTR_WO(counterfpdport);
+
+static ssize_t counteridlpd_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xflex_dev_info *flexpm = to_xflex_dev_info(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%08d\n", flexpm->counterid_lpd);
+}
+
+static ssize_t counteridlpd_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ struct xflex_dev_info *flexpm = to_xflex_dev_info(dev);
+
+ ret = kstrtou32(buf, 0, &flexpm->counterid_lpd);
+ if (ret < 0)
+ return ret;
+
+ reset_default(dev, flexpm->counterid_lpd, FPM_LPD);
+
+ return size;
+}
+static DEVICE_ATTR_RW(counteridlpd);
+
+static ssize_t counteridfpd_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xflex_dev_info *flexpm = to_xflex_dev_info(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%08d\n", flexpm->counterid_fpd);
+}
+
+static ssize_t counteridfpd_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ struct xflex_dev_info *flexpm = to_xflex_dev_info(dev);
+
+ ret = kstrtou32(buf, 0, &flexpm->counterid_fpd);
+ if (ret < 0)
+ return ret;
+
+ return size;
+}
+static DEVICE_ATTR_RW(counteridfpd);
+
+static struct attribute *xflex_attrs[] = {
+ &dev_attr_counterlpdsrc.attr,
+ &dev_attr_counterlpdport.attr,
+ &dev_attr_counterfpdsrc.attr,
+ &dev_attr_counterfpdport.attr,
+
+ &dev_attr_counterlpd_rdreq.attr,
+ &dev_attr_counterlpd_wrreq.attr,
+ &dev_attr_counterlpd_rdrsp.attr,
+ &dev_attr_counterlpd_wrrsp.attr,
+
+ &dev_attr_counterfpd_rdreq.attr,
+ &dev_attr_counterfpd_wrreq.attr,
+ &dev_attr_counterfpd_rdrsp.attr,
+ &dev_attr_counterfpd_wrrsp.attr,
+
+ &dev_attr_counteridlpd.attr,
+ &dev_attr_counteridfpd.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(xflex);
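+
+/*
+ * Usage sketch (hypothetical, not part of the driver): the counters are
+ * driven entirely through the attributes above. A monitor could select
+ * LPD counter 2 and then poll its read-request count:
+ *
+ *	char val[16];
+ *	int fd = open("/sys/devices/.../counteridlpd", O_WRONLY);
+ *
+ *	write(fd, "2", 1);
+ *	close(fd);
+ *	fd = open("/sys/devices/.../counterlpd_rdreq", O_RDONLY);
+ *	read(fd, val, sizeof(val));
+ *
+ * The ".../" path component depends on where the device sits in sysfs.
+ */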
+
+/**
+ * xflex_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This is the driver probe routine. It does all the memory
+ * allocation and creates sysfs entries for the device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xflex_probe(struct platform_device *pdev)
+{
+ struct xflex_dev_info *flexpm;
+ struct resource *res;
+ int err;
+ struct device *dev = &pdev->dev;
+
+ flexpm = devm_kzalloc(dev, sizeof(*flexpm), GFP_KERNEL);
+ if (!flexpm)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "baselpd");
+ flexpm->baselpd = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(flexpm->baselpd))
+ return PTR_ERR(flexpm->baselpd);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "basefpd");
+ flexpm->basefpd = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(flexpm->basefpd))
+ return PTR_ERR(flexpm->basefpd);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "funnel");
+ flexpm->funnel = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(flexpm->funnel))
+ return PTR_ERR(flexpm->funnel);
+
+ writel(FPM_UNLOCK, flexpm->funnel + FPM_LAR_OFFSET);
+ writel(FPM_UNLOCK, flexpm->baselpd + FPM_LAR_OFFSET);
+
+ /* Create sysfs file entries for the device */
+ err = sysfs_create_groups(&dev->kobj, xflex_groups);
+ if (err < 0) {
+ dev_err(dev, "unable to create sysfs entries\n");
+ return err;
+ }
+
+ dev_set_drvdata(dev, flexpm);
+
+ return 0;
+}
+
+/**
+ * xflex_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function frees all the resources allocated to the device.
+ *
+ * Return: 0 always
+ */
+static int xflex_remove(struct platform_device *pdev)
+{
+ sysfs_remove_groups(&pdev->dev.kobj, xflex_groups);
+ return 0;
+}
+
+static const struct of_device_id xflex_of_match[] = {
+ { .compatible = "xlnx,flexnoc-pm-2.7", },
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, xflex_of_match);
+
+static struct platform_driver xflex_driver = {
+ .driver = {
+ .name = "xilinx-flex",
+ .of_match_table = xflex_of_match,
+ },
+ .probe = xflex_probe,
+ .remove = xflex_remove,
+};
+
+module_platform_driver(xflex_driver);
+
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx Flexnoc performance monitor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/xilinx_trafgen.c b/drivers/misc/xilinx_trafgen.c
new file mode 100644
index 000000000000..ba101e032573
--- /dev/null
+++ b/drivers/misc/xilinx_trafgen.c
@@ -0,0 +1,1648 @@
+/*
+ * Xilinx AXI Traffic Generator
+ *
+ * Copyright (C) 2013 - 2014 Xilinx, Inc.
+ *
+ * Description:
+ * This driver is developed for the AXI Traffic Generator IP, which is
+ * designed to generate AXI4 traffic that can be used to stress
+ * different modules/interconnects in the system. The various
+ * configurable options, provided through sysfs entries, allow the
+ * user to generate a wide variety of traffic based on their
+ * requirements.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Hw specific definitions */
+
+/* Internal RAM Offsets */
+#define XTG_PARAM_RAM_OFFSET 0x1000 /* Parameter RAM offset */
+#define XTG_COMMAND_RAM_OFFSET 0x8000 /* Command RAM offset */
+#define XTG_COMMAND_RAM_MSB_OFFSET 0xa000 /* Command RAM MSB offset */
+#define XTG_MASTER_RAM_INIT_OFFSET 0x10000 /* Master RAM initial offset (v1.0) */
+#define XTG_MASTER_RAM_OFFSET 0xc000 /* Master RAM offset */
+#define XTG_WRITE_COMMAND_RAM_OFFSET 0x9000 /* Write Command RAM offset */
+
+/* Register Offsets */
+#define XTG_MCNTL_OFFSET 0x00 /* Master control */
+#define XTG_SCNTL_OFFSET 0x04 /* Slave control */
+#define XTG_ERR_STS_OFFSET 0x08 /* Error status */
+#define XTG_ERR_EN_OFFSET 0x0C /* Error enable */
+#define XTG_MSTERR_INTR_OFFSET 0x10 /* Master error interrupt enable */
+#define XTG_CFG_STS_OFFSET 0x14 /* Config status */
+#define XTG_STREAM_CNTL_OFFSET 0x30 /* Streaming Control */
+#define XTG_STREAM_CFG_OFFSET 0x34 /* Streaming Config */
+#define XTG_STREAM_TL_OFFSET 0x38 /* Streaming Transfer Length */
+#define XTG_STREAM_TKTS1_OFFSET 0x40 /* Streaming tkeep tstrb set1 */
+#define XTG_STREAM_TKTS2_OFFSET 0x44 /* Streaming tkeep tstrb set2 */
+#define XTG_STREAM_TKTS3_OFFSET 0x48 /* Streaming tkeep tstrb set3 */
+#define XTG_STREAM_TKTS4_OFFSET 0x4C /* Streaming tkeep tstrb set4 */
+#define XTG_STATIC_CNTL_OFFSET 0x60 /* Static Control */
+#define XTG_STATIC_LEN_OFFSET 0x64 /* Static Length */
+
+/* Register Bitmasks/shifts */
+
+/* Master logic enable */
+#define XTG_MCNTL_MSTEN_MASK 0x00100000
+/* Loop enable */
+#define XTG_MCNTL_LOOPEN_MASK 0x00080000
+/* Slave error interrupt enable */
+#define XTG_SCNTL_ERREN_MASK 0x00008000
+/* Master complete interrupt enable */
+#define XTG_ERR_EN_MSTIRQEN_MASK 0x80000000
+/* Master error interrupt enable */
+#define XTG_MSTERR_INTR_MINTREN_MASK 0x00008000
+/* Master complete done status */
+#define XTG_ERR_STS_MSTDONE_MASK 0x80000000
+/* Error mask for error status/enable registers */
+#define XTG_ERR_ALL_ERRS_MASK 0x801F0003
+/* Core Revision shift */
+#define XTG_MCNTL_REV_SHIFT 24
+
+/* Axi Traffic Generator Command RAM Entry field mask/shifts */
+
+/* Command RAM entry masks */
+#define XTG_LEN_MASK 0xFF /* Driven to a*_len line */
+#define XTG_LOCK_MASK 0x1 /* Driven to a*_lock line */
+#define XTG_BURST_MASK 0x3 /* Driven to a*_burst line */
+#define XTG_SIZE_MASK 0x7 /* Driven to a*_size line */
+#define XTG_ID_MASK 0x1F /* Driven to a*_id line */
+#define XTG_PROT_MASK 0x7 /* Driven to a*_prot line */
+#define XTG_LAST_ADDR_MASK 0x7 /* Last address */
+#define XTG_VALID_CMD_MASK 0x1 /* Valid Command */
+#define XTG_MSTRAM_INDEX_MASK 0x1FFF /* Master RAM Index */
+#define XTG_OTHER_DEPEND_MASK 0x1FF /* Other depend Command no */
+#define XTG_MY_DEPEND_MASK 0x1FF /* My depend command no */
+#define XTG_QOS_MASK 0xF /* Driven to a*_qos line */
+#define XTG_USER_MASK 0xFF /* Driven to a*_user line */
+#define XTG_CACHE_MASK 0xF /* Driven to a*_cache line */
+#define XTG_EXPECTED_RESP_MASK 0x7 /* Expected response */
+
+/* Command RAM entry shift values */
+#define XTG_LEN_SHIFT 0 /* Driven to a*_len line */
+#define XTG_LOCK_SHIFT 8 /* Driven to a*_lock line */
+#define XTG_BURST_SHIFT 10 /* Driven to a*_burst line */
+#define XTG_SIZE_SHIFT 12 /* Driven to a*_size line */
+#define XTG_ID_SHIFT 15 /* Driven to a*_id line */
+#define XTG_PROT_SHIFT 21 /* Driven to a*_prot line */
+#define XTG_LAST_ADDR_SHIFT 28 /* Last address */
+#define XTG_VALID_CMD_SHIFT 31 /* Valid Command */
+#define XTG_MSTRAM_INDEX_SHIFT 0 /* Master RAM Index */
+#define XTG_OTHER_DEPEND_SHIFT 13 /* Other depend cmd num */
+#define XTG_MY_DEPEND_SHIFT 22 /* My depend cmd num */
+#define XTG_QOS_SHIFT 16 /* Driven to a*_qos line */
+#define XTG_USER_SHIFT 5 /* Driven to a*_user line */
+#define XTG_CACHE_SHIFT 4 /* Driven to a*_cache line */
+#define XTG_EXPECTED_RESP_SHIFT 0 /* Expected response */
+
+/* Axi Traffic Generator Parameter RAM Entry field mask/shifts */
+
+/* Parameter RAM Entry field shift values */
+#define XTG_PARAM_ADDRMODE_SHIFT 24 /* Address mode */
+#define XTG_PARAM_INTERVALMODE_SHIFT 26 /* Interval mode */
+#define XTG_PARAM_IDMODE_SHIFT 28 /* Id mode */
+#define XTG_PARAM_OP_SHIFT 29 /* Opcode */
+
+/* PARAM RAM Opcode shift values */
+#define XTG_PARAM_COUNT_SHIFT 0 /* Repeat/Delay count */
+#define XTG_PARAM_DELAYRANGE_SHIFT 0 /* Delay range */
+#define XTG_PARAM_DELAY_SHIFT 8 /* FIXED RPT delay count */
+#define XTG_PARAM_ADDRRANGE_SHIFT 20 /* Address range */
+
+/* Parameter RAM Entry field mask values */
+#define XTG_PARAM_ADDRMODE_MASK 0x3 /* Address mode */
+#define XTG_PARAM_INTERVALMODE_MASK 0x3 /* Interval mode */
+#define XTG_PARAM_IDMODE_MASK 0x1 /* Id mode */
+#define XTG_PARAM_OP_MASK 0x7 /* Opcode */
+
+/* PARAM RAM Opcode mask values */
+#define XTG_PARAM_COUNT_MASK 0xFFFFFF /* Repeat/Delay count */
+#define XTG_PARAM_DELAYRANGE_MASK 0xFF /* Delay range */
+#define XTG_PARAM_DELAY_MASK 0xFFF /* FIXED RPT delay count */
+#define XTG_PARAM_ADDRRANGE_MASK 0xF /* Address range */
+
+/* PARAM RAM Opcode values */
+#define XTG_PARAM_OP_NOP 0x0 /* NOP mode */
+#define XTG_PARAM_OP_RPT 0x1 /* Repeat mode */
+#define XTG_PARAM_OP_DELAY 0x2 /* Delay mode */
+#define XTG_PARAM_OP_FIXEDRPT 0x3 /* Fixed repeat delay */
+
+/* Axi Traffic Generator Static Mode masks */
+#define XTG_STATIC_CNTL_TD_MASK 0x00000002 /* Transfer Done Mask */
+#define XTG_STATIC_CNTL_STEN_MASK 0x00000001 /* Static Enable Mask */
+#define XTG_STATIC_CNTL_RESET_MASK 0x00000000 /* Static Reset Mask */
+
+/* Axi Traffic Generator Stream Mode mask/shifts */
+#define XTG_STREAM_CNTL_STEN_MASK 0x00000001 /* Stream Enable Mask */
+#define XTG_STREAM_TL_TCNT_MASK 0xFFFF0000 /* Transfer Count Mask */
+#define XTG_STREAM_TL_TLEN_MASK 0x0000FFFF /* Transfer Length Mask */
+#define XTG_STREAM_TL_TCNT_SHIFT 16 /* Transfer Count Shift */
+
+/* Driver Specific Definitions */
+
+#define MAX_NUM_ENTRIES 256 /* Number of command entries per region */
+
+#define VALID_SIG 0xa5a5a5a5 /* Valid unique identifier */
+
+/* Internal RAM Sizes */
+#define XTG_PRM_RAM_BLOCK_SIZE 0x400 /* PRAM Block size (1KB) */
+#define XTG_CMD_RAM_BLOCK_SIZE 0x1000 /* CRAM Block size (4KB) */
+#define XTG_EXTCMD_RAM_BLOCK_SIZE 0x400 /* Extended CMD RAM Block size (1KB) */
+#define XTG_PARAM_RAM_SIZE 0x800 /* Parameter RAM (2KB) */
+#define XTG_COMMAND_RAM_SIZE 0x2000 /* Command RAM (8KB) */
+#define XTG_EXTCMD_RAM_SIZE 0x800 /* Command RAM (2KB) */
+#define XTG_MASTER_RAM_SIZE 0x2000 /* Master RAM (8KB) */
+
+/* RAM Access Flags */
+#define XTG_READ_RAM 0x0 /* Read RAM flag */
+#define XTG_WRITE_RAM 0x1 /* Write RAM flag */
+#define XTG_WRITE_RAM_ZERO 0x2 /* Write Zero flag */
+
+/* Bytes per entry */
+#define XTG_CRAM_BYTES_PER_ENTRY 16 /* CRAM bytes per entry */
+#define XTG_PRAM_BYTES_PER_ENTRY 4 /* PRAM bytes per entry */
+
+/* Interrupt Definitions */
+#define XTG_MASTER_CMP_INTR 0x1 /* Master complete intr flag */
+#define XTG_MASTER_ERR_INTR 0x2 /* Master error intr flag */
+#define XTG_SLAVE_ERR_INTR 0x4 /* Slave error intr flag */
+
+/*
+ * Version value of the trafgen core.
+ * For the initial IP release (v1.0) the version value is 0x47.
+ * From v2.0 onwards the value encodes the version directly, e.g.:
+ * v2.1 -> 0x21
+ * v2.2 -> 0x22, and so on.
+ *
+ */
+#define XTG_INIT_VERSION 0x47 /* Trafgen initial version(v1.0) */
+
+/* Macro */
+#define to_xtg_dev_info(n) ((struct xtg_dev_info *)dev_get_drvdata(n))
+
+#define CMD_WDS 0x4 /* Number of words in command RAM per command */
+#define EXT_WDS 0x1 /* Number of words in extended RAM per command */
+#define MSB_INDEX 0x4
+/**
+ * struct xtg_cram - Command RAM structure
+ * @addr: Address Driven to a*_addr line
+ * @valid_cmd: Valid Command
+ * @last_addr: Last address
+ * @prot: Driven to a*_prot line
+ * @id: Driven to a*_id line
+ * @size: Driven to a*_size line
+ * @burst: Driven to a*_burst line
+ * @lock: Driven to a*_lock line
+ * @length: Driven to a*_len line
+ * @my_dpnd: My Depend command number
+ * @other_dpnd: Other depend command number
+ * @mram_idx: Master RAM index
+ * @qos: Driven to a*_qos line
+ * @user: Driven to a*_user line
+ * @cache: Driven to a*_cache line
+ * @expected_resp: Expected response
+ * @index: Command Index
+ * @is_write_block: Write/Read block
+ * @is_valid_req: Unique signature
+ *
+ * FIXME: This structure is shared with the user application and
+ * hence needs to be kept in sync. Structures like this should not
+ * be defined in the driver; this needs to be fixed once a proper
+ * placeholder is found (in uapi/).
+ */
+struct xtg_cram {
+ phys_addr_t addr;
+ u32 valid_cmd;
+ u32 last_addr;
+ u32 prot;
+ u32 id;
+ u32 size;
+ u32 burst;
+ u32 lock;
+ u32 length;
+ u32 my_dpnd;
+ u32 other_dpnd;
+ u32 mram_idx;
+ u32 qos;
+ u32 user;
+ u32 cache;
+ u32 expected_resp;
+ u16 index;
+ bool is_write_block;
+ u32 is_valid_req;
+};
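+
+/*
+ * Illustrative sketch (an assumed usage, not shipped with the driver):
+ * a user application could program one read command (an 8-beat INCR
+ * transfer of 4 bytes per beat) by filling this structure and writing
+ * it to the "command_ram" binary attribute:
+ *
+ *	struct xtg_cram cmd = {
+ *		.addr = 0x10000000,
+ *		.length = 7,
+ *		.burst = 1,
+ *		.size = 2,
+ *		.valid_cmd = 1,
+ *		.index = 0,
+ *		.is_write_block = false,
+ *		.is_valid_req = VALID_SIG,
+ *	};
+ *	write(fd, &cmd, sizeof(cmd));
+ *
+ * A write of exactly sizeof(struct xtg_cram) with is_valid_req set to
+ * VALID_SIG is what makes xtg_cram_write() treat the buffer as a command
+ * descriptor rather than raw RAM data.
+ */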
+
+/**
+ * struct xtg_pram - Parameter RAM structure
+ * @op_cntl0: Control field 0
+ * @op_cntl1: Control field 1
+ * @op_cntl2: Control field 2
+ * @addr_mode: Address mode
+ * @interval_mode: Interval mode
+ * @id_mode: Id mode
+ * @opcode: Opcode
+ * @index: Command Index
+ * @is_write_block: Write/Read block
+ * @is_valid_req: Unique signature
+ *
+ * FIXME: This structure is shared with the user application and
+ * hence needs to be kept in sync. Structures like this should not
+ * be defined in the driver; this needs to be fixed once a proper
+ * placeholder is found (in uapi/).
+ */
+struct xtg_pram {
+ u32 op_cntl0;
+ u32 op_cntl1;
+ u32 op_cntl2;
+ u32 addr_mode;
+ u32 interval_mode;
+ u32 id_mode;
+ u32 opcode;
+ u16 index;
+ bool is_write_block;
+ u32 is_valid_req;
+};
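+
+/*
+ * The same convention applies to the "parameter_ram" binary attribute:
+ * a write of exactly sizeof(struct xtg_pram) with is_valid_req set to
+ * VALID_SIG makes xtg_pram_write() encode the fields into a single
+ * parameter word. A hypothetical repeat entry might look like:
+ *
+ *	struct xtg_pram prm = {
+ *		.opcode = XTG_PARAM_OP_RPT,
+ *		.op_cntl0 = 16,
+ *		.index = 0,
+ *		.is_write_block = false,
+ *		.is_valid_req = VALID_SIG,
+ *	};
+ *
+ * Here op_cntl0 is the repeat count; pairing a parameter entry with the
+ * command entry at the same index is an assumption about the IP.
+ */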
+
+/**
+ * struct xtg_dev_info - Global Driver structure
+ * @regs: Iomapped base address
+ * @dev: Device structure
+ * @phys_base_addr: Physical base address
+ * @last_rd_valid_idx: Last Read Valid Command Index
+ * @last_wr_valid_idx: Last Write Valid Command Index
+ * @id: Device instance id
+ * @xtg_mram_offset: MasterRam offset
+ * @clk: Input clock
+ */
+struct xtg_dev_info {
+ void __iomem *regs;
+ struct device *dev;
+ phys_addr_t phys_base_addr;
+ s16 last_rd_valid_idx;
+ s16 last_wr_valid_idx;
+ u32 id;
+ u32 xtg_mram_offset;
+ struct clk *clk;
+};
+
+/**
+ * enum xtg_sysfs_ioctl - Ioctl opcodes
+ * @XTG_GET_MASTER_CMP_STS: get master complete status
+ * @XTG_GET_SLV_CTRL_REG: get slave control reg status
+ * @XTG_GET_ERR_STS: get error status
+ * @XTG_GET_CFG_STS: get config status
+ * @XTG_GET_LAST_VALID_INDEX: get last valid index
+ * @XTG_GET_DEVICE_ID: get device id
+ * @XTG_GET_RESOURCE: get resource
+ * @XTG_GET_STATIC_ENABLE: get static mode traffic generation state
+ * @XTG_GET_STATIC_BURSTLEN: get static mode burst length
+ * @XTG_GET_STATIC_TRANSFERDONE: get static transfer done
+ * @XTG_GET_STREAM_ENABLE: get stream mode traffic generation state
+ * @XTG_GET_STREAM_TRANSFERLEN: get streaming mode transfer length
+ * @XTG_GET_STREAM_TRANSFERCNT: get streaming mode transfer count
+ * @XTG_GET_MASTER_LOOP_EN: get master loop enable status
+ * @XTG_GET_STREAM_TKTS1: get stream tstrb and tkeep set1 values
+ * @XTG_GET_STREAM_TKTS2: get stream tstrb and tkeep set2 values
+ * @XTG_GET_STREAM_TKTS3: get stream tstrb and tkeep set3 values
+ * @XTG_GET_STREAM_TKTS4: get stream tstrb and tkeep set4 values
+ * @XTG_GET_STREAM_CFG: get stream configuration values
+ * @XTG_START_MASTER_LOGIC: start master logic
+ * @XTG_SET_SLV_CTRL_REG: set slave control
+ * @XTG_CLEAR_ERRORS: clear errors
+ * @XTG_ENABLE_ERRORS: enable errors
+ * @XTG_ENABLE_INTRS: enable interrupts
+ * @XTG_CLEAR_MRAM: clear master ram
+ * @XTG_CLEAR_CRAM: clear command ram
+ * @XTG_CLEAR_PRAM: clear parameter ram
+ * @XTG_SET_STATIC_ENABLE: enable static mode traffic generation
+ * @XTG_SET_STATIC_DISABLE: disable static mode traffic generation
+ * @XTG_SET_STATIC_BURSTLEN: set static mode burst length
+ * @XTG_SET_STATIC_TRANSFERDONE: set static transfer done
+ * @XTG_SET_STREAM_ENABLE: enable streaming mode traffic generation
+ * @XTG_SET_STREAM_DISABLE: disable streaming mode traffic generation
+ * @XTG_SET_STREAM_TRANSFERLEN: set streaming mode transfer length
+ * @XTG_SET_STREAM_TRANSFERCNT: set streaming mode transfer count
+ * @XTG_SET_STREAM_TKTS1: set stream tstrb and tkeep set1 values
+ * @XTG_SET_STREAM_TKTS2: set stream tstrb and tkeep set2 values
+ * @XTG_SET_STREAM_TKTS3: set stream tstrb and tkeep set3 values
+ * @XTG_SET_STREAM_TKTS4: set stream tstrb and tkeep set4 values
+ * @XTG_SET_STREAM_CFG: set stream configuration values
+ * @XTG_MASTER_LOOP_EN: enable master loop
+ */
+enum xtg_sysfs_ioctl_opcode {
+ XTG_GET_MASTER_CMP_STS,
+ XTG_GET_SLV_CTRL_REG,
+ XTG_GET_ERR_STS,
+ XTG_GET_CFG_STS,
+ XTG_GET_LAST_VALID_INDEX,
+ XTG_GET_DEVICE_ID,
+ XTG_GET_RESOURCE,
+ XTG_GET_STATIC_ENABLE,
+ XTG_GET_STATIC_BURSTLEN,
+ XTG_GET_STATIC_TRANSFERDONE,
+ XTG_GET_STREAM_ENABLE,
+ XTG_GET_STREAM_TRANSFERLEN,
+ XTG_GET_MASTER_LOOP_EN,
+ XTG_GET_STREAM_TKTS1,
+ XTG_GET_STREAM_TKTS2,
+ XTG_GET_STREAM_TKTS3,
+ XTG_GET_STREAM_TKTS4,
+ XTG_GET_STREAM_CFG,
+ XTG_GET_STREAM_TRANSFERCNT,
+ XTG_START_MASTER_LOGIC,
+ XTG_SET_SLV_CTRL_REG,
+ XTG_CLEAR_ERRORS,
+ XTG_ENABLE_ERRORS,
+ XTG_ENABLE_INTRS,
+ XTG_CLEAR_MRAM,
+ XTG_CLEAR_CRAM,
+ XTG_CLEAR_PRAM,
+ XTG_SET_STATIC_ENABLE,
+ XTG_SET_STATIC_DISABLE,
+ XTG_SET_STATIC_BURSTLEN,
+ XTG_SET_STATIC_TRANSFERDONE,
+ XTG_SET_STREAM_ENABLE,
+ XTG_SET_STREAM_DISABLE,
+ XTG_SET_STREAM_TRANSFERLEN,
+ XTG_SET_STREAM_TRANSFERCNT,
+ XTG_SET_STREAM_TKTS1,
+ XTG_SET_STREAM_TKTS2,
+ XTG_SET_STREAM_TKTS3,
+ XTG_SET_STREAM_TKTS4,
+ XTG_SET_STREAM_CFG,
+ XTG_MASTER_LOOP_EN
+};
+
+/**
+ * xtg_access_rams - Write/Read Master/Command/Parameter RAM
+ * @tg: Pointer to xtg_dev_info structure
+ * @where: Offset from base
+ * @count: Number of bytes to write/read
+ * @flags: Read/Write/Write Zero
+ * @data: Data pointer
+ */
+static void xtg_access_rams(struct xtg_dev_info *tg, int where,
+ int count, int flags, u32 *data)
+{
+ u32 index;
+
+ switch (flags) {
+ case XTG_WRITE_RAM_ZERO:
+ memset_io(tg->regs + where, 0, count);
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ writel(0x0, tg->regs + where +
+ (XTG_COMMAND_RAM_MSB_OFFSET - XTG_COMMAND_RAM_OFFSET) +
+ XTG_EXTCMD_RAM_BLOCK_SIZE - XTG_CMD_RAM_BLOCK_SIZE);
+#endif
+ break;
+ case XTG_WRITE_RAM:
+ for (index = 0; count > 0; index++, count -= 4)
+ writel(data[index], tg->regs + where + index * 4);
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ /*
+ * This additional logic is required only for the command RAM:
+ * when writing to the READ command RAM, write the upper address
+ * bits to the READ address RAM.
+ */
+ if ((where >= XTG_COMMAND_RAM_OFFSET) &&
+ (where < XTG_WRITE_COMMAND_RAM_OFFSET))
+ writel(data[MSB_INDEX], tg->regs + XTG_COMMAND_RAM_OFFSET +
+ (where - XTG_COMMAND_RAM_OFFSET) / 4 +
+ (XTG_COMMAND_RAM_MSB_OFFSET - XTG_COMMAND_RAM_OFFSET));
+ /*
+ * When writing to the WRITE command RAM, write the upper address
+ * bits to the WRITE address RAM.
+ */
+ if ((where >= XTG_WRITE_COMMAND_RAM_OFFSET) &&
+ (where < XTG_COMMAND_RAM_MSB_OFFSET))
+ writel(data[MSB_INDEX], tg->regs +
+ XTG_WRITE_COMMAND_RAM_OFFSET +
+ (where - XTG_WRITE_COMMAND_RAM_OFFSET) / 4 +
+ (XTG_COMMAND_RAM_MSB_OFFSET - XTG_COMMAND_RAM_OFFSET) +
+ XTG_EXTCMD_RAM_BLOCK_SIZE - XTG_CMD_RAM_BLOCK_SIZE);
+#endif
+ break;
+ case XTG_READ_RAM:
+ for (index = 0; count > 0; index++, count -= 4)
+ data[index] = readl(tg->regs + where + index * 4);
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if ((where >= XTG_COMMAND_RAM_OFFSET) &&
+ (where < XTG_WRITE_COMMAND_RAM_OFFSET))
+ data[MSB_INDEX] = readl(tg->regs + XTG_COMMAND_RAM_OFFSET +
+ (where - XTG_COMMAND_RAM_OFFSET) / 4 +
+ (XTG_COMMAND_RAM_MSB_OFFSET - XTG_COMMAND_RAM_OFFSET));
+
+ if ((where >= XTG_WRITE_COMMAND_RAM_OFFSET) &&
+ (where < XTG_COMMAND_RAM_MSB_OFFSET))
+ data[MSB_INDEX] = readl(tg->regs +
+ XTG_WRITE_COMMAND_RAM_OFFSET +
+ (where - XTG_WRITE_COMMAND_RAM_OFFSET) / 4 +
+ (XTG_COMMAND_RAM_MSB_OFFSET - XTG_COMMAND_RAM_OFFSET) +
+ XTG_EXTCMD_RAM_BLOCK_SIZE - XTG_CMD_RAM_BLOCK_SIZE);
+#endif
+ break;
+ }
+}
+
+/**
+ * xtg_prepare_cmd_words - Prepares the Command RAM words for one command
+ * @tg: Pointer to xtg_dev_info structure
+ * @cmdp: Pointer to xtg_cram structure
+ * @cmd_words: Pointer to the Command Words that need to be prepared
+ */
+static void xtg_prepare_cmd_words(struct xtg_dev_info *tg,
+ const struct xtg_cram *cmdp, u32 *cmd_words)
+{
+ /* Command Word 0 */
+ cmd_words[0] = lower_32_bits(cmdp->addr);
+
+ /* Command Word 4 */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ cmd_words[MSB_INDEX] = upper_32_bits(cmdp->addr);
+#endif
+
+ /* Command Word 1 */
+ cmd_words[1] = 0;
+ cmd_words[1] |= (cmdp->length & XTG_LEN_MASK) << XTG_LEN_SHIFT;
+ cmd_words[1] |= (cmdp->lock & XTG_LOCK_MASK) << XTG_LOCK_SHIFT;
+ cmd_words[1] |= (cmdp->burst & XTG_BURST_MASK) << XTG_BURST_SHIFT;
+ cmd_words[1] |= (cmdp->size & XTG_SIZE_MASK) << XTG_SIZE_SHIFT;
+ cmd_words[1] |= (cmdp->id & XTG_ID_MASK) << XTG_ID_SHIFT;
+ cmd_words[1] |= (cmdp->prot & XTG_PROT_MASK) << XTG_PROT_SHIFT;
+ cmd_words[1] |= (cmdp->last_addr & XTG_LAST_ADDR_MASK) <<
+ XTG_LAST_ADDR_SHIFT;
+ cmd_words[1] |= (cmdp->valid_cmd & XTG_VALID_CMD_MASK) <<
+ XTG_VALID_CMD_SHIFT;
+
+ /* Command Word 2 */
+ cmd_words[2] = 0;
+ cmd_words[2] |= (cmdp->mram_idx & XTG_MSTRAM_INDEX_MASK) <<
+ XTG_MSTRAM_INDEX_SHIFT;
+ cmd_words[2] |= (cmdp->other_dpnd & XTG_OTHER_DEPEND_MASK) <<
+ XTG_OTHER_DEPEND_SHIFT;
+ cmd_words[2] |= (cmdp->my_dpnd & XTG_MY_DEPEND_MASK) <<
+ XTG_MY_DEPEND_SHIFT;
+
+ /* Command Word 3 */
+ cmd_words[3] = 0;
+ cmd_words[3] |= (cmdp->qos & XTG_QOS_MASK) << XTG_QOS_SHIFT;
+ cmd_words[3] |= (cmdp->user & XTG_USER_MASK) << XTG_USER_SHIFT;
+ cmd_words[3] |= (cmdp->cache & XTG_CACHE_MASK) << XTG_CACHE_SHIFT;
+ cmd_words[3] |= (cmdp->expected_resp & XTG_EXPECTED_RESP_MASK) <<
+ XTG_EXPECTED_RESP_SHIFT;
+}
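+
+/*
+ * Worked example (for illustration only): a command with length = 0xF,
+ * burst = 1 (INCR), size = 2 and valid_cmd = 1 yields Command Word 1 of
+ *
+ *	(0xF << 0) | (1 << 10) | (2 << 12) | (1 << 31) = 0x8000240f
+ */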
+
+/**
+ * xtg_prepare_param_word - Prepares the Parameter RAM word
+ * @tg: Pointer to xtg_dev_info structure
+ * @cmdp: Pointer to xtg_pram structure
+ * @param_word: Pointer to Param Word that needs to be prepared
+ */
+static void xtg_prepare_param_word(struct xtg_dev_info *tg,
+ const struct xtg_pram *cmdp, u32 *param_word)
+{
+ *param_word = 0;
+ *param_word |= (cmdp->opcode & XTG_PARAM_OP_MASK) << XTG_PARAM_OP_SHIFT;
+ *param_word |= (cmdp->addr_mode & XTG_PARAM_ADDRMODE_MASK) <<
+ XTG_PARAM_ADDRMODE_SHIFT;
+ *param_word |= (cmdp->id_mode & XTG_PARAM_IDMODE_MASK) <<
+ XTG_PARAM_IDMODE_SHIFT;
+ *param_word |= (cmdp->interval_mode & XTG_PARAM_INTERVALMODE_MASK) <<
+ XTG_PARAM_INTERVALMODE_SHIFT;
+
+ switch (cmdp->opcode) {
+ case XTG_PARAM_OP_RPT:
+ case XTG_PARAM_OP_DELAY:
+ *param_word |= (cmdp->op_cntl0 & XTG_PARAM_COUNT_MASK) <<
+ XTG_PARAM_COUNT_SHIFT;
+ break;
+
+ case XTG_PARAM_OP_FIXEDRPT:
+ *param_word |= (cmdp->op_cntl0 & XTG_PARAM_ADDRRANGE_MASK) <<
+ XTG_PARAM_ADDRRANGE_SHIFT;
+ *param_word |= (cmdp->op_cntl1 & XTG_PARAM_DELAY_MASK) <<
+ XTG_PARAM_DELAY_SHIFT;
+ *param_word |= (cmdp->op_cntl2 & XTG_PARAM_DELAYRANGE_MASK) <<
+ XTG_PARAM_DELAYRANGE_SHIFT;
+ break;
+
+ case XTG_PARAM_OP_NOP:
+ *param_word = 0;
+ break;
+ }
+}
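+
+/*
+ * Worked example (for illustration only): a FIXEDRPT entry with an
+ * address range of 4 (op_cntl0), a delay of 0x64 (op_cntl1) and a
+ * delay range of 0 (op_cntl2) encodes as
+ *
+ *	(0x3 << 29) | (4 << 20) | (0x64 << 8) = 0x60406400
+ */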
+
+/**
+ * xtg_sysfs_ioctl - Implements sysfs operations
+ * @dev: Device structure
+ * @buf: Value to write
+ * @opcode: Ioctl opcode
+ *
+ * Return: value read for the given opcode, or 0 for write-only opcodes.
+ */
+static ssize_t xtg_sysfs_ioctl(struct device *dev, const char *buf,
+ enum xtg_sysfs_ioctl_opcode opcode)
+{
+ struct xtg_dev_info *tg = to_xtg_dev_info(dev);
+ unsigned long wrval;
+ ssize_t status, rdval = 0;
+
+ if (opcode > XTG_GET_STREAM_TRANSFERCNT) {
+ status = kstrtoul(buf, 0, &wrval);
+ if (status < 0)
+ return status;
+ }
+
+ switch (opcode) {
+ case XTG_GET_MASTER_CMP_STS:
+ rdval = (readl(tg->regs + XTG_MCNTL_OFFSET) &
+ XTG_MCNTL_MSTEN_MASK) ? 1 : 0;
+ break;
+
+ case XTG_GET_MASTER_LOOP_EN:
+ rdval = (readl(tg->regs + XTG_MCNTL_OFFSET) &
+ XTG_MCNTL_LOOPEN_MASK) ? 1 : 0;
+ break;
+
+ case XTG_GET_SLV_CTRL_REG:
+ rdval = readl(tg->regs + XTG_SCNTL_OFFSET);
+ break;
+
+ case XTG_GET_ERR_STS:
+ rdval = readl(tg->regs + XTG_ERR_STS_OFFSET) &
+ XTG_ERR_ALL_ERRS_MASK;
+ break;
+
+ case XTG_GET_CFG_STS:
+ rdval = readl(tg->regs + XTG_CFG_STS_OFFSET);
+ break;
+
+ case XTG_GET_LAST_VALID_INDEX:
+ rdval = (((tg->last_wr_valid_idx << 16) & 0xffff0000) |
+ (tg->last_rd_valid_idx & 0xffff));
+ break;
+
+ case XTG_GET_DEVICE_ID:
+ rdval = tg->id;
+ break;
+
+ case XTG_GET_RESOURCE:
+ rdval = (unsigned long)tg->regs;
+ break;
+
+ case XTG_GET_STATIC_ENABLE:
+ rdval = readl(tg->regs + XTG_STATIC_CNTL_OFFSET);
+ break;
+
+ case XTG_GET_STATIC_BURSTLEN:
+ rdval = readl(tg->regs + XTG_STATIC_LEN_OFFSET);
+ break;
+
+ case XTG_GET_STATIC_TRANSFERDONE:
+ rdval = (readl(tg->regs + XTG_STATIC_CNTL_OFFSET) &
+ XTG_STATIC_CNTL_TD_MASK);
+ break;
+
+ case XTG_GET_STREAM_ENABLE:
+ rdval = readl(tg->regs + XTG_STREAM_CNTL_OFFSET);
+ break;
+
+ case XTG_GET_STREAM_TRANSFERLEN:
+ rdval = (readl(tg->regs + XTG_STREAM_TL_OFFSET) &
+ XTG_STREAM_TL_TLEN_MASK);
+ break;
+
+ case XTG_GET_STREAM_TRANSFERCNT:
+ rdval = ((readl(tg->regs + XTG_STREAM_TL_OFFSET) &
+ XTG_STREAM_TL_TCNT_MASK) >>
+ XTG_STREAM_TL_TCNT_SHIFT);
+ break;
+
+ case XTG_GET_STREAM_TKTS1:
+ rdval = readl(tg->regs + XTG_STREAM_TKTS1_OFFSET);
+ break;
+ case XTG_GET_STREAM_TKTS2:
+ rdval = readl(tg->regs + XTG_STREAM_TKTS2_OFFSET);
+ break;
+ case XTG_GET_STREAM_TKTS3:
+ rdval = readl(tg->regs + XTG_STREAM_TKTS3_OFFSET);
+ break;
+ case XTG_GET_STREAM_TKTS4:
+ rdval = readl(tg->regs + XTG_STREAM_TKTS4_OFFSET);
+ break;
+
+ case XTG_GET_STREAM_CFG:
+ rdval = (readl(tg->regs + XTG_STREAM_CFG_OFFSET));
+ break;
+
+ case XTG_START_MASTER_LOGIC:
+ if (wrval)
+ writel(readl(tg->regs + XTG_MCNTL_OFFSET) |
+ XTG_MCNTL_MSTEN_MASK,
+ tg->regs + XTG_MCNTL_OFFSET);
+ break;
+
+ case XTG_MASTER_LOOP_EN:
+ if (wrval)
+ writel(readl(tg->regs + XTG_MCNTL_OFFSET) |
+ XTG_MCNTL_LOOPEN_MASK,
+ tg->regs + XTG_MCNTL_OFFSET);
+ else
+ writel(readl(tg->regs + XTG_MCNTL_OFFSET) &
+ ~XTG_MCNTL_LOOPEN_MASK,
+ tg->regs + XTG_MCNTL_OFFSET);
+ break;
+
+ case XTG_SET_SLV_CTRL_REG:
+ writel(wrval, tg->regs + XTG_SCNTL_OFFSET);
+ break;
+
+ case XTG_ENABLE_ERRORS:
+ wrval &= XTG_ERR_ALL_ERRS_MASK;
+ writel(wrval, tg->regs + XTG_ERR_EN_OFFSET);
+ break;
+
+ case XTG_CLEAR_ERRORS:
+ wrval &= XTG_ERR_ALL_ERRS_MASK;
+ writel(readl(tg->regs + XTG_ERR_STS_OFFSET) | wrval,
+ tg->regs + XTG_ERR_STS_OFFSET);
+ break;
+
+ case XTG_ENABLE_INTRS:
+ if (wrval & XTG_MASTER_CMP_INTR) {
+ pr_info("Enabling Master Complete Interrupt\n");
+ writel(readl(tg->regs + XTG_ERR_EN_OFFSET) |
+ XTG_ERR_EN_MSTIRQEN_MASK,
+ tg->regs + XTG_ERR_EN_OFFSET);
+ }
+ if (wrval & XTG_MASTER_ERR_INTR) {
+ pr_info("Enabling Interrupt on Master Errors\n");
+ writel(readl(tg->regs + XTG_MSTERR_INTR_OFFSET) |
+ XTG_MSTERR_INTR_MINTREN_MASK,
+ tg->regs + XTG_MSTERR_INTR_OFFSET);
+ }
+ if (wrval & XTG_SLAVE_ERR_INTR) {
+ pr_info("Enabling Interrupt on Slave Errors\n");
+ writel(readl(tg->regs + XTG_SCNTL_OFFSET) |
+ XTG_SCNTL_ERREN_MASK,
+ tg->regs + XTG_SCNTL_OFFSET);
+ }
+ break;
+
+ case XTG_CLEAR_MRAM:
+ xtg_access_rams(tg, tg->xtg_mram_offset,
+ XTG_MASTER_RAM_SIZE,
+ XTG_WRITE_RAM_ZERO, NULL);
+ break;
+
+ case XTG_CLEAR_CRAM:
+ xtg_access_rams(tg, XTG_COMMAND_RAM_OFFSET,
+ XTG_COMMAND_RAM_SIZE,
+ XTG_WRITE_RAM_ZERO, NULL);
+ break;
+
+ case XTG_CLEAR_PRAM:
+ xtg_access_rams(tg, XTG_PARAM_RAM_OFFSET,
+ XTG_PARAM_RAM_SIZE,
+ XTG_WRITE_RAM_ZERO, NULL);
+ break;
+
+ case XTG_SET_STATIC_ENABLE:
+ if (wrval) {
+ wrval &= XTG_STATIC_CNTL_STEN_MASK;
+ writel(readl(tg->regs + XTG_STATIC_CNTL_OFFSET) | wrval,
+ tg->regs + XTG_STATIC_CNTL_OFFSET);
+ } else {
+ writel(readl(tg->regs + XTG_STATIC_CNTL_OFFSET) &
+ ~XTG_STATIC_CNTL_STEN_MASK,
+ tg->regs + XTG_STATIC_CNTL_OFFSET);
+ }
+ break;
+
+ case XTG_SET_STATIC_BURSTLEN:
+ writel(wrval, tg->regs + XTG_STATIC_LEN_OFFSET);
+ break;
+
+ case XTG_SET_STATIC_TRANSFERDONE:
+ wrval |= XTG_STATIC_CNTL_TD_MASK;
+ writel(readl(tg->regs + XTG_STATIC_CNTL_OFFSET) | wrval,
+ tg->regs + XTG_STATIC_CNTL_OFFSET);
+ break;
+
+ case XTG_SET_STREAM_ENABLE:
+ if (wrval) {
+ rdval = readl(tg->regs + XTG_STREAM_CNTL_OFFSET);
+ rdval |= XTG_STREAM_CNTL_STEN_MASK;
+ writel(rdval,
+ tg->regs + XTG_STREAM_CNTL_OFFSET);
+ } else {
+ writel(readl(tg->regs + XTG_STREAM_CNTL_OFFSET) &
+ ~XTG_STREAM_CNTL_STEN_MASK,
+ tg->regs + XTG_STREAM_CNTL_OFFSET);
+ }
+ break;
+
+ case XTG_SET_STREAM_TRANSFERLEN:
+ wrval &= XTG_STREAM_TL_TLEN_MASK;
+ rdval = readl(tg->regs + XTG_STREAM_TL_OFFSET);
+ rdval &= ~XTG_STREAM_TL_TLEN_MASK;
+ writel(rdval | wrval,
+ tg->regs + XTG_STREAM_TL_OFFSET);
+ break;
+
+ case XTG_SET_STREAM_TRANSFERCNT:
+ wrval = ((wrval << XTG_STREAM_TL_TCNT_SHIFT) &
+ XTG_STREAM_TL_TCNT_MASK);
+ rdval = readl(tg->regs + XTG_STREAM_TL_OFFSET);
+ rdval = rdval & ~XTG_STREAM_TL_TCNT_MASK;
+ writel(rdval | wrval,
+ tg->regs + XTG_STREAM_TL_OFFSET);
+ break;
+
+ case XTG_SET_STREAM_TKTS1:
+ writel(wrval, tg->regs + XTG_STREAM_TKTS1_OFFSET);
+ break;
+ case XTG_SET_STREAM_TKTS2:
+ writel(wrval, tg->regs + XTG_STREAM_TKTS2_OFFSET);
+ break;
+ case XTG_SET_STREAM_TKTS3:
+ writel(wrval, tg->regs + XTG_STREAM_TKTS3_OFFSET);
+ break;
+ case XTG_SET_STREAM_TKTS4:
+ writel(wrval, tg->regs + XTG_STREAM_TKTS4_OFFSET);
+ break;
+
+ case XTG_SET_STREAM_CFG:
+ writel(wrval, tg->regs + XTG_STREAM_CFG_OFFSET);
+ break;
+
+ default:
+ break;
+ }
+
+ return rdval;
+}
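+
+/*
+ * Example (hypothetical): echoing "0x5" into the intr_en attribute
+ * below lands here with opcode XTG_ENABLE_INTRS and wrval set to
+ * XTG_MASTER_CMP_INTR | XTG_SLAVE_ERR_INTR, enabling the master
+ * complete and slave error interrupts in a single store.
+ */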
+
+/* Sysfs functions */
+
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_DEVICE_ID);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+static DEVICE_ATTR_RO(id);
+
+static ssize_t resource_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_RESOURCE);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+static DEVICE_ATTR_RO(resource);
+
+static ssize_t master_start_stop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_MASTER_CMP_STS);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t master_start_stop_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_START_MASTER_LOGIC);
+
+ return size;
+}
+static DEVICE_ATTR_RW(master_start_stop);
+
+static ssize_t config_slave_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_SLV_CTRL_REG);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+
+static ssize_t config_slave_status_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_SLV_CTRL_REG);
+
+ return size;
+}
+static DEVICE_ATTR_RW(config_slave_status);
+
+static ssize_t err_sts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_ERR_STS);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+
+static ssize_t err_sts_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_CLEAR_ERRORS);
+
+ return size;
+}
+static DEVICE_ATTR_RW(err_sts);
+
+static ssize_t err_en_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_ENABLE_ERRORS);
+
+ return size;
+}
+static DEVICE_ATTR_WO(err_en);
+
+static ssize_t intr_en_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_ENABLE_INTRS);
+
+ return size;
+}
+static DEVICE_ATTR_WO(intr_en);
+
+static ssize_t last_valid_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_LAST_VALID_INDEX);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+static DEVICE_ATTR_RO(last_valid_index);
+
+static ssize_t config_sts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_CFG_STS);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+static DEVICE_ATTR_RO(config_sts);
+
+static ssize_t mram_clear_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_CLEAR_MRAM);
+
+ return size;
+}
+static DEVICE_ATTR_WO(mram_clear);
+
+static ssize_t cram_clear_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_CLEAR_CRAM);
+
+ return size;
+}
+static DEVICE_ATTR_WO(cram_clear);
+
+static ssize_t pram_clear_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_CLEAR_PRAM);
+
+ return size;
+}
+static DEVICE_ATTR_WO(pram_clear);
+
+static ssize_t static_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STATIC_ENABLE);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+
+static ssize_t static_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STATIC_ENABLE);
+
+ return size;
+}
+static DEVICE_ATTR_RW(static_enable);
+
+static ssize_t static_burstlen_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STATIC_BURSTLEN);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t static_burstlen_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STATIC_BURSTLEN);
+
+ return size;
+}
+static DEVICE_ATTR_RW(static_burstlen);
+
+static ssize_t stream_cfg_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_CFG);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_cfg_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_CFG);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_cfg);
+
+static ssize_t stream_tkts4_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_TKTS4);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_tkts4_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_TKTS4);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_tkts4);
+
+static ssize_t stream_tkts3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_TKTS3);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_tkts3_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_TKTS3);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_tkts3);
+
+static ssize_t stream_tkts2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_TKTS2);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_tkts2_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_TKTS2);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_tkts2);
+
+static ssize_t stream_tkts1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_TKTS1);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_tkts1_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_TKTS1);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_tkts1);
+
+static ssize_t static_transferdone_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STATIC_TRANSFERDONE);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t static_transferdone_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STATIC_TRANSFERDONE);
+
+ return size;
+}
+static DEVICE_ATTR_RW(static_transferdone);
+
+static ssize_t reset_static_transferdone_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STATIC_TRANSFERDONE);
+
+ if (rdval == XTG_STATIC_CNTL_RESET_MASK)
+ rdval = 1;
+ else
+ rdval = 0;
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+static DEVICE_ATTR_RO(reset_static_transferdone);
+
+static ssize_t stream_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_ENABLE);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+
+static ssize_t stream_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_ENABLE);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_enable);
+
+static ssize_t stream_transferlen_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_TRANSFERLEN);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_transferlen_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_TRANSFERLEN);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_transferlen);
+
+static ssize_t stream_transfercnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_TRANSFERCNT);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_transfercnt_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_TRANSFERCNT);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_transfercnt);
+
+static ssize_t loop_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_MASTER_LOOP_EN);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t loop_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_MASTER_LOOP_EN);
+
+ return size;
+}
+static DEVICE_ATTR_RW(loop_enable);
+
+static ssize_t xtg_pram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ pr_info("No read access to Parameter RAM\n");
+
+ return 0;
+}
+
+static ssize_t xtg_pram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+ u32 *data = (u32 *)buf;
+
+ if (off >= XTG_PARAM_RAM_SIZE) {
+ pr_err("Requested Write len exceeds 2K PRAM size\n");
+ return -ENOMEM;
+ }
+
+ if (count >= XTG_PARAM_RAM_SIZE)
+ count = XTG_PARAM_RAM_SIZE;
+
+ /* Program each command */
+ if (count == sizeof(struct xtg_pram)) {
+ struct xtg_pram *cmdp = (struct xtg_pram *)buf;
+ u32 param_word;
+
+ if (!cmdp)
+ return -EINVAL;
+
+ if (cmdp->is_valid_req == VALID_SIG) {
+ /* Prepare parameter word */
+ xtg_prepare_param_word(tg, cmdp, &param_word);
+
+ count = XTG_PRAM_BYTES_PER_ENTRY;
+ data = &param_word;
+
+ /* Maximum command entries are 256 (indices 0-255) */
+ if (cmdp->index >= MAX_NUM_ENTRIES)
+ return -EINVAL;
+
+ /* Calculate the block index */
+ if (cmdp->is_write_block)
+ off = XTG_PRM_RAM_BLOCK_SIZE +
+ cmdp->index * count;
+ else
+ off = cmdp->index * count;
+ }
+ }
+
+ off += XTG_PARAM_RAM_OFFSET;
+ xtg_access_rams(tg, off, count, XTG_WRITE_RAM, data);
+
+ return count;
+}
+
+static int xtg_pram_mmap(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+ int ret;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_IO;
+
+ ret = remap_pfn_range(vma, vma->vm_start, (tg->phys_base_addr +
+ XTG_PARAM_RAM_OFFSET) >> PAGE_SHIFT,
+ XTG_PARAM_RAM_SIZE, vma->vm_page_prot);
+ return ret;
+}
+
+static struct bin_attribute xtg_pram_attr = {
+ .attr = {
+ .name = "parameter_ram",
+ .mode = 0644,
+ },
+ .size = XTG_PARAM_RAM_SIZE,
+ .read = xtg_pram_read,
+ .write = xtg_pram_write,
+ .mmap = xtg_pram_mmap,
+};
+
+static ssize_t xtg_cram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+
+ off += XTG_COMMAND_RAM_OFFSET;
+ xtg_access_rams(tg, off, count, XTG_READ_RAM, (u32 *)buf);
+
+ return count;
+}
+
+static ssize_t xtg_cram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+ u32 *data = (u32 *)buf;
+
+ if (off >= XTG_COMMAND_RAM_SIZE) {
+ pr_err("Requested Write len exceeds 8K CRAM size\n");
+ return -ENOMEM;
+ }
+
+ /* Program each command */
+ if (count == sizeof(struct xtg_cram)) {
+ struct xtg_cram *cmdp = (struct xtg_cram *)buf;
+ u32 cmd_words[CMD_WDS + EXT_WDS];
+
+ if (!cmdp)
+ return -EINVAL;
+
+ if (cmdp->is_valid_req == VALID_SIG) {
+ /* Prepare command words */
+ xtg_prepare_cmd_words(tg, cmdp, cmd_words);
+ count = XTG_CRAM_BYTES_PER_ENTRY;
+ data = cmd_words;
+
+ /* Maximum command entries are 256 (indices 0-255) */
+ if (cmdp->index >= MAX_NUM_ENTRIES)
+ return -EINVAL;
+
+ /* Calculate the block index */
+ if (cmdp->is_write_block)
+ off = XTG_CMD_RAM_BLOCK_SIZE +
+ cmdp->index * count;
+ else
+ off = cmdp->index * count;
+
+ /* Store the valid command index */
+ if (cmdp->valid_cmd) {
+ if (cmdp->is_write_block)
+ tg->last_wr_valid_idx =
+ cmdp->index;
+ else
+ tg->last_rd_valid_idx =
+ cmdp->index;
+ }
+ }
+ }
+
+ off += XTG_COMMAND_RAM_OFFSET;
+ xtg_access_rams(tg, off, count, XTG_WRITE_RAM, data);
+
+ return count;
+}
+
+static int xtg_cram_mmap(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+ int ret;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_IO;
+
+ ret = remap_pfn_range(vma, vma->vm_start, (tg->phys_base_addr +
+ XTG_COMMAND_RAM_OFFSET) >> PAGE_SHIFT,
+ XTG_COMMAND_RAM_SIZE + XTG_EXTCMD_RAM_SIZE,
+ vma->vm_page_prot);
+ return ret;
+}
+
+static struct bin_attribute xtg_cram_attr = {
+ .attr = {
+ .name = "command_ram",
+ .mode = 0644,
+ },
+ .size = XTG_COMMAND_RAM_SIZE,
+ .read = xtg_cram_read,
+ .write = xtg_cram_write,
+ .mmap = xtg_cram_mmap,
+};
+
+static ssize_t xtg_mram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+
+ off += tg->xtg_mram_offset;
+ xtg_access_rams(tg, off, count, XTG_READ_RAM, (u32 *)buf);
+
+ return count;
+}
+
+static ssize_t xtg_mram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+
+ if (off >= XTG_MASTER_RAM_SIZE) {
+ pr_err("Requested Write len exceeds 8K MRAM size\n");
+ return -ENOMEM;
+ }
+
+ off += tg->xtg_mram_offset;
+ xtg_access_rams(tg, off, count, XTG_WRITE_RAM, (u32 *)buf);
+
+ return count;
+}
+
+static int xtg_mram_mmap(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+ int ret;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_IO;
+
+ ret = remap_pfn_range(vma, vma->vm_start, (tg->phys_base_addr +
+ tg->xtg_mram_offset) >> PAGE_SHIFT,
+ XTG_MASTER_RAM_SIZE,
+ vma->vm_page_prot);
+ return ret;
+}
+
+static struct bin_attribute xtg_mram_attr = {
+ .attr = {
+ .name = "master_ram",
+ .mode = 0644,
+ },
+ .size = XTG_MASTER_RAM_SIZE,
+ .read = xtg_mram_read,
+ .write = xtg_mram_write,
+ .mmap = xtg_mram_mmap,
+};
+
+static struct bin_attribute *xtg_bin_attrs[] = {
+ &xtg_mram_attr,
+ &xtg_pram_attr,
+ &xtg_cram_attr,
+ NULL,
+};
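+
+/*
+ * Besides read()/write(), each RAM can also be mapped directly. A
+ * minimal userspace sketch (assuming fd refers to one of the binary
+ * attributes above):
+ *
+ *	void *ram = mmap(NULL, XTG_MASTER_RAM_SIZE,
+ *			 PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *
+ * The mmap handlers mark the mapping uncached (pgprot_noncached()),
+ * so accesses go straight to the device RAM.
+ */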
+
+static const struct attribute *xtg_attrs[] = {
+ &dev_attr_id.attr,
+ &dev_attr_resource.attr,
+ &dev_attr_master_start_stop.attr,
+ &dev_attr_config_slave_status.attr,
+ &dev_attr_err_en.attr,
+ &dev_attr_err_sts.attr,
+ &dev_attr_intr_en.attr,
+ &dev_attr_last_valid_index.attr,
+ &dev_attr_config_sts.attr,
+ &dev_attr_mram_clear.attr,
+ &dev_attr_cram_clear.attr,
+ &dev_attr_pram_clear.attr,
+ &dev_attr_static_enable.attr,
+ &dev_attr_static_burstlen.attr,
+ &dev_attr_static_transferdone.attr,
+ &dev_attr_stream_transfercnt.attr,
+ &dev_attr_stream_transferlen.attr,
+ &dev_attr_stream_tkts1.attr,
+ &dev_attr_stream_tkts2.attr,
+ &dev_attr_stream_tkts3.attr,
+ &dev_attr_stream_tkts4.attr,
+ &dev_attr_stream_cfg.attr,
+ &dev_attr_stream_enable.attr,
+ &dev_attr_reset_static_transferdone.attr,
+ &dev_attr_loop_enable.attr,
+ NULL,
+};
+
+static const struct attribute_group xtg_attributes = {
+ .attrs = (struct attribute **)xtg_attrs,
+ .bin_attrs = xtg_bin_attrs,
+};
+
+/**
+ * xtg_cmp_intr_handler - Master Complete Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the xtg_dev_info structure
+ *
+ * Return: IRQ_HANDLED always
+ */
+static irqreturn_t xtg_cmp_intr_handler(int irq, void *data)
+{
+ struct xtg_dev_info *tg = (struct xtg_dev_info *)data;
+
+ writel(readl(tg->regs + XTG_ERR_STS_OFFSET) |
+ XTG_ERR_STS_MSTDONE_MASK,
+ tg->regs + XTG_ERR_STS_OFFSET);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * xtg_err_intr_handler - Master/Slave Error Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the xtg_dev_info structure
+ *
+ * Return: IRQ_HANDLED always
+ */
+static irqreturn_t xtg_err_intr_handler(int irq, void *data)
+{
+ struct xtg_dev_info *tg = (struct xtg_dev_info *)data;
+ u32 value;
+
+ value = readl(tg->regs + XTG_ERR_STS_OFFSET) &
+ XTG_ERR_ALL_ERRS_MASK;
+
+ if (value) {
+ dev_err(tg->dev, "Found errors 0x%08x\n", value);
+ writel(readl(tg->regs + XTG_ERR_STS_OFFSET) | value,
+ tg->regs + XTG_ERR_STS_OFFSET);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * xtg_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This is the driver probe routine. It does all the memory
+ * allocation and creates sysfs entries for the device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xtg_probe(struct platform_device *pdev)
+{
+ struct xtg_dev_info *tg;
+ struct device_node *node;
+ struct resource *res;
+ struct device *dev;
+ int err, irq, var;
+
+ tg = devm_kzalloc(&pdev->dev, sizeof(*tg), GFP_KERNEL);
+ if (!tg)
+ return -ENOMEM;
+
+ tg->dev = &(pdev->dev);
+ dev = tg->dev;
+ node = pdev->dev.of_node;
+
+ /* Map the registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tg->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tg->regs))
+ return PTR_ERR(tg->regs);
+
+ /* Save physical base address */
+ tg->phys_base_addr = res->start;
+
+ /* Get the device instance id */
+ err = of_property_read_u32(node, "xlnx,device-id", &tg->id);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to read property");
+ return err;
+ }
+
+ /* Map the error interrupt, if it exists in the device tree. */
+ irq = platform_get_irq_byname(pdev, "err-out");
+ if (irq < 0) {
+ dev_dbg(&pdev->dev, "unable to get err irq");
+ } else {
+ err = devm_request_irq(&pdev->dev, irq, xtg_err_intr_handler,
+ 0, dev_name(&pdev->dev), tg);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to request irq %d", irq);
+ return err;
+ }
+ }
+
+ /* Map the completion interrupt, if it exists in the device tree. */
+ irq = platform_get_irq_byname(pdev, "irq-out");
+ if (irq < 0) {
+ dev_dbg(&pdev->dev, "unable to get cmp irq");
+ } else {
+ err = devm_request_irq(&pdev->dev, irq, xtg_cmp_intr_handler,
+ 0, dev_name(&pdev->dev), tg);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to request irq %d", irq);
+ return err;
+ }
+ }
+
+ tg->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tg->clk)) {
+ if (PTR_ERR(tg->clk) != -ENOENT) {
+ if (PTR_ERR(tg->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "input clock not found\n");
+ return PTR_ERR(tg->clk);
+ }
+ tg->clk = NULL;
+ }
+
+ err = clk_prepare_enable(tg->clk);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return err;
+ }
+
+ /*
+ * Create sysfs file entries for the device
+ */
+ err = sysfs_create_group(&dev->kobj, &xtg_attributes);
+ if (err < 0) {
+ dev_err(tg->dev, "unable to create sysfs entries\n");
+ clk_disable_unprepare(tg->clk);
+ return err;
+ }
+
+ /*
+ * Initialize the write and read valid index values.
+ * The possible range of values for these variables is 0 to 255.
+ */
+ tg->last_wr_valid_idx = -1;
+ tg->last_rd_valid_idx = -1;
+
+ dev_set_drvdata(&pdev->dev, tg);
+
+ /* Update the Proper MasterRam offset */
+ tg->xtg_mram_offset = XTG_MASTER_RAM_OFFSET;
+ var = readl(tg->regs + XTG_MCNTL_OFFSET) >> XTG_MCNTL_REV_SHIFT;
+ if (var == XTG_INIT_VERSION)
+ tg->xtg_mram_offset = XTG_MASTER_RAM_INIT_OFFSET;
+
+ dev_info(&pdev->dev, "Probing xilinx traffic generator success\n");
+
+ return 0;
+}
+
+/**
+ * xtg_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function frees all the resources allocated to the device.
+ *
+ * Return: 0 always
+ */
+static int xtg_remove(struct platform_device *pdev)
+{
+ struct xtg_dev_info *tg;
+ struct device *dev;
+
+ tg = dev_get_drvdata(&pdev->dev);
+ dev = tg->dev;
+ sysfs_remove_group(&dev->kobj, &xtg_attributes);
+ clk_disable_unprepare(tg->clk);
+
+ return 0;
+}
+
+static const struct of_device_id xtg_of_match[] = {
+ { .compatible = "xlnx,axi-traffic-gen", },
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, xtg_of_match);
+
+static struct platform_driver xtg_driver = {
+ .driver = {
+ .name = "xilinx-trafgen",
+ .of_match_table = xtg_of_match,
+ },
+ .probe = xtg_probe,
+ .remove = xtg_remove,
+};
+
+module_platform_driver(xtg_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx Traffic Generator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 482e01ece0b7..554dba0a06f5 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -249,6 +249,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
goto out_put;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
blk_execute_rq(mq->queue, NULL, req, 0);
ret = req_to_mmc_queue_req(req)->drv_op_result;
blk_put_request(req);
@@ -344,6 +345,10 @@ struct mmc_blk_ioc_data {
struct mmc_ioc_cmd ic;
unsigned char *buf;
u64 buf_bytes;
+ unsigned int flags;
+#define MMC_BLK_IOC_DROP BIT(0) /* drop this mrq */
+#define MMC_BLK_IOC_SBC BIT(1) /* use mrq.sbc */
+
struct mmc_rpmb_data *rpmb;
};
@@ -489,7 +494,7 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
}
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
- struct mmc_blk_ioc_data *idata)
+ struct mmc_blk_ioc_data **idatas, int i)
{
struct mmc_command cmd = {}, sbc = {};
struct mmc_data data = {};
@@ -497,10 +502,18 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct scatterlist sg;
int err;
unsigned int target_part;
+ struct mmc_blk_ioc_data *idata = idatas[i];
+ struct mmc_blk_ioc_data *prev_idata = NULL;
if (!card || !md || !idata)
return -EINVAL;
+ if (idata->flags & MMC_BLK_IOC_DROP)
+ return 0;
+
+ if (idata->flags & MMC_BLK_IOC_SBC)
+ prev_idata = idatas[i - 1];
+
/*
* The RPMB accesses comes in from the character device, so we
* need to target these explicitly. Else we just target the
@@ -567,7 +580,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
return err;
}
- if (idata->rpmb) {
+ if (idata->rpmb || prev_idata) {
sbc.opcode = MMC_SET_BLOCK_COUNT;
/*
* We don't do any blockcount validation because the max size
@@ -575,6 +588,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
* 'Reliable Write' bit here.
*/
sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
+ if (prev_idata)
+ sbc.arg = prev_idata->ic.arg;
sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
mrq.sbc = &sbc;
}
@@ -593,6 +608,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
mmc_wait_for_req(card->host, &mrq);
memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
+ if (prev_idata) {
+ memcpy(&prev_idata->ic.response, sbc.resp, sizeof(sbc.resp));
+ if (sbc.error) {
+ dev_err(mmc_dev(card->host), "%s: sbc error %d\n",
+ __func__, sbc.error);
+ return sbc.error;
+ }
+ }
+
if (cmd.error) {
dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
__func__, cmd.error);
@@ -688,6 +712,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
idatas[0] = idata;
req_to_mmc_queue_req(req)->drv_op =
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
blk_execute_rq(mq->queue, NULL, req, 0);
@@ -757,6 +782,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
}
req_to_mmc_queue_req(req)->drv_op =
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = idata;
req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
blk_execute_rq(mq->queue, NULL, req, 0);
@@ -847,9 +873,10 @@ static const struct block_device_operations mmc_bdops = {
static int mmc_blk_part_switch_pre(struct mmc_card *card,
unsigned int part_type)
{
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
- if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+ if ((part_type & mask) == mask) {
if (card->ext_csd.cmdq_en) {
ret = mmc_cmdq_disable(card);
if (ret)
@@ -864,9 +891,10 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card,
static int mmc_blk_part_switch_post(struct mmc_card *card,
unsigned int part_type)
{
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
- if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+ if ((part_type & mask) == mask) {
mmc_retune_unpause(card->host);
if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
ret = mmc_cmdq_enable(card);
@@ -1030,6 +1058,20 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
md->reset_done &= ~type;
}
+static void mmc_blk_check_sbc(struct mmc_queue_req *mq_rq)
+{
+ struct mmc_blk_ioc_data **idata = mq_rq->drv_op_data;
+ int i;
+
+ for (i = 1; i < mq_rq->ioc_count; i++) {
+ if (idata[i - 1]->ic.opcode == MMC_SET_BLOCK_COUNT &&
+ mmc_op_multi(idata[i]->ic.opcode)) {
+ idata[i - 1]->flags |= MMC_BLK_IOC_DROP;
+ idata[i]->flags |= MMC_BLK_IOC_SBC;
+ }
+ }
+}
+
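What the pass above does to a two-command ioctl sequence, traced as an
illustration (hypothetical request, names from the surrounding code):

	/* before mmc_blk_check_sbc():
	 *   idata[0]: opcode = MMC_SET_BLOCK_COUNT,      flags = 0
	 *   idata[1]: opcode = MMC_WRITE_MULTIPLE_BLOCK, flags = 0
	 * after:
	 *   idata[0]: flags |= MMC_BLK_IOC_DROP   (skipped when issued)
	 *   idata[1]: flags |= MMC_BLK_IOC_SBC    (issued with mrq.sbc
	 *             built from idata[0]->ic.arg)
	 * so the pair reaches the card as a single CMD23+CMD25 request. */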
/*
* The non-block commands come back from the block layer after it queued it and
* processed it with all other requests and then they get issued in this
@@ -1057,11 +1099,14 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
if (ret)
break;
}
+
+ mmc_blk_check_sbc(mq_rq);
+
fallthrough;
case MMC_DRV_OP_IOCTL_RPMB:
idata = mq_rq->drv_op_data;
for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
- ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
+ ret = __mmc_blk_ioctl_cmd(card, md, idata, i);
if (ret)
break;
}
@@ -1460,6 +1505,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
blk_mq_requeue_request(req, true);
else
__blk_mq_end_request(req, BLK_STS_OK);
+ } else if (mq->in_recovery) {
+ blk_mq_requeue_request(req, true);
} else {
blk_mq_end_request(req, BLK_STS_OK);
}
@@ -1966,14 +2013,14 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
mmc_blk_urgent_bkops(mq, mqrq);
}
-static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
{
unsigned long flags;
bool put_card;
spin_lock_irqsave(&mq->lock, flags);
- mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+ mq->in_flight[issue_type] -= 1;
put_card = (mmc_tot_in_flight(mq) == 0);
@@ -1985,6 +2032,7 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
{
+ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_request *mrq = &mqrq->brq.mrq;
struct mmc_host *host = mq->card->host;
@@ -2000,7 +2048,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
else
blk_mq_complete_request(req);
- mmc_blk_mq_dec_in_flight(mq, req);
+ mmc_blk_mq_dec_in_flight(mq, issue_type);
}
void mmc_blk_mq_recovery(struct mmc_queue *mq)
@@ -2738,6 +2786,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
if (IS_ERR(req))
return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
blk_execute_rq(mq->queue, NULL, req, 0);
ret = req_to_mmc_queue_req(req)->drv_op_result;
if (ret >= 0) {
@@ -2776,6 +2825,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
goto out_free;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
blk_execute_rq(mq->queue, NULL, req, 0);
err = req_to_mmc_queue_req(req)->drv_op_result;
@@ -3083,4 +3133,3 @@ module_exit(mmc_blk_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
-
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index d7bda1fa0a6e..341adc1816e3 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -564,22 +564,27 @@ int mmc_cqe_recovery(struct mmc_host *host)
host->cqe_ops->cqe_recovery_start(host);
memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = MMC_STOP_TRANSMISSION,
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC,
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
- cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
- mmc_wait_for_cmd(host, &cmd, 0);
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+ mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
+ mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, true);
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_CMDQ_TASK_MGMT;
cmd.arg = 1; /* Discard entire queue */
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
- cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
- err = mmc_wait_for_cmd(host, &cmd, 0);
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
host->cqe_ops->cqe_recovery_finish(host);
+ if (err)
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
mmc_retune_release(host);
return err;
@@ -1145,7 +1150,13 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
mmc_power_cycle(host, ocr);
} else {
bit = fls(ocr) - 1;
- ocr &= 3 << bit;
+ /*
+ * The bit variable represents the highest voltage bit set in
+ * the OCR register.
+ * To keep a range of two values (e.g. 3.2V/3.3V and 3.3V/3.4V),
+ * we must shift the mask '3' by (bit - 1).
+ */
+ ocr &= 3 << (bit - 1);
if (bit != host->ios.vdd)
dev_warn(mmc_dev(host), "exceeding card's volts\n");
}
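
A worked example of the mask fix, assuming a card whose OCR advertises
3.2V-3.3V and 3.3V-3.4V (bits 20 and 21 set):

	/* ocr = 0x00300000; fls(ocr) = 22, so bit = 21.
	 * old: ocr &= 3 << 21;   keeps bits 21-22, where bit 22 was never
	 *      reported by the card.
	 * new: ocr &= 3 << 20;   keeps bits 20-21, the two voltage ranges
	 *      the card actually supports. */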
@@ -1550,6 +1561,11 @@ void mmc_init_erase(struct mmc_card *card)
card->pref_erase = 0;
}
+static bool is_trim_arg(unsigned int arg)
+{
+ return (arg & MMC_TRIM_OR_DISCARD_ARGS) && arg != MMC_DISCARD_ARG;
+}
+
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
unsigned int arg, unsigned int qty)
{
@@ -1881,7 +1897,7 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
!(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
return -EOPNOTSUPP;
- if (mmc_card_mmc(card) && (arg & MMC_TRIM_ARGS) &&
+ if (mmc_card_mmc(card) && is_trim_arg(arg) &&
!(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
return -EOPNOTSUPP;
@@ -1911,7 +1927,7 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
* identified by the card->eg_boundary flag.
*/
rem = card->erase_size - (from % card->erase_size);
- if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
+ if ((arg & MMC_TRIM_OR_DISCARD_ARGS) && card->eg_boundary && nr > rem) {
err = mmc_do_erase(card, from, from + rem - 1, arg);
from += rem;
if ((err) || (to <= from))
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 32801639e0be..2d63c0f188e1 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -570,6 +570,7 @@ EXPORT_SYMBOL(mmc_remove_host);
*/
void mmc_free_host(struct mmc_host *host)
{
+ cancel_delayed_work_sync(&host->detect);
mmc_pwrseq_free(host);
put_device(&host->class_dev);
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index ed939bb2f700..8433af39c27e 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -991,10 +991,12 @@ static int mmc_select_bus_width(struct mmc_card *card)
static unsigned ext_csd_bits[] = {
EXT_CSD_BUS_WIDTH_8,
EXT_CSD_BUS_WIDTH_4,
+ EXT_CSD_BUS_WIDTH_1,
};
static unsigned bus_widths[] = {
MMC_BUS_WIDTH_8,
MMC_BUS_WIDTH_4,
+ MMC_BUS_WIDTH_1,
};
struct mmc_host *host = card->host;
unsigned idx, bus_width = 0;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index d495ba2f368c..ba0731ab1467 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -450,8 +450,8 @@ int mmc_switch_status(struct mmc_card *card)
return __mmc_switch_status(card, true);
}
-static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
- bool send_status, bool retry_crc_err)
+int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ bool send_status, bool retry_crc_err)
{
struct mmc_host *host = card->host;
int err;
@@ -504,6 +504,7 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
return 0;
}
+EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
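
With the symbol exported, callers outside mmc_ops.c can poll DAT0 after
a busy-signalling command instead of trusting the R1b completion alone;
a hedged sketch mirroring the mmc_cqe_recovery() hunk earlier in this
patch:

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = (MMC_RSP_R1B | MMC_CMD_AC) & ~MMC_RSP_CRC;
	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
	mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	/* wait until the card actually releases DAT0 */
	mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, true);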
/**
* __mmc_switch - modify EXT_CSD register
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 8f2f9475716d..4f36a96b9d21 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -31,6 +31,8 @@ int mmc_can_ext_csd(struct mmc_card *card);
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
int mmc_switch_status(struct mmc_card *card);
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
+int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ bool send_status, bool retry_crc_err);
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms, unsigned char timing,
bool use_busy_signal, bool send_status, bool retry_crc_err);
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 492dd4596314..cd64e0f23ae5 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -3167,7 +3167,8 @@ static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
struct mmc_test_dbgfs_file *df;
if (card->debugfs_root)
- debugfs_create_file(name, mode, card->debugfs_root, card, fops);
+ file = debugfs_create_file(name, mode, card->debugfs_root,
+ card, fops);
df = kmalloc(sizeof(*df), GFP_KERNEL);
if (!df) {
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index 3dba15bccce2..9a253324c95a 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -91,6 +91,20 @@ static const struct mmc_fixup mmc_blk_fixups[] = {
MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
/*
+ * Kingston EMMC04G-M627 advertises TRIM but does not seem to
+ * support being used to offload WRITE_ZEROES.
+ */
+ MMC_FIXUP("M62704", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+
+ /*
+ * Micron MTFC4GACAJCN-1M advertises TRIM but does not seem to
+ * support being used to offload WRITE_ZEROES.
+ */
+ MMC_FIXUP("Q2J54A", CID_MANFID_MICRON, 0x014e, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+
+ /*
 * On some Kingston eMMCs, performing trim can occasionally result in
 * unrecoverable data corruption due to a firmware bug.
*/
diff --git a/drivers/mmc/core/regulator.c b/drivers/mmc/core/regulator.c
index b6febbcf8978..327032cbbb1f 100644
--- a/drivers/mmc/core/regulator.c
+++ b/drivers/mmc/core/regulator.c
@@ -258,3 +258,44 @@ int mmc_regulator_get_supply(struct mmc_host *mmc)
return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
+
+/**
+ * mmc_regulator_enable_vqmmc - enable VQMMC regulator for a host
+ * @mmc: the host to regulate
+ *
+ * Returns 0 or errno. Enables the regulator for vqmmc.
+ * Keeps track of the enable status for ensuring that calls to
+ * regulator_enable/disable are balanced.
+ */
+int mmc_regulator_enable_vqmmc(struct mmc_host *mmc)
+{
+ int ret = 0;
+
+ if (!IS_ERR(mmc->supply.vqmmc) && !mmc->vqmmc_enabled) {
+ ret = regulator_enable(mmc->supply.vqmmc);
+ if (ret < 0)
+ dev_err(mmc_dev(mmc), "enabling vqmmc regulator failed\n");
+ else
+ mmc->vqmmc_enabled = true;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_enable_vqmmc);
+
+/**
+ * mmc_regulator_disable_vqmmc - disable VQMMC regulator for a host
+ * @mmc: the host to regulate
+ *
+ * Returns 0 or errno. Disables the regulator for vqmmc.
+ * Keeps track of the enable status for ensuring that calls to
+ * regulator_enable/disable are balanced.
+ */
+void mmc_regulator_disable_vqmmc(struct mmc_host *mmc)
+{
+ if (!IS_ERR(mmc->supply.vqmmc) && mmc->vqmmc_enabled) {
+ regulator_disable(mmc->supply.vqmmc);
+ mmc->vqmmc_enabled = false;
+ }
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_disable_vqmmc);
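
A minimal usage sketch, assuming a hypothetical host driver "foo": the
helpers keep regulator_enable()/regulator_disable() balanced even if
set_ios() delivers MMC_POWER_UP or MMC_POWER_OFF more than once:

	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
	{
		switch (ios->power_mode) {
		case MMC_POWER_UP:
			mmc_regulator_enable_vqmmc(mmc);  /* no-op if already on */
			break;
		case MMC_POWER_OFF:
			mmc_regulator_disable_vqmmc(mmc); /* no-op if already off */
			break;
		default:
			break;
		}
	}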
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c6d7a0adde0d..5271da625abb 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -799,7 +799,8 @@ try_again:
* the CCS bit is set as well. We deliberately deviate from the spec in
* regards to this, which allows UHS-I to be supported for SDSC cards.
*/
- if (!mmc_host_is_spi(host) && rocr && (*rocr & 0x01000000)) {
+ if (!mmc_host_is_spi(host) && (ocr & SD_OCR_S18R) &&
+ rocr && (*rocr & SD_ROCR_S18A)) {
err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
@@ -1028,6 +1029,23 @@ retry:
goto free_card;
/*
+ * If the card is already in 1.8V and the system has no mechanism to
+ * power cycle the SD card, the card will respond with no 1.8V support
+ * in the OCR response. The check below confirms whether that condition
+ * has occurred and sets the rocr flag accordingly.
+ *
+ * If the host supports UHS modes and the card complies with SD
+ * specification 3.0 or above, it can operate in UHS modes.
+ */
+ if (mmc_host_uhs(host) && card->scr.sda_spec3 &&
+ card->sw_caps.sd3_bus_mode >= SD_MODE_UHS_SDR50) {
+ rocr |= SD_ROCR_S18A;
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ if (err)
+ goto free_card;
+ }
+
+ /*
* If the card has not been power cycled, it may still be using 1.8V
* signaling. Detect that situation and try to initialize a UHS-I (1.8V)
* transfer mode.
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 9e0791332ef3..e614fd82a32a 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -1014,8 +1014,14 @@ static int mmc_sdio_resume(struct mmc_host *host)
}
err = mmc_sdio_reinit_card(host);
} else if (mmc_card_wake_sdio_irq(host)) {
- /* We may have switched to 1-bit mode during suspend */
+ /*
+ * We may have switched to 1-bit mode during suspend;
+ * hold retuning here, because tuning only supports
+ * 4-bit or 8-bit mode.
+ */
+ mmc_retune_hold_now(host);
err = sdio_enable_4bit_bus(host->card);
+ mmc_retune_release(host);
}
if (err)
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 3cc928282af7..69eeff7a7d16 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -266,7 +266,14 @@ static void sdio_release_func(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
- sdio_free_func_cis(func);
+ if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO))
+ sdio_free_func_cis(func);
+
+ /*
+ * We have now removed the link to the tuples in the
+ * card structure, so remove the reference.
+ */
+ put_device(&func->card->dev);
kfree(func->info);
kfree(func->tmpbuf);
@@ -298,6 +305,12 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
device_initialize(&func->dev);
+ /*
+ * We may link to tuples in the card structure, so
+ * we need to make sure we hold a reference to it.
+ */
+ get_device(&func->card->dev);
+
func->dev.parent = &card->dev;
func->dev.bus = &sdio_bus_type;
func->dev.release = sdio_release_func;
@@ -351,10 +364,9 @@ int sdio_add_func(struct sdio_func *func)
*/
void sdio_remove_func(struct sdio_func *func)
{
- if (!sdio_func_present(func))
- return;
+ if (sdio_func_present(func))
+ device_del(&func->dev);
- device_del(&func->dev);
of_node_put(func->dev.of_node);
put_device(&func->dev);
}
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 9a5aaac29099..e45053284fdc 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -384,12 +384,6 @@ int sdio_read_func_cis(struct sdio_func *func)
return ret;
/*
- * Since we've linked to tuples in the card structure,
- * we must make sure we have a reference to it.
- */
- get_device(&func->card->dev);
-
- /*
* Vendor/device id is optional for function CIS, so
* copy it from the card structure as needed.
*/
@@ -414,11 +408,5 @@ void sdio_free_func_cis(struct sdio_func *func)
}
func->tuples = NULL;
-
- /*
- * We have now removed the link to the tuples in the
- * card structure, so remove the reference.
- */
- put_device(&func->card->dev);
}
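
Net effect of this hunk together with the sdio_bus.c change above,
traced for illustration: the card reference is now taken and dropped
exactly once per function lifetime, whether or not a CIS was read:

	sdio_alloc_func()    ->  get_device(&func->card->dev)
	...
	sdio_release_func()  ->  sdio_free_func_cis()  (standard SDIO only)
	                     ->  put_device(&func->card->dev)

Previously the get/put pair lived in the CIS read/free paths, so a
function released without a prior sdio_read_func_cis() dropped a
reference it had never taken.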
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 582ec3d720f6..3370c5747c8b 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -63,11 +63,15 @@ int mmc_gpio_alloc(struct mmc_host *host)
int mmc_gpio_get_ro(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
+ int cansleep;
if (!ctx || !ctx->ro_gpio)
return -ENOSYS;
- return gpiod_get_value_cansleep(ctx->ro_gpio);
+ cansleep = gpiod_cansleep(ctx->ro_gpio);
+ return cansleep ?
+ gpiod_get_value_cansleep(ctx->ro_gpio) :
+ gpiod_get_value(ctx->ro_gpio);
}
EXPORT_SYMBOL(mmc_gpio_get_ro);
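
Why the run-time dispatch matters (a hedged note): gpiod_get_value()
warns when used on a descriptor whose controller can sleep, while the
_cansleep variant must not be called from atomic context. Selecting the
accessor per descriptor keeps both kinds of RO GPIOs usable:

	if (gpiod_cansleep(desc))
		val = gpiod_get_value_cansleep(desc);	/* may sleep */
	else
		val = gpiod_get_value(desc);		/* atomic-safe */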
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 1b4a40d910cf..f0684c6ed8d7 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -466,11 +466,12 @@ config MMC_ALCOR
of Alcor Micro PCI-E card reader
config MMC_AU1X
- tristate "Alchemy AU1XX0 MMC Card Interface support"
+ bool "Alchemy AU1XX0 MMC Card Interface support"
depends on MIPS_ALCHEMY
+ depends on MMC=y
help
This selects the AMD Alchemy(R) Multimedia card interface.
- If you have a Alchemy platform with a MMC slot, say Y or M here.
+ If you have an Alchemy platform with an MMC slot, say Y here.
If unsure, say N.
@@ -995,13 +996,14 @@ config MMC_SDHCI_XENON
config MMC_SDHCI_OMAP
tristate "TI SDHCI Controller Support"
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
depends on MMC_SDHCI_PLTFM && OF
select THERMAL
imply TI_SOC_THERMAL
help
This selects the Secure Digital Host Controller Interface (SDHCI)
- support present in TI's DRA7 SOCs. The controller supports
- SD/MMC/SDIO devices.
+ support present in TI's Keystone/OMAP2+/DRA7 SOCs. The controller
+ supports SD/MMC/SDIO devices.
If you have a controller with this interface, say Y or M here.
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
index 026ca9194ce5..a8ec2e6fefa8 100644
--- a/drivers/mmc/host/alcor.c
+++ b/drivers/mmc/host/alcor.c
@@ -1114,7 +1114,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
alcor_hw_init(host);
dev_set_drvdata(&pdev->dev, host);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto free_host;
+
return 0;
free_host:
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index c26fbe5f2222..4d8f2778d8f9 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1812,7 +1812,6 @@ static void atmci_tasklet_func(unsigned long priv)
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
state = STATE_WAITING_NOTBUSY;
} else if (host->mrq->stop) {
- atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
atmci_send_stop_cmd(host, data);
state = STATE_SENDING_STOP;
} else {
@@ -1845,8 +1844,6 @@ static void atmci_tasklet_func(unsigned long priv)
* command to send.
*/
if (host->mrq->stop) {
- atmci_writel(host, ATMCI_IER,
- ATMCI_CMDRDY);
atmci_send_stop_cmd(host, data);
state = STATE_SENDING_STOP;
} else {
@@ -2217,6 +2214,7 @@ static int atmci_init_slot(struct atmel_mci *host,
{
struct mmc_host *mmc;
struct atmel_mci_slot *slot;
+ int ret;
mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
if (!mmc)
@@ -2300,11 +2298,13 @@ static int atmci_init_slot(struct atmel_mci *host,
host->slot[id] = slot;
mmc_regulator_get_supply(mmc);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret) {
+ mmc_free_host(mmc);
+ return ret;
+ }
if (gpio_is_valid(slot->detect_pin)) {
- int ret;
-
timer_setup(&slot->detect_timer, atmci_detect_change, 0);
ret = request_irq(gpio_to_irq(slot->detect_pin),
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index bc8aeb47a7b4..9c4a67f4195e 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1116,8 +1116,9 @@ out5:
if (host->platdata && host->platdata->cd_setup &&
!(mmc->caps & MMC_CAP_NEEDS_POLL))
host->platdata->cd_setup(mmc, 0);
-out_clk:
+
clk_disable_unprepare(host->clk);
+out_clk:
clk_put(host->clk);
out_irq:
free_irq(host->irq, host);
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 148414d7f0c9..d20943e43312 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1408,8 +1408,8 @@ static int bcm2835_probe(struct platform_device *pdev)
host->max_clk = clk_get_rate(clk);
host->irq = platform_get_irq(pdev, 0);
- if (host->irq <= 0) {
- ret = -EINVAL;
+ if (host->irq < 0) {
+ ret = host->irq;
goto err;
}
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 22aded1065ae..2245452a44c8 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -288,6 +288,7 @@ static int octeon_mmc_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Error populating slots\n");
octeon_mmc_set_shared_power(host, 0);
+ of_node_put(cn);
goto error;
}
i++;
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index eee08d81b242..f79806e31e7e 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -138,8 +138,10 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
continue;
ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
- if (ret)
+ if (ret) {
+ of_node_put(child_node);
goto error;
+ }
}
i++;
}
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index ec56464eaf23..10b36b156562 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -878,8 +878,8 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
ret = cqhci_tasks_cleared(cq_host);
if (!ret)
- pr_debug("%s: cqhci: Failed to clear tasks\n",
- mmc_hostname(mmc));
+ pr_warn("%s: cqhci: Failed to clear tasks\n",
+ mmc_hostname(mmc));
return ret;
}
@@ -912,7 +912,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
ret = cqhci_halted(cq_host);
if (!ret)
- pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+ pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
return ret;
}
@@ -920,10 +920,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
/*
* After halting we expect to be able to use the command line. We interpret the
* failure to halt to mean the data lines might still be in use (and the upper
- * layers will need to send a STOP command), so we set the timeout based on a
- * generous command timeout.
+ * layers will need to send a STOP command); however, failing to halt complicates
+ * the recovery, so set a timeout that would reasonably allow I/O to complete.
*/
-#define CQHCI_START_HALT_TIMEOUT 5
+#define CQHCI_START_HALT_TIMEOUT 500
static void cqhci_recovery_start(struct mmc_host *mmc)
{
@@ -1011,28 +1011,28 @@ static void cqhci_recovery_finish(struct mmc_host *mmc)
ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
- ok = false;
-
/*
* The specification contradicts itself, by saying that tasks cannot be
* cleared if CQHCI does not halt, but if CQHCI does not halt, it should
* be disabled/re-enabled, but not to disable before clearing tasks.
* Have a go anyway.
*/
- if (!ok) {
- pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
- cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
- cqcfg &= ~CQHCI_ENABLE;
- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
- cqcfg |= CQHCI_ENABLE;
- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
- /* Be sure that there are no tasks */
- ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
- ok = false;
- WARN_ON(!ok);
- }
+ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+ ok = false;
+
+ /* Disable to make sure tasks really are cleared */
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg |= CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+
+ if (!ok)
+ cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
cqhci_recover_mrqs(cq_host);
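
The recovery sequence after this change, traced for illustration:

	/* 1. cqhci_halt()             - try to halt the controller
	 * 2. cqhci_clear_all_tasks()  - attempted even if the halt failed
	 * 3. CQHCI_CFG &= ~CQHCI_ENABLE, then CQHCI_CFG |= CQHCI_ENABLE
	 *                             - disable/re-enable, now done
	 *                               unconditionally to force-clear tasks
	 * 4. cqhci_halt()             - re-halt after the re-enable
	 * 5. clear tasks once more only if step 1 or 2 reported failure */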
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 9044faf0050a..8930dbd13c65 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -803,7 +803,6 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
- cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */
meson_mmc_set_response_bits(cmd, &cmd_cfg);
@@ -973,11 +972,8 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
if (data && !cmd->error)
data->bytes_xfered = data->blksz * data->blocks;
- if (meson_mmc_bounce_buf_read(data) ||
- meson_mmc_get_next_command(cmd))
- ret = IRQ_WAKE_THREAD;
- else
- ret = IRQ_HANDLED;
+
+ return IRQ_WAKE_THREAD;
}
out:
@@ -989,9 +985,6 @@ out:
writel(start, host->regs + SD_EMMC_START);
}
- if (ret == IRQ_HANDLED)
- meson_mmc_request_done(host->mmc, cmd->mrq);
-
return ret;
}
@@ -1289,7 +1282,9 @@ static int meson_mmc_probe(struct platform_device *pdev)
}
mmc->ops = &meson_mmc_ops;
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto err_free_irq;
return 0;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 7083d8ddd495..7874b266a444 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1420,7 +1420,7 @@ static int mmc_spi_probe(struct spi_device *spi)
status = mmc_add_host(mmc);
if (status != 0)
- goto fail_add_host;
+ goto fail_glue_init;
/*
* Index 0 is card detect
@@ -1428,7 +1428,7 @@ static int mmc_spi_probe(struct spi_device *spi)
*/
status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1, NULL);
if (status == -EPROBE_DEFER)
- goto fail_add_host;
+ goto fail_gpiod_request;
if (!status) {
/*
* The platform has a CD GPIO signal that may support
@@ -1443,7 +1443,7 @@ static int mmc_spi_probe(struct spi_device *spi)
/* Index 1 is write protect/read only */
status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL);
if (status == -EPROBE_DEFER)
- goto fail_add_host;
+ goto fail_gpiod_request;
if (!status)
has_ro = true;
@@ -1457,7 +1457,7 @@ static int mmc_spi_probe(struct spi_device *spi)
? ", cd polling" : "");
return 0;
-fail_add_host:
+fail_gpiod_request:
mmc_remove_host(mmc);
fail_glue_init:
if (host->dma_dev)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 7e4bc9124efd..1e5e2442b748 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2079,7 +2079,9 @@ static int mmci_probe(struct amba_device *dev,
pm_runtime_set_autosuspend_delay(&dev->dev, 50);
pm_runtime_use_autosuspend(&dev->dev);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto clk_disable;
pm_runtime_put(&dev->dev);
return 0;
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 5c81dc7371db..0c392b41a858 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -111,8 +111,8 @@
#define CLK_DIV_MASK 0x7f
/* REG_BUS_WIDTH */
-#define BUS_WIDTH_8 BIT(2)
-#define BUS_WIDTH_4 BIT(1)
+#define BUS_WIDTH_4_SUPPORT BIT(3)
+#define BUS_WIDTH_4 BIT(2)
#define BUS_WIDTH_1 BIT(0)
#define MMC_VDD_360 23
@@ -339,13 +339,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
return;
}
for (len = 0; len < remain && len < host->fifo_width;) {
- /* SCR data must be read in big endian. */
- if (data->mrq->cmd->opcode == SD_APP_SEND_SCR)
- *sgp = ioread32be(host->base +
- REG_DATA_WINDOW);
- else
- *sgp = ioread32(host->base +
- REG_DATA_WINDOW);
+ *sgp = ioread32(host->base + REG_DATA_WINDOW);
sgp++;
len += 4;
}
@@ -527,9 +521,6 @@ static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_BUS_WIDTH_4:
writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
break;
- case MMC_BUS_WIDTH_8:
- writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH);
- break;
default:
writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
break;
@@ -646,16 +637,8 @@ static int moxart_probe(struct platform_device *pdev)
dmaengine_slave_config(host->dma_chan_rx, &cfg);
}
- switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) {
- case 1:
+ if (readl(host->base + REG_BUS_WIDTH) & BUS_WIDTH_4_SUPPORT)
mmc->caps |= MMC_CAP_4_BIT_DATA;
- break;
- case 2:
- mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
- break;
- default:
- break;
- }
writel(0, host->base + REG_INTERRUPT_MASK);
@@ -671,7 +654,9 @@ static int moxart_probe(struct platform_device *pdev)
goto out;
dev_set_drvdata(dev, mmc);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto out;
dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 1254a5650cff..2673890c7690 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -2249,7 +2249,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
- ret = -EINVAL;
+ ret = host->irq;
goto host_free;
}
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 74a0a7fbbf7f..0dfcf7bea9ff 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -696,17 +696,15 @@ static int mvsd_probe(struct platform_device *pdev)
struct mmc_host *mmc = NULL;
struct mvsd_host *host = NULL;
const struct mbus_dram_target_info *dram;
- struct resource *r;
int ret, irq;
if (!np) {
dev_err(&pdev->dev, "no DT node\n");
return -ENODEV;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!r || irq < 0)
- return -ENXIO;
+ if (irq < 0)
+ return irq;
mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
if (!mmc) {
@@ -758,7 +756,7 @@ static int mvsd_probe(struct platform_device *pdev)
spin_lock_init(&host->lock);
- host->base = devm_ioremap_resource(&pdev->dev, r);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto out;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 011b59a3602e..9165af4760e3 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -1158,7 +1158,9 @@ static int mxcmci_probe(struct platform_device *pdev)
timer_setup(&host->watchdog, mxcmci_watchdog, 0);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto out_free_dma;
return 0;
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index d74e73c95fdf..80574040d8fb 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1344,7 +1344,7 @@ static int mmc_omap_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return -ENXIO;
+ return irq;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->virt_base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index d0df054b0b47..aef2253ed5c8 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1843,9 +1843,11 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
- if (res == NULL || irq < 0)
+ if (!res)
return -ENXIO;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
@@ -1998,7 +2000,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
if (!ret)
mmc->caps |= MMC_CAP_SDIO_IRQ;
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto err_irq;
if (mmc_pdata(host)->name != NULL) {
ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 953e7457137a..7f96df4d2a87 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -648,7 +648,7 @@ static int pxamci_probe(struct platform_device *pdev)
ret = pxamci_of_init(pdev, mmc);
if (ret)
- return ret;
+ goto out;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -672,7 +672,7 @@ static int pxamci_probe(struct platform_device *pdev)
ret = pxamci_init_ocr(host);
if (ret < 0)
- return ret;
+ goto out;
mmc->caps = 0;
host->cmdat = 0;
@@ -761,7 +761,12 @@ static int pxamci_probe(struct platform_device *pdev)
dev_warn(dev, "gpio_ro and get_ro() both defined\n");
}
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret) {
+ if (host->pdata && host->pdata->exit)
+ host->pdata->exit(dev, mmc);
+ goto out;
+ }
return 0;
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 81d0dfe553a8..3261560bede4 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -1338,6 +1338,7 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
#ifdef RTSX_USB_USE_LEDS_CLASS
int err;
#endif
+ int ret;
ucr = usb_get_intfdata(to_usb_interface(pdev->dev.parent));
if (!ucr)
@@ -1374,7 +1375,15 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
INIT_WORK(&host->led_work, rtsx_usb_update_led);
#endif
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret) {
+#ifdef RTSX_USB_USE_LEDS_CLASS
+ led_classdev_unregister(&host->led);
+#endif
+ mmc_free_host(mmc);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
return 0;
}
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 0dc8eafdc81d..a02f3538d561 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -835,7 +835,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
host->ops = &sdhci_acpi_ops_dflt;
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
- err = -EINVAL;
+ err = host->irq;
goto err_free;
}
diff --git a/drivers/mmc/host/sdhci-cqhci.h b/drivers/mmc/host/sdhci-cqhci.h
new file mode 100644
index 000000000000..cf8e7ba71bbd
--- /dev/null
+++ b/drivers/mmc/host/sdhci-cqhci.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2022 The Chromium OS Authors
+ *
+ * Support that applies to the combination of SDHCI and CQHCI, while not
+ * expressing a dependency between the two modules.
+ */
+
+#ifndef __MMC_HOST_SDHCI_CQHCI_H__
+#define __MMC_HOST_SDHCI_CQHCI_H__
+
+#include "cqhci.h"
+#include "sdhci.h"
+
+static inline void sdhci_and_cqhci_reset(struct sdhci_host *host, u8 mask)
+{
+ if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
+ host->mmc->cqe_private)
+ cqhci_deactivate(host->mmc);
+
+ sdhci_reset(host, mask);
+}
+
+#endif /* __MMC_HOST_SDHCI_CQHCI_H__ */
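
A usage sketch for the new helper, with hypothetical "foo" names: a glue
driver wires it into its sdhci_ops so that a full controller reset also
deactivates CQHCI (the sdhci-of-arasan hunk below does the same):

	static void foo_sdhci_reset(struct sdhci_host *host, u8 mask)
	{
		/* deactivates the CQE first when mask includes SDHCI_RESET_ALL */
		sdhci_and_cqhci_reset(host, mask);
	}

	static const struct sdhci_ops foo_sdhci_ops = {
		.reset = foo_sdhci_reset,
	};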
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d97c19ef7583..762288c6d30c 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -89,6 +89,8 @@
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
#define ESDHC_TUNING_START_TAP_DEFAULT 0x1
#define ESDHC_TUNING_START_TAP_MASK 0x7f
+#define ESDHC_TUNING_CMD_CRC_CHECK_DISABLE (1 << 7)
+#define ESDHC_TUNING_STEP_DEFAULT 0x1
#define ESDHC_TUNING_STEP_MASK 0x00070000
#define ESDHC_TUNING_STEP_SHIFT 16
@@ -151,8 +153,8 @@
#define ESDHC_FLAG_HS400 BIT(9)
/*
* The IP has errata ERR010450
- * uSDHC: Due to the I/O timing limit, for SDR mode, SD card clock can't
- * exceed 150MHz, for DDR mode, SD card clock can't exceed 45MHz.
+ * uSDHC: At 1.8V due to the I/O timing limit, for SDR mode, SD card
+ * clock can't exceed 150MHz, for DDR mode, SD card clock can't exceed 45MHz.
*/
#define ESDHC_FLAG_ERR010450 BIT(10)
/* The IP supports HS400ES mode */
@@ -775,7 +777,8 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
| ESDHC_CLOCK_MASK);
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
- if (imx_data->socdata->flags & ESDHC_FLAG_ERR010450) {
+ if ((imx_data->socdata->flags & ESDHC_FLAG_ERR010450) &&
+ (!(host->quirks2 & SDHCI_QUIRK2_NO_1_8_V))) {
unsigned int max_clock;
max_clock = imx_data->is_ddr ? 45000000 : 150000000;
@@ -1180,7 +1183,8 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
- int tmp;
+ struct cqhci_host *cq_host = host->mmc->cqe_private;
+ u32 tmp;
if (esdhc_is_usdhc(imx_data)) {
/*
@@ -1233,18 +1237,37 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
- tmp |= ESDHC_STD_TUNING_EN |
- ESDHC_TUNING_START_TAP_DEFAULT;
- if (imx_data->boarddata.tuning_start_tap) {
- tmp &= ~ESDHC_TUNING_START_TAP_MASK;
+ tmp |= ESDHC_STD_TUNING_EN;
+
+ /*
+ * ROM code or bootloader may config the start tap
+ * and step, unmask them first.
+ */
+ tmp &= ~(ESDHC_TUNING_START_TAP_MASK | ESDHC_TUNING_STEP_MASK);
+ if (imx_data->boarddata.tuning_start_tap)
tmp |= imx_data->boarddata.tuning_start_tap;
- }
+ else
+ tmp |= ESDHC_TUNING_START_TAP_DEFAULT;
if (imx_data->boarddata.tuning_step) {
- tmp &= ~ESDHC_TUNING_STEP_MASK;
tmp |= imx_data->boarddata.tuning_step
<< ESDHC_TUNING_STEP_SHIFT;
+ } else {
+ tmp |= ESDHC_TUNING_STEP_DEFAULT
+ << ESDHC_TUNING_STEP_SHIFT;
}
+
+ /*
+ * Disable the CMD CRC check for tuning; otherwise a delay would
+ * be needed after every tuning command. The hardware standard
+ * tuning logic advances to the next step as soon as it detects a
+ * CMD CRC error, without waiting for the card to finish sending
+ * the tuning data, and raises the buffer read ready interrupt
+ * immediately. If the uSDHC then sends the next tuning command,
+ * some eMMC cards get stuck and stop responding, blocking the
+ * tuning procedure, or the first command after tuning never gets
+ * a response.
+ */
+ tmp |= ESDHC_TUNING_CMD_CRC_CHECK_DISABLE;
writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
} else if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
/*
@@ -1256,6 +1279,21 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
tmp &= ~ESDHC_STD_TUNING_EN;
writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
}
+
+ /*
+ * On i.MX8MM we run a dual-Linux setup, with the 1st Linux using the
+ * SD card as rootfs storage and the 2nd Linux using the eMMC. The 1st
+ * Linux configures power/clock for the 2nd Linux.
+ *
+ * When the 1st Linux destroys and then restarts the 2nd Linux while it
+ * is booting into its rootfs stage, we observed an SDHCI dump. After
+ * clearing the pending interrupts and halting CQCTL, the issue is gone.
+ */
+ if (cq_host) {
+ tmp = cqhci_readl(cq_host, CQHCI_IS);
+ cqhci_writel(cq_host, tmp, CQHCI_IS);
+ cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
+ }
}
}
@@ -1302,7 +1340,7 @@ static void esdhc_cqe_enable(struct mmc_host *mmc)
* system resume back.
*/
cqhci_writel(cq_host, 0, CQHCI_CTL);
- if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT)
+ if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
dev_err(mmc_dev(host->mmc),
"failed to exit halt state when enable CQE\n");
@@ -1571,8 +1609,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (err)
goto disable_ahb_clk;
- host->tuning_delay = 1;
-
sdhci_esdhc_imx_hwinit(host);
err = sdhci_add_host(host);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 8ab963055238..5e6f6c951fd4 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1755,6 +1755,7 @@ static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
static const struct of_device_id sdhci_msm_dt_match[] = {
{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
+ {.compatible = "qcom,sdm670-sdhci", .data = &sdm845_sdhci_var},
{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
{},
};
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index dd10f7abf5a7..0ae40595d925 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -18,20 +18,53 @@
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/phy/phy.h>
+#include <linux/mmc/mmc.h>
+#include <linux/soc/xilinx/zynqmp/tap_delays.h>
+#include <linux/soc/xilinx/zynqmp/fw.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/slab.h>
#include "cqhci.h"
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+#define SDHCI_ARASAN_ITAPDLY_REGISTER 0xF0F8
+#define SDHCI_ARASAN_OTAPDLY_REGISTER 0xF0FC
+
#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
#define VENDOR_ENHANCED_STROBE BIT(0)
+#define CLK_CTRL_TIMEOUT_SHIFT 16
+#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT)
+#define CLK_CTRL_TIMEOUT_MIN_EXP 13
+#define SD_CLK_25_MHZ 25000000
+#define SD_CLK_19_MHZ 19000000
+#define MAX_TUNING_LOOP 40
#define PHY_CLK_TOO_SLOW_HZ 400000
+#define SDHCI_ITAPDLY_CHGWIN 0x200
+#define SDHCI_ITAPDLY_ENABLE 0x100
+#define SDHCI_OTAPDLY_ENABLE 0x40
+
+static u32 zynqmp_itap_delays[MMC_TIMING_MMC_HS400+1] = {0x0, 0x15, 0x15, 0x0,
+ 0x15, 0x0, 0x0, 0x3D, 0x12, 0x0, 0x0};
+static u32 zynqmp_otap_delays[MMC_TIMING_MMC_HS400+1] = {0x0, 0x5, 0x6, 0x0,
+ 0x5, 0x3, 0x3, 0x4, 0x6, 0x3, 0x0};
+
+static u32 versal_itap_delays[MMC_TIMING_MMC_HS400 + 1] = {0x0, 0x2C, 0x2C,
+ 0x0, 0x2C, 0x0, 0x0, 0x36, 0x1E, 0x0, 0x0};
+static u32 versal_otap_delays[MMC_TIMING_MMC_HS400 + 1] = {0x0, 0x5, 0x4,
+ 0x0, 0x4, 0x3, 0x2, 0x3, 0x5, 0x2, 0x0};
+
+#define MMC_BANK2 0x2
+
/*
* On some SoCs the syscon area has a feature where the upper 16-bits of
* each 32-bit register act as a write mask for the lower 16-bits. This allows
@@ -57,13 +90,13 @@ struct sdhci_arasan_soc_ctl_field {
/**
* struct sdhci_arasan_soc_ctl_map - Map in syscon to corecfg registers
*
- * It's up to the licensee of the Arsan IP block to make these available
- * somewhere if needed. Presumably these will be scattered somewhere that's
- * accessible via the syscon API.
- *
* @baseclkfreq: Where to find corecfg_baseclkfreq
* @clockmultiplier: Where to find corecfg_clockmultiplier
* @hiword_update: If true, use HIWORD_UPDATE to access the syscon
+ *
+ * It's up to the licensee of the Arasan IP block to make these available
+ * somewhere if needed. Presumably these will be scattered somewhere that's
+ * accessible via the syscon API.
*/
struct sdhci_arasan_soc_ctl_map {
struct sdhci_arasan_soc_ctl_field baseclkfreq;
@@ -72,20 +105,33 @@ struct sdhci_arasan_soc_ctl_map {
};
/**
- * struct sdhci_arasan_data
+ * struct sdhci_arasan_data - Structure for Arasan Controller Data
+ *
* @host: Pointer to the main SDHCI host structure.
* @clk_ahb: Pointer to the AHB clock
* @phy: Pointer to the generic phy
+ * @mio_bank: MIO Bank Number for ZynqMP SD Controller pins.
+ * @device_id: ZynqMP SD controller ID.
+ * @itapdly: Input Clock Delay for SD card clock.
+ * @otapdly: Output Clock Delay for SD card clock.
* @is_phy_on: True if the PHY is on; false if not.
+ * @has_cqe: Flag indicating support for Command Queueing.
* @sdcardclk_hw: Struct for the clock we might provide to a PHY.
* @sdcardclk: Pointer to normal 'struct clock' for sdcardclk_hw.
* @soc_ctl_base: Pointer to regmap for syscon for soc_ctl registers.
+ * @pinctrl: Pointer to pin control structure.
+ * @pins_default: Pointer to pinctrl state for a device.
* @soc_ctl_map: Map to get offsets into soc_ctl registers.
+ * @quirks: Arasan deviations from spec.
*/
struct sdhci_arasan_data {
struct sdhci_host *host;
struct clk *clk_ahb;
struct phy *phy;
+ u32 mio_bank;
+ u32 device_id;
+ u32 itapdly[MMC_TIMING_MMC_HS400 + 1];
+ u32 otapdly[MMC_TIMING_MMC_HS400 + 1];
bool is_phy_on;
bool has_cqe;
@@ -93,6 +139,8 @@ struct sdhci_arasan_data {
struct clk *sdcardclk;
struct regmap *soc_ctl_base;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
unsigned int quirks; /* Arasan deviations from spec */
@@ -101,6 +149,8 @@ struct sdhci_arasan_data {
/* Controller immediately reports SDHCI_CLOCK_INT_STABLE after enabling the
* internal clock even when the clock isn't stable */
#define SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE BIT(1)
+/* Controller has tap delay setting registers in its local reg space */
+#define SDHCI_ARASAN_TAPDELAY_REG_LOCAL BIT(2)
};
struct sdhci_arasan_of_data {
@@ -123,14 +173,16 @@ static const struct sdhci_arasan_soc_ctl_map intel_lgm_emmc_soc_ctl_map = {
/**
* sdhci_arasan_syscon_write - Write to a field in soc_ctl registers
*
+ * @host: The sdhci_host
+ * @fld: The field to write to
+ * @val: The value to write
+ *
* This function allows writing to fields in sdhci_arasan_soc_ctl_map.
* Note that if a field is specified as not available (shift < 0) then
* this function will silently return an error code. It will be noisy
* and print errors for any other (unexpected) errors.
*
- * @host: The sdhci_host
- * @fld: The field to write to
- * @val: The value to write
+ * Return: 0 on success and error value on error
*/
static int sdhci_arasan_syscon_write(struct sdhci_host *host,
const struct sdhci_arasan_soc_ctl_field *fld,
@@ -170,6 +222,100 @@ static int sdhci_arasan_syscon_write(struct sdhci_host *host,
return ret;
}
+static void arasan_zynqmp_dll_reset(struct sdhci_host *host, u8 deviceid)
+{
+ u16 clk;
+ unsigned long timeout;
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= ~(SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN);
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ /* Issue DLL Reset */
+ zynqmp_dll_reset(deviceid);
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk |= SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ /* Wait max 20 ms */
+ timeout = 20;
+ while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+ & SDHCI_CLOCK_INT_STABLE)) {
+ if (timeout == 0) {
+ dev_err(mmc_dev(host->mmc),
+ ": Internal clock never stabilised.\n");
+ return;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+}
+
+static int arasan_zynqmp_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ int err;
+
+ arasan_zynqmp_dll_reset(host, sdhci_arasan->device_id);
+
+ err = sdhci_execute_tuning(mmc, opcode);
+ if (err)
+ return err;
+
+ arasan_zynqmp_dll_reset(host, sdhci_arasan->device_id);
+
+ return 0;
+}
+
+static void __arasan_set_tap_delay(struct sdhci_host *host, u8 itap_delay,
+ u8 otap_delay)
+{
+ u32 regval;
+
+ if (itap_delay) {
+ regval = sdhci_readl(host, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ regval |= SDHCI_ITAPDLY_CHGWIN;
+ sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ regval |= SDHCI_ITAPDLY_ENABLE;
+ sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ regval |= itap_delay;
+ sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ regval &= ~SDHCI_ITAPDLY_CHGWIN;
+ sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ }
+
+ if (otap_delay) {
+ regval = sdhci_readl(host, SDHCI_ARASAN_OTAPDLY_REGISTER);
+ regval |= SDHCI_OTAPDLY_ENABLE;
+ sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER);
+ regval |= otap_delay;
+ sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER);
+ }
+}
+
+static void arasan_set_tap_delay(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ u8 itap_delay;
+ u8 otap_delay;
+
+ itap_delay = sdhci_arasan->itapdly[host->timing];
+ otap_delay = sdhci_arasan->otapdly[host->timing];
+
+ if (sdhci_arasan->quirks & SDHCI_ARASAN_TAPDELAY_REG_LOCAL)
+ __arasan_set_tap_delay(host, itap_delay, otap_delay);
+ else
+ arasan_zynqmp_set_tap_delay(sdhci_arasan->device_id,
+ itap_delay, otap_delay);
+}
+
static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -215,6 +361,17 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
}
}
+ /* Set the Input and Output Tap Delays */
+ if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_STANDARD_25_BROKEN) &&
+ (host->version >= SDHCI_SPEC_300)) {
+ if (clock == SD_CLK_25_MHZ)
+ clock = SD_CLK_19_MHZ;
+ if ((host->timing != MMC_TIMING_LEGACY) &&
+ (host->timing != MMC_TIMING_UHS_SDR12)) {
+ arasan_set_tap_delay(host);
+ }
+ }
+
if (ctrl_phy && sdhci_arasan->is_phy_on) {
phy_power_off(sdhci_arasan->phy);
sdhci_arasan->is_phy_on = false;
@@ -264,7 +421,7 @@ static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
- sdhci_reset(host, mask);
+ sdhci_and_cqhci_reset(host, mask);
if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_FORCE_CDTEST) {
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
@@ -306,7 +463,7 @@ static void sdhci_arasan_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power_noreg(host, mode, vdd);
}
-static const struct sdhci_ops sdhci_arasan_ops = {
+static struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -318,7 +475,6 @@ static const struct sdhci_ops sdhci_arasan_ops = {
static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
.ops = &sdhci_arasan_ops,
- .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
SDHCI_QUIRK2_STOP_WITH_TC,
@@ -394,13 +550,82 @@ static struct sdhci_arasan_of_data intel_lgm_emmc_data = {
.pdata = &sdhci_arasan_cqe_pdata,
};
+#ifdef CONFIG_PM
+/**
+ * sdhci_arasan_runtime_suspend - Suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the device in a low power state.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_arasan_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ ret = sdhci_runtime_suspend_host(host);
+ if (ret)
+ return ret;
+
+ if (host->tuning_mode != SDHCI_TUNING_MODE_3)
+ mmc_retune_needed(host->mmc);
+
+ clk_disable(pltfm_host->clk);
+ clk_disable(sdhci_arasan->clk_ahb);
+
+ return 0;
+}
+
+/**
+ * sdhci_arasan_runtime_resume - Resume method for the driver
+ * @dev: Address of the device structure
+ *
+ * Resume operation after suspend.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_arasan_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ ret = clk_enable(sdhci_arasan->clk_ahb);
+ if (ret) {
+ dev_err(dev, "Cannot enable AHB clock.\n");
+ return ret;
+ }
+
+ ret = clk_enable(pltfm_host->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable SD clock.\n");
+ return ret;
+ }
+
+ ret = sdhci_runtime_resume_host(host, 0);
+ if (ret)
+ goto out;
+
+ return 0;
+out:
+ clk_disable(pltfm_host->clk);
+ clk_disable(sdhci_arasan->clk_ahb);
+
+ return ret;
+}
+#endif /* ! CONFIG_PM */
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
* @dev: Address of the device structure
- * Returns 0 on success and error value on error
- *
* Put the device in a low power state.
+ *
+ * Return: 0 on success and error value on error
*/
static int sdhci_arasan_suspend(struct device *dev)
{
@@ -443,9 +668,9 @@ static int sdhci_arasan_suspend(struct device *dev)
/**
* sdhci_arasan_resume - Resume method for the driver
* @dev: Address of the device structure
- * Returns 0 on success and error value on error
- *
* Resume operation after suspend
+ *
+ * Return: 0 on success and error value on error
*/
static int sdhci_arasan_resume(struct device *dev)
{
@@ -488,8 +713,11 @@ static int sdhci_arasan_resume(struct device *dev)
}
#endif /* ! CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend,
- sdhci_arasan_resume);
+static const struct dev_pm_ops sdhci_arasan_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_arasan_suspend, sdhci_arasan_resume)
+ SET_RUNTIME_PM_OPS(sdhci_arasan_runtime_suspend,
+ sdhci_arasan_runtime_resume, NULL)
+};
static const struct of_device_id sdhci_arasan_of_match[] = {
/* SoC-specific compatible strings w/ soc_ctl_map */
@@ -514,6 +742,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "arasan,sdhci-4.9a",
.data = &sdhci_arasan_data,
},
+ {
+ .compatible = "xlnx,zynqmp-8.9a",
+ .data = &sdhci_arasan_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
@@ -521,12 +753,13 @@ MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
/**
* sdhci_arasan_sdcardclk_recalc_rate - Return the card clock rate
*
+ * @hw: Pointer to the hardware clock structure.
+ * @parent_rate: The parent rate (should be rate of clk_xin).
+ *
* Return the current actual rate of the SD card clock. This can be used
 * to communicate with our PHY.
*
- * @hw: Pointer to the hardware clock structure.
- * @parent_rate The parent rate (should be rate of clk_xin).
- * Returns the card clock rate.
+ * Return: the card clock rate.
*/
static unsigned long sdhci_arasan_sdcardclk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
@@ -546,6 +779,9 @@ static const struct clk_ops arasan_sdcardclk_ops = {
/**
* sdhci_arasan_update_clockmultiplier - Set corecfg_clockmultiplier
*
+ * @host: The sdhci_host
+ * @value: The value to write to syscon registers
+ *
* The corecfg_clockmultiplier is supposed to contain clock multiplier
* value of programmable clock generator.
*
@@ -557,8 +793,6 @@ static const struct clk_ops arasan_sdcardclk_ops = {
* - The value of corecfg_clockmultiplier should sync with that of corresponding
* value reading from sdhci_capability_register. So this function is called
* once at probe time and never called again.
- *
- * @host: The sdhci_host
*/
static void sdhci_arasan_update_clockmultiplier(struct sdhci_host *host,
u32 value)
@@ -585,6 +819,8 @@ static void sdhci_arasan_update_clockmultiplier(struct sdhci_host *host,
/**
* sdhci_arasan_update_baseclkfreq - Set corecfg_baseclkfreq
*
+ * @host: The sdhci_host
+ *
* The corecfg_baseclkfreq is supposed to contain the MHz of clk_xin. This
* function can be used to make that happen.
*
@@ -596,8 +832,6 @@ static void sdhci_arasan_update_clockmultiplier(struct sdhci_host *host,
* - It's assumed that clk_xin is not dynamic and that we use the SDHCI divider
* to achieve lower clock rates. That means that this function is called once
* at probe time and never called again.
- *
- * @host: The sdhci_host
*/
static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host)
{
@@ -624,6 +858,10 @@ static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host)
/**
* sdhci_arasan_register_sdclk - Register the sdclk for a PHY to use
*
+ * @sdhci_arasan: Our private data structure.
+ * @clk_xin: Pointer to the functional clock
+ * @dev: Pointer to our struct device.
+ *
* Some PHY devices need to know what the actual card clock is. In order for
* them to find out, we'll provide a clock through the common clock framework
* for them to query.
@@ -636,10 +874,7 @@ static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host)
* to create nice clean device tree bindings and later (if needed) we can try
* re-architecting SDHCI if we see some benefit to it.
*
- * @sdhci_arasan: Our private data structure.
- * @clk_xin: Pointer to the functional clock
- * @dev: Pointer to our struct device.
- * Returns 0 on success and error value on error
+ * Return: 0 on success and error value on error
*/
static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
struct clk *clk_xin,
@@ -683,10 +918,10 @@ static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
/**
* sdhci_arasan_unregister_sdclk - Undoes sdhci_arasan_register_sdclk()
*
+ * @dev: Pointer to our struct device.
+ *
* Should be called any time we're exiting and sdhci_arasan_register_sdclk()
* returned success.
- *
- * @dev: Pointer to our struct device.
*/
static void sdhci_arasan_unregister_sdclk(struct device *dev)
{
@@ -741,6 +976,88 @@ cleanup:
return ret;
}
+static void arasan_dt_read_tap_delay(struct device *dev, u32 *tapdly,
+ u8 mode, const char *prop)
+{
+ struct device_node *np = dev->of_node;
+
+ /*
+ * Read the Tap Delay value from the DT; if the property is absent,
+ * keep the pre-defined value already present in the table.
+ */
+ if (of_property_read_u32(np, prop, &tapdly[mode])) {
+ dev_dbg(dev, "Using predefined tapdly for %s = %d\n",
+ prop, tapdly[mode]);
+ }
+}
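+
+/*
+ * Illustrative device-tree fragment for the per-mode tap delay overrides
+ * parsed below (node name and values are hypothetical, not taken from a
+ * real board file); any property that is absent falls back to the
+ * pre-defined per-SoC table:
+ *
+ *	&sdhci0 {
+ *		xlnx,itap-delay-sd-hsd = <0x15>;
+ *		xlnx,otap-delay-sd-hsd = <0x5>;
+ *	};
+ */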
+
+/**
+ * arasan_dt_parse_tap_delays - Read Tap Delay values from DT
+ *
+ * @dev: Pointer to our struct device.
+ *
+ * Called at initialization to parse the values of Tap Delays.
+ */
+static void arasan_dt_parse_tap_delays(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ u32 *itapdly;
+ u32 *otapdly;
+ int i;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "xlnx,zynqmp-8.9a")) {
+ itapdly = zynqmp_itap_delays;
+ otapdly = zynqmp_otap_delays;
+ if (sdhci_arasan->mio_bank == MMC_BANK2) {
+ otapdly[MMC_TIMING_UHS_SDR104] = 0x2;
+ otapdly[MMC_TIMING_MMC_HS200] = 0x2;
+ }
+ } else {
+ itapdly = versal_itap_delays;
+ otapdly = versal_otap_delays;
+ }
+
+ arasan_dt_read_tap_delay(dev, itapdly, MMC_TIMING_SD_HS,
+ "xlnx,itap-delay-sd-hsd");
+ arasan_dt_read_tap_delay(dev, itapdly, MMC_TIMING_UHS_SDR25,
+ "xlnx,itap-delay-sdr25");
+ arasan_dt_read_tap_delay(dev, itapdly, MMC_TIMING_UHS_SDR50,
+ "xlnx,itap-delay-sdr50");
+ arasan_dt_read_tap_delay(dev, itapdly, MMC_TIMING_UHS_SDR104,
+ "xlnx,itap-delay-sdr104");
+ arasan_dt_read_tap_delay(dev, itapdly, MMC_TIMING_UHS_DDR50,
+ "xlnx,itap-delay-sd-ddr50");
+ arasan_dt_read_tap_delay(dev, itapdly, MMC_TIMING_MMC_HS,
+ "xlnx,itap-delay-mmc-hsd");
+ arasan_dt_read_tap_delay(dev, itapdly, MMC_TIMING_MMC_DDR52,
+ "xlnx,itap-delay-mmc-ddr52");
+ arasan_dt_read_tap_delay(dev, itapdly, MMC_TIMING_MMC_HS200,
+ "xlnx,itap-delay-mmc-hs200");
+ arasan_dt_read_tap_delay(dev, otapdly, MMC_TIMING_SD_HS,
+ "xlnx,otap-delay-sd-hsd");
+ arasan_dt_read_tap_delay(dev, otapdly, MMC_TIMING_UHS_SDR25,
+ "xlnx,otap-delay-sdr25");
+ arasan_dt_read_tap_delay(dev, otapdly, MMC_TIMING_UHS_SDR50,
+ "xlnx,otap-delay-sdr50");
+ arasan_dt_read_tap_delay(dev, otapdly, MMC_TIMING_UHS_SDR104,
+ "xlnx,otap-delay-sdr104");
+ arasan_dt_read_tap_delay(dev, otapdly, MMC_TIMING_UHS_DDR50,
+ "xlnx,otap-delay-sd-ddr50");
+ arasan_dt_read_tap_delay(dev, otapdly, MMC_TIMING_MMC_HS,
+ "xlnx,otap-delay-mmc-hsd");
+ arasan_dt_read_tap_delay(dev, otapdly, MMC_TIMING_MMC_DDR52,
+ "xlnx,otap-delay-mmc-ddr52");
+ arasan_dt_read_tap_delay(dev, otapdly, MMC_TIMING_MMC_HS200,
+ "xlnx,otap-delay-mmc-hs200");
+
+ for (i = 0; i <= MMC_TIMING_MMC_HS400; i++) {
+ sdhci_arasan->itapdly[i] = itapdly[i];
+ sdhci_arasan->otapdly[i] = otapdly[i];
+ }
+}
+
static int sdhci_arasan_probe(struct platform_device *pdev)
{
int ret;
@@ -751,10 +1068,35 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_arasan_data *sdhci_arasan;
struct device_node *np = pdev->dev.of_node;
+ unsigned int host_quirks2 = 0;
const struct sdhci_arasan_of_data *data;
match = of_match_node(sdhci_arasan_of_match, pdev->dev.of_node);
data = match->data;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "xlnx,zynqmp-8.9a")) {
+ char *soc_rev;
+
+ /* read Silicon version using nvmem driver */
+ soc_rev = zynqmp_nvmem_get_silicon_version(&pdev->dev,
+ "soc_revision");
+ if (PTR_ERR(soc_rev) == -EPROBE_DEFER)
+ /* Do a deferred probe */
+ return -EPROBE_DEFER;
+ else if (IS_ERR(soc_rev))
+ dev_dbg(&pdev->dev, "Error getting silicon version\n");
+
+ /* Set host quirk if the silicon version is v1.0 */
+ if (!IS_ERR(soc_rev) && (*soc_rev == ZYNQMP_SILICON_V1))
+ host_quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+
+ /*
+ * Free soc_rev only if we got a valid pointer from the nvmem
+ * driver; calling kfree() on an ERR_PTR would crash the kernel.
+ */
+ if (!IS_ERR(soc_rev))
+ kfree(soc_rev);
+ }
+
host = sdhci_pltfm_init(pdev, data->pdata, sizeof(*sdhci_arasan));
if (IS_ERR(host))
@@ -766,6 +1108,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
sdhci_arasan->soc_ctl_map = data->soc_ctl_map;
+ host->quirks2 |= host_quirks2;
+
node = of_parse_phandle(pdev->dev.of_node, "arasan,soc-ctl-syscon", 0);
if (node) {
sdhci_arasan->soc_ctl_base = syscon_node_to_regmap(node);
@@ -814,6 +1158,9 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (of_property_read_bool(np, "xlnx,int-clock-stable-broken"))
sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE;
+ if (of_device_is_compatible(pdev->dev.of_node, "xlnx,versal-8.9a"))
+ sdhci_arasan->quirks |= SDHCI_ARASAN_TAPDELAY_REG_LOCAL;
+
pltfm_host->clk = clk_xin;
if (of_device_is_compatible(pdev->dev.of_node,
@@ -833,6 +1180,50 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
goto unreg_clk;
}
+ if (of_device_is_compatible(pdev->dev.of_node, "arasan,sdhci-8.9a")) {
+ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+ host->quirks2 |= SDHCI_QUIRK2_CLOCK_STANDARD_25_BROKEN;
+ }
+
+ if (of_device_is_compatible(pdev->dev.of_node, "xlnx,zynqmp-8.9a") ||
+ of_device_is_compatible(pdev->dev.of_node, "xlnx,versal-8.9a")) {
+ arasan_dt_parse_tap_delays(&pdev->dev);
+ }
+
+ if (of_device_is_compatible(pdev->dev.of_node, "xlnx,zynqmp-8.9a")) {
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,mio_bank",
+ &sdhci_arasan->mio_bank);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "\"xlnx,mio_bank \" property is missing.\n");
+ goto clk_disable_all;
+ }
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,device_id",
+ &sdhci_arasan->device_id);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "\"xlnx,device_id \" property is missing.\n");
+ goto clk_disable_all;
+ }
+
+ host->mmc_host_ops.execute_tuning =
+ arasan_zynqmp_execute_tuning;
+ }
+
+ sdhci_arasan->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (!IS_ERR(sdhci_arasan->pinctrl)) {
+ sdhci_arasan->pins_default = pinctrl_lookup_state(
+ sdhci_arasan->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(sdhci_arasan->pins_default)) {
+ dev_err(&pdev->dev, "Missing default pinctrl config\n");
+ return PTR_ERR(sdhci_arasan->pins_default);
+ }
+
+ pinctrl_select_state(sdhci_arasan->pinctrl,
+ sdhci_arasan->pins_default);
+ }
+
sdhci_arasan->phy = ERR_PTR(-ENODEV);
if (of_device_is_compatible(pdev->dev.of_node,
"arasan,sdhci-5.1")) {
@@ -865,6 +1256,13 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (ret)
goto err_add_host;
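+
+ /*
+ * Runtime PM setup: mark the device active, enable runtime PM with a
+ * 2 s autosuspend delay, and use pm_runtime_forbid() so the host stays
+ * powered until userspace explicitly allows runtime suspend.
+ */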
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_forbid(&pdev->dev);
+
return 0;
err_add_host:
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 881f8138e7de..03698d78a402 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -109,8 +109,13 @@ static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
- if (timing == MMC_TIMING_MMC_DDR52)
- sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
+ u8 mc1r;
+
+ if (timing == MMC_TIMING_MMC_DDR52) {
+ mc1r = sdhci_readb(host, SDMMC_MC1R);
+ mc1r |= SDMMC_MC1R_DDR;
+ sdhci_writeb(host, mc1r, SDMMC_MC1R);
+ }
sdhci_set_uhs_signaling(host, timing);
}
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 0ff339004d8a..48765208e295 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -124,6 +124,7 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
return ret;
}
}
+
/*
* The DAT[3:0] line signal levels and the CMD line signal level are
* not compatible with standard SDHC register. The line signal levels
@@ -135,6 +136,16 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
ret = value & 0x000fffff;
ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
ret |= (value << 1) & SDHCI_CMD_LVL;
+
+ /*
+ * Some controllers have unreliable Data Line Active
+ * bit for commands with busy signal. This affects
+ * Command Inhibit (data) bit. Just ignore it since
+ * MMC core driver has already polled card status
+ * with CMD13 after any command with busy signal.
+ */
+ if (esdhc->quirk_ignore_data_inhibit)
+ ret &= ~SDHCI_DATA_INHIBIT;
return ret;
}
@@ -149,19 +160,6 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
return ret;
}
- /*
- * Some controllers have unreliable Data Line Active
- * bit for commands with busy signal. This affects
- * Command Inhibit (data) bit. Just ignore it since
- * MMC core driver has already polled card status
- * with CMD13 after any command with busy siganl.
- */
- if ((spec_reg == SDHCI_PRESENT_STATE) &&
- (esdhc->quirk_ignore_data_inhibit == true)) {
- ret = value & ~SDHCI_DATA_INHIBIT;
- return ret;
- }
-
ret = value;
return ret;
}
@@ -844,6 +842,7 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
scfg_node = of_find_matching_node(NULL, scfg_device_ids);
if (scfg_node)
scfg_base = of_iomap(scfg_node, 0);
+ of_node_put(scfg_node);
if (scfg_base) {
sdhciovselcr = SDHCIOVSELCR_TGLEN |
SDHCIOVSELCR_VSELVAL;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 1e19c0d0bca8..892c10950c81 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1795,6 +1795,8 @@ static int amd_probe(struct sdhci_pci_chip *chip)
}
}
+ pci_dev_put(smbus_dev);
+
if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 3170c19683c4..4b639e819e03 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -31,6 +31,7 @@
#define O2_SD_CAPS 0xE0
#define O2_SD_ADMA1 0xE2
#define O2_SD_ADMA2 0xE7
+#define O2_SD_MISC_CTRL2 0xF0
#define O2_SD_INF_MOD 0xF1
#define O2_SD_MISC_CTRL4 0xFC
#define O2_SD_TUNING_CTRL 0x300
@@ -777,6 +778,12 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
/* Set Tuning Windows to 5 */
pci_write_config_byte(chip->pdev,
O2_SD_TUNING_CTRL, 0x55);
+ /* Adjust 1st and 2nd CD debounce time */
+ pci_read_config_dword(chip->pdev, O2_SD_MISC_CTRL2, &scratch_32);
+ scratch_32 &= 0xFFE7FFFF;
+ scratch_32 |= 0x00180000;
+ pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL2, scratch_32);
+ pci_write_config_dword(chip->pdev, O2_SD_DETECT_SETTING, 1);
/* Lock WP */
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 328b132bbe57..8c620c590dd8 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -102,6 +102,9 @@ void sdhci_get_property(struct platform_device *pdev)
sdhci_get_compatibility(pdev);
+ if (device_property_present(dev, "broken-mmc-highspeed"))
+ host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
+
device_property_read_u32(dev, "clock-frequency", &pltfm_host->clock);
if (device_property_present(dev, "keep-power-in-suspend"))
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index a999d2089d3d..ef89fdf1f904 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -223,13 +223,19 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
sdhci_enable_clk(host, div);
- /* enable auto gate sdhc_enable_auto_gate */
val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
- mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN |
- SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
- if (mask != (val & mask)) {
- val |= mask;
- sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
+ mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
+ /* Enable CLK_AUTO when the clock is greater than 400K. */
+ if (clk > 400000) {
+ if (mask != (val & mask)) {
+ val |= mask;
+ sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
+ }
+ } else {
+ if (val & mask) {
+ val &= ~mask;
+ sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
+ }
}
}
@@ -295,7 +301,7 @@ static unsigned int sdhci_sprd_get_max_clock(struct sdhci_host *host)
static unsigned int sdhci_sprd_get_min_clock(struct sdhci_host *host)
{
- return 400000;
+ return 100000;
}
static void sdhci_sprd_set_uhs_signaling(struct sdhci_host *host,
@@ -379,12 +385,33 @@ static unsigned int sdhci_sprd_get_ro(struct sdhci_host *host)
return 0;
}
+static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+
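+ /*
+ * Map SDHCI power modes onto the external regulators: vmmc (the card
+ * supply) tracks MMC_POWER_UP/MMC_POWER_OFF, while vqmmc (the I/O
+ * rail) is only enabled once the card is powered (MMC_POWER_ON).
+ */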
+ switch (mode) {
+ case MMC_POWER_OFF:
+ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, 0);
+
+ mmc_regulator_disable_vqmmc(mmc);
+ break;
+ case MMC_POWER_ON:
+ mmc_regulator_enable_vqmmc(mmc);
+ break;
+ case MMC_POWER_UP:
+ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, vdd);
+ break;
+ }
+}
+
static struct sdhci_ops sdhci_sprd_ops = {
.read_l = sdhci_sprd_readl,
.write_l = sdhci_sprd_writel,
.write_w = sdhci_sprd_writew,
.write_b = sdhci_sprd_writeb,
.set_clock = sdhci_sprd_set_clock,
+ .set_power = sdhci_sprd_set_power,
.get_max_clock = sdhci_sprd_get_max_clock,
.get_min_clock = sdhci_sprd_get_min_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -431,7 +458,7 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
}
if (IS_ERR(sprd_host->pinctrl))
- return 0;
+ goto reset;
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_180:
@@ -459,6 +486,8 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
/* Wait for 300 ~ 500 us for pin state stable */
usleep_range(300, 500);
+
+reset:
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
return 0;
@@ -626,6 +655,10 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
SDHCI_SUPPORT_DDR50);
+ ret = mmc_regulator_get_supply(host->mmc);
+ if (ret)
+ goto pm_runtime_disable;
+
ret = sdhci_setup_host(host);
if (ret)
goto pm_runtime_disable;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 65dfe75120ed..d58383767a6e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -24,6 +24,7 @@
#include <linux/gpio/consumer.h>
#include <linux/ktime.h>
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"
@@ -347,7 +348,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
u32 misc_ctrl, clk_ctrl, pad_ctrl;
- sdhci_reset(host, mask);
+ sdhci_and_cqhci_reset(host, mask);
if (!(mask & SDHCI_RESET_ALL))
return;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index deafcc56adee..5a2baaf5f911 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -9,6 +9,7 @@
* - JMicron (hardware and technical support)
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
@@ -331,6 +332,7 @@ static void sdhci_init(struct sdhci_host *host, int soft)
if (soft) {
/* force clock reconfiguration */
host->clock = 0;
+ host->reinit_uhs = true;
mmc->ops->set_ios(mmc, &mmc->ios);
}
}
@@ -474,7 +476,7 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
{
unsigned long flags;
size_t blksize, len, chunk;
- u32 uninitialized_var(scratch);
+ u32 scratch;
u8 *buf;
DBG("PIO reading\n");
@@ -1102,6 +1104,8 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
}
}
+ sdhci_config_dma(host);
+
if (host->flags & SDHCI_REQ_USE_DMA) {
int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
@@ -1121,8 +1125,6 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
}
}
- sdhci_config_dma(host);
-
if (!(host->flags & SDHCI_REQ_USE_DMA)) {
int flags;
@@ -1570,10 +1572,9 @@ u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
pre_val = sdhci_get_preset_value(host);
- div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
- >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
+ div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
if (host->clk_mul &&
- (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
+ (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
clk = SDHCI_PROG_CLOCK_MODE;
real_div = div + 1;
clk_mul = host->clk_mul;
@@ -1911,11 +1912,46 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
+static bool sdhci_timing_has_preset(unsigned char timing)
+{
+ switch (timing) {
+ case MMC_TIMING_UHS_SDR12:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ return true;
+ }
+ return false;
+}
+
+static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing)
+{
+ return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
+ sdhci_timing_has_preset(timing);
+}
+
+static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios)
+{
+ /*
+ * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK
+ * Frequency. Check if preset values need to be enabled, or the Driver
+ * Strength needs updating. Note, clock changes are handled separately.
+ */
+ return !host->preset_enabled &&
+ (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type);
+}
+
void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
+ bool reinit_uhs = host->reinit_uhs;
+ bool turning_on_clk = false;
u8 ctrl;
+ host->reinit_uhs = false;
+
if (ios->power_mode == MMC_POWER_UNDEFINED)
return;
@@ -1941,6 +1977,8 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sdhci_enable_preset_value(host, false);
if (!ios->clock || ios->clock != host->clock) {
+ turning_on_clk = ios->clock && !host->clock;
+
host->ops->set_clock(host, ios->clock);
host->clock = ios->clock;
@@ -1967,6 +2005,17 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->ops->set_bus_width(host, ios->bus_width);
+ /*
+ * Special case to avoid multiple clock changes during voltage
+ * switching.
+ */
+ if (!reinit_uhs &&
+ turning_on_clk &&
+ host->timing == ios->timing &&
+ host->version >= SDHCI_SPEC_300 &&
+ !sdhci_presetable_values_change(host, ios))
+ return;
+
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
@@ -2010,6 +2059,7 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+ host->drv_type = ios->drv_type;
} else {
/*
* According to SDHC Spec v3.00, if the Preset Value
@@ -2037,19 +2087,14 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->ops->set_uhs_signaling(host, ios->timing);
host->timing = ios->timing;
- if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
- ((ios->timing == MMC_TIMING_UHS_SDR12) ||
- (ios->timing == MMC_TIMING_UHS_SDR25) ||
- (ios->timing == MMC_TIMING_UHS_SDR50) ||
- (ios->timing == MMC_TIMING_UHS_SDR104) ||
- (ios->timing == MMC_TIMING_UHS_DDR50) ||
- (ios->timing == MMC_TIMING_MMC_DDR52))) {
+ if (sdhci_preset_needed(host, ios->timing)) {
u16 preset;
sdhci_enable_preset_value(host, true);
preset = sdhci_get_preset_value(host);
- ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
- >> SDHCI_PRESET_DRV_SHIFT;
+ ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
+ preset);
+ host->drv_type = ios->drv_type;
}
/* Re-enable SD Clock */
@@ -3327,6 +3372,7 @@ int sdhci_resume_host(struct sdhci_host *host)
sdhci_init(host, 0);
host->pwr = 0;
host->clock = 0;
+ host->reinit_uhs = true;
mmc->ops->set_ios(mmc, &mmc->ios);
} else {
sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
@@ -3389,6 +3435,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
/* Force clock and power re-program */
host->pwr = 0;
host->clock = 0;
+ host->reinit_uhs = true;
mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
mmc->ops->set_ios(mmc, &mmc->ios);
@@ -4010,7 +4057,8 @@ int sdhci_setup_host(struct sdhci_host *host)
if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
mmc->caps &= ~MMC_CAP_CMD23;
- if (host->caps & SDHCI_CAN_DO_HISPD)
+ if ((host->caps & SDHCI_CAN_DO_HISPD) &&
+ !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 54f9d6720f13..b92662f77545 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -9,6 +9,7 @@
#ifndef __SDHCI_HW_H
#define __SDHCI_HW_H
+#include <linux/bits.h>
#include <linux/scatterlist.h>
#include <linux/compiler.h>
#include <linux/types.h>
@@ -268,12 +269,9 @@
#define SDHCI_PRESET_FOR_SDR104 0x6C
#define SDHCI_PRESET_FOR_DDR50 0x6E
#define SDHCI_PRESET_FOR_HS400 0x74 /* Non-standard */
-#define SDHCI_PRESET_DRV_MASK 0xC000
-#define SDHCI_PRESET_DRV_SHIFT 14
-#define SDHCI_PRESET_CLKGEN_SEL_MASK 0x400
-#define SDHCI_PRESET_CLKGEN_SEL_SHIFT 10
-#define SDHCI_PRESET_SDCLK_FREQ_MASK 0x3FF
-#define SDHCI_PRESET_SDCLK_FREQ_SHIFT 0
+#define SDHCI_PRESET_DRV_MASK GENMASK(15, 14)
+#define SDHCI_PRESET_CLKGEN_SEL BIT(10)
+#define SDHCI_PRESET_SDCLK_FREQ_MASK GENMASK(9, 0)
#define SDHCI_SLOT_INT_STATUS 0xFC
@@ -486,6 +484,8 @@ struct sdhci_host {
* block count.
*/
#define SDHCI_QUIRK2_USE_32BIT_BLK_CNT (1<<18)
+/* Clock is broken for frequencies between 19 MHz and 25 MHz */
+#define SDHCI_QUIRK2_CLOCK_STANDARD_25_BROKEN (1<<19)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -530,6 +530,8 @@ struct sdhci_host {
unsigned int clock; /* Current clock (MHz) */
u8 pwr; /* Current voltage */
+ u8 drv_type; /* Current UHS-I driver type */
+ bool reinit_uhs; /* Force UHS-related re-initialization */
bool runtime_suspended; /* Host is runtime suspended */
bool bus_on; /* Bus power prevents runtime suspend */
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 4cbb764c9822..987626ffdb6f 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -227,8 +227,6 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
*/
case MMC_TIMING_SD_HS:
case MMC_TIMING_MMC_HS:
- case MMC_TIMING_UHS_SDR12:
- case MMC_TIMING_UHS_SDR25:
val &= ~SDHCI_CTRL_HISPD;
}
}
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index f8b939e63e02..86a8644af450 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -50,9 +50,16 @@ struct f_sdhost_priv {
bool enable_cmd_dat_delay;
};
+static void *sdhci_f_sdhost_priv(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return sdhci_pltfm_priv(pltfm_host);
+}
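+
+/*
+ * With the conversion to the sdhci_pltfm helpers, the driver's private
+ * data lives behind struct sdhci_pltfm_host rather than directly in the
+ * host, hence this accessor instead of a bare sdhci_priv().
+ */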
+
static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
{
- struct f_sdhost_priv *priv = sdhci_priv(host);
+ struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
u32 ctrl = 0;
usleep_range(2500, 3000);
@@ -85,7 +92,7 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
{
- struct f_sdhost_priv *priv = sdhci_priv(host);
+ struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
u32 ctl;
if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
@@ -109,31 +116,32 @@ static const struct sdhci_ops sdhci_f_sdh30_ops = {
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
+static const struct sdhci_pltfm_data sdhci_f_sdh30_pltfm_data = {
+ .ops = &sdhci_f_sdh30_ops,
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
+ | SDHCI_QUIRK_INVERTED_WRITE_PROTECT,
+ .quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE
+ | SDHCI_QUIRK2_TUNING_WORK_AROUND,
+};
+
static int sdhci_f_sdh30_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct device *dev = &pdev->dev;
- struct resource *res;
- int irq, ctrl = 0, ret = 0;
+ int ctrl = 0, ret = 0;
struct f_sdhost_priv *priv;
+ struct sdhci_pltfm_host *pltfm_host;
u32 reg = 0;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
+ host = sdhci_pltfm_init(pdev, &sdhci_f_sdh30_pltfm_data,
+ sizeof(struct f_sdhost_priv));
if (IS_ERR(host))
return PTR_ERR(host);
- priv = sdhci_priv(host);
+ pltfm_host = sdhci_priv(host);
+ priv = sdhci_pltfm_priv(pltfm_host);
priv->dev = dev;
- host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
- SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
- host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
- SDHCI_QUIRK2_TUNING_WORK_AROUND;
-
priv->enable_cmd_dat_delay = device_property_read_bool(dev,
"fujitsu,cmd-dat-delay-select");
@@ -141,19 +149,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
if (ret)
goto err;
- platform_set_drvdata(pdev, host);
-
- host->hw_name = "f_sdh30";
- host->ops = &sdhci_f_sdh30_ops;
- host->irq = irq;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(host->ioaddr)) {
- ret = PTR_ERR(host->ioaddr);
- goto err;
- }
-
if (dev_of_node(dev)) {
sdhci_get_of_property(pdev);
@@ -194,6 +189,9 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
if (reg & SDHCI_CAN_DO_8BIT)
priv->vendor_hs200 = F_SDH30_EMMC_HS200;
+ if (!(reg & SDHCI_TIMEOUT_CLK_MASK))
+ host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
+
ret = sdhci_add_host(host);
if (ret)
goto err_add_host;
@@ -205,23 +203,22 @@ err_add_host:
err_clk:
clk_disable_unprepare(priv->clk_iface);
err:
- sdhci_free_host(host);
+ sdhci_pltfm_free(pdev);
+
return ret;
}
static int sdhci_f_sdh30_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
- struct f_sdhost_priv *priv = sdhci_priv(host);
-
- sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) ==
- 0xffffffff);
+ struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
+ struct clk *clk_iface = priv->clk_iface;
+ struct clk *clk = priv->clk;
- clk_disable_unprepare(priv->clk_iface);
- clk_disable_unprepare(priv->clk);
+ sdhci_pltfm_unregister(pdev);
- sdhci_free_host(host);
- platform_set_drvdata(pdev, NULL);
+ clk_disable_unprepare(clk_iface);
+ clk_disable_unprepare(clk);
return 0;
}
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 98c575de43c7..ed18c233c786 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1395,7 +1395,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
irq[0] = platform_get_irq(pdev, 0);
irq[1] = platform_get_irq_optional(pdev, 1);
if (irq[0] < 0)
- return -ENXIO;
+ return irq[0];
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(dev, res);
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index d577a6b0ceae..0a67ad57e5c1 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1314,8 +1314,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
return ret;
host->irq = platform_get_irq(pdev, 0);
- if (host->irq <= 0) {
- ret = -EINVAL;
+ if (host->irq < 0) {
+ ret = host->irq;
goto error_disable_mmc;
}
@@ -1456,9 +1456,11 @@ static int sunxi_mmc_remove(struct platform_device *pdev)
struct sunxi_mmc_host *host = mmc_priv(mmc);
mmc_remove_host(mmc);
- pm_runtime_force_suspend(&pdev->dev);
- disable_irq(host->irq);
- sunxi_mmc_disable(host);
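+ /* Only quiesce hardware that runtime PM has not already suspended. */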
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev)) {
+ disable_irq(host->irq);
+ sunxi_mmc_disable(host);
+ }
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
mmc_free_host(mmc);
diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c
index 8d037c2071ab..497791ffada6 100644
--- a/drivers/mmc/host/toshsd.c
+++ b/drivers/mmc/host/toshsd.c
@@ -651,7 +651,9 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto unmap;
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto free_irq;
base = pci_resource_start(pdev, 0);
dev_dbg(&pdev->dev, "MMIO %pa, IRQ %d\n", &base, pdev->irq);
@@ -660,6 +662,8 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+free_irq:
+ free_irq(pdev->irq, host);
unmap:
pci_iounmap(pdev, host->ioaddr);
release:
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 96b0f81a2032..56d131183c1e 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1743,8 +1743,10 @@ static int usdhi6_probe(struct platform_device *pdev)
irq_cd = platform_get_irq_byname(pdev, "card detect");
irq_sd = platform_get_irq_byname(pdev, "data");
irq_sdio = platform_get_irq_byname(pdev, "SDIO");
- if (irq_sd < 0 || irq_sdio < 0)
- return -ENODEV;
+ if (irq_sd < 0)
+ return irq_sd;
+ if (irq_sdio < 0)
+ return irq_sdio;
mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev);
if (!mmc)
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 721e5dd1eb7d..2c4d390a8acd 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1154,7 +1154,9 @@ static int via_sd_probe(struct pci_dev *pcidev,
pcidev->subsystem_device == 0x3891)
sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY;
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto unmap;
return 0;
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 5e1d7025dbf7..177937dd69ae 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -1715,6 +1715,9 @@ static void construct_request_response(struct vub300_mmc_host *vub300,
int bytes = 3 & less_cmd;
int words = less_cmd >> 2;
u8 *r = vub300->resp.response.command_response;
+
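+ /*
+ * A zero-length response carries no payload bytes to unpack, so
+ * bail out before indexing the response buffer.
+ */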
+ if (!resp_len)
+ return;
if (bytes == 3) {
cmd->resp[words] = (r[1 + (words << 2)] << 24)
| (r[2 + (words << 2)] << 16)
@@ -2049,6 +2052,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
return;
kref_get(&vub300->kref);
if (enable) {
+ set_current_state(TASK_RUNNING);
mutex_lock(&vub300->irq_mutex);
if (vub300->irqs_queued) {
vub300->irqs_queued -= 1;
@@ -2064,6 +2068,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
vub300_queue_poll_work(vub300, 0);
}
mutex_unlock(&vub300->irq_mutex);
+ set_current_state(TASK_INTERRUPTIBLE);
} else {
vub300->irq_enabled = 0;
}
@@ -2306,14 +2311,15 @@ static int vub300_probe(struct usb_interface *interface,
0x0000, 0x0000, &vub300->system_port_status,
sizeof(vub300->system_port_status), 1000);
if (retval < 0) {
- goto error4;
+ goto error5;
} else if (sizeof(vub300->system_port_status) == retval) {
vub300->card_present =
(0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
vub300->read_only =
(0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
} else {
- goto error4;
+ retval = -EINVAL;
+ goto error5;
}
usb_set_intfdata(interface, vub300);
INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread);
@@ -2336,8 +2342,13 @@ static int vub300_probe(struct usb_interface *interface,
"USB vub300 remote SDIO host controller[%d]"
"connected with no SD/SDIO card inserted\n",
interface_to_InterfaceNumber(interface));
- mmc_add_host(mmc);
+ retval = mmc_add_host(mmc);
+ if (retval)
+ goto error6;
+
return 0;
+error6:
+ del_timer_sync(&vub300->inactivity_timer);
error5:
mmc_free_host(mmc);
/*
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 740179f42cf2..c8fd3cb91789 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1701,7 +1701,15 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma,
*/
wbsd_init_device(host);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret) {
+ if (!pnp)
+ wbsd_chip_poweroff(host);
+
+ wbsd_release_resources(host);
+ wbsd_free_mmc(dev);
+ return ret;
+ }
pr_info("%s: W83L51xD", mmc_hostname(mmc));
if (host->chip_id != 0)
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 2c4ba1fa4bbf..345b3c50f27d 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -849,7 +849,7 @@ static int wmt_mci_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk_sdmmc)) {
dev_err(&pdev->dev, "Error getting clock\n");
ret = PTR_ERR(priv->clk_sdmmc);
- goto fail5;
+ goto fail5_and_a_half;
}
ret = clk_prepare_enable(priv->clk_sdmmc);
@@ -859,13 +859,20 @@ static int wmt_mci_probe(struct platform_device *pdev)
/* configure the controller to a known 'ready' state */
wmt_reset_hardware(mmc);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto fail7;
dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");
return 0;
+fail7:
+ clk_disable_unprepare(priv->clk_sdmmc);
fail6:
clk_put(priv->clk_sdmmc);
+fail5_and_a_half:
+ dma_free_coherent(&pdev->dev, mmc->max_blk_count * 16,
+ priv->dma_desc_buffer, priv->dma_desc_device_addr);
fail5:
free_irq(dma_irq, priv);
fail4:
@@ -882,7 +889,6 @@ static int wmt_mci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
- struct resource *res;
u32 reg_tmp;
mmc = platform_get_drvdata(pdev);
@@ -910,9 +916,6 @@ static int wmt_mci_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk_sdmmc);
clk_put(priv->clk_sdmmc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
mmc_free_host(mmc);
dev_info(&pdev->dev, "WMT MCI device removed\n");
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 79a53cb8507b..dc350272d9ef 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -420,8 +420,25 @@ read_pri_intelext(struct map_info *map, __u16 adr)
extra_size = 0;
/* Protection Register info */
- extra_size += (extp->NumProtectionFields - 1) *
- sizeof(struct cfi_intelext_otpinfo);
+ if (extp->NumProtectionFields) {
+ struct cfi_intelext_otpinfo *otp =
+ (struct cfi_intelext_otpinfo *)&extp->extra[0];
+
+ extra_size += (extp->NumProtectionFields - 1) *
+ sizeof(struct cfi_intelext_otpinfo);
+
+ if (extp_size >= sizeof(*extp) + extra_size) {
+ int i;
+
+ /* Do some byteswapping if necessary */
+ for (i = 0; i < extp->NumProtectionFields - 1; i++) {
+ otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
+ otp->FactGroups = le16_to_cpu(otp->FactGroups);
+ otp->UserGroups = le16_to_cpu(otp->UserGroups);
+ otp++;
+ }
+ }
+ }
}
if (extp->MinorVersion >= '1') {
@@ -695,14 +712,16 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
*/
if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
&& extp->FeatureSupport & (1 << 9)) {
+ int offs = 0;
struct cfi_private *newcfi;
struct flchip *chip;
struct flchip_shared *shared;
- int offs, numregions, numparts, partshift, numvirtchips, i, j;
+ int numregions, numparts, partshift, numvirtchips, i, j;
/* Protection Register info */
- offs = (extp->NumProtectionFields - 1) *
- sizeof(struct cfi_intelext_otpinfo);
+ if (extp->NumProtectionFields)
+ offs = (extp->NumProtectionFields - 1) *
+ sizeof(struct cfi_intelext_otpinfo);
/* Burst Read info */
offs += extp->extra[offs+1]+2;
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index cf426956454c..626321f8ba94 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -198,6 +198,9 @@ static int __xipram cfi_chip_setup(struct map_info *map,
__u32 base = 0;
int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
int i;
+ int extendedId1 = 0;
+ int extendedId2 = 0;
+ int extendedId3 = 0;
int addr_unlock1 = 0x555, addr_unlock2 = 0x2AA;
xip_enable(base, map, cfi);
@@ -222,6 +225,38 @@ static int __xipram cfi_chip_setup(struct map_info *map,
for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
+ /* Note we put the device back into Read Mode BEFORE going into Auto
+ * Select Mode, as some devices support nesting of modes, others
+ * don't. This way should always work.
+ * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
+ * so should be treated as nops or illegal (and so put the device
+ * back into Read Mode, which is a nop in this case).
+ */
+ cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi->mfr = cfi_read_query16(map, base);
+ cfi->id = cfi_read_query16(map, base + ofs_factor);
+
+ /* Get device ID cycle 1,2,3 for Numonyx/ST devices */
+ if ((cfi->mfr == CFI_MFR_INTEL || cfi->mfr == CFI_MFR_ST)
+ && ((cfi->id & 0xff) == 0x7e)
+ && (le16_to_cpu(cfi->cfiq->P_ID) == 0x0002)) {
+ extendedId1 = cfi_read_query16(map, base + 0x1 * ofs_factor);
+ extendedId2 = cfi_read_query16(map, base + 0xe * ofs_factor);
+ extendedId3 = cfi_read_query16(map, base + 0xf * ofs_factor);
+ }
+
+ /* Get AMD/Spansion extended JEDEC ID */
+ if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
+ cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
+ cfi_read_query(map, base + 0xf * ofs_factor);
+
+ /* Put it back into Read Mode */
+ cfi_qry_mode_off(base, map, cfi);
+ xip_allowed(base, map);
+
/* Do any necessary byteswapping */
cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID);
@@ -231,6 +266,16 @@ static int __xipram cfi_chip_setup(struct map_info *map,
cfi->cfiq->InterfaceDesc = le16_to_cpu(cfi->cfiq->InterfaceDesc);
cfi->cfiq->MaxBufWriteSize = le16_to_cpu(cfi->cfiq->MaxBufWriteSize);
+ /* If the device is a M29EW used in 8-bit mode, adjust buffer size */
+ if ((cfi->cfiq->MaxBufWriteSize > 0x8) && (cfi->mfr == CFI_MFR_INTEL ||
+ cfi->mfr == CFI_MFR_ST) && (extendedId1 == 0x7E) &&
+ (extendedId2 == 0x22 || extendedId2 == 0x23 || extendedId2 == 0x28) &&
+ (extendedId3 == 0x01)) {
+ cfi->cfiq->MaxBufWriteSize = 0x8;
+ pr_warning("Adjusted buffer size on Numonyx flash M29EW family");
+ pr_warning("in 8 bit mode\n");
+ }
+
#ifdef DEBUG_CFI
/* Dump the information therein */
print_cfi_ident(cfi->cfiq);
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index eb0f4600efd1..40f9b6dffe3d 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1975,9 +1975,14 @@ static int __init docg3_probe(struct platform_device *pdev)
dev_err(dev, "No I/O memory resource defined\n");
return ret;
}
- base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);
ret = -ENOMEM;
+ base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);
+ if (!base) {
+ dev_err(dev, "devm_ioremap failed\n");
+ return ret;
+ }
+
cascade = devm_kcalloc(dev, DOC_MAX_NBFLOORS, sizeof(*cascade),
GFP_KERNEL);
if (!cascade)
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index f4d1667daaf9..41b5a236276b 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -2116,10 +2116,12 @@ static int stfsm_probe(struct platform_device *pdev)
(long long)fsm->mtd.size, (long long)(fsm->mtd.size >> 20),
fsm->mtd.erasesize, (fsm->mtd.erasesize >> 10));
- return mtd_device_register(&fsm->mtd, NULL, 0);
-
+ ret = mtd_device_register(&fsm->mtd, NULL, 0);
+ if (ret) {
err_clk_unprepare:
- clk_disable_unprepare(fsm->clk);
+ clk_disable_unprepare(fsm->clk);
+ }
+
return ret;
}
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
index 72f5c7b30079..add4386f99f0 100644
--- a/drivers/mtd/lpddr/lpddr2_nvm.c
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -433,6 +433,8 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
/* lpddr2_nvm address range */
add_range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!add_range)
+ return -ENODEV;
/* Populate map_info data structure */
*map = (struct map_info) {
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 21b556afc305..fa7e1c0b70c6 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -505,7 +505,7 @@ static int physmap_flash_probe(struct platform_device *dev)
if (!info->maps[i].phys)
info->maps[i].phys = res->start;
- info->win_order = get_bitmask_order(resource_size(res)) - 1;
+ info->win_order = fls64(resource_size(res)) - 1;
info->maps[i].size = BIT(info->win_order +
(info->gpios ?
info->gpios->ndescs : 0));
@@ -533,6 +533,17 @@ static int physmap_flash_probe(struct platform_device *dev)
if (info->probe_type) {
info->mtds[i] = do_map_probe(info->probe_type,
&info->maps[i]);
+
+ /* Fall back to mapping region as ROM */
+ if (!info->mtds[i] && IS_ENABLED(CONFIG_MTD_ROM) &&
+ strcmp(info->probe_type, "map_rom")) {
+ dev_warn(&dev->dev,
+ "map_probe() failed for type %s\n",
+ info->probe_type);
+
+ info->mtds[i] = do_map_probe("map_rom",
+ &info->maps[i]);
+ }
} else {
int j;
diff --git a/drivers/mtd/maps/physmap-versatile.c b/drivers/mtd/maps/physmap-versatile.c
index ad7cd9cfaee0..a1b8b7b25f88 100644
--- a/drivers/mtd/maps/physmap-versatile.c
+++ b/drivers/mtd/maps/physmap-versatile.c
@@ -93,6 +93,7 @@ static int ap_flash_init(struct platform_device *pdev)
return -ENODEV;
}
ebi_base = of_iomap(ebi, 0);
+ of_node_put(ebi);
if (!ebi_base)
return -ENODEV;
@@ -207,6 +208,7 @@ int of_flash_probe_versatile(struct platform_device *pdev,
versatile_flashprot = (enum versatile_flashprot)devid->data;
rmap = syscon_node_to_regmap(sysnp);
+ of_node_put(sysnp);
if (IS_ERR(rmap))
return PTR_ERR(rmap);
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 7d96758a8f04..6e5e55755970 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -66,6 +66,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
if (!info->map.virt) {
printk(KERN_WARNING "Failed to ioremap %s\n",
info->map.name);
+ kfree(info);
return -ENOMEM;
}
info->map.cached = ioremap_cache(info->map.phys, info->map.size);
@@ -87,6 +88,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
iounmap((void *)info->map.virt);
if (info->map.cached)
iounmap(info->map.cached);
+ kfree(info);
return -EIO;
}
info->mtd->dev.parent = &pdev->dev;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 0c05f77f9b21..dd0d0bf5f57f 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -533,7 +533,7 @@ static void blktrans_notify_add(struct mtd_info *mtd)
{
struct mtd_blktrans_ops *tr;
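+ /* Skip UBI volumes: they get block-level access via ubiblock instead. */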
- if (mtd->type == MTD_ABSENT)
+ if (mtd->type == MTD_ABSENT || mtd->type == MTD_UBIVOLUME)
return;
list_for_each_entry(tr, &blktrans_majors, list)
@@ -576,7 +576,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
list_add(&tr->list, &blktrans_majors);
mtd_for_each_device(mtd)
- if (mtd->type != MTD_ABSENT)
+ if (mtd->type != MTD_ABSENT && mtd->type != MTD_UBIVOLUME)
tr->add_mtd(tr, mtd);
mutex_unlock(&mtd_table_mutex);
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index c06b5322d470..c103b44d3d66 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -150,7 +150,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
mtdblk->cache_state = STATE_EMPTY;
ret = mtd_read(mtd, sect_start, sect_size,
&retlen, mtdblk->cache_data);
- if (ret)
+ if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != sect_size)
return -EIO;
@@ -185,8 +185,12 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
- if (!sect_size)
- return mtd_read(mtd, pos, len, &retlen, buf);
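+ /*
+ * Correctable bitflips (-EUCLEAN) still yield valid data, so treat
+ * them as success here; the MTD layer tracks them for scrubbing.
+ */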
+ if (!sect_size) {
+ ret = mtd_read(mtd, pos, len, &retlen, buf);
+ if (ret && !mtd_is_bitflip(ret))
+ return ret;
+ return 0;
+ }
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -206,7 +210,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
memcpy (buf, mtdblk->cache_data + offset, size);
} else {
ret = mtd_read(mtd, pos, size, &retlen, buf);
- if (ret)
+ if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != size)
return -EIO;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index ac5d3b6db9b8..83012d74dcd5 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -673,8 +673,10 @@ int add_mtd_device(struct mtd_info *mtd)
dev_set_drvdata(&mtd->dev, mtd);
of_node_get(mtd_get_of_node(mtd));
error = device_register(&mtd->dev);
- if (error)
+ if (error) {
+ put_device(&mtd->dev);
goto fail_added;
+ }
/* Add the nvmem provider */
error = mtd_nvmem_add(mtd);
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index e59de3f60cf6..3469280dc082 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -544,4 +544,18 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
load time (assuming you build diskonchip as a module) with the module
parameter "inftl_bbt_write=1".
+config MTD_NAND_ARASAN
+ tristate "Support for Arasan Nand Flash controller"
+ depends on HAS_IOMEM && HAS_DMA
+ help
+ Enables the driver for the Arasan Nand Flash controller on
+ Zynq Ultrascale+ MPSoC.
+
+config MTD_NAND_PL353
+ tristate "ARM Pl353 NAND flash driver"
+ depends on MTD_NAND && ARM
+ depends on PL353_SMC
+ help
+ Enables support for PrimeCell Static Memory Controller PL353.
+
endif # MTD_RAW_NAND
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index a98721988e61..6d0bb6b573ef 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -57,6 +57,8 @@ obj-$(CONFIG_MTD_NAND_MXIC) += mxic_nand.o
obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o
obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o
+obj-$(CONFIG_MTD_NAND_ARASAN) += arasan_nand.o
+obj-$(CONFIG_MTD_NAND_PL353) += pl353_nand.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/arasan_nand.c b/drivers/mtd/nand/raw/arasan_nand.c
new file mode 100644
index 000000000000..ace84d598305
--- /dev/null
+++ b/drivers/mtd/nand/raw/arasan_nand.c
@@ -0,0 +1,1465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arasan NAND Flash Controller Driver
+ *
+ * Copyright (C) 2014 - 2017 Xilinx, Inc.
+ * Author: Punnaiah Choudary Kalluri <punnaia@xilinx.com>
+ * Author: Naga Sureshkumar Relli <nagasure@xilinx.com>
+ *
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+#define EVENT_TIMEOUT_MSEC 1000
+#define ANFC_PM_TIMEOUT 1000 /* ms */
+
+#define PKT_OFST 0x00
+#define PKT_CNT_SHIFT 12
+
+#define MEM_ADDR1_OFST 0x04
+#define MEM_ADDR2_OFST 0x08
+#define PG_ADDR_SHIFT 16
+#define BCH_MODE_SHIFT 25
+#define MEM_ADDR_MASK GENMASK(7, 0)
+#define BCH_MODE_MASK GENMASK(27, 25)
+#define CS_MASK GENMASK(31, 30)
+#define CS_SHIFT 30
+
+#define CMD_OFST 0x0C
+#define ECC_ENABLE BIT(31)
+#define DMA_EN_MASK GENMASK(27, 26)
+#define DMA_ENABLE 0x2
+#define DMA_EN_SHIFT 26
+#define REG_PAGE_SIZE_SHIFT 23
+
+#define PROG_OFST 0x10
+#define PROG_PGRD BIT(0)
+#define PROG_ERASE BIT(2)
+#define PROG_STATUS BIT(3)
+#define PROG_PGPROG BIT(4)
+#define PROG_RDID BIT(6)
+#define PROG_RDPARAM BIT(7)
+#define PROG_RST BIT(8)
+#define PROG_GET_FEATURE BIT(9)
+#define PROG_SET_FEATURE BIT(10)
+
+#define INTR_STS_EN_OFST 0x14
+#define INTR_SIG_EN_OFST 0x18
+#define XFER_COMPLETE BIT(2)
+#define READ_READY BIT(1)
+#define WRITE_READY BIT(0)
+#define MBIT_ERROR BIT(3)
+#define EVENT_MASK (XFER_COMPLETE | READ_READY | WRITE_READY | MBIT_ERROR)
+
+#define INTR_STS_OFST 0x1C
+#define READY_STS_OFST 0x20
+#define DMA_ADDR1_OFST 0x24
+#define FLASH_STS_OFST 0x28
+#define DATA_PORT_OFST 0x30
+#define ECC_OFST 0x34
+#define BCH_EN_SHIFT 27
+#define ECC_SIZE_SHIFT 16
+
+#define ECC_ERR_CNT_OFST 0x38
+#define PAGE_ERR_CNT_MASK GENMASK(16, 8)
+#define PKT_ERR_CNT_MASK GENMASK(7, 0)
+
+#define ECC_SPR_CMD_OFST 0x3C
+#define CMD2_SHIFT 8
+#define ADDR_CYCLES_SHIFT 28
+
+#define ECC_ERR_CNT_1BIT_OFST 0x40
+#define ECC_ERR_CNT_2BIT_OFST 0x44
+#define DMA_ADDR0_OFST 0x50
+#define DATA_INTERFACE_OFST 0x6C
+#define ANFC_MAX_CHUNK_SIZE 0x4000
+#define ANFC_MAX_ADDR_CYCLES 7
+
+#define REG_PAGE_SIZE_512 0
+#define REG_PAGE_SIZE_1K 5
+#define REG_PAGE_SIZE_2K 1
+#define REG_PAGE_SIZE_4K 2
+#define REG_PAGE_SIZE_8K 3
+#define REG_PAGE_SIZE_16K 4
+
+#define TEMP_BUF_SIZE 1024
+#define NVDDR_MODE_PACKET_SIZE 8
+#define SDR_MODE_PACKET_SIZE 4
+
+#define ONFI_DATA_INTERFACE_NVDDR BIT(4)
+#define NVDDR_MODE BIT(9)
+#define NVDDR_TIMING_MODE_SHIFT 3
+
+#define SDR_MODE_DEFLT_FREQ 80000000
+#define COL_ROW_ADDR(pos, val) (((val) & 0xFF) << (8 * (pos)))
+
+struct anfc_op {
+ u32 cmds[4];
+ u32 len;
+ u32 col;
+ u32 row;
+ unsigned int data_instr_idx;
+ const struct nand_op_instr *data_instr;
+};
+
+/**
+ * struct anfc_nand_chip - Defines the nand chip related information
+ * @node: Used to store NAND chips into a list.
+ * @chip: NAND chip information structure.
+ * @strength: BCH or Hamming mode enable/disable.
+ * @ecc_strength: ECC strength 4/8/12/16.
+ * @eccval: Ecc config value.
+ * @raddr_cycles: Row address cycle information.
+ * @caddr_cycles: Column address cycle information.
+ * @pktsize: Packet size for read / write operation.
+ * @csnum: chipselect number to be used.
+ * @spktsize: Packet size in ddr mode for status operation.
+ * @inftimeval: Data interface and timing mode information.
+ */
+struct anfc_nand_chip {
+ struct list_head node;
+ struct nand_chip chip;
+ bool strength;
+ u32 ecc_strength;
+ u32 eccval;
+ u16 raddr_cycles;
+ u16 caddr_cycles;
+ u32 pktsize;
+ int csnum;
+ u32 spktsize;
+ u32 inftimeval;
+};
+
+/**
+ * struct anfc_nand_controller - Defines the Arasan NAND flash controller
+ * driver instance
+ * @controller: base controller structure.
+ * @chips: List of all NAND chips attached to the controller.
+ * @dev: Pointer to the device structure.
+ * @base: Virtual address of the NAND flash device.
+ * @curr_cmd: Current command issued.
+ * @clk_sys: Pointer to the system clock.
+ * @clk_flash: Pointer to the flash clock.
+ * @buf: Buffer used for read/write byte operations.
+ * @irq: IRQ number.
+ * @csnum: Chip select number currently in use.
+ * @event: Completion event for nand status events.
+ * @status: Status of the flash device.
+ * @prog: Used to initiate controller operations.
+ * @chip_active: Used to check the chip select state, active or not.
+ */
+struct anfc_nand_controller {
+ struct nand_controller controller;
+ struct list_head chips;
+ struct device *dev;
+ void __iomem *base;
+ int curr_cmd;
+ struct clk *clk_sys;
+ struct clk *clk_flash;
+ int irq;
+ int csnum;
+ struct completion event;
+ int status;
+ u32 prog;
+ u8 buf[TEMP_BUF_SIZE];
+ bool chip_active;
+};
+
+static int anfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (section >= nand->ecc.steps)
+ return -ERANGE;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = nand->ecc.total;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int anfc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (section >= nand->ecc.steps)
+ return -ERANGE;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2;
+ oobregion->length = mtd->oobsize - nand->ecc.total - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops anfc_ooblayout_ops = {
+ .ecc = anfc_ooblayout_ecc,
+ .free = anfc_ooblayout_free,
+};
+
+static inline struct anfc_nand_chip *to_anfc_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct anfc_nand_chip, chip);
+}
+
+static inline struct anfc_nand_controller *to_anfc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct anfc_nand_controller, controller);
+}
+
+static u8 anfc_page(u32 pagesize)
+{
+ switch (pagesize) {
+ case 512:
+ return REG_PAGE_SIZE_512;
+ case 1024:
+ return REG_PAGE_SIZE_1K;
+ case 2048:
+ return REG_PAGE_SIZE_2K;
+ case 4096:
+ return REG_PAGE_SIZE_4K;
+ case 8192:
+ return REG_PAGE_SIZE_8K;
+ case 16384:
+ return REG_PAGE_SIZE_16K;
+ default:
+ break;
+ }
+
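+ /* Unknown page sizes fall back to the 512 byte encoding. */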
+ return 0;
+}
+
+static inline void anfc_enable_intrs(struct anfc_nand_controller *nfc, u32 val)
+{
+ writel(val, nfc->base + INTR_STS_EN_OFST);
+ writel(val, nfc->base + INTR_SIG_EN_OFST);
+}
+
+static inline void anfc_config_ecc(struct anfc_nand_controller *nfc, bool on)
+{
+ u32 val;
+
+ val = readl(nfc->base + CMD_OFST);
+ if (on)
+ val |= ECC_ENABLE;
+ else
+ val &= ~ECC_ENABLE;
+ writel(val, nfc->base + CMD_OFST);
+}
+
+static inline void anfc_config_dma(struct anfc_nand_controller *nfc, int on)
+{
+ u32 val;
+
+ val = readl(nfc->base + CMD_OFST);
+ val &= ~DMA_EN_MASK;
+ if (on)
+ val |= DMA_ENABLE << DMA_EN_SHIFT;
+ writel(val, nfc->base + CMD_OFST);
+}
+
+static inline int anfc_wait_for_event(struct anfc_nand_controller *nfc)
+{
+ return wait_for_completion_timeout(&nfc->event,
+ msecs_to_jiffies(EVENT_TIMEOUT_MSEC));
+}
+
+static inline void anfc_setpktszcnt(struct anfc_nand_controller *nfc,
+ u32 pktsize, u32 pktcount)
+{
+ writel(pktsize | (pktcount << PKT_CNT_SHIFT), nfc->base + PKT_OFST);
+}
+
+static inline void anfc_set_eccsparecmd(struct anfc_nand_controller *nfc,
+ struct anfc_nand_chip *achip, u8 cmd1,
+ u8 cmd2)
+{
+ writel(cmd1 | (cmd2 << CMD2_SHIFT) |
+ (achip->caddr_cycles << ADDR_CYCLES_SHIFT),
+ nfc->base + ECC_SPR_CMD_OFST);
+}
+
+static void anfc_setpagecoladdr(struct anfc_nand_controller *nfc, u32 page,
+ u16 col)
+{
+ u32 val;
+
+ writel(col | (page << PG_ADDR_SHIFT), nfc->base + MEM_ADDR1_OFST);
+
+ val = readl(nfc->base + MEM_ADDR2_OFST);
+ val = (val & ~MEM_ADDR_MASK) |
+ ((page >> PG_ADDR_SHIFT) & MEM_ADDR_MASK);
+ writel(val, nfc->base + MEM_ADDR2_OFST);
+}
+
+static void anfc_prepare_cmd(struct anfc_nand_controller *nfc, u8 cmd1,
+ u8 cmd2, u8 dmamode,
+ u32 pagesize, u8 addrcycles)
+{
+ u32 regval;
+
+ regval = cmd1 | (cmd2 << CMD2_SHIFT);
+ if (dmamode)
+ regval |= DMA_ENABLE << DMA_EN_SHIFT;
+ regval |= addrcycles << ADDR_CYCLES_SHIFT;
+ regval |= anfc_page(pagesize) << REG_PAGE_SIZE_SHIFT;
+ writel(regval, nfc->base + CMD_OFST);
+}
+
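+/*
+ * Transfer a buffer using the controller's internal DMA engine: map the
+ * buffer, program the 64-bit DMA address, start the PROG operation and
+ * wait for the transfer-complete interrupt before unmapping.
+ */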
+static void anfc_rw_dma_op(struct mtd_info *mtd, u8 *buf, int len,
+ bool do_read, u32 prog, int pktcount, int pktsize)
+{
+ dma_addr_t paddr;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ u32 eccintr = 0, dir;
+
+ if (pktsize == 0)
+ pktsize = len;
+
+ anfc_setpktszcnt(nfc, pktsize, pktcount);
+
+ if (!achip->strength)
+ eccintr = MBIT_ERROR;
+
+ if (do_read)
+ dir = DMA_FROM_DEVICE;
+ else
+ dir = DMA_TO_DEVICE;
+
+ paddr = dma_map_single(nfc->dev, buf, len, dir);
+ if (dma_mapping_error(nfc->dev, paddr)) {
+ dev_err(nfc->dev, "Buffer mapping error\n");
+ return;
+ }
+ writel(paddr, nfc->base + DMA_ADDR0_OFST);
+ writel((paddr >> 32), nfc->base + DMA_ADDR1_OFST);
+ anfc_enable_intrs(nfc, (XFER_COMPLETE | eccintr));
+ writel(prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc);
+ dma_unmap_single(nfc->dev, paddr, len, dir);
+}
+
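+/*
+ * Transfer a buffer packet by packet through the data port register.
+ * The controller raises READ_READY/WRITE_READY per packet; after the
+ * last packet only the transfer-complete interrupt is left enabled.
+ */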
+static void anfc_rw_pio_op(struct mtd_info *mtd, u8 *buf, int len,
+ bool do_read, int prog, int pktcount, int pktsize)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ u32 *bufptr = (u32 *)buf;
+ u32 cnt = 0, intr = 0;
+
+ anfc_config_dma(nfc, 0);
+
+ if (pktsize == 0)
+ pktsize = len;
+
+ anfc_setpktszcnt(nfc, pktsize, pktcount);
+
+ if (!achip->strength)
+ intr = MBIT_ERROR;
+
+ if (do_read)
+ intr |= READ_READY;
+ else
+ intr |= WRITE_READY;
+
+ anfc_enable_intrs(nfc, intr);
+ writel(prog, nfc->base + PROG_OFST);
+ while (cnt < pktcount) {
+ anfc_wait_for_event(nfc);
+ cnt++;
+ if (cnt == pktcount)
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ if (do_read)
+ ioread32_rep(nfc->base + DATA_PORT_OFST, bufptr,
+ pktsize / 4);
+ else
+ iowrite32_rep(nfc->base + DATA_PORT_OFST, bufptr,
+ pktsize / 4);
+ bufptr += (pktsize / 4);
+ if (cnt < pktcount)
+ anfc_enable_intrs(nfc, intr);
+ }
+ anfc_wait_for_event(nfc);
+}
+
+static void anfc_read_data_op(struct nand_chip *chip, u8 *buf, int len,
+ int pktcount, int pktsize)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (virt_addr_valid(buf))
+ anfc_rw_dma_op(mtd, buf, len, 1, PROG_PGRD, pktcount, pktsize);
+ else
+ anfc_rw_pio_op(mtd, buf, len, 1, PROG_PGRD, pktcount, pktsize);
+}
+
+static void anfc_write_data_op(struct nand_chip *chip, const u8 *buf,
+ int len, int pktcount, int pktsize)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (virt_addr_valid(buf))
+ anfc_rw_dma_op(mtd, (char *)buf, len, 0, PROG_PGPROG, pktcount,
+ pktsize);
+ else
+ anfc_rw_pio_op(mtd, (char *)buf, len, 0, PROG_PGPROG, pktcount,
+ pktsize);
+}
+
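+/*
+ * Read a page with hardware ECC. When the controller flags errors, the
+ * ECC bytes are fetched from the OOB area and
+ * nand_check_erased_ecc_chunk() is used to tell erased pages apart from
+ * pages with genuine bitflips.
+ */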
+static int anfc_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ u8 *ecc_code = chip->ecc.code_buf;
+ u8 *p;
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int stat = 0, i;
+ u32 ret;
+ unsigned int max_bitflips = 0;
+ u32 eccsteps;
+ u32 one_bit_err = 0, multi_bit_err = 0;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ anfc_set_eccsparecmd(nfc, achip, NAND_CMD_RNDOUT, NAND_CMD_RNDOUTSTART);
+ anfc_config_ecc(nfc, true);
+ anfc_read_data_op(chip, buf, mtd->writesize,
+ DIV_ROUND_UP(mtd->writesize, achip->pktsize),
+ achip->pktsize);
+
+ if (achip->strength) {
+ /*
+ * In BCH mode the Arasan NAND controller can correct ECC errors
+ * up to 24 bits. Beyond that, it cannot even detect errors.
+ */
+ multi_bit_err = readl(nfc->base + ECC_ERR_CNT_OFST);
+ multi_bit_err = ((multi_bit_err & PAGE_ERR_CNT_MASK) >> 8);
+ } else {
+ /*
+ * In Hamming mode the Arasan NAND controller can correct up to
+ * 1-bit errors and detect up to 4-bit errors.
+ */
+ one_bit_err = readl(nfc->base + ECC_ERR_CNT_1BIT_OFST);
+ multi_bit_err = readl(nfc->base + ECC_ERR_CNT_2BIT_OFST);
+
+ /* Clear the 1-bit and 2-bit ECC error count registers */
+ writel(0x0, nfc->base + ECC_ERR_CNT_1BIT_OFST);
+ writel(0x0, nfc->base + ECC_ERR_CNT_2BIT_OFST);
+ }
+
+ anfc_config_ecc(nfc, false);
+
+ if (oob_required)
+ chip->ecc.read_oob(mtd, chip, page);
+
+ if (multi_bit_err || one_bit_err) {
+ if (!oob_required)
+ chip->ecc.read_oob(mtd, chip, page);
+
+ mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ eccsteps = chip->ecc.steps;
+ p = buf;
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes,
+ p += eccsize) {
+ stat = nand_check_erased_ecc_chunk(p,
+ chip->ecc.size,
+ &ecc_code[i],
+ eccbytes,
+ NULL, 0,
+ chip->ecc.strength);
+ if (stat < 0) {
+ stat = 0;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ stat);
+ }
+ }
+ }
+
+ return max_bitflips;
+}
+
+static int anfc_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ anfc_set_eccsparecmd(nfc, achip, NAND_CMD_RNDIN, 0);
+ anfc_config_ecc(nfc, true);
+ anfc_write_data_op(chip, buf, mtd->writesize,
+ DIV_ROUND_UP(mtd->writesize, achip->pktsize),
+ achip->pktsize);
+
+ if (oob_required)
+ chip->ecc.write_oob(mtd, chip, page);
+
+ anfc_config_ecc(nfc, false);
+
+ return 0;
+}
+
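+/*
+ * Derive the ECC configuration from the chip's advertised step size and
+ * strength: map the strength to the controller's BCH encoding, compute
+ * the total number of ECC bytes and locate them at the end of the OOB
+ * area.
+ */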
+static int anfc_ecc_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc, int ecc_mode)
+{
+ u32 ecc_addr;
+ unsigned int ecc_strength, steps;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+
+ ecc->mode = NAND_ECC_HW;
+ ecc->read_page = anfc_read_page_hwecc;
+ ecc->write_page = anfc_write_page_hwecc;
+
+ mtd_set_ooblayout(mtd, &anfc_ooblayout_ops);
+
+ steps = mtd->writesize / chip->ecc_step_ds;
+
+ switch (chip->ecc_strength_ds) {
+ case 12:
+ ecc_strength = 0x1;
+ break;
+ case 8:
+ ecc_strength = 0x2;
+ break;
+ case 4:
+ ecc_strength = 0x3;
+ break;
+ case 24:
+ ecc_strength = 0x4;
+ break;
+ default:
+ ecc_strength = 0x0;
+ }
+ if (!ecc_strength)
+ ecc->total = 3 * steps;
+ else
+ ecc->total =
+ DIV_ROUND_UP(fls(8 * chip->ecc_step_ds) *
+ chip->ecc_strength_ds * steps, 8);
+
+ ecc->strength = chip->ecc_strength_ds;
+ ecc->size = chip->ecc_step_ds;
+ ecc->bytes = ecc->total / steps;
+ ecc->steps = steps;
+ achip->ecc_strength = ecc_strength;
+ achip->strength = achip->ecc_strength;
+ ecc_addr = mtd->writesize + (mtd->oobsize - ecc->total);
+ achip->eccval = ecc_addr | (ecc->total << ECC_SIZE_SHIFT) |
+ (achip->strength << BCH_EN_SHIFT);
+
+ if (chip->ecc_step_ds >= 1024)
+ achip->pktsize = 1024;
+ else
+ achip->pktsize = 512;
+
+ return 0;
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
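+/*
+ * Walk a sub-operation's instruction list and collect the opcodes, the
+ * address cycles (split into column and row parts) and the data
+ * instruction into an anfc_op descriptor for the pattern handlers below.
+ */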
+static void anfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct anfc_op *nfc_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id;
+ int i = 0;
+
+ memset(nfc_op, 0, sizeof(struct anfc_op));
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int naddrs;
+
+ instr = &subop->instrs[op_id];
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (op_id)
+ nfc_op->cmds[1] = instr->ctx.cmd.opcode;
+ else
+ nfc_op->cmds[0] = instr->ctx.cmd.opcode;
+ nfc->curr_cmd = nfc_op->cmds[0];
+
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ i = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop,
+ op_id);
+
+ for (; i < naddrs; i++) {
+ u8 val = instr->ctx.addr.addrs[i];
+
+ if (nfc_op->cmds[0] == NAND_CMD_ERASE1) {
+ nfc_op->row |= COL_ROW_ADDR(i, val);
+ } else {
+ if (i < 2)
+ nfc_op->col |= COL_ROW_ADDR(i,
+ val);
+ else
+ nfc_op->row |= COL_ROW_ADDR(i -
+ 2, val);
+ }
+ }
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ break;
+ }
+ }
+}
+
+static int anfc_reset_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct anfc_op nfc_op = {};
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+
+ /*
+ * Execute only NAND_CMD_RESET here; other commands have their own
+ * patterns. If no pattern matches, the controller does not support
+ * that operation.
+ */
+ if (nfc_op.cmds[0] != NAND_CMD_RESET)
+ return 0;
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], 0, 0, 0, 0);
+ nfc->prog = PROG_RST;
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ writel(nfc->prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc);
+
+ return 0;
+}
+
+static int anfc_read_id_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_op nfc_op = {};
+ unsigned int op_id, len;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], 0, 0, 0, 1);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+ nfc->prog = PROG_RDID;
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, PROG_RDID, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+static int anfc_read_status_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_op nfc_op = {};
+ unsigned int op_id, len;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], 0, 0, 0, 0);
+ anfc_setpktszcnt(nfc, achip->spktsize / 4, 1);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+ nfc->prog = PROG_STATUS;
+
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ writel(nfc->prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+
+ /*
+ * The Arasan NAND controller updates the status value returned by
+ * the flash device in the FLASH_STS register.
+ */
+ nfc->status = readl(nfc->base + FLASH_STS_OFST);
+ memcpy(instr->ctx.data.buf.in, &nfc->status, len);
+
+ return 0;
+}
+
+static int anfc_erase_and_zero_len_page_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop
+ *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 dma_mode = 0, write_size = 0, addrcycles = 0, len, op_id;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ if (nfc_op.cmds[0] == NAND_CMD_ERASE1) {
+ nfc->prog = PROG_ERASE;
+ addrcycles = achip->raddr_cycles;
+ write_size = 0;
+ dma_mode = 0;
+ nfc_op.col = nfc_op.row & 0xffff;
+ nfc_op.row = (nfc_op.row >> PG_ADDR_SHIFT) & 0xffff;
+ }
+ if (nfc_op.cmds[0] == NAND_CMD_READ0) {
+ nfc->prog = PROG_PGRD;
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ write_size = mtd->writesize;
+ dma_mode = 1;
+ }
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], nfc_op.cmds[1], dma_mode,
+ write_size, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (nfc_op.cmds[0] == NAND_CMD_ERASE1) {
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ writel(nfc->prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc);
+ }
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_read_data_op(chip, instr->ctx.data.buf.in, len, 1, 0);
+
+ return 0;
+}
+
+static int anfc_read_param_get_feature_sp_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop
+ *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ u32 dma_mode = 0, addrcycles = 0, write_size = 0;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ if (nfc_op.cmds[0] == NAND_CMD_PARAM) {
+ nfc->prog = PROG_RDPARAM;
+ dma_mode = 0;
+ addrcycles = 1;
+ write_size = 0;
+ }
+ if (nfc_op.cmds[0] == NAND_CMD_GET_FEATURES) {
+ nfc->prog = PROG_GET_FEATURE;
+ dma_mode = 0;
+ addrcycles = 1;
+ write_size = 0;
+ }
+ if (nfc_op.cmds[0] == NAND_CMD_READ0) {
+ nfc->prog = PROG_PGRD;
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ write_size = mtd->writesize;
+ dma_mode = 1;
+ }
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], 0, dma_mode, write_size,
+ addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, nfc->prog, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+static int anfc_random_datain_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, PROG_PGRD, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+static int anfc_setfeature_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_SET_FEATURE;
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], 0, 0, 0, 1);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_write_data_op(chip, (char *)instr->ctx.data.buf.out, len, 1, 0);
+
+ return 0;
+}
+
+static int anfc_change_read_column_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_PGRD;
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], nfc_op.cmds[1], 1,
+ mtd->writesize, 2);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, nfc->prog, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+static int anfc_page_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_PGRD;
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], nfc_op.cmds[1], 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, nfc->prog, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+static int anfc_zero_len_page_write_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_PGRD;
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], NAND_CMD_PAGEPROG, 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ return 0;
+}
+
+static int anfc_page_write_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+ nfc->prog = PROG_PGPROG;
+
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], nfc_op.cmds[1], 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_write_data_op(chip, (char *)instr->ctx.data.buf.out, len, 1, 0);
+
+ return 0;
+}
+
+static int anfc_page_write_nowait_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ nfc->prog = PROG_PGPROG;
+
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], NAND_CMD_PAGEPROG, 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ anfc_write_data_op(chip, (char *)instr->ctx.data.buf.out,
+ mtd->writesize, DIV_ROUND_UP(mtd->writesize,
+ achip->pktsize), achip->pktsize);
+
+ return 0;
+}
+
+static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
+ /* Use a separate function for each pattern */
+ NAND_OP_PARSER_PATTERN(
+ anfc_random_datain_type_exec,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_change_read_column_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_page_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_page_write_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_erase_and_zero_len_page_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_read_status_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_reset_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_setfeature_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_page_write_nowait_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_read_param_get_feature_sp_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_zero_len_page_write_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES)),
+ );
+
+static int anfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ return nand_op_parser_exec_op(chip, &anfc_op_parser,
+ op, check_only);
+}
+
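+/*
+ * Select/deselect a chip: deselecting drops the runtime PM reference,
+ * selecting takes one and programs the chip select, BCH mode, ECC and
+ * timing registers for the target.
+ */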
+static void anfc_select_chip(struct mtd_info *mtd, int num)
+{
+ u32 val;
+ int ret;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+
+ if (num < 0) {
+ nfc->chip_active = false;
+ pm_runtime_mark_last_busy(nfc->dev);
+ pm_runtime_put_autosuspend(nfc->dev);
+ return;
+ }
+
+ nfc->chip_active = true;
+ ret = pm_runtime_get_sync(nfc->dev);
+ if (ret < 0) {
+ dev_err(nfc->dev, "runtime_get_sync failed\n");
+ return;
+ }
+
+ val = readl(nfc->base + MEM_ADDR2_OFST);
+ val &= ~(CS_MASK | BCH_MODE_MASK);
+ val |= (achip->csnum << CS_SHIFT) |
+ (achip->ecc_strength << BCH_MODE_SHIFT);
+ writel(val, nfc->base + MEM_ADDR2_OFST);
+ nfc->csnum = achip->csnum;
+ writel(achip->eccval, nfc->base + ECC_OFST);
+ writel(achip->inftimeval, nfc->base + DATA_INTERFACE_OFST);
+}
+
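+/* Ack the event interrupts, mask further ones and wake up the waiter */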
+static irqreturn_t anfc_irq_handler(int irq, void *ptr)
+{
+ struct anfc_nand_controller *nfc = ptr;
+ u32 status;
+
+ status = readl(nfc->base + INTR_STS_OFST);
+ if (status & EVENT_MASK) {
+ writel(status & EVENT_MASK, nfc->base + INTR_STS_OFST);
+ writel(0, nfc->base + INTR_STS_EN_OFST);
+ writel(0, nfc->base + INTR_SIG_EN_OFST);
+ complete(&nfc->event);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int anfc_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ int ret;
+
+ if (mtd->writesize <= SZ_512)
+ achip->caddr_cycles = 1;
+ else
+ achip->caddr_cycles = 2;
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ achip->raddr_cycles = 3;
+ else
+ achip->raddr_cycles = 2;
+
+ chip->ecc.calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ chip->ecc.code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ if (!chip->ecc.calc_buf || !chip->ecc.code_buf) {
+ kfree(chip->ecc.calc_buf);
+ kfree(chip->ecc.code_buf);
+ return -ENOMEM;
+ }
+ ret = anfc_ecc_init(mtd, &chip->ecc, chip->ecc.mode);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct nand_controller_ops anfc_nand_controller_ops = {
+ .attach_chip = anfc_nand_attach_chip,
+};
+
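+/*
+ * Pick the best timing mode the chip advertises (NV-DDR if available,
+ * otherwise the fastest async mode), program it into the device via
+ * SET_FEATURES and cap the controller clock for SDR modes 2-5.
+ */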
+static int anfc_init_timing_mode(struct anfc_nand_controller *nfc,
+ struct anfc_nand_chip *achip)
+{
+ struct nand_chip *chip = &achip->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int mode, err;
+ unsigned int feature[2];
+ u32 inftimeval;
+ bool change_sdr_clk = false;
+
+ memset(feature, 0, sizeof(feature));
+ /* Get the NV-DDR timing modes */
+ mode = onfi_get_sync_timing_mode(chip) & 0xff;
+ if (!mode) {
+ mode = fls(onfi_get_async_timing_mode(chip)) - 1;
+ inftimeval = mode;
+ if (mode >= 2 && mode <= 5)
+ change_sdr_clk = true;
+ } else {
+ mode = fls(mode) - 1;
+ inftimeval = NVDDR_MODE | (mode << NVDDR_TIMING_MODE_SHIFT);
+ mode |= ONFI_DATA_INTERFACE_NVDDR;
+ }
+
+ feature[0] = mode;
+ chip->select_chip(mtd, achip->csnum);
+ err = chip->set_features(mtd, chip, ONFI_FEATURE_ADDR_TIMING_MODE,
+ (uint8_t *)feature);
+ chip->select_chip(mtd, -1);
+ if (err)
+ return err;
+
+ /*
+ * SDR timing modes 2-5 do not work on the Arasan NAND controller
+ * when the clock runs above 90 MHz, so reduce the clock to below
+ * 90 MHz for those modes.
+ */
+ if (change_sdr_clk) {
+ clk_disable_unprepare(nfc->clk_sys);
+ err = clk_set_rate(nfc->clk_sys, SDR_MODE_DEFLT_FREQ);
+ if (err) {
+ dev_err(nfc->dev, "Can't set the clock rate\n");
+ return err;
+ }
+ err = clk_prepare_enable(nfc->clk_sys);
+ if (err) {
+ dev_err(nfc->dev, "Unable to enable sys clock.\n");
+ return err;
+ }
+ }
+ achip->inftimeval = inftimeval;
+
+ if (mode & ONFI_DATA_INTERFACE_NVDDR)
+ achip->spktsize = NVDDR_MODE_PACKET_SIZE;
+
+ return 0;
+}
+
+static int anfc_nand_chip_init(struct anfc_nand_controller *nfc,
+ struct anfc_nand_chip *anand_chip,
+ struct device_node *np)
+{
+ struct nand_chip *chip = &anand_chip->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = of_property_read_u32(np, "reg", &anand_chip->csnum);
+ if (ret) {
+ dev_err(nfc->dev, "can't get chip-select\n");
+ return -ENXIO;
+ }
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL, "arasan_nand.%d",
+ anand_chip->csnum);
+ mtd->dev.parent = nfc->dev;
+
+ chip->chip_delay = 30;
+ chip->controller = &nfc->controller;
+ chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ chip->select_chip = anfc_select_chip;
+ chip->exec_op = anfc_exec_op;
+ nand_set_flash_node(chip, np);
+
+ anand_chip->spktsize = SDR_MODE_PACKET_SIZE;
+
+ ret = nand_scan(mtd, 1);
+ if (ret) {
+ dev_err(nfc->dev, "nand_scan_tail for NAND failed\n");
+ return ret;
+ }
+
+ ret = anfc_init_timing_mode(nfc, anand_chip);
+ if (ret) {
+ dev_err(nfc->dev, "timing mode init failed\n");
+ return ret;
+ }
+
+ return mtd_device_register(mtd, NULL, 0);
+}
+
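+/*
+ * Probe: map the registers, request the IRQ and clocks, enable runtime
+ * PM, then initialize and register one NAND chip per child node.
+ */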
+static int anfc_probe(struct platform_device *pdev)
+{
+ struct anfc_nand_controller *nfc;
+ struct anfc_nand_chip *anand_chip;
+ struct device_node *np = pdev->dev.of_node, *child;
+ struct resource *res;
+ int err;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+ init_completion(&nfc->event);
+ nfc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, nfc);
+ nfc->csnum = -1;
+ nfc->controller.ops = &anfc_nand_controller_ops;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nfc->base))
+ return PTR_ERR(nfc->base);
+ nfc->irq = platform_get_irq(pdev, 0);
+ if (nfc->irq < 0) {
+ dev_err(&pdev->dev, "platform_get_irq failed\n");
+ return -ENXIO;
+ }
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (err)
+ return err;
+ err = devm_request_irq(&pdev->dev, nfc->irq, anfc_irq_handler,
+ 0, "arasannfc", nfc);
+ if (err)
+ return err;
+ nfc->clk_sys = devm_clk_get(&pdev->dev, "clk_sys");
+ if (IS_ERR(nfc->clk_sys)) {
+ dev_err(&pdev->dev, "sys clock not found.\n");
+ return PTR_ERR(nfc->clk_sys);
+ }
+
+ nfc->clk_flash = devm_clk_get(&pdev->dev, "clk_flash");
+ if (IS_ERR(nfc->clk_flash)) {
+ dev_err(&pdev->dev, "flash clock not found.\n");
+ return PTR_ERR(nfc->clk_flash);
+ }
+
+ err = clk_prepare_enable(nfc->clk_sys);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable sys clock.\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(nfc->clk_flash);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable flash clock.\n");
+ goto clk_dis_sys;
+ }
+
+ pm_runtime_set_autosuspend_delay(nfc->dev, ANFC_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(nfc->dev);
+ pm_runtime_set_active(nfc->dev);
+ pm_runtime_get_noresume(nfc->dev);
+ pm_runtime_enable(nfc->dev);
+ for_each_available_child_of_node(np, child) {
+ anand_chip = devm_kzalloc(&pdev->dev, sizeof(*anand_chip),
+ GFP_KERNEL);
+ if (!anand_chip) {
+ of_node_put(child);
+ err = -ENOMEM;
+ goto nandchip_clean_up;
+ }
+ err = anfc_nand_chip_init(nfc, anand_chip, child);
+ if (err) {
+ devm_kfree(&pdev->dev, anand_chip);
+ continue;
+ }
+
+ list_add_tail(&anand_chip->node, &nfc->chips);
+ }
+ pm_runtime_mark_last_busy(nfc->dev);
+ pm_runtime_put_autosuspend(nfc->dev);
+ return 0;
+
+nandchip_clean_up:
+ list_for_each_entry(anand_chip, &nfc->chips, node)
+ nand_release(nand_to_mtd(&anand_chip->chip));
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ clk_disable_unprepare(nfc->clk_flash);
+clk_dis_sys:
+ clk_disable_unprepare(nfc->clk_sys);
+
+ return err;
+}
+
+static int anfc_remove(struct platform_device *pdev)
+{
+ struct anfc_nand_controller *nfc = platform_get_drvdata(pdev);
+ struct anfc_nand_chip *anand_chip;
+
+ list_for_each_entry(anand_chip, &nfc->chips, node)
+ nand_release(nand_to_mtd(&anand_chip->chip));
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+
+ clk_disable_unprepare(nfc->clk_sys);
+ clk_disable_unprepare(nfc->clk_flash);
+
+ return 0;
+}
+
+static const struct of_device_id anfc_ids[] = {
+ { .compatible = "arasan,nfc-v3p10" },
+ { .compatible = "xlnx,zynqmp-nand" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, anfc_ids);
+
+static int anfc_suspend(struct device *dev)
+{
+ return pm_runtime_put_sync(dev);
+}
+
+static int anfc_resume(struct device *dev)
+{
+ return pm_runtime_get_sync(dev);
+}
+
+static int __maybe_unused anfc_runtime_suspend(struct device *dev)
+{
+ struct anfc_nand_controller *nfc = dev_get_drvdata(dev);
+
+ clk_disable(nfc->clk_sys);
+ clk_disable(nfc->clk_flash);
+
+ return 0;
+}
+
+static int __maybe_unused anfc_runtime_idle(struct device *dev)
+{
+ struct anfc_nand_controller *nfc = dev_get_drvdata(dev);
+
+ if (nfc->chip_active)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int __maybe_unused anfc_runtime_resume(struct device *dev)
+{
+ struct anfc_nand_controller *nfc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(nfc->clk_sys);
+ if (ret) {
+ dev_err(dev, "Cannot enable sys clock.\n");
+ return ret;
+ }
+
+ ret = clk_enable(nfc->clk_flash);
+ if (ret) {
+ dev_err(dev, "Cannot enable flash clock.\n");
+ clk_disable(nfc->clk_sys);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops anfc_pm_ops = {
+ .resume = anfc_resume,
+ .suspend = anfc_suspend,
+ .runtime_resume = anfc_runtime_resume,
+ .runtime_suspend = anfc_runtime_suspend,
+ .runtime_idle = anfc_runtime_idle,
+};
+
+static struct platform_driver anfc_driver = {
+ .driver = {
+ .name = "arasan-nand-controller",
+ .of_match_table = anfc_ids,
+ .pm = &anfc_pm_ops,
+ },
+ .probe = anfc_probe,
+ .remove = anfc_remove,
+};
+module_platform_driver(anfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xilinx, Inc");
+MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver");
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 17c751f359d3..997910bec727 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -402,6 +402,7 @@ static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
dma_async_issue_pending(nc->dmac);
wait_for_completion(&finished);
+ dma_unmap_single(nc->dev, buf_dma, len, dir);
return 0;
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index bd9f45edc9a3..e6efc115ef15 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -197,6 +197,7 @@ struct brcmnand_controller {
unsigned int max_page_size;
const unsigned int *page_sizes;
unsigned int max_oob;
+ u32 ecc_level_shift;
u32 features;
/* for low-power standby/resume only */
@@ -487,6 +488,34 @@ enum {
INTFC_CTLR_READY = BIT(31),
};
+/***********************************************************************
+ * NAND ACC CONTROL bitfield
+ *
+ * Some bits have remained constant across hardware revisions, while
+ * others have shifted around.
+ ***********************************************************************/
+
+/* Constant for all versions (where supported) */
+enum {
+ /* See BRCMNAND_HAS_CACHE_MODE */
+ ACC_CONTROL_CACHE_MODE = BIT(22),
+
+ /* See BRCMNAND_HAS_PREFETCH */
+ ACC_CONTROL_PREFETCH = BIT(23),
+
+ ACC_CONTROL_PAGE_HIT = BIT(24),
+ ACC_CONTROL_WR_PREEMPT = BIT(25),
+ ACC_CONTROL_PARTIAL_PAGE = BIT(26),
+ ACC_CONTROL_RD_ERASED = BIT(27),
+ ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
+ ACC_CONTROL_WR_ECC = BIT(30),
+ ACC_CONTROL_RD_ECC = BIT(31),
+};
+
+#define ACC_CONTROL_ECC_SHIFT 16
+/* Only for v7.2 */
+#define ACC_CONTROL_ECC_EXT_SHIFT 13
+
static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
{
return brcmnand_readl(ctrl->nand_base + offs);
@@ -590,6 +619,12 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
ctrl->features |= BRCMNAND_HAS_WP;
+ /* v7.2 has different ecc level shift in the acc register */
+ if (ctrl->nand_version == 0x0702)
+ ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT;
+ else
+ ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT;
+
return 0;
}
@@ -752,30 +787,6 @@ static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
return 0;
}
-/***********************************************************************
- * NAND ACC CONTROL bitfield
- *
- * Some bits have remained constant throughout hardware revision, while
- * others have shifted around.
- ***********************************************************************/
-
-/* Constant for all versions (where supported) */
-enum {
- /* See BRCMNAND_HAS_CACHE_MODE */
- ACC_CONTROL_CACHE_MODE = BIT(22),
-
- /* See BRCMNAND_HAS_PREFETCH */
- ACC_CONTROL_PREFETCH = BIT(23),
-
- ACC_CONTROL_PAGE_HIT = BIT(24),
- ACC_CONTROL_WR_PREEMPT = BIT(25),
- ACC_CONTROL_PARTIAL_PAGE = BIT(26),
- ACC_CONTROL_RD_ERASED = BIT(27),
- ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
- ACC_CONTROL_WR_ECC = BIT(30),
- ACC_CONTROL_RD_ECC = BIT(31),
-};
-
static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
{
if (ctrl->nand_version == 0x0702)
@@ -786,18 +797,15 @@ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
return GENMASK(5, 0);
}
-#define NAND_ACC_CONTROL_ECC_SHIFT 16
-#define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13
-
static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
{
u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
- mask <<= NAND_ACC_CONTROL_ECC_SHIFT;
+ mask <<= ACC_CONTROL_ECC_SHIFT;
/* v7.2 includes additional ECC levels */
- if (ctrl->nand_version >= 0x0702)
- mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;
+ if (ctrl->nand_version == 0x0702)
+ mask |= 0x7 << ACC_CONTROL_ECC_EXT_SHIFT;
return mask;
}
@@ -811,8 +819,8 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
if (en) {
acc_control |= ecc_flags; /* enable RD/WR ECC */
- acc_control |= host->hwcfg.ecc_level
- << NAND_ACC_CONTROL_ECC_SHIFT;
+ acc_control &= ~brcmnand_ecc_level_mask(ctrl);
+ acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift;
} else {
acc_control &= ~ecc_flags; /* disable RD/WR ECC */
acc_control &= ~brcmnand_ecc_level_mask(ctrl);
@@ -891,6 +899,14 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
cpu_relax();
} while (time_after(limit, jiffies));
+ /*
+ * Do a final check after the timeout expires, in case the CPU was
+ * busy and the driver did not get enough time to poll, to avoid
+ * false timeout alarms.
+ */
+ val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
+ if ((val & mask) == expected_val)
+ return 0;
+
dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
expected_val, val & mask);
@@ -1273,19 +1289,33 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
const u8 *oob, int sas, int sector_1k)
{
int tbytes = sas << sector_1k;
- int j;
+ int j, k = 0;
+ u32 last = 0xffffffff;
+ u8 *plast = (u8 *)&last;
/* Adjust OOB values for 1K sector size */
if (sector_1k && (i & 0x01))
tbytes = max(0, tbytes - (int)ctrl->max_oob);
tbytes = min_t(int, tbytes, ctrl->max_oob);
- for (j = 0; j < tbytes; j += 4)
+ /*
+ * tbytes may not be a multiple of the word size. Make sure we don't
+ * read past the end of the buffer and stop at the last whole word.
+ */
+ for (j = 0; (j + 3) < tbytes; j += 4)
oob_reg_write(ctrl, j,
(oob[j + 0] << 24) |
(oob[j + 1] << 16) |
(oob[j + 2] << 8) |
(oob[j + 3] << 0));
+
+ /* handle the remaining bytes */
+ while (j < tbytes)
+ plast[k++] = oob[j++];
+
+ if (tbytes & 0x3)
+ oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last));
+
return tbytes;
}
@@ -1331,7 +1361,17 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);
- BUG_ON(ctrl->cmd_pending != 0);
+ /*
+ * If we came here through _panic_write and there is a pending
+ * command, try to wait for it. If it times out, rather than
+ * hitting BUG_ON, just return so we don't crash while crashing.
+ */
+ if (oops_in_progress) {
+ if (ctrl->cmd_pending &&
+ bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
+ return;
+ } else
+ BUG_ON(ctrl->cmd_pending != 0);
ctrl->cmd_pending = cmd;
ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
@@ -2161,7 +2201,7 @@ static int brcmnand_set_cfg(struct brcmnand_host *host,
tmp = nand_readreg(ctrl, acc_control_offs);
tmp &= ~brcmnand_ecc_level_mask(ctrl);
- tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
+ tmp |= cfg->ecc_level << ctrl->ecc_level_shift;
tmp &= ~brcmnand_spare_area_mask(ctrl);
tmp |= cfg->spare_area_size;
nand_writereg(ctrl, acc_control_offs, tmp);
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index 2af09edf405b..5c52cf1b7bd4 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -21,7 +21,7 @@
#define ERR_BYTE 0xFF /* Value returned for read
bytes when read failed */
-#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
+#define IFC_TIMEOUT_MSECS 1000 /* Maximum timeout to wait
for IFC NAND Machine */
struct fsl_ifc_ctrl;
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index cc8369a595de..bccadf8f27fa 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -1181,9 +1181,14 @@ static int fsmc_nand_suspend(struct device *dev)
static int fsmc_nand_resume(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
+ int ret;
if (host) {
- clk_prepare_enable(host->clk);
+ ret = clk_prepare_enable(host->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk\n");
+ return ret;
+ }
if (host->dev_timings)
fsmc_nand_setup(host, host->dev_timings);
nand_reset(&host->nand, 0);
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 02218c3b548f..fdf5cf5565f9 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -652,8 +652,9 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
unsigned int tRP_ps;
bool use_half_period;
int sample_delay_ps, sample_delay_factor;
- u16 busy_timeout_cycles;
+ unsigned int busy_timeout_cycles;
u8 wrn_dly_sel;
+ u64 busy_timeout_ps;
if (sdr->tRC_min >= 30000) {
/* ONFI non-EDO modes [0-3] */
@@ -677,12 +678,13 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
- busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
+ busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
+ busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
- hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
+ hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096));
/*
* Derive NFC ideal delay from {3}:
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
index 2cda439b5e11..017868f59f22 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
@@ -36,25 +36,25 @@ int ingenic_ecc_correct(struct ingenic_ecc *ecc,
void ingenic_ecc_release(struct ingenic_ecc *ecc);
struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np);
#else /* CONFIG_MTD_NAND_INGENIC_ECC */
-int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+static inline int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
struct ingenic_ecc_params *params,
const u8 *buf, u8 *ecc_code)
{
return -ENODEV;
}
-int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+static inline int ingenic_ecc_correct(struct ingenic_ecc *ecc,
struct ingenic_ecc_params *params, u8 *buf,
u8 *ecc_code)
{
return -ENODEV;
}
-void ingenic_ecc_release(struct ingenic_ecc *ecc)
+static inline void ingenic_ecc_release(struct ingenic_ecc *ecc)
{
}
-struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np)
+static inline struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np)
{
return ERR_PTR(-ENODEV);
}
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index 78b31f845c50..a7d0a76eee7f 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -304,8 +304,9 @@ static int lpc32xx_nand_device_ready(struct nand_chip *nand_chip)
return 0;
}
-static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
+static irqreturn_t lpc3xxx_nand_irq(int irq, void *data)
{
+ struct lpc32xx_nand_host *host = data;
uint8_t sr;
/* Clear interrupt flag by reading status */
@@ -778,7 +779,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
goto release_dma_chan;
}
- if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
+ if (request_irq(host->irq, &lpc3xxx_nand_irq,
IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
res = -ENXIO;
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index ef163ff1edf7..9a119688c0a6 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -1109,6 +1109,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
.ndcb[2] = NDCB2_ADDR5_PAGE(page),
};
unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ u8 status;
int ret;
/* NFCv2 needs more information about the operation being executed */
@@ -1142,7 +1143,18 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
ret = marvell_nfc_wait_op(chip,
PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
- return ret;
+ if (ret)
+ return ret;
+
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
}
static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
@@ -1570,6 +1582,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
int data_len = lt->data_bytes;
int spare_len = lt->spare_bytes;
int chunk, ret;
+ u8 status;
marvell_nfc_select_target(chip, chip->cur_cs);
@@ -1607,6 +1620,14 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
if (ret)
return ret;
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
return 0;
}
@@ -2401,6 +2422,12 @@ static int marvell_nfc_setup_data_interface(struct nand_chip *chip, int chipnr,
NDTR1_WAIT_MODE;
}
+ /*
+ * Reset nfc->selected_chip so the next command will cause the timing
+ * registers to be updated in marvell_nfc_select_target().
+ */
+ nfc->selected_chip = NULL;
+
return 0;
}
@@ -2630,7 +2657,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
chip->controller = &nfc->controller;
nand_set_flash_node(chip, np);
- if (!of_property_read_bool(np, "marvell,nand-keep-config"))
+ if (of_property_read_bool(np, "marvell,nand-keep-config"))
chip->options |= NAND_KEEP_TIMINGS;
mtd = nand_to_mtd(chip);
@@ -2828,10 +2855,6 @@ static int marvell_nfc_init(struct marvell_nfc *nfc)
regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL,
GENCONF_CLK_GATING_CTRL_ND_GATE,
GENCONF_CLK_GATING_CTRL_ND_GATE);
-
- regmap_update_bits(sysctrl_base, GENCONF_ND_CLK_CTRL,
- GENCONF_ND_CLK_CTRL_EN,
- GENCONF_ND_CLK_CTRL_EN);
}
/* Configure the DMA if appropriate */
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index ab7ab6a279aa..29f3d39138e8 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -72,6 +72,7 @@
#define GENCMDIADDRH(aih, addr) ((aih) | (((addr) >> 16) & 0xffff))
#define DMA_DIR(dir) ((dir) ? NFC_CMD_N2M : NFC_CMD_M2N)
+#define DMA_ADDR_ALIGN 8
#define ECC_CHECK_RETURN_FF (-1)
@@ -172,6 +173,7 @@ struct meson_nfc {
dma_addr_t daddr;
dma_addr_t iaddr;
+ u32 info_bytes;
unsigned long assigned_cs;
};
@@ -275,7 +277,7 @@ static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
if (raw) {
len = mtd->writesize + mtd->oobsize;
- cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir);
+ cmd = (len & GENMASK(13, 0)) | scrambler | DMA_DIR(dir);
writel(cmd, nfc->reg_base + NFC_REG_CMD);
return;
}
@@ -454,7 +456,7 @@ static int meson_nfc_ecc_correct(struct nand_chip *nand, u32 *bitflips,
if (ECC_ERR_CNT(*info) != ECC_UNCORRECTABLE) {
mtd->ecc_stats.corrected += ECC_ERR_CNT(*info);
*bitflips = max_t(u32, *bitflips, ECC_ERR_CNT(*info));
- *correct_bitmap |= 1 >> i;
+ *correct_bitmap |= BIT_ULL(i);
continue;
}
if ((nand->options & NAND_NEED_SCRAMBLING) &&
@@ -499,6 +501,7 @@ static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf,
nfc->daddr, datalen, dir);
return ret;
}
+ nfc->info_bytes = infolen;
cmd = GENCMDIADDRL(NFC_CMD_AIL, nfc->iaddr);
writel(cmd, nfc->reg_base + NFC_REG_CMD);
@@ -516,8 +519,10 @@ static void meson_nfc_dma_buffer_release(struct nand_chip *nand,
struct meson_nfc *nfc = nand_get_controller_data(nand);
dma_unmap_single(nfc->dev, nfc->daddr, datalen, dir);
- if (infolen)
+ if (infolen) {
dma_unmap_single(nfc->dev, nfc->iaddr, infolen, dir);
+ nfc->info_bytes = 0;
+ }
}
static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
@@ -536,7 +541,7 @@ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
if (ret)
goto out;
- cmd = NFC_CMD_N2M | (len & GENMASK(5, 0));
+ cmd = NFC_CMD_N2M | (len & GENMASK(13, 0));
writel(cmd, nfc->reg_base + NFC_REG_CMD);
meson_nfc_drain_cmd(nfc);
@@ -560,7 +565,7 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
if (ret)
return ret;
- cmd = NFC_CMD_M2N | (len & GENMASK(5, 0));
+ cmd = NFC_CMD_M2N | (len & GENMASK(13, 0));
writel(cmd, nfc->reg_base + NFC_REG_CMD);
meson_nfc_drain_cmd(nfc);
@@ -706,6 +711,8 @@ static void meson_nfc_check_ecc_pages_valid(struct meson_nfc *nfc,
usleep_range(10, 15);
/* info is updated by nfc dma engine*/
smp_rmb();
+ dma_sync_single_for_cpu(nfc->dev, nfc->iaddr, nfc->info_bytes,
+ DMA_FROM_DEVICE);
ret = *info & ECC_COMPLETE;
} while (!ret);
}
@@ -800,7 +807,7 @@ static int meson_nfc_read_page_hwecc(struct nand_chip *nand, u8 *buf,
u8 *data = buf + i * ecc->size;
u8 *oob = nand->oob_poi + i * (ecc->bytes + 2);
- if (correct_bitmap & (1 << i))
+ if (correct_bitmap & BIT_ULL(i))
continue;
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 2,
@@ -832,6 +839,9 @@ static int meson_nfc_read_oob(struct nand_chip *nand, int page)
static bool meson_nfc_is_buffer_dma_safe(const void *buffer)
{
+ if ((uintptr_t)buffer % DMA_ADDR_ALIGN)
+ return false;
+
if (virt_addr_valid(buffer) && (!object_is_on_stack(buffer)))
return true;
return false;
@@ -1167,7 +1177,6 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
struct meson_nfc *nfc = nand_get_controller_data(nand);
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
struct mtd_info *mtd = nand_to_mtd(nand);
- int nsectors = mtd->writesize / 1024;
int ret;
if (!mtd->name) {
@@ -1185,7 +1194,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
nand->options |= NAND_NO_SUBPAGE_WRITE;
ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps,
- mtd->oobsize - 2 * nsectors);
+ mtd->oobsize - 2);
if (ret) {
dev_err(nfc->dev, "failed to ECC init\n");
return -EINVAL;
@@ -1304,7 +1313,6 @@ static int meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
if (ret)
return ret;
- meson_nfc_free_buffer(&meson_chip->nand);
nand_cleanup(&meson_chip->nand);
list_del(&meson_chip->node);
}
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index db66c1be6e5f..22d5087017b0 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -467,9 +467,18 @@ static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
* if we don't do this. I have no clue why, but I seem to have 'fixed'
* it in the doc2000 driver in August 1999. dwmw2.
*/
- ret = nand_reset(chip, chipnr);
- if (ret)
- return ret;
+ /*
+ * ONFI-compatible NAND devices may support different data interface
+ * modes such as SDR, NVDDR and NVDDR2. Resetting a device returns it
+ * to the power-up state and places the target in the SDR data
+ * interface mode, which breaks devices configured for NVDDR modes.
+ * So limit the reset operation to Toshiba devices.
+ */
+ if (chip->parameters.onfi->jedec_id == NAND_MFR_TOSHIBA) {
+ ret = nand_reset(chip, chipnr);
+ if (ret)
+ return ret;
+ }
nand_select_target(chip, chipnr);
diff --git a/drivers/mtd/nand/raw/nand_ecc.c b/drivers/mtd/nand/raw/nand_ecc.c
index 09fdced659f5..b6a46b1b7781 100644
--- a/drivers/mtd/nand/raw/nand_ecc.c
+++ b/drivers/mtd/nand/raw/nand_ecc.c
@@ -131,7 +131,7 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
/* rp0..rp15..rp17 are the various accumulated parities (per byte) */
uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
- uint32_t uninitialized_var(rp17); /* to make compiler happy */
+ uint32_t rp17;
uint32_t par; /* the cumulative parity for all data */
uint32_t tmppar; /* the cumulative parity for this iteration;
for rp12, rp14 and rp16 at the end of the
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
index 8fe8d7bdd203..6b1f727a2de6 100644
--- a/drivers/mtd/nand/raw/nand_onfi.c
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -149,6 +149,12 @@ int nand_onfi_detect(struct nand_chip *chip)
memorg = nanddev_get_memorg(&chip->base);
+ /*
+ * ONFI must be probed in 8-bit mode; 16-bit mode should be selected
+ * via NAND_BUSWIDTH_AUTO.
+ */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ pr_err("Trying ONFI probe in 16 bits mode, aborting !\n");
+ return 0;
+ }
+
/* Try ONFI for unknown chip or LP */
ret = nand_readid_op(chip, 0x20, id, sizeof(id));
if (ret || strncmp(id, "ONFI", 4))
@@ -294,6 +300,8 @@ int nand_onfi_detect(struct nand_chip *chip)
onfi->tR = le16_to_cpu(p->t_r);
onfi->tCCS = le16_to_cpu(p->t_ccs);
onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode);
+ onfi->src_sync_timing_mode = le16_to_cpu(p->src_sync_timing_mode);
+ onfi->jedec_id = le16_to_cpu(p->jedec_id);
onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
chip->parameters.onfi = onfi;
diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c
index f12b7a7844c9..e4c161afd3e6 100644
--- a/drivers/mtd/nand/raw/nand_timings.c
+++ b/drivers/mtd/nand/raw/nand_timings.c
@@ -53,6 +53,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 120000,
.tWP_min = 50000,
.tWW_min = 100000,
+ .mode = 0,
},
},
/* Mode 1 */
@@ -95,6 +96,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 80000,
.tWP_min = 25000,
.tWW_min = 100000,
+ .mode = 1,
},
},
/* Mode 2 */
@@ -137,6 +139,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 80000,
.tWP_min = 17000,
.tWW_min = 100000,
+ .mode = 2,
},
},
/* Mode 3 */
@@ -179,6 +182,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 80000,
.tWP_min = 15000,
.tWW_min = 100000,
+ .mode = 3,
},
},
/* Mode 4 */
@@ -221,6 +225,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 80000,
.tWP_min = 12000,
.tWW_min = 100000,
+ .mode = 4,
},
},
/* Mode 5 */
@@ -263,6 +268,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 80000,
.tWP_min = 10000,
.tWW_min = 100000,
+ .mode = 5,
},
},
};
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
index 6e0e31eab7cc..c4064f8fd611 100644
--- a/drivers/mtd/nand/raw/omap_elm.c
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -174,17 +174,17 @@ static void elm_load_syndrome(struct elm_info *info,
switch (info->bch_type) {
case BCH8_ECC:
/* syndrome fragment 0 = ecc[9-12B] */
- val = cpu_to_be32(*(u32 *) &ecc[9]);
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]);
elm_write_reg(info, offset, val);
/* syndrome fragment 1 = ecc[5-8B] */
offset += 4;
- val = cpu_to_be32(*(u32 *) &ecc[5]);
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[5]);
elm_write_reg(info, offset, val);
/* syndrome fragment 2 = ecc[1-4B] */
offset += 4;
- val = cpu_to_be32(*(u32 *) &ecc[1]);
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[1]);
elm_write_reg(info, offset, val);
/* syndrome fragment 3 = ecc[0B] */
@@ -194,35 +194,35 @@ static void elm_load_syndrome(struct elm_info *info,
break;
case BCH4_ECC:
/* syndrome fragment 0 = ecc[20-52b] bits */
- val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
+ val = ((__force u32)cpu_to_be32(*(u32 *)&ecc[3]) >> 4) |
((ecc[2] & 0xf) << 28);
elm_write_reg(info, offset, val);
/* syndrome fragment 1 = ecc[0-20b] bits */
offset += 4;
- val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 12;
elm_write_reg(info, offset, val);
break;
case BCH16_ECC:
- val = cpu_to_be32(*(u32 *) &ecc[22]);
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[22]);
elm_write_reg(info, offset, val);
offset += 4;
- val = cpu_to_be32(*(u32 *) &ecc[18]);
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[18]);
elm_write_reg(info, offset, val);
offset += 4;
- val = cpu_to_be32(*(u32 *) &ecc[14]);
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[14]);
elm_write_reg(info, offset, val);
offset += 4;
- val = cpu_to_be32(*(u32 *) &ecc[10]);
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[10]);
elm_write_reg(info, offset, val);
offset += 4;
- val = cpu_to_be32(*(u32 *) &ecc[6]);
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[6]);
elm_write_reg(info, offset, val);
offset += 4;
- val = cpu_to_be32(*(u32 *) &ecc[2]);
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[2]);
elm_write_reg(info, offset, val);
offset += 4;
- val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16;
+ val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 16;
elm_write_reg(info, offset, val);
break;
default:
diff --git a/drivers/mtd/nand/raw/pl353_nand.c b/drivers/mtd/nand/raw/pl353_nand.c
new file mode 100644
index 000000000000..c004dfa505ac
--- /dev/null
+++ b/drivers/mtd/nand/raw/pl353_nand.c
@@ -0,0 +1,1398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM PL353 NAND flash controller driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc
+ * Author: Punnaiah chowdary kalluri <punnaiah@xilinx.com>
+ * Author: Naga Sureshkumar Relli <nagasure@xilinx.com>
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pl353-smc.h>
+#include <linux/clk.h>
+
+#define PL353_NAND_DRIVER_NAME "pl353-nand"
+
+/* NAND flash driver defines */
+#define PL353_NAND_CMD_PHASE 1 /* End command valid in command phase */
+#define PL353_NAND_DATA_PHASE 2 /* End command valid in data phase */
+#define PL353_NAND_ECC_SIZE 512 /* Size of data for ECC operation */
+
+/* Flash memory controller operating parameters */
+
+#define PL353_NAND_ECC_CONFIG (BIT(4) | /* ECC read at end of page */ \
+ (0 << 5)) /* No Jumping */
+
+/* AXI Address definitions */
+#define START_CMD_SHIFT 3
+#define END_CMD_SHIFT 11
+#define END_CMD_VALID_SHIFT 20
+#define ADDR_CYCLES_SHIFT 21
+#define CLEAR_CS_SHIFT 21
+#define ECC_LAST_SHIFT 10
+#define COMMAND_PHASE (0 << 19)
+#define DATA_PHASE BIT(19)
+
+#define PL353_NAND_ECC_LAST BIT(ECC_LAST_SHIFT) /* Set ECC_Last */
+#define PL353_NAND_CLEAR_CS BIT(CLEAR_CS_SHIFT) /* Clear chip select */
+
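+/*
+ * Command parameters are encoded into the AXI address of each access (see
+ * the defines above and pl353_prepare_cmd() below): bits [10:3] carry the
+ * start command, bits [18:11] the end command, bit 19 selects command vs.
+ * data phase, bit 20 marks the end command as valid, and bits [23:21] hold
+ * the address-cycle count in the command phase. In the data phase, bit 10
+ * carries ECC_LAST and bit 21 the clear-chip-select flag.
+ */
+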
+#define ONDIE_ECC_FEATURE_ADDR 0x90
+#define PL353_NAND_ECC_BUSY_TIMEOUT (1 * HZ)
+#define PL353_NAND_DEV_BUSY_TIMEOUT (1 * HZ)
+#define PL353_NAND_LAST_TRANSFER_LENGTH 4
+#define PL353_NAND_ECC_VALID_SHIFT 24
+#define PL353_NAND_ECC_VALID_MASK 0x40
+#define PL353_ECC_BITS_BYTEOFF_MASK 0x1FF
+#define PL353_ECC_BITS_BITOFF_MASK 0x7
+#define PL353_ECC_BIT_MASK 0xFFF
+#define PL353_TREA_MAX_VALUE 1
+#define PL353_MAX_ECC_CHUNKS 4
+#define PL353_MAX_ECC_BYTES 3
+
+struct pl353_nfc_op {
+ u32 cmnds[4];
+ u32 end_cmd;
+ u32 addrs;
+ u32 len;
+ u32 naddrs;
+ u32 addr5;
+ u32 addr6;
+ unsigned int data_instr_idx;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+ unsigned int cle_ale_delay_ns;
+ const struct nand_op_instr *data_instr;
+};
+
+/**
+ * struct pl353_nand_controller - Defines the NAND flash controller driver
+ * instance
+ * @chip: NAND chip information structure
+ * @dev: Parent device (used to print error messages)
+ * @regs: Virtual address of the NAND flash device
+ * @buf_addr: Virtual address of the NAND flash device for
+ * data read/writes
+ * @addr_cycles: Address cycles
+ * @mclk: Memory controller clock
+ * @buswidth: Bus width 8 or 16
+ */
+struct pl353_nand_controller {
+ struct nand_chip chip;
+ struct device *dev;
+ void __iomem *regs;
+ void __iomem *buf_addr;
+ u8 addr_cycles;
+ struct clk *mclk;
+ u32 buswidth;
+};
+
+static int pl353_ecc_ooblayout16_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes);
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int pl353_ecc_ooblayout16_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes) + 8;
+ oobregion->length = 8;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops pl353_ecc_ooblayout16_ops = {
+ .ecc = pl353_ecc_ooblayout16_ecc,
+ .free = pl353_ecc_ooblayout16_free,
+};
+
+static int pl353_ecc_ooblayout64_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes) + 52;
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int pl353_ecc_ooblayout64_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes) + 2;
+ oobregion->length = 50;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops pl353_ecc_ooblayout64_ops = {
+ .ecc = pl353_ecc_ooblayout64_ecc,
+ .free = pl353_ecc_ooblayout64_free,
+};
+
+/* Generic flash BBT descriptors */
+static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+static void pl353_nfc_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+
+ if (xnfc->buswidth == 8)
+ return;
+
+ if (force_8bit)
+ pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_8);
+ else
+ pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_16);
+}
+
+/**
+ * pl353_nand_read_data_op - read chip data into buffer
+ * @chip: Pointer to the NAND chip info structure
+ * @in: Pointer to the buffer to store read data
+ * @len: Number of bytes to read
+ * @force_8bit: Force 8-bit bus access
+ * Return: Always returns zero
+ */
+static int pl353_nand_read_data_op(struct nand_chip *chip,
+ u8 *in,
+ unsigned int len, bool force_8bit)
+{
+ int i;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+
+ if (force_8bit)
+ pl353_nfc_force_byte_access(chip, true);
+
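+ /*
+ * Use 32-bit reads whenever 8-bit access is not forced, or when the
+ * buffer and length happen to be word aligned; otherwise fall back
+ * to byte reads.
+ */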
+ if ((IS_ALIGNED((uint32_t)in, sizeof(uint32_t)) &&
+ IS_ALIGNED(len, sizeof(uint32_t))) || (!force_8bit)) {
+ u32 *ptr = (u32 *)in;
+
+ len /= 4;
+ for (i = 0; i < len; i++)
+ ptr[i] = readl(xnfc->buf_addr);
+ } else {
+ for (i = 0; i < len; i++)
+ in[i] = readb(xnfc->buf_addr);
+ }
+ if (force_8bit)
+ pl353_nfc_force_byte_access(chip, false);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_write_data_op - write buffer to chip
+ * @mtd: Pointer to the mtd info structure
+ * @buf: Pointer to the buffer holding the write data
+ * @len: Number of bytes to write
+ * @force_8bit: Force 8-bit bus access
+ */
+static void pl353_nand_write_data_op(struct mtd_info *mtd, const u8 *buf,
+ int len, bool force_8bit)
+{
+ int i;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+
+ if (force_8bit)
+ pl353_nfc_force_byte_access(chip, true);
+
+ if ((IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+ IS_ALIGNED(len, sizeof(uint32_t))) || (!force_8bit)) {
+ u32 *ptr = (u32 *)buf;
+
+ len /= 4;
+ for (i = 0; i < len; i++)
+ writel(ptr[i], xnfc->buf_addr);
+ } else {
+ for (i = 0; i < len; i++)
+ writeb(buf[i], xnfc->buf_addr);
+ }
+ if (force_8bit)
+ pl353_nfc_force_byte_access(chip, false);
+}
+
+static int pl353_wait_for_ecc_done(void)
+{
+ unsigned long timeout = jiffies + PL353_NAND_ECC_BUSY_TIMEOUT;
+
+ do {
+ if (pl353_smc_ecc_is_busy())
+ cpu_relax();
+ else
+ break;
+ } while (!time_after_eq(jiffies, timeout));
+
+ if (time_after_eq(jiffies, timeout)) {
+ pr_err("%s timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * pl353_nand_calculate_hwecc - Calculate Hardware ECC
+ * @mtd: Pointer to the mtd_info structure
+ * @data: Pointer to the page data
+ * @ecc: Pointer to the ECC buffer where ECC data needs to be stored
+ *
+ * This function retrieves the hardware ECC data from the controller and
+ * returns it to the MTD subsystem.
+ * It operates on a number of 512-byte blocks of NAND memory and can be
+ * programmed to store the ECC codes after the data in memory. For writes,
+ * the ECC is written to the spare area of the page. For reads, the results of
+ * a block ECC check are made available to the device driver.
+ *
+ * ------------------------------------------------------------------------
+ * | n * 512 blocks | extra | ecc | |
+ * | | block | codes | |
+ * ------------------------------------------------------------------------
+ *
+ * The ECC calculation uses a simple Hamming code with 1-bit correction and
+ * 2-bit detection. It starts when a valid read or write command with a
+ * 512-byte-aligned address is detected on the memory interface.
+ *
+ * Return: 0 on success or error value on failure
+ */
+static int pl353_nand_calculate_hwecc(struct mtd_info *mtd,
+ const u8 *data, u8 *ecc)
+{
+ u32 ecc_value;
+ u8 chunk, ecc_byte, ecc_status;
+
+ for (chunk = 0; chunk < PL353_MAX_ECC_CHUNKS; chunk++) {
+ /* Read ECC value for each block */
+ ecc_value = pl353_smc_get_ecc_val(chunk);
+ ecc_status = (ecc_value >> PL353_NAND_ECC_VALID_SHIFT);
+
+ /* ECC value valid */
+ if (ecc_status & PL353_NAND_ECC_VALID_MASK) {
+ for (ecc_byte = 0; ecc_byte < PL353_MAX_ECC_BYTES;
+ ecc_byte++) {
+ /* Invert and copy the ECC bytes to the MTD buffer, LSB first */
+ *ecc = ~ecc_value & 0xFF;
+ ecc_value = ecc_value >> 8;
+ ecc++;
+ }
+ } else {
+ pr_warn("%s status failed\n", __func__);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * pl353_nand_correct_data - ECC correction function
+ * @mtd: Pointer to the mtd_info structure
+ * @buf: Pointer to the page data
+ * @read_ecc: Pointer to the ECC value read from spare data area
+ * @calc_ecc: Pointer to the calculated ECC value
+ *
+ * This function corrects single-bit ECC errors and detects 2-bit errors.
+ *
+ * Return: 0 if no ECC errors found
+ * 1 if single bit error found and corrected.
+ * -1 if multiple uncorrectable ECC errors found.
+ */
+static int pl353_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ unsigned char bit_addr;
+ unsigned int byte_addr;
+ unsigned short ecc_odd, ecc_even, read_ecc_lower, read_ecc_upper;
+ unsigned short calc_ecc_lower, calc_ecc_upper;
+
+ read_ecc_lower = (read_ecc[0] | (read_ecc[1] << 8)) &
+ PL353_ECC_BIT_MASK;
+ read_ecc_upper = ((read_ecc[1] >> 4) | (read_ecc[2] << 4)) &
+ PL353_ECC_BIT_MASK;
+
+ calc_ecc_lower = (calc_ecc[0] | (calc_ecc[1] << 8)) &
+ PL353_ECC_BIT_MASK;
+ calc_ecc_upper = ((calc_ecc[1] >> 4) | (calc_ecc[2] << 4)) &
+ PL353_ECC_BIT_MASK;
+
+ ecc_odd = read_ecc_lower ^ calc_ecc_lower;
+ ecc_even = read_ecc_upper ^ calc_ecc_upper;
+
+ /* no error */
+ if (!ecc_odd && !ecc_even)
+ return 0;
+
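+ /*
+ * A single flipped data bit makes the odd and even syndrome halves
+ * exact bitwise complements of each other.
+ */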
+ if (ecc_odd == (~ecc_even & PL353_ECC_BIT_MASK)) {
+ /* bits [11:3] of error code is byte offset */
+ byte_addr = (ecc_odd >> 3) & PL353_ECC_BITS_BYTEOFF_MASK;
+ /* bits [2:0] of error code is bit offset */
+ bit_addr = ecc_odd & PL353_ECC_BITS_BITOFF_MASK;
+ /* Toggling error bit */
+ buf[byte_addr] ^= (BIT(bit_addr));
+ return 1;
+ }
+
+ /* one error in parity */
+ if (hweight32(ecc_odd | ecc_even) == 1)
+ return 1;
+
+ /* Uncorrectable error */
+ return -1;
+}
+
+static void pl353_prepare_cmd(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int column, int start_cmd, int end_cmd,
+ bool read)
+{
+ unsigned long data_phase_addr;
+ u32 end_cmd_valid = 0;
+ unsigned long cmd_phase_addr = 0, cmd_phase_data = 0;
+
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+
+ end_cmd_valid = read ? 1 : 0;
+
+ cmd_phase_addr = (unsigned long __force)xnfc->regs +
+ ((xnfc->addr_cycles
+ << ADDR_CYCLES_SHIFT) |
+ (end_cmd_valid << END_CMD_VALID_SHIFT) |
+ (COMMAND_PHASE) |
+ (end_cmd << END_CMD_SHIFT) |
+ (start_cmd << START_CMD_SHIFT));
+
+ /* Get the data phase address */
+ data_phase_addr = (unsigned long __force)xnfc->regs +
+ ((0x0 << CLEAR_CS_SHIFT) |
+ (0 << END_CMD_VALID_SHIFT) |
+ (DATA_PHASE) |
+ (end_cmd << END_CMD_SHIFT) |
+ (0x0 << ECC_LAST_SHIFT));
+
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ column /= 2;
+ cmd_phase_data = column;
+ if (mtd->writesize > PL353_NAND_ECC_SIZE) {
+ cmd_phase_data |= page << 16;
+ /* Another address cycle for devices > 128MiB */
+ if (chip->options & NAND_ROW_ADDR_3) {
+ writel_relaxed(cmd_phase_data,
+ (void __iomem * __force)cmd_phase_addr);
+ cmd_phase_data = (page >> 16);
+ }
+ } else {
+ cmd_phase_data |= page << 8;
+ }
+
+ writel_relaxed(cmd_phase_data, (void __iomem * __force)cmd_phase_addr);
+}
+
+/**
+ * pl353_nand_read_oob - [REPLACEABLE] the most common OOB data read function
+ * @mtd: Pointer to the mtd_info structure
+ * @chip: Pointer to the nand_chip structure
+ * @page: Page number to read
+ *
+ * Return: Always returns zero
+ */
+static int pl353_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ unsigned long data_phase_addr;
+ u8 *p;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ chip->pagebuf = -1;
+ if (mtd->writesize < PL353_NAND_ECC_SIZE)
+ return 0;
+
+ pl353_prepare_cmd(mtd, chip, page, mtd->writesize, NAND_CMD_READ0,
+ NAND_CMD_READSTART, 1);
+
+ nand_wait_ready(mtd);
+
+ p = chip->oob_poi;
+ pl353_nand_read_data_op(chip, p,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+ p += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
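+ /*
+ * The last 4 bytes are read through a data phase address with the
+ * CLEAR_CS flag set, so the controller de-asserts chip select when
+ * the transfer completes: strip the MMIO base, set the flag in the
+ * address bits, then rebuild the pointer.
+ */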
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ pl353_nand_read_data_op(chip, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_write_oob - [REPLACEABLE] the most common OOB data write function
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @page: Page number to write
+ *
+ * Return: Always returns zero
+ */
+static int pl353_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ const u8 *buf = chip->oob_poi;
+ unsigned long data_phase_addr;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+ u32 addrcycles = 0;
+
+ chip->pagebuf = -1;
+ addrcycles = xnfc->addr_cycles;
+ pl353_prepare_cmd(mtd, chip, page, mtd->writesize, NAND_CMD_SEQIN,
+ NAND_CMD_PAGEPROG, 0);
+
+ pl353_nand_write_data_op(mtd, buf,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+ buf += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr |= (1 << END_CMD_VALID_SHIFT);
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ pl353_nand_write_data_op(mtd, buf, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+ nand_wait_ready(mtd);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_read_page_raw - [Intern] read raw page data without ECC
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @buf: Pointer to the data buffer
+ * @oob_required: Caller requires OOB data read to chip->oob_poi
+ * @page: Page number to read
+ *
+ * Return: Always returns zero
+ */
+static int pl353_nand_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ unsigned long data_phase_addr;
+ u8 *p;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ pl353_prepare_cmd(mtd, chip, page, 0, NAND_CMD_READ0,
+ NAND_CMD_READSTART, 1);
+ nand_wait_ready(mtd);
+ pl353_nand_read_data_op(chip, buf, mtd->writesize, false);
+ p = chip->oob_poi;
+ pl353_nand_read_data_op(chip, p,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+ p += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ pl353_nand_read_data_op(chip, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_write_page_raw - [Intern] raw page write function
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @buf: Pointer to the data buffer
+ * @oob_required: Caller requires OOB data to be written from chip->oob_poi
+ * @page: Page number to write
+ *
+ * Return: Always returns zero
+ */
+static int pl353_nand_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ unsigned long data_phase_addr;
+ u8 *p;
+
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ pl353_prepare_cmd(mtd, chip, page, 0, NAND_CMD_SEQIN,
+ NAND_CMD_PAGEPROG, 0);
+ pl353_nand_write_data_op(mtd, buf, mtd->writesize, false);
+ p = chip->oob_poi;
+ pl353_nand_write_data_op(mtd, p,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+ p += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr |= (1 << END_CMD_VALID_SHIFT);
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ pl353_nand_write_data_op(mtd, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_write_page_hwecc - Hardware ECC based page write function
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @buf: Pointer to the data buffer
+ * @oob_required: Caller requires OOB data to be written from chip->oob_poi
+ * @page: Page number to write
+ *
+ * This function writes the data and the hardware-generated ECC values to the
+ * page.
+ *
+ * Return: Always returns zero
+ */
+static int pl353_nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ int eccsize = chip->ecc.size;
+ int eccsteps = chip->ecc.steps;
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *oob_ptr;
+ const u8 *p = buf;
+ int ret;
+ unsigned long data_phase_addr;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ pl353_prepare_cmd(mtd, chip, page, 0, NAND_CMD_SEQIN,
+ NAND_CMD_PAGEPROG, 0);
+
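+ /*
+ * Write all but the last ECC step as full-size transfers; the final
+ * step is split so that its last 4 bytes go out with the ECC_LAST
+ * flag set (see the pl353_nand_read_page_hwecc() comment).
+ */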
+ for ( ; (eccsteps - 1); eccsteps--) {
+ pl353_nand_write_data_op(mtd, p, eccsize, false);
+ p += eccsize;
+ }
+ pl353_nand_write_data_op(mtd, p,
+ (eccsize - PL353_NAND_LAST_TRANSFER_LENGTH),
+ false);
+ p += (eccsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+ /* Set ECC Last bit to 1 */
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_ECC_LAST;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ pl353_nand_write_data_op(mtd, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ /* Wait till the ECC operation is complete or timeout */
+ ret = pl353_wait_for_ecc_done();
+ if (ret)
+ dev_err(xnfc->dev, "ECC Timeout\n");
+ p = buf;
+ ret = chip->ecc.calculate(mtd, p, &ecc_calc[0]);
+ if (ret)
+ return ret;
+
+ /* Wait for ECC to be calculated and read the error values */
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi,
+ 0, chip->ecc.total);
+ if (ret)
+ return ret;
+ /* Clear ECC last bit */
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr &= ~PL353_NAND_ECC_LAST;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ /* Write the spare area with ECC bytes */
+ oob_ptr = chip->oob_poi;
+ pl353_nand_write_data_op(mtd, oob_ptr,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr |= (1 << END_CMD_VALID_SHIFT);
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ oob_ptr += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+ pl353_nand_write_data_op(mtd, oob_ptr, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+ nand_wait_ready(mtd);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_read_page_hwecc - Hardware ECC based page read function
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @buf: Pointer to the buffer to store read data
+ * @oob_required: Caller requires OOB data read to chip->oob_poi
+ * @page: Page number to read
+ *
+ * This function reads the data and checks its integrity by comparing the
+ * hardware-generated ECC values with the ECC values read from the spare area.
+ * The SMC controller has a limitation: ECC_LAST must be set on the last data
+ * phase access to tell the ECC block not to expect any further data.
+ * For example, with 4 ECC steps, the first 3 steps are transferred through
+ * the SMC with HW ECC enabled. For the last step, 4 bytes are subtracted from
+ * the step size and transferred first; the remaining 4 bytes go out as one
+ * more transfer with the ECC_LAST bit set in the NAND data phase register, so
+ * the ECC block knows not to expect any more data. The last transfer must be
+ * aligned with the end of the 512-byte block. Because of this limitation, the
+ * core routines are not used.
+ *
+ * Return: Maximum number of bitflips detected per ECC step; ECC statistics
+ * are updated in the MTD structure
+ */
+static int pl353_nand_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ int i, stat, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ u8 *p = buf;
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *ecc = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
+ u8 *oob_ptr;
+ int ret;
+ unsigned long data_phase_addr;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ pl353_prepare_cmd(mtd, chip, page, 0, NAND_CMD_READ0,
+ NAND_CMD_READSTART, 1);
+ nand_wait_ready(mtd);
+
+ for ( ; (eccsteps - 1); eccsteps--) {
+ pl353_nand_read_data_op(chip, p, eccsize, false);
+ p += eccsize;
+ }
+ pl353_nand_read_data_op(chip, p,
+ (eccsize - PL353_NAND_LAST_TRANSFER_LENGTH),
+ false);
+ p += (eccsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+ /* Set ECC Last bit to 1 */
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_ECC_LAST;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ pl353_nand_read_data_op(chip, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ /* Wait till the ECC operation is complete or timeout */
+ ret = pl353_wait_for_ecc_done();
+ if (ret)
+ dev_err(xnfc->dev, "ECC Timeout\n");
+
+ /* Read the calculated ECC value */
+ p = buf;
+ ret = chip->ecc.calculate(mtd, p, &ecc_calc[0]);
+ if (ret)
+ return ret;
+
+ /* Clear ECC last bit */
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr &= ~PL353_NAND_ECC_LAST;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ /* Read the stored ECC value */
+ oob_ptr = chip->oob_poi;
+ pl353_nand_read_data_op(chip, oob_ptr,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+
+ /* de-assert chip select */
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ oob_ptr += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+ pl353_nand_read_data_op(chip, oob_ptr, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ /* Check ECC error for all blocks and correct if it is correctable */
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ stat = chip->ecc.correct(mtd, p, &ecc[i], &ecc_calc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ return max_bitflips;
+}
+
+/**
+ * pl353_nand_select_chip - Select the flash device
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ *
+ * This function is empty as the NAND controller handles the chip-select line
+ * internally, based on the chip address passed in the command and data phases.
+ */
+static void pl353_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static void pl353_nfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct pl353_nfc_op *nfc_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id, offset, naddrs;
+ int i, len;
+ const u8 *addrs;
+
+ memset(nfc_op, 0, sizeof(struct pl353_nfc_op));
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ nfc_op->len = nand_subop_get_data_len(subop, op_id);
+ len = nand_subop_get_data_len(subop, op_id);
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (op_id)
+ nfc_op->cmnds[1] = instr->ctx.cmd.opcode;
+ else
+ nfc_op->cmnds[0] = instr->ctx.cmd.opcode;
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+ nfc_op->addrs = instr->ctx.addr.addrs[offset];
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++) {
+ nfc_op->addrs |= instr->ctx.addr.addrs[i] <<
+ (8 * i);
+ }
+
+ if (naddrs >= 5)
+ nfc_op->addr5 = addrs[4];
+ if (naddrs >= 6)
+ nfc_op->addr6 = addrs[5];
+ nfc_op->naddrs = nand_subop_get_num_addr_cyc(subop,
+ op_id);
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ nfc_op->rdy_delay_ns = instr->delay_ns;
+ break;
+ }
+ }
+}
+
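+/* ndelay() for short (< 10 us) delays, udelay() otherwise */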
+static void cond_delay(unsigned int ns)
+{
+ if (!ns)
+ return;
+
+ if (ns < 10000)
+ ndelay(ns);
+ else
+ udelay(DIV_ROUND_UP(ns, 1000));
+}
+
+/**
+ * pl353_nand_exec_op_cmd - Send command to NAND device
+ * @chip: Pointer to the NAND chip info structure
+ * @subop: Pointer to array of instructions
+ * Return: Always returns zero
+ */
+static int pl353_nand_exec_op_cmd(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_op_instr *instr;
+ struct pl353_nfc_op nfc_op = {};
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long cmd_phase_data = 0, end_cmd_valid = 0;
+ unsigned long cmd_phase_addr, data_phase_addr, end_cmd;
+ unsigned int op_id, len, offset;
+ bool reading;
+
+ pl353_nfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+ offset = nand_subop_get_data_start_off(subop, op_id);
+
+ pl353_smc_clr_nand_int();
+ /* Get the command phase address */
+ if (nfc_op.cmnds[1] != 0) {
+ if (nfc_op.cmnds[0] == NAND_CMD_SEQIN)
+ end_cmd_valid = 0;
+ else
+ end_cmd_valid = 1;
+ end_cmd = nfc_op.cmnds[1];
+ } else {
+ end_cmd = 0x0;
+ }
+
+ /*
+ * The SMC defines two phases of commands when transferring data to or
+ * from NAND flash.
+ * Command phase: Commands and optional address information are written
+ * to the NAND flash. The command and address can be associated with
+ * either a data phase operation to write to or read from the array,
+ * or a status/ID register transfer.
+ * Data phase: Data is either written to or read from the NAND flash.
+ * This data can be either data transferred to or from the array,
+ * or status/ID register information.
+ */
+ cmd_phase_addr = (unsigned long __force)xnfc->regs +
+ ((nfc_op.naddrs << ADDR_CYCLES_SHIFT) |
+ (end_cmd_valid << END_CMD_VALID_SHIFT) |
+ (COMMAND_PHASE) |
+ (end_cmd << END_CMD_SHIFT) |
+ (nfc_op.cmnds[0] << START_CMD_SHIFT));
+
+ /* Get the data phase address */
+ end_cmd_valid = 0;
+
+ data_phase_addr = (unsigned long __force)xnfc->regs +
+ ((0x0 << CLEAR_CS_SHIFT) |
+ (end_cmd_valid << END_CMD_VALID_SHIFT) |
+ (DATA_PHASE) |
+ (end_cmd << END_CMD_SHIFT) |
+ (0x0 << ECC_LAST_SHIFT));
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ /* Command phase AXI Read & Write */
+ if (nfc_op.naddrs >= 5) {
+ if (mtd->writesize > PL353_NAND_ECC_SIZE) {
+ cmd_phase_data = nfc_op.addrs;
+ /* Another address cycle for devices > 128MiB */
+ if (chip->options & NAND_ROW_ADDR_3) {
+ writel_relaxed(cmd_phase_data,
+ (void __iomem * __force)
+ cmd_phase_addr);
+ cmd_phase_data = nfc_op.addr5;
+ if (nfc_op.naddrs >= 6)
+ cmd_phase_data |= (nfc_op.addr6 << 8);
+ }
+ }
+ } else {
+ if (nfc_op.addrs != -1) {
+ int column = nfc_op.addrs;
+ /*
+ * Change read/write column, read id etc
+ * Adjust columns for 16 bit bus width
+ */
+ if ((chip->options & NAND_BUSWIDTH_16) &&
+ (nfc_op.cmnds[0] == NAND_CMD_READ0 ||
+ nfc_op.cmnds[0] == NAND_CMD_SEQIN ||
+ nfc_op.cmnds[0] == NAND_CMD_RNDOUT ||
+ nfc_op.cmnds[0] == NAND_CMD_RNDIN)) {
+ column >>= 1;
+ }
+ cmd_phase_data = column;
+ }
+ }
+ writel_relaxed(cmd_phase_data, (void __iomem * __force)cmd_phase_addr);
+
+ if (!nfc_op.data_instr) {
+ if (nfc_op.rdy_timeout_ms)
+ nand_wait_ready(mtd);
+ return 0;
+ }
+
+ reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);
+ if (!reading) {
+ pl353_nand_write_data_op(mtd, instr->ctx.data.buf.out,
+ len, instr->ctx.data.force_8bit);
+ if (nfc_op.rdy_timeout_ms)
+ nand_wait_ready(mtd);
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+ if (reading) {
+ cond_delay(nfc_op.rdy_delay_ns);
+ if (nfc_op.rdy_timeout_ms)
+ nand_wait_ready(mtd);
+ pl353_nand_read_data_op(chip, instr->ctx.data.buf.in, len,
+ instr->ctx.data.force_8bit);
+ }
+
+ return 0;
+}
+
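+/*
+ * Instruction patterns accepted by ->exec_op(); every pattern dispatches to
+ * pl353_nand_exec_op_cmd(). Address sequences are capped at 7 cycles (8 for
+ * the program pattern) and data transfers at 2048 bytes.
+ */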
+static const struct nand_op_parser pl353_nfc_op_parser = NAND_OP_PARSER
+ (NAND_OP_PARSER_PATTERN
+ (pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 2048)),
+ NAND_OP_PARSER_PATTERN
+ (pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 2048)),
+ NAND_OP_PARSER_PATTERN
+ (pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN
+ (pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, 8),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2048),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN
+ (pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ );
+
+static int pl353_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ return nand_op_parser_exec_op(chip, &pl353_nfc_op_parser,
+ op, check_only);
+}
+
+/**
+ * pl353_nand_device_ready - Check device ready/busy line
+ * @mtd: Pointer to the mtd_info structure
+ *
+ * Return: 0 on busy or 1 on ready state
+ */
+static int pl353_nand_device_ready(struct mtd_info *mtd)
+{
+ if (pl353_smc_get_nand_int_status_raw()) {
+ pl353_smc_clr_nand_int();
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * pl353_nand_ecc_init - Initialize the ECC information as per the ECC mode
+ * @mtd: Pointer to the mtd_info structure
+ * @ecc: Pointer to ECC control structure
+ * @ecc_mode: On-die ECC status
+ *
+ * This function initializes the ECC block and the function pointers as per
+ * the ECC mode.
+ *
+ * Return: 0 on success or negative errno.
+ */
+static int pl353_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
+ int ecc_mode)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ int err = 0;
+
+ ecc->read_oob = pl353_nand_read_oob;
+ ecc->write_oob = pl353_nand_write_oob;
+
+ if (ecc_mode == NAND_ECC_ON_DIE) {
+ ecc->write_page_raw = pl353_nand_write_page_raw;
+ ecc->read_page_raw = pl353_nand_read_page_raw;
+ pl353_smc_set_ecc_mode(PL353_SMC_ECCMODE_BYPASS);
+ /*
+ * With on-die ECC, spare-area offset 8 is used for the ECC
+ * codes, so use the BBT pattern descriptors above.
+ */
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ } else {
+ ecc->mode = NAND_ECC_HW;
+ /* Hardware ECC generates 3 ECC bytes for each 512 bytes of data */
+ ecc->bytes = 3;
+ ecc->strength = 1;
+ ecc->calculate = pl353_nand_calculate_hwecc;
+ ecc->correct = pl353_nand_correct_data;
+ ecc->read_page = pl353_nand_read_page_hwecc;
+ ecc->size = PL353_NAND_ECC_SIZE;
+ ecc->write_page = pl353_nand_write_page_hwecc;
+ pl353_smc_set_ecc_pg_size(mtd->writesize);
+ switch (mtd->writesize) {
+ case SZ_512:
+ case SZ_1K:
+ case SZ_2K:
+ pl353_smc_set_ecc_mode(PL353_SMC_ECCMODE_APB);
+ break;
+ default:
+ ecc->calculate = nand_calculate_ecc;
+ ecc->correct = nand_correct_data;
+ ecc->size = 256;
+ break;
+ }
+
+ if (mtd->oobsize == 16) {
+ mtd_set_ooblayout(mtd, &pl353_ecc_ooblayout16_ops);
+ } else if (mtd->oobsize == 64) {
+ mtd_set_ooblayout(mtd, &pl353_ecc_ooblayout64_ops);
+ } else {
+ err = -ENXIO;
+ dev_err(xnfc->dev, "Unsupported oob Layout\n");
+ }
+ }
+
+ return err;
+}
+
+static int pl353_setup_data_interface(struct mtd_info *mtd, int csline,
+ const struct nand_data_interface *conf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ const struct nand_sdr_timings *sdr;
+ u32 timings[7], mckperiodps;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ /*
+ * SDR timings are given in pico-seconds while NFC timings must be
+ * expressed in NAND controller clock cycles.
+ */
+ mckperiodps = NSEC_PER_SEC / clk_get_rate(xnfc->mclk);
+ mckperiodps *= 1000;
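+ /* e.g. a 100 MHz mclk gives a 10000 ps cycle time */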
+ if (sdr->tRC_min <= 20000)
+ /*
+ * PL353 SMC needs one extra read cycle in SDR Mode 5.
+ * This is not documented in the datasheet, but was
+ * observed during testing.
+ */
+ timings[0] = DIV_ROUND_UP(sdr->tRC_min, mckperiodps) + 1;
+ else
+ timings[0] = DIV_ROUND_UP(sdr->tRC_min, mckperiodps);
+
+ timings[1] = DIV_ROUND_UP(sdr->tWC_min, mckperiodps);
+ /*
+ * For all SDR modes, the PL353 SMC needs a tREA max value of 1,
+ * as observed during testing.
+ */
+ timings[2] = PL353_TREA_MAX_VALUE;
+ timings[3] = DIV_ROUND_UP(sdr->tWP_min, mckperiodps);
+ timings[4] = DIV_ROUND_UP(sdr->tCLR_min, mckperiodps);
+ timings[5] = DIV_ROUND_UP(sdr->tAR_min, mckperiodps);
+ timings[6] = DIV_ROUND_UP(sdr->tRR_min, mckperiodps);
+ pl353_smc_set_cycles(timings);
+
+ return 0;
+}
+
+static int pl353_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ int ret;
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_16);
+
+ if (mtd->writesize <= SZ_512)
+ xnfc->addr_cycles = 1;
+ else
+ xnfc->addr_cycles = 2;
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ xnfc->addr_cycles += 3;
+ else
+ xnfc->addr_cycles += 2;
+
+ ret = pl353_nand_ecc_init(mtd, &chip->ecc, chip->ecc.mode);
+ if (ret) {
+ dev_err(xnfc->dev, "ECC init failed\n");
+ return ret;
+ }
+
+ if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+ * updated to pass a new mtdparts parameter on the cmdline, you
+ * should define the following property in your NAND node, ie:
+ *
+ * label = "pl353-nand";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(xnfc->dev, GFP_KERNEL,
+ "%s", PL353_NAND_DRIVER_NAME);
+ if (!mtd->name) {
+ dev_err(xnfc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops pl353_nand_controller_ops = {
+ .attach_chip = pl353_nand_attach_chip,
+};
+
+/**
+ * pl353_nand_probe - Probe method for the NAND driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ * The NAND driver has dependency with the pl353_smc memory controller
+ * driver for initializing the NAND timing parameters, bus width, ECC modes,
+ * control and status information.
+ *
+ * Return: 0 on success or error value on failure
+ */
+static int pl353_nand_probe(struct platform_device *pdev)
+{
+ struct pl353_nand_controller *xnfc;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct resource *res;
+ struct device_node *np, *dn;
+ int ret;
+ u32 val;
+
+ xnfc = devm_kzalloc(&pdev->dev, sizeof(*xnfc), GFP_KERNEL);
+ if (!xnfc)
+ return -ENOMEM;
+ xnfc->dev = &pdev->dev;
+
+ /* Map physical address of NAND flash */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xnfc->regs = devm_ioremap_resource(xnfc->dev, res);
+ if (IS_ERR(xnfc->regs))
+ return PTR_ERR(xnfc->regs);
+
+ chip = &xnfc->chip;
+ mtd = nand_to_mtd(chip);
+ chip->exec_op = pl353_nfc_exec_op;
+ nand_set_controller_data(chip, xnfc);
+ mtd->priv = chip;
+ mtd->owner = THIS_MODULE;
+
+ nand_set_flash_node(chip, xnfc->dev->of_node);
+
+ /* Set the driver entry points for MTD */
+ chip->dev_ready = pl353_nand_device_ready;
+ chip->select_chip = pl353_nand_select_chip;
+ /* If we don't set this delay, the driver sets 20us by default */
+ np = of_get_next_parent(xnfc->dev->of_node);
+ xnfc->mclk = of_clk_get(np, 0);
+ if (IS_ERR(xnfc->mclk)) {
+ dev_err(xnfc->dev, "Failed to retrieve MCK clk\n");
+ return PTR_ERR(xnfc->mclk);
+ }
+
+ dn = nand_get_flash_node(chip);
+ ret = of_property_read_u32(dn, "nand-bus-width", &val);
+ if (ret)
+ val = 8;
+
+ xnfc->buswidth = val;
+ chip->chip_delay = 30;
+ /* Set the device option and flash width */
+ chip->options = NAND_BUSWIDTH_AUTO;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ platform_set_drvdata(pdev, xnfc);
+ chip->setup_data_interface = pl353_setup_data_interface;
+ chip->dummy_controller.ops = &pl353_nand_controller_ops;
+ ret = nand_scan(mtd, 1);
+ if (ret) {
+ dev_err(xnfc->dev, "could not scan the nand chip\n");
+ return ret;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(xnfc->dev, "Failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * pl353_nand_remove - Remove method for the NAND driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called when the driver module is unloaded. It frees all
+ * resources allocated to the device.
+ *
+ * Return: 0 on success or error value on failure
+ */
+static int pl353_nand_remove(struct platform_device *pdev)
+{
+ struct pl353_nand_controller *xnfc = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = nand_to_mtd(&xnfc->chip);
+
+ /* Release resources, unregister device */
+ nand_release(mtd);
+
+ return 0;
+}
+
+/* Match table for device tree binding */
+static const struct of_device_id pl353_nand_of_match[] = {
+ { .compatible = "arm,pl353-nand-r2p1" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pl353_nand_of_match);
+
+/*
+ * pl353_nand_driver - This structure defines the NAND subsystem platform driver
+ */
+static struct platform_driver pl353_nand_driver = {
+ .probe = pl353_nand_probe,
+ .remove = pl353_nand_remove,
+ .driver = {
+ .name = PL353_NAND_DRIVER_NAME,
+ .of_match_table = pl353_nand_of_match,
+ },
+};
+
+module_platform_driver(pl353_nand_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_ALIAS("platform:" PL353_NAND_DRIVER_NAME);
+MODULE_DESCRIPTION("ARM PL353 NAND Flash Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 5af3bef6c230..db5cfcadb2bd 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2972,7 +2972,7 @@ err_nandc_alloc:
err_aon_clk:
clk_disable_unprepare(nandc->core_clk);
err_core_clk:
- dma_unmap_resource(dev, res->start, resource_size(res),
+ dma_unmap_resource(dev, nandc->base_dma, resource_size(res),
DMA_BIDIRECTIONAL, 0);
return ret;
}
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index 0009c1820e21..93dbe35dd30b 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -291,7 +291,7 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
int tacls, twrph0, twrph1;
unsigned long clkrate = clk_get_rate(info->clk);
- unsigned long uninitialized_var(set), cfg, uninitialized_var(mask);
+ unsigned long set, cfg, mask;
unsigned long flags;
/* calculate the timing information for the controller */
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 5c06e0b4d4ef..ad4d944ada0c 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -1592,6 +1592,9 @@ static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
if (IS_ERR(sdrt))
return PTR_ERR(sdrt);
+ if (sdrt->tRC_min < 30000)
+ return -EOPNOTSUPP;
+
if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 45c376fc571a..0f3bce3fb00b 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -1587,7 +1587,7 @@ static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
if (section < ecc->steps)
oobregion->length = 4;
else
- oobregion->offset = mtd->oobsize - oobregion->offset;
+ oobregion->length = mtd->oobsize - oobregion->offset;
return 0;
}
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 21def3f8fb36..f18c6cfe8ff5 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -116,6 +116,22 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_INFO("MX35LF2GE4AD", 0x26,
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_INFO("MX35LF4GE4AD", 0x37,
+ NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
};
static int macronix_spinand_detect(struct spinand_device *spinand)
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 7d7b1f7fcf71..c232ecd761ae 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -12,7 +12,7 @@
#define SPINAND_MFR_MICRON 0x2c
-#define MICRON_STATUS_ECC_MASK GENMASK(7, 4)
+#define MICRON_STATUS_ECC_MASK GENMASK(6, 4)
#define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4)
#define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4)
#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 1cb3760ff779..b3ed6b42a76f 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -60,7 +60,7 @@ static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
@@ -79,7 +79,7 @@ static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand,
if (spi_mem_exec_op(spinand->spimem, &op))
return nand->eccreq.strength;
- mbf >>= 4;
+ mbf = *(spinand->scratchbuf) >> 4;
if (WARN_ON(mbf > nand->eccreq.strength || !mbf))
return nand->eccreq.strength;
diff --git a/drivers/mtd/parsers/afs.c b/drivers/mtd/parsers/afs.c
index 8fd61767af83..26116694c821 100644
--- a/drivers/mtd/parsers/afs.c
+++ b/drivers/mtd/parsers/afs.c
@@ -126,8 +126,8 @@ static int afs_parse_v1_partition(struct mtd_info *mtd,
* Static checks cannot see that we bail out if we have an error
* reading the footer.
*/
- u_int uninitialized_var(iis_ptr);
- u_int uninitialized_var(img_ptr);
+ u_int iis_ptr;
+ u_int img_ptr;
u_int ptr;
size_t sz;
int ret;
diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
index 3ccd6363ee8c..4f3bcc59a638 100644
--- a/drivers/mtd/parsers/redboot.c
+++ b/drivers/mtd/parsers/redboot.c
@@ -58,6 +58,7 @@ static void parse_redboot_of(struct mtd_info *master)
return;
ret = of_property_read_u32(npart, "fis-index-block", &dirblock);
+ of_node_put(npart);
if (ret)
return;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 4744bf94ad9a..d4e72fd5e5b3 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1097,9 +1097,9 @@ static void sm_release(struct mtd_blktrans_dev *dev)
{
struct sm_ftl *ftl = dev->priv;
- mutex_lock(&ftl->mutex);
del_timer_sync(&ftl->timer);
cancel_work_sync(&ftl->flush_work);
+ mutex_lock(&ftl->mutex);
sm_cache_flush(ftl);
mutex_unlock(&ftl->mutex);
}
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 97a5e1eaeefd..c18ae6fb753f 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -25,7 +25,9 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/sched.h>
+#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/timer.h>
@@ -34,6 +36,10 @@
/* Quirks */
#define CQSPI_NEEDS_WR_DELAY BIT(0)
+#define CQSPI_HAS_DMA BIT(1)
+#define CQSPI_STIG_WRITE BIT(2)
+#define CQSPI_SUPPORT_RESET BIT(3)
+#define CQSPI_DISABLE_DAC_MODE BIT(4)
/* Capabilities mask */
#define CQSPI_BASE_HWCAPS_MASK \
@@ -77,9 +83,6 @@ struct cqspi_st {
dma_addr_t mmap_phys_base;
int current_cs;
- int current_page_size;
- int current_erase_size;
- int current_addr_width;
unsigned long master_ref_clk_hz;
bool is_decoded_cs;
u32 fifo_depth;
@@ -88,6 +91,19 @@ struct cqspi_st {
u32 trigger_address;
u32 wr_delay;
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
+ bool read_dma;
+ void *rxbuf;
+ int bytes_to_rx;
+ int bytes_to_dma;
+ loff_t addr;
+ dma_addr_t dma_addr;
+ u8 edge_mode;
+ bool extra_dummy;
+ bool stig_write;
+ int (*indirect_read_dma)(struct spi_nor *nor, u_char *rxbuf,
+ loff_t from_addr, size_t n_rx);
+ int (*flash_reset)(struct cqspi_st *cqspi, u8 reset_type);
+ const struct zynqmp_eemi_ops *eemi_ops;
};
struct cqspi_driver_platdata {
@@ -111,13 +127,20 @@ struct cqspi_driver_platdata {
#define CQSPI_STIG_DATA_LEN_MAX 8
+/* Edge mode */
+#define CQSPI_EDGE_MODE_SDR 0
+#define CQSPI_EDGE_MODE_DDR 1
+
/* Register map */
#define CQSPI_REG_CONFIG 0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
+#define CQSPI_REG_CONFIG_PHY_ENABLE_MASK BIT(3)
#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
+#define CQSPI_REG_CONFIG_AHB_ADDR_REMAP_MASK BIT(16)
+#define CQSPI_REG_CONFIG_DTR_PROT_EN_MASK BIT(24)
#define CQSPI_REG_CONFIG_BAUD_LSB 19
#define CQSPI_REG_CONFIG_IDLE_LSB 31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
@@ -137,6 +160,7 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_WR_INSTR 0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
+#define CQSPI_REG_WR_INSTR_OPCODE_MASK 0xFF
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16
@@ -151,6 +175,7 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF
#define CQSPI_REG_READCAPTURE 0x10
+#define CQSPI_REG_READCAPTURE_DQS_ENABLE BIT(8)
#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0
#define CQSPI_REG_READCAPTURE_DELAY_LSB 1
#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF
@@ -171,6 +196,7 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_DMA_BURST_LSB 8
#define CQSPI_REG_DMA_SINGLE_MASK 0xFF
#define CQSPI_REG_DMA_BURST_MASK 0xFF
+#define CQSPI_REG_DMA_VAL 0x602
#define CQSPI_REG_REMAP 0x24
#define CQSPI_REG_MODE_BIT 0x28
@@ -181,6 +207,10 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF
+#define CQSPI_REG_WRCOMPLETION 0x38
+#define CQSPI_REG_WRCOMPLETION_POLLCNT_MASK 0xFF0000
+#define CQSPI_REG_WRCOMPLETION_POLLCNY_LSB 16
+
#define CQSPI_REG_IRQSTATUS 0x40
#define CQSPI_REG_IRQMASK 0x44
@@ -196,6 +226,7 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_CMDCTRL 0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1)
+#define CQSPI_REG_CMDCTRL_DUMMY_BYTES_LSB 7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
@@ -206,6 +237,7 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
+#define CQSPI_REG_CMDCTRL_DUMMY_BYTES_MASK 0x1F
#define CQSPI_REG_INDIRECTWR 0x70
#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0)
@@ -216,12 +248,34 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
#define CQSPI_REG_INDIRECTWRBYTES 0x7C
+#define CQSPI_REG_INDTRIG_ADDRRANGE 0x80
+#define CQSPI_REG_INDTRIG_ADDRRANGE_WIDTH 0x6
+
#define CQSPI_REG_CMDADDRESS 0x94
#define CQSPI_REG_CMDREADDATALOWER 0xA0
#define CQSPI_REG_CMDREADDATAUPPER 0xA4
#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC
+#define CQSPI_REG_PHY_CONFIG 0xB4
+#define CQSPI_REG_PHY_CONFIG_RESYNC_FLD_MASK 0x80000000
+#define CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK 0x40000000
+
+#define CQSPI_REG_DMA_SRC_ADDR 0x1000
+#define CQSPI_REG_DMA_DST_ADDR 0x1800
+#define CQSPI_REG_DMA_DST_SIZE 0x1804
+#define CQSPI_REG_DMA_DST_STS 0x1808
+#define CQSPI_REG_DMA_DST_CTRL 0x180C
+#define CQSPI_REG_DMA_DST_CTRL_VAL 0xF43FFA00
+
+#define CQSPI_REG_DMA_DTS_I_STS 0x1814
+#define CQSPI_REG_DMA_DST_I_EN 0x1818
+#define CQSPI_REG_DMA_DST_I_EN_DONE BIT(1)
+
+#define CQSPI_REG_DMA_DST_I_DIS 0x181C
+#define CQSPI_REG_DMA_DST_I_MASK 0x1820
+#define CQSPI_REG_DMA_DST_ADDR_MSB 0x1828
+
/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
@@ -241,6 +295,12 @@ struct cqspi_driver_platdata {
CQSPI_REG_IRQ_UNDERFLOW)
#define CQSPI_IRQ_STATUS_MASK 0x1FFFF
+#define CQSPI_MIO_NODE_ID_12 0x14108027
+#define CQSPI_READ_ID 0x9F
+#define CQSPI_READ_ID_LEN 6
+#define TERA_MACRO 1000000000000l
+
+#define CQSPI_RESET_TYPE_HWPIN 0
static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr)
{
@@ -266,25 +326,6 @@ static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}
-static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
-{
- struct cqspi_st *cqspi = dev;
- unsigned int irq_status;
-
- /* Read interrupt status */
- irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
-
- /* Clear interrupt */
- writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
-
- irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
-
- if (irq_status)
- complete(&cqspi->transfer_complete);
-
- return IRQ_HANDLED;
-}
-
static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
@@ -354,6 +395,83 @@ static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
return cqspi_wait_idle(cqspi);
}
+static void process_dma_irq(struct cqspi_st *cqspi)
+{
+ struct platform_device *pdev = cqspi->pdev;
+ struct device *dev = &pdev->dev;
+ unsigned int rem;
+ unsigned int reg;
+ unsigned int data;
+ u8 addr_bytes;
+ u8 opcode;
+ u8 dummy_cycles;
+
+ /* Disable DMA interrupt */
+ writel(0x0, cqspi->iobase + CQSPI_REG_DMA_DST_I_DIS);
+
+ /* Clear indirect completion status */
+ writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
+ cqspi->iobase + CQSPI_REG_INDIRECTRD);
+ dma_unmap_single(dev, cqspi->dma_addr, cqspi->bytes_to_dma,
+ DMA_FROM_DEVICE);
+ rem = cqspi->bytes_to_rx - cqspi->bytes_to_dma;
+
+ /* Read unaligned data in STIG */
+ if (rem) {
+ cqspi->rxbuf += cqspi->bytes_to_dma;
+ writel(cqspi->addr + cqspi->bytes_to_dma,
+ cqspi->iobase + CQSPI_REG_CMDADDRESS);
+ opcode = (u8)readl(cqspi->iobase + CQSPI_REG_RD_INSTR);
+ addr_bytes = readl(cqspi->iobase + CQSPI_REG_SIZE) &
+ CQSPI_REG_SIZE_ADDRESS_MASK;
+ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+ reg |= (addr_bytes & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) <<
+ CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
+ dummy_cycles = (readl(cqspi->iobase + CQSPI_REG_RD_INSTR) >>
+ CQSPI_REG_RD_INSTR_DUMMY_LSB) &
+ CQSPI_REG_RD_INSTR_DUMMY_MASK;
+ reg |= (dummy_cycles & CQSPI_REG_CMDCTRL_DUMMY_BYTES_MASK) <<
+ CQSPI_REG_CMDCTRL_DUMMY_BYTES_LSB;
+ /* 0 means 1 byte. */
+ reg |= (((rem - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
+ cqspi_exec_flash_cmd(cqspi, reg);
+ data = readl(cqspi->iobase + CQSPI_REG_CMDREADDATALOWER);
+
+ /* Put the read value into rx_buf */
+ memcpy(cqspi->rxbuf, &data, rem);
+ }
+}
+
+static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
+{
+ struct cqspi_st *cqspi = dev;
+ unsigned int irq_status;
+ unsigned int dma_status;
+
+ /* Read interrupt status */
+ irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
+
+ /* Clear interrupt */
+ writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
+
+ if (cqspi->read_dma) {
+ dma_status = readl(cqspi->iobase + CQSPI_REG_DMA_DTS_I_STS);
+ writel(dma_status, cqspi->iobase + CQSPI_REG_DMA_DTS_I_STS);
+ if (dma_status)
+ process_dma_irq(cqspi);
+ }
+
+ irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+
+ if (irq_status || (cqspi->read_dma && dma_status))
+ complete(&cqspi->transfer_complete);
+
+ return IRQ_HANDLED;
+}
+
static int cqspi_command_read(struct spi_nor *nor,
const u8 *txbuf, const unsigned n_tx,
u8 *rxbuf, const unsigned n_rx)
@@ -365,6 +483,7 @@ static int cqspi_command_read(struct spi_nor *nor,
unsigned int reg;
unsigned int read_len;
int status;
+ u8 dummy_cycles;
if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
@@ -382,6 +501,14 @@ static int cqspi_command_read(struct spi_nor *nor,
/* 0 means 1 byte. */
reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
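+ /*
+ * DDR-mode STIG reads need 8 dummy cycles here (one more if the
+ * extra-dummy adjustment is active); SDR reads need none.
+ */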
+ if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR)
+ dummy_cycles = 8;
+ else
+ dummy_cycles = 0;
+ if (cqspi->extra_dummy)
+ dummy_cycles++;
+ reg |= ((dummy_cycles & CQSPI_REG_CMDCTRL_DUMMY_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_DUMMY_BYTES_LSB);
status = cqspi_exec_flash_cmd(cqspi, reg);
if (status)
return status;
@@ -421,6 +548,12 @@ static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
return -EINVAL;
}
+ reg = f_pdata->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
+ reg |= f_pdata->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
+ writel(reg, reg_base + CQSPI_REG_WR_INSTR);
+ reg = cqspi_calc_rdreg(nor, opcode);
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
if (n_tx) {
reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
@@ -466,9 +599,14 @@ static int cqspi_read_setup(struct spi_nor *nor)
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
+ struct platform_device *pdev = cqspi->pdev;
+ struct device *dev = &pdev->dev;
+ struct cqspi_driver_platdata *ddata;
unsigned int dummy_clk = 0;
unsigned int reg;
+ ddata = (struct cqspi_driver_platdata *)of_device_get_match_data(dev);
+
reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
@@ -477,18 +615,27 @@ static int cqspi_read_setup(struct spi_nor *nor)
if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
return -EOPNOTSUPP;
- if (dummy_clk / 8) {
- reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
- /* Set mode bits high to ensure chip doesn't enter XIP */
- writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);
-
- /* Need to subtract the mode byte (8 clocks). */
- if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
- dummy_clk -= 8;
-
+ if (!(nor->flags & SNOR_F_BROKEN_OCTAL_DDR)) {
+ if (cqspi->extra_dummy)
+ dummy_clk++;
if (dummy_clk)
reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
<< CQSPI_REG_RD_INSTR_DUMMY_LSB;
+ } else {
+ if (dummy_clk / 8) {
+ reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
+ /* Set mode bits high to ensure chip doesn't enter XIP */
+ writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);
+
+ /* Need to subtract the mode byte (8 clocks). */
+ if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
+ dummy_clk -= 8;
+
+ if (dummy_clk)
+ reg |= (dummy_clk &
+ CQSPI_REG_RD_INSTR_DUMMY_MASK)
+ << CQSPI_REG_RD_INSTR_DUMMY_LSB;
+ }
}
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
@@ -513,6 +660,11 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
unsigned int bytes_to_read = 0;
u8 *rxbuf_end = rxbuf + n_rx;
int ret = 0;
+ u32 reg;
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
@@ -593,7 +745,7 @@ failrd:
return ret;
}
-static int cqspi_write_setup(struct spi_nor *nor)
+static int cqspi_write_setup(struct spi_nor *nor, const u8 opcode)
{
unsigned int reg;
struct cqspi_flash_pdata *f_pdata = nor->priv;
@@ -601,9 +753,11 @@ static int cqspi_write_setup(struct spi_nor *nor)
void __iomem *reg_base = cqspi->iobase;
/* Set opcode. */
- reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
+ reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
+ reg |= f_pdata->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
+ reg |= f_pdata->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
writel(reg, reg_base + CQSPI_REG_WR_INSTR);
- reg = cqspi_calc_rdreg(nor, nor->program_opcode);
+ reg = cqspi_calc_rdreg(nor, opcode);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
reg = readl(reg_base + CQSPI_REG_SIZE);
@@ -613,6 +767,71 @@ static int cqspi_write_setup(struct spi_nor *nor)
return 0;
}
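+
+/*
+ * Program the flash through the STIG interface: the payload is sent in
+ * chunks of at most 8 bytes, the limit of the two 32-bit STIG write-data
+ * registers, with WREN issued before and a ready-wait after each chunk.
+ */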
+static int cqspi_stig_write(struct spi_nor *nor, loff_t addr,
+ const u8 *txbuf, unsigned int n_tx)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+ unsigned int data;
+ int ret;
+ unsigned int pgmlen = 0, wr_len;
+
+ if (n_tx && !txbuf) {
+ dev_err(nor->dev,
+ "Invalid input argument, cmdlen %d txbuf 0x%p\n",
+ n_tx, txbuf);
+ return -EINVAL;
+ }
+ pgmlen = n_tx;
+ if (n_tx > 8)
+ n_tx = 8;
+
+ while (pgmlen) {
+ ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+ if (ret)
+ return ret;
+
+ reg = nor->program_opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
+ reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK) <<
+ CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+ reg |= ((nor->addr_width - 1) &
+ CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) <<
+ CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
+ writel(addr, reg_base + CQSPI_REG_CMDADDRESS);
+
+ wr_len = n_tx > 4 ? 4 : n_tx;
+ data = 0;
+ memcpy(&data, txbuf, wr_len);
+ writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
+ if (n_tx > 4) {
+ txbuf += wr_len;
+ wr_len = n_tx - wr_len;
+ data = 0;
+ memcpy(&data, txbuf, wr_len);
+ writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
+ }
+
+ txbuf += wr_len;
+ pgmlen -= n_tx;
+ addr += n_tx;
+ n_tx = pgmlen > 8 ? 8 : pgmlen;
+ ret = cqspi_exec_flash_cmd(cqspi, reg);
+ if (ret)
+ return ret;
+
+ if (nor->program_opcode != SPINOR_OP_WRCR) {
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
const u8 *txbuf, const size_t n_tx)
{
@@ -623,6 +842,11 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
unsigned int remaining = n_tx;
unsigned int write_bytes;
int ret;
+ u32 reg;
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
@@ -736,32 +960,6 @@ static void cqspi_chipselect(struct spi_nor *nor)
writel(reg, reg_base + CQSPI_REG_CONFIG);
}
-static void cqspi_configure_cs_and_sizes(struct spi_nor *nor)
-{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
- struct cqspi_st *cqspi = f_pdata->cqspi;
- void __iomem *iobase = cqspi->iobase;
- unsigned int reg;
-
- /* configure page size and block size. */
- reg = readl(iobase + CQSPI_REG_SIZE);
- reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
- reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
- reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
- reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB);
- reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB);
- reg |= (nor->addr_width - 1);
- writel(reg, iobase + CQSPI_REG_SIZE);
-
- /* configure the chip select */
- cqspi_chipselect(nor);
-
- /* Store the new configuration of the controller */
- cqspi->current_page_size = nor->page_size;
- cqspi->current_erase_size = nor->mtd.erasesize;
- cqspi->current_addr_width = nor->addr_width;
-}
-
static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
const unsigned int ns_val)
{
@@ -867,18 +1065,13 @@ static void cqspi_configure(struct spi_nor *nor)
int switch_cs = (cqspi->current_cs != f_pdata->cs);
int switch_ck = (cqspi->sclk != sclk);
- if ((cqspi->current_page_size != nor->page_size) ||
- (cqspi->current_erase_size != nor->mtd.erasesize) ||
- (cqspi->current_addr_width != nor->addr_width))
- switch_cs = 1;
-
if (switch_cs || switch_ck)
cqspi_controller_enable(cqspi, 0);
/* Switch chip select. */
if (switch_cs) {
cqspi->current_cs = f_pdata->cs;
- cqspi_configure_cs_and_sizes(nor);
+ cqspi_chipselect(nor);
}
/* Setup baudrate divisor and delays */
@@ -916,6 +1109,37 @@ static int cqspi_set_protocol(struct spi_nor *nor, const int read)
case SNOR_PROTO_1_1_8:
f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
break;
+ case SNOR_PROTO_8_8_8:
+ if (f_pdata->cqspi->edge_mode == CQSPI_EDGE_MODE_DDR) {
+ f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL;
+ f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL;
+ f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (nor->write_proto) {
+ case SNOR_PROTO_1_1_1:
+ f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+ break;
+ case SNOR_PROTO_1_1_2:
+ f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
+ break;
+ case SNOR_PROTO_1_1_4:
+ f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
+ break;
+ case SNOR_PROTO_1_1_8:
+ f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
+ break;
+ case SNOR_PROTO_8_8_8:
+ if (f_pdata->cqspi->edge_mode == CQSPI_EDGE_MODE_DDR) {
+ f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL;
+ f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL;
+ f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
+ }
+ break;
default:
return -EINVAL;
}
@@ -937,13 +1161,15 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
if (ret)
return ret;
- ret = cqspi_write_setup(nor);
+ ret = cqspi_write_setup(nor, nor->program_opcode);
if (ret)
return ret;
if (f_pdata->use_direct_mode) {
memcpy_toio(cqspi->ahb_base + to, buf, len);
ret = cqspi_wait_idle(cqspi);
+ } else if (cqspi->stig_write) {
+ ret = cqspi_stig_write(nor, to, buf, len);
} else {
ret = cqspi_indirect_write_execute(nor, to, buf, len);
}
@@ -1021,6 +1247,7 @@ static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
size_t len, u_char *buf)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
ret = cqspi_set_protocol(nor, 1);
@@ -1031,10 +1258,14 @@ static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
if (ret)
return ret;
- if (f_pdata->use_direct_mode)
+ if (f_pdata->use_direct_mode) {
ret = cqspi_direct_read_execute(nor, buf, from, len);
- else
+ } else if (cqspi->read_dma && virt_addr_valid(buf) &&
+ cqspi->indirect_read_dma) {
+ ret = cqspi->indirect_read_dma(nor, buf, from, len);
+ } else {
ret = cqspi_indirect_read_execute(nor, buf, from, len);
+ }
if (ret)
return ret;
@@ -1049,8 +1280,7 @@ static int cqspi_erase(struct spi_nor *nor, loff_t offs)
if (ret)
return ret;
- /* Send write enable, then erase commands. */
- ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+ ret = cqspi_write_setup(nor, nor->erase_opcode);
if (ret)
return ret;
@@ -1082,12 +1312,16 @@ static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
ret = cqspi_set_protocol(nor, 0);
- if (!ret)
+ if (!ret) {
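+ /* DDR transfers are 2-byte granular; round odd lengths up */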
+ if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR)
+ len = ((len % 2) != 0) ? (len + 1) : len;
ret = cqspi_command_read(nor, &opcode, 1, buf, len);
-
+ }
return ret;
}
@@ -1167,6 +1401,127 @@ static int cqspi_of_get_pdata(struct platform_device *pdev)
return 0;
}
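+
+/*
+ * Sweep the RX DLL delay taps (with and without an extra dummy cycle)
+ * while reading back the JEDEC ID, then program the tap at the centre of
+ * the widest window in which the ID matched.
+ */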
+static int cqspi_setdlldelay(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ int i;
+ u8 j;
+ int ret = 1;
+ u8 id[CQSPI_READ_ID_LEN];
+ bool rxtapfound = false;
+ u8 min_rxtap = 0;
+ u8 max_rxtap = 0;
+ u8 avg_rxtap;
+ bool id_matched;
+ u32 txtap = 0;
+ u8 max_tap;
+ s8 max_windowsize = -1;
+ u8 windowsize;
+ u8 dummy_incr;
+ u8 dummy_flag = 0;
+
+ max_tap = ((TERA_MACRO / cqspi->master_ref_clk_hz) / 160);
+ for (dummy_incr = 0; dummy_incr <= 1; dummy_incr++) {
+ if (dummy_incr)
+ cqspi->extra_dummy = true;
+ for (i = 0; i <= max_tap; i++) {
+ writel((txtap | i), cqspi->iobase +
+ CQSPI_REG_PHY_CONFIG);
+ writel((CQSPI_REG_PHY_CONFIG_RESYNC_FLD_MASK | txtap |
+ i), cqspi->iobase + CQSPI_REG_PHY_CONFIG);
+ ret = nor->read_reg(nor, CQSPI_READ_ID, id,
+ CQSPI_READ_ID_LEN);
+ if (ret < 0) {
+ dev_err(nor->dev,
+ "error %d reading JEDEC ID\n", ret);
+ return ret;
+ }
+ id_matched = true;
+ for (j = 0; j < CQSPI_READ_ID_LEN; j++) {
+ if (nor->device_id[j] != id[j]) {
+ id_matched = false;
+ break;
+ }
+ }
+ if (id_matched) {
+ if (!rxtapfound) {
+ min_rxtap = i;
+ max_rxtap = i;
+ rxtapfound = true;
+ } else {
+ max_rxtap = i;
+ }
+ }
+ if (!id_matched || i == max_tap) {
+ if (rxtapfound) {
+ windowsize = max_rxtap - min_rxtap + 1;
+ if (windowsize > max_windowsize) {
+ dummy_flag = dummy_incr;
+ max_windowsize = windowsize;
+ avg_rxtap = (max_rxtap +
+ min_rxtap) / 2;
+ }
+ rxtapfound = false;
+ }
+ }
+ }
+ if (!dummy_incr) {
+ rxtapfound = false;
+ min_rxtap = 0;
+ max_rxtap = 0;
+ }
+ }
+ if (!dummy_flag)
+ cqspi->extra_dummy = false;
+ if (max_windowsize < 3)
+ return -EINVAL;
+
+ writel((txtap | avg_rxtap), cqspi->iobase + CQSPI_REG_PHY_CONFIG);
+ writel((CQSPI_REG_PHY_CONFIG_RESYNC_FLD_MASK | txtap | avg_rxtap),
+ cqspi->iobase + CQSPI_REG_PHY_CONFIG);
+
+ return 0;
+}
+
+static int cqspi_setup_edgemode(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ u32 reg;
+ int ret;
+
+ cqspi_controller_enable(cqspi, 0);
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= (CQSPI_REG_CONFIG_PHY_ENABLE_MASK);
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
+ /* Program POLL_CNT */
+ reg = readl(cqspi->iobase + CQSPI_REG_WRCOMPLETION);
+ reg &= ~CQSPI_REG_WRCOMPLETION_POLLCNT_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_WRCOMPLETION);
+
+ reg |= (0x3 << CQSPI_REG_WRCOMPLETION_POLLCNT_LSB);
+ writel(reg, cqspi->iobase + CQSPI_REG_WRCOMPLETION);
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_DTR_PROT_EN_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
+ reg = readl(cqspi->iobase + CQSPI_REG_READCAPTURE);
+ reg |= CQSPI_REG_READCAPTURE_DQS_ENABLE;
+ writel(reg, cqspi->iobase + CQSPI_REG_READCAPTURE);
+
+ cqspi->edge_mode = CQSPI_EDGE_MODE_DDR;
+
+ cqspi_controller_enable(cqspi, 1);
+
+ ret = cqspi_setdlldelay(nor);
+
+ return ret;
+}
+
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
u32 reg;
@@ -1176,6 +1531,10 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
/* Configure the remap address register, no remap */
writel(0, cqspi->iobase + CQSPI_REG_REMAP);
+ /* Reset the Delay lines */
+ writel(CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK,
+ cqspi->iobase + CQSPI_REG_PHY_CONFIG);
+
/* Disable all interrupts. */
writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);
@@ -1193,14 +1552,155 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
- /* Enable Direct Access Controller */
reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
- reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
+ reg &= ~CQSPI_REG_CONFIG_DTR_PROT_EN_MASK;
+ reg &= ~CQSPI_REG_CONFIG_PHY_ENABLE_MASK;
+ if (cqspi->read_dma) {
+ reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
+ reg |= CQSPI_REG_CONFIG_DMA_MASK;
+ } else {
+ /* Enable Direct Access Controller */
+ reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
+ }
writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
cqspi_controller_enable(cqspi, 1);
}
+static int cqspi_versal_flash_reset(struct cqspi_st *cqspi, u8 reset_type)
+{
+ struct platform_device *pdev = cqspi->pdev;
+ int ret;
+ int gpio;
+ enum of_gpio_flags flags;
+
+ if (reset_type == CQSPI_RESET_TYPE_HWPIN) {
+ gpio = of_get_named_gpio_flags(pdev->dev.of_node,
+ "reset-gpios", 0, &flags);
+ if (!gpio_is_valid(gpio))
+ return -EIO;
+ ret = devm_gpio_request_one(&pdev->dev, gpio, flags,
+ "flash-reset");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to get reset-gpios: %d\n", ret);
+ return -EIO;
+ }
+
+ /* Request for PIN */
+ cqspi->eemi_ops->pinctrl_request(CQSPI_MIO_NODE_ID_12);
+
+ /* Enable hysteresis in cmos receiver */
+ cqspi->eemi_ops->pinctrl_set_config(CQSPI_MIO_NODE_ID_12,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS,
+ PM_PINCTRL_INPUT_TYPE_SCHMITT);
+
+ /* Set the direction as output and enable the output */
+ gpio_direction_output(gpio, 1);
+
+ /* Disable Tri-state */
+ cqspi->eemi_ops->pinctrl_set_config(CQSPI_MIO_NODE_ID_12,
+ PM_PINCTRL_CONFIG_TRI_STATE,
+ PM_PINCTRL_TRI_STATE_DISABLE);
+ udelay(1);
+
+ /* Set value 0 to pin */
+ gpio_set_value(gpio, 0);
+ udelay(1);
+
+ /* Set value 1 to pin */
+ gpio_set_value(gpio, 1);
+ udelay(1);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int cqspi_versal_indirect_read_dma(struct spi_nor *nor, u_char *rxbuf,
+ loff_t from_addr, size_t n_rx)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int rx_rem;
+ int ret = 0;
+ u32 reg;
+
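+ /*
+ * The DMA engine moves whole 32-bit words; any 1-3 trailing bytes
+ * are fetched separately via a STIG read in process_dma_irq().
+ */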
+ rx_rem = n_rx % 4;
+ cqspi->bytes_to_rx = n_rx;
+ cqspi->bytes_to_dma = (n_rx - rx_rem);
+ cqspi->addr = from_addr;
+ cqspi->rxbuf = rxbuf;
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_DMA_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
+ writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
+ writel(cqspi->bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES);
+ writel(CQSPI_REG_INDTRIG_ADDRRANGE_WIDTH,
+ reg_base + CQSPI_REG_INDTRIG_ADDRRANGE);
+
+ /* Clear all interrupts. */
+ writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
+
+ /* Enable DMA done interrupt */
+ writel(CQSPI_REG_DMA_DST_I_EN_DONE,
+ reg_base + CQSPI_REG_DMA_DST_I_EN);
+
+ /* Default DMA periph configuration */
+ writel(CQSPI_REG_DMA_VAL, reg_base + CQSPI_REG_DMA);
+
+ cqspi->dma_addr = dma_map_single(nor->dev, rxbuf, cqspi->bytes_to_dma,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(nor->dev, cqspi->dma_addr)) {
+ dev_err(nor->dev, "ERR:rxdma:memory not mapped\n");
+ /* Nothing mapped or started yet, so nothing to undo */
+ return -ENOMEM;
+ }
+ /* Configure DMA Dst address */
+ writel(lower_32_bits(cqspi->dma_addr),
+ reg_base + CQSPI_REG_DMA_DST_ADDR);
+ writel(upper_32_bits(cqspi->dma_addr),
+ reg_base + CQSPI_REG_DMA_DST_ADDR_MSB);
+
+ /* Configure DMA Src read address */
+ writel(cqspi->trigger_address, reg_base + CQSPI_REG_DMA_SRC_ADDR);
+
+ /* Set DMA destination size */
+ writel(cqspi->bytes_to_dma, reg_base + CQSPI_REG_DMA_DST_SIZE);
+
+ /* Set DMA destination control */
+ writel(CQSPI_REG_DMA_DST_CTRL_VAL, reg_base + CQSPI_REG_DMA_DST_CTRL);
+
+ writel(CQSPI_REG_INDIRECTRD_START_MASK,
+ reg_base + CQSPI_REG_INDIRECTRD);
+
+ reinit_completion(&cqspi->transfer_complete);
+
+ if (!wait_for_completion_timeout(&cqspi->transfer_complete,
+ msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) {
+ ret = -ETIMEDOUT;
+ goto failrd;
+ }
+
+ return 0;
+
+failrd:
+ /* Disable DMA interrupt */
+ writel(0x0, reg_base + CQSPI_REG_DMA_DST_I_DIS);
+
+ dma_unmap_single(nor->dev, cqspi->dma_addr, cqspi->bytes_to_dma,
+ DMA_FROM_DEVICE);
+
+ /* Cancel the indirect read */
+ writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
+ reg_base + CQSPI_REG_INDIRECTRD);
+
+ return ret;
+}
+
-static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
+static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
{
dma_cap_mask_t mask;
@@ -1210,10 +1710,16 @@ static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
cqspi->rx_chan = dma_request_chan_by_mask(&mask);
if (IS_ERR(cqspi->rx_chan)) {
- dev_err(&cqspi->pdev->dev, "No Rx DMA available\n");
+ int ret = PTR_ERR(cqspi->rx_chan);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&cqspi->pdev->dev, "No Rx DMA available\n");
cqspi->rx_chan = NULL;
+ return ret;
}
init_completion(&cqspi->rx_dma_complete);
+
+ return 0;
}
static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
@@ -1223,7 +1729,7 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
const struct cqspi_driver_platdata *ddata;
struct spi_nor_hwcaps hwcaps;
struct cqspi_flash_pdata *f_pdata;
- struct spi_nor *nor;
+ struct spi_nor *nor = NULL;
struct mtd_info *mtd;
unsigned int cs;
int i, ret;
@@ -1281,6 +1787,13 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
goto err;
}
+ if (ddata->quirks & CQSPI_SUPPORT_RESET) {
+ ret = cqspi->flash_reset(cqspi,
+ CQSPI_RESET_TYPE_HWPIN);
+ if (ret)
+ goto err;
+ }
+
ret = spi_nor_scan(nor, NULL, &hwcaps);
if (ret)
goto err;
@@ -1291,16 +1804,25 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
f_pdata->registered = true;
- if (mtd->size <= cqspi->ahb_size) {
+ if (mtd->size <= cqspi->ahb_size && !cqspi->read_dma) {
f_pdata->use_direct_mode = true;
dev_dbg(nor->dev, "using direct mode for %s\n",
mtd->name);
- if (!cqspi->rx_chan)
- cqspi_request_mmap_dma(cqspi);
+ if (!cqspi->rx_chan) {
+ ret = cqspi_request_mmap_dma(cqspi);
+ if (ret == -EPROBE_DEFER)
+ goto err;
+ }
}
}
+ if (nor && !(nor->flags & SNOR_F_BROKEN_OCTAL_DDR)) {
+ ret = cqspi_setup_edgemode(nor);
+ if (ret)
+ goto err;
+ }
+
return 0;
err:
@@ -1409,6 +1931,25 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
cqspi->master_ref_clk_hz);
+ if (ddata && (ddata->quirks & CQSPI_HAS_DMA)) {
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ cqspi->read_dma = true;
+ }
+
+ cqspi->stig_write = false;
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "xlnx,versal-ospi-1.0") &&
+ cqspi->read_dma) {
+ cqspi->indirect_read_dma = cqspi_versal_indirect_read_dma;
+ cqspi->flash_reset = cqspi_versal_flash_reset;
+ if (ddata && (ddata->quirks & CQSPI_STIG_WRITE))
+ cqspi->stig_write = true;
+ cqspi->eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(cqspi->eemi_ops))
+ return PTR_ERR(cqspi->eemi_ops);
+ }
+
ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
pdev->name, cqspi);
if (ret) {
@@ -1420,6 +1961,8 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi_controller_init(cqspi);
cqspi->current_cs = -1;
cqspi->sclk = 0;
+ cqspi->extra_dummy = false;
+ cqspi->edge_mode = CQSPI_EDGE_MODE_SDR;
ret = cqspi_setup_flash(cqspi, np);
if (ret) {
@@ -1464,17 +2007,30 @@ static int cqspi_remove(struct platform_device *pdev)
static int cqspi_suspend(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
cqspi_controller_enable(cqspi, 0);
- return 0;
+
+ clk_disable_unprepare(cqspi->clk);
+
+ return 0;
}
static int cqspi_resume(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
- cqspi_controller_enable(cqspi, 1);
- return 0;
+ clk_prepare_enable(cqspi->clk);
+ cqspi_wait_idle(cqspi);
+ cqspi_controller_init(cqspi);
+
+ cqspi->current_cs = -1;
+ cqspi->sclk = 0;
+
+ return 0;
}
static const struct dev_pm_ops cqspi__dev_pm_ops = {
@@ -1489,6 +2045,7 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = {
static const struct cqspi_driver_platdata cdns_qspi = {
.hwcaps_mask = CQSPI_BASE_HWCAPS_MASK,
+ .quirks = CQSPI_DISABLE_DAC_MODE,
};
static const struct cqspi_driver_platdata k2g_qspi = {
@@ -1501,6 +2058,13 @@ static const struct cqspi_driver_platdata am654_ospi = {
.quirks = CQSPI_NEEDS_WR_DELAY,
};
+static const struct cqspi_driver_platdata versal_ospi = {
+ .hwcaps_mask = (SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_PP | SNOR_HWCAPS_PP_8_8_8 |
+ SNOR_HWCAPS_READ_1_1_8 | SNOR_HWCAPS_READ_8_8_8),
+ .quirks = CQSPI_HAS_DMA | CQSPI_STIG_WRITE | CQSPI_SUPPORT_RESET,
+};
+
static const struct of_device_id cqspi_dt_ids[] = {
{
.compatible = "cdns,qspi-nor",
@@ -1514,6 +2078,10 @@ static const struct of_device_id cqspi_dt_ids[] = {
.compatible = "ti,am654-ospi",
.data = &am654_ospi,
},
+ {
+ .compatible = "xlnx,versal-ospi-1.0",
+ .data = (void *)&versal_ospi,
+ },
{ /* end of table */ }
};
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index 43e55a2e9b27..21b98c82e196 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -113,7 +113,7 @@
#define ERASE_OPCODE_SHIFT 8
#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
#define ERASE_64K_OPCODE_SHIFT 16
-#define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
+#define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT)
#define INTEL_SPI_TIMEOUT 5000 /* ms */
#define INTEL_SPI_FIFO_SZ 64
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index dd6963e4af2c..05f8d851eb18 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -22,6 +22,7 @@
#include <linux/sched/task_stack.h>
#include <linux/spi/flash.h>
#include <linux/mtd/spi-nor.h>
+#include <linux/spi/spi.h>
/* Define max times to check status register before we give up. */
@@ -37,7 +38,6 @@
*/
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
-#define SPI_NOR_MAX_ID_LEN 6
#define SPI_NOR_MAX_ADDR_WIDTH 4
struct sfdp_parameter_header {
@@ -78,6 +78,23 @@ struct sfdp_header {
/* Basic Flash Parameter Table */
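+/*
+ * update_stripe - return false for erase and register-write opcodes, which
+ * must be sent to both devices unmodified in dual-parallel mode, and true
+ * for opcodes whose data may be striped across the two flashes.
+ */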
+bool update_stripe(const u8 opcode)
+{
+ if (opcode == SPINOR_OP_BE_4K ||
+ opcode == SPINOR_OP_BE_32K ||
+ opcode == SPINOR_OP_CHIP_ERASE ||
+ opcode == SPINOR_OP_SE ||
+ opcode == SPINOR_OP_BE_32K_4B ||
+ opcode == SPINOR_OP_SE_4B ||
+ opcode == SPINOR_OP_BE_4K_4B ||
+ opcode == SPINOR_OP_WRSR ||
+ opcode == SPINOR_OP_WREAR ||
+ opcode == SPINOR_OP_BRWR ||
+ opcode == SPINOR_OP_WRSR2)
+ return false;
+
+ return true;
+}
/*
* JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
* They are indexed from 1 but C arrays are indexed from 0.
@@ -196,7 +213,7 @@ struct flash_info {
u16 page_size;
u16 addr_width;
- u16 flags;
+ u32 flags;
#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
#define SST_WRITE BIT(2) /* use SST byte programming */
@@ -233,6 +250,10 @@ struct flash_info {
#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
#define USE_CLSR BIT(14) /* use CLSR command */
#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
+#define SST_GLOBAL_PROT_UNLK BIT(16) /* Unlock the Global protection for
+ * sst flashes
+ */
+#define SPI_NOR_OCTAL_WRITE BIT(17) /* Flash supports Octal Write */
/* Part specific fixup hooks. */
const struct spi_nor_fixups *fixups;
@@ -388,6 +409,8 @@ static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
return nor->write(nor, to, len, buf);
}
+static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr);
+
/*
* Read the status register, returning its value in the location
* Return the status register value.
@@ -396,17 +419,23 @@ static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
static int read_sr(struct spi_nor *nor)
{
int ret;
+ int count;
+
+ if (nor->isparallel)
+ count = 2;
+ else
+ count = 1;
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_DATA_IN(count, nor->bouncebuf, 1));
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->read_reg(nor, SPINOR_OP_RDSR, nor->bouncebuf, 1);
+ ret = nor->read_reg(nor, SPINOR_OP_RDSR, nor->bouncebuf, count);
}
if (ret < 0) {
@@ -414,6 +443,9 @@ static int read_sr(struct spi_nor *nor)
return ret;
}
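+ /*
+ * In dual-parallel mode each flash returns its own status byte;
+ * merge them so a bit set in either device is reported.
+ */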
+ if (nor->isparallel)
+ nor->bouncebuf[0] |= nor->bouncebuf[1];
+
return nor->bouncebuf[0];
}
@@ -425,17 +457,23 @@ static int read_sr(struct spi_nor *nor)
static int read_fsr(struct spi_nor *nor)
{
int ret;
+ int count;
+
+ if (nor->isparallel)
+ count = 2;
+ else
+ count = 1;
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_DATA_IN(count, nor->bouncebuf, 1));
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->read_reg(nor, SPINOR_OP_RDFSR, nor->bouncebuf, 1);
+ ret = nor->read_reg(nor, SPINOR_OP_RDFSR, nor->bouncebuf, count);
}
if (ret < 0) {
@@ -715,6 +753,50 @@ static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
return nor->read_reg(nor, SPINOR_OP_XRDSR, sr, 1);
}
+/**
+ * read_ear - Get the extended/bank address register value
+ * @nor: Pointer to the flash control structure
+ * @info: Pointer to the flash_info structure
+ *
+ * This routine reads the Extended/bank address register value.
+ *
+ * Return: Negative if an error occurred, EAR register value otherwise.
+ */
+static int read_ear(struct spi_nor *nor, struct flash_info *info)
+{
+ int ret;
+ u8 code;
+
+ /* This is actually Spansion */
+ if (JEDEC_MFR(info) == CFI_MFR_AMD)
+ code = SPINOR_OP_BRRD;
+ /* This is actually Micron */
+ else if (JEDEC_MFR(info) == CFI_MFR_ST ||
+ JEDEC_MFR(info) == CFI_MFR_MACRONIX ||
+ JEDEC_MFR(info) == SNOR_MFR_ISSI)
+ code = SPINOR_OP_RDEAR;
+ else
+ return -EINVAL;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->read_reg(nor, code, nor->bouncebuf, 1);
+ }
+
+ if (ret < 0) {
+ pr_err("error %d reading EAR\n", ret);
+ return ret;
+ }
+ return nor->bouncebuf[0];
+}
+
static int s3an_sr_ready(struct spi_nor *nor)
{
int ret;
@@ -846,11 +928,58 @@ static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
return -ETIMEDOUT;
}
-static int spi_nor_wait_till_ready(struct spi_nor *nor)
+int spi_nor_wait_till_ready(struct spi_nor *nor)
{
return spi_nor_wait_till_ready_with_timeout(nor,
DEFAULT_READY_WAIT_JIFFIES);
}
+EXPORT_SYMBOL_GPL(spi_nor_wait_till_ready);
+
+/*
+ * Update Extended Address/bank selection Register.
+ * Call with flash->lock locked.
+ */
+static int write_ear(struct spi_nor *nor, u32 addr)
+{
+ u8 code;
+ u8 ear;
+ int ret;
+ struct mtd_info *mtd = &nor->mtd;
+
+ /* Wait until finished previous write command. */
+ if (spi_nor_wait_till_ready(nor))
+ return 1;
+
+ if (mtd->size <= (0x1000000) << nor->shift)
+ return 0;
+
+ addr = addr % (u32) mtd->size;
+ ear = addr >> 24;
+
+ if ((!nor->isstacked) && (ear == nor->curbank))
+ return 0;
+
+ if (nor->isstacked && (mtd->size <= 0x2000000))
+ return 0;
+
+ if (nor->jedec_id == CFI_MFR_AMD) {
+ code = SPINOR_OP_BRWR;
+ } else if (nor->jedec_id == CFI_MFR_ST ||
+ nor->jedec_id == CFI_MFR_MACRONIX ||
+ nor->jedec_id == SNOR_MFR_ISSI) {
+ write_enable(nor);
+ code = SPINOR_OP_WREAR;
+ } else {
+ /* No known extended address register for this manufacturer */
+ return -EINVAL;
+ }
+ nor->bouncebuf[0] = ear;
+
+ ret = nor->write_reg(nor, code, nor->bouncebuf, 1);
+ if (ret < 0)
+ return ret;
+
+ nor->curbank = ear;
+
+ return 0;
+}
/*
* Erase the whole flash memory
@@ -1010,6 +1139,8 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
continue;
erase = &map->erase_type[i];
+ if (!erase->size)
+ continue;
/* Alignment is not mandatory for overlaid regions */
if (region->offset & SNOR_OVERLAID_REGION &&
@@ -1241,7 +1372,7 @@ destroy_erase_cmd_list:
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- u32 addr, len;
+ u32 addr, len, offset;
uint32_t rem;
int ret;
@@ -1293,9 +1424,35 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
/* "sector"-at-a-time erase */
} else if (spi_nor_has_uniform_erase(nor)) {
while (len) {
+
+ write_enable(nor);
+ offset = addr;
+ if (nor->isparallel == 1)
+ offset /= 2;
+
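+ /*
+ * In dual-stacked mode the upper half of the address space
+ * maps to the second flash; select it with the U_PAGE flag.
+ */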
+ if (nor->isstacked == 1) {
+ if (offset >= (mtd->size / 2)) {
+ offset = offset - (mtd->size / 2);
+ nor->spi->master->flags |=
+ SPI_MASTER_U_PAGE;
+ } else {
+ nor->spi->master->flags &=
+ ~SPI_MASTER_U_PAGE;
+ }
+ }
+ if (nor->addr_width == 3) {
+ /* Update Extended Address Register */
+ ret = write_ear(nor, offset);
+ if (ret)
+ goto erase_err;
+ }
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
+
write_enable(nor);
- ret = spi_nor_erase_sector(nor, addr);
+ ret = spi_nor_erase_sector(nor, offset);
if (ret)
goto erase_err;
@@ -1322,6 +1479,118 @@ erase_err:
return ret;
}
+static inline uint16_t min_lockable_sectors(struct spi_nor *nor,
+ uint16_t n_sectors)
+{
+ uint16_t lock_granularity;
+
+ /*
+ * Revisit - SST (not used by us) has the same JEDEC ID as micron but
+ * protected area table is similar to that of spansion.
+ */
+ lock_granularity = max_t(uint16_t, 1, n_sectors / M25P_MAX_LOCKABLE_SECTORS);
+ if (nor->jedec_id == CFI_MFR_ST) /* Micron */
+ lock_granularity = 1;
+
+ return lock_granularity;
+}
+
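+/*
+ * Return the offset at which the area protected by the given number of
+ * block-protect bits begins; protection always grows down from the top
+ * of the flash.
+ */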
+static inline uint32_t get_protected_area_start(struct spi_nor *nor,
+ uint8_t lock_bits)
+{
+ u16 n_sectors;
+ u32 sector_size;
+ uint64_t mtd_size;
+ struct mtd_info *mtd = &nor->mtd;
+
+ n_sectors = nor->n_sectors;
+ sector_size = nor->sector_size;
+ mtd_size = mtd->size;
+
+ if (nor->isparallel) {
+ sector_size = (nor->sector_size >> 1);
+ mtd_size = (mtd->size >> 1);
+ }
+ if (nor->isstacked) {
+ n_sectors = (nor->n_sectors >> 1);
+ mtd_size = (mtd->size >> 1);
+ }
+
+ return mtd_size - (1 << (lock_bits - 1)) *
+ min_lockable_sectors(nor, n_sectors) * sector_size;
+}
+
+static uint8_t min_protected_area_including_offset(struct spi_nor *nor,
+ uint32_t offset)
+{
+ uint8_t lock_bits, lockbits_limit;
+
+ /*
+ * Revisit - SST (not used by us) has the same JEDEC ID as micron but
+ * protected area table is similar to that of spansion.
+ * Micron has 4 block protect bits.
+ */
+ lockbits_limit = 7;
+ if (nor->jedec_id == CFI_MFR_ST) /* Micron */
+ lockbits_limit = 15;
+
+ for (lock_bits = 1; lock_bits < lockbits_limit; lock_bits++) {
+ if (offset >= get_protected_area_start(nor, lock_bits))
+ break;
+ }
+ return lock_bits;
+}
+
+static int write_sr_modify_protection(struct spi_nor *nor, uint8_t status,
+ uint8_t lock_bits)
+{
+ uint8_t status_new, bp_mask;
+ u8 val[2];
+
+ status_new = status & ~SR_BP_BIT_MASK;
+ bp_mask = (lock_bits << SR_BP_BIT_OFFSET) & SR_BP_BIT_MASK;
+
+ /* Micron */
+ if (nor->jedec_id == CFI_MFR_ST) {
+ /* To support chips with more than 896 sectors (56MB) */
+ status_new &= ~SR_BP3;
+
+ /* Protected area starts from top */
+ status_new &= ~SR_BP_TB;
+
+ if (lock_bits > 7)
+ bp_mask |= SR_BP3;
+ }
+
+ if (nor->is_lock)
+ status_new |= bp_mask;
+
+ write_enable(nor);
+
+ /* For spansion flashes */
+ if (nor->jedec_id == CFI_MFR_AMD) {
+ val[0] = status_new;
+ val[1] = read_cr(nor);
+ if (write_sr_cr(nor, val) < 0)
+ return 1;
+ } else {
+ if (write_sr(nor, status_new) < 0)
+ return 1;
+ }
+ return 0;
+}
+
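+/* Extract the block-protect bits (BP0-BP3) from a status register value. */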
+static uint8_t bp_bits_from_sr(struct spi_nor *nor, uint8_t status)
+{
+ uint8_t ret;
+
+ ret = (((status) & SR_BP_BIT_MASK) >> SR_BP_BIT_OFFSET);
+ if (nor->jedec_id == CFI_MFR_ST) /* Micron */
+ ret |= ((status & SR_BP3) >> (SR_BP_BIT_OFFSET + 1));
+
+ return ret;
+}
+
/* Write status register and ensure bits in mask match written values */
static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
{
@@ -1618,13 +1887,34 @@ static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
+ uint8_t status;
+ uint8_t lock_bits;
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
if (ret)
return ret;
+ if (nor->isparallel == 1)
+ ofs = ofs >> nor->shift;
+
ret = nor->params.locking_ops->lock(nor, ofs, len);
+ /* Wait until finished previous command */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto err;
+
+ status = read_sr(nor);
+
+ lock_bits = min_protected_area_including_offset(nor, ofs);
+ /* Only modify protection if it will not unlock other areas */
+ if (lock_bits > bp_bits_from_sr(nor, status)) {
+ nor->is_lock = 1;
+ ret = write_sr_modify_protection(nor, status, lock_bits);
+ } else {
+ dev_err(nor->dev, "trying to lock already locked area\n");
+ }
+err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
return ret;
}
@@ -1633,13 +1923,34 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
+ uint8_t status;
+ uint8_t lock_bits;
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
if (ret)
return ret;
+ if (nor->isparallel == 1)
+ ofs = ofs >> nor->shift;
+
ret = nor->params.locking_ops->unlock(nor, ofs, len);
+ /* Wait until finished previous command */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto err;
+
+ status = read_sr(nor);
+ lock_bits = min_protected_area_including_offset(nor, ofs+len) - 1;
+
+ /* Only modify protection if it will not lock other areas */
+ if (lock_bits < bp_bits_from_sr(nor, status)) {
+ nor->is_lock = 0;
+ ret = write_sr_modify_protection(nor, status, lock_bits);
+ } else {
+ dev_err(nor->dev, "trying to unlock already unlocked area\n");
+ }
+err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
return ret;
}
@@ -2239,6 +2550,17 @@ static const struct flash_info spi_nor_ids[] = {
SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
.fixups = &gd25q256_fixups,
},
+ {
+ "mt35xu01g", INFO(0x2c5b1b, 0, 128 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
+ SPI_NOR_OCTAL_WRITE | SPI_NOR_4B_OPCODES)
+ },
+
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
@@ -2246,6 +2568,28 @@ static const struct flash_info spi_nor_ids[] = {
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
/* ISSI */
+ { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp080d", INFO(0x9d7014, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp016d", INFO(0x9d7015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25lp032d", INFO(0x9d6016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp032d", INFO(0x9d7016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25lp064a", INFO(0x9d6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp064a", INFO(0x9d7017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25lp128f", INFO(0x9d6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp128f", INFO(0x9d7018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25lp256d", INFO(0x9d6019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp256d", INFO(0x9d7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK |
+ SPI_NOR_4B_OPCODES) },
+ { "is25lp512m", INFO(0x9d601a, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp512m", INFO(0x9d701a, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK |
+ SPI_NOR_4B_OPCODES) },
{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
{ "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
@@ -2299,6 +2643,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx66u1g45g", INFO(0xc2253b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
/* Micron <--> ST Micro */
@@ -2307,28 +2652,31 @@ static const struct flash_info spi_nor_ids[] = {
{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
+ { "n25q256a", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ | USE_FSR| SPI_NOR_HAS_LOCK) },
+ { "n25q256a13", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
{ "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
- { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K |
- SPI_NOR_QUAD_READ) },
- { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
- { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "n25q512a13", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
+ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | NO_CHIP_ERASE) },
+ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | NO_CHIP_ERASE) },
{ "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
NO_CHIP_ERASE) },
- { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+ { "mt25qu512a (n25q512a)", INFO(0x20bb20, 0, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | NO_CHIP_ERASE) },
+ { "mt25ul02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | NO_CHIP_ERASE) },
/* Micron */
{
"mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
- SPI_NOR_4B_OPCODES)
+ SPI_NOR_OCTAL_WRITE | SPI_NOR_4B_OPCODES)
},
{ "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
@@ -2348,17 +2696,18 @@ static const struct flash_info spi_nor_ids[] = {
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
+ { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | USE_CLSR) },
{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | USE_CLSR) },
{ "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
- { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
- { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, SPI_NOR_HAS_LOCK) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, SPI_NOR_HAS_LOCK) },
+ { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | USE_CLSR) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | USE_CLSR) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
@@ -2376,6 +2725,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "sst26wf016B", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K | SST_GLOBAL_PROT_UNLK) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -2392,6 +2742,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
{ "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K |
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32,
+ SECT_4K | SST_GLOBAL_PROT_UNLK) },
{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
/* ST Microelectronics -- newer production may have feature updates */
@@ -2481,7 +2833,7 @@ static const struct flash_info spi_nor_ids[] = {
},
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
@@ -2531,6 +2883,9 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
return ERR_PTR(tmp);
}
+ for (tmp = 0; tmp < SPI_NOR_MAX_ID_LEN; tmp++)
+ nor->device_id[tmp] = id[tmp];
+
for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
info = &spi_nor_ids[tmp];
if (info->id_len) {
@@ -2547,20 +2902,107 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- ssize_t ret;
+ int ret;
+ u32 offset = from;
+ u32 stack_shift = 0;
+ u32 read_len = 0;
+ u32 rem_bank_len = 0;
+ u8 bank;
+ u8 is_ofst_odd = 0;
+ loff_t addr = 0;
+ u8 cur_bank;
+ u8 nxt_bank;
+ u32 bank_size;
+ u_char *ptr;
+
+#define OFFSET_16_MB 0x1000000
dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+ if ((nor->isparallel) && (offset & 1)) {
+ /* We can hit this case when using filesystems like UBIFS */
+ from = (loff_t)(from - 1);
+ len = (size_t)(len + 1);
+ is_ofst_odd = 1;
+ ptr = kmalloc(len, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ } else {
+ ptr = buf;
+ }
+
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
- if (ret)
- return ret;
+ if (ret) {
+ if (is_ofst_odd == 1)
+ kfree(ptr);
+ return ret;
+ }
while (len) {
- loff_t addr = from;
+ if (nor->addr_width == 3) {
+ bank = (u32)from / (OFFSET_16_MB << nor->shift);
+ rem_bank_len = ((OFFSET_16_MB << nor->shift) *
+ (bank + 1)) - from;
+ }
+ offset = from;
+
+ if (nor->isparallel == 1)
+ offset /= 2;
+
+ if (nor->isstacked == 1) {
+ stack_shift = 1;
+ if (offset >= (mtd->size / 2)) {
+ offset = offset - (mtd->size / 2);
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+ } else {
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+ }
+ }
- addr = spi_nor_convert_addr(nor, addr);
+ if (nor->addr_width == 4) {
+ /*
+ * Some flash devices like N25Q512 have multiple dies
+ * in it. Read operation in these devices is bounded
+ * by its die segment. In a continuous read, across
+ * multiple dies, when the last byte of the selected
+ * die segment is read, the next byte read is the
+ * first byte of the same die segment. This is Die
+ * cross over issue. So to handle this issue, split
+ * a read transaction, that spans across multiple
+ * banks, into one read per bank. Bank size is 16MB
+ * for single and dual stacked mode and 32MB for dual
+ * parallel mode.
+ */
+ if (nor->spi && nor->spi->multi_die) {
+ bank_size = (OFFSET_16_MB << nor->shift);
+ cur_bank = offset / bank_size;
+ nxt_bank = (offset + len) / bank_size;
+ if (cur_bank != nxt_bank)
+ rem_bank_len = (bank_size *
+ (cur_bank + 1)) -
+ offset;
+ else
+ rem_bank_len = (mtd->size >>
+ stack_shift) -
+ (offset << nor->shift);
+ } else {
+ rem_bank_len = (mtd->size >> stack_shift) -
+ (offset << nor->shift);
+ }
+ }
+ if (nor->addr_width == 3)
+ write_ear(nor, offset);
+ if (len < rem_bank_len)
+ read_len = len;
+ else
+ read_len = rem_bank_len;
- ret = spi_nor_read_data(nor, addr, len, buf);
+ /* Wait till previous write/erase is done. */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto read_err;
+
+ addr = spi_nor_convert_addr(nor, offset);
+
+ ret = spi_nor_read_data(nor, addr, read_len, ptr);
if (ret == 0) {
/* We shouldn't see 0-length reads */
ret = -EIO;
@@ -2570,14 +3012,25 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
goto read_err;
WARN_ON(ret > len);
- *retlen += ret;
+ if (is_ofst_odd == 1) {
+ memcpy(buf, (ptr + 1), (len - 1));
+ *retlen += (ret - 1);
+ } else {
+ *retlen += ret;
+ }
buf += ret;
+ if (!is_ofst_odd)
+ ptr += ret;
from += ret;
len -= ret;
}
ret = 0;
read_err:
+
+ if (is_ofst_odd == 1)
+ kfree(ptr);
+
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
return ret;
}
@@ -2672,40 +3125,90 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_nor *nor = mtd_to_spi_nor(mtd);
size_t page_offset, page_remain, i;
ssize_t ret;
+ u32 offset, stack_shift = 0;
+ u8 bank = 0;
+ u32 rem_bank_len = 0;
+
+#define OFFSET_16_MB 0x1000000
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+ /*
+ * Cannot write to odd offset in parallel mode,
+ * so write 2 bytes first
+ */
+ if ((nor->isparallel) && (to & 1)) {
+
+ u8 two[2] = {0xff, buf[0]};
+ size_t local_retlen;
+
+ ret = spi_nor_write(mtd, to & ~1, 2, &local_retlen, two);
+ if (ret < 0)
+ return ret;
+
+ *retlen += 1; /* We've written only one actual byte */
+ ++buf;
+ --len;
+ ++to;
+ }
+
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
if (ret)
return ret;
-
for (i = 0; i < len; ) {
ssize_t written;
loff_t addr = to + i;
+ if (nor->addr_width == 3) {
+ bank = (u32)to / (OFFSET_16_MB << nor->shift);
+ rem_bank_len = ((OFFSET_16_MB << nor->shift) *
+ (bank + 1)) - to;
+ }
- /*
- * If page_size is a power of two, the offset can be quickly
- * calculated with an AND operation. On the other cases we
- * need to do a modulus operation (more expensive).
- * Power of two numbers have only one bit set and we can use
- * the instruction hweight32 to detect if we need to do a
- * modulus (do_div()) or not.
- */
- if (hweight32(nor->page_size) == 1) {
- page_offset = addr & (nor->page_size - 1);
- } else {
- uint64_t aux = addr;
+ page_offset = (to + i) & (nor->page_size - 1);
+
+ offset = (to + i);
+
+ if (nor->isparallel == 1)
+ offset /= 2;
- page_offset = do_div(aux, nor->page_size);
+ if (nor->isstacked == 1) {
+ stack_shift = 1;
+ if (offset >= (mtd->size / 2)) {
+ offset = offset - (mtd->size / 2);
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+ } else {
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+ }
}
- /* the size of data remaining on the first page */
- page_remain = min_t(size_t,
- nor->page_size - page_offset, len - i);
+
+ /* Die cross over issue is not handled */
+ if (nor->addr_width == 4)
+ rem_bank_len = (mtd->size >> stack_shift) - offset;
+ if (nor->addr_width == 3)
+ write_ear(nor, offset);
+ if (nor->isstacked == 1) {
+ if (len <= rem_bank_len) {
+ page_remain = min_t(size_t,
+ nor->page_size - page_offset, len - i);
+ } else {
+ /*
+ * the size of data remaining
+ * on the first page
+ */
+ page_remain = rem_bank_len;
+ }
+ } else {
+ page_remain = min_t(size_t,
+ nor->page_size - page_offset, len - i);
+ }
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto write_err;
addr = spi_nor_convert_addr(nor, addr);
write_enable(nor);
- ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
+ ret = spi_nor_write_data(nor, offset, page_remain, buf + i);
if (ret < 0)
goto write_err;
written = ret;
@@ -2939,7 +3442,7 @@ static int spi_nor_spimem_check_op(struct spi_nor *nor,
*/
op->addr.nbytes = 4;
if (!spi_mem_supports_op(nor->spimem, op)) {
- if (nor->mtd.size > SZ_16M)
+ if ((nor->mtd.size > SZ_16M) && (!nor->isparallel))
return -ENOTSUPP;
/* If flash size <= 16MB, 3 address bytes are sufficient */
@@ -3417,6 +3920,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
}
params->size >>= 3; /* Convert to bytes. */
+
/* Fast Read settings. */
for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
@@ -3433,6 +3937,8 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
}
+ if (params->size > 0x1000000 && nor->addr_width == 3)
+ return -EINVAL;
/*
* Sector Erase settings. Reinitialize the uniform erase map using the
@@ -4291,7 +4797,7 @@ static int spi_nor_select_erase(struct spi_nor *nor)
if (!erase)
return -EINVAL;
nor->erase_opcode = erase->opcode;
- mtd->erasesize = erase->size;
+ mtd->erasesize = erase->size << nor->shift;
return 0;
}
@@ -4309,7 +4815,7 @@ static int spi_nor_select_erase(struct spi_nor *nor)
if (!erase)
return -EINVAL;
- mtd->erasesize = erase->size;
+ mtd->erasesize = erase->size << nor->shift;
return 0;
}
@@ -4340,6 +4846,14 @@ static int spi_nor_default_setup(struct spi_nor *nor,
* Yet another reason to switch to spi-mem.
*/
ignored_mask = SNOR_HWCAPS_X_X_X;
+
+ if (!(nor->flags & SNOR_F_BROKEN_OCTAL_DDR)) {
+ if (hwcaps->mask & SNOR_HWCAPS_READ_8_8_8)
+ ignored_mask &= ~SNOR_HWCAPS_READ_8_8_8;
+ if (hwcaps->mask & SNOR_HWCAPS_PP_8_8_8)
+ ignored_mask &= ~SNOR_HWCAPS_PP_8_8_8;
+ }
+
if (shared_mask & ignored_mask) {
dev_dbg(nor->dev,
"SPI n-n-n protocols are not supported.\n");
@@ -4411,6 +4925,7 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
/* Init flash parameters based on MFR */
switch (JEDEC_MFR(nor->info)) {
case SNOR_MFR_MACRONIX:
+ case SNOR_MFR_ISSI:
macronix_set_default_init(nor);
break;
@@ -4513,12 +5028,35 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
0, 8, SPINOR_OP_READ_1_1_8,
SNOR_PROTO_1_1_8);
+ if (!(nor->flags & SNOR_F_BROKEN_OCTAL_DDR)) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8],
+ 0, 16, SPINOR_OP_READ_1_1_8,
+ SNOR_PROTO_8_8_8);
+ }
}
/* Page Program settings. */
- params->hwcaps.mask |= SNOR_HWCAPS_PP;
- spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
- SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+ if (info->flags & SPI_NOR_OCTAL_WRITE) {
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_8;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_8],
+ SPINOR_OP_PP_1_1_8, SNOR_PROTO_1_1_8);
+ if (!(nor->flags & SNOR_F_BROKEN_OCTAL_DDR)) {
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8],
+ SPINOR_OP_PP_1_1_8,
+ SNOR_PROTO_8_8_8);
+ }
+ }
+ if (nor->spi && (nor->spi->mode & SPI_TX_QUAD)) {
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);
+ } else {
+ params->hwcaps.mask |= SNOR_HWCAPS_PP;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+ }
/*
* Sector Erase settings. Sort Erase Types in ascending order, with the
@@ -4674,6 +5212,28 @@ static int spi_nor_quad_enable(struct spi_nor *nor)
return nor->params.quad_enable(nor);
}
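+
+/*
+ * spi_nor_switch_micron_octal_ddr - enable Octal DDR mode by writing the
+ * Micron volatile configuration register. The program opcode and address
+ * width are temporarily overridden for the register write, then restored.
+ */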
+static int spi_nor_switch_micron_octal_ddr(struct spi_nor *nor)
+{
+ u8 cr = SPINOR_VCR_OCTAL_DDR;
+ int ret;
+ u8 program_opcode;
+
+ program_opcode = nor->program_opcode;
+ write_enable(nor);
+ nor->program_opcode = SPINOR_OP_WRCR;
+ nor->addr_width = 3;
+ ret = nor->write(nor, 0x0, 1, &cr);
+ nor->program_opcode = program_opcode;
+ nor->addr_width = 4;
+ if (ret < 0) {
+ dev_err(nor->dev,
+ "error while writing configuration register\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int spi_nor_init(struct spi_nor *nor)
{
int err;
@@ -4748,13 +5308,60 @@ static const struct flash_info *spi_nor_match_id(const char *name)
static int spi_nor_set_addr_width(struct spi_nor *nor)
{
+ struct device_node *np = spi_nor_get_flash_node(nor);
+ struct device_node *np_spi;
+ struct flash_info *info = (struct flash_info *)nor->info;
+ struct device *dev = nor->dev;
+
if (nor->addr_width) {
/* already configured from SFDP */
} else if (nor->info->addr_width) {
nor->addr_width = nor->info->addr_width;
} else if (nor->mtd.size > 0x1000000) {
- /* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_width = 4;
+#ifdef CONFIG_OF
+ np_spi = of_get_next_parent(np);
+ if (of_property_match_string(np_spi, "compatible",
+ "xlnx,zynq-qspi-1.0") >= 0) {
+ int status;
+
+ nor->addr_width = 3;
+ nor->params.set_4byte(nor, false);
+ status = read_ear(nor, info);
+ if (status < 0)
+ dev_warn(dev, "failed to read ear reg\n");
+ else
+ nor->curbank = status & EAR_SEGMENT_MASK;
+ } else {
+#endif
+ /*
+ * enable 4-byte addressing
+ * if the device exceeds 16MiB
+ */
+ nor->addr_width = 4;
+ if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
+ info->flags & SPI_NOR_4B_OPCODES) {
+ spi_nor_set_4byte_opcodes(nor);
+ } else {
+ np_spi = of_get_next_parent(np);
+ if (of_property_match_string(np_spi,
+ "compatible",
+ "xlnx,xps-spi-2.00.a") >= 0) {
+ nor->addr_width = 3;
+ nor->params.set_4byte(nor, false);
+ } else {
+ nor->params.set_4byte(nor, true);
+ if (nor->isstacked) {
+ nor->spi->master->flags |=
+ SPI_MASTER_U_PAGE;
+ nor->params.set_4byte(nor, true);
+ nor->spi->master->flags &=
+ ~SPI_MASTER_U_PAGE;
+ }
+ }
+ }
+#ifdef CONFIG_OF
+ }
+#endif
} else {
nor->addr_width = 3;
}
@@ -4766,9 +5373,15 @@ static int spi_nor_set_addr_width(struct spi_nor *nor)
}
/* Set 4byte opcodes when possible. */
- if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
- !(nor->flags & SNOR_F_HAS_4BAIT))
- spi_nor_set_4byte_opcodes(nor);
+
+#ifdef CONFIG_OF
+ np_spi = of_get_next_parent(np);
+ if (of_property_match_string(np_spi, "compatible",
+ "xlnx,zynq-qspi-1.0") < 0)
+ if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ !(nor->flags & SNOR_F_HAS_4BAIT))
+ spi_nor_set_4byte_opcodes(nor);
+#endif
return 0;
}
@@ -4789,10 +5402,10 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
const struct flash_info *info = NULL;
if (name)
- info = spi_nor_match_id(name);
+ info = (struct flash_info *)spi_nor_match_id(name);
/* Try to auto-detect if chip name wasn't specified or not found */
if (!info)
- info = spi_nor_read_id(nor);
+ info = (struct flash_info *)spi_nor_read_id(nor);
if (IS_ERR_OR_NULL(info))
return ERR_PTR(-ENOENT);
@@ -4816,7 +5429,7 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
*/
dev_warn(nor->dev, "found %s, expected %s\n",
jinfo->name, info->name);
- info = jinfo;
+ info = (struct flash_info *)jinfo;
}
}
@@ -4826,13 +5439,15 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
- const struct flash_info *info;
+ struct flash_info *info = NULL;
struct device *dev = nor->dev;
struct mtd_info *mtd = &nor->mtd;
struct device_node *np = spi_nor_get_flash_node(nor);
struct spi_nor_flash_parameter *params = &nor->params;
+ struct device_node *np_spi;
int ret;
int i;
+ u32 is_dual;
ret = spi_nor_check(nor);
if (ret)
@@ -4857,7 +5472,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (!nor->bouncebuf)
return -ENOMEM;
- info = spi_nor_get_flash_info(nor, name);
+ info = (struct flash_info *)spi_nor_get_flash_info(nor, name);
if (IS_ERR(info))
return PTR_ERR(info);
@@ -4878,6 +5493,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (info->flags & SPI_NOR_HAS_LOCK)
nor->flags |= SNOR_F_HAS_LOCK;
+ if ((u16)JEDEC_MFR(info) != SNOR_MFR_MICRON)
+ nor->flags |= SNOR_F_BROKEN_OCTAL_DDR;
+
/*
* Atmel, SST, Intel/Numonyx, and other serial NOR flashes tend to power up
* with the software protection bits set.
@@ -4891,6 +5509,25 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
/* Init flash parameters based on flash_info struct and SFDP */
spi_nor_init_params(nor);
+ /*
+ * Atmel, SST, Intel/Numonyx, and other serial NOR flashes tend to
+ * power up with the software protection bits set.
+ */
+
+ if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
+ JEDEC_MFR(info) == SNOR_MFR_INTEL ||
+ JEDEC_MFR(info) == SNOR_MFR_SST ||
+ info->flags & SPI_NOR_HAS_LOCK) {
+ write_enable(nor);
+ write_sr(nor, 0);
+ if (info->flags & SST_GLOBAL_PROT_UNLK) {
+ write_enable(nor);
+ /* Unlock global write protection bits */
+ nor->write_reg(nor, GLOBAL_BLKPROT_UNLK, NULL, 0);
+ }
+ spi_nor_wait_till_ready(nor);
+ }
+
if (!mtd->name)
mtd->name = dev_name(dev);
mtd->priv = nor;
@@ -4900,6 +5537,75 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->size = params->size;
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
+#ifdef CONFIG_OF
+ np_spi = of_get_next_parent(np);
+ if ((of_property_match_string(np_spi, "compatible",
+ "xlnx,zynq-qspi-1.0") >= 0) ||
+ (of_property_match_string(np_spi, "compatible",
+ "xlnx,zynqmp-qspi-1.0") >= 0) ||
+ (of_property_match_string(np_spi, "compatible",
+ "xlnx,versal-qspi-1.0") >= 0)) {
+ if (of_property_read_u32(np_spi, "is-dual",
+ &is_dual) < 0) {
+ /* Default to single if prop not defined */
+ nor->shift = 0;
+ nor->isstacked = 0;
+ nor->isparallel = 0;
+ } else {
+ if (is_dual == 1) {
+ /* dual parallel */
+ nor->shift = 1;
+ info->sector_size <<= nor->shift;
+ info->page_size <<= nor->shift;
+ mtd->size <<= nor->shift;
+ nor->isparallel = 1;
+ nor->isstacked = 0;
+ nor->spi->master->flags |=
+ (SPI_MASTER_DATA_STRIPE
+ | SPI_MASTER_BOTH_CS);
+ } else {
+#ifdef CONFIG_SPI_ZYNQ_QSPI_DUAL_STACKED
+ /* dual stacked */
+ nor->shift = 0;
+ mtd->size <<= 1;
+ info->n_sectors <<= 1;
+ nor->isstacked = 1;
+ nor->isparallel = 0;
+#else
+ u32 is_stacked;
+ if (of_property_read_u32(np_spi,
+ "is-stacked",
+ &is_stacked) < 0) {
+ is_stacked = 0;
+ }
+ if (is_stacked) {
+ /* dual stacked */
+ nor->shift = 0;
+ mtd->size <<= 1;
+ info->n_sectors <<= 1;
+ nor->isstacked = 1;
+ nor->isparallel = 0;
+ } else {
+ /* single */
+ nor->shift = 0;
+ nor->isstacked = 0;
+ nor->isparallel = 0;
+ }
+#endif
+ }
+ }
+ }
+#if 0
+ pr_info("parallel %d stacked %d shift %d mtsize %d\n",
+ nor->isparallel, nor->isstacked, nor->shift, mtd->size);
+#endif
+#else
+ /* Default to single */
+ nor->shift = 0;
+ nor->isstacked = 0;
+ nor->isparallel = 0;
+#endif
+
mtd->_resume = spi_nor_resume;
if (nor->params.locking_ops) {
@@ -4923,9 +5629,22 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (info->flags & USE_CLSR)
nor->flags |= SNOR_F_USE_CLSR;
+ if (nor->shift)
+ mtd->erasesize = info->sector_size;
+
+#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+ /* prefer "small sector" erase if possible */
+ if (nor->shift &&
+ (info->flags & SECT_4K ||
+ info->flags & SECT_4K_PMC)) {
+ mtd->erasesize = 4096 << nor->shift;
+ }
+#endif
+
if (info->flags & SPI_NOR_NO_ERASE)
mtd->flags |= MTD_NO_ERASE;
+ nor->jedec_id = info->id[0];
mtd->dev.parent = dev;
nor->page_size = params->page_size;
mtd->writebufsize = nor->page_size;
@@ -4943,8 +5662,10 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (ret)
return ret;
- if (info->flags & SPI_NOR_4B_OPCODES)
- nor->flags |= SNOR_F_4B_OPCODES;
+ if (of_property_match_string(np_spi, "compatible",
+ "xlnx,zynq-qspi-1.0") < 0)
+ if (info->flags & SPI_NOR_4B_OPCODES)
+ nor->flags |= SNOR_F_4B_OPCODES;
ret = spi_nor_set_addr_width(nor);
if (ret)
@@ -4964,6 +5685,14 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
+ if (hwcaps->mask & (SNOR_HWCAPS_READ_8_8_8 | SNOR_HWCAPS_PP_8_8_8)) {
+ if (!(nor->flags & SNOR_F_BROKEN_OCTAL_DDR)) {
+ ret = spi_nor_switch_micron_octal_ddr(nor);
+ if (ret)
+ return ret;
+ }
+ }
+
if (mtd->numeraseregions)
for (i = 0; i < mtd->numeraseregions; i++)
dev_dbg(dev,
@@ -4997,6 +5726,7 @@ static int spi_nor_probe(struct spi_mem *spimem)
nor->spimem = spimem;
nor->dev = &spi->dev;
+ nor->spi = spi;
spi_nor_set_flash_node(nor, spi->dev.of_node);
spi_mem_set_drvdata(spimem, nor);
@@ -5053,13 +5783,6 @@ static int spi_nor_remove(struct spi_mem *spimem)
return mtd_device_unregister(&nor->mtd);
}
-static void spi_nor_shutdown(struct spi_mem *spimem)
-{
- struct spi_nor *nor = spi_mem_get_drvdata(spimem);
-
- spi_nor_restore(nor);
-}
-
/*
* Do NOT add to this array without reading the following:
*
@@ -5146,6 +5869,18 @@ static struct spi_mem_driver spi_nor_driver = {
};
module_spi_mem_driver(spi_nor_driver);
+void spi_nor_shutdown(struct spi_mem *mem)
+{
+ struct spi_nor *nor = spi_mem_get_drvdata(mem);
+
+ if (nor->addr_width == 3 &&
+ (nor->mtd.size >> nor->shift) > 0x1000000)
+ write_ear(nor, 0);
+
+ spi_nor_restore(nor);
+}
+EXPORT_SYMBOL_GPL(spi_nor_shutdown);
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index df521ff0b328..a7169b0d5ba6 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -467,6 +467,7 @@ static int uif_init(struct ubi_device *ubi)
err = ubi_add_volume(ubi, ubi->volumes[i]);
if (err) {
ubi_err(ubi, "cannot add volume %d", i);
+ ubi->volumes[i] = NULL;
goto out_volumes;
}
}
@@ -660,6 +661,21 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->vid_hdr_aloffset;
}
+ /*
+ * The VID header buffer is allocated with ubi->vid_hdr_alsize
+ * bytes, as described in the comments in io.c.  Make sure the
+ * VID header shift plus UBI_VID_HDR_SIZE does not exceed
+ * ubi->vid_hdr_alsize, so that no VID header operation accesses
+ * memory out of bounds.
+ */
+ if ((ubi->vid_hdr_shift + UBI_VID_HDR_SIZE) > ubi->vid_hdr_alsize) {
+ ubi_err(ubi, "Invalid VID header offset %d, VID header shift(%d)"
+ " + VID header size(%zu) > VID header aligned size(%d).",
+ ubi->vid_hdr_offset, ubi->vid_hdr_shift,
+ UBI_VID_HDR_SIZE, ubi->vid_hdr_alsize);
+ return -EINVAL;
+ }
+
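The added check guards all later VID-header accesses: the header sits at offset vid_hdr_shift inside a buffer of vid_hdr_alsize bytes, so the shift plus the fixed header size must fit. A compact userspace model of the bound (sizes hypothetical):

	#include <stdio.h>

	#define VID_HDR_SIZE 64	/* hypothetical fixed header size */

	/* Reject layouts where the header would run past the buffer. */
	static int check_vid_bounds(int shift, int alsize)
	{
		if (shift + VID_HDR_SIZE > alsize)
			return -1;
		return 0;
	}

	int main(void)
	{
		printf("shift 0, alsize 64: %d\n", check_vid_bounds(0, 64));
		printf("shift 8, alsize 64: %d\n", check_vid_bounds(8, 64));
		return 0;
	}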
/* Similar for the data offset */
ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
@@ -849,6 +865,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
return -EINVAL;
}
+ /* UBI cannot work on flashes with zero erasesize. */
+ if (!mtd->erasesize) {
+ pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n",
+ mtd->index);
+ return -EINVAL;
+ }
+
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 5133e1be5331..b4cdf2351cac 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -599,7 +599,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
int err, pnum, scrub = 0, vol_id = vol->vol_id;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
- uint32_t uninitialized_var(crc);
+ uint32_t crc;
err = leb_read_lock(ubi, vol_id, lnum);
if (err)
@@ -947,7 +947,7 @@ static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
int offset, int len)
{
struct ubi_device *ubi = vol->ubi;
- int pnum, opnum, err, vol_id = vol->vol_id;
+ int pnum, opnum, err, err2, vol_id = vol->vol_id;
pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
@@ -982,10 +982,19 @@ static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
out_put:
up_read(&ubi->fm_eba_sem);
- if (err && pnum >= 0)
- err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
- else if (!err && opnum >= 0)
- err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
+ if (err && pnum >= 0) {
+ err2 = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+ if (err2) {
+ ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
+ pnum, err2);
+ }
+ } else if (!err && opnum >= 0) {
+ err2 = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
+ if (err2) {
+ ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
+ opnum, err2);
+ }
+ }
return err;
}
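The rework above logs failures from the put-back path in a separate err2 instead of assigning them to err, so the error that triggered the cleanup is what the caller finally sees. The keep-the-first-error pattern in miniature (helpers hypothetical):

	#include <stdio.h>

	/* Stand-in for ubi_wl_put_peb(); fails for block 7 in this demo. */
	static int put_block(int pnum)
	{
		return pnum == 7 ? -5 : 0;
	}

	static int write_path(int err, int pnum)
	{
		if (err && pnum >= 0) {
			int err2 = put_block(pnum);

			if (err2)
				fprintf(stderr, "put-back of %d failed: %d\n",
					pnum, err2);	/* log it, keep err */
		}
		return err;	/* original failure is preserved */
	}

	int main(void)
	{
		return write_path(-12, 7) == -12 ? 0 : 1;
	}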
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 6ea95ade4ca6..d79323e8ea29 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -464,7 +464,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
for (i = 0; i < -pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
if (err)
- goto out_acc;
+ goto out_free;
}
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs += pebs;
@@ -512,8 +512,10 @@ out_acc:
ubi->avail_pebs += pebs;
spin_unlock(&ubi->volumes_lock);
}
+ return err;
+
out_free:
- kfree(new_eba_tbl);
+ ubi_eba_destroy_table(new_eba_tbl);
return err;
}
@@ -580,6 +582,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
if (err) {
ubi_err(ubi, "cannot add character device for volume %d, error %d",
vol_id, err);
+ vol_release(&vol->dev);
return err;
}
@@ -590,15 +593,14 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
vol->dev.groups = volume_dev_groups;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
- if (err)
- goto out_cdev;
+ if (err) {
+ cdev_del(&vol->cdev);
+ put_device(&vol->dev);
+ return err;
+ }
self_check_volumes(ubi);
return err;
-
-out_cdev:
- cdev_del(&vol->cdev);
- return err;
}
/**
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 7def041bbe48..fd0e8f948c3d 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -576,6 +576,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
* @vol_id: the volume ID that last used this PEB
* @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
+ * @nested: denotes whether the work_sem is already held
*
* This function returns zero in case of success and a %-ENOMEM in case of
* failure.
@@ -879,8 +880,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
if (err) {
- if (e2)
+ if (e2) {
+ spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e2);
+ spin_unlock(&ubi->wl_lock);
+ }
goto out_ro;
}
@@ -962,11 +966,11 @@ out_error:
spin_lock(&ubi->wl_lock);
ubi->move_from = ubi->move_to = NULL;
ubi->move_to_put = ubi->wl_scheduled = 0;
+ wl_entry_destroy(ubi, e1);
+ wl_entry_destroy(ubi, e2);
spin_unlock(&ubi->wl_lock);
ubi_free_vid_buf(vidb);
- wl_entry_destroy(ubi, e1);
- wl_entry_destroy(ubi, e2);
out_ro:
ubi_ro_mode(ubi);
@@ -1057,8 +1061,6 @@ out_unlock:
* __erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
* @wl_wrk: the work object
- * @shutdown: non-zero if the worker has to free memory and exit
- * because the WL sub-system is shutting down
*
* This function erases a physical eraseblock and performs torture testing if
* needed. It also takes care about marking the physical eraseblock bad if
@@ -1108,16 +1110,20 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
int err1;
/* Re-schedule the LEB for erasure */
- err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
+ err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
if (err1) {
+ spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);
+ spin_unlock(&ubi->wl_lock);
err = err1;
goto out_ro;
}
return err;
}
+ spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);
+ spin_unlock(&ubi->wl_lock);
if (err != -EIO)
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
@@ -1233,6 +1239,18 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
retry:
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
+ if (!e) {
+ /*
+ * This wl entry has already been removed because of an error in
+ * another process (e.g. the wear-leveling worker).  That process
+ * (any except __erase_worker, which cannot run concurrently with
+ * ubi_wl_put_peb) also switches UBI to read-only mode, so just
+ * ignore this wl entry.
+ */
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_protect);
+ return 0;
+ }
if (e == ubi->move_from) {
/*
* User is putting the physical eraseblock which was selected to
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 14a5fb378145..313b636edf23 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -332,7 +332,7 @@ static int __init arc_rimi_init(void)
dev->irq = 9;
if (arcrimi_probe(dev)) {
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
@@ -349,7 +349,7 @@ static void __exit arc_rimi_exit(void)
iounmap(lp->mem_start);
release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
free_irq(dev->irq, dev);
- free_netdev(dev);
+ free_arcdev(dev);
}
#ifndef MODULE
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index b0f5bc07aef5..7b783ee91834 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -186,6 +186,8 @@ do { \
#define ARC_IS_5MBIT 1 /* card default speed is 5MBit */
#define ARC_CAN_10MBIT 2 /* card uses COM20022, supporting 10MBit,
but default is 2.5MBit. */
+#define ARC_HAS_LED 4 /* card has software controlled LEDs */
+#define ARC_HAS_ROTARY 8 /* card has rotary encoder */
/* information needed to define an encapsulation driver */
struct ArcProto {
@@ -298,6 +300,10 @@ struct arcnet_local {
int excnak_pending; /* We just got an excessive NAK interrupt */
+ /* RESET flag handling */
+ int reset_in_progress;
+ struct work_struct reset_work;
+
struct {
uint16_t sequence; /* sequence number (incs with each packet) */
__be16 aborted_seq;
@@ -350,7 +356,9 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc)
void arcnet_unregister_proto(struct ArcProto *proto);
irqreturn_t arcnet_interrupt(int irq, void *dev_id);
+
struct net_device *alloc_arcdev(const char *name);
+void free_arcdev(struct net_device *dev);
int arcnet_open(struct net_device *dev);
int arcnet_close(struct net_device *dev);
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 553776cc1d29..cf652c76af85 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -387,10 +387,44 @@ static void arcnet_timer(struct timer_list *t)
struct arcnet_local *lp = from_timer(lp, t, timer);
struct net_device *dev = lp->dev;
- if (!netif_carrier_ok(dev)) {
+ spin_lock_irq(&lp->lock);
+
+ if (!lp->reset_in_progress && !netif_carrier_ok(dev)) {
netif_carrier_on(dev);
netdev_info(dev, "link up\n");
}
+
+ spin_unlock_irq(&lp->lock);
+}
+
+static void reset_device_work(struct work_struct *work)
+{
+ struct arcnet_local *lp;
+ struct net_device *dev;
+
+ lp = container_of(work, struct arcnet_local, reset_work);
+ dev = lp->dev;
+
+ /* Do not bring the network interface back up if an ifdown
+ * was already done.
+ */
+ if (!netif_running(dev) || !lp->reset_in_progress)
+ return;
+
+ rtnl_lock();
+
+ /* Do another check, in case of an ifdown that was triggered in
+ * the small race window between the exit condition above and
+ * acquiring RTNL.
+ */
+ if (!netif_running(dev) || !lp->reset_in_progress)
+ goto out;
+
+ dev_close(dev);
+ dev_open(dev, NULL);
+
+out:
+ rtnl_unlock();
}
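A full close/open cycle cannot run in interrupt context, so the RESET path now only flags the condition and defers to the worker above, which re-checks the flags after taking RTNL to close the race against a concurrent ifdown. A userspace sketch of that check, lock, re-check shape (locking reduced to comments):

	#include <stdbool.h>
	#include <stdio.h>

	struct dev {
		bool running;
		bool reset_in_progress;
	};

	static void reset_work(struct dev *d)
	{
		/* Cheap early exit without the big lock. */
		if (!d->running || !d->reset_in_progress)
			return;

		/* rtnl_lock() in the driver */

		/* Re-check: an ifdown may have won the race meanwhile. */
		if (d->running && d->reset_in_progress) {
			d->running = false;	/* dev_close() */
			d->running = true;	/* dev_open() */
		}

		/* rtnl_unlock() in the driver */
		printf("reset handled, running=%d\n", d->running);
	}

	int main(void)
	{
		struct dev d = { .running = true, .reset_in_progress = true };

		reset_work(&d);
		return 0;
	}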
static void arcnet_reply_tasklet(unsigned long data)
@@ -434,7 +468,7 @@ static void arcnet_reply_tasklet(unsigned long data)
ret = sock_queue_err_skb(sk, ackskb);
if (ret)
- kfree_skb(ackskb);
+ dev_kfree_skb_irq(ackskb);
local_irq_enable();
};
@@ -452,12 +486,25 @@ struct net_device *alloc_arcdev(const char *name)
lp->dev = dev;
spin_lock_init(&lp->lock);
timer_setup(&lp->timer, arcnet_timer, 0);
+ INIT_WORK(&lp->reset_work, reset_device_work);
}
return dev;
}
EXPORT_SYMBOL(alloc_arcdev);
+void free_arcdev(struct net_device *dev)
+{
+ struct arcnet_local *lp = netdev_priv(dev);
+
+ /* Do not cancel this at ->ndo_close(), as the workqueue itself
+ * indirectly calls the ifdown path through dev_close().
+ */
+ cancel_work_sync(&lp->reset_work);
+ free_netdev(dev);
+}
+EXPORT_SYMBOL(free_arcdev);
+
/* Open/initialize the board. This is called sometime after booting when
* the 'ifconfig' program is run.
*
@@ -587,6 +634,10 @@ int arcnet_close(struct net_device *dev)
/* shut down the card */
lp->hw.close(dev);
+
+ /* reset counters */
+ lp->reset_in_progress = 0;
+
module_put(lp->hw.owner);
return 0;
}
@@ -820,6 +871,9 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
spin_lock_irqsave(&lp->lock, flags);
+ if (lp->reset_in_progress)
+ goto out;
+
/* RESET flag was enabled - if device is not running, we must
* clear it right away (but nothing else).
*/
@@ -852,11 +906,14 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
if (status & RESETflag) {
arc_printk(D_NORMAL, dev, "spurious reset (status=%Xh)\n",
status);
- arcnet_close(dev);
- arcnet_open(dev);
+
+ lp->reset_in_progress = 1;
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ schedule_work(&lp->reset_work);
/* get out of the interrupt handler! */
- break;
+ goto out;
}
/* RX is inhibited - we must have received something.
* Prepare to receive into the next buffer.
@@ -1052,6 +1109,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
udelay(1);
lp->hw.intmask(dev, lp->intmask);
+out:
spin_unlock_irqrestore(&lp->lock, flags);
return retval;
}
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index cd27fdc1059b..141b05451252 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -169,7 +169,7 @@ static int __init com20020_init(void)
dev->irq = 9;
if (com20020isa_probe(dev)) {
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
@@ -182,7 +182,7 @@ static void __exit com20020_exit(void)
unregister_netdev(my_dev);
free_irq(my_dev->irq, my_dev);
release_region(my_dev->base_addr, ARCNET_TOTAL_SIZE);
- free_netdev(my_dev);
+ free_arcdev(my_dev);
}
#ifndef MODULE
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 9f44e2e458df..9d9e4200064f 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -127,6 +127,8 @@ static int com20020pci_probe(struct pci_dev *pdev,
int i, ioaddr, ret;
struct resource *r;
+ ret = 0;
+
if (pci_enable_device(pdev))
return -EIO;
@@ -142,6 +144,8 @@ static int com20020pci_probe(struct pci_dev *pdev,
priv->ci = ci;
mm = &ci->misc_map;
+ pci_set_drvdata(pdev, priv);
+
INIT_LIST_HEAD(&priv->list_dev);
if (mm->size) {
@@ -164,7 +168,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
dev = alloc_arcdev(device);
if (!dev) {
ret = -ENOMEM;
- goto out_port;
+ break;
}
dev->dev_port = i;
@@ -181,7 +185,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
pr_err("IO region %xh-%xh already allocated\n",
ioaddr, ioaddr + cm->size - 1);
ret = -EBUSY;
- goto out_port;
+ goto err_free_arcdev;
}
/* Dummy access after Reset
@@ -209,76 +213,79 @@ static int com20020pci_probe(struct pci_dev *pdev,
if (!strncmp(ci->name, "EAE PLX-PCI FB2", 15))
lp->backplane = 1;
- /* Get the dev_id from the PLX rotary coder */
- if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
- dev_id_mask = 0x3;
- dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
-
- snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
+ if (ci->flags & ARC_HAS_ROTARY) {
+ /* Get the dev_id from the PLX rotary coder */
+ if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
+ dev_id_mask = 0x3;
+ dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
+ snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
+ }
if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
pr_err("IO address %Xh is empty!\n", ioaddr);
ret = -EIO;
- goto out_port;
+ goto err_free_arcdev;
}
if (com20020_check(dev)) {
ret = -EIO;
- goto out_port;
+ goto err_free_arcdev;
}
+ ret = com20020_found(dev, IRQF_SHARED);
+ if (ret)
+ goto err_free_arcdev;
+
card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev),
GFP_KERNEL);
if (!card) {
ret = -ENOMEM;
- goto out_port;
+ goto err_free_arcdev;
}
card->index = i;
card->pci_priv = priv;
- card->tx_led.brightness_set = led_tx_set;
- card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
- GFP_KERNEL, "arc%d-%d-tx",
- dev->dev_id, i);
- card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "pci:green:tx:%d-%d",
- dev->dev_id, i);
-
- card->tx_led.dev = &dev->dev;
- card->recon_led.brightness_set = led_recon_set;
- card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
- GFP_KERNEL, "arc%d-%d-recon",
- dev->dev_id, i);
- card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "pci:red:recon:%d-%d",
- dev->dev_id, i);
- card->recon_led.dev = &dev->dev;
- card->dev = dev;
-
- ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
- if (ret)
- goto out_port;
-
- ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
- if (ret)
- goto out_port;
-
- dev_set_drvdata(&dev->dev, card);
- ret = com20020_found(dev, IRQF_SHARED);
- if (ret)
- goto out_port;
-
- devm_arcnet_led_init(dev, dev->dev_id, i);
+ if (ci->flags & ARC_HAS_LED) {
+ card->tx_led.brightness_set = led_tx_set;
+ card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
+ GFP_KERNEL, "arc%d-%d-tx",
+ dev->dev_id, i);
+ card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "pci:green:tx:%d-%d",
+ dev->dev_id, i);
+
+ card->tx_led.dev = &dev->dev;
+ card->recon_led.brightness_set = led_recon_set;
+ card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
+ GFP_KERNEL, "arc%d-%d-recon",
+ dev->dev_id, i);
+ card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "pci:red:recon:%d-%d",
+ dev->dev_id, i);
+ card->recon_led.dev = &dev->dev;
+
+ ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
+ if (ret)
+ goto err_free_arcdev;
+
+ ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
+ if (ret)
+ goto err_free_arcdev;
+
+ dev_set_drvdata(&dev->dev, card);
+ devm_arcnet_led_init(dev, dev->dev_id, i);
+ }
+ card->dev = dev;
list_add(&card->list, &priv->list_dev);
- }
+ continue;
- pci_set_drvdata(pdev, priv);
-
- return 0;
-
-out_port:
- com20020pci_remove(pdev);
+err_free_arcdev:
+ free_arcdev(dev);
+ break;
+ }
+ if (ret)
+ com20020pci_remove(pdev);
return ret;
}
@@ -294,7 +301,7 @@ static void com20020pci_remove(struct pci_dev *pdev)
unregister_netdev(dev);
free_irq(dev->irq, dev);
- free_netdev(dev);
+ free_arcdev(dev);
}
}
@@ -325,7 +332,7 @@ static struct com20020_pci_card_info card_info_5mbit = {
};
static struct com20020_pci_card_info card_info_sohard = {
- .name = "PLX-PCI",
+ .name = "SOHARD SH ARC-PCI",
.devcount = 1,
/* SOHARD needs PCI base addr 4 */
.chan_map_tbl = {
@@ -360,7 +367,7 @@ static struct com20020_pci_card_info card_info_eae_arc1 = {
},
},
.rotary = 0x0,
- .flags = ARC_CAN_10MBIT,
+ .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
};
static struct com20020_pci_card_info card_info_eae_ma1 = {
@@ -392,7 +399,7 @@ static struct com20020_pci_card_info card_info_eae_ma1 = {
},
},
.rotary = 0x0,
- .flags = ARC_CAN_10MBIT,
+ .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
};
static struct com20020_pci_card_info card_info_eae_fb2 = {
@@ -417,7 +424,7 @@ static struct com20020_pci_card_info card_info_eae_fb2 = {
},
},
.rotary = 0x0,
- .flags = ARC_CAN_10MBIT,
+ .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
};
static const struct pci_device_id com20020pci_id_table[] = {
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index cf607ffcf358..9cc5eb6a8e90 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -177,7 +177,7 @@ static void com20020_detach(struct pcmcia_device *link)
dev = info->dev;
if (dev) {
dev_dbg(&link->dev, "kfree...\n");
- free_netdev(dev);
+ free_arcdev(dev);
}
dev_dbg(&link->dev, "kfree2...\n");
kfree(info);
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 186bbf87bc84..5843b8976fd1 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -396,7 +396,7 @@ static int __init com90io_init(void)
err = com90io_probe(dev);
if (err) {
- free_netdev(dev);
+ free_arcdev(dev);
return err;
}
@@ -419,7 +419,7 @@ static void __exit com90io_exit(void)
free_irq(dev->irq, dev);
release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
- free_netdev(dev);
+ free_arcdev(dev);
}
module_init(com90io_init)
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index bd75d06ad7df..5e40ecf189b1 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -554,7 +554,7 @@ err_free_irq:
err_release_mem:
release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
err_free_dev:
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
@@ -672,7 +672,7 @@ static void __exit com90xx_exit(void)
release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
release_mem_region(dev->mem_start,
dev->mem_end - dev->mem_start + 1);
- free_netdev(dev);
+ free_arcdev(dev);
}
}
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 31ed7616e84e..0c4e6fcac58e 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1529,6 +1529,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
slave_err(bond->dev, port->slave->dev,
"Port %d did not find a suitable aggregator\n",
port->actor_port_number);
+ return;
}
}
/* if all aggregator's ports are READY_N == TRUE, set ready=TRUE
@@ -1997,30 +1998,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
*/
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
{
- /* check that the bond is not initialized yet */
- if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
- bond->dev->dev_addr)) {
-
- BOND_AD_INFO(bond).aggregator_identifier = 0;
-
- BOND_AD_INFO(bond).system.sys_priority =
- bond->params.ad_actor_sys_prio;
- if (is_zero_ether_addr(bond->params.ad_actor_system))
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->dev->dev_addr);
- else
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->params.ad_actor_system);
+ BOND_AD_INFO(bond).aggregator_identifier = 0;
+ BOND_AD_INFO(bond).system.sys_priority =
+ bond->params.ad_actor_sys_prio;
+ if (is_zero_ether_addr(bond->params.ad_actor_system))
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->dev->dev_addr);
+ else
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->params.ad_actor_system);
- /* initialize how many times this module is called in one
- * second (should be about every 100ms)
- */
- ad_ticks_per_sec = tick_resolution;
+ /* initialize how many times this module is called in one
+ * second (should be about every 100ms)
+ */
+ ad_ticks_per_sec = tick_resolution;
- bond_3ad_initiate_agg_selection(bond,
- AD_AGGREGATOR_SELECTION_TIMER *
- ad_ticks_per_sec);
- }
+ bond_3ad_initiate_agg_selection(bond,
+ AD_AGGREGATOR_SELECTION_TIMER *
+ ad_ticks_per_sec);
}
/**
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 8bee935c8f90..342e23e56192 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -656,10 +656,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
return NULL;
arp = (struct arp_pkt *)skb_network_header(skb);
- /* Don't modify or load balance ARPs that do not originate locally
- * (e.g.,arrive via a bridge).
+ /* Don't modify or load balance ARPs that do not originate
+ * from the bond itself or a VLAN directly above the bond.
*/
- if (!bond_slave_has_mac_rx(bond, arp->mac_src))
+ if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
return NULL;
if (arp->op_code == htons(ARPOP_REPLY)) {
@@ -970,7 +970,8 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data)
if (netif_is_macvlan(upper) && !strict_match) {
tags = bond_verify_device_path(bond->dev, upper, 0);
if (IS_ERR_OR_NULL(tags))
- BUG();
+ return -ENOMEM;
+
alb_send_lp_vid(slave, upper->dev_addr,
tags[0].vlan_proto, tags[0].vlan_id);
kfree(tags);
@@ -1360,7 +1361,7 @@ netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
struct bond_up_slave *slaves;
unsigned int count;
- slaves = rcu_dereference(bond->slave_arr);
+ slaves = rcu_dereference(bond->usable_slaves);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count))
tx_slave = slaves->arr[hash_index %
@@ -1494,7 +1495,7 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
struct bond_up_slave *slaves;
unsigned int count;
- slaves = rcu_dereference(bond->slave_arr);
+ slaves = rcu_dereference(bond->usable_slaves);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count))
tx_slave = slaves->arr[bond_xmit_hash(bond, skb) %
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index f3f86ef68ae0..8b6cf2bf9025 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -76,7 +76,7 @@ void bond_debug_reregister(struct bonding *bond)
d = debugfs_rename(bonding_debug_root, bond->debug_dir,
bonding_debug_root, bond->dev->name);
- if (d) {
+ if (!IS_ERR(d)) {
bond->debug_dir = d;
} else {
netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 246bcbd650b4..bb1c6743222e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1144,6 +1144,10 @@ done:
static void bond_setup_by_slave(struct net_device *bond_dev,
struct net_device *slave_dev)
{
+ bool was_up = !!(bond_dev->flags & IFF_UP);
+
+ dev_close(bond_dev);
+
bond_dev->header_ops = slave_dev->header_ops;
bond_dev->type = slave_dev->type;
@@ -1153,6 +1157,13 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
memcpy(bond_dev->broadcast, slave_dev->broadcast,
slave_dev->addr_len);
+
+ if (slave_dev->flags & IFF_POINTOPOINT) {
+ bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
+ }
+ if (was_up)
+ dev_open(bond_dev, NULL);
}
/* On bonding slaves other than the currently active slave, suppress
@@ -2102,12 +2113,21 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
/* called with rcu_read_lock() */
static int bond_miimon_inspect(struct bonding *bond)
{
+ bool ignore_updelay = false;
int link_state, commit = 0;
struct list_head *iter;
struct slave *slave;
- bool ignore_updelay;
- ignore_updelay = !rcu_dereference(bond->curr_active_slave);
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
+ ignore_updelay = !rcu_dereference(bond->curr_active_slave);
+ } else {
+ struct bond_up_slave *usable_slaves;
+
+ usable_slaves = rcu_dereference(bond->usable_slaves);
+
+ if (usable_slaves && usable_slaves->count == 0)
+ ignore_updelay = true;
+ }
bond_for_each_slave_rcu(bond, slave, iter) {
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
@@ -3230,7 +3250,11 @@ static int bond_slave_netdev_event(unsigned long event,
unblock_netpoll_tx();
break;
case NETDEV_FEAT_CHANGE:
- bond_compute_features(bond);
+ if (!bond->notifier_ctx) {
+ bond->notifier_ctx = true;
+ bond_compute_features(bond);
+ bond->notifier_ctx = false;
+ }
break;
case NETDEV_RESEND_IGMP:
/* Propagate to master device */
@@ -4040,6 +4064,29 @@ err:
bond_slave_arr_work_rearm(bond, 1);
}
+static void bond_skip_slave(struct bond_up_slave *slaves,
+ struct slave *skipslave)
+{
+ int idx;
+
+ /* Rare situation where caller has asked to skip a specific
+ * slave but allocation failed (most likely!). BTW this is
+ * only possible when the call is initiated from
+ * __bond_release_one(). In this situation; overwrite the
+ * skipslave entry in the array with the last entry from the
+ * array to avoid a situation where the xmit path may choose
+ * this to-be-skipped slave to send a packet out.
+ */
+ for (idx = 0; slaves && idx < slaves->count; idx++) {
+ if (skipslave == slaves->arr[idx]) {
+ slaves->arr[idx] =
+ slaves->arr[slaves->count - 1];
+ slaves->count--;
+ break;
+ }
+ }
+}
+
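bond_skip_slave() factors out the overwrite-with-last-element removal described in its comment: O(1), order not preserved, which is fine for an unordered transmit array. The same idea as a standalone sketch:

	#include <stdio.h>

	/* Remove one entry from an unordered array by overwriting it
	 * with the last element and shrinking the count. */
	static void skip_entry(int *arr, int *count, int victim)
	{
		int i;

		for (i = 0; i < *count; i++) {
			if (arr[i] == victim) {
				arr[i] = arr[*count - 1];
				(*count)--;
				break;
			}
		}
	}

	int main(void)
	{
		int slaves[] = { 10, 20, 30, 40 };
		int n = 4;
		int i;

		skip_entry(slaves, &n, 20);
		for (i = 0; i < n; i++)
			printf("%d ", slaves[i]);	/* prints: 10 40 30 */
		printf("\n");
		return 0;
	}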
/* Build the usable slaves array in control path for modes that use xmit-hash
* to determine the slave interface -
* (a) BOND_MODE_8023AD
@@ -4050,9 +4097,9 @@ err:
*/
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
{
+ struct bond_up_slave *usable_slaves, *old_usable_slaves;
struct slave *slave;
struct list_head *iter;
- struct bond_up_slave *new_arr, *old_arr;
int agg_id = 0;
int ret = 0;
@@ -4060,11 +4107,10 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
WARN_ON(lockdep_is_held(&bond->mode_lock));
#endif
- new_arr = kzalloc(offsetof(struct bond_up_slave, arr[bond->slave_cnt]),
- GFP_KERNEL);
- if (!new_arr) {
+ usable_slaves = kzalloc(struct_size(usable_slaves, arr,
+ bond->slave_cnt), GFP_KERNEL);
+ if (!usable_slaves) {
ret = -ENOMEM;
- pr_err("Failed to build slave-array.\n");
goto out;
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -4072,14 +4118,14 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
pr_debug("bond_3ad_get_active_agg_info failed\n");
- kfree_rcu(new_arr, rcu);
+ kfree_rcu(usable_slaves, rcu);
* No active aggregator means it's not safe to use
* the previous array.
*/
- old_arr = rtnl_dereference(bond->slave_arr);
- if (old_arr) {
- RCU_INIT_POINTER(bond->slave_arr, NULL);
- kfree_rcu(old_arr, rcu);
+ old_usable_slaves = rtnl_dereference(bond->usable_slaves);
+ if (old_usable_slaves) {
+ RCU_INIT_POINTER(bond->usable_slaves, NULL);
+ kfree_rcu(old_usable_slaves, rcu);
}
goto out;
}
@@ -4099,37 +4145,20 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
continue;
slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
- new_arr->count);
+ usable_slaves->count);
- new_arr->arr[new_arr->count++] = slave;
+ usable_slaves->arr[usable_slaves->count++] = slave;
}
- old_arr = rtnl_dereference(bond->slave_arr);
- rcu_assign_pointer(bond->slave_arr, new_arr);
- if (old_arr)
- kfree_rcu(old_arr, rcu);
+ old_usable_slaves = rtnl_dereference(bond->usable_slaves);
+ rcu_assign_pointer(bond->usable_slaves, usable_slaves);
+ if (old_usable_slaves)
+ kfree_rcu(old_usable_slaves, rcu);
out:
- if (ret != 0 && skipslave) {
- int idx;
-
- /* Rare situation where caller has asked to skip a specific
- * slave but allocation failed (most likely!). BTW this is
- * only possible when the call is initiated from
- * __bond_release_one(). In this situation; overwrite the
- * skipslave entry in the array with the last entry from the
- * array to avoid a situation where the xmit path may choose
- * this to-be-skipped slave to send a packet out.
- */
- old_arr = rtnl_dereference(bond->slave_arr);
- for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
- if (skipslave == old_arr->arr[idx]) {
- old_arr->arr[idx] =
- old_arr->arr[old_arr->count-1];
- old_arr->count--;
- break;
- }
- }
- }
+ if (ret != 0 && skipslave)
+ bond_skip_slave(rtnl_dereference(bond->usable_slaves),
+ skipslave);
+
return ret;
}
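The renamed array is still published with the usual RCU pattern: build the new array, rcu_assign_pointer() it into place, then kfree_rcu() the old one so in-flight readers finish first. A userspace stand-in using an atomic release store (no concurrent readers here, so the free is immediate):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct slave_arr {
		int count;
		int arr[];
	};

	static _Atomic(struct slave_arr *) usable;

	/* Publish next, retire the previous array.  In the kernel the
	 * store is rcu_assign_pointer() and the free is kfree_rcu(),
	 * which waits out a grace period before reclaiming. */
	static void publish(struct slave_arr *next)
	{
		struct slave_arr *old = atomic_load(&usable);

		atomic_store_explicit(&usable, next, memory_order_release);
		free(old);
	}

	int main(void)
	{
		struct slave_arr *a = malloc(sizeof(*a) + sizeof(int));

		a->count = 1;
		a->arr[0] = 42;
		publish(a);
		printf("count=%d\n", atomic_load(&usable)->count);
		publish(NULL);
		return 0;
	}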
@@ -4145,7 +4174,7 @@ static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
struct bond_up_slave *slaves;
unsigned int count;
- slaves = rcu_dereference(bond->slave_arr);
+ slaves = rcu_dereference(bond->usable_slaves);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count)) {
slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
@@ -4428,7 +4457,9 @@ void bond_setup(struct net_device *bond_dev)
bond_dev->hw_features = BOND_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_FILTER;
bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
bond_dev->features |= bond_dev->hw_features;
@@ -4452,9 +4483,9 @@ static void bond_uninit(struct net_device *bond_dev)
__bond_release_one(bond_dev, slave->dev, true, true);
netdev_info(bond_dev, "Released all slaves\n");
- arr = rtnl_dereference(bond->slave_arr);
+ arr = rtnl_dereference(bond->usable_slaves);
if (arr) {
- RCU_INIT_POINTER(bond->slave_arr, NULL);
+ RCU_INIT_POINTER(bond->usable_slaves, NULL);
kfree_rcu(arr, rcu);
}
@@ -4864,6 +4895,8 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
+ bond->notifier_ctx = false;
+
spin_lock_init(&bond->stats_lock);
lockdep_register_key(&bond->stats_lock_key);
lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key);
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index b9047d8110d5..910e8c309a67 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -264,22 +264,24 @@ static int cc770_isa_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev,
"couldn't register device (err=%d)\n", err);
- goto exit_unmap;
+ goto exit_free;
}
dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n",
priv->reg_base, dev->irq);
return 0;
- exit_unmap:
+exit_free:
+ free_cc770dev(dev);
+exit_unmap:
if (mem[idx])
iounmap(base);
- exit_release:
+exit_release:
if (mem[idx])
release_mem_region(mem[idx], iosize);
else
release_region(port[idx], iosize);
- exit:
+exit:
return err;
}
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 322da89cb9c6..05a2c873af56 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -552,7 +552,8 @@ static void can_restart(struct net_device *dev)
struct can_frame *cf;
int err;
- BUG_ON(netif_carrier_ok(dev));
+ if (netif_carrier_ok(dev))
+ netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
/* No synchronization needed because the device is bus-off and
* no messages can come in or go out.
@@ -577,11 +578,12 @@ restart:
priv->can_stats.restarts++;
/* Now restart the device */
- err = priv->do_set_mode(dev, CAN_MODE_START);
-
netif_carrier_on(dev);
- if (err)
+ err = priv->do_set_mode(dev, CAN_MODE_START);
+ if (err) {
netdev_err(dev, "Error %d during restart", err);
+ netif_carrier_off(dev);
+ }
}
static void can_restart_work(struct work_struct *work)
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index a761092e6ac9..f929db893957 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1451,7 +1451,7 @@ static int ican3_napi(struct napi_struct *napi, int budget)
/* process all communication messages */
while (true) {
- struct ican3_msg uninitialized_var(msg);
+ struct ican3_msg msg;
ret = ican3_recv_msg(mod, &msg);
if (ret)
break;
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index faa78d38d752..560a0a5ba6f3 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -70,10 +70,12 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
/* Shared receive buffer registers */
#define KVASER_PCIEFD_SRB_BASE 0x1f200
+#define KVASER_PCIEFD_SRB_FIFO_LAST_REG (KVASER_PCIEFD_SRB_BASE + 0x1f4)
#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
+#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214)
#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
/* EPCS flash controller registers */
#define KVASER_PCIEFD_SPI_BASE 0x1fc00
@@ -110,6 +112,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
/* DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
+/* SRB current packet level */
+#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK 0xff
+
/* DMA Enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
@@ -528,7 +533,7 @@ static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
- KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;
+ KVASER_PCIEFD_KCAN_IRQ_TAR;
iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
@@ -556,6 +561,8 @@ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
+ else
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM;
mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
@@ -574,7 +581,7 @@ static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
spin_lock_irqsave(&can->lock, irq);
iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
- iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
@@ -617,7 +624,7 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
- iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
@@ -719,6 +726,7 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
del_timer(&can->bec_poll_timer);
}
+ can->can.state = CAN_STATE_STOPPED;
close_candev(netdev);
return ret;
@@ -1001,8 +1009,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
SET_NETDEV_DEV(netdev, &pcie->pci->dev);
iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
- iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
- KVASER_PCIEFD_KCAN_IRQ_TFD,
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
pcie->can[i] = can;
@@ -1052,6 +1059,7 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
int i;
u32 srb_status;
+ u32 srb_packet_count;
dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
/* Disable the DMA */
@@ -1079,6 +1087,15 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
KVASER_PCIEFD_SRB_CMD_RDB1,
pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ /* Empty Rx FIFO */
+ srb_packet_count = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG) &
+ KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK;
+ while (srb_packet_count) {
+ /* Drop current packet in FIFO */
+ ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
+ srb_packet_count--;
+ }
+
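The drain loop added above snapshots the packet-count register once and then pops exactly that many entries by reading the FIFO-last register, leaving the buffer empty before DMA is enabled. A toy model of read-to-pop draining (register behavior faked):

	#include <stdint.h>
	#include <stdio.h>

	#define PKT_COUNT_MASK 0xff

	static uint32_t fifo_level = 5;	/* fake device state */

	/* Reading the FIFO register pops one packet, like ioread32()
	 * on the hardware's FIFO-last register. */
	static uint32_t read_fifo_last(void)
	{
		return fifo_level ? fifo_level-- : 0;
	}

	static uint32_t read_pkt_count(void)
	{
		return fifo_level & PKT_COUNT_MASK;
	}

	int main(void)
	{
		uint32_t n = read_pkt_count();

		while (n--)
			(void)read_fifo_last();
		printf("level after drain: %u\n", read_pkt_count());
		return 0;
	}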
srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
@@ -1421,9 +1438,6 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
cmd = KVASER_PCIEFD_KCAN_CMD_AT;
cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
-
- iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
- can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
@@ -1712,15 +1726,6 @@ static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
netdev_err(can->can.dev, "Tx FIFO overflow\n");
- if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
- u8 count = ioread32(can->reg_base +
- KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
-
- if (count == 0)
- iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
- can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
- }
-
if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
netdev_err(can->can.dev,
"Fail to change bittiming, when not in reset mode\n");
@@ -1822,6 +1827,11 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
if (err)
goto err_teardown_can_ctrls;
+ err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
+ IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
+ if (err)
+ goto err_teardown_can_ctrls;
+
iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
@@ -1842,11 +1852,6 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
- err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
- IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
- if (err)
- goto err_teardown_can_ctrls;
-
err = kvaser_pciefd_reg_candev(pcie);
if (err)
goto err_free_irq;
@@ -1854,6 +1859,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
return 0;
err_free_irq:
+ /* Disable PCI interrupts */
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
free_irq(pcie->pci->irq, pcie);
err_teardown_can_ctrls:
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
index 0d66582bd356..b312cbf30df7 100644
--- a/drivers/net/can/m_can/tcan4x5x.c
+++ b/drivers/net/can/m_can/tcan4x5x.c
@@ -291,11 +291,6 @@ static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev)
if (ret)
return ret;
- ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_MCAN_INT_REG,
- TCAN4X5X_ENABLE_MCAN_INT);
- if (ret)
- return ret;
-
ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS,
TCAN4X5X_CLEAR_ALL_INT);
if (ret)
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index e4f4b5c9ebd6..3305d9d6c466 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -325,14 +325,14 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
&mscan_clksrc);
if (!priv->can.clock.freq) {
dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
- goto exit_free_mscan;
+ goto exit_put_clock;
}
err = register_mscandev(dev, mscan_clksrc);
if (err) {
dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
- goto exit_free_mscan;
+ goto exit_put_clock;
}
dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
@@ -340,7 +340,9 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
return 0;
-exit_free_mscan:
+exit_put_clock:
+ if (data->put_clock)
+ data->put_clock(ofdev);
free_candev(dev);
exit_dispose_irq:
irq_dispose_mapping(irq);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index e90651f7b2ea..586bda050d28 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -489,6 +489,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
if (!skb)
return;
+ errc = ioread32(&priv->regs->errc);
if (status & PCH_BUS_OFF) {
pch_can_set_tx_all(priv, 0);
pch_can_set_rx_all(priv, 0);
@@ -496,9 +497,11 @@ static void pch_can_error(struct net_device *ndev, u32 status)
cf->can_id |= CAN_ERR_BUSOFF;
priv->can.can_stats.bus_off++;
can_bus_off(ndev);
+ } else {
+ cf->data[6] = errc & PCH_TEC;
+ cf->data[7] = (errc & PCH_REC) >> 8;
}
- errc = ioread32(&priv->regs->errc);
/* Warning interrupt. */
if (status & PCH_EWARN) {
state = CAN_STATE_ERROR_WARNING;
@@ -556,9 +559,6 @@ static void pch_can_error(struct net_device *ndev, u32 status)
break;
}
- cf->data[6] = errc & PCH_TEC;
- cf->data[7] = (errc & PCH_REC) >> 8;
-
priv->can.state = state;
netif_receive_skb(skb);
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 45963d04a2e5..8d9e3210da7e 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -236,11 +236,8 @@ static void rcar_can_error(struct net_device *ndev)
if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) {
txerr = readb(&priv->regs->tecr);
rxerr = readb(&priv->regs->recr);
- if (skb) {
+ if (skb)
cf->can_id |= CAN_ERR_CRTL;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
}
if (eifr & RCAR_CAN_EIFR_BEIF) {
int rx_errors = 0, tx_errors = 0;
@@ -340,6 +337,9 @@ static void rcar_can_error(struct net_device *ndev)
can_bus_off(ndev);
if (skb)
cf->can_id |= CAN_ERR_BUSOFF;
+ } else if (skb) {
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
}
if (eifr & RCAR_CAN_EIFR_ORIF) {
netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index d4e9815ca26f..eed69e7e95ad 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1075,7 +1075,7 @@ static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id)
struct rcar_canfd_global *gpriv = dev_id;
struct net_device *ndev;
struct rcar_canfd_channel *priv;
- u32 sts, gerfl;
+ u32 sts, cc, gerfl;
u32 ch, ridx;
/* Global error interrupts still indicate a condition specific
@@ -1093,7 +1093,9 @@ static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id)
/* Handle Rx interrupts */
sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
- if (likely(sts & RCANFD_RFSTS_RFIF)) {
+ cc = rcar_canfd_read(priv->base, RCANFD_RFCC(ridx));
+ if (likely(sts & RCANFD_RFSTS_RFIF &&
+ cc & RCANFD_RFCC_RFIE)) {
if (napi_schedule_prep(&priv->napi)) {
/* Disable Rx FIFO interrupts */
rcar_canfd_clear_bit(priv->base,
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 9f107798f904..e7327ceabb76 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -405,9 +405,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
txerr = priv->read_reg(priv, SJA1000_TXERR);
rxerr = priv->read_reg(priv, SJA1000_RXERR);
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
-
if (isrc & IRQ_DOI) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
@@ -429,6 +426,10 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
+ if (state != CAN_STATE_BUS_OFF) {
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
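Like the pch_can, rcar_can, hi311x, and sun4i changes in this series, the hunk above fills the error counters into data[6]/data[7] only when the controller is not bus-off, since the counters are not meaningful in that state. The guard in isolation:

	#include <stdint.h>
	#include <stdio.h>

	enum can_state { ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE, BUS_OFF };

	/* Report TEC/REC in the error frame payload only while the
	 * controller is still on the bus. */
	static void fill_err_counters(uint8_t *data, enum can_state state,
				      uint8_t txerr, uint8_t rxerr)
	{
		if (state != BUS_OFF) {
			data[6] = txerr;
			data[7] = rxerr;
		}
	}

	int main(void)
	{
		uint8_t d[8] = { 0 };

		fill_err_counters(d, ERROR_PASSIVE, 130, 5);
		printf("tx=%u rx=%u\n", d[6], d[7]);
		return 0;
	}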
if (isrc & IRQ_BEI) {
/* bus error interrupt */
priv->can.can_stats.bus_error++;
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 1c4d32d1a542..fe8441be716b 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -202,22 +202,24 @@ static int sja1000_isa_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
- goto exit_unmap;
+ goto exit_free;
}
dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n",
DRV_NAME, priv->reg_base, dev->irq);
return 0;
- exit_unmap:
+exit_free:
+ free_sja1000dev(dev);
+exit_unmap:
if (mem[idx])
iounmap(base);
- exit_release:
+exit_release:
if (mem[idx])
release_mem_region(mem[idx], iosize);
else
release_region(port[idx], iosize);
- exit:
+exit:
return err;
}
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 7d2315c8cacb..28273e84171a 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -670,8 +670,6 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
txerr = hi3110_read(spi, HI3110_READ_TEC);
rxerr = hi3110_read(spi, HI3110_READ_REC);
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
tx_state = txerr >= rxerr ? new_state : 0;
rx_state = txerr <= rxerr ? new_state : 0;
can_change_state(net, cf, tx_state, rx_state);
@@ -684,6 +682,9 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
hi3110_hw_sleep(spi);
break;
}
+ } else {
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
}
}
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index bb20a9b75cc6..32bd593a39f1 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -756,9 +756,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
- /* mask out flags we don't care about */
- intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
-
/* receive buffer 0 */
if (intf & CANINTF_RX0IF) {
mcp251x_hw_rx(spi, 0);
@@ -768,6 +765,18 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (mcp251x_is_2510(spi))
mcp251x_write_bits(spi, CANINTF,
CANINTF_RX0IF, 0x00);
+
+ /* check if buffer 1 is already known to be full, no need to re-read */
+ if (!(intf & CANINTF_RX1IF)) {
+ u8 intf1, eflag1;
+
+ /* intf needs to be read again to avoid a race condition */
+ mcp251x_read_2regs(spi, CANINTF, &intf1, &eflag1);
+
+ /* combine flags from both operations for error handling */
+ intf |= intf1;
+ eflag |= eflag1;
+ }
}
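The RX0 handler above closes a window where an RX1 or error flag raised after the first CANINTF read would be masked away: it re-reads the flags and ORs both snapshots together before the masking happens. A minimal model of the merge:

	#include <stdint.h>
	#include <stdio.h>

	#define RX0IF 0x01
	#define RX1IF 0x02

	static uint8_t hw_intf = RX0IF;	/* fake CANINTF register */

	static void read_flags(uint8_t *intf)
	{
		*intf = hw_intf;
		hw_intf = RX1IF;	/* RX1 arrives right after the read */
	}

	int main(void)
	{
		uint8_t intf, intf1;

		read_flags(&intf);
		if (!(intf & RX1IF)) {		/* RX1 not seen yet? */
			read_flags(&intf1);	/* re-read to close the race */
			intf |= intf1;		/* merge both snapshots */
		}
		printf("intf=0x%02x\n", intf);	/* 0x03: nothing lost */
		return 0;
	}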
/* receive buffer 1 */
@@ -778,6 +787,9 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
clear_intf |= CANINTF_RX1IF;
}
+ /* mask out flags we don't care about */
+ intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
+
/* any error or tx interrupt we need to clear? */
if (intf & (CANINTF_ERR | CANINTF_TX))
clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index f4cd88196404..c519b6f63b33 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -525,11 +525,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
rxerr = (errc >> 16) & 0xFF;
txerr = errc & 0xFF;
- if (skb) {
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
-
if (isrc & SUN4I_INT_DATA_OR) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
@@ -560,6 +555,10 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
+ if (skb && state != CAN_STATE_BUS_OFF) {
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
if (isrc & SUN4I_INT_BUS_ERR) {
/* bus error interrupt */
netdev_dbg(dev, "bus error interrupt\n");
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 6458da9c13b9..ff05b5230f0b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -194,7 +194,7 @@ struct __packed ems_cpc_msg {
__le32 ts_sec; /* timestamp in seconds */
__le32 ts_nsec; /* timestamp in nano seconds */
- union {
+ union __packed {
u8 generic[64];
struct cpc_can_msg can_msg;
struct cpc_can_params can_params;
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 8847942a8d97..c9ccce6c60b4 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -227,6 +227,10 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
u8 rxerr = msg->msg.rx.data[2];
u8 txerr = msg->msg.rx.data[3];
+ netdev_dbg(priv->netdev,
+ "CAN_ERR_EV_EXT: dlc=%#02x state=%02x ecc=%02x rec=%02x tec=%02x\n",
+ msg->msg.rx.dlc, state, ecc, rxerr, txerr);
+
skb = alloc_can_err_skb(priv->netdev, &cf);
if (skb == NULL) {
stats->rx_dropped++;
@@ -253,6 +257,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
break;
default:
priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ txerr = 0;
+ rxerr = 0;
break;
}
} else {
@@ -272,7 +278,6 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
default:
- cf->data[3] = ecc & SJA1000_ECC_SEG;
break;
}
@@ -280,6 +285,9 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
if (!(ecc & SJA1000_ECC_DIR))
cf->data[2] |= CAN_ERR_PROT_TX;
+ /* Bit stream position in the CAN frame where the error was detected */
+ cf->data[3] = ecc & SJA1000_ECC_SEG;
+
if (priv->can.state == CAN_STATE_ERROR_WARNING ||
priv->can.state == CAN_STATE_ERROR_PASSIVE) {
cf->data[1] = (txerr > rxerr) ?
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index bf4ab30186af..1a24c1d9dd8f 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -381,6 +381,9 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
}
if (hf->flags & GS_CAN_FLAG_OVERFLOW) {
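+ /* Count the overflow even if the error skb allocation below fails */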
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
skb = alloc_can_err_skb(netdev, &cf);
if (!skb)
goto resubmit_urb;
@@ -388,8 +391,6 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
cf->can_id |= CAN_ERR_CRTL;
cf->can_dlc = CAN_ERR_DLC;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- stats->rx_over_errors++;
- stats->rx_errors++;
netif_rx(skb);
}
@@ -678,6 +679,7 @@ static int gs_can_open(struct net_device *netdev)
flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
/* finally start device */
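+ /* The device may report events right after starting; set the state first */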
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
dm->mode = cpu_to_le32(GS_CAN_MODE_START);
dm->flags = cpu_to_le32(flags);
rc = usb_control_msg(interface_to_usbdev(dev->iface),
@@ -694,13 +696,12 @@ static int gs_can_open(struct net_device *netdev)
if (rc < 0) {
netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
kfree(dm);
+ dev->can.state = CAN_STATE_STOPPED;
return rc;
}
kfree(dm);
- dev->can.state = CAN_STATE_ERROR_ACTIVE;
-
parent->active_channels++;
if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(netdev);
@@ -732,6 +733,8 @@ static int gs_can_close(struct net_device *netdev)
usb_kill_anchored_urbs(&dev->tx_submitted);
atomic_set(&dev->active_tx_urbs, 0);
+ dev->can.state = CAN_STATE_STOPPED;
+
/* reset the device */
rc = gs_cmd_reset(dev);
if (rc < 0)
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index 61e67986b625..5699531f8787 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -76,6 +76,14 @@ struct kvaser_usb_tx_urb_context {
int dlc;
};
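+/* Bus timing parameters in the layout expected by the device firmware */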
+struct kvaser_usb_busparams {
+ __le32 bitrate;
+ u8 tseg1;
+ u8 tseg2;
+ u8 sjw;
+ u8 nsamples;
+} __packed;
+
struct kvaser_usb {
struct usb_device *udev;
struct usb_interface *intf;
@@ -104,13 +112,19 @@ struct kvaser_usb_net_priv {
struct can_priv can;
struct can_berr_counter bec;
+ /* subdriver-specific data */
+ void *sub_priv;
+
struct kvaser_usb *dev;
struct net_device *netdev;
int channel;
- struct completion start_comp, stop_comp, flush_comp;
+ struct completion start_comp, stop_comp, flush_comp,
+ get_busparams_comp;
struct usb_anchor tx_submitted;
+ struct kvaser_usb_busparams busparams_nominal, busparams_data;
+
spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */
int active_tx_contexts;
struct kvaser_usb_tx_urb_context tx_contexts[];
@@ -120,11 +134,15 @@ struct kvaser_usb_net_priv {
* struct kvaser_usb_dev_ops - Device specific functions
* @dev_set_mode: used for can.do_set_mode
* @dev_set_bittiming: used for can.do_set_bittiming
+ * @dev_get_busparams: readback arbitration busparams
* @dev_set_data_bittiming: used for can.do_set_data_bittiming
+ * @dev_get_data_busparams: readback data busparams
* @dev_get_berr_counter: used for can.do_get_berr_counter
*
* @dev_setup_endpoints: setup USB in and out endpoints
* @dev_init_card: initialize card
+ * @dev_init_channel: initialize channel
+ * @dev_remove_channel: uninitialize channel
* @dev_get_software_info: get software info
* @dev_get_software_details: get software details
* @dev_get_card_info: get card info
@@ -140,12 +158,18 @@ struct kvaser_usb_net_priv {
*/
struct kvaser_usb_dev_ops {
int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode);
- int (*dev_set_bittiming)(struct net_device *netdev);
- int (*dev_set_data_bittiming)(struct net_device *netdev);
+ int (*dev_set_bittiming)(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams);
+ int (*dev_get_busparams)(struct kvaser_usb_net_priv *priv);
+ int (*dev_set_data_bittiming)(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams);
+ int (*dev_get_data_busparams)(struct kvaser_usb_net_priv *priv);
int (*dev_get_berr_counter)(const struct net_device *netdev,
struct can_berr_counter *bec);
int (*dev_setup_endpoints)(struct kvaser_usb *dev);
int (*dev_init_card)(struct kvaser_usb *dev);
+ int (*dev_init_channel)(struct kvaser_usb_net_priv *priv);
+ void (*dev_remove_channel)(struct kvaser_usb_net_priv *priv);
int (*dev_get_software_info)(struct kvaser_usb *dev);
int (*dev_get_software_details)(struct kvaser_usb *dev);
int (*dev_get_card_info)(struct kvaser_usb *dev);
@@ -178,6 +202,8 @@ struct kvaser_usb_dev_cfg {
extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv);
+
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
int *actual_len);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 416763fd1f11..1f015b496a47 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -416,10 +416,6 @@ static int kvaser_usb_open(struct net_device *netdev)
if (err)
return err;
- err = kvaser_usb_setup_rx_urbs(dev);
- if (err)
- goto error;
-
err = ops->dev_set_opt_mode(priv);
if (err)
goto error;
@@ -453,7 +449,7 @@ static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
/* This method might sleep. Do not call it in the atomic context
* of URB completions.
*/
-static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
{
usb_kill_anchored_urbs(&priv->tx_submitted);
kvaser_usb_reset_tx_urb_contexts(priv);
@@ -510,6 +506,93 @@ static int kvaser_usb_close(struct net_device *netdev)
return 0;
}
+static int kvaser_usb_set_bittiming(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
+ struct can_bittiming *bt = &priv->can.bittiming;
+
+ struct kvaser_usb_busparams busparams;
+ int tseg1 = bt->prop_seg + bt->phase_seg1;
+ int tseg2 = bt->phase_seg2;
+ int sjw = bt->sjw;
+ int err = -EOPNOTSUPP;
+
+ busparams.bitrate = cpu_to_le32(bt->bitrate);
+ busparams.sjw = (u8)sjw;
+ busparams.tseg1 = (u8)tseg1;
+ busparams.tseg2 = (u8)tseg2;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ busparams.nsamples = 3;
+ else
+ busparams.nsamples = 1;
+
+ err = ops->dev_set_bittiming(netdev, &busparams);
+ if (err)
+ return err;
+
+ err = kvaser_usb_setup_rx_urbs(priv->dev);
+ if (err)
+ return err;
+
+ err = ops->dev_get_busparams(priv);
+ if (err) {
+ /* Treat EOPNOTSUPP as success */
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ return err;
+ }
+
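+ /* Check that the device's readback matches the parameters we requested */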
+ if (memcmp(&busparams, &priv->busparams_nominal,
+ sizeof(priv->busparams_nominal)) != 0)
+ err = -EINVAL;
+
+ return err;
+}
+
+static int kvaser_usb_set_data_bittiming(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+
+ struct kvaser_usb_busparams busparams;
+ int tseg1 = dbt->prop_seg + dbt->phase_seg1;
+ int tseg2 = dbt->phase_seg2;
+ int sjw = dbt->sjw;
+ int err;
+
+ if (!ops->dev_set_data_bittiming ||
+ !ops->dev_get_data_busparams)
+ return -EOPNOTSUPP;
+
+ busparams.bitrate = cpu_to_le32(dbt->bitrate);
+ busparams.sjw = (u8)sjw;
+ busparams.tseg1 = (u8)tseg1;
+ busparams.tseg2 = (u8)tseg2;
+ busparams.nsamples = 1;
+
+ err = ops->dev_set_data_bittiming(netdev, &busparams);
+ if (err)
+ return err;
+
+ err = kvaser_usb_setup_rx_urbs(priv->dev);
+ if (err)
+ return err;
+
+ err = ops->dev_get_data_busparams(priv);
+ if (err)
+ return err;
+
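+ /* Check that the device accepted the data-phase parameters */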
+ if (memcmp(&busparams, &priv->busparams_data,
+ sizeof(priv->busparams_data)) != 0)
+ err = -EINVAL;
+
+ return err;
+}
+
static void kvaser_usb_write_bulk_callback(struct urb *urb)
{
struct kvaser_usb_tx_urb_context *context = urb->context;
@@ -645,6 +728,7 @@ static const struct net_device_ops kvaser_usb_netdev_ops = {
static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
{
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int i;
for (i = 0; i < dev->nchannels; i++) {
@@ -660,6 +744,9 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
if (!dev->nets[i])
continue;
+ if (ops->dev_remove_channel)
+ ops->dev_remove_channel(dev->nets[i]);
+
free_candev(dev->nets[i]->netdev);
}
}
@@ -690,6 +777,8 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
init_usb_anchor(&priv->tx_submitted);
init_completion(&priv->start_comp);
init_completion(&priv->stop_comp);
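+ /* Initialize all completions once here; users only reinit_completion() later */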
+ init_completion(&priv->flush_comp);
+ init_completion(&priv->get_busparams_comp);
priv->can.ctrlmode_supported = 0;
priv->dev = dev;
@@ -702,7 +791,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
priv->can.state = CAN_STATE_STOPPED;
priv->can.clock.freq = dev->cfg->clock.freq;
priv->can.bittiming_const = dev->cfg->bittiming_const;
- priv->can.do_set_bittiming = ops->dev_set_bittiming;
+ priv->can.do_set_bittiming = kvaser_usb_set_bittiming;
priv->can.do_set_mode = ops->dev_set_mode;
if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) ||
(priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
@@ -714,7 +803,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
- priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming;
+ priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming;
}
netdev->flags |= IFF_ECHO;
@@ -726,17 +815,26 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
dev->nets[channel] = priv;
+ if (ops->dev_init_channel) {
+ err = ops->dev_init_channel(priv);
+ if (err)
+ goto err;
+ }
+
err = register_candev(netdev);
if (err) {
dev_err(&dev->intf->dev, "Failed to register CAN device\n");
- free_candev(netdev);
- dev->nets[channel] = NULL;
- return err;
+ goto err;
}
netdev_dbg(netdev, "device registered\n");
return 0;
+
+err:
+ free_candev(netdev);
+ dev->nets[channel] = NULL;
+ return err;
}
static int kvaser_usb_probe(struct usb_interface *intf,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index a7c408acb0c0..233bbfeaa771 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -43,6 +43,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc;
/* Minihydra command IDs */
#define CMD_SET_BUSPARAMS_REQ 16
+#define CMD_GET_BUSPARAMS_REQ 17
+#define CMD_GET_BUSPARAMS_RESP 18
#define CMD_GET_CHIP_STATE_REQ 19
#define CMD_CHIP_STATE_EVENT 20
#define CMD_SET_DRIVERMODE_REQ 21
@@ -193,21 +195,26 @@ struct kvaser_cmd_chip_state_event {
#define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01
#define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02
struct kvaser_cmd_set_busparams {
- __le32 bitrate;
- u8 tseg1;
- u8 tseg2;
- u8 sjw;
- u8 nsamples;
+ struct kvaser_usb_busparams busparams_nominal;
u8 reserved0[4];
- __le32 bitrate_d;
- u8 tseg1_d;
- u8 tseg2_d;
- u8 sjw_d;
- u8 nsamples_d;
+ struct kvaser_usb_busparams busparams_data;
u8 canfd_mode;
u8 reserved1[7];
} __packed;
+/* Busparam type */
+#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN 0x00
+#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD 0x01
+struct kvaser_cmd_get_busparams_req {
+ u8 type;
+ u8 reserved[27];
+} __packed;
+
+struct kvaser_cmd_get_busparams_res {
+ struct kvaser_usb_busparams busparams;
+ u8 reserved[20];
+} __packed;
+
/* Ctrl modes */
#define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01
#define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02
@@ -278,6 +285,8 @@ struct kvaser_cmd {
struct kvaser_cmd_error_event error_event;
struct kvaser_cmd_set_busparams set_busparams_req;
+ struct kvaser_cmd_get_busparams_req get_busparams_req;
+ struct kvaser_cmd_get_busparams_res get_busparams_res;
struct kvaser_cmd_chip_state_event chip_state_event;
@@ -293,6 +302,7 @@ struct kvaser_cmd {
#define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1)
#define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4)
#define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5)
+#define KVASER_USB_HYDRA_CF_FLAG_TX_ACK BIT(6)
/* CAN frame flags. Used in ext_rx_can and ext_tx_can */
#define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12)
#define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13)
@@ -359,6 +369,10 @@ struct kvaser_cmd_ext {
} __packed;
} __packed;
+struct kvaser_usb_net_hydra_priv {
+ int pending_get_busparams_type;
+};
+
static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
.name = "kvaser_usb_kcan",
.tseg1_min = 1,
@@ -504,6 +518,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
u8 cmd_no, int channel)
{
struct kvaser_cmd *cmd;
+ size_t cmd_len;
int err;
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
@@ -511,6 +526,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
return -ENOMEM;
cmd->header.cmd_no = cmd_no;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
if (channel < 0) {
kvaser_usb_hydra_set_cmd_dest_he
(cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
@@ -527,7 +543,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err)
goto end;
@@ -543,6 +559,7 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
{
struct kvaser_cmd *cmd;
struct kvaser_usb *dev = priv->dev;
+ size_t cmd_len;
int err;
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
@@ -550,14 +567,14 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
return -ENOMEM;
cmd->header.cmd_no = cmd_no;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd_async(priv, cmd,
- kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd_async(priv, cmd, cmd_len);
if (err)
kfree(cmd);
@@ -701,6 +718,7 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
{
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
struct kvaser_cmd *cmd;
+ size_t cmd_len;
u32 value = 0;
u32 mask = 0;
u16 cap_cmd_res;
@@ -712,13 +730,14 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
return -ENOMEM;
cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he);
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err)
goto end;
@@ -812,6 +831,39 @@ static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev,
complete(&priv->flush_comp);
}
+static void kvaser_usb_hydra_get_busparams_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ struct kvaser_usb_net_hydra_priv *hydra;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ hydra = priv->sub_priv;
+ if (!hydra)
+ return;
+
+ switch (hydra->pending_get_busparams_type) {
+ case KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN:
+ memcpy(&priv->busparams_nominal, &cmd->get_busparams_res.busparams,
+ sizeof(priv->busparams_nominal));
+ break;
+ case KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD:
+ memcpy(&priv->busparams_data, &cmd->get_busparams_res.busparams,
+ sizeof(priv->busparams_data));
+ break;
+ default:
+ dev_warn(&dev->intf->dev, "Unknown get_busparams_type %d\n",
+ hydra->pending_get_busparams_type);
+ break;
+ }
+ hydra->pending_get_busparams_type = -1;
+
+ complete(&priv->get_busparams_comp);
+}
+
static void
kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
u8 bus_status,
@@ -890,8 +942,10 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
new_state < CAN_STATE_BUS_OFF)
priv->can.can_stats.restarts++;
- cf->data[6] = bec->txerr;
- cf->data[7] = bec->rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
stats = &netdev->stats;
stats->rx_packets++;
@@ -1045,8 +1099,10 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
shhwtstamps->hwtstamp = hwtstamp;
cf->can_id |= CAN_ERR_BUSERROR;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
@@ -1095,6 +1151,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
struct kvaser_usb_net_priv *priv;
unsigned long irq_flags;
bool one_shot_fail = false;
+ bool is_err_frame = false;
u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
@@ -1113,10 +1170,13 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
kvaser_usb_hydra_one_shot_fail(priv, cmd_ext);
one_shot_fail = true;
}
+
+ is_err_frame = flags & KVASER_USB_HYDRA_CF_FLAG_TX_ACK &&
+ flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME;
}
context = &priv->tx_contexts[transid % dev->max_tx_urbs];
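+ /* Echoed error frames do not represent successful transmissions */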
- if (!one_shot_fail) {
+ if (!one_shot_fail && !is_err_frame) {
struct net_device_stats *stats = &priv->netdev->stats;
stats->tx_packets++;
@@ -1290,6 +1350,10 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev,
kvaser_usb_hydra_state_event(dev, cmd);
break;
+ case CMD_GET_BUSPARAMS_RESP:
+ kvaser_usb_hydra_get_busparams_reply(dev, cmd);
+ break;
+
case CMD_ERROR_EVENT:
kvaser_usb_hydra_error_event(dev, cmd);
break;
@@ -1490,15 +1554,61 @@ static int kvaser_usb_hydra_set_mode(struct net_device *netdev,
return err;
}
-static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
+static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
+ int busparams_type)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv;
+ struct kvaser_cmd *cmd;
+ size_t cmd_len;
+ int err;
+
+ if (!hydra)
+ return -EINVAL;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+ cmd->get_busparams_req.type = busparams_type;
+ hydra->pending_get_busparams_type = busparams_type;
+
+ reinit_completion(&priv->get_busparams_comp);
+
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ /* The reply arrives asynchronously; the command buffer can be freed now,
+ * on the error, timeout and success paths alike.
+ */
+ kfree(cmd);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->get_busparams_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return err;
+}
+
+static int kvaser_usb_hydra_get_nominal_busparams(struct kvaser_usb_net_priv *priv)
+{
+ return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN);
+}
+
+static int kvaser_usb_hydra_get_data_busparams(struct kvaser_usb_net_priv *priv)
+{
+ return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD);
+}
+
+static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams)
{
struct kvaser_cmd *cmd;
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *bt = &priv->can.bittiming;
struct kvaser_usb *dev = priv->dev;
- int tseg1 = bt->prop_seg + bt->phase_seg1;
- int tseg2 = bt->phase_seg2;
- int sjw = bt->sjw;
+ size_t cmd_len;
int err;
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
@@ -1506,33 +1616,29 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
return -ENOMEM;
cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
- cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate);
- cmd->set_busparams_req.sjw = (u8)sjw;
- cmd->set_busparams_req.tseg1 = (u8)tseg1;
- cmd->set_busparams_req.tseg2 = (u8)tseg2;
- cmd->set_busparams_req.nsamples = 1;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ memcpy(&cmd->set_busparams_req.busparams_nominal, busparams,
+ sizeof(cmd->set_busparams_req.busparams_nominal));
kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
kfree(cmd);
return err;
}
-static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
+static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams)
{
struct kvaser_cmd *cmd;
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *dbt = &priv->can.data_bittiming;
struct kvaser_usb *dev = priv->dev;
- int tseg1 = dbt->prop_seg + dbt->phase_seg1;
- int tseg2 = dbt->phase_seg2;
- int sjw = dbt->sjw;
+ size_t cmd_len;
int err;
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
@@ -1540,11 +1646,9 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
return -ENOMEM;
cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
- cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate);
- cmd->set_busparams_req.sjw_d = (u8)sjw;
- cmd->set_busparams_req.tseg1_d = (u8)tseg1;
- cmd->set_busparams_req.tseg2_d = (u8)tseg2;
- cmd->set_busparams_req.nsamples_d = 1;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ memcpy(&cmd->set_busparams_req.busparams_data, busparams,
+ sizeof(cmd->set_busparams_req.busparams_data));
if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1560,7 +1664,7 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
kfree(cmd);
@@ -1651,6 +1755,19 @@ static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev)
return 0;
}
+static int kvaser_usb_hydra_init_channel(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_usb_net_hydra_priv *hydra;
+
+ hydra = devm_kzalloc(&priv->dev->intf->dev, sizeof(*hydra), GFP_KERNEL);
+ if (!hydra)
+ return -ENOMEM;
+
+ priv->sub_priv = hydra;
+
+ return 0;
+}
+
static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
{
struct kvaser_cmd cmd;
@@ -1675,6 +1792,7 @@ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
{
struct kvaser_cmd *cmd;
+ size_t cmd_len;
int err;
u32 flags;
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
@@ -1684,6 +1802,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
return -ENOMEM;
cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
cmd->sw_detail_req.use_ext_cmd = 1;
kvaser_usb_hydra_set_cmd_dest_he
(cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
@@ -1691,7 +1810,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err)
goto end;
@@ -1807,6 +1926,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
{
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
+ size_t cmd_len;
int err;
if ((priv->can.ctrlmode &
@@ -1822,6 +1942,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
return -ENOMEM;
cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
kvaser_usb_hydra_set_cmd_transid
@@ -1831,7 +1952,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
else
cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL;
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
kfree(cmd);
return err;
@@ -1841,7 +1962,7 @@ static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->start_comp);
+ reinit_completion(&priv->start_comp);
err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ,
priv->channel);
@@ -1859,7 +1980,7 @@ static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->stop_comp);
+ reinit_completion(&priv->stop_comp);
/* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT
* see comment in kvaser_usb_hydra_update_state()
@@ -1882,7 +2003,7 @@ static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->flush_comp);
+ reinit_completion(&priv->flush_comp);
err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE,
priv->channel);
@@ -1993,10 +2114,13 @@ kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
.dev_set_mode = kvaser_usb_hydra_set_mode,
.dev_set_bittiming = kvaser_usb_hydra_set_bittiming,
+ .dev_get_busparams = kvaser_usb_hydra_get_nominal_busparams,
.dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming,
+ .dev_get_data_busparams = kvaser_usb_hydra_get_data_busparams,
.dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter,
.dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints,
.dev_init_card = kvaser_usb_hydra_init_card,
+ .dev_init_channel = kvaser_usb_hydra_init_channel,
.dev_get_software_info = kvaser_usb_hydra_get_software_info,
.dev_get_software_details = kvaser_usb_hydra_get_software_details,
.dev_get_card_info = kvaser_usb_hydra_get_card_info,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 0e0403dd0550..f06d63db9077 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -20,6 +20,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/usb.h>
+#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -55,6 +56,9 @@
#define CMD_RX_EXT_MESSAGE 14
#define CMD_TX_EXT_MESSAGE 15
#define CMD_SET_BUS_PARAMS 16
+#define CMD_GET_BUS_PARAMS 17
+#define CMD_GET_BUS_PARAMS_REPLY 18
+#define CMD_GET_CHIP_STATE 19
#define CMD_CHIP_STATE_EVENT 20
#define CMD_SET_CTRL_MODE 21
#define CMD_RESET_CHIP 24
@@ -69,10 +73,13 @@
#define CMD_GET_CARD_INFO_REPLY 35
#define CMD_GET_SOFTWARE_INFO 38
#define CMD_GET_SOFTWARE_INFO_REPLY 39
+#define CMD_ERROR_EVENT 45
#define CMD_FLUSH_QUEUE 48
#define CMD_TX_ACKNOWLEDGE 50
#define CMD_CAN_ERROR_EVENT 51
#define CMD_FLUSH_QUEUE_REPLY 68
+#define CMD_GET_CAPABILITIES_REQ 95
+#define CMD_GET_CAPABILITIES_RESP 96
#define CMD_LEAF_LOG_MESSAGE 106
@@ -82,6 +89,8 @@
#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)
+#define KVASER_USB_LEAF_SWOPTION_EXT_CAP BIT(12)
+
/* error factors */
#define M16C_EF_ACKE BIT(0)
#define M16C_EF_CRCE BIT(1)
@@ -156,11 +165,7 @@ struct usbcan_cmd_softinfo {
struct kvaser_cmd_busparams {
u8 tid;
u8 channel;
- __le32 bitrate;
- u8 tseg1;
- u8 tseg2;
- u8 sjw;
- u8 no_samp;
+ struct kvaser_usb_busparams busparams;
} __packed;
struct kvaser_cmd_tx_can {
@@ -229,7 +234,7 @@ struct kvaser_cmd_tx_acknowledge_header {
u8 tid;
} __packed;
-struct leaf_cmd_error_event {
+struct leaf_cmd_can_error_event {
u8 tid;
u8 flags;
__le16 time[3];
@@ -241,7 +246,7 @@ struct leaf_cmd_error_event {
u8 error_factor;
} __packed;
-struct usbcan_cmd_error_event {
+struct usbcan_cmd_can_error_event {
u8 tid;
u8 padding;
u8 tx_errors_count_ch0;
@@ -253,6 +258,28 @@ struct usbcan_cmd_error_event {
__le16 time;
} __packed;
+/* CMD_ERROR_EVENT error codes */
+#define KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL 0x8
+#define KVASER_USB_LEAF_ERROR_EVENT_PARAM 0x9
+
+struct leaf_cmd_error_event {
+ u8 tid;
+ u8 error_code;
+ __le16 timestamp[3];
+ __le16 padding;
+ __le16 info1;
+ __le16 info2;
+} __packed;
+
+struct usbcan_cmd_error_event {
+ u8 tid;
+ u8 error_code;
+ __le16 info1;
+ __le16 info2;
+ __le16 timestamp;
+ __le16 padding;
+} __packed;
+
struct kvaser_cmd_ctrl_mode {
u8 tid;
u8 channel;
@@ -277,6 +304,28 @@ struct leaf_cmd_log_message {
u8 data[8];
} __packed;
+/* Sub commands for cap_req and cap_res */
+#define KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE 0x02
+#define KVASER_USB_LEAF_CAP_CMD_ERR_REPORT 0x05
+struct kvaser_cmd_cap_req {
+ __le16 padding0;
+ __le16 cap_cmd;
+ __le16 padding1;
+ __le16 channel;
+} __packed;
+
+/* Status codes for cap_res */
+#define KVASER_USB_LEAF_CAP_STAT_OK 0x00
+#define KVASER_USB_LEAF_CAP_STAT_NOT_IMPL 0x01
+#define KVASER_USB_LEAF_CAP_STAT_UNAVAIL 0x02
+struct kvaser_cmd_cap_res {
+ __le16 padding;
+ __le16 cap_cmd;
+ __le16 status;
+ __le32 mask;
+ __le32 value;
+} __packed;
+
struct kvaser_cmd {
u8 len;
u8 id;
@@ -292,14 +341,18 @@ struct kvaser_cmd {
struct leaf_cmd_softinfo softinfo;
struct leaf_cmd_rx_can rx_can;
struct leaf_cmd_chip_state_event chip_state_event;
- struct leaf_cmd_error_event error_event;
+ struct leaf_cmd_can_error_event can_error_event;
struct leaf_cmd_log_message log_message;
+ struct leaf_cmd_error_event error_event;
+ struct kvaser_cmd_cap_req cap_req;
+ struct kvaser_cmd_cap_res cap_res;
} __packed leaf;
union {
struct usbcan_cmd_softinfo softinfo;
struct usbcan_cmd_rx_can rx_can;
struct usbcan_cmd_chip_state_event chip_state_event;
+ struct usbcan_cmd_can_error_event can_error_event;
struct usbcan_cmd_error_event error_event;
} __packed usbcan;
@@ -309,6 +362,42 @@ struct kvaser_cmd {
} u;
} __packed;
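+/* Minimum payload size per command id; CMD_SIZE_ANY skips the length check */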
+#define CMD_SIZE_ANY 0xff
+#define kvaser_fsize(field) sizeof_field(struct kvaser_cmd, field)
+
+static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
+ [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo),
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.can_error_event),
+ [CMD_GET_CAPABILITIES_RESP] = kvaser_fsize(u.leaf.cap_res),
+ [CMD_GET_BUS_PARAMS_REPLY] = kvaser_fsize(u.busparams),
+ [CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
+ /* ignored events: */
+ [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY,
+};
+
+static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
+ [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo),
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event),
+ [CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
+ /* ignored events: */
+ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
+};
+
/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
* handling. Some discrepancies between the two families exist:
*
@@ -332,6 +421,12 @@ struct kvaser_usb_err_summary {
};
};
+struct kvaser_usb_net_leaf_priv {
+ struct kvaser_usb_net_priv *net;
+
+ struct delayed_work chip_state_req_work;
+};
+
static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = {
.name = "kvaser_usb_ucii",
.tseg1_min = 4,
@@ -396,6 +491,43 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
+static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ /* buffer size >= cmd->len ensured by caller */
+ u8 min_size = 0;
+
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_leaf))
+ min_size = kvaser_usb_leaf_cmd_sizes_leaf[cmd->id];
+ break;
+ case KVASER_USBCAN:
+ if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_usbcan))
+ min_size = kvaser_usb_leaf_cmd_sizes_usbcan[cmd->id];
+ break;
+ }
+
+ if (min_size == CMD_SIZE_ANY)
+ return 0;
+
+ if (min_size) {
+ min_size += CMD_HEADER_LEN;
+ if (cmd->len >= min_size)
+ return 0;
+
+ dev_err_ratelimited(&dev->intf->dev,
+ "Received command %u too short (size %u, needed %u)",
+ cmd->id, cmd->len, min_size);
+ return -EIO;
+ }
+
+ dev_warn_ratelimited(&dev->intf->dev,
+ "Unhandled command (%d, size %d)\n",
+ cmd->id, cmd->len);
+ return -EINVAL;
+}
+
static void *
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
const struct sk_buff *skb, int *frame_len,
@@ -503,6 +635,9 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
end:
kfree(buf);
+ if (err == 0)
+ err = kvaser_usb_leaf_verify_size(dev, cmd);
+
return err;
}
@@ -535,6 +670,9 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
dev->fw_version = le32_to_cpu(softinfo->fw_version);
dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
+ if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP)
+ dev->card_data.capabilities |= KVASER_USB_CAP_EXT_CAP;
+
if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
/* Firmware expects bittiming parameters calculated for 16MHz
* clock, regardless of the actual clock
@@ -622,6 +760,116 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
return 0;
}
+static int kvaser_usb_leaf_get_single_capability(struct kvaser_usb *dev,
+ u16 cap_cmd_req, u16 *status)
+{
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+ struct kvaser_cmd *cmd;
+ u32 value = 0;
+ u32 mask = 0;
+ u16 cap_cmd_res;
+ int err;
+ int i;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_GET_CAPABILITIES_REQ;
+ cmd->u.leaf.cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_cap_req);
+
+ err = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+ if (err)
+ goto end;
+
+ err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd);
+ if (err)
+ goto end;
+
+ *status = le16_to_cpu(cmd->u.leaf.cap_res.status);
+
+ if (*status != KVASER_USB_LEAF_CAP_STAT_OK)
+ goto end;
+
+ cap_cmd_res = le16_to_cpu(cmd->u.leaf.cap_res.cap_cmd);
+ switch (cap_cmd_res) {
+ case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
+ case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
+ value = le32_to_cpu(cmd->u.leaf.cap_res.value);
+ mask = le32_to_cpu(cmd->u.leaf.cap_res.mask);
+ break;
+ default:
+ dev_warn(&dev->intf->dev, "Unknown capability command %u\n",
+ cap_cmd_res);
+ break;
+ }
+
+ for (i = 0; i < dev->nchannels; i++) {
+ if (BIT(i) & (value & mask)) {
+ switch (cap_cmd_res) {
+ case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
+ card_data->ctrlmode_supported |=
+ CAN_CTRLMODE_LISTENONLY;
+ break;
+ case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
+ card_data->capabilities |=
+ KVASER_USB_CAP_BERR_CAP;
+ break;
+ }
+ }
+ }
+
+end:
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev)
+{
+ int err;
+ u16 status;
+
+ if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) {
+ dev_info(&dev->intf->dev,
+ "No extended capability support. Upgrade device firmware.\n");
+ return 0;
+ }
+
+ err = kvaser_usb_leaf_get_single_capability(dev,
+ KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE failed %u\n",
+ status);
+
+ err = kvaser_usb_leaf_get_single_capability(dev,
+ KVASER_USB_LEAF_CAP_CMD_ERR_REPORT,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_LEAF_CAP_CMD_ERR_REPORT failed %u\n",
+ status);
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev)
+{
+ int err = 0;
+
+ if (dev->driver_info->family == KVASER_LEAF)
+ err = kvaser_usb_leaf_get_capabilities_leaf(dev);
+
+ return err;
+}
+
static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
@@ -650,7 +898,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
context = &priv->tx_contexts[tid % dev->max_tx_urbs];
/* Sometimes the state change doesn't come after a bus-off event */
- if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) {
+ if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) {
struct sk_buff *skb;
struct can_frame *cf;
@@ -706,6 +954,16 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
return err;
}
+static void kvaser_usb_leaf_chip_state_req_work(struct work_struct *work)
+{
+ struct kvaser_usb_net_leaf_priv *leaf =
+ container_of(work, struct kvaser_usb_net_leaf_priv,
+ chip_state_req_work.work);
+ struct kvaser_usb_net_priv *priv = leaf->net;
+
+ kvaser_usb_leaf_simple_cmd_async(priv, CMD_GET_CHIP_STATE);
+}
+
static void
kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
const struct kvaser_usb_err_summary *es,
@@ -724,20 +982,16 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
new_state = CAN_STATE_BUS_OFF;
} else if (es->status & M16C_STATE_BUS_PASSIVE) {
new_state = CAN_STATE_ERROR_PASSIVE;
- } else if (es->status & M16C_STATE_BUS_ERROR) {
+ } else if ((es->status & M16C_STATE_BUS_ERROR) &&
+ cur_state >= CAN_STATE_BUS_OFF) {
/* Guard against spurious error events after a busoff */
- if (cur_state < CAN_STATE_BUS_OFF) {
- if (es->txerr >= 128 || es->rxerr >= 128)
- new_state = CAN_STATE_ERROR_PASSIVE;
- else if (es->txerr >= 96 || es->rxerr >= 96)
- new_state = CAN_STATE_ERROR_WARNING;
- else if (cur_state > CAN_STATE_ERROR_ACTIVE)
- new_state = CAN_STATE_ERROR_ACTIVE;
- }
- }
-
- if (!es->status)
+ } else if (es->txerr >= 128 || es->rxerr >= 128) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (es->txerr >= 96 || es->rxerr >= 96) {
+ new_state = CAN_STATE_ERROR_WARNING;
+ } else {
new_state = CAN_STATE_ERROR_ACTIVE;
+ }
if (new_state != cur_state) {
tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
@@ -747,7 +1001,7 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
}
if (priv->can.restart_ms &&
- cur_state >= CAN_STATE_BUS_OFF &&
+ cur_state == CAN_STATE_BUS_OFF &&
new_state < CAN_STATE_BUS_OFF)
priv->can.can_stats.restarts++;
@@ -781,6 +1035,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
struct sk_buff *skb;
struct net_device_stats *stats;
struct kvaser_usb_net_priv *priv;
+ struct kvaser_usb_net_leaf_priv *leaf;
enum can_state old_state, new_state;
if (es->channel >= dev->nchannels) {
@@ -790,8 +1045,13 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
}
priv = dev->nets[es->channel];
+ leaf = priv->sub_priv;
stats = &priv->netdev->stats;
+ /* Ignore e.g. state change to bus-off reported just after stopping */
+ if (!netif_running(priv->netdev))
+ return;
+
/* Update all of the CAN interface's state and error counters before
* trying any memory allocation that can actually fail with -ENOMEM.
*
@@ -806,6 +1066,14 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
new_state = priv->can.state;
+ /* If there are errors, request status updates periodically as we do
+ * not get automatic notifications of improved state.
+ */
+ if (new_state < CAN_STATE_BUS_OFF &&
+ (es->rxerr || es->txerr || new_state == CAN_STATE_ERROR_PASSIVE))
+ schedule_delayed_work(&leaf->chip_state_req_work,
+ msecs_to_jiffies(500));
+
skb = alloc_can_err_skb(priv->netdev, &cf);
if (!skb) {
stats->rx_dropped++;
@@ -823,7 +1091,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
}
if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
+ old_state == CAN_STATE_BUS_OFF &&
new_state < CAN_STATE_BUS_OFF) {
cf->can_id |= CAN_ERR_RESTARTED;
netif_carrier_on(priv->netdev);
@@ -857,8 +1125,10 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
break;
}
- cf->data[6] = es->txerr;
- cf->data[7] = es->rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->data[6] = es->txerr;
+ cf->data[7] = es->rxerr;
+ }
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
@@ -921,11 +1191,11 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
case CMD_CAN_ERROR_EVENT:
es.channel = 0;
- es.status = cmd->u.usbcan.error_event.status_ch0;
- es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0;
- es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0;
+ es.status = cmd->u.usbcan.can_error_event.status_ch0;
+ es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch0;
+ es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch0;
es.usbcan.other_ch_status =
- cmd->u.usbcan.error_event.status_ch1;
+ cmd->u.usbcan.can_error_event.status_ch1;
kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
/* The USBCAN firmware supports up to 2 channels.
@@ -933,13 +1203,13 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
*/
if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
es.channel = 1;
- es.status = cmd->u.usbcan.error_event.status_ch1;
+ es.status = cmd->u.usbcan.can_error_event.status_ch1;
es.txerr =
- cmd->u.usbcan.error_event.tx_errors_count_ch1;
+ cmd->u.usbcan.can_error_event.tx_errors_count_ch1;
es.rxerr =
- cmd->u.usbcan.error_event.rx_errors_count_ch1;
+ cmd->u.usbcan.can_error_event.rx_errors_count_ch1;
es.usbcan.other_ch_status =
- cmd->u.usbcan.error_event.status_ch0;
+ cmd->u.usbcan.can_error_event.status_ch0;
kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
}
break;
@@ -956,11 +1226,11 @@ static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev,
switch (cmd->id) {
case CMD_CAN_ERROR_EVENT:
- es.channel = cmd->u.leaf.error_event.channel;
- es.status = cmd->u.leaf.error_event.status;
- es.txerr = cmd->u.leaf.error_event.tx_errors_count;
- es.rxerr = cmd->u.leaf.error_event.rx_errors_count;
- es.leaf.error_factor = cmd->u.leaf.error_event.error_factor;
+ es.channel = cmd->u.leaf.can_error_event.channel;
+ es.status = cmd->u.leaf.can_error_event.status;
+ es.txerr = cmd->u.leaf.can_error_event.tx_errors_count;
+ es.rxerr = cmd->u.leaf.can_error_event.rx_errors_count;
+ es.leaf.error_factor = cmd->u.leaf.can_error_event.error_factor;
break;
case CMD_LEAF_LOG_MESSAGE:
es.channel = cmd->u.leaf.log_message.channel;
@@ -1092,6 +1362,74 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
netif_rx(skb);
}
+static void kvaser_usb_leaf_error_event_parameter(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ u16 info1 = 0;
+
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ info1 = le16_to_cpu(cmd->u.leaf.error_event.info1);
+ break;
+ case KVASER_USBCAN:
+ info1 = le16_to_cpu(cmd->u.usbcan.error_event.info1);
+ break;
+ }
+
+ /* info1 will contain the offending cmd_no */
+ switch (info1) {
+ case CMD_SET_CTRL_MODE:
+ dev_warn(&dev->intf->dev,
+ "CMD_SET_CTRL_MODE error in parameter\n");
+ break;
+
+ case CMD_SET_BUS_PARAMS:
+ dev_warn(&dev->intf->dev,
+ "CMD_SET_BUS_PARAMS error in parameter\n");
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unhandled parameter error event cmd_no (%u)\n",
+ info1);
+ break;
+ }
+}
+
+static void kvaser_usb_leaf_error_event(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ u8 error_code = 0;
+
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ error_code = cmd->u.leaf.error_event.error_code;
+ break;
+ case KVASER_USBCAN:
+ error_code = cmd->u.usbcan.error_event.error_code;
+ break;
+ }
+
+ switch (error_code) {
+ case KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL:
+ /* Received an additional CAN message when the firmware TX
+ * queue is already full. Something is wrong with the driver.
+ * This should never happen!
+ */
+ dev_err(&dev->intf->dev,
+ "Received error event TX_QUEUE_FULL\n");
+ break;
+ case KVASER_USB_LEAF_ERROR_EVENT_PARAM:
+ kvaser_usb_leaf_error_event_parameter(dev, cmd);
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unhandled error event (%d)\n", error_code);
+ break;
+ }
+}
+
static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
@@ -1132,9 +1470,31 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
complete(&priv->stop_comp);
}
+static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ u8 channel = cmd->u.busparams.channel;
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+ memcpy(&priv->busparams_nominal, &cmd->u.busparams.busparams,
+ sizeof(priv->busparams_nominal));
+
+ complete(&priv->get_busparams_comp);
+}
+
static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
+ if (kvaser_usb_leaf_verify_size(dev, cmd) < 0)
+ return;
+
switch (cmd->id) {
case CMD_START_CHIP_REPLY:
kvaser_usb_leaf_start_chip_reply(dev, cmd);
@@ -1167,6 +1527,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
kvaser_usb_leaf_tx_acknowledge(dev, cmd);
break;
+ case CMD_ERROR_EVENT:
+ kvaser_usb_leaf_error_event(dev, cmd);
+ break;
+
+ case CMD_GET_BUS_PARAMS_REPLY:
+ kvaser_usb_leaf_get_busparams_reply(dev, cmd);
+ break;
+
/* Ignored commands */
case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
if (dev->driver_info->family != KVASER_USBCAN)
@@ -1247,7 +1615,7 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->start_comp);
+ reinit_completion(&priv->start_comp);
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP,
priv->channel);
@@ -1263,9 +1631,12 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
{
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
int err;
- init_completion(&priv->stop_comp);
+ reinit_completion(&priv->stop_comp);
+
+ cancel_delayed_work(&leaf->chip_state_req_work);
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
priv->channel);
@@ -1313,10 +1684,35 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
return 0;
}
-static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+static int kvaser_usb_leaf_init_channel(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_usb_net_leaf_priv *leaf;
+
+ leaf = devm_kzalloc(&priv->dev->intf->dev, sizeof(*leaf), GFP_KERNEL);
+ if (!leaf)
+ return -ENOMEM;
+
+ leaf->net = priv;
+ INIT_DELAYED_WORK(&leaf->chip_state_req_work,
+ kvaser_usb_leaf_chip_state_req_work);
+
+ priv->sub_priv = leaf;
+
+ return 0;
+}
+
+static void kvaser_usb_leaf_remove_channel(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
+
+ if (leaf)
+ cancel_delayed_work_sync(&leaf->chip_state_req_work);
+}
+
+static int kvaser_usb_leaf_set_bittiming(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *bt = &priv->can.bittiming;
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
int rc;
@@ -1329,15 +1725,8 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams);
cmd->u.busparams.channel = priv->channel;
cmd->u.busparams.tid = 0xff;
- cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
- cmd->u.busparams.sjw = bt->sjw;
- cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
- cmd->u.busparams.tseg2 = bt->phase_seg2;
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- cmd->u.busparams.no_samp = 3;
- else
- cmd->u.busparams.no_samp = 1;
+ memcpy(&cmd->u.busparams.busparams, busparams,
+ sizeof(cmd->u.busparams.busparams));
rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
@@ -1345,6 +1734,27 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
return rc;
}
+static int kvaser_usb_leaf_get_busparams(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ if (priv->dev->driver_info->family == KVASER_USBCAN)
+ return -EOPNOTSUPP;
+
+ reinit_completion(&priv->get_busparams_comp);
+
+ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_GET_BUS_PARAMS,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->get_busparams_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
enum can_mode mode)
{
@@ -1353,9 +1763,13 @@ static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
switch (mode) {
case CAN_MODE_START:
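+ /* Drop TX URBs left over from before the bus-off so the queues stay in sync */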
+ kvaser_usb_unlink_tx_urbs(priv);
+
err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP);
if (err)
return err;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
break;
default:
return -EOPNOTSUPP;
@@ -1402,14 +1816,18 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
.dev_set_mode = kvaser_usb_leaf_set_mode,
.dev_set_bittiming = kvaser_usb_leaf_set_bittiming,
+ .dev_get_busparams = kvaser_usb_leaf_get_busparams,
.dev_set_data_bittiming = NULL,
+ .dev_get_data_busparams = NULL,
.dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter,
.dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints,
.dev_init_card = kvaser_usb_leaf_init_card,
+ .dev_init_channel = kvaser_usb_leaf_init_channel,
+ .dev_remove_channel = kvaser_usb_leaf_remove_channel,
.dev_get_software_info = kvaser_usb_leaf_get_software_info,
.dev_get_software_details = NULL,
.dev_get_card_info = kvaser_usb_leaf_get_card_info,
- .dev_get_capabilities = NULL,
+ .dev_get_capabilities = kvaser_usb_leaf_get_capabilities,
.dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
.dev_start_chip = kvaser_usb_leaf_start_chip,
.dev_stop_chip = kvaser_usb_leaf_stop_chip,
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 957e51a77d4d..16fb4fc26518 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -47,6 +47,10 @@
#define MCBA_VER_REQ_USB 1
#define MCBA_VER_REQ_CAN 2
+/* Drive the CAN_RES signal LOW "0" to activate R24 and R25 */
+#define MCBA_VER_TERMINATION_ON 0
+#define MCBA_VER_TERMINATION_OFF 1
+
#define MCBA_SIDL_EXID_MASK 0x8
#define MCBA_DLC_MASK 0xf
#define MCBA_DLC_RTR_MASK 0x40
@@ -469,7 +473,7 @@ static void mcba_usb_process_ka_usb(struct mcba_priv *priv,
priv->usb_ka_first_pass = false;
}
- if (msg->termination_state)
+ if (msg->termination_state == MCBA_VER_TERMINATION_ON)
priv->can.termination = MCBA_TERMINATION_ENABLED;
else
priv->can.termination = MCBA_TERMINATION_DISABLED;
@@ -789,9 +793,9 @@ static int mcba_set_termination(struct net_device *netdev, u16 term)
};
if (term == MCBA_TERMINATION_ENABLED)
- usb_msg.termination = 1;
+ usb_msg.termination = MCBA_VER_TERMINATION_ON;
else
- usb_msg.termination = 0;
+ usb_msg.termination = MCBA_VER_TERMINATION_OFF;
mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg);
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index b514b2eaa318..89a94c16fc08 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -442,9 +442,10 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
if (rx_errors)
stats->rx_errors++;
-
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
+ if (priv->can.state != CAN_STATE_BUS_OFF) {
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 282c53ef76d2..1bfede407270 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -179,12 +179,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
nla_peer = data[VXCAN_INFO_PEER];
ifmp = nla_data(nla_peer);
- err = rtnl_nla_parse_ifla(peer_tb,
- nla_data(nla_peer) +
- sizeof(struct ifinfomsg),
- nla_len(nla_peer) -
- sizeof(struct ifinfomsg),
- NULL);
+ err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
if (err < 0)
return err;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index be3811311db2..940e15b519e4 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -193,6 +193,8 @@ struct xcan_devtype_data {
* @bus_clk: Pointer to struct clk
* @can_clk: Pointer to struct clk
* @devtype: Device type specific constants
+ * @cfd: Copy of the frame currently queued for transmission
+ * @is_canfd: True if the frame in @cfd is a CAN FD frame
*/
struct xcan_priv {
struct can_priv can;
@@ -210,6 +212,8 @@ struct xcan_priv {
struct clk *bus_clk;
struct clk *can_clk;
struct xcan_devtype_data devtype;
+ struct canfd_frame cfd;
+ bool is_canfd;
};
/* CAN Bittiming constants as per Xilinx CAN specs */
@@ -540,14 +544,13 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
/**
* xcan_write_frame - Write a frame to HW
* @priv: Driver private data structure
- * @skb: sk_buff pointer that contains data to be Txed
+ * @cf: canfd_frame pointer that contains data to be Txed
* @frame_offset: Register offset to write the frame to
*/
-static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
+static void xcan_write_frame(struct xcan_priv *priv, struct canfd_frame *cf,
int frame_offset)
{
u32 id, dlc, data[2] = {0, 0};
- struct canfd_frame *cf = (struct canfd_frame *)skb->data;
u32 ramoff, dwindex = 0, i;
/* Watch carefully on the bit sequence */
@@ -578,7 +581,7 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
}
dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
- if (can_is_canfd_skb(skb)) {
+ if (priv->is_canfd) {
if (cf->flags & CANFD_BRS)
dlc |= XCAN_DLCR_BRS_MASK;
dlc |= XCAN_DLCR_EDL_MASK;
@@ -630,6 +633,9 @@ static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
struct xcan_priv *priv = netdev_priv(ndev);
unsigned long flags;
+ priv->cfd = *((struct canfd_frame *)skb->data);
+ priv->is_canfd = can_is_canfd_skb(skb);
+
/* Check if the TX buffer is full */
if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
XCAN_SR_TXFLL_MASK))
@@ -641,7 +647,7 @@ static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
priv->tx_head++;
- xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET);
+ xcan_write_frame(priv, &priv->cfd, XCAN_TXFIFO_OFFSET);
/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
if (priv->tx_max > 1)
@@ -668,6 +674,9 @@ static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
struct xcan_priv *priv = netdev_priv(ndev);
unsigned long flags;
+ priv->cfd = *((struct canfd_frame *)skb->data);
+ priv->is_canfd = can_is_canfd_skb(skb);
+
if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
BIT(XCAN_TX_MAILBOX_IDX)))
return -ENOSPC;
@@ -678,7 +687,7 @@ static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
priv->tx_head++;
- xcan_write_frame(priv, skb,
+ xcan_write_frame(priv, &priv->cfd,
XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
/* Mark buffer as ready for transmit */
@@ -1776,7 +1785,8 @@ static int xcan_probe(struct platform_device *pdev)
priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
if (IS_ERR(priv->bus_clk)) {
- dev_err(&pdev->dev, "bus clock not found\n");
+ if (PTR_ERR(priv->bus_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "bus clock not found\n");
ret = PTR_ERR(priv->bus_clk);
goto err_free;
}
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 825d840cdb8c..1458416f4f91 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1551,6 +1551,7 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
memset(&ent, 0, sizeof(ent));
ent.port = port;
+ ent.is_valid = is_valid;
ent.vid = vid;
ent.is_static = true;
memcpy(ent.mac, addr, ETH_ALEN);
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index c628d0980c0b..1d52cb3e46d5 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -215,6 +215,18 @@ static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
return 0;
}
+static int b53_mmap_phy_read16(struct b53_device *dev, int addr, int reg,
+ u16 *value)
+{
+ return -EIO;
+}
+
+static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg,
+ u16 value)
+{
+ return -EIO;
+}
+
static const struct b53_io_ops b53_mmap_ops = {
.read8 = b53_mmap_read8,
.read16 = b53_mmap_read16,
@@ -226,6 +238,8 @@ static const struct b53_io_ops b53_mmap_ops = {
.write32 = b53_mmap_write32,
.write48 = b53_mmap_write48,
.write64 = b53_mmap_write64,
+ .phy_read16 = b53_mmap_phy_read16,
+ .phy_write16 = b53_mmap_phy_write16,
};
static int b53_mmap_probe(struct platform_device *pdev)
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index 0df6c2b9484a..e99e38c6738e 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -329,6 +329,17 @@ static struct mdio_driver dsa_loop_drv = {
#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
+static void dsa_loop_phydevs_unregister(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FIXED_PHYS; i++)
+ if (!IS_ERR(phydevs[i])) {
+ fixed_phy_unregister(phydevs[i]);
+ phy_device_free(phydevs[i]);
+ }
+}
+
static int __init dsa_loop_init(void)
{
struct fixed_phy_status status = {
@@ -336,23 +347,23 @@ static int __init dsa_loop_init(void)
.speed = SPEED_100,
.duplex = DUPLEX_FULL,
};
- unsigned int i;
+ unsigned int i;
+ int ret;
for (i = 0; i < NUM_FIXED_PHYS; i++)
phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
- return mdio_driver_register(&dsa_loop_drv);
+ ret = mdio_driver_register(&dsa_loop_drv);
+ if (ret)
+ dsa_loop_phydevs_unregister();
+
+ return ret;
}
module_init(dsa_loop_init);
static void __exit dsa_loop_exit(void)
{
- unsigned int i;
-
mdio_driver_unregister(&dsa_loop_drv);
- for (i = 0; i < NUM_FIXED_PHYS; i++)
- if (!IS_ERR(phydevs[i]))
- fixed_phy_unregister(phydevs[i]);
+ dsa_loop_phydevs_unregister();
}
module_exit(dsa_loop_exit);
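
The dsa_loop change above factors the PHY teardown into one helper so the init error path and the module exit path cannot drift apart. A hedged, self-contained sketch of that shape (the resource array and function names are invented for illustration):

#include <stdio.h>

static int resources[3];

static void teardown(void)
{
	int i;

	for (i = 0; i < 3; i++)
		if (resources[i]) {
			resources[i] = 0;
			printf("freed %d\n", i);
		}
}

static int register_driver(int fail)
{
	return fail ? -1 : 0;
}

int main(void)
{
	int i, ret;

	for (i = 0; i < 3; i++)
		resources[i] = 1;	/* acquire */

	ret = register_driver(1);
	if (ret)
		teardown();	/* same helper the exit path would call */
	return ret ? 1 : 0;
}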
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 4b9d5b0ce416..67f8faa22783 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -957,7 +957,7 @@ static const struct lan9303_mib_desc lan9303_mib[] = {
{ .offset = LAN9303_MAC_TX_BRDCST_CNT_0, .name = "TxBroad", },
{ .offset = LAN9303_MAC_TX_PAUSE_CNT_0, .name = "TxPause", },
{ .offset = LAN9303_MAC_TX_MULCST_CNT_0, .name = "TxMulti", },
- { .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "TxUnderRun", },
+ { .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "RxShort", },
{ .offset = LAN9303_MAC_TX_64_CNT_0, .name = "Tx64Byte", },
{ .offset = LAN9303_MAC_TX_127_CNT_0, .name = "Tx128Byte", },
{ .offset = LAN9303_MAC_TX_255_CNT_0, .name = "Tx256Byte", },
@@ -1001,9 +1001,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
ret = lan9303_read_switch_port(
chip, port, lan9303_mib[u].offset, &reg);
- if (ret)
+ if (ret) {
dev_warn(chip->dev, "Reading status port %d reg %u failed\n",
port, lan9303_mib[u].offset);
+ reg = 0;
+ }
data[u] = reg;
}
}
@@ -1185,8 +1187,6 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
- if (vid)
- return -EOPNOTSUPP;
return lan9303_alr_add_port(chip, addr, port, false);
}
@@ -1198,8 +1198,6 @@ static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
- if (vid)
- return -EOPNOTSUPP;
lan9303_alr_del_port(chip, addr, port);
return 0;
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
index 9cbe80460b53..a5787dc370eb 100644
--- a/drivers/net/dsa/lan9303_mdio.c
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -32,7 +32,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val)
struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
reg <<= 2; /* reg num to offset */
- mutex_lock(&sw_dev->device->bus->mdio_lock);
+ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff);
lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff);
mutex_unlock(&sw_dev->device->bus->mdio_lock);
@@ -50,7 +50,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
reg <<= 2; /* reg num to offset */
- mutex_lock(&sw_dev->device->bus->mdio_lock);
+ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
*val = lan9303_mdio_real_read(sw_dev->device, reg);
*val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16);
mutex_unlock(&sw_dev->device->bus->mdio_lock);
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 0370e71ed6e0..ba2dc01e0f6b 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -682,10 +682,10 @@ static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
/* clear forwarding port */
- alu_table[2] &= ~BIT(port);
+ alu_table[1] &= ~BIT(port);
/* if there is no port to forward, clear table */
- if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
+ if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
alu_table[0] = 0;
alu_table[1] = 0;
alu_table[2] = 0;
@@ -766,6 +766,9 @@ static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port,
goto exit;
}
+ if (!(ksz_data & ALU_VALID))
+ continue;
+
/* read ALU table */
ksz9477_read_table(dev, alu_table);
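
The fdb_del fix above clears the port bit in alu_table[1] — the word that actually holds the forwarding port map — and invalidates the whole entry once the map is empty. A small sketch of that clear-then-check logic (the layout and mask are illustrative, not the KSZ9477 register format):

#include <stdint.h>
#include <stdio.h>

#define PORT_MAP_MASK 0x7f	/* illustrative 7-port map */

int main(void)
{
	uint32_t alu[4] = { 0x1, 0x05, 0xdead, 0xbeef };	/* ports 0 and 2 */
	int port = 2;

	alu[1] &= ~(1u << port);		/* clear forwarding port */
	if ((alu[1] & PORT_MAP_MASK) == 0)	/* no ports left: drop entry */
		alu[0] = alu[1] = alu[2] = alu[3] = 0;

	printf("port map now 0x%x\n", (unsigned)(alu[1] & PORT_MAP_MASK));	/* 0x1 */
	return 0;
}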
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 2d8382eb9add..935004f5f5fe 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -397,9 +397,9 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
case PHY_INTERFACE_MODE_TRGMII:
trgint = 1;
if (priv->id == ID_MT7621) {
- /* PLL frequency: 150MHz: 1.2GBit */
+ /* PLL frequency: 125MHz: 1.0GBit */
if (xtal == HWTRAP_XTAL_40MHZ)
- ncpo1 = 0x0780;
+ ncpo1 = 0x0640;
if (xtal == HWTRAP_XTAL_25MHZ)
ncpo1 = 0x0a00;
} else { /* PLL frequency: 250MHz: 2.0Gbit */
@@ -644,7 +644,7 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv,
mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port)));
/* Set CPU port number */
- if (priv->id == ID_MT7621)
+ if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
/* CPU port gets connected to all user ports of
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 2a2489b5196d..51ef8f262ca9 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -117,6 +117,9 @@ static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
int addr = REG_PORT(p);
int ret;
+ if (dsa_is_unused_port(priv->ds, p))
+ return 0;
+
/* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index b336ed071fa8..c1655e595222 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2143,12 +2143,22 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
/* If there is a GPIO connected to the reset pin, toggle it */
if (gpiod) {
+ /* If the switch has just been reset and has not yet finished
+ * loading the EEPROM, toggling the reset line may interrupt the
+ * I2C transaction mid-byte, so the first EEPROM read after the
+ * reset comes from the wrong location and the switch boots into
+ * the wrong mode, leaving it inoperable.
+ */
+ if (chip->info->ops->get_eeprom)
+ mv88e6xxx_g2_eeprom_wait(chip);
+
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
- mv88e6xxx_g1_wait_eeprom_done(chip);
+ if (chip->info->ops->get_eeprom)
+ mv88e6xxx_g2_eeprom_wait(chip);
}
}
@@ -2433,9 +2443,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* If this is the upstream port for this switch, enable
* forwarding of unknown unicasts and multicasts.
*/
- reg = MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP |
- MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
+ reg = MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+ /* Forward any IPv4 IGMP or IPv6 MLD frames received
+ * by a USER port to the CPU port to allow snooping.
+ */
+ if (dsa_is_user_port(ds, port))
+ reg |= MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP;
+
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
if (err)
return err;
@@ -3871,6 +3886,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
@@ -5090,7 +5106,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
goto out;
}
if (chip->reset)
- usleep_range(1000, 2000);
+ usleep_range(10000, 20000);
err = mv88e6xxx_detect(chip);
if (err)
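
The mv88e6xxx hardware-reset change above waits for the EEPROM to go idle both before pulling the reset GPIO and again afterwards, so the reset can never cut an I2C transfer mid-byte. A sketch of that quiesce-toggle-quiesce ordering with a fake polled busy flag (the countdown stands in for reading a status register):

#include <stdio.h>

static int busy = 3;	/* fake EEPROM "busy" countdown */

/* stand-in for mv88e6xxx_g2_eeprom_wait(): poll until the busy bit clears */
static int eeprom_wait(void)
{
	int tries = 10;

	while (busy && tries--)
		busy--;	/* a real driver would sleep and re-read a register */
	return busy ? -1 : 0;
}

int main(void)
{
	if (eeprom_wait())	/* quiesce the EEPROM before pulling reset */
		return 1;
	printf("toggle reset GPIO\n");
	busy = 3;		/* the reset restarts the EEPROM load */
	if (eeprom_wait())	/* and wait again before using the switch */
		return 1;
	printf("switch ready\n");
	return 0;
}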
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 938dd146629f..8a903624fdd7 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -75,37 +75,6 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
-{
- const unsigned long timeout = jiffies + 1 * HZ;
- u16 val;
- int err;
-
- /* Wait up to 1 second for the switch to finish reading the
- * EEPROM.
- */
- while (time_before(jiffies, timeout)) {
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
- if (err) {
- dev_err(chip->dev, "Error reading status");
- return;
- }
-
- /* If the switch is still resetting, it may not
- * respond on the bus, and so MDIO read returns
- * 0xffff. Differentiate between that, and waiting for
- * the EEPROM to be done by bit 0 being set.
- */
- if (val != 0xffff &&
- val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
- return;
-
- usleep_range(1000, 2000);
- }
-
- dev_err(chip->dev, "Timeout waiting for EEPROM done");
-}
-
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 08d66ef6aace..0ae96a1e919b 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -277,7 +277,6 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 6240976679e1..7674b0b8cc70 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -310,7 +310,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
* Offset 0x15: EEPROM Addr (for 8-bit data access)
*/
-static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
+int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
int err;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 42da4bca73e8..12807e52ecea 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -340,6 +340,7 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
int port);
+int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip);
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 614377ef7956..c7ff98c26ee3 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -1108,6 +1108,8 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x",
vsc->chipid);
+ if (!vsc->gc.label)
+ return -ENOMEM;
vsc->gc.ngpio = 4;
vsc->gc.owner = THIS_MODULE;
vsc->gc.parent = vsc->dev;
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 2b2695311bda..aab26dbe76ff 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -196,6 +196,7 @@ static int tc589_probe(struct pcmcia_device *link)
{
struct el3_private *lp;
struct net_device *dev;
+ int ret;
dev_dbg(&link->dev, "3c589_attach()\n");
@@ -219,7 +220,15 @@ static int tc589_probe(struct pcmcia_device *link)
dev->ethtool_ops = &netdev_ethtool_ops;
- return tc589_config(link);
+ ret = tc589_config(link);
+ if (ret)
+ goto err_free_netdev;
+
+ return 0;
+
+err_free_netdev:
+ free_netdev(dev);
+ return ret;
}
static void tc589_detach(struct pcmcia_device *link)
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 907904c0a288..19e2b838750c 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -258,6 +258,7 @@ static int greth_init_rings(struct greth_private *greth)
if (dma_mapping_error(greth->dev, dma_addr)) {
if (netif_msg_ifup(greth))
dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ dev_kfree_skb(skb);
goto cleanup;
}
greth->rx_skbuff[i] = skb;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 9225733f4fec..29700fee42e9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -375,7 +375,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
io_sq->bounce_buf_ctrl.next_to_use = 0;
size = io_sq->bounce_buf_ctrl.buffer_size *
- io_sq->bounce_buf_ctrl.buffers_num;
+ io_sq->bounce_buf_ctrl.buffers_num;
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
@@ -702,8 +702,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
/* The desc list entry size should be whole multiply of 8
* This requirement comes from __iowrite64_copy()
*/
- pr_err("illegal entry size %d\n",
- llq_info->desc_list_entry_size);
+ pr_err("illegal entry size %d\n", llq_info->desc_list_entry_size);
return -EINVAL;
}
@@ -2063,8 +2062,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- writel_relaxed((u32)aenq->head,
- dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ writel_relaxed((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 2e5348ec2a2e..8e2e0c3bee0b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2385,24 +2385,7 @@ error_drop_packet:
return NETDEV_TX_OK;
}
-static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- u16 qid;
- /* we suspect that this is good for in--kernel network services that
- * want to loop incoming skb rx to tx in normal user generated traffic,
- * most probably we will not get to this
- */
- if (skb_rx_queue_recorded(skb))
- qid = skb_get_rx_queue(skb);
- else
- qid = netdev_pick_tx(dev, skb, NULL);
-
- return qid;
-}
-
-static void ena_config_host_info(struct ena_com_dev *ena_dev,
- struct pci_dev *pdev)
+static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
struct ena_admin_host_info *host_info;
int rc;
@@ -2550,7 +2533,6 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_open = ena_open,
.ndo_stop = ena_close,
.ndo_start_xmit = ena_start_xmit,
- .ndo_select_queue = ena_select_queue,
.ndo_get_stats64 = ena_get_stats64,
.ndo_tx_timeout = ena_tx_timeout,
.ndo_change_mtu = ena_change_mtu,
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index d3d44e07afbc..414b990827e8 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -825,7 +825,7 @@ lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
dev->stats.tx_bytes += skb->len;
- dev_kfree_skb( skb );
+ dev_consume_skb_irq(skb);
lp->cur_tx++;
while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
lp->cur_tx -= TX_RING_SIZE;
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index f90b454b1642..7ba3da856105 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -997,7 +997,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
lp->tx_ring[entry].base =
((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
- dev_kfree_skb(skb);
+ dev_consume_skb_irq(skb);
} else {
lp->tx_skbuff[entry] = skb;
lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 9c152d85840d..c9d2a6f15062 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -652,7 +652,7 @@ static int nmclan_config(struct pcmcia_device *link)
} else {
pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
sig[0], sig[1]);
- return -ENODEV;
+ goto failed;
}
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index d5fd49dd25f3..decc1c09a031 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -524,19 +524,28 @@ static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
}
+static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
+{
+ unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+
+ /* From MAC version 30H the TFCR is per priority instead of per queue */
+ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
+ return max_q_count;
+ else
+ return min_t(unsigned int, pdata->tx_q_count, max_q_count);
+}
+
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
- unsigned int max_q_count, q_count;
unsigned int reg, reg_val;
- unsigned int i;
+ unsigned int i, q_count;
/* Clear MTL flow control */
for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
/* Clear MAC flow control */
- max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
- q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ q_count = xgbe_get_fc_queue_count(pdata);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
@@ -553,9 +562,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
struct ieee_pfc *pfc = pdata->pfc;
struct ieee_ets *ets = pdata->ets;
- unsigned int max_q_count, q_count;
unsigned int reg, reg_val;
- unsigned int i;
+ unsigned int i, q_count;
/* Set MTL flow control */
for (i = 0; i < pdata->rx_q_count; i++) {
@@ -579,8 +587,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
}
/* Set MAC flow control */
- max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
- q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ q_count = xgbe_get_fc_queue_count(pdata);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
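
The new xgbe helper above encodes one version-gated decision: from MAC version 0x30 the Tx flow control register is per priority, so every entry is written; earlier versions clamp to the number of Tx queues. A compact sketch of that decision with invented values:

#include <stdio.h>

#define MAX_FLOW_CONTROL_QUEUES 8u

/* mirrors the shape of xgbe_get_fc_queue_count() */
static unsigned int fc_queue_count(unsigned int snpsver, unsigned int tx_q)
{
	if (snpsver >= 0x30)
		return MAX_FLOW_CONTROL_QUEUES;
	return tx_q < MAX_FLOW_CONTROL_QUEUES ? tx_q : MAX_FLOW_CONTROL_QUEUES;
}

int main(void)
{
	printf("%u\n", fc_queue_count(0x21, 4));	/* 4: per queue, clamped */
	printf("%u\n", fc_queue_count(0x30, 4));	/* 8: per priority */
	return 0;
}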
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 0442d7e1cd20..504fbd43be7d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -682,10 +682,24 @@ static void xgbe_service(struct work_struct *work)
static void xgbe_service_timer(struct timer_list *t)
{
struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
+ struct xgbe_channel *channel;
+ unsigned int i;
queue_work(pdata->dev_workqueue, &pdata->service_work);
mod_timer(&pdata->service_timer, jiffies + HZ);
+
+ if (!pdata->tx_usecs)
+ return;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ if (!channel->tx_ring || channel->tx_timer_active)
+ break;
+ channel->tx_timer_active = 1;
+ mod_timer(&channel->tx_timer,
+ jiffies + usecs_to_jiffies(pdata->tx_usecs));
+ }
}
static void xgbe_init_timers(struct xgbe_prv_data *pdata)
@@ -1139,6 +1153,9 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+ tasklet_kill(&pdata->tasklet_dev);
+ tasklet_kill(&pdata->tasklet_ecc);
+
if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index a880f10e3e70..d74f45ce0686 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -314,10 +314,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev,
cmd->base.phy_address = pdata->phy.address;
- cmd->base.autoneg = pdata->phy.autoneg;
- cmd->base.speed = pdata->phy.speed;
- cmd->base.duplex = pdata->phy.duplex;
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = pdata->phy.speed;
+ cmd->base.duplex = pdata->phy.duplex;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ cmd->base.autoneg = pdata->phy.autoneg;
cmd->base.port = PORT_NONE;
XGBE_LM_COPY(cmd, supported, lks, supported);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 4d9062d35930..530043742a07 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -447,8 +447,10 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
xgbe_i2c_disable(pdata);
xgbe_i2c_clear_all_interrupts(pdata);
- if (pdata->dev_irq != pdata->i2c_irq)
+ if (pdata->dev_irq != pdata->i2c_irq) {
devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
+ tasklet_kill(&pdata->tasklet_i2c);
+ }
}
static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 156a0bc8ab01..0e552022e659 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -496,6 +496,7 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
reg |= XGBE_KR_TRAINING_ENABLE;
reg |= XGBE_KR_TRAINING_START;
XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+ pdata->kr_start_time = jiffies;
netif_dbg(pdata, link, pdata->netdev,
"KR training initiated\n");
@@ -632,6 +633,8 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
xgbe_switch_mode(pdata);
+ pdata->an_result = XGBE_AN_READY;
+
xgbe_an_restart(pdata);
return XGBE_AN_INCOMPAT_LINK;
@@ -1175,7 +1178,19 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
if (pdata->phy.duplex != DUPLEX_FULL)
return -EINVAL;
- xgbe_set_mode(pdata, mode);
+ /* Force the mode change for SFI in Fixed PHY config.
+ * Fixed PHY configurations need the PLL to be enabled while doing
+ * the mode set. When the SFP module isn't connected during boot,
+ * the driver assumes AN is ON and attempts autonegotiation.
+ * However, if the connected SFP comes up in Fixed PHY config, the
+ * link will not come up as the PLL isn't enabled while the initial
+ * mode set command is issued. So, force the mode change for SFI in
+ * Fixed PHY configuration to fix link issues.
+ */
+ if (mode == XGBE_MODE_SFI)
+ xgbe_change_mode(pdata, mode);
+ else
+ xgbe_set_mode(pdata, mode);
return 0;
}
@@ -1275,9 +1290,30 @@ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
{
unsigned long link_timeout;
+ unsigned long kr_time;
+ int wait;
link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
if (time_after(jiffies, link_timeout)) {
+ if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) &&
+ pdata->phy.autoneg == AUTONEG_ENABLE) {
+ /* AN restart should not happen while KR training is in progress.
+ * The while loop ensures no AN restart occurs during KR training,
+ * waiting up to 500ms; an AN restart is triggered only if KR
+ * training has failed.
+ */
+ wait = XGBE_KR_TRAINING_WAIT_ITER;
+ while (wait--) {
+ kr_time = pdata->kr_start_time +
+ msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+ if (time_after(jiffies, kr_time))
+ break;
+ /* AN restart is not required if the AN result is COMPLETE */
+ if (pdata->an_result == XGBE_AN_COMPLETE)
+ return;
+ usleep_range(10000, 11000);
+ }
+ }
netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
xgbe_phy_config_aneg(pdata);
}
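
The bounded wait above is a common shape: poll against a deadline derived from a recorded start time, bail out early on success, and fall through to the recovery action only once the window closes. A self-contained sketch with fake time (the plain counter models jiffies and the 500ms window):

#include <stdio.h>

static unsigned long now;	/* fake jiffies */
static int training_done;

static int poll_once(void)
{
	now += 10;			/* pretend 10ms passed */
	if (now >= 40)
		training_done = 1;	/* training completes at t=40 */
	return training_done;
}

int main(void)
{
	unsigned long start = now;
	unsigned long deadline = start + 50;	/* the 500ms analogue */
	int wait = 10;				/* iteration cap */

	while (wait--) {
		if (now >= deadline)
			break;		/* window closed: restart AN */
		if (poll_once())
			return 0;	/* success: no AN restart needed */
	}
	printf("restart autonegotiation\n");
	return 1;
}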
@@ -1288,7 +1324,7 @@ static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
return pdata->phy_if.phy_impl.an_outcome(pdata);
}
-static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
+static bool xgbe_phy_status_result(struct xgbe_prv_data *pdata)
{
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
enum xgbe_mode mode;
@@ -1323,8 +1359,13 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
pdata->phy.duplex = DUPLEX_FULL;
- if (xgbe_set_mode(pdata, mode) && pdata->an_again)
+ if (!xgbe_set_mode(pdata, mode))
+ return false;
+
+ if (pdata->an_again)
xgbe_phy_reconfig_aneg(pdata);
+
+ return true;
}
static void xgbe_phy_status(struct xgbe_prv_data *pdata)
@@ -1354,7 +1395,8 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
return;
}
- xgbe_phy_status_result(pdata);
+ if (xgbe_phy_status_result(pdata))
+ return;
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
@@ -1390,8 +1432,10 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
/* Disable auto-negotiation */
xgbe_an_disable_all(pdata);
- if (pdata->dev_irq != pdata->an_irq)
+ if (pdata->dev_irq != pdata->an_irq) {
devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+ tasklet_kill(&pdata->tasklet_an);
+ }
pdata->phy_if.phy_impl.stop(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 0b325ae875b5..0a15c617c702 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -188,6 +188,7 @@ enum xgbe_sfp_cable {
XGBE_SFP_CABLE_UNKNOWN = 0,
XGBE_SFP_CABLE_ACTIVE,
XGBE_SFP_CABLE_PASSIVE,
+ XGBE_SFP_CABLE_FIBER,
};
enum xgbe_sfp_base {
@@ -235,9 +236,7 @@ enum xgbe_sfp_speed {
#define XGBE_SFP_BASE_BR 12
#define XGBE_SFP_BASE_BR_1GBE_MIN 0x0a
-#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d
#define XGBE_SFP_BASE_BR_10GBE_MIN 0x64
-#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68
#define XGBE_SFP_BASE_CU_CABLE_LEN 18
@@ -283,6 +282,8 @@ struct xgbe_sfp_eeprom {
#define XGBE_BEL_FUSE_VENDOR "BEL-FUSE "
#define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 "
+#define XGBE_MOLEX_VENDOR "Molex Inc. "
+
struct xgbe_sfp_ascii {
union {
char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
@@ -822,25 +823,22 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
enum xgbe_sfp_speed sfp_speed)
{
- u8 *sfp_base, min, max;
+ u8 *sfp_base, min;
sfp_base = sfp_eeprom->base;
switch (sfp_speed) {
case XGBE_SFP_SPEED_1000:
min = XGBE_SFP_BASE_BR_1GBE_MIN;
- max = XGBE_SFP_BASE_BR_1GBE_MAX;
break;
case XGBE_SFP_SPEED_10000:
min = XGBE_SFP_BASE_BR_10GBE_MIN;
- max = XGBE_SFP_BASE_BR_10GBE_MAX;
break;
default:
return false;
}
- return ((sfp_base[XGBE_SFP_BASE_BR] >= min) &&
- (sfp_base[XGBE_SFP_BASE_BR] <= max));
+ return sfp_base[XGBE_SFP_BASE_BR] >= min;
}
static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
@@ -1141,16 +1139,21 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
- /* Assume ACTIVE cable unless told it is PASSIVE */
+ /* Assume FIBER cable unless told otherwise */
if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN];
- } else {
+ } else if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_ACTIVE) {
phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
+ } else {
+ phy_data->sfp_cable = XGBE_SFP_CABLE_FIBER;
}
/* Determine the type of SFP */
- if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
+ if (phy_data->sfp_cable != XGBE_SFP_CABLE_FIBER &&
+ xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
phy_data->sfp_base = XGBE_SFP_BASE_10000_SR;
else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR)
phy_data->sfp_base = XGBE_SFP_BASE_10000_LR;
@@ -1166,9 +1169,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
phy_data->sfp_base = XGBE_SFP_BASE_1000_CX;
else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T)
phy_data->sfp_base = XGBE_SFP_BASE_1000_T;
- else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
- xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
- phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
switch (phy_data->sfp_base) {
case XGBE_SFP_BASE_1000_T:
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 0c93a552b921..729307a96c50 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -290,6 +290,7 @@
/* Auto-negotiation */
#define XGBE_AN_MS_TIMEOUT 500
#define XGBE_LINK_TIMEOUT 5
+#define XGBE_KR_TRAINING_WAIT_ITER 50
#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
@@ -1266,6 +1267,7 @@ struct xgbe_prv_data {
unsigned int parallel_detect;
unsigned int fec_ability;
unsigned long an_start;
+ unsigned long kr_start_time;
enum xgbe_an_mode an_mode;
/* I2C support */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index ce4e617a6ec4..29fcc4deb4da 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1004,8 +1004,10 @@ static int xgene_enet_open(struct net_device *ndev)
xgene_enet_napi_enable(pdata);
ret = xgene_enet_register_irq(ndev);
- if (ret)
+ if (ret) {
+ xgene_enet_napi_disable(pdata);
return ret;
+ }
if (ndev->phydev) {
phy_start(ndev->phydev);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 18f4923b1723..6a253f81c555 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -18,6 +18,7 @@
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
+#include <linux/mdio/mdio-xgene.h>
#include <linux/module.h>
#include <net/ip.h>
#include <linux/prefetch.h>
@@ -26,7 +27,6 @@
#include "xgene_enet_hw.h"
#include "xgene_enet_cle.h"
#include "xgene_enet_ring2.h"
-#include "../../../phy/mdio-xgene.h"
#define XGENE_DRV_VERSION "v1.0"
#define ETHER_MIN_PACKET 64
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 3e3711b60d01..11d9884eb14d 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1511,7 +1511,7 @@ static void bmac_tx_timeout(struct timer_list *t)
i = bp->tx_empty;
++dev->stats.tx_errors;
if (i != bp->tx_fill) {
- dev_kfree_skb(bp->tx_bufs[i]);
+ dev_kfree_skb_irq(bp->tx_bufs[i]);
bp->tx_bufs[i] = NULL;
if (++i >= N_TX_RING) i = 0;
bp->tx_empty = i;
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index b8ba2abf5b3a..65ed373d04f5 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -841,7 +841,7 @@ static void mace_tx_timeout(struct timer_list *t)
if (mp->tx_bad_runt) {
mp->tx_bad_runt = 0;
} else if (i != mp->tx_fill) {
- dev_kfree_skb(mp->tx_bufs[i]);
+ dev_kfree_skb_irq(mp->tx_bufs[i]);
if (++i >= N_TX_RING)
i = 0;
mp->tx_empty = i;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 145334fb18f4..4eeafd529385 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -903,7 +903,6 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
default:
err = -1;
goto err_exit;
- break;
}
if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
err = -1;
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
index 2f4eabf652e8..51e5aa2c74b3 100644
--- a/drivers/net/ethernet/atheros/alx/ethtool.c
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -281,9 +281,8 @@ static void alx_get_ethtool_stats(struct net_device *netdev,
spin_lock(&alx->stats_lock);
alx_update_hw_stats(hw);
- BUILD_BUG_ON(sizeof(hw->stats) - offsetof(struct alx_hw_stats, rx_ok) <
- ALX_NUM_STATS * sizeof(u64));
- memcpy(data, &hw->stats.rx_ok, ALX_NUM_STATS * sizeof(u64));
+ BUILD_BUG_ON(sizeof(hw->stats) != ALX_NUM_STATS * sizeof(u64));
+ memcpy(data, &hw->stats, sizeof(hw->stats));
spin_unlock(&alx->stats_lock);
}
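
The alx change above tightens the compile-time guarantee: the whole stats struct, not just a suffix of it, must be exactly the exported counter table. In C11 the same guarantee can be written as a static assertion; a hedged sketch with an invented struct:

#include <stdint.h>

struct hw_stats {	/* illustrative, not the alx layout */
	uint64_t rx_ok;
	uint64_t tx_ok;
	uint64_t rx_err;
};

#define NUM_STATS 3

/* fails to compile if a field is added without updating NUM_STATS */
_Static_assert(sizeof(struct hw_stats) == NUM_STATS * sizeof(uint64_t),
	       "stats struct must be exactly the exported counter table");

int main(void)
{
	return 0;
}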
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 2b239ecea05f..ff28dbf51574 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1989,8 +1989,11 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
+ ntohs(ip_hdr(skb)->tot_len));
- if (real_len < skb->len)
- pskb_trim(skb, real_len);
+ if (real_len < skb->len) {
+ err = pskb_trim(skb, real_len);
+ if (err)
+ return err;
+ }
hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (unlikely(skb->len == hdr_len)) {
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 4f7b65825c15..171f4ffcd869 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -868,10 +868,13 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
offset, adapter->ring_size);
err = -1;
- goto failed;
+ goto free_buffer;
}
return 0;
+free_buffer:
+ kfree(tx_ring->tx_buffer);
+ tx_ring->tx_buffer = NULL;
failed:
if (adapter->ring_vir_addr != NULL) {
pci_free_consistent(pdev, adapter->ring_size,
@@ -1638,8 +1641,11 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
+ ntohs(ip_hdr(skb)->tot_len));
- if (real_len < skb->len)
- pskb_trim(skb, real_len);
+ if (real_len < skb->len) {
+ err = pskb_trim(skb, real_len);
+ if (err)
+ return err;
+ }
hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (unlikely(skb->len == hdr_len)) {
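
Both the atl1c and atl1e hunks above stop ignoring the return value of pskb_trim(): trimming can fail on shared buffers, and continuing with an untrimmed packet would corrupt the checksum math downstream. A userspace sketch of propagating the trim failure (the buffer type and trim function are stand-ins):

#include <stdio.h>

struct buf {
	int len;
	int cloned;	/* trimming a shared buffer can fail */
};

static int trim(struct buf *b, int new_len)
{
	if (b->cloned)
		return -12;	/* -ENOMEM, as pskb_trim() can return */
	b->len = new_len;
	return 0;
}

static int tso_fixup(struct buf *b, int real_len)
{
	if (real_len < b->len) {
		int err = trim(b, real_len);

		if (err)
			return err;	/* propagate instead of ignoring */
	}
	return 0;
}

int main(void)
{
	struct buf b = { .len = 100, .cloned = 1 };

	printf("%d\n", tso_fixup(&b, 60));	/* -12: caller sees failure */
	return 0;
}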
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 2d52754afc33..7d844b0ea0da 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -228,12 +228,12 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
- ci->pkg == BCMA_PKG_ID_BCM47186) {
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
- if (ci->pkg == BCMA_PKG_ID_BCM5358)
+ if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358)
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
break;
case BCMA_CHIP_ID_BCM53573:
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 148734b166f0..1148370e2432 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -189,8 +189,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
}
slot->skb = skb;
- ring->end += nr_frags + 1;
netdev_sent_queue(net_dev, skb->len);
+ ring->end += nr_frags + 1;
wmb();
@@ -890,13 +890,13 @@ static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
if (iost & BGMAC_BCMA_IOST_ATTACHED) {
flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
- if (!bgmac->has_robosw)
+ if (bgmac->in_init || !bgmac->has_robosw)
flags |= BGMAC_BCMA_IOCTL_SW_RESET;
}
bgmac_clk_enable(bgmac, flags);
}
- if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
+ if (iost & BGMAC_BCMA_IOST_ATTACHED && (bgmac->in_init || !bgmac->has_robosw))
bgmac_idm_write(bgmac, BCMA_IOCTL,
bgmac_idm_read(bgmac, BCMA_IOCTL) &
~BGMAC_BCMA_IOCTL_SW_RESET);
@@ -1447,7 +1447,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
int err;
phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
- if (!phy_dev || IS_ERR(phy_dev)) {
+ if (IS_ERR(phy_dev)) {
dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
return -ENODEV;
}
@@ -1489,6 +1489,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
struct net_device *net_dev = bgmac->net_dev;
int err;
+ bgmac->in_init = true;
+
bgmac_chip_intrs_off(bgmac);
net_dev->irq = bgmac->irq;
@@ -1538,6 +1540,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
net_dev->hw_features = net_dev->features;
net_dev->vlan_features = net_dev->features;
+ bgmac->in_init = false;
+
err = register_netdev(bgmac->net_dev);
if (err) {
dev_err(bgmac->dev, "Cannot register net device\n");
@@ -1564,7 +1568,6 @@ void bgmac_enet_remove(struct bgmac *bgmac)
phy_disconnect(bgmac->net_dev->phydev);
netif_napi_del(&bgmac->napi);
bgmac_dma_free(bgmac);
- free_netdev(bgmac->net_dev);
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 40d02fec2747..76930b8353d6 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -511,6 +511,8 @@ struct bgmac {
int irq;
u32 int_mask;
+ bool in_init;
+
/* Current MAC state */
int mac_speed;
int mac_duplex;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index c3f67d8e1093..2cb347120852 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -1461,7 +1461,7 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp)
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
- u32 uninitialized_var(bmcr);
+ u32 bmcr;
int err;
if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
@@ -1505,7 +1505,7 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
- u32 uninitialized_var(bmcr);
+ u32 bmcr;
int err;
if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 9af8afd7ae89..d8e13ee0601f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -787,6 +787,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
pad, len, fp->rx_buf_size);
bnx2x_panic();
+ bnx2x_frag_free(fp, new_data);
return;
}
#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 3f63ffd7561b..d17731cae863 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1004,9 +1004,6 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
struct bnx2x_alloc_pool *pool)
{
- if (!pool->page)
- return;
-
put_page(pool->page);
pool->page = NULL;
@@ -1017,6 +1014,9 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
{
int i;
+ if (!fp->page_pool.page)
+ return;
+
if (fp->mode == TPA_MODE_DISABLED)
return;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 066765fbef06..0a59a09ef82f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -296,7 +296,6 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
* possible, the driver should only write the valid vnics into the internal
* ram according to the appropriate port mode.
*/
-#define BITS_TO_BYTES(x) ((x)/8)
/* CMNG constants, as derived from system spec calculations */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b5f58c62e7d2..211fbc8f7571 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -14426,11 +14426,16 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK;
- if (netif_running(dev))
- bnx2x_nic_load(bp, LOAD_NORMAL);
+ if (netif_running(dev)) {
+ if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
+ netdev_err(bp->dev, "Error during driver initialization, try unloading/reloading the driver\n");
+ goto done;
+ }
+ }
netif_device_attach(dev);
+done:
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 4630998d47fd..d920bb8dae77 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -807,16 +807,20 @@ static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
- struct pci_dev *dev;
struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+ struct pci_dev *dev;
+ bool pending;
if (!vf)
return false;
dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
- if (dev)
- return bnx2x_is_pcie_pending(dev);
- return false;
+ if (!dev)
+ return false;
+ pending = bnx2x_is_pcie_pending(dev);
+ pci_dev_put(dev);
+
+ return pending;
}
int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5a7d5e7f3b23..2a12a4144261 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -221,12 +221,12 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
- { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
- { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
- { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
- { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
@@ -2747,7 +2747,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
static void bnxt_free_tpa_info(struct bnxt *bp)
{
- int i;
+ int i, j;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
@@ -2755,8 +2755,10 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
kfree(rxr->rx_tpa_idx_map);
rxr->rx_tpa_idx_map = NULL;
if (rxr->rx_tpa) {
- kfree(rxr->rx_tpa[0].agg_arr);
- rxr->rx_tpa[0].agg_arr = NULL;
+ for (j = 0; j < bp->max_tpa; j++) {
+ kfree(rxr->rx_tpa[j].agg_arr);
+ rxr->rx_tpa[j].agg_arr = NULL;
+ }
}
kfree(rxr->rx_tpa);
rxr->rx_tpa = NULL;
@@ -2765,14 +2767,13 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
static int bnxt_alloc_tpa_info(struct bnxt *bp)
{
- int i, j, total_aggs = 0;
+ int i, j;
bp->max_tpa = MAX_TPA;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
if (!bp->max_tpa_v2)
return 0;
bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
- total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
}
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -2786,12 +2787,12 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
if (!(bp->flags & BNXT_FLAG_CHIP_P5))
continue;
- agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
- rxr->rx_tpa[0].agg_arr = agg;
- if (!agg)
- return -ENOMEM;
- for (j = 1; j < bp->max_tpa; j++)
- rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
+ for (j = 0; j < bp->max_tpa; j++) {
+ agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+ if (!agg)
+ return -ENOMEM;
+ rxr->rx_tpa[j].agg_arr = agg;
+ }
rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
GFP_KERNEL);
if (!rxr->rx_tpa_idx_map)
@@ -7780,6 +7781,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
goto err_out;
}
+ if (BNXT_VF(bp))
+ bnxt_hwrm_func_qcfg(bp);
+
rc = bnxt_setup_vnic(bp, 0);
if (rc)
goto err_out;
@@ -8205,10 +8209,14 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
return rc;
}
- if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+ if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
+ bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ if (bp->tx_nr_rings_xdp)
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
+ else
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
return -ENOMEM;
}
return 0;
@@ -10332,6 +10340,8 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_cfg_ntp_filters(bp);
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
+ if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
+ netdev_info(bp->dev, "Receive PF driver unload event!\n");
if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
bnxt_hwrm_tunnel_dst_port_alloc(
bp, bp->vxlan_port,
@@ -11182,8 +11192,8 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rcu_read_lock();
hlist_for_each_entry_rcu(fltr, head, hash) {
if (bnxt_fltr_match(fltr, new_fltr)) {
+ rc = fltr->sw_id;
rcu_read_unlock();
- rc = 0;
goto err_free;
}
}
@@ -11258,8 +11268,6 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
}
}
}
- if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
- netdev_info(bp->dev, "Receive PF driver unload event!");
}
#else
@@ -12232,8 +12240,16 @@ static struct pci_driver bnxt_pci_driver = {
static int __init bnxt_init(void)
{
+ int err;
+
bnxt_debug_init();
- return pci_register_driver(&bnxt_pci_driver);
+ err = pci_register_driver(&bnxt_pci_driver);
+ if (err) {
+ bnxt_debug_exit();
+ return err;
+ }
+
+ return 0;
}
static void __exit bnxt_exit(void)
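
bnxt_init() above gains the canonical unwind: each init step that succeeded must be undone, in reverse order, when a later step fails. A compact sketch of the pattern (the two stage names are invented):

#include <stdio.h>

static void debug_init(void) { printf("debug up\n"); }
static void debug_exit(void) { printf("debug down\n"); }
static int register_drv(int fail) { return fail ? -19 : 0; }

static int init(int fail)
{
	int err;

	debug_init();
	err = register_drv(fail);
	if (err) {
		debug_exit();	/* unwind the step that did succeed */
		return err;
	}
	return 0;
}

int main(void)
{
	return init(1) ? 1 : 0;	/* prints "debug up" then "debug down" */
}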
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index d74c6a34b936..24282a426481 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -124,7 +124,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
}
reset_coalesce:
- if (netif_running(dev)) {
+ if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
if (update_stats) {
rc = bnxt_close_nic(bp, true, false);
if (!rc)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 452be9749827..3434ad6824a0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -597,7 +597,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
if (bp->flags & BNXT_FLAG_CHIP_P5)
- hw_resc->max_irqs -= vf_msix * n;
+ hw_resc->max_nqs -= vf_msix;
rc = pf->active_vfs;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index e03e2bfcc6a1..380bf7a328ba 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1018,7 +1018,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
}
}
-static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
+ bool tx_lpi_enabled)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
@@ -1038,7 +1039,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
/* Enable EEE and switch to a 27Mhz clock automatically */
reg = bcmgenet_readl(priv->base + off);
- if (enable)
+ if (tx_lpi_enabled)
reg |= TBUF_EEE_EN | TBUF_PM_EN;
else
reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
@@ -1059,6 +1060,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
priv->eee.eee_enabled = enable;
priv->eee.eee_active = enable;
+ priv->eee.tx_lpi_enabled = tx_lpi_enabled;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
@@ -1074,6 +1076,7 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
e->eee_enabled = p->eee_enabled;
e->eee_active = p->eee_active;
+ e->tx_lpi_enabled = p->tx_lpi_enabled;
e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
return phy_ethtool_get_eee(dev->phydev, e);
@@ -1083,7 +1086,6 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct ethtool_eee *p = &priv->eee;
- int ret = 0;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1094,16 +1096,11 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
p->eee_enabled = e->eee_enabled;
if (!p->eee_enabled) {
- bcmgenet_eee_enable_set(dev, false);
+ bcmgenet_eee_enable_set(dev, false, false);
} else {
- ret = phy_init_eee(dev->phydev, 0);
- if (ret) {
- netif_err(priv, hw, dev, "EEE initialization failed\n");
- return ret;
- }
-
+ p->eee_active = phy_init_eee(dev->phydev, false) >= 0;
bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
- bcmgenet_eee_enable_set(dev, true);
+ bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled);
}
return phy_ethtool_set_eee(dev->phydev, e);
@@ -1648,8 +1645,10 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
/* Note: if we ever change from DMA_TX_APPEND_CRC below we
* will need to restore software padding of "runt" packets
*/
+ len_stat |= DMA_TX_APPEND_CRC;
+
if (!i) {
- len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
+ len_stat |= DMA_SOP;
if (skb->ip_summed == CHECKSUM_PARTIAL)
len_stat |= DMA_TX_DO_CSUM;
}
@@ -1823,6 +1822,14 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
__func__, p_index, ring->c_index,
ring->read_ptr, dma_length_status);
+ if (unlikely(len > RX_BUF_LENGTH)) {
+ netif_err(priv, rx_status, dev, "oversized packet\n");
+ dev->stats.rx_length_errors++;
+ dev->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
@@ -2965,7 +2972,7 @@ err_clk_disable:
return ret;
}
-static void bcmgenet_netif_stop(struct net_device *dev)
+static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -2980,7 +2987,8 @@ static void bcmgenet_netif_stop(struct net_device *dev)
/* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
- phy_stop(dev->phydev);
+ if (stop_phy)
+ phy_stop(dev->phydev);
bcmgenet_disable_rx_napi(priv);
bcmgenet_intr_disable(priv);
@@ -3006,7 +3014,7 @@ static int bcmgenet_close(struct net_device *dev)
netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
- bcmgenet_netif_stop(dev);
+ bcmgenet_netif_stop(dev, false);
/* Really kill the PHY state machine and disconnect from it */
phy_disconnect(dev->phydev);
@@ -3677,9 +3685,6 @@ static int bcmgenet_resume(struct device *d)
if (!device_may_wakeup(d))
phy_resume(dev->phydev);
- if (priv->eee.eee_enabled)
- bcmgenet_eee_enable_set(dev, true);
-
bcmgenet_netif_start(dev);
netif_device_attach(dev);
@@ -3704,7 +3709,7 @@ static int bcmgenet_suspend(struct device *d)
netif_device_detach(dev);
- bcmgenet_netif_stop(dev);
+ bcmgenet_netif_stop(dev, true);
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 5b7c2f9241d0..29bf256d13f6 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -736,4 +736,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode);
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
+ bool tx_lpi_enabled);
+
#endif /* __BCMGENET_H__ */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index ce569b7d3b35..026f00ccaa0c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -25,6 +25,7 @@
#include "bcmgenet.h"
+
/* setup netdev link state when PHY link status change and
* update UMAC and RGMII block when link up
*/
@@ -96,6 +97,11 @@ void bcmgenet_mii_setup(struct net_device *dev)
CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
reg |= cmd_bits;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+ bcmgenet_eee_enable_set(dev,
+ priv->eee.eee_enabled && priv->eee.eee_active,
+ priv->eee.tx_lpi_enabled);
} else {
/* done if nothing has changed */
if (!status_changed)
@@ -565,7 +571,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
};
phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
- if (!phydev || IS_ERR(phydev)) {
+ if (IS_ERR(phydev)) {
dev_err(kdev, "failed to register fixed PHY device\n");
return -ENODEV;
}
@@ -618,5 +624,7 @@ void bcmgenet_mii_exit(struct net_device *dev)
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
of_node_put(priv->phy_dn);
+ clk_prepare_enable(priv->clk);
platform_device_unregister(priv->mii_pdev);
+ clk_disable_unprepare(priv->clk);
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 70bd79dc43f2..4847441cf161 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -230,6 +230,7 @@ MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
+MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
@@ -6459,6 +6460,14 @@ static void tg3_dump_state(struct tg3 *tp)
int i;
u32 *regs;
+ /* If it is a PCI error, all registers will read 0xffff;
+ * don't dump them out, just report the error and return.
+ */
+ if (tp->pdev->error_state != pci_channel_io_normal) {
+ netdev_err(tp->dev, "PCI channel ERROR!\n");
+ return;
+ }
+
regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
if (!regs)
return;
@@ -6869,7 +6878,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
desc_idx, *post_ptr);
drop_it_no_recycle:
/* Other statistics kept track of by card. */
- tp->rx_dropped++;
+ tnapi->rx_dropped++;
goto next_pkt;
}
@@ -7895,8 +7904,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
segs = skb_gso_segment(skb, tp->dev->features &
~(NETIF_F_TSO | NETIF_F_TSO6));
- if (IS_ERR(segs) || !segs)
+ if (IS_ERR(segs) || !segs) {
+ tnapi->tx_dropped++;
goto tg3_tso_bug_end;
+ }
do {
nskb = segs;
@@ -8168,7 +8179,7 @@ dma_error:
drop:
dev_kfree_skb_any(skb);
drop_nofree:
- tp->tx_dropped++;
+ tnapi->tx_dropped++;
return NETDEV_TX_OK;
}
@@ -9347,7 +9358,7 @@ static void __tg3_set_rx_mode(struct net_device *);
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
- int err;
+ int err, i;
tg3_stop_fw(tp);
@@ -9368,6 +9379,13 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
/* And make sure the next sample is new data */
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+
+ for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ tnapi->rx_dropped = 0;
+ tnapi->tx_dropped = 0;
+ }
}
return err;
@@ -11195,7 +11213,8 @@ static void tg3_reset_task(struct work_struct *work)
rtnl_lock();
tg3_full_lock(tp, 0);
- if (!netif_running(tp->dev)) {
+ if (tp->pcierr_recovery || !netif_running(tp->dev) ||
+ tp->pdev->error_state != pci_channel_io_normal) {
tg3_flag_clear(tp, RESET_TASK_PENDING);
tg3_full_unlock(tp);
rtnl_unlock();
@@ -11924,6 +11943,9 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
struct tg3_hw_stats *hw_stats = tp->hw_stats;
+ unsigned long rx_dropped;
+ unsigned long tx_dropped;
+ int i;
stats->rx_packets = old_stats->rx_packets +
get_stat64(&hw_stats->rx_ucast_packets) +
@@ -11970,8 +11992,26 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
stats->rx_missed_errors = old_stats->rx_missed_errors +
get_stat64(&hw_stats->rx_discards);
- stats->rx_dropped = tp->rx_dropped;
- stats->tx_dropped = tp->tx_dropped;
+ /* Aggregate per-queue counters. The per-queue counters are updated
+ * by a single writer, race-free. The result computed by this loop
+ * might not be 100% accurate (counters can be updated in the middle of
+ * the loop) but the next tg3_get_nstats() will recompute the current
+ * value so it is acceptable.
+ *
+ * Note that these counters wrap around at 4G on 32-bit machines.
+ */
+ rx_dropped = (unsigned long)(old_stats->rx_dropped);
+ tx_dropped = (unsigned long)(old_stats->tx_dropped);
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ rx_dropped += tnapi->rx_dropped;
+ tx_dropped += tnapi->tx_dropped;
+ }
+
+ stats->rx_dropped = rx_dropped;
+ stats->tx_dropped = tx_dropped;
}
static int tg3_get_regs_len(struct net_device *dev)
@@ -18154,7 +18194,10 @@ static void tg3_shutdown(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
+ tg3_reset_task_cancel(tp);
+
rtnl_lock();
+
netif_device_detach(dev);
if (netif_running(dev))
@@ -18164,6 +18207,8 @@ static void tg3_shutdown(struct pci_dev *pdev)
tg3_power_down(tp);
rtnl_unlock();
+
+ pci_disable_device(pdev);
}
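Both tg3_shutdown() above and tg3_io_error_detected() below move tg3_reset_task_cancel() ahead of rtnl_lock(): it is a synchronous cancel of a worker that itself takes rtnl_lock(), so it must run before we take RTNL ourselves, or the cancel can wait forever on a worker that is waiting on us. A standalone model of that rule, with pthreads standing in for the kernel's workqueue and RTNL (an illustration, not the driver's code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static void *reset_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&rtnl);   /* the worker takes "RTNL" too */
	/* ... reset logic ... */
	pthread_mutex_unlock(&rtnl);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, reset_worker, NULL);

	/* Correct order: "cancel" (join) before taking the lock. Joining
	 * while holding rtnl could deadlock if the worker were about to
	 * block on it.
	 */
	pthread_join(worker, NULL);  /* stands in for tg3_reset_task_cancel() */
	pthread_mutex_lock(&rtnl);
	/* ... detach device, power down ... */
	pthread_mutex_unlock(&rtnl);
	puts("shutdown done");
	return 0;
}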
/**
@@ -18183,6 +18228,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
netdev_info(netdev, "PCI I/O error detected\n");
+ /* Want to make sure that the reset task doesn't run */
+ tg3_reset_task_cancel(tp);
+
rtnl_lock();
/* Could be second call or maybe we don't have netdev yet */
@@ -18199,9 +18247,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
tg3_timer_stop(tp);
- /* Want to make sure that the reset task doesn't run */
- tg3_reset_task_cancel(tp);
-
netif_device_detach(netdev);
/* Clean up software state, even if MMIO is blocked */
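The tg3.c changes above replace the global rx_dropped/tx_dropped counters with per-NAPI ones, each bumped by a single writer, and sum them in tg3_get_nstats(). A standalone model of that aggregation (field and function names are stand-ins, not tg3's; compiles as plain C):

#include <stdio.h>

#define NQUEUES 4

struct napi_stats { unsigned long rx_dropped, tx_dropped; };

static struct napi_stats queues[NQUEUES];

/* Sum single-writer per-queue counters; a concurrent update may make
 * one read slightly stale, but the next call recomputes from scratch.
 */
static void get_nstats(unsigned long base_rx, unsigned long base_tx,
		       unsigned long *rx, unsigned long *tx)
{
	*rx = base_rx;
	*tx = base_tx;
	for (int i = 0; i < NQUEUES; i++) {
		*rx += queues[i].rx_dropped;
		*tx += queues[i].tx_dropped;
	}
}

int main(void)
{
	unsigned long rx, tx;

	queues[0].rx_dropped = 3;
	queues[2].tx_dropped = 1;
	get_nstats(0, 0, &rx, &tx);
	printf("rx_dropped=%lu tx_dropped=%lu\n", rx, tx); /* 3 and 1 */
	return 0;
}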
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 6953d0546acb..811627abde5c 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3018,6 +3018,7 @@ struct tg3_napi {
u16 *rx_rcb_prod_idx;
struct tg3_rx_prodring_set prodring;
struct tg3_rx_buffer_desc *rx_rcb;
+ unsigned long rx_dropped;
u32 tx_prod ____cacheline_aligned;
u32 tx_cons;
@@ -3026,6 +3027,7 @@ struct tg3_napi {
u32 prodmbox;
struct tg3_tx_buffer_desc *tx_ring;
struct tg3_tx_ring_info *tx_buffers;
+ unsigned long tx_dropped;
dma_addr_t status_mapping;
dma_addr_t rx_rcb_mapping;
@@ -3219,8 +3221,6 @@ struct tg3 {
/* begin "everything else" cacheline(s) section */
- unsigned long rx_dropped;
- unsigned long tx_dropped;
struct rtnl_link_stats64 net_stats_prev;
struct tg3_ethtool_stats estats_prev;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 03983bd46eef..96f65244085b 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -79,6 +79,7 @@
#define GEM_NCFGR 0x0004 /* Network Config */
#define GEM_USRIO 0x000c /* User IO */
#define GEM_DMACFG 0x0010 /* DMA Configuration */
+#define GEM_PBUFRXCUT 0x0044 /* RX Partial Store and Forward */
#define GEM_JML 0x0048 /* Jumbo Max Length */
#define GEM_HRB 0x0080 /* Hash Bottom */
#define GEM_HRT 0x0084 /* Hash Top */
@@ -90,6 +91,9 @@
#define GEM_SA3T 0x009C /* Specific3 Top */
#define GEM_SA4B 0x00A0 /* Specific4 Bottom */
#define GEM_SA4T 0x00A4 /* Specific4 Top */
+#define GEM_WOL 0x00B8 /* Wake on LAN */
+#define GEM_RXPTPUNI 0x00D4 /* PTP RX Unicast address */
+#define GEM_TXPTPUNI 0x00D8 /* PTP TX Unicast address */
#define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
#define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
#define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
@@ -155,6 +159,7 @@
#define GEM_PEFTN 0x01f4 /* PTP Peer Event Frame Tx Ns */
#define GEM_PEFRSL 0x01f8 /* PTP Peer Event Frame Rx Sec Low */
#define GEM_PEFRN 0x01fc /* PTP Peer Event Frame Rx Ns */
+#define GEM_PCSCNTRL 0x0200 /* PCS Control */
#define GEM_DCFG1 0x0280 /* Design Config 1 */
#define GEM_DCFG2 0x0284 /* Design Config 2 */
#define GEM_DCFG3 0x0288 /* Design Config 3 */
@@ -227,6 +232,8 @@
#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
#define MACB_TZQ_SIZE 1
#define MACB_SRTSM_OFFSET 15
+#define MACB_PTPUNI_OFFSET 20
+#define MACB_PTPUNI_SIZE 1
#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */
#define MACB_OSSMODE_SIZE 1
@@ -314,6 +321,11 @@
#define GEM_ADDR64_OFFSET 30 /* Address bus width - 64b or 32b */
#define GEM_ADDR64_SIZE 1
+/* Bitfields in PBUFRXCUT */
+#define GEM_WTRMRK_OFFSET 0 /* Watermark value offset */
+#define GEM_WTRMRK_SIZE 12
+#define GEM_ENCUTTHRU_OFFSET 31 /* Enable RX partial store and forward */
+#define GEM_ENCUTTHRU_SIZE 1
/* Bitfields in NSR */
#define MACB_NSR_LINK_OFFSET 0 /* pcs_link_state */
@@ -376,7 +388,7 @@
#define MACB_PFR_SIZE 1
#define MACB_PTZ_OFFSET 13 /* Enable pause time zero interrupt */
#define MACB_PTZ_SIZE 1
-#define MACB_WOL_OFFSET 14 /* Enable wake-on-lan interrupt */
+#define MACB_WOL_OFFSET 28 /* Enable WOL received interrupt */
#define MACB_WOL_SIZE 1
#define MACB_DRQFR_OFFSET 18 /* PTP Delay Request Frame Received */
#define MACB_DRQFR_SIZE 1
@@ -455,6 +467,10 @@
#define MACB_REV_OFFSET 0
#define MACB_REV_SIZE 16
+/* Bitfields in PCSCNTRL */
+#define GEM_PCSAUTONEG_OFFSET 12
+#define GEM_PCSAUTONEG_SIZE 1
+
/* Bitfields in DCFG1. */
#define GEM_IRQCOR_OFFSET 23
#define GEM_IRQCOR_SIZE 1
@@ -467,7 +483,6 @@
#define GEM_TX_PKT_BUFF_OFFSET 21
#define GEM_TX_PKT_BUFF_SIZE 1
-
/* Bitfields in DCFG5. */
#define GEM_TSU_OFFSET 8
#define GEM_TSU_SIZE 1
@@ -568,6 +583,9 @@
#define GEM_T2OFST_OFFSET 0 /* offset value */
#define GEM_T2OFST_SIZE 7
+/* Bitfields in queue pointer registers */
+#define GEM_RBQP_DISABLE 0x1
+
/* Offset for screener type 2 compare values (T2CMPOFST).
* Note the offset is applied after the specified point,
* e.g. GEM_T2COMPOFST_ETYPE denotes the EtherType field, so an offset
@@ -644,7 +662,12 @@
#define MACB_CAPS_JUMBO 0x00000020
#define MACB_CAPS_GEM_HAS_PTP 0x00000040
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
+#define MACB_CAPS_PCS 0x00000400
+#define MACB_CAPS_PARTIAL_STORE_FORWARD 0x00000800
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
+#define MACB_CAPS_WOL 0x00000200
+#define MACB_CAPS_NEED_TSUCLK 0x00001000
+#define MACB_CAPS_QUEUE_DISABLE 0x00002000
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
@@ -1160,6 +1183,7 @@ struct macb {
u32 (*macb_reg_readl)(struct macb *bp, int offset);
void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
+ struct macb_dma_desc *rx_ring_tieoff;
size_t rx_buffer_size;
unsigned int rx_ring_size;
@@ -1182,6 +1206,8 @@ struct macb {
struct gem_stats gem;
} hw_stats;
+ dma_addr_t rx_ring_tieoff_dma;
+
struct macb_or_gem_ops macbgem_ops;
struct mii_bus *mii_bus;
@@ -1208,6 +1234,9 @@ struct macb {
u32 wol;
+ /* holds the RX watermark value for the pbuf_rxcutthru register */
+ u16 rx_watermark;
+
struct macb_ptp_info *ptp_info; /* macb-ptp interface */
#ifdef MACB_EXT_DESC
uint8_t hw_dma_cap;
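The new PBUFRXCUT and PCSCNTRL fields above follow macb.h's OFFSET/SIZE bitfield convention. The demo below reconstructs the GEM_BF()/GEM_BIT() helpers that consume those pairs (the macro bodies here are an assumption based on the convention; the real definitions live elsewhere in macb.h) and composes a PBUFRXCUT value:

#include <stdio.h>
#include <stdint.h>

#define GEM_WTRMRK_OFFSET    0
#define GEM_WTRMRK_SIZE      12
#define GEM_ENCUTTHRU_OFFSET 31

/* Reconstructed helpers: mask the value to SIZE bits, shift to OFFSET */
#define GEM_BF(name, value) \
	(((value) & ((1u << GEM_##name##_SIZE) - 1)) << GEM_##name##_OFFSET)
#define GEM_BIT(name) (1u << GEM_##name##_OFFSET)

int main(void)
{
	uint32_t pbufrxcut = GEM_BF(WTRMRK, 0x100) | GEM_BIT(ENCUTTHRU);

	printf("PBUFRXCUT = 0x%08x\n", (unsigned)pbufrxcut); /* 0x80000100 */
	return 0;
}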
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 78219a9943a7..846c2c71b13a 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -36,6 +36,8 @@
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
+#include <linux/crc32.h>
+#include <linux/inetdevice.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -82,13 +84,11 @@ struct sifive_fu540_macb_mgmt {
#define GEM_MTU_MIN_SIZE ETH_MIN_MTU
#define MACB_NETIF_LSO NETIF_F_TSO
-#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
-#define MACB_WOL_ENABLED (0x1 << 1)
-
/* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
*/
#define MACB_HALT_TIMEOUT 1230
+#define MACB_PM_TIMEOUT 100 /* ms */
@@ -282,6 +282,9 @@ static void macb_set_hwaddr(struct macb *bp)
top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
macb_or_gem_writel(bp, SA1T, top);
+ gem_writel(bp, RXPTPUNI, bottom);
+ gem_writel(bp, TXPTPUNI, bottom);
+
/* Clear unused address register sets */
macb_or_gem_writel(bp, SA2B, 0);
macb_or_gem_writel(bp, SA2T, 0);
@@ -583,8 +586,8 @@ static int macb_mii_probe(struct net_device *dev)
static int macb_mii_init(struct macb *bp)
{
- struct device_node *np;
- int err = -ENXIO;
+ struct device_node *np, *mdio_np;
+ int err = -ENXIO, i;
/* Enable management port */
macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -601,19 +604,39 @@ static int macb_mii_init(struct macb *bp)
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
- bp->mii_bus->parent = &bp->pdev->dev;
+ bp->mii_bus->parent = &bp->dev->dev;
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
np = bp->pdev->dev.of_node;
- if (np && of_phy_is_fixed_link(np)) {
- if (of_phy_register_fixed_link(np) < 0) {
- dev_err(&bp->pdev->dev,
- "broken fixed-link specification %pOF\n", np);
+ mdio_np = of_get_child_by_name(np, "mdio");
+ if (mdio_np) {
+ of_node_put(mdio_np);
+ err = of_mdiobus_register(bp->mii_bus, mdio_np);
+ if (err)
goto err_out_free_mdiobus;
- }
+ } else if (np) {
+ /* try dt phy registration */
+ err = of_mdiobus_register(bp->mii_bus, np);
+
+ /* fall back to standard PHY registration if no PHY was
+ * found during DT PHY registration
+ */
+ if (!err && !phy_find_first(bp->mii_bus)) {
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan(bp->mii_bus, i);
+ if (IS_ERR(phydev) &&
+ PTR_ERR(phydev) != -ENODEV) {
+ err = PTR_ERR(phydev);
+ break;
+ }
+ }
- err = mdiobus_register(bp->mii_bus);
+ if (err)
+ goto err_out_unregister_bus;
+ }
} else {
err = of_mdiobus_register(bp->mii_bus, np);
}
@@ -719,6 +742,10 @@ static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
}
#endif
addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+#ifdef CONFIG_MACB_USE_HWSTAMP
+ if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
+ addr &= ~GEM_BIT(DMA_RXVALID);
+#endif
return addr;
}
@@ -996,6 +1023,15 @@ static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
*/
}
+static int macb_validate_hw_csum(struct sk_buff *skb)
+{
+ u32 pkt_csum = *((u32 *)&skb->data[skb->len - ETH_FCS_LEN]);
+ u32 csum = ~crc32_le(~0, skb_mac_header(skb),
+ skb->len + ETH_HLEN - ETH_FCS_LEN);
+
+ return (pkt_csum != csum);
+}
+
static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
int budget)
{
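macb_validate_hw_csum() above relies on the standard Ethernet FCS construction: reflected CRC-32 (polynomial 0xEDB88320) with an all-ones seed and final inversion, appended to the frame least-significant byte first. The standalone check below reimplements the kernel's crc32_le() bit by bit to show why the driver's pkt_csum != csum comparison works; note the single u32 load from the tail also assumes a little-endian host:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for the kernel's crc32_le(): reflected CRC-32, poly
 * 0xEDB88320, caller-supplied seed, no final inversion.
 */
static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
	}
	return crc;
}

int main(void)
{
	uint8_t frame[64] = {0};   /* headers + payload, zero-padded */
	size_t data_len = 60;
	uint32_t fcs, pkt_csum, csum;

	/* Transmit side: FCS = ~crc32_le(~0, data), appended LSB first */
	fcs = ~crc32_le_bitwise(~0u, frame, data_len);
	memcpy(frame + data_len, &fcs, 4); /* host order == wire order on LE */

	/* Receive side, as in macb_validate_hw_csum(): recompute over the
	 * frame minus the trailing 4 bytes, compare with the tail word.
	 */
	memcpy(&pkt_csum, frame + data_len, 4);
	csum = ~crc32_le_bitwise(~0u, frame, data_len);
	printf("FCS %s\n", pkt_csum == csum ? "ok" : "bad");
	return 0;
}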
@@ -1057,6 +1093,16 @@ static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
bp->rx_buffer_size, DMA_FROM_DEVICE);
skb->protocol = eth_type_trans(skb, bp->dev);
+
+ /* Validate the MAC FCS if RX checksum offload is disabled */
+ if (!(bp->dev->features & NETIF_F_RXCSUM)) {
+ if (macb_validate_hw_csum(skb)) {
+ netdev_err(bp->dev, "incorrect FCS\n");
+ bp->dev->stats.rx_dropped++;
+ break;
+ }
+ }
+
skb_checksum_none_assert(skb);
if (bp->dev->features & NETIF_F_RXCSUM &&
!(bp->dev->flags & IFF_PROMISC) &&
@@ -1154,6 +1200,19 @@ static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
break;
}
+ /* Validate the MAC FCS if RX checksum offload is disabled */
+ if (!(bp->dev->features & NETIF_F_RXCSUM)) {
+ if (macb_validate_hw_csum(skb)) {
+ netdev_err(bp->dev, "incorrect FCS\n");
+ bp->dev->stats.rx_dropped++;
+
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ return 1;
+ }
+ }
+
/* Make descriptor updates visible to hardware */
wmb();
@@ -1411,6 +1470,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
spin_lock(&bp->lock);
while (status) {
+ if (status & MACB_BIT(WOL)) {
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(WOL));
+ break;
+ }
+
/* close possible race with dev_close */
if (unlikely(!netif_running(dev))) {
queue_writel(queue, IDR, -1);
@@ -1661,7 +1726,8 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BF(TX_LSO, lso_ctrl);
ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
if ((bp->dev->features & NETIF_F_HW_CSUM) &&
- skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
+ skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
+ (skb->data_len == 0))
ctrl |= MACB_BIT(TX_NOCRC);
} else
/* Only set MSS/MFS on payload descriptors
@@ -1752,23 +1818,21 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
skb_is_nonlinear(*skb);
int padlen = ETH_ZLEN - (*skb)->len;
- int headroom = skb_headroom(*skb);
int tailroom = skb_tailroom(*skb);
struct sk_buff *nskb;
u32 fcs;
+ /* Not available for GSO and fragments */
if (!(ndev->features & NETIF_F_HW_CSUM) ||
!((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
- skb_shinfo(*skb)->gso_size) /* Not available for GSO */
+ skb_shinfo(*skb)->gso_size ||
+ ((*skb)->data_len > 0))
return 0;
if (padlen <= 0) {
/* FCS could be appended to tailroom. */
if (tailroom >= ETH_FCS_LEN)
goto add_fcs;
- /* FCS could be appeded by moving data to headroom. */
- else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
- padlen = 0;
/* No room for FCS, need to reallocate skb. */
else
padlen = ETH_FCS_LEN;
@@ -1777,10 +1841,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
padlen += ETH_FCS_LEN;
}
- if (!cloned && headroom + tailroom >= padlen) {
- (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
- skb_set_tail_pointer(*skb, (*skb)->len);
- } else {
+ if (cloned || tailroom < padlen) {
nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
if (!nskb)
return -ENOMEM;
@@ -1813,7 +1874,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
unsigned int desc_cnt, nr_frags, frag_size, f;
unsigned int hdrlen;
- bool is_lso, is_udp = 0;
+ bool is_lso, is_udp = false;
netdev_tx_t ret = NETDEV_TX_OK;
if (macb_clear_csum(skb)) {
@@ -1975,6 +2036,12 @@ static void macb_free_consistent(struct macb *bp)
bp->macbgem_ops.mog_free_rx_buffers(bp);
+ if (bp->rx_ring_tieoff) {
+ dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
+ bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
+ bp->rx_ring_tieoff = NULL;
+ }
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
kfree(queue->tx_skb);
queue->tx_skb = NULL;
@@ -2064,6 +2131,16 @@ static int macb_alloc_consistent(struct macb *bp)
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
+ /* Allocate a tie-off descriptor, required for PM cases */
+ if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) {
+ bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
+ macb_dma_desc_get_size(bp),
+ &bp->rx_ring_tieoff_dma,
+ GFP_KERNEL);
+ if (!bp->rx_ring_tieoff)
+ goto out_err;
+ }
+
return 0;
out_err:
@@ -2071,6 +2148,19 @@ out_err:
return -ENOMEM;
}
+static void macb_init_tieoff(struct macb *bp)
+{
+ struct macb_dma_desc *d = bp->rx_ring_tieoff;
+
+ if (bp->num_queues > 1) {
+ /* Setup a wrapping descriptor with no free slots
+ * (WRAP and USED) to tie off/disable unused RX queues.
+ */
+ macb_set_addr(bp, d, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED));
+ d->ctrl = 0;
+ }
+}
+
static void gem_init_rings(struct macb *bp)
{
struct macb_queue *queue;
@@ -2094,6 +2184,9 @@ static void gem_init_rings(struct macb *bp)
gem_rx_refill(queue);
}
+ if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE))
+ macb_init_tieoff(bp);
+
}
static void macb_init_rings(struct macb *bp)
@@ -2111,6 +2204,8 @@ static void macb_init_rings(struct macb *bp)
bp->queues[0].tx_head = 0;
bp->queues[0].tx_tail = 0;
desc->ctrl |= MACB_BIT(TX_WRAP);
+
+ macb_init_tieoff(bp);
}
static void macb_reset_hw(struct macb *bp)
@@ -2133,6 +2228,10 @@ static void macb_reset_hw(struct macb *bp)
macb_writel(bp, TSR, -1);
macb_writel(bp, RSR, -1);
+ /* Disable RX partial store and forward and reset watermark value */
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
+ gem_writel(bp, PBUFRXCUT, 0xFFF);
+
/* Disable all interrupts */
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, IDR, -1);
@@ -2272,7 +2371,11 @@ static void macb_init_hw(struct macb *bp)
config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
config |= MACB_BIT(PAE); /* PAuse Enable */
- config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
+
+ /* Do not discard the RX FCS if RX checksum offload is disabled */
+ if (bp->dev->features & NETIF_F_RXCSUM)
+ config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
+
if (bp->caps & MACB_CAPS_JUMBO)
config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
else
@@ -2288,13 +2391,24 @@ static void macb_init_hw(struct macb *bp)
if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
gem_writel(bp, JML, bp->jumbo_max_len);
bp->speed = SPEED_10;
- bp->duplex = DUPLEX_HALF;
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
+ bp->duplex = DUPLEX_FULL;
+ else
+ bp->duplex = DUPLEX_HALF;
bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
if (bp->caps & MACB_CAPS_JUMBO)
bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
macb_configure_dma(bp);
+ /* Enable RX partial store and forward and set watermark */
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) {
+ gem_writel(bp, PBUFRXCUT,
+ (gem_readl(bp, PBUFRXCUT) &
+ GEM_BF(WTRMRK, bp->rx_watermark)) |
+ GEM_BIT(ENCUTTHRU));
+ }
+
/* Initialize TX and RX buffers */
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
@@ -2315,8 +2429,14 @@ static void macb_init_hw(struct macb *bp)
MACB_BIT(HRESP));
}
+ if ((bp->phy_interface == PHY_INTERFACE_MODE_SGMII) &&
+ (bp->caps & MACB_CAPS_PCS))
+ gem_writel(bp, PCSCNTRL,
+ gem_readl(bp, PCSCNTRL) | GEM_BIT(PCSAUTONEG));
+
/* Enable TX and RX */
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE) |
+ MACB_BIT(PTPUNI));
}
/* The hash address register is 64 bits long and takes up two
@@ -2452,6 +2572,10 @@ static int macb_open(struct net_device *dev)
if (err < 0)
goto pm_exit;
+ err = pm_runtime_get_sync(&bp->pdev->dev);
+ if (err < 0)
+ return err;
+
/* carrier starts down */
netif_carrier_off(dev);
@@ -2520,6 +2644,8 @@ static int macb_close(struct net_device *dev)
pm_runtime_put(&bp->pdev->dev);
+ pm_runtime_put(&bp->pdev->dev);
+
return 0;
}
@@ -2736,38 +2862,6 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs_buff[13] = gem_readl(bp, DMACFG);
}
-static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
- struct macb *bp = netdev_priv(netdev);
-
- wol->supported = 0;
- wol->wolopts = 0;
-
- if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
- wol->supported = WAKE_MAGIC;
-
- if (bp->wol & MACB_WOL_ENABLED)
- wol->wolopts |= WAKE_MAGIC;
- }
-}
-
-static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
- struct macb *bp = netdev_priv(netdev);
-
- if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
- (wol->wolopts & ~WAKE_MAGIC))
- return -EOPNOTSUPP;
-
- if (wol->wolopts & WAKE_MAGIC)
- bp->wol |= MACB_WOL_ENABLED;
- else
- bp->wol &= ~MACB_WOL_ENABLED;
-
- device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
-
- return 0;
-}
static void macb_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
@@ -3206,8 +3300,6 @@ static const struct ethtool_ops macb_ethtool_ops = {
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
- .get_wol = macb_get_wol,
- .set_wol = macb_set_wol,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
@@ -3362,10 +3454,29 @@ static void macb_configure_caps(struct macb *bp,
const struct macb_config *dt_conf)
{
u32 dcfg;
+ int retval;
if (dt_conf)
bp->caps = dt_conf->caps;
+ /* Partial store and forward mode is the default for zynqmp.
+ * Disable it if no valid watermark is given in the devicetree.
+ */
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) {
+ retval = of_property_read_u16(bp->pdev->dev.of_node,
+ "rx-watermark",
+ &bp->rx_watermark);
+
+ /* Disable partial store and forward in case of error or
+ * invalid watermark value
+ */
+ if (retval || bp->rx_watermark > 0xFFF) {
+ dev_info(&bp->pdev->dev,
+ "Not enabling partial store and forward\n");
+ bp->caps &= ~MACB_CAPS_PARTIAL_STORE_FORWARD;
+ }
+ }
+
if (hw_is_gem(bp->regs, bp->native_io)) {
bp->caps |= MACB_CAPS_MACB_IS_GEM;
@@ -3423,18 +3534,9 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
struct clk **hclk, struct clk **tx_clk,
struct clk **rx_clk, struct clk **tsu_clk)
{
- struct macb_platform_data *pdata;
int err;
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata) {
- *pclk = pdata->pclk;
- *hclk = pdata->hclk;
- } else {
- *pclk = devm_clk_get(&pdev->dev, "pclk");
- *hclk = devm_clk_get(&pdev->dev, "hclk");
- }
-
+ *pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR_OR_NULL(*pclk)) {
err = PTR_ERR(*pclk);
if (!err)
@@ -3444,6 +3546,7 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
return err;
}
+ *hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR_OR_NULL(*hclk)) {
err = PTR_ERR(*hclk);
if (!err)
@@ -3465,6 +3568,10 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
if (IS_ERR(*tsu_clk))
return PTR_ERR(*tsu_clk);
+ *tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk");
+ if (IS_ERR(*tsu_clk))
+ *tsu_clk = NULL;
+
err = clk_prepare_enable(*pclk);
if (err) {
dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
@@ -3613,6 +3720,8 @@ static int macb_init(struct platform_device *pdev)
/* Checksum offload is only available on gem with packet buffer */
if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
+ dev->hw_features &= ~NETIF_F_RXCSUM;
if (bp->caps & MACB_CAPS_SG_DISABLED)
dev->hw_features &= ~NETIF_F_SG;
dev->features = dev->hw_features;
@@ -3664,6 +3773,11 @@ static int macb_init(struct platform_device *pdev)
val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
macb_writel(bp, NCFGR, val);
+ if ((bp->phy_interface == PHY_INTERFACE_MODE_SGMII) &&
+ (bp->caps & MACB_CAPS_PCS))
+ gem_writel(bp, PCSCNTRL,
+ gem_readl(bp, PCSCNTRL) | GEM_BIT(PCSAUTONEG));
+
return 0;
}
@@ -4174,7 +4288,20 @@ static const struct macb_config np4_config = {
static const struct macb_config zynqmp_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH |
+ MACB_CAPS_PCS |
+ MACB_CAPS_PARTIAL_STORE_FORWARD | MACB_CAPS_WOL,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .jumbo_max_len = 10240,
+};
+
+static const struct macb_config versal_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH |
+ MACB_CAPS_PCS | MACB_CAPS_PARTIAL_STORE_FORWARD |
+ MACB_CAPS_WOL | MACB_CAPS_NEED_TSUCLK | MACB_CAPS_QUEUE_DISABLE,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4205,6 +4332,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,emac", .data = &emac_config },
{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
+ { .compatible = "cdns,versal-gem", .data = &versal_config},
{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
{ /* sentinel */ }
};
@@ -4229,6 +4357,7 @@ static int macb_probe(struct platform_device *pdev)
struct clk **) = macb_config->clk_init;
int (*init)(struct platform_device *) = macb_config->init;
struct device_node *np = pdev->dev.of_node;
+ struct device_node *phy_node;
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
struct clk *tsu_clk = NULL;
unsigned int queue_mask, num_queues;
@@ -4300,14 +4429,12 @@ static int macb_probe(struct platform_device *pdev)
bp->tx_clk = tx_clk;
bp->rx_clk = rx_clk;
bp->tsu_clk = tsu_clk;
+ if (tsu_clk)
+ bp->tsu_rate = clk_get_rate(tsu_clk);
+
if (macb_config)
bp->jumbo_max_len = macb_config->jumbo_max_len;
- bp->wol = 0;
- if (of_get_property(np, "magic-packet", NULL))
- bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
- device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
-
spin_lock_init(&bp->lock);
/* setup capabilities */
@@ -4360,6 +4487,18 @@ static int macb_probe(struct platform_device *pdev)
macb_get_hwaddr(bp);
}
+ /* Power up the PHY if there is a GPIO reset */
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!phy_node && of_phy_is_fixed_link(np)) {
+ err = of_phy_register_fixed_link(np);
+ if (err < 0) {
+ dev_err(&pdev->dev, "broken fixed-link specification\n");
+ goto err_out_free_netdev;
+ }
+ phy_node = of_node_get(np);
+ }
+ bp->phy_node = phy_node;
+
err = of_get_phy_mode(np);
if (err < 0)
/* not found in DT, MII by default */
@@ -4370,25 +4509,28 @@ static int macb_probe(struct platform_device *pdev)
/* IP specific init */
err = init(pdev);
if (err)
- goto err_out_free_netdev;
+ goto err_out_phy_put;
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_phy_put;
+ }
err = macb_mii_init(bp);
if (err)
- goto err_out_free_netdev;
+ goto err_out_unregister_netdev;
phydev = dev->phydev;
netif_carrier_off(dev);
- err = register_netdev(dev);
- if (err) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_unregister_mdio;
- }
-
tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
(unsigned long)bp);
+ if (bp->caps & MACB_CAPS_WOL)
+ device_set_wakeup_capable(&bp->dev->dev, 1);
+
phy_attached_info(phydev);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
@@ -4400,13 +4542,13 @@ static int macb_probe(struct platform_device *pdev)
return 0;
-err_out_unregister_mdio:
- phy_disconnect(dev->phydev);
- mdiobus_unregister(bp->mii_bus);
+err_out_unregister_netdev:
+ unregister_netdev(dev);
+
+err_out_phy_put:
of_node_put(bp->phy_node);
if (np && of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
- mdiobus_free(bp->mii_bus);
err_out_free_netdev:
free_netdev(dev);
@@ -4468,16 +4610,53 @@ static int __maybe_unused macb_suspend(struct device *dev)
struct macb_queue *queue = bp->queues;
unsigned long flags;
unsigned int q;
+ u32 ctrl, arpipmask;
if (!netif_running(netdev))
return 0;
+ if (device_may_wakeup(&bp->dev->dev)) {
+ spin_lock_irqsave(&bp->lock, flags);
+ ctrl = macb_readl(bp, NCR);
+ ctrl &= ~(MACB_BIT(TE) | MACB_BIT(RE));
+ macb_writel(bp, NCR, ctrl);
+ /* Tie off RX queues */
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue) {
+ if (bp->caps & MACB_CAPS_QUEUE_DISABLE)
+ queue_writel(queue, RBQP, GEM_RBQP_DISABLE);
+ else
+ queue_writel(queue, RBQP,
+ lower_32_bits(bp->rx_ring_tieoff_dma));
+ }
+ ctrl = macb_readl(bp, NCR);
+ ctrl |= MACB_BIT(RE);
+ macb_writel(bp, NCR, ctrl);
+ gem_writel(bp, NCFGR, gem_readl(bp, NCFGR) & ~MACB_BIT(NBC));
+ macb_writel(bp, TSR, -1);
+ macb_writel(bp, RSR, -1);
+ macb_readl(bp, ISR);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, -1);
- if (bp->wol & MACB_WOL_ENABLED) {
+ /* Enable WOL (Q0 only) and disable all other interrupts */
macb_writel(bp, IER, MACB_BIT(WOL));
- macb_writel(bp, WOL, MACB_BIT(MAG));
+ for (q = 1, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue) {
+ queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+
+ arpipmask = cpu_to_be32p(&bp->dev->ip_ptr->ifa_list->ifa_local)
+ & 0xFFFF;
+ gem_writel(bp, WOL, MACB_BIT(ARP) | arpipmask);
+ spin_unlock_irqrestore(&bp->lock, flags);
enable_irq_wake(bp->queues[0].irq);
netif_device_detach(netdev);
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue)
+ napi_disable(&queue->napi);
} else {
netif_device_detach(netdev);
for (q = 0, queue = bp->queues; q < bp->num_queues;
@@ -4510,6 +4689,7 @@ static int __maybe_unused macb_resume(struct device *dev)
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
struct macb_queue *queue = bp->queues;
+ unsigned long flags;
unsigned int q;
if (!netif_running(netdev))
@@ -4518,10 +4698,20 @@ static int __maybe_unused macb_resume(struct device *dev)
if (!device_may_wakeup(dev))
pm_runtime_force_resume(dev);
- if (bp->wol & MACB_WOL_ENABLED) {
+ if (device_may_wakeup(&bp->dev->dev)) {
+ spin_lock_irqsave(&bp->lock, flags);
macb_writel(bp, IDR, MACB_BIT(WOL));
- macb_writel(bp, WOL, 0);
+ gem_writel(bp, WOL, 0);
+ /* Clear Q0 ISR as WOL was enabled on Q0 */
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, -1);
disable_irq_wake(bp->queues[0].irq);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ macb_writel(bp, NCR, MACB_BIT(MPE));
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue)
+ napi_enable(&queue->napi);
+ netif_carrier_on(netdev);
} else {
macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -4561,7 +4751,10 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
clk_disable_unprepare(bp->pclk);
clk_disable_unprepare(bp->rx_clk);
}
- clk_disable_unprepare(bp->tsu_clk);
+
+ if (!(device_may_wakeup(&bp->dev->dev) &&
+ (bp->caps & MACB_CAPS_NEED_TSUCLK)))
+ clk_disable_unprepare(bp->tsu_clk);
return 0;
}
@@ -4577,7 +4770,10 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
clk_prepare_enable(bp->tx_clk);
clk_prepare_enable(bp->rx_clk);
}
- clk_prepare_enable(bp->tsu_clk);
+
+ if (!(device_may_wakeup(&bp->dev->dev) &&
+ (bp->caps & MACB_CAPS_NEED_TSUCLK)))
+ clk_prepare_enable(bp->tsu_clk);
return 0;
}
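The macb_suspend() path above arms GEM wake-on-LAN with an ARP match: the low 16 bits of the interface's IPv4 address, taken in network byte order, are written into GEM_WOL alongside MACB_BIT(ARP). A standalone model of that mask computation (inet_addr()/ntohl() stand in for the kernel's __be32 handling):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	/* inet_addr() returns the address in network byte order, like
	 * the kernel's ifa_local (__be32).
	 */
	uint32_t ifa_local = inet_addr("192.168.1.42");

	/* Portable equivalent of cpu_to_be32p(&ifa_local) & 0xFFFF:
	 * the last two octets of the address, here 0x012a.
	 */
	uint32_t arpipmask = ntohl(ifa_local) & 0xFFFF;

	printf("arpipmask = 0x%04x\n", arpipmask);
	return 0;
}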
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index f5aec28c7730..464da742ea66 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -242,6 +242,7 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
u32 dma_desc_ts_2, struct timespec64 *ts)
{
struct timespec64 tsu;
+ bool sec_rollover = false;
ts->tv_sec = (GEM_BFEXT(DMA_SECH, dma_desc_ts_2) << GEM_DMA_SECL_SIZE) |
GEM_BFEXT(DMA_SECL, dma_desc_ts_1);
@@ -259,9 +260,12 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
*/
if ((ts->tv_sec & (GEM_DMA_SEC_TOP >> 1)) &&
!(tsu.tv_sec & (GEM_DMA_SEC_TOP >> 1)))
- ts->tv_sec -= GEM_DMA_SEC_TOP;
+ sec_rollover = true;
+
+ ts->tv_sec |= ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);
- ts->tv_sec += ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);
+ if (sec_rollover)
+ ts->tv_sec -= GEM_DMA_SEC_TOP;
return 0;
}
@@ -364,7 +368,6 @@ void gem_ptp_init(struct net_device *dev)
bp->ptp_clock_info = gem_ptp_caps_template;
/* nominal frequency and maximum adjustment in ppb */
- bp->tsu_rate = bp->ptp_info->get_tsu_rate(bp);
bp->ptp_clock_info.max_adj = bp->ptp_info->get_ptp_max_adj();
gem_ptp_init_timer(bp);
bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &dev->dev);
@@ -394,11 +397,14 @@ void gem_ptp_init(struct net_device *dev)
void gem_ptp_remove(struct net_device *ndev)
{
struct macb *bp = netdev_priv(ndev);
+ unsigned long flags;
if (bp->ptp_clock)
ptp_clock_unregister(bp->ptp_clock);
+ spin_lock_irqsave(&bp->tsu_clk_lock, flags);
gem_ptp_clear_timer(bp);
+ spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
dev_info(&bp->pdev->dev, "%s ptp clock unregistered.\n",
GEM_PTP_TIMER_NAME);
@@ -471,7 +477,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
return -ERANGE;
/* fall through */
case HWTSTAMP_TX_ON:
- tx_bd_control = TSTAMP_ALL_FRAMES;
+ tx_bd_control = TSTAMP_ALL_PTP_FRAMES;
break;
default:
return -ERANGE;
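The gem_hw_timestamp() change above reorders the rollover correction: the TSU's upper second bits are merged into the descriptor's truncated seconds first, and only then is one full period subtracted if the TSU wrapped after the frame was stamped. A standalone model with an illustrative 6-bit seconds field (not the GEM's real widths):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SEC_MASK 0x3Full          /* low 6 bits live in the descriptor */
#define SEC_TOP  (SEC_MASK + 1)

static uint64_t widen_seconds(uint64_t desc_sec, uint64_t tsu_sec)
{
	/* descriptor in the top half of its range, TSU already wrapped
	 * into the bottom half: the stamp predates the wrap
	 */
	bool rollover = (desc_sec & (SEC_TOP >> 1)) &&
			!(tsu_sec & (SEC_TOP >> 1));
	uint64_t sec = desc_sec | (~SEC_MASK & tsu_sec); /* merge first */

	if (rollover)
		sec -= SEC_TOP;   /* then correct by one period */
	return sec;
}

int main(void)
{
	/* stamped at second 63 (all low bits set), read after the TSU
	 * wrapped to 64: the widened result must still be 63
	 */
	printf("%llu\n", (unsigned long long)widen_seconds(63, 64));
	return 0;
}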
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index d0c77ff9dbb1..ab86240e4532 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1836,13 +1836,10 @@ static int liquidio_open(struct net_device *netdev)
ifstate_set(lio, LIO_IFSTATE_RUNNING);
- if (OCTEON_CN23XX_PF(oct)) {
- if (!oct->msix_on)
- if (setup_tx_poll_fn(netdev))
- return -1;
- } else {
- if (setup_tx_poll_fn(netdev))
- return -1;
+ if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) {
+ ret = setup_tx_poll_fn(netdev);
+ if (ret)
+ goto err_poll;
}
netif_tx_start_all_queues(netdev);
@@ -1855,7 +1852,7 @@ static int liquidio_open(struct net_device *netdev)
/* tell Octeon to start forwarding packets to host */
ret = send_rx_ctrl_cmd(lio, 1);
if (ret)
- return ret;
+ goto err_rx_ctrl;
/* start periodical statistics fetch */
INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
@@ -1866,6 +1863,27 @@ static int liquidio_open(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
netdev->name);
+ return 0;
+
+err_rx_ctrl:
+ if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
+ cleanup_tx_poll_fn(netdev);
+err_poll:
+ if (lio->ptp_clock) {
+ ptp_clock_unregister(lio->ptp_clock);
+ lio->ptp_clock = NULL;
+ }
+
+ if (oct->props[lio->ifidx].napi_enabled == 1) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 0;
+ }
+
return ret;
}
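The liquidio_open() rework above converts early returns into a goto ladder so every failure point undoes exactly the setup that preceded it, in reverse order. A minimal standalone sketch of the idiom (stub setup/teardown functions, not liquidio's):

#include <stdio.h>

static int  setup_a(void)    { puts("setup a"); return 0; }
static int  setup_b(void)    { puts("setup b"); return 0; }
static int  setup_c(void)    { puts("setup c"); return -1; } /* fails */
static void teardown_b(void) { puts("undo b"); }
static void teardown_a(void) { puts("undo a"); }

static int open_example(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;
	ret = setup_b();
	if (ret)
		goto err_a;
	ret = setup_c();
	if (ret)
		goto err_b;
	return 0;

err_b:             /* unwind strictly in reverse order of setup */
	teardown_b();
err_a:
	teardown_a();
	return ret;
}

int main(void)
{
	printf("open: %d\n", open_example()); /* undo b, undo a, then -1 */
	return 0;
}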
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 27ea528ef448..e861567b2c49 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -2268,7 +2268,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_unregister_interrupts;
+ goto err_destroy_workqueue;
}
nic->msg_enable = debug;
@@ -2277,6 +2277,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+err_destroy_workqueue:
+ destroy_workqueue(nic->nicvf_rx_mode_wq);
err_unregister_interrupts:
nicvf_unregister_interrupts(nic);
err_free_netdev:
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 76ff42ec3ae5..a34c33e8a8ad 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1438,8 +1438,10 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
return AE_OK;
}
- if (strncmp(string.pointer, bgx_sel, 4))
+ if (strncmp(string.pointer, bgx_sel, 4)) {
+ kfree(string.pointer);
return AE_OK;
+ }
acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
bgx_acpi_register_phy, NULL, bgx, NULL);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 97ff8608f0ab..b0fd22cbeef4 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1303,6 +1303,7 @@ static int cxgb_up(struct adapter *adap)
if (ret < 0) {
CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
t3_intr_disable(adap);
+ quiesce_rx(adap);
free_irq_resources(adap);
err = ret;
goto out;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 42374859b9d3..cb3f515559c2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3850,6 +3850,8 @@ int t4_load_phy_fw(struct adapter *adap,
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
&param, &val, 30000);
+ if (ret)
+ return ret;
/* If we have version number support, then check to see that the new
* firmware got loaded properly.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index f4d41f968afa..97963d9a6d16 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -860,7 +860,7 @@ static int cxgb4vf_open(struct net_device *dev)
*/
err = t4vf_update_port_info(pi);
if (err < 0)
- return err;
+ goto err_unwind;
/*
* Note that this interface is up and start everything up ...
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index ebd5c2cf1efe..e799c686594e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -454,7 +454,6 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
break;
default:
return -EINVAL;
- break;
}
fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index a8a8b77c1611..4bcdb48b0e9c 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -432,8 +432,8 @@ static const struct gmac_max_framelen gmac_maxlens[] = {
.val = CONFIG0_MAXLEN_1536,
},
{
- .max_l3_len = 1542,
- .val = CONFIG0_MAXLEN_1542,
+ .max_l3_len = 1548,
+ .val = CONFIG0_MAXLEN_1548,
},
{
.max_l3_len = 9212,
@@ -1152,6 +1152,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
dma_addr_t mapping;
unsigned short mtu;
void *buffer;
+ int ret;
mtu = ETH_HLEN;
mtu += netdev->mtu;
@@ -1166,9 +1167,30 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
word3 |= mtu;
}
- if (skb->ip_summed != CHECKSUM_NONE) {
+ if (skb->len >= ETH_FRAME_LEN) {
+ /* Hardware offloaded checksumming isn't working on frames
+ * bigger than 1514 bytes. A hypothesis about this is that the
+ * checksum buffer is only 1518 bytes, so when the frames get
+ * bigger they get truncated, or the last few bytes get
+ * overwritten by the FCS.
+ *
+ * Just use software checksumming and bypass on bigger frames.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ret = skb_checksum_help(skb);
+ if (ret)
+ return ret;
+ }
+ word1 |= TSS_BYPASS_BIT;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
int tcp = 0;
+ /* We do not switch off checksumming on non-TCP/UDP
+ * frames: as tests show, the checksumming engine is
+ * smart enough to see that a frame is not actually TCP
+ * or UDP and just passes it through unchanged.
+ */
if (skb->protocol == htons(ETH_P_IP)) {
word1 |= TSS_IP_CHKSUM_BIT;
tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
@@ -1993,15 +2015,6 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-static netdev_features_t gmac_fix_features(struct net_device *netdev,
- netdev_features_t features)
-{
- if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
- features &= ~GMAC_OFFLOAD_FEATURES;
-
- return features;
-}
-
static int gmac_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2222,7 +2235,6 @@ static const struct net_device_ops gmac_351x_ops = {
.ndo_set_mac_address = gmac_set_mac_address,
.ndo_get_stats64 = gmac_get_stats64,
.ndo_change_mtu = gmac_change_mtu,
- .ndo_fix_features = gmac_fix_features,
.ndo_set_features = gmac_set_features,
};
@@ -2476,11 +2488,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netdev->hw_features = GMAC_OFFLOAD_FEATURES;
netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
- /* We can handle jumbo frames up to 10236 bytes so, let's accept
- * payloads of 10236 bytes minus VLAN and ethernet header
+ /* We can receive jumbo frames up to 10236 bytes but only
+ * transmit 2047 bytes, so let's accept payloads of 2047
+ * bytes minus the VLAN and Ethernet headers
+ */
*/
netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
+ netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN;
port->freeq_refill = 0;
netif_napi_add(netdev, &port->napi, gmac_napi_poll,
diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
index 9fdf77d5eb37..24bb989981f2 100644
--- a/drivers/net/ethernet/cortina/gemini.h
+++ b/drivers/net/ethernet/cortina/gemini.h
@@ -502,7 +502,7 @@ union gmac_txdesc_3 {
#define SOF_BIT 0x80000000
#define EOF_BIT 0x40000000
#define EOFIE_BIT BIT(29)
-#define MTU_SIZE_BIT_MASK 0x1fff
+#define MTU_SIZE_BIT_MASK 0x7ff /* Max MTU 2047 bytes */
/* GMAC Tx Descriptor */
struct gmac_txdesc {
@@ -787,7 +787,7 @@ union gmac_config0 {
#define CONFIG0_MAXLEN_1536 0
#define CONFIG0_MAXLEN_1518 1
#define CONFIG0_MAXLEN_1522 2
-#define CONFIG0_MAXLEN_1542 3
+#define CONFIG0_MAXLEN_1548 3
#define CONFIG0_MAXLEN_9k 4 /* 9212 */
#define CONFIG0_MAXLEN_10k 5 /* 10236 */
#define CONFIG0_MAXLEN_1518__6 6
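The gemini TX path above checksums frames of ETH_FRAME_LEN (1514) bytes or more in software and sets the bypass bit, since the hardware engine misbehaves past that size. A standalone model of the decision (TSS_BYPASS_BIT's position is illustrative; do_sw_csum() stands in for skb_checksum_help()):

#include <stdbool.h>
#include <stdio.h>

#define ETH_FRAME_LEN  1514
#define TSS_BYPASS_BIT (1u << 30) /* illustrative bit position */

static int do_sw_csum(void) { return 0; } /* skb_checksum_help() stand-in */

static int classify(unsigned int len, bool csum_partial, unsigned int *word1)
{
	if (len >= ETH_FRAME_LEN) {
		/* engine can't handle big frames: checksum in software
		 * if needed, then tell the hardware to bypass
		 */
		if (csum_partial && do_sw_csum())
			return -1;
		*word1 |= TSS_BYPASS_BIT;
	}
	/* else: leave checksumming to the hardware engine */
	return 0;
}

int main(void)
{
	unsigned int w1 = 0;

	classify(1600, true, &w1);
	printf("bypass=%d\n", !!(w1 & TSS_BYPASS_BIT)); /* 1 */
	return 0;
}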
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index e24979010969..da9f9ec3e123 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -553,11 +553,11 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
/* free the buffer */
dev_kfree_skb(skb);
- spin_unlock_irqrestore(&bp->lock, flags);
-
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 552877590a8a..a7a3e2ee0676 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1137,10 +1137,11 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
VLAN_ETH_HLEN : ETH_HLEN;
if (skb->len <= 60 &&
- (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
- is_ipv4_pkt(skb)) {
+ (lancer_chip(adapter) || BE3_chip(adapter) ||
+ skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
ip = (struct iphdr *)ip_hdr(skb);
- pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
+ if (unlikely(pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len))))
+ goto tx_drop;
}
/* If vlan tag is already inlined in the packet, skip HW VLAN
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 34540e604f74..ad0d070f7f17 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -971,8 +971,8 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
buf_array[i] = addr;
/* tracing point */
- trace_dpaa2_eth_buf_seed(priv->net_dev,
- page, DPAA2_ETH_RX_BUF_RAW_SIZE,
+ trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
+ DPAA2_ETH_RX_BUF_RAW_SIZE,
addr, priv->rx_buf_size,
bpid);
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index cee77326e7e8..d6df2dd885c5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1146,7 +1146,12 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
+ /* Also prepare the consumer index in case page allocation never
+ * succeeds. In that case, hardware will never advance producer index
+ * to match consumer index, and will drop all frames.
+ */
enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
+ enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1);
/* enable Rx ints by setting pkt thr to 1 */
enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index bc594892507a..8c3661525694 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -8,7 +8,7 @@
#include "enetc.h"
int enetc_phc_index = -1;
-EXPORT_SYMBOL(enetc_phc_index);
+EXPORT_SYMBOL_GPL(enetc_phc_index);
static struct ptp_clock_info enetc_ptp_caps = {
.owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a31f891d51fb..b55d6ed9aa13 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -626,7 +626,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
bdp->cbd_datlen = cpu_to_fec16(size);
@@ -688,7 +688,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
}
@@ -1795,6 +1795,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
/* if any of the above changed restart the FEC */
if (status_change) {
+ netif_stop_queue(ndev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
@@ -1804,6 +1805,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
}
} else {
if (fep->link) {
+ netif_stop_queue(ndev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_stop(ndev);
@@ -3769,7 +3771,9 @@ fec_drv_remove(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
- return ret;
+ dev_err(&pdev->dev,
+ "Failed to resume device in remove callback (%pe)\n",
+ ERR_PTR(ret));
cancel_work_sync(&fep->tx_timeout_work);
fec_ptp_stop(pdev);
@@ -3782,8 +3786,13 @@ fec_drv_remove(struct platform_device *pdev)
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
- clk_disable_unprepare(fep->clk_ahb);
- clk_disable_unprepare(fep->clk_ipg);
+ /* After pm_runtime_get_sync() failed, the clks are still off, so skip
+ * disabling them again.
+ */
+ if (ret >= 0) {
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
+ }
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 3b54a37e780e..6fd0c73b327e 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -141,11 +141,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* NSEC_PER_SEC - ts.tv_nsec. Adding the remaining nanoseconds
* to the current timer yields the next second.
*/
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
- tempval = readl(fep->hwp + FEC_ATIME);
+ tempval = fep->cc.read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
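The fec_ptp fix above reads the free-running counter through the cyclecounter's read() callback instead of triggering a hardware capture, then converts with timecounter_cyc2time(). A standalone model of that cyclecounter/timecounter pattern (struct fields loosely follow the kernel's, with 1 cycle = 1 ns and no mult/shift scaling):

#include <stdio.h>
#include <stdint.h>

struct cyclecounter {
	uint64_t (*read)(const struct cyclecounter *cc);
};

struct timecounter {
	const struct cyclecounter *cc;
	uint64_t cycle_last;   /* counter value at the last sync point */
	uint64_t nsec_base;    /* nanoseconds at the last sync point */
};

/* Simplified timecounter_cyc2time(): base time plus the wrap-safe
 * cycle delta since the last sync.
 */
static uint64_t cyc2time(struct timecounter *tc)
{
	uint64_t now = tc->cc->read(tc->cc);

	return tc->nsec_base + (now - tc->cycle_last);
}

static uint64_t fake_counter;
static uint64_t fake_read(const struct cyclecounter *cc)
{
	(void)cc;
	return fake_counter;   /* stands in for the FEC_ATIME register */
}

int main(void)
{
	struct cyclecounter cc = { .read = fake_read };
	struct timecounter tc = { .cc = &cc, .cycle_last = 0, .nsec_base = 0 };

	fake_counter = 1000;
	printf("%llu ns\n", (unsigned long long)cyc2time(&tc)); /* 1000 */
	return 0;
}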
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 147126e79986..8e2a243aa102 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -885,12 +885,21 @@ _return:
return err;
}
+static int mac_remove(struct platform_device *pdev)
+{
+ struct mac_device *mac_dev = platform_get_drvdata(pdev);
+
+ platform_device_unregister(mac_dev->priv->eth_dev);
+ return 0;
+}
+
static struct platform_driver mac_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = mac_match,
},
.probe = mac_probe,
+ .remove = mac_remove,
};
builtin_platform_driver(mac_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 99fe2c210d0f..61f4b6e50d29 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -98,7 +98,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
return -EINVAL;
fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
- if (!fep->fcc.fccp)
+ if (!fep->fec.fecp)
return -EINVAL;
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 90ab7ade44c4..2ee6265228d1 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -283,7 +283,7 @@ static int hisi_femac_rx(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&priv->napi, skb);
dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_bytes += len;
next:
pos = (pos + 1) % rxq->num;
if (rx_pkts_num >= limit)
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index c41b19c760f8..645cae590dc4 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -550,7 +550,7 @@ static int hix5hd2_rx(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&priv->napi, skb);
dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_bytes += len;
next:
pos = dma_ring_incr(pos, RX_DESC_NUM);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 08339278c722..7c838a75934e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -419,8 +419,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
hdev->cls_dev.release = hnae_release;
(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
ret = device_register(&hdev->cls_dev);
- if (ret)
+ if (ret) {
+ put_device(&hdev->cls_dev);
return ret;
+ }
__module_get(THIS_MODULE);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 8aace2de0cc9..e34245649057 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -66,6 +66,27 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
}
}
+static u32 hns_mac_link_anti_shake(struct mac_driver *mac_ctrl_drv)
+{
+#define HNS_MAC_LINK_WAIT_TIME 5
+#define HNS_MAC_LINK_WAIT_CNT 40
+
+ u32 link_status = 0;
+ int i;
+
+ if (!mac_ctrl_drv->get_link_status)
+ return link_status;
+
+ for (i = 0; i < HNS_MAC_LINK_WAIT_CNT; i++) {
+ msleep(HNS_MAC_LINK_WAIT_TIME);
+ mac_ctrl_drv->get_link_status(mac_ctrl_drv, &link_status);
+ if (!link_status)
+ break;
+ }
+
+ return link_status;
+}
+
void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
{
struct mac_driver *mac_ctrl_drv;
@@ -83,6 +104,14 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
&sfp_prsnt);
if (!ret)
*link_status = *link_status && sfp_prsnt;
+
+ /* A FIBER port may report a fake link-up. When the link status
+ * changes from down to up, we need to do anti-shake; the
+ * anti-shake time is based on tests. Only FIBER ports need
+ * this.
+ */
+ if (*link_status && !mac_cb->link)
+ *link_status = hns_mac_link_anti_shake(mac_ctrl_drv);
}
mac_cb->link = *link_status;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index ffd1018d43fb..d09cc10b3517 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -3773,7 +3773,7 @@ static int hns3_init_mac_addr(struct net_device *netdev, bool init)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
- u8 mac_addr_temp[ETH_ALEN];
+ u8 mac_addr_temp[ETH_ALEN] = {0};
int ret = 0;
if (h->ae_algo->ops->get_mac_addr && init) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 34e5448d59f6..4ea19f546df0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -676,7 +676,9 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
hns3_get_ksettings(h, cmd);
break;
case HNAE3_MEDIA_TYPE_FIBER:
- if (module_type == HNAE3_MODULE_TYPE_CR)
+ if (module_type == HNAE3_MODULE_TYPE_UNKNOWN)
+ cmd->base.port = PORT_OTHER;
+ else if (module_type == HNAE3_MODULE_TYPE_CR)
cmd->base.port = PORT_DA;
else
cmd->base.port = PORT_FIBRE;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d58abdfdb9b7..f5956977fe39 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2588,7 +2588,10 @@ static int hclge_mac_init(struct hclge_dev *hdev)
int ret;
hdev->support_sfp_query = true;
- hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+
+ if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
hdev->hw.mac.duplex);
if (ret) {
@@ -2939,8 +2942,13 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
u32 regclr)
{
+#define HCLGE_IMP_RESET_DELAY 5
+
switch (event_type) {
case HCLGE_VECTOR0_EVENT_RST:
+ if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
+ mdelay(HCLGE_IMP_RESET_DELAY);
+
hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
break;
case HCLGE_VECTOR0_EVENT_MBX:
@@ -6688,12 +6696,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
/* If it is not PF reset or FLR, the firmware will disable the MAC,
* so it only need to stop phy here.
*/
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
- hdev->reset_type != HNAE3_FUNC_RESET &&
- hdev->reset_type != HNAE3_FLR_RESET) {
- hclge_mac_stop_phy(hdev);
- hclge_update_link_status(hdev);
- return;
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
+ HCLGE_PFC_DISABLE);
+ if (hdev->reset_type != HNAE3_FUNC_RESET &&
+ hdev->reset_type != HNAE3_FLR_RESET) {
+ hclge_mac_stop_phy(hdev);
+ hclge_update_link_status(hdev);
+ return;
+ }
}
for (i = 0; i < handle->kinfo.num_tqps; i++)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 8448607742a6..2183e700f9d9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -170,8 +170,8 @@ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
- u8 pfc_bitmap)
+int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+ u8 pfc_bitmap)
{
struct hclge_desc desc;
struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 260f22d19d81..406084bb2307 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -109,6 +109,9 @@ struct hclge_bp_to_qs_map_cmd {
u32 rsvd1;
};
+#define HCLGE_PFC_DISABLE 0
+#define HCLGE_PFC_TX_RX_DISABLE 0
+
struct hclge_pfc_en_cmd {
u8 tx_rx_en_bitmap;
u8 pri_en_bitmap;
@@ -150,6 +153,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
void hclge_tm_pfc_info_update(struct hclge_dev *hdev);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
+int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+ u8 pfc_bitmap);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 403c1b9cf6ab..ec3d98595198 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1432,7 +1432,10 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
* might happen in case the reset assertion was made by the PF. Yes, this also
* means we might end up waiting a bit longer, even for a VF reset.
*/
- msleep(5000);
+ if (hdev->reset_type == HNAE3_VF_FULL_RESET)
+ msleep(5000);
+ else
+ msleep(500);
return 0;
}
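
The wait above is now scaled to the reset type: only a full VF reset needs the 5 s settle time; the other reset types complete in roughly a tenth of that. A sketch of the split, with the two delays taken from the hunk:

#include <stdio.h>

enum reset_type { VF_FUNC_RESET, VF_FLR_RESET, VF_FULL_RESET };

/* How long to sleep (ms) before polling for reset completion;
 * mirrors the msleep(5000)/msleep(500) split in the hunk. */
static unsigned int reset_settle_ms(enum reset_type type)
{
	return type == VF_FULL_RESET ? 5000 : 500;
}

int main(void)
{
	printf("full=%ums flr=%ums\n",
	       reset_settle_ms(VF_FULL_RESET), reset_settle_ms(VF_FLR_RESET));
	return 0;
}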
@@ -2592,7 +2595,8 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
struct pci_dev *pdev = hdev->pdev;
int ret = 0;
- if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
+ if ((hdev->reset_type == HNAE3_VF_FULL_RESET ||
+ hdev->reset_type == HNAE3_FLR_RESET) &&
test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
hclgevf_misc_irq_uninit(hdev);
hclgevf_uninit_msi(hdev);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 1fd2b84e2e91..1d8b84eb4aa8 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2897,6 +2897,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
ret = of_device_register(&port->ofdev);
if (ret) {
pr_err("failed to register device. ret=%d\n", ret);
+ put_device(&port->ofdev.dev);
goto out;
}
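
of_device_register() takes a reference on the embedded struct device even when registration fails, so the error path must drop it with put_device() rather than just jumping to the exit label. The general shape of the pattern, sketched against a toy refcounted object rather than the real driver-core API:

#include <stdio.h>

/* Toy refcounted object standing in for struct device. */
struct toy_device { int refcount; };

static void toy_get(struct toy_device *d) { d->refcount++; }
static void toy_put(struct toy_device *d) { d->refcount--; }

/* Hypothetical register call: takes a reference, then may still fail. */
static int toy_register(struct toy_device *d, int simulate_failure)
{
	toy_get(d);
	return simulate_failure ? -1 : 0;
}

int main(void)
{
	struct toy_device dev = { .refcount = 1 };

	if (toy_register(&dev, 1)) {
		/* Without this put, the reference taken inside the failed
		 * register call would leak - the bug fixed above. */
		toy_put(&dev);
	}
	printf("refcount=%d\n", dev.refcount); /* back to 1 */
	return 0;
}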
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index a20d9147d5f2..fde949a73cb5 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -196,7 +196,7 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
unsigned long offset;
for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
- asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
+ asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset));
}
/* replenish the buffers for a pool. note that we don't need to
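
The asm change swaps the dcbfl mnemonic for the equivalent dcbf form with the L operand spelled out, which older POWER cores and assemblers also accept. The surrounding loop walks the buffer one cache line at a time; a portable sketch of that stride pattern, with a stub in place of the flush instruction and an assumed 128-byte line size:

#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE_BYTES 128 /* assumption: SMP_CACHE_BYTES on POWER */

/* Stub standing in for the dcbf cache-flush instruction. */
static void flush_cache_line(uintptr_t addr)
{
	printf("flush line at 0x%lx\n", (unsigned long)addr);
}

/* Mirrors ibmveth_flush_buffer(): one flush per cache line covering
 * [addr, addr + length). */
static void flush_buffer(const void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += CACHE_LINE_BYTES)
		flush_cache_line((uintptr_t)addr + offset);
}

int main(void)
{
	static unsigned char buf[512];

	flush_buffer(buf, sizeof(buf)); /* four flushes at 128-byte lines */
	return 0;
}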
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index bc313d85fe13..ce1ab8d59eb0 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -873,12 +873,22 @@ static int ibmvnic_login(struct net_device *netdev)
static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
+ if (!adapter->login_buf)
+ return;
+
+ dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token,
+ adapter->login_buf_sz, DMA_TO_DEVICE);
kfree(adapter->login_buf);
adapter->login_buf = NULL;
}
static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
+ if (!adapter->login_rsp_buf)
+ return;
+
+ dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token,
+ adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
kfree(adapter->login_rsp_buf);
adapter->login_rsp_buf = NULL;
}
@@ -4298,11 +4308,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
struct ibmvnic_login_buffer *login = adapter->login_buf;
int i;
- dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
- DMA_TO_DEVICE);
- dma_unmap_single(dev, adapter->login_rsp_buf_token,
- adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
-
/* If the number of queues requested can't be allocated by the
* server, the login response will return with code 1. We will need
* to resend the login buffer with fewer queues requested.
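
After this change the two release helpers are idempotent: they bail out when the buffer is already gone and undo the DMA mapping before freeing, instead of relying on handle_login_rsp() to have unmapped first. The shape of such a release, with stubs standing in for the DMA API:

#include <stdio.h>
#include <stdlib.h>

struct login_ctx {
	void *buf;           /* CPU buffer */
	unsigned long token; /* stand-in for the DMA handle */
	size_t len;
};

static void dma_unmap_stub(unsigned long token, size_t len)
{
	printf("unmap token=%lu len=%zu\n", token, len);
}

/* Idempotent release: early-out if already released, unmap before
 * freeing, and NULL the pointer so a second call is a no-op. */
static void release_login_ctx(struct login_ctx *ctx)
{
	if (!ctx->buf)
		return;

	dma_unmap_stub(ctx->token, ctx->len);
	free(ctx->buf);
	ctx->buf = NULL;
}

int main(void)
{
	struct login_ctx ctx = { .buf = malloc(64), .token = 42, .len = 64 };

	release_login_ctx(&ctx);
	release_login_ctx(&ctx); /* second call is harmless */
	return 0;
}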
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index cbd83bb5c1ac..2c34d45354fe 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5270,31 +5270,6 @@ static void e1000_watchdog_task(struct work_struct *work)
ew32(TARC(0), tarc0);
}
- /* disable TSO for pcie and 10/100 speeds, to avoid
- * some hardware issues
- */
- if (!(adapter->flags & FLAG_TSO_FORCE)) {
- switch (adapter->link_speed) {
- case SPEED_10:
- case SPEED_100:
- e_info("10/100 speed: disabling TSO\n");
- netdev->features &= ~NETIF_F_TSO;
- netdev->features &= ~NETIF_F_TSO6;
- break;
- case SPEED_1000:
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- break;
- default:
- /* oops */
- break;
- }
- if (hw->mac.type == e1000_pch_spt) {
- netdev->features &= ~NETIF_F_TSO;
- netdev->features &= ~NETIF_F_TSO6;
- }
- }
-
/* enable transmits in the hardware, need to do this
* after setting TARC(0)
*/
@@ -5916,9 +5891,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
e1000_tx_queue(tx_ring, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(tx_ring,
- (MAX_SKB_FRAGS *
+ ((MAX_SKB_FRAGS + 1) *
DIV_ROUND_UP(PAGE_SIZE,
- adapter->tx_fifo_limit) + 2));
+ adapter->tx_fifo_limit) + 4));
if (!netdev_xmit_more() ||
netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
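
The stop-queue threshold now reserves room for a worst-case skb: MAX_SKB_FRAGS + 1 buffers (head plus every fragment), each of which may be split into several descriptors at the per-descriptor FIFO limit, plus slack for option descriptors. A sketch of the arithmetic, assuming a typical MAX_SKB_FRAGS of 17:

#include <stdio.h>

#define MAX_SKB_FRAGS 17 /* assumption: typical value with 4K pages */
#define PAGE_SIZE 4096

/* Ceiling division, as DIV_ROUND_UP() in the kernel. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Descriptors to keep free before allowing the next transmit:
 * head plus frags, each possibly split at tx_fifo_limit, plus slack. */
static unsigned int tx_reserve(unsigned int tx_fifo_limit)
{
	return (MAX_SKB_FRAGS + 1) * DIV_ROUND_UP(PAGE_SIZE, tx_fifo_limit) + 4;
}

int main(void)
{
	printf("reserve=%u descriptors\n", tx_reserve(4096)); /* 22 */
	return 0;
}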
@@ -7223,6 +7198,32 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_RXCSUM |
NETIF_F_HW_CSUM);
+ /* disable TSO for pcie and 10/100 speeds to avoid
+ * some hardware issues and for i219 to fix transfer
+ * speed being capped at 60%
+ */
+ if (!(adapter->flags & FLAG_TSO_FORCE)) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ e_info("10/100 speed: disabling TSO\n");
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ break;
+ case SPEED_1000:
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ break;
+ default:
+ /* oops */
+ break;
+ }
+ if (hw->mac.type == e1000_pch_spt) {
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ }
+ }
+
/* Set user-changeable features (subset of all device features) */
netdev->hw_features = netdev->features;
netdev->hw_features |= NETIF_F_RXFCS;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index cb8689222c8b..55ba6b690ab6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -20,16 +20,11 @@ enum i40e_memory_type {
};
/* prototype for functions used for dynamic memory allocation */
-i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem,
- enum i40e_memory_type type,
- u64 size, u32 alignment);
-i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem);
-i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem,
- u32 size);
-i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem);
+int i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
+ enum i40e_memory_type type, u64 size, u32 alignment);
+int i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem);
+int i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem,
+ u32 size);
+int i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem);
#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 5706abb3c0ea..10125b02d154 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -178,6 +178,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
"Cannot locate client instance close routine\n");
return;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n");
+ return;
+ }
cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
i40e_client_release_qvlist(&cdev->lan_info);
@@ -376,7 +380,6 @@ void i40e_client_subtask(struct i40e_pf *pf)
/* Remove failed client instance */
clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
&cdev->state);
- i40e_client_del_instance(pf);
return;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 6475f78e85f6..a3709c4fc65d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1341,7 +1341,7 @@ void i40e_clear_hw(struct i40e_hw *hw)
I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
I40E_PFLAN_QALLOC_LASTQ_SHIFT;
- if (val & I40E_PFLAN_QALLOC_VALID_MASK)
+ if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
num_queues = (j - base_queue) + 1;
else
num_queues = 0;
@@ -1351,7 +1351,7 @@ void i40e_clear_hw(struct i40e_hw *hw)
I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
- if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
+ if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
num_vfs = (j - i) + 1;
else
num_vfs = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 200a1cb3b536..9de503c5f99b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -889,7 +889,9 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
ret = i40e_read_nvm_module_data(hw,
I40E_SR_EMP_SR_SETTINGS_PTR,
- offset, 1,
+ offset,
+ I40E_LLDP_CURRENT_STATUS_OFFSET,
+ I40E_LLDP_CURRENT_STATUS_SIZE,
&lldp_cfg.adminstatus);
} else {
ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 2a80c5daa376..ba86ad833bee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -32,6 +32,9 @@
#define I40E_CEE_MAX_FEAT_TYPE 3
#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B
#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31
+#define I40E_LLDP_CURRENT_STATUS_OFFSET 1
+#define I40E_LLDP_CURRENT_STATUS_SIZE 1
+
/* Defines for LLDP TLV header */
#define I40E_LLDP_TLV_LEN_SHIFT 0
#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 276f04c0e51d..31f60657f532 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1755,7 +1755,7 @@ void i40e_dbg_pf_exit(struct i40e_pf *pf)
void i40e_dbg_init(void)
{
i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
- if (!i40e_dbg_root)
+ if (IS_ERR(i40e_dbg_root))
pr_info("init of debugfs failed\n");
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index ef4d3762bf37..ca229b0efeb6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -44,7 +44,7 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
return 0;
}
-struct i40e_diag_reg_test_info i40e_reg_list[] = {
+const struct i40e_diag_reg_test_info i40e_reg_list[] = {
/* offset mask elements stride */
{I40E_QTX_CTL(0), 0x0000FFBF, 1,
I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
@@ -78,27 +78,28 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
u32 reg, mask;
+ u32 elements;
u32 i, j;
for (i = 0; i40e_reg_list[i].offset != 0 &&
!ret_code; i++) {
+ elements = i40e_reg_list[i].elements;
/* set actual reg range for dynamically allocated resources */
if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
hw->func_caps.num_tx_qp != 0)
- i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+ elements = hw->func_caps.num_tx_qp;
if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
hw->func_caps.num_msix_vectors != 0)
- i40e_reg_list[i].elements =
- hw->func_caps.num_msix_vectors - 1;
+ elements = hw->func_caps.num_msix_vectors - 1;
/* test register access */
mask = i40e_reg_list[i].mask;
- for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
+ for (j = 0; j < elements && !ret_code; j++) {
reg = i40e_reg_list[i].offset +
(j * i40e_reg_list[i].stride);
ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
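
Because the register table is now const, the per-entry element count can no longer be patched in place; the test loop keeps a local copy and overrides it for dynamically sized resources. The same pattern in miniature, with made-up register offsets:

#include <stdint.h>
#include <stdio.h>

struct reg_test { uint32_t offset; uint32_t elements; uint32_t stride; };

/* The table stays immutable; run-time sizing happens in a local. */
static const struct reg_test reg_list[] = {
	{ 0x1000, 1, 4 }, /* element count overridden at run time */
	{ 0x2000, 2, 4 },
	{ 0 }
};

int main(void)
{
	uint32_t num_tx_qp = 4; /* assumed capability value */

	for (int i = 0; reg_list[i].offset != 0; i++) {
		uint32_t elements = reg_list[i].elements;

		/* Override the static count for the dynamic resource. */
		if (reg_list[i].offset == 0x1000 && num_tx_qp != 0)
			elements = num_tx_qp;

		for (uint32_t j = 0; j < elements; j++)
			printf("test reg 0x%x\n",
			       reg_list[i].offset + j * reg_list[i].stride);
	}
	return 0;
}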
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index c3340f320a18..1db7c6d57231 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -20,7 +20,7 @@ struct i40e_diag_reg_test_info {
u32 stride; /* bytes between each element */
};
-extern struct i40e_diag_reg_test_info i40e_reg_list[];
+extern const struct i40e_diag_reg_test_info i40e_reg_list[];
i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 689deecb4e1a..95a6f8689b6f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -3082,10 +3082,17 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
if (cmd->flow_type == TCP_V4_FLOW ||
cmd->flow_type == UDP_V4_FLOW) {
- if (i_set & I40E_L3_SRC_MASK)
- cmd->data |= RXH_IP_SRC;
- if (i_set & I40E_L3_DST_MASK)
- cmd->data |= RXH_IP_DST;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (i_set & I40E_X722_L3_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_X722_L3_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ } else {
+ if (i_set & I40E_L3_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_L3_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ }
} else if (cmd->flow_type == TCP_V6_FLOW ||
cmd->flow_type == UDP_V6_FLOW) {
if (i_set & I40E_L3_V6_SRC_MASK)
@@ -3392,12 +3399,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
/**
* i40e_get_rss_hash_bits - Read RSS Hash bits from register
+ * @hw: hw structure
* @nfc: pointer to user request
* @i_setc: bits currently set
*
* Returns value of bits to be set per user request
**/
-static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
+static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
+ struct ethtool_rxnfc *nfc,
+ u64 i_setc)
{
u64 i_set = i_setc;
u64 src_l3 = 0, dst_l3 = 0;
@@ -3416,8 +3426,13 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
dst_l3 = I40E_L3_V6_DST_MASK;
} else if (nfc->flow_type == TCP_V4_FLOW ||
nfc->flow_type == UDP_V4_FLOW) {
- src_l3 = I40E_L3_SRC_MASK;
- dst_l3 = I40E_L3_DST_MASK;
+ if (hw->mac.type == I40E_MAC_X722) {
+ src_l3 = I40E_X722_L3_SRC_MASK;
+ dst_l3 = I40E_X722_L3_DST_MASK;
+ } else {
+ src_l3 = I40E_L3_SRC_MASK;
+ dst_l3 = I40E_L3_DST_MASK;
+ }
} else {
/* Any other flow type are not supported here */
return i_set;
@@ -3435,6 +3450,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
return i_set;
}
+#define FLOW_PCTYPES_SIZE 64
/**
* i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
* @pf: pointer to the physical function struct
@@ -3447,9 +3463,11 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
struct i40e_hw *hw = &pf->hw;
u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
- u8 flow_pctype = 0;
+ DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE);
u64 i_set, i_setc;
+ bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE);
+
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev,
"Change of RSS hash input set is not supported when MFP mode is enabled\n");
@@ -3465,36 +3483,35 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
switch (nfc->flow_type) {
case TCP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
+ flow_pctypes);
break;
case TCP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
+ flow_pctypes);
break;
case UDP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
-
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
+ flow_pctypes);
+ set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
+ flow_pctypes);
+ }
hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
break;
case UDP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
-
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
+ flow_pctypes);
+ set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
+ flow_pctypes);
+ }
hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
break;
case AH_ESP_V4_FLOW:
@@ -3527,17 +3544,20 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
- if (flow_pctype) {
- i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
- flow_pctype)) |
- ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
- flow_pctype)) << 32);
- i_set = i40e_get_rss_hash_bits(nfc, i_setc);
- i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype),
- (u32)i_set);
- i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype),
- (u32)(i_set >> 32));
- hena |= BIT_ULL(flow_pctype);
+ if (bitmap_weight(flow_pctypes, FLOW_PCTYPES_SIZE)) {
+ u8 flow_id;
+
+ for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) {
+ i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32);
+ i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc);
+
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id),
+ (u32)i_set);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id),
+ (u32)(i_set >> 32));
+ hena |= BIT_ULL(flow_id);
+ }
}
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
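
Replacing the single flow_pctype scalar with a bitmap lets one ethtool request program the hash input set for several related packet classifier types (e.g. TCP plus its SYN-no-ACK variant) in a single pass. A compact sketch of the collect-then-iterate pattern, using a plain 64-bit word in place of the kernel's DECLARE_BITMAP()/for_each_set_bit() helpers:

#include <stdint.h>
#include <stdio.h>

enum { PCTYPE_IPV4_TCP = 1, PCTYPE_IPV4_TCP_SYN_NO_ACK = 2, PCTYPE_MAX = 64 };

int main(void)
{
	uint64_t flow_pctypes = 0;
	int multi_pctype_supported = 1; /* assumed HW feature flag */

	/* First collect every pctype the request applies to... */
	flow_pctypes |= 1ULL << PCTYPE_IPV4_TCP;
	if (multi_pctype_supported)
		flow_pctypes |= 1ULL << PCTYPE_IPV4_TCP_SYN_NO_ACK;

	/* ...then program each one with the same input-set value. */
	for (int id = 0; id < PCTYPE_MAX; id++)
		if (flow_pctypes & (1ULL << id))
			printf("program hash input set for pctype %d\n", id);
	return 0;
}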
@@ -4213,11 +4233,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
/* First 4 bytes of L4 header */
- if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF))
- new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
- else if (!usr_ip4_spec->l4_4_bytes)
- new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
- else
+ if (usr_ip4_spec->l4_4_bytes)
return -EOPNOTSUPP;
/* Filtering on Type of Service is not supported. */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 637f6ed78b48..f7d41ba110a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -110,12 +110,18 @@ static struct workqueue_struct *i40e_wq;
static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
struct net_device *netdev, int delta)
{
+ struct netdev_hw_addr_list *ha_list;
struct netdev_hw_addr *ha;
if (!f || !netdev)
return;
- netdev_for_each_mc_addr(ha, netdev) {
+ if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr))
+ ha_list = &netdev->uc;
+ else
+ ha_list = &netdev->mc;
+
+ netdev_hw_addr_list_for_each(ha, ha_list) {
if (ether_addr_equal(ha->addr, f->macaddr)) {
ha->refcount += delta;
if (ha->refcount <= 0)
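
The refcount fixup above used to scan only the multicast list, so unicast filters were never found; the fix selects the list by address class first (with link-local addresses also routed to the unicast list). A sketch of the classification, keying on the multicast bit of the first address octet as is_unicast_ether_addr() effectively does:

#include <stdint.h>
#include <stdio.h>

/* An Ethernet address is multicast iff bit 0 of the first octet is set. */
static int is_multicast(const uint8_t *mac)
{
	return mac[0] & 1;
}

int main(void)
{
	const uint8_t uc[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	/* Pick which address list to walk before matching entries. */
	printf("first -> %s list\n", is_multicast(uc) ? "mc" : "uc");
	printf("second -> %s list\n", is_multicast(mc) ? "mc" : "uc");
	return 0;
}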
@@ -409,7 +415,9 @@ static void i40e_tx_timeout(struct net_device *netdev)
set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
break;
default:
- netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+ set_bit(__I40E_DOWN_REQUESTED, pf->state);
+ set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
break;
}
@@ -2700,7 +2708,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
struct i40e_pf *pf = vsi->back;
if (i40e_enabled_xdp_vsi(vsi)) {
- int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
if (frame_size > i40e_max_xdp_frame_size(vsi))
return -EINVAL;
@@ -5057,7 +5065,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
int v, ret = 0;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v]) {
ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
if (ret)
@@ -5637,6 +5645,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi)
}
/**
+ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
+ * @vsi: Pointer to vsi structure
+ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
+ *
+ * Helper function to convert units before send to set BW limit
+ **/
+static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
+{
+ if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
+ dev_warn(&vsi->back->pdev->dev,
+ "Setting max tx rate to minimum usable value of 50Mbps.\n");
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+ } else {
+ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+ }
+
+ return max_tx_rate;
+}
+
+/**
* i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
* @vsi: VSI to be configured
* @seid: seid of the channel/VSI
@@ -5658,10 +5686,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
max_tx_rate, seid);
return -EINVAL;
}
- if (max_tx_rate && max_tx_rate < 50) {
+ if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
dev_warn(&pf->pdev->dev,
"Setting max tx rate to minimum usable value of 50Mbps.\n");
- max_tx_rate = 50;
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
}
/* Tx rate credits are in values of 50Mbps, 0 is disabled */
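
i40e_bw_bytes_to_mbits() centralizes a conversion the TC paths previously open-coded: rates arrive in bytes per second, the firmware wants Mbps in 50 Mbps credits, and anything below one credit is clamped up with a warning. A userspace sketch with the divisor values taken from the hunks above:

#include <stdint.h>
#include <stdio.h>

#define BW_MBPS_DIVISOR 125000 /* bytes/s per Mbps, as in the driver */
#define BW_CREDIT_DIVISOR 50   /* firmware credits are 50 Mbps each */

/* Convert bytes/s to Mbps, clamping to the minimum usable credit. */
static uint64_t bw_bytes_to_mbits(uint64_t max_tx_rate)
{
	if (max_tx_rate < BW_MBPS_DIVISOR)
		return BW_CREDIT_DIVISOR; /* below 1 Mbps: clamp to 50 Mbps */
	return max_tx_rate / BW_MBPS_DIVISOR;
}

int main(void)
{
	printf("%llu Mbps\n", (unsigned long long)bw_bytes_to_mbits(12500000));
	printf("%llu Mbps\n", (unsigned long long)bw_bytes_to_mbits(1000));
	return 0;
}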
@@ -7589,9 +7617,9 @@ config_tc:
if (pf->flags & I40E_FLAG_TC_MQPRIO) {
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (!ret) {
u64 credits = max_tx_rate;
@@ -9932,6 +9960,21 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
}
/**
+ * i40e_clean_xps_state - clean xps state for every tx_ring
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_clean_xps_state(struct i40e_vsi *vsi)
+{
+ int i;
+
+ if (vsi->tx_rings)
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->tx_rings[i])
+ clear_bit(__I40E_TX_XPS_INIT_DONE,
+ vsi->tx_rings[i]->state);
+}
+
+/**
* i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
* @lock_acquired: indicates whether or not the lock has been acquired
@@ -9962,8 +10005,10 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
rtnl_unlock();
for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v])
+ if (pf->vsi[v]) {
+ i40e_clean_xps_state(pf->vsi[v]);
pf->vsi[v]->seid = 0;
+ }
}
i40e_shutdown_adminq(&pf->hw);
@@ -10245,10 +10290,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
u64 credits = 0;
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (ret)
goto end_unlock;
@@ -10297,8 +10342,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
pf->hw.aq.asq_last_status));
}
/* reinit the misc interrupt */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
ret = i40e_setup_misc_vector(pf);
+ if (ret)
+ goto end_unlock;
+ }
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -12496,6 +12544,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
}
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
__u16 mode;
@@ -13422,15 +13472,15 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
vsi->id = ctxt.vsi_number;
}
- vsi->active_filters = 0;
- clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
spin_lock_bh(&vsi->mac_filter_hash_lock);
+ vsi->active_filters = 0;
/* If macvlan filters already exist, force them to get loaded */
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
f->state = I40E_FILTER_NEW;
f_count++;
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
@@ -14782,6 +14832,7 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
int err;
int v_idx;
+ pci_set_drvdata(pf->pdev, pf);
pci_save_state(pf->pdev);
/* set up periodic task facility */
@@ -15508,11 +15559,15 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_switch_branch_release(pf->veb[i]);
}
- /* Now we can shutdown the PF's VSI, just before we kill
+ /* Now we can shutdown the PF's VSIs, just before we kill
* adminq and hmc.
*/
- if (pf->vsi[pf->lan_vsi])
- i40e_vsi_release(pf->vsi[pf->lan_vsi]);
+ for (i = pf->num_alloc_vsi; i--;)
+ if (pf->vsi[i]) {
+ i40e_vsi_close(pf->vsi[i]);
+ i40e_vsi_release(pf->vsi[i]);
+ pf->vsi[i] = NULL;
+ }
i40e_cloud_filter_exit(pf);
@@ -15661,6 +15716,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev);
i40e_reset_and_rebuild(pf, false, false);
+#ifdef CONFIG_PCI_IOV
+ i40e_restore_all_vfs_msi_state(pdev);
+#endif /* CONFIG_PCI_IOV */
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index e4d8d20baf3b..6b1996451a4b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -210,11 +210,11 @@ read_nvm_exit:
* @hw: pointer to the HW structure.
* @module_pointer: module pointer location in words from the NVM beginning
* @offset: offset in words from module start
- * @words: number of words to write
- * @data: buffer with words to write to the Shadow RAM
+ * @words: number of words to read
+ * @data: buffer to store the words read from the Shadow RAM
* @last_command: tells the AdminQ that this is the last command
*
- * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ * Reads a 16 bit word buffer from the Shadow RAM using the admin command.
**/
static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
u8 module_pointer, u32 offset,
@@ -234,18 +234,18 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
*/
if ((offset + words) > hw->nvm.sr_size)
i40e_debug(hw, I40E_DEBUG_NVM,
- "NVM write error: offset %d beyond Shadow RAM limit %d\n",
+ "NVM read error: offset %d beyond Shadow RAM limit %d\n",
(offset + words), hw->nvm.sr_size);
else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
- /* We can write only up to 4KB (one sector), in one AQ write */
+ /* We can read only up to 4KB (one sector), in one AQ read */
i40e_debug(hw, I40E_DEBUG_NVM,
- "NVM write fail error: tried to write %d words, limit is %d.\n",
+ "NVM read fail error: tried to read %d words, limit is %d.\n",
words, I40E_SR_SECTOR_SIZE_IN_WORDS);
else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
!= (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
- /* A single write cannot spread over two sectors */
+ /* A single read cannot spread over two sectors */
i40e_debug(hw, I40E_DEBUG_NVM,
- "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
+ "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
offset, words);
else
ret_code = i40e_aq_read_nvm(hw, module_pointer,
@@ -323,20 +323,24 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
/**
* i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
- * @hw: pointer to the HW structure
+ * @hw: Pointer to the HW structure
* @module_ptr: Pointer to module in words with respect to NVM beginning
- * @offset: offset in words from module start
+ * @module_offset: Offset in words from module start
+ * @data_offset: Offset in words from reading data area start
* @words_data_size: Words to read from NVM
* @data_ptr: Pointer to memory location where resulting buffer will be stored
**/
-i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr, u16 offset,
- u16 words_data_size,
- u16 *data_ptr)
+enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr)
{
i40e_status status;
+ u16 specific_ptr = 0;
u16 ptr_value = 0;
- u32 flat_offset;
+ u32 offset = 0;
if (module_ptr != 0) {
status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
@@ -352,36 +356,35 @@ i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
/* Pointer not initialized */
if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
- ptr_value == I40E_NVM_INVALID_VAL)
+ ptr_value == I40E_NVM_INVALID_VAL) {
+ i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
return I40E_ERR_BAD_PTR;
+ }
/* Check whether the module is in SR mapped area or outside */
if (ptr_value & I40E_PTR_TYPE) {
/* Pointer points outside of the Shared RAM mapped area */
- ptr_value &= ~I40E_PTR_TYPE;
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
- /* PtrValue in 4kB units, need to convert to words */
- ptr_value /= 2;
- flat_offset = ((u32)ptr_value * 0x1000) + (u32)offset;
- status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!status) {
- status = i40e_aq_read_nvm(hw, 0, 2 * flat_offset,
- 2 * words_data_size,
- data_ptr, true, NULL);
- i40e_release_nvm(hw);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_ALL,
- "Reading nvm aq failed.Error code: %d.\n",
- status);
- return I40E_ERR_NVM;
- }
- } else {
- return I40E_ERR_NVM;
- }
+ return I40E_ERR_PARAM;
} else {
/* Read from the Shadow RAM */
- status = i40e_read_nvm_buffer(hw, ptr_value + offset,
- &words_data_size, data_ptr);
+
+ status = i40e_read_nvm_word(hw, ptr_value + module_offset,
+ &specific_ptr);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm word failed.Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+
+ offset = ptr_value + module_offset + specific_ptr +
+ data_offset;
+
+ status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
+ data_ptr);
if (status) {
i40e_debug(hw, I40E_DEBUG_ALL,
"Reading nvm buffer failed.Error code: %d.\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 5250441bf75b..7effe5010e32 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -315,10 +315,12 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
void i40e_release_nvm(struct i40e_hw *hw);
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
-i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr, u16 offset,
- u16 words_data_size,
- u16 *data_ptr);
+enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr);
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 06987913837a..6067c8866834 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2669,7 +2669,7 @@ tx_only:
return budget;
}
- if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+ if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
q_vector->arm_wb_state = false;
/* Exit the polling mode, but don't re-enable interrupts if stack might
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 666a251e8c72..047068120ba7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1470,6 +1470,10 @@ struct i40e_lldp_variables {
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_X722_L3_SRC_SHIFT 49
+#define I40E_X722_L3_SRC_MASK (0x3ULL << I40E_X722_L3_SRC_SHIFT)
+#define I40E_X722_L3_DST_SHIFT 41
+#define I40E_X722_L3_DST_MASK (0x3ULL << I40E_X722_L3_DST_SHIFT)
#define I40E_L3_SRC_SHIFT 47
#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
#define I40E_L3_V6_SRC_SHIFT 43
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4080fdacca4c..e79b8dc5c2a5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -99,6 +99,32 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
(u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
+#ifdef CONFIG_PCI_IOV
+void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
+{
+ u16 vf_id;
+ u16 pos;
+
+ /* Continue only if this is a PF */
+ if (!pdev->is_physfn)
+ return;
+
+ if (!pci_num_vf(pdev))
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ struct pci_dev *vf_dev = NULL;
+
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
+ while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
+ if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
+ pci_restore_msi_state(vf_dev);
+ }
+ }
+}
+#endif /* CONFIG_PCI_IOV */
+
/**
* i40e_vc_notify_vf_reset
* @vf: pointer to the VF structure
@@ -130,17 +156,18 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
/***********************misc routines*****************************/
/**
- * i40e_vc_disable_vf
+ * i40e_vc_reset_vf
* @vf: pointer to the VF info
- *
- * Disable the VF through a SW reset.
+ * @notify_vf: whether to notify the VF about the reset
+ * Reset VF handler.
**/
-static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
+static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
struct i40e_pf *pf = vf->pf;
int i;
- i40e_vc_notify_vf_reset(vf);
+ if (notify_vf)
+ i40e_vc_notify_vf_reset(vf);
/* We want to ensure that an actual reset occurs initiated after this
* function was called. However, we do not want to wait forever, so
@@ -158,9 +185,14 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
usleep_range(10000, 20000);
}
- dev_warn(&vf->pf->pdev->dev,
- "Failed to initiate reset for VF %d after 200 milliseconds\n",
- vf->vf_id);
+ if (notify_vf)
+ dev_warn(&vf->pf->pdev->dev,
+ "Failed to initiate reset for VF %d after 200 milliseconds\n",
+ vf->vf_id);
+ else
+ dev_dbg(&vf->pf->pdev->dev,
+ "Failed to initiate reset for VF %d after 200 milliseconds\n",
+ vf->vf_id);
}
/**
@@ -1110,39 +1142,98 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
return -EIO;
}
-static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);
+/**
+ * __i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * called to get the number of VLANs offloaded on this VF
+ **/
+static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ u16 num_vlans = 0, bkt;
+
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+ if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
+ num_vlans++;
+ }
+
+ return num_vlans;
+}
/**
- * i40e_config_vf_promiscuous_mode
- * @vf: pointer to the VF info
- * @vsi_id: VSI id
- * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
- * @alluni: set MAC L2 layer unicast promiscuous enable/disable
+ * i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
*
- * Called from the VF to configure the promiscuous mode of
- * VF vsis and from the VF reset path to reset promiscuous mode.
+ * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
**/
-static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
- u16 vsi_id,
- bool allmulti,
- bool alluni)
+static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+ int num_vlans;
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ return num_vlans;
+}
+
+/**
+ * i40e_get_vlan_list_sync
+ * @vsi: pointer to the VSI
+ * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
+ * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
+ * This array is allocated here, but has to be freed by the caller.
+ *
+ * Called to get number of VLANs and VLAN list present in mac_filter_hash.
+ **/
+static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
+ s16 **vlan_list)
{
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
struct i40e_mac_filter *f;
- i40e_status aq_ret = 0;
- struct i40e_vsi *vsi;
+ int i = 0;
int bkt;
- vsi = i40e_find_vsi_from_id(pf, vsi_id);
- if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
- return I40E_ERR_PARAM;
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ *num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
+ *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
+ if (!(*vlan_list))
+ goto err;
- if (vf->port_vlan_id) {
- aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
- allmulti,
- vf->port_vlan_id,
- NULL);
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+ if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+ continue;
+ (*vlan_list)[i++] = f->vlan;
+ }
+err:
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+}
+
+/**
+ * i40e_set_vsi_promisc
+ * @vf: pointer to the VF struct
+ * @seid: VSI number
+ * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
+ * for a given VLAN
+ * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
+ * for a given VLAN
+ * @vl: List of VLANs - apply filter for given VLANs
+ * @num_vlans: Number of elements in @vl
+ **/
+static i40e_status
+i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ bool unicast_enable, s16 *vl, u16 num_vlans)
+{
+ i40e_status aq_ret, aq_tmp = 0;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ /* No VLAN to set promisc on, set on VSI */
+ if (!num_vlans || !vl) {
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
+ multi_enable,
+ NULL);
if (aq_ret) {
int aq_err = pf->hw.aq.asq_last_status;
@@ -1151,13 +1242,14 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
vf->vf_id,
i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw, aq_err));
+
return aq_ret;
}
- aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
- alluni,
- vf->port_vlan_id,
- NULL);
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
+ unicast_enable,
+ NULL, true);
+
if (aq_ret) {
int aq_err = pf->hw.aq.asq_last_status;
@@ -1167,68 +1259,94 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw, aq_err));
}
+
return aq_ret;
- } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
- if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
- continue;
- aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
- vsi->seid,
- allmulti,
- f->vlan,
- NULL);
- if (aq_ret) {
- int aq_err = pf->hw.aq.asq_last_status;
+ }
- dev_err(&pf->pdev->dev,
- "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
- f->vlan,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- }
+ for (i = 0; i < num_vlans; i++) {
+ aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
+ multi_enable,
+ vl[i], NULL);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
- aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
- vsi->seid,
- alluni,
- f->vlan,
- NULL);
- if (aq_ret) {
- int aq_err = pf->hw.aq.asq_last_status;
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
- dev_err(&pf->pdev->dev,
- "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
- f->vlan,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- }
+ if (!aq_tmp)
+ aq_tmp = aq_ret;
+ }
+
+ aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
+ unicast_enable,
+ vl[i], NULL);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+
+ if (!aq_tmp)
+ aq_tmp = aq_ret;
}
- return aq_ret;
}
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti,
- NULL);
- if (aq_ret) {
- int aq_err = pf->hw.aq.asq_last_status;
- dev_err(&pf->pdev->dev,
- "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
- vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ if (aq_tmp)
+ aq_ret = aq_tmp;
+
+ return aq_ret;
+}
+
+/**
+ * i40e_config_vf_promiscuous_mode
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI id
+ * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
+ * @alluni: set MAC L2 layer unicast promiscuous enable/disable
+ *
+ * Called from the VF to configure the promiscuous mode of
+ * VF vsis and from the VF reset path to reset promiscuous mode.
+ **/
+static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+ u16 vsi_id,
+ bool allmulti,
+ bool alluni)
+{
+ i40e_status aq_ret = I40E_SUCCESS;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi;
+ u16 num_vlans;
+ s16 *vl;
+
+ vsi = i40e_find_vsi_from_id(pf, vsi_id);
+ if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
+ return I40E_ERR_PARAM;
+
+ if (vf->port_vlan_id) {
+ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
+ alluni, &vf->port_vlan_id, 1);
return aq_ret;
- }
+ } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+ i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
- aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni,
- NULL, true);
- if (aq_ret) {
- int aq_err = pf->hw.aq.asq_last_status;
+ if (!vl)
+ return I40E_ERR_NO_MEMORY;
- dev_err(&pf->pdev->dev,
- "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
- vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
+ vl, num_vlans);
+ kfree(vl);
+ return aq_ret;
}
+ /* no VLANs to set on, set on VSI */
+ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
+ NULL, 0);
return aq_ret;
}
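
i40e_get_vlan_list_sync() snapshots the VLANs out of the filter hash while the spinlock is held, so the promiscuous-mode admin-queue calls (which may sleep) operate on a private array instead of the live hash. The snapshot-under-lock shape, with a pthread mutex standing in for the bh spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static short vlans[4] = { 10, 20, 30, 40 }; /* stand-in for the hash */

/* Copy the current VLAN set while holding the lock; the caller owns
 * and frees the returned array, as in the driver. */
static void get_vlan_list_sync(unsigned short *num, short **list)
{
	pthread_mutex_lock(&lock);
	*num = sizeof(vlans) / sizeof(vlans[0]);
	*list = calloc(*num, sizeof(**list));
	if (*list)
		memcpy(*list, vlans, *num * sizeof(**list));
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	unsigned short n;
	short *vl;

	get_vlan_list_sync(&n, &vl);
	if (!vl)
		return 1;
	for (unsigned short i = 0; i < n; i++)
		printf("apply promisc filter for VLAN %d\n", vl[i]);
	free(vl);
	return 0;
}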
@@ -1352,10 +1470,12 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
return true;
- /* If the VFs have been disabled, this means something else is
- * resetting the VF, so we shouldn't continue.
- */
- if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+ /* Bail out if VFs are disabled. */
+ if (test_bit(__I40E_VF_DISABLE, pf->state))
+ return true;
+
+ /* If VF is being reset already we don't need to continue. */
+ if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
return true;
i40e_trigger_vf_reset(vf, flr);
@@ -1392,7 +1512,8 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
i40e_cleanup_reset_vf(vf);
i40e_flush(hw);
- clear_bit(__I40E_VF_DISABLE, pf->state);
+ usleep_range(20000, 40000);
+ clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
return true;
}
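
Reset serialization is now per VF: test_and_set_bit() atomically claims I40E_VF_STATE_RESETTING, so a concurrent caller sees the bit already set and backs off instead of racing the reset. A sketch of the claim/release pattern, using GCC atomic builtins in place of the kernel bitops:

#include <stdio.h>

#define VF_STATE_RESETTING 0

static unsigned long vf_states;

/* Atomically set a bit and return its previous value, like
 * test_and_set_bit(). */
static int test_and_set(int bit, unsigned long *word)
{
	unsigned long mask = 1UL << bit;

	return (__atomic_fetch_or(word, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}

static void reset_vf(void)
{
	if (test_and_set(VF_STATE_RESETTING, &vf_states)) {
		printf("reset already in progress, backing off\n");
		return;
	}
	printf("performing reset\n");
	/* clear_bit() equivalent once the reset has finished */
	__atomic_and_fetch(&vf_states, ~(1UL << VF_STATE_RESETTING),
			   __ATOMIC_SEQ_CST);
}

int main(void)
{
	reset_vf();
	reset_vf(); /* both run here; only concurrent callers back off */
	return 0;
}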
@@ -1425,8 +1546,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
return false;
/* Begin reset on all VFs at once */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- i40e_trigger_vf_reset(&pf->vf[v], flr);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ vf = &pf->vf[v];
+ /* If VF is being reset no need to trigger reset again */
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ i40e_trigger_vf_reset(&pf->vf[v], flr);
+ }
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -1442,9 +1567,11 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
*/
while (v < pf->num_alloc_vfs) {
vf = &pf->vf[v];
- reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
- if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
- break;
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
+ break;
+ }
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
@@ -1472,6 +1599,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
if (pf->vf[v].lan_vsi_idx == 0)
continue;
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ continue;
+
i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
}
@@ -1483,6 +1614,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
if (pf->vf[v].lan_vsi_idx == 0)
continue;
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ continue;
+
i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
}
@@ -1492,10 +1627,16 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
mdelay(50);
/* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++)
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ continue;
+
i40e_cleanup_reset_vf(&pf->vf[v]);
+ }
i40e_flush(hw);
+ usleep_range(20000, 40000);
clear_bit(__I40E_VF_DISABLE, pf->state);
return true;
@@ -1874,6 +2015,25 @@ static void i40e_del_qch(struct i40e_vf *vf)
}
/**
+ * i40e_vc_get_max_frame_size
+ * @vf: pointer to the VF
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN so the PF needs to account for this in max frame size
+ * checks and sending the max frame size to the VF.
+ **/
+static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
+{
+ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
+
+ if (vf->port_vlan_id)
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
+
+/**
* i40e_vc_get_vf_resources_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1973,6 +2133,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+ vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
if (vf->lan_vsi_idx) {
vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
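
i40e_vc_get_max_frame_size() trims VLAN_HLEN off the advertised maximum when a port VLAN is in effect, because the VF cannot see the tag the PF inserts on its behalf. The computation is small enough to show whole:

#include <stdio.h>

#define VLAN_HLEN 4 /* 802.1Q tag length */

/* Largest frame the VF may send: the port limit, minus the tag the
 * PF silently inserts when a port VLAN is configured. */
static unsigned short vf_max_frame_size(unsigned short port_max,
					unsigned short port_vlan_id)
{
	return port_vlan_id ? port_max - VLAN_HLEN : port_max;
}

int main(void)
{
	printf("%u\n", vf_max_frame_size(9728, 0));   /* 9728 */
	printf("%u\n", vf_max_frame_size(9728, 100)); /* 9724 */
	return 0;
}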
@@ -1996,39 +2157,6 @@ err:
}
/**
- * i40e_vc_reset_vf_msg
- * @vf: pointer to the VF info
- *
- * called from the VF to reset itself,
- * unlike other virtchnl messages, PF driver
- * doesn't send the response back to the VF
- **/
-static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
-{
- if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
- i40e_reset_vf(vf, false);
-}
-
-/**
- * i40e_getnum_vf_vsi_vlan_filters
- * @vsi: pointer to the vsi
- *
- * called to get the number of VLANs offloaded on this VF
- **/
-static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
-{
- struct i40e_mac_filter *f;
- int num_vlans = 0, bkt;
-
- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
- if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
- num_vlans++;
- }
-
- return num_vlans;
-}
-
-/**
* i40e_vc_config_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2581,8 +2709,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
} else {
/* successful request */
vf->num_req_queues = req_pairs;
- i40e_vc_notify_vf_reset(vf);
- i40e_reset_vf(vf, false);
+ i40e_vc_reset_vf(vf, true);
return 0;
}
@@ -3231,16 +3358,16 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
bool found = false;
int bkt;
- if (!tc_filter->action) {
+ if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
dev_info(&pf->pdev->dev,
- "VF %d: Currently ADq doesn't support Drop Action\n",
- vf->vf_id);
+ "VF %d: ADQ doesn't support this action (%d)\n",
+ vf->vf_id, tc_filter->action);
goto err;
}
/* action_meta is TC number here to which the filter is applied */
if (!tc_filter->action_meta ||
- tc_filter->action_meta > I40E_MAX_VF_VSI) {
+ tc_filter->action_meta > vf->num_tc) {
dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
vf->vf_id, tc_filter->action_meta);
goto err;
@@ -3774,8 +3901,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
vf->adq_enabled = true;
/* reset the VF in order to allocate resources */
- i40e_vc_notify_vf_reset(vf);
- i40e_reset_vf(vf, false);
+ i40e_vc_reset_vf(vf, true);
return I40E_SUCCESS;
@@ -3815,8 +3941,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
}
/* reset the VF in order to allocate resources */
- i40e_vc_notify_vf_reset(vf);
- i40e_reset_vf(vf, false);
+ i40e_vc_reset_vf(vf, true);
return I40E_SUCCESS;
@@ -3878,7 +4003,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
i40e_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
- i40e_vc_reset_vf_msg(vf);
+ i40e_vc_reset_vf(vf, false);
ret = 0;
break;
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
@@ -4132,7 +4257,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* Force the VF interface down so it has to bring up with new MAC
* address
*/
- i40e_vc_disable_vf(vf);
+ i40e_vc_reset_vf(vf, true);
dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
error_param:
@@ -4196,9 +4321,6 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
/* duplicate request, so just return success */
goto error_pvid;
- i40e_vc_disable_vf(vf);
- /* During reset the VF got a new VSI, so refresh a pointer. */
- vsi = pf->vsi[vf->lan_vsi_idx];
/* Locked once because multiple functions below iterate list */
spin_lock_bh(&vsi->mac_filter_hash_lock);
@@ -4284,6 +4406,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
*/
vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+ i40e_vc_reset_vf(vf, true);
+ /* During reset the VF got a new VSI, so refresh a pointer. */
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
if (ret) {
dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
@@ -4579,7 +4705,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
goto out;
vf->trusted = setting;
- i40e_vc_disable_vf(vf);
+ i40e_vc_reset_vf(vf, true);
dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
vf_id, setting ? "" : "un");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 7df3e5833c5d..75d916714ad8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -41,6 +41,7 @@ enum i40e_vf_states {
I40E_VF_STATE_MC_PROMISC,
I40E_VF_STATE_UC_PROMISC,
I40E_VF_STATE_PRE_ENABLE,
+ I40E_VF_STATE_RESETTING
};
/* VF capabilities */
@@ -140,5 +141,8 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
+#ifdef CONFIG_PCI_IOV
+void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev);
+#endif /* CONFIG_PCI_IOV */
#endif /* _I40E_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 81ca6472937d..83fb44f2332c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -86,6 +86,7 @@ struct iavf_vsi {
#define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
#define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)
#define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */
+#define IAVF_MBPS_QUANTA 50
#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \
(IAVF_MAX_VF_VSI * \
@@ -384,7 +385,7 @@ void iavf_set_ethtool_ops(struct net_device *netdev);
void iavf_update_stats(struct iavf_adapter *adapter);
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter);
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter);
-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask);
+void iavf_irq_enable_queues(struct iavf_adapter *adapter);
void iavf_free_all_tx_resources(struct iavf_adapter *adapter);
void iavf_free_all_rx_resources(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index 9fa3fa99b4c2..897b349cdaf1 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -324,6 +324,7 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
@@ -354,12 +355,17 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_asq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_asq_bufs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
goto init_adminq_exit;
+init_free_asq_bufs:
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
init_adminq_free_rings:
iavf_free_adminq_asq(hw);
@@ -383,6 +389,7 @@ init_adminq_exit:
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
@@ -413,12 +420,16 @@ static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_arq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_arq_bufs;
/* success! */
hw->aq.arq.count = hw->aq.num_arq_entries;
goto init_adminq_exit;
+init_free_arq_bufs:
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
iavf_free_adminq_arq(hw);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 8547fc8fdfd6..78423ca401b2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -662,7 +662,7 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
/* Non Tunneled IPv6 */
IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
IAVF_PTT_UNUSED_ENTRY(91),
IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index e8850ba5604c..9cf556fedc70 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -244,21 +244,18 @@ static void iavf_irq_disable(struct iavf_adapter *adapter)
}
/**
- * iavf_irq_enable_queues - Enable interrupt for specified queues
+ * iavf_irq_enable_queues - Enable interrupt for all queues
* @adapter: board private structure
- * @mask: bitmap of queues to enable
**/
-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
+void iavf_irq_enable_queues(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
int i;
for (i = 1; i < adapter->num_msix_vectors; i++) {
- if (mask & BIT(i - 1)) {
- wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
- IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
- IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
- }
+ wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
+ IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
+ IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
}
}
@@ -272,7 +269,7 @@ void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
struct iavf_hw *hw = &adapter->hw;
iavf_misc_irq_enable(adapter);
- iavf_irq_enable_queues(adapter, ~0);
+ iavf_irq_enable_queues(adapter);
if (flush)
iavf_flush(hw);
@@ -1392,19 +1389,16 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
int q_idx, num_q_vectors;
- int napi_vectors;
if (!adapter->q_vectors)
return;
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- napi_vectors = adapter->num_active_queues;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
- if (q_idx < napi_vectors)
- netif_napi_del(&q_vector->napi);
+ netif_napi_del(&q_vector->napi);
}
kfree(adapter->q_vectors);
adapter->q_vectors = NULL;
@@ -2581,6 +2575,7 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
struct tc_mqprio_qopt_offload *mqprio_qopt)
{
u64 total_max_rate = 0;
+ u32 tx_rate_rem = 0;
int i, num_qps = 0;
u64 tx_rate = 0;
int ret = 0;
@@ -2595,12 +2590,32 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
return -EINVAL;
if (mqprio_qopt->min_rate[i]) {
dev_err(&adapter->pdev->dev,
- "Invalid min tx rate (greater than 0) specified\n");
+ "Invalid min tx rate (greater than 0) specified for TC%d\n",
+ i);
return -EINVAL;
}
- /*convert to Mbps */
+
+ /* convert to Mbps */
tx_rate = div_u64(mqprio_qopt->max_rate[i],
IAVF_MBPS_DIVISOR);
+
+ if (mqprio_qopt->max_rate[i] &&
+ tx_rate < IAVF_MBPS_QUANTA) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid max tx rate for TC%d, minimum %dMbps\n",
+ i, IAVF_MBPS_QUANTA);
+ return -EINVAL;
+ }
+
+ (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
+
+ if (tx_rate_rem != 0) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid max tx rate for TC%d, not divisible by %d\n",
+ i, IAVF_MBPS_QUANTA);
+ return -EINVAL;
+ }
+
total_max_rate += tx_rate;
num_qps += mqprio_qopt->qopt.count[i];
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_register.h b/drivers/net/ethernet/intel/iavf/iavf_register.h
index bf793332fc9d..a19e88898a0b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_register.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_register.h
@@ -40,7 +40,7 @@
#define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
#define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
#define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
-#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...63 */ /* Reset: VFR */
#define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
#define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index c6905d1b6182..7a1812912d6c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
u32 head, tail;
+ /* The underlying hardware might not allow access and/or might always
+ * return 0 for the head/tail registers, so just use the cached values
+ */
head = ring->next_to_clean;
- tail = readl(ring->tail);
+ tail = ring->next_to_use;
if (head != tail)
return (head < tail) ?
@@ -1058,7 +1061,7 @@ static inline void iavf_rx_hash(struct iavf_ring *ring,
cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (ring->netdev->features & NETIF_F_RXHASH)
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
@@ -1371,7 +1374,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
#endif
struct sk_buff *skb;
- if (!rx_buffer)
+ if (!rx_buffer || !size)
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
@@ -1531,7 +1534,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
- if (rx_buffer)
+ if (rx_buffer && size)
rx_buffer->pagecnt_bias++;
break;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 4d471a6f2946..7a17694b6a0b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -241,11 +241,14 @@ out:
void iavf_configure_queues(struct iavf_adapter *adapter)
{
struct virtchnl_vsi_queue_config_info *vqci;
- struct virtchnl_queue_pair_info *vqpi;
+ int i, max_frame = adapter->vf_res->max_mtu;
int pairs = adapter->num_active_queues;
- int i, max_frame = IAVF_MAX_RXBUFFER;
+ struct virtchnl_queue_pair_info *vqpi;
size_t len;
+ if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
+ max_frame = IAVF_MAX_RXBUFFER;
+
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index cc755382df25..50034ff26477 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1015,8 +1015,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
- ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
- ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+ (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 7d28563ab794..cf1baae963de 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3211,7 +3211,7 @@ static int __init ice_module_init(void)
pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
pr_info("%s\n", ice_copyright);
- ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
+ ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
@@ -3492,15 +3492,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
{
int err;
- if (vsi->netdev) {
+ if (vsi->netdev && vsi->type == ICE_VSI_PF) {
ice_set_rx_mode(vsi->netdev);
- if (vsi->type != ICE_VSI_LB) {
- err = ice_vsi_vlan_setup(vsi);
-
- if (err)
- return err;
- }
+ err = ice_vsi_vlan_setup(vsi);
+ if (err)
+ return err;
}
ice_vsi_cfg_dcb_rings(vsi);
@@ -3557,7 +3554,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
if (vsi->port_info &&
(vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
- vsi->netdev) {
+ vsi->netdev && vsi->type == ICE_VSI_PF) {
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
@@ -3567,7 +3564,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
* set the baseline so counters are ready when interface is up
*/
ice_update_eth_stats(vsi);
- ice_service_task_schedule(pf);
+
+ if (vsi->type == ICE_VSI_PF)
+ ice_service_task_schedule(pf);
return 0;
}
@@ -4595,6 +4594,8 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
pf_sw = pf->first_sw;
/* find the attribute in the netlink message */
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
__u16 mode;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 7ff2e07f6d38..0c71995e1a70 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -2627,7 +2627,7 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
else
status = ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id);
- if (status)
+ if (status && status != -EEXIST)
break;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 79ee0a747260..4e69cb2c025f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -425,7 +425,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
u32 hash_value, hash_mask;
- u8 bit_shift = 0;
+ u8 bit_shift = 1;
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
@@ -433,7 +433,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask.
*/
- while (hash_mask >> bit_shift != 0xFF)
+ while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
bit_shift++;
/* The portion of the address that is used for the hash table
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ca54e268d157..e6d99759d95a 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -32,11 +32,11 @@ struct igb_adapter;
/* TX/RX descriptor defines */
#define IGB_DEFAULT_TXD 256
#define IGB_DEFAULT_TX_WORK 128
-#define IGB_MIN_TXD 80
+#define IGB_MIN_TXD 64
#define IGB_MAX_TXD 4096
#define IGB_DEFAULT_RXD 256
-#define IGB_MIN_RXD 80
+#define IGB_MIN_RXD 64
#define IGB_MAX_RXD 4096
#define IGB_DEFAULT_ITR 3 /* dynamic */
@@ -594,6 +594,8 @@ struct igb_adapter {
struct igb_mac_addr *mac_table;
struct vf_mac_filter vf_macs;
struct vf_mac_filter *vf_mac_list;
+ /* lock for VF resources */
+ spinlock_t vfs_lock;
};
/* flags controlling PTP/1588 function */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index f80933320fd3..de2c39436fe0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -814,6 +814,8 @@ static int igb_set_eeprom(struct net_device *netdev,
*/
ret_val = hw->nvm.ops.read(hw, last_word, 1,
&eeprom_buff[last_word - first_word]);
+ if (ret_val)
+ goto out;
}
/* Device's eeprom is always little-endian, word addressable */
@@ -833,6 +835,7 @@ static int igb_set_eeprom(struct net_device *netdev,
hw->nvm.ops.update(hw);
igb_set_fw_version(adapter);
+out:
kfree(eeprom_buff);
return ret_val;
}
@@ -1402,6 +1405,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
*data = 1;
return -1;
}
+ wr32(E1000_IVAR_MISC, E1000_IVAR_VALID << 8);
+ wr32(E1000_EIMS, BIT(0));
} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
shared_int = false;
if (request_irq(irq,
@@ -2992,11 +2997,15 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter,
if (err)
goto err_out_w_lock;
- igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+ err = igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+ if (err)
+ goto err_out_input_filter;
spin_unlock(&adapter->nfc_lock);
return 0;
+err_out_input_filter:
+ igb_erase_filter(adapter, input);
err_out_w_lock:
spin_unlock(&adapter->nfc_lock);
err_out:
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8734dfd001bb..cceff1515ea1 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1213,8 +1213,12 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
if (!q_vector) {
q_vector = kzalloc(size, GFP_KERNEL);
} else if (size > ksize(q_vector)) {
- kfree_rcu(q_vector, rcu);
- q_vector = kzalloc(size, GFP_KERNEL);
+ struct igb_q_vector *new_q_vector;
+
+ new_q_vector = kzalloc(size, GFP_KERNEL);
+ if (new_q_vector)
+ kfree_rcu(q_vector, rcu);
+ q_vector = new_q_vector;
} else {
memset(q_vector, 0, size);
}
@@ -3491,6 +3495,7 @@ static int igb_disable_sriov(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
@@ -3503,12 +3508,13 @@ static int igb_disable_sriov(struct pci_dev *pdev)
pci_disable_sriov(pdev);
msleep(500);
}
-
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
kfree(adapter->vf_mac_list);
adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
adapter->vfs_allocated_count = 0;
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
wrfl();
msleep(100);
@@ -3704,8 +3710,9 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
- /* Virtualization features not supported on i210 family. */
- if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+ /* Virtualization features not supported on the i210 and 82580 families. */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) ||
+ (hw->mac.type == e1000_82580))
return;
/* Of the below we really only want the effect of getting
@@ -3829,6 +3836,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
spin_lock_init(&adapter->nfc_lock);
spin_lock_init(&adapter->stats64_lock);
+
+ /* init spinlock to serialize access to VF resources */
+ spin_lock_init(&adapter->vfs_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
case e1000_82576:
@@ -4543,6 +4553,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
struct igb_ring *rx_ring)
{
+#if (PAGE_SIZE < 8192)
+ struct e1000_hw *hw = &adapter->hw;
+#endif
+
/* set build_skb and buffer size flags */
clear_ring_build_skb_enabled(rx_ring);
clear_ring_uses_large_buffer(rx_ring);
@@ -4553,10 +4567,9 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
set_ring_build_skb_enabled(rx_ring);
#if (PAGE_SIZE < 8192)
- if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
- return;
-
- set_ring_uses_large_buffer(rx_ring);
+ if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB ||
+ rd32(E1000_RCTL) & E1000_RCTL_SBP)
+ set_ring_uses_large_buffer(rx_ring);
#endif
}
@@ -6466,77 +6479,75 @@ void igb_update_stats(struct igb_adapter *adapter)
}
}
+static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
+{
+ int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_PEROUT, tsintr_tt);
+ struct e1000_hw *hw = &adapter->hw;
+ struct timespec64 ts;
+ u32 tsauxc;
+
+ if (pin < 0 || pin >= IGB_N_PEROUT)
+ return;
+
+ spin_lock(&adapter->tmreg_lock);
+ ts = timespec64_add(adapter->perout[pin].start,
+ adapter->perout[pin].period);
+ /* u32 conversion of tv_sec is safe until y2106 */
+ wr32((tsintr_tt == 1) ? E1000_TRGTTIML1 : E1000_TRGTTIML0, ts.tv_nsec);
+ wr32((tsintr_tt == 1) ? E1000_TRGTTIMH1 : E1000_TRGTTIMH0, (u32)ts.tv_sec);
+ tsauxc = rd32(E1000_TSAUXC);
+ tsauxc |= TSAUXC_EN_TT0;
+ wr32(E1000_TSAUXC, tsauxc);
+ adapter->perout[pin].start = ts;
+ spin_unlock(&adapter->tmreg_lock);
+}
+
+static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
+{
+ int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_EXTTS, tsintr_tt);
+ struct e1000_hw *hw = &adapter->hw;
+ struct ptp_clock_event event;
+ u32 sec, nsec;
+
+ if (pin < 0 || pin >= IGB_N_EXTTS)
+ return;
+
+ nsec = rd32((tsintr_tt == 1) ? E1000_AUXSTMPL1 : E1000_AUXSTMPL0);
+ sec = rd32((tsintr_tt == 1) ? E1000_AUXSTMPH1 : E1000_AUXSTMPH0);
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = tsintr_tt;
+ event.timestamp = sec * 1000000000ULL + nsec;
+ ptp_clock_event(adapter->ptp_clock, &event);
+}
+
static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ u32 tsicr = rd32(E1000_TSICR);
struct ptp_clock_event event;
- struct timespec64 ts;
- u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
if (tsicr & TSINTR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
if (adapter->ptp_caps.pps)
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= TSINTR_SYS_WRAP;
}
if (tsicr & E1000_TSICR_TXTS) {
/* retrieve hardware timestamp */
schedule_work(&adapter->ptp_tx_work);
- ack |= E1000_TSICR_TXTS;
- }
-
- if (tsicr & TSINTR_TT0) {
- spin_lock(&adapter->tmreg_lock);
- ts = timespec64_add(adapter->perout[0].start,
- adapter->perout[0].period);
- /* u32 conversion of tv_sec is safe until y2106 */
- wr32(E1000_TRGTTIML0, ts.tv_nsec);
- wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
- tsauxc = rd32(E1000_TSAUXC);
- tsauxc |= TSAUXC_EN_TT0;
- wr32(E1000_TSAUXC, tsauxc);
- adapter->perout[0].start = ts;
- spin_unlock(&adapter->tmreg_lock);
- ack |= TSINTR_TT0;
- }
-
- if (tsicr & TSINTR_TT1) {
- spin_lock(&adapter->tmreg_lock);
- ts = timespec64_add(adapter->perout[1].start,
- adapter->perout[1].period);
- wr32(E1000_TRGTTIML1, ts.tv_nsec);
- wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
- tsauxc = rd32(E1000_TSAUXC);
- tsauxc |= TSAUXC_EN_TT1;
- wr32(E1000_TSAUXC, tsauxc);
- adapter->perout[1].start = ts;
- spin_unlock(&adapter->tmreg_lock);
- ack |= TSINTR_TT1;
- }
-
- if (tsicr & TSINTR_AUTT0) {
- nsec = rd32(E1000_AUXSTMPL0);
- sec = rd32(E1000_AUXSTMPH0);
- event.type = PTP_CLOCK_EXTTS;
- event.index = 0;
- event.timestamp = sec * 1000000000ULL + nsec;
- ptp_clock_event(adapter->ptp_clock, &event);
- ack |= TSINTR_AUTT0;
- }
-
- if (tsicr & TSINTR_AUTT1) {
- nsec = rd32(E1000_AUXSTMPL1);
- sec = rd32(E1000_AUXSTMPH1);
- event.type = PTP_CLOCK_EXTTS;
- event.index = 1;
- event.timestamp = sec * 1000000000ULL + nsec;
- ptp_clock_event(adapter->ptp_clock, &event);
- ack |= TSINTR_AUTT1;
- }
-
- /* acknowledge the interrupts */
- wr32(E1000_TSICR, ack);
+ }
+
+ if (tsicr & TSINTR_TT0)
+ igb_perout(adapter, 0);
+
+ if (tsicr & TSINTR_TT1)
+ igb_perout(adapter, 1);
+
+ if (tsicr & TSINTR_AUTT0)
+ igb_extts(adapter, 0);
+
+ if (tsicr & TSINTR_AUTT1)
+ igb_extts(adapter, 1);
}
static irqreturn_t igb_msix_other(int irq, void *data)
@@ -7126,7 +7137,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
struct e1000_hw *hw = &adapter->hw;
unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
- u32 reg, msgbuf[3];
+ u32 reg, msgbuf[3] = {};
u8 *addr = (u8 *)(&msgbuf[1]);
/* process all the same items cleared in a function level reset */
@@ -7569,8 +7580,10 @@ unlock:
static void igb_msg_task(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
/* process any reset requests */
if (!igb_check_for_rst(hw, vf))
@@ -7584,6 +7597,7 @@ static void igb_msg_task(struct igb_adapter *adapter)
if (!igb_check_for_ack(hw, vf))
igb_rcv_ack_from_vf(adapter, vf);
}
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
/**
@@ -9016,6 +9030,11 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
+ if (state == pci_channel_io_normal) {
+ dev_warn(&pdev->dev, "Non-correctable non-fatal error reported.\n");
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ }
+
netif_device_detach(netdev);
if (state == pci_channel_io_perm_failure)
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c39e921757ba..3c501c67bdbb 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -1245,18 +1245,6 @@ void igb_ptp_init(struct igb_adapter *adapter)
return;
}
- spin_lock_init(&adapter->tmreg_lock);
- INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
-
- if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
- INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
- igb_ptp_overflow_check);
-
- adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
- adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
-
- igb_ptp_reset(adapter);
-
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -1266,6 +1254,18 @@ void igb_ptp_init(struct igb_adapter *adapter)
dev_info(&adapter->pdev->dev, "added PHC on %s\n",
adapter->netdev->name);
adapter->ptp_flags |= IGB_PTP_ENABLED;
+
+ spin_lock_init(&adapter->tmreg_lock);
+ INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+
+ if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+ INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+ igb_ptp_overflow_check);
+
+ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ igb_ptp_reset(adapter);
}
}
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index eee26a3be90b..52545cb25d05 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -39,11 +39,11 @@ enum latency_range {
/* Tx/Rx descriptor defines */
#define IGBVF_DEFAULT_TXD 256
#define IGBVF_MAX_TXD 4096
-#define IGBVF_MIN_TXD 80
+#define IGBVF_MIN_TXD 64
#define IGBVF_DEFAULT_RXD 256
#define IGBVF_MAX_RXD 4096
-#define IGBVF_MIN_RXD 80
+#define IGBVF_MIN_RXD 64
#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 1082e49ea056..5bf1c9d84727 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1070,7 +1070,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
netdev);
if (err)
- goto out;
+ goto free_irq_tx;
adapter->rx_ring->itr_register = E1000_EITR(vector);
adapter->rx_ring->itr_val = adapter->current_itr;
@@ -1079,10 +1079,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
err = request_irq(adapter->msix_entries[vector].vector,
igbvf_msix_other, 0, netdev->name, netdev);
if (err)
- goto out;
+ goto free_irq_rx;
igbvf_configure_msix(adapter);
return 0;
+free_irq_rx:
+ free_irq(adapter->msix_entries[--vector].vector, netdev);
+free_irq_tx:
+ free_irq(adapter->msix_entries[--vector].vector, netdev);
out:
return err;
}
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index b8ba3f94c363..a47a2e3e548c 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2018 Intel Corporation. */
+#include <linux/etherdevice.h>
+
#include "vf.h"
static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
@@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
/* set our "perm_addr" based on info provided by PF */
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
if (!ret_val) {
- if (msgbuf[0] == (E1000_VF_RESET |
- E1000_VT_MSGTYPE_ACK))
+ switch (msgbuf[0]) {
+ case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK:
memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
- else
+ break;
+ case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK:
+ eth_zero_addr(hw->mac.perm_addr);
+ break;
+ default:
ret_val = -E1000_ERR_MAC_INIT;
+ }
}
}
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index aec998c82b69..a46eca3ffbcc 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -78,11 +78,11 @@ extern char igc_driver_version[];
/* TX/RX descriptor defines */
#define IGC_DEFAULT_TXD 256
#define IGC_DEFAULT_TX_WORK 128
-#define IGC_MIN_TXD 80
+#define IGC_MIN_TXD 64
#define IGC_MAX_TXD 4096
#define IGC_DEFAULT_RXD 256
-#define IGC_MIN_RXD 80
+#define IGC_MIN_RXD 64
#define IGC_MAX_RXD 4096
/* Transmit and receive queues */
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index db289bcce21d..d66429eb14a5 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -187,15 +187,7 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
igc_check_for_copper_link(hw);
- /* Verify phy id and set remaining function pointers */
- switch (phy->id) {
- case I225_I_PHY_ID:
- phy->type = igc_phy_i225;
- break;
- default:
- ret_val = -IGC_ERR_PHY;
- goto out;
- }
+ phy->type = igc_phy_i225;
out:
return ret_val;
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index cbcb8611ab50..a307b737dd0c 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1668,6 +1668,8 @@ static int igc_get_link_ksettings(struct net_device *netdev,
/* twisted pair */
cmd->base.port = PORT_TP;
cmd->base.phy_address = hw->phy.addr;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
/* advertising link modes */
if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)
@@ -1766,7 +1768,7 @@ static int igc_set_link_ksettings(struct net_device *netdev,
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
- u32 advertising;
+ u16 advertised = 0;
/* When adapter in resetting mode, autoneg/speed/duplex
* cannot be changed
@@ -1792,18 +1794,33 @@ static int igc_set_link_ksettings(struct net_device *netdev,
while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
- /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
- * We have to check this and convert it to ADVERTISE_2500_FULL
- * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
- */
- if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
- advertising |= ADVERTISE_2500_FULL;
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 2500baseT_Full))
+ advertised |= ADVERTISE_2500_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 1000baseT_Full))
+ advertised |= ADVERTISE_1000_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100baseT_Full))
+ advertised |= ADVERTISE_100_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100baseT_Half))
+ advertised |= ADVERTISE_100_HALF;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10baseT_Full))
+ advertised |= ADVERTISE_10_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10baseT_Half))
+ advertised |= ADVERTISE_10_HALF;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
- hw->phy.autoneg_advertised = advertising;
+ hw->phy.autoneg_advertised = advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = igc_fc_default;
} else {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9ba05d9aa8e0..3839ca8bdf6d 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -610,7 +610,6 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter,
/* disable the queue */
wr32(IGC_TXDCTL(reg_idx), 0);
wrfl();
- mdelay(10);
wr32(IGC_TDLEN(reg_idx),
ring->count * sizeof(union igc_adv_tx_desc));
@@ -2884,8 +2883,7 @@ bool igc_has_link(struct igc_adapter *adapter)
break;
}
- if (hw->mac.type == igc_i225 &&
- hw->phy.id == I225_I_PHY_ID) {
+ if (hw->mac.type == igc_i225) {
if (!netif_carrier_ok(adapter->netdev)) {
adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
} else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 6156c76d765f..1be112ce6774 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -235,8 +235,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
return ret_val;
}
- if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
- hw->phy.id == I225_I_PHY_ID) {
+ if (phy->autoneg_mask & ADVERTISE_2500_FULL) {
/* Read the MULTI GBT AN Control Register - reg 7.32 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
@@ -376,8 +375,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
mii_1000t_ctrl_reg);
- if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
- hw->phy.id == I225_I_PHY_ID)
+ if (phy->autoneg_mask & ADVERTISE_2500_FULL)
ret_val = phy->ops.write_reg(hw,
(STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index fa49ef2afde5..0142ca226bf5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -67,6 +67,8 @@
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
+#define IXGBE_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
/* Attempt to maximize the headroom available for incoming frames. We
* use a 2K buffer for receives and need 1536/1534 to store the data for
* the frame. This leaves us with 512 bytes of room. From that we need
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index eee277c1bedf..b1788ddffcb2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -123,14 +123,14 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
if (ret_val)
return ret_val;
if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
/* Check to see if SFP+ module is supported */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
&list_offset,
&data_offset);
if (ret_val)
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
break;
default:
break;
@@ -213,7 +213,7 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
break;
default:
- return IXGBE_ERR_LINK_SETUP;
+ return -EIO;
}
return 0;
@@ -283,7 +283,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
/* Validate the water mark configuration */
if (!hw->fc.pause_time)
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
/* Low water mark of zero causes XOFF floods */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
@@ -292,7 +292,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
if (!hw->fc.low_water[i] ||
hw->fc.low_water[i] >= hw->fc.high_water[i]) {
hw_dbg(hw, "Invalid water mark configuration\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
}
}
@@ -369,7 +369,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
break;
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
/* Set 802.3x based flow control settings. */
@@ -438,7 +438,7 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
msleep(100);
}
if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
- status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ status = -EIO;
hw_dbg(hw, "Autonegotiation did not complete.\n");
}
}
@@ -478,7 +478,7 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
hw_dbg(hw, "Link was indicated but link is down\n");
- return IXGBE_ERR_LINK_SETUP;
+ return -EIO;
}
return 0;
@@ -594,7 +594,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
speed &= link_capabilities;
if (speed == IXGBE_LINK_SPEED_UNKNOWN)
- return IXGBE_ERR_LINK_SETUP;
+ return -EINVAL;
/* Set KX4/KX support according to speed requested */
else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
@@ -701,9 +701,9 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
/* Init PHY and function pointers, perform SFP setup */
phy_status = hw->phy.ops.init(hw);
- if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (phy_status == -EOPNOTSUPP)
return phy_status;
- if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (phy_status == -ENOENT)
goto mac_reset_top;
hw->phy.ops.reset(hw);
@@ -727,7 +727,7 @@ mac_reset_top:
udelay(1);
}
if (ctrl & IXGBE_CTRL_RST) {
- status = IXGBE_ERR_RESET_FAILED;
+ status = -EIO;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
@@ -789,7 +789,7 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
@@ -814,7 +814,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
@@ -845,7 +845,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
u32 vftabyte;
if (vlan > 4095)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* Determine 32-bit word position in array */
regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
@@ -964,7 +964,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
gssr = IXGBE_GSSR_PHY0_SM;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
if (hw->phy.type == ixgbe_phy_nl) {
/*
@@ -993,7 +993,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
hw_dbg(hw, "EEPROM read did not pass.\n");
- status = IXGBE_ERR_SFP_NOT_PRESENT;
+ status = -ENOENT;
goto out;
}
@@ -1003,7 +1003,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
*eeprom_data = (u8)(sfp_data >> 8);
} else {
- status = IXGBE_ERR_PHY;
+ status = -EIO;
}
out:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 109f8de5a1c2..4817be80c01f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -117,7 +117,7 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (ret_val)
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
goto setup_sfp_err;
@@ -144,7 +144,7 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
if (ret_val) {
hw_dbg(hw, " sfp module setup not complete\n");
- return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ return -EIO;
}
}
@@ -159,7 +159,7 @@ setup_sfp_err:
usleep_range(hw->eeprom.semaphore_delay * 1000,
hw->eeprom.semaphore_delay * 2000);
hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
- return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ return -EIO;
}
/**
@@ -184,7 +184,7 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (ret_val)
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
*locked = true;
}
@@ -219,7 +219,7 @@ static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (ret_val)
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
locked = true;
}
@@ -400,7 +400,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
break;
default:
- return IXGBE_ERR_LINK_SETUP;
+ return -EIO;
}
if (hw->phy.multispeed_fiber) {
@@ -541,7 +541,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
msleep(100);
}
if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
- status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ status = -EIO;
hw_dbg(hw, "Autoneg did not complete.\n");
}
}
@@ -794,7 +794,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
speed &= link_capabilities;
if (speed == IXGBE_LINK_SPEED_UNKNOWN)
- return IXGBE_ERR_LINK_SETUP;
+ return -EINVAL;
/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
if (hw->mac.orig_link_settings_stored)
@@ -861,8 +861,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
msleep(100);
}
if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
- status =
- IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ status = -EIO;
hw_dbg(hw, "Autoneg did not complete.\n");
}
}
@@ -927,7 +926,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
/* Identify PHY and related function pointers */
status = hw->phy.ops.init(hw);
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (status == -EOPNOTSUPP)
return status;
/* Setup SFP module if there is one present. */
@@ -936,7 +935,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->phy.sfp_setup_needed = false;
}
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (status == -EOPNOTSUPP)
return status;
/* Reset PHY */
@@ -974,7 +973,7 @@ mac_reset_top:
}
if (ctrl & IXGBE_CTRL_RST_MASK) {
- status = IXGBE_ERR_RESET_FAILED;
+ status = -EIO;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
@@ -1093,7 +1092,7 @@ static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
udelay(10);
}
- return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
+ return -EIO;
}
/**
@@ -1155,7 +1154,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
- return IXGBE_ERR_FDIR_REINIT_FAILED;
+ return -EIO;
}
/* Clear FDIR statistics registers (read to clear) */
@@ -1387,7 +1386,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
break;
default:
hw_dbg(hw, " Error on flow type input\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
/* configure FDIRCMD register */
@@ -1546,7 +1545,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
break;
default:
hw_dbg(hw, " Error on vm pool mask\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
@@ -1555,13 +1554,13 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
if (input_mask->formatted.dst_port ||
input_mask->formatted.src_port) {
hw_dbg(hw, " Error on src/dst port mask\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
case IXGBE_ATR_L4TYPE_MASK:
break;
default:
hw_dbg(hw, " Error on flow type mask\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
@@ -1582,7 +1581,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
break;
default:
hw_dbg(hw, " Error on VLAN mask\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
@@ -1594,7 +1593,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
break;
default:
hw_dbg(hw, " Error on flexible byte mask\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
@@ -1823,7 +1822,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
/* Return error if SFP module has been detected but is not supported */
if (hw->phy.type == ixgbe_phy_sfp_unsupported)
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
return status;
}
@@ -1862,13 +1861,13 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
* Verifies that the installed firmware version is 0.6 or higher
* for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
*
- * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
- * if the FW version is not supported.
+ * Return: -EACCES if the FW is not present or if the FW version is
+ * not supported.
**/
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
- s32 status = IXGBE_ERR_EEPROM_VERSION;
u16 fw_offset, fw_ptp_cfg_offset;
+ s32 status = -EACCES;
u16 offset;
u16 fw_version = 0;
@@ -1882,7 +1881,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
goto fw_version_err;
if (fw_offset == 0 || fw_offset == 0xFFFF)
- return IXGBE_ERR_EEPROM_VERSION;
+ return -EACCES;
/* get the offset to the Pass Through Patch Configuration block */
offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
@@ -1890,7 +1889,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
goto fw_version_err;
if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF)
- return IXGBE_ERR_EEPROM_VERSION;
+ return -EACCES;
/* get the firmware version */
offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
@@ -1904,7 +1903,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
fw_version_err:
hw_err(hw, "eeprom read at offset %d failed\n", offset);
- return IXGBE_ERR_EEPROM_VERSION;
+ return -EACCES;
}
/**
@@ -2037,7 +2036,7 @@ static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
hw_dbg(hw, "auto negotiation not completed\n");
- ret_val = IXGBE_ERR_RESET_FAILED;
+ ret_val = -EIO;
goto reset_pipeline_out;
}
@@ -2086,7 +2085,7 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
if (!timeout) {
hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
- status = IXGBE_ERR_I2C;
+ status = -EIO;
goto release_i2c_access;
}
}
@@ -2140,7 +2139,7 @@ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
if (!timeout) {
hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
- status = IXGBE_ERR_I2C;
+ status = -EIO;
goto release_i2c_access;
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 39c5e6fdb72c..40c5baa191c9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -30,7 +30,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
-static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
/* Base table for registers values that change by MAC */
const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
@@ -124,7 +124,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
*/
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
/*
@@ -215,7 +215,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
break;
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
if (hw->mac.type != ixgbe_mac_X540) {
@@ -500,7 +500,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
if (pba_num == NULL) {
hw_dbg(hw, "PBA string buffer was null\n");
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
@@ -526,7 +526,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
/* we will need 11 characters to store the PBA */
if (pba_num_size < 11) {
hw_dbg(hw, "PBA string buffer too small\n");
- return IXGBE_ERR_NO_SPACE;
+ return -ENOSPC;
}
/* extract hex string from data and pba_ptr */
@@ -563,13 +563,13 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
if (length == 0xFFFF || length == 0) {
hw_dbg(hw, "NVM PBA number section invalid length\n");
- return IXGBE_ERR_PBA_SECTION;
+ return -EIO;
}
/* check if pba_num buffer is big enough */
if (pba_num_size < (((u32)length * 2) - 1)) {
hw_dbg(hw, "PBA string buffer too small\n");
- return IXGBE_ERR_NO_SPACE;
+ return -ENOSPC;
}
/* trim pba length from start of string */
@@ -746,10 +746,10 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
usleep_range(1000, 2000);
/*
- * Prevent the PCI-E bus from from hanging by disabling PCI-E master
+ * Prevent the PCI-E bus from hanging by disabling PCI-E primary
* access and verify no pending requests
*/
- return ixgbe_disable_pcie_master(hw);
+ return ixgbe_disable_pcie_primary(hw);
}
/**
@@ -805,7 +805,7 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* To turn on the LED, set mode to ON. */
led_reg &= ~IXGBE_LED_MODE_MASK(index);
@@ -826,7 +826,7 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* To turn off the LED, set mode to OFF. */
led_reg &= ~IXGBE_LED_MODE_MASK(index);
@@ -904,11 +904,8 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
hw->eeprom.ops.init_params(hw);
- if (words == 0)
- return IXGBE_ERR_INVALID_ARGUMENT;
-
- if (offset + words > hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ if (words == 0 || (offset + words > hw->eeprom.word_size))
+ return -EINVAL;
/*
* The EEPROM page size cannot be queried from the chip. We do lazy
@@ -962,7 +959,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
if (ixgbe_ready_eeprom(hw) != 0) {
ixgbe_release_eeprom(hw);
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
for (i = 0; i < words; i++) {
@@ -1028,7 +1025,7 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
hw->eeprom.ops.init_params(hw);
if (offset >= hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ return -EINVAL;
return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
}
@@ -1050,11 +1047,8 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
hw->eeprom.ops.init_params(hw);
- if (words == 0)
- return IXGBE_ERR_INVALID_ARGUMENT;
-
- if (offset + words > hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ if (words == 0 || (offset + words > hw->eeprom.word_size))
+ return -EINVAL;
/*
* We cannot hold synchronization semaphores for too long
@@ -1099,7 +1093,7 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
if (ixgbe_ready_eeprom(hw) != 0) {
ixgbe_release_eeprom(hw);
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
for (i = 0; i < words; i++) {
@@ -1142,7 +1136,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
hw->eeprom.ops.init_params(hw);
if (offset >= hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ return -EINVAL;
return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
}
@@ -1165,11 +1159,8 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
hw->eeprom.ops.init_params(hw);
- if (words == 0)
- return IXGBE_ERR_INVALID_ARGUMENT;
-
- if (offset >= hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ if (words == 0 || offset >= hw->eeprom.word_size)
+ return -EINVAL;
for (i = 0; i < words; i++) {
eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
@@ -1262,11 +1253,8 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
hw->eeprom.ops.init_params(hw);
- if (words == 0)
- return IXGBE_ERR_INVALID_ARGUMENT;
-
- if (offset >= hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ if (words == 0 || offset >= hw->eeprom.word_size)
+ return -EINVAL;
for (i = 0; i < words; i++) {
eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
@@ -1328,7 +1316,7 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
}
udelay(5);
}
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/**
@@ -1344,7 +1332,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
u32 i;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
@@ -1366,7 +1354,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
hw_dbg(hw, "Could not acquire EEPROM grant\n");
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/* Setup EEPROM for Read/Write */
@@ -1419,7 +1407,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
if (swsm & IXGBE_SWSM_SMBI) {
hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
}
@@ -1447,7 +1435,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
if (i >= timeout) {
hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
ixgbe_release_eeprom_semaphore(hw);
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
return 0;
@@ -1503,7 +1491,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
*/
if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
hw_dbg(hw, "SPI EEPROM Status error\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
return 0;
@@ -1715,7 +1703,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
if (hw->eeprom.ops.read(hw, i, &pointer)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/* If the pointer seems invalid */
@@ -1724,7 +1712,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
if (hw->eeprom.ops.read(hw, pointer, &length)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
if (length == 0xFFFF || length == 0)
@@ -1733,7 +1721,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
for (j = pointer + 1; j <= pointer + length; j++) {
if (hw->eeprom.ops.read(hw, j, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
checksum += word;
}
@@ -1786,7 +1774,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
* calculated checksum
*/
if (read_checksum != checksum)
- status = IXGBE_ERR_EEPROM_CHECKSUM;
+ status = -EIO;
/* If the user cares, return the calculated checksum */
if (checksum_val)
@@ -1845,7 +1833,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
/* Make sure we are using a valid rar index range */
if (index >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", index);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
/* setup VMDq pool selection before this RAR gets enabled */
@@ -1897,7 +1885,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
/* Make sure we are using a valid rar index range */
if (index >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", index);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
/*
@@ -2146,7 +2134,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
/* Validate the water mark configuration. */
if (!hw->fc.pause_time)
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
/* Low water mark of zero causes XOFF floods */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
@@ -2155,7 +2143,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
if (!hw->fc.low_water[i] ||
hw->fc.low_water[i] >= hw->fc.high_water[i]) {
hw_dbg(hw, "Invalid water mark configuration\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
}
}
@@ -2212,7 +2200,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
break;
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
/* Set 802.3x based flow control settings. */
@@ -2269,7 +2257,7 @@ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
if ((!(adv_reg)) || (!(lp_reg)))
- return IXGBE_ERR_FC_NOT_NEGOTIATED;
+ return -EINVAL;
if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
/*
@@ -2321,7 +2309,7 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
(!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
- return IXGBE_ERR_FC_NOT_NEGOTIATED;
+ return -EIO;
pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
@@ -2353,12 +2341,12 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
*/
links = IXGBE_READ_REG(hw, IXGBE_LINKS);
if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
- return IXGBE_ERR_FC_NOT_NEGOTIATED;
+ return -EIO;
if (hw->mac.type == ixgbe_mac_82599EB) {
links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
- return IXGBE_ERR_FC_NOT_NEGOTIATED;
+ return -EIO;
}
/*
* Read the 10g AN autoc and LP ability registers and resolve
@@ -2407,8 +2395,8 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
**/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
- s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
ixgbe_link_speed speed;
+ s32 ret_val = -EIO;
bool link_up;
/*
@@ -2506,15 +2494,15 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
}
/**
- * ixgbe_disable_pcie_master - Disable PCI-express master access
+ * ixgbe_disable_pcie_primary - Disable PCI-express primary access
* @hw: pointer to hardware structure
*
- * Disables PCI-Express master access and verifies there are no pending
- * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
- * bit hasn't caused the master requests to be disabled, else 0
- * is returned signifying master requests disabled.
+ * Disables PCI-Express primary access and verifies there are no pending
+ * requests. -EALREADY is returned if the primary disable bit
+ * hasn't caused the primary requests to be disabled; otherwise 0
+ * is returned, signifying that primary requests are disabled.
**/
-static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
{
u32 i, poll;
u16 value;
@@ -2523,23 +2511,23 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
/* Poll for bit to read as set */
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS)
break;
usleep_range(100, 120);
}
- if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) {
+ if (i >= IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT) {
hw_dbg(hw, "GIO disable did not set - requesting resets\n");
goto gio_disable_fail;
}
- /* Exit if master requests are blocked */
+ /* Exit if primary requests are blocked */
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
ixgbe_removed(hw->hw_addr))
return 0;
- /* Poll for master request bit to clear */
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ /* Poll for primary request bit to clear */
+ for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
udelay(100);
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
return 0;
@@ -2547,13 +2535,13 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
/*
* Two consecutive resets are required via CTRL.RST per datasheet
- * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
- * of this need. The first reset prevents new master requests from
+ * 5.2.5.3.2 Primary Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new primary requests from
* being issued by our device. We then must wait 1usec or more for any
* remaining completions from the PCIe bus to trickle in, and then reset
* again to clear out any effects they may have had on our device.
*/
- hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
+ hw_dbg(hw, "GIO Primary Disable bit didn't clear - requesting resets\n");
gio_disable_fail:
hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
@@ -2575,7 +2563,7 @@ gio_disable_fail:
}
hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
- return IXGBE_ERR_MASTER_REQUESTS_PENDING;
+ return -EALREADY;
}
/**
@@ -2600,7 +2588,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
* SW_FW_SYNC bits (not just NVM)
*/
if (ixgbe_get_eeprom_semaphore(hw))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
if (!(gssr & (fwmask | swmask))) {
@@ -2620,7 +2608,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
usleep_range(5000, 10000);
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
/**
@@ -2757,7 +2745,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
s32 ret_val;
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/*
* Link must be up to auto-blink the LEDs;
@@ -2803,7 +2791,7 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
s32 ret_val;
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
if (ret_val)
@@ -2963,7 +2951,7 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
@@ -3014,7 +3002,7 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
if (vmdq < 32) {
@@ -3091,7 +3079,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
* will simply bypass the VLVF if there are no entries present in the
* VLVF that contain our VLAN
*/
- first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
+ first_empty_slot = vlvf_bypass ? -ENOSPC : 0;
/* add VLAN enable bit for comparison */
vlan |= IXGBE_VLVF_VIEN;
@@ -3115,7 +3103,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
if (!first_empty_slot)
hw_dbg(hw, "No space in VLVF.\n");
- return first_empty_slot ? : IXGBE_ERR_NO_SPACE;
+ return first_empty_slot ? : -ENOSPC;
}
/**
@@ -3135,7 +3123,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
s32 vlvf_index;
if ((vlan > 4095) || (vind > 63))
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/*
* this is a 2 part operation - first the VFTA, then the
@@ -3596,7 +3584,8 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
*
* Communicates with the manageability block. On success return 0
* else returns semaphore error when encountering an error acquiring
- * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ * semaphore, -EINVAL when incorrect parameters are passed, or -EIO
+ * when the command fails.
*
* This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
* by the caller.
@@ -3609,7 +3598,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EINVAL;
}
/* Set bit 9 of FWSTS clearing FW reset indication */
@@ -3620,13 +3609,13 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if (!(hicr & IXGBE_HICR_EN)) {
hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EIO;
}
/* Calculate length in DWORDs. We must be DWORD aligned */
if (length % sizeof(u32)) {
hw_dbg(hw, "Buffer length failure, not aligned to dword");
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
dword_len = length >> 2;
@@ -3651,7 +3640,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
/* Check command successful completion. */
if ((timeout && i == timeout) ||
!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EIO;
return 0;
}
@@ -3671,7 +3660,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
* in these cases.
*
* Communicates with the manageability block. On success return 0
- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ * else return -EIO or -EINVAL.
**/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
u32 length, u32 timeout,
@@ -3688,7 +3677,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EINVAL;
}
/* Take management host interface semaphore */
status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
@@ -3718,7 +3707,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
if (length < round_up(buf_len, 4) + hdr_size) {
hw_dbg(hw, "Buffer not large enough for reply message.\n");
- status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = -EIO;
goto rel_out;
}
@@ -3749,8 +3738,8 @@ rel_out:
*
* Sends driver version number to firmware through the manageability
* block. On success return 0
- * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
- * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ * else returns -EBUSY when encountering an error acquiring
+ * semaphore or -EIO when command fails.
**/
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 sub, __always_unused u16 len,
@@ -3786,7 +3775,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
FW_CEM_RESP_STATUS_SUCCESS)
ret_val = 0;
else
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ ret_val = -EIO;
break;
}
@@ -3884,14 +3873,14 @@ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
return status;
if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF))
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
if (status)
return status;
if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED)
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
return 0;
}
@@ -3914,7 +3903,7 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
/* Only support thermal sensors attached to physical port 0 */
if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
if (status)
@@ -3974,7 +3963,7 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
/* Only support thermal sensors attached to physical port 0 */
if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
if (status)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 7c52ae8ac005..89812b34a899 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2542,6 +2542,14 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
return 0;
}
+static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
+{
+ if (adapter->hw.mac.type < ixgbe_mac_X550)
+ return 16;
+ else
+ return 64;
+}
+
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -2550,7 +2558,8 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
+ cmd->data = min_t(int, adapter->num_rx_queues,
+ ixgbe_rss_indir_tbl_max(adapter));
ret = 0;
break;
case ETHTOOL_GRXCLSRLCNT:
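The min_t() cap added to ETHTOOL_GRXRINGS above keeps userspace from building an indirection table that references rings the table cannot address; ethtool treats this value as the upper bound for ethtool -X ring indices. A hedged sketch of the arithmetic, using the 16/64 maxima from ixgbe_rss_indir_tbl_max():
/* Sketch: GRXRINGS must not exceed the RSS indirection width, or
 * 'ethtool -X' could request a queue the hardware cannot program. */
static int reported_rx_rings(int num_rx_queues, int indir_tbl_max)
{
	return num_rx_queues < indir_tbl_max ? num_rx_queues
					     : indir_tbl_max;
}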
@@ -2952,14 +2961,6 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
-static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
-{
- if (adapter->hw.mac.type < ixgbe_mac_X550)
- return 16;
- else
- return 64;
-}
-
static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
return IXGBE_RSS_KEY_SIZE;
@@ -3008,8 +3009,8 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
int i;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
- if (hfunc)
- return -EINVAL;
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
/* Fill out the redirection table */
if (indir) {
@@ -3246,7 +3247,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ s32 status = -EFAULT;
u8 databyte = 0xFF;
int i = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index f8aa1a0b89c5..3a188576f4c8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2755,7 +2755,6 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr = adapter->interrupt_event;
- s32 rc;
if (test_bit(__IXGBE_DOWN, &adapter->state))
return;
@@ -2789,14 +2788,13 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
}
/* Check if this is not due to overtemp */
- if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
+ if (!hw->phy.ops.check_overtemp(hw))
return;
break;
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
- rc = hw->phy.ops.check_overtemp(hw);
- if (rc != IXGBE_ERR_OVERTEMP)
+ if (!hw->phy.ops.check_overtemp(hw))
return;
break;
default:
@@ -2940,8 +2938,8 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
- u32 mask;
struct ixgbe_hw *hw = &adapter->hw;
+ u32 mask;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -5519,7 +5517,7 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
u32 speed;
bool autoneg, link_up = false;
- int ret = IXGBE_ERR_LINK_SETUP;
+ int ret = -EIO;
if (hw->mac.ops.check_link)
ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
@@ -5938,13 +5936,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
err = hw->mac.ops.init_hw(hw);
switch (err) {
case 0:
- case IXGBE_ERR_SFP_NOT_PRESENT:
- case IXGBE_ERR_SFP_NOT_SUPPORTED:
+ case -ENOENT:
+ case -EOPNOTSUPP:
break;
- case IXGBE_ERR_MASTER_REQUESTS_PENDING:
- e_dev_err("master disable timed out\n");
+ case -EALREADY:
+ e_dev_err("primary disable timed out\n");
break;
- case IXGBE_ERR_EEPROM_VERSION:
+ case -EACCES:
/* We are running on a pre-production device, log a warning */
e_dev_warn("This device is a pre-production adapter/LOM. "
"Please be aware there may be issues associated with "
@@ -6722,6 +6720,18 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @adapter: device handle, pointer to adapter
+ */
+static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
+{
+ if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+ return IXGBE_RXBUFFER_2K;
+ else
+ return IXGBE_RXBUFFER_3K;
+}
+
+/**
* ixgbe_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -6732,18 +6742,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (adapter->xdp_prog) {
- int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
- int i;
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
+ if (ixgbe_enabled_xdp_adapter(adapter)) {
+ int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
- if (new_frame_size > ixgbe_rx_bufsz(ring)) {
- e_warn(probe, "Requested MTU size is not supported with XDP\n");
- return -EINVAL;
- }
+ if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) {
+ e_warn(probe, "Requested MTU size is not supported with XDP\n");
+ return -EINVAL;
}
}
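A worked example of the new MTU check, assuming the usual ixgbe definition of the padding constant (IXGBE_PKT_HDR_PAD = ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN = 26 bytes; stated here as an assumption, not quoted from this patch):
#include <stdio.h>

#define PKT_HDR_PAD	26	/* assumed: ETH_HLEN + FCS + 2 * VLAN */
#define RXBUFFER_2K	2048
#define RXBUFFER_3K	3072

int main(void)
{
	/* 4 KiB pages, paged Rx path: 3 KiB XDP buffers per this patch */
	int max_frame = RXBUFFER_3K;
	int new_mtu = 3000;

	if (new_mtu + PKT_HDR_PAD > max_frame)
		printf("MTU %d rejected with XDP\n", new_mtu);
	else	/* 3000 + 26 = 3026 <= 3072, so this MTU is accepted */
		printf("MTU %d accepted with XDP\n", new_mtu);
	return 0;
}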
@@ -7747,10 +7751,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
err = hw->phy.ops.identify_sfp(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (err == -EOPNOTSUPP)
goto sfp_out;
- if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
+ if (err == -ENOENT) {
/* If no cable is present, then we need to reset
* the next time we find a good cable. */
adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
@@ -7776,7 +7780,7 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
else
err = hw->mac.ops.setup_sfp(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (err == -EOPNOTSUPP)
goto sfp_out;
adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
@@ -7785,8 +7789,8 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
sfp_out:
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
- if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
- (adapter->netdev->reg_state == NETREG_REGISTERED)) {
+ if (err == -EOPNOTSUPP &&
+ adapter->netdev->reg_state == NETREG_REGISTERED) {
e_dev_err("failed to initialize because an unsupported "
"SFP+ module type was detected.\n");
e_dev_err("Reload the driver after installing a "
@@ -7856,7 +7860,7 @@ static void ixgbe_service_timer(struct timer_list *t)
static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 status;
+ bool overtemp;
if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
return;
@@ -7866,11 +7870,9 @@ static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
if (!hw->phy.ops.handle_lasi)
return;
- status = hw->phy.ops.handle_lasi(&adapter->hw);
- if (status != IXGBE_ERR_OVERTEMP)
- return;
-
- e_crit(drv, "%s\n", ixgbe_overheat_msg);
+ hw->phy.ops.handle_lasi(&adapter->hw, &overtemp);
+ if (overtemp)
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
}
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
@@ -8417,7 +8419,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
struct ixgbe_adapter *adapter = q_vector->adapter;
if (unlikely(skb_tail_pointer(skb) < hdr.network +
- VXLAN_HEADROOM))
+ vxlan_headroom(0)))
return;
/* verify the port is recognized as VXLAN */
@@ -10527,6 +10529,44 @@ static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
}
/**
+ * ixgbe_irq_disable_single - Disable single IRQ vector
+ * @adapter: adapter structure
+ * @ring: ring index
+ **/
+static void ixgbe_irq_disable_single(struct ixgbe_adapter *adapter, u32 ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 qmask = BIT_ULL(ring);
+ u32 mask;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ mask = qmask & IXGBE_EIMC_RTX_QUEUE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ mask = (qmask & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
+ mask = (qmask >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+ break;
+ default:
+ break;
+ }
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ synchronize_irq(adapter->msix_entries[ring].vector);
+ else
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/**
* ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
* @adapter: adapter structure
* @ring: ring index
@@ -10542,6 +10582,11 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
+ ixgbe_irq_disable_single(adapter, ring);
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
ixgbe_disable_txr(adapter, tx_ring);
if (xdp_ring)
ixgbe_disable_txr(adapter, xdp_ring);
@@ -10550,9 +10595,6 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
if (xdp_ring)
synchronize_rcu();
- /* Rx/Tx/XDP Tx share the same napi context. */
- napi_disable(&rx_ring->q_vector->napi);
-
ixgbe_clean_tx_ring(tx_ring);
if (xdp_ring)
ixgbe_clean_tx_ring(xdp_ring);
@@ -10580,9 +10622,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
- /* Rx/Tx/XDP Tx share the same napi context. */
- napi_enable(&rx_ring->q_vector->napi);
-
ixgbe_configure_tx_ring(adapter, tx_ring);
if (xdp_ring)
ixgbe_configure_tx_ring(adapter, xdp_ring);
@@ -10591,6 +10630,11 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
if (xdp_ring)
clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+ ixgbe_irq_enable_queues(adapter, BIT_ULL(ring));
+ IXGBE_WRITE_FLUSH(&adapter->hw);
}
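The two hunks above encode a general quiesce pattern: mask and synchronize the queue's interrupt, park NAPI, and only then touch the descriptor rings; enabling reverses the order so no interrupt or poll runs against a half-configured ring. A sketch of the intended sequence (function names from this patch, bodies elided):
/* Ordering sketch only; XDP ring and error handling elided. */
static void ring_disable_order(struct ixgbe_adapter *adapter, int ring)
{
	ixgbe_irq_disable_single(adapter, ring);	/* 1: stop IRQs */
	napi_disable(&adapter->rx_ring[ring]->q_vector->napi);	/* 2 */
	/* 3: now safe to stop, drain and clean the Tx/Rx rings */
}

static void ring_enable_order(struct ixgbe_adapter *adapter, int ring)
{
	/* 1: fully configure the Tx/Rx rings first */
	napi_enable(&adapter->rx_ring[ring]->q_vector->napi);	/* 2 */
	ixgbe_irq_enable_queues(adapter, BIT_ULL(ring));	/* 3 */
}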
/**
@@ -10912,9 +10956,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = hw->mac.ops.reset_hw(hw);
hw->phy.reset_if_overtemp = false;
ixgbe_set_eee_capable(adapter);
- if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
+ if (err == -ENOENT) {
err = 0;
- } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ } else if (err == -EOPNOTSUPP) {
e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
e_dev_err("Reload the driver after installing a supported module.\n");
goto err_sw_init;
@@ -11131,7 +11175,7 @@ skip_sriov:
/* reset the hardware with the new settings */
err = hw->mac.ops.start_hw(hw);
- if (err == IXGBE_ERR_EEPROM_VERSION) {
+ if (err == -EACCES) {
/* We are running on a pre-production device, log a warning */
e_dev_warn("This device is a pre-production adapter/LOM. "
"Please be aware there may be issues associated "
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 5679293e53f7..fe7ef5773369 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -24,7 +24,7 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
size = mbx->size;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
return mbx->ops->read(hw, msg, size, mbx_id);
}
@@ -43,10 +43,10 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (size > mbx->size)
- return IXGBE_ERR_MBX;
+ return -EINVAL;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
return mbx->ops->write(hw, msg, size, mbx_id);
}
@@ -63,7 +63,7 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
return mbx->ops->check_for_msg(hw, mbx_id);
}
@@ -80,7 +80,7 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
return mbx->ops->check_for_ack(hw, mbx_id);
}
@@ -97,7 +97,7 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
return mbx->ops->check_for_rst(hw, mbx_id);
}
@@ -115,12 +115,12 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
int countdown = mbx->timeout;
if (!countdown || !mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
while (mbx->ops->check_for_msg(hw, mbx_id)) {
countdown--;
if (!countdown)
- return IXGBE_ERR_MBX;
+ return -EIO;
udelay(mbx->usec_delay);
}
@@ -140,12 +140,12 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
int countdown = mbx->timeout;
if (!countdown || !mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
while (mbx->ops->check_for_ack(hw, mbx_id)) {
countdown--;
if (!countdown)
- return IXGBE_ERR_MBX;
+ return -EIO;
udelay(mbx->usec_delay);
}
@@ -169,7 +169,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
s32 ret_val;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
ret_val = ixgbe_poll_for_msg(hw, mbx_id);
if (ret_val)
@@ -197,7 +197,7 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops || !mbx->timeout)
- return IXGBE_ERR_MBX;
+ return -EIO;
/* send msg */
ret_val = mbx->ops->write(hw, msg, size, mbx_id);
@@ -217,7 +217,7 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
return 0;
}
- return IXGBE_ERR_MBX;
+ return -EIO;
}
/**
@@ -238,7 +238,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
return 0;
}
- return IXGBE_ERR_MBX;
+ return -EIO;
}
/**
@@ -259,7 +259,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
return 0;
}
- return IXGBE_ERR_MBX;
+ return -EIO;
}
/**
@@ -295,7 +295,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
return 0;
}
- return IXGBE_ERR_MBX;
+ return -EIO;
}
/**
@@ -317,7 +317,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
return 0;
- return IXGBE_ERR_MBX;
+ return -EIO;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index a148534d7256..def067b15873 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -7,7 +7,6 @@
#include "ixgbe_type.h"
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX -100
#define IXGBE_VFMAILBOX 0x002FC
#define IXGBE_VFMBMEM 0x00200
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 2fb97967961c..33f6ee1d5be5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -102,7 +102,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
csum = ~csum;
do {
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
ixgbe_i2c_start(hw);
/* Device Address and write indication */
if (ixgbe_out_i2c_byte_ack(hw, addr))
@@ -150,7 +150,7 @@ fail:
hw_dbg(hw, "I2C byte read combined error.\n");
} while (retry < max_retry);
- return IXGBE_ERR_I2C;
+ return -EIO;
}
/**
@@ -179,7 +179,7 @@ s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
csum = ~csum;
do {
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
ixgbe_i2c_start(hw);
/* Device Address and write indication */
if (ixgbe_out_i2c_byte_ack(hw, addr))
@@ -215,7 +215,7 @@ fail:
hw_dbg(hw, "I2C byte write combined error.\n");
} while (retry < max_retry);
- return IXGBE_ERR_I2C;
+ return -EIO;
}
/**
@@ -262,8 +262,8 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
**/
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
{
+ u32 status = -EFAULT;
u32 phy_addr;
- u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
if (!hw->phy.phy_semaphore_mask) {
if (hw->bus.lan_id)
@@ -282,7 +282,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
if (ixgbe_probe_phy(hw, phy_addr))
return 0;
else
- return IXGBE_ERR_PHY_ADDR_INVALID;
+ return -EFAULT;
}
for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
@@ -405,8 +405,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
return status;
/* Don't reset PHY if it's shut down due to overtemp. */
- if (!hw->phy.reset_if_overtemp &&
- (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
+ if (!hw->phy.reset_if_overtemp && hw->phy.ops.check_overtemp(hw))
return 0;
/* Blocked by MNG FW so bail */
@@ -454,7 +453,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
if (ctrl & MDIO_CTRL1_RESET) {
hw_dbg(hw, "PHY reset polling failed to complete.\n");
- return IXGBE_ERR_RESET_FAILED;
+ return -EIO;
}
return 0;
@@ -496,7 +495,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY address command did not complete.\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/* Address cycle complete, setup and write the read
@@ -523,7 +522,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY read command didn't complete\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/* Read operation is complete. Get the data
@@ -555,7 +554,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
phy_data);
hw->mac.ops.release_swfw_sync(hw, gssr);
} else {
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
return status;
@@ -600,7 +599,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY address cmd didn't complete\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/*
@@ -628,7 +627,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY write cmd didn't complete\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
return 0;
@@ -653,7 +652,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
phy_data);
hw->mac.ops.release_swfw_sync(hw, gssr);
} else {
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
return status;
@@ -851,9 +850,11 @@ static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)
rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
if (rp_pdev && rp_pdev->subordinate) {
bus = rp_pdev->subordinate->number;
+ pci_dev_put(rp_pdev);
return pci_get_domain_bus_and_slot(0, bus, 0);
}
+ pci_dev_put(rp_pdev);
return NULL;
}
@@ -870,6 +871,7 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
struct ixgbe_adapter *adapter = hw->back;
struct pci_dev *pdev = adapter->pdev;
struct pci_dev *func0_pdev;
+ bool has_mii = false;
/* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices
* are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0
@@ -880,15 +882,16 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));
if (func0_pdev) {
if (func0_pdev == pdev)
- return true;
- else
- return false;
+ has_mii = true;
+ goto out;
}
func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));
if (func0_pdev == pdev)
- return true;
+ has_mii = true;
- return false;
+out:
+ pci_dev_put(func0_pdev);
+ return has_mii;
}
/**
@@ -1298,7 +1301,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
if ((phy_data & MDIO_CTRL1_RESET) != 0) {
hw_dbg(hw, "PHY reset did not complete.\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/* Get init offsets */
@@ -1355,12 +1358,12 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
hw_dbg(hw, "SOL\n");
} else {
hw_dbg(hw, "Bad control value\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
break;
default:
hw_dbg(hw, "Bad control type\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
}
@@ -1368,7 +1371,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
err_eeprom:
hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/**
@@ -1386,10 +1389,10 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
return ixgbe_identify_qsfp_module_generic(hw);
default:
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
/**
@@ -1414,7 +1417,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
/* LAN ID is needed for sfp_type determination */
@@ -1429,7 +1432,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_1GBE_COMP_CODES,
@@ -1620,7 +1623,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
/* Anything else 82598-based is supported */
@@ -1644,7 +1647,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
}
hw_dbg(hw, "SFP+ module not supported\n");
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
return 0;
@@ -1654,7 +1657,7 @@ err_read_i2c_eeprom:
hw->phy.id = 0;
hw->phy.type = ixgbe_phy_unknown;
}
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
/**
@@ -1681,7 +1684,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
/* LAN ID is needed for sfp_type determination */
@@ -1695,7 +1698,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
hw->phy.id = identifier;
@@ -1763,7 +1766,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
} else {
/* unsupported module type */
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
}
@@ -1823,7 +1826,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
}
hw_dbg(hw, "QSFP module not supported\n");
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
return 0;
}
@@ -1834,7 +1837,7 @@ err_read_i2c_eeprom:
hw->phy.id = 0;
hw->phy.type = ixgbe_phy_unknown;
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
/**
@@ -1854,14 +1857,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 sfp_type = hw->phy.sfp_type;
if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
(hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
/*
* Limiting active cables and 1G Phys must be initialized as
@@ -1882,11 +1885,11 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
hw_err(hw, "eeprom read at %d failed\n",
IXGBE_PHY_INIT_OFFSET_NL);
- return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+ return -EIO;
}
if ((!*list_offset) || (*list_offset == 0xFFFF))
- return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+ return -EIO;
/* Shift offset to first ID word */
(*list_offset)++;
@@ -1905,7 +1908,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
goto err_phy;
if ((!*data_offset) || (*data_offset == 0xFFFF)) {
hw_dbg(hw, "SFP+ module not supported\n");
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
} else {
break;
}
@@ -1918,14 +1921,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
if (sfp_id == IXGBE_PHY_INIT_END_NL) {
hw_dbg(hw, "No matching SFP+ module found\n");
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
return 0;
err_phy:
hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/**
@@ -2020,7 +2023,7 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
do {
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
ixgbe_i2c_start(hw);
@@ -2136,7 +2139,7 @@ static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u32 swfw_mask = hw->phy.phy_semaphore_mask;
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
do {
ixgbe_i2c_start(hw);
@@ -2378,7 +2381,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
if (ack == 1) {
hw_dbg(hw, "I2C ack was not received.\n");
- status = IXGBE_ERR_I2C;
+ status = -EIO;
}
ixgbe_lower_i2c_clk(hw, &i2cctl);
@@ -2450,7 +2453,7 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
udelay(IXGBE_I2C_T_LOW);
} else {
hw_dbg(hw, "I2C data was not set to %X\n", data);
- return IXGBE_ERR_I2C;
+ return -EIO;
}
return 0;
@@ -2546,7 +2549,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
- return IXGBE_ERR_I2C;
+ return -EIO;
}
return 0;
@@ -2616,22 +2619,24 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
* Checks if the LASI temp alarm status was triggered due to overtemp
+ *
+ * Return true when an overtemp event is detected, otherwise false.
**/
-s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
+bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
{
u16 phy_data = 0;
+ u32 status;
if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
- return 0;
+ return false;
/* Check that the LASI temp alarm status was triggered */
- hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
- MDIO_MMD_PMAPMD, &phy_data);
-
- if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
- return 0;
+ status = hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
+ MDIO_MMD_PMAPMD, &phy_data);
+ if (status)
+ return false;
- return IXGBE_ERR_OVERTEMP;
+ return !!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM);
}
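With check_overtemp() now returning bool, call sites read as a plain predicate. A minimal sketch of the converted pattern, built only from names used elsewhere in this patch:
/* Illustrative only; mirrors the converted call sites above. */
static void example_overtemp_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (!hw->phy.ops.check_overtemp(hw))
		return;		/* no overtemp event: nothing to do */

	e_crit(drv, "%s\n", ixgbe_overheat_msg);
}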
/** ixgbe_set_copper_phy_power - Control power for copper phy
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 6544c4539c0d..ef72729d7c93 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -155,7 +155,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
-s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
+bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 0be13a90ff79..f5e36417c33e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -989,6 +989,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
+ u32 aflags = adapter->flags;
bool is_l2 = false;
u32 regval;
@@ -1009,20 +1010,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
tsync_rx_mtrl = 0;
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -1036,8 +1037,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
is_l2 = true;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_NTP_ALL:
@@ -1048,7 +1049,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
if (hw->mac.type >= ixgbe_mac_X550) {
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL;
config->rx_filter = HWTSTAMP_FILTER_ALL;
- adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+ aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
break;
}
/* fall through */
@@ -1059,8 +1060,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
* Delay_Req messages and hardware does not support
* timestamping all packets => return error
*/
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -1092,8 +1091,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
IXGBE_TSYNCRXCTL_TYPE_ALL |
IXGBE_TSYNCRXCTL_TSIP_UT_EN;
config->rx_filter = HWTSTAMP_FILTER_ALL;
- adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
+ aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+ aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
is_l2 = true;
break;
default:
@@ -1126,6 +1125,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
IXGBE_WRITE_FLUSH(hw);
+ /* commit adapter flags only after the HW is actually configured */
+ adapter->flags = aflags;
+
/* clear TX/RX time stamp registers, just to be sure */
ixgbe_ptp_clear_tx_timestamp(adapter);
IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
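The aflags local above implements a stage-then-commit pattern: all flag edits for the requested timestamp mode are made on a copy, and adapter->flags is overwritten only once the hardware writes have gone through, so an early -ERANGE return leaves the previous state intact. A generic, self-contained sketch of the pattern (flag values are illustrative):
#include <errno.h>

#define FLAG_RX_TS_ENABLED	0x1
#define FLAG_RX_TS_IN_REG	0x2

static int set_mode(unsigned int *live_flags, int mode_supported)
{
	unsigned int staged = *live_flags;	/* work on a copy */

	staged |= FLAG_RX_TS_ENABLED;
	staged &= ~FLAG_RX_TS_IN_REG;

	if (!mode_supported)
		return -ERANGE;	/* live flags left untouched */

	/* ... program the hardware here ... */
	*live_flags = staged;	/* commit only after HW success */
	return 0;
}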
@@ -1211,7 +1213,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
struct cyclecounter cc;
unsigned long flags;
u32 incval = 0;
- u32 tsauxc = 0;
u32 fuse0 = 0;
/* For some of the boards below this mask is technically incorrect.
@@ -1246,18 +1247,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
cc.read = ixgbe_ptp_read_X550;
-
- /* enable SYSTIME counter */
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
- tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
- tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
- IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
-
- IXGBE_WRITE_FLUSH(hw);
break;
case ixgbe_mac_X540:
cc.read = ixgbe_ptp_read_82599;
@@ -1290,6 +1279,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
+ * @adapter: the ixgbe private board structure
+ *
+ * Initialize and start the SYSTIME registers.
+ */
+static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 tsauxc;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ case ixgbe_mac_X550:
+ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+
+ /* Reset interrupt settings */
+ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
+
+ /* Activate the SYSTIME counter */
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
+ tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB:
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+ break;
+ default:
+ /* Other devices aren't supported */
+ return;
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
* ixgbe_ptp_reset
* @adapter: the ixgbe private board structure
*
@@ -1315,6 +1348,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
ixgbe_ptp_start_cyclecounter(adapter);
+ ixgbe_ptp_init_systime(adapter);
+
spin_lock_irqsave(&adapter->tmreg_lock, flags);
timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
ktime_to_ns(ktime_get_real()));
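After this change the PTP reset path has a clean split: start_cyclecounter only selects the read function and increment value, the new init_systime zeroes and arms the hardware clock, and timecounter_init seeds the software timecounter. The resulting sequence, as a sketch (locking elided):
/* Sketch of the post-patch reset ordering; not the full function. */
static void example_ptp_reset(struct ixgbe_adapter *adapter)
{
	ixgbe_ptp_start_cyclecounter(adapter);	/* pick cc.read, incval */
	ixgbe_ptp_init_systime(adapter);	/* zero + enable SYSTIME */
	timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
			 ktime_to_ns(ktime_get_real()));
}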
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 0e73e3b1af19..843a13f219d7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -28,6 +28,9 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
struct vf_macvlans *mv_list;
int num_vf_macvlans, i;
+ /* Initialize list of VF macvlans */
+ INIT_LIST_HEAD(&adapter->vf_mvs.l);
+
num_vf_macvlans = hw->mac.num_rar_entries -
(IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
if (!num_vf_macvlans)
@@ -36,8 +39,6 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
GFP_KERNEL);
if (mv_list) {
- /* Initialize list of VF macvlans */
- INIT_LIST_HEAD(&adapter->vf_mvs.l);
for (i = 0; i < num_vf_macvlans; i++) {
mv_list[i].vf = -1;
mv_list[i].free = true;
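Moving INIT_LIST_HEAD() ahead of the allocation above guarantees adapter->vf_mvs.l is a valid (empty) list even when no macvlan entries are allocated; teardown code that walks the list would otherwise chase uninitialized pointers. A self-contained illustration of the hazard:
struct list_head { struct list_head *next, *prev; };

#define INIT_LIST_HEAD(h) do { (h)->next = (h); (h)->prev = (h); } while (0)

static void alloc_vf_macvlans_sketch(struct list_head *head, int num_entries)
{
	INIT_LIST_HEAD(head);	/* post-patch: always initialized */

	if (!num_entries)
		return;	/* pre-patch: returned with 'head' uninitialized,
			 * leaving a later list walk to read garbage */

	/* ... kcalloc() the entries and link them onto 'head' ... */
}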
@@ -1278,7 +1279,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
- retval = IXGBE_ERR_MBX;
+ retval = -EIO;
break;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 2be1c4c72435..e84dbf6a3cb8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1247,7 +1247,7 @@ struct ixgbe_nvm_version {
#define IXGBE_PSRTYPE_RQPL_SHIFT 29
/* CTRL Bit Masks */
-#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Primary Disable bit */
#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
@@ -1810,7 +1810,7 @@ enum {
/* STATUS Bit Masks */
#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
-#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Primary Enable Status */
#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
@@ -2192,8 +2192,8 @@ enum {
#define IXGBE_PCIDEVCTRL2_4_8s 0xd
#define IXGBE_PCIDEVCTRL2_17_34s 0xe
-/* Number of 100 microseconds we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+/* Number of 100-microsecond intervals we wait for PCI Express primary disable */
+#define IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT 800
/* RAH */
#define IXGBE_RAH_VIND_MASK 0x003C0000
@@ -3505,10 +3505,10 @@ struct ixgbe_phy_operations {
s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
- s32 (*check_overtemp)(struct ixgbe_hw *);
+ bool (*check_overtemp)(struct ixgbe_hw *);
s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
s32 (*enter_lplu)(struct ixgbe_hw *);
- s32 (*handle_lasi)(struct ixgbe_hw *hw);
+ s32 (*handle_lasi)(struct ixgbe_hw *hw, bool *);
s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
u8 *value);
s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
@@ -3661,45 +3661,6 @@ struct ixgbe_info {
const u32 *mvals;
};
-
-/* Error Codes */
-#define IXGBE_ERR_EEPROM -1
-#define IXGBE_ERR_EEPROM_CHECKSUM -2
-#define IXGBE_ERR_PHY -3
-#define IXGBE_ERR_CONFIG -4
-#define IXGBE_ERR_PARAM -5
-#define IXGBE_ERR_MAC_TYPE -6
-#define IXGBE_ERR_UNKNOWN_PHY -7
-#define IXGBE_ERR_LINK_SETUP -8
-#define IXGBE_ERR_ADAPTER_STOPPED -9
-#define IXGBE_ERR_INVALID_MAC_ADDR -10
-#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
-#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
-#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
-#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
-#define IXGBE_ERR_RESET_FAILED -15
-#define IXGBE_ERR_SWFW_SYNC -16
-#define IXGBE_ERR_PHY_ADDR_INVALID -17
-#define IXGBE_ERR_I2C -18
-#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
-#define IXGBE_ERR_SFP_NOT_PRESENT -20
-#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
-#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
-#define IXGBE_ERR_FDIR_REINIT_FAILED -23
-#define IXGBE_ERR_EEPROM_VERSION -24
-#define IXGBE_ERR_NO_SPACE -25
-#define IXGBE_ERR_OVERTEMP -26
-#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
-#define IXGBE_ERR_FC_NOT_SUPPORTED -28
-#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
-#define IXGBE_ERR_PBA_SECTION -31
-#define IXGBE_ERR_INVALID_ARGUMENT -32
-#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
-#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
-#define IXGBE_ERR_FW_RESP_INVALID -39
-#define IXGBE_ERR_TOKEN_RETRY -40
-#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
-
#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
#define IXGBE_FUSES0_300MHZ BIT(5)
#define IXGBE_FUSES0_REV_MASK (3u << 6)
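For review, the removed private codes map onto errnos as substituted throughout this patch (summary compiled from the hunks above, not from any header):
/* Old driver-private code             -> errno used in this patch
 * IXGBE_ERR_PARAM, _INVALID_ARGUMENT  -> -EINVAL
 * IXGBE_ERR_SWFW_SYNC                 -> -EBUSY
 * IXGBE_ERR_SFP_NOT_PRESENT           -> -ENOENT
 * IXGBE_ERR_SFP_NOT_SUPPORTED,
 * IXGBE_NOT_IMPLEMENTED               -> -EOPNOTSUPP
 * IXGBE_ERR_MASTER_REQUESTS_PENDING   -> -EALREADY
 * IXGBE_ERR_EEPROM_VERSION            -> -EACCES
 * IXGBE_ERR_PHY_ADDR_INVALID          -> -EFAULT
 * IXGBE_ERR_TOKEN_RETRY               -> -EAGAIN
 * IXGBE_ERR_NO_SPACE                  -> -ENOSPC
 * IXGBE_ERR_LINK_SETUP                -> -EIO or -EINVAL (by context)
 * IXGBE_ERR_OVERTEMP                  -> dropped for bool/out-param
 * remaining codes (PHY, I2C, MBX,
 * EEPROM, RESET, HOST_INTERFACE, ...) -> -EIO
 */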
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index de563cfd294d..fb4ced963c88 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -84,7 +84,7 @@ mac_reset_top:
status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
if (status) {
hw_dbg(hw, "semaphore failed with %d", status);
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
ctrl = IXGBE_CTRL_RST;
@@ -103,7 +103,7 @@ mac_reset_top:
}
if (ctrl & IXGBE_CTRL_RST_MASK) {
- status = IXGBE_ERR_RESET_FAILED;
+ status = -EIO;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
msleep(100);
@@ -220,7 +220,7 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
s32 status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = ixgbe_read_eerd_generic(hw, offset, data);
@@ -243,7 +243,7 @@ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
s32 status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = ixgbe_read_eerd_buffer_generic(hw, offset, words, data);
@@ -264,7 +264,7 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
s32 status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = ixgbe_write_eewr_generic(hw, offset, data);
@@ -287,7 +287,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
s32 status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = ixgbe_write_eewr_buffer_generic(hw, offset, words, data);
@@ -324,7 +324,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
for (i = 0; i < checksum_last_word; i++) {
if (ixgbe_read_eerd_generic(hw, i, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
checksum += word;
}
@@ -349,8 +349,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
- break;
+ return -EIO;
}
/* Skip pointer section if length is invalid. */
@@ -361,7 +360,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
for (j = pointer + 1; j <= pointer + length; j++) {
if (ixgbe_read_eerd_generic(hw, j, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
checksum += word;
}
@@ -398,7 +397,7 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
}
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = hw->eeprom.ops.calc_checksum(hw);
if (status < 0)
@@ -419,7 +418,7 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
*/
if (read_checksum != checksum) {
hw_dbg(hw, "Invalid EEPROM checksum");
- status = IXGBE_ERR_EEPROM_CHECKSUM;
+ status = -EIO;
}
/* If the user cares, return the calculated checksum */
@@ -456,7 +455,7 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
}
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = hw->eeprom.ops.calc_checksum(hw);
if (status < 0)
@@ -491,7 +490,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
s32 status;
status = ixgbe_poll_flash_update_done_X540(hw);
- if (status == IXGBE_ERR_EEPROM) {
+ if (status == -EIO) {
hw_dbg(hw, "Flash update time out\n");
return status;
}
@@ -541,7 +540,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
return 0;
udelay(5);
}
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/**
@@ -576,7 +575,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
* SW_FW_SYNC bits (not just NVM)
*/
if (ixgbe_get_swfw_sync_semaphore(hw))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
if (!(swfw_sync & (fwmask | swmask | hwmask))) {
@@ -600,7 +599,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
* bits in the SW_FW_SYNC register.
*/
if (ixgbe_get_swfw_sync_semaphore(hw))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
if (swfw_sync & (fwmask | hwmask)) {
swfw_sync |= swmask;
@@ -623,11 +622,11 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
rmask |= IXGBE_GSSR_I2C_MASK;
ixgbe_release_swfw_sync_X540(hw, rmask);
ixgbe_release_swfw_sync_semaphore(hw);
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
ixgbe_release_swfw_sync_semaphore(hw);
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
/**
@@ -681,7 +680,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
if (i == timeout) {
hw_dbg(hw,
"Software semaphore SMBI between device drivers not granted.\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/* Now get the semaphore between SW/FW through the REGSMP bit */
@@ -698,7 +697,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
*/
hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n");
ixgbe_release_swfw_sync_semaphore(hw);
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/**
@@ -769,7 +768,7 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
bool link_up;
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* Link should be up in order for the blink bit in the LED control
* register to work. Force link and speed in the MAC if link is down.
@@ -805,7 +804,7 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
u32 ledctl_reg;
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* Restore the LED to its default value. */
ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 74728c0a44a8..48b95f0bca4e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -206,13 +206,13 @@ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
}
if (retry == IXGBE_CS4227_RETRIES) {
hw_err(hw, "CS4227 reset did not complete\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
hw_err(hw, "CS4227 EEPROM did not load successfully\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
return 0;
@@ -350,13 +350,13 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
}
static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
}
/**
@@ -463,7 +463,7 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
--retries;
} while (retries > 0);
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EIO;
}
static const struct {
@@ -511,7 +511,7 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
- return IXGBE_ERR_PHY_ADDR_INVALID;
+ return -EFAULT;
hw->phy.autoneg_advertised = hw->phy.speeds_supported;
hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
@@ -568,7 +568,7 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
hw_err(hw, "rx_pause not valid in strict IEEE mode\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
switch (hw->fc.requested_mode) {
@@ -600,8 +600,10 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
if (rc)
return rc;
+
if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
- return IXGBE_ERR_OVERTEMP;
+ return -EIO;
+
return 0;
}
@@ -675,7 +677,7 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
*ctrl = command;
if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
hw_dbg(hw, "IOSF wait timed out\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
return 0;
@@ -715,7 +717,8 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
hw_dbg(hw, "Failed to read, error %x\n", error);
- return IXGBE_ERR_PHY;
+ ret = -EIO;
+ goto out;
}
if (!ret)
@@ -750,9 +753,9 @@ static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
return 0;
if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
- return IXGBE_ERR_FW_RESP_INVALID;
+ return -EIO;
- return IXGBE_ERR_TOKEN_RETRY;
+ return -EAGAIN;
}
/**
@@ -778,7 +781,7 @@ static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
return status;
if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
return 0;
- return IXGBE_ERR_FW_RESP_INVALID;
+ return -EIO;
}
/**
@@ -942,7 +945,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
local_buffer = buf;
} else {
if (buffer_size < ptr)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
local_buffer = &buffer[ptr];
}
@@ -960,7 +963,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
}
if (buffer && ((u32)start + (u32)length > buffer_size))
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
for (i = start; length; i++, length--) {
if (i == bufsz && !buffer) {
@@ -1012,7 +1015,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
local_buffer = eeprom_ptrs;
} else {
if (buffer_size < IXGBE_EEPROM_LAST_WORD)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
local_buffer = buffer;
}
@@ -1148,7 +1151,7 @@ static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
* calculated checksum
*/
if (read_checksum != checksum) {
- status = IXGBE_ERR_EEPROM_CHECKSUM;
+ status = -EIO;
hw_dbg(hw, "Invalid EEPROM checksum");
}
@@ -1203,7 +1206,7 @@ static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
} else {
hw_dbg(hw, "write ee hostif failed to get semaphore");
- status = IXGBE_ERR_SWFW_SYNC;
+ status = -EBUSY;
}
return status;
@@ -1415,7 +1418,7 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
hw_dbg(hw, "Failed to write, error %x\n", error);
- return IXGBE_ERR_PHY;
+ return -EIO;
}
out:
@@ -1558,7 +1561,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
/* iXFI is only supported with X552 */
if (mac->type != ixgbe_mac_X550EM_x)
- return IXGBE_ERR_LINK_SETUP;
+ return -EIO;
/* Disable AN and force speed to 10G Serial. */
status = ixgbe_read_iosf_sb_reg_x550(hw,
@@ -1580,7 +1583,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
break;
default:
/* Other link speeds are not supported by internal KR PHY. */
- return IXGBE_ERR_LINK_SETUP;
+ return -EINVAL;
}
status = ixgbe_write_iosf_sb_reg_x550(hw,
@@ -1611,7 +1614,7 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
{
switch (hw->phy.sfp_type) {
case ixgbe_sfp_type_not_present:
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
case ixgbe_sfp_type_da_cu_core0:
case ixgbe_sfp_type_da_cu_core1:
*linear = true;
@@ -1630,7 +1633,7 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
case ixgbe_sfp_type_1g_cu_core0:
case ixgbe_sfp_type_1g_cu_core1:
default:
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
return 0;
@@ -1660,7 +1663,7 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
* there is no reason to configure CS4227 and SFP not present error is
* not accepted in the setup MAC link flow.
*/
- if (status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status == -ENOENT)
return 0;
if (status)
@@ -1718,7 +1721,7 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
break;
default:
/* Other link speeds are not supported by internal PHY. */
- return IXGBE_ERR_LINK_SETUP;
+ return -EINVAL;
}
status = mac->ops.write_iosf_sb_reg(hw,
@@ -1753,7 +1756,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/* If no SFP module present, then return success. Return success since
* SFP not present error is not accepted in the setup MAC link flow.
*/
- if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (ret_val == -ENOENT)
return 0;
if (ret_val)
@@ -1803,7 +1806,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/* If no SFP module present, then return success. Return success since
* SFP not present error is not accepted in the setup MAC link flow.
*/
- if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (ret_val == -ENOENT)
return 0;
if (ret_val)
@@ -1813,7 +1816,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
ixgbe_setup_kr_speed_x550em(hw, speed);
if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE)
- return IXGBE_ERR_PHY_ADDR_INVALID;
+ return -EFAULT;
/* Get external PHY SKU id */
ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
@@ -1912,7 +1915,7 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
u16 i, autoneg_status;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
- return IXGBE_ERR_CONFIG;
+ return -EIO;
status = ixgbe_check_mac_link_generic(hw, speed, link_up,
link_up_wait_to_complete);
@@ -2095,9 +2098,9 @@ static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
*/
static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
{
- s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
ixgbe_link_speed speed;
+ s32 status = -EIO;
bool link_up;
/* AN should have completed when the cable was plugged in.
@@ -2115,7 +2118,7 @@ static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
/* Check if auto-negotiation has completed */
status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
- status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ status = -EIO;
goto out;
}
@@ -2319,18 +2322,18 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
* @hw: pointer to hardware structure
* @lsc: pointer to boolean flag which indicates whether external Base T
* PHY interrupt is lsc
+ * @is_overtemp: indicates whether an overtemp event was encountered
*
* Determine if external Base T PHY interrupt cause is high temperature
* failure alarm or link status change.
- *
- * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
- * failure alarm, else return PHY access status.
**/
-static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
+static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
+ bool *is_overtemp)
{
u32 status;
u16 reg;
+ *is_overtemp = false;
*lsc = false;
/* Vendor alarm triggered */
@@ -2362,7 +2365,8 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
/* power down the PHY in case the PHY FW didn't already */
ixgbe_set_copper_phy_power(hw, false);
- return IXGBE_ERR_OVERTEMP;
+ *is_overtemp = true;
+ return -EIO;
}
if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
/* device fault alarm triggered */
@@ -2376,7 +2380,8 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
/* power down the PHY in case the PHY FW didn't */
ixgbe_set_copper_phy_power(hw, false);
- return IXGBE_ERR_OVERTEMP;
+ *is_overtemp = true;
+ return -EIO;
}
}
@@ -2412,12 +2417,12 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
**/
static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
{
+ bool lsc, overtemp;
u32 status;
u16 reg;
- bool lsc;
/* Clear interrupt flags */
- status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc, &overtemp);
/* Enable link status change alarm */
@@ -2496,21 +2501,20 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
/**
* ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
* @hw: pointer to hardware structure
+ * @is_overtemp: indicates whether an overtemp event was encountered
*
* Handle external Base T PHY interrupt. If high temperature
* failure alarm then return error, else if link status change
* then setup internal/external PHY link
- *
- * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
- * failure alarm, else return PHY access status.
**/
-static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
+ bool *is_overtemp)
{
struct ixgbe_phy_info *phy = &hw->phy;
bool lsc;
u32 status;
- status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc, is_overtemp);
if (status)
return status;
@@ -2642,7 +2646,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
u16 speed;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
- return IXGBE_ERR_CONFIG;
+ return -EIO;
if (!(hw->mac.type == ixgbe_mac_X550EM_x &&
!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) {
@@ -2685,7 +2689,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
break;
default:
/* Internal PHY does not support anything else */
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
return ixgbe_setup_ixfi_x550em(hw, &force_speed);
@@ -2717,7 +2721,7 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
u16 phy_data;
if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* To turn on the LED, set mode to ON. */
hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
@@ -2739,7 +2743,7 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
u16 phy_data;
if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* To turn on the LED, set mode to ON. */
hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
@@ -2763,8 +2767,9 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
*
* Sends driver version number to firmware through the manageability
* block. On success return 0
- * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
- * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ * else returns -EBUSY when encountering an error acquiring the
+ * semaphore, -EIO when the command fails, or -EINVAL when incorrect
+ * parameters are passed.
**/
static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 sub, u16 len,
@@ -2775,7 +2780,7 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
int i;
if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string)))
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
@@ -2800,7 +2805,7 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
if (fw_cmd.hdr.cmd_or_resp.ret_status !=
FW_CEM_RESP_STATUS_SUCCESS)
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EIO;
return 0;
}
@@ -2857,7 +2862,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
/* 10gig parts do not have a word in the EEPROM to determine the
@@ -2892,7 +2897,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
break;
default:
hw_err(hw, "Flow control param set incorrectly\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
switch (hw->device_id) {
@@ -2936,8 +2941,8 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
{
u32 link_s1, lp_an_page_low, an_cntl_1;
- s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
ixgbe_link_speed speed;
+ s32 status = -EIO;
bool link_up;
/* AN should have completed when the cable was plugged in.
@@ -2963,7 +2968,7 @@ static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
hw_dbg(hw, "Auto-Negotiation did not complete\n");
- status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ status = -EIO;
goto out;
}
@@ -3137,21 +3142,23 @@ static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
/**
* ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
* @hw: pointer to hardware structure
+ *
+ * Return true when an overtemp event is detected, otherwise false.
*/
-static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
+static bool ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
{
u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
s32 rc;
rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
if (rc)
- return rc;
+ return false;
if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
ixgbe_shutdown_fw_phy(hw);
- return IXGBE_ERR_OVERTEMP;
+ return true;
}
- return 0;
+ return false;
}
/**
@@ -3201,8 +3208,7 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
- if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
- ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
+ if (ret_val == -EOPNOTSUPP || ret_val == -EFAULT)
return ret_val;
/* Setup function pointers based on detected hardware */
@@ -3410,8 +3416,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
/* PHY ops must be identified and initialized prior to reset */
status = hw->phy.ops.init(hw);
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
- status == IXGBE_ERR_PHY_ADDR_INVALID)
+ if (status == -EOPNOTSUPP || status == -EFAULT)
return status;
/* start the external PHY */
@@ -3427,7 +3432,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
hw->phy.sfp_setup_needed = false;
}
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (status == -EOPNOTSUPP)
return status;
/* Reset PHY */
@@ -3451,7 +3456,7 @@ mac_reset_top:
status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
if (status) {
hw_dbg(hw, "semaphore failed with %d", status);
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
@@ -3469,7 +3474,7 @@ mac_reset_top:
}
if (ctrl & IXGBE_CTRL_RST_MASK) {
- status = IXGBE_ERR_RESET_FAILED;
+ status = -EIO;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
@@ -3565,7 +3570,7 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
if (hw->fc.requested_mode == ixgbe_fc_default)
@@ -3622,7 +3627,7 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
break;
default:
hw_err(hw, "Flow control param set incorrectly\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
status = hw->mac.ops.write_iosf_sb_reg(hw,
@@ -3718,7 +3723,7 @@ static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
return 0;
if (hmask)
ixgbe_release_swfw_sync_X540(hw, hmask);
- if (status != IXGBE_ERR_TOKEN_RETRY)
+ if (status != -EAGAIN)
return status;
msleep(FW_PHY_TOKEN_DELAY);
}
@@ -3762,7 +3767,7 @@ static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
s32 status;
if (hw->mac.ops.acquire_swfw_sync(hw, mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
@@ -3788,7 +3793,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
s32 status;
if (hw->mac.ops.acquire_swfw_sync(hw, mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
hw->mac.ops.release_swfw_sync(hw, mask);
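
The ixgbe hunks above retire the driver-private IXGBE_ERR_* status codes in favor of standard negative errno values (-EINVAL, -EIO, -EBUSY, and so on). A minimal userspace sketch of that mapping pattern, with made-up HW_ERR_* stand-ins rather than the driver's real definitions:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical driver-private status codes, stand-ins for IXGBE_ERR_*. */
    enum hw_status {
            HW_OK = 0,
            HW_ERR_PARAM,
            HW_ERR_EEPROM_CHECKSUM,
            HW_ERR_SWFW_SYNC,
    };

    /* Map private codes onto the closest standard errno, as the diff does. */
    static int hw_status_to_errno(enum hw_status s)
    {
            switch (s) {
            case HW_OK:                  return 0;
            case HW_ERR_PARAM:           return -EINVAL;
            case HW_ERR_EEPROM_CHECKSUM: return -EIO;
            case HW_ERR_SWFW_SYNC:       return -EBUSY;
            }
            return -EIO; /* unknown failures collapse to a generic I/O error */
    }

    int main(void)
    {
            printf("%d\n", hw_status_to_errno(HW_ERR_SWFW_SYNC)); /* -16 */
            return 0;
    }
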
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6e73ffe6f928..12e8b0f957d3 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -466,7 +466,6 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
- dev_kfree_skb_any(skb);
netdev_err(dev, "tx ring full\n");
netif_tx_stop_queue(txq);
return NETDEV_TX_BUSY;
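
The lantiq_etop hunk deletes the dev_kfree_skb_any() that ran before returning NETDEV_TX_BUSY: that return code tells the networking core to requeue and retry the same skb, so freeing it first would hand the core a dangling pointer. A toy model of the ownership contract, with plain C stand-ins rather than the kernel API:

    #include <stdio.h>

    enum tx_ret { TX_OK, TX_BUSY };

    struct pkt { int len; };

    static int ring_full = 1;

    /* Contract: on TX_BUSY the caller still owns pkt and will retry,
     * so the "driver" must not free it.  On TX_OK the driver consumes it. */
    static enum tx_ret start_xmit(struct pkt *p)
    {
            if (ring_full) {
                    /* do NOT free p here -- the caller requeues it */
                    return TX_BUSY;
            }
            printf("sent %d bytes\n", p->len);
            /* ownership transferred: a real driver frees on TX completion */
            return TX_OK;
    }

    int main(void)
    {
            struct pkt p = { .len = 60 };

            while (start_xmit(&p) == TX_BUSY)
                    ring_full = 0; /* pretend the ring drained */
            return 0;
    }
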
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 82ea55ae5053..10e5c4c59657 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2476,6 +2476,7 @@ out_free:
for (i = 0; i < mp->rxq_count; i++)
rxq_deinit(mp->rxq + i);
out:
+ napi_disable(&mp->napi);
free_irq(dev->irq, dev);
return err;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 64aa5510e61a..110221a16bf6 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -549,6 +549,20 @@ struct mvneta_rx_desc {
};
#endif
+enum mvneta_tx_buf_type {
+ MVNETA_TYPE_SKB,
+ MVNETA_TYPE_XDP_TX,
+ MVNETA_TYPE_XDP_NDO,
+};
+
+struct mvneta_tx_buf {
+ enum mvneta_tx_buf_type type;
+ union {
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+ };
+};
+
struct mvneta_tx_queue {
/* Number of this TX queue, in the range 0-7 */
u8 id;
@@ -564,8 +578,8 @@ struct mvneta_tx_queue {
int tx_stop_threshold;
int tx_wake_threshold;
- /* Array of transmitted skb */
- struct sk_buff **tx_skb;
+ /* Array of transmitted buffers */
+ struct mvneta_tx_buf *buf;
/* Index of last TX DMA descriptor that was inserted */
int txq_put_index;
@@ -1408,7 +1422,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
*/
if (txq_number == 1)
txq_map = (cpu == pp->rxq_def) ?
- MVNETA_CPU_TXQ_ACCESS(1) : 0;
+ MVNETA_CPU_TXQ_ACCESS(0) : 0;
} else {
txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
@@ -1774,14 +1788,9 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
int i;
for (i = 0; i < num; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
struct mvneta_tx_desc *tx_desc = txq->descs +
txq->txq_get_index;
- struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
-
- if (skb) {
- bytes_compl += skb->len;
- pkts_compl++;
- }
mvneta_txq_inc_get(txq);
@@ -1789,9 +1798,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
dma_unmap_single(pp->dev->dev.parent,
tx_desc->buf_phys_addr,
tx_desc->data_size, DMA_TO_DEVICE);
- if (!skb)
+ if (!buf->skb)
continue;
- dev_kfree_skb_any(skb);
+
+ bytes_compl += buf->skb->len;
+ pkts_compl++;
+ dev_kfree_skb_any(buf->skb);
}
netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
@@ -2242,16 +2254,19 @@ static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
- struct mvneta_tx_desc *tx_desc;
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ struct mvneta_tx_desc *tx_desc;
- txq->tx_skb[txq->txq_put_index] = NULL;
tx_desc = mvneta_txq_next_desc_get(txq);
tx_desc->data_size = hdr_len;
tx_desc->command = mvneta_skb_tx_csum(pp, skb);
tx_desc->command |= MVNETA_TXD_F_DESC;
tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
txq->txq_put_index * TSO_HEADER_SIZE;
+ buf->type = MVNETA_TYPE_SKB;
+ buf->skb = NULL;
+
mvneta_txq_inc_put(txq);
}
@@ -2260,6 +2275,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
struct sk_buff *skb, char *data, int size,
bool last_tcp, bool is_last)
{
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
struct mvneta_tx_desc *tx_desc;
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2273,7 +2289,8 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
}
tx_desc->command = 0;
- txq->tx_skb[txq->txq_put_index] = NULL;
+ buf->type = MVNETA_TYPE_SKB;
+ buf->skb = NULL;
if (last_tcp) {
/* last descriptor in the TCP packet */
@@ -2281,7 +2298,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
/* last descriptor in SKB */
if (is_last)
- txq->tx_skb[txq->txq_put_index] = skb;
+ buf->skb = skb;
}
mvneta_txq_inc_put(txq);
return 0;
@@ -2366,6 +2383,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
int i, nr_frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < nr_frags; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *addr = skb_frag_address(frag);
@@ -2385,12 +2403,13 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
if (i == nr_frags - 1) {
/* Last descriptor */
tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
- txq->tx_skb[txq->txq_put_index] = skb;
+ buf->skb = skb;
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
- txq->tx_skb[txq->txq_put_index] = NULL;
+ buf->skb = NULL;
}
+ buf->type = MVNETA_TYPE_SKB;
mvneta_txq_inc_put(txq);
}
@@ -2418,6 +2437,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
struct mvneta_port *pp = netdev_priv(dev);
u16 txq_id = skb_get_queue_mapping(skb);
struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
struct mvneta_tx_desc *tx_desc;
int len = skb->len;
int frags = 0;
@@ -2450,16 +2470,17 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ buf->type = MVNETA_TYPE_SKB;
if (frags == 1) {
/* First and Last descriptor */
tx_cmd |= MVNETA_TXD_FLZ_DESC;
tx_desc->command = tx_cmd;
- txq->tx_skb[txq->txq_put_index] = skb;
+ buf->skb = skb;
mvneta_txq_inc_put(txq);
} else {
/* First but not Last */
tx_cmd |= MVNETA_TXD_F_DESC;
- txq->tx_skb[txq->txq_put_index] = NULL;
+ buf->skb = NULL;
mvneta_txq_inc_put(txq);
tx_desc->command = tx_cmd;
/* Continue with other skb fragments */
@@ -3005,9 +3026,8 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
txq->last_desc = txq->size - 1;
- txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
- GFP_KERNEL);
- if (!txq->tx_skb) {
+ txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
+ if (!txq->buf) {
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
@@ -3019,7 +3039,7 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
txq->size * TSO_HEADER_SIZE,
&txq->tso_hdrs_phys, GFP_KERNEL);
if (!txq->tso_hdrs) {
- kfree(txq->tx_skb);
+ kfree(txq->buf);
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
@@ -3074,7 +3094,7 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
{
struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
- kfree(txq->tx_skb);
+ kfree(txq->buf);
if (txq->tso_hdrs)
dma_free_coherent(pp->dev->dev.parent,
@@ -3717,7 +3737,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
/* Use the cpu associated with the rxq when it is online, in all
* the other cases, use the cpu 0 which can't be offline.
*/
- if (cpu_online(pp->rxq_def))
+ if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
elected_cpu = pp->rxq_def;
max_cpu = num_present_cpus();
@@ -3742,7 +3762,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
*/
if (txq_number == 1)
txq_map = (cpu == elected_cpu) ?
- MVNETA_CPU_TXQ_ACCESS(1) : 0;
+ MVNETA_CPU_TXQ_ACCESS(0) : 0;
else
txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
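
The mvneta change generalizes the per-descriptor bookkeeping from a bare sk_buff pointer to a struct carrying a type tag plus a union, so the same TX ring slot can describe either an skb or an XDP frame and the completion path can dispatch on the tag. A compact sketch of that tagged-union cleanup, with plain pointers standing in for the kernel types:

    #include <stdio.h>
    #include <stdlib.h>

    enum buf_type { BUF_SKB, BUF_XDP };

    struct tx_buf {
            enum buf_type type;
            union {
                    void *skb;   /* stand-in for struct sk_buff * */
                    void *xdpf;  /* stand-in for struct xdp_frame * */
            };
    };

    /* Completion path dispatches on the tag, mirroring mvneta_txq_bufs_free(). */
    static void tx_buf_free(struct tx_buf *b)
    {
            switch (b->type) {
            case BUF_SKB:
                    if (b->skb)     /* middle descriptors carry NULL */
                            free(b->skb);
                    break;
            case BUF_XDP:
                    free(b->xdpf);  /* a real driver would xdp_return_frame() */
                    break;
            }
    }

    int main(void)
    {
            struct tx_buf b = { .type = BUF_SKB, .skb = malloc(64) };

            tx_buf_free(&b);
            return 0;
    }
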
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 543a310ec102..cf45b9210c15 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1202,5 +1202,6 @@ u32 mvpp2_read(struct mvpp2 *priv, u32 offset);
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+void mvpp2_dbgfs_exit(void);
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 4a3baa7e0142..75e83ea2a926 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -691,6 +691,13 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent,
return 0;
}
+static struct dentry *mvpp2_root;
+
+void mvpp2_dbgfs_exit(void)
+{
+ debugfs_remove(mvpp2_root);
+}
+
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
{
debugfs_remove_recursive(priv->dbgfs_dir);
@@ -700,10 +707,9 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
{
- struct dentry *mvpp2_dir, *mvpp2_root;
+ struct dentry *mvpp2_dir;
int ret, i;
- mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
if (!mvpp2_root)
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index d700f1b5a4bf..7a2293a5bcc9 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4233,6 +4233,11 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
break;
case ETHTOOL_GRXCLSRLALL:
for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
+ if (loc == info->rule_cnt) {
+ ret = -EMSGSIZE;
+ break;
+ }
+
if (port->rfs_rules[i])
rules[loc++] = i;
}
@@ -6004,7 +6009,18 @@ static struct platform_driver mvpp2_driver = {
},
};
-module_platform_driver(mvpp2_driver);
+static int __init mvpp2_driver_init(void)
+{
+ return platform_driver_register(&mvpp2_driver);
+}
+module_init(mvpp2_driver_init);
+
+static void __exit mvpp2_driver_exit(void)
+{
+ platform_driver_unregister(&mvpp2_driver);
+ mvpp2_dbgfs_exit();
+}
+module_exit(mvpp2_driver_exit);
MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 54e9f6dc24ea..910ed148e827 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -582,7 +582,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
/* Release thread waiting for completion */
lmac->cmd_pend = false;
- wake_up_interruptible(&lmac->wq_cmd_cmplt);
+ wake_up(&lmac->wq_cmd_cmplt);
break;
case CGX_EVT_ASYNC:
if (cgx_event_is_linkevent(event))
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 02b4620f7368..9c6307186505 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1708,10 +1708,9 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first,
}
}
-static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq)
{
struct rvu *rvu = (struct rvu *)rvu_irq;
- int vfs = rvu->vfs;
u64 intr;
intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
@@ -1723,6 +1722,18 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfs = rvu->vfs;
+ u64 intr;
+
+ /* Sync with mbox memory region */
+ rmb();
+
/* Handle VF interrupts */
if (vfs > 64) {
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
@@ -2035,7 +2046,7 @@ static int rvu_register_interrupts(struct rvu *rvu)
/* Register mailbox interrupt handler */
sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
- rvu_mbox_intr_handler, 0,
+ rvu_mbox_pf_intr_handler, 0,
&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
if (ret) {
dev_err(rvu->dev,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 4a7609fd6dd0..5bc54ba68c83 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2430,9 +2430,10 @@ rx_frscfg:
if (link < 0)
return NIX_AF_ERR_RX_LINK_INVALID;
- nix_find_link_frs(rvu, req, pcifunc);
linkcfg:
+ nix_find_link_frs(rvu, req, pcifunc);
+
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
if (req->update_minlen)
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index b02b6523083c..99451585a45f 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2201,7 +2201,7 @@ struct rx_ring_info {
struct sk_buff *skb;
dma_addr_t data_addr;
DEFINE_DMA_UNMAP_LEN(data_size);
- dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
+ dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT ?: 1];
};
enum flow_control {
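
The sky2.h fix sizes frag_addr with `ETH_JUMBO_MTU >> PAGE_SHIFT ?: 1`, so on configurations where the shift evaluates to 0 the member is still a one-element array instead of a zero-length one. The binary `?:` with an omitted middle operand is a GNU extension; a small compile-and-run demo under that assumption (build with gcc):

    #include <stdio.h>

    #define JUMBO  9000
    #define SHIFT  16                     /* pretend 64K pages: 9000 >> 16 == 0 */
    #define NFRAGS (JUMBO >> SHIFT ?: 1)  /* GNU ?: keeps the size at least 1 */

    int main(void)
    {
            unsigned long frag_addr[NFRAGS];

            printf("array has %zu slot(s)\n",
                   sizeof(frag_addr) / sizeof(frag_addr[0])); /* 1 */
            return 0;
    }
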
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index f9139150a8a2..7b9f5eba78dc 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2008,6 +2008,9 @@ static int mtk_hwlro_get_fdir_all(struct net_device *dev,
int i;
for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
if (mac->hwlro_ip[i]) {
rule_locs[cnt] = i;
cnt++;
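
Both the mvpp2 ETHTOOL_GRXCLSRLALL hunk and this mtk_eth_soc one add the same guard: stop filling the caller-sized rule-location array and return -EMSGSIZE once it is full, instead of writing past its end. A simplified model of the bounded dump:

    #include <errno.h>
    #include <stdio.h>

    #define TABLE_SLOTS 8

    static int table[TABLE_SLOTS] = { 1, 0, 1, 1, 0, 1, 0, 1 };

    /* Copy active slot indices into locs[], which holds at most max entries. */
    static int dump_rules(int *locs, int max)
    {
            int cnt = 0;

            for (int i = 0; i < TABLE_SLOTS; i++) {
                    if (!table[i])
                            continue;
                    if (cnt == max)      /* the caller's buffer is full */
                            return -EMSGSIZE;
                    locs[cnt++] = i;
            }
            return cnt;
    }

    int main(void)
    {
            int locs[2];

            printf("%d\n", dump_rules(locs, 2)); /* -90: 5 rules, room for 2 */
            return 0;
    }
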
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 427e7a31862c..d7f2890c254f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -697,7 +697,8 @@ static int mlx4_create_zones(struct mlx4_dev *dev,
err = mlx4_bitmap_init(*bitmap + k, 1,
MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
0);
- mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
+ if (!err)
+ mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
}
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 6c7b364d0bf0..93a6597366f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1417,8 +1417,8 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
return -EFAULT;
err = sscanf(outlen_str, "%d", &outlen);
- if (err < 0)
- return err;
+ if (err != 1)
+ return -EINVAL;
ptr = kzalloc(outlen, GFP_KERNEL);
if (!ptr)
@@ -1682,12 +1682,17 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
struct mlx5_cmd *cmd = &dev->cmd;
int i;
- for (i = 0; i < cmd->max_reg_cmds; i++)
- while (down_trylock(&cmd->sem))
+ for (i = 0; i < cmd->max_reg_cmds; i++) {
+ while (down_trylock(&cmd->sem)) {
mlx5_cmd_trigger_completions(dev);
+ cond_resched();
+ }
+ }
- while (down_trylock(&cmd->pages_sem))
+ while (down_trylock(&cmd->pages_sem)) {
mlx5_cmd_trigger_completions(dev);
+ cond_resched();
+ }
/* Unlock cmdif */
up(&cmd->pages_sem);
@@ -1850,7 +1855,7 @@ void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
ctx->dev = dev;
/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
atomic_set(&ctx->num_inflight, 1);
- init_waitqueue_head(&ctx->wait);
+ init_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
@@ -1864,8 +1869,8 @@ EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
*/
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
- atomic_dec(&ctx->num_inflight);
- wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
+ if (!atomic_dec_and_test(&ctx->num_inflight))
+ wait_for_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
@@ -1876,7 +1881,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
work->user_callback(status, work);
if (atomic_dec_and_test(&ctx->num_inflight))
- wake_up(&ctx->wait);
+ complete(&ctx->inflight_done);
}
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
@@ -1892,7 +1897,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
mlx5_cmd_exec_cb_handler, work, false);
if (ret && atomic_dec_and_test(&ctx->num_inflight))
- wake_up(&ctx->wait);
+ complete(&ctx->inflight_done);
return ret;
}
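
The mlx5 async-context hunks replace a wait_event() on an atomic counter with a completion: num_inflight starts at 1, each finished callback decrements it, and whichever decrement reaches zero fires the completion that mlx5_cmd_cleanup_async_ctx() waits on. A pthread sketch of the same rendezvous, with a condvar standing in for the kernel completion (build with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int num_inflight = 1; /* starts at 1, like the mlx5 ctx */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
    static int done;

    /* Async callback path: the decrement that reaches zero signals "done". */
    static void *callback(void *arg)
    {
            (void)arg;
            if (atomic_fetch_sub(&num_inflight, 1) == 1) {
                    pthread_mutex_lock(&lock);
                    done = 1;
                    pthread_cond_signal(&done_cv);
                    pthread_mutex_unlock(&lock);
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            atomic_fetch_add(&num_inflight, 1); /* one command in flight */
            pthread_create(&t, NULL, callback, NULL);

            /* Cleanup drops its own reference; wait only if callbacks remain. */
            if (atomic_fetch_sub(&num_inflight, 1) != 1) {
                    pthread_mutex_lock(&lock);
                    while (!done)
                            pthread_cond_wait(&done_cv, &lock);
                    pthread_mutex_unlock(&lock);
            }
            pthread_join(t, NULL);
            printf("all callbacks completed\n");
            return 0;
    }
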
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index d63ce3feb65c..6e763699d504 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -55,7 +55,7 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
u32 running_fw, stored_fw;
int err;
- err = devlink_info_driver_name_put(req, DRIVER_NAME);
+ err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index f8144ce7e476..53775f3cdaf4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -480,7 +480,7 @@ static void poll_trace(struct mlx5_fw_tracer *tracer,
(u64)timestamp_low;
break;
default:
- if (tracer_event->event_id >= tracer->str_db.first_string_trace ||
+ if (tracer_event->event_id >= tracer->str_db.first_string_trace &&
tracer_event->event_id <= tracer->str_db.first_string_trace +
tracer->str_db.num_string_trace) {
tracer_event->type = TRACER_EVENT_TYPE_STRING;
@@ -600,7 +600,7 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
} else {
cur_string = mlx5_tracer_message_get(tracer, tracer_event);
if (!cur_string) {
- pr_debug("%s Got string event for unknown string tdsm: %d\n",
+ pr_debug("%s Got string event for unknown string tmsn: %d\n",
__func__, tracer_event->string_event.tmsn);
return -1;
}
@@ -636,7 +636,7 @@ static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer,
trace_timestamp = (timestamp_event.timestamp & MASK_52_7) |
(str_frmt->timestamp & MASK_6_0);
else
- trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) |
+ trace_timestamp = ((timestamp_event.timestamp - 1) & MASK_52_7) |
(str_frmt->timestamp & MASK_6_0);
mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp);
@@ -687,8 +687,8 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
get_block_timestamp(tracer, &tmp_trace_block[TRACES_PER_BLOCK - 1]);
while (block_timestamp > tracer->last_timestamp) {
- /* Check block override if its not the first block */
- if (!tracer->last_timestamp) {
+ /* Check block override if it's not the first block */
+ if (tracer->last_timestamp) {
u64 *ts_event;
/* To avoid block override by the HW in case of buffer
* wraparound, the time stamp of the previous block
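
The poll_trace() fix turns `event_id >= first || event_id <= first + num` into `&&`: with `||` at least one of the two bounds always holds, so every event id was classified as a string trace. A few lines make the difference visible:

    #include <stdio.h>

    /* An in-range test must AND its bounds; OR accepts everything. */
    static int in_range_bad(int id, int lo, int hi)  { return id >= lo || id <= hi; }
    static int in_range_good(int id, int lo, int hi) { return id >= lo && id <= hi; }

    int main(void)
    {
            /* id = 5 is below the [10, 20] window */
            printf("bad: %d, good: %d\n",
                   in_range_bad(5, 10, 20),
                   in_range_good(5, 10, 20)); /* bad: 1, good: 0 */
            return 0;
    }
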
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b5c8afe8cd10..3209decdcff0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -101,7 +101,7 @@ struct page_pool;
#define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS \
- ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
+ (ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index c467f5e981f6..70087f2542b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -117,7 +117,6 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
struct xfrm_replay_state_esn *replay_esn;
u32 seq_bottom;
u8 overlap;
- u32 *esn;
if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
sa_entry->esn_state.trigger = 0;
@@ -130,11 +129,9 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
htonl(seq_bottom));
- esn = &sa_entry->esn_state.esn;
sa_entry->esn_state.trigger = 1;
if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
- ++(*esn);
sa_entry->esn_state.overlap = 0;
return true;
} else if (unlikely(!overlap &&
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 0dd17514caae..d212706f1bde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -121,7 +121,9 @@ static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
trailer_len = alen + plen + 2;
- pskb_trim(skb, skb->len - trailer_len);
+ ret = pskb_trim(skb, skb->len - trailer_len);
+ if (unlikely(ret))
+ return ret;
if (skb->protocol == htons(ETH_P_IP)) {
ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
ip_send_check(ipv4hdr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 2c75b2752f58..18c2abbe0b7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -227,11 +227,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
sizeof(*ft->g), GFP_KERNEL);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in || !ft->g) {
- kvfree(ft->g);
- kvfree(in);
+ if (!ft->g)
return -ENOMEM;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_free_g;
}
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
@@ -251,7 +253,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break;
default:
err = -EINVAL;
- goto out;
+ goto err_free_in;
}
switch (type) {
@@ -273,7 +275,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break;
default:
err = -EINVAL;
- goto out;
+ goto err_free_in;
}
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -282,7 +284,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
+ goto err_clean_group;
ft->num_groups++;
memset(in, 0, inlen);
@@ -291,18 +293,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
+ goto err_clean_group;
ft->num_groups++;
kvfree(in);
return 0;
-err:
+err_clean_group:
err = PTR_ERR(ft->g[ft->num_groups]);
ft->g[ft->num_groups] = NULL;
-out:
+err_free_in:
kvfree(in);
-
+err_free_g:
+ kfree(ft->g);
+ ft->g = NULL;
return err;
}
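
The arfs_create_groups() rework is the classic staged goto unwind: each allocation failure jumps to a label that releases only what was acquired before it, and ft->g is reset to NULL so a later caller cannot double-free it. A stripped-down model of the idiom:

    #include <stdlib.h>

    /* Staged acquisition: each failure jumps to the label that frees only
     * what was already allocated, in reverse order. */
    static int setup(void)
    {
            int err;
            char *g, *in;

            g = malloc(32);
            if (!g)
                    return -1;

            in = malloc(64);
            if (!in) {
                    err = -1;
                    goto err_free_g;
            }

            /* ... configure groups; any failure here unwinds both: */
            if (0) {
                    err = -1;
                    goto err_free_in;
            }

            free(in);       /* success path frees only the scratch buffer */
            return 0;

    err_free_in:
            free(in);
    err_free_g:
            free(g);
            return err;
    }

    int main(void) { return setup(); }
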
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 01f2918063af..f1952e14c804 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -109,12 +109,14 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -EOPNOTSUPP;
- ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
- for (i = 0; i < ets->ets_cap; i++) {
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
if (err)
return err;
+ }
+ ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
+ for (i = 0; i < ets->ets_cap; i++) {
err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index e92cc60eade3..18e0cb02aee1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -40,9 +40,7 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
{
struct mlx5_core_dev *mdev = priv->mdev;
- strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRIVER_VERSION,
- sizeof(drvinfo->version));
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 88b51f64a64e..a40fecfdb10c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -71,14 +71,17 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
+ int count;
strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d.%04d (%.16s)",
- fw_rev_maj(mdev), fw_rev_min(mdev),
- fw_rev_sub(mdev), mdev->board_id);
+ count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
+ fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
+ if (count >= sizeof(drvinfo->fw_version))
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%04d", fw_rev_maj(mdev),
+ fw_rev_min(mdev), fw_rev_sub(mdev));
}
static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
@@ -1434,6 +1437,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->num_tc = 1;
params->tunneled_offload_en = false;
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ params->vlan_strip_disable = true;
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 366bda1bb1c3..c9115c0407ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -267,6 +267,8 @@ revert_changes:
for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) {
struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl;
+ attr->dests[curr_dest].termtbl = NULL;
+
/* search for the destination associated with the
* current term table
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 90cb50fe17fd..f7f809887984 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -39,7 +39,7 @@ static void mlx5i_get_drvinfo(struct net_device *dev,
struct mlx5e_priv *priv = mlx5i_epriv(dev);
mlx5e_ethtool_get_drvinfo(priv, drvinfo);
- strlcpy(drvinfo->driver, DRIVER_NAME "[ib_ipoib]",
+ strlcpy(drvinfo->driver, KBUILD_MODNAME "[ib_ipoib]",
sizeof(drvinfo->driver));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 492ff2ef9a40..2c81ec31e0a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -417,8 +417,8 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
static const struct ptp_clock_info mlx5_ptp_clock_info = {
.owner = THIS_MODULE,
- .name = "mlx5_p2p",
- .max_adj = 100000000,
+ .name = "mlx5_ptp",
+ .max_adj = 50000000,
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index bced2efe9bef..438be215bbd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -3,6 +3,7 @@
#include <linux/mlx5/vport.h>
#include "lib/devcom.h"
+#include "mlx5_core.h"
static LIST_HEAD(devcom_list);
@@ -14,7 +15,7 @@ static LIST_HEAD(devcom_list);
struct mlx5_devcom_component {
struct {
void *data;
- } device[MLX5_MAX_PORTS];
+ } device[MLX5_DEVCOM_PORTS_SUPPORTED];
mlx5_devcom_event_handler_t handler;
struct rw_semaphore sem;
@@ -25,7 +26,7 @@ struct mlx5_devcom_list {
struct list_head list;
struct mlx5_devcom_component components[MLX5_DEVCOM_NUM_COMPONENTS];
- struct mlx5_core_dev *devs[MLX5_MAX_PORTS];
+ struct mlx5_core_dev *devs[MLX5_DEVCOM_PORTS_SUPPORTED];
};
struct mlx5_devcom {
@@ -74,13 +75,16 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
if (!mlx5_core_is_pf(dev))
return NULL;
+ if (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_DEVCOM_PORTS_SUPPORTED)
+ return NULL;
+ mlx5_dev_list_lock();
sguid0 = mlx5_query_nic_system_image_guid(dev);
list_for_each_entry(iter, &devcom_list, list) {
struct mlx5_core_dev *tmp_dev = NULL;
idx = -1;
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
if (iter->devs[i])
tmp_dev = iter->devs[i];
else
@@ -100,8 +104,10 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
if (!priv) {
priv = mlx5_devcom_list_alloc();
- if (!priv)
- return ERR_PTR(-ENOMEM);
+ if (!priv) {
+ devcom = ERR_PTR(-ENOMEM);
+ goto out;
+ }
idx = 0;
new_priv = true;
@@ -110,13 +116,16 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
priv->devs[idx] = dev;
devcom = mlx5_devcom_alloc(priv, idx);
if (!devcom) {
- kfree(priv);
- return ERR_PTR(-ENOMEM);
+ if (new_priv)
+ kfree(priv);
+ devcom = ERR_PTR(-ENOMEM);
+ goto out;
}
if (new_priv)
list_add(&priv->list, &devcom_list);
-
+out:
+ mlx5_dev_list_unlock();
return devcom;
}
@@ -129,20 +138,23 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
if (IS_ERR_OR_NULL(devcom))
return;
+ mlx5_dev_list_lock();
priv = devcom->priv;
priv->devs[devcom->idx] = NULL;
kfree(devcom);
- for (i = 0; i < MLX5_MAX_PORTS; i++)
+ for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
if (priv->devs[i])
break;
- if (i != MLX5_MAX_PORTS)
- return;
+ if (i != MLX5_DEVCOM_PORTS_SUPPORTED)
+ goto out;
list_del(&priv->list);
kfree(priv);
+out:
+ mlx5_dev_list_unlock();
}
void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
@@ -191,7 +203,7 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
comp = &devcom->priv->components[id];
down_write(&comp->sem);
- for (i = 0; i < MLX5_MAX_PORTS; i++)
+ for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
if (i != devcom->idx && comp->device[i].data) {
err = comp->handler(event, comp->device[i].data,
event_data);
@@ -239,7 +251,7 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
return NULL;
}
- for (i = 0; i < MLX5_MAX_PORTS; i++)
+ for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
if (i != devcom->idx)
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index 939d5bf1581b..94313c18bb64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -6,6 +6,8 @@
#include <linux/mlx5/driver.h>
+#define MLX5_DEVCOM_PORTS_SUPPORTED 2
+
enum mlx5_devcom_components {
MLX5_DEVCOM_ESW_OFFLOADS,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c
index 23361a9ae4fa..6dc83e871cd7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c
@@ -105,6 +105,7 @@ int mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *op
geneve->opt_type = opt->type;
geneve->obj_id = res;
geneve->refcount++;
+ res = 0;
}
unlock:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f2657cd3ffa4..a183613420d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -75,7 +75,6 @@
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRIVER_VERSION);
unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
@@ -222,7 +221,7 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
strncat(string, ",", remaining_size);
remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
- strncat(string, DRIVER_NAME, remaining_size);
+ strncat(string, KBUILD_MODNAME, remaining_size);
remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
strncat(string, ",", remaining_size);
@@ -307,7 +306,7 @@ static int request_bar(struct pci_dev *pdev)
return -ENODEV;
}
- err = pci_request_regions(pdev, DRIVER_NAME);
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
if (err)
dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
@@ -887,7 +886,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
dev->dm = mlx5_dm_create(dev);
if (IS_ERR(dev->dm))
- mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
+ mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm));
dev->tracer = mlx5_fw_tracer_create(dev);
dev->hv_vhca = mlx5_hv_vhca_create(dev);
@@ -1618,7 +1617,7 @@ void mlx5_recover_device(struct mlx5_core_dev *dev)
}
static struct pci_driver mlx5_core_driver = {
- .name = DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.id_table = mlx5_core_pci_table,
.probe = init_one,
.remove = remove_one,
@@ -1640,10 +1639,13 @@ static void mlx5_core_verify_params(void)
}
}
-static int __init init(void)
+static int __init mlx5_init(void)
{
int err;
+ WARN_ONCE(strcmp(MLX5_ADEV_NAME, KBUILD_MODNAME),
+ "mlx5_core name not in sync with kernel module name");
+
get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
mlx5_core_verify_params();
@@ -1665,7 +1667,7 @@ err_debug:
return err;
}
-static void __exit cleanup(void)
+static void __exit mlx5_cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
mlx5e_cleanup();
@@ -1674,5 +1676,5 @@ static void __exit cleanup(void)
mlx5_unregister_debugfs();
}
-module_init(init);
-module_exit(cleanup);
+module_init(mlx5_init);
+module_exit(mlx5_cleanup);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b100489dc85c..e053a17e0c7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -43,9 +43,6 @@
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>
-#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "5.0-0"
-
extern uint mlx5_core_debug_mask;
#define mlx5_core_dbg(__dev, format, ...) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index db76c92b75e2..05ff66c23185 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -167,7 +167,8 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
if (n >= MLX5_NUM_4K_IN_PAGE) {
- mlx5_core_warn(dev, "alloc 4k bug\n");
+ mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
+ fp->addr, n, fp->bitmask, MLX5_NUM_4K_IN_PAGE);
return -ENOENT;
}
clear_bit(n, &fp->bitmask);
@@ -503,8 +504,8 @@ static int req_pages_handler(struct notifier_block *nb,
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
- u16 uninitialized_var(func_id);
- s32 uninitialized_var(npages);
+ u16 func_id;
+ s32 npages;
int err;
err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 61fcfd8b39b4..7fa805bb0e38 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -211,8 +211,7 @@ static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
host_total_vfs = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_total_vfs);
kvfree(out);
- if (host_total_vfs)
- return host_total_vfs;
+ return host_total_vfs;
}
done:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index b2dfa2b5366f..dccb4f6c14e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -667,6 +667,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
switch (action_type) {
case DR_ACTION_TYP_DROP:
attr.final_icm_addr = nic_dmn->drop_icm_addr;
+ attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
break;
case DR_ACTION_TYP_FT:
dest_action = action;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 64f6f529f6eb..45b90c769878 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -423,11 +423,12 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
if (err)
- return err;
+ goto err_free_in;
*reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
- kvfree(in);
+err_free_in:
+ kvfree(in);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index e178d8d3dbc9..077b4bec1b13 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -9,7 +9,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
struct mlx5dr_matcher *last_matcher = NULL;
struct mlx5dr_htbl_connect_info info;
struct mlx5dr_ste_htbl *last_htbl;
- int ret;
+ int ret = -EOPNOTSUPP;
if (action && action->action_type != DR_ACTION_TYP_FT)
return -EOPNOTSUPP;
@@ -68,6 +68,9 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
}
}
+ if (ret)
+ goto out;
+
/* Release old action */
if (tbl->miss_action)
refcount_dec(&tbl->miss_action->refcount);
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
index 017d68f1e123..972c571b4158 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
@@ -31,6 +31,8 @@ mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file,
if (tlv->type == MLXFW_MFA2_TLV_MULTI_PART) {
multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, tlv);
+ if (!multi)
+ return NULL;
tlv_len = NLA_ALIGN(tlv_len + be16_to_cpu(multi->total_len));
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 7cc4c30af1a7..b0d44b136116 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -47,6 +47,7 @@
#define MLXSW_I2C_MBOX_SIZE_BITS 12
#define MLXSW_I2C_ADDR_BUF_SIZE 4
#define MLXSW_I2C_BLK_DEF 32
+#define MLXSW_I2C_BLK_MAX 100
#define MLXSW_I2C_RETRY 5
#define MLXSW_I2C_TIMEOUT_MSECS 5000
#define MLXSW_I2C_MAX_DATA_SIZE 256
@@ -427,7 +428,7 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
} else {
/* No input mailbox is case of initialization query command. */
reg_size = MLXSW_I2C_MAX_DATA_SIZE;
- num = reg_size / mlxsw_i2c->block_size;
+ num = DIV_ROUND_UP(reg_size, mlxsw_i2c->block_size);
if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
dev_err(&client->dev, "Could not acquire lock");
@@ -575,7 +576,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
return -EOPNOTSUPP;
}
- mlxsw_i2c->block_size = max_t(u16, MLXSW_I2C_BLK_DEF,
+ mlxsw_i2c->block_size = min_t(u16, MLXSW_I2C_BLK_MAX,
min_t(u16, quirks->max_read_len,
quirks->max_write_len));
} else {
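
The mlxsw I2C change swaps truncating division for DIV_ROUND_UP when computing how many block transfers cover a register, so a trailing partial block is no longer silently dropped: 256 bytes at a 100-byte block size needs 3 transfers, not 2. The arithmetic in isolation:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            int reg_size = 256, block = 100; /* e.g. the new MLXSW_I2C_BLK_MAX */

            printf("truncated: %d blocks\n", reg_size / block);              /* 2 */
            printf("rounded:   %d blocks\n", DIV_ROUND_UP(reg_size, block)); /* 3 */
            return 0;
    }
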
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index 4c98950380d5..d231f4d2888b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -301,6 +301,7 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
unsigned long *p_index)
{
unsigned int num_rows, entry_size;
+ unsigned long index;
/* We only allow allocations of entire rows */
if (num_erps % erp_core->num_erp_banks != 0)
@@ -309,10 +310,11 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
entry_size = erp_core->erpt_entries_size[region_type];
num_rows = num_erps / erp_core->num_erp_banks;
- *p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
- if (*p_index == 0)
+ index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
+ if (!index)
return -ENOBUFS;
- *p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+
+ *p_index = index - MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index 05517c7feaa5..a20ba23f0ed7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -294,8 +294,8 @@ const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
-static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
- bool learning_en)
+static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
+ bool learning_en)
{
char tnpc_pl[MLXSW_REG_TNPC_LEN];
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 2431723bc2fb..9ffb8880105f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -6928,7 +6928,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
char banner[sizeof(version)];
struct ksz_switch *sw = NULL;
- result = pci_enable_device(pdev);
+ result = pcim_enable_device(pdev);
if (result)
return result;
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index e2528633c09a..8c6c5c706992 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -364,7 +364,7 @@ static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg,
goto err_out;
usleep_range(26, 100);
- while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) &&
+ while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
(mistat & BUSY))
cpu_relax();
@@ -402,7 +402,7 @@ static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg,
goto err_out;
usleep_range(26, 100);
- while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) &&
+ while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
(mistat & BUSY))
cpu_relax();
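
The encx24j600 fix is an operator-precedence repair: `!=` binds tighter than `=`, so `(ret = regmap_read(...) != 0)` stored the boolean result of the comparison in ret rather than the call's status, and the busy-wait condition was inverted as a result (the patch also flips the test to loop while the read succeeds). A two-branch demonstration of what the extra parentheses change:

    #include <stdio.h>

    static int read_reg(void) { return -5; /* pretend an error code */ }

    int main(void)
    {
            int ret;

            if ((ret = read_reg() != 0))    /* ret = (read_reg() != 0) -> 1 */
                    printf("wrong: ret=%d\n", ret);

            if ((ret = read_reg()) != 0)    /* ret = read_reg() -> -5 */
                    printf("right: ret=%d\n", ret);
            return 0;
    }
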
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index c69ffcfe6168..6458dbd6c631 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -80,6 +80,18 @@ static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
!(data & HW_CFG_LRST_), 100000, 10000000);
}
+static int lan743x_csr_wait_for_bit_atomic(struct lan743x_adapter *adapter,
+ int offset, u32 bit_mask,
+ int target_value, int udelay_min,
+ int udelay_max, int count)
+{
+ u32 data;
+
+ return readx_poll_timeout_atomic(LAN743X_CSR_READ_OP, offset, data,
+ target_value == !!(data & bit_mask),
+ udelay_max, udelay_min * count);
+}
+
static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
int offset, u32 bit_mask,
int target_value, int usleep_min,
@@ -675,8 +687,8 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
u32 dp_sel;
int i;
- if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
- 1, 40, 100, 100))
+ if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL, DP_SEL_DPRDY_,
+ 1, 40, 100, 100))
return -EIO;
dp_sel = lan743x_csr_read(adapter, DP_SEL);
dp_sel &= ~DP_SEL_MASK_;
@@ -687,8 +699,9 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
lan743x_csr_write(adapter, DP_ADDR, addr + i);
lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
- if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
- 1, 40, 100, 100))
+ if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL,
+ DP_SEL_DPRDY_,
+ 1, 40, 100, 100))
return -EIO;
}
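
lan743x_dp_write() can be reached from contexts that must not sleep, so the hunk introduces a wrapper around readx_poll_timeout_atomic(), which busy-waits with delays instead of sleeping. A userspace analogue of the poll-until-bit-set-or-deadline shape (helper names here are illustrative, not the kernel macro):

    #include <stdio.h>
    #include <time.h>

    static long long now_us(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
    }

    /* Busy-poll read_fn until (val & mask) == want or timeout_us elapses;
     * mirrors the shape of readx_poll_timeout_atomic(). */
    static int poll_bit(unsigned (*read_fn)(void), unsigned mask,
                        unsigned want, long long timeout_us)
    {
            long long deadline = now_us() + timeout_us;

            for (;;) {
                    if ((read_fn() & mask) == want)
                            return 0;
                    if (now_us() > deadline)
                            return -1; /* -ETIMEDOUT in the kernel helper */
            }
    }

    static unsigned fake_csr(void) { return 0x4; } /* DPRDY already set */

    int main(void)
    {
            printf("%d\n", poll_bit(fake_csr, 0x4, 0x4, 1000)); /* 0 */
            return 0;
    }
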
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 13714827f452..87327086ea8c 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -74,11 +74,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
static void moxart_mac_free_memory(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- int i;
-
- for (i = 0; i < RX_DESC_NUM; i++)
- dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
- priv->rx_buf_size, DMA_FROM_DEVICE);
if (priv->tx_desc_base)
dma_free_coherent(&priv->pdev->dev,
@@ -147,11 +142,11 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
desc + RX_REG_OFFSET_DESC1);
priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
- priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+ priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
priv->rx_buf[i],
priv->rx_buf_size,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+ if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
netdev_err(ndev, "DMA mapping error\n");
moxart_desc_write(priv->rx_mapping[i],
@@ -193,6 +188,7 @@ static int moxart_mac_open(struct net_device *ndev)
static int moxart_mac_stop(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ int i;
napi_disable(&priv->napi);
@@ -204,6 +200,11 @@ static int moxart_mac_stop(struct net_device *ndev)
/* disable all functions */
writel(0, priv->base + REG_MAC_CTRL);
+ /* unmap areas mapped in moxart_mac_setup_desc_ring() */
+ for (i = 0; i < RX_DESC_NUM; i++)
+ dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+
return 0;
}
@@ -240,7 +241,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (len > RX_BUF_SIZE)
len = RX_BUF_SIZE;
- dma_sync_single_for_cpu(&ndev->dev,
+ dma_sync_single_for_cpu(&priv->pdev->dev,
priv->rx_mapping[rx_head],
priv->rx_buf_size, DMA_FROM_DEVICE);
skb = netdev_alloc_skb_ip_align(ndev, len);
@@ -294,7 +295,7 @@ static void moxart_tx_finished(struct net_device *ndev)
unsigned int tx_tail = priv->tx_tail;
while (tx_tail != tx_head) {
- dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+ dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
priv->tx_len[tx_tail], DMA_TO_DEVICE);
ndev->stats.tx_packets++;
@@ -357,9 +358,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
- priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+ priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+ if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
netdev_err(ndev, "DMA mapping error\n");
goto out_unlock;
}
@@ -378,7 +379,7 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
len = ETH_ZLEN;
}
- dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+ dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
priv->tx_buf_size, DMA_TO_DEVICE);
txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
@@ -498,7 +499,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
priv->tx_buf_size = TX_BUF_SIZE;
priv->rx_buf_size = RX_BUF_SIZE;
- priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
+ priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->tx_desc_base) {
@@ -506,7 +507,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
goto init_fail;
}
- priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
+ priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
RX_DESC_NUM, &priv->rx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->rx_desc_base) {
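
The moxart changes pair two fixes: DMA calls must go through the device that owns the DMA configuration (&priv->pdev->dev, not &ndev->dev, which has none), and buffers mapped on the open path must be unmapped on the stop path rather than at free time. A minimal sketch of the balanced lifecycle under those assumptions; struct my_priv and the constants are modeled on the driver, error unwinding of earlier mappings is elided for brevity:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

#define RX_DESC_NUM     64

struct my_priv {
        struct platform_device *pdev;
        void *rx_buf[RX_DESC_NUM];
        dma_addr_t rx_mapping[RX_DESC_NUM];
        unsigned int rx_buf_size;
};

static int my_open(struct net_device *ndev)
{
        struct my_priv *priv = netdev_priv(ndev);
        int i;

        for (i = 0; i < RX_DESC_NUM; i++) {
                /* map through the DMA-capable platform device */
                priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
                                                     priv->rx_buf[i],
                                                     priv->rx_buf_size,
                                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
                        return -ENOMEM;
        }
        return 0;
}

static int my_stop(struct net_device *ndev)
{
        struct my_priv *priv = netdev_priv(ndev);
        int i;

        /* undo exactly what my_open() mapped, with the same device */
        for (i = 0; i < RX_DESC_NUM; i++)
                dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
                                 priv->rx_buf_size, DMA_FROM_DEVICE);
        return 0;
}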
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index c4c716094982..5aee774768bc 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3956,6 +3956,7 @@ abort_with_slices:
myri10ge_free_slices(mgp);
abort_with_firmware:
+ kfree(mgp->msix_vectors);
myri10ge_dummy_rdma(mgp, 0);
abort_with_ioremap:
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 05e760444a92..953708e2ab4b 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -256,7 +256,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
*/
laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
- if (!laddr) {
+ if (dma_mapping_error(lp->device, laddr)) {
pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -474,7 +474,7 @@ static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
*new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
SONIC_RBSIZE, DMA_FROM_DEVICE);
- if (!*new_addr) {
+ if (dma_mapping_error(lp->device, *new_addr)) {
dev_kfree_skb(*new_skb);
*new_skb = NULL;
return false;
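
The sonic fix above matters because a failed dma_map_single() does not return 0: on most configurations DMA_MAPPING_ERROR is all-ones, so "if (!laddr)" can never detect the failure. The portable check is dma_mapping_error(). A minimal sketch of the pattern; my_xmit() and its drop policy are illustrative:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_xmit(struct device *dev, struct sk_buff *skb)
{
        dma_addr_t laddr = dma_map_single(dev, skb->data, skb->len,
                                          DMA_TO_DEVICE);

        /* never compare the handle against 0; ask the DMA API */
        if (dma_mapping_error(dev, laddr)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;    /* drop, don't requeue */
        }

        /* ... fill a TX descriptor with laddr and kick the NIC ... */
        return NETDEV_TX_OK;
}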
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 71ab4e9c9a17..00e698935f4e 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2375,7 +2375,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
if (skb) {
swstats->mem_freed += skb->truesize;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
cnt++;
}
}
@@ -7122,9 +7122,8 @@ static int s2io_card_up(struct s2io_nic *sp)
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
dev->name);
- s2io_reset(sp);
- free_rx_buffers(sp);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_fill_buff;
}
DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
ring->rx_bufs_left);
@@ -7162,18 +7161,16 @@ static int s2io_card_up(struct s2io_nic *sp)
/* Enable Rx Traffic and interrupts on the NIC */
if (start_nic(sp)) {
DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
- s2io_reset(sp);
- free_rx_buffers(sp);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_out;
}
/* Add interrupt service routine */
if (s2io_add_isr(sp) != 0) {
if (sp->config.intr_type == MSI_X)
s2io_rem_isr(sp);
- s2io_reset(sp);
- free_rx_buffers(sp);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_out;
}
timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
@@ -7193,6 +7190,20 @@ static int s2io_card_up(struct s2io_nic *sp)
}
return 0;
+
+err_out:
+ if (config->napi) {
+ if (config->intr_type == MSI_X) {
+ for (i = 0; i < sp->config.rx_ring_num; i++)
+ napi_disable(&sp->mac_control.rings[i].napi);
+ } else {
+ napi_disable(&sp->napi);
+ }
+ }
+err_fill_buff:
+ s2io_reset(sp);
+ free_rx_buffers(sp);
+ return ret;
}
/**
@@ -7276,7 +7287,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
int ring_no = ring_data->ring_no;
u16 l3_csum, l4_csum;
unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
- struct lro *uninitialized_var(lro);
+ struct lro *lro;
u8 err_mask;
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
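
The s2io_card_up() rework above is the standard goto-unwind pattern: each failure branches to a label that undoes exactly the steps completed so far, so failures after NAPI is enabled can no longer leak enabled state. A minimal sketch of the shape; struct nic and the helpers are hypothetical:

struct nic;

static int fill_rx_buffers(struct nic *sp);
static int start_nic(struct nic *sp);
static void napi_enable_all(struct nic *sp);
static void napi_disable_all(struct nic *sp);
static void nic_reset(struct nic *sp);
static void free_rx_buffers(struct nic *sp);

static int card_up(struct nic *sp)
{
        int ret;

        ret = fill_rx_buffers(sp);
        if (ret)
                goto err_fill_buff;     /* nothing else to undo yet */

        napi_enable_all(sp);

        ret = start_nic(sp);
        if (ret)
                goto err_out;           /* must also undo napi_enable_all() */

        return 0;

err_out:
        napi_disable_all(sp);
err_fill_buff:
        nic_reset(sp);
        free_rx_buffers(sp);
        return ret;
}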
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 63907aeb3884..3167f9675ae0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -308,6 +308,11 @@ static void nfp_fl_lag_do_work(struct work_struct *work)
acti_netdevs = kmalloc_array(entry->slave_cnt,
sizeof(*acti_netdevs), GFP_KERNEL);
+ if (!acti_netdevs) {
+ schedule_delayed_work(&lag->work,
+ NFP_FL_LAG_DELAY);
+ continue;
+ }
/* Include sanity check in the loop. It may be that a bond has
* changed between processing the last notification and the
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 26772c3310f0..4bfef55c3cb3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -593,7 +593,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
u16 nfp_mac_idx = 0;
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
- if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
+ if (entry && (nfp_tunnel_is_mac_idx_global(entry->index) || netif_is_lag_port(netdev))) {
if (entry->bridge_count ||
!nfp_flower_is_supported_bridge(netdev)) {
nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 10857914c552..ff8810357181 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1127,6 +1127,11 @@ nfp_port_get_module_info(struct net_device *netdev,
u8 data;
port = nfp_port_from_netdev(netdev);
+ if (!port)
+ return -EOPNOTSUPP;
+
+ /* update port state to get latest interface */
+ set_bit(NFP_PORT_CHANGED, &port->flags);
eth_port = nfp_port_get_eth_port(port);
if (!eth_port)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 85d46f206b3c..3334dba234fe 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -542,11 +542,13 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
const u32 barcfg_msix_general =
NFP_PCIE_BAR_PCIE2CPP_MapType(
NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
- NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT;
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
const u32 barcfg_msix_xpb =
NFP_PCIE_BAR_PCIE2CPP_MapType(
NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
- NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT |
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT) |
NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
NFP_CPP_TARGET_ISLAND_XPB);
const u32 barcfg_explicit[4] = {
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 6ef48eb3a77d..b163489489e9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -874,7 +874,6 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
}
/* Adjust the start address to be cache size aligned */
- cache->id = id;
cache->addr = addr & ~(u64)(cache->size - 1);
/* Re-init to the new ID and address */
@@ -894,6 +893,8 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
return NULL;
}
+ cache->id = id;
+
exit:
/* Adjust offset */
*offset = addr - cache->addr;
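
The area_cache_get() reorder above preserves a cache invariant: an entry's tag may only be updated once its contents are valid, otherwise a failed re-init leaves an entry whose id claims data it does not hold, and a later lookup returns a stale mapping. A minimal sketch of the invariant; cache_entry and remap() are illustrative, not the nfp API:

#include <stddef.h>

struct cache_entry {
        unsigned int id;        /* tag: which object the entry holds */
        void *mapping;          /* contents belonging to that tag */
};

static int remap(struct cache_entry *c, unsigned int id);

static void *cache_get(struct cache_entry *c, unsigned int id)
{
        if (c->id == id)
                return c->mapping;      /* hit: tag matches contents */

        if (remap(c, id))               /* may fail */
                return NULL;            /* tag still names old contents */

        c->id = id;                     /* publish only after success */
        return c->mapping;
}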
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 56f285985b43..acb25b8c9458 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -249,25 +249,26 @@ static void nixge_hw_dma_bd_release(struct net_device *ndev)
struct sk_buff *skb;
int i;
- for (i = 0; i < RX_BD_NUM; i++) {
- phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
- phys);
-
- dma_unmap_single(ndev->dev.parent, phys_addr,
- NIXGE_MAX_JUMBO_FRAME_SIZE,
- DMA_FROM_DEVICE);
-
- skb = (struct sk_buff *)(uintptr_t)
- nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
- sw_id_offset);
- dev_kfree_skb(skb);
- }
+ if (priv->rx_bd_v) {
+ for (i = 0; i < RX_BD_NUM; i++) {
+ phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
+ phys);
+
+ dma_unmap_single(ndev->dev.parent, phys_addr,
+ NIXGE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ skb = (struct sk_buff *)(uintptr_t)
+ nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
+ sw_id_offset);
+ dev_kfree_skb(skb);
+ }
- if (priv->rx_bd_v)
dma_free_coherent(ndev->dev.parent,
sizeof(*priv->rx_bd_v) * RX_BD_NUM,
priv->rx_bd_v,
priv->rx_bd_p);
+ }
if (priv->tx_skb)
devm_kfree(ndev->dev.parent, priv->tx_skb);
@@ -899,6 +900,7 @@ static int nixge_open(struct net_device *ndev)
err_rx_irq:
free_irq(priv->tx_irq, ndev);
err_tx_irq:
+ napi_disable(&priv->napi);
phy_stop(phy);
phy_disconnect(phy);
tasklet_kill(&priv->dma_err_tasklet);
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 05d2b478c99b..d069017d43a3 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -6099,6 +6099,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
return 0;
out_error:
+ nv_mgmt_release_sema(dev);
if (phystate_orig)
writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
out_freering:
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 8ff4c616f0ad..45b7f0f419c9 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1166,6 +1166,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
buffer_info->dma = 0;
buffer_info->time_stamp = 0;
tx_ring->next_to_use = ring_num;
+ dev_kfree_skb_any(skb);
return;
}
buffer_info->mapped = true;
@@ -2480,6 +2481,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
pch_gbe_phy_hw_reset(&adapter->hw);
+ pci_dev_put(adapter->ptp_pdev);
free_netdev(netdev);
}
@@ -2561,7 +2563,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
/* setup the private structure */
ret = pch_gbe_sw_init(adapter);
if (ret)
- goto err_free_netdev;
+ goto err_put_dev;
/* Initialize PHY */
ret = pch_gbe_init_phy(adapter);
@@ -2619,6 +2621,8 @@ static int pch_gbe_probe(struct pci_dev *pdev,
err_free_adapter:
pch_gbe_phy_hw_reset(&adapter->hw);
+err_put_dev:
+ pci_dev_put(adapter->ptp_pdev);
err_free_netdev:
free_netdev(netdev);
return ret;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 040a15a828b4..c1d7bd168f1d 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1423,7 +1423,7 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
}
-static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct pasemi_mac * const mac = netdev_priv(dev);
struct pasemi_mac_txring * const txring = tx_ring(mac);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index b8de3784d976..c53d2271d8ab 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -573,7 +573,7 @@ static int ionic_get_rxnfc(struct net_device *netdev,
info->data = lif->nxqs;
break;
default:
- netdev_err(netdev, "Command parameter %d is not supported\n",
+ netdev_dbg(netdev, "Command parameter %d is not supported\n",
info->cmd);
err = -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index f9c303d76658..d718c1a6d5fc 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -167,10 +167,10 @@ static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
return 0;
}
-static void ionic_intr_free(struct ionic_lif *lif, int index)
+static void ionic_intr_free(struct ionic *ionic, int index)
{
- if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
- clear_bit(index, lif->ionic->intrs);
+ if (index != INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
+ clear_bit(index, ionic->intrs);
}
static int ionic_qcq_enable(struct ionic_qcq *qcq)
@@ -190,6 +190,7 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
.oper = IONIC_Q_ENABLE,
},
};
+ int ret;
idev = &lif->ionic->idev;
dev = lif->ionic->dev;
@@ -197,16 +198,24 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+ if (qcq->flags & IONIC_QCQ_F_INTR)
+ ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
+
+ ret = ionic_adminq_post_wait(lif, &ctx);
+ if (ret)
+ return ret;
+
+ if (qcq->napi.poll)
+ napi_enable(&qcq->napi);
+
if (qcq->flags & IONIC_QCQ_F_INTR) {
irq_set_affinity_hint(qcq->intr.vector,
&qcq->intr.affinity_mask);
- napi_enable(&qcq->napi);
- ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
}
- return ionic_adminq_post_wait(lif, &ctx);
+ return 0;
}
static int ionic_qcq_disable(struct ionic_qcq *qcq)
@@ -247,7 +256,6 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq)
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
- struct device *dev = lif->ionic->dev;
if (!qcq)
return;
@@ -260,7 +268,6 @@ static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
if (qcq->flags & IONIC_QCQ_F_INTR) {
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
- devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
netif_napi_del(&qcq->napi);
}
@@ -278,8 +285,12 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->base = NULL;
qcq->base_pa = 0;
- if (qcq->flags & IONIC_QCQ_F_INTR)
- ionic_intr_free(lif, qcq->intr.index);
+ if (qcq->flags & IONIC_QCQ_F_INTR) {
+ irq_set_affinity_hint(qcq->intr.vector, NULL);
+ devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
+ qcq->intr.vector = 0;
+ ionic_intr_free(lif->ionic, qcq->intr.index);
+ }
devm_kfree(dev, qcq->cq.info);
qcq->cq.info = NULL;
@@ -321,11 +332,6 @@ static void ionic_qcqs_free(struct ionic_lif *lif)
static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
struct ionic_qcq *n_qcq)
{
- if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
- ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index);
- n_qcq->flags &= ~IONIC_QCQ_F_INTR;
- }
-
n_qcq->intr.vector = src_qcq->intr.vector;
n_qcq->intr.index = src_qcq->intr.index;
}
@@ -409,8 +415,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
IONIC_INTR_MASK_SET);
- new->intr.cpu = new->intr.index % num_online_cpus();
- if (cpu_online(new->intr.cpu))
+ err = ionic_request_irq(lif, new);
+ if (err) {
+ netdev_warn(lif->netdev, "irq request failed %d\n", err);
+ goto err_out_free_intr;
+ }
+
+ new->intr.cpu = cpumask_local_spread(new->intr.index,
+ dev_to_node(dev));
+ if (new->intr.cpu != -1)
cpumask_set_cpu(new->intr.cpu,
&new->intr.affinity_mask);
} else {
@@ -422,13 +435,13 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->cq.info) {
netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
err = -ENOMEM;
- goto err_out_free_intr;
+ goto err_out_free_irq;
}
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
if (err) {
netdev_err(lif->netdev, "Cannot initialize completion queue\n");
- goto err_out_free_intr;
+ goto err_out_free_irq;
}
new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
@@ -436,7 +449,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->base) {
netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
err = -ENOMEM;
- goto err_out_free_intr;
+ goto err_out_free_irq;
}
new->total_size = total_size;
@@ -462,8 +475,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
return 0;
+err_out_free_irq:
+ if (flags & IONIC_QCQ_F_INTR)
+ devm_free_irq(dev, new->intr.vector, &new->napi);
err_out_free_intr:
- ionic_intr_free(lif, new->intr.index);
+ if (flags & IONIC_QCQ_F_INTR)
+ ionic_intr_free(lif->ionic, new->intr.index);
err_out:
dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
return err;
@@ -638,12 +655,6 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
NAPI_POLL_WEIGHT);
- err = ionic_request_irq(lif, qcq);
- if (err) {
- netif_napi_del(&qcq->napi);
- return err;
- }
-
qcq->flags |= IONIC_QCQ_F_INITED;
ionic_debugfs_add_qcq(lif, qcq);
@@ -1861,13 +1872,6 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
NAPI_POLL_WEIGHT);
- err = ionic_request_irq(lif, qcq);
- if (err) {
- netdev_warn(lif->netdev, "adminq irq request failed %d\n", err);
- netif_napi_del(&qcq->napi);
- return err;
- }
-
napi_enable(&qcq->napi);
if (qcq->flags & IONIC_QCQ_F_INTR)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 8ea46b81b739..8a7ac17d3ef7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1023,6 +1023,7 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
p_dma->p_virt = NULL;
}
kfree(p_mngr->ilt_shadow);
+ p_mngr->ilt_shadow = NULL;
}
static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index a923c6553270..35119778cf1f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -5139,6 +5139,11 @@ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
num_vports = p_hwfn->qm_info.num_vports;
+ if (num_vports < 2) {
+ DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports);
+ return -EINVAL;
+ }
+
/* Accounting for the vports which are configured for WFQ explicitly */
for (i = 0; i < num_vports; i++) {
u32 tmp_speed;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index c449ecc0add2..1f77bbb35ea9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -113,7 +113,10 @@ static void qed_ll2b_complete_tx_packet(void *cxt,
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
u8 **data, dma_addr_t *phys_addr)
{
- *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
+ size_t size = cdev->ll2->rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ *data = kmalloc(size, GFP_ATOMIC);
if (!(*data)) {
DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
return -ENOMEM;
@@ -2449,7 +2452,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
INIT_LIST_HEAD(&cdev->ll2->list);
spin_lock_init(&cdev->ll2->lock);
- cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
+ cdev->ll2->rx_size = PRM_DMA_PAD_BYTES_NUM + ETH_HLEN +
L1_CACHE_BYTES + params->mtu;
/* Allocate memory for LL2.
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 5f01fbd3c073..1e0e2dbecc76 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -123,9 +123,9 @@ struct qed_ll2_info {
enum core_tx_dest tx_dest;
u8 tx_stats_en;
bool main_func_queue;
+ struct qed_ll2_cbs cbs;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
- struct qed_ll2_cbs cbs;
};
/**
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 20f840ea0503..caa0468df4b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -4404,6 +4404,9 @@ qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
}
vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+ if (!vf)
+ return -EINVAL;
+
vport_id = vf->vport_id;
return qed_configure_vport_wfq(cdev, vport_id, rate);
@@ -5150,7 +5153,7 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
/* Validate that the VF has a configured vport */
vf = qed_iov_get_vf_info(hwfn, i, true);
- if (!vf->vport_instance)
+ if (!vf || !vf->vport_instance)
continue;
memset(&params, 0, sizeof(params));
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 5e81cd317a32..5d0e65a3b6a8 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -316,12 +316,11 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
* buffer
*/
skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
- map = pci_map_single(qdev->pdev,
+ map = dma_map_single(&qdev->pdev->dev,
lrg_buf_cb->skb->data,
- qdev->lrg_buffer_len -
- QL_HEADER_SPACE,
- PCI_DMA_FROMDEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
@@ -1803,13 +1802,12 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
* first buffer
*/
skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
- map = pci_map_single(qdev->pdev,
+ map = dma_map_single(&qdev->pdev->dev,
lrg_buf_cb->skb->data,
- qdev->lrg_buffer_len -
- QL_HEADER_SPACE,
- PCI_DMA_FROMDEVICE);
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
@@ -1944,18 +1942,16 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
goto invalid_seg_count;
}
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(&tx_cb->map[0], mapaddr),
- dma_unmap_len(&tx_cb->map[0], maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE);
tx_cb->seg_count--;
if (tx_cb->seg_count) {
for (i = 1; i < tx_cb->seg_count; i++) {
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[i],
- mapaddr),
+ dma_unmap_page(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[i], mapaddr),
dma_unmap_len(&tx_cb->map[i], maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
}
qdev->ndev->stats.tx_packets++;
@@ -2022,10 +2018,9 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
qdev->ndev->stats.rx_bytes += length;
skb_put(skb, length);
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(lrg_buf_cb2, mapaddr),
- dma_unmap_len(lrg_buf_cb2, maplen),
- PCI_DMA_FROMDEVICE);
+ dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
prefetch(skb->data);
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, qdev->ndev);
@@ -2068,10 +2063,9 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
skb2 = lrg_buf_cb2->skb;
skb_put(skb2, length); /* Just the second buffer length here. */
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(lrg_buf_cb2, mapaddr),
- dma_unmap_len(lrg_buf_cb2, maplen),
- PCI_DMA_FROMDEVICE);
+ dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
prefetch(skb2->data);
skb_checksum_none_assert(skb2);
@@ -2320,9 +2314,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
/*
* Map the skb buffer first.
*/
- map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
err);
@@ -2358,11 +2352,11 @@ static int ql_send_map(struct ql3_adapter *qdev,
(seg == 7 && seg_cnt > 8) ||
(seg == 12 && seg_cnt > 13) ||
(seg == 17 && seg_cnt > 18)) {
- map = pci_map_single(qdev->pdev, oal,
+ map = dma_map_single(&qdev->pdev->dev, oal,
sizeof(struct oal),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping outbound address list with error: %d\n",
@@ -2424,24 +2418,24 @@ map_error:
(seg == 7 && seg_cnt > 8) ||
(seg == 12 && seg_cnt > 13) ||
(seg == 17 && seg_cnt > 18)) {
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[seg], mapaddr),
- dma_unmap_len(&tx_cb->map[seg], maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
+ DMA_TO_DEVICE);
oal++;
seg++;
}
- pci_unmap_page(qdev->pdev,
+ dma_unmap_page(&qdev->pdev->dev,
dma_unmap_addr(&tx_cb->map[seg], mapaddr),
dma_unmap_len(&tx_cb->map[seg], maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(&tx_cb->map[0], mapaddr),
dma_unmap_addr(&tx_cb->map[0], maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
return NETDEV_TX_BUSY;
@@ -2476,6 +2470,7 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
skb_shinfo(skb)->nr_frags);
if (tx_cb->seg_count == -1) {
netdev_err(ndev, "%s: invalid segment count!\n", __func__);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2526,9 +2521,8 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
wmb();
qdev->req_q_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- (size_t) qdev->req_q_size,
- &qdev->req_q_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
+ &qdev->req_q_phy_addr, GFP_KERNEL);
if ((qdev->req_q_virt_addr == NULL) ||
LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
@@ -2537,16 +2531,14 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
}
qdev->rsp_q_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- (size_t) qdev->rsp_q_size,
- &qdev->rsp_q_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->rsp_q_size,
+ &qdev->rsp_q_phy_addr, GFP_KERNEL);
if ((qdev->rsp_q_virt_addr == NULL) ||
LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
netdev_err(qdev->ndev, "rspQ allocation failed\n");
- pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
- qdev->req_q_virt_addr,
- qdev->req_q_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
+ qdev->req_q_virt_addr, qdev->req_q_phy_addr);
return -ENOMEM;
}
@@ -2562,15 +2554,13 @@ static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
return;
}
- pci_free_consistent(qdev->pdev,
- qdev->req_q_size,
- qdev->req_q_virt_addr, qdev->req_q_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->req_q_size,
+ qdev->req_q_virt_addr, qdev->req_q_phy_addr);
qdev->req_q_virt_addr = NULL;
- pci_free_consistent(qdev->pdev,
- qdev->rsp_q_size,
- qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->rsp_q_size,
+ qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
qdev->rsp_q_virt_addr = NULL;
@@ -2594,12 +2584,13 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
return -ENOMEM;
qdev->lrg_buf_q_alloc_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- qdev->lrg_buf_q_alloc_size,
- &qdev->lrg_buf_q_alloc_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->lrg_buf_q_alloc_size,
+ &qdev->lrg_buf_q_alloc_phy_addr, GFP_KERNEL);
if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
netdev_err(qdev->ndev, "lBufQ failed\n");
+ kfree(qdev->lrg_buf);
return -ENOMEM;
}
qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
@@ -2614,15 +2605,17 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
qdev->small_buf_q_alloc_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- qdev->small_buf_q_alloc_size,
- &qdev->small_buf_q_alloc_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->small_buf_q_alloc_size,
+ &qdev->small_buf_q_alloc_phy_addr, GFP_KERNEL);
if (qdev->small_buf_q_alloc_virt_addr == NULL) {
netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
- pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
- qdev->lrg_buf_q_alloc_virt_addr,
- qdev->lrg_buf_q_alloc_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev,
+ qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
+ kfree(qdev->lrg_buf);
return -ENOMEM;
}
@@ -2639,17 +2632,15 @@ static void ql_free_buffer_queues(struct ql3_adapter *qdev)
return;
}
kfree(qdev->lrg_buf);
- pci_free_consistent(qdev->pdev,
- qdev->lrg_buf_q_alloc_size,
- qdev->lrg_buf_q_alloc_virt_addr,
- qdev->lrg_buf_q_alloc_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
qdev->lrg_buf_q_virt_addr = NULL;
- pci_free_consistent(qdev->pdev,
- qdev->small_buf_q_alloc_size,
- qdev->small_buf_q_alloc_virt_addr,
- qdev->small_buf_q_alloc_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->small_buf_q_alloc_size,
+ qdev->small_buf_q_alloc_virt_addr,
+ qdev->small_buf_q_alloc_phy_addr);
qdev->small_buf_q_virt_addr = NULL;
@@ -2667,9 +2658,9 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
QL_SMALL_BUFFER_SIZE);
qdev->small_buf_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- qdev->small_buf_total_size,
- &qdev->small_buf_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->small_buf_total_size,
+ &qdev->small_buf_phy_addr, GFP_KERNEL);
if (qdev->small_buf_virt_addr == NULL) {
netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
@@ -2702,10 +2693,10 @@ static void ql_free_small_buffers(struct ql3_adapter *qdev)
return;
}
if (qdev->small_buf_virt_addr != NULL) {
- pci_free_consistent(qdev->pdev,
- qdev->small_buf_total_size,
- qdev->small_buf_virt_addr,
- qdev->small_buf_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev,
+ qdev->small_buf_total_size,
+ qdev->small_buf_virt_addr,
+ qdev->small_buf_phy_addr);
qdev->small_buf_virt_addr = NULL;
}
@@ -2720,10 +2711,10 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
lrg_buf_cb = &qdev->lrg_buf[i];
if (lrg_buf_cb->skb) {
dev_kfree_skb(lrg_buf_cb->skb);
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(lrg_buf_cb, mapaddr),
dma_unmap_len(lrg_buf_cb, maplen),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
} else {
break;
@@ -2775,13 +2766,11 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
* buffer
*/
skb_reserve(skb, QL_HEADER_SPACE);
- map = pci_map_single(qdev->pdev,
- skb->data,
- qdev->lrg_buffer_len -
- QL_HEADER_SPACE,
- PCI_DMA_FROMDEVICE);
+ map = dma_map_single(&qdev->pdev->dev, skb->data,
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
@@ -2866,8 +2855,8 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
* Network Completion Queue Producer Index Register
*/
qdev->shadow_reg_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- PAGE_SIZE, &qdev->shadow_reg_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ &qdev->shadow_reg_phy_addr, GFP_KERNEL);
if (qdev->shadow_reg_virt_addr != NULL) {
qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
@@ -2922,10 +2911,9 @@ err_small_buffers:
err_buffer_queues:
ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->shadow_reg_virt_addr,
- qdev->shadow_reg_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
return -ENOMEM;
}
@@ -2938,10 +2926,9 @@ static void ql_free_mem_resources(struct ql3_adapter *qdev)
ql_free_buffer_queues(qdev);
ql_free_net_req_rsp_queues(qdev);
if (qdev->shadow_reg_virt_addr != NULL) {
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->shadow_reg_virt_addr,
- qdev->shadow_reg_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
qdev->shadow_reg_virt_addr = NULL;
}
}
@@ -3642,18 +3629,15 @@ static void ql_reset_work(struct work_struct *work)
if (tx_cb->skb) {
netdev_printk(KERN_DEBUG, ndev,
"Freeing lost SKB\n");
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[0],
- mapaddr),
- dma_unmap_len(&tx_cb->map[0], maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
+ DMA_TO_DEVICE);
for (j = 1; j < tx_cb->seg_count; j++) {
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[j],
- mapaddr),
- dma_unmap_len(&tx_cb->map[j],
- maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[j], mapaddr),
+ dma_unmap_len(&tx_cb->map[j], maplen),
+ DMA_TO_DEVICE);
}
dev_kfree_skb(tx_cb->skb);
tx_cb->skb = NULL;
@@ -3769,7 +3753,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
struct net_device *ndev = NULL;
struct ql3_adapter *qdev = NULL;
static int cards_found;
- int uninitialized_var(pci_using_dac), err;
+ int pci_using_dac, err;
err = pci_enable_device(pdev);
if (err) {
@@ -3785,13 +3769,10 @@ static int ql3xxx_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
pci_using_dac = 1;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ else if (!(err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))))
pci_using_dac = 0;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- }
if (err) {
pr_err("%s no usable DMA configuration\n", pci_name(pdev));
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index df43d364e6a4..e6538f38b056 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2993,7 +2993,7 @@ static void qlcnic_83xx_recover_driver_lock(struct qlcnic_adapter *adapter)
QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
dev_info(&adapter->pdev->dev,
"%s: lock recovery initiated\n", __func__);
- msleep(QLC_83XX_DRV_LOCK_RECOVERY_DELAY);
+ mdelay(QLC_83XX_DRV_LOCK_RECOVERY_DELAY);
val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
id = ((val >> 2) & 0xF);
if (id == adapter->portnum) {
@@ -3029,7 +3029,7 @@ int qlcnic_83xx_lock_driver(struct qlcnic_adapter *adapter)
if (status)
break;
- msleep(QLC_83XX_DRV_LOCK_WAIT_DELAY);
+ mdelay(QLC_83XX_DRV_LOCK_WAIT_DELAY);
i++;
if (i == 1)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 10286215092f..85419b8258b5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2525,7 +2525,13 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
goto disable_mbx_intr;
qlcnic_83xx_clear_function_resources(adapter);
- qlcnic_dcb_enable(adapter->dcb);
+
+ err = qlcnic_dcb_enable(adapter->dcb);
+ if (err) {
+ qlcnic_dcb_free(adapter->dcb);
+ goto disable_mbx_intr;
+ }
+
qlcnic_83xx_initialize_nic(adapter, 1);
qlcnic_dcb_get_info(adapter->dcb);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index af38d3d73291..4814046cfc78 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -629,7 +629,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
int i, err, ring;
if (dev->flags & QLCNIC_NEED_FLR) {
- pci_reset_function(dev->pdev);
+ err = pci_reset_function(dev->pdev);
+ if (err) {
+ dev_err(&dev->pdev->dev,
+ "Adapter reset failed (%d). Please reboot\n",
+ err);
+ return err;
+ }
dev->flags &= ~QLCNIC_NEED_FLR;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index 0a9d24e86715..eb8000d9b6d0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -42,11 +42,6 @@ struct qlcnic_dcb {
unsigned long state;
};
-static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
-{
- kfree(dcb);
-}
-
static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
{
if (dcb && dcb->ops->get_hw_capability)
@@ -113,9 +108,8 @@ static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
dcb->ops->init_dcbnl_ops(dcb);
}
-static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
+static inline int qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
{
- if (dcb && qlcnic_dcb_attach(dcb))
- qlcnic_clear_dcb_ops(dcb);
+ return dcb ? qlcnic_dcb_attach(dcb) : 0;
}
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 3a96fd6deef7..9d5b74c804b5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2639,7 +2639,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Device does not support MSI interrupts\n");
if (qlcnic_82xx_check(adapter)) {
- qlcnic_dcb_enable(adapter->dcb);
+ err = qlcnic_dcb_enable(adapter->dcb);
+ if (err) {
+ qlcnic_dcb_free(adapter->dcb);
+ dev_err(&pdev->dev, "Failed to enable DCB\n");
+ goto err_out_free_hw;
+ }
+
qlcnic_dcb_get_info(adapter->dcb);
err = qlcnic_setup_intr(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 400bc2c3f222..7c782df3793d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -222,6 +222,8 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
return 0;
qlcnic_destroy_async_wq:
+ while (i--)
+ kfree(sriov->vf_info[i].vp);
destroy_workqueue(bc->bc_async_wq);
qlcnic_destroy_trans_wq:
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 5c199d2516d4..5fe28dec60c1 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -738,9 +738,15 @@ static int emac_remove(struct platform_device *pdev)
struct net_device *netdev = dev_get_drvdata(&pdev->dev);
struct emac_adapter *adpt = netdev_priv(netdev);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
unregister_netdev(netdev);
netif_napi_del(&adpt->rx_q.napi);
+ free_irq(adpt->irq.irq, &adpt->irq);
+ cancel_work_sync(&adpt->work_thread);
+
emac_clks_teardown(adpt);
put_device(&adpt->phydev->mdio.dev);
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index 702aa217a27a..66229b300c5a 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -30,6 +30,8 @@
#define QCASPI_MAX_REGS 0x20
+#define QCASPI_RX_MAX_FRAMES 4
+
static const u16 qcaspi_spi_regs[] = {
SPI_REG_BFR_SIZE,
SPI_REG_WRBUF_SPC_AVA,
@@ -249,31 +251,30 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
struct qcaspi *qca = netdev_priv(dev);
- ring->rx_max_pending = 4;
+ ring->rx_max_pending = QCASPI_RX_MAX_FRAMES;
ring->tx_max_pending = TX_RING_MAX_LEN;
- ring->rx_pending = 4;
+ ring->rx_pending = QCASPI_RX_MAX_FRAMES;
ring->tx_pending = qca->txr.count;
}
static int
qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
- const struct net_device_ops *ops = dev->netdev_ops;
struct qcaspi *qca = netdev_priv(dev);
- if ((ring->rx_pending) ||
+ if (ring->rx_pending != QCASPI_RX_MAX_FRAMES ||
(ring->rx_mini_pending) ||
(ring->rx_jumbo_pending))
return -EINVAL;
- if (netif_running(dev))
- ops->ndo_stop(dev);
+ if (qca->spi_thread)
+ kthread_park(qca->spi_thread);
qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);
qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);
- if (netif_running(dev))
- ops->ndo_open(dev);
+ if (qca->spi_thread)
+ kthread_unpark(qca->spi_thread);
return 0;
}
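
Instead of bouncing the interface through ndo_stop()/ndo_open(), the set_ringparam change above parks the SPI worker: kthread_park() returns only once the thread has reached kthread_parkme(), so the ring can be resized without racing it. A minimal sketch of the control side; resize_ring() and the pared-down struct are assumptions modeled on the driver:

#include <linux/kthread.h>
#include <linux/types.h>

/* pared-down stand-in for the driver's struct qcaspi */
struct my_qcaspi {
        struct task_struct *spi_thread; /* created with kthread_run() */
        struct { u32 count; } txr;
};

static void resize_ring(struct my_qcaspi *qca, u32 new_count)
{
        if (qca->spi_thread)
                kthread_park(qca->spi_thread);  /* blocks until parked */

        qca->txr.count = new_count;             /* worker is quiescent */

        if (qca->spi_thread)
                kthread_unpark(qca->spi_thread);
}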
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 15591ad5fe4e..036ab9dfe7fc 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -573,9 +573,20 @@ qcaspi_spi_thread(void *data)
netdev_info(qca->net_dev, "SPI thread created\n");
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_park()) {
+ netif_tx_disable(qca->net_dev);
+ netif_carrier_off(qca->net_dev);
+ qcaspi_flush_tx_ring(qca);
+ kthread_parkme();
+ if (qca->sync == QCASPI_SYNC_READY) {
+ netif_carrier_on(qca->net_dev);
+ netif_wake_queue(qca->net_dev);
+ }
+ continue;
+ }
+
if ((qca->intr_req == qca->intr_svc) &&
- (qca->txr.skb[qca->txr.head] == NULL) &&
- (qca->sync == QCASPI_SYNC_READY))
+ !qca->txr.skb[qca->txr.head])
schedule();
set_current_state(TASK_RUNNING);
@@ -602,11 +613,17 @@ qcaspi_spi_thread(void *data)
if (intr_cause & SPI_INT_CPU_ON) {
qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON);
+ /* Frame decoding in progress */
+ if (qca->frm_handle.state != qca->frm_handle.init)
+ qca->net_dev->stats.rx_dropped++;
+
+ qcafrm_fsm_init_spi(&qca->frm_handle);
+ qca->stats.device_reset++;
+
/* not synced. */
if (qca->sync != QCASPI_SYNC_READY)
continue;
- qca->stats.device_reset++;
netif_wake_queue(qca->net_dev);
netif_carrier_on(qca->net_dev);
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 18d88b424828..deb8d00d27ad 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -375,7 +375,7 @@ nla_put_failure:
struct rtnl_link_ops rmnet_link_ops __read_mostly = {
.kind = "rmnet",
- .maxtype = __IFLA_RMNET_MAX,
+ .maxtype = IFLA_RMNET_MAX,
.priv_size = sizeof(struct rmnet_priv),
.setup = rmnet_vnd_setup,
.validate = rmnet_rtnl_validate,
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index f158fdf3aab2..b66689e0e6f2 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1162,10 +1162,12 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Failed to register net device\n");
- goto err_out_mdio_unregister;
+ goto err_out_phy_disconnect;
}
return 0;
+err_out_phy_disconnect:
+ phy_disconnect(dev->phydev);
err_out_mdio_unregister:
mdiobus_unregister(lp->mii_bus);
err_out_mdio:
@@ -1189,6 +1191,7 @@ static void r6040_remove_one(struct pci_dev *pdev)
struct r6040_private *lp = netdev_priv(dev);
unregister_netdev(dev);
+ phy_disconnect(dev->phydev);
mdiobus_unregister(lp->mii_bus);
mdiobus_free(lp->mii_bus);
netif_napi_del(&lp->napi);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 8d2b184ff575..e000cabf65f8 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4288,6 +4288,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
/* Unconditionally log net taps. */
netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
rx_mode |= AcceptAllPhys;
+ } else if (!(dev->flags & IFF_MULTICAST)) {
+ rx_mode &= ~AcceptMulticast;
} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
dev->flags & IFF_ALLMULTI ||
tp->mac_version == RTL_GIGA_MAC_VER_35) {
@@ -6031,7 +6033,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
struct ring_info *tx_skb = tp->tx_skb + entry;
u32 status;
- status = le32_to_cpu(tp->TxDescArray[entry].opts1);
+ status = le32_to_cpu(READ_ONCE(tp->TxDescArray[entry].opts1));
if (status & DescOwn)
break;
@@ -6114,7 +6116,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
struct RxDesc *desc = tp->RxDescArray + entry;
u32 status;
- status = le32_to_cpu(desc->opts1);
+ status = le32_to_cpu(READ_ONCE(desc->opts1));
if (status & DescOwn)
break;
@@ -6293,12 +6295,17 @@ static void rtl8169_rx_missed(struct net_device *dev)
static void r8169_phylink_handler(struct net_device *ndev)
{
struct rtl8169_private *tp = netdev_priv(ndev);
+ struct device *d = tp_to_dev(tp);
if (netif_carrier_ok(ndev)) {
rtl_link_chg_patch(tp);
- pm_request_resume(&tp->pci_dev->dev);
+ pm_request_resume(d);
+ netif_wake_queue(tp->dev);
} else {
- pm_runtime_idle(&tp->pci_dev->dev);
+ /* In a few cases rx is broken after link-down otherwise */
+ if (rtl_is_8125(tp))
+ rtl_reset_work(tp);
+ pm_runtime_idle(d);
}
if (net_ratelimit())
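
The READ_ONCE() additions in rtl_tx()/rtl_rx() above close a descriptor race: opts1 lives in coherent DMA memory the NIC writes concurrently, and without READ_ONCE() the compiler may reload it after the DescOwn ownership check, seeing a word the hardware has since rewritten. A minimal sketch; the RxDesc layout mirrors the driver, desc_ready() is illustrative:

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct RxDesc {
        __le32 opts1;           /* written by the NIC via DMA */
};

#define DescOwn BIT(31)

static bool desc_ready(struct RxDesc *desc, u32 *status)
{
        /* read opts1 exactly once and reuse that snapshot: the device
         * may flip DescOwn at any moment, so a second implicit load
         * could see a different value than the one already checked
         */
        *status = le32_to_cpu(READ_ONCE(desc->opts1));
        return !(*status & DescOwn);
}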
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 9208a72fe17e..53b9c77c7f6a 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -736,14 +736,14 @@ static void ravb_error_interrupt(struct net_device *ndev)
ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
if (eis & EIS_QFS) {
ris2 = ravb_read(ndev, RIS2);
- ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
RIS2);
/* Receive Descriptor Empty int */
if (ris2 & RIS2_QFF0)
priv->stats[RAVB_BE].rx_over_errors++;
- /* Receive Descriptor Empty int */
+ /* Receive Descriptor Empty int */
if (ris2 & RIS2_QFF1)
priv->stats[RAVB_NC].rx_over_errors++;
@@ -1380,13 +1380,13 @@ static int ravb_open(struct net_device *ndev)
if (priv->chip_id == RCAR_GEN2)
ravb_ptp_init(ndev, priv->pdev);
- netif_tx_start_all_queues(ndev);
-
/* PHY control start */
error = ravb_phy_start(ndev);
if (error)
goto out_ptp_stop;
+ netif_tx_start_all_queues(ndev);
+
return 0;
out_ptp_stop:
@@ -1435,6 +1435,12 @@ static void ravb_tx_timeout_work(struct work_struct *work)
struct net_device *ndev = priv->ndev;
int error;
+ if (!rtnl_trylock()) {
+ usleep_range(1000, 2000);
+ schedule_work(&priv->work);
+ return;
+ }
+
netif_tx_stop_all_queues(ndev);
/* Stop PTP Clock driver */
@@ -1467,7 +1473,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
*/
netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
__func__, error);
- return;
+ goto out_unlock;
}
ravb_emac_init(ndev);
@@ -1477,6 +1483,9 @@ out:
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
+
+out_unlock:
+ rtnl_unlock();
}
/* Packet transmit function for Ethernet AVB */
@@ -1488,7 +1497,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct ravb_tstamp_skb *ts_skb;
struct ravb_tx_desc *desc;
unsigned long flags;
- u32 dma_addr;
+ dma_addr_t dma_addr;
void *buffer;
u32 entry;
u32 len;
@@ -1703,6 +1712,8 @@ static int ravb_close(struct net_device *ndev)
of_phy_deregister_fixed_link(np);
}
+ cancel_work_sync(&priv->work);
+
if (priv->chip_id != RCAR_GEN2) {
free_irq(priv->tx_irqs[RAVB_NC], ndev);
free_irq(priv->rx_irqs[RAVB_NC], ndev);
@@ -2028,7 +2039,9 @@ static int ravb_probe(struct platform_device *pdev)
ndev->hw_features = NETIF_F_RXCSUM;
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
+ error = pm_runtime_resume_and_get(&pdev->dev);
+ if (error < 0)
+ goto out_rpm_disable;
/* The Ether-specific entries in the device structure. */
ndev->base_addr = res->start;
@@ -2199,6 +2212,7 @@ out_release:
free_netdev(ndev);
pm_runtime_put(&pdev->dev);
+out_rpm_disable:
pm_runtime_disable(&pdev->dev);
return error;
}
@@ -2212,15 +2226,15 @@ static int ravb_remove(struct platform_device *pdev)
if (priv->chip_id != RCAR_GEN2)
ravb_ptp_stop(ndev);
- dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
- priv->desc_bat_dma);
/* Set reset mode */
ravb_write(ndev, CCC_OPC_RESET, CCC);
- pm_runtime_put_sync(&pdev->dev);
unregister_netdev(ndev);
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
ravb_mdio_release(priv);
+ dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+ priv->desc_bat_dma);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
@@ -2327,6 +2341,7 @@ static int __maybe_unused ravb_resume(struct device *dev)
ret = ravb_open(ndev);
if (ret < 0)
return ret;
+ ravb_set_rx_mode(ndev);
netif_device_attach(ndev);
}
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 815766620979..e4d919de7e3f 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1273,7 +1273,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
bool removing;
int err = 0;
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 33d7c2940ba9..5a672658da80 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1936,11 +1936,13 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
ret = PTR_ERR(priv->phydev);
dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
priv->phydev = NULL;
+ mdiobus_unregister(bus);
return -ENODEV;
}
ret = phy_device_register(priv->phydev);
if (ret) {
+ phy_device_free(priv->phydev);
mdiobus_unregister(bus);
dev_err(priv->dev,
"phy_device_register err(%d)\n", ret);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index bc82cdf36cc3..570ec618c609 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -176,6 +176,7 @@ struct stmmac_safety_stats {
unsigned long mac_errors[32];
unsigned long mtl_errors[32];
unsigned long dma_errors[32];
+ unsigned long dma_dpp_errors[32];
};
/* Number of fields in Safety Stats */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index bfc4a92f1d92..78be62ecc9a9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -505,6 +505,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->has_gmac4 = 1;
plat_dat->pmt = 1;
plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
+ if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
+ plat_dat->rx_clk_runs_in_lpi = 1;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 4ef041bdf6a1..5bb97f0ec6a2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -111,6 +111,7 @@ struct stm32_ops {
int (*parse_data)(struct stm32_dwmac *dwmac,
struct device *dev);
u32 syscfg_eth_mask;
+ bool clk_rx_enable_in_suspend;
};
static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
@@ -128,7 +129,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
if (ret)
return ret;
- if (!dwmac->dev->power.is_suspended) {
+ if (!dwmac->ops->clk_rx_enable_in_suspend ||
+ !dwmac->dev->power.is_suspended) {
ret = clk_prepare_enable(dwmac->clk_rx);
if (ret) {
clk_disable_unprepare(dwmac->clk_tx);
@@ -508,7 +510,8 @@ static struct stm32_ops stm32mp1_dwmac_data = {
.suspend = stm32mp1_suspend,
.resume = stm32mp1_resume,
.parse_data = stm32mp1_parse_data,
- .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK
+ .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
+ .clk_rx_enable_in_suspend = true
};
static const struct of_device_id stm32_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index e436fa160c7d..59165c0560d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -520,9 +520,9 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
return 0;
}
- val |= PPSCMDx(index, 0x2);
val |= TRGTMODSELx(index, 0x2);
val |= PPSEN0;
+ writel(val, ioaddr + MAC_PPS_CONTROL);
writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index));
@@ -547,6 +547,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index));
/* Finally, activate it */
+ val |= PPSCMDx(index, 0x2);
writel(val, ioaddr + MAC_PPS_CONTROL);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index ff751ab3d765..fc222beeee78 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -212,7 +212,7 @@
((val) << XGMAC_PPS_MINIDX(x))
#define XGMAC_PPSCMD_START 0x2
#define XGMAC_PPSCMD_STOP 0x5
-#define XGMAC_PPSEN0 BIT(4)
+#define XGMAC_PPSENx(x) BIT(4 + (x) * 8)
#define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10)
#define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10)
#define XGMAC_TRGTBUSY0 BIT(31)
@@ -256,6 +256,8 @@
#define XGMAC_RXCEIE BIT(4)
#define XGMAC_TXCEIE BIT(0)
#define XGMAC_MTL_ECC_INT_STATUS 0x000010cc
+#define XGMAC_MTL_DPP_CONTROL 0x000010e0
+#define XGMAC_DPP_DISABLE BIT(0)
#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
#define XGMAC_TQS GENMASK(25, 16)
#define XGMAC_TQS_SHIFT 16
@@ -331,6 +333,7 @@
#define XGMAC_DCEIE BIT(1)
#define XGMAC_TCEIE BIT(0)
#define XGMAC_DMA_ECC_INT_STATUS 0x0000306c
+#define XGMAC_DMA_DPP_INT_STATUS 0x00003074
#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
#define XGMAC_SPH BIT(24)
#define XGMAC_PBLx8 BIT(16)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 070bd7d1ae4c..a126d01f49fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -757,6 +757,44 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
+#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error"
+#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error"
+
+static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
+ { true, "TDPES0", DPP_TX_ERR },
+ { true, "TDPES1", DPP_TX_ERR },
+ { true, "TDPES2", DPP_TX_ERR },
+ { true, "TDPES3", DPP_TX_ERR },
+ { true, "TDPES4", DPP_TX_ERR },
+ { true, "TDPES5", DPP_TX_ERR },
+ { true, "TDPES6", DPP_TX_ERR },
+ { true, "TDPES7", DPP_TX_ERR },
+ { true, "TDPES8", DPP_TX_ERR },
+ { true, "TDPES9", DPP_TX_ERR },
+ { true, "TDPES10", DPP_TX_ERR },
+ { true, "TDPES11", DPP_TX_ERR },
+ { true, "TDPES12", DPP_TX_ERR },
+ { true, "TDPES13", DPP_TX_ERR },
+ { true, "TDPES14", DPP_TX_ERR },
+ { true, "TDPES15", DPP_TX_ERR },
+ { true, "RDPES0", DPP_RX_ERR },
+ { true, "RDPES1", DPP_RX_ERR },
+ { true, "RDPES2", DPP_RX_ERR },
+ { true, "RDPES3", DPP_RX_ERR },
+ { true, "RDPES4", DPP_RX_ERR },
+ { true, "RDPES5", DPP_RX_ERR },
+ { true, "RDPES6", DPP_RX_ERR },
+ { true, "RDPES7", DPP_RX_ERR },
+ { true, "RDPES8", DPP_RX_ERR },
+ { true, "RDPES9", DPP_RX_ERR },
+ { true, "RDPES10", DPP_RX_ERR },
+ { true, "RDPES11", DPP_RX_ERR },
+ { true, "RDPES12", DPP_RX_ERR },
+ { true, "RDPES13", DPP_RX_ERR },
+ { true, "RDPES14", DPP_RX_ERR },
+ { true, "RDPES15", DPP_RX_ERR },
+};
+
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
void __iomem *ioaddr, bool correctable,
struct stmmac_safety_stats *stats)
@@ -768,6 +806,13 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev,
dwxgmac3_log_error(ndev, value, correctable, "DMA",
dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
+
+ value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
+ writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, false, "DMA_DPP",
+ dwxgmac3_dma_dpp_errors,
+ STAT_OFF(dma_dpp_errors), stats);
}
static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
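The new handler reads XGMAC_DMA_DPP_INT_STATUS and immediately writes the value back, which assumes write-1-to-clear semantics; a minimal standalone sketch of that read-then-ack idiom:

	/* Minimal sketch, assuming W1C semantics as used above. */
	static u32 read_and_ack_w1c(void __iomem *reg)
	{
		u32 val = readl(reg);	/* snapshot the pending bits */

		writel(val, reg);	/* writing them back clears exactly those bits */
		return val;		/* caller decodes per-bit error descriptors */
	}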
@@ -804,6 +849,12 @@ static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
+ /* 5. Enable Data Path Parity Protection */
+ value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
+ /* already enabled by default, explicitly enable it again */
+ value &= ~XGMAC_DPP_DISABLE;
+ writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);
+
return 0;
}
@@ -837,7 +888,11 @@ static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
ret |= !corr;
}
- err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
+ /* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in
+ * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
+ * Parity Errors here
+ */
+ err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
corr = dma & XGMAC_DECIS;
if (err) {
dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
@@ -853,6 +908,7 @@ static const struct dwxgmac3_error {
{ dwxgmac3_mac_errors },
{ dwxgmac3_mtl_errors },
{ dwxgmac3_dma_errors },
+ { dwxgmac3_dma_dpp_errors },
};
static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
@@ -1101,7 +1157,19 @@ static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
- val |= XGMAC_PPSEN0;
+
+ /* XGMAC Core has 4 PPS outputs at most.
+ *
+ * Prior to XGMAC Core 3.20, Fixed mode or Flexible mode is selectable
+ * for PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default,
+ * and cannot be switched to Fixed mode, since PPSEN{1,2,3} are
+ * read-only reserved to 0.
+ * But always setting PPSEN{1,2,3} does not make things worse ;-)
+ *
+ * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must
+ * be set, or the PPS outputs stay in Fixed PPS mode by default.
+ */
+ val |= XGMAC_PPSENx(index);
writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
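Worked values for the XGMAC_PPSENx() macro referenced above (a standalone check, not driver code):

	#include <stdio.h>

	#define BIT(n)		(1U << (n))
	#define XGMAC_PPSENx(x)	BIT(4 + (x) * 8)	/* per-output enable bit */

	int main(void)
	{
		/* prints 0x00000010, 0x00001000, 0x00100000, 0x10000000 */
		for (int i = 0; i < 4; i++)
			printf("PPSEN%d = 0x%08x\n", i, XGMAC_PPSENx(i));
		return 0;
	}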
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 252cf48c5816..5b9f344fdd32 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -170,8 +170,10 @@
#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
+#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204
#define MMC_XGMAC_TX_FPE_FRAG 0x208
#define MMC_XGMAC_TX_HOLD_REQ 0x20c
+#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224
#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
@@ -336,6 +338,8 @@ static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK);
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 8c14c9966394..79546810bb3d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -43,7 +43,8 @@ static void config_sub_second_increment(void __iomem *ioaddr,
if (!(value & PTP_TCR_TSCTRLSSR))
data = (data * 1000) / 465;
- data &= PTP_SSIR_SSINC_MASK;
+ if (data > PTP_SSIR_SSINC_MAX)
+ data = PTP_SSIR_SSINC_MAX;
reg_value = data;
if (gmac4)
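Why saturating replaces masking here (standalone arithmetic; the 2 MHz clock value is assumed for illustration):

	#include <stdio.h>

	#define PTP_SSIR_SSINC_MAX 0xff	/* SSINC is an 8-bit field */

	int main(void)
	{
		unsigned long ptp_clock = 2000000;		/* 2 MHz, assumed */
		unsigned long data = 1000000000UL / ptp_clock;	/* 500 ns increment */

		/* old: data &= 0xff -> 500 & 0xff = 244, a silently wrong period */
		if (data > PTP_SSIR_SSINC_MAX)			/* new: saturate */
			data = PTP_SSIR_SSINC_MAX;
		printf("ssinc = %lu\n", data);			/* 255 */
		return 0;
	}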
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 9931724c4727..ee48283b2d96 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -932,7 +932,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
- priv->eee_active = phy_init_eee(phy, 1) >= 0;
+ priv->eee_active =
+ phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
priv->eee_enabled = stmmac_eee_init(priv);
stmmac_set_eee_pls(priv, priv->hw, true);
}
@@ -998,6 +999,11 @@ static int stmmac_init_phy(struct net_device *dev)
int addr = priv->plat->phy_addr;
struct phy_device *phydev;
+ if (addr < 0) {
+ netdev_err(priv->dev, "no phy found\n");
+ return -ENODEV;
+ }
+
phydev = mdiobus_get_phy(priv->mii, addr);
if (!phydev) {
netdev_err(priv->dev, "no phy at addr %d\n", addr);
@@ -3434,6 +3440,55 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
+static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
+ struct dma_desc *p,
+ int status, unsigned int len)
+{
+ int ret, coe = priv->hw->rx_csum;
+ unsigned int plen = 0, hlen = 0;
+
+ /* Not first descriptor, buffer is always zero */
+ if (priv->sph && len)
+ return 0;
+
+ /* First descriptor, get split header length */
+ ret = stmmac_get_rx_header_len(priv, p, &hlen);
+ if (priv->sph && hlen) {
+ priv->xstats.rx_split_hdr_pkt_n++;
+ return hlen;
+ }
+
+ /* First descriptor, not last descriptor and not split header */
+ if (status & rx_not_ls)
+ return priv->dma_buf_sz;
+
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+ /* First descriptor and last descriptor and not split header */
+ return min_t(unsigned int, priv->dma_buf_sz, plen);
+}
+
+static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
+ struct dma_desc *p,
+ int status, unsigned int len)
+{
+ int coe = priv->hw->rx_csum;
+ unsigned int plen = 0;
+
+ /* Not split header, buffer is not available */
+ if (!priv->sph)
+ return 0;
+
+ /* Not last descriptor */
+ if (status & rx_not_ls)
+ return priv->dma_buf_sz;
+
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+ /* Last descriptor */
+ return plen - len;
+}
+
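Case summary for the two helpers above (SPH = split header; a sketch of the common paths, not exhaustive):

	/*
	 *   descriptor              buf1_len                     buf2_len
	 *   -------------------------------------------------------------------
	 *   first, SPH header hit   hlen (header only)           rest of payload
	 *   first, not last, no SPH dma_buf_sz                   0
	 *   non-first with SPH      0 (header already consumed)  per rx_not_ls
	 *   last                    min(dma_buf_sz, frame len)   frame len - len
	 */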
/**
* stmmac_rx - manage the receive process
* @priv: driver private structure
@@ -3463,11 +3518,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
- unsigned int hlen = 0, prev_len = 0;
+ unsigned int buf1_len = 0, buf2_len = 0;
enum pkt_hash_types hash_type;
struct stmmac_rx_buffer *buf;
struct dma_desc *np, *p;
- unsigned int sec_len;
int entry;
u32 hash;
@@ -3482,11 +3536,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
len = 0;
}
+read_again:
if (count >= limit)
break;
-read_again:
- sec_len = 0;
+ buf1_len = 0;
+ buf2_len = 0;
entry = next_entry;
buf = &rx_q->buf_pool[entry];
@@ -3511,7 +3566,6 @@ read_again:
np = rx_q->dma_rx + next_entry;
prefetch(np);
- prefetch(page_address(buf->page));
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->dev->stats,
@@ -3528,69 +3582,61 @@ read_again:
goto read_again;
if (unlikely(error)) {
dev_kfree_skb(skb);
+ skb = NULL;
count++;
continue;
}
/* Buffer is good. Go on. */
- if (likely(status & rx_not_ls)) {
- len += priv->dma_buf_sz;
- } else {
- prev_len = len;
- len = stmmac_get_rx_frame_len(priv, p, coe);
-
- /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
- * Type frames (LLC/LLC-SNAP)
- *
- * llc_snap is never checked in GMAC >= 4, so this ACS
- * feature is always disabled and packets need to be
- * stripped manually.
- */
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
- unlikely(status != llc_snap))
- len -= ETH_FCS_LEN;
+ prefetch(page_address(buf->page));
+ if (buf->sec_page)
+ prefetch(page_address(buf->sec_page));
+
+ buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
+ len += buf1_len;
+ buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
+ len += buf2_len;
+
+ /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+ * Type frames (LLC/LLC-SNAP)
+ *
+ * llc_snap is never checked in GMAC >= 4, so this ACS
+ * feature is always disabled and packets need to be
+ * stripped manually.
+ */
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+ unlikely(status != llc_snap)) {
+ if (buf2_len)
+ buf2_len -= ETH_FCS_LEN;
+ else
+ buf1_len -= ETH_FCS_LEN;
+
+ len -= ETH_FCS_LEN;
}
if (!skb) {
- int ret = stmmac_get_rx_header_len(priv, p, &hlen);
-
- if (priv->sph && !ret && (hlen > 0)) {
- sec_len = len;
- if (!(status & rx_not_ls))
- sec_len = sec_len - hlen;
- len = hlen;
-
- prefetch(page_address(buf->sec_page));
- priv->xstats.rx_split_hdr_pkt_n++;
- }
-
- skb = napi_alloc_skb(&ch->rx_napi, len);
+ skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
if (!skb) {
priv->dev->stats.rx_dropped++;
count++;
- continue;
+ goto drain_data;
}
- dma_sync_single_for_cpu(priv->device, buf->addr, len,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ buf1_len, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, page_address(buf->page),
- len);
- skb_put(skb, len);
+ buf1_len);
+ skb_put(skb, buf1_len);
/* Data payload copied into SKB, page ready for recycle */
page_pool_recycle_direct(rx_q->page_pool, buf->page);
buf->page = NULL;
- } else {
- unsigned int buf_len = len - prev_len;
-
- if (likely(status & rx_not_ls))
- buf_len = priv->dma_buf_sz;
-
+ } else if (buf1_len) {
dma_sync_single_for_cpu(priv->device, buf->addr,
- buf_len, DMA_FROM_DEVICE);
+ buf1_len, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf->page, 0, buf_len,
+ buf->page, 0, buf1_len,
priv->dma_buf_sz);
/* Data payload appended into SKB */
@@ -3598,22 +3644,23 @@ read_again:
buf->page = NULL;
}
- if (sec_len > 0) {
+ if (buf2_len) {
dma_sync_single_for_cpu(priv->device, buf->sec_addr,
- sec_len, DMA_FROM_DEVICE);
+ buf2_len, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf->sec_page, 0, sec_len,
+ buf->sec_page, 0, buf2_len,
priv->dma_buf_sz);
- len += sec_len;
-
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->sec_page);
buf->sec_page = NULL;
}
+drain_data:
if (likely(status & rx_not_ls))
goto read_again;
+ if (!skb)
+ continue;
/* Got entire packet into SKB. Finish it. */
@@ -3631,13 +3678,14 @@ read_again:
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rx_napi, skb);
+ skb = NULL;
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += len;
count++;
}
- if (status & rx_not_ls) {
+ if (status & rx_not_ls || skb) {
rx_q->state_saved = true;
rx_q->state.skb = skb;
rx_q->state.error = error;
@@ -4643,9 +4691,9 @@ int stmmac_dvr_probe(struct device *device,
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
if (ret < 0) {
- dev_err(priv->device,
- "%s: MDIO bus (id: %d) registration failed",
- __func__, priv->plat->bus_id);
+ dev_err_probe(priv->device, ret,
+ "%s: MDIO bus (id: %d) registration failed\n",
+ __func__, priv->plat->bus_id);
goto error_mdio_register;
}
}
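dev_err_probe(), adopted in the two hunks above, folds the -EPROBE_DEFER special-casing into one call. Roughly, as a sketch of its documented behaviour (it also records the deferral reason for debugfs):

	static int my_err_probe(struct device *dev, int err, const char *what)
	{
		if (err != -EPROBE_DEFER)
			dev_err(dev, "error %d: %s\n", err, what);
		else
			dev_dbg(dev, "deferred: %s\n", what);
		return err;
	}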
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 40c42637ad75..580a6defe108 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -358,8 +358,12 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->parent = priv->device;
err = of_mdiobus_register(new_bus, mdio_node);
- if (err != 0) {
- dev_err(dev, "Cannot register the MDIO bus\n");
+ if (err == -ENODEV) {
+ err = 0;
+ dev_info(dev, "MDIO bus is disabled\n");
+ goto bus_register_fail;
+ } else if (err) {
+ dev_err_probe(dev, err, "Cannot register the MDIO bus\n");
goto bus_register_fail;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 70cbf48c2c03..6388bae68b65 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -107,10 +107,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
- axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
- axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
- axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
- axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");
+ axi->axi_kbbe = of_property_read_bool(np, "snps,kbbe");
+ axi->axi_fb = of_property_read_bool(np, "snps,fb");
+ axi->axi_mb = of_property_read_bool(np, "snps,mb");
+ axi->axi_rb = of_property_read_bool(np, "snps,rb");
if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
axi->axi_wr_osr_lmt = 1;
@@ -554,7 +554,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
- if (plat->force_thresh_dma_mode) {
+ if (plat->force_thresh_dma_mode && plat->force_sf_dma_mode) {
plat->force_sf_dma_mode = 0;
dev_warn(&pdev->dev,
"force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 7abb1d47e7da..60e6b085e2f6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -61,7 +61,7 @@
#define PTP_TCR_TSENMACADDR BIT(18)
/* SSIR defines */
-#define PTP_SSIR_SSINC_MASK 0xff
+#define PTP_SSIR_SSINC_MAX 0xff
#define GMAC4_PTP_SSIR_SSINC_SHIFT 16
#endif /* __STMMAC_PTP_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index ba03a2d77434..e65577f1da54 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -1614,12 +1614,16 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv)
}
ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
- if (ret)
+ if (ret) {
+ kfree_skb(skb);
goto cleanup;
+ }
ret = dev_set_promiscuity(priv->dev, 1);
- if (ret)
+ if (ret) {
+ kfree_skb(skb);
goto cleanup;
+ }
skb_set_queue_mapping(skb, 0);
ret = dev_queue_xmit(skb);
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 6e78a33aa5e4..ed9af678bcfb 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -1337,7 +1337,7 @@ static void cas_init_rx_dma(struct cas *cp)
writel(val, cp->regs + REG_RX_PAGE_SIZE);
/* enable the header parser if desired */
- if (CAS_HP_FIRMWARE == cas_prog_null)
+ if (&CAS_HP_FIRMWARE[0] == &cas_prog_null[0])
return;
val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
@@ -2291,7 +2291,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
drops = 0;
while (1) {
struct cas_rx_comp *rxc = rxcs + entry;
- struct sk_buff *uninitialized_var(skb);
+ struct sk_buff *skb;
int type, len;
u64 words[4];
int i, dring;
@@ -3807,7 +3807,7 @@ static void cas_reset(struct cas *cp, int blkflag)
/* program header parser */
if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
- (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
+ (&CAS_HP_ALT_FIRMWARE[0] == &cas_prog_null[0])) {
cas_load_firmware(cp, CAS_HP_FIRMWARE);
} else {
cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
@@ -5138,6 +5138,8 @@ err_out_iounmap:
cas_shutdown(cp);
mutex_unlock(&cp->pm_mutex);
+ vfree(cp->fw_data);
+
pci_iounmap(pdev, cp->regs);
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 01ea0d6f8819..934a4b54784b 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -290,6 +290,9 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
hp = mdesc_grab();
+ if (!hp)
+ return -ENODEV;
+
rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
err = -ENODEV;
if (!rmac) {
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 70b9a7bfe4ec..e659415c62bd 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -429,7 +429,7 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
struct niu_link_config *lp = &np->link_config;
u16 pll_cfg, pll_sts;
int max_retry = 100;
- u64 uninitialized_var(sig), mask, val;
+ u64 sig, mask, val;
u32 tx_cfg, rx_cfg;
unsigned long i;
int err;
@@ -526,7 +526,7 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
struct niu_link_config *lp = &np->link_config;
u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
int max_retry = 100;
- u64 uninitialized_var(sig), mask, val;
+ u64 sig, mask, val;
unsigned long i;
int err;
@@ -714,7 +714,7 @@ static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
static int esr_reset(struct niu *np)
{
- u32 uninitialized_var(reset);
+ u32 reset;
int err;
err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
@@ -4503,7 +4503,7 @@ static int niu_alloc_channels(struct niu *np)
err = niu_rbr_fill(np, rp, GFP_KERNEL);
if (err)
- return err;
+ goto out_err;
}
tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 3133f903279c..dbbbb6ea9f2b 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2064,9 +2064,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
- dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
- dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
/* Reuse original ring buffer. */
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
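The +2 accounts for the 2-byte alignment pad at the start of the mapped buffer (sketch):

	/*
	 *  dma_addr       dma_addr + 2                 dma_addr + 2 + len
	 *     |  pad (2)      |  packet data (len)          |
	 *     |<-------------- sync len + 2 --------------->|
	 *
	 * Syncing only 'len' bytes from dma_addr stops 2 bytes short of the
	 * end of the packet that skb_copy_from_linear_data() reads.
	 */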
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 96b883f965f6..b6c03adf1e76 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -431,6 +431,9 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
hp = mdesc_grab();
+ if (!hp)
+ return -ENODEV;
+
vp = vnet_find_parent(hp, vdev->mp, vdev);
if (IS_ERR(vp)) {
pr_err("Cannot find port parent vnet\n");
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 33eca554424a..774a72db7c96 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1753,6 +1753,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
err_cleanup:
if (!cpsw->usage_count) {
+ napi_disable(&cpsw->napi_rx);
+ napi_disable(&cpsw->napi_tx);
cpdma_ctlr_stop(cpsw->dma);
cpsw_destroy_xdp_rxqs(cpsw);
}
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index e7c24396933e..f17619c545ae 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -60,23 +60,37 @@
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
- int idx;
+ int idx, idx2;
+ u32 hi_val = 0;
idx = start / 32;
+ idx2 = (start + bits - 1) / 32;
+ /* Check if bits to be fetched exceed a word */
+ if (idx != idx2) {
+ idx2 = 2 - idx2; /* flip */
+ hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
+ }
start -= idx * 32;
idx = 2 - idx; /* flip */
- return (ale_entry[idx] >> start) & BITMASK(bits);
+ return (hi_val + (ale_entry[idx] >> start)) & BITMASK(bits);
}
static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
u32 value)
{
- int idx;
+ int idx, idx2;
value &= BITMASK(bits);
- idx = start / 32;
+ idx = start / 32;
+ idx2 = (start + bits - 1) / 32;
+ /* Check if bits to be set exceed a word */
+ if (idx != idx2) {
+ idx2 = 2 - idx2; /* flip */
+ ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
+ ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
+ }
start -= idx * 32;
- idx = 2 - idx; /* flip */
+ idx = 2 - idx; /* flip */
ale_entry[idx] &= ~(BITMASK(bits) << start);
ale_entry[idx] |= (value << start);
}
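A standalone rendition of the cross-word read above, exercised on a field that straddles the first word boundary (entry layout assumed as in the driver: entry[2] holds bits 31:0, entry[1] bits 63:32, entry[0] bits 95:64):

	#include <stdio.h>

	#define BITMASK(bits) (((bits) == 32) ? ~0U : ((1U << (bits)) - 1))

	static unsigned int ale_get_field(unsigned int *ale_entry,
					  unsigned int start, unsigned int bits)
	{
		int idx = start / 32, idx2 = (start + bits - 1) / 32;
		unsigned int hi_val = 0;

		if (idx != idx2) {
			idx2 = 2 - idx2;	/* flip */
			hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
		}
		start -= idx * 32;
		idx = 2 - idx;			/* flip */
		return (hi_val + (ale_entry[idx] >> start)) & BITMASK(bits);
	}

	int main(void)
	{
		unsigned int entry[3] = { 0, 0x0000002A, 0xF4000000 };

		/* 12-bit field at bit 26: low 6 bits from entry[2] (0x3D),
		 * high 6 bits from entry[1] (0x2A << 6) -> prints 0xabd
		 */
		printf("0x%03x\n", ale_get_field(entry, 26, 12));
		return 0;
	}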
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 4154c48d1ddf..5dbb4ed1b132 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1262,7 +1262,7 @@ out:
}
/* Submit the packet */
-static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct netcp_intf *netcp = netdev_priv(ndev);
struct netcp_stats *tx_stats = &netcp->stats;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 9d9f8acb7ee3..4e6c71da9f21 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -317,15 +317,17 @@ static int gelic_card_init_chain(struct gelic_card *card,
/* set up the hardware pointers in each descriptor */
for (i = 0; i < no; i++, descr++) {
+ dma_addr_t cpu_addr;
+
gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
- descr->bus_addr =
- dma_map_single(ctodev(card), descr,
- GELIC_DESCR_SIZE,
- DMA_BIDIRECTIONAL);
- if (!descr->bus_addr)
+ cpu_addr = dma_map_single(ctodev(card), descr,
+ GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(ctodev(card), cpu_addr))
goto iommu_error;
+ descr->bus_addr = cpu_to_be32(cpu_addr);
descr->next = descr + 1;
descr->prev = descr - 1;
}
@@ -365,28 +367,30 @@ iommu_error:
*
* allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
* Activate the descriptor state-wise
+ *
+ * Gelic RX sk_buffs must be aligned to GELIC_NET_RXBUF_ALIGN and the length
+ * must be a multiple of GELIC_NET_RXBUF_ALIGN.
*/
static int gelic_descr_prepare_rx(struct gelic_card *card,
struct gelic_descr *descr)
{
+ static const unsigned int rx_skb_size =
+ ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) +
+ GELIC_NET_RXBUF_ALIGN - 1;
+ dma_addr_t cpu_addr;
int offset;
- unsigned int bufsize;
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
dev_info(ctodev(card), "%s: ERROR status\n", __func__);
- /* we need to round up the buffer size to a multiple of 128 */
- bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
- /* and we need to have it 128 byte aligned, therefore we allocate a
- * bit more */
- descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
+ descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
if (!descr->skb) {
descr->buf_addr = 0; /* tell DMAC don't touch memory */
dev_info(ctodev(card),
"%s:allocate skb failed !!\n", __func__);
return -ENOMEM;
}
- descr->buf_size = cpu_to_be32(bufsize);
+ descr->buf_size = cpu_to_be32(rx_skb_size);
descr->dmac_cmd_status = 0;
descr->result_size = 0;
descr->valid_size = 0;
@@ -397,11 +401,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
if (offset)
skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
/* io-mmu-map the skb */
- descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card),
- descr->skb->data,
- GELIC_NET_MAX_MTU,
- DMA_FROM_DEVICE));
- if (!descr->buf_addr) {
+ cpu_addr = dma_map_single(ctodev(card), descr->skb->data,
+ GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE);
+ descr->buf_addr = cpu_to_be32(cpu_addr);
+ if (dma_mapping_error(ctodev(card), cpu_addr)) {
dev_kfree_skb_any(descr->skb);
descr->skb = NULL;
dev_info(ctodev(card),
@@ -781,7 +784,7 @@ static int gelic_descr_prepare_tx(struct gelic_card *card,
buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE);
- if (!buf) {
+ if (dma_mapping_error(ctodev(card), buf)) {
dev_err(ctodev(card),
"dma map 2 failed (%p, %i). Dropping packet\n",
skb->data, skb->len);
@@ -917,7 +920,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
data_error = be32_to_cpu(descr->data_error);
/* unmap skb buffer */
dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
- GELIC_NET_MAX_MTU,
+ GELIC_NET_MAX_FRAME,
DMA_FROM_DEVICE);
skb_put(skb, be32_to_cpu(descr->valid_size)?
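The pattern these hunks converge on: a bus address of 0 can be a valid mapping on some platforms, so only dma_mapping_error() is a reliable failure check. A minimal sketch:

	#include <linux/dma-mapping.h>

	static int gelic_map_buf(struct device *dev, void *buf, size_t len,
				 dma_addr_t *out)
	{
		dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, addr))
			return -ENOMEM;		/* never mapped; nothing to unmap */
		*out = addr;
		return 0;
	}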
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 051033580f0a..7624c68c95cb 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -19,8 +19,9 @@
#define GELIC_NET_RX_DESCRIPTORS 128 /* num of descriptors */
#define GELIC_NET_TX_DESCRIPTORS 128 /* num of descriptors */
-#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN
-#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN
+#define GELIC_NET_MAX_FRAME 2312
+#define GELIC_NET_MAX_MTU 2294
+#define GELIC_NET_MIN_MTU 64
#define GELIC_NET_RXBUF_ALIGN 128
#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */
#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ
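The new constants are self-consistent under the usual framing arithmetic (assumed breakdown; standalone check):

	#include <assert.h>

	#define GELIC_NET_MAX_FRAME 2312
	#define GELIC_NET_MAX_MTU   2294

	int main(void)
	{
		/* 2294 payload + 14 Ethernet header + 4 FCS = 2312 on-wire bytes */
		assert(GELIC_NET_MAX_MTU + 14 + 4 == GELIC_NET_MAX_FRAME);
		return 0;
	}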
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 2db546b27ee0..411b2a6091c9 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1217,7 +1217,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
key_index = wl->current_key;
if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) {
- /* reques to change default key index */
+ /* request to change default key index */
pr_debug("%s: request to change default key to %d\n",
__func__, key_index);
wl->current_key = key_index;
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index c62f474b6d08..fcebd2418dbd 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1302,12 +1302,15 @@ static int tsi108_open(struct net_device *dev)
data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size,
&data->rxdma, GFP_KERNEL);
- if (!data->rxring)
+ if (!data->rxring) {
+ free_irq(data->irq_num, dev);
return -ENOMEM;
+ }
data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size,
&data->txdma, GFP_KERNEL);
if (!data->txring) {
+ free_irq(data->irq_num, dev);
dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
data->rxdma);
return -ENOMEM;
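The fix open-codes the unwinding; the same release ordering expressed as the more common goto ladder (a sketch, not what the driver does):

	data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size,
					  &data->rxdma, GFP_KERNEL);
	if (!data->rxring)
		goto err_free_irq;

	data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size,
					  &data->txdma, GFP_KERNEL);
	if (!data->txring)
		goto err_free_rxring;
	return 0;

err_free_rxring:
	dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
			  data->rxdma);
err_free_irq:
	free_irq(data->irq_num, dev);
	return -ENOMEM;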
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 8d994cebb6b0..2c9ef02a0262 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -1,12 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Xilink device configuration
+# Xilinx device configuration
#
config NET_VENDOR_XILINX
bool "Xilinx devices"
default y
- depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || ARM || COMPILE_TEST
+ depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP || MIPS || X86 || ARM || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -19,18 +19,33 @@ if NET_VENDOR_XILINX
config XILINX_EMACLITE
tristate "Xilinx 10/100 Ethernet Lite support"
- depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS
+ depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || ARCH_ZYNQMP
select PHYLIB
---help---
This driver supports the 10/100 Ethernet Lite from Xilinx.
config XILINX_AXI_EMAC
- tristate "Xilinx 10/100/1000 AXI Ethernet support"
- depends on MICROBLAZE || X86 || ARM || COMPILE_TEST
+ tristate "Xilinx AXI Ethernet support"
+ depends on MICROBLAZE || X86 || ARM || ARCH_ZYNQMP || COMPILE_TEST
select PHYLINK
---help---
- This driver supports the 10/100/1000 Ethernet from Xilinx for the
- AXI bus interface used in Xilinx Virtex FPGAs.
+ This driver supports the Xilinx AXI 1G/2.5G, 10 Gigabit,
+ 10G/25G High Speed and USXGMII Ethernet Subsystem.
+
+config XILINX_AXI_EMAC_HWTSTAMP
+ bool "Generate hardware packet timestamps"
+ depends on XILINX_AXI_EMAC
+ depends on PTP_1588_CLOCK
+ default n
+ ---help---
+ Generate hardware packet timestamps. This is to facilitate IEEE 1588.
+
+config AXIENET_HAS_MCDMA
+ bool "AXI Ethernet is configured with MCDMA"
+ depends on XILINX_AXI_EMAC
+ default n
+ ---help---
+ Select this option when the AXI Ethernet hardware is generated with MCDMA.
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
@@ -40,4 +55,54 @@ config XILINX_LL_TEMAC
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
core used in Xilinx Spartan and Virtex FPGAs
+config XILINX_TSN
+ bool "Enable Xilinx's TSN IP"
+ default n
+ ---help---
+ Enable Xilinx's TSN IP.
+
+config XILINX_TSN_PTP
+ bool "Generate hardware packet timestamps using Xilinx's TSN IP"
+ depends on XILINX_TSN
+ depends on PTP_1588_CLOCK
+ default y
+ ---help---
+ Generate hardware packet timestamps. This is to facilitate IEEE 1588.
+
+config XILINX_TSN_QBV
+ bool "Support Qbv protocol in TSN"
+ depends on XILINX_TSN_PTP
+ depends on PTP_1588_CLOCK
+ default y
+ ---help---
+ Enables TSN Qbv protocol.
+
+config XILINX_TSN_SWITCH
+ bool "Support TSN switch"
+ depends on XILINX_TSN
+ default y
+ ---help---
+ Enable Xilinx's TSN Switch support.
+
+config XILINX_TSN_QCI
+ bool "Support Qci protocol in TSN"
+ depends on XILINX_TSN_SWITCH
+ default y
+ ---help---
+ Enable TSN QCI protocol.
+
+config XILINX_TSN_CB
+ bool "Support CB protocol in TSN"
+ depends on XILINX_TSN_SWITCH
+ default y
+ ---help---
+ Enable TSN CB protocol support.
+
+config XILINX_TSN_QBR
+ bool "Support QBR protocol in TSN"
+ depends on XILINX_TSN_SWITCH
+ default y
+ ---help---
+ Enable TSN QBR protocol support.
+
endif # NET_VENDOR_XILINX
diff --git a/drivers/net/ethernet/xilinx/Makefile b/drivers/net/ethernet/xilinx/Makefile
index 7d7dc1771423..54de845ee4a1 100644
--- a/drivers/net/ethernet/xilinx/Makefile
+++ b/drivers/net/ethernet/xilinx/Makefile
@@ -1,10 +1,18 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Makefile for the Xilink network device drivers.
+# Makefile for the Xilinx network device drivers.
#
ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
-xilinx_emac-objs := xilinx_axienet_main.o xilinx_axienet_mdio.o
+obj-$(CONFIG_XILINX_TSN) += xilinx_tsn_ep.o
+obj-$(CONFIG_XILINX_TSN_PTP) += xilinx_tsn_ptp_xmit.o xilinx_tsn_ptp_clock.o
+obj-$(CONFIG_XILINX_TSN_QBV) += xilinx_tsn_shaper.o
+obj-$(CONFIG_XILINX_TSN_QCI) += xilinx_tsn_qci.o
+obj-$(CONFIG_XILINX_TSN_CB) += xilinx_tsn_cb.o
+obj-$(CONFIG_XILINX_TSN_SWITCH) += xilinx_tsn_switch.o
+xilinx_emac-objs := xilinx_axienet_main.o xilinx_axienet_mdio.o xilinx_axienet_dma.o
obj-$(CONFIG_XILINX_AXI_EMAC) += xilinx_emac.o
+obj-$(CONFIG_XILINX_TSN_QBR) += xilinx_tsn_preemption.o
+obj-$(CONFIG_AXIENET_HAS_MCDMA) += xilinx_axienet_mcdma.o
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index a109438f4a78..f4a1d5985cfa 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -844,7 +844,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_frag_t *frag;
num_frag = skb_shinfo(skb)->nr_frags;
- frag = &skb_shinfo(skb)->frags[0];
start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -887,6 +886,8 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->phys = cpu_to_be32(skb_dma_addr);
for (ii = 0; ii < num_frag; ii++) {
+ frag = &skb_shinfo(skb)->frags[ii];
if (++lp->tx_bd_tail >= TX_BD_NUM)
lp->tx_bd_tail = 0;
@@ -919,7 +920,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->phys = cpu_to_be32(skb_dma_addr);
cur_p->len = cpu_to_be32(skb_frag_size(frag));
cur_p->app0 = 0;
- frag++;
}
cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
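Shape of the fragment loop after this change (sketch): each iteration indexes its own fragment and the tail pointer advances exactly once per descriptor, instead of relying on a pointer bumped at the loop tail.

	for (ii = 0; ii < num_frag; ii++) {
		frag = &skb_shinfo(skb)->frags[ii];	/* per-iteration index */
		if (++lp->tx_bd_tail >= TX_BD_NUM)	/* single advance + wrap */
			lp->tx_bd_tail = 0;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		/* ... map skb_frag_address(frag), fill cur_p ... */
	}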
@@ -1381,9 +1381,11 @@ static int temac_probe(struct platform_device *pdev)
lp->temac_features = 0;
if (temac_np) {
p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
+ dev_info(&op->dev, "TX_CSUM %d\n", be32_to_cpup(p));
if (p && be32_to_cpu(*p))
lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
+ dev_info(&op->dev, "RX_CSUM %d\n", be32_to_cpup(p));
if (p && be32_to_cpu(*p))
lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
} else if (pdata) {
@@ -1481,15 +1483,15 @@ static int temac_probe(struct platform_device *pdev)
}
/* Error handle returned DMA RX and TX interrupts */
- if (lp->rx_irq < 0) {
- if (lp->rx_irq != -EPROBE_DEFER)
- dev_err(&pdev->dev, "could not get DMA RX irq\n");
- return lp->rx_irq;
+ if (lp->rx_irq <= 0) {
+ rc = lp->rx_irq ?: -EINVAL;
+ return dev_err_probe(&pdev->dev, rc,
+ "could not get DMA RX irq\n");
}
- if (lp->tx_irq < 0) {
- if (lp->tx_irq != -EPROBE_DEFER)
- dev_err(&pdev->dev, "could not get DMA TX irq\n");
- return lp->tx_irq;
+ if (lp->tx_irq <= 0) {
+ rc = lp->tx_irq ?: -EINVAL;
+ return dev_err_probe(&pdev->dev, rc,
+ "could not get DMA TX irq\n");
}
if (temac_np) {
@@ -1504,14 +1506,15 @@ static int temac_probe(struct platform_device *pdev)
temac_init_mac_address(ndev, pdata->mac_addr);
}
- rc = temac_mdio_setup(lp, pdev);
- if (rc)
- dev_warn(&pdev->dev, "error registering MDIO bus\n");
-
if (temac_np) {
lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
- if (lp->phy_node)
+ if (lp->phy_node) {
dev_dbg(lp->dev, "using PHY node %pOF\n", temac_np);
+
+ rc = temac_mdio_setup(lp, pdev);
+ if (rc)
+ dev_warn(&pdev->dev, "error registering MDIO bus\n");
+ }
} else if (pdata) {
snprintf(lp->phy_name, sizeof(lp->phy_name),
PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index 6fd2dea4e60f..8e672c0a6c95 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -69,6 +69,7 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
int clk_div;
int rc;
struct resource res;
+ struct device_node *np1 = of_get_parent(lp->phy_node);
/* Get MDIO bus frequency (if specified) */
bus_hz = 0;
@@ -96,7 +97,7 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
return -ENOMEM;
if (np) {
- of_address_to_resource(np, 0, &res);
+ of_address_to_resource(np1, 0, &res);
snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
(unsigned long long)res.start);
} else if (pdata) {
@@ -112,7 +113,7 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
lp->mii_bus = bus;
- rc = of_mdiobus_register(bus, np);
+ rc = of_mdiobus_register(bus, np1);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 04e51af32178..f9746e3eca95 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -3,7 +3,7 @@
* Definitions for Xilinx Axi Ethernet device driver.
*
* Copyright (c) 2009 Secret Lab Technologies, Ltd.
- * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
+ * Copyright (c) 2010 - 2018 Xilinx, Inc. All rights reserved.
*/
#ifndef XILINX_AXIENET_H
@@ -14,6 +14,9 @@
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/of_platform.h>
/* Packet size info */
#define XAE_HDR_SIZE 14 /* Size of Ethernet header */
@@ -25,6 +28,17 @@
#define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + VLAN_ETH_HLEN + XAE_TRL_SIZE)
#define XAE_MAX_JUMBO_FRAME_SIZE (XAE_JUMBO_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
+/* DMA address width min and max range */
+#define XAE_DMA_MASK_MIN 32
+#define XAE_DMA_MASK_MAX 64
+
+/* In AXI DMA Tx and Rx queue count is same */
+#define for_each_tx_dma_queue(lp, var) \
+ for ((var) = 0; (var) < (lp)->num_tx_queues; (var)++)
+
+#define for_each_rx_dma_queue(lp, var) \
+ for ((var) = 0; (var) < (lp)->num_rx_queues; (var)++)
+
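Typical use of the iterator macros above (sketch; lp and the napi array are as declared later in this header):

	u16 q;

	for_each_rx_dma_queue(lp, q)
		napi_enable(&lp->napi[q]);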
/* Configuration options */
/* Accept all incoming packets. Default: disabled (cleared) */
@@ -122,7 +136,7 @@
/* Default TX/RX Threshold and waitbound values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
#define XAXIDMA_DFT_TX_WAITBOUND 254
-#define XAXIDMA_DFT_RX_THRESHOLD 24
+#define XAXIDMA_DFT_RX_THRESHOLD 1
#define XAXIDMA_DFT_RX_WAITBOUND 254
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
@@ -141,6 +155,22 @@
#define XAXIDMA_BD_MINIMUM_ALIGNMENT 0x40
+/* AXI Tx Timestamp Stream FIFO Register Definitions */
+#define XAXIFIFO_TXTS_ISR 0x00000000 /* Interrupt Status Register */
+#define XAXIFIFO_TXTS_TXFD 0x00000010 /* Tx Data Write Port */
+#define XAXIFIFO_TXTS_TLR 0x00000014 /* Transmit Length Register */
+#define XAXIFIFO_TXTS_RFO 0x0000001C /* Rx Fifo Occupancy */
+#define XAXIFIFO_TXTS_RDFR 0x00000018 /* Rx Fifo reset */
+#define XAXIFIFO_TXTS_RXFD 0x00000020 /* Rx Data Read Port */
+#define XAXIFIFO_TXTS_RLR 0x00000024 /* Receive Length Register */
+#define XAXIFIFO_TXTS_SRR 0x00000028 /* AXI4-Stream Reset */
+
+#define XAXIFIFO_TXTS_INT_RC_MASK 0x04000000
+#define XAXIFIFO_TXTS_RXFD_MASK 0x7FFFFFFF
+#define XAXIFIFO_TXTS_RESET_MASK 0x000000A5
+#define XAXIFIFO_TXTS_TAG_MASK 0xFFFF0000
+#define XAXIFIFO_TXTS_TAG_SHIFT 16
+
/* Axi Ethernet registers definition */
#define XAE_RAF_OFFSET 0x00000000 /* Reset and Address filter */
#define XAE_TPF_OFFSET 0x00000004 /* Tx Pause Frame */
@@ -159,22 +189,19 @@
#define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */
#define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */
#define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */
-#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */
-#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */
-#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */
-#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */
-#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
-#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
-#define XAE_MDIO_MIS_OFFSET 0x00000600 /* MII Management Interrupt Status */
-/* MII Mgmt Interrupt Pending register offset */
-#define XAE_MDIO_MIP_OFFSET 0x00000620
-/* MII Management Interrupt Enable register offset */
-#define XAE_MDIO_MIE_OFFSET 0x00000640
-/* MII Management Interrupt Clear register offset. */
-#define XAE_MDIO_MIC_OFFSET 0x00000660
+#define XAE_EMMC_OFFSET 0x00000410 /* MAC speed configuration */
+#define XAE_RMFC_OFFSET 0x00000414 /* RX Max Frame Configuration */
+#define XAE_MDIO_MC_OFFSET 0x00000500 /* MDIO Setup */
+#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MDIO Control */
+#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MDIO Write Data */
+#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MDIO Read Data */
+#define XAE_TEMAC_IS_OFFSET 0x00000600 /* TEMAC Interrupt Status */
+#define XAE_TEMAC_IP_OFFSET 0x00000610 /* TEMAC Interrupt Pending Status */
+#define XAE_TEMAC_IE_OFFSET 0x00000620 /* TEMAC Interrupt Enable Status */
+#define XAE_TEMAC_IC_OFFSET 0x00000630 /* TEMAC Interrupt Clear Status */
#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
-#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
+#define XAE_FMC_OFFSET 0x00000708 /* Frame Filter Control */
#define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */
#define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */
@@ -235,6 +262,7 @@
#define XAE_TPID_3_MASK 0xFFFF0000 /* TPID 1 */
/* Bit masks for Axi Ethernet RCW1 register */
+#define XAE_RCW1_INBAND1588_MASK 0x00400000 /* Inband 1588 Enable */
#define XAE_RCW1_RST_MASK 0x80000000 /* Reset */
#define XAE_RCW1_JUM_MASK 0x40000000 /* Jumbo frame enable */
/* In-Band FCS enable (FCS not stripped) */
@@ -251,6 +279,7 @@
#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF
/* Bit masks for Axi Ethernet TC register */
+#define XAE_TC_INBAND1588_MASK 0x00400000 /* Inband 1588 Enable */
#define XAE_TC_RST_MASK 0x80000000 /* Reset */
#define XAE_TC_JUM_MASK 0x40000000 /* Jumbo frame enable */
/* In-Band FCS enable (FCS not generated) */
@@ -275,18 +304,7 @@
#define XAE_EMMC_LINKSPD_10 0x00000000 /* Link Speed mask for 10 Mbit */
#define XAE_EMMC_LINKSPD_100 0x40000000 /* Link Speed mask for 100 Mbit */
#define XAE_EMMC_LINKSPD_1000 0x80000000 /* Link Speed mask for 1000 Mbit */
-
-/* Bit masks for Axi Ethernet PHYC register */
-#define XAE_PHYC_SGMIILINKSPEED_MASK 0xC0000000 /* SGMII link speed mask*/
-#define XAE_PHYC_RGMIILINKSPEED_MASK 0x0000000C /* RGMII link speed */
-#define XAE_PHYC_RGMIIHD_MASK 0x00000002 /* RGMII Half-duplex */
-#define XAE_PHYC_RGMIILINK_MASK 0x00000001 /* RGMII link status */
-#define XAE_PHYC_RGLINKSPD_10 0x00000000 /* RGMII link 10 Mbit */
-#define XAE_PHYC_RGLINKSPD_100 0x00000004 /* RGMII link 100 Mbit */
-#define XAE_PHYC_RGLINKSPD_1000 0x00000008 /* RGMII link 1000 Mbit */
-#define XAE_PHYC_SGLINKSPD_10 0x00000000 /* SGMII link 10 Mbit */
-#define XAE_PHYC_SGLINKSPD_100 0x40000000 /* SGMII link 100 Mbit */
-#define XAE_PHYC_SGLINKSPD_1000 0x80000000 /* SGMII link 1000 Mbit */
+#define XAE_EMMC_LINKSPD_2500 0x80000000 /* Link Speed mask for 2500 Mbit */
/* Bit masks for Axi Ethernet MDIO interface MC register */
#define XAE_MDIO_MC_MDIOEN_MASK 0x00000040 /* MII management enable */
@@ -304,30 +322,19 @@
#define XAE_MDIO_MCR_INITIATE_MASK 0x00000800 /* Ready Mask */
#define XAE_MDIO_MCR_READY_MASK 0x00000080 /* Ready Mask */
-/* Bit masks for Axi Ethernet MDIO interface MIS, MIP, MIE, MIC registers */
-#define XAE_MDIO_INT_MIIM_RDY_MASK 0x00000001 /* MIIM Interrupt */
-
/* Bit masks for Axi Ethernet UAW1 register */
/* Station address bits [47:32]; Station address
* bits [31:0] are stored in register UAW0
*/
#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF
-/* Bit masks for Axi Ethernet FMI register */
-#define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */
-#define XAE_FMI_IND_MASK 0x00000003 /* Index Mask */
+/* Bit masks for Axi Ethernet FMC register */
+#define XAE_FMC_PM_MASK 0x80000000 /* Promis. mode enable */
+#define XAE_FMC_IND_MASK 0x00000003 /* Index Mask */
#define XAE_MDIO_DIV_DFT 29 /* Default MDIO clock divisor */
-/* Defines for different options for C_PHY_TYPE parameter in Axi Ethernet IP */
-#define XAE_PHY_TYPE_MII 0
-#define XAE_PHY_TYPE_GMII 1
-#define XAE_PHY_TYPE_RGMII_1_3 2
-#define XAE_PHY_TYPE_RGMII_2_0 3
-#define XAE_PHY_TYPE_SGMII 4
-#define XAE_PHY_TYPE_1000BASE_X 5
-
- /* Total number of entries in the hardware multicast table. */
+/* Total number of entries in the hardware multicast table. */
#define XAE_MULTICAST_CAM_TABLE_NUM 4
/* Axi Ethernet Synthesis features */
@@ -344,12 +351,149 @@
#define DELAY_OF_ONE_MILLISEC 1000
+#define XAXIENET_NAPI_WEIGHT 64
+
+/* Defintions of 1588 PTP in Axi Ethernet IP */
+#define TX_TS_OP_NOOP 0x0
+#define TX_TS_OP_ONESTEP 0x1
+#define TX_TS_OP_TWOSTEP 0x2
+#define TX_TS_CSUM_UPDATE 0x1
+#define TX_PTP_CSUM_OFFSET 0x28
+#define TX_PTP_TS_OFFSET 0x4C
+
+/* Read/Write access to the registers */
+#ifndef out_be32
+#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_ARCH_ZYNQMP)
+#define in_be32(offset) __raw_readl(offset)
+#define out_be32(offset, val) __raw_writel(val, offset)
+#endif
+#endif
+
+/* XXV MAC Register Definitions */
+#define XXV_GT_RESET_OFFSET 0x00000000
+#define XXV_TC_OFFSET 0x0000000C
+#define XXV_RCW1_OFFSET 0x00000014
+#define XXV_JUM_OFFSET 0x00000018
+#define XXV_TICKREG_OFFSET 0x00000020
+#define XXV_STATRX_BLKLCK_OFFSET 0x0000040C
+#define XXV_USXGMII_AN_OFFSET 0x000000C8
+#define XXV_USXGMII_AN_STS_OFFSET 0x00000458
+
+/* XXV MAC Register Mask Definitions */
+#define XXV_GT_RESET_MASK BIT(0)
+#define XXV_TC_TX_MASK BIT(0)
+#define XXV_RCW1_RX_MASK BIT(0)
+#define XXV_RCW1_FCS_MASK BIT(1)
+#define XXV_TC_FCS_MASK BIT(1)
+#define XXV_MIN_JUM_MASK GENMASK(7, 0)
+#define XXV_MAX_JUM_MASK GENMASK(10, 8)
+#define XXV_RX_BLKLCK_MASK BIT(0)
+#define XXV_TICKREG_STATEN_MASK BIT(0)
+
+/* USXGMII Register Mask Definitions */
+#define USXGMII_AN_EN BIT(5)
+#define USXGMII_AN_RESET BIT(6)
+#define USXGMII_AN_RESTART BIT(7)
+#define USXGMII_EN BIT(16)
+#define USXGMII_RATE_MASK 0x0E000700
+#define USXGMII_RATE_1G 0x04000200
+#define USXGMII_RATE_2G5 0x08000400
+#define USXGMII_RATE_10M 0x0
+#define USXGMII_RATE_100M 0x02000100
+#define USXGMII_RATE_5G 0x0A000500
+#define USXGMII_RATE_10G 0x06000300
+#define USXGMII_FD BIT(28)
+#define USXGMII_LINK_STS BIT(31)
+
+/* USXGMII AN STS register mask definitions */
+#define USXGMII_AN_STS_COMP_MASK BIT(16)
+
+/* MCDMA Register Definitions */
+#define XMCDMA_CR_OFFSET 0x00
+#define XMCDMA_SR_OFFSET 0x04
+#define XMCDMA_CHEN_OFFSET 0x08
+#define XMCDMA_CHSER_OFFSET 0x0C
+#define XMCDMA_ERR_OFFSET 0x10
+#define XMCDMA_PKTDROP_OFFSET 0x14
+#define XMCDMA_TXWEIGHT0_OFFSET 0x18
+#define XMCDMA_TXWEIGHT1_OFFSET 0x1C
+#define XMCDMA_RXINT_SER_OFFSET 0x20
+#define XMCDMA_TXINT_SER_OFFSET 0x28
+
+#define XMCDMA_CHOBS1_OFFSET 0x440
+#define XMCDMA_CHOBS2_OFFSET 0x444
+#define XMCDMA_CHOBS3_OFFSET 0x448
+#define XMCDMA_CHOBS4_OFFSET 0x44C
+#define XMCDMA_CHOBS5_OFFSET 0x450
+#define XMCDMA_CHOBS6_OFFSET 0x454
+
+#define XMCDMA_CHAN_RX_OFFSET 0x500
+
+/* Per Channel Registers */
+#define XMCDMA_CHAN_CR_OFFSET(chan_id) (0x40 + ((chan_id) - 1) * 0x40)
+#define XMCDMA_CHAN_SR_OFFSET(chan_id) (0x44 + ((chan_id) - 1) * 0x40)
+#define XMCDMA_CHAN_CURDESC_OFFSET(chan_id) (0x48 + ((chan_id) - 1) * 0x40)
+#define XMCDMA_CHAN_TAILDESC_OFFSET(chan_id) (0x50 + ((chan_id) - 1) * 0x40)
+#define XMCDMA_CHAN_PKTDROP_OFFSET(chan_id) (0x58 + ((chan_id) - 1) * 0x40)
+
+#define XMCDMA_RX_OFFSET 0x500
+
+/* MCDMA Mask registers */
+#define XMCDMA_CR_RUNSTOP_MASK BIT(0) /* Start/stop DMA channel */
+#define XMCDMA_CR_RESET_MASK BIT(2) /* Reset DMA engine */
+
+#define XMCDMA_SR_HALTED_MASK BIT(0)
+#define XMCDMA_SR_IDLE_MASK BIT(1)
+
+#define XMCDMA_IRQ_ERRON_OTHERQ_MASK BIT(3)
+#define XMCDMA_IRQ_PKTDROP_MASK BIT(4)
+#define XMCDMA_IRQ_IOC_MASK BIT(5)
+#define XMCDMA_IRQ_DELAY_MASK BIT(6)
+#define XMCDMA_IRQ_ERR_MASK BIT(7)
+#define XMCDMA_IRQ_ALL_MASK GENMASK(7, 5)
+#define XMCDMA_PKTDROP_COALESCE_MASK GENMASK(15, 8)
+#define XMCDMA_COALESCE_MASK GENMASK(23, 16)
+#define XMCDMA_DELAY_MASK GENMASK(31, 24)
+
+#define XMCDMA_CHEN_MASK GENMASK(7, 0)
+#define XMCDMA_CHID_MASK GENMASK(7, 0)
+
+#define XMCDMA_ERR_INTERNAL_MASK BIT(0)
+#define XMCDMA_ERR_SLAVE_MASK BIT(1)
+#define XMCDMA_ERR_DECODE_MASK BIT(2)
+#define XMCDMA_ERR_SG_INT_MASK BIT(4)
+#define XMCDMA_ERR_SG_SLV_MASK BIT(5)
+#define XMCDMA_ERR_SG_DEC_MASK BIT(6)
+
+#define XMCDMA_PKTDROP_CNT_MASK GENMASK(31, 0)
+
+#define XMCDMA_BD_CTRL_TXSOF_MASK 0x80000000 /* First tx packet */
+#define XMCDMA_BD_CTRL_TXEOF_MASK 0x40000000 /* Last tx packet */
+#define XMCDMA_BD_CTRL_ALL_MASK 0xC0000000 /* All control bits */
+#define XMCDMA_BD_STS_ALL_MASK 0xF0000000 /* All status bits */
+
+#define XMCDMA_COALESCE_SHIFT 16
+#define XMCDMA_DELAY_SHIFT 24
+#define XMCDMA_DFT_TX_THRESHOLD 1
+
+#define XMCDMA_TXWEIGHT_CH_MASK(chan_id) GENMASK(((chan_id) * 4 + 3), \
+ (chan_id) * 4)
+#define XMCDMA_TXWEIGHT_CH_SHIFT(chan_id) ((chan_id) * 4)
+
+/* PTP Packet length */
+#define XAE_TX_PTP_LEN 16
+#define XXV_TX_PTP_LEN 12
+
+/* Macros used when AXI DMA h/w is configured without DRE */
+#define XAE_TX_BUFFERS 64
+#define XAE_MAX_PKT_LEN 8192
+
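Worked values for the per-channel MCDMA offsets above (standalone; assumes the S2MM side mirrors the MM2S registers at XMCDMA_RX_OFFSET, per XMCDMA_CHAN_RX_OFFSET):

	#include <stdio.h>

	#define XMCDMA_CHAN_CR_OFFSET(chan_id)	(0x40 + ((chan_id) - 1) * 0x40)
	#define XMCDMA_RX_OFFSET		0x500

	int main(void)
	{
		/* channel IDs are 1-based; each channel has a 0x40 register stride */
		for (int ch = 1; ch <= 4; ch++)
			printf("ch%d: MM2S CR @ 0x%03x, S2MM CR @ 0x%03x\n", ch,
			       XMCDMA_CHAN_CR_OFFSET(ch),
			       XMCDMA_RX_OFFSET + XMCDMA_CHAN_CR_OFFSET(ch));
		return 0;
	}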
/**
* struct axidma_bd - Axi Dma buffer descriptor layout
* @next: MM2S/S2MM Next Descriptor Pointer
- * @reserved1: Reserved and not used
+ * @reserved1: Reserved and not used for 32-bit
* @phys: MM2S/S2MM Buffer Address
- * @reserved2: Reserved and not used
+ * @reserved2: Reserved and not used for 32-bit
* @reserved3: Reserved and not used
* @reserved4: Reserved and not used
* @cntrl: MM2S/S2MM Control value
@@ -359,12 +503,22 @@
* @app2: MM2S/S2MM User Application Field 2.
* @app3: MM2S/S2MM User Application Field 3.
* @app4: MM2S/S2MM User Application Field 4.
+ * @ptp_tx_skb: If timestamping is enabled, used for the timestamping skb;
+ * otherwise reserved.
+ * @ptp_tx_ts_tag: Tag value of 2-step timestamping if timestamping is enabled;
+ * otherwise reserved.
+ * @tx_skb: Transmit skb address
+ * @tx_desc_mapping: Tx Descriptor DMA mapping type.
*/
struct axidma_bd {
- u32 next; /* Physical address of next buffer descriptor */
+ phys_addr_t next; /* Physical address of next buffer descriptor */
+#ifndef CONFIG_PHYS_ADDR_T_64BIT
u32 reserved1;
- u32 phys;
+#endif
+ phys_addr_t phys;
+#ifndef CONFIG_PHYS_ADDR_T_64BIT
u32 reserved2;
+#endif
u32 reserved3;
u32 reserved4;
u32 cntrl;
@@ -375,9 +529,93 @@ struct axidma_bd {
u32 app3;
u32 app4; /* Last field used by HW */
struct sk_buff *skb;
+ phys_addr_t sw_id_offset; /* first unused field by h/w */
+ phys_addr_t ptp_tx_skb;
+ u32 ptp_tx_ts_tag;
+ phys_addr_t tx_skb;
+ u32 tx_desc_mapping;
} __aligned(XAXIDMA_BD_MINIMUM_ALIGNMENT);
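Why the conditional pads: the DMA engine sees fixed 64-bit slots for next and phys, so 32-bit builds (where phys_addr_t is 4 bytes) restore the slot width explicitly (sketch):

	/*
	 *   64-bit kernel: phys_addr_t next;                 -> 8 bytes
	 *   32-bit kernel: phys_addr_t next; u32 reserved1;  -> 4 + 4 bytes
	 *
	 * Either way the hardware's view of the descriptor is unchanged.
	 */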
/**
+ * struct aximcdma_bd - Axi MCDMA buffer descriptor layout
+ * @next: MM2S/S2MM Next Descriptor Pointer
+ * @reserved1: Reserved and not used for 32-bit
+ * @phys: MM2S/S2MM Buffer Address
+ * @reserved2: Reserved and not used for 32-bit
+ * @reserved3: Reserved and not used
+ * @cntrl: MM2S/S2MM Control value
+ * @status: S2MM Status value
+ * @sband_stats: S2MM Sideband Status value
+ * MM2S Status value
+ * @app0: MM2S/S2MM User Application Field 0.
+ * @app1: MM2S/S2MM User Application Field 1.
+ * @app2: MM2S/S2MM User Application Field 2.
+ * @app3: MM2S/S2MM User Application Field 3.
+ * @app4: MM2S/S2MM User Application Field 4.
+ * @sw_id_offset: MM2S/S2MM Sw ID
+ * @ptp_tx_skb: If timestamping is enabled, used for the timestamping skb;
+ * otherwise reserved.
+ * @ptp_tx_ts_tag: Tag value of 2-step timestamping if timestamping is enabled;
+ * otherwise reserved.
+ * @tx_skb: Transmit skb address
+ * @tx_desc_mapping: Tx Descriptor DMA mapping type.
+ */
+struct aximcdma_bd {
+ phys_addr_t next; /* Physical address of next buffer descriptor */
+#ifndef CONFIG_PHYS_ADDR_T_64BIT
+ u32 reserved1;
+#endif
+ phys_addr_t phys;
+#ifndef CONFIG_PHYS_ADDR_T_64BIT
+ u32 reserved2;
+#endif
+ u32 reserved3;
+ u32 cntrl;
+ u32 status;
+ u32 sband_stats;
+ u32 app0;
+ u32 app1; /* TX start << 16 | insert */
+ u32 app2; /* TX csum seed */
+ u32 app3;
+ u32 app4;
+ phys_addr_t sw_id_offset; /* first unused field by h/w */
+ phys_addr_t ptp_tx_skb;
+ u32 ptp_tx_ts_tag;
+ phys_addr_t tx_skb;
+ u32 tx_desc_mapping;
+} __aligned(128);
+
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
+
+#if defined(CONFIG_XILINX_TSN)
+#define XAE_MAX_QUEUES 5
+#elif defined(CONFIG_AXIENET_HAS_MCDMA)
+#define XAE_MAX_QUEUES 16
+#else
+#define XAE_MAX_QUEUES 1
+#endif
+
+#ifdef CONFIG_XILINX_TSN
+/* TSN queues range is 2 to 5, e.g. for num_tc = 2, minimum queues = 2;
+ * for num_tc = 3 with sideband signalling, maximum queues = 5
+ */
+#define XAE_MAX_TSN_TC 3
+#define XAE_TSN_MIN_QUEUES 2
+#endif
+
+enum axienet_tsn_ioctl {
+ SIOCCHIOCTL = SIOCDEVPRIVATE,
+ SIOC_GET_SCHED,
+ SIOC_PREEMPTION_CFG,
+ SIOC_PREEMPTION_CTRL,
+ SIOC_PREEMPTION_STS,
+ SIOC_PREEMPTION_COUNTER,
+ SIOC_QBU_USER_OVERRIDE,
+ SIOC_QBU_STS,
+};
+
+/**
* struct axienet_local - axienet private per device data
* @ndev: Pointer for net_device to which it will be attached.
* @dev: Pointer to device structure
@@ -385,25 +623,30 @@ struct axidma_bd {
* @mii_bus: Pointer to MII bus structure
* @regs_start: Resource start for axienet device addresses
* @regs: Base address for the axienet_local device address space
- * @dma_regs: Base address for the axidma device address space
- * @dma_err_tasklet: Tasklet structure to process Axi DMA errors
- * @tx_irq: Axidma TX IRQ number
- * @rx_irq: Axidma RX IRQ number
+ * @mcdma_regs: Base address for the aximcdma device address space
+ * @napi: Napi Structure array for all dma queues
+ * @num_tx_queues: Total number of Tx DMA queues
+ * @num_rx_queues: Total number of Rx DMA queues
+ * @dq: DMA queues data
* @phy_mode: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
+ * @is_tsn: Denotes a tsn port
+ * @temac_no: Denotes the port number in TSN IP
+ * @num_tc: Total number of TSN Traffic classes
+ * @timer_priv: PTP timer private data pointer
+ * @ptp_tx_irq: PTP tx irq
+ * @ptp_rx_irq: PTP rx irq
+ * @rtc_irq: PTP RTC irq
+ * @qbv_irq: QBV sched irq
+ * @ptp_ts_type: ptp time stamp type - 1 or 2 step mode
+ * @ptp_rx_hw_pointer: ptp rx hw pointer
+ * @ptp_rx_sw_pointer: ptp rx sw pointer
+ * @ptp_txq: PTP tx queue header
+ * @tx_tstamp_work: PTP timestamping work queue
+ * @ptp_tx_lock: PTP tx lock
+ * @dma_err_tasklet: Tasklet structure to process Axi DMA errors
* @options: AxiEthernet option word
* @last_link: Phy link state in which the PHY was negotiated earlier
* @features: Stores the extended features supported by the axienet hw
- * @tx_bd_v: Virtual address of the TX buffer descriptor ring
- * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
- * @rx_bd_v: Virtual address of the RX buffer descriptor ring
- * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
- * @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being
- * accessed currently. Used while alloc. BDs before a TX starts
- * @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being
- * accessed currently. Used while processing BDs after the TX
- * completed.
- * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
- * accessed currently.
* @max_frm_size: Stores the maximum size of the frame that can be that
* Txed/Rxed in the existing hardware. If jumbo option is
* supported, the maximum frame size would be 9k. Else it is
@@ -413,6 +656,27 @@ struct axidma_bd {
* @csum_offload_on_rx_path: Stores the checksum selection on RX side.
* @coalesce_count_rx: Store the irq coalesce on RX side.
* @coalesce_count_tx: Store the irq coalesce on TX side.
+ * @phy_flags: Phy interface flags.
+ * @eth_hasnobuf: Ethernet is configured in Non buf mode.
+ * @eth_hasptp: Ethernet is configured for ptp.
+ * @axienet_config: Ethernet config structure
+ * @tx_ts_regs: Base address for the axififo device address space.
+ * @rx_ts_regs: Base address for the rx axififo device address space.
+ * @tstamp_config: Hardware timestamp config structure.
+ * @tx_ptpheader: Stores the tx ptp header.
+ * @aclk: AXI4-Lite clock for ethernet and dma.
+ * @eth_sclk: AXI4-Stream interface clock.
+ * @eth_refclk: Stable clock used by signal delay primitives and transceivers.
+ * @eth_dclk: Dynamic Reconfiguration Port(DRP) clock.
+ * @dma_sg_clk: DMA Scatter Gather Clock.
+ * @dma_rx_clk: DMA S2MM Primary Clock.
+ * @dma_tx_clk: DMA MM2S Primary Clock.
+ * @qnum: Axi Ethernet queue number to operate on.
+ * @chan_num: MCDMA Channel number to operate on.
+ * @chan_id: MCDMA Channel id used in conjunction with weight parameter.
+ * @weight: MCDMA Channel weight value to be configured for.
+ * @usxgmii_rate: USXGMII PHY speed.
+ * @dma_mask: Specify the width of the DMA address space.
*/
struct axienet_local {
struct net_device *ndev;
@@ -433,28 +697,45 @@ struct axienet_local {
/* IO registers, dma functions and IRQs */
resource_size_t regs_start;
void __iomem *regs;
- void __iomem *dma_regs;
-
- struct work_struct dma_err_task;
+ void __iomem *mcdma_regs;
+
+ struct tasklet_struct dma_err_tasklet[XAE_MAX_QUEUES];
+ struct napi_struct napi[XAE_MAX_QUEUES]; /* NAPI Structure */
+
+ #define XAE_TEMAC1 0
+ #define XAE_TEMAC2 1
+ u8 temac_no;
+ u16 num_tx_queues; /* Number of TX DMA queues */
+ u16 num_rx_queues; /* Number of RX DMA queues */
+ struct axienet_dma_q *dq[XAE_MAX_QUEUES]; /* DMA queue data */
+
+ bool is_tsn;
+#ifdef CONFIG_XILINX_TSN
+ u16 num_tc; /* Number of TSN Traffic classes */
+#ifdef CONFIG_XILINX_TSN_PTP
+ void *timer_priv;
+ int ptp_tx_irq;
+ int ptp_rx_irq;
+ int rtc_irq;
+ int qbv_irq;
+ int ptp_ts_type;
+ u8 ptp_rx_hw_pointer;
+ u8 ptp_rx_sw_pointer;
+ struct sk_buff_head ptp_txq;
+ struct work_struct tx_tstamp_work;
+ spinlock_t ptp_tx_lock; /* TSN PTP tx lock*/
+#endif
+#endif
- int tx_irq;
- int rx_irq;
int eth_irq;
phy_interface_t phy_mode;
u32 options; /* Current options word */
u32 features;
- /* Buffer descriptors */
- struct axidma_bd *tx_bd_v;
- dma_addr_t tx_bd_p;
u32 tx_bd_num;
- struct axidma_bd *rx_bd_v;
- dma_addr_t rx_bd_p;
+
u32 rx_bd_num;
- u32 tx_bd_ci;
- u32 tx_bd_tail;
- u32 rx_bd_ci;
u32 max_frm_size;
u32 rxmem;
@@ -464,6 +745,133 @@ struct axienet_local {
u32 coalesce_count_rx;
u32 coalesce_count_tx;
+ u32 phy_flags;
+ bool eth_hasnobuf;
+ bool eth_hasptp;
+ const struct axienet_config *axienet_config;
+
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+ void __iomem *tx_ts_regs;
+ void __iomem *rx_ts_regs;
+ struct hwtstamp_config tstamp_config;
+ u8 *tx_ptpheader;
+#endif
+ struct clk *aclk;
+ struct clk *eth_sclk;
+ struct clk *eth_refclk;
+ struct clk *eth_dclk;
+ struct clk *dma_sg_clk;
+ struct clk *dma_rx_clk;
+ struct clk *dma_tx_clk;
+
+ /* MCDMA Fields */
+ int qnum[XAE_MAX_QUEUES];
+ int chan_num[XAE_MAX_QUEUES];
+ /* WRR Fields */
+ u16 chan_id;
+ u16 weight;
+
+ u32 usxgmii_rate;
+ u8 dma_mask;
+};
+
+/**
+ * struct axienet_dma_q - axienet private per dma queue data
+ * @lp: Parent pointer
+ * @dma_regs: Base address for the axidma device address space
+ * @tx_irq: Axidma TX IRQ number
+ * @rx_irq: Axidma RX IRQ number
+ * @tx_lock: Spin lock for tx path
+ * @rx_lock: Spin lock for rx path
+ * @tx_bd_v: Virtual address of the TX buffer descriptor ring
+ * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
+ * @rx_bd_v: Virtual address of the RX buffer descriptor ring
+ * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
+ * @tx_buf: Virtual address of the Tx buffer pool used by the driver when
+ * DMA h/w is configured without DRE.
+ * @tx_bufs: Virtual address of the Tx buffer pool.
+ * @tx_bufs_dma: Physical address of the Tx buffer address used by the driver
+ * when DMA h/w is configured without DRE.
+ * @eth_hasdre: Tells whether DMA h/w is configured with DRE or not.
+ * @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being
+ * accessed currently. Used while alloc. BDs before a TX starts
+ * @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being
+ * accessed currently. Used while processing BDs after the TX
+ * completed.
+ * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
+ * accessed currently.
+ * @chan_id: MCDMA channel to operate on.
+ * @rx_offset: MCDMA S2MM channel starting offset.
+ * @txq_bd_v: Virtual address of the MCDMA TX buffer descriptor ring
+ * @rxq_bd_v: Virtual address of the MCDMA RX buffer descriptor ring
+ * @tx_packets: Number of transmit packets processed by the dma queue.
+ * @tx_bytes: Number of transmit bytes processed by the dma queue.
+ * @rx_packets: Number of receive packets processed by the dma queue.
+ * @rx_bytes: Number of receive bytes processed by the dma queue.
+ */
+struct axienet_dma_q {
+ struct axienet_local *lp; /* parent */
+ void __iomem *dma_regs;
+
+ int tx_irq;
+ int rx_irq;
+
+ spinlock_t tx_lock; /* tx lock */
+ spinlock_t rx_lock; /* rx lock */
+
+ /* Buffer descriptors */
+ struct axidma_bd *tx_bd_v;
+ struct axidma_bd *rx_bd_v;
+ dma_addr_t rx_bd_p;
+ dma_addr_t tx_bd_p;
+
+ unsigned char *tx_buf[XAE_TX_BUFFERS];
+ unsigned char *tx_bufs;
+ dma_addr_t tx_bufs_dma;
+ bool eth_hasdre;
+
+ u32 tx_bd_ci;
+ u32 rx_bd_ci;
+ u32 tx_bd_tail;
+
+ /* MCDMA fields */
+ u16 chan_id;
+ u32 rx_offset;
+ struct aximcdma_bd *txq_bd_v;
+ struct aximcdma_bd *rxq_bd_v;
+
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+};
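+
+/* Editor's sketch (assumption, not from this patch): the DMA code below
+ * walks the per-queue structures with for_each_tx_dma_queue() and
+ * for_each_rx_dma_queue(). Assuming these helpers are plain index loops
+ * over the configured queue counts, they could be defined as:
+ *
+ *	#define for_each_tx_dma_queue(lp, i) \
+ *		for ((i) = 0; (i) < (lp)->num_tx_queues; (i)++)
+ *	#define for_each_rx_dma_queue(lp, i) \
+ *		for ((i) = 0; (i) < (lp)->num_rx_queues; (i)++)
+ */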
+
+#define AXIENET_TX_SSTATS_LEN(lp) ((lp)->num_tx_queues * 2)
+#define AXIENET_RX_SSTATS_LEN(lp) ((lp)->num_rx_queues * 2)
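+
+/* Editor's illustration (hypothetical helper): each queue exposes two
+ * u64 counters per direction (packets and bytes), so an ethtool sset
+ * count for the per-queue stats could be computed as:
+ *
+ *	static int example_sset_count(struct axienet_local *lp)
+ *	{
+ *		return AXIENET_TX_SSTATS_LEN(lp) +
+ *		       AXIENET_RX_SSTATS_LEN(lp);
+ *	}
+ */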
+
+/**
+ * enum axienet_ip_type - AXIENET IP/MAC type.
+ *
+ * @XAXIENET_1G: IP type is 1G MAC.
+ * @XAXIENET_2_5G: IP type is 2.5G MAC.
+ * @XAXIENET_LEGACY_10G: IP type is legacy 10G MAC.
+ * @XAXIENET_10G_25G: IP type is 10G/25G MAC(XXV MAC).
+ *
+ */
+enum axienet_ip_type {
+ XAXIENET_1G = 0,
+ XAXIENET_2_5G,
+ XAXIENET_LEGACY_10G,
+ XAXIENET_10G_25G,
+};
+
+struct axienet_config {
+ enum axienet_ip_type mactype;
+ void (*setoptions)(struct net_device *ndev, u32 options);
+ int (*clk_init)(struct platform_device *pdev, struct clk **axi_aclk,
+ struct clk **axis_clk, struct clk **ref_clk,
+ struct clk **dclk);
+ u32 tx_ptplen;
};
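+
+/* Editor's sketch (hypothetical instance, not from this patch): a config
+ * binds each MAC flavor to its option handler, clock-init hook and PTP
+ * frame length. The identifiers axienet_clk_init and XAE_TX_PTP_LEN are
+ * assumptions for illustration:
+ *
+ *	static const struct axienet_config axienet_1g_config = {
+ *		.mactype	= XAXIENET_1G,
+ *		.setoptions	= axienet_setoptions,
+ *		.clk_init	= axienet_clk_init,
+ *		.tx_ptplen	= XAE_TX_PTP_LEN,
+ *	};
+ */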
/**
@@ -478,6 +886,12 @@ struct axienet_option {
u32 m_or;
};
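+/**
+ * struct xxvenet_option - Used to set xxvenet hardware options
+ * @opt: Option to be set
+ * @reg: Register offset to be written for setting the option
+ * @m_or: Mask to be ORed for setting the option in the register
+ */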
+struct xxvenet_option {
+ u32 opt;
+ u32 reg;
+ u32 m_or;
+};
+
/**
* axienet_ior - Memory mapped Axi Ethernet register read
* @lp: Pointer to axienet local structure
@@ -512,10 +926,183 @@ static inline void axienet_iow(struct axienet_local *lp, off_t offset,
iowrite32(value, lp->regs + offset);
}
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+/**
+ * axienet_txts_ior - Memory mapped AXI FIFO MM S register read
+ * @lp: Pointer to axienet_local structure
+ * @reg: Address offset from the base address of AXI FIFO MM S
+ * core
+ *
+ * Return: the contents of the AXI FIFO MM S register
+ */
+
+static inline u32 axienet_txts_ior(struct axienet_local *lp, off_t reg)
+{
+ return in_be32(lp->tx_ts_regs + reg);
+}
+
+/**
+ * axienet_txts_iow - Memory mapped AXI FIFO MM S register write
+ * @lp: Pointer to axienet_local structure
+ * @reg: Address offset from the base address of AXI FIFO MM S
+ * core.
+ * @value: Value to be written into the AXI FIFO MM S register
+ */
+static inline void axienet_txts_iow(struct axienet_local *lp, off_t reg,
+ u32 value)
+{
+ out_be32((lp->tx_ts_regs + reg), value);
+}
+
+/**
+ * axienet_rxts_ior - Memory mapped AXI FIFO MM S register read
+ * @lp: Pointer to axienet_local structure
+ * @reg: Address offset from the base address of AXI FIFO MM S
+ * core
+ *
+ * Return: the contents of the AXI FIFO MM S register
+ */
+
+static inline u32 axienet_rxts_ior(struct axienet_local *lp, off_t reg)
+{
+ return in_be32(lp->rx_ts_regs + reg);
+}
+
+/**
+ * axienet_rxts_iow - Memory mapped AXI FIFO MM S register write
+ * @lp: Pointer to axienet_local structure
+ * @reg: Address offset from the base address of AXI FIFO MM S
+ * core.
+ * @value: Value to be written into the AXI FIFO MM S register
+ */
+static inline void axienet_rxts_iow(struct axienet_local *lp, off_t reg,
+ u32 value)
+{
+ out_be32((lp->rx_ts_regs + reg), value);
+}
+#endif
+
+/**
+ * axienet_dma_in32 - Memory mapped Axi DMA register read
+ * @q: Pointer to DMA queue structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ *
+ * Return: The contents of the Axi DMA register
+ *
+ * This function returns the contents of the corresponding Axi DMA register.
+ */
+static inline u32 axienet_dma_in32(struct axienet_dma_q *q, off_t reg)
+{
+ return in_be32(q->dma_regs + reg);
+}
+
+/**
+ * axienet_dma_out32 - Memory mapped Axi DMA register write.
+ * @q: Pointer to DMA queue structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ * @value: Value to be written into the Axi DMA register
+ *
+ * This function writes the desired value into the corresponding Axi DMA
+ * register.
+ */
+static inline void axienet_dma_out32(struct axienet_dma_q *q,
+ off_t reg, u32 value)
+{
+ out_be32((q->dma_regs + reg), value);
+}
+
+/**
+ * axienet_dma_bdout - Memory mapped Axi DMA register Buffer Descriptor write.
+ * @q: Pointer to DMA queue structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ * @value: Value to be written into the Axi DMA register
+ *
+ * This function writes the desired value into the corresponding Axi DMA
+ * register.
+ */
+static inline void axienet_dma_bdout(struct axienet_dma_q *q,
+ off_t reg, dma_addr_t value)
+{
+#if defined(CONFIG_PHYS_ADDR_T_64BIT)
+ writeq(value, (q->dma_regs + reg));
+#else
+ writel(value, (q->dma_regs + reg));
+#endif
+}
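+
+/* Editor's usage sketch (assumption, not from this patch): the BD
+ * pointer registers take a full dma_addr_t, which is why
+ * axienet_dma_bdout() selects writeq() or writel(). Kicking a Tx
+ * channel by updating its tail descriptor would then look like:
+ *
+ *	axienet_dma_bdout(q, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+ *
+ * where XAXIDMA_TX_TDESC_OFFSET is the assumed Tx tail descriptor
+ * register offset.
+ */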
+
/* Function prototypes visible in xilinx_axienet_mdio.c for other files */
int axienet_mdio_enable(struct axienet_local *lp);
void axienet_mdio_disable(struct axienet_local *lp);
int axienet_mdio_setup(struct axienet_local *lp);
void axienet_mdio_teardown(struct axienet_local *lp);
+#ifdef CONFIG_XILINX_TSN_PTP
+void axienet_tx_tstamp(struct work_struct *work);
+#endif
+#ifdef CONFIG_XILINX_TSN_QBV
+int axienet_qbv_init(struct net_device *ndev);
+void axienet_qbv_remove(struct net_device *ndev);
+int axienet_set_schedule(struct net_device *ndev, void __user *useraddr);
+int axienet_get_schedule(struct net_device *ndev, void __user *useraddr);
+#endif
+
+#ifdef CONFIG_XILINX_TSN_QBR
+int axienet_preemption(struct net_device *ndev, void __user *useraddr);
+int axienet_preemption_ctrl(struct net_device *ndev, void __user *useraddr);
+int axienet_preemption_sts(struct net_device *ndev, void __user *useraddr);
+int axienet_preemption_cnt(struct net_device *ndev, void __user *useraddr);
+#ifdef CONFIG_XILINX_TSN_QBV
+int axienet_qbu_user_override(struct net_device *ndev, void __user *useraddr);
+int axienet_qbu_sts(struct net_device *ndev, void __user *useraddr);
+#endif
+#endif
+
+void __maybe_unused axienet_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q);
+int __maybe_unused axienet_dma_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q);
+void axienet_dma_err_handler(unsigned long data);
+irqreturn_t __maybe_unused axienet_tx_irq(int irq, void *_ndev);
+irqreturn_t __maybe_unused axienet_rx_irq(int irq, void *_ndev);
+void axienet_start_xmit_done(struct net_device *ndev, struct axienet_dma_q *q);
+void axienet_dma_bd_release(struct net_device *ndev);
+int __axienet_device_reset(struct axienet_dma_q *q);
+void axienet_set_mac_address(struct net_device *ndev, const void *address);
+void axienet_set_multicast_list(struct net_device *ndev);
+int xaxienet_rx_poll(struct napi_struct *napi, int quota);
+
+#if defined(CONFIG_AXIENET_HAS_MCDMA)
+int __maybe_unused axienet_mcdma_rx_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q);
+int __maybe_unused axienet_mcdma_tx_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q);
+void __maybe_unused axienet_mcdma_tx_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q);
+void __maybe_unused axienet_mcdma_rx_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q);
+irqreturn_t __maybe_unused axienet_mcdma_tx_irq(int irq, void *_ndev);
+irqreturn_t __maybe_unused axienet_mcdma_rx_irq(int irq, void *_ndev);
+void __maybe_unused axienet_mcdma_err_handler(unsigned long data);
+void axienet_strings(struct net_device *ndev, u32 sset, u8 *data);
+int axienet_sset_count(struct net_device *ndev, int sset);
+void axienet_get_stats(struct net_device *ndev,
+ struct ethtool_stats *stats,
+ u64 *data);
+int axeinet_mcdma_create_sysfs(struct kobject *kobj);
+void axeinet_mcdma_remove_sysfs(struct kobject *kobj);
+int __maybe_unused axienet_mcdma_tx_probe(struct platform_device *pdev,
+ struct device_node *np,
+ struct axienet_local *lp);
+int __maybe_unused axienet_mcdma_rx_probe(struct platform_device *pdev,
+ struct axienet_local *lp,
+ struct net_device *ndev);
+#endif
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+void axienet_tx_hwtstamp(struct axienet_local *lp,
+ struct aximcdma_bd *cur_p);
+#else
+void axienet_tx_hwtstamp(struct axienet_local *lp,
+ struct axidma_bd *cur_p);
+#endif
#endif /* XILINX_AXI_ENET_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_dma.c b/drivers/net/ethernet/xilinx/xilinx_axienet_dma.c
new file mode 100644
index 000000000000..d265a8881aa2
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_dma.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Xilinx AXI Ethernet (DMA programming)
+ *
+ * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
+ * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
+ * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
+ * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2010 - 2012 Xilinx, Inc.
+ * Copyright (C) 2018 Xilinx, Inc. All rights reserved.
+ *
+ * This file contains helper functions for AXI DMA TX and RX programming.
+ */
+
+#include "xilinx_axienet.h"
+
+/**
+ * axienet_bd_free - Release buffer descriptor rings for individual dma queue
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * This function is a helper function for axienet_dma_bd_release.
+ */
+
+void __maybe_unused axienet_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ int i;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ for (i = 0; i < lp->rx_bd_num; i++) {
+ dma_unmap_single(ndev->dev.parent, q->rx_bd_v[i].phys,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ dev_kfree_skb((struct sk_buff *)
+ (q->rx_bd_v[i].sw_id_offset));
+ }
+
+ if (q->rx_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*q->rx_bd_v) * lp->rx_bd_num,
+ q->rx_bd_v,
+ q->rx_bd_p);
+ }
+ if (q->tx_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*q->tx_bd_v) * lp->tx_bd_num,
+ q->tx_bd_v,
+ q->tx_bd_p);
+ }
+ if (q->tx_bufs) {
+ dma_free_coherent(ndev->dev.parent,
+ XAE_MAX_PKT_LEN * lp->tx_bd_num,
+ q->tx_bufs,
+ q->tx_bufs_dma);
+ }
+}
+
+/**
+ * __dma_txq_init - Setup buffer descriptor rings for individual Axi DMA-Tx
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * Return: 0 on success, -ENOMEM on failure
+ *
+ * This function is a helper function for axienet_dma_q_init.
+ */
+static int __dma_txq_init(struct net_device *ndev, struct axienet_dma_q *q)
+{
+ int i;
+ u32 cr;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ q->tx_bd_ci = 0;
+ q->tx_bd_tail = 0;
+
+ q->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*q->tx_bd_v) * lp->tx_bd_num,
+ &q->tx_bd_p, GFP_KERNEL);
+ if (!q->tx_bd_v)
+ goto out;
+
+ for (i = 0; i < lp->tx_bd_num; i++) {
+ q->tx_bd_v[i].next = q->tx_bd_p +
+ sizeof(*q->tx_bd_v) *
+ ((i + 1) % lp->tx_bd_num);
+ }
+
+ if (!q->eth_hasdre) {
+ q->tx_bufs = dma_alloc_coherent(ndev->dev.parent,
+ XAE_MAX_PKT_LEN * lp->tx_bd_num,
+ &q->tx_bufs_dma,
+ GFP_KERNEL);
+ if (!q->tx_bufs)
+ goto out;
+
+ for (i = 0; i < lp->tx_bd_num; i++)
+ q->tx_buf[i] = &q->tx_bufs[i * XAE_MAX_PKT_LEN];
+ }
+
+ /* Start updating the Tx channel control register */
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
+ ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Write to the Tx channel control register */
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * The Tx channel is now ready to run, but transmission starts only
+ * after the tail pointer register is written.
+ */
+ axienet_dma_bdout(q, XAXIDMA_TX_CDESC_OFFSET, q->tx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+ return 0;
+out:
+ return -ENOMEM;
+}
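+
+/* Editor's worked sketch of the read-modify-write used above, assuming
+ * XAXIDMA_COALESCE_MASK/XAXIDMA_COALESCE_SHIFT describe one contiguous
+ * bitfield (e.g. mask 0x00FF0000, shift 16). A coalesce count of 24
+ * then becomes:
+ *
+ *	cr = (cr & ~0x00FF0000) | (24 << 16);
+ *
+ * i.e. clear the field first, then insert the new count at its shift.
+ */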
+
+/**
+ * __dma_rxq_init - Setup buffer descriptor rings for individual Axi DMA-Rx
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * Return: 0 on success, -ENOMEM on failure
+ *
+ * This function is a helper function for axienet_dma_q_init.
+ */
+static int __dma_rxq_init(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ int i;
+ u32 cr;
+ struct sk_buff *skb;
+ struct axienet_local *lp = netdev_priv(ndev);
+ /* Reset the indexes which are used for accessing the BDs */
+ q->rx_bd_ci = 0;
+
+ /* Allocate the Rx buffer descriptors. */
+ q->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*q->rx_bd_v) * lp->rx_bd_num,
+ &q->rx_bd_p, GFP_KERNEL);
+ if (!q->rx_bd_v)
+ goto out;
+
+ for (i = 0; i < lp->rx_bd_num; i++) {
+ q->rx_bd_v[i].next = q->rx_bd_p +
+ sizeof(*q->rx_bd_v) *
+ ((i + 1) % lp->rx_bd_num);
+
+ skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+ if (!skb)
+ goto out;
+
+ /* Ensure that the skb is completely updated
+ * prior to mapping the DMA
+ */
+ wmb();
+
+ q->rx_bd_v[i].sw_id_offset = (phys_addr_t)skb;
+ q->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
+ skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ q->rx_bd_v[i].cntrl = lp->max_frm_size;
+ }
+
+ /* Start updating the Rx channel control register */
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
+ ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XAXIDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Write to the Rx channel control register */
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_bdout(q, XAXIDMA_RX_CDESC_OFFSET, q->rx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_bdout(q, XAXIDMA_RX_TDESC_OFFSET, q->rx_bd_p +
+ (sizeof(*q->rx_bd_v) * (lp->rx_bd_num - 1)));
+
+ return 0;
+out:
+ return -ENOMEM;
+}
+
+/**
+ * axienet_dma_q_init - Setup buffer descriptor rings for individual Axi DMA
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * Return: 0 on success, -ENOMEM on failure
+ *
+ * This function is a helper function for axienet_dma_bd_init.
+ */
+int __maybe_unused axienet_dma_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ if (__dma_txq_init(ndev, q))
+ goto out;
+
+ if (__dma_rxq_init(ndev, q))
+ goto out;
+
+ return 0;
+out:
+ axienet_dma_bd_release(ndev);
+ return -ENOMEM;
+}
+
+/**
+ * map_dma_q_irq - Map dma q based on interrupt number.
+ * @irq: irq number
+ * @lp: axienet local structure
+ *
+ * Return: DMA queue index, or -ENODEV on failure.
+ *
+ * This returns the index of the DMA queue on which the interrupt has
+ * occurred.
+ */
+static int map_dma_q_irq(int irq, struct axienet_local *lp)
+{
+ int i;
+
+ for_each_rx_dma_queue(lp, i) {
+ if (irq == lp->dq[i]->tx_irq || irq == lp->dq[i]->rx_irq)
+ return i;
+ }
+ pr_err("Error mapping DMA irq\n");
+ return -ENODEV;
+}
+
+/**
+ * axienet_tx_irq - Tx Done Isr.
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Return: IRQ_HANDLED or IRQ_NONE.
+ *
+ * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
+ * to complete the BD processing.
+ */
+irqreturn_t __maybe_unused axienet_tx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ int i = map_dma_q_irq(irq, lp);
+ struct axienet_dma_q *q;
+
+ if (i < 0)
+ return IRQ_NONE;
+
+ q = lp->dq[i];
+
+ status = axienet_dma_in32(q, XAXIDMA_TX_SR_OFFSET);
+ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ axienet_dma_out32(q, XAXIDMA_TX_SR_OFFSET, status);
+ axienet_start_xmit_done(lp->ndev, q);
+ goto out;
+ }
+
+ if (!(status & XAXIDMA_IRQ_ALL_MASK))
+ dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
+
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: %pa\n",
+ &q->tx_bd_v[q->tx_bd_ci].phys);
+
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Write to the Tx channel control register */
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
+
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Write to the Rx channel control register */
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+
+ tasklet_schedule(&lp->dma_err_tasklet[i]);
+ axienet_dma_out32(q, XAXIDMA_TX_SR_OFFSET, status);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * axienet_rx_irq - Rx Isr.
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Return: IRQ_HANDLED or IRQ_NONE.
+ *
+ * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
+ * processing.
+ */
+irqreturn_t __maybe_unused axienet_rx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ int i = map_dma_q_irq(irq, lp);
+ struct axienet_dma_q *q;
+
+ if (i < 0)
+ return IRQ_NONE;
+
+ q = lp->dq[i];
+
+ status = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
+ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+ napi_schedule(&lp->napi[i]);
+ }
+
+ if (!(status & XAXIDMA_IRQ_ALL_MASK))
+ dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
+
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: %pa\n",
+ &q->rx_bd_v[q->rx_bd_ci].phys);
+
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Finally write to the Tx channel control register */
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
+
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* write to the Rx channel control register */
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+
+ tasklet_schedule(&lp->dma_err_tasklet[i]);
+ axienet_dma_out32(q, XAXIDMA_RX_SR_OFFSET, status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
+ * @data: Tasklet data - the DMA queue pointer cast to unsigned long
+ *
+ * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
+ * Tx/Rx BDs.
+ */
+void __maybe_unused axienet_dma_err_handler(unsigned long data)
+{
+ u32 axienet_status;
+ u32 cr, i;
+ struct axienet_dma_q *q = (struct axienet_dma_q *)data;
+ struct axienet_local *lp = q->lp;
+ struct net_device *ndev = lp->ndev;
+ struct axidma_bd *cur_p;
+
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ /* Disable the MDIO interface till the Axi Ethernet reset is
+ * completed. When we do an Axi Ethernet reset, it resets the
+ * complete core including the MDIO. So if MDIO is not disabled
+ * when the reset process is started, MDIO will be broken
+ * afterwards.
+ */
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
+ }
+
+ __axienet_device_reset(q);
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
+ }
+
+ for (i = 0; i < lp->tx_bd_num; i++) {
+ cur_p = &q->tx_bd_v[i];
+ if (cur_p->phys)
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ (cur_p->cntrl &
+ XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ if (cur_p->tx_skb)
+ dev_kfree_skb_irq((struct sk_buff *)cur_p->tx_skb);
+ cur_p->phys = 0;
+ cur_p->cntrl = 0;
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ cur_p->sw_id_offset = 0;
+ cur_p->tx_skb = 0;
+ }
+
+ for (i = 0; i < lp->rx_bd_num; i++) {
+ cur_p = &q->rx_bd_v[i];
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ }
+
+ q->tx_bd_ci = 0;
+ q->tx_bd_tail = 0;
+ q->rx_bd_ci = 0;
+
+ /* Start updating the Rx channel control register */
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
+ (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XAXIDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Finally write to the Rx channel control register */
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+
+ /* Start updating the Tx channel control register */
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
+ (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Finally write to the Tx channel control register */
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_bdout(q, XAXIDMA_RX_CDESC_OFFSET, q->rx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_bdout(q, XAXIDMA_RX_TDESC_OFFSET, q->rx_bd_p +
+ (sizeof(*q->rx_bd_v) * (lp->rx_bd_num - 1)));
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * The Tx channel is now ready to run, but transmission starts only
+ * after the tail pointer register is written.
+ */
+ axienet_dma_bdout(q, XAXIDMA_TX_CDESC_OFFSET, q->tx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
+ axienet_status &= ~XAE_RCW1_RX_MASK;
+ axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+ }
+
+ if (lp->axienet_config->mactype == XAXIENET_1G && !lp->eth_hasnobuf) {
+ axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
+ if (axienet_status & XAE_INT_RXRJECT_MASK)
+ axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ }
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ axienet_set_mac_address(ndev, NULL);
+ axienet_set_multicast_list(ndev);
+ lp->axienet_config->setoptions(ndev, lp->options);
+}
+
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 0ef806ea1832..7c98fcf735c9 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -22,6 +22,7 @@
* - Add support for extended VLAN support.
*/
+#include <linux/circ_buf.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
@@ -37,9 +38,20 @@
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/iopoll.h>
+#include <linux/ptp_classify.h>
+#include <linux/net_tstamp.h>
+#include <linux/random.h>
+#include <net/sock.h>
+#include <linux/xilinx_phy.h>
+#include <linux/clk.h>
#include "xilinx_axienet.h"
+#ifdef CONFIG_XILINX_TSN_PTP
+#include "xilinx_tsn_ptp.h"
+#include "xilinx_tsn_timer.h"
+#endif
/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT 128
#define RX_BD_NUM_DEFAULT 1024
@@ -52,17 +64,15 @@
#define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
#define DRIVER_VERSION "1.00a"
-#define AXIENET_REGS_N 40
-
-/* Match table for of_platform binding */
-static const struct of_device_id axienet_of_match[] = {
- { .compatible = "xlnx,axi-ethernet-1.00.a", },
- { .compatible = "xlnx,axi-ethernet-1.01.a", },
- { .compatible = "xlnx,axi-ethernet-2.01.a", },
- {},
-};
+#define AXIENET_REGS_N 32
+#define AXIENET_TS_HEADER_LEN 8
+#define XXVENET_TS_HEADER_LEN 4
+#define NS_PER_SEC 1000000000ULL /* Nanoseconds per second */
-MODULE_DEVICE_TABLE(of, axienet_of_match);
+#ifdef CONFIG_XILINX_TSN_PTP
+int axienet_phc_index = -1;
+EXPORT_SYMBOL(axienet_phc_index);
+#endif
/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
@@ -105,8 +115,8 @@ static struct axienet_option axienet_options[] = {
.m_or = XAE_FCC_FCTX_MASK,
}, { /* Turn on promiscuous frame filtering */
.opt = XAE_OPTION_PROMISC,
- .reg = XAE_FMI_OFFSET,
- .m_or = XAE_FMI_PM_MASK,
+ .reg = XAE_FMC_OFFSET,
+ .m_or = XAE_FMC_PM_MASK,
}, { /* Enable transmitter */
.opt = XAE_OPTION_TXEN,
.reg = XAE_TC_OFFSET,
@@ -119,34 +129,27 @@ static struct axienet_option axienet_options[] = {
{}
};
-/**
- * axienet_dma_in32 - Memory mapped Axi DMA register read
- * @lp: Pointer to axienet local structure
- * @reg: Address offset from the base address of the Axi DMA core
- *
- * Return: The contents of the Axi DMA register
- *
- * This function returns the contents of the corresponding Axi DMA register.
- */
-static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
-{
- return ioread32(lp->dma_regs + reg);
-}
-
-/**
- * axienet_dma_out32 - Memory mapped Axi DMA register write.
- * @lp: Pointer to axienet local structure
- * @reg: Address offset from the base address of the Axi DMA core
- * @value: Value to be written into the Axi DMA register
- *
- * This function writes the desired value into the corresponding Axi DMA
- * register.
- */
-static inline void axienet_dma_out32(struct axienet_local *lp,
- off_t reg, u32 value)
-{
- iowrite32(value, lp->dma_regs + reg);
-}
+/* Option table for setting up Axi Ethernet hardware options */
+static struct xxvenet_option xxvenet_options[] = {
+ { /* Turn on FCS stripping on receive packets */
+ .opt = XAE_OPTION_FCS_STRIP,
+ .reg = XXV_RCW1_OFFSET,
+ .m_or = XXV_RCW1_FCS_MASK,
+ }, { /* Turn on FCS insertion on transmit packets */
+ .opt = XAE_OPTION_FCS_INSERT,
+ .reg = XXV_TC_OFFSET,
+ .m_or = XXV_TC_FCS_MASK,
+ }, { /* Enable transmitter */
+ .opt = XAE_OPTION_TXEN,
+ .reg = XXV_TC_OFFSET,
+ .m_or = XXV_TC_TX_MASK,
+ }, { /* Enable receiver */
+ .opt = XAE_OPTION_RXEN,
+ .reg = XXV_RCW1_OFFSET,
+ .m_or = XXV_RCW1_RX_MASK,
+ },
+ {}
+};
/**
* axienet_dma_bd_release - Release buffer descriptor rings
@@ -156,28 +159,22 @@ static inline void axienet_dma_out32(struct axienet_local *lp,
* axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
* driver stop api is called.
*/
-static void axienet_dma_bd_release(struct net_device *ndev)
+void axienet_dma_bd_release(struct net_device *ndev)
{
int i;
struct axienet_local *lp = netdev_priv(ndev);
- for (i = 0; i < lp->rx_bd_num; i++) {
- dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
- lp->max_frm_size, DMA_FROM_DEVICE);
- dev_kfree_skb(lp->rx_bd_v[i].skb);
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ for_each_tx_dma_queue(lp, i) {
+ axienet_mcdma_tx_bd_free(ndev, lp->dq[i]);
}
-
- if (lp->rx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
- lp->rx_bd_v,
- lp->rx_bd_p);
- }
- if (lp->tx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
- lp->tx_bd_v,
- lp->tx_bd_p);
+#endif
+ for_each_rx_dma_queue(lp, i) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axienet_mcdma_rx_bd_free(ndev, lp->dq[i]);
+#else
+ axienet_bd_free(ndev, lp->dq[i]);
+#endif
}
}
@@ -193,101 +190,29 @@ static void axienet_dma_bd_release(struct net_device *ndev)
*/
static int axienet_dma_bd_init(struct net_device *ndev)
{
- u32 cr;
- int i;
- struct sk_buff *skb;
+ int i, ret = 0;
struct axienet_local *lp = netdev_priv(ndev);
- /* Reset the indexes which are used for accessing the BDs */
- lp->tx_bd_ci = 0;
- lp->tx_bd_tail = 0;
- lp->rx_bd_ci = 0;
-
- /* Allocate the Tx and Rx buffer descriptors. */
- lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
- &lp->tx_bd_p, GFP_KERNEL);
- if (!lp->tx_bd_v)
- goto out;
-
- lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
- &lp->rx_bd_p, GFP_KERNEL);
- if (!lp->rx_bd_v)
- goto out;
-
- for (i = 0; i < lp->tx_bd_num; i++) {
- lp->tx_bd_v[i].next = lp->tx_bd_p +
- sizeof(*lp->tx_bd_v) *
- ((i + 1) % lp->tx_bd_num);
- }
-
- for (i = 0; i < lp->rx_bd_num; i++) {
- lp->rx_bd_v[i].next = lp->rx_bd_p +
- sizeof(*lp->rx_bd_v) *
- ((i + 1) % lp->rx_bd_num);
-
- skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
- if (!skb)
- goto out;
-
- lp->rx_bd_v[i].skb = skb;
- lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
- skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
- lp->rx_bd_v[i].cntrl = lp->max_frm_size;
- }
-
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
- */
- axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting.
- */
- axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ for_each_tx_dma_queue(lp, i) {
+ ret = axienet_mcdma_tx_q_init(ndev, lp->dq[i]);
+ if (ret != 0)
+ break;
+ }
+#endif
+ for_each_rx_dma_queue(lp, i) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ ret = axienet_mcdma_rx_q_init(ndev, lp->dq[i]);
+#else
+ ret = axienet_dma_q_init(ndev, lp->dq[i]);
+#endif
+ if (ret != 0) {
+ netdev_err(ndev, "%s: Failed to init DMA buf\n", __func__);
+ break;
+ }
+ }
- return 0;
-out:
- axienet_dma_bd_release(ndev);
- return -ENOMEM;
+ return ret;
}
/**
@@ -298,16 +223,19 @@ out:
* This function is called to initialize the MAC address of the Axi Ethernet
* core. It writes to the UAW0 and UAW1 registers of the core.
*/
-static void axienet_set_mac_address(struct net_device *ndev,
- const void *address)
+void axienet_set_mac_address(struct net_device *ndev, const void *address)
{
struct axienet_local *lp = netdev_priv(ndev);
if (address)
- memcpy(ndev->dev_addr, address, ETH_ALEN);
+ ether_addr_copy(ndev->dev_addr, address);
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
+ if (lp->axienet_config->mactype != XAXIENET_1G &&
+ lp->axienet_config->mactype != XAXIENET_2_5G)
+ return;
+
/* Set up unicast MAC address filter set its mac address */
axienet_iow(lp, XAE_UAW0_OFFSET,
(ndev->dev_addr[0]) |
@@ -335,6 +263,7 @@ static void axienet_set_mac_address(struct net_device *ndev,
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
struct sockaddr *addr = p;
+
axienet_set_mac_address(ndev, addr->sa_data);
return 0;
}
@@ -350,12 +279,15 @@ static int netdev_set_mac_address(struct net_device *ndev, void *p)
* means whenever the multicast table entries need to be updated this
* function gets called.
*/
-static void axienet_set_multicast_list(struct net_device *ndev)
+void axienet_set_multicast_list(struct net_device *ndev)
{
int i;
u32 reg, af0reg, af1reg;
struct axienet_local *lp = netdev_priv(ndev);
+ if ((lp->axienet_config->mactype != XAXIENET_1G) || lp->eth_hasnobuf)
+ return;
+
if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
/* We must make the kernel realize we had to move into
@@ -363,9 +295,9 @@ static void axienet_set_multicast_list(struct net_device *ndev)
* the flag is already set. If not we set it.
*/
ndev->flags |= IFF_PROMISC;
- reg = axienet_ior(lp, XAE_FMI_OFFSET);
- reg |= XAE_FMI_PM_MASK;
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ reg = axienet_ior(lp, XAE_FMC_OFFSET);
+ reg |= XAE_FMC_PM_MASK;
+ axienet_iow(lp, XAE_FMC_OFFSET, reg);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
} else if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
@@ -383,25 +315,25 @@ static void axienet_set_multicast_list(struct net_device *ndev)
af1reg = (ha->addr[4]);
af1reg |= (ha->addr[5] << 8);
- reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+ reg = axienet_ior(lp, XAE_FMC_OFFSET) & 0xFFFFFF00;
reg |= i;
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ axienet_iow(lp, XAE_FMC_OFFSET, reg);
axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
i++;
}
} else {
- reg = axienet_ior(lp, XAE_FMI_OFFSET);
- reg &= ~XAE_FMI_PM_MASK;
+ reg = axienet_ior(lp, XAE_FMC_OFFSET);
+ reg &= ~XAE_FMC_PM_MASK;
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ axienet_iow(lp, XAE_FMC_OFFSET, reg);
for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
- reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+ reg = axienet_ior(lp, XAE_FMC_OFFSET) & 0xFFFFFF00;
reg |= i;
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ axienet_iow(lp, XAE_FMC_OFFSET, reg);
axienet_iow(lp, XAE_AF0_OFFSET, 0);
axienet_iow(lp, XAE_AF1_OFFSET, 0);
}
@@ -438,7 +370,25 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}
-static int __axienet_device_reset(struct axienet_local *lp)
+static void xxvenet_setoptions(struct net_device *ndev, u32 options)
+{
+ int reg;
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct xxvenet_option *tp = &xxvenet_options[0];
+
+ while (tp->opt) {
+ reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
+ if (options & tp->opt)
+ reg |= tp->m_or;
+ axienet_iow(lp, tp->reg, reg);
+ tp++;
+ }
+
+ lp->options |= options;
+}
+
+int __axienet_device_reset(struct axienet_dma_q *q)
{
u32 timeout;
@@ -449,13 +399,13 @@ static int __axienet_device_reset(struct axienet_local *lp)
* Note that even though both TX and RX have their own reset register,
* they both reset the entire DMA core, so only one needs to be used.
*/
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
timeout = DELAY_OF_ONE_MILLISEC;
- while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
+ while (axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET) &
XAXIDMA_CR_RESET_MASK) {
udelay(1);
if (--timeout == 0) {
- netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
+ netdev_err(q->lp->ndev, "%s: DMA reset timeout!\n",
__func__);
return -ETIMEDOUT;
}
@@ -480,62 +430,246 @@ static int axienet_device_reset(struct net_device *ndev)
{
u32 axienet_status;
struct axienet_local *lp = netdev_priv(ndev);
- int ret;
+ u32 err, val;
+ struct axienet_dma_q *q;
+ u32 i;
+
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ /* Reset the XXV MAC */
+ val = axienet_ior(lp, XXV_GT_RESET_OFFSET);
+ val |= XXV_GT_RESET_MASK;
+ axienet_iow(lp, XXV_GT_RESET_OFFSET, val);
+ /* Wait for 1ms for GT reset to complete as per spec */
+ mdelay(1);
+ val = axienet_ior(lp, XXV_GT_RESET_OFFSET);
+ val &= ~XXV_GT_RESET_MASK;
+ axienet_iow(lp, XXV_GT_RESET_OFFSET, val);
+ }
- ret = __axienet_device_reset(lp);
- if (ret)
- return ret;
+ if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
+ for_each_rx_dma_queue(lp, i) {
+ q = lp->dq[i];
+ __axienet_device_reset(q);
+ }
+ }
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
- lp->options |= XAE_OPTION_VLAN;
- lp->options &= (~XAE_OPTION_JUMBO);
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ lp->options |= XAE_OPTION_VLAN;
+ lp->options &= (~XAE_OPTION_JUMBO);
+ }
if ((ndev->mtu > XAE_MTU) &&
(ndev->mtu <= XAE_JUMBO_MTU)) {
lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
XAE_TRL_SIZE;
-
- if (lp->max_frm_size <= lp->rxmem)
+ if (lp->max_frm_size <= lp->rxmem &&
+ (lp->axienet_config->mactype != XAXIENET_10G_25G))
lp->options |= XAE_OPTION_JUMBO;
}
- ret = axienet_dma_bd_init(ndev);
- if (ret) {
+ if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
+ if (axienet_dma_bd_init(ndev)) {
netdev_err(ndev, "%s: descriptor allocation failed\n",
__func__);
- return ret;
+ }
}
- axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
- axienet_status &= ~XAE_RCW1_RX_MASK;
- axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
+ axienet_status &= ~XAE_RCW1_RX_MASK;
+ axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+ }
- axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
- if (axienet_status & XAE_INT_RXRJECT_MASK)
- axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ /* Check whether the block lock bit got set or not.
+ * This ensures that the 10G Ethernet IP
+ * is functioning normally.
+ */
+ err = readl_poll_timeout(lp->regs + XXV_STATRX_BLKLCK_OFFSET,
+ val, (val & XXV_RX_BLKLCK_MASK),
+ 10, DELAY_OF_ONE_MILLISEC);
+ if (err) {
+ netdev_err(ndev, "XXV MAC block lock not complete! Cross-check the MAC ref clock configuration\n");
+ }
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ if (!lp->is_tsn) {
+ axienet_rxts_iow(lp, XAXIFIFO_TXTS_RDFR,
+ XAXIFIFO_TXTS_RESET_MASK);
+ axienet_rxts_iow(lp, XAXIFIFO_TXTS_SRR,
+ XAXIFIFO_TXTS_RESET_MASK);
+
+ axienet_txts_iow(lp, XAXIFIFO_TXTS_RDFR,
+ XAXIFIFO_TXTS_RESET_MASK);
+ axienet_txts_iow(lp, XAXIFIFO_TXTS_SRR,
+ XAXIFIFO_TXTS_RESET_MASK);
+ }
+#endif
+ }
+
+ if ((lp->axienet_config->mactype == XAXIENET_1G) &&
+ !lp->eth_hasnobuf) {
+ axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
+ if (axienet_status & XAE_INT_RXRJECT_MASK)
+ axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ }
axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
XAE_INT_RECV_ERROR_MASK : 0);
- axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ lp->options |= XAE_OPTION_FCS_STRIP;
+ lp->options |= XAE_OPTION_FCS_INSERT;
+ } else {
+ axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+ }
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+
- /* Sync default options with HW but leave receiver and
- * transmitter disabled.
- */
- axienet_setoptions(ndev, lp->options &
- ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
axienet_set_multicast_list(ndev);
- axienet_setoptions(ndev, lp->options);
+ lp->axienet_config->setoptions(ndev, lp->options);
netif_trans_update(ndev);
return 0;
}
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+/**
+ * axienet_tx_hwtstamp - Read tx timestamp from hw and record it in the skbuff
+ * @lp: Pointer to axienet local structure
+ * @cur_p: Pointer to the axi_dma/axi_mcdma current bd
+ *
+ * Return: None.
+ */
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+void axienet_tx_hwtstamp(struct axienet_local *lp,
+ struct aximcdma_bd *cur_p)
+#else
+void axienet_tx_hwtstamp(struct axienet_local *lp,
+ struct axidma_bd *cur_p)
+#endif
+{
+ u32 sec = 0, nsec = 0, val;
+ u64 time64;
+ int err = 0;
+ u32 count, len = lp->axienet_config->tx_ptplen;
+ struct skb_shared_hwtstamps *shhwtstamps =
+ skb_hwtstamps((struct sk_buff *)cur_p->ptp_tx_skb);
+
+ val = axienet_txts_ior(lp, XAXIFIFO_TXTS_ISR);
+ if (unlikely(!(val & XAXIFIFO_TXTS_INT_RC_MASK)))
+ dev_info(lp->dev, "Did't get FIFO tx interrupt %d\n", val);
+
+ /* If the FIFO is configured in cut-through mode, we will get an Rx
+ * complete interrupt even if only one byte is in the FIFO; wait for
+ * the full packet.
+ */
+ err = readl_poll_timeout_atomic(lp->tx_ts_regs + XAXIFIFO_TXTS_RLR, val,
+ ((val & XAXIFIFO_TXTS_RXFD_MASK) >=
+ len), 0, 1000000);
+ if (err)
+ netdev_err(lp->ndev, "%s: Didn't get the full timestamp packet",
+ __func__);
+
+ nsec = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ sec = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ val = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ val = ((val & XAXIFIFO_TXTS_TAG_MASK) >> XAXIFIFO_TXTS_TAG_SHIFT);
+ dev_dbg(lp->dev, "tx_stamp:[%04x] %04x %u %9u\n",
+ cur_p->ptp_tx_ts_tag, val, sec, nsec);
+
+ if (val != cur_p->ptp_tx_ts_tag) {
+ count = axienet_txts_ior(lp, XAXIFIFO_TXTS_RFO);
+ while (count) {
+ nsec = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ sec = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ val = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ val = ((val & XAXIFIFO_TXTS_TAG_MASK) >>
+ XAXIFIFO_TXTS_TAG_SHIFT);
+
+ dev_dbg(lp->dev, "tx_stamp:[%04x] %04x %u %9u\n",
+ cur_p->ptp_tx_ts_tag, val, sec, nsec);
+ if (val == cur_p->ptp_tx_ts_tag)
+ break;
+ count = axienet_txts_ior(lp, XAXIFIFO_TXTS_RFO);
+ }
+ if (val != cur_p->ptp_tx_ts_tag) {
+ dev_info(lp->dev, "Mismatching 2-step tag. Got %x",
+ val);
+ dev_info(lp->dev, "Expected %x\n",
+ cur_p->ptp_tx_ts_tag);
+ }
+ }
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ val = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+
+ time64 = sec * NS_PER_SEC + nsec;
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(time64);
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ skb_pull((struct sk_buff *)cur_p->ptp_tx_skb,
+ AXIENET_TS_HEADER_LEN);
+
+ skb_tstamp_tx((struct sk_buff *)cur_p->ptp_tx_skb, shhwtstamps);
+ dev_kfree_skb_any((struct sk_buff *)cur_p->ptp_tx_skb);
+ cur_p->ptp_tx_skb = 0;
+}
+
+/**
+ * axienet_rx_hwtstamp - Read rx timestamp from hw and record it in the skbuff
+ * @lp: Pointer to axienet local structure
+ * @skb: Pointer to the sk_buff structure
+ *
+ * Return: None.
+ */
+static void axienet_rx_hwtstamp(struct axienet_local *lp,
+ struct sk_buff *skb)
+{
+ u32 sec = 0, nsec = 0, val;
+ u64 time64;
+ int err = 0;
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+
+ val = axienet_rxts_ior(lp, XAXIFIFO_TXTS_ISR);
+ if (unlikely(!(val & XAXIFIFO_TXTS_INT_RC_MASK))) {
+ dev_info(lp->dev, "Did't get FIFO rx interrupt %d\n", val);
+ return;
+ }
+
+ val = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RFO);
+ if (!val)
+ return;
+
+ /* If the FIFO is configured in cut-through mode, we will get an Rx
+ * complete interrupt even if only one byte is in the FIFO; wait for
+ * the full packet.
+ */
+ err = readl_poll_timeout_atomic(lp->rx_ts_regs + XAXIFIFO_TXTS_RLR, val,
+ ((val & XAXIFIFO_TXTS_RXFD_MASK) >= 12),
+ 0, 1000000);
+ if (err) {
+ netdev_err(lp->ndev, "%s: Didn't get the full timestamp packet",
+ __func__);
+ return;
+ }
+
+ nsec = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ sec = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ val = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RXFD);
+
+ if (lp->tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
+ time64 = sec * NS_PER_SEC + nsec;
+ shhwtstamps->hwtstamp = ns_to_ktime(time64);
+ }
+}
+#endif
+
/**
* axienet_start_xmit_done - Invoked once a transmit is completed by the
* Axi DMA Tx channel.
* @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
*
* This function is invoked from the Axi DMA Tx isr to notify the completion
* of transmit operation. It clears fields in the corresponding Tx BDs and
@@ -543,22 +677,44 @@ static int axienet_device_reset(struct net_device *ndev)
* buffer. It finally invokes "netif_wake_queue" to restart transmission if
* required.
*/
-static void axienet_start_xmit_done(struct net_device *ndev)
+void axienet_start_xmit_done(struct net_device *ndev, struct axienet_dma_q *q)
{
u32 size = 0;
u32 packets = 0;
struct axienet_local *lp = netdev_priv(ndev);
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+#else
struct axidma_bd *cur_p;
+#endif
unsigned int status = 0;
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_ci];
+ status = cur_p->sband_stats;
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_ci];
status = cur_p->status;
+#endif
while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
- DMA_TO_DEVICE);
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ if (cur_p->ptp_tx_skb)
+ axienet_tx_hwtstamp(lp, cur_p);
+#endif
+ if (cur_p->tx_desc_mapping == DESC_DMA_MAP_PAGE)
+ dma_unmap_page(ndev->dev.parent, cur_p->phys,
+ cur_p->cntrl &
+ XAXIDMA_BD_CTRL_LENGTH_MASK,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ cur_p->cntrl &
+ XAXIDMA_BD_CTRL_LENGTH_MASK,
+ DMA_TO_DEVICE);
if (cur_p->skb)
dev_consume_skb_irq(cur_p->skb);
+ if (cur_p->tx_skb)
+ dev_kfree_skb_irq((struct sk_buff *)cur_p->tx_skb);
/*cur_p->phys = 0;*/
cur_p->app0 = 0;
cur_p->app1 = 0;
@@ -566,28 +722,44 @@ static void axienet_start_xmit_done(struct net_device *ndev)
cur_p->app4 = 0;
cur_p->status = 0;
cur_p->skb = NULL;
+ cur_p->tx_skb = 0;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p->sband_stats = 0;
+#endif
size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
packets++;
- if (++lp->tx_bd_ci >= lp->tx_bd_num)
- lp->tx_bd_ci = 0;
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+ if (++q->tx_bd_ci >= lp->tx_bd_num)
+ q->tx_bd_ci = 0;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_ci];
+ status = cur_p->sband_stats;
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_ci];
status = cur_p->status;
+#endif
}
ndev->stats.tx_packets += packets;
ndev->stats.tx_bytes += size;
+ q->tx_packets += packets;
+ q->tx_bytes += size;
/* Matches barrier in axienet_start_xmit */
smp_mb();
- netif_wake_queue(ndev);
+ /* FIXME: With the existing multiqueue implementation
+ * in the driver it is difficult to get the exact queue info.
+ * We should wake only the particular queue
+ * instead of waking all ndev queues.
+ */
+ netif_tx_wake_all_queues(ndev);
}
/**
* axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
- * @lp: Pointer to the axienet_local structure
+ * @q: Pointer to DMA queue structure
* @num_frag: The number of BDs to check for
*
* Return: 0, on success
@@ -598,61 +770,300 @@ static void axienet_start_xmit_done(struct net_device *ndev)
* transmission. If the BD or any of the BDs are not free the function
* returns a busy status. This is invoked from axienet_start_xmit.
*/
-static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
+static inline int axienet_check_tx_bd_space(struct axienet_dma_q *q,
int num_frag)
{
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+ struct axienet_local *lp = q->lp;
+
+ if (CIRC_SPACE(q->tx_bd_tail, q->tx_bd_ci, lp->tx_bd_num) < (num_frag + 1))
+ return NETDEV_TX_BUSY;
+
+ cur_p = &q->txq_bd_v[(q->tx_bd_tail + num_frag) % lp->tx_bd_num];
+ if (cur_p->sband_stats & XMCDMA_BD_STS_ALL_MASK)
+ return NETDEV_TX_BUSY;
+#else
struct axidma_bd *cur_p;
- cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
+ struct axienet_local *lp = q->lp;
+
+ if (CIRC_SPACE(q->tx_bd_tail, q->tx_bd_ci, lp->tx_bd_num) < (num_frag + 1))
+ return NETDEV_TX_BUSY;
+
+ cur_p = &q->tx_bd_v[(q->tx_bd_tail + num_frag) % lp->tx_bd_num];
if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
return NETDEV_TX_BUSY;
+#endif
return 0;
}
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
/**
- * axienet_start_xmit - Starts the transmission.
- * @skb: sk_buff pointer that contains data to be Txed.
- * @ndev: Pointer to net_device structure.
+ * axienet_create_tsheader - Create timestamp header for tx
+ * @buf: Pointer to the buffer to copy the timestamp header into
+ * @msg_type: PTP message type
+ * @q: Pointer to DMA queue structure
*
- * Return: NETDEV_TX_OK, on success
- * NETDEV_TX_BUSY, if any of the descriptors are not free
- *
- * This function is invoked from upper layers to initiate transmission. The
- * function uses the next available free BDs and populates their fields to
- * start the transmission. Additionally if checksum offloading is supported,
- * it populates AXI Stream Control fields with appropriate values.
+ * Return: None.
*/
-static netdev_tx_t
-axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static void axienet_create_tsheader(u8 *buf, u8 msg_type,
+ struct axienet_dma_q *q)
+{
+ struct axienet_local *lp = q->lp;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+#else
+ struct axidma_bd *cur_p;
+#endif
+ u64 val;
+ u32 tmp;
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_tail];
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_tail];
+#endif
+
+ if (msg_type == TX_TS_OP_NOOP) {
+ buf[0] = TX_TS_OP_NOOP;
+ } else if (msg_type == TX_TS_OP_ONESTEP) {
+ buf[0] = TX_TS_OP_ONESTEP;
+ buf[1] = TX_TS_CSUM_UPDATE;
+ buf[4] = TX_PTP_TS_OFFSET;
+ buf[6] = TX_PTP_CSUM_OFFSET;
+ } else {
+ buf[0] = TX_TS_OP_TWOSTEP;
+ buf[2] = cur_p->ptp_tx_ts_tag & 0xFF;
+ buf[3] = (cur_p->ptp_tx_ts_tag >> 8) & 0xFF;
+ }
+
+ if (lp->axienet_config->mactype == XAXIENET_1G ||
+ lp->axienet_config->mactype == XAXIENET_2_5G) {
+ memcpy(&val, buf, AXIENET_TS_HEADER_LEN);
+ swab64s(&val);
+ memcpy(buf, &val, AXIENET_TS_HEADER_LEN);
+ } else if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ memcpy(&tmp, buf, XXVENET_TS_HEADER_LEN);
+ axienet_txts_iow(lp, XAXIFIFO_TXTS_TXFD, tmp);
+ axienet_txts_iow(lp, XAXIFIFO_TXTS_TLR, XXVENET_TS_HEADER_LEN);
+ }
+}
+#endif
+
+#ifdef CONFIG_XILINX_TSN
+static inline u16 get_tsn_queue(u8 pcp, u16 num_tc)
+{
+ u16 queue = 0;
+
+ /* For a 3-queue system, the RE queue is 1 and the ST queue is 2.
+ * For a 2-queue system, the ST queue is 1. The BE queue is always 0.
+ */
+ if (pcp == 4) {
+ if (num_tc == 2)
+ queue = 1;
+ else
+ queue = 2;
+ } else if ((num_tc == 3) && (pcp == 2 || pcp == 3)) {
+ queue = 1;
+ }
+
+ return queue;
+}
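+
+/* Editor's summary of the mapping implemented above (illustration):
+ *	num_tc == 2: pcp 4 -> queue 1 (ST); everything else -> 0 (BE)
+ *	num_tc == 3: pcp 4 -> queue 2 (ST); pcp 2 or 3 -> 1 (RE);
+ *		     everything else -> 0 (BE)
+ */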
+
+static inline u16 tsn_queue_mapping(const struct sk_buff *skb, u16 num_tc)
+{
+ int queue = 0;
+ u16 vlan_tci;
+ u8 pcp;
+
+ struct ethhdr *hdr = (struct ethhdr *)skb->data;
+ u16 ether_type = ntohs(hdr->h_proto);
+
+ if (unlikely(ether_type == ETH_P_8021Q)) {
+ struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)skb->data;
+
+ /* ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); */
+
+ vlan_tci = ntohs(vhdr->h_vlan_TCI);
+
+ pcp = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ pr_debug("vlan_tci: %x\n", vlan_tci);
+ pr_debug("pcp: %d\n", pcp);
+
+ queue = get_tsn_queue(pcp, num_tc);
+ }
+ pr_debug("selected queue: %d\n", queue);
+ return queue;
+}
+#endif
+
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+static int axienet_skb_tstsmp(struct sk_buff **__skb, struct axienet_dma_q *q,
+ struct net_device *ndev)
+{
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+#else
+ struct axidma_bd *cur_p;
+#endif
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct sk_buff *old_skb = *__skb;
+ struct sk_buff *skb = *__skb;
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_tail];
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_tail];
+#endif
+
+ if (!lp->is_tsn) {
+ if ((((lp->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_SYNC) ||
+ (lp->tstamp_config.tx_type == HWTSTAMP_TX_ON)) ||
+ lp->eth_hasptp) && (lp->axienet_config->mactype !=
+ XAXIENET_10G_25G)) {
+ u8 *tmp;
+ struct sk_buff *new_skb;
+
+ if (skb_headroom(old_skb) < AXIENET_TS_HEADER_LEN) {
+ new_skb =
+ skb_realloc_headroom(old_skb,
+ AXIENET_TS_HEADER_LEN);
+ if (!new_skb) {
+ dev_err(&ndev->dev, "failed to allocate new socket buffer\n");
+ dev_kfree_skb_any(old_skb);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Transfer the ownership to the
+ * new socket buffer if required
+ */
+ if (old_skb->sk)
+ skb_set_owner_w(new_skb, old_skb->sk);
+ dev_kfree_skb_any(old_skb);
+ *__skb = new_skb;
+ skb = new_skb;
+ }
+
+ tmp = skb_push(skb, AXIENET_TS_HEADER_LEN);
+ memset(tmp, 0, AXIENET_TS_HEADER_LEN);
+ cur_p->ptp_tx_ts_tag++;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ if (lp->tstamp_config.tx_type ==
+ HWTSTAMP_TX_ONESTEP_SYNC) {
+ axienet_create_tsheader(tmp,
+ TX_TS_OP_ONESTEP
+ , q);
+ } else {
+ axienet_create_tsheader(tmp,
+ TX_TS_OP_TWOSTEP
+ , q);
+ skb_shinfo(skb)->tx_flags
+ |= SKBTX_IN_PROGRESS;
+ cur_p->ptp_tx_skb =
+ (unsigned long)skb_get(skb);
+ }
+ }
+ } else if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (lp->axienet_config->mactype == XAXIENET_10G_25G)) {
+ cur_p->ptp_tx_ts_tag = (prandom_u32() &
+ ~XAXIFIFO_TXTS_TAG_MASK) + 1;
+ dev_dbg(lp->dev, "tx_tag:[%04x]\n",
+ cur_p->ptp_tx_ts_tag);
+ if (lp->tstamp_config.tx_type ==
+ HWTSTAMP_TX_ONESTEP_SYNC) {
+ axienet_create_tsheader(lp->tx_ptpheader,
+ TX_TS_OP_ONESTEP, q);
+ } else {
+ axienet_create_tsheader(lp->tx_ptpheader,
+ TX_TS_OP_TWOSTEP, q);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ cur_p->ptp_tx_skb = (phys_addr_t)skb_get(skb);
+ }
+ } else if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ dev_dbg(lp->dev, "tx_tag:NOOP\n");
+ axienet_create_tsheader(lp->tx_ptpheader,
+ TX_TS_OP_NOOP, q);
+ }
+ }
+ return NETDEV_TX_OK;
+}
+#endif
+
+static int axienet_queue_xmit(struct sk_buff *skb,
+ struct net_device *ndev, u16 map)
{
u32 ii;
u32 num_frag;
u32 csum_start_off;
u32 csum_index_off;
- skb_frag_t *frag;
dma_addr_t tail_p;
struct axienet_local *lp = netdev_priv(ndev);
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+#else
struct axidma_bd *cur_p;
+#endif
+ unsigned long flags;
+ struct axienet_dma_q *q;
+
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ /* Need to manually pad the small frames in case of XXV MAC
+ * because the pad field is not added by the IP. We must present
+ * a packet that meets the minimum length to the IP core.
+ * When the IP core is configured to calculate and add the FCS
+ * to the packet, the minimum packet length is 60 bytes.
+ */
+ if (eth_skb_pad(skb)) {
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ }
- num_frag = skb_shinfo(skb)->nr_frags;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+#ifdef CONFIG_XILINX_TSN
+ if (unlikely(lp->is_tsn)) {
+ map = tsn_queue_mapping(skb, lp->num_tc);
+#ifdef CONFIG_XILINX_TSN_PTP
+ const struct ethhdr *eth;
- if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
- if (netif_queue_stopped(ndev))
- return NETDEV_TX_BUSY;
+ eth = (struct ethhdr *)skb->data;
+ /* Check whether the skb is a PTP frame */
+ if (eth->h_proto == htons(ETH_P_1588))
+ return axienet_ptp_xmit(skb, ndev);
+#endif
+ if (lp->temac_no == XAE_TEMAC2) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
+ num_frag = skb_shinfo(skb)->nr_frags;
- netif_stop_queue(ndev);
+ q = lp->dq[map];
- /* Matches barrier in axienet_start_xmit_done */
- smp_mb();
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_tail];
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_tail];
+#endif
- /* Space might have just been freed - check again */
- if (axienet_check_tx_bd_space(lp, num_frag + 1))
- return NETDEV_TX_BUSY;
+ spin_lock_irqsave(&q->tx_lock, flags);
+ if (axienet_check_tx_bd_space(q, num_frag)) {
+ if (!__netif_subqueue_stopped(ndev, map))
+ netif_stop_subqueue(ndev, map);
+ spin_unlock_irqrestore(&q->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
- netif_wake_queue(ndev);
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ if (axienet_skb_tstsmp(&skb, q, ndev)) {
+ spin_unlock_irqrestore(&q->tx_lock, flags);
+ return NETDEV_TX_BUSY;
}
+#endif
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL && !lp->eth_hasnobuf &&
+ (lp->axienet_config->mactype == XAXIENET_1G)) {
if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
/* Tx Full Checksum Offload Enabled */
cur_p->app0 |= 2;
@@ -663,48 +1074,119 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= 1;
cur_p->app1 = (csum_start_off << 16) | csum_index_off;
}
- } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ } else if (skb->ip_summed == CHECKSUM_UNNECESSARY &&
+ !lp->eth_hasnobuf &&
+ (lp->axienet_config->mactype == XAXIENET_1G)) {
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}
- cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
- cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p->cntrl = (skb_headlen(skb) | XMCDMA_BD_CTRL_TXSOF_MASK);
+#else
+ cur_p->cntrl = (skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK);
+#endif
+
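+ /* Without the DMA's Data Realignment Engine, buffers must be
+ * 4-byte aligned and contiguous; copy unaligned or fragmented
+ * skbs into the queue's pre-allocated bounce buffer instead.
+ */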
+ if (!q->eth_hasdre &&
+ (((phys_addr_t)skb->data & 0x3) || (num_frag > 0))) {
+ skb_copy_and_csum_dev(skb, q->tx_buf[q->tx_bd_tail]);
+
+ cur_p->phys = q->tx_bufs_dma +
+ (q->tx_buf[q->tx_bd_tail] - q->tx_bufs);
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p->cntrl = skb_pagelen(skb) | XMCDMA_BD_CTRL_TXSOF_MASK;
+#else
+ cur_p->cntrl = skb_pagelen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
+#endif
+ goto out;
+ } else {
+ cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ }
+ cur_p->tx_desc_mapping = DESC_DMA_MAP_SINGLE;
for (ii = 0; ii < num_frag; ii++) {
- if (++lp->tx_bd_tail >= lp->tx_bd_num)
- lp->tx_bd_tail = 0;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ u32 len;
+ skb_frag_t *frag;
+
+ if (++q->tx_bd_tail >= lp->tx_bd_num)
+ q->tx_bd_tail = 0;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_tail];
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_tail];
+#endif
frag = &skb_shinfo(skb)->frags[ii];
- cur_p->phys = dma_map_single(ndev->dev.parent,
- skb_frag_address(frag),
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- cur_p->cntrl = skb_frag_size(frag);
+ len = skb_frag_size(frag);
+ cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0, len,
+ DMA_TO_DEVICE);
+ cur_p->cntrl = len;
+ cur_p->tx_desc_mapping = DESC_DMA_MAP_PAGE;
}
+out:
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p->cntrl |= XMCDMA_BD_CTRL_TXEOF_MASK;
+ tail_p = q->tx_bd_p + sizeof(*q->txq_bd_v) * q->tx_bd_tail;
+#else
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
+ tail_p = q->tx_bd_p + sizeof(*q->tx_bd_v) * q->tx_bd_tail;
+#endif
cur_p->skb = skb;
+ cur_p->tx_skb = (phys_addr_t)skb;
+
+ /* Ensure BD write before starting transfer */
+ wmb();
- tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
/* Start the transfer */
- axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- if (++lp->tx_bd_tail >= lp->tx_bd_num)
- lp->tx_bd_tail = 0;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id),
+ tail_p);
+#else
+ axienet_dma_bdout(q, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+#endif
+ if (++q->tx_bd_tail >= lp->tx_bd_num)
+ q->tx_bd_tail = 0;
+
+ spin_unlock_irqrestore(&q->tx_lock, flags);
return NETDEV_TX_OK;
}
/**
+ * axienet_start_xmit - Starts the transmission.
+ * @skb: sk_buff pointer that contains data to be Txed.
+ * @ndev: Pointer to net_device structure.
+ *
+ * Return: NETDEV_TX_OK on success,
+ * NETDEV_TX_BUSY if no free descriptors are available
+ *
+ * This function is invoked from upper layers to initiate transmission. The
+ * function uses the next available free BDs and populates their fields to
+ * start the transmission. Additionally, if checksum offloading is supported,
+ * it populates AXI Stream Control fields with appropriate values.
+ */
+static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ u16 map = skb_get_queue_mapping(skb); /* Single DMA queue by default */
+
+ return axienet_queue_xmit(skb, ndev, map);
+}
+
+/**
* axienet_recv - Is called from Axi DMA Rx Isr to complete the received
* BD processing.
* @ndev: Pointer to net_device structure.
+ * @budget: NAPI budget
+ * @q: Pointer to axienet DMA queue structure
*
- * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
- * does minimal processing and invokes "netif_rx" to complete further
- * processing.
+ * This function is invoked from the Axi DMA Rx ISR (NAPI poll) to process
+ * the Rx BDs. It does minimal processing and invokes "netif_receive_skb"
+ * to complete further processing.
+ *
+ * Return: Number of BDs processed.
*/
-static void axienet_recv(struct net_device *ndev)
+static int axienet_recv(struct net_device *ndev, int budget,
+ struct axienet_dma_q *q)
{
u32 length;
u32 csumstatus;
@@ -713,12 +1195,33 @@ static void axienet_recv(struct net_device *ndev)
dma_addr_t tail_p = 0;
struct axienet_local *lp = netdev_priv(ndev);
struct sk_buff *skb, *new_skb;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+#else
struct axidma_bd *cur_p;
+#endif
+ unsigned int numbdfree = 0;
+
+ /* Get the relevant BD status value */
+ rmb();
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->rxq_bd_v[q->rx_bd_ci];
+#else
+ cur_p = &q->rx_bd_v[q->rx_bd_ci];
+#endif
- cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
-
- while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+ while ((numbdfree < budget) &&
+ (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ new_skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+ if (!new_skb) {
+ dev_err(lp->dev, "No memory for new_skb\n");
+ break;
+ }
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ tail_p = q->rx_bd_p + sizeof(*q->rxq_bd_v) * q->rx_bd_ci;
+#else
+ tail_p = q->rx_bd_p + sizeof(*q->rx_bd_v) * q->rx_bd_ci;
+#endif
dma_unmap_single(ndev->dev.parent, cur_p->phys,
lp->max_frm_size,
@@ -726,15 +1229,54 @@ static void axienet_recv(struct net_device *ndev)
skb = cur_p->skb;
cur_p->skb = NULL;
- length = cur_p->app4 & 0x0000FFFF;
+ if (lp->eth_hasnobuf ||
+ (lp->axienet_config->mactype != XAXIENET_1G))
+ length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
+ else
+ length = cur_p->app4 & 0x0000FFFF;
skb_put(skb, length);
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ if (!lp->is_tsn) {
+ if ((lp->tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL ||
+ lp->eth_hasptp) && (lp->axienet_config->mactype !=
+ XAXIENET_10G_25G)) {
+ u32 sec, nsec;
+ u64 time64;
+ struct skb_shared_hwtstamps *shhwtstamps;
+
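+ /* 1G/2.5G MACs prepend the timestamp as big-endian sec,nsec;
+ * other MACs prepend native-order nsec,sec.
+ */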
+ if (lp->axienet_config->mactype == XAXIENET_1G ||
+ lp->axienet_config->mactype == XAXIENET_2_5G) {
+ /* The first 8 bytes will be the timestamp */
+ memcpy(&sec, &skb->data[0], 4);
+ memcpy(&nsec, &skb->data[4], 4);
+
+ sec = cpu_to_be32(sec);
+ nsec = cpu_to_be32(nsec);
+ } else {
+ /* The first 8 bytes will be the timestamp */
+ memcpy(&nsec, &skb->data[0], 4);
+ memcpy(&sec, &skb->data[4], 4);
+ }
+
+ /* Remove these 8 bytes from the buffer */
+ skb_pull(skb, 8);
+ time64 = sec * NS_PER_SEC + nsec;
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = ns_to_ktime(time64);
+ } else if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ axienet_rx_hwtstamp(lp, skb);
+ }
+ }
+#endif
skb->protocol = eth_type_trans(skb, ndev);
/*skb_checksum_none_assert(skb);*/
skb->ip_summed = CHECKSUM_NONE;
/* if we're doing Rx csum offload, set it up */
- if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
+ if (lp->features & XAE_FEATURE_FULL_RX_CSUM &&
+ (lp->axienet_config->mactype == XAXIENET_1G) &&
+ !lp->eth_hasnobuf) {
csumstatus = (cur_p->app2 &
XAE_FULL_CSUM_STATUS_MASK) >> 3;
if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
@@ -743,19 +1285,21 @@ static void axienet_recv(struct net_device *ndev)
}
} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
skb->protocol == htons(ETH_P_IP) &&
- skb->len > 64) {
+ skb->len > 64 && !lp->eth_hasnobuf &&
+ (lp->axienet_config->mactype == XAXIENET_1G)) {
skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
skb->ip_summed = CHECKSUM_COMPLETE;
}
- netif_rx(skb);
+ netif_receive_skb(skb);
size += length;
packets++;
- new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
- if (!new_skb)
- return;
+ /* Ensure that the skb is completely updated
+ * prior to mapping it for DMA
+ */
+ wmb();
cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
lp->max_frm_size,
@@ -764,114 +1308,109 @@ static void axienet_recv(struct net_device *ndev)
cur_p->status = 0;
cur_p->skb = new_skb;
- if (++lp->rx_bd_ci >= lp->rx_bd_num)
- lp->rx_bd_ci = 0;
- cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+ if (++q->rx_bd_ci >= lp->rx_bd_num)
+ q->rx_bd_ci = 0;
+
+ /* Get the relevant BD status value */
+ rmb();
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->rxq_bd_v[q->rx_bd_ci];
+#else
+ cur_p = &q->rx_bd_v[q->rx_bd_ci];
+#endif
+ numbdfree++;
}
ndev->stats.rx_packets += packets;
ndev->stats.rx_bytes += size;
+ q->rx_packets += packets;
+ q->rx_bytes += size;
+
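+ /* Kick the DMA tail pointer only if at least one BD was processed */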
+ if (tail_p) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id) +
+ q->rx_offset, tail_p);
+#else
+ axienet_dma_bdout(q, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+#endif
+ }
- if (tail_p)
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ return numbdfree;
}
/**
- * axienet_tx_irq - Tx Done Isr.
- * @irq: irq number
- * @_ndev: net_device pointer
+ * xaxienet_rx_poll - Poll routine for rx packets (NAPI)
+ * @napi: napi structure pointer
+ * @quota: Max number of rx packets to be processed.
*
- * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
+ * This is the NAPI poll routine for the Rx path.
+ * It processes at most @quota packets per call.
*
- * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
- * to complete the BD processing.
+ * Return: number of packets received
*/
-static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
+int xaxienet_rx_poll(struct napi_struct *napi, int quota)
{
- u32 cr;
- unsigned int status;
- struct net_device *ndev = _ndev;
+ struct net_device *ndev = napi->dev;
struct axienet_local *lp = netdev_priv(ndev);
-
- status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
- axienet_start_xmit_done(lp->ndev);
- goto out;
+ int work_done = 0;
+ unsigned int status, cr;
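+ /* lp->napi[] holds one napi_struct per DMA queue, so the queue
+ * index follows from pointer arithmetic on the array.
+ */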
+ int map = napi - lp->napi;
+ struct axienet_dma_q *q = lp->dq[map];
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ spin_lock(&q->rx_lock);
+ status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ while ((status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK)) &&
+ (work_done < quota)) {
+ axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset, status);
+ if (status & XMCDMA_IRQ_ERR_MASK) {
+ dev_err(lp->dev, "Rx error 0x%x\n\r", status);
+ break;
+ }
+ work_done += axienet_recv(lp->ndev, quota - work_done, q);
+ status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset);
}
- if (!(status & XAXIDMA_IRQ_ALL_MASK))
- return IRQ_NONE;
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
- (lp->tx_bd_v[lp->tx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- schedule_work(&lp->dma_err_task);
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+ spin_unlock(&q->rx_lock);
+#else
+ spin_lock(&q->rx_lock);
+
+ status = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
+ while ((status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) &&
+ (work_done < quota)) {
+ axienet_dma_out32(q, XAXIDMA_RX_SR_OFFSET, status);
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ dev_err(lp->dev, "Rx error 0x%x\n\r", status);
+ break;
+ }
+ work_done += axienet_recv(lp->ndev, quota - work_done, q);
+ status = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
}
-out:
- return IRQ_HANDLED;
-}
-
-/**
- * axienet_rx_irq - Rx Isr.
- * @irq: irq number
- * @_ndev: net_device pointer
- *
- * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
- *
- * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
- * processing.
- */
-static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
-{
- u32 cr;
- unsigned int status;
- struct net_device *ndev = _ndev;
- struct axienet_local *lp = netdev_priv(ndev);
+ spin_unlock(&q->rx_lock);
+#endif
- status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
- axienet_recv(lp->ndev);
- goto out;
- }
- if (!(status & XAXIDMA_IRQ_ALL_MASK))
- return IRQ_NONE;
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
- (lp->rx_bd_v[lp->rx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- schedule_work(&lp->dma_err_task);
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+ if (work_done < quota) {
+ napi_complete(napi);
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ /* Enable the interrupts again */
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ XMCDMA_RX_OFFSET);
+ cr |= (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ XMCDMA_RX_OFFSET, cr);
+#else
+ /* Enable the interrupts again */
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+#endif
}
-out:
- return IRQ_HANDLED;
+
+ return work_done;
}
/**
@@ -903,8 +1442,25 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
return IRQ_HANDLED;
}
-static void axienet_dma_err_handler(struct work_struct *work);
+static int axienet_mii_init(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ int ret = 0;
+ /* Disable the MDIO interface till Axi Ethernet Reset is completed.
+ * When we do an Axi Ethernet reset, it resets the complete core
+ * including the MDIO. MDIO must be disabled before resetting
+ * and re-enabled afterwards.
+ * Hold MDIO bus lock to avoid MDIO accesses during the reset.
+ */
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
+ axienet_device_reset(ndev);
+ ret = axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
+
+ return ret;
+}
/**
* axienet_open - Driver open routine.
* @ndev: Pointer to net_device structure
@@ -920,27 +1476,22 @@ static void axienet_dma_err_handler(struct work_struct *work);
*/
static int axienet_open(struct net_device *ndev)
{
- int ret;
+ int ret = 0, i = 0;
struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q;
+ u32 reg, err;
dev_dbg(&ndev->dev, "axienet_open()\n");
- /* Disable the MDIO interface till Axi Ethernet Reset is completed.
- * When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting
- * and re-enabled afterwards.
- * Hold MDIO bus lock to avoid MDIO accesses during the reset.
- */
- mutex_lock(&lp->mii_bus->mdio_lock);
- axienet_mdio_disable(lp);
- ret = axienet_device_reset(ndev);
- if (ret == 0)
- ret = axienet_mdio_enable(lp);
- mutex_unlock(&lp->mii_bus->mdio_lock);
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G)
+ axienet_device_reset(ndev);
+ else
+ ret = axienet_mii_init(ndev);
+
if (ret < 0)
return ret;
- ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
+ ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, lp->phy_flags);
if (ret) {
dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
return ret;
@@ -948,19 +1499,154 @@ static int axienet_open(struct net_device *ndev)
phylink_start(lp->phylink);
- /* Enable worker thread for Axi DMA error handling */
- INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
-
- /* Enable interrupts for Axi DMA Tx */
- ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
- ndev->name, ndev);
- if (ret)
- goto err_tx_irq;
- /* Enable interrupts for Axi DMA Rx */
- ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
- ndev->name, ndev);
- if (ret)
- goto err_rx_irq;
+ if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
+ /* Enable tasklets for Axi DMA error handling */
+ for_each_rx_dma_queue(lp, i) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ tasklet_init(&lp->dma_err_tasklet[i],
+ axienet_mcdma_err_handler,
+ (unsigned long)lp->dq[i]);
+#else
+ tasklet_init(&lp->dma_err_tasklet[i],
+ axienet_dma_err_handler,
+ (unsigned long)lp->dq[i]);
+#endif
+
+ /* Enable NAPI scheduling before enabling Axi DMA Rx IRQ, or you
+ * might run into a race condition; the RX ISR disables IRQ processing
+ * before scheduling the NAPI function to complete the processing.
+ * If NAPI scheduling is (still) disabled at that time, no more RX IRQs
+ * will be processed as only the NAPI function re-enables them!
+ */
+ napi_enable(&lp->napi[i]);
+ }
+ for_each_tx_dma_queue(lp, i) {
+ struct axienet_dma_q *q = lp->dq[i];
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ /* Enable interrupts for Axi MCDMA Tx */
+ ret = request_irq(q->tx_irq, axienet_mcdma_tx_irq,
+ IRQF_SHARED, ndev->name, ndev);
+ if (ret)
+ goto err_tx_irq;
+#else
+ /* Enable interrupts for Axi DMA Tx */
+ ret = request_irq(q->tx_irq, axienet_tx_irq,
+ 0, ndev->name, ndev);
+ if (ret)
+ goto err_tx_irq;
+#endif
+ }
+ for_each_rx_dma_queue(lp, i) {
+ struct axienet_dma_q *q = lp->dq[i];
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ /* Enable interrupts for Axi MCDMA Rx */
+ ret = request_irq(q->rx_irq, axienet_mcdma_rx_irq,
+ IRQF_SHARED, ndev->name, ndev);
+ if (ret)
+ goto err_rx_irq;
+#else
+ /* Enable interrupts for Axi DMA Rx */
+ ret = request_irq(q->rx_irq, axienet_rx_irq,
+ 0, ndev->name, ndev);
+ if (ret)
+ goto err_rx_irq;
+#endif
+ }
+ }
+#ifdef CONFIG_XILINX_TSN_PTP
+ if (lp->is_tsn) {
+ INIT_WORK(&lp->tx_tstamp_work, axienet_tx_tstamp);
+ skb_queue_head_init(&lp->ptp_txq);
+
+ lp->ptp_rx_hw_pointer = 0;
+ lp->ptp_rx_sw_pointer = 0xff;
+
+ axienet_iow(lp, PTP_RX_CONTROL_OFFSET, PTP_RX_PACKET_CLEAR);
+
+ ret = request_irq(lp->ptp_rx_irq, axienet_ptp_rx_irq,
+ 0, "ptp_rx", ndev);
+ if (ret)
+ goto err_ptp_rx_irq;
+
+ ret = request_irq(lp->ptp_tx_irq, axienet_ptp_tx_irq,
+ 0, "ptp_tx", ndev);
+ if (ret)
+ goto err_ptp_rx_irq;
+ }
+#endif
+
+ if (lp->phy_mode == XXE_PHY_TYPE_USXGMII) {
+ netdev_dbg(ndev, "RX reg: 0x%x\n",
+ axienet_ior(lp, XXV_RCW1_OFFSET));
+ /* USXGMII setup at selected speed */
+ reg = axienet_ior(lp, XXV_USXGMII_AN_OFFSET);
+ reg &= ~USXGMII_RATE_MASK;
+ netdev_dbg(ndev, "usxgmii_rate %d\n", lp->usxgmii_rate);
+ switch (lp->usxgmii_rate) {
+ case SPEED_1000:
+ reg |= USXGMII_RATE_1G;
+ break;
+ case SPEED_2500:
+ reg |= USXGMII_RATE_2G5;
+ break;
+ case SPEED_10:
+ reg |= USXGMII_RATE_10M;
+ break;
+ case SPEED_100:
+ reg |= USXGMII_RATE_100M;
+ break;
+ case SPEED_5000:
+ reg |= USXGMII_RATE_5G;
+ break;
+ case SPEED_10000:
+ reg |= USXGMII_RATE_10G;
+ break;
+ default:
+ reg |= USXGMII_RATE_1G;
+ }
+ reg |= USXGMII_FD;
+ reg |= (USXGMII_EN | USXGMII_LINK_STS);
+ axienet_iow(lp, XXV_USXGMII_AN_OFFSET, reg);
+ reg |= USXGMII_AN_EN;
+ axienet_iow(lp, XXV_USXGMII_AN_OFFSET, reg);
+ /* AN Restart bit should be reset, set and then reset as per
+ * spec with a 1 ms delay for a rising edge trigger
+ */
+ axienet_iow(lp, XXV_USXGMII_AN_OFFSET,
+ reg & ~USXGMII_AN_RESTART);
+ mdelay(1);
+ axienet_iow(lp, XXV_USXGMII_AN_OFFSET,
+ reg | USXGMII_AN_RESTART);
+ mdelay(1);
+ axienet_iow(lp, XXV_USXGMII_AN_OFFSET,
+ reg & ~USXGMII_AN_RESTART);
+
+ /* Check block lock bit to make sure RX path is ok with
+ * USXGMII initialization.
+ */
+ err = readl_poll_timeout(lp->regs + XXV_STATRX_BLKLCK_OFFSET,
+ reg, (reg & XXV_RX_BLKLCK_MASK),
+ 100, DELAY_OF_ONE_MILLISEC);
+ if (err) {
+ netdev_err(ndev, "%s: USXGMII Block lock bit not set",
+ __func__);
+ ret = -ENODEV;
+ goto err_eth_irq;
+ }
+
+ err = readl_poll_timeout(lp->regs + XXV_USXGMII_AN_STS_OFFSET,
+ reg, (reg & USXGMII_AN_STS_COMP_MASK),
+ 1000000, DELAY_OF_ONE_MILLISEC);
+ if (err) {
+ netdev_err(ndev, "%s: USXGMII AN not complete",
+ __func__);
+ ret = -ENODEV;
+ goto err_eth_irq;
+ }
+
+ netdev_info(ndev, "USXGMII setup at %d\n", lp->usxgmii_rate);
+ }
+
/* Enable interrupts for Axi Ethernet core (if defined) */
if (lp->eth_irq > 0) {
ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
@@ -969,16 +1655,30 @@ static int axienet_open(struct net_device *ndev)
goto err_eth_irq;
}
+ netif_tx_start_all_queues(ndev);
return 0;
err_eth_irq:
- free_irq(lp->rx_irq, ndev);
+ while (i--) {
+ q = lp->dq[i];
+ free_irq(q->rx_irq, ndev);
+ }
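+ /* A failed Rx request_irq lands here with all Tx IRQs taken,
+ * so rewind i to free every Tx IRQ below.
+ */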
+ i = lp->num_tx_queues;
err_rx_irq:
- free_irq(lp->tx_irq, ndev);
+ while (i--) {
+ q = lp->dq[i];
+ free_irq(q->tx_irq, ndev);
+ }
err_tx_irq:
+ for_each_rx_dma_queue(lp, i)
+ napi_disable(&lp->napi[i]);
+#ifdef CONFIG_XILINX_TSN_PTP
+err_ptp_rx_irq:
+#endif
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
- cancel_work_sync(&lp->dma_err_task);
+ for_each_rx_dma_queue(lp, i)
+ tasklet_kill(&lp->dma_err_tasklet[i]);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
@@ -996,55 +1696,79 @@ err_tx_irq:
static int axienet_stop(struct net_device *ndev)
{
u32 cr, sr;
+ u32 i;
int count;
struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q;
dev_dbg(&ndev->dev, "axienet_close()\n");
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
- axienet_setoptions(ndev, lp->options &
- ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ axienet_iow(lp, XAE_IE_OFFSET, 0);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
+ for_each_tx_dma_queue(lp, i) {
+ q = lp->dq[i];
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET,
+ cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+
+ /* Give DMAs a chance to halt gracefully */
+ sr = axienet_dma_in32(q, XAXIDMA_TX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(q, XAXIDMA_TX_SR_OFFSET);
+ }
- axienet_iow(lp, XAE_IE_OFFSET, 0);
+ free_irq(q->tx_irq, ndev);
+ }
+ for_each_rx_dma_queue(lp, i) {
+ q = lp->dq[i];
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET,
+ cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+
+ /* Give DMAs a chance to halt gracefully */
+ sr = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
+ }
- /* Give DMAs a chance to halt gracefully */
- sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
- msleep(20);
- sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- }
+ /* Do a reset to ensure DMA is really stopped */
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
+ __axienet_device_reset(q);
+ axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
- sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
- msleep(20);
- sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- }
+ netif_stop_queue(ndev);
+ napi_disable(&lp->napi[i]);
+ tasklet_kill(&lp->dma_err_tasklet[i]);
- /* Do a reset to ensure DMA is really stopped */
- mutex_lock(&lp->mii_bus->mdio_lock);
- axienet_mdio_disable(lp);
- __axienet_device_reset(lp);
- axienet_mdio_enable(lp);
- mutex_unlock(&lp->mii_bus->mdio_lock);
+ free_irq(q->rx_irq, ndev);
+ }
+ }
- cancel_work_sync(&lp->dma_err_task);
+#ifdef CONFIG_XILINX_TSN_PTP
+ if (lp->is_tsn) {
+ free_irq(lp->ptp_tx_irq, ndev);
+ free_irq(lp->ptp_rx_irq, ndev);
+ }
+#endif
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
- free_irq(lp->tx_irq, ndev);
- free_irq(lp->rx_irq, ndev);
- axienet_dma_bd_release(ndev);
+ if (lp->temac_no != XAE_TEMAC2)
+ axienet_dma_bd_release(ndev);
return 0;
}
@@ -1086,15 +1810,209 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
static void axienet_poll_controller(struct net_device *ndev)
{
struct axienet_local *lp = netdev_priv(ndev);
- disable_irq(lp->tx_irq);
- disable_irq(lp->rx_irq);
- axienet_rx_irq(lp->tx_irq, ndev);
- axienet_tx_irq(lp->rx_irq, ndev);
- enable_irq(lp->tx_irq);
- enable_irq(lp->rx_irq);
+ int i;
+
+ for_each_tx_dma_queue(lp, i)
+ disable_irq(lp->dq[i]->tx_irq);
+ for_each_rx_dma_queue(lp, i)
+ disable_irq(lp->dq[i]->rx_irq);
+
+ for_each_rx_dma_queue(lp, i)
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axienet_mcdma_rx_irq(lp->dq[i]->rx_irq, ndev);
+#else
+ axienet_rx_irq(lp->dq[i]->rx_irq, ndev);
+#endif
+ for_each_tx_dma_queue(lp, i)
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axienet_mcdma_tx_irq(lp->dq[i]->tx_irq, ndev);
+#else
+ axienet_tx_irq(lp->dq[i]->tx_irq, ndev);
+#endif
+ for_each_tx_dma_queue(lp, i)
+ enable_irq(lp->dq[i]->tx_irq);
+ for_each_rx_dma_queue(lp, i)
+ enable_irq(lp->dq[i]->rx_irq);
+}
+#endif
+
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+/**
+ * axienet_set_timestamp_mode - sets up the hardware for the requested mode
+ * @lp: Pointer to axienet local structure
+ * @config: the hwtstamp configuration requested
+ *
+ * Return: 0 on success, Negative value on errors
+ */
+static int axienet_set_timestamp_mode(struct axienet_local *lp,
+ struct hwtstamp_config *config)
+{
+ u32 regval;
+
+#ifdef CONFIG_XILINX_TSN_PTP
+ if (lp->is_tsn) {
+ /* reserved for future extensions */
+ if (config->flags)
+ return -EINVAL;
+
+ if (config->tx_type < HWTSTAMP_TX_OFF ||
+ config->tx_type > HWTSTAMP_TX_ONESTEP_SYNC)
+ return -ERANGE;
+
+ lp->ptp_ts_type = config->tx_type;
+
+ /* On RX always timestamp everything */
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+ return 0;
+ }
+#endif
+
+ /* reserved for future extensions */
+ if (config->flags)
+ return -EINVAL;
+
+ /* Read the current value in the MAC TX CTRL register */
+ regval = axienet_ior(lp, XAE_TC_OFFSET);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ regval &= ~XAE_TC_INBAND1588_MASK;
+ break;
+ case HWTSTAMP_TX_ON:
+ config->tx_type = HWTSTAMP_TX_ON;
+ regval |= XAE_TC_INBAND1588_MASK;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ regval |= XAE_TC_INBAND1588_MASK;
+ break;
+ default:
+ return -ERANGE;
+ }
+
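+ /* The XXV (10G/25G) MAC does not expose the legacy TC/RCW1
+ * registers, so skip the writes for that MAC type.
+ */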
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ axienet_iow(lp, XAE_TC_OFFSET, regval);
+
+ /* Read the current value in the MAC RX RCW1 register */
+ regval = axienet_ior(lp, XAE_RCW1_OFFSET);
+
+ /* On RX always timestamp everything */
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ regval &= ~XAE_RCW1_INBAND1588_MASK;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ regval |= XAE_RCW1_INBAND1588_MASK;
+ }
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ axienet_iow(lp, XAE_RCW1_OFFSET, regval);
+
+ return 0;
}
+
+/**
+ * axienet_set_ts_config - user entry point for timestamp mode
+ * @lp: Pointer to axienet local structure
+ * @ifr: ioctl data
+ *
+ * Set the hardware to the requested mode. If unsupported, return an error
+ * with no changes. Otherwise, store the mode for future reference.
+ *
+ * Return: 0 on success, Negative value on errors
+ */
+static int axienet_set_ts_config(struct axienet_local *lp, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = axienet_set_timestamp_mode(lp, &config);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&lp->tstamp_config, &config, sizeof(lp->tstamp_config));
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+/**
+ * axienet_get_ts_config - return the current timestamp configuration
+ * to the user
+ * @lp: pointer to axienet local structure
+ * @ifr: ioctl data
+ *
+ * Return: 0 on success, Negative value on errors
+ */
+static int axienet_get_ts_config(struct axienet_local *lp, struct ifreq *ifr)
+{
+ struct hwtstamp_config *config = &lp->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config,
+ sizeof(*config)) ? -EFAULT : 0;
+}
+#endif
+
+/* Ioctl MII Interface */
+static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+ struct axienet_local *lp = netdev_priv(dev);
#endif
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (!dev->phydev)
+ return -EOPNOTSUPP;
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+ case SIOCSHWTSTAMP:
+ return axienet_set_ts_config(lp, rq);
+ case SIOCGHWTSTAMP:
+ return axienet_get_ts_config(lp, rq);
+#endif
+#ifdef CONFIG_XILINX_TSN_QBV
+ case SIOCCHIOCTL:
+ return axienet_set_schedule(dev, rq->ifr_data);
+ case SIOC_GET_SCHED:
+ return axienet_get_schedule(dev, rq->ifr_data);
+#endif
+#ifdef CONFIG_XILINX_TSN_QBR
+ case SIOC_PREEMPTION_CFG:
+ return axienet_preemption(dev, rq->ifr_data);
+ case SIOC_PREEMPTION_CTRL:
+ return axienet_preemption_ctrl(dev, rq->ifr_data);
+ case SIOC_PREEMPTION_STS:
+ return axienet_preemption_sts(dev, rq->ifr_data);
+ case SIOC_PREEMPTION_COUNTER:
+ return axienet_preemption_cnt(dev, rq->ifr_data);
+#ifdef CONFIG_XILINX_TSN_QBV
+ case SIOC_QBU_USER_OVERRIDE:
+ return axienet_qbu_user_override(dev, rq->ifr_data);
+ case SIOC_QBU_STS:
+ return axienet_qbu_sts(dev, rq->ifr_data);
+#endif
+#endif
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops axienet_netdev_ops = {
.ndo_open = axienet_open,
.ndo_stop = axienet_stop,
@@ -1103,6 +2021,7 @@ static const struct net_device_ops axienet_netdev_ops = {
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = axienet_set_multicast_list,
+ .ndo_do_ioctl = axienet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = axienet_poll_controller,
#endif
@@ -1177,28 +2096,20 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[15] = axienet_ior(lp, XAE_TC_OFFSET);
data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
- data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
+ data[18] = axienet_ior(lp, XAE_RMFC_OFFSET);
data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
- data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
- data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
- data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
- data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
+ data[23] = axienet_ior(lp, XAE_TEMAC_IS_OFFSET);
+ data[24] = axienet_ior(lp, XAE_TEMAC_IP_OFFSET);
+ data[25] = axienet_ior(lp, XAE_TEMAC_IE_OFFSET);
+ data[26] = axienet_ior(lp, XAE_TEMAC_IC_OFFSET);
data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
- data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
+ data[29] = axienet_ior(lp, XAE_FMC_OFFSET);
data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
- data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
- data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
- data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
- data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
}
static void axienet_ethtools_get_ringparam(struct net_device *ndev,
@@ -1258,7 +2169,7 @@ axienet_ethtools_get_pauseparam(struct net_device *ndev,
* axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
* settings.
* @ndev: Pointer to net_device structure
- * @epauseparm:Pointer to ethtool_pauseparam structure
+ * @epauseparm: Pointer to ethtool_pauseparam structure
*
* This implements ethtool command for enabling flow control on Rx and Tx
* paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
@@ -1291,12 +2202,24 @@ static int axienet_ethtools_get_coalesce(struct net_device *ndev,
{
u32 regval = 0;
struct axienet_local *lp = netdev_priv(ndev);
- regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
- regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
+ struct axienet_dma_q *q;
+ int i;
+
+ for_each_rx_dma_queue(lp, i) {
+ q = lp->dq[i];
+
+ regval = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ ecoalesce->rx_max_coalesced_frames +=
+ (regval & XAXIDMA_COALESCE_MASK)
+ >> XAXIDMA_COALESCE_SHIFT;
+ }
+ for_each_tx_dma_queue(lp, i) {
+ q = lp->dq[i];
+ regval = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ ecoalesce->tx_max_coalesced_frames +=
+ (regval & XAXIDMA_COALESCE_MASK)
+ >> XAXIDMA_COALESCE_SHIFT;
+ }
return 0;
}
@@ -1369,6 +2292,32 @@ axienet_ethtools_set_link_ksettings(struct net_device *ndev,
return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+/**
+ * axienet_ethtools_get_ts_info - Get h/w timestamping capabilities.
+ * @ndev: Pointer to net_device structure
+ * @info: Pointer to ethtool_ts_info structure
+ *
+ * Return: 0, on success, Non-zero error value on failure.
+ */
+static int axienet_ethtools_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ info->phc_index = 0;
+
+#ifdef CONFIG_XILINX_TSN_PTP
+ info->phc_index = axienet_phc_index;
+#endif
+ return 0;
+}
+#endif
+
static const struct ethtool_ops axienet_ethtool_ops = {
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
@@ -1380,8 +2329,16 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.set_pauseparam = axienet_ethtools_set_pauseparam,
.get_coalesce = axienet_ethtools_get_coalesce,
.set_coalesce = axienet_ethtools_set_coalesce,
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+ .get_ts_info = axienet_ethtools_get_ts_info,
+#endif
.get_link_ksettings = axienet_ethtools_get_link_ksettings,
.set_link_ksettings = axienet_ethtools_set_link_ksettings,
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ .get_sset_count = axienet_sset_count,
+ .get_ethtool_stats = axienet_get_stats,
+ .get_strings = axienet_strings,
+#endif
};
static void axienet_validate(struct phylink_config *config,
@@ -1464,6 +2421,9 @@ static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
switch (state->speed) {
+ case SPEED_2500:
+ emmc_reg |= XAE_EMMC_LINKSPD_2500;
+ break;
case SPEED_1000:
emmc_reg |= XAE_EMMC_LINKSPD_1000;
break;
@@ -1517,136 +2477,462 @@ static const struct phylink_mac_ops axienet_phylink_ops = {
.mac_link_up = axienet_mac_link_up,
};
-/**
- * axienet_dma_err_handler - Work queue task for Axi DMA Error
- * @work: pointer to work_struct
- *
- * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
- * Tx/Rx BDs.
- */
-static void axienet_dma_err_handler(struct work_struct *work)
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+static int __maybe_unused axienet_mcdma_probe(struct platform_device *pdev,
+ struct axienet_local *lp,
+ struct net_device *ndev)
{
- u32 axienet_status;
- u32 cr, i;
- struct axienet_local *lp = container_of(work, struct axienet_local,
- dma_err_task);
- struct net_device *ndev = lp->ndev;
- struct axidma_bd *cur_p;
+ int i, ret = 0;
+ struct axienet_dma_q *q;
+ struct device_node *np = NULL;
+ struct resource dmares;
+ const char *str;
- axienet_setoptions(ndev, lp->options &
- ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- /* Disable the MDIO interface till Axi Ethernet Reset is completed.
- * When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting
- * and re-enabled afterwards.
- * Hold MDIO bus lock to avoid MDIO accesses during the reset.
- */
- mutex_lock(&lp->mii_bus->mdio_lock);
- axienet_mdio_disable(lp);
- __axienet_device_reset(lp);
- axienet_mdio_enable(lp);
- mutex_unlock(&lp->mii_bus->mdio_lock);
+ ret = of_property_count_strings(pdev->dev.of_node, "xlnx,channel-ids");
+ if (ret < 0)
+ return -EINVAL;
- for (i = 0; i < lp->tx_bd_num; i++) {
- cur_p = &lp->tx_bd_v[i];
- if (cur_p->phys)
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- (cur_p->cntrl &
- XAXIDMA_BD_CTRL_LENGTH_MASK),
- DMA_TO_DEVICE);
- if (cur_p->skb)
- dev_kfree_skb_irq(cur_p->skb);
- cur_p->phys = 0;
- cur_p->cntrl = 0;
- cur_p->status = 0;
- cur_p->app0 = 0;
- cur_p->app1 = 0;
- cur_p->app2 = 0;
- cur_p->app3 = 0;
- cur_p->app4 = 0;
- cur_p->skb = NULL;
+ for_each_rx_dma_queue(lp, i) {
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ /* parent */
+ q->lp = lp;
+ lp->dq[i] = q;
+ ret = of_property_read_string_index(pdev->dev.of_node,
+ "xlnx,channel-ids", i,
+ &str);
+ if (ret < 0)
+ return ret;
+ ret = kstrtou16(str, 16, &q->chan_id);
+ if (ret)
+ return ret;
+ lp->qnum[i] = i;
+ lp->chan_num[i] = q->chan_id;
}
- for (i = 0; i < lp->rx_bd_num; i++) {
- cur_p = &lp->rx_bd_v[i];
- cur_p->status = 0;
- cur_p->app0 = 0;
- cur_p->app1 = 0;
- cur_p->app2 = 0;
- cur_p->app3 = 0;
- cur_p->app4 = 0;
+ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected",
+ 0);
+ if (!np) {
+ dev_err(&pdev->dev, "could not find DMA node\n");
+ return -ENODEV;
+ }
+
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get DMA resource\n");
+ return ret;
}
- lp->tx_bd_ci = 0;
- lp->tx_bd_tail = 0;
- lp->rx_bd_ci = 0;
-
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
+ ret = of_property_read_u8(np, "xlnx,addrwidth", (u8 *)&lp->dma_mask);
+ if (ret < 0 || lp->dma_mask < XAE_DMA_MASK_MIN ||
+ lp->dma_mask > XAE_DMA_MASK_MAX) {
+ dev_info(&pdev->dev, "missing/invalid xlnx,addrwidth property, using default\n");
+ lp->dma_mask = XAE_DMA_MASK_MIN;
+ }
+
+ lp->mcdma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
+ if (IS_ERR(lp->mcdma_regs)) {
+ dev_err(&pdev->dev, "iormeap failed for the dma\n");
+ ret = PTR_ERR(lp->mcdma_regs);
+ return ret;
+ }
+
+ axienet_mcdma_tx_probe(pdev, np, lp);
+ axienet_mcdma_rx_probe(pdev, lp, ndev);
+
+ return 0;
+}
+#endif
+
+static int __maybe_unused axienet_dma_probe(struct platform_device *pdev,
+ struct net_device *ndev)
+{
+ int i, ret;
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q;
+ struct device_node *np = NULL;
+ struct resource dmares;
+#ifdef CONFIG_XILINX_TSN
+ char dma_name[10];
+#endif
+
+ for_each_rx_dma_queue(lp, i) {
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ /* parent */
+ q->lp = lp;
+
+ lp->dq[i] = q;
+ }
+
+ /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
+ /* TODO handle error ret */
+ for_each_rx_dma_queue(lp, i) {
+ q = lp->dq[i];
+
+ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected",
+ i);
+ if (np) {
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret >= 0) {
+ q->dma_regs = devm_ioremap_resource(&pdev->dev,
+ &dmares);
+ if (IS_ERR(q->dma_regs))
+ return PTR_ERR(q->dma_regs);
+ } else {
+ dev_err(&pdev->dev, "unable to get DMA resource for %pOF\n",
+ np);
+ return -ENODEV;
+ }
+ q->eth_hasdre = of_property_read_bool(np,
+ "xlnx,include-dre");
+ ret = of_property_read_u8(np, "xlnx,addrwidth",
+ (u8 *)&lp->dma_mask);
+ if (ret < 0 || lp->dma_mask < XAE_DMA_MASK_MIN ||
+ lp->dma_mask > XAE_DMA_MASK_MAX) {
+ dev_info(&pdev->dev, "missing/invalid xlnx,addrwidth property, using default\n");
+ lp->dma_mask = XAE_DMA_MASK_MIN;
+ }
+
+ } else {
+ dev_err(&pdev->dev, "missing axistream-connected property\n");
+ return -EINVAL;
+ }
+ }
+
+#ifdef CONFIG_XILINX_TSN
+ if (lp->is_tsn) {
+ for_each_rx_dma_queue(lp, i) {
+ sprintf(dma_name, "dma%d_tx", i);
+ lp->dq[i]->tx_irq = platform_get_irq_byname(pdev,
+ dma_name);
+ sprintf(dma_name, "dma%d_rx", i);
+ lp->dq[i]->rx_irq = platform_get_irq_byname(pdev,
+ dma_name);
+ pr_info("lp->dq[%d]->tx_irq %d\n", i,
+ lp->dq[i]->tx_irq);
+ pr_info("lp->dq[%d]->rx_irq %d\n", i,
+ lp->dq[i]->rx_irq);
+ }
+ } else {
+#endif /* Remove this once the axienet device tree IRQ names comply with the DMA naming */
+ for_each_rx_dma_queue(lp, i) {
+ lp->dq[i]->tx_irq = irq_of_parse_and_map(np, 0);
+ lp->dq[i]->rx_irq = irq_of_parse_and_map(np, 1);
+ }
+#ifdef CONFIG_XILINX_TSN
+ }
+#endif
+
+ of_node_put(np);
+
+ for_each_rx_dma_queue(lp, i) {
+ struct axienet_dma_q *q = lp->dq[i];
+
+ spin_lock_init(&q->tx_lock);
+ spin_lock_init(&q->rx_lock);
+ }
+
+ for_each_rx_dma_queue(lp, i) {
+ netif_napi_add(ndev, &lp->napi[i], xaxienet_rx_poll,
+ XAXIENET_NAPI_WEIGHT);
+ }
+
+ return 0;
+}
+
+static int axienet_clk_init(struct platform_device *pdev,
+ struct clk **axi_aclk, struct clk **axis_clk,
+ struct clk **ref_clk, struct clk **tmpclk)
+{
+ int err;
+
+ *tmpclk = NULL;
+
+ /* The "ethernet_clk" is deprecated and will be removed sometime in
+ * the future. For proper clock usage check axiethernet binding
+ * documentation.
*/
- axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting
+ *axi_aclk = devm_clk_get(&pdev->dev, "ethernet_clk");
+ if (IS_ERR(*axi_aclk)) {
+ if (PTR_ERR(*axi_aclk) != -ENOENT) {
+ err = PTR_ERR(*axi_aclk);
+ return err;
+ }
+
+ *axi_aclk = devm_clk_get(&pdev->dev, "s_axi_lite_clk");
+ if (IS_ERR(*axi_aclk)) {
+ if (PTR_ERR(*axi_aclk) != -ENOENT) {
+ err = PTR_ERR(*axi_aclk);
+ return err;
+ }
+ *axi_aclk = NULL;
+ }
+
+ } else {
+ dev_warn(&pdev->dev, "ethernet_clk is deprecated and will be removed sometime in the future\n");
+ }
+
+ *axis_clk = devm_clk_get(&pdev->dev, "axis_clk");
+ if (IS_ERR(*axis_clk)) {
+ if (PTR_ERR(*axis_clk) != -ENOENT) {
+ err = PTR_ERR(*axis_clk);
+ return err;
+ }
+ *axis_clk = NULL;
+ }
+
+ *ref_clk = devm_clk_get(&pdev->dev, "ref_clk");
+ if (IS_ERR(*ref_clk)) {
+ if (PTR_ERR(*ref_clk) != -ENOENT) {
+ err = PTR_ERR(*ref_clk);
+ return err;
+ }
+ *ref_clk = NULL;
+ }
+
+ err = clk_prepare_enable(*axi_aclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_aclk/ethernet_clk (%d)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*axis_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axis_clk (%d)\n", err);
+ goto err_disable_axi_aclk;
+ }
+
+ err = clk_prepare_enable(*ref_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable ref_clk (%d)\n", err);
+ goto err_disable_axis_clk;
+ }
+
+ return 0;
+
+err_disable_axis_clk:
+ clk_disable_unprepare(*axis_clk);
+err_disable_axi_aclk:
+ clk_disable_unprepare(*axi_aclk);
+
+ return err;
+}
+
+static int axienet_dma_clk_init(struct platform_device *pdev)
+{
+ int err;
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ /* The "dma_clk" is deprecated and will be removed sometime in
+ * the future. For proper clock usage check axiethernet binding
+ * documentation.
*/
- axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
-
- axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
- axienet_status &= ~XAE_RCW1_RX_MASK;
- axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
-
- axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
- if (axienet_status & XAE_INT_RXRJECT_MASK)
- axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
- axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
- XAE_INT_RECV_ERROR_MASK : 0);
- axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+ lp->dma_tx_clk = devm_clk_get(&pdev->dev, "dma_clk");
+ if (IS_ERR(lp->dma_tx_clk)) {
+ if (PTR_ERR(lp->dma_tx_clk) != -ENOENT) {
+ err = PTR_ERR(lp->dma_tx_clk);
+ return err;
+ }
+
+ lp->dma_tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
+ if (IS_ERR(lp->dma_tx_clk)) {
+ if (PTR_ERR(lp->dma_tx_clk) != -ENOENT) {
+ err = PTR_ERR(lp->dma_tx_clk);
+ return err;
+ }
+ lp->dma_tx_clk = NULL;
+ }
+ } else {
+ dev_warn(&pdev->dev, "dma_clk is deprecated and will be removed sometime in the future\n");
+ }
+
+ lp->dma_rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
+ if (IS_ERR(lp->dma_rx_clk)) {
+ if (PTR_ERR(lp->dma_rx_clk) != -ENOENT) {
+ err = PTR_ERR(lp->dma_rx_clk);
+ return err;
+ }
+ lp->dma_rx_clk = NULL;
+ }
+
+ lp->dma_sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
+ if (IS_ERR(lp->dma_sg_clk)) {
+ if (PTR_ERR(lp->dma_sg_clk) != -ENOENT) {
+ err = PTR_ERR(lp->dma_sg_clk);
+ return err;
+ }
+ lp->dma_sg_clk = NULL;
+ }
+
+ err = clk_prepare_enable(lp->dma_tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk/dma_clk (%d)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(lp->dma_rx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
+ goto err_disable_txclk;
+ }
+
+ err = clk_prepare_enable(lp->dma_sg_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
+ goto err_disable_rxclk;
+ }
+
+ return 0;
+
+err_disable_rxclk:
+ clk_disable_unprepare(lp->dma_rx_clk);
+err_disable_txclk:
+ clk_disable_unprepare(lp->dma_tx_clk);
+
+ return err;
+}
- /* Sync default options with HW but leave receiver and
- * transmitter disabled.
+static void axienet_clk_disable(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ clk_disable_unprepare(lp->dma_sg_clk);
+ clk_disable_unprepare(lp->dma_tx_clk);
+ clk_disable_unprepare(lp->dma_rx_clk);
+ clk_disable_unprepare(lp->eth_sclk);
+ clk_disable_unprepare(lp->eth_refclk);
+ clk_disable_unprepare(lp->eth_dclk);
+ clk_disable_unprepare(lp->aclk);
+}
+
+static int xxvenet_clk_init(struct platform_device *pdev,
+ struct clk **axi_aclk, struct clk **axis_clk,
+ struct clk **tmpclk, struct clk **dclk)
+{
+ int err;
+
+ *tmpclk = NULL;
+
+ /* The "ethernet_clk" is deprecated and will be removed sometime in
+ * the future. For proper clock usage check axiethernet binding
+ * documentation.
*/
- axienet_setoptions(ndev, lp->options &
- ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- axienet_set_mac_address(ndev, NULL);
- axienet_set_multicast_list(ndev);
- axienet_setoptions(ndev, lp->options);
+ *axi_aclk = devm_clk_get(&pdev->dev, "ethernet_clk");
+ if (IS_ERR(*axi_aclk)) {
+ if (PTR_ERR(*axi_aclk) != -ENOENT) {
+ err = PTR_ERR(*axi_aclk);
+ return err;
+ }
+
+ *axi_aclk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(*axi_aclk)) {
+ if (PTR_ERR(*axi_aclk) != -ENOENT) {
+ err = PTR_ERR(*axi_aclk);
+ return err;
+ }
+ *axi_aclk = NULL;
+ }
+
+ } else {
+ dev_warn(&pdev->dev, "ethernet_clk is deprecated and will be removed sometime in the future\n");
+ }
+
+ *axis_clk = devm_clk_get(&pdev->dev, "rx_core_clk");
+ if (IS_ERR(*axis_clk)) {
+ if (PTR_ERR(*axis_clk) != -ENOENT) {
+ err = PTR_ERR(*axis_clk);
+ return err;
+ }
+ *axis_clk = NULL;
+ }
+
+ *dclk = devm_clk_get(&pdev->dev, "dclk");
+ if (IS_ERR(*dclk)) {
+ if (PTR_ERR(*dclk) != -ENOENT) {
+ err = PTR_ERR(*dclk);
+ return err;
+ }
+ *dclk = NULL;
+ }
+
+ err = clk_prepare_enable(*axi_aclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk/ethernet_clk (%d)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*axis_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axis_clk (%d)\n", err);
+ goto err_disable_axi_aclk;
+ }
+
+ err = clk_prepare_enable(*dclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable dclk (%d)\n", err);
+ goto err_disable_axis_clk;
+ }
+
+ return 0;
+
+err_disable_axis_clk:
+ clk_disable_unprepare(*axis_clk);
+err_disable_axi_aclk:
+ clk_disable_unprepare(*axi_aclk);
+
+ return err;
}
+static const struct axienet_config axienet_1g_config = {
+ .mactype = XAXIENET_1G,
+ .setoptions = axienet_setoptions,
+ .clk_init = axienet_clk_init,
+ .tx_ptplen = XAE_TX_PTP_LEN,
+};
+
+static const struct axienet_config axienet_2_5g_config = {
+ .mactype = XAXIENET_2_5G,
+ .setoptions = axienet_setoptions,
+ .clk_init = axienet_clk_init,
+ .tx_ptplen = XAE_TX_PTP_LEN,
+};
+
+static const struct axienet_config axienet_10g_config = {
+ .mactype = XAXIENET_LEGACY_10G,
+ .setoptions = axienet_setoptions,
+ .clk_init = xxvenet_clk_init,
+ .tx_ptplen = XAE_TX_PTP_LEN,
+};
+
+static const struct axienet_config axienet_10g25g_config = {
+ .mactype = XAXIENET_10G_25G,
+ .setoptions = xxvenet_setoptions,
+ .clk_init = xxvenet_clk_init,
+ .tx_ptplen = XXV_TX_PTP_LEN,
+};
+
+static const struct axienet_config axienet_usxgmii_config = {
+ .mactype = XAXIENET_10G_25G,
+ .setoptions = xxvenet_setoptions,
+ .clk_init = xxvenet_clk_init,
+ .tx_ptplen = 0,
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id axienet_of_match[] = {
+ { .compatible = "xlnx,axi-ethernet-1.00.a", .data = &axienet_1g_config},
+ { .compatible = "xlnx,axi-ethernet-1.01.a", .data = &axienet_1g_config},
+ { .compatible = "xlnx,axi-ethernet-2.01.a", .data = &axienet_1g_config},
+ { .compatible = "xlnx,axi-2_5-gig-ethernet-1.0",
+ .data = &axienet_2_5g_config},
+ { .compatible = "xlnx,ten-gig-eth-mac", .data = &axienet_10g_config},
+ { .compatible = "xlnx,xxv-ethernet-1.0",
+ .data = &axienet_10g25g_config},
+ { .compatible = "xlnx,tsn-ethernet-1.00.a", .data = &axienet_1g_config},
+ { .compatible = "xlnx,xxv-usxgmii-ethernet-1.0",
+ .data = &axienet_usxgmii_config},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, axienet_of_match);
+
/**
* axienet_probe - Axi Ethernet probe function.
* @pdev: Pointer to platform device structure.
@@ -1661,15 +2947,40 @@ static void axienet_dma_err_handler(struct work_struct *work)
*/
static int axienet_probe(struct platform_device *pdev)
{
+ int (*axienet_clk_init)(struct platform_device *pdev,
+ struct clk **axi_aclk, struct clk **axis_clk,
+ struct clk **ref_clk, struct clk **tmpclk) =
+ axienet_clk_init;
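+ /* Default clock init; reassigned below from the matched config's
+ * clk_init. Note the local pointer shadows the function of the same
+ * name, so the initializer is a self-reference and the value is only
+ * meaningful once reassigned.
+ */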
int ret;
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
struct device_node *np;
+#endif
struct axienet_local *lp;
struct net_device *ndev;
const void *mac_addr;
struct resource *ethres;
u32 value;
+ u16 num_queues = XAE_MAX_QUEUES;
+ bool slave = false;
+ bool is_tsn = false;
- ndev = alloc_etherdev(sizeof(*lp));
+
+ is_tsn = of_property_read_bool(pdev->dev.of_node, "xlnx,tsn");
+ ret = of_property_read_u16(pdev->dev.of_node, "xlnx,num-queues",
+ &num_queues);
+ if (ret) {
+ if (!is_tsn) {
+#ifndef CONFIG_AXIENET_HAS_MCDMA
+ num_queues = 1;
+#endif
+ }
+ }
+#ifdef CONFIG_XILINX_TSN
+ if (is_tsn && (num_queues < XAE_TSN_MIN_QUEUES ||
+ num_queues > XAE_MAX_QUEUES))
+ num_queues = XAE_MAX_QUEUES;
+#endif
+
+ ndev = alloc_etherdev_mq(sizeof(*lp), num_queues);
if (!ndev)
return -ENOMEM;
@@ -1689,8 +3000,18 @@ static int axienet_probe(struct platform_device *pdev)
lp->ndev = ndev;
lp->dev = &pdev->dev;
lp->options = XAE_OPTION_DEFAULTS;
+ lp->num_tx_queues = num_queues;
+ lp->num_rx_queues = num_queues;
lp->rx_bd_num = RX_BD_NUM_DEFAULT;
lp->tx_bd_num = TX_BD_NUM_DEFAULT;
+ lp->is_tsn = is_tsn;
+
+#ifdef CONFIG_XILINX_TSN
+ ret = of_property_read_u16(pdev->dev.of_node, "xlnx,num-tc",
+ &lp->num_tc);
+ if (ret || (lp->num_tc != 2 && lp->num_tc != 3))
+ lp->num_tc = XAE_MAX_TSN_TC;
+#endif
lp->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(lp->clk)) {
@@ -1707,31 +3028,51 @@ static int axienet_probe(struct platform_device *pdev)
ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
if (IS_ERR(lp->regs)) {
- dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
ret = PTR_ERR(lp->regs);
goto free_netdev;
}
lp->regs_start = ethres->start;
+#ifdef CONFIG_XILINX_TSN
+ slave = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,tsn-slave");
+ if (slave)
+ lp->temac_no = XAE_TEMAC2;
+ else
+ lp->temac_no = XAE_TEMAC1;
+#endif
+
/* Setup checksum offload, but default to off if not specified */
lp->features = 0;
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_node(axienet_of_match, pdev->dev.of_node);
+ if (match && match->data) {
+ lp->axienet_config = match->data;
+ clk_init_fn = lp->axienet_config->clk_init;
+ }
+ }
+
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
if (!ret) {
+ dev_info(&pdev->dev, "TX_CSUM %d\n", value);
+
switch (value) {
case 1:
lp->csum_offload_on_tx_path =
XAE_FEATURE_PARTIAL_TX_CSUM;
lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
/* Can checksum TCP/UDP over IPv4. */
- ndev->features |= NETIF_F_IP_CSUM;
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
break;
case 2:
lp->csum_offload_on_tx_path =
XAE_FEATURE_FULL_TX_CSUM;
lp->features |= XAE_FEATURE_FULL_TX_CSUM;
/* Can checksum TCP/UDP over IPv4. */
- ndev->features |= NETIF_F_IP_CSUM;
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
break;
default:
lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
@@ -1739,6 +3080,8 @@ static int axienet_probe(struct platform_device *pdev)
}
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
if (!ret) {
+ dev_info(&pdev->dev, "RX_CSUM %d\n", value);
+
switch (value) {
case 1:
lp->csum_offload_on_rx_path =
@@ -1762,86 +3105,135 @@ static int axienet_probe(struct platform_device *pdev)
*/
of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
- /* Start with the proprietary, and broken phy_type */
- ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
- if (!ret) {
- netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
- switch (value) {
- case XAE_PHY_TYPE_MII:
- lp->phy_mode = PHY_INTERFACE_MODE_MII;
- break;
- case XAE_PHY_TYPE_GMII:
- lp->phy_mode = PHY_INTERFACE_MODE_GMII;
- break;
- case XAE_PHY_TYPE_RGMII_2_0:
- lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
- break;
- case XAE_PHY_TYPE_SGMII:
- lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
- break;
- case XAE_PHY_TYPE_1000BASE_X:
- lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
- break;
- default:
- ret = -EINVAL;
+ /* The phy_mode is optional. When it is not specified it must not
+ * alter the driver behavior, so set it to an invalid value as the
+ * default.
+ */
+ lp->phy_mode = ~0;
+ of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_mode);
+
+ if (lp->phy_mode == XAE_PHY_TYPE_1000BASE_X)
+ lp->phy_flags = XAE_PHY_TYPE_1000BASE_X;
+ else
+ lp->phy_flags = 0;
+
+ /* Set default USXGMII rate */
+ lp->usxgmii_rate = SPEED_1000;
+ of_property_read_u32(pdev->dev.of_node, "xlnx,usxgmii-rate",
+ &lp->usxgmii_rate);
+
+ lp->eth_hasnobuf = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,eth-hasnobuf");
+ lp->eth_hasptp = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,eth-hasptp");
+
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ if (!lp->is_tsn) {
+ struct resource txtsres, rxtsres;
+
+ /* Find AXI Stream FIFO */
+ np = of_parse_phandle(pdev->dev.of_node, "axififo-connected",
+ 0);
+ if (!np) {
+ dev_err(&pdev->dev, "could not find TX Timestamp FIFO\n");
+ ret = -ENODEV;
goto free_netdev;
}
- } else {
- lp->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if ((int)lp->phy_mode < 0) {
- ret = -EINVAL;
+
+ ret = of_address_to_resource(np, 0, &txtsres);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to get TX Timestamp resource\n");
goto free_netdev;
}
+
+ lp->tx_ts_regs = devm_ioremap_resource(&pdev->dev, &txtsres);
+ if (IS_ERR(lp->tx_ts_regs)) {
+ dev_err(&pdev->dev, "could not map Tx Timestamp regs\n");
+ ret = PTR_ERR(lp->tx_ts_regs);
+ goto free_netdev;
+ }
+
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ np = of_parse_phandle(pdev->dev.of_node,
+ "xlnx,rxtsfifo", 0);
+ if (!np) {
+ dev_err(&pdev->dev,
+ "couldn't find rx-timestamp FIFO\n");
+ ret = -ENODEV;
+ goto free_netdev;
+ }
+
+ ret = of_address_to_resource(np, 0, &rxtsres);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to get rx-timestamp resource\n");
+ goto free_netdev;
+ }
+
+ lp->rx_ts_regs = devm_ioremap_resource(&pdev->dev,
+ &rxtsres);
+ if (IS_ERR(lp->rx_ts_regs)) {
+ dev_err(&pdev->dev,
+ "couldn't map rx-timestamp regs\n");
+ ret = PTR_ERR(lp->rx_ts_regs);
+ goto free_netdev;
+ }
+ lp->tx_ptpheader = devm_kzalloc(&pdev->dev,
+ XXVENET_TS_HEADER_LEN,
+ GFP_KERNEL);
+ if (!lp->tx_ptpheader) {
+ ret = -ENOMEM;
+ goto free_netdev;
+ }
+ }
+
+ of_node_put(np);
}
+#endif
- /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
- np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
- if (np) {
- struct resource dmares;
+ if (!slave) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ ret = axienet_mcdma_probe(pdev, lp, ndev);
+#else
+ ret = axienet_dma_probe(pdev, ndev);
+#endif
+ if (ret) {
+ pr_err("Getting DMA resource failed\n");
+ goto free_netdev;
+ }
- ret = of_address_to_resource(np, 0, &dmares);
+ if (dma_set_mask_and_coherent(lp->dev, DMA_BIT_MASK(lp->dma_mask)) != 0) {
+ dev_warn(&pdev->dev, "default to %d-bit dma mask\n", XAE_DMA_MASK_MIN);
+ if (dma_set_mask_and_coherent(lp->dev, DMA_BIT_MASK(XAE_DMA_MASK_MIN)) != 0) {
+ dev_err(&pdev->dev, "dma_set_mask_and_coherent failed, aborting\n");
+ goto free_netdev;
+ }
+ }
+
+ ret = axienet_dma_clk_init(pdev);
if (ret) {
- dev_err(&pdev->dev,
- "unable to get DMA resource\n");
- of_node_put(np);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "DMA clock init failed %d\n", ret);
goto free_netdev;
}
- lp->dma_regs = devm_ioremap_resource(&pdev->dev,
- &dmares);
- lp->rx_irq = irq_of_parse_and_map(np, 1);
- lp->tx_irq = irq_of_parse_and_map(np, 0);
- of_node_put(np);
- lp->eth_irq = platform_get_irq(pdev, 0);
- } else {
- /* Check for these resources directly on the Ethernet node. */
- struct resource *res = platform_get_resource(pdev,
- IORESOURCE_MEM, 1);
- lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
- lp->rx_irq = platform_get_irq(pdev, 1);
- lp->tx_irq = platform_get_irq(pdev, 0);
- lp->eth_irq = platform_get_irq(pdev, 2);
- }
- if (IS_ERR(lp->dma_regs)) {
- dev_err(&pdev->dev, "could not map DMA regs\n");
- ret = PTR_ERR(lp->dma_regs);
- goto free_netdev;
- }
- if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
- dev_err(&pdev->dev, "could not determine irqs\n");
- ret = -ENOMEM;
- goto free_netdev;
}
/* Check for Ethernet core IRQ (optional) */
if (lp->eth_irq <= 0)
dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
+ ret = clk_init_fn(pdev, &lp->aclk, &lp->eth_sclk,
+ &lp->eth_refclk, &lp->eth_dclk);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Ethernet clock init failed %d\n", ret);
+ goto err_disable_clk;
+ }
+
/* Retrieve the MAC address */
mac_addr = of_get_mac_address(pdev->dev.of_node);
if (IS_ERR(mac_addr)) {
dev_warn(&pdev->dev, "could not find MAC address property: %ld\n",
PTR_ERR(mac_addr));
mac_addr = NULL;
+ goto err_disable_clk;
}
axienet_set_mac_address(ndev, mac_addr);
@@ -1868,14 +3260,52 @@ static int axienet_probe(struct platform_device *pdev)
goto free_netdev;
}
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ /* Create sysfs file entries for the device */
+ ret = axeinet_mcdma_create_sysfs(&lp->dev->kobj);
+ if (ret < 0) {
+ dev_err(lp->dev, "unable to create sysfs entries\n");
+ goto err_disable_clk;
+ }
+#endif
+
ret = register_netdev(lp->ndev);
if (ret) {
dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
- goto free_netdev;
+ axienet_mdio_teardown(lp);
+ goto err_disable_clk;
}
- return 0;
+#ifdef CONFIG_XILINX_TSN_PTP
+ if (lp->is_tsn) {
+ lp->ptp_rx_irq = platform_get_irq_byname(pdev, "ptp_rx");
+
+ lp->ptp_tx_irq = platform_get_irq_byname(pdev, "ptp_tx");
+
+ lp->qbv_irq = platform_get_irq_byname(pdev, "qbv_irq");
+
+ pr_debug("ptp RX irq: %d\n", lp->ptp_rx_irq);
+ pr_debug("ptp TX irq: %d\n", lp->ptp_tx_irq);
+ pr_debug("qbv_irq: %d\n", lp->qbv_irq);
+
+ spin_lock_init(&lp->ptp_tx_lock);
+
+ if (lp->temac_no == XAE_TEMAC1) {
+ axienet_ptp_timer_probe(
+ (lp->regs + XAE_RTC_OFFSET), pdev);
+ /* enable VLAN */
+ lp->options |= XAE_OPTION_VLAN;
+ axienet_setoptions(lp->ndev, lp->options);
+#ifdef CONFIG_XILINX_TSN_QBV
+ axienet_qbv_init(ndev);
+#endif
+ }
+ }
+#endif
+ return 0;
+err_disable_clk:
+ axienet_clk_disable(pdev);
free_netdev:
free_netdev(ndev);
@@ -1886,17 +3316,34 @@ static int axienet_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct axienet_local *lp = netdev_priv(ndev);
+ int i;
unregister_netdev(ndev);
+ axienet_clk_disable(pdev);
if (lp->phylink)
phylink_destroy(lp->phylink);
- axienet_mdio_teardown(lp);
+ if (lp->mii_bus)
+ axienet_mdio_teardown(lp);
+#ifdef CONFIG_XILINX_TSN_PTP
+ axienet_ptp_timer_remove(lp->timer_priv);
+#ifdef CONFIG_XILINX_TSN_QBV
+ axienet_qbv_remove(ndev);
+#endif
+#endif
+ if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
+ for_each_rx_dma_queue(lp, i)
+ netif_napi_del(&lp->napi[i]);
+ }
+
if (lp->clk)
clk_disable_unprepare(lp->clk);
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axeinet_mcdma_remove_sysfs(&lp->dev->kobj);
+#endif
of_node_put(lp->phy_node);
lp->phy_node = NULL;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mcdma.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mcdma.c
new file mode 100644
index 000000000000..b0b0b98ba9cf
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mcdma.c
@@ -0,0 +1,1043 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Xilinx AXI Ethernet (MCDMA programming)
+ *
+ * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
+ * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
+ * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
+ * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2010 - 2012 Xilinx, Inc.
+ * Copyright (C) 2018 Xilinx, Inc. All rights reserved.
+ *
+ * This file contains helper functions for AXI MCDMA TX and RX programming.
+ */
+
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
+
+#include "xilinx_axienet.h"
+
+struct axienet_stat {
+ const char *name;
+};
+
+static struct axienet_stat axienet_get_tx_strings_stats[] = {
+ { "txq0_packets" },
+ { "txq0_bytes" },
+ { "txq1_packets" },
+ { "txq1_bytes" },
+ { "txq2_packets" },
+ { "txq2_bytes" },
+ { "txq3_packets" },
+ { "txq3_bytes" },
+ { "txq4_packets" },
+ { "txq4_bytes" },
+ { "txq5_packets" },
+ { "txq5_bytes" },
+ { "txq6_packets" },
+ { "txq6_bytes" },
+ { "txq7_packets" },
+ { "txq7_bytes" },
+ { "txq8_packets" },
+ { "txq8_bytes" },
+ { "txq9_packets" },
+ { "txq9_bytes" },
+ { "txq10_packets" },
+ { "txq10_bytes" },
+ { "txq11_packets" },
+ { "txq11_bytes" },
+ { "txq12_packets" },
+ { "txq12_bytes" },
+ { "txq13_packets" },
+ { "txq13_bytes" },
+ { "txq14_packets" },
+ { "txq14_bytes" },
+ { "txq15_packets" },
+ { "txq15_bytes" },
+};
+
+static struct axienet_stat axienet_get_rx_strings_stats[] = {
+ { "rxq0_packets" },
+ { "rxq0_bytes" },
+ { "rxq1_packets" },
+ { "rxq1_bytes" },
+ { "rxq2_packets" },
+ { "rxq2_bytes" },
+ { "rxq3_packets" },
+ { "rxq3_bytes" },
+ { "rxq4_packets" },
+ { "rxq4_bytes" },
+ { "rxq5_packets" },
+ { "rxq5_bytes" },
+ { "rxq6_packets" },
+ { "rxq6_bytes" },
+ { "rxq7_packets" },
+ { "rxq7_bytes" },
+ { "rxq8_packets" },
+ { "rxq8_bytes" },
+ { "rxq9_packets" },
+ { "rxq9_bytes" },
+ { "rxq10_packets" },
+ { "rxq10_bytes" },
+ { "rxq11_packets" },
+ { "rxq11_bytes" },
+ { "rxq12_packets" },
+ { "rxq12_bytes" },
+ { "rxq13_packets" },
+ { "rxq13_bytes" },
+ { "rxq14_packets" },
+ { "rxq14_bytes" },
+ { "rxq15_packets" },
+ { "rxq15_bytes" },
+};
+
+/**
+ * axienet_mcdma_tx_bd_free - Release MCDMA Tx buffer descriptor rings
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * This function is used to release the descriptors allocated in
+ * axienet_mcdma_tx_q_init.
+ */
+void __maybe_unused axienet_mcdma_tx_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ if (q->txq_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*q->txq_bd_v) * lp->tx_bd_num,
+ q->txq_bd_v,
+ q->tx_bd_p);
+ }
+ if (q->tx_bufs) {
+ dma_free_coherent(ndev->dev.parent,
+ XAE_MAX_PKT_LEN * lp->tx_bd_num,
+ q->tx_bufs,
+ q->tx_bufs_dma);
+ }
+}
+
+/**
+ * axienet_mcdma_rx_bd_free - Release MCDMA Rx buffer descriptor rings
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * This function is used to release the descriptors allocated in
+ * axienet_mcdma_rx_q_init.
+ */
+void __maybe_unused axienet_mcdma_rx_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ int i;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ for (i = 0; i < lp->rx_bd_num; i++) {
+ dma_unmap_single(ndev->dev.parent, q->rxq_bd_v[i].phys,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ dev_kfree_skb((struct sk_buff *)
+ (q->rxq_bd_v[i].sw_id_offset));
+ }
+
+ if (q->rxq_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*q->rxq_bd_v) * lp->rx_bd_num,
+ q->rxq_bd_v,
+ q->rx_bd_p);
+ }
+}
+
+/**
+ * axienet_mcdma_tx_q_init - Setup buffer descriptor rings for individual Axi
+ * MCDMA-Tx
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * Return: 0 on success, -ENOMEM on failure
+ *
+ * This function is a helper function for axienet_dma_bd_init.
+ */
+int __maybe_unused axienet_mcdma_tx_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ u32 cr, chan_en;
+ int i;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ q->tx_bd_ci = 0;
+ q->tx_bd_tail = 0;
+
+ q->txq_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*q->txq_bd_v) * lp->tx_bd_num,
+ &q->tx_bd_p, GFP_KERNEL);
+ if (!q->txq_bd_v)
+ goto out;
+
+ if (!q->eth_hasdre) {
+ q->tx_bufs = dma_alloc_coherent(ndev->dev.parent,
+ XAE_MAX_PKT_LEN * lp->tx_bd_num,
+ &q->tx_bufs_dma,
+ GFP_KERNEL);
+ if (!q->tx_bufs)
+ goto out;
+
+ for (i = 0; i < lp->tx_bd_num; i++)
+ q->tx_buf[i] = &q->tx_bufs[i * XAE_MAX_PKT_LEN];
+ }
+
+ for (i = 0; i < lp->tx_bd_num; i++) {
+ q->txq_bd_v[i].next = q->tx_bd_p +
+ sizeof(*q->txq_bd_v) *
+ ((i + 1) % lp->tx_bd_num);
+ }
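+
+ /* The descriptors now form a ring: each BD's next field holds the
+ * DMA address of the following BD and the modulo wraps the last BD
+ * back to BD 0, e.g. with tx_bd_num = 4 the chain is
+ * 0 -> 1 -> 2 -> 3 -> 0. The engine follows these physical
+ * addresses, so the ring base programmed below is q->tx_bd_p, not
+ * the virtual txq_bd_v.
+ */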
+
+ /* Start updating the Tx channel control register */
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XMCDMA_COALESCE_MASK)) |
+ ((lp->coalesce_count_tx) << XMCDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XMCDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XMCDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XMCDMA_IRQ_ALL_MASK;
+ /* Write to the Tx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting.
+ */
+ axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id),
+ q->tx_bd_p);
+ cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET);
+ axienet_dma_out32(q, XMCDMA_CR_OFFSET,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id),
+ cr | XMCDMA_CR_RUNSTOP_MASK);
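+
+ /* CHEN is a per-channel enable bitmap: bit (chan_id - 1) enables a
+ * channel, so channel 1 maps to bit 0 (0x1) and channel 16 to bit 15
+ * (0x8000). The read-modify-write keeps other channels enabled.
+ */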
+ chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET);
+ chan_en |= (1 << (q->chan_id - 1));
+ axienet_dma_out32(q, XMCDMA_CHEN_OFFSET, chan_en);
+
+ return 0;
+out:
+ for_each_tx_dma_queue(lp, i) {
+ axienet_mcdma_tx_bd_free(ndev, lp->dq[i]);
+ }
+ return -ENOMEM;
+}
+
+/**
+ * axienet_mcdma_rx_q_init - Setup buffer descriptor rings for individual Axi
+ * MCDMA-Rx
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * Return: 0 on success, -ENOMEM on failure
+ *
+ * This function is a helper function for axienet_dma_bd_init.
+ */
+int __maybe_unused axienet_mcdma_rx_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ u32 cr, chan_en;
+ int i;
+ struct sk_buff *skb;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ q->rx_bd_ci = 0;
+ q->rx_offset = XMCDMA_CHAN_RX_OFFSET;
+
+ q->rxq_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*q->rxq_bd_v) * lp->rx_bd_num,
+ &q->rx_bd_p, GFP_KERNEL);
+ if (!q->rxq_bd_v)
+ goto out;
+
+ for (i = 0; i < lp->rx_bd_num; i++) {
+ q->rxq_bd_v[i].next = q->rx_bd_p +
+ sizeof(*q->rxq_bd_v) *
+ ((i + 1) % lp->rx_bd_num);
+
+ skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+ if (!skb)
+ goto out;
+
+ /* Ensure that the skb is completely updated
+ * prior to mapping it for DMA
+ */
+ wmb();
+
+ q->rxq_bd_v[i].sw_id_offset = (phys_addr_t)skb;
+ q->rxq_bd_v[i].phys = dma_map_single(ndev->dev.parent,
+ skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ q->rxq_bd_v[i].cntrl = lp->max_frm_size;
+ }
+
+ /* Start updating the Rx channel control register */
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XMCDMA_COALESCE_MASK) |
+ ((lp->coalesce_count_rx) << XMCDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XMCDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XMCDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XMCDMA_IRQ_ALL_MASK;
+ /* Write to the Rx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset, cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id) +
+ q->rx_offset, q->rx_bd_p);
+ cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET + q->rx_offset);
+ axienet_dma_out32(q, XMCDMA_CR_OFFSET + q->rx_offset,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) + q->rx_offset,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id) +
+ q->rx_offset, q->rx_bd_p + (sizeof(*q->rxq_bd_v) *
+ (lp->rx_bd_num - 1)));
+ chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET + q->rx_offset);
+ chan_en |= (1 << (q->chan_id - 1));
+ axienet_dma_out32(q, XMCDMA_CHEN_OFFSET + q->rx_offset, chan_en);
+
+ return 0;
+
+out:
+ for_each_rx_dma_queue(lp, i) {
+ axienet_mcdma_rx_bd_free(ndev, lp->dq[i]);
+ }
+ return -ENOMEM;
+}
+
+static inline int get_mcdma_tx_q(struct axienet_local *lp, u32 chan_id)
+{
+ int i;
+
+ for_each_tx_dma_queue(lp, i) {
+ if (chan_id == lp->chan_num[i])
+ return lp->qnum[i];
+ }
+
+ return -ENODEV;
+}
+
+static inline int get_mcdma_rx_q(struct axienet_local *lp, u32 chan_id)
+{
+ int i;
+
+ for_each_rx_dma_queue(lp, i) {
+ if (chan_id == lp->chan_num[i])
+ return lp->qnum[i];
+ }
+
+ return -ENODEV;
+}
+
+static inline int map_dma_q_txirq(int irq, struct axienet_local *lp)
+{
+ int i, chan_sermask;
+ u16 chan_id = 1;
+ struct axienet_dma_q *q = lp->dq[0];
+
+ chan_sermask = axienet_dma_in32(q, XMCDMA_TXINT_SER_OFFSET);
+
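+ /* The serviced-interrupt register is a bitmap with bit (chan_id - 1)
+ * set for each channel needing service. The scan below returns the
+ * channel id of the lowest set bit, i.e. it behaves like
+ * ffs(chan_sermask); the "i != 0" guard stops the walk if the shift
+ * ever overflows to zero.
+ */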
+ for (i = 1, chan_id = 1; i != 0 && i <= chan_sermask;
+ i <<= 1, chan_id++) {
+ if (chan_sermask & i)
+ return chan_id;
+ }
+
+ return -ENODEV;
+}
+
+irqreturn_t __maybe_unused axienet_mcdma_tx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ int i, j = map_dma_q_txirq(irq, lp);
+ struct axienet_dma_q *q;
+
+ if (j < 0)
+ return IRQ_NONE;
+
+ i = get_mcdma_tx_q(lp, j);
+ q = lp->dq[i];
+
+ status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id));
+ if (status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK)) {
+ axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id), status);
+ axienet_start_xmit_done(lp->ndev, q);
+ goto out;
+ }
+ if (!(status & XMCDMA_IRQ_ALL_MASK))
+ return IRQ_NONE;
+ if (status & XMCDMA_IRQ_ERR_MASK) {
+ dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: %pa\n",
+ &q->txq_bd_v[q->tx_bd_ci].phys);
+
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XMCDMA_IRQ_ALL_MASK);
+ /* Finally write to the Tx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);
+
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XMCDMA_IRQ_ALL_MASK);
+ /* write to the Rx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset, cr);
+
+ tasklet_schedule(&lp->dma_err_tasklet[i]);
+ axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset, status);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+static inline int map_dma_q_rxirq(int irq, struct axienet_local *lp)
+{
+ int i, chan_sermask;
+ u16 chan_id = 1;
+ struct axienet_dma_q *q = lp->dq[0];
+
+ chan_sermask = axienet_dma_in32(q, XMCDMA_RXINT_SER_OFFSET +
+ q->rx_offset);
+
+ for (i = 1, chan_id = 1; i != 0 && i <= chan_sermask;
+ i <<= 1, chan_id++) {
+ if (chan_sermask & i)
+ return chan_id;
+ }
+
+ return -ENODEV;
+}
+
+irqreturn_t __maybe_unused axienet_mcdma_rx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ int i, j = map_dma_q_rxirq(irq, lp);
+ struct axienet_dma_q *q;
+
+ if (j < 0)
+ return IRQ_NONE;
+
+ i = get_mcdma_rx_q(lp, j);
+ q = lp->dq[i];
+
+ status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ if (status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK)) {
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ cr &= ~(XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset, cr);
+ napi_schedule(&lp->napi[i]);
+ }
+
+ if (!(status & XMCDMA_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ if (status & XMCDMA_IRQ_ERR_MASK) {
+ dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: %pa\n",
+ &q->rxq_bd_v[q->rx_bd_ci].phys);
+
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XMCDMA_IRQ_ALL_MASK);
+ /* Finally write to the Tx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);
+
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XMCDMA_IRQ_ALL_MASK);
+ /* write to the Rx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset, cr);
+
+ tasklet_schedule(&lp->dma_err_tasklet[i]);
+ axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset, status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+void axienet_strings(struct net_device *ndev, u32 sset, u8 *data)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q;
+ int i, j, k = 0;
+
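+ /* The stat strings are emitted in (packets, bytes) pairs per queue,
+ * with the name index k anchored at (chan_id - 1) * 2, so a queue
+ * bound to MCDMA channel 3 emits "txq2_packets"/"txq2_bytes". i
+ * counts emitted strings and j walks the queues, advancing once for
+ * every two strings.
+ */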
+ for (i = 0, j = 0; i < AXIENET_TX_SSTATS_LEN(lp);) {
+ if (j >= lp->num_tx_queues)
+ break;
+ q = lp->dq[j];
+ if (i % 2 == 0)
+ k = (q->chan_id - 1) * 2;
+ if (sset == ETH_SS_STATS)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ axienet_get_tx_strings_stats[k].name,
+ ETH_GSTRING_LEN);
+ ++i;
+ k++;
+ if (i % 2 == 0)
+ ++j;
+ }
+ k = 0;
+ for (j = 0; i < AXIENET_TX_SSTATS_LEN(lp) +
+ AXIENET_RX_SSTATS_LEN(lp);) {
+ if (j >= lp->num_rx_queues)
+ break;
+ q = lp->dq[j];
+ if (i % 2 == 0)
+ k = (q->chan_id - 1) * 2;
+ if (sset == ETH_SS_STATS)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ axienet_get_rx_strings_stats[k].name,
+ ETH_GSTRING_LEN);
+ ++i;
+ k++;
+ if (i % 2 == 0)
+ ++j;
+ }
+}
+
+int axienet_sset_count(struct net_device *ndev, int sset)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return (AXIENET_TX_SSTATS_LEN(lp) + AXIENET_RX_SSTATS_LEN(lp));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void axienet_get_stats(struct net_device *ndev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q;
+ unsigned int i = 0, j;
+
+ for (i = 0, j = 0; i < AXIENET_TX_SSTATS_LEN(lp);) {
+ if (j >= lp->num_tx_queues)
+ break;
+
+ q = lp->dq[j];
+ data[i++] = q->tx_packets;
+ data[i++] = q->tx_bytes;
+ ++j;
+ }
+ for (j = 0; i < AXIENET_TX_SSTATS_LEN(lp) +
+ AXIENET_RX_SSTATS_LEN(lp);) {
+ if (j >= lp->num_rx_queues)
+ break;
+
+ q = lp->dq[j];
+ data[i++] = q->rx_packets;
+ data[i++] = q->rx_bytes;
+ ++j;
+ }
+}
+
+/**
+ * axienet_mcdma_err_handler - Tasklet handler for Axi MCDMA Error
+ * @data: Tasklet data, a pointer to the struct axienet_dma_q being reset
+ *
+ * Resets the Axi MCDMA and Axi Ethernet devices, and reconfigures the
+ * Tx/Rx BDs.
+ */
+void __maybe_unused axienet_mcdma_err_handler(unsigned long data)
+{
+ u32 axienet_status;
+ u32 cr, i, chan_en;
+ int mdio_mcreg = 0;
+ struct axienet_dma_q *q = (struct axienet_dma_q *)data;
+ struct axienet_local *lp = q->lp;
+ struct net_device *ndev = lp->ndev;
+ struct aximcdma_bd *cur_p;
+
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
+ axienet_mdio_wait_until_ready(lp);
+ /* Disable the MDIO interface till the Axi Ethernet reset is
+ * completed. An Axi Ethernet reset resets the complete core,
+ * including the MDIO, so if MDIO is not disabled when the reset
+ * starts it will be broken afterwards.
+ */
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
+ ~XAE_MDIO_MC_MDIOEN_MASK));
+ }
+
+ __axienet_device_reset(q, XAXIDMA_TX_CR_OFFSET);
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
+ axienet_mdio_wait_until_ready(lp);
+ }
+
+ for (i = 0; i < lp->tx_bd_num; i++) {
+ cur_p = &q->txq_bd_v[i];
+ if (cur_p->phys)
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ (cur_p->cntrl &
+ XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ if (cur_p->tx_skb)
+ dev_kfree_skb_irq((struct sk_buff *)cur_p->tx_skb);
+ cur_p->phys = 0;
+ cur_p->cntrl = 0;
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ cur_p->sw_id_offset = 0;
+ cur_p->tx_skb = 0;
+ }
+
+ for (i = 0; i < lp->rx_bd_num; i++) {
+ cur_p = &q->rxq_bd_v[i];
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ }
+
+ q->tx_bd_ci = 0;
+ q->tx_bd_tail = 0;
+ q->rx_bd_ci = 0;
+
+ /* Start updating the Rx channel control register */
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XMCDMA_COALESCE_MASK) |
+ ((lp->coalesce_count_rx) << XMCDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XMCDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XMCDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XMCDMA_IRQ_ALL_MASK;
+ /* Write to the Rx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset, cr);
+
+ /* Start updating the Tx channel control register */
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XMCDMA_COALESCE_MASK)) |
+ ((lp->coalesce_count_tx) << XMCDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XMCDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XMCDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XMCDMA_IRQ_ALL_MASK;
+ /* Write to the Tx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id) +
+ q->rx_offset, q->rx_bd_p);
+ cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET + q->rx_offset);
+ axienet_dma_out32(q, XMCDMA_CR_OFFSET + q->rx_offset,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) + q->rx_offset,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id) +
+ q->rx_offset, q->rx_bd_p + (sizeof(*q->rxq_bd_v) *
+ (lp->rx_bd_num - 1)));
+ chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET + q->rx_offset);
+ chan_en |= (1 << (q->chan_id - 1));
+ axienet_dma_out32(q, XMCDMA_CHEN_OFFSET + q->rx_offset, chan_en);
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting.
+ */
+ axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id),
+ q->tx_bd_p);
+ cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET);
+ axienet_dma_out32(q, XMCDMA_CR_OFFSET,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id),
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET);
+ chan_en |= (1 << (q->chan_id - 1));
+ axienet_dma_out32(q, XMCDMA_CHEN_OFFSET, chan_en);
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
+ axienet_status &= ~XAE_RCW1_RX_MASK;
+ axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+ }
+
+ if (lp->axienet_config->mactype == XAXIENET_1G && !lp->eth_hasnobuf) {
+ axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
+ if (axienet_status & XAE_INT_RXRJECT_MASK)
+ axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ }
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ axienet_set_mac_address(ndev, NULL);
+ axienet_set_multicast_list(ndev);
+ lp->axienet_config->setoptions(ndev, lp->options);
+}
+
+int __maybe_unused axienet_mcdma_tx_probe(struct platform_device *pdev,
+ struct device_node *np,
+ struct axienet_local *lp)
+{
+ int i;
+ char dma_name[24];
+
+ for_each_tx_dma_queue(lp, i) {
+ struct axienet_dma_q *q;
+
+ q = lp->dq[i];
+
+ q->dma_regs = lp->mcdma_regs;
+ snprintf(dma_name, sizeof(dma_name), "mm2s_ch%d_introut",
+ q->chan_id);
+ q->tx_irq = platform_get_irq_byname(pdev, dma_name);
+ q->eth_hasdre = of_property_read_bool(np,
+ "xlnx,include-dre");
+ spin_lock_init(&q->tx_lock);
+ }
+ of_node_put(np);
+
+ return 0;
+}
+
+int __maybe_unused axienet_mcdma_rx_probe(struct platform_device *pdev,
+ struct axienet_local *lp,
+ struct net_device *ndev)
+{
+ int i;
+ char dma_name[24];
+
+ for_each_rx_dma_queue(lp, i) {
+ struct axienet_dma_q *q;
+
+ q = lp->dq[i];
+
+ q->dma_regs = lp->mcdma_regs;
+ snprintf(dma_name, sizeof(dma_name), "s2mm_ch%d_introut",
+ q->chan_id);
+ q->rx_irq = platform_get_irq_byname(pdev, dma_name);
+
+ spin_lock_init(&q->rx_lock);
+
+ netif_napi_add(ndev, &lp->napi[i], xaxienet_rx_poll,
+ XAXIENET_NAPI_WEIGHT);
+ }
+
+ return 0;
+}
+
+static ssize_t rxch_obs1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS1_OFFSET + q->rx_offset);
+
+ return sprintf(buf, "Ingress Channel Observer 1 Contents is 0x%x\n",
+ reg);
+}
+
+static ssize_t rxch_obs2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS2_OFFSET + q->rx_offset);
+
+ return sprintf(buf, "Ingress Channel Observer 2 Contents is 0x%x\n",
+ reg);
+}
+
+static ssize_t rxch_obs3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS3_OFFSET + q->rx_offset);
+
+ return sprintf(buf, "Ingress Channel Observer 3 Contents is 0x%x\n",
+ reg);
+}
+
+static ssize_t rxch_obs4_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS4_OFFSET + q->rx_offset);
+
+ return sprintf(buf, "Ingress Channel Observer 4 Contents is 0x%x\n",
+ reg);
+}
+
+static ssize_t rxch_obs5_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS5_OFFSET + q->rx_offset);
+
+ return sprintf(buf, "Ingress Channel Observer 5 Contents is 0x%x\n",
+ reg);
+}
+
+static ssize_t rxch_obs6_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS6_OFFSET + q->rx_offset);
+
+ return sprintf(buf, "Ingress Channel Observer 6 Contents is 0x%x\n\r",
+ reg);
+}
+
+static ssize_t txch_obs1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS1_OFFSET);
+
+ return sprintf(buf, "Egress Channel Observer 1 Contents is 0x%x\n",
+ reg);
+}
+
+static ssize_t txch_obs2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS2_OFFSET);
+
+ return sprintf(buf, "Egress Channel Observer 2 Contents is 0x%x\n\r",
+ reg);
+}
+
+static ssize_t txch_obs3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS3_OFFSET);
+
+ return sprintf(buf, "Egress Channel Observer 3 Contents is 0x%x\n\r",
+ reg);
+}
+
+static ssize_t txch_obs4_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS4_OFFSET);
+
+ return sprintf(buf, "Egress Channel Observer 4 Contents is 0x%x\n\r",
+ reg);
+}
+
+static ssize_t txch_obs5_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS5_OFFSET);
+
+ return sprintf(buf, "Egress Channel Observer 5 Contents is 0x%x\n\r",
+ reg);
+}
+
+static ssize_t txch_obs6_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS6_OFFSET);
+
+ return sprintf(buf, "Egress Channel Observer 6 Contents is 0x%x\n\r",
+ reg);
+}
+
+static ssize_t chan_weight_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ return sprintf(buf, "chan_id is %d and weight is %d\n",
+ lp->chan_id, lp->weight);
+}
+
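+/* chan_weight packs two fields into one hex byte written from user space:
+ * bits 7:4 select the channel id and bits 3:0 the weight, so (for example)
+ * "echo 25 > chan_weight" - parsed base 16 by the store routine below -
+ * sets channel 2 to weight 5. Channels 0-7 live in TXWEIGHT0 and channels
+ * 8-15 in TXWEIGHT1; this layout is inferred from the code below.
+ */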
+static ssize_t chan_weight_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ int ret;
+ u16 flags, chan_id;
+ u32 val;
+
+ ret = kstrtou16(buf, 16, &flags);
+ if (ret)
+ return ret;
+
+ lp->chan_id = (flags & 0xF0) >> 4;
+ lp->weight = flags & 0x0F;
+
+ if (lp->chan_id < 8)
+ val = axienet_dma_in32(q, XMCDMA_TXWEIGHT0_OFFSET);
+ else
+ val = axienet_dma_in32(q, XMCDMA_TXWEIGHT1_OFFSET);
+
+ if (lp->chan_id > 7)
+ chan_id = lp->chan_id - 8;
+ else
+ chan_id = lp->chan_id;
+
+ val &= ~XMCDMA_TXWEIGHT_CH_MASK(chan_id);
+ val |= lp->weight << XMCDMA_TXWEIGHT_CH_SHIFT(chan_id);
+
+ if (lp->chan_id < 8)
+ axienet_dma_out32(q, XMCDMA_TXWEIGHT0_OFFSET, val);
+ else
+ axienet_dma_out32(q, XMCDMA_TXWEIGHT1_OFFSET, val);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(chan_weight);
+static DEVICE_ATTR_RO(rxch_obs1);
+static DEVICE_ATTR_RO(rxch_obs2);
+static DEVICE_ATTR_RO(rxch_obs3);
+static DEVICE_ATTR_RO(rxch_obs4);
+static DEVICE_ATTR_RO(rxch_obs5);
+static DEVICE_ATTR_RO(rxch_obs6);
+static DEVICE_ATTR_RO(txch_obs1);
+static DEVICE_ATTR_RO(txch_obs2);
+static DEVICE_ATTR_RO(txch_obs3);
+static DEVICE_ATTR_RO(txch_obs4);
+static DEVICE_ATTR_RO(txch_obs5);
+static DEVICE_ATTR_RO(txch_obs6);
+static const struct attribute *mcdma_attrs[] = {
+ &dev_attr_chan_weight.attr,
+ &dev_attr_rxch_obs1.attr,
+ &dev_attr_rxch_obs2.attr,
+ &dev_attr_rxch_obs3.attr,
+ &dev_attr_rxch_obs4.attr,
+ &dev_attr_rxch_obs5.attr,
+ &dev_attr_rxch_obs6.attr,
+ &dev_attr_txch_obs1.attr,
+ &dev_attr_txch_obs2.attr,
+ &dev_attr_txch_obs3.attr,
+ &dev_attr_txch_obs4.attr,
+ &dev_attr_txch_obs5.attr,
+ &dev_attr_txch_obs6.attr,
+ NULL,
+};
+
+static const struct attribute_group mcdma_attributes = {
+ .attrs = (struct attribute **)mcdma_attrs,
+};
+
+int axeinet_mcdma_create_sysfs(struct kobject *kobj)
+{
+ return sysfs_create_group(kobj, &mcdma_attributes);
+}
+
+void axeinet_mcdma_remove_sysfs(struct kobject *kobj)
+{
+ sysfs_remove_group(kobj, &mcdma_attributes);
+}
+
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 435ed308d990..917cfcaa4181 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -18,7 +18,7 @@
#include "xilinx_axienet.h"
#define MAX_MDIO_FREQ 2500000 /* 2.5 MHz */
-#define DEFAULT_HOST_CLOCK 150000000 /* 150 MHz */
+#define DEFAULT_CLOCK_DIVISOR XAE_MDIO_DIV_DFT
/* Wait till MDIO interface is ready to accept a new transaction.*/
static int axienet_mdio_wait_until_ready(struct axienet_local *lp)
@@ -98,7 +98,7 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
if (ret < 0)
return ret;
- axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32) val);
+ axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32)val);
axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
(((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) &
XAE_MDIO_MCR_PHYAD_MASK) |
@@ -128,66 +128,63 @@ int axienet_mdio_enable(struct axienet_local *lp)
if (lp->clk) {
host_clock = clk_get_rate(lp->clk);
+
+ clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
+
+ /* If there is any remainder from the division of
+ * fHOST / (MAX_MDIO_FREQ * 2), then we need to add 1
+ * to the clock divisor or we will surely be
+ * above 2.5 MHz
+ */
+ if (host_clock % (MAX_MDIO_FREQ * 2))
+ clk_div++;
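+
+ /* Worked example, assuming a 133 MHz host clock:
+ * 133000000 / 5000000 = 26 remainder 3000000, so clk_div starts at
+ * 26 - 1 = 25 and the non-zero remainder bumps it to 26, giving
+ * fMDIO = 133 MHz / ((1 + 26) * 2) ~= 2.46 MHz. Without the bump it
+ * would be ~2.56 MHz, above the 2.5 MHz limit.
+ */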
+ dev_dbg(lp->dev,
+ "Setting MDIO clock divisor to %u based on %u Hz host clock.\n",
+ clk_div, host_clock);
} else {
struct device_node *np1;
+ /* the ethernet controller device node */
+ struct device_node *npp = NULL;
- /* Legacy fallback: detect CPU clock frequency and use as AXI
- * bus clock frequency. This only works on certain platforms.
- */
- np1 = of_find_node_by_name(NULL, "cpu");
- if (!np1) {
- netdev_warn(lp->ndev, "Could not find CPU device node.\n");
- host_clock = DEFAULT_HOST_CLOCK;
+ np1 = of_get_parent(lp->phy_node);
+ if (np1) {
+ npp = of_get_parent(np1);
+ of_node_put(np1);
+ }
+ if (!npp) {
+ dev_warn(lp->dev,
+ "Could not find ethernet controller device node.\n");
+ dev_warn(lp->dev, "Setting MDIO clock divisor to default %d\n",
+ DEFAULT_CLOCK_DIVISOR);
+ clk_div = DEFAULT_CLOCK_DIVISOR;
} else {
- int ret = of_property_read_u32(np1, "clock-frequency",
- &host_clock);
- if (ret) {
- netdev_warn(lp->ndev, "CPU clock-frequency property not found.\n");
- host_clock = DEFAULT_HOST_CLOCK;
+ if (of_property_read_u32(npp, "clock-frequency", &host_clock)) {
+ netdev_warn(lp->ndev,
+ "clock-frequency property not found.\n");
+ netdev_warn(lp->ndev,
+ "Setting MDIO clock divisor to default %d\n",
+ DEFAULT_CLOCK_DIVISOR);
+ clk_div = DEFAULT_CLOCK_DIVISOR;
+ } else {
+ clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
+
+ /* If there is any remainder from the division of
+ * fHOST / (MAX_MDIO_FREQ * 2), then we need to add 1
+ * to the clock divisor or we will surely be
+ * above 2.5 MHz
+ */
+ if (host_clock % (MAX_MDIO_FREQ * 2))
+ clk_div++;
+ dev_dbg(lp->dev,
+ "Setting MDIO clock divisor to %u based on %u Hz host clock.\n",
+ clk_div, host_clock);
}
- of_node_put(np1);
+ of_node_put(npp);
}
- netdev_info(lp->ndev, "Setting assumed host clock to %u\n",
- host_clock);
}
- /* clk_div can be calculated by deriving it from the equation:
- * fMDIO = fHOST / ((1 + clk_div) * 2)
- *
- * Where fMDIO <= 2500000, so we get:
- * fHOST / ((1 + clk_div) * 2) <= 2500000
- *
- * Then we get:
- * 1 / ((1 + clk_div) * 2) <= (2500000 / fHOST)
- *
- * Then we get:
- * 1 / (1 + clk_div) <= ((2500000 * 2) / fHOST)
- *
- * Then we get:
- * 1 / (1 + clk_div) <= (5000000 / fHOST)
- *
- * So:
- * (1 + clk_div) >= (fHOST / 5000000)
- *
- * And finally:
- * clk_div >= (fHOST / 5000000) - 1
- *
- * fHOST can be read from the flattened device tree as property
- * "clock-frequency" from the CPU
- */
-
- clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
- /* If there is any remainder from the division of
- * fHOST / (MAX_MDIO_FREQ * 2), then we need to add
- * 1 to the clock divisor or we will surely be above 2.5 MHz
- */
- if (host_clock % (MAX_MDIO_FREQ * 2))
- clk_div++;
-
- netdev_dbg(lp->ndev,
- "Setting MDIO clock divisor to %u/%u Hz host clock.\n",
- clk_div, host_clock);
-
axienet_iow(lp, XAE_MDIO_MC_OFFSET, clk_div | XAE_MDIO_MC_MDIOEN_MASK);
return axienet_mdio_wait_until_ready(lp);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 6e5ea68b6a7e..e5e32bfdeb2b 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -91,13 +91,11 @@
#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
-
-
#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */
#define ALIGNMENT 4
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
-#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)adr)) % ALIGNMENT)
+#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((ulong)adr)) % ALIGNMENT)
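+/* e.g. with ALIGNMENT = 4: an address whose low bits are 01 needs
+ * (4 - 1) % 4 = 3 padding bytes, while an already aligned address needs
+ * (4 - 0) % 4 = 0. The ulong cast (rather than u32) keeps the arithmetic
+ * correct when pointers are 64-bit.
+ */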
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
@@ -124,7 +122,6 @@
* @last_link: last link status
*/
struct net_local {
-
struct net_device *ndev;
bool tx_ping_pong;
@@ -133,7 +130,7 @@ struct net_local {
u32 next_rx_buf_to_use;
void __iomem *base_addr;
- spinlock_t reset_lock;
+ spinlock_t reset_lock; /* lock used for synchronization */
struct sk_buff *deferred_skb;
struct phy_device *phy_dev;
@@ -144,7 +141,6 @@ struct net_local {
int last_link;
};
-
/*************************/
/* EmacLite driver calls */
/*************************/
@@ -166,10 +162,12 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
drvdata->base_addr + XEL_TSR_OFFSET);
/* Enable the Rx interrupts for the first buffer */
- xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
+ xemaclite_writel(XEL_RSR_RECV_IE_MASK,
+ drvdata->base_addr + XEL_RSR_OFFSET);
/* Enable the Global Interrupt Enable */
- xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+ xemaclite_writel(XEL_GIER_GIE_MASK,
+ drvdata->base_addr + XEL_GIER_OFFSET);
}
/**
@@ -184,7 +182,8 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
u32 reg_data;
/* Disable the Global Interrupt Enable */
- xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+ xemaclite_writel(XEL_GIER_GIE_MASK,
+ drvdata->base_addr + XEL_GIER_OFFSET);
/* Disable the Tx interrupts for the first buffer */
reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
@@ -207,7 +206,7 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
* address in the EmacLite device.
*/
static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
u32 align_buffer;
u32 *to_u32_ptr;
@@ -264,7 +263,7 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
* to a 16-bit aligned buffer.
*/
static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
u16 *to_u16_ptr, *from_u16_ptr;
u32 *from_u32_ptr;
@@ -329,7 +328,6 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
-
/* Switch to next buffer if configured */
if (drvdata->tx_ping_pong != 0)
drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
@@ -338,15 +336,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
* if it is configured in HW
*/
- addr = (void __iomem __force *)((u32 __force)addr ^
+ addr = (void __iomem __force *)((ulong __force)addr ^
XEL_BUFFER_OFFSET);
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
return -1; /* Buffers were full, return failure */
- } else
+ } else {
return -1; /* Buffer was full, return failure */
+ }
/* Write the frame to the buffer */
xemaclite_aligned_write(data, (u32 __force *)addr, byte_count);
@@ -370,7 +369,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
* xemaclite_recv_data - Receive a frame
* @drvdata: Pointer to the Emaclite device private data
* @data: Address where the data is to be received
- * @maxlen: Maximum supported ethernet packet length
+ * @maxlen: Maximum supported ethernet packet length
*
* This function is intended to be called from the interrupt context or
* with a wrapper which waits for the receive frame to be available.
@@ -399,7 +398,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* will correct on subsequent calls
*/
if (drvdata->rx_ping_pong != 0)
- addr = (void __iomem __force *)((u32 __force)addr ^
+ addr = (void __iomem __force *)((ulong __force)addr ^
XEL_BUFFER_OFFSET);
else
return 0; /* No data was available */
@@ -421,7 +420,6 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* or an IP packet or an ARP packet
*/
if (proto_type > ETH_DATA_LEN) {
-
if (proto_type == ETH_P_IP) {
length = ((ntohl(xemaclite_readl(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
@@ -431,23 +429,25 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = min_t(u16, length, ETH_DATA_LEN);
length += ETH_HLEN + ETH_FCS_LEN;
- } else if (proto_type == ETH_P_ARP)
+ } else if (proto_type == ETH_P_ARP) {
length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
- else
+ } else {
/* Field contains type other than IP or ARP, use max
* frame size and let user parse it
*/
length = ETH_FRAME_LEN + ETH_FCS_LEN;
- } else
+ }
+ } else {
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
+ }
if (WARN_ON(length > maxlen))
length = maxlen;
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET),
- data, length);
+ data, length);
/* Acknowledge the frame */
reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
@@ -483,7 +483,8 @@ static void xemaclite_update_address(struct net_local *drvdata,
/* Update the MAC address in the EmacLite */
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
- xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR,
+ addr + XEL_TSR_OFFSET);
/* Wait for EmacLite to finish with the MAC address update */
while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
@@ -541,7 +542,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
xemaclite_enable_interrupts(lp);
if (lp->deferred_skb) {
- dev_kfree_skb(lp->deferred_skb);
+ dev_kfree_skb_irq(lp->deferred_skb);
lp->deferred_skb = NULL;
dev->stats.tx_errors++;
}
@@ -668,8 +669,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
/* Check if the Transmission for the first buffer is completed */
tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
@@ -677,10 +677,10 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
}
/* Check if the Transmission for the second buffer is completed */
- tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+ tx_status = xemaclite_readl(base_addr +
+ XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
XEL_TSR_OFFSET);
@@ -776,7 +776,7 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* This function waits till the device is ready to accept a new MDIO
* request and then writes the val to the MDIO Write Data register.
*
- * Return: 0 upon success or a negative error upon failure
+ * Return: 0 upon success or a negative error upon failure
*/
static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
u16 val)
@@ -843,6 +843,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
}
if (lp->ndev->mem_start != res.start) {
struct phy_device *phydev;
+
phydev = of_phy_find_device(lp->phy_node);
if (!phydev)
dev_info(dev,
@@ -1185,8 +1186,9 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
}
dev_info(dev,
- "Xilinx EmacLite at 0x%08X mapped to 0x%p, irq=%d\n",
- (unsigned int __force)ndev->mem_start, lp->base_addr, ndev->irq);
+ "Xilinx EmacLite at 0x%08X mapped to 0x%08lX, irq=%d\n",
+ (unsigned int __force)ndev->mem_start,
+ (unsigned long __force)lp->base_addr, ndev->irq);
return 0;
put_node:
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_cb.c b/drivers/net/ethernet/xilinx/xilinx_tsn_cb.c
new file mode 100644
index 000000000000..044c285365b3
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_cb.c
@@ -0,0 +1,177 @@
+/*
+ * Xilinx FPGA TSN CB (FRER) module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "xilinx_tsn_switch.h"
+
+#define IN_PORTID_MASK 0x3
+#define IN_PORTID_SHIFT 24
+#define MAX_SEQID_MASK 0x0000FFFF
+
+#define SEQ_REC_HIST_LEN_MASK 0x000000FF
+#define SEQ_REC_HIST_LEN_SHIFT 16
+#define SPLIT_STREAM_INPORTID_SHIFT 12
+#define SPLIT_STREAM_INPORTID_MASK 0x3
+#define SPLIT_STREAM_VLANID_MASK 0x00000FFF
+
+#define GATE_ID_SHIFT 24
+#define MEMBER_ID_SHIFT 8
+#define SEQ_RESET_SHIFT 7
+#define REC_TIMEOUT_SHIFT 6
+#define GATE_STATE_SHIFT 5
+#define FRER_VALID_SHIFT 4
+#define WR_OP_TYPE_SHIFT 2
+#define OP_TYPE_SHIFT 1
+#define WR_OP_TYPE_MASK 0x3
+#define FRER_EN_CONTROL_MASK 0x1
+
+/**
+ * frer_control - Configure the FRER control register
+ * @data: Value to be programmed
+ */
+void frer_control(struct frer_ctrl data)
+{
+ u32 mask = 0;
+
+ mask = data.gate_id << GATE_ID_SHIFT;
+ mask |= data.memb_id << MEMBER_ID_SHIFT;
+ mask |= data.seq_reset << SEQ_RESET_SHIFT;
+ mask |= data.gate_state << GATE_STATE_SHIFT;
+ mask |= data.rcvry_tmout << REC_TIMEOUT_SHIFT;
+ mask |= data.frer_valid << FRER_VALID_SHIFT;
+ mask |= (data.wr_op_type & WR_OP_TYPE_MASK) << WR_OP_TYPE_SHIFT;
+ mask |= data.op_type << OP_TYPE_SHIFT;
+ mask |= FRER_EN_CONTROL_MASK;
+
+ axienet_iow(&lp, FRER_CONTROL_OFFSET, mask);
+
+ /* wait for the write to complete */
+ while (axienet_ior(&lp, FRER_CONTROL_OFFSET) & FRER_EN_CONTROL_MASK)
+ cpu_relax();
+}
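+
+/* Illustrative caller sketch (the field values are hypothetical, not taken
+ * from this patch): to reset sequence recovery for member 1 of gate 2, a
+ * caller would fill a struct frer_ctrl and pass it in:
+ *
+ * struct frer_ctrl ctrl = { 0 };
+ *
+ * ctrl.gate_id = 2;
+ * ctrl.memb_id = 1;
+ * ctrl.seq_reset = 1;
+ * frer_control(ctrl);
+ */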
+
+/**
+ * get_ingress_filter_config - Get Ingress Filter Configuration
+ * @data: Value returned
+ */
+void get_ingress_filter_config(struct in_fltr *data)
+{
+ u32 reg_val = 0;
+
+ reg_val = axienet_ior(&lp, INGRESS_FILTER_OFFSET);
+
+ data->max_seq_id = reg_val & MAX_SEQID_MASK;
+ data->in_port_id = (reg_val >> IN_PORTID_SHIFT) & IN_PORTID_MASK;
+}
+
+/**
+ * config_ingress_filter - Configure the ingress filter
+ * @data: Value to be programmed
+ */
+void config_ingress_filter(struct in_fltr data)
+{
+ u32 mask = 0;
+
+ mask = ((data.in_port_id & IN_PORTID_MASK) << IN_PORTID_SHIFT) |
+ (data.max_seq_id & MAX_SEQID_MASK);
+ axienet_iow(&lp, INGRESS_FILTER_OFFSET, mask);
+}
+
+/**
+ * get_member_reg - Read the FRER member configuration register values
+ * @data: Value returned
+ */
+void get_member_reg(struct frer_memb_config *data)
+{
+ u32 conf_r1 = 0;
+
+ conf_r1 = axienet_ior(&lp, FRER_CONFIG_REG1);
+ data->rem_ticks = axienet_ior(&lp, FRER_CONFIG_REG2);
+
+ data->seq_rec_hist_len = (conf_r1 >> SEQ_REC_HIST_LEN_SHIFT)
+ & SEQ_REC_HIST_LEN_MASK;
+ data->split_strm_egport_id = (conf_r1 >> SPLIT_STREAM_INPORTID_SHIFT)
+ & SPLIT_STREAM_INPORTID_MASK;
+ data->split_strm_vlan_id = conf_r1 & SPLIT_STREAM_VLANID_MASK;
+}
+
+/**
+ * program_member_reg - Program the FRER member configuration registers
+ * @data: Value to be programmed
+ */
+void program_member_reg(struct frer_memb_config data)
+{
+ u32 conf_r1 = 0;
+
+ conf_r1 = (data.seq_rec_hist_len & SEQ_REC_HIST_LEN_MASK)
+ << SEQ_REC_HIST_LEN_SHIFT;
+ conf_r1 = conf_r1 | ((data.split_strm_egport_id
+ & SPLIT_STREAM_INPORTID_MASK)
+ << SPLIT_STREAM_INPORTID_SHIFT);
+ conf_r1 = conf_r1 | (data.split_strm_vlan_id
+ & SPLIT_STREAM_VLANID_MASK);
+
+ axienet_iow(&lp, FRER_CONFIG_REG1, conf_r1);
+ axienet_iow(&lp, FRER_CONFIG_REG2, data.rem_ticks);
+}
+
+/**
+ * get_frer_static_counter - Read the FRER static counter values
+ * @data: Returned counter values for the member selected by @data->num
+ */
+void get_frer_static_counter(struct frer_static_counter *data)
+{
+ int offset = (data->num) * 8;
+
+ data->frer_fr_count.lsb = axienet_ior(&lp, TOTAL_FRER_FRAMES_OFFSET +
+ offset);
+ data->frer_fr_count.msb = axienet_ior(&lp, TOTAL_FRER_FRAMES_OFFSET +
+ offset + 0x4);
+
+ data->disc_frames_in_portid.lsb = axienet_ior(&lp,
+ FRER_DISCARD_INGS_FLTR_OFFSET + offset);
+ data->disc_frames_in_portid.msb = axienet_ior(&lp,
+ FRER_DISCARD_INGS_FLTR_OFFSET + offset + 0x4);
+
+ data->pass_frames_ind_recv.lsb = axienet_ior(&lp,
+ FRER_PASS_FRAMES_INDV_OFFSET + offset);
+ data->pass_frames_ind_recv.msb = axienet_ior(&lp,
+ FRER_PASS_FRAMES_INDV_OFFSET + offset + 0x4);
+
+ data->disc_frames_ind_recv.lsb = axienet_ior(&lp,
+ FRER_DISCARD_FRAMES_INDV_OFFSET + offset);
+ data->disc_frames_ind_recv.msb = axienet_ior(&lp,
+ FRER_DISCARD_FRAMES_INDV_OFFSET + offset + 0x4);
+
+ data->pass_frames_seq_recv.lsb = axienet_ior(&lp,
+ FRER_PASS_FRAMES_SEQ_OFFSET + offset);
+ data->pass_frames_seq_recv.msb = axienet_ior(&lp,
+ FRER_PASS_FRAMES_SEQ_OFFSET + offset + 0x4);
+
+ data->disc_frames_seq_recv.lsb = axienet_ior(&lp,
+ FRER_DISCARD_FRAMES_SEQ_OFFSET + offset);
+ data->disc_frames_seq_recv.msb = axienet_ior(&lp,
+ FRER_DISCARD_FRAMES_SEQ_OFFSET + offset + 0x4);
+
+ data->rogue_frames_seq_recv.lsb = axienet_ior(&lp,
+ FRER_ROGUE_FRAMES_SEQ_OFFSET + offset);
+ data->rogue_frames_seq_recv.msb = axienet_ior(&lp,
+ FRER_ROGUE_FRAMES_SEQ_OFFSET + offset + 0x4);
+
+ data->seq_recv_rst.lsb = axienet_ior(&lp,
+ SEQ_RECV_RESETS_OFFSET + offset);
+ data->seq_recv_rst.msb = axienet_ior(&lp,
+ SEQ_RECV_RESETS_OFFSET + offset + 0x4);
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_ep.c b/drivers/net/ethernet/xilinx/xilinx_tsn_ep.c
new file mode 100644
index 000000000000..bcd6c737bc27
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_ep.c
@@ -0,0 +1,161 @@
+/*
+ * Xilinx FPGA Xilinx TSN End point driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/skbuff.h>
+
+#include "xilinx_axienet.h"
+
+/**
+ * tsn_ep_ioctl - TSN endpoint ioctl interface.
+ * @dev: Pointer to the net_device structure
+ * @rq: Socket ioctl interface request structure
+ * @cmd: Ioctl case
+ *
+ * Return: 0 on success, Non-zero error value on failure.
+ *
+ * This is the ioctl interface for TSN end point. Currently this
+ * supports only gate programming.
+ */
+static int tsn_ep_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ switch (cmd) {
+#ifdef CONFIG_XILINX_TSN_QBV
+ case SIOCCHIOCTL:
+ return axienet_set_schedule(dev, rq->ifr_data);
+ case SIOC_GET_SCHED:
+ return axienet_get_schedule(dev, rq->ifr_data);
+#endif
+ default:
+ return -EOPNOTSUPP;
+ }
+}
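+
+/*
+ * Illustrative sketch of the matching user-space call (not part of the
+ * driver): the schedule is passed through the private ioctl with ifr_data
+ * pointing at a struct qbv_info (see xilinx_tsn_shaper.h). Error handling
+ * is omitted and the endpoint interface is assumed to be named "ep":
+ *
+ *   int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ *   struct ifreq ifr = { 0 };
+ *   struct qbv_info qbv = { 0 };
+ *
+ *   strncpy(ifr.ifr_name, "ep", IFNAMSIZ - 1);
+ *   ifr.ifr_data = (void *)&qbv;
+ *   ioctl(fd, SIOCCHIOCTL, &ifr);
+ */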
+
+/**
+ * tsn_ep_xmit - TSN endpoint xmit routine.
+ * @skb: Packet data
+ * @dev: Pointer to the net_device structure
+ *
+ * Return: Always returns NETDEV_TX_OK.
+ *
+ * This is a dummy xmit function for the endpoint, as the entire data path is
+ * assumed to be handled through TEMAC1 from the Linux point of view.
+ */
+static int tsn_ep_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops ep_netdev_ops = {
+ .ndo_do_ioctl = tsn_ep_ioctl,
+ .ndo_start_xmit = tsn_ep_xmit,
+};
+
+static const struct of_device_id tsn_ep_of_match[] = {
+ { .compatible = "xlnx,tsn-ep"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, tsn_ep_of_match);
+
+/**
+ * tsn_ep_probe - TSN ep pointer probe function.
+ * @pdev: Pointer to platform device structure.
+ *
+ * Return: 0, on success
+ * Non-zero error value on failure.
+ *
+ * This is the probe routine for TSN endpoint driver.
+ */
+static int tsn_ep_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct axienet_local *lp;
+ struct net_device *ndev;
+ struct resource *ethres;
+ u16 num_tc = 0;
+
+ ndev = alloc_netdev(0, "ep", NET_NAME_UNKNOWN, ether_setup);
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ndev->netdev_ops = &ep_netdev_ops;
+
+ lp = netdev_priv(ndev);
+ lp->ndev = ndev;
+ lp->dev = &pdev->dev;
+ lp->options = XAE_OPTION_DEFAULTS;
+
+ ret = of_property_read_u16(
+ pdev->dev.of_node, "xlnx,num-tc", &num_tc);
+ if (ret || (num_tc != 2 && num_tc != 3))
+ lp->num_tc = XAE_MAX_TSN_TC;
+ else
+ lp->num_tc = num_tc;
+ /* Map device registers */
+ ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
+ if (IS_ERR(lp->regs)) {
+ ret = PTR_ERR(lp->regs);
+ goto free_netdev;
+ }
+
+ ret = register_netdev(lp->ndev);
+ if (ret) {
+ dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
+ goto free_netdev;
+ }
+
+ return 0;
+
+free_netdev:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int tsn_ep_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ unregister_netdev(ndev);
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static struct platform_driver tsn_ep_driver = {
+ .probe = tsn_ep_probe,
+ .remove = tsn_ep_remove,
+ .driver = {
+ .name = "tsn_ep_axienet",
+ .of_match_table = tsn_ep_of_match,
+ },
+};
+
+module_platform_driver(tsn_ep_driver);
+
+MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
+MODULE_AUTHOR("Xilinx");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.c b/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.c
new file mode 100644
index 000000000000..f48c2e0cb69e
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.c
@@ -0,0 +1,223 @@
+/*
+ * Xilinx FPGA Xilinx TSN QBU/QBR - Frame Preemption module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Priyadarshini Babu <priyadar@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "xilinx_axienet.h"
+#include "xilinx_tsn_preemption.h"
+
+/**
+ * axienet_preemption - Configure Frame Preemption
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: Value to be programmed
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_preemption(struct net_device *ndev, void __user *useraddr)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ u8 preemp;
+
+ if (copy_from_user(&preemp, useraddr, sizeof(preemp)))
+ return -EFAULT;
+
+ axienet_iow(lp, PREEMPTION_ENABLE_REG, preemp & PREEMPTION_ENABLE);
+ return 0;
+}
+
+/**
+ * axienet_preemption_ctrl - Configure Frame Preemption Control register
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: Value to be programmed
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_preemption_ctrl(struct net_device *ndev, void __user *useraddr)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct preempt_ctrl_sts data;
+ u32 value;
+
+ if (copy_from_user(&data, useraddr, sizeof(struct preempt_ctrl_sts)))
+ return -EFAULT;
+ value = axienet_ior(lp, PREEMPTION_CTRL_STS_REG);
+
+ value &= ~(VERIFY_TIMER_VALUE_MASK << VERIFY_TIMER_VALUE_SHIFT);
+ value |= (data.verify_timer_value << VERIFY_TIMER_VALUE_SHIFT);
+ value &= ~(ADDITIONAL_FRAG_SIZE_MASK << ADDITIONAL_FRAG_SIZE_SHIFT);
+ value |= (data.additional_frag_size << ADDITIONAL_FRAG_SIZE_SHIFT);
+ value &= ~(DISABLE_PREEMPTION_VERIFY);
+ value |= (data.disable_preemp_verify);
+
+ axienet_iow(lp, PREEMPTION_CTRL_STS_REG, value);
+ return 0;
+}
+
+/**
+ * axienet_preemption_sts - Get Frame Preemption Status
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: return value, containing Frame Preemption status
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_preemption_sts(struct net_device *ndev, void __user *useraddr)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct preempt_ctrl_sts status;
+ u32 value;
+
+ value = axienet_ior(lp, PREEMPTION_CTRL_STS_REG);
+
+ status.tx_preemp_sts = (value & TX_PREEMPTION_STS) ? 1 : 0;
+ status.mac_tx_verify_sts = (value >> MAC_MERGE_TX_VERIFY_STS_SHIFT) &
+ MAC_MERGE_TX_VERIFY_STS_MASK;
+ status.verify_timer_value = (value >> VERIFY_TIMER_VALUE_SHIFT) &
+ VERIFY_TIMER_VALUE_MASK;
+ status.additional_frag_size = (value >> ADDITIONAL_FRAG_SIZE_SHIFT) &
+ ADDITIONAL_FRAG_SIZE_MASK;
+ status.disable_preemp_verify = value & DISABLE_PREEMPTION_VERIFY;
+
+ if (copy_to_user(useraddr, &status, sizeof(struct preempt_ctrl_sts)))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * statistic_cnts - Read statistics counter registers
+ * @ndev: Pointer to the net_device structure
+ * @ptr: Buffer addr to fill the counter values
+ * @count: read #count number of registers
+ * @addr_off: Register address to be read
+ */
+static void statistic_cnts(struct net_device *ndev, void *ptr,
+ unsigned int count, unsigned int addr_off)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ int *buf = (int *)ptr;
+ int i = 0;
+
+ for (i = 0; i < count; i++) {
+ buf[i] = axienet_ior(lp, addr_off);
+ addr_off += 4;
+ }
+}
+
+/**
+ * axienet_preemption_cnt - Get Frame Preemption Statistics counter
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: return value, containing counters value
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_preemption_cnt(struct net_device *ndev, void __user *useraddr)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct emac_pmac_stats stats;
+
+ statistic_cnts(ndev, &stats.emac,
+ sizeof(struct statistics_counters) / 4,
+ RX_BYTES_EMAC_REG);
+
+ stats.preemp_en = axienet_ior(lp, PREEMPTION_ENABLE_REG);
+ if (stats.preemp_en) {
+ statistic_cnts(ndev, &stats.pmac.sts,
+ sizeof(struct statistics_counters) / 4,
+ RX_BYTES_PMAC_REG);
+ statistic_cnts(ndev, &stats.pmac.merge,
+ sizeof(struct mac_merge_counters) / 4,
+ TX_HOLD_REG);
+ }
+
+ if (copy_to_user(useraddr, &stats, sizeof(struct emac_pmac_stats)))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * axienet_qbu_user_override - Configure QBU user override register
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: Value to be programmed
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_qbu_user_override(struct net_device *ndev, void __user *useraddr)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct qbu_user data;
+ u32 value;
+
+ if (copy_from_user(&data, useraddr, sizeof(struct qbu_user)))
+ return -EFAULT;
+
+ value = axienet_ior(lp, QBU_USER_OVERRIDE_REG);
+
+ if (data.set & QBU_WINDOW) {
+ if (data.user.hold_rel_window) {
+ value |= USER_HOLD_REL_ENABLE_VALUE;
+ value |= HOLD_REL_WINDOW_OVERRIDE;
+ } else {
+ value &= ~(USER_HOLD_REL_ENABLE_VALUE);
+ value &= ~(HOLD_REL_WINDOW_OVERRIDE);
+ }
+ }
+ if (data.set & QBU_GUARD_BAND) {
+ if (data.user.guard_band)
+ value |= GUARD_BAND_OVERRUN_CNT_INC_OVERRIDE;
+ else
+ value &= ~(GUARD_BAND_OVERRUN_CNT_INC_OVERRIDE);
+ }
+ if (data.set & QBU_HOLD_TIME) {
+ if (data.user.hold_time_override) {
+ value |= HOLD_TIME_OVERRIDE;
+ value &= ~(USER_HOLD_TIME_MASK << USER_HOLD_TIME_SHIFT);
+ value |= data.user.user_hold_time <<
+ USER_HOLD_TIME_SHIFT;
+ } else {
+ value &= ~(HOLD_TIME_OVERRIDE);
+ value &= ~(USER_HOLD_TIME_MASK << USER_HOLD_TIME_SHIFT);
+ }
+ }
+ if (data.set & QBU_REL_TIME) {
+ if (data.user.rel_time_override) {
+ value |= REL_TIME_OVERRIDE;
+ value &= ~(USER_REL_TIME_MASK << USER_REL_TIME_SHIFT);
+ value |= data.user.user_rel_time << USER_REL_TIME_SHIFT;
+ } else {
+ value &= ~(REL_TIME_OVERRIDE);
+ value &= ~(USER_REL_TIME_MASK << USER_REL_TIME_SHIFT);
+ }
+ }
+
+ axienet_iow(lp, QBU_USER_OVERRIDE_REG, value);
+ return 0;
+}
+
+/**
+ * axienet_qbu_sts - Get QBU Core status
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: return value, containing QBU core status value
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_qbu_sts(struct net_device *ndev, void __user *useraddr)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct qbu_core_status status;
+ u32 value = 0;
+
+ value = axienet_ior(lp, QBU_CORE_STS_REG);
+ status.hold_time = (value >> HOLD_TIME_STS_SHIFT) & HOLD_TIME_STS_MASK;
+ status.rel_time = (value >> REL_TIME_STS_SHIFT) & REL_TIME_STS_MASK;
+ status.hold_rel_en = (value & HOLD_REL_ENABLE_STS) ? 1 : 0;
+ status.pmac_hold_req = value & PMAC_HOLD_REQ_STS;
+
+ if (copy_to_user(useraddr, &status, sizeof(struct qbu_core_status)))
+ return -EFAULT;
+ return 0;
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.h b/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.h
new file mode 100644
index 000000000000..d8655513664d
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.h
@@ -0,0 +1,159 @@
+/**
+ * Xilinx TSN QBU/QBR - Frame Preemption header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Priyadarshini Babu <priyadar@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef XILINX_TSN_PREEMPTION_H
+#define XILINX_TSN_PREEMPTION_H
+
+#define PREEMPTION_ENABLE_REG 0x00000440
+#define PREEMPTION_CTRL_STS_REG 0x00000444
+#define QBU_USER_OVERRIDE_REG 0x00000448
+#define QBU_CORE_STS_REG 0x0000044c
+#define TX_HOLD_REG 0x00000910
+#define RX_BYTES_EMAC_REG 0x00000200
+#define RX_BYTES_PMAC_REG 0x00000800
+
+#define PREEMPTION_ENABLE BIT(0)
+
+#define TX_PREEMPTION_STS BIT(31)
+#define MAC_MERGE_TX_VERIFY_STS_MASK 0x7
+#define MAC_MERGE_TX_VERIFY_STS_SHIFT 24
+#define VERIFY_TIMER_VALUE_MASK 0x7F
+#define VERIFY_TIMER_VALUE_SHIFT 8
+#define ADDITIONAL_FRAG_SIZE_MASK 0x3
+#define ADDITIONAL_FRAG_SIZE_SHIFT 4
+#define DISABLE_PREEMPTION_VERIFY BIT(0)
+
+#define USER_HOLD_REL_ENABLE_VALUE BIT(31)
+#define USER_HOLD_TIME_MASK 0x1FF
+#define USER_HOLD_TIME_SHIFT 16
+#define USER_REL_TIME_MASK 0x3F
+#define USER_REL_TIME_SHIFT 8
+#define GUARD_BAND_OVERRUN_CNT_INC_OVERRIDE BIT(3)
+#define HOLD_REL_WINDOW_OVERRIDE BIT(2)
+#define HOLD_TIME_OVERRIDE BIT(1)
+#define REL_TIME_OVERRIDE BIT(0)
+
+#define HOLD_REL_ENABLE_STS BIT(31)
+#define HOLD_TIME_STS_MASK 0x1FF
+#define HOLD_TIME_STS_SHIFT 16
+#define REL_TIME_STS_MASK 0x3F
+#define REL_TIME_STS_SHIFT 8
+#define PMAC_HOLD_REQ_STS BIT(0)
+
+struct preempt_ctrl_sts {
+ u8 tx_preemp_sts:1;
+ u8 mac_tx_verify_sts:3;
+ u8 verify_timer_value:7;
+ u8 additional_frag_size:2;
+ u8 disable_preemp_verify:1;
+} __packed;
+
+struct qbu_user_override {
+ u8 enable_value:1;
+ u16 user_hold_time:9;
+ u8 user_rel_time:6;
+ u8 guard_band:1;
+ u8 hold_rel_window:1;
+ u8 hold_time_override:1;
+ u8 rel_time_override:1;
+} __packed;
+
+struct qbu_user {
+ struct qbu_user_override user;
+ u8 set;
+};
+
+#define QBU_WINDOW BIT(0)
+#define QBU_GUARD_BAND BIT(1)
+#define QBU_HOLD_TIME BIT(2)
+#define QBU_REL_TIME BIT(3)
+
+struct qbu_core_status {
+ u16 hold_time;
+ u8 rel_time;
+ u8 hold_rel_en:1;
+ u8 pmac_hold_req:1;
+} __packed;
+
+struct cnt_64 {
+ unsigned int msb;
+ unsigned int lsb;
+};
+
+union static_cntr {
+ u64 cnt;
+ struct cnt_64 word;
+};
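+
+/*
+ * Each hardware statistic is exposed as two 32-bit words. A reader that
+ * wants the full 64-bit value should combine the words explicitly rather
+ * than rely on the u64 overlay, whose byte layout depends on host
+ * endianness. A minimal sketch:
+ *
+ *   static inline u64 static_cntr_val(const union static_cntr *c)
+ *   {
+ *           return ((u64)c->word.msb << 32) | c->word.lsb;
+ *   }
+ */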
+
+struct mac_merge_counters {
+ union static_cntr tx_hold_cnt;
+ union static_cntr tx_frag_cnt;
+ union static_cntr rx_assembly_ok_cnt;
+ union static_cntr rx_assembly_err_cnt;
+ union static_cntr rx_smd_err_cnt;
+ union static_cntr rx_frag_cnt;
+};
+
+struct statistics_counters {
+ union static_cntr rx_bytes_cnt;
+ union static_cntr tx_bytes_cnt;
+ union static_cntr undersize_frames_cnt;
+ union static_cntr frag_frames_cnt;
+ union static_cntr rx_64_bytes_frames_cnt;
+ union static_cntr rx_65_127_bytes_frames_cnt;
+ union static_cntr rx_128_255_bytes_frames_cnt;
+ union static_cntr rx_256_511_bytes_frames_cnt;
+ union static_cntr rx_512_1023_bytes_frames_cnt;
+ union static_cntr rx_1024_max_frames_cnt;
+ union static_cntr rx_oversize_frames_cnt;
+ union static_cntr tx_64_bytes_frames_cnt;
+ union static_cntr tx_65_127_bytes_frames_cnt;
+ union static_cntr tx_128_255_bytes_frames_cnt;
+ union static_cntr tx_256_511_bytes_frames_cnt;
+ union static_cntr tx_512_1023_bytes_frames_cnt;
+ union static_cntr tx_1024_max_frames_cnt;
+ union static_cntr tx_oversize_frames_cnt;
+ union static_cntr rx_good_frames_cnt;
+ union static_cntr rx_fcs_err_cnt;
+ union static_cntr rx_good_broadcast_frames_cnt;
+ union static_cntr rx_good_multicast_frames_cnt;
+ union static_cntr rx_good_control_frames_cnt;
+ union static_cntr rx_out_of_range_err_cnt;
+ union static_cntr rx_good_vlan_frames_cnt;
+ union static_cntr rx_good_pause_frames_cnt;
+ union static_cntr rx_bad_opcode_frames_cnt;
+ union static_cntr tx_good_frames_cnt;
+ union static_cntr tx_good_broadcast_frames_cnt;
+ union static_cntr tx_good_multicast_frames_cnt;
+ union static_cntr tx_underrun_err_cnt;
+ union static_cntr tx_good_control_frames_cnt;
+ union static_cntr tx_good_vlan_frames_cnt;
+ union static_cntr tx_good_pause_frames_cnt;
+};
+
+struct pmac_counters {
+ struct statistics_counters sts;
+ struct mac_merge_counters merge;
+};
+
+struct emac_pmac_stats {
+ u8 preemp_en;
+ struct statistics_counters emac;
+ struct pmac_counters pmac;
+};
+
+#endif /* XILINX_TSN_PREEMPTION_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_ptp.h b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp.h
new file mode 100644
index 000000000000..d81b0acf12f0
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp.h
@@ -0,0 +1,88 @@
+/*
+ * Xilinx TSN PTP header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _TSN_PTP_H_
+#define _TSN_PTP_H_
+
+#define PTP_HW_TSTAMP_SIZE 8 /* 64 bit timestamp */
+#define PTP_RX_HWBUF_SIZE 256
+#define PTP_RX_FRAME_SIZE 252
+#define PTP_HW_TSTAMP_OFFSET (PTP_RX_HWBUF_SIZE - PTP_HW_TSTAMP_SIZE)
+
+#define PTP_MSG_TYPE_MASK BIT(3)
+#define PTP_TYPE_SYNC 0x0
+#define PTP_TYPE_FOLLOW_UP 0x8
+#define PTP_TYPE_PDELAYREQ 0x2
+#define PTP_TYPE_PDELAYRESP 0x3
+#define PTP_TYPE_PDELAYRESP_FOLLOW_UP 0xA
+#define PTP_TYPE_ANNOUNCE 0xB
+#define PTP_TYPE_SIGNALING 0xC
+
+#define PTP_TX_CONTROL_OFFSET 0x00012000 /**< Tx PTP Control Reg */
+#define PTP_RX_CONTROL_OFFSET 0x00012004 /**< Rx PTP Control Reg */
+#define RX_FILTER_CONTROL 0x00012008 /**< Rx Filter Ctrl Reg */
+
+#define PTP_RX_BASE_OFFSET 0x00010000
+#define PTP_RX_PACKET_FIELD_MASK 0x00000F00
+#define PTP_RX_PACKET_CLEAR 0x00000001
+
+#define PTP_TX_BUFFER_OFFSET(index) (0x00011000 + (index) * 0x100)
+
+#define PTP_TX_CMD_FIELD_LEN 8
+#define PTP_TX_CMD_1STEP_SHIFT BIT(16)
+#define PTP_TX_BUFFER_CMD2_FIELD 0x4
+
+#define PTP_TX_SYNC_OFFSET 0x00011000
+#define PTP_TX_FOLLOW_UP_OFFSET 0x00011100
+#define PTP_TX_PDELAYREQ_OFFSET 0x00011200
+#define PTP_TX_PDELAYRESP_OFFSET 0x00011300
+#define PTP_TX_PDELAYRESP_FOLLOW_UP_OFFSET 0x00011400
+#define PTP_TX_ANNOUNCE_OFFSET 0x00011500
+#define PTP_TX_SIGNALING_OFFSET 0x00011600
+#define PTP_TX_GENERIC_OFFSET 0x00011700
+#define PTP_TX_SEND_SYNC_FRAME_MASK 0x00000001
+#define PTP_TX_SEND_FOLLOWUP_FRAME_MASK 0x00000002
+#define PTP_TX_SEND_PDELAYREQ_FRAME_MASK 0x00000004
+#define PTP_TX_SEND_PDELAYRESP_FRAME_MASK 0x00000008
+#define PTP_TX_SEND_PDELAYRESPFOLLOWUP_FRAME_MASK 0x00000010
+#define PTP_TX_SEND_ANNOUNCE_FRAME_MASK 0x00000020
+#define PTP_TX_SEND_FRAME6_BIT_MASK 0x00000040
+#define PTP_TX_SEND_FRAME7_BIT_MASK 0x00000080
+#define PTP_TX_FRAME_WAITING_MASK 0x0000ff00
+#define PTP_TX_FRAME_WAITING_SHIFT 8
+#define PTP_TX_WAIT_SYNC_FRAME_MASK 0x00000100
+#define PTP_TX_WAIT_FOLLOWUP_FRAME_MASK 0x00000200
+#define PTP_TX_WAIT_PDELAYREQ_FRAME_MASK 0x00000400
+#define PTP_TX_WAIT_PDELAYRESP_FRAME_MASK 0x00000800
+#define PTP_TX_WAIT_PDELAYRESPFOLLOWUP_FRAME_MASK 0x00001000
+#define PTP_TX_WAIT_ANNOUNCE_FRAME_MASK 0x00002000
+#define PTP_TX_WAIT_FRAME6_BIT_MASK 0x00004000
+#define PTP_TX_WAIT_FRAME7_BIT_MASK 0x00008000
+#define PTP_TX_WAIT_ALL_FRAMES_MASK 0x0000FF00
+#define PTP_TX_PACKET_FIELD_MASK 0x00070000
+#define PTP_TX_PACKET_FIELD_SHIFT 16
+/* 1-step Correction Field offset 802.1 ASrev */
+#define PTP_CRCT_FIELD_OFFSET 22
+/* 1-step Time Of Day offset 1588-2008 */
+#define PTP_TOD_FIELD_OFFSET 48
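+
+/*
+ * Illustrative sketch: the low nibble of the first byte after the Ethernet
+ * header carries the PTP message type, and PTP_MSG_TYPE_MASK (bit 3)
+ * separates event messages (which get timestamped) from general messages:
+ *
+ *   u8 msg_type = frame[ETH_HLEN] & 0xf;
+ *   bool is_event = !(msg_type & PTP_MSG_TYPE_MASK);
+ *   bool is_sync_msg = (msg_type == PTP_TYPE_SYNC);
+ */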
+
+int axienet_ptp_xmit(struct sk_buff *skb, struct net_device *ndev);
+irqreturn_t axienet_ptp_rx_irq(int irq, void *_ndev);
+irqreturn_t axienet_ptp_tx_irq(int irq, void *_ndev);
+
+#endif
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_clock.c b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_clock.c
new file mode 100644
index 000000000000..05c906019694
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_clock.c
@@ -0,0 +1,325 @@
+/*
+ * Xilinx FPGA Xilinx TSN PTP protocol clock Controller module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include "xilinx_tsn_timer.h"
+
+struct xlnx_ptp_timer {
+ struct device *dev;
+ void __iomem *baseaddr;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ spinlock_t reg_lock; /* ptp timer lock */
+ int irq;
+ int pps_enable;
+ int countpulse;
+};
+
+static void xlnx_tod_read(struct xlnx_ptp_timer *timer, struct timespec64 *ts)
+{
+ u32 sec, nsec;
+
+ nsec = in_be32(timer->baseaddr + XTIMER1588_CURRENT_RTC_NS);
+ sec = in_be32(timer->baseaddr + XTIMER1588_CURRENT_RTC_SEC_L);
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
+static void xlnx_rtc_offset_write(struct xlnx_ptp_timer *timer,
+ const struct timespec64 *ts)
+{
+ pr_debug("%s: sec: %ld nsec: %ld\n", __func__, ts->tv_sec, ts->tv_nsec);
+
+ out_be32((timer->baseaddr + XTIMER1588_RTC_OFFSET_SEC_H), 0);
+ out_be32((timer->baseaddr + XTIMER1588_RTC_OFFSET_SEC_L),
+ (ts->tv_sec));
+ out_be32((timer->baseaddr + XTIMER1588_RTC_OFFSET_NS), ts->tv_nsec);
+}
+
+static void xlnx_rtc_offset_read(struct xlnx_ptp_timer *timer,
+ struct timespec64 *ts)
+{
+ ts->tv_sec = in_be32(timer->baseaddr + XTIMER1588_RTC_OFFSET_SEC_L);
+ ts->tv_nsec = in_be32(timer->baseaddr + XTIMER1588_RTC_OFFSET_NS);
+}
+
+/* PTP clock operations
+ */
+static int xlnx_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct xlnx_ptp_timer *timer = container_of(ptp, struct xlnx_ptp_timer,
+ ptp_clock_info);
+
+ int neg_adj = 0;
+ u64 freq;
+ u32 diff, incval;
+
+ /* This number should be replaced by a call to get the frequency
+ * from the device-tree. Currently assumes 125 MHz.
+ */
+ incval = 0x800000;
+ /* for a 156.25 MHz ref clk the value is incval = 0x800000 */
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ freq = incval;
+ freq *= ppb;
+ diff = div_u64(freq, 1000000000ULL);
+
+ pr_debug("%s: adj: %d ppb: %d\n", __func__, diff, ppb);
+
+ incval = neg_adj ? (incval - diff) : (incval + diff);
+ out_be32((timer->baseaddr + XTIMER1588_RTC_INCREMENT), incval);
+ return 0;
+}
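+
+/*
+ * Worked example for the adjustment above, assuming the default 125 MHz
+ * increment value: for a requested correction of +10000 ppb,
+ *
+ *   freq = 0x800000 * 10000 = 83886080000
+ *   diff = 83886080000 / 1000000000 = 83
+ *
+ * so the increment register is programmed with 0x800000 + 83 = 0x800053.
+ */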
+
+static int xlnx_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ unsigned long flags;
+ struct xlnx_ptp_timer *timer = container_of(ptp, struct xlnx_ptp_timer,
+ ptp_clock_info);
+ struct timespec64 now, then = ns_to_timespec64(delta);
+
+ spin_lock_irqsave(&timer->reg_lock, flags);
+
+ xlnx_rtc_offset_read(timer, &now);
+
+ now = timespec64_add(now, then);
+
+ xlnx_rtc_offset_write(timer, (const struct timespec64 *)&now);
+ spin_unlock_irqrestore(&timer->reg_lock, flags);
+
+ return 0;
+}
+
+static int xlnx_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ unsigned long flags;
+ struct xlnx_ptp_timer *timer = container_of(ptp, struct xlnx_ptp_timer,
+ ptp_clock_info);
+ spin_lock_irqsave(&timer->reg_lock, flags);
+
+ xlnx_tod_read(timer, ts);
+
+ spin_unlock_irqrestore(&timer->reg_lock, flags);
+ return 0;
+}
+
+/**
+ * xlnx_ptp_settime - Set the current time on the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec64 containing the new time for the cycle counter
+ *
+ * Return: 0 in all cases.
+ *
+ * The seconds register is written first, then the nanoseconds register.
+ * The hardware loads the entire new value when the nanoseconds register
+ * is written.
+ */
+static int xlnx_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct xlnx_ptp_timer *timer = container_of(ptp, struct xlnx_ptp_timer,
+ ptp_clock_info);
+ struct timespec64 delta, tod;
+ struct timespec64 offset;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timer->reg_lock, flags);
+
+ /* First zero the offset */
+ offset.tv_sec = 0;
+ offset.tv_nsec = 0;
+ xlnx_rtc_offset_write(timer, &offset);
+
+ /* Get the current timer value */
+ xlnx_tod_read(timer, &tod);
+
+ /* Subtract the current reported time from our desired time */
+ delta = timespec64_sub(*ts, tod);
+
+ /* Don't write a negative offset */
+ if (delta.tv_sec <= 0) {
+ delta.tv_sec = 0;
+ if (delta.tv_nsec < 0)
+ delta.tv_nsec = 0;
+ }
+
+ xlnx_rtc_offset_write(timer, &delta);
+ spin_unlock_irqrestore(&timer->reg_lock, flags);
+ return 0;
+}
+
+static int xlnx_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct xlnx_ptp_timer *timer = container_of(ptp, struct xlnx_ptp_timer,
+ ptp_clock_info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PPS:
+ timer->pps_enable = 1;
+ return 0;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info xlnx_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "Xilinx Timer",
+ .max_adj = 999999999,
+ .n_ext_ts = 0,
+ .pps = 1,
+ .adjfreq = xlnx_ptp_adjfreq,
+ .adjtime = xlnx_ptp_adjtime,
+ .gettime64 = xlnx_ptp_gettime,
+ .settime64 = xlnx_ptp_settime,
+ .enable = xlnx_ptp_enable,
+};
+
+/* module operations */
+
+/**
+ * xlnx_ptp_timer_isr - Interrupt Service Routine
+ * @irq: IRQ number
+ * @priv: pointer to the timer structure
+ *
+ * Return: IRQ_HANDLED for all cases
+ *
+ * Handles the timer interrupt. The timer interrupt fires 128 times per
+ * second. When the count reaches 128, a PTP_CLOCK_PPS event is emitted.
+ */
+static irqreturn_t xlnx_ptp_timer_isr(int irq, void *priv)
+{
+ struct xlnx_ptp_timer *timer = (struct xlnx_ptp_timer *)priv;
+ struct ptp_clock_event event;
+
+ event.type = PTP_CLOCK_PPS;
+ ++timer->countpulse;
+ if (timer->countpulse >= PULSESIN1PPS) {
+ timer->countpulse = 0;
+ if ((timer->ptp_clock) && (timer->pps_enable))
+ ptp_clock_event(timer->ptp_clock, &event);
+ }
+ out_be32((timer->baseaddr + XTIMER1588_INTERRUPT),
+ (1 << XTIMER1588_INT_SHIFT));
+
+ return IRQ_HANDLED;
+}
+
+int axienet_ptp_timer_remove(void *priv)
+{
+ struct xlnx_ptp_timer *timer = (struct xlnx_ptp_timer *)priv;
+
+ free_irq(timer->irq, (void *)timer);
+
+ axienet_phc_index = -1;
+ if (timer->ptp_clock) {
+ ptp_clock_unregister(timer->ptp_clock);
+ timer->ptp_clock = NULL;
+ }
+ kfree(timer);
+ return 0;
+}
+
+int axienet_get_phc_index(void *priv)
+{
+ struct xlnx_ptp_timer *timer = (struct xlnx_ptp_timer *)priv;
+
+ if (timer->ptp_clock)
+ return ptp_clock_index(timer->ptp_clock);
+ else
+ return -1;
+}
+
+void *axienet_ptp_timer_probe(void __iomem *base, struct platform_device *pdev)
+{
+ struct xlnx_ptp_timer *timer;
+ struct timespec64 ts;
+ int err = 0;
+
+ timer = kzalloc(sizeof(*timer), GFP_KERNEL);
+ if (!timer)
+ return NULL;
+
+ timer->baseaddr = base;
+
+ timer->irq = platform_get_irq_byname(pdev, "interrupt_ptp_timer");
+
+ if (timer->irq < 0) {
+ timer->irq = platform_get_irq_byname(pdev, "rtc_irq");
+ if (timer->irq > 0) {
+ pr_err("ptp timer interrupt name 'rtc_irq' is"
+ "deprecated\n");
+ } else {
+ pr_err("ptp timer interrupt not found\n");
+ kfree(timer);
+ return NULL;
+ }
+ }
+ spin_lock_init(&timer->reg_lock);
+
+ timer->ptp_clock_info = xlnx_ptp_clock_info;
+
+ timer->ptp_clock = ptp_clock_register(&timer->ptp_clock_info,
+ &pdev->dev);
+
+ if (IS_ERR(timer->ptp_clock)) {
+ err = PTR_ERR(timer->ptp_clock);
+ pr_debug("Failed to register ptp clock\n");
+ goto out;
+ }
+
+ axienet_phc_index = ptp_clock_index(timer->ptp_clock);
+
+ ts = ktime_to_timespec64(ktime_get_real());
+
+ xlnx_ptp_settime(&timer->ptp_clock_info, &ts);
+
+ /* Enable interrupts */
+ err = request_irq(timer->irq,
+ xlnx_ptp_timer_isr,
+ 0,
+ "ptp_rtc",
+ (void *)timer);
+ if (err)
+ goto err_irq;
+
+ return timer;
+
+err_irq:
+ ptp_clock_unregister(timer->ptp_clock);
+out:
+ timer->ptp_clock = NULL;
+ return NULL;
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_xmit.c b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_xmit.c
new file mode 100644
index 000000000000..831b4b7b5085
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_xmit.c
@@ -0,0 +1,369 @@
+/*
+ * Xilinx FPGA Xilinx TSN PTP transfer protocol module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "xilinx_axienet.h"
+#include "xilinx_tsn_ptp.h"
+#include "xilinx_tsn_timer.h"
+#include <linux/ptp_classify.h>
+
+#define PTP_ONE_SECOND 1000000000 /**< Value in ns */
+
+#define msg_type_string(type) \
+ ((type) == PTP_TYPE_SYNC) ? "SYNC" : \
+ ((type) == PTP_TYPE_FOLLOW_UP) ? "FOLLOW_UP" : \
+ ((type) == PTP_TYPE_PDELAYREQ) ? "PDELAY_REQ" : \
+ ((type) == PTP_TYPE_PDELAYRESP) ? "PDELAY_RESP" : \
+ ((type) == PTP_TYPE_PDELAYRESP_FOLLOW_UP) ? "PDELAY_RESP_FOLLOW_UP" : \
+ ((type) == PTP_TYPE_ANNOUNCE) ? "ANNOUNCE" : \
+ "UNKNOWN"
+
+/**
+ * memcpy_fromio_32 - copy ptp buffer from HW
+ * @lp: Pointer to axienet local structure
+ * @offset: Offset in the PTP buffer
+ * @data: Destination buffer
+ * @len: Len to copy
+ *
+ * This function copies the data from the PTP buffer to the destination buffer
+ */
+static void memcpy_fromio_32(struct axienet_local *lp,
+ unsigned long offset, u8 *data, size_t len)
+{
+ while (len >= 4) {
+ *(u32 *)data = axienet_ior(lp, offset);
+ len -= 4;
+ offset += 4;
+ data += 4;
+ }
+
+ if (len > 0) {
+ u32 leftover = axienet_ior(lp, offset);
+ u8 *src = (u8 *)&leftover;
+
+ while (len) {
+ *data++ = *src++;
+ len--;
+ }
+ }
+}
+
+/**
+ * memcpy_toio_32 - copy ptp buffer to HW
+ * @lp: Pointer to axienet local structure
+ * @offset: Offset in the PTP buffer
+ * @data: Source data
+ * @len: Len to copy
+ *
+ * This function copies the source data to the destination PTP buffer
+ */
+static void memcpy_toio_32(struct axienet_local *lp,
+ unsigned long offset, u8 *data, size_t len)
+{
+ while (len >= 4) {
+ axienet_iow(lp, offset, *(u32 *)data);
+ len -= 4;
+ offset += 4;
+ data += 4;
+ }
+
+ if (len > 0) {
+ u32 leftover = 0;
+ u8 *dest = (u8 *)&leftover;
+
+ while (len) {
+ *dest++ = *data++;
+ len--;
+ }
+ axienet_iow(lp, offset, leftover);
+ }
+}
+
+static int is_sync(struct sk_buff *skb)
+{
+ u8 *msg_type;
+
+ msg_type = (u8 *)skb->data + ETH_HLEN;
+
+ return (*msg_type & 0xf) == PTP_TYPE_SYNC;
+}
+
+/**
+ * axienet_ptp_xmit - xmit skb using PTP HW
+ * @skb: sk_buff pointer that contains data to be Txed.
+ * @ndev: Pointer to net_device structure.
+ *
+ * Return: NETDEV_TX_OK, on success
+ * NETDEV_TX_BUSY, if any of the descriptors are not free
+ *
+ * This function is called to transmit a PTP skb. The function picks a
+ * free PTP Tx buffer entry and sends the frame from it.
+ */
+int axienet_ptp_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ u8 msg_type;
+ struct axienet_local *lp = netdev_priv(ndev);
+ unsigned long flags;
+ u8 tx_frame_waiting;
+ u8 free_index;
+ u32 cmd1_field = 0;
+ u32 cmd2_field = 0;
+
+ msg_type = *(u8 *)(skb->data + ETH_HLEN);
+
+ pr_debug(" -->XMIT: protocol: %x message: %s frame_len: %d\n",
+ skb->protocol,
+ msg_type_string(msg_type & 0xf), skb->len);
+
+ tx_frame_waiting = (axienet_ior(lp, PTP_TX_CONTROL_OFFSET) &
+ PTP_TX_FRAME_WAITING_MASK) >>
+ PTP_TX_FRAME_WAITING_SHIFT;
+
+ /* we reached last frame */
+ if (tx_frame_waiting & (1 << 7)) {
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ pr_debug("tx_frame_waiting: %d\n", tx_frame_waiting);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* go to next available slot */
+ free_index = fls(tx_frame_waiting);
+
+ /* write the len */
+ if (lp->ptp_ts_type == HWTSTAMP_TX_ONESTEP_SYNC &&
+ is_sync(skb)) {
+ /* enable 1STEP SYNC */
+ cmd1_field |= PTP_TX_CMD_1STEP_SHIFT;
+ cmd2_field |= PTP_TOD_FIELD_OFFSET;
+ }
+
+ cmd1_field |= skb->len;
+
+ axienet_iow(lp, PTP_TX_BUFFER_OFFSET(free_index), cmd1_field);
+ axienet_iow(lp, PTP_TX_BUFFER_OFFSET(free_index) +
+ PTP_TX_BUFFER_CMD2_FIELD, cmd2_field);
+ memcpy_toio_32(lp,
+ (PTP_TX_BUFFER_OFFSET(free_index) +
+ PTP_TX_CMD_FIELD_LEN),
+ skb->data, skb->len);
+
+ /* send the frame */
+ axienet_iow(lp, PTP_TX_CONTROL_OFFSET, (1 << free_index));
+
+ if (lp->ptp_ts_type != HWTSTAMP_TX_ONESTEP_SYNC ||
+ (!is_sync(skb))) {
+ spin_lock_irqsave(&lp->ptp_tx_lock, flags);
+ skb->cb[0] = free_index;
+ skb_queue_tail(&lp->ptp_txq, skb);
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_tx_timestamp(skb);
+ spin_unlock_irqrestore(&lp->ptp_tx_lock, flags);
+ }
+ return NETDEV_TX_OK;
+}
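+
+/*
+ * Note on the slot selection above: the waiting bitmap fills from bit 0
+ * upward, so fls() yields the next free buffer index. For example, with
+ * three frames pending:
+ *
+ *   tx_frame_waiting = 0x07;
+ *   free_index = fls(tx_frame_waiting);    (evaluates to 3)
+ *
+ * Once bit 7 is set all eight buffers are in use, which is why the queue
+ * is stopped and NETDEV_TX_BUSY returned.
+ */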
+
+/**
+ * axienet_set_timestamp - timestamp skb with HW timestamp
+ * @lp: Pointer to axienet local structure
+ * @hwtstamps: Pointer to skb timestamp structure
+ * @offset: offset of the timestamp in the PTP buffer
+ *
+ * Return: None.
+ *
+ */
+static void axienet_set_timestamp(struct axienet_local *lp,
+ struct skb_shared_hwtstamps *hwtstamps,
+ unsigned int offset)
+{
+ u32 captured_ns;
+ u32 captured_sec;
+
+ captured_ns = axienet_ior(lp, offset + 4);
+ captured_sec = axienet_ior(lp, offset);
+
+ /* Upper 32 bits contain s, lower 32 bits contain ns. */
+ hwtstamps->hwtstamp = ktime_set(captured_sec,
+ captured_ns);
+}
+
+/**
+ * axienet_ptp_recv - receive ptp buffer in skb from HW
+ * @ndev: Pointer to net_device structure.
+ *
+ * This function is called from the PTP Rx ISR. It allocates an skb,
+ * copies the PTP Rx buffer data into it, and calls netif_rx() for
+ * further processing.
+ *
+ */
+static void axienet_ptp_recv(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ unsigned long ptp_frame_base_addr = 0;
+ struct sk_buff *skb;
+ u16 msg_len;
+ u8 msg_type;
+ u32 bytes = 0;
+ u32 packets = 0;
+
+ pr_debug("%s:\n ", __func__);
+
+ while (((lp->ptp_rx_hw_pointer & 0xf) !=
+ (lp->ptp_rx_sw_pointer & 0xf))) {
+ skb = netdev_alloc_skb(ndev, PTP_RX_FRAME_SIZE);
+ if (!skb)
+ break;
+
+ lp->ptp_rx_sw_pointer += 1;
+
+ ptp_frame_base_addr = PTP_RX_BASE_OFFSET +
+ ((lp->ptp_rx_sw_pointer & 0xf) *
+ PTP_RX_HWBUF_SIZE);
+
+ memset(skb->data, 0x0, PTP_RX_FRAME_SIZE);
+
+ memcpy_fromio_32(lp, ptp_frame_base_addr, skb->data,
+ PTP_RX_FRAME_SIZE);
+
+ msg_type = *(u8 *)(skb->data + ETH_HLEN) & 0xf;
+ msg_len = *(u16 *)(skb->data + ETH_HLEN + 2);
+
+ skb_put(skb, ntohs(msg_len) + ETH_HLEN);
+
+ bytes += skb->len;
+ packets++;
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ pr_debug(" -->RECV: protocol: %x message: %s frame_len: %d\n",
+ skb->protocol, msg_type_string(msg_type & 0xf),
+ skb->len);
+ /* timestamp only event messages */
+ if (!(msg_type & PTP_MSG_TYPE_MASK)) {
+ axienet_set_timestamp(lp, skb_hwtstamps(skb),
+ (ptp_frame_base_addr +
+ PTP_HW_TSTAMP_OFFSET));
+ }
+
+ netif_rx(skb);
+ }
+ ndev->stats.rx_packets += packets;
+ ndev->stats.rx_bytes += bytes;
+}
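+
+/*
+ * The Rx ring above is 16 buffers deep (hence the "& 0xf" masks), each
+ * PTP_RX_HWBUF_SIZE (256) bytes, starting at PTP_RX_BASE_OFFSET. For
+ * example, a software pointer value of 18 maps to buffer index 2 at:
+ *
+ *   0x10000 + (18 & 0xf) * 256 = 0x10200
+ */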
+
+/**
+ * axienet_ptp_rx_irq - PTP RX ISR handler
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Return: IRQ_HANDLED for all cases.
+ */
+irqreturn_t axienet_ptp_rx_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ pr_debug("%s: received\n ", __func__);
+ lp->ptp_rx_hw_pointer = (axienet_ior(lp, PTP_RX_CONTROL_OFFSET)
+ & PTP_RX_PACKET_FIELD_MASK) >> 8;
+
+ axienet_ptp_recv(ndev);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * axienet_tx_tstamp - timestamp skb on the transmit path
+ * @work: Pointer to work_struct structure
+ *
+ * This adds the Tx timestamp to the skb
+ */
+void axienet_tx_tstamp(struct work_struct *work)
+{
+ struct axienet_local *lp = container_of(work, struct axienet_local,
+ tx_tstamp_work);
+ struct net_device *ndev = lp->ndev;
+ struct skb_shared_hwtstamps hwtstamps;
+ struct sk_buff *skb;
+ unsigned long ts_reg_offset;
+ unsigned long flags;
+ u8 tx_packet;
+ u8 index;
+ u32 bytes = 0;
+ u32 packets = 0;
+
+ memset(&hwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+
+ spin_lock_irqsave(&lp->ptp_tx_lock, flags);
+
+ tx_packet = (axienet_ior(lp, PTP_TX_CONTROL_OFFSET) &
+ PTP_TX_PACKET_FIELD_MASK) >>
+ PTP_TX_PACKET_FIELD_SHIFT;
+
+ while ((skb = __skb_dequeue(&lp->ptp_txq)) != NULL) {
+ index = skb->cb[0];
+
+ /* dequeued packet yet to be transmitted? */
+ if (index > tx_packet) {
+ /* enqueue it back and break */
+ skb_queue_tail(&lp->ptp_txq, skb);
+ break;
+ }
+ /* time stamp reg offset */
+ ts_reg_offset = PTP_TX_BUFFER_OFFSET(index) +
+ PTP_HW_TSTAMP_OFFSET;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
+ axienet_set_timestamp(lp, &hwtstamps, ts_reg_offset);
+ skb_tstamp_tx(skb, &hwtstamps);
+ }
+
+ bytes += skb->len;
+ packets++;
+ dev_kfree_skb_any(skb);
+ }
+ ndev->stats.tx_packets += packets;
+ ndev->stats.tx_bytes += bytes;
+
+ spin_unlock_irqrestore(&lp->ptp_tx_lock, flags);
+}
+
+/**
+ * axienet_ptp_tx_irq - PTP TX irq handler
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Return: IRQ_HANDLED for all cases.
+ *
+ */
+irqreturn_t axienet_ptp_tx_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ pr_debug("%s: got tx interrupt\n", __func__);
+
+ /* read ctrl register to clear the interrupt */
+ axienet_ior(lp, PTP_TX_CONTROL_OFFSET);
+
+ schedule_work(&lp->tx_tstamp_work);
+
+ netif_wake_queue(ndev);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_qci.c b/drivers/net/ethernet/xilinx/xilinx_tsn_qci.c
new file mode 100644
index 000000000000..20efa6c4d365
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_qci.c
@@ -0,0 +1,151 @@
+/*
+ * Xilinx FPGA Xilinx TSN QCI Controller module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "xilinx_tsn_switch.h"
+
+#define SMC_MODE_SHIFT 28
+#define SMC_CBR_MASK 0x00FFFFFF
+#define SMC_EBR_MASK 0x00FFFFFF
+#define IN_PORTID_MASK 0x3
+#define IN_PORT_SHIFT 14
+#define MAX_FR_SIZE_MASK 0x00000FFF
+
+#define GATE_ID_SHIFT 24
+#define METER_ID_SHIFT 8
+#define EN_METER_SHIFT 6
+#define ALLOW_STREAM_SHIFT 5
+#define EN_PSFP_SHIFT 4
+#define WR_OP_TYPE_MASK 0x3
+#define WR_OP_TYPE_SHIFT 2
+#define OP_TYPE_SHIFT 1
+#define PSFP_EN_CONTROL_MASK 0x1
+
+/**
+ * psfp_control - Configure the control register for PSFP
+ * @data: Value to be programmed
+ */
+void psfp_control(struct psfp_config data)
+{
+ u32 mask;
+ u32 timeout = 20000;
+
+ mask = data.gate_id << GATE_ID_SHIFT;
+ mask |= data.meter_id << METER_ID_SHIFT;
+ mask |= data.en_meter << EN_METER_SHIFT;
+ mask |= data.allow_stream << ALLOW_STREAM_SHIFT;
+ mask |= data.en_psfp << EN_PSFP_SHIFT;
+ mask |= (data.wr_op_type & WR_OP_TYPE_MASK) << WR_OP_TYPE_SHIFT;
+ mask |= data.op_type << OP_TYPE_SHIFT;
+ mask |= PSFP_EN_CONTROL_MASK;
+
+ axienet_iow(&lp, PSFP_CONTROL_OFFSET, mask);
+
+ /* wait for write to complete */
+ while ((axienet_ior(&lp, PSFP_CONTROL_OFFSET) &
+ PSFP_EN_CONTROL_MASK) && timeout)
+ timeout--;
+
+ if (!timeout)
+ pr_warn("PSFP control write took longer time!!");
+}
+
+/**
+ * get_stream_filter_config - Get Stream Filter Configuration
+ * @data: Value returned
+ */
+void get_stream_filter_config(struct stream_filter *data)
+{
+ u32 reg_val;
+
+ reg_val = axienet_ior(&lp, STREAM_FILTER_CONFIG_OFFSET);
+
+ data->max_fr_size = reg_val & MAX_FR_SIZE_MASK;
+ data->in_pid = (reg_val >> IN_PORT_SHIFT) & IN_PORTID_MASK;
+}
+
+/**
+ * config_stream_filter - Configure Stream Filter Configuration
+ * @data: Value to be programmed
+ */
+void config_stream_filter(struct stream_filter data)
+{
+ u32 mask;
+
+ mask = ((data.in_pid & IN_PORTID_MASK) << IN_PORT_SHIFT) |
+ (data.max_fr_size & MAX_FR_SIZE_MASK);
+ axienet_iow(&lp, STREAM_FILTER_CONFIG_OFFSET, mask);
+}
+
+/**
+ * get_meter_reg - Read the stream meter configuration register values
+ * @data: Value returned
+ */
+void get_meter_reg(struct meter_config *data)
+{
+ u32 conf_r4;
+
+ data->cir = axienet_ior(&lp, STREAM_METER_CIR_OFFSET);
+ data->eir = axienet_ior(&lp, STREAM_METER_EIR_OFFSET);
+ data->cbr = axienet_ior(&lp, STREAM_METER_CBR_OFFSET) & SMC_CBR_MASK;
+ conf_r4 = axienet_ior(&lp, STREAM_METER_EBR_OFFSET);
+
+ data->ebr = conf_r4 & SMC_EBR_MASK;
+ data->mode = (conf_r4 & 0xF0000000) >> SMC_MODE_SHIFT;
+}
+
+/**
+ * program_meter_reg - Configure the stream meter configuration registers
+ * @data: Value to be programmed
+ */
+void program_meter_reg(struct meter_config data)
+{
+ u32 conf_r4;
+
+ axienet_iow(&lp, STREAM_METER_CIR_OFFSET, data.cir);
+ axienet_iow(&lp, STREAM_METER_EIR_OFFSET, data.eir);
+ axienet_iow(&lp, STREAM_METER_CBR_OFFSET, data.cbr & SMC_CBR_MASK);
+
+ conf_r4 = (data.ebr & SMC_EBR_MASK) | (data.mode << SMC_MODE_SHIFT);
+ axienet_iow(&lp, STREAM_METER_EBR_OFFSET, conf_r4);
+}
+
+/**
+ * get_psfp_static_counter - Get the PSFP static counter values
+ * @data: return value, containing the counter value
+ */
+void get_psfp_static_counter(struct psfp_static_counter *data)
+{
+ int offset = (data->num) * 8;
+
+ data->psfp_fr_count.lsb = axienet_ior(&lp, TOTAL_PSFP_FRAMES_OFFSET +
+ offset);
+ data->psfp_fr_count.msb = axienet_ior(&lp, TOTAL_PSFP_FRAMES_OFFSET +
+ offset + 0x4);
+
+ data->err_filter_ins_port.lsb = axienet_ior(&lp,
+ FLTR_INGS_PORT_ERR_OFFSET + offset);
+ data->err_filter_ins_port.msb = axienet_ior(&lp,
+ FLTR_INGS_PORT_ERR_OFFSET + offset + 0x4);
+
+ data->err_filtr_sdu.lsb = axienet_ior(&lp, FLTR_STDU_ERR_OFFSET +
+ offset);
+ data->err_filtr_sdu.msb = axienet_ior(&lp, FLTR_STDU_ERR_OFFSET +
+ offset + 0x4);
+
+ data->err_meter.lsb = axienet_ior(&lp, METER_ERR_OFFSET + offset);
+ data->err_meter.msb = axienet_ior(&lp, METER_ERR_OFFSET + offset + 0x4);
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.c b/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.c
new file mode 100644
index 000000000000..e7a054b78a6e
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.c
@@ -0,0 +1,232 @@
+/*
+ * Xilinx FPGA Xilinx TSN QBV scheduler module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "xilinx_axienet.h"
+#include "xilinx_tsn_shaper.h"
+
+static inline int axienet_map_gs_to_hw(struct axienet_local *lp, u32 gs)
+{
+ u8 be_queue = 0;
+ u8 re_queue = 1;
+ u8 st_queue = 2;
+ unsigned int acl_bit_map = 0;
+
+ if (lp->num_tc == 2)
+ st_queue = 1;
+
+ if (gs & GS_BE_OPEN)
+ acl_bit_map |= (1 << be_queue);
+ if (gs & GS_ST_OPEN)
+ acl_bit_map |= (1 << st_queue);
+ if (lp->num_tc == 3 && (gs & GS_RE_OPEN))
+ acl_bit_map |= (1 << re_queue);
+
+ return acl_bit_map;
+}
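+
+/*
+ * Mapping example: in a 2-queue system the ST traffic class sits on
+ * hardware queue 1, so the user-visible gate state maps as
+ *
+ *   axienet_map_gs_to_hw(lp, GS_BE_OPEN | GS_ST_OPEN) = 0x3
+ *
+ * while the same call in a 3-queue system yields (1 << 0) | (1 << 2) = 0x5.
+ */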
+
+static int __axienet_set_schedule(struct net_device *ndev, struct qbv_info *qbv)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ u16 i;
+ unsigned int acl_bit_map = 0;
+ u32 u_config_change = 0;
+ u8 port = qbv->port;
+
+ if (qbv->cycle_time == 0) {
+ /* clear the gate enable bit */
+ u_config_change &= ~CC_ADMIN_GATE_ENABLE_BIT;
+ /* open all the gates */
+ u_config_change |= CC_ADMIN_GATE_STATE_SHIFT;
+
+ axienet_iow(lp, CONFIG_CHANGE(port), u_config_change);
+
+ return 0;
+ }
+
+ if (axienet_ior(lp, PORT_STATUS(port)) & 1) {
+ if (qbv->force) {
+ u_config_change &= ~CC_ADMIN_GATE_ENABLE_BIT;
+ axienet_iow(lp, CONFIG_CHANGE(port), u_config_change);
+ } else {
+ return -EALREADY;
+ }
+ }
+ /* write admin time */
+ axienet_iow(lp, ADMIN_CYCLE_TIME_DENOMINATOR(port),
+ qbv->cycle_time & CYCLE_TIME_DENOMINATOR_MASK);
+
+ axienet_iow(lp, ADMIN_BASE_TIME_NS(port), qbv->ptp_time_ns);
+
+ axienet_iow(lp, ADMIN_BASE_TIME_SEC(port),
+ qbv->ptp_time_sec & 0xFFFFFFFF);
+ axienet_iow(lp, ADMIN_BASE_TIME_SECS(port),
+ (qbv->ptp_time_sec >> 32) & BASE_TIME_SECS_MASK);
+
+ u_config_change = axienet_ior(lp, CONFIG_CHANGE(port));
+
+ u_config_change &= ~(CC_ADMIN_CTRL_LIST_LENGTH_MASK <<
+ CC_ADMIN_CTRL_LIST_LENGTH_SHIFT);
+ u_config_change |= (qbv->list_length & CC_ADMIN_CTRL_LIST_LENGTH_MASK)
+ << CC_ADMIN_CTRL_LIST_LENGTH_SHIFT;
+
+ /* program each list */
+ for (i = 0; i < qbv->list_length; i++) {
+ acl_bit_map = axienet_map_gs_to_hw(lp, qbv->acl_gate_state[i]);
+ axienet_iow(lp, ADMIN_CTRL_LIST(port, i),
+ (acl_bit_map & (ACL_GATE_STATE_MASK)) <<
+ ACL_GATE_STATE_SHIFT);
+
+ /* set the time for each entry */
+ axienet_iow(lp, ADMIN_CTRL_LIST_TIME(port, i),
+ qbv->acl_gate_time[i] & CTRL_LIST_TIME_INTERVAL_MASK);
+ }
+
+ /* clear interrupt status */
+ axienet_iow(lp, INT_STATUS(port), 0);
+
+ /* kick in new config change */
+ u_config_change |= CC_ADMIN_CONFIG_CHANGE_BIT;
+
+ /* enable gate */
+ u_config_change |= CC_ADMIN_GATE_ENABLE_BIT;
+
+ /* start */
+ axienet_iow(lp, CONFIG_CHANGE(port), u_config_change);
+
+ return 0;
+}
+
+int axienet_set_schedule(struct net_device *ndev, void __user *useraddr)
+{
+ struct qbv_info *config;
+ int ret;
+
+ config = kmalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ if (copy_from_user(config, useraddr, sizeof(struct qbv_info))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ pr_debug("setting new schedule\n");
+
+ ret = __axienet_set_schedule(ndev, config);
+out:
+ kfree(config);
+ return ret;
+}
+
+static int __axienet_get_schedule(struct net_device *ndev, struct qbv_info *qbv)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ u16 i = 0;
+ u32 u_value = 0;
+ u8 port = qbv->port;
+
+ if (!(axienet_ior(lp, CONFIG_CHANGE(port)) &
+ CC_ADMIN_GATE_ENABLE_BIT)) {
+ qbv->cycle_time = 0;
+ return 0;
+ }
+
+ u_value = axienet_ior(lp, GATE_STATE(port));
+ qbv->list_length = (u_value >> CC_ADMIN_CTRL_LIST_LENGTH_SHIFT) &
+ CC_ADMIN_CTRL_LIST_LENGTH_MASK;
+
+ u_value = axienet_ior(lp, OPER_CYCLE_TIME_DENOMINATOR(port));
+ qbv->cycle_time = u_value & CYCLE_TIME_DENOMINATOR_MASK;
+
+ u_value = axienet_ior(lp, OPER_BASE_TIME_NS(port));
+ qbv->ptp_time_ns = u_value & OPER_BASE_TIME_NS_MASK;
+
+ qbv->ptp_time_sec = axienet_ior(lp, OPER_BASE_TIME_SEC(port));
+ u_value = axienet_ior(lp, OPER_BASE_TIME_SECS(port));
+ qbv->ptp_time_sec |= (u64)(u_value & BASE_TIME_SECS_MASK) << 32;
+
+ for (i = 0; i < qbv->list_length; i++) {
+ u_value = axienet_ior(lp, OPER_CTRL_LIST(port, i));
+ qbv->acl_gate_state[i] = (u_value >> ACL_GATE_STATE_SHIFT) &
+ ACL_GATE_STATE_MASK;
+ /*
+ * In a 2-queue system the hardware reports an ST gate state
+ * value of 2, but to the user the ST gate state is always
+ * presented as 4.
+ */
+ if (lp->num_tc == 2 && qbv->acl_gate_state[i] == 2)
+ qbv->acl_gate_state[i] = 4;
+
+ u_value = axienet_ior(lp, OPER_CTRL_LIST_TIME(port, i));
+ qbv->acl_gate_time[i] = u_value & CTRL_LIST_TIME_INTERVAL_MASK;
+ }
+ return 0;
+}
+
+int axienet_get_schedule(struct net_device *ndev, void __user *useraddr)
+{
+ struct qbv_info *qbv;
+ int ret = 0;
+
+ qbv = kmalloc(sizeof(*qbv), GFP_KERNEL);
+ if (!qbv)
+ return -ENOMEM;
+
+ if (copy_from_user(qbv, useraddr, sizeof(struct qbv_info))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ __axienet_get_schedule(ndev, qbv);
+
+ if (copy_to_user(useraddr, qbv, sizeof(struct qbv_info)))
+ ret = -EFAULT;
+out:
+ kfree(qbv);
+ return ret;
+}
+
+static irqreturn_t axienet_qbv_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ u8 port = 0; /* TODO */
+
+ /* clear status */
+ axienet_iow(lp, INT_CLEAR(port), 0);
+
+ return IRQ_HANDLED;
+}
+
+int axienet_qbv_init(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ int rc;
+
+ rc = request_irq(lp->qbv_irq, axienet_qbv_irq, 0, ndev->name, ndev);
+
+ return rc;
+}
+
+void axienet_qbv_remove(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ free_irq(lp->qbv_irq, ndev);
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.h b/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.h
new file mode 100644
index 000000000000..ac2e54d0e134
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.h
@@ -0,0 +1,151 @@
+/*
+ * Xilinx TSN QBV scheduler header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef XILINX_TSN_SHAPER_H
+#define XILINX_TSN_SHAPER_H
+
+/* 0x0 CONFIG_CHANGE
+ * 0x8 GATE_STATE
+ * 0x10 ADMIN_CTRL_LIST_LENGTH
+ * 0x18 ADMIN_CYCLE_TIME_DENOMINATOR
+ * 0x20 ADMIN_BASE_TIME_NS
+ * 0x24 ADMIN_BASE_TIME_SEC
+ * 0x28 ADMIN_BASE_TIME_SECS
+ * 0x30 INT_STAT
+ * 0x34 INT_EN
+ * 0x38 INT_CLR
+ * 0x3c STATUS
+ * 0x40 CONFIG_CHANGE_TIME_NS
+ * 0x44 CONFIG_CHANGE_TIME_SEC
+ * 0x48 CONFIG_CHANGE_TIME_SECS
+ * 0x50 OPER_CTRL_LIST_LENGTH
+ * 0x58 OPER_CYCLE_TIME_DENOMINATOR
+ * 0x60 OPER_BASE_TIME_NS
+ * 0x64 OPER_BASE_TIME_SEC
+ * 0x68 OPER_BASE_TIME_SECS
+ * 0x6c BE_XMIT_OVRRUN_CNT
+ * 0x74 RES_XMIT_OVRRUN_CNT
+ * 0x7c ST_XMIT_OVRRUN_CNT
+ */
+
+enum hw_port {
+ PORT_EP = 0,
+ PORT_TEMAC_1,
+ PORT_TEMAC_2,
+};
+
 /* EP */ /* TEMAC1 */ /* TEMAC2 */
+static u32 qbv_reg_map[3] = { 0x0, 0x14000, 0x14000 };
+
+/* 0x14000 0x14FFC Time Schedule Registers (Control & Status)
+ * 0x15000 0x15FFF Time Schedule Control List Entries
+ */
+
+#define TIME_SCHED_BASE(port) qbv_reg_map[(port)]
+
+#define CTRL_LIST_BASE(port) (TIME_SCHED_BASE(port) + 0x1000)
+
+/* control list entries
+ * admin control list 0 : 31
+ * "Time interval between two gate entries" must be greater than
+ * "time required to transmit biggest supported frame" on that queue when
+ * the gate for the queue is going from open to close state.
+ */
+#define ADMIN_CTRL_LIST(port, n) (CTRL_LIST_BASE(port) + ((n) * 8))
+#define ACL_GATE_STATE_SHIFT 8
+#define ACL_GATE_STATE_MASK 0x7
+#define ADMIN_CTRL_LIST_TIME(port, n) (ADMIN_CTRL_LIST((port), n) + 4)
+
+#define OPER_CTRL_LIST(port, n) (CTRL_LIST_BASE(port) + 0x800 + ((n) * 8))
+#define OPER_CTRL_LIST_TIME(port, n) (OPER_CTRL_LIST(port, n) + 4)
+#define CTRL_LIST_TIME_INTERVAL_MASK 0xFFFFF
+
+#define CONFIG_CHANGE(port) (TIME_SCHED_BASE(port) + 0x0)
+#define CC_ADMIN_GATE_STATE_SHIFT 0x7
+#define CC_ADMIN_GATE_STATE_MASK (7)
+#define CC_ADMIN_CTRL_LIST_LENGTH_SHIFT (8)
+#define CC_ADMIN_CTRL_LIST_LENGTH_MASK (0x1FF)
+/* This request bit is set when all the related Admin* fields are populated.
+ * This bit is set by S/W and cleared by the core when the core starts with
+ * the new schedule. Once set, it can only be cleared by the core or by a
+ * hard/soft reset.
+ */
+#define CC_ADMIN_CONFIG_CHANGE_BIT BIT(30)
+#define CC_ADMIN_GATE_ENABLE_BIT BIT(31)
+
+#define GATE_STATE(port) (TIME_SCHED_BASE(port) + 0x8)
+#define GS_OPER_GATE_STATE_SHIFT (0)
+#define GS_OPER_GATE_STATE_MASK (0x7)
+#define GS_OPER_CTRL_LIST_LENGTH_SHIFT (8)
+#define GS_OPER_CTRL_LIST_LENGTH_MASK (0x3F)
+#define GS_SUP_MAX_LIST_LENGTH_SHIFT (16)
+#define GS_SUP_MAX_LIST_LENGTH_MASK (0x3F)
+#define GS_TICK_GRANULARITY_SHIFT (24)
+#define GS_TICK_GRANULARITY_MASK (0x3F)
+
+#define ADMIN_CYCLE_TIME_DENOMINATOR(port) (TIME_SCHED_BASE(port) + 0x18)
+#define ADMIN_BASE_TIME_NS(port) (TIME_SCHED_BASE(port) + 0x20)
+#define ADMIN_BASE_TIME_SEC(port) (TIME_SCHED_BASE(port) + 0x24)
+#define ADMIN_BASE_TIME_SECS(port) (TIME_SCHED_BASE(port) + 0x28)
+
+#define INT_STATUS(port) (TIME_SCHED_BASE(port) + 0x30)
+#define INT_ENABLE(port) (TIME_SCHED_BASE(port) + 0x34)
+#define INT_CLEAR(port) (TIME_SCHED_BASE(port) + 0x38)
+#define PORT_STATUS(port) (TIME_SCHED_BASE(port) + 0x3c)
+
+/* Config Change time is valid after Config Pending bit is set. */
+#define CONFIG_CHANGE_TIME_NS(port) (TIME_SCHED_BASE((port)) + 0x40)
+#define CONFIG_CHANGE_TIME_SEC(port) (TIME_SCHED_BASE((port)) + 0x44)
+#define CONFIG_CHANGE_TIME_SECS(port) (TIME_SCHED_BASE((port)) + 0x48)
+
+#define OPER_CONTROL_LIST_LENGTH(port) (TIME_SCHED_BASE(port) + 0x50)
+#define OPER_CYCLE_TIME_DENOMINATOR(port) (TIME_SCHED_BASE(port) + 0x58)
+#define CYCLE_TIME_DENOMINATOR_MASK (0x3FFFFFFF)
+
+#define OPER_BASE_TIME_NS(port) (TIME_SCHED_BASE(port) + 0x60)
+#define OPER_BASE_TIME_NS_MASK (0x3FFFFFFF)
+#define OPER_BASE_TIME_SEC(port) (TIME_SCHED_BASE(port) + 0x64)
+#define OPER_BASE_TIME_SECS(port) (TIME_SCHED_BASE(port) + 0x68)
+#define BASE_TIME_SECS_MASK (0xFFFF)
+
+#define BE_XMIT_OVERRUN_COUNT(port) (TIME_SCHED_BASE(port) + 0x6c)
+#define RES_XMIT_OVERRUN_COUNT(port) (TIME_SCHED_BASE(port) + 0x74)
+#define ST_XMIT_OVERRUN_COUNT(port) (TIME_SCHED_BASE(port) + 0x7c)
+
+/* Internally the hardware deals with queues only:
+ * in a 3-queue system the ST acl bitmap would be 1 << 2,
+ * in a 2-queue system the ST acl bitmap would be 1 << 1.
+ * This is confusing to users, so expose the following fixed
+ * gate states and map them to the hardware internally.
+ */
+#define GS_BE_OPEN BIT(0)
+#define GS_RE_OPEN BIT(1)
+#define GS_ST_OPEN BIT(2)
+#define QBV_MAX_ENTRIES 256
+
+struct qbv_info {
+ u8 port;
+ u8 force;
+ u32 cycle_time;
+ u64 ptp_time_sec;
+ u32 ptp_time_ns;
+ u32 list_length;
+ u32 acl_gate_state[QBV_MAX_ENTRIES];
+ u32 acl_gate_time[QBV_MAX_ENTRIES];
+};
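+
+/*
+ * Illustrative sketch: a minimal two-entry schedule that alternates between
+ * scheduled and best-effort traffic might be built as follows before being
+ * handed to the driver. All values are hypothetical and the times are in
+ * the units expected by the core:
+ *
+ *   struct qbv_info qbv = { 0 };
+ *
+ *   qbv.port = PORT_TEMAC_1;
+ *   qbv.cycle_time = 1000000;
+ *   qbv.list_length = 2;
+ *   qbv.acl_gate_state[0] = GS_ST_OPEN;
+ *   qbv.acl_gate_time[0] = 500000;
+ *   qbv.acl_gate_state[1] = GS_BE_OPEN;
+ *   qbv.acl_gate_time[1] = 500000;
+ */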
+
+#endif /* XILINX_TSN_SHAPER_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_switch.c b/drivers/net/ethernet/xilinx/xilinx_tsn_switch.c
new file mode 100644
index 000000000000..cccaaa76cf7a
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_switch.c
@@ -0,0 +1,807 @@
+/*
+ * Xilinx FPGA TSN Switch Controller driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "xilinx_tsn_switch.h"
+#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+
+static struct miscdevice switch_dev;
+struct axienet_local lp;
+
+#define ADD 1
+#define DELETE 0
+
+#define PMAP_EGRESS_QUEUE_MASK 0x7
+#define PMAP_EGRESS_QUEUE0_SELECT 0x0
+#define PMAP_EGRESS_QUEUE1_SELECT 0x1
+#define PMAP_EGRESS_QUEUE2_SELECT 0x2
+#define PMAP_PRIORITY0_SHIFT 0
+#define PMAP_PRIORITY1_SHIFT 4
+#define PMAP_PRIORITY2_SHIFT 8
+#define PMAP_PRIORITY3_SHIFT 12
+#define PMAP_PRIORITY4_SHIFT 16
+#define PMAP_PRIORITY5_SHIFT 20
+#define PMAP_PRIORITY6_SHIFT 24
+#define PMAP_PRIORITY7_SHIFT 28
+#define SDL_EN_CAM_IPV_SHIFT 28
+#define SDL_CAM_IPV_SHIFT 29
+
+#define SDL_CAM_WR_ENABLE BIT(0)
+#define SDL_CAM_ADD_ENTRY 0x1
+#define SDL_CAM_DELETE_ENTRY 0x3
+#define SDL_CAM_VLAN_SHIFT 16
+#define SDL_CAM_VLAN_MASK 0xFFF
+#define SDL_CAM_IPV_MASK 0x7
+#define SDL_CAM_PORT_LIST_SHIFT 8
+#define SDL_GATEID_SHIFT 16
+#define SDL_CAM_FWD_TO_EP BIT(0)
+#define SDL_CAM_FWD_TO_PORT_1 BIT(1)
+#define SDL_CAM_FWD_TO_PORT_2 BIT(2)
+#define SDL_CAM_EP_ACTION_LIST_SHIFT 0
+#define SDL_CAM_MAC_ACTION_LIST_SHIFT 4
+#define SDL_CAM_DEST_MAC_XLATION BIT(0)
+#define SDL_CAM_VLAN_ID_XLATION BIT(1)
+#define SDL_CAM_UNTAG_FRAME BIT(2)
+
+/* Match table for of_platform binding */
+static const struct of_device_id tsnswitch_of_match[] = {
+ { .compatible = "xlnx,tsn-switch", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, tsnswitch_of_match);
+
+static int switch_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int switch_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/* set_frame_filter_opt - set the Frame Filtering Type Field Options */
+static void set_frame_filter_opt(u16 type1, u16 type2)
+{
+ int type = axienet_ior(&lp, XAS_FRM_FLTR_TYPE_FIELD_OPT_OFFSET);
+
+ if (type1)
+ type = (type & 0x0000FFFF) | (type1 << 16);
+ if (type2)
+ type = (type & 0xFFFF0000) | type2;
+ axienet_iow(&lp, XAS_FRM_FLTR_TYPE_FIELD_OPT_OFFSET, type);
+}
+
+/* MAC Port-1 Management Queueing Options */
+static void set_mac1_mngmntq(u32 config)
+{
+ axienet_iow(&lp, XAS_MAC1_MNG_Q_OPTION_OFFSET, config);
+}
+
+/* MAC Port-2 Management Queueing Options */
+static void set_mac2_mngmntq(u32 config)
+{
+ axienet_iow(&lp, XAS_MAC2_MNG_Q_OPTION_OFFSET, config);
+}
+
+/**
+ * set_switch_regs - write the switch configuration registers
+ * @data: Pointer to the configuration data to be written to the switch
+ */
+static void set_switch_regs(struct switch_data *data)
+{
+ int tmp;
+ u8 mac_addr[6];
+
+ axienet_iow(&lp, XAS_CONTROL_OFFSET, data->switch_ctrl);
+ axienet_iow(&lp, XAS_PMAP_OFFSET, data->switch_prt);
+ mac_addr[0] = data->sw_mac_addr[0];
+ mac_addr[1] = data->sw_mac_addr[1];
+ mac_addr[2] = data->sw_mac_addr[2];
+ mac_addr[3] = data->sw_mac_addr[3];
+ mac_addr[4] = data->sw_mac_addr[4];
+ mac_addr[5] = data->sw_mac_addr[5];
+ axienet_iow(&lp, XAS_MAC_LSB_OFFSET,
+ (mac_addr[0] << 24) | (mac_addr[1] << 16) |
+ (mac_addr[2] << 8) | (mac_addr[3]));
+ axienet_iow(&lp, XAS_MAC_MSB_OFFSET, (mac_addr[4] << 8) | mac_addr[5]);
+
+ /* Threshold */
+ tmp = (data->thld_ep_mac[0].t1 << 16) | data->thld_ep_mac[0].t2;
+ axienet_iow(&lp, XAS_EP2MAC_ST_FIFOT_OFFSET, tmp);
+
+ tmp = (data->thld_ep_mac[1].t1 << 16) | data->thld_ep_mac[1].t2;
+ axienet_iow(&lp, XAS_EP2MAC_RE_FIFOT_OFFSET, tmp);
+
+ tmp = (data->thld_ep_mac[2].t1 << 16) | data->thld_ep_mac[2].t2;
+ axienet_iow(&lp, XAS_EP2MAC_BE_FIFOT_OFFSET, tmp);
+
+ tmp = (data->thld_mac_mac[0].t1 << 16) | data->thld_mac_mac[0].t2;
+ axienet_iow(&lp, XAS_MAC2MAC_ST_FIFOT_OFFSET, tmp);
+
+ tmp = (data->thld_mac_mac[1].t1 << 16) | data->thld_mac_mac[1].t2;
+ axienet_iow(&lp, XAS_MAC2MAC_RE_FIFOT_OFFSET, tmp);
+
+ tmp = (data->thld_mac_mac[2].t1 << 16) | data->thld_mac_mac[2].t2;
+ axienet_iow(&lp, XAS_MAC2MAC_BE_FIFOT_OFFSET, tmp);
+
+ /* Port VLAN ID */
+ axienet_iow(&lp, XAS_EP_PORT_VLAN_OFFSET, data->ep_vlan);
+ axienet_iow(&lp, XAS_MAC_PORT_VLAN_OFFSET, data->mac_vlan);
+
+ /* max frame size */
+ axienet_iow(&lp, XAS_ST_MAX_FRAME_SIZE_OFFSET, data->max_frame_sc_que);
+ axienet_iow(&lp, XAS_RE_MAX_FRAME_SIZE_OFFSET, data->max_frame_res_que);
+ axienet_iow(&lp, XAS_BE_MAX_FRAME_SIZE_OFFSET, data->max_frame_be_que);
+}
+
+/**
+ * get_switch_regs - read the switch configuration and status registers
+ * @data: Pointer through which the switch state is returned
+ */
+static void get_switch_regs(struct switch_data *data)
+{
+ int tmp;
+
+ data->switch_status = axienet_ior(&lp, XAS_STATUS_OFFSET);
+ data->switch_ctrl = axienet_ior(&lp, XAS_CONTROL_OFFSET);
+ data->switch_prt = axienet_ior(&lp, XAS_PMAP_OFFSET);
+ tmp = axienet_ior(&lp, XAS_MAC_LSB_OFFSET);
+ data->sw_mac_addr[0] = (tmp & 0xFF000000) >> 24;
+ data->sw_mac_addr[1] = (tmp & 0xFF0000) >> 16;
+ data->sw_mac_addr[2] = (tmp & 0xFF00) >> 8;
+ data->sw_mac_addr[3] = (tmp & 0xFF);
+ tmp = axienet_ior(&lp, XAS_MAC_MSB_OFFSET);
+ data->sw_mac_addr[4] = (tmp & 0xFF00) >> 8;
+ data->sw_mac_addr[5] = (tmp & 0xFF);
+
+ /* Threshold */
+ tmp = axienet_ior(&lp, XAS_EP2MAC_ST_FIFOT_OFFSET);
+ data->thld_ep_mac[0].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_ep_mac[0].t2 = tmp & (0xFFFF);
+
+ tmp = axienet_ior(&lp, XAS_EP2MAC_RE_FIFOT_OFFSET);
+ data->thld_ep_mac[1].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_ep_mac[1].t2 = tmp & (0xFFFF);
+
+ tmp = axienet_ior(&lp, XAS_EP2MAC_BE_FIFOT_OFFSET);
+ data->thld_ep_mac[2].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_ep_mac[2].t2 = tmp & (0xFFFF);
+
+ tmp = axienet_ior(&lp, XAS_MAC2MAC_ST_FIFOT_OFFSET);
+ data->thld_mac_mac[0].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_mac_mac[0].t2 = tmp & (0xFFFF);
+
+ tmp = axienet_ior(&lp, XAS_MAC2MAC_RE_FIFOT_OFFSET);
+ data->thld_mac_mac[1].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_mac_mac[1].t2 = tmp & (0xFFFF);
+
+ tmp = axienet_ior(&lp, XAS_MAC2MAC_BE_FIFOT_OFFSET);
+ data->thld_mac_mac[2].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_mac_mac[2].t2 = tmp & (0xFFFF);
+
+ /* Port VLAN ID */
+ data->ep_vlan = axienet_ior(&lp, XAS_EP_PORT_VLAN_OFFSET);
+ data->mac_vlan = axienet_ior(&lp, XAS_MAC_PORT_VLAN_OFFSET);
+
+ /* max frame size */
+ data->max_frame_sc_que = (axienet_ior(&lp,
+ XAS_ST_MAX_FRAME_SIZE_OFFSET) & 0xFFFF);
+ data->max_frame_res_que = (axienet_ior(&lp,
+ XAS_RE_MAX_FRAME_SIZE_OFFSET) & 0xFFFF);
+ data->max_frame_be_que = (axienet_ior(&lp,
+ XAS_BE_MAX_FRAME_SIZE_OFFSET) & 0xFFFF);
+
+ /* frame filter type options*/
+ tmp = axienet_ior(&lp, XAS_FRM_FLTR_TYPE_FIELD_OPT_OFFSET);
+ data->typefield.type1 = (tmp & 0xFFFF0000) >> 16;
+ data->typefield.type2 = tmp & 0x0000FFFF;
+
+ /* MAC Port 1 Management Q option*/
+ data->mac1_config = axienet_ior(&lp, XAS_MAC1_MNG_Q_OPTION_OFFSET);
+ /* MAC Port 2 Management Q option*/
+ data->mac2_config = axienet_ior(&lp, XAS_MAC2_MNG_Q_OPTION_OFFSET);
+
+ /* Port VLAN Membership control*/
+ data->port_vlan_mem_ctrl = axienet_ior(&lp, XAS_VLAN_MEMB_CTRL_REG);
+ /* Port VLAN Membership read data*/
+ data->port_vlan_mem_data = axienet_ior(&lp, XAS_VLAN_MEMB_DATA_REG);
+}
+
+/**
+ * get_memory_static_counter - read the memory static counter values
+ * @data: Pointer through which the counter values are returned
+ */
+static void get_memory_static_counter(struct switch_data *data)
+{
+ data->mem_arr_cnt.cam_lookup.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_CAM_LOOKUP);
+ data->mem_arr_cnt.cam_lookup.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_CAM_LOOKUP + 0x4);
+
+ data->mem_arr_cnt.multicast_fr.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_MULTCAST);
+ data->mem_arr_cnt.multicast_fr.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_MULTCAST + 0x4);
+
+ data->mem_arr_cnt.err_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_MAC1);
+ data->mem_arr_cnt.err_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_MAC1 + 0x4);
+
+ data->mem_arr_cnt.err_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_MAC2);
+ data->mem_arr_cnt.err_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_MAC2 + 0x4);
+
+ data->mem_arr_cnt.sc_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC1_EP);
+ data->mem_arr_cnt.sc_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC1_EP + 0x4);
+ data->mem_arr_cnt.res_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC1_EP);
+ data->mem_arr_cnt.res_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC1_EP + 0x4);
+ data->mem_arr_cnt.be_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC1_EP);
+ data->mem_arr_cnt.be_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC1_EP + 0x4);
+ data->mem_arr_cnt.err_sc_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC1_EP);
+ data->mem_arr_cnt.err_sc_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC1_EP + 0x4);
+ data->mem_arr_cnt.err_res_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC1_EP);
+ data->mem_arr_cnt.err_res_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC1_EP + 0x4);
+ data->mem_arr_cnt.err_be_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC1_EP);
+ data->mem_arr_cnt.err_be_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC1_EP + 0x4);
+
+ data->mem_arr_cnt.sc_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC2_EP);
+ data->mem_arr_cnt.sc_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC2_EP + 0x4);
+ data->mem_arr_cnt.res_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC2_EP);
+ data->mem_arr_cnt.res_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC2_EP + 0x4);
+ data->mem_arr_cnt.be_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC2_EP);
+ data->mem_arr_cnt.be_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC2_EP + 0x4);
+ data->mem_arr_cnt.err_sc_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC2_EP);
+ data->mem_arr_cnt.err_sc_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC2_EP + 0x4);
+ data->mem_arr_cnt.err_res_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC2_EP);
+ data->mem_arr_cnt.err_res_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC2_EP + 0x4);
+ data->mem_arr_cnt.err_be_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC2_EP);
+ data->mem_arr_cnt.err_be_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC2_EP + 0x4);
+
+ data->mem_arr_cnt.sc_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_EP_MAC1);
+ data->mem_arr_cnt.sc_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_EP_MAC1 + 0x4);
+ data->mem_arr_cnt.res_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_EP_MAC1);
+ data->mem_arr_cnt.res_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_EP_MAC1 + 0x4);
+ data->mem_arr_cnt.be_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_EP_MAC1);
+ data->mem_arr_cnt.be_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_EP_MAC1 + 0x4);
+ data->mem_arr_cnt.err_sc_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_EP_MAC1);
+ data->mem_arr_cnt.err_sc_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_EP_MAC1 + 0x4);
+ data->mem_arr_cnt.err_res_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_EP_MAC1);
+ data->mem_arr_cnt.err_res_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_EP_MAC1 + 0x4);
+ data->mem_arr_cnt.err_be_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_EP_MAC1);
+ data->mem_arr_cnt.err_be_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_EP_MAC1 + 0x4);
+
+ data->mem_arr_cnt.sc_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC2_MAC1);
+ data->mem_arr_cnt.sc_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC2_MAC1 + 0x4);
+ data->mem_arr_cnt.res_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC2_MAC1);
+ data->mem_arr_cnt.res_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC2_MAC1 + 0x4);
+ data->mem_arr_cnt.be_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC2_MAC1);
+ data->mem_arr_cnt.be_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC2_MAC1 + 0x4);
+ data->mem_arr_cnt.err_sc_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC2_MAC1);
+ data->mem_arr_cnt.err_sc_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC2_MAC1 + 0x4);
+ data->mem_arr_cnt.err_res_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC2_MAC1);
+ data->mem_arr_cnt.err_res_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC2_MAC1 + 0x4);
+ data->mem_arr_cnt.err_be_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC2_MAC1);
+ data->mem_arr_cnt.err_be_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC2_MAC1 + 0x4);
+
+ data->mem_arr_cnt.sc_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_EP_MAC2);
+ data->mem_arr_cnt.sc_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_EP_MAC2 + 0x4);
+ data->mem_arr_cnt.res_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_EP_MAC2);
+ data->mem_arr_cnt.res_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_EP_MAC2 + 0x4);
+ data->mem_arr_cnt.be_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_EP_MAC2);
+ data->mem_arr_cnt.be_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_EP_MAC2 + 0x4);
+ data->mem_arr_cnt.err_sc_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_EP_MAC2);
+ data->mem_arr_cnt.err_sc_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_EP_MAC2 + 0x4);
+ data->mem_arr_cnt.err_res_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_EP_MAC2);
+ data->mem_arr_cnt.err_res_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_EP_MAC2 + 0x4);
+ data->mem_arr_cnt.err_be_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_EP_MAC2);
+ data->mem_arr_cnt.err_be_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_EP_MAC2 + 0x4);
+
+ data->mem_arr_cnt.sc_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC1_MAC2);
+ data->mem_arr_cnt.sc_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC1_MAC2 + 0x4);
+ data->mem_arr_cnt.res_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC1_MAC2);
+ data->mem_arr_cnt.res_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC1_MAC2 + 0x4);
+ data->mem_arr_cnt.be_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC1_MAC2);
+ data->mem_arr_cnt.be_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC1_MAC2 + 0x4);
+ data->mem_arr_cnt.err_sc_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC1_MAC2);
+ data->mem_arr_cnt.err_sc_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC1_MAC2 + 0x4);
+ data->mem_arr_cnt.err_res_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC1_MAC2);
+ data->mem_arr_cnt.err_res_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC1_MAC2 + 0x4);
+ data->mem_arr_cnt.err_be_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC1_MAC2);
+ data->mem_arr_cnt.err_be_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC1_MAC2 + 0x4);
+}
+
+static void add_delete_cam_entry(struct cam_struct data, u8 add)
+{
+ u32 port_action = 0;
+ u32 tv2 = 0;
+ u32 timeout = 20000;
+
+ /* wait for cam init done */
+ while (!(axienet_ior(&lp, XAS_SDL_CAM_STATUS_OFFSET) &
+ SDL_CAM_WR_ENABLE) && timeout)
+ timeout--;
+
+ if (!timeout)
+ pr_warn("CAM init took longer time!!");
+ /* mac and vlan */
+ axienet_iow(&lp, XAS_SDL_CAM_KEY1_OFFSET,
+ (data.dest_addr[0] << 24) | (data.dest_addr[1] << 16) |
+ (data.dest_addr[2] << 8) | (data.dest_addr[3]));
+ axienet_iow(&lp, XAS_SDL_CAM_KEY2_OFFSET,
+ ((data.dest_addr[4] << 8) | data.dest_addr[5]) |
+ ((data.vlanid & SDL_CAM_VLAN_MASK) << SDL_CAM_VLAN_SHIFT));
+
+ /* TV 1 and TV 2 */
+ axienet_iow(&lp, XAS_SDL_CAM_TV1_OFFSET,
+ (data.src_addr[0] << 24) | (data.src_addr[1] << 16) |
+ (data.src_addr[2] << 8) | (data.src_addr[3]));
+
+ tv2 = ((data.src_addr[4] << 8) | data.src_addr[5]) |
+ ((data.tv_vlanid & SDL_CAM_VLAN_MASK) << SDL_CAM_VLAN_SHIFT);
+
+#if IS_ENABLED(CONFIG_XILINX_TSN_QCI)
+ tv2 = tv2 | ((data.ipv & SDL_CAM_IPV_MASK) << SDL_CAM_IPV_SHIFT)
+ | (data.en_ipv << SDL_EN_CAM_IPV_SHIFT);
+#endif
+ axienet_iow(&lp, XAS_SDL_CAM_TV2_OFFSET, tv2);
+
+ if (data.tv_en)
+ port_action = ((SDL_CAM_DEST_MAC_XLATION |
+ SDL_CAM_VLAN_ID_XLATION) << SDL_CAM_MAC_ACTION_LIST_SHIFT);
+
+ port_action = port_action | (data.fwd_port << SDL_CAM_PORT_LIST_SHIFT);
+
+#if IS_ENABLED(CONFIG_XILINX_TSN_QCI) || IS_ENABLED(CONFIG_XILINX_TSN_CB)
+ port_action = port_action | (data.gate_id << SDL_GATEID_SHIFT);
+#endif
+
+ /* port action */
+ axienet_iow(&lp, XAS_SDL_CAM_PORT_ACT_OFFSET, port_action);
+
+ if (add)
+ axienet_iow(&lp, XAS_SDL_CAM_CTRL_OFFSET, SDL_CAM_ADD_ENTRY);
+ else
+ axienet_iow(&lp, XAS_SDL_CAM_CTRL_OFFSET, SDL_CAM_DELETE_ENTRY);
+
+ timeout = 20000;
+ /* wait for write to complete */
+ while ((axienet_ior(&lp, XAS_SDL_CAM_CTRL_OFFSET) &
+ SDL_CAM_WR_ENABLE) && timeout)
+ timeout--;
+
+ if (!timeout)
+ pr_warn("CAM write took longer time!!");
+}
+
+static void port_vlan_mem_ctrl(u32 port_vlan_mem)
+{
+ axienet_iow(&lp, XAS_VLAN_MEMB_CTRL_REG, port_vlan_mem);
+}
+
+static long switch_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long retval = 0;
+ struct switch_data data;
+#if IS_ENABLED(CONFIG_XILINX_TSN_QCI)
+ struct qci qci_data;
+#endif
+#if IS_ENABLED(CONFIG_XILINX_TSN_CB)
+ struct cb cb_data;
+#endif
+ switch (cmd) {
+ case GET_STATUS_SWITCH:
+ /* Switch configurations */
+ get_switch_regs(&data);
+
+ /* Memory static counter*/
+ get_memory_static_counter(&data);
+ if (copy_to_user((char __user *)arg, &data, sizeof(data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+
+ case SET_STATUS_SWITCH:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ set_switch_regs(&data);
+ break;
+
+ case ADD_CAM_ENTRY:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ add_delete_cam_entry(data.cam_data, ADD);
+ break;
+
+ case DELETE_CAM_ENTRY:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ add_delete_cam_entry(data.cam_data, DELETE);
+ break;
+
+ case PORT_VLAN_MEM_CTRL:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ port_vlan_mem_ctrl(data.port_vlan_mem_ctrl);
+ break;
+
+ case SET_FRAME_TYPE_FIELD:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ set_frame_filter_opt(data.typefield.type1,
+ data.typefield.type2);
+ break;
+
+ case SET_MAC1_MNGMNT_Q_CONFIG:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ set_mac1_mngmntq(data.mac1_config);
+ break;
+
+ case SET_MAC2_MNGMNT_Q_CONFIG:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ set_mac2_mngmntq(data.mac2_config);
+ break;
+#if IS_ENABLED(CONFIG_XILINX_TSN_QCI)
+ case CONFIG_METER_MEM:
+ if (copy_from_user(&qci_data, (char __user *)arg,
+ sizeof(qci_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ program_meter_reg(qci_data.meter_config_data);
+ break;
+
+ case CONFIG_GATE_MEM:
+ if (copy_from_user(&qci_data, (char __user *)arg,
+ sizeof(qci_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ config_stream_filter(qci_data.stream_config_data);
+ break;
+
+ case PSFP_CONTROL:
+ if (copy_from_user(&qci_data, (char __user *)arg,
+ sizeof(qci_data))) {
+ retval = -EINVAL;
+ pr_err("Copy from user failed\n");
+ goto end;
+ }
+ psfp_control(qci_data.psfp_config_data);
+ break;
+
+ case GET_STATIC_PSFP_COUNTER:
+ if (copy_from_user(&qci_data, (char __user *)arg,
+ sizeof(qci_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ get_psfp_static_counter(&qci_data.psfp_counter_data);
+ if (copy_to_user((char __user *)arg, &qci_data,
+ sizeof(qci_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+ case GET_METER_REG:
+ get_meter_reg(&qci_data.meter_config_data);
+ if (copy_to_user((char __user *)arg, &qci_data,
+ sizeof(qci_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+ case GET_STREAM_FLTR_CONFIG:
+ get_stream_filter_config(&qci_data.stream_config_data);
+ if (copy_to_user((char __user *)arg, &qci_data,
+ sizeof(qci_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+#endif
+#if IS_ENABLED(CONFIG_XILINX_TSN_CB)
+ case CONFIG_MEMBER_MEM:
+ if (copy_from_user(&cb_data, (char __user *)arg,
+ sizeof(cb_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ program_member_reg(cb_data.frer_memb_config_data);
+ break;
+
+ case CONFIG_INGRESS_FLTR:
+ if (copy_from_user(&cb_data, (char __user *)arg,
+ sizeof(cb_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ config_ingress_filter(cb_data.in_fltr_data);
+ break;
+
+ case FRER_CONTROL:
+ if (copy_from_user(&cb_data, (char __user *)arg,
+ sizeof(cb_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ frer_control(cb_data.frer_ctrl_data);
+ break;
+
+ case GET_STATIC_FRER_COUNTER:
+ if (copy_from_user(&cb_data, (char __user *)arg,
+ sizeof(cb_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ get_frer_static_counter(&cb_data.frer_counter_data);
+ if (copy_to_user((char __user *)arg, &cb_data,
+ sizeof(cb_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+
+ case GET_MEMBER_REG:
+ get_member_reg(&cb_data.frer_memb_config_data);
+ if (copy_to_user((char __user *)arg, &cb_data,
+ sizeof(cb_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+
+ case GET_INGRESS_FLTR:
+ get_ingress_filter_config(&cb_data.in_fltr_data);
+ if (copy_to_user((char __user *)arg, &cb_data,
+ sizeof(cb_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+#endif
+ }
+end:
+ return retval;
+}
+
+static const struct file_operations switch_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = switch_ioctl,
+ .open = switch_open,
+ .release = switch_release,
+};
+
+static int tsn_switch_init(void)
+{
+ int ret;
+
+ switch_dev.minor = MISC_DYNAMIC_MINOR;
+ switch_dev.name = "switch";
+ switch_dev.fops = &switch_fops;
+ ret = misc_register(&switch_dev);
+ if (ret < 0) {
+ pr_err("Switch driver registration failed!\n");
+ return ret;
+ }
+
+ pr_debug("Xilinx TSN Switch driver initialized!\n");
+ return 0;
+}
+
+static int tsn_switch_cam_init(u16 num_q)
+{
+ u32 pmap = 0;
+ u32 timeout = 20000;
+
+ /* wait for switch init done */
+ while (!(axienet_ior(&lp, XAS_STATUS_OFFSET) &
+ SDL_CAM_WR_ENABLE) && timeout)
+ timeout--;
+
+ if (!timeout)
+ pr_warn("Switch init took longer time!!");
+
+ if (num_q == 3) {
+ /* map pcp = 2,3 to queue1
+ * pcp = 4 to queue2
+ */
+ pmap = ((PMAP_EGRESS_QUEUE1_SELECT << PMAP_PRIORITY2_SHIFT) |
+ (PMAP_EGRESS_QUEUE1_SELECT << PMAP_PRIORITY3_SHIFT) |
+ (PMAP_EGRESS_QUEUE2_SELECT << PMAP_PRIORITY4_SHIFT));
+ } else if (num_q == 2) {
+ /* pcp = 4 to queue1 */
+ pmap = (PMAP_EGRESS_QUEUE1_SELECT << PMAP_PRIORITY4_SHIFT);
+ }
+
+ axienet_iow(&lp, XAS_PMAP_OFFSET, pmap);
+
+ timeout = 20000;
+ /* wait for cam init done */
+ while (!(axienet_ior(&lp, XAS_SDL_CAM_STATUS_OFFSET) &
+ SDL_CAM_WR_ENABLE) && timeout)
+ timeout--;
+
+ if (!timeout)
+ pr_warn("CAM init took longer time!!");
+
+ return 0;
+}
+
+static int tsnswitch_probe(struct platform_device *pdev)
+{
+ struct resource *swt;
+ int ret;
+ u16 num_tc;
+
+ pr_info("TSN Switch probe\n");
+ /* Map device registers */
+ swt = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp.regs = devm_ioremap_resource(&pdev->dev, swt);
+ if (IS_ERR(lp.regs))
+ return PTR_ERR(lp.regs);
+
+ ret = of_property_read_u16(pdev->dev.of_node, "xlnx,num-tc",
+ &num_tc);
+ if (ret || (num_tc != 2 && num_tc != 3))
+ num_tc = XAE_MAX_TSN_TC;
+
+ pr_info("TSN Switch Initializing ....\n");
+ ret = tsn_switch_init();
+ if (ret)
+ return ret;
+ pr_info("TSN CAM Initializing ....\n");
+ ret = tsn_switch_cam_init(num_tc);
+
+ return ret;
+}
+
+static int tsnswitch_remove(struct platform_device *pdev)
+{
+ misc_deregister(&switch_dev);
+ return 0;
+}
+
+static struct platform_driver tsnswitch_driver = {
+ .probe = tsnswitch_probe,
+ .remove = tsnswitch_remove,
+ .driver = {
+ .name = "xilinx_tsnswitch",
+ .of_match_table = tsnswitch_of_match,
+ },
+};
+
+module_platform_driver(tsnswitch_driver);
+
+MODULE_DESCRIPTION("Xilinx TSN Switch driver");
+MODULE_AUTHOR("Xilinx");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_switch.h b/drivers/net/ethernet/xilinx/xilinx_tsn_switch.h
new file mode 100644
index 000000000000..9e5e21aea127
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_switch.h
@@ -0,0 +1,364 @@
+/*
+ * Xilinx TSN core switch header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef XILINX_TSN_SWITCH_H
+#define XILINX_TSN_SWITCH_H
+
+#include "xilinx_axienet.h"
+
+/* ioctls */
+#define GET_STATUS_SWITCH 0x16
+#define SET_STATUS_SWITCH 0x17
+#define ADD_CAM_ENTRY 0x18
+#define DELETE_CAM_ENTRY 0x19
+#define PORT_VLAN_MEM_CTRL 0x20
+#define SET_FRAME_TYPE_FIELD 0x21
+#define SET_MAC1_MNGMNT_Q_CONFIG 0x22
+#define SET_MAC2_MNGMNT_Q_CONFIG 0x23
+#define CONFIG_METER_MEM 0x24
+#define CONFIG_GATE_MEM 0x25
+#define PSFP_CONTROL 0x26
+#define GET_STATIC_PSFP_COUNTER 0x27
+#define GET_METER_REG 0x28
+#define GET_STREAM_FLTR_CONFIG 0x29
+#define CONFIG_MEMBER_MEM 0x2A
+#define CONFIG_INGRESS_FLTR 0x2B
+#define FRER_CONTROL 0x2C
+#define GET_STATIC_FRER_COUNTER 0x2D
+#define GET_MEMBER_REG 0x2E
+#define GET_INGRESS_FLTR 0x2F
+
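These plain command values are matched directly in switch_ioctl() (no _IO() encoding), and the misc device registered by tsn_switch_init() appears as /dev/switch. A hedged userspace sketch, assuming the application carries a mirrored copy of this header's struct switch_data with userspace-visible fixed-width types:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct switch_data data;	/* mirrored from this header */
	int fd = open("/dev/switch", O_RDWR);

	if (fd < 0) {
		perror("open /dev/switch");
		return 1;
	}
	/* GET_STATUS_SWITCH (0x16) fills the register snapshot and the
	 * memory static counters in one call.
	 */
	if (ioctl(fd, 0x16, &data) < 0) {
		perror("GET_STATUS_SWITCH");
		close(fd);
		return 1;
	}
	printf("switch status: 0x%08x\n", data.switch_status);
	close(fd);
	return 0;
}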
+/* Xilinx Axi Switch Offsets*/
+#define XAS_STATUS_OFFSET 0x00000
+#define XAS_CONTROL_OFFSET 0x00004
+#define XAS_PMAP_OFFSET 0x00008
+#define XAS_MAC_LSB_OFFSET 0x0000C
+#define XAS_MAC_MSB_OFFSET 0x00010
+#define XAS_EP2MAC_ST_FIFOT_OFFSET 0x00020
+#define XAS_EP2MAC_RE_FIFOT_OFFSET 0x00024
+#define XAS_EP2MAC_BE_FIFOT_OFFSET 0x00028
+#define XAS_MAC2MAC_ST_FIFOT_OFFSET 0x00030
+#define XAS_MAC2MAC_RE_FIFOT_OFFSET 0x00034
+#define XAS_MAC2MAC_BE_FIFOT_OFFSET 0x00038
+#define XAS_EP_PORT_VLAN_OFFSET 0x00040
+#define XAS_MAC_PORT_VLAN_OFFSET 0x00044
+#define XAS_FRM_FLTR_TYPE_FIELD_OPT_OFFSET 0x00050
+#define XAS_MAC2_MNG_Q_OPTION_OFFSET 0x00054
+#define XAS_MAC1_MNG_Q_OPTION_OFFSET 0x00058
+#define XAS_ST_MAX_FRAME_SIZE_OFFSET 0x00060
+#define XAS_RE_MAX_FRAME_SIZE_OFFSET 0x00064
+#define XAS_BE_MAX_FRAME_SIZE_OFFSET 0x00068
+
+/* Memory static counters */
+#define XAS_MEM_STCNTR_CAM_LOOKUP 0x00400
+#define XAS_MEM_STCNTR_MULTCAST 0x00408
+#define XAS_MEM_STCNTR_ERR_MAC1 0x00410
+#define XAS_MEM_STCNTR_ERR_MAC2 0x00418
+#define XAS_MEM_STCNTR_SC_MAC1_EP 0x00420
+#define XAS_MEM_STCNTR_RES_MAC1_EP 0x00428
+#define XAS_MEM_STCNTR_BE_MAC1_EP 0x00430
+#define XAS_MEM_STCNTR_ERR_SC_MAC1_EP 0x00438
+#define XAS_MEM_STCNTR_ERR_RES_MAC1_EP 0x00440
+#define XAS_MEM_STCNTR_ERR_BE_MAC1_EP 0x00448
+#define XAS_MEM_STCNTR_SC_MAC2_EP 0x00458
+#define XAS_MEM_STCNTR_RES_MAC2_EP 0x00460
+#define XAS_MEM_STCNTR_BE_MAC2_EP 0x00468
+#define XAS_MEM_STCNTR_ERR_SC_MAC2_EP 0x00470
+#define XAS_MEM_STCNTR_ERR_RES_MAC2_EP 0x00478
+#define XAS_MEM_STCNTR_ERR_BE_MAC2_EP 0x00480
+#define XAS_MEM_STCNTR_SC_EP_MAC1 0x00490
+#define XAS_MEM_STCNTR_RES_EP_MAC1 0x00498
+#define XAS_MEM_STCNTR_BE_EP_MAC1 0x004A0
+#define XAS_MEM_STCNTR_ERR_SC_EP_MAC1 0x004A8
+#define XAS_MEM_STCNTR_ERR_RES_EP_MAC1 0x004B0
+#define XAS_MEM_STCNTR_ERR_BE_EP_MAC1 0x004B8
+#define XAS_MEM_STCNTR_SC_MAC2_MAC1 0x004C0
+#define XAS_MEM_STCNTR_RES_MAC2_MAC1 0x004C8
+#define XAS_MEM_STCNTR_BE_MAC2_MAC1 0x004D0
+#define XAS_MEM_STCNTR_ERR_SC_MAC2_MAC1 0x004D8
+#define XAS_MEM_STCNTR_ERR_RES_MAC2_MAC1 0x004E0
+#define XAS_MEM_STCNTR_ERR_BE_MAC2_MAC1 0x004E8
+#define XAS_MEM_STCNTR_SC_EP_MAC2 0x004F0
+#define XAS_MEM_STCNTR_RES_EP_MAC2 0x004F8
+#define XAS_MEM_STCNTR_BE_EP_MAC2 0x00500
+#define XAS_MEM_STCNTR_ERR_SC_EP_MAC2 0x00508
+#define XAS_MEM_STCNTR_ERR_RES_EP_MAC2 0x00510
+#define XAS_MEM_STCNTR_ERR_BE_EP_MAC2 0x00518
+#define XAS_MEM_STCNTR_SC_MAC1_MAC2 0x00520
+#define XAS_MEM_STCNTR_RES_MAC1_MAC2 0x00528
+#define XAS_MEM_STCNTR_BE_MAC1_MAC2 0x00530
+#define XAS_MEM_STCNTR_ERR_SC_MAC1_MAC2 0x00538
+#define XAS_MEM_STCNTR_ERR_RES_MAC1_MAC2 0x00540
+#define XAS_MEM_STCNTR_ERR_BE_MAC1_MAC2 0x00548
+
+/* Stream Destination Lookup CAM */
+#define XAS_SDL_CAM_CTRL_OFFSET 0x1000
+#define XAS_SDL_CAM_STATUS_OFFSET 0x1004
+#define XAS_SDL_CAM_KEY1_OFFSET 0x1008
+#define XAS_SDL_CAM_KEY2_OFFSET 0x100C
+#define XAS_SDL_CAM_TV1_OFFSET 0x1010
+#define XAS_SDL_CAM_TV2_OFFSET 0x1014
+#define XAS_SDL_CAM_PORT_ACT_OFFSET 0x1018
+
+/* Port VLAN Membership Memory */
+#define XAS_VLAN_MEMB_CTRL_REG 0x1100
+#define XAS_VLAN_MEMB_DATA_REG 0x1104
+
+/* QCI */
+#define PSFP_CONTROL_OFFSET 0x1200
+#define STREAM_FILTER_CONFIG_OFFSET 0x1204
+#define STREAM_METER_CIR_OFFSET 0x1208
+#define STREAM_METER_EIR_OFFSET 0x120C
+#define STREAM_METER_CBR_OFFSET 0x1210
+#define STREAM_METER_EBR_OFFSET 0x1214
+
+/* PSFP Statistics Counters */
+#define TOTAL_PSFP_FRAMES_OFFSET 0x2000
+#define FLTR_INGS_PORT_ERR_OFFSET 0x2800
+#define FLTR_STDU_ERR_OFFSET 0x3000
+#define METER_ERR_OFFSET 0x3800
+
+/* CB */
+#define FRER_CONTROL_OFFSET 0x1300
+#define INGRESS_FILTER_OFFSET 0x1304
+#define FRER_CONFIG_REG1 0x1308
+#define FRER_CONFIG_REG2 0x130C
+
+/* FRER Statistics Counters */
+#define TOTAL_FRER_FRAMES_OFFSET 0x4000
+#define FRER_DISCARD_INGS_FLTR_OFFSET 0x4800
+#define FRER_PASS_FRAMES_INDV_OFFSET 0x5000
+#define FRER_DISCARD_FRAMES_INDV_OFFSET 0x5800
+#define FRER_PASS_FRAMES_SEQ_OFFSET 0x6000
+#define FRER_DISCARD_FRAMES_SEQ_OFFSET 0x6800
+#define FRER_ROGUE_FRAMES_SEQ_OFFSET 0x7000
+#define SEQ_RECV_RESETS_OFFSET 0x7800
+
+/* 64 bit counter*/
+struct static_cntr {
+ u32 msb;
+ u32 lsb;
+};
+
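Each counter is exposed as two 32-bit halves that get_memory_static_counter() above reads with two separate MMIO accesses, so a counter rolling over between the reads can tear. A small helper for folding the halves together; the name is hypothetical:

/* Hypothetical helper: fold the two register halves into one value.
 * Note the lsb/msb pair is read with two separate MMIO accesses, so
 * the result can tear if the counter rolls over between the reads.
 */
static inline u64 static_cntr_to_u64(const struct static_cntr *c)
{
	return ((u64)c->msb << 32) | c->lsb;
}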
+/*********** QCI Structures **************/
+struct psfp_config {
+ u8 gate_id;
+ u8 meter_id;
+ bool en_meter;
+ bool allow_stream;
+ bool en_psfp;
+ u8 wr_op_type;
+ bool op_type;
+};
+
+struct meter_config {
+ u32 cir;
+ u32 eir;
+ u32 cbr;
+ u32 ebr;
+ u8 mode;
+};
+
+struct stream_filter {
+ u8 in_pid; /* ingress port id*/
+ u16 max_fr_size; /* max frame size*/
+};
+
+/* PSFP Static counter*/
+struct psfp_static_counter {
+ struct static_cntr psfp_fr_count;
+ struct static_cntr err_filter_ins_port;
+ struct static_cntr err_filtr_sdu;
+ struct static_cntr err_meter;
+ unsigned char num;
+};
+
+/* QCI core structures */
+struct qci {
+ struct meter_config meter_config_data;
+ struct stream_filter stream_config_data;
+ struct psfp_config psfp_config_data;
+ struct psfp_static_counter psfp_counter_data;
+};
+
+/************* QCI Structures end *************/
+
+/*********** CB Structures **************/
+struct frer_ctrl {
+ u8 gate_id;
+ u8 memb_id;
+ bool seq_reset;
+ bool gate_state;
+ bool rcvry_tmout;
+ bool frer_valid;
+ u8 wr_op_type;
+ bool op_type;
+};
+
+struct in_fltr {
+ u8 in_port_id;
+ u16 max_seq_id;
+};
+
+struct frer_memb_config {
+ u8 seq_rec_hist_len;
+ u8 split_strm_egport_id;
+ u16 split_strm_vlan_id;
+ u32 rem_ticks;
+};
+
+/* FRER Static counter*/
+struct frer_static_counter {
+ struct static_cntr frer_fr_count;
+ struct static_cntr disc_frames_in_portid;
+ struct static_cntr pass_frames_seq_recv;
+ struct static_cntr disc_frames_seq_recv;
+ struct static_cntr rogue_frames_seq_recv;
+ struct static_cntr pass_frames_ind_recv;
+ struct static_cntr disc_frames_ind_recv;
+ struct static_cntr seq_recv_rst;
+ unsigned char num;
+};
+
+/* CB core structures */
+struct cb {
+ struct frer_ctrl frer_ctrl_data;
+ struct in_fltr in_fltr_data;
+ struct frer_memb_config frer_memb_config_data;
+ struct frer_static_counter frer_counter_data;
+};
+
+/************* CB Structures end *************/
+
+/********* Switch Structures Starts ***********/
+struct threshold {
+ u16 t1;
+ u16 t2;
+};
+
+/* memory static counters */
+struct mem_static_arr_cntr {
+ struct static_cntr cam_lookup;
+ struct static_cntr multicast_fr;
+ struct static_cntr err_mac1;
+ struct static_cntr err_mac2;
+ struct static_cntr sc_mac1_ep;
+ struct static_cntr res_mac1_ep;
+ struct static_cntr be_mac1_ep;
+ struct static_cntr err_sc_mac1_ep;
+ struct static_cntr err_res_mac1_ep;
+ struct static_cntr err_be_mac1_ep;
+ struct static_cntr sc_mac2_ep;
+ struct static_cntr res_mac2_ep;
+ struct static_cntr be_mac2_ep;
+ struct static_cntr err_sc_mac2_ep;
+ struct static_cntr err_res_mac2_ep;
+ struct static_cntr err_be_mac2_ep;
+ struct static_cntr sc_ep_mac1;
+ struct static_cntr res_ep_mac1;
+ struct static_cntr be_ep_mac1;
+ struct static_cntr err_sc_ep_mac1;
+ struct static_cntr err_res_ep_mac1;
+ struct static_cntr err_be_ep_mac1;
+ struct static_cntr sc_mac2_mac1;
+ struct static_cntr res_mac2_mac1;
+ struct static_cntr be_mac2_mac1;
+ struct static_cntr err_sc_mac2_mac1;
+ struct static_cntr err_res_mac2_mac1;
+ struct static_cntr err_be_mac2_mac1;
+ struct static_cntr sc_ep_mac2;
+ struct static_cntr res_ep_mac2;
+ struct static_cntr be_ep_mac2;
+ struct static_cntr err_sc_ep_mac2;
+ struct static_cntr err_res_ep_mac2;
+ struct static_cntr err_be_ep_mac2;
+ struct static_cntr sc_mac1_mac2;
+ struct static_cntr res_mac1_mac2;
+ struct static_cntr be_mac1_mac2;
+ struct static_cntr err_sc_mac1_mac2;
+ struct static_cntr err_res_mac1_mac2;
+ struct static_cntr err_be_mac1_mac2;
+};
+
+/* CAM structure */
+struct cam_struct {
+ u8 src_addr[6];
+ u8 dest_addr[6];
+ u16 vlanid;
+ u16 tv_vlanid;
+ u8 fwd_port;
+ bool tv_en;
+ u8 gate_id;
+ u8 ipv;
+ bool en_ipv;
+};
+
+/*Frame Filtering Type Field Option */
+struct ff_type {
+ u16 type1;
+ u16 type2;
+};
+
+/* Core switch structure*/
+struct switch_data {
+ u32 switch_status;
+ u32 switch_ctrl;
+ u32 switch_prt;
+ u8 sw_mac_addr[6];
+ /* 0 - scheduled, 1 - reserved, 2 - best effort queue */
+ struct threshold thld_ep_mac[3];
+ struct threshold thld_mac_mac[3];
+ u32 ep_vlan;
+ u32 mac_vlan;
+ u32 max_frame_sc_que;
+ u32 max_frame_res_que;
+ u32 max_frame_be_que;
+ /* Memory counters */
+ struct mem_static_arr_cntr mem_arr_cnt;
+ /* CAM */
+ struct cam_struct cam_data;
+/* Frame Filtering Type Field Option */
+ struct ff_type typefield;
+/* MAC Port-1 Management Queueing Options */
+ int mac1_config;
+/* MAC Port-2 Management Queueing Options */
+ int mac2_config;
+/* Port VLAN Membership Registers */
+ int port_vlan_mem_ctrl;
+ char port_vlan_mem_data;
+};
+
+/********* Switch Structures ends ***********/
+
+extern struct axienet_local lp;
+
+/********* qci function declarations ********/
+void psfp_control(struct psfp_config data);
+void config_stream_filter(struct stream_filter data);
+void program_meter_reg(struct meter_config data);
+void get_psfp_static_counter(struct psfp_static_counter *data);
+void get_meter_reg(struct meter_config *data);
+void get_stream_filter_config(struct stream_filter *data);
+
+/********* cb function declarations ********/
+void frer_control(struct frer_ctrl data);
+void get_ingress_filter_config(struct in_fltr *data);
+void config_ingress_filter(struct in_fltr data);
+void get_member_reg(struct frer_memb_config *data);
+void program_member_reg(struct frer_memb_config data);
+void get_frer_static_counter(struct frer_static_counter *data);
+#endif /* XILINX_TSN_SWITCH_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_timer.h b/drivers/net/ethernet/xilinx/xilinx_tsn_timer.h
new file mode 100644
index 000000000000..4bb74e78d89a
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_timer.h
@@ -0,0 +1,73 @@
+/*
+ * Xilinx FPGA TSN timer module header.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_TSN_H_
+#define _XILINX_TSN_H_
+
+#include <linux/platform_device.h>
+
+#define XAE_RTC_OFFSET 0x12800
+/* RTC Nanoseconds Field Offset Register */
+#define XTIMER1588_RTC_OFFSET_NS 0x00000
+/* RTC Seconds Field Offset Register - Low */
+#define XTIMER1588_RTC_OFFSET_SEC_L 0x00008
+/* RTC Seconds Field Offset Register - High */
+#define XTIMER1588_RTC_OFFSET_SEC_H 0x0000C
+/* RTC Increment */
+#define XTIMER1588_RTC_INCREMENT 0x00010
+/* Current TOD Nanoseconds - RO */
+#define XTIMER1588_CURRENT_RTC_NS 0x00014
+/* Current TOD Seconds -Low RO */
+#define XTIMER1588_CURRENT_RTC_SEC_L 0x00018
+/* Current TOD Seconds -High RO */
+#define XTIMER1588_CURRENT_RTC_SEC_H 0x0001C
+#define XTIMER1588_SYNTONIZED_NS 0x0002C
+#define XTIMER1588_SYNTONIZED_SEC_L 0x00030
+#define XTIMER1588_SYNTONIZED_SEC_H 0x00034
+/* Write to Bit 0 to clear the interrupt */
+#define XTIMER1588_INTERRUPT 0x00020
+/* 8kHz Pulse Offset Register */
+#define XTIMER1588_8KPULSE 0x00024
+/* Correction Field - Low */
+#define XTIMER1588_CF_L 0x0002C
+/* Correction Field - High */
+#define XTIMER1588_CF_H 0x00030
+
+#define XTIMER1588_RTC_MASK ((1 << 26) - 1)
+#define XTIMER1588_INT_SHIFT 0
+#define NANOSECOND_BITS 20
+#define NANOSECOND_MASK ((1 << NANOSECOND_BITS) - 1)
+#define SECOND_MASK ((1 << (32 - NANOSECOND_BITS)) - 1)
+#define XTIMER1588_RTC_INCREMENT_SHIFT 20
+#define PULSESIN1PPS 128
+
+/* Read/Write access to the registers */
+#ifndef out_be32
+#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_ARCH_ZYNQMP)
+#define in_be32(offset) __raw_readl(offset)
+#define out_be32(offset, val) __raw_writel(val, offset)
+#endif
+#endif
+
+/* The tsn ptp module will set this variable */
+extern int axienet_phc_index;
+
+void *axienet_ptp_timer_probe(void __iomem *base,
+ struct platform_device *pdev);
+int axienet_ptp_timer_remove(void *priv);
+int axienet_get_phc_index(void *priv);
+#endif
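The RTC appears to advance by the value in XTIMER1588_RTC_INCREMENT once per timer-clock cycle, and XTIMER1588_RTC_INCREMENT_SHIFT together with NANOSECOND_BITS suggests a fixed-point format with 20 fractional nanosecond bits. A hedged sketch of deriving the increment from the clock rate under that assumption; the helper name is hypothetical:

#include <linux/math64.h>
#include <linux/time64.h>

/* Hedged sketch: assumes the increment register holds ns-per-tick as
 * fixed point with NANOSECOND_BITS fractional bits.
 */
static u32 rtc_increment_for_clk(u32 clk_rate_hz)
{
	u64 ns_per_tick_fp = (u64)NSEC_PER_SEC << NANOSECOND_BITS;

	do_div(ns_per_tick_fp, clk_rate_hz);
	return (u32)ns_per_tick_fp;
}

Under that assumption a 125 MHz timer clock yields 8 << 20, i.e. an exact 8 ns tick.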
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index fd5288ff53b5..e3438cef5f9c 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -503,6 +503,11 @@ static void
xirc2ps_detach(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ struct local_info *local = netdev_priv(dev);
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ cancel_work_sync(&local->tx_timeout_task);
dev_dbg(&link->dev, "detach\n");
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index c866f58dab70..28bf530cb005 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -3844,10 +3844,24 @@ static int dfx_init(void)
int status;
status = pci_register_driver(&dfx_pci_driver);
- if (!status)
- status = eisa_driver_register(&dfx_eisa_driver);
- if (!status)
- status = tc_register_driver(&dfx_tc_driver);
+ if (status)
+ goto err_pci_register;
+
+ status = eisa_driver_register(&dfx_eisa_driver);
+ if (status)
+ goto err_eisa_register;
+
+ status = tc_register_driver(&dfx_tc_driver);
+ if (status)
+ goto err_tc_register;
+
+ return 0;
+
+err_tc_register:
+ eisa_driver_unregister(&dfx_eisa_driver);
+err_eisa_register:
+ pci_unregister_driver(&dfx_pci_driver);
+err_pci_register:
return status;
}
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index 8a4fbfacad7e..a4d3c7fa9ad3 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -220,21 +220,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
- if (!(hw->hw_info.req_buf))
- return -ENOMEM;
+ if (!(hw->hw_info.req_buf)) {
+ result = -ENOMEM;
+ goto free_ep_info;
+ }
hw->hw_info.req_buf_size = mem_size;
mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
- if (!(hw->hw_info.res_buf))
- return -ENOMEM;
+ if (!(hw->hw_info.res_buf)) {
+ result = -ENOMEM;
+ goto free_req_buf;
+ }
hw->hw_info.res_buf_size = mem_size;
result = fjes_hw_alloc_shared_status_region(hw);
if (result)
- return result;
+ goto free_res_buf;
hw->hw_info.buffer_share_bit = 0;
hw->hw_info.buffer_unshare_reserve_bit = 0;
@@ -245,11 +249,11 @@ static int fjes_hw_setup(struct fjes_hw *hw)
result = fjes_hw_alloc_epbuf(&buf_pair->tx);
if (result)
- return result;
+ goto free_epbuf;
result = fjes_hw_alloc_epbuf(&buf_pair->rx);
if (result)
- return result;
+ goto free_epbuf;
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx, mac,
@@ -272,6 +276,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
fjes_hw_init_command_registers(hw, &param);
return 0;
+
+free_epbuf:
+ for (epidx = 0; epidx < hw->max_epid ; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+ fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
+ fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
+ }
+ fjes_hw_free_shared_status_region(hw);
+free_res_buf:
+ kfree(hw->hw_info.res_buf);
+ hw->hw_info.res_buf = NULL;
+free_req_buf:
+ kfree(hw->hw_info.req_buf);
+ hw->hw_info.req_buf = NULL;
+free_ep_info:
+ kfree(hw->ep_shm_info);
+ hw->ep_shm_info = NULL;
+ return result;
}
static void fjes_hw_cleanup(struct fjes_hw *hw)
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 84f5717c01e2..f932e4120cb6 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -215,7 +215,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
struct metadata_dst *tun_dst = NULL;
struct pcpu_sw_netstats *stats;
unsigned int len;
- int err = 0;
+ int nh, err = 0;
void *oiph;
if (ip_tunnel_collect_metadata() || gs->collect_md) {
@@ -259,9 +259,23 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
goto drop;
}
- oiph = skb_network_header(skb);
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_reset_network_header(skb);
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(geneve->dev, rx_length_errors);
+ DEV_STATS_INC(geneve->dev, rx_errors);
+ goto drop;
+ }
+
+ /* Get the outer header. */
+ oiph = skb->head + nh;
+
if (geneve_get_sk_family(gs) == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
@@ -851,8 +865,7 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
use_cache = false;
}
- fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
- info->key.label);
+ fl6->flowlabel = ip6_make_flowinfo(prio, info->key.label);
dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index d0653babab92..e5961082a4af 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -297,7 +297,9 @@ static void __gtp_encap_destroy(struct sock *sk)
gtp->sk1u = NULL;
udp_sk(sk)->encap_type = 0;
rcu_assign_sk_user_data(sk, NULL);
+ release_sock(sk);
sock_put(sk);
+ return;
}
release_sock(sk);
}
@@ -542,8 +544,9 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
- if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
- mtu < ntohs(iph->tot_len)) {
+ if (iph->frag_off & htons(IP_DF) &&
+ ((!skb_is_gso(skb) && skb->len > mtu) ||
+ (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
@@ -1374,26 +1377,26 @@ static int __init gtp_init(void)
get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
- err = rtnl_link_register(&gtp_link_ops);
+ err = register_pernet_subsys(&gtp_net_ops);
if (err < 0)
goto error_out;
- err = genl_register_family(&gtp_genl_family);
+ err = rtnl_link_register(&gtp_link_ops);
if (err < 0)
- goto unreg_rtnl_link;
+ goto unreg_pernet_subsys;
- err = register_pernet_subsys(&gtp_net_ops);
+ err = genl_register_family(&gtp_genl_family);
if (err < 0)
- goto unreg_genl_family;
+ goto unreg_rtnl_link;
pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
sizeof(struct pdp_ctx));
return 0;
-unreg_genl_family:
- genl_unregister_family(&gtp_genl_family);
unreg_rtnl_link:
rtnl_link_unregister(&gtp_link_ops);
+unreg_pernet_subsys:
+ unregister_pernet_subsys(&gtp_net_ops);
error_out:
pr_err("error loading GTP module loaded\n");
return err;
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 4476491b58f9..c5495ca5e8e6 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -758,7 +758,7 @@ static void epp_bh(struct work_struct *work)
* ===================== network driver interface =========================
*/
-static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index e2ad3c2e8df5..e0b9823170bf 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -511,7 +511,7 @@ static int bpq_device_event(struct notifier_block *this,
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
- if (!dev_is_ethdev(dev))
+ if (!dev_is_ethdev(dev) && !bpq_get_ax25_dev(dev))
return NOTIFY_DONE;
switch (event) {
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 6c03932d8a6b..3dc4eb841aa1 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -300,12 +300,12 @@ static inline void scc_discard_buffers(struct scc_channel *scc)
spin_lock_irqsave(&scc->lock, flags);
if (scc->tx_buff != NULL)
{
- dev_kfree_skb(scc->tx_buff);
+ dev_kfree_skb_irq(scc->tx_buff);
scc->tx_buff = NULL;
}
while (!skb_queue_empty(&scc->tx_queue))
- dev_kfree_skb(skb_dequeue(&scc->tx_queue));
+ dev_kfree_skb_irq(skb_dequeue(&scc->tx_queue));
spin_unlock_irqrestore(&scc->lock, flags);
}
@@ -1667,7 +1667,7 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len) {
struct sk_buff *skb_del;
skb_del = skb_dequeue(&scc->tx_queue);
- dev_kfree_skb(skb_del);
+ dev_kfree_skb_irq(skb_del);
}
skb_queue_tail(&scc->tx_queue, skb);
netif_trans_update(dev);
diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
index ca7bf7f897d3..c8cbd85adcf9 100644
--- a/drivers/net/hyperv/Kconfig
+++ b/drivers/net/hyperv/Kconfig
@@ -3,5 +3,6 @@ config HYPERV_NET
tristate "Microsoft Hyper-V virtual network driver"
depends on HYPERV
select UCS2_STRING
+ select NLS
help
Select this option to enable the Hyper-V virtual network driver.
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e74f2d1def80..43ba2213851c 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -954,6 +954,9 @@ struct net_device_context {
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
+
+ /* completion variable to confirm vf association */
+ struct completion vf_add;
};
/* Per channel data */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 6c0732fc8c25..01425bfa5cbd 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1223,6 +1223,10 @@ static void netvsc_send_vf(struct net_device *ndev,
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+
+ if (net_device_ctx->vf_alloc)
+ complete(&net_device_ctx->vf_add);
+
netdev_info(ndev, "VF slot %u %s\n",
net_device_ctx->vf_serial,
net_device_ctx->vf_alloc ? "added" : "removed");
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 57e92c5bfcc9..e42102a1de41 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -43,6 +43,10 @@
#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)
+/* Macros to define the context of vf registration */
+#define VF_REG_IN_PROBE 1
+#define VF_REG_IN_NOTIFIER 2
+
static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
@@ -2037,7 +2041,7 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
}
static int netvsc_vf_join(struct net_device *vf_netdev,
- struct net_device *ndev)
+ struct net_device *ndev, int context)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
int ret;
@@ -2060,10 +2064,11 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
goto upper_link_failed;
}
- /* set slave flag before open to prevent IPv6 addrconf */
- vf_netdev->flags |= IFF_SLAVE;
-
- schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
+ /* If this registration is called from probe context vf_takeover
+ * is taken care of later in probe itself.
+ */
+ if (context == VF_REG_IN_NOTIFIER)
+ schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
@@ -2133,6 +2138,7 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
{
struct device *parent = vf_netdev->dev.parent;
struct net_device_context *ndev_ctx;
+ struct net_device *ndev;
struct pci_dev *pdev;
u32 serial;
@@ -2155,8 +2161,31 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
if (!ndev_ctx->vf_alloc)
continue;
- if (ndev_ctx->vf_serial == serial)
- return hv_get_drvdata(ndev_ctx->device_ctx);
+ if (ndev_ctx->vf_serial != serial)
+ continue;
+
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+ if (ndev->addr_len != vf_netdev->addr_len ||
+ memcmp(ndev->perm_addr, vf_netdev->perm_addr,
+ ndev->addr_len) != 0)
+ continue;
+
+ return ndev;
+
+ }
+
+ /* Fallback path: match the synthetic NIC to the VF by MAC address.
+	 * Because this function can be called before vf_netdev is
+	 * initialized (NETDEV_POST_INIT), when its perm_addr has not yet
+	 * been copied from dev_addr, also try to match against its dev_addr.
+	 * Note: on Hyper-V and Azure, it's not possible to set a MAC address
+	 * on a VF that matches the MAC of an unrelated NETVSC device.
+ */
+ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+ if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) ||
+ ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr))
+ return ndev;
}
netdev_notice(vf_netdev,
@@ -2164,7 +2193,20 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
return NULL;
}
-static int netvsc_register_vf(struct net_device *vf_netdev)
+static int netvsc_prepare_bonding(struct net_device *vf_netdev)
+{
+ struct net_device *ndev;
+
+ ndev = get_netvsc_byslot(vf_netdev);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ /* set slave flag before open to prevent IPv6 addrconf */
+ vf_netdev->flags |= IFF_SLAVE;
+ return NOTIFY_DONE;
+}
+
+static int netvsc_register_vf(struct net_device *vf_netdev, int context)
{
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
@@ -2203,7 +2245,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
- if (netvsc_vf_join(vf_netdev, ndev) != 0)
+ if (netvsc_vf_join(vf_netdev, ndev, context) != 0)
return NOTIFY_DONE;
dev_hold(vf_netdev);
@@ -2232,6 +2274,11 @@ static int netvsc_vf_changed(struct net_device *vf_netdev)
if (!netvsc_dev)
return NOTIFY_DONE;
+ if (vf_is_up && !net_device_ctx->vf_alloc) {
+ netdev_info(ndev, "Waiting for the VF association from host\n");
+ wait_for_completion(&net_device_ctx->vf_add);
+ }
+
netvsc_switch_datapath(ndev, vf_is_up);
netdev_info(ndev, "Data path switched %s VF: %s\n",
vf_is_up ? "to" : "from", vf_netdev->name);
@@ -2253,6 +2300,7 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
+ reinit_completion(&net_device_ctx->vf_add);
netdev_rx_handler_unregister(vf_netdev);
netdev_upper_dev_unlink(vf_netdev, ndev);
RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
@@ -2261,10 +2309,31 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
return NOTIFY_OK;
}
+static int check_dev_is_matching_vf(struct net_device *event_ndev)
+{
+ /* Skip NetVSC interfaces */
+ if (event_ndev->netdev_ops == &device_ops)
+ return -ENODEV;
+
+ /* Avoid non-Ethernet type devices */
+ if (event_ndev->type != ARPHRD_ETHER)
+ return -ENODEV;
+
+ /* Avoid Vlan dev with same MAC registering as VF */
+ if (is_vlan_dev(event_ndev))
+ return -ENODEV;
+
+ /* Avoid Bonding master dev with same MAC registering as VF */
+ if (netif_is_bond_master(event_ndev))
+ return -ENODEV;
+
+ return 0;
+}
+
static int netvsc_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
- struct net_device *net = NULL;
+ struct net_device *net = NULL, *vf_netdev;
struct net_device_context *net_device_ctx;
struct netvsc_device_info *device_info = NULL;
struct netvsc_device *nvdev;
@@ -2290,6 +2359,7 @@ static int netvsc_probe(struct hv_device *dev,
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
+ init_completion(&net_device_ctx->vf_add);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
@@ -2364,6 +2434,30 @@ static int netvsc_probe(struct hv_device *dev,
}
list_add(&net_device_ctx->list, &netvsc_dev_list);
+
+ /* When the hv_netvsc driver is unloaded and reloaded, the
+	 * NET_DEVICE_REGISTER for the vf device is replayed before probe
+	 * is complete. This is because register_netdevice_notifier() runs
+	 * before vmbus_driver_register(), so the callback is in place
+	 * before probe and events like NETDEV_POST_INIT are not missed.
+	 * So, in this section we try to register the matching vf device
+	 * that is already present as a netdevice, knowing that its register
+	 * call was not processed in netvsc_netdev_notifier (as probing was
+	 * in progress and get_netvsc_byslot failed).
+ */
+ for_each_netdev(dev_net(net), vf_netdev) {
+ ret = check_dev_is_matching_vf(vf_netdev);
+ if (ret != 0)
+ continue;
+
+ if (net != get_netvsc_byslot(vf_netdev))
+ continue;
+
+ netvsc_prepare_bonding(vf_netdev);
+ netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
+ __netvsc_vf_setup(net, vf_netdev);
+ break;
+ }
rtnl_unlock();
kfree(device_info);
@@ -2456,27 +2550,17 @@ static int netvsc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ int ret = 0;
- /* Skip our own events */
- if (event_dev->netdev_ops == &device_ops)
- return NOTIFY_DONE;
-
- /* Avoid non-Ethernet type devices */
- if (event_dev->type != ARPHRD_ETHER)
- return NOTIFY_DONE;
-
- /* Avoid Vlan dev with same MAC registering as VF */
- if (is_vlan_dev(event_dev))
- return NOTIFY_DONE;
-
- /* Avoid Bonding master dev with same MAC registering as VF */
- if ((event_dev->priv_flags & IFF_BONDING) &&
- (event_dev->flags & IFF_MASTER))
+ ret = check_dev_is_matching_vf(event_dev);
+ if (ret != 0)
return NOTIFY_DONE;
switch (event) {
+ case NETDEV_POST_INIT:
+ return netvsc_prepare_bonding(event_dev);
case NETDEV_REGISTER:
- return netvsc_register_vf(event_dev);
+ return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER);
case NETDEV_UNREGISTER:
return netvsc_unregister_vf(event_dev);
case NETDEV_UP:
@@ -2508,12 +2592,17 @@ static int __init netvsc_drv_init(void)
}
netvsc_ring_bytes = ring_size * PAGE_SIZE;
+ register_netdevice_notifier(&netvsc_netdev_notifier);
+
ret = vmbus_driver_register(&netvsc_drv);
if (ret)
- return ret;
+ goto err_vmbus_reg;
- register_netdevice_notifier(&netvsc_netdev_notifier);
return 0;
+
+err_vmbus_reg:
+ unregister_netdevice_notifier(&netvsc_netdev_notifier);
+ return ret;
}
MODULE_LICENSE("GPL");
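For reference, a minimal sketch of the init-ordering pattern this netvsc hunk adopts, with hypothetical example_ names: the netdevice notifier is registered before the bus driver so no NETDEV_POST_INIT/NETDEV_REGISTER events are missed, and it is unregistered again if driver registration fails.

static int __init example_drv_init(void)
{
	int ret;

	/* Register the notifier first so no netdev events are missed
	 * between driver registration and the first probe.
	 */
	register_netdevice_notifier(&example_netdev_notifier);

	ret = example_bus_driver_register(&example_drv);
	if (ret)
		goto err_bus_reg;

	return 0;

err_bus_reg:
	/* Unwind in reverse order on failure. */
	unregister_netdevice_notifier(&example_netdev_notifier);
	return ret;
}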
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 5945ac5f38ee..179e3e42b5f0 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1162,9 +1162,10 @@ static int adf7242_stats_show(struct seq_file *file, void *offset)
static void adf7242_debugfs_init(struct adf7242_local *lp)
{
- char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-";
+ char debugfs_dir_name[DNAME_INLINE_LEN + 1];
- strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
+ snprintf(debugfs_dir_name, sizeof(debugfs_dir_name),
+ "adf7242-%s", dev_name(&lp->spi->dev));
lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
@@ -1310,10 +1311,11 @@ static int adf7242_remove(struct spi_device *spi)
debugfs_remove_recursive(lp->debugfs_root);
+ ieee802154_unregister_hw(lp->hw);
+
cancel_delayed_work_sync(&lp->work);
destroy_workqueue(lp->wqueue);
- ieee802154_unregister_hw(lp->hw);
mutex_destroy(&lp->bmux);
ieee802154_free_hw(lp->hw);
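The debugfs rename above swaps strncat() for snprintf(): strncat()'s length argument bounds only the appended bytes, not the destination, so the pre-seeded "adf7242-" prefix plus a long device name could overrun the buffer. A minimal userspace sketch of the safe idiom (buffer size chosen for illustration):

#include <stdio.h>

int main(void)
{
	char name[32];

	/* snprintf() bounds the *total* output to sizeof(name) and
	 * always NUL-terminates, truncating the %s if necessary.
	 */
	snprintf(name, sizeof(name), "adf7242-%s", "spi0.0");
	puts(name); /* "adf7242-spi0.0" */
	return 0;
}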
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 47959aadbc50..fdbdc22fe4e5 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -926,7 +926,7 @@ static int ca8210_spi_transfer(
dev_dbg(&spi->dev, "%s called\n", __func__);
- cas_ctl = kmalloc(sizeof(*cas_ctl), GFP_ATOMIC);
+ cas_ctl = kzalloc(sizeof(*cas_ctl), GFP_ATOMIC);
if (!cas_ctl)
return -ENOMEM;
@@ -1944,10 +1944,9 @@ static int ca8210_skb_tx(
struct ca8210_priv *priv
)
{
- int status;
struct ieee802154_hdr header = { };
struct secspec secspec;
- unsigned int mac_len;
+ int mac_len, status;
dev_dbg(&priv->spi->dev, "%s called\n", __func__);
@@ -1955,6 +1954,8 @@ static int ca8210_skb_tx(
* packet
*/
mac_len = ieee802154_hdr_peek_addrs(skb, &header);
+ if (mac_len < 0)
+ return mac_len;
secspec.security_level = header.sec.level;
secspec.key_id_mode = header.sec.key_id_mode;
@@ -2781,7 +2782,6 @@ static int ca8210_register_ext_clock(struct spi_device *spi)
struct device_node *np = spi->dev.of_node;
struct ca8210_priv *priv = spi_get_drvdata(spi);
struct ca8210_platform_data *pdata = spi->dev.platform_data;
- int ret = 0;
if (!np)
return -EFAULT;
@@ -2798,18 +2798,8 @@ static int ca8210_register_ext_clock(struct spi_device *spi)
dev_crit(&spi->dev, "Failed to register external clk\n");
return PTR_ERR(priv->clk);
}
- ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk);
- if (ret) {
- clk_unregister(priv->clk);
- dev_crit(
- &spi->dev,
- "Failed to register external clock as clock provider\n"
- );
- } else {
- dev_info(&spi->dev, "External clock set as clock provider\n");
- }
- return ret;
+ return of_clk_add_provider(np, of_clk_src_simple_get, priv->clk);
}
/**
@@ -2821,8 +2811,8 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi)
{
struct ca8210_priv *priv = spi_get_drvdata(spi);
- if (!priv->clk)
- return
+ if (IS_ERR_OR_NULL(priv->clk))
+ return;
of_clk_del_provider(spi->dev.of_node);
clk_unregister(priv->clk);
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index 43506948e444..9739a6ed91ad 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -507,6 +507,7 @@ cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
goto err_tx;
if (status & CC2520_STATUS_TX_UNDERFLOW) {
+ rc = -EINVAL;
dev_err(&priv->spi->dev, "cc2520 tx underflow exception\n");
goto err_tx;
}
@@ -972,7 +973,7 @@ static int cc2520_hw_init(struct cc2520_private *priv)
if (timeout-- <= 0) {
dev_err(&priv->spi->dev, "oscillator start failed!\n");
- return ret;
+ return -ETIMEDOUT;
}
udelay(1);
} while (!(status & CC2520_STATUS_XOSC32M_STABLE));
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 1d181eff0c29..4028cbe275d6 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -522,7 +522,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
{
struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1];
- struct hwsim_edge_info *einfo;
+ struct hwsim_edge_info *einfo, *einfo_old;
struct hwsim_phy *phy_v0;
struct hwsim_edge *e;
u32 v0, v1;
@@ -560,8 +560,10 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
list_for_each_entry_rcu(e, &phy_v0->edges, list) {
if (e->endpoint->idx == v1) {
einfo->lqi = lqi;
- rcu_assign_pointer(e->info, einfo);
+ einfo_old = rcu_replace_pointer(e->info, einfo,
+ lockdep_is_held(&hwsim_phys_lock));
rcu_read_unlock();
+ kfree_rcu(einfo_old, rcu);
mutex_unlock(&hwsim_phys_lock);
return 0;
}
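The hwsim change above stops leaking the previous edge info: rcu_replace_pointer() publishes the new object and hands back the old one, which may only be freed after a grace period. A minimal sketch of the pattern, with hypothetical example_ types and lock:

struct example_info {
	u8 lqi;
	struct rcu_head rcu;
};

static void example_set_info(struct example_edge *e,
			     struct example_info *new_info)
{
	struct example_info *old;

	mutex_lock(&example_lock);
	/* Publish new_info; readers under rcu_read_lock() see either
	 * the old or the new pointer, never a torn value.
	 */
	old = rcu_replace_pointer(e->info, new_info,
				  lockdep_is_held(&example_lock));
	mutex_unlock(&example_lock);

	/* Free the old object only after all pre-existing readers
	 * have finished with it.
	 */
	kfree_rcu(old, rcu);
}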
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 8801d093135c..bfea28bd4502 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -412,7 +412,7 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
return addr;
}
-static int ipvlan_process_v4_outbound(struct sk_buff *skb)
+static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
const struct iphdr *ip4h = ip_hdr(skb);
struct net_device *dev = skb->dev;
@@ -437,27 +437,28 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
goto err;
}
skb_dst_set(skb, &rt->dst);
+
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+
err = ip_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
else
ret = NET_XMIT_SUCCESS;
goto out;
err:
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
kfree_skb(skb);
out:
return ret;
}
#if IS_ENABLED(CONFIG_IPV6)
-static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+
+static noinline_for_stack int
+ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
- struct net_device *dev = skb->dev;
- struct net *net = dev_net(dev);
- struct dst_entry *dst;
- int err, ret = NET_XMIT_DROP;
struct flowi6 fl6 = {
.flowi6_oif = dev->ifindex,
.daddr = ip6h->daddr,
@@ -467,24 +468,38 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
.flowi6_mark = skb->mark,
.flowi6_proto = ip6h->nexthdr,
};
+ struct dst_entry *dst;
+ int err;
- dst = ip6_route_output(net, NULL, &fl6);
- if (dst->error) {
- ret = dst->error;
+ dst = ip6_route_output(dev_net(dev), NULL, &fl6);
+ err = dst->error;
+ if (err) {
dst_release(dst);
- goto err;
+ return err;
}
skb_dst_set(skb, dst);
- err = ip6_local_out(net, skb->sk, skb);
+ return 0;
+}
+
+static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ int err, ret = NET_XMIT_DROP;
+
+ err = ipvlan_route_v6_outbound(dev, skb);
+ if (unlikely(err)) {
+ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ return err;
+ }
+
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+
+ err = ip6_local_out(dev_net(dev), skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
else
ret = NET_XMIT_SUCCESS;
- goto out;
-err:
- dev->stats.tx_errors++;
- kfree_skb(skb);
-out:
return ret;
}
#else
@@ -496,7 +511,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
static int ipvlan_process_outbound(struct sk_buff *skb)
{
- struct ethhdr *ethh = eth_hdr(skb);
int ret = NET_XMIT_DROP;
/* The ipvlan is a pseudo-L2 device, so the packets that we receive
@@ -506,6 +520,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
if (skb_mac_header_was_set(skb)) {
/* In this mode we don't care about
* multicast and broadcast traffic */
+ struct ethhdr *ethh = eth_hdr(skb);
+
if (is_multicast_ether_addr(ethh->h_dest)) {
pr_debug_ratelimited(
"Dropped {multi|broad}cast of type=[%x]\n",
@@ -579,7 +595,8 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
consume_skb(skb);
return NET_XMIT_DROP;
}
- return ipvlan_rcv_frame(addr, &skb, true);
+ ipvlan_rcv_frame(addr, &skb, true);
+ return NET_XMIT_SUCCESS;
}
}
out:
@@ -590,7 +607,7 @@ out:
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
- struct ethhdr *eth = eth_hdr(skb);
+ struct ethhdr *eth = skb_eth_hdr(skb);
struct ipvl_addr *addr;
void *lyr3h;
int addr_type;
@@ -605,7 +622,8 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
consume_skb(skb);
return NET_XMIT_DROP;
}
- return ipvlan_rcv_frame(addr, &skb, true);
+ ipvlan_rcv_frame(addr, &skb, true);
+ return NET_XMIT_SUCCESS;
}
}
skb = skb_share_check(skb, GFP_ATOMIC);
@@ -617,9 +635,11 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
* the skb for the main-dev. At the RX side we just return
* RX_PASS for it to be processed further on the stack.
*/
- return dev_forward_skb(ipvlan->phy_dev, skb);
+ dev_forward_skb(ipvlan->phy_dev, skb);
+ return NET_XMIT_SUCCESS;
} else if (is_multicast_ether_addr(eth->h_dest)) {
+ skb_reset_mac_header(skb);
ipvlan_skb_crossing_ns(skb, NULL);
ipvlan_multicast_enqueue(ipvlan->port, skb, true);
return NET_XMIT_SUCCESS;
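The tx_errors accounting above (paired with the DEV_STATS_READ() added to ipvlan_get_stats64() below) moves from a plain dev->stats field increment to the DEV_STATS_INC()/DEV_STATS_READ() helpers, which use atomic_long operations so concurrent increments from several transmit paths are not lost. A minimal sketch, assuming a kernel where these helpers are available:

static void example_count_tx_error(struct net_device *dev)
{
	/* Atomic read-modify-write; safe without the caller holding
	 * any lock, unlike a plain dev->stats.tx_errors++.
	 */
	DEV_STATS_INC(dev, tx_errors);
}

static void example_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *s)
{
	s->tx_errors = DEV_STATS_READ(dev, tx_errors);
}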
diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c
index 943d26cbf39f..d5b05e803219 100644
--- a/drivers/net/ipvlan/ipvlan_l3s.c
+++ b/drivers/net/ipvlan/ipvlan_l3s.c
@@ -101,6 +101,11 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
goto out;
skb->dev = addr->master->dev;
+ skb->skb_iif = skb->dev->ifindex;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (addr->atype == IPVL_IPV6)
+ IP6CB(skb)->iif = skb->dev->ifindex;
+#endif
len = skb->len + ETH_HLEN;
ipvlan_count_rx(addr->master, len, true, false);
out:
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 5fbabae2909e..4edec38437d0 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -320,6 +320,7 @@ static void ipvlan_get_stats64(struct net_device *dev,
s->rx_dropped = rx_errs;
s->tx_dropped = tx_drps;
}
+ s->tx_errors = DEV_STATS_READ(dev, tx_errors);
}
static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
@@ -735,7 +736,8 @@ static int ipvlan_device_event(struct notifier_block *unused,
write_pnet(&port->pnet, newnet);
- ipvlan_migrate_l3s_hook(oldnet, newnet);
+ if (port->mode == IPVLAN_MODE_L3S)
+ ipvlan_migrate_l3s_hook(oldnet, newnet);
break;
}
case NETDEV_UNREGISTER:
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 1cedb634f4f7..f01078b2581c 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -194,7 +194,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = {
.notifier_call = ipvtap_device_event,
};
-static int ipvtap_init(void)
+static int __init ipvtap_init(void)
{
int err;
@@ -228,7 +228,7 @@ out1:
}
module_init(ipvtap_init);
-static void ipvtap_exit(void)
+static void __exit ipvtap_exit(void)
{
rtnl_link_unregister(&ipvtap_link_ops);
unregister_netdevice_notifier(&ipvtap_notifier_block);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 14545a8797a8..7788f72c262e 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -206,7 +206,7 @@ static __net_init int loopback_net_init(struct net *net)
int err;
err = -ENOMEM;
- dev = alloc_netdev(0, "lo", NET_NAME_UNKNOWN, loopback_setup);
+ dev = alloc_netdev(0, "lo", NET_NAME_PREDICTABLE, loopback_setup);
if (!dev)
goto out;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index f729f55f6a17..d5f2d895dba2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -317,6 +317,19 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
return sa;
}
+static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
+{
+ struct macsec_rx_sa *sa = NULL;
+ int an;
+
+ for (an = 0; an < MACSEC_NUM_AN; an++) {
+ sa = macsec_rxsa_get(rx_sc->sa[an]);
+ if (sa)
+ break;
+ }
+ return sa;
+}
+
static void free_rx_sc_rcu(struct rcu_head *head)
{
struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
@@ -561,18 +574,28 @@ static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
skb->protocol = eth_hdr(skb)->h_proto;
}
+static unsigned int macsec_msdu_len(struct sk_buff *skb)
+{
+ struct macsec_dev *macsec = macsec_priv(skb->dev);
+ struct macsec_secy *secy = &macsec->secy;
+ bool sci_present = macsec_skb_cb(skb)->has_sci;
+
+ return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
+}
+
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
struct macsec_tx_sa *tx_sa)
{
+ unsigned int msdu_len = macsec_msdu_len(skb);
struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
u64_stats_update_begin(&txsc_stats->syncp);
if (tx_sc->encrypt) {
- txsc_stats->stats.OutOctetsEncrypted += skb->len;
+ txsc_stats->stats.OutOctetsEncrypted += msdu_len;
txsc_stats->stats.OutPktsEncrypted++;
this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
} else {
- txsc_stats->stats.OutOctetsProtected += skb->len;
+ txsc_stats->stats.OutOctetsProtected += msdu_len;
txsc_stats->stats.OutPktsProtected++;
this_cpu_inc(tx_sa->stats->OutPktsProtected);
}
@@ -602,9 +625,10 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
aead_request_free(macsec_skb_cb(skb)->req);
rcu_read_lock_bh();
- macsec_encrypt_finish(skb, dev);
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
- len = skb->len;
+ /* packet is encrypted/protected so tx_bytes must be calculated */
+ len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
+ macsec_encrypt_finish(skb, dev);
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
rcu_read_unlock_bh();
@@ -760,6 +784,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
macsec_skb_cb(skb)->req = req;
macsec_skb_cb(skb)->tx_sa = tx_sa;
+ macsec_skb_cb(skb)->has_sci = sci_present;
aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
dev_hold(skb->dev);
@@ -800,15 +825,17 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
+ DEV_STATS_INC(secy->netdev, rx_dropped);
return false;
}
if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
+ unsigned int msdu_len = macsec_msdu_len(skb);
u64_stats_update_begin(&rxsc_stats->syncp);
if (hdr->tci_an & MACSEC_TCI_E)
- rxsc_stats->stats.InOctetsDecrypted += skb->len;
+ rxsc_stats->stats.InOctetsDecrypted += msdu_len;
else
- rxsc_stats->stats.InOctetsValidated += skb->len;
+ rxsc_stats->stats.InOctetsValidated += msdu_len;
u64_stats_update_end(&rxsc_stats->syncp);
}
@@ -821,6 +848,8 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotValid++;
u64_stats_update_end(&rxsc_stats->syncp);
+ this_cpu_inc(rx_sa->stats->InPktsNotValid);
+ DEV_STATS_INC(secy->netdev, rx_errors);
return false;
}
@@ -906,9 +935,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
macsec_finalize_skb(skb, macsec->secy.icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+ len = skb->len;
macsec_reset_skb(skb, macsec->secy.netdev);
- len = skb->len;
if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
count_rx(dev, len);
@@ -1050,6 +1079,7 @@ static void handle_not_macsec(struct sk_buff *skb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
continue;
}
@@ -1161,6 +1191,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsBadTag++;
u64_stats_update_end(&secy_stats->syncp);
+ DEV_STATS_INC(secy->netdev, rx_errors);
goto drop_nosa;
}
@@ -1171,11 +1202,15 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
/* If validateFrames is Strict or the C bit in the
* SecTAG is set, discard
*/
+ struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
if (hdr->tci_an & MACSEC_TCI_C ||
secy->validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotUsingSA++;
u64_stats_update_end(&rxsc_stats->syncp);
+ DEV_STATS_INC(secy->netdev, rx_errors);
+ if (active_rx_sa)
+ this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
goto drop_nosa;
}
@@ -1185,6 +1220,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsUnusedSA++;
u64_stats_update_end(&rxsc_stats->syncp);
+ if (active_rx_sa)
+ this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
goto deliver;
}
@@ -1202,6 +1239,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
goto drop;
}
}
@@ -1230,6 +1268,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
deliver:
macsec_finalize_skb(skb, secy->icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+ len = skb->len;
macsec_reset_skb(skb, secy->netdev);
if (rx_sa)
@@ -1237,12 +1276,11 @@ deliver:
macsec_rxsc_put(rx_sc);
skb_orphan(skb);
- len = skb->len;
ret = gro_cells_receive(&macsec->gro_cells, skb);
if (ret == NET_RX_SUCCESS)
count_rx(dev, len);
else
- macsec->secy.netdev->stats.rx_dropped++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
rcu_read_unlock();
@@ -1279,6 +1317,7 @@ nosci:
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoSCI++;
u64_stats_update_end(&secy_stats->syncp);
+ DEV_STATS_INC(macsec->secy.netdev, rx_errors);
continue;
}
@@ -1297,7 +1336,7 @@ nosci:
secy_stats->stats.InPktsUnknownSCI++;
u64_stats_update_end(&secy_stats->syncp);
} else {
- macsec->secy.netdev->stats.rx_dropped++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
}
}
@@ -1311,8 +1350,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
struct crypto_aead *tfm;
int ret;
- /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
- tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
if (IS_ERR(tfm))
return tfm;
@@ -2731,21 +2769,21 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
if (!secy->operational) {
kfree_skb(skb);
- dev->stats.tx_dropped++;
+ DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}
+ len = skb->len;
skb = macsec_encrypt(skb, dev);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EINPROGRESS)
- dev->stats.tx_dropped++;
+ DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
macsec_encrypt_finish(skb, dev);
- len = skb->len;
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
return ret;
@@ -2957,8 +2995,9 @@ static void macsec_get_stats64(struct net_device *dev,
s->tx_bytes += tmp.tx_bytes;
}
- s->rx_dropped = dev->stats.rx_dropped;
- s->tx_dropped = dev->stats.tx_dropped;
+ s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
+ s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
+ s->rx_errors = DEV_STATS_READ(dev, rx_errors);
}
static int macsec_get_iflink(const struct net_device *dev)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 07622cf8765a..46398b06676c 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -138,7 +138,7 @@ static struct macvlan_source_entry *macvlan_hash_lookup_source(
u32 idx = macvlan_eth_hash(addr);
struct hlist_head *h = &vlan->port->vlan_source_hash[idx];
- hlist_for_each_entry_rcu(entry, h, hlist) {
+ hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
if (ether_addr_equal_64bits(entry->addr, addr) &&
entry->vlan == vlan)
return entry;
@@ -765,7 +765,7 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
if (dev->flags & IFF_UP) {
if (change & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
- if (change & IFF_PROMISC)
+ if (!macvlan_passthru(vlan->port) && change & IFF_PROMISC)
dev_set_promiscuity(lowerdev,
dev->flags & IFF_PROMISC ? 1 : -1);
@@ -1166,7 +1166,7 @@ void macvlan_common_setup(struct net_device *dev)
{
ether_setup(dev);
- dev->min_mtu = 0;
+ /* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */
dev->max_mtu = ETH_MAX_MTU;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
@@ -1499,8 +1499,10 @@ destroy_macvlan_port:
/* the macvlan port may be freed by macvlan_uninit when fail to register.
* so we destroy the macvlan port only when it's valid.
*/
- if (create && macvlan_port_get_rtnl(lowerdev))
+ if (create && macvlan_port_get_rtnl(lowerdev)) {
+ macvlan_flush_sources(port, vlan);
macvlan_port_destroy(port->dev);
+ }
return err;
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);
@@ -1602,7 +1604,7 @@ static int macvlan_fill_info_macaddr(struct sk_buff *skb,
struct hlist_head *h = &vlan->port->vlan_source_hash[i];
struct macvlan_source_entry *entry;
- hlist_for_each_entry_rcu(entry, h, hlist) {
+ hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
if (entry->vlan != vlan)
continue;
if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr))
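Both macvlan hash walks above gain lockdep_rtnl_is_held() as the optional fourth argument of hlist_for_each_entry_rcu(). The source-entry list is modified only under RTNL, so traversals from netlink handlers hold RTNL rather than rcu_read_lock(); the extra condition tells RCU-lockdep that this is a legitimate protection and suppresses false splats. A minimal sketch with hypothetical entry names:

static struct example_entry *example_lookup(struct hlist_head *head)
{
	struct example_entry *entry;

	/* Legal under either rcu_read_lock() or RTNL. */
	hlist_for_each_entry_rcu(entry, head, hlist,
				 lockdep_rtnl_is_held()) {
		if (entry->in_use)
			return entry;
	}
	return NULL;
}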
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index fb182bec8f06..6b7bba720d8c 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -130,14 +130,10 @@ static u16 net_failover_select_queue(struct net_device *dev,
txq = ops->ndo_select_queue(primary_dev, skb, sb_dev);
else
txq = netdev_pick_tx(primary_dev, skb, NULL);
-
- qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
-
- return txq;
+ } else {
+ txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
}
- txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
-
/* Save the original txq to restore before passing to the driver */
qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index e0a4acc6144b..8e47755cc159 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -347,10 +347,12 @@ nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
- nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
+ nmap->entry[idx].key = kmalloc(offmap->map.key_size,
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!nmap->entry[idx].key)
return -ENOMEM;
- nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
+ nmap->entry[idx].value = kmalloc(offmap->map.value_size,
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!nmap->entry[idx].value) {
kfree(nmap->entry[idx].key);
nmap->entry[idx].key = NULL;
@@ -492,7 +494,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
if (offmap->map.map_flags)
return -EINVAL;
- nmap = kzalloc(sizeof(*nmap), GFP_USER);
+ nmap = kzalloc(sizeof(*nmap), GFP_KERNEL_ACCOUNT);
if (!nmap)
return -ENOMEM;
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index a5bab614ff84..b701ee83e64a 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -137,7 +137,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
enqueue_again:
rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
if (rc) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
ndev->stats.rx_errors++;
ndev->stats.rx_fifo_errors++;
}
@@ -192,7 +192,7 @@ static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
ndev->stats.tx_aborted_errors++;
}
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
/* Make sure anybody stopping the queue after this sees the new
@@ -484,7 +484,14 @@ static int __init ntb_netdev_init_module(void)
rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
if (rc)
return rc;
- return ntb_transport_register_client(&ntb_netdev_client);
+
+ rc = ntb_transport_register_client(&ntb_netdev_client);
+ if (rc) {
+ ntb_transport_unregister_client_dev(KBUILD_MODNAME);
+ return rc;
+ }
+
+ return 0;
}
module_init(ntb_netdev_init_module);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index dcf2051ef2c0..9239955d78db 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -485,6 +485,11 @@ config VITESSE_PHY
---help---
Currently supports the vsc8244
+config XILINX_PHY
+ tristate "Drivers for xilinx PHYs"
+ ---help---
+ This module provides a driver for the Xilinx PCS/PMA Core.
+
config XILINX_GMII2RGMII
tristate "Xilinx GMII2RGMII converter driver"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index a03437e091f3..5ee50ae886b1 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -93,4 +93,5 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_TERANETICS_PHY) += teranetics.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
+obj-$(CONFIG_XILINX_PHY) += xilinx_phy.o
obj-$(CONFIG_XILINX_GMII2RGMII) += xilinx_gmii2rgmii.o
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 7be75a611e9e..0e0bcc304d6c 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -425,6 +425,17 @@ static int bcm5482_read_status(struct phy_device *phydev)
return err;
}
+static int bcm54810_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
+{
+ return -EOPNOTSUPP;
+}
+
+static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
+ u16 val)
+{
+ return -EOPNOTSUPP;
+}
+
static int bcm5481_config_aneg(struct phy_device *phydev)
{
struct device_node *np = phydev->mdio.dev.of_node;
@@ -696,6 +707,8 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54810",
/* PHY_GBIT_FEATURES */
+ .read_mmd = bcm54810_read_mmd,
+ .write_mmd = bcm54810_write_mmd,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index ae17d2f9d534..da3983352dd4 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -197,9 +197,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
if (misr_status < 0)
return misr_status;
- misr_status |= (DP83822_RX_ERR_HF_INT_EN |
- DP83822_FALSE_CARRIER_HF_INT_EN |
- DP83822_ANEG_COMPLETE_INT_EN |
+ misr_status |= (DP83822_ANEG_COMPLETE_INT_EN |
DP83822_DUP_MODE_CHANGE_INT_EN |
DP83822_SPEED_CHANGED_INT_EN |
DP83822_LINK_STAT_INT_EN |
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 87c0cdbf262a..7de7c91b1e6b 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -21,6 +21,8 @@
#define MII_DP83867_PHYCTRL 0x10
#define MII_DP83867_MICR 0x12
#define MII_DP83867_ISR 0x13
+#define MII_DP83867_CFG2 0x14
+#define MII_DP83867_BISCR 0x16
#define DP83867_CTRL 0x1f
#define DP83867_CFG3 0x1e
@@ -41,6 +43,7 @@
#define DP83867_SGMIICTL 0x00D3
#define DP83867_10M_SGMII_CFG 0x016F
#define DP83867_10M_SGMII_RATE_ADAPT_MASK BIT(7)
+#define DP83867_SGMIITYPE 0x00D3
#define DP83867_SW_RESET BIT(15)
#define DP83867_SW_RESTART BIT(14)
@@ -81,6 +84,12 @@
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
#define DP83867_PHYCR_FIFO_DEPTH_MAX 0x03
#define DP83867_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 14)
+#define DP83867_MDI_CROSSOVER 5
+#define DP83867_MDI_CROSSOVER_AUTO 0b10
+#define DP83867_MDI_CROSSOVER_MDIX 0b01
+#define DP83867_PHYCTRL_SGMIIEN 0x0800
+#define DP83867_PHYCTRL_RXFIFO_SHIFT 12
+#define DP83867_PHYCTRL_TXFIFO_SHIFT 14
#define DP83867_PHYCR_RESERVED_MASK BIT(11)
#define DP83867_PHYCR_FORCE_LINK_GOOD BIT(10)
@@ -90,6 +99,23 @@
#define DP83867_RGMII_RX_CLK_DELAY_MAX 0xf
#define DP83867_RGMII_RX_CLK_DELAY_SHIFT 0
+/* CFG2 bits */
+#define MII_DP83867_CFG2_SPEEDOPT_10EN 0x0040
+#define MII_DP83867_CFG2_SGMII_AUTONEGEN 0x0080
+#define MII_DP83867_CFG2_SPEEDOPT_ENH 0x0100
+#define MII_DP83867_CFG2_SPEEDOPT_CNT 0x0800
+#define MII_DP83867_CFG2_SPEEDOPT_INTLOW 0x2000
+#define MII_DP83867_CFG2_MASK 0x003F
+
+/* CFG4 bits */
+#define DP83867_CFG4_SGMII_AUTONEG_TIMER_MASK 0x60
+#define DP83867_CFG4_SGMII_AUTONEG_TIMER_16MS 0x00
+#define DP83867_CFG4_SGMII_AUTONEG_TIMER_2US 0x20
+#define DP83867_CFG4_SGMII_AUTONEG_TIMER_800US 0x40
+#define DP83867_CFG4_SGMII_AUTONEG_TIMER_11MS 0x60
+#define DP83867_CFG4_RESVDBIT7 BIT(7)
+#define DP83867_CFG4_RESVDBIT8 BIT(8)
+
/* IO_MUX_CFG bits */
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK 0x1f
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0
@@ -104,6 +130,8 @@
/* CFG4 bits */
#define DP83867_CFG4_PORT_MIRROR_EN BIT(0)
+/* SGMIICTL1 bits */
+#define DP83867_SGMIICLK_EN BIT(14)
/* FLD_THR_CFG */
#define DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK 0x7
@@ -124,6 +152,7 @@ struct dp83867_private {
bool set_clk_output;
u32 clk_output_sel;
bool sgmii_ref_clk_en;
+ bool wiremode_6;
};
static int dp83867_ack_interrupt(struct phy_device *phydev)
@@ -272,6 +301,8 @@ static int dp83867_of_init(struct phy_device *phydev)
if (of_property_read_bool(of_node, "enet-phy-lane-no-swap"))
dp83867->port_mirroring = DP83867_PORT_MIRROING_DIS;
+ dp83867->wiremode_6 = of_property_read_bool(of_node, "ti,6-wire-mode");
+
ret = of_property_read_u32(of_node, "ti,fifo-depth",
&dp83867->fifo_depth);
if (ret) {
@@ -311,17 +342,19 @@ static int dp83867_probe(struct phy_device *phydev)
static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867 = phydev->priv;
- int ret, val, bs;
- u16 delay;
+ int ret, bs;
+ u16 val, delay, cfg2;
ret = dp83867_of_init(phydev);
if (ret)
return ret;
/* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */
- if (dp83867->rxctrl_strap_quirk)
- phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
- BIT(7));
+ if (dp83867->rxctrl_strap_quirk) {
+ val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4);
+ val &= ~BIT(7);
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val);
+ }
bs = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS2);
if (bs & DP83867_STRAP_STS2_STRAP_FLD) {
@@ -338,6 +371,12 @@ static int dp83867_config_init(struct phy_device *phydev)
}
if (phy_interface_is_rgmii(phydev)) {
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL,
+ (DP83867_MDI_CROSSOVER_AUTO << DP83867_MDI_CROSSOVER) |
+ (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+ if (ret)
+ return ret;
+
val = phy_read(phydev, MII_DP83867_PHYCTRL);
if (val < 0)
return val;
@@ -362,33 +401,74 @@ static int dp83867_config_init(struct phy_device *phydev)
if (ret)
return ret;
- /* If rgmii mode with no internal delay is selected, we do NOT use
- * aligned mode as one might expect. Instead we use the PHY's default
- * based on pin strapping. And the "mode 0" default is to *use*
- * internal delay with a value of 7 (2.00 ns).
- *
- * Set up RGMII delays
+ } else {
+ /* Set SGMIICTL1 6-wire mode */
+ if (dp83867->wiremode_6)
+ phy_write_mmd(phydev, DP83867_DEVADDR,
+ DP83867_SGMIITYPE, DP83867_SGMIICLK_EN);
+
+ phy_write(phydev, MII_BMCR,
+ (BMCR_ANENABLE | BMCR_FULLDPLX | BMCR_SPEED1000));
+
+ cfg2 = phy_read(phydev, MII_DP83867_CFG2);
+ cfg2 &= MII_DP83867_CFG2_MASK;
+ cfg2 |= (MII_DP83867_CFG2_SPEEDOPT_10EN |
+ MII_DP83867_CFG2_SGMII_AUTONEGEN |
+ MII_DP83867_CFG2_SPEEDOPT_ENH |
+ MII_DP83867_CFG2_SPEEDOPT_CNT |
+ MII_DP83867_CFG2_SPEEDOPT_INTLOW);
+ phy_write(phydev, MII_DP83867_CFG2, cfg2);
+
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, 0x0);
+
+ phy_write(phydev, MII_DP83867_PHYCTRL,
+ DP83867_PHYCTRL_SGMIIEN |
+ (DP83867_MDI_CROSSOVER_AUTO << DP83867_MDI_CROSSOVER) |
+ (dp83867->fifo_depth << DP83867_PHYCTRL_RXFIFO_SHIFT) |
+ (dp83867->fifo_depth << DP83867_PHYCTRL_TXFIFO_SHIFT));
+ phy_write(phydev, MII_DP83867_BISCR, 0x0);
+
+ /* This is a SW workaround for link instability if
+ * RX_CTRL is not strapped to mode 3 or 4 in HW.
*/
- val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
+ if (dp83867->rxctrl_strap_quirk) {
+ val = phy_read_mmd(phydev, DP83867_DEVADDR,
+ DP83867_CFG4);
+ val &= ~DP83867_CFG4_RESVDBIT7;
+ val |= DP83867_CFG4_RESVDBIT8;
+ val &= ~DP83867_CFG4_SGMII_AUTONEG_TIMER_MASK;
+ val |= DP83867_CFG4_SGMII_AUTONEG_TIMER_11MS;
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+ val);
+ }
+ }
+
+ /* If rgmii mode with no internal delay is selected, we do NOT use
+ * aligned mode as one might expect. Instead we use the PHY's default
+ * based on pin strapping. And the "mode 0" default is to *use*
+ * internal delay with a value of 7 (2.00 ns).
+ *
+ * Set up RGMII delays
+ */
+ val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
- val &= ~(DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
- val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
+ val &= ~(DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- val |= DP83867_RGMII_TX_CLK_DELAY_EN;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ val |= DP83867_RGMII_TX_CLK_DELAY_EN;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- val |= DP83867_RGMII_RX_CLK_DELAY_EN;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ val |= DP83867_RGMII_RX_CLK_DELAY_EN;
- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, val);
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, val);
- delay = (dp83867->rx_id_delay |
- (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
+ delay = (dp83867->rx_id_delay |
+ (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL,
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL,
delay);
- }
/* If specified, set io impedance */
if (dp83867->io_impedance >= 0)
@@ -432,6 +512,14 @@ static int dp83867_config_init(struct phy_device *phydev)
else
val &= ~DP83867_SGMII_TYPE;
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val);
+
+ /* This is a SW workaround for link instability if RX_CTRL is
+ * not strapped to mode 3 or 4 in HW. This is required for SGMII
+ * in addition to clearing bit 7, handled above.
+ */
+ if (dp83867->rxctrl_strap_quirk)
+ phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+ BIT(8));
}
val = phy_read(phydev, DP83867_CFG3);
@@ -468,7 +556,7 @@ static int dp83867_phy_reset(struct phy_device *phydev)
{
int err;
- err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
+ err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
if (err < 0)
return err;
diff --git a/drivers/net/phy/mdio-i2c.c b/drivers/net/phy/mdio-i2c.c
index 0dce67672548..5969878e0aa7 100644
--- a/drivers/net/phy/mdio-i2c.c
+++ b/drivers/net/phy/mdio-i2c.c
@@ -10,10 +10,9 @@
* of their settings.
*/
#include <linux/i2c.h>
+#include <linux/mdio/mdio-i2c.h>
#include <linux/phy.h>
-#include "mdio-i2c.h"
-
/*
* I2C bus addresses 0x50 and 0x51 are normally an EEPROM, which is
* specified to be present in SFP modules. These correspond with PHY
diff --git a/drivers/net/phy/mdio-mux-meson-g12a.c b/drivers/net/phy/mdio-mux-meson-g12a.c
index 7a9ad54582e1..aa3ad38e37d7 100644
--- a/drivers/net/phy/mdio-mux-meson-g12a.c
+++ b/drivers/net/phy/mdio-mux-meson-g12a.c
@@ -4,6 +4,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
@@ -148,6 +149,7 @@ static const struct clk_ops g12a_ephy_pll_ops = {
static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
{
+ u32 value;
int ret;
/* Enable the phy clock */
@@ -161,18 +163,25 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
/* Initialize ephy control */
writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
- writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
- FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
- FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
- PHY_CNTL1_CLK_EN |
- PHY_CNTL1_CLKFREQ |
- PHY_CNTL1_PHY_ENB,
- priv->regs + ETH_PHY_CNTL1);
+
+ /* Make sure we get a 0 -> 1 transition on the enable bit */
+ value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
+ FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
+ FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
+ PHY_CNTL1_CLK_EN |
+ PHY_CNTL1_CLKFREQ;
+ writel(value, priv->regs + ETH_PHY_CNTL1);
writel(PHY_CNTL2_USE_INTERNAL |
PHY_CNTL2_SMI_SRC_MAC |
PHY_CNTL2_RX_CLK_EPHY,
priv->regs + ETH_PHY_CNTL2);
+ value |= PHY_CNTL1_PHY_ENB;
+ writel(value, priv->regs + ETH_PHY_CNTL1);
+
+ /* The phy needs a bit of time to power up */
+ mdelay(10);
+
return 0;
}
diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/phy/mdio-thunder.c
index 1e2f57ed1ef7..31ca0361a11e 100644
--- a/drivers/net/phy/mdio-thunder.c
+++ b/drivers/net/phy/mdio-thunder.c
@@ -104,6 +104,7 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
if (i >= ARRAY_SIZE(nexus->buses))
break;
}
+ fwnode_handle_put(fwn);
return 0;
err_release_regions:
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index 34990eaa3298..461207cdf5d6 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -11,6 +11,7 @@
#include <linux/efi.h>
#include <linux/if_vlan.h>
#include <linux/io.h>
+#include <linux/mdio/mdio-xgene.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
@@ -18,7 +19,6 @@
#include <linux/prefetch.h>
#include <linux/phy.h>
#include <net/ip.h>
-#include "mdio-xgene.h"
static bool xgene_mdio_status;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 05c24db507a2..fdf8221f46fa 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -117,7 +117,12 @@ EXPORT_SYMBOL(mdiobus_unregister_device);
struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr)
{
- struct mdio_device *mdiodev = bus->mdio_map[addr];
+ struct mdio_device *mdiodev;
+
+ if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map))
+ return NULL;
+
+ mdiodev = bus->mdio_map[addr];
if (!mdiodev)
return NULL;
@@ -419,7 +424,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
bus->reset(bus);
for (i = 0; i < PHY_MAX_ADDR; i++) {
- if ((bus->phy_mask & (1 << i)) == 0) {
+ if ((bus->phy_mask & BIT(i)) == 0) {
struct phy_device *phydev;
phydev = mdiobus_scan(bus, i);
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index e8f2ca625837..39151ec6f65e 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -235,6 +235,8 @@ static struct phy_driver meson_gxl_phy[] = {
.config_intr = meson_gxl_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
}, {
PHY_ID_MATCH_EXACT(0x01803301),
.name = "Meson G12A Internal PHY",
@@ -245,6 +247,8 @@ static struct phy_driver meson_gxl_phy[] = {
.config_intr = meson_gxl_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
},
};
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 721153dcfd15..caaa51a70cbd 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1156,6 +1156,7 @@ static struct phy_driver ksphy_driver[] = {
/* PHY_GBIT_FEATURES */
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
+ .soft_reset = genphy_soft_reset,
.config_init = ksz9131_config_init,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index a644e8e5071c..375bbd60b38a 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -326,6 +326,37 @@ static int lan88xx_config_aneg(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+static void lan88xx_link_change_notify(struct phy_device *phydev)
+{
+ int temp;
+
+ /* At forced 100 F/H mode, the chip may fail to set the mode
+ * correctly when the cable is switched between a long (~50+ m)
+ * cable and a short one. As a workaround, set the speed to 10
+ * before setting it to 100 at forced 100 F/H mode.
+ */
+ if (!phydev->autoneg && phydev->speed == 100) {
+ /* disable phy interrupt */
+ temp = phy_read(phydev, LAN88XX_INT_MASK);
+ temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
+ phy_write(phydev, LAN88XX_INT_MASK, temp);
+
+ temp = phy_read(phydev, MII_BMCR);
+ temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+ phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
+ temp |= BMCR_SPEED100;
+ phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
+
+ /* clear pending interrupt generated during the workaround */
+ temp = phy_read(phydev, LAN88XX_INT_STS);
+
+ /* enable phy interrupt back */
+ temp = phy_read(phydev, LAN88XX_INT_MASK);
+ temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
+ phy_write(phydev, LAN88XX_INT_MASK, temp);
+ }
+}
+
static struct phy_driver microchip_phy_driver[] = {
{
.phy_id = 0x0007c130,
@@ -339,6 +370,7 @@ static struct phy_driver microchip_phy_driver[] = {
.config_init = lan88xx_config_init,
.config_aneg = lan88xx_config_aneg,
+ .link_change_notify = lan88xx_link_change_notify,
.ack_interrupt = lan88xx_phy_ack_interrupt,
.config_intr = lan88xx_phy_config_intr,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 78b918dcd547..388799985720 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1312,6 +1312,7 @@ error:
error_module_put:
module_put(d->driver->owner);
+ d->driver = NULL;
error_put_device:
put_device(d);
if (ndev_owner != bus->owner)
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index beaa00342a13..9639aa181968 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -7,6 +7,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
+#include <linux/mdio/mdio-i2c.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -16,7 +17,6 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include "mdio-i2c.h"
#include "sfp.h"
#include "swphy.h"
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index b73298250793..e387c219f17d 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -108,8 +108,11 @@ static int lan911x_config_init(struct phy_device *phydev)
static int lan87xx_read_status(struct phy_device *phydev)
{
struct smsc_phy_priv *priv = phydev->priv;
+ int err;
- int err = genphy_read_status(phydev);
+ err = genphy_read_status(phydev);
+ if (err)
+ return err;
if (!phydev->link && priv->energy_enable) {
int i;
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 151c2a3f0b3a..14814db3194c 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -82,9 +82,15 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev)
if (!priv->phy_dev->drv) {
dev_info(dev, "Attached phy not ready\n");
+ put_device(&priv->phy_dev->mdio.dev);
return -EPROBE_DEFER;
}
+ if (priv->phy_dev->priv) {
+ dev_err(dev, "phydev has a priv field, cannot attach\n");
+ return -EFAULT;
+ }
+
priv->mdio = mdiodev;
priv->phy_drv = priv->phy_dev->drv;
memcpy(&priv->conv_phy_drv, priv->phy_dev->drv,
diff --git a/drivers/net/phy/xilinx_phy.c b/drivers/net/phy/xilinx_phy.c
new file mode 100644
index 000000000000..2410fa3a59ad
--- /dev/null
+++ b/drivers/net/phy/xilinx_phy.c
@@ -0,0 +1,160 @@
+/* Xilinx PCS/PMA Core phy driver
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Description:
+ * This driver is developed for the Xilinx PCS/PMA Core.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/xilinx_phy.h>
+
+#define MII_PHY_STATUS_SPD_MASK 0x0C00
+#define MII_PHY_STATUS_FULLDUPLEX 0x1000
+#define MII_PHY_STATUS_1000 0x0800
+#define MII_PHY_STATUS_100 0x0400
+#define XPCSPMA_PHY_CTRL_ISOLATE_DISABLE 0xFBFF
+
+static int xilinxphy_read_status(struct phy_device *phydev)
+{
+ int err;
+ int status = 0;
+
+ /* Update the link, but return if there
+ * was an error
+ */
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ status = phy_read(phydev, MII_LPA);
+
+ if (status & MII_PHY_STATUS_FULLDUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ switch (status & MII_PHY_STATUS_SPD_MASK) {
+ case MII_PHY_STATUS_1000:
+ phydev->speed = SPEED_1000;
+ break;
+
+ case MII_PHY_STATUS_100:
+ phydev->speed = SPEED_100;
+ break;
+
+ default:
+ phydev->speed = SPEED_10;
+ break;
+ }
+ } else {
+ int bmcr = phy_read(phydev, MII_BMCR);
+
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (bmcr & BMCR_SPEED1000)
+ phydev->speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+ }
+
+ /* For 1000BASE-X PHY mode the speed/duplex will always be
+ * 1000 Mb/s, full duplex
+ */
+ if (phydev->dev_flags == XAE_PHY_TYPE_1000BASE_X) {
+ phydev->duplex = DUPLEX_FULL;
+ phydev->speed = SPEED_1000;
+ }
+
+ /* For 2500BASE-X PHY mode the speed/duplex will always be
+ * 2500 Mb/s, full duplex
+ */
+ if (phydev->dev_flags == XAE_PHY_TYPE_2500) {
+ phydev->duplex = DUPLEX_FULL;
+ phydev->speed = SPEED_2500;
+ }
+
+ return 0;
+}
+
+static int xilinxphy_of_init(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct device_node *of_node = dev->of_node;
+ u32 phytype;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return 0;
+
+ if (!of_node)
+ return -ENODEV;
+
+ if (!of_property_read_u32(of_node, "xlnx,phy-type", &phytype)) {
+ if (phytype == XAE_PHY_TYPE_1000BASE_X)
+ phydev->dev_flags |= XAE_PHY_TYPE_1000BASE_X;
+ if (phytype == XAE_PHY_TYPE_2500)
+ phydev->dev_flags |= XAE_PHY_TYPE_2500;
+ }
+
+ return 0;
+}
+
+static int xilinxphy_config_init(struct phy_device *phydev)
+{
+ int temp;
+
+ xilinxphy_of_init(phydev);
+ temp = phy_read(phydev, MII_BMCR);
+ temp &= XPCSPMA_PHY_CTRL_ISOLATE_DISABLE;
+ phy_write(phydev, MII_BMCR, temp);
+
+ return 0;
+}
+
+static struct phy_driver xilinx_drivers[] = {
+ {
+ .phy_id = XILINX_PHY_ID,
+ .phy_id_mask = XILINX_PHY_ID_MASK,
+ .name = "Xilinx PCS/PMA PHY",
+ .features = PHY_GBIT_FEATURES,
+ .config_init = &xilinxphy_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &xilinxphy_read_status,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
+ },
+};
+
+module_phy_driver(xilinx_drivers);
+
+static struct mdio_device_id __maybe_unused xilinx_tbl[] = {
+ { XILINX_PHY_ID, XILINX_PHY_ID_MASK },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, xilinx_tbl);
+MODULE_DESCRIPTION("Xilinx PCS/PMA PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index e89cdebae6f1..4b50c28f01a7 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -444,12 +444,12 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
}
rcv->state = PLIP_PK_DONE;
if (rcv->skb) {
- kfree_skb(rcv->skb);
+ dev_kfree_skb_irq(rcv->skb);
rcv->skb = NULL;
}
snd->state = PLIP_PK_DONE;
if (snd->skb) {
- dev_kfree_skb(snd->skb);
+ dev_consume_skb_irq(snd->skb);
snd->skb = NULL;
}
spin_unlock_irq(&nl->lock);
@@ -1103,7 +1103,7 @@ plip_open(struct net_device *dev)
/* Any address will do - we take the first. We already
have the first two bytes filled with 0xfc, from
plip_init_dev(). */
- const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
+ const struct in_ifaddr *ifa = rtnl_dereference(in_dev->ifa_list);
if (ifa != NULL) {
memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
}
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index 29a0917a81e6..34c31d6da734 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -470,6 +470,10 @@ ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
case PPPIOCSMRU:
if (get_user(val, p))
break;
+ if (val > U16_MAX) {
+ err = -EINVAL;
+ break;
+ }
if (val < PPP_MRU)
val = PPP_MRU;
ap->mru = val;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index a085213dc2ea..078c0f474f96 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1522,6 +1522,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
int len;
unsigned char *cp;
+ skb->dev = ppp->dev;
+
if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
/* check if we should pass this packet */
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 0f338752c38b..55641e01192d 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -463,6 +463,10 @@ ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
case PPPIOCSMRU:
if (get_user(val, (int __user *) argp))
break;
+ if (val > U16_MAX) {
+ err = -EINVAL;
+ break;
+ }
if (val < PPP_MRU)
val = PPP_MRU;
ap->mru = val;
@@ -698,7 +702,7 @@ ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
/* strip address/control field if present */
p = skb->data;
- if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
+ if (skb->len >= 2 && p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
/* chop off address/control */
if (skb->len < 3)
goto err;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index f870d08bb1f8..c299faaf4b2d 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -525,7 +525,7 @@ static int tap_open(struct inode *inode, struct file *file)
q->sock.state = SS_CONNECTED;
q->sock.file = file;
q->sock.ops = &tap_socket_ops;
- sock_init_data(&q->sock, &q->sk);
+ sock_init_data_uid(&q->sock, &q->sk, current_fsuid());
q->sk.sk_write_space = tap_sock_write_space;
q->sk.sk_destruct = tap_sock_destruct;
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
@@ -715,9 +715,8 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
skb_probe_transport_header(skb);
/* Move network header to the right position for VLAN tagged packets */
- if ((skb->protocol == htons(ETH_P_8021Q) ||
- skb->protocol == htons(ETH_P_8021AD)) &&
- __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+ if (eth_type_vlan(skb->protocol) &&
+ vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
rcu_read_lock();
@@ -1177,9 +1176,8 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
}
/* Move network header to the right position for VLAN tagged packets */
- if ((skb->protocol == htons(ETH_P_8021Q) ||
- skb->protocol == htons(ETH_P_8021AD)) &&
- __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+ if (eth_type_vlan(skb->protocol) &&
+ vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
rcu_read_lock();
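Both tap paths above replace the open-coded pair of EtherType comparisons with eth_type_vlan(), and __vlan_get_protocol() with vlan_get_protocol_and_depth(), which is meant to also verify that the VLAN headers it parses are actually present in the packet data. The substituted-in predicate is roughly the following (per include/linux/if_vlan.h):

static inline bool eth_type_vlan(__be16 ethertype)
{
	switch (ethertype) {
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}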
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 0eb894b7c0bd..56caff2d01c4 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -284,8 +284,10 @@ static int __team_options_register(struct team *team,
return 0;
inst_rollback:
- for (i--; i >= 0; i--)
+ for (i--; i >= 0; i--) {
__team_option_inst_del_option(team, dst_opts[i]);
+ list_del(&dst_opts[i]->list);
+ }
i = option_count;
alloc_rollback:
@@ -1270,10 +1272,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
}
}
- netif_addr_lock_bh(dev);
- dev_uc_sync_multiple(port_dev, dev);
- dev_mc_sync_multiple(port_dev, dev);
- netif_addr_unlock_bh(dev);
+ if (dev->flags & IFF_UP) {
+ netif_addr_lock_bh(dev);
+ dev_uc_sync_multiple(port_dev, dev);
+ dev_mc_sync_multiple(port_dev, dev);
+ netif_addr_unlock_bh(dev);
+ }
port->index = -1;
list_add_tail_rcu(&port->list, &team->port_list);
@@ -1344,8 +1348,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
netdev_rx_handler_unregister(port_dev);
team_port_disable_netpoll(port);
vlan_vids_del_by_dev(port_dev, dev);
- dev_uc_unsync(port_dev, dev);
- dev_mc_unsync(port_dev, dev);
+ if (dev->flags & IFF_UP) {
+ dev_uc_unsync(port_dev, dev);
+ dev_mc_unsync(port_dev, dev);
+ }
dev_close(port_dev);
team_port_leave(team, port);
@@ -1620,6 +1626,7 @@ static int team_init(struct net_device *dev)
team->dev = dev;
team_set_no_mode(team);
+ team->notifier_ctx = false;
team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
if (!team->pcpu_stats)
@@ -1694,6 +1701,14 @@ static int team_open(struct net_device *dev)
static int team_close(struct net_device *dev)
{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ dev_uc_unsync(port->dev, dev);
+ dev_mc_unsync(port->dev, dev);
+ }
+
return 0;
}
@@ -2108,7 +2123,12 @@ static const struct ethtool_ops team_ethtool_ops = {
static void team_setup_by_port(struct net_device *dev,
struct net_device *port_dev)
{
- dev->header_ops = port_dev->header_ops;
+ struct team *team = netdev_priv(dev);
+
+ if (port_dev->type == ARPHRD_ETHER)
+ dev->header_ops = team->header_ops_cache;
+ else
+ dev->header_ops = port_dev->header_ops;
dev->type = port_dev->type;
dev->hard_header_len = port_dev->hard_header_len;
dev->needed_headroom = port_dev->needed_headroom;
@@ -2116,6 +2136,15 @@ static void team_setup_by_port(struct net_device *dev,
dev->mtu = port_dev->mtu;
memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
eth_hw_addr_inherit(dev, port_dev);
+
+ if (port_dev->flags & IFF_POINTOPOINT) {
+ dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
+ } else if ((port_dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ==
+ (IFF_BROADCAST | IFF_MULTICAST)) {
+ dev->flags |= (IFF_BROADCAST | IFF_MULTICAST);
+ dev->flags &= ~(IFF_POINTOPOINT | IFF_NOARP);
+ }
}
static int team_dev_type_check_change(struct net_device *dev,
@@ -2146,8 +2175,11 @@ static int team_dev_type_check_change(struct net_device *dev,
static void team_setup(struct net_device *dev)
{
+ struct team *team = netdev_priv(dev);
+
ether_setup(dev);
dev->max_mtu = ETH_MAX_MTU;
+ team->header_ops_cache = dev->header_ops;
dev->netdev_ops = &team_netdev_ops;
dev->ethtool_ops = &team_ethtool_ops;
@@ -2172,7 +2204,9 @@ static void team_setup(struct net_device *dev)
dev->hw_features = TEAM_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_FILTER;
dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
dev->features |= dev->hw_features;
@@ -3003,7 +3037,11 @@ static int team_device_event(struct notifier_block *unused,
team_del_slave(port->team->dev, dev);
break;
case NETDEV_FEAT_CHANGE:
- team_compute_features(port->team);
+ if (!port->team->notifier_ctx) {
+ port->team->notifier_ctx = true;
+ team_compute_features(port->team);
+ port->team->notifier_ctx = false;
+ }
break;
case NETDEV_PRECHANGEMTU:
/* Forbid to change mtu of underlaying device */
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index dacb4f680fd4..c1f0195464b1 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -958,12 +958,11 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0,
ip_hdr(skb)->protocol, 0);
- } else if (skb_is_gso_v6(skb)) {
+ } else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0,
IPPROTO_TCP, 0);
- return false;
} else if (protocol == htons(ETH_P_IPV6)) {
tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -1339,12 +1338,21 @@ static int __init tbnet_init(void)
TBNET_MATCH_FRAGS_ID);
ret = tb_register_property_dir("network", tbnet_dir);
- if (ret) {
- tb_property_free_dir(tbnet_dir);
- return ret;
- }
+ if (ret)
+ goto err_free_dir;
+
+ ret = tb_register_service_driver(&tbnet_driver);
+ if (ret)
+ goto err_unregister;
- return tb_register_service_driver(&tbnet_driver);
+ return 0;
+
+err_unregister:
+ tb_unregister_property_dir("network", tbnet_dir);
+err_free_dir:
+ tb_property_free_dir(tbnet_dir);
+
+ return ret;
}
module_init(tbnet_init);
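The tbnet_init() rework above is the standard goto-unwind idiom: acquire resources in order and, on failure, release everything acquired so far in reverse order. A compilable sketch of the shape, with stub functions standing in for the tb_* calls:

#include <stdio.h>

/* Stubs standing in for the real registration calls. */
static int register_dir(void)	 { return 0; }
static void unregister_dir(void) { puts("dir unregistered"); }
static int register_driver(void) { return -1; /* simulate failure */ }

static int init(void)
{
	int ret;

	ret = register_dir();
	if (ret)
		goto err_out;

	ret = register_driver();
	if (ret)
		goto err_unregister_dir;	/* unwind in reverse order */

	return 0;

err_unregister_dir:
	unregister_dir();
err_out:
	return ret;
}

int main(void)
{
	return init() ? 1 : 0;
}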
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index dd02fcc97277..47958e6bd77f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -250,6 +250,9 @@ struct tun_struct {
struct tun_prog __rcu *steering_prog;
struct tun_prog __rcu *filter_prog;
struct ethtool_link_ksettings link_ksettings;
+ /* init args */
+ struct file *file;
+ struct ifreq *ifr;
};
struct veth {
@@ -275,6 +278,9 @@ void *tun_ptr_to_xdp(void *ptr)
}
EXPORT_SYMBOL(tun_ptr_to_xdp);
+static void tun_flow_init(struct tun_struct *tun);
+static void tun_flow_uninit(struct tun_struct *tun);
+
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
struct tun_file *tfile = container_of(napi, struct tun_file, napi);
@@ -709,6 +715,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->tfiles[tun->numqueues - 1]);
ntfile = rtnl_dereference(tun->tfiles[index]);
ntfile->queue_index = index;
+ ntfile->xdp_rxq.queue_index = index;
rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
NULL);
@@ -742,7 +749,6 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
if (tun)
xdp_rxq_info_unreg(&tfile->xdp_rxq);
ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
- sock_put(&tfile->sk);
}
}
@@ -758,6 +764,9 @@ static void tun_detach(struct tun_file *tfile, bool clean)
if (dev)
netdev_state_change(dev);
rtnl_unlock();
+
+ if (clean)
+ sock_put(&tfile->sk);
}
static void tun_detach_all(struct net_device *dev)
@@ -1025,6 +1034,49 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
static const struct ethtool_ops tun_ethtool_ops;
+static int tun_net_init(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct ifreq *ifr = tun->ifr;
+ int err;
+
+ tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
+ if (!tun->pcpu_stats)
+ return -ENOMEM;
+
+ spin_lock_init(&tun->lock);
+
+ err = security_tun_dev_alloc_security(&tun->security);
+ if (err < 0) {
+ free_percpu(tun->pcpu_stats);
+ return err;
+ }
+
+ tun_flow_init(tun);
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
+ TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ dev->features = dev->hw_features | NETIF_F_LLTX;
+ dev->vlan_features = dev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
+
+ tun->flags = (tun->flags & ~TUN_FEATURES) |
+ (ifr->ifr_flags & TUN_FEATURES);
+
+ INIT_LIST_HEAD(&tun->disabled);
+ err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
+ ifr->ifr_flags & IFF_NAPI_FRAGS, false);
+ if (err < 0) {
+ tun_flow_uninit(tun);
+ security_tun_dev_free_security(tun->security);
+ free_percpu(tun->pcpu_stats);
+ return err;
+ }
+ return 0;
+}
+
/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
@@ -1283,6 +1335,7 @@ static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
}
static const struct net_device_ops tun_netdev_ops = {
+ .ndo_init = tun_net_init,
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
.ndo_stop = tun_net_close,
@@ -1363,6 +1416,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
}
static const struct net_device_ops tap_netdev_ops = {
+ .ndo_init = tun_net_init,
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
.ndo_stop = tun_net_close,
@@ -1403,7 +1457,7 @@ static void tun_flow_uninit(struct tun_struct *tun)
#define MAX_MTU 65535
/* Initialize net device. */
-static void tun_net_init(struct net_device *dev)
+static void tun_net_initialize(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
@@ -1492,7 +1546,8 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
int err;
int i;
- if (it->nr_segs > MAX_SKB_FRAGS + 1)
+ if (it->nr_segs > MAX_SKB_FRAGS + 1 ||
+ len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN))
return ERR_PTR(-EMSGSIZE);
local_bh_disable();
@@ -1618,7 +1673,7 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
if (zerocopy)
return false;
- if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
+ if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
return false;
@@ -2001,17 +2056,25 @@ drop:
skb_headlen(skb));
if (unlikely(headlen > skb_headlen(skb))) {
+ WARN_ON_ONCE(1);
+ err = -ENOMEM;
this_cpu_inc(tun->pcpu_stats->rx_dropped);
+napi_busy:
napi_free_frags(&tfile->napi);
rcu_read_unlock();
mutex_unlock(&tfile->napi_mutex);
- WARN_ON(1);
- return -ENOMEM;
+ return err;
}
- local_bh_disable();
- napi_gro_frags(&tfile->napi);
- local_bh_enable();
+ if (likely(napi_schedule_prep(&tfile->napi))) {
+ local_bh_disable();
+ napi_gro_frags(&tfile->napi);
+ napi_complete(&tfile->napi);
+ local_bh_enable();
+ } else {
+ err = -EBUSY;
+ goto napi_busy;
+ }
mutex_unlock(&tfile->napi_mutex);
} else if (tfile->napi_enabled) {
struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
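napi_gro_frags() may only be called by the current owner of the NAPI instance; napi_schedule_prep() is what atomically claims that ownership, and the new -EBUSY path above gives up instead of racing when someone else already holds it. The claim/release logic can be modelled with an atomic flag (a sketch of the idea, not the kernel's implementation):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

/* Model of napi_schedule_prep()-style ownership: atomically claim an
 * exclusive flag; polling is only legal while you hold it. */
static atomic_flag owned = ATOMIC_FLAG_INIT;

static int claim(void)	  { return !atomic_flag_test_and_set(&owned); }
static void release(void) { atomic_flag_clear(&owned); }

static int process_frags(void)
{
	if (!claim())
		return -EBUSY;	/* someone else is polling; retry later */
	puts("processing frags");
	release();
	return 0;
}

int main(void)
{
	printf("first: %d\n", process_frags());	/* -> 0 */
	return 0;
}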
@@ -2828,9 +2891,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (!dev)
return -ENOMEM;
- err = dev_get_valid_name(net, dev, name);
- if (err < 0)
- goto err_free_dev;
dev_net_set(dev, net);
dev->rtnl_link_ops = &tun_link_ops;
@@ -2849,41 +2909,16 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->rx_batched = 0;
RCU_INIT_POINTER(tun->steering_prog, NULL);
- tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
- if (!tun->pcpu_stats) {
- err = -ENOMEM;
- goto err_free_dev;
- }
-
- spin_lock_init(&tun->lock);
-
- err = security_tun_dev_alloc_security(&tun->security);
- if (err < 0)
- goto err_free_stat;
-
- tun_net_init(dev);
- tun_flow_init(tun);
-
- dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
- TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX;
- dev->features = dev->hw_features | NETIF_F_LLTX;
- dev->vlan_features = dev->features &
- ~(NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX);
+ tun->ifr = ifr;
+ tun->file = file;
- tun->flags = (tun->flags & ~TUN_FEATURES) |
- (ifr->ifr_flags & TUN_FEATURES);
-
- INIT_LIST_HEAD(&tun->disabled);
- err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
- ifr->ifr_flags & IFF_NAPI_FRAGS, false);
- if (err < 0)
- goto err_free_flow;
+ tun_net_initialize(dev);
err = register_netdevice(tun->dev);
- if (err < 0)
- goto err_detach;
+ if (err < 0) {
+ free_netdev(dev);
+ return err;
+ }
/* free_netdev() won't check refcnt; to avoid a race
* with dev_put() we need to publish tun after registration.
*/
@@ -2902,20 +2937,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
strcpy(ifr->ifr_name, tun->dev->name);
return 0;
-
-err_detach:
- tun_detach_all(dev);
- /* register_netdevice() already called tun_free_netdev() */
- goto err_free_dev;
-
-err_free_flow:
- tun_flow_uninit(tun);
- security_tun_dev_free_security(tun->security);
-err_free_stat:
- free_percpu(tun->pcpu_stats);
-err_free_dev:
- free_netdev(dev);
- return err;
}
static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
@@ -3114,10 +3135,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
struct net *net = sock_net(&tfile->sk);
struct tun_struct *tun;
void __user* argp = (void __user*)arg;
- unsigned int ifindex, carrier;
+ unsigned int carrier;
struct ifreq ifr;
kuid_t owner;
kgid_t group;
+ int ifindex;
int sndbuf;
int vnet_hdr_sz;
int le;
@@ -3174,7 +3196,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = -EFAULT;
if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
goto unlock;
-
+ ret = -EINVAL;
+ if (ifindex < 0)
+ goto unlock;
ret = 0;
tfile->ifindex = ifindex;
goto unlock;
@@ -3514,7 +3538,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
tfile->socket.file = file;
tfile->socket.ops = &tun_socket_ops;
- sock_init_data(&tfile->socket, &tfile->sk);
+ sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());
tfile->sk.sk_write_space = tun_sock_write_space;
tfile->sk.sk_sndbuf = INT_MAX;
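Changing ifindex from unsigned int to int in __tun_chr_ioctl() matters because the value comes straight from userspace; declaring it signed lets the new `ifindex < 0` check reject values that an unsigned comparison could never catch. A minimal sketch of the validation (hypothetical helper, not the driver's code):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper: copy a user-supplied index and reject
 * negatives. 'user_buf' stands in for the ioctl argument normally
 * fetched with copy_from_user(). */
static int set_ifindex(const void *user_buf, int *out)
{
	int ifindex;

	memcpy(&ifindex, user_buf, sizeof(ifindex));
	if (ifindex < 0)   /* 0x80000000.. would pass an unsigned check */
		return -EINVAL;

	*out = ifindex;
	return 0;
}

int main(void)
{
	int arg = -5, idx;

	printf("%d\n", set_ifindex(&arg, &idx));	/* -> -22 (-EINVAL) */
	return 0;
}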
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 68912e266826..892d58b38cf5 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1079,17 +1079,17 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u16 pkt_count = 0;
u64 desc_hdr = 0;
u16 vlan_tag = 0;
- u32 skb_len = 0;
+ u32 skb_len;
if (!skb)
goto err;
- if (skb->len == 0)
+ skb_len = skb->len;
+ if (skb_len < sizeof(desc_hdr))
goto err;
- skb_len = skb->len;
/* RX Descriptor Header */
- skb_trim(skb, skb->len - sizeof(desc_hdr));
+ skb_trim(skb, skb_len - sizeof(desc_hdr));
desc_hdr = le64_to_cpup((u64 *)skb_tail_pointer(skb));
/* Check these packets */
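The aqc111 change replaces a `skb->len == 0` test with `skb_len < sizeof(desc_hdr)`: with unsigned lengths, `skb_len - sizeof(desc_hdr)` silently wraps to a huge value whenever the buffer is shorter than the 8-byte descriptor header. A standalone demonstration of the hazard:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t skb_len = 4;			/* runt frame */
	uint64_t desc_hdr;

	/* Wrong: unsigned subtraction wraps around. */
	uint32_t bad = skb_len - (uint32_t)sizeof(desc_hdr);
	printf("unchecked trim length: %u\n", bad);	/* 4294967292 */

	/* Right: refuse frames shorter than the descriptor header. */
	if (skb_len < sizeof(desc_hdr)) {
		puts("frame too short, dropped");
		return 0;
	}
	return 0;
}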
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 6101d82102e7..bf65262e2256 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -186,7 +186,9 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
u8 buf[ETH_ALEN];
struct ax88172a_private *priv;
- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ return ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 5ee3e457a79c..84dc9fb2b1f3 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1594,11 +1594,11 @@ static int ax88179_reset(struct usbnet *dev)
*tmp16 = AX_PHYPWR_RSTCTL_IPRL;
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
- msleep(200);
+ msleep(500);
*tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
- msleep(100);
+ msleep(200);
/* Ethernet PHY Auto Detach */
ax88179_auto_detach(dev, 0);
@@ -1690,7 +1690,7 @@ static const struct driver_info ax88179_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1703,7 +1703,7 @@ static const struct driver_info ax88178a_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1716,7 +1716,7 @@ static const struct driver_info cypress_GX3_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1729,7 +1729,7 @@ static const struct driver_info dlink_dub1312_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1742,7 +1742,7 @@ static const struct driver_info sitecom_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1755,7 +1755,7 @@ static const struct driver_info samsung_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1768,7 +1768,7 @@ static const struct driver_info lenovo_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1781,7 +1781,7 @@ static const struct driver_info belkin_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index eee402a59f6d..08800f2a7e45 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -605,6 +605,13 @@ static const struct usb_device_id products[] = {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
+ .idProduct = 0x8005, /* A-300 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = 0,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
.idProduct = 0x8006, /* B-500/SL-5600 */
ZAURUS_MASTER_INTERFACE,
.driver_info = 0,
@@ -612,11 +619,25 @@ static const struct usb_device_id products[] = {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
+ .idProduct = 0x8006, /* B-500/SL-5600 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = 0,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
.idProduct = 0x8007, /* C-700 */
ZAURUS_MASTER_INTERFACE,
.driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
+ .idProduct = 0x8007, /* C-700 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = 0,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x9031, /* C-750 C-760 */
@@ -764,6 +785,13 @@ static const struct usb_device_id products[] = {
},
#endif
+/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* ThinkPad USB-C Dock (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 414341c9cf5a..6ad1fb00a35c 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -663,6 +663,11 @@ static const struct usb_device_id mbim_devs[] = {
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
+ /* Telit FE990 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1081, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+ },
+
/* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 1c9a1b94f6e2..4824385fe2c7 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -175,10 +175,17 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
u32 val, max, min;
/* clamp new_tx to sane values */
- min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
- max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
- if (max == 0)
+ if (ctx->is_ndp16)
+ min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
+ else
+ min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);
+
+ if (le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) == 0)
max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */
+ else
+ max = clamp_t(u32, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize),
+ USB_CDC_NCM_NTB_MIN_OUT_SIZE,
+ CDC_NCM_NTB_MAX_SIZE_TX);
/* some devices set dwNtbOutMaxSize too low for the above default */
min = min(min, max);
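The rewritten clamp above distinguishes "device did not report dwNtbOutMaxSize" (fall back to the driver maximum) from "device reported a value" (clamp it into the legal NTB range). The arithmetic is easy to model in plain C (illustrative constants, not the real ones):

#include <stdint.h>
#include <stdio.h>

#define NTB_MIN_OUT_SIZE  2048u		/* illustrative floor */
#define NTB_MAX_SIZE_TX  32768u		/* illustrative driver ceiling */

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static uint32_t pick_max(uint32_t dev_max)
{
	if (dev_max == 0)		/* device left the field unset */
		return NTB_MAX_SIZE_TX;
	return clamp_u32(dev_max, NTB_MIN_OUT_SIZE, NTB_MAX_SIZE_TX);
}

int main(void)
{
	printf("%u %u %u\n", pick_max(0), pick_max(512), pick_max(65536));
	/* -> 32768 2048 32768 */
	return 0;
}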
@@ -309,10 +316,17 @@ static ssize_t ndp_to_end_store(struct device *d, struct device_attribute *attr
if (enable == (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
return len;
- if (enable && !ctx->delayed_ndp16) {
- ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
- if (!ctx->delayed_ndp16)
- return -ENOMEM;
+ if (enable) {
+ if (ctx->is_ndp16 && !ctx->delayed_ndp16) {
+ ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp16)
+ return -ENOMEM;
+ }
+ if (!ctx->is_ndp16 && !ctx->delayed_ndp32) {
+ ctx->delayed_ndp32 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp32)
+ return -ENOMEM;
+ }
}
/* flush pending data before changing flag */
@@ -514,6 +528,9 @@ static int cdc_ncm_init(struct usbnet *dev)
dev_err(&dev->intf->dev, "SET_CRC_MODE failed\n");
}
+ /* use ndp16 by default */
+ ctx->is_ndp16 = 1;
+
/* set NTB format, if both formats are supported.
*
* "The host shall only send this command while the NCM Data
@@ -521,14 +538,27 @@ static int cdc_ncm_init(struct usbnet *dev)
*/
if (le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported) &
USB_CDC_NCM_NTB32_SUPPORTED) {
- dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n");
- err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- USB_CDC_NCM_NTB16_FORMAT,
- iface_no, NULL, 0);
- if (err < 0)
+ if (ctx->drvflags & CDC_NCM_FLAG_PREFER_NTB32) {
+ ctx->is_ndp16 = 0;
+ dev_dbg(&dev->intf->dev, "Setting NTB format to 32-bit\n");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB32_FORMAT,
+ iface_no, NULL, 0);
+ } else {
+ ctx->is_ndp16 = 1;
+ dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0);
+ }
+ if (err < 0) {
+ ctx->is_ndp16 = 1;
dev_err(&dev->intf->dev, "SET_NTB_FORMAT failed\n");
+ }
}
/* set initial device values */
@@ -551,7 +581,10 @@ static int cdc_ncm_init(struct usbnet *dev)
ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
/* set up maximum NDP size */
- ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
+ if (ctx->is_ndp16)
+ ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
+ else
+ ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp32) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe32);
/* initial coalescing timer interval */
ctx->timer_interval = CDC_NCM_TIMER_INTERVAL_USEC * NSEC_PER_USEC;
@@ -736,7 +769,10 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
ctx->tx_curr_skb = NULL;
}
- kfree(ctx->delayed_ndp16);
+ if (ctx->is_ndp16)
+ kfree(ctx->delayed_ndp16);
+ else
+ kfree(ctx->delayed_ndp32);
kfree(ctx);
}
@@ -774,10 +810,8 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
u8 *buf;
int len;
int temp;
- int err;
u8 iface_no;
struct usb_cdc_parsed_header hdr;
- __le16 curr_ntb_format;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -881,32 +915,6 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
goto error2;
}
- /*
- * Some Huawei devices have been observed to come out of reset in NDP32 mode.
- * Let's check if this is the case, and set the device to NDP16 mode again if
- * needed.
- */
- if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) {
- err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT,
- USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
- 0, iface_no, &curr_ntb_format, 2);
- if (err < 0) {
- goto error2;
- }
-
- if (curr_ntb_format == cpu_to_le16(USB_CDC_NCM_NTB32_FORMAT)) {
- dev_info(&intf->dev, "resetting NTB format to 16-bit");
- err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- USB_CDC_NCM_NTB16_FORMAT,
- iface_no, NULL, 0);
-
- if (err < 0)
- goto error2;
- }
- }
-
cdc_ncm_find_endpoints(dev, ctx->data);
cdc_ncm_find_endpoints(dev, ctx->control);
if (!dev->in || !dev->out || !dev->status) {
@@ -931,9 +939,15 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
/* Allocate the delayed NDP if needed. */
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
- ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
- if (!ctx->delayed_ndp16)
- goto error2;
+ if (ctx->is_ndp16) {
+ ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp16)
+ goto error2;
+ } else {
+ ctx->delayed_ndp32 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp32)
+ goto error2;
+ }
dev_info(&intf->dev, "NDP will be placed at end of frame for this device.");
}
@@ -1057,7 +1071,7 @@ static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remai
/* return a pointer to a valid struct usb_cdc_ncm_ndp16 of type sign, possibly
* allocating a new one within skb
*/
-static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
+static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
{
struct usb_cdc_ncm_ndp16 *ndp16 = NULL;
struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
@@ -1112,12 +1126,73 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
return ndp16;
}
+static struct usb_cdc_ncm_ndp32 *cdc_ncm_ndp32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
+{
+ struct usb_cdc_ncm_ndp32 *ndp32 = NULL;
+ struct usb_cdc_ncm_nth32 *nth32 = (void *)skb->data;
+ size_t ndpoffset = le32_to_cpu(nth32->dwNdpIndex);
+
+ /* If NDP should be moved to the end of the NCM package, we can't follow the
+ * NTH32 header as we would normally do. NDP isn't written to the SKB yet, and
+ * the dwNdpIndex field in the header is not yet consistent with reality. It will be later.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+ if (ctx->delayed_ndp32->dwSignature == sign)
+ return ctx->delayed_ndp32;
+
+ /* We can only push a single NDP to the end. Return
+ * NULL to send what we've already got and queue this
+ * skb for later.
+ */
+ else if (ctx->delayed_ndp32->dwSignature)
+ return NULL;
+ }
+
+ /* follow the chain of NDPs, looking for a match */
+ while (ndpoffset) {
+ ndp32 = (struct usb_cdc_ncm_ndp32 *)(skb->data + ndpoffset);
+ if (ndp32->dwSignature == sign)
+ return ndp32;
+ ndpoffset = le32_to_cpu(ndp32->dwNextNdpIndex);
+ }
+
+ /* align new NDP */
+ if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+ cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size);
+
+ /* verify that there is room for the NDP and the datagram (reserve) */
+ if ((ctx->tx_curr_size - skb->len - reserve) < ctx->max_ndp_size)
+ return NULL;
+
+ /* link to it */
+ if (ndp32)
+ ndp32->dwNextNdpIndex = cpu_to_le32(skb->len);
+ else
+ nth32->dwNdpIndex = cpu_to_le32(skb->len);
+
+ /* push a new empty NDP */
+ if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+ ndp32 = skb_put_zero(skb, ctx->max_ndp_size);
+ else
+ ndp32 = ctx->delayed_ndp32;
+
+ ndp32->dwSignature = sign;
+ ndp32->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp32) + sizeof(struct usb_cdc_ncm_dpe32));
+ return ndp32;
+}
+
struct sk_buff *
cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- struct usb_cdc_ncm_nth16 *nth16;
- struct usb_cdc_ncm_ndp16 *ndp16;
+ union {
+ struct usb_cdc_ncm_nth16 *nth16;
+ struct usb_cdc_ncm_nth32 *nth32;
+ } nth;
+ union {
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct usb_cdc_ncm_ndp32 *ndp32;
+ } ndp;
struct sk_buff *skb_out;
u16 n = 0, index, ndplen;
u8 ready2send = 0;
@@ -1157,6 +1232,9 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* further.
*/
if (skb_out == NULL) {
+ /* If even the smallest allocation fails, abort. */
+ if (ctx->tx_curr_size == USB_CDC_NCM_NTB_MIN_OUT_SIZE)
+ goto alloc_failed;
ctx->tx_low_mem_max_cnt = min(ctx->tx_low_mem_max_cnt + 1,
(unsigned)CDC_NCM_LOW_MEM_MAX_CNT);
ctx->tx_low_mem_val = ctx->tx_low_mem_max_cnt;
@@ -1175,20 +1253,23 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
skb_out = alloc_skb(ctx->tx_curr_size, GFP_ATOMIC);
/* No allocation possible so we will abort */
- if (skb_out == NULL) {
- if (skb != NULL) {
- dev_kfree_skb_any(skb);
- dev->net->stats.tx_dropped++;
- }
- goto exit_no_skb;
- }
+ if (!skb_out)
+ goto alloc_failed;
ctx->tx_low_mem_val--;
}
- /* fill out the initial 16-bit NTB header */
- nth16 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth16));
- nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
- nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
- nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
+ if (ctx->is_ndp16) {
+ /* fill out the initial 16-bit NTB header */
+ nth.nth16 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth16));
+ nth.nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+ nth.nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+ nth.nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
+ } else {
+ /* fill out the initial 32-bit NTB header */
+ nth.nth32 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth32));
+ nth.nth32->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH32_SIGN);
+ nth.nth32->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth32));
+ nth.nth32->wSequence = cpu_to_le16(ctx->tx_seq++);
+ }
/* count total number of frames in this NTB */
ctx->tx_curr_frame_num = 0;
@@ -1210,13 +1291,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
}
/* get the appropriate NDP for this skb */
- ndp16 = cdc_ncm_ndp(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
+ if (ctx->is_ndp16)
+ ndp.ndp16 = cdc_ncm_ndp16(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
+ else
+ ndp.ndp32 = cdc_ncm_ndp32(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
/* align beginning of next frame */
cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_curr_size);
/* check if we had enough room left for both NDP and frame */
- if (!ndp16 || skb_out->len + skb->len + delayed_ndp_size > ctx->tx_curr_size) {
+ if ((ctx->is_ndp16 && !ndp.ndp16) || (!ctx->is_ndp16 && !ndp.ndp32) ||
+ skb_out->len + skb->len + delayed_ndp_size > ctx->tx_curr_size) {
if (n == 0) {
/* won't fit, MTU problem? */
dev_kfree_skb_any(skb);
@@ -1238,13 +1323,22 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
}
/* calculate the frame number within this NDP */
- ndplen = le16_to_cpu(ndp16->wLength);
- index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
+ if (ctx->is_ndp16) {
+ ndplen = le16_to_cpu(ndp.ndp16->wLength);
+ index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
+
+ /* OK, add this skb */
+ ndp.ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
+ ndp.ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
+ ndp.ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
+ } else {
+ ndplen = le16_to_cpu(ndp.ndp32->wLength);
+ index = (ndplen - sizeof(struct usb_cdc_ncm_ndp32)) / sizeof(struct usb_cdc_ncm_dpe32) - 1;
- /* OK, add this skb */
- ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
- ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
- ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
+ ndp.ndp32->dpe32[index].dwDatagramLength = cpu_to_le32(skb->len);
+ ndp.ndp32->dpe32[index].dwDatagramIndex = cpu_to_le32(skb_out->len);
+ ndp.ndp32->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe32));
+ }
skb_put_data(skb_out, skb->data, skb->len);
ctx->tx_curr_frame_payload += skb->len; /* count real tx payload data */
dev_kfree_skb_any(skb);
@@ -1291,13 +1385,22 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* If requested, put NDP at end of frame. */
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
- nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
- cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
- nth16->wNdpIndex = cpu_to_le16(skb_out->len);
- skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
+ if (ctx->is_ndp16) {
+ nth.nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
+ nth.nth16->wNdpIndex = cpu_to_le16(skb_out->len);
+ skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
+
+ /* Zero out delayed NDP - signature checking will naturally fail. */
+ ndp.ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
+ } else {
+ nth.nth32 = (struct usb_cdc_ncm_nth32 *)skb_out->data;
+ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
+ nth.nth32->dwNdpIndex = cpu_to_le32(skb_out->len);
+ skb_put_data(skb_out, ctx->delayed_ndp32, ctx->max_ndp_size);
- /* Zero out delayed NDP - signature checking will naturally fail. */
- ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
+ ndp.ndp32 = memset(ctx->delayed_ndp32, 0, ctx->max_ndp_size);
+ }
}
/* If collected data size is less or equal ctx->min_tx_pkt
@@ -1320,8 +1423,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
}
/* set final frame length */
- nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
- nth16->wBlockLength = cpu_to_le16(skb_out->len);
+ if (ctx->is_ndp16) {
+ nth.nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+ nth.nth16->wBlockLength = cpu_to_le16(skb_out->len);
+ } else {
+ nth.nth32 = (struct usb_cdc_ncm_nth32 *)skb_out->data;
+ nth.nth32->dwBlockLength = cpu_to_le32(skb_out->len);
+ }
/* return skb */
ctx->tx_curr_skb = NULL;
@@ -1339,6 +1447,11 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
return skb_out;
+alloc_failed:
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ dev->net->stats.tx_dropped++;
+ }
exit_no_skb:
/* Start timer, if there is a remaining non-empty skb */
if (ctx->tx_curr_skb != NULL && n > 0)
@@ -1404,7 +1517,12 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
goto error;
spin_lock_bh(&ctx->mtx);
- skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
+
+ if (ctx->is_ndp16)
+ skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
+ else
+ skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP32_NOCRC_SIGN));
+
spin_unlock_bh(&ctx->mtx);
return skb_out;
@@ -1465,6 +1583,54 @@ error:
}
EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth16);
+int cdc_ncm_rx_verify_nth32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
+{
+ struct usbnet *dev = netdev_priv(skb_in->dev);
+ struct usb_cdc_ncm_nth32 *nth32;
+ int len;
+ int ret = -EINVAL;
+
+ if (ctx == NULL)
+ goto error;
+
+ if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth32) +
+ sizeof(struct usb_cdc_ncm_ndp32))) {
+ netif_dbg(dev, rx_err, dev->net, "frame too short\n");
+ goto error;
+ }
+
+ nth32 = (struct usb_cdc_ncm_nth32 *)skb_in->data;
+
+ if (nth32->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH32_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid NTH32 signature <%#010x>\n",
+ le32_to_cpu(nth32->dwSignature));
+ goto error;
+ }
+
+ len = le32_to_cpu(nth32->dwBlockLength);
+ if (len > ctx->rx_max) {
+ netif_dbg(dev, rx_err, dev->net,
+ "unsupported NTB block length %u/%u\n", len,
+ ctx->rx_max);
+ goto error;
+ }
+
+ if ((ctx->rx_seq + 1) != le16_to_cpu(nth32->wSequence) &&
+ (ctx->rx_seq || le16_to_cpu(nth32->wSequence)) &&
+ !((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth32->wSequence))) {
+ netif_dbg(dev, rx_err, dev->net,
+ "sequence number glitch prev=%d curr=%d\n",
+ ctx->rx_seq, le16_to_cpu(nth32->wSequence));
+ }
+ ctx->rx_seq = le16_to_cpu(nth32->wSequence);
+
+ ret = le32_to_cpu(nth32->dwNdpIndex);
+error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth32);
+
/* verify NDP header and return number of datagrams, or negative error */
int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
{
@@ -1501,6 +1667,42 @@ error:
}
EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp16);
+/* verify NDP header and return number of datagrams, or negative error */
+int cdc_ncm_rx_verify_ndp32(struct sk_buff *skb_in, int ndpoffset)
+{
+ struct usbnet *dev = netdev_priv(skb_in->dev);
+ struct usb_cdc_ncm_ndp32 *ndp32;
+ int ret = -EINVAL;
+
+ if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp32)) > skb_in->len) {
+ netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n",
+ ndpoffset);
+ goto error;
+ }
+ ndp32 = (struct usb_cdc_ncm_ndp32 *)(skb_in->data + ndpoffset);
+
+ if (le16_to_cpu(ndp32->wLength) < USB_CDC_NCM_NDP32_LENGTH_MIN) {
+ netif_dbg(dev, rx_err, dev->net, "invalid DPT32 length <%u>\n",
+ le16_to_cpu(ndp32->wLength));
+ goto error;
+ }
+
+ ret = ((le16_to_cpu(ndp32->wLength) -
+ sizeof(struct usb_cdc_ncm_ndp32)) /
+ sizeof(struct usb_cdc_ncm_dpe32));
+ ret--; /* we process all NDP entries except for the last one */
+
+ if ((sizeof(struct usb_cdc_ncm_ndp32) +
+ ret * (sizeof(struct usb_cdc_ncm_dpe32))) > skb_in->len) {
+ netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret);
+ ret = -EINVAL;
+ }
+
+error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp32);
+
int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
{
struct sk_buff *skb;
@@ -1509,34 +1711,66 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
int nframes;
int x;
int offset;
- struct usb_cdc_ncm_ndp16 *ndp16;
- struct usb_cdc_ncm_dpe16 *dpe16;
+ union {
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct usb_cdc_ncm_ndp32 *ndp32;
+ } ndp;
+ union {
+ struct usb_cdc_ncm_dpe16 *dpe16;
+ struct usb_cdc_ncm_dpe32 *dpe32;
+ } dpe;
+
int ndpoffset;
int loopcount = 50; /* arbitrary max preventing infinite loop */
u32 payload = 0;
- ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+ if (ctx->is_ndp16)
+ ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+ else
+ ndpoffset = cdc_ncm_rx_verify_nth32(ctx, skb_in);
+
if (ndpoffset < 0)
goto error;
next_ndp:
- nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
- if (nframes < 0)
- goto error;
+ if (ctx->is_ndp16) {
+ nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
+ if (nframes < 0)
+ goto error;
- ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
+ ndp.ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
- if (ndp16->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN)) {
- netif_dbg(dev, rx_err, dev->net,
- "invalid DPT16 signature <%#010x>\n",
- le32_to_cpu(ndp16->dwSignature));
- goto err_ndp;
+ if (ndp.ndp16->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid DPT16 signature <%#010x>\n",
+ le32_to_cpu(ndp.ndp16->dwSignature));
+ goto err_ndp;
+ }
+ dpe.dpe16 = ndp.ndp16->dpe16;
+ } else {
+ nframes = cdc_ncm_rx_verify_ndp32(skb_in, ndpoffset);
+ if (nframes < 0)
+ goto error;
+
+ ndp.ndp32 = (struct usb_cdc_ncm_ndp32 *)(skb_in->data + ndpoffset);
+
+ if (ndp.ndp32->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP32_NOCRC_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid DPT32 signature <%#010x>\n",
+ le32_to_cpu(ndp.ndp32->dwSignature));
+ goto err_ndp;
+ }
+ dpe.dpe32 = ndp.ndp32->dpe32;
}
- dpe16 = ndp16->dpe16;
- for (x = 0; x < nframes; x++, dpe16++) {
- offset = le16_to_cpu(dpe16->wDatagramIndex);
- len = le16_to_cpu(dpe16->wDatagramLength);
+ for (x = 0; x < nframes; x++) {
+ if (ctx->is_ndp16) {
+ offset = le16_to_cpu(dpe.dpe16->wDatagramIndex);
+ len = le16_to_cpu(dpe.dpe16->wDatagramLength);
+ } else {
+ offset = le32_to_cpu(dpe.dpe32->dwDatagramIndex);
+ len = le32_to_cpu(dpe.dpe32->dwDatagramLength);
+ }
/*
* CDC NCM ch. 3.7
@@ -1567,10 +1801,19 @@ next_ndp:
usbnet_skb_return(dev, skb);
payload += len; /* count payload bytes in this NTB */
}
+
+ if (ctx->is_ndp16)
+ dpe.dpe16++;
+ else
+ dpe.dpe32++;
}
err_ndp:
/* are there more NDPs to process? */
- ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
+ if (ctx->is_ndp16)
+ ndpoffset = le16_to_cpu(ndp.ndp16->wNextNdpIndex);
+ else
+ ndpoffset = le32_to_cpu(ndp.ndp32->dwNextNdpIndex);
+
if (ndpoffset && loopcount--)
goto next_ndp;
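Both the 16-bit and the new 32-bit receive paths above follow the same scheme: validate the NTH header, then walk a chain of NDPs, each holding an array of datagram pointer entries. A compact userspace model of the NTH16 validation step (simplified types; the real structs use little-endian accessors, and this sketch assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NTH16_SIGN 0x484D434EU		/* "NCMH" */

struct nth16 {				/* simplified usb_cdc_ncm_nth16 */
	uint32_t dwSignature;
	uint16_t wHeaderLength;
	uint16_t wSequence;
	uint16_t wBlockLength;
	uint16_t wNdpIndex;
};

/* Return the NDP offset on success, -1 on a malformed header. */
static int verify_nth16(const uint8_t *buf, size_t len, size_t rx_max)
{
	struct nth16 h;

	if (len < sizeof(h))
		return -1;			/* frame too short */
	memcpy(&h, buf, sizeof(h));
	if (h.dwSignature != NTH16_SIGN)
		return -1;			/* bad signature */
	if (h.wBlockLength > rx_max || h.wBlockLength > len)
		return -1;			/* bogus block length */
	return h.wNdpIndex;
}

int main(void)
{
	uint8_t frame[64] = {0};
	struct nth16 h = { NTH16_SIGN, sizeof(h), 0, 64, 16 };

	memcpy(frame, &h, sizeof(h));
	printf("ndp offset: %d\n", verify_nth16(frame, sizeof(frame), 4096));
	return 0;
}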
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 915ac75b55fc..9b7db5fd9e08 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -221,13 +221,18 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
struct usbnet *dev = netdev_priv(netdev);
__le16 res;
+ int err;
if (phy_id) {
netdev_dbg(dev->net, "Only internal phy supported\n");
return 0;
}
- dm_read_shared_word(dev, 1, loc, &res);
+ err = dm_read_shared_word(dev, 1, loc, &res);
+ if (err < 0) {
+ netdev_err(dev->net, "MDIO read error: %d\n", err);
+ return 0;
+ }
netdev_dbg(dev->net,
"dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index e15a472c6a54..099d84827004 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -77,11 +77,11 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
*/
drvflags |= CDC_NCM_FLAG_NDP_TO_END;
- /* Additionally, it has been reported that some Huawei E3372H devices, with
- * firmware version 21.318.01.00.541, come out of reset in NTB32 format mode, hence
- * needing to be set to the NTB16 one again.
+ /* For many Huawei devices NTB32 is the default and the mode they
+ * work best in. Huawei E5785 and E5885 devices refuse to work in NTB16 mode at all.
*/
- drvflags |= CDC_NCM_FLAG_RESET_NTB16;
+ drvflags |= CDC_NCM_FLAG_PREFER_NTB32;
+
ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
if (ret)
goto err;
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index fc5895f85cee..a552bb1665b8 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -65,8 +65,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
if (status != 0) {
netdev_err(dev->net,
- "Error sending init packet. Status %i, length %i\n",
- status, act_len);
+ "Error sending init packet. Status %i\n",
+ status);
return status;
}
else if (act_len != init_msg_len) {
@@ -83,8 +83,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
if (status != 0)
netdev_err(dev->net,
- "Error receiving init result. Status %i, length %i\n",
- status, act_len);
+ "Error receiving init result. Status %i\n",
+ status);
else if (act_len != expected_len)
netdev_err(dev->net, "Unexpected init result length: %i\n",
act_len);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index ce3c8f476d75..b0efaf56d78f 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -90,6 +90,12 @@
/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER (1 * 1000)
+/* time to wait for MAC or FCT to stop (jiffies) */
+#define HW_DISABLE_TIMEOUT (HZ / 10)
+
+/* time to wait between polling MAC or FCT state (ms) */
+#define HW_DISABLE_DELAY_MS 1
+
/* defines interrupts from interrupt EP */
#define MAX_INT_EP (32)
#define INT_EP_INTEP (31)
@@ -384,8 +390,9 @@ struct lan78xx_net {
struct urb *urb_intr;
struct usb_anchor deferred;
+ struct mutex dev_mutex; /* serialise open/stop wrt suspend/resume */
struct mutex phy_mutex; /* for phy access */
- unsigned pipe_in, pipe_out, pipe_intr;
+ unsigned int pipe_in, pipe_out, pipe_intr;
u32 hard_mtu; /* count any extra framing */
size_t rx_urb_size; /* size for rx urbs */
@@ -395,7 +402,7 @@ struct lan78xx_net {
wait_queue_head_t *wait;
unsigned char suspend_count;
- unsigned maxpacket;
+ unsigned int maxpacket;
struct timer_list delay;
struct timer_list stat_monitor;
@@ -479,6 +486,26 @@ static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
return ret;
}
+static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
+ u32 data)
+{
+ int ret;
+ u32 buf;
+
+ ret = lan78xx_read_reg(dev, reg, &buf);
+ if (ret < 0)
+ return ret;
+
+ buf &= ~mask;
+ buf |= (mask & data);
+
+ ret = lan78xx_write_reg(dev, reg, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int lan78xx_read_stats(struct lan78xx_net *dev,
struct lan78xx_statstage *data)
{
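lan78xx_update_reg() above is a read-modify-write helper: it clears the bits in `mask`, then sets whichever of those bits are set in `data`, leaving everything outside the mask untouched. The bit manipulation is worth seeing in isolation (standalone model):

#include <stdint.h>
#include <stdio.h>

/* Read-modify-write on an in-memory register: only bits inside
 * 'mask' are affected; 'data' supplies their new values. */
static void update_reg(uint32_t *reg, uint32_t mask, uint32_t data)
{
	uint32_t v = *reg;

	v &= ~mask;		/* clear the field */
	v |= (mask & data);	/* set requested bits within the field */
	*reg = v;
}

int main(void)
{
	uint32_t reg = 0xFF00FF00;

	update_reg(&reg, 0x000000F0, 0x00000050);
	printf("0x%08X\n", reg);	/* 0xFF00FF50 */
	return 0;
}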
@@ -504,7 +531,7 @@ static int lan78xx_read_stats(struct lan78xx_net *dev,
if (likely(ret >= 0)) {
src = (u32 *)stats;
dst = (u32 *)data;
- for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
+ for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
le32_to_cpus(&src[i]);
dst[i] = src[i];
}
@@ -518,10 +545,11 @@ static int lan78xx_read_stats(struct lan78xx_net *dev,
return ret;
}
-#define check_counter_rollover(struct1, dev_stats, member) { \
- if (struct1->member < dev_stats.saved.member) \
- dev_stats.rollover_count.member++; \
- }
+#define check_counter_rollover(struct1, dev_stats, member) \
+ do { \
+ if ((struct1)->member < (dev_stats).saved.member) \
+ (dev_stats).rollover_count.member++; \
+ } while (0)
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
struct lan78xx_statstage *stats)
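The check_counter_rollover() rewrite above is the standard do { ... } while (0) hygiene fix: the old bare brace block breaks when the macro is used as the body of an if/else, and the added parentheses keep argument expressions from re-associating. The hazard is easy to reproduce:

#include <stdio.h>

/* Brace-block macro: the stray ';' after BAD() below terminates the
 * if statement, so a following 'else' no longer pairs with it. */
#define BAD(x)	{ if ((x) < 0) puts("negative"); }
#define GOOD(x)	do { if ((x) < 0) puts("negative"); } while (0)

int main(void)
{
	int v = 1;

	/* This would not compile with BAD(v); the 'else' would dangle:
	 *   if (v) BAD(v); else puts("zero");
	 */
	if (v)
		GOOD(v);	/* expands cleanly; ';' belongs to while (0) */
	else
		puts("zero");
	return 0;
}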
@@ -824,20 +852,19 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
int i;
- int ret;
u32 buf;
unsigned long timeout;
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ lan78xx_write_reg(dev, OTP_PWR_DN, 0);
timeout = jiffies + HZ;
do {
usleep_range(1, 10);
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN");
@@ -847,18 +874,18 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
}
for (i = 0; i < length; i++) {
- ret = lan78xx_write_reg(dev, OTP_ADDR1,
- ((offset + i) >> 8) & OTP_ADDR1_15_11);
- ret = lan78xx_write_reg(dev, OTP_ADDR2,
- ((offset + i) & OTP_ADDR2_10_3));
+ lan78xx_write_reg(dev, OTP_ADDR1,
+ ((offset + i) >> 8) & OTP_ADDR1_15_11);
+ lan78xx_write_reg(dev, OTP_ADDR2,
+ ((offset + i) & OTP_ADDR2_10_3));
- ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
- ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
timeout = jiffies + HZ;
do {
udelay(1);
- ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ lan78xx_read_reg(dev, OTP_STATUS, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_STATUS");
@@ -866,7 +893,7 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
}
} while (buf & OTP_STATUS_BUSY_);
- ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+ lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
data[i] = (u8)(buf & 0xFF);
}
@@ -878,20 +905,19 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
int i;
- int ret;
u32 buf;
unsigned long timeout;
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ lan78xx_write_reg(dev, OTP_PWR_DN, 0);
timeout = jiffies + HZ;
do {
udelay(1);
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN completion");
@@ -901,21 +927,21 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
}
/* set to BYTE program mode */
- ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+ lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
for (i = 0; i < length; i++) {
- ret = lan78xx_write_reg(dev, OTP_ADDR1,
- ((offset + i) >> 8) & OTP_ADDR1_15_11);
- ret = lan78xx_write_reg(dev, OTP_ADDR2,
- ((offset + i) & OTP_ADDR2_10_3));
- ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
- ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
- ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ lan78xx_write_reg(dev, OTP_ADDR1,
+ ((offset + i) >> 8) & OTP_ADDR1_15_11);
+ lan78xx_write_reg(dev, OTP_ADDR2,
+ ((offset + i) & OTP_ADDR2_10_3));
+ lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
+ lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
+ lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
timeout = jiffies + HZ;
do {
udelay(1);
- ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ lan78xx_read_reg(dev, OTP_STATUS, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"Timeout on OTP_STATUS completion");
@@ -964,7 +990,7 @@ static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
usleep_range(40, 100);
}
- netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
+ netdev_warn(dev->net, "%s timed out", __func__);
return -EIO;
}
@@ -977,7 +1003,7 @@ static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
int i, ret;
if (usb_autopm_get_interface(dev->intf) < 0)
- return 0;
+ return 0;
mutex_lock(&pdata->dataport_mutex);
@@ -1040,7 +1066,6 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param)
container_of(param, struct lan78xx_priv, set_multicast);
struct lan78xx_net *dev = pdata->dev;
int i;
- int ret;
netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
pdata->rfe_ctl);
@@ -1049,14 +1074,14 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param)
DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
for (i = 1; i < NUM_OF_MAF; i++) {
- ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
- ret = lan78xx_write_reg(dev, MAF_LO(i),
- pdata->pfilter_table[i][1]);
- ret = lan78xx_write_reg(dev, MAF_HI(i),
- pdata->pfilter_table[i][0]);
+ lan78xx_write_reg(dev, MAF_HI(i), 0);
+ lan78xx_write_reg(dev, MAF_LO(i),
+ pdata->pfilter_table[i][1]);
+ lan78xx_write_reg(dev, MAF_HI(i),
+ pdata->pfilter_table[i][0]);
}
- ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
static void lan78xx_set_multicast(struct net_device *netdev)
@@ -1072,11 +1097,12 @@ static void lan78xx_set_multicast(struct net_device *netdev)
RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
- pdata->mchash_table[i] = 0;
+ pdata->mchash_table[i] = 0;
+
/* pfilter_table[0] has own HW address */
for (i = 1; i < NUM_OF_MAF; i++) {
- pdata->pfilter_table[i][0] =
- pdata->pfilter_table[i][1] = 0;
+ pdata->pfilter_table[i][0] = 0;
+ pdata->pfilter_table[i][1] = 0;
}
pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
@@ -1126,7 +1152,6 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
u16 lcladv, u16 rmtadv)
{
u32 flow = 0, fct_flow = 0;
- int ret;
u8 cap;
if (dev->fc_autoneg)
@@ -1149,10 +1174,10 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
(cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
(cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
- ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
+ lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
/* threshold value should be set before enabling flow */
- ret = lan78xx_write_reg(dev, FLOW, flow);
+ lan78xx_write_reg(dev, FLOW, flow);
return 0;
}
@@ -1167,7 +1192,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
/* clear LAN78xx interrupt status */
ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
if (unlikely(ret < 0))
- return -EIO;
+ return ret;
mutex_lock(&phydev->lock);
phy_read_status(phydev);
@@ -1180,11 +1205,11 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
/* reset MAC */
ret = lan78xx_read_reg(dev, MAC_CR, &buf);
if (unlikely(ret < 0))
- return -EIO;
+ return ret;
buf |= MAC_CR_RST_;
ret = lan78xx_write_reg(dev, MAC_CR, buf);
if (unlikely(ret < 0))
- return -EIO;
+ return ret;
del_timer(&dev->stat_monitor);
} else if (link && !dev->link_on) {
@@ -1196,18 +1221,30 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
if (ecmd.base.speed == 1000) {
/* disable U2 */
ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ if (ret < 0)
+ return ret;
buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+ if (ret < 0)
+ return ret;
/* enable U1 */
ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ if (ret < 0)
+ return ret;
buf |= USB_CFG1_DEV_U1_INIT_EN_;
ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+ if (ret < 0)
+ return ret;
} else {
/* enable U1 & U2 */
ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ if (ret < 0)
+ return ret;
buf |= USB_CFG1_DEV_U2_INIT_EN_;
buf |= USB_CFG1_DEV_U1_INIT_EN_;
ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+ if (ret < 0)
+ return ret;
}
}
@@ -1225,6 +1262,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
radv);
+ if (ret < 0)
+ return ret;
if (!timer_pending(&dev->stat_monitor)) {
dev->delta = 1;
@@ -1235,7 +1274,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
tasklet_schedule(&dev->bh);
}
- return ret;
+ return 0;
}
/* some work can't be done in tasklets, so we use keventd
@@ -1271,9 +1310,10 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
generic_handle_irq(dev->domain_data.phyirq);
local_irq_enable();
}
- } else
+ } else {
netdev_warn(dev->net,
"unexpected interrupt: 0x%08x\n", intdata);
+ }
}
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
@@ -1362,7 +1402,7 @@ static void lan78xx_get_wol(struct net_device *netdev,
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
if (usb_autopm_get_interface(dev->intf) < 0)
- return;
+ return;
ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
if (unlikely(ret < 0)) {
@@ -1681,11 +1721,10 @@ static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
u32 addr_lo, addr_hi;
- int ret;
u8 addr[6];
- ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
- ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+ lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+ lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
addr[0] = addr_lo & 0xFF;
addr[1] = (addr_lo >> 8) & 0xFF;
@@ -1718,12 +1757,12 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
(addr[2] << 16) | (addr[3] << 24);
addr_hi = addr[4] | (addr[5] << 8);
- ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
}
- ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
ether_addr_copy(dev->net->dev_addr, addr);
}
@@ -1856,33 +1895,8 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
static void lan78xx_link_status_change(struct net_device *net)
{
struct phy_device *phydev = net->phydev;
- int ret, temp;
-
- /* At forced 100 F/H mode, chip may fail to set mode correctly
- * when cable is switched between long(~50+m) and short one.
- * As workaround, set to 10 before setting to 100
- * at forced 100 F/H mode.
- */
- if (!phydev->autoneg && (phydev->speed == 100)) {
- /* disable phy interrupt */
- temp = phy_read(phydev, LAN88XX_INT_MASK);
- temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
- ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
- temp = phy_read(phydev, MII_BMCR);
- temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
- phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
- temp |= BMCR_SPEED100;
- phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
-
- /* clear pending interrupt generated while workaround */
- temp = phy_read(phydev, LAN88XX_INT_STS);
-
- /* enable phy interrupt back */
- temp = phy_read(phydev, LAN88XX_INT_MASK);
- temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
- ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
- }
+ phy_print_status(phydev);
}
static int irq_map(struct irq_domain *d, unsigned int irq,
@@ -1935,14 +1949,13 @@ static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
struct lan78xx_net *dev =
container_of(data, struct lan78xx_net, domain_data);
u32 buf;
- int ret;
/* call register access here because irq_bus_lock & irq_bus_sync_unlock
* are the only two callbacks executed in a non-atomic context.
*/
- ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ lan78xx_read_reg(dev, INT_EP_CTL, &buf);
if (buf != data->irqenable)
- ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
+ lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
mutex_unlock(&data->irq_lock);
}
@@ -2009,7 +2022,6 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
static int lan8835_fixup(struct phy_device *phydev)
{
int buf;
- int ret;
struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
@@ -2019,11 +2031,11 @@ static int lan8835_fixup(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
/* RGMII MAC TXC Delay Enable */
- ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
- MAC_RGMII_ID_TXC_DELAY_EN_);
+ lan78xx_write_reg(dev, MAC_RGMII_ID,
+ MAC_RGMII_ID_TXC_DELAY_EN_);
/* RGMII TX DLL Tune Adjust */
- ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
+ lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
@@ -2207,28 +2219,27 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
- int ret = 0;
u32 buf;
bool rxenabled;
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ lan78xx_read_reg(dev, MAC_RX, &buf);
rxenabled = ((buf & MAC_RX_RXEN_) != 0);
if (rxenabled) {
buf &= ~MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
}
/* add 4 to size for FCS */
buf &= ~MAC_RX_MAX_SIZE_MASK_;
buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
if (rxenabled) {
buf |= MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
}
return 0;
@@ -2291,7 +2302,11 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
- ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
+
+ lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
netdev->mtu = new_mtu;
@@ -2306,6 +2321,8 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
}
}
+ usb_autopm_put_interface(dev->intf);
+
return 0;
}
@@ -2314,7 +2331,6 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
struct lan78xx_net *dev = netdev_priv(netdev);
struct sockaddr *addr = p;
u32 addr_lo, addr_hi;
- int ret;
if (netif_running(netdev))
return -EBUSY;
@@ -2331,12 +2347,12 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
addr_hi = netdev->dev_addr[4] |
netdev->dev_addr[5] << 8;
- ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
/* Added to support MAC address changes */
- ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
return 0;
}
@@ -2348,7 +2364,6 @@ static int lan78xx_set_features(struct net_device *netdev,
struct lan78xx_net *dev = netdev_priv(netdev);
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
unsigned long flags;
- int ret;
spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
@@ -2372,7 +2387,7 @@ static int lan78xx_set_features(struct net_device *netdev,
spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
- ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
return 0;
}
@@ -2464,26 +2479,186 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev)
lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
+static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
+{
+ return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
+}
+
+static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
+ u32 hw_disabled)
+{
+ unsigned long timeout;
+ bool stopped = true;
+ int ret;
+ u32 buf;
+
+ /* Stop the h/w block (if not already stopped) */
+
+ ret = lan78xx_read_reg(dev, reg, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (buf & hw_enabled) {
+ buf &= ~hw_enabled;
+
+ ret = lan78xx_write_reg(dev, reg, buf);
+ if (ret < 0)
+ return ret;
+
+ stopped = false;
+ timeout = jiffies + HW_DISABLE_TIMEOUT;
+ do {
+ ret = lan78xx_read_reg(dev, reg, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (buf & hw_disabled)
+ stopped = true;
+ else
+ msleep(HW_DISABLE_DELAY_MS);
+ } while (!stopped && !time_after(jiffies, timeout));
+ }
+
+ ret = stopped ? 0 : -ETIME;
+
+ return ret;
+}
+
+static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
+{
+ return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
+}
+
+static int lan78xx_start_tx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "start tx path");
+
+ /* Start the MAC transmitter */
+
+ ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
+ if (ret < 0)
+ return ret;
+
+ /* Start the Tx FIFO */
+
+ ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "stop tx path");
+
+ /* Stop the Tx FIFO */
+
+ ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
+ if (ret < 0)
+ return ret;
+
+ /* Stop the MAC transmitter */
+
+ ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* The caller must ensure the Tx path is stopped before calling
+ * lan78xx_flush_tx_fifo().
+ */
+static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
+{
+ return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
+}
+
+static int lan78xx_start_rx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "start rx path");
+
+ /* Start the Rx FIFO */
+
+ ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
+ if (ret < 0)
+ return ret;
+
+ /* Start the MAC receiver */
+
+ ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "stop rx path");
+
+ /* Stop the MAC receiver */
+
+ ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
+ if (ret < 0)
+ return ret;
+
+ /* Stop the Rx FIFO */
+
+ ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* The caller must ensure the Rx path is stopped before calling
+ * lan78xx_flush_rx_fifo().
+ */
+static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
+{
+ return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
+}
+
static int lan78xx_reset(struct lan78xx_net *dev)
{
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
- u32 buf;
- int ret = 0;
unsigned long timeout;
+ int ret;
+ u32 buf;
u8 sig;
ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+ if (ret < 0)
+ return ret;
+
buf |= HW_CFG_LRST_;
+
ret = lan78xx_write_reg(dev, HW_CFG, buf);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
mdelay(1);
ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on completion of LiteReset");
- return -EIO;
+ ret = -ETIMEDOUT;
+ return ret;
}
} while (buf & HW_CFG_LRST_);
@@ -2491,13 +2666,22 @@ static int lan78xx_reset(struct lan78xx_net *dev)
/* save DEVID for later usage */
ret = lan78xx_read_reg(dev, ID_REV, &buf);
+ if (ret < 0)
+ return ret;
+
dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
/* Respond to the IN token with a NAK */
ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+ if (ret < 0)
+ return ret;
+
buf |= USB_CFG_BIR_;
+
ret = lan78xx_write_reg(dev, USB_CFG0, buf);
+ if (ret < 0)
+ return ret;
/* Init LTM */
lan78xx_init_ltm(dev);
@@ -2520,58 +2704,111 @@ static int lan78xx_reset(struct lan78xx_net *dev)
}
ret = lan78xx_write_reg(dev, BURST_CAP, buf);
+ if (ret < 0)
+ return ret;
+
ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
+ if (ret < 0)
+ return ret;
ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+ if (ret < 0)
+ return ret;
+
buf |= HW_CFG_MEF_;
+
ret = lan78xx_write_reg(dev, HW_CFG, buf);
+ if (ret < 0)
+ return ret;
ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+ if (ret < 0)
+ return ret;
+
buf |= USB_CFG_BCE_;
+
ret = lan78xx_write_reg(dev, USB_CFG0, buf);
+ if (ret < 0)
+ return ret;
/* set FIFO sizes */
buf = (MAX_RX_FIFO_SIZE - 512) / 512;
+
ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
+ if (ret < 0)
+ return ret;
buf = (MAX_TX_FIFO_SIZE - 512) / 512;
+
ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
+ if (ret < 0)
+ return ret;
+
ret = lan78xx_write_reg(dev, FLOW, 0);
+ if (ret < 0)
+ return ret;
+
ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
+ if (ret < 0)
+ return ret;
/* Don't need rfe_ctl_lock during initialisation */
ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
+ if (ret < 0)
+ return ret;
+
pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
+
ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ if (ret < 0)
+ return ret;
/* Enable or disable checksum offload engines */
- lan78xx_set_features(dev->net, dev->net->features);
+ ret = lan78xx_set_features(dev->net, dev->net->features);
+ if (ret < 0)
+ return ret;
lan78xx_set_multicast(dev->net);
/* reset PHY */
ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ if (ret < 0)
+ return ret;
+
buf |= PMT_CTL_PHY_RST_;
+
ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
mdelay(1);
ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net, "timeout waiting for PHY Reset");
- return -EIO;
+ ret = -ETIMEDOUT;
+ return ret;
}
} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+ if (ret < 0)
+ return ret;
+
/* LAN7801 only has RGMII mode */
if (dev->chipid == ID_REV_CHIP_ID_7801_)
buf &= ~MAC_CR_GMII_EN_;
- if (dev->chipid == ID_REV_CHIP_ID_7800_) {
+ if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
+ dev->chipid == ID_REV_CHIP_ID_7850_) {
ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
if (!ret && sig != EEPROM_INDICATOR) {
/* Implies there is no external eeprom. Set mac speed */
@@ -2580,27 +2817,13 @@ static int lan78xx_reset(struct lan78xx_net *dev)
}
}
ret = lan78xx_write_reg(dev, MAC_CR, buf);
-
- ret = lan78xx_read_reg(dev, MAC_TX, &buf);
- buf |= MAC_TX_TXEN_;
- ret = lan78xx_write_reg(dev, MAC_TX, buf);
-
- ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
- buf |= FCT_TX_CTL_EN_;
- ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
+ if (ret < 0)
+ return ret;
ret = lan78xx_set_rx_max_frame_length(dev,
dev->net->mtu + VLAN_ETH_HLEN);
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
- buf |= MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
-
- ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
- buf |= FCT_RX_CTL_EN_;
- ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
-
- return 0;
+ return ret;
}
static void lan78xx_init_stats(struct lan78xx_net *dev)
@@ -2634,9 +2857,13 @@ static int lan78xx_open(struct net_device *net)
struct lan78xx_net *dev = netdev_priv(net);
int ret;
+ netif_dbg(dev, ifup, dev->net, "open device");
+
ret = usb_autopm_get_interface(dev->intf);
if (ret < 0)
- goto out;
+ return ret;
+
+ mutex_lock(&dev->dev_mutex);
phy_start(net->phydev);
@@ -2652,6 +2879,20 @@ static int lan78xx_open(struct net_device *net)
}
}
+ ret = lan78xx_flush_rx_fifo(dev);
+ if (ret < 0)
+ goto done;
+ ret = lan78xx_flush_tx_fifo(dev);
+ if (ret < 0)
+ goto done;
+
+ ret = lan78xx_start_tx_path(dev);
+ if (ret < 0)
+ goto done;
+ ret = lan78xx_start_rx_path(dev);
+ if (ret < 0)
+ goto done;
+
lan78xx_init_stats(dev);
set_bit(EVENT_DEV_OPEN, &dev->flags);
@@ -2662,9 +2903,11 @@ static int lan78xx_open(struct net_device *net)
lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
- usb_autopm_put_interface(dev->intf);
+ mutex_unlock(&dev->dev_mutex);
+
+ if (ret < 0)
+ usb_autopm_put_interface(dev->intf);
-out:
return ret;
}
@@ -2681,38 +2924,56 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
/* maybe wait for deletions to finish. */
- while (!skb_queue_empty(&dev->rxq) &&
- !skb_queue_empty(&dev->txq) &&
- !skb_queue_empty(&dev->done)) {
+ while (!skb_queue_empty(&dev->rxq) ||
+ !skb_queue_empty(&dev->txq)) {
schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
set_current_state(TASK_UNINTERRUPTIBLE);
netif_dbg(dev, ifdown, dev->net,
- "waited for %d urb completions\n", temp);
+ "waited for %d urb completions", temp);
}
set_current_state(TASK_RUNNING);
dev->wait = NULL;
remove_wait_queue(&unlink_wakeup, &wait);
+
+ while (!skb_queue_empty(&dev->done)) {
+ struct skb_data *entry;
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&dev->done);
+ entry = (struct skb_data *)(skb->cb);
+ usb_free_urb(entry->urb);
+ dev_kfree_skb(skb);
+ }
}
static int lan78xx_stop(struct net_device *net)
{
struct lan78xx_net *dev = netdev_priv(net);
+ netif_dbg(dev, ifup, dev->net, "stop device");
+
+ mutex_lock(&dev->dev_mutex);
+
if (timer_pending(&dev->stat_monitor))
del_timer_sync(&dev->stat_monitor);
- if (net->phydev)
- phy_stop(net->phydev);
-
clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue(net);
+ tasklet_kill(&dev->bh);
+
+ lan78xx_terminate_urbs(dev);
netif_info(dev, ifdown, dev->net,
"stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
net->stats.rx_packets, net->stats.tx_packets,
net->stats.rx_errors, net->stats.tx_errors);
- lan78xx_terminate_urbs(dev);
+ /* ignore errors that occur while stopping the Tx and Rx data paths */
+ lan78xx_stop_tx_path(dev);
+ lan78xx_stop_rx_path(dev);
+
+ if (net->phydev)
+ phy_stop(net->phydev);
usb_kill_urb(dev->urb_intr);
@@ -2722,12 +2983,17 @@ static int lan78xx_stop(struct net_device *net)
* can't flush_scheduled_work() until we drop rtnl (later),
* else workers could deadlock; so make workers a NOP.
*/
- dev->flags = 0;
+ clear_bit(EVENT_TX_HALT, &dev->flags);
+ clear_bit(EVENT_RX_HALT, &dev->flags);
+ clear_bit(EVENT_LINK_RESET, &dev->flags);
+ clear_bit(EVENT_STAT_UPDATE, &dev->flags);
+
cancel_delayed_work_sync(&dev->wq);
- tasklet_kill(&dev->bh);
usb_autopm_put_interface(dev->intf);
+ mutex_unlock(&dev->dev_mutex);
+
return 0;
}
@@ -2850,6 +3116,9 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
struct lan78xx_net *dev = netdev_priv(net);
struct sk_buff *skb2 = NULL;
+ if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
+ schedule_delayed_work(&dev->wq, 0);
+
if (skb) {
skb_tx_timestamp(skb);
skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
@@ -3377,9 +3646,10 @@ drop:
if (skb)
dev_kfree_skb_any(skb);
usb_free_urb(urb);
- } else
+ } else {
netif_dbg(dev, tx_queued, dev->net,
"> tx, len %d, type 0x%x\n", length, skb->protocol);
+ }
}
static void lan78xx_rx_bh(struct lan78xx_net *dev)
@@ -3455,18 +3725,17 @@ static void lan78xx_delayedwork(struct work_struct *work)
dev = container_of(work, struct lan78xx_net, wq.work);
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return;
+
if (test_bit(EVENT_TX_HALT, &dev->flags)) {
unlink_urbs(dev, &dev->txq);
- status = usb_autopm_get_interface(dev->intf);
- if (status < 0)
- goto fail_pipe;
+
status = usb_clear_halt(dev->udev, dev->pipe_out);
- usb_autopm_put_interface(dev->intf);
if (status < 0 &&
status != -EPIPE &&
status != -ESHUTDOWN) {
if (netif_msg_tx_err(dev))
-fail_pipe:
netdev_err(dev->net,
"can't clear tx halt, status %d\n",
status);
@@ -3476,18 +3745,14 @@ fail_pipe:
netif_wake_queue(dev->net);
}
}
+
if (test_bit(EVENT_RX_HALT, &dev->flags)) {
unlink_urbs(dev, &dev->rxq);
- status = usb_autopm_get_interface(dev->intf);
- if (status < 0)
- goto fail_halt;
status = usb_clear_halt(dev->udev, dev->pipe_in);
- usb_autopm_put_interface(dev->intf);
if (status < 0 &&
status != -EPIPE &&
status != -ESHUTDOWN) {
if (netif_msg_rx_err(dev))
-fail_halt:
netdev_err(dev->net,
"can't clear rx halt, status %d\n",
status);
@@ -3501,16 +3766,9 @@ fail_halt:
int ret = 0;
clear_bit(EVENT_LINK_RESET, &dev->flags);
- status = usb_autopm_get_interface(dev->intf);
- if (status < 0)
- goto skip_reset;
if (lan78xx_link_reset(dev) < 0) {
- usb_autopm_put_interface(dev->intf);
-skip_reset:
netdev_info(dev->net, "link reset failed (%d)\n",
ret);
- } else {
- usb_autopm_put_interface(dev->intf);
}
}
@@ -3524,6 +3782,8 @@ skip_reset:
dev->delta = min((dev->delta * 2), 50);
}
+
+ usb_autopm_put_interface(dev->intf);
}
static void intr_complete(struct urb *urb)
@@ -3653,8 +3913,8 @@ static int lan78xx_probe(struct usb_interface *intf,
struct net_device *netdev;
struct usb_device *udev;
int ret;
- unsigned maxp;
- unsigned period;
+ unsigned int maxp;
+ unsigned int period;
u8 *buf = NULL;
udev = interface_to_usbdev(intf);
@@ -3683,6 +3943,7 @@ static int lan78xx_probe(struct usb_interface *intf,
skb_queue_head_init(&dev->rxq_pause);
skb_queue_head_init(&dev->txq_pend);
mutex_init(&dev->phy_mutex);
+ mutex_init(&dev->dev_mutex);
tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
@@ -3825,38 +4086,119 @@ static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
return crc;
}
-static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
+static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
u32 buf;
int ret;
- int mask_index;
- u16 crc;
- u32 temp_wucsr;
- u32 temp_pmt_ctl;
+
+ ret = lan78xx_stop_tx_path(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_stop_rx_path(dev);
+ if (ret < 0)
+ return ret;
+
+ /* auto suspend (selective suspend) */
+
+ ret = lan78xx_write_reg(dev, WUCSR, 0);
+ if (ret < 0)
+ return ret;
+ ret = lan78xx_write_reg(dev, WUCSR2, 0);
+ if (ret < 0)
+ return ret;
+ ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+ if (ret < 0)
+ return ret;
+
+ /* set goodframe wakeup */
+
+ ret = lan78xx_read_reg(dev, WUCSR, &buf);
+ if (ret < 0)
+ return ret;
+
+ buf |= WUCSR_RFE_WAKE_EN_;
+ buf |= WUCSR_STORE_WAKE_;
+
+ ret = lan78xx_write_reg(dev, WUCSR, buf);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ if (ret < 0)
+ return ret;
+
+ buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
+ buf |= PMT_CTL_RES_CLR_WKP_STS_;
+ buf |= PMT_CTL_PHY_WAKE_EN_;
+ buf |= PMT_CTL_WOL_EN_;
+ buf &= ~PMT_CTL_SUS_MODE_MASK_;
+ buf |= PMT_CTL_SUS_MODE_3_;
+
+ ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ if (ret < 0)
+ return ret;
+
+ buf |= PMT_CTL_WUPS_MASK_;
+
+ ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_start_rx_path(dev);
+
+ return ret;
+}
+
+static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
+{
const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
const u8 ipv6_multicast[3] = { 0x33, 0x33 };
const u8 arp_type[2] = { 0x08, 0x06 };
+ u32 temp_pmt_ctl;
+ int mask_index;
+ u32 temp_wucsr;
+ u32 buf;
+ u16 crc;
+ int ret;
- ret = lan78xx_read_reg(dev, MAC_TX, &buf);
- buf &= ~MAC_TX_TXEN_;
- ret = lan78xx_write_reg(dev, MAC_TX, buf);
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
- buf &= ~MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = lan78xx_stop_tx_path(dev);
+ if (ret < 0)
+ return ret;
+ ret = lan78xx_stop_rx_path(dev);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, WUCSR, 0);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, WUCSR2, 0);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+ if (ret < 0)
+ return ret;
temp_wucsr = 0;
temp_pmt_ctl = 0;
+
ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
+ if (ret < 0)
+ return ret;
+
temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
- for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
+ for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
+ if (ret < 0)
+ return ret;
+ }
mask_index = 0;
if (wol & WAKE_PHY) {
@@ -3890,11 +4232,22 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
WUF_CFGX_TYPE_MCAST_ |
(0 << WUF_CFGX_OFFSET_SHIFT_) |
(crc & WUF_CFGX_CRC16_MASK_));
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ if (ret < 0)
+ return ret;
+
mask_index++;
/* for IPv6 Multicast */
@@ -3904,11 +4257,22 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
WUF_CFGX_TYPE_MCAST_ |
(0 << WUF_CFGX_OFFSET_SHIFT_) |
(crc & WUF_CFGX_CRC16_MASK_));
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ if (ret < 0)
+ return ret;
+
mask_index++;
temp_pmt_ctl |= PMT_CTL_WOL_EN_;
@@ -3934,11 +4298,22 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
WUF_CFGX_TYPE_ALL_ |
(0 << WUF_CFGX_OFFSET_SHIFT_) |
(crc & WUF_CFGX_CRC16_MASK_));
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ if (ret < 0)
+ return ret;
+
mask_index++;
temp_pmt_ctl |= PMT_CTL_WOL_EN_;
@@ -3947,6 +4322,8 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
}
ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
+ if (ret < 0)
+ return ret;
/* when multiple WOL bits are set */
if (hweight_long((unsigned long)wol) > 1) {
@@ -3955,35 +4332,47 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
}
ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
+ if (ret < 0)
+ return ret;
/* clear WUPS */
ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ if (ret < 0)
+ return ret;
+
buf |= PMT_CTL_WUPS_MASK_;
+
ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+ if (ret < 0)
+ return ret;
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
- buf |= MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = lan78xx_start_rx_path(dev);
- return 0;
+ return ret;
}
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
struct lan78xx_net *dev = usb_get_intfdata(intf);
- struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
- u32 buf;
+ bool dev_open;
int ret;
int event;
event = message.event;
- if (!dev->suspend_count++) {
+ mutex_lock(&dev->dev_mutex);
+
+ netif_dbg(dev, ifdown, dev->net,
+ "suspending: pm event %#x", message.event);
+
+ dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
+
+ if (dev_open) {
spin_lock_irq(&dev->txq.lock);
/* don't autosuspend while transmitting */
if ((skb_queue_len(&dev->txq) ||
skb_queue_len(&dev->txq_pend)) &&
- PMSG_IS_AUTO(message)) {
+ PMSG_IS_AUTO(message)) {
spin_unlock_irq(&dev->txq.lock);
ret = -EBUSY;
goto out;
@@ -3992,129 +4381,207 @@ static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
spin_unlock_irq(&dev->txq.lock);
}
- /* stop TX & RX */
- ret = lan78xx_read_reg(dev, MAC_TX, &buf);
- buf &= ~MAC_TX_TXEN_;
- ret = lan78xx_write_reg(dev, MAC_TX, buf);
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
- buf &= ~MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ /* stop RX */
+ ret = lan78xx_stop_rx_path(dev);
+ if (ret < 0)
+ goto out;
+
+ ret = lan78xx_flush_rx_fifo(dev);
+ if (ret < 0)
+ goto out;
+
+ /* stop Tx */
+ ret = lan78xx_stop_tx_path(dev);
+ if (ret < 0)
+ goto out;
- /* empty out the rx and queues */
+ /* empty out the Rx and Tx queues */
netif_device_detach(dev->net);
lan78xx_terminate_urbs(dev);
usb_kill_urb(dev->urb_intr);
/* reattach */
netif_device_attach(dev->net);
- }
- if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
del_timer(&dev->stat_monitor);
if (PMSG_IS_AUTO(message)) {
- /* auto suspend (selective suspend) */
- ret = lan78xx_read_reg(dev, MAC_TX, &buf);
- buf &= ~MAC_TX_TXEN_;
- ret = lan78xx_write_reg(dev, MAC_TX, buf);
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
- buf &= ~MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = lan78xx_set_auto_suspend(dev);
+ if (ret < 0)
+ goto out;
+ } else {
+ struct lan78xx_priv *pdata;
- ret = lan78xx_write_reg(dev, WUCSR, 0);
- ret = lan78xx_write_reg(dev, WUCSR2, 0);
- ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+ pdata = (struct lan78xx_priv *)(dev->data[0]);
+ netif_carrier_off(dev->net);
+ ret = lan78xx_set_suspend(dev, pdata->wol);
+ if (ret < 0)
+ goto out;
+ }
+ } else {
+ /* Interface is down; don't allow WOL and PHY
+ * events to wake up the host
+ */
+ u32 buf;
- /* set goodframe wakeup */
- ret = lan78xx_read_reg(dev, WUCSR, &buf);
+ set_bit(EVENT_DEV_ASLEEP, &dev->flags);
- buf |= WUCSR_RFE_WAKE_EN_;
- buf |= WUCSR_STORE_WAKE_;
+ ret = lan78xx_write_reg(dev, WUCSR, 0);
+ if (ret < 0)
+ goto out;
+ ret = lan78xx_write_reg(dev, WUCSR2, 0);
+ if (ret < 0)
+ goto out;
- ret = lan78xx_write_reg(dev, WUCSR, buf);
+ ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ if (ret < 0)
+ goto out;
- ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
+ buf |= PMT_CTL_RES_CLR_WKP_STS_;
+ buf &= ~PMT_CTL_SUS_MODE_MASK_;
+ buf |= PMT_CTL_SUS_MODE_3_;
- buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
- buf |= PMT_CTL_RES_CLR_WKP_STS_;
+ ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+ if (ret < 0)
+ goto out;
- buf |= PMT_CTL_PHY_WAKE_EN_;
- buf |= PMT_CTL_WOL_EN_;
- buf &= ~PMT_CTL_SUS_MODE_MASK_;
- buf |= PMT_CTL_SUS_MODE_3_;
+ ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ if (ret < 0)
+ goto out;
- ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+ buf |= PMT_CTL_WUPS_MASK_;
- ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = 0;
+out:
+ mutex_unlock(&dev->dev_mutex);
- buf |= PMT_CTL_WUPS_MASK_;
+ return ret;
+}
- ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
+{
+ bool pipe_halted = false;
+ struct urb *urb;
+
+ while ((urb = usb_get_from_anchor(&dev->deferred))) {
+ struct sk_buff *skb = urb->context;
+ int ret;
+
+ if (!netif_device_present(dev->net) ||
+ !netif_carrier_ok(dev->net) ||
+ pipe_halted) {
+ usb_free_urb(urb);
+ dev_kfree_skb(skb);
+ continue;
+ }
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
- buf |= MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (ret == 0) {
+ netif_trans_update(dev->net);
+ lan78xx_queue_skb(&dev->txq, skb, tx_start);
} else {
- lan78xx_set_suspend(dev, pdata->wol);
+ usb_free_urb(urb);
+ dev_kfree_skb(skb);
+
+ if (ret == -EPIPE) {
+ netif_stop_queue(dev->net);
+ pipe_halted = true;
+ } else if (ret == -ENODEV) {
+ netif_device_detach(dev->net);
+ }
}
}
- ret = 0;
-out:
- return ret;
+ return pipe_halted;
}
static int lan78xx_resume(struct usb_interface *intf)
{
struct lan78xx_net *dev = usb_get_intfdata(intf);
- struct sk_buff *skb;
- struct urb *res;
+ bool dev_open;
int ret;
- u32 buf;
- if (!timer_pending(&dev->stat_monitor)) {
- dev->delta = 1;
- mod_timer(&dev->stat_monitor,
- jiffies + STAT_UPDATE_TIMER);
- }
+ mutex_lock(&dev->dev_mutex);
- if (!--dev->suspend_count) {
- /* resume interrupt URBs */
- if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
- usb_submit_urb(dev->urb_intr, GFP_NOIO);
+ netif_dbg(dev, ifup, dev->net, "resuming device");
+
+ dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
+
+ if (dev_open) {
+ bool pipe_halted = false;
+
+ ret = lan78xx_flush_tx_fifo(dev);
+ if (ret < 0)
+ goto out;
+
+ if (dev->urb_intr) {
+ int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
- spin_lock_irq(&dev->txq.lock);
- while ((res = usb_get_from_anchor(&dev->deferred))) {
- skb = (struct sk_buff *)res->context;
- ret = usb_submit_urb(res, GFP_ATOMIC);
if (ret < 0) {
- dev_kfree_skb_any(skb);
- usb_free_urb(res);
- usb_autopm_put_interface_async(dev->intf);
- } else {
- netif_trans_update(dev->net);
- lan78xx_queue_skb(&dev->txq, skb, tx_start);
+ if (ret == -ENODEV)
+ netif_device_detach(dev->net);
+
+ netdev_warn(dev->net, "Failed to submit intr URB");
}
}
+ spin_lock_irq(&dev->txq.lock);
+
+ if (netif_device_present(dev->net)) {
+ pipe_halted = lan78xx_submit_deferred_urbs(dev);
+
+ if (pipe_halted)
+ lan78xx_defer_kevent(dev, EVENT_TX_HALT);
+ }
+
clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
+
spin_unlock_irq(&dev->txq.lock);
- if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
- if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
- netif_start_queue(dev->net);
- tasklet_schedule(&dev->bh);
+ if (!pipe_halted &&
+ netif_device_present(dev->net) &&
+ (skb_queue_len(&dev->txq) < dev->tx_qlen))
+ netif_start_queue(dev->net);
+
+ ret = lan78xx_start_tx_path(dev);
+ if (ret < 0)
+ goto out;
+
+ tasklet_schedule(&dev->bh);
+
+ if (!timer_pending(&dev->stat_monitor)) {
+ dev->delta = 1;
+ mod_timer(&dev->stat_monitor,
+ jiffies + STAT_UPDATE_TIMER);
}
+
+ } else {
+ clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
}
ret = lan78xx_write_reg(dev, WUCSR2, 0);
+ if (ret < 0)
+ goto out;
ret = lan78xx_write_reg(dev, WUCSR, 0);
+ if (ret < 0)
+ goto out;
ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+ if (ret < 0)
+ goto out;
ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
WUCSR2_ARP_RCD_ |
WUCSR2_IPV6_TCPSYN_RCD_ |
WUCSR2_IPV4_TCPSYN_RCD_);
+ if (ret < 0)
+ goto out;
ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
WUCSR_EEE_RX_WAKE_ |
@@ -4123,23 +4590,32 @@ static int lan78xx_resume(struct usb_interface *intf)
WUCSR_WUFR_ |
WUCSR_MPR_ |
WUCSR_BCST_FR_);
+ if (ret < 0)
+ goto out;
- ret = lan78xx_read_reg(dev, MAC_TX, &buf);
- buf |= MAC_TX_TXEN_;
- ret = lan78xx_write_reg(dev, MAC_TX, buf);
+ ret = 0;
+out:
+ mutex_unlock(&dev->dev_mutex);
- return 0;
+ return ret;
}
static int lan78xx_reset_resume(struct usb_interface *intf)
{
struct lan78xx_net *dev = usb_get_intfdata(intf);
+ int ret;
- lan78xx_reset(dev);
+ netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
+
+ ret = lan78xx_reset(dev);
+ if (ret < 0)
+ return ret;
phy_start(dev->net->phydev);
- return lan78xx_resume(intf);
+ ret = lan78xx_resume(intf);
+
+ return ret;
}
static const struct usb_device_id products[] = {
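
The new lan78xx start/stop helpers above share one idiom: clear the enable bit, then poll for the hardware's explicit "disabled" status bit under a deadline rather than assuming the register write takes effect immediately. A minimal sketch of that stop-and-poll pattern, in which dev_ctx, read_reg() and write_reg() are illustrative stand-ins for the driver's own context and register accessors:

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

struct dev_ctx;
int read_reg(struct dev_ctx *ctx, u32 reg, u32 *val);   /* stand-in */
int write_reg(struct dev_ctx *ctx, u32 reg, u32 val);   /* stand-in */

/* Clear en_bit, then wait (bounded) until the block reports stopped_bit;
 * mirrors the structure of lan78xx_stop_hw() above.
 */
static int stop_hw_block(struct dev_ctx *ctx, u32 reg,
                         u32 en_bit, u32 stopped_bit)
{
    unsigned long deadline = jiffies + msecs_to_jiffies(100);
    u32 val;
    int ret;

    ret = read_reg(ctx, reg, &val);
    if (ret < 0)
        return ret;
    if (!(val & en_bit))
        return 0;               /* already stopped */

    ret = write_reg(ctx, reg, val & ~en_bit);
    if (ret < 0)
        return ret;

    for (;;) {
        ret = read_reg(ctx, reg, &val);
        if (ret < 0)
            return ret;
        if (val & stopped_bit)
            return 0;           /* h/w confirmed the stop */
        if (time_after(jiffies, deadline))
            return -ETIME;
        msleep(1);
    }
}

Only once this returns 0 is a FIFO flush safe, which is why lan78xx_flush_tx_fifo() and lan78xx_flush_rx_fifo() document that precondition for their callers.
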
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 17c9c63b8eeb..ce7862dac2b7 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -57,9 +57,7 @@
static inline int
pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
{
- return usbnet_read_cmd(dev, req,
- USB_DIR_IN | USB_TYPE_VENDOR |
- USB_RECIP_DEVICE,
+ return usbnet_write_cmd(dev, req, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
val, index, NULL, 0);
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 8ef0a013874c..c2bd4abce6de 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1046,6 +1046,7 @@ static const struct usb_device_id products[] = {
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0801)}, /* Quectel RM520N */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1177,7 +1178,9 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
{QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+ {QMI_QUIRK_SET_DTR(0x05c6, 0x9091, 2)}, /* Compal RXM-G1 */
{QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */
+ {QMI_QUIRK_SET_DTR(0x05c6, 0x90db, 2)}, /* Compal RXM-G1 */
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
@@ -1244,6 +1247,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x0168, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0176, 3)},
{QMI_FIXED_INTF(0x19d2, 0x0178, 3)},
+ {QMI_FIXED_INTF(0x19d2, 0x0189, 4)}, /* ZTE MF290 */
{QMI_FIXED_INTF(0x19d2, 0x0191, 4)}, /* ZTE EuFi890 */
{QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */
{QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
@@ -1281,7 +1285,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */
{QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
- {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
+ {QMI_QUIRK_SET_DTR(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
@@ -1313,10 +1317,12 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x103a, 0)}, /* Telit LE910C4-WWX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
@@ -1353,6 +1359,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81c2, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
@@ -1367,10 +1374,12 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x030e, 4)}, /* Quectel EM05GV2 */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/
{QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
+ {QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 873f288e7cec..472b02bcfcbf 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -850,7 +850,7 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
- value, index, tmp, size, 500);
+ value, index, tmp, size, USB_CTRL_GET_TIMEOUT);
if (ret < 0)
memset(data, 0xff, size);
else
@@ -873,7 +873,7 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
- value, index, tmp, size, 500);
+ value, index, tmp, size, USB_CTRL_SET_TIMEOUT);
kfree(tmp);
@@ -1479,7 +1479,9 @@ static void intr_callback(struct urb *urb)
"Stop submitting intr, status %d\n", status);
return;
case -EOVERFLOW:
- netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
+ if (net_ratelimit())
+ netif_info(tp, intr, tp->netdev,
+ "intr status -EOVERFLOW\n");
goto resubmit;
/* -EPIPE: should clear the halt */
default:
@@ -2251,6 +2253,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
struct r8152 *tp = container_of(napi, struct r8152, napi);
int work_done;
+ if (!budget)
+ return 0;
+
work_done = rx_bottom(tp, budget);
if (work_done < budget) {
@@ -5579,7 +5584,8 @@ static u8 rtl_get_version(struct usb_interface *intf)
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
- PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
+ PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp),
+ USB_CTRL_GET_TIMEOUT);
if (ret > 0)
ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
@@ -5773,6 +5779,9 @@ static int rtl8152_probe(struct usb_interface *intf,
out1:
tasklet_kill(&tp->tx_tl);
+ cancel_delayed_work_sync(&tp->hw_phy_work);
+ if (tp->rtl_ops.unload)
+ tp->rtl_ops.unload(tp);
usb_set_intfdata(intf, NULL);
out:
free_netdev(netdev);
@@ -5821,6 +5830,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)},
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
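
The rtl8152_probe() error path above now unwinds what the success path set up before free_netdev() runs: the delayed PHY work is cancelled and rtl_ops.unload() is called, in reverse order of initialisation. A sketch of that goto-unwind shape, with init_a()/init_b()/teardown_a() as hypothetical setup and teardown steps:

#include <linux/errno.h>

struct ctx;
int init_a(struct ctx *c);      /* hypothetical setup steps */
int init_b(struct ctx *c);
void teardown_a(struct ctx *c);

/* Each label undoes exactly the steps that had already succeeded
 * when the failure occurred, in reverse order of construction.
 */
static int example_probe(struct ctx *c)
{
    int ret;

    ret = init_a(c);
    if (ret)
        return ret;

    ret = init_b(c);
    if (ret)
        goto undo_a;

    return 0;

undo_a:
    teardown_a(c);
    return ret;
}
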
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 1505fe3f87ed..1ff723e15d52 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -255,7 +255,8 @@ static int rndis_query(struct usbnet *dev, struct usb_interface *intf,
off = le32_to_cpu(u.get_c->offset);
len = le32_to_cpu(u.get_c->len);
- if (unlikely((8 + off + len) > CONTROL_BUFFER_SIZE))
+ if (unlikely((off > CONTROL_BUFFER_SIZE - 8) ||
+ (len > CONTROL_BUFFER_SIZE - 8 - off)))
goto response_error;
if (*reply_len != -1 && len != *reply_len)
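
The rndis_host check above is the standard overflow-safe rewrite of a bounds test: "8 + off + len > CONTROL_BUFFER_SIZE" can wrap when off and len arrive from the device, so each term is compared against the remaining budget instead. A small, compilable illustration in plain C (1025 is this driver's control buffer size; the test values are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define CONTROL_BUFFER_SIZE 1025u

/* off and len are device-controlled and must not be trusted. */
static int response_fits(uint32_t off, uint32_t len)
{
    /* Neither subtraction can underflow: the first check bounds off
     * before the second check uses it.
     */
    if (off > CONTROL_BUFFER_SIZE - 8)
        return 0;
    if (len > CONTROL_BUFFER_SIZE - 8 - off)
        return 0;
    return 1;
}

int main(void)
{
    /* naive "8 + off + len" wraps to 16 here and would wrongly pass */
    printf("%d\n", response_fits(0xFFFFFFF8u, 16));  /* 0: rejected */
    printf("%d\n", response_fits(100, 200));         /* 1: fits */
    return 0;
}
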
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index aa848be459ec..9656561fc77f 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -90,7 +90,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret < 4)) {
+ ret = ret < 0 ? ret : -ENODATA;
+
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
return ret;
@@ -2198,6 +2200,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+ if (unlikely(size > skb->len)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err rx_cmd_a=0x%08x\n",
+ rx_cmd_a);
+ return 0;
+ }
+
if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
netif_dbg(dev, rx_err, dev->net,
"Error rx_cmd_a=0x%08x\n", rx_cmd_a);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index bb4ccbda031a..7c579b038be7 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1040,7 +1040,7 @@ static int smsc95xx_reset(struct usbnet *dev)
if (timeout >= 100) {
netdev_warn(dev->net, "timeout waiting for completion of Lite Reset\n");
- return ret;
+ return -ETIMEDOUT;
}
ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_);
@@ -1935,6 +1935,12 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
size = (u16)((header & RX_STS_FL_) >> 16);
align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;
+ if (unlikely(size > skb->len)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err header=0x%08x\n", header);
+ return 0;
+ }
+
if (unlikely(header & RX_STS_ES_)) {
netif_dbg(dev, rx_err, dev->net,
"Error header=0x%08x\n", header);
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index fce6713e970b..811c8751308c 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* ignore the CRC length */
len = (skb->data[1] | (skb->data[2] << 8)) - 4;
- if (len > ETH_FRAME_LEN || len > skb->len)
+ if (len > ETH_FRAME_LEN || len > skb->len || len < 0)
return 0;
/* the last packet of current skb */
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 681e0def6356..a5332e99102a 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -736,7 +736,9 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
data->eeprom_len = SR9800_EEPROM_LEN;
- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ goto out;
/* LED Setting Rule :
* AABB:CCDD
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 6598a4cba158..bc37e268a15e 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -833,13 +833,11 @@ int usbnet_stop (struct net_device *net)
mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
- /* deferred work (task, timer, softirq) must also stop.
- * can't flush_scheduled_work() until we drop rtnl (later),
- * else workers could deadlock; so make workers a NOP.
- */
+ /* deferred work (timer, softirq, task) must also stop */
dev->flags = 0;
del_timer_sync (&dev->delay);
tasklet_kill (&dev->bh);
+ cancel_work_sync(&dev->kevent);
if (!pm)
usb_autopm_put_interface(dev->intf);
@@ -1587,6 +1585,7 @@ void usbnet_disconnect (struct usb_interface *intf)
struct usbnet *dev;
struct usb_device *xdev;
struct net_device *net;
+ struct urb *urb;
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
@@ -1603,9 +1602,11 @@ void usbnet_disconnect (struct usb_interface *intf)
net = dev->net;
unregister_netdev (net);
- cancel_work_sync(&dev->kevent);
-
- usb_scuttle_anchored_urbs(&dev->deferred);
+ while ((urb = usb_get_from_anchor(&dev->deferred))) {
+ dev_kfree_skb(urb->context);
+ kfree(urb->sg);
+ usb_free_urb(urb);
+ }
if (dev->driver_info->unbind)
dev->driver_info->unbind (dev, intf);
@@ -1755,6 +1756,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
} else if (!info->in || !info->out)
status = usbnet_get_endpoints (dev, udev);
else {
+ u8 ep_addrs[3] = {
+ info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0
+ };
+
dev->in = usb_rcvbulkpipe (xdev, info->in);
dev->out = usb_sndbulkpipe (xdev, info->out);
if (!(info->flags & FLAG_NO_SETINT))
@@ -1764,6 +1769,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
else
status = 0;
+ if (status == 0 && !usb_check_bulk_endpoints(udev, ep_addrs))
+ status = -EINVAL;
}
if (status >= 0 && dev->status)
status = init_status (dev, udev);
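
usbnet_disconnect() above stops scuttling the deferred anchor and drains it by hand because each deferred URB owns more than itself: the skb stashed in urb->context and the scatterlist in urb->sg would both leak. A sketch of that drain idiom, assuming the same ownership convention:

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/usb.h>

/* usb_scuttle_anchored_urbs() would free only the URBs; the per-URB
 * skb and sg allocations need the explicit loop.
 */
static void drain_deferred(struct usb_anchor *anchor)
{
    struct urb *urb;

    while ((urb = usb_get_from_anchor(anchor))) {
        dev_kfree_skb(urb->context);    /* tx skb owned by this URB */
        kfree(urb->sg);                 /* sg list, may be NULL */
        usb_free_urb(urb);              /* drop the reference we hold */
    }
}
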
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 7984f2157d22..df3617c4c44e 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -289,11 +289,25 @@ static const struct usb_device_id products [] = {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
+ .idProduct = 0x8005, /* A-300 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = (unsigned long)&bogus_mdlm_info,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
.idProduct = 0x8006, /* B-500/SL-5600 */
ZAURUS_MASTER_INTERFACE,
.driver_info = ZAURUS_PXA_INFO,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
+ .idProduct = 0x8006, /* B-500/SL-5600 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = (unsigned long)&bogus_mdlm_info,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8007, /* C-700 */
@@ -301,6 +315,13 @@ static const struct usb_device_id products [] = {
.driver_info = ZAURUS_PXA_INFO,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
+ .idProduct = 0x8007, /* C-700 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = (unsigned long)&bogus_mdlm_info,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x9031, /* C-750 C-760 */
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 683425e3a353..cae7247a397a 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -238,6 +238,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct veth_rq *rq = NULL;
+ int ret = NETDEV_TX_OK;
struct net_device *rcv;
int length = skb->len;
bool rcv_xdp = false;
@@ -270,6 +271,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
drop:
atomic64_inc(&priv->dropped);
+ ret = NET_XMIT_DROP;
}
if (rcv_xdp)
@@ -277,7 +279,7 @@ drop:
rcu_read_unlock();
- return NETDEV_TX_OK;
+ return ret;
}
static u64 veth_stats_tx(struct pcpu_lstats *result, struct net_device *dev)
@@ -1255,10 +1257,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
nla_peer = data[VETH_INFO_PEER];
ifmp = nla_data(nla_peer);
- err = rtnl_nla_parse_ifla(peer_tb,
- nla_data(nla_peer) + sizeof(struct ifinfomsg),
- nla_len(nla_peer) - sizeof(struct ifinfomsg),
- NULL);
+ err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
if (err < 0)
return err;
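
The veth_xmit() change keeps the usual contract, the skb is consumed on both paths, but NET_XMIT_DROP makes the failure visible to callers and qdisc accounting instead of a blanket NETDEV_TX_OK. The shape of such a handler, with deliver() and dropped as hypothetical stand-ins:

#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static atomic64_t dropped;
static bool deliver(struct sk_buff *skb, struct net_device *dev); /* hypothetical */

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
    if (!deliver(skb, dev)) {
        atomic64_inc(&dropped);     /* keep per-device drop stats */
        kfree_skb(skb);             /* a drop must still free the skb */
        return NET_XMIT_DROP;
    }
    return NETDEV_TX_OK;
}

NETDEV_TX_BUSY remains the only return value that asks the stack to requeue; everything else means the driver took ownership of the skb.
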
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 579df7c5411d..4faf3275b1f6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -646,8 +646,13 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
int page_off,
unsigned int *len)
{
- struct page *page = alloc_page(GFP_ATOMIC);
+ int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ struct page *page;
+ if (page_off + *len + tailroom > PAGE_SIZE)
+ return NULL;
+
+ page = alloc_page(GFP_ATOMIC);
if (!page)
return NULL;
@@ -655,7 +660,6 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
page_off += *len;
while (--*num_buf) {
- int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
unsigned int buflen;
void *buf;
int off;
@@ -1910,8 +1914,8 @@ static int virtnet_close(struct net_device *dev)
cancel_delayed_work_sync(&vi->refill);
for (i = 0; i < vi->max_queue_pairs; i++) {
- xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
napi_disable(&vi->rq[i].napi);
+ xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
virtnet_napi_tx_disable(&vi->sq[i].napi);
}
@@ -2788,6 +2792,27 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ if (!is_xdp_frame(buf))
+ dev_kfree_skb(buf);
+ else
+ xdp_return_frame(ptr_to_xdp(buf));
+}
+
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ struct virtnet_info *vi = vq->vdev->priv;
+ int i = vq2rxq(vq);
+
+ if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
+ else if (vi->big_packets)
+ give_pages(&vi->rq[i], buf);
+ else
+ put_page(virt_to_head_page(buf));
+}
+
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -2795,26 +2820,16 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (!is_xdp_frame(buf))
- dev_kfree_skb(buf);
- else
- xdp_return_frame(ptr_to_xdp(buf));
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_sq_free_unused_buf(vq, buf);
+ cond_resched();
}
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->rq[i].vq;
-
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->mergeable_rx_bufs) {
- put_page(virt_to_head_page(buf));
- } else if (vi->big_packets) {
- give_pages(&vi->rq[i], buf);
- } else {
- put_page(virt_to_head_page(buf));
- }
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_rq_free_unused_buf(vq, buf);
+ cond_resched();
}
}
@@ -2849,10 +2864,11 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
{
vq_callback_t **callbacks;
struct virtqueue **vqs;
- int ret = -ENOMEM;
- int i, total_vqs;
const char **names;
+ int ret = -ENOMEM;
+ int total_vqs;
bool *ctx;
+ u16 i;
/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
* possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
@@ -2889,8 +2905,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
callbacks[rxq2vq(i)] = skb_recv_done;
callbacks[txq2vq(i)] = skb_xmit_done;
- sprintf(vi->rq[i].name, "input.%d", i);
- sprintf(vi->sq[i].name, "output.%d", i);
+ sprintf(vi->rq[i].name, "input.%u", i);
+ sprintf(vi->sq[i].name, "output.%u", i);
names[rxq2vq(i)] = vi->rq[i].name;
names[txq2vq(i)] = vi->sq[i].name;
if (ctx)
@@ -3262,6 +3278,8 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
+ _virtnet_set_queues(vi, vi->curr_queue_pairs);
+
rtnl_unlock();
err = virtnet_cpu_notif_add(vi);
@@ -3270,8 +3288,6 @@ static int virtnet_probe(struct virtio_device *vdev)
goto free_unregister_netdev;
}
- virtnet_set_queues(vi, vi->curr_queue_pairs);
-
/* Assume link up if device can't report link status,
otherwise get link status from config. */
netif_carrier_off(dev);
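
In xdp_linearize_page() the bound is now established before the page allocation: the copy target is a single page, so the caller's offset plus the first fragment plus the skb_shared_info tailroom must fit up front. The guard in isolation, using the same arithmetic as the patch:

#include <linux/mm.h>
#include <linux/skbuff.h>

/* True if a fragment of length len, placed at page_off, still leaves
 * room for the skb_shared_info tailroom within one page.
 */
static bool linearize_fits(int page_off, unsigned int len)
{
    int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

    return page_off + len + tailroom <= PAGE_SIZE;
}

Hoisting tailroom out of the while loop is then just reuse of the same constant for the follow-on buffers.
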
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f4869b1836f3..8808a6540b19 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -698,6 +698,32 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
return 1;
}
+static bool vxlan_parse_gpe_proto(struct vxlanhdr *hdr, __be16 *protocol)
+{
+ struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)hdr;
+
+ /* Need to have Next Protocol set for interfaces in GPE mode. */
+ if (!gpe->np_applied)
+ return false;
+ /* "The initial version is 0. If a receiver does not support the
+ * version indicated it MUST drop the packet."
+ */
+ if (gpe->version != 0)
+ return false;
+ /* "When the O bit is set to 1, the packet is an OAM packet and OAM
+ * processing MUST occur." However, we don't implement OAM
+ * processing, thus drop the packet.
+ */
+ if (gpe->oam_flag)
+ return false;
+
+ *protocol = tun_p_to_eth_p(gpe->next_protocol);
+ if (!*protocol)
+ return false;
+
+ return true;
+}
+
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
unsigned int off,
struct vxlanhdr *vh, size_t hdrlen,
@@ -1564,35 +1590,6 @@ out:
unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
-static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
- __be16 *protocol,
- struct sk_buff *skb, u32 vxflags)
-{
- struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;
-
- /* Need to have Next Protocol set for interfaces in GPE mode. */
- if (!gpe->np_applied)
- return false;
- /* "The initial version is 0. If a receiver does not support the
- * version indicated it MUST drop the packet.
- */
- if (gpe->version != 0)
- return false;
- /* "When the O bit is set to 1, the packet is an OAM packet and OAM
- * processing MUST occur." However, we don't implement OAM
- * processing, thus drop the packet.
- */
- if (gpe->oam_flag)
- return false;
-
- *protocol = tun_p_to_eth_p(gpe->next_protocol);
- if (!*protocol)
- return false;
-
- unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
- return true;
-}
-
static bool vxlan_set_mac(struct vxlan_dev *vxlan,
struct vxlan_sock *vs,
struct sk_buff *skb, __be32 vni)
@@ -1694,8 +1691,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
* used by VXLAN extensions if explicitly requested.
*/
if (vs->flags & VXLAN_F_GPE) {
- if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
+ if (!vxlan_parse_gpe_proto(&unparsed, &protocol))
goto drop;
+ unparsed.vx_flags &= ~VXLAN_GPE_USED_BITS;
raw_proto = true;
}
@@ -2550,7 +2548,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ndst = &rt->dst;
- skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
+ skb_tunnel_check_pmtu(skb, ndst, vxlan_headroom(flags & VXLAN_F_GPE));
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
@@ -2590,7 +2588,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto out_unlock;
}
- skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
+ skb_tunnel_check_pmtu(skb, ndst, vxlan_headroom((flags & VXLAN_F_GPE) | VXLAN_F_IPV6));
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
@@ -2908,14 +2906,12 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
struct vxlan_rdst *dst = &vxlan->default_dst;
struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
dst->remote_ifindex);
- bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6);
/* This check is different than dev->max_mtu, because it looks at
* the lowerdev->mtu, rather than the static dev->max_mtu
*/
if (lowerdev) {
- int max_mtu = lowerdev->mtu -
- (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
+ int max_mtu = lowerdev->mtu - vxlan_headroom(vxlan->cfg.flags);
if (new_mtu > max_mtu)
return -EINVAL;
}
@@ -3514,11 +3510,11 @@ static void vxlan_config_apply(struct net_device *dev,
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
unsigned short needed_headroom = ETH_HLEN;
- bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6);
int max_mtu = ETH_MAX_MTU;
+ u32 flags = conf->flags;
if (!changelink) {
- if (conf->flags & VXLAN_F_GPE)
+ if (flags & VXLAN_F_GPE)
vxlan_raw_setup(dev);
else
vxlan_ether_setup(dev);
@@ -3544,8 +3540,7 @@ static void vxlan_config_apply(struct net_device *dev,
dev->needed_tailroom = lowerdev->needed_tailroom;
- max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
- VXLAN_HEADROOM);
+ max_mtu = lowerdev->mtu - vxlan_headroom(flags);
if (max_mtu < ETH_MIN_MTU)
max_mtu = ETH_MIN_MTU;
@@ -3556,10 +3551,9 @@ static void vxlan_config_apply(struct net_device *dev,
if (dev->mtu > max_mtu)
dev->mtu = max_mtu;
- if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
- needed_headroom += VXLAN6_HEADROOM;
- else
- needed_headroom += VXLAN_HEADROOM;
+ if (flags & VXLAN_F_COLLECT_METADATA)
+ flags |= VXLAN_F_IPV6;
+ needed_headroom += vxlan_headroom(flags);
dev->needed_headroom = needed_headroom;
memcpy(&vxlan->cfg, conf, sizeof(*conf));
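
Every VXLAN_HEADROOM/VXLAN6_HEADROOM site above collapses into one flag-driven helper. A sketch of what such a computation presumably looks like (not the verbatim definition from the vxlan headers): GPE frames carry raw IP with no inner Ethernet header, so ETH_HLEN is budgeted only in non-GPE mode, while IPv6 costs the larger outer header:

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <net/vxlan.h>

/* Illustrative re-derivation of a vxlan_headroom()-style helper. */
static unsigned int example_vxlan_headroom(u32 flags)
{
    unsigned int outer = (flags & VXLAN_F_IPV6) ? sizeof(struct ipv6hdr)
                                                : sizeof(struct iphdr);

    return outer + sizeof(struct udphdr) + sizeof(struct vxlanhdr) +
           ((flags & VXLAN_F_GPE) ? 0 : ETH_HLEN);
}

This also explains the COLLECT_METADATA hunk: OR-ing VXLAN_F_IPV6 into the flags before the call reserves the worst-case (IPv6) headroom when the outer address family is not known in advance.
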
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 1901ec7948d8..6ac6a649d4c2 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -569,8 +569,8 @@ static void do_bottom_half_rx(struct fst_card_info *card);
static void fst_process_tx_work_q(unsigned long work_q);
static void fst_process_int_work_q(unsigned long work_q);
-static DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0);
-static DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0);
+static DECLARE_TASKLET_OLD(fst_tx_task, fst_process_tx_work_q);
+static DECLARE_TASKLET_OLD(fst_int_task, fst_process_int_work_q);
static struct fst_card_info *fst_card_array[FST_MAX_CARDS];
static spinlock_t fst_work_q_lock;
@@ -2613,6 +2613,7 @@ fst_remove_one(struct pci_dev *pdev)
for (i = 0; i < card->nports; i++) {
struct net_device *dev = port_to_dev(&card->ports[i]);
unregister_hdlc_device(dev);
+ free_netdev(dev);
}
fst_disable_intr(card);
@@ -2633,6 +2634,7 @@ fst_remove_one(struct pci_dev *pdev)
card->tx_dma_handle_card);
}
fst_card_array[card->card_no] = NULL;
+ kfree(card);
}
static struct pci_driver fst_driver = {
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 034eb6535ab7..8a0c2ea03ff9 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -34,6 +34,8 @@
#define TDM_PPPOHT_SLIC_MAXIN
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
+static int uhdlc_close(struct net_device *dev);
+
static struct ucc_tdm_info utdm_primary_info = {
.uf_info = {
.tsa = 0,
@@ -710,6 +712,7 @@ static int uhdlc_open(struct net_device *dev)
hdlc_device *hdlc = dev_to_hdlc(dev);
struct ucc_hdlc_private *priv = hdlc->priv;
struct ucc_tdm *utdm = priv->utdm;
+ int rc = 0;
if (priv->hdlc_busy != 1) {
if (request_irq(priv->ut_info->uf_info.irq,
@@ -733,10 +736,13 @@ static int uhdlc_open(struct net_device *dev)
napi_enable(&priv->napi);
netdev_reset_queue(dev);
netif_start_queue(dev);
- hdlc_open(dev);
+
+ rc = hdlc_open(dev);
+ if (rc)
+ uhdlc_close(dev);
}
- return 0;
+ return rc;
}
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
@@ -826,6 +832,8 @@ static int uhdlc_close(struct net_device *dev)
netdev_reset_queue(dev);
priv->hdlc_busy = 0;
+ hdlc_close(dev);
+
return 0;
}
@@ -1249,9 +1257,11 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
free_dev:
free_netdev(dev);
undo_uhdlc_init:
- iounmap(utdm->siram);
+ if (utdm)
+ iounmap(utdm->siram);
unmap_si_regs:
- iounmap(utdm->si_regs);
+ if (utdm)
+ iounmap(utdm->si_regs);
free_utdm:
if (uhdlc_priv->tsa)
kfree(utdm);
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 4e42954d8cbf..4f52003bf422 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -341,6 +341,9 @@ static int lapbeth_new_device(struct net_device *dev)
ASSERT_RTNL();
+ if (dev->type != ARPHRD_ETHER)
+ return -EINVAL;
+
ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", NET_NAME_UNKNOWN,
lapbeth_setup);
if (!ndev)
@@ -403,7 +406,7 @@ static int lapbeth_device_event(struct notifier_block *this,
if (dev_net(dev) != &init_net)
return NOTIFY_DONE;
- if (!dev_is_ethdev(dev))
+ if (!dev_is_ethdev(dev) && !lapbeth_get_x25_dev(dev))
return NOTIFY_DONE;
switch (event) {
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
index a58301dd0c1f..1c63ae9a3ba8 100644
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -101,17 +101,13 @@ __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
switch(sc->if_type){
case LMC_PPP:
return hdlc_type_trans(skb, sc->lmc_device);
- break;
case LMC_NET:
return htons(ETH_P_802_2);
- break;
case LMC_RAW: /* Packet type for skbuff kind of useless */
return htons(ETH_P_802_2);
- break;
default:
printk(KERN_WARNING "%s: No protocol set for this interface, assuming 802.2 (which is wrong!!)\n", sc->name);
return htons(ETH_P_802_2);
- break;
}
lmc_trace(sc->lmc_device, "lmc_proto_tye out");
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 7ad3d24195ba..138930c66ad2 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -702,7 +702,7 @@ EXPORT_SYMBOL(z8530_nop);
irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
struct z8530_dev *dev=dev_id;
- u8 uninitialized_var(intr);
+ u8 intr;
static volatile int locker=0;
int work=0;
struct z8530_irqhandler *irqs;
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 58e189ec672f..5d3cf354f6cb 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -241,6 +241,11 @@ static void ar5523_cmd_tx_cb(struct urb *urb)
}
}
+static void ar5523_cancel_tx_cmd(struct ar5523 *ar)
+{
+ usb_kill_urb(ar->tx_cmd.urb_tx);
+}
+
static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
int ilen, void *odata, int olen, int flags)
{
@@ -280,6 +285,7 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
}
if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) {
+ ar5523_cancel_tx_cmd(ar);
cmd->odata = NULL;
ar5523_err(ar, "timeout waiting for command %02x reply\n",
code);
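
The ar5523 fix kills the still-in-flight command URB once the two-second wait expires; without it, a late completion could land after the caller has unwound and written through cmd->odata. The general shape of the idiom (a sketch; submission setup elided):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/usb.h>

/* After usb_kill_urb() returns, the completion handler has either run
 * or never will, so the caller may safely free or reuse any buffers
 * the URB references.
 */
static int send_cmd_sync(struct urb *urb, struct completion *done)
{
    int ret = usb_submit_urb(urb, GFP_KERNEL);

    if (ret)
        return ret;

    if (!wait_for_completion_timeout(done, 2 * HZ)) {
        usb_kill_urb(urb);  /* end the race before unwinding */
        return -ETIMEDOUT;
    }

    return 0;
}
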
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 01e05af5ae08..a03b7535c254 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1299,29 +1299,24 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 ctrl_addr = ce_state->ctrl_addr;
- spin_lock_bh(&ce->ce_lock);
-
- /* Clear the copy-complete interrupts that will be handled here. */
+ /*
+ * Clear before handling
+ *
+ * Misc CE interrupts are not being handled, but still need
+ * to be cleared.
+ *
+ * NOTE: When the last copy engine interrupt is cleared the
+ * hardware will go to sleep. Once this happens any access to
+ * the CE registers can cause a hardware fault.
+ */
ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
- wm_regs->cc_mask);
-
- spin_unlock_bh(&ce->ce_lock);
+ wm_regs->cc_mask | wm_regs->wm_mask);
if (ce_state->recv_cb)
ce_state->recv_cb(ce_state);
if (ce_state->send_cb)
ce_state->send_cb(ce_state);
-
- spin_lock_bh(&ce->ce_lock);
-
- /*
- * Misc CE interrupts are not being handled, but still need
- * to be cleared.
- */
- ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);
-
- spin_unlock_bh(&ce->ce_lock);
}
EXPORT_SYMBOL(ath10k_ce_per_engine_service);
@@ -1372,45 +1367,55 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}
-int ath10k_ce_disable_interrupts(struct ath10k *ar)
+void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state;
u32 ctrl_addr;
- int ce_id;
- for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
- ce_state = &ce->ce_states[ce_id];
- if (ce_state->attr_flags & CE_ATTR_POLL)
- continue;
+ ce_state = &ce->ce_states[ce_id];
+ if (ce_state->attr_flags & CE_ATTR_POLL)
+ return;
- ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ ctrl_addr = ath10k_ce_base_address(ar, ce_id);
- ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
- ath10k_ce_error_intr_disable(ar, ctrl_addr);
- ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
- }
+ ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+ ath10k_ce_error_intr_disable(ar, ctrl_addr);
+ ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+}
+EXPORT_SYMBOL(ath10k_ce_disable_interrupt);
- return 0;
+void ath10k_ce_disable_interrupts(struct ath10k *ar)
+{
+ int ce_id;
+
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ ath10k_ce_disable_interrupt(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_disable_interrupts);
-void ath10k_ce_enable_interrupts(struct ath10k *ar)
+void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
- int ce_id;
struct ath10k_ce_pipe *ce_state;
+ ce_state = &ce->ce_states[ce_id];
+ if (ce_state->attr_flags & CE_ATTR_POLL)
+ return;
+
+ ath10k_ce_per_engine_handler_adjust(ce_state);
+}
+EXPORT_SYMBOL(ath10k_ce_enable_interrupt);
+
+void ath10k_ce_enable_interrupts(struct ath10k *ar)
+{
+ int ce_id;
+
/* Enable interrupts for copy engine that
* are not using polling mode.
*/
- for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
- ce_state = &ce->ce_states[ce_id];
- if (ce_state->attr_flags & CE_ATTR_POLL)
- continue;
-
- ath10k_ce_per_engine_handler_adjust(ce_state);
- }
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ ath10k_ce_enable_interrupt(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_enable_interrupts);
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index a7478c240f78..fe07521550b7 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -255,10 +255,13 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
-int ath10k_ce_disable_interrupts(struct ath10k *ar);
+void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id);
+void ath10k_ce_disable_interrupts(struct ath10k *ar);
+void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id);
void ath10k_ce_enable_interrupts(struct ath10k *ar);
void ath10k_ce_dump_registers(struct ath10k *ar,
struct ath10k_fw_crash_data *crash_data);
+
void ath10k_ce_alloc_rri(struct ath10k *ar);
void ath10k_ce_free_rri(struct ath10k *ar);
@@ -369,18 +372,14 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
-#define CE_INTERRUPT_SUMMARY (GENMASK(CE_COUNT_MAX - 1, 0))
static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
- if (!ar->hw_params.per_ce_irq)
- return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(
- ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS +
- CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
- else
- return CE_INTERRUPT_SUMMARY;
+ return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(
+ ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS +
+ CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
}
/* Host software's Copy Engine configuration. */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 383d4fa555a8..09e77be6e314 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -118,7 +118,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -154,7 +153,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -217,7 +215,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -252,7 +249,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -287,7 +283,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -325,7 +320,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -366,7 +360,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -414,7 +407,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -459,7 +451,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -494,7 +485,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -531,7 +521,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -573,7 +562,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -601,7 +589,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = TARGET_HL_TLV_NUM_WDS_ENTRIES,
.target_64bit = true,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC,
- .per_ce_irq = true,
.shadow_reg_support = true,
.rri_on_ddr = true,
.hw_filter_reset_required = false,
@@ -2157,7 +2144,7 @@ static int ath10k_init_uart(struct ath10k *ar)
static int ath10k_init_hw_params(struct ath10k *ar)
{
- const struct ath10k_hw_params *uninitialized_var(hw_params);
+ const struct ath10k_hw_params *hw_params;
int i;
for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 04c50a26a4f4..34db968c4bd0 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1138,7 +1138,7 @@ void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
u32 sset, u8 *data)
{
if (sset == ETH_SS_STATS)
- memcpy(data, *ath10k_gstrings_stats,
+ memcpy(data, ath10k_gstrings_stats,
sizeof(ath10k_gstrings_stats));
}
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index ae4c9edc445c..705ab83cdff4 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -590,9 +590,6 @@ struct ath10k_hw_params {
/* Target rx ring fill level */
u32 rx_ring_fill_level;
- /* target supporting per ce IRQ */
- bool per_ce_irq;
-
/* target supporting shadow register for ce write */
bool shadow_reg_support;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 3026eb54a7f2..afa3cc92fc2a 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -856,11 +856,36 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
return 0;
}
+static void ath10k_peer_map_cleanup(struct ath10k *ar, struct ath10k_peer *peer)
+{
+ int peer_id, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for_each_set_bit(peer_id, peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS) {
+ ar->peer_map[peer_id] = NULL;
+ }
+
+ /* Double check that peer is properly un-referenced from
+ * the peer_map
+ */
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ if (ar->peer_map[i] == peer) {
+ ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
+ peer->addr, peer, i);
+ ar->peer_map[i] = NULL;
+ }
+ }
+
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+}
+
static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
{
struct ath10k_peer *peer, *tmp;
- int peer_id;
- int i;
lockdep_assert_held(&ar->conf_mutex);
@@ -872,25 +897,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
- for_each_set_bit(peer_id, peer->peer_ids,
- ATH10K_MAX_NUM_PEER_IDS) {
- ar->peer_map[peer_id] = NULL;
- }
-
- /* Double check that peer is properly un-referenced from
- * the peer_map
- */
- for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
- if (ar->peer_map[i] == peer) {
- ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
- peer->addr, peer, i);
- ar->peer_map[i] = NULL;
- }
- }
-
- list_del(&peer->list);
- kfree(peer);
- ar->num_peers--;
+ ath10k_peer_map_cleanup(ar, peer);
}
spin_unlock_bh(&ar->data_lock);
}
@@ -6641,10 +6648,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/* Clean up the peer object as well since we
* must have failed to do this above.
*/
- list_del(&peer->list);
- ar->peer_map[i] = NULL;
- kfree(peer);
- ar->num_peers--;
+ ath10k_peer_map_cleanup(ar, peer);
}
}
spin_unlock_bh(&ar->data_lock);
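The new ath10k_peer_map_cleanup() states its locking contract with
lockdep_assert_held() rather than a comment. A hedged sketch of the idiom
(names hypothetical):

#include <linux/lockdep.h>
#include <linux/mutex.h>

/* lockdep_assert_held() turns "caller must hold the lock" into a
 * runtime check on lockdep-enabled kernels; it compiles away
 * otherwise. */
static void table_clear_entry(struct mutex *lock, void **table, int idx)
{
        lockdep_assert_held(lock);
        table[idx] = NULL;
}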
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 0f055e577749..464dd0246a97 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1960,8 +1960,9 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
ath10k_pci_irq_enable(ar);
ath10k_pci_rx_post(ar);
- pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
- ar_pci->link_ctl);
+ pcie_capability_clear_and_set_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC,
+ ar_pci->link_ctl & PCI_EXP_LNKCTL_ASPMC);
return 0;
}
@@ -2818,8 +2819,8 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar,
pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
&ar_pci->link_ctl);
- pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
- ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
+ pcie_capability_clear_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC);
/*
* Bring the target up cleanly.
@@ -3769,18 +3770,22 @@ static struct pci_driver ath10k_pci_driver = {
static int __init ath10k_pci_init(void)
{
- int ret;
+ int ret1, ret2;
- ret = pci_register_driver(&ath10k_pci_driver);
- if (ret)
+ ret1 = pci_register_driver(&ath10k_pci_driver);
+ if (ret1)
printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
- ret);
+ ret1);
- ret = ath10k_ahb_init();
- if (ret)
- printk(KERN_ERR "ahb init failed: %d\n", ret);
+ ret2 = ath10k_ahb_init();
+ if (ret2)
+ printk(KERN_ERR "ahb init failed: %d\n", ret2);
- return ret;
+ if (ret1 && ret2)
+ return ret1;
+
+ /* registered on at least one bus */
+ return 0;
}
module_init(ath10k_pci_init);
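ath10k_pci_init() now treats the PCI and AHB registrations independently
and only fails when both do. A compact standalone sketch of that
"at least one backend" pattern (names hypothetical):

/* Succeed when at least one of two independent registrations
 * succeeds; propagate the first error only if both fail. */
static int register_backends(int (*reg_a)(void), int (*reg_b)(void))
{
        int ret_a = reg_a();
        int ret_b = reg_b();

        if (ret_a && ret_b)
                return ret_a;   /* both failed */

        return 0;               /* registered on at least one bus */
}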
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index d4589b2ab3b6..e8700f0b23f7 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
*/
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -821,12 +822,20 @@ static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
{
- ath10k_ce_disable_interrupts(ar);
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int id;
+
+ for (id = 0; id < CE_COUNT_MAX; id++)
+ disable_irq(ar_snoc->ce_irqs[id].irq_line);
}
static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
{
- ath10k_ce_enable_interrupts(ar);
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int id;
+
+ for (id = 0; id < CE_COUNT_MAX; id++)
+ enable_irq(ar_snoc->ce_irqs[id].irq_line);
}
static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
@@ -919,6 +928,7 @@ static int ath10k_snoc_hif_start(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX);
napi_enable(&ar->napi);
ath10k_snoc_irq_enable(ar);
ath10k_snoc_rx_post(ar);
@@ -1042,6 +1052,8 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar,
goto err_free_rri;
}
+ ath10k_ce_enable_interrupts(ar);
+
return 0;
err_free_rri:
@@ -1156,7 +1168,9 @@ static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
return IRQ_HANDLED;
}
- ath10k_snoc_irq_disable(ar);
+ ath10k_ce_disable_interrupt(ar, ce_id);
+ set_bit(ce_id, ar_snoc->pending_ce_irqs);
+
napi_schedule(&ar->napi);
return IRQ_HANDLED;
@@ -1165,20 +1179,25 @@ static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
{
struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
int done = 0;
+ int ce_id;
if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
napi_complete(ctx);
return done;
}
- ath10k_ce_per_engine_service_any(ar);
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ if (test_and_clear_bit(ce_id, ar_snoc->pending_ce_irqs)) {
+ ath10k_ce_per_engine_service(ar, ce_id);
+ ath10k_ce_enable_interrupt(ar, ce_id);
+ }
+
done = ath10k_htt_txrx_compl_task(ar, budget);
- if (done < budget) {
+ if (done < budget)
napi_complete(ctx);
- ath10k_snoc_irq_enable(ar);
- }
return done;
}
@@ -1192,13 +1211,12 @@ static void ath10k_snoc_init_napi(struct ath10k *ar)
static int ath10k_snoc_request_irq(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- int irqflags = IRQF_TRIGGER_RISING;
int ret, id;
for (id = 0; id < CE_COUNT_MAX; id++) {
ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
ath10k_snoc_per_engine_handler,
- irqflags, ce_name[id], ar);
+ IRQF_NO_AUTOEN, ce_name[id], ar);
if (ret) {
ath10k_err(ar,
"failed to register IRQ handler for CE %d: %d",
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index 9db823e46314..d61ca374fdf6 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -81,6 +81,7 @@ struct ath10k_snoc {
struct ath10k_clk_info *clk;
struct ath10k_qmi *qmi;
unsigned long flags;
+ DECLARE_BITMAP(pending_ce_irqs, CE_COUNT_MAX);
};
static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
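Together with the per-CE enable/disable helpers added earlier, the
pending_ce_irqs bitmap lets the snoc IRQ handler mask only the engine
that fired and record it, while the NAPI poll services and unmasks
exactly the recorded engines. The kernel code uses atomic
set_bit()/test_and_clear_bit(); a simplified single-threaded model of
the flow (names hypothetical):

#include <stdio.h>

#define NENG 12

static unsigned long pending;           /* one bit per engine */

/* IRQ side: mask the engine that fired and record it. */
static void on_irq(int id)
{
        pending |= 1UL << id;
}

/* Poll side: service and re-enable only the recorded engines. */
static void poll(void)
{
        for (int id = 0; id < NENG; id++)
                if (pending & (1UL << id)) {
                        pending &= ~(1UL << id);
                        printf("service + re-enable CE %d\n", id);
                }
}

int main(void)
{
        on_irq(3);
        on_irq(7);
        poll();                         /* services CE 3 and CE 7 */
        return 0;
}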
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 315d20f5c8eb..ee1c86bb5078 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -707,6 +707,10 @@ ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
}
ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
arg->desc_id = ev->desc_id;
arg->status = ev->status;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 796bd93c599b..4adbe3ab9c87 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -9462,7 +9462,5 @@ void ath10k_wmi_detach(struct ath10k *ar)
}
cancel_work_sync(&ar->svc_rdy_work);
-
- if (ar->svc_rdy_skb)
- dev_kfree_skb(ar->svc_rdy_skb);
+ dev_kfree_skb(ar->svc_rdy_skb);
}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 01163b333945..92f5c8e83090 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -529,7 +529,7 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
ee->ee_n_piers[mode]++;
freq2 = (val >> 8) & 0xff;
- if (!freq2)
+ if (!freq2 || i >= max)
break;
pc[i++].freq = ath5k_eeprom_bin2freq(ee,
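The extra i >= max test matters because pc[i++] is written before the
loop re-checks anything: a malformed EEPROM with more entries than the
caller allocated would otherwise overflow the array. A standalone sketch
of the guarded copy (names hypothetical):

#include <stdio.h>

/* Copy zero-terminated entries into dst, never writing more than
 * max elements - the bound is checked before each write. */
static int copy_freqs(const int *src, int *dst, int max)
{
        int i = 0;

        while (*src) {
                if (i >= max)
                        break;
                dst[i++] = *src++;
        }
        return i;
}

int main(void)
{
        int src[] = { 2412, 2437, 2462, 0 };
        int dst[2];

        printf("copied %d\n", copy_freqs(src, dst, 2));  /* copied 2 */
        return 0;
}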
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
index bde5a10d470c..af98e871199d 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.c
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -246,7 +246,7 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
return -EACCES;
}
- size = sizeof(cid) + sizeof(addr) + sizeof(param);
+ size = sizeof(cid) + sizeof(addr) + sizeof(*param);
if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index c68848819a52..9b88d96bfe96 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -960,8 +960,8 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
* Thus the possibility of ar->htc_target being NULL
* via ath6kl_recv_complete -> ath6kl_usb_io_comp_work.
*/
- if (WARN_ON_ONCE(!target)) {
- ath6kl_err("Target not yet initialized\n");
+ if (!target) {
+ ath6kl_dbg(ATH6KL_DBG_HTC, "Target not yet initialized\n");
status = -EINVAL;
goto free_skb;
}
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index aa1c71a76ef7..811fad6d60c0 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1575,7 +1575,7 @@ static int ath6kl_init_upload(struct ath6kl *ar)
int ath6kl_init_hw_params(struct ath6kl *ar)
{
- const struct ath6kl_hw *uninitialized_var(hw);
+ const struct ath6kl_hw *hw;
int i;
for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 63019c3de034..26023e3b4b9d 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -136,8 +136,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
ah = sc->sc_ah;
ath9k_hw_name(ah, hw_name, sizeof(hw_name));
- wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
- hw_name, (unsigned long)mem, irq);
+ wiphy_info(hw->wiphy, "%s mem=0x%p, irq=%d\n",
+ hw_name, mem, irq);
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 2fe12b0de5b4..dea8a998fb62 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -1099,17 +1099,22 @@ static bool ath9k_hw_verify_hang(struct ath_hw *ah, unsigned int queue)
{
u32 dma_dbg_chain, dma_dbg_complete;
u8 dcu_chain_state, dcu_complete_state;
+ unsigned int dbg_reg, reg_offset;
int i;
- for (i = 0; i < NUM_STATUS_READS; i++) {
- if (queue < 6)
- dma_dbg_chain = REG_READ(ah, AR_DMADBG_4);
- else
- dma_dbg_chain = REG_READ(ah, AR_DMADBG_5);
+ if (queue < 6) {
+ dbg_reg = AR_DMADBG_4;
+ reg_offset = queue * 5;
+ } else {
+ dbg_reg = AR_DMADBG_5;
+ reg_offset = (queue - 6) * 5;
+ }
+ for (i = 0; i < NUM_STATUS_READS; i++) {
+ dma_dbg_chain = REG_READ(ah, dbg_reg);
dma_dbg_complete = REG_READ(ah, AR_DMADBG_6);
- dcu_chain_state = (dma_dbg_chain >> (5 * queue)) & 0x1f;
+ dcu_chain_state = (dma_dbg_chain >> reg_offset) & 0x1f;
dcu_complete_state = dma_dbg_complete & 0x3;
if ((dcu_chain_state != 0x6) || (dcu_complete_state != 0x1))
@@ -1128,6 +1133,7 @@ static bool ar9003_hw_detect_mac_hang(struct ath_hw *ah)
u8 dcu_chain_state, dcu_complete_state;
bool dcu_wait_frdone = false;
unsigned long chk_dcu = 0;
+ unsigned int reg_offset;
unsigned int i = 0;
dma_dbg_4 = REG_READ(ah, AR_DMADBG_4);
@@ -1139,12 +1145,15 @@ static bool ar9003_hw_detect_mac_hang(struct ath_hw *ah)
goto exit;
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (i < 6)
+ if (i < 6) {
chk_dbg = dma_dbg_4;
- else
+ reg_offset = i * 5;
+ } else {
chk_dbg = dma_dbg_5;
+ reg_offset = (i - 6) * 5;
+ }
- dcu_chain_state = (chk_dbg >> (5 * i)) & 0x1f;
+ dcu_chain_state = (chk_dbg >> reg_offset) & 0x1f;
if (dcu_chain_state == 0x6) {
dcu_wait_frdone = true;
chk_dcu |= BIT(i);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 859a865c5995..8d98347e0ddf 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -1284,7 +1284,7 @@ void ath9k_get_et_strings(struct ieee80211_hw *hw,
u32 sset, u8 *data)
{
if (sset == ETH_SS_STATS)
- memcpy(data, *ath9k_gstrings_stats,
+ memcpy(data, ath9k_gstrings_stats,
sizeof(ath9k_gstrings_stats));
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index c8c7afe0e343..3aa915d21554 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -244,11 +244,11 @@ static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev,
ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
skb, txok);
if (txok) {
- TX_STAT_INC(skb_success);
- TX_STAT_ADD(skb_success_bytes, ln);
+ TX_STAT_INC(hif_dev, skb_success);
+ TX_STAT_ADD(hif_dev, skb_success_bytes, ln);
}
else
- TX_STAT_INC(skb_failed);
+ TX_STAT_INC(hif_dev, skb_failed);
}
}
@@ -302,7 +302,7 @@ static void hif_usb_tx_cb(struct urb *urb)
hif_dev->tx.tx_buf_cnt++;
if (!(hif_dev->tx.flags & HIF_USB_TX_STOP))
__hif_usb_tx(hif_dev); /* Check for pending SKBs */
- TX_STAT_INC(buf_completed);
+ TX_STAT_INC(hif_dev, buf_completed);
spin_unlock(&hif_dev->tx.tx_lock);
}
@@ -353,7 +353,7 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
tx_buf->len += tx_buf->offset;
__skb_queue_tail(&tx_buf->skb_queue, nskb);
- TX_STAT_INC(skb_queued);
+ TX_STAT_INC(hif_dev, skb_queued);
}
usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev,
@@ -368,11 +368,10 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
__skb_queue_head_init(&tx_buf->skb_queue);
list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
hif_dev->tx.tx_buf_cnt++;
+ } else {
+ TX_STAT_INC(hif_dev, buf_queued);
}
- if (!ret)
- TX_STAT_INC(buf_queued);
-
return ret;
}
@@ -515,7 +514,7 @@ static void hif_usb_sta_drain(void *hif_handle, u8 idx)
ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
skb, false);
hif_dev->tx.tx_skb_cnt--;
- TX_STAT_INC(skb_failed);
+ TX_STAT_INC(hif_dev, skb_failed);
}
}
@@ -535,6 +534,24 @@ static struct ath9k_htc_hif hif_usb = {
.send = hif_usb_send,
};
+/* Free a remain_skb allocated in ath9k_hif_usb_rx_stream in case
+ * ath9k_hif_usb_rx_stream is never called again to process the
+ * buffer and free it.
+ */
+static void ath9k_hif_usb_free_rx_remain_skb(struct hif_device_usb *hif_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hif_dev->rx_lock, flags);
+ if (hif_dev->remain_skb) {
+ dev_kfree_skb_any(hif_dev->remain_skb);
+ hif_dev->remain_skb = NULL;
+ hif_dev->rx_remain_len = 0;
+ RX_STAT_INC(hif_dev, skb_dropped);
+ }
+ spin_unlock_irqrestore(&hif_dev->rx_lock, flags);
+}
+
static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
struct sk_buff *skb)
{
@@ -562,11 +579,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
memcpy(ptr, skb->data, rx_remain_len);
rx_pkt_len += rx_remain_len;
- hif_dev->rx_remain_len = 0;
skb_put(remain_skb, rx_pkt_len);
skb_pool[pool_index++] = remain_skb;
-
+ hif_dev->remain_skb = NULL;
+ hif_dev->rx_remain_len = 0;
} else {
index = rx_remain_len;
}
@@ -585,16 +602,21 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
pkt_len = get_unaligned_le16(ptr + index);
pkt_tag = get_unaligned_le16(ptr + index + 2);
+ /* An invalid pkt_tag or pkt_len invalidates the whole input
+ * SKB: it is dropped, and any packets already gathered in
+ * skb_pool are dropped with it.
+ */
if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
- RX_STAT_INC(skb_dropped);
- return;
+ RX_STAT_INC(hif_dev, skb_dropped);
+ goto invalid_pkt;
}
if (pkt_len > 2 * MAX_RX_BUF_SIZE) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: invalid pkt_len (%x)\n", pkt_len);
- RX_STAT_INC(skb_dropped);
- return;
+ RX_STAT_INC(hif_dev, skb_dropped);
+ goto invalid_pkt;
}
pad_len = 4 - (pkt_len & 0x3);
@@ -606,11 +628,6 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
if (index > MAX_RX_BUF_SIZE) {
spin_lock(&hif_dev->rx_lock);
- hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
- hif_dev->rx_transfer_len =
- MAX_RX_BUF_SIZE - chk_idx - 4;
- hif_dev->rx_pad_len = pad_len;
-
nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
if (!nskb) {
dev_err(&hif_dev->udev->dev,
@@ -618,8 +635,14 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
spin_unlock(&hif_dev->rx_lock);
goto err;
}
+
+ hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
+ hif_dev->rx_transfer_len =
+ MAX_RX_BUF_SIZE - chk_idx - 4;
+ hif_dev->rx_pad_len = pad_len;
+
skb_reserve(nskb, 32);
- RX_STAT_INC(skb_allocated);
+ RX_STAT_INC(hif_dev, skb_allocated);
memcpy(nskb->data, &(skb->data[chk_idx+4]),
hif_dev->rx_transfer_len);
@@ -640,7 +663,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
goto err;
}
skb_reserve(nskb, 32);
- RX_STAT_INC(skb_allocated);
+ RX_STAT_INC(hif_dev, skb_allocated);
memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
skb_put(nskb, pkt_len);
@@ -650,11 +673,18 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
err:
for (i = 0; i < pool_index; i++) {
- RX_STAT_ADD(skb_completed_bytes, skb_pool[i]->len);
+ RX_STAT_ADD(hif_dev, skb_completed_bytes, skb_pool[i]->len);
ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
skb_pool[i]->len, USB_WLAN_RX_PIPE);
- RX_STAT_INC(skb_completed);
+ RX_STAT_INC(hif_dev, skb_completed);
+ }
+ return;
+invalid_pkt:
+ for (i = 0; i < pool_index; i++) {
+ dev_kfree_skb_any(skb_pool[i]);
+ RX_STAT_INC(hif_dev, skb_dropped);
}
+ return;
}
static void ath9k_hif_usb_rx_cb(struct urb *urb)
@@ -709,14 +739,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
struct hif_device_usb *hif_dev = rx_buf->hif_dev;
struct sk_buff *skb = rx_buf->skb;
- struct sk_buff *nskb;
int ret;
if (!skb)
return;
if (!hif_dev)
- goto free;
+ goto free_skb;
switch (urb->status) {
case 0:
@@ -725,7 +754,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
case -ECONNRESET:
case -ENODEV:
case -ESHUTDOWN:
- goto free;
+ goto free_skb;
default:
skb_reset_tail_pointer(skb);
skb_trim(skb, 0);
@@ -736,25 +765,27 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
if (likely(urb->actual_length != 0)) {
skb_put(skb, urb->actual_length);
- /* Process the command first */
+ /*
+ * Process the command first.
+ * skb is either freed here or handed off to another
+ * callback, which then owns and frees it.
+ */
ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
skb->len, USB_REG_IN_PIPE);
-
- nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
- if (!nskb) {
+ skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
+ if (!skb) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: REG_IN memory allocation failure\n");
- urb->context = NULL;
- return;
+ goto free_rx_buf;
}
- rx_buf->skb = nskb;
+ rx_buf->skb = skb;
usb_fill_int_urb(urb, hif_dev->udev,
usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
- nskb->data, MAX_REG_IN_BUF_SIZE,
+ skb->data, MAX_REG_IN_BUF_SIZE,
ath9k_hif_usb_reg_in_cb, rx_buf, 1);
}
@@ -763,12 +794,13 @@ resubmit:
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
usb_unanchor_urb(urb);
- goto free;
+ goto free_skb;
}
return;
-free:
+free_skb:
kfree_skb(skb);
+free_rx_buf:
kfree(rx_buf);
urb->context = NULL;
}
@@ -781,14 +813,10 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_buf, list) {
- usb_get_urb(tx_buf->urb);
- spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
- usb_kill_urb(tx_buf->urb);
list_del(&tx_buf->list);
usb_free_urb(tx_buf->urb);
kfree(tx_buf->buf);
kfree(tx_buf);
- spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
}
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
@@ -858,6 +886,7 @@ err:
static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
{
usb_kill_anchored_urbs(&hif_dev->rx_submitted);
+ ath9k_hif_usb_free_rx_remain_skb(hif_dev);
}
static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
@@ -1330,10 +1359,24 @@ static int send_eject_command(struct usb_interface *interface)
static int ath9k_hif_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in, *int_out;
struct usb_device *udev = interface_to_usbdev(interface);
+ struct usb_host_interface *alt;
struct hif_device_usb *hif_dev;
int ret = 0;
+ /* Verify the expected endpoints are present */
+ alt = interface->cur_altsetting;
+ if (usb_find_common_endpoints(alt, &bulk_in, &bulk_out, &int_in, &int_out) < 0 ||
+ usb_endpoint_num(bulk_in) != USB_WLAN_RX_PIPE ||
+ usb_endpoint_num(bulk_out) != USB_WLAN_TX_PIPE ||
+ usb_endpoint_num(int_in) != USB_REG_IN_PIPE ||
+ usb_endpoint_num(int_out) != USB_REG_OUT_PIPE) {
+ dev_err(&udev->dev,
+ "ath9k_htc: Device endpoint numbers are not the expected ones\n");
+ return -ENODEV;
+ }
+
if (id->driver_info == STORAGE_DEVICE)
return send_eject_command(interface);
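The probe check rejects interfaces whose endpoint numbers differ from the
pipes the driver later hard-codes, so a malicious or broken descriptor
cannot steer transfers to the wrong endpoint type. A hedged sketch of the
same validation for a driver expecting one bulk pair (endpoint numbers
hypothetical):

#include <linux/usb.h>

static int verify_endpoints(struct usb_interface *intf)
{
        struct usb_endpoint_descriptor *bulk_in, *bulk_out;

        /* Fails unless one bulk-in and one bulk-out endpoint exist. */
        if (usb_find_common_endpoints(intf->cur_altsetting,
                                      &bulk_in, &bulk_out, NULL, NULL))
                return -ENODEV;

        if (usb_endpoint_num(bulk_in) != 1 ||   /* hypothetical numbers */
            usb_endpoint_num(bulk_out) != 2)
                return -ENODEV;

        return 0;
}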
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 9f64e32381f9..232e93dfbc83 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -325,14 +325,18 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
}
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
-
-#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
-#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a)
-#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++)
-#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a)
-#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++
-
-#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
+#define __STAT_SAFE(hif_dev, expr) do { ((hif_dev)->htc_handle->drv_priv ? (expr) : 0); } while (0)
+#define CAB_STAT_INC(priv) do { ((priv)->debug.tx_stats.cab_queued++); } while (0)
+#define TX_QSTAT_INC(priv, q) do { ((priv)->debug.tx_stats.queue_stats[q]++); } while (0)
+
+#define TX_STAT_INC(hif_dev, c) \
+ __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.tx_stats.c++)
+#define TX_STAT_ADD(hif_dev, c, a) \
+ __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.tx_stats.c += a)
+#define RX_STAT_INC(hif_dev, c) \
+ __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.skbrx_stats.c++)
+#define RX_STAT_ADD(hif_dev, c, a) \
+ __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.skbrx_stats.c += a)
void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
struct ath_rx_status *rs);
@@ -372,13 +376,13 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
struct ethtool_stats *stats, u64 *data);
#else
-#define TX_STAT_INC(c) do { } while (0)
-#define TX_STAT_ADD(c, a) do { } while (0)
-#define RX_STAT_INC(c) do { } while (0)
-#define RX_STAT_ADD(c, a) do { } while (0)
-#define CAB_STAT_INC do { } while (0)
+#define TX_STAT_INC(hif_dev, c) do { } while (0)
+#define TX_STAT_ADD(hif_dev, c, a) do { } while (0)
+#define RX_STAT_INC(hif_dev, c) do { } while (0)
+#define RX_STAT_ADD(hif_dev, c, a) do { } while (0)
-#define TX_QSTAT_INC(c) do { } while (0)
+#define CAB_STAT_INC(priv)
+#define TX_QSTAT_INC(priv, c)
static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
struct ath_rx_status *rs)
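The reworked macros take hif_dev explicitly so they can test drv_priv,
which stays NULL until probe finishes (see the htc_drv_init.c change
below, which assigns it only after setup succeeds). A standalone model of
the guard (struct layout hypothetical):

#include <stdio.h>

struct stats { long tx_ok; };
struct dev   { struct stats *debug; };  /* NULL until probe completes */

#define STAT_SAFE(d, expr) ((d)->debug ? (expr) : 0)
#define TX_OK_INC(d)       STAT_SAFE(d, (d)->debug->tx_ok++)

int main(void)
{
        struct stats st = { 0 };
        struct dev d = { NULL };

        TX_OK_INC(&d);                  /* safe no-op before attach */
        d.debug = &st;
        TX_OK_INC(&d);
        printf("%ld\n", st.tx_ok);      /* prints 1 */
        return 0;
}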
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index b3ed65e5c4da..e79bbcd3279a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -428,7 +428,7 @@ void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
u32 sset, u8 *data)
{
if (sset == ETH_SS_STATS)
- memcpy(data, *ath9k_htc_gstrings_stats,
+ memcpy(data, ath9k_htc_gstrings_stats,
sizeof(ath9k_htc_gstrings_stats));
}
@@ -491,7 +491,7 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME,
priv->hw->wiphy->debugfsdir);
- if (!priv->debug.debugfs_phy)
+ if (IS_ERR(priv->debug.debugfs_phy))
return -ENOMEM;
ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 11054c17a9b5..eaaafa64a3ee 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -944,7 +944,6 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
priv->hw = hw;
priv->htc = htc_handle;
priv->dev = dev;
- htc_handle->drv_priv = priv;
SET_IEEE80211_DEV(hw, priv->dev);
ret = ath9k_htc_wait_for_target(priv);
@@ -965,6 +964,8 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
if (ret)
goto err_init;
+ htc_handle->drv_priv = priv;
+
return 0;
err_init:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index eeaf63de71bf..be4fa41bdb12 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -106,20 +106,20 @@ static inline enum htc_endpoint_id get_htc_epid(struct ath9k_htc_priv *priv,
switch (qnum) {
case 0:
- TX_QSTAT_INC(IEEE80211_AC_VO);
+ TX_QSTAT_INC(priv, IEEE80211_AC_VO);
epid = priv->data_vo_ep;
break;
case 1:
- TX_QSTAT_INC(IEEE80211_AC_VI);
+ TX_QSTAT_INC(priv, IEEE80211_AC_VI);
epid = priv->data_vi_ep;
break;
case 2:
- TX_QSTAT_INC(IEEE80211_AC_BE);
+ TX_QSTAT_INC(priv, IEEE80211_AC_BE);
epid = priv->data_be_ep;
break;
case 3:
default:
- TX_QSTAT_INC(IEEE80211_AC_BK);
+ TX_QSTAT_INC(priv, IEEE80211_AC_BK);
epid = priv->data_bk_ep;
break;
}
@@ -323,7 +323,7 @@ static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv,
memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));
if (is_cab) {
- CAB_STAT_INC;
+ CAB_STAT_INC(priv);
tx_ctl->epid = priv->cab_ep;
return;
}
@@ -647,9 +647,10 @@ void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event)
struct ath9k_htc_tx_event *tx_pend;
int i;
- for (i = 0; i < txs->cnt; i++) {
- WARN_ON(txs->cnt > HTC_MAX_TX_STATUS);
+ if (WARN_ON_ONCE(txs->cnt > HTC_MAX_TX_STATUS))
+ return;
+ for (i = 0; i < txs->cnt; i++) {
__txs = &txs->txstatus[i];
skb = ath9k_htc_tx_get_packet(priv, __txs);
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 994ec48b2f66..99667aba289d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -114,7 +114,13 @@ static void htc_process_conn_rsp(struct htc_target *target,
if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
epid = svc_rspmsg->endpoint_id;
- if (epid < 0 || epid >= ENDPOINT_MAX)
+
+ /* Check that the epid received for attaching a new service
+ * is valid. ENDPOINT0 cannot be used here: it is already
+ * reserved for the HTC_CTRL_RSVD_SVC service and must not
+ * be modified.
+ */
+ if (epid <= ENDPOINT0 || epid >= ENDPOINT_MAX)
return;
service_id = be16_to_cpu(svc_rspmsg->service_id);
@@ -364,40 +370,34 @@ ret:
}
static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 len)
{
uint32_t *pattern = (uint32_t *)skb->data;
- switch (*pattern) {
- case 0x33221199:
- {
+ if (*pattern == 0x33221199 && len >= sizeof(struct htc_panic_bad_vaddr)) {
struct htc_panic_bad_vaddr *htc_panic;
htc_panic = (struct htc_panic_bad_vaddr *) skb->data;
dev_err(htc_handle->dev, "ath: firmware panic! "
"exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n",
htc_panic->exccause, htc_panic->pc,
htc_panic->badvaddr);
- break;
- }
- case 0x33221299:
- {
+ return;
+ }
+ if (*pattern == 0x33221299) {
struct htc_panic_bad_epid *htc_panic;
htc_panic = (struct htc_panic_bad_epid *) skb->data;
dev_err(htc_handle->dev, "ath: firmware panic! "
"bad epid: 0x%08x\n", htc_panic->epid);
- break;
- }
- default:
- dev_err(htc_handle->dev, "ath: unknown panic pattern!\n");
- break;
+ return;
}
+ dev_err(htc_handle->dev, "ath: unknown panic pattern!\n");
}
/*
* HTC Messages are handled directly here and the obtained SKB
* is freed.
*
- * Service messages (Data, WMI) passed to the corresponding
+ * Service messages (Data, WMI) are passed to the corresponding
* endpoint RX handlers, which have to free the SKB.
*/
void ath9k_htc_rx_msg(struct htc_target *htc_handle,
@@ -411,16 +411,26 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
if (!htc_handle || !skb)
return;
+ /* A valid message requires len >= 8.
+ *
+ * sizeof(struct htc_frame_hdr) == 8
+ * sizeof(struct htc_ready_msg) == 8
+ * sizeof(struct htc_panic_bad_vaddr) == 16
+ * sizeof(struct htc_panic_bad_epid) == 8
+ */
+ if (unlikely(len < sizeof(struct htc_frame_hdr)))
+ goto invalid;
htc_hdr = (struct htc_frame_hdr *) skb->data;
epid = htc_hdr->endpoint_id;
if (epid == 0x99) {
- ath9k_htc_fw_panic_report(htc_handle, skb);
+ ath9k_htc_fw_panic_report(htc_handle, skb, len);
kfree_skb(skb);
return;
}
if (epid < 0 || epid >= ENDPOINT_MAX) {
+invalid:
if (pipe_id != USB_REG_IN_PIPE)
dev_kfree_skb_any(skb);
else
@@ -432,21 +442,30 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
/* Handle trailer */
if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) {
- if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000)
+ if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000) {
/* Move past the Watchdog pattern */
htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
+ len -= 4;
+ }
}
/* Get the message ID */
+ if (unlikely(len < sizeof(struct htc_frame_hdr) + sizeof(__be16)))
+ goto invalid;
msg_id = (__be16 *) ((void *) htc_hdr +
sizeof(struct htc_frame_hdr));
/* Now process HTC messages */
switch (be16_to_cpu(*msg_id)) {
case HTC_MSG_READY_ID:
+ if (unlikely(len < sizeof(struct htc_ready_msg)))
+ goto invalid;
htc_process_target_rdy(htc_handle, htc_hdr);
break;
case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID:
+ if (unlikely(len < sizeof(struct htc_frame_hdr) +
+ sizeof(struct htc_conn_svc_rspmsg)))
+ goto invalid;
htc_process_conn_rsp(htc_handle, htc_hdr);
break;
default:
@@ -465,6 +484,8 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
if (endpoint->ep_callbacks.rx)
endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv,
skb, epid);
+ else
+ goto invalid;
}
}
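Every cast of skb->data in ath9k_htc_rx_msg() is now preceded by a length
check, and len is reduced when the watchdog trailer is skipped so the
later checks stay accurate. A generic standalone sketch of bounds-checked
parsing (header layout hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr { uint8_t ep; uint8_t flags; uint16_t len; }; /* hypothetical */

/* Return the endpoint id, or -1 when the buffer cannot hold the
 * header plus the 16-bit message id that follows it. */
static int parse_msg(const uint8_t *buf, size_t len)
{
        struct hdr h;

        if (len < sizeof(h) + sizeof(uint16_t))
                return -1;              /* validate before reading */

        memcpy(&h, buf, sizeof(h));     /* avoid unaligned casts */
        return h.ep;
}

int main(void)
{
        uint8_t buf[6] = { 2, 0, 0, 0, 0, 0 };

        printf("%d\n", parse_msg(buf, sizeof(buf)));    /* prints 2 */
        return 0;
}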
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 17c318902cb8..68cc7803b91a 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -230,7 +230,7 @@ static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 cl
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
- unsigned long uninitialized_var(flags);
+ unsigned long flags;
u32 val;
if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index eb5751a45f26..5968fcec1173 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -200,7 +200,7 @@ void ath_cancel_work(struct ath_softc *sc)
void ath_restart_work(struct ath_softc *sc)
{
ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
- ATH_HW_CHECK_POLL_INT);
+ msecs_to_jiffies(ATH_HW_CHECK_POLL_INT));
if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
@@ -847,7 +847,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix)
{
struct ath_hw *ah = sc->sc_ah;
- int i;
+ int i, j;
struct ath_txq *txq;
bool key_in_use = false;
@@ -865,8 +865,9 @@ static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
int idx = txq->txq_tailidx;
- while (!key_in_use &&
- !list_empty(&txq->txq_fifo[idx])) {
+ for (j = 0; !key_in_use &&
+ !list_empty(&txq->txq_fifo[idx]) &&
+ j < ATH_TXFIFO_DEPTH; j++) {
key_in_use = ath9k_txq_list_has_key(
&txq->txq_fifo[idx], keyix);
INCR(idx, ATH_TXFIFO_DEPTH);
@@ -2227,7 +2228,7 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
}
ieee80211_queue_delayed_work(hw, &sc->hw_check_work,
- ATH_HW_CHECK_POLL_INT);
+ msecs_to_jiffies(ATH_HW_CHECK_POLL_INT));
}
static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
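Delayed-work APIs take a delay in jiffies, while ATH_HW_CHECK_POLL_INT is
a millisecond value, so the unconverted calls polled at a rate that
varied with CONFIG_HZ. A hedged fragment of the explicit-conversion idiom
(constant hypothetical):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define POLL_INTERVAL_MS 1000           /* hypothetical, milliseconds */

/* Convert explicitly so the interval is independent of HZ. */
static void schedule_poll(struct delayed_work *dwork)
{
        schedule_delayed_work(dwork, msecs_to_jiffies(POLL_INTERVAL_MS));
}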
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 92b2dd396436..cb3318bd3cad 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -993,8 +993,8 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->sc_ah->msi_reg = 0;
ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
- wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
- hw_name, (unsigned long)sc->mem, pdev->irq);
+ wiphy_info(hw->wiphy, "%s mem=0x%p, irq=%d\n",
+ hw_name, sc->mem, pdev->irq);
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index e7a3127395be..dd8027b8af63 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -218,6 +218,10 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
if (unlikely(wmi->stopped))
goto free_skb;
+ /* Validate the obtained SKB. */
+ if (unlikely(skb->len < sizeof(struct wmi_cmd_hdr)))
+ goto free_skb;
+
hdr = (struct wmi_cmd_hdr *) skb->data;
cmd_id = be16_to_cpu(hdr->command_id);
@@ -235,10 +239,10 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
goto free_skb;
}
- spin_unlock_irqrestore(&wmi->wmi_lock, flags);
/* WMI command response */
ath9k_wmi_rsp_callback(wmi, skb);
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
free_skb:
kfree_skb(skb);
@@ -276,7 +280,8 @@ int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
static int ath9k_wmi_cmd_issue(struct wmi *wmi,
struct sk_buff *skb,
- enum wmi_cmd_id cmd, u16 len)
+ enum wmi_cmd_id cmd, u16 len,
+ u8 *rsp_buf, u32 rsp_len)
{
struct wmi_cmd_hdr *hdr;
unsigned long flags;
@@ -286,6 +291,11 @@ static int ath9k_wmi_cmd_issue(struct wmi *wmi,
hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);
spin_lock_irqsave(&wmi->wmi_lock, flags);
+
+ /* record the rsp buffer and length */
+ wmi->cmd_rsp_buf = rsp_buf;
+ wmi->cmd_rsp_len = rsp_len;
+
wmi->last_seq_id = wmi->tx_seq_id;
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
@@ -301,8 +311,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
struct ath_common *common = ath9k_hw_common(ah);
u16 headroom = sizeof(struct htc_frame_hdr) +
sizeof(struct wmi_cmd_hdr);
+ unsigned long time_left, flags;
struct sk_buff *skb;
- unsigned long time_left;
int ret = 0;
if (ah->ah_flags & AH_UNPLUGGED)
@@ -326,11 +336,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
goto out;
}
- /* record the rsp buffer and length */
- wmi->cmd_rsp_buf = rsp_buf;
- wmi->cmd_rsp_len = rsp_len;
-
- ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
+ ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len, rsp_buf, rsp_len);
if (ret)
goto out;
@@ -338,6 +344,9 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
if (!time_left) {
ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
wmi_cmd_to_name(cmd_id));
+ spin_lock_irqsave(&wmi->wmi_lock, flags);
+ wmi->last_seq_id = 0;
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
mutex_unlock(&wmi->op_mutex);
return -ETIMEDOUT;
}
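Moving the rsp-buffer recording into ath9k_wmi_cmd_issue() under
wmi_lock, and zeroing last_seq_id under the same lock on timeout, means a
late firmware reply can never be matched against a stack buffer that has
gone out of scope. A hedged sketch of the invalidation side (field names
follow the driver loosely; not the real layout):

#include <linux/spinlock.h>
#include <linux/types.h>

struct wmi_state {
        spinlock_t lock;
        u16 last_seq_id;
        u8 *cmd_rsp_buf;
        u32 cmd_rsp_len;
};

/* On timeout, invalidate the pending command under the same lock
 * the RX path takes before matching a reply. */
static void cmd_timed_out(struct wmi_state *w)
{
        unsigned long flags;

        spin_lock_irqsave(&w->lock, flags);
        w->last_seq_id = 0;     /* a late reply now matches nothing */
        w->cmd_rsp_buf = NULL;
        w->cmd_rsp_len = 0;
        spin_unlock_irqrestore(&w->lock, flags);
}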
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 304b4d4e506a..1e38fdf92d1d 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1021,20 +1021,14 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
void *cmd;
int cmdlen = len - sizeof(struct wmi_cmd_hdr);
u16 cmdid;
- int rc, rc1;
+ int rc1;
- if (cmdlen < 0)
+ if (cmdlen < 0 || *ppos != 0)
return -EINVAL;
- wmi = kmalloc(len, GFP_KERNEL);
- if (!wmi)
- return -ENOMEM;
-
- rc = simple_write_to_buffer(wmi, len, ppos, buf, len);
- if (rc < 0) {
- kfree(wmi);
- return rc;
- }
+ wmi = memdup_user(buf, len);
+ if (IS_ERR(wmi))
+ return PTR_ERR(wmi);
cmd = (cmdlen > 0) ? &wmi[1] : NULL;
cmdid = le16_to_cpu(wmi->command_id);
@@ -1044,7 +1038,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1);
- return rc;
+ return len;
}
static const struct file_operations fops_wmi = {
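memdup_user() replaces the kmalloc()+simple_write_to_buffer() pair with
one call that returns either a kernel copy of the user buffer or an
ERR_PTR, shrinking the error handling to IS_ERR()/PTR_ERR(). A hedged
sketch of a write handler built on it (handler name hypothetical):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t demo_write(struct file *file, const char __user *buf,
                          size_t len, loff_t *ppos)
{
        void *data;

        if (*ppos != 0)                 /* reject partial writes */
                return -EINVAL;

        data = memdup_user(buf, len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        /* ... act on data ... */
        kfree(data);
        return len;                     /* whole buffer consumed */
}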
diff --git a/drivers/net/wireless/atmel/atmel_cs.c b/drivers/net/wireless/atmel/atmel_cs.c
index 7afc9c5329fb..f5fa1a95b0c1 100644
--- a/drivers/net/wireless/atmel/atmel_cs.c
+++ b/drivers/net/wireless/atmel/atmel_cs.c
@@ -73,6 +73,7 @@ struct local_info {
static int atmel_probe(struct pcmcia_device *p_dev)
{
struct local_info *local;
+ int ret;
dev_dbg(&p_dev->dev, "atmel_attach()\n");
@@ -83,8 +84,16 @@ static int atmel_probe(struct pcmcia_device *p_dev)
p_dev->priv = local;
- return atmel_config(p_dev);
-} /* atmel_attach */
+ ret = atmel_config(p_dev);
+ if (ret)
+ goto err_free_priv;
+
+ return 0;
+
+err_free_priv:
+ kfree(p_dev->priv);
+ return ret;
+}
static void atmel_detach(struct pcmcia_device *link)
{
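atmel_probe() now frees its allocation when atmel_config() fails instead
of leaking it. The usual shape is a goto ladder where each label releases
exactly what was acquired before the failing step; a generic hedged
sketch (types and helper hypothetical):

#include <linux/slab.h>

struct demo_ctx;                                    /* hypothetical */
int demo_config(struct demo_ctx *ctx, void *priv);  /* hypothetical */

static int demo_probe(struct demo_ctx *ctx)
{
        void *priv = kzalloc(64, GFP_KERNEL);
        int ret;

        if (!priv)
                return -ENOMEM;

        ret = demo_config(ctx, priv);
        if (ret)
                goto err_free_priv;     /* unwind in reverse order */

        return 0;

err_free_priv:
        kfree(priv);
        return ret;
}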
diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
index 9fc7c088a539..c0d8fc0b22fb 100644
--- a/drivers/net/wireless/broadcom/b43/b43.h
+++ b/drivers/net/wireless/broadcom/b43/b43.h
@@ -651,7 +651,7 @@ struct b43_iv {
union {
__be16 d16;
__be32 d32;
- } data __packed;
+ } __packed data;
} __packed;
@@ -1082,6 +1082,22 @@ static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
return dev->__using_pio_transfers;
}
+static inline void b43_wake_queue(struct b43_wldev *dev, int queue_prio)
+{
+ if (dev->qos_enabled)
+ ieee80211_wake_queue(dev->wl->hw, queue_prio);
+ else
+ ieee80211_wake_queue(dev->wl->hw, 0);
+}
+
+static inline void b43_stop_queue(struct b43_wldev *dev, int queue_prio)
+{
+ if (dev->qos_enabled)
+ ieee80211_stop_queue(dev->wl->hw, queue_prio);
+ else
+ ieee80211_stop_queue(dev->wl->hw, 0);
+}
+
/* Message printing */
__printf(2, 3) void b43info(struct b43_wl *wl, const char *fmt, ...);
__printf(2, 3) void b43err(struct b43_wl *wl, const char *fmt, ...);
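b43_wake_queue()/b43_stop_queue() centralize the priority-to-queue
mapping: with QoS disabled the driver registers a single mac80211 queue,
so every priority must collapse to queue 0 rather than index a queue that
does not exist. The mapping itself is one line (sketch):

/* Map a tx priority to the registered queue index. */
static inline int map_queue(bool qos_enabled, int queue_prio)
{
        return qos_enabled ? queue_prio : 0;
}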
diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c
index 1325727a74ed..8d9a337624a7 100644
--- a/drivers/net/wireless/broadcom/b43/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43/debugfs.c
@@ -493,7 +493,7 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf,
struct b43_wldev *dev;
struct b43_debugfs_fops *dfops;
struct b43_dfs_file *dfile;
- ssize_t uninitialized_var(ret);
+ ssize_t ret;
char *buf;
const size_t bufsize = 1024 * 16; /* 16 kiB buffer */
const size_t buforder = get_order(bufsize);
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 31bf71a80c26..19624133b64a 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -37,7 +37,7 @@
static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
enum b43_addrtype addrtype)
{
- u32 uninitialized_var(addr);
+ u32 addr;
switch (addrtype) {
case B43_DMA_ADDR_LOW:
@@ -1399,8 +1399,8 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
should_inject_overflow(ring)) {
/* This TX ring is full. */
unsigned int skb_mapping = skb_get_queue_mapping(skb);
- ieee80211_stop_queue(dev->wl->hw, skb_mapping);
- dev->wl->tx_queue_stopped[skb_mapping] = 1;
+ b43_stop_queue(dev, skb_mapping);
+ dev->wl->tx_queue_stopped[skb_mapping] = true;
ring->stopped = true;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
@@ -1566,11 +1566,11 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
- dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
+ dev->wl->tx_queue_stopped[ring->queue_prio] = false;
} else {
/* If the driver queue is running wake the corresponding
* mac80211 queue. */
- ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
+ b43_wake_queue(dev, ring->queue_prio);
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
}
diff --git a/drivers/net/wireless/broadcom/b43/lo.c b/drivers/net/wireless/broadcom/b43/lo.c
index 5d97cf06eceb..338b6545a1e7 100644
--- a/drivers/net/wireless/broadcom/b43/lo.c
+++ b/drivers/net/wireless/broadcom/b43/lo.c
@@ -729,7 +729,7 @@ struct b43_lo_calib *b43_calibrate_lo_setting(struct b43_wldev *dev,
};
int max_rx_gain;
struct b43_lo_calib *cal;
- struct lo_g_saved_values uninitialized_var(saved_regs);
+ struct lo_g_saved_values saved_regs;
/* Values from the "TXCTL Register and Value Table" */
u16 txctl_reg;
u16 txctl_value;
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 3432dfe1ddb4..e466324a1fd2 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -2585,7 +2585,8 @@ static void b43_request_firmware(struct work_struct *work)
start_ieee80211:
wl->hw->queues = B43_QOS_QUEUE_NUM;
- if (!modparam_qos || dev->fw.opensource)
+ if (!modparam_qos || dev->fw.opensource ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM4331)
wl->hw->queues = 1;
err = ieee80211_register_hw(wl->hw);
@@ -3600,8 +3601,8 @@ static void b43_tx_work(struct work_struct *work)
else
err = b43_dma_tx(dev, skb);
if (err == -ENOSPC) {
- wl->tx_queue_stopped[queue_num] = 1;
- ieee80211_stop_queue(wl->hw, queue_num);
+ wl->tx_queue_stopped[queue_num] = true;
+ b43_stop_queue(dev, queue_num);
skb_queue_head(&wl->tx_queue[queue_num], skb);
break;
}
@@ -3611,7 +3612,7 @@ static void b43_tx_work(struct work_struct *work)
}
if (!err)
- wl->tx_queue_stopped[queue_num] = 0;
+ wl->tx_queue_stopped[queue_num] = false;
}
#if B43_DEBUG
@@ -3625,6 +3626,7 @@ static void b43_op_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
+ u16 skb_queue_mapping;
if (unlikely(skb->len < 2 + 2 + 6)) {
/* Too short, this can't be a valid frame. */
@@ -3633,12 +3635,12 @@ static void b43_op_tx(struct ieee80211_hw *hw,
}
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
- skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
- if (!wl->tx_queue_stopped[skb->queue_mapping]) {
+ skb_queue_mapping = skb_get_queue_mapping(skb);
+ skb_queue_tail(&wl->tx_queue[skb_queue_mapping], skb);
+ if (!wl->tx_queue_stopped[skb_queue_mapping])
ieee80211_queue_work(wl->hw, &wl->tx_work);
- } else {
- ieee80211_stop_queue(wl->hw, skb->queue_mapping);
- }
+ else
+ b43_stop_queue(wl->current_dev, skb_queue_mapping);
}
static void b43_qos_params_upload(struct b43_wldev *dev,
@@ -5603,7 +5605,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
/* Initialize queues and flags. */
for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
skb_queue_head_init(&wl->tx_queue[queue_num]);
- wl->tx_queue_stopped[queue_num] = 0;
+ wl->tx_queue_stopped[queue_num] = false;
}
snprintf(chip_name, ARRAY_SIZE(chip_name),
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index 0ef62ef77af6..eb4d35cae0c9 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -5643,7 +5643,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
u8 rfctl[2];
u8 afectl_core;
u16 tmp[6];
- u16 uninitialized_var(cur_hpf1), uninitialized_var(cur_hpf2), cur_lna;
+ u16 cur_hpf1, cur_hpf2, cur_lna;
u32 real, imag;
enum nl80211_band band;
diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c
index 69f8b46c9015..468fd647d0a0 100644
--- a/drivers/net/wireless/broadcom/b43/pio.c
+++ b/drivers/net/wireless/broadcom/b43/pio.c
@@ -525,7 +525,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (total_len > (q->buffer_size - q->buffer_used)) {
/* Not enough memory on the queue. */
err = -EBUSY;
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
+ b43_stop_queue(dev, skb_get_queue_mapping(skb));
q->stopped = true;
goto out;
}
@@ -552,7 +552,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
(q->free_packet_slots == 0)) {
/* The queue is full. */
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
+ b43_stop_queue(dev, skb_get_queue_mapping(skb));
q->stopped = true;
}
@@ -587,7 +587,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
list_add(&pack->list, &q->packets_list);
if (q->stopped) {
- ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
+ b43_wake_queue(dev, q->queue_prio);
q->stopped = false;
}
}
diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c
index 058745219516..2ff005b453cd 100644
--- a/drivers/net/wireless/broadcom/b43/xmit.c
+++ b/drivers/net/wireless/broadcom/b43/xmit.c
@@ -422,10 +422,10 @@ int b43_generate_txhdr(struct b43_wldev *dev,
if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
(rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) {
unsigned int len;
- struct ieee80211_hdr *uninitialized_var(hdr);
+ struct ieee80211_hdr *hdr;
int rts_rate, rts_rate_fb;
int rts_rate_ofdm, rts_rate_fb_ofdm;
- struct b43_plcp_hdr6 *uninitialized_var(plcp);
+ struct b43_plcp_hdr6 *plcp;
struct ieee80211_rate *rts_cts_rate;
rts_cts_rate = ieee80211_get_rts_cts_rate(dev->wl->hw, info);
@@ -436,7 +436,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb);
if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
- struct ieee80211_cts *uninitialized_var(cts);
+ struct ieee80211_cts *cts;
switch (dev->fw.hdr_format) {
case B43_FW_HDR_598:
@@ -458,7 +458,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
mac_ctl |= B43_TXH_MAC_SENDCTS;
len = sizeof(struct ieee80211_cts);
} else {
- struct ieee80211_rts *uninitialized_var(rts);
+ struct ieee80211_rts *rts;
switch (dev->fw.hdr_format) {
case B43_FW_HDR_598:
@@ -650,8 +650,8 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
const struct b43_rxhdr_fw4 *rxhdr = _rxhdr;
__le16 fctl;
u16 phystat0, phystat3;
- u16 uninitialized_var(chanstat), uninitialized_var(mactime);
- u32 uninitialized_var(macstat);
+ u16 chanstat, mactime;
+ u32 macstat;
u16 chanid;
int padding, rate_idx;
diff --git a/drivers/net/wireless/broadcom/b43legacy/b43legacy.h b/drivers/net/wireless/broadcom/b43legacy/b43legacy.h
index 6b0cec467938..f49365d14619 100644
--- a/drivers/net/wireless/broadcom/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/broadcom/b43legacy/b43legacy.h
@@ -379,7 +379,7 @@ struct b43legacy_iv {
union {
__be16 d16;
__be32 d32;
- } data __packed;
+ } __packed data;
} __packed;
#define B43legacy_PHYMODE(phytype) (1 << (phytype))
diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
index 082aab8353b8..cab2c4467693 100644
--- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
@@ -190,7 +190,7 @@ static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf,
struct b43legacy_wldev *dev;
struct b43legacy_debugfs_fops *dfops;
struct b43legacy_dfs_file *dfile;
- ssize_t uninitialized_var(ret);
+ ssize_t ret;
char *buf;
const size_t bufsize = 1024 * 16; /* 16 KiB buffer */
const size_t buforder = get_order(bufsize);
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 5208a39fd6f7..220c11d34c23 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -2580,7 +2580,7 @@ static void b43legacy_put_phy_into_reset(struct b43legacy_wldev *dev)
static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
unsigned int new_mode)
{
- struct b43legacy_wldev *uninitialized_var(up_dev);
+ struct b43legacy_wldev *up_dev;
struct b43legacy_wldev *down_dev;
int err;
bool gmode = false;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 6439adcd2f99..b7ceea0b3204 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -87,6 +87,9 @@
#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
+#define BRCMF_MAX_CHANSPEC_LIST \
+ (BRCMF_DCMD_MEDLEN / sizeof(__le32) - 1)
+
static bool check_vif_up(struct brcmf_cfg80211_vif *vif)
{
if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) {
@@ -1266,13 +1269,14 @@ static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
{
struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_wsec_pmk_le pmk;
- int i, err;
+ int err;
- /* convert to firmware key format */
- pmk.key_len = cpu_to_le16(pmk_len << 1);
- pmk.flags = cpu_to_le16(BRCMF_WSEC_PASSPHRASE);
- for (i = 0; i < pmk_len; i++)
- snprintf(&pmk.key[2 * i], 3, "%02x", pmk_data[i]);
+ memset(&pmk, 0, sizeof(pmk));
+
+ /* pass pmk directly */
+ pmk.key_len = cpu_to_le16(pmk_len);
+ pmk.flags = cpu_to_le16(0);
+ memcpy(pmk.key, pmk_data, pmk_len);
/* store psk in firmware */
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK,
@@ -5463,6 +5467,11 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
(struct brcmf_cfg80211_assoc_ielen_le *)cfg->extra_buf;
req_len = le32_to_cpu(assoc_info->req_len);
resp_len = le32_to_cpu(assoc_info->resp_len);
+ if (req_len > WL_EXTRA_BUF_MAX || resp_len > WL_EXTRA_BUF_MAX) {
+ bphy_err(drvr, "invalid lengths in assoc info: req %u resp %u\n",
+ req_len, resp_len);
+ return -EINVAL;
+ }
if (req_len) {
err = brcmf_fil_iovar_data_get(ifp, "assoc_req_ies",
cfg->extra_buf,
@@ -6067,6 +6076,13 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
band->channels[i].flags = IEEE80211_CHAN_DISABLED;
total = le32_to_cpu(list->count);
+ if (total > BRCMF_MAX_CHANSPEC_LIST) {
+ bphy_err(drvr, "Invalid count of channel Spec. (%u)\n",
+ total);
+ err = -EINVAL;
+ goto fail_pbuf;
+ }
+
for (i = 0; i < total; i++) {
ch.chspec = (u16)le32_to_cpu(list->element[i]);
cfg->d11inf.decchspec(&ch);
@@ -6212,6 +6228,13 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
band = cfg_to_wiphy(cfg)->bands[NL80211_BAND_2GHZ];
list = (struct brcmf_chanspec_list *)pbuf;
num_chan = le32_to_cpu(list->count);
+ if (num_chan > BRCMF_MAX_CHANSPEC_LIST) {
+ bphy_err(drvr, "Invalid count of channel Spec. (%u)\n",
+ num_chan);
+ kfree(pbuf);
+ return -EINVAL;
+ }
+
for (i = 0; i < num_chan; i++) {
ch.chspec = (u16)le32_to_cpu(list->element[i]);
cfg->d11inf.decchspec(&ch);
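
The new BRCMF_MAX_CHANSPEC_LIST bound caps a count that arrives straight from firmware before it is used as a loop limit, and the assoc-IE hunk applies the same idea to req_len/resp_len. The general shape of the check, with illustrative sizes (the "- 1" leaves room for the count word itself at the head of the response buffer):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define DCMD_MEDLEN	1536
#define MAX_ELEMS	(DCMD_MEDLEN / sizeof(uint32_t) - 1)

struct fw_list {
	uint32_t count;			/* device-controlled */
	uint32_t element[];
};

static int walk_fw_list(const struct fw_list *list)
{
	uint32_t total = list->count;

	if (total > MAX_ELEMS)
		return -EINVAL;		/* reject before the loop runs */

	for (uint32_t i = 0; i < total; i++)
		(void)list->element[i];	/* now provably inside the buffer */
	return 0;
}
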
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index dec25e415619..e7c97dfd6928 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -264,6 +264,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
err);
goto done;
}
+ buf[sizeof(buf) - 1] = '\0';
ptr = (char *)buf;
strsep(&ptr, "\n");
@@ -280,15 +281,17 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
if (err) {
brcmf_dbg(TRACE, "retrieving clmver failed, %d\n", err);
} else {
+ buf[sizeof(buf) - 1] = '\0';
clmver = (char *)buf;
- /* store CLM version for adding it to revinfo debugfs file */
- memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver));
/* Replace all newline/linefeed characters with space
* character
*/
strreplace(clmver, '\n', ' ');
+ /* store CLM version for adding it to revinfo debugfs file */
+ memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver));
+
brcmf_dbg(INFO, "CLM version = %s\n", clmver);
}
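
Both common.c hunks pin a terminator onto the firmware-returned buffer before any string function touches it, and the clmver hunk also moves the memcpy() to after strreplace() so the stored copy is the cleaned-up string. The termination step, distilled into plain C with strreplace() open-coded:

#include <stddef.h>

static void sanitize_fw_string(char *buf, size_t bufsize)
{
	buf[bufsize - 1] = '\0';	/* reply may not be NUL-terminated */

	for (char *p = buf; *p; p++)	/* strreplace(buf, '\n', ' ') */
		if (*p == '\n')
			*p = ' ';
	/* only now is it safe to strsep()/copy/print the buffer */
}
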
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index edb79e9665dc..4907a667f963 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -290,6 +290,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
struct brcmf_pub *drvr = ifp->drvr;
struct ethhdr *eh;
int head_delta;
+ unsigned int tx_bytes = skb->len;
brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
@@ -332,6 +333,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
bphy_err(drvr, "%s: failed to expand headroom\n",
brcmf_ifname(ifp));
atomic_inc(&drvr->bus_if->stats.pktcow_failed);
+ dev_kfree_skb(skb);
goto done;
}
}
@@ -361,7 +363,7 @@ done:
ndev->stats.tx_dropped++;
} else {
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_bytes += tx_bytes;
}
/* Return ok: we always eat the packet */
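
These core.c hunks snapshot skb->len before ownership of the skb moves down the stack, and the new dev_kfree_skb() on the failed-headroom path makes the old skb->len read at "done:" a use-after-free in one more case. The pattern, shown with stand-in types:

#include <stdlib.h>

struct pkt { unsigned int len; };
struct stats { unsigned long tx_packets, tx_bytes; };

/* Stand-in for the bus transmit path: takes ownership and may free. */
static int consume_pkt(struct pkt *p)
{
	free(p);
	return 0;
}

static int xmit(struct pkt *p, struct stats *st)
{
	unsigned int tx_bytes = p->len;	/* snapshot while we still own p */
	int ret = consume_pkt(p);	/* p must not be touched after this */

	if (ret == 0) {
		st->tx_packets++;
		st->tx_bytes += tx_bytes;
	}
	return ret;
}
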
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 544ad80629a9..47e33fe53eeb 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -703,6 +703,11 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
u32 i, j;
char end = '\0';
+ if (chiprev >= BITS_PER_TYPE(u32)) {
+ brcmf_err("Invalid chip revision %u\n", chiprev);
+ return NULL;
+ }
+
for (i = 0; i < table_size; i++) {
if (mapping_table[i].chipid == chip &&
mapping_table[i].revmask & BIT(chiprev))
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index a30fcfbf2ee7..94f843158860 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -228,6 +228,10 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
brcmf_fweh_event_name(event->code), event->code,
event->emsg.ifidx, event->emsg.bsscfgidx,
event->emsg.addr);
+ if (event->emsg.bsscfgidx >= BRCMF_MAX_IFS) {
+ bphy_err(drvr, "invalid bsscfg index: %u\n", event->emsg.bsscfgidx);
+ goto event_free;
+ }
/* convert event message */
emsg_be = &event->emsg;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index c2705d7a4247..fd54acb85924 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -345,8 +345,11 @@ brcmf_msgbuf_alloc_pktid(struct device *dev,
count++;
} while (count < pktids->array_size);
- if (count == pktids->array_size)
+ if (count == pktids->array_size) {
+ dma_unmap_single(dev, *physaddr, skb->len - data_offset,
+ pktids->direction);
return -ENOMEM;
+ }
array[*idx].data_offset = data_offset;
array[*idx].physaddr = *physaddr;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index b5d2e5b9f67c..6aa175bdadde 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -616,7 +616,7 @@ static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
}
if (!brcmf_chip_set_active(devinfo->ci, resetintr))
- return -EINVAL;
+ return -EIO;
return 0;
}
@@ -1109,6 +1109,10 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
BRCMF_NROF_H2D_COMMON_MSGRINGS;
max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
}
+ if (max_flowrings > 512) {
+ brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings);
+ return -EIO;
+ }
if (devinfo->dma_idx_sz != 0) {
bufsz = (max_submissionrings + max_completionrings) *
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index 14e530601ef3..7ec1630d9095 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -154,12 +154,12 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
struct brcmf_pno_macaddr_le pfn_mac;
u8 *mac_addr = NULL;
u8 *mac_mask = NULL;
- int err, i;
+ int err, i, ri;
- for (i = 0; i < pi->n_reqs; i++)
- if (pi->reqs[i]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- mac_addr = pi->reqs[i]->mac_addr;
- mac_mask = pi->reqs[i]->mac_addr_mask;
+ for (ri = 0; ri < pi->n_reqs; ri++)
+ if (pi->reqs[ri]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ mac_addr = pi->reqs[ri]->mac_addr;
+ mac_mask = pi->reqs[ri]->mac_addr_mask;
break;
}
@@ -181,7 +181,7 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
pfn_mac.mac[0] |= 0x02;
brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n",
- pi->reqs[i]->reqid, pfn_mac.mac);
+ pi->reqs[ri]->reqid, pfn_mac.mac);
err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
sizeof(pfn_mac));
if (err)
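
The pno.c change is more than a rename: the visible context suggests that between the search loop and the debug print the function reuses i for another loop over the MAC bytes (the pfn_mac.mac[0] |= 0x02 line above is its tail), so by the time pi->reqs[i]->reqid was logged, i no longer pointed at the matched request. Giving the search its own index keeps that reference valid. Distilled into a runnable toy:

#include <stdio.h>

int main(void)
{
	const int reqs[] = { 10, 20, 30 };
	int match = -1;

	for (int ri = 0; ri < 3; ri++)	/* dedicated search index */
		if (reqs[ri] == 20) {
			match = ri;
			break;
		}

	for (int i = 0; i < 6; i++)	/* unrelated loop, free to clobber i */
		;

	if (match >= 0)			/* still indexes the matched entry */
		printf("matched reqs[%d] = %d\n", match, reqs[match]);
	return 0;
}
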
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index ddc999670484..5874f56c12da 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3367,6 +3367,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
/* Take arm out of reset */
if (!brcmf_chip_set_active(bus->ci, rstvec)) {
brcmf_err("error getting out of ARM core reset\n");
+ bcmerror = -EIO;
goto err;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index 2441714169de..849c8109b398 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -383,8 +383,9 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
return sh;
}
-static void wlc_phy_timercb_phycal(struct brcms_phy *pi)
+static void wlc_phy_timercb_phycal(void *ptr)
{
+ struct brcms_phy *pi = ptr;
uint delay = 5;
if (PHY_PERICAL_MPHASE_PENDING(pi)) {
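
The brcmsmac hunks here and in phy_shim.c/.h below stop laundering the timer callback through a function-pointer cast. Calling a function through a pointer of a different type is undefined behaviour and is rejected at runtime by kernel control-flow-integrity checking; the fix keeps the registered type as void (*)(void *) end to end and casts the argument inside the callback instead. A sketch with illustrative names:

typedef void (*timer_fn_t)(void *arg);

struct phy { int cal_pending; };

/* Signature matches timer_fn_t exactly: the cast happens on the data
 * pointer, never on the function pointer. */
static void phycal_cb(void *ptr)
{
	struct phy *pi = ptr;

	pi->cal_pending = 0;
}

static timer_fn_t registered_fn;
static void *registered_arg;

static void register_timer(timer_fn_t fn, void *arg)
{
	registered_fn = fn;	/* later invoked as registered_fn(registered_arg) */
	registered_arg = arg;
}

register_timer(phycal_cb, pi) then replaces the old (void (*)(void *)) cast at the call site.
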
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
index a0de5db0cd64..b72381791536 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
@@ -57,12 +57,11 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim)
}
struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn)(struct brcms_phy *pi),
+ void (*fn)(void *pi),
void *arg, const char *name)
{
return (struct wlapi_timer *)
- brcms_init_timer(physhim->wl, (void (*)(void *))fn,
- arg, name);
+ brcms_init_timer(physhim->wl, fn, arg, name);
}
void wlapi_free_timer(struct wlapi_timer *t)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
index dd8774717ade..27d0934e600e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
@@ -131,7 +131,7 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim);
/* PHY to WL utility functions */
struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn)(struct brcms_phy *pi),
+ void (*fn)(void *pi),
void *arg, const char *name);
void wlapi_free_timer(struct wlapi_timer *t);
void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index da0d3834b5f0..ebf0d3072290 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -6104,8 +6104,11 @@ static int airo_get_rate(struct net_device *dev,
{
struct airo_info *local = dev->ml_priv;
StatusRid status_rid; /* Card status info */
+ int ret;
- readStatusRid(local, &status_rid, 1);
+ ret = readStatusRid(local, &status_rid, 1);
+ if (ret)
+ return -EBUSY;
vwrq->value = le16_to_cpu(status_rid.currentXmitRate) * 500000;
/* If more than one rate, set auto */
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index a162146a43a7..752489331e1e 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -2294,10 +2294,11 @@ static int ipw2100_alloc_skb(struct ipw2100_priv *priv,
return -ENOMEM;
packet->rxp = (struct ipw2100_rx *)packet->skb->data;
- packet->dma_addr = pci_map_single(priv->pci_dev, packet->skb->data,
+ packet->dma_addr = dma_map_single(&priv->pci_dev->dev,
+ packet->skb->data,
sizeof(struct ipw2100_rx),
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(priv->pci_dev, packet->dma_addr)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->pci_dev->dev, packet->dma_addr)) {
dev_kfree_skb(packet->skb);
return -ENOMEM;
}
@@ -2478,9 +2479,8 @@ static void isr_rx(struct ipw2100_priv *priv, int i,
return;
}
- pci_unmap_single(priv->pci_dev,
- packet->dma_addr,
- sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pci_dev->dev, packet->dma_addr,
+ sizeof(struct ipw2100_rx), DMA_FROM_DEVICE);
skb_put(packet->skb, status->frame_size);
@@ -2562,8 +2562,8 @@ static void isr_rx_monitor(struct ipw2100_priv *priv, int i,
return;
}
- pci_unmap_single(priv->pci_dev, packet->dma_addr,
- sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pci_dev->dev, packet->dma_addr,
+ sizeof(struct ipw2100_rx), DMA_FROM_DEVICE);
memmove(packet->skb->data + sizeof(struct ipw_rt_hdr),
packet->skb->data, status->frame_size);
@@ -2688,9 +2688,9 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv)
/* Sync the DMA for the RX buffer so CPU is sure to get
* the correct values */
- pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr,
- sizeof(struct ipw2100_rx),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&priv->pci_dev->dev, packet->dma_addr,
+ sizeof(struct ipw2100_rx),
+ DMA_FROM_DEVICE);
if (unlikely(ipw2100_corruption_check(priv, i))) {
ipw2100_corruption_detected(priv, i);
@@ -2922,9 +2922,8 @@ static int __ipw2100_tx_process(struct ipw2100_priv *priv)
(packet->index + 1 + i) % txq->entries,
tbd->host_addr, tbd->buf_length);
- pci_unmap_single(priv->pci_dev,
- tbd->host_addr,
- tbd->buf_length, PCI_DMA_TODEVICE);
+ dma_unmap_single(&priv->pci_dev->dev, tbd->host_addr,
+ tbd->buf_length, DMA_TO_DEVICE);
}
libipw_txb_free(packet->info.d_struct.txb);
@@ -3164,15 +3163,13 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
tbd->buf_length = packet->info.d_struct.txb->
fragments[i]->len - LIBIPW_3ADDR_LEN;
- tbd->host_addr = pci_map_single(priv->pci_dev,
+ tbd->host_addr = dma_map_single(&priv->pci_dev->dev,
packet->info.d_struct.
- txb->fragments[i]->
- data +
+ txb->fragments[i]->data +
LIBIPW_3ADDR_LEN,
tbd->buf_length,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(priv->pci_dev,
- tbd->host_addr)) {
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&priv->pci_dev->dev, tbd->host_addr)) {
IPW_DEBUG_TX("dma mapping error\n");
break;
}
@@ -3181,10 +3178,10 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
txq->next, tbd->host_addr,
tbd->buf_length);
- pci_dma_sync_single_for_device(priv->pci_dev,
- tbd->host_addr,
- tbd->buf_length,
- PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(&priv->pci_dev->dev,
+ tbd->host_addr,
+ tbd->buf_length,
+ DMA_TO_DEVICE);
txq->next++;
txq->next %= txq->entries;
@@ -3439,9 +3436,9 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
return -ENOMEM;
for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
- v = pci_zalloc_consistent(priv->pci_dev,
- sizeof(struct ipw2100_cmd_header),
- &p);
+ v = dma_alloc_coherent(&priv->pci_dev->dev,
+ sizeof(struct ipw2100_cmd_header), &p,
+ GFP_KERNEL);
if (!v) {
printk(KERN_ERR DRV_NAME ": "
"%s: PCI alloc failed for msg "
@@ -3460,11 +3457,10 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
return 0;
for (j = 0; j < i; j++) {
- pci_free_consistent(priv->pci_dev,
- sizeof(struct ipw2100_cmd_header),
- priv->msg_buffers[j].info.c_struct.cmd,
- priv->msg_buffers[j].info.c_struct.
- cmd_phys);
+ dma_free_coherent(&priv->pci_dev->dev,
+ sizeof(struct ipw2100_cmd_header),
+ priv->msg_buffers[j].info.c_struct.cmd,
+ priv->msg_buffers[j].info.c_struct.cmd_phys);
}
kfree(priv->msg_buffers);
@@ -3495,11 +3491,10 @@ static void ipw2100_msg_free(struct ipw2100_priv *priv)
return;
for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
- pci_free_consistent(priv->pci_dev,
- sizeof(struct ipw2100_cmd_header),
- priv->msg_buffers[i].info.c_struct.cmd,
- priv->msg_buffers[i].info.c_struct.
- cmd_phys);
+ dma_free_coherent(&priv->pci_dev->dev,
+ sizeof(struct ipw2100_cmd_header),
+ priv->msg_buffers[i].info.c_struct.cmd,
+ priv->msg_buffers[i].info.c_struct.cmd_phys);
}
kfree(priv->msg_buffers);
@@ -4322,7 +4317,8 @@ static int status_queue_allocate(struct ipw2100_priv *priv, int entries)
IPW_DEBUG_INFO("enter\n");
q->size = entries * sizeof(struct ipw2100_status);
- q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic);
+ q->drv = dma_alloc_coherent(&priv->pci_dev->dev, q->size, &q->nic,
+ GFP_KERNEL);
if (!q->drv) {
IPW_DEBUG_WARNING("Can not allocate status queue.\n");
return -ENOMEM;
@@ -4338,9 +4334,10 @@ static void status_queue_free(struct ipw2100_priv *priv)
IPW_DEBUG_INFO("enter\n");
if (priv->status_queue.drv) {
- pci_free_consistent(priv->pci_dev, priv->status_queue.size,
- priv->status_queue.drv,
- priv->status_queue.nic);
+ dma_free_coherent(&priv->pci_dev->dev,
+ priv->status_queue.size,
+ priv->status_queue.drv,
+ priv->status_queue.nic);
priv->status_queue.drv = NULL;
}
@@ -4356,7 +4353,8 @@ static int bd_queue_allocate(struct ipw2100_priv *priv,
q->entries = entries;
q->size = entries * sizeof(struct ipw2100_bd);
- q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic);
+ q->drv = dma_alloc_coherent(&priv->pci_dev->dev, q->size, &q->nic,
+ GFP_KERNEL);
if (!q->drv) {
IPW_DEBUG_INFO
("can't allocate shared memory for buffer descriptors\n");
@@ -4376,7 +4374,8 @@ static void bd_queue_free(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q)
return;
if (q->drv) {
- pci_free_consistent(priv->pci_dev, q->size, q->drv, q->nic);
+ dma_free_coherent(&priv->pci_dev->dev, q->size, q->drv,
+ q->nic);
q->drv = NULL;
}
@@ -4436,9 +4435,9 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
}
for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) {
- v = pci_alloc_consistent(priv->pci_dev,
- sizeof(struct ipw2100_data_header),
- &p);
+ v = dma_alloc_coherent(&priv->pci_dev->dev,
+ sizeof(struct ipw2100_data_header), &p,
+ GFP_KERNEL);
if (!v) {
printk(KERN_ERR DRV_NAME
": %s: PCI alloc failed for tx " "buffers.\n",
@@ -4458,11 +4457,10 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
return 0;
for (j = 0; j < i; j++) {
- pci_free_consistent(priv->pci_dev,
- sizeof(struct ipw2100_data_header),
- priv->tx_buffers[j].info.d_struct.data,
- priv->tx_buffers[j].info.d_struct.
- data_phys);
+ dma_free_coherent(&priv->pci_dev->dev,
+ sizeof(struct ipw2100_data_header),
+ priv->tx_buffers[j].info.d_struct.data,
+ priv->tx_buffers[j].info.d_struct.data_phys);
}
kfree(priv->tx_buffers);
@@ -4539,12 +4537,10 @@ static void ipw2100_tx_free(struct ipw2100_priv *priv)
priv->tx_buffers[i].info.d_struct.txb = NULL;
}
if (priv->tx_buffers[i].info.d_struct.data)
- pci_free_consistent(priv->pci_dev,
- sizeof(struct ipw2100_data_header),
- priv->tx_buffers[i].info.d_struct.
- data,
- priv->tx_buffers[i].info.d_struct.
- data_phys);
+ dma_free_coherent(&priv->pci_dev->dev,
+ sizeof(struct ipw2100_data_header),
+ priv->tx_buffers[i].info.d_struct.data,
+ priv->tx_buffers[i].info.d_struct.data_phys);
}
kfree(priv->tx_buffers);
@@ -4607,9 +4603,10 @@ static int ipw2100_rx_allocate(struct ipw2100_priv *priv)
return 0;
for (j = 0; j < i; j++) {
- pci_unmap_single(priv->pci_dev, priv->rx_buffers[j].dma_addr,
+ dma_unmap_single(&priv->pci_dev->dev,
+ priv->rx_buffers[j].dma_addr,
sizeof(struct ipw2100_rx_packet),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb(priv->rx_buffers[j].skb);
}
@@ -4661,10 +4658,10 @@ static void ipw2100_rx_free(struct ipw2100_priv *priv)
for (i = 0; i < RX_QUEUE_LENGTH; i++) {
if (priv->rx_buffers[i].rxp) {
- pci_unmap_single(priv->pci_dev,
+ dma_unmap_single(&priv->pci_dev->dev,
priv->rx_buffers[i].dma_addr,
sizeof(struct ipw2100_rx),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb(priv->rx_buffers[i].skb);
}
}
@@ -6196,7 +6193,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
pci_set_drvdata(pci_dev, priv);
- err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
if (err) {
printk(KERN_WARNING DRV_NAME
"Error calling pci_set_dma_mask.\n");
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index ac5f797fb1ad..5ce1a4d8fcee 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -3442,9 +3442,10 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv,
/* In the reset function, these buffers may have been allocated
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].skb != NULL) {
- pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
- IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
- dev_kfree_skb(rxq->pool[i].skb);
+ dma_unmap_single(&priv->pci_dev->dev,
+ rxq->pool[i].dma_addr,
+ IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb_irq(rxq->pool[i].skb);
rxq->pool[i].skb = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -3776,7 +3777,8 @@ static int ipw_queue_tx_init(struct ipw_priv *priv,
}
q->bd =
- pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
+ dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count,
+ &q->q.dma_addr, GFP_KERNEL);
if (!q->bd) {
IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
sizeof(q->bd[0]) * count);
@@ -3818,9 +3820,10 @@ static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
/* unmap chunks if any */
for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
- pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
+ dma_unmap_single(&dev->dev,
+ le32_to_cpu(bd->u.data.chunk_ptr[i]),
le16_to_cpu(bd->u.data.chunk_len[i]),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (txq->txb[txq->q.last_used]) {
libipw_txb_free(txq->txb[txq->q.last_used]);
txq->txb[txq->q.last_used] = NULL;
@@ -3852,8 +3855,8 @@ static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
}
/* free buffers belonging to queue itself */
- pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
- q->dma_addr);
+ dma_free_coherent(&dev->dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
+ q->dma_addr);
kfree(txq->txb);
/* 0 fill whole structure */
@@ -5198,8 +5201,8 @@ static void ipw_rx_queue_replenish(void *data)
list_del(element);
rxb->dma_addr =
- pci_map_single(priv->pci_dev, rxb->skb->data,
- IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dma_map_single(&priv->pci_dev->dev, rxb->skb->data,
+ IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
@@ -5232,8 +5235,9 @@ static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
if (rxq->pool[i].skb != NULL) {
- pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
- IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pci_dev->dev,
+ rxq->pool[i].dma_addr,
+ IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb(rxq->pool[i].skb);
}
}
@@ -8271,9 +8275,8 @@ static void ipw_rx(struct ipw_priv *priv)
}
priv->rxq->queue[i] = NULL;
- pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
- IPW_RX_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&priv->pci_dev->dev, rxb->dma_addr,
+ IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
pkt = (struct ipw_rx_packet *)rxb->skb->data;
IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
@@ -8425,8 +8428,8 @@ static void ipw_rx(struct ipw_priv *priv)
rxb->skb = NULL;
}
- pci_unmap_single(priv->pci_dev, rxb->dma_addr,
- IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pci_dev->dev, rxb->dma_addr,
+ IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
list_add_tail(&rxb->list, &priv->rxq->rx_used);
i = (i + 1) % RX_QUEUE_SIZE;
@@ -10225,11 +10228,10 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
txb->fragments[i]->len - hdr_len);
tfd->u.data.chunk_ptr[i] =
- cpu_to_le32(pci_map_single
- (priv->pci_dev,
- txb->fragments[i]->data + hdr_len,
- txb->fragments[i]->len - hdr_len,
- PCI_DMA_TODEVICE));
+ cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
+ txb->fragments[i]->data + hdr_len,
+ txb->fragments[i]->len - hdr_len,
+ DMA_TO_DEVICE));
tfd->u.data.chunk_len[i] =
cpu_to_le16(txb->fragments[i]->len - hdr_len);
}
@@ -10259,10 +10261,10 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
dev_kfree_skb_any(txb->fragments[i]);
txb->fragments[i] = skb;
tfd->u.data.chunk_ptr[i] =
- cpu_to_le32(pci_map_single
- (priv->pci_dev, skb->data,
- remaining_bytes,
- PCI_DMA_TODEVICE));
+ cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
+ skb->data,
+ remaining_bytes,
+ DMA_TO_DEVICE));
le32_add_cpu(&tfd->u.data.num_chunks, 1);
}
@@ -11412,9 +11414,14 @@ static int ipw_wdev_init(struct net_device *dev)
set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
/* With that information in place, we can now register the wiphy... */
- if (wiphy_register(wdev->wiphy))
- rc = -EIO;
+ rc = wiphy_register(wdev->wiphy);
+ if (rc)
+ goto out;
+
+ return 0;
out:
+ kfree(priv->ieee->a_band.channels);
+ kfree(priv->ieee->bg_band.channels);
return rc;
}
@@ -11632,9 +11639,9 @@ static int ipw_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
goto out_pci_disable_device;
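
Every ipw2100/ipw2200 hunk above is one mechanical conversion from the long-deprecated pci_* DMA wrappers to the generic DMA API: the pci_dev argument becomes &pdev->dev, PCI_DMA_* direction flags become DMA_*, and the coherent allocations gain an explicit GFP flag (GFP_KERNEL at these call sites). dma_alloc_coherent() also returns zeroed memory, which is why pci_zalloc_consistent() needs no replacement memset. One representative pairing, using the real kernel APIs:

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* old: addr = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
 *      if (pci_dma_mapping_error(pdev, addr)) ...
 */
static int map_rx_buf(struct pci_dev *pdev, struct sk_buff *skb,
		      size_t len, dma_addr_t *addr)
{
	*addr = dma_map_single(&pdev->dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, *addr))
		return -ENOMEM;
	return 0;
}
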
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index e2e9c3e8fff5..a1bd61d5e024 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -2302,9 +2302,7 @@ __il3945_down(struct il_priv *il)
il3945_hw_txq_ctx_free(il);
exit:
memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
-
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
+ dev_kfree_skb(il->beacon_skb);
il->beacon_skb = NULL;
/* clear out any free frames */
@@ -3384,10 +3382,12 @@ static DEVICE_ATTR(dump_errors, 0200, NULL, il3945_dump_error_log);
*
*****************************************************************************/
-static void
+static int
il3945_setup_deferred_work(struct il_priv *il)
{
il->workqueue = create_singlethread_workqueue(DRV_NAME);
+ if (!il->workqueue)
+ return -ENOMEM;
init_waitqueue_head(&il->wait_command_queue);
@@ -3406,6 +3406,8 @@ il3945_setup_deferred_work(struct il_priv *il)
tasklet_init(&il->irq_tasklet,
il3945_irq_tasklet,
(unsigned long)il);
+
+ return 0;
}
static void
@@ -3727,7 +3729,10 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]);
- il3945_setup_deferred_work(il);
+ err = il3945_setup_deferred_work(il);
+ if (err)
+ goto out_remove_sysfs;
+
il3945_setup_handlers(il);
il_power_initialize(il);
@@ -3739,7 +3744,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = il3945_setup_mac(il);
if (err)
- goto out_remove_sysfs;
+ goto out_destroy_workqueue;
il_dbgfs_register(il, DRV_NAME);
@@ -3748,9 +3753,10 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-out_remove_sysfs:
+out_destroy_workqueue:
destroy_workqueue(il->workqueue);
il->workqueue = NULL;
+out_remove_sysfs:
sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
out_release_irq:
free_irq(il->pci_dev->irq, il);
@@ -3847,9 +3853,7 @@ il3945_pci_remove(struct pci_dev *pdev)
il_free_channel_map(il);
il_free_geos(il);
kfree(il->scan_cmd);
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
-
+ dev_kfree_skb(il->beacon_skb);
ieee80211_free_hw(il->hw);
}
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index 2ac494f5ae22..fd63eba47ba2 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -2100,7 +2100,7 @@ il3945_txpower_set_from_eeprom(struct il_priv *il)
/* set tx power value for all OFDM rates */
for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) {
- s32 uninitialized_var(power_idx);
+ s32 power_idx;
int rc;
/* use channel group's clip-power table,
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 5fe17039a337..20c933602f0a 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -2768,7 +2768,7 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
struct ieee80211_tx_info *info;
struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
u32 status = le32_to_cpu(tx_resp->u.status);
- int uninitialized_var(tid);
+ int tid;
int sta_id;
int freed;
u8 *qc = NULL;
@@ -6217,10 +6217,12 @@ out:
mutex_unlock(&il->mutex);
}
-static void
+static int
il4965_setup_deferred_work(struct il_priv *il)
{
il->workqueue = create_singlethread_workqueue(DRV_NAME);
+ if (!il->workqueue)
+ return -ENOMEM;
init_waitqueue_head(&il->wait_command_queue);
@@ -6241,6 +6243,8 @@ il4965_setup_deferred_work(struct il_priv *il)
tasklet_init(&il->irq_tasklet,
il4965_irq_tasklet,
(unsigned long)il);
+
+ return 0;
}
static void
@@ -6630,7 +6634,10 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_msi;
}
- il4965_setup_deferred_work(il);
+ err = il4965_setup_deferred_work(il);
+ if (err)
+ goto out_free_irq;
+
il4965_setup_handlers(il);
/*********************************************
@@ -6668,6 +6675,7 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
out_destroy_workqueue:
destroy_workqueue(il->workqueue);
il->workqueue = NULL;
+out_free_irq:
free_irq(il->pci_dev->irq, il);
out_disable_msi:
pci_disable_msi(il->pci_dev);
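
The il3945/il4965 probe changes follow the standard unwind discipline: every failure point jumps to a label that tears down exactly what has been built so far, in reverse order, so adding a fallible step (workqueue creation) means threading a new rung into the ladder rather than reusing a neighbouring label. The canonical shape, with stub setup/teardown names:

static int setup_a(void) { return 0; }
static int setup_b(void) { return 0; }
static int setup_c(void) { return -1; }	/* force the unwind path */
static void teardown_a(void) { }
static void teardown_b(void) { }

static int probe(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;

	err = setup_b();
	if (err)
		goto undo_a;

	err = setup_c();
	if (err)
		goto undo_b;	/* c failed: b and a exist, nothing of c */

	return 0;

undo_b:
	teardown_b();
undo_a:
	teardown_a();	/* reverse order of acquisition */
	return err;
}
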
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index 0a02d8aca320..b79a8aeab4c7 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
/* Repeat initial/next rate.
* For legacy IL_NUMBER_TRY == 1, this loop will not execute.
* For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) {
+ while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) {
if (is_legacy(tbl_type.lq_type)) {
if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
ant_toggle_cnt++;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 1107b96a8a88..7cfd80d40a65 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -5182,8 +5182,7 @@ il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
/* new association get rid of ibss beacon skb */
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
+ dev_consume_skb_irq(il->beacon_skb);
il->beacon_skb = NULL;
il->timestamp = 0;
@@ -5302,10 +5301,7 @@ il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
spin_lock_irqsave(&il->lock, flags);
-
- if (il->beacon_skb)
- dev_kfree_skb(il->beacon_skb);
-
+ dev_consume_skb_irq(il->beacon_skb);
il->beacon_skb = skb;
timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
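
Two cleanups meet in these iwlegacy hunks: the skb free helpers tolerate a NULL argument, so the if (il->beacon_skb) guards are dead weight, and inside spin_lock_irqsave() sections the _irq variants must be used because the plain free path is not safe with interrupts disabled. dev_consume_skb_irq() additionally records the free as normal consumption rather than a drop. The shape of the locked path, with illustrative names:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void replace_beacon(struct sk_buff **slot, struct sk_buff *new_skb,
			   spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	dev_consume_skb_irq(*slot);	/* defers the free out of the
					 * IRQs-off region; NULL tolerated,
					 * as the hunk above relies on */
	*slot = new_skb;
	spin_unlock_irqrestore(lock, flags);
}
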
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
index 51158edce15b..f30fdbedd717 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -1086,6 +1086,7 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
{
__le16 key_flags;
struct iwl_addsta_cmd sta_cmd;
+ size_t to_copy;
int i;
spin_lock_bh(&priv->sta_lock);
@@ -1105,7 +1106,9 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
for (i = 0; i < 5; i++)
sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
- memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
+ /* keyconf may contain MIC rx/tx keys which iwl does not use */
+ to_copy = min_t(size_t, sizeof(sta_cmd.key.key), keyconf->keylen);
+ memcpy(sta_cmd.key.key, keyconf->key, to_copy);
break;
case WLAN_CIPHER_SUITE_WEP104:
key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 2e763678dbdb..36bfc195a772 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -332,9 +332,9 @@ struct iwl_fw_ini_fifo_hdr {
struct iwl_fw_ini_error_dump_range {
__le32 range_data_size;
union {
- __le32 internal_base_addr;
- __le64 dram_base_addr;
- __le32 page_num;
+ __le32 internal_base_addr __packed;
+ __le64 dram_base_addr __packed;
+ __le32 page_num __packed;
struct iwl_fw_ini_fifo_hdr fifo_hdr;
};
__le32 data[];
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 524f9dd2323d..f8785c70842d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1877,6 +1877,11 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
if (ret < 0)
return ret;
+ if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) {
+ ret = -EIO;
+ goto out;
+ }
+
rsp = (void *)hcmd.resp_pkt->data;
if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
ret = -ENXIO;
@@ -1954,6 +1959,11 @@ static ssize_t iwl_dbgfs_mem_write(struct file *file,
if (ret < 0)
return ret;
+ if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) {
+ ret = -EIO;
+ goto out;
+ }
+
rsp = (void *)hcmd.resp_pkt->data;
if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) {
ret = -ENXIO;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index f49887379c43..f485c0dd75d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -508,6 +508,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
n_channels = __le32_to_cpu(mcc_resp->n_channels);
+ if (iwl_rx_packet_payload_len(pkt) !=
+ struct_size(mcc_resp, channels, n_channels)) {
+ resp_cp = ERR_PTR(-EINVAL);
+ goto exit;
+ }
resp_len = sizeof(struct iwl_mcc_update_resp) +
n_channels * sizeof(__le32);
resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
@@ -519,6 +524,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;
n_channels = __le32_to_cpu(mcc_resp_v3->n_channels);
+ if (iwl_rx_packet_payload_len(pkt) !=
+ struct_size(mcc_resp_v3, channels, n_channels)) {
+ resp_cp = ERR_PTR(-EINVAL);
+ goto exit;
+ }
resp_len = sizeof(struct iwl_mcc_update_resp) +
n_channels * sizeof(__le32);
resp_cp = kzalloc(resp_len, GFP_KERNEL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 5973eecbc037..18c5975d7c03 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1167,8 +1167,11 @@ static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
mvmtxq = iwl_mvm_txq_from_mac80211(txq);
mvmtxq->stopped = !start;
- if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
+ if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) {
+ local_bh_disable();
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+ local_bh_enable();
+ }
}
out:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 5df4bbb6c6de..7befb92b5159 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1810,6 +1810,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
iwl_mvm_txq_from_mac80211(sta->txq[i]);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ list_del_init(&mvmtxq->list);
}
}
@@ -2556,7 +2557,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
if (iwl_mvm_has_new_rx_api(mvm) && start) {
- u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
+ u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index d46e606b7b02..fbcd46aedade 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -529,16 +529,20 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
/*
- * For data packets rate info comes from the fw. Only
- * set rate/antenna during connection establishment or in case
- * no station is given.
+ * For data and mgmt packets rate info comes from the fw. Only
+ * set rate/antenna for injected frames with fixed rate, or
+ * when no sta is given.
*/
- if (!sta || !ieee80211_is_data(hdr->frame_control) ||
- mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
+ if (unlikely(!sta ||
+ info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
flags |= IWL_TX_FLAGS_CMD_RATE;
rate_n_flags =
iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
hdr->frame_control);
+ } else if (!ieee80211_is_data(hdr->frame_control) ||
+ mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
+ /* These are important frames */
+ flags |= IWL_TX_FLAGS_HIGH_PRI;
}
if (mvm->trans->trans_cfg->device_family >=
@@ -1209,6 +1213,7 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct sk_buff_head mpdus_skbs;
unsigned int payload_len;
int ret;
+ struct sk_buff *orig_skb = skb;
if (WARN_ON_ONCE(!mvmsta))
return -1;
@@ -1241,8 +1246,17 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
if (ret) {
+ /* Free skbs created as part of TSO logic that have not yet been dequeued */
__skb_queue_purge(&mpdus_skbs);
- return ret;
+ /* skb here is not necessarily same as skb that entered this method,
+ * so free it explicitly.
+ */
+ if (skb == orig_skb)
+ ieee80211_free_txskb(mvm->hw, skb);
+ else
+ kfree_skb(skb);
+ /* there was error, but we consumed skb one way or another, so return 0 */
+ return 0;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index f34297fd453c..5153314e8555 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1173,6 +1173,9 @@ static void iwl_pci_remove(struct pci_dev *pdev)
{
struct iwl_trans *trans = pci_get_drvdata(pdev);
+ if (!trans)
+ return;
+
iwl_drv_stop(trans->drv);
iwl_trans_pcie_free(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 8915030030c4..e7b90cf1f28c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -630,7 +630,6 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
int ret;
- int t = 0;
int iter;
IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
@@ -645,6 +644,8 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
usleep_range(1000, 2000);
for (iter = 0; iter < 10; iter++) {
+ int t = 0;
+
/* If HW is not ready, prepare the conditions to check again */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PREPARE);
@@ -2831,7 +2832,7 @@ static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
void *buf, ssize_t *size,
ssize_t *bytes_copied)
{
- int buf_size_left = count - *bytes_copied;
+ ssize_t buf_size_left = count - *bytes_copied;
buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
if (*size > buf_size_left)
diff --git a/drivers/net/wireless/intersil/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c
index 61af5a28f269..af49aa421e47 100644
--- a/drivers/net/wireless/intersil/orinoco/hw.c
+++ b/drivers/net/wireless/intersil/orinoco/hw.c
@@ -931,6 +931,8 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv)
err = hermes_write_wordrec(hw, USER_BAP,
HERMES_RID_CNFAUTHENTICATION_AGERE,
auth_flag);
+ if (err)
+ return err;
}
err = hermes_write_wordrec(hw, USER_BAP,
HERMES_RID_CNFWEPENABLED_AGERE,
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_cs.c b/drivers/net/wireless/intersil/orinoco/orinoco_cs.c
index a956f965a1e5..03bfd2482656 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_cs.c
@@ -96,6 +96,7 @@ orinoco_cs_probe(struct pcmcia_device *link)
{
struct orinoco_private *priv;
struct orinoco_pccard *card;
+ int ret;
priv = alloc_orinocodev(sizeof(*card), &link->dev,
orinoco_cs_hard_reset, NULL);
@@ -107,8 +108,16 @@ orinoco_cs_probe(struct pcmcia_device *link)
card->p_dev = link;
link->priv = priv;
- return orinoco_cs_config(link);
-} /* orinoco_cs_attach */
+ ret = orinoco_cs_config(link);
+ if (ret)
+ goto err_free_orinocodev;
+
+ return 0;
+
+err_free_orinocodev:
+ free_orinocodev(priv);
+ return ret;
+}
static void orinoco_cs_detach(struct pcmcia_device *link)
{
diff --git a/drivers/net/wireless/intersil/orinoco/spectrum_cs.c b/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
index b60048c95e0a..011c86e55923 100644
--- a/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
@@ -157,6 +157,7 @@ spectrum_cs_probe(struct pcmcia_device *link)
{
struct orinoco_private *priv;
struct orinoco_pccard *card;
+ int ret;
priv = alloc_orinocodev(sizeof(*card), &link->dev,
spectrum_cs_hard_reset,
@@ -169,8 +170,16 @@ spectrum_cs_probe(struct pcmcia_device *link)
card->p_dev = link;
link->priv = priv;
- return spectrum_cs_config(link);
-} /* spectrum_cs_attach */
+ ret = spectrum_cs_config(link);
+ if (ret)
+ goto err_free_orinocodev;
+
+ return 0;
+
+err_free_orinocodev:
+ free_orinocodev(priv);
+ return ret;
+}
static void spectrum_cs_detach(struct pcmcia_device *link)
{
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index a3ca6620dc0c..8fa3ec71603e 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -682,7 +682,7 @@ static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
* queues have already been stopped and no new frames can sneak
* up from behind.
*/
- while ((total = p54_flush_count(priv) && i--)) {
+ while ((total = p54_flush_count(priv)) && i--) {
/* waste time */
msleep(20);
}
diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c
index ab0fe8565851..cdb57819684a 100644
--- a/drivers/net/wireless/intersil/p54/p54spi.c
+++ b/drivers/net/wireless/intersil/p54/p54spi.c
@@ -164,7 +164,7 @@ static int p54spi_request_firmware(struct ieee80211_hw *dev)
ret = p54_parse_firmware(dev, priv->firmware);
if (ret) {
- release_firmware(priv->firmware);
+ /* the firmware is released by the caller */
return ret;
}
@@ -659,6 +659,7 @@ static int p54spi_probe(struct spi_device *spi)
return 0;
err_free_common:
+ release_firmware(priv->firmware);
free_irq(gpio_to_irq(p54spi_gpio_irq), spi);
err_free_gpio_irq:
gpio_free(p54spi_gpio_irq);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index ffe27104f654..634e8c1e71cc 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -527,7 +527,7 @@ struct mac80211_hwsim_data {
bool ps_poll_pending;
struct dentry *debugfs;
- uintptr_t pending_cookie;
+ atomic_t pending_cookie;
struct sk_buff_head pending; /* packets pending */
/*
* Only radios in the same group can communicate together (the
@@ -663,6 +663,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *cb;
if (!vp->assoc)
return;
@@ -684,6 +685,10 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
memcpy(hdr->addr2, mac, ETH_ALEN);
memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
+ cb = IEEE80211_SKB_CB(skb);
+ cb->control.rates[0].count = 1;
+ cb->control.rates[1].idx = -1;
+
rcu_read_lock();
mac80211_hwsim_tx_frame(data->hw, skb,
rcu_dereference(vif->chanctx_conf)->def.chan);
@@ -1113,8 +1118,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
goto nla_put_failure;
/* We create a cookie to identify this skb */
- data->pending_cookie++;
- cookie = data->pending_cookie;
+ cookie = atomic_inc_return(&data->pending_cookie);
info->rate_driver_data[0] = (void *)cookie;
if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD))
goto nla_put_failure;
@@ -2319,7 +2323,7 @@ static void mac80211_hwsim_get_et_strings(struct ieee80211_hw *hw,
u32 sset, u8 *data)
{
if (sset == ETH_SS_STATS)
- memcpy(data, *mac80211_hwsim_gstrings_stats,
+ memcpy(data, mac80211_hwsim_gstrings_stats,
sizeof(mac80211_hwsim_gstrings_stats));
}
@@ -3260,6 +3264,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
const u8 *src;
unsigned int hwsim_flags;
int i;
+ unsigned long flags;
bool found = false;
if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
@@ -3284,18 +3289,20 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
goto out;
/* look for the skb matching the cookie passed back from user */
+ spin_lock_irqsave(&data2->pending.lock, flags);
skb_queue_walk_safe(&data2->pending, skb, tmp) {
- u64 skb_cookie;
+ uintptr_t skb_cookie;
txi = IEEE80211_SKB_CB(skb);
- skb_cookie = (u64)(uintptr_t)txi->rate_driver_data[0];
+ skb_cookie = (uintptr_t)txi->rate_driver_data[0];
if (skb_cookie == ret_skb_cookie) {
- skb_unlink(skb, &data2->pending);
+ __skb_unlink(skb, &data2->pending);
found = true;
break;
}
}
+ spin_unlock_irqrestore(&data2->pending.lock, flags);
/* not found */
if (!found)
@@ -3360,14 +3367,15 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]);
frame_data = (void *)nla_data(info->attrs[HWSIM_ATTR_FRAME]);
+ if (frame_data_len < sizeof(struct ieee80211_hdr_3addr) ||
+ frame_data_len > IEEE80211_MAX_DATA_LEN)
+ goto err;
+
/* Allocate new skb here */
skb = alloc_skb(frame_data_len, GFP_KERNEL);
if (skb == NULL)
goto err;
- if (frame_data_len > IEEE80211_MAX_DATA_LEN)
- goto err;
-
/* Copy the data */
skb_put_data(skb, frame_data, frame_data_len);
@@ -3409,6 +3417,8 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
rx_status.band = data2->channel->band;
rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
+ if (rx_status.rate_idx >= data2->hw->wiphy->bands[rx_status.band]->n_bitrates)
+ goto out;
rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
hdr = (void *)skb->data;
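
The hwsim netlink handlers get the same treatment the firmware paths got earlier in this section: input from outside the kernel is bounded before it sizes or indexes anything. The length check now runs before alloc_skb(), a minimum of one 3-address 802.11 header is required, and the RX rate index is checked against the band's bitrate table. Distilled, with the numeric bounds written out for illustration:

#include <stddef.h>
#include <stdint.h>

#define HDR_3ADDR_LEN	24	/* sizeof(struct ieee80211_hdr_3addr) */
#define MAX_DATA_LEN	2304	/* IEEE80211_MAX_DATA_LEN */

static int check_frame(size_t frame_len, uint32_t rate_idx,
		       uint32_t n_bitrates)
{
	if (frame_len < HDR_3ADDR_LEN || frame_len > MAX_DATA_LEN)
		return -1;		/* reject before any allocation */
	if (rate_idx >= n_bitrates)
		return -1;		/* reject before indexing the band */
	return 0;
}
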
diff --git a/drivers/net/wireless/marvell/libertas/Kconfig b/drivers/net/wireless/marvell/libertas/Kconfig
index b9fe598130c3..38347a2e8320 100644
--- a/drivers/net/wireless/marvell/libertas/Kconfig
+++ b/drivers/net/wireless/marvell/libertas/Kconfig
@@ -2,8 +2,6 @@
config LIBERTAS
tristate "Marvell 8xxx Libertas WLAN driver support"
depends on CFG80211
- select WIRELESS_EXT
- select WEXT_SPY
select LIB80211
select FW_LOADER
---help---
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index a4d9dd73b258..db9a852fa58a 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -1133,7 +1133,7 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
if (!cmdarray[i].cmdbuf) {
lbs_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n");
ret = -1;
- goto done;
+ goto free_cmd_array;
}
}
@@ -1141,8 +1141,17 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
init_waitqueue_head(&cmdarray[i].cmdwait_q);
lbs_cleanup_and_insert_cmd(priv, &cmdarray[i]);
}
- ret = 0;
+ return 0;
+free_cmd_array:
+ for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
+ if (cmdarray[i].cmdbuf) {
+ kfree(cmdarray[i].cmdbuf);
+ cmdarray[i].cmdbuf = NULL;
+ }
+ }
+ kfree(priv->cmd_array);
+ priv->cmd_array = NULL;
done:
return ret;
}
diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c
index b73d08381398..5908f07d62ed 100644
--- a/drivers/net/wireless/marvell/libertas/cmdresp.c
+++ b/drivers/net/wireless/marvell/libertas/cmdresp.c
@@ -48,7 +48,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv,
/* Free Tx and Rx packets */
spin_lock_irqsave(&priv->driver_lock, flags);
- kfree_skb(priv->currenttxskb);
+ dev_kfree_skb_irq(priv->currenttxskb);
priv->currenttxskb = NULL;
priv->tx_pending_len = 0;
spin_unlock_irqrestore(&priv->driver_lock, flags);
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index 5d6dc1dd050d..2240b4db8c03 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -287,6 +287,7 @@ static int if_usb_probe(struct usb_interface *intf,
return 0;
err_get_fw:
+ usb_put_dev(udev);
lbs_remove_card(priv);
err_add_card:
if_usb_reset_device(cardp);
@@ -636,7 +637,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
priv->resp_len[i] = (recvlength - MESSAGE_HEADER_LEN);
memcpy(priv->resp_buf[i], recvbuff + MESSAGE_HEADER_LEN,
priv->resp_len[i]);
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
lbs_notify_command_response(priv, i);
spin_unlock_irqrestore(&priv->driver_lock, flags);
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 2233b59cdf44..ff0b3a0e9dcd 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -217,7 +217,7 @@ int lbs_stop_iface(struct lbs_private *priv)
spin_lock_irqsave(&priv->driver_lock, flags);
priv->iface_running = false;
- kfree_skb(priv->currenttxskb);
+ dev_kfree_skb_irq(priv->currenttxskb);
priv->currenttxskb = NULL;
priv->tx_pending_len = 0;
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -870,6 +870,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL);
if (ret) {
pr_err("Out of memory allocating event FIFO buffer\n");
+ lbs_free_cmd_buffer(priv);
goto out;
}
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index b30bcb28503a..f47db95299f3 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -613,7 +613,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
spin_lock_irqsave(&priv->driver_lock, flags);
memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN,
recvlength - MESSAGE_HEADER_LEN);
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
lbtf_cmd_response_rx(priv);
spin_unlock_irqrestore(&priv->driver_lock, flags);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index acbef9f1a83b..b397a7e85e6b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -890,7 +890,7 @@ mwifiex_send_delba_txbastream_tbl(struct mwifiex_private *priv, u8 tid)
*/
void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter)
{
- u8 i;
+ u8 i, j;
u32 tx_win_size;
struct mwifiex_private *priv;
@@ -921,8 +921,8 @@ void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter)
if (tx_win_size != priv->add_ba_param.tx_win_size) {
if (!priv->media_connected)
continue;
- for (i = 0; i < MAX_NUM_TID; i++)
- mwifiex_send_delba_txbastream_tbl(priv, i);
+ for (j = 0; j < MAX_NUM_TID; j++)
+ mwifiex_send_delba_txbastream_tbl(priv, j);
}
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 05a3c61ac603..793be2835134 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -977,8 +977,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
}
}
- tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
- tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
+ tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len);
+ tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len;
tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
}
}
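
The rxreorder fix corrects the walk stride: struct mwifiex_ie_types_rxba_sync carries payload fields beyond its TLV header, so advancing by sizeof(*tlv_rxba) + tlv_len counted the non-header part twice and could march the cursor (and the remaining-length bookkeeping) past the buffer. The stride must be the header size plus the length the header declares. Generic form, endianness handling omitted:

#include <stddef.h>
#include <stdint.h>

struct tlv_hdr {
	uint16_t type;
	uint16_t len;		/* payload bytes that follow this header */
} __attribute__((packed));

static void walk_tlvs(const uint8_t *buf, size_t buf_left)
{
	while (buf_left >= sizeof(struct tlv_hdr)) {
		const struct tlv_hdr *tlv = (const void *)buf;
		size_t step = sizeof(*tlv) + tlv->len;

		if (step > buf_left)
			break;		/* truncated TLV: stop cleanly */

		/* ... handle this TLV ... */

		buf += step;
		buf_left -= step;
	}
}
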
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 1599ae74b066..857b61db9d2c 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1984,6 +1984,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
mwifiex_set_sys_config_invalid_data(bss_cfg);
+ memcpy(bss_cfg->mac_addr, priv->curr_addr, ETH_ALEN);
+
if (params->beacon_interval)
bss_cfg->beacon_period = params->beacon_interval;
if (params->dtim_period)
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index 8ab114cf3467..a6f4655b301d 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -265,8 +265,11 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf,
if (!p)
return -ENOMEM;
- if (!priv || !priv->hist_data)
- return -EFAULT;
+ if (!priv || !priv->hist_data) {
+ ret = -EFAULT;
+ goto free_and_exit;
+ }
+
phist_data = priv->hist_data;
p += sprintf(p, "\n"
@@ -321,6 +324,8 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf,
ret = simple_read_from_buffer(ubuf, count, ppos, (char *)page,
(unsigned long)p - page);
+free_and_exit:
+ free_page(page);
return ret;
}
@@ -971,9 +976,6 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
priv->dfs_dev_dir = debugfs_create_dir(priv->netdev->name,
mwifiex_dfs_dir);
- if (!priv->dfs_dev_dir)
- return;
-
MWIFIEX_DFS_ADD_FILE(info);
MWIFIEX_DFS_ADD_FILE(debug);
MWIFIEX_DFS_ADD_FILE(getlog);
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 076ea1c4b921..3e3134bcc2b0 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -177,6 +177,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32)
#define TLV_TYPE_BSSID (PROPRIETARY_TLV_BASE_ID + 35)
#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
+#define TLV_TYPE_UAP_MAC_ADDRESS (PROPRIETARY_TLV_BASE_ID + 43)
#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44)
#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48)
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index 0dd592ea6e83..96ff91655a77 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -119,6 +119,7 @@ struct mwifiex_uap_bss_param {
u8 qos_info;
u8 power_constraint;
struct mwifiex_types_wmm_info wmm_info;
+ u8 mac_addr[ETH_ALEN];
};
enum {
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index b316e3491795..78d7674e71f9 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -50,6 +50,8 @@ static int mwifiex_pcie_probe_of(struct device *dev)
}
static void mwifiex_pcie_work(struct work_struct *work);
+static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter);
+static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter);
static int
mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
@@ -58,8 +60,8 @@ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
struct pcie_service_card *card = adapter->card;
struct mwifiex_dma_mapping mapping;
- mapping.addr = pci_map_single(card->dev, skb->data, size, flags);
- if (pci_dma_mapping_error(card->dev, mapping.addr)) {
+ mapping.addr = dma_map_single(&card->dev->dev, skb->data, size, flags);
+ if (dma_mapping_error(&card->dev->dev, mapping.addr)) {
mwifiex_dbg(adapter, ERROR, "failed to map pci memory!\n");
return -1;
}
@@ -75,7 +77,7 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
struct mwifiex_dma_mapping mapping;
mwifiex_get_mapping(skb, &mapping);
- pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
+ dma_unmap_single(&card->dev->dev, mapping.addr, mapping.len, flags);
}
/*
@@ -465,10 +467,9 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
struct sk_buff *cmdrsp = card->cmdrsp_buf;
for (count = 0; count < max_delay_loop_cnt; count++) {
- pci_dma_sync_single_for_cpu(card->dev,
- MWIFIEX_SKB_DMA_ADDR(cmdrsp),
- sizeof(sleep_cookie),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&card->dev->dev,
+ MWIFIEX_SKB_DMA_ADDR(cmdrsp),
+ sizeof(sleep_cookie), DMA_FROM_DEVICE);
buffer = cmdrsp->data;
sleep_cookie = get_unaligned_le32(buffer);
@@ -477,10 +478,10 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
"sleep cookie found at count %d\n", count);
break;
}
- pci_dma_sync_single_for_device(card->dev,
- MWIFIEX_SKB_DMA_ADDR(cmdrsp),
- sizeof(sleep_cookie),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&card->dev->dev,
+ MWIFIEX_SKB_DMA_ADDR(cmdrsp),
+ sizeof(sleep_cookie),
+ DMA_FROM_DEVICE);
usleep_range(20, 30);
}
@@ -628,14 +629,15 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
if (!skb) {
mwifiex_dbg(adapter, ERROR,
"Unable to allocate skb for RX ring.\n");
- kfree(card->rxbd_ring_vbase);
return -ENOMEM;
}
if (mwifiex_map_pci_memory(adapter, skb,
MWIFIEX_RX_DATA_BUF_SIZE,
- PCI_DMA_FROMDEVICE))
- return -1;
+ DMA_FROM_DEVICE)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@@ -685,16 +687,14 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
if (!skb) {
mwifiex_dbg(adapter, ERROR,
"Unable to allocate skb for EVENT buf.\n");
- kfree(card->evtbd_ring_vbase);
return -ENOMEM;
}
skb_put(skb, MAX_EVENT_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE)) {
+ DMA_FROM_DEVICE)) {
kfree_skb(skb);
- kfree(card->evtbd_ring_vbase);
- return -1;
+ return -ENOMEM;
}
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@@ -734,7 +734,7 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@@ -743,7 +743,7 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@@ -773,7 +773,7 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@@ -782,7 +782,7 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@@ -808,7 +808,7 @@ static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter)
if (card->evt_buf_list[i]) {
skb = card->evt_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
card->evt_buf_list[i] = NULL;
@@ -849,9 +849,10 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: txbd_ring: Allocating %d bytes\n",
card->txbd_ring_size);
- card->txbd_ring_vbase = pci_alloc_consistent(card->dev,
- card->txbd_ring_size,
- &card->txbd_ring_pbase);
+ card->txbd_ring_vbase = dma_alloc_coherent(&card->dev->dev,
+ card->txbd_ring_size,
+ &card->txbd_ring_pbase,
+ GFP_KERNEL);
if (!card->txbd_ring_vbase) {
mwifiex_dbg(adapter, ERROR,
"allocate consistent memory (%d bytes) failed!\n",
@@ -875,9 +876,9 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
mwifiex_cleanup_txq_ring(adapter);
if (card->txbd_ring_vbase)
- pci_free_consistent(card->dev, card->txbd_ring_size,
- card->txbd_ring_vbase,
- card->txbd_ring_pbase);
+ dma_free_coherent(&card->dev->dev, card->txbd_ring_size,
+ card->txbd_ring_vbase,
+ card->txbd_ring_pbase);
card->txbd_ring_size = 0;
card->txbd_wrptr = 0;
card->txbd_rdptr = 0 | reg->tx_rollover_ind;
@@ -892,6 +893,7 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
*/
static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
{
+ int ret;
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
@@ -913,9 +915,10 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: rxbd_ring: Allocating %d bytes\n",
card->rxbd_ring_size);
- card->rxbd_ring_vbase = pci_alloc_consistent(card->dev,
- card->rxbd_ring_size,
- &card->rxbd_ring_pbase);
+ card->rxbd_ring_vbase = dma_alloc_coherent(&card->dev->dev,
+ card->rxbd_ring_size,
+ &card->rxbd_ring_pbase,
+ GFP_KERNEL);
if (!card->rxbd_ring_vbase) {
mwifiex_dbg(adapter, ERROR,
"allocate consistent memory (%d bytes) failed!\n",
@@ -929,7 +932,10 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
(u32)((u64)card->rxbd_ring_pbase >> 32),
card->rxbd_ring_size);
- return mwifiex_init_rxq_ring(adapter);
+ ret = mwifiex_init_rxq_ring(adapter);
+ if (ret)
+ mwifiex_pcie_delete_rxbd_ring(adapter);
+ return ret;
}
/*
@@ -943,9 +949,9 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
mwifiex_cleanup_rxq_ring(adapter);
if (card->rxbd_ring_vbase)
- pci_free_consistent(card->dev, card->rxbd_ring_size,
- card->rxbd_ring_vbase,
- card->rxbd_ring_pbase);
+ dma_free_coherent(&card->dev->dev, card->rxbd_ring_size,
+ card->rxbd_ring_vbase,
+ card->rxbd_ring_pbase);
card->rxbd_ring_size = 0;
card->rxbd_wrptr = 0;
card->rxbd_rdptr = 0 | reg->rx_rollover_ind;
@@ -960,6 +966,7 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
*/
static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
{
+ int ret;
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
@@ -977,9 +984,10 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: evtbd_ring: Allocating %d bytes\n",
card->evtbd_ring_size);
- card->evtbd_ring_vbase = pci_alloc_consistent(card->dev,
- card->evtbd_ring_size,
- &card->evtbd_ring_pbase);
+ card->evtbd_ring_vbase = dma_alloc_coherent(&card->dev->dev,
+ card->evtbd_ring_size,
+ &card->evtbd_ring_pbase,
+ GFP_KERNEL);
if (!card->evtbd_ring_vbase) {
mwifiex_dbg(adapter, ERROR,
"allocate consistent memory (%d bytes) failed!\n",
@@ -993,7 +1001,10 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
(u32)((u64)card->evtbd_ring_pbase >> 32),
card->evtbd_ring_size);
- return mwifiex_pcie_init_evt_ring(adapter);
+ ret = mwifiex_pcie_init_evt_ring(adapter);
+ if (ret)
+ mwifiex_pcie_delete_evtbd_ring(adapter);
+ return ret;
}
/*
@@ -1007,9 +1018,9 @@ static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter)
mwifiex_cleanup_evt_ring(adapter);
if (card->evtbd_ring_vbase)
- pci_free_consistent(card->dev, card->evtbd_ring_size,
- card->evtbd_ring_vbase,
- card->evtbd_ring_pbase);
+ dma_free_coherent(&card->dev->dev, card->evtbd_ring_size,
+ card->evtbd_ring_vbase,
+ card->evtbd_ring_pbase);
card->evtbd_wrptr = 0;
card->evtbd_rdptr = 0 | reg->evt_rollover_ind;
card->evtbd_ring_size = 0;
@@ -1036,7 +1047,7 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
}
skb_put(skb, MWIFIEX_UPLD_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE)) {
+ DMA_FROM_DEVICE)) {
kfree_skb(skb);
return -1;
}
@@ -1060,14 +1071,14 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
if (card && card->cmdrsp_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(card->cmdrsp_buf);
card->cmdrsp_buf = NULL;
}
if (card && card->cmd_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
dev_kfree_skb_any(card->cmd_buf);
card->cmd_buf = NULL;
}
@@ -1082,8 +1093,10 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
struct pcie_service_card *card = adapter->card;
u32 *cookie;
- card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32),
- &card->sleep_cookie_pbase);
+ card->sleep_cookie_vbase = dma_alloc_coherent(&card->dev->dev,
+ sizeof(u32),
+ &card->sleep_cookie_pbase,
+ GFP_KERNEL);
if (!card->sleep_cookie_vbase) {
mwifiex_dbg(adapter, ERROR,
"pci_alloc_consistent failed!\n");
@@ -1111,9 +1124,9 @@ static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter)
card = adapter->card;
if (card && card->sleep_cookie_vbase) {
- pci_free_consistent(card->dev, sizeof(u32),
- card->sleep_cookie_vbase,
- card->sleep_cookie_pbase);
+ dma_free_coherent(&card->dev->dev, sizeof(u32),
+ card->sleep_cookie_vbase,
+ card->sleep_cookie_pbase);
card->sleep_cookie_vbase = NULL;
}
@@ -1185,7 +1198,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
"SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
skb, wrdoneidx);
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
unmap_count++;
@@ -1278,7 +1291,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
put_unaligned_le16(MWIFIEX_TYPE_DATA, payload + 2);
if (mwifiex_map_pci_memory(adapter, skb, skb->len,
- PCI_DMA_TODEVICE))
+ DMA_TO_DEVICE))
return -1;
wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr;
@@ -1368,7 +1381,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
return -EINPROGRESS;
done_unmap:
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
card->tx_buf_list[wrindx] = NULL;
atomic_dec(&adapter->tx_hw_pending);
if (reg->pfu_enabled)
@@ -1422,7 +1435,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
if (!skb_data)
return -ENOMEM;
- mwifiex_unmap_pci_memory(adapter, skb_data, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb_data, DMA_FROM_DEVICE);
card->rx_buf_list[rd_index] = NULL;
/* Get data length from interface header -
@@ -1460,7 +1473,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
if (mwifiex_map_pci_memory(adapter, skb_tmp,
MWIFIEX_RX_DATA_BUF_SIZE,
- PCI_DMA_FROMDEVICE))
+ DMA_FROM_DEVICE))
return -1;
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp);
@@ -1537,7 +1550,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
return -1;
}
- if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE))
+ if (mwifiex_map_pci_memory(adapter, skb, skb->len, DMA_TO_DEVICE))
return -1;
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@@ -1549,7 +1562,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
mwifiex_dbg(adapter, ERROR,
"%s: failed to write download command to boot code.\n",
__func__);
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@@ -1561,7 +1574,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
mwifiex_dbg(adapter, ERROR,
"%s: failed to write download command to boot code.\n",
__func__);
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@@ -1570,7 +1583,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
mwifiex_dbg(adapter, ERROR,
"%s: failed to write command len to cmd_size scratch reg\n",
__func__);
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@@ -1579,7 +1592,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
CPU_INTR_DOOR_BELL)) {
mwifiex_dbg(adapter, ERROR,
"%s: failed to assert door-bell intr\n", __func__);
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@@ -1638,7 +1651,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
put_unaligned_le16((u16)skb->len, &payload[0]);
put_unaligned_le16(MWIFIEX_TYPE_CMD, &payload[2]);
- if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE))
+ if (mwifiex_map_pci_memory(adapter, skb, skb->len, DMA_TO_DEVICE))
return -1;
card->cmd_buf = skb;
@@ -1738,17 +1751,16 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
"info: Rx CMD Response\n");
if (adapter->curr_cmd)
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_FROM_DEVICE);
else
- pci_dma_sync_single_for_cpu(card->dev,
- MWIFIEX_SKB_DMA_ADDR(skb),
- MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&card->dev->dev,
+ MWIFIEX_SKB_DMA_ADDR(skb),
+ MWIFIEX_UPLD_SIZE, DMA_FROM_DEVICE);
/* Unmap the command as a response has been received. */
if (card->cmd_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
dev_kfree_skb_any(card->cmd_buf);
card->cmd_buf = NULL;
}
@@ -1759,10 +1771,10 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (!adapter->curr_cmd) {
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
- pci_dma_sync_single_for_device(card->dev,
- MWIFIEX_SKB_DMA_ADDR(skb),
- MWIFIEX_SLEEP_COOKIE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&card->dev->dev,
+ MWIFIEX_SKB_DMA_ADDR(skb),
+ MWIFIEX_SLEEP_COOKIE_SIZE,
+ DMA_FROM_DEVICE);
if (mwifiex_write_reg(adapter,
PCIE_CPU_INT_EVENT,
CPU_INTR_SLEEP_CFM_DONE)) {
@@ -1773,7 +1785,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
mwifiex_delay_for_sleep_cookie(adapter,
MWIFIEX_MAX_DELAY_COUNT);
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_pull(skb, adapter->intf_hdr_len);
while (reg->sleep_cookie && (count++ < 10) &&
mwifiex_pcie_ok_to_access_hw(adapter))
@@ -1789,7 +1801,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
skb_push(skb, adapter->intf_hdr_len);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE))
+ DMA_FROM_DEVICE))
return -1;
} else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
skb_pull(skb, adapter->intf_hdr_len);
@@ -1831,7 +1843,7 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
card->cmdrsp_buf = skb;
skb_push(card->cmdrsp_buf, adapter->intf_hdr_len);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE))
+ DMA_FROM_DEVICE))
return -1;
}
@@ -1886,7 +1898,7 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: Read Index: %d\n", rdptr);
skb_cmd = card->evt_buf_list[rdptr];
- mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb_cmd, DMA_FROM_DEVICE);
/* Take the pointer and set it to event pointer in adapter
and will return back after event handling callback */
@@ -1966,7 +1978,7 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
skb_put(skb, MAX_EVENT_SIZE - skb->len);
if (mwifiex_map_pci_memory(adapter, skb,
MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE))
+ DMA_FROM_DEVICE))
return -1;
card->evt_buf_list[rdptr] = skb;
desc = card->evtbd_ring[rdptr];
@@ -2248,7 +2260,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
"interrupt status during fw dnld.\n",
__func__);
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
ret = -1;
goto done;
}
@@ -2260,12 +2272,12 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
mwifiex_dbg(adapter, ERROR, "%s: Card failed to ACK download\n",
__func__);
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
ret = -1;
goto done;
}
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
offset += txlen;
} while (true);
@@ -2935,13 +2947,13 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
pci_set_master(pdev);
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
pr_err("set_dma_mask(32) failed: %d\n", ret);
goto err_set_dma_mask;
}
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
pr_err("set_consistent_dma_mask(64) failed\n");
goto err_set_dma_mask;
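
The bulk of this pcie.c diff is a mechanical migration from the legacy pci_* DMA wrappers (pci_map_single(), pci_alloc_consistent(), PCI_DMA_FROMDEVICE, pci_set_dma_mask(), ...) to the generic DMA API, which takes a struct device *, explicit GFP flags for coherent allocations, and the DMA_TO_DEVICE/DMA_FROM_DEVICE direction enum; the ring-creation paths also gain proper unwind on failure. A hedged kernel-style sketch of the target API (function and sizes hypothetical):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/pci.h>

    static int example_dma_setup(struct pci_dev *pdev, void *buf, size_t len,
                                 void **ring_va, dma_addr_t *ring_pa)
    {
        dma_addr_t pa;

        /* Streaming mapping: CPU fills buf, the device then reads it. */
        pa = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, pa))
            return -ENOMEM;
        /* ... hand pa to the hardware; once the transfer completes: */
        dma_unmap_single(&pdev->dev, pa, len, DMA_TO_DEVICE);

        /* Coherent ring: unlike pci_alloc_consistent(), the GFP flags are
         * explicit, and the matching free is dma_free_coherent(). */
        *ring_va = dma_alloc_coherent(&pdev->dev, 4096, ring_pa, GFP_KERNEL);
        if (!*ring_va)
            return -ENOMEM;
        return 0;
    }
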
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 629af26675cf..1ab04adc53dc 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -2202,9 +2202,9 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
if (nd_config) {
adapter->nd_info =
- kzalloc(sizeof(struct cfg80211_wowlan_nd_match) +
- sizeof(struct cfg80211_wowlan_nd_match *) *
- scan_rsp->number_of_sets, GFP_ATOMIC);
+ kzalloc(struct_size(adapter->nd_info, matches,
+ scan_rsp->number_of_sets),
+ GFP_ATOMIC);
if (adapter->nd_info)
adapter->nd_info->n_matches = scan_rsp->number_of_sets;
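
struct_size(p, member, n) evaluates to sizeof(*p) + n * sizeof(*p->member) with overflow checking, and here it also appears to correct the allocation, which previously sized the header from struct cfg80211_wowlan_nd_match rather than the nd_info container actually being allocated. A minimal sketch with a hypothetical flexible-array struct:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct nd_info_example {          /* hypothetical flexible-array container */
        int n_matches;
        void *matches[];              /* flexible array member */
    };

    static struct nd_info_example *alloc_nd_info(int n)
    {
        struct nd_info_example *info;

        /* struct_size() = sizeof(*info) + n * sizeof(info->matches[0]),
         * saturating on overflow so a huge n fails the allocation instead
         * of wrapping around to a short one. */
        info = kzalloc(struct_size(info, matches, n), GFP_ATOMIC);
        if (info)
            info->n_matches = n;
        return info;
    }
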
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index b322c2755e9a..9b39ffd80931 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -58,6 +58,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
};
static const struct of_device_id mwifiex_sdio_of_match_table[] = {
+ { .compatible = "marvell,sd8787" },
{ .compatible = "marvell,sd8897" },
{ .compatible = "marvell,sd8997" },
{ }
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
index 52a2ce2e78b0..98157fd245f7 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
@@ -98,12 +98,23 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
- if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
- sizeof(bridge_tunnel_header))) ||
- (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
- sizeof(rfc1042_header)) &&
- ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
- ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
+ if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) +
+ rx_pkt_off > skb->len) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
+ skb->len, rx_pkt_off);
+ priv->stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
+
+ if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len &&
+ ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+ sizeof(bridge_tunnel_header))) ||
+ (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+ sizeof(rfc1042_header)) &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) {
/*
* Replace the 803 header and rfc1042 header (llc/snap) with an
* EthernetII header, keep the src/dst and snap_type
@@ -206,7 +217,8 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;
- if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
+ if ((rx_pkt_offset + rx_pkt_length) > skb->len ||
+ sizeof(rx_pkt_hdr->eth803_hdr) + rx_pkt_offset > skb->len) {
mwifiex_dbg(adapter, ERROR,
"wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
skb->len, rx_pkt_offset, rx_pkt_length);
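
Both sta_rx.c hunks follow the same defensive rule: rx_pkt_off and rx_pkt_length come from the firmware's RX descriptor, so every header dereference is preceded by a check that the advertised offset plus the header size still fits inside the skb. A self-contained userspace sketch of the check (field names hypothetical):

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    struct eth803_hdr_ex {            /* hypothetical 802.3-style header */
        uint8_t dst[6], src[6];
        uint16_t len;
    };

    /* Drop the frame unless the untrusted offset leaves room for the
     * whole header inside the received buffer. */
    static int parse_rx(const uint8_t *pkt, size_t pkt_len, size_t rx_pkt_off)
    {
        struct eth803_hdr_ex hdr;

        if (rx_pkt_off > pkt_len ||
            sizeof(hdr) > pkt_len - rx_pkt_off)   /* overflow-safe form */
            return -1;                            /* count as rx_dropped */
        memcpy(&hdr, pkt + rx_pkt_off, sizeof(hdr));
        /* ... only now is it safe to interpret hdr ... */
        return 0;
    }
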
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index f8f282ce39bd..17f837935192 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -734,6 +734,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
int ret;
u16 capab;
struct ieee80211_ht_cap *ht_cap;
+ unsigned int extra;
u8 radio, *pos;
capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
@@ -752,7 +753,10 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
switch (action_code) {
case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
- skb_put(skb, sizeof(mgmt->u.action.u.tdls_discover_resp) + 1);
+ /* See the layout of 'struct ieee80211_mgmt'. */
+ extra = sizeof(mgmt->u.action.u.tdls_discover_resp) +
+ sizeof(mgmt->u.action.category);
+ skb_put(skb, extra);
mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
mgmt->u.action.u.tdls_discover_resp.action_code =
WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
@@ -761,8 +765,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
mgmt->u.action.u.tdls_discover_resp.capability =
cpu_to_le16(capab);
/* move back for addr4 */
- memmove(pos + ETH_ALEN, &mgmt->u.action.category,
- sizeof(mgmt->u.action.u.tdls_discover_resp));
+ memmove(pos + ETH_ALEN, &mgmt->u.action, extra);
/* init address 4 */
eth_broadcast_addr(pos);
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 0939a8c8f3ab..1ab253c97c14 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -479,6 +479,7 @@ void mwifiex_config_uap_11d(struct mwifiex_private *priv,
static int
mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
{
+ struct host_cmd_tlv_mac_addr *mac_tlv;
struct host_cmd_tlv_dtim_period *dtim_period;
struct host_cmd_tlv_beacon_period *beacon_period;
struct host_cmd_tlv_ssid *ssid;
@@ -498,6 +499,13 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
int i;
u16 cmd_size = *param_size;
+ mac_tlv = (struct host_cmd_tlv_mac_addr *)tlv;
+ mac_tlv->header.type = cpu_to_le16(TLV_TYPE_UAP_MAC_ADDRESS);
+ mac_tlv->header.len = cpu_to_le16(ETH_ALEN);
+ memcpy(mac_tlv->mac_addr, bss_cfg->mac_addr, ETH_ALEN);
+ cmd_size += sizeof(struct host_cmd_tlv_mac_addr);
+ tlv += sizeof(struct host_cmd_tlv_mac_addr);
+
if (bss_cfg->ssid.ssid_len) {
ssid = (struct host_cmd_tlv_ssid *)tlv;
ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
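
Together with the cfg80211.c, fw.h and ioctl.h hunks earlier in this series, this teaches the uAP start path to push the interface MAC down to firmware as one more TLV. The append pattern is the same as for the SSID TLV that follows it: fill a little-endian type/length header, copy the payload, then advance both the write cursor and the running command size. A hedged stand-alone sketch (types hypothetical, little-endian host assumed):

    #include <stdint.h>
    #include <string.h>

    struct tlv_hdr_le { uint16_t type, len; } __attribute__((packed));

    #define cpu_to_le16_ex(x) (x)   /* assumption: little-endian host */

    /* Append one TLV and return the advanced cursor. */
    static uint8_t *append_tlv(uint8_t *tlv, uint16_t *cmd_size, uint16_t type,
                               const void *payload, uint16_t len)
    {
        struct tlv_hdr_le hdr = {
            .type = cpu_to_le16_ex(type),
            .len  = cpu_to_le16_ex(len),
        };

        memcpy(tlv, &hdr, sizeof(hdr));
        memcpy(tlv + sizeof(hdr), payload, len);
        *cmd_size += sizeof(hdr) + len;   /* header counts toward the command */
        return tlv + sizeof(hdr) + len;
    }
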
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 354b09c5e8dc..cb3f72eee230 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -115,6 +115,16 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
return;
}
+ if (sizeof(*rx_pkt_hdr) +
+ le16_to_cpu(uap_rx_pd->rx_pkt_offset) > skb->len) {
+ mwifiex_dbg(adapter, ERROR,
+ "wrong rx packet offset: len=%d,rx_pkt_offset=%d\n",
+ skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
+ priv->stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
sizeof(bridge_tunnel_header))) ||
(!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
@@ -255,7 +265,15 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
if (is_multicast_ether_addr(ra)) {
skb_uap = skb_copy(skb, GFP_ATOMIC);
- mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
+ if (likely(skb_uap)) {
+ mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
+ } else {
+ mwifiex_dbg(adapter, ERROR,
+ "failed to copy skb for uAP\n");
+ priv->stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
} else {
if (mwifiex_get_sta_entry(priv, ra)) {
/* Requeue Intra-BSS packet */
@@ -383,6 +401,16 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+ if (le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
+ sizeof(rx_pkt_hdr->eth803_hdr) > skb->len) {
+ mwifiex_dbg(adapter, ERROR,
+ "wrong rx packet for struct ethhdr: len=%d, offset=%d\n",
+ skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
+ priv->stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+
ether_addr_copy(ta, rx_pkt_hdr->eth803_hdr.h_source);
if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
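
skb_copy() allocates and can fail under memory pressure; before this fix the NULL result was passed straight into the bridging path. The new code accounts the drop and frees the original instead. A hedged kernel-style sketch of the pattern (names hypothetical):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Bridge a multicast frame only if the copy succeeded; otherwise
     * account the drop and free the original rather than passing NULL on. */
    static int forward_multicast(struct net_device *ndev, struct sk_buff *skb)
    {
        struct sk_buff *copy = skb_copy(skb, GFP_ATOMIC);

        if (unlikely(!copy)) {
            ndev->stats.rx_dropped++;
            dev_kfree_skb_any(skb);
            return -1;
        }
        /* ... queue 'copy' for transmit, keep processing 'skb' locally ... */
        return 0;
    }
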
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 3b0d31827681..135b197fb4a3 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -405,11 +405,15 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
}
rx_pd = (struct rxpd *)skb->data;
+ pkt_len = le16_to_cpu(rx_pd->rx_pkt_length);
+ if (pkt_len < sizeof(struct ieee80211_hdr) + sizeof(pkt_len)) {
+ mwifiex_dbg(priv->adapter, ERROR, "invalid rx_pkt_length");
+ return -1;
+ }
skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
skb_pull(skb, sizeof(pkt_len));
-
- pkt_len = le16_to_cpu(rx_pd->rx_pkt_length);
+ pkt_len -= sizeof(pkt_len);
ieee_hdr = (void *)skb->data;
if (ieee80211_is_mgmt(ieee_hdr->frame_control)) {
@@ -422,7 +426,7 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
skb->data + sizeof(struct ieee80211_hdr),
pkt_len - sizeof(struct ieee80211_hdr));
- pkt_len -= ETH_ALEN + sizeof(pkt_len);
+ pkt_len -= ETH_ALEN;
rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);
cfg80211_rx_mgmt(&priv->wdev, priv->roc_cfg.chan.center_freq,
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index d3efcbd48ee1..50a92aaa221f 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -411,6 +411,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
bool more;
spin_lock_bh(&q->lock);
+
do {
buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
if (!buf)
@@ -418,6 +419,12 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
skb_free_frag(buf);
} while (1);
+
+ if (q->rx_head) {
+ dev_kfree_skb(q->rx_head);
+ q->rx_head = NULL;
+ }
+
spin_unlock_bh(&q->lock);
if (!q->rx_page.va)
@@ -440,12 +447,6 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
mt76_dma_rx_cleanup(dev, q);
mt76_dma_sync_idx(dev, q);
mt76_dma_rx_fill(dev, q);
-
- if (!q->rx_head)
- return;
-
- dev_kfree_skb(q->rx_head);
- q->rx_head = NULL;
}
static void
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 8bd191347b9f..179337eb39ba 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -103,6 +103,7 @@ static int mt76_led_init(struct mt76_dev *dev)
if (!of_property_read_u32(np, "led-sources", &led_pin))
dev->led_pin = led_pin;
dev->led_al = of_property_read_bool(np, "led-active-low");
+ of_node_put(np);
}
return led_classdev_register(dev->dev, &dev->led_cdev);
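
The one-liner plugs an OF node refcount leak: the "led" child node looked up a few lines above carries a reference that was never dropped. A hedged sketch of the balanced lookup (assuming the lookup is of_get_child_by_name(), as in the driver):

    #include <linux/of.h>

    /* Balanced OF node lookup: of_get_child_by_name() takes a reference
     * that must be dropped with of_node_put() once the properties are read. */
    static void read_led_props(struct device_node *parent, u32 *pin, bool *low)
    {
        struct device_node *np = of_get_child_by_name(parent, "led");

        if (!np)
            return;
        of_property_read_u32(np, "led-sources", pin);
        *low = of_property_read_bool(np, "led-active-low");
        of_node_put(np);          /* drop the reference from the lookup */
    }
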
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
index e5af4f3389cc..47e36937a6ec 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
@@ -39,11 +39,13 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
}
if (intr & MT_INT_RX_DONE(0)) {
+ dev->rx_pse_check = 0;
mt7603_irq_disable(dev, MT_INT_RX_DONE(0));
napi_schedule(&dev->mt76.napi[0]);
}
if (intr & MT_INT_RX_DONE(1)) {
+ dev->rx_pse_check = 0;
mt7603_irq_disable(dev, MT_INT_RX_DONE(1));
napi_schedule(&dev->mt76.napi[1]);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index ff3f3d98b625..5cfea0d8c776 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -1416,20 +1416,29 @@ static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
{
u32 addr, val;
- if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
- return true;
-
if (mt7603_rx_fifo_busy(dev))
- return false;
+ goto out;
addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
mt76_wr(dev, addr, 3);
val = mt76_rr(dev, addr) >> 16;
- if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
- return true;
+ if (!(val & BIT(0)))
+ return false;
+
+ if (is_mt7628(dev))
+ val &= 0xa000;
+ else
+ val &= 0x8000;
+ if (!val)
+ return false;
+
+out:
+ if (mt76_rr(dev, MT_INT_SOURCE_CSR) &
+ (MT_INT_RX_DONE(0) | MT_INT_RX_DONE(1)))
+ return false;
- return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
+ return true;
}
static bool
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index a6c530b9ceee..0c813e2b9d29 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -13,10 +13,7 @@
#include "../dma.h"
#include "mac.h"
-static inline s8 to_rssi(u32 field, u32 rxv)
-{
- return (FIELD_GET(field, rxv) - 220) / 2;
-}
+#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
u8 idx, bool unicast)
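
Converting to_rssi() from an inline function to a macro keeps the field mask a constant expression at every expansion site; FIELD_GET() performs compile-time checks on its mask (it must be a builtin constant), which a mask passed through a function parameter cannot generally satisfy. A hedged sketch, with a hypothetical field layout:

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define MT_RXV_RSSI0    GENMASK(7, 0)   /* hypothetical field layout */

    /* As a macro, 'field' stays a constant expression, which FIELD_GET()'s
     * compile-time mask check requires. */
    #define to_rssi(field, rxv)    ((FIELD_GET(field, rxv) - 220) / 2)

    static inline s8 rssi0_from_rxv(u32 rxv)
    {
        return to_rssi(MT_RXV_RSSI0, rxv);
    }
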
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
index 92305bd31aa1..4209209ac940 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
@@ -77,10 +77,7 @@ int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
if (vif_idx == i) {
force_update = !!dev->beacons[i] ^ !!skb;
-
- if (dev->beacons[i])
- dev_kfree_skb(dev->beacons[i]);
-
+ dev_kfree_skb(dev->beacons[i]);
dev->beacons[i] = skb;
__mt76x02_mac_set_beacon(dev, bcn_idx, skb);
} else if (force_update && dev->beacons[i]) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
index c54c50fd639a..c0227b20b6a3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
@@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan)
{
- u16 val;
u8 lna;
- val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
- if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
- *lna_2g = 0;
- if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
- memset(lna_5g, 0, sizeof(s8) * 3);
-
if (chan->band == NL80211_BAND_2GHZ)
lna = *lna_2g;
else if (chan->hw_value <= 64)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index 9f91556c7f38..3ee945eafa4d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -251,7 +251,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
int channel = chan->hw_value;
s8 lna_5g[3], lna_2g;
- u8 lna;
+ bool use_lna;
+ u8 lna = 0;
u16 val;
if (chan->band == NL80211_BAND_2GHZ)
@@ -270,7 +271,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
- lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+ if (chan->band == NL80211_BAND_2GHZ)
+ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G);
+ else
+ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G);
+
+ if (use_lna)
+ lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+
dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
}
EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 6f2172be7b66..072888072b0d 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -118,7 +118,8 @@ static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
if (data_len < min_seg_len ||
WARN_ON_ONCE(!dma_len) ||
WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
- WARN_ON_ONCE(dma_len & 0x3))
+ WARN_ON_ONCE(dma_len & 0x3) ||
+ WARN_ON_ONCE(dma_len < min_seg_len))
return 0;
return MT_DMA_HDRS + dma_len;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index c99f1912e266..32d2528cdd5f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -4154,7 +4154,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
- rt2800_bbp_write(rt2x00dev, 86, 0);
+ if (rt2x00_rt(rt2x00dev, RT6352))
+ rt2800_bbp_write(rt2x00dev, 86, 0x38);
+ else
+ rt2800_bbp_write(rt2x00dev, 86, 0);
}
if (rf->channel <= 14) {
@@ -4355,7 +4358,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
reg = (rf->channel <= 14 ? 0x1c : 0x24) + 2*rt2x00dev->lna_gain;
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
- rt2800_iq_calibrate(rt2x00dev, rf->channel);
+ if (rt2x00_rt(rt2x00dev, RT5592))
+ rt2800_iq_calibrate(rt2x00dev, rf->channel);
}
bbp = rt2800_bbp_read(rt2x00dev, 4);
@@ -5628,7 +5632,8 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
if (qual->vgc_level != vgc_level) {
if (rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT3593) ||
- rt2x00_rt(rt2x00dev, RT3883)) {
+ rt2x00_rt(rt2x00dev, RT3883) ||
+ rt2x00_rt(rt2x00dev, RT6352)) {
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
vgc_level);
} else if (rt2x00_rt(rt2x00dev, RT5592)) {
@@ -5851,7 +5856,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
} else if (rt2x00_rt(rt2x00dev, RT6352)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401);
- rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0001);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000);
rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0);
@@ -6113,6 +6118,27 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
reg = rt2800_register_read(rt2x00dev, US_CYC_CNT);
rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125);
rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
+ } else if (rt2x00_is_soc(rt2x00dev)) {
+ struct clk *clk = clk_get_sys("bus", NULL);
+ int rate;
+
+ if (IS_ERR(clk)) {
+ clk = clk_get_sys("cpu", NULL);
+
+ if (IS_ERR(clk)) {
+ rate = 125;
+ } else {
+ rate = clk_get_rate(clk) / 3000000;
+ clk_put(clk);
+ }
+ } else {
+ rate = clk_get_rate(clk) / 1000000;
+ clk_put(clk);
+ }
+
+ reg = rt2800_register_read(rt2x00dev, US_CYC_CNT);
+ rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, rate);
+ rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
}
reg = rt2800_register_read(rt2x00dev, HT_FBK_CFG0);
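
The new SoC branch derives the US_CYC_CNT tick (cycles per microsecond) from the platform clock tree instead of assuming 125 MHz: the bus clock if present, else the CPU clock divided by three, else the old constant. The same fallback chain as a hedged helper:

    #include <linux/clk.h>
    #include <linux/err.h>

    /* Cycles per microsecond for US_CYC_CNT, mirroring the fallback chain
     * in the hunk above. */
    static int soc_us_cycles(void)
    {
        struct clk *clk = clk_get_sys("bus", NULL);
        int rate;

        if (!IS_ERR(clk)) {
            rate = clk_get_rate(clk) / 1000000;   /* bus clock, in MHz */
            clk_put(clk);
            return rate;
        }

        clk = clk_get_sys("cpu", NULL);
        if (!IS_ERR(clk)) {
            rate = clk_get_rate(clk) / 3000000;   /* CPU clock / 3 */
            clk_put(clk);
            return rate;
        }

        return 125;                               /* legacy default */
    }
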
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index c3eab767bc21..f504f3529407 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -101,6 +101,7 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2x00link_stop_tuner(rt2x00dev);
rt2x00queue_stop_queues(rt2x00dev);
rt2x00queue_flush_queues(rt2x00dev, true);
+ rt2x00queue_stop_queue(rt2x00dev->bcn);
/*
* Disable radio.
@@ -1283,6 +1284,7 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
rt2x00dev->intf_ap_count = 0;
rt2x00dev->intf_sta_count = 0;
rt2x00dev->intf_associated = 0;
+ rt2x00dev->intf_beaconing = 0;
/* Enable the radio */
retval = rt2x00lib_enable_radio(rt2x00dev);
@@ -1310,6 +1312,7 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
rt2x00dev->intf_ap_count = 0;
rt2x00dev->intf_sta_count = 0;
rt2x00dev->intf_associated = 0;
+ rt2x00dev->intf_beaconing = 0;
}
static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index beb20c5faf5f..a0fb167b58fe 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -578,6 +578,17 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
*/
if (changes & BSS_CHANGED_BEACON_ENABLED) {
mutex_lock(&intf->beacon_skb_mutex);
+
+ /*
+ * Clear the 'enable_beacon' flag and clear beacon because
+ * the beacon queue has been stopped after hardware reset.
+ */
+ if (test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags) &&
+ intf->enable_beacon) {
+ intf->enable_beacon = false;
+ rt2x00queue_clear_beacon(rt2x00dev, vif);
+ }
+
if (!bss_conf->enable_beacon && intf->enable_beacon) {
rt2x00dev->intf_beaconing--;
intf->enable_beacon = false;
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 3836d6ac5304..d9c1ac5cb562 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -270,13 +270,14 @@ static int ray_probe(struct pcmcia_device *p_dev)
{
ray_dev_t *local;
struct net_device *dev;
+ int ret;
dev_dbg(&p_dev->dev, "ray_attach()\n");
/* Allocate space for private device-specific data */
dev = alloc_etherdev(sizeof(ray_dev_t));
if (!dev)
- goto fail_alloc_dev;
+ return -ENOMEM;
local = netdev_priv(dev);
local->finder = p_dev;
@@ -313,11 +314,16 @@ static int ray_probe(struct pcmcia_device *p_dev)
timer_setup(&local->timer, NULL, 0);
this_device = p_dev;
- return ray_config(p_dev);
+ ret = ray_config(p_dev);
+ if (ret)
+ goto err_free_dev;
+
+ return 0;
-fail_alloc_dev:
- return -ENOMEM;
-} /* ray_attach */
+err_free_dev:
+ free_netdev(dev);
+ return ret;
+}
static void ray_detach(struct pcmcia_device *link)
{
@@ -1641,38 +1647,34 @@ static void authenticate_timeout(struct timer_list *t)
/*===========================================================================*/
static int parse_addr(char *in_str, UCHAR *out)
{
+ int i, k;
int len;
- int i, j, k;
- int status;
if (in_str == NULL)
return 0;
- if ((len = strlen(in_str)) < 2)
+ len = strnlen(in_str, ADDRLEN * 2 + 1) - 1;
+ if (len < 1)
return 0;
memset(out, 0, ADDRLEN);
- status = 1;
- j = len - 1;
- if (j > 12)
- j = 12;
i = 5;
- while (j > 0) {
- if ((k = hex_to_bin(in_str[j--])) != -1)
+ while (len > 0) {
+ if ((k = hex_to_bin(in_str[len--])) != -1)
out[i] = k;
else
return 0;
- if (j == 0)
+ if (len == 0)
break;
- if ((k = hex_to_bin(in_str[j--])) != -1)
+ if ((k = hex_to_bin(in_str[len--])) != -1)
out[i] += k << 4;
else
return 0;
if (!i--)
break;
}
- return status;
+ return 1;
}
/*===========================================================================*/
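
The ray_cs changes fix two independent problems: ray_probe() now frees the net_device when ray_config() fails, and parse_addr() bounds its scan with strnlen() so an oversized module parameter can no longer index past the 6-byte output while the string is walked right to left. A self-contained userspace sketch of the bounded right-to-left parse (simplified relative to the driver; oversized input is rejected outright here):

    #include <ctype.h>
    #include <string.h>

    #define ADDRLEN 6

    static int hexval(char c)
    {
        if (!isxdigit((unsigned char)c))
            return -1;
        return isdigit((unsigned char)c) ? c - '0' : (tolower(c) - 'a') + 10;
    }

    /* Parse up to 12 hex digits, right-aligned into out[0..5]; returns 1 on
     * success, 0 on bad input. strnlen() caps the scan so 'len' can never
     * walk past the buffer the caller handed in. */
    static int parse_addr(const char *in, unsigned char out[ADDRLEN])
    {
        int len, i = ADDRLEN - 1, k;

        if (!in)
            return 0;
        len = (int)strnlen(in, ADDRLEN * 2 + 1) - 1;  /* index of last digit */
        if (len < 1 || len >= ADDRLEN * 2)            /* empty or oversized */
            return 0;
        memset(out, 0, ADDRLEN);
        while (len >= 0 && i >= 0) {
            if ((k = hexval(in[len--])) < 0)
                return 0;
            out[i] = k;                               /* low nibble */
            if (len < 0)
                break;
            if ((k = hexval(in[len--])) < 0)
                return 0;
            out[i] += k << 4;                         /* high nibble */
            i--;
        }
        return 1;
    }
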
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 6858f7de0915..b7da89ba1e7d 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1178,7 +1178,7 @@ struct rtl8723bu_c2h {
u8 dummy3_0;
} __packed ra_report;
};
-};
+} __packed;
struct rtl8xxxu_fileops;
@@ -1255,6 +1255,7 @@ struct rtl8xxxu_priv {
u32 rege9c;
u32 regeb4;
u32 regebc;
+ u32 regrcr;
int next_mbox;
int nr_out_eps;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index 02ca80501c3a..e26a722852ee 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1671,6 +1671,11 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
val8 &= ~BIT(0);
rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
+
+ /*
+ * Fix transmission failure of rtl8192e.
+ */
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
}
struct rtl8xxxu_fileops rtl8192eu_fops = {
@@ -1697,6 +1702,7 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
.has_s0s1 = 0,
.gen2_thermal_meter = 1,
+ .needs_full_init = 1,
.adda_1t_init = 0x0fc01616,
.adda_1t_path_on = 0x0fc01616,
.adda_2t_path_on_a = 0x0fc01616,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 048984ca81fd..0d9e31746fd2 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1608,18 +1608,18 @@ static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv)
static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
{
struct device *dev = &priv->udev->dev;
- u32 val32, bonding;
+ u32 val32, bonding, sys_cfg;
u16 val16;
- val32 = rtl8xxxu_read32(priv, REG_SYS_CFG);
- priv->chip_cut = (val32 & SYS_CFG_CHIP_VERSION_MASK) >>
+ sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG);
+ priv->chip_cut = (sys_cfg & SYS_CFG_CHIP_VERSION_MASK) >>
SYS_CFG_CHIP_VERSION_SHIFT;
- if (val32 & SYS_CFG_TRP_VAUX_EN) {
+ if (sys_cfg & SYS_CFG_TRP_VAUX_EN) {
dev_info(dev, "Unsupported test chip\n");
return -ENOTSUPP;
}
- if (val32 & SYS_CFG_BT_FUNC) {
+ if (sys_cfg & SYS_CFG_BT_FUNC) {
if (priv->chip_cut >= 3) {
sprintf(priv->chip_name, "8723BU");
priv->rtl_chip = RTL8723B;
@@ -1641,7 +1641,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
if (val32 & MULTI_GPS_FUNC_EN)
priv->has_gps = 1;
priv->is_multi_func = 1;
- } else if (val32 & SYS_CFG_TYPE_ID) {
+ } else if (sys_cfg & SYS_CFG_TYPE_ID) {
bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
bonding &= HPON_FSM_BONDING_MASK;
if (priv->fops->tx_desc_size ==
@@ -1689,7 +1689,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
case RTL8188E:
case RTL8192E:
case RTL8723B:
- switch (val32 & SYS_CFG_VENDOR_EXT_MASK) {
+ switch (sys_cfg & SYS_CFG_VENDOR_EXT_MASK) {
case SYS_CFG_VENDOR_ID_TSMC:
sprintf(priv->chip_vendor, "TSMC");
break;
@@ -1706,7 +1706,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
}
break;
default:
- if (val32 & SYS_CFG_VENDOR_ID) {
+ if (sys_cfg & SYS_CFG_VENDOR_ID) {
sprintf(priv->chip_vendor, "UMC");
priv->vendor_umc = 1;
} else {
@@ -1875,13 +1875,6 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv)
/* We have 8 bits to indicate validity */
map_addr = offset * 8;
- if (map_addr >= EFUSE_MAP_LEN) {
- dev_warn(dev, "%s: Illegal map_addr (%04x), "
- "efuse corrupt!\n",
- __func__, map_addr);
- ret = -EINVAL;
- goto exit;
- }
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
/* Check word enable condition in the section */
if (word_mask & BIT(i)) {
@@ -1892,6 +1885,13 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv)
ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8);
if (ret)
goto exit;
+ if (map_addr >= EFUSE_MAP_LEN - 1) {
+ dev_warn(dev, "%s: Illegal map_addr (%04x), "
+ "efuse corrupt!\n",
+ __func__, map_addr);
+ ret = -EINVAL;
+ goto exit;
+ }
priv->efuse_wifi.raw[map_addr++] = val8;
ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8);
@@ -2926,12 +2926,12 @@ bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
}
if (!(simubitmap & 0x30) && priv->tx_paths > 1) {
- /* path B RX OK */
+ /* path B TX OK */
for (i = 4; i < 6; i++)
result[3][i] = result[c1][i];
}
- if (!(simubitmap & 0x30) && priv->tx_paths > 1) {
+ if (!(simubitmap & 0xc0) && priv->tx_paths > 1) {
/* path B RX OK */
for (i = 6; i < 8; i++)
result[3][i] = result[c1][i];
@@ -4048,6 +4048,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL |
RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC;
rtl8xxxu_write32(priv, REG_RCR, val32);
+ priv->regrcr = val32;
/*
* Accept all multicast
@@ -4372,12 +4373,9 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect)
{
-#ifdef RTL8XXXU_GEN2_REPORT_CONNECT
/*
- * Barry Day reports this causes issues with 8192eu and 8723bu
- * devices reconnecting. The reason for this is unclear, but
- * until it is better understood, leave the code in place but
- * disabled, so it is not lost.
+ * The firmware turns on the rate control when it knows it's
+ * connected to a network.
*/
struct h2c_cmd h2c;
@@ -4390,7 +4388,6 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
h2c.media_status_rpt.parm &= ~BIT(0);
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
-#endif
}
void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
@@ -4950,6 +4947,8 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
if (control && control->sta)
sta = control->sta;
+ queue = rtl8xxxu_queue_select(hw, skb);
+
tx_desc = skb_push(skb, tx_desc_size);
memset(tx_desc, 0, tx_desc_size);
@@ -4962,7 +4961,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
tx_desc->txdw0 |= TXDESC_BROADMULTICAST;
- queue = rtl8xxxu_queue_select(hw, skb);
tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT);
if (tx_info->control.hw_key) {
@@ -5098,7 +5096,7 @@ static void rtl8xxxu_queue_rx_urb(struct rtl8xxxu_priv *priv,
pending = priv->rx_urb_pending_count;
} else {
skb = (struct sk_buff *)rx_urb->urb.context;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
usb_free_urb(&rx_urb->urb);
}
@@ -5497,7 +5495,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
{
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
- u16 val16;
int ret = 0, channel;
bool ht40;
@@ -5507,14 +5504,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
__func__, hw->conf.chandef.chan->hw_value,
changed, hw->conf.chandef.width);
- if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- val16 = ((hw->conf.long_frame_max_tx_count <<
- RETRY_LIMIT_LONG_SHIFT) & RETRY_LIMIT_LONG_MASK) |
- ((hw->conf.short_frame_max_tx_count <<
- RETRY_LIMIT_SHORT_SHIFT) & RETRY_LIMIT_SHORT_MASK);
- rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16);
- }
-
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
switch (hw->conf.chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -5597,7 +5586,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
unsigned int *total_flags, u64 multicast)
{
struct rtl8xxxu_priv *priv = hw->priv;
- u32 rcr = rtl8xxxu_read32(priv, REG_RCR);
+ u32 rcr = priv->regrcr;
dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n",
__func__, changed_flags, *total_flags);
@@ -5643,6 +5632,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
*/
rtl8xxxu_write32(priv, REG_RCR, rcr);
+ priv->regrcr = rcr;
*total_flags &= (FIF_ALLMULTI | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC |
FIF_CONTROL | FIF_OTHER_BSS | FIF_PSPOLL |
@@ -6372,6 +6362,18 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192eu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818c, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192eu_fops},
+/* D-Link DWA-131 rev C1 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3312, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
+/* TP-Link TL-WN8200ND V2 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0126, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
+/* Mercusys MW300UM */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0100, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
+/* Mercusys MW300UH */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0104, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
#endif
{ }
};
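
Several of the rtl8xxxu changes above share one theme: REG_RCR is now tracked through a driver-side shadow (priv->regrcr), seeded when the register is first programmed and used as the starting point in rtl8xxxu_configure_filter(), instead of reading the register back before each read-modify-write. A hedged sketch of the shadow-register pattern (register offset and motivation assumed, not taken from the source):

    #include <linux/types.h>

    struct example_priv {
        u32 regrcr;                    /* shadow of the last RCR value written */
    };

    /* Modify the cached copy and write it out, keeping the shadow in sync,
     * rather than trusting a read-back of the hardware register. */
    static void update_filter(struct example_priv *priv, u32 set, u32 clear,
                              void (*write32)(u32 reg, u32 val))
    {
        u32 rcr = priv->regrcr;        /* start from the cached value */

        rcr |= set;
        rcr &= ~clear;
        write32(0x608 /* assumed REG_RCR offset */, rcr);
        priv->regrcr = rcr;
    }
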
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index c9ad6761032a..8fb0b54738ca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -195,8 +195,8 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
} else {
if (get_rf_type(rtlphy) == RF_1T2R ||
get_rf_type(rtlphy) == RF_2T2R) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "1T2R or 2T2R\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "1T2R or 2T2R\n");
ht_cap->mcs.rx_mask[0] = 0xFF;
ht_cap->mcs.rx_mask[1] = 0xFF;
ht_cap->mcs.rx_mask[4] = 0x01;
@@ -204,7 +204,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
ht_cap->mcs.rx_highest =
cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
} else if (get_rf_type(rtlphy) == RF_1T1R) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "1T1R\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "1T1R\n");
ht_cap->mcs.rx_mask[0] = 0xFF;
ht_cap->mcs.rx_mask[1] = 0x00;
@@ -1324,7 +1324,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
rtlpriv->cfg->ops->chk_switch_dmdp(hw);
}
if (ieee80211_is_auth(fc)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
mac->link_state = MAC80211_LINKING;
/* Dul mac */
@@ -1385,7 +1385,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
if (mac->act_scanning)
return false;
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
"%s ACT_ADDBAREQ From :%pM\n",
is_tx ? "Tx" : "Rx", hdr->addr2);
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "req\n",
@@ -1400,8 +1400,8 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
rcu_read_lock();
sta = rtl_find_sta(hw, hdr->addr3);
if (sta == NULL) {
- RT_TRACE(rtlpriv, COMP_SEND | COMP_RECV,
- DBG_DMESG, "sta is NULL\n");
+ rtl_dbg(rtlpriv, COMP_SEND | COMP_RECV,
+ DBG_DMESG, "sta is NULL\n");
rcu_read_unlock();
return true;
}
@@ -1428,13 +1428,13 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
}
break;
case ACT_ADDBARSP:
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "%s ACT_ADDBARSP From :%pM\n",
- is_tx ? "Tx" : "Rx", hdr->addr2);
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ "%s ACT_ADDBARSP From :%pM\n",
+ is_tx ? "Tx" : "Rx", hdr->addr2);
break;
case ACT_DELBA:
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "ACT_ADDBADEL From :%pM\n", hdr->addr2);
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ "ACT_ADDBADEL From :%pM\n", hdr->addr2);
break;
}
break;
@@ -1519,9 +1519,9 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
/* 68 : UDP BOOTP client
* 67 : UDP BOOTP server
*/
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV),
- DBG_DMESG, "dhcp %s !!\n",
- (is_tx) ? "Tx" : "Rx");
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV),
+ DBG_DMESG, "dhcp %s !!\n",
+ (is_tx) ? "Tx" : "Rx");
if (is_tx)
setup_special_tx(rtlpriv, ppsc,
@@ -1540,8 +1540,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
rtlpriv->btcoexist.btc_info.in_4way = true;
rtlpriv->btcoexist.btc_info.in_4way_ts = jiffies;
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx");
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ "802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx");
if (is_tx) {
rtlpriv->ra.is_special_data = true;
@@ -1583,12 +1583,12 @@ static void rtl_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
info = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(info);
if (ack) {
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
- "tx report: ack\n");
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
+ "tx report: ack\n");
info->flags |= IEEE80211_TX_STAT_ACK;
} else {
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
- "tx report: not ack\n");
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
+ "tx report: not ack\n");
info->flags &= ~IEEE80211_TX_STAT_ACK;
}
ieee80211_tx_status_irqsafe(hw, skb);
@@ -1626,8 +1626,8 @@ static u16 rtl_get_tx_report_sn(struct ieee80211_hw *hw,
tx_report->last_sent_time = jiffies;
tx_info->sn = sn;
tx_info->send_time = tx_report->last_sent_time;
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
- "Send TX-Report sn=0x%X\n", sn);
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
+ "Send TX-Report sn=0x%X\n", sn);
return sn;
}
@@ -1674,9 +1674,9 @@ void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf, u8 c2h_cmd_len)
break;
}
}
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
- "Recv TX-Report st=0x%02X sn=0x%X retry=0x%X\n",
- st, sn, retry);
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
+ "Recv TX-Report st=0x%02X sn=0x%X retry=0x%X\n",
+ st, sn, retry);
}
EXPORT_SYMBOL_GPL(rtl_tx_report_handler);
@@ -1689,9 +1689,9 @@ bool rtl_check_tx_report_acked(struct ieee80211_hw *hw)
return true;
if (time_before(tx_report->last_sent_time + 3 * HZ, jiffies)) {
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_WARNING,
- "Check TX-Report timeout!! s_sn=0x%X r_sn=0x%X\n",
- tx_report->last_sent_sn, tx_report->last_recv_sn);
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_WARNING,
+ "Check TX-Report timeout!! s_sn=0x%X r_sn=0x%X\n",
+ tx_report->last_sent_sn, tx_report->last_recv_sn);
return true; /* 3 sec. (timeout) seen as acked */
}
@@ -1707,8 +1707,8 @@ void rtl_wait_tx_report_acked(struct ieee80211_hw *hw, u32 wait_ms)
if (rtl_check_tx_report_acked(hw))
break;
usleep_range(1000, 2000);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "Wait 1ms (%d/%d) to disable key.\n", i, wait_ms);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "Wait 1ms (%d/%d) to disable key.\n", i, wait_ms);
}
}
@@ -1770,9 +1770,9 @@ int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return -ENXIO;
tid_data = &sta_entry->tids[tid];
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
- *ssn);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
+ *ssn);
tid_data->agg.agg_state = RTL_AGG_START;
@@ -1789,8 +1789,8 @@ int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (sta == NULL)
return -EINVAL;
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
@@ -1829,8 +1829,8 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw,
return -ENXIO;
tid_data = &sta_entry->tids[tid];
- RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_RECV, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
tid_data->agg.rx_agg_state = RTL_RX_AGG_START;
return 0;
@@ -1845,8 +1845,8 @@ int rtl_rx_agg_stop(struct ieee80211_hw *hw,
if (sta == NULL)
return -EINVAL;
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
@@ -1866,8 +1866,8 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw,
if (sta == NULL)
return -EINVAL;
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
@@ -1887,9 +1887,9 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
btc_ops->btc_get_ampdu_cfg(rtlpriv, &reject_agg,
&ctrl_agg_size, &agg_size);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Set RX AMPDU: coex - reject=%d, ctrl_agg_size=%d, size=%d",
- reject_agg, ctrl_agg_size, agg_size);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Set RX AMPDU: coex - reject=%d, ctrl_agg_size=%d, size=%d",
+ reject_agg, ctrl_agg_size, agg_size);
rtlpriv->hw->max_rx_aggregation_subframes =
(ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT);
@@ -1977,9 +1977,9 @@ void rtl_scan_list_expire(struct ieee80211_hw *hw)
list_del(&entry->list);
rtlpriv->scan_list.num--;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "BSSID=%pM is expire in scan list (total=%d)\n",
- entry->bssid, rtlpriv->scan_list.num);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "BSSID=%pM is expire in scan list (total=%d)\n",
+ entry->bssid, rtlpriv->scan_list.num);
kfree(entry);
}
@@ -2013,9 +2013,9 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
if (memcmp(entry->bssid, hdr->addr3, ETH_ALEN) == 0) {
list_del_init(&entry->list);
entry_found = true;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Update BSSID=%pM to scan list (total=%d)\n",
- hdr->addr3, rtlpriv->scan_list.num);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Update BSSID=%pM to scan list (total=%d)\n",
+ hdr->addr3, rtlpriv->scan_list.num);
break;
}
}
@@ -2029,9 +2029,9 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
memcpy(entry->bssid, hdr->addr3, ETH_ALEN);
rtlpriv->scan_list.num++;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Add BSSID=%pM to scan list (total=%d)\n",
- hdr->addr3, rtlpriv->scan_list.num);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Add BSSID=%pM to scan list (total=%d)\n",
+ hdr->addr3, rtlpriv->scan_list.num);
}
entry->age = jiffies;
@@ -2191,8 +2191,8 @@ label_lps_done:
if ((rtlpriv->link_info.bcn_rx_inperiod +
rtlpriv->link_info.num_rx_inperiod) == 0) {
rtlpriv->link_info.roam_times++;
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "AP off for %d s\n",
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "AP off for %d s\n",
(rtlpriv->link_info.roam_times * 2));
/* if we can't recv beacon for 10s,
@@ -2305,11 +2305,11 @@ static void rtl_c2h_content_parsing(struct ieee80211_hw *hw,
switch (cmd_id) {
case C2H_DBG:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_DBG!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_DBG!!\n");
break;
case C2H_TXBF:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_TXBF!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], C2H_TXBF!!\n");
break;
case C2H_TX_REPORT:
rtl_tx_report_handler(hw, cmd_buf, cmd_len);
@@ -2319,20 +2319,20 @@ static void rtl_c2h_content_parsing(struct ieee80211_hw *hw,
hal_ops->c2h_ra_report_handler(hw, cmd_buf, cmd_len);
break;
case C2H_BT_INFO:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_BT_INFO!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], C2H_BT_INFO!!\n");
if (rtlpriv->cfg->ops->get_btc_status())
btc_ops->btc_btinfo_notify(rtlpriv, cmd_buf, cmd_len);
break;
case C2H_BT_MP:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_BT_MP!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], C2H_BT_MP!!\n");
if (rtlpriv->cfg->ops->get_btc_status())
btc_ops->btc_btmpinfo_notify(rtlpriv, cmd_buf, cmd_len);
break;
default:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], Unknown packet!! cmd_id(%#X)!\n", cmd_id);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], Unknown packet!! cmd_id(%#X)!\n", cmd_id);
break;
}
}
@@ -2356,8 +2356,8 @@ void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec)
if (!skb)
break;
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "C2H rx_desc_shift=%d\n",
- *((u8 *)skb->cb));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, "C2H rx_desc_shift=%d\n",
+ *((u8 *)skb->cb));
RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_DMESG,
"C2H data: ", skb->data, skb->len);
@@ -2702,29 +2702,29 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
(memcmp(mac->bssid, ap5_6, 3) == 0) ||
vendor == PEER_ATH) {
vendor = PEER_ATH;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ath find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ath find\n");
} else if ((memcmp(mac->bssid, ap4_4, 3) == 0) ||
(memcmp(mac->bssid, ap4_5, 3) == 0) ||
(memcmp(mac->bssid, ap4_1, 3) == 0) ||
(memcmp(mac->bssid, ap4_2, 3) == 0) ||
(memcmp(mac->bssid, ap4_3, 3) == 0) ||
vendor == PEER_RAL) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ral find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ral find\n");
vendor = PEER_RAL;
} else if (memcmp(mac->bssid, ap6_1, 3) == 0 ||
vendor == PEER_CISCO) {
vendor = PEER_CISCO;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>cisco find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>cisco find\n");
} else if ((memcmp(mac->bssid, ap3_1, 3) == 0) ||
(memcmp(mac->bssid, ap3_2, 3) == 0) ||
(memcmp(mac->bssid, ap3_3, 3) == 0) ||
vendor == PEER_BROAD) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>broad find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>broad find\n");
vendor = PEER_BROAD;
} else if (memcmp(mac->bssid, ap7_1, 3) == 0 ||
vendor == PEER_MARV) {
vendor = PEER_MARV;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>marv find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>marv find\n");
}
mac->vendor = vendor;
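
The base.c hunks above are a mechanical rename, but they also show the call shape shared by every site: a component bitmask (COMP_SEC, COMP_FW, ...) and a verbosity level (DBG_DMESG, DBG_LOUD, ...) jointly gate each message. A minimal user-space sketch of that filtering scheme (rtl_dbg_demo and the mask values are hypothetical, not the driver's implementation):

	#include <stdarg.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical component bits and levels, mirroring the COMP_xxx/DBG_xxx style. */
	#define COMP_SEC  (1ULL << 0)
	#define COMP_FW   (1ULL << 1)
	#define DBG_DMESG 2
	#define DBG_LOUD  4

	static uint64_t dbg_comp_mask = COMP_SEC;	/* which components to print */
	static int dbg_level = DBG_DMESG;		/* max verbosity to print */

	static void rtl_dbg_demo(uint64_t comp, int level, const char *fmt, ...)
	{
		va_list args;

		/* Print only if the component is enabled and the level is low enough. */
		if (!(comp & dbg_comp_mask) || level > dbg_level)
			return;

		va_start(args, fmt);
		vprintf(fmt, args);
		va_end(args);
	}

	int main(void)
	{
		rtl_dbg_demo(COMP_SEC, DBG_DMESG, "printed: key_idx:%d\n", 3);
		rtl_dbg_demo(COMP_FW, DBG_DMESG, "suppressed: wrong component\n");
		rtl_dbg_demo(COMP_SEC, DBG_LOUD, "suppressed: too verbose\n");
		return 0;
	}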
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
index bf0e0bb1f99b..7aa28da39409 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.c
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.c
@@ -43,14 +43,14 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE %x: %x\n",
- rtlpriv->cfg->maps[WCAMI], target_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The Key ID is %d\n", entry_no);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE %x: %x\n",
- rtlpriv->cfg->maps[RWCAM], target_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE %x: %x\n",
+ rtlpriv->cfg->maps[WCAMI], target_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The Key ID is %d\n", entry_no);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE %x: %x\n",
+ rtlpriv->cfg->maps[RWCAM], target_command);
} else if (entry_i == 1) {
@@ -64,10 +64,10 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A4: %x\n", target_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A0: %x\n", target_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A4: %x\n", target_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A0: %x\n", target_command);
} else {
@@ -83,15 +83,15 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A4: %x\n", target_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A0: %x\n", target_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A4: %x\n", target_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A0: %x\n", target_command);
}
}
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "after set key, usconfig:%x\n", us_config);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "after set key, usconfig:%x\n", us_config);
}
u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
@@ -101,14 +101,14 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
u32 us_config;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n",
- ul_entry_idx, ul_key_id, ul_enc_alg,
- ul_default_key, mac_addr);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n",
+ ul_entry_idx, ul_key_id, ul_enc_alg,
+ ul_default_key, mac_addr);
if (ul_key_id == TOTAL_CAM_ENTRY) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ulKeyId exceed!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ulKeyId exceed!\n");
return 0;
}
@@ -120,7 +120,7 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
(u8 *)key_content, us_config);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "end\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "end\n");
return 1;
@@ -133,7 +133,7 @@ int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
u32 ul_command;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "key_idx:%d\n", ul_key_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "key_idx:%d\n", ul_key_id);
ul_command = ul_key_id * CAM_CONTENT_COUNT;
ul_command = ul_command | BIT(31) | BIT(16);
@@ -141,10 +141,10 @@ int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], 0);
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_delete_one_entry(): WRITE A4: %x\n", 0);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_delete_one_entry(): WRITE A0: %x\n", ul_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A4: %x\n", __func__, 0);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A0: %x\n", __func__, ul_command);
return 0;
@@ -195,10 +195,10 @@ void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index)
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_mark_invalid(): WRITE A4: %x\n", ul_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_mark_invalid(): WRITE A0: %x\n", ul_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A4: %x\n", __func__, ul_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A0: %x\n", __func__, ul_command);
}
EXPORT_SYMBOL(rtl_cam_mark_invalid);
@@ -245,12 +245,10 @@ void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "rtl_cam_empty_entry(): WRITE A4: %x\n",
- ul_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "rtl_cam_empty_entry(): WRITE A0: %x\n",
- ul_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "%s: WRITE A4: %x\n", __func__, ul_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "%s: WRITE A0: %x\n", __func__, ul_command);
}
}
@@ -313,8 +311,8 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
/* Remove from HW Security CAM */
eth_zero_addr(rtlpriv->sec.hwsec_cam_sta_addr[i]);
rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "&&&&&&&&&del entry %d\n", i);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "&&&&&&&&&del entry %d\n", i);
}
}
return;
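
Besides the rename, several cam.c hunks replace hardcoded function names in format strings ("rtl_cam_delete_one_entry(): ...") with "%s" plus __func__, which deduplicates the string literal and keeps the log text correct if the function is ever renamed. A stand-alone illustration (delete_entry is a made-up function):

	#include <stdio.h>

	static void delete_entry(unsigned int cmd)
	{
		/* __func__ always expands to the enclosing function's name,
		 * so the log line tracks renames automatically.
		 */
		printf("%s: WRITE A0: %x\n", __func__, cmd);
	}

	int main(void)
	{
		delete_entry(0x80010000);
		return 0;
	}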
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index f73e690bbe8e..c4c89aade8fb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -76,8 +76,8 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
struct rtl_priv *rtlpriv = rtl_priv(hw);
int err;
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "Firmware callback routine entered!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
if (rtlpriv->cfg->alt_fw_name) {
@@ -214,8 +214,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
u8 retry_limit = 0x30;
if (mac->vif) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "vif has been set!! mac->vif = 0x%p\n", mac->vif);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "vif has been set!! mac->vif = 0x%p\n", mac->vif);
return -EOPNOTSUPP;
}
@@ -230,16 +230,16 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
/*fall through*/
case NL80211_IFTYPE_STATION:
if (mac->beacon_enabled == 1) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_STATION\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_STATION\n");
mac->beacon_enabled = 0;
rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]);
}
break;
case NL80211_IFTYPE_ADHOC:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_ADHOC\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_ADHOC\n");
mac->link_state = MAC80211_LINKED;
rtlpriv->cfg->ops->set_bcn_reg(hw);
@@ -256,8 +256,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
mac->p2p = P2P_ROLE_GO;
/*fall through*/
case NL80211_IFTYPE_AP:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_AP\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_AP\n");
mac->link_state = MAC80211_LINKED;
rtlpriv->cfg->ops->set_bcn_reg(hw);
@@ -271,8 +271,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
retry_limit = 0x07;
break;
case NL80211_IFTYPE_MESH_POINT:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_MESH_POINT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_MESH_POINT\n");
mac->link_state = MAC80211_LINKED;
rtlpriv->cfg->ops->set_bcn_reg(hw);
@@ -293,8 +293,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
}
if (mac->p2p) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "p2p role %x\n", vif->type);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "p2p role %x\n", vif->type);
mac->basic_rates = 0xff0;/*disable cck rate for p2p*/
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
(u8 *)(&mac->basic_rates));
@@ -360,8 +360,8 @@ static int rtl_op_change_interface(struct ieee80211_hw *hw,
vif->type = new_type;
vif->p2p = p2p;
ret = rtl_op_add_interface(hw, vif);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "p2p %x\n", p2p);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "p2p %x\n", p2p);
return ret;
}
@@ -435,8 +435,8 @@ static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw,
memset(mask, 0, MAX_WOL_BIT_MASK_SIZE);
if (patterns[i].pattern_len < 0 ||
patterns[i].pattern_len > MAX_WOL_PATTERN_SIZE) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_WARNING,
- "Pattern[%d] is too long\n", i);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_WARNING,
+ "Pattern[%d] is too long\n", i);
continue;
}
pattern_os = patterns[i].pattern;
@@ -515,8 +515,8 @@ static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw,
"pattern to hw\n", content, len);
/* 3. calculate crc */
rtl_pattern.crc = _calculate_wol_pattern_crc(content, len);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "CRC_Remainder = 0x%x\n", rtl_pattern.crc);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "CRC_Remainder = 0x%x\n", rtl_pattern.crc);
/* 4. write crc & mask_for_hw to hw */
rtlpriv->cfg->ops->add_wowlan_pattern(hw, &rtl_pattern, i);
@@ -531,7 +531,7 @@ static int rtl_op_suspend(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
if (WARN_ON(!wow))
return -EINVAL;
@@ -557,7 +557,7 @@ static int rtl_op_resume(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
time64_t now;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
rtlhal->driver_is_goingto_unload = false;
rtlhal->enter_pnp_sleep = false;
rtlhal->wake_from_pnp_sleep = true;
@@ -588,8 +588,8 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&rtlpriv->locks.conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /* BIT(2)*/
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n");
}
/*For IPS */
@@ -632,9 +632,9 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
- hw->conf.long_frame_max_tx_count);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
+ hw->conf.long_frame_max_tx_count);
/* brought up everything changes (changed == ~0) indicates first
* open, so use our default value instead of that of wiphy.
*/
@@ -809,13 +809,13 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (*new_flags & FIF_ALLMULTI) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
rtlpriv->cfg->maps[MAC_RCR_AB];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive multicast frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive multicast frame\n");
} else {
mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
rtlpriv->cfg->maps[MAC_RCR_AB]);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive multicast frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive multicast frame\n");
}
update_rcr = true;
}
@@ -823,12 +823,12 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (changed_flags & FIF_FCSFAIL) {
if (*new_flags & FIF_FCSFAIL) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive FCS error frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive FCS error frame\n");
} else {
mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive FCS error frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive FCS error frame\n");
}
if (!update_rcr)
update_rcr = true;
@@ -855,12 +855,12 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (*new_flags & FIF_CONTROL) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive control frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive control frame.\n");
} else {
mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive control frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive control frame.\n");
}
if (!update_rcr)
update_rcr = true;
@@ -869,12 +869,12 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (changed_flags & FIF_OTHER_BSS) {
if (*new_flags & FIF_OTHER_BSS) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive other BSS's frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive other BSS's frame.\n");
} else {
mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive other BSS's frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive other BSS's frame.\n");
}
if (!update_rcr)
update_rcr = true;
@@ -923,7 +923,7 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw,
sta->supp_rates[0] &= 0xfffffff0;
memcpy(sta_entry->mac_addr, sta->addr, ETH_ALEN);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
"Add sta addr is %pM\n", sta->addr);
rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0, true);
}
@@ -939,8 +939,8 @@ static int rtl_op_sta_remove(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry;
if (sta) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "Remove sta addr is %pM\n", sta->addr);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "Remove sta addr is %pM\n", sta->addr);
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
sta_entry->wireless_mode = 0;
sta_entry->ratr_index = 0;
@@ -988,8 +988,8 @@ static int rtl_op_conf_tx(struct ieee80211_hw *hw,
int aci;
if (queue >= AC_MAX) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "queue number %d is incorrect!\n", queue);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "queue number %d is incorrect!\n", queue);
return -EINVAL;
}
@@ -1034,8 +1034,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
(changed & BSS_CHANGED_BEACON_ENABLED &&
bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 0) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "BSS_CHANGED_BEACON_ENABLED\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "BSS_CHANGED_BEACON_ENABLED\n");
/*start hw beacon interrupt. */
/*rtlpriv->cfg->ops->set_bcn_reg(hw); */
@@ -1052,8 +1052,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if ((changed & BSS_CHANGED_BEACON_ENABLED &&
!bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 1) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "ADHOC DISABLE BEACON\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "ADHOC DISABLE BEACON\n");
mac->beacon_enabled = 0;
rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
@@ -1062,8 +1062,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
if (changed & BSS_CHANGED_BEACON_INT) {
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_TRACE,
- "BSS_CHANGED_BEACON_INT\n");
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_TRACE,
+ "BSS_CHANGED_BEACON_INT\n");
mac->beacon_interval = bss_conf->beacon_int;
rtlpriv->cfg->ops->set_bcn_intv(hw);
}
@@ -1102,8 +1102,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rcu_read_unlock();
goto out;
}
- RT_TRACE(rtlpriv, COMP_EASY_CONCURRENT, DBG_LOUD,
- "send PS STATIC frame\n");
+ rtl_dbg(rtlpriv, COMP_EASY_CONCURRENT, DBG_LOUD,
+ "send PS STATIC frame\n");
if (rtlpriv->dm.supp_phymode_switch) {
if (sta->ht_cap.ht_supported)
rtl_send_smps_action(hw, sta,
@@ -1143,8 +1143,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
HW_VAR_KEEP_ALIVE,
(u8 *)(&keep_alive));
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "BSS_CHANGED_ASSOC\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "BSS_CHANGED_ASSOC\n");
} else {
struct cfg80211_bss *bss = NULL;
@@ -1161,14 +1161,14 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
IEEE80211_BSS_TYPE_ESS,
IEEE80211_PRIVACY_OFF);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "bssid = %pMF\n", mac->bssid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "bssid = %pMF\n", mac->bssid);
if (bss) {
cfg80211_unlink_bss(hw->wiphy, bss);
cfg80211_put_bss(hw->wiphy, bss);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "cfg80211_unlink !!\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "cfg80211_unlink !!\n");
}
eth_zero_addr(mac->bssid);
@@ -1179,8 +1179,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (rtlpriv->cfg->ops->chk_switch_dmdp)
rtlpriv->cfg->ops->chk_switch_dmdp(hw);
}
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "BSS_CHANGED_UN_ASSOC\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "BSS_CHANGED_UN_ASSOC\n");
}
rtlpriv->cfg->ops->set_network_type(hw, vif->type);
/* For FW LPS:
@@ -1198,14 +1198,14 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_ERP_CTS_PROT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "BSS_CHANGED_ERP_CTS_PROT\n");
mac->use_cts_protect = bss_conf->use_cts_prot;
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "BSS_CHANGED_ERP_PREAMBLE use short preamble:%x\n",
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "BSS_CHANGED_ERP_PREAMBLE use short preamble:%x\n",
bss_conf->use_short_preamble);
mac->short_preamble = bss_conf->use_short_preamble;
@@ -1214,8 +1214,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_SLOT) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_ERP_SLOT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "BSS_CHANGED_ERP_SLOT\n");
if (bss_conf->use_short_slot)
mac->slot_time = RTL_SLOT_TIME_9;
@@ -1229,8 +1229,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_HT) {
struct ieee80211_sta *sta = NULL;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_HT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "BSS_CHANGED_HT\n");
rcu_read_lock();
sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
@@ -1261,8 +1261,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID,
(u8 *)bss_conf->bssid);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "bssid: %pM\n", bss_conf->bssid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "bssid: %pM\n", bss_conf->bssid);
mac->vendor = PEER_UNKNOWN;
memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
@@ -1393,27 +1393,27 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_TX_START:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_TX_START: TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_TX_START: TID:%d\n", tid);
return rtl_tx_agg_start(hw, vif, sta, tid, ssn);
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid);
return rtl_tx_agg_stop(hw, vif, sta, tid);
case IEEE80211_AMPDU_TX_OPERATIONAL:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid);
rtl_tx_agg_oper(hw, sta, tid);
break;
case IEEE80211_AMPDU_RX_START:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_RX_START:TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_RX_START:TID:%d\n", tid);
return rtl_rx_agg_start(hw, sta, tid);
case IEEE80211_AMPDU_RX_STOP:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid);
return rtl_rx_agg_stop(hw, sta, tid);
default:
pr_err("IEEE80211_AMPDU_ERR!!!!:\n");
@@ -1429,7 +1429,7 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
mac->act_scanning = true;
if (rtlpriv->link_info.higher_busytraffic) {
mac->skip_scan = true;
@@ -1467,7 +1467,7 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
mac->act_scanning = false;
mac->skip_scan = false;
@@ -1517,8 +1517,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->btcoexist.btc_info.in_4way = false;
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "not open hw encryption\n");
return -ENOSPC; /*User disabled HW-crypto */
}
/* To support IBSS, use sw-crypto for GTK */
@@ -1526,10 +1526,10 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
vif->type == NL80211_IFTYPE_MESH_POINT) &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -ENOSPC;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "%s hardware based encryption for keyidx: %d, mac: %pM\n",
- cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
- sta ? sta->addr : bcast_addr);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s hardware based encryption for keyidx: %d, mac: %pM\n",
+ cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
+ sta ? sta->addr : bcast_addr);
rtlpriv->sec.being_setkey = true;
rtl_ips_nic_on(hw);
mutex_lock(&rtlpriv->locks.conf_mutex);
@@ -1538,28 +1538,28 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
key_type = WEP40_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP40\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP40\n");
break;
case WLAN_CIPHER_SUITE_WEP104:
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP104\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP104\n");
key_type = WEP104_ENCRYPTION;
break;
case WLAN_CIPHER_SUITE_TKIP:
key_type = TKIP_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:TKIP\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:TKIP\n");
break;
case WLAN_CIPHER_SUITE_CCMP:
key_type = AESCCMP_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CCMP\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CCMP\n");
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
/* HW don't support CMAC encryption,
* use software CMAC encryption
*/
key_type = AESCMAC_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CMAC\n");
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "HW don't support CMAC encryption, use software CMAC encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CMAC\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "HW don't support CMAC encryption, use software CMAC encryption\n");
err = -EOPNOTSUPP;
goto out_unlock;
default:
@@ -1605,9 +1605,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_type == WEP104_ENCRYPTION))
wep_only = true;
rtlpriv->sec.pairwise_enc_algorithm = key_type;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set enable_hw_sec, key_type:%x(OPEN:0 WEP40:1 TKIP:2 AES:4 WEP104:5)\n",
- key_type);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set enable_hw_sec, key_type:%x(OPEN:0 WEP40:1 TKIP:2 AES:4 WEP104:5)\n",
+ key_type);
rtlpriv->cfg->ops->enable_hw_sec(hw);
}
}
@@ -1615,8 +1615,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
if (wep_only) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set WEP(group/pairwise) key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set WEP(group/pairwise) key\n");
/* Pairwise key with an assigned MAC address. */
rtlpriv->sec.pairwise_enc_algorithm = key_type;
rtlpriv->sec.group_enc_algorithm = key_type;
@@ -1626,8 +1626,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->sec.key_len[key_idx] = key->keylen;
eth_zero_addr(mac_addr);
} else if (group_key) { /* group key */
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
/* group key */
rtlpriv->sec.group_enc_algorithm = key_type;
/*set local buf about group key. */
@@ -1636,8 +1636,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->sec.key_len[key_idx] = key->keylen;
memcpy(mac_addr, bcast_addr, ETH_ALEN);
} else { /* pairwise key */
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set pairwise key\n");
if (!sta) {
WARN_ONCE(true,
"rtlwifi: pairwise key without mac_addr\n");
@@ -1669,8 +1669,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
break;
case DISABLE_KEY:
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "disable key delete one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "disable key delete one entry\n");
/*set local buf about wep key. */
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT) {
@@ -1718,9 +1718,9 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
if (unlikely(radio_state != rtlpriv->rfkill.rfkill_state)) {
rtlpriv->rfkill.rfkill_state = radio_state;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "wireless radio switch turned %s\n",
- radio_state ? "on" : "off");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "wireless radio switch turned %s\n",
+ radio_state ? "on" : "off");
blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
@@ -1765,26 +1765,27 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
do {
cfg_cmd = pwrcfgcmd[ary_idx];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), famsk(%#x), interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
- GET_PWR_CFG_OFFSET(cfg_cmd),
- GET_PWR_CFG_CUT_MASK(cfg_cmd),
- GET_PWR_CFG_FAB_MASK(cfg_cmd),
- GET_PWR_CFG_INTF_MASK(cfg_cmd),
- GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
- GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s: offset(%#x),cut_msk(%#x), famsk(%#x), interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
+ __func__,
+ GET_PWR_CFG_OFFSET(cfg_cmd),
+ GET_PWR_CFG_CUT_MASK(cfg_cmd),
+ GET_PWR_CFG_FAB_MASK(cfg_cmd),
+ GET_PWR_CFG_INTF_MASK(cfg_cmd),
+ GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
+ GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
if ((GET_PWR_CFG_FAB_MASK(cfg_cmd)&faversion) &&
(GET_PWR_CFG_CUT_MASK(cfg_cmd)&cut_version) &&
(GET_PWR_CFG_INTF_MASK(cfg_cmd)&interface_type)) {
switch (GET_PWR_CFG_CMD(cfg_cmd)) {
case PWR_CMD_READ:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n");
break;
case PWR_CMD_WRITE:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "%s(): PWR_CMD_WRITE\n", __func__);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s(): PWR_CMD_WRITE\n", __func__);
offset = GET_PWR_CFG_OFFSET(cfg_cmd);
/*Read the value from system register*/
@@ -1797,7 +1798,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
rtl_write_byte(rtlpriv, offset, value);
break;
case PWR_CMD_POLLING:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n");
polling_bit = false;
offset = GET_PWR_CFG_OFFSET(cfg_cmd);
@@ -1818,8 +1819,8 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
} while (!polling_bit);
break;
case PWR_CMD_DELAY:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s: PWR_CMD_DELAY\n", __func__);
if (GET_PWR_CFG_VALUE(cfg_cmd) ==
PWRSEQ_DELAY_US)
udelay(GET_PWR_CFG_OFFSET(cfg_cmd));
@@ -1827,8 +1828,8 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
mdelay(GET_PWR_CFG_OFFSET(cfg_cmd));
break;
case PWR_CMD_END:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s: PWR_CMD_END\n", __func__);
return true;
default:
WARN_ONCE(true,
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index 55db71c766fe..3866ea300a70 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -298,8 +298,8 @@ static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
- if (!buffer || copy_from_user(tmp, buffer, tmp_len))
- return count;
+ if (copy_from_user(tmp, buffer, tmp_len))
+ return -EFAULT;
tmp[tmp_len] = '\0';
@@ -307,7 +307,7 @@ static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
if (num != 3)
- return count;
+ return -EINVAL;
switch (len) {
case 1:
@@ -349,8 +349,8 @@ static ssize_t rtl_debugfs_set_write_h2c(struct file *filp,
tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
- if (!buffer || copy_from_user(tmp, buffer, tmp_len))
- return count;
+ if (copy_from_user(tmp, buffer, tmp_len))
+ return -EFAULT;
tmp[tmp_len] = '\0';
@@ -360,8 +360,8 @@ static ssize_t rtl_debugfs_set_write_h2c(struct file *filp,
&h2c_data[4], &h2c_data[5],
&h2c_data[6], &h2c_data[7]);
- if (h2c_len <= 0)
- return count;
+ if (h2c_len == 0)
+ return -EINVAL;
for (i = 0; i < h2c_len; i++)
h2c_data_packed[i] = (u8)h2c_data[i];
@@ -395,8 +395,8 @@ static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
- if (!buffer || copy_from_user(tmp, buffer, tmp_len))
- return count;
+ if (copy_from_user(tmp, buffer, tmp_len))
+ return -EFAULT;
tmp[tmp_len] = '\0';
@@ -404,9 +404,9 @@ static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
&path, &addr, &bitmask, &data);
if (num != 4) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "Format is <path> <addr> <mask> <data>\n");
- return count;
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "Format is <path> <addr> <mask> <data>\n");
+ return -EINVAL;
}
rtl_set_rfreg(hw, path, addr, bitmask, data);
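
The debug.c hunks fix a real bug alongside the rename: on a failed copy_from_user() or parse, these handlers returned count, which reports full success to user space and silently swallows the error (the !buffer check was also redundant, since copy_from_user() already fails cleanly on a bad pointer). A minimal sketch of the corrected shape for a debugfs write hook, assuming a kernel build; demo_debugfs_write and the register-parsing body are hypothetical:

	#include <linux/kernel.h>
	#include <linux/fs.h>
	#include <linux/uaccess.h>

	/* Hypothetical debugfs write handler following the corrected pattern. */
	static ssize_t demo_debugfs_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *loff)
	{
		char tmp[32];
		size_t tmp_len = min(count, sizeof(tmp) - 1);
		unsigned int addr, val;

		if (copy_from_user(tmp, buffer, tmp_len))
			return -EFAULT;	/* bad user pointer: report it, don't hide it */

		tmp[tmp_len] = '\0';

		if (sscanf(tmp, "%x %x", &addr, &val) != 2)
			return -EINVAL;	/* malformed input */

		/* ... act on addr/val ... */
		return count;		/* success: consumed the whole write */
	}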
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h
index 69f169d4d4ae..dbfb4d67ca31 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.h
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.h
@@ -160,6 +160,10 @@ void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *titlestring,
const void *hexdata, int hexdatalen);
+#define rtl_dbg(rtlpriv, comp, level, fmt, ...) \
+ _rtl_dbg_trace(rtlpriv, comp, level, \
+ fmt, ##__VA_ARGS__)
+
#define RT_TRACE(rtlpriv, comp, level, fmt, ...) \
_rtl_dbg_trace(rtlpriv, comp, level, \
fmt, ##__VA_ARGS__)
@@ -177,6 +181,13 @@ void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
struct rtl_priv;
__printf(4, 5)
+static inline void rtl_dbg(struct rtl_priv *rtlpriv,
+ u64 comp, int level,
+ const char *fmt, ...)
+{
+}
+
+__printf(4, 5)
static inline void RT_TRACE(struct rtl_priv *rtlpriv,
u64 comp, int level,
const char *fmt, ...)
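
The debug.h hunk introduces rtl_dbg as an alias of RT_TRACE and, for builds with debugging disabled, an empty static inline annotated __printf(4, 5): the stub compiles away, yet the attribute keeps printf-style format checking on every call site. The same pattern in plain, runnable C (the dbg name and DEBUG switch are illustrative):

	#include <stdio.h>

	#ifdef DEBUG
	#define dbg(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
	#else
	/* Compiles to nothing, but the format attribute keeps
	 * printf-style argument checking alive in non-debug builds.
	 */
	__attribute__((format(printf, 1, 2)))
	static inline void dbg(const char *fmt, ...) {}
	#endif

	int main(void)
	{
		dbg("value=%d\n", 42);	/* type-checked in both configurations */
		return 0;
	}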
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index 264667203f6f..305d43818018 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -120,8 +120,8 @@ void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
const u32 efuse_len =
rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "Addr=%x Data =%x\n",
- address, value);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD, "Addr=%x Data =%x\n",
+ address, value);
if (address < efuse_len) {
rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], value);
@@ -211,9 +211,9 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
u8 efuse_usage;
if ((_offset + _size_byte) > rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]) {
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "read_efuse(): Invalid offset(%#x) with read bytes(%#x)!!\n",
- _offset, _size_byte);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "%s: Invalid offset(%#x) with read bytes(%#x)!!\n",
+ __func__, _offset, _size_byte);
return;
}
@@ -376,9 +376,9 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
(EFUSE_MAX_SIZE - rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))
result = false;
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "efuse_shadow_update_chk(): totalbytes(%#x), hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
- totalbytes, hdr_num, words_need, efuse_used);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "%s: totalbytes(%#x), hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
+ __func__, totalbytes, hdr_num, words_need, efuse_used);
return result;
}
@@ -416,7 +416,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
u8 word_en = 0x0F;
u8 first_pg = false;
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
if (!efuse_shadow_update_chk(hw)) {
efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
@@ -424,8 +424,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "efuse out of capacity!!\n");
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "efuse out of capacity!!\n");
return false;
}
efuse_power_switch(hw, true, true);
@@ -464,8 +464,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
if (!efuse_pg_packet_write(hw, (u8) offset, word_en,
tmpdata)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "PG section(%#x) fail!!\n", offset);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "PG section(%#x) fail!!\n", offset);
break;
}
}
@@ -478,7 +478,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
return true;
}
@@ -616,8 +616,8 @@ static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 tmpidx = 0;
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "Addr = %x Data=%x\n", addr, data);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "Addr = %x Data=%x\n", addr, data);
rtl_write_byte(rtlpriv,
rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (u8) (addr & 0xff));
@@ -997,8 +997,8 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
if (efuse_addr >= (EFUSE_MAX_SIZE -
rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) {
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "efuse_addr(%#x) Out of size!!\n", efuse_addr);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "efuse_addr(%#x) Out of size!!\n", efuse_addr);
}
return true;
@@ -1038,8 +1038,8 @@ static u8 enable_efuse_data_write(struct ieee80211_hw *hw,
u8 tmpdata[8];
memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "word_en = %x efuse_addr=%x\n", word_en, efuse_addr);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "word_en = %x efuse_addr=%x\n", word_en, efuse_addr);
if (!(word_en & BIT(0))) {
tmpaddr = start_addr;
@@ -1242,11 +1242,11 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
eeprom_id = *((u16 *)&hwinfo[0]);
if (eeprom_id != params[0]) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
rtlefuse->autoload_failflag = true;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
}
@@ -1257,30 +1257,30 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
rtlefuse->eeprom_did = *(u16 *)&hwinfo[params[2]];
rtlefuse->eeprom_svid = *(u16 *)&hwinfo[params[3]];
rtlefuse->eeprom_smid = *(u16 *)&hwinfo[params[4]];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROMId = 0x%4x\n", eeprom_id);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROMId = 0x%4x\n", eeprom_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
for (i = 0; i < 6; i += 2) {
usvalue = *(u16 *)&hwinfo[params[5] + i];
*((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
rtlefuse->eeprom_channelplan = *&hwinfo[params[6]];
rtlefuse->eeprom_version = *(u16 *)&hwinfo[params[7]];
rtlefuse->txpwr_fromeprom = true;
rtlefuse->eeprom_oemid = *&hwinfo[params[8]];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
/* set channel plan to world wide 13 */
rtlefuse->channel_plan = params[9];
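
The efuse.c hunks leave the existing *(u16 *)&hwinfo[...] loads untouched; those casts assume a little-endian CPU that tolerates unaligned access. For reference, one portable alternative (not what this patch does) is get_unaligned_le16(); demo_read_eeprom_field is hypothetical:

	#include <linux/types.h>
	#include <asm/unaligned.h>

	/* Portable way to pull a little-endian u16 field out of a byte
	 * buffer at an arbitrary (possibly odd) offset.
	 */
	static u16 demo_read_eeprom_field(const u8 *hwinfo, unsigned int offset)
	{
		return get_unaligned_le16(&hwinfo[offset]);
	}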
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 25335bd2873b..1c77b3b2173c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -164,21 +164,29 @@ static bool _rtl_pci_platform_switch_device_pci_aspm(
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ value &= PCI_EXP_LNKCTL_ASPMC;
+
if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
- value |= 0x40;
+ value |= PCI_EXP_LNKCTL_CCC;
- pci_write_config_byte(rtlpci->pdev, 0x80, value);
+ pcie_capability_clear_and_set_word(rtlpci->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC | value,
+ value);
return false;
}
-/*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/
-static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
+/* @value is PCI_EXP_LNKCTL_CLKREQ_EN or 0 to enable/disable clk request. */
+static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u16 value)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- pci_write_config_byte(rtlpci->pdev, 0x81, value);
+ value &= PCI_EXP_LNKCTL_CLKREQ_EN;
+
+ pcie_capability_clear_and_set_word(rtlpci->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CLKREQ_EN,
+ value);
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
udelay(100);
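
Beyond the renames, these pci.c hunks replace byte pokes at hardcoded config offsets (0x80/0x81) with pcie_capability_clear_and_set_word(), which locates the device's PCIe capability itself and performs a read-modify-write of the 16-bit Link Control register, touching only the requested field. A minimal kernel-style sketch of the same call; demo_set_clkreq is hypothetical, while the register constants are the real ones from pci_regs.h:

	#include <linux/pci.h>

	/* Enable or disable CLKREQ# in Link Control without disturbing
	 * the other bits of the register.
	 */
	static void demo_set_clkreq(struct pci_dev *pdev, bool enable)
	{
		u16 set = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;

		pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN, set);
	}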
@@ -192,11 +200,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
- u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
/*Retrieve original configuration settings. */
u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
- u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
- pcibridge_linkctrlreg;
u16 aspmlevel = 0;
u8 tmp_u1b = 0;
@@ -204,8 +209,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
return;
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "PCI(Bridge) UNKNOWN\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "PCI(Bridge) UNKNOWN\n");
return;
}
@@ -221,16 +226,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
/*Set corresponding value. */
aspmlevel |= BIT(0) | BIT(1);
linkctrl_reg &= ~aspmlevel;
- pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
_rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
- udelay(50);
-
- /*4 Disable Pci Bridge ASPM */
- pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
- pcibridge_linkctrlreg);
-
- udelay(50);
}
/*Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for
@@ -245,39 +242,18 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
- u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
u16 aspmlevel;
- u8 u_pcibridge_aspmsetting;
u8 u_device_aspmsetting;
if (!ppsc->support_aspm)
return;
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "PCI(Bridge) UNKNOWN\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "PCI(Bridge) UNKNOWN\n");
return;
}
- /*4 Enable Pci Bridge ASPM */
-
- u_pcibridge_aspmsetting =
- pcipriv->ndis_adapter.pcibridge_linkctrlreg |
- rtlpci->const_hostpci_aspm_setting;
-
- if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
- u_pcibridge_aspmsetting &= ~BIT(0);
-
- pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
- u_pcibridge_aspmsetting);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PlatformEnableASPM(): Write reg[%x] = %x\n",
- (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
- u_pcibridge_aspmsetting);
-
- udelay(50);
-
/*Get ASPM level (with/without Clock Req) */
aspmlevel = rtlpci->const_devicepci_aspm_setting;
u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
@@ -291,7 +267,8 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
_rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
- RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
+ RT_RF_OFF_LEVL_CLK_REQ) ?
+ PCI_EXP_LNKCTL_CLKREQ_EN : 0);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
}
udelay(100);
@@ -331,11 +308,11 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
list) {
tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "pcipriv->ndis_adapter.funcnumber %x\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "pcipriv->ndis_adapter.funcnumber %x\n",
pcipriv->ndis_adapter.funcnumber);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "tpcipriv->ndis_adapter.funcnumber %x\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "tpcipriv->ndis_adapter.funcnumber %x\n",
tpcipriv->ndis_adapter.funcnumber);
if (pcipriv->ndis_adapter.busnumber ==
@@ -350,8 +327,8 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "find_buddy_priv %d\n", find_buddy_priv);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "find_buddy_priv %d\n", find_buddy_priv);
if (find_buddy_priv)
*buddy_priv = tpriv;
@@ -359,22 +336,6 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
return find_buddy_priv;
}
-static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
-{
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
- u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
- u8 linkctrl_reg;
- u8 num4bbytes;
-
- num4bbytes = (capabilityoffset + 0x10) / 4;
-
- /*Read Link Control Register */
- pci_read_config_byte(rtlpci->pdev, (num4bbytes << 2), &linkctrl_reg);
-
- pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
-}
-
static void rtl_pci_parse_configuration(struct pci_dev *pdev,
struct ieee80211_hw *hw)
{
@@ -388,8 +349,8 @@ static void rtl_pci_parse_configuration(struct pci_dev *pdev,
pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &linkctrl_reg);
pcipriv->ndis_adapter.linkctrl_reg = (u8)linkctrl_reg;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
- pcipriv->ndis_adapter.linkctrl_reg);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
+ pcipriv->ndis_adapter.linkctrl_reg);
pci_read_config_byte(pdev, 0x98, &tmp);
tmp |= BIT(4);
@@ -557,11 +518,11 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
if (rtlpriv->rtlhal.earlymode_enable)
skb_pull(skb, EM_HDR_LEN);
- RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
- "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n",
- ring->idx,
- skb_queue_len(&ring->queue),
- *(u16 *)(skb->data + 22));
+ rtl_dbg(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
+ "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n",
+ ring->idx,
+ skb_queue_len(&ring->queue),
+ *(u16 *)(skb->data + 22));
if (prio == TXCMD_QUEUE) {
dev_kfree_skb(skb);
@@ -608,10 +569,10 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
}
if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n",
- prio, ring->idx,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n",
+ prio, ring->idx,
+ skb_queue_len(&ring->queue));
ieee80211_wake_queue(hw, skb_get_queue_mapping(skb));
}
@@ -801,9 +762,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
skb_reserve(skb, stats.rx_drvinfo_size +
stats.rx_bufshift);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "skb->end - skb->tail = %d, len is %d\n",
- skb->end - skb->tail, len);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "skb->end - skb->tail = %d, len is %d\n",
+ skb->end - skb->tail, len);
dev_kfree_skb_any(skb);
goto new_trx_end;
}
@@ -925,67 +886,67 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
/*<1> beacon related */
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "beacon ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "beacon ok interrupt!\n");
if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "beacon err interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "beacon err interrupt!\n");
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "prepare beacon for interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "prepare beacon for interrupt!\n");
tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
}
/*<2> Tx related */
if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "Manage ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "Manage ok interrupt!\n");
_rtl_pci_tx_isr(hw, MGNT_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "HIGH_QUEUE ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "HIGH_QUEUE ok interrupt!\n");
_rtl_pci_tx_isr(hw, HIGH_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "BK Tx OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "BK Tx OK interrupt!\n");
_rtl_pci_tx_isr(hw, BK_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "BE TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "BE TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, BE_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "VI TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "VI TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, VI_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "Vo TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "Vo TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, VO_QUEUE);
}
@@ -993,8 +954,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
if (intvec.intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "H2C TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "H2C TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, H2C_QUEUE);
}
}
@@ -1003,34 +964,34 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "CMD TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "CMD TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, TXCMD_QUEUE);
}
}
/*<3> Rx related */
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
_rtl_pci_rx_interrupt(hw);
}
if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "rx descriptor unavailable!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "rx descriptor unavailable!\n");
_rtl_pci_rx_interrupt(hw);
}
if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
_rtl_pci_rx_interrupt(hw);
}
/*<4> fw related*/
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "firmware interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "firmware interrupt!\n");
queue_delayed_work(rtlpriv->works.rtl_wq,
&rtlpriv->works.fwevt_wq, 0);
}
@@ -1046,8 +1007,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
if (unlikely(intvec.inta &
rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "hsisr interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "hsisr interrupt!\n");
_rtl_pci_hs_interrupt(hw);
}
}
@@ -1251,8 +1212,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
rtlpci->tx_ring[prio].entries = entries;
skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
- prio, desc);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
+ prio, desc);
/* init every desc in this ring */
if (!rtlpriv->use_new_trx_flow) {
@@ -1649,10 +1610,10 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
true, HW_DESC_OWN);
if (own == 1 && hw_queue != BEACON_QUEUE) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
- hw_queue, ring->idx, idx,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
+ hw_queue, ring->idx, idx,
+ skb_queue_len(&ring->queue));
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
flags);
@@ -1662,8 +1623,8 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
if (rtlpriv->cfg->ops->get_available_desc &&
rtlpriv->cfg->ops->get_available_desc(hw, hw_queue) == 0) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "get_available_desc fail\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "get_available_desc fail\n");
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
return skb->len;
}
@@ -1686,8 +1647,8 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
hw_queue != BEACON_QUEUE) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
hw_queue, ring->idx, idx,
skb_queue_len(&ring->queue));
@@ -1794,8 +1755,8 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
err = rtlpriv->cfg->ops->hw_init(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Failed to config hardware!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Failed to config hardware!\n");
kfree(rtlpriv->btcoexist.btc_context);
kfree(rtlpriv->btcoexist.wifi_only_context);
return err;
@@ -1804,7 +1765,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
&rtlmac->retry_long);
rtlpriv->cfg->ops->enable_interrupt(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");
rtl_init_rx_config(hw);
@@ -1815,7 +1776,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
rtlpci->up_first_time = false;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%s OK\n", __func__);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%s OK\n", __func__);
return 0;
}
@@ -1909,71 +1870,71 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
deviceid == RTL_PCI_8171_DID) {
switch (revisionid) {
case RTL_PCI_REVISION_ID_8192PCIE:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192 PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192 PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
return false;
case RTL_PCI_REVISION_ID_8192SE:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192SE is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192SE is found - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Err: Unknown device - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Err: Unknown device - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
break;
}
} else if (deviceid == RTL_PCI_8723AE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8723AE PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8723AE PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
} else if (deviceid == RTL_PCI_8192CET_DID ||
deviceid == RTL_PCI_8192CE_DID ||
deviceid == RTL_PCI_8191CE_DID ||
deviceid == RTL_PCI_8188CE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192C PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192C PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
} else if (deviceid == RTL_PCI_8192DE_DID ||
deviceid == RTL_PCI_8192DE_DID2) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192D PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192D PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
} else if (deviceid == RTL_PCI_8188EE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8188EE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8188EE\n");
} else if (deviceid == RTL_PCI_8723BE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8723BE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8723BE\n");
} else if (deviceid == RTL_PCI_8192EE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8192EE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8192EE\n");
} else if (deviceid == RTL_PCI_8821AE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8821AE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8821AE\n");
} else if (deviceid == RTL_PCI_8812AE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8812AE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8812AE\n");
} else if (deviceid == RTL_PCI_8822BE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8822BE;
rtlhal->bandset = BAND_ON_BOTH;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8822BE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8822BE\n");
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Err: Unknown device - vid/did=%x/%x\n",
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Err: Unknown device - vid/did=%x/%x\n",
venderid, deviceid);
rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
@@ -1982,17 +1943,17 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
if (revisionid == 0 || revisionid == 1) {
if (revisionid == 0) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find 92DE MAC0\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find 92DE MAC0\n");
rtlhal->interfaceindex = 0;
} else if (revisionid == 1) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find 92DE MAC1\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find 92DE MAC1\n");
rtlhal->interfaceindex = 1;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
venderid, deviceid, revisionid);
rtlhal->interfaceindex = 0;
}
@@ -2026,9 +1987,9 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
pcipriv->ndis_adapter.pcibridge_vendor = tmp;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Pci Bridge Vendor is found index: %d\n",
- tmp);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Pci Bridge Vendor is found index: %d\n",
+ tmp);
break;
}
}
@@ -2042,12 +2003,6 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
PCI_SLOT(bridge_pdev->devfn);
pcipriv->ndis_adapter.pcibridge_funcnum =
PCI_FUNC(bridge_pdev->devfn);
- pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
- pci_pcie_cap(bridge_pdev);
- pcipriv->ndis_adapter.num4bytes =
- (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
-
- rtl_pci_get_linkcontrol_field(hw);
if (pcipriv->ndis_adapter.pcibridge_vendor ==
PCI_BRIDGE_VENDOR_AMD) {
@@ -2056,22 +2011,20 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
- pcipriv->ndis_adapter.busnumber,
- pcipriv->ndis_adapter.devnumber,
- pcipriv->ndis_adapter.funcnumber,
- pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
+ pcipriv->ndis_adapter.busnumber,
+ pcipriv->ndis_adapter.devnumber,
+ pcipriv->ndis_adapter.funcnumber,
+ pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
- pcipriv->ndis_adapter.pcibridge_busnum,
- pcipriv->ndis_adapter.pcibridge_devnum,
- pcipriv->ndis_adapter.pcibridge_funcnum,
- pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
- pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
- pcipriv->ndis_adapter.pcibridge_linkctrlreg,
- pcipriv->ndis_adapter.amd_l1_patch);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "pci_bridge busnumber:devnumber:funcnumber:vendor:amd %d:%d:%d:%x:%x\n",
+ pcipriv->ndis_adapter.pcibridge_busnum,
+ pcipriv->ndis_adapter.pcibridge_devnum,
+ pcipriv->ndis_adapter.pcibridge_funcnum,
+ pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
+ pcipriv->ndis_adapter.amd_l1_patch);
rtl_pci_parse_configuration(pdev, hw);
list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
@@ -2099,8 +2052,8 @@ static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
rtlpci->using_msi = true;
- RT_TRACE(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
- "MSI Interrupt Mode!\n");
+ rtl_dbg(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
+ "MSI Interrupt Mode!\n");
return 0;
}
@@ -2117,8 +2070,8 @@ static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
return ret;
rtlpci->using_msi = false;
- RT_TRACE(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
- "Pin-based Interrupt Mode!\n");
+ rtl_dbg(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
+ "Pin-based Interrupt Mode!\n");
return 0;
}
@@ -2245,10 +2198,10 @@ int rtl_pci_probe(struct pci_dev *pdev,
goto fail2;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
- pmem_start, pmem_len, pmem_flags,
- rtlpriv->io.pci_mem_start);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
+ pmem_start, pmem_len, pmem_flags,
+ rtlpriv->io.pci_mem_start);
/* Disable Clk Request */
pci_write_config_byte(pdev, 0x81, 0);
@@ -2310,9 +2263,9 @@ int rtl_pci_probe(struct pci_dev *pdev,
rtlpci = rtl_pcidev(pcipriv);
err = rtl_pci_intr_mode_decide(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "%s: failed to register IRQ handler\n",
- wiphy_name(hw->wiphy));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "%s: failed to register IRQ handler\n",
+ wiphy_name(hw->wiphy));
goto fail3;
}
rtlpci->irq_alloc = 1;
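
The pci.c hunks above are a mechanical conversion from the RT_TRACE macro to rtl_dbg, with continuation lines re-indented to the new call name. A minimal sketch of the gating pattern such a debug macro implements is below; the field names (debug_comp, debug_level) are illustrative assumptions, not the driver's actual definition:

	/* Sketch only: print when the component bit is enabled and the
	 * message level clears the configured verbosity threshold.
	 */
	#define rtl_dbg_sketch(priv, comp, level, fmt, ...)             \
		do {                                                    \
			if (unlikely(((priv)->debug_comp & (comp)) &&   \
				     (level) <= (priv)->debug_level))   \
				pr_debug(fmt, ##__VA_ARGS__);           \
		} while (0)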
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
index 866861626a0a..d6307197dfea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
@@ -236,11 +236,6 @@ struct mp_adapter {
u16 pcibridge_vendorid;
u16 pcibridge_deviceid;
- u8 num4bytes;
-
- u8 pcibridge_pciehdr_offset;
- u8 pcibridge_linkctrlreg;
-
bool amd_l1_patch;
};
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index fff8dda14023..9a4601a71eae 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -19,8 +19,8 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
rtlpriv->intf_ops->reset_trx_ring(hw);
if (is_hal_stop(rtlhal))
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Driver is already down!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Driver is already down!\n");
/*<2> Enable Adapter */
if (rtlpriv->cfg->ops->hw_init(hw))
@@ -81,9 +81,9 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
if (ppsc->rfchange_inprogress) {
spin_unlock(&rtlpriv->locks.rf_ps_lock);
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "RF Change in progress! Wait to set..state_toset(%d).\n",
- state_toset);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "RF Change in progress! Wait to set..state_toset(%d).\n",
+ state_toset);
/* Set RF after the previous action is done. */
while (ppsc->rfchange_inprogress) {
@@ -195,8 +195,8 @@ void rtl_ips_nic_off_wq_callback(void *data)
enum rf_pwrstate rtstate;
if (mac->opmode != NL80211_IFTYPE_STATION) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "not station return\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "not station return\n");
return;
}
@@ -232,8 +232,8 @@ void rtl_ips_nic_off_wq_callback(void *data)
!ppsc->swrf_processing &&
(mac->link_state == MAC80211_NOLINK) &&
!mac->act_scanning) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "IPSEnter(): Turn off RF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "IPSEnter(): Turn off RF\n");
ppsc->inactive_pwrstate = ERFOFF;
ppsc->in_powersavemode = true;
@@ -311,8 +311,8 @@ static bool rtl_get_fwlps_doze(struct ieee80211_hw *hw)
ppsc->last_delaylps_stamp_jiffies);
if (ps_timediff < 2000) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Delay enter Fw LPS for DHCP, ARP, or EAPOL exchanging state\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Delay enter Fw LPS for DHCP, ARP, or EAPOL exchanging state\n");
return false;
}
@@ -357,9 +357,9 @@ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
if ((ppsc->fwctrl_lps) && ppsc->report_linked) {
if (ppsc->dot11_psmode == EACTIVE) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "FW LPS leave ps_mode:%x\n",
- FW_PS_ACTIVE_MODE);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "FW LPS leave ps_mode:%x\n",
+ FW_PS_ACTIVE_MODE);
enter_fwlps = false;
ppsc->pwr_mode = FW_PS_ACTIVE_MODE;
ppsc->smart_ps = 0;
@@ -372,9 +372,9 @@ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
rtlpriv->btcoexist.btc_ops->btc_lps_notify(rtlpriv, rt_psmode);
} else {
if (rtl_get_fwlps_doze(hw)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "FW LPS enter ps_mode:%x\n",
- ppsc->fwctrl_psmode);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "FW LPS enter ps_mode:%x\n",
+ ppsc->fwctrl_psmode);
if (rtlpriv->cfg->ops->get_btc_status())
rtlpriv->btcoexist.btc_ops->btc_lps_notify(rtlpriv, rt_psmode);
enter_fwlps = true;
@@ -424,8 +424,8 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw)
* bt_coexist may ask to enter lps.
* In the normal case, this constraint moves to rtl_lps_set_psmode().
*/
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Enter 802.11 power save mode...\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Enter 802.11 power save mode...\n");
rtl_lps_set_psmode(hw, EAUTOPS);
mutex_unlock(&rtlpriv->locks.lps_mutex);
@@ -453,8 +453,8 @@ static void rtl_lps_leave_core(struct ieee80211_hw *hw)
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Busy Traffic,Leave 802.11 power save..\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Busy Traffic,Leave 802.11 power save..\n");
rtl_lps_set_psmode(hw, EACTIVE);
}
@@ -538,8 +538,8 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
queue_delayed_work(rtlpriv->works.rtl_wq,
&rtlpriv->works.ps_work, MSECS(5));
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed);
}
}
EXPORT_SYMBOL_GPL(rtl_swlps_beacon);
@@ -634,9 +634,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
/* this print should always be dtim_counter = 0 &
 * sleep = dtim_period, that means, we should
 * wake before every dtim */
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "dtim_counter:%x will sleep :%d beacon_intv\n",
- rtlpriv->psc.dtim_counter, sleep_intv);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "dtim_counter:%x will sleep :%d beacon_intv\n",
+ rtlpriv->psc.dtim_counter, sleep_intv);
/* we tested that 40ms is enough for sw & hw sw delay */
queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ps_rfon_wq,
@@ -748,9 +748,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
if (ie[0] == 12) {
find_p2p_ps_ie = true;
if ((noa_len - 2) % 13 != 0) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "P2P notice of absence: invalid length.%d\n",
- noa_len);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "P2P notice of absence: invalid length.%d\n",
+ noa_len);
return;
} else {
noa_num = (noa_len - 2) / 13;
@@ -761,8 +761,8 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
noa_index = ie[3];
if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
P2P_PS_NONE || noa_index != p2pinfo->noa_index) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "update NOA ie.\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "update NOA ie.\n");
p2pinfo->noa_index = noa_index;
p2pinfo->opp_ps = (ie[4] >> 7);
p2pinfo->ctwindow = ie[4] & 0x7F;
@@ -833,7 +833,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
if (ie == NULL)
return;
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
/*to find noa ie*/
while (ie + 1 < end) {
noa_len = READEF2BYTE((__le16 *)&ie[1]);
@@ -841,13 +841,13 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
return;
if (ie[0] == 12) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "find NOA IE.\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "find NOA IE.\n");
RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD, "noa ie ",
ie, noa_len);
if ((noa_len - 2) % 13 != 0) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "P2P notice of absence: invalid length.%d\n",
- noa_len);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "P2P notice of absence: invalid length.%d\n",
+ noa_len);
return;
} else {
noa_num = (noa_len - 2) / 13;
@@ -905,7 +905,7 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw , u8 p2p_ps_state)
struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, " p2p state %x\n" , p2p_ps_state);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, " p2p state %x\n", p2p_ps_state);
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
p2pinfo->p2p_ps_state = p2p_ps_state;
@@ -957,18 +957,18 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw , u8 p2p_ps_state)
default:
break;
}
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "ctwindow %x oppps %x\n",
- p2pinfo->ctwindow , p2pinfo->opp_ps);
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "count %x duration %x index %x interval %x start time %x noa num %x\n",
- p2pinfo->noa_count_type[0],
- p2pinfo->noa_duration[0],
- p2pinfo->noa_index,
- p2pinfo->noa_interval[0],
- p2pinfo->noa_start_time[0],
- p2pinfo->noa_num);
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "end\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "ctwindow %x oppps %x\n",
+ p2pinfo->ctwindow, p2pinfo->opp_ps);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "count %x duration %x index %x interval %x start time %x noa num %x\n",
+ p2pinfo->noa_count_type[0],
+ p2pinfo->noa_duration[0],
+ p2pinfo->noa_index,
+ p2pinfo->noa_interval[0],
+ p2pinfo->noa_start_time[0],
+ p2pinfo->noa_num);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "end\n");
}
void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 8be31e0ad878..4cf8face0bbd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -393,13 +393,13 @@ int rtl_regd_init(struct ieee80211_hw *hw,
rtlpriv->regd.country_code =
channel_plan_to_country_code(rtlpriv->efuse.channel_plan);
- RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
- "rtl: EEPROM regdomain: 0x%0x country code: %d\n",
- rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code);
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_DMESG,
+ "rtl: EEPROM regdomain: 0x%0x country code: %d\n",
+ rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code);
if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
- RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
- "rtl: EEPROM indicates invalid country code, world wide 13 should be used\n");
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_DMESG,
+ "rtl: EEPROM indicates invalid country code, world wide 13 should be used\n");
rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
}
@@ -414,9 +414,9 @@ int rtl_regd_init(struct ieee80211_hw *hw,
rtlpriv->regd.alpha2[1] = '0';
}
- RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
- "rtl: Country alpha2 being used: %c%c\n",
- rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]);
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_TRACE,
+ "rtl: Country alpha2 being used: %c%c\n",
+ rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]);
_rtl_regd_init_wiphy(&rtlpriv->regd, wiphy, reg_notifier);
@@ -428,7 +428,7 @@ void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_REGD, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_LOUD, "\n");
_rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index 333e355c9281..1ade4bea1d3f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -805,7 +805,7 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw)
}
if (rtlpriv->btcoexist.bt_edca_dl != 0) {
- edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
+ edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
bt_change_edca = true;
}
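
The one-line change above fixes a copy-paste slip: the Bluetooth-coexistence downlink EDCA override was stored in the uplink variable, so edca_be_dl never took effect. The same fix recurs below in rtl8192c/dm_common.c and rtl8723ae/dm.c. A sketch of the corrected pairing (the uplink block is inferred from the surrounding driver code, not shown in this hunk):

	/* Corrected pairing: each BT-coex override lands in its own local. */
	if (rtlpriv->btcoexist.bt_edca_ul != 0) {       /* inferred context */
		edca_be_ul = rtlpriv->btcoexist.bt_edca_ul;
		bt_change_edca = true;
	}
	if (rtlpriv->btcoexist.bt_edca_dl != 0) {
		edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
		bt_change_edca = true;
	}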
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index 96d8f25b120f..4b8bdf3885db 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -16,7 +16,6 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw,
static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset,
u32 data);
-static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask);
static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw);
static bool _rtl88e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
static bool phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
@@ -46,7 +45,7 @@ u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
@@ -69,7 +68,7 @@ void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((originalvalue & (~bitmask)) | (data << bitshift));
}
@@ -95,7 +94,7 @@ u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
original_value = _rtl88e_phy_rf_serial_read(hw, rfpath, regaddr);
- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
@@ -124,7 +123,7 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
original_value = _rtl88e_phy_rf_serial_read(hw,
rfpath,
regaddr);
- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data =
((original_value & (~bitmask)) |
(data << bitshift));
@@ -210,17 +209,6 @@ static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw,
rfpath, pphyreg->rf3wire_offset, data_and_addr);
}
-static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
-}
-
bool rtl88e_phy_mac_config(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index f2908ee5f860..17680aa8c0bc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -640,7 +640,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
}
if (rtlpriv->btcoexist.bt_edca_dl != 0) {
- edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
+ edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
bt_change_edca = true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
index 0efd19aa4fe5..62ed75d6e2d3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
@@ -17,7 +17,7 @@ u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
@@ -40,7 +40,7 @@ void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((originalvalue & (~bitmask)) | (data << bitshift));
}
@@ -143,18 +143,6 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write);
-u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
-}
-EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift);
-
static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
{
rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
index 75afa6253ad0..e64d377dfe9e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
@@ -196,7 +196,6 @@ bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
void rtl92c_phy_set_io(struct ieee80211_hw *hw);
void rtl92c_bb_block_on(struct ieee80211_hw *hw);
-u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
enum wireless_mode wirelessmode,
u8 txpwridx);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
index f6574f31fa3b..e17d97550dbd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
@@ -39,7 +39,7 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
rfpath, regaddr);
}
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
spin_unlock(&rtlpriv->locks.rf_lock);
@@ -110,7 +110,7 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
original_value = _rtl92c_phy_rf_serial_read(hw,
rfpath,
regaddr);
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data =
((original_value & (~bitmask)) |
(data << bitshift));
@@ -122,7 +122,7 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
original_value = _rtl92c_phy_fw_rf_serial_read(hw,
rfpath,
regaddr);
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data =
((original_value & (~bitmask)) |
(data << bitshift));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
index 7582a162bd11..c7a0d4c776f0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
@@ -94,7 +94,6 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath,
u32 offset);
u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset);
-u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset, u32 data);
void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index df7a14320fd2..869775c711f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -618,8 +618,8 @@ static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
u8 queue_sel)
{
u16 beq, bkq, viq, voq, mgtq, hiq;
- u16 uninitialized_var(valuehi);
- u16 uninitialized_var(valuelow);
+ u16 valuehi;
+ u16 valuelow;
switch (queue_sel) {
case (TX_SELE_HQ | TX_SELE_LQ):
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
index 9cd028cb2239..4043a2c59cd4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
@@ -32,7 +32,7 @@ u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
original_value = _rtl92c_phy_fw_rf_serial_read(hw,
rfpath, regaddr);
}
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
@@ -56,7 +56,7 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
original_value = _rtl92c_phy_rf_serial_read(hw,
rfpath,
regaddr);
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data =
((original_value & (~bitmask)) |
(data << bitshift));
@@ -67,7 +67,7 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
original_value = _rtl92c_phy_fw_rf_serial_read(hw,
rfpath,
regaddr);
- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data =
((original_value & (~bitmask)) |
(data << bitshift));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index 0ae6371b6318..fb9355b2f6be 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -160,17 +160,14 @@ static u32 targetchnl_2g[TARGET_CHNL_NUM_2G] = {
25711, 25658, 25606, 25554, 25502, 25451, 25328
};
-static u32 _rtl92d_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
-
- return i;
-}
+static const u8 channel_all[59] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
+ 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
+ 114, 116, 118, 120, 122, 124, 126, 128, 130,
+ 132, 134, 136, 138, 140, 149, 151, 153, 155,
+ 157, 159, 161, 163, 165
+};
u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
{
@@ -194,7 +191,7 @@ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
} else {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
}
- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"BBR MASK=0x%x Addr[0x%x]=0x%x\n",
@@ -226,7 +223,7 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
dbi_direct);
else
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((originalvalue & (~bitmask)) | (data << bitshift));
}
if (rtlhal->during_mac1init_radioa || rtlhal->during_mac0init_radiob)
@@ -314,7 +311,7 @@ u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
regaddr, rfpath, bitmask);
spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr);
- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
@@ -341,7 +338,7 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92d_phy_rf_serial_read(hw,
rfpath, regaddr);
- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((original_value & (~bitmask)) |
(data << bitshift));
}
@@ -1361,14 +1358,6 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl)
{
- u8 channel_all[59] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
- 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
- 114, 116, 118, 120, 122, 124, 126, 128, 130,
- 132, 134, 136, 138, 140, 149, 151, 153, 155,
- 157, 159, 161, 163, 165
- };
u8 place = chnl;
if (chnl > 14) {
@@ -2392,14 +2381,10 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
"Just Read IQK Matrix reg for channel:%d....\n",
channel);
- if ((rtlphy->iqk_matrix[indexforchannel].
- value[0] != NULL)
- /*&&(regea4 != 0) */)
+ if (rtlphy->iqk_matrix[indexforchannel].value[0][0] != 0)
_rtl92d_phy_patha_fill_iqk_matrix(hw, true,
- rtlphy->iqk_matrix[
- indexforchannel].value, 0,
- (rtlphy->iqk_matrix[
- indexforchannel].value[0][2] == 0));
+ rtlphy->iqk_matrix[indexforchannel].value, 0,
+ rtlphy->iqk_matrix[indexforchannel].value[0][2] == 0);
if (IS_92D_SINGLEPHY(rtlhal->version)) {
if ((rtlphy->iqk_matrix[
indexforchannel].value[0][4] != 0)
@@ -3227,37 +3212,28 @@ void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw)
u8 rtl92d_get_chnlgroup_fromarray(u8 chnl)
{
u8 group;
- u8 channel_info[59] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56,
- 58, 60, 62, 64, 100, 102, 104, 106, 108,
- 110, 112, 114, 116, 118, 120, 122, 124,
- 126, 128, 130, 132, 134, 136, 138, 140,
- 149, 151, 153, 155, 157, 159, 161, 163,
- 165
- };
- if (channel_info[chnl] <= 3)
+ if (channel_all[chnl] <= 3)
group = 0;
- else if (channel_info[chnl] <= 9)
+ else if (channel_all[chnl] <= 9)
group = 1;
- else if (channel_info[chnl] <= 14)
+ else if (channel_all[chnl] <= 14)
group = 2;
- else if (channel_info[chnl] <= 44)
+ else if (channel_all[chnl] <= 44)
group = 3;
- else if (channel_info[chnl] <= 54)
+ else if (channel_all[chnl] <= 54)
group = 4;
- else if (channel_info[chnl] <= 64)
+ else if (channel_all[chnl] <= 64)
group = 5;
- else if (channel_info[chnl] <= 112)
+ else if (channel_all[chnl] <= 112)
group = 6;
- else if (channel_info[chnl] <= 126)
+ else if (channel_all[chnl] <= 126)
group = 7;
- else if (channel_info[chnl] <= 140)
+ else if (channel_all[chnl] <= 140)
group = 8;
- else if (channel_info[chnl] <= 153)
+ else if (channel_all[chnl] <= 153)
group = 9;
- else if (channel_info[chnl] <= 159)
+ else if (channel_all[chnl] <= 159)
group = 10;
else
group = 11;
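
The rtl8192de hunks above hoist the 59-entry channel table out of two functions into a single file-scope static const u8 channel_all[59]. Besides removing the duplicate, this avoids rebuilding the table on the stack at every call; a minimal sketch of the difference:

	static const u8 chan_tbl[] = { 1, 6, 11 };  /* emitted once, read-only */

	u8 lookup(u8 idx)
	{
		u8 stack_tbl[] = { 1, 6, 11 };      /* re-initialized per call */

		return chan_tbl[idx] + stack_tbl[idx];
	}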
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index 222abc41669c..6fd422fc822d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -16,7 +16,6 @@ static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw,
static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset,
u32 data);
-static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask);
static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw);
static bool _rtl92ee_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
static bool phy_config_bb_with_hdr_file(struct ieee80211_hw *hw,
@@ -46,7 +45,7 @@ u32 rtl92ee_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
@@ -68,7 +67,7 @@ void rtl92ee_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((originalvalue & (~bitmask)) | (data << bitshift));
}
@@ -93,7 +92,7 @@ u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
original_value = _rtl92ee_phy_rf_serial_read(hw , rfpath, regaddr);
- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
@@ -121,7 +120,7 @@ void rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw,
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92ee_phy_rf_serial_read(hw, rfpath, addr);
- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = (original_value & (~bitmask)) | (data << bitshift);
}
@@ -204,17 +203,6 @@ static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw,
pphyreg->rf3wire_offset, data_and_addr);
}
-static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
-}
-
bool rtl92ee_phy_mac_config(struct ieee80211_hw *hw)
{
return _rtl92ee_phy_config_mac_with_headerfile(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
index d5c0eb462315..f377531bc2bd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
@@ -14,18 +14,6 @@
#include "hw.h"
#include "table.h"
-static u32 _rtl92s_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
-
- return i;
-}
-
u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -35,7 +23,7 @@ u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
@@ -57,7 +45,7 @@ void rtl92s_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((originalvalue & (~bitmask)) | (data << bitshift));
}
@@ -165,7 +153,7 @@ u32 rtl92s_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
original_value = _rtl92s_phy_rf_serial_read(hw, rfpath, regaddr);
- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
spin_unlock(&rtlpriv->locks.rf_lock);
@@ -196,7 +184,7 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92s_phy_rf_serial_read(hw, rfpath,
regaddr);
- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((original_value & (~bitmask)) | (data << bitshift));
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
index d8260c7afe09..9afe6aace942 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
@@ -578,7 +578,7 @@ static void rtl8723e_dm_check_edca_turbo(struct ieee80211_hw *hw)
}
if (rtlpriv->btcoexist.bt_edca_dl != 0) {
- edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
+ edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
bt_change_edca = true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
index 54a3aec1dfa7..bc42bfc394a5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
@@ -50,7 +50,7 @@ u32 rtl8723e_phy_query_rf_reg(struct ieee80211_hw *hw,
rfpath, regaddr);
}
- bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
@@ -82,7 +82,7 @@ void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw,
original_value = rtl8723_phy_rf_serial_read(hw,
rfpath,
regaddr);
- bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data =
((original_value & (~bitmask)) |
(data << bitshift));
@@ -91,7 +91,7 @@ void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw,
rtl8723_phy_rf_serial_write(hw, rfpath, regaddr, data);
} else {
if (bitmask != RFREG_OFFSET_MASK) {
- bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data =
((original_value & (~bitmask)) |
(data << bitshift));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index aa8a0950fcea..d753e3d15bdb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -42,7 +42,7 @@ u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr);
- bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
@@ -70,7 +70,7 @@ void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path,
if (bitmask != RFREG_OFFSET_MASK) {
original_value = rtl8723_phy_rf_serial_read(hw, path,
regaddr);
- bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((original_value & (~bitmask)) |
(data << bitshift));
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
index aae14c68bf69..964292e82636 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
@@ -53,13 +53,9 @@ EXPORT_SYMBOL_GPL(rtl8723_phy_set_bb_reg);
u32 rtl8723_phy_calculate_bit_shift(u32 bitmask)
{
- u32 i;
+ u32 i = ffs(bitmask);
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
+ return i ? i - 1 : 32;
}
EXPORT_SYMBOL_GPL(rtl8723_phy_calculate_bit_shift);
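
The rewrite above is behavior-preserving: ffs() returns the 1-based index of the least significant set bit and 0 when no bit is set, so "i ? i - 1 : 32" matches the removed loop, which fell through with i == 32 for a zero mask. A sketch of the equivalence:

	/* Both forms agree for every 32-bit mask, including bitmask == 0. */
	u32 shift_loop(u32 bitmask)
	{
		u32 i;

		for (i = 0; i <= 31; i++)
			if ((bitmask >> i) & 0x1)
				break;
		return i;                 /* 32 when no bit is set */
	}

	u32 shift_ffs(u32 bitmask)
	{
		u32 i = ffs(bitmask);     /* 1-based LSB index, 0 for no bit */

		return i ? i - 1 : 32;
	}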
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 979e434a4e73..6a5d9d1b2947 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -27,7 +27,13 @@ static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset,
u32 data);
-static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask);
+static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask)
+{
+ if (WARN_ON_ONCE(!bitmask))
+ return 0;
+
+ return __ffs(bitmask);
+}
static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw);
/*static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);*/
static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
@@ -274,17 +280,6 @@ static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
rfpath, pphyreg->rf3wire_offset, data_and_addr);
}
-static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
-}
-
bool rtl8821ae_phy_mac_config(struct ieee80211_hw *hw)
{
bool rtstatus = 0;
@@ -1587,7 +1582,7 @@ static void _rtl8821ae_phy_txpower_by_rate_configuration(struct ieee80211_hw *hw
}
/* string is in decimal */
-static bool _rtl8812ae_get_integer_from_string(char *str, u8 *pint)
+static bool _rtl8812ae_get_integer_from_string(const char *str, u8 *pint)
{
u16 i = 0;
*pint = 0;
@@ -1605,18 +1600,6 @@ static bool _rtl8812ae_get_integer_from_string(char *str, u8 *pint)
return true;
}
-static bool _rtl8812ae_eq_n_byte(u8 *str1, u8 *str2, u32 num)
-{
- if (num == 0)
- return false;
- while (num > 0) {
- num--;
- if (str1[num] != str2[num])
- return false;
- }
- return true;
-}
-
static s8 _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw,
u8 band, u8 channel)
{
@@ -1643,10 +1626,11 @@ static s8 _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw,
return channel_index;
}
-static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregulation,
- u8 *pband, u8 *pbandwidth,
- u8 *prate_section, u8 *prf_path,
- u8 *pchannel, u8 *ppower_limit)
+static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw,
+ const char *pregulation,
+ const char *pband, const char *pbandwidth,
+ const char *prate_section, const char *prf_path,
+ const char *pchannel, const char *ppower_limit)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
@@ -1654,8 +1638,8 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul
u8 channel_index;
s8 power_limit = 0, prev_power_limit, ret;
- if (!_rtl8812ae_get_integer_from_string((char *)pchannel, &channel) ||
- !_rtl8812ae_get_integer_from_string((char *)ppower_limit,
+ if (!_rtl8812ae_get_integer_from_string(pchannel, &channel) ||
+ !_rtl8812ae_get_integer_from_string(ppower_limit,
&power_limit)) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Illegal index of pwr_lmt table [chnl %d][val %d]\n",
@@ -1665,42 +1649,42 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul
power_limit = power_limit > MAX_POWER_INDEX ?
MAX_POWER_INDEX : power_limit;
- if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("FCC"), 3))
+ if (strcmp(pregulation, "FCC") == 0)
regulation = 0;
- else if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("MKK"), 3))
+ else if (strcmp(pregulation, "MKK") == 0)
regulation = 1;
- else if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("ETSI"), 4))
+ else if (strcmp(pregulation, "ETSI") == 0)
regulation = 2;
- else if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("WW13"), 4))
+ else if (strcmp(pregulation, "WW13") == 0)
regulation = 3;
- if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("CCK"), 3))
+ if (strcmp(prate_section, "CCK") == 0)
rate_section = 0;
- else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("OFDM"), 4))
+ else if (strcmp(prate_section, "OFDM") == 0)
rate_section = 1;
- else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("HT"), 2) &&
- _rtl8812ae_eq_n_byte(prf_path, (u8 *)("1T"), 2))
+ else if (strcmp(prate_section, "HT") == 0 &&
+ strcmp(prf_path, "1T") == 0)
rate_section = 2;
- else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("HT"), 2) &&
- _rtl8812ae_eq_n_byte(prf_path, (u8 *)("2T"), 2))
+ else if (strcmp(prate_section, "HT") == 0 &&
+ strcmp(prf_path, "2T") == 0)
rate_section = 3;
- else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("VHT"), 3) &&
- _rtl8812ae_eq_n_byte(prf_path, (u8 *)("1T"), 2))
+ else if (strcmp(prate_section, "VHT") == 0 &&
+ strcmp(prf_path, "1T") == 0)
rate_section = 4;
- else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("VHT"), 3) &&
- _rtl8812ae_eq_n_byte(prf_path, (u8 *)("2T"), 2))
+ else if (strcmp(prate_section, "VHT") == 0 &&
+ strcmp(prf_path, "2T") == 0)
rate_section = 5;
- if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("20M"), 3))
+ if (strcmp(pbandwidth, "20M") == 0)
bandwidth = 0;
- else if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("40M"), 3))
+ else if (strcmp(pbandwidth, "40M") == 0)
bandwidth = 1;
- else if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("80M"), 3))
+ else if (strcmp(pbandwidth, "80M") == 0)
bandwidth = 2;
- else if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("160M"), 4))
+ else if (strcmp(pbandwidth, "160M") == 0)
bandwidth = 3;
- if (_rtl8812ae_eq_n_byte(pband, (u8 *)("2.4G"), 4)) {
+ if (strcmp(pband, "2.4G") == 0) {
ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw,
BAND_ON_2_4G,
channel);
@@ -1724,7 +1708,7 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul
regulation, bandwidth, rate_section, channel_index,
rtlphy->txpwr_limit_2_4g[regulation][bandwidth]
[rate_section][channel_index][RF90_PATH_A]);
- } else if (_rtl8812ae_eq_n_byte(pband, (u8 *)("5G"), 2)) {
+ } else if (strcmp(pband, "5G") == 0) {
ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw,
BAND_ON_5G,
channel);
@@ -1755,10 +1739,10 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul
}
static void _rtl8812ae_phy_config_bb_txpwr_lmt(struct ieee80211_hw *hw,
- u8 *regulation, u8 *band,
- u8 *bandwidth, u8 *rate_section,
- u8 *rf_path, u8 *channel,
- u8 *power_limit)
+ const char *regulation, const char *band,
+ const char *bandwidth, const char *rate_section,
+ const char *rf_path, const char *channel,
+ const char *power_limit)
{
_rtl8812ae_phy_set_txpower_limit(hw, regulation, band, bandwidth,
rate_section, rf_path, channel,
@@ -1771,7 +1755,7 @@ static void _rtl8821ae_phy_read_and_config_txpwr_lmt(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u32 i = 0;
u32 array_len;
- u8 **array;
+ const char **array;
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
array_len = RTL8812AE_TXPWR_LMT_ARRAY_LEN;
@@ -1785,13 +1769,13 @@ static void _rtl8821ae_phy_read_and_config_txpwr_lmt(struct ieee80211_hw *hw)
"\n");
for (i = 0; i < array_len; i += 7) {
- u8 *regulation = array[i];
- u8 *band = array[i+1];
- u8 *bandwidth = array[i+2];
- u8 *rate = array[i+3];
- u8 *rf_path = array[i+4];
- u8 *chnl = array[i+5];
- u8 *val = array[i+6];
+ const char *regulation = array[i];
+ const char *band = array[i+1];
+ const char *bandwidth = array[i+2];
+ const char *rate = array[i+3];
+ const char *rf_path = array[i+4];
+ const char *chnl = array[i+5];
+ const char *val = array[i+6];
_rtl8812ae_phy_config_bb_txpwr_lmt(hw, regulation, band,
bandwidth, rate, rf_path,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
index ed72a2aeb6c8..fcaaf664cbec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
@@ -2894,7 +2894,7 @@ u32 RTL8821AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_AGC_TAB_ARRAY);
* TXPWR_LMT.TXT
******************************************************************************/
-u8 *RTL8812AE_TXPWR_LMT[] = {
+const char *RTL8812AE_TXPWR_LMT[] = {
"FCC", "2.4G", "20M", "CCK", "1T", "01", "36",
"ETSI", "2.4G", "20M", "CCK", "1T", "01", "32",
"MKK", "2.4G", "20M", "CCK", "1T", "01", "32",
@@ -3463,7 +3463,7 @@ u8 *RTL8812AE_TXPWR_LMT[] = {
u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8812AE_TXPWR_LMT);
-u8 *RTL8821AE_TXPWR_LMT[] = {
+const char *RTL8821AE_TXPWR_LMT[] = {
"FCC", "2.4G", "20M", "CCK", "1T", "01", "32",
"ETSI", "2.4G", "20M", "CCK", "1T", "01", "32",
"MKK", "2.4G", "20M", "CCK", "1T", "01", "32",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
index 540159c25078..76c62b7c0fb2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
@@ -28,7 +28,7 @@ extern u32 RTL8821AE_AGC_TAB_ARRAY[];
extern u32 RTL8812AE_AGC_TAB_1TARRAYLEN;
extern u32 RTL8812AE_AGC_TAB_ARRAY[];
extern u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN;
-extern u8 *RTL8812AE_TXPWR_LMT[];
+extern const char *RTL8812AE_TXPWR_LMT[];
extern u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN;
-extern u8 *RTL8821AE_TXPWR_LMT[];
+extern const char *RTL8821AE_TXPWR_LMT[];
#endif
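
Typing the TXPWR_LMT tables as const char * lets the parser above drop its (u8 *) casts and the hand-rolled _rtl8812ae_eq_n_byte() in favor of plain strcmp(). The tables are flat arrays of string 7-tuples consumed in order; a sketch of the walk (set_limit is a stand-in name, not a driver function):

	static void walk_limits(void)
	{
		static const char *lmt[] = {
			"FCC", "2.4G", "20M", "CCK", "1T", "01", "36",
			"ETSI", "2.4G", "20M", "CCK", "1T", "01", "32",
		};
		size_t i;

		/* regulation, band, bandwidth, rate section, RF path,
		 * channel, power limit */
		for (i = 0; i + 6 < ARRAY_SIZE(lmt); i += 7)
			set_limit(lmt[i], lmt[i + 1], lmt[i + 2], lmt[i + 3],
				  lmt[i + 4], lmt[i + 5], lmt[i + 6]);
	}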
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 9bcb187d37dc..fc1548ad434f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -259,15 +259,15 @@ static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
? USB_HIGH_SPEED_BULK_SIZE
: USB_FULL_SPEED_BULK_SIZE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "USB Max Bulk-out Size=%d\n",
- rtlusb->max_bulk_out_size);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "USB Max Bulk-out Size=%d\n",
+ rtlusb->max_bulk_out_size);
for (i = 0; i < __RTL_TXQ_NUM; i++) {
u32 ep_num = rtlusb->ep_map.ep_mapping[i];
if (!ep_num) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Invalid endpoint map setting!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Invalid endpoint map setting!\n");
return -EINVAL;
}
}
@@ -337,10 +337,10 @@ static int _rtl_usb_init(struct ieee80211_hw *hw)
else if (usb_endpoint_dir_out(pep_desc))
rtlusb->out_ep_nums++;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "USB EP(0x%02x), MaxPacketSize=%d, Interval=%d\n",
- pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
- pep_desc->bInterval);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "USB EP(0x%02x), MaxPacketSize=%d, Interval=%d\n",
+ pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
+ pep_desc->bInterval);
}
if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num) {
pr_err("Too few input end points found\n");
@@ -931,7 +931,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
if (ieee80211_is_auth(fc)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
}
if (rtlpriv->psc.sw_ps_enabled) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 3bdda1c98339..abec9ceabe28 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -3230,4 +3230,11 @@ static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw,
return ieee80211_find_sta(mac->vif, mac_addr);
}
+static inline u32 calculate_bit_shift(u32 bitmask)
+{
+ if (WARN_ON_ONCE(!bitmask))
+ return 0;
+
+ return __ffs(bitmask);
+}
#endif
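
Unlike the rtl8723 helper above, the shared calculate_bit_shift() uses __ffs(), whose result is undefined for a zero argument, hence the WARN_ON_ONCE guard that returns 0 instead. Returning 0 keeps the extraction path well defined ((value & 0) >> 0 is simply 0) while the WARN flags the bogus mask. A usage sketch of the read/modify pattern it serves:

	/* Extract and insert a masked register field via the shared helper. */
	static u32 get_field(u32 reg_val, u32 mask)
	{
		return (reg_val & mask) >> calculate_bit_shift(mask);
	}

	static u32 set_field(u32 reg_val, u32 mask, u32 field)
	{
		return (reg_val & ~mask) | (field << calculate_bit_shift(mask));
	}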
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 5a906101498d..12d81ad8b702 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -658,9 +658,9 @@ static struct rtw_debugfs_priv rtw_debug_priv_rsvd_page = {
#define rtw_debugfs_add_core(name, mode, fopname, parent) \
do { \
rtw_debug_priv_ ##name.rtwdev = rtwdev; \
- if (!debugfs_create_file(#name, mode, \
+ if (IS_ERR(debugfs_create_file(#name, mode, \
parent, &rtw_debug_priv_ ##name,\
- &file_ops_ ##fopname)) \
+ &file_ops_ ##fopname))) \
pr_debug("Unable to initialize debugfs:%s\n", \
#name); \
} while (0)
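
The macro fix above accounts for debugfs's error convention: creation functions return an ERR_PTR()-encoded error rather than NULL on failure, so the old "!ptr" test could never trigger. Sketch of the check:

	static void add_entry(struct dentry *parent, void *priv,
			      const struct file_operations *fops)
	{
		struct dentry *d = debugfs_create_file("foo", 0444, parent,
						       priv, fops);

		if (IS_ERR(d))        /* debugfs returns ERR_PTR, never NULL */
			pr_debug("debugfs entry not created: %ld\n",
				 PTR_ERR(d));
	}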
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index b61b073031e5..94e69e97d5f5 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -210,7 +210,7 @@ static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
if (ret)
- return -EBUSY;
+ return ret;
idx++;
} while (1);
@@ -224,6 +224,7 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
struct rtw_pwr_seq_cmd **pwr_seq;
u8 rpwm;
bool cur_pwr;
+ int ret;
rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
@@ -245,8 +246,9 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
return -EALREADY;
pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
- if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
- return -EINVAL;
+ ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);
+ if (ret)
+ return ret;
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index e5e3605bb693..efcfeccee15f 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -206,9 +206,9 @@ static void rtw_ops_configure_filter(struct ieee80211_hw *hw,
if (changed_flags & FIF_ALLMULTI) {
if (*new_flags & FIF_ALLMULTI)
- rtwdev->hal.rcr |= BIT_AM | BIT_AB;
+ rtwdev->hal.rcr |= BIT_AM;
else
- rtwdev->hal.rcr &= ~(BIT_AM | BIT_AB);
+ rtwdev->hal.rcr &= ~(BIT_AM);
}
if (changed_flags & FIF_FCSFAIL) {
if (*new_flags & FIF_FCSFAIL)
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index c8f8fe5497a8..ace016967ff0 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -700,8 +700,8 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
struct rndis_query *get;
struct rndis_query_c *get_c;
} u;
- int ret, buflen;
- int resplen, respoffs, copylen;
+ int ret;
+ size_t buflen, resplen, respoffs, copylen;
buflen = *len + sizeof(*u.get);
if (buflen < CONTROL_BUFFER_SIZE)
@@ -736,22 +736,15 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
if (respoffs > buflen) {
/* Device returned data offset outside buffer, error. */
- netdev_dbg(dev->net, "%s(%s): received invalid "
- "data offset: %d > %d\n", __func__,
- oid_to_string(oid), respoffs, buflen);
+ netdev_dbg(dev->net,
+ "%s(%s): received invalid data offset: %zu > %zu\n",
+ __func__, oid_to_string(oid), respoffs, buflen);
ret = -EINVAL;
goto exit_unlock;
}
- if ((resplen + respoffs) > buflen) {
- /* Device would have returned more data if buffer would
- * have been big enough. Copy just the bits that we got.
- */
- copylen = buflen - respoffs;
- } else {
- copylen = resplen;
- }
+ copylen = min(resplen, buflen - respoffs);
if (copylen > *len)
copylen = *len;
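Moving the length bookkeeping to size_t and clamping with min() closes the arithmetic hole here: with plain int, a device-controlled resplen/respoffs pair could make resplen + respoffs overflow and slip past the old check, after which far too much was copied. Once respoffs > buflen has been rejected, buflen - respoffs cannot underflow and min() yields a safe copy length. A worked userspace example with values a misbehaving device might return:

#include <stddef.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))	/* the kernel min() also type-checks */

int main(void)
{
	size_t buflen = 256, respoffs = 100, resplen = 500, len = 64;
	size_t copylen;

	if (respoffs > buflen)			/* untrusted, device-supplied offset */
		return 1;
	copylen = MIN(resplen, buflen - respoffs);	/* 156: only what we hold */
	if (copylen > len)
		copylen = len;			/* 64: caller's buffer is smaller */
	printf("copylen=%zu\n", copylen);
	return 0;
}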
diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c
index c8ba148f8c6c..acf4d8cb4b47 100644
--- a/drivers/net/wireless/rsi/rsi_91x_coex.c
+++ b/drivers/net/wireless/rsi/rsi_91x_coex.c
@@ -160,6 +160,7 @@ int rsi_coex_attach(struct rsi_common *common)
rsi_coex_scheduler_thread,
"Coex-Tx-Thread")) {
rsi_dbg(ERR_ZONE, "%s: Unable to init tx thrd\n", __func__);
+ kfree(coex_cb);
return -EINVAL;
}
return 0;
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index c6c29034b2ea..a939b552a8e4 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -466,7 +466,9 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
tid, 0);
}
}
- if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+
+ if (IEEE80211_SKB_CB(skb)->control.flags &
+ IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
q_num = MGMT_SOFT_Q;
skb->priority = q_num;
}
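Testing IEEE80211_TX_CTRL_PORT_CTRL_PROTO instead of skb->protocol follows mac80211's contract: the stack flags every controlled-port frame it hands down, including frames whose ethertype is not 0x888E (ETH_P_PAE), for example when a different control-port protocol is negotiated over nl80211. A sketch of the idea in plain C, with illustrative names rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define TX_CTRL_PORT_CTRL_PROTO (1u << 0)	/* stand-in for the mac80211 flag */

struct tx_info { unsigned int flags; unsigned short ethertype; };

static bool is_ctrl_port_frame(const struct tx_info *info)
{
	/* Trust the per-packet flag, not the ethertype. */
	return info->flags & TX_CTRL_PORT_CTRL_PROTO;
}

int main(void)
{
	struct tx_info f = { .flags = TX_CTRL_PORT_CTRL_PROTO, .ethertype = 0x88b4 };
	printf("%d\n", is_ctrl_port_frame(&f));	/* 1, despite a non-PAE ethertype */
	return 0;
}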
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 7d0b44fd5690..062c5da74104 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -162,12 +162,16 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
u8 header_size;
u8 vap_id = 0;
u8 dword_align_bytes;
+ bool tx_eapol;
u16 seq_num;
info = IEEE80211_SKB_CB(skb);
vif = info->control.vif;
tx_params = (struct skb_info *)info->driver_data;
+ tx_eapol = IEEE80211_SKB_CB(skb)->control.flags &
+ IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+
header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc);
if (header_size > skb_headroom(skb)) {
rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
@@ -231,7 +235,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
}
}
- if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+ if (tx_eapol) {
rsi_dbg(INFO_ZONE, "*** Tx EAPOL ***\n");
data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE);
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 4fe837090cda..22b0567ad826 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1479,9 +1479,6 @@ static void rsi_shutdown(struct device *dev)
if (sdev->write_fail)
rsi_dbg(INFO_ZONE, "###### Device is not ready #######\n");
- if (rsi_set_sdio_pm_caps(adapter))
- rsi_dbg(INFO_ZONE, "Setting power management caps failed\n");
-
rsi_dbg(INFO_ZONE, "***** RSI module shut down *****\n");
}
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index c46b044b7f7b..988581cc134b 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -120,8 +120,7 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
++priv->scan.n_ssids;
}
- if (frame.skb)
- dev_kfree_skb(frame.skb);
+ dev_kfree_skb(frame.skb);
mutex_unlock(&priv->conf_mutex);
queue_work(priv->workqueue, &priv->scan.work);
return 0;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 122d36439319..e6505624f0c2 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -134,8 +134,8 @@ static const struct {
/**
* iw_valid_channel - validate channel in regulatory domain
- * @reg_comain - regulatory domain
- * @channel - channel to validate
+ * @reg_domain: regulatory domain
+ * @channel: channel to validate
*
* Returns 0 if invalid in the specified regulatory domain, non-zero if valid.
*/
@@ -154,7 +154,7 @@ static int iw_valid_channel(int reg_domain, int channel)
/**
* iw_default_channel - get default channel for a regulatory domain
- * @reg_comain - regulatory domain
+ * @reg_domain: regulatory domain
*
* Returns the default channel for a regulatory domain
*/
@@ -237,6 +237,7 @@ static int wl3501_get_flash_mac_addr(struct wl3501_card *this)
/**
* wl3501_set_to_wla - Move 'size' bytes from PC to card
+ * @this: Card
* @dest: Card addressing space
* @src: PC addressing space
* @size: Bytes to move
@@ -259,6 +260,7 @@ static void wl3501_set_to_wla(struct wl3501_card *this, u16 dest, void *src,
/**
* wl3501_get_from_wla - Move 'size' bytes from card to PC
+ * @this: Card
* @src: Card addressing space
* @dest: PC addressing space
* @size: Bytes to move
@@ -455,12 +457,10 @@ out:
/**
* wl3501_send_pkt - Send a packet.
- * @this - card
- *
- * Send a packet.
- *
- * data = Ethernet raw frame. (e.g. data[0] - data[5] is Dest MAC Addr,
+ * @this: Card
+ * @data: Ethernet raw frame. (e.g. data[0] - data[5] is Dest MAC Addr,
* data[6] - data[11] is Src MAC Addr)
+ * @len: Packet length
* Ref: IEEE 802.11
*/
static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
@@ -723,7 +723,7 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
/**
* wl3501_block_interrupt - Mask interrupt from SUTRO
- * @this - card
+ * @this: Card
*
* Mask interrupt from SUTRO. (i.e. SUTRO cannot interrupt the HOST)
* Return: 1 if interrupt is originally enabled
@@ -740,7 +740,7 @@ static int wl3501_block_interrupt(struct wl3501_card *this)
/**
* wl3501_unblock_interrupt - Enable interrupt from SUTRO
- * @this - card
+ * @this: Card
*
* Enable interrupt from SUTRO. (i.e. SUTRO can interrupt the HOST)
* Return: 1 if interrupt is originally enabled
@@ -1114,8 +1114,8 @@ static inline void wl3501_ack_interrupt(struct wl3501_card *this)
/**
* wl3501_interrupt - Hardware interrupt from card.
- * @irq - Interrupt number
- * @dev_id - net_device
+ * @irq: Interrupt number
+ * @dev_id: net_device
*
* We must acknowledge the interrupt as soon as possible, and block the
* interrupt from the same card immediately to prevent re-entry.
@@ -1251,7 +1251,7 @@ static int wl3501_close(struct net_device *dev)
/**
* wl3501_reset - Reset the SUTRO.
- * @dev - network device
+ * @dev: network device
*
* It is almost the same as wl3501_open(). In fact, we may just wl3501_close()
* and wl3501_open() again, but I wouldn't like to free_irq() when the driver
@@ -1328,7 +1328,7 @@ static netdev_tx_t wl3501_hard_start_xmit(struct sk_buff *skb,
} else {
++dev->stats.tx_packets;
dev->stats.tx_bytes += skb->len;
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
if (this->tx_buffer_cnt < 2)
netif_stop_queue(dev);
@@ -1414,7 +1414,7 @@ static struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev)
/**
* wl3501_detach - deletes a driver "instance"
- * @link - FILL_IN
+ * @link: FILL_IN
*
* This deletes a driver "instance". The device is de-registered with Card
* Services. If it has been released, all local data structures are freed.
@@ -1435,9 +1435,7 @@ static void wl3501_detach(struct pcmcia_device *link)
wl3501_release(link);
unregister_netdev(dev);
-
- if (link->priv)
- free_netdev(link->priv);
+ free_netdev(dev);
}
static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info,
@@ -1864,6 +1862,7 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
{
struct net_device *dev;
struct wl3501_card *this;
+ int ret;
/* The io structure describes IO port mapping */
p_dev->resource[0]->end = 16;
@@ -1875,8 +1874,7 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
dev = alloc_etherdev(sizeof(struct wl3501_card));
if (!dev)
- goto out_link;
-
+ return -ENOMEM;
dev->netdev_ops = &wl3501_netdev_ops;
dev->watchdog_timeo = 5 * HZ;
@@ -1889,9 +1887,15 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
netif_stop_queue(dev);
p_dev->priv = dev;
- return wl3501_config(p_dev);
-out_link:
- return -ENOMEM;
+ ret = wl3501_config(p_dev);
+ if (ret)
+ goto out_free_etherdev;
+
+ return 0;
+
+out_free_etherdev:
+ free_netdev(dev);
+ return ret;
}
static int wl3501_config(struct pcmcia_device *link)
@@ -1947,8 +1951,7 @@ static int wl3501_config(struct pcmcia_device *link)
goto failed;
}
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = ((char *)&this->mac_addr)[i];
+ eth_hw_addr_set(dev, this->mac_addr);
/* print probe information */
printk(KERN_INFO "%s: wl3501 @ 0x%3.3x, IRQ %d, "
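The byte-wise MAC copy above is replaced by eth_hw_addr_set(), the accessor drivers have to use now that the networking core treats netdev->dev_addr as read-only outside of it. A userspace model of what the helper boils down to for an Ethernet device (assumption: essentially a bounds-known memcpy behind an API boundary):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct net_dev_model { unsigned char dev_addr[ETH_ALEN]; };

static void eth_hw_addr_set_model(struct net_dev_model *dev,
				  const unsigned char *addr)
{
	memcpy(dev->dev_addr, addr, ETH_ALEN);	/* replaces the open-coded loop */
}

int main(void)
{
	struct net_dev_model dev;
	const unsigned char mac[ETH_ALEN] = { 0x00, 0x60, 0x1d, 0x12, 0x34, 0x56 };

	eth_hw_addr_set_model(&dev, mac);
	printf("%02x:...:%02x\n", dev.dev_addr[0], dev.dev_addr[5]);
	return 0;
}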
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index f7e746f1c9fb..56ab292cacc5 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -48,7 +48,6 @@
#include <linux/debugfs.h>
typedef unsigned int pending_ring_idx_t;
-#define INVALID_PENDING_RING_IDX (~0U)
struct pending_tx_info {
struct xen_netif_tx_request req; /* tx request */
@@ -82,8 +81,6 @@ struct xenvif_rx_meta {
/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF
-#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE
-
#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
/* The maximum number of frags is derived from the size of a grant (same
@@ -169,7 +166,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
- struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
+ struct gnttab_copy tx_copy_ops[2 * MAX_PENDING_REQS];
struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
/* passed to gnttab_[un]map_refs with pages under (un)mapping */
@@ -364,11 +361,6 @@ void xenvif_free(struct xenvif *vif);
int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);
-int xenvif_schedulable(struct xenvif *vif);
-
-int xenvif_queue_stopped(struct xenvif_queue *queue);
-void xenvif_wake_queue(struct xenvif_queue *queue);
-
/* (Un)Map communication rings. */
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
@@ -391,17 +383,13 @@ int xenvif_dealloc_kthread(void *data);
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
-void xenvif_rx_action(struct xenvif_queue *queue);
-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
+bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
/* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
-/* Unmap a pending page and release it back to the guest */
-void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
-
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
return MAX_PENDING_REQS -
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 809089587301..91b35093f2aa 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -41,7 +41,6 @@
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
-#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT 64
/* Number of bytes allowed on the internal guest Rx queue. */
@@ -70,7 +69,7 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
wake_up(&queue->dealloc_wq);
}
-int xenvif_schedulable(struct xenvif *vif)
+static int xenvif_schedulable(struct xenvif *vif)
{
return netif_running(vif->dev) &&
test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
@@ -178,20 +177,6 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int xenvif_queue_stopped(struct xenvif_queue *queue)
-{
- struct net_device *dev = queue->vif->dev;
- unsigned int id = queue->id;
- return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
-}
-
-void xenvif_wake_queue(struct xenvif_queue *queue)
-{
- struct net_device *dev = queue->vif->dev;
- unsigned int id = queue->id;
- netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
-}
-
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@@ -269,14 +254,16 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
skb_clear_hash(skb);
- xenvif_rx_queue_tail(queue, skb);
+ if (!xenvif_rx_queue_tail(queue, skb))
+ goto drop;
+
xenvif_kick_thread(queue);
return NETDEV_TX_OK;
drop:
vif->dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -538,8 +525,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
dev->features = dev->hw_features | NETIF_F_RXCSUM;
dev->ethtool_ops = &xenvif_ethtool_ops;
- dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
-
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 995566a2785f..03ef772d02fd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -97,13 +97,14 @@ module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
- u8 status);
+ s8 status);
static void make_tx_response(struct xenvif_queue *queue,
- struct xen_netif_tx_request *txp,
+ const struct xen_netif_tx_request *txp,
unsigned int extra_count,
- s8 st);
-static void push_tx_responses(struct xenvif_queue *queue);
+ s8 status);
+
+static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
static inline int tx_work_todo(struct xenvif_queue *queue);
@@ -199,13 +200,9 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
unsigned int extra_count, RING_IDX end)
{
RING_IDX cons = queue->tx.req_cons;
- unsigned long flags;
do {
- spin_lock_irqsave(&queue->response_lock, flags);
make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
- push_tx_responses(queue);
- spin_unlock_irqrestore(&queue->response_lock, flags);
if (cons == end)
break;
RING_COPY_REQUEST(&queue->tx, cons++, txp);
@@ -323,10 +320,14 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
struct xenvif_tx_cb {
- u16 pending_idx;
+ u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
+ u8 copy_count;
+ u32 split_mask;
};
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
+#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
u16 pending_idx,
@@ -349,6 +350,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
struct sk_buff *skb =
alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
+
+ BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
if (unlikely(skb == NULL))
return NULL;
@@ -361,52 +364,152 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
return skb;
}
-static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
- struct sk_buff *skb,
- struct xen_netif_tx_request *txp,
- struct gnttab_map_grant_ref *gop,
- unsigned int frag_overflow,
- struct sk_buff *nskb)
+static void xenvif_get_requests(struct xenvif_queue *queue,
+ struct sk_buff *skb,
+ struct xen_netif_tx_request *first,
+ struct xen_netif_tx_request *txfrags,
+ unsigned *copy_ops,
+ unsigned *map_ops,
+ unsigned int frag_overflow,
+ struct sk_buff *nskb,
+ unsigned int extra_count,
+ unsigned int data_len)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
- u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
- int start;
+ u16 pending_idx;
pending_ring_idx_t index;
unsigned int nr_slots;
+ struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
+ struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
+ struct xen_netif_tx_request *txp = first;
+
+ nr_slots = shinfo->nr_frags + frag_overflow + 1;
+
+ copy_count(skb) = 0;
+ XENVIF_TX_CB(skb)->split_mask = 0;
+
+ /* Create copy ops for exactly data_len bytes into the skb head. */
+ __skb_put(skb, data_len);
+ while (data_len > 0) {
+ int amount = data_len > txp->size ? txp->size : data_len;
+ bool split = false;
+
+ cop->source.u.ref = txp->gref;
+ cop->source.domid = queue->vif->domid;
+ cop->source.offset = txp->offset;
+
+ cop->dest.domid = DOMID_SELF;
+ cop->dest.offset = (offset_in_page(skb->data +
+ skb_headlen(skb) -
+ data_len)) & ~XEN_PAGE_MASK;
+ cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
+ - data_len);
+
+ /* Don't cross local page boundary! */
+ if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
+ amount = XEN_PAGE_SIZE - cop->dest.offset;
+ XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
+ split = true;
+ }
+
+ cop->len = amount;
+ cop->flags = GNTCOPY_source_gref;
+
+ index = pending_index(queue->pending_cons);
+ pending_idx = queue->pending_ring[index];
+ callback_param(queue, pending_idx).ctx = NULL;
+ copy_pending_idx(skb, copy_count(skb)) = pending_idx;
+ if (!split)
+ copy_count(skb)++;
- nr_slots = shinfo->nr_frags;
+ cop++;
+ data_len -= amount;
- /* Skip first skb fragment if it is on same page as header fragment. */
- start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+ if (amount == txp->size) {
+ /* The copy op covered the full tx_request */
+
+ memcpy(&queue->pending_tx_info[pending_idx].req,
+ txp, sizeof(*txp));
+ queue->pending_tx_info[pending_idx].extra_count =
+ (txp == first) ? extra_count : 0;
+
+ if (txp == first)
+ txp = txfrags;
+ else
+ txp++;
+ queue->pending_cons++;
+ nr_slots--;
+ } else {
+ /* The copy op partially covered the tx_request.
+ * The remainder will be mapped or copied in the next
+ * iteration.
+ */
+ txp->offset += amount;
+ txp->size -= amount;
+ }
+ }
+
+ for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
+ nr_slots--) {
+ if (unlikely(!txp->size)) {
+ make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
+ ++txp;
+ continue;
+ }
- for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
- shinfo->nr_frags++, txp++, gop++) {
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
- xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
+ xenvif_tx_create_map_op(queue, pending_idx, txp,
+ txp == first ? extra_count : 0, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+ ++shinfo->nr_frags;
+ ++gop;
+
+ if (txp == first)
+ txp = txfrags;
+ else
+ txp++;
}
- if (frag_overflow) {
+ if (nr_slots > 0) {
shinfo = skb_shinfo(nskb);
frags = shinfo->frags;
- for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
- shinfo->nr_frags++, txp++, gop++) {
+ for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
+ if (unlikely(!txp->size)) {
+ make_tx_response(queue, txp, 0,
+ XEN_NETIF_RSP_OKAY);
+ continue;
+ }
+
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
gop);
frag_set_pending_idx(&frags[shinfo->nr_frags],
pending_idx);
+ ++shinfo->nr_frags;
+ ++gop;
}
- skb_shinfo(skb)->frag_list = nskb;
+ if (shinfo->nr_frags) {
+ skb_shinfo(skb)->frag_list = nskb;
+ nskb = NULL;
+ }
}
- return gop;
+ if (nskb) {
+ /* A frag_list skb was allocated but it is no longer needed
+ * because enough slots were converted to copy ops above or some
+ * were empty.
+ */
+ kfree_skb(nskb);
+ }
+
+ (*copy_ops) = cop - queue->tx_copy_ops;
+ (*map_ops) = gop - queue->tx_map_ops;
}
static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
@@ -442,7 +545,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
struct gnttab_copy **gopp_copy)
{
struct gnttab_map_grant_ref *gop_map = *gopp_map;
- u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+ u16 pending_idx;
/* This always points to the shinfo of the skb being checked, which
* could be either the first or the one on the frag_list
*/
@@ -453,24 +556,44 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
struct skb_shared_info *first_shinfo = NULL;
int nr_frags = shinfo->nr_frags;
const bool sharedslot = nr_frags &&
- frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
- int i, err;
+ frag_get_pending_idx(&shinfo->frags[0]) ==
+ copy_pending_idx(skb, copy_count(skb) - 1);
+ int i, err = 0;
- /* Check status of header. */
- err = (*gopp_copy)->status;
- if (unlikely(err)) {
- if (net_ratelimit())
- netdev_dbg(queue->vif->dev,
- "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
- (*gopp_copy)->status,
- pending_idx,
- (*gopp_copy)->source.u.ref);
- /* The first frag might still have this slot mapped */
- if (!sharedslot)
- xenvif_idx_release(queue, pending_idx,
- XEN_NETIF_RSP_ERROR);
+ for (i = 0; i < copy_count(skb); i++) {
+ int newerr;
+
+ /* Check status of header. */
+ pending_idx = copy_pending_idx(skb, i);
+
+ newerr = (*gopp_copy)->status;
+
+ /* Split copies need to be handled together. */
+ if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
+ (*gopp_copy)++;
+ if (!newerr)
+ newerr = (*gopp_copy)->status;
+ }
+ if (likely(!newerr)) {
+ /* The first frag might still have this slot mapped */
+ if (i < copy_count(skb) - 1 || !sharedslot)
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_OKAY);
+ } else {
+ err = newerr;
+ if (net_ratelimit())
+ netdev_dbg(queue->vif->dev,
+ "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
+ (*gopp_copy)->status,
+ pending_idx,
+ (*gopp_copy)->source.u.ref);
+ /* The first frag might still have this slot mapped */
+ if (i < copy_count(skb) - 1 || !sharedslot)
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_ERROR);
+ }
+ (*gopp_copy)++;
}
- (*gopp_copy)++;
check_frags:
for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -517,14 +640,6 @@ check_frags:
if (err)
continue;
- /* First error: if the header haven't shared a slot with the
- * first frag, release it as well.
- */
- if (!sharedslot)
- xenvif_idx_release(queue,
- XENVIF_TX_CB(skb)->pending_idx,
- XEN_NETIF_RSP_OKAY);
-
/* Invalidate preceding fragments of this skb. */
for (j = 0; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
@@ -794,7 +909,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
unsigned *copy_ops,
unsigned *map_ops)
{
- struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
struct sk_buff *skb, *nskb;
int ret;
unsigned int frag_overflow;
@@ -860,7 +974,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
(ret == 0) ?
XEN_NETIF_RSP_OKAY :
XEN_NETIF_RSP_ERROR);
- push_tx_responses(queue);
continue;
}
@@ -872,12 +985,15 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
make_tx_response(queue, &txreq, extra_count,
XEN_NETIF_RSP_OKAY);
- push_tx_responses(queue);
continue;
}
+ data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
+ XEN_NETBACK_TX_COPY_LEN : txreq.size;
+
ret = xenvif_count_requests(queue, &txreq, extra_count,
txfrags, work_to_do);
+
if (unlikely(ret < 0))
break;
@@ -892,10 +1008,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
- netdev_err(queue->vif->dev,
- "txreq.offset: %u, size: %u, end: %lu\n",
- txreq.offset, txreq.size,
- (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
+ netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
+ txreq.offset, txreq.size);
xenvif_fatal_tx_err(queue->vif);
break;
}
@@ -903,9 +1017,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
index = pending_index(queue->pending_cons);
pending_idx = queue->pending_ring[index];
- data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
- ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
- XEN_NETBACK_TX_COPY_LEN : txreq.size;
+ if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
+ data_len = txreq.size;
skb = xenvif_alloc_skb(data_len);
if (unlikely(skb == NULL)) {
@@ -916,8 +1029,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
}
skb_shinfo(skb)->nr_frags = ret;
- if (data_len < txreq.size)
- skb_shinfo(skb)->nr_frags++;
/* At this point shinfo->nr_frags is in fact the number of
* slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
*/
@@ -979,54 +1090,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
type);
}
- XENVIF_TX_CB(skb)->pending_idx = pending_idx;
-
- __skb_put(skb, data_len);
- queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
- queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
- queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
-
- queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
- virt_to_gfn(skb->data);
- queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
- queue->tx_copy_ops[*copy_ops].dest.offset =
- offset_in_page(skb->data) & ~XEN_PAGE_MASK;
-
- queue->tx_copy_ops[*copy_ops].len = data_len;
- queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
-
- (*copy_ops)++;
-
- if (data_len < txreq.size) {
- frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
- pending_idx);
- xenvif_tx_create_map_op(queue, pending_idx, &txreq,
- extra_count, gop);
- gop++;
- } else {
- frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
- INVALID_PENDING_IDX);
- memcpy(&queue->pending_tx_info[pending_idx].req,
- &txreq, sizeof(txreq));
- queue->pending_tx_info[pending_idx].extra_count =
- extra_count;
- }
-
- queue->pending_cons++;
-
- gop = xenvif_get_requests(queue, skb, txfrags, gop,
- frag_overflow, nskb);
+ xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
+ map_ops, frag_overflow, nskb, extra_count,
+ data_len);
__skb_queue_tail(&queue->tx_queue, skb);
queue->tx.req_cons = idx;
- if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
+ if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
(*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
break;
}
- (*map_ops) = gop - queue->tx_map_ops;
return;
}
@@ -1105,9 +1181,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
struct xen_netif_tx_request *txp;
u16 pending_idx;
- unsigned data_len;
- pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+ pending_idx = copy_pending_idx(skb, 0);
txp = &queue->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
@@ -1126,18 +1201,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
continue;
}
- data_len = skb->len;
- callback_param(queue, pending_idx).ctx = NULL;
- if (data_len < txp->size) {
- /* Append the packet payload as a fragment. */
- txp->offset += data_len;
- txp->size -= data_len;
- } else {
- /* Schedule a response immediately. */
- xenvif_idx_release(queue, pending_idx,
- XEN_NETIF_RSP_OKAY);
- }
-
if (txp->flags & XEN_NETTXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
else if (txp->flags & XEN_NETTXF_data_validated)
@@ -1323,7 +1386,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
- unsigned nr_mops, nr_cops = 0;
+ unsigned nr_mops = 0, nr_cops = 0;
int work_done, ret;
if (unlikely(!tx_work_todo(queue)))
@@ -1356,8 +1419,35 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
return work_done;
}
+static void _make_tx_response(struct xenvif_queue *queue,
+ const struct xen_netif_tx_request *txp,
+ unsigned int extra_count,
+ s8 status)
+{
+ RING_IDX i = queue->tx.rsp_prod_pvt;
+ struct xen_netif_tx_response *resp;
+
+ resp = RING_GET_RESPONSE(&queue->tx, i);
+ resp->id = txp->id;
+ resp->status = status;
+
+ while (extra_count-- != 0)
+ RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
+
+ queue->tx.rsp_prod_pvt = ++i;
+}
+
+static void push_tx_responses(struct xenvif_queue *queue)
+{
+ int notify;
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+ if (notify)
+ notify_remote_via_irq(queue->tx_irq);
+}
+
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
- u8 status)
+ s8 status)
{
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t index;
@@ -1367,8 +1457,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
spin_lock_irqsave(&queue->response_lock, flags);
- make_tx_response(queue, &pending_tx_info->req,
- pending_tx_info->extra_count, status);
+ _make_tx_response(queue, &pending_tx_info->req,
+ pending_tx_info->extra_count, status);
/* Release the pending index before pushing the Tx response so
* it's available before a new Tx request is pushed by the
@@ -1382,35 +1472,22 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
spin_unlock_irqrestore(&queue->response_lock, flags);
}
-
static void make_tx_response(struct xenvif_queue *queue,
- struct xen_netif_tx_request *txp,
+ const struct xen_netif_tx_request *txp,
unsigned int extra_count,
- s8 st)
+ s8 status)
{
- RING_IDX i = queue->tx.rsp_prod_pvt;
- struct xen_netif_tx_response *resp;
-
- resp = RING_GET_RESPONSE(&queue->tx, i);
- resp->id = txp->id;
- resp->status = st;
-
- while (extra_count-- != 0)
- RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
+ unsigned long flags;
- queue->tx.rsp_prod_pvt = ++i;
-}
+ spin_lock_irqsave(&queue->response_lock, flags);
-static void push_tx_responses(struct xenvif_queue *queue)
-{
- int notify;
+ _make_tx_response(queue, txp, extra_count, status);
+ push_tx_responses(queue);
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
- if (notify)
- notify_remote_via_irq(queue->tx_irq);
+ spin_unlock_irqrestore(&queue->response_lock, flags);
}
-void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
+static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
int ret;
struct gnttab_unmap_grant_ref tx_unmap_op;
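The core of the xenvif_get_requests() rework above: the skb head is now always filled by grant copies, and a copy whose destination would cross a local XEN_PAGE_SIZE boundary is split in two, with the pair recorded in split_mask so xenvif_tx_check_gop() can judge both ops against a single pending slot. A worked userspace example of just the splitting arithmetic (the real code additionally clamps each op to the current tx_request's size and derives the offset from the skb head):

#include <stdio.h>

#define XEN_PAGE_SIZE 4096u

int main(void)
{
	unsigned int dest_off = 4000, data_len = 300;
	unsigned int ops = 0, slot = 0, split_mask = 0;

	while (data_len) {
		unsigned int amount = data_len;
		int split = 0;

		if (dest_off + amount > XEN_PAGE_SIZE) {
			amount = XEN_PAGE_SIZE - dest_off;	/* 96-byte first half */
			split_mask |= 1u << slot;	/* the pair shares one slot */
			split = 1;
		}
		printf("op %u (slot %u): %u bytes at offset %u\n",
		       ops, slot, amount, dest_off);
		dest_off = (dest_off + amount) % XEN_PAGE_SIZE;
		data_len -= amount;
		ops++;
		if (!split)
			slot++;		/* mirrors copy_count(skb)++ */
	}
	printf("ops=%u slots=%u split_mask=0x%x\n", ops, slot, split_mask);
	return 0;
}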
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index 85a5a622ec18..ab216970137c 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -82,9 +82,10 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
return false;
}
-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
unsigned long flags;
+ bool ret = true;
spin_lock_irqsave(&queue->rx_queue.lock, flags);
@@ -92,8 +93,7 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
struct net_device *dev = queue->vif->dev;
netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
- kfree_skb(skb);
- queue->vif->dev->stats.rx_dropped++;
+ ret = false;
} else {
if (skb_queue_empty(&queue->rx_queue))
xenvif_update_needed_slots(queue, skb);
@@ -104,6 +104,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
}
spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+
+ return ret;
}
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
@@ -473,7 +475,7 @@ static void xenvif_rx_skb(struct xenvif_queue *queue)
#define RX_BATCH_SIZE 64
-void xenvif_rx_action(struct xenvif_queue *queue)
+static void xenvif_rx_action(struct xenvif_queue *queue)
{
struct sk_buff_head completed_skbs;
unsigned int work_done = 0;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 44e353dd2ba1..43bd881ab3dd 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -202,10 +202,10 @@ static int netback_remove(struct xenbus_device *dev)
set_backend_state(be, XenbusStateClosed);
unregister_hotplug_status_watch(be);
+ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
xen_unregister_watchers(be->vif);
- xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
xenvif_free(be->vif);
be->vif = NULL;
}
@@ -435,7 +435,6 @@ static void backend_disconnect(struct backend_info *be)
unsigned int queue_index;
xen_unregister_watchers(vif);
- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 810fa9968be7..9ae0903bc225 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1621,6 +1621,12 @@ static int netfront_resume(struct xenbus_device *dev)
netif_tx_unlock_bh(info->netdev);
xennet_disconnect_backend(info);
+
+ rtnl_lock();
+ if (info->queues)
+ xennet_destroy_queues(info);
+ rtnl_unlock();
+
return 0;
}
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index ad0abb1f0bae..7305f1a06e97 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -255,6 +255,9 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev,
len, sizeof(**fw_vsc_cfg),
GFP_KERNEL);
+ if (!*fw_vsc_cfg)
+ goto alloc_err;
+
r = device_property_read_u8_array(dev, FDP_DP_FW_VSC_CFG_NAME,
*fw_vsc_cfg, len);
@@ -268,6 +271,7 @@ vsc_read_err:
*fw_vsc_cfg = NULL;
}
+alloc_err:
dev_dbg(dev, "Clock type: %d, clock frequency: %d, VSC: %s",
*clock_type, *clock_freq, *fw_vsc_cfg != NULL ? "yes" : "no");
}
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 919b4d2f5d8b..fa6db971bee9 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -151,10 +151,15 @@ static int nfcmrvl_i2c_nci_send(struct nfcmrvl_private *priv,
ret = -EREMOTEIO;
} else
ret = 0;
+ }
+
+ if (ret) {
kfree_skb(skb);
+ return ret;
}
- return ret;
+ consume_skb(skb);
+ return 0;
}
static void nfcmrvl_i2c_nci_update_config(struct nfcmrvl_private *priv,
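This hunk, like the s3fwrn5 one further down, pins down skb ownership on the send path: the buffer is freed exactly once on every exit, and the success path now uses consume_skb(), which unlike kfree_skb() is not accounted as a drop. A userspace model of the ownership rule:

#include <stdlib.h>

struct buf_model { void *data; };

static int send_model(struct buf_model *b, int write_ret)
{
	if (write_ret < 0) {
		free(b->data);	/* kernel: kfree_skb() - error path, a drop */
		return write_ret;
	}
	free(b->data);		/* kernel: consume_skb() - delivered normally */
	return 0;
}

int main(void)
{
	struct buf_model b = { .data = malloc(16) };
	return send_model(&b, 0);
}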
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
index dd27c85190d3..b42d386350b7 100644
--- a/drivers/nfc/nfcsim.c
+++ b/drivers/nfc/nfcsim.c
@@ -336,10 +336,6 @@ static struct dentry *nfcsim_debugfs_root;
static void nfcsim_debugfs_init(void)
{
nfcsim_debugfs_root = debugfs_create_dir("nfcsim", NULL);
-
- if (!nfcsim_debugfs_root)
- pr_err("Could not create debugfs entry\n");
-
}
static void nfcsim_debugfs_remove(void)
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index 807b7b37d9dc..1e90ff17f87d 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1293,6 +1293,8 @@ static int pn533_poll_dep_complete(struct pn533 *dev, void *arg,
if (IS_ERR(resp))
return PTR_ERR(resp);
+ memset(&nfc_target, 0, sizeof(struct nfc_target));
+
rsp = (struct pn533_cmd_jump_dep_response *)resp->data;
rc = rsp->status & PN533_CMD_RET_MASK;
@@ -1774,6 +1776,8 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
dev_dbg(dev->dev, "Creating new target\n");
+ memset(&nfc_target, 0, sizeof(struct nfc_target));
+
nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
nfc_target.nfcid1_len = 10;
memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len);
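Both memset() additions close the same class of bug: struct nfc_target sits on the stack and only some fields are written before the whole struct is handed to the NFC core (and from there potentially to userspace), so any untouched field could carry stale kernel stack bytes. Zeroing first makes every unwritten field deterministic. A userspace illustration:

#include <stdio.h>
#include <string.h>

struct target_model {		/* illustrative stand-in for struct nfc_target */
	unsigned int supported_protocols;
	unsigned char nfcid1[10];
	unsigned char nfcid1_len;
	unsigned char sens_res[2];	/* fields this code path never writes */
};

int main(void)
{
	struct target_model t;

	memset(&t, 0, sizeof(t));	/* without this, sens_res is stale stack data */
	t.supported_protocols = 1;
	t.nfcid1_len = 10;
	printf("sens_res[0]=%u\n", t.sens_res[0]);	/* deterministically 0 */
	return 0;
}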
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index d7a355d05368..2021a9d31f4a 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -153,10 +153,17 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
return usb_submit_urb(phy->ack_urb, flags);
}
+struct pn533_out_arg {
+ struct pn533_usb_phy *phy;
+ struct completion done;
+};
+
static int pn533_usb_send_frame(struct pn533 *dev,
struct sk_buff *out)
{
struct pn533_usb_phy *phy = dev->phy;
+ struct pn533_out_arg arg;
+ void *cntx;
int rc;
if (phy->priv == NULL)
@@ -168,10 +175,18 @@ static int pn533_usb_send_frame(struct pn533 *dev,
print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
out->data, out->len, false);
+ arg.phy = phy;
+ init_completion(&arg.done);
+ cntx = phy->out_urb->context;
+ phy->out_urb->context = &arg;
+
rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
if (rc)
return rc;
+ wait_for_completion(&arg.done);
+ phy->out_urb->context = cntx;
+
if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
/* request for response for sent packet directly */
rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
@@ -412,7 +427,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
return arg.rc;
}
-static void pn533_send_complete(struct urb *urb)
+static void pn533_out_complete(struct urb *urb)
+{
+ struct pn533_out_arg *arg = urb->context;
+ struct pn533_usb_phy *phy = arg->phy;
+
+ switch (urb->status) {
+ case 0:
+ break; /* success */
+ case -ECONNRESET:
+ case -ENOENT:
+ dev_dbg(&phy->udev->dev,
+ "The urb has been stopped (status %d)\n",
+ urb->status);
+ break;
+ case -ESHUTDOWN:
+ default:
+ nfc_err(&phy->udev->dev,
+ "Urb failure (status %d)\n",
+ urb->status);
+ }
+
+ complete(&arg->done);
+}
+
+static void pn533_ack_complete(struct urb *urb)
{
struct pn533_usb_phy *phy = urb->context;
@@ -500,10 +539,10 @@ static int pn533_usb_probe(struct usb_interface *interface,
usb_fill_bulk_urb(phy->out_urb, phy->udev,
usb_sndbulkpipe(phy->udev, out_endpoint),
- NULL, 0, pn533_send_complete, phy);
+ NULL, 0, pn533_out_complete, phy);
usb_fill_bulk_urb(phy->ack_urb, phy->udev,
usb_sndbulkpipe(phy->udev, out_endpoint),
- NULL, 0, pn533_send_complete, phy);
+ NULL, 0, pn533_ack_complete, phy);
switch (id->driver_info) {
case PN533_DEVICE_STD:
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
index ba6c486d6465..9b43cd3a45af 100644
--- a/drivers/nfc/s3fwrn5/core.c
+++ b/drivers/nfc/s3fwrn5/core.c
@@ -97,11 +97,15 @@ static int s3fwrn5_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
}
ret = s3fwrn5_write(info, skb);
- if (ret < 0)
+ if (ret < 0) {
kfree_skb(skb);
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+ consume_skb(skb);
mutex_unlock(&info->mutex);
- return ret;
+ return 0;
}
static int s3fwrn5_nci_post_setup(struct nci_dev *ndev)
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index 5d74c674368a..8ccf5a86ad1b 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -286,13 +286,15 @@ EXPORT_SYMBOL(ndlc_probe);
void ndlc_remove(struct llt_ndlc *ndlc)
{
- st_nci_remove(ndlc->ndev);
-
/* cancel timers */
del_timer_sync(&ndlc->t1_timer);
del_timer_sync(&ndlc->t2_timer);
ndlc->t2_active = false;
ndlc->t1_active = false;
+ /* cancel work */
+ cancel_work_sync(&ndlc->sm_work);
+
+ st_nci_remove(ndlc->ndev);
skb_queue_purge(&ndlc->rcv_q);
skb_queue_purge(&ndlc->send_q);
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index f25f1ec5f9e9..17793f6888e1 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -327,7 +327,7 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
* AID 81 5 to 16
* PARAMETERS 82 0 to 255
*/
- if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
+ if (skb->len < NFC_MIN_AID_LENGTH + 2 ||
skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;
@@ -341,8 +341,10 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
/* Check next byte is PARAMETERS tag (82) */
if (skb->data[transaction->aid_len + 2] !=
- NFC_EVT_TRANSACTION_PARAMS_TAG)
+ NFC_EVT_TRANSACTION_PARAMS_TAG) {
+ devm_kfree(dev, transaction);
return -EPROTO;
+ }
transaction->params_len = skb->data[transaction->aid_len + 3];
memcpy(transaction->params, skb->data +
@@ -663,6 +665,12 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
ST_NCI_EVT_TRANSMIT_DATA, apdu,
apdu_length);
default:
+ /* Need to free cb_context here as at the moment we can't
+ * clearly indicate to the caller if the callback function
+ * would be called (and free it) or not. In both cases a
+ * negative value may be returned to the caller.
+ */
+ kfree(cb_context);
return -ENODEV;
}
}
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index d41636504246..6a1d3b2752fb 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -236,6 +236,12 @@ int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
ST21NFCA_EVT_TRANSMIT_DATA,
apdu, apdu_length);
default:
+ /* Need to free cb_context here as at the moment we can't
+ * clearly indicate to the caller if the callback function
+ * would be called (and free it) or not. In both cases a
+ * negative value may be returned to the caller.
+ */
+ kfree(cb_context);
return -ENODEV;
}
}
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index abb37659de34..50983d77329e 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -1153,12 +1153,17 @@ static struct pci_driver amd_ntb_pci_driver = {
static int __init amd_ntb_pci_driver_init(void)
{
+ int ret;
pr_info("%s %s\n", NTB_DESC, NTB_VER);
if (debugfs_initialized())
debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- return pci_register_driver(&amd_ntb_pci_driver);
+ ret = pci_register_driver(&amd_ntb_pci_driver);
+ if (ret)
+ debugfs_remove_recursive(debugfs_dir);
+
+ return ret;
}
module_init(amd_ntb_pci_driver_init);
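The same unwind is added to the IDT and Intel NTB drivers below: the debugfs directory is created before pci_register_driver(), so a registration failure has to remove it again or it lingers for a module that never actually loaded. A minimal model of the ordering (stubs, not the kernel API):

#include <stdio.h>

static int register_driver_stub(void) { return -19; }	/* simulate -ENODEV */

int main(void)
{
	int ret;

	puts("debugfs dir created");		/* kernel: debugfs_create_dir() */
	ret = register_driver_stub();		/* kernel: pci_register_driver() */
	if (ret)
		puts("debugfs dir removed");	/* kernel: debugfs_remove_recursive() */
	return ret ? 1 : 0;
}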
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index dcf234680535..a0091900b0cf 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2908,6 +2908,7 @@ static struct pci_driver idt_pci_driver = {
static int __init idt_pci_driver_init(void)
{
+ int ret;
pr_info("%s %s\n", NTB_DESC, NTB_VER);
/* Create the top DebugFS directory if the FS is initialized */
@@ -2915,7 +2916,11 @@ static int __init idt_pci_driver_init(void)
dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
/* Register the NTB hardware driver to handle the PCI device */
- return pci_register_driver(&idt_pci_driver);
+ ret = pci_register_driver(&idt_pci_driver);
+ if (ret)
+ debugfs_remove_recursive(dbgfs_topdir);
+
+ return ret;
}
module_init(idt_pci_driver_init);
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index bb57ec239029..8d8739bff9f3 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -2065,12 +2065,17 @@ static struct pci_driver intel_ntb_pci_driver = {
static int __init intel_ntb_pci_driver_init(void)
{
+ int ret;
pr_info("%s %s\n", NTB_DESC, NTB_VER);
if (debugfs_initialized())
debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- return pci_register_driver(&intel_ntb_pci_driver);
+ ret = pci_register_driver(&intel_ntb_pci_driver);
+ if (ret)
+ debugfs_remove_recursive(debugfs_dir);
+
+ return ret;
}
module_init(intel_ntb_pci_driver_init);
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 00a5d5764993..d1eb76386860 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -412,7 +412,7 @@ int ntb_transport_register_client_dev(char *device_name)
rc = device_register(dev);
if (rc) {
- kfree(client_dev);
+ put_device(dev);
goto err;
}
@@ -911,7 +911,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
return 0;
}
-static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
+static void ntb_qp_link_context_reset(struct ntb_transport_qp *qp)
{
qp->link_is_up = false;
qp->active = false;
@@ -934,6 +934,13 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
qp->tx_async = 0;
}
+static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
+{
+ ntb_qp_link_context_reset(qp);
+ if (qp->remote_rx_info)
+ qp->remote_rx_info->entry = qp->rx_max_entry - 1;
+}
+
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
struct ntb_transport_ctx *nt = qp->transport;
@@ -1176,7 +1183,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
qp->ndev = nt->ndev;
qp->client_ready = false;
qp->event_handler = NULL;
- ntb_qp_link_down_reset(qp);
+ ntb_qp_link_context_reset(qp);
if (mw_num < qp_count % mw_count)
num_qps_mw = qp_count / mw_count + 1;
@@ -2278,9 +2285,13 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
struct ntb_queue_entry *entry;
int rc;
- if (!qp || !qp->link_is_up || !len)
+ if (!qp || !len)
return -EINVAL;
+ /* If the qp link is down already, just ignore. */
+ if (!qp->link_is_up)
+ return 0;
+
entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
if (!entry) {
qp->tx_err_no_buf++;
@@ -2420,7 +2431,7 @@ unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
unsigned int head = qp->tx_index;
unsigned int tail = qp->remote_rx_info->entry;
- return tail > head ? tail - head : qp->tx_max_entry + tail - head;
+ return tail >= head ? tail - head : qp->tx_max_entry + tail - head;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
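The one-character change from > to >= fixes the empty-ring case: when the remote consumer index equals the local producer index there are no free entries, but the old expression wrapped around and reported a full ring's worth. A worked example:

#include <stdio.h>

static unsigned int tx_free(unsigned int head, unsigned int tail,
			    unsigned int max)
{
	return tail >= head ? tail - head : max + tail - head;
}

int main(void)
{
	printf("%u\n", tx_free(5, 5, 64));	/* 0; the old '>' gave 64 */
	printf("%u\n", tx_free(60, 2, 64));	/* 6: wrapped ring */
	return 0;
}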
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index 311d6ab8d016..1f6414654622 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -367,14 +367,16 @@ static ssize_t tool_fn_write(struct tool_ctx *tc,
u64 bits;
int n;
+ if (*offp)
+ return 0;
+
buf = kmalloc(size + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = simple_write_to_buffer(buf, size, offp, ubuf, size);
- if (ret < 0) {
+ if (copy_from_user(buf, ubuf, size)) {
kfree(buf);
- return ret;
+ return -EFAULT;
}
buf[size] = 0;
@@ -996,6 +998,8 @@ static int tool_init_mws(struct tool_ctx *tc)
tc->peers[pidx].outmws =
devm_kcalloc(&tc->ntb->dev, tc->peers[pidx].outmw_cnt,
sizeof(*tc->peers[pidx].outmws), GFP_KERNEL);
+ if (tc->peers[pidx].outmws == NULL)
+ return -ENOMEM;
for (widx = 0; widx < tc->peers[pidx].outmw_cnt; widx++) {
tc->peers[pidx].outmws[widx].pidx = pidx;
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 97187d6c0bdb..d84776b1497f 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -42,7 +42,13 @@ static int of_pmem_region_probe(struct platform_device *pdev)
return -ENOMEM;
priv->bus_desc.attr_groups = bus_attr_groups;
- priv->bus_desc.provider_name = kstrdup(pdev->name, GFP_KERNEL);
+ priv->bus_desc.provider_name = devm_kstrdup(&pdev->dev, pdev->name,
+ GFP_KERNEL);
+ if (!priv->bus_desc.provider_name) {
+ kfree(priv);
+ return -ENOMEM;
+ }
+
priv->bus_desc.module = THIS_MODULE;
priv->bus_desc.of_node = np;
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index b8236a9e8750..bfa310ab52e7 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -898,7 +898,8 @@ unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
unsigned int cpu, lane;
- cpu = get_cpu();
+ migrate_disable();
+ cpu = smp_processor_id();
if (nd_region->num_lanes < nr_cpu_ids) {
struct nd_percpu_lane *ndl_lock, *ndl_count;
@@ -917,16 +918,15 @@ EXPORT_SYMBOL(nd_region_acquire_lane);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
if (nd_region->num_lanes < nr_cpu_ids) {
- unsigned int cpu = get_cpu();
+ unsigned int cpu = smp_processor_id();
struct nd_percpu_lane *ndl_lock, *ndl_count;
ndl_count = per_cpu_ptr(nd_region->lane, cpu);
ndl_lock = per_cpu_ptr(nd_region->lane, lane);
if (--ndl_count->count == 0)
spin_unlock(&ndl_lock->lock);
- put_cpu();
}
- put_cpu();
+ migrate_enable();
}
EXPORT_SYMBOL(nd_region_release_lane);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index d2ea6ca37c41..9144ed14b074 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2018,18 +2018,21 @@ static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
enum pr_type type, bool abort)
{
u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
+
return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}
static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
- u32 cdw10 = 1 | (key ? 1 << 3 : 0);
- return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
+ u32 cdw10 = 1 | (key ? 0 : 1 << 3);
+
+ return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
- u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
+ u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3);
+
return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
@@ -2668,7 +2671,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
nvme_init_subnqn(subsys, ctrl, id);
memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
memcpy(subsys->model, id->mn, sizeof(subsys->model));
- memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
subsys->vendor_id = le16_to_cpu(id->vid);
subsys->cmic = id->cmic;
subsys->awupf = le16_to_cpu(id->awupf);
@@ -2804,10 +2806,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (!ctrl->identified) {
int i;
- ret = nvme_init_subsystem(ctrl, id);
- if (ret)
- goto out_free;
-
/*
* Check for quirks. Quirk can depend on firmware version,
* so, in principle, the set of quirks present can change
@@ -2820,7 +2818,13 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (quirk_matches(id, &core_quirks[i]))
ctrl->quirks |= core_quirks[i].quirks;
}
+
+ ret = nvme_init_subsystem(ctrl, id);
+ if (ret)
+ goto out_free;
}
+ memcpy(ctrl->subsys->firmware_rev, id->fr,
+ sizeof(ctrl->subsys->firmware_rev));
if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
@@ -3023,11 +3027,17 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
case NVME_IOCTL_IO_CMD:
return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
dev_warn(ctrl->device, "resetting controller\n");
return nvme_reset_ctrl_sync(ctrl);
case NVME_IOCTL_SUBSYS_RESET:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
return nvme_reset_subsystem(ctrl);
case NVME_IOCTL_RESCAN:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
nvme_queue_scan(ctrl);
return 0;
default:
@@ -3966,11 +3976,19 @@ static void nvme_fw_act_work(struct work_struct *work)
nvme_get_fw_slot_info(ctrl);
}
-static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+static u32 nvme_aer_type(u32 result)
{
- u32 aer_notice_type = (result & 0xff00) >> 8;
+ return result & 0x7;
+}
- trace_nvme_async_event(ctrl, aer_notice_type);
+static u32 nvme_aer_subtype(u32 result)
+{
+ return (result & 0xff00) >> 8;
+}
+
+static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+{
+ u32 aer_notice_type = nvme_aer_subtype(result);
switch (aer_notice_type) {
case NVME_AER_NOTICE_NS_CHANGED:
@@ -4001,24 +4019,40 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
}
}
+static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
+{
+ dev_warn(ctrl->device, "resetting controller due to AER\n");
+ nvme_reset_ctrl(ctrl);
+}
+
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
volatile union nvme_result *res)
{
u32 result = le32_to_cpu(res->u32);
- u32 aer_type = result & 0x07;
+ u32 aer_type = nvme_aer_type(result);
+ u32 aer_subtype = nvme_aer_subtype(result);
if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
return;
+ trace_nvme_async_event(ctrl, result);
switch (aer_type) {
case NVME_AER_NOTICE:
nvme_handle_aen_notice(ctrl, result);
break;
case NVME_AER_ERROR:
+ /*
+ * For a persistent internal error, don't run async_event_work
+ * to submit a new AER. The controller reset will do it.
+ */
+ if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
+ nvme_handle_aer_persistent_error(ctrl);
+ return;
+ }
+ fallthrough;
case NVME_AER_SMART:
case NVME_AER_CSS:
case NVME_AER_VS:
- trace_nvme_async_event(ctrl, aer_type);
ctrl->aen_result = result;
break;
default:
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 81a5b968253f..c492d7d32398 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -292,6 +292,11 @@ struct nvme_ctrl {
struct nvme_fault_inject fault_inject;
};
+static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
+{
+ return READ_ONCE(ctrl->state);
+}
+
enum nvme_iopolicy {
NVME_IOPOLICY_NUMA,
NVME_IOPOLICY_RR,
@@ -422,11 +427,23 @@ static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
static inline void nvme_should_fail(struct request *req) {}
#endif
+bool nvme_wait_reset(struct nvme_ctrl *ctrl);
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
+
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
+ int ret;
+
if (!ctrl->subsystem)
return -ENOTTY;
- return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+ if (!nvme_wait_reset(ctrl))
+ return -EBUSY;
+
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+ if (ret)
+ return ret;
+
+ return nvme_try_sched_reset(ctrl);
}
/*
@@ -473,7 +490,6 @@ void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
-bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
@@ -525,7 +541,6 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
-int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 10fe7a7a2163..486e44d20b43 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -117,9 +117,9 @@ struct nvme_dev {
mempool_t *iod_mempool;
/* shadow doorbell buffer support: */
- u32 *dbbuf_dbs;
+ __le32 *dbbuf_dbs;
dma_addr_t dbbuf_dbs_dma_addr;
- u32 *dbbuf_eis;
+ __le32 *dbbuf_eis;
dma_addr_t dbbuf_eis_dma_addr;
/* host memory buffer support: */
@@ -187,10 +187,10 @@ struct nvme_queue {
#define NVMEQ_SQ_CMB 1
#define NVMEQ_DELETE_ERROR 2
#define NVMEQ_POLLED 3
- u32 *dbbuf_sq_db;
- u32 *dbbuf_cq_db;
- u32 *dbbuf_sq_ei;
- u32 *dbbuf_cq_ei;
+ __le32 *dbbuf_sq_db;
+ __le32 *dbbuf_cq_db;
+ __le32 *dbbuf_sq_ei;
+ __le32 *dbbuf_cq_ei;
struct completion delete_done;
};
@@ -311,11 +311,11 @@ static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
}
/* Update dbbuf and return true if an MMIO is required */
-static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
- volatile u32 *dbbuf_ei)
+static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
+ volatile __le32 *dbbuf_ei)
{
if (dbbuf_db) {
- u16 old_value;
+ u16 old_value, event_idx;
/*
* Ensure that the queue is written before updating
@@ -323,8 +323,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
*/
wmb();
- old_value = *dbbuf_db;
- *dbbuf_db = value;
+ old_value = le32_to_cpu(*dbbuf_db);
+ *dbbuf_db = cpu_to_le32(value);
/*
* Ensure that the doorbell is updated before reading the event
@@ -334,7 +334,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
*/
mb();
- if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
+ event_idx = le32_to_cpu(*dbbuf_ei);
+ if (!nvme_dbbuf_need_event(event_idx, value, old_value))
return false;
}
@@ -2840,8 +2841,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
size_t alloc_size;
node = dev_to_node(&pdev->dev);
- if (node == NUMA_NO_NODE)
- set_dev_node(&pdev->dev, first_memory_node);
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
@@ -3198,7 +3197,6 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
- { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
@@ -3208,6 +3206,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_SINGLE_VECTOR |
NVME_QUIRK_128_BYTES_SQES |
NVME_QUIRK_SHARED_TAGS },
+
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);
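Re-typing the shadow doorbell and event-index buffers as __le32 enforces what the dbbuf hunks above rely on: the controller reads these buffers as little-endian, so a big-endian host must convert on every access instead of storing native u32 values (on little-endian hosts the conversion compiles away). A userspace model of the accessor:

#include <stdio.h>

static unsigned int le32_to_cpu_model(unsigned int le)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(le);	/* big-endian host: swap on each access */
#else
	return le;			/* little-endian host: a no-op */
#endif
}

int main(void)
{
	printf("0x%08x\n", le32_to_cpu_model(0x2a000000u));
	return 0;
}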
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index d5d7b2f98edc..6de507d88f5f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -905,6 +905,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
goto out_cleanup_connect_q;
if (!new) {
+ nvme_start_freeze(&ctrl->ctrl);
nvme_start_queues(&ctrl->ctrl);
if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
/*
@@ -913,6 +914,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
* to be safe.
*/
ret = -ENODEV;
+ nvme_unfreeze(&ctrl->ctrl);
goto out_wait_freeze_timed_out;
}
blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
@@ -958,7 +960,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
if (ctrl->ctrl.queue_count > 1) {
- nvme_start_freeze(&ctrl->ctrl);
nvme_stop_queues(&ctrl->ctrl);
nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 2a27ac9aedba..61296032ce6d 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1074,7 +1074,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
if (result > 0)
pending = true;
- if (!pending)
+ if (!pending || !queue->rd_enabled)
return;
} while (!time_after(jiffies, deadline)); /* quota is exhausted */
@@ -1387,22 +1387,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
if (ret)
goto err_init_connect;
- queue->rd_enabled = true;
set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
- nvme_tcp_init_recv_ctx(queue);
-
- write_lock_bh(&queue->sock->sk->sk_callback_lock);
- queue->sock->sk->sk_user_data = queue;
- queue->state_change = queue->sock->sk->sk_state_change;
- queue->data_ready = queue->sock->sk->sk_data_ready;
- queue->write_space = queue->sock->sk->sk_write_space;
- queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
- queue->sock->sk->sk_state_change = nvme_tcp_state_change;
- queue->sock->sk->sk_write_space = nvme_tcp_write_space;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- queue->sock->sk->sk_ll_usec = 1;
-#endif
- write_unlock_bh(&queue->sock->sk->sk_callback_lock);
return 0;
@@ -1419,7 +1404,7 @@ err_sock:
return ret;
}
-static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
{
struct socket *sock = queue->sock;
@@ -1434,7 +1419,7 @@ static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
{
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
- nvme_tcp_restore_sock_calls(queue);
+ nvme_tcp_restore_sock_ops(queue);
cancel_work_sync(&queue->io_work);
}
@@ -1448,21 +1433,42 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
__nvme_tcp_stop_queue(queue);
}
+static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+{
+ write_lock_bh(&queue->sock->sk->sk_callback_lock);
+ queue->sock->sk->sk_user_data = queue;
+ queue->state_change = queue->sock->sk->sk_state_change;
+ queue->data_ready = queue->sock->sk->sk_data_ready;
+ queue->write_space = queue->sock->sk->sk_write_space;
+ queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+ queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+ queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ queue->sock->sk->sk_ll_usec = 1;
+#endif
+ write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+}
+
static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvme_tcp_queue *queue = &ctrl->queues[idx];
int ret;
+ queue->rd_enabled = true;
+ nvme_tcp_init_recv_ctx(queue);
+ nvme_tcp_setup_sock_ops(queue);
+
if (idx)
ret = nvmf_connect_io_queue(nctrl, idx, false);
else
ret = nvmf_connect_admin_queue(nctrl);
if (!ret) {
- set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
+ set_bit(NVME_TCP_Q_LIVE, &queue->flags);
} else {
- if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
- __nvme_tcp_stop_queue(&ctrl->queues[idx]);
+ if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+ __nvme_tcp_stop_queue(queue);
dev_err(nctrl->device,
"failed to connect queue: %d ret=%d\n", idx, ret);
}
@@ -1701,6 +1707,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
goto out_cleanup_connect_q;
if (!new) {
+ nvme_start_freeze(ctrl);
nvme_start_queues(ctrl);
if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
/*
@@ -1709,6 +1716,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
* to be safe.
*/
ret = -ENODEV;
+ nvme_unfreeze(ctrl);
goto out_wait_freeze_timed_out;
}
blk_mq_update_nr_hw_queues(ctrl->tagset,
@@ -1831,7 +1839,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
if (ctrl->queue_count <= 1)
return;
blk_mq_quiesce_queue(ctrl->admin_q);
- nvme_start_freeze(ctrl);
nvme_stop_queues(ctrl);
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 35bac7a25422..700fdce2ecf1 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -127,15 +127,12 @@ TRACE_EVENT(nvme_async_event,
),
TP_printk("nvme%d: NVME_AEN=%#08x [%s]",
__entry->ctrl_id, __entry->result,
- __print_symbolic(__entry->result,
- aer_name(NVME_AER_NOTICE_NS_CHANGED),
- aer_name(NVME_AER_NOTICE_ANA),
- aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
- aer_name(NVME_AER_NOTICE_DISC_CHANGED),
- aer_name(NVME_AER_ERROR),
- aer_name(NVME_AER_SMART),
- aer_name(NVME_AER_CSS),
- aer_name(NVME_AER_VS))
+ __print_symbolic(__entry->result & 0x7,
+ aer_name(NVME_AER_ERROR),
+ aer_name(NVME_AER_SMART),
+ aer_name(NVME_AER_NOTICE),
+ aer_name(NVME_AER_CSS),
+ aer_name(NVME_AER_VS))
)
);
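
The trace hunk above works because the NVMe asynchronous-event completion result packs the event type into bits 2:0, the event information into bits 15:8 and the log page identifier into bits 23:16; comparing the whole dword against type-only constants therefore almost never matched. A standalone sketch of the masking (the sample field values are illustrative):

#include <stdio.h>

enum { AER_ERROR = 0, AER_SMART = 1, AER_NOTICE = 2, AER_CSS = 6, AER_VS = 7 };

int main(void)
{
	/* A notice-type event: log page 0x04, event info 0, type 2. */
	unsigned int result = (0x04u << 16) | (0x00u << 8) | AER_NOTICE;

	printf("raw result   = %#x (never equals AER_NOTICE)\n", result);
	printf("result & 0x7 = %#x (matches AER_NOTICE)\n", result & 0x7);
	return 0;
}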
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index ee81d94fe810..60941a08e589 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -709,6 +709,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
+ struct nvmet_ns *ns = req->ns;
+
if (!req->sq->sqhd_disabled)
nvmet_update_sq_head(req);
req->cqe->sq_id = cpu_to_le16(req->sq->qid);
@@ -719,15 +721,17 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
trace_nvmet_req_complete(req);
- if (req->ns)
- nvmet_put_namespace(req->ns);
req->ops->queue_response(req);
+ if (ns)
+ nvmet_put_namespace(ns);
}
void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
+ struct nvmet_sq *sq = req->sq;
+
__nvmet_req_complete(req, status);
- percpu_ref_put(&req->sq->ref);
+ percpu_ref_put(&sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);
@@ -1103,19 +1107,19 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}
-u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
- struct nvmet_req *req, struct nvmet_ctrl **ret)
+struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
+ const char *hostnqn, u16 cntlid,
+ struct nvmet_req *req)
{
+ struct nvmet_ctrl *ctrl = NULL;
struct nvmet_subsys *subsys;
- struct nvmet_ctrl *ctrl;
- u16 status = 0;
subsys = nvmet_find_get_subsys(req->port, subsysnqn);
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
subsysnqn);
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
- return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ goto out;
}
mutex_lock(&subsys->lock);
@@ -1128,20 +1132,21 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
if (!kref_get_unless_zero(&ctrl->ref))
continue;
- *ret = ctrl;
- goto out;
+ /* ctrl found */
+ goto found;
}
}
+ ctrl = NULL; /* ctrl not found */
pr_warn("could not find controller %d for subsys %s / host %s\n",
cntlid, subsysnqn, hostnqn);
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
-out:
+found:
mutex_unlock(&subsys->lock);
nvmet_subsys_put(subsys);
- return status;
+out:
+ return ctrl;
}
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 5e47395afc1d..d4036508f32a 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -182,6 +182,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
}
+ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
le32_to_cpu(c->kato), &ctrl);
if (status) {
@@ -213,7 +215,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
{
struct nvmf_connect_command *c = &req->cmd->connect;
struct nvmf_connect_data *d;
- struct nvmet_ctrl *ctrl = NULL;
+ struct nvmet_ctrl *ctrl;
u16 qid = le16_to_cpu(c->qid);
u16 status = 0;
@@ -237,11 +239,14 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
goto out;
}
- status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
- le16_to_cpu(d->cntlid),
- req, &ctrl);
- if (status)
+ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
+ le16_to_cpu(d->cntlid), req);
+ if (!ctrl) {
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
goto out;
+ }
if (unlikely(qid > ctrl->subsys->max_qid)) {
pr_warn("invalid queue id (%d)\n", qid);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 9b07e8c7689a..2dd39299fba0 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -796,6 +796,9 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
int idx;
bool needrandom = true;
+ if (!tgtport->pe)
+ return NULL;
+
assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
if (!assoc)
return NULL;
@@ -1362,8 +1365,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
else {
queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
be16_to_cpu(rqst->assoc_cmd.sqsize));
- if (!queue)
+ if (!queue) {
ret = VERR_QUEUE_ALLOC_FAIL;
+ nvmet_fc_tgt_a_put(iod->assoc);
+ }
}
}
@@ -2178,8 +2183,9 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->req.cmd = &fod->cmdiubuf.sqe;
fod->req.cqe = &fod->rspiubuf.cqe;
- if (tgtport->pe)
- fod->req.port = tgtport->pe->port;
+ if (!tgtport->pe)
+ goto transport_error;
+ fod->req.port = tgtport->pe->port;
/* clear any response payload */
memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index b50b53db3746..34345c45a3e5 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -431,10 +431,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
struct fcloop_fcpreq *tfcp_req =
container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+ unsigned long flags;
int ret = 0;
bool aborted = false;
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
switch (tfcp_req->inistate) {
case INI_IO_START:
tfcp_req->inistate = INI_IO_ACTIVE;
@@ -443,11 +444,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
aborted = true;
break;
default:
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
WARN_ON(1);
return;
}
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
if (unlikely(aborted))
ret = -ECANCELED;
@@ -468,8 +469,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
container_of(work, struct fcloop_fcpreq, abort_rcv_work);
struct nvmefc_fcp_req *fcpreq;
bool completed = false;
+ unsigned long flags;
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
fcpreq = tfcp_req->fcpreq;
switch (tfcp_req->inistate) {
case INI_IO_ABORTED:
@@ -478,11 +480,11 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
completed = true;
break;
default:
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
WARN_ON(1);
return;
}
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
if (unlikely(completed)) {
/* remove reference taken in original abort downcall */
@@ -494,9 +496,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
&tfcp_req->tgt_fcp_req);
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
tfcp_req->fcpreq = NULL;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
/* call_host_done releases reference for abort downcall */
@@ -512,11 +514,12 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
struct fcloop_fcpreq *tfcp_req =
container_of(work, struct fcloop_fcpreq, tio_done_work);
struct nvmefc_fcp_req *fcpreq;
+ unsigned long flags;
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
fcpreq = tfcp_req->fcpreq;
tfcp_req->inistate = INI_IO_COMPLETED;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
@@ -620,13 +623,14 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
u32 rsplen = 0, xfrlen = 0;
int fcp_err = 0, active, aborted;
u8 op = tgt_fcpreq->op;
+ unsigned long flags;
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
fcpreq = tfcp_req->fcpreq;
active = tfcp_req->active;
aborted = tfcp_req->aborted;
tfcp_req->active = true;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
if (unlikely(active))
/* illegal - call while i/o active */
@@ -634,9 +638,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
if (unlikely(aborted)) {
/* target transport has aborted i/o prior */
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
tfcp_req->active = false;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
tgt_fcpreq->transferred_length = 0;
tgt_fcpreq->fcp_error = -ECANCELED;
tgt_fcpreq->done(tgt_fcpreq);
@@ -693,9 +697,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
break;
}
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
tfcp_req->active = false;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
tgt_fcpreq->transferred_length = xfrlen;
tgt_fcpreq->fcp_error = fcp_err;
@@ -709,15 +713,16 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+ unsigned long flags;
/*
* mark aborted only in case there were 2 threads in transport
* (one doing io, other doing abort) and only kills ops posted
* after the abort request
*/
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
tfcp_req->aborted = true;
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
tfcp_req->status = NVME_SC_INTERNAL;
@@ -753,6 +758,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
struct fcloop_fcpreq *tfcp_req;
bool abortio = true;
+ unsigned long flags;
spin_lock(&inireq->inilock);
tfcp_req = inireq->tfcp_req;
@@ -765,7 +771,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
return;
/* break initiator/target relationship for io */
- spin_lock_irq(&tfcp_req->reqlock);
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
switch (tfcp_req->inistate) {
case INI_IO_START:
case INI_IO_ACTIVE:
@@ -775,11 +781,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
abortio = false;
break;
default:
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
WARN_ON(1);
return;
}
- spin_unlock_irq(&tfcp_req->reqlock);
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
if (abortio)
/* leave the reference while the work item is scheduled */
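
Every reqlock site in fcloop switches from spin_lock_irq() to spin_lock_irqsave() because the unlock side of the former unconditionally re-enables interrupts; if any of these paths is ever entered with interrupts already disabled, that would corrupt the caller's IRQ state. A kernel-style fragment of the resulting pattern (illustrative, not a standalone module):

static DEFINE_SPINLOCK(reqlock);

static void update_state(struct fcloop_fcpreq *tfcp_req, int state)
{
	unsigned long flags;

	spin_lock_irqsave(&reqlock, flags);	/* safe in any context */
	tfcp_req->inistate = state;
	spin_unlock_irqrestore(&reqlock, flags); /* restores the saved state */
}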
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index c51f8dd01dc4..d625ec3e437b 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -394,8 +394,9 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
-u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
- struct nvmet_req *req, struct nvmet_ctrl **ret);
+struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
+ const char *hostnqn, u16 cntlid,
+ struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 4341c7244662..d40bd57537ba 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -18,6 +18,7 @@
#include "nvmet.h"
#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
+#define NVMET_TCP_MAXH2CDATA 0x400000 /* 4M arbitrary limit */
#define NVMET_TCP_RECV_BUDGET 8
#define NVMET_TCP_SEND_BUDGET 8
@@ -321,6 +322,15 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}
+static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
+{
+ queue->rcv_state = NVMET_TCP_RECV_ERR;
+ if (status == -EPIPE || status == -ECONNRESET)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ else
+ nvmet_tcp_fatal_error(queue);
+}
+
static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
@@ -714,11 +724,15 @@ static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
for (i = 0; i < budget; i++) {
ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
- if (ret <= 0)
+ if (unlikely(ret < 0)) {
+ nvmet_tcp_socket_error(queue, ret);
+ goto done;
+ } else if (ret == 0) {
break;
+ }
(*sends)++;
}
-
+done:
return ret;
}
@@ -805,7 +819,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
icresp->hdr.pdo = 0;
icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
- icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
+ icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
icresp->cpda = 0;
if (queue->hdr_digest)
icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
@@ -816,15 +830,11 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
iov.iov_len = sizeof(*icresp);
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
if (ret < 0)
- goto free_crypto;
+ return ret; /* queue removal will clean up */
queue->state = NVMET_TCP_Q_LIVE;
nvmet_prepare_receive_pdu(queue);
return 0;
-free_crypto:
- if (queue->hdr_digest || queue->data_digest)
- nvmet_tcp_free_crypto(queue);
- return ret;
}
static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
@@ -857,23 +867,43 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
struct nvme_tcp_data_pdu *data = &queue->pdu.data;
struct nvmet_tcp_cmd *cmd;
+ unsigned int exp_data_len;
- if (likely(queue->nr_cmds))
+ if (likely(queue->nr_cmds)) {
+ if (unlikely(data->ttag >= queue->nr_cmds)) {
+ pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
+ queue->idx, data->ttag, queue->nr_cmds);
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
cmd = &queue->cmds[data->ttag];
- else
+ } else {
cmd = &queue->connect;
+ }
if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
pr_err("ttag %u unexpected data offset %u (expected %u)\n",
data->ttag, le32_to_cpu(data->data_offset),
cmd->rbytes_done);
/* FIXME: use path and transport errors */
- nvmet_req_complete(&cmd->req,
- NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+ nvmet_tcp_fatal_error(queue);
return -EPROTO;
}
+ exp_data_len = le32_to_cpu(data->hdr.plen) -
+ nvmet_tcp_hdgst_len(queue) -
+ nvmet_tcp_ddgst_len(queue) -
+ sizeof(*data);
+
cmd->pdu_len = le32_to_cpu(data->data_length);
+ if (unlikely(cmd->pdu_len != exp_data_len ||
+ cmd->pdu_len == 0 ||
+ cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
+ pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
+ /* FIXME: use proper transport errors */
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
cmd->pdu_recv = 0;
nvmet_tcp_map_pdu_iovec(cmd);
queue->cmd = cmd;
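
The added validation recomputes the payload length a well-formed H2CData PDU must carry, total plen minus the PDU header and any enabled digests, and rejects a data_length that disagrees, is zero, or exceeds the advertised NVMET_TCP_MAXH2CDATA before any iovec is mapped. A standalone sketch of the arithmetic, with the digest sizes hard-coded (the kernel derives them from the queue flags):

#include <stdint.h>
#include <stdio.h>

#define MAXH2CDATA	0x400000	/* NVMET_TCP_MAXH2CDATA */
#define PDU_HDR_LEN	24		/* sizeof(struct nvme_tcp_data_pdu) */
#define HDGST_LEN	4		/* CRC32C header digest, if enabled */
#define DDGST_LEN	4		/* CRC32C data digest, if enabled */

static int h2c_len_ok(uint32_t plen, uint32_t data_length)
{
	uint32_t exp = plen - PDU_HDR_LEN - HDGST_LEN - DDGST_LEN;

	return data_length == exp && data_length != 0 &&
	       data_length <= MAXH2CDATA;
}

int main(void)
{
	printf("honest pdu: %d\n", h2c_len_ok(24 + 4 + 4 + 4096, 4096));
	printf("lying pdu:  %d\n", h2c_len_ok(24 + 4 + 4 + 4096, 8192));
	return 0;
}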
@@ -1160,11 +1190,15 @@ static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
for (i = 0; i < budget; i++) {
ret = nvmet_tcp_try_recv_one(queue);
- if (ret <= 0)
+ if (unlikely(ret < 0)) {
+ nvmet_tcp_socket_error(queue, ret);
+ goto done;
+ } else if (ret == 0) {
break;
+ }
(*recvs)++;
}
-
+done:
return ret;
}
@@ -1189,27 +1223,16 @@ static void nvmet_tcp_io_work(struct work_struct *w)
pending = false;
ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
- if (ret > 0) {
+ if (ret > 0)
pending = true;
- } else if (ret < 0) {
- if (ret == -EPIPE || ret == -ECONNRESET)
- kernel_sock_shutdown(queue->sock, SHUT_RDWR);
- else
- nvmet_tcp_fatal_error(queue);
+ else if (ret < 0)
return;
- }
ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
- if (ret > 0) {
- /* transmitted message/data */
+ if (ret > 0)
pending = true;
- } else if (ret < 0) {
- if (ret == -EPIPE || ret == -ECONNRESET)
- kernel_sock_shutdown(queue->sock, SHUT_RDWR);
- else
- nvmet_tcp_fatal_error(queue);
+ else if (ret < 0)
return;
- }
} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
@@ -1416,6 +1439,9 @@ static void nvmet_tcp_state_change(struct sock *sk)
goto done;
switch (sk->sk_state) {
+ case TCP_FIN_WAIT2:
+ case TCP_LAST_ACK:
+ break;
case TCP_FIN_WAIT1:
case TCP_CLOSE_WAIT:
case TCP_CLOSE:
@@ -1762,7 +1788,8 @@ static int __init nvmet_tcp_init(void)
{
int ret;
- nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
+ nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!nvmet_tcp_wq)
return -ENOMEM;
@@ -1790,6 +1817,7 @@ static void __exit nvmet_tcp_exit(void)
flush_scheduled_work();
destroy_workqueue(nvmet_tcp_wq);
+ ida_destroy(&nvmet_tcp_queue_ida);
}
module_init(nvmet_tcp_init);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index c0f4324d8f7c..cde1d77845fe 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -439,7 +439,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (config->cells) {
rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
if (rval)
- goto err_teardown_compat;
+ goto err_remove_cells;
}
rval = nvmem_add_cells_from_table(nvmem);
@@ -456,7 +456,6 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
err_remove_cells:
nvmem_device_remove_all_cells(nvmem);
-err_teardown_compat:
if (config->compat)
nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
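
The relabelled jump restores the usual unwind ladder: a failure in nvmem_add_cells() may leave cells partially added, so the error path must pass through nvmem_device_remove_all_cells() before the compat and device teardown. A compact standalone sketch of the reverse-order ladder idiom:

#include <stdio.h>

/* Each failure jumps to the label that undoes everything set up so
 * far; a partially completed step still unwinds through its cleanup. */
static int do_register(int fail_at)
{
	printf("device add\n");
	if (fail_at == 1)
		goto err_device_del;
	printf("add cells (may partially succeed)\n");
	if (fail_at == 2)
		goto err_remove_cells;
	return 0;

err_remove_cells:
	printf("remove all cells\n");
err_device_del:
	printf("device del\n");
	return -1;
}

int main(void)
{
	return do_register(2) ? 1 : 0;
}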
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index fc40555ca4cd..3297ced943ea 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -433,7 +433,7 @@ static const struct ocotp_params imx6sl_params = {
};
static const struct ocotp_params imx6sll_params = {
- .nregs = 128,
+ .nregs = 80,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
};
@@ -445,13 +445,13 @@ static const struct ocotp_params imx6sx_params = {
};
static const struct ocotp_params imx6ul_params = {
- .nregs = 128,
+ .nregs = 144,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
};
static const struct ocotp_params imx6ull_params = {
- .nregs = 64,
+ .nregs = 80,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
};
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
index 5893543918c8..4de3a1ba6eec 100644
--- a/drivers/nvmem/zynqmp_nvmem.c
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2019 Xilinx, Inc.
+ * Copyright (C) 2017 - 2019 Xilinx, Inc.
*/
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
@@ -10,40 +11,163 @@
#include <linux/firmware/xlnx-zynqmp.h>
#define SILICON_REVISION_MASK 0xF
+#define WORD_INBYTES (4)
+#define SOC_VER_SIZE (0x4)
+#define EFUSE_MEMORY_SIZE (0xF4)
+#define UNUSED_SPACE (0x8)
+#define ZYNQMP_NVMEM_SIZE (SOC_VER_SIZE + UNUSED_SPACE + \
+ EFUSE_MEMORY_SIZE)
+#define SOC_VERSION_OFFSET (0x0)
+#define EFUSE_START_OFFSET (0xC)
+#define EFUSE_END_OFFSET (0xFC)
+#define EFUSE_NOT_ENABLED (29)
+#define EFUSE_READ (0)
+#define EFUSE_WRITE (1)
-struct zynqmp_nvmem_data {
- struct device *dev;
- struct nvmem_device *nvmem;
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+/**
+ * struct xilinx_efuse - efuse access descriptor
+ * @src: address of the buffer holding the data to be written or read
+ * @size: number of words to read/write
+ * @offset: offset to read/write
+ * @flag: 0 represents an efuse read and 1 represents an efuse write
+ *
+ * This structure carries all the details required to read or write
+ * the efuse memory.
+ */
+struct xilinx_efuse {
+ u64 src;
+ u32 size;
+ u32 offset;
+ u32 flag;
+ u32 fullmap;
};
-static const struct zynqmp_eemi_ops *eemi_ops;
+static int zynqmp_efuse_access(void *context, unsigned int offset,
+ void *val, size_t bytes, unsigned int flag)
+{
+ size_t words = bytes / WORD_INBYTES;
+ struct device *dev = context;
+ dma_addr_t dma_addr, dma_buf;
+ struct xilinx_efuse *efuse;
+ char *data;
+ int ret;
+
+ if (!eemi_ops->efuse_access)
+ return -ENXIO;
+
+ if (bytes % WORD_INBYTES != 0) {
+ dev_err(dev,
+ "ERROR: Bytes requested should be word aligned\n\r");
+ return -ENOTSUPP;
+ }
+ if (offset % WORD_INBYTES != 0) {
+ dev_err(dev,
+ "ERROR: offset requested should be word aligned\n\r");
+ return -ENOTSUPP;
+ }
+
+ efuse = dma_alloc_coherent(dev, sizeof(struct xilinx_efuse),
+ &dma_addr, GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ data = dma_alloc_coherent(dev, bytes,
+ &dma_buf, GFP_KERNEL);
+ if (!data) {
+ dma_free_coherent(dev, sizeof(struct xilinx_efuse),
+ efuse, dma_addr);
+ return -ENOMEM;
+ }
+
+ if (flag == EFUSE_WRITE) {
+ memcpy(data, val, bytes);
+ efuse->flag = EFUSE_WRITE;
+ } else {
+ efuse->flag = EFUSE_READ;
+ }
+
+ efuse->src = dma_buf;
+ efuse->size = words;
+ efuse->offset = offset;
+
+ eemi_ops->efuse_access(dma_addr, &ret);
+ if (ret != 0) {
+ if (ret == EFUSE_NOT_ENABLED) {
+ dev_err(dev, "ERROR: efuse access is not enabled\n");
+ ret = -ENOTSUPP;
+ goto end;
+ }
+ dev_err(dev, "ERROR: in efuse access %x\n", ret);
+ ret = -EPERM;
+ goto end;
+ }
+
+ if (flag == EFUSE_READ)
+ memcpy(val, data, bytes);
+end:
+
+ dma_free_coherent(dev, sizeof(struct xilinx_efuse),
+ efuse, dma_addr);
+ dma_free_coherent(dev, bytes,
+ data, dma_buf);
+
+ return ret;
+}
static int zynqmp_nvmem_read(void *context, unsigned int offset,
- void *val, size_t bytes)
+ void *val, size_t bytes)
{
int ret;
int idcode, version;
- struct zynqmp_nvmem_data *priv = context;
if (!eemi_ops->get_chipid)
return -ENXIO;
- ret = eemi_ops->get_chipid(&idcode, &version);
- if (ret < 0)
- return ret;
+ switch (offset) {
+ /* Soc version offset is zero */
+ case SOC_VERSION_OFFSET:
+ if (bytes != SOC_VER_SIZE)
+ return -ENOTSUPP;
+
+ ret = eemi_ops->get_chipid(&idcode, &version);
+ if (ret < 0)
+ return ret;
+
+ pr_debug("Read chipid val %x %x\n", idcode, version);
+ *(int *)val = version & SILICON_REVISION_MASK;
+ break;
+ /* Efuse offset starts from 0xc */
+ case EFUSE_START_OFFSET ... EFUSE_END_OFFSET:
+ ret = zynqmp_efuse_access(context, offset, val,
+ bytes, EFUSE_READ);
+ break;
+ default:
+ *(u32 *)val = 0xDEADBEEF;
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
- dev_dbg(priv->dev, "Read chipid val %x %x\n", idcode, version);
- *(int *)val = version & SILICON_REVISION_MASK;
+static int zynqmp_nvmem_write(void *context,
+ unsigned int offset, void *val, size_t bytes)
+{
+ /* Efuse offset starts from 0xc */
+ if (offset < EFUSE_START_OFFSET)
+ return -ENOTSUPP;
- return 0;
+ return zynqmp_efuse_access(context, offset,
+ val, bytes, EFUSE_WRITE);
}
static struct nvmem_config econfig = {
.name = "zynqmp-nvmem",
.owner = THIS_MODULE,
.word_size = 1,
- .size = 1,
- .read_only = true,
+ .size = ZYNQMP_NVMEM_SIZE,
};
static const struct of_device_id zynqmp_nvmem_match[] = {
@@ -54,29 +178,37 @@ MODULE_DEVICE_TABLE(of, zynqmp_nvmem_match);
static int zynqmp_nvmem_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct zynqmp_nvmem_data *priv;
-
- priv = devm_kzalloc(dev, sizeof(struct zynqmp_nvmem_data), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ struct nvmem_device *nvmem;
eemi_ops = zynqmp_pm_get_eemi_ops();
if (IS_ERR(eemi_ops))
return PTR_ERR(eemi_ops);
- priv->dev = dev;
- econfig.dev = dev;
+ econfig.dev = &pdev->dev;
+ econfig.priv = &pdev->dev;
econfig.reg_read = zynqmp_nvmem_read;
- econfig.priv = priv;
+ econfig.reg_write = zynqmp_nvmem_write;
+
+ nvmem = nvmem_register(&econfig);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
- priv->nvmem = devm_nvmem_register(dev, &econfig);
+ platform_set_drvdata(pdev, nvmem);
- return PTR_ERR_OR_ZERO(priv->nvmem);
+ return 0;
+}
+
+static int zynqmp_nvmem_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ nvmem_unregister(nvmem);
+ return 0;
}
static struct platform_driver zynqmp_nvmem_driver = {
.probe = zynqmp_nvmem_probe,
+ .remove = zynqmp_nvmem_remove,
.driver = {
.name = "zynqmp-nvmem",
.of_match_table = zynqmp_nvmem_match,
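
With the driver now registering the full ZYNQMP_NVMEM_SIZE window (SoC version at offset 0, efuses from 0xc) and a write hook, user space can reach it through the nvmem core's binary sysfs attribute. A user-space sketch reading the silicon revision; the device path is an assumption derived from the "zynqmp-nvmem" config name and may differ on a given system:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed path; check /sys/bus/nvmem/devices/ on the target */
	const char *path = "/sys/bus/nvmem/devices/zynqmp-nvmem0/nvmem";
	uint32_t ver;
	int fd = open(path, O_RDONLY);

	if (fd < 0 || pread(fd, &ver, sizeof(ver), 0) != sizeof(ver)) {
		perror(path);
		return 1;
	}
	close(fd);
	/* the driver already applies SILICON_REVISION_MASK */
	printf("silicon revision: %u\n", ver);
	return 0;
}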
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index d91618641be6..e96c312216db 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -103,6 +103,13 @@ config OF_OVERLAY
config OF_NUMA
bool
+config OF_CONFIGFS
+ bool "Device Tree Overlay ConfigFS interface"
+ select CONFIGFS_FS
+ depends on OF_OVERLAY
+ help
+ Enable a simple user-space driven DT overlay interface.
+
config OF_DMA_DEFAULT_COHERENT
# arches should select this if DMA is coherent by default for OF devices
bool
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 663a4af0cccd..7f43d8b95017 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-y = base.o device.o platform.o property.o
+obj-$(CONFIG_OF_CONFIGFS) += configfs.o
obj-$(CONFIG_OF_KOBJ) += kobj.o
obj-$(CONFIG_OF_DYNAMIC) += dynamic.o
obj-$(CONFIG_OF_FLATTREE) += fdt.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index b5c84607a74b..6fa209b3557b 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -306,7 +306,7 @@ struct device_node *__of_find_all_nodes(struct device_node *prev)
* @prev: Previous node or NULL to start iteration
* of_node_put() will be called on it
*
- * Returns a node pointer with refcount incremented, use
+ * Return: A node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct device_node *of_find_all_nodes(struct device_node *prev)
@@ -367,7 +367,7 @@ bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
return (u32)phys_id == cpu;
}
-/**
+/*
* Checks if the given "prop_name" property holds the physical id of the
* core/thread corresponding to the logical cpu 'cpu'. If 'thread' is not
* NULL, local thread number within the core is returned in it.
@@ -436,7 +436,7 @@ bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
* before booting secondary cores. This function uses arch_match_cpu_phys_id
* which can be overridden by architecture specific implementation.
*
- * Returns a node pointer for the logical cpu with refcount incremented, use
+ * Return: A node pointer for the logical cpu with refcount incremented, use
* of_node_put() on it when done. Returns NULL if not found.
*/
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
@@ -456,8 +456,8 @@ EXPORT_SYMBOL(of_get_cpu_node);
*
* @cpu_node: Pointer to the device_node for CPU.
*
- * Returns the logical CPU number of the given CPU device_node.
- * Returns -ENODEV if the CPU is not found.
+ * Return: The logical CPU number of the given CPU device_node or -ENODEV if the
+ * CPU is not found.
*/
int of_cpu_node_to_id(struct device_node *cpu_node)
{
@@ -478,6 +478,42 @@ int of_cpu_node_to_id(struct device_node *cpu_node)
EXPORT_SYMBOL(of_cpu_node_to_id);
/**
+ * of_get_cpu_state_node - Get CPU's idle state node at the given index
+ *
+ * @cpu_node: The device node for the CPU
+ * @index: The index in the list of the idle states
+ *
+ * Two generic methods can be used to describe a CPU's idle states, either via
+ * a flattened description through the "cpu-idle-states" binding or via the
+ * hierarchical layout, using the "power-domains" and the "domain-idle-states"
+ * bindings. This function checks for both and returns the idle state node for
+ * the requested index.
+ *
+ * Return: An idle state node if found at @index. The refcount is incremented
+ * for it, so call of_node_put() on it when done. Returns NULL if not found.
+ */
+struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+ int index)
+{
+ struct of_phandle_args args;
+ int err;
+
+ err = of_parse_phandle_with_args(cpu_node, "power-domains",
+ "#power-domain-cells", 0, &args);
+ if (!err) {
+ struct device_node *state_node =
+ of_parse_phandle(args.np, "domain-idle-states", index);
+
+ of_node_put(args.np);
+ if (state_node)
+ return state_node;
+ }
+
+ return of_parse_phandle(cpu_node, "cpu-idle-states", index);
+}
+EXPORT_SYMBOL(of_get_cpu_state_node);
+
+/**
* __of_device_is_compatible() - Check if the node matches given constraints
* @device: pointer to node
* @compat: required compatible string, NULL or "" for any match
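
A kernel-style fragment (illustrative, not a standalone module) showing how a caller is expected to iterate idle states with the new helper, which prefers the hierarchical power-domains/domain-idle-states layout and falls back to the flat cpu-idle-states list:

static void dump_cpu_idle_states(int cpu)
{
	struct device_node *cpu_node = of_get_cpu_node(cpu, NULL);
	struct device_node *state;
	int i = 0;

	if (!cpu_node)
		return;

	while ((state = of_get_cpu_state_node(cpu_node, i++))) {
		pr_info("cpu%d idle state %d: %pOF\n", cpu, i - 1, state);
		of_node_put(state);	/* the helper returns a reference */
	}
	of_node_put(cpu_node);
}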
@@ -587,7 +623,7 @@ int of_device_compatible_match(struct device_node *device,
* of_machine_is_compatible - Test root of device tree for a given compatible value
* @compat: compatible string to look for in root node's compatible property.
*
- * Returns a positive integer if the root node has the given value in its
+ * Return: A positive integer if the root node has the given value in its
* compatible property.
*/
int of_machine_is_compatible(const char *compat)
@@ -609,7 +645,7 @@ EXPORT_SYMBOL(of_machine_is_compatible);
*
* @device: Node to check for availability, with locks already held
*
- * Returns true if the status property is absent or set to "okay" or "ok",
+ * Return: True if the status property is absent or set to "okay" or "ok",
* false otherwise
*/
static bool __of_device_is_available(const struct device_node *device)
@@ -637,7 +673,7 @@ static bool __of_device_is_available(const struct device_node *device)
*
* @device: Node to check for availability
*
- * Returns true if the status property is absent or set to "okay" or "ok",
+ * Return: True if the status property is absent or set to "okay" or "ok",
* false otherwise
*/
bool of_device_is_available(const struct device_node *device)
@@ -658,7 +694,7 @@ EXPORT_SYMBOL(of_device_is_available);
*
* @device: Node to check for endianness
*
- * Returns true if the device has a "big-endian" property, or if the kernel
+ * Return: True if the device has a "big-endian" property, or if the kernel
* was compiled for BE *and* the device has a "native-endian" property.
* Returns false otherwise.
*
@@ -677,11 +713,11 @@ bool of_device_is_big_endian(const struct device_node *device)
EXPORT_SYMBOL(of_device_is_big_endian);
/**
- * of_get_parent - Get a node's parent if any
- * @node: Node to get parent
+ * of_get_parent - Get a node's parent if any
+ * @node: Node to get parent
*
- * Returns a node pointer with refcount incremented, use
- * of_node_put() on it when done.
+ * Return: A node pointer with refcount incremented, use
+ * of_node_put() on it when done.
*/
struct device_node *of_get_parent(const struct device_node *node)
{
@@ -699,15 +735,15 @@ struct device_node *of_get_parent(const struct device_node *node)
EXPORT_SYMBOL(of_get_parent);
/**
- * of_get_next_parent - Iterate to a node's parent
- * @node: Node to get parent of
+ * of_get_next_parent - Iterate to a node's parent
+ * @node: Node to get parent of
*
- * This is like of_get_parent() except that it drops the
- * refcount on the passed node, making it suitable for iterating
- * through a node's parents.
+ * This is like of_get_parent() except that it drops the
+ * refcount on the passed node, making it suitable for iterating
+ * through a node's parents.
*
- * Returns a node pointer with refcount incremented, use
- * of_node_put() on it when done.
+ * Return: A node pointer with refcount incremented, use
+ * of_node_put() on it when done.
*/
struct device_node *of_get_next_parent(struct device_node *node)
{
@@ -745,13 +781,13 @@ static struct device_node *__of_get_next_child(const struct device_node *node,
child = __of_get_next_child(parent, child))
/**
- * of_get_next_child - Iterate a node childs
- * @node: parent node
- * @prev: previous child of the parent node, or NULL to get first
+ * of_get_next_child - Iterate over a node's children
+ * @node: parent node
+ * @prev: previous child of the parent node, or NULL to get first
*
- * Returns a node pointer with refcount incremented, use of_node_put() on
- * it when done. Returns NULL when prev is the last child. Decrements the
- * refcount of prev.
+ * Return: A node pointer with refcount incremented, use of_node_put() on
+ * it when done. Returns NULL when prev is the last child. Decrements the
+ * refcount of prev.
*/
struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev)
@@ -767,12 +803,12 @@ struct device_node *of_get_next_child(const struct device_node *node,
EXPORT_SYMBOL(of_get_next_child);
/**
- * of_get_next_available_child - Find the next available child node
- * @node: parent node
- * @prev: previous child of the parent node, or NULL to get first
+ * of_get_next_available_child - Find the next available child node
+ * @node: parent node
+ * @prev: previous child of the parent node, or NULL to get first
*
- * This function is like of_get_next_child(), except that it
- * automatically skips any disabled nodes (i.e. status = "disabled").
+ * This function is like of_get_next_child(), except that it
+ * automatically skips any disabled nodes (i.e. status = "disabled").
*/
struct device_node *of_get_next_available_child(const struct device_node *node,
struct device_node *prev)
@@ -798,12 +834,12 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
EXPORT_SYMBOL(of_get_next_available_child);
/**
- * of_get_next_cpu_node - Iterate on cpu nodes
- * @prev: previous child of the /cpus node, or NULL to get first
+ * of_get_next_cpu_node - Iterate on cpu nodes
+ * @prev: previous child of the /cpus node, or NULL to get first
*
- * Returns a cpu node pointer with refcount incremented, use of_node_put()
- * on it when done. Returns NULL when prev is the last child. Decrements
- * the refcount of prev.
+ * Return: A cpu node pointer with refcount incremented, use of_node_put()
+ * on it when done. Returns NULL when prev is the last child. Decrements
+ * the refcount of prev.
*/
struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
@@ -842,7 +878,7 @@ EXPORT_SYMBOL(of_get_next_cpu_node);
* Lookup child node whose compatible property contains the given compatible
* string.
*
- * Returns a node pointer with refcount incremented, use of_node_put() on it
+ * Return: a node pointer with refcount incremented, use of_node_put() on it
* when done; or NULL if not found.
*/
struct device_node *of_get_compatible_child(const struct device_node *parent,
@@ -860,15 +896,15 @@ struct device_node *of_get_compatible_child(const struct device_node *parent,
EXPORT_SYMBOL(of_get_compatible_child);
/**
- * of_get_child_by_name - Find the child node by name for a given parent
- * @node: parent node
- * @name: child name to look for.
+ * of_get_child_by_name - Find the child node by name for a given parent
+ * @node: parent node
+ * @name: child name to look for.
*
- * This function looks for child node for given matching name
+ * This function looks for a child node matching the given name.
*
- * Returns a node pointer if found, with refcount incremented, use
- * of_node_put() on it when done.
- * Returns NULL if node is not found.
+ * Return: A node pointer if found, with refcount incremented, use
+ * of_node_put() on it when done.
+ * Returns NULL if node is not found.
*/
struct device_node *of_get_child_by_name(const struct device_node *node,
const char *name)
@@ -919,22 +955,22 @@ struct device_node *__of_find_node_by_full_path(struct device_node *node,
}
/**
- * of_find_node_opts_by_path - Find a node matching a full OF path
- * @path: Either the full path to match, or if the path does not
- * start with '/', the name of a property of the /aliases
- * node (an alias). In the case of an alias, the node
- * matching the alias' value will be returned.
- * @opts: Address of a pointer into which to store the start of
- * an options string appended to the end of the path with
- * a ':' separator.
- *
- * Valid paths:
- * /foo/bar Full path
- * foo Valid alias
- * foo/bar Valid alias + relative path
- *
- * Returns a node pointer with refcount incremented, use
- * of_node_put() on it when done.
+ * of_find_node_opts_by_path - Find a node matching a full OF path
+ * @path: Either the full path to match, or if the path does not
+ * start with '/', the name of a property of the /aliases
+ * node (an alias). In the case of an alias, the node
+ * matching the alias' value will be returned.
+ * @opts: Address of a pointer into which to store the start of
+ * an options string appended to the end of the path with
+ * a ':' separator.
+ *
+ * Valid paths:
+ * * /foo/bar Full path
+ * * foo Valid alias
+ * * foo/bar Valid alias + relative path
+ *
+ * Return: A node pointer with refcount incremented, use
+ * of_node_put() on it when done.
*/
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
@@ -984,15 +1020,15 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
EXPORT_SYMBOL(of_find_node_opts_by_path);
/**
- * of_find_node_by_name - Find a node by its "name" property
- * @from: The node to start searching from or NULL; the node
+ * of_find_node_by_name - Find a node by its "name" property
+ * @from: The node to start searching from or NULL; the node
* you pass will not be searched, only the next one
* will. Typically, you pass what the previous call
* returned. of_node_put() will be called on @from.
- * @name: The name string to match against
+ * @name: The name string to match against
*
- * Returns a node pointer with refcount incremented, use
- * of_node_put() on it when done.
+ * Return: A node pointer with refcount incremented, use
+ * of_node_put() on it when done.
*/
struct device_node *of_find_node_by_name(struct device_node *from,
const char *name)
@@ -1011,16 +1047,16 @@ struct device_node *of_find_node_by_name(struct device_node *from,
EXPORT_SYMBOL(of_find_node_by_name);
/**
- * of_find_node_by_type - Find a node by its "device_type" property
- * @from: The node to start searching from, or NULL to start searching
+ * of_find_node_by_type - Find a node by its "device_type" property
+ * @from: The node to start searching from, or NULL to start searching
* the entire device tree. The node you pass will not be
* searched, only the next one will; typically, you pass
* what the previous call returned. of_node_put() will be
* called on from for you.
- * @type: The type string to match against
+ * @type: The type string to match against
*
- * Returns a node pointer with refcount incremented, use
- * of_node_put() on it when done.
+ * Return: A node pointer with refcount incremented, use
+ * of_node_put() on it when done.
*/
struct device_node *of_find_node_by_type(struct device_node *from,
const char *type)
@@ -1039,18 +1075,18 @@ struct device_node *of_find_node_by_type(struct device_node *from,
EXPORT_SYMBOL(of_find_node_by_type);
/**
- * of_find_compatible_node - Find a node based on type and one of the
+ * of_find_compatible_node - Find a node based on type and one of the
* tokens in its "compatible" property
- * @from: The node to start searching from or NULL, the node
- * you pass will not be searched, only the next one
- * will; typically, you pass what the previous call
- * returned. of_node_put() will be called on it
- * @type: The type string to match "device_type" or NULL to ignore
- * @compatible: The string to match to one of the tokens in the device
- * "compatible" list.
- *
- * Returns a node pointer with refcount incremented, use
- * of_node_put() on it when done.
+ * @from: The node to start searching from or NULL, the node
+ * you pass will not be searched, only the next one
+ * will; typically, you pass what the previous call
+ * returned. of_node_put() will be called on it
+ * @type: The type string to match "device_type" or NULL to ignore
+ * @compatible: The string to match to one of the tokens in the device
+ * "compatible" list.
+ *
+ * Return: A node pointer with refcount incremented, use
+ * of_node_put() on it when done.
*/
struct device_node *of_find_compatible_node(struct device_node *from,
const char *type, const char *compatible)
@@ -1070,16 +1106,16 @@ struct device_node *of_find_compatible_node(struct device_node *from,
EXPORT_SYMBOL(of_find_compatible_node);
/**
- * of_find_node_with_property - Find a node which has a property with
- * the given name.
- * @from: The node to start searching from or NULL, the node
- * you pass will not be searched, only the next one
- * will; typically, you pass what the previous call
- * returned. of_node_put() will be called on it
- * @prop_name: The name of the property to look for.
- *
- * Returns a node pointer with refcount incremented, use
- * of_node_put() on it when done.
+ * of_find_node_with_property - Find a node which has a property with
+ * the given name.
+ * @from: The node to start searching from or NULL, the node
+ * you pass will not be searched, only the next one
+ * will; typically, you pass what the previous call
+ * returned. of_node_put() will be called on it
+ * @prop_name: The name of the property to look for.
+ *
+ * Return: A node pointer with refcount incremented, use
+ * of_node_put() on it when done.
*/
struct device_node *of_find_node_with_property(struct device_node *from,
const char *prop_name)
@@ -1128,10 +1164,10 @@ const struct of_device_id *__of_match_node(const struct of_device_id *matches,
/**
* of_match_node - Tell if a device_node has a matching of_match structure
- * @matches: array of of device match structures to search in
- * @node: the of device structure to match against
+ * @matches: array of of_device_id match structures to search in
+ * @node: the of device structure to match against
*
- * Low level utility function used by device matching.
+ * Low level utility function used by device matching.
*/
const struct of_device_id *of_match_node(const struct of_device_id *matches,
const struct device_node *node)
@@ -1147,17 +1183,17 @@ const struct of_device_id *of_match_node(const struct of_device_id *matches,
EXPORT_SYMBOL(of_match_node);
/**
- * of_find_matching_node_and_match - Find a node based on an of_device_id
- * match table.
- * @from: The node to start searching from or NULL, the node
- * you pass will not be searched, only the next one
- * will; typically, you pass what the previous call
- * returned. of_node_put() will be called on it
- * @matches: array of of device match structures to search in
- * @match Updated to point at the matches entry which matched
- *
- * Returns a node pointer with refcount incremented, use
- * of_node_put() on it when done.
+ * of_find_matching_node_and_match - Find a node based on an of_device_id
+ * match table.
+ * @from: The node to start searching from or NULL, the node
+ * you pass will not be searched, only the next one
+ * will; typically, you pass what the previous call
+ * returned. of_node_put() will be called on it
+ * @matches: array of of_device_id match structures to search in
+ * @match: Updated to point at the matches entry which matched
+ *
+ * Return: A node pointer with refcount incremented, use
+ * of_node_put() on it when done.
*/
struct device_node *of_find_matching_node_and_match(struct device_node *from,
const struct of_device_id *matches,
@@ -1196,7 +1232,7 @@ EXPORT_SYMBOL(of_find_matching_node_and_match);
* It does this by stripping the manufacturer prefix (as delimited by a ',')
* from the first entry in the compatible list property.
*
- * This routine returns 0 on success, <0 on failure.
+ * Return: This routine returns 0 on success, <0 on failure.
*/
int of_modalias_node(struct device_node *node, char *modalias, int len)
{
@@ -1216,7 +1252,7 @@ EXPORT_SYMBOL_GPL(of_modalias_node);
* of_find_node_by_phandle - Find a node given a phandle
* @handle: phandle of the node to find
*
- * Returns a node pointer with refcount incremented, use
+ * Return: A node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct device_node *of_find_node_by_phandle(phandle handle)
@@ -1469,7 +1505,7 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
* @index: For properties holding a table of phandles, this is the index into
* the table
*
- * Returns the device_node pointer with refcount incremented. Use
+ * Return: The device_node pointer with refcount incremented. Use
* of_node_put() on it when done.
*/
struct device_node *of_parse_phandle(const struct device_node *np,
@@ -1503,21 +1539,21 @@ EXPORT_SYMBOL(of_parse_phandle);
* Caller is responsible to call of_node_put() on the returned out_args->np
* pointer.
*
- * Example:
+ * Example::
*
- * phandle1: node1 {
+ * phandle1: node1 {
* #list-cells = <2>;
- * }
+ * };
*
- * phandle2: node2 {
+ * phandle2: node2 {
* #list-cells = <1>;
- * }
+ * };
*
- * node3 {
+ * node3 {
* list = <&phandle1 1 2 &phandle2 3>;
- * }
+ * };
*
- * To get a device_node of the `node2' node you may call this:
+ * To get a device_node of the ``node2`` node you may call this:
* of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
*/
int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
@@ -1555,29 +1591,29 @@ EXPORT_SYMBOL(of_parse_phandle_with_args);
* Caller is responsible to call of_node_put() on the returned out_args->np
* pointer.
*
- * Example:
+ * Example::
*
- * phandle1: node1 {
- * #list-cells = <2>;
- * }
+ * phandle1: node1 {
+ * #list-cells = <2>;
+ * };
*
- * phandle2: node2 {
- * #list-cells = <1>;
- * }
+ * phandle2: node2 {
+ * #list-cells = <1>;
+ * };
*
- * phandle3: node3 {
- * #list-cells = <1>;
- * list-map = <0 &phandle2 3>,
- * <1 &phandle2 2>,
- * <2 &phandle1 5 1>;
- * list-map-mask = <0x3>;
- * };
+ * phandle3: node3 {
+ * #list-cells = <1>;
+ * list-map = <0 &phandle2 3>,
+ * <1 &phandle2 2>,
+ * <2 &phandle1 5 1>;
+ * list-map-mask = <0x3>;
+ * };
*
- * node4 {
- * list = <&phandle1 1 2 &phandle3 0>;
- * }
+ * node4 {
+ * list = <&phandle1 1 2 &phandle3 0>;
+ * };
*
- * To get a device_node of the `node2' node you may call this:
+ * To get a device_node of the ``node2`` node you may call this:
+ * of_parse_phandle_with_args_map(node4, "list", "list", 1, &args);
*/
int of_parse_phandle_with_args_map(const struct device_node *np,
@@ -1708,6 +1744,7 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
out_args->np = new;
of_node_put(cur);
cur = new;
+ new = NULL;
}
put:
of_node_put(cur);
@@ -1737,19 +1774,19 @@ EXPORT_SYMBOL(of_parse_phandle_with_args_map);
* Caller is responsible to call of_node_put() on the returned out_args->np
* pointer.
*
- * Example:
+ * Example::
*
- * phandle1: node1 {
- * }
+ * phandle1: node1 {
+ * };
*
- * phandle2: node2 {
- * }
+ * phandle2: node2 {
+ * };
*
- * node3 {
- * list = <&phandle1 0 2 &phandle2 2 3>;
- * }
+ * node3 {
+ * list = <&phandle1 0 2 &phandle2 2 3>;
+ * };
*
- * To get a device_node of the `node2' node you may call this:
+ * To get a device_node of the ``node2`` node you may call this:
* of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
*/
int of_parse_phandle_with_fixed_args(const struct device_node *np,
@@ -1769,7 +1806,7 @@ EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
* @list_name: property name that contains a list
* @cells_name: property name that specifies phandles' arguments count
*
- * Returns the number of phandle + argument tuples within a property. It
+ * Return: The number of phandle + argument tuples within a property. It
* is a typical pattern to encode a list of phandle and variable
* arguments into a single property. The number of arguments is encoded
* by a property in the phandle-target node. For example, a gpios
@@ -1817,6 +1854,8 @@ EXPORT_SYMBOL(of_count_phandle_with_args);
/**
* __of_add_property - Add a property to a node without lock operations
+ * @np: Caller's Device Node
+ * @prop: Property to add
*/
int __of_add_property(struct device_node *np, struct property *prop)
{
@@ -1838,6 +1877,8 @@ int __of_add_property(struct device_node *np, struct property *prop)
/**
* of_add_property - Add a property to a node
+ * @np: Caller's Device Node
+ * @prop: Property to add
*/
int of_add_property(struct device_node *np, struct property *prop)
{
@@ -1882,6 +1923,8 @@ int __of_remove_property(struct device_node *np, struct property *prop)
/**
* of_remove_property - Remove a property from a node.
+ * @np: Caller's Device Node
+ * @prop: Property to remove
*
* Note that we don't actually remove it, since we have given out
* who-knows-how-many pointers to the data using get-property.
@@ -1988,13 +2031,12 @@ static void of_alias_add(struct alias_prop *ap, struct device_node *np,
/**
* of_alias_scan - Scan all properties of the 'aliases' node
+ * @dt_alloc: An allocator that provides a virtual address to memory
+ * for storing the resulting tree
*
* The function scans all the properties of the 'aliases' node and populates
* the global lookup table with the properties. It returns the
* number of alias properties found, or an error code in case of failure.
- *
- * @dt_alloc: An allocator that provides a virtual address to memory
- * for storing the resulting tree
*/
void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
{
@@ -2063,7 +2105,9 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
* @stem: Alias stem of the given device_node
*
* The function travels the lookup table to get the alias id for the given
- * device_node and alias stem. It returns the alias id if found.
+ * device_node and alias stem.
+ *
+ * Return: The alias id if found.
*/
int of_alias_get_id(struct device_node *np, const char *stem)
{
@@ -2167,13 +2211,14 @@ EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
/**
* of_console_check() - Test and setup console for DT setup
- * @dn - Pointer to device node
- * @name - Name to use for preferred console without index. ex. "ttyS"
- * @index - Index to use for preferred console.
+ * @dn: Pointer to device node
+ * @name: Name to use for preferred console without index. ex. "ttyS"
+ * @index: Index to use for preferred console.
*
* Check if the given device node matches the stdout-path property in the
- * /chosen node. If it does then register it as the preferred console and return
- * TRUE. Otherwise return FALSE.
+ * /chosen node. If it does then register it as the preferred console.
+ *
+ * Return: TRUE if the console was successfully set up. Otherwise return FALSE.
*/
bool of_console_check(struct device_node *dn, char *name, int index)
{
@@ -2189,12 +2234,12 @@ bool of_console_check(struct device_node *dn, char *name, int index)
EXPORT_SYMBOL_GPL(of_console_check);
/**
- * of_find_next_cache_node - Find a node's subsidiary cache
- * @np: node of type "cpu" or "cache"
+ * of_find_next_cache_node - Find a node's subsidiary cache
+ * @np: node of type "cpu" or "cache"
*
- * Returns a node pointer with refcount incremented, use
- * of_node_put() on it when done. Caller should hold a reference
- * to np.
+ * Return: A node pointer with refcount incremented, use
+ * of_node_put() on it when done. Caller should hold a reference
+ * to np.
*/
struct device_node *of_find_next_cache_node(const struct device_node *np)
{
@@ -2224,7 +2269,7 @@ struct device_node *of_find_next_cache_node(const struct device_node *np)
*
* @cpu: cpu number(logical index) for which the last cache level is needed
*
- * Returns the the level at which the last cache is present. It is exactly
+ * Return: The level at which the last cache is present. It is exactly
* same as the total number of cache levels for the given logical cpu.
*/
int of_find_last_cache_level(unsigned int cpu)
@@ -2244,15 +2289,15 @@ int of_find_last_cache_level(unsigned int cpu)
}
/**
- * of_map_rid - Translate a requester ID through a downstream mapping.
+ * of_map_id - Translate an ID through a downstream mapping.
* @np: root complex device node.
- * @rid: device requester ID to map.
+ * @id: device ID to map.
* @map_name: property name of the map to use.
* @map_mask_name: optional property name of the mask to use.
* @target: optional pointer to a target device node.
* @id_out: optional pointer to receive the translated ID.
*
- * Given a device requester ID, look up the appropriate implementation-defined
+ * Given a device ID, look up the appropriate implementation-defined
* platform ID and/or the target device which receives transactions on that
* ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
* @id_out may be NULL if only the other is required. If @target points to
@@ -2262,11 +2307,11 @@ int of_find_last_cache_level(unsigned int cpu)
*
* Return: 0 on success or a standard error code on failure.
*/
-int of_map_rid(struct device_node *np, u32 rid,
+int of_map_id(struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out)
{
- u32 map_mask, masked_rid;
+ u32 map_mask, masked_id;
int map_len;
const __be32 *map = NULL;
@@ -2278,7 +2323,7 @@ int of_map_rid(struct device_node *np, u32 rid,
if (target)
return -ENODEV;
/* Otherwise, no map implies no translation */
- *id_out = rid;
+ *id_out = id;
return 0;
}
@@ -2298,22 +2343,22 @@ int of_map_rid(struct device_node *np, u32 rid,
if (map_mask_name)
of_property_read_u32(np, map_mask_name, &map_mask);
- masked_rid = map_mask & rid;
+ masked_id = map_mask & id;
for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
struct device_node *phandle_node;
- u32 rid_base = be32_to_cpup(map + 0);
+ u32 id_base = be32_to_cpup(map + 0);
u32 phandle = be32_to_cpup(map + 1);
u32 out_base = be32_to_cpup(map + 2);
- u32 rid_len = be32_to_cpup(map + 3);
+ u32 id_len = be32_to_cpup(map + 3);
- if (rid_base & ~map_mask) {
- pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
+ if (id_base & ~map_mask) {
+ pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
np, map_name, map_name,
- map_mask, rid_base);
+ map_mask, id_base);
return -EFAULT;
}
- if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
+ if (masked_id < id_base || masked_id >= id_base + id_len)
continue;
phandle_node = of_find_node_by_phandle(phandle);
@@ -2331,20 +2376,20 @@ int of_map_rid(struct device_node *np, u32 rid,
}
if (id_out)
- *id_out = masked_rid - rid_base + out_base;
+ *id_out = masked_id - id_base + out_base;
- pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
- np, map_name, map_mask, rid_base, out_base,
- rid_len, rid, masked_rid - rid_base + out_base);
+ pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
+ np, map_name, map_mask, id_base, out_base,
+ id_len, id, masked_id - id_base + out_base);
return 0;
}
- pr_info("%pOF: no %s translation for rid 0x%x on %pOF\n", np, map_name,
- rid, target && *target ? *target : NULL);
+ pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
+ id, target && *target ? *target : NULL);
/* Bypasses translation */
if (id_out)
- *id_out = rid;
+ *id_out = id;
return 0;
}
-EXPORT_SYMBOL_GPL(of_map_rid);
+EXPORT_SYMBOL_GPL(of_map_id);
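
The rename above is mechanical, but the table walk it documents is worth
seeing in isolation. Below is a minimal stand-alone model of the
of_map_id() lookup (plain user-space C; the map contents are hypothetical,
and real "msi-map"/"iommu-map" entries also carry a phandle to the target
node, which this sketch omits):

#include <stdint.h>
#include <stdio.h>

struct map_entry { uint32_t id_base, out_base, len; };

/* Model of the of_map_id() walk: mask the input ID, find the entry whose
 * [id_base, id_base + len) range contains it, and offset into the output
 * ID space. Returns 0 on success, -1 if no entry matches. */
static int map_id(const struct map_entry *map, int n, uint32_t mask,
		  uint32_t id, uint32_t *out)
{
	uint32_t masked = id & mask;
	int i;

	for (i = 0; i < n; i++) {
		if (masked < map[i].id_base ||
		    masked >= map[i].id_base + map[i].len)
			continue;
		*out = masked - map[i].id_base + map[i].out_base;
		return 0;
	}
	return -1;
}

int main(void)
{
	/* hypothetical msi-map: IDs 0x0000-0x00ff -> MSI IDs from 0x100 */
	struct map_entry map[] = { { 0x0000, 0x100, 0x100 } };
	uint32_t out;

	if (!map_id(map, 1, 0xffff, 0x0042, &out))
		printf("0x0042 -> 0x%x\n", out);	/* prints 0x142 */
	return 0;
}
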
diff --git a/drivers/of/configfs.c b/drivers/of/configfs.c
new file mode 100644
index 000000000000..f18f7d5a8146
--- /dev/null
+++ b/drivers/of/configfs.c
@@ -0,0 +1,293 @@
+/*
+ * Configfs entries for device-tree
+ *
+ * Copyright (C) 2013 - Pantelis Antoniou <panto@antoniou-consulting.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/ctype.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/spinlock.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/configfs.h>
+#include <linux/types.h>
+#include <linux/stat.h>
+#include <linux/limits.h>
+#include <linux/file.h>
+#include <linux/vmalloc.h>
+#include <linux/firmware.h>
+
+#include "of_private.h"
+
+struct cfs_overlay_item {
+ struct config_item item;
+
+ char path[PATH_MAX];
+
+ const struct firmware *fw;
+ struct device_node *overlay;
+ int ov_id;
+
+ void *dtbo;
+ int dtbo_size;
+
+ void *mem;
+};
+
+static DEFINE_MUTEX(overlay_lock);
+
+static int create_overlay(struct cfs_overlay_item *overlay, void *blob)
+{
+ int err;
+
+ /* FIXME */
+ err = of_overlay_fdt_apply(blob, overlay->dtbo_size, &overlay->ov_id);
+ if (err < 0) {
+ pr_err("%s: Failed to create overlay (err=%d)\n",
+ __func__, err);
+ return err;
+ }
+
+ return err;
+}
+
+static inline struct cfs_overlay_item *to_cfs_overlay_item(
+ struct config_item *item)
+{
+ return item ? container_of(item, struct cfs_overlay_item, item) : NULL;
+}
+
+static ssize_t cfs_overlay_item_path_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%s\n", to_cfs_overlay_item(item)->path);
+}
+
+static ssize_t cfs_overlay_item_path_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+ const char *p = page;
+ char *s;
+ int err;
+
+ /* if it's set do not allow changes */
+ if (overlay->path[0] != '\0' || overlay->dtbo_size > 0)
+ return -EPERM;
+
+ /* copy to path buffer (and make sure it's always zero terminated */
+ count = snprintf(overlay->path, sizeof(overlay->path) - 1, "%s", p);
+ overlay->path[sizeof(overlay->path) - 1] = '\0';
+
+ /* strip trailing newlines */
+ s = overlay->path + strlen(overlay->path);
+ while (s > overlay->path && *--s == '\n')
+ *s = '\0';
+
+ pr_debug("%s: path is '%s'\n", __func__, overlay->path);
+
+ err = request_firmware(&overlay->fw, overlay->path, NULL);
+ if (err != 0)
+ goto out_err;
+
+ overlay->dtbo_size = overlay->fw->size;
+ err = create_overlay(overlay, (void *)overlay->fw->data);
+ if (err < 0)
+ goto out_err;
+
+ return count;
+
+out_err:
+
+ release_firmware(overlay->fw);
+ overlay->fw = NULL;
+
+ overlay->path[0] = '\0';
+
+	return err;
+}
+
+static ssize_t cfs_overlay_item_status_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%s\n", to_cfs_overlay_item(item)->ov_id >= 0 ?
+ "applied" : "unapplied");
+}
+
+CONFIGFS_ATTR(cfs_overlay_item_, path);
+CONFIGFS_ATTR_RO(cfs_overlay_item_, status);
+
+static struct configfs_attribute *cfs_overlay_attrs[] = {
+ &cfs_overlay_item_attr_path,
+ &cfs_overlay_item_attr_status,
+ NULL,
+};
+
+ssize_t cfs_overlay_item_dtbo_read(struct config_item *item, void *buf,
+ size_t max_count)
+{
+ struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+ pr_debug("%s: buf=%p max_count=%zu\n", __func__,
+ buf, max_count);
+
+ if (overlay->dtbo == NULL)
+ return 0;
+
+ /* copy if buffer provided */
+ if (buf != NULL) {
+ /* the buffer must be large enough */
+ if (overlay->dtbo_size > max_count)
+ return -ENOSPC;
+
+ memcpy(buf, overlay->dtbo, overlay->dtbo_size);
+ }
+
+ return overlay->dtbo_size;
+}
+
+ssize_t cfs_overlay_item_dtbo_write(struct config_item *item, const void *buf,
+ size_t count)
+{
+ struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+ int err;
+
+ /* if it's set do not allow changes */
+ if (overlay->path[0] != '\0' || overlay->dtbo_size > 0)
+ return -EPERM;
+
+ /* copy the contents */
+ overlay->dtbo = kmemdup(buf, count, GFP_KERNEL);
+ if (overlay->dtbo == NULL)
+ return -ENOMEM;
+
+ overlay->dtbo_size = count;
+
+ err = create_overlay(overlay, overlay->dtbo);
+ if (err < 0)
+ goto out_err;
+
+ return count;
+
+out_err:
+ kfree(overlay->dtbo);
+ overlay->dtbo = NULL;
+ overlay->dtbo_size = 0;
+
+ return err;
+}
+
+CONFIGFS_BIN_ATTR(cfs_overlay_item_, dtbo, NULL, SZ_1M);
+
+static struct configfs_bin_attribute *cfs_overlay_bin_attrs[] = {
+ &cfs_overlay_item_attr_dtbo,
+ NULL,
+};
+
+static void cfs_overlay_release(struct config_item *item)
+{
+ struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+ if (overlay->ov_id >= 0)
+ of_overlay_remove(&overlay->ov_id);
+ if (overlay->fw)
+ release_firmware(overlay->fw);
+ /* kfree with NULL is safe */
+ kfree(overlay->dtbo);
+ kfree(overlay->mem);
+ kfree(overlay);
+}
+
+static struct configfs_item_operations cfs_overlay_item_ops = {
+ .release = cfs_overlay_release,
+};
+
+static struct config_item_type cfs_overlay_type = {
+ .ct_item_ops = &cfs_overlay_item_ops,
+ .ct_attrs = cfs_overlay_attrs,
+ .ct_bin_attrs = cfs_overlay_bin_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item *cfs_overlay_group_make_item(
+ struct config_group *group, const char *name)
+{
+ struct cfs_overlay_item *overlay;
+
+ overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+ if (!overlay)
+ return ERR_PTR(-ENOMEM);
+ overlay->ov_id = -1;
+
+ config_item_init_type_name(&overlay->item, name, &cfs_overlay_type);
+ return &overlay->item;
+}
+
+static void cfs_overlay_group_drop_item(struct config_group *group,
+ struct config_item *item)
+{
+ struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+ config_item_put(&overlay->item);
+}
+
+static struct configfs_group_operations overlays_ops = {
+ .make_item = cfs_overlay_group_make_item,
+ .drop_item = cfs_overlay_group_drop_item,
+};
+
+static struct config_item_type overlays_type = {
+ .ct_group_ops = &overlays_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_group_operations of_cfs_ops = {
+ /* empty - we don't allow anything to be created */
+};
+
+static struct config_item_type of_cfs_type = {
+ .ct_group_ops = &of_cfs_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+struct config_group of_cfs_overlay_group;
+
+static struct configfs_subsystem of_cfs_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "device-tree",
+ .ci_type = &of_cfs_type,
+ },
+ },
+ .su_mutex = __MUTEX_INITIALIZER(of_cfs_subsys.su_mutex),
+};
+
+static int __init of_cfs_init(void)
+{
+ int ret;
+
+ pr_info("%s\n", __func__);
+
+ config_group_init(&of_cfs_subsys.su_group);
+ config_group_init_type_name(&of_cfs_overlay_group, "overlays",
+ &overlays_type);
+ configfs_add_default_group(&of_cfs_overlay_group,
+ &of_cfs_subsys.su_group);
+
+ ret = configfs_register_subsystem(&of_cfs_subsys);
+ if (ret != 0) {
+ pr_err("%s: failed to register subsys\n", __func__);
+ goto out;
+ }
+ pr_info("%s: OK\n", __func__);
+out:
+ return ret;
+}
+late_initcall(of_cfs_init);
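
For context, the subsystem registered above shows up under the configfs
mount as device-tree/overlays/. A hedged user-space sketch of applying an
overlay through the 'path' attribute follows; the mount point and the
firmware file name are assumptions, and the raw blob could equally be
written to the 'dtbo' binary attribute instead:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	/* assumes configfs is mounted at /sys/kernel/config */
	const char *dir = "/sys/kernel/config/device-tree/overlays/demo";
	const char *fw = "demo-overlay.dtbo\n";	/* under /lib/firmware */
	char attr[256];
	int fd;

	/* mkdir triggers cfs_overlay_group_make_item() in the kernel */
	if (mkdir(dir, 0755) && errno != EEXIST)
		return 1;

	snprintf(attr, sizeof(attr), "%s/path", dir);
	fd = open(attr, O_WRONLY);
	if (fd < 0)
		return 1;
	/* the store hook request_firmware()s this path and applies it */
	if (write(fd, fw, strlen(fw)) < 0)
		perror("write");
	close(fd);

	/* reading "%s/status" should now report "applied" on success */
	return 0;
}
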
diff --git a/drivers/of/device.c b/drivers/of/device.c
index da8158392010..7fb870097a84 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -246,12 +246,15 @@ int of_device_request_module(struct device *dev)
if (size < 0)
return size;
- str = kmalloc(size + 1, GFP_KERNEL);
+ /* Reserve an additional byte for the trailing '\0' */
+ size++;
+
+ str = kmalloc(size, GFP_KERNEL);
if (!str)
return -ENOMEM;
of_device_get_modalias(dev, str, size);
- str[size] = '\0';
+ str[size - 1] = '\0';
ret = request_module(str);
kfree(str);
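
The hunk above encodes a common sizing convention: the length query
excludes the terminating NUL, so the caller allocates one extra byte and
terminates explicitly. A stand-alone illustration of the convention
(get_name() is a made-up stand-in, not a kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for of_device_get_modalias(): copies at most buflen bytes and
 * returns the length of the full string, excluding the trailing NUL. */
static int get_name(char *buf, int buflen)
{
	const char *name = "of:NdemoT(null)Cvendor,demo";

	if (buf && buflen > 0)
		strncpy(buf, name, buflen);
	return strlen(name);
}

int main(void)
{
	int size = get_name(NULL, 0);	/* NUL not counted */
	char *str;

	size++;				/* reserve a byte for the '\0' */
	str = malloc(size);
	if (!str)
		return 1;
	get_name(str, size);
	str[size - 1] = '\0';		/* guarantee termination */
	printf("%s\n", str);
	free(str);
	return 0;
}
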
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 49b16f76d78e..1f9ad0132b88 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -27,7 +27,7 @@ static struct device_node *kobj_to_device_node(struct kobject *kobj)
* @node: Node to inc refcount, NULL is supported to simplify writing of
* callers
*
- * Returns node.
+ * Return: The node with refcount incremented.
*/
struct device_node *of_node_get(struct device_node *node)
{
@@ -104,8 +104,10 @@ int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p)
 * @arg: argument of the of notifier
*
* Returns the new state of a device based on the notifier used.
- * Returns 0 on device going from enabled to disabled, 1 on device
- * going from disabled to enabled and -1 on no change.
+ *
+ * Return: OF_RECONFIG_CHANGE_REMOVE on device going from enabled to
+ * disabled, OF_RECONFIG_CHANGE_ADD on device going from disabled to
+ * enabled and OF_RECONFIG_NO_CHANGE on no change.
*/
int of_reconfig_get_state_change(unsigned long action, struct of_reconfig_data *pr)
{
@@ -372,7 +374,8 @@ void of_node_release(struct kobject *kobj)
* property structure and the property name & contents. The property's
* flags have the OF_DYNAMIC bit set so that we can differentiate between
* dynamically allocated properties and not.
- * Returns the newly allocated property or NULL on out of memory error.
+ *
+ * Return: The newly allocated property or NULL on out of memory error.
*/
struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags)
{
@@ -415,7 +418,7 @@ struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags)
* another node. The node data are dynamically allocated and all the node
* flags have the OF_DYNAMIC & OF_DETACHED bits set.
*
- * Returns the newly allocated node or NULL on out of memory error.
+ * Return: The newly allocated node or NULL on out of memory error.
*/
struct device_node *__of_node_dup(const struct device_node *np,
const char *full_name)
@@ -781,7 +784,8 @@ static int __of_changeset_apply(struct of_changeset *ocs)
* Any side-effects of live tree state changes are applied here on
* success, like creation/destruction of devices and side-effects
* like creation of sysfs properties and directories.
- * Returns 0 on success, a negative error value in case of an error.
+ *
+ * Return: 0 on success, a negative error value in case of an error.
* On error the partially applied effects are reverted.
*/
int of_changeset_apply(struct of_changeset *ocs)
@@ -875,7 +879,8 @@ static int __of_changeset_revert(struct of_changeset *ocs)
* was before the application.
* Any side-effects like creation/destruction of devices and
* removal of sysfs properties and directories are applied.
- * Returns 0 on success, a negative error value in case of an error.
+ *
+ * Return: 0 on success, a negative error value in case of an error.
*/
int of_changeset_revert(struct of_changeset *ocs)
{
@@ -903,7 +908,8 @@ EXPORT_SYMBOL_GPL(of_changeset_revert);
* + OF_RECONFIG_ADD_PROPERTY
* + OF_RECONFIG_REMOVE_PROPERTY,
* + OF_RECONFIG_UPDATE_PROPERTY
- * Returns 0 on success, a negative error value in case of an error.
+ *
+ * Return: 0 on success, a negative error value in case of an error.
*/
int of_changeset_action(struct of_changeset *ocs, unsigned long action,
struct device_node *np, struct property *prop)
@@ -927,3 +933,176 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action,
return 0;
}
EXPORT_SYMBOL_GPL(of_changeset_action);
+
+/* changeset helpers */
+
+/**
+ * __of_changeset_add_update_property_copy - Create/update a property
+ * copying name & value
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @value: pointer to the value data
+ * @length: length of the value in bytes
+ * @update: True on update operation
+ *
+ * Adds/updates a property to the changeset by making copies of the name & value
+ * entries. The @update parameter controls whether an add or update takes place.
+ *
+ * Return: Zero on success, a negative error value otherwise.
+ */
+int __of_changeset_add_update_property_copy(struct of_changeset *ocs,
+ struct device_node *np, const char *name, const void *value,
+ int length, bool update)
+{
+ struct property *prop;
+ int ret = -ENOMEM;
+
+ prop = kzalloc(sizeof(*prop), GFP_KERNEL);
+ if (!prop)
+ return -ENOMEM;
+
+	/* assign into prop as we go so the out_err path frees everything */
+	prop->name = kstrdup(name, GFP_KERNEL);
+	if (!prop->name)
+		goto out_err;
+
+ /*
+ * NOTE: There is no check for zero length value.
+ * In case of a boolean property, this will allocate a value
+ * of zero bytes. We do this to work around the use
+ * of of_get_property() calls on boolean values.
+ */
+	prop->value = kmemdup(value, length, GFP_KERNEL);
+	if (!prop->value)
+		goto out_err;
+
+ of_property_set_flag(prop, OF_DYNAMIC);
+
+	prop->length = length;
+
+ if (!update)
+ ret = of_changeset_add_property(ocs, np, prop);
+ else
+ ret = of_changeset_update_property(ocs, np, prop);
+
+ if (!ret)
+ return 0;
+
+out_err:
+ kfree(prop->value);
+ kfree(prop->name);
+ kfree(prop);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__of_changeset_add_update_property_copy);
+
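
A short kernel-style sketch of how a caller might drive the helper above
(illustrative only: it assumes @np is a valid, referenced device node and
keeps error handling minimal):

/* Stage the addition of a string property through a changeset, then
 * apply it; of_changeset_apply() takes of_mutex internally. */
static int demo_stage_prop(struct of_changeset *ocs, struct device_node *np)
{
	const char *value = "demo";
	int ret;

	of_changeset_init(ocs);

	/* update=false: add a new property rather than replace one */
	ret = __of_changeset_add_update_property_copy(ocs, np, "label",
						      value,
						      strlen(value) + 1,
						      false);
	if (ret)
		return ret;

	return of_changeset_apply(ocs);
}
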
+/**
+ * of_changeset_add_property_stringf - Create a new formatted string property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @fmt: format of string property
+ * ... arguments of the format string
+ *
+ * Adds a string property to the changeset by making copies of the name
+ * and the formatted value.
+ *
+ * Return: Zero on success, a negative error value otherwise.
+ */
+__printf(4, 5) int of_changeset_add_property_stringf(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char *fmt, ...)
+{
+ va_list vargs;
+ int ret;
+
+ va_start(vargs, fmt);
+ ret = __of_changeset_add_update_property_stringv(ocs, np, name, fmt,
+ vargs, false);
+ va_end(vargs);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_changeset_add_property_stringf);
+
+/**
+ * of_changeset_update_property_stringf - Update formatted string property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @fmt: format of string property
+ * ... arguments of the format string
+ *
+ * Updates a string property to the changeset by making copies of the name
+ * and the formatted value.
+ *
+ * Return: Zero on success, a negative error value otherwise.
+ */
+int of_changeset_update_property_stringf(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char *fmt, ...)
+{
+ va_list vargs;
+ int ret;
+
+ va_start(vargs, fmt);
+ ret = __of_changeset_add_update_property_stringv(ocs, np, name, fmt,
+ vargs, true);
+ va_end(vargs);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_changeset_update_property_stringf);
+
+/**
+ * __of_changeset_add_update_property_string_list - Create/update a string
+ * list property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @strs: pointer to the string list
+ * @count: string count
+ * @update: True on update operation
+ *
+ * Adds a string list property to the changeset.
+ *
+ * Return: Zero on success, a negative error value otherwise.
+ */
+int __of_changeset_add_update_property_string_list(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char **strs, int count, bool update)
+{
+ int total = 0, i, ret;
+ char *value, *s;
+
+ for (i = 0; i < count; i++) {
+ /* check if it's NULL */
+ if (!strs[i])
+ return -EINVAL;
+ total += strlen(strs[i]) + 1;
+ }
+
+ value = kmalloc(total, GFP_KERNEL);
+ if (!value)
+ return -ENOMEM;
+
+ for (i = 0, s = value; i < count; i++) {
+ /* no need to check for NULL, check above */
+ strcpy(s, strs[i]);
+ s += strlen(strs[i]) + 1;
+ }
+
+ ret = __of_changeset_add_update_property_copy(ocs, np, name, value,
+ total, update);
+
+ kfree(value);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__of_changeset_add_update_property_string_list);
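
The string-list helper above relies on the flattened-tree convention that
a string list is simply the member strings concatenated, each keeping its
NUL terminator. A tiny stand-alone model of that packing:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* pack {"okay", "disabled"} the way the helper lays out value */
	const char *strs[] = { "okay", "disabled" };
	int i, count = 2, total = 0;
	char *value, *s;

	for (i = 0; i < count; i++)
		total += strlen(strs[i]) + 1;	/* +1 per '\0' */

	value = malloc(total);
	if (!value)
		return 1;

	for (i = 0, s = value; i < count; i++) {
		strcpy(s, strs[i]);
		s += strlen(strs[i]) + 1;
	}

	printf("packed %d strings into %d bytes\n", count, total);
	free(value);
	return 0;
}
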
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 943d2a60bfdf..1db951f43353 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -282,7 +282,7 @@ static void reverse_nodes(struct device_node *parent)
* @dad: Parent struct device_node
* @nodepp: The device_node tree created by the call
*
- * It returns the size of unflattened device tree or error code
+ * Return: The size of unflattened device tree or error code
*/
static int unflatten_dt_nodes(const void *blob,
void *mem,
@@ -315,7 +315,7 @@ static int unflatten_dt_nodes(const void *blob,
for (offset = 0;
offset >= 0 && depth >= initial_depth;
offset = fdt_next_node(blob, offset, &depth)) {
- if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
+ if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
continue;
if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
@@ -349,11 +349,6 @@ static int unflatten_dt_nodes(const void *blob,
/**
* __unflatten_device_tree - create tree of device_nodes from flat blob
- *
- * unflattens a device-tree, creating the
- * tree of struct device_node. It also fills the "name" and "type"
- * pointers of the nodes so the normal device-tree walking functions
- * can be used.
* @blob: The blob to expand
* @dad: Parent device node
* @mynodes: The device_node tree created by the call
@@ -361,7 +356,11 @@ static int unflatten_dt_nodes(const void *blob,
* for the resulting tree
* @detached: if true set OF_DETACHED on @mynodes
*
- * Returns NULL on failure or the memory chunk containing the unflattened
+ * unflattens a device-tree, creating the tree of struct device_node. It also
+ * fills the "name" and "type" pointers of the nodes so the normal device-tree
+ * walking functions can be used.
+ *
+ * Return: NULL on failure or the memory chunk containing the unflattened
* device tree on success.
*/
void *__unflatten_device_tree(const void *blob,
@@ -442,7 +441,7 @@ static DEFINE_MUTEX(of_fdt_unflatten_mutex);
* pointers of the nodes so the normal device-tree walking functions
* can be used.
*
- * Returns NULL on failure or the memory chunk containing the unflattened
+ * Return: NULL on failure or the memory chunk containing the unflattened
* device tree on success.
*/
void *of_fdt_unflatten_tree(const unsigned long *blob,
@@ -720,7 +719,7 @@ const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
* @node: node to test
* @compat: compatible string to compare with compatible list.
*
- * On match, returns a non-zero value with smaller values returned for more
+ * Return: a non-zero value on match with smaller values returned for more
* specific compatible values.
*/
static int of_fdt_is_compatible(const void *blob,
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index a296eaf52a5b..352e14b007e7 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -48,7 +48,7 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
* of_irq_find_parent - Given a device node, find its interrupt parent node
* @child: pointer to device node
*
- * Returns a pointer to the interrupt parent node, or NULL if the interrupt
+ * Return: A pointer to the interrupt parent node, or NULL if the interrupt
* parent could not be determined.
*/
struct device_node *of_irq_find_parent(struct device_node *child)
@@ -81,14 +81,14 @@ EXPORT_SYMBOL_GPL(of_irq_find_parent);
* @addr: address specifier (start of "reg" property of the device) in be32 format
* @out_irq: structure of_phandle_args updated by this function
*
- * Returns 0 on success and a negative number on error
- *
* This function is a low-level interrupt tree walking function. It
* can be used to do a partial walk with synthetized reg and interrupts
* properties, for example when resolving PCI interrupts when no device
* node exist for the parent. It takes an interrupt specifier structure as
* input, walks the tree looking for any interrupt-map properties, translates
* the specifier for each map, and then returns the translated map.
+ *
+ * Return: 0 on success and a negative number on error
*/
int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
{
@@ -380,7 +380,7 @@ EXPORT_SYMBOL_GPL(of_irq_to_resource);
* @dev: pointer to device tree node
* @index: zero-based index of the IRQ
*
- * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or
+ * Return: Linux IRQ number on success, or 0 on IRQ mapping failure, or
* -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
* of any other failure.
*/
@@ -407,7 +407,7 @@ EXPORT_SYMBOL_GPL(of_irq_get);
* @dev: pointer to device tree node
* @name: IRQ name
*
- * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or
+ * Return: Linux IRQ number on success, or 0 on IRQ mapping failure, or
* -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
* of any other failure.
*/
@@ -447,7 +447,7 @@ int of_irq_count(struct device_node *dev)
* @res: array of resources to fill in
* @nr_irqs: the number of IRQs (and upper bound for num of @res elements)
*
- * Returns the size of the filled in table (up to @nr_irqs).
+ * Return: The size of the filled in table (up to @nr_irqs).
*/
int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
int nr_irqs)
@@ -576,55 +576,57 @@ err:
}
}
-static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
- u32 rid_in)
+static u32 __of_msi_map_id(struct device *dev, struct device_node **np,
+ u32 id_in)
{
struct device *parent_dev;
- u32 rid_out = rid_in;
+ u32 id_out = id_in;
/*
* Walk up the device parent links looking for one with a
* "msi-map" property.
*/
for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent)
- if (!of_map_rid(parent_dev->of_node, rid_in, "msi-map",
- "msi-map-mask", np, &rid_out))
+ if (!of_map_id(parent_dev->of_node, id_in, "msi-map",
+ "msi-map-mask", np, &id_out))
break;
- return rid_out;
+ return id_out;
}
/**
- * of_msi_map_rid - Map a MSI requester ID for a device.
+ * of_msi_map_id - Map a MSI ID for a device.
* @dev: device for which the mapping is to be done.
* @msi_np: device node of the expected msi controller.
- * @rid_in: unmapped MSI requester ID for the device.
+ * @id_in: unmapped MSI ID for the device.
*
* Walk up the device hierarchy looking for devices with a "msi-map"
- * property. If found, apply the mapping to @rid_in.
+ * property. If found, apply the mapping to @id_in.
*
- * Returns the mapped MSI requester ID.
+ * Return: The mapped MSI ID.
*/
-u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in)
+u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in)
{
- return __of_msi_map_rid(dev, &msi_np, rid_in);
+ return __of_msi_map_id(dev, &msi_np, id_in);
}
/**
* of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain
* @dev: device for which the mapping is to be done.
- * @rid: Requester ID for the device.
+ * @id: Device ID.
+ * @bus_token: Bus token
*
* Walk up the device hierarchy looking for devices with a "msi-map"
* property.
*
* Returns: the MSI domain for this device (or NULL on failure)
*/
-struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 rid)
+struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id,
+ u32 bus_token)
{
struct device_node *np = NULL;
- __of_msi_map_rid(dev, &np, rid);
- return irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
+ __of_msi_map_id(dev, &np, id);
+ return irq_find_matching_host(np, bus_token);
}
/**
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 26ddb4cc675a..7a3de2b5de0c 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -281,6 +281,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
return 0;
unregister:
+ of_node_put(child);
mdiobus_unregister(mdio);
return rc;
}
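
The one-line of_mdio fix above is an instance of a general rule: the
for_each_..._of_node iterators hold a reference on the current child, so
any early exit from the loop must drop it. A hedged kernel-style sketch of
the pattern (demo_probe_child() is a hypothetical helper):

static int demo_probe_child(struct device_node *child);	/* hypothetical */

static int demo_scan_children(struct device_node *np)
{
	struct device_node *child;
	int rc;

	for_each_available_child_of_node(np, child) {
		rc = demo_probe_child(child);
		if (rc) {
			/* balance the reference the iterator took */
			of_node_put(child);
			return rc;
		}
	}
	return 0;
}
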
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 8420ef42d89e..b551fe44f2f9 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -296,7 +296,7 @@ err_free_target_path:
*
* Update of property in symbols node is not allowed.
*
- * Returns 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
+ * Return: 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
* invalid @overlay.
*/
static int add_changeset_property(struct overlay_changeset *ovcs,
@@ -401,7 +401,7 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
*
* NOTE_2: Multiple mods of created nodes not supported.
*
- * Returns 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
+ * Return: 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
* invalid @overlay.
*/
static int add_changeset_node(struct overlay_changeset *ovcs,
@@ -473,7 +473,7 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
*
* Do not allow symbols node to have any children.
*
- * Returns 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
+ * Return: 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
* invalid @overlay_node.
*/
static int build_changeset_next_level(struct overlay_changeset *ovcs,
@@ -547,7 +547,7 @@ static int find_dup_cset_node_entry(struct overlay_changeset *ovcs,
fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np);
fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np);
- node_path_match = !strcmp(fn_1, fn_2);
+ node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2);
kfree(fn_1);
kfree(fn_2);
if (node_path_match) {
@@ -582,7 +582,7 @@ static int find_dup_cset_prop(struct overlay_changeset *ovcs,
fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np);
fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np);
- node_path_match = !strcmp(fn_1, fn_2);
+ node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2);
kfree(fn_1);
kfree(fn_2);
if (node_path_match &&
@@ -604,7 +604,7 @@ static int find_dup_cset_prop(struct overlay_changeset *ovcs,
* the same node or duplicate {add, delete, or update} properties entries
* for the same property.
*
- * Returns 0 on success, or -EINVAL if duplicate changeset entry found.
+ * Return: 0 on success, or -EINVAL if duplicate changeset entry found.
*/
static int changeset_dup_entry_check(struct overlay_changeset *ovcs)
{
@@ -628,7 +628,7 @@ static int changeset_dup_entry_check(struct overlay_changeset *ovcs)
* any portions of the changeset that were successfully created will remain
* in @ovcs->cset.
*
- * Returns 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
+ * Return: 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
* invalid overlay in @ovcs->fragments[].
*/
static int build_changeset(struct overlay_changeset *ovcs)
@@ -724,7 +724,7 @@ static struct device_node *find_target(struct device_node *info_node)
* the top level of @tree. The relevant top level nodes are the fragment
* nodes and the __symbols__ node. Any other top level node will be ignored.
*
- * Returns 0 on success, -ENOMEM if memory allocation failure, -EINVAL if error
+ * Return: 0 on success, -ENOMEM if memory allocation failure, -EINVAL if error
* detected in @tree, or -ENOSPC if idr_alloc() error.
*/
static int init_overlay_changeset(struct overlay_changeset *ovcs,
@@ -1180,7 +1180,7 @@ static int overlay_removal_is_ok(struct overlay_changeset *remove_ovcs)
* If an error is returned by an overlay changeset post-remove notifier
* then no further overlay changeset post-remove notifier will be called.
*
- * Returns 0 on success, or a negative error number. *ovcs_id is set to
+ * Return: 0 on success, or a negative error number. *ovcs_id is set to
* zero after reverting the changeset, even if a subsequent error occurs.
*/
int of_overlay_remove(int *ovcs_id)
@@ -1267,7 +1267,7 @@ EXPORT_SYMBOL_GPL(of_overlay_remove);
*
* Removes all overlays from the system in the correct order.
*
- * Returns 0 on success, or a negative error number
+ * Return: 0 on success, or a negative error number
*/
int of_overlay_remove_all(void)
{
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index b47a2292fe8e..cf5dbc9536f2 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -44,7 +44,7 @@ static const struct of_device_id of_skipped_node_table[] = {
* Takes a reference to the embedded struct device which needs to be dropped
* after use.
*
- * Returns platform_device pointer, or NULL if not found
+ * Return: platform_device pointer, or NULL if not found
*/
struct platform_device *of_find_device_by_node(struct device_node *np)
{
@@ -160,7 +160,7 @@ EXPORT_SYMBOL(of_device_alloc);
* @platform_data: pointer to populate platform_data pointer with
* @parent: Linux device model parent device.
*
- * Returns pointer to created platform device, or NULL if a device was not
+ * Return: Pointer to created platform device, or NULL if a device was not
* registered. Unavailable devices will not get registered.
*/
static struct platform_device *of_platform_device_create_pdata(
@@ -204,7 +204,7 @@ err_clear_flag:
* @bus_id: name to assign device
* @parent: Linux device model parent device.
*
- * Returns pointer to created platform device, or NULL if a device was not
+ * Return: Pointer to created platform device, or NULL if a device was not
* registered. Unavailable devices will not get registered.
*/
struct platform_device *of_platform_device_create(struct device_node *np,
@@ -463,7 +463,7 @@ EXPORT_SYMBOL(of_platform_bus_probe);
* New board support should be using this function instead of
* of_platform_bus_probe().
*
- * Returns 0 on success, < 0 on failure.
+ * Return: 0 on success, < 0 on failure.
*/
int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
@@ -594,7 +594,7 @@ static void devm_of_platform_populate_release(struct device *dev, void *res)
* Similar to of_platform_populate(), but will automatically call
* of_platform_depopulate() when the device is unbound from the bus.
*
- * Returns 0 on success, < 0 on failure.
+ * Return: 0 on success, < 0 on failure.
*/
int devm_of_platform_populate(struct device *dev)
{
diff --git a/drivers/of/property.c b/drivers/of/property.c
index d7fa75e31f22..28ea326d102f 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -36,9 +36,11 @@
* @elem_size: size of the individual element
*
* Search for a property in a device node and count the number of elements of
- * size elem_size in it. Returns number of elements on sucess, -EINVAL if the
- * property does not exist or its length does not match a multiple of elem_size
- * and -ENODATA if the property does not have a value.
+ * size elem_size in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does not
+ * exist or its length does not match a multiple of elem_size and -ENODATA if
+ * the property does not have a value.
*/
int of_property_count_elems_of_size(const struct device_node *np,
const char *propname, int elem_size)
@@ -70,8 +72,9 @@ EXPORT_SYMBOL_GPL(of_property_count_elems_of_size);
* @len: if !=NULL, actual length is written to here
*
 * Search for a property in a device node and validate the requested size.
- * Returns the property value on success, -EINVAL if the property does not
- * exist, -ENODATA if property does not have a value, and -EOVERFLOW if the
+ *
+ * Return: The property value on success, -EINVAL if the property does not
+ * exist, -ENODATA if property does not have a value, and -EOVERFLOW if the
* property data is too small or too large.
*
*/
@@ -104,7 +107,9 @@ static void *of_find_property_value_of_size(const struct device_node *np,
* @out_value: pointer to return value, modified only if no error.
*
* Search for a property in a device node and read nth 32-bit value from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
* -ENODATA if property does not have a value, and -EOVERFLOW if the
* property data isn't large enough.
*
@@ -136,7 +141,9 @@ EXPORT_SYMBOL_GPL(of_property_read_u32_index);
* @out_value: pointer to return value, modified only if no error.
*
* Search for a property in a device node and read nth 64-bit value from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
* -ENODATA if property does not have a value, and -EOVERFLOW if the
* property data isn't large enough.
*
@@ -171,12 +178,14 @@ EXPORT_SYMBOL_GPL(of_property_read_u64_index);
* sz_min will be read.
*
* Search for a property in a device node and read 8-bit value(s) from
- * it. Returns number of elements read on success, -EINVAL if the property
- * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
- * if the property data is smaller than sz_min or longer than sz_max.
+ * it.
*
* dts entry of array should be like:
- * property = /bits/ 8 <0x50 0x60 0x70>;
+ * ``property = /bits/ 8 <0x50 0x60 0x70>;``
+ *
+ * Return: The number of elements read on success, -EINVAL if the property
+ * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
+ * if the property data is smaller than sz_min or longer than sz_max.
*
* The out_values is modified only if a valid u8 value can be decoded.
*/
@@ -219,12 +228,14 @@ EXPORT_SYMBOL_GPL(of_property_read_variable_u8_array);
* sz_min will be read.
*
* Search for a property in a device node and read 16-bit value(s) from
- * it. Returns number of elements read on success, -EINVAL if the property
- * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
- * if the property data is smaller than sz_min or longer than sz_max.
+ * it.
*
* dts entry of array should be like:
- * property = /bits/ 16 <0x5000 0x6000 0x7000>;
+ * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;``
+ *
+ * Return: The number of elements read on success, -EINVAL if the property
+ * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
+ * if the property data is smaller than sz_min or longer than sz_max.
*
* The out_values is modified only if a valid u16 value can be decoded.
*/
@@ -267,7 +278,9 @@ EXPORT_SYMBOL_GPL(of_property_read_variable_u16_array);
* sz_min will be read.
*
* Search for a property in a device node and read 32-bit value(s) from
- * it. Returns number of elements read on success, -EINVAL if the property
+ * it.
+ *
+ * Return: The number of elements read on success, -EINVAL if the property
* does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
* if the property data is smaller than sz_min or longer than sz_max.
*
@@ -306,7 +319,9 @@ EXPORT_SYMBOL_GPL(of_property_read_variable_u32_array);
* @out_value: pointer to return value, modified only if return value is 0.
*
* Search for a property in a device node and read a 64-bit value from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
* -ENODATA if property does not have a value, and -EOVERFLOW if the
* property data isn't large enough.
*
@@ -341,7 +356,9 @@ EXPORT_SYMBOL_GPL(of_property_read_u64);
* sz_min will be read.
*
* Search for a property in a device node and read 64-bit value(s) from
- * it. Returns number of elements read on success, -EINVAL if the property
+ * it.
+ *
+ * Return: The number of elements read on success, -EINVAL if the property
* does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
* if the property data is smaller than sz_min or longer than sz_max.
*
@@ -383,10 +400,11 @@ EXPORT_SYMBOL_GPL(of_property_read_variable_u64_array);
* return value is 0.
*
* Search for a property in a device tree node and retrieve a null
- * terminated string value (pointer to data, not a copy). Returns 0 on
- * success, -EINVAL if the property does not exist, -ENODATA if property
- * does not have a value, and -EILSEQ if the string is not null-terminated
- * within the length of the property data.
+ * terminated string value (pointer to data, not a copy).
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist, -ENODATA if
+ * property does not have a value, and -EILSEQ if the string is not
+ * null-terminated within the length of the property data.
*
* The out_string pointer is modified only if a valid string can be decoded.
*/
@@ -750,7 +768,7 @@ EXPORT_SYMBOL(of_graph_get_remote_port_parent);
* @node: pointer to a local endpoint device_node
*
* Return: Remote port node associated with remote endpoint node linked
- * to @node. Use of_node_put() on it when done.
+ * to @node. Use of_node_put() on it when done.
*/
struct device_node *of_graph_get_remote_port(const struct device_node *node)
{
@@ -783,7 +801,7 @@ EXPORT_SYMBOL(of_graph_get_endpoint_count);
* @endpoint: identifier (value of reg property) of the endpoint node
*
* Return: Remote device node associated with remote endpoint node linked
- * to @node. Use of_node_put() on it when done.
+ * to @node. Use of_node_put() on it when done.
*/
struct device_node *of_graph_get_remote_node(const struct device_node *node,
u32 port, u32 endpoint)
@@ -918,8 +936,10 @@ of_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
nargs, index, &of_args);
if (ret < 0)
return ret;
- if (!args)
+ if (!args) {
+ of_node_put(of_args.np);
return 0;
+ }
args->nargs = of_args.args_count;
args->fwnode = of_fwnode_handle(of_args.np);
diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile
index 9b6807065827..009f4045c8e4 100644
--- a/drivers/of/unittest-data/Makefile
+++ b/drivers/of/unittest-data/Makefile
@@ -21,7 +21,13 @@ obj-$(CONFIG_OF_OVERLAY) += overlay.dtb.o \
overlay_bad_add_dup_prop.dtb.o \
overlay_bad_phandle.dtb.o \
overlay_bad_symbol.dtb.o \
- overlay_base.dtb.o
+ overlay_base.dtb.o \
+ overlay_gpio_01.dtb.o \
+ overlay_gpio_02a.dtb.o \
+ overlay_gpio_02b.dtb.o \
+ overlay_gpio_03.dtb.o \
+ overlay_gpio_04a.dtb.o \
+ overlay_gpio_04b.dtb.o
# enable creation of __symbols__ node
DTC_FLAGS_overlay += -@
diff --git a/drivers/of/unittest-data/overlay_gpio_01.dts b/drivers/of/unittest-data/overlay_gpio_01.dts
new file mode 100644
index 000000000000..699ff104ae10
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_01.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@0 {
+ compatible = "unittest-gpio";
+ reg = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <2>;
+ gpio-line-names = "line-A", "line-B";
+
+ line-b {
+ gpio-hog;
+ gpios = <2 0>;
+ input;
+ line-name = "line-B-input";
+ };
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_gpio_02a.dts b/drivers/of/unittest-data/overlay_gpio_02a.dts
new file mode 100644
index 000000000000..ec59aff6ed47
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_02a.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@2 {
+ compatible = "unittest-gpio";
+ reg = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <2>;
+ gpio-line-names = "line-A", "line-B";
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_gpio_02b.dts b/drivers/of/unittest-data/overlay_gpio_02b.dts
new file mode 100644
index 000000000000..43ce111d41ce
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_02b.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@2 {
+ line-a {
+ gpio-hog;
+ gpios = <1 0>;
+ input;
+ line-name = "line-A-input";
+ };
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_gpio_03.dts b/drivers/of/unittest-data/overlay_gpio_03.dts
new file mode 100644
index 000000000000..6e0312340a1b
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_03.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@3 {
+ compatible = "unittest-gpio";
+ reg = <3>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <2>;
+ gpio-line-names = "line-A", "line-B", "line-C", "line-D";
+
+ line-d {
+ gpio-hog;
+ gpios = <4 0>;
+ input;
+ line-name = "line-D-input";
+ };
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_gpio_04a.dts b/drivers/of/unittest-data/overlay_gpio_04a.dts
new file mode 100644
index 000000000000..7b1e04ebfa7a
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_04a.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@4 {
+ compatible = "unittest-gpio";
+ reg = <4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <2>;
+ gpio-line-names = "line-A", "line-B", "line-C", "line-D";
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_gpio_04b.dts b/drivers/of/unittest-data/overlay_gpio_04b.dts
new file mode 100644
index 000000000000..a14e95c6699a
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_04b.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@4 {
+ line-c {
+ gpio-hog;
+ gpios = <3 0>;
+ input;
+ line-name = "line-C-input";
+ };
+ };
+};
diff --git a/drivers/of/unittest-data/tests-phandle.dtsi b/drivers/of/unittest-data/tests-phandle.dtsi
index 6b33be4c4416..aa0d7027ffa6 100644
--- a/drivers/of/unittest-data/tests-phandle.dtsi
+++ b/drivers/of/unittest-data/tests-phandle.dtsi
@@ -38,6 +38,13 @@
phandle-map-pass-thru = <0x0 0xf0>;
};
+ provider5: provider5 {
+ #phandle-cells = <2>;
+ phandle-map = <2 7 &provider4 2 3>;
+ phandle-map-mask = <0xff 0xf>;
+ phandle-map-pass-thru = <0x0 0xf0>;
+ };
+
consumer-a {
phandle-list = <&provider1 1>,
<&provider2 2 0>,
@@ -64,7 +71,8 @@
<&provider4 4 0x100>,
<&provider4 0 0x61>,
<&provider0>,
- <&provider4 19 0x20>;
+ <&provider4 19 0x20>,
+ <&provider5 2 7>;
phandle-list-bad-phandle = <12345678 0 0>;
phandle-list-bad-args = <&provider2 1 0>,
<&provider4 0>;
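
To make the new provider5 case concrete: with phandle-map-mask <0xff 0xf>,
the consumer's <&provider5 2 7> masks to <2 7>, matches the single map
key, and is rewritten to <2 3> aimed at provider4 (the pass-thru mask 0xf0
contributes nothing here because 7 & 0xf0 == 0); provider4's own map,
defined earlier in this file, then forwards the request to provider3 as
<2 5 3>, which is what the new unittest case below checks. A stand-alone
sketch of one hop, with the semantics paraphrased from
of_parse_phandle_with_args_map() and simplified to a single map entry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t in[2]   = { 2, 7 };		/* consumer args */
	uint32_t mask[2] = { 0xff, 0xf };	/* phandle-map-mask */
	uint32_t key[2]  = { 2, 7 };		/* map input args */
	uint32_t out[2]  = { 2, 3 };		/* map output args */
	uint32_t pass[2] = { 0x0, 0xf0 };	/* phandle-map-pass-thru */
	int i, hit = 1;

	for (i = 0; i < 2; i++)
		if ((in[i] & mask[i]) != key[i])
			hit = 0;

	if (hit)
		for (i = 0; i < 2; i++)
			out[i] = (out[i] & ~pass[i]) | (in[i] & pass[i]);

	printf("-> <%u %u> into provider4\n", out[0], out[1]); /* <2 3> */
	return 0;
}
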
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 5707c309a754..f1aa09215d4b 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -23,6 +23,7 @@
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
+#include <linux/gpio/driver.h>
#include <linux/bitops.h>
@@ -45,6 +46,103 @@ static struct unittest_results {
failed; \
})
+#ifdef CONFIG_OF_KOBJ
+#define OF_KREF_READ(NODE) kref_read(&(NODE)->kobj.kref)
+#else
+#define OF_KREF_READ(NODE) 1
+#endif
+
+/*
+ * Expected message may have a message level other than KERN_INFO.
+ * Print the expected message only if the current loglevel will allow
+ * the actual message to print.
+ */
+#define EXPECT_BEGIN(level, fmt, ...) \
+ printk(level pr_fmt("EXPECT \\ : ") fmt, ##__VA_ARGS__)
+
+#define EXPECT_END(level, fmt, ...) \
+ printk(level pr_fmt("EXPECT / : ") fmt, ##__VA_ARGS__)
+
+struct unittest_gpio_dev {
+ struct gpio_chip chip;
+};
+
+static int unittest_gpio_chip_request_count;
+static int unittest_gpio_probe_count;
+static int unittest_gpio_probe_pass_count;
+
+static int unittest_gpio_chip_request(struct gpio_chip *chip, unsigned int offset)
+{
+ unittest_gpio_chip_request_count++;
+
+ pr_debug("%s(): %s %d %d\n", __func__, chip->label, offset,
+ unittest_gpio_chip_request_count);
+ return 0;
+}
+
+static int unittest_gpio_probe(struct platform_device *pdev)
+{
+ struct unittest_gpio_dev *devptr;
+ int ret;
+
+ unittest_gpio_probe_count++;
+
+ devptr = kzalloc(sizeof(*devptr), GFP_KERNEL);
+ if (!devptr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, devptr);
+
+ devptr->chip.of_node = pdev->dev.of_node;
+ devptr->chip.label = "of-unittest-gpio";
+ devptr->chip.base = -1; /* dynamic allocation */
+ devptr->chip.ngpio = 5;
+ devptr->chip.request = unittest_gpio_chip_request;
+
+ ret = gpiochip_add_data(&devptr->chip, NULL);
+
+ unittest(!ret,
+ "gpiochip_add_data() for node @%pOF failed, ret = %d\n", devptr->chip.of_node, ret);
+
+ if (!ret)
+ unittest_gpio_probe_pass_count++;
+ return ret;
+}
+
+static int unittest_gpio_remove(struct platform_device *pdev)
+{
+ struct unittest_gpio_dev *gdev = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+
+ dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
+
+ if (!gdev)
+ return -EINVAL;
+
+ if (gdev->chip.base != -1)
+ gpiochip_remove(&gdev->chip);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(gdev);
+
+ return 0;
+}
+
+static const struct of_device_id unittest_gpio_id[] = {
+ { .compatible = "unittest-gpio", },
+ {}
+};
+
+static struct platform_driver unittest_gpio_driver = {
+ .probe = unittest_gpio_probe,
+ .remove = unittest_gpio_remove,
+ .driver = {
+ .name = "unittest-gpio",
+ .of_match_table = of_match_ptr(unittest_gpio_id),
+ },
+};
+
static void __init of_unittest_find_node_by_name(void)
{
struct device_node *np;
@@ -52,7 +150,7 @@ static void __init of_unittest_find_node_by_name(void)
np = of_find_node_by_path("/testcase-data");
name = kasprintf(GFP_KERNEL, "%pOF", np);
- unittest(np && !strcmp("/testcase-data", name),
+ unittest(np && name && !strcmp("/testcase-data", name),
"find /testcase-data failed\n");
of_node_put(np);
kfree(name);
@@ -63,14 +161,14 @@ static void __init of_unittest_find_node_by_name(void)
np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
name = kasprintf(GFP_KERNEL, "%pOF", np);
- unittest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
+ unittest(np && name && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
"find /testcase-data/phandle-tests/consumer-a failed\n");
of_node_put(np);
kfree(name);
np = of_find_node_by_path("testcase-alias");
name = kasprintf(GFP_KERNEL, "%pOF", np);
- unittest(np && !strcmp("/testcase-data", name),
+ unittest(np && name && !strcmp("/testcase-data", name),
"find testcase-alias failed\n");
of_node_put(np);
kfree(name);
@@ -81,7 +179,7 @@ static void __init of_unittest_find_node_by_name(void)
np = of_find_node_by_path("testcase-alias/phandle-tests/consumer-a");
name = kasprintf(GFP_KERNEL, "%pOF", np);
- unittest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
+ unittest(np && name && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
"find testcase-alias/phandle-tests/consumer-a failed\n");
of_node_put(np);
kfree(name);
@@ -430,6 +528,9 @@ static void __init of_unittest_parse_phandle_with_args(void)
unittest(passed, "index %i - data error on node %pOF rc=%i\n",
i, args.np, rc);
+
+ if (rc == 0)
+ of_node_put(args.np);
}
/* Check for missing list property */
@@ -471,8 +572,9 @@ static void __init of_unittest_parse_phandle_with_args(void)
static void __init of_unittest_parse_phandle_with_args_map(void)
{
- struct device_node *np, *p0, *p1, *p2, *p3;
+ struct device_node *np, *p[6] = {};
struct of_phandle_args args;
+ unsigned int prefs[6];
int i, rc;
np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-b");
@@ -481,34 +583,24 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
return;
}
- p0 = of_find_node_by_path("/testcase-data/phandle-tests/provider0");
- if (!p0) {
- pr_err("missing testcase data\n");
- return;
- }
-
- p1 = of_find_node_by_path("/testcase-data/phandle-tests/provider1");
- if (!p1) {
- pr_err("missing testcase data\n");
- return;
- }
-
- p2 = of_find_node_by_path("/testcase-data/phandle-tests/provider2");
- if (!p2) {
- pr_err("missing testcase data\n");
- return;
- }
-
- p3 = of_find_node_by_path("/testcase-data/phandle-tests/provider3");
- if (!p3) {
- pr_err("missing testcase data\n");
- return;
+ p[0] = of_find_node_by_path("/testcase-data/phandle-tests/provider0");
+ p[1] = of_find_node_by_path("/testcase-data/phandle-tests/provider1");
+ p[2] = of_find_node_by_path("/testcase-data/phandle-tests/provider2");
+ p[3] = of_find_node_by_path("/testcase-data/phandle-tests/provider3");
+ p[4] = of_find_node_by_path("/testcase-data/phandle-tests/provider4");
+ p[5] = of_find_node_by_path("/testcase-data/phandle-tests/provider5");
+ for (i = 0; i < ARRAY_SIZE(p); ++i) {
+ if (!p[i]) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+ prefs[i] = OF_KREF_READ(p[i]);
}
rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
- unittest(rc == 7, "of_count_phandle_with_args() returned %i, expected 7\n", rc);
+ unittest(rc == 8, "of_count_phandle_with_args() returned %i, expected 8\n", rc);
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 9; i++) {
bool passed = true;
memset(&args, 0, sizeof(args));
@@ -519,13 +611,13 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
switch (i) {
case 0:
passed &= !rc;
- passed &= (args.np == p1);
+ passed &= (args.np == p[1]);
passed &= (args.args_count == 1);
passed &= (args.args[0] == 1);
break;
case 1:
passed &= !rc;
- passed &= (args.np == p3);
+ passed &= (args.np == p[3]);
passed &= (args.args_count == 3);
passed &= (args.args[0] == 2);
passed &= (args.args[1] == 5);
@@ -536,28 +628,36 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
break;
case 3:
passed &= !rc;
- passed &= (args.np == p0);
+ passed &= (args.np == p[0]);
passed &= (args.args_count == 0);
break;
case 4:
passed &= !rc;
- passed &= (args.np == p1);
+ passed &= (args.np == p[1]);
passed &= (args.args_count == 1);
passed &= (args.args[0] == 3);
break;
case 5:
passed &= !rc;
- passed &= (args.np == p0);
+ passed &= (args.np == p[0]);
passed &= (args.args_count == 0);
break;
case 6:
passed &= !rc;
- passed &= (args.np == p2);
+ passed &= (args.np == p[2]);
passed &= (args.args_count == 2);
passed &= (args.args[0] == 15);
passed &= (args.args[1] == 0x20);
break;
case 7:
+ passed &= !rc;
+ passed &= (args.np == p[3]);
+ passed &= (args.args_count == 3);
+ passed &= (args.args[0] == 2);
+ passed &= (args.args[1] == 5);
+ passed &= (args.args[2] == 3);
+ break;
+ case 8:
passed &= (rc == -ENOENT);
break;
default:
@@ -566,6 +666,9 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
unittest(passed, "index %i - data error on node %s rc=%i\n",
i, args.np->full_name, rc);
+
+ if (rc == 0)
+ of_node_put(args.np);
}
/* Check for missing list property */
@@ -591,6 +694,13 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-args",
"phandle", 1, &args);
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+
+ for (i = 0; i < ARRAY_SIZE(p); ++i) {
+ unittest(prefs[i] == OF_KREF_READ(p[i]),
+ "provider%d: expected:%d got:%d\n",
+ i, prefs[i], OF_KREF_READ(p[i]));
+ of_node_put(p[i]);
+ }
}
static void __init of_unittest_property_string(void)
@@ -1151,6 +1261,8 @@ static void attach_node_and_children(struct device_node *np)
const char *full_name;
full_name = kasprintf(GFP_KERNEL, "%pOF", np);
+ if (!full_name)
+ return;
if (!strcmp(full_name, "/__local_fixups__") ||
!strcmp(full_name, "/__fixups__")) {
@@ -1580,7 +1692,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
}
/* unittest device must be again in before state */
- if (of_unittest_device_exists(unittest_nr, PDEV_OVERLAY) != before) {
+ if (of_unittest_device_exists(unittest_nr, ovtype) != before) {
unittest(0, "%s with device @\"%s\" %s\n",
overlay_name_from_nr(overlay_nr),
unittest_path(unittest_nr, ovtype),
@@ -2100,6 +2212,153 @@ static inline void of_unittest_overlay_i2c_15(void) { }
#endif
+static void __init of_unittest_overlay_gpio(void)
+{
+ int chip_request_count;
+ int probe_pass_count;
+ int ret;
+
+ /*
+ * tests: apply overlays before registering driver
+ * Similar to installing a driver as a module, the
+ * driver is registered after applying the overlays.
+ *
+ * - apply overlay_gpio_01
+ * - apply overlay_gpio_02a
+ * - apply overlay_gpio_02b
+ * - register driver
+ *
+ * register driver will result in
+ * - probe and processing gpio hog for overlay_gpio_01
+ * - probe for overlay_gpio_02a
+ * - processing gpio for overlay_gpio_02b
+ */
+
+ probe_pass_count = unittest_gpio_probe_pass_count;
+ chip_request_count = unittest_gpio_chip_request_count;
+
+ /*
+ * overlay_gpio_01 contains gpio node and child gpio hog node
+ * overlay_gpio_02a contains gpio node
+ * overlay_gpio_02b contains child gpio hog node
+ */
+
+ unittest(overlay_data_apply("overlay_gpio_01", NULL),
+ "Adding overlay 'overlay_gpio_01' failed\n");
+
+ unittest(overlay_data_apply("overlay_gpio_02a", NULL),
+ "Adding overlay 'overlay_gpio_02a' failed\n");
+
+ unittest(overlay_data_apply("overlay_gpio_02b", NULL),
+ "Adding overlay 'overlay_gpio_02b' failed\n");
+
+ /*
+ * messages are the result of the probes, after the
+ * driver is registered
+ */
+
+ EXPECT_BEGIN(KERN_INFO,
+ "gpio-<<int>> (line-B-input): hogged as input\n");
+
+ EXPECT_BEGIN(KERN_INFO,
+ "gpio-<<int>> (line-A-input): hogged as input\n");
+
+ ret = platform_driver_register(&unittest_gpio_driver);
+ if (unittest(ret == 0, "could not register unittest gpio driver\n"))
+ return;
+
+ EXPECT_END(KERN_INFO,
+ "gpio-<<int>> (line-A-input): hogged as input\n");
+ EXPECT_END(KERN_INFO,
+ "gpio-<<int>> (line-B-input): hogged as input\n");
+
+ unittest(probe_pass_count + 2 == unittest_gpio_probe_pass_count,
+ "unittest_gpio_probe() failed or not called\n");
+
+ unittest(chip_request_count + 2 == unittest_gpio_chip_request_count,
+ "unittest_gpio_chip_request() called %d times (expected 1 time)\n",
+ unittest_gpio_chip_request_count - chip_request_count);
+
+ /*
+ * tests: apply overlays after registering driver
+ *
+ * Similar to a driver built-in to the kernel, the
+ * driver is registered before applying the overlays.
+ *
+ * overlay_gpio_03 contains gpio node and child gpio hog node
+ *
+ * - apply overlay_gpio_03
+ *
+ * apply overlay will result in
+ * - probe and processing gpio hog.
+ */
+
+ probe_pass_count = unittest_gpio_probe_pass_count;
+ chip_request_count = unittest_gpio_chip_request_count;
+
+ EXPECT_BEGIN(KERN_INFO,
+ "gpio-<<int>> (line-D-input): hogged as input\n");
+
+ /* overlay_gpio_03 contains gpio node and child gpio hog node */
+
+ unittest(overlay_data_apply("overlay_gpio_03", NULL),
+ "Adding overlay 'overlay_gpio_03' failed\n");
+
+ EXPECT_END(KERN_INFO,
+ "gpio-<<int>> (line-D-input): hogged as input\n");
+
+ unittest(probe_pass_count + 1 == unittest_gpio_probe_pass_count,
+ "unittest_gpio_probe() failed or not called\n");
+
+ unittest(chip_request_count + 1 == unittest_gpio_chip_request_count,
+ "unittest_gpio_chip_request() called %d times (expected 1 time)\n",
+ unittest_gpio_chip_request_count - chip_request_count);
+
+ /*
+ * overlay_gpio_04a contains gpio node
+ *
+ * - apply overlay_gpio_04a
+ *
+	 * applying the overlay will result in
+ * - probe for overlay_gpio_04a
+ */
+
+ probe_pass_count = unittest_gpio_probe_pass_count;
+ chip_request_count = unittest_gpio_chip_request_count;
+
+ /* overlay_gpio_04a contains gpio node */
+
+ unittest(overlay_data_apply("overlay_gpio_04a", NULL),
+ "Adding overlay 'overlay_gpio_04a' failed\n");
+
+ unittest(probe_pass_count + 1 == unittest_gpio_probe_pass_count,
+ "unittest_gpio_probe() failed or not called\n");
+
+ /*
+ * overlay_gpio_04b contains child gpio hog node
+ *
+ * - apply overlay_gpio_04b
+ *
+	 * applying the overlay will result in
+ * - processing gpio for overlay_gpio_04b
+ */
+
+ EXPECT_BEGIN(KERN_INFO,
+ "gpio-<<int>> (line-C-input): hogged as input\n");
+
+ /* overlay_gpio_04b contains child gpio hog node */
+
+ unittest(overlay_data_apply("overlay_gpio_04b", NULL),
+ "Adding overlay 'overlay_gpio_04b' failed\n");
+
+ EXPECT_END(KERN_INFO,
+ "gpio-<<int>> (line-C-input): hogged as input\n");
+
+ unittest(chip_request_count + 1 == unittest_gpio_chip_request_count,
+ "unittest_gpio_chip_request() called %d times (expected 1 time)\n",
+ unittest_gpio_chip_request_count - chip_request_count);
+}
+
static void __init of_unittest_overlay(void)
{
struct device_node *bus_np = NULL;
@@ -2159,6 +2418,8 @@ static void __init of_unittest_overlay(void)
of_unittest_overlay_i2c_cleanup();
#endif
+ of_unittest_overlay_gpio();
+
of_unittest_destroy_tracked_overlays();
out:
@@ -2212,6 +2473,12 @@ OVERLAY_INFO_EXTERN(overlay_11);
OVERLAY_INFO_EXTERN(overlay_12);
OVERLAY_INFO_EXTERN(overlay_13);
OVERLAY_INFO_EXTERN(overlay_15);
+OVERLAY_INFO_EXTERN(overlay_gpio_01);
+OVERLAY_INFO_EXTERN(overlay_gpio_02a);
+OVERLAY_INFO_EXTERN(overlay_gpio_02b);
+OVERLAY_INFO_EXTERN(overlay_gpio_03);
+OVERLAY_INFO_EXTERN(overlay_gpio_04a);
+OVERLAY_INFO_EXTERN(overlay_gpio_04b);
OVERLAY_INFO_EXTERN(overlay_bad_add_dup_node);
OVERLAY_INFO_EXTERN(overlay_bad_add_dup_prop);
OVERLAY_INFO_EXTERN(overlay_bad_phandle);
@@ -2236,6 +2503,12 @@ static struct overlay_info overlays[] = {
OVERLAY_INFO(overlay_12, 0),
OVERLAY_INFO(overlay_13, 0),
OVERLAY_INFO(overlay_15, 0),
+ OVERLAY_INFO(overlay_gpio_01, 0),
+ OVERLAY_INFO(overlay_gpio_02a, 0),
+ OVERLAY_INFO(overlay_gpio_02b, 0),
+ OVERLAY_INFO(overlay_gpio_03, 0),
+ OVERLAY_INFO(overlay_gpio_04a, 0),
+ OVERLAY_INFO(overlay_gpio_04b, 0),
OVERLAY_INFO(overlay_bad_add_dup_node, -EINVAL),
OVERLAY_INFO(overlay_bad_add_dup_prop, -EINVAL),
OVERLAY_INFO(overlay_bad_phandle, -EINVAL),
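
The EXPECT_BEGIN()/EXPECT_END() pairs in of_unittest_overlay_gpio() above bracket a console message the test run is required to produce; a log-scraping harness pairs the markers against the real output, with <<int>> matching any integer. A hedged sketch of what the markers expand to (the real macros live earlier in unittest.c):

/* minimal sketch, assuming the upstream marker format */
#define EXPECT_BEGIN(level, fmt, ...) \
	printk(level pr_fmt("EXPECT \\ : ") fmt, ##__VA_ARGS__)

#define EXPECT_END(level, fmt, ...) \
	printk(level pr_fmt("EXPECT / : ") fmt, ##__VA_ARGS__)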
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 088c93dc0085..e1810c4011b2 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -1881,8 +1881,8 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
}
virt_dev = dev_pm_domain_attach_by_name(dev, *name);
- if (IS_ERR(virt_dev)) {
- ret = PTR_ERR(virt_dev);
+ if (IS_ERR_OR_NULL(virt_dev)) {
+ ret = virt_dev ? PTR_ERR(virt_dev) : -ENODEV;
dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
goto err;
}
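
dev_pm_domain_attach_by_name() can legitimately return NULL (nothing to attach) as well as an ERR_PTR, so the hunk above folds the NULL case into -ENODEV before logging. A minimal sketch of the pattern, using a hypothetical helper name:

#include <linux/err.h>

/* demo_ptr_to_errno() is hypothetical: fold NULL/ERR_PTR into an errno */
static int demo_ptr_to_errno(void *obj)
{
	if (IS_ERR_OR_NULL(obj))
		return obj ? PTR_ERR(obj) : -ENODEV;
	return 0;
}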
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index 609665e339b6..c7647410316e 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -162,7 +162,7 @@ static void opp_migrate_dentry(struct opp_device *opp_dev,
dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir,
opp_table->dentry_name);
- if (!dentry) {
+ if (IS_ERR(dentry)) {
dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
__func__, dev_name(opp_dev->dev), dev_name(dev));
return;
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 5013568c571e..fdd302d0a1c9 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1378,15 +1378,17 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
}
}
-static void __init ccio_init_resources(struct ioc *ioc)
+static int __init ccio_init_resources(struct ioc *ioc)
{
struct resource *res = ioc->mmio_region;
char *name = kmalloc(14, GFP_KERNEL);
-
+ if (unlikely(!name))
+ return -ENOMEM;
snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
+ return 0;
}
static int new_ioc_area(struct resource *res, unsigned long size,
@@ -1541,7 +1543,11 @@ static int __init ccio_probe(struct parisc_device *dev)
return -ENOMEM;
}
ccio_ioc_init(ioc);
- ccio_init_resources(ioc);
+ if (ccio_init_resources(ioc)) {
+ iounmap(ioc->ioc_regs);
+ kfree(ioc);
+ return -ENOMEM;
+ }
hppa_dma_ops = &ccio_ops;
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 32f506f00c89..a5d9ec7950de 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -202,9 +202,9 @@ static inline void iosapic_write(void __iomem *iosapic, unsigned int reg, u32 va
static DEFINE_SPINLOCK(iosapic_lock);
-static inline void iosapic_eoi(void __iomem *addr, unsigned int data)
+static inline void iosapic_eoi(__le32 __iomem *addr, __le32 data)
{
- __raw_writel(data, addr);
+ __raw_writel((__force u32)data, addr);
}
/*
@@ -875,6 +875,7 @@ int iosapic_serial_irq(struct parisc_device *dev)
return vi->txn_irq;
}
+EXPORT_SYMBOL(iosapic_serial_irq);
#endif
diff --git a/drivers/parisc/iosapic_private.h b/drivers/parisc/iosapic_private.h
index 73ecc657ad95..bd8ff40162b4 100644
--- a/drivers/parisc/iosapic_private.h
+++ b/drivers/parisc/iosapic_private.h
@@ -118,8 +118,8 @@ struct iosapic_irt {
struct vector_info {
struct iosapic_info *iosapic; /* I/O SAPIC this vector is on */
struct irt_entry *irte; /* IRT entry */
- u32 __iomem *eoi_addr; /* precalculate EOI reg address */
- u32 eoi_data; /* IA64: ? PA: swapped txn_data */
+ __le32 __iomem *eoi_addr; /* precalculate EOI reg address */
+ __le32 eoi_data; /* IA64: ? PA: swapped txn_data */
int txn_irq; /* virtual IRQ number for processor */
ulong txn_addr; /* IA64: id_eid PA: partial HPA */
u32 txn_data; /* CPU interrupt bit */
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 73e37bb877a4..9b33e84e04fc 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -56,8 +56,8 @@
static int led_type __read_mostly = -1;
static unsigned char lastleds; /* LED state from most recent update */
static unsigned int led_heartbeat __read_mostly = 1;
-static unsigned int led_diskio __read_mostly = 1;
-static unsigned int led_lanrxtx __read_mostly = 1;
+static unsigned int led_diskio __read_mostly;
+static unsigned int led_lanrxtx __read_mostly;
static char lcd_text[32] __read_mostly;
static char lcd_text_default[32] __read_mostly;
static int lcd_no_led_support __read_mostly = 0; /* KittyHawk doesn't support LED on its LCD */
@@ -137,6 +137,9 @@ static int start_task(void)
/* Create the work queue and queue the LED task */
led_wq = create_singlethread_workqueue("led_wq");
+ if (!led_wq)
+ return -ENOMEM;
+
queue_delayed_work(led_wq, &led_task, 0);
return 0;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 1f17a39eabe8..23aced808242 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -475,7 +475,7 @@ static size_t parport_pc_fifo_write_block_pio(struct parport *port,
const unsigned char *bufp = buf;
size_t left = length;
unsigned long expire = jiffies + port->physport->cad->timeout;
- const int fifo = FIFO(port);
+ const unsigned long fifo = FIFO(port);
int poll_for = 8; /* 80 usecs */
const struct parport_pc_private *priv = port->physport->private_data;
const int fifo_depth = priv->fifo_depth;
@@ -2648,6 +2648,8 @@ enum parport_pc_pci_cards {
netmos_9865,
quatech_sppxp100,
wch_ch382l,
+ brainboxes_uc146,
+ brainboxes_px203,
};
@@ -2711,6 +2713,8 @@ static struct parport_pc_pci {
/* netmos_9865 */ { 1, { { 0, -1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
/* wch_ch382l */ { 1, { { 2, -1 }, } },
+ /* brainboxes_uc146 */ { 1, { { 3, -1 }, } },
+ /* brainboxes_px203 */ { 1, { { 0, -1 }, } },
};
static const struct pci_device_id parport_pc_pci_tbl[] = {
@@ -2802,6 +2806,23 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
/* WCH CH382L PCI-E single parallel port card */
{ 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l },
+ /* Brainboxes IX-500/550 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x402a,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ /* Brainboxes UC-146/UC-157 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0be1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0be2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
+ /* Brainboxes PX-146/PX-257 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x401c,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ /* Brainboxes PX-203 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4007,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px203 },
+ /* Brainboxes PX-475 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x401f,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
{ 0, } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
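
Each new ID above either reuses the generic oxsemi_pcie_pport description or points at one of the new parport_pc_pci entries, which record how many ports a card has and which BARs carry them. A simplified sketch of that layout (the real struct in parport_pc.c also carries init hooks):

struct parport_pc_pci_sketch {			/* simplified */
	int numports;				/* parallel ports on the card */
	struct {
		int lo;				/* BAR of the base registers */
		int hi;				/* BAR of the high regs, -1 = none */
	} addr[4];
};

/* so "{ 1, { { 3, -1 }, } }" (brainboxes_uc146) reads: one port,
 * base registers in BAR 3, no separate high-register BAR */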
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 96b888bb49c6..46f213270689 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -65,6 +65,10 @@ enum parport_pc_pci_cards {
sunix_5069a,
sunix_5079a,
sunix_5099a,
+ brainboxes_uc257,
+ brainboxes_is300,
+ brainboxes_uc414,
+ brainboxes_px263,
};
/* each element directly indexed from enum list, above */
@@ -158,6 +162,10 @@ static struct parport_pc_pci cards[] = {
/* sunix_5069a */ { 1, { { 1, 2 }, } },
/* sunix_5079a */ { 1, { { 1, 2 }, } },
/* sunix_5099a */ { 1, { { 1, 2 }, } },
+ /* brainboxes_uc257 */ { 1, { { 3, -1 }, } },
+ /* brainboxes_is300 */ { 1, { { 3, -1 }, } },
+ /* brainboxes_uc414 */ { 1, { { 3, -1 }, } },
+ /* brainboxes_px263 */ { 1, { { 3, -1 }, } },
};
static struct pci_device_id parport_serial_pci_tbl[] = {
@@ -277,6 +285,38 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
{ PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
0x0104, 0, 0, sunix_5099a },
+ /* Brainboxes UC-203 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0bc1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0bc2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+
+ /* Brainboxes UC-257 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0861,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0862,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0863,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+
+ /* Brainboxes UC-414 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0e61,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc414 },
+
+ /* Brainboxes UC-475 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0981,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0982,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+
+ /* Brainboxes IS-300/IS-500 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0da0,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_is300 },
+
+ /* Brainboxes PX-263/PX-295 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x402c,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px263 },
+
{ 0, } /* terminate list */
};
MODULE_DEVICE_TABLE(pci,parport_serial_pci_tbl);
@@ -542,6 +582,30 @@ static struct pciserial_board pci_parport_serial_boards[] = {
.base_baud = 921600,
.uart_offset = 0x8,
},
+ [brainboxes_uc257] = {
+ .flags = FL_BASE2,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [brainboxes_is300] = {
+ .flags = FL_BASE2,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [brainboxes_uc414] = {
+ .flags = FL_BASE2,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [brainboxes_px263] = {
+ .flags = FL_BASE2,
+ .num_ports = 4,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
};
struct parport_serial_private {
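
The serial half of these combo cards is described by a pciserial_board from the 8250 PCI layer. An annotated reading of one of the entries above, as a hedged gloss on the fields:

	[brainboxes_uc414] = {
		.flags       = FL_BASE2,	/* UARTs sit behind BAR 2 */
		.num_ports   = 4,		/* four UARTs on the card */
		.base_baud   = 115200,		/* input clock divided by 16 */
		.uart_offset = 8,		/* register stride between ports */
	},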
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index a304f5ea11b9..9d259372fbfd 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -52,7 +52,7 @@ config PCI_MSI
If you don't know what to do here, say Y.
config PCI_MSI_IRQ_DOMAIN
- def_bool ARC || ARM || ARM64 || X86 || RISCV
+ def_bool ARC || ARM || ARM64 || X86 || RISCV || MICROBLAZE
depends on PCI_MSI
select GENERIC_MSI_IRQ_DOMAIN
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 70e078238899..0a5e9712b2a8 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -109,6 +109,14 @@ config PCIE_XILINX
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
+config PCIE_XDMA_PL
+ bool "Xilinx XDMA PL PCIe host bridge support"
+ depends on ARCH_ZYNQMP || MICROBLAZE
+ depends on PCI_MSI_IRQ_DOMAIN
+ help
+	  Say 'Y' here if you want the kernel to support the
+ Xilinx XDMA PL PCIe Host Bridge driver.
+
config PCI_XGENE
bool "X-Gene PCIe controller"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index a2a22c9d91af..2aa3da574dbd 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o
obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
+obj-$(CONFIG_PCIE_XDMA_PL) += pcie-xdma-pl.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index b34b52b364d5..30c259f63239 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -1295,6 +1295,13 @@ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
static int __init imx6_pcie_init(void)
{
#ifdef CONFIG_ARM
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, imx6_pcie_of_match);
+ if (!np)
+ return -ENODEV;
+ of_node_put(np);
+
/*
* Since probe() can be deferred we need to make sure that
* hook_fault_code is not called after __init memory is freed
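
The guard above keeps imx6_pcie_init() from installing its ARM fault hook on kernels that boot on unrelated hardware. A minimal sketch of the same pattern, with a hypothetical compatible string:

#include <linux/of.h>

static const struct of_device_id demo_of_match[] = {	/* hypothetical */
	{ .compatible = "vendor,demo-pcie" },
	{ /* sentinel */ }
};

static int __init demo_init(void)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, demo_of_match);
	if (!np)
		return -ENODEV;	/* not this SoC: skip global setup */
	of_node_put(np);	/* drop the reference the lookup took */

	/* ... install global hooks only when the device exists ... */
	return 0;
}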
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index c8c702c494a2..b18ddb2b9ef8 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -861,8 +861,8 @@ static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
return ks_pcie_handle_error_irq(ks_pcie);
}
-static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
- struct platform_device *pdev)
+static int ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
+ struct platform_device *pdev)
{
struct dw_pcie *pci = ks_pcie->pci;
struct pcie_port *pp = &pci->pp;
@@ -992,8 +992,8 @@ static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
.get_features = &ks_pcie_am654_get_features,
};
-static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
- struct platform_device *pdev)
+static int ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
+ struct platform_device *pdev)
{
int ret;
struct dw_pcie_ep *ep;
@@ -1181,7 +1181,7 @@ static const struct of_device_id ks_pcie_of_match[] = {
{ },
};
-static int __init ks_pcie_probe(struct platform_device *pdev)
+static int ks_pcie_probe(struct platform_device *pdev)
{
const struct dw_pcie_host_ops *host_ops;
const struct dw_pcie_ep_ops *ep_ops;
@@ -1305,7 +1305,16 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
goto err_link;
}
+ /* Obtain references to the PHYs */
+ for (i = 0; i < num_lanes; i++)
+ phy_pm_runtime_get_sync(ks_pcie->phy[i]);
+
ret = ks_pcie_enable_phy(ks_pcie);
+
+ /* Release references to the PHYs */
+ for (i = 0; i < num_lanes; i++)
+ phy_pm_runtime_put_sync(ks_pcie->phy[i]);
+
if (ret) {
dev_err(dev, "failed to enable phy\n");
goto err_link;
@@ -1407,7 +1416,7 @@ err_link:
return ret;
}
-static int __exit ks_pcie_remove(struct platform_device *pdev)
+static int ks_pcie_remove(struct platform_device *pdev)
{
struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
struct device_link **link = ks_pcie->link;
@@ -1423,9 +1432,9 @@ static int __exit ks_pcie_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver ks_pcie_driver __refdata = {
+static struct platform_driver ks_pcie_driver = {
.probe = ks_pcie_probe,
- .remove = __exit_p(ks_pcie_remove),
+ .remove = ks_pcie_remove,
.driver = {
.name = "keystone-pcie",
.of_match_table = of_match_ptr(ks_pcie_of_match),
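
Dropping __init/__exit here matters because deferred probing lets the core call ->probe() long after .init.text has been freed; once probe and remove are ordinary functions, the __refdata annotation and the __exit_p() wrapper have nothing left to paper over. The resulting shape, sketched:

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)	/* not __init */
{
	return 0;
}

static int demo_remove(struct platform_device *pdev)	/* not __exit */
{
	return 0;
}

static struct platform_driver demo_driver = {		/* no __refdata */
	.probe	= demo_probe,
	.remove	= demo_remove,
	.driver	= { .name = "demo" },
};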
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index c06b05ab9f78..1cf94854c44f 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -7,6 +7,7 @@
* Author: Vidya Sagar <vidyas@nvidia.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
@@ -321,8 +322,7 @@ static void apply_bad_link_workaround(struct pcie_port *pp)
*/
val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
if (val & PCI_EXP_LNKSTA_LBMS) {
- current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT;
+ current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
if (pcie->init_link_width > current_link_width) {
dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
@@ -345,15 +345,14 @@ static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
struct pcie_port *pp = &pci->pp;
- u32 val, tmp;
+ u32 val, status_l0, status_l1;
u16 val_w;
- val = appl_readl(pcie, APPL_INTR_STATUS_L0);
- if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
- if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
- appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
-
+ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
/* SBR & Surprise Link Down WAR */
val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
@@ -369,15 +368,15 @@ static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
}
}
- if (val & APPL_INTR_STATUS_L0_INT_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
- if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
+ if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
appl_writel(pcie,
APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
APPL_INTR_STATUS_L1_8_0);
apply_bad_link_workaround(pp);
}
- if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
appl_writel(pcie,
APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
APPL_INTR_STATUS_L1_8_0);
@@ -389,25 +388,24 @@ static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
}
}
- val = appl_readl(pcie, APPL_INTR_STATUS_L0);
- if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
- tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
- if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
+ if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
dev_info(pci->dev, "CDM check complete\n");
- tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
}
- if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
dev_err(pci->dev, "CDM comparison mismatch\n");
- tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
}
- if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
dev_err(pci->dev, "CDM Logic error\n");
- tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
+ val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
}
- dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp);
- tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
- dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp);
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
+ dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
}
return IRQ_HANDLED;
@@ -598,8 +596,7 @@ static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
PCI_EXP_LNKSTA);
- pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT;
+ pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);
val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
PCI_EXP_LNKCTL);
@@ -775,7 +772,7 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
/* Configure Max lane width from DT */
val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
val &= ~PCI_EXP_LNKCAP_MLW;
- val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
+ val |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, pcie->num_lanes);
dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
config_gen3_gen4_eq_presets(pcie);
@@ -856,7 +853,7 @@ static int tegra_pcie_dw_host_init(struct pcie_port *pp)
offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
val &= ~PCI_DLF_EXCHANGE_ENABLE;
- dw_pcie_writel_dbi(pci, offset, val);
+ dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
tegra_pcie_prepare_host(pp);
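
FIELD_GET() and FIELD_PREP() from <linux/bitfield.h> derive the shift from the mask at compile time, which is what lets the hunks above drop the *_SHIFT constants (the LNKCAP write had been borrowing LNKSTA's shift constant). A short sketch:

#include <linux/bitfield.h>
#include <linux/pci_regs.h>
#include <linux/types.h>

static unsigned int demo_link_width(u16 lnksta)
{
	/* PCI_EXP_LNKSTA_NLW is 0x03f0, so a shift of 4 is inferred */
	return FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
}

static u32 demo_set_max_width(u32 lnkcap, unsigned int lanes)
{
	lnkcap &= ~PCI_EXP_LNKCAP_MLW;
	return lnkcap | FIELD_PREP(PCI_EXP_LNKCAP_MLW, lanes);
}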
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index bf5ece5d9291..88983fd0c1bd 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -458,22 +458,12 @@ static int faraday_pci_probe(struct platform_device *pdev)
p->dev = dev;
/* Retrieve and enable optional clocks */
- clk = devm_clk_get(dev, "PCLK");
+ clk = devm_clk_get_enabled(dev, "PCLK");
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "could not prepare PCLK\n");
- return ret;
- }
- p->bus_clk = devm_clk_get(dev, "PCICLK");
+ p->bus_clk = devm_clk_get_enabled(dev, "PCICLK");
if (IS_ERR(p->bus_clk))
return PTR_ERR(p->bus_clk);
- ret = clk_prepare_enable(p->bus_clk);
- if (ret) {
- dev_err(dev, "could not prepare PCICLK\n");
- return ret;
- }
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
p->base = devm_ioremap_resource(dev, regs);
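
devm_clk_get_enabled() collapses the devm_clk_get() + clk_prepare_enable() pair removed above into one call whose unwind (disable, unprepare, put) runs automatically on driver detach. A minimal sketch:

#include <linux/clk.h>

static int demo_get_clock(struct device *dev)
{
	struct clk *clk;

	/* get + prepare + enable; undone automatically on detach */
	clk = devm_clk_get_enabled(dev, "PCLK");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0;
}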
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 3d48fa685aaa..f66f07b8eebd 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -2739,6 +2739,24 @@ static int hv_pci_query_relations(struct hv_device *hdev)
if (!ret)
ret = wait_for_response(hdev, &comp);
+ /*
+ * In the case of fast device addition/removal, it's possible that
+ * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
+ * already got a PCI_BUS_RELATIONS* message from the host and the
+ * channel callback already scheduled a work to hbus->wq, which can be
+ * running pci_devices_present_work() -> survey_child_resources() ->
+ * complete(&hbus->survey_event), even after hv_pci_query_relations()
+ * exits and the stack variable 'comp' is no longer valid; as a result,
+ * a hang or a page fault may happen when the complete() calls
+ * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
+ * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
+	 * -ENODEV, there can't be any more work items scheduled to hbus->wq
+ * after the flush_workqueue(): see vmbus_onoffer_rescind() ->
+ * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
+ * channel->rescind = true.
+ */
+ flush_workqueue(hbus->wq);
+
return ret;
}
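
The hazard described in the comment above is a work item completing an on-stack struct completion after its owner has returned; flushing the workqueue while the frame is still live closes the window. A minimal sketch, with a hypothetical sender:

#include <linux/completion.h>
#include <linux/workqueue.h>

int demo_send_request(struct completion *comp);	/* hypothetical */

static int demo_query(struct workqueue_struct *wq)
{
	DECLARE_COMPLETION_ONSTACK(comp);	/* lives in this frame */
	int ret;

	ret = demo_send_request(&comp);
	if (!ret && !wait_for_completion_timeout(&comp, 10 * HZ))
		ret = -ETIMEDOUT;

	/* nothing queued may still touch &comp once this frame is gone */
	flush_workqueue(wq);
	return ret;
}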
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 99d505a85067..74c0ddd43381 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -2267,13 +2267,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->np = port;
rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
- if (IS_ERR(rp->base))
- return PTR_ERR(rp->base);
+ if (IS_ERR(rp->base)) {
+ err = PTR_ERR(rp->base);
+ goto err_node_put;
+ }
label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
if (!label) {
- dev_err(dev, "failed to create reset GPIO label\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_node_put;
}
/*
@@ -2289,9 +2291,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
rp->reset_gpio = NULL;
} else {
- dev_err(dev, "failed to get reset GPIO: %d\n",
- err);
- return PTR_ERR(rp->reset_gpio);
+ dev_err(dev, "failed to get reset GPIO: %ld\n",
+ PTR_ERR(rp->reset_gpio));
+ err = PTR_ERR(rp->reset_gpio);
+ goto err_node_put;
}
}
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 728a59655825..ff22bca818f3 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -623,14 +623,20 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
if (status & MSI_STATUS){
unsigned long imsi_status;
+ /*
+ * The interrupt status can be cleared even if the
+ * MSI status remains pending. As such, given the
+ * edge-triggered interrupt type, its status should
+ * be cleared before being dispatched to the
+ * handler of the underlying device.
+ */
+ writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
virq = irq_find_mapping(port->inner_domain, bit);
generic_handle_irq(virq);
}
}
- /* Clear MSI interrupt status */
- writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
}
}
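
For an edge-triggered summary interrupt, the summary status must be acked before the per-source status is drained; acking afterwards can drop an MSI that arrives mid-loop. The ordering above, sketched with hypothetical DEMO_* registers:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

#define DEMO_INT_STATUS	0x184		/* hypothetical registers */
#define DEMO_SRC_STATUS	0x1a0
#define DEMO_SUMMARY	BIT(23)

static void demo_dispatch(void __iomem *base, struct irq_domain *domain)
{
	unsigned long pending;
	int bit;

	writel(DEMO_SUMMARY, base + DEMO_INT_STATUS);	/* ack edge first */
	while ((pending = readl(base + DEMO_SRC_STATUS)))
		for_each_set_bit(bit, &pending, 32)
			generic_handle_irq(irq_find_mapping(domain, bit));
}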
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index b82edefffd15..d7ab669b1b0d 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -124,6 +124,7 @@ static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
struct pci_epf_header *hdr)
{
+ u32 reg;
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
@@ -136,8 +137,9 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
PCIE_CORE_CONFIG_VENDOR);
}
- rockchip_pcie_write(rockchip, hdr->deviceid << 16,
- ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_VENDOR_ID);
+ reg = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_DID_VID);
+ reg = (reg & 0xFFFF) | (hdr->deviceid << 16);
+ rockchip_pcie_write(rockchip, reg, PCIE_EP_CONFIG_DID_VID);
rockchip_pcie_write(rockchip,
hdr->revid |
@@ -311,15 +313,15 @@ static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
- u16 flags;
+ u32 flags;
flags = rockchip_pcie_read(rockchip,
ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
flags |=
- ((multi_msg_cap << 1) << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
- PCI_MSI_FLAGS_64BIT;
+ (multi_msg_cap << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
+ (PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET);
flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
rockchip_pcie_write(rockchip, flags,
ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
@@ -331,7 +333,7 @@ static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
- u16 flags;
+ u32 flags;
flags = rockchip_pcie_read(rockchip,
ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
@@ -344,48 +346,25 @@ static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
}
static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
- u8 intx, bool is_asserted)
+ u8 intx, bool do_assert)
{
struct rockchip_pcie *rockchip = &ep->rockchip;
- u32 r = ep->max_regions - 1;
- u32 offset;
- u32 status;
- u8 msg_code;
-
- if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR ||
- ep->irq_pci_fn != fn)) {
- rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
- AXI_WRAPPER_NOR_MSG,
- ep->irq_phys_addr, 0, 0);
- ep->irq_pci_addr = ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR;
- ep->irq_pci_fn = fn;
- }
intx &= 3;
- if (is_asserted) {
+
+ if (do_assert) {
ep->irq_pending |= BIT(intx);
- msg_code = ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA + intx;
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_INT_IN_ASSERT |
+ PCIE_CLIENT_INT_PEND_ST_PEND,
+ PCIE_CLIENT_LEGACY_INT_CTRL);
} else {
ep->irq_pending &= ~BIT(intx);
- msg_code = ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA + intx;
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_INT_IN_DEASSERT |
+ PCIE_CLIENT_INT_PEND_ST_NORMAL,
+ PCIE_CLIENT_LEGACY_INT_CTRL);
}
-
- status = rockchip_pcie_read(rockchip,
- ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
- ROCKCHIP_PCIE_EP_CMD_STATUS);
- status &= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
-
- if ((status != 0) ^ (ep->irq_pending != 0)) {
- status ^= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
- rockchip_pcie_write(rockchip, status,
- ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
- ROCKCHIP_PCIE_EP_CMD_STATUS);
- }
-
- offset =
- ROCKCHIP_PCIE_MSG_ROUTING(ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX) |
- ROCKCHIP_PCIE_MSG_CODE(msg_code) | ROCKCHIP_PCIE_MSG_NO_DATA;
- writel(0, ep->irq_cpu_addr + offset);
}
static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn,
@@ -415,7 +394,7 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
u8 interrupt_num)
{
struct rockchip_pcie *rockchip = &ep->rockchip;
- u16 flags, mme, data, data_mask;
+ u32 flags, mme, data, data_mask;
u8 msi_count;
u64 pci_addr, pci_addr_mask = 0xff;
@@ -505,6 +484,7 @@ static const struct pci_epc_features rockchip_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = false,
+ .align = 256,
};
static const struct pci_epc_features*
@@ -630,6 +610,9 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
return 0;
err_epc_mem_exit:
pci_epc_mem_exit(epc);
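
Besides the config-enable write and the INTx rework above, the write_header fix is worth spelling out: the old code stored hdr->deviceid << 16 into the combined DID/VID word with zeroed low bits, wiping the vendor ID programmed just before. The replacement is a plain read-modify-write:

static u32 demo_merge_did(u32 did_vid, u16 deviceid)
{
	/* keep the vendor ID in bits 15:0, replace the device ID above */
	return (did_vid & 0xFFFF) | ((u32)deviceid << 16);
}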
diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
index c53d1322a3d6..b047437605cb 100644
--- a/drivers/pci/controller/pcie-rockchip.c
+++ b/drivers/pci/controller/pcie-rockchip.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -154,6 +155,12 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
}
EXPORT_SYMBOL_GPL(rockchip_pcie_parse_dt);
+#define rockchip_pcie_read_addr(addr) rockchip_pcie_read(rockchip, addr)
+/* 100 ms max wait time for PHY PLLs to lock */
+#define RK_PHY_PLL_LOCK_TIMEOUT_US 100000
+/* Sleep should be less than 20ms */
+#define RK_PHY_PLL_LOCK_SLEEP_US 1000
+
int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
@@ -255,6 +262,16 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
}
}
+ err = readx_poll_timeout(rockchip_pcie_read_addr,
+ PCIE_CLIENT_SIDE_BAND_STATUS,
+ regs, !(regs & PCIE_CLIENT_PHY_ST),
+ RK_PHY_PLL_LOCK_SLEEP_US,
+ RK_PHY_PLL_LOCK_TIMEOUT_US);
+ if (err) {
+ dev_err(dev, "PHY PLLs could not lock, %d\n", err);
+ goto err_power_off_phy;
+ }
+
/*
* Please don't reorder the deassert sequence of the following
* four reset pins.
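
readx_poll_timeout() from <linux/iopoll.h> repeatedly evaluates op(addr) until cond holds or timeout_us expires, sleeping sleep_us between reads; the single-argument rockchip_pcie_read_addr() wrapper above exists because op takes exactly one argument. A sketch of the same poll against a hypothetical status register:

#include <linux/iopoll.h>

static u32 demo_read(void __iomem *addr)	/* one-argument reader */
{
	return readl(addr);
}

static int demo_wait_pll(void __iomem *status)
{
	u32 val;

	/* poll until BIT(12) clears: sleep 1 ms, give up after 100 ms */
	return readx_poll_timeout(demo_read, status, val,
				  !(val & BIT(12)), 1000, 100000);
}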
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 8e87a059ce73..1c45b3c32151 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -37,6 +37,13 @@
#define PCIE_CLIENT_MODE_EP HIWORD_UPDATE(0x0040, 0)
#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
+#define PCIE_CLIENT_LEGACY_INT_CTRL (PCIE_CLIENT_BASE + 0x0c)
+#define PCIE_CLIENT_INT_IN_ASSERT HIWORD_UPDATE_BIT(0x0002)
+#define PCIE_CLIENT_INT_IN_DEASSERT HIWORD_UPDATE(0x0002, 0)
+#define PCIE_CLIENT_INT_PEND_ST_PEND HIWORD_UPDATE_BIT(0x0001)
+#define PCIE_CLIENT_INT_PEND_ST_NORMAL HIWORD_UPDATE(0x0001, 0)
+#define PCIE_CLIENT_SIDE_BAND_STATUS (PCIE_CLIENT_BASE + 0x20)
+#define PCIE_CLIENT_PHY_ST BIT(12)
#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c)
#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0)
#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18
@@ -132,6 +139,8 @@
#define PCIE_RC_RP_ATS_BASE 0x400000
#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
#define PCIE_RC_CONFIG_BASE 0xa00000
+#define PCIE_EP_CONFIG_BASE 0xa00000
+#define PCIE_EP_CONFIG_DID_VID (PCIE_EP_CONFIG_BASE + 0x00)
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
#define PCIE_RC_CONFIG_SCC_SHIFT 16
#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
@@ -223,6 +232,7 @@
#define ROCKCHIP_PCIE_EP_CMD_STATUS 0x4
#define ROCKCHIP_PCIE_EP_CMD_STATUS_IS BIT(19)
#define ROCKCHIP_PCIE_EP_MSI_CTRL_REG 0x90
+#define ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET 16
#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET 17
#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK GENMASK(19, 17)
#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET 20
@@ -230,7 +240,6 @@
#define ROCKCHIP_PCIE_EP_MSI_CTRL_ME BIT(16)
#define ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP BIT(24)
#define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1
-#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3
#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
(PCIE_RC_RP_ATS_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
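
The PCIE_CLIENT_* values above use the Rockchip "hiword mask" convention: the upper 16 bits of a write select which low bits take effect, so single fields can be flipped without a read-modify-write. The helpers, as defined near the top of this header:

#define HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))
#define HIWORD_UPDATE_BIT(val)		HIWORD_UPDATE(val, val)

/* e.g. HIWORD_UPDATE_BIT(0x0002) sets bit 1 and HIWORD_UPDATE(0x0002, 0)
 * clears it; all other bits keep their value, their mask bits being 0. */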
diff --git a/drivers/pci/controller/pcie-xdma-pl.c b/drivers/pci/controller/pcie-xdma-pl.c
new file mode 100644
index 000000000000..42f37913490d
--- /dev/null
+++ b/drivers/pci/controller/pcie-xdma-pl.c
@@ -0,0 +1,892 @@
+/*
+ * PCIe host controller driver for Xilinx XDMA PCIe Bridge
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include "../pci.h"
+#include <linux/irqchip/chained_irq.h>
+
+/* Register definitions */
+#define XILINX_PCIE_REG_VSEC 0x0000012c
+#define XILINX_PCIE_REG_BIR 0x00000130
+#define XILINX_PCIE_REG_IDR 0x00000138
+#define XILINX_PCIE_REG_IMR 0x0000013c
+#define XILINX_PCIE_REG_PSCR 0x00000144
+#define XILINX_PCIE_REG_RPSC 0x00000148
+#define XILINX_PCIE_REG_MSIBASE1 0x0000014c
+#define XILINX_PCIE_REG_MSIBASE2 0x00000150
+#define XILINX_PCIE_REG_RPEFR 0x00000154
+#define XILINX_PCIE_REG_RPIFR1 0x00000158
+#define XILINX_PCIE_REG_RPIFR2 0x0000015c
+#define XILINX_PCIE_REG_IDRN 0x00000160
+#define XILINX_PCIE_REG_IDRN_MASK 0x00000164
+#define XILINX_PCIE_REG_MSI_LOW 0x00000170
+#define XILINX_PCIE_REG_MSI_HI 0x00000174
+#define XILINX_PCIE_REG_MSI_LOW_MASK 0x00000178
+#define XILINX_PCIE_REG_MSI_HI_MASK 0x0000017c
+
+/* Interrupt registers definitions */
+#define XILINX_PCIE_INTR_LINK_DOWN BIT(0)
+#define XILINX_PCIE_INTR_HOT_RESET BIT(3)
+#define XILINX_PCIE_INTR_CFG_TIMEOUT BIT(8)
+#define XILINX_PCIE_INTR_CORRECTABLE BIT(9)
+#define XILINX_PCIE_INTR_NONFATAL BIT(10)
+#define XILINX_PCIE_INTR_FATAL BIT(11)
+#define XILINX_PCIE_INTR_INTX BIT(16)
+#define XILINX_PCIE_INTR_MSI BIT(17)
+#define XILINX_PCIE_INTR_SLV_UNSUPP BIT(20)
+#define XILINX_PCIE_INTR_SLV_UNEXP BIT(21)
+#define XILINX_PCIE_INTR_SLV_COMPL BIT(22)
+#define XILINX_PCIE_INTR_SLV_ERRP BIT(23)
+#define XILINX_PCIE_INTR_SLV_CMPABT BIT(24)
+#define XILINX_PCIE_INTR_SLV_ILLBUR BIT(25)
+#define XILINX_PCIE_INTR_MST_DECERR BIT(26)
+#define XILINX_PCIE_INTR_MST_SLVERR BIT(27)
+#define XILINX_PCIE_IMR_ALL_MASK 0x0FF30FE9
+#define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF
+#define XILINX_PCIE_IDRN_MASK GENMASK(19, 16)
+
+/* Root Port Error FIFO Read Register definitions */
+#define XILINX_PCIE_RPEFR_ERR_VALID BIT(18)
+#define XILINX_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
+#define XILINX_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
+
+/* Root Port Interrupt FIFO Read Register 1 definitions */
+#define XILINX_PCIE_RPIFR1_INTR_VALID BIT(31)
+#define XILINX_PCIE_RPIFR1_MSI_INTR BIT(30)
+#define XILINX_PCIE_RPIFR1_INTR_MASK GENMASK(28, 27)
+#define XILINX_PCIE_RPIFR1_ALL_MASK 0xFFFFFFFF
+#define XILINX_PCIE_RPIFR1_INTR_SHIFT 27
+#define XILINX_PCIE_IDRN_SHIFT 16
+#define XILINX_PCIE_VSEC_REV_MASK GENMASK(19, 16)
+#define XILINX_PCIE_VSEC_REV_SHIFT 16
+#define XILINX_PCIE_FIFO_SHIFT 5
+
+/* Bridge Info Register definitions */
+#define XILINX_PCIE_BIR_ECAM_SZ_MASK GENMASK(18, 16)
+#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT 16
+
+/* Root Port Interrupt FIFO Read Register 2 definitions */
+#define XILINX_PCIE_RPIFR2_MSG_DATA GENMASK(15, 0)
+
+/* Root Port Status/control Register definitions */
+#define XILINX_PCIE_REG_RPSC_BEN BIT(0)
+
+/* Phy Status/Control Register definitions */
+#define XILINX_PCIE_REG_PSCR_LNKUP BIT(11)
+
+/* ECAM definitions */
+#define ECAM_BUS_NUM_SHIFT 20
+#define ECAM_DEV_NUM_SHIFT 12
+
+/* Number of MSI IRQs */
+#define XILINX_NUM_MSI_IRQS 64
+#define INTX_NUM 4
+
+/* For CPM Versal */
+#define CPM_BRIDGE_BASE_OFF 0xCD8
+#define XILINX_PCIE_MISC_IR_STATUS 0x00000340
+#define XILINX_PCIE_MISC_IR_ENABLE 0x00000348
+#define XILINX_PCIE_MISC_IR_LOCAL BIT(1)
+
+/* CPM versal Interrupt registers */
+#define XILINX_PCIE_INTR_CFG_PCIE_TIMEOUT BIT(4)
+#define XILINX_PCIE_INTR_CFG_ERR_POISON BIT(12)
+#define XILINX_PCIE_INTR_PME_TO_ACK_RCVD BIT(15)
+#define XILINX_PCIE_INTR_PM_PME_RCVD BIT(17)
+#define XILINX_PCIE_INTR_SLV_PCIE_TIMEOUT BIT(28)
+
+#define XILINX_PCIE_IMR_ALL_MASK_CPM 0x1FF39FF9
+
+enum msi_mode {
+ MSI_DECD_MODE = 1,
+ MSI_FIFO_MODE,
+};
+
+struct xilinx_msi {
+ struct irq_domain *msi_domain;
+ unsigned long *bitmap;
+ struct irq_domain *dev_domain;
+ struct mutex lock; /* protect bitmap variable */
+ unsigned long msi_pages;
+ int irq_msi0;
+ int irq_msi1;
+};
+
+/**
+ * struct xilinx_pcie_port - PCIe port information
+ * @reg_base: IO Mapped Register Base
+ * @cpm_base: CPM SLCR Register Base
+ * @irq: Interrupt number
+ * @root_busno: Root Bus number
+ * @dev: Device pointer
+ * @leg_domain: Legacy IRQ domain pointer
+ * @resources: Bus Resources
+ * @msi: MSI information
+ * @irq_misc: Legacy and error interrupt number
+ * @msi_mode: MSI mode
+ */
+struct xilinx_pcie_port {
+ void __iomem *reg_base;
+ void __iomem *cpm_base;
+ u32 irq;
+ u8 root_busno;
+ struct device *dev;
+ struct irq_domain *leg_domain;
+ struct list_head resources;
+ struct xilinx_msi msi;
+ int irq_misc;
+ u8 msi_mode;
+};
+
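+/*
+ * On Versal CPM the bridge registers sit at CPM_BRIDGE_BASE_OFF inside
+ * the mapped region; the plain XDMA bridge maps them at offset zero.
+ */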
+static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
+{
+ if (!port->cpm_base)
+ return readl(port->reg_base + reg);
+ else
+ return readl(port->reg_base + reg + CPM_BRIDGE_BASE_OFF);
+}
+
+static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
+{
+ if (!port->cpm_base)
+ writel(val, port->reg_base + reg);
+ else
+ writel(val, port->reg_base + reg + CPM_BRIDGE_BASE_OFF);
+}
+
+static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
+{
+ return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
+ XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
+}
+
+/**
+ * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
+{
+ unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
+
+ if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
+ dev_dbg(port->dev, "Requester ID %lu\n",
+ val & XILINX_PCIE_RPEFR_REQ_ID);
+ pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
+ XILINX_PCIE_REG_RPEFR);
+ }
+}
+
+/**
+ * xilinx_pcie_valid_device - Check if a valid device is present on bus
+ * @bus: PCI Bus structure
+ * @devfn: device/function
+ *
+ * Return: 'true' on success and 'false' if an invalid device is found
+ */
+static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ struct xilinx_pcie_port *port = bus->sysdata;
+
+ /* Check if link is up when trying to access downstream ports */
+ if (bus->number != port->root_busno)
+ if (!xilinx_pcie_link_is_up(port))
+ return false;
+
+ /* Only one device down on each root port */
+ if (bus->number == port->root_busno && devfn > 0)
+ return false;
+
+ return true;
+}
+
+/**
+ * xilinx_pcie_map_bus - Get configuration base
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ *
+ * Return: Base address of the configuration space needed to be
+ * accessed.
+ */
+static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct xilinx_pcie_port *port = bus->sysdata;
+ int relbus;
+
+ if (!xilinx_pcie_valid_device(bus, devfn))
+ return NULL;
+
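+	/* ECAM offset: bus number in bits [27:20], devfn in bits [19:12] */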
+ relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
+ (devfn << ECAM_DEV_NUM_SHIFT);
+
+ return port->reg_base + relbus + where;
+}
+
+/* PCIe operations */
+static struct pci_ops xilinx_pcie_ops = {
+ .map_bus = xilinx_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+/**
+ * xilinx_pcie_enable_msi - Enable MSI support
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
+{
+ struct xilinx_msi *msi = &port->msi;
+ phys_addr_t msg_addr;
+
+ msi->msi_pages = __get_free_pages(GFP_KERNEL, 0);
+ msg_addr = virt_to_phys((void *)msi->msi_pages);
+ pcie_write(port, upper_32_bits(msg_addr), XILINX_PCIE_REG_MSIBASE1);
+ pcie_write(port, lower_32_bits(msg_addr), XILINX_PCIE_REG_MSIBASE2);
+}
+
+/**
+ * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = xilinx_pcie_intx_map,
+ .xlate = pci_irqd_intx_xlate,
+};
+
+static void xilinx_pcie_handle_msi_irq(struct xilinx_pcie_port *port,
+ u32 status_reg)
+{
+ struct xilinx_msi *msi;
+ unsigned long status;
+ u32 bit;
+ u32 virq;
+
+ msi = &port->msi;
+
+ while ((status = pcie_read(port, status_reg)) != 0) {
+ for_each_set_bit(bit, &status, 32) {
+ pcie_write(port, 1 << bit, status_reg);
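+			/* MSI_HI reports hwirqs 32-63; rebase the bit */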
+ if (status_reg == XILINX_PCIE_REG_MSI_HI)
+ bit = bit + 32;
+ virq = irq_find_mapping(msi->dev_domain, bit);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+ }
+}
+
+static void xilinx_pcie_msi_handler_high(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct xilinx_pcie_port *port = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+ xilinx_pcie_handle_msi_irq(port, XILINX_PCIE_REG_MSI_HI);
+ chained_irq_exit(chip, desc);
+}
+
+static void xilinx_pcie_msi_handler_low(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct xilinx_pcie_port *port = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+ xilinx_pcie_handle_msi_irq(port, XILINX_PCIE_REG_MSI_LOW);
+ chained_irq_exit(chip, desc);
+}
+
+/**
+ * xilinx_pcie_intr_handler - Interrupt Service Handler
+ * @irq: IRQ number
+ * @data: PCIe port information
+ *
+ * Return: IRQ_HANDLED on success and IRQ_NONE on failure
+ */
+static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
+{
+ struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
+ u32 val, mask, status, msi_data, bit;
+ unsigned long intr_val;
+
+ /* Read interrupt decode and mask registers */
+ val = pcie_read(port, XILINX_PCIE_REG_IDR);
+ mask = pcie_read(port, XILINX_PCIE_REG_IMR);
+
+ status = val & mask;
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & XILINX_PCIE_INTR_LINK_DOWN)
+ dev_warn(port->dev, "Link Down\n");
+
+ if (status & XILINX_PCIE_INTR_HOT_RESET)
+ dev_info(port->dev, "Hot reset\n");
+
+ if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
+ dev_warn(port->dev, "ECAM access timeout\n");
+
+ if (status & XILINX_PCIE_INTR_CORRECTABLE) {
+ dev_warn(port->dev, "Correctable error message\n");
+ xilinx_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_PCIE_INTR_NONFATAL) {
+ dev_warn(port->dev, "Non fatal error message\n");
+ xilinx_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_PCIE_INTR_FATAL) {
+ dev_warn(port->dev, "Fatal error message\n");
+ xilinx_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_PCIE_INTR_INTX) {
+ /* Handle INTx Interrupt */
+ intr_val = pcie_read(port, XILINX_PCIE_REG_IDRN);
+ intr_val = intr_val >> XILINX_PCIE_IDRN_SHIFT;
+
+ for_each_set_bit(bit, &intr_val, INTX_NUM)
+ generic_handle_irq(irq_find_mapping(port->leg_domain,
+ bit));
+ }
+
+ if (port->msi_mode == MSI_FIFO_MODE &&
+ (status & XILINX_PCIE_INTR_MSI) && (!port->cpm_base)) {
+ /* MSI Interrupt */
+ val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
+
+ if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
+ dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+ goto error;
+ }
+
+ if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
+ msi_data = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
+ XILINX_PCIE_RPIFR2_MSG_DATA;
+
+ /* Clear interrupt FIFO register 1 */
+ pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+ XILINX_PCIE_REG_RPIFR1);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ /* Handle MSI Interrupt */
+ val = irq_find_mapping(port->msi.dev_domain,
+ msi_data);
+ if (val)
+ generic_handle_irq(val);
+ }
+ }
+ }
+
+ if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
+ dev_warn(port->dev, "Slave unsupported request\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_UNEXP)
+ dev_warn(port->dev, "Slave unexpected completion\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_COMPL)
+ dev_warn(port->dev, "Slave completion timeout\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_ERRP)
+ dev_warn(port->dev, "Slave Error Poison\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_CMPABT)
+ dev_warn(port->dev, "Slave Completer Abort\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
+ dev_warn(port->dev, "Slave Illegal Burst\n");
+
+ if (status & XILINX_PCIE_INTR_MST_DECERR)
+ dev_warn(port->dev, "Master decode error\n");
+
+ if (status & XILINX_PCIE_INTR_MST_SLVERR)
+ dev_warn(port->dev, "Master slave error\n");
+
+ if (port->cpm_base) {
+ if (status & XILINX_PCIE_INTR_CFG_PCIE_TIMEOUT)
+ dev_warn(port->dev, "PCIe ECAM access timeout\n");
+
+ if (status & XILINX_PCIE_INTR_CFG_ERR_POISON)
+ dev_warn(port->dev, "ECAM poisoned completion received\n");
+
+ if (status & XILINX_PCIE_INTR_PME_TO_ACK_RCVD)
+ dev_warn(port->dev, "PME_TO_ACK message received\n");
+
+ if (status & XILINX_PCIE_INTR_PM_PME_RCVD)
+ dev_warn(port->dev, "PM_PME message received\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_PCIE_TIMEOUT)
+ dev_warn(port->dev, "PCIe completion timeout received\n");
+ }
+
+error:
+ /* Clear the Interrupt Decode register */
+ pcie_write(port, status, XILINX_PCIE_REG_IDR);
+ if (port->cpm_base) {
+ val = readl(port->cpm_base + XILINX_PCIE_MISC_IR_STATUS);
+ if (val)
+ writel(val,
+ port->cpm_base + XILINX_PCIE_MISC_IR_STATUS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct irq_chip xilinx_msi_irq_chip = {
+ .name = "xilinx_pcie:msi",
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info xilinx_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &xilinx_msi_irq_chip,
+};
+
+static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct xilinx_pcie_port *pcie = irq_data_get_irq_chip_data(data);
+ struct xilinx_msi *msi = &pcie->msi;
+ phys_addr_t msi_addr;
+
+ msi_addr = virt_to_phys((void *)msi->msi_pages);
+ msg->address_lo = lower_32_bits(msi_addr);
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->data = data->hwirq;
+}
+
+static int xilinx_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip xilinx_irq_chip = {
+ .name = "Xilinx MSI",
+ .irq_compose_msi_msg = xilinx_compose_msi_msg,
+ .irq_set_affinity = xilinx_msi_set_affinity,
+};
+
+static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct xilinx_pcie_port *pcie = domain->host_data;
+ struct xilinx_msi *msi = &pcie->msi;
+ int bit;
+ int i;
+
+ mutex_lock(&msi->lock);
+ bit = bitmap_find_free_region(msi->bitmap, XILINX_NUM_MSI_IRQS,
+ get_count_order(nr_irqs));
+ if (bit < 0) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, bit + i, &xilinx_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+ }
+ mutex_unlock(&msi->lock);
+ return 0;
+}
+
+static void xilinx_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+ struct xilinx_pcie_port *pcie = irq_data_get_irq_chip_data(data);
+ struct xilinx_msi *msi = &pcie->msi;
+
+ mutex_lock(&msi->lock);
+ bitmap_release_region(msi->bitmap, data->hwirq,
+ get_count_order(nr_irqs));
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops dev_msi_domain_ops = {
+ .alloc = xilinx_irq_domain_alloc,
+ .free = xilinx_irq_domain_free,
+};
+
+static int xilinx_pcie_init_msi_irq_domain(struct xilinx_pcie_port *port)
+{
+ struct fwnode_handle *fwnode = of_node_to_fwnode(port->dev->of_node);
+ struct xilinx_msi *msi = &port->msi;
+ int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
+
+ msi->dev_domain = irq_domain_add_linear(NULL, XILINX_NUM_MSI_IRQS,
+ &dev_msi_domain_ops, port);
+ if (!msi->dev_domain) {
+ dev_err(port->dev, "failed to create dev IRQ domain\n");
+ return -ENOMEM;
+ }
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &xilinx_msi_domain_info,
+ msi->dev_domain);
+ if (!msi->msi_domain) {
+ dev_err(port->dev, "failed to create msi IRQ domain\n");
+ irq_domain_remove(msi->dev_domain);
+ return -ENOMEM;
+ }
+
+ mutex_init(&msi->lock);
+ msi->bitmap = kzalloc(size, GFP_KERNEL);
+	if (!msi->bitmap) {
+		irq_domain_remove(msi->msi_domain);
+		irq_domain_remove(msi->dev_domain);
+		return -ENOMEM;
+	}
+
+ xilinx_pcie_enable_msi(port);
+
+ return 0;
+}
+
+/**
+ * xilinx_pcie_init_irq_domain - Initialize IRQ domain
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+		return -ENODEV;
+ }
+
+ port->leg_domain = irq_domain_add_linear(pcie_intc_node, INTX_NUM,
+ &intx_domain_ops,
+ port);
+ if (!port->leg_domain) {
+		dev_err(dev, "Failed to get an INTx IRQ domain\n");
+		return -ENOMEM;
+ }
+
+	return xilinx_pcie_init_msi_irq_domain(port);
+}
+
+/**
+ * xilinx_pcie_init_port - Initialize hardware
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
+{
+ if (xilinx_pcie_link_is_up(port))
+ dev_info(port->dev, "PCIe Link is UP\n");
+ else
+ dev_info(port->dev, "PCIe Link is DOWN\n");
+
+ /* Disable all interrupts */
+ pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
+ XILINX_PCIE_REG_IMR);
+
+ /* Clear pending interrupts */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) &
+ XILINX_PCIE_IMR_ALL_MASK,
+ XILINX_PCIE_REG_IDR);
+
+ /* Enable all interrupts */
+ if (!port->cpm_base)
+ pcie_write(port, XILINX_PCIE_IMR_ALL_MASK,
+ XILINX_PCIE_REG_IMR);
+ pcie_write(port, XILINX_PCIE_IDRN_MASK, XILINX_PCIE_REG_IDRN_MASK);
+ if (port->msi_mode == MSI_DECD_MODE) {
+ pcie_write(port, XILINX_PCIE_IDR_ALL_MASK,
+ XILINX_PCIE_REG_MSI_LOW_MASK);
+ pcie_write(port, XILINX_PCIE_IDR_ALL_MASK,
+ XILINX_PCIE_REG_MSI_HI_MASK);
+ }
+ /* Enable the Bridge enable bit */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
+ XILINX_PCIE_REG_RPSC_BEN,
+ XILINX_PCIE_REG_RPSC);
+
+ if (port->cpm_base) {
+ writel(XILINX_PCIE_MISC_IR_LOCAL,
+ port->cpm_base + XILINX_PCIE_MISC_IR_ENABLE);
+ pcie_write(port, XILINX_PCIE_IMR_ALL_MASK_CPM,
+ XILINX_PCIE_REG_IMR);
+ }
+}
+
+static int xilinx_request_misc_irq(struct xilinx_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int err;
+
+ port->irq_misc = platform_get_irq_byname(pdev, "misc");
+ if (port->irq_misc <= 0) {
+ dev_err(dev, "Unable to find misc IRQ line\n");
+ return port->irq_misc;
+ }
+ err = devm_request_irq(dev, port->irq_misc,
+ xilinx_pcie_intr_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "xilinx-pcie", port);
+ if (err) {
+ dev_err(dev, "unable to request misc IRQ line %d\n",
+ port->irq_misc);
+ return err;
+ }
+
+ return 0;
+}
+
+static int xilinx_request_msi_irq(struct xilinx_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0");
+ if (port->msi.irq_msi0 <= 0) {
+ dev_err(dev, "Unable to find msi0 IRQ line\n");
+ return port->msi.irq_msi0;
+ }
+
+ irq_set_chained_handler_and_data(port->msi.irq_msi0,
+ xilinx_pcie_msi_handler_low,
+ port);
+
+ port->msi.irq_msi1 = platform_get_irq_byname(pdev, "msi1");
+ if (port->msi.irq_msi1 <= 0) {
+ dev_err(dev, "Unable to find msi1 IRQ line\n");
+ return port->msi.irq_msi1;
+ }
+
+ irq_set_chained_handler_and_data(port->msi.irq_msi1,
+ xilinx_pcie_msi_handler_high,
+ port);
+
+ return 0;
+}
+
+/**
+ * xilinx_pcie_parse_dt - Parse Device tree
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct resource regs;
+ const char *type;
+ int err, mode_val, val;
+
+ if (of_device_is_compatible(node, "xlnx,xdma-host-3.00")) {
+ type = of_get_property(node, "device_type", NULL);
+ if (!type || strcmp(type, "pci")) {
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
+ return -EINVAL;
+ }
+
+ err = of_address_to_resource(node, 0, &regs);
+ if (err) {
+ dev_err(dev, "missing \"reg\" property\n");
+ return err;
+ }
+
+ port->reg_base = devm_ioremap_resource(dev, &regs);
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+
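+		/*
+		 * Use MSI Decode mode when the VSEC revision is non-zero
+		 * and the bridge does not report FIFO-only operation;
+		 * otherwise fall back to FIFO mode.
+		 */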
+ val = pcie_read(port, XILINX_PCIE_REG_BIR);
+ val = (val >> XILINX_PCIE_FIFO_SHIFT) & MSI_DECD_MODE;
+ mode_val = pcie_read(port, XILINX_PCIE_REG_VSEC) &
+ XILINX_PCIE_VSEC_REV_MASK;
+ mode_val = mode_val >> XILINX_PCIE_VSEC_REV_SHIFT;
+ if (mode_val && !val) {
+ port->msi_mode = MSI_DECD_MODE;
+ dev_info(dev, "Using MSI Decode mode\n");
+ } else {
+ port->msi_mode = MSI_FIFO_MODE;
+ dev_info(dev, "Using MSI FIFO mode\n");
+ }
+
+ if (port->msi_mode == MSI_DECD_MODE) {
+ err = xilinx_request_misc_irq(port);
+ if (err)
+ return err;
+
+ err = xilinx_request_msi_irq(port);
+ if (err)
+ return err;
+
+ } else if (port->msi_mode == MSI_FIFO_MODE) {
+ port->irq = irq_of_parse_and_map(node, 0);
+ if (!port->irq) {
+ dev_err(dev, "Unable to find IRQ line\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, port->irq,
+ xilinx_pcie_intr_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "xilinx-pcie", port);
+ if (err) {
+ dev_err(dev, "unable to request irq %d\n",
+ port->irq);
+ return err;
+ }
+ }
+ } else if (of_device_is_compatible(node, "xlnx,versal-cpm-host-1.00")) {
+ struct resource *res;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ port->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cpm_slcr");
+ port->cpm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->cpm_base))
+ return PTR_ERR(port->cpm_base);
+
+ err = xilinx_request_misc_irq(port);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_pcie_probe - Probe function
+ * @pdev: Platform device pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_probe(struct platform_device *pdev)
+{
+ struct xilinx_pcie_port *port;
+ struct device *dev = &pdev->dev;
+ struct pci_bus *bus;
+ struct pci_bus *child;
+ struct pci_host_bridge *bridge;
+ int err;
+ resource_size_t iobase = 0;
+ LIST_HEAD(res);
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ if (!bridge)
+ return -ENODEV;
+
+ port = pci_host_bridge_priv(bridge);
+
+ port->dev = dev;
+
+ err = xilinx_pcie_parse_dt(port);
+ if (err) {
+ dev_err(dev, "Parsing DT failed\n");
+ return err;
+ }
+
+ xilinx_pcie_init_port(port);
+
+ err = xilinx_pcie_init_irq_domain(port);
+ if (err) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ return err;
+ }
+
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &iobase);
+ if (err) {
+ dev_err(dev, "Getting bridge resources failed\n");
+ return err;
+ }
+
+ err = devm_request_pci_bus_resources(dev, &res);
+ if (err)
+ goto error;
+
+ list_splice_init(&res, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = port;
+ bridge->busnr = port->root_busno;
+ bridge->ops = &xilinx_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ err = pci_scan_root_bus_bridge(bridge);
+ if (err)
+ goto error;
+
+ bus = bridge->bus;
+
+ pci_assign_unassigned_bus_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ pci_bus_add_devices(bus);
+ return 0;
+
+error:
+ pci_free_resource_list(&res);
+ return err;
+}
+
+static const struct of_device_id xilinx_pcie_of_match[] = {
+ { .compatible = "xlnx,xdma-host-3.00", },
+ { .compatible = "xlnx,versal-cpm-host-1.00", },
+ {}
+};
+
+static struct platform_driver xilinx_pcie_driver = {
+ .driver = {
+ .name = "xilinx-pcie",
+ .of_match_table = xilinx_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = xilinx_pcie_probe,
+};
+
+builtin_platform_driver(xilinx_pcie_driver);
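xilinx_pcie_parse_dt() above picks the MSI mode from two fields: decode mode is used only when the VSEC revision is non-zero and the FIFO-capability bit in the bridge-info register is clear. A userspace sketch of just that decision (the shift and mask values are illustrative stand-ins, not the real register layout):

```c
#include <stdio.h>

/* Illustrative stand-ins for the masks used in the patch; the values
 * here are assumptions, not the real ZynqMP register layout. */
#define FIFO_SHIFT     8
#define MSI_DECD_MODE  1
#define VSEC_REV_MASK  0xF0000
#define VSEC_REV_SHIFT 16

/* Mirrors the selection logic in xilinx_pcie_parse_dt(): decode mode is
 * chosen only when the VSEC revision is non-zero and the FIFO bit is clear. */
static const char *pick_msi_mode(unsigned int bir, unsigned int vsec)
{
	unsigned int fifo = (bir >> FIFO_SHIFT) & MSI_DECD_MODE;
	unsigned int rev = (vsec & VSEC_REV_MASK) >> VSEC_REV_SHIFT;

	return (rev && !fifo) ? "MSI Decode mode" : "MSI FIFO mode";
}

int main(void)
{
	printf("%s\n", pick_msi_mode(0x000, 0x10000)); /* rev=1, fifo=0 -> Decode */
	printf("%s\n", pick_msi_mode(0x100, 0x10000)); /* fifo=1 -> FIFO */
	return 0;
}
```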
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 11b046b20b92..b51ce573c963 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -38,6 +38,11 @@
#define E_ECAM_CONTROL 0x00000228
#define E_ECAM_BASE_LO 0x00000230
#define E_ECAM_BASE_HI 0x00000234
+#define E_DREG_CTRL 0x00000288
+#define E_DREG_BASE_LO 0x00000290
+
+#define DREG_DMA_EN BIT(0)
+#define DREG_DMA_BASE_LO 0xFD0F0000
/* Ingress - address translations */
#define I_MSII_CAPABILITIES 0x00000300
@@ -56,6 +61,10 @@
#define MSGF_MSI_STATUS_HI 0x00000444
#define MSGF_MSI_MASK_LO 0x00000448
#define MSGF_MSI_MASK_HI 0x0000044C
+/* Root DMA Interrupt register */
+#define MSGF_DMA_MASK 0x00000464
+
+#define MSGF_INTR_EN BIT(0)
/* Msg filter mask bits */
#define CFG_ENABLE_PM_MSG_FWD BIT(1)
@@ -493,7 +502,7 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
- domain->host_data, handle_simple_irq,
+ domain->host_data, handle_simple_irq,
NULL, NULL);
}
mutex_unlock(&msi->lock);
@@ -501,7 +510,7 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
}
static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
+ unsigned int nr_irqs)
{
struct irq_data *data = irq_domain_get_irq_data(domain, virq);
struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
@@ -755,7 +764,6 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
/* Enable all misc interrupts */
nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
-
/* Disable all legacy interrupts */
nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
@@ -763,6 +771,12 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);
+ /* Enabling DREG translations */
+ nwl_bridge_writel(pcie, DREG_DMA_EN, E_DREG_CTRL);
+ nwl_bridge_writel(pcie, DREG_DMA_BASE_LO, E_DREG_BASE_LO);
+ /* Enabling Root DMA interrupts */
+ nwl_bridge_writel(pcie, MSGF_INTR_EN, MSGF_DMA_MASK);
+
/* Enable all legacy interrupts */
nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
@@ -844,12 +858,7 @@ static int nwl_pcie_probe(struct platform_device *pdev)
pcie->clk = devm_clk_get(dev, NULL);
if (IS_ERR(pcie->clk))
return PTR_ERR(pcie->clk);
-
- err = clk_prepare_enable(pcie->clk);
- if (err) {
- dev_err(dev, "can't enable PCIe ref clock\n");
- return err;
- }
+ clk_prepare_enable(pcie->clk);
err = nwl_pcie_bridge_init(pcie);
if (err) {
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index aa61d4c219d7..79a713f5dbf4 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -72,6 +72,8 @@ extern int pciehp_poll_time;
* @reset_lock: prevents access to the Data Link Layer Link Active bit in the
* Link Status register and to the Presence Detect State bit in the Slot
* Status register during a slot reset which may cause them to flap
+ * @depth: Number of additional hotplug ports in the path to the root bus,
+ * used as lock subclass for @reset_lock
* @ist_running: flag to keep user request waiting while IRQ thread is running
* @request_result: result of last user request submitted to the IRQ thread
* @requester: wait queue to wake up on completion of user request,
@@ -102,6 +104,7 @@ struct controller {
struct hotplug_slot hotplug_slot; /* hotplug core interface */
struct rw_semaphore reset_lock;
+ unsigned int depth;
unsigned int ist_running;
int request_result;
wait_queue_head_t requester;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 312cc45c44c7..f5255045b149 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -165,7 +165,7 @@ static void pciehp_check_presence(struct controller *ctrl)
{
int occupied;
- down_read(&ctrl->reset_lock);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);
mutex_lock(&ctrl->state_lock);
occupied = pciehp_card_present_or_link_active(ctrl);
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 6503d15effbb..45d0f6370715 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -258,6 +258,14 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
present = pciehp_card_present(ctrl);
link_active = pciehp_check_link_active(ctrl);
if (present <= 0 && link_active <= 0) {
+ if (ctrl->state == BLINKINGON_STATE) {
+ ctrl->state = OFF_STATE;
+ cancel_delayed_work(&ctrl->button_work);
+ pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
+ INDICATOR_NOOP);
+ ctrl_info(ctrl, "Slot(%s): Card not present\n",
+ slot_name(ctrl));
+ }
mutex_unlock(&ctrl->state_lock);
return;
}
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 13f3bc239c66..bdbe01d4d9e9 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -293,17 +293,11 @@ int pciehp_check_link_status(struct controller *ctrl)
static int __pciehp_link_set(struct controller *ctrl, bool enable)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
- u16 lnk_ctrl;
- pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
+ pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_LD,
+ enable ? 0 : PCI_EXP_LNKCTL_LD);
- if (enable)
- lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
- else
- lnk_ctrl |= PCI_EXP_LNKCTL_LD;
-
- pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
- ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
return 0;
}
@@ -674,7 +668,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
* Disable requests have higher priority than Presence Detect Changed
* or Data Link Layer State Changed events.
*/
- down_read(&ctrl->reset_lock);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);
if (events & DISABLE_SLOT)
pciehp_handle_disable_request(ctrl);
else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
@@ -808,7 +802,7 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe)
if (probe)
return 0;
- down_write(&ctrl->reset_lock);
+ down_write_nested(&ctrl->reset_lock, ctrl->depth);
if (!ATTN_BUTTN(ctrl)) {
ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
@@ -864,6 +858,20 @@ static inline void dbg_ctrl(struct controller *ctrl)
#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
+static inline int pcie_hotplug_depth(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->bus;
+ int depth = 0;
+
+ while (bus->parent) {
+ bus = bus->parent;
+ if (bus->self && bus->self->is_hotplug_bridge)
+ depth++;
+ }
+
+ return depth;
+}
+
struct controller *pcie_init(struct pcie_device *dev)
{
struct controller *ctrl;
@@ -877,6 +885,7 @@ struct controller *pcie_init(struct pcie_device *dev)
return NULL;
ctrl->pcie = dev;
+ ctrl->depth = pcie_hotplug_depth(dev->port);
pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
if (pdev->hotplug_user_indicators)
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index d17f3bf36f70..ad12515a4a12 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -63,7 +63,14 @@ int pciehp_configure_device(struct controller *ctrl)
pci_assign_unassigned_bridge_resources(bridge);
pcie_bus_configure_settings(parent);
+
+ /*
+ * Release reset_lock during driver binding
+ * to avoid AB-BA deadlock with device_lock.
+ */
+ up_read(&ctrl->reset_lock);
pci_bus_add_devices(parent);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);
out:
pci_unlock_rescan_remove();
@@ -104,7 +111,15 @@ void pciehp_unconfigure_device(struct controller *ctrl, bool presence)
list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
bus_list) {
pci_dev_get(dev);
+
+ /*
+ * Release reset_lock during driver unbinding
+ * to avoid AB-BA deadlock with device_lock.
+ */
+ up_read(&ctrl->reset_lock);
pci_stop_and_remove_bus_device(dev);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);
+
/*
* Ensure that no new Requests will be generated from
* the device.
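The comments in this hunk describe an AB-BA deadlock: the hotplug IRQ thread holds reset_lock and then takes each device_lock while binding or unbinding drivers, while paths such as a user-triggered slot reset can hold a device_lock and then take reset_lock. A minimal pthreads sketch of the hazard and of the drop-and-reacquire fix (lock types simplified, names invented):

```c
#include <pthread.h>

static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;

/* Deadlock-prone: A (reset_lock) then B (device_lock) here,
 * B then A in reset_path() below. */
static void hotplug_thread_bad(void)
{
	pthread_rwlock_rdlock(&reset_lock);
	pthread_mutex_lock(&device_lock);   /* bind/unbind drivers */
	pthread_mutex_unlock(&device_lock);
	pthread_rwlock_unlock(&reset_lock);
}

/* Fixed as in the patch: drop A before taking B, re-acquire A after. */
static void hotplug_thread_fixed(void)
{
	pthread_rwlock_rdlock(&reset_lock);
	/* ... work that needs reset_lock ... */
	pthread_rwlock_unlock(&reset_lock);
	pthread_mutex_lock(&device_lock);   /* bind/unbind drivers */
	pthread_mutex_unlock(&device_lock);
	pthread_rwlock_rdlock(&reset_lock);
	/* ... remaining work ... */
	pthread_rwlock_unlock(&reset_lock);
}

static void reset_path(void)
{
	pthread_mutex_lock(&device_lock);
	pthread_rwlock_wrlock(&reset_lock); /* e.g. a slot reset via sysfs */
	pthread_rwlock_unlock(&reset_lock);
	pthread_mutex_unlock(&device_lock);
}

int main(void)
{
	hotplug_thread_bad();   /* harmless single-threaded */
	hotplug_thread_fixed();
	reset_path();
	return 0;
}
```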
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index a1de501a2729..3f6a5d520259 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -94,6 +94,8 @@ int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler,
va_start(ap, fmt);
devname = kvasprintf(GFP_KERNEL, fmt, ap);
va_end(ap);
+ if (!devname)
+ return -ENOMEM;
ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn,
irqflags, devname, dev_id);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 715c85d4e688..1701d3de24da 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1428,7 +1428,7 @@ irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
{
return (irq_hw_number_t)desc->msi_attrib.entry_nr |
pci_dev_id(dev) << 11 |
- (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
+ ((irq_hw_number_t)(pci_domain_nr(dev->bus) & 0xFFFFFFFF)) << 27;
}
static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
@@ -1603,8 +1603,8 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
of_node = irq_domain_get_of_node(domain);
- rid = of_node ? of_msi_map_rid(&pdev->dev, of_node, rid) :
- iort_msi_map_rid(&pdev->dev, rid);
+ rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) :
+ iort_msi_map_id(&pdev->dev, rid);
return rid;
}
@@ -1624,9 +1624,10 @@ struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
u32 rid = pci_dev_id(pdev);
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
- dom = of_msi_map_get_device_domain(&pdev->dev, rid);
+ dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI);
if (!dom)
- dom = iort_get_device_domain(&pdev->dev, rid);
+ dom = iort_get_device_domain(&pdev->dev, rid,
+ DOMAIN_BUS_PCI_MSI);
return dom;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index e30c2a78a88f..86dc5ae17c6d 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -909,7 +909,7 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
int acpi_state, d_max;
- if (pdev->no_d3cold)
+ if (pdev->no_d3cold || !pdev->d3cold_allowed)
d_max = ACPI_STATE_D3_HOT;
else
d_max = ACPI_STATE_D3_COLD;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 5ea612a15550..70c8584b3ffc 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -946,7 +946,7 @@ static int pci_pm_resume_noirq(struct device *dev)
pcie_pme_root_status_cleanup(pci_dev);
if (!skip_bus_pm && prev_state == PCI_D3cold)
- pci_bridge_wait_for_secondary_bus(pci_dev);
+ pci_bridge_wait_for_secondary_bus(pci_dev, "resume", PCI_RESET_WAIT);
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
@@ -1355,7 +1355,7 @@ static int pci_pm_runtime_resume(struct device *dev)
pci_fixup_device(pci_fixup_resume, pci_dev);
if (prev_state == PCI_D3cold)
- pci_bridge_wait_for_secondary_bus(pci_dev);
+ pci_bridge_wait_for_secondary_bus(pci_dev, "resume", PCI_RESET_WAIT);
if (pm && pm->runtime_resume)
rc = pm->runtime_resume(dev);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index e401f040f157..90d5a29a6ff3 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -517,10 +517,7 @@ static ssize_t d3cold_allowed_store(struct device *dev,
return -EINVAL;
pdev->d3cold_allowed = !!val;
- if (pdev->d3cold_allowed)
- pci_d3cold_enable(pdev);
- else
- pci_d3cold_disable(pdev);
+ pci_bridge_d3_update(pdev);
pm_runtime_resume(dev);
@@ -1157,11 +1154,9 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
sysfs_bin_attr_init(res_attr);
if (write_combine) {
- pdev->res_attr_wc[num] = res_attr;
sprintf(res_attr_name, "resource%d_wc", num);
res_attr->mmap = pci_mmap_resource_wc;
} else {
- pdev->res_attr[num] = res_attr;
sprintf(res_attr_name, "resource%d", num);
if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
res_attr->read = pci_read_resource_io;
@@ -1177,10 +1172,17 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
res_attr->size = pci_resource_len(pdev, num);
res_attr->private = (void *)(unsigned long)num;
retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
- if (retval)
+ if (retval) {
kfree(res_attr);
+ return retval;
+ }
- return retval;
+ if (write_combine)
+ pdev->res_attr_wc[num] = res_attr;
+ else
+ pdev->res_attr[num] = res_attr;
+
+ return 0;
}
/**
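The pci_create_attr() reordering is a publish-after-success pattern: the attribute pointer is stored in the pci_dev only once sysfs registration succeeds, so the error path can free the attribute without leaving a dangling pointer behind. A small sketch of the idiom (all names invented):

```c
#include <stdio.h>
#include <stdlib.h>

struct attr { const char *name; };
struct dev { struct attr *res_attr; };

/* Stand-in for sysfs_create_bin_file(); fails when told to. */
static int register_attr(struct attr *a, int should_fail)
{
	(void)a;
	return should_fail ? -1 : 0;
}

static int create_attr(struct dev *d, int should_fail)
{
	struct attr *a = malloc(sizeof(*a));
	int ret;

	if (!a)
		return -1;
	a->name = "resource0";

	ret = register_attr(a, should_fail);
	if (ret) {
		free(a);          /* nothing was published, nothing dangles */
		return ret;
	}

	d->res_attr = a;          /* publish only after success */
	return 0;
}

int main(void)
{
	struct dev d = { 0 };

	if (create_attr(&d, 1))
		printf("failed, res_attr still %p\n", (void *)d.res_attr);
	return 0;
}
```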
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index ec741f92246d..64c89b23e99f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2617,13 +2617,13 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
{
/*
* Downstream device is not accessible after putting a root port
- * into D3cold and back into D0 on Elo i2.
+ * into D3cold and back into D0 on Elo Continental Z2 board
*/
- .ident = "Elo i2",
+ .ident = "Elo Continental Z2",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"),
+ DMI_MATCH(DMI_BOARD_NAME, "Geminilake"),
+ DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
},
},
#endif
@@ -4483,7 +4483,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
return -ENOTTY;
}
- if (delay > 1000)
+ if (delay > PCI_RESET_WAIT)
pci_info(dev, "not ready %dms after %s; waiting\n",
delay - 1, reset_type);
@@ -4492,7 +4492,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
pci_read_config_dword(dev, PCI_COMMAND, &id);
}
- if (delay > 1000)
+ if (delay > PCI_RESET_WAIT)
pci_info(dev, "ready %dms after %s\n", delay - 1,
reset_type);
@@ -4727,24 +4727,31 @@ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
/**
* pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
* @dev: PCI bridge
+ * @reset_type: reset type in human-readable form
+ * @timeout: maximum time to wait for devices on secondary bus (milliseconds)
*
* Handle necessary delays before access to the devices on the secondary
- * side of the bridge are permitted after D3cold to D0 transition.
+ * side of the bridge is permitted after a D3cold to D0 transition
+ * or Conventional Reset.
*
* For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
* conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
* 4.3.2.
+ *
+ * Return 0 on success or -ENOTTY if the first device on the secondary bus
+ * failed to become accessible.
*/
-void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
+ int timeout)
{
struct pci_dev *child;
int delay;
if (pci_dev_is_disconnected(dev))
- return;
+ return 0;
- if (!pci_is_bridge(dev) || !dev->bridge_d3)
- return;
+ if (!pci_is_bridge(dev))
+ return 0;
down_read(&pci_bus_sem);
@@ -4756,14 +4763,14 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
*/
if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
up_read(&pci_bus_sem);
- return;
+ return 0;
}
/* Take d3cold_delay requirements into account */
delay = pci_bus_max_d3cold_delay(dev->subordinate);
if (!delay) {
up_read(&pci_bus_sem);
- return;
+ return 0;
}
child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
@@ -4772,14 +4779,12 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
/*
* Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
- * accessing the device after reset (that is 1000 ms + 100 ms). In
- * practice this should not be needed because we don't do power
- * management for them (see pci_bridge_d3_possible()).
+ * accessing the device after reset (that is 1000 ms + 100 ms).
*/
if (!pci_is_pcie(dev)) {
pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
msleep(1000 + delay);
- return;
+ return 0;
}
/*
@@ -4796,11 +4801,11 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
* configuration requests if we only wait for 100 ms (see
* https://bugzilla.kernel.org/show_bug.cgi?id=203885).
*
- * Therefore we wait for 100 ms and check for the device presence.
- * If it is still not present give it an additional 100 ms.
+ * Therefore we wait for 100 ms and check for the device presence
+ * until the timeout expires.
*/
if (!pcie_downstream_port(dev))
- return;
+ return 0;
if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
@@ -4810,14 +4815,11 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
delay);
if (!pcie_wait_for_link_delay(dev, true, delay)) {
/* Did not train, no need to wait any further */
- return;
+ return -ENOTTY;
}
}
- if (!pci_device_is_present(child)) {
- pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
- msleep(delay);
- }
+ return pci_dev_wait(child, reset_type, timeout - delay);
}
void pci_reset_secondary_bus(struct pci_dev *dev)
@@ -4836,15 +4838,6 @@ void pci_reset_secondary_bus(struct pci_dev *dev)
ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
-
- /*
- * Trhfa for conventional PCI is 2^25 clock cycles.
- * Assuming a minimum 33MHz clock this results in a 1s
- * delay before we can consider subordinate devices to
- * be re-initialized. PCIe has some ways to shorten this,
- * but we don't make use of them yet.
- */
- ssleep(1);
}
void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
@@ -4863,7 +4856,8 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
{
pcibios_reset_secondary_bus(dev);
- return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
+ return pci_bridge_wait_for_secondary_bus(dev, "bus reset",
+ PCIE_RESET_READY_POLL_MS);
}
EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
@@ -6093,6 +6087,8 @@ bool pci_device_is_present(struct pci_dev *pdev)
{
u32 v;
+ /* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
+ pdev = pci_physfn(pdev);
if (pci_dev_is_disconnected(pdev))
return false;
return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
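The reworked pci_bridge_wait_for_secondary_bus() treats @timeout as a budget: the link-up delay (at least 100 ms, or the d3cold_delay requirement) is spent first, and only the remainder is handed to pci_dev_wait() for config-space polling. A minimal sketch of that arithmetic, assuming the 1000 ms PCI_RESET_WAIT budget defined in the pci.h hunk below:

```c
#include <stdio.h>

#define PCI_RESET_WAIT 1000 /* msec, as in the pci.h hunk below */

int main(void)
{
	int delay = 100;                        /* settle delay spent first */
	int remaining = PCI_RESET_WAIT - delay; /* passed to pci_dev_wait() */

	printf("settle %d ms, then poll config space up to %d ms\n",
	       delay, remaining);
	return 0;
}
```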
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 572c2f0a2f0c..725d2b0d4569 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -47,6 +47,13 @@ int pci_bus_error_reset(struct pci_dev *dev);
#define PCI_PM_D3COLD_WAIT 100
#define PCI_PM_BUS_WAIT 50
+/*
+ * Following exit from Conventional Reset, devices must be ready within 1 sec
+ * (PCIe r6.0 sec 6.6.1). A D3cold to D0 transition implies a Conventional
+ * Reset (PCIe r6.0 sec 5.8).
+ */
+#define PCI_RESET_WAIT 1000 /* msec */
+
/**
* struct pci_platform_pm_ops - Firmware PM callbacks
*
@@ -107,7 +114,8 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
-void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);
+int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
+ int timeout);
static inline void pci_wakeup_event(struct pci_dev *dev)
{
@@ -346,53 +354,36 @@ struct pci_sriov {
* @dev - pci device to set new error_state
* @new - the state we want dev to be in
*
- * Must be called with device_lock held.
+ * If the device is experiencing perm_failure, it has to remain in that state.
+ * Any other transition is allowed.
*
* Returns true if state has been changed to the requested state.
*/
static inline bool pci_dev_set_io_state(struct pci_dev *dev,
pci_channel_state_t new)
{
- bool changed = false;
+ pci_channel_state_t old;
- device_lock_assert(&dev->dev);
switch (new) {
case pci_channel_io_perm_failure:
- switch (dev->error_state) {
- case pci_channel_io_frozen:
- case pci_channel_io_normal:
- case pci_channel_io_perm_failure:
- changed = true;
- break;
- }
- break;
+ xchg(&dev->error_state, pci_channel_io_perm_failure);
+ return true;
case pci_channel_io_frozen:
- switch (dev->error_state) {
- case pci_channel_io_frozen:
- case pci_channel_io_normal:
- changed = true;
- break;
- }
- break;
+ old = cmpxchg(&dev->error_state, pci_channel_io_normal,
+ pci_channel_io_frozen);
+ return old != pci_channel_io_perm_failure;
case pci_channel_io_normal:
- switch (dev->error_state) {
- case pci_channel_io_frozen:
- case pci_channel_io_normal:
- changed = true;
- break;
- }
- break;
+ old = cmpxchg(&dev->error_state, pci_channel_io_frozen,
+ pci_channel_io_normal);
+ return old != pci_channel_io_perm_failure;
+ default:
+ return false;
}
- if (changed)
- dev->error_state = new;
- return changed;
}
static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
{
- device_lock(&dev->dev);
pci_dev_set_io_state(dev, pci_channel_io_perm_failure);
- device_unlock(&dev->dev);
return 0;
}
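The pci_dev_set_io_state() rewrite in this hunk drops the device_lock requirement by making the state machine lock-free: perm_failure is entered unconditionally via xchg(), and the normal/frozen transitions use cmpxchg() so they can never overwrite a concurrent perm_failure. A userspace C11 sketch of the same transitions (the enum and function names are invented):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum io_state { IO_NORMAL, IO_FROZEN, IO_PERM_FAILURE };

static _Atomic int error_state = IO_NORMAL;

static bool set_io_state(enum io_state new)
{
	int old;

	switch (new) {
	case IO_PERM_FAILURE:
		/* Sticky: always wins, like the xchg() above. */
		atomic_exchange(&error_state, IO_PERM_FAILURE);
		return true;
	case IO_FROZEN:
		old = IO_NORMAL;
		/* On failure 'old' is updated to the current value, which is
		 * exactly what cmpxchg() returns in the kernel version. */
		atomic_compare_exchange_strong(&error_state, &old, IO_FROZEN);
		return old != IO_PERM_FAILURE;
	case IO_NORMAL:
		old = IO_FROZEN;
		atomic_compare_exchange_strong(&error_state, &old, IO_NORMAL);
		return old != IO_PERM_FAILURE;
	}
	return false;
}

int main(void)
{
	printf("freeze: %d\n", set_io_state(IO_FROZEN));       /* 1 */
	printf("fail:   %d\n", set_io_state(IO_PERM_FAILURE)); /* 1 */
	printf("thaw:   %d\n", set_io_state(IO_NORMAL));       /* 0: sticky */
	return 0;
}
```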
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 271aecfbc3bf..6b5c9f7916fa 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -782,7 +782,7 @@ static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
u8 bus = info->id >> 8;
u8 devfn = info->id & 0xff;
- pci_info(dev, "%s%s error received: %04x:%02x:%02x.%d\n",
+ pci_info(dev, "%s%s error message received from %04x:%02x:%02x.%d\n",
info->multi_error_valid ? "Multiple " : "",
aer_error_severity_string[info->severity],
pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn),
@@ -968,7 +968,12 @@ static bool find_source_device(struct pci_dev *parent,
pci_walk_bus(parent->subordinate, find_device_iter, e_info);
if (!e_info->error_dev_num) {
- pci_info(parent, "can't find device of ID%04x\n", e_info->id);
+ u8 bus = e_info->id >> 8;
+ u8 devfn = e_info->id & 0xff;
+
+ pci_info(parent, "found no error details for %04x:%02x:%02x.%d\n",
+ pci_domain_nr(parent->bus), bus, PCI_SLOT(devfn),
+ PCI_FUNC(devfn));
return false;
}
return true;
@@ -1212,7 +1217,7 @@ static irqreturn_t aer_isr(int irq, void *context)
{
struct pcie_device *dev = (struct pcie_device *)context;
struct aer_rpc *rpc = get_service_data(dev);
- struct aer_err_source uninitialized_var(e_src);
+ struct aer_err_source e_src;
if (kfifo_is_empty(&rpc->aer_fifo))
return IRQ_NONE;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 7624c71011c6..ee51e433fded 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -200,12 +200,39 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
link->clkpm_disable = blacklist ? 1 : 0;
}
-static bool pcie_retrain_link(struct pcie_link_state *link)
+static int pcie_wait_for_retrain(struct pci_dev *pdev)
{
- struct pci_dev *parent = link->pdev;
unsigned long end_jiffies;
u16 reg16;
+ /* Wait for Link Training to be cleared by hardware */
+ end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
+ do {
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &reg16);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ return 0;
+ msleep(1);
+ } while (time_before(jiffies, end_jiffies));
+
+ return -ETIMEDOUT;
+}
+
+static int pcie_retrain_link(struct pcie_link_state *link)
+{
+ struct pci_dev *parent = link->pdev;
+ int rc;
+ u16 reg16;
+
+ /*
+ * Ensure the updated LNKCTL parameters are used during link
+ * training by checking that there is no ongoing link training to
+ * avoid LTSSM race as recommended in Implementation Note at the
+ * end of PCIe r6.0.1 sec 7.5.3.7.
+ */
+ rc = pcie_wait_for_retrain(parent);
+ if (rc)
+ return rc;
+
pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
reg16 |= PCI_EXP_LNKCTL_RL;
pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
@@ -219,15 +246,7 @@ static bool pcie_retrain_link(struct pcie_link_state *link)
pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
}
- /* Wait for link training end. Break out after waiting for timeout */
- end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
- do {
- pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
- if (!(reg16 & PCI_EXP_LNKSTA_LT))
- break;
- msleep(1);
- } while (time_before(jiffies, end_jiffies));
- return !(reg16 & PCI_EXP_LNKSTA_LT);
+ return pcie_wait_for_retrain(parent);
}
/*
@@ -238,7 +257,7 @@ static bool pcie_retrain_link(struct pcie_link_state *link)
static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
int same_clock = 1;
- u16 reg16, parent_reg, child_reg[8];
+ u16 reg16, ccc, parent_old_ccc, child_old_ccc[8];
struct pci_dev *child, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
/*
@@ -260,6 +279,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
/* Port might be already in common clock mode */
pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+ parent_old_ccc = reg16 & PCI_EXP_LNKCTL_CCC;
if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
bool consistent = true;
@@ -276,35 +296,30 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
pci_warn(parent, "ASPM: current common clock configuration is broken, reconfiguring\n");
}
+ ccc = same_clock ? PCI_EXP_LNKCTL_CCC : 0;
/* Configure downstream component, all functions */
list_for_each_entry(child, &linkbus->devices, bus_list) {
pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
- child_reg[PCI_FUNC(child->devfn)] = reg16;
- if (same_clock)
- reg16 |= PCI_EXP_LNKCTL_CCC;
- else
- reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pcie_capability_write_word(child, PCI_EXP_LNKCTL, reg16);
+ child_old_ccc[PCI_FUNC(child->devfn)] = reg16 & PCI_EXP_LNKCTL_CCC;
+ pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CCC, ccc);
}
/* Configure upstream component */
- pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
- parent_reg = reg16;
- if (same_clock)
- reg16 |= PCI_EXP_LNKCTL_CCC;
- else
- reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+ pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CCC, ccc);
- if (pcie_retrain_link(link))
- return;
+ if (pcie_retrain_link(link)) {
- /* Training failed. Restore common clock configurations */
- pci_err(parent, "ASPM: Could not configure common clock\n");
- list_for_each_entry(child, &linkbus->devices, bus_list)
- pcie_capability_write_word(child, PCI_EXP_LNKCTL,
- child_reg[PCI_FUNC(child->devfn)]);
- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
+ /* Training failed. Restore common clock configurations */
+ pci_err(parent, "ASPM: Could not configure common clock\n");
+ list_for_each_entry(child, &linkbus->devices, bus_list)
+ pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CCC,
+ child_old_ccc[PCI_FUNC(child->devfn)]);
+ pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CCC, parent_old_ccc);
+ }
}
/* Convert L0s latency encoding to ns */
@@ -991,21 +1006,24 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- /*
- * All PCIe functions are in one slot, remove one function will remove
- * the whole slot, so just wait until we are the last function left.
- */
- if (!list_empty(&parent->subordinate->devices))
- goto out;
link = parent->link_state;
root = link->root;
parent_link = link->parent;
- /* All functions are removed, so just disable ASPM for the link */
+ /*
+ * link->downstream is a pointer to the pci_dev of function 0. If
+ * we remove that function, the pci_dev is about to be deallocated,
+ * so we can't use link->downstream again. Free the link state to
+ * avoid this.
+ *
+ * If we're removing a non-0 function, it's possible we could
+ * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends
+ * programming the same ASPM Control value for all functions of
+ * multi-function devices, so disable ASPM for all of them.
+ */
pcie_config_aspm_link(link, 0);
list_del(&link->sibling);
- /* Clock PM is for endpoint device */
free_link_state(link);
/* Recheck latencies and configure upstream links */
@@ -1013,7 +1031,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
pcie_update_aspm_capable(root);
pcie_config_aspm_path(parent_link);
}
-out:
+
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
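pcie_wait_for_retrain() above is a bounded poll of the Link Training bit, and it is now run twice: once before setting Retrain Link, to avoid the LTSSM race noted in the comment, and once after, to wait for training to finish. A self-contained userspace sketch of that poll-with-deadline shape (the hardware read is faked with a countdown):

```c
#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define RETRAIN_TIMEOUT_MS 1000

/* Fake "Link Training" bit: reports busy for the first few samples. */
static int link_training_active(void)
{
	static int samples = 3;
	return samples-- > 0;
}

static int wait_for_retrain(void)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (!link_training_active())
			return 0;
		usleep(1000);                 /* msleep(1) in the kernel */
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000 +
		 (now.tv_nsec - start.tv_nsec) / 1000000 < RETRAIN_TIMEOUT_MS);

	return -ETIMEDOUT;
}

int main(void)
{
	printf("wait_for_retrain: %d\n", wait_for_retrain());
	return 0;
}
```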
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index a32ec3487a8d..d5734a83606f 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -195,7 +195,7 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
for (i = 0; i < dpc->rp_log_size - 5; i++) {
pci_read_config_dword(pdev,
- cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
+ cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix);
pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
}
clear_status:
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 2a4bc8df8563..3bc705840415 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -597,7 +597,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
/*
* In the AMD NL platform, this device ([1022:7912]) has a class code of
* PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
- * claim it.
+ * claim it. The same applies to the VanGogh platform device ([1022:163a]).
*
* But the dwc3 driver is a more specific driver for this device, and we'd
* prefer to use it instead of xhci. To prevent xhci from claiming the
@@ -605,17 +605,22 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
* defines as "USB device (not host controller)". The dwc3 driver can then
* claim it based on its Vendor and Device ID.
*/
-static void quirk_amd_nl_class(struct pci_dev *pdev)
+static void quirk_amd_dwc_class(struct pci_dev *pdev)
{
u32 class = pdev->class;
- /* Use "USB Device (not host controller)" class */
- pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
- pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
- class, pdev->class);
+ if (class != PCI_CLASS_SERIAL_USB_DEVICE) {
+ /* Use "USB Device (not host controller)" class */
+ pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
+ pci_info(pdev,
+ "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
+ class, pdev->class);
+ }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
- quirk_amd_nl_class);
+ quirk_amd_dwc_class);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VANGOGH_USB,
+ quirk_amd_dwc_class);
/*
* Synopsys USB 3.x host HAPS platform has a class code of
@@ -3625,6 +3630,19 @@ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
/*
+ * Spectrum-{1,2,3,4} devices report that a D3hot->D0 transition causes a reset
+ * (i.e., they advertise NoSoftRst-). However, this transition does not have
+ * any effect on the device: It continues to be operational and network ports
+ * remain up. Advertising this support makes it seem as if a PM reset is viable
+ * for these devices. Mark it as unavailable to skip it when testing reset
+ * methods.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcb84, quirk_no_pm_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf6c, quirk_no_pm_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf70, quirk_no_pm_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf80, quirk_no_pm_reset);
+
+/*
* Thunderbolt controllers with broken MSI hotplug signaling:
* Entire 1st generation (Light Ridge, Eagle Ridge, Light Peak) and part
* of the 2nd generation (Cactus Ridge 4C up to revision 1, Port Ridge).
@@ -4168,6 +4186,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9235,
+ quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
@@ -4843,6 +4863,26 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
+/*
+ * Wangxun 10G/1G NICs have no ACS capability, and on multi-function
+ * devices, peer-to-peer transactions are not used between the functions.
+ * So add an ACS quirk for the devices below to isolate their functions:
+ * SFxxx 1G NICs (em).
+ * RP1000/RP2000 10G NICs (sp).
+ */
+static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ switch (dev->device) {
+ case 0x0100 ... 0x010F:
+ case 0x1001:
+ case 0x2001:
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ }
+
+ return false;
+}
+
static const struct pci_dev_acs_enabled {
u16 vendor;
u16 device;
@@ -4943,6 +4983,9 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
/* Broadcom multi-function device */
{ PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1751, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1752, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
@@ -4985,6 +5028,8 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
/* Zhaoxin Root/Downstream Ports */
{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
+ /* Wangxun nics */
+ { PCI_VENDOR_ID_WANGXUN, PCI_ANY_ID, pci_quirk_wangxun_nic_acs },
{ 0 }
};
@@ -5335,6 +5380,7 @@ static void quirk_no_flr(struct pci_dev *dev)
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
@@ -5350,6 +5396,7 @@ static void quirk_no_ext_tags(struct pci_dev *pdev)
pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_3WARE, 0x1004, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
@@ -5359,6 +5406,12 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
#ifdef CONFIG_PCI_ATS
+static void quirk_no_ats(struct pci_dev *pdev)
+{
+ pci_info(pdev, "disabling ATS\n");
+ pdev->ats_cap = 0;
+}
+
/*
* Some devices require additional driver setup to enable ATS. Don't use
* ATS for those devices as ATS will be enabled before the driver has had a
@@ -5371,8 +5424,7 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
(pdev->device == 0x7341 && pdev->revision != 0x00))
return;
- pci_info(pdev, "disabling ATS\n");
- pdev->ats_cap = 0;
+ quirk_no_ats(pdev);
}
/* AMD Stoney platform GPU */
@@ -5384,6 +5436,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
/* AMD Navi14 dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
+
+/*
+ * Intel IPU E2000 revisions before C0 implement incorrect endianness
+ * in ATS Invalidate Request message body. Disable ATS for those devices.
+ */
+static void quirk_intel_e2000_no_ats(struct pci_dev *pdev)
+{
+ if (pdev->revision < 0x20)
+ quirk_no_ats(pdev);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1451, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1452, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1453, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1454, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1455, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1457, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1459, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145a, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145c, quirk_intel_e2000_no_ats);
#endif /* CONFIG_PCI_ATS */
/* Freescale PCIe doesn't support MSI in RC mode */
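pci_quirk_wangxun_nic_acs() above matches whole device-ID ranges with `case 0x0100 ... 0x010F:`, the GCC/Clang case-range extension that kernel builds rely on; it is not standard C. A minimal sketch of that dispatch (the strings are illustrative):

```c
#include <stdio.h>

/* Uses the GCC/Clang "case low ... high" extension, as the quirk does. */
static const char *classify(unsigned int device)
{
	switch (device) {
	case 0x0100 ... 0x010F:
		return "SFxxx 1G (em)";
	case 0x1001:
	case 0x2001:
		return "RP1000/RP2000 10G (sp)";
	default:
		return "no ACS quirk";
	}
}

int main(void)
{
	printf("%04x: %s\n", 0x0105, classify(0x0105));
	printf("%04x: %s\n", 0x2001, classify(0x2001));
	printf("%04x: %s\n", 0x3000, classify(0x3000));
	return 0;
}
```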
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index da5023e27951..eca74d77dfcb 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -209,6 +209,17 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
root = pci_find_parent_resource(dev, res);
if (!root) {
+ /*
+ * If dev is behind a bridge, accesses will only reach it
+ * if res is inside the relevant bridge window.
+ */
+ if (pci_upstream_bridge(dev))
+ return -ENXIO;
+
+ /*
+ * On the root bus, assume the host bridge will forward
+ * everything.
+ */
if (res->flags & IORESOURCE_IO)
root = &ioport_resource;
else
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 2c9c3061894b..0b8325235852 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1082,13 +1082,6 @@ static void stdev_release(struct device *dev)
{
struct switchtec_dev *stdev = to_stdev(dev);
- if (stdev->dma_mrpc) {
- iowrite32(0, &stdev->mmio_mrpc->dma_en);
- flush_wc_buf(stdev);
- writeq(0, &stdev->mmio_mrpc->dma_addr);
- dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
- stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
- }
kfree(stdev);
}
@@ -1131,7 +1124,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
return ERR_PTR(-ENOMEM);
stdev->alive = true;
- stdev->pdev = pdev;
+ stdev->pdev = pci_dev_get(pdev);
INIT_LIST_HEAD(&stdev->mrpc_queue);
mutex_init(&stdev->mrpc_mutex);
stdev->mrpc_busy = 0;
@@ -1165,6 +1158,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
return stdev;
err_put:
+ pci_dev_put(stdev->pdev);
put_device(&stdev->dev);
return ERR_PTR(rc);
}
@@ -1407,6 +1401,18 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
return 0;
}
+static void switchtec_exit_pci(struct switchtec_dev *stdev)
+{
+ if (stdev->dma_mrpc) {
+ iowrite32(0, &stdev->mmio_mrpc->dma_en);
+ flush_wc_buf(stdev);
+ writeq(0, &stdev->mmio_mrpc->dma_addr);
+ dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
+ stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
+ stdev->dma_mrpc = NULL;
+ }
+}
+
static int switchtec_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -1427,7 +1433,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
rc = switchtec_init_isr(stdev);
if (rc) {
dev_err(&stdev->dev, "failed to init isr.\n");
- goto err_put;
+ goto err_exit_pci;
}
iowrite32(SWITCHTEC_EVENT_CLEAR |
@@ -1448,6 +1454,8 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
err_devadd:
stdev_kill(stdev);
+err_exit_pci:
+ switchtec_exit_pci(stdev);
err_put:
ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
put_device(&stdev->dev);
@@ -1464,6 +1472,9 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
dev_info(&stdev->dev, "unregistered.\n");
stdev_kill(stdev);
+ switchtec_exit_pci(stdev);
+ pci_dev_put(stdev->pdev);
+ stdev->pdev = NULL;
put_device(&stdev->dev);
}
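The switchtec fix separates two lifetimes: stdev_release() can run long after the PCI device is gone, so stdev_create() now holds its own reference on the pci_dev, and the DMA teardown moves from the release callback into the probe-error and remove paths, where the device is still usable. A toy refcount sketch of the take-a-reference rule (types and names invented):

```c
#include <stdio.h>

struct pdev { int refs; };

static struct pdev *pdev_get(struct pdev *p) { p->refs++; return p; }

static void pdev_put(struct pdev *p)
{
	if (!--p->refs)
		printf("pdev freed\n");
}

struct stdev { struct pdev *pdev; };

static void stdev_create(struct stdev *s, struct pdev *p)
{
	s->pdev = pdev_get(p);   /* own a reference for stdev's lifetime */
}

static void stdev_remove(struct stdev *s)
{
	/* DMA teardown happens here, while the device is still alive. */
	pdev_put(s->pdev);
	s->pdev = NULL;
}

int main(void)
{
	struct pdev p = { .refs = 1 };
	struct stdev s;

	stdev_create(&s, &p);
	stdev_remove(&s);
	pdev_put(&p);            /* drop the original reference: now freed */
	return 0;
}
```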
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index f70197154a36..820cce7c8b40 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -605,6 +605,7 @@ static int pccardd(void *__skt)
dev_warn(&skt->dev, "PCMCIA: unable to register socket\n");
skt->thread = NULL;
complete(&skt->thread_done);
+ put_device(&skt->dev);
return 0;
}
ret = pccard_sysfs_add_socket(&skt->dev);
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 09d06b082f8b..103862f7bdf1 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -518,9 +518,6 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
/* by default don't allow DMA */
p_dev->dma_mask = DMA_MASK_NONE;
p_dev->dev.dma_mask = &p_dev->dma_mask;
- dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
- if (!dev_name(&p_dev->dev))
- goto err_free;
p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev));
if (!p_dev->devname)
goto err_free;
@@ -578,8 +575,15 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
pcmcia_device_query(p_dev);
- if (device_register(&p_dev->dev))
- goto err_unreg;
+ dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
+ if (device_register(&p_dev->dev)) {
+ mutex_lock(&s->ops_mutex);
+ list_del(&p_dev->socket_device_list);
+ s->device_count--;
+ mutex_unlock(&s->ops_mutex);
+ put_device(&p_dev->dev);
+ return NULL;
+ }
return p_dev;
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 3a512513cb32..6b311d6f8bf0 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -1053,6 +1053,8 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
q = p->next;
kfree(p);
}
+
+ kfree(data);
}
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 4594e2ed13d5..96e76915da56 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -816,7 +816,11 @@ static int __init dsu_pmu_init(void)
if (ret < 0)
return ret;
dsu_pmu_cpuhp_state = ret;
- return platform_driver_register(&dsu_pmu_driver);
+ ret = platform_driver_register(&dsu_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(dsu_pmu_cpuhp_state);
+
+ return ret;
}
static void __exit dsu_pmu_exit(void)
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index e35cb76c8d10..6eb077db7384 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -118,7 +118,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
if (num_irqs == 1) {
int irq = platform_get_irq(pdev, 0);
- if (irq && irq_is_percpu_devid(irq))
+ if ((irq > 0) && irq_is_percpu_devid(irq))
return pmu_parse_percpu_irq(pmu, irq);
}
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 6a3fa1f69e68..de85e9191947 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -95,6 +95,7 @@
#define SMMU_PMCG_PA_SHIFT 12
#define SMMU_PMCG_EVCNTR_RDONLY BIT(0)
+#define SMMU_PMCG_HARDEN_DISABLE BIT(1)
static int cpuhp_state_num;
@@ -138,6 +139,20 @@ static inline void smmu_pmu_enable(struct pmu *pmu)
writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
}
+static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
+ struct perf_event *event, int idx);
+
+static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+ unsigned int idx;
+
+ for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
+ smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);
+
+ smmu_pmu_enable(pmu);
+}
+
static inline void smmu_pmu_disable(struct pmu *pmu)
{
struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
@@ -146,6 +161,22 @@ static inline void smmu_pmu_disable(struct pmu *pmu)
writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
}
+static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+ unsigned int idx;
+
+ /*
+ * A global disable of the PMU sometimes fails to stop the counting.
+ * Harden this by writing an invalid event type to each used counter
+ * to forcibly stop counting.
+ */
+ for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
+ writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
+
+ smmu_pmu_disable(pmu);
+}
+
static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
u32 idx, u64 value)
{
@@ -719,7 +750,10 @@ static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
switch (model) {
case IORT_SMMU_V3_PMCG_HISI_HIP08:
/* HiSilicon Erratum 162001800 */
- smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
+ smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE;
+ break;
+ case IORT_SMMU_V3_PMCG_HISI_HIP09:
+ smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE;
break;
}
@@ -808,6 +842,16 @@ static int smmu_pmu_probe(struct platform_device *pdev)
smmu_pmu_get_acpi_options(smmu_pmu);
+ /*
+ * On platforms that suffer from this quirk, disabling the PMU sometimes
+ * fails to stop the counters, which leads to inaccurate or erroneous
+ * counts. Forcibly disable the counters with the quirk handlers.
+ */
+ if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) {
+ smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09;
+ smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09;
+ }
+
/* Pick one CPU to be the preferred one to use */
smmu_pmu->on_cpu = raw_smp_processor_id();
WARN_ON(irq_set_affinity_hint(smmu_pmu->irq,
@@ -872,6 +916,8 @@ static struct platform_driver smmu_pmu_driver = {
static int __init arm_smmu_pmu_init(void)
{
+ int ret;
+
cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
"perf/arm/pmcg:online",
NULL,
@@ -879,7 +925,11 @@ static int __init arm_smmu_pmu_init(void)
if (cpuhp_state_num < 0)
return cpuhp_state_num;
- return platform_driver_register(&smmu_pmu_driver);
+ ret = platform_driver_register(&smmu_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(cpuhp_state_num);
+
+ return ret;
}
module_init(arm_smmu_pmu_init);
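dsu_pmu_init() earlier in this series and arm_smmu_pmu_init() here get the same fix: an init function that sets up the cpuhp state and then fails to register the platform driver must tear the state down again before returning the error. A generic sketch of that unwind with stub resources (all names invented):

```c
#include <stdio.h>

static int setup_state(void)     { return 42; }   /* cpuhp state slot */
static void remove_state(int s)  { printf("removed state %d\n", s); }
static int register_driver(void) { return -1; }   /* simulate failure */

static int pmu_init(void)
{
	int state, ret;

	state = setup_state();
	if (state < 0)
		return state;

	ret = register_driver();
	if (ret)
		remove_state(state);  /* unwind resource A when B fails */

	return ret;
}

int main(void)
{
	printf("pmu_init: %d\n", pmu_init());
	return 0;
}
```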
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 912a220a9db9..a0e45c726bbf 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -77,6 +77,7 @@ struct ddr_pmu {
const struct fsl_ddr_devtype_data *devtype_data;
int irq;
int id;
+ int active_counter;
};
static ssize_t ddr_perf_cpumask_show(struct device *dev,
@@ -353,6 +354,10 @@ static void ddr_perf_event_start(struct perf_event *event, int flags)
ddr_perf_counter_enable(pmu, event->attr.config, counter, true);
+ if (!pmu->active_counter++)
+ ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER, true);
+
hwc->state = 0;
}
@@ -407,6 +412,10 @@ static void ddr_perf_event_stop(struct perf_event *event, int flags)
ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
ddr_perf_event_update(event);
+ if (!--pmu->active_counter)
+ ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER, false);
+
hwc->state |= PERF_HES_STOPPED;
}
@@ -425,25 +434,10 @@ static void ddr_perf_event_del(struct perf_event *event, int flags)
static void ddr_perf_pmu_enable(struct pmu *pmu)
{
- struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
-
- /* enable cycle counter if cycle is not active event list */
- if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
- ddr_perf_counter_enable(ddr_pmu,
- EVENT_CYCLES_ID,
- EVENT_CYCLES_COUNTER,
- true);
}
static void ddr_perf_pmu_disable(struct pmu *pmu)
{
- struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
-
- if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
- ddr_perf_counter_enable(ddr_pmu,
- EVENT_CYCLES_ID,
- EVENT_CYCLES_COUNTER,
- false);
}
static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
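The imx8 DDR PMU change moves the shared cycles counter out of the pmu_enable/pmu_disable hooks and gates it on a per-PMU active_counter count instead: the first event to start switches it on, the last one to stop switches it off. A small sketch of that first-on/last-off pattern (names invented):

```c
#include <stdbool.h>
#include <stdio.h>

static int active_counter;

static void cycles_counter_enable(bool on)
{
	printf("cycles counter %s\n", on ? "on" : "off");
}

static void event_start(void)
{
	if (!active_counter++)        /* first user switches it on */
		cycles_counter_enable(true);
}

static void event_stop(void)
{
	if (!--active_counter)        /* last user switches it off */
		cycles_counter_enable(false);
}

int main(void)
{
	event_start();  /* on */
	event_start();  /* no-op */
	event_stop();   /* no-op */
	event_stop();   /* off */
	return 0;
}
```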
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 0263db2ac874..692fa987dba3 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -67,6 +67,15 @@ source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
source "drivers/phy/socionext/Kconfig"
source "drivers/phy/st/Kconfig"
+
+config PHY_XILINX_ZYNQMP
+ tristate "Xilinx ZynqMP PHY driver"
+ depends on ARCH_ZYNQMP
+ select GENERIC_PHY
+ help
+ Enable this to support the ZynqMP High Speed Gigabit Transceiver
+ that is part of the ZynqMP SoC.
+
source "drivers/phy/tegra/Kconfig"
source "drivers/phy/ti/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index c96a1afc95bd..cd20defac8ff 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -28,3 +28,4 @@ obj-y += broadcom/ \
socionext/ \
st/ \
ti/
+obj-$(CONFIG_PHY_XILINX_ZYNQMP) += phy-zynqmp.o
diff --git a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
index 9b16f13b5ab2..96162b9a2e53 100644
--- a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
+++ b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
@@ -155,7 +155,7 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
phy_set_drvdata(phy, &priv->ports[i]);
i++;
- if (i > INNO_PHY_PORT_NUM) {
+ if (i >= INNO_PHY_PORT_NUM) {
dev_warn(dev, "Support %d ports in maximum\n", i);
break;
}
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 39d13f7e4cf3..a79d6cf20220 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -122,16 +122,10 @@ static int phy_mdm6600_power_on(struct phy *x)
{
struct phy_mdm6600 *ddata = phy_get_drvdata(x);
struct gpio_desc *enable_gpio = ddata->ctrl_gpios[PHY_MDM6600_ENABLE];
- int error;
if (!ddata->enabled)
return -ENODEV;
- error = pinctrl_pm_select_default_state(ddata->dev);
- if (error)
- dev_warn(ddata->dev, "%s: error with default_state: %i\n",
- __func__, error);
-
gpiod_set_value_cansleep(enable_gpio, 1);
/* Allow aggressive PM for USB, it's only needed for n_gsm port */
@@ -160,11 +154,6 @@ static int phy_mdm6600_power_off(struct phy *x)
gpiod_set_value_cansleep(enable_gpio, 0);
- error = pinctrl_pm_select_sleep_state(ddata->dev);
- if (error)
- dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
- __func__, error);
-
return 0;
}
@@ -455,6 +444,7 @@ static void phy_mdm6600_device_power_off(struct phy_mdm6600 *ddata)
{
struct gpio_desc *reset_gpio =
ddata->ctrl_gpios[PHY_MDM6600_RESET];
+ int error;
ddata->enabled = false;
phy_mdm6600_cmd(ddata, PHY_MDM6600_CMD_BP_SHUTDOWN_REQ);
@@ -470,6 +460,17 @@ static void phy_mdm6600_device_power_off(struct phy_mdm6600 *ddata)
} else {
dev_err(ddata->dev, "Timed out powering down\n");
}
+
+ /*
+ * Keep reset gpio high with padconf internal pull-up resistor to
+ * prevent modem from waking up during deeper SoC idle states. The
+ * gpio bank lines can have glitches if not in the always-on wkup
+ * domain.
+ */
+ error = pinctrl_pm_select_sleep_state(ddata->dev);
+ if (error)
+ dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
+ __func__, error);
}
static void phy_mdm6600_deferred_power_on(struct work_struct *work)
@@ -570,12 +571,6 @@ static int phy_mdm6600_probe(struct platform_device *pdev)
ddata->dev = &pdev->dev;
platform_set_drvdata(pdev, ddata);
- /* Active state selected in phy_mdm6600_power_on() */
- error = pinctrl_pm_select_sleep_state(ddata->dev);
- if (error)
- dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
- __func__, error);
-
error = phy_mdm6600_init_lines(ddata);
if (error)
return error;
@@ -626,10 +621,12 @@ idle:
pm_runtime_put_autosuspend(ddata->dev);
cleanup:
- if (error < 0)
+ if (error < 0) {
phy_mdm6600_device_power_off(ddata);
- pm_runtime_disable(ddata->dev);
- pm_runtime_dont_use_autosuspend(ddata->dev);
+ pm_runtime_disable(ddata->dev);
+ pm_runtime_dont_use_autosuspend(ddata->dev);
+ }
+
return error;
}
@@ -638,6 +635,7 @@ static int phy_mdm6600_remove(struct platform_device *pdev)
struct phy_mdm6600 *ddata = platform_get_drvdata(pdev);
struct gpio_desc *reset_gpio = ddata->ctrl_gpios[PHY_MDM6600_RESET];
+ pm_runtime_get_noresume(ddata->dev);
pm_runtime_dont_use_autosuspend(ddata->dev);
pm_runtime_put_sync(ddata->dev);
pm_runtime_disable(ddata->dev);
diff --git a/drivers/phy/phy-zynqmp.c b/drivers/phy/phy-zynqmp.c
new file mode 100644
index 000000000000..6bd746ac84b8
--- /dev/null
+++ b/drivers/phy/phy-zynqmp.c
@@ -0,0 +1,1591 @@
+/*
+ * phy-zynqmp.c - PHY driver for Xilinx ZynqMP GT.
+ *
+ * Copyright (C) 2015 - 2016 Xilinx Inc.
+ *
+ * Author: Subbaraya Sundeep <sbhatta@xilinx.com>
+ * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver is currently tested with USB and SATA.
+ * The other controllers (PCIe, DisplayPort, and SGMII) should also
+ * work, but support for them is experimental for now.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-zynqmp.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/phy/phy.h>
+#include <linux/soc/xilinx/zynqmp/fw.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/reset.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#define MAX_LANES 4
+
+#define RST_TIMEOUT 1000
+
+#define ICM_CFG0 0x10010
+#define ICM_CFG1 0x10014
+#define ICM_CFG0_L0_MASK 0x07
+#define ICM_CFG0_L1_MASK 0x70
+#define ICM_CFG1_L2_MASK 0x07
+#define ICM_CFG2_L3_MASK 0x70
+
+#define TM_CMN_RST 0x10018
+#define TM_CMN_RST_MASK 0x3
+#define TM_CMN_RST_EN 0x1
+#define TM_CMN_RST_SET 0x2
+
+#define ICM_PROTOCOL_PD 0x0
+#define ICM_PROTOCOL_PCIE 0x1
+#define ICM_PROTOCOL_SATA 0x2
+#define ICM_PROTOCOL_USB 0x3
+#define ICM_PROTOCOL_DP 0x4
+#define ICM_PROTOCOL_SGMII 0x5
+
+#define PLL_REF_SEL0 0x10000
+#define PLL_REF_OFFSET 0x4
+#define PLL_FREQ_MASK 0x1F
+
+#define L0_L0_REF_CLK_SEL 0x2860
+
+#define L0_PLL_STATUS_READ_1 0x23E4
+#define PLL_STATUS_READ_OFFSET 0x4000
+#define PLL_STATUS_LOCKED 0x10
+
+#define L0_PLL_SS_STEP_SIZE_0_LSB 0x2370
+#define L0_PLL_SS_STEP_SIZE_1 0x2374
+#define L0_PLL_SS_STEP_SIZE_2 0x2378
+#define L0_PLL_SS_STEP_SIZE_3_MSB 0x237C
+#define STEP_SIZE_OFFSET 0x4000
+#define STEP_SIZE_0_MASK 0xFF
+#define STEP_SIZE_1_MASK 0xFF
+#define STEP_SIZE_2_MASK 0xFF
+#define STEP_SIZE_3_MASK 0x3
+#define FORCE_STEP_SIZE 0x10
+#define FORCE_STEPS 0x20
+
+#define L0_PLL_SS_STEPS_0_LSB 0x2368
+#define L0_PLL_SS_STEPS_1_MSB 0x236C
+#define STEPS_OFFSET 0x4000
+#define STEPS_0_MASK 0xFF
+#define STEPS_1_MASK 0x07
+
+#define BGCAL_REF_SEL 0x10028
+#define BGCAL_REF_VALUE 0x0C
+
+#define L3_TM_CALIB_DIG19 0xEC4C
+#define L3_TM_CALIB_DIG19_NSW 0x07
+
+#define TM_OVERRIDE_NSW_CODE 0x20
+
+#define L3_CALIB_DONE_STATUS 0xEF14
+#define CALIB_DONE 0x02
+
+#define L0_TXPMA_ST_3 0x0B0C
+#define DN_CALIB_CODE 0x3F
+#define DN_CALIB_SHIFT 3
+
+#define L3_TM_CALIB_DIG18 0xEC48
+#define L3_TM_CALIB_DIG18_NSW 0xE0
+#define NSW_SHIFT 5
+#define NSW_PIPE_SHIFT 4
+
+#define L0_TM_PLL_DIG_37 0x2094
+#define TM_PLL_DIG_37_OFFSET 0x4000
+#define TM_COARSE_CODE_LIMIT 0x10
+
+#define L0_TM_DIG_6 0x106C
+#define TM_DIG_6_OFFSET 0x4000
+#define TM_DISABLE_DESCRAMBLE_DECODER 0x0F
+
+#define L0_TX_DIG_61 0x00F4
+#define TX_DIG_61_OFFSET 0x4000
+#define TM_DISABLE_SCRAMBLE_ENCODER 0x0F
+
+#define L0_TX_ANA_TM_18 0x0048
+#define TX_ANA_TM_18_OFFSET 0x4000
+
+#define L0_TX_ANA_TM_118 0x01D8
+#define TX_ANA_TM_118_OFFSET 0x4000
+#define L0_TX_ANA_TM_118_FORCE_17_0 BIT(0)
+
+#define L0_TXPMD_TM_45 0x0CB4
+#define TXPMD_TM_45_OFFSET 0x4000
+#define L0_TXPMD_TM_45_OVER_DP_MAIN BIT(0)
+#define L0_TXPMD_TM_45_ENABLE_DP_MAIN BIT(1)
+#define L0_TXPMD_TM_45_OVER_DP_POST1 BIT(2)
+#define L0_TXPMD_TM_45_ENABLE_DP_POST1 BIT(3)
+#define L0_TXPMD_TM_45_OVER_DP_POST2 BIT(4)
+#define L0_TXPMD_TM_45_ENABLE_DP_POST2 BIT(5)
+
+#define L0_TXPMD_TM_48 0x0CC0
+#define TXPMD_TM_48_OFFSET 0x4000
+
+#define TX_PROT_BUS_WIDTH 0x10040
+#define RX_PROT_BUS_WIDTH 0x10044
+
+#define PROT_BUS_WIDTH_SHIFT 2
+#define PROT_BUS_WIDTH_10 0x0
+#define PROT_BUS_WIDTH_20 0x1
+#define PROT_BUS_WIDTH_40 0x2
+
+#define LANE_CLK_SHARE_MASK 0x8F
+
+#define SATA_CONTROL_OFFSET 0x0100
+
+#define CONTROLLERS_PER_LANE 5
+
+#define PIPE_CLK_OFFSET 0x7c
+#define PIPE_CLK_ON 1
+#define PIPE_CLK_OFF 0
+#define PIPE_POWER_OFFSET 0x80
+#define PIPE_POWER_ON 1
+#define PIPE_POWER_OFF 0
+
+#define XPSGTR_TYPE_USB0 0 /* USB controller 0 */
+#define XPSGTR_TYPE_USB1 1 /* USB controller 1 */
+#define XPSGTR_TYPE_SATA_0 2 /* SATA controller lane 0 */
+#define XPSGTR_TYPE_SATA_1 3 /* SATA controller lane 1 */
+#define XPSGTR_TYPE_PCIE_0 4 /* PCIe controller lane 0 */
+#define XPSGTR_TYPE_PCIE_1 5 /* PCIe controller lane 1 */
+#define XPSGTR_TYPE_PCIE_2 6 /* PCIe controller lane 2 */
+#define XPSGTR_TYPE_PCIE_3 7 /* PCIe controller lane 3 */
+#define XPSGTR_TYPE_DP_0 8 /* Display Port controller lane 0 */
+#define XPSGTR_TYPE_DP_1 9 /* Display Port controller lane 1 */
+#define XPSGTR_TYPE_SGMII0 10 /* Ethernet SGMII controller 0 */
+#define XPSGTR_TYPE_SGMII1 11 /* Ethernet SGMII controller 1 */
+#define XPSGTR_TYPE_SGMII2 12 /* Ethernet SGMII controller 2 */
+#define XPSGTR_TYPE_SGMII3 13 /* Ethernet SGMII controller 3 */
+
+/*
+ * This table holds the valid combinations of controllers and
+ * lanes (Interconnect Matrix).
+ */
+static unsigned int icm_matrix[][CONTROLLERS_PER_LANE] = {
+ { XPSGTR_TYPE_PCIE_0, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII0 },
+ { XPSGTR_TYPE_PCIE_1, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII1 },
+ { XPSGTR_TYPE_PCIE_2, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII2 },
+ { XPSGTR_TYPE_PCIE_3, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB1,
+ XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII3 }
+};
+
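+/*
+ * Note: the rows above are indexed by lane number; xpsgtr_xlate() rejects
+ * any PHY request whose controller type is not listed in the row for the
+ * requested lane.
+ */
+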
+/* Allowed PLL reference clock frequencies */
+enum pll_frequencies {
+ REF_19_2M = 0,
+ REF_20M,
+ REF_24M,
+ REF_26M,
+ REF_27M,
+ REF_38_4M,
+ REF_40M,
+ REF_52M,
+ REF_100M,
+ REF_108M,
+ REF_125M,
+ REF_135M,
+ REF_150M,
+};
+
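+/*
+ * Note: the enum values above are used as indices into the ssc_lookup[]
+ * table below, so the two must stay in the same order.
+ */
+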
+/**
+ * struct xpsgtr_phy - representation of a lane
+ * @phy: pointer to the kernel PHY device
+ * @type: controller which uses this lane
+ * @lane: lane number
+ * @protocol: protocol in which the lane operates
+ * @ref_clk: enum of allowed ref clock rates for this lane PLL
+ * @pll_lock: PLL status
+ * @skip_phy_init: skip phy_init() if true
+ * @data: pointer to hold private data
+ * @refclk_rate: PLL reference clock frequency
+ * @share_laneclk: lane number of the clock to be shared
+ */
+struct xpsgtr_phy {
+ struct phy *phy;
+ u8 type;
+ u8 lane;
+ u8 protocol;
+ enum pll_frequencies ref_clk;
+ bool pll_lock;
+ bool skip_phy_init;
+ void *data;
+ u32 refclk_rate;
+ u32 share_laneclk;
+};
+
+/**
+ * struct xpsgtr_ssc - structure to hold SSC settings for a lane
+ * @refclk_rate: PLL reference clock frequency
+ * @pll_ref_clk: value to be written to register for corresponding ref clk rate
+ * @steps: number of steps of SSC (Spread Spectrum Clock)
+ * @step_size: step size of each step
+ */
+struct xpsgtr_ssc {
+ u32 refclk_rate;
+ u8 pll_ref_clk;
+ u32 steps;
+ u32 step_size;
+};
+
+/* lookup table to hold all settings needed for a ref clock frequency */
+static struct xpsgtr_ssc ssc_lookup[] = {
+ {19200000, 0x05, 608, 264020},
+ {20000000, 0x06, 634, 243454},
+ {24000000, 0x07, 760, 168973},
+ {26000000, 0x08, 824, 143860},
+ {27000000, 0x09, 856, 86551},
+ {38400000, 0x0A, 1218, 65896},
+ {40000000, 0x0B, 634, 243454},
+ {52000000, 0x0C, 824, 143860},
+ {100000000, 0x0D, 1058, 87533},
+ {108000000, 0x0E, 856, 86551},
+ {125000000, 0x0F, 992, 119497},
+ {135000000, 0x10, 1070, 55393},
+ {150000000, 0x11, 792, 187091}
+};
+
+/**
+ * struct xpsgtr_dev - representation of a ZynqMP GT device
+ * @dev: pointer to device
+ * @serdes: serdes base address
+ * @siou: siou base address
+ * @gtr_mutex: mutex for locking
+ * @phys: pointer to all the lanes
+ * @tx_term_fix: fix for GT issue
+ * @saved_icm_cfg0: stored value of ICM CFG0 register
+ * @saved_icm_cfg1: stored value of ICM CFG1 register
+ * @sata_rst: a reset control for SATA
+ * @dp_rst: a reset control for DP
+ * @usb0_crst: a reset control for usb0 core
+ * @usb1_crst: a reset control for usb1 core
+ * @usb0_hibrst: a reset control for usb0 hibernation module
+ * @usb1_hibrst: a reset control for usb1 hibernation module
+ * @usb0_apbrst: a reset control for usb0 apb bus
+ * @usb1_apbrst: a reset control for usb1 apb bus
+ * @gem0_rst: a reset control for gem0
+ * @gem1_rst: a reset control for gem1
+ * @gem2_rst: a reset control for gem2
+ * @gem3_rst: a reset control for gem3
+ */
+struct xpsgtr_dev {
+ struct device *dev;
+ void __iomem *serdes;
+ void __iomem *siou;
+ struct mutex gtr_mutex;
+ struct xpsgtr_phy **phys;
+ bool tx_term_fix;
+ unsigned int saved_icm_cfg0;
+ unsigned int saved_icm_cfg1;
+ struct reset_control *sata_rst;
+ struct reset_control *dp_rst;
+ struct reset_control *usb0_crst;
+ struct reset_control *usb1_crst;
+ struct reset_control *usb0_hibrst;
+ struct reset_control *usb1_hibrst;
+ struct reset_control *usb0_apbrst;
+ struct reset_control *usb1_apbrst;
+ struct reset_control *gem0_rst;
+ struct reset_control *gem1_rst;
+ struct reset_control *gem2_rst;
+ struct reset_control *gem3_rst;
+};
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+
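+/*
+ * The pe[][] and vs[][] tables in the two helpers below map a DisplayPort
+ * pre-emphasis level (plvl) and voltage swing level (vlvl) to raw serdes
+ * override values; 0xff appears to mark level combinations the hardware
+ * does not support (explanatory note only).
+ */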
+int xpsgtr_override_deemph(struct phy *phy, u8 plvl, u8 vlvl)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ static u8 pe[4][4] = { { 0x2, 0x2, 0x2, 0x2 },
+ { 0x1, 0x1, 0x1, 0xff },
+ { 0x0, 0x0, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0xff } };
+
+ writel(pe[plvl][vlvl],
+ gtr_dev->serdes + gtr_phy->lane * TX_ANA_TM_18_OFFSET +
+ L0_TX_ANA_TM_18);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xpsgtr_override_deemph);
+
+int xpsgtr_margining_factor(struct phy *phy, u8 plvl, u8 vlvl)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ static u8 vs[4][4] = { { 0x2a, 0x27, 0x24, 0x20 },
+ { 0x27, 0x23, 0x20, 0xff },
+ { 0x24, 0x20, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0xff } };
+
+ writel(vs[plvl][vlvl],
+ gtr_dev->serdes + gtr_phy->lane * TXPMD_TM_48_OFFSET +
+ L0_TXPMD_TM_48);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xpsgtr_margining_factor);
+
+/**
+ * xpsgtr_configure_pll - configures SSC settings for a lane
+ * @gtr_phy: pointer to lane
+ */
+static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ u32 reg;
+ u32 offset;
+ u32 steps;
+ u32 size;
+ u8 pll_ref_clk;
+
+ steps = ssc_lookup[gtr_phy->ref_clk].steps;
+ size = ssc_lookup[gtr_phy->ref_clk].step_size;
+ pll_ref_clk = ssc_lookup[gtr_phy->ref_clk].pll_ref_clk;
+
+ offset = gtr_phy->lane * PLL_REF_OFFSET + PLL_REF_SEL0;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~PLL_FREQ_MASK) | pll_ref_clk;
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* Enable lane clock sharing, if required */
+ if (gtr_phy->share_laneclk != gtr_phy->lane) {
+		/* Per-lane reference clock selection register */
+ offset = gtr_phy->lane * PLL_REF_OFFSET + L0_L0_REF_CLK_SEL;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~LANE_CLK_SHARE_MASK) |
+ (1 << gtr_phy->share_laneclk);
+ writel(reg, gtr_dev->serdes + offset);
+ }
+
+ /* SSC step size [7:0] */
+ offset = gtr_phy->lane * STEP_SIZE_OFFSET + L0_PLL_SS_STEP_SIZE_0_LSB;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEP_SIZE_0_MASK) |
+ (size & STEP_SIZE_0_MASK);
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* SSC step size [15:8] */
+ size = size >> 8;
+ offset = gtr_phy->lane * STEP_SIZE_OFFSET + L0_PLL_SS_STEP_SIZE_1;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEP_SIZE_1_MASK) |
+ (size & STEP_SIZE_1_MASK);
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* SSC step size [23:16] */
+ size = size >> 8;
+ offset = gtr_phy->lane * STEP_SIZE_OFFSET + L0_PLL_SS_STEP_SIZE_2;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEP_SIZE_2_MASK) |
+ (size & STEP_SIZE_2_MASK);
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* SSC steps [7:0] */
+ offset = gtr_phy->lane * STEPS_OFFSET + L0_PLL_SS_STEPS_0_LSB;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEPS_0_MASK) |
+ (steps & STEPS_0_MASK);
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* SSC steps [10:8] */
+ steps = steps >> 8;
+ offset = gtr_phy->lane * STEPS_OFFSET + L0_PLL_SS_STEPS_1_MSB;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEPS_1_MASK) |
+ (steps & STEPS_1_MASK);
+ writel(reg, gtr_dev->serdes + offset);
+
+	/* SSC step size [25:24] */
+ size = size >> 8;
+ offset = gtr_phy->lane * STEP_SIZE_OFFSET + L0_PLL_SS_STEP_SIZE_3_MSB;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEP_SIZE_3_MASK) |
+ (size & STEP_SIZE_3_MASK);
+ reg |= FORCE_STEP_SIZE | FORCE_STEPS;
+ writel(reg, gtr_dev->serdes + offset);
+}
+
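+/*
+ * Worked example (illustrative only): with a 100 MHz reference clock,
+ * ssc_lookup gives steps = 1058 (0x422) and step_size = 87533 (0x000155ED),
+ * so the function above writes 0xED, 0x55, 0x01 and 0x00 to the four
+ * STEP_SIZE registers and 0x22 and 0x04 to the two STEPS registers.
+ */
+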
+/**
+ * xpsgtr_lane_setprotocol - sets required protocol in ICM registers
+ * @gtr_phy: pointer to lane
+ */
+static void xpsgtr_lane_setprotocol(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ u32 reg;
+ u8 protocol = gtr_phy->protocol;
+
+ switch (gtr_phy->lane) {
+ case 0:
+ reg = readl(gtr_dev->serdes + ICM_CFG0);
+ reg = (reg & ~ICM_CFG0_L0_MASK) | protocol;
+ writel(reg, gtr_dev->serdes + ICM_CFG0);
+ break;
+ case 1:
+ reg = readl(gtr_dev->serdes + ICM_CFG0);
+ reg = (reg & ~ICM_CFG0_L1_MASK) | (protocol << 4);
+ writel(reg, gtr_dev->serdes + ICM_CFG0);
+ break;
+ case 2:
+ reg = readl(gtr_dev->serdes + ICM_CFG1);
+		reg = (reg & ~ICM_CFG1_L2_MASK) | protocol;
+ writel(reg, gtr_dev->serdes + ICM_CFG1);
+ break;
+ case 3:
+ reg = readl(gtr_dev->serdes + ICM_CFG1);
+		reg = (reg & ~ICM_CFG1_L3_MASK) | (protocol << 4);
+ writel(reg, gtr_dev->serdes + ICM_CFG1);
+ break;
+ default:
+ /* We already checked 0 <= lane <= 3 */
+ break;
+ }
+}
+
+/**
+ * xpsgtr_get_ssc - gets the required ssc settings based on clk rate
+ * @gtr_phy: pointer to lane
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_get_ssc(struct xpsgtr_phy *gtr_phy)
+{
+ u32 i;
+
+ /*
+	 * Assign the required spread spectrum (SSC) settings
+	 * from the lane reference clk rate.
+ */
+ for (i = 0 ; i < ARRAY_SIZE(ssc_lookup); i++) {
+ if (gtr_phy->refclk_rate == ssc_lookup[i].refclk_rate) {
+ gtr_phy->ref_clk = i;
+ return 0;
+ }
+ }
+
+	/* Did not get valid SSC settings */
+ return -EINVAL;
+}
+
+/**
+ * xpsgtr_configure_lane - configures SSC settings for a lane
+ * @gtr_phy: pointer to lane
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_configure_lane(struct xpsgtr_phy *gtr_phy)
+{
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ case XPSGTR_TYPE_USB1:
+ gtr_phy->protocol = ICM_PROTOCOL_USB;
+ break;
+ case XPSGTR_TYPE_SATA_0:
+ case XPSGTR_TYPE_SATA_1:
+ gtr_phy->protocol = ICM_PROTOCOL_SATA;
+ break;
+ case XPSGTR_TYPE_DP_0:
+ case XPSGTR_TYPE_DP_1:
+ gtr_phy->protocol = ICM_PROTOCOL_DP;
+ break;
+ case XPSGTR_TYPE_PCIE_0:
+ case XPSGTR_TYPE_PCIE_1:
+ case XPSGTR_TYPE_PCIE_2:
+ case XPSGTR_TYPE_PCIE_3:
+ gtr_phy->protocol = ICM_PROTOCOL_PCIE;
+ break;
+ case XPSGTR_TYPE_SGMII0:
+ case XPSGTR_TYPE_SGMII1:
+ case XPSGTR_TYPE_SGMII2:
+ case XPSGTR_TYPE_SGMII3:
+ gtr_phy->protocol = ICM_PROTOCOL_SGMII;
+ break;
+ default:
+ gtr_phy->protocol = ICM_PROTOCOL_PD;
+ break;
+ }
+
+	/* Get SSC settings for the reference clk rate */
+ if (xpsgtr_get_ssc(gtr_phy) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * xpsgtr_config_usbpipe - configures the PIPE3 signals for USB
+ * @gtr_phy: pointer to gtr phy device
+ */
+static void xpsgtr_config_usbpipe(struct xpsgtr_phy *gtr_phy)
+{
+ struct phy *phy = gtr_phy->phy;
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ void __iomem *regs = dev_get_platdata(&phy->dev);
+
+ if (regs) {
+ /* Set PIPE power present signal */
+ writel(PIPE_POWER_ON, regs + PIPE_POWER_OFFSET);
+ /* Clear PIPE CLK signal */
+ writel(PIPE_CLK_OFF, regs + PIPE_CLK_OFFSET);
+ } else {
+ dev_info(gtr_dev->dev,
+			 "%s: no valid platform data found\n", __func__);
+ }
+}
+
+/**
+ * xpsgtr_reset_assert - asserts reset using reset framework
+ * @rstc: pointer to reset_control
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_reset_assert(struct reset_control *rstc)
+{
+ unsigned long loop_time = msecs_to_jiffies(RST_TIMEOUT);
+ unsigned long timeout;
+
+ reset_control_assert(rstc);
+
+ /* wait until reset is asserted or timeout */
+ timeout = jiffies + loop_time;
+
+ while (!time_after_eq(jiffies, timeout)) {
+ if (reset_control_status(rstc) > 0)
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * xpsgtr_reset_release - de-asserts reset using reset framework
+ * @rstc: pointer to reset_control
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_reset_release(struct reset_control *rstc)
+{
+ unsigned long loop_time = msecs_to_jiffies(RST_TIMEOUT);
+ unsigned long timeout;
+
+ reset_control_deassert(rstc);
+
+ /* wait until reset is de-asserted or timeout */
+ timeout = jiffies + loop_time;
+ while (!time_after_eq(jiffies, timeout)) {
+ if (!reset_control_status(rstc))
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * xpsgtr_controller_reset - puts controller in reset
+ * @gtr_phy: pointer to lane
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_controller_reset(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ int ret;
+
+ switch (gtr_phy->type) {
+	case XPSGTR_TYPE_USB0:
+		ret = xpsgtr_reset_assert(gtr_dev->usb0_crst);
+		if (!ret)
+			ret = xpsgtr_reset_assert(gtr_dev->usb0_hibrst);
+		if (!ret)
+			ret = xpsgtr_reset_assert(gtr_dev->usb0_apbrst);
+		break;
+	case XPSGTR_TYPE_USB1:
+		ret = xpsgtr_reset_assert(gtr_dev->usb1_crst);
+		if (!ret)
+			ret = xpsgtr_reset_assert(gtr_dev->usb1_hibrst);
+		if (!ret)
+			ret = xpsgtr_reset_assert(gtr_dev->usb1_apbrst);
+		break;
+ case XPSGTR_TYPE_SATA_0:
+ case XPSGTR_TYPE_SATA_1:
+ ret = xpsgtr_reset_assert(gtr_dev->sata_rst);
+ break;
+ case XPSGTR_TYPE_DP_0:
+ case XPSGTR_TYPE_DP_1:
+ ret = xpsgtr_reset_assert(gtr_dev->dp_rst);
+ break;
+ case XPSGTR_TYPE_SGMII0:
+ ret = xpsgtr_reset_assert(gtr_dev->gem0_rst);
+ break;
+ case XPSGTR_TYPE_SGMII1:
+ ret = xpsgtr_reset_assert(gtr_dev->gem1_rst);
+ break;
+ case XPSGTR_TYPE_SGMII2:
+ ret = xpsgtr_reset_assert(gtr_dev->gem2_rst);
+ break;
+ case XPSGTR_TYPE_SGMII3:
+ ret = xpsgtr_reset_assert(gtr_dev->gem3_rst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * xpsgtr_controller_release_reset - releases controller from reset
+ * @gtr_phy: pointer to lane
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_controller_release_reset(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ int ret;
+
+ switch (gtr_phy->type) {
+	case XPSGTR_TYPE_USB0:
+		ret = xpsgtr_reset_release(gtr_dev->usb0_apbrst);
+
+		/* Config PIPE3 signals after releasing APB reset */
+		xpsgtr_config_usbpipe(gtr_phy);
+
+		if (!ret)
+			ret = xpsgtr_reset_release(gtr_dev->usb0_crst);
+		if (!ret)
+			ret = xpsgtr_reset_release(gtr_dev->usb0_hibrst);
+		break;
+	case XPSGTR_TYPE_USB1:
+		ret = xpsgtr_reset_release(gtr_dev->usb1_apbrst);
+
+		/* Config PIPE3 signals after releasing APB reset */
+		xpsgtr_config_usbpipe(gtr_phy);
+
+		if (!ret)
+			ret = xpsgtr_reset_release(gtr_dev->usb1_crst);
+		if (!ret)
+			ret = xpsgtr_reset_release(gtr_dev->usb1_hibrst);
+		break;
+ case XPSGTR_TYPE_SATA_0:
+ case XPSGTR_TYPE_SATA_1:
+ ret = xpsgtr_reset_release(gtr_dev->sata_rst);
+ break;
+ case XPSGTR_TYPE_DP_0:
+ case XPSGTR_TYPE_DP_1:
+ ret = xpsgtr_reset_release(gtr_dev->dp_rst);
+ break;
+ case XPSGTR_TYPE_SGMII0:
+ ret = xpsgtr_reset_release(gtr_dev->gem0_rst);
+ break;
+ case XPSGTR_TYPE_SGMII1:
+ ret = xpsgtr_reset_release(gtr_dev->gem1_rst);
+ break;
+ case XPSGTR_TYPE_SGMII2:
+ ret = xpsgtr_reset_release(gtr_dev->gem2_rst);
+ break;
+ case XPSGTR_TYPE_SGMII3:
+ ret = xpsgtr_reset_release(gtr_dev->gem3_rst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * xpsgtr_usb_crst_assert - assert USB core reset
+ * @phy: pointer to phy
+ *
+ * Return: 0 on success or error on failure
+ */
+int xpsgtr_usb_crst_assert(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ int ret;
+
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ ret = xpsgtr_reset_assert(gtr_dev->usb0_crst);
+ break;
+ case XPSGTR_TYPE_USB1:
+ ret = xpsgtr_reset_assert(gtr_dev->usb1_crst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(xpsgtr_usb_crst_assert);
+
+/**
+ * xpsgtr_usb_crst_release - release USB core reset
+ * @phy: pointer to phy
+ *
+ * Return: 0 on success or error on failure
+ */
+int xpsgtr_usb_crst_release(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ int ret;
+
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ ret = xpsgtr_reset_release(gtr_dev->usb0_crst);
+ break;
+ case XPSGTR_TYPE_USB1:
+ ret = xpsgtr_reset_release(gtr_dev->usb1_crst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(xpsgtr_usb_crst_release);
+
+int xpsgtr_wait_pll_lock(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ u32 offset, reg;
+ u32 timeout = 1000;
+ int ret = 0;
+
+ /* Check pll is locked */
+ offset = gtr_phy->lane * PLL_STATUS_READ_OFFSET + L0_PLL_STATUS_READ_1;
+ dev_dbg(gtr_dev->dev, "Waiting for PLL lock...\n");
+
+ do {
+ reg = readl(gtr_dev->serdes + offset);
+ if ((reg & PLL_STATUS_LOCKED) == PLL_STATUS_LOCKED)
+ break;
+
+ if (!--timeout) {
+ dev_err(gtr_dev->dev, "PLL lock time out\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+ udelay(1);
+ } while (1);
+
+ if (ret == 0)
+ gtr_phy->pll_lock = true;
+
+ dev_info(gtr_dev->dev, "Lane:%d type:%d protocol:%d pll_locked:%s\n",
+ gtr_phy->lane, gtr_phy->type, gtr_phy->protocol,
+ gtr_phy->pll_lock ? "yes" : "no");
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xpsgtr_wait_pll_lock);
+
+/**
+ * xpsgtr_set_txwidth - This function sets the tx bus width of the lane
+ * @gtr_phy: pointer to lane
+ * @width: tx bus width size
+ */
+static void xpsgtr_set_txwidth(struct xpsgtr_phy *gtr_phy, u32 width)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+	writel(width << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT),
+	       gtr_dev->serdes + TX_PROT_BUS_WIDTH);
+}
+
+/**
+ * xpsgtr_set_rxwidth - This function sets the rx bus width of the lane
+ * @gtr_phy: pointer to lane
+ * @width: rx bus width size
+ */
+static void xpsgtr_set_rxwidth(struct xpsgtr_phy *gtr_phy, u32 width)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+	writel(width << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT),
+	       gtr_dev->serdes + RX_PROT_BUS_WIDTH);
+}
+
+/**
+ * xpsgtr_bypass_scramenc - bypasses the scrambler and 8b/10b encoder
+ * @gtr_phy: pointer to lane
+ */
+static void xpsgtr_bypass_scramenc(struct xpsgtr_phy *gtr_phy)
+{
+ u32 offset;
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ /* bypass Scrambler and 8b/10b Encoder */
+ offset = gtr_phy->lane * TX_DIG_61_OFFSET + L0_TX_DIG_61;
+ writel(TM_DISABLE_SCRAMBLE_ENCODER, gtr_dev->serdes + offset);
+}
+
+/**
+ * xpsgtr_bypass_descramdec - bypasses the descrambler and 8b/10b decoder
+ * @gtr_phy: pointer to lane
+ */
+static void xpsgtr_bypass_descramdec(struct xpsgtr_phy *gtr_phy)
+{
+ u32 offset;
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ /* bypass Descrambler and 8b/10b decoder */
+ offset = gtr_phy->lane * TM_DIG_6_OFFSET + L0_TM_DIG_6;
+ writel(TM_DISABLE_DESCRAMBLE_DECODER, gtr_dev->serdes + offset);
+}
+
+/**
+ * xpsgtr_misc_sgmii - miscellaneous settings for SGMII
+ * @gtr_phy: pointer to lane
+ */
+static void xpsgtr_misc_sgmii(struct xpsgtr_phy *gtr_phy)
+{
+ /* Set SGMII protocol tx bus width 10 bits */
+ xpsgtr_set_txwidth(gtr_phy, PROT_BUS_WIDTH_10);
+
+ /* Set SGMII protocol rx bus width 10 bits */
+ xpsgtr_set_rxwidth(gtr_phy, PROT_BUS_WIDTH_10);
+
+ /* bypass Descrambler and 8b/10b decoder */
+ xpsgtr_bypass_descramdec(gtr_phy);
+
+ /* bypass Scrambler and 8b/10b Encoder */
+ xpsgtr_bypass_scramenc(gtr_phy);
+}
+
+/**
+ * xpsgtr_misc_sata - miscellaneous settings for SATA
+ * @gtr_phy: pointer to lane
+ */
+static void xpsgtr_misc_sata(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ /* bypass Descrambler and 8b/10b decoder */
+ xpsgtr_bypass_descramdec(gtr_phy);
+
+ /* bypass Scrambler and 8b/10b Encoder */
+ xpsgtr_bypass_scramenc(gtr_phy);
+
+ writel(gtr_phy->lane, gtr_dev->siou + SATA_CONTROL_OFFSET);
+}
+
+/**
+ * xpsgtr_ulpi_reset - performs the ULPI reset sequence
+ * @gtr_phy: pointer to lane
+ *
+ * Return: 0 on success, -EINVAL on a non-existent USB type, or an error from
+ * communication with firmware
+ */
+static int xpsgtr_ulpi_reset(struct xpsgtr_phy *gtr_phy)
+{
+ u32 node_id;
+ int ret = 0;
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ if (!eemi_ops->ioctl)
+ return -ENOTSUPP;
+
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ node_id = NODE_USB_0;
+ break;
+ case XPSGTR_TYPE_USB1:
+ node_id = NODE_USB_1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = eemi_ops->ioctl(node_id, IOCTL_ULPI_RESET, 0, 0, NULL);
+ if (ret < 0)
+ dev_err(gtr_dev->dev, "failed to perform ULPI reset\n");
+
+ return ret;
+}
+
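+/*
+ * Note: the ULPI reset itself is carried out by the platform firmware;
+ * the driver only requests it through the EEMI IOCTL interface above.
+ */
+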
+/**
+ * xpsgtr_set_sgmii_pcs - This function sets the SGMII mode for GEM.
+ * @gtr_phy: pointer to lane
+ *
+ * Return: 0 on success, -EINVAL on a non-existent SGMII type, or an error from
+ * communication with firmware
+ */
+static int xpsgtr_set_sgmii_pcs(struct xpsgtr_phy *gtr_phy)
+{
+ u32 node_id;
+ int ret = 0;
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ if (!eemi_ops->ioctl)
+ return -ENOTSUPP;
+
+ /* Set the PCS signal detect to 1 */
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_SGMII0:
+ node_id = NODE_ETH_0;
+ break;
+ case XPSGTR_TYPE_SGMII1:
+ node_id = NODE_ETH_1;
+ break;
+ case XPSGTR_TYPE_SGMII2:
+ node_id = NODE_ETH_2;
+ break;
+ case XPSGTR_TYPE_SGMII3:
+ node_id = NODE_ETH_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = eemi_ops->ioctl(node_id, IOCTL_SET_SGMII_MODE,
+ PM_SGMII_ENABLE, 0, NULL);
+ if (ret < 0) {
+ dev_err(gtr_dev->dev, "failed to set GEM to SGMII mode\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * xpsgtr_phyinit_skippable - check if phy_init() for the lane can be skipped
+ * @gtr_phy: pointer to the phy lane
+ *
+ * Return: true if phy_init() can be skipped, false otherwise
+ */
+static bool xpsgtr_phyinit_skippable(struct xpsgtr_phy *gtr_phy)
+{
+ /*
+	 * As USB may save a snapshot of its state during hibernation, doing
+	 * phy_init() would put the USB controller into reset and lose that
+	 * saved snapshot. So avoid phy_init() for USB unless
+	 * gtr_phy->skip_phy_init is false (which happens when the FPD is shut
+	 * down during suspend or when the GT lane assignment has changed).
+ */
+ if (gtr_phy->protocol == ICM_PROTOCOL_USB && gtr_phy->skip_phy_init)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * xpsgtr_phy_init - initializes a lane
+ * @phy: pointer to kernel PHY device
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_phy_init(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ int ret = 0;
+ u32 offset;
+ u32 reg;
+ u32 nsw;
+ u32 timeout = 500;
+
+ mutex_lock(&gtr_dev->gtr_mutex);
+
+	/* Skip phy_init() when the lane does not need it */
+	if (xpsgtr_phyinit_skippable(gtr_phy))
+ goto out;
+
+ /* Put controller in reset */
+ ret = xpsgtr_controller_reset(gtr_phy);
+ if (ret != 0) {
+ dev_err(gtr_dev->dev, "Failed to assert reset\n");
+ goto out;
+ }
+
+ /*
+ * There is a functional issue in the GT. The TX termination resistance
+	 * can be out of spec due to a bug in the calibration logic. The
+	 * workaround below fixes it and is required for XCZU9EG silicon.
+ */
+ if (gtr_dev->tx_term_fix) {
+		/* Enable Test Mode control for CMN Reset */
+ reg = readl(gtr_dev->serdes + TM_CMN_RST);
+ reg = (reg & ~TM_CMN_RST_MASK) | TM_CMN_RST_SET;
+ writel(reg, gtr_dev->serdes + TM_CMN_RST);
+
+ /* Set Test Mode reset */
+ reg = readl(gtr_dev->serdes + TM_CMN_RST);
+ reg = (reg & ~TM_CMN_RST_MASK) | TM_CMN_RST_EN;
+ writel(reg, gtr_dev->serdes + TM_CMN_RST);
+
+ writel(0x00, gtr_dev->serdes + L3_TM_CALIB_DIG18);
+ writel(TM_OVERRIDE_NSW_CODE, gtr_dev->serdes +
+ L3_TM_CALIB_DIG19);
+
+		/*
+		 * As part of the workaround sequence for the PMOS calibration
+		 * fix, configure any lane's ICM_CFG to a valid protocol. This
+		 * deasserts the CMN_Resetn signal.
+		 */
+ xpsgtr_lane_setprotocol(gtr_phy);
+
+ /* Clear Test Mode reset */
+ reg = readl(gtr_dev->serdes + TM_CMN_RST);
+ reg = (reg & ~TM_CMN_RST_MASK) | TM_CMN_RST_SET;
+ writel(reg, gtr_dev->serdes + TM_CMN_RST);
+
+ dev_dbg(gtr_dev->dev, "calibrating...\n");
+
+ do {
+ reg = readl(gtr_dev->serdes + L3_CALIB_DONE_STATUS);
+ if ((reg & CALIB_DONE) == CALIB_DONE)
+ break;
+
+ if (!--timeout) {
+ dev_err(gtr_dev->dev, "calibration time out\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ udelay(1);
+ } while (1);
+
+ dev_dbg(gtr_dev->dev, "calibration done\n");
+
+ /* Reading NMOS Register Code */
+ nsw = readl(gtr_dev->serdes + L0_TXPMA_ST_3);
+
+ /* Set Test Mode reset */
+ reg = readl(gtr_dev->serdes + TM_CMN_RST);
+ reg = (reg & ~TM_CMN_RST_MASK) | TM_CMN_RST_EN;
+ writel(reg, gtr_dev->serdes + TM_CMN_RST);
+
+ nsw = nsw & DN_CALIB_CODE;
+
+ /* Writing NMOS register values back [5:3] */
+ reg = nsw >> DN_CALIB_SHIFT;
+ writel(reg, gtr_dev->serdes + L3_TM_CALIB_DIG19);
+
+ /* Writing NMOS register value [2:0] */
+ reg = ((nsw & 0x7) << NSW_SHIFT) | (1 << NSW_PIPE_SHIFT);
+ writel(reg, gtr_dev->serdes + L3_TM_CALIB_DIG18);
+
+ /* Clear Test Mode reset */
+ reg = readl(gtr_dev->serdes + TM_CMN_RST);
+ reg = (reg & ~TM_CMN_RST_MASK) | TM_CMN_RST_SET;
+ writel(reg, gtr_dev->serdes + TM_CMN_RST);
+
+ gtr_dev->tx_term_fix = false;
+ }
+
+ /* Enable coarse code saturation limiting logic */
+ offset = gtr_phy->lane * TM_PLL_DIG_37_OFFSET + L0_TM_PLL_DIG_37;
+ writel(TM_COARSE_CODE_LIMIT, gtr_dev->serdes + offset);
+
+ xpsgtr_configure_pll(gtr_phy);
+ xpsgtr_lane_setprotocol(gtr_phy);
+
+ if (gtr_phy->protocol == ICM_PROTOCOL_SATA)
+ xpsgtr_misc_sata(gtr_phy);
+
+ if (gtr_phy->protocol == ICM_PROTOCOL_SGMII)
+ xpsgtr_misc_sgmii(gtr_phy);
+
+ /* Bring controller out of reset */
+ ret = xpsgtr_controller_release_reset(gtr_phy);
+ if (ret != 0) {
+ dev_err(gtr_dev->dev, "Failed to release reset\n");
+ goto out;
+ }
+
+	/*
+	 * Wait till the PLL is locked for all protocols except DP. For DP,
+	 * the PLL locking function is called separately from the DP driver.
+	 */
+ if (gtr_phy->protocol != ICM_PROTOCOL_DP) {
+ ret = xpsgtr_wait_pll_lock(phy);
+ if (ret != 0)
+ goto out;
+ } else {
+ offset = gtr_phy->lane * TXPMD_TM_45_OFFSET + L0_TXPMD_TM_45;
+ reg = L0_TXPMD_TM_45_OVER_DP_MAIN |
+ L0_TXPMD_TM_45_ENABLE_DP_MAIN |
+ L0_TXPMD_TM_45_OVER_DP_POST1 |
+ L0_TXPMD_TM_45_OVER_DP_POST2 |
+ L0_TXPMD_TM_45_ENABLE_DP_POST2;
+ writel(reg, gtr_dev->serdes + offset);
+ offset = gtr_phy->lane * TX_ANA_TM_118_OFFSET +
+ L0_TX_ANA_TM_118;
+ writel(L0_TX_ANA_TM_118_FORCE_17_0,
+ gtr_dev->serdes + offset);
+ }
+
+ /* Do ULPI reset for usb */
+ if (gtr_phy->protocol == ICM_PROTOCOL_USB)
+ ret = xpsgtr_ulpi_reset(gtr_phy);
+
+	/* Select SGMII mode for GEM and set the PCS signal detect */
+ if (gtr_phy->protocol == ICM_PROTOCOL_SGMII)
+ ret = xpsgtr_set_sgmii_pcs(gtr_phy);
+out:
+ mutex_unlock(&gtr_dev->gtr_mutex);
+ return ret;
+}
+
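+/*
+ * Note: the overall init order above is: assert controller reset, apply the
+ * one-time TX termination workaround if needed, configure the lane PLL and
+ * protocol, release the reset, and finally wait for PLL lock (except for DP,
+ * where the DP driver polls the lock itself via xpsgtr_wait_pll_lock()).
+ */
+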
+/**
+ * xpsgtr_set_lanetype - derives lane type from dts arguments
+ * @gtr_phy: pointer to lane
+ * @controller: type of controller
+ * @instance_num: instance number of the controller, in case of a multi-lane controller
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_set_lanetype(struct xpsgtr_phy *gtr_phy, u8 controller,
+ u8 instance_num)
+{
+ switch (controller) {
+ case PHY_TYPE_SATA:
+ if (instance_num == 0)
+ gtr_phy->type = XPSGTR_TYPE_SATA_0;
+ else if (instance_num == 1)
+ gtr_phy->type = XPSGTR_TYPE_SATA_1;
+ else
+ return -EINVAL;
+ break;
+ case PHY_TYPE_USB3:
+ if (instance_num == 0)
+ gtr_phy->type = XPSGTR_TYPE_USB0;
+ else if (instance_num == 1)
+ gtr_phy->type = XPSGTR_TYPE_USB1;
+ else
+ return -EINVAL;
+ break;
+ case PHY_TYPE_DP:
+ if (instance_num == 0)
+ gtr_phy->type = XPSGTR_TYPE_DP_0;
+ else if (instance_num == 1)
+ gtr_phy->type = XPSGTR_TYPE_DP_1;
+ else
+ return -EINVAL;
+ break;
+ case PHY_TYPE_PCIE:
+ if (instance_num == 0)
+ gtr_phy->type = XPSGTR_TYPE_PCIE_0;
+ else if (instance_num == 1)
+ gtr_phy->type = XPSGTR_TYPE_PCIE_1;
+ else if (instance_num == 2)
+ gtr_phy->type = XPSGTR_TYPE_PCIE_2;
+ else if (instance_num == 3)
+ gtr_phy->type = XPSGTR_TYPE_PCIE_3;
+ else
+ return -EINVAL;
+ break;
+ case PHY_TYPE_SGMII:
+ if (instance_num == 0)
+ gtr_phy->type = XPSGTR_TYPE_SGMII0;
+ else if (instance_num == 1)
+ gtr_phy->type = XPSGTR_TYPE_SGMII1;
+ else if (instance_num == 2)
+ gtr_phy->type = XPSGTR_TYPE_SGMII2;
+ else if (instance_num == 3)
+ gtr_phy->type = XPSGTR_TYPE_SGMII3;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * xpsgtr_xlate - provides a PHY specific to a controller
+ * @dev: pointer to device
+ * @args: arguments from dts
+ *
+ * Return: pointer to kernel PHY device or error on failure
+ */
+static struct phy *xpsgtr_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+ struct xpsgtr_phy *gtr_phy = NULL;
+ struct device_node *phynode = args->np;
+ int index;
+ int i;
+ u8 controller;
+ u8 instance_num;
+
+ if (args->args_count != 4) {
+ dev_err(dev, "Invalid number of cells in 'phy' property\n");
+ return ERR_PTR(-EINVAL);
+ }
+ if (!of_device_is_available(phynode)) {
+ dev_warn(dev, "requested PHY is disabled\n");
+ return ERR_PTR(-ENODEV);
+ }
+ for (index = 0; index < of_get_child_count(dev->of_node); index++) {
+ if (phynode == gtr_dev->phys[index]->phy->dev.of_node) {
+ gtr_phy = gtr_dev->phys[index];
+ break;
+ }
+ }
+ if (!gtr_phy) {
+ dev_err(dev, "failed to find appropriate phy\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* get type of controller from phys */
+ controller = args->args[0];
+
+ /* get controller instance number */
+ instance_num = args->args[1];
+
+ /* Check if lane sharing is required */
+ gtr_phy->share_laneclk = args->args[2];
+
+ /* get the required clk rate for controller from phys */
+ gtr_phy->refclk_rate = args->args[3];
+
+ /* derive lane type */
+ if (xpsgtr_set_lanetype(gtr_phy, controller, instance_num) < 0) {
+ dev_err(gtr_dev->dev, "Invalid lane type\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* configures SSC settings for a lane */
+ if (xpsgtr_configure_lane(gtr_phy) < 0) {
+		dev_err(gtr_dev->dev, "Invalid clock rate: %u\n",
+ gtr_phy->refclk_rate);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+	 * Check that the Interconnect Matrix is obeyed, i.e. the given lane
+	 * type is allowed to operate on the lane.
+ */
+ for (i = 0; i < CONTROLLERS_PER_LANE; i++) {
+ if (icm_matrix[index][i] == gtr_phy->type)
+ return gtr_phy->phy;
+ }
+
+ /* Should not reach here */
+ return ERR_PTR(-EINVAL);
+}
+
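+/*
+ * Illustrative consumer binding (hypothetical node names):
+ *
+ *	phys = <&lane3 PHY_TYPE_USB3 0 3 26000000>;
+ *
+ * The four cells are the controller type, the controller instance, the
+ * lane whose reference clock is shared (the lane's own number when no
+ * sharing is wanted) and the reference clock rate in Hz.
+ */
+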
+/**
+ * xpsgtr_phy_exit - clears previous initialized variables
+ * @phy: pointer to kernel PHY device
+ *
+ * Return: 0 on success
+ */
+static int xpsgtr_phy_exit(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+
+ /* As we are exiting, clear skip_phy_init flag */
+ gtr_phy->skip_phy_init = false;
+
+ return 0;
+}
+
+static struct phy_ops xpsgtr_phyops = {
+ .init = xpsgtr_phy_init,
+ .exit = xpsgtr_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * xpsgtr_get_resets - Gets reset signals based on reset-names property
+ * @gtr_dev: pointer to structure which stores reset information
+ *
+ * Return: 0 on success or error value on failure
+ */
+static int xpsgtr_get_resets(struct xpsgtr_dev *gtr_dev)
+{
+ char *name;
+ struct reset_control *rst_temp;
+
+ gtr_dev->sata_rst = devm_reset_control_get(gtr_dev->dev, "sata_rst");
+ if (IS_ERR(gtr_dev->sata_rst)) {
+ name = "sata_rst";
+ rst_temp = gtr_dev->sata_rst;
+ goto error;
+ }
+
+ gtr_dev->dp_rst = devm_reset_control_get(gtr_dev->dev, "dp_rst");
+ if (IS_ERR(gtr_dev->dp_rst)) {
+ name = "dp_rst";
+ rst_temp = gtr_dev->dp_rst;
+ goto error;
+ }
+
+ gtr_dev->usb0_crst = devm_reset_control_get(gtr_dev->dev, "usb0_crst");
+ if (IS_ERR(gtr_dev->usb0_crst)) {
+ name = "usb0_crst";
+ rst_temp = gtr_dev->usb0_crst;
+ goto error;
+ }
+
+ gtr_dev->usb1_crst = devm_reset_control_get(gtr_dev->dev, "usb1_crst");
+ if (IS_ERR(gtr_dev->usb1_crst)) {
+ name = "usb1_crst";
+ rst_temp = gtr_dev->usb1_crst;
+ goto error;
+ }
+
+ gtr_dev->usb0_hibrst = devm_reset_control_get(gtr_dev->dev,
+ "usb0_hibrst");
+ if (IS_ERR(gtr_dev->usb0_hibrst)) {
+ name = "usb0_hibrst";
+ rst_temp = gtr_dev->usb0_hibrst;
+ goto error;
+ }
+
+ gtr_dev->usb1_hibrst = devm_reset_control_get(gtr_dev->dev,
+ "usb1_hibrst");
+ if (IS_ERR(gtr_dev->usb1_hibrst)) {
+ name = "usb1_hibrst";
+ rst_temp = gtr_dev->usb1_hibrst;
+ goto error;
+ }
+
+ gtr_dev->usb0_apbrst = devm_reset_control_get(gtr_dev->dev,
+ "usb0_apbrst");
+ if (IS_ERR(gtr_dev->usb0_apbrst)) {
+ name = "usb0_apbrst";
+ rst_temp = gtr_dev->usb0_apbrst;
+ goto error;
+ }
+
+ gtr_dev->usb1_apbrst = devm_reset_control_get(gtr_dev->dev,
+ "usb1_apbrst");
+ if (IS_ERR(gtr_dev->usb1_apbrst)) {
+ name = "usb1_apbrst";
+ rst_temp = gtr_dev->usb1_apbrst;
+ goto error;
+ }
+
+ gtr_dev->gem0_rst = devm_reset_control_get(gtr_dev->dev, "gem0_rst");
+ if (IS_ERR(gtr_dev->gem0_rst)) {
+ name = "gem0_rst";
+ rst_temp = gtr_dev->gem0_rst;
+ goto error;
+ }
+
+ gtr_dev->gem1_rst = devm_reset_control_get(gtr_dev->dev, "gem1_rst");
+ if (IS_ERR(gtr_dev->gem1_rst)) {
+ name = "gem1_rst";
+ rst_temp = gtr_dev->gem1_rst;
+ goto error;
+ }
+
+ gtr_dev->gem2_rst = devm_reset_control_get(gtr_dev->dev, "gem2_rst");
+ if (IS_ERR(gtr_dev->gem2_rst)) {
+ name = "gem2_rst";
+ rst_temp = gtr_dev->gem2_rst;
+ goto error;
+ }
+
+ gtr_dev->gem3_rst = devm_reset_control_get(gtr_dev->dev, "gem3_rst");
+ if (IS_ERR(gtr_dev->gem3_rst)) {
+ name = "gem3_rst";
+ rst_temp = gtr_dev->gem3_rst;
+ goto error;
+ }
+
+ return 0;
+error:
+ dev_err(gtr_dev->dev, "failed to get %s reset signal\n", name);
+ return PTR_ERR(rst_temp);
+}
+
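+/*
+ * Note: every reset line requested above is treated as mandatory;
+ * if any one of them is missing, xpsgtr_get_resets() fails and the
+ * probe below aborts.
+ */
+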
+/**
+ * xpsgtr_probe - The device probe function for driver initialization.
+ * @pdev: pointer to the platform device structure.
+ *
+ * Return: 0 for success and error value on failure
+ */
+static int xpsgtr_probe(struct platform_device *pdev)
+{
+ struct device_node *child, *np = pdev->dev.of_node;
+ struct xpsgtr_dev *gtr_dev;
+ struct phy_provider *provider;
+ struct phy *phy;
+ struct resource *res;
+ char *soc_rev;
+ int lanecount, port = 0, index = 0;
+ int err;
+
+ if (of_device_is_compatible(np, "xlnx,zynqmp-psgtr"))
+ dev_warn(&pdev->dev, "This binding is deprecated, please use new compatible binding\n");
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
+ if (!gtr_dev)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "serdes");
+ gtr_dev->serdes = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gtr_dev->serdes))
+ return PTR_ERR(gtr_dev->serdes);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "siou");
+ gtr_dev->siou = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gtr_dev->siou))
+ return PTR_ERR(gtr_dev->siou);
+
+ lanecount = of_get_child_count(np);
+ if (lanecount > MAX_LANES || lanecount == 0)
+ return -EINVAL;
+
+	gtr_dev->phys = devm_kcalloc(&pdev->dev, lanecount,
+				     sizeof(*gtr_dev->phys), GFP_KERNEL);
+ if (!gtr_dev->phys)
+ return -ENOMEM;
+
+ gtr_dev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, gtr_dev);
+ mutex_init(&gtr_dev->gtr_mutex);
+
+ /* Deferred probe is also handled if nvmem is not ready */
+ soc_rev = zynqmp_nvmem_get_silicon_version(&pdev->dev,
+ "soc_revision");
+ if (IS_ERR(soc_rev))
+ return PTR_ERR(soc_rev);
+
+ if (*soc_rev == ZYNQMP_SILICON_V1)
+ gtr_dev->tx_term_fix = true;
+
+ kfree(soc_rev);
+
+ err = xpsgtr_get_resets(gtr_dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get resets: %d\n", err);
+ return err;
+ }
+
+ for_each_child_of_node(np, child) {
+ struct xpsgtr_phy *gtr_phy;
+
+ gtr_phy = devm_kzalloc(&pdev->dev, sizeof(*gtr_phy),
+ GFP_KERNEL);
+ if (!gtr_phy)
+ return -ENOMEM;
+
+ /* Assign lane number to gtr_phy instance */
+ gtr_phy->lane = index;
+
+		/* Disable lane sharing by default */
+ gtr_phy->share_laneclk = -1;
+
+ gtr_dev->phys[port] = gtr_phy;
+ phy = devm_phy_create(&pdev->dev, child, &xpsgtr_phyops);
+ if (IS_ERR(phy)) {
+ dev_err(&pdev->dev, "failed to create PHY\n");
+ return PTR_ERR(phy);
+ }
+ gtr_dev->phys[port]->phy = phy;
+ phy_set_drvdata(phy, gtr_dev->phys[port]);
+ gtr_phy->data = gtr_dev;
+ port++;
+ index++;
+ }
+ provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(&pdev->dev, "registering provider failed\n");
+ return PTR_ERR(provider);
+ }
+ return 0;
+}
+
+static int xpsgtr_suspend(struct device *dev)
+{
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+
+ /* Save the ICM_CFG registers */
+ gtr_dev->saved_icm_cfg0 = readl(gtr_dev->serdes + ICM_CFG0);
+ gtr_dev->saved_icm_cfg1 = readl(gtr_dev->serdes + ICM_CFG1);
+
+ return 0;
+}
+
+static int xpsgtr_resume(struct device *dev)
+{
+ unsigned int icm_cfg0, icm_cfg1, index;
+ bool skip_phy_init;
+ struct xpsgtr_phy *gtr_phy;
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+
+ icm_cfg0 = readl(gtr_dev->serdes + ICM_CFG0);
+ icm_cfg1 = readl(gtr_dev->serdes + ICM_CFG1);
+
+	/* Just return if no GT lanes were configured before suspend */
+ if (!gtr_dev->saved_icm_cfg0 && !gtr_dev->saved_icm_cfg1)
+ return 0;
+
+ /* Check if the ICM configurations changed after suspend */
+ if (icm_cfg0 == gtr_dev->saved_icm_cfg0 &&
+ icm_cfg1 == gtr_dev->saved_icm_cfg1)
+ skip_phy_init = true;
+ else
+ skip_phy_init = false;
+
+	/* Update skip_phy_init for all gtr_phy instances */
+ for (index = 0; index < of_get_child_count(dev->of_node); index++) {
+ gtr_phy = gtr_dev->phys[index];
+ gtr_phy->skip_phy_init = skip_phy_init;
+ }
+
+ return 0;
+}
+
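+/*
+ * Note: the skip_phy_init flags set in xpsgtr_resume() are consumed by the
+ * skip check at the top of xpsgtr_phy_init(), so an unchanged ICM
+ * configuration across suspend avoids needlessly resetting an active USB
+ * controller.
+ */
+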
+static const struct dev_pm_ops xpsgtr_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xpsgtr_suspend, xpsgtr_resume)
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id xpsgtr_of_match[] = {
+ { .compatible = "xlnx,zynqmp-psgtr", },
+ { .compatible = "xlnx,zynqmp-psgtr-v1.1", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xpsgtr_of_match);
+
+static struct platform_driver xpsgtr_driver = {
+ .probe = xpsgtr_probe,
+ .driver = {
+ .name = "xilinx-psgtr",
+ .of_match_table = xpsgtr_of_match,
+ .pm = &xpsgtr_pm_ops,
+ },
+};
+
+module_platform_driver(xpsgtr_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Xilinx ZynqMP high-speed gigabit transceiver driver");
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
index 04d18d52f700..d4741c2dbbb5 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
@@ -54,8 +54,10 @@ static int qcom_usb_hsic_phy_power_on(struct phy *phy)
/* Configure pins for HSIC functionality */
pins_default = pinctrl_lookup_state(uphy->pctl, PINCTRL_STATE_DEFAULT);
- if (IS_ERR(pins_default))
- return PTR_ERR(pins_default);
+ if (IS_ERR(pins_default)) {
+ ret = PTR_ERR(pins_default);
+ goto err_ulpi;
+ }
ret = pinctrl_select_state(uphy->pctl, pins_default);
if (ret)
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index cfb98bba7715..ddc41db1f65a 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -631,8 +631,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
channel->irq = platform_get_irq_optional(pdev, 0);
channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
- int ret;
-
channel->is_otg_channel = true;
channel->uses_otg_pins = !of_property_read_bool(dev->of_node,
"renesas,no-otg-pins");
@@ -691,8 +689,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
ret = PTR_ERR(provider);
goto error;
} else if (channel->is_otg_channel) {
- int ret;
-
ret = device_create_file(dev, &dev_attr_role);
if (ret < 0)
goto error;
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
index 9ca20c947283..2b0f5f2b4f33 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
@@ -745,10 +745,12 @@ unsigned long inno_hdmi_phy_rk3328_clk_recalc_rate(struct clk_hw *hw,
do_div(vco, (nd * (no_a == 1 ? no_b : no_a) * no_d * 2));
}
- inno->pixclock = vco;
- dev_dbg(inno->dev, "%s rate %lu\n", __func__, inno->pixclock);
+ inno->pixclock = DIV_ROUND_CLOSEST((unsigned long)vco, 1000) * 1000;
- return vco;
+ dev_dbg(inno->dev, "%s rate %lu vco %llu\n",
+ __func__, inno->pixclock, vco);
+
+ return inno->pixclock;
}
static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw,
@@ -790,8 +792,8 @@ static int inno_hdmi_phy_rk3328_clk_set_rate(struct clk_hw *hw,
RK3328_PRE_PLL_POWER_DOWN);
/* Configure pre-pll */
- inno_update_bits(inno, 0xa0, RK3228_PCLK_VCO_DIV_5_MASK,
- RK3228_PCLK_VCO_DIV_5(cfg->vco_div_5_en));
+ inno_update_bits(inno, 0xa0, RK3328_PCLK_VCO_DIV_5_MASK,
+ RK3328_PCLK_VCO_DIV_5(cfg->vco_div_5_en));
inno_write(inno, 0xa1, RK3328_PRE_PLL_PRE_DIV(cfg->prediv));
val = RK3328_SPREAD_SPECTRUM_MOD_DISABLE;
@@ -1021,9 +1023,10 @@ inno_hdmi_phy_rk3328_power_on(struct inno_hdmi_phy *inno,
inno_write(inno, 0xac, RK3328_POST_PLL_FB_DIV_7_0(cfg->fbdiv));
if (cfg->postdiv == 1) {
- inno_write(inno, 0xaa, RK3328_POST_PLL_REFCLK_SEL_TMDS);
inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
RK3328_POST_PLL_PRE_DIV(cfg->prediv));
+ inno_write(inno, 0xaa, RK3328_POST_PLL_REFCLK_SEL_TMDS |
+ RK3328_POST_PLL_POWER_DOWN);
} else {
v = (cfg->postdiv / 2) - 1;
v &= RK3328_POST_PLL_POST_DIV_MASK;
@@ -1031,7 +1034,8 @@ inno_hdmi_phy_rk3328_power_on(struct inno_hdmi_phy *inno,
inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
RK3328_POST_PLL_PRE_DIV(cfg->prediv));
inno_write(inno, 0xaa, RK3328_POST_PLL_POST_DIV_ENABLE |
- RK3328_POST_PLL_REFCLK_SEL_TMDS);
+ RK3328_POST_PLL_REFCLK_SEL_TMDS |
+ RK3328_POST_PLL_POWER_DOWN);
}
for (v = 0; v < 14; v++)
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index eae865ff312c..b5f7a93543b0 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -467,8 +467,10 @@ static int rockchip_usb2phy_power_on(struct phy *phy)
return ret;
ret = property_enable(base, &rport->port_cfg->phy_sus, false);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(rphy->clk480m);
return ret;
+ }
/* waiting for the utmi_clk to become stable */
usleep_range(1500, 2000);
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index 24563160197f..ba805f1d4f6a 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -808,9 +808,8 @@ static int tcphy_get_mode(struct rockchip_typec_phy *tcphy)
struct extcon_dev *edev = tcphy->extcon;
union extcon_property_value property;
unsigned int id;
- bool ufp, dp;
u8 mode;
- int ret;
+ int ret, ufp, dp;
if (!edev)
return MODE_DFP_USB;
diff --git a/drivers/phy/st/phy-miphy28lp.c b/drivers/phy/st/phy-miphy28lp.c
index 068160a34f5c..e30305b77f0d 100644
--- a/drivers/phy/st/phy-miphy28lp.c
+++ b/drivers/phy/st/phy-miphy28lp.c
@@ -9,6 +9,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -484,19 +485,11 @@ static inline void miphy28lp_pcie_config_gen(struct miphy28lp_phy *miphy_phy)
static inline int miphy28lp_wait_compensation(struct miphy28lp_phy *miphy_phy)
{
- unsigned long finish = jiffies + 5 * HZ;
u8 val;
/* Waiting for Compensation to complete */
- do {
- val = readb_relaxed(miphy_phy->base + MIPHY_COMP_FSM_6);
-
- if (time_after_eq(jiffies, finish))
- return -EBUSY;
- cpu_relax();
- } while (!(val & COMP_DONE));
-
- return 0;
+ return readb_relaxed_poll_timeout(miphy_phy->base + MIPHY_COMP_FSM_6,
+ val, val & COMP_DONE, 1, 5 * USEC_PER_SEC);
}
@@ -805,7 +798,6 @@ static inline void miphy28lp_configure_usb3(struct miphy28lp_phy *miphy_phy)
static inline int miphy_is_ready(struct miphy28lp_phy *miphy_phy)
{
- unsigned long finish = jiffies + 5 * HZ;
u8 mask = HFC_PLL | HFC_RDY;
u8 val;
@@ -816,21 +808,14 @@ static inline int miphy_is_ready(struct miphy28lp_phy *miphy_phy)
if (miphy_phy->type == PHY_TYPE_SATA)
mask |= PHY_RDY;
- do {
- val = readb_relaxed(miphy_phy->base + MIPHY_STATUS_1);
- if ((val & mask) != mask)
- cpu_relax();
- else
- return 0;
- } while (!time_after_eq(jiffies, finish));
-
- return -EBUSY;
+ return readb_relaxed_poll_timeout(miphy_phy->base + MIPHY_STATUS_1,
+ val, (val & mask) == mask, 1,
+ 5 * USEC_PER_SEC);
}
static int miphy_osc_is_ready(struct miphy28lp_phy *miphy_phy)
{
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
- unsigned long finish = jiffies + 5 * HZ;
u32 val;
if (!miphy_phy->osc_rdy)
@@ -839,17 +824,10 @@ static int miphy_osc_is_ready(struct miphy28lp_phy *miphy_phy)
if (!miphy_phy->syscfg_reg[SYSCFG_STATUS])
return -EINVAL;
- do {
- regmap_read(miphy_dev->regmap,
- miphy_phy->syscfg_reg[SYSCFG_STATUS], &val);
-
- if ((val & MIPHY_OSC_RDY) != MIPHY_OSC_RDY)
- cpu_relax();
- else
- return 0;
- } while (!time_after_eq(jiffies, finish));
-
- return -EBUSY;
+ return regmap_read_poll_timeout(miphy_dev->regmap,
+ miphy_phy->syscfg_reg[SYSCFG_STATUS],
+ val, val & MIPHY_OSC_RDY, 1,
+ 5 * USEC_PER_SEC);
}
static int miphy28lp_get_resource_byname(struct device_node *child,
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 56bdea4b0bd9..e643c21c1217 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -393,6 +393,8 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
ret = of_property_read_u32(child, "reg", &index);
if (ret || index > usbphyc->nphys) {
dev_err(&phy->dev, "invalid reg property: %d\n", ret);
+ if (!ret)
+ ret = -EINVAL;
goto put_child;
}
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index bf5d80b97597..efe7abf459fd 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -606,6 +606,7 @@ static int tegra_xusb_add_usb2_port(struct tegra_xusb_padctl *padctl,
usb2->base.lane = usb2->base.ops->map(&usb2->base);
if (IS_ERR(usb2->base.lane)) {
err = PTR_ERR(usb2->base.lane);
+ tegra_xusb_port_unregister(&usb2->base);
goto out;
}
@@ -658,6 +659,7 @@ static int tegra_xusb_add_ulpi_port(struct tegra_xusb_padctl *padctl,
ulpi->base.lane = ulpi->base.ops->map(&ulpi->base);
if (IS_ERR(ulpi->base.lane)) {
err = PTR_ERR(ulpi->base.lane);
+ tegra_xusb_port_unregister(&ulpi->base);
goto out;
}
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index 471fe2e80f4e..fc2fee6d532d 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -58,7 +58,7 @@ static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled)
{
struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
- if (!phy->comparator)
+ if (!phy->comparator || !phy->comparator->set_vbus)
return -ENODEV;
return phy->comparator->set_vbus(phy->comparator, enabled);
@@ -68,7 +68,7 @@ static int omap_usb_start_srp(struct usb_otg *otg)
{
struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
- if (!phy->comparator)
+ if (!phy->comparator || !phy->comparator->start_srp)
return -ENODEV;
return phy->comparator->start_srp(phy->comparator);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index b372419d61f2..5db95e3c96bb 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -372,6 +372,14 @@ config PINCTRL_RK805
help
This selects the pinctrl driver for RK805.
+config PINCTRL_ZYNQMP
+ bool "Pinctrl driver for Xilinx ZynqMP"
+ depends on ARCH_ZYNQMP
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ This selects the pinctrl driver for Xilinx ZynqMP.
+
config PINCTRL_OCELOT
bool "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
depends on OF
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index ac537fdbc998..a779d0465dad 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
+obj-$(CONFIG_PINCTRL_ZYNQMP) += pinctrl-zynqmp.o
obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
obj-y += actions/
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 22aca6d182c0..2c1e799b029e 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -115,7 +115,7 @@ static int aspeed_disable_sig(struct aspeed_pinmux_data *ctx,
int ret = 0;
if (!exprs)
- return true;
+ return -EINVAL;
while (*exprs && !ret) {
ret = aspeed_sig_expr_disable(ctx, *exprs);
diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig
index 530426a74f75..b3cea8d56c4f 100644
--- a/drivers/pinctrl/cirrus/Kconfig
+++ b/drivers/pinctrl/cirrus/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config PINCTRL_LOCHNAGAR
tristate "Cirrus Logic Lochnagar pinctrl driver"
- depends on MFD_LOCHNAGAR
+ # Avoid clash caused by MIPS defining RST, which is used in the driver
+ depends on MFD_LOCHNAGAR && !MIPS
select GPIOLIB
select PINMUX
select PINCONF
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 9ebeef2ac7b1..a1c2dc304fb1 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1237,17 +1237,17 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
{
struct pinctrl_setting *setting, *setting2;
- struct pinctrl_state *old_state = p->state;
+ struct pinctrl_state *old_state = READ_ONCE(p->state);
int ret;
- if (p->state) {
+ if (old_state) {
/*
* For each pinmux setting in the old state, forget SW's record
* of mux owner for that pingroup. Any pingroups which are
* still owned by the new state will be re-acquired by the call
* to pinmux_enable_setting() in the loop below.
*/
- list_for_each_entry(setting, &p->state->settings, node) {
+ list_for_each_entry(setting, &old_state->settings, node) {
if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
continue;
pinmux_disable_setting(setting);
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index dbaacde1b36a..362d84c2ead4 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -223,6 +223,8 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
for (state = 0; ; state++) {
/* Retrieve the pinctrl-* property */
propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
+ if (!propname)
+ return -ENOMEM;
prop = of_find_property(np, propname, &size);
kfree(propname);
if (!prop) {
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 8f06445a8e39..2b48901f1b2a 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1021,11 +1021,6 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
break;
- case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- if (!(ctrl1 & CHV_PADCTRL1_ODEN))
- return -EINVAL;
- break;
-
case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: {
u32 cfg;
@@ -1035,6 +1030,16 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
return -EINVAL;
break;
+
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ if (ctrl1 & CHV_PADCTRL1_ODEN)
+ return -EINVAL;
+ break;
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ if (!(ctrl1 & CHV_PADCTRL1_ODEN))
+ return -EINVAL;
+ break;
}
default:
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 4e89bbf6b76a..32c6326337f7 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -459,9 +459,14 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
writel(value, padcfg0);
}
+static int __intel_gpio_get_gpio_mode(u32 value)
+{
+ return (value & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT;
+}
+
static int intel_gpio_get_gpio_mode(void __iomem *padcfg0)
{
- return (readl(padcfg0) & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT;
+ return __intel_gpio_get_gpio_mode(readl(padcfg0));
}
static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
@@ -1505,9 +1510,16 @@ int intel_pinctrl_probe_by_uid(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(intel_pinctrl_probe_by_uid);
#ifdef CONFIG_PM_SLEEP
+static bool __intel_gpio_is_direct_irq(u32 value)
+{
+ return (value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
+ (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO);
+}
+
static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int pin)
{
const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
+ u32 value;
if (!pd || !intel_pad_usable(pctrl, pin))
return false;
@@ -1522,6 +1534,24 @@ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int
gpiochip_line_is_irq(&pctrl->chip, intel_pin_to_gpio(pctrl, pin)))
return true;
+ /*
+ * The firmware on some systems may configure GPIO pins to be
+ * an interrupt source in so called "direct IRQ" mode. In such
+ * cases the GPIO controller driver has no idea if those pins
+ * are being used or not. At the same time, there is a known bug
+ * in the firmwares that don't restore the pin settings correctly
+ * after suspend, i.e. by an unknown reason the Rx value becomes
+ * inverted.
+ *
+ * Hence, let's save and restore the pins that are configured
+ * as GPIOs in the input mode with GPIROUTIOXAPIC bit set.
+ *
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=214749.
+ */
+ value = readl(intel_get_padcfg(pctrl, pin, PADCFG0));
+ if (__intel_gpio_is_direct_irq(value))
+ return true;
+
return false;
}
@@ -1631,7 +1661,12 @@ int intel_pinctrl_resume_noirq(struct device *dev)
void __iomem *padcfg;
u32 val;
- if (!intel_pinctrl_should_save(pctrl, desc->number))
+ if (!(intel_pinctrl_should_save(pctrl, desc->number) ||
+ /*
+ * If the firmware mangled the register contents too much,
+ * check the saved value for the Direct IRQ mode.
+ */
+ __intel_gpio_is_direct_irq(pads[i].padcfg0)))
continue;
padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index 7e526bcf5e0b..24502dfeb83f 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -277,12 +277,15 @@ static struct irq_chip mtk_eint_irq_chip = {
static unsigned int mtk_eint_hw_init(struct mtk_eint *eint)
{
- void __iomem *reg = eint->base + eint->regs->dom_en;
+ void __iomem *dom_en = eint->base + eint->regs->dom_en;
+ void __iomem *mask_set = eint->base + eint->regs->mask_set;
unsigned int i;
for (i = 0; i < eint->hw->ap_num; i += 32) {
- writel(0xffffffff, reg);
- reg += 4;
+ writel(0xffffffff, dom_en);
+ writel(0xffffffff, mask_set);
+ dom_en += 4;
+ mask_set += 4;
}
return 0;
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index ad502eda4afa..89ce65e5309f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -400,6 +400,7 @@ static struct meson_pmx_group meson_axg_periphs_groups[] = {
GPIO_GROUP(GPIOA_15),
GPIO_GROUP(GPIOA_16),
GPIO_GROUP(GPIOA_17),
+ GPIO_GROUP(GPIOA_18),
GPIO_GROUP(GPIOA_19),
GPIO_GROUP(GPIOA_20),
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 9f00adfefba8..5d7e368a1e7e 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1461,8 +1461,10 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
has_config = nmk_pinctrl_dt_get_config(np, &configs);
np_config = of_parse_phandle(np, "ste,config", 0);
- if (np_config)
+ if (np_config) {
has_config |= nmk_pinctrl_dt_get_config(np_config, &configs);
+ of_node_put(np_config);
+ }
if (has_config) {
const char *gpio_name;
const char *pin;
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 355bc4c748e2..02c015577cf9 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -391,8 +391,10 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
for_each_available_child_of_node(np_config, np) {
ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps, type);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(np);
goto exit;
+ }
}
return 0;
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 4c02439d3776..6dbaf096854d 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -123,6 +123,14 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+
+ /* Use special handling for Pin0 debounce */
+ if (offset == 0) {
+ pin_reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
+ if (pin_reg & INTERNAL_GPIO0_DEBOUNCE)
+ debounce = 0;
+ }
+
pin_reg = readl(gpio_dev->base + offset * 4);
if (debounce) {
@@ -178,18 +186,6 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
return ret;
}
-static int amd_gpio_set_config(struct gpio_chip *gc, unsigned offset,
- unsigned long config)
-{
- u32 debounce;
-
- if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
- return -ENOTSUPP;
-
- debounce = pinconf_to_config_argument(config);
- return amd_gpio_set_debounce(gc, offset, debounce);
-}
-
#ifdef CONFIG_DEBUG_FS
static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
{
@@ -212,6 +208,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
char *output_value;
char *output_enable;
+ seq_printf(s, "WAKE_INT_MASTER_REG: 0x%08x\n", readl(gpio_dev->base + WAKE_INT_MASTER_REG));
for (bank = 0; bank < gpio_dev->hwbank_num; bank++) {
seq_printf(s, "GPIO bank%d\t", bank);
@@ -662,7 +659,7 @@ static int amd_pinconf_get(struct pinctrl_dev *pctldev,
break;
default:
- dev_err(&gpio_dev->pdev->dev, "Invalid config param %04x\n",
+ dev_dbg(&gpio_dev->pdev->dev, "Invalid config param %04x\n",
param);
return -ENOTSUPP;
}
@@ -673,7 +670,7 @@ static int amd_pinconf_get(struct pinctrl_dev *pctldev,
}
static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
- unsigned long *configs, unsigned num_configs)
+ unsigned long *configs, unsigned int num_configs)
{
int i;
u32 arg;
@@ -715,7 +712,7 @@ static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
default:
- dev_err(&gpio_dev->pdev->dev,
+ dev_dbg(&gpio_dev->pdev->dev,
"Invalid config param %04x\n", param);
ret = -ENOTSUPP;
}
@@ -763,6 +760,20 @@ static int amd_pinconf_group_set(struct pinctrl_dev *pctldev,
return 0;
}
+static int amd_gpio_set_config(struct gpio_chip *gc, unsigned int pin,
+ unsigned long config)
+{
+ struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+
+ if (pinconf_to_config_param(config) == PIN_CONFIG_INPUT_DEBOUNCE) {
+ u32 debounce = pinconf_to_config_argument(config);
+
+ return amd_gpio_set_debounce(gc, pin, debounce);
+ }
+
+ return amd_pinconf_set(gpio_dev->pctrl, pin, &config, 1);
+}
+
static const struct pinconf_ops amd_pinconf_ops = {
.pin_config_get = amd_pinconf_get,
.pin_config_set = amd_pinconf_set,
@@ -770,6 +781,34 @@ static const struct pinconf_ops amd_pinconf_ops = {
.pin_config_group_set = amd_pinconf_group_set,
};
+static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
+{
+ struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
+ u32 pin_reg, mask;
+ int i;
+
+ mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
+ BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
+ BIT(WAKE_CNTRL_OFF_S4);
+
+ for (i = 0; i < desc->npins; i++) {
+ int pin = desc->pins[i].number;
+ const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
+
+ if (!pd)
+ continue;
+
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+
+ pin_reg = readl(gpio_dev->base + pin * 4);
+ pin_reg &= ~mask;
+ writel(pin_reg, gpio_dev->base + pin * 4);
+
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ }
+}
+
#ifdef CONFIG_PM_SLEEP
static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
{
@@ -793,6 +832,7 @@ static int amd_gpio_suspend(struct device *dev)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
int i;
for (i = 0; i < desc->npins; i++) {
@@ -801,7 +841,9 @@ static int amd_gpio_suspend(struct device *dev)
if (!amd_gpio_should_save(gpio_dev, pin))
continue;
- gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
return 0;
@@ -811,6 +853,7 @@ static int amd_gpio_resume(struct device *dev)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
int i;
for (i = 0; i < desc->npins; i++) {
@@ -819,7 +862,10 @@ static int amd_gpio_resume(struct device *dev)
if (!amd_gpio_should_save(gpio_dev, pin))
continue;
- writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
+ writel(gpio_dev->saved_regs[i], gpio_dev->base + pin * 4);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
return 0;
@@ -845,6 +891,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
int irq_base;
struct resource *res;
struct amd_gpio *gpio_dev;
+ struct gpio_irq_chip *girq;
gpio_dev = devm_kzalloc(&pdev->dev,
sizeof(struct amd_gpio), GFP_KERNEL);
@@ -906,6 +953,18 @@ static int amd_gpio_probe(struct platform_device *pdev)
return PTR_ERR(gpio_dev->pctrl);
}
+ /* Disable and mask interrupts */
+ amd_gpio_irq_init(gpio_dev);
+
+ girq = &gpio_dev->gc.irq;
+ girq->chip = &amd_gpio_irqchip;
+ /* This will let us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+
ret = gpiochip_add_data(&gpio_dev->gc, gpio_dev);
if (ret)
return ret;
@@ -917,17 +976,6 @@ static int amd_gpio_probe(struct platform_device *pdev)
goto out2;
}
- ret = gpiochip_irqchip_add(&gpio_dev->gc,
- &amd_gpio_irqchip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&pdev->dev, "could not add irqchip\n");
- ret = -ENODEV;
- goto out2;
- }
-
ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, gpio_dev);
if (ret)
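
The suspend/resume hunks above use a mask-on-save, merge-on-restore pattern so the volatile interrupt-pending bit is never written back stale. A minimal sketch of the idea, with a hypothetical bit position standing in for PIN_IRQ_PENDING:

    #include <stdint.h>
    #include <stdio.h>

    #define PIN_IRQ_PENDING (1u << 28)   /* hypothetical position */

    static uint32_t hw_reg = 0xdeadbeef; /* pretend MMIO register */

    static uint32_t save(void)
    {
        /* Never persist the volatile pending bit. */
        return hw_reg & ~PIN_IRQ_PENDING;
    }

    static void restore(uint32_t saved)
    {
        /* Keep whatever pending state the hardware has right now. */
        hw_reg = saved | (hw_reg & PIN_IRQ_PENDING);
    }

    int main(void)
    {
        uint32_t s = save();
        hw_reg |= PIN_IRQ_PENDING;  /* an IRQ fires while suspended */
        restore(s);
        printf("0x%08x\n", (unsigned)hw_reg); /* pending bit preserved */
        return 0;
    }
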
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index d4a192df5fab..55ff463bb996 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -17,6 +17,7 @@
#define AMD_GPIO_PINS_BANK3 32
#define WAKE_INT_MASTER_REG 0xfc
+#define INTERNAL_GPIO0_DEBOUNCE (1 << 15)
#define EOI_MASK (1 << 29)
#define WAKE_INT_STATUS_REG0 0x2f8
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index d6de4d360cd4..409506c1de14 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -928,6 +928,13 @@ static const struct of_device_id atmel_pctrl_of_match[] = {
}
};
+/*
+ * These lock classes tell lockdep that the parent IRQ and the child IRQs do
+ * not share the same class, so it does not raise false positives.
+ */
+static struct lock_class_key atmel_lock_key;
+static struct lock_class_key atmel_request_key;
+
static int atmel_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1011,8 +1018,10 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
pin_desc[i].number = i;
/* Pin naming convention: P(bank_name)(bank_pin_number). */
- pin_desc[i].name = kasprintf(GFP_KERNEL, "P%c%d",
- bank + 'A', line);
+ pin_desc[i].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "P%c%d",
+ bank + 'A', line);
+ if (!pin_desc[i].name)
+ return -ENOMEM;
group->name = group_names[i] = pin_desc[i].name;
group->pin = pin_desc[i].number;
@@ -1069,7 +1078,6 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
dev_err(dev, "can't add the irq domain\n");
return -ENODEV;
}
- atmel_pioctrl->irq_domain->name = "atmel gpio";
for (i = 0; i < atmel_pioctrl->npins; i++) {
int irq = irq_create_mapping(atmel_pioctrl->irq_domain, i);
@@ -1077,6 +1085,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
irq_set_chip_and_handler(irq, &atmel_gpio_irq_chip,
handle_simple_irq);
irq_set_chip_data(irq, atmel_pioctrl);
+ irq_set_lockdep_class(irq, &atmel_lock_key, &atmel_request_key);
dev_dbg(dev,
"atmel gpio irq domain: hwirq: %d, linux irq: %d\n",
i, irq);
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index d6e7e9f0ddec..39a55fd85b19 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1891,7 +1891,7 @@ static int at91_gpio_probe(struct platform_device *pdev)
}
for (i = 0; i < chip->ngpio; i++)
- names[i] = kasprintf(GFP_KERNEL, "pio%c%d", alias_idx + 'A', i);
+ names[i] = devm_kasprintf(&pdev->dev, GFP_KERNEL, "pio%c%d", alias_idx + 'A', i);
chip->names = (const char *const *)names;
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 0a951a75c82b..cccf1cc8736c 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -420,7 +420,7 @@ static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
regmap_update_bits(info->map, REG_ALT(0, info, pin->pin),
BIT(p), f << p);
regmap_update_bits(info->map, REG_ALT(1, info, pin->pin),
- BIT(p), f << (p - 1));
+ BIT(p), (f >> 1) << p);
return 0;
}
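
The ocelot fix above corrects how a 2-bit alternate-function value f is split across the ALT0/ALT1 registers: bit 0 of f belongs at position p in ALT0 and bit 1 at the same position in ALT1. A minimal sketch of the corrected split (the register layout here is illustrative, not the hardware's):

    #include <stdint.h>
    #include <stdio.h>

    /* Place bit 0 of f at position p in alt0, bit 1 at position p in alt1. */
    static void set_mux(uint32_t *alt0, uint32_t *alt1,
                        unsigned int p, unsigned int f)
    {
        *alt0 = (*alt0 & ~(1u << p)) | ((f & 1u) << p);
        *alt1 = (*alt1 & ~(1u << p)) | (((f >> 1) & 1u) << p);
    }

    int main(void)
    {
        uint32_t alt0 = 0, alt1 = 0;

        set_mux(&alt0, &alt1, 5, 3);     /* function 3 on pin 5 */
        printf("%x %x\n", (unsigned)alt0, (unsigned)alt1); /* 20 20 */
        return 0;
    }
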
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 4b972be3487f..9388d6fac7d4 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -62,7 +62,7 @@ enum rockchip_pinctrl_type {
RK3399,
};
-/**
+/*
* Encode variants of iomux registers into a type variable
*/
#define IOMUX_GPIO_ONLY BIT(0)
@@ -72,6 +72,7 @@ enum rockchip_pinctrl_type {
#define IOMUX_WIDTH_3BIT BIT(4)
/**
+ * struct rockchip_iomux
* @type: iomux variant using IOMUX_* constants
* @offset: if initialized to -1 it will be autocalculated, by specifying
* an initial offset value the relevant source offset can be reset
@@ -82,7 +83,7 @@ struct rockchip_iomux {
int offset;
};
-/**
+/*
* enum type index corresponding to rockchip_perpin_drv_list arrays index.
*/
enum rockchip_pin_drv_type {
@@ -94,7 +95,7 @@ enum rockchip_pin_drv_type {
DRV_TYPE_MAX
};
-/**
+/*
* enum type index corresponding to rockchip_pull_list arrays index.
*/
enum rockchip_pin_pull_type {
@@ -104,6 +105,7 @@ enum rockchip_pin_pull_type {
};
/**
+ * struct rockchip_drv
* @drv_type: drive strength variant using rockchip_perpin_drv_type
* @offset: if initialized to -1 it will be autocalculated, by specifying
* an initial offset value the relevant source offset can be reset
@@ -117,8 +119,9 @@ struct rockchip_drv {
};
/**
+ * struct rockchip_pin_bank
* @reg_base: register base of the gpio bank
- * @reg_pull: optional separate register for additional pull settings
+ * @regmap_pull: optional separate register for additional pull settings
* @clk: clock of the gpio bank
* @irq: interrupt of the gpio bank
* @saved_masks: Saved content of GPIO_INTEN at suspend time.
@@ -136,6 +139,8 @@ struct rockchip_drv {
* @gpio_chip: gpiolib chip
* @grange: gpio range
* @slock: spinlock for the gpio bank
+ * @toggle_edge_mode: bit mask to toggle (falling/rising) edge mode
+ * @recalced_mask: bit mask to indicate a need to recalculate the mask
* @route_mask: bits describing the routing pins of per bank
*/
struct rockchip_pin_bank {
@@ -310,6 +315,7 @@ enum rockchip_mux_route_location {
* @bank_num: bank number.
* @pin: index at register or used to calc index.
* @func: the min pin.
+ * @route_location: the mux route location (same, pmu, grf).
* @route_offset: the max pin.
* @route_val: the register offset.
*/
@@ -322,8 +328,6 @@ struct rockchip_mux_route_data {
u32 route_val;
};
-/**
- */
struct rockchip_pin_ctrl {
struct rockchip_pin_bank *pin_banks;
u32 nr_banks;
@@ -361,9 +365,7 @@ struct rockchip_pin_config {
* @name: name of the pin group, used to lookup the group.
* @pins: the pins included in this group.
* @npins: number of pins included in this group.
- * @func: the mux function number to be programmed when selected.
- * @configs: the config values to be set for each pin
- * @nconfigs: number of configs for each pin
+ * @data: local pin configuration
*/
struct rockchip_pin_group {
const char *name;
@@ -376,7 +378,7 @@ struct rockchip_pin_group {
* struct rockchip_pmx_func: represent a pin function.
* @name: name of the pin function, used to lookup the function.
* @groups: one or more names of pin groups that provide this function.
- * @num_groups: number of groups included in @groups.
+ * @ngroups: number of groups included in @groups.
*/
struct rockchip_pmx_func {
const char *name;
@@ -2534,6 +2536,7 @@ static int rockchip_pinctrl_parse_groups(struct device_node *np,
np_config = of_find_node_by_phandle(be32_to_cpup(phandle));
ret = pinconf_generic_parse_dt_config(np_config, NULL,
&grp->data[j].configs, &grp->data[j].nconfigs);
+ of_node_put(np_config);
if (ret)
return ret;
}
diff --git a/drivers/pinctrl/pinctrl-rza2.c b/drivers/pinctrl/pinctrl-rza2.c
index eda88cdf870d..8c3174d00750 100644
--- a/drivers/pinctrl/pinctrl-rza2.c
+++ b/drivers/pinctrl/pinctrl-rza2.c
@@ -14,6 +14,7 @@
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinmux.h>
@@ -46,6 +47,7 @@ struct rza2_pinctrl_priv {
struct pinctrl_dev *pctl;
struct pinctrl_gpio_range gpio_range;
int npins;
+ struct mutex mutex; /* serialize adding groups and functions */
};
#define RZA2_PDR(port) (0x0000 + (port) * 2) /* Direction 16-bit */
@@ -359,10 +361,14 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
psel_val[i] = MUX_FUNC(value);
}
+ mutex_lock(&priv->mutex);
+
/* Register a single pin group listing all the pins we read from DT */
gsel = pinctrl_generic_add_group(pctldev, np->name, pins, npins, NULL);
- if (gsel < 0)
- return gsel;
+ if (gsel < 0) {
+ ret = gsel;
+ goto unlock;
+ }
/*
* Register a single group function where the 'data' is an array PSEL
@@ -391,6 +397,8 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
(*map)->data.mux.function = np->name;
*num_maps = 1;
+ mutex_unlock(&priv->mutex);
+
return 0;
remove_function:
@@ -399,6 +407,9 @@ remove_function:
remove_group:
pinctrl_generic_remove_group(pctldev, gsel);
+unlock:
+ mutex_unlock(&priv->mutex);
+
dev_err(priv->dev, "Unable to parse DT node %s\n", np->name);
return ret;
@@ -476,6 +487,8 @@ static int rza2_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
+ mutex_init(&priv->mutex);
+
platform_set_drvdata(pdev, priv);
priv->npins = (int)(uintptr_t)of_device_get_match_data(&pdev->dev) *
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index fb1c8965cb99..ce5be6f0b7aa 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -345,6 +345,8 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
if (!pcs->fmask)
return 0;
function = pinmux_generic_get_function(pctldev, fselector);
+ if (!function)
+ return -EINVAL;
func = function->data;
if (!func)
return -EINVAL;
@@ -703,7 +705,7 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs)
mux_bytes = pcs->width / BITS_PER_BYTE;
- if (pcs->bits_per_mux) {
+ if (pcs->bits_per_mux && pcs->fmask) {
pcs->bits_per_pin = fls(pcs->fmask);
nr_pins = (pcs->size * BITS_PER_BYTE) / pcs->bits_per_pin;
num_pins_in_register = pcs->width / pcs->bits_per_pin;
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
new file mode 100644
index 000000000000..9b3b4dc88a1f
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -0,0 +1,1074 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ZynqMP pin controller
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajanv@xilinx.com>
+ * Chirag Parekh <chirag.parekh@xilinx.com>
+ */
+
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include "core.h"
+#include "pinctrl-utils.h"
+
+#define ZYNQMP_PIN_PREFIX "MIO"
+#define PINCTRL_GET_FUNC_NAME_RESP_LEN 16
+#define MAX_FUNC_NAME_LEN 16
+#define MAX_GROUP_PIN 50
+#define END_OF_FUNCTIONS "END_OF_FUNCTIONS"
+#define NUM_GROUPS_PER_RESP 6
+
+#define PINCTRL_GET_FUNC_GROUPS_RESP_LEN 12
+#define PINCTRL_GET_PIN_GROUPS_RESP_LEN 12
+#define NA_GROUP -1
+#define RESERVED_GROUP -2
+
+/**
+ * struct zynqmp_pmux_function - a pinmux function
+ * @name: Name of the pinmux function
+ * @groups: List of pingroups for this function
+ * @ngroups: Number of entries in @groups
+ *
+ * This structure holds information about pin control function
+ * and function group names supporting that function.
+ */
+struct zynqmp_pmux_function {
+ char name[MAX_FUNC_NAME_LEN];
+ const char * const *groups;
+ unsigned int ngroups;
+};
+
+/**
+ * struct zynqmp_pinctrl - driver data
+ * @pctrl: Pinctrl device
+ * @groups: Pingroups
+ * @ngroups: Number of @groups
+ * @funcs: Pinmux functions
+ * @nfuncs: Number of @funcs
+ *
+ * This struct is stored as driver data and used to retrieve
+ * information regarding pin control functions, groups and
+ * group pins.
+ */
+struct zynqmp_pinctrl {
+ struct pinctrl_dev *pctrl;
+ const struct zynqmp_pctrl_group *groups;
+ unsigned int ngroups;
+ const struct zynqmp_pmux_function *funcs;
+ unsigned int nfuncs;
+};
+
+/**
+ * struct zynqmp_pctrl_group - Pin control group info
+ * @name: Group name
+ * @pins: Group pin numbers
+ * @npins: Number of pins in group
+ */
+struct zynqmp_pctrl_group {
+ const char *name;
+ unsigned int pins[MAX_GROUP_PIN];
+ unsigned int npins;
+};
+
+/**
+ * enum zynqmp_pin_config_param - possible pin configuration parameters
+ * @PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard,
+ * the argument to this parameter (on a
+ * custom format) tells the driver which
+ * alternative IO standard to use
+ * @PIN_CONFIG_SCHMITTCMOS: this parameter (on a custom format) allows
+ * to select schmitt or cmos input for MIO pins
+ */
+enum zynqmp_pin_config_param {
+ PIN_CONFIG_IOSTANDARD = PIN_CONFIG_END + 1,
+ PIN_CONFIG_SCHMITTCMOS,
+};
+
+static const struct pinconf_generic_params zynqmp_dt_params[] = {
+ {"io-standard", PIN_CONFIG_IOSTANDARD, IO_STANDARD_LVCMOS18},
+ {"schmitt-cmos", PIN_CONFIG_SCHMITTCMOS, PIN_INPUT_TYPE_SCHMITT},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct
+pin_config_item zynqmp_conf_items[ARRAY_SIZE(zynqmp_dt_params)] = {
+ PCONFDUMP(PIN_CONFIG_IOSTANDARD, "IO-standard", NULL, true),
+ PCONFDUMP(PIN_CONFIG_SCHMITTCMOS, "schmitt-cmos", NULL, true),
+};
+#endif
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+static const struct pinctrl_pin_desc zynqmp_pins;
+static struct pinctrl_desc zynqmp_desc;
+
+/**
+ * zynqmp_pctrl_get_groups_count() - get group count
+ * @pctldev: Pincontrol device pointer.
+ *
+ * Get total groups count.
+ *
+ * Return: group count.
+ */
+static int zynqmp_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->ngroups;
+}
+
+/**
+ * zynqmp_pctrl_get_group_name() - get group name
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Group ID.
+ *
+ * Get the group's name.
+ *
+ * Return: group name.
+ */
+static const char *zynqmp_pctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->groups[selector].name;
+}
+
+/**
+ * zynqmp_pctrl_get_group_pins() - get group pins
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Group ID.
+ * @pins: Pin numbers.
+ * @npins: Number of pins in group.
+ *
+ * Get the group's pin count and pin numbers.
+ *
+ * Return: Success.
+ */
+static int zynqmp_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *npins)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pctrl->groups[selector].pins;
+ *npins = pctrl->groups[selector].npins;
+
+ return 0;
+}
+
+static const struct pinctrl_ops zynqmp_pctrl_ops = {
+ .get_groups_count = zynqmp_pctrl_get_groups_count,
+ .get_group_name = zynqmp_pctrl_get_group_name,
+ .get_group_pins = zynqmp_pctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+/**
+ * zynqmp_pinmux_request_pin() - Request a pin for muxing
+ * @pctldev: Pincontrol device pointer.
+ * @pin: Pin number.
+ *
+ * Request a pin from firmware for muxing.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinmux_request_pin(struct pinctrl_dev *pctldev,
+ unsigned int pin)
+{
+ int ret;
+
+ if (!eemi_ops->pinctrl_request)
+ return -ENOTSUPP;
+
+ ret = eemi_ops->pinctrl_request(pin);
+ if (ret) {
+ dev_err(pctldev->dev, "request failed for pin %u\n", pin);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_pmux_get_functions_count() - get number of functions
+ * @pctldev: Pincontrol device pointer.
+ *
+ * Get total function count.
+ *
+ * Return: function count.
+ */
+static int zynqmp_pmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->nfuncs;
+}
+
+/**
+ * zynqmp_pmux_get_function_name() - get function name
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Function ID.
+ *
+ * Get function's name.
+ *
+ * Return: function name.
+ */
+static const char *zynqmp_pmux_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->funcs[selector].name;
+}
+
+/**
+ * zynqmp_pmux_get_function_groups() - Get groups for the function
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Function ID
+ * @groups: Group names.
+ * @num_groups: Number of function groups.
+ *
+ * Get function's group count and group names.
+ *
+ * Return: Success.
+ */
+static int zynqmp_pmux_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctrl->funcs[selector].groups;
+ *num_groups = pctrl->funcs[selector].ngroups;
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinmux_set_mux() - Set requested function for the group
+ * @pctldev: Pincontrol device pointer.
+ * @function: Function ID.
+ * @group: Group ID.
+ *
+ * Loop through all pins of the group and call the firmware API
+ * to set the requested function for each pin in the group.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ unsigned int group)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct zynqmp_pctrl_group *pgrp = &pctrl->groups[group];
+ int ret, i;
+
+ if (!eemi_ops->pinctrl_set_function)
+ return -ENOTSUPP;
+
+ for (i = 0; i < pgrp->npins; i++) {
+ unsigned int pin = pgrp->pins[i];
+
+ ret = eemi_ops->pinctrl_set_function(pin, function);
+ if (ret) {
+ dev_err(pctldev->dev, "set mux failed for pin %u\n",
+ pin);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinmux_release_pin() - Release a pin
+ * @pctldev: Pincontrol device pointer.
+ * @pin: Pin number.
+ *
+ * Release a pin from firmware.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinmux_release_pin(struct pinctrl_dev *pctldev,
+ unsigned int pin)
+{
+ int ret;
+
+ if (!eemi_ops->pinctrl_release)
+ return -ENOTSUPP;
+
+ ret = eemi_ops->pinctrl_release(pin);
+ if (ret) {
+ dev_err(pctldev->dev, "free pin failed for pin %u\n",
+ pin);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops zynqmp_pinmux_ops = {
+ .request = zynqmp_pinmux_request_pin,
+ .get_functions_count = zynqmp_pmux_get_functions_count,
+ .get_function_name = zynqmp_pmux_get_function_name,
+ .get_function_groups = zynqmp_pmux_get_function_groups,
+ .set_mux = zynqmp_pinmux_set_mux,
+ .free = zynqmp_pinmux_release_pin,
+};
+
+/**
+ * zynqmp_pinconf_cfg_get() - get config value for the pin
+ * @pctldev: Pin control device pointer.
+ * @pin: Pin number.
+ * @config: Value of config param.
+ *
+ * Get value of the requested configuration parameter for the
+ * given pin.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinconf_cfg_get(struct pinctrl_dev *pctldev,
+ unsigned int pin,
+ unsigned long *config)
+{
+ int ret;
+ unsigned int arg = 0, param = pinconf_to_config_param(*config);
+
+ if (!eemi_ops->pinctrl_get_config)
+ return -ENOTSUPP;
+
+ if (pin >= zynqmp_desc.npins)
+ return -ENOTSUPP;
+
+ switch (param) {
+ case PIN_CONFIG_SLEW_RATE:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_SLEW_RATE,
+ &arg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ &arg);
+ if (arg != PM_PINCTRL_BIAS_PULL_UP)
+ return -EINVAL;
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ &arg);
+ if (arg != PM_PINCTRL_BIAS_PULL_DOWN)
+ return -EINVAL;
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_BIAS_STATUS,
+ &arg);
+ if (arg != PM_PINCTRL_BIAS_DISABLE)
+ return -EINVAL;
+ arg = 1;
+ break;
+ case PIN_CONFIG_IOSTANDARD:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_VOLTAGE_STATUS,
+ &arg);
+ break;
+ case PIN_CONFIG_SCHMITTCMOS:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS,
+ &arg);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_DRIVE_STRENGTH,
+ &arg);
+ switch (arg) {
+ case PM_PINCTRL_DRIVE_STRENGTH_2MA:
+ arg = DRIVE_STRENGTH_2MA;
+ break;
+ case PM_PINCTRL_DRIVE_STRENGTH_4MA:
+ arg = DRIVE_STRENGTH_4MA;
+ break;
+ case PM_PINCTRL_DRIVE_STRENGTH_8MA:
+ arg = DRIVE_STRENGTH_8MA;
+ break;
+ case PM_PINCTRL_DRIVE_STRENGTH_12MA:
+ arg = DRIVE_STRENGTH_12MA;
+ break;
+ default:
+ /* Invalid drive strength */
+ dev_warn(pctldev->dev,
+ "Invalid drive strength for pin %d\n",
+ pin);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+}
+
+/**
+ * zynqmp_pinconf_cfg_set() - Set requested config for the pin
+ * @pctldev: Pincontrol device pointer.
+ * @pin: Pin number.
+ * @configs: Configuration to set.
+ * @num_configs: Number of configurations.
+ *
+ * Loop through all configurations and call the firmware API
+ * to set the requested configurations for the pin.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
+{
+ int i, ret;
+
+ if (!eemi_ops->pinctrl_set_config)
+ return -ENOTSUPP;
+
+ if (pin >= zynqmp_desc.npins)
+ return -ENOTSUPP;
+
+ for (i = 0; i < num_configs; i++) {
+ unsigned int param = pinconf_to_config_param(configs[i]);
+ unsigned int arg = pinconf_to_config_argument(configs[i]);
+ unsigned int value;
+
+ switch (param) {
+ case PIN_CONFIG_SLEW_RATE:
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_SLEW_RATE,
+ arg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ PM_PINCTRL_BIAS_PULL_UP);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ PM_PINCTRL_BIAS_PULL_DOWN);
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_BIAS_STATUS,
+ PM_PINCTRL_BIAS_DISABLE);
+ break;
+ case PIN_CONFIG_SCHMITTCMOS:
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS,
+ arg);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ switch (arg) {
+ case DRIVE_STRENGTH_2MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_2MA;
+ break;
+ case DRIVE_STRENGTH_4MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_4MA;
+ break;
+ case DRIVE_STRENGTH_8MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_8MA;
+ break;
+ case DRIVE_STRENGTH_12MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_12MA;
+ break;
+ default:
+ /* Invalid drive strength */
+ dev_warn(pctldev->dev,
+ "Invalid drive strength for pin %d\n",
+ pin);
+ return -EINVAL;
+ }
+
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_DRIVE_STRENGTH,
+ value);
+ break;
+ case PIN_CONFIG_IOSTANDARD:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_VOLTAGE_STATUS,
+ &value);
+
+ if (arg != value)
+ dev_warn(pctldev->dev,
+ "Invalid IO Standard requested for pin %d\n",
+ pin);
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ case PIN_CONFIG_LOW_POWER_MODE:
+ /*
+ * These cases are mentioned in the dts, but the
+ * configurable registers are unknown, so ignore them
+ * for now to avoid boot-time warnings.
+ */
+ ret = 0;
+ break;
+ default:
+ dev_warn(pctldev->dev,
+ "unsupported configuration parameter '%u'\n",
+ param);
+ ret = -ENOTSUPP;
+ break;
+ }
+ if (ret)
+ dev_warn(pctldev->dev,
+ "%s failed: pin %u param %u value %u\n",
+ __func__, pin, param, arg);
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinconf_group_set() - Set requested config for the group
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Group ID.
+ * @configs: Configuration to set.
+ * @num_configs: Number of configurations.
+ *
+ * Call function to set configs for each pin in group.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ int i, ret;
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct zynqmp_pctrl_group *pgrp = &pctrl->groups[selector];
+
+ for (i = 0; i < pgrp->npins; i++) {
+ ret = zynqmp_pinconf_cfg_set(pctldev, pgrp->pins[i], configs,
+ num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops zynqmp_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = zynqmp_pinconf_cfg_get,
+ .pin_config_set = zynqmp_pinconf_cfg_set,
+ .pin_config_group_set = zynqmp_pinconf_group_set,
+};
+
+static struct pinctrl_desc zynqmp_desc = {
+ .name = "zynqmp_pinctrl",
+ .owner = THIS_MODULE,
+ .pctlops = &zynqmp_pctrl_ops,
+ .pmxops = &zynqmp_pinmux_ops,
+ .confops = &zynqmp_pinconf_ops,
+};
+
+/**
+ * zynqmp_pinctrl_get_function_groups() - get groups for the function
+ * @fid: Function ID.
+ * @index: Group index.
+ * @groups: Groups data.
+ *
+ * Call firmware API to get groups for the given function.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_function_groups(u32 fid, u32 index, u16 *groups)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_FUNCTION_GROUPS;
+ qdata.arg1 = fid;
+ qdata.arg2 = index;
+
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
+ memcpy(groups, &ret_payload[1], PINCTRL_GET_FUNC_GROUPS_RESP_LEN);
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_get_func_num_groups() - get number of groups in function
+ * @fid: Function ID.
+ * @ngroups: Number of groups in function.
+ *
+ * Call firmware API to get number of group in function.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_func_num_groups(u32 fid, unsigned int *ngroups)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS;
+ qdata.arg1 = fid;
+
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
+ *ngroups = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_func_groups() - prepare function and groups data
+ * @dev: Device pointer.
+ * @fid: Function ID.
+ * @func: Function data.
+ * @groups: Groups data.
+ *
+ * Query the firmware for the group IDs of each function. Group names
+ * are derived from the group's index within the function: the first
+ * group of the "eth0" function is named "eth0_0", the second "eth0_1",
+ * and so on.
+ *
+ * The same name is stored under the global group ID returned by the
+ * firmware: if the first group ID of "eth0" is x, groups[x] is named
+ * "eth0_0".
+ *
+ * Once this is done for every function, each function has its group
+ * names and each group has its own name.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_func_groups(struct device *dev, u32 fid,
+ struct zynqmp_pmux_function *func,
+ struct zynqmp_pctrl_group *groups)
+{
+ u16 resp[NUM_GROUPS_PER_RESP] = {0};
+ const char **fgroups;
+ int ret, index, i;
+
+ fgroups = devm_kzalloc(dev, sizeof(*fgroups) * func->ngroups,
+ GFP_KERNEL);
+ if (!fgroups)
+ return -ENOMEM;
+
+ for (index = 0; index < func->ngroups; index += NUM_GROUPS_PER_RESP) {
+ ret = zynqmp_pinctrl_get_function_groups(fid, index, resp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NUM_GROUPS_PER_RESP; i++) {
+ if (resp[i] == (u16)NA_GROUP)
+ goto done;
+ if (resp[i] == (u16)RESERVED_GROUP)
+ continue;
+ fgroups[index + i] = devm_kasprintf(dev, GFP_KERNEL,
+ "%s_%d_grp",
+ func->name,
+ index + i);
+ groups[resp[i]].name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s_%d_grp",
+ func->name,
+ index + i);
+ }
+ }
+done:
+ func->groups = fgroups;
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_get_function_name() - get function name
+ * @fid: Function ID.
+ * @name: Function name
+ *
+ * Call firmware API to get name of given function.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_function_name(u32 fid, char *name)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ qdata.qid = PM_QID_PINCTRL_GET_FUNCTION_NAME;
+ qdata.arg1 = fid;
+
+ eemi_ops->query_data(qdata, ret_payload);
+ memcpy(name, ret_payload, PINCTRL_GET_FUNC_NAME_RESP_LEN);
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinctrl_get_num_functions() - get number of supported functions
+ * @nfuncs: Number of functions.
+ *
+ * Call firmware API to get number of functions supported by system/board.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_num_functions(unsigned int *nfuncs)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_NUM_FUNCTIONS;
+
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
+ *nfuncs = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_get_pin_groups() - get groups for the pin
+ * @pin: Pin number.
+ * @index: Group index.
+ * @groups: Groups data.
+ *
+ * Call firmware API to get groups for the given pin.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_pin_groups(u32 pin, u32 index, u16 *groups)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_PIN_GROUPS;
+ qdata.arg1 = pin;
+ qdata.arg2 = index;
+
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
+ memcpy(groups, &ret_payload[1], PINCTRL_GET_PIN_GROUPS_RESP_LEN);
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_group_add_pin() - add pin to given group
+ * @group: Group data.
+ * @pin: Pin number.
+ *
+ * Append the pin number to the end of the group's pin array and
+ * increment the group's pin count.
+ */
+static void zynqmp_pinctrl_group_add_pin(struct zynqmp_pctrl_group *group,
+ unsigned int pin)
+{
+ group->pins[group->npins++] = pin;
+}
+
+/**
+ * zynqmp_pinctrl_create_pin_groups() - assign pins to respective groups
+ * @dev: Device pointer.
+ * @groups: Groups data.
+ * @pin: Pin number.
+ *
+ * Query firmware to get groups available for the given pin.
+ * Based on the firmware response (the group IDs for the pin), add the
+ * pin number to each group's pin array.
+ *
+ * Once all pins have been queried, each group holds its pin count and
+ * pin numbers.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_create_pin_groups(struct device *dev,
+ struct zynqmp_pctrl_group *groups,
+ unsigned int pin)
+{
+ int ret, i, index = 0;
+ u16 resp[NUM_GROUPS_PER_RESP] = {0};
+
+ do {
+ ret = zynqmp_pinctrl_get_pin_groups(pin, index, resp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NUM_GROUPS_PER_RESP; i++) {
+ if (resp[i] == (u16)NA_GROUP)
+ goto done;
+ if (resp[i] == (u16)RESERVED_GROUP)
+ continue;
+ zynqmp_pinctrl_group_add_pin(&groups[resp[i]], pin);
+ }
+ index += NUM_GROUPS_PER_RESP;
+ } while (1);
+
+done:
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_group_pins() - prepare each group's pin data
+ * @dev: Device pointer.
+ * @groups: Groups data.
+ * @ngroups: Number of groups.
+ *
+ * Prepare the pin numbers and pin count for each group.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_group_pins(struct device *dev,
+ struct zynqmp_pctrl_group *groups,
+ unsigned int ngroups)
+{
+ unsigned int pin;
+ int ret;
+
+ for (pin = 0; pin < zynqmp_desc.npins; pin++) {
+ ret = zynqmp_pinctrl_create_pin_groups(dev, groups, pin);
+ if (ret)
+ goto done;
+ }
+done:
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_function_info() - prepare function info
+ * @dev: Device pointer.
+ * @pctrl: Pin control driver data.
+ *
+ * Query firmware for functions, groups and pin information and
+ * prepare pin control driver data.
+ *
+ * Query the number of functions and the number of function groups
+ * (the number of groups in a given function) to allocate the buffers
+ * needed for functions and groups. Once the buffers are allocated,
+ * query and store the required information (number of groups and
+ * group names for each function, number of pins and pin numbers for
+ * each group).
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_function_info(struct device *dev,
+ struct zynqmp_pinctrl *pctrl)
+{
+ struct zynqmp_pmux_function *funcs;
+ struct zynqmp_pctrl_group *groups;
+ int ret, i;
+
+ ret = zynqmp_pinctrl_get_num_functions(&pctrl->nfuncs);
+ if (ret)
+ return ret;
+
+ funcs = devm_kzalloc(dev, sizeof(*funcs) * pctrl->nfuncs, GFP_KERNEL);
+ if (!funcs)
+ return -ENOMEM;
+
+ for (i = 0; i < pctrl->nfuncs; i++) {
+ zynqmp_pinctrl_get_function_name(i, funcs[i].name);
+
+ ret = zynqmp_pinctrl_get_func_num_groups(i, &funcs[i].ngroups);
+ if (ret)
+ goto err;
+ pctrl->ngroups += funcs[i].ngroups;
+ }
+
+ groups = devm_kzalloc(dev, sizeof(*groups) * pctrl->ngroups,
+ GFP_KERNEL);
+ if (!groups)
+ return -ENOMEM;
+
+ for (i = 0; i < pctrl->nfuncs; i++) {
+ ret = zynqmp_pinctrl_prepare_func_groups(dev, i, &funcs[i],
+ groups);
+ if (ret)
+ goto err;
+ }
+
+ ret = zynqmp_pinctrl_prepare_group_pins(dev, groups, pctrl->ngroups);
+ if (ret)
+ goto err;
+
+ pctrl->funcs = funcs;
+ pctrl->groups = groups;
+
+err:
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_get_num_pins() - get number of pins in system
+ * @npins: Number of pins in system/board.
+ *
+ * Call firmware API to get number of pins.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_num_pins(unsigned int *npins)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_NUM_PINS;
+
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
+ *npins = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_pin_desc() - prepare pin description info
+ * @dev: Device pointer.
+ * @zynqmp_pins: Pin information.
+ * @npins: Number of pins.
+ *
+ * Query the pin count from the firmware and prepare a pin
+ * description for each pin, containing the pin number and name.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev,
+ const struct pinctrl_pin_desc **zynqmp_pins,
+ unsigned int *npins)
+{
+ struct pinctrl_pin_desc *pins, *pin;
+ int ret;
+ int i;
+
+ ret = zynqmp_pinctrl_get_num_pins(npins);
+ if (ret)
+ return ret;
+
+ pins = devm_kzalloc(dev, sizeof(*pins) * *npins, GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < *npins; i++) {
+ pin = &pins[i];
+ pin->number = i;
+ pin->name = devm_kasprintf(dev, GFP_KERNEL, "%s%d",
+ ZYNQMP_PIN_PREFIX, i);
+ }
+
+ *zynqmp_pins = pins;
+
+ return 0;
+}
+
+static int zynqmp_pinctrl_probe(struct platform_device *pdev)
+{
+ struct zynqmp_pinctrl *pctrl;
+ int ret;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "xlnx,pinctrl-zynqmp")) {
+ dev_err(&pdev->dev, "ERROR: This binding is deprecated, please use new compatible binding\n");
+ return -ENOENT;
+ }
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ if (!eemi_ops->query_data) {
+ dev_err(&pdev->dev, "%s: Firmware interface not available\n",
+ __func__);
+ ret = -ENOTSUPP;
+ goto err;
+ }
+
+ ret = zynqmp_pinctrl_prepare_pin_desc(&pdev->dev,
+ &zynqmp_desc.pins,
+ &zynqmp_desc.npins);
+ if (ret) {
+ dev_err(&pdev->dev, "%s() pin desc prepare fail with %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ ret = zynqmp_pinctrl_prepare_function_info(&pdev->dev, pctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "%s() function info prepare fail with %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ pctrl->pctrl = pinctrl_register(&zynqmp_desc, &pdev->dev, pctrl);
+ if (IS_ERR(pctrl->pctrl)) {
+ ret = PTR_ERR(pctrl->pctrl);
+ goto err;
+ }
+ platform_set_drvdata(pdev, pctrl);
+
+ dev_info(&pdev->dev, "zynqmp pinctrl initialized\n");
+err:
+ return ret;
+}
+
+static int zynqmp_pinctrl_remove(struct platform_device *pdev)
+{
+ struct zynqmp_pinctrl *pctrl = platform_get_drvdata(pdev);
+
+ pinctrl_unregister(pctrl->pctrl);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_pinctrl_of_match[] = {
+ { .compatible = "xlnx,zynqmp-pinctrl" },
+ { .compatible = "xlnx,pinctrl-zynqmp" },
+ { }
+};
+
+static struct platform_driver zynqmp_pinctrl_driver = {
+ .driver = {
+ .name = "zynqmp-pinctrl",
+ .of_match_table = zynqmp_pinctrl_of_match,
+ },
+ .probe = zynqmp_pinctrl_probe,
+ .remove = zynqmp_pinctrl_remove,
+};
+
+static int __init zynqmp_pinctrl_init(void)
+{
+ return platform_driver_register(&zynqmp_pinctrl_driver);
+}
+arch_initcall(zynqmp_pinctrl_init);
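
The group discovery in the new zynqmp driver above queries the firmware in batches of NUM_GROUPS_PER_RESP IDs, stopping at an NA_GROUP sentinel and skipping RESERVED_GROUP holes. A minimal user-space sketch of that batching loop, with a fake query callback standing in for eemi_ops->query_data:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_GROUPS_PER_RESP 6
    #define NA_GROUP       ((uint16_t)-1)
    #define RESERVED_GROUP ((uint16_t)-2)

    /* Fake firmware: groups 0..8 exist, group 3 is reserved. */
    static void query_groups(unsigned int index,
                             uint16_t resp[NUM_GROUPS_PER_RESP])
    {
        for (int i = 0; i < NUM_GROUPS_PER_RESP; i++) {
            unsigned int id = index + i;

            if (id >= 9)
                resp[i] = NA_GROUP;
            else if (id == 3)
                resp[i] = RESERVED_GROUP;
            else
                resp[i] = (uint16_t)id;
        }
    }

    int main(void)
    {
        uint16_t resp[NUM_GROUPS_PER_RESP];

        for (unsigned int index = 0; ; index += NUM_GROUPS_PER_RESP) {
            query_groups(index, resp);
            for (int i = 0; i < NUM_GROUPS_PER_RESP; i++) {
                if (resp[i] == NA_GROUP)
                    return 0;        /* end of the list */
                if (resp[i] == RESERVED_GROUP)
                    continue;        /* hole in the ID space */
                printf("group %u\n", resp[i]);
            }
        }
    }
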
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c
index 396db12ae904..bf68913ba821 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c
@@ -844,8 +844,8 @@ static const struct msm_pingroup msm8916_groups[] = {
PINGROUP(28, pwr_modem_enabled_a, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
PINGROUP(29, cci_i2c, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
PINGROUP(30, cci_i2c, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
- PINGROUP(31, cci_timer0, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(32, cci_timer1, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(31, cci_timer0, flash_strobe, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(32, cci_timer1, flash_strobe, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(33, cci_async, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
PINGROUP(34, pwr_nav_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
PINGROUP(35, pwr_crypto_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index e8149ff1d401..10595b43360b 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1250,6 +1250,7 @@ static struct irq_domain *stm32_pctrl_get_irq_domain(struct device_node *np)
return ERR_PTR(-ENXIO);
domain = irq_find_host(parent);
+ of_node_put(parent);
if (!domain)
/* domain not registered yet */
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
index 4557e18d5989..12c40f9c1a24 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
@@ -105,6 +105,7 @@ static const struct sunxi_pinctrl_desc sun50i_h6_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun50i_h6_r_pins),
.pin_base = PL_BASE,
.irq_banks = 2,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
};
static int sun50i_h6_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index c4052eab6bfc..8c41f8b818b2 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -616,7 +616,7 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
unsigned pin,
struct regulator *supply)
{
- unsigned short bank = pin / PINS_PER_BANK;
+ unsigned short bank;
unsigned long flags;
u32 val, reg;
int uV;
@@ -632,6 +632,9 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
if (uV == 0)
return 0;
+ pin -= pctl->desc->pin_base;
+ bank = pin / PINS_PER_BANK;
+
switch (pctl->desc->io_bias_cfg_variant) {
case BIAS_VOLTAGE_GRP_CONFIG:
/*
@@ -649,8 +652,6 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
else
val = 0xD; /* 3.3V */
- pin -= pctl->desc->pin_base;
-
reg = readl(pctl->membase + sunxi_grp_config_reg(pin));
reg &= ~IO_BIAS_MASK;
writel(reg | val, pctl->membase + sunxi_grp_config_reg(pin));
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 8723bcf10c93..954953133d56 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -716,6 +716,7 @@ static int __init
chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
const struct chromeos_laptop *src)
{
+ struct i2c_peripheral *i2c_peripherals;
struct i2c_peripheral *i2c_dev;
struct i2c_board_info *info;
int i;
@@ -724,17 +725,15 @@ chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
if (!src->num_i2c_peripherals)
return 0;
- cros_laptop->i2c_peripherals = kmemdup(src->i2c_peripherals,
- src->num_i2c_peripherals *
- sizeof(*src->i2c_peripherals),
- GFP_KERNEL);
- if (!cros_laptop->i2c_peripherals)
+ i2c_peripherals = kmemdup(src->i2c_peripherals,
+ src->num_i2c_peripherals *
+ sizeof(*src->i2c_peripherals),
+ GFP_KERNEL);
+ if (!i2c_peripherals)
return -ENOMEM;
- cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals;
-
- for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
- i2c_dev = &cros_laptop->i2c_peripherals[i];
+ for (i = 0; i < src->num_i2c_peripherals; i++) {
+ i2c_dev = &i2c_peripherals[i];
info = &i2c_dev->board_info;
error = chromeos_laptop_setup_irq(i2c_dev);
@@ -752,16 +751,19 @@ chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
}
}
+ cros_laptop->i2c_peripherals = i2c_peripherals;
+ cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals;
+
return 0;
err_out:
while (--i >= 0) {
- i2c_dev = &cros_laptop->i2c_peripherals[i];
+ i2c_dev = &i2c_peripherals[i];
info = &i2c_dev->board_info;
if (info->properties)
property_entries_free(info->properties);
}
- kfree(cros_laptop->i2c_peripherals);
+ kfree(i2c_peripherals);
return error;
}
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index 74ded441bb50..4791b62c2923 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -285,7 +285,7 @@ static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
u_cmd.insize > EC_MAX_MSG_BYTES)
return -EINVAL;
- s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
+ s_cmd = kzalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
GFP_KERNEL);
if (!s_cmd)
return -ENOMEM;
@@ -328,6 +328,9 @@ static long cros_ec_chardev_ioctl_readmem(struct cros_ec_dev *ec,
if (copy_from_user(&s_mem, arg, sizeof(s_mem)))
return -EFAULT;
+ if (s_mem.bytes > sizeof(s_mem.buffer))
+ return -EINVAL;
+
num = ec_dev->cmd_readmem(ec_dev, s_mem.offset, s_mem.bytes,
s_mem.buffer);
if (num <= 0)
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index 92bda873d44a..767f4406e55f 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -56,6 +56,7 @@ struct mlxbf_tmfifo;
* @vq: pointer to the virtio virtqueue
* @desc: current descriptor of the pending packet
* @desc_head: head descriptor of the pending packet
+ * @drop_desc: dummy desc for packet dropping
* @cur_len: processed length of the current descriptor
* @rem_len: remaining length of the pending packet
* @pkt_len: total length of the pending packet
@@ -72,6 +73,7 @@ struct mlxbf_tmfifo_vring {
struct virtqueue *vq;
struct vring_desc *desc;
struct vring_desc *desc_head;
+ struct vring_desc drop_desc;
int cur_len;
int rem_len;
u32 pkt_len;
@@ -83,6 +85,14 @@ struct mlxbf_tmfifo_vring {
struct mlxbf_tmfifo *fifo;
};
+/* Check whether vring is in drop mode. */
+#define IS_VRING_DROP(_r) ({ \
+ typeof(_r) (r) = (_r); \
+ (r->desc_head == &r->drop_desc ? true : false); })
+
+/* A stub length used to drop a maximum-length packet. */
+#define VRING_DROP_DESC_MAX_LEN GENMASK(15, 0)
+
/* Interrupt types. */
enum {
MLXBF_TM_RX_LWM_IRQ,
@@ -195,7 +205,7 @@ static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
/* Maximum L2 header length. */
-#define MLXBF_TMFIFO_NET_L2_OVERHEAD 36
+#define MLXBF_TMFIFO_NET_L2_OVERHEAD (ETH_HLEN + VLAN_HLEN)
/* Supported virtio-net features. */
#define MLXBF_TMFIFO_NET_FEATURES \
@@ -243,6 +253,7 @@ static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
vring->align = SMP_CACHE_BYTES;
vring->index = i;
vring->vdev_id = tm_vdev->vdev.id.device;
+ vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
dev = &tm_vdev->vdev.dev;
size = vring_size(vring->num, vring->align);
@@ -348,7 +359,7 @@ static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
return len;
}
-static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
+static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
{
struct vring_desc *desc_head;
u32 len = 0;
@@ -577,19 +588,26 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
if (vring->cur_len + sizeof(u64) <= len) {
/* The whole word. */
- if (is_rx)
- memcpy(addr + vring->cur_len, &data, sizeof(u64));
- else
- memcpy(&data, addr + vring->cur_len, sizeof(u64));
+ if (is_rx) {
+ if (!IS_VRING_DROP(vring))
+ memcpy(addr + vring->cur_len, &data,
+ sizeof(u64));
+ } else {
+ memcpy(&data, addr + vring->cur_len,
+ sizeof(u64));
+ }
vring->cur_len += sizeof(u64);
} else {
/* Leftover bytes. */
- if (is_rx)
- memcpy(addr + vring->cur_len, &data,
- len - vring->cur_len);
- else
+ if (is_rx) {
+ if (!IS_VRING_DROP(vring))
+ memcpy(addr + vring->cur_len, &data,
+ len - vring->cur_len);
+ } else {
+ data = 0;
memcpy(&data, addr + vring->cur_len,
len - vring->cur_len);
+ }
vring->cur_len = len;
}
@@ -606,13 +624,14 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
* flag is set.
*/
static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
- struct vring_desc *desc,
+ struct vring_desc **desc,
bool is_rx, bool *vring_change)
{
struct mlxbf_tmfifo *fifo = vring->fifo;
struct virtio_net_config *config;
struct mlxbf_tmfifo_msg_hdr hdr;
int vdev_id, hdr_len;
+ bool drop_rx = false;
/* Read/Write packet header. */
if (is_rx) {
@@ -628,9 +647,12 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
vdev_id = VIRTIO_ID_NET;
hdr_len = sizeof(struct virtio_net_hdr);
config = &fifo->vdev[vdev_id]->config.net;
- if (ntohs(hdr.len) > config->mtu +
- MLXBF_TMFIFO_NET_L2_OVERHEAD)
- return;
+ /* A legacy-only interface for now. */
+ if (ntohs(hdr.len) >
+ __virtio16_to_cpu(virtio_legacy_is_little_endian(),
+ config->mtu) +
+ MLXBF_TMFIFO_NET_L2_OVERHEAD)
+ drop_rx = true;
} else {
vdev_id = VIRTIO_ID_CONSOLE;
hdr_len = 0;
@@ -645,16 +667,25 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
if (!tm_dev2)
return;
- vring->desc = desc;
+ vring->desc = *desc;
vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
*vring_change = true;
}
+
+ if (drop_rx && !IS_VRING_DROP(vring)) {
+ if (vring->desc_head)
+ mlxbf_tmfifo_release_pkt(vring);
+ *desc = &vring->drop_desc;
+ vring->desc_head = *desc;
+ vring->desc = *desc;
+ }
+
vring->pkt_len = ntohs(hdr.len) + hdr_len;
} else {
/* Network virtio has an extra header. */
hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
sizeof(struct virtio_net_hdr) : 0;
- vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
+ vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
hdr.len = htons(vring->pkt_len - hdr_len);
@@ -687,15 +718,23 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
/* Get the descriptor of the next packet. */
if (!vring->desc) {
desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
- if (!desc)
- return false;
+ if (!desc) {
+ /* Drop the next Rx packet to avoid getting stuck. */
+ if (is_rx) {
+ desc = &vring->drop_desc;
+ vring->desc_head = desc;
+ vring->desc = desc;
+ } else {
+ return false;
+ }
+ }
} else {
desc = vring->desc;
}
/* Beginning of a packet. Start to Rx/Tx packet header. */
if (vring->pkt_len == 0) {
- mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
+ mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
(*avail)--;
/* Return if new packet is for another ring. */
@@ -721,17 +760,24 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
vring->rem_len -= len;
/* Get the next desc on the chain. */
- if (vring->rem_len > 0 &&
+ if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
idx = virtio16_to_cpu(vdev, desc->next);
desc = &vr->desc[idx];
goto mlxbf_tmfifo_desc_done;
}
- /* Done and release the pending packet. */
- mlxbf_tmfifo_release_pending_pkt(vring);
+ /* Done and release the packet. */
desc = NULL;
fifo->vring[is_rx] = NULL;
+ if (!IS_VRING_DROP(vring)) {
+ mlxbf_tmfifo_release_pkt(vring);
+ } else {
+ vring->pkt_len = 0;
+ vring->desc_head = NULL;
+ vring->desc = NULL;
+ return false;
+ }
/*
* Make sure the load/store are in order before
@@ -865,6 +911,7 @@ static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
mlxbf_tmfifo_console_output(tm_vdev, vring);
spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
+ set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
} else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
&fifo->pend_events)) {
return true;
@@ -910,7 +957,7 @@ static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
/* Release the pending packet. */
if (vring->desc)
- mlxbf_tmfifo_release_pending_pkt(vring);
+ mlxbf_tmfifo_release_pkt(vring);
vq = vring->vq;
if (vq) {
vring->vq = NULL;
@@ -1240,8 +1287,12 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
/* Create the network vdev. */
memset(&net_config, 0, sizeof(net_config));
- net_config.mtu = ETH_DATA_LEN;
- net_config.status = VIRTIO_NET_S_LINK_UP;
+
+ /* A legacy-only interface for now. */
+ net_config.mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
+ ETH_DATA_LEN);
+ net_config.status = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
+ VIRTIO_NET_S_LINK_UP);
mlxbf_tmfifo_get_cfg_mac(net_config.mac);
rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
MLXBF_TMFIFO_NET_FEATURES, &net_config,
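The hunks above switch the virtio-net config fields to legacy virtio byte order. As a rough sketch of the conversion pair (the wrapper names here are illustrative; only the <linux/virtio_byteorder.h> helpers are real):

#include <linux/virtio_byteorder.h>

/* Encode a CPU-native MTU as a legacy virtio config field. */
static __virtio16 tmfifo_encode_mtu(u16 mtu)
{
	return __cpu_to_virtio16(virtio_legacy_is_little_endian(), mtu);
}

/* Decode it back for comparisons such as the Rx length check above. */
static u16 tmfifo_decode_mtu(__virtio16 mtu)
{
	return __virtio16_to_cpu(virtio_legacy_is_little_endian(), mtu);
}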
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index 2db7113383fd..89d9fca02fe9 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -265,7 +265,7 @@ static ssize_t ec_dbgfs_cmd_write(struct file *file, const char __user *buf,
int i, m;
unsigned char ec_cmd[EC_MAX_CMD_ARGS];
unsigned int ec_cmd_int[EC_MAX_CMD_ARGS];
- char cmdbuf[64];
+ char cmdbuf[64] = "";
int ec_cmd_bytes;
mutex_lock(&ec_dbgfs_lock);
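The cmdbuf change above guards against parsing uninitialized stack bytes when fewer than 64 characters are written. A minimal sketch of the pattern, assuming a hypothetical debugfs write handler:

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t cmd_write(struct file *file, const char __user *ubuf,
			 size_t count, loff_t *ppos)
{
	char buf[64] = "";	/* zero-filled, so always NUL-terminated */

	if (copy_from_user(buf, ubuf, min(count, sizeof(buf) - 1)))
		return -EFAULT;

	/* buf can now be parsed safely even if count < sizeof(buf) */
	return count;
}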
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index d27a564389a4..d6c4cd4262df 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -93,6 +93,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
{KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
{KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
{KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
+ {KE_KEY, 0x27, {KEY_HELP} },
{KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */
{KE_IGNORE, 0x41, {KEY_MUTE} },
{KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
@@ -106,7 +107,13 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
{KE_IGNORE, 0x48, {KEY_VOLUMEUP} },
{KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} },
{KE_IGNORE, 0x4a, {KEY_VOLUMEDOWN} },
- {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} },
+ /*
+ * 0x61 is KEY_SWITCHVIDEOMODE. Usually this is a duplicate input event
+ * with the "Video Bus" input device events. But sometimes it is not
+ * a dup. Map it to KEY_UNKNOWN instead of using KE_IGNORE so that
+ * udev/hwdb can override it on systems where it is not a dup.
+ */
+ {KE_KEY, 0x61, {KEY_UNKNOWN} },
{KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} },
{KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
@@ -531,6 +538,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
},
{
.callback = set_force_caps,
+ .ident = "Acer Aspire Switch V 10 SW5-017",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"),
+ },
+ .driver_data = (void *)ACER_CAP_KBD_DOCK,
+ },
+ {
+ .callback = set_force_caps,
.ident = "Acer One 10 (S1003)",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 59b78a181723..78d357de2f04 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -528,6 +528,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x32, { KEY_MUTE } },
+ { KE_KEY, 0x33, { KEY_SCREENLOCK } },
{ KE_KEY, 0x35, { KEY_SCREENLOCK } },
{ KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
{ KE_KEY, 0x41, { KEY_NEXTSONG } },
@@ -554,6 +555,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
{ KE_IGNORE, 0x6E, }, /* Low Battery notification */
{ KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
+ { KE_IGNORE, 0x7B, }, /* Charger connect/disconnect notification */
{ KE_KEY, 0x7c, { KEY_MICMUTE } },
{ KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
{ KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
@@ -579,6 +581,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */
{ KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */
{ KE_KEY, 0xB5, { KEY_CALC } },
+ { KE_IGNORE, 0xC0, }, /* External display connect/disconnect notification */
{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
{ KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index ed83fb135bab..761cab698c75 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -1195,6 +1195,8 @@ static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
cpu_to_le32(ports_available));
+ pci_dev_put(xhci_pdev);
+
pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n",
orig_ports_available, ports_available);
}
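The asus-wmi hunk above plugs a reference leak: pci_get_device() returns its device with an elevated refcount. A minimal sketch of the balanced pattern (the device ID is a placeholder, not from this patch):

#include <linux/pci.h>

static void config_xhci_once(void)
{
	/* 0x1234 is a placeholder device ID for illustration only */
	struct pci_dev *xhci = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1234, NULL);

	if (!xhci)
		return;

	/* ... access config space ... */

	pci_dev_put(xhci);	/* balance the reference pci_get_device() took */
}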
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 4f31b68642a0..6faf213f687a 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -18,7 +18,7 @@
#include <linux/i8042.h>
#define ASUS_WMI_KEY_IGNORE (-1)
-#define ASUS_WMI_BRN_DOWN 0x20
+#define ASUS_WMI_BRN_DOWN 0x2e
#define ASUS_WMI_BRN_UP 0x2f
struct module;
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 3adcb0de0193..922ca87919bb 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -366,7 +366,7 @@ static ssize_t hdaps_variance_show(struct device *dev,
static ssize_t hdaps_temp1_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u8 uninitialized_var(temp);
+ u8 temp;
int ret;
ret = hdaps_readb_one(HDAPS_PORT_TEMP1, &temp);
@@ -379,7 +379,7 @@ static ssize_t hdaps_temp1_show(struct device *dev,
static ssize_t hdaps_temp2_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u8 uninitialized_var(temp);
+ u8 temp;
int ret;
ret = hdaps_readb_one(HDAPS_PORT_TEMP2, &temp);
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index c3fdb0ecad96..d3a329b201b1 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -63,6 +63,7 @@ enum hp_wmi_event_ids {
HPWMI_PEAKSHIFT_PERIOD = 0x0F,
HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
HPWMI_SANITIZATION_MODE = 0x17,
+ HPWMI_SMART_EXPERIENCE_APP = 0x21,
};
struct bios_args {
@@ -632,6 +633,8 @@ static void hp_wmi_notify(u32 value, void *context)
break;
case HPWMI_SANITIZATION_MODE:
break;
+ case HPWMI_SMART_EXPERIENCE_APP:
+ break;
default:
pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
break;
@@ -880,8 +883,16 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
wwan_rfkill = NULL;
rfkill2_count = 0;
- if (hp_wmi_rfkill_setup(device))
- hp_wmi_rfkill2_setup(device);
+ /*
+ * On pre-2009 BIOSes, command 1Bh returns 0x4 to indicate that
+ * the BIOS no longer controls the power for the wireless
+ * devices. All features supported by this command are no
+ * longer available.
+ */
+ if (!hp_wmi_bios_2009_later()) {
+ if (hp_wmi_rfkill_setup(device))
+ hp_wmi_rfkill2_setup(device);
+ }
return 0;
}
diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
index 195a7f3638cb..e27f551a9afa 100644
--- a/drivers/platform/x86/huawei-wmi.c
+++ b/drivers/platform/x86/huawei-wmi.c
@@ -41,6 +41,8 @@ static const struct key_entry huawei_wmi_keymap[] = {
{ KE_IGNORE, 0x293, { KEY_KBDILLUMTOGGLE } },
{ KE_IGNORE, 0x294, { KEY_KBDILLUMUP } },
{ KE_IGNORE, 0x295, { KEY_KBDILLUMUP } },
+ // Ignore Ambient Light Sensing
+ { KE_KEY, 0x2c1, { KEY_RESERVED } },
{ KE_END, 0 }
};
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 164bebbfea21..050f8f797037 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -449,7 +449,7 @@ static bool button_array_present(struct platform_device *device)
static int intel_hid_probe(struct platform_device *device)
{
acpi_handle handle = ACPI_HANDLE(&device->dev);
- unsigned long long mode;
+ unsigned long long mode, dummy;
struct intel_hid_priv *priv;
acpi_status status;
int err;
@@ -501,18 +501,15 @@ static int intel_hid_probe(struct platform_device *device)
if (err)
goto err_remove_notify;
- if (priv->array) {
- unsigned long long dummy;
+ intel_button_array_enable(&device->dev, true);
- intel_button_array_enable(&device->dev, true);
-
- /* Call button load method to enable HID power button */
- if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_BTNL_FN,
- &dummy)) {
- dev_warn(&device->dev,
- "failed to enable HID power button\n");
- }
- }
+ /*
+ * Call the button load method to enable the HID power button.
+ * Always do this, since it also activates events on some devices
+ * without a button array.
+ */
+ if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_BTNL_FN, &dummy))
+ dev_warn(&device->dev, "failed to enable HID power button\n");
device_init_wakeup(&device->dev, true);
/*
diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c
index e1266f5c6359..42ed02ab8a61 100644
--- a/drivers/platform/x86/intel_pmc_core_pltdrv.c
+++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c
@@ -18,6 +18,8 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <xen/xen.h>
+
static void intel_pmc_core_release(struct device *dev)
{
/* Nothing to do. */
@@ -56,6 +58,13 @@ static int __init pmc_core_platform_init(void)
if (acpi_dev_present("INT33A1", NULL, -1))
return -ENODEV;
+ /*
+ * Skip forcefully attaching the device for VMs. Make an exception for
+ * Xen dom0, which does have full hardware access.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR) && !xen_initial_domain())
+ return -ENODEV;
+
if (!x86_match_cpu(intel_pmc_core_platform_ids))
return -ENODEV;
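The intel_pmc_core_pltdrv hunk above skips force-attaching the device inside virtual machines. A minimal sketch of the guard, wrapped in a hypothetical helper:

#include <asm/cpufeature.h>
#include <xen/xen.h>

static bool pmc_running_on_bare_metal(void)
{
	/*
	 * X86_FEATURE_HYPERVISOR is set for every guest; Xen dom0 is
	 * the exception because it has full hardware access.
	 */
	return !cpu_feature_enabled(X86_FEATURE_HYPERVISOR) ||
	       xen_initial_domain();
}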
diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel_telemetry_core.c
index d4040bb222b4..59fc6624b1ac 100644
--- a/drivers/platform/x86/intel_telemetry_core.c
+++ b/drivers/platform/x86/intel_telemetry_core.c
@@ -102,7 +102,7 @@ static const struct telemetry_core_ops telm_defpltops = {
/**
* telemetry_update_events() - Update telemetry Configuration
* @pss_evtconfig: PSS related config. No change if num_evts = 0.
- * @pss_evtconfig: IOSS related config. No change if num_evts = 0.
+ * @ioss_evtconfig: IOSS related config. No change if num_evts = 0.
*
* This API updates the IOSS & PSS Telemetry configuration. Old config
* is overwritten. Call telemetry_reset_events when logging is over
@@ -176,7 +176,7 @@ EXPORT_SYMBOL_GPL(telemetry_reset_events);
/**
* telemetry_get_eventconfig() - Returns the pss and ioss events enabled
* @pss_evtconfig: Pointer to PSS related configuration.
- * @pss_evtconfig: Pointer to IOSS related configuration.
+ * @ioss_evtconfig: Pointer to IOSS related configuration.
* @pss_len: Number of u32 elements allocated for pss_evtconfig array
* @ioss_len: Number of u32 elements allocated for ioss_evtconfig array
*
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 24ffc8e2d2d1..dfb4af759aa7 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -210,7 +210,7 @@ static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
return -EINVAL;
if (quirks->ec_read_only)
- return -EOPNOTSUPP;
+ return 0;
/* read current device state */
result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata);
@@ -596,11 +596,10 @@ static const struct dmi_system_id msi_dmi_table[] __initconst = {
{
.ident = "MSI S270",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-1013"),
DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
- DMI_MATCH(DMI_CHASSIS_VENDOR,
- "MICRO-STAR INT'L CO.,LTD")
+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT")
},
.driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
@@ -633,8 +632,7 @@ static const struct dmi_system_id msi_dmi_table[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "NOTEBOOK"),
DMI_MATCH(DMI_PRODUCT_NAME, "SAM2000"),
DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
- DMI_MATCH(DMI_CHASSIS_VENDOR,
- "MICRO-STAR INT'L CO.,LTD")
+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT")
},
.driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
@@ -843,15 +841,15 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
static void msi_init_rfkill(struct work_struct *ignored)
{
if (rfk_wlan) {
- rfkill_set_sw_state(rfk_wlan, !wlan_s);
+ msi_rfkill_set_state(rfk_wlan, !wlan_s);
rfkill_wlan_set(NULL, !wlan_s);
}
if (rfk_bluetooth) {
- rfkill_set_sw_state(rfk_bluetooth, !bluetooth_s);
+ msi_rfkill_set_state(rfk_bluetooth, !bluetooth_s);
rfkill_bluetooth_set(NULL, !bluetooth_s);
}
if (rfk_threeg) {
- rfkill_set_sw_state(rfk_threeg, !threeg_s);
+ msi_rfkill_set_state(rfk_threeg, !threeg_s);
rfkill_threeg_set(NULL, !threeg_s);
}
}
@@ -1048,8 +1046,7 @@ static int __init msi_init(void)
return -EINVAL;
/* Register backlight stuff */
-
- if (quirks->old_ec_model ||
+ if (quirks->old_ec_model &&
acpi_video_get_backlight_type() == acpi_backlight_vendor) {
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -1117,6 +1114,8 @@ fail_create_attr:
fail_create_group:
if (quirks->load_scm_model) {
i8042_remove_filter(msi_laptop_i8042_filter);
+ cancel_delayed_work_sync(&msi_touchpad_dwork);
+ input_unregister_device(msi_laptop_input_dev);
cancel_delayed_work_sync(&msi_rfkill_dwork);
cancel_work_sync(&msi_rfkill_work);
rfkill_cleanup();
@@ -1137,6 +1136,7 @@ static void __exit msi_cleanup(void)
{
if (quirks->load_scm_model) {
i8042_remove_filter(msi_laptop_i8042_filter);
+ cancel_delayed_work_sync(&msi_touchpad_dwork);
input_unregister_device(msi_laptop_input_dev);
cancel_delayed_work_sync(&msi_rfkill_dwork);
cancel_work_sync(&msi_rfkill_work);
diff --git a/drivers/platform/x86/mxm-wmi.c b/drivers/platform/x86/mxm-wmi.c
index 9a19fbd2f734..9a457956025a 100644
--- a/drivers/platform/x86/mxm-wmi.c
+++ b/drivers/platform/x86/mxm-wmi.c
@@ -35,13 +35,11 @@ int mxm_wmi_call_mxds(int adapter)
.xarg = 1,
};
struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
printk("calling mux switch %d\n", adapter);
- status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input,
- &output);
+ status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL);
if (ACPI_FAILURE(status))
return status;
@@ -60,13 +58,11 @@ int mxm_wmi_call_mxmx(int adapter)
.xarg = 1,
};
struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
printk("calling mux switch %d\n", adapter);
- status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input,
- &output);
+ status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL);
if (ACPI_FAILURE(status))
return status;
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 597cfabc0967..ee349a16b73a 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -244,7 +244,7 @@ static void pmc_power_off(void)
pm1_cnt_port = acpi_base_addr + PM1_CNT;
pm1_cnt_value = inl(pm1_cnt_port);
- pm1_cnt_value &= SLEEP_TYPE_MASK;
+ pm1_cnt_value &= ~SLEEP_TYPE_MASK;
pm1_cnt_value |= SLEEP_TYPE_S5;
pm1_cnt_value |= SLEEP_ENABLE;
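The pmc_atom fix above restores the canonical clear-then-set sequence: `&= SLEEP_TYPE_MASK` kept only the old field bits instead of clearing them. A minimal sketch of the read-modify-write pattern, using the identifiers from the hunk:

u32 val = inl(pm1_cnt_port);

val &= ~SLEEP_TYPE_MASK;	/* clear the whole field first */
val |= SLEEP_TYPE_S5;		/* then program the new value */
val |= SLEEP_ENABLE;
outl(val, pm1_cnt_port);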
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index fb088dd8529e..5a290fed63a5 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1899,14 +1899,21 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
break;
}
- ret = sony_call_snc_handle(handle, probe_base, &result);
- if (ret)
- return ret;
+ /*
+ * Only probe if there is a separate probe_base, otherwise the probe call
+ * is equivalent to __sony_nc_kbd_backlight_mode_set(0), resulting in
+ * the keyboard backlight being turned off.
+ */
+ if (probe_base) {
+ ret = sony_call_snc_handle(handle, probe_base, &result);
+ if (ret)
+ return ret;
- if ((handle == 0x0137 && !(result & 0x02)) ||
- !(result & 0x01)) {
- dprintk("no backlight keyboard found\n");
- return 0;
+ if ((handle == 0x0137 && !(result & 0x02)) ||
+ !(result & 0x01)) {
+ dprintk("no backlight keyboard found\n");
+ return 0;
+ }
}
kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 5d114088c88f..f0d6bb567d1d 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -9699,6 +9699,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
* Individual addressing is broken on models that expose the
* primary battery as BAT1.
*/
+ TPACPI_Q_LNV('8', 'F', true), /* Thinkpad X120e */
TPACPI_Q_LNV('J', '7', true), /* B5400 */
TPACPI_Q_LNV('J', 'I', true), /* Thinkpad 11e */
TPACPI_Q_LNV3('R', '0', 'B', true), /* Thinkpad 11e gen 3 */
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 515c66ca1aec..0346bf751537 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -169,6 +169,23 @@ static const struct ts_dmi_data connect_tablet9_data = {
.properties = connect_tablet9_props,
};
+static const struct property_entry csl_panther_tab_hd_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 20),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data csl_panther_tab_hd_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = csl_panther_tab_hd_props,
+};
+
static const struct property_entry cube_iwork8_air_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
@@ -215,6 +232,22 @@ static const struct ts_dmi_data dexp_ursus_7w_data = {
.properties = dexp_ursus_7w_props,
};
+static const struct property_entry dexp_ursus_kx210i_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 2),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1137),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data dexp_ursus_kx210i_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = dexp_ursus_kx210i_props,
+};
+
static const struct property_entry digma_citi_e200_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -722,6 +755,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* CSL Panther Tab HD */
+ .driver_data = (void *)&csl_panther_tab_hd_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "CSL Computer GmbH & Co. KG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CSL Panther Tab HD"),
+ },
+ },
+ {
/* CUBE iwork8 Air */
.driver_data = (void *)&cube_iwork8_air_data,
.matches = {
@@ -749,6 +790,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* DEXP Ursus KX210i */
+ .driver_data = (void *)&dexp_ursus_kx210i_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "INSYDE Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "S107I"),
+ },
+ },
+ {
/* Digma Citi E200 */
.driver_data = (void *)&digma_citi_e200_data,
.matches = {
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 8ff719c2f96b..edd639f295fe 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -39,7 +39,7 @@ MODULE_LICENSE("GPL");
static LIST_HEAD(wmi_block_list);
struct guid_block {
- char guid[16];
+ guid_t guid;
union {
char object_id[2];
struct {
@@ -110,17 +110,17 @@ static struct platform_driver acpi_wmi_driver = {
static bool find_guid(const char *guid_string, struct wmi_block **out)
{
- uuid_le guid_input;
+ guid_t guid_input;
struct wmi_block *wblock;
struct guid_block *block;
- if (uuid_le_to_bin(guid_string, &guid_input))
+ if (guid_parse(guid_string, &guid_input))
return false;
list_for_each_entry(wblock, &wmi_block_list, list) {
block = &wblock->gblock;
- if (memcmp(block->guid, &guid_input, 16) == 0) {
+ if (guid_equal(&block->guid, &guid_input)) {
if (out)
*out = wblock;
return true;
@@ -129,11 +129,20 @@ static bool find_guid(const char *guid_string, struct wmi_block **out)
return false;
}
+static bool guid_parse_and_compare(const char *string, const guid_t *guid)
+{
+ guid_t guid_input;
+
+ if (guid_parse(string, &guid_input))
+ return false;
+
+ return guid_equal(&guid_input, guid);
+}
+
static const void *find_guid_context(struct wmi_block *wblock,
struct wmi_driver *wdriver)
{
const struct wmi_device_id *id;
- uuid_le guid_input;
if (wblock == NULL || wdriver == NULL)
return NULL;
@@ -142,9 +151,7 @@ static const void *find_guid_context(struct wmi_block *wblock,
id = wdriver->id_table;
while (*id->guid_string) {
- if (uuid_le_to_bin(id->guid_string, &guid_input))
- continue;
- if (!memcmp(wblock->gblock.guid, &guid_input, 16))
+ if (guid_parse_and_compare(id->guid_string, &wblock->gblock.guid))
return id->context;
id++;
}
@@ -177,7 +184,7 @@ static int get_subobj_info(acpi_handle handle, const char *pathname,
static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
{
- struct guid_block *block = NULL;
+ struct guid_block *block;
char method[5];
acpi_status status;
acpi_handle handle;
@@ -251,8 +258,8 @@ EXPORT_SYMBOL_GPL(wmi_evaluate_method);
acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance,
u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
{
- struct guid_block *block = NULL;
- struct wmi_block *wblock = NULL;
+ struct guid_block *block;
+ struct wmi_block *wblock;
acpi_handle handle;
acpi_status status;
struct acpi_object_list input;
@@ -299,7 +306,7 @@ EXPORT_SYMBOL_GPL(wmidev_evaluate_method);
static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
struct acpi_buffer *out)
{
- struct guid_block *block = NULL;
+ struct guid_block *block;
acpi_handle handle;
acpi_status status, wc_status = AE_ERROR;
struct acpi_object_list input;
@@ -412,8 +419,8 @@ EXPORT_SYMBOL_GPL(wmidev_block_query);
acpi_status wmi_set_block(const char *guid_string, u8 instance,
const struct acpi_buffer *in)
{
- struct guid_block *block = NULL;
struct wmi_block *wblock = NULL;
+ struct guid_block *block;
acpi_handle handle;
struct acpi_object_list input;
union acpi_object params[2];
@@ -456,7 +463,7 @@ EXPORT_SYMBOL_GPL(wmi_set_block);
static void wmi_dump_wdg(const struct guid_block *g)
{
- pr_info("%pUL:\n", g->guid);
+ pr_info("%pUL:\n", &g->guid);
if (g->flags & ACPI_WMI_EVENT)
pr_info("\tnotify_id: 0x%02X\n", g->notify_id);
else
@@ -526,18 +533,18 @@ wmi_notify_handler handler, void *data)
{
struct wmi_block *block;
acpi_status status = AE_NOT_EXIST;
- uuid_le guid_input;
+ guid_t guid_input;
if (!guid || !handler)
return AE_BAD_PARAMETER;
- if (uuid_le_to_bin(guid, &guid_input))
+ if (guid_parse(guid, &guid_input))
return AE_BAD_PARAMETER;
list_for_each_entry(block, &wmi_block_list, list) {
acpi_status wmi_status;
- if (memcmp(block->gblock.guid, &guid_input, 16) == 0) {
+ if (guid_equal(&block->gblock.guid, &guid_input)) {
if (block->handler &&
block->handler != wmi_notify_debug)
return AE_ALREADY_ACQUIRED;
@@ -565,18 +572,18 @@ acpi_status wmi_remove_notify_handler(const char *guid)
{
struct wmi_block *block;
acpi_status status = AE_NOT_EXIST;
- uuid_le guid_input;
+ guid_t guid_input;
if (!guid)
return AE_BAD_PARAMETER;
- if (uuid_le_to_bin(guid, &guid_input))
+ if (guid_parse(guid, &guid_input))
return AE_BAD_PARAMETER;
list_for_each_entry(block, &wmi_block_list, list) {
acpi_status wmi_status;
- if (memcmp(block->gblock.guid, &guid_input, 16) == 0) {
+ if (guid_equal(&block->gblock.guid, &guid_input)) {
if (!block->handler ||
block->handler == wmi_notify_debug)
return AE_NULL_ENTRY;
@@ -612,7 +619,6 @@ acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
{
struct acpi_object_list input;
union acpi_object params[1];
- struct guid_block *gblock;
struct wmi_block *wblock;
input.count = 1;
@@ -621,7 +627,7 @@ acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
params[0].integer.value = event;
list_for_each_entry(wblock, &wmi_block_list, list) {
- gblock = &wblock->gblock;
+ struct guid_block *gblock = &wblock->gblock;
if ((gblock->flags & ACPI_WMI_EVENT) &&
(gblock->notify_id == event))
@@ -682,7 +688,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
{
struct wmi_block *wblock = dev_to_wblock(dev);
- return sprintf(buf, "wmi:%pUL\n", wblock->gblock.guid);
+ return sprintf(buf, "wmi:%pUL\n", &wblock->gblock.guid);
}
static DEVICE_ATTR_RO(modalias);
@@ -691,7 +697,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
{
struct wmi_block *wblock = dev_to_wblock(dev);
- return sprintf(buf, "%pUL\n", wblock->gblock.guid);
+ return sprintf(buf, "%pUL\n", &wblock->gblock.guid);
}
static DEVICE_ATTR_RO(guid);
@@ -774,10 +780,10 @@ static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct wmi_block *wblock = dev_to_wblock(dev);
- if (add_uevent_var(env, "MODALIAS=wmi:%pUL", wblock->gblock.guid))
+ if (add_uevent_var(env, "MODALIAS=wmi:%pUL", &wblock->gblock.guid))
return -ENOMEM;
- if (add_uevent_var(env, "WMI_GUID=%pUL", wblock->gblock.guid))
+ if (add_uevent_var(env, "WMI_GUID=%pUL", &wblock->gblock.guid))
return -ENOMEM;
return 0;
@@ -801,11 +807,7 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
return 0;
while (*id->guid_string) {
- uuid_le driver_guid;
-
- if (WARN_ON(uuid_le_to_bin(id->guid_string, &driver_guid)))
- continue;
- if (!memcmp(&driver_guid, wblock->gblock.guid, 16))
+ if (guid_parse_and_compare(id->guid_string, &wblock->gblock.guid))
return 1;
id++;
@@ -815,21 +817,13 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
}
static int wmi_char_open(struct inode *inode, struct file *filp)
{
- const char *driver_name = filp->f_path.dentry->d_iname;
- struct wmi_block *wblock = NULL;
- struct wmi_block *next = NULL;
-
- list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
- if (!wblock->dev.dev.driver)
- continue;
- if (strcmp(driver_name, wblock->dev.dev.driver->name) == 0) {
- filp->private_data = wblock;
- break;
- }
- }
+ /*
+ * The miscdevice already stores a pointer to itself
+ * inside filp->private_data
+ */
+ struct wmi_block *wblock = container_of(filp->private_data, struct wmi_block, char_dev);
- if (!filp->private_data)
- return -ENODEV;
+ filp->private_data = wblock;
return nonseekable_open(inode, filp);
}
@@ -849,8 +843,8 @@ static long wmi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct wmi_ioctl_buffer __user *input =
(struct wmi_ioctl_buffer __user *) arg;
struct wmi_block *wblock = filp->private_data;
- struct wmi_ioctl_buffer *buf = NULL;
- struct wmi_driver *wdriver = NULL;
+ struct wmi_ioctl_buffer *buf;
+ struct wmi_driver *wdriver;
int ret;
if (_IOC_TYPE(cmd) != WMI_IOC)
@@ -1039,7 +1033,6 @@ static const struct device_type wmi_type_data = {
};
static int wmi_create_device(struct device *wmi_bus_dev,
- const struct guid_block *gblock,
struct wmi_block *wblock,
struct acpi_device *device)
{
@@ -1047,12 +1040,12 @@ static int wmi_create_device(struct device *wmi_bus_dev,
char method[5];
int result;
- if (gblock->flags & ACPI_WMI_EVENT) {
+ if (wblock->gblock.flags & ACPI_WMI_EVENT) {
wblock->dev.dev.type = &wmi_type_event;
goto out_init;
}
- if (gblock->flags & ACPI_WMI_METHOD) {
+ if (wblock->gblock.flags & ACPI_WMI_METHOD) {
wblock->dev.dev.type = &wmi_type_method;
mutex_init(&wblock->char_mutex);
goto out_init;
@@ -1102,7 +1095,7 @@ static int wmi_create_device(struct device *wmi_bus_dev,
wblock->dev.dev.bus = &wmi_bus_type;
wblock->dev.dev.parent = wmi_bus_dev;
- dev_set_name(&wblock->dev.dev, "%pUL", gblock->guid);
+ dev_set_name(&wblock->dev.dev, "%pUL", &wblock->gblock.guid);
device_initialize(&wblock->dev.dev);
@@ -1122,13 +1115,12 @@ static void wmi_free_devices(struct acpi_device *device)
}
}
-static bool guid_already_parsed(struct acpi_device *device,
- const u8 *guid)
+static bool guid_already_parsed(struct acpi_device *device, const guid_t *guid)
{
struct wmi_block *wblock;
list_for_each_entry(wblock, &wmi_block_list, list) {
- if (memcmp(wblock->gblock.guid, guid, 16) == 0) {
+ if (guid_equal(&wblock->gblock.guid, guid)) {
/*
* Because we historically didn't track the relationship
* between GUIDs and ACPI nodes, we don't know whether
@@ -1154,8 +1146,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
struct wmi_block *wblock, *next;
union acpi_object *obj;
acpi_status status;
- int retval = 0;
u32 i, total;
+ int retval;
status = acpi_evaluate_object(device->handle, "_WDG", NULL, &out);
if (ACPI_FAILURE(status))
@@ -1166,8 +1158,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
return -ENXIO;
if (obj->type != ACPI_TYPE_BUFFER) {
- retval = -ENXIO;
- goto out_free_pointer;
+ kfree(obj);
+ return -ENXIO;
}
gblock = (const struct guid_block *)obj->buffer.pointer;
@@ -1183,19 +1175,19 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
* case yet, so for now, we'll just ignore the duplicate
* for device creation.
*/
- if (guid_already_parsed(device, gblock[i].guid))
+ if (guid_already_parsed(device, &gblock[i].guid))
continue;
wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
if (!wblock) {
- retval = -ENOMEM;
- break;
+ dev_err(wmi_bus_dev, "Failed to allocate %pUL\n", &gblock[i].guid);
+ continue;
}
wblock->acpi_device = device;
wblock->gblock = gblock[i];
- retval = wmi_create_device(wmi_bus_dev, &gblock[i], wblock, device);
+ retval = wmi_create_device(wmi_bus_dev, wblock, device);
if (retval) {
kfree(wblock);
continue;
@@ -1220,7 +1212,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
retval = device_add(&wblock->dev.dev);
if (retval) {
dev_err(wmi_bus_dev, "failed to register %pUL\n",
- wblock->gblock.guid);
+ &wblock->gblock.guid);
if (debug_event)
wmi_method_enable(wblock, 0);
list_del(&wblock->list);
@@ -1228,9 +1220,9 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
}
}
-out_free_pointer:
- kfree(out.pointer);
- return retval;
+ kfree(obj);
+
+ return 0;
}
/*
@@ -1280,12 +1272,11 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
static void acpi_wmi_notify_handler(acpi_handle handle, u32 event,
void *context)
{
- struct guid_block *block;
struct wmi_block *wblock;
bool found_it = false;
list_for_each_entry(wblock, &wmi_block_list, list) {
- block = &wblock->gblock;
+ struct guid_block *block = &wblock->gblock;
if (wblock->acpi_device->handle == handle &&
(block->flags & ACPI_WMI_EVENT) &&
@@ -1333,10 +1324,8 @@ static void acpi_wmi_notify_handler(acpi_handle handle, u32 event,
wblock->handler(event, wblock->handler_data);
}
- if (debug_event) {
- pr_info("DEBUG Event GUID: %pUL\n",
- wblock->gblock.guid);
- }
+ if (debug_event)
+ pr_info("DEBUG Event GUID: %pUL\n", &wblock->gblock.guid);
acpi_bus_generate_netlink_event(
wblock->acpi_device->pnp.device_class,
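The wmi.c conversion above replaces raw 16-byte memcmp() calls with the typed guid_t API. A minimal sketch of the parse-and-compare idiom that guid_parse_and_compare() wraps:

#include <linux/uuid.h>

static bool guid_string_matches(const char *str, const guid_t *guid)
{
	guid_t parsed;

	if (guid_parse(str, &parsed))
		return false;	/* malformed GUID string */

	return guid_equal(&parsed, guid);
}

Parsing into a guid_t first lets the comparison use guid_equal() instead of assuming the binary layout of the stored GUID.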
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 3bf18d718975..131b925b820d 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -160,14 +160,14 @@ struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id,
dev->dev.coherent_dma_mask = dev->dma_mask;
dev->dev.release = &pnp_release_device;
- dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number);
-
dev_id = pnp_add_id(dev, pnpid);
if (!dev_id) {
kfree(dev);
return NULL;
}
+ dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number);
+
return dev;
}
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index da78dc77aed3..9879deb4dc0b 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -151,13 +151,13 @@ static int vendor_resource_matches(struct pnp_dev *dev,
static void pnpacpi_parse_allocated_vendor(struct pnp_dev *dev,
struct acpi_resource_vendor_typed *vendor)
{
- if (vendor_resource_matches(dev, vendor, &hp_ccsr_uuid, 16)) {
- u64 start, length;
+ struct { u64 start, length; } range;
- memcpy(&start, vendor->byte_data, sizeof(start));
- memcpy(&length, vendor->byte_data + 8, sizeof(length));
-
- pnp_add_mem_resource(dev, start, start + length - 1, 0);
+ if (vendor_resource_matches(dev, vendor, &hp_ccsr_uuid,
+ sizeof(range))) {
+ memcpy(&range, vendor->byte_data, sizeof(range));
+ pnp_add_mem_resource(dev, range.start, range.start +
+ range.length - 1, 0);
}
}
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index 4684e7df833a..2365efe2dae1 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -942,6 +942,7 @@ static int omap_sr_probe(struct platform_device *pdev)
err_debugfs:
debugfs_remove_recursive(sr_info->dbg_dir);
err_list_del:
+ pm_runtime_disable(&pdev->dev);
list_del(&sr_info->node);
pm_runtime_put_sync(&pdev->dev);
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index c8a22df65036..1f5cf4d7552b 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -919,10 +919,8 @@ static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
*/
static void ab8500_btemp_external_power_changed(struct power_supply *psy)
{
- struct ab8500_btemp *di = power_supply_get_drvdata(psy);
-
- class_for_each_device(power_supply_class, NULL,
- di->btemp_psy, ab8500_btemp_get_ext_psy_data);
+ class_for_each_device(power_supply_class, NULL, psy,
+ ab8500_btemp_get_ext_psy_data);
}
/* ab8500 btemp driver interrupts and their respective isr */
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 4c229e6fb750..75df20b2fe0e 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2380,10 +2380,8 @@ out:
*/
static void ab8500_fg_external_power_changed(struct power_supply *psy)
{
- struct ab8500_fg *di = power_supply_get_drvdata(psy);
-
- class_for_each_device(power_supply_class, NULL,
- di->fg_psy, ab8500_fg_get_ext_psy_data);
+ class_for_each_device(power_supply_class, NULL, psy,
+ ab8500_fg_get_ext_psy_data);
}
/**
diff --git a/drivers/power/supply/adp5061.c b/drivers/power/supply/adp5061.c
index 003557043ab3..daee1161c305 100644
--- a/drivers/power/supply/adp5061.c
+++ b/drivers/power/supply/adp5061.c
@@ -427,11 +427,11 @@ static int adp5061_get_chg_type(struct adp5061_state *st,
if (ret < 0)
return ret;
- chg_type = adp5061_chg_type[ADP5061_CHG_STATUS_1_CHG_STATUS(status1)];
- if (chg_type > ADP5061_CHG_FAST_CV)
+ chg_type = ADP5061_CHG_STATUS_1_CHG_STATUS(status1);
+ if (chg_type >= ARRAY_SIZE(adp5061_chg_type))
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
else
- val->intval = chg_type;
+ val->intval = adp5061_chg_type[chg_type];
return ret;
}
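The adp5061 fix above validates a hardware-reported status before using it as an array index, rather than indexing first and range-checking the result. A minimal sketch of the pattern, with a placeholder table:

static const int chg_type_table[] = { 1, 2, 3, 4 };	/* placeholder values */

static int decode_chg_type(unsigned int hw_status)
{
	if (hw_status >= ARRAY_SIZE(chg_type_table))
		return -EINVAL;	/* unknown status from hardware */

	return chg_type_table[hw_status];
}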
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 64d87dccea82..f912284b2e55 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -1211,8 +1211,19 @@ static void bq24190_input_current_limit_work(struct work_struct *work)
struct bq24190_dev_info *bdi =
container_of(work, struct bq24190_dev_info,
input_current_limit_work.work);
+ union power_supply_propval val;
+ int ret;
- power_supply_set_input_current_limit_from_supplier(bdi->charger);
+ ret = power_supply_get_property_from_supplier(bdi->charger,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ &val);
+ if (ret)
+ return;
+
+ bq24190_charger_set_property(bdi->charger,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ &val);
+ power_supply_changed(bdi->charger);
}
/* Sync the input-current-limit with our parent supply (if we have one) */
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index b1a37aa38880..e6c4dfdc58c4 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -886,10 +886,8 @@ static int poll_interval_param_set(const char *val, const struct kernel_param *k
return ret;
mutex_lock(&bq27xxx_list_lock);
- list_for_each_entry(di, &bq27xxx_battery_devices, list) {
- cancel_delayed_work_sync(&di->work);
- schedule_delayed_work(&di->work, 0);
- }
+ list_for_each_entry(di, &bq27xxx_battery_devices, list)
+ mod_delayed_work(system_wq, &di->work, 0);
mutex_unlock(&bq27xxx_list_lock);
return ret;
@@ -1547,7 +1545,7 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
return POWER_SUPPLY_HEALTH_GOOD;
}
-void bq27xxx_battery_update(struct bq27xxx_device_info *di)
+static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
{
struct bq27xxx_reg_cache cache = {0, };
bool has_ci_flag = di->opts & BQ27XXX_O_ZERO;
@@ -1597,6 +1595,16 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
di->cache = cache;
di->last_update = jiffies;
+
+ if (!di->removed && poll_interval > 0)
+ mod_delayed_work(system_wq, &di->work, poll_interval * HZ);
+}
+
+void bq27xxx_battery_update(struct bq27xxx_device_info *di)
+{
+ mutex_lock(&di->lock);
+ bq27xxx_battery_update_unlocked(di);
+ mutex_unlock(&di->lock);
}
EXPORT_SYMBOL_GPL(bq27xxx_battery_update);
@@ -1607,9 +1615,6 @@ static void bq27xxx_battery_poll(struct work_struct *work)
work.work);
bq27xxx_battery_update(di);
-
- if (poll_interval > 0)
- schedule_delayed_work(&di->work, poll_interval * HZ);
}
/*
@@ -1770,10 +1775,8 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
mutex_lock(&di->lock);
- if (time_is_before_jiffies(di->last_update + 5 * HZ)) {
- cancel_delayed_work_sync(&di->work);
- bq27xxx_battery_poll(&di->work.work);
- }
+ if (time_is_before_jiffies(di->last_update + 5 * HZ))
+ bq27xxx_battery_update_unlocked(di);
mutex_unlock(&di->lock);
if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
@@ -1857,8 +1860,8 @@ static void bq27xxx_external_power_changed(struct power_supply *psy)
{
struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
- cancel_delayed_work_sync(&di->work);
- schedule_delayed_work(&di->work, 0);
+ /* After a charger plug in/out, wait 0.5s for things to stabilize */
+ mod_delayed_work(system_wq, &di->work, HZ / 2);
}
int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
@@ -1910,22 +1913,18 @@ EXPORT_SYMBOL_GPL(bq27xxx_battery_setup);
void bq27xxx_battery_teardown(struct bq27xxx_device_info *di)
{
- /*
- * power_supply_unregister call bq27xxx_battery_get_property which
- * call bq27xxx_battery_poll.
- * Make sure that bq27xxx_battery_poll will not call
- * schedule_delayed_work again after unregister (which cause OOPS).
- */
- poll_interval = 0;
-
- cancel_delayed_work_sync(&di->work);
-
- power_supply_unregister(di->bat);
-
mutex_lock(&bq27xxx_list_lock);
list_del(&di->list);
mutex_unlock(&bq27xxx_list_lock);
+ /* Set removed to avoid bq27xxx_battery_update() re-queuing the work */
+ mutex_lock(&di->lock);
+ di->removed = true;
+ mutex_unlock(&di->lock);
+
+ cancel_delayed_work_sync(&di->work);
+
+ power_supply_unregister(di->bat);
mutex_destroy(&di->lock);
}
EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown);
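Several bq27xxx hunks above replace cancel_delayed_work_sync() + schedule_delayed_work() pairs with mod_delayed_work(), which re-arms the timer atomically and never sleeps, so it is safe to call with di->lock held. A minimal sketch:

/* Re-poll in half a second, whether or not the work was already queued. */
mod_delayed_work(system_wq, &di->work, HZ / 2);

/* Or run it immediately: */
mod_delayed_work(system_wq, &di->work, 0);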
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 34229c1f43e3..08c7e2b4155a 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -187,7 +187,7 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
i2c_set_clientdata(client, di);
if (client->irq) {
- ret = devm_request_threaded_irq(&client->dev, client->irq,
+ ret = request_threaded_irq(client->irq,
NULL, bq27xxx_battery_irq_handler_thread,
IRQF_ONESHOT,
di->name, di);
@@ -217,6 +217,9 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
{
struct bq27xxx_device_info *di = i2c_get_clientdata(client);
+ if (client->irq)
+ free_irq(client->irq, di);
+
bq27xxx_battery_teardown(di);
mutex_lock(&battery_mutex);
diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
index 6cc7c3910e09..0f80fdf88253 100644
--- a/drivers/power/supply/cros_usbpd-charger.c
+++ b/drivers/power/supply/cros_usbpd-charger.c
@@ -282,7 +282,7 @@ static int cros_usbpd_charger_get_power_info(struct port_data *port)
port->psy_current_max = 0;
break;
default:
- dev_err(dev, "Port %d: default case!\n", port->port_number);
+ dev_dbg(dev, "Port %d: default case!\n", port->port_number);
port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
}
diff --git a/drivers/power/supply/da9150-charger.c b/drivers/power/supply/da9150-charger.c
index f9314cc0cd75..6b987da58655 100644
--- a/drivers/power/supply/da9150-charger.c
+++ b/drivers/power/supply/da9150-charger.c
@@ -662,6 +662,7 @@ static int da9150_charger_remove(struct platform_device *pdev)
if (!IS_ERR_OR_NULL(charger->usb_phy))
usb_unregister_notifier(charger->usb_phy, &charger->otg_nb);
+ cancel_work_sync(&charger->otg_work);
power_supply_unregister(charger->battery);
power_supply_unregister(charger->usb);
diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
index 97b0e873e87d..c2d6378bb897 100644
--- a/drivers/power/supply/generic-adc-battery.c
+++ b/drivers/power/supply/generic-adc-battery.c
@@ -138,6 +138,9 @@ static int read_channel(struct gab *adc_bat, enum power_supply_property psp,
result);
if (ret < 0)
pr_err("read channel error\n");
+ else
+ *result *= 1000;
+
return ret;
}
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index a2f56a68c50d..2d6836b33da3 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -347,6 +347,10 @@ static int __power_supply_is_system_supplied(struct device *dev, void *data)
struct power_supply *psy = dev_get_drvdata(dev);
unsigned int *count = data;
+ if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_SCOPE, &ret))
+ if (ret.intval == POWER_SUPPLY_SCOPE_DEVICE)
+ return 0;
+
(*count)++;
if (psy->desc->type != POWER_SUPPLY_TYPE_BATTERY)
if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_ONLINE,
@@ -365,8 +369,8 @@ int power_supply_is_system_supplied(void)
__power_supply_is_system_supplied);
/*
- * If no power class device was found at all, most probably we are
- * running on a desktop system, so assume we are on mains power.
+ * If no system scope power class device was found at all, most probably we
+ * are running on a desktop system, so assume we are on mains power.
*/
if (count == 0)
return 1;
@@ -375,46 +379,49 @@ int power_supply_is_system_supplied(void)
}
EXPORT_SYMBOL_GPL(power_supply_is_system_supplied);
-static int __power_supply_get_supplier_max_current(struct device *dev,
- void *data)
+struct psy_get_supplier_prop_data {
+ struct power_supply *psy;
+ enum power_supply_property psp;
+ union power_supply_propval *val;
+};
+
+static int __power_supply_get_supplier_property(struct device *dev, void *_data)
{
- union power_supply_propval ret = {0,};
struct power_supply *epsy = dev_get_drvdata(dev);
- struct power_supply *psy = data;
+ struct psy_get_supplier_prop_data *data = _data;
- if (__power_supply_is_supplied_by(epsy, psy))
- if (!epsy->desc->get_property(epsy,
- POWER_SUPPLY_PROP_CURRENT_MAX,
- &ret))
- return ret.intval;
+ if (__power_supply_is_supplied_by(epsy, data->psy))
+ if (!epsy->desc->get_property(epsy, data->psp, data->val))
+ return 1; /* Success */
- return 0;
+ return 0; /* Continue iterating */
}
-int power_supply_set_input_current_limit_from_supplier(struct power_supply *psy)
+int power_supply_get_property_from_supplier(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
{
- union power_supply_propval val = {0,};
- int curr;
-
- if (!psy->desc->set_property)
- return -EINVAL;
+ struct psy_get_supplier_prop_data data = {
+ .psy = psy,
+ .psp = psp,
+ .val = val,
+ };
+ int ret;
/*
* This function is not intended for use with a supply with multiple
- * suppliers, we simply pick the first supply to report a non 0
- * max-current.
+ * suppliers, we simply pick the first supply to report the psp.
*/
- curr = class_for_each_device(power_supply_class, NULL, psy,
- __power_supply_get_supplier_max_current);
- if (curr <= 0)
- return (curr == 0) ? -ENODEV : curr;
-
- val.intval = curr;
+ ret = class_for_each_device(power_supply_class, NULL, &data,
+ __power_supply_get_supplier_property);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -ENODEV;
- return psy->desc->set_property(psy,
- POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, &val);
+ return 0;
}
-EXPORT_SYMBOL_GPL(power_supply_set_input_current_limit_from_supplier);
+EXPORT_SYMBOL_GPL(power_supply_get_property_from_supplier);
int power_supply_set_battery_charged(struct power_supply *psy)
{
@@ -648,6 +655,11 @@ int power_supply_get_battery_info(struct power_supply *psy,
int i, tab_len, size;
propname = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d", index);
+ if (!propname) {
+ power_supply_put_battery_info(psy, info);
+ err = -ENOMEM;
+ goto out_put_node;
+ }
list = of_get_property(battery_np, propname, &size);
if (!list || !size) {
dev_err(&psy->dev, "failed to get %s\n", propname);
@@ -1104,8 +1116,8 @@ create_triggers_failed:
register_cooler_failed:
psy_unregister_thermal(psy);
register_thermal_failed:
- device_del(dev);
wakeup_init_failed:
+ device_del(dev);
device_add_failed:
check_supplies_failed:
dev_set_name_failed:
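The new power_supply_get_property_from_supplier() above generalizes the removed max-current helper; the bq24190 hunk earlier in this patch shows its intended use. A minimal caller sketch:

union power_supply_propval val;
int ret;

ret = power_supply_get_property_from_supplier(psy,
					      POWER_SUPPLY_PROP_CURRENT_MAX,
					      &val);
if (ret)	/* -ENODEV when no supplier reports the property */
	return ret;

/* val.intval now holds the first supplier's CURRENT_MAX */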
diff --git a/drivers/power/supply/power_supply_leds.c b/drivers/power/supply/power_supply_leds.c
index d69880cc3593..b7a2778f878d 100644
--- a/drivers/power/supply/power_supply_leds.c
+++ b/drivers/power/supply/power_supply_leds.c
@@ -34,8 +34,9 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
led_trigger_event(psy->charging_full_trig, LED_FULL);
led_trigger_event(psy->charging_trig, LED_OFF);
led_trigger_event(psy->full_trig, LED_FULL);
- led_trigger_event(psy->charging_blink_full_solid_trig,
- LED_FULL);
+ /* Going from blink to LED on requires an LED_OFF event to stop the blinking */
+ led_trigger_event(psy->charging_blink_full_solid_trig, LED_OFF);
+ led_trigger_event(psy->charging_blink_full_solid_trig, LED_FULL);
break;
case POWER_SUPPLY_STATUS_CHARGING:
led_trigger_event(psy->charging_full_trig, LED_FULL);
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index f37ad4eae60b..d6c47ea27010 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -127,7 +127,8 @@ static ssize_t power_supply_show_property(struct device *dev,
if (ret < 0) {
if (ret == -ENODATA)
- dev_dbg(dev, "driver has no data for `%s' property\n",
+ dev_dbg_ratelimited(dev,
+ "driver has no data for `%s' property\n",
attr->attr.name);
else if (ret != -ENODEV && ret != -EAGAIN)
dev_err_ratelimited(dev,
diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c
index fbfb6a620961..a30eb42e379a 100644
--- a/drivers/power/supply/sbs-charger.c
+++ b/drivers/power/supply/sbs-charger.c
@@ -25,7 +25,7 @@
#define SBS_CHARGER_REG_STATUS 0x13
#define SBS_CHARGER_REG_ALARM_WARNING 0x16
-#define SBS_CHARGER_STATUS_CHARGE_INHIBITED BIT(1)
+#define SBS_CHARGER_STATUS_CHARGE_INHIBITED BIT(0)
#define SBS_CHARGER_STATUS_RES_COLD BIT(9)
#define SBS_CHARGER_STATUS_RES_HOT BIT(10)
#define SBS_CHARGER_STATUS_BATTERY_PRESENT BIT(14)
diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c
index 5e5bcdbf2e69..557b02d40813 100644
--- a/drivers/power/supply/sc27xx_fuel_gauge.c
+++ b/drivers/power/supply/sc27xx_fuel_gauge.c
@@ -634,13 +634,6 @@ static int sc27xx_fgu_set_property(struct power_supply *psy,
return ret;
}
-static void sc27xx_fgu_external_power_changed(struct power_supply *psy)
-{
- struct sc27xx_fgu_data *data = power_supply_get_drvdata(psy);
-
- power_supply_changed(data->battery);
-}
-
static int sc27xx_fgu_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
@@ -671,7 +664,7 @@ static const struct power_supply_desc sc27xx_fgu_desc = {
.num_properties = ARRAY_SIZE(sc27xx_fgu_props),
.get_property = sc27xx_fgu_get_property,
.set_property = sc27xx_fgu_set_property,
- .external_power_changed = sc27xx_fgu_external_power_changed,
+ .external_power_changed = power_supply_changed,
.property_is_writeable = sc27xx_fgu_property_is_writeable,
};
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
index dc1c1381d7fa..61fd5dfaf7a0 100644
--- a/drivers/powercap/Kconfig
+++ b/drivers/powercap/Kconfig
@@ -18,10 +18,12 @@ if POWERCAP
# Client driver configurations go here.
config INTEL_RAPL_CORE
tristate
+ depends on PCI
+ select IOSF_MBI
config INTEL_RAPL
tristate "Intel RAPL Support via MSR Interface"
- depends on X86 && IOSF_MBI
+ depends on X86 && PCI
select INTEL_RAPL_CORE
---help---
This enables support for the Intel Running Average Power Limit (RAPL)
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 925b0004a0ed..d5a505f32260 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -885,6 +885,9 @@ static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value,
y = value & 0x1f;
value = (1 << y) * (4 + f) * rp->time_unit / 4;
} else {
+ if (value < rp->time_unit)
+ return 0;
+
do_div(value, rp->time_unit);
y = ilog2(value);
f = div64_u64(4 * (value - (1 << y)), 1 << y);
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index d5487965bdfe..6091e462626a 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -22,7 +22,6 @@
#include <linux/processor.h>
#include <linux/platform_device.h>
-#include <asm/iosf_mbi.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 3f0b8e2ef3d4..7a3109a53881 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -530,9 +530,6 @@ struct powercap_zone *powercap_register_zone(
power_zone->name = kstrdup(name, GFP_KERNEL);
if (!power_zone->name)
goto err_name_alloc;
- dev_set_name(&power_zone->dev, "%s:%x",
- dev_name(power_zone->dev.parent),
- power_zone->id);
power_zone->constraints = kcalloc(nr_constraints,
sizeof(*power_zone->constraints),
GFP_KERNEL);
@@ -555,9 +552,16 @@ struct powercap_zone *powercap_register_zone(
power_zone->dev_attr_groups[0] = &power_zone->dev_zone_attr_group;
power_zone->dev_attr_groups[1] = NULL;
power_zone->dev.groups = power_zone->dev_attr_groups;
+ dev_set_name(&power_zone->dev, "%s:%x",
+ dev_name(power_zone->dev.parent),
+ power_zone->id);
result = device_register(&power_zone->dev);
- if (result)
- goto err_dev_ret;
+ if (result) {
+ put_device(&power_zone->dev);
+ mutex_unlock(&control_type->lock);
+
+ return ERR_PTR(result);
+ }
control_type->nr_zones++;
mutex_unlock(&control_type->lock);
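The powercap_sys change above follows the device-core rule that a failed device_register() still leaves a refcounted struct device behind. A minimal sketch of the required error path (`zone` is a hypothetical name):

result = device_register(&zone->dev);
if (result) {
	/* put_device(), not kfree(): the release callback frees it */
	put_device(&zone->dev);
	return ERR_PTR(result);
}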
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 9d72ab593f13..87bd6c072ac2 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -443,7 +443,8 @@ ssize_t ptp_read(struct posix_clock *pc,
for (i = 0; i < cnt; i++) {
event[i] = queue->buf[queue->head];
- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
+ /* Paired with READ_ONCE() in queue_cnt() */
+ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
}
spin_unlock_irqrestore(&queue->lock, flags);
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index eedf067ee8e3..a6ff02a02cab 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -55,10 +55,11 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
dst->t.sec = seconds;
dst->t.nsec = remainder;
+ /* Both WRITE_ONCE() calls are paired with READ_ONCE() in queue_cnt() */
if (!queue_free(queue))
- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
+ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
- queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;
+ WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);
spin_unlock_irqrestore(&queue->lock, flags);
}
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 6b97155148f1..d2cb95670676 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -55,9 +55,13 @@ struct ptp_clock {
* that a writer might concurrently increment the tail does not
* matter, since the queue remains nonempty nonetheless.
*/
-static inline int queue_cnt(struct timestamp_event_queue *q)
+static inline int queue_cnt(const struct timestamp_event_queue *q)
{
- int cnt = q->tail - q->head;
+ /*
+ * Paired with WRITE_ONCE() in enqueue_external_timestamp(),
+ * ptp_read(), extts_fifo_show().
+ */
+ int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head);
return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
}
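The queue_cnt() change above pairs the lockless reader with the writers' WRITE_ONCE() stores so the compiler cannot tear or re-load the indices. A minimal sketch of the same counter on a generic ring, assuming a placeholder capacity:

#define RING_SLOTS 128	/* placeholder capacity */

struct ring {
	int head;
	int tail;
};

static inline int ring_cnt(const struct ring *r)
{
	/* Paired with WRITE_ONCE() on the producer and consumer sides. */
	int cnt = READ_ONCE(r->tail) - READ_ONCE(r->head);

	return cnt < 0 ? RING_SLOTS + cnt : cnt;
}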
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index a577218d1ab7..ca211feadb38 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -604,7 +604,7 @@ static int ptp_qoriq_probe(struct platform_device *dev)
return 0;
no_clock:
- iounmap(ptp_qoriq->base);
+ iounmap(base);
no_ioremap:
release_resource(ptp_qoriq->rsrc);
no_resource:
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index 8cd59e848163..8d52815e05b3 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -78,7 +78,8 @@ static ssize_t extts_fifo_show(struct device *dev,
qcnt = queue_cnt(queue);
if (qcnt) {
event = queue->buf[queue->head];
- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
+ /* Paired with READ_ONCE() in queue_cnt() */
+ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
}
spin_unlock_irqrestore(&queue->lock, flags);
diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
index fea612c45f20..fd8479cd6771 100644
--- a/drivers/pwm/pwm-brcmstb.c
+++ b/drivers/pwm/pwm-brcmstb.c
@@ -298,7 +298,7 @@ static int brcmstb_pwm_suspend(struct device *dev)
{
struct brcmstb_pwm *p = dev_get_drvdata(dev);
- clk_disable(p->clk);
+ clk_disable_unprepare(p->clk);
return 0;
}
@@ -307,7 +307,7 @@ static int brcmstb_pwm_resume(struct device *dev)
{
struct brcmstb_pwm *p = dev_get_drvdata(dev);
- clk_enable(p->clk);
+ clk_prepare_enable(p->clk);
return 0;
}
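The pwm-brcmstb fix above drops and restores the prepare count across suspend, not just the enable count; clk_enable() alone cannot bring back a clock that was fully unprepared. A minimal sketch under that assumption (`my_pwm` is hypothetical):

#include <linux/clk.h>

static int my_pwm_suspend(struct device *dev)
{
	struct my_pwm *p = dev_get_drvdata(dev);

	clk_disable_unprepare(p->clk);	/* drop enable and prepare counts */
	return 0;
}

static int my_pwm_resume(struct device *dev)
{
	struct my_pwm *p = dev_get_drvdata(dev);

	return clk_prepare_enable(p->clk);
}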
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 89497448d217..ad4321f2b6f8 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -125,6 +125,7 @@ static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
state->enabled = (ret > 0);
state->period = EC_PWM_MAX_DUTY;
+ state->polarity = PWM_POLARITY_NORMAL;
/* Note that "disabled" and "duty cycle == 0" are treated the same */
state->duty_cycle = ret;
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index ad205fdad372..286e9b119ee5 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -146,6 +146,7 @@ static void hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
value = readl(base + PWM_CTRL_ADDR(pwm->hwpwm));
state->enabled = (PWM_ENABLE_MASK & value);
+ state->polarity = (PWM_POLARITY_MASK & value) ? PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
}
static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index 9145f6160649..85aad55b7a8f 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -405,6 +405,13 @@ static int __maybe_unused pwm_imx_tpm_suspend(struct device *dev)
if (tpm->enable_count > 0)
return -EBUSY;
+ /*
+ * Force 'real_period' to zero so that the period update code
+ * runs again after system resume, since suspend resets the
+ * period-related registers to their default values.
+ */
+ tpm->real_period = 0;
+
clk_disable_unprepare(tpm->clk);
return 0;
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 522f862eca52..504a8f506195 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -51,10 +51,10 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (duty_cycles > 255)
duty_cycles = 255;
- val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val = readl(lpc32xx->base);
val &= ~0xFFFF;
val |= (period_cycles << 8) | duty_cycles;
- writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+ writel(val, lpc32xx->base);
return 0;
}
@@ -69,9 +69,9 @@ static int lpc32xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
if (ret)
return ret;
- val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val = readl(lpc32xx->base);
val |= PWM_ENABLE;
- writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+ writel(val, lpc32xx->base);
return 0;
}
@@ -81,9 +81,9 @@ static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
u32 val;
- val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val = readl(lpc32xx->base);
val &= ~PWM_ENABLE;
- writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+ writel(val, lpc32xx->base);
clk_disable_unprepare(lpc32xx->clk);
}
@@ -121,9 +121,9 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
lpc32xx->chip.base = -1;
/* If PWM is disabled, configure the output to the default value */
- val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
+ val = readl(lpc32xx->base);
val &= ~PWM_PIN_LEVEL;
- writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
+ writel(val, lpc32xx->base);
ret = pwmchip_add(&lpc32xx->chip);
if (ret < 0) {
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 6245bbdb6e6c..768e6e691c7c 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -147,12 +147,13 @@ static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
return err;
}
- return pwm_set_chip_data(pwm, channel);
+ return 0;
}
static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+ struct meson_pwm *meson = to_meson_pwm(chip);
+ struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
if (channel)
clk_disable_unprepare(channel->clk);
@@ -161,13 +162,20 @@ static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
const struct pwm_state *state)
{
- struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
- unsigned int duty, period, pre_div, cnt, duty_cnt;
- unsigned long fin_freq = -1;
+ struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
+ unsigned int pre_div, cnt, duty_cnt;
+ unsigned long fin_freq;
+ u64 duty, period;
duty = state->duty_cycle;
period = state->period;
+ /*
+ * Note this is wrong. The result is an output wave that isn't really
+ * inverted and so is wrongly identified by .get_state as normal.
+ * Fixing this needs some care, however, as some machines might rely
+ * on this.
+ */
if (state->polarity == PWM_POLARITY_INVERSED)
duty = period - duty;
@@ -179,19 +187,19 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
dev_dbg(meson->chip.dev, "fin_freq: %lu Hz\n", fin_freq);
- pre_div = div64_u64(fin_freq * (u64)period, NSEC_PER_SEC * 0xffffLL);
+ pre_div = div64_u64(fin_freq * period, NSEC_PER_SEC * 0xffffLL);
if (pre_div > MISC_CLK_DIV_MASK) {
dev_err(meson->chip.dev, "unable to get period pre_div\n");
return -EINVAL;
}
- cnt = div64_u64(fin_freq * (u64)period, NSEC_PER_SEC * (pre_div + 1));
+ cnt = div64_u64(fin_freq * period, NSEC_PER_SEC * (pre_div + 1));
if (cnt > 0xffff) {
dev_err(meson->chip.dev, "unable to get period cnt\n");
return -EINVAL;
}
- dev_dbg(meson->chip.dev, "period=%u pre_div=%u cnt=%u\n", period,
+ dev_dbg(meson->chip.dev, "period=%llu pre_div=%u cnt=%u\n", period,
pre_div, cnt);
if (duty == period) {
@@ -204,14 +212,13 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
channel->lo = cnt;
} else {
/* Then check if we can have the duty with the same pre_div */
- duty_cnt = div64_u64(fin_freq * (u64)duty,
- NSEC_PER_SEC * (pre_div + 1));
+ duty_cnt = div64_u64(fin_freq * duty, NSEC_PER_SEC * (pre_div + 1));
if (duty_cnt > 0xffff) {
dev_err(meson->chip.dev, "unable to get duty cycle\n");
return -EINVAL;
}
- dev_dbg(meson->chip.dev, "duty=%u pre_div=%u duty_cnt=%u\n",
+ dev_dbg(meson->chip.dev, "duty=%llu pre_div=%u duty_cnt=%u\n",
duty, pre_div, duty_cnt);
channel->pre_div = pre_div;
@@ -224,7 +231,7 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
static void meson_pwm_enable(struct meson_pwm *meson, struct pwm_device *pwm)
{
- struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+ struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
struct meson_pwm_channel_data *channel_data;
unsigned long flags;
u32 value;
@@ -267,8 +274,8 @@ static void meson_pwm_disable(struct meson_pwm *meson, struct pwm_device *pwm)
static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
- struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
struct meson_pwm *meson = to_meson_pwm(chip);
+ struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
int err = 0;
if (!state)
@@ -366,6 +373,7 @@ static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
state->period = 0;
state->duty_cycle = 0;
}
+ state->polarity = PWM_POLARITY_NORMAL;
}
static const struct pwm_ops meson_pwm_ops = {
@@ -417,7 +425,7 @@ static const struct meson_pwm_data pwm_axg_ee_data = {
};
static const char * const pwm_axg_ao_parent_names[] = {
- "aoclk81", "xtal", "fclk_div4", "fclk_div5"
+ "xtal", "axg_ao_clk81", "fclk_div4", "fclk_div5"
};
static const struct meson_pwm_data pwm_axg_ao_data = {
@@ -426,7 +434,7 @@ static const struct meson_pwm_data pwm_axg_ao_data = {
};
static const char * const pwm_g12a_ao_ab_parent_names[] = {
- "xtal", "aoclk81", "fclk_div4", "fclk_div5"
+ "xtal", "g12a_ao_clk81", "fclk_div4", "fclk_div5"
};
static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
@@ -435,7 +443,7 @@ static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
};
static const char * const pwm_g12a_ao_cd_parent_names[] = {
- "xtal", "aoclk81",
+ "xtal", "g12a_ao_clk81",
};
static const struct meson_pwm_data pwm_g12a_ao_cd_data = {
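The widening of duty and period to u64 in meson_pwm_calc() matters because pwm_state carries nanosecond values as u64; funnelling them through unsigned int silently wraps any period above UINT_MAX ns (about 4.29 s). A standalone illustration of the truncation the change removes:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* A 5 second period, in nanoseconds, as pwm_state carries it. */
	uint64_t period = 5000000000ULL;

	unsigned int truncated = period;	/* old driver: silent wrap */
	uint64_t kept = period;			/* fixed driver */

	printf("requested: %" PRIu64 " ns\n", period);
	printf("truncated: %u ns\n", truncated);
	printf("kept:      %" PRIu64 " ns\n", kept);
	return 0;
}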
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 83b8be0209b7..327c780d433d 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -74,6 +74,19 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
u64 div, rate;
int err;
+ err = clk_prepare_enable(mdp->clk_main);
+ if (err < 0) {
+ dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n", ERR_PTR(err));
+ return err;
+ }
+
+ err = clk_prepare_enable(mdp->clk_mm);
+ if (err < 0) {
+ dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n", ERR_PTR(err));
+ clk_disable_unprepare(mdp->clk_main);
+ return err;
+ }
+
/*
* Find period, high_width and clk_div to suit duty_ns and period_ns.
* Calculate proper div value to keep period value in the bound.
@@ -87,8 +100,11 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
rate = clk_get_rate(mdp->clk_main);
clk_div = div_u64(rate * period_ns, NSEC_PER_SEC) >>
PWM_PERIOD_BIT_WIDTH;
- if (clk_div > PWM_CLKDIV_MAX)
+ if (clk_div > PWM_CLKDIV_MAX) {
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
return -EINVAL;
+ }
div = NSEC_PER_SEC * (clk_div + 1);
period = div64_u64(rate * period_ns, div);
@@ -98,14 +114,17 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
high_width = div64_u64(rate * duty_ns, div);
value = period | (high_width << PWM_HIGH_WIDTH_SHIFT);
- err = clk_enable(mdp->clk_main);
- if (err < 0)
- return err;
-
- err = clk_enable(mdp->clk_mm);
- if (err < 0) {
- clk_disable(mdp->clk_main);
- return err;
+ if (mdp->data->bls_debug && !mdp->data->has_commit) {
+ /*
+ * For MT2701, disable the double buffer before writing the
+ * register, select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
+ */
+ mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
+ mdp->data->bls_debug_mask,
+ mdp->data->bls_debug_mask);
+ mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
+ mdp->data->con0_sel,
+ mdp->data->con0_sel);
}
mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
@@ -124,8 +143,8 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
0x0);
}
- clk_disable(mdp->clk_mm);
- clk_disable(mdp->clk_main);
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
return 0;
}
@@ -135,13 +154,16 @@ static int mtk_disp_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
int err;
- err = clk_enable(mdp->clk_main);
- if (err < 0)
+ err = clk_prepare_enable(mdp->clk_main);
+ if (err < 0) {
+ dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n", ERR_PTR(err));
return err;
+ }
- err = clk_enable(mdp->clk_mm);
+ err = clk_prepare_enable(mdp->clk_mm);
if (err < 0) {
- clk_disable(mdp->clk_main);
+ dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n", ERR_PTR(err));
+ clk_disable_unprepare(mdp->clk_main);
return err;
}
@@ -158,8 +180,8 @@ static void mtk_disp_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
0x0);
- clk_disable(mdp->clk_mm);
- clk_disable(mdp->clk_main);
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
}
static const struct pwm_ops mtk_disp_pwm_ops = {
@@ -194,14 +216,6 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
if (IS_ERR(mdp->clk_mm))
return PTR_ERR(mdp->clk_mm);
- ret = clk_prepare(mdp->clk_main);
- if (ret < 0)
- return ret;
-
- ret = clk_prepare(mdp->clk_mm);
- if (ret < 0)
- goto disable_clk_main;
-
mdp->chip.dev = &pdev->dev;
mdp->chip.ops = &mtk_disp_pwm_ops;
mdp->chip.base = -1;
@@ -209,44 +223,22 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
ret = pwmchip_add(&mdp->chip);
if (ret < 0) {
- dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
- goto disable_clk_mm;
+ dev_err(&pdev->dev, "pwmchip_add() failed: %pe\n", ERR_PTR(ret));
+ return ret;
}
platform_set_drvdata(pdev, mdp);
- /*
- * For MT2701, disable double buffer before writing register
- * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
- */
- if (!mdp->data->has_commit) {
- mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
- mdp->data->bls_debug_mask,
- mdp->data->bls_debug_mask);
- mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
- mdp->data->con0_sel,
- mdp->data->con0_sel);
- }
-
return 0;
-
-disable_clk_mm:
- clk_unprepare(mdp->clk_mm);
-disable_clk_main:
- clk_unprepare(mdp->clk_main);
- return ret;
}
static int mtk_disp_pwm_remove(struct platform_device *pdev)
{
struct mtk_disp_pwm *mdp = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&mdp->chip);
- clk_unprepare(mdp->clk_mm);
- clk_unprepare(mdp->clk_main);
+ pwmchip_remove(&mdp->chip);
- return ret;
+ return 0;
}
static const struct mtk_pwm_data mt2701_pwm_data = {
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index cc63f9baa481..16c70147ec40 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -43,7 +43,7 @@
struct pwm_sifive_ddata {
struct pwm_chip chip;
- struct mutex lock; /* lock to protect user_count */
+ struct mutex lock; /* lock to protect user_count and approx_period */
struct notifier_block notifier;
struct clk *clk;
void __iomem *regs;
@@ -78,6 +78,7 @@ static void pwm_sifive_free(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_unlock(&ddata->lock);
}
+/* Called holding ddata->lock */
static void pwm_sifive_update_clock(struct pwm_sifive_ddata *ddata,
unsigned long rate)
{
@@ -166,7 +167,6 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
- mutex_lock(&ddata->lock);
cur_state = pwm->state;
enabled = cur_state.enabled;
@@ -185,14 +185,23 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
/* The hardware cannot generate a 100% duty cycle */
frac = min(frac, (1U << PWM_SIFIVE_CMPWIDTH) - 1);
+ mutex_lock(&ddata->lock);
if (state->period != ddata->approx_period) {
- if (ddata->user_count != 1) {
+ /*
+ * Don't let a 2nd user change the period underneath the 1st user.
+ * However, if ddata->approx_period == 0 this is the first time we set
+ * any period, so let whoever gets here first set the period so other
+ * users who agree on the period won't fail.
+ */
+ if (ddata->user_count != 1 && ddata->approx_period) {
+ mutex_unlock(&ddata->lock);
ret = -EBUSY;
goto exit;
}
ddata->approx_period = state->period;
pwm_sifive_update_clock(ddata, clk_get_rate(ddata->clk));
}
+ mutex_unlock(&ddata->lock);
writel(frac, ddata->regs + PWM_SIFIVE_PWMCMP0 +
pwm->hwpwm * PWM_SIFIVE_SIZE_PWMCMP);
@@ -202,7 +211,6 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
exit:
clk_disable(ddata->clk);
- mutex_unlock(&ddata->lock);
return ret;
}
@@ -221,8 +229,11 @@ static int pwm_sifive_clock_notifier(struct notifier_block *nb,
struct pwm_sifive_ddata *ddata =
container_of(nb, struct pwm_sifive_ddata, notifier);
- if (event == POST_RATE_CHANGE)
+ if (event == POST_RATE_CHANGE) {
+ mutex_lock(&ddata->lock);
pwm_sifive_update_clock(ddata, ndata->new_rate);
+ mutex_unlock(&ddata->lock);
+ }
return NOTIFY_OK;
}
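The sifive rework narrows the mutex to exactly the shared fields (user_count and approx_period) and takes the same lock from the clock-rate notifier, since pwm_sifive_update_clock() is now reachable from two contexts. A pthread sketch of that discipline, with hypothetical names and a simplified scale computation:

#include <pthread.h>
#include <stdio.h>

/* Shared driver state, as in pwm_sifive_ddata. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long approx_period;	/* guarded by lock */
static unsigned long scale;		/* guarded by lock */

/* Called holding lock, like pwm_sifive_update_clock(). */
static void update_clock(unsigned long rate)
{
	scale = rate / (approx_period ? approx_period : 1);
}

/* .apply path: only the shared-state window is locked. */
static void apply(unsigned long period, unsigned long rate)
{
	pthread_mutex_lock(&lock);
	if (period != approx_period) {
		approx_period = period;
		update_clock(rate);
	}
	pthread_mutex_unlock(&lock);
	/* per-channel register writes need no lock */
}

/* Clock notifier: same lock, same invariant. */
static void rate_change_notifier(unsigned long new_rate)
{
	pthread_mutex_lock(&lock);
	update_clock(new_rate);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	apply(1000, 16000000);
	rate_change_notifier(8000000);
	printf("scale=%lu\n", scale);
	return 0;
}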
diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c
index 892d853d48a1..b30d664bf7d5 100644
--- a/drivers/pwm/pwm-sprd.c
+++ b/drivers/pwm/pwm-sprd.c
@@ -109,6 +109,7 @@ static void sprd_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
duty = val & SPRD_PWM_DUTY_MSK;
tmp = (prescale + 1) * NSEC_PER_SEC * duty;
state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, chn->clk_rate);
+ state->polarity = PWM_POLARITY_NORMAL;
/* Disable PWM clocks if the PWM channel is not in enable state. */
if (!state->enabled)
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index 1508616d794c..9b2174adab3d 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -79,6 +79,7 @@ struct sti_pwm_compat_data {
unsigned int cpt_num_devs;
unsigned int max_pwm_cnt;
unsigned int max_prescale;
+ struct sti_cpt_ddata *ddata;
};
struct sti_pwm_chip {
@@ -314,7 +315,7 @@ static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
struct sti_pwm_compat_data *cdata = pc->cdata;
- struct sti_cpt_ddata *ddata = pwm_get_chip_data(pwm);
+ struct sti_cpt_ddata *ddata = &cdata->ddata[pwm->hwpwm];
struct device *dev = pc->dev;
unsigned int effective_ticks;
unsigned long long high, low;
@@ -417,7 +418,7 @@ static irqreturn_t sti_pwm_interrupt(int irq, void *data)
while (cpt_int_stat) {
devicenum = ffs(cpt_int_stat) - 1;
- ddata = pwm_get_chip_data(&pc->chip.pwms[devicenum]);
+ ddata = &pc->cdata->ddata[devicenum];
/*
* Capture input:
@@ -593,61 +594,55 @@ static int sti_pwm_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (!cdata->pwm_num_devs)
- goto skip_pwm;
-
- pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm");
- if (IS_ERR(pc->pwm_clk)) {
- dev_err(dev, "failed to get PWM clock\n");
- return PTR_ERR(pc->pwm_clk);
- }
+ if (cdata->pwm_num_devs) {
+ pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm");
+ if (IS_ERR(pc->pwm_clk)) {
+ dev_err(dev, "failed to get PWM clock\n");
+ return PTR_ERR(pc->pwm_clk);
+ }
- ret = clk_prepare(pc->pwm_clk);
- if (ret) {
- dev_err(dev, "failed to prepare clock\n");
- return ret;
+ ret = clk_prepare(pc->pwm_clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare clock\n");
+ return ret;
+ }
}
-skip_pwm:
- if (!cdata->cpt_num_devs)
- goto skip_cpt;
+ if (cdata->cpt_num_devs) {
+ pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture");
+ if (IS_ERR(pc->cpt_clk)) {
+ dev_err(dev, "failed to get PWM capture clock\n");
+ return PTR_ERR(pc->cpt_clk);
+ }
- pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture");
- if (IS_ERR(pc->cpt_clk)) {
- dev_err(dev, "failed to get PWM capture clock\n");
- return PTR_ERR(pc->cpt_clk);
- }
+ ret = clk_prepare(pc->cpt_clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare clock\n");
+ return ret;
+ }
- ret = clk_prepare(pc->cpt_clk);
- if (ret) {
- dev_err(dev, "failed to prepare clock\n");
- return ret;
+ cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL);
+ if (!cdata->ddata)
+ return -ENOMEM;
}
-skip_cpt:
pc->chip.dev = dev;
pc->chip.ops = &sti_pwm_ops;
pc->chip.base = -1;
pc->chip.npwm = pc->cdata->pwm_num_devs;
- ret = pwmchip_add(&pc->chip);
- if (ret < 0) {
- clk_unprepare(pc->pwm_clk);
- clk_unprepare(pc->cpt_clk);
- return ret;
- }
-
for (i = 0; i < cdata->cpt_num_devs; i++) {
- struct sti_cpt_ddata *ddata;
-
- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
+ struct sti_cpt_ddata *ddata = &cdata->ddata[i];
init_waitqueue_head(&ddata->wait);
mutex_init(&ddata->lock);
+ }
- pwm_set_chip_data(&pc->chip.pwms[i], ddata);
+ ret = pwmchip_add(&pc->chip);
+ if (ret < 0) {
+ clk_unprepare(pc->pwm_clk);
+ clk_unprepare(pc->cpt_clk);
+ return ret;
}
platform_set_drvdata(pdev, pc);
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 05bb1f95a773..20657c649c65 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -127,7 +127,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
/* ensure CMP & ARR registers are properly written */
ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
- (val & STM32_LPTIM_CMPOK_ARROK),
+ (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
100, 1000);
if (ret) {
dev_err(priv->chip.dev, "ARR/CMP registers write issue\n");
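The stm32-lp one-liner is a classic multi-bit-mask pitfall: STM32_LPTIM_CMPOK_ARROK covers two flag bits, so (val & mask) is already truthy when only one of CMPOK/ARROK has been set and the poll could finish before both registers have landed; comparing against the full mask waits for both. A standalone demonstration, with bit positions assumed for illustration:

#include <stdio.h>

#define CMPOK (1u << 3)		/* assumed bit positions */
#define ARROK (1u << 4)
#define CMPOK_ARROK (CMPOK | ARROK)

int main(void)
{
	unsigned int val = CMPOK;	/* only one flag set so far */

	printf("truthy test:   %s\n",
	       (val & CMPOK_ARROK) ? "done (wrong)" : "keep polling");
	printf("equality test: %s\n",
	       (val & CMPOK_ARROK) == CMPOK_ARROK ?
	       "done" : "keep polling (right)");
	return 0;
}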
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 2389b8669846..986f3a29a13d 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -424,6 +424,13 @@ static int pwm_class_resume_npwm(struct device *parent, unsigned int npwm)
if (!export)
continue;
+ /* If pwmchip was not enabled before suspend, do nothing. */
+ if (!export->suspend.enabled) {
+ /* release lock taken in pwm_class_get_state */
+ mutex_unlock(&export->lock);
+ continue;
+ }
+
state.enabled = export->suspend.enabled;
ret = pwm_class_apply_state(export, pwm, &state);
if (ret < 0)
@@ -448,7 +455,17 @@ static int __maybe_unused pwm_class_suspend(struct device *parent)
if (!export)
continue;
+ /*
+ * If pwmchip was not enabled before suspend, save
+ * state for resume time and do nothing else.
+ */
export->suspend = state;
+ if (!state.enabled) {
+ /* release lock taken in pwm_class_get_state */
+ mutex_unlock(&export->lock);
+ continue;
+ }
+
state.enabled = false;
ret = pwm_class_apply_state(export, pwm, &state);
if (ret < 0) {
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 2b08fdeb87c1..2371151bc8fc 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -1807,8 +1807,11 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
0, 0xffff);
err = rio_add_device(rdev);
- if (err)
- goto cleanup;
+ if (err) {
+ put_device(&rdev->dev);
+ return err;
+ }
+
rio_dev_get(rdev);
return 0;
@@ -1904,10 +1907,6 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
priv->md = chdev;
- mutex_lock(&chdev->file_mutex);
- list_add_tail(&priv->list, &chdev->file_list);
- mutex_unlock(&chdev->file_mutex);
-
INIT_LIST_HEAD(&priv->db_filters);
INIT_LIST_HEAD(&priv->pw_filters);
spin_lock_init(&priv->fifo_lock);
@@ -1916,6 +1915,7 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
GFP_KERNEL);
if (ret < 0) {
+ put_device(&chdev->dev);
dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
ret = -ENOMEM;
goto err_fifo;
@@ -1926,6 +1926,9 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
spin_lock_init(&priv->req_lock);
mutex_init(&priv->dma_lock);
#endif
+ mutex_lock(&chdev->file_mutex);
+ list_add_tail(&priv->list, &chdev->file_list);
+ mutex_unlock(&chdev->file_mutex);
filp->private_data = priv;
goto out;
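The rio_mport_cdev and rio-scan fixes apply one refcounting rule: once an object has been handed to a register/add call that takes a reference, failure paths must release it with put_device() instead of freeing it directly, so the release callback runs exactly once. A toy refcount model of that rule, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct toy_device {
	int refcount;
	char payload[32];
};

static void put_device(struct toy_device *d)
{
	if (--d->refcount == 0) {
		printf("released %p\n", (void *)d);
		free(d);
	}
}

/* Pretend registration that can fail after initialization. */
static int add_device(struct toy_device *d, int simulate_failure)
{
	(void)d;
	return simulate_failure ? -22 /* -EINVAL */ : 0;
}

static int setup_device(int simulate_failure)
{
	struct toy_device *d = calloc(1, sizeof(*d));
	int err;

	if (!d)
		return -12;	/* -ENOMEM */
	d->refcount = 1;	/* the caller's initial reference */

	err = add_device(d, simulate_failure);
	if (err) {
		put_device(d);	/* not free(): drop the reference */
		return err;
	}
	return 0;
}

int main(void)
{
	printf("failure path -> %d\n", setup_device(1));
	return 0;
}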
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 0e90c5d4bb2b..b1cd6e028f2b 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -456,8 +456,12 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
0, 0xffff);
ret = rio_add_device(rdev);
- if (ret)
- goto cleanup;
+ if (ret) {
+ if (rswitch)
+ kfree(rswitch->route_table);
+ put_device(&rdev->dev);
+ return NULL;
+ }
rio_dev_get(rdev);
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 606986c5ba2c..fcab174e5888 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -2267,11 +2267,16 @@ int rio_register_mport(struct rio_mport *port)
atomic_set(&port->state, RIO_DEVICE_RUNNING);
res = device_register(&port->dev);
- if (res)
+ if (res) {
dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n",
port->id, res);
- else
+ mutex_lock(&rio_mport_list_lock);
+ list_del(&port->node);
+ mutex_unlock(&rio_mport_list_lock);
+ put_device(&port->dev);
+ } else {
dev_dbg(&port->dev, "RIO: registered mport%d\n", port->id);
+ }
return res;
}
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ae2addadb36f..894915892eaf 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -217,6 +217,78 @@ void regulator_unlock(struct regulator_dev *rdev)
}
EXPORT_SYMBOL_GPL(regulator_unlock);
+/**
+ * regulator_lock_two - lock two regulators
+ * @rdev1: first regulator
+ * @rdev2: second regulator
+ * @ww_ctx: w/w mutex acquire context
+ *
+ * Locks both rdevs using the regulator_ww_class.
+ */
+static void regulator_lock_two(struct regulator_dev *rdev1,
+ struct regulator_dev *rdev2,
+ struct ww_acquire_ctx *ww_ctx)
+{
+ struct regulator_dev *tmp;
+ int ret;
+
+ ww_acquire_init(ww_ctx, &regulator_ww_class);
+
+ /* Try to just grab both of them */
+ ret = regulator_lock_nested(rdev1, ww_ctx);
+ WARN_ON(ret);
+ ret = regulator_lock_nested(rdev2, ww_ctx);
+ if (ret != -EDEADLOCK) {
+ WARN_ON(ret);
+ goto exit;
+ }
+
+ while (true) {
+ /*
+ * Start of loop: rdev1 was locked and rdev2 was contended.
+ * Need to unlock rdev1, slowly lock rdev2, then try rdev1
+ * again.
+ */
+ regulator_unlock(rdev1);
+
+ ww_mutex_lock_slow(&rdev2->mutex, ww_ctx);
+ rdev2->ref_cnt++;
+ rdev2->mutex_owner = current;
+ ret = regulator_lock_nested(rdev1, ww_ctx);
+
+ if (ret == -EDEADLOCK) {
+ /* More contention; swap which needs to be slow */
+ tmp = rdev1;
+ rdev1 = rdev2;
+ rdev2 = tmp;
+ } else {
+ WARN_ON(ret);
+ break;
+ }
+ }
+
+exit:
+ ww_acquire_done(ww_ctx);
+}
+
+/**
+ * regulator_unlock_two - unlock two regulators
+ * @rdev1: first regulator
+ * @rdev2: second regulator
+ * @ww_ctx: w/w mutex acquire context
+ *
+ * The inverse of regulator_lock_two().
+ */
+
+static void regulator_unlock_two(struct regulator_dev *rdev1,
+ struct regulator_dev *rdev2,
+ struct ww_acquire_ctx *ww_ctx)
+{
+ regulator_unlock(rdev2);
+ regulator_unlock(rdev1);
+ ww_acquire_fini(ww_ctx);
+}
+
static bool regulator_supply_is_couple(struct regulator_dev *rdev)
{
struct regulator_dev *c_rdev;
@@ -344,6 +416,7 @@ static void regulator_lock_dependent(struct regulator_dev *rdev,
ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx);
old_contended_rdev = new_contended_rdev;
old_contended_rdev->ref_cnt++;
+ old_contended_rdev->mutex_owner = current;
}
err = regulator_lock_recursive(rdev,
@@ -956,7 +1029,7 @@ static int drms_uA_update(struct regulator_dev *rdev)
/* get input voltage */
input_uV = 0;
if (rdev->supply)
- input_uV = regulator_get_voltage(rdev->supply);
+ input_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
if (input_uV <= 0)
input_uV = rdev->constraints->input_uV;
if (input_uV <= 0) {
@@ -1387,7 +1460,13 @@ static int set_machine_constraints(struct regulator_dev *rdev)
if (rdev->supply_name && !rdev->supply)
return -EPROBE_DEFER;
- if (rdev->supply) {
+ /* If the supplying regulator has already been enabled,
+ * it is not intended to increment use_count when rdev
+ * is only boot-on.
+ */
+ if (rdev->supply &&
+ (rdev->constraints->always_on ||
+ !regulator_is_enabled(rdev->supply))) {
ret = regulator_enable(rdev->supply);
if (ret < 0) {
_regulator_put(rdev->supply);
@@ -1412,8 +1491,8 @@ static int set_machine_constraints(struct regulator_dev *rdev)
/**
* set_supply - set regulator supply regulator
- * @rdev: regulator name
- * @supply_rdev: supply regulator name
+ * @rdev: regulator (locked)
+ * @supply_rdev: supply regulator (locked)
*
* Called by platform initialisation code to set the supply regulator for this
regulator. This ensures that a regulator's supply will also be enabled by the
@@ -1431,6 +1510,7 @@ static int set_supply(struct regulator_dev *rdev,
rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
if (rdev->supply == NULL) {
+ module_put(supply_rdev->owner);
err = -ENOMEM;
return err;
}
@@ -1584,6 +1664,8 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
struct regulator *regulator;
int err = 0;
+ lockdep_assert_held_once(&rdev->mutex.base);
+
if (dev) {
char buf[REG_STR_SIZE];
int size;
@@ -1604,16 +1686,14 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
regulator = kzalloc(sizeof(*regulator), GFP_KERNEL);
if (regulator == NULL) {
- kfree(supply_name);
+ kfree_const(supply_name);
return NULL;
}
regulator->rdev = rdev;
regulator->supply_name = supply_name;
- regulator_lock(rdev);
list_add(&regulator->list, &rdev->consumer_list);
- regulator_unlock(rdev);
if (dev) {
regulator->dev = dev;
@@ -1630,19 +1710,17 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
if (err != -EEXIST)
regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
- if (!regulator->debugfs) {
+ if (IS_ERR(regulator->debugfs))
rdev_dbg(rdev, "Failed to create debugfs directory\n");
- } else {
- debugfs_create_u32("uA_load", 0444, regulator->debugfs,
- &regulator->uA_load);
- debugfs_create_u32("min_uV", 0444, regulator->debugfs,
- &regulator->voltage[PM_SUSPEND_ON].min_uV);
- debugfs_create_u32("max_uV", 0444, regulator->debugfs,
- &regulator->voltage[PM_SUSPEND_ON].max_uV);
- debugfs_create_file("constraint_flags", 0444,
- regulator->debugfs, regulator,
- &constraint_flags_fops);
- }
+
+ debugfs_create_u32("uA_load", 0444, regulator->debugfs,
+ &regulator->uA_load);
+ debugfs_create_u32("min_uV", 0444, regulator->debugfs,
+ &regulator->voltage[PM_SUSPEND_ON].min_uV);
+ debugfs_create_u32("max_uV", 0444, regulator->debugfs,
+ &regulator->voltage[PM_SUSPEND_ON].max_uV);
+ debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
+ regulator, &constraint_flags_fops);
/*
* Check now if the regulator is an always on regulator - if
@@ -1734,6 +1812,7 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
node = of_get_regulator(dev, supply);
if (node) {
r = of_find_regulator_by_node(node);
+ of_node_put(node);
if (r)
return r;
@@ -1778,6 +1857,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
{
struct regulator_dev *r;
struct device *dev = rdev->dev.parent;
+ struct ww_acquire_ctx ww_ctx;
int ret = 0;
/* No supply to resolve? */
@@ -1844,23 +1924,23 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
* between rdev->supply null check and setting rdev->supply in
* set_supply() from concurrent tasks.
*/
- regulator_lock(rdev);
+ regulator_lock_two(rdev, r, &ww_ctx);
/* Supply just resolved by a concurrent task? */
if (rdev->supply) {
- regulator_unlock(rdev);
+ regulator_unlock_two(rdev, r, &ww_ctx);
put_device(&r->dev);
goto out;
}
ret = set_supply(rdev, r);
if (ret < 0) {
- regulator_unlock(rdev);
+ regulator_unlock_two(rdev, r, &ww_ctx);
put_device(&r->dev);
goto out;
}
- regulator_unlock(rdev);
+ regulator_unlock_two(rdev, r, &ww_ctx);
/*
* In set_machine_constraints() we may have turned this regulator on
@@ -1975,7 +2055,9 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
return regulator;
}
+ regulator_lock(rdev);
regulator = create_regulator(rdev, dev, id);
+ regulator_unlock(rdev);
if (regulator == NULL) {
regulator = ERR_PTR(-ENOMEM);
module_put(rdev->owner);
@@ -2486,13 +2568,18 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
*/
static int _regulator_handle_consumer_enable(struct regulator *regulator)
{
+ int ret;
struct regulator_dev *rdev = regulator->rdev;
lockdep_assert_held_once(&rdev->mutex.base);
regulator->enable_count++;
- if (regulator->uA_load && regulator->enable_count == 1)
- return drms_uA_update(rdev);
+ if (regulator->uA_load && regulator->enable_count == 1) {
+ ret = drms_uA_update(rdev);
+ if (ret)
+ regulator->enable_count--;
+ return ret;
+ }
return 0;
}
@@ -2571,7 +2658,8 @@ static int _regulator_enable(struct regulator *regulator)
/* Fallthrough on positive return values - already enabled */
}
- rdev->use_count++;
+ if (regulator->enable_count == 1)
+ rdev->use_count++;
return 0;
@@ -2649,37 +2737,40 @@ static int _regulator_disable(struct regulator *regulator)
lockdep_assert_held_once(&rdev->mutex.base);
- if (WARN(rdev->use_count <= 0,
+ if (WARN(regulator->enable_count == 0,
"unbalanced disables for %s\n", rdev_get_name(rdev)))
return -EIO;
- /* are we the last user and permitted to disable ? */
- if (rdev->use_count == 1 &&
- (rdev->constraints && !rdev->constraints->always_on)) {
-
- /* we are last user */
- if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) {
- ret = _notifier_call_chain(rdev,
- REGULATOR_EVENT_PRE_DISABLE,
- NULL);
- if (ret & NOTIFY_STOP_MASK)
- return -EINVAL;
-
- ret = _regulator_do_disable(rdev);
- if (ret < 0) {
- rdev_err(rdev, "failed to disable\n");
- _notifier_call_chain(rdev,
- REGULATOR_EVENT_ABORT_DISABLE,
+ if (regulator->enable_count == 1) {
+ /* disabling last enable_count from this regulator */
+ /* are we the last user and permitted to disable ? */
+ if (rdev->use_count == 1 &&
+ (rdev->constraints && !rdev->constraints->always_on)) {
+
+ /* we are last user */
+ if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) {
+ ret = _notifier_call_chain(rdev,
+ REGULATOR_EVENT_PRE_DISABLE,
+ NULL);
+ if (ret & NOTIFY_STOP_MASK)
+ return -EINVAL;
+
+ ret = _regulator_do_disable(rdev);
+ if (ret < 0) {
+ rdev_err(rdev, "failed to disable\n");
+ _notifier_call_chain(rdev,
+ REGULATOR_EVENT_ABORT_DISABLE,
+ NULL);
+ return ret;
+ }
+ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
NULL);
- return ret;
}
- _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
- NULL);
- }
- rdev->use_count = 0;
- } else if (rdev->use_count > 1) {
- rdev->use_count--;
+ rdev->use_count = 0;
+ } else if (rdev->use_count > 1) {
+ rdev->use_count--;
+ }
}
if (ret == 0)
@@ -4797,6 +4888,7 @@ static void regulator_dev_release(struct device *dev)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
+ debugfs_remove_recursive(rdev->debugfs);
kfree(rdev->constraints);
of_node_put(rdev->dev.of_node);
kfree(rdev);
@@ -4816,10 +4908,8 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
}
rdev->debugfs = debugfs_create_dir(rname, debugfs_root);
- if (!rdev->debugfs) {
- rdev_warn(rdev, "Failed to create debugfs directory\n");
- return;
- }
+ if (IS_ERR(rdev->debugfs))
+ rdev_dbg(rdev, "Failed to create debugfs directory\n");
debugfs_create_u32("use_count", 0444, rdev->debugfs,
&rdev->use_count);
@@ -5260,6 +5350,7 @@ unset_supplies:
regulator_remove_coupling(rdev);
mutex_unlock(&regulator_list_mutex);
wash:
+ regulator_put(rdev->supply);
kfree(rdev->coupling_desc.coupled_rdevs);
mutex_lock(&regulator_list_mutex);
regulator_ena_gpio_free(rdev);
@@ -5297,7 +5388,6 @@ void regulator_unregister(struct regulator_dev *rdev)
mutex_lock(&regulator_list_mutex);
- debugfs_remove_recursive(rdev->debugfs);
WARN_ON(rdev->open_count);
regulator_remove_coupling(rdev);
unset_regulator_supplies(rdev);
@@ -5641,6 +5731,7 @@ static void regulator_summary_lock(struct ww_acquire_ctx *ww_ctx)
ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx);
old_contended_rdev = new_contended_rdev;
old_contended_rdev->ref_cnt++;
+ old_contended_rdev->mutex_owner = current;
}
err = regulator_summary_lock_all(ww_ctx,
@@ -5701,8 +5792,8 @@ static int __init regulator_init(void)
ret = class_register(&regulator_class);
debugfs_root = debugfs_create_dir("regulator", NULL);
- if (!debugfs_root)
- pr_warn("regulator: Failed to create debugfs directory\n");
+ if (IS_ERR(debugfs_root))
+ pr_debug("regulator: Failed to create debugfs directory\n");
#ifdef CONFIG_DEBUG_FS
debugfs_create_file("supply_map", 0444, debugfs_root, NULL,
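regulator_lock_two() above is the standard two-lock acquisition dance: take the first lock, attempt the second, and on contention drop the first, acquire the contended one slowly, then retry with the roles swapped. A pthread approximation that substitutes trylock for the ww_mutex -EDEADLK signal, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

/* Lock both a and b without deadlocking against a b-then-a locker. */
static void lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	for (;;) {
		pthread_mutex_lock(a);
		if (pthread_mutex_trylock(b) == 0)
			return;		/* got both */
		/*
		 * b is contended: back off a, take b for real on the
		 * next pass, and retry with the roles swapped (the
		 * -EDEADLOCK loop in regulator_lock_two()).
		 */
		pthread_mutex_unlock(a);
		pthread_mutex_t *tmp = a;

		a = b;
		b = tmp;
	}
}

static pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	lock_two(&m1, &m2);
	printf("both held\n");
	pthread_mutex_unlock(&m2);
	pthread_mutex_unlock(&m1);
	return 0;
}

Unlike ww_mutex, a plain trylock loop has no wound/wait arbitration, so the sketch shows only the retry shape, not the fairness guarantees.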
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index bf80748f1ccc..7baa6121cc66 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -471,6 +471,12 @@ static int da9211_i2c_probe(struct i2c_client *i2c,
chip->chip_irq = i2c->irq;
+ ret = da9211_regulator_init(chip);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
+ return ret;
+ }
+
if (chip->chip_irq != 0) {
ret = devm_request_threaded_irq(chip->dev, chip->chip_irq, NULL,
da9211_irq_handler,
@@ -485,11 +491,6 @@ static int da9211_i2c_probe(struct i2c_client *i2c,
dev_warn(chip->dev, "No IRQ configured\n");
}
- ret = da9211_regulator_init(chip);
-
- if (ret < 0)
- dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
-
return ret;
}
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index dbe477da4e55..99a1b2dc3093 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -8,18 +8,19 @@
// Copyright (c) 2012 Marvell Technology Ltd.
// Yunfan Zhang <yfzhang@marvell.com>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/param.h>
-#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/regulator/driver.h>
+#include <linux/regulator/fan53555.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
-#include <linux/of_device.h>
-#include <linux/i2c.h>
#include <linux/slab.h>
-#include <linux/regmap.h>
-#include <linux/regulator/fan53555.h>
/* Voltage setting */
#define FAN53555_VSEL0 0x00
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index f81533070058..2f0bed86467f 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -181,8 +181,8 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata->enable_clock = devm_clk_get(dev, NULL);
if (IS_ERR(drvdata->enable_clock)) {
- dev_err(dev, "Cant get enable-clock from devicetree\n");
- return -ENOENT;
+ dev_err(dev, "Can't get enable-clock from devicetree\n");
+ return PTR_ERR(drvdata->enable_clock);
}
} else {
drvdata->desc.ops = &fixed_voltage_ops;
diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
index 7b8ec8c0bd15..660e179a82a2 100644
--- a/drivers/regulator/max77802-regulator.c
+++ b/drivers/regulator/max77802-regulator.c
@@ -95,9 +95,11 @@ static int max77802_set_suspend_disable(struct regulator_dev *rdev)
{
unsigned int val = MAX77802_OFF_PWRREQ;
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
+ unsigned int id = rdev_get_id(rdev);
int shift = max77802_get_opmode_shift(id);
+ if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
+ return -EINVAL;
max77802->opmode[id] = val;
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, val << shift);
@@ -111,7 +113,7 @@ static int max77802_set_suspend_disable(struct regulator_dev *rdev)
static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
+ unsigned int id = rdev_get_id(rdev);
unsigned int val;
int shift = max77802_get_opmode_shift(id);
@@ -128,6 +130,9 @@ static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode)
return -EINVAL;
}
+ if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
+ return -EINVAL;
+
max77802->opmode[id] = val;
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, val << shift);
@@ -136,8 +141,10 @@ static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode)
static unsigned max77802_get_mode(struct regulator_dev *rdev)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
+ unsigned int id = rdev_get_id(rdev);
+ if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
+ return -EINVAL;
return max77802_map_mode(max77802->opmode[id]);
}
@@ -161,10 +168,13 @@ static int max77802_set_suspend_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
+ unsigned int id = rdev_get_id(rdev);
unsigned int val;
int shift = max77802_get_opmode_shift(id);
+ if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
+ return -EINVAL;
+
/*
* If the regulator has been disabled for suspend
* then is invalid to try setting a suspend mode.
@@ -210,9 +220,11 @@ static int max77802_set_suspend_mode(struct regulator_dev *rdev,
static int max77802_enable(struct regulator_dev *rdev)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
+ unsigned int id = rdev_get_id(rdev);
int shift = max77802_get_opmode_shift(id);
+ if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
+ return -EINVAL;
if (max77802->opmode[id] == MAX77802_OFF_PWRREQ)
max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
@@ -541,7 +553,7 @@ static int max77802_pmic_probe(struct platform_device *pdev)
for (i = 0; i < MAX77802_REG_MAX; i++) {
struct regulator_dev *rdev;
- int id = regulators[i].id;
+ unsigned int id = regulators[i].id;
int shift = max77802_get_opmode_shift(id);
int ret;
@@ -559,10 +571,12 @@ static int max77802_pmic_probe(struct platform_device *pdev)
* the hardware reports OFF as the regulator operating mode.
* Default to operating mode NORMAL in that case.
*/
- if (val == MAX77802_STATUS_OFF)
- max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
- else
- max77802->opmode[id] = val;
+ if (id < ARRAY_SIZE(max77802->opmode)) {
+ if (val == MAX77802_STATUS_OFF)
+ max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
+ else
+ max77802->opmode[id] = val;
+ }
rdev = devm_regulator_register(&pdev->dev,
&regulators[i], &config);
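The max77802 changes guard every opmode[] access: rdev_get_id() is trusted nowhere, and an out-of-range id is caught once with WARN_ON_ONCE() before it can index past the array. A condensed sketch of the guard, assuming a hypothetical table size:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define MAX77802_REG_MAX 4	/* assumed table size */

static unsigned int opmode[MAX77802_REG_MAX];

static int set_mode(unsigned int id, unsigned int val)
{
	/* Reject before the array access, never after. */
	if (id >= ARRAY_SIZE(opmode)) {
		fprintf(stderr, "bogus regulator id %u\n", id);
		return -22;	/* -EINVAL */
	}
	opmode[id] = val;
	return 0;
}

int main(void)
{
	printf("%d\n", set_mode(1, 3));		/* 0 */
	printf("%d\n", set_mode(100, 3));	/* -22 */
	return 0;
}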
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 87637eb6bcbc..7a0a235e4465 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -206,8 +206,12 @@ static int of_get_regulation_constraints(struct device *dev,
}
suspend_np = of_get_child_by_name(np, regulator_states[i]);
- if (!suspend_np || !suspend_state)
+ if (!suspend_np)
continue;
+ if (!suspend_state) {
+ of_node_put(suspend_np);
+ continue;
+ }
if (!of_property_read_u32(suspend_np, "regulator-mode",
&pval)) {
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index f873d97100e2..13609942d45c 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -788,7 +788,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
((pfuze_chip->chip_id == PFUZE3000) ? "3000" : "3001"))));
memcpy(pfuze_chip->regulator_descs, pfuze_chip->pfuze_regulators,
- sizeof(pfuze_chip->regulator_descs));
+ regulator_num * sizeof(struct pfuze_regulator));
ret = pfuze_parse_regulators_dt(pfuze_chip);
if (ret)
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 0a9d61a91f43..1b06aaaaf8b8 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -158,6 +158,9 @@ static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
pwm_get_state(drvdata->pwm, &pstate);
voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit);
+ if (voltage < min(max_uV_duty, min_uV_duty) ||
+ voltage > max(max_uV_duty, min_uV_duty))
+ return -ENOTRECOVERABLE;
/*
* The dutycycle for min_uV might be greater than the one for max_uV.
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index 7407cd5a1b74..84027e9f0dbc 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -812,6 +812,12 @@ static const struct rpm_regulator_data rpm_pm8018_regulators[] = {
};
static const struct rpm_regulator_data rpm_pm8058_regulators[] = {
+ { "s0", QCOM_RPM_PM8058_SMPS0, &pm8058_smps, "vdd_s0" },
+ { "s1", QCOM_RPM_PM8058_SMPS1, &pm8058_smps, "vdd_s1" },
+ { "s2", QCOM_RPM_PM8058_SMPS2, &pm8058_smps, "vdd_s2" },
+ { "s3", QCOM_RPM_PM8058_SMPS3, &pm8058_smps, "vdd_s3" },
+ { "s4", QCOM_RPM_PM8058_SMPS4, &pm8058_smps, "vdd_s4" },
+
{ "l0", QCOM_RPM_PM8058_LDO0, &pm8058_nldo, "vdd_l0_l1_lvs" },
{ "l1", QCOM_RPM_PM8058_LDO1, &pm8058_nldo, "vdd_l0_l1_lvs" },
{ "l2", QCOM_RPM_PM8058_LDO2, &pm8058_pldo, "vdd_l2_l11_l12" },
@@ -839,12 +845,6 @@ static const struct rpm_regulator_data rpm_pm8058_regulators[] = {
{ "l24", QCOM_RPM_PM8058_LDO24, &pm8058_nldo, "vdd_l23_l24_l25" },
{ "l25", QCOM_RPM_PM8058_LDO25, &pm8058_nldo, "vdd_l23_l24_l25" },
- { "s0", QCOM_RPM_PM8058_SMPS0, &pm8058_smps, "vdd_s0" },
- { "s1", QCOM_RPM_PM8058_SMPS1, &pm8058_smps, "vdd_s1" },
- { "s2", QCOM_RPM_PM8058_SMPS2, &pm8058_smps, "vdd_s2" },
- { "s3", QCOM_RPM_PM8058_SMPS3, &pm8058_smps, "vdd_s3" },
- { "s4", QCOM_RPM_PM8058_SMPS4, &pm8058_smps, "vdd_s4" },
-
{ "lvs0", QCOM_RPM_PM8058_LVS0, &pm8058_switch, "vdd_l0_l1_lvs" },
{ "lvs1", QCOM_RPM_PM8058_LVS1, &pm8058_switch, "vdd_l0_l1_lvs" },
@@ -853,6 +853,12 @@ static const struct rpm_regulator_data rpm_pm8058_regulators[] = {
};
static const struct rpm_regulator_data rpm_pm8901_regulators[] = {
+ { "s0", QCOM_RPM_PM8901_SMPS0, &pm8901_ftsmps, "vdd_s0" },
+ { "s1", QCOM_RPM_PM8901_SMPS1, &pm8901_ftsmps, "vdd_s1" },
+ { "s2", QCOM_RPM_PM8901_SMPS2, &pm8901_ftsmps, "vdd_s2" },
+ { "s3", QCOM_RPM_PM8901_SMPS3, &pm8901_ftsmps, "vdd_s3" },
+ { "s4", QCOM_RPM_PM8901_SMPS4, &pm8901_ftsmps, "vdd_s4" },
+
{ "l0", QCOM_RPM_PM8901_LDO0, &pm8901_nldo, "vdd_l0" },
{ "l1", QCOM_RPM_PM8901_LDO1, &pm8901_pldo, "vdd_l1" },
{ "l2", QCOM_RPM_PM8901_LDO2, &pm8901_pldo, "vdd_l2" },
@@ -861,12 +867,6 @@ static const struct rpm_regulator_data rpm_pm8901_regulators[] = {
{ "l5", QCOM_RPM_PM8901_LDO5, &pm8901_pldo, "vdd_l5" },
{ "l6", QCOM_RPM_PM8901_LDO6, &pm8901_pldo, "vdd_l6" },
- { "s0", QCOM_RPM_PM8901_SMPS0, &pm8901_ftsmps, "vdd_s0" },
- { "s1", QCOM_RPM_PM8901_SMPS1, &pm8901_ftsmps, "vdd_s1" },
- { "s2", QCOM_RPM_PM8901_SMPS2, &pm8901_ftsmps, "vdd_s2" },
- { "s3", QCOM_RPM_PM8901_SMPS3, &pm8901_ftsmps, "vdd_s3" },
- { "s4", QCOM_RPM_PM8901_SMPS4, &pm8901_ftsmps, "vdd_s4" },
-
{ "lvs0", QCOM_RPM_PM8901_LVS0, &pm8901_switch, "lvs0_in" },
{ "lvs1", QCOM_RPM_PM8901_LVS1, &pm8901_switch, "lvs1_in" },
{ "lvs2", QCOM_RPM_PM8901_LVS2, &pm8901_switch, "lvs2_in" },
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 1e9f03a2ea1c..7ff480810cfa 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -924,10 +924,14 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
for (i = 0; i < pdata->num_regulators; i++) {
const struct sec_voltage_desc *desc;
- int id = pdata->regulators[i].id;
+ unsigned int id = pdata->regulators[i].id;
int enable_reg, enable_val;
struct regulator_dev *rdev;
+ BUILD_BUG_ON(ARRAY_SIZE(regulators) != ARRAY_SIZE(reg_voltage_map));
+ if (WARN_ON_ONCE(id >= ARRAY_SIZE(regulators)))
+ continue;
+
desc = reg_voltage_map[id];
if (desc) {
regulators[id].n_voltages =
diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
index a0565daecace..5a18d7e620a5 100644
--- a/drivers/regulator/slg51000-regulator.c
+++ b/drivers/regulator/slg51000-regulator.c
@@ -465,6 +465,8 @@ static int slg51000_i2c_probe(struct i2c_client *client,
chip->cs_gpiod = cs_gpiod;
}
+ usleep_range(10000, 11000);
+
i2c_set_clientdata(client, chip);
chip->chip_irq = client->irq;
chip->dev = dev;
diff --git a/drivers/regulator/stm32-pwr.c b/drivers/regulator/stm32-pwr.c
index e0e627b0106e..b94da4992376 100644
--- a/drivers/regulator/stm32-pwr.c
+++ b/drivers/regulator/stm32-pwr.c
@@ -129,17 +129,16 @@ static const struct regulator_desc stm32_pwr_desc[] = {
static int stm32_pwr_regulator_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct stm32_pwr_reg *priv;
void __iomem *base;
struct regulator_dev *rdev;
struct regulator_config config = { };
int i, ret = 0;
- base = of_iomap(np, 0);
- if (!base) {
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
dev_err(&pdev->dev, "Unable to map IO memory\n");
- return -ENOMEM;
+ return PTR_ERR(base);
}
config.dev = &pdev->dev;
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index b8100c3cedad..71625db3a6f1 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -67,6 +67,7 @@ struct twlreg_info {
#define TWL6030_CFG_STATE_SLEEP 0x03
#define TWL6030_CFG_STATE_GRP_SHIFT 5
#define TWL6030_CFG_STATE_APP_SHIFT 2
+#define TWL6030_CFG_STATE_MASK 0x03
#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
TWL6030_CFG_STATE_APP_SHIFT)
@@ -128,13 +129,14 @@ static int twl6030reg_is_enabled(struct regulator_dev *rdev)
if (grp < 0)
return grp;
grp &= P1_GRP_6030;
+ val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+ val = TWL6030_CFG_STATE_APP(val);
} else {
+ val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+ val &= TWL6030_CFG_STATE_MASK;
grp = 1;
}
- val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
- val = TWL6030_CFG_STATE_APP(val);
-
return grp && (val == TWL6030_CFG_STATE_ON);
}
@@ -187,7 +189,12 @@ static int twl6030reg_get_status(struct regulator_dev *rdev)
val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
- switch (TWL6030_CFG_STATE_APP(val)) {
+ if (info->features & TWL6032_SUBCLASS)
+ val &= TWL6030_CFG_STATE_MASK;
+ else
+ val = TWL6030_CFG_STATE_APP(val);
+
+ switch (val) {
case TWL6030_CFG_STATE_ON:
return REGULATOR_STATUS_NORMAL;
@@ -530,6 +537,7 @@ static const struct twlreg_info TWL6030_INFO_##label = { \
#define TWL6032_ADJUSTABLE_LDO(label, offset) \
static const struct twlreg_info TWL6032_INFO_##label = { \
.base = offset, \
+ .features = TWL6032_SUBCLASS, \
.desc = { \
.name = #label, \
.id = TWL6032_REG_##label, \
@@ -562,6 +570,7 @@ static const struct twlreg_info TWLFIXED_INFO_##label = { \
#define TWL6032_ADJUSTABLE_SMPS(label, offset) \
static const struct twlreg_info TWLSMPS_INFO_##label = { \
.base = offset, \
+ .features = TWL6032_SUBCLASS, \
.desc = { \
.name = #label, \
.id = TWL6032_REG_##label, \
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 94afdde4bc9f..cbe168fae53d 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -196,6 +196,26 @@ config ST_REMOTEPROC
processor framework.
This can be either built-in or a loadable module.
+config ZYNQ_REMOTEPROC
+ tristate "Support ZYNQ remoteproc"
+ depends on ARCH_ZYNQ && SMP && !DEBUG_SG
+ select RPMSG_VIRTIO
+ select HOTPLUG_CPU
+ select SRAM
+ help
+ Say y here to support Xilinx Zynq remote processors (the second
+ ARM Cortex-A9 CPU) via the remote processor framework.
+
+config ZYNQMP_R5_REMOTEPROC
+ tristate "ZynqMP_r5 remoteproc support"
+ depends on ARM64 && PM && ARCH_ZYNQMP
+ select RPMSG_VIRTIO
+ select MAILBOX
+ select ZYNQMP_IPI_MBOX
+ help
+ Say y here to support ZynqMP R5 remote processors via the remote
+ processor framework.
+
config ST_SLIM_REMOTEPROC
tristate
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 00f09e658cb3..f183378f52c9 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -25,5 +25,7 @@ obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
qcom_wcnss_pil-y += qcom_wcnss.o
qcom_wcnss_pil-y += qcom_wcnss_iris.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
+obj-$(CONFIG_ZYNQ_REMOTEPROC) += zynq_remoteproc.o
+obj-$(CONFIG_ZYNQMP_R5_REMOTEPROC) += zynqmp_r5_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 5e54e6f5edb1..49faa90da93f 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -175,6 +176,9 @@ struct q6v5 {
void *mba_region;
size_t mba_size;
+ phys_addr_t mdata_phys;
+ size_t mdata_size;
+
phys_addr_t mpss_phys;
phys_addr_t mpss_reloc;
void *mpss_region;
@@ -679,15 +683,35 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
if (IS_ERR(metadata))
return PTR_ERR(metadata);
- ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
- if (!ptr) {
- kfree(metadata);
- dev_err(qproc->dev, "failed to allocate mdt buffer\n");
- return -ENOMEM;
+ if (qproc->mdata_phys) {
+ if (size > qproc->mdata_size) {
+ ret = -EINVAL;
+ dev_err(qproc->dev, "metadata size outside memory range\n");
+ goto free_metadata;
+ }
+
+ phys = qproc->mdata_phys;
+ ptr = memremap(qproc->mdata_phys, size, MEMREMAP_WC);
+ if (!ptr) {
+ ret = -EBUSY;
+ dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
+ &qproc->mdata_phys, size);
+ goto free_metadata;
+ }
+ } else {
+ ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
+ if (!ptr) {
+ ret = -ENOMEM;
+ dev_err(qproc->dev, "failed to allocate mdt buffer\n");
+ goto free_metadata;
+ }
}
memcpy(ptr, metadata, size);
+ if (qproc->mdata_phys)
+ memunmap(ptr);
+
/* Hypervisor mapping to access metadata by modem */
mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, phys, size);
@@ -714,7 +738,9 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
"mdt buffer not reclaimed system may become unstable\n");
free_dma_attrs:
- dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
+ if (!qproc->mdata_phys)
+ dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
+free_metadata:
kfree(metadata);
return ret < 0 ? ret : 0;
@@ -1383,6 +1409,7 @@ static int q6v5_init_reset(struct q6v5 *qproc)
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
struct device_node *child;
+ struct reserved_mem *rmem;
struct device_node *node;
struct resource r;
int ret;
@@ -1417,6 +1444,26 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
qproc->mpss_phys = qproc->mpss_reloc = r.start;
qproc->mpss_size = resource_size(&r);
+ if (!child) {
+ node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2);
+ } else {
+ child = of_get_child_by_name(qproc->dev->of_node, "metadata");
+ node = of_parse_phandle(child, "memory-region", 0);
+ of_node_put(child);
+ }
+
+ if (!node)
+ return 0;
+
+ rmem = of_reserved_mem_lookup(node);
+ if (!rmem) {
+ dev_err(qproc->dev, "unable to resolve metadata region\n");
+ return -EINVAL;
+ }
+
+ qproc->mdata_phys = rmem->base;
+ qproc->mdata_size = rmem->size;
+
return 0;
}
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index db4b3c4bacd7..d48f4b5c8df7 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -230,6 +230,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
}
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret)
return ret;
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index c231314eab66..b7d0c35c5058 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -518,7 +518,9 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
if (sysmon->shutdown_irq != -ENODATA) {
dev_err(sysmon->dev,
"failed to retrieve shutdown-ack IRQ\n");
- return ERR_PTR(sysmon->shutdown_irq);
+ ret = sysmon->shutdown_irq;
+ kfree(sysmon);
+ return ERR_PTR(ret);
}
} else {
ret = devm_request_threaded_irq(sysmon->dev,
@@ -529,6 +531,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
if (ret) {
dev_err(sysmon->dev,
"failed to acquire shutdown-ack IRQ\n");
+ kfree(sysmon);
return ERR_PTR(ret);
}
}
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index c72f1b3b6085..18431ac09822 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -407,6 +407,7 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
irq_handler_t thread_fn)
{
int ret;
+ int irq_number;
ret = platform_get_irq_byname(pdev, name);
if (ret < 0 && optional) {
@@ -417,14 +418,19 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
return ret;
}
+ irq_number = ret;
+
ret = devm_request_threaded_irq(&pdev->dev, ret,
NULL, thread_fn,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"wcnss", wcnss);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+ return ret;
+ }
- return ret;
+ /* Return the IRQ number if the IRQ was successfully acquired */
+ return irq_number;
}
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 493ef9262411..05a7e629fdd9 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -119,4 +119,27 @@ struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc,
return NULL;
}
+static inline
+bool rproc_allow_sysfs_kick(struct rproc *rproc)
+{
+ return rproc->sysfs_kick;
+}
+
+static inline
+bool rproc_peek_remote_kick(struct rproc *rproc, char *buf, size_t *len)
+{
+ if (rproc->ops->peek_remote_kick)
+ return rproc->ops->peek_remote_kick(rproc, buf, len);
+ else
+ return false;
+}
+
+static inline
+void rproc_ack_remote_kick(struct rproc *rproc)
+{
+ if (rproc->ops->ack_remote_kick)
+ rproc->ops->ack_remote_kick(rproc);
+}
+
+int rproc_create_kick_sysfs(struct rproc *rproc);
#endif /* REMOTEPROC_INTERNAL_H */
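The new remoteproc_internal.h helpers wrap optional function pointers so callers get a safe default when a driver leaves a hook unimplemented. The same shape in isolation, with invented types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct rproc;

struct rproc_ops {
	bool (*peek_remote_kick)(struct rproc *r, char *buf, size_t *len);
	void (*ack_remote_kick)(struct rproc *r);
};

struct rproc {
	const struct rproc_ops *ops;
};

/* NULL-tolerant wrappers: a missing hook degrades to false / no-op. */
static bool rproc_peek_remote_kick(struct rproc *r, char *buf, size_t *len)
{
	return r->ops->peek_remote_kick ?
	       r->ops->peek_remote_kick(r, buf, len) : false;
}

static void rproc_ack_remote_kick(struct rproc *r)
{
	if (r->ops->ack_remote_kick)
		r->ops->ack_remote_kick(r);
}

int main(void)
{
	static const struct rproc_ops bare_ops; /* no hooks implemented */
	struct rproc r = { .ops = &bare_ops };

	printf("kicked=%d\n", rproc_peek_remote_kick(&r, NULL, NULL));
	rproc_ack_remote_kick(&r);	/* safe no-op */
	return 0;
}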
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index 7f8536b73295..79a92ad97811 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -123,6 +123,111 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(name);
+/**
+ * kick_store() - Kick remote from sysfs.
+ * @dev: remoteproc device
+ * @attr: sysfs device attribute
+ * @buf: sysfs buffer
+ * @count: size of the contents in buf
+ *
+ * It just raises a signal; no content is expected for now.
+ *
+ * Return: the input count on success, or -EINVAL if the remote
+ * processor has no kick handler.
+ */
+static ssize_t kick_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+ int id;
+ size_t cpy_len;
+
+ (void)attr;
+ cpy_len = count <= sizeof(id) ? count : sizeof(id);
+ memcpy((char *)(&id), buf, cpy_len);
+
+ if (rproc->ops->kick)
+ rproc->ops->kick(rproc, id);
+ else
+ count = -EINVAL;
+ return count;
+}
+static DEVICE_ATTR_WO(kick);
+
+/**
+ * remote_kick_show() - Check if remote has kicked
+ * @dev: remoteproc device
+ * @attr: sysfs device attribute
+ * @buf: sysfs buffer
+ *
+ * It will check if the remote has kicked.
+ *
+ * Return: 2 if it allows kick from sysfs, and the value in the sysfs buffer
+ * shows if the remote has kicked. '0' - not kicked, '1' - kicked.
+ */
+static ssize_t remote_kick_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ buf[0] = '0';
+ buf[1] = '\n';
+ if (rproc_peek_remote_kick(rproc, NULL, NULL))
+ buf[0] += 1;
+ return 2;
+}
+
+/**
+ * remote_kick_store() - Ack the kick from remote
+ * @dev: remoteproc device
+ * @attr: sysfs device attribute
+ * @buf: sysfs buffer
+ * @count: size of the contents in buf
+ *
+ * It acks the remote; no response content is expected.
+ *
+ * Return: the input count, as the ack is always expected to
+ * succeed.
+ */
+static ssize_t remote_kick_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ rproc_ack_remote_kick(rproc);
+ return count;
+}
+static DEVICE_ATTR_RW(remote_kick);
+
+/**
+ * remote_pending_message_show() - Show pending message sent from remote
+ * @dev: remoteproc device
+ * @attr: sysfs device attribute
+ * @buf: sysfs buffer
+ *
+ * It shows the pending message sent from the remote.
+ *
+ * Return: length of pending remote message.
+ */
+static ssize_t remote_pending_message_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+ size_t len;
+
+	if (rproc_peek_remote_kick(rproc, buf, &len)) {
+		buf[len] = '\0';
+		return len;
+	}
+	return -EAGAIN;
+}
+static DEVICE_ATTR_RO(remote_pending_message);
+
static struct attribute *rproc_attrs[] = {
&dev_attr_firmware.attr,
&dev_attr_state.attr,
@@ -144,6 +249,41 @@ struct class rproc_class = {
.dev_groups = rproc_devgroups,
};
+/**
+ * rproc_create_kick_sysfs() - create kick remote sysfs entry
+ * @rproc: remoteproc
+ *
+ * It creates the kick-related sysfs entries if kicking the remote
+ * from sysfs is allowed.
+ *
+ * Return: 0 for success, and negative value for failure.
+ */
+int rproc_create_kick_sysfs(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ if (!rproc_allow_sysfs_kick(rproc))
+ return -EINVAL;
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_kick.attr);
+ if (ret) {
+ dev_err(dev, "failed to create sysfs for kick.\n");
+ return ret;
+ }
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_remote_kick.attr);
+ if (ret) {
+ dev_err(dev, "failed to create sysfs for remote kick.\n");
+ return ret;
+ }
+ ret = sysfs_create_file(&dev->kobj,
+ &dev_attr_remote_pending_message.attr);
+ if (ret)
+ dev_err(dev,
+ "failed to create sysfs for remote pending message.\n");
+ return ret;
+}
+EXPORT_SYMBOL(rproc_create_kick_sysfs);
+
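For reference, a small userspace sketch of how the new attributes could be exercised once created; the remoteproc0 path is an illustrative assumption:

/* Hypothetical userspace client for the kick/remote_kick attributes. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char state[2];
	int id = 0;	/* notify ID, copied verbatim by kick_store() */
	int fd;

	fd = open("/sys/class/remoteproc/remoteproc0/kick", O_WRONLY);
	if (fd < 0)
		return 1;
	write(fd, &id, sizeof(id));	/* raise the kick */
	close(fd);

	fd = open("/sys/class/remoteproc/remoteproc0/remote_kick", O_RDONLY);
	if (fd < 0)
		return 1;
	if (read(fd, state, sizeof(state)) == 2 && state[0] == '1')
		puts("remote has kicked");
	close(fd);
	return 0;
}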
int __init rproc_init_sysfs(void)
{
/* create remoteproc device class for sysfs */
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index ee13d23b43a9..2ed8f96efe8e 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -129,6 +129,7 @@ static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
while (of_phandle_iterator_next(&it) == 0) {
rmem = of_reserved_mem_lookup(it.node);
if (!rmem) {
+ of_node_put(it.node);
dev_err(dev, "unable to acquire memory-region\n");
return -EINVAL;
}
@@ -150,8 +151,10 @@ static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
it.node->name);
}
- if (!mem)
+ if (!mem) {
+ of_node_put(it.node);
return -ENOMEM;
+ }
rproc_add_carveout(rproc, mem);
index++;
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 2cf4b2992bfc..041dff62ddff 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -211,11 +211,13 @@ static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
while (of_phandle_iterator_next(&it) == 0) {
rmem = of_reserved_mem_lookup(it.node);
if (!rmem) {
+ of_node_put(it.node);
dev_err(dev, "unable to acquire memory-region\n");
return -EINVAL;
}
if (stm32_rproc_pa_to_da(rproc, rmem->base, &da) < 0) {
+ of_node_put(it.node);
dev_err(dev, "memory region not valid %pa\n",
&rmem->base);
return -EINVAL;
@@ -242,8 +244,10 @@ static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
it.node->name);
}
- if (!mem)
+ if (!mem) {
+ of_node_put(it.node);
return -ENOMEM;
+ }
rproc_add_carveout(rproc, mem);
index++;
diff --git a/drivers/remoteproc/zynq_remoteproc.c b/drivers/remoteproc/zynq_remoteproc.c
new file mode 100644
index 000000000000..652b858d97e3
--- /dev/null
+++ b/drivers/remoteproc/zynq_remoteproc.c
@@ -0,0 +1,494 @@
+/*
+ * Zynq Remote Processor driver
+ *
+ * Copyright (C) 2012 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2012 PetaLogix
+ *
+ * Based on the original OMAP Remote Processor driver
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/remoteproc.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/smp.h>
+#include <linux/irqchip/arm-gic.h>
+#include <asm/outercache.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/genalloc.h>
+#include <../../arch/arm/mach-zynq/common.h>
+
+#include "remoteproc_internal.h"
+
+#define MAX_NUM_VRINGS 2
+#define NOTIFYID_ANY (-1)
+/* Maximum number of on-chip memories used by the driver */
+#define MAX_ON_CHIP_MEMS 32
+
+/* Structure for storing IRQs */
+struct irq_list {
+ int irq;
+ struct list_head list;
+};
+
+/* Structure for IPIs */
+struct ipi_info {
+ u32 irq;
+ u32 notifyid;
+ bool pending;
+};
+
+/**
+ * struct zynq_mem_res - zynq memory resource for firmware memory
+ * @res: memory resource
+ * @node: list node
+ */
+struct zynq_mem_res {
+ struct resource res;
+ struct list_head node;
+};
+
+/**
+ * struct zynq_rproc_pdata - zynq rproc private data
+ * @irqs: inter-processor soft IRQs
+ * @rproc: pointer to remoteproc instance
+ * @ipis: inter-processor interrupt bookkeeping
+ * @fw_mems: list of firmware memories
+ */
+struct zynq_rproc_pdata {
+ struct irq_list irqs;
+ struct rproc *rproc;
+ struct ipi_info ipis[MAX_NUM_VRINGS];
+ struct list_head fw_mems;
+};
+
+static bool autoboot __read_mostly;
+
+/* Store rproc for IPI handler */
+static struct rproc *rproc;
+static struct work_struct workqueue;
+
+static void handle_event(struct work_struct *work)
+{
+ struct zynq_rproc_pdata *local = rproc->priv;
+
+ if (rproc_vq_interrupt(local->rproc, local->ipis[0].notifyid) ==
+ IRQ_NONE)
+ dev_dbg(rproc->dev.parent, "no message found in vqid 0\n");
+}
+
+static void ipi_kick(void)
+{
+ dev_dbg(rproc->dev.parent, "KICK Linux because of pending message\n");
+ schedule_work(&workqueue);
+}
+
+static void kick_pending_ipi(struct rproc *rproc)
+{
+ struct zynq_rproc_pdata *local = rproc->priv;
+ int i;
+
+ for (i = 0; i < MAX_NUM_VRINGS; i++) {
+ /* Send swirq to firmware */
+		if (local->ipis[i].pending) {
+ gic_raise_softirq(cpumask_of(1),
+ local->ipis[i].irq);
+ local->ipis[i].pending = false;
+ }
+ }
+}
+
+static int zynq_rproc_start(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+ INIT_WORK(&workqueue, handle_event);
+
+ ret = cpu_down(1);
+ /* EBUSY means CPU is already released */
+ if (ret && (ret != -EBUSY)) {
+ dev_err(dev, "Can't release cpu1\n");
+ return ret;
+ }
+
+ ret = zynq_cpun_start(rproc->bootaddr, 1);
+ /* Trigger pending kicks */
+ kick_pending_ipi(rproc);
+
+ return ret;
+}
+
+/* Kick the firmware */
+static void zynq_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct device *dev = rproc->dev.parent;
+ struct zynq_rproc_pdata *local = rproc->priv;
+ struct rproc_vdev *rvdev, *rvtmp;
+ struct fw_rsc_vdev *rsc;
+ int i;
+
+	dev_dbg(dev, "Kick firmware to start sending messages, vqid %d\n", vqid);
+
+ list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node) {
+ rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
+ for (i = 0; i < MAX_NUM_VRINGS; i++) {
+ struct rproc_vring *rvring = &rvdev->vring[i];
+
+ /* Send swirq to firmware */
+ if (rvring->notifyid == vqid) {
+ local->ipis[i].notifyid = vqid;
+				/* CPU1 is not handed over to the firmware
+				 * until start, so delay the kick until then.
+				 */
+ if (rproc->state == RPROC_RUNNING)
+ gic_raise_softirq(cpumask_of(1),
+ local->ipis[i].irq);
+ else
+ local->ipis[i].pending = true;
+ }
+ }
+
+}
+
+/* power off the remote processor */
+static int zynq_rproc_stop(struct rproc *rproc)
+{
+ int ret;
+ struct device *dev = rproc->dev.parent;
+
+ dev_dbg(rproc->dev.parent, "%s\n", __func__);
+
+	/* The CPU may fail to power on, for example in nosmp mode */
+ ret = cpu_up(1);
+ if (ret)
+ dev_err(dev, "Can't power on cpu1 %d\n", ret);
+
+ return 0;
+}
+
+static void *zynq_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+ struct zynq_rproc_pdata *local = rproc->priv;
+ struct zynq_mem_res *mem_res;
+ struct device *dev;
+
+ dev = rproc->dev.parent;
+ list_for_each_entry(mem_res, &local->fw_mems, node) {
+		/* This function only gets called if the reserved
+		 * firmware memory has not yet been added to the
+		 * carveout list.
+		 */
+ struct rproc_mem_entry *mem;
+ struct resource *res = &mem_res->res;
+ u64 res_da = (u64)res->start;
+ resource_size_t size;
+ int offset;
+ void *va;
+ dma_addr_t dma;
+
+ size = resource_size(res);
+ offset = (int)(da - res_da);
+ if (offset < 0)
+ continue;
+		if (offset + len > (int)size)
+ continue;
+ va = devm_ioremap_wc(dev, res->start, size);
+ dma = (dma_addr_t)res->start;
+ mem = rproc_mem_entry_init(dev, va, dma, (int)size,
+ (u32)res->start,
+ NULL, NULL, res->name);
+ if (!mem)
+ return NULL;
+ dev_dbg(dev, "%s: %s, va = %p, da = 0x%x dma = 0x%x\n",
+ __func__, mem->name, mem->va,
+ mem->da, mem->dma);
+ rproc_add_carveout(rproc, mem);
+ return (char *)va + offset;
+ }
+ return NULL;
+}
+
+static struct rproc_ops zynq_rproc_ops = {
+ .start = zynq_rproc_start,
+ .stop = zynq_rproc_stop,
+ .kick = zynq_rproc_kick,
+ .da_to_va = zynq_rproc_da_to_va,
+};
+
+/* Just to detect bug if interrupt forwarding is broken */
+static irqreturn_t zynq_remoteproc_interrupt(int irq, void *dev_id)
+{
+ struct device *dev = dev_id;
+
+ dev_err(dev, "GIC IRQ %d is not forwarded correctly\n", irq);
+
+ /*
+	 * MS: Reaching this handler does not have to be a BUG,
+	 * especially when the firmware does not disable interrupts;
+	 * some interrupts can still be pending at the next probe.
+	 * Another scenario is monitoring an infrequent interrupt
+	 * through the Linux kernel: the interrupt fires, is forwarded
+	 * to Linux, which updates its own statistics in
+	 * /proc/interrupts and forwards it on to the firmware.
+ *
+ * gic_set_cpu(1, irq); - setup cpu1 as destination cpu
+ * gic_raise_softirq(cpumask_of(1), irq); - forward irq to firmware
+ */
+
+ gic_set_cpu(1, irq);
+ return IRQ_HANDLED;
+}
+
+static void clear_irq(struct rproc *rproc)
+{
+ struct list_head *pos, *q;
+ struct irq_list *tmp;
+ struct zynq_rproc_pdata *local = rproc->priv;
+
+ dev_info(rproc->dev.parent, "Deleting the irq_list\n");
+ list_for_each_safe(pos, q, &local->irqs.list) {
+ tmp = list_entry(pos, struct irq_list, list);
+ free_irq(tmp->irq, rproc->dev.parent);
+ gic_set_cpu(0, tmp->irq);
+ list_del(pos);
+ kfree(tmp);
+ }
+}
+
+static int zynq_rproc_add_mems(struct zynq_rproc_pdata *pdata)
+{
+ int num_mems, i;
+ struct device *dev = pdata->rproc->dev.parent;
+ struct device_node *np = dev->of_node;
+
+ INIT_LIST_HEAD(&pdata->fw_mems);
+ num_mems = of_count_phandle_with_args(np, "memory-region", NULL);
+ if (num_mems <= 0)
+ return 0;
+ for (i = 0; i < num_mems; i++) {
+ struct device_node *node;
+ struct zynq_mem_res *mem_res;
+ int ret;
+
+ node = of_parse_phandle(np, "memory-region", i);
+ ret = of_device_is_compatible(node, "shared-dma-pool");
+ if (ret) {
+ /* it is DMA memory. */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, i);
+ if (ret) {
+ dev_err(dev, "unable to reserve DMA mem.\n");
+ return ret;
+ }
+ dev_dbg(dev, "%s, dma memory %s.\n",
+ __func__, of_node_full_name(node));
+ continue;
+ }
+ /*
+ * It is non-DMA memory, used for firmware loading.
+ * It will be added to the remoteproc carveouts later while
+ * loading the firmware.
+ * It needs to keep track of the memory resource here and
+ * add it to carveouts when loading firmware because
+ * the carveouts will be removed when rproc stops the processor.
+ */
+ mem_res = devm_kzalloc(dev, sizeof(*mem_res), GFP_KERNEL);
+ if (!mem_res)
+ return -ENOMEM;
+ ret = of_address_to_resource(node, 0, &mem_res->res);
+ if (ret) {
+ dev_err(dev, "unable to resolve memory region.\n");
+ return ret;
+ }
+ list_add_tail(&mem_res->node, &pdata->fw_mems);
+ dev_dbg(dev, "%s, non-dma mem %s\n",
+ __func__, of_node_full_name(node));
+ }
+ return 0;
+}
+
+static int zynq_remoteproc_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct irq_list *tmp;
+ int count = 0;
+ struct zynq_rproc_pdata *local;
+
+ rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev),
+ &zynq_rproc_ops, NULL,
+ sizeof(struct zynq_rproc_pdata));
+ if (!rproc) {
+ dev_err(&pdev->dev, "rproc allocation failed\n");
+ ret = -ENOMEM;
+ return ret;
+ }
+ local = rproc->priv;
+ local->rproc = rproc;
+
+ platform_set_drvdata(pdev, rproc);
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
+ goto dma_mask_fault;
+ }
+
+ /* Init list for IRQs - it can be long list */
+ INIT_LIST_HEAD(&local->irqs.list);
+
+ /* Alloc IRQ based on DTS to be sure that no other driver will use it */
+ while (1) {
+ int irq;
+
+ irq = platform_get_irq(pdev, count++);
+ if (irq == -ENXIO || irq == -EINVAL)
+ break;
+
+ tmp = kzalloc(sizeof(struct irq_list), GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto irq_fault;
+ }
+
+ tmp->irq = irq;
+
+ dev_dbg(&pdev->dev, "%d: Alloc irq: %d\n", count, tmp->irq);
+
+		/* Requesting the IRQs here ensures that no other
+		 * driver can claim them.
+		 */
+ ret = request_irq(tmp->irq, zynq_remoteproc_interrupt, 0,
+ dev_name(&pdev->dev), &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "IRQ %d already allocated\n",
+ tmp->irq);
+ goto irq_fault;
+ }
+
+ /*
+ * MS: Here is place for detecting problem with firmware
+ * which doesn't work correctly with interrupts
+ *
+ * MS: Comment if you want to count IRQs on Linux
+ */
+ gic_set_cpu(1, tmp->irq);
+ list_add(&(tmp->list), &(local->irqs.list));
+ }
+
+ /* Allocate free IPI number */
+ /* Read vring0 ipi number */
+ ret = of_property_read_u32(pdev->dev.of_node, "vring0",
+ &local->ipis[0].irq);
+ if (ret < 0) {
+		dev_err(&pdev->dev, "unable to read vring0 property\n");
+ goto irq_fault;
+ }
+
+ ret = set_ipi_handler(local->ipis[0].irq, ipi_kick,
+ "Firmware kick");
+ if (ret) {
+ dev_err(&pdev->dev, "IPI handler already registered\n");
+ goto irq_fault;
+ }
+
+ /* Read vring1 ipi number */
+ ret = of_property_read_u32(pdev->dev.of_node, "vring1",
+ &local->ipis[1].irq);
+ if (ret < 0) {
+		dev_err(&pdev->dev, "unable to read vring1 property\n");
+ goto ipi_fault;
+ }
+
+ /* Find on-chip memory */
+ ret = zynq_rproc_add_mems(local);
+ if (ret) {
+ dev_err(&pdev->dev, "rproc failed to add mems\n");
+ goto ipi_fault;
+ }
+
+ rproc->auto_boot = autoboot;
+
+ ret = rproc_add(local->rproc);
+ if (ret) {
+ dev_err(&pdev->dev, "rproc registration failed\n");
+ goto ipi_fault;
+ }
+
+ return 0;
+
+ipi_fault:
+ clear_ipi_handler(local->ipis[0].irq);
+
+irq_fault:
+ clear_irq(rproc);
+
+dma_mask_fault:
+ rproc_free(rproc);
+
+ return ret;
+}
+
+static int zynq_remoteproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct zynq_rproc_pdata *local = rproc->priv;
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+
+ rproc_del(rproc);
+
+ clear_ipi_handler(local->ipis[0].irq);
+ clear_irq(rproc);
+
+ of_reserved_mem_device_release(&pdev->dev);
+ rproc_free(rproc);
+
+ return 0;
+}
+
+/* Match table for OF platform binding */
+static const struct of_device_id zynq_remoteproc_match[] = {
+ { .compatible = "xlnx,zynq_remoteproc", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, zynq_remoteproc_match);
+
+static struct platform_driver zynq_remoteproc_driver = {
+ .probe = zynq_remoteproc_probe,
+ .remove = zynq_remoteproc_remove,
+ .driver = {
+ .name = "zynq_remoteproc",
+ .of_match_table = zynq_remoteproc_match,
+ },
+};
+module_platform_driver(zynq_remoteproc_driver);
+
+module_param_named(autoboot, autoboot, bool, 0444);
+MODULE_PARM_DESC(autoboot,
+ "enable | disable autoboot. (default: false)");
+
+MODULE_AUTHOR("Michal Simek <monstr@monstr.eu>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Zynq remote processor control driver");
diff --git a/drivers/remoteproc/zynqmp_r5_remoteproc.c b/drivers/remoteproc/zynqmp_r5_remoteproc.c
new file mode 100644
index 000000000000..f1b98903a175
--- /dev/null
+++ b/drivers/remoteproc/zynqmp_r5_remoteproc.c
@@ -0,0 +1,975 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Zynq R5 Remote Processor driver
+ *
+ * Copyright (C) 2015 - 2018 Xilinx Inc.
+ * Copyright (C) 2015 Jason Wu <j.wu@xilinx.com>
+ *
+ * Based on the original OMAP and Zynq Remote Processor drivers
+ *
+ * Copyright (C) 2012 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2012 PetaLogix
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ */
+
+#include <linux/atomic.h>
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/genalloc.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/zynqmp-ipi-message.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pfn.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "remoteproc_internal.h"
+
+#define MAX_RPROCS	2 /* Support up to 2 RPUs */
+#define MAX_MEM_PNODES 4 /* Max power nodes for one RPU memory instance */
+
+#define DEFAULT_FIRMWARE_NAME "rproc-rpu-fw"
+
+/* PM proc states */
+#define PM_PROC_STATE_ACTIVE 1U
+
+/* RX mailbox client buffer max length */
+#define RX_MBOX_CLIENT_BUF_MAX (IPI_BUF_LEN_MAX + \
+ sizeof(struct zynqmp_ipi_message))
+
+static bool autoboot __read_mostly;
+static bool allow_sysfs_kick __read_mostly;
+
+struct zynqmp_rpu_domain_pdata;
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+/**
+ * struct zynqmp_r5_mem - zynqmp rpu memory data
+ * @pnode_id: TCM power domain ids
+ * @res: memory resource
+ * @node: list node
+ */
+struct zynqmp_r5_mem {
+ u32 pnode_id[MAX_MEM_PNODES];
+ struct resource res;
+ struct list_head node;
+};
+
+/**
+ * struct zynqmp_r5_pdata - zynqmp rpu remote processor private data
+ * @dev: device of RPU instance
+ * @rproc: rproc handle
+ * @parent: RPU slot platform data
+ * @pnode_id: RPU CPU power domain id
+ * @mems: memory resources
+ * @is_r5_mode_set: indicate if r5 operation mode is set
+ * @tx_mc: tx mailbox client
+ * @rx_mc: rx mailbox client
+ * @tx_chan: tx mailbox channel
+ * @rx_chan: rx mailbox channel
+ * @workqueue: workqueue for the RPU remoteproc
+ * @tx_mc_skbs: socket buffers for tx mailbox client
+ * @rx_mc_buf: rx mailbox client buffer to save the rx message
+ * @remote_kick: flag to indicate if there is a kick from remote
+ */
+struct zynqmp_r5_pdata {
+ struct device dev;
+ struct rproc *rproc;
+ struct zynqmp_rpu_domain_pdata *parent;
+ u32 pnode_id;
+ struct list_head mems;
+ bool is_r5_mode_set;
+ struct mbox_client tx_mc;
+ struct mbox_client rx_mc;
+ struct mbox_chan *tx_chan;
+ struct mbox_chan *rx_chan;
+ struct work_struct workqueue;
+ struct sk_buff_head tx_mc_skbs;
+ unsigned char rx_mc_buf[RX_MBOX_CLIENT_BUF_MAX];
+ atomic_t remote_kick;
+};
+
+/**
+ * struct zynqmp_rpu_domain_pdata - zynqmp rpu platform data
+ * @rpus: table of RPUs
+ * @rpu_mode: RPU core configuration
+ */
+struct zynqmp_rpu_domain_pdata {
+ struct zynqmp_r5_pdata rpus[MAX_RPROCS];
+ enum rpu_oper_mode rpu_mode;
+};
+
+/**
+ * r5_set_mode - set RPU operation mode
+ * @pdata: Remote processor private data
+ *
+ * Set the RPU operation mode.
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int r5_set_mode(struct zynqmp_r5_pdata *pdata)
+{
+ u32 val[PAYLOAD_ARG_CNT] = {0}, expect;
+ struct zynqmp_rpu_domain_pdata *parent;
+ struct device *dev = &pdata->dev;
+ int ret;
+
+ if (pdata->is_r5_mode_set)
+ return 0;
+ parent = pdata->parent;
+ expect = (u32)parent->rpu_mode;
+ ret = eemi_ops->ioctl(pdata->pnode_id, IOCTL_GET_RPU_OPER_MODE,
+ 0, 0, val);
+ if (ret < 0) {
+ dev_err(dev, "failed to get RPU oper mode.\n");
+ return ret;
+ }
+ if (val[0] == expect) {
+ dev_dbg(dev, "RPU mode matches: %x\n", val[0]);
+ } else {
+ ret = eemi_ops->ioctl(pdata->pnode_id,
+ IOCTL_SET_RPU_OPER_MODE,
+ expect, 0, val);
+ if (ret < 0) {
+ dev_err(dev,
+ "failed to set RPU oper mode.\n");
+ return ret;
+ }
+ }
+ if (expect == (u32)PM_RPU_MODE_LOCKSTEP)
+ expect = (u32)PM_RPU_TCM_COMB;
+ else
+ expect = (u32)PM_RPU_TCM_SPLIT;
+ ret = eemi_ops->ioctl(pdata->pnode_id, IOCTL_TCM_COMB_CONFIG,
+ expect, 0, val);
+ if (ret < 0) {
+ dev_err(dev, "failed to config TCM to %x.\n",
+ expect);
+ return ret;
+ }
+ pdata->is_r5_mode_set = true;
+ return 0;
+}
+
+/**
+ * r5_is_running - check if R5 is running
+ * @pdata: Remote processor private data
+ *
+ * Check if the R5 processor is currently running.
+ *
+ * Return: true if R5 is running, false otherwise
+ */
+static bool r5_is_running(struct zynqmp_r5_pdata *pdata)
+{
+ u32 status, requirements, usage;
+ struct device *dev = &pdata->dev;
+
+ if (eemi_ops->get_node_status(pdata->pnode_id,
+ &status, &requirements, &usage)) {
+ dev_err(dev, "Failed to get RPU node %d status.\n",
+ pdata->pnode_id);
+ return false;
+ } else if (status != PM_PROC_STATE_ACTIVE) {
+ dev_dbg(dev, "RPU is not running.\n");
+ return false;
+ }
+
+ dev_dbg(dev, "RPU is running.\n");
+ return true;
+}
+
+/**
+ * r5_request_mem - request RPU memory
+ * @rproc: pointer to remoteproc instance
+ * @mem: pointer to RPU memory
+ *
+ * Request RPU memory resource to make it accessible by the kernel.
+ *
+ * Return: 0 if success, negative value for failure.
+ */
+static int r5_request_mem(struct rproc *rproc, struct zynqmp_r5_mem *mem)
+{
+ int i, ret;
+ struct device *dev = &rproc->dev;
+ struct zynqmp_r5_pdata *local = rproc->priv;
+
+ for (i = 0; i < MAX_MEM_PNODES; i++) {
+ if (mem->pnode_id[i]) {
+ ret = eemi_ops->request_node(mem->pnode_id[i],
+ ZYNQMP_PM_CAPABILITY_ACCESS,
+ 0,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING
+ );
+ if (ret < 0) {
+ dev_err(dev,
+ "failed to request power node: %u\n",
+ mem->pnode_id[i]);
+ return ret;
+ }
+ } else {
+ break;
+ }
+ }
+
+ ret = r5_set_mode(local);
+ if (ret < 0) {
+ dev_err(dev, "failed to set R5 operation mode.\n");
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * ZynqMP R5 remoteproc memory release function
+ */
+static int zynqmp_r5_mem_release(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct zynqmp_r5_mem *priv;
+ int i, ret;
+ struct device *dev = &rproc->dev;
+
+ priv = mem->priv;
+ if (!priv)
+ return 0;
+ for (i = 0; i < MAX_MEM_PNODES; i++) {
+ if (priv->pnode_id[i]) {
+ dev_dbg(dev, "%s, pnode %d\n",
+ __func__, priv->pnode_id[i]);
+ ret = eemi_ops->release_node(priv->pnode_id[i]);
+ if (ret < 0) {
+ dev_err(dev,
+ "failed to release power node: %u\n",
+ priv->pnode_id[i]);
+ return ret;
+ }
+ } else {
+ break;
+ }
+ }
+ return 0;
+}
+
+/*
+ * ZynqMP R5 remoteproc operations
+ */
+static int zynqmp_r5_rproc_start(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct zynqmp_r5_pdata *local = rproc->priv;
+ enum rpu_boot_mem bootmem;
+ int ret;
+
+ /* Set up R5 */
+ ret = r5_set_mode(local);
+ if (ret) {
+ dev_err(dev, "failed to set R5 operation mode.\n");
+ return ret;
+ }
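+	/*
+	 * Boot addresses in the top 0xFxxxxxxx range are taken to sit
+	 * behind the R5 high exception vectors (OCM at the top of the
+	 * address map); anything else boots through the low vectors
+	 * in TCM.
+	 */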
+ if ((rproc->bootaddr & 0xF0000000) == 0xF0000000)
+ bootmem = PM_RPU_BOOTMEM_HIVEC;
+ else
+ bootmem = PM_RPU_BOOTMEM_LOVEC;
+	dev_info(dev, "RPU boot from %s.\n",
+ bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM");
+
+ ret = eemi_ops->request_wakeup(local->pnode_id, 1, bootmem,
+ ZYNQMP_PM_REQUEST_ACK_NO);
+ if (ret < 0) {
+ dev_err(dev, "failed to boot R5.\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int zynqmp_r5_rproc_stop(struct rproc *rproc)
+{
+ struct zynqmp_r5_pdata *local = rproc->priv;
+ int ret;
+
+ ret = eemi_ops->force_powerdown(local->pnode_id,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ if (ret < 0) {
+ dev_err(&local->dev, "failed to shutdown R5.\n");
+ return ret;
+ }
+ local->is_r5_mode_set = false;
+ return 0;
+}
+
+static int zynqmp_r5_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ int ret;
+
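+	/* The resource table is optional for R5 firmware, so a missing
+	 * table (-EINVAL from the ELF loader) is not an error.
+	 */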
+ ret = rproc_elf_load_rsc_table(rproc, fw);
+ if (ret == -EINVAL)
+ ret = 0;
+ return ret;
+}
+
+static void *zynqmp_r5_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+ struct zynqmp_r5_pdata *local = rproc->priv;
+ struct zynqmp_r5_mem *mem;
+ struct device *dev;
+
+ dev = &local->dev;
+ list_for_each_entry(mem, &local->mems, node) {
+ struct rproc_mem_entry *rproc_mem;
+ struct resource *res = &mem->res;
+ u64 res_da = (u64)res->start;
+ resource_size_t size;
+ int offset, ret;
+ void *va;
+ dma_addr_t dma;
+
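+		/*
+		 * TCM banks are aliased into the global address map at
+		 * 0xFFExxxxx; translate such an address back to the
+		 * R5-local view. The 0x90000 adjustment maps the BTCM
+		 * alias at offset 0x90000 down to its local base of
+		 * 0x20000.
+		 */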
+ if ((res_da & 0xfff00000) == 0xffe00000) {
+ res_da &= 0x000fffff;
+ if (res_da & 0x80000)
+ res_da -= 0x90000;
+ }
+
+ offset = (int)(da - res_da);
+ if (offset < 0)
+ continue;
+ size = resource_size(res);
+ if (offset + len > (int)size)
+ continue;
+
+ ret = r5_request_mem(rproc, mem);
+ if (ret < 0) {
+ dev_err(dev, "failed to request memory %pad.\n",
+ &res->start);
+ return NULL;
+ }
+
+ va = devm_ioremap_wc(dev, res->start, size);
+ dma = (dma_addr_t)res->start;
+ da = (u32)res_da;
+ rproc_mem = rproc_mem_entry_init(dev, va, dma, (int)size, da,
+ NULL, zynqmp_r5_mem_release,
+ res->name);
+ if (!rproc_mem)
+ return NULL;
+ rproc_mem->priv = mem;
+ dev_dbg(dev, "%s: %s, va = %p, da = 0x%x dma = 0x%llx\n",
+ __func__, rproc_mem->name, rproc_mem->va,
+ rproc_mem->da, rproc_mem->dma);
+ rproc_add_carveout(rproc, rproc_mem);
+ return (char *)va + offset;
+ }
+ return NULL;
+}
+
+/* kick a firmware */
+static void zynqmp_r5_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct device *dev = rproc->dev.parent;
+ struct zynqmp_r5_pdata *local = rproc->priv;
+
+	dev_dbg(dev, "Kick firmware to start sending messages, vqid %d\n", vqid);
+
+ if (vqid < 0) {
+		/* A negative vqid is not passed to the mailbox, as a
+		 * vqid is supposed to be 0 or positive. It gives a way
+		 * to just kick the remote without using the IPI
+		 * buffer, which is faster since no message has to be
+		 * copied into the buffer. A proper way to pass short
+		 * messages still needs to be synced upstream first;
+		 * for now, a negative vqid means "raise the interrupt
+		 * only, carry no message in the IPI buffer".
+		 *
+		 * The return value is ignored, as a failure only means
+		 * there are already kicks queued in the mailbox.
+		 */
+ (void)mbox_send_message(local->tx_chan, NULL);
+ } else {
+ struct sk_buff *skb;
+ unsigned int skb_len;
+ struct zynqmp_ipi_message *mb_msg;
+ int ret;
+
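+		/* Carry the message in an skb so the buffer stays
+		 * alive until the mailbox tx_done callback dequeues
+		 * and frees it.
+		 */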
+		skb_len = (unsigned int)(sizeof(vqid) + sizeof(*mb_msg));
+ skb = alloc_skb(skb_len, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(dev,
+ "Failed to allocate skb to kick remote.\n");
+ return;
+ }
+ mb_msg = (struct zynqmp_ipi_message *)skb_put(skb, skb_len);
+ mb_msg->len = sizeof(vqid);
+ memcpy(mb_msg->data, &vqid, sizeof(vqid));
+ skb_queue_tail(&local->tx_mc_skbs, skb);
+ ret = mbox_send_message(local->tx_chan, mb_msg);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to kick remote.\n");
+ skb_dequeue_tail(&local->tx_mc_skbs);
+ kfree_skb(skb);
+ }
+ }
+}
+
+static bool zynqmp_r5_rproc_peek_remote_kick(struct rproc *rproc,
+ char *buf, size_t *len)
+{
+ struct device *dev = rproc->dev.parent;
+ struct zynqmp_r5_pdata *local = rproc->priv;
+
+ dev_dbg(dev, "Peek if remote has kicked\n");
+
+ if (atomic_read(&local->remote_kick) != 0) {
+ if (buf && len) {
+ struct zynqmp_ipi_message *msg;
+
+ msg = (struct zynqmp_ipi_message *)local->rx_mc_buf;
+ memcpy(buf, msg->data, msg->len);
+ *len = (size_t)msg->len;
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static void zynqmp_r5_rproc_ack_remote_kick(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct zynqmp_r5_pdata *local = rproc->priv;
+
+ dev_dbg(dev, "Ack remote\n");
+
+ atomic_set(&local->remote_kick, 0);
+ (void)mbox_send_message(local->rx_chan, NULL);
+}
+
+static struct rproc_ops zynqmp_r5_rproc_ops = {
+ .start = zynqmp_r5_rproc_start,
+ .stop = zynqmp_r5_rproc_stop,
+ .load = rproc_elf_load_segments,
+ .parse_fw = zynqmp_r5_parse_fw,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+ .da_to_va = zynqmp_r5_da_to_va,
+ .kick = zynqmp_r5_rproc_kick,
+ .peek_remote_kick = zynqmp_r5_rproc_peek_remote_kick,
+ .ack_remote_kick = zynqmp_r5_rproc_ack_remote_kick,
+};
+
+/**
+ * zynqmp_r5_get_reserved_mems() - get reserved memories
+ * @pdata: pointer to the RPU remoteproc private data
+ *
+ * Retrieve the memory resources from the memory-region property.
+ *
+ * Return: 0 for success, negative value for failure.
+ */
+static int zynqmp_r5_get_reserved_mems(struct zynqmp_r5_pdata *pdata)
+{
+ struct device *dev = &pdata->dev;
+ struct device_node *np = dev->of_node;
+ int num_mems;
+ int i;
+
+ num_mems = of_count_phandle_with_args(np, "memory-region", NULL);
+ if (num_mems <= 0)
+ return 0;
+ for (i = 0; i < num_mems; i++) {
+ struct device_node *node;
+ struct zynqmp_r5_mem *mem;
+ int ret;
+
+ node = of_parse_phandle(np, "memory-region", i);
+ ret = of_device_is_compatible(node, "shared-dma-pool");
+ if (ret) {
+ /* it is DMA memory. */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, i);
+ if (ret) {
+ dev_err(dev, "unable to reserve DMA mem.\n");
+ return ret;
+ }
+ dev_dbg(dev, "%s, dma memory %s.\n",
+ __func__, of_node_full_name(node));
+ continue;
+ }
+ /*
+ * It is non-DMA memory, used for firmware loading.
+ * It will be added to the R5 remoteproc mappings later.
+ */
+ mem = devm_kzalloc(dev, sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+ ret = of_address_to_resource(node, 0, &mem->res);
+ if (ret) {
+ dev_err(dev, "unable to resolve memory region.\n");
+ return ret;
+ }
+ list_add_tail(&mem->node, &pdata->mems);
+ dev_dbg(dev, "%s, non-dma mem %s\n",
+ __func__, of_node_full_name(node));
+ }
+ return 0;
+}
+
+/**
+ * zynqmp_r5_mem_probe() - probe an RPU TCM memory device
+ * @pdata: pointer to the RPU remoteproc private data
+ * @node: pointer to the memory node
+ *
+ * Retrieve the memory resources of an RPU TCM memory device.
+ *
+ * Return: 0 for success, negative value for failure.
+ */
+static int zynqmp_r5_mem_probe(struct zynqmp_r5_pdata *pdata,
+ struct device_node *node)
+{
+ struct device *dev;
+ struct zynqmp_r5_mem *mem;
+ int ret;
+
+ dev = &pdata->dev;
+ mem = devm_kzalloc(dev, sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+ ret = of_address_to_resource(node, 0, &mem->res);
+ if (ret < 0) {
+ dev_err(dev, "failed to get resource of memory %s",
+ of_node_full_name(node));
+ return -EINVAL;
+ }
+
+ /* Get the power domain id */
+ if (of_find_property(node, "pnode-id", NULL)) {
+ struct property *prop;
+ const __be32 *cur;
+ u32 val;
+ int i = 0;
+
+		of_property_for_each_u32(node, "pnode-id", prop, cur, val) {
+			if (i >= MAX_MEM_PNODES)
+				break;
+			mem->pnode_id[i++] = val;
+		}
+ }
+ list_add_tail(&mem->node, &pdata->mems);
+ return 0;
+}
+
+/**
+ * zynqmp_r5_release() - ZynqMP R5 device release function
+ * @dev: pointer to the device struct of ZynqMP R5
+ *
+ * Function to release ZynqMP R5 device.
+ */
+static void zynqmp_r5_release(struct device *dev)
+{
+ struct zynqmp_r5_pdata *pdata;
+ struct rproc *rproc;
+ struct sk_buff *skb;
+
+ pdata = dev_get_drvdata(dev);
+ rproc = pdata->rproc;
+ if (rproc) {
+ rproc_del(rproc);
+ rproc_free(rproc);
+ }
+ if (pdata->tx_chan)
+ mbox_free_channel(pdata->tx_chan);
+ if (pdata->rx_chan)
+ mbox_free_channel(pdata->rx_chan);
+ /* Discard all SKBs */
+ while (!skb_queue_empty(&pdata->tx_mc_skbs)) {
+ skb = skb_dequeue(&pdata->tx_mc_skbs);
+ kfree_skb(skb);
+ }
+
+ put_device(dev->parent);
+}
+
+/**
+ * event_notified_idr_cb() - event notified idr callback
+ * @id: idr id
+ * @ptr: pointer to idr private data
+ * @data: data passed to idr_for_each callback
+ *
+ * Pass the notification on to the remoteproc virtio layer.
+ *
+ * Return: always 0; a return value is only needed to satisfy the
+ * idr_for_each() callback signature.
+ */
+static int event_notified_idr_cb(int id, void *ptr, void *data)
+{
+ struct rproc *rproc = data;
+
+ (void)rproc_vq_interrupt(rproc, id);
+ return 0;
+}
+
+/**
+ * handle_event_notified() - remoteproc notification work function
+ * @work: pointer to the work structure
+ *
+ * It checks each registered remoteproc notify IDs.
+ */
+static void handle_event_notified(struct work_struct *work)
+{
+ struct rproc *rproc;
+ struct zynqmp_r5_pdata *local;
+
+ local = container_of(work, struct zynqmp_r5_pdata, workqueue);
+
+ (void)mbox_send_message(local->rx_chan, NULL);
+ rproc = local->rproc;
+ if (rproc->sysfs_kick) {
+ sysfs_notify(&rproc->dev.kobj, NULL, "remote_kick");
+ return;
+ }
+ /*
+	 * The IPI is only used as an interrupt; the firmware side may or
+	 * may not write the notifyid when it triggers the IPI, so scan
+	 * through all the registered notifyids.
+ */
+ idr_for_each(&rproc->notifyids, event_notified_idr_cb, rproc);
+}
+
+/**
+ * zynqmp_r5_mb_rx_cb() - Receive channel mailbox callback
+ * @cl: mailbox client
+ * @mssg: message pointer
+ *
+ * It will schedule the R5 notification work.
+ */
+static void zynqmp_r5_mb_rx_cb(struct mbox_client *cl, void *mssg)
+{
+ struct zynqmp_r5_pdata *local;
+
+ local = container_of(cl, struct zynqmp_r5_pdata, rx_mc);
+ if (mssg) {
+ struct zynqmp_ipi_message *ipi_msg, *buf_msg;
+ size_t len;
+
+ ipi_msg = (struct zynqmp_ipi_message *)mssg;
+ buf_msg = (struct zynqmp_ipi_message *)local->rx_mc_buf;
+ len = (ipi_msg->len >= IPI_BUF_LEN_MAX) ?
+ IPI_BUF_LEN_MAX : ipi_msg->len;
+ buf_msg->len = len;
+ memcpy(buf_msg->data, ipi_msg->data, len);
+ }
+ atomic_set(&local->remote_kick, 1);
+ schedule_work(&local->workqueue);
+}
+
+/**
+ * zynqmp_r5_mb_tx_done() - Request has been sent to the remote
+ * @cl: mailbox client
+ * @mssg: pointer to the message which has been sent
+ * @r: status of last TX - OK or error
+ *
+ * It will be called by the mailbox framework when the last TX has done.
+ */
+static void zynqmp_r5_mb_tx_done(struct mbox_client *cl, void *mssg, int r)
+{
+ struct zynqmp_r5_pdata *local;
+ struct sk_buff *skb;
+
+ if (!mssg)
+ return;
+ local = container_of(cl, struct zynqmp_r5_pdata, tx_mc);
+ skb = skb_dequeue(&local->tx_mc_skbs);
+ kfree_skb(skb);
+}
+
+/**
+ * zynqmp_r5_setup_mbox() - Setup mailboxes
+ *
+ * @pdata: pointer to the ZynqMP R5 processor platform data
+ * @node: pointer of the device node
+ *
+ * Function to setup mailboxes to talk to RPU.
+ *
+ * Return: 0 for success, negative value for failure.
+ */
+static int zynqmp_r5_setup_mbox(struct zynqmp_r5_pdata *pdata,
+ struct device_node *node)
+{
+ struct device *dev = &pdata->dev;
+ struct mbox_client *mclient;
+
+ /* Setup TX mailbox channel client */
+ mclient = &pdata->tx_mc;
+ mclient->dev = dev;
+ mclient->rx_callback = NULL;
+ mclient->tx_block = false;
+ mclient->knows_txdone = false;
+ mclient->tx_done = zynqmp_r5_mb_tx_done;
+
+	/* Setup RX mailbox channel client */
+ mclient = &pdata->rx_mc;
+ mclient->dev = dev;
+ mclient->rx_callback = zynqmp_r5_mb_rx_cb;
+ mclient->tx_block = false;
+ mclient->knows_txdone = false;
+
+ INIT_WORK(&pdata->workqueue, handle_event_notified);
+
+ atomic_set(&pdata->remote_kick, 0);
+ /* Request TX and RX channels */
+ pdata->tx_chan = mbox_request_channel_byname(&pdata->tx_mc, "tx");
+ if (IS_ERR(pdata->tx_chan)) {
+ dev_err(dev, "failed to request mbox tx channel.\n");
+ pdata->tx_chan = NULL;
+ return -EINVAL;
+ }
+ pdata->rx_chan = mbox_request_channel_byname(&pdata->rx_mc, "rx");
+ if (IS_ERR(pdata->rx_chan)) {
+ dev_err(dev, "failed to request mbox rx channel.\n");
+ pdata->rx_chan = NULL;
+ return -EINVAL;
+ }
+ skb_queue_head_init(&pdata->tx_mc_skbs);
+ return 0;
+}
+
+/**
+ * zynqmp_r5_probe() - Probes ZynqMP R5 processor device node
+ * @pdata: pointer to the ZynqMP R5 processor platform data
+ * @pdev: parent RPU domain platform device
+ * @node: pointer of the device node
+ *
+ * Function to retrieve the information of the ZynqMP R5 device node.
+ *
+ * Return: 0 for success, negative value for failure.
+ */
+static int zynqmp_r5_probe(struct zynqmp_r5_pdata *pdata,
+ struct platform_device *pdev,
+ struct device_node *node)
+{
+ struct device *dev = &pdata->dev;
+ struct rproc *rproc;
+ struct device_node *nc;
+ int ret;
+
+ /* Create device for ZynqMP R5 device */
+ dev->parent = &pdev->dev;
+ dev->release = zynqmp_r5_release;
+ dev->of_node = node;
+ dev_set_name(dev, "%s", of_node_full_name(node));
+ dev_set_drvdata(dev, pdata);
+ ret = device_register(dev);
+ if (ret) {
+ dev_err(dev, "failed to register device.\n");
+ return ret;
+ }
+ get_device(&pdev->dev);
+
+ /* Allocate remoteproc instance */
+ rproc = rproc_alloc(dev, dev_name(dev), &zynqmp_r5_rproc_ops, NULL, 0);
+ if (!rproc) {
+ dev_err(dev, "rproc allocation failed.\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+ rproc->auto_boot = autoboot;
+ pdata->rproc = rproc;
+ rproc->priv = pdata;
+
+ /*
+ * The device has not been spawned from a device tree, so
+	 * arch_setup_dma_ops has not been called, thus leaving
+ * the device with dummy DMA ops.
+ * Fix this by inheriting the parent's DMA ops and mask.
+ */
+ rproc->dev.dma_mask = pdev->dev.dma_mask;
+ set_dma_ops(&rproc->dev, get_dma_ops(&pdev->dev));
+
+ /* Probe R5 memory devices */
+ INIT_LIST_HEAD(&pdata->mems);
+ for_each_available_child_of_node(node, nc) {
+ ret = zynqmp_r5_mem_probe(pdata, nc);
+ if (ret) {
+ dev_err(dev, "failed to probe memory %s.\n",
+ of_node_full_name(nc));
+ goto error;
+ }
+ }
+
+ /* Probe reserved system memories used by R5 */
+ ret = zynqmp_r5_get_reserved_mems(pdata);
+ if (ret) {
+ dev_err(dev, "failed to get reserved memory.\n");
+ goto error;
+ }
+
+ /* Set up DMA mask */
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_warn(dev, "dma_set_coherent_mask failed: %d\n", ret);
+ /* If DMA is not configured yet, try to configure it. */
+ ret = of_dma_configure(dev, node, true);
+ if (ret) {
+ dev_err(dev, "failed to configure DMA.\n");
+ goto error;
+ }
+ }
+
+ /* Get R5 power domain node */
+ ret = of_property_read_u32(node, "pnode-id", &pdata->pnode_id);
+ if (ret) {
+ dev_err(dev, "failed to get power node id.\n");
+ goto error;
+ }
+
+ /* Check if R5 is running */
+ if (r5_is_running(pdata)) {
+ atomic_inc(&rproc->power);
+ rproc->state = RPROC_RUNNING;
+ }
+
+ if (!of_get_property(dev->of_node, "mboxes", NULL)) {
+ dev_info(dev, "no mailboxes.\n");
+ } else {
+ ret = zynqmp_r5_setup_mbox(pdata, node);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* Add R5 remoteproc */
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "rproc registration failed\n");
+ goto error;
+ }
+
+ if (allow_sysfs_kick) {
+ dev_info(dev, "Trying to create remote sysfs entry.\n");
+ rproc->sysfs_kick = 1;
+ (void)rproc_create_kick_sysfs(rproc);
+ }
+
+ return 0;
+error:
+ if (pdata->rproc)
+ rproc_free(pdata->rproc);
+ pdata->rproc = NULL;
+ device_unregister(dev);
+ put_device(&pdev->dev);
+ return ret;
+}
+
+static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev)
+{
+	const char *prop;
+ int ret = 0, i;
+ struct zynqmp_rpu_domain_pdata *local;
+ struct device *dev = &pdev->dev;
+ struct device_node *nc;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ local = devm_kzalloc(dev, sizeof(*local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, local);
+
+ prop = of_get_property(dev->of_node, "core_conf", NULL);
+ if (!prop) {
+		dev_err(&pdev->dev, "core_conf property is not specified.\n");
+ return -EINVAL;
+ }
+
+ dev_info(dev, "RPU core_conf: %s\n", prop);
+ if (!strcmp(prop, "split")) {
+ local->rpu_mode = PM_RPU_MODE_SPLIT;
+ } else if (!strcmp(prop, "lockstep")) {
+ local->rpu_mode = PM_RPU_MODE_LOCKSTEP;
+ } else {
+ dev_err(dev,
+			"Invalid core_conf mode provided - %s, %d\n",
+ prop, (int)local->rpu_mode);
+ return -EINVAL;
+ }
+
+ i = 0;
+ for_each_available_child_of_node(dev->of_node, nc) {
+ local->rpus[i].parent = local;
+ ret = zynqmp_r5_probe(&local->rpus[i], pdev, nc);
+ if (ret) {
+ dev_err(dev, "failed to probe rpu %s.\n",
+ of_node_full_name(nc));
+ return ret;
+ }
+ i++;
+ }
+
+ return 0;
+}
+
+static int zynqmp_r5_remoteproc_remove(struct platform_device *pdev)
+{
+ struct zynqmp_rpu_domain_pdata *local = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < MAX_RPROCS; i++) {
+ struct zynqmp_r5_pdata *rpu = &local->rpus[i];
+ struct rproc *rproc;
+
+ rproc = rpu->rproc;
+ if (rproc) {
+ rproc_del(rproc);
+ rproc_free(rproc);
+ rpu->rproc = NULL;
+ }
+ if (rpu->tx_chan) {
+ mbox_free_channel(rpu->tx_chan);
+ rpu->tx_chan = NULL;
+ }
+ if (rpu->rx_chan) {
+ mbox_free_channel(rpu->rx_chan);
+ rpu->rx_chan = NULL;
+ }
+
+ device_unregister(&rpu->dev);
+ }
+
+ return 0;
+}
+
+/* Match table for OF platform binding */
+static const struct of_device_id zynqmp_r5_remoteproc_match[] = {
+ { .compatible = "xlnx,zynqmp-r5-remoteproc-1.0", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, zynqmp_r5_remoteproc_match);
+
+static struct platform_driver zynqmp_r5_remoteproc_driver = {
+ .probe = zynqmp_r5_remoteproc_probe,
+ .remove = zynqmp_r5_remoteproc_remove,
+ .driver = {
+ .name = "zynqmp_r5_remoteproc",
+ .of_match_table = zynqmp_r5_remoteproc_match,
+ },
+};
+module_platform_driver(zynqmp_r5_remoteproc_driver);
+
+module_param_named(autoboot, autoboot, bool, 0444);
+MODULE_PARM_DESC(autoboot,
+		 "enable | disable autoboot. (default: false)");
+module_param_named(allow_sysfs_kick, allow_sysfs_kick, bool, 0444);
+MODULE_PARM_DESC(allow_sysfs_kick,
+ "enable | disable allow kick from sysfs. (default: false)");
+
+MODULE_AUTHOR("Jason Wu <j.wu@xilinx.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ZynqMP R5 remote processor control driver");
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index cf60ce526064..949d24eecc6e 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -29,4 +29,3 @@ obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
obj-$(CONFIG_ARCH_ZYNQMP) += reset-zynqmp.o
-
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 688b4f6227fc..57219aa22ee8 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -597,6 +597,9 @@ static void __reset_control_put_internal(struct reset_control *rstc)
{
lockdep_assert_held(&reset_list_mutex);
+ if (IS_ERR_OR_NULL(rstc))
+ return;
+
kref_put(&rstc->refcnt, __reset_control_release);
}
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
index 24e6d420b26b..84e761f454b6 100644
--- a/drivers/reset/hisilicon/hi6220_reset.c
+++ b/drivers/reset/hisilicon/hi6220_reset.c
@@ -104,7 +104,7 @@ static int hi6220_reset_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- type = (enum hi6220_reset_ctrl_type)of_device_get_match_data(dev);
+ type = (uintptr_t)of_device_get_match_data(dev);
regmap = syscon_node_to_regmap(np);
if (IS_ERR(regmap)) {
diff --git a/drivers/reset/reset-ti-sci.c b/drivers/reset/reset-ti-sci.c
index bf68729ab729..b799aefad547 100644
--- a/drivers/reset/reset-ti-sci.c
+++ b/drivers/reset/reset-ti-sci.c
@@ -1,7 +1,7 @@
/*
* Texas Instrument's System Control Interface (TI-SCI) reset driver
*
- * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 0f10b3f84705..2dea1e7487bb 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -222,6 +222,10 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
channel->glink = glink;
channel->name = kstrdup(name, GFP_KERNEL);
+ if (!channel->name) {
+ kfree(channel);
+ return ERR_PTR(-ENOMEM);
+ }
init_completion(&channel->open_req);
init_completion(&channel->open_ack);
@@ -929,6 +933,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
spin_unlock_irqrestore(&glink->idr_lock, flags);
if (!channel) {
dev_err(glink->dev, "intents for non-existing channel\n");
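+		/* Consume the payload so later messages in the rx FIFO
+		 * still parse correctly.
+		 */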
+ qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
return;
}
@@ -1374,6 +1379,7 @@ static void qcom_glink_rpdev_release(struct device *dev)
struct glink_channel *channel = to_glink_channel(rpdev->ept);
channel->rpdev = NULL;
+ kfree(rpdev->driver_override);
kfree(rpdev);
}
@@ -1472,7 +1478,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
cancel_work_sync(&channel->intent_work);
if (channel->rpdev) {
- strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
+ strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index a4db9f6100d2..b5167ef93abf 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -1073,7 +1073,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
/* Assign public information to the rpmsg_device */
rpdev = &qsdev->rpdev;
- strncpy(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
+ strscpy_pad(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
rpdev->src = RPMSG_ADDR_ANY;
rpdev->dst = RPMSG_ADDR_ANY;
@@ -1304,7 +1304,7 @@ static void qcom_channel_state_worker(struct work_struct *work)
spin_unlock_irqrestore(&edge->channels_lock, flags);
- strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
+ strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
rpmsg_unregister_device(&edge->dev, &chinfo);
@@ -1364,6 +1364,7 @@ static int qcom_smd_parse_edge(struct device *dev,
}
edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
+ of_node_put(syscon_np);
if (IS_ERR(edge->ipc_regmap)) {
ret = PTR_ERR(edge->ipc_regmap);
goto put_node;
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index c1d4990beab0..865d977668cc 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -332,7 +332,8 @@ field##_store(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t sz) \
{ \
struct rpmsg_device *rpdev = to_rpmsg_device(dev); \
- char *new, *old; \
+ const char *old; \
+ char *new; \
\
new = kstrndup(buf, sz, GFP_KERNEL); \
if (!new) \
@@ -525,24 +526,52 @@ static struct bus_type rpmsg_bus = {
.remove = rpmsg_dev_remove,
};
-int rpmsg_register_device(struct rpmsg_device *rpdev)
+/*
+ * A helper for registering rpmsg device with driver override and name.
+ * Drivers should not be using it, but instead rpmsg_register_device().
+ */
+int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+ const char *driver_override)
{
struct device *dev = &rpdev->dev;
int ret;
+ if (driver_override)
+		strscpy_pad(rpdev->id.name, driver_override, RPMSG_NAME_SIZE);
+
dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent),
rpdev->id.name, rpdev->src, rpdev->dst);
rpdev->dev.bus = &rpmsg_bus;
- ret = device_register(&rpdev->dev);
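+	/* Open-code device_register() as device_initialize() plus
+	 * device_add() so that the driver override can be set on an
+	 * initialized, but not yet added, device.
+	 */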
+ device_initialize(dev);
+ if (driver_override) {
+ ret = driver_set_override(dev, &rpdev->driver_override,
+ driver_override,
+ strlen(driver_override));
+ if (ret) {
+			dev_err(dev, "driver_set_override failed: %d\n", ret);
+ put_device(dev);
+ return ret;
+ }
+ }
+
+ ret = device_add(dev);
if (ret) {
- dev_err(dev, "device_register failed: %d\n", ret);
+ dev_err(dev, "device_add failed: %d\n", ret);
+ kfree(rpdev->driver_override);
+ rpdev->driver_override = NULL;
put_device(&rpdev->dev);
}
return ret;
}
+EXPORT_SYMBOL(rpmsg_register_device_override);
+
+int rpmsg_register_device(struct rpmsg_device *rpdev)
+{
+ return rpmsg_register_device_override(rpdev, NULL);
+}
EXPORT_SYMBOL(rpmsg_register_device);
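As illustration, a hedged sketch of how a backend might choose between the two registration paths; the function and flag names are assumptions, not part of this patch:

/* Hypothetical rpmsg backend announce path. */
static int my_backend_announce_create(struct rpmsg_device *rpdev,
				      bool is_ctrl_dev)
{
	/* Control devices are pinned to one driver via the override... */
	if (is_ctrl_dev)
		return rpmsg_register_device_override(rpdev, "rpmsg_ctrl");

	/* ...ordinary channels keep matching drivers by id table. */
	return rpmsg_register_device(rpdev);
}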
/*
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index 3fc83cd50e98..f305279e2e24 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -84,10 +84,7 @@ struct device *rpmsg_find_device(struct device *parent,
*/
static inline int rpmsg_chrdev_register_device(struct rpmsg_device *rpdev)
{
- strcpy(rpdev->id.name, "rpmsg_chrdev");
- rpdev->driver_override = "rpmsg_chrdev";
-
- return rpmsg_register_device(rpdev);
+ return rpmsg_register_device_override(rpdev, "rpmsg_ctrl");
}
#endif
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 376ebbf880d6..755a8e6abe9e 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -381,6 +381,7 @@ static void virtio_rpmsg_release_device(struct device *dev)
struct rpmsg_device *rpdev = to_rpmsg_device(dev);
struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
+ kfree(rpdev->driver_override);
kfree(vch);
}
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 0ad8d84aeb33..22638878c981 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1811,7 +1811,8 @@ config RTC_DRV_MOXART
config RTC_DRV_MT6397
tristate "MediaTek PMIC based RTC"
- depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
+ depends on MFD_MT6397 || COMPILE_TEST
+ select IRQ_DOMAIN
help
This selects the MediaTek(R) RTC driver. RTC is part of MediaTek
MT6397 PMIC. You should enable MT6397 PMIC MFD before select
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 184e4a3e2bef..48a341a25c8f 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -1437,7 +1437,7 @@ ds1685_rtc_poweroff(struct platform_device *pdev)
unreachable();
}
}
-EXPORT_SYMBOL(ds1685_rtc_poweroff);
+EXPORT_SYMBOL_GPL(ds1685_rtc_poweroff);
/* ----------------------------------------------------------------------- */
diff --git a/drivers/rtc/rtc-meson-vrtc.c b/drivers/rtc/rtc-meson-vrtc.c
index 89e5ba0dae69..1cf98542578f 100644
--- a/drivers/rtc/rtc-meson-vrtc.c
+++ b/drivers/rtc/rtc-meson-vrtc.c
@@ -23,7 +23,7 @@ static int meson_vrtc_read_time(struct device *dev, struct rtc_time *tm)
struct timespec64 time;
dev_dbg(dev, "%s\n", __func__);
- ktime_get_raw_ts64(&time);
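+	/* Wall-clock time is wanted here; the raw monotonic clock bears
+	 * no relation to the actual date.
+	 */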
+ ktime_get_real_ts64(&time);
rtc_time64_to_tm(time.tv_sec, tm);
return 0;
@@ -101,7 +101,7 @@ static int __maybe_unused meson_vrtc_suspend(struct device *dev)
long alarm_secs;
struct timespec64 time;
- ktime_get_raw_ts64(&time);
+ ktime_get_real_ts64(&time);
local_time = time.tv_sec;
dev_dbg(dev, "alarm_time = %lus, local_time=%lus\n",
diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c
index d349cef09cb7..48595b00ebb3 100644
--- a/drivers/rtc/rtc-mxc_v2.c
+++ b/drivers/rtc/rtc-mxc_v2.c
@@ -337,8 +337,10 @@ static int mxc_rtc_probe(struct platform_device *pdev)
}
pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(pdata->rtc))
+ if (IS_ERR(pdata->rtc)) {
+ clk_disable_unprepare(pdata->clk);
return PTR_ERR(pdata->rtc);
+ }
pdata->rtc->ops = &mxc_rtc_ops;
pdata->rtc->range_max = U32_MAX;
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index a2941c875a06..bf502fcd6077 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -26,6 +26,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rtc.h>
+#include <linux/rtc/rtc-omap.h>
/*
* The OMAP RTC is a year/month/day/hours/minutes/seconds BCD clock
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 1afa6d9fa9fb..3e7ea5244562 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -159,10 +159,10 @@ static int pcf85063_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (ret)
return ret;
- alrm->time.tm_sec = bcd2bin(buf[0]);
- alrm->time.tm_min = bcd2bin(buf[1]);
- alrm->time.tm_hour = bcd2bin(buf[2]);
- alrm->time.tm_mday = bcd2bin(buf[3]);
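+	/* The top bits of the alarm registers carry the alarm-enable
+	 * flag and unused bits, not BCD data; mask them off before
+	 * converting.
+	 */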
+ alrm->time.tm_sec = bcd2bin(buf[0] & 0x7f);
+ alrm->time.tm_min = bcd2bin(buf[1] & 0x7f);
+ alrm->time.tm_hour = bcd2bin(buf[2] & 0x3f);
+ alrm->time.tm_mday = bcd2bin(buf[3] & 0x3f);
ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &val);
if (ret)
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
index 3450d615974d..bb962dce3ab2 100644
--- a/drivers/rtc/rtc-pcf85363.c
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -407,7 +407,7 @@ static int pcf85363_probe(struct i2c_client *client,
if (client->irq > 0) {
regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
- PIN_IO_INTA_OUT, PIN_IO_INTAPM);
+ PIN_IO_INTAPM, PIN_IO_INTA_OUT);
ret = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pcf85363_rtc_handle_irq,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
index 17653ed52ebb..40f293621b01 100644
--- a/drivers/rtc/rtc-pic32.c
+++ b/drivers/rtc/rtc-pic32.c
@@ -326,16 +326,16 @@ static int pic32_rtc_probe(struct platform_device *pdev)
spin_lock_init(&pdata->alarm_lock);
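+	/* Allocate the RTC device before enabling the hardware so that
+	 * an allocation failure cannot return with the clock enabled.
+	 */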
+ pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(pdata->rtc))
+ return PTR_ERR(pdata->rtc);
+
clk_prepare_enable(pdata->clk);
pic32_rtc_enable(pdata, 1);
device_init_wakeup(&pdev->dev, 1);
- pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(pdata->rtc))
- return PTR_ERR(pdata->rtc);
-
pdata->rtc->ops = &pic32_rtcops;
pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pdata->rtc->range_max = RTC_TIMESTAMP_END_2099;
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index f5a30e0f16c2..c22a79550071 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -219,7 +219,6 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
int rc, i;
u8 value[NUM_8_BIT_RTC_REGS];
- unsigned int ctrl_reg;
unsigned long secs, irq_flags;
struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
@@ -231,6 +230,11 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
secs >>= 8;
}
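+	/* Disable the alarm while its registers are rewritten, so that a
+	 * partially written alarm time cannot fire spuriously.
+	 */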
+ rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl,
+ regs->alarm_en, 0);
+ if (rc)
+ return rc;
+
spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value,
@@ -240,19 +244,11 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
goto rtc_rw_fail;
}
- rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
- if (rc)
- goto rtc_rw_fail;
-
- if (alarm->enabled)
- ctrl_reg |= regs->alarm_en;
- else
- ctrl_reg &= ~regs->alarm_en;
-
- rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
- if (rc) {
- dev_err(dev, "Write to RTC alarm control register failed\n");
- goto rtc_rw_fail;
+ if (alarm->enabled) {
+ rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl,
+ regs->alarm_en, regs->alarm_en);
+ if (rc)
+ goto rtc_rw_fail;
}
dev_dbg(dev, "Alarm Set for h:m:s=%ptRt, y-m-d=%ptRdr\n",
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 757f4daa7181..0f1e544ac8db 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -33,6 +33,14 @@
#define SNVS_LPPGDR_INIT 0x41736166
#define CNTR_TO_SECS_SH 15
+/* The maximum number of RTC clock cycles that are allowed to pass between
+ * two consecutive clock counter register reads. If the values are corrupted,
+ * a bigger difference is expected. The RTC frequency is 32kHz, so 320 cycles
+ * correspond to 10ms, which should be enough for most cases. If a read once
+ * takes longer than expected, we simply retry.
+ */
+#define MAX_RTC_READ_DIFF_CYCLES 320
+
struct snvs_rtc_data {
struct rtc_device *rtc;
struct regmap *regmap;
@@ -57,6 +65,7 @@ static u64 rtc_read_lpsrt(struct snvs_rtc_data *data)
static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
{
u64 read1, read2;
+ s64 diff;
unsigned int timeout = 100;
/* As expected, the registers might update between the read of the LSB
@@ -67,7 +76,8 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
do {
read2 = read1;
read1 = rtc_read_lpsrt(data);
- } while (read1 != read2 && --timeout);
+ diff = read1 - read2;
+ } while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout);
if (!timeout)
dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
@@ -79,13 +89,15 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
static int rtc_read_lp_counter_lsb(struct snvs_rtc_data *data, u32 *lsb)
{
u32 count1, count2;
+ s32 diff;
unsigned int timeout = 100;
regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
do {
count2 = count1;
regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
- } while (count1 != count2 && --timeout);
+ diff = count1 - count2;
+ } while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout);
if (!timeout) {
dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
return -ETIMEDOUT;
diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
index 49474a31c66d..2031d042c5e4 100644
--- a/drivers/rtc/rtc-st-lpc.c
+++ b/drivers/rtc/rtc-st-lpc.c
@@ -231,7 +231,7 @@ static int st_rtc_probe(struct platform_device *pdev)
enable_irq_wake(rtc->irq);
disable_irq(rtc->irq);
- rtc->clk = clk_get(&pdev->dev, NULL);
+ rtc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(rtc->clk)) {
dev_err(&pdev->dev, "Unable to request clock\n");
return PTR_ERR(rtc->clk);
@@ -241,6 +241,7 @@ static int st_rtc_probe(struct platform_device *pdev)
rtc->clkrate = clk_get_rate(rtc->clk);
if (!rtc->clkrate) {
+ clk_disable_unprepare(rtc->clk);
dev_err(&pdev->dev, "Unable to fetch clock rate\n");
return -EINVAL;
}
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index c41bc8084d7c..5bef4148c623 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -129,7 +129,6 @@ struct sun6i_rtc_clk_data {
unsigned int fixed_prescaler : 16;
unsigned int has_prescaler : 1;
unsigned int has_out_clk : 1;
- unsigned int export_iosc : 1;
unsigned int has_losc_en : 1;
unsigned int has_auto_swt : 1;
};
@@ -251,23 +250,19 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
writel(reg, rtc->base + SUN6I_LOSC_CTRL);
}
- /* Switch to the external, more precise, oscillator */
- reg |= SUN6I_LOSC_CTRL_EXT_OSC;
- if (rtc->data->has_losc_en)
- reg |= SUN6I_LOSC_CTRL_EXT_LOSC_EN;
+ /* Switch to the external, more precise, oscillator, if present */
+ if (of_get_property(node, "clocks", NULL)) {
+ reg |= SUN6I_LOSC_CTRL_EXT_OSC;
+ if (rtc->data->has_losc_en)
+ reg |= SUN6I_LOSC_CTRL_EXT_LOSC_EN;
+ }
writel(reg, rtc->base + SUN6I_LOSC_CTRL);
/* Yes, I know, this is ugly. */
sun6i_rtc = rtc;
- /* Deal with old DTs */
- if (!of_get_property(node, "clocks", NULL))
- goto err;
-
- /* Only read IOSC name from device tree if it is exported */
- if (rtc->data->export_iosc)
- of_property_read_string_index(node, "clock-output-names", 2,
- &iosc_name);
+ of_property_read_string_index(node, "clock-output-names", 2,
+ &iosc_name);
rtc->int_osc = clk_hw_register_fixed_rate_with_accuracy(NULL,
iosc_name,
@@ -280,11 +275,13 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
}
parents[0] = clk_hw_get_name(rtc->int_osc);
+ /* If there is no external oscillator, this will be NULL and ... */
parents[1] = of_clk_get_parent_name(node, 0);
rtc->hw.init = &init;
init.parent_names = parents;
+ /* ... number of clock parents will be 1. */
init.num_parents = of_clk_get_parent_count(node) + 1;
of_property_read_string_index(node, "clock-output-names", 0,
&init.name);
@@ -306,13 +303,10 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
goto err_register;
}
- clk_data->num = 2;
+ clk_data->num = 3;
clk_data->hws[0] = &rtc->hw;
clk_data->hws[1] = __clk_get_hw(rtc->ext_losc);
- if (rtc->data->export_iosc) {
- clk_data->hws[2] = rtc->int_osc;
- clk_data->num = 3;
- }
+ clk_data->hws[2] = rtc->int_osc;
of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
return;
@@ -352,7 +346,6 @@ static const struct sun6i_rtc_clk_data sun8i_h3_rtc_data = {
.fixed_prescaler = 32,
.has_prescaler = 1,
.has_out_clk = 1,
- .export_iosc = 1,
};
static void __init sun8i_h3_rtc_clk_init(struct device_node *node)
@@ -370,7 +363,6 @@ static const struct sun6i_rtc_clk_data sun50i_h6_rtc_data = {
.fixed_prescaler = 32,
.has_prescaler = 1,
.has_out_clk = 1,
- .export_iosc = 1,
.has_losc_en = 1,
.has_auto_swt = 1,
};
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index 2c762757fb54..636c9e73042c 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -38,13 +38,15 @@
#define RTC_CALIB_DEF 0x198233
#define RTC_CALIB_MASK 0x1FFFFF
+#define RTC_ALRM_MASK BIT(1)
+#define RTC_MSEC 1000
struct xlnx_rtc_dev {
struct rtc_device *rtc;
void __iomem *reg_base;
int alarm_irq;
int sec_irq;
- int calibval;
+ unsigned int calibval;
};
static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -123,11 +125,27 @@ static int xlnx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int xlnx_rtc_alarm_irq_enable(struct device *dev, u32 enabled)
{
struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
-
- if (enabled)
+ unsigned int status;
+ ulong timeout;
+
+ timeout = jiffies + msecs_to_jiffies(RTC_MSEC);
+
+ if (enabled) {
+ while (1) {
+ status = readl(xrtcdev->reg_base + RTC_INT_STS);
+ if (!((status & RTC_ALRM_MASK) == RTC_ALRM_MASK))
+ break;
+
+ if (time_after_eq(jiffies, timeout)) {
+ dev_err(dev, "Time out occur, while clearing alarm status bit\n");
+ return -ETIMEDOUT;
+ }
+ writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_STS);
+ }
writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_EN);
- else
+ } else {
writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_DIS);
+ }
return 0;
}
@@ -183,8 +201,8 @@ static irqreturn_t xlnx_rtc_interrupt(int irq, void *id)
if (!(status & (RTC_INT_SEC | RTC_INT_ALRM)))
return IRQ_NONE;
- /* Clear RTC_INT_ALRM interrupt only */
- writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_STS);
+ /* Disable RTC_INT_ALRM interrupt only */
+ writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_DIS);
if (status & RTC_INT_ALRM)
rtc_update_irq(xrtcdev->rtc, 1, RTC_IRQF | RTC_AF);
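
The zynqmp alarm-enable path above now clears a sticky write-1-to-clear status bit before enabling the interrupt, bounding the wait with a jiffies deadline. A userspace analogue of that deadline-polling loop, using clock_gettime() in place of jiffies (the register and helper names are stand-ins):

    #include <stdint.h>
    #include <time.h>

    #define ALRM_BIT 0x2u

    static volatile uint32_t int_sts;     /* fake write-1-to-clear status register */

    static uint64_t now_ms(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    /* Clear a sticky status bit, giving up after a 1000 ms deadline. */
    static int clear_alarm_status(void)
    {
        uint64_t deadline = now_ms() + 1000;

        while (int_sts & ALRM_BIT) {
            if (now_ms() >= deadline)
                return -1;                /* -ETIMEDOUT in the driver */
            int_sts &= ~ALRM_BIT;         /* models the W1C writel() */
        }
        return 0;
    }

    int main(void)
    {
        int_sts = ALRM_BIT;               /* pretend a stale alarm is pending */
        return clear_alarm_status();
    }
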
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e0570cd0e520..a5cee56ffe61 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -737,18 +737,20 @@ static void dasd_profile_start(struct dasd_block *block,
* we count each request only once.
*/
device = cqr->startdev;
- if (device->profile.data) {
- counter = 1; /* request is not yet queued on the start device */
- list_for_each(l, &device->ccw_queue)
- if (++counter >= 31)
- break;
- }
+ if (!device->profile.data)
+ return;
+
+ spin_lock(get_ccwdev_lock(device->cdev));
+ counter = 1; /* request is not yet queued on the start device */
+ list_for_each(l, &device->ccw_queue)
+ if (++counter >= 31)
+ break;
+ spin_unlock(get_ccwdev_lock(device->cdev));
+
spin_lock(&device->profile.lock);
- if (device->profile.data) {
- device->profile.data->dasd_io_nr_req[counter]++;
- if (rq_data_dir(req) == READ)
- device->profile.data->dasd_read_nr_req[counter]++;
- }
+ device->profile.data->dasd_io_nr_req[counter]++;
+ if (rq_data_dir(req) == READ)
+ device->profile.data->dasd_read_nr_req[counter]++;
spin_unlock(&device->profile.lock);
}
@@ -2128,8 +2130,8 @@ static void __dasd_device_check_path_events(struct dasd_device *device)
if (device->stopped &
~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
return;
- rc = device->discipline->verify_path(device,
- dasd_path_get_tbvpm(device));
+ rc = device->discipline->pe_handler(device,
+ dasd_path_get_tbvpm(device));
if (rc)
dasd_device_set_timer(device, 50);
else
@@ -2985,41 +2987,32 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
* Requeue a request back to the block request queue
* only works for block requests
*/
-static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
{
- struct dasd_block *block = cqr->block;
struct request *req;
- if (!block)
- return -EINVAL;
/*
* If the request is an ERP request there is nothing to requeue.
* This will be done with the remaining original request.
*/
if (cqr->refers)
- return 0;
+ return;
spin_lock_irq(&cqr->dq->lock);
req = (struct request *) cqr->callback_data;
- blk_mq_requeue_request(req, false);
+ blk_mq_requeue_request(req, true);
spin_unlock_irq(&cqr->dq->lock);
- return 0;
+ return;
}
-/*
- * Go through all request on the dasd_block request queue, cancel them
- * on the respective dasd_device, and return them to the generic
- * block layer.
- */
-static int dasd_flush_block_queue(struct dasd_block *block)
+static int _dasd_requests_to_flushqueue(struct dasd_block *block,
+ struct list_head *flush_queue)
{
struct dasd_ccw_req *cqr, *n;
- int rc, i;
- struct list_head flush_queue;
unsigned long flags;
+ int rc, i;
- INIT_LIST_HEAD(&flush_queue);
- spin_lock_bh(&block->queue_lock);
+ spin_lock_irqsave(&block->queue_lock, flags);
rc = 0;
restart:
list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
@@ -3034,13 +3027,32 @@ restart:
* is returned from the dasd_device layer.
*/
cqr->callback = _dasd_wake_block_flush_cb;
- for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
- list_move_tail(&cqr->blocklist, &flush_queue);
+ for (i = 0; cqr; cqr = cqr->refers, i++)
+ list_move_tail(&cqr->blocklist, flush_queue);
if (i > 1)
/* moved more than one request - need to restart */
goto restart;
}
- spin_unlock_bh(&block->queue_lock);
+ spin_unlock_irqrestore(&block->queue_lock, flags);
+
+ return rc;
+}
+
+/*
+ * Go through all requests on the dasd_block request queue, cancel them
+ * on the respective dasd_device, and return them to the generic
+ * block layer.
+ */
+static int dasd_flush_block_queue(struct dasd_block *block)
+{
+ struct dasd_ccw_req *cqr, *n;
+ struct list_head flush_queue;
+ unsigned long flags;
+ int rc;
+
+ INIT_LIST_HEAD(&flush_queue);
+ rc = _dasd_requests_to_flushqueue(block, &flush_queue);
+
/* Now call the callback function of flushed requests */
restart_cb:
list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
@@ -3977,75 +3989,36 @@ EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
*/
static int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
+ struct dasd_block *block = device->block;
struct list_head requeue_queue;
struct dasd_ccw_req *cqr, *n;
- struct dasd_ccw_req *refers;
int rc;
- INIT_LIST_HEAD(&requeue_queue);
- spin_lock_irq(get_ccwdev_lock(device->cdev));
- rc = 0;
- list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
- /* Check status and move request to flush_queue */
- if (cqr->status == DASD_CQR_IN_IO) {
- rc = device->discipline->term_IO(cqr);
- if (rc) {
- /* unable to terminate requeust */
- dev_err(&device->cdev->dev,
- "Unable to terminate request %p "
- "on suspend\n", cqr);
- spin_unlock_irq(get_ccwdev_lock(device->cdev));
- dasd_put_device(device);
- return rc;
- }
- }
- list_move_tail(&cqr->devlist, &requeue_queue);
- }
- spin_unlock_irq(get_ccwdev_lock(device->cdev));
-
- list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
- wait_event(dasd_flush_wq,
- (cqr->status != DASD_CQR_CLEAR_PENDING));
+ if (!block)
+ return 0;
- /*
- * requeue requests to blocklayer will only work
- * for block device requests
- */
- if (_dasd_requeue_request(cqr))
- continue;
+ INIT_LIST_HEAD(&requeue_queue);
+ rc = _dasd_requests_to_flushqueue(block, &requeue_queue);
- /* remove requests from device and block queue */
- list_del_init(&cqr->devlist);
- while (cqr->refers != NULL) {
- refers = cqr->refers;
- /* remove the request from the block queue */
- list_del(&cqr->blocklist);
- /* free the finished erp request */
- dasd_free_erp_request(cqr, cqr->memdev);
- cqr = refers;
+ /* Now call the callback function of flushed requests */
+restart_cb:
+ list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
+ wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
+ /* Process finished ERP request. */
+ if (cqr->refers) {
+ spin_lock_bh(&block->queue_lock);
+ __dasd_process_erp(block->base, cqr);
+ spin_unlock_bh(&block->queue_lock);
+ /* restart list_for_xx loop since dasd_process_erp
+ * might remove multiple elements
+ */
+ goto restart_cb;
}
-
- /*
- * _dasd_requeue_request already checked for a valid
- * blockdevice, no need to check again
- * all erp requests (cqr->refers) have a cqr->block
- * pointer copy from the original cqr
- */
+ _dasd_requeue_request(cqr);
list_del_init(&cqr->blocklist);
cqr->block->base->discipline->free_cp(
cqr, (struct request *) cqr->callback_data);
}
-
- /*
- * if requests remain then they are internal request
- * and go back to the device queue
- */
- if (!list_empty(&requeue_queue)) {
- /* move freeze_queue to start of the ccw_queue */
- spin_lock_irq(get_ccwdev_lock(device->cdev));
- list_splice_tail(&requeue_queue, &device->ccw_queue);
- spin_unlock_irq(get_ccwdev_lock(device->cdev));
- }
dasd_schedule_device_bh(device);
return rc;
}
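
dasd_flush_block_queue() and dasd_generic_requeue_all_requests() now share _dasd_requests_to_flushqueue(), which detaches all queued requests onto a caller-owned list under the queue lock so the completion callbacks can run without holding it. A compact model of that drain-then-process pattern, using a pthread mutex and a hand-rolled singly linked list (names are illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct req {
        struct req *next;
        int id;
    };

    static struct req *queue_head;
    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Detach every queued request under the lock; return the private list. */
    static struct req *requests_to_flushqueue(void)
    {
        pthread_mutex_lock(&queue_lock);
        struct req *list = queue_head;
        queue_head = NULL;
        pthread_mutex_unlock(&queue_lock);
        return list;
    }

    static void flush_queue(void)
    {
        struct req *r = requests_to_flushqueue();

        while (r) {                       /* completion callbacks run lock-free */
            struct req *next = r->next;
            printf("completing request %d\n", r->id);
            free(r);
            r = next;
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct req *r = malloc(sizeof(*r));
            if (!r)
                return 1;
            r->id = i;
            r->next = queue_head;
            queue_head = r;
        }
        flush_queue();
        return 0;
    }
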
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index ee73b0607e47..8598c792ded3 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2436,7 +2436,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
erp->block = cqr->block;
erp->magic = cqr->magic;
erp->expires = cqr->expires;
- erp->retries = 256;
+ erp->retries = device->default_retries;
erp->buildclk = get_tod_clock();
erp->status = DASD_CQR_FILLED;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index dc78a523a69f..b6b938aa6615 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -675,12 +675,12 @@ int dasd_alias_remove_device(struct dasd_device *device)
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
struct dasd_eckd_private *alias_priv, *private = base_device->private;
- struct alias_pav_group *group = private->pavgroup;
struct alias_lcu *lcu = private->lcu;
struct dasd_device *alias_device;
+ struct alias_pav_group *group;
unsigned long flags;
- if (!group || !lcu)
+ if (!lcu)
return NULL;
if (lcu->pav == NO_PAV ||
lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
@@ -697,6 +697,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
}
spin_lock_irqsave(&lcu->lock, flags);
+ group = private->pavgroup;
+ if (!group) {
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ return NULL;
+ }
alias_device = group->next;
if (!alias_device) {
if (list_empty(&group->aliaslist)) {
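
The dasd_alias fix above moves the private->pavgroup dereference inside the lcu->lock critical section; sampling the pointer before taking the lock raced with concurrent group removal. A minimal sketch of the check-after-lock rule (types invented for illustration):

    #include <pthread.h>
    #include <stddef.h>

    struct group {
        int next_alias;
    };

    struct device {
        pthread_mutex_t lock;
        struct group *pavgroup;           /* may become NULL concurrently */
    };

    /* Re-read and validate the pointer only while holding the lock;
     * checking it before pthread_mutex_lock() would be a TOCTOU race. */
    static int get_start_alias(struct device *dev)
    {
        int alias;

        pthread_mutex_lock(&dev->lock);
        struct group *g = dev->pavgroup;
        if (!g) {
            pthread_mutex_unlock(&dev->lock);
            return -1;
        }
        alias = g->next_alias;
        pthread_mutex_unlock(&dev->lock);
        return alias;
    }

    int main(void)
    {
        struct device dev = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .pavgroup = NULL,
        };
        return get_start_alias(&dev) == -1 ? 0 : 1;
    }
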
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index f7ae03fd36cb..f029884eabd1 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -644,12 +644,17 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block)
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
}
+static int dasd_diag_pe_handler(struct dasd_device *device, __u8 tbvpm)
+{
+ return dasd_generic_verify_path(device, tbvpm);
+}
+
static struct dasd_discipline dasd_diag_discipline = {
.owner = THIS_MODULE,
.name = "DIAG",
.ebcname = "DIAG",
.check_device = dasd_diag_check_device,
- .verify_path = dasd_generic_verify_path,
+ .pe_handler = dasd_diag_pe_handler,
.fill_geometry = dasd_diag_fill_geometry,
.setup_blk_queue = dasd_diag_setup_blk_queue,
.start_IO = dasd_start_diag,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 7749deb614d7..c6930c159d2a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -103,7 +103,7 @@ struct ext_pool_exhaust_work_data {
};
/* definitions for the path verification worker */
-struct path_verification_work_data {
+struct pe_handler_work_data {
struct work_struct worker;
struct dasd_device *device;
struct dasd_ccw_req cqr;
@@ -112,8 +112,8 @@ struct path_verification_work_data {
int isglobal;
__u8 tbvpm;
};
-static struct path_verification_work_data *path_verification_worker;
-static DEFINE_MUTEX(dasd_path_verification_mutex);
+static struct pe_handler_work_data *pe_handler_worker;
+static DEFINE_MUTEX(dasd_pe_handler_mutex);
struct check_attention_work_data {
struct work_struct worker;
@@ -1219,7 +1219,7 @@ static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
}
static int rebuild_device_uid(struct dasd_device *device,
- struct path_verification_work_data *data)
+ struct pe_handler_work_data *data)
{
struct dasd_eckd_private *private = device->private;
__u8 lpm, opm = dasd_path_get_opm(device);
@@ -1257,10 +1257,9 @@ static int rebuild_device_uid(struct dasd_device *device,
return rc;
}
-static void do_path_verification_work(struct work_struct *work)
+static void dasd_eckd_path_available_action(struct dasd_device *device,
+ struct pe_handler_work_data *data)
{
- struct path_verification_work_data *data;
- struct dasd_device *device;
struct dasd_eckd_private path_private;
struct dasd_uid *uid;
__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
@@ -1269,19 +1268,6 @@ static void do_path_verification_work(struct work_struct *work)
char print_uid[60];
int rc;
- data = container_of(work, struct path_verification_work_data, worker);
- device = data->device;
-
- /* delay path verification until device was resumed */
- if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
- schedule_work(work);
- return;
- }
- /* check if path verification already running and delay if so */
- if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
- schedule_work(work);
- return;
- }
opm = 0;
npm = 0;
ppm = 0;
@@ -1418,30 +1404,54 @@ static void do_path_verification_work(struct work_struct *work)
dasd_path_add_nohpfpm(device, hpfpm);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
+}
+
+static void do_pe_handler_work(struct work_struct *work)
+{
+ struct pe_handler_work_data *data;
+ struct dasd_device *device;
+
+ data = container_of(work, struct pe_handler_work_data, worker);
+ device = data->device;
+
+ /* delay path verification until device was resumed */
+ if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
+ schedule_work(work);
+ return;
+ }
+ /* check if path verification already running and delay if so */
+ if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
+ schedule_work(work);
+ return;
+ }
+
+ dasd_eckd_path_available_action(device, data);
+
clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
dasd_put_device(device);
if (data->isglobal)
- mutex_unlock(&dasd_path_verification_mutex);
+ mutex_unlock(&dasd_pe_handler_mutex);
else
kfree(data);
}
-static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
+static int dasd_eckd_pe_handler(struct dasd_device *device, __u8 lpm)
{
- struct path_verification_work_data *data;
+ struct pe_handler_work_data *data;
data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
if (!data) {
- if (mutex_trylock(&dasd_path_verification_mutex)) {
- data = path_verification_worker;
+ if (mutex_trylock(&dasd_pe_handler_mutex)) {
+ data = pe_handler_worker;
data->isglobal = 1;
- } else
+ } else {
return -ENOMEM;
+ }
} else {
memset(data, 0, sizeof(*data));
data->isglobal = 0;
}
- INIT_WORK(&data->worker, do_path_verification_work);
+ INIT_WORK(&data->worker, do_pe_handler_work);
dasd_get_device(device);
data->device = device;
data->tbvpm = lpm;
@@ -4627,7 +4637,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
struct dasd_device *basedev;
struct req_iterator iter;
struct dasd_ccw_req *cqr;
- unsigned int first_offs;
unsigned int trkcount;
unsigned long *idaws;
unsigned int size;
@@ -4661,7 +4670,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
DASD_RAW_SECTORS_PER_TRACK;
trkcount = last_trk - first_trk + 1;
- first_offs = 0;
if (rq_data_dir(req) == READ)
cmd = DASD_ECKD_CCW_READ_TRACK;
@@ -4705,13 +4713,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
if (use_prefix) {
prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
- startdev, 1, first_offs + 1, trkcount, 0, 0);
+ startdev, 1, 0, trkcount, 0, 0);
} else {
define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
ccw[-1].flags |= CCW_FLAG_CC;
data += sizeof(struct DE_eckd_data);
- locate_record_ext(ccw++, data, first_trk, first_offs + 1,
+ locate_record_ext(ccw++, data, first_trk, 0,
trkcount, cmd, basedev, 0, 0);
}
@@ -6696,7 +6704,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
.check_device = dasd_eckd_check_characteristics,
.uncheck_device = dasd_eckd_uncheck_device,
.do_analysis = dasd_eckd_do_analysis,
- .verify_path = dasd_eckd_verify_path,
+ .pe_handler = dasd_eckd_pe_handler,
.basic_to_ready = dasd_eckd_basic_to_ready,
.online_to_ready = dasd_eckd_online_to_ready,
.basic_to_known = dasd_eckd_basic_to_known,
@@ -6755,18 +6763,20 @@ dasd_eckd_init(void)
return -ENOMEM;
dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
GFP_KERNEL | GFP_DMA);
- if (!dasd_vol_info_req)
+ if (!dasd_vol_info_req) {
+ kfree(dasd_reserve_req);
return -ENOMEM;
- path_verification_worker = kmalloc(sizeof(*path_verification_worker),
- GFP_KERNEL | GFP_DMA);
- if (!path_verification_worker) {
+ }
+ pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
+ GFP_KERNEL | GFP_DMA);
+ if (!pe_handler_worker) {
kfree(dasd_reserve_req);
kfree(dasd_vol_info_req);
return -ENOMEM;
}
rawpadpage = (void *)__get_free_page(GFP_KERNEL);
if (!rawpadpage) {
- kfree(path_verification_worker);
+ kfree(pe_handler_worker);
kfree(dasd_reserve_req);
kfree(dasd_vol_info_req);
return -ENOMEM;
@@ -6775,7 +6785,7 @@ dasd_eckd_init(void)
if (!ret)
wait_for_device_probe();
else {
- kfree(path_verification_worker);
+ kfree(pe_handler_worker);
kfree(dasd_reserve_req);
kfree(dasd_vol_info_req);
free_page((unsigned long)rawpadpage);
@@ -6787,7 +6797,7 @@ static void __exit
dasd_eckd_cleanup(void)
{
ccw_driver_unregister(&dasd_eckd_driver);
- kfree(path_verification_worker);
+ kfree(pe_handler_worker);
kfree(dasd_reserve_req);
free_page((unsigned long)rawpadpage);
}
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 1a44e321b54e..b159575a2760 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -803,13 +803,18 @@ static void dasd_fba_setup_blk_queue(struct dasd_block *block)
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}
+static int dasd_fba_pe_handler(struct dasd_device *device, __u8 tbvpm)
+{
+ return dasd_generic_verify_path(device, tbvpm);
+}
+
static struct dasd_discipline dasd_fba_discipline = {
.owner = THIS_MODULE,
.name = "FBA ",
.ebcname = "FBA ",
.check_device = dasd_fba_check_characteristics,
.do_analysis = dasd_fba_do_analysis,
- .verify_path = dasd_generic_verify_path,
+ .pe_handler = dasd_fba_pe_handler,
.setup_blk_queue = dasd_fba_setup_blk_queue,
.fill_geometry = dasd_fba_fill_geometry,
.start_IO = dasd_start_IO,
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 9d9685c25253..5d7d35ca5eb4 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -298,7 +298,7 @@ struct dasd_discipline {
* e.g. verify that new path is compatible with the current
* configuration.
*/
- int (*verify_path)(struct dasd_device *, __u8);
+ int (*pe_handler)(struct dasd_device *, __u8);
/*
* Last things to do when a device is set online, and first things
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 9a5f3add325f..caab2cd2e0bd 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -137,6 +137,7 @@ static int dasd_ioctl_resume(struct dasd_block *block)
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
dasd_schedule_block_bh(block);
+ dasd_schedule_device_bh(base);
return 0;
}
@@ -457,10 +458,9 @@ static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
/*
* Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
*/
-static int dasd_ioctl_information(struct dasd_block *block,
- unsigned int cmd, void __user *argp)
+static int __dasd_ioctl_information(struct dasd_block *block,
+ struct dasd_information2_t *dasd_info)
{
- struct dasd_information2_t *dasd_info;
struct subchannel_id sch_id;
struct ccw_dev_id dev_id;
struct dasd_device *base;
@@ -473,15 +473,9 @@ static int dasd_ioctl_information(struct dasd_block *block,
if (!base->discipline || !base->discipline->fill_info)
return -EINVAL;
- dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
- if (dasd_info == NULL)
- return -ENOMEM;
-
rc = base->discipline->fill_info(base, dasd_info);
- if (rc) {
- kfree(dasd_info);
+ if (rc)
return rc;
- }
cdev = base->cdev;
ccw_device_get_id(cdev, &dev_id);
@@ -516,19 +510,28 @@ static int dasd_ioctl_information(struct dasd_block *block,
memcpy(dasd_info->type, base->discipline->name, 4);
- spin_lock_irqsave(&block->queue_lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
list_for_each(l, &base->ccw_queue)
dasd_info->chanq_len++;
- spin_unlock_irqrestore(&block->queue_lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+ return 0;
+}
- rc = 0;
- if (copy_to_user(argp, dasd_info,
- ((cmd == (unsigned int) BIODASDINFO2) ?
- sizeof(struct dasd_information2_t) :
- sizeof(struct dasd_information_t))))
- rc = -EFAULT;
+static int dasd_ioctl_information(struct dasd_block *block, void __user *argp,
+ size_t copy_size)
+{
+ struct dasd_information2_t *dasd_info;
+ int error;
+
+ dasd_info = kzalloc(sizeof(*dasd_info), GFP_KERNEL);
+ if (!dasd_info)
+ return -ENOMEM;
+
+ error = __dasd_ioctl_information(block, dasd_info);
+ if (!error && copy_to_user(argp, dasd_info, copy_size))
+ error = -EFAULT;
kfree(dasd_info);
- return rc;
+ return error;
}
/*
@@ -622,10 +625,12 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
rc = dasd_ioctl_check_format(bdev, argp);
break;
case BIODASDINFO:
- rc = dasd_ioctl_information(block, cmd, argp);
+ rc = dasd_ioctl_information(block, argp,
+ sizeof(struct dasd_information_t));
break;
case BIODASDINFO2:
- rc = dasd_ioctl_information(block, cmd, argp);
+ rc = dasd_ioctl_information(block, argp,
+ sizeof(struct dasd_information2_t));
break;
case BIODASDPRRD:
rc = dasd_ioctl_read_profile(block, argp);
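
The ioctl refactor above fills the full dasd_information2_t once and lets BIODASDINFO and BIODASDINFO2 differ only in how many bytes are copied out, which works because the older structure is a prefix of the newer one. A userspace model with memcpy() standing in for copy_to_user() (the structure layouts below are invented for illustration):

    #include <stdlib.h>
    #include <string.h>

    struct info1 { int devno; int cu_type; };              /* BIODASDINFO */
    struct info2 { int devno; int cu_type; int format; };  /* BIODASDINFO2 */

    static int fill_info(struct info2 *out)
    {
        out->devno = 0x1234;
        out->cu_type = 0x3990;
        out->format = 2;
        return 0;
    }

    /* Fill the big structure once; copy only as many bytes as the caller
     * asked for (memcpy() models copy_to_user()). */
    static int ioctl_information(void *dst, size_t copy_size)
    {
        struct info2 *info = calloc(1, sizeof(*info));
        int err;

        if (!info)
            return -1;
        err = fill_info(info);
        if (!err)
            memcpy(dst, info, copy_size);
        free(info);
        return err;
    }

    int main(void)
    {
        struct info1 small;
        struct info2 big;

        ioctl_information(&small, sizeof(small));   /* old-style caller */
        ioctl_information(&big, sizeof(big));       /* superset caller */
        return 0;
    }
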
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index e01889394c84..d3133023a557 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -18,6 +18,7 @@
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
+#include <linux/io.h>
#include <asm/eadm.h>
#include "scm_blk.h"
@@ -131,7 +132,7 @@ static void scm_request_done(struct scm_request *scmrq)
for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
msb = &scmrq->aob->msb[i];
- aidaw = msb->data_addr;
+ aidaw = (u64)phys_to_virt(msb->data_addr);
if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
IS_ALIGNED(aidaw, PAGE_SIZE))
@@ -196,12 +197,12 @@ static int scm_request_prepare(struct scm_request *scmrq)
msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
msb->flags |= MSB_FLAG_IDA;
- msb->data_addr = (u64) aidaw;
+ msb->data_addr = (u64)virt_to_phys(aidaw);
rq_for_each_segment(bv, req, iter) {
WARN_ON(bv.bv_offset);
msb->blk_count += bv.bv_len >> 12;
- aidaw->data_addr = (u64) page_address(bv.bv_page);
+ aidaw->data_addr = virt_to_phys(page_address(bv.bv_page));
aidaw++;
}
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 08f812475f5e..c9d172510509 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -53,6 +53,7 @@ static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *zcore_ipl_block;
+static DEFINE_MUTEX(hsa_buf_mutex);
static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
/*
@@ -69,19 +70,24 @@ int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
if (!hsa_available)
return -ENODATA;
+ mutex_lock(&hsa_buf_mutex);
while (count) {
if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
TRACE("sclp_sdias_copy() failed\n");
+ mutex_unlock(&hsa_buf_mutex);
return -EIO;
}
offset = src % PAGE_SIZE;
bytes = min(PAGE_SIZE - offset, count);
- if (copy_to_user(dest, hsa_buf + offset, bytes))
+ if (copy_to_user(dest, hsa_buf + offset, bytes)) {
+ mutex_unlock(&hsa_buf_mutex);
return -EFAULT;
+ }
src += bytes;
dest += bytes;
count -= bytes;
}
+ mutex_unlock(&hsa_buf_mutex);
return 0;
}
@@ -99,9 +105,11 @@ int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
if (!hsa_available)
return -ENODATA;
+ mutex_lock(&hsa_buf_mutex);
while (count) {
if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
TRACE("sclp_sdias_copy() failed\n");
+ mutex_unlock(&hsa_buf_mutex);
return -EIO;
}
offset = src % PAGE_SIZE;
@@ -111,6 +119,7 @@ int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
dest += bytes;
count -= bytes;
}
+ mutex_unlock(&hsa_buf_mutex);
return 0;
}
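
Both zcore copy helpers share the single static page buffer hsa_buf, so the whole chunked copy loop is now serialized by hsa_buf_mutex; dropping the lock between iterations would let another caller overwrite the page between the SCLP fill and the copy-out. A sketch of holding one lock across the loop, with every error path unlocking (stand-in names, not the zcore API):

    #include <pthread.h>
    #include <string.h>

    #define PAGE 4096

    static char shared_buf[PAGE];         /* models the static hsa_buf page */
    static pthread_mutex_t buf_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for sclp_sdias_copy(): fill the shared page. */
    static int fetch_page(char *buf, unsigned long pageno)
    {
        memset(buf, (int)(pageno & 0xff), PAGE);
        return 0;
    }

    /* Hold the lock across the whole loop: another caller must not refill
     * shared_buf between fetch_page() and the copy-out. */
    static int copy_out(char *dest, unsigned long src, size_t count)
    {
        pthread_mutex_lock(&buf_mutex);
        while (count) {
            if (fetch_page(shared_buf, src / PAGE)) {
                pthread_mutex_unlock(&buf_mutex);   /* every error path unlocks */
                return -1;
            }
            size_t off = src % PAGE;
            size_t bytes = count < PAGE - off ? count : PAGE - off;
            memcpy(dest, shared_buf + off, bytes);
            src += bytes;
            dest += bytes;
            count -= bytes;
        }
        pthread_mutex_unlock(&buf_mutex);
        return 0;
    }

    int main(void)
    {
        char out[100];
        return copy_out(out, 4000, sizeof(out));
    }
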
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 23e9227e60fd..d7ca75efb49f 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1385,6 +1385,7 @@ void ccw_device_set_notoper(struct ccw_device *cdev)
enum io_sch_action {
IO_SCH_UNREG,
IO_SCH_ORPH_UNREG,
+ IO_SCH_UNREG_CDEV,
IO_SCH_ATTACH,
IO_SCH_UNREG_ATTACH,
IO_SCH_ORPH_ATTACH,
@@ -1417,7 +1418,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
}
if ((sch->schib.pmcw.pam & sch->opm) == 0) {
if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
- return IO_SCH_UNREG;
+ return IO_SCH_UNREG_CDEV;
return IO_SCH_DISC;
}
if (device_is_disconnected(cdev))
@@ -1479,6 +1480,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
case IO_SCH_ORPH_ATTACH:
ccw_device_set_disconnected(cdev);
break;
+ case IO_SCH_UNREG_CDEV:
case IO_SCH_UNREG_ATTACH:
case IO_SCH_UNREG:
if (!cdev)
@@ -1512,6 +1514,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
if (rc)
goto out;
break;
+ case IO_SCH_UNREG_CDEV:
case IO_SCH_UNREG_ATTACH:
spin_lock_irqsave(sch->lock, flags);
if (cdev->private->flags.resuming) {
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 3b0a4483a252..c78651be8d13 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -88,15 +88,15 @@ enum qdio_irq_states {
static inline int do_sqbs(u64 token, unsigned char state, int queue,
int *start, int *count)
{
- register unsigned long _ccq asm ("0") = *count;
- register unsigned long _token asm ("1") = token;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+ unsigned long _ccq = *count;
asm volatile(
- " .insn rsy,0xeb000000008A,%1,0,0(%2)"
- : "+d" (_ccq), "+d" (_queuestart)
- : "d" ((unsigned long)state), "d" (_token)
- : "memory", "cc");
+ " lgr 1,%[token]\n"
+ " .insn rsy,0xeb000000008a,%[qs],%[ccq],0(%[state])"
+ : [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart)
+ : [state] "a" ((unsigned long)state), [token] "d" (token)
+ : "memory", "cc", "1");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
@@ -106,16 +106,17 @@ static inline int do_sqbs(u64 token, unsigned char state, int queue,
static inline int do_eqbs(u64 token, unsigned char *state, int queue,
int *start, int *count, int ack)
{
- register unsigned long _ccq asm ("0") = *count;
- register unsigned long _token asm ("1") = token;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
unsigned long _state = (unsigned long)ack << 63;
+ unsigned long _ccq = *count;
asm volatile(
- " .insn rrf,0xB99c0000,%1,%2,0,0"
- : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
- : "d" (_token)
- : "memory", "cc");
+ " lgr 1,%[token]\n"
+ " .insn rrf,0xb99c0000,%[qs],%[state],%[ccq],0"
+ : [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart),
+ [state] "+&d" (_state)
+ : [token] "d" (token)
+ : "memory", "cc", "1");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
*state = _state & 0xff;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 5b63c505a2f7..620655bcbe80 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -31,38 +31,41 @@ MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
static inline int do_siga_sync(unsigned long schid,
- unsigned int out_mask, unsigned int in_mask,
+ unsigned long out_mask, unsigned long in_mask,
unsigned int fc)
{
- register unsigned long __fc asm ("0") = fc;
- register unsigned long __schid asm ("1") = schid;
- register unsigned long out asm ("2") = out_mask;
- register unsigned long in asm ("3") = in_mask;
int cc;
asm volatile(
+ " lgr 0,%[fc]\n"
+ " lgr 1,%[schid]\n"
+ " lgr 2,%[out]\n"
+ " lgr 3,%[in]\n"
" siga 0\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc)
- : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc)
+ : [fc] "d" (fc), [schid] "d" (schid),
+ [out] "d" (out_mask), [in] "d" (in_mask)
+ : "cc", "0", "1", "2", "3");
return cc;
}
-static inline int do_siga_input(unsigned long schid, unsigned int mask,
- unsigned int fc)
+static inline int do_siga_input(unsigned long schid, unsigned long mask,
+ unsigned long fc)
{
- register unsigned long __fc asm ("0") = fc;
- register unsigned long __schid asm ("1") = schid;
- register unsigned long __mask asm ("2") = mask;
int cc;
asm volatile(
+ " lgr 0,%[fc]\n"
+ " lgr 1,%[schid]\n"
+ " lgr 2,%[mask]\n"
" siga 0\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc)
- : "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc)
+ : [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask)
+ : "cc", "0", "1", "2");
return cc;
}
@@ -78,23 +81,24 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask,
* Note: For IQDC unicast queues only the highest priority queue is processed.
*/
static inline int do_siga_output(unsigned long schid, unsigned long mask,
- unsigned int *bb, unsigned int fc,
+ unsigned int *bb, unsigned long fc,
unsigned long aob)
{
- register unsigned long __fc asm("0") = fc;
- register unsigned long __schid asm("1") = schid;
- register unsigned long __mask asm("2") = mask;
- register unsigned long __aob asm("3") = aob;
int cc;
asm volatile(
+ " lgr 0,%[fc]\n"
+ " lgr 1,%[schid]\n"
+ " lgr 2,%[mask]\n"
+ " lgr 3,%[aob]\n"
" siga 0\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc), "+d" (__fc), "+d" (__aob)
- : "d" (__schid), "d" (__mask)
- : "cc");
- *bb = __fc >> 31;
+ " lgr %[fc],0\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc), [fc] "+&d" (fc)
+ : [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob)
+ : "cc", "0", "1", "2", "3");
+ *bb = fc >> 31;
return cc;
}
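
The qdio hunks above drop GCC's register ... asm("N") variables, which are fragile in the presence of compiler instrumentation, and instead load the fixed registers inside the asm body while naming them as clobbers. The same transformation expressed on x86-64 Linux, with the write(2) syscall standing in for the SIGA instruction (an analogue only, not s390 code; GCC or Clang on x86-64 assumed):

    /* Inputs arrive in compiler-chosen registers; the asm body itself
     * moves them into the fixed registers the instruction needs, and
     * those fixed registers are listed as clobbers. */
    static long my_write(int fd, const void *buf, unsigned long len)
    {
        long ret;

        asm volatile(
            "movq $1, %%rax\n\t"          /* __NR_write */
            "movq %[fd], %%rdi\n\t"
            "movq %[buf], %%rsi\n\t"
            "movq %[len], %%rdx\n\t"
            "syscall\n\t"
            "movq %%rax, %[ret]"
            : [ret] "=r" (ret)
            : [fd] "r" ((long)fd), [buf] "r" ((long)buf), [len] "r" (len)
            : "rax", "rdi", "rsi", "rdx", "rcx", "r11", "memory");
        return ret;
    }

    int main(void)
    {
        my_write(1, "siga analogue\n", 14);
        return 0;
    }
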
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index fd590d1cffc1..d42e5a307437 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -247,19 +247,11 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
if (work_pending(&sch->todo_work))
goto out_unlock;
- if (cio_update_schib(sch)) {
- vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
- rc = 0;
- goto out_unlock;
- }
-
- private = dev_get_drvdata(&sch->dev);
- if (private->state == VFIO_CCW_STATE_NOT_OPER) {
- private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
- VFIO_CCW_STATE_STANDBY;
- }
rc = 0;
+ if (cio_update_schib(sch))
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 5256e3ce84e5..fb1de363fb28 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -91,7 +91,7 @@ static DECLARE_WORK(ap_scan_work, ap_scan_bus);
* Tasklet & timer for AP request polling and interrupts
*/
static void ap_tasklet_fn(unsigned long);
-static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0);
+static DECLARE_TASKLET_OLD(ap_tasklet, ap_tasklet_fn);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread;
static DEFINE_MUTEX(ap_poll_thread_mutex);
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 7dc72cb718b0..22128eb44f7f 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -82,8 +82,9 @@ static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
static void vfio_ap_matrix_dev_release(struct device *dev)
{
- struct ap_matrix_dev *matrix_dev = dev_get_drvdata(dev);
+ struct ap_matrix_dev *matrix_dev;
+ matrix_dev = container_of(dev, struct ap_matrix_dev, device);
kfree(matrix_dev);
}
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index ec41a8a76398..f376dfcd7dbe 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -397,6 +397,7 @@ static int zcdn_create(const char *name)
ZCRYPT_NAME "_%d", (int) MINOR(devt));
nodename[sizeof(nodename)-1] = '\0';
if (dev_set_name(&zcdndev->device, nodename)) {
+ kfree(zcdndev);
rc = -EINVAL;
goto unlockout;
}
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 437a6d822105..87d05b13fbd5 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -865,16 +865,9 @@ done:
/**
* Start transmission of a packet.
* Called from generic network device layer.
- *
- * skb Pointer to buffer containing the packet.
- * dev Pointer to interface struct.
- *
- * returns 0 if packet consumed, !0 if packet rejected.
- * Note: If we return !0, then the packet is free'd by
- * the generic network layer.
*/
/* first merge version - leaving both functions separated */
-static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ctcm_tx(struct sk_buff *skb, struct net_device *dev)
{
struct ctcm_priv *priv = dev->ml_priv;
@@ -917,7 +910,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
}
/* unmerged MPC variant of ctcm_tx */
-static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
{
int len = 0;
struct ctcm_priv *priv = dev->ml_priv;
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 4eec7bfb5de9..73708166b255 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1518,9 +1518,8 @@ lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
/**
* Packet transmit function called by network stack
*/
-static int
-__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
- struct net_device *dev)
+static netdev_tx_t __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
+ struct net_device *dev)
{
struct lcs_header *header;
int rc = NETDEV_TX_OK;
@@ -1581,8 +1580,7 @@ out:
return rc;
}
-static int
-lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lcs_card *card;
int rc;
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 5ce2424ca729..e2984b54447b 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1344,15 +1344,8 @@ out:
/**
* Start transmission of a packet.
* Called from generic network device layer.
- *
- * @param skb Pointer to buffer containing the packet.
- * @param dev Pointer to interface struct.
- *
- * @return 0 if packet consumed, !0 if packet rejected.
- * Note: If we return !0, then the packet is free'd by
- * the generic network layer.
*/
-static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t netiucv_tx(struct sk_buff *skb, struct net_device *dev)
{
struct netiucv_priv *privptr = netdev_priv(dev);
int rc;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 8dee16aca421..fe2bea240942 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -305,9 +305,10 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
if (!recover) {
hash_del(&addr->hnode);
kfree(addr);
- continue;
+ } else {
+ /* prepare for recovery */
+ addr->disp_flag = QETH_DISP_ADDR_ADD;
}
- addr->disp_flag = QETH_DISP_ADDR_ADD;
}
mutex_unlock(&card->ip_lock);
@@ -335,11 +336,13 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
} else
rc = qeth_l3_register_addr_entry(card, addr);
- if (!rc) {
+ if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
+ /* keep it in the records */
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
if (addr->ref_counter < 1)
qeth_l3_delete_ip(card, addr);
} else {
+ /* bad address */
hash_del(&addr->hnode);
kfree(addr);
}
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index e390f8c6d5f3..0d57432066ca 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -488,12 +488,12 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
if (port) {
put_device(&port->dev);
retval = -EEXIST;
- goto err_out;
+ goto err_put;
}
port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
if (!port)
- goto err_out;
+ goto err_put;
rwlock_init(&port->unit_list_lock);
INIT_LIST_HEAD(&port->unit_list);
@@ -516,7 +516,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
- goto err_out;
+ goto err_put;
}
retval = -EINVAL;
@@ -533,7 +533,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
return port;
-err_out:
+err_put:
zfcp_ccw_adapter_put(adapter);
+err_out:
return ERR_PTR(retval);
}
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index d4c2c44b863d..459eed7aca66 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -145,27 +145,33 @@ void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
+ int ret = -EIO;
+
if (mutex_lock_interruptible(&wka_port->mutex))
return -ERESTARTSYS;
if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
- if (zfcp_fsf_open_wka_port(wka_port))
+ if (zfcp_fsf_open_wka_port(wka_port)) {
+ /* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+ goto out;
+ }
}
- mutex_unlock(&wka_port->mutex);
-
- wait_event(wka_port->completion_wq,
+ wait_event(wka_port->opened,
wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
atomic_inc(&wka_port->refcount);
- return 0;
+ ret = 0;
+ goto out;
}
- return -EIO;
+out:
+ mutex_unlock(&wka_port->mutex);
+ return ret;
}
static void zfcp_fc_wka_port_offline(struct work_struct *work)
@@ -181,9 +187,12 @@ static void zfcp_fc_wka_port_offline(struct work_struct *work)
wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
if (zfcp_fsf_close_wka_port(wka_port)) {
+ /* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
- wake_up(&wka_port->completion_wq);
+ goto out;
}
+ wait_event(wka_port->closed,
+ wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
out:
mutex_unlock(&wka_port->mutex);
}
@@ -193,13 +202,15 @@ static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
if (atomic_dec_return(&wka_port->refcount) != 0)
return;
/* wait 10 milliseconds, other reqs might pop in */
- schedule_delayed_work(&wka_port->work, HZ / 100);
+ queue_delayed_work(wka_port->adapter->work_queue, &wka_port->work,
+ msecs_to_jiffies(10));
}
static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
struct zfcp_adapter *adapter)
{
- init_waitqueue_head(&wka_port->completion_wq);
+ init_waitqueue_head(&wka_port->opened);
+ init_waitqueue_head(&wka_port->closed);
wka_port->adapter = adapter;
wka_port->d_id = d_id;
@@ -523,8 +534,7 @@ static void zfcp_fc_adisc_handler(void *data)
/* re-init to undo drop from zfcp_fc_adisc() */
port->d_id = ntoh24(adisc_resp->adisc_port_id);
- /* port is good, unblock rport without going through erp */
- zfcp_scsi_schedule_rport_register(port);
+ /* port is still good, nothing to do */
out:
atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
put_device(&port->dev);
@@ -584,9 +594,6 @@ void zfcp_fc_link_test_work(struct work_struct *work)
int retval;
set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
- get_device(&port->dev);
- port->rport_task = RPORT_DEL;
- zfcp_scsi_rport_work(&port->rport_work);
/* only issue one test command at one time per port */
if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
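
Splitting completion_wq into separate opened and closed wait queues guarantees that an open waiter can never be woken, and wrongly satisfied, by a close completion and vice versa. A pthread condition-variable analogue with one condvar per transition (illustrative, not the zfcp API):

    #include <pthread.h>

    enum wka_status { WKA_OFFLINE, WKA_OPENING, WKA_ONLINE, WKA_CLOSING };

    struct wka_port {
        pthread_mutex_t lock;
        pthread_cond_t opened;            /* signalled by the open handler only */
        pthread_cond_t closed;            /* signalled by the close handler only */
        enum wka_status status;
    };

    /* Waiters for "open finished" can only be woken via the opened
     * condvar, never by a close completion. */
    static int wka_port_get(struct wka_port *p)
    {
        int ret;

        pthread_mutex_lock(&p->lock);
        while (p->status != WKA_ONLINE && p->status != WKA_OFFLINE)
            pthread_cond_wait(&p->opened, &p->lock);
        ret = (p->status == WKA_ONLINE) ? 0 : -1;
        pthread_mutex_unlock(&p->lock);
        return ret;
    }

    /* Called from the response handler once the firmware answers the open. */
    static void wka_open_done(struct wka_port *p, int ok)
    {
        pthread_mutex_lock(&p->lock);
        p->status = ok ? WKA_ONLINE : WKA_OFFLINE;
        pthread_cond_broadcast(&p->opened);
        pthread_mutex_unlock(&p->lock);
    }

    int main(void)
    {
        struct wka_port p = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .opened = PTHREAD_COND_INITIALIZER,
            .closed = PTHREAD_COND_INITIALIZER,
            .status = WKA_OFFLINE,
        };

        wka_open_done(&p, 1);
        return wka_port_get(&p);          /* 0: the port came online */
    }
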
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 6902ae1f8e4f..25bebfaa8cbc 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -185,7 +185,8 @@ enum zfcp_fc_wka_status {
/**
* struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
* @adapter: Pointer to adapter structure this WKA port belongs to
- * @completion_wq: Wait for completion of open/close command
+ * @opened: Wait for completion of open command
+ * @closed: Wait for completion of close command
* @status: Current status of WKA port
* @refcount: Reference count to keep port open as long as it is in use
* @d_id: FC destination id or well-known-address
@@ -195,7 +196,8 @@ enum zfcp_fc_wka_status {
*/
struct zfcp_fc_wka_port {
struct zfcp_adapter *adapter;
- wait_queue_head_t completion_wq;
+ wait_queue_head_t opened;
+ wait_queue_head_t closed;
enum zfcp_fc_wka_status status;
atomic_t refcount;
u32 d_id;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 5c652deb6fed..3fddf851bddc 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -755,7 +755,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
- int req_id = req->req_id;
+ unsigned long req_id = req->req_id;
zfcp_reqlist_add(adapter->req_list, req);
@@ -1625,7 +1625,7 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
}
out:
- wake_up(&wka_port->completion_wq);
+ wake_up(&wka_port->opened);
}
/**
@@ -1684,7 +1684,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
}
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
- wake_up(&wka_port->completion_wq);
+ wake_up(&wka_port->closed);
}
/**
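
The req_id fix above matters because the identifier is a full unsigned long; storing it in a plain int truncates the upper 32 bits, so a later lookup or error-path removal can target the wrong request. A short demonstration of the truncation (values invented):

    #include <stdio.h>

    int main(void)
    {
        unsigned long req_id = 0x1ffff00001234UL;   /* full 64-bit request tag */
        int truncated = req_id;                     /* old code: int req_id */

        /* the upper 32 bits are gone in the truncated copy */
        printf("full=%#lx truncated=%#x\n", req_id, (unsigned int)truncated);
        return 0;
    }
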
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 3337b1e80412..f6f92033132a 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2014,7 +2014,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
retval = pci_enable_device(pdev);
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
- goto out_disable_device;
+ return -ENODEV;
}
pci_set_master(pdev);
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 2b1e0d503020..75290aabd543 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -2310,8 +2310,10 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
TW_DISABLE_INTERRUPTS(tw_dev);
/* Initialize the card */
- if (tw_reset_sequence(tw_dev))
+ if (tw_reset_sequence(tw_dev)) {
+ retval = -EINVAL;
goto out_release_mem_region;
+ }
/* Set host specific parameters */
host->max_id = TW_MAX_UNITS;
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 0068963bb933..0b7e9b1a8946 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1581,7 +1581,7 @@ NCR_700_intr(int irq, void *dev_id)
printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
#endif
resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
- } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
+ } else if (slot && dsp >= to32bit(&slot->pSG[0].ins) &&
dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9ea30fcb4428..cab6e67ea606 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -443,7 +443,7 @@ config SCSI_MVUMI
config SCSI_DPT_I2O
tristate "Adaptec I2O RAID support "
- depends on SCSI && PCI && VIRT_TO_BUS
+ depends on SCSI && PCI
help
This driver supports all of Adaptec's I2O based RAID controllers as
well as the DPT SmartRaid V cards. This is an Adaptec maintained
@@ -1286,7 +1286,7 @@ source "drivers/scsi/arm/Kconfig"
config JAZZ_ESP
bool "MIPS JAZZ FAS216 SCSI support"
- depends on MACH_JAZZ && SCSI
+ depends on MACH_JAZZ && SCSI=y
select SCSI_SPI_ATTRS
help
This is the driver for the onboard SCSI host adapter of MIPS Magnum
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index f923ed019d4a..593b167ceefe 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -50,6 +50,9 @@ static int asd_map_scatterlist(struct sas_task *task,
dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
task->total_xfer_len,
task->data_dir);
+ if (dma_mapping_error(&asd_ha->pcidev->dev, dma))
+ return -ENOMEM;
+
sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 2058d50d62e1..737d7087723a 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -441,6 +441,10 @@ int beiscsi_iface_set_param(struct Scsi_Host *shost,
}
nla_for_each_attr(attrib, data, dt_len, rm_len) {
+ /* ignore nla_type as it is never used */
+ if (nla_len(attrib) < sizeof(*iface_param))
+ return -EINVAL;
+
iface_param = nla_data(attrib);
if (iface_param->param_type != ISCSI_NET_PARAM)
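
The added nla_len() check above ensures each netlink attribute carries at least sizeof(*iface_param) bytes of payload before the struct cast; a shorter attribute would let the parser read past the buffer. A toy TLV walk with the same length guard (the wire format here is invented and assumes no alignment padding):

    #include <stdint.h>
    #include <string.h>

    struct tlv {                          /* toy attribute header */
        uint16_t len;                     /* header + payload, in bytes */
        uint16_t type;
    };

    struct param {                        /* expected payload layout */
        uint8_t param_type;
        uint8_t value[8];
    };

    /* Reject any attribute whose payload is shorter than struct param
     * before interpreting it, mirroring the nla_len() check. */
    static int parse(const uint8_t *buf, size_t buflen)
    {
        size_t off = 0;

        while (off + sizeof(struct tlv) <= buflen) {
            struct tlv hdr;

            memcpy(&hdr, buf + off, sizeof(hdr));
            if (hdr.len < sizeof(struct tlv) + sizeof(struct param))
                return -1;                /* short attribute: do not cast */
            if (off + hdr.len > buflen)
                return -1;                /* header lies about its length */
            /* safe: at least sizeof(struct param) payload bytes exist */
            off += hdr.len;
        }
        return 0;
    }

    int main(void)
    {
        struct tlv hdr = { .len = sizeof(hdr), .type = 0 };  /* no payload */
        uint8_t buf[sizeof(hdr)];

        memcpy(buf, &hdr, sizeof(hdr));
        return parse(buf, sizeof(buf)) == -1 ? 0 : 1;
    }
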
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 53b33f146f82..d34c881c3f8a 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -2694,6 +2694,7 @@ init_wrb_hndl_failed:
kfree(pwrb_context->pwrb_handle_base);
kfree(pwrb_context->pwrb_handle_basestd);
}
+ kfree(phwi_ctxt->be_wrbq);
return -ENOMEM;
}
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 7bd2ba1ad4d1..f30fe324e6ec 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -20,7 +20,6 @@
struct bfa_s;
typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
-typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
/*
* Interrupt message handlers
@@ -437,4 +436,12 @@ struct bfa_cb_pending_q_s {
(__qe)->data = (__data); \
} while (0)
+#define bfa_pending_q_init_status(__qe, __cbfn, __cbarg, __data) do { \
+ bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \
+ (__qe)->hcb_qe.cbfn_status = (__cbfn); \
+ (__qe)->hcb_qe.cbarg = (__cbarg); \
+ (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \
+ (__qe)->data = (__data); \
+} while (0)
+
#endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 0f554ebb8f2c..09819f076699 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1907,15 +1907,13 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
struct list_head *qe;
struct list_head *qen;
struct bfa_cb_qe_s *hcb_qe;
- bfa_cb_cbfn_status_t cbfn;
list_for_each_safe(qe, qen, comp_q) {
hcb_qe = (struct bfa_cb_qe_s *) qe;
if (hcb_qe->pre_rmv) {
/* qe is invalid after return, dequeue before cbfn() */
list_del(qe);
- cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
- cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
+ hcb_qe->cbfn_status(hcb_qe->cbarg, hcb_qe->fw_status);
} else
hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 933a1c3890ff..5e568d6d7b26 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -361,14 +361,18 @@ struct bfa_reqq_wait_s {
void *cbarg;
};
-typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
+typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
+typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
/*
* Generic BFA callback element.
*/
struct bfa_cb_qe_s {
struct list_head qe;
- bfa_cb_cbfn_t cbfn;
+ union {
+ bfa_cb_cbfn_status_t cbfn_status;
+ bfa_cb_cbfn_t cbfn;
+ };
bfa_boolean_t once;
bfa_boolean_t pre_rmv; /* set for stack based qe(s) */
bfa_status_t fw_status; /* to access fw status in comp proc */
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index a76c968dbac5..9f5964944d78 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2135,8 +2135,7 @@ bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
struct bfa_cb_pending_q_s cb_qe;
init_completion(&fcomp.comp);
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
- &fcomp, &iocmd->stats);
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -2159,7 +2158,7 @@ bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
struct bfa_cb_pending_q_s cb_qe;
init_completion(&fcomp.comp);
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
@@ -2443,8 +2442,7 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
init_completion(&fcomp.comp);
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
- &fcomp, &iocmd->stats);
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats);
spin_lock_irqsave(&bfad->bfad_lock, flags);
WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
@@ -2474,8 +2472,7 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
init_completion(&fcomp.comp);
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
- &fcomp, NULL);
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL);
spin_lock_irqsave(&bfad->bfad_lock, flags);
WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 9ed109fb6b67..3bef2ed50a07 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -430,7 +430,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
struct fcoe_ctlr *ctlr;
struct fcoe_rcv_info *fr;
struct fcoe_percpu_s *bg;
- struct sk_buff *tmp_skb;
interface = container_of(ptype, struct bnx2fc_interface,
fcoe_packet_type);
@@ -442,11 +441,9 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
goto err;
}
- tmp_skb = skb_share_check(skb, GFP_ATOMIC);
- if (!tmp_skb)
- goto err;
-
- skb = tmp_skb;
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return -1;
if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h
index c38017b4af98..e50e93e7fe5a 100644
--- a/drivers/scsi/csiostor/csio_defs.h
+++ b/drivers/scsi/csiostor/csio_defs.h
@@ -73,7 +73,21 @@ csio_list_deleted(struct list_head *list)
#define csio_list_prev(elem) (((struct list_head *)(elem))->prev)
/* State machine */
-typedef void (*csio_sm_state_t)(void *, uint32_t);
+struct csio_lnode;
+
+/* State machine events */
+enum csio_ln_ev {
+ CSIO_LNE_NONE = (uint32_t)0,
+ CSIO_LNE_LINKUP,
+ CSIO_LNE_FAB_INIT_DONE,
+ CSIO_LNE_LINK_DOWN,
+ CSIO_LNE_DOWN_LINK,
+ CSIO_LNE_LOGO,
+ CSIO_LNE_CLOSE,
+ CSIO_LNE_MAX_EVENT,
+};
+
+typedef void (*csio_sm_state_t)(struct csio_lnode *ln, enum csio_ln_ev evt);
struct csio_sm {
struct list_head sm_list;
@@ -83,7 +97,7 @@ struct csio_sm {
static inline void
csio_set_state(void *smp, void *state)
{
- ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
+ ((struct csio_sm *)smp)->sm_state = state;
}
static inline void
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index c3bf590f5d68..47803268b138 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -1095,7 +1095,7 @@ csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
int
csio_is_lnode_ready(struct csio_lnode *ln)
{
- return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
+ return (csio_get_state(ln) == csio_lns_ready);
}
/*****************************************************************************/
@@ -1367,15 +1367,15 @@ csio_free_fcfinfo(struct kref *kref)
void
csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
{
- if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
+ if (csio_get_state(ln) == csio_lns_uninit) {
strcpy(str, "UNINIT");
return;
}
- if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
+ if (csio_get_state(ln) == csio_lns_ready) {
strcpy(str, "READY");
return;
}
- if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
+ if (csio_get_state(ln) == csio_lns_offline) {
strcpy(str, "OFFLINE");
return;
}
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
index 372a67d122d3..607698a0f063 100644
--- a/drivers/scsi/csiostor/csio_lnode.h
+++ b/drivers/scsi/csiostor/csio_lnode.h
@@ -53,19 +53,6 @@
extern int csio_fcoe_rnodes;
extern int csio_fdmi_enable;
-/* State machine evets */
-enum csio_ln_ev {
- CSIO_LNE_NONE = (uint32_t)0,
- CSIO_LNE_LINKUP,
- CSIO_LNE_FAB_INIT_DONE,
- CSIO_LNE_LINK_DOWN,
- CSIO_LNE_DOWN_LINK,
- CSIO_LNE_LOGO,
- CSIO_LNE_CLOSE,
- CSIO_LNE_MAX_EVENT,
-};
-
-
struct csio_fcf_info {
struct list_head list;
uint8_t priority;
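
Giving the csiostor state-machine pointer a precise type lets states be assigned and compared without casts, and the compiler now rejects any handler with the wrong signature. A standalone sketch of the typed pattern (names are illustrative, not the csiostor API):

    #include <stdio.h>

    struct lnode;

    enum ln_ev { LNE_NONE, LNE_LINKUP, LNE_LINK_DOWN };

    /* The precise signature: no void pointers, no casts at call sites. */
    typedef void (*sm_state_t)(struct lnode *ln, enum ln_ev evt);

    struct lnode {
        sm_state_t state;
    };

    static void lns_ready(struct lnode *ln, enum ln_ev evt);

    static void lns_uninit(struct lnode *ln, enum ln_ev evt)
    {
        if (evt == LNE_LINKUP)
            ln->state = lns_ready;        /* plain assignment, type-checked */
    }

    static void lns_ready(struct lnode *ln, enum ln_ev evt)
    {
        if (evt == LNE_LINK_DOWN)
            ln->state = lns_uninit;
    }

    int main(void)
    {
        struct lnode ln = { .state = lns_uninit };

        ln.state(&ln, LNE_LINKUP);        /* event dispatch */
        printf("ready: %d\n", ln.state == lns_ready);  /* typed comparison */
        return 0;
    }
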
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index c4a6609d8fae..cd0c978dc169 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4168,7 +4168,7 @@ static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
int srb_idx = 0;
unsigned i = 0;
- struct SGentry *uninitialized_var(ptr);
+ struct SGentry *ptr;
for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
acb->srb_array[i].segment_x = NULL;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index fe8a5e5c0df8..bf0b3178f84d 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1036,10 +1036,12 @@ static int alua_activate(struct scsi_device *sdev,
rcu_read_unlock();
mutex_unlock(&h->init_mutex);
- if (alua_rtpg_queue(pg, sdev, qdata, true))
+ if (alua_rtpg_queue(pg, sdev, qdata, true)) {
fn = NULL;
- else
+ } else {
+ kfree(qdata);
err = SCSI_DH_DEV_OFFLINED;
+ }
kref_put(&pg->kref, release_port_group);
out:
if (fn)
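
The alua_activate() fix encodes a common ownership rule: alua_rtpg_queue() disposes of qdata only when it succeeds in queueing the work, so on failure the caller still owns the allocation and must kfree() it to avoid a leak. The shape of the rule, sketched with illustrative names:

struct work_ctx { struct list_head list; };

/* Contract (assumed): takes ownership of ctx only when it returns nonzero. */
extern int submit(struct work_ctx *ctx);

static int caller(void)
{
        struct work_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
                return -ENOMEM;
        if (!submit(ctx)) {
                kfree(ctx);     /* failure: ownership never transferred */
                return -ENODEV;
        }
        return 0;               /* success: submit() now owns ctx */
}
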
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index abc74fd474dc..912acd2b5067 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -56,7 +56,7 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
#include <asm/processor.h> /* for boot_cpu_data */
#include <asm/pgtable.h>
-#include <asm/io.h> /* for virt_to_bus, etc. */
+#include <asm/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -585,51 +585,6 @@ static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
return 0;
}
-/*
- * Turn a pointer to ioctl reply data into an u32 'context'
- */
-static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
-{
-#if BITS_PER_LONG == 32
- return (u32)(unsigned long)reply;
-#else
- ulong flags = 0;
- u32 nr, i;
-
- spin_lock_irqsave(pHba->host->host_lock, flags);
- nr = ARRAY_SIZE(pHba->ioctl_reply_context);
- for (i = 0; i < nr; i++) {
- if (pHba->ioctl_reply_context[i] == NULL) {
- pHba->ioctl_reply_context[i] = reply;
- break;
- }
- }
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- if (i >= nr) {
- printk(KERN_WARNING"%s: Too many outstanding "
- "ioctl commands\n", pHba->name);
- return (u32)-1;
- }
-
- return i;
-#endif
-}
-
-/*
- * Go from an u32 'context' to a pointer to ioctl reply data.
- */
-static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
-{
-#if BITS_PER_LONG == 32
- return (void *)(unsigned long)context;
-#else
- void *p = pHba->ioctl_reply_context[context];
- pHba->ioctl_reply_context[context] = NULL;
-
- return p;
-#endif
-}
-
/*===========================================================================
* Error Handling routines
*===========================================================================
@@ -1652,208 +1607,6 @@ static int adpt_close(struct inode *inode, struct file *file)
return 0;
}
-
-static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
-{
- u32 msg[MAX_MESSAGE_SIZE];
- u32* reply = NULL;
- u32 size = 0;
- u32 reply_size = 0;
- u32 __user *user_msg = arg;
- u32 __user * user_reply = NULL;
- void **sg_list = NULL;
- u32 sg_offset = 0;
- u32 sg_count = 0;
- int sg_index = 0;
- u32 i = 0;
- u32 rcode = 0;
- void *p = NULL;
- dma_addr_t addr;
- ulong flags = 0;
-
- memset(&msg, 0, MAX_MESSAGE_SIZE*4);
- // get user msg size in u32s
- if(get_user(size, &user_msg[0])){
- return -EFAULT;
- }
- size = size>>16;
-
- user_reply = &user_msg[size];
- if(size > MAX_MESSAGE_SIZE){
- return -EFAULT;
- }
- size *= 4; // Convert to bytes
-
- /* Copy in the user's I2O command */
- if(copy_from_user(msg, user_msg, size)) {
- return -EFAULT;
- }
- get_user(reply_size, &user_reply[0]);
- reply_size = reply_size>>16;
- if(reply_size > REPLY_FRAME_SIZE){
- reply_size = REPLY_FRAME_SIZE;
- }
- reply_size *= 4;
- reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
- if(reply == NULL) {
- printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
- return -ENOMEM;
- }
- sg_offset = (msg[0]>>4)&0xf;
- msg[2] = 0x40000000; // IOCTL context
- msg[3] = adpt_ioctl_to_context(pHba, reply);
- if (msg[3] == (u32)-1) {
- rcode = -EBUSY;
- goto free;
- }
-
- sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
- if (!sg_list) {
- rcode = -ENOMEM;
- goto free;
- }
- if(sg_offset) {
- // TODO add 64 bit API
- struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
- sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
- if (sg_count > pHba->sg_tablesize){
- printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
- rcode = -EINVAL;
- goto free;
- }
-
- for(i = 0; i < sg_count; i++) {
- int sg_size;
-
- if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
- printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
- rcode = -EINVAL;
- goto cleanup;
- }
- sg_size = sg[i].flag_count & 0xffffff;
- /* Allocate memory for the transfer */
- p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
- if(!p) {
- printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
- pHba->name,sg_size,i,sg_count);
- rcode = -ENOMEM;
- goto cleanup;
- }
- sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
- /* Copy in the user's SG buffer if necessary */
- if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
- // sg_simple_element API is 32 bit
- if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
- printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
- rcode = -EFAULT;
- goto cleanup;
- }
- }
- /* sg_simple_element API is 32 bit, but addr < 4GB */
- sg[i].addr_bus = addr;
- }
- }
-
- do {
- /*
- * Stop any new commands from enterring the
- * controller while processing the ioctl
- */
- if (pHba->host) {
- scsi_block_requests(pHba->host);
- spin_lock_irqsave(pHba->host->host_lock, flags);
- }
- rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
- if (rcode != 0)
- printk("adpt_i2o_passthru: post wait failed %d %p\n",
- rcode, reply);
- if (pHba->host) {
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- scsi_unblock_requests(pHba->host);
- }
- } while (rcode == -ETIMEDOUT);
-
- if(rcode){
- goto cleanup;
- }
-
- if(sg_offset) {
- /* Copy back the Scatter Gather buffers back to user space */
- u32 j;
- // TODO add 64 bit API
- struct sg_simple_element* sg;
- int sg_size;
-
- // re-acquire the original message to handle correctly the sg copy operation
- memset(&msg, 0, MAX_MESSAGE_SIZE*4);
- // get user msg size in u32s
- if(get_user(size, &user_msg[0])){
- rcode = -EFAULT;
- goto cleanup;
- }
- size = size>>16;
- size *= 4;
- if (size > MAX_MESSAGE_SIZE) {
- rcode = -EINVAL;
- goto cleanup;
- }
- /* Copy in the user's I2O command */
- if (copy_from_user (msg, user_msg, size)) {
- rcode = -EFAULT;
- goto cleanup;
- }
- sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
-
- // TODO add 64 bit API
- sg = (struct sg_simple_element*)(msg + sg_offset);
- for (j = 0; j < sg_count; j++) {
- /* Copy out the SG list to user's buffer if necessary */
- if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
- sg_size = sg[j].flag_count & 0xffffff;
- // sg_simple_element API is 32 bit
- if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
- printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
- rcode = -EFAULT;
- goto cleanup;
- }
- }
- }
- }
-
- /* Copy back the reply to user space */
- if (reply_size) {
- // we wrote our own values for context - now restore the user supplied ones
- if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
- printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
- rcode = -EFAULT;
- }
- if(copy_to_user(user_reply, reply, reply_size)) {
- printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
- rcode = -EFAULT;
- }
- }
-
-
-cleanup:
- if (rcode != -ETIME && rcode != -EINTR) {
- struct sg_simple_element *sg =
- (struct sg_simple_element*) (msg +sg_offset);
- while(sg_index) {
- if(sg_list[--sg_index]) {
- dma_free_coherent(&pHba->pDev->dev,
- sg[sg_index].flag_count & 0xffffff,
- sg_list[sg_index],
- sg[sg_index].addr_bus);
- }
- }
- }
-
-free:
- kfree(sg_list);
- kfree(reply);
- return rcode;
-}
-
#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
@@ -1980,8 +1733,6 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong ar
return -EFAULT;
}
break;
- case I2OUSRCMD:
- return adpt_i2o_passthru(pHba, argp);
case DPT_CTRLINFO:{
drvrHBAinfo_S HbaInfo;
@@ -2118,7 +1869,7 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
} else {
/* Ick, we should *never* be here */
printk(KERN_ERR "dpti: reply frame not from pool\n");
- reply = (u8 *)bus_to_virt(m);
+ continue;
}
if (readl(reply) & MSG_FAIL) {
@@ -2138,13 +1889,6 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
adpt_send_nop(pHba, old_m);
}
context = readl(reply+8);
- if(context & 0x40000000){ // IOCTL
- void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
- if( p != NULL) {
- memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
- }
- // All IOCTLs will also be post wait
- }
if(context & 0x80000000){ // Post wait message
status = readl(reply+16);
if(status >> 24){
@@ -2152,16 +1896,14 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
} else {
status = I2O_POST_WAIT_OK;
}
- if(!(context & 0x40000000)) {
- /*
- * The request tag is one less than the command tag
- * as the firmware might treat a 0 tag as invalid
- */
- cmd = scsi_host_find_tag(pHba->host,
- readl(reply + 12) - 1);
- if(cmd != NULL) {
- printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
- }
+ /*
+ * The request tag is one less than the command tag
+ * as the firmware might treat a 0 tag as invalid
+ */
+ cmd = scsi_host_find_tag(pHba->host,
+ readl(reply + 12) - 1);
+ if(cmd != NULL) {
+ printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
}
adpt_i2o_post_wait_complete(context, status);
} else { // SCSI message
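
Most of the deleted dpt_i2o code existed to squeeze a kernel pointer through the 32-bit message context field; on 64-bit builds that cannot round-trip, hence the ioctl_reply_context[] lookup table the driver carried. A standalone illustration of the underlying truncation hazard (not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int reply;
        void *p = &reply;
        uint32_t ctx = (uint32_t)(uintptr_t)p;  /* high bits lost on 64-bit */
        void *back = (void *)(uintptr_t)ctx;

        /* On a typical 64-bit build this prints "no". */
        printf("round-trip intact: %s\n", back == p ? "yes" : "no");
        return 0;
}

Dropping the passthru ioctl removes the lookup table and the interrupt-path special case together.
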
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index 42b1e28b5884..87df0c1e0ab2 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -248,7 +248,6 @@ typedef struct _adpt_hba {
void __iomem *FwDebugBLEDflag_P;// Virtual Addr Of FW Debug BLED
void __iomem *FwDebugBLEDvalue_P;// Virtual Addr Of FW Debug BLED
u32 FwDebugFlags;
- u32 *ioctl_reply_context[4];
} adpt_hba;
struct sg_simple_element {
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 25dae9f0b205..00ddb3fd940f 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -2506,6 +2506,7 @@ static int __init fcoe_init(void)
out_free:
mutex_unlock(&fcoe_config_mutex);
+ fcoe_transport_detach(&fcoe_sw_transport);
out_destroy:
destroy_workqueue(fcoe_wq);
return rc;
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 2cb7a8c93a15..b3086cf40617 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -830,14 +830,15 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
error = device_register(&ctlr->dev);
- if (error)
- goto out_del_q2;
+ if (error) {
+ destroy_workqueue(ctlr->devloss_work_q);
+ destroy_workqueue(ctlr->work_q);
+ put_device(&ctlr->dev);
+ return NULL;
+ }
return ctlr;
-out_del_q2:
- destroy_workqueue(ctlr->devloss_work_q);
- ctlr->devloss_work_q = NULL;
out_del_q:
destroy_workqueue(ctlr->work_q);
ctlr->work_q = NULL;
@@ -1036,16 +1037,16 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
fcf->selected = new_fcf->selected;
error = device_register(&fcf->dev);
- if (error)
- goto out_del;
+ if (error) {
+ put_device(&fcf->dev);
+ goto out;
+ }
fcf->state = FCOE_FCF_STATE_CONNECTED;
list_add_tail(&fcf->peers, &ctlr->fcfs);
return fcf;
-out_del:
- kfree(fcf);
out:
return NULL;
}
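
Both fcoe_sysfs hunks apply the driver-model rule that once a struct device has been initialized, its memory is owned by the refcount: a failed device_register() must be answered with put_device(), so the release callback frees the object, never a bare kfree(). A minimal sketch of the idiom, with illustrative names:

struct foo { struct device dev; };

static void foo_release(struct device *dev)
{
        kfree(container_of(dev, struct foo, dev));
}

static struct foo *foo_add(struct device *parent)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return NULL;
        f->dev.parent = parent;
        f->dev.release = foo_release;
        device_initialize(&f->dev);
        dev_set_name(&f->dev, "foo0");
        if (device_add(&f->dev)) {
                put_device(&f->dev);    /* frees via foo_release(), not kfree(f) */
                return NULL;
        }
        return f;
}
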
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 13f7d88d6e57..3f2e164b5068 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -67,9 +67,10 @@ int fnic_debugfs_init(void)
fc_trc_flag->fnic_trace = 2;
fc_trc_flag->fc_trace = 3;
fc_trc_flag->fc_clear = 4;
+ return 0;
}
- return 0;
+ return -ENOMEM;
}
/*
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 031aa4043c5e..9de27c7f6b01 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1397,7 +1397,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
device->linkrate = phy->sas_phy.linkrate;
hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
- } else
+ } else if (!port->port_attached)
port->id = 0xff;
}
}
@@ -1577,10 +1577,10 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
if (!hisi_hba->hw->soft_reset)
- return -1;
+ return -ENOENT;
if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
- return -1;
+ return -EPERM;
dev_info(dev, "controller resetting...\n");
hisi_sas_controller_reset_prepare(hisi_hba);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index a86aae52d94f..c84d18b23e7b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -3365,7 +3365,7 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
}
if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
- return -1;
+ return -EPERM;
scsi_block_requests(shost);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 45885e80992f..a8ae573294e5 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -179,6 +179,7 @@ void scsi_remove_host(struct Scsi_Host *shost)
scsi_forget_host(shost);
mutex_unlock(&shost->scan_mutex);
scsi_proc_host_rm(shost);
+ scsi_proc_hostdir_rm(shost->hostt);
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_DEL))
@@ -318,9 +319,7 @@ static void scsi_host_dev_release(struct device *dev)
struct Scsi_Host *shost = dev_to_shost(dev);
struct device *parent = dev->parent;
- scsi_proc_hostdir_rm(shost->hostt);
-
- /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
+ /* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
rcu_barrier();
if (shost->tmf_work_q)
@@ -519,7 +518,7 @@ EXPORT_SYMBOL(scsi_host_alloc);
static int __scsi_host_match(struct device *dev, const void *data)
{
struct Scsi_Host *p;
- const unsigned short *hostnum = data;
+ const unsigned int *hostnum = data;
p = class_to_shost(dev);
return p->host_no == *hostnum;
@@ -536,7 +535,7 @@ static int __scsi_host_match(struct device *dev, const void *data)
* that scsi_host_get() took. The put_device() below dropped
* the reference from class_find_device().
**/
-struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
+struct Scsi_Host *scsi_host_lookup(unsigned int hostnum)
{
struct device *cdev;
struct Scsi_Host *shost = NULL;
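
Scsi_Host::host_no is an unsigned int, so keying the lookup on unsigned short silently truncates host numbers above 65535, which long-running systems that repeatedly add and remove hosts can reach. A standalone illustration of the truncation:

#include <stdio.h>

int main(void)
{
        unsigned int host_no = 65536;   /* first value that no longer fits */
        unsigned short key = (unsigned short)host_no;

        /* key is 0: a lookup keyed on unsigned short matches the wrong host */
        printf("host_no=%u key=%hu equal=%d\n", host_no, key, host_no == key);
        return 0;
}
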
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index bac705990a96..e670cce0cb6e 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -5810,7 +5810,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
{
struct Scsi_Host *sh;
- sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
+ sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
if (sh == NULL) {
dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
return -ENOMEM;
@@ -8903,7 +8903,7 @@ clean1: /* wq/aer/h */
destroy_workqueue(h->monitor_ctlr_wq);
h->monitor_ctlr_wq = NULL;
}
- kfree(h);
+ hpda_free_ctlr_info(h);
return rc;
}
@@ -9763,7 +9763,8 @@ static int hpsa_add_sas_host(struct ctlr_info *h)
return 0;
free_sas_phy:
- hpsa_free_sas_phy(hpsa_sas_phy);
+ sas_phy_free(hpsa_sas_phy->phy);
+ kfree(hpsa_sas_phy);
free_sas_port:
hpsa_free_sas_port(hpsa_sas_port);
free_sas_node:
@@ -9799,10 +9800,12 @@ static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
if (rc)
- goto free_sas_port;
+ goto free_sas_rphy;
return 0;
+free_sas_rphy:
+ sas_rphy_free(rphy);
free_sas_port:
hpsa_free_sas_port(hpsa_sas_port);
device->sas_port = NULL;
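
The first hpsa hunk fixes the classic sizeof-on-a-pointer slip: sizeof(h) is the size of the pointer (8 bytes on LP64), not of the controller structure, so the private hostdata area was allocated far too small. The pitfall in isolation:

#include <stdio.h>

struct ctlr_info_like { char payload[4096]; };

int main(void)
{
        struct ctlr_info_like *h = NULL;

        printf("sizeof(h)  = %zu\n", sizeof(h));   /* pointer size, 8 on LP64 */
        printf("sizeof(*h) = %zu\n", sizeof(*h));  /* 4096, what was intended */
        return 0;
}

sizeof(*h), or the explicit struct name as the patch uses, is the safe spelling.
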
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index a163fd9331b3..205ab65c3e28 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1517,23 +1517,22 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
}
/**
- * strip_and_pad_whitespace - Strip and pad trailing whitespace.
- * @i: index into buffer
- * @buf: string to modify
+ * strip_whitespace - Strip trailing whitespace.
+ * @i: size of buffer
+ * @buf: string to modify
*
- * This function will strip all trailing whitespace, pad the end
- * of the string with a single space, and NULL terminate the string.
+ * This function will strip all trailing whitespace and
+ * NUL terminate the string.
*
- * Return value:
- * new length of string
**/
-static int strip_and_pad_whitespace(int i, char *buf)
+static void strip_whitespace(int i, char *buf)
{
+ if (i < 1)
+ return;
+ i--;
while (i && buf[i] == ' ')
i--;
- buf[i+1] = ' ';
- buf[i+2] = '\0';
- return i + 2;
+ buf[i+1] = '\0';
}
/**
@@ -1548,19 +1547,21 @@ static int strip_and_pad_whitespace(int i, char *buf)
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
struct ipr_vpd *vpd)
{
- char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
- int i = 0;
+ char vendor_id[IPR_VENDOR_ID_LEN + 1];
+ char product_id[IPR_PROD_ID_LEN + 1];
+ char sn[IPR_SERIAL_NUM_LEN + 1];
- memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
- i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
+ memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
+ strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id);
- memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
- i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
+ memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN);
+ strip_whitespace(IPR_PROD_ID_LEN, product_id);
- memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
- buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
+ memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN);
+ strip_whitespace(IPR_SERIAL_NUM_LEN, sn);
- ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
+ ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix,
+ vendor_id, product_id, sn);
}
/**
@@ -10843,11 +10844,19 @@ static struct notifier_block ipr_notifier = {
**/
static int __init ipr_init(void)
{
+ int rc;
+
ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
register_reboot_notifier(&ipr_notifier);
- return pci_register_driver(&ipr_driver);
+ rc = pci_register_driver(&ipr_driver);
+ if (rc) {
+ unregister_reboot_notifier(&ipr_notifier);
+ return rc;
+ }
+
+ return 0;
}
/**
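
The ipr_init() fix is an instance of the general init/unwind discipline: every setup step that precedes a failing call must be undone, in reverse order, before the error is returned; otherwise a stale notifier (or workqueue, or proc entry) keeps pointing into a module that failed to load. The canonical shape, with placeholder helpers:

extern int first_setup(void);
extern int second_setup(void);
extern int third_setup(void);
extern void first_teardown(void);
extern void second_teardown(void);

static int __init example_init(void)
{
        int rc;

        rc = first_setup();
        if (rc)
                return rc;
        rc = second_setup();
        if (rc)
                goto undo_first;
        rc = third_setup();
        if (rc)
                goto undo_second;
        return 0;

undo_second:
        second_teardown();
undo_first:
        first_teardown();
        return rc;
}

The lpfc and mpt3sas hunks later in this series fix the same class of bug.
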
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 343d24c7e788..591aebb40a0f 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -3398,7 +3398,7 @@ static enum sci_status isci_io_request_build(struct isci_host *ihost,
return SCI_FAILURE;
}
- return SCI_SUCCESS;
+ return status;
}
static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b5dd1caae5e9..9320a0a92bb2 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -770,7 +770,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf)
{
struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
- struct iscsi_session *session = tcp_sw_host->session;
+ struct iscsi_session *session;
struct iscsi_conn *conn;
struct iscsi_tcp_conn *tcp_conn;
struct iscsi_sw_tcp_conn *tcp_sw_conn;
@@ -779,6 +779,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
switch (param) {
case ISCSI_HOST_PARAM_IPADDRESS:
+ session = tcp_sw_host->session;
if (!session)
return -ENOTCONN;
@@ -867,12 +868,14 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
if (!cls_session)
goto remove_host;
session = cls_session->dd_data;
- tcp_sw_host = iscsi_host_priv(shost);
- tcp_sw_host->session = session;
shost->can_queue = session->scsi_cmds_max;
if (iscsi_tcp_r2tpool_alloc(session))
goto remove_session;
+
+ /* We are now fully set up, so expose the session to sysfs. */
+ tcp_sw_host = iscsi_host_priv(shost);
+ tcp_sw_host->session = session;
return cls_session;
remove_session:
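
The iscsi_tcp fix is an ordering rule: tcp_sw_host->session is what makes the session reachable from the host's sysfs attributes, so it must be assigned only after everything the readers dereference is ready. Sketched generically (illustrative names):

struct sess { int ready; };
struct host_priv { struct sess *session; };

static int create(struct host_priv *priv)
{
        struct sess *s = kzalloc(sizeof(*s), GFP_KERNEL);

        if (!s)
                return -ENOMEM;
        s->ready = 1;           /* complete ALL initialization first */

        /* Publish last, so a reader never sees a half-built object. */
        smp_store_release(&priv->session, s);
        return 0;
}

A lockless reader would pair this with smp_load_acquire(); in the driver the sysfs path checks the pointer for NULL and returns -ENOTCONN until it is published.
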
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index bf2cc9656e19..5e00ee0645f2 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -270,6 +270,11 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
if (!fsp->seq_ptr)
return -EINVAL;
+ if (fsp->state & FC_SRB_ABORT_PENDING) {
+ FC_FCP_DBG(fsp, "abort already pending\n");
+ return -EBUSY;
+ }
+
per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++;
put_cpu();
@@ -1680,7 +1685,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
- fc_fcp_recovery(fsp, FC_ERROR);
+ fc_fcp_recovery(fsp, FC_TIMED_OUT);
break;
}
fc_fcp_unlock_pkt(fsp);
@@ -1698,11 +1703,12 @@ static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
fsp->status_code = code;
fsp->cdb_status = 0;
fsp->io_status = 0;
- /*
- * if this fails then we let the scsi command timer fire and
- * scsi-ml escalate.
- */
- fc_fcp_send_abort(fsp);
+ if (!fsp->cmd)
+ /*
+ * Only abort non-scsi commands; otherwise let the
+ * scsi command timer fire and scsi-ml escalate.
+ */
+ fc_fcp_send_abort(fsp);
}
/**
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 9399e1455d59..97087eef05db 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -238,6 +238,12 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
}
mutex_lock(&lport->disc.disc_mutex);
lport->ptp_rdata = fc_rport_create(lport, remote_fid);
+ if (!lport->ptp_rdata) {
+ printk(KERN_WARNING "libfc: Failed to set up lport 0x%x\n",
+ lport->port_id);
+ mutex_unlock(&lport->disc.disc_mutex);
+ return;
+ }
kref_get(&lport->ptp_rdata->kref);
lport->ptp_rdata->ids.port_name = remote_wwpn;
lport->ptp_rdata->ids.node_name = remote_wwnn;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 9fdb9c9fbda4..173f91ae38f0 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -85,7 +85,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
if (res) {
- del_timer(&task->slow_task->timer);
+ del_timer_sync(&task->slow_task->timer);
pr_notice("executing SMP task failed:%d\n", res);
break;
}
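
del_timer() only guarantees the timer is no longer pending; a handler already executing on another CPU keeps running, which becomes a use-after-free once this error path releases the task. del_timer_sync() additionally waits for a running handler to return. The rule in isolation (illustrative names):

struct example { struct timer_list timer; };

static void example_teardown(struct example *e)
{
        /*
         * del_timer():      the timer will not fire later, but its
         *                   handler may be running right now on
         *                   another CPU.
         * del_timer_sync(): also waits for a running handler; must
         *                   not be called while holding a lock the
         *                   handler takes.
         */
        del_timer_sync(&e->timer);
        kfree(e);       /* safe: the handler can no longer touch e */
}
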
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 088b764aefa4..7ce0d94cdc01 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -32,6 +32,7 @@
struct lpfc_sli2_slim;
#define ELX_MODEL_NAME_SIZE 80
+#define ELX_FW_NAME_SIZE 84
#define LPFC_PCI_DEV_LP 0x1
#define LPFC_PCI_DEV_OC 0x2
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index e15bb3dfe995..291fccf02d45 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2046,6 +2046,7 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
int i;
+ size_t bsize;
/* Protect copy from user */
if (!access_ok(buf, nbytes))
@@ -2053,7 +2054,9 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
memset(mybuf, 0, sizeof(mybuf));
- if (copy_from_user(mybuf, buf, nbytes))
+ bsize = min(nbytes, (sizeof(mybuf) - 1));
+
+ if (copy_from_user(mybuf, buf, bsize))
return -EFAULT;
pbuf = &mybuf[0];
@@ -2074,7 +2077,7 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
qp->lock_conflict.wq_access = 0;
}
}
- return nbytes;
+ return bsize;
}
#endif
@@ -2402,8 +2405,8 @@ lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf,
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_multixri_pool *multixri_pool;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
/* Protect copy from user */
if (!access_ok(buf, nbytes))
@@ -2487,8 +2490,8 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
if (!phba->targetport)
return -ENXIO;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2629,8 +2632,8 @@ lpfc_debugfs_nvmektime_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2757,8 +2760,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 63)
- nbytes = 63;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2863,8 +2866,8 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
char *pbuf;
int i, j;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
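
All of these lpfc debugfs write handlers converge on the same pattern: clamp the user-controlled length to sizeof(mybuf) - 1 before copy_from_user(), so the stack buffer cannot overflow and the trailing NUL that the string parsing relies on is preserved. The shape, with illustrative names:

static ssize_t example_write(struct file *file, const char __user *buf,
                             size_t nbytes, loff_t *ppos)
{
        char mybuf[64];
        size_t bsize = min(nbytes, sizeof(mybuf) - 1);

        memset(mybuf, 0, sizeof(mybuf));        /* guarantees NUL termination */
        if (copy_from_user(mybuf, buf, bsize))
                return -EFAULT;

        /* ... parse mybuf as a C string ... */
        return bsize;   /* report the number of bytes actually consumed */
}
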
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index b5cee2a2ac66..f5e509381563 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6537,7 +6537,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Allocate device driver memory */
rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
if (rc)
- return -ENOMEM;
+ goto out_destroy_workqueue;
/* IF Type 2 ports get initialized now. */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
@@ -6911,6 +6911,9 @@ out_free_bsmbx:
lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
lpfc_mem_free(phba);
+out_destroy_workqueue:
+ destroy_workqueue(phba->wq);
+ phba->wq = NULL;
return rc;
}
@@ -10223,7 +10226,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
goto out_iounmap_all;
} else {
error = -ENOMEM;
- goto out_iounmap_all;
+ goto out_iounmap_ctrl;
}
}
@@ -10241,7 +10244,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
dev_err(&pdev->dev,
"ioremap failed for SLI4 HBA dpp registers.\n");
error = -ENOMEM;
- goto out_iounmap_ctrl;
+ goto out_iounmap_all;
}
phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
}
@@ -10266,9 +10269,11 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
return 0;
out_iounmap_all:
- iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+ if (phba->sli4_hba.drbl_regs_memmap_p)
+ iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
- iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+ if (phba->sli4_hba.ctrl_regs_memmap_p)
+ iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
iounmap(phba->sli4_hba.conf_regs_memmap_p);
@@ -12522,7 +12527,7 @@ out:
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
- uint8_t file_name[ELX_MODEL_NAME_SIZE];
+ char file_name[ELX_FW_NAME_SIZE] = {0};
int ret;
const struct firmware *fw;
@@ -12531,7 +12536,7 @@ lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
LPFC_SLI_INTF_IF_TYPE_2)
return -EPERM;
- snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
+ scnprintf(file_name, sizeof(file_name), "%s.grp", phba->ModelName);
if (fw_upgrade == INT_FW_UPGRADE) {
ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index cbab15d299ca..816235ccd299 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1942,7 +1942,7 @@ out:
*
* Returns the number of SGEs added to the SGL.
**/
-static int
+static uint32_t
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct sli4_sge *sgl, int datasegcnt,
struct lpfc_io_buf *lpfc_cmd)
@@ -1950,8 +1950,8 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct scatterlist *sgde = NULL; /* s/g data entry */
struct sli4_sge_diseed *diseed = NULL;
dma_addr_t physaddr;
- int i = 0, num_sge = 0, status;
- uint32_t reftag;
+ int i = 0, status;
+ uint32_t reftag, num_sge = 0;
uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t rc;
@@ -2122,7 +2122,7 @@ out:
*
* Returns the number of SGEs added to the SGL.
**/
-static int
+static uint32_t
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct sli4_sge *sgl, int datacnt, int protcnt,
struct lpfc_io_buf *lpfc_cmd)
@@ -2146,8 +2146,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t rc;
#endif
uint32_t checking = 1;
- uint32_t dma_offset = 0;
- int num_sge = 0, j = 2;
+ uint32_t dma_offset = 0, num_sge = 0;
+ int j = 2;
struct sli4_hybrid_sgl *sgl_xtra = NULL;
sgpe = scsi_prot_sglist(sc);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index bd908dd27307..e489c68cfb63 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -20407,20 +20407,20 @@ lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
- struct lpfc_io_buf *lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
struct lpfc_io_buf *lpfc_ncmd_next;
unsigned long iflag;
struct lpfc_epd_pool *epd_pool;
epd_pool = &phba->epd_pool;
- lpfc_ncmd = NULL;
spin_lock_irqsave(&epd_pool->lock, iflag);
if (epd_pool->count > 0) {
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ list_for_each_entry_safe(iter, lpfc_ncmd_next,
&epd_pool->list, list) {
- list_del(&lpfc_ncmd->list);
+ list_del(&iter->list);
epd_pool->count--;
+ lpfc_ncmd = iter;
break;
}
}
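
After list_for_each_entry_safe() terminates normally, the cursor points at the list head disguised as an entry and must never be dereferenced outside the loop; the fix therefore keeps the loop cursor (iter) separate from the result (lpfc_ncmd), which stays NULL when the list is empty. The pattern in isolation (illustrative names):

struct item { struct list_head list; };

static struct item *take_first(struct list_head *head)
{
        struct item *found = NULL, *iter, *tmp;

        list_for_each_entry_safe(iter, tmp, head, list) {
                list_del(&iter->list);
                found = iter;   /* iter is only valid because we break here */
                break;
        }
        return found;           /* NULL when the list was empty */
}
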
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 8b1ba690039b..8c88190673c2 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1439,6 +1439,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
*/
if (cmdid == CMDID_INT_CMDS) {
scb = &adapter->int_scb;
+ cmd = scb->cmd;
list_del_init(&scb->list);
scb->state = SCB_FREE;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index aa62cc8ffd0a..0c6989e32d36 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1515,6 +1515,8 @@ struct megasas_ctrl_info {
#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
MEGASAS_MAX_DEV_PER_CHANNEL)
+#define MEGASAS_MAX_SUPPORTED_LD_IDS 240
+
#define MEGASAS_MAX_SECTORS (2*1024)
#define MEGASAS_MAX_SECTORS_IEEE (2*128)
#define MEGASAS_DBG_LVL 1
@@ -2322,7 +2324,7 @@ struct megasas_instance {
u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */
bool use_seqnum_jbod_fp; /* Added for PD sequence */
bool smp_affinity_enable;
- spinlock_t crashdump_lock;
+ struct mutex crashdump_lock;
struct megasas_register_set __iomem *reg_set;
u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index a261ce511e9e..603c99fcb74e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -252,13 +252,13 @@ u32 megasas_readl(struct megasas_instance *instance,
* Fusion registers could intermittently return all zeroes.
* This behavior is transient in nature and subsequent reads will
* return valid value. As a workaround in driver, retry readl for
- * upto three times until a non-zero value is read.
+ * up to thirty times until a non-zero value is read.
*/
if (instance->adapter_type == AERO_SERIES) {
do {
ret_val = readl(addr);
i++;
- } while (ret_val == 0 && i < 3);
+ } while (ret_val == 0 && i < 30);
return ret_val;
} else {
return readl(addr);
@@ -3208,14 +3208,13 @@ fw_crash_buffer_store(struct device *cdev,
struct megasas_instance *instance =
(struct megasas_instance *) shost->hostdata;
int val = 0;
- unsigned long flags;
if (kstrtoint(buf, 0, &val) != 0)
return -EINVAL;
- spin_lock_irqsave(&instance->crashdump_lock, flags);
+ mutex_lock(&instance->crashdump_lock);
instance->fw_crash_buffer_offset = val;
- spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ mutex_unlock(&instance->crashdump_lock);
return strlen(buf);
}
@@ -3230,24 +3229,23 @@ fw_crash_buffer_show(struct device *cdev,
unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
unsigned long chunk_left_bytes;
unsigned long src_addr;
- unsigned long flags;
u32 buff_offset;
- spin_lock_irqsave(&instance->crashdump_lock, flags);
+ mutex_lock(&instance->crashdump_lock);
buff_offset = instance->fw_crash_buffer_offset;
- if (!instance->crash_dump_buf &&
+ if (!instance->crash_dump_buf ||
!((instance->fw_crash_state == AVAILABLE) ||
(instance->fw_crash_state == COPYING))) {
dev_err(&instance->pdev->dev,
"Firmware crash dump is not available\n");
- spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ mutex_unlock(&instance->crashdump_lock);
return -EINVAL;
}
if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
dev_err(&instance->pdev->dev,
"Firmware crash dump offset is out of range\n");
- spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ mutex_unlock(&instance->crashdump_lock);
return 0;
}
@@ -3259,7 +3257,7 @@ fw_crash_buffer_show(struct device *cdev,
src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
(buff_offset % dmachunk);
memcpy(buf, (void *)src_addr, size);
- spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ mutex_unlock(&instance->crashdump_lock);
return size;
}
@@ -3284,7 +3282,6 @@ fw_crash_state_store(struct device *cdev,
struct megasas_instance *instance =
(struct megasas_instance *) shost->hostdata;
int val = 0;
- unsigned long flags;
if (kstrtoint(buf, 0, &val) != 0)
return -EINVAL;
@@ -3298,9 +3295,9 @@ fw_crash_state_store(struct device *cdev,
instance->fw_crash_state = val;
if ((val == COPIED) || (val == COPY_ERROR)) {
- spin_lock_irqsave(&instance->crashdump_lock, flags);
+ mutex_lock(&instance->crashdump_lock);
megasas_free_host_crash_buffer(instance);
- spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ mutex_unlock(&instance->crashdump_lock);
if (val == COPY_ERROR)
dev_info(&instance->pdev->dev, "application failed to "
"copy Firmware crash dump\n");
@@ -7301,7 +7298,7 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
init_waitqueue_head(&instance->int_cmd_wait_q);
init_waitqueue_head(&instance->abort_cmd_wait_q);
- spin_lock_init(&instance->crashdump_lock);
+ mutex_init(&instance->crashdump_lock);
spin_lock_init(&instance->mfi_pool_lock);
spin_lock_init(&instance->hba_lock);
spin_lock_init(&instance->stream_lock);
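
The crashdump attributes copy up to a DMA chunk per read and can free a multi-buffer crash dump, all from sysfs handlers that run in process context and never in atomic context, so a sleeping mutex serializes them without holding interrupts off across large memcpy()s the way the old irqsave spinlock did. A minimal sketch of a mutex-guarded sysfs read (illustrative names):

static DEFINE_MUTEX(example_lock);
static u32 example_value;

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        ssize_t n;

        mutex_lock(&example_lock);      /* may sleep: fine in sysfs context */
        n = scnprintf(buf, PAGE_SIZE, "%u\n", example_value);
        mutex_unlock(&example_lock);
        return n;
}
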
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 8bfb46dbbed3..ff20f4709081 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -359,7 +359,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
ld = MR_TargetIdToLdGet(i, drv_map);
/* For non existing VDs, iterate to next VD*/
- if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+ if (ld >= MEGASAS_MAX_SUPPORTED_LD_IDS)
continue;
raid = MR_LdRaidGet(ld, drv_map);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index a78a702511fa..890002688bd4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -4659,7 +4659,7 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
devhandle = megasas_get_tm_devhandle(scmd->device);
if (devhandle == (u16)ULONG_MAX) {
- ret = SUCCESS;
+ ret = FAILED;
sdev_printk(KERN_INFO, scmd->device,
"task abort issued for invalid devhandle\n");
mutex_unlock(&instance->reset_mutex);
@@ -4729,7 +4729,7 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
devhandle = megasas_get_tm_devhandle(scmd->device);
if (devhandle == (u16)ULONG_MAX) {
- ret = SUCCESS;
+ ret = FAILED;
sdev_printk(KERN_INFO, scmd->device,
"target reset issued for invalid devhandle\n");
mutex_unlock(&instance->reset_mutex);
@@ -5182,7 +5182,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
if (!fusion->log_to_span) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
- kfree(instance->ctrl_context);
return -ENOMEM;
}
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index e72f1dbc91f7..04fa7337cb1e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -6040,7 +6040,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
return -EFAULT;
}
- issue_diag_reset:
+ return 0;
+
+issue_diag_reset:
rc = _base_diag_reset(ioc);
return rc;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 97c1f242ef0a..4731e464dfb9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3238,6 +3238,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
fw_event = list_first_entry(&ioc->fw_event_list,
struct fw_event_work, list);
list_del_init(&fw_event->list);
+ fw_event_work_put(fw_event);
}
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
@@ -3272,7 +3273,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
if (cancel_work_sync(&fw_event->work))
fw_event_work_put(fw_event);
- fw_event_work_put(fw_event);
}
}
@@ -11138,8 +11138,10 @@ _mpt3sas_init(void)
mpt3sas_ctl_init(hbas_to_enumerate);
error = pci_register_driver(&mpt3sas_driver);
- if (error)
+ if (error) {
+ mpt3sas_ctl_exit(hbas_to_enumerate);
scsih_exit();
+ }
return error;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 5324662751bf..ebe78ec42da8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -670,7 +670,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
goto out_fail;
}
port = sas_port_alloc_num(sas_node->parent_dev);
- if ((sas_port_add(port))) {
+ if (!port || (sas_port_add(port))) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
goto out_fail;
@@ -695,6 +695,12 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
rphy = sas_expander_alloc(port,
mpt3sas_port->remote_identify.device_type);
+ if (!rphy) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_delete_port;
+ }
+
rphy->identify = mpt3sas_port->remote_identify;
if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
@@ -712,6 +718,9 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
if ((sas_rphy_add(rphy))) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
+ sas_rphy_free(rphy);
+ rphy = NULL;
+ goto out_delete_port;
}
if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
@@ -739,7 +748,10 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
rphy_to_expander_device(rphy));
return mpt3sas_port;
- out_fail:
+out_delete_port:
+ sas_port_delete(port);
+
+out_fail:
list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list,
port_siblings)
list_del(&mpt3sas_phy->port_siblings);
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index fec653b54307..cba30618e8fe 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4173,7 +4173,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
struct outbound_queue_table *circularQ;
void *pMsg1 = NULL;
- u8 uninitialized_var(bc);
+ u8 bc;
u32 ret = MPI_IO_STATUS_FAIL;
unsigned long flags;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index ce67965a504f..bdc6812acf98 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -3813,7 +3813,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
struct outbound_queue_table *circularQ;
void *pMsg1 = NULL;
- u8 uninitialized_var(bc);
+ u8 bc;
u32 ret = MPI_IO_STATUS_FAIL;
unsigned long flags;
u32 regval;
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index d979f095aeda..73e5756cade6 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -60,6 +60,8 @@ extern uint qedf_debug;
#define QEDF_LOG_NOTICE 0x40000000 /* Notice logs */
#define QEDF_LOG_WARN 0x80000000 /* Warning logs */
+#define QEDF_DEBUGFS_LOG_LEN (2 * PAGE_SIZE)
+
/* Debug context structure */
struct qedf_dbg_ctx {
unsigned int host_no;
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index b88bed9bb133..b0a28a6a9c64 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -8,6 +8,7 @@
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include "qedf.h"
#include "qedf_dbg.h"
@@ -100,7 +101,9 @@ static ssize_t
qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
loff_t *ppos)
{
+ ssize_t ret;
size_t cnt = 0;
+ char *cbuf;
int id;
struct qedf_fastpath *fp = NULL;
struct qedf_dbg_ctx *qedf_dbg =
@@ -110,19 +113,25 @@ qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
- cnt = sprintf(buffer, "\nFastpath I/O completions\n\n");
+ cbuf = vmalloc(QEDF_DEBUGFS_LOG_LEN);
+ if (!cbuf)
+ return 0;
+
+ cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt, "\nFastpath I/O completions\n\n");
for (id = 0; id < qedf->num_queues; id++) {
fp = &(qedf->fp_array[id]);
if (fp->sb_id == QEDF_SB_ID_NULL)
continue;
- cnt += sprintf((buffer + cnt), "#%d: %lu\n", id,
- fp->completions);
+ cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt,
+ "#%d: %lu\n", id, fp->completions);
}
- cnt = min_t(int, count, cnt - *ppos);
- *ppos += cnt;
- return cnt;
+ ret = simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
+
+ vfree(cbuf);
+
+ return ret;
}
static ssize_t
@@ -140,15 +149,14 @@ qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count,
loff_t *ppos)
{
int cnt;
+ char cbuf[32];
struct qedf_dbg_ctx *qedf_dbg =
(struct qedf_dbg_ctx *)filp->private_data;
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "debug mask=0x%x\n", qedf_debug);
- cnt = sprintf(buffer, "debug mask = 0x%x\n", qedf_debug);
+ cnt = scnprintf(cbuf, sizeof(cbuf), "debug mask = 0x%x\n", qedf_debug);
- cnt = min_t(int, count, cnt - *ppos);
- *ppos += cnt;
- return cnt;
+ return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
}
static ssize_t
@@ -187,18 +195,17 @@ qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
int cnt;
+ char cbuf[7];
struct qedf_dbg_ctx *qedf_dbg =
(struct qedf_dbg_ctx *)filp->private_data;
struct qedf_ctx *qedf = container_of(qedf_dbg,
struct qedf_ctx, dbg_ctx);
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
- cnt = sprintf(buffer, "%s\n",
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%s\n",
qedf->stop_io_on_error ? "true" : "false");
- cnt = min_t(int, count, cnt - *ppos);
- *ppos += cnt;
- return cnt;
+ return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
}
static ssize_t
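
The old qedf handlers sprintf()'d into the caller-supplied buffer with no bound and hand-rolled the *ppos arithmetic, getting partial and repeated reads wrong; formatting into a private buffer with scnprintf() and delegating to simple_read_from_buffer() fixes both at once. The resulting shape, with illustrative names (example_value stands in for whatever state is being reported):

static u32 example_value;

static ssize_t example_read(struct file *filp, char __user *buffer,
                            size_t count, loff_t *ppos)
{
        char cbuf[32];
        int cnt;

        /* scnprintf() never writes past sizeof(cbuf) and returns the
         * number of characters actually stored (excluding the NUL). */
        cnt = scnprintf(cbuf, sizeof(cbuf), "value = 0x%x\n", example_value);

        /* Handles *ppos, short reads, EOF, and copy_to_user(). */
        return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
}
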
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index c95e04cc6424..858058f22819 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2914,9 +2914,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
* addresses of our queues
*/
if (!qedf->p_cpuq) {
- status = -EINVAL;
QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
- goto mem_alloc_failure;
+ return -EINVAL;
}
qedf->global_queues = kzalloc((sizeof(struct global_queue *)
@@ -3544,11 +3543,6 @@ err2:
err1:
scsi_host_put(lport->host);
err0:
- if (qedf) {
- QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
-
- clear_bit(QEDF_PROBING, &qedf->flags);
- }
return rc;
}
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 92c4a367b7bd..6b47921202eb 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1910,8 +1910,9 @@ static int qedi_cpu_offline(unsigned int cpu)
struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
struct qedi_work *work, *tmp;
struct task_struct *thread;
+ unsigned long flags;
- spin_lock_bh(&p->p_work_lock);
+ spin_lock_irqsave(&p->p_work_lock, flags);
thread = p->iothread;
p->iothread = NULL;
@@ -1922,7 +1923,7 @@ static int qedi_cpu_offline(unsigned int cpu)
kfree(work);
}
- spin_unlock_bh(&p->p_work_lock);
+ spin_unlock_irqrestore(&p->p_work_lock, flags);
if (thread)
kthread_stop(thread);
return 0;
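
spin_lock_bh() only masks softirqs; it neither excludes a hard-IRQ user of the same lock nor is it safe when the caller may already run with interrupts disabled, which is presumably why this hotplug teardown path, sharing its per-CPU work lock with the completion side, switches to the irqsave form, which covers both cases and restores the caller's IRQ state. Sketched with illustrative names:

struct percpu_ctx {
        spinlock_t lock;
        struct list_head work_list;
};

static void drain(struct percpu_ctx *p)
{
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);     /* excludes the IRQ-side user */
        /* ... move or free everything on p->work_list ... */
        spin_unlock_irqrestore(&p->lock, flags);
}
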
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 59f5dc9876cc..2c295e1c9c9a 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2574,6 +2574,7 @@ static void
qla2x00_terminate_rport_io(struct fc_rport *rport)
{
fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+ scsi_qla_host_t *vha;
if (!fcport)
return;
@@ -2583,9 +2584,12 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
return;
+ vha = fcport->vha;
if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+ qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
+ 0, WAIT_TARGET);
return;
}
/*
@@ -2600,6 +2604,15 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
else
qla2x00_port_logout(fcport->vha, fcport);
}
+
+ /* Check for any straggling I/O left behind. */
+ if (qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, 0, WAIT_TARGET)) {
+ ql_log(ql_log_warn, vha, 0x300b,
+ "IO not returned. Resetting.\n");
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_chip_reset(vha);
+ }
}
static int
@@ -2851,8 +2864,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_user, vha, 0x7082,
"Registered for DIF/DIX type 1 and 3 protection.\n");
- if (ql2xenabledif == 1)
- prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(vha->host,
prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index ce55121910e8..e584d39dc982 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -259,6 +259,10 @@ qla2x00_process_els(struct bsg_job *bsg_job)
if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
rport = fc_bsg_to_rport(bsg_job);
+ if (!rport) {
+ rval = -ENOMEM;
+ goto done;
+ }
fcport = *(fc_port_t **) rport->dd_data;
host = rport_to_shost(rport);
vha = shost_priv(host);
@@ -2526,6 +2530,8 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
rport = fc_bsg_to_rport(bsg_job);
+ if (!rport)
+ return ret;
host = rport_to_shost(rport);
vha = shost_priv(host);
} else {
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 88a56e8480f7..f5a30c2fcee8 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -22,7 +22,7 @@
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
- * | | | 0x302d,0x3033 |
+ * | | | 0x302e,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index caf28c5281aa..bfddae586995 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -593,13 +593,9 @@ typedef struct srb {
uint8_t pad[3];
struct kref cmd_kref; /* need to migrate ref_count over to this */
void *priv;
- wait_queue_head_t nvme_ls_waitq;
struct fc_port *fcport;
struct scsi_qla_host *vha;
unsigned int start_timer:1;
- unsigned int abort:1;
- unsigned int aborted:1;
- unsigned int completed:1;
uint32_t handle;
uint16_t flags;
@@ -3899,8 +3895,8 @@ struct qla_hw_data {
#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
-#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \
- IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_MQUE_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
#define IS_BIDI_CAPABLE(ha) \
(IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
/* Bit 21 of fw_attributes decides the MCTP capabilities */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1a98e37c9be2..6d78818d5269 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -397,7 +397,8 @@ extern int
qla2x00_get_resource_cnts(scsi_qla_host_t *);
extern int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
+qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map,
+ u8 *num_entries);
extern int
qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a6e24cef89a0..8a0ac87f70a9 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -485,6 +485,7 @@ static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
struct fc_port *fcport = ea->fcport;
+ unsigned long flags;
ql_dbg(ql_dbg_disc, vha, 0x20d2,
"%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
@@ -499,9 +500,15 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x2066,
"%s %8phC: adisc fail: post delete\n",
__func__, ea->fcport->port_name);
+
+ spin_lock_irqsave(&vha->work_lock, flags);
/* deleted = 0 & logout_on_delete = force fw cleanup */
- fcport->deleted = 0;
+ if (fcport->deleted == QLA_SESS_DELETED)
+ fcport->deleted = 0;
+
fcport->logout_on_delete = 1;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
qlt_schedule_sess_for_deletion(ea->fcport);
return;
}
@@ -1402,7 +1409,6 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
ea->fcport->login_gen++;
- ea->fcport->deleted = 0;
ea->fcport->logout_on_delete = 1;
if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
@@ -4176,15 +4182,16 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
(ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
}
+ QLA_FW_STARTED(ha);
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
if (rval) {
+ QLA_FW_STOPPED(ha);
ql_log(ql_log_fatal, vha, 0x00d2,
"Init Firmware **** FAILED ****.\n");
} else {
ql_dbg(ql_dbg_init, vha, 0x00d3,
"Init Firmware -- success.\n");
- QLA_FW_STARTED(ha);
vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
}
@@ -5068,6 +5075,22 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
return (rval);
}
+static void
+qla_reinitialize_link(scsi_qla_host_t *vha)
+{
+ int rval;
+
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ rval = qla2x00_full_login_lip(vha);
+ if (rval == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n");
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xd051,
+ "Link reinitialization failed (%d)\n", rval);
+ }
+}
+
/*
* qla2x00_configure_local_loop
* Updates Fibre Channel Device Database with local loop devices.
@@ -5132,6 +5155,19 @@ skip_login:
spin_unlock_irqrestore(&vha->work_lock, flags);
if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ u8 loop_map_entries = 0;
+ int rc;
+
+ rc = qla2x00_get_fcal_position_map(vha, NULL,
+ &loop_map_entries);
+ if (rc == QLA_SUCCESS && loop_map_entries > 1) {
+ /*
+ * There are devices that are still not logged
+ * in. Reinitialize to give them a chance.
+ */
+ qla_reinitialize_link(vha);
+ return QLA_FUNCTION_FAILED;
+ }
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
@@ -5388,8 +5424,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
if (atomic_read(&fcport->state) == FCS_ONLINE)
return;
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
-
rport_ids.node_name = wwn_to_u64(fcport->node_name);
rport_ids.port_name = wwn_to_u64(fcport->port_name);
rport_ids.port_id = fcport->d_id.b.domain << 16 |
@@ -5447,6 +5481,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
+ unsigned long flags;
+
if (IS_SW_RESV_ADDR(fcport->d_id))
return;
@@ -5456,8 +5492,15 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
fcport->login_retry = vha->hw->login_retry_count;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+
+ spin_lock_irqsave(&vha->work_lock, flags);
fcport->deleted = 0;
- fcport->logout_on_delete = 1;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->hw->current_topology == ISP_CFG_NL)
+ fcport->logout_on_delete = 0;
+ else
+ fcport->logout_on_delete = 1;
fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
switch (vha->hw->current_topology) {
@@ -5485,7 +5528,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_reg_remote_port(vha, fcport);
break;
case MODE_TARGET:
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
if (!vha->vha_tgt.qla_tgt->tgt_stop &&
!vha->vha_tgt.qla_tgt->tgt_stopped)
qlt_fc_port_added(vha, fcport);
@@ -5500,6 +5542,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
break;
}
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
if (fcport->id_changed) {
fcport->id_changed = 0;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 477b0b8a5f4b..c54b987d1f11 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -110,11 +110,13 @@ qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
{
int old_val;
uint8_t shiftbits, mask;
+ uint8_t port_dstate_str_sz;
/* This will have to change when the max no. of states > 16 */
shiftbits = 4;
mask = (1 << shiftbits) - 1;
+ port_dstate_str_sz = sizeof(port_dstate_str) / sizeof(char *);
fcport->disc_state = state;
while (1) {
old_val = atomic_read(&fcport->shadow_disc_state);
@@ -122,7 +124,8 @@ qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
old_val, (old_val << shiftbits) | state)) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
"FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
- fcport->port_name, port_dstate_str[old_val & mask],
+ fcport->port_name, (old_val & mask) < port_dstate_str_sz ?
+ port_dstate_str[old_val & mask] : "Unknown",
port_dstate_str[state], fcport->d_id.b24);
return;
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 103288b0377e..716f46a67bcd 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -601,7 +601,8 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);
/* No data transfer */
- if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE ||
+ tot_dsds == 0) {
cmd_pkt->byte_count = cpu_to_le32(0);
return 0;
}
@@ -3665,7 +3666,7 @@ qla2x00_start_sp(srb_t *sp)
spin_lock_irqsave(qp->qp_lock_ptr, flags);
pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
if (!pkt) {
- rval = EAGAIN;
+ rval = -EAGAIN;
ql_log(ql_log_warn, vha, 0x700c,
"qla2x00_alloc_iocbs failed.\n");
goto done;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d7cf7f957087..c5021bd1ad5e 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -639,8 +639,12 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
unsigned long flags;
fc_port_t *fcport = NULL;
- if (!vha->hw->flags.fw_started)
+ if (!vha->hw->flags.fw_started) {
+ ql_log(ql_log_warn, vha, 0x50ff,
+ "Dropping AEN - %04x %04x %04x %04x.\n",
+ mb[0], mb[1], mb[2], mb[3]);
return;
+ }
/* Setup to process RIO completion. */
handle_cnt = 0;
@@ -2475,11 +2479,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
- if (sp->abort)
- sp->aborted = 1;
- else
- sp->completed = 1;
-
if (sp->cmd_type != TYPE_SRB) {
req->outstanding_cmds[handle] = NULL;
ql_dbg(ql_dbg_io, vha, 0x3015,
@@ -2711,7 +2710,6 @@ check_scsi_status:
case CS_PORT_BUSY:
case CS_INCOMPLETE:
case CS_PORT_UNAVAILABLE:
- case CS_TIMEOUT:
case CS_RESET:
/*
@@ -3568,16 +3566,12 @@ msix_register_fail:
}
/* Enable MSI-X vector for response queue update for queue 0 */
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (ha->msixbase && ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
- ql2xmqsupport))
- ha->mqenable = 1;
- } else
- if (ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
- ql2xmqsupport))
- ha->mqenable = 1;
+ if (IS_MQUE_CAPABLE(ha) &&
+ (ha->msixbase && ha->mqiobase && ha->max_qpairs))
+ ha->mqenable = 1;
+ else
+ ha->mqenable = 0;
+
ql_dbg(ql_dbg_multiq, vha, 0xc005,
"mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 29f2730fbf66..77abecf88aa2 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -271,6 +271,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x117a,
+ "cmd=%x Timeout.\n", command);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
if (chip_reset != ha->chip_reset) {
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
@@ -281,12 +287,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_ABORTED;
goto premature_exit;
}
- ql_dbg(ql_dbg_mbx, vha, 0x117a,
- "cmd=%x Timeout.\n", command);
- spin_lock_irqsave(&ha->hardware_lock, flags);
- clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
} else if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -2928,7 +2928,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
* Kernel context.
*/
int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
+qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map,
+ u8 *num_entries)
{
int rval;
mbx_cmd_t mc;
@@ -2968,6 +2969,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
if (pos_map)
memcpy(pos_map, pmap, FCAL_MAP_SIZE);
+ if (num_entries)
+ *num_entries = pmap[0];
}
dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index a15af048cd82..11ee87f76f4b 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -36,11 +36,6 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
(fcport->nvme_flag & NVME_FLAG_REGISTERED))
return 0;
- if (atomic_read(&fcport->state) == FCS_ONLINE)
- return 0;
-
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
-
fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
memset(&req, 0, sizeof(struct nvme_fc_port_info));
@@ -157,18 +152,6 @@ out:
qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
-static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
-{
- if (sp->flags & SRB_DMA_VALID) {
- struct srb_iocb *nvme = &sp->u.iocb_cmd;
- struct qla_hw_data *ha = sp->fcport->vha->hw;
-
- dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
- fd->rqstlen, DMA_TO_DEVICE);
- sp->flags &= ~SRB_DMA_VALID;
- }
-}
-
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
struct srb *sp = container_of(kref, struct srb, cmd_kref);
@@ -186,7 +169,6 @@ static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
fd = priv->fd;
- qla_nvme_ls_unmap(sp, fd);
fd->done(fd, priv->comp_status);
out:
qla2x00_rel_sp(sp);
@@ -328,21 +310,16 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
nvme->u.nvme.rsp_len = fd->rsplen;
nvme->u.nvme.rsp_dma = fd->rspdma;
nvme->u.nvme.timeout_sec = fd->timeout;
- nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
- fd->rqstlen, DMA_TO_DEVICE);
+ nvme->u.nvme.cmd_dma = fd->rqstdma;
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
fd->rqstlen, DMA_TO_DEVICE);
- sp->flags |= SRB_DMA_VALID;
-
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
- wake_up(&sp->nvme_ls_waitq);
sp->priv = NULL;
priv->sp = NULL;
- qla_nvme_ls_unmap(sp, fd);
qla2x00_rel_sp(sp);
return rval;
}
@@ -585,7 +562,6 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
if (!sp)
return -EBUSY;
- init_waitqueue_head(&sp->nvme_ls_waitq);
kref_init(&sp->cmd_kref);
spin_lock_init(&priv->cmd_lock);
sp->priv = (void *)priv;
@@ -601,9 +577,8 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
rval = qla2x00_start_nvme_mq(sp);
if (rval != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x212d,
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
"qla2x00_start_nvme_mq failed = %d\n", rval);
- wake_up(&sp->nvme_ls_waitq);
sp->priv = NULL;
priv->sp = NULL;
qla2xxx_rel_qpair_sp(sp->qpair, sp);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index a5dbaa3491f8..6da85ad96c9b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -814,7 +814,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
uint16_t hwq;
struct qla_qpair *qpair = NULL;
- tag = blk_mq_unique_tag(cmd->request);
+ tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
hwq = blk_mq_unique_tag_to_hwq(tag);
qpair = ha->queue_pair_map[hwq];
@@ -1243,17 +1243,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return fast_fail_status != SUCCESS ? fast_fail_status : FAILED;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- if (sp->completed) {
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- return SUCCESS;
- }
-
- if (sp->abort || sp->aborted) {
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- return FAILED;
- }
-
- sp->abort = 1;
sp->comp = &comp;
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
@@ -1661,6 +1650,10 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
return QLA_SUCCESS;
}
+/*
+ * The caller must ensure that no completion interrupts will happen
+ * while this function is in progress.
+ */
static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
unsigned long *flags)
__releases(qp->qp_lock_ptr)
@@ -1669,6 +1662,7 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
DECLARE_COMPLETION_ONSTACK(comp);
scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
int rval;
bool ret_cmd;
uint32_t ratov_j;
@@ -1688,7 +1682,6 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
}
sp->comp = &comp;
- sp->abort = 1;
spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
rval = ha->isp_ops->abort_command(sp);
@@ -1712,13 +1705,25 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
}
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
- if (ret_cmd && (!sp->completed || !sp->aborted))
- sp->done(sp, res);
+ switch (sp->type) {
+ case SRB_SCSI_CMD:
+ if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
+ sp->done(sp, res);
+ break;
+ default:
+ if (ret_cmd)
+ sp->done(sp, res);
+ break;
+ }
} else {
sp->done(sp, res);
}
}
+/*
+ * The caller must ensure that no completion interrupts will happen
+ * while this function is in progress.
+ */
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
@@ -1738,6 +1743,17 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
+ /*
+ * perform lockless completion during driver unload
+ */
+ if (qla2x00_chip_is_down(vha)) {
+ req->outstanding_cmds[cnt] = NULL;
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+ sp->done(sp, res);
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ continue;
+ }
+
switch (sp->cmd_type) {
case TYPE_SRB:
qla2x00_abort_srb(qp, sp, res, &flags);
@@ -1765,6 +1781,10 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
}
+/*
+ * The caller must ensure that no completion interrupts will happen
+ * while this function is in progress.
+ */
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
@@ -3101,6 +3121,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_id = ha->max_fibre_devices;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
+
+ if (ql2xenabledif && ql2xenabledif != 2) {
+ ql_log(ql_log_warn, base_vha, 0x302d,
+ "Invalid value for ql2xenabledif, resetting it to default (2)\n");
+ ql2xenabledif = 2;
+ }
+
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
else
@@ -3332,8 +3359,6 @@ skip_dpc:
base_vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_init, base_vha, 0x00f1,
"Registering for DIF/DIX type 1 and 3 protection.\n");
- if (ql2xenabledif == 1)
- prot = SHOST_DIX_TYPE0_PROTECTION;
if (ql2xprotmask)
scsi_host_set_prot(host, ql2xprotmask);
else
@@ -4820,7 +4845,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
}
INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
- sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
+ snprintf(vha->host_str, sizeof(vha->host_str), "%s_%lu",
+ QLA2XXX_DRIVER_NAME, vha->host_no);
ql_dbg(ql_dbg_init, vha, 0x0041,
"Allocated the host=%p hw=%p vha=%p dev_name=%s",
vha->host, vha->hw, vha,
@@ -4950,7 +4976,7 @@ qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
switch (code) {
case QLA_UEVENT_CODE_FW_DUMP:
- snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+ snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
vha->host_no);
break;
default:
@@ -6361,9 +6387,12 @@ qla2x00_do_dpc(void *data)
}
}
loop_resync_check:
- if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
+ if (!qla2x00_reset_active(base_vha) &&
+ test_and_clear_bit(LOOP_RESYNC_NEEDED,
&base_vha->dpc_flags)) {
-
+ /*
+ * Allow abort_isp to complete before moving on to scanning.
+ */
ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
"Loop resync scheduled.\n");
@@ -6607,7 +6636,7 @@ qla2x00_timer(struct timer_list *t)
/* if the loop has been down for 4 minutes, reinit adapter */
if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
- if (!(vha->device_flags & DFLG_NO_CABLE)) {
+ if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) {
ql_log(ql_log_warn, vha, 0x6009,
"Loop down - aborting ISP.\n");
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index cb97565b6a33..a95ea2f70f97 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1046,10 +1046,6 @@ void qlt_free_session_done(struct work_struct *work)
(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
}
- spin_lock_irqsave(&vha->work_lock, flags);
- sess->flags &= ~FCF_ASYNC_SENT;
- spin_unlock_irqrestore(&vha->work_lock, flags);
-
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (sess->se_sess) {
sess->se_sess = NULL;
@@ -1059,7 +1055,6 @@ void qlt_free_session_done(struct work_struct *work)
qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
- sess->deleted = QLA_SESS_DELETED;
if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
vha->fcport_count--;
@@ -1111,7 +1106,12 @@ void qlt_free_session_done(struct work_struct *work)
sess->explicit_logout = 0;
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ sess->flags &= ~FCF_ASYNC_SENT;
+ sess->deleted = QLA_SESS_DELETED;
sess->free_pending = 0;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
ql_dbg(ql_dbg_disc, vha, 0xf001,
"Unregistration of sess %p %8phC finished fcp_cnt %d\n",
@@ -1161,12 +1161,12 @@ void qlt_unreg_sess(struct fc_port *sess)
* management from being sent.
*/
sess->flags |= FCF_ASYNC_SENT;
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
spin_unlock_irqrestore(&sess->vha->work_lock, flags);
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
- sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
sess->last_rscn_gen = sess->rscn_gen;
sess->last_login_gen = sess->login_gen;
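
The qla_target.c hunks move the stores to sess->deleted inside the work_lock sections that already update sess->flags, so readers holding the lock always see the two fields change together. A minimal pthread sketch of publishing related fields under one lock (field names are stand-ins):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int flags;
    static int deleted;

    static void mark_deletion_in_progress(void)
    {
            pthread_mutex_lock(&work_lock);
            flags |= 0x1;   /* FCF_ASYNC_SENT stand-in */
            deleted = 1;    /* published atomically with the flag */
            pthread_mutex_unlock(&work_lock);
    }

    int main(void)
    {
            mark_deletion_in_progress();
            printf("flags=%#x deleted=%d\n", flags, deleted);
            return 0;
    }
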
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index df43cf6405a8..ea15bbe0397f 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -939,6 +939,11 @@ static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
memset(&chap_rec, 0, sizeof(chap_rec));
nla_for_each_attr(attr, data, len, rem) {
+ if (nla_len(attr) < sizeof(*param_info)) {
+ rc = -EINVAL;
+ goto exit_set_chap;
+ }
+
param_info = nla_data(attr);
switch (param_info->param) {
@@ -2723,6 +2728,11 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
}
nla_for_each_attr(attr, data, len, rem) {
+ if (nla_len(attr) < sizeof(*iface_param)) {
+ rval = -EINVAL;
+ goto exit_init_fw_cb;
+ }
+
iface_param = nla_data(attr);
if (iface_param->param_type == ISCSI_NET_PARAM) {
@@ -8093,6 +8103,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
nla_for_each_attr(attr, data, len, rem) {
+ if (nla_len(attr) < sizeof(*fnode_param)) {
+ rc = -EINVAL;
+ goto exit_set_param;
+ }
+
fnode_param = nla_data(attr);
switch (fnode_param->param) {
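
Each qla4xxx hunk rejects a netlink attribute whose payload is shorter than the structure about to be read out of it. A standalone sketch of the validate-before-use pattern; the struct layout here is hypothetical:

    #include <stdio.h>
    #include <string.h>

    struct param_info { unsigned int param; unsigned int value; };

    /* Refuse short payloads before interpreting them as a struct. */
    static int handle_attr(const void *payload, size_t payload_len)
    {
            struct param_info info;

            if (payload_len < sizeof(info))
                    return -1;      /* -EINVAL in the driver */
            memcpy(&info, payload, sizeof(info));
            printf("param=%u value=%u\n", info.param, info.value);
            return 0;
    }

    int main(void)
    {
            struct param_info p = { 7, 42 };

            handle_attr(&p, sizeof(p));     /* accepted */
            handle_attr(&p, 3);             /* rejected */
            return 0;
    }
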
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index 898a0bdf8df6..95a86e0dfd77 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -209,53 +209,6 @@ raid_attr_ro_state(level);
raid_attr_ro_fn(resync);
raid_attr_ro_state_fn(state);
-static void raid_component_release(struct device *dev)
-{
- struct raid_component *rc =
- container_of(dev, struct raid_component, dev);
- dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n");
- put_device(rc->dev.parent);
- kfree(rc);
-}
-
-int raid_component_add(struct raid_template *r,struct device *raid_dev,
- struct device *component_dev)
-{
- struct device *cdev =
- attribute_container_find_class_device(&r->raid_attrs.ac,
- raid_dev);
- struct raid_component *rc;
- struct raid_data *rd = dev_get_drvdata(cdev);
- int err;
-
- rc = kzalloc(sizeof(*rc), GFP_KERNEL);
- if (!rc)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&rc->node);
- device_initialize(&rc->dev);
- rc->dev.release = raid_component_release;
- rc->dev.parent = get_device(component_dev);
- rc->num = rd->component_count++;
-
- dev_set_name(&rc->dev, "component-%d", rc->num);
- list_add_tail(&rc->node, &rd->component_list);
- rc->dev.class = &raid_class.class;
- err = device_add(&rc->dev);
- if (err)
- goto err_out;
-
- return 0;
-
-err_out:
- list_del(&rc->node);
- rd->component_count--;
- put_device(component_dev);
- kfree(rc);
- return err;
-}
-EXPORT_SYMBOL(raid_component_add);
-
struct raid_template *
raid_class_attach(struct raid_function_template *ft)
{
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1ce3f90f782f..2921256b59a0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -331,11 +331,18 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
if (result)
return -EIO;
- /* Sanity check that we got the page back that we asked for */
+ /*
+ * Sanity check that we got the page back that we asked for and that
+ * the page size is not 0.
+ */
if (buffer[1] != page)
return -EIO;
- return get_unaligned_be16(&buffer[2]) + 4;
+ result = get_unaligned_be16(&buffer[2]);
+ if (!result)
+ return -EIO;
+
+ return result + 4;
}
/**
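
The scsi.c hunk adds a second sanity check: besides verifying the echoed page code, a VPD reply whose reported payload length is zero is now rejected. A sketch of parsing the 4-byte VPD header with both checks:

    #include <stdint.h>
    #include <stdio.h>

    /* Byte 1 echoes the page code; bytes 2-3 hold the big-endian
     * payload length.  Returns the total page size, or -1 if the
     * device echoed the wrong page or a zero length. */
    static int vpd_page_size(const uint8_t *buf, uint8_t expected_page)
    {
            unsigned int len = (unsigned int)buf[2] << 8 | buf[3];

            if (buf[1] != expected_page || len == 0)
                    return -1;
            return (int)len + 4;    /* payload plus 4-byte header */
    }

    int main(void)
    {
            const uint8_t good[4] = { 0x00, 0x83, 0x00, 0x10 };
            const uint8_t bad[4]  = { 0x00, 0x83, 0x00, 0x00 };

            printf("%d %d\n", vpd_page_size(good, 0x83),
                   vpd_page_size(bad, 0x83));
            return 0;
    }
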
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 408166bd20f3..2c86ed1dc4b5 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -3139,7 +3139,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
return illegal_condition_result;
}
- lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
+ lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
if (lrdp == NULL)
return SCSI_MLQUEUE_HOST_BUSY;
if (sdebug_verbose)
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 7f76bf5cc8a8..e920a6a79594 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -232,6 +232,7 @@ static struct {
{"SGI", "RAID5", "*", BLIST_SPARSELUN},
{"SGI", "TP9100", "*", BLIST_REPORTLUN2},
{"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"SKhynix", "H28U74301AMR", NULL, BLIST_SKIP_VPD_PAGES},
{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 98e363d0025b..8d05faf95ac3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1534,6 +1534,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
*/
SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
"queuecommand : device blocked\n"));
+ atomic_dec(&cmd->device->iorequest_cnt);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
@@ -1566,6 +1567,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
trace_scsi_dispatch_cmd_start(cmd);
rtn = host->hostt->queuecommand(host, cmd);
if (rtn) {
+ atomic_dec(&cmd->device->iorequest_cnt);
trace_scsi_dispatch_cmd_error(cmd, rtn);
if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
rtn != SCSI_MLQUEUE_TARGET_BUSY)
@@ -1645,12 +1647,7 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
struct request_queue *q = hctx->queue;
struct scsi_device *sdev = q->queuedata;
- if (scsi_dev_queue_ready(q, sdev))
- return true;
-
- if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
- blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
- return false;
+ return scsi_dev_queue_ready(q, sdev);
}
static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 5b313226f11c..0330890cc55d 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -311,7 +311,7 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
size_t length, loff_t *ppos)
{
int host, channel, id, lun;
- char *buffer, *p;
+ char *buffer, *end, *p;
int err;
if (!buf || length > PAGE_SIZE)
@@ -326,10 +326,14 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
goto out;
err = -EINVAL;
- if (length < PAGE_SIZE)
- buffer[length] = '\0';
- else if (buffer[PAGE_SIZE-1])
- goto out;
+ if (length < PAGE_SIZE) {
+ end = buffer + length;
+ *end = '\0';
+ } else {
+ end = buffer + PAGE_SIZE - 1;
+ if (*end)
+ goto out;
+ }
/*
* Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
@@ -338,10 +342,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
if (!strncmp("scsi add-single-device", buffer, 22)) {
p = buffer + 23;
- host = simple_strtoul(p, &p, 0);
- channel = simple_strtoul(p + 1, &p, 0);
- id = simple_strtoul(p + 1, &p, 0);
- lun = simple_strtoul(p + 1, &p, 0);
+ host = (p < end) ? simple_strtoul(p, &p, 0) : 0;
+ channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+ id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+ lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
err = scsi_add_single_device(host, channel, id, lun);
@@ -352,10 +356,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
} else if (!strncmp("scsi remove-single-device", buffer, 25)) {
p = buffer + 26;
- host = simple_strtoul(p, &p, 0);
- channel = simple_strtoul(p + 1, &p, 0);
- id = simple_strtoul(p + 1, &p, 0);
- lun = simple_strtoul(p + 1, &p, 0);
+ host = (p < end) ? simple_strtoul(p, &p, 0) : 0;
+ channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+ id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+ lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
err = scsi_remove_single_device(host, channel, id, lun);
}
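
The scsi_proc.c hunks bound every simple_strtoul() call by an end pointer so a short write cannot walk the parser past the buffer. A userspace sketch of the same bounded parse, with strtoul() standing in for simple_strtoul():

    #include <stdio.h>
    #include <stdlib.h>

    /* Parse up to four integers from buf, never advancing past end. */
    static void parse_hctl(char *p, const char *end, long out[4])
    {
            for (int i = 0; i < 4; i++) {
                    out[i] = (p < end) ? strtoul(p, &p, 0) : 0;
                    p++;    /* step over the separator */
            }
    }

    int main(void)
    {
            char buf[] = "0 1 2 3";
            long v[4];

            parse_hctl(buf, buf + sizeof(buf) - 1, v);
            printf("%ld %ld %ld %ld\n", v[0], v[1], v[2], v[3]);
            return 0;
    }
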
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 3fd109fd9335..d236322ced30 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1130,8 +1130,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* that no LUN is present, so don't add sdev in these cases.
* Two specific examples are:
* 1) NetApp targets: return PQ=1, PDT=0x1f
- * 2) IBM/2145 targets: return PQ=1, PDT=0
- * 3) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
+ * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
* in the UFI 1.0 spec (we cannot rely on reserved bits).
*
* References:
@@ -1145,8 +1144,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* PDT=00h Direct-access device (floppy)
* PDT=1Fh none (no FDD connected to the requested logical unit)
*/
- if (((result[0] >> 5) == 1 ||
- (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) &&
+ if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
+ (result[0] & 0x1f) == 0x1f &&
!scsi_is_wlun(lun)) {
SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
"scsi scan: peripheral device type"
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 6faf1d6451b0..530b14685fd7 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -795,6 +795,14 @@ store_state_field(struct device *dev, struct device_attribute *attr,
}
mutex_lock(&sdev->state_mutex);
+ switch (sdev->sdev_state) {
+ case SDEV_RUNNING:
+ case SDEV_OFFLINE:
+ break;
+ default:
+ mutex_unlock(&sdev->state_mutex);
+ return -EINVAL;
+ }
if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
ret = 0;
} else {
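
The scsi_sysfs.c hunk allows a user-requested state change only when the device is currently in a small whitelist of states, returning -EINVAL otherwise. A compact sketch of the whitelist switch:

    #include <stdio.h>

    enum sdev_state { SDEV_CREATED, SDEV_RUNNING, SDEV_OFFLINE, SDEV_DEL };

    /* Only RUNNING and OFFLINE devices accept user state changes. */
    static int may_change_state(enum sdev_state cur)
    {
            switch (cur) {
            case SDEV_RUNNING:
            case SDEV_OFFLINE:
                    return 1;
            default:
                    return 0;       /* -EINVAL in the driver */
            }
    }

    int main(void)
    {
            printf("%d %d\n", may_change_state(SDEV_RUNNING),
                   may_change_state(SDEV_DEL));
            return 0;
    }
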
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index f6cce0befa7d..51f53638629c 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2767,6 +2767,10 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
if (!conn || !session)
return -EINVAL;
+ /* data will be regarded as NULL-ended string, do length check */
+ if (strlen(data) > ev->u.set_param.len)
+ return -EINVAL;
+
switch (ev->u.set_param.param) {
case ISCSI_PARAM_SESS_RECOVERY_TMO:
sscanf(data, "%d", &value);
@@ -2919,6 +2923,10 @@ iscsi_set_host_param(struct iscsi_transport *transport,
return -ENODEV;
}
+ /* see similar check in iscsi_if_set_param() */
+ if (strlen(data) > ev->u.set_host_param.len)
+ return -EINVAL;
+
err = transport->set_host_param(shost, ev->u.set_host_param.param,
data, ev->u.set_host_param.len);
scsi_host_put(shost);
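
Both scsi_transport_iscsi.c hunks refuse to treat the payload as a string longer than its declared length. An equivalent bounded check, sketched here with memchr() so no byte beyond the declared length plus the terminator is ever read (the kernel's strlen() form assumes a terminator exists somewhere later in the buffer):

    #include <stdio.h>
    #include <string.h>

    /* Accept data only if a NUL terminator falls within len bytes
     * of payload, i.e. strlen(data) <= len, checked without overrunning. */
    static int check_param_string(const char *data, size_t len)
    {
            return memchr(data, '\0', len + 1) ? 0 : -1;
    }

    int main(void)
    {
            char ok[8] = "tmo=30";
            char bad[4] = { 'a', 'b', 'c', 'd' };   /* unterminated */

            printf("%d %d\n", check_param_string(ok, 7),
                   check_param_string(bad, 3));
            return 0;
    }
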
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 0a1734f34587..6a1428d453f3 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -433,8 +433,8 @@ int ses_match_host(struct enclosure_device *edev, void *data)
}
#endif /* 0 */
-static void ses_process_descriptor(struct enclosure_component *ecomp,
- unsigned char *desc)
+static int ses_process_descriptor(struct enclosure_component *ecomp,
+ unsigned char *desc, int max_desc_len)
{
int eip = desc[0] & 0x10;
int invalid = desc[0] & 0x80;
@@ -445,22 +445,32 @@ static void ses_process_descriptor(struct enclosure_component *ecomp,
unsigned char *d;
if (invalid)
- return;
+ return 0;
switch (proto) {
case SCSI_PROTOCOL_FCP:
if (eip) {
+ if (max_desc_len <= 7)
+ return 1;
d = desc + 4;
slot = d[3];
}
break;
case SCSI_PROTOCOL_SAS:
+
if (eip) {
+ if (max_desc_len <= 27)
+ return 1;
d = desc + 4;
slot = d[3];
d = desc + 8;
- } else
+ } else {
+ if (max_desc_len <= 23)
+ return 1;
d = desc + 4;
+ }
+
+
/* only take the phy0 addr */
addr = (u64)d[12] << 56 |
(u64)d[13] << 48 |
@@ -477,6 +487,8 @@ static void ses_process_descriptor(struct enclosure_component *ecomp,
}
ecomp->slot = slot;
scomp->addr = addr;
+
+ return 0;
}
struct efd {
@@ -491,9 +503,6 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
int i;
struct ses_component *scomp;
- if (!edev->component[0].scratch)
- return 0;
-
for (i = 0; i < edev->components; i++) {
scomp = edev->component[i].scratch;
if (scomp->addr != efd->addr)
@@ -549,7 +558,7 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
/* skip past overall descriptor */
desc_ptr += len + 4;
}
- if (ses_dev->page10)
+ if (ses_dev->page10 && ses_dev->page10_len > 9)
addl_desc_ptr = ses_dev->page10 + 8;
type_ptr = ses_dev->page1_types;
components = 0;
@@ -557,17 +566,22 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
for (j = 0; j < type_ptr[1]; j++) {
char *name = NULL;
struct enclosure_component *ecomp;
+ int max_desc_len;
if (desc_ptr) {
- if (desc_ptr >= buf + page7_len) {
+ if (desc_ptr + 3 >= buf + page7_len) {
desc_ptr = NULL;
} else {
len = (desc_ptr[2] << 8) + desc_ptr[3];
desc_ptr += 4;
- /* Add trailing zero - pushes into
- * reserved space */
- desc_ptr[len] = '\0';
- name = desc_ptr;
+ if (desc_ptr + len > buf + page7_len)
+ desc_ptr = NULL;
+ else {
+ /* Add trailing zero - pushes into
+ * reserved space */
+ desc_ptr[len] = '\0';
+ name = desc_ptr;
+ }
}
}
if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
@@ -579,14 +593,20 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
components++,
type_ptr[0],
name);
- else
+ else if (components < edev->components)
ecomp = &edev->component[components++];
+ else
+ ecomp = ERR_PTR(-EINVAL);
if (!IS_ERR(ecomp)) {
- if (addl_desc_ptr)
- ses_process_descriptor(
- ecomp,
- addl_desc_ptr);
+ if (addl_desc_ptr) {
+ max_desc_len = ses_dev->page10_len -
+ (addl_desc_ptr - ses_dev->page10);
+ if (ses_process_descriptor(ecomp,
+ addl_desc_ptr,
+ max_desc_len))
+ addl_desc_ptr = NULL;
+ }
if (create)
enclosure_component_register(
ecomp);
@@ -603,9 +623,11 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
/* these elements are optional */
type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT ||
type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT ||
- type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS))
+ type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) {
addl_desc_ptr += addl_desc_ptr[1] + 2;
-
+ if (addl_desc_ptr + 1 >= ses_dev->page10 + ses_dev->page10_len)
+ addl_desc_ptr = NULL;
+ }
}
}
kfree(buf);
@@ -704,6 +726,7 @@ static int ses_intf_add(struct device *cdev,
type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
components += type_ptr[1];
}
+
ses_dev->page1 = buf;
ses_dev->page1_len = len;
buf = NULL;
@@ -745,9 +768,11 @@ static int ses_intf_add(struct device *cdev,
buf = NULL;
}
page2_not_supported:
- scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
- if (!scomp)
- goto err_free;
+ if (components > 0) {
+ scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
+ if (!scomp)
+ goto err_free;
+ }
edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev),
components, &ses_enclosure_callbacks);
@@ -827,7 +852,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
kfree(ses_dev->page2);
kfree(ses_dev);
- kfree(edev->component[0].scratch);
+ if (edev->components)
+ kfree(edev->component[0].scratch);
put_device(&edev->edev);
enclosure_unregister(edev);
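
The ses.c hunks all enforce one idea: every descriptor read must fit inside the page that was actually returned (page7_len, page10_len), and the walk stops at the first truncated entry. A simplified sketch of that bounds-checked walk over variable-length descriptors, where byte 1 of each descriptor holds its payload length:

    #include <stdio.h>

    static void walk_descriptors(const unsigned char *page, int page_len)
    {
            const unsigned char *d = page;

            while (d + 1 < page + page_len) {
                    int len = d[1] + 2;     /* 2-byte header + payload */

                    if (d + len > page + page_len)
                            break;          /* truncated: stop cleanly */
                    printf("descriptor of %d bytes\n", len);
                    d += len;
            }
    }

    int main(void)
    {
            unsigned char page[] = { 0, 2, 0xaa, 0xbb, 0, 1, 0xcc };

            walk_descriptors(page, sizeof(page));
            return 0;
    }
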
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 9c6bf13daaee..cc836610e21e 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -190,7 +190,7 @@ static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
-static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static Sg_device *sg_get_dev(int dev);
@@ -412,6 +412,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
Sg_fd *sfp;
Sg_request *srp;
int req_pack_id = -1;
+ bool busy;
sg_io_hdr_t *hp;
struct sg_header *old_hdr = NULL;
int retval = 0;
@@ -459,25 +460,19 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
} else
req_pack_id = old_hdr->pack_id;
}
- srp = sg_get_rq_mark(sfp, req_pack_id);
+ srp = sg_get_rq_mark(sfp, req_pack_id, &busy);
if (!srp) { /* now wait on packet to arrive */
- if (atomic_read(&sdp->detaching)) {
- retval = -ENODEV;
- goto free_old_hdr;
- }
if (filp->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto free_old_hdr;
}
retval = wait_event_interruptible(sfp->read_wait,
- (atomic_read(&sdp->detaching) ||
- (srp = sg_get_rq_mark(sfp, req_pack_id))));
- if (atomic_read(&sdp->detaching)) {
- retval = -ENODEV;
- goto free_old_hdr;
- }
- if (retval) {
- /* -ERESTARTSYS as signal hit process */
+ ((srp = sg_get_rq_mark(sfp, req_pack_id, &busy)) ||
+ (!busy && atomic_read(&sdp->detaching))));
+ if (!srp) {
+ /* signal or detaching */
+ if (!retval)
+ retval = -ENODEV;
goto free_old_hdr;
}
}
@@ -928,9 +923,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
- (srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
+ srp_done(sfp, srp));
write_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
srp->done = 2;
@@ -2074,19 +2067,28 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
}
static Sg_request *
-sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy)
{
Sg_request *resp;
unsigned long iflags;
+ *busy = false;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(resp, &sfp->rq_list, entry) {
- /* look for requests that are ready + not SG_IO owned */
- if ((1 == resp->done) && (!resp->sg_io_owned) &&
+ /* look for requests that are not SG_IO owned */
+ if ((!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
- resp->done = 2; /* guard against other readers */
- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ switch (resp->done) {
+ case 0: /* request active */
+ *busy = true;
+ break;
+ case 1: /* request done; response ready to return */
+ resp->done = 2; /* guard against other readers */
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return resp;
+ case 2: /* response already being returned */
+ break;
+ }
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
@@ -2140,6 +2142,15 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp)
res = 1;
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+
+ /*
+ * If the device is detaching, wakeup any readers in case we just
+ * removed the last response, which would leave nothing for them to
+ * return other than -ENODEV.
+ */
+ if (unlikely(atomic_read(&sfp->parentdp->detaching)))
+ wake_up_interruptible_all(&sfp->read_wait);
+
return res;
}
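
The sg.c rework changes the wake-up condition for readers: return as soon as a response is ready, or once the device is detaching and no request is still active, so in-flight commands can still deliver their responses before -ENODEV is reported. The predicate in isolation:

    #include <stdbool.h>
    #include <stdio.h>

    /* Condition passed to wait_event_interruptible() in the hunk. */
    static bool should_wake(bool have_response, bool busy, bool detaching)
    {
            return have_response || (!busy && detaching);
    }

    int main(void)
    {
            printf("%d\n", should_wake(false, true,  true));  /* 0: keep waiting */
            printf("%d\n", should_wake(false, false, true));  /* 1: -ENODEV */
            printf("%d\n", should_wake(true,  false, false)); /* 1: return it */
            return 0;
    }
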
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 80ff00025c03..540d6eb2cc48 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -5018,10 +5018,10 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
}
switch (scmd->sc_data_direction) {
- case DMA_TO_DEVICE:
+ case DMA_FROM_DEVICE:
request->data_direction = SOP_READ_FLAG;
break;
- case DMA_FROM_DEVICE:
+ case DMA_TO_DEVICE:
request->data_direction = SOP_WRITE_FLAG;
break;
case DMA_NONE:
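
The smartpqi hunk swaps two cases that had the mapping backwards: DMA_FROM_DEVICE means the device fills host memory, which is a read, while DMA_TO_DEVICE drains host memory into the device, a write. A sketch of the corrected mapping; the SOP flag values here are stand-ins, only the enum dma_data_direction ordering matches the kernel's:

    #include <stdio.h>

    enum dma_dir { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_NONE };
    enum sop_flag { SOP_NO_DIRECTION_FLAG, SOP_READ_FLAG, SOP_WRITE_FLAG,
                    SOP_BIDIRECTIONAL };

    static enum sop_flag sop_direction(enum dma_dir d)
    {
            switch (d) {
            case DMA_FROM_DEVICE:   return SOP_READ_FLAG;   /* device -> host */
            case DMA_TO_DEVICE:     return SOP_WRITE_FLAG;  /* host -> device */
            case DMA_NONE:          return SOP_NO_DIRECTION_FLAG;
            default:                return SOP_BIDIRECTIONAL;
            }
    }

    int main(void)
    {
            printf("%d %d\n", sop_direction(DMA_FROM_DEVICE),
                   sop_direction(DMA_TO_DEVICE));
            return 0;
    }
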
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index e9ccfb97773f..e362453e8d26 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -318,7 +318,10 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
ret);
put_device(&snic->shost->shost_gendev);
- kfree(tgt);
+ spin_lock_irqsave(snic->shost->host_lock, flags);
+ list_del(&tgt->list);
+ spin_unlock_irqrestore(snic->shost->host_lock, flags);
+ put_device(&tgt->dev);
tgt = NULL;
return tgt;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 33287b6bdf0e..20b6556e5dcb 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -109,7 +109,9 @@ enum {
TASK_ATTRIBUTE_HEADOFQUEUE = 0x1,
TASK_ATTRIBUTE_ORDERED = 0x2,
TASK_ATTRIBUTE_ACA = 0x4,
+};
+enum {
SS_STS_NORMAL = 0x80000000,
SS_STS_DONE = 0x40000000,
SS_STS_HANDSHAKE = 0x20000000,
@@ -121,7 +123,9 @@ enum {
SS_I2H_REQUEST_RESET = 0x2000,
SS_MU_OPERATIONAL = 0x80000000,
+};
+enum {
STEX_CDB_LENGTH = 16,
STATUS_VAR_LEN = 128,
@@ -668,16 +672,17 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
return 0;
case PASSTHRU_CMD:
if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
- struct st_drvver ver;
+ const struct st_drvver ver = {
+ .major = ST_VER_MAJOR,
+ .minor = ST_VER_MINOR,
+ .oem = ST_OEM,
+ .build = ST_BUILD_VER,
+ .signature[0] = PASSTHRU_SIGNATURE,
+ .console_id = host->max_id - 1,
+ .host_no = hba->host->host_no,
+ };
size_t cp_len = sizeof(ver);
- ver.major = ST_VER_MAJOR;
- ver.minor = ST_VER_MINOR;
- ver.oem = ST_OEM;
- ver.build = ST_BUILD_VER;
- ver.signature[0] = PASSTHRU_SIGNATURE;
- ver.console_id = host->max_id - 1;
- ver.host_no = hba->host->host_no;
cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
cmd->result = sizeof(ver) == cp_len ?
DID_OK << 16 | COMMAND_COMPLETE << 8 :
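
The stex.c hunk replaces field-by-field assignment into an uninitialized stack struct with a const designated initializer: C guarantees every member not named is zero-initialized, so stale stack bytes no longer reach the copy into the SCSI buffer (compilers also commonly clear padding, though the language itself does not promise that). A sketch with a hypothetical struct layout:

    #include <stdio.h>

    struct drvver {
            unsigned char major, minor, oem, build;
            unsigned char signature[16];
            unsigned short console_id, host_no;
    };

    int main(void)
    {
            /* Members not listed below are guaranteed to be zero. */
            const struct drvver ver = {
                    .major = 6,
                    .minor = 2,
                    .signature[0] = 0xa5,
            };

            printf("%u.%u sig=%#x host_no=%u\n",
                   ver.major, ver.minor, ver.signature[0], ver.host_no);
            return 0;
    }
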
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 5087ed6afbdc..44f4e10f9bf9 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1423,6 +1423,8 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
{
blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
+ /* storvsc devices don't support MAINTENANCE_IN SCSI cmd */
+ sdevice->no_report_opcodes = 1;
sdevice->no_write_same = 1;
/*
@@ -1526,10 +1528,6 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
*/
static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
{
-#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
- if (scmnd->device->host->transportt == fc_transport_template)
- return fc_eh_timed_out(scmnd);
-#endif
return BLK_EH_RESET_TIMER;
}
@@ -1846,7 +1844,7 @@ static int storvsc_probe(struct hv_device *device,
*/
host_dev->handle_error_wq =
alloc_ordered_workqueue("storvsc_error_wq_%d",
- WQ_MEM_RECLAIM,
+ 0,
host->host_no);
if (!host_dev->handle_error_wq)
goto err_out2;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 670f4c7934f8..363adf489079 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3265,7 +3265,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
*/
ret = utf16s_to_utf8s(uc_str->uc,
uc_str->len - QUERY_DESC_HDR_SIZE,
- UTF16_BIG_ENDIAN, str, ascii_len);
+ UTF16_BIG_ENDIAN, str, ascii_len - 1);
/* replace non-printable or non-ASCII characters with spaces */
for (i = 0; i < ret; i++)
@@ -8530,5 +8530,6 @@ EXPORT_SYMBOL_GPL(ufshcd_init);
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
+MODULE_SOFTDEP("pre: governor_simpleondemand");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);
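
The ufshcd.c hunk passes ascii_len - 1 to the UTF-16 conversion so the trailing byte stays free for the NUL terminator. The same off-by-one discipline in a plain copy helper:

    #include <stdio.h>
    #include <string.h>

    /* Copy at most dst_len - 1 bytes, always leaving room for '\0'. */
    static int copy_capped(char *dst, size_t dst_len, const char *src)
    {
            size_t n = strlen(src);

            if (n > dst_len - 1)
                    n = dst_len - 1;
            memcpy(dst, src, n);
            dst[n] = '\0';
            return (int)n;
    }

    int main(void)
    {
            char buf[8];

            printf("%d %s\n", copy_capped(buf, sizeof(buf), "string descriptor"),
                   buf);
            return 0;
    }
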
diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c
index f8c08fb9891d..e0ffef6e9386 100644
--- a/drivers/siox/siox-core.c
+++ b/drivers/siox/siox-core.c
@@ -835,6 +835,8 @@ static struct siox_device *siox_device_add(struct siox_master *smaster,
err_device_register:
/* don't care to make the buffer smaller again */
+ put_device(&sdevice->dev);
+ sdevice = NULL;
err_buf_alloc:
siox_master_unlock(smaster);
diff --git a/drivers/slimbus/stream.c b/drivers/slimbus/stream.c
index 75f87b3d8b95..73a2aa362957 100644
--- a/drivers/slimbus/stream.c
+++ b/drivers/slimbus/stream.c
@@ -67,10 +67,10 @@ static const int slim_presence_rate_table[] = {
384000,
768000,
0, /* Reserved */
- 110250,
- 220500,
- 441000,
- 882000,
+ 11025,
+ 22050,
+ 44100,
+ 88200,
176400,
352800,
705600,
diff --git a/drivers/soc/amlogic/meson-mx-socinfo.c b/drivers/soc/amlogic/meson-mx-socinfo.c
index 78f0f1aeca57..92125dd65f33 100644
--- a/drivers/soc/amlogic/meson-mx-socinfo.c
+++ b/drivers/soc/amlogic/meson-mx-socinfo.c
@@ -126,6 +126,7 @@ static int __init meson_mx_socinfo_init(void)
np = of_find_matching_node(NULL, meson_mx_socinfo_analog_top_ids);
if (np) {
analog_top_regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(analog_top_regmap))
return PTR_ERR(analog_top_regmap);
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
index c6ec7d95bcfc..722fd54e537c 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -681,13 +681,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
const struct of_device_id *of_id = NULL;
struct device_node *dn;
void __iomem *base;
- int ret, i;
+ int ret, i, s;
/* AON ctrl registers */
base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
if (IS_ERR(base)) {
pr_err("error mapping AON_CTRL\n");
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto aon_err;
}
ctrl.aon_ctrl_base = base;
@@ -697,8 +698,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
/* Assume standard offset */
ctrl.aon_sram = ctrl.aon_ctrl_base +
AON_CTRL_SYSTEM_DATA_RAM_OFS;
+ s = 0;
} else {
ctrl.aon_sram = base;
+ s = 1;
}
writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
@@ -708,7 +711,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
(const void **)&ddr_phy_data);
if (IS_ERR(base)) {
pr_err("error mapping DDR PHY\n");
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto ddr_phy_err;
}
ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
@@ -728,17 +732,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
for_each_matching_node(dn, ddr_shimphy_dt_ids) {
i = ctrl.num_memc;
if (i >= MAX_NUM_MEMC) {
+ of_node_put(dn);
pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
break;
}
base = of_io_request_and_map(dn, 0, dn->full_name);
if (IS_ERR(base)) {
+ of_node_put(dn);
if (!ctrl.support_warm_boot)
break;
pr_err("error mapping DDR SHIMPHY %d\n", i);
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto ddr_shimphy_err;
}
ctrl.memcs[i].ddr_shimphy_base = base;
ctrl.num_memc++;
@@ -749,14 +756,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
for_each_matching_node(dn, brcmstb_memc_of_match) {
base = of_iomap(dn, 0);
if (!base) {
+ of_node_put(dn);
pr_err("error mapping DDR Sequencer %d\n", i);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto brcmstb_memc_err;
}
of_id = of_match_node(brcmstb_memc_of_match, dn);
if (!of_id) {
iounmap(base);
- return -EINVAL;
+ of_node_put(dn);
+ ret = -EINVAL;
+ goto brcmstb_memc_err;
}
ddr_seq_data = of_id->data;
@@ -776,21 +787,24 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
dn = of_find_matching_node(NULL, sram_dt_ids);
if (!dn) {
pr_err("SRAM not found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto brcmstb_memc_err;
}
ret = brcmstb_init_sram(dn);
of_node_put(dn);
if (ret) {
pr_err("error setting up SRAM for PM\n");
- return ret;
+ goto brcmstb_memc_err;
}
ctrl.pdev = pdev;
ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
- if (!ctrl.s3_params)
- return -ENOMEM;
+ if (!ctrl.s3_params) {
+ ret = -ENOMEM;
+ goto s3_params_err;
+ }
ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
sizeof(*ctrl.s3_params),
DMA_TO_DEVICE);
@@ -810,7 +824,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
out:
kfree(ctrl.s3_params);
-
+s3_params_err:
+ iounmap(ctrl.boot_sram);
+brcmstb_memc_err:
+ for (i--; i >= 0; i--)
+ iounmap(ctrl.memcs[i].ddr_ctrl);
+ddr_shimphy_err:
+ for (i = 0; i < ctrl.num_memc; i++)
+ iounmap(ctrl.memcs[i].ddr_shimphy_base);
+
+ iounmap(ctrl.memcs[0].ddr_phy_base);
+ddr_phy_err:
+ iounmap(ctrl.aon_ctrl_base);
+ if (s)
+ iounmap(ctrl.aon_sram);
+aon_err:
pr_warn("PM: initialization failed with code %d\n", ret);
return ret;
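
The pm-arm.c hunks convert every early return in brcmstb_pm_probe() into a jump onto a single unwind ladder, so each failure releases exactly the mappings acquired so far, in reverse order. A toy illustration of the goto-ladder shape, with malloc() standing in for the iomap calls:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int probe(void)
    {
            void *a, *b, *c;
            int ret;

            a = malloc(16);
            if (!a) { ret = -ENOMEM; goto err_a; }
            b = malloc(16);
            if (!b) { ret = -ENOMEM; goto err_b; }
            c = malloc(16);
            if (!c) { ret = -ENOMEM; goto err_c; }

            free(c); free(b); free(a);      /* demo only: probe succeeded */
            return 0;

    err_c:  free(b);        /* undo in reverse order of acquisition */
    err_b:  free(a);
    err_a:  return ret;
    }

    int main(void)
    {
            printf("probe() = %d\n", probe());
            return 0;
    }
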
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index f9ad8ad54a7d..df062477dab0 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -24,6 +24,7 @@ config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
depends on FSL_MC_BUS
select SOC_BUS
+ select FSL_GUTS
help
Driver for the DPAA2 DPIO object. A DPIO provides queue and
buffer management facilities for software to interact with
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 34810f9bb2ee..2b7fb7a8805c 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -142,7 +142,7 @@ static int fsl_guts_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
const struct fsl_soc_die_attr *soc_die;
- const char *machine;
+ const char *machine = NULL;
u32 svr;
/* Initialize guts */
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
index cfa4b2939992..3ed083860764 100644
--- a/drivers/soc/fsl/qe/Kconfig
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -38,6 +38,7 @@ config QE_TDM
config QE_USB
bool
+ depends on QUICC_ENGINE
default y if USB_FSL_QE
help
QE USB Controller support
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 661e47acc354..ddcbeb8d05b6 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -56,19 +56,12 @@ config QCOM_GSBI
config QCOM_LLCC
tristate "Qualcomm Technologies, Inc. LLCC driver"
depends on ARCH_QCOM || COMPILE_TEST
+ select REGMAP_MMIO
help
Qualcomm Technologies, Inc. platform specific
- Last Level Cache Controller(LLCC) driver. This provides interfaces
- to clients that use the LLCC. Say yes here to enable LLCC slice
- driver.
-
-config QCOM_SDM845_LLCC
- tristate "Qualcomm Technologies, Inc. SDM845 LLCC driver"
- depends on QCOM_LLCC
- help
- Say yes here to enable the LLCC driver for SDM845. This provides
- data required to configure LLCC so that clients can start using the
- LLCC slices.
+ Last Level Cache Controller(LLCC) driver for platforms such as,
+ SDM845. This provides interfaces to clients that use the LLCC.
+ Say yes here to enable LLCC slice driver.
config QCOM_MDT_LOADER
tristate
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 162788701a77..2559fe948ce0 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_QCOM_SMSM) += smsm.o
obj-$(CONFIG_QCOM_SOCINFO) += socinfo.o
obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
obj-$(CONFIG_QCOM_APR) += apr.o
-obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o
-obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
+obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o
obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o
obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-qcom.c
index 4a6111635f82..431b214975c8 100644
--- a/drivers/soc/qcom/llcc-slice.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
*/
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
@@ -46,6 +47,27 @@
#define BANK_OFFSET_STRIDE 0x80000
+static struct llcc_slice_config sdm845_data[] = {
+ { LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
+ { LLCC_VIDSC1, 3, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
+ { LLCC_ROTATOR, 4, 563, 2, 1, 0x0, 0x00e, 2, 0, 1, 1, 0 },
+ { LLCC_VOICE, 5, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_AUDIO, 6, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_MDMHPGRW, 7, 1024, 2, 0, 0xfc, 0xf00, 0, 0, 1, 1, 0 },
+ { LLCC_MDM, 8, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_CMPT, 10, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_GPUHTW, 11, 512, 1, 1, 0xc, 0x0, 0, 0, 1, 1, 0 },
+ { LLCC_GPU, 12, 2304, 1, 0, 0xff0, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_MMUHWT, 13, 256, 2, 0, 0x0, 0x1, 0, 0, 1, 0, 1 },
+ { LLCC_CMPTDMA, 15, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_DISP, 16, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_VIDFW, 17, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_MDMHPFX, 20, 1024, 2, 1, 0x0, 0xf00, 0, 0, 1, 1, 0 },
+ { LLCC_MDMPNG, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+};
+
static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
static struct regmap_config llcc_regmap_config = {
@@ -301,13 +323,12 @@ static int qcom_llcc_cfg_program(struct platform_device *pdev)
return ret;
}
-int qcom_llcc_remove(struct platform_device *pdev)
+static int qcom_llcc_remove(struct platform_device *pdev)
{
/* Set the global pointer to a error code to avoid referencing it */
drv_data = ERR_PTR(-ENODEV);
return 0;
}
-EXPORT_SYMBOL_GPL(qcom_llcc_remove);
static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
const char *name)
@@ -327,14 +348,17 @@ static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config);
}
-int qcom_llcc_probe(struct platform_device *pdev,
- const struct llcc_slice_config *llcc_cfg, u32 sz)
+static int qcom_llcc_probe(struct platform_device *pdev,
+ const struct llcc_slice_config *llcc_cfg, u32 sz)
{
u32 num_banks;
struct device *dev = &pdev->dev;
int ret, i;
struct platform_device *llcc_edac;
+ if (!IS_ERR(drv_data))
+ return -EBUSY;
+
drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
if (!drv_data) {
ret = -ENOMEM;
@@ -408,6 +432,31 @@ err:
drv_data = ERR_PTR(-ENODEV);
return ret;
}
-EXPORT_SYMBOL_GPL(qcom_llcc_probe);
+
+static int sdm845_qcom_llcc_remove(struct platform_device *pdev)
+{
+ return qcom_llcc_remove(pdev);
+}
+
+static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
+{
+ return qcom_llcc_probe(pdev, sdm845_data, ARRAY_SIZE(sdm845_data));
+}
+
+static const struct of_device_id sdm845_qcom_llcc_of_match[] = {
+ { .compatible = "qcom,sdm845-llcc", },
+ { }
+};
+
+static struct platform_driver sdm845_qcom_llcc_driver = {
+ .driver = {
+ .name = "sdm845-llcc",
+ .of_match_table = sdm845_qcom_llcc_of_match,
+ },
+ .probe = sdm845_qcom_llcc_probe,
+ .remove = sdm845_qcom_llcc_remove,
+};
+module_platform_driver(sdm845_qcom_llcc_driver);
+
+MODULE_DESCRIPTION("QCOM sdm845 LLCC driver");
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Qualcomm Last Level Cache Controller");
diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c
deleted file mode 100644
index 86600d97c36d..000000000000
--- a/drivers/soc/qcom/llcc-sdm845.c
+++ /dev/null
@@ -1,100 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/soc/qcom/llcc-qcom.h>
-
-/*
- * SCT(System Cache Table) entry contains of the following members:
- * usecase_id: Unique id for the client's use case
- * slice_id: llcc slice id for each client
- * max_cap: The maximum capacity of the cache slice provided in KB
- * priority: Priority of the client used to select victim line for replacement
- * fixed_size: Boolean indicating if the slice has a fixed capacity
- * bonus_ways: Bonus ways are additional ways to be used for any slice,
- * if client ends up using more than reserved cache ways. Bonus
- * ways are allocated only if they are not reserved for some
- * other client.
- * res_ways: Reserved ways for the cache slice, the reserved ways cannot
- * be used by any other client than the one its assigned to.
- * cache_mode: Each slice operates as a cache, this controls the mode of the
- * slice: normal or TCM(Tightly Coupled Memory)
- * probe_target_ways: Determines what ways to probe for access hit. When
- * configured to 1 only bonus and reserved ways are probed.
- * When configured to 0 all ways in llcc are probed.
- * dis_cap_alloc: Disable capacity based allocation for a client
- * retain_on_pc: If this bit is set and client has maintained active vote
- * then the ways assigned to this client are not flushed on power
- * collapse.
- * activate_on_init: Activate the slice immediately after the SCT is programmed
- */
-#define SCT_ENTRY(uid, sid, mc, p, fs, bway, rway, cmod, ptw, dca, rp, a) \
- { \
- .usecase_id = uid, \
- .slice_id = sid, \
- .max_cap = mc, \
- .priority = p, \
- .fixed_size = fs, \
- .bonus_ways = bway, \
- .res_ways = rway, \
- .cache_mode = cmod, \
- .probe_target_ways = ptw, \
- .dis_cap_alloc = dca, \
- .retain_on_pc = rp, \
- .activate_on_init = a, \
- }
-
-static struct llcc_slice_config sdm845_data[] = {
- SCT_ENTRY(LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1),
- SCT_ENTRY(LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_VIDSC1, 3, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_ROTATOR, 4, 563, 2, 1, 0x0, 0x00e, 2, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_VOICE, 5, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_AUDIO, 6, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_MDMHPGRW, 7, 1024, 2, 0, 0xfc, 0xf00, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_MDM, 8, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_CMPT, 10, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_GPUHTW, 11, 512, 1, 1, 0xc, 0x0, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_GPU, 12, 2304, 1, 0, 0xff0, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_MMUHWT, 13, 256, 2, 0, 0x0, 0x1, 0, 0, 1, 0, 1),
- SCT_ENTRY(LLCC_CMPTDMA, 15, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_DISP, 16, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_VIDFW, 17, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_MDMHPFX, 20, 1024, 2, 1, 0x0, 0xf00, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_MDMPNG, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0),
-};
-
-static int sdm845_qcom_llcc_remove(struct platform_device *pdev)
-{
- return qcom_llcc_remove(pdev);
-}
-
-static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
-{
- return qcom_llcc_probe(pdev, sdm845_data, ARRAY_SIZE(sdm845_data));
-}
-
-static const struct of_device_id sdm845_qcom_llcc_of_match[] = {
- { .compatible = "qcom,sdm845-llcc", },
- { }
-};
-
-static struct platform_driver sdm845_qcom_llcc_driver = {
- .driver = {
- .name = "sdm845-llcc",
- .of_match_table = sdm845_qcom_llcc_of_match,
- },
- .probe = sdm845_qcom_llcc_probe,
- .remove = sdm845_qcom_llcc_remove,
-};
-module_platform_driver(sdm845_qcom_llcc_driver);
-
-MODULE_DESCRIPTION("QCOM sdm845 LLCC driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index f16d6ec78064..bca98df55bc6 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -489,8 +489,10 @@ static int qmp_cooling_devices_register(struct qmp *qmp)
continue;
ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
child);
- if (ret)
+ if (ret) {
+ of_node_put(child);
goto unroll;
+ }
}
if (!count)
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
index 3aaab71d1b2c..dbc8b4c93190 100644
--- a/drivers/soc/qcom/qmi_encdec.c
+++ b/drivers/soc/qcom/qmi_encdec.c
@@ -534,8 +534,8 @@ static int qmi_decode_string_elem(struct qmi_elem_info *ei_array,
decoded_bytes += rc;
}
- if (string_len > temp_ei->elem_len) {
- pr_err("%s: String len %d > Max Len %d\n",
+ if (string_len >= temp_ei->elem_len) {
+ pr_err("%s: String len %d >= Max Len %d\n",
__func__, string_len, temp_ei->elem_len);
return -ETOOSMALL;
} else if (string_len > tlv_len) {
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
index d2b558438deb..41e929407196 100644
--- a/drivers/soc/qcom/smem_state.c
+++ b/drivers/soc/qcom/smem_state.c
@@ -136,6 +136,7 @@ static void qcom_smem_state_release(struct kref *ref)
struct qcom_smem_state *state = container_of(ref, struct qcom_smem_state, refcount);
list_del(&state->list);
+ of_node_put(state->of_node);
kfree(state);
}
@@ -169,7 +170,7 @@ struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node,
kref_init(&state->refcount);
- state->of_node = of_node;
+ state->of_node = of_node_get(of_node);
state->ops = *ops;
state->priv = priv;
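
The smem_state hunks pair the stored of_node pointer with of_node_get() at registration and of_node_put() in the kref release path, so the node cannot be freed while a state still points at it. A miniature refcount sketch of that get/put discipline (not the real of_node API):

    #include <stdio.h>

    struct node { int refs; };

    static struct node *node_get(struct node *n) { n->refs++; return n; }
    static void node_put(struct node *n) { if (--n->refs == 0) printf("freed\n"); }

    struct state { struct node *of_node; };

    static void state_register(struct state *s, struct node *n)
    {
            s->of_node = node_get(n);       /* keep the node alive while stored */
    }

    static void state_release(struct state *s)
    {
            node_put(s->of_node);           /* balances the get in register */
    }

    int main(void)
    {
            struct node n = { .refs = 1 };
            struct state s;

            state_register(&s, &n);
            state_release(&s);
            node_put(&n);                   /* drop the original ref: "freed" */
            return 0;
    }
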
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index 6564f15c5319..acba67dfbc85 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -511,7 +511,7 @@ static int qcom_smsm_probe(struct platform_device *pdev)
for (id = 0; id < smsm->num_hosts; id++) {
ret = smsm_parse_ipc(smsm, id);
if (ret < 0)
- return ret;
+ goto out_put;
}
/* Acquire the main SMSM state vector */
@@ -519,13 +519,14 @@ static int qcom_smsm_probe(struct platform_device *pdev)
smsm->num_entries * sizeof(u32));
if (ret < 0 && ret != -EEXIST) {
dev_err(&pdev->dev, "unable to allocate shared state entry\n");
- return ret;
+ goto out_put;
}
states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
if (IS_ERR(states)) {
dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
- return PTR_ERR(states);
+ ret = PTR_ERR(states);
+ goto out_put;
}
/* Acquire the list of interrupt mask vectors */
@@ -533,13 +534,14 @@ static int qcom_smsm_probe(struct platform_device *pdev)
ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
if (ret < 0 && ret != -EEXIST) {
dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
- return ret;
+ goto out_put;
}
intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
if (IS_ERR(intr_mask)) {
dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
- return PTR_ERR(intr_mask);
+ ret = PTR_ERR(intr_mask);
+ goto out_put;
}
/* Setup the reference to the local state bits */
@@ -550,7 +552,8 @@ static int qcom_smsm_probe(struct platform_device *pdev)
smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
if (IS_ERR(smsm->state)) {
dev_err(smsm->dev, "failed to register qcom_smem_state\n");
- return PTR_ERR(smsm->state);
+ ret = PTR_ERR(smsm->state);
+ goto out_put;
}
/* Register handlers for remote processor entries of interest. */
@@ -580,16 +583,19 @@ static int qcom_smsm_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, smsm);
+ of_node_put(local_node);
return 0;
unwind_interfaces:
+ of_node_put(node);
for (id = 0; id < smsm->num_entries; id++)
if (smsm->entries[id].domain)
irq_domain_remove(smsm->entries[id].domain);
qcom_smem_state_unregister(smsm->state);
-
+out_put:
+ of_node_put(local_node);
return ret;
}
diff --git a/drivers/soc/renesas/r8a77980-sysc.c b/drivers/soc/renesas/r8a77980-sysc.c
index a8dbe55e8ba8..3d1ea245681b 100644
--- a/drivers/soc/renesas/r8a77980-sysc.c
+++ b/drivers/soc/renesas/r8a77980-sysc.c
@@ -25,7 +25,8 @@ static const struct rcar_sysc_area r8a77980_areas[] __initconst = {
PD_CPU_NOCR },
{ "ca53-cpu3", 0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU,
PD_CPU_NOCR },
- { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON },
+ { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON,
+ PD_CPU_NOCR },
{ "a3ir", 0x180, 0, R8A77980_PD_A3IR, R8A77980_PD_ALWAYS_ON },
{ "a2ir0", 0x400, 0, R8A77980_PD_A2IR0, R8A77980_PD_A3IR },
{ "a2ir1", 0x400, 1, R8A77980_PD_A2IR1, R8A77980_PD_A3IR },
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index 1b0d50f36349..33139c7817c2 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -78,8 +78,8 @@ static struct sunxi_sram_desc sun4i_a10_sram_d = {
static struct sunxi_sram_desc sun50i_a64_sram_c = {
.data = SUNXI_SRAM_DATA("C", 0x4, 24, 1,
- SUNXI_SRAM_MAP(0, 1, "cpu"),
- SUNXI_SRAM_MAP(1, 0, "de2")),
+ SUNXI_SRAM_MAP(1, 0, "cpu"),
+ SUNXI_SRAM_MAP(0, 1, "de2")),
};
static const struct of_device_id sunxi_sram_dt_ids[] = {
@@ -254,6 +254,7 @@ int sunxi_sram_claim(struct device *dev)
writel(val | ((device << sram_data->offset) & mask),
base + sram_data->reg);
+ sram_desc->claimed = true;
spin_unlock(&sram_lock);
return 0;
@@ -318,12 +319,11 @@ static struct regmap_config sunxi_sram_emac_clock_regmap = {
.writeable_reg = sunxi_sram_regmap_accessible_reg,
};
-static int sunxi_sram_probe(struct platform_device *pdev)
+static int __init sunxi_sram_probe(struct platform_device *pdev)
{
- struct resource *res;
- struct dentry *d;
struct regmap *emac_clock;
const struct sunxi_sramc_variant *variant;
+ struct device *dev = &pdev->dev;
sram_dev = &pdev->dev;
@@ -331,18 +331,10 @@ static int sunxi_sram_probe(struct platform_device *pdev)
if (!variant)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
- of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
-
- d = debugfs_create_file("sram", S_IRUGO, NULL, NULL,
- &sunxi_sram_fops);
- if (!d)
- return -ENOMEM;
-
if (variant->has_emac_clock) {
emac_clock = devm_regmap_init_mmio(&pdev->dev, base,
&sunxi_sram_emac_clock_regmap);
@@ -351,6 +343,10 @@ static int sunxi_sram_probe(struct platform_device *pdev)
return PTR_ERR(emac_clock);
}
+ of_platform_populate(dev->of_node, NULL, NULL, dev);
+
+ debugfs_create_file("sram", 0444, NULL, NULL, &sunxi_sram_fops);
+
return 0;
}
@@ -396,9 +392,8 @@ static struct platform_driver sunxi_sram_driver = {
.name = "sunxi-sram",
.of_match_table = sunxi_sram_dt_match,
},
- .probe = sunxi_sram_probe,
};
-module_platform_driver(sunxi_sram_driver);
+builtin_platform_driver_probe(sunxi_sram_driver, sunxi_sram_probe);
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner sunXi SRAM Controller Driver");
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index b8210479ec99..341f1c7caa89 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -64,7 +64,7 @@ static DEFINE_MUTEX(knav_dev_lock);
* Newest followed by older ones. Search is done from start of the array
* until a firmware file is found.
*/
-const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
+static const char * const knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
static bool device_ready;
bool knav_qmss_device_ready(void)
@@ -1789,9 +1789,9 @@ static int knav_queue_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&kdev->pdsps);
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
dev_err(dev, "Failed to enable QMSS\n");
return ret;
}
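
pm_runtime_resume_and_get() already drops the usage counter when the resume
fails, so the error path no longer needs pm_runtime_put_noidle() and only has
to undo the earlier pm_runtime_enable(). The resulting idiom:

	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		/* The failed get already dropped its own reference. */
		pm_runtime_disable(dev);
		return ret;
	}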
diff --git a/drivers/soc/ux500/ux500-soc-id.c b/drivers/soc/ux500/ux500-soc-id.c
index d64feeb51a40..36d102230df2 100644
--- a/drivers/soc/ux500/ux500-soc-id.c
+++ b/drivers/soc/ux500/ux500-soc-id.c
@@ -159,20 +159,18 @@ static ssize_t ux500_get_process(struct device *dev,
static const char *db8500_read_soc_id(struct device_node *backupram)
{
void __iomem *base;
- void __iomem *uid;
const char *retstr;
+ u32 uid[5];
base = of_iomap(backupram, 0);
if (!base)
return NULL;
- uid = base + 0x1fc0;
+ memcpy_fromio(uid, base + 0x1fc0, sizeof(uid));
/* Throw these device-specific numbers into the entropy pool */
- add_device_randomness(uid, 0x14);
+ add_device_randomness(uid, sizeof(uid));
retstr = kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x",
- readl((u32 *)uid+0),
- readl((u32 *)uid+1), readl((u32 *)uid+2),
- readl((u32 *)uid+3), readl((u32 *)uid+4));
+ uid[0], uid[1], uid[2], uid[3], uid[4]);
iounmap(base);
return retstr;
}
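
memcpy_fromio() snapshots the MMIO words into an ordinary buffer once, so the
values can then be handed to code that expects RAM pointers instead of being
read back repeatedly through __iomem accessors. The idiom, reduced to its
essentials (offset taken from the hunk above):

	u32 buf[5];
	void __iomem *base = of_iomap(np, 0);

	if (!base)
		return NULL;
	memcpy_fromio(buf, base + 0x1fc0, sizeof(buf));
	add_device_randomness(buf, sizeof(buf));	/* plain RAM pointer now */
	iounmap(base);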
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 3fa162c1fde7..4cce9f315f4e 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -3,6 +3,7 @@ menu "Xilinx SoC drivers"
config XILINX_VCU
tristate "Xilinx VCU logicoreIP Init"
+ select MFD_CORE
depends on HAS_IOMEM
help
Provides the driver to enable and disable the isolation between the
@@ -21,11 +22,15 @@ config ZYNQMP_POWER
bool "Enable Xilinx Zynq MPSoC Power Management driver"
depends on PM && ZYNQMP_FIRMWARE
default y
+ select MAILBOX
+ select ZYNQMP_IPI_MBOX
help
 	  Say yes to enable power management support for ZynqMP SoC.
This driver uses firmware driver as an interface for power
management request to firmware. It registers isr to handle
- power management callbacks from firmware.
+	  power management callbacks from firmware. It also registers a
+	  mailbox client to receive those callbacks over the IPI mailbox.
+
If in doubt, say N.
config ZYNQMP_PM_DOMAINS
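
The mailbox client the help text refers to is an ordinary mbox_client
registration; a hedged sketch of the receive side (the channel name "rx" and
the exact wiring are assumptions, the real callback appears in zynqmp_power.c
later in this diff):

	/* The client must live as long as the channel; static here for brevity. */
	static struct mbox_client cl;
	struct mbox_chan *rx_chan;

	cl.dev = &pdev->dev;
	cl.rx_callback = ipi_receive_callback;
	rx_chan = mbox_request_channel_byname(&cl, "rx");	/* "rx" assumed */
	if (IS_ERR(rx_chan))
		return PTR_ERR(rx_chan);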
diff --git a/drivers/soc/xilinx/Makefile b/drivers/soc/xilinx/Makefile
index f66bfea5de17..12613b20001d 100644
--- a/drivers/soc/xilinx/Makefile
+++ b/drivers/soc/xilinx/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_XILINX_VCU) += xlnx_vcu.o
+obj-$(CONFIG_XILINX_VCU) += xlnx_vcu_core.o xlnx_vcu_clk.o xlnx_vcu.o
obj-$(CONFIG_ZYNQMP_POWER) += zynqmp_power.o
obj-$(CONFIG_ZYNQMP_PM_DOMAINS) += zynqmp_pm_domains.o
+
+obj-$(CONFIG_ARCH_ZYNQMP) += zynqmp/
diff --git a/drivers/soc/xilinx/xlnx_vcu.c b/drivers/soc/xilinx/xlnx_vcu.c
index a840c0272135..d89fcbaf9446 100644
--- a/drivers/soc/xilinx/xlnx_vcu.c
+++ b/drivers/soc/xilinx/xlnx_vcu.c
@@ -14,6 +14,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <soc/xilinx/xlnx_vcu.h>
+
/* Address map for different registers implemented in the VCU LogiCORE IP. */
#define VCU_ECODER_ENABLE 0x00
#define VCU_DECODER_ENABLE 0x04
@@ -26,14 +28,9 @@
#define VCU_ENC_FPS 0x20
#define VCU_MCU_CLK 0x24
#define VCU_CORE_CLK 0x28
-#define VCU_PLL_BYPASS 0x2c
-#define VCU_ENC_CLK 0x30
#define VCU_PLL_CLK 0x34
#define VCU_ENC_VIDEO_STANDARD 0x38
#define VCU_STATUS 0x3c
-#define VCU_AXI_ENC_CLK 0x40
-#define VCU_AXI_DEC_CLK 0x44
-#define VCU_AXI_MCU_CLK 0x48
#define VCU_DEC_VIDEO_STANDARD 0x4c
#define VCU_DEC_FRAME_SIZE_X 0x50
#define VCU_DEC_FRAME_SIZE_Y 0x54
@@ -41,196 +38,33 @@
#define VCU_BUFFER_B_FRAME 0x5c
#define VCU_WPP_EN 0x60
#define VCU_PLL_CLK_DEC 0x64
+#define VCU_NUM_CORE 0x6c
#define VCU_GASKET_INIT 0x74
#define VCU_GASKET_VALUE 0x03
-/* vcu slcr registers, bitmask and shift */
-#define VCU_PLL_CTRL 0x24
-#define VCU_PLL_CTRL_RESET_MASK 0x01
-#define VCU_PLL_CTRL_RESET_SHIFT 0
-#define VCU_PLL_CTRL_BYPASS_MASK 0x01
-#define VCU_PLL_CTRL_BYPASS_SHIFT 3
-#define VCU_PLL_CTRL_FBDIV_MASK 0x7f
-#define VCU_PLL_CTRL_FBDIV_SHIFT 8
-#define VCU_PLL_CTRL_POR_IN_MASK 0x01
-#define VCU_PLL_CTRL_POR_IN_SHIFT 1
-#define VCU_PLL_CTRL_PWR_POR_MASK 0x01
-#define VCU_PLL_CTRL_PWR_POR_SHIFT 2
-#define VCU_PLL_CTRL_CLKOUTDIV_MASK 0x03
-#define VCU_PLL_CTRL_CLKOUTDIV_SHIFT 16
-#define VCU_PLL_CTRL_DEFAULT 0
-#define VCU_PLL_DIV2 2
-
-#define VCU_PLL_CFG 0x28
-#define VCU_PLL_CFG_RES_MASK 0x0f
-#define VCU_PLL_CFG_RES_SHIFT 0
-#define VCU_PLL_CFG_CP_MASK 0x0f
-#define VCU_PLL_CFG_CP_SHIFT 5
-#define VCU_PLL_CFG_LFHF_MASK 0x03
-#define VCU_PLL_CFG_LFHF_SHIFT 10
-#define VCU_PLL_CFG_LOCK_CNT_MASK 0x03ff
-#define VCU_PLL_CFG_LOCK_CNT_SHIFT 13
-#define VCU_PLL_CFG_LOCK_DLY_MASK 0x7f
-#define VCU_PLL_CFG_LOCK_DLY_SHIFT 25
-#define VCU_ENC_CORE_CTRL 0x30
-#define VCU_ENC_MCU_CTRL 0x34
-#define VCU_DEC_CORE_CTRL 0x38
-#define VCU_DEC_MCU_CTRL 0x3c
-#define VCU_PLL_DIVISOR_MASK 0x3f
-#define VCU_PLL_DIVISOR_SHIFT 4
-#define VCU_SRCSEL_MASK 0x01
-#define VCU_SRCSEL_SHIFT 0
-#define VCU_SRCSEL_PLL 1
-
-#define VCU_PLL_STATUS 0x60
-#define VCU_PLL_STATUS_LOCK_STATUS_MASK 0x01
-
#define MHZ 1000000
-#define FVCO_MIN (1500U * MHZ)
-#define FVCO_MAX (3000U * MHZ)
-#define DIVISOR_MIN 0
-#define DIVISOR_MAX 63
#define FRAC 100
-#define LIMIT (10 * MHZ)
/**
- * struct xvcu_device - Xilinx VCU init device structure
+ * struct xvcu_priv - Xilinx VCU private data
* @dev: Platform device
- * @pll_ref: pll ref clock source
- * @aclk: axi clock source
+ * @pll_ref: PLL ref clock source
+ * @core_enc: Core encoder clock
+ * @core_dec: Core decoder clock
+ * @mcu_enc: MCU encoder clock
+ * @mcu_dec: MCU decoder clock
* @logicore_reg_ba: logicore reg base address
* @vcu_slcr_ba: vcu_slcr Register base address
- * @coreclk: core clock frequency
*/
-struct xvcu_device {
+struct xvcu_priv {
struct device *dev;
struct clk *pll_ref;
- struct clk *aclk;
+ struct clk *core_enc;
+ struct clk *core_dec;
+ struct clk *mcu_enc;
+ struct clk *mcu_dec;
void __iomem *logicore_reg_ba;
void __iomem *vcu_slcr_ba;
- u32 coreclk;
-};
-
-/**
- * struct xvcu_pll_cfg - Helper data
- * @fbdiv: The integer portion of the feedback divider to the PLL
- * @cp: PLL charge pump control
- * @res: PLL loop filter resistor control
- * @lfhf: PLL loop filter high frequency capacitor control
- * @lock_dly: Lock circuit configuration settings for lock windowsize
- * @lock_cnt: Lock circuit counter setting
- */
-struct xvcu_pll_cfg {
- u32 fbdiv;
- u32 cp;
- u32 res;
- u32 lfhf;
- u32 lock_dly;
- u32 lock_cnt;
-};
-
-static const struct xvcu_pll_cfg xvcu_pll_cfg[] = {
- { 25, 3, 10, 3, 63, 1000 },
- { 26, 3, 10, 3, 63, 1000 },
- { 27, 4, 6, 3, 63, 1000 },
- { 28, 4, 6, 3, 63, 1000 },
- { 29, 4, 6, 3, 63, 1000 },
- { 30, 4, 6, 3, 63, 1000 },
- { 31, 6, 1, 3, 63, 1000 },
- { 32, 6, 1, 3, 63, 1000 },
- { 33, 4, 10, 3, 63, 1000 },
- { 34, 5, 6, 3, 63, 1000 },
- { 35, 5, 6, 3, 63, 1000 },
- { 36, 5, 6, 3, 63, 1000 },
- { 37, 5, 6, 3, 63, 1000 },
- { 38, 5, 6, 3, 63, 975 },
- { 39, 3, 12, 3, 63, 950 },
- { 40, 3, 12, 3, 63, 925 },
- { 41, 3, 12, 3, 63, 900 },
- { 42, 3, 12, 3, 63, 875 },
- { 43, 3, 12, 3, 63, 850 },
- { 44, 3, 12, 3, 63, 850 },
- { 45, 3, 12, 3, 63, 825 },
- { 46, 3, 12, 3, 63, 800 },
- { 47, 3, 12, 3, 63, 775 },
- { 48, 3, 12, 3, 63, 775 },
- { 49, 3, 12, 3, 63, 750 },
- { 50, 3, 12, 3, 63, 750 },
- { 51, 3, 2, 3, 63, 725 },
- { 52, 3, 2, 3, 63, 700 },
- { 53, 3, 2, 3, 63, 700 },
- { 54, 3, 2, 3, 63, 675 },
- { 55, 3, 2, 3, 63, 675 },
- { 56, 3, 2, 3, 63, 650 },
- { 57, 3, 2, 3, 63, 650 },
- { 58, 3, 2, 3, 63, 625 },
- { 59, 3, 2, 3, 63, 625 },
- { 60, 3, 2, 3, 63, 625 },
- { 61, 3, 2, 3, 63, 600 },
- { 62, 3, 2, 3, 63, 600 },
- { 63, 3, 2, 3, 63, 600 },
- { 64, 3, 2, 3, 63, 600 },
- { 65, 3, 2, 3, 63, 600 },
- { 66, 3, 2, 3, 63, 600 },
- { 67, 3, 2, 3, 63, 600 },
- { 68, 3, 2, 3, 63, 600 },
- { 69, 3, 2, 3, 63, 600 },
- { 70, 3, 2, 3, 63, 600 },
- { 71, 3, 2, 3, 63, 600 },
- { 72, 3, 2, 3, 63, 600 },
- { 73, 3, 2, 3, 63, 600 },
- { 74, 3, 2, 3, 63, 600 },
- { 75, 3, 2, 3, 63, 600 },
- { 76, 3, 2, 3, 63, 600 },
- { 77, 3, 2, 3, 63, 600 },
- { 78, 3, 2, 3, 63, 600 },
- { 79, 3, 2, 3, 63, 600 },
- { 80, 3, 2, 3, 63, 600 },
- { 81, 3, 2, 3, 63, 600 },
- { 82, 3, 2, 3, 63, 600 },
- { 83, 4, 2, 3, 63, 600 },
- { 84, 4, 2, 3, 63, 600 },
- { 85, 4, 2, 3, 63, 600 },
- { 86, 4, 2, 3, 63, 600 },
- { 87, 4, 2, 3, 63, 600 },
- { 88, 4, 2, 3, 63, 600 },
- { 89, 4, 2, 3, 63, 600 },
- { 90, 4, 2, 3, 63, 600 },
- { 91, 4, 2, 3, 63, 600 },
- { 92, 4, 2, 3, 63, 600 },
- { 93, 4, 2, 3, 63, 600 },
- { 94, 4, 2, 3, 63, 600 },
- { 95, 4, 2, 3, 63, 600 },
- { 96, 4, 2, 3, 63, 600 },
- { 97, 4, 2, 3, 63, 600 },
- { 98, 4, 2, 3, 63, 600 },
- { 99, 4, 2, 3, 63, 600 },
- { 100, 4, 2, 3, 63, 600 },
- { 101, 4, 2, 3, 63, 600 },
- { 102, 4, 2, 3, 63, 600 },
- { 103, 5, 2, 3, 63, 600 },
- { 104, 5, 2, 3, 63, 600 },
- { 105, 5, 2, 3, 63, 600 },
- { 106, 5, 2, 3, 63, 600 },
- { 107, 3, 4, 3, 63, 600 },
- { 108, 3, 4, 3, 63, 600 },
- { 109, 3, 4, 3, 63, 600 },
- { 110, 3, 4, 3, 63, 600 },
- { 111, 3, 4, 3, 63, 600 },
- { 112, 3, 4, 3, 63, 600 },
- { 113, 3, 4, 3, 63, 600 },
- { 114, 3, 4, 3, 63, 600 },
- { 115, 3, 4, 3, 63, 600 },
- { 116, 3, 4, 3, 63, 600 },
- { 117, 3, 4, 3, 63, 600 },
- { 118, 3, 4, 3, 63, 600 },
- { 119, 3, 4, 3, 63, 600 },
- { 120, 3, 4, 3, 63, 600 },
- { 121, 3, 4, 3, 63, 600 },
- { 122, 3, 4, 3, 63, 600 },
- { 123, 3, 4, 3, 63, 600 },
- { 124, 3, 4, 3, 63, 600 },
- { 125, 3, 4, 3, 63, 600 },
};
/**
@@ -258,47 +92,71 @@ static inline void xvcu_write(void __iomem *iomem, u32 offset, u32 value)
}
/**
- * xvcu_write_field_reg - Write to the vcu reg field
- * @iomem: vcu reg space base address
- * @offset: vcu reg offset from base
- * @field: vcu reg field to write to
- * @mask: vcu reg mask
- * @shift: vcu reg number of bits to shift the bitfield
+ * xvcu_get_color_depth - read the color depth register
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Return: Returns the 32-bit color depth register value
+ *
+ */
+u32 xvcu_get_color_depth(struct xvcu_device *xvcu)
+{
+ return xvcu_read(xvcu->logicore_reg_ba, VCU_ENC_COLOR_DEPTH);
+}
+EXPORT_SYMBOL_GPL(xvcu_get_color_depth);
+
+/**
+ * xvcu_get_memory_depth - read the memory depth register
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Return: Returns the 32-bit memory depth register value
+ *
*/
-static void xvcu_write_field_reg(void __iomem *iomem, int offset,
- u32 field, u32 mask, int shift)
+u32 xvcu_get_memory_depth(struct xvcu_device *xvcu)
{
- u32 val = xvcu_read(iomem, offset);
+ return xvcu_read(xvcu->logicore_reg_ba, VCU_MEMORY_DEPTH);
+}
+EXPORT_SYMBOL_GPL(xvcu_get_memory_depth);
- val &= ~(mask << shift);
- val |= (field & mask) << shift;
+/**
+ * xvcu_get_clock_frequency - provide the core clock frequency
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Return: Returns the core clock frequency in Hz
+ *
+ */
+u32 xvcu_get_clock_frequency(struct xvcu_device *xvcu)
+{
+ return xvcu_read(xvcu->logicore_reg_ba, VCU_CORE_CLK) * MHZ;
+}
+EXPORT_SYMBOL_GPL(xvcu_get_clock_frequency);
- xvcu_write(iomem, offset, val);
+/**
+ * xvcu_get_num_cores - read the number of cores register
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Return: Returns the number of VCU cores
+ *
+ */
+u32 xvcu_get_num_cores(struct xvcu_device *xvcu)
+{
+ return xvcu_read(xvcu->logicore_reg_ba, VCU_NUM_CORE);
}
+EXPORT_SYMBOL_GPL(xvcu_get_num_cores);
/**
- * xvcu_set_vcu_pll_info - Set the VCU PLL info
+ * xvcu_set_vcu_pll - Set the VCU PLL
* @xvcu: Pointer to the xvcu_device structure
*
* Programming the VCU PLL based on the user configuration
* (ref clock freq, core clock freq, mcu clock freq).
* Core clock frequency has higher priority than mcu clock frequency
- * Errors in following cases
- * - When mcu or clock clock get from logicoreIP is 0
- * - When VCU PLL DIV related bits value other than 1
- * - When proper data not found for given data
- * - When sis570_1 clocksource related operation failed
*
* Return: Returns status, either success or error+reason
*/
-static int xvcu_set_vcu_pll_info(struct xvcu_device *xvcu)
+static int xvcu_set_vcu_pll(struct xvcu_priv *xvcu)
{
u32 refclk, coreclk, mcuclk, inte, deci;
- u32 divisor_mcu, divisor_core, fvco;
- u32 clkoutdiv, vcu_pll_ctrl, pll_clk;
- u32 cfg_val, mod, ctrl;
- int ret, i;
- const struct xvcu_pll_cfg *found = NULL;
+ int ret;
inte = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK);
deci = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK_DEC);
@@ -314,175 +172,74 @@ static int xvcu_set_vcu_pll_info(struct xvcu_device *xvcu)
dev_dbg(xvcu->dev, "Core clock from logicoreIP is %uHz\n", coreclk);
dev_dbg(xvcu->dev, "Mcu clock from logicoreIP is %uHz\n", mcuclk);
- clk_disable_unprepare(xvcu->pll_ref);
ret = clk_set_rate(xvcu->pll_ref, refclk);
if (ret)
- dev_warn(xvcu->dev, "failed to set logicoreIP refclk rate\n");
+		dev_warn(xvcu->dev, "failed to set logicoreIP refclk rate %d\n",
+			 ret);
ret = clk_prepare_enable(xvcu->pll_ref);
if (ret) {
- dev_err(xvcu->dev, "failed to enable pll_ref clock source\n");
+ dev_err(xvcu->dev, "failed to enable pll_ref clock source %d\n",
+ ret);
return ret;
}
- refclk = clk_get_rate(xvcu->pll_ref);
-
- /*
- * The divide-by-2 should be always enabled (==1)
- * to meet the timing in the design.
- * Otherwise, it's an error
- */
- vcu_pll_ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_PLL_CTRL);
- clkoutdiv = vcu_pll_ctrl >> VCU_PLL_CTRL_CLKOUTDIV_SHIFT;
- clkoutdiv = clkoutdiv & VCU_PLL_CTRL_CLKOUTDIV_MASK;
- if (clkoutdiv != 1) {
- dev_err(xvcu->dev, "clkoutdiv value is invalid\n");
- return -EINVAL;
- }
+ ret = clk_set_rate(xvcu->mcu_enc, mcuclk);
+ if (ret)
+ dev_warn(xvcu->dev, "failed to set logicoreIP mcu clk rate %d\n",
+ ret);
- for (i = ARRAY_SIZE(xvcu_pll_cfg) - 1; i >= 0; i--) {
- const struct xvcu_pll_cfg *cfg = &xvcu_pll_cfg[i];
-
- fvco = cfg->fbdiv * refclk;
- if (fvco >= FVCO_MIN && fvco <= FVCO_MAX) {
- pll_clk = fvco / VCU_PLL_DIV2;
- if (fvco % VCU_PLL_DIV2 != 0)
- pll_clk++;
- mod = pll_clk % coreclk;
- if (mod < LIMIT) {
- divisor_core = pll_clk / coreclk;
- } else if (coreclk - mod < LIMIT) {
- divisor_core = pll_clk / coreclk;
- divisor_core++;
- } else {
- continue;
- }
- if (divisor_core >= DIVISOR_MIN &&
- divisor_core <= DIVISOR_MAX) {
- found = cfg;
- divisor_mcu = pll_clk / mcuclk;
- mod = pll_clk % mcuclk;
- if (mcuclk - mod < LIMIT)
- divisor_mcu++;
- break;
- }
- }
+ ret = clk_prepare_enable(xvcu->mcu_enc);
+ if (ret) {
+ dev_err(xvcu->dev, "failed to enable mcu_enc %d\n", ret);
+ goto error_mcu_enc;
}
- if (!found) {
- dev_err(xvcu->dev, "Invalid clock combination.\n");
- return -EINVAL;
+ ret = clk_set_rate(xvcu->mcu_dec, mcuclk);
+ if (ret)
+ dev_warn(xvcu->dev, "failed to set logicoreIP mcu clk rate %d\n",
+ ret);
+
+ ret = clk_prepare_enable(xvcu->mcu_dec);
+ if (ret) {
+ dev_err(xvcu->dev, "failed to enable mcu_dec %d\n", ret);
+ goto error_mcu_dec;
}
- xvcu->coreclk = pll_clk / divisor_core;
- mcuclk = pll_clk / divisor_mcu;
- dev_dbg(xvcu->dev, "Actual Ref clock freq is %uHz\n", refclk);
- dev_dbg(xvcu->dev, "Actual Core clock freq is %uHz\n", xvcu->coreclk);
- dev_dbg(xvcu->dev, "Actual Mcu clock freq is %uHz\n", mcuclk);
-
- vcu_pll_ctrl &= ~(VCU_PLL_CTRL_FBDIV_MASK << VCU_PLL_CTRL_FBDIV_SHIFT);
- vcu_pll_ctrl |= (found->fbdiv & VCU_PLL_CTRL_FBDIV_MASK) <<
- VCU_PLL_CTRL_FBDIV_SHIFT;
- vcu_pll_ctrl &= ~(VCU_PLL_CTRL_POR_IN_MASK <<
- VCU_PLL_CTRL_POR_IN_SHIFT);
- vcu_pll_ctrl |= (VCU_PLL_CTRL_DEFAULT & VCU_PLL_CTRL_POR_IN_MASK) <<
- VCU_PLL_CTRL_POR_IN_SHIFT;
- vcu_pll_ctrl &= ~(VCU_PLL_CTRL_PWR_POR_MASK <<
- VCU_PLL_CTRL_PWR_POR_SHIFT);
- vcu_pll_ctrl |= (VCU_PLL_CTRL_DEFAULT & VCU_PLL_CTRL_PWR_POR_MASK) <<
- VCU_PLL_CTRL_PWR_POR_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_PLL_CTRL, vcu_pll_ctrl);
-
- /* Set divisor for the core and mcu clock */
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_ENC_CORE_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_core & VCU_PLL_DIVISOR_MASK) <<
- VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_ENC_CORE_CTRL, ctrl);
-
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_DEC_CORE_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_core & VCU_PLL_DIVISOR_MASK) <<
- VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_DEC_CORE_CTRL, ctrl);
-
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_ENC_MCU_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_mcu & VCU_PLL_DIVISOR_MASK) << VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_ENC_MCU_CTRL, ctrl);
-
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_DEC_MCU_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_mcu & VCU_PLL_DIVISOR_MASK) << VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_DEC_MCU_CTRL, ctrl);
-
- /* Set RES, CP, LFHF, LOCK_CNT and LOCK_DLY cfg values */
- cfg_val = (found->res << VCU_PLL_CFG_RES_SHIFT) |
- (found->cp << VCU_PLL_CFG_CP_SHIFT) |
- (found->lfhf << VCU_PLL_CFG_LFHF_SHIFT) |
- (found->lock_cnt << VCU_PLL_CFG_LOCK_CNT_SHIFT) |
- (found->lock_dly << VCU_PLL_CFG_LOCK_DLY_SHIFT);
- xvcu_write(xvcu->vcu_slcr_ba, VCU_PLL_CFG, cfg_val);
+ ret = clk_set_rate(xvcu->core_enc, coreclk);
+ if (ret)
+ dev_warn(xvcu->dev, "failed to set logicoreIP core clk rate %d\n",
+ ret);
- return 0;
-}
+ ret = clk_prepare_enable(xvcu->core_enc);
+ if (ret) {
+ dev_err(xvcu->dev, "failed to enable core_enc %d\n", ret);
+ goto error_core_enc;
+ }
-/**
- * xvcu_set_pll - PLL init sequence
- * @xvcu: Pointer to the xvcu_device structure
- *
- * Call the api to set the PLL info and once that is done then
- * init the PLL sequence to make the PLL stable.
- *
- * Return: Returns status, either success or error+reason
- */
-static int xvcu_set_pll(struct xvcu_device *xvcu)
-{
- u32 lock_status;
- unsigned long timeout;
- int ret;
+ ret = clk_set_rate(xvcu->core_dec, coreclk);
+ if (ret)
+ dev_warn(xvcu->dev, "failed to set logicoreIP core clk rate %d\n",
+ ret);
- ret = xvcu_set_vcu_pll_info(xvcu);
+ ret = clk_prepare_enable(xvcu->core_dec);
if (ret) {
- dev_err(xvcu->dev, "failed to set pll info\n");
- return ret;
+ dev_err(xvcu->dev, "failed to enable core_dec %d\n", ret);
+ goto error_core_dec;
}
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 1, VCU_PLL_CTRL_BYPASS_MASK,
- VCU_PLL_CTRL_BYPASS_SHIFT);
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 1, VCU_PLL_CTRL_RESET_MASK,
- VCU_PLL_CTRL_RESET_SHIFT);
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 0, VCU_PLL_CTRL_RESET_MASK,
- VCU_PLL_CTRL_RESET_SHIFT);
- /*
- * Defined the timeout for the max time to wait the
- * PLL_STATUS to be locked.
- */
- timeout = jiffies + msecs_to_jiffies(2000);
- do {
- lock_status = xvcu_read(xvcu->vcu_slcr_ba, VCU_PLL_STATUS);
- if (lock_status & VCU_PLL_STATUS_LOCK_STATUS_MASK) {
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 0, VCU_PLL_CTRL_BYPASS_MASK,
- VCU_PLL_CTRL_BYPASS_SHIFT);
- return 0;
- }
- } while (!time_after(jiffies, timeout));
-
- /* PLL is not locked even after the timeout of the 2sec */
- dev_err(xvcu->dev, "PLL is not locked\n");
- return -ETIMEDOUT;
+ return 0;
+
+error_core_dec:
+ clk_disable_unprepare(xvcu->core_enc);
+error_core_enc:
+ clk_disable_unprepare(xvcu->mcu_dec);
+error_mcu_dec:
+ clk_disable_unprepare(xvcu->mcu_enc);
+error_mcu_enc:
+ clk_disable_unprepare(xvcu->pll_ref);
+
+ return ret;
}
/**
@@ -496,8 +253,8 @@ static int xvcu_set_pll(struct xvcu_device *xvcu)
*/
static int xvcu_probe(struct platform_device *pdev)
{
- struct resource *res;
- struct xvcu_device *xvcu;
+ struct xvcu_priv *xvcu;
+ struct xvcu_device *xvcu_core = dev_get_drvdata(pdev->dev.parent);
int ret;
xvcu = devm_kzalloc(&pdev->dev, sizeof(*xvcu), GFP_KERNEL);
@@ -505,85 +262,61 @@ static int xvcu_probe(struct platform_device *pdev)
return -ENOMEM;
xvcu->dev = &pdev->dev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vcu_slcr");
- if (!res) {
- dev_err(&pdev->dev, "get vcu_slcr memory resource failed.\n");
- return -ENODEV;
- }
+ xvcu->vcu_slcr_ba = xvcu_core->vcu_slcr_ba;
+ xvcu->logicore_reg_ba = xvcu_core->logicore_reg_ba;
- xvcu->vcu_slcr_ba = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!xvcu->vcu_slcr_ba) {
- dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
- return -ENOMEM;
+ xvcu->pll_ref = devm_clk_get(pdev->dev.parent, "pll_ref");
+ if (IS_ERR(xvcu->pll_ref)) {
+ dev_err(&pdev->dev, "Could not get pll_ref clock\n");
+ return PTR_ERR(xvcu->pll_ref);
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "logicore");
- if (!res) {
- dev_err(&pdev->dev, "get logicore memory resource failed.\n");
- return -ENODEV;
+ xvcu->core_enc = devm_clk_get(pdev->dev.parent, "vcu_core_enc");
+ if (IS_ERR(xvcu->core_enc)) {
+ dev_err(&pdev->dev, "Could not get core_enc clock\n");
+ return PTR_ERR(xvcu->core_enc);
}
- xvcu->logicore_reg_ba = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!xvcu->logicore_reg_ba) {
- dev_err(&pdev->dev, "logicore register mapping failed.\n");
- return -ENOMEM;
+ xvcu->core_dec = devm_clk_get(pdev->dev.parent, "vcu_core_dec");
+ if (IS_ERR(xvcu->core_dec)) {
+ dev_err(&pdev->dev, "Could not get vcu_core_dec clock\n");
+ return PTR_ERR(xvcu->core_dec);
}
- xvcu->aclk = devm_clk_get(&pdev->dev, "aclk");
- if (IS_ERR(xvcu->aclk)) {
- dev_err(&pdev->dev, "Could not get aclk clock\n");
- return PTR_ERR(xvcu->aclk);
+ xvcu->mcu_enc = devm_clk_get(pdev->dev.parent, "vcu_mcu_enc");
+ if (IS_ERR(xvcu->mcu_enc)) {
+ dev_err(&pdev->dev, "Could not get mcu_enc clock\n");
+ return PTR_ERR(xvcu->mcu_enc);
}
- xvcu->pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
- if (IS_ERR(xvcu->pll_ref)) {
- dev_err(&pdev->dev, "Could not get pll_ref clock\n");
- return PTR_ERR(xvcu->pll_ref);
+ xvcu->mcu_dec = devm_clk_get(pdev->dev.parent, "vcu_mcu_dec");
+ if (IS_ERR(xvcu->mcu_dec)) {
+ dev_err(&pdev->dev, "Could not get mcu_dec clock\n");
+ return PTR_ERR(xvcu->mcu_dec);
}
- ret = clk_prepare_enable(xvcu->aclk);
+	/* Do the PLL settings based on the ref clk, core and mcu clk freq */
+ ret = xvcu_set_vcu_pll(xvcu);
if (ret) {
- dev_err(&pdev->dev, "aclk clock enable failed\n");
+ dev_err(&pdev->dev, "Failed to set the pll\n");
return ret;
}
- ret = clk_prepare_enable(xvcu->pll_ref);
- if (ret) {
- dev_err(&pdev->dev, "pll_ref clock enable failed\n");
- goto error_aclk;
- }
-
- /*
- * Do the Gasket isolation and put the VCU out of reset
- * Bit 0 : Gasket isolation
- * Bit 1 : put VCU out of reset
- */
- xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
+ dev_set_drvdata(&pdev->dev, xvcu);
- /* Do the PLL Settings based on the ref clk,core and mcu clk freq */
- ret = xvcu_set_pll(xvcu);
+ ret = devm_of_platform_populate(pdev->dev.parent);
if (ret) {
- dev_err(&pdev->dev, "Failed to set the pll\n");
- goto error_pll_ref;
+ dev_err(&pdev->dev, "Failed to register allegro codecs\n");
+ return ret;
}
- dev_set_drvdata(&pdev->dev, xvcu);
-
dev_info(&pdev->dev, "%s: Probed successfully\n", __func__);
- return 0;
-
-error_pll_ref:
- clk_disable_unprepare(xvcu->pll_ref);
-error_aclk:
- clk_disable_unprepare(xvcu->aclk);
return ret;
}
/**
- * xvcu_remove - Insert gasket isolation
+ * xvcu_remove - Depopulate the child nodes, insert gasket isolation
* and disable the clock
* @pdev: Pointer to the platform_device structure
*
@@ -592,32 +325,33 @@ error_aclk:
*/
static int xvcu_remove(struct platform_device *pdev)
{
- struct xvcu_device *xvcu;
+ struct xvcu_priv *xvcu;
xvcu = platform_get_drvdata(pdev);
if (!xvcu)
return -ENODEV;
- /* Add the the Gasket isolation and put the VCU in reset. */
- xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
+ clk_disable_unprepare(xvcu->core_enc);
+ devm_clk_put(pdev->dev.parent, xvcu->core_enc);
+
+ clk_disable_unprepare(xvcu->core_dec);
+ devm_clk_put(pdev->dev.parent, xvcu->core_dec);
+
+ clk_disable_unprepare(xvcu->mcu_enc);
+ devm_clk_put(pdev->dev.parent, xvcu->mcu_enc);
+
+ clk_disable_unprepare(xvcu->mcu_dec);
+ devm_clk_put(pdev->dev.parent, xvcu->mcu_dec);
clk_disable_unprepare(xvcu->pll_ref);
- clk_disable_unprepare(xvcu->aclk);
+ devm_clk_put(pdev->dev.parent, xvcu->pll_ref);
return 0;
}
-static const struct of_device_id xvcu_of_id_table[] = {
- { .compatible = "xlnx,vcu" },
- { .compatible = "xlnx,vcu-logicoreip-1.0" },
- { }
-};
-MODULE_DEVICE_TABLE(of, xvcu_of_id_table);
-
static struct platform_driver xvcu_driver = {
.driver = {
.name = "xilinx-vcu",
- .of_match_table = xvcu_of_id_table,
},
.probe = xvcu_probe,
.remove = xvcu_remove,
@@ -628,3 +362,4 @@ module_platform_driver(xvcu_driver);
MODULE_AUTHOR("Dhaval Shah <dshah@xilinx.com>");
MODULE_DESCRIPTION("Xilinx VCU init Driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:xilinx-vcu");
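
After the split, the register mappings and all five clocks are owned by the
xilinx-vcu-core parent; this child only borrows them, which is why every
devm_clk_get() above passes pdev->dev.parent and why probe starts from the
parent's driver data. The access pattern in miniature:

	/* Child cell of the xilinx-vcu-core MFD device. */
	struct xvcu_device *core = dev_get_drvdata(pdev->dev.parent);
	struct clk *clk;

	clk = devm_clk_get(pdev->dev.parent, "vcu_core_enc");
	if (IS_ERR(clk))
		return PTR_ERR(clk);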
diff --git a/drivers/soc/xilinx/xlnx_vcu_clk.c b/drivers/soc/xilinx/xlnx_vcu_clk.c
new file mode 100644
index 000000000000..5263f0eac4c3
--- /dev/null
+++ b/drivers/soc/xilinx/xlnx_vcu_clk.c
@@ -0,0 +1,915 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx VCU clock driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Rajan Vaja <rajan.vaja@xilinx.com>
+ * Tejas Patel <tejas.patel@xilinx.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <soc/xilinx/xlnx_vcu.h>
+
+/* vcu slcr registers, bitmask and shift */
+#define VCU_PLL_CTRL 0x24
+#define VCU_PLL_CTRL_RESET_MASK BIT(0)
+#define VCU_PLL_CTRL_RESET_SHIFT 0
+#define VCU_PLL_CTRL_BYPASS_MASK BIT(3)
+#define VCU_PLL_CTRL_BYPASS_SHIFT 3
+#define VCU_PLL_CTRL_FBDIV_MASK 0x7f
+#define VCU_PLL_CTRL_FBDIV_SHIFT 8
+#define VCU_PLL_CTRL_POR_IN_MASK BIT(1)
+#define VCU_PLL_CTRL_POR_IN_SHIFT 1
+#define VCU_PLL_CTRL_PWR_POR_MASK BIT(2)
+#define VCU_PLL_CTRL_PWR_POR_SHIFT 2
+#define VCU_PLL_CTRL_CLKOUTDIV_MASK 0x03
+#define VCU_PLL_CTRL_CLKOUTDIV_SHIFT 16
+#define VCU_PLL_CTRL_DEFAULT 0
+
+#define VCU_PLL_CFG 0x28
+#define VCU_PLL_CFG_RES_MASK 0x0f
+#define VCU_PLL_CFG_RES_SHIFT 0
+#define VCU_PLL_CFG_CP_MASK 0x0f
+#define VCU_PLL_CFG_CP_SHIFT 5
+#define VCU_PLL_CFG_LFHF_MASK 0x03
+#define VCU_PLL_CFG_LFHF_SHIFT 10
+#define VCU_PLL_CFG_LOCK_CNT_MASK 0x03ff
+#define VCU_PLL_CFG_LOCK_CNT_SHIFT 13
+#define VCU_PLL_CFG_LOCK_DLY_MASK 0x7f
+#define VCU_PLL_CFG_LOCK_DLY_SHIFT 25
+#define VCU_ENC_CORE_CTRL 0x30
+#define VCU_ENC_MCU_CTRL 0x34
+#define VCU_ENC_MCU_CTRL_GATE_BIT BIT(12)
+#define VCU_DEC_CORE_CTRL 0x38
+#define VCU_DEC_MCU_CTRL 0x3c
+#define VCU_PLL_DIVISOR_MASK 0x3f
+#define VCU_PLL_DIVISOR_SHIFT 4
+#define VCU_SRCSEL_MASK 0x01
+#define VCU_SRCSEL_SHIFT 0
+#define VCU_SRCSEL_PLL 1
+
+#define VCU_PLL_STATUS 0x60
+#define VCU_PLL_STATUS_LOCK_STATUS_MASK 0x01
+#define VCU_PLL_LOCK_TIMEOUT 2000000
+
+#define PLL_FBDIV_MIN 25
+#define PLL_FBDIV_MAX 125
+
+#define MHZ 1000000
+#define FVCO_MIN (1500U * MHZ)
+#define FVCO_MAX (3000U * MHZ)
+#define DIVISOR_MIN 0
+#define DIVISOR_MAX 63
+#define FRAC 100
+#define LIMIT (10 * MHZ)
+
+#define FRAC_OFFSET 0x8
+#define PLLFCFG_FRAC_EN BIT(31)
+#define FRAC_DIV 0x10000 /* 2^16 */
+
+#define to_vcu_pll(_hw) container_of(_hw, struct vcu_pll, hw)
+#define div_mask(width) ((1 << (width)) - 1)
+
+enum pll_mode {
+ PLL_MODE_INT,
+ PLL_MODE_FRAC,
+};
+
+enum vcu_clks {
+ vcu_pll_half, vcu_core_enc, vcu_core_dec,
+ mcu_core_enc, mcu_core_dec, clk_max
+};
+
+/**
+ * struct xvcu_pll_cfg - Helper data
+ * @fbdiv: The integer portion of the feedback divider to the PLL
+ * @cp: PLL charge pump control
+ * @res: PLL loop filter resistor control
+ * @lfhf: PLL loop filter high frequency capacitor control
+ * @lock_dly: Lock circuit configuration settings for lock windowsize
+ * @lock_cnt: Lock circuit counter setting
+ */
+struct xvcu_pll_cfg {
+ u32 fbdiv;
+ u32 cp;
+ u32 res;
+ u32 lfhf;
+ u32 lock_dly;
+ u32 lock_cnt;
+};
+
+/**
+ * struct vcu_pll - VCU PLL control/status data
+ * @hw: Clock hardware
+ * @pll_ctrl: PLL control register address
+ * @pll_status: PLL status register address
+ * @pll_cfg: PLL config register address
+ * @lockbit: PLL lock status bit
+ */
+struct vcu_pll {
+ struct clk_hw hw;
+ void __iomem *pll_ctrl;
+ void __iomem *pll_status;
+ void __iomem *pll_cfg;
+ u8 lockbit;
+};
+
+static struct clk_hw_onecell_data *vcu_clk_data;
+static const char * const vcu_mux_parents[] = {
+ "dummy_name",
+ "vcu_pll_half"
+};
+
+static DEFINE_SPINLOCK(mcu_enc_lock);
+static DEFINE_SPINLOCK(mcu_dec_lock);
+static DEFINE_SPINLOCK(core_enc_lock);
+static DEFINE_SPINLOCK(core_dec_lock);
+
+static const struct xvcu_pll_cfg xvcu_pll_cfg[] = {
+ { 25, 3, 10, 3, 63, 1000 },
+ { 26, 3, 10, 3, 63, 1000 },
+ { 27, 4, 6, 3, 63, 1000 },
+ { 28, 4, 6, 3, 63, 1000 },
+ { 29, 4, 6, 3, 63, 1000 },
+ { 30, 4, 6, 3, 63, 1000 },
+ { 31, 6, 1, 3, 63, 1000 },
+ { 32, 6, 1, 3, 63, 1000 },
+ { 33, 4, 10, 3, 63, 1000 },
+ { 34, 5, 6, 3, 63, 1000 },
+ { 35, 5, 6, 3, 63, 1000 },
+ { 36, 5, 6, 3, 63, 1000 },
+ { 37, 5, 6, 3, 63, 1000 },
+ { 38, 5, 6, 3, 63, 975 },
+ { 39, 3, 12, 3, 63, 950 },
+ { 40, 3, 12, 3, 63, 925 },
+ { 41, 3, 12, 3, 63, 900 },
+ { 42, 3, 12, 3, 63, 875 },
+ { 43, 3, 12, 3, 63, 850 },
+ { 44, 3, 12, 3, 63, 850 },
+ { 45, 3, 12, 3, 63, 825 },
+ { 46, 3, 12, 3, 63, 800 },
+ { 47, 3, 12, 3, 63, 775 },
+ { 48, 3, 12, 3, 63, 775 },
+ { 49, 3, 12, 3, 63, 750 },
+ { 50, 3, 12, 3, 63, 750 },
+ { 51, 3, 2, 3, 63, 725 },
+ { 52, 3, 2, 3, 63, 700 },
+ { 53, 3, 2, 3, 63, 700 },
+ { 54, 3, 2, 3, 63, 675 },
+ { 55, 3, 2, 3, 63, 675 },
+ { 56, 3, 2, 3, 63, 650 },
+ { 57, 3, 2, 3, 63, 650 },
+ { 58, 3, 2, 3, 63, 625 },
+ { 59, 3, 2, 3, 63, 625 },
+ { 60, 3, 2, 3, 63, 625 },
+ { 61, 3, 2, 3, 63, 600 },
+ { 62, 3, 2, 3, 63, 600 },
+ { 63, 3, 2, 3, 63, 600 },
+ { 64, 3, 2, 3, 63, 600 },
+ { 65, 3, 2, 3, 63, 600 },
+ { 66, 3, 2, 3, 63, 600 },
+ { 67, 3, 2, 3, 63, 600 },
+ { 68, 3, 2, 3, 63, 600 },
+ { 69, 3, 2, 3, 63, 600 },
+ { 70, 3, 2, 3, 63, 600 },
+ { 71, 3, 2, 3, 63, 600 },
+ { 72, 3, 2, 3, 63, 600 },
+ { 73, 3, 2, 3, 63, 600 },
+ { 74, 3, 2, 3, 63, 600 },
+ { 75, 3, 2, 3, 63, 600 },
+ { 76, 3, 2, 3, 63, 600 },
+ { 77, 3, 2, 3, 63, 600 },
+ { 78, 3, 2, 3, 63, 600 },
+ { 79, 3, 2, 3, 63, 600 },
+ { 80, 3, 2, 3, 63, 600 },
+ { 81, 3, 2, 3, 63, 600 },
+ { 82, 3, 2, 3, 63, 600 },
+ { 83, 4, 2, 3, 63, 600 },
+ { 84, 4, 2, 3, 63, 600 },
+ { 85, 4, 2, 3, 63, 600 },
+ { 86, 4, 2, 3, 63, 600 },
+ { 87, 4, 2, 3, 63, 600 },
+ { 88, 4, 2, 3, 63, 600 },
+ { 89, 4, 2, 3, 63, 600 },
+ { 90, 4, 2, 3, 63, 600 },
+ { 91, 4, 2, 3, 63, 600 },
+ { 92, 4, 2, 3, 63, 600 },
+ { 93, 4, 2, 3, 63, 600 },
+ { 94, 4, 2, 3, 63, 600 },
+ { 95, 4, 2, 3, 63, 600 },
+ { 96, 4, 2, 3, 63, 600 },
+ { 97, 4, 2, 3, 63, 600 },
+ { 98, 4, 2, 3, 63, 600 },
+ { 99, 4, 2, 3, 63, 600 },
+ { 100, 4, 2, 3, 63, 600 },
+ { 101, 4, 2, 3, 63, 600 },
+ { 102, 4, 2, 3, 63, 600 },
+ { 103, 5, 2, 3, 63, 600 },
+ { 104, 5, 2, 3, 63, 600 },
+ { 105, 5, 2, 3, 63, 600 },
+ { 106, 5, 2, 3, 63, 600 },
+ { 107, 3, 4, 3, 63, 600 },
+ { 108, 3, 4, 3, 63, 600 },
+ { 109, 3, 4, 3, 63, 600 },
+ { 110, 3, 4, 3, 63, 600 },
+ { 111, 3, 4, 3, 63, 600 },
+ { 112, 3, 4, 3, 63, 600 },
+ { 113, 3, 4, 3, 63, 600 },
+ { 114, 3, 4, 3, 63, 600 },
+ { 115, 3, 4, 3, 63, 600 },
+ { 116, 3, 4, 3, 63, 600 },
+ { 117, 3, 4, 3, 63, 600 },
+ { 118, 3, 4, 3, 63, 600 },
+ { 119, 3, 4, 3, 63, 600 },
+ { 120, 3, 4, 3, 63, 600 },
+ { 121, 3, 4, 3, 63, 600 },
+ { 122, 3, 4, 3, 63, 600 },
+ { 123, 3, 4, 3, 63, 600 },
+ { 124, 3, 4, 3, 63, 600 },
+ { 125, 3, 4, 3, 63, 600 },
+};
+
+static int xvcu_divider_get_val(unsigned long rate, unsigned long parent_rate,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags)
+{
+ unsigned int div;
+
+ if (flags & CLK_DIVIDER_ROUND_CLOSEST)
+ div = DIV_ROUND_CLOSEST_ULL((u64)parent_rate, rate);
+ else
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+
+ return min_t(unsigned int, div, div_mask(width));
+}
+
+static unsigned long xvcu_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int val;
+
+ val = readl(divider->reg) >> divider->shift;
+ val &= div_mask(divider->width);
+
+ return divider_recalc_rate(hw, parent_rate, val, divider->table,
+ divider->flags, divider->width);
+}
+
+static long xvcu_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int bestdiv;
+
+ bestdiv = xvcu_divider_get_val(rate, *prate, divider->table,
+ divider->width, divider->flags);
+
+ *prate = rate * bestdiv;
+
+ return rate;
+}
+
+static int xvcu_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int value;
+ u32 val;
+
+ value = xvcu_divider_get_val(rate, parent_rate, divider->table,
+ divider->width, divider->flags);
+ if (value < 0)
+ return value;
+
+ val = readl(divider->reg);
+ val &= ~(div_mask(divider->width) << divider->shift);
+ val |= (u32)value << divider->shift;
+ writel(val, divider->reg);
+
+ return 0;
+}
+
+static const struct clk_ops xvcu_divider_ops = {
+ .recalc_rate = xvcu_divider_recalc_rate,
+ .round_rate = xvcu_divider_round_rate,
+ .set_rate = xvcu_divider_set_rate,
+};
+
+/**
+ * xvcu_register_divider - Register custom divider hardware
+ * @dev: VCU clock device
+ * @name: Divider name
+ * @parent_name: Divider parent name
+ * @flags: Clock flags
+ * @reg: Divider register base address
+ * @shift: Divider bits shift
+ * @width: Divider bits width
+ * @clk_divider_flags: Divider specific flags
+ * @lock: Shared register lock
+ *
+ * Register custom divider hardware to CCF.
+ *
+ * Return: Clock hardware for generated clock
+ */
+static struct clk_hw *xvcu_register_divider(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *reg, u8 shift,
+ u8 width, u8 clk_divider_flags,
+ spinlock_t *lock)
+{
+ struct clk_divider *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ /* allocate the divider */
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &xvcu_divider_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_divider assignments */
+ div->reg = reg;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+
+ /* register the clock */
+ hw = &div->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(div);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+/**
+ * xvcu_pll_bypass_ctrl - Enable/Disable PLL bypass mode
+ * @pll: PLL data
+ * @enable: Enable/Disable flag
+ *
+ * Enable/Disable PLL bypass mode:
+ * 0 - Disable
+ * 1 - Enable
+ */
+static void xvcu_pll_bypass_ctrl(struct vcu_pll *pll, bool enable)
+{
+ u32 reg;
+
+ reg = readl(pll->pll_ctrl);
+ if (enable)
+ reg |= VCU_PLL_CTRL_BYPASS_MASK;
+ else
+ reg &= ~VCU_PLL_CTRL_BYPASS_MASK;
+ writel(reg, pll->pll_ctrl);
+}
+
+/**
+ * xvcu_pll_config - Configure PLL based on FBDIV value
+ * @pll: PLL data
+ *
+ * PLL needs to be configured before taking out of reset. Configuration
+ * data depends on the value of FBDIV for proper PLL locking.
+ */
+static void xvcu_pll_config(struct vcu_pll *pll)
+{
+ unsigned int fbdiv, reg;
+ int i;
+
+ reg = readl(pll->pll_ctrl);
+ fbdiv = (reg >> VCU_PLL_CTRL_FBDIV_SHIFT) & VCU_PLL_CTRL_FBDIV_MASK;
+
+ for (i = ARRAY_SIZE(xvcu_pll_cfg) - 1; i >= 0; i--) {
+ if (fbdiv != xvcu_pll_cfg[i].fbdiv)
+ continue;
+
+ /* Set RES, CP, LFHF, LOCK_CNT and LOCK_DLY cfg values */
+ reg = (xvcu_pll_cfg[i].res << VCU_PLL_CFG_RES_SHIFT) |
+ (xvcu_pll_cfg[i].cp << VCU_PLL_CFG_CP_SHIFT) |
+ (xvcu_pll_cfg[i].lfhf << VCU_PLL_CFG_LFHF_SHIFT) |
+ (xvcu_pll_cfg[i].lock_cnt << VCU_PLL_CFG_LOCK_CNT_SHIFT) |
+ (xvcu_pll_cfg[i].lock_dly << VCU_PLL_CFG_LOCK_DLY_SHIFT);
+ writel(reg, pll->pll_cfg);
+ }
+}
+
+/**
+ * xvcu_pll_enable_disable - Enable/Disable PLL
+ * @pll: PLL data
+ * @enable: Enable/Disable flag
+ *
+ * Enable/Disable PLL based on request:
+ * 0 - Disable
+ * 1 - Enable
+ */
+static void xvcu_pll_enable_disable(struct vcu_pll *pll, bool enable)
+{
+ u32 reg;
+
+ reg = readl(pll->pll_ctrl);
+ if (enable)
+ reg &= ~(VCU_PLL_CTRL_RESET_MASK | VCU_PLL_CTRL_POR_IN_MASK |
+ VCU_PLL_CTRL_PWR_POR_MASK);
+ else
+ reg |= (VCU_PLL_CTRL_RESET_MASK | VCU_PLL_CTRL_POR_IN_MASK |
+ VCU_PLL_CTRL_PWR_POR_MASK);
+ writel(reg, pll->pll_ctrl);
+}
+
+/**
+ * xvcu_pll_is_enabled - Check if PLL is enabled or not
+ * @hw: Clock hardware
+ *
+ * Check if PLL is enabled or not. PLL enabled means PLL is not in
+ * reset state.
+ *
+ * Return: PLL status (0 - Disabled, 1 - Enabled)
+ */
+static int xvcu_pll_is_enabled(struct clk_hw *hw)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ u32 reg;
+
+ reg = readl(pll->pll_ctrl);
+
+ return !(reg & (VCU_PLL_CTRL_RESET_MASK | VCU_PLL_CTRL_POR_IN_MASK |
+ VCU_PLL_CTRL_PWR_POR_MASK));
+}
+
+/**
+ * xvcu_pll_enable - Enable PLL
+ * @hw: Clock hardware
+ *
+ * Enable PLL if it is not enabled. Configure PLL, enable and wait for
+ * the PLL lock. Put PLL into bypass state during PLL configuration.
+ *
+ * Return: 0 on success else error code
+ */
+static int xvcu_pll_enable(struct clk_hw *hw)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ u32 reg;
+ int ret;
+
+ if (xvcu_pll_is_enabled(hw))
+ return 0;
+
+ pr_info("VCU PLL: enable\n");
+
+ xvcu_pll_bypass_ctrl(pll, 1);
+
+ xvcu_pll_config(pll);
+
+ xvcu_pll_enable_disable(pll, 1);
+
+ ret = readl_poll_timeout_atomic(pll->pll_status, reg,
+ reg & VCU_PLL_STATUS_LOCK_STATUS_MASK,
+ 1, VCU_PLL_LOCK_TIMEOUT);
+ if (ret) {
+ pr_err("VCU PLL is not locked\n");
+ return ret;
+ }
+
+ xvcu_pll_bypass_ctrl(pll, 0);
+
+ return ret;
+}
+
+/**
+ * xvcu_pll_disable - Disable PLL
+ * @hw: Clock hardware
+ *
+ * Disable PLL if it is enabled.
+ */
+static void xvcu_pll_disable(struct clk_hw *hw)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+
+ if (!xvcu_pll_is_enabled(hw))
+ return;
+
+ pr_info("PLL: shutdown\n");
+ xvcu_pll_enable_disable(pll, 0);
+}
+
+/**
+ * xvcu_pll_frac_get_mode - Get PLL fraction mode
+ * @hw: Clock hardware
+ *
+ * Check if PLL is configured for integer mode or fraction mode.
+ *
+ * Return: PLL mode:
+ * PLL_MODE_FRAC - Fraction mode
+ * PLL_MODE_INT - Integer mode
+ */
+static inline enum pll_mode xvcu_pll_frac_get_mode(struct clk_hw *hw)
+{
+ struct vcu_pll *clk = to_vcu_pll(hw);
+ u32 reg;
+
+ reg = readl(clk->pll_ctrl + FRAC_OFFSET);
+
+ reg = reg & PLLFCFG_FRAC_EN;
+ return reg ? PLL_MODE_FRAC : PLL_MODE_INT;
+}
+
+/**
+ * xvcu_pll_frac_set_mode - Set PLL fraction mode
+ * @hw: Clock hardware
+ * @on: Enable/Disable flag
+ *
+ * Configure PLL for integer mode or fraction mode.
+ * 1 - Fraction mode
+ * 0 - Integer mode
+ */
+static inline void xvcu_pll_frac_set_mode(struct clk_hw *hw, bool on)
+{
+ struct vcu_pll *clk = to_vcu_pll(hw);
+	u32 reg;
+
+	reg = readl(clk->pll_ctrl + FRAC_OFFSET);
+	if (on)
+		reg |= PLLFCFG_FRAC_EN;
+	else
+		reg &= ~PLLFCFG_FRAC_EN;
+	writel(reg, clk->pll_ctrl + FRAC_OFFSET);
+}
+
+static long vcu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u32 fbdiv;
+ long rate_div, f;
+
+ /* Enable the fractional mode if needed */
+ rate_div = (rate * FRAC_DIV) / *prate;
+ f = rate_div % FRAC_DIV;
+ xvcu_pll_frac_set_mode(hw, !!f);
+
+ if (xvcu_pll_frac_get_mode(hw) == PLL_MODE_FRAC) {
+ if (rate > FVCO_MAX) {
+ fbdiv = rate / FVCO_MAX;
+ rate = rate / (fbdiv + 1);
+ }
+ if (rate < FVCO_MIN) {
+ fbdiv = DIV_ROUND_UP(FVCO_MIN, rate);
+ rate = rate * fbdiv;
+ }
+ return rate;
+ }
+
+ fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
+ fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
+ return *prate * fbdiv;
+}
+
+static unsigned long vcu_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ u32 fbdiv, data, reg;
+ unsigned long rate, frac;
+
+ reg = readl(pll->pll_ctrl);
+ fbdiv = (reg >> VCU_PLL_CTRL_FBDIV_SHIFT) & VCU_PLL_CTRL_FBDIV_MASK;
+
+ rate = parent_rate * fbdiv;
+ if (xvcu_pll_frac_get_mode(hw) == PLL_MODE_FRAC) {
+ data = (readl(pll->pll_ctrl + FRAC_OFFSET) & 0xFFFF);
+ frac = (parent_rate * data) / FRAC_DIV;
+ rate = rate + frac;
+ }
+
+ return rate;
+}
+
+static int vcu_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ u32 fbdiv, reg;
+ long rate_div, frac, m, f;
+
+ if (xvcu_pll_frac_get_mode(hw) == PLL_MODE_FRAC) {
+ rate_div = ((rate * FRAC_DIV) / parent_rate);
+ m = rate_div / FRAC_DIV;
+ f = rate_div % FRAC_DIV;
+ m = clamp_t(u32, m, (PLL_FBDIV_MIN), (PLL_FBDIV_MAX));
+ rate = parent_rate * m;
+ frac = (parent_rate * f) / FRAC_DIV;
+ reg = readl(pll->pll_ctrl);
+ reg &= ~(VCU_PLL_CTRL_FBDIV_MASK << VCU_PLL_CTRL_FBDIV_SHIFT);
+ reg |= m << VCU_PLL_CTRL_FBDIV_SHIFT;
+ writel(reg, pll->pll_ctrl);
+
+ reg = readl(pll->pll_ctrl + FRAC_OFFSET);
+ reg &= ~0xFFFF;
+ reg |= (f & 0xFFFF);
+ writel(reg, pll->pll_ctrl + FRAC_OFFSET);
+
+ return (rate + frac);
+ }
+
+ fbdiv = DIV_ROUND_CLOSEST(rate, parent_rate);
+ fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
+ reg = readl(pll->pll_ctrl);
+ reg &= ~(VCU_PLL_CTRL_FBDIV_MASK << VCU_PLL_CTRL_FBDIV_SHIFT);
+ reg |= fbdiv << VCU_PLL_CTRL_FBDIV_SHIFT;
+ writel(reg, pll->pll_ctrl);
+
+ return parent_rate * fbdiv;
+}
+
+static const struct clk_ops vcu_pll_ops = {
+ .enable = xvcu_pll_enable,
+ .disable = xvcu_pll_disable,
+ .is_enabled = xvcu_pll_is_enabled,
+ .round_rate = vcu_pll_round_rate,
+ .recalc_rate = vcu_pll_recalc_rate,
+ .set_rate = vcu_pll_set_rate,
+};
+
+/**
+ * xvcu_register_pll - Register VCU PLL
+ * @dev: VCU clock device
+ * @name: PLL name
+ * @parent: PLL parent
+ * @reg_base: PLL register base address
+ * @flags: Hardware specific flags
+ *
+ * Register PLL to CCF.
+ *
+ * Return: Clock hardware for generated clock
+ */
+static struct clk_hw *xvcu_register_pll(struct device *dev, const char *name,
+ const char *parent,
+ void __iomem *reg_base,
+ unsigned long flags)
+{
+ struct vcu_pll *pll;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ init.name = name;
+ init.parent_names = &parent;
+ init.ops = &vcu_pll_ops;
+ init.num_parents = 1;
+ init.flags = flags;
+
+ pll = devm_kmalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ pll->hw.init = &init;
+ pll->pll_ctrl = reg_base + VCU_PLL_CTRL;
+ pll->pll_status = reg_base + VCU_PLL_STATUS;
+ pll->pll_cfg = reg_base + VCU_PLL_CFG;
+ pll->lockbit = VCU_PLL_STATUS_LOCK_STATUS_MASK;
+
+ hw = &pll->hw;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ clk_hw_set_rate_range(hw, FVCO_MIN, FVCO_MAX);
+
+ return hw;
+}
+
+/**
+ * register_vcu_leaf_clocks - Register VCU leaf clocks
+ * @dev: VCU clock device
+ * @name: Clock name
+ * @parents: Clock parents
+ * @nparents: Clock parent count
+ * @default_parent: Default parent to set
+ * @reg: Clock control register address
+ * @lock: Clock register access lock
+ *
+ * Register VCU leaf clocks. These clocks are MCU/core
+ * encoder and decoder clocks. Topology for these clocks
+ * are Mux, Divisor and Gate.
+ *
+ * Return: Clock hardware for the generated gate clock
+ */
+static struct clk_hw *register_vcu_leaf_clocks(struct device *dev,
+ const char *name,
+ const char * const *parents,
+ u8 nparents,
+ struct clk *default_parent,
+ void __iomem *reg,
+ spinlock_t *lock)
+{
+ char *clk_mux, *clk_div;
+ struct clk_hw *hw;
+
+ clk_mux = devm_kasprintf(dev, GFP_KERNEL, "%s%s", name, "_mux");
+ hw = clk_hw_register_mux(dev, clk_mux, parents, nparents,
+ CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT,
+ reg, VCU_SRCSEL_SHIFT, 1, 0, lock);
+
+ if (default_parent)
+ clk_set_parent(hw->clk, default_parent);
+
+ clk_div = devm_kasprintf(dev, GFP_KERNEL, "%s%s", name, "_div");
+ xvcu_register_divider(dev, clk_div, clk_mux,
+ CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT,
+ reg, VCU_PLL_DIVISOR_SHIFT, 6,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_DIVIDER_ROUND_CLOSEST,
+ lock);
+
+ return clk_hw_register_gate(dev, name, clk_div,
+ CLK_SET_RATE_PARENT,
+ reg, 12, 0, lock);
+}
+
+/**
+ * unregister_vcu_leaf_clocks - Unregister VCU leaf clocks
+ * @hw: VCU leaf clock hardware
+ *
+ * Unregister VCU leaf clocks. These clocks are MCU/core
+ * encoder and decoder clocks. Unregister clocks in order
+ * from gate, div and mux maintaining their parent dependency.
+ *
+ */
+static void unregister_vcu_leaf_clocks(struct clk_hw *hw)
+{
+ struct clk_hw *parent;
+
+ parent = clk_hw_get_parent(hw);
+ clk_hw_unregister_gate(hw);
+ hw = parent;
+
+ parent = clk_hw_get_parent(hw);
+ clk_hw_unregister_divider(hw);
+ hw = parent;
+
+ clk_hw_unregister_mux(hw);
+}
+
+/**
+ * xvcu_clock_init - Initialize VCU clocks
+ * @dev: VCU clock device
+ * @reg_base: Clock register base address
+ *
+ * Register VCU PLL and clocks and add VCU to clock provider list.
+ *
+ * Return: 0 on success else error code.
+ */
+static int xvcu_clock_init(struct device *dev, void __iomem *reg_base)
+{
+ struct clk_hw *hw;
+ struct clk *ref_clk;
+ const char *parent;
+ u32 vcu_pll_ctrl, clkoutdiv;
+ int i;
+
+ ref_clk = devm_clk_get(dev, "pll_ref");
+ if (IS_ERR(ref_clk)) {
+ dev_err(dev, "failed to get pll_ref clock\n");
+ return PTR_ERR(ref_clk);
+ }
+
+ vcu_clk_data = devm_kzalloc(dev, sizeof(*vcu_clk_data) +
+ sizeof(*vcu_clk_data->hws) * clk_max,
+ GFP_KERNEL);
+ if (!vcu_clk_data)
+ return -ENOMEM;
+
+ parent = __clk_get_name(ref_clk);
+ hw = xvcu_register_pll(dev, "vcu_pll", parent, reg_base,
+ CLK_SET_RATE_NO_REPARENT);
+ if (IS_ERR(hw)) {
+ dev_err(dev, "VCU PLL registration failed\n");
+ return PTR_ERR(hw);
+ }
+
+ /*
+	 * The divide-by-2 should always be enabled (== 1) to meet the timing
+	 * constraints of the design; otherwise it is an error.
+ */
+ vcu_pll_ctrl = readl(reg_base + VCU_PLL_CTRL);
+ clkoutdiv = vcu_pll_ctrl >> VCU_PLL_CTRL_CLKOUTDIV_SHIFT;
+ clkoutdiv = clkoutdiv & VCU_PLL_CTRL_CLKOUTDIV_MASK;
+ if (clkoutdiv != 1) {
+ dev_err(dev, "clkoutdiv is invalid\n");
+ return -EINVAL;
+ }
+
+ vcu_clk_data->hws[vcu_pll_half] =
+ clk_hw_register_fixed_factor(dev, "vcu_pll_half", "vcu_pll",
+ CLK_SET_RATE_NO_REPARENT |
+ CLK_SET_RATE_PARENT,
+ 1, 2);
+
+ vcu_clk_data->hws[vcu_core_enc] =
+ register_vcu_leaf_clocks(dev, "vcu_core_enc_clk",
+ vcu_mux_parents, 2,
+ vcu_clk_data->hws[vcu_pll_half]->clk,
+ reg_base + VCU_ENC_CORE_CTRL,
+ &core_enc_lock);
+ vcu_clk_data->hws[vcu_core_dec] =
+ register_vcu_leaf_clocks(dev, "vcu_core_dec_clk",
+ vcu_mux_parents, 2,
+ vcu_clk_data->hws[vcu_pll_half]->clk,
+ reg_base + VCU_DEC_CORE_CTRL,
+ &core_dec_lock);
+ vcu_clk_data->hws[mcu_core_enc] =
+ register_vcu_leaf_clocks(dev, "mcu_core_enc_clk",
+ vcu_mux_parents, 2,
+ vcu_clk_data->hws[vcu_pll_half]->clk,
+ reg_base + VCU_ENC_MCU_CTRL,
+ &mcu_enc_lock);
+ vcu_clk_data->hws[mcu_core_dec] =
+ register_vcu_leaf_clocks(dev, "mcu_core_dec_clk",
+ vcu_mux_parents, 2,
+ vcu_clk_data->hws[vcu_pll_half]->clk,
+ reg_base + VCU_DEC_MCU_CTRL,
+ &mcu_dec_lock);
+
+ for (i = 0; i < clk_max; i++) {
+ if (IS_ERR(vcu_clk_data->hws[i])) {
+ dev_err(dev, "clk %d: register failed with %ld\n",
+ i, PTR_ERR(vcu_clk_data->hws[i]));
+ }
+ }
+
+ vcu_clk_data->num = clk_max;
+ return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ vcu_clk_data);
+}
+
+static int xvcu_clk_probe(struct platform_device *pdev)
+{
+ struct xvcu_device *xvcu = dev_get_drvdata(pdev->dev.parent);
+ int ret;
+
+ ret = xvcu_clock_init(pdev->dev.parent, xvcu->vcu_slcr_ba);
+ if (ret)
+		dev_err(&pdev->dev, "clock init failed with error %d\n", ret);
+ else
+ dev_dbg(&pdev->dev, "clock init successful\n");
+
+ return ret;
+}
+
+static int xvcu_clk_remove(struct platform_device *pdev)
+{
+ unregister_vcu_leaf_clocks(vcu_clk_data->hws[vcu_core_enc]);
+ unregister_vcu_leaf_clocks(vcu_clk_data->hws[vcu_core_dec]);
+ unregister_vcu_leaf_clocks(vcu_clk_data->hws[mcu_core_enc]);
+ unregister_vcu_leaf_clocks(vcu_clk_data->hws[mcu_core_dec]);
+ clk_hw_unregister(vcu_clk_data->hws[vcu_pll_half]);
+ of_clk_del_provider(pdev->dev.parent->of_node);
+
+ devm_kfree(pdev->dev.parent, vcu_clk_data);
+
+ return 0;
+}
+
+static struct platform_driver xvcu_clk_driver = {
+ .driver = {
+ .name = "xilinx-vcu-clk",
+ },
+ .probe = xvcu_clk_probe,
+ .remove = xvcu_clk_remove,
+};
+
+module_platform_driver(xvcu_clk_driver);
+
+MODULE_AUTHOR("Rajan Vaja <rajan.vaja@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx VCU clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:xilinx-vcu-clk");
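
of_clk_add_hw_provider() with of_clk_hw_onecell_get exposes the clocks by
index in enum vcu_clks order, so a consumer can look one up by position. A
sketch, with error handling trimmed (the index follows the enum above):

	/* vcu_core_enc_clk is index 1 in the one-cell provider. */
	struct clk *enc = of_clk_get(np, 1);

	if (IS_ERR(enc))
		return PTR_ERR(enc);
	clk_prepare_enable(enc);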
diff --git a/drivers/soc/xilinx/xlnx_vcu_core.c b/drivers/soc/xilinx/xlnx_vcu_core.c
new file mode 100644
index 000000000000..0415b283c133
--- /dev/null
+++ b/drivers/soc/xilinx/xlnx_vcu_core.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx VCU core driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Rajan Vaja <rajan.vaja@xilinx.com>
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/of_platform.h>
+#include <soc/xilinx/xlnx_vcu.h>
+
+static const struct mfd_cell xvcu_devs[] = {
+ {
+ .name = "xilinx-vcu-clk",
+ },
+ {
+ .name = "xilinx-vcu",
+ },
+};
+
+static int xvcu_core_probe(struct platform_device *pdev)
+{
+ struct xvcu_device *xvcu;
+ struct resource *res;
+ int ret;
+
+ xvcu = devm_kzalloc(&pdev->dev, sizeof(*xvcu), GFP_KERNEL);
+ if (!xvcu)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vcu_slcr");
+ if (!res) {
+ dev_err(&pdev->dev, "get vcu_slcr memory resource failed.\n");
+ return -ENODEV;
+ }
+
+ xvcu->vcu_slcr_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xvcu->vcu_slcr_ba) {
+ dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "logicore");
+ if (!res) {
+ dev_err(&pdev->dev, "get logicore memory resource failed.\n");
+ return -ENODEV;
+ }
+
+ xvcu->logicore_reg_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xvcu->logicore_reg_ba) {
+ dev_err(&pdev->dev, "logicore register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ dev_set_drvdata(&pdev->dev, xvcu);
+
+ xvcu->aclk = devm_clk_get(&pdev->dev, "aclk");
+ if (IS_ERR(xvcu->aclk)) {
+ dev_err(&pdev->dev, "Could not get aclk clock\n");
+ return PTR_ERR(xvcu->aclk);
+ }
+
+ ret = clk_prepare_enable(xvcu->aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "aclk clock enable failed\n");
+ return ret;
+ }
+
+ /*
+ * Do the Gasket isolation and put the VCU out of reset
+ * Bit 0 : Gasket isolation
+ * Bit 1 : put VCU out of reset
+ */
+ xvcu->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(xvcu->reset_gpio)) {
+ ret = PTR_ERR(xvcu->reset_gpio);
+ dev_err(&pdev->dev, "failed to get reset gpio for vcu.\n");
+ return ret;
+ }
+
+ if (xvcu->reset_gpio) {
+ gpiod_set_value(xvcu->reset_gpio, 0);
+ /* min 2 clock cycle of vcu pll_ref, slowest freq is 33.33KHz */
+ usleep_range(60, 120);
+ gpiod_set_value(xvcu->reset_gpio, 1);
+ usleep_range(60, 120);
+ } else {
+ dev_warn(&pdev->dev, "No reset gpio info from dts for vcu. This may lead to incorrect functionality if VCU isolation is removed post initialization.\n");
+ }
+
+ iowrite32(VCU_GASKET_VALUE, xvcu->logicore_reg_ba + VCU_GASKET_INIT);
+
+ ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, xvcu_devs,
+ ARRAY_SIZE(xvcu_devs), NULL, 0, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add MFD devices %d\n", ret);
+ goto err_mfd_add_devices;
+ }
+
+ dev_dbg(&pdev->dev, "Successfully added MFD devices\n");
+
+ return 0;
+
+err_mfd_add_devices:
+	/* Add the gasket isolation and put the VCU in reset. */
+ iowrite32(0, xvcu->logicore_reg_ba + VCU_GASKET_INIT);
+
+ clk_disable_unprepare(xvcu->aclk);
+
+ return ret;
+}
+
+static int xvcu_core_remove(struct platform_device *pdev)
+{
+ struct xvcu_device *xvcu;
+
+ xvcu = platform_get_drvdata(pdev);
+ if (!xvcu)
+ return -ENODEV;
+
+ mfd_remove_devices(&pdev->dev);
+
+	/* Add the gasket isolation and put the VCU in reset. */
+ if (xvcu->reset_gpio) {
+ gpiod_set_value(xvcu->reset_gpio, 0);
+ /* min 2 clock cycle of vcu pll_ref, slowest freq is 33.33KHz */
+ usleep_range(60, 120);
+ gpiod_set_value(xvcu->reset_gpio, 1);
+ usleep_range(60, 120);
+ }
+ iowrite32(0, xvcu->logicore_reg_ba + VCU_GASKET_INIT);
+
+ clk_disable_unprepare(xvcu->aclk);
+
+ return 0;
+}
+
+static const struct of_device_id xvcu_core_of_id_table[] = {
+ { .compatible = "xlnx,vcu" },
+ { .compatible = "xlnx,vcu-logicoreip-1.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xvcu_core_of_id_table);
+
+static struct platform_driver xvcu_core_driver = {
+ .driver = {
+ .name = "xilinx-vcu-core",
+ .of_match_table = xvcu_core_of_id_table,
+ },
+ .probe = xvcu_core_probe,
+ .remove = xvcu_core_remove,
+};
+
+module_platform_driver(xvcu_core_driver);
+
+MODULE_AUTHOR("Rajan Vaja <rajan.vaja@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx VCU core Driver");
+MODULE_LICENSE("GPL v2");
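
mfd_add_devices() creates one platform device per cell and binds it to a
platform driver purely by name, so the two cells above attach to the
"xilinx-vcu-clk" and "xilinx-vcu" drivers added elsewhere in this diff. The
matching contract in miniature (names hypothetical):

	static const struct mfd_cell cells[] = {
		{ .name = "example-child" },
	};

	static struct platform_driver child_driver = {
		.driver = { .name = "example-child" },	/* must equal the cell name */
	};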
diff --git a/drivers/soc/xilinx/zynqmp/Makefile b/drivers/soc/xilinx/zynqmp/Makefile
new file mode 100644
index 000000000000..29d19cb442bc
--- /dev/null
+++ b/drivers/soc/xilinx/zynqmp/Makefile
@@ -0,0 +1 @@
+obj-y += tap_delays.o
diff --git a/drivers/soc/xilinx/zynqmp/tap_delays.c b/drivers/soc/xilinx/zynqmp/tap_delays.c
new file mode 100644
index 000000000000..cf1836ea99c3
--- /dev/null
+++ b/drivers/soc/xilinx/zynqmp/tap_delays.c
@@ -0,0 +1,69 @@
+/*
+ * Xilinx Zynq MPSoC Tap Delay Programming
+ *
+ * Copyright (C) 2016 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/soc/xilinx/zynqmp/tap_delays.h>
+
+/**
+ * arasan_zynqmp_set_tap_delay - Program the tap delays.
+ * @deviceid: Unique Id of device
+ * @itap_delay: Input Tap Delay
+ * @otap_delay: Output Tap Delay
+ */
+void arasan_zynqmp_set_tap_delay(u8 deviceid, u8 itap_delay, u8 otap_delay)
+{
+ u32 node_id = (deviceid == 0) ? NODE_SD_0 : NODE_SD_1;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops->ioctl)
+ return;
+
+ /* Set the Input Tap Delay */
+ if (itap_delay)
+ eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
+ PM_TAPDELAY_INPUT, itap_delay, NULL);
+
+ /* Set the Output Tap Delay */
+ if (otap_delay)
+ eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
+ PM_TAPDELAY_OUTPUT, otap_delay, NULL);
+}
+EXPORT_SYMBOL_GPL(arasan_zynqmp_set_tap_delay);
+
+/**
+ * zynqmp_dll_reset - Issue the DLL reset.
+ * @deviceid: Unique Id of device
+ */
+void zynqmp_dll_reset(u8 deviceid)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops->ioctl)
+ return;
+
+ /* Issue DLL Reset */
+ if (deviceid == 0)
+ eemi_ops->ioctl(NODE_SD_0, IOCTL_SD_DLL_RESET,
+ PM_DLL_RESET_PULSE, 0, NULL);
+ else
+ eemi_ops->ioctl(NODE_SD_1, IOCTL_SD_DLL_RESET,
+ PM_DLL_RESET_PULSE, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_dll_reset);
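
A minimal usage sketch (not part of the patch): how an SD host driver
might call the two exported helpers during tuning. The wrapper name and
the tap values are hypothetical; only arasan_zynqmp_set_tap_delay() and
zynqmp_dll_reset() are taken from the file above.

    #include <linux/soc/xilinx/zynqmp/tap_delays.h>

    /* Reprogram both tap delays for the given SD controller, then
     * pulse the DLL reset so the new values take effect.
     * The tap values (0x15/0x05) are illustrative only.
     */
    static void sdhci_zynqmp_retune(u8 deviceid)
    {
            arasan_zynqmp_set_tap_delay(deviceid, 0x15, 0x05);
            zynqmp_dll_reset(deviceid);
    }
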
diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c
index 600f57cf0c2e..23d90cb12ba9 100644
--- a/drivers/soc/xilinx/zynqmp_pm_domains.c
+++ b/drivers/soc/xilinx/zynqmp_pm_domains.c
@@ -2,7 +2,7 @@
/*
* ZynqMP Generic PM domain support
*
- * Copyright (C) 2015-2018 Xilinx, Inc.
+ * Copyright (C) 2015-2019 Xilinx, Inc.
*
* Davorin Mista <davorin.mista@aggios.com>
* Jolly Shah <jollys@xilinx.com>
@@ -25,6 +25,8 @@
static const struct zynqmp_eemi_ops *eemi_ops;
+static int min_capability;
+
/**
* struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain
* @gpd: Generic power domain
@@ -106,7 +108,7 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
int ret;
struct pm_domain_data *pdd, *tmp;
struct zynqmp_pm_domain *pd;
- u32 capabilities = 0;
+ u32 capabilities = min_capability;
bool may_wakeup;
if (!eemi_ops->set_requirement)
@@ -283,6 +285,10 @@ static int zynqmp_gpd_probe(struct platform_device *pdev)
if (!domains)
return -ENOMEM;
+ if (!of_device_is_compatible(dev->parent->of_node,
+ "xlnx,zynqmp-firmware"))
+ min_capability = ZYNQMP_PM_CAPABILITY_UNUSABLE;
+
for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++, pd++) {
pd->node_id = 0;
pd->gpd.name = kasprintf(GFP_KERNEL, "domain%d", i);
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index 1b9d14411a15..01a2e9dc1fe5 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -9,13 +9,31 @@
* Rajan Vaja <rajan.vaja@xilinx.com>
*/
+#include <linux/compiler.h>
+#include <linux/interrupt.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
+#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/mailbox/zynqmp-ipi-message.h>
+
+/**
+ * struct zynqmp_pm_work_struct - Wrapper for struct work_struct
+ * @callback_work: Work structure
+ * @args: Callback arguments
+ */
+struct zynqmp_pm_work_struct {
+ struct work_struct callback_work;
+ u32 args[CB_ARG_CNT];
+};
+static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work;
+static struct mbox_chan *rx_chan;
+static const struct zynqmp_eemi_ops *eemi_ops;
enum pm_suspend_mode {
PM_SUSPEND_MODE_FIRST = 0,
@@ -31,7 +49,6 @@ static const char *const suspend_modes[] = {
};
static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
-static const struct zynqmp_eemi_ops *eemi_ops;
enum pm_api_cb_id {
PM_INIT_SUSPEND_CB = 30,
@@ -68,6 +85,53 @@ static irqreturn_t zynqmp_pm_isr(int irq, void *data)
return IRQ_HANDLED;
}
+static void ipi_receive_callback(struct mbox_client *cl, void *data)
+{
+ struct zynqmp_ipi_message *msg = (struct zynqmp_ipi_message *)data;
+ u32 payload[IPI_BUF_LEN_MAX];
+ int ret;
+
+	memcpy(payload, msg->data, msg->len);
+ /* First element is callback API ID, others are callback arguments */
+ if (payload[0] == PM_INIT_SUSPEND_CB) {
+ if (work_pending(&zynqmp_pm_init_suspend_work->callback_work))
+ return;
+
+ /* Copy callback arguments into work's structure */
+ memcpy(zynqmp_pm_init_suspend_work->args, &payload[1],
+ sizeof(zynqmp_pm_init_suspend_work->args));
+
+ queue_work(system_unbound_wq,
+ &zynqmp_pm_init_suspend_work->callback_work);
+
+ /* Send NULL message to mbox controller to ack the message */
+ ret = mbox_send_message(rx_chan, NULL);
+ if (ret)
+ pr_err("IPI ack failed. Error %d\n", ret);
+ }
+}
+
+/**
+ * zynqmp_pm_init_suspend_work_fn - Initialize suspend
+ * @work: Pointer to work_struct
+ *
+ * Bottom-half of PM callback IRQ handler.
+ */
+static void zynqmp_pm_init_suspend_work_fn(struct work_struct *work)
+{
+ struct zynqmp_pm_work_struct *pm_work =
+ container_of(work, struct zynqmp_pm_work_struct, callback_work);
+
+ if (pm_work->args[0] == SUSPEND_SYSTEM_SHUTDOWN) {
+ orderly_poweroff(true);
+ } else if (pm_work->args[0] == SUSPEND_POWER_REQUEST) {
+ pm_suspend(PM_SUSPEND_MEM);
+ } else {
+ pr_err("%s Unsupported InitSuspendCb reason code %d.\n",
+ __func__, pm_work->args[0]);
+ }
+}
+
static ssize_t suspend_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -119,6 +183,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
{
int ret, irq;
u32 pm_api_version;
+ struct mbox_client *client;
eemi_ops = zynqmp_pm_get_eemi_ops();
if (IS_ERR(eemi_ops))
@@ -134,17 +199,46 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
if (pm_api_version < ZYNQMP_PM_VERSION)
return -ENODEV;
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -ENXIO;
-
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, zynqmp_pm_isr,
- IRQF_NO_SUSPEND | IRQF_ONESHOT,
- dev_name(&pdev->dev), &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed "
- "with %d\n", irq, ret);
- return ret;
+ if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) {
+ zynqmp_pm_init_suspend_work =
+ devm_kzalloc(&pdev->dev,
+ sizeof(struct zynqmp_pm_work_struct),
+ GFP_KERNEL);
+ if (!zynqmp_pm_init_suspend_work)
+ return -ENOMEM;
+
+ INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work,
+ zynqmp_pm_init_suspend_work_fn);
+ client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->dev = &pdev->dev;
+ client->rx_callback = ipi_receive_callback;
+
+ rx_chan = mbox_request_channel_byname(client, "rx");
+ if (IS_ERR(rx_chan)) {
+ dev_err(&pdev->dev, "Failed to request rx channel\n");
+			return PTR_ERR(rx_chan);
+ }
+ } else if (of_find_property(pdev->dev.of_node, "interrupts", NULL)) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENXIO;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ zynqmp_pm_isr,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&pdev->dev),
+ &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "devm_request_threaded_irq '%d' "
+ "failed with %d\n", irq, ret);
+ return ret;
+ }
+ } else {
+ dev_err(&pdev->dev, "Required property not found in DT node\n");
+ return -ENOENT;
}
ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
@@ -160,6 +254,9 @@ static int zynqmp_pm_remove(struct platform_device *pdev)
{
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+	if (rx_chan)
+ mbox_free_channel(rx_chan);
+
return 0;
}
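
The mailbox path added above follows the usual top/bottom-half split:
the atomic rx_callback only snapshots the payload and queues work, and
the sleeping calls (orderly_poweroff(), pm_suspend()) run later from the
unbound workqueue. A stripped-down sketch of that pattern, with all
names hypothetical:

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/workqueue.h>

    struct demo_work {
            struct work_struct work;
            u32 args[4];            /* payload captured in atomic context */
    };

    static void demo_work_fn(struct work_struct *work)
    {
            struct demo_work *dw = container_of(work, struct demo_work, work);

            /* Safe to sleep here; args[] was copied before queueing. */
            pr_info("deferred arg0 = %u\n", dw->args[0]);
    }

    /* Called from the (atomic) mailbox callback context. */
    static void demo_rx(struct demo_work *dw, const u32 *payload)
    {
            if (work_pending(&dw->work))
                    return;         /* previous request still pending */
            memcpy(dw->args, payload, sizeof(dw->args));
            queue_work(system_unbound_wq, &dw->work);
    }
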
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 4a465f55039f..2fe5a51918c8 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -155,12 +155,8 @@ int __sdw_register_driver(struct sdw_driver *drv, struct module *owner)
drv->driver.owner = owner;
drv->driver.probe = sdw_drv_probe;
-
- if (drv->remove)
- drv->driver.remove = sdw_drv_remove;
-
- if (drv->shutdown)
- drv->driver.shutdown = sdw_drv_shutdown;
+ drv->driver.remove = sdw_drv_remove;
+ drv->driver.shutdown = sdw_drv_shutdown;
return driver_register(&drv->driver);
}
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index f7ca1f7a68f0..3b3f909407c3 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -709,14 +709,15 @@ error_1:
* sdw_ml_sync_bank_switch: Multilink register bank switch
*
* @bus: SDW bus instance
+ * @multi_link: whether this is a multi-link stream with hardware-based sync
*
* Caller function should free the buffers on error
*/
-static int sdw_ml_sync_bank_switch(struct sdw_bus *bus)
+static int sdw_ml_sync_bank_switch(struct sdw_bus *bus, bool multi_link)
{
unsigned long time_left;
- if (!bus->multi_link)
+ if (!multi_link)
return 0;
/* Wait for completion of transfer */
@@ -809,7 +810,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
bus->bank_switch_timeout = DEFAULT_BANK_SWITCH_TIMEOUT;
/* Check if bank switch was successful */
- ret = sdw_ml_sync_bank_switch(bus);
+ ret = sdw_ml_sync_bank_switch(bus, multi_link);
if (ret < 0) {
dev_err(bus->dev,
"multi link bank switch failed: %d\n", ret);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 5bf754208777..db43517fdd8e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -865,10 +865,18 @@ config SPI_XTENSA_XTFPGA
config SPI_ZYNQ_QSPI
tristate "Xilinx Zynq QSPI controller"
depends on ARCH_ZYNQ || COMPILE_TEST
+ depends on SPI_MEM
+ depends on SPI_MASTER
help
- This enables support for the Zynq Quad SPI controller
- in master mode.
- This controller only supports SPI memory interface.
+ This selects the Xilinx ZYNQ Quad SPI controller master driver.
+
+config SPI_ZYNQ_QSPI_DUAL_STACKED
+ bool "Xilinx Zynq QSPI Dual stacked configuration"
+ depends on SPI_ZYNQ_QSPI
+ help
+ This selects the Xilinx ZYNQ Quad SPI controller in dual stacked mode.
+	  Enable this option if your hardware design uses a dual stacked
+	  configuration.
config SPI_ZYNQMP_GQSPI
tristate "Xilinx ZynqMP GQSPI controller"
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index d933a6eda5fd..b4032d1e7c98 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -19,7 +19,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include <linux/spi/spi-mem.h>
+#include <linux/mtd/spi-nor.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include "spi-bcm-qspi.h"
@@ -976,7 +976,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
/* non-aligned and very short transfers are handled by MSPI */
if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
- len < 4)
+ len < 4 || op->cmd.opcode == SPINOR_OP_RDSFDP)
mspi_read = true;
if (!has_bspi(qspi) || mspi_read)
@@ -1250,13 +1250,9 @@ int bcm_qspi_probe(struct platform_device *pdev,
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"mspi");
- if (res) {
- qspi->base[MSPI] = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->base[MSPI]))
- return PTR_ERR(qspi->base[MSPI]);
- } else {
- return 0;
- }
+ qspi->base[MSPI] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qspi->base[MSPI]))
+ return PTR_ERR(qspi->base[MSPI]);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
if (res) {
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index 509476a2d79b..2ddace15c0f8 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -20,6 +20,8 @@
#include <linux/spi/spi.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/reset.h>
+#include <linux/pm_runtime.h>
#define HSSPI_GLOBAL_CTRL_REG 0x0
#define GLOBAL_CTRL_CS_POLARITY_SHIFT 0
@@ -161,6 +163,7 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
int step_size = HSSPI_BUFFER_LEN;
const u8 *tx = t->tx_buf;
u8 *rx = t->rx_buf;
+ u32 val = 0;
bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
bcm63xx_hsspi_set_cs(bs, spi->chip_select, true);
@@ -176,11 +179,16 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
step_size -= HSSPI_OPCODE_LEN;
if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
- (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL))
+ (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL)) {
opcode |= HSSPI_OP_MULTIBIT;
- __raw_writel(1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT |
- 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT | 0xff,
+ if (t->rx_nbits == SPI_NBITS_DUAL)
+ val |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT;
+ if (t->tx_nbits == SPI_NBITS_DUAL)
+ val |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT;
+ }
+
+ __raw_writel(val | 0xff,
bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
while (pending > 0) {
@@ -428,13 +436,17 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
if (ret)
goto out_put_master;
+ pm_runtime_enable(&pdev->dev);
+
/* register and we are done */
ret = devm_spi_register_master(dev, master);
if (ret)
- goto out_put_master;
+ goto out_pm_disable;
return 0;
+out_pm_disable:
+ pm_runtime_disable(&pdev->dev);
out_put_master:
spi_master_put(master);
out_disable_pll_clk:
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index fdd7eaa0b8ed..ff2759616873 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -125,7 +125,7 @@ enum bcm63xx_regs_spi {
SPI_MSG_DATA_SIZE,
};
-#define BCM63XX_SPI_MAX_PREPEND 15
+#define BCM63XX_SPI_MAX_PREPEND 7
#define BCM63XX_SPI_MAX_CS 8
#define BCM63XX_SPI_BUS_NUM 0
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index 858f0544289e..c7079a9cd577 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -21,6 +21,7 @@
#include <linux/spi/spi.h>
#include <linux/types.h>
#include <linux/platform_device.h>
+#include <linux/byteorder/generic.h>
#include "spi-fsl-cpm.h"
#include "spi-fsl-lib.h"
@@ -120,6 +121,21 @@ int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
mspi->rx_dma = mspi->dma_dummy_rx;
mspi->map_rx_dma = 0;
}
+ if (t->bits_per_word == 16 && t->tx_buf) {
+ const u16 *src = t->tx_buf;
+ u16 *dst;
+ int i;
+
+ dst = kmalloc(t->len, GFP_KERNEL);
+ if (!dst)
+ return -ENOMEM;
+
+ for (i = 0; i < t->len >> 1; i++)
+ dst[i] = cpu_to_le16p(src + i);
+
+ mspi->tx = dst;
+ mspi->map_tx_dma = 1;
+ }
if (mspi->map_tx_dma) {
void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
@@ -173,6 +189,13 @@ void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
if (mspi->map_rx_dma)
dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
mspi->xfer_in_progress = NULL;
+
+ if (t->bits_per_word == 16 && t->rx_buf) {
+ int i;
+
+ for (i = 0; i < t->len; i += 2)
+ le16_to_cpus(t->rx_buf + i);
+ }
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
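
The two hunks above handle the CPM engine's little-endian view of
16-bit words by swapping the TX data into a bounce buffer before DMA
and swapping the RX data back in place afterwards. A self-contained
sketch of the two conversions (helper names hypothetical):

    #include <linux/byteorder/generic.h>
    #include <linux/slab.h>

    /* TX: copy len bytes of 16-bit words into a bounce buffer,
     * converting each word from CPU to little-endian order.
     */
    static u16 *tx_swab16_bounce(const u16 *src, size_t len)
    {
            u16 *dst = kmalloc(len, GFP_KERNEL);
            size_t i;

            if (!dst)
                    return NULL;
            for (i = 0; i < len / 2; i++)
                    dst[i] = cpu_to_le16p(src + i);
            return dst;
    }

    /* RX: convert the received words back to CPU order in place. */
    static void rx_swab16_inplace(void *buf, size_t len)
    {
            size_t i;

            for (i = 0; i < len; i += 2)
                    le16_to_cpus(buf + i);
    }
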
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 3e0200618af3..351b2989db07 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -121,7 +121,6 @@
struct chip_data {
u32 ctar_val;
- u16 void_write_data;
};
enum dspi_trans_mode {
@@ -190,7 +189,6 @@ struct fsl_dspi {
const void *tx;
void *rx;
void *rx_end;
- u16 void_write_data;
u16 tx_cmd;
u8 bits_per_word;
u8 bytes_per_word;
@@ -748,8 +746,6 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
}
- dspi->void_write_data = dspi->cur_chip->void_write_data;
-
dspi->tx = transfer->tx_buf;
dspi->rx = transfer->rx_buf;
dspi->rx_end = dspi->rx + transfer->len;
@@ -820,7 +816,9 @@ out:
static int dspi_setup(struct spi_device *spi)
{
struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
+ u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz);
unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
+ u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
u32 cs_sck_delay = 0, sck_cs_delay = 0;
struct fsl_dspi_platform_data *pdata;
unsigned char pasc = 0, asc = 0;
@@ -848,7 +846,18 @@ static int dspi_setup(struct spi_device *spi)
sck_cs_delay = pdata->sck_cs_delay;
}
- chip->void_write_data = 0;
+ /* Since tCSC and tASC apply to continuous transfers too, avoid SCK
+ * glitches of half a cycle by never allowing tCSC + tASC to go below
+ * half a SCK period.
+ */
+ if (cs_sck_delay < quarter_period_ns)
+ cs_sck_delay = quarter_period_ns;
+ if (sck_cs_delay < quarter_period_ns)
+ sck_cs_delay = quarter_period_ns;
+
+ dev_dbg(&spi->dev,
+ "DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n",
+ cs_sck_delay, sck_cs_delay);
clkrate = clk_get_rate(dspi->clk);
hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 02b999d48ca1..8006759c1a0c 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -204,24 +204,6 @@ static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
return bits_per_word;
}
-static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
- struct spi_device *spi,
- int bits_per_word)
-{
- /* QE uses Little Endian for words > 8
- * so transform all words > 8 into 8 bits
- * Unfortnatly that doesn't work for LSB so
- * reject these for now */
- /* Note: 32 bits word, LSB works iff
- * tfcr/rfcr is set to CPMFCR_GBL */
- if (spi->mode & SPI_LSB_FIRST &&
- bits_per_word > 8)
- return -EINVAL;
- if (bits_per_word > 8)
- return 8; /* pretend its 8 bits */
- return bits_per_word;
-}
-
static int fsl_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
@@ -249,9 +231,6 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
mpc8xxx_spi,
bits_per_word);
- else if (mpc8xxx_spi->flags & SPI_QE)
- bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
- bits_per_word);
if (bits_per_word < 0)
return bits_per_word;
@@ -369,14 +348,30 @@ static int fsl_spi_do_one_msg(struct spi_master *master,
* In CPU mode, optimize large byte transfers to use larger
* bits_per_word values to reduce number of interrupts taken.
*/
- if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
- list_for_each_entry(t, &m->transfers, transfer_list) {
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
if (t->len < 256 || t->bits_per_word != 8)
continue;
if ((t->len & 3) == 0)
t->bits_per_word = 32;
else if ((t->len & 1) == 0)
t->bits_per_word = 16;
+ } else {
+ /*
+			 * CPM/QE uses little endian for words > 8 bits,
+			 * so transform 16- and 32-bit words into 8-bit ones.
+			 * Unfortunately that doesn't work for LSB-first,
+			 * so reject these for now.
+			 * Note: 32-bit words with LSB-first work iff
+			 * tfcr/rfcr is set to CPMFCR_GBL.
+ */
+ if (m->spi->mode & SPI_LSB_FIRST && t->bits_per_word > 8)
+ return -EINVAL;
+ if (t->bits_per_word == 16 || t->bits_per_word == 32)
+ t->bits_per_word = 8; /* pretend its 8 bits */
+ if (t->bits_per_word == 8 && t->len >= 256 &&
+ (mpc8xxx_spi->flags & SPI_CPM1))
+ t->bits_per_word = 16;
}
}
@@ -635,8 +630,14 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
if (mpc8xxx_spi->type == TYPE_GRLIB)
fsl_spi_grlib_probe(dev);
- master->bits_per_word_mask =
- (SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32)) &
+ if (mpc8xxx_spi->flags & SPI_CPM_MODE)
+ master->bits_per_word_mask =
+ (SPI_BPW_RANGE_MASK(4, 8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(32));
+ else
+ master->bits_per_word_mask =
+ (SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32));
+
+ master->bits_per_word_mask &=
SPI_BPW_RANGE_MASK(1, mpc8xxx_spi->max_bits_per_word);
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 01b53d816497..ae1cbc321536 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -32,7 +32,7 @@
#define CS_DEMUX_OUTPUT_SEL GENMASK(3, 0)
#define SE_SPI_TRANS_CFG 0x25c
-#define CS_TOGGLE BIT(0)
+#define CS_TOGGLE BIT(1)
#define SE_SPI_WORD_LEN 0x268
#define WORD_LEN_MSK GENMASK(9, 0)
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index e7dc1fad4a87..282c5ee41a62 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -244,9 +244,19 @@ static int spi_gpio_set_direction(struct spi_device *spi, bool output)
if (output)
return gpiod_direction_output(spi_gpio->mosi, 1);
- ret = gpiod_direction_input(spi_gpio->mosi);
- if (ret)
- return ret;
+ /*
+ * Only change MOSI to an input if using 3WIRE mode.
+ * Otherwise, MOSI could be left floating if there is
+ * no pull resistor connected to the I/O pin, or could
+ * be left logic high if there is a pull-up. Transmitting
+ * logic high when only clocking MISO data in can put some
+ * SPI devices in to a bad state.
+ */
+ if (spi->mode & SPI_3WIRE) {
+ ret = gpiod_direction_input(spi_gpio->mosi);
+ if (ret)
+ return ret;
+ }
/*
* Send a turnaround high impedance cycle when switching
* from output to input. Theoretically there should be
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 474d5a7fa95e..67f31183c118 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -239,6 +239,18 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
return true;
}
+/*
+ * Note the number of natively supported chip selects for MX51 is 4. Some
+ * devices may have less actual SS pins but the register map supports 4. When
+ * using gpio chip selects the cs values passed into the macros below can go
+ * outside the range 0 - 3. We therefore need to limit the cs value to avoid
+ * corrupting bits outside the allocated locations.
+ *
+ * The simplest way to do this is to just mask the cs bits to 2 bits. This
+ * still allows all 4 native chip selects to work as well as gpio chip selects
+ * (which can use any of the 4 chip select configurations).
+ */
+
#define MX51_ECSPI_CTRL 0x08
#define MX51_ECSPI_CTRL_ENABLE (1 << 0)
#define MX51_ECSPI_CTRL_XCH (1 << 2)
@@ -247,16 +259,16 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
#define MX51_ECSPI_CTRL_DRCTL(drctl) ((drctl) << 16)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
-#define MX51_ECSPI_CTRL_CS(cs) ((cs) << 18)
+#define MX51_ECSPI_CTRL_CS(cs) ((cs & 3) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET 20
#define MX51_ECSPI_CTRL_BL_MASK (0xfff << 20)
#define MX51_ECSPI_CONFIG 0x0c
-#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0))
-#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4))
-#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8))
-#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs) + 12))
-#define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs) + 20))
+#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs & 3) + 0))
+#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs & 3) + 4))
+#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs & 3) + 8))
+#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs & 3) + 12))
+#define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs & 3) + 20))
#define MX51_ECSPI_INT 0x10
#define MX51_ECSPI_INT_TEEN (1 << 0)
@@ -431,8 +443,7 @@ static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
unsigned int pre, post;
unsigned int fin = spi_imx->spi_clk;
- if (unlikely(fspi > fin))
- return 0;
+ fspi = min(fspi, fin);
post = fls(fin) - fls(fspi);
if (fin > fspi << post)
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 33115bcfc787..2e04e80978e2 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -349,6 +349,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
xfers[xferpos].len = op->dummy.nbytes;
xfers[xferpos].tx_nbits = op->dummy.buswidth;
+ xfers[xferpos].dummy = op->dummy.nbytes * 8;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->dummy.nbytes;
@@ -363,6 +364,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].tx_nbits = op->data.buswidth;
}
+ xfers[xferpos].stripe = update_stripe(op->cmd.opcode);
xfers[xferpos].len = op->data.nbytes;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 29d44f5d5212..8105b458d12f 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -559,17 +559,19 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
mtk_spi_setup_packet(master);
- cnt = mdata->xfer_len / 4;
- iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
- trans->tx_buf + mdata->num_xfered, cnt);
+ if (trans->tx_buf) {
+ cnt = mdata->xfer_len / 4;
+ iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
+ trans->tx_buf + mdata->num_xfered, cnt);
- remainder = mdata->xfer_len % 4;
- if (remainder > 0) {
- reg_val = 0;
- memcpy(&reg_val,
- trans->tx_buf + (cnt * 4) + mdata->num_xfered,
- remainder);
- writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+ remainder = mdata->xfer_len % 4;
+ if (remainder > 0) {
+ reg_val = 0;
+ memcpy(&reg_val,
+ trans->tx_buf + (cnt * 4) + mdata->num_xfered,
+ remainder);
+ writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+ }
}
mtk_spi_enable_transfer(master);
diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c
index b4b9b7309b5e..351b0ef52bbc 100644
--- a/drivers/spi/spi-mt7621.c
+++ b/drivers/spi/spi-mt7621.c
@@ -340,11 +340,9 @@ static int mt7621_spi_probe(struct platform_device *pdev)
return PTR_ERR(base);
clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get SYS clock, err=%d\n",
- status);
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "unable to get SYS clock\n");
status = clk_prepare_enable(clk);
if (status)
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index 1e770d673905..cc4e29cd86da 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -334,8 +334,9 @@ static int npcm_fiu_uma_read(struct spi_mem *mem,
uma_cfg |= ilog2(op->cmd.buswidth);
uma_cfg |= ilog2(op->addr.buswidth)
<< NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
- uma_cfg |= ilog2(op->dummy.buswidth)
- << NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
+ if (op->dummy.nbytes)
+ uma_cfg |= ilog2(op->dummy.buswidth)
+ << NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
uma_cfg |= ilog2(op->data.buswidth)
<< NPCM_FIU_UMA_CFG_RDBPCK_SHIFT;
uma_cfg |= op->dummy.nbytes << NPCM_FIU_UMA_CFG_DBSIZ_SHIFT;
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 36a44a837031..ee1b488d7ded 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -897,6 +897,13 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f)
fspi_writel(f, FSPI_AHBCR_PREF_EN | FSPI_AHBCR_RDADDROPT,
base + FSPI_AHBCR);
+ /* Reset the FLSHxCR1 registers. */
+ reg = FSPI_FLSHXCR1_TCSH(0x3) | FSPI_FLSHXCR1_TCSS(0x3);
+ fspi_writel(f, reg, base + FSPI_FLSHA1CR1);
+ fspi_writel(f, reg, base + FSPI_FLSHA2CR1);
+ fspi_writel(f, reg, base + FSPI_FLSHB1CR1);
+ fspi_writel(f, reg, base + FSPI_FLSHB2CR1);
+
/* AHB Read - Set lut sequence ID for all CS. */
fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2);
fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index f64d030c760a..89d89ad1064d 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -416,6 +416,7 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
return status;
err_fck:
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(spi100k->fck);
err_ick:
clk_disable_unprepare(spi100k->ick);
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 0ea2d9a369d9..738a1e4e445e 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -170,10 +170,8 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
int scr;
u8 cdm = 0;
u32 speed;
- u8 bits_per_word;
/* Start with the generic configuration for this device. */
- bits_per_word = spi->bits_per_word;
speed = spi->max_speed_hz;
/*
@@ -181,9 +179,6 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
* the transfer to overwrite the generic configuration with zeros.
*/
if (t) {
- if (t->bits_per_word)
- bits_per_word = t->bits_per_word;
-
if (t->speed_hz)
speed = min(t->speed_hz, spi->max_speed_hz);
}
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index d1dfb52008b4..94aa51d91b06 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -1030,23 +1030,8 @@ static int spi_qup_probe(struct platform_device *pdev)
return -ENXIO;
}
- ret = clk_prepare_enable(cclk);
- if (ret) {
- dev_err(dev, "cannot enable core clock\n");
- return ret;
- }
-
- ret = clk_prepare_enable(iclk);
- if (ret) {
- clk_disable_unprepare(cclk);
- dev_err(dev, "cannot enable iface clock\n");
- return ret;
- }
-
master = spi_alloc_master(dev, sizeof(struct spi_qup));
if (!master) {
- clk_disable_unprepare(cclk);
- clk_disable_unprepare(iclk);
dev_err(dev, "cannot allocate master\n");
return -ENOMEM;
}
@@ -1092,6 +1077,19 @@ static int spi_qup_probe(struct platform_device *pdev)
spin_lock_init(&controller->lock);
init_completion(&controller->done);
+ ret = clk_prepare_enable(cclk);
+ if (ret) {
+ dev_err(dev, "cannot enable core clock\n");
+ goto error_dma;
+ }
+
+ ret = clk_prepare_enable(iclk);
+ if (ret) {
+ clk_disable_unprepare(cclk);
+ dev_err(dev, "cannot enable iface clock\n");
+ goto error_dma;
+ }
+
iomode = readl_relaxed(base + QUP_IO_M_MODES);
size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
@@ -1121,7 +1119,7 @@ static int spi_qup_probe(struct platform_device *pdev)
ret = spi_qup_set_state(controller, QUP_STATE_RESET);
if (ret) {
dev_err(dev, "cannot set RESET state\n");
- goto error_dma;
+ goto error_clk;
}
writel_relaxed(0, base + QUP_OPERATIONAL);
@@ -1145,7 +1143,7 @@ static int spi_qup_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
IRQF_TRIGGER_HIGH, pdev->name, controller);
if (ret)
- goto error_dma;
+ goto error_clk;
pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
pm_runtime_use_autosuspend(dev);
@@ -1160,11 +1158,12 @@ static int spi_qup_probe(struct platform_device *pdev)
disable_pm:
pm_runtime_disable(&pdev->dev);
+error_clk:
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
error_dma:
spi_qup_release_dma(master);
error:
- clk_disable_unprepare(cclk);
- clk_disable_unprepare(iclk);
spi_master_put(master);
return ret;
}
@@ -1199,8 +1198,10 @@ static int spi_qup_pm_resume_runtime(struct device *device)
return ret;
ret = clk_prepare_enable(controller->cclk);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(controller->iclk);
return ret;
+ }
/* Disable clocks auto gaiting */
config = readl_relaxed(controller->base + QUP_CONFIG);
@@ -1246,14 +1247,25 @@ static int spi_qup_resume(struct device *device)
return ret;
ret = clk_prepare_enable(controller->cclk);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(controller->iclk);
return ret;
+ }
ret = spi_qup_set_state(controller, QUP_STATE_RESET);
if (ret)
- return ret;
+ goto disable_clk;
+
+ ret = spi_master_resume(master);
+ if (ret)
+ goto disable_clk;
+
+ return 0;
- return spi_master_resume(master);
+disable_clk:
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ return ret;
}
#endif /* CONFIG_PM_SLEEP */
@@ -1263,18 +1275,22 @@ static int spi_qup_remove(struct platform_device *pdev)
struct spi_qup *controller = spi_master_get_devdata(master);
int ret;
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ ret = pm_runtime_get_sync(&pdev->dev);
- ret = spi_qup_set_state(controller, QUP_STATE_RESET);
- if (ret)
- return ret;
+ if (ret >= 0) {
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to reset controller (%pe)\n",
+ ERR_PTR(ret));
- spi_qup_release_dma(master);
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ } else {
+ dev_warn(&pdev->dev, "failed to resume, skip hw disable (%pe)\n",
+ ERR_PTR(ret));
+ }
- clk_disable_unprepare(controller->cclk);
- clk_disable_unprepare(controller->iclk);
+ spi_qup_release_dma(master);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 0524741d73b9..8ae2ac40b4b2 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -595,6 +595,10 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
rspi->dma_callbacked, HZ);
if (ret > 0 && rspi->dma_callbacked) {
ret = 0;
+ if (tx)
+ dmaengine_synchronize(rspi->ctlr->dma_tx);
+ if (rx)
+ dmaengine_synchronize(rspi->ctlr->dma_rx);
} else {
if (!ret) {
dev_err(&rspi->ctlr->dev, "DMA timeout\n");
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 1d948fee1a03..d9420561236c 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -84,6 +84,7 @@
#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
+#define S3C64XX_SPI_PACKET_CNT_MASK GENMASK(15, 0)
#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
@@ -654,6 +655,13 @@ static int s3c64xx_spi_prepare_message(struct spi_master *master,
return 0;
}
+static size_t s3c64xx_spi_max_transfer_size(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+
+ return ctlr->can_dma ? S3C64XX_SPI_PACKET_CNT_MASK : SIZE_MAX;
+}
+
static int s3c64xx_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
@@ -1118,6 +1126,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
master->prepare_message = s3c64xx_spi_prepare_message;
master->transfer_one = s3c64xx_spi_transfer_one;
+ master->max_transfer_size = s3c64xx_spi_max_transfer_size;
master->num_chipselect = sci->num_cs;
master->dma_alignment = 8;
master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 8f134735291f..edb26b085706 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -32,12 +32,15 @@
#include <asm/unaligned.h>
+#define SH_MSIOF_FLAG_FIXED_DTDL_200 BIT(0)
+
struct sh_msiof_chipdata {
u32 bits_per_word_mask;
u16 tx_fifo_size;
u16 rx_fifo_size;
u16 ctlr_flags;
u16 min_div_pow;
+ u32 flags;
};
struct sh_msiof_spi_priv {
@@ -1072,6 +1075,16 @@ static const struct sh_msiof_chipdata rcar_gen3_data = {
.min_div_pow = 1,
};
+static const struct sh_msiof_chipdata rcar_r8a7795_data = {
+ .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 64,
+ .ctlr_flags = SPI_CONTROLLER_MUST_TX,
+ .min_div_pow = 1,
+ .flags = SH_MSIOF_FLAG_FIXED_DTDL_200,
+};
+
static const struct of_device_id sh_msiof_match[] = {
{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
{ .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data },
@@ -1082,6 +1095,7 @@ static const struct of_device_id sh_msiof_match[] = {
{ .compatible = "renesas,msiof-r8a7793", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7794", .data = &rcar_gen2_data },
{ .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7795", .data = &rcar_r8a7795_data },
{ .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data },
{ .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
{ .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
@@ -1317,6 +1331,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
return -ENXIO;
}
+ if (chipdata->flags & SH_MSIOF_FLAG_FIXED_DTDL_200)
+ info->dtdl = 200;
+
if (info->mode == MSIOF_SPI_SLAVE)
ctlr = spi_alloc_slave(&pdev->dev,
sizeof(struct sh_msiof_spi_priv));
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 9ae16092206d..e843e9453c71 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -444,7 +444,7 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
u32 div, mbrdiv;
/* Ensure spi->clk_rate is even */
- div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz);
+ div = DIV_ROUND_CLOSEST(spi->clk_rate & ~0x1, speed_hz);
/*
* SPI framework set xfer->speed_hz to master->max_speed_hz if
@@ -937,6 +937,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
static DEFINE_RATELIMIT_STATE(rs,
DEFAULT_RATELIMIT_INTERVAL * 10,
1);
+ ratelimit_set_flags(&rs, RATELIMIT_MSG_ON_RELEASE);
if (__ratelimit(&rs))
dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
index 785e7c445123..5d6e4a59ce29 100644
--- a/drivers/spi/spi-synquacer.c
+++ b/drivers/spi/spi-synquacer.c
@@ -472,10 +472,9 @@ static int synquacer_spi_transfer_one(struct spi_master *master,
read_fifo(sspi);
}
- if (status < 0) {
- dev_err(sspi->dev, "failed to transfer. status: 0x%x\n",
- status);
- return status;
+ if (status == 0) {
+ dev_err(sspi->dev, "failed to transfer. Timeout.\n");
+ return -ETIMEDOUT;
}
return 0;
@@ -784,6 +783,7 @@ static int __maybe_unused synquacer_spi_resume(struct device *dev)
ret = synquacer_spi_enable(master);
if (ret) {
+ clk_disable_unprepare(sspi->clk);
dev_err(dev, "failed to enable spi (%d)\n", ret);
return ret;
}
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index ecb620169753..bfef41c717f3 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -456,7 +456,11 @@ static int tegra_sflash_probe(struct platform_device *pdev)
goto exit_free_master;
}
- tsd->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto exit_free_master;
+ tsd->irq = ret;
+
ret = request_irq(tsd->irq, tegra_sflash_isr, 0,
dev_name(&pdev->dev), tsd);
if (ret < 0) {
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index d5f9d5fbb3e8..b3d2cc5cb6ec 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -16,10 +16,11 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
#include <linux/spi/xilinx_spi.h>
#include <linux/io.h>
-
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
#define XILINX_SPI_MAX_CS 32
#define XILINX_SPI_NAME "xilinx_spi"
@@ -27,8 +28,18 @@
/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
* Product Specification", DS464
*/
-#define XSPI_CR_OFFSET 0x60 /* Control Register */
-
+/* Register Offsets */
+#define XSPI_CR_OFFSET 0x60
+#define XSPI_SR_OFFSET 0x64
+#define XSPI_TXD_OFFSET 0x68
+#define XSPI_RXD_OFFSET 0x6c
+#define XSPI_SSR_OFFSET 0x70
+#define XIPIF_V123B_DGIER_OFFSET 0x1c
+#define XIPIF_V123B_IISR_OFFSET 0x20
+#define XIPIF_V123B_IIER_OFFSET 0x28
+#define XIPIF_V123B_RESETR_OFFSET 0x40
+
+/* Register bit masks */
#define XSPI_CR_LOOP 0x01
#define XSPI_CR_ENABLE 0x02
#define XSPI_CR_MASTER_MODE 0x04
@@ -41,133 +52,203 @@
#define XSPI_CR_MANUAL_SSELECT 0x80
#define XSPI_CR_TRANS_INHIBIT 0x100
#define XSPI_CR_LSB_FIRST 0x200
-
-#define XSPI_SR_OFFSET 0x64 /* Status Register */
-
-#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
-#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
-#define XSPI_SR_TX_EMPTY_MASK 0x04 /* Transmit FIFO is empty */
-#define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
-#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
-
-#define XSPI_TXD_OFFSET 0x68 /* Data Transmit Register */
-#define XSPI_RXD_OFFSET 0x6c /* Data Receive Register */
-
-#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
-
-/* Register definitions as per "OPB IPIF (v3.01c) Product Specification", DS414
- * IPIF registers are 32 bit
- */
-#define XIPIF_V123B_DGIER_OFFSET 0x1c /* IPIF global int enable reg */
+#define XSPI_SR_RX_EMPTY_MASK 0x01
+#define XSPI_SR_RX_FULL_MASK 0x02
+#define XSPI_SR_TX_EMPTY_MASK 0x04
+#define XSPI_SR_TX_FULL_MASK 0x08
+#define XSPI_SR_MODE_FAULT_MASK 0x10
#define XIPIF_V123B_GINTR_ENABLE 0x80000000
-
-#define XIPIF_V123B_IISR_OFFSET 0x20 /* IPIF interrupt status reg */
-#define XIPIF_V123B_IIER_OFFSET 0x28 /* IPIF interrupt enable reg */
-
-#define XSPI_INTR_MODE_FAULT 0x01 /* Mode fault error */
-#define XSPI_INTR_SLAVE_MODE_FAULT 0x02 /* Selected as slave while
- * disabled */
-#define XSPI_INTR_TX_EMPTY 0x04 /* TxFIFO is empty */
-#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */
-#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */
-#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */
-#define XSPI_INTR_TX_HALF_EMPTY 0x40 /* TxFIFO is half empty */
-
-#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
-#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
-
+#define XSPI_INTR_MODE_FAULT 0x01
+#define XSPI_INTR_SLAVE_MODE_FAULT 0x02
+#define XSPI_INTR_TX_EMPTY 0x04
+#define XSPI_INTR_TX_UNDERRUN 0x08
+#define XSPI_INTR_RX_FULL 0x10
+#define XSPI_INTR_RX_OVERRUN 0x20
+#define XSPI_INTR_TX_HALF_EMPTY 0x40
+#define XIPIF_V123B_RESET_MASK 0x0a
+
+/* Number of bits per word */
+#define XSPI_ONE_BITS_PER_WORD 1
+#define XSPI_TWO_BITS_PER_WORD 2
+#define XSPI_FOUR_BITS_PER_WORD 4
+
+/* Number of data lines used to receive */
+#define XSPI_RX_ONE_WIRE 1
+#define XSPI_RX_FOUR_WIRE 4
+
+/* Auto suspend timeout in milliseconds */
+#define SPI_AUTOSUSPEND_TIMEOUT 3000
+
+/* Command used for a dummy ID read (JEDEC RDID) */
+#define SPI_READ_ID 0x9F
+
+/**
+ * struct xilinx_spi - Xilinx SPI driver instance data
+ * @regs: virt. address of the control registers
+ * @irq: IRQ number
+ * @axi_clk: Pointer to the AXI clock
+ * @axi4_clk: Pointer to the AXI4 clock
+ * @spi_clk: Pointer to the SPI clock
+ * @dev: Pointer to the device
+ * @rx_ptr: Pointer to the RX buffer
+ * @tx_ptr: Pointer to the TX buffer
+ * @bytes_per_word: Number of bytes in a word
+ * @buffer_size: Buffer size in words
+ * @cs_inactive: Level of the CS pins when inactive
+ * @read_fn: For reading data from SPI registers
+ * @write_fn: For writing data to SPI registers
+ * @bytes_to_transfer: Number of bytes left to transfer
+ * @bytes_to_receive: Number of bytes left to receive
+ * @rx_bus_width: Number of wires used to receive data
+ * @tx_fifo: For writing data to fifo
+ * @rx_fifo: For reading data from fifo
+ */
struct xilinx_spi {
- /* bitbang has to be first */
- struct spi_bitbang bitbang;
- struct completion done;
- void __iomem *regs; /* virt. address of the control registers */
-
- int irq;
-
- u8 *rx_ptr; /* pointer in the Tx buffer */
- const u8 *tx_ptr; /* pointer in the Rx buffer */
+ void __iomem *regs;
+ int irq;
+ struct clk *axi_clk;
+ struct clk *axi4_clk;
+ struct clk *spi_clk;
+ struct device *dev;
+ u8 *rx_ptr;
+ const u8 *tx_ptr;
u8 bytes_per_word;
- int buffer_size; /* buffer size in words */
- u32 cs_inactive; /* Level of the CS pins when inactive*/
- unsigned int (*read_fn)(void __iomem *);
- void (*write_fn)(u32, void __iomem *);
+ int buffer_size;
+ u32 cs_inactive;
+ unsigned int (*read_fn)(void __iomem *addr);
+ void (*write_fn)(u32, void __iomem *addr);
+ u32 bytes_to_transfer;
+ u32 bytes_to_receive;
+ u32 rx_bus_width;
+ void (*tx_fifo)(struct xilinx_spi *xqspi);
+ void (*rx_fifo)(struct xilinx_spi *xqspi);
};
+/**
+ * XSPI_FIFO_READ - Generate xspi_read_rx_fifo_* functions
+ * @size: bits_per_word that are read from RX FIFO
+ * @type: C type of value argument
+ *
+ * Generates xspi_read_rx_fifo_* functions used to read
+ * data from the RX FIFO for different transaction widths.
+ */
+#define XSPI_FIFO_READ(size, type) \
+static void xspi_read_rx_fifo_##size(struct xilinx_spi *xqspi) \
+{ \
+ int i; \
+ int count = (xqspi->bytes_to_receive > xqspi->buffer_size) ? \
+ xqspi->buffer_size : xqspi->bytes_to_receive; \
+ u32 data; \
+ for (i = 0; i < count; i += (size/8)) { \
+ data = readl_relaxed(xqspi->regs + XSPI_RXD_OFFSET); \
+ if (xqspi->rx_ptr) \
+ ((type *)xqspi->rx_ptr)[i] = (type)data; \
+ } \
+ xqspi->bytes_to_receive -= count; \
+ if (xqspi->rx_ptr) \
+ xqspi->rx_ptr += count; \
+}
+
+/**
+ * XSPI_FIFO_WRITE - Generate xspi_fill_tx_fifo_* functions
+ * @size: bits_per_word that are written into TX FIFO
+ * @type: C type of value argument
+ *
+ * Generates xspi_fill_tx_fifo_* functions used to write
+ * data into TX FIFO for different transaction widths.
+ */
+#define XSPI_FIFO_WRITE(size, type) \
+static void xspi_fill_tx_fifo_##size(struct xilinx_spi *xqspi) \
+{ \
+ int i; \
+ int count = (xqspi->bytes_to_transfer > xqspi->buffer_size) ? \
+ xqspi->buffer_size : xqspi->bytes_to_transfer; \
+ u32 data = 0; \
+ for (i = 0; i < count; i += (size/8)) { \
+ if (xqspi->tx_ptr) \
+ data = (type)((u8 *)xqspi->tx_ptr)[i]; \
+ writel_relaxed(data, (xqspi->regs + XSPI_TXD_OFFSET)); \
+ } \
+ xqspi->bytes_to_transfer -= count; \
+ if (xqspi->tx_ptr) \
+ xqspi->tx_ptr += count; \
+}
+
+XSPI_FIFO_READ(8, u8)
+XSPI_FIFO_READ(16, u16)
+XSPI_FIFO_READ(32, u32)
+XSPI_FIFO_WRITE(8, u8)
+XSPI_FIFO_WRITE(16, u16)
+XSPI_FIFO_WRITE(32, u32)
+
+/**
+ * xspi_write32 - Write a value to the device register little endian
+ * @val: Value to write at the Register offset
+ * @addr: Register offset
+ *
+ * Write data to the particular SPI register
+ */
static void xspi_write32(u32 val, void __iomem *addr)
{
iowrite32(val, addr);
}
+/**
+ * xspi_read32 - read a value from the device register little endian
+ * @addr: Register offset
+ *
+ * Read data from the particular SPI register
+ *
+ * Return: return value from the SPI register.
+ */
static unsigned int xspi_read32(void __iomem *addr)
{
return ioread32(addr);
}
+/**
+ * xspi_write32_be - Write a value to the device register big endian
+ * @val: Value to write at the Register offset
+ * @addr: Register offset
+ *
+ * Write data to the particular SPI register
+ */
static void xspi_write32_be(u32 val, void __iomem *addr)
{
iowrite32be(val, addr);
}
+/**
+ * xspi_read32_be - read a value from the device register big endian
+ * @addr: Register offset
+ *
+ * Read data from the particular SPI register
+ *
+ * Return: return value from the SPI register.
+ */
static unsigned int xspi_read32_be(void __iomem *addr)
{
return ioread32be(addr);
}
-static void xilinx_spi_tx(struct xilinx_spi *xspi)
-{
- u32 data = 0;
-
- if (!xspi->tx_ptr) {
- xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
- return;
- }
-
- switch (xspi->bytes_per_word) {
- case 1:
- data = *(u8 *)(xspi->tx_ptr);
- break;
- case 2:
- data = *(u16 *)(xspi->tx_ptr);
- break;
- case 4:
- data = *(u32 *)(xspi->tx_ptr);
- break;
- }
-
- xspi->write_fn(data, xspi->regs + XSPI_TXD_OFFSET);
- xspi->tx_ptr += xspi->bytes_per_word;
-}
-
-static void xilinx_spi_rx(struct xilinx_spi *xspi)
-{
- u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
-
- if (!xspi->rx_ptr)
- return;
-
- switch (xspi->bytes_per_word) {
- case 1:
- *(u8 *)(xspi->rx_ptr) = data;
- break;
- case 2:
- *(u16 *)(xspi->rx_ptr) = data;
- break;
- case 4:
- *(u32 *)(xspi->rx_ptr) = data;
- break;
- }
-
- xspi->rx_ptr += xspi->bytes_per_word;
-}
-
+/**
+ * xspi_init_hw - Initialize the hardware
+ * @xspi: Pointer to the xilinx_spi structure
+ *
+ * This function performs the following actions
+ * - Disable and clear all the interrupts
+ * - Enable manual slave select
+ * - Enable the SPI controller
+ */
static void xspi_init_hw(struct xilinx_spi *xspi)
{
void __iomem *regs_base = xspi->regs;
/* Reset the SPI device */
xspi->write_fn(XIPIF_V123B_RESET_MASK,
- regs_base + XIPIF_V123B_RESETR_OFFSET);
- /* Enable the transmit empty interrupt, which we use to determine
+ regs_base + XIPIF_V123B_RESETR_OFFSET);
+ /*
+ * Enable the transmit empty interrupt, which we use to determine
* progress on the transmission.
*/
xspi->write_fn(XSPI_INTR_TX_EMPTY,
@@ -176,262 +257,457 @@ static void xspi_init_hw(struct xilinx_spi *xspi)
xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
/* Deselect the slave on the SPI bus */
xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
- /* Disable the transmitter, enable Manual Slave Select Assertion,
- * put SPI controller into master mode, and enable it */
+ /*
+ * Disable the transmitter, enable Manual Slave Select Assertion,
+ * put SPI controller into master mode, and enable it
+ */
xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE |
XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET,
regs_base + XSPI_CR_OFFSET);
}
-static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
+/**
+ * xspi_chipselect - Select or deselect the chip select line
+ * @qspi: Pointer to the spi_device structure
+ * @is_high: Select (0) or deselect (1) the chip select line
+ *
+ */
+static void xspi_chipselect(struct spi_device *qspi, bool is_high)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
- u16 cr;
+ struct xilinx_spi *xqspi = spi_master_get_devdata(qspi->master);
u32 cs;
- if (is_on == BITBANG_CS_INACTIVE) {
- /* Deselect the slave on the SPI bus */
- xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET);
- return;
+ if (is_high) {
+ /* Deselect the slave */
+ xqspi->write_fn(xqspi->cs_inactive,
+ xqspi->regs + XSPI_SSR_OFFSET);
+ } else {
+ cs = xqspi->cs_inactive;
+ cs ^= BIT(qspi->chip_select);
+ /* Activate the chip select */
+ xqspi->write_fn(cs, xqspi->regs + XSPI_SSR_OFFSET);
}
+}
- /* Set the SPI clock phase and polarity */
- cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK;
- if (spi->mode & SPI_CPHA)
- cr |= XSPI_CR_CPHA;
- if (spi->mode & SPI_CPOL)
- cr |= XSPI_CR_CPOL;
- if (spi->mode & SPI_LSB_FIRST)
- cr |= XSPI_CR_LSB_FIRST;
- if (spi->mode & SPI_LOOP)
- cr |= XSPI_CR_LOOP;
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
-
- /* We do not check spi->max_speed_hz here as the SPI clock
- * frequency is not software programmable (the IP block design
- * parameter)
- */
+/**
+ * xilinx_spi_irq - Interrupt service routine of the SPI controller
+ * @irq: IRQ number
+ * @dev_id: Pointer to the xspi structure
+ *
+ * This function handles TX empty only.
+ * On TX empty interrupt this function reads the received data from RX FIFO
+ * and fills the TX FIFO if there is any data remaining to be transferred.
+ *
+ * Return: IRQ_HANDLED when interrupt is handled
+ * IRQ_NONE otherwise.
+ */
+static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct xilinx_spi *xspi = spi_master_get_devdata(dev_id);
+ u32 ipif_isr;
+ int status = IRQ_NONE;
- cs = xspi->cs_inactive;
- cs ^= BIT(spi->chip_select);
+ /* Get the IPIF interrupts, and clear them immediately */
+ ipif_isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ if (ipif_isr & XSPI_INTR_TX_EMPTY) {
+ /* Transmission completed */
+ xspi->rx_fifo(xspi);
+ if (xspi->bytes_to_transfer) {
+ /* There is more data to send */
+ xspi->tx_fifo(xspi);
+ }
+ status = IRQ_HANDLED;
+ }
- /* Activate the chip select */
- xspi->write_fn(cs, xspi->regs + XSPI_SSR_OFFSET);
+ if (!xspi->bytes_to_receive && !xspi->bytes_to_transfer) {
+ spi_finalize_current_transfer(master);
+ /* Disable the interrupts here. */
+ xspi->write_fn(0x0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
+ }
+
+ return status;
}
-/* spi_bitbang requires custom setup_transfer() to be defined if there is a
- * custom txrx_bufs().
+/**
+ * xilinx_spi_startup_block - Perform a dummy read as a
+ * workaround for the startup block issue in the SPI controller.
+ * @xspi: Pointer to the xilinx_spi structure
+ * @cs_num: chip select number.
+ *
+ * Perform a dummy read if the startup block is enabled in the
+ * SPI controller.
+ *
+ * Return: None
*/
-static int xilinx_spi_setup_transfer(struct spi_device *spi,
- struct spi_transfer *t)
+static void xilinx_spi_startup_block(struct xilinx_spi *xspi, u32 cs_num)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ void __iomem *regs_base = xspi->regs;
+ u32 chip_sel, config_reg, status_reg;
- if (spi->mode & SPI_CS_HIGH)
- xspi->cs_inactive &= ~BIT(spi->chip_select);
+ /* Activate the chip select */
+ chip_sel = xspi->cs_inactive;
+ chip_sel ^= BIT(cs_num);
+ xspi->write_fn(chip_sel, regs_base + XSPI_SSR_OFFSET);
+
+ /* Write ReadId to the TXD register */
+ xspi->write_fn(SPI_READ_ID, regs_base + XSPI_TXD_OFFSET);
+ xspi->write_fn(0x0, regs_base + XSPI_TXD_OFFSET);
+ xspi->write_fn(0x0, regs_base + XSPI_TXD_OFFSET);
+
+ config_reg = xspi->read_fn(regs_base + XSPI_CR_OFFSET);
+ /* Enable master transaction */
+ config_reg &= ~XSPI_CR_TRANS_INHIBIT;
+ xspi->write_fn(config_reg, regs_base + XSPI_CR_OFFSET);
+
+ status_reg = xspi->read_fn(regs_base + XSPI_SR_OFFSET);
+ while ((status_reg & XSPI_SR_TX_EMPTY_MASK) == 0)
+ status_reg = xspi->read_fn(regs_base + XSPI_SR_OFFSET);
+
+ /* Disable master transaction */
+ config_reg |= XSPI_CR_TRANS_INHIBIT;
+ xspi->write_fn(config_reg, regs_base + XSPI_CR_OFFSET);
+
+ /* Read the RXD Register */
+ status_reg = xspi->read_fn(regs_base + XSPI_SR_OFFSET);
+ while ((status_reg & XSPI_SR_RX_EMPTY_MASK) == 0) {
+ xspi->read_fn(regs_base + XSPI_RXD_OFFSET);
+ status_reg = xspi->read_fn(regs_base + XSPI_SR_OFFSET);
+ }
+
+ xspi_init_hw(xspi);
+}
+
+/**
+ * xspi_setup_transfer - Configure SPI controller for specified
+ * transfer
+ * @qspi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides
+ * information about next transfer setup parameters
+ *
+ * Sets the operational mode of the SPI controller for the next SPI
+ * transfer.
+ *
+ * Return: 0 always
+ */
+static int xspi_setup_transfer(struct spi_device *qspi,
+ struct spi_transfer *transfer)
+{
+ struct xilinx_spi *xqspi = spi_master_get_devdata(qspi->master);
+ u32 config_reg;
+
+ config_reg = xqspi->read_fn(xqspi->regs + XSPI_CR_OFFSET);
+ /* Set the QSPI clock phase and clock polarity */
+ config_reg &= ~(XSPI_CR_CPHA | XSPI_CR_CPOL);
+ if (qspi->mode & SPI_CPHA)
+ config_reg |= XSPI_CR_CPHA;
+ if (qspi->mode & SPI_CPOL)
+ config_reg |= XSPI_CR_CPOL;
+ if (qspi->mode & SPI_LSB_FIRST)
+ config_reg |= XSPI_CR_LSB_FIRST;
+ xqspi->write_fn(config_reg, xqspi->regs + XSPI_CR_OFFSET);
+
+ if (qspi->mode & SPI_CS_HIGH)
+ xqspi->cs_inactive &= ~BIT(qspi->chip_select);
else
- xspi->cs_inactive |= BIT(spi->chip_select);
+ xqspi->cs_inactive |= BIT(qspi->chip_select);
return 0;
}
-static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+/**
+ * xspi_setup - Configure the SPI controller
+ * @qspi: Pointer to the spi_device structure
+ *
+ * Sets the operational mode of the SPI controller for the next SPI
+ * transfer.
+ *
+ * Return: 0 on success; error value otherwise.
+ */
+static int xspi_setup(struct spi_device *qspi)
+{
+ int ret;
+ struct xilinx_spi *xqspi = spi_master_get_devdata(qspi->master);
+
+ if (qspi->master->busy)
+ return -EBUSY;
+
+ ret = pm_runtime_get_sync(xqspi->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = xspi_setup_transfer(qspi, NULL);
+ pm_runtime_put_sync(xqspi->dev);
+
+ return ret;
+}
+
+/**
+ * xspi_start_transfer - Initiates the SPI transfer
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ * @qspi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provide information
+ * about next transfer parameters
+ *
+ * This function fills the TX FIFO and starts the SPI transfer; completion
+ * is signalled asynchronously from the interrupt handler.
+ *
+ * Return: Length of the transfer in bytes
+ */
+static int xspi_start_transfer(struct spi_master *master,
+ struct spi_device *qspi,
+ struct spi_transfer *transfer)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
- int remaining_words; /* the number of words left to transfer */
- bool use_irq = false;
- u16 cr = 0;
-
- /* We get here with transmitter inhibited */
-
- xspi->tx_ptr = t->tx_buf;
- xspi->rx_ptr = t->rx_buf;
- remaining_words = t->len / xspi->bytes_per_word;
-
- if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) {
- u32 isr;
- use_irq = true;
- /* Inhibit irq to avoid spurious irqs on tx_empty*/
- cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
- xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
- xspi->regs + XSPI_CR_OFFSET);
- /* ACK old irqs (if any) */
- isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
- if (isr)
- xspi->write_fn(isr,
- xspi->regs + XIPIF_V123B_IISR_OFFSET);
- /* Enable the global IPIF interrupt */
- xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
- xspi->regs + XIPIF_V123B_DGIER_OFFSET);
- reinit_completion(&xspi->done);
+ struct xilinx_spi *xqspi = spi_master_get_devdata(master);
+ u32 cr;
+
+ xqspi->tx_ptr = transfer->tx_buf;
+ xqspi->rx_ptr = transfer->rx_buf;
+
+ if (transfer->dummy) {
+ xqspi->bytes_to_transfer = (transfer->len - (transfer->dummy/8))
+ + ((transfer->dummy/8) *
+ xqspi->rx_bus_width);
+ xqspi->bytes_to_receive = (transfer->len - (transfer->dummy/8))
+ + ((transfer->dummy/8) *
+ xqspi->rx_bus_width);
+ } else {
+ xqspi->bytes_to_transfer = transfer->len;
+ xqspi->bytes_to_receive = transfer->len;
}
- while (remaining_words) {
- int n_words, tx_words, rx_words;
- u32 sr;
- int stalled;
+ xspi_setup_transfer(qspi, transfer);
+ cr = xqspi->read_fn(xqspi->regs + XSPI_CR_OFFSET);
+ /* Enable master transaction inhibit */
+ cr |= XSPI_CR_TRANS_INHIBIT;
+ xqspi->write_fn(cr, xqspi->regs + XSPI_CR_OFFSET);
+ xqspi->tx_fifo(xqspi);
+ /* Disable master transaction inhibit */
+ cr &= ~XSPI_CR_TRANS_INHIBIT;
+ xqspi->write_fn(cr, xqspi->regs + XSPI_CR_OFFSET);
+ xqspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
+ xqspi->regs + XIPIF_V123B_DGIER_OFFSET);
+
+ return transfer->len;
+}
- n_words = min(remaining_words, xspi->buffer_size);
+/**
+ * xspi_prepare_transfer_hardware - Prepares hardware for transfer.
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function enables the SPI master controller.
+ *
+ * Return: 0 on success; error value otherwise
+ */
+static int xspi_prepare_transfer_hardware(struct spi_master *master)
+{
+ struct xilinx_spi *xqspi = spi_master_get_devdata(master);
- tx_words = n_words;
- while (tx_words--)
- xilinx_spi_tx(xspi);
+ u32 cr;
+ int ret;
- /* Start the transfer by not inhibiting the transmitter any
- * longer
- */
+ ret = pm_runtime_get_sync(xqspi->dev);
+ if (ret < 0)
+ return ret;
- if (use_irq) {
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
- wait_for_completion(&xspi->done);
- /* A transmit has just completed. Process received data
- * and check for more data to transmit. Always inhibit
- * the transmitter while the Isr refills the transmit
- * register/FIFO, or make sure it is stopped if we're
- * done.
- */
- xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
- xspi->regs + XSPI_CR_OFFSET);
- sr = XSPI_SR_TX_EMPTY_MASK;
- } else
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
-
- /* Read out all the data from the Rx FIFO */
- rx_words = n_words;
- stalled = 10;
- while (rx_words) {
- if (rx_words == n_words && !(stalled--) &&
- !(sr & XSPI_SR_TX_EMPTY_MASK) &&
- (sr & XSPI_SR_RX_EMPTY_MASK)) {
- dev_err(&spi->dev,
- "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
- xspi_init_hw(xspi);
- return -EIO;
- }
-
- if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
- xilinx_spi_rx(xspi);
- rx_words--;
- continue;
- }
-
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- if (!(sr & XSPI_SR_RX_EMPTY_MASK)) {
- xilinx_spi_rx(xspi);
- rx_words--;
- }
- }
+ cr = xqspi->read_fn(xqspi->regs + XSPI_CR_OFFSET);
+ cr |= XSPI_CR_ENABLE;
+ xqspi->write_fn(cr, xqspi->regs + XSPI_CR_OFFSET);
+
+ return 0;
+}
+
+/**
+ * xspi_unprepare_transfer_hardware - Relaxes hardware after transfer
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function disables the SPI master controller.
+ *
+ * Return: Always 0
+ */
+static int xspi_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct xilinx_spi *xqspi = spi_master_get_devdata(master);
+ u32 cr;
+
+ cr = xqspi->read_fn(xqspi->regs + XSPI_CR_OFFSET);
+ cr &= ~XSPI_CR_ENABLE;
+ xqspi->write_fn(cr, xqspi->regs + XSPI_CR_OFFSET);
+
+ pm_runtime_put_sync(xqspi->dev);
+
+ return 0;
+}
+
+/**
+ * xilinx_spi_runtime_resume - Runtime resume method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * This function enables the clocks
+ *
+ * Return: 0 on success and error value on error
+ */
+static int __maybe_unused xilinx_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct xilinx_spi *xspi = spi_master_get_devdata(master);
+ int ret;
- remaining_words -= n_words;
+ ret = clk_enable(xspi->axi_clk);
+ if (ret) {
+ dev_err(dev, "Can not enable AXI clock\n");
+ return ret;
}
- if (use_irq) {
- xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+ ret = clk_enable(xspi->axi4_clk);
+ if (ret) {
+ dev_err(dev, "Can not enable AXI4 clock\n");
+ goto clk_disable_axi_clk;
}
- return t->len;
+ ret = clk_enable(xspi->spi_clk);
+ if (ret) {
+ dev_err(dev, "Can not enable SPI clock\n");
+ goto clk_disable_axi4_clk;
+ }
+
+ return 0;
+
+clk_disable_axi4_clk:
+ clk_disable(xspi->axi4_clk);
+clk_disable_axi_clk:
+ clk_disable(xspi->axi_clk);
+
+ return ret;
}
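The unconditional clk_enable() calls here are safe even when some clocks were absent at probe time: the probe path stores NULL for a missing optional clock, and the common clock API treats a NULL struct clk pointer as a no-op:

    /* The contract relied on by the suspend/resume paths */
    ret = clk_enable(NULL);   /* returns 0, does nothing */
    clk_disable(NULL);        /* equally a no-op */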
+/**
+ * xilinx_spi_runtime_suspend - Runtime suspend method for the SPI driver
+ * @dev: Address of the device structure
+ *
+ * This function disables the clocks.
+ *
+ * Return: Always 0
+ */
+static int __maybe_unused xilinx_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct xilinx_spi *xspi = spi_master_get_devdata(master);
+
+ clk_disable(xspi->axi_clk);
+ clk_disable(xspi->axi4_clk);
+ clk_disable(xspi->spi_clk);
+
+ return 0;
+}
-/* This driver supports single master mode only. Hence Tx FIFO Empty
- * is the only interrupt we care about.
- * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
- * Fault are not to happen.
+/**
+ * xilinx_spi_resume - Resume method for the SPI driver
+ * @dev: Address of the device structure
+ *
+ * The function starts the SPI driver queue and initializes the SPI
+ * controller.
+ *
+ * Return: 0 on success; error value otherwise
*/
-static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
+static int __maybe_unused xilinx_spi_resume(struct device *dev)
{
- struct xilinx_spi *xspi = dev_id;
- u32 ipif_isr;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct xilinx_spi *xspi = spi_master_get_devdata(master);
+ int ret = 0;
- /* Get the IPIF interrupts, and clear them immediately */
- ipif_isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
- xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ if (!pm_runtime_suspended(dev)) {
+ ret = xilinx_spi_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+ }
- if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
- complete(&xspi->done);
- return IRQ_HANDLED;
+ ret = spi_master_resume(master);
+ if (ret < 0) {
+ clk_disable(xspi->axi_clk);
+ clk_disable(xspi->axi4_clk);
+ clk_disable(xspi->spi_clk);
}
- return IRQ_NONE;
+ return ret;
}
-static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
+
+/**
+ * xilinx_spi_suspend - Suspend method for the SPI driver
+ * @dev: Address of the device structure
+ *
+ * This function stops the SPI driver queue and disables the SPI controller.
+ *
+ * Return: Always 0
+ */
+static int __maybe_unused xilinx_spi_suspend(struct device *dev)
{
- u8 sr;
- int n_words = 0;
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret = 0;
- /*
- * Before the buffer_size detection we reset the core
- * to make sure we start with a clean state.
- */
- xspi->write_fn(XIPIF_V123B_RESET_MASK,
- xspi->regs + XIPIF_V123B_RESETR_OFFSET);
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ xilinx_spi_runtime_suspend(dev);
- /* Fill the Tx FIFO with as many words as possible */
- do {
- xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- n_words++;
- } while (!(sr & XSPI_SR_TX_FULL_MASK));
+ xspi_unprepare_transfer_hardware(master);
- return n_words;
+ return ret;
}
-static const struct of_device_id xilinx_spi_of_match[] = {
- { .compatible = "xlnx,axi-quad-spi-1.00.a", },
- { .compatible = "xlnx,xps-spi-2.00.a", },
- { .compatible = "xlnx,xps-spi-2.00.b", },
- {}
+static const struct dev_pm_ops xilinx_spi_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(xilinx_spi_runtime_suspend,
+ xilinx_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(xilinx_spi_suspend, xilinx_spi_resume)
};
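For reference, with CONFIG_PM and CONFIG_PM_SLEEP enabled the two macros above expand to roughly the following initializers (simplified sketch; the real macros also route freeze/thaw/poweroff/restore through the same sleep callbacks):

    static const struct dev_pm_ops xilinx_spi_dev_pm_ops = {
        .runtime_suspend = xilinx_spi_runtime_suspend,
        .runtime_resume  = xilinx_spi_runtime_resume,
        .suspend         = xilinx_spi_suspend,
        .resume          = xilinx_spi_resume,
        /* ... */
    };

With either option disabled the corresponding fields compile away, which is why the callbacks are annotated __maybe_unused.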
-MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
+/**
+ * xilinx_spi_probe - Probe method for the SPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ *
+ * Return: 0 on success; error value otherwise
+ */
static int xilinx_spi_probe(struct platform_device *pdev)
{
struct xilinx_spi *xspi;
- struct xspi_platform_data *pdata;
struct resource *res;
int ret, num_cs = 0, bits_per_word = 8;
+ u32 cs_num;
struct spi_master *master;
- u32 tmp;
- u8 i;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata) {
- num_cs = pdata->num_chipselect;
- bits_per_word = pdata->bits_per_word;
- } else {
- of_property_read_u32(pdev->dev.of_node, "xlnx,num-ss-bits",
- &num_cs);
- }
+ struct device_node *nc;
+ u32 tmp, rx_bus_width, fifo_size;
+ bool startup_block;
- if (!num_cs) {
- dev_err(&pdev->dev,
- "Missing slave select configuration data\n");
- return -EINVAL;
- }
+ of_property_read_u32(pdev->dev.of_node, "num-cs",
+ &num_cs);
+ if (!num_cs)
+ num_cs = 1;
if (num_cs > XILINX_SPI_MAX_CS) {
dev_err(&pdev->dev, "Invalid number of spi slaves\n");
return -EINVAL;
}
+ startup_block = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,startup-block");
+
master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
if (!master)
return -ENODEV;
- /* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
- SPI_CS_HIGH;
-
xspi = spi_master_get_devdata(master);
- xspi->cs_inactive = 0xffffffff;
- xspi->bitbang.master = master;
- xspi->bitbang.chipselect = xilinx_spi_chipselect;
- xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
- xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
- init_completion(&xspi->done);
-
+ master->dev.of_node = pdev->dev.of_node;
+ platform_set_drvdata(pdev, master);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xspi->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(xspi->regs)) {
@@ -439,20 +715,104 @@ static int xilinx_spi_probe(struct platform_device *pdev)
goto put_master;
}
- master->bus_num = pdev->id;
- master->num_chipselect = num_cs;
- master->dev.of_node = pdev->dev.of_node;
+ ret = of_property_read_u32(pdev->dev.of_node, "fifo-size",
+ &fifo_size);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Missing fifo size\n");
+ ret = -EINVAL;
+ goto put_master;
+ }
+ of_property_read_u32(pdev->dev.of_node, "bits-per-word",
+ &bits_per_word);
+
+ xspi->rx_bus_width = XSPI_ONE_BITS_PER_WORD;
+ for_each_available_child_of_node(pdev->dev.of_node, nc) {
+ if (startup_block) {
+ ret = of_property_read_u32(nc, "reg", &cs_num);
+ if (ret < 0) {
+ of_node_put(nc);
+ ret = -EINVAL;
+ goto put_master;
+ }
+ }
+ ret = of_property_read_u32(nc, "spi-rx-bus-width",
+ &rx_bus_width);
+ if (!ret) {
+ xspi->rx_bus_width = rx_bus_width;
+ of_node_put(nc);
+ break;
+ }
+ }
+
+ xspi->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
+ if (IS_ERR(xspi->axi_clk)) {
+ if (PTR_ERR(xspi->axi_clk) != -ENOENT) {
+ ret = PTR_ERR(xspi->axi_clk);
+ goto put_master;
+ }
+
+ /*
+ * Clock framework support is optional; continue
+ * anyway if we don't find a matching clock.
+ */
+ xspi->axi_clk = NULL;
+ }
+
+ ret = clk_prepare(xspi->axi_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare AXI clock\n");
+ goto put_master;
+ }
+
+ xspi->axi4_clk = devm_clk_get(&pdev->dev, "axi4_clk");
+ if (IS_ERR(xspi->axi4_clk)) {
+ if (PTR_ERR(xspi->axi4_clk) != -ENOENT) {
+ ret = PTR_ERR(xspi->axi4_clk);
+ goto clk_unprepare_axi_clk;
+ }
+
+ /*
+ * Clock framework support is optional; continue
+ * anyway if we don't find a matching clock.
+ */
+ xspi->axi4_clk = NULL;
+ }
+
+ ret = clk_prepare(xspi->axi4_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare AXI4 clock\n");
+ goto clk_unprepare_axi_clk;
+ }
+
+ xspi->spi_clk = devm_clk_get(&pdev->dev, "spi_clk");
+ if (IS_ERR(xspi->spi_clk)) {
+ if (PTR_ERR(xspi->spi_clk) != -ENOENT) {
+ ret = PTR_ERR(xspi->spi_clk);
+ goto clk_unprepare_axi4_clk;
+ }
+
+ /*
+ * Clock framework support is optional; continue
+ * anyway if we don't find a matching clock.
+ */
+ xspi->spi_clk = NULL;
+ }
+
+ ret = clk_prepare(xspi->spi_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare SPI clock\n");
+ goto clk_unprepare_axi4_clk;
+ }
+
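The devm_clk_get()/-ENOENT dance above is repeated for all three optional clocks. On kernels that provide devm_clk_get_optional() (v5.1 and later), each lookup could collapse to a sketch like this, since that helper returns NULL rather than ERR_PTR(-ENOENT) for a missing clock:

    xspi->axi_clk = devm_clk_get_optional(&pdev->dev, "axi_clk");
    if (IS_ERR(xspi->axi_clk)) {
        ret = PTR_ERR(xspi->axi_clk);   /* real errors only */
        goto put_master;
    }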
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto clk_unprepare_all;
+
+ xspi->dev = &pdev->dev;
- /*
- * Detect endianess on the IP via loop bit in CR. Detection
- * must be done before reset is sent because incorrect reset
- * value generates error interrupt.
- * Setup little endian helper functions first and try to use them
- * and check if bit was correctly setup or not.
- */
xspi->read_fn = xspi_read32;
xspi->write_fn = xspi_write32;
-
+ /* Detect endianness on the IP via loop bit in CR register */
xspi->write_fn(XSPI_CR_LOOP, xspi->regs + XSPI_CR_OFFSET);
tmp = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
tmp &= XSPI_CR_LOOP;
@@ -461,62 +821,112 @@ static int xilinx_spi_probe(struct platform_device *pdev)
xspi->write_fn = xspi_write32_be;
}
- master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
- xspi->bytes_per_word = bits_per_word / 8;
- xspi->buffer_size = xilinx_spi_find_buffer_size(xspi);
-
+ xspi->buffer_size = fifo_size;
xspi->irq = platform_get_irq(pdev, 0);
if (xspi->irq < 0 && xspi->irq != -ENXIO) {
ret = xspi->irq;
- goto put_master;
+ goto clk_unprepare_all;
} else if (xspi->irq >= 0) {
/* Register for SPI Interrupt */
- ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
- dev_name(&pdev->dev), xspi);
+ ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq,
+ 0, dev_name(&pdev->dev), master);
if (ret)
- goto put_master;
+ goto clk_unprepare_all;
}
/* SPI controller initializations */
xspi_init_hw(xspi);
- ret = spi_bitbang_start(&xspi->bitbang);
- if (ret) {
- dev_err(&pdev->dev, "spi_bitbang_start FAILED\n");
- goto put_master;
+ pm_runtime_put(&pdev->dev);
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = num_cs;
+ master->setup = xspi_setup;
+ master->set_cs = xspi_chipselect;
+ master->transfer_one = xspi_start_transfer;
+ master->prepare_transfer_hardware = xspi_prepare_transfer_hardware;
+ master->unprepare_transfer_hardware = xspi_unprepare_transfer_hardware;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+ xspi->bytes_per_word = bits_per_word / 8;
+ xspi->tx_fifo = xspi_fill_tx_fifo_8;
+ xspi->rx_fifo = xspi_read_rx_fifo_8;
+ if (xspi->rx_bus_width == XSPI_RX_ONE_WIRE) {
+ if (xspi->bytes_per_word == XSPI_TWO_BITS_PER_WORD) {
+ xspi->tx_fifo = xspi_fill_tx_fifo_16;
+ xspi->rx_fifo = xspi_read_rx_fifo_16;
+ } else if (xspi->bytes_per_word == XSPI_FOUR_BITS_PER_WORD) {
+ xspi->tx_fifo = xspi_fill_tx_fifo_32;
+ xspi->rx_fifo = xspi_read_rx_fifo_32;
+ }
+ } else if (xspi->rx_bus_width == XSPI_RX_FOUR_WIRE) {
+ master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
+ } else {
+ dev_err(&pdev->dev, "Dual Mode not supported\n");
+ ret = -EINVAL;
+ goto clk_unprepare_all;
}
+ xspi->cs_inactive = 0xffffffff;
- dev_info(&pdev->dev, "at 0x%08llX mapped to 0x%p, irq=%d\n",
- (unsigned long long)res->start, xspi->regs, xspi->irq);
+ /*
+ * This is the workaround for the STARTUP block issue in
+ * the SPI controller. The SPI clock is routed to the flash
+ * through the STARTUP block, which does not provide the
+ * clock as soon as the QSPI issues a command, so the first
+ * command fails.
+ */
+ if (startup_block)
+ xilinx_spi_startup_block(xspi, cs_num);
- if (pdata) {
- for (i = 0; i < pdata->num_devices; i++)
- spi_new_device(master, pdata->devices + i);
+ ret = spi_register_master(master);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_master failed\n");
+ goto clk_unprepare_all;
}
- platform_set_drvdata(pdev, master);
- return 0;
+ return ret;
+clk_unprepare_all:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ clk_unprepare(xspi->spi_clk);
+clk_unprepare_axi4_clk:
+ clk_unprepare(xspi->axi4_clk);
+clk_unprepare_axi_clk:
+ clk_unprepare(xspi->axi_clk);
put_master:
spi_master_put(master);
return ret;
}
+/**
+ * xilinx_spi_remove - Remove method for the SPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if a device is physically removed from the system or
+ * if the driver module is being unloaded. It frees all resources allocated to
+ * the device.
+ *
+ * Return: 0 Always
+ */
static int xilinx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct xilinx_spi *xspi = spi_master_get_devdata(master);
void __iomem *regs_base = xspi->regs;
- spi_bitbang_stop(&xspi->bitbang);
-
/* Disable all the interrupts just in case */
xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET);
/* Disable the global IPIF interrupt */
xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
- spi_master_put(xspi->bitbang.master);
+ pm_runtime_disable(&pdev->dev);
+
+ clk_disable_unprepare(xspi->axi_clk);
+ clk_disable_unprepare(xspi->axi4_clk);
+ clk_disable_unprepare(xspi->spi_clk);
+
+ spi_unregister_master(master);
return 0;
}
@@ -524,12 +934,21 @@ static int xilinx_spi_remove(struct platform_device *pdev)
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:" XILINX_SPI_NAME);
+static const struct of_device_id xilinx_spi_of_match[] = {
+ { .compatible = "xlnx,axi-quad-spi-1.00.a", },
+ { .compatible = "xlnx,xps-spi-2.00.a", },
+ { .compatible = "xlnx,xps-spi-2.00.b", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
+
static struct platform_driver xilinx_spi_driver = {
.probe = xilinx_spi_probe,
.remove = xilinx_spi_remove,
.driver = {
.name = XILINX_SPI_NAME,
.of_match_table = xilinx_spi_of_match,
+ .pm = &xilinx_spi_dev_pm_ops,
},
};
module_platform_driver(xilinx_spi_driver);
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index b3588240eb39..31f825151a20 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -1,8 +1,12 @@
-// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2019 Xilinx, Inc.
+ * Xilinx Zynq Quad-SPI (QSPI) controller driver (master mode only)
*
- * Author: Naga Sureshkumar Relli <nagasure@xilinx.com>
+ * Copyright (C) 2009 - 2014 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <linux/clk.h>
@@ -16,7 +20,9 @@
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
-#include <linux/spi/spi-mem.h>
+
+/* Name of this driver */
+#define DRIVER_NAME "zynq-qspi"
/* Register offset definitions */
#define ZYNQ_QSPI_CONFIG_OFFSET 0x00 /* Configuration Register, RW */
@@ -44,16 +50,16 @@
* This register contains various control bits that effect the operation
* of the QSPI controller
*/
-#define ZYNQ_QSPI_CONFIG_IFMODE_MASK BIT(31) /* Flash Memory Interface */
-#define ZYNQ_QSPI_CONFIG_MANSRT_MASK BIT(16) /* Manual TX Start */
-#define ZYNQ_QSPI_CONFIG_MANSRTEN_MASK BIT(15) /* Enable Manual TX Mode */
-#define ZYNQ_QSPI_CONFIG_SSFORCE_MASK BIT(14) /* Manual Chip Select */
-#define ZYNQ_QSPI_CONFIG_BDRATE_MASK GENMASK(5, 3) /* Baud Rate Mask */
-#define ZYNQ_QSPI_CONFIG_CPHA_MASK BIT(2) /* Clock Phase Control */
-#define ZYNQ_QSPI_CONFIG_CPOL_MASK BIT(1) /* Clock Polarity Control */
-#define ZYNQ_QSPI_CONFIG_SSCTRL_MASK BIT(10) /* Slave Select Mask */
-#define ZYNQ_QSPI_CONFIG_FWIDTH_MASK GENMASK(7, 6) /* FIFO width */
-#define ZYNQ_QSPI_CONFIG_MSTREN_MASK BIT(0) /* Master Mode */
+#define ZYNQ_QSPI_CONFIG_IFMODE_MASK 0x80000000 /* Flash Memory Interface */
+#define ZYNQ_QSPI_CONFIG_MANSRT_MASK 0x00010000 /* Manual TX Start */
+#define ZYNQ_QSPI_CONFIG_MANSRTEN_MASK 0x00008000 /* Enable Manual TX Mode */
+#define ZYNQ_QSPI_CONFIG_SSFORCE_MASK 0x00004000 /* Manual Chip Select */
+#define ZYNQ_QSPI_CONFIG_BDRATE_MASK 0x00000038 /* Baud Rate Divisor Mask */
+#define ZYNQ_QSPI_CONFIG_CPHA_MASK 0x00000004 /* Clock Phase Control */
+#define ZYNQ_QSPI_CONFIG_CPOL_MASK 0x00000002 /* Clock Polarity Control */
+#define ZYNQ_QSPI_CONFIG_SSCTRL_MASK 0x00000400 /* Slave Select Mask */
+#define ZYNQ_QSPI_CONFIG_FWIDTH_MASK 0x000000C0 /* FIFO width */
+#define ZYNQ_QSPI_CONFIG_MSTREN_MASK 0x00000001 /* Master Mode */
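The hex constants above are bit-for-bit identical to the BIT()/GENMASK() macros they replace, so this hunk is purely cosmetic; for example:

    BIT(31)       == 0x80000000   /* IFMODE */
    GENMASK(5, 3) == 0x00000038   /* BDRATE, bits 5..3 */
    GENMASK(7, 6) == 0x000000C0   /* FWIDTH, bits 7..6 */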
/*
* QSPI Configuration Register - Baud rate and slave select
@@ -61,7 +67,7 @@
* These are the values used in the calculation of baud rate divisor and
* setting the slave select.
*/
-#define ZYNQ_QSPI_BAUD_DIV_MAX GENMASK(2, 0) /* Baud rate maximum */
+#define ZYNQ_QSPI_BAUD_DIV_MAX 7 /* Baud rate divisor maximum */
#define ZYNQ_QSPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
#define ZYNQ_QSPI_SS_SHIFT 10 /* Slave Select field shift in CR */
@@ -71,19 +77,10 @@
* All the four interrupt registers (Status/Mask/Enable/Disable) have the same
* bit definitions.
*/
-#define ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK BIT(0) /* QSPI RX FIFO Overflow */
-#define ZYNQ_QSPI_IXR_TXNFULL_MASK BIT(2) /* QSPI TX FIFO Overflow */
-#define ZYNQ_QSPI_IXR_TXFULL_MASK BIT(3) /* QSPI TX FIFO is full */
-#define ZYNQ_QSPI_IXR_RXNEMTY_MASK BIT(4) /* QSPI RX FIFO Not Empty */
-#define ZYNQ_QSPI_IXR_RXF_FULL_MASK BIT(5) /* QSPI RX FIFO is full */
-#define ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK BIT(6) /* QSPI TX FIFO Underflow */
-#define ZYNQ_QSPI_IXR_ALL_MASK (ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK | \
- ZYNQ_QSPI_IXR_TXNFULL_MASK | \
- ZYNQ_QSPI_IXR_TXFULL_MASK | \
- ZYNQ_QSPI_IXR_RXNEMTY_MASK | \
- ZYNQ_QSPI_IXR_RXF_FULL_MASK | \
- ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK)
-#define ZYNQ_QSPI_IXR_RXTX_MASK (ZYNQ_QSPI_IXR_TXNFULL_MASK | \
+#define ZYNQ_QSPI_IXR_TXNFULL_MASK 0x00000004 /* QSPI TX FIFO Not Full */
+#define ZYNQ_QSPI_IXR_TXFULL_MASK 0x00000008 /* QSPI TX FIFO is full */
+#define ZYNQ_QSPI_IXR_RXNEMTY_MASK 0x00000010 /* QSPI RX FIFO Not Empty */
+#define ZYNQ_QSPI_IXR_ALL_MASK (ZYNQ_QSPI_IXR_TXNFULL_MASK | \
ZYNQ_QSPI_IXR_RXNEMTY_MASK)
/*
@@ -91,7 +88,7 @@
*
* This register is used to enable or disable the QSPI controller
*/
-#define ZYNQ_QSPI_ENABLE_ENABLE_MASK BIT(0) /* QSPI Enable Bit Mask */
+#define ZYNQ_QSPI_ENABLE_ENABLE_MASK 0x00000001 /* QSPI Enable Bit Mask */
/*
* QSPI Linear Configuration Register
@@ -99,9 +96,9 @@
* It is named Linear Configuration but it controls other modes when not in
* linear mode also.
*/
-#define ZYNQ_QSPI_LCFG_TWO_MEM_MASK BIT(30) /* LQSPI Two memories Mask */
-#define ZYNQ_QSPI_LCFG_SEP_BUS_MASK BIT(29) /* LQSPI Separate bus Mask */
-#define ZYNQ_QSPI_LCFG_U_PAGE_MASK BIT(28) /* LQSPI Upper Page Mask */
+#define ZYNQ_QSPI_LCFG_TWO_MEM_MASK 0x40000000 /* LQSPI Two memories Mask */
+#define ZYNQ_QSPI_LCFG_SEP_BUS_MASK 0x20000000 /* LQSPI Separate bus Mask */
+#define ZYNQ_QSPI_LCFG_U_PAGE_MASK 0x10000000 /* LQSPI Upper Page Mask */
#define ZYNQ_QSPI_LCFG_DUMMY_SHIFT 8
@@ -114,7 +111,7 @@
* The modebits configurable by the driver to make the SPI support different
* data formats
*/
-#define ZYNQ_QSPI_MODEBITS (SPI_CPOL | SPI_CPHA)
+#define MODEBITS (SPI_CPOL | SPI_CPHA)
/* Default number of chip selects */
#define ZYNQ_QSPI_DEFAULT_NUM_CS 1
@@ -127,21 +124,23 @@
* @irq: IRQ number
* @txbuf: Pointer to the TX buffer
* @rxbuf: Pointer to the RX buffer
- * @tx_bytes: Number of bytes left to transfer
- * @rx_bytes: Number of bytes left to receive
- * @data_completion: completion structure
+ * @bytes_to_transfer: Number of bytes left to transfer
+ * @bytes_to_receive: Number of bytes left to receive
+ * @is_dual: Flag to indicate whether dual flash memories are used
+ * @is_instr: Flag to indicate if transfer contains an instruction
+ * (Used in dual parallel configuration)
*/
struct zynq_qspi {
- struct device *dev;
void __iomem *regs;
struct clk *refclk;
struct clk *pclk;
int irq;
- u8 *txbuf;
- u8 *rxbuf;
- int tx_bytes;
- int rx_bytes;
- struct completion data_completion;
+ const void *txbuf;
+ void *rxbuf;
+ int bytes_to_transfer;
+ int bytes_to_receive;
+ u32 is_dual;
+ u8 is_instr;
};
/*
@@ -166,7 +165,7 @@ static inline void zynq_qspi_write(struct zynq_qspi *xqspi, u32 offset,
* reset are
* - Master mode
* - Baud rate divisor is set to 2
- * - Tx threshold set to 1l Rx threshold set to 32
+ * Tx threshold set to 1, Rx threshold set to 32
* - Flash memory interface mode enabled
* - Size of the word to be transferred as 8 bit
* This function performs the following actions
@@ -183,7 +182,7 @@ static void zynq_qspi_init_hw(struct zynq_qspi *xqspi)
u32 config_reg;
zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
- zynq_qspi_write(xqspi, ZYNQ_QSPI_IDIS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IDIS_OFFSET, 0x7F);
/* Disable linear mode as the boot loader may have used it */
zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, 0);
@@ -193,7 +192,7 @@ static void zynq_qspi_init_hw(struct zynq_qspi *xqspi)
ZYNQ_QSPI_IXR_RXNEMTY_MASK)
zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);
- zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, 0x7F);
config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
config_reg &= ~(ZYNQ_QSPI_CONFIG_MSTREN_MASK |
ZYNQ_QSPI_CONFIG_CPOL_MASK |
@@ -213,56 +212,70 @@ static void zynq_qspi_init_hw(struct zynq_qspi *xqspi)
zynq_qspi_write(xqspi, ZYNQ_QSPI_TX_THRESH_OFFSET,
ZYNQ_QSPI_TX_THRESHOLD);
+ if (xqspi->is_dual)
+ /* Enable two memories on separate buses */
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET,
+ (ZYNQ_QSPI_LCFG_TWO_MEM_MASK |
+ ZYNQ_QSPI_LCFG_SEP_BUS_MASK |
+ (1 << ZYNQ_QSPI_LCFG_DUMMY_SHIFT) |
+ ZYNQ_QSPI_FAST_READ_QOUT_CODE));
+#ifdef CONFIG_SPI_ZYNQ_QSPI_DUAL_STACKED
+ /* Enable two memories on shared bus */
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET,
+ (ZYNQ_QSPI_LCFG_TWO_MEM_MASK |
+ (1 << ZYNQ_QSPI_LCFG_DUMMY_SHIFT) |
+ ZYNQ_QSPI_FAST_READ_QOUT_CODE));
+#endif
zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET,
ZYNQ_QSPI_ENABLE_ENABLE_MASK);
}
-static bool zynq_qspi_supports_op(struct spi_mem *mem,
- const struct spi_mem_op *op)
-{
- if (!spi_mem_default_supports_op(mem, op))
- return false;
-
- /*
- * The number of address bytes should be equal to or less than 3 bytes.
- */
- if (op->addr.nbytes > 3)
- return false;
-
- return true;
-}
-
/**
- * zynq_qspi_rxfifo_op - Read 1..4 bytes from RxFIFO to RX buffer
+ * zynq_qspi_read_rx_fifo - Read 1..4 bytes from RxFIFO to RX buffer
* @xqspi: Pointer to the zynq_qspi structure
* @size: Number of bytes to be read (1..4)
+ *
+ * Note: In the dual parallel connection, an even number of bytes is read
+ * when an odd number is requested, to avoid transferring a nibble to each
+ * flash. The receive buffer, though, is populated with only the number of
+ * bytes requested.
*/
-static void zynq_qspi_rxfifo_op(struct zynq_qspi *xqspi, unsigned int size)
+static void zynq_qspi_read_rx_fifo(struct zynq_qspi *xqspi, unsigned int size)
{
+ unsigned int xsize;
u32 data;
data = zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);
if (xqspi->rxbuf) {
- memcpy(xqspi->rxbuf, ((u8 *)&data) + 4 - size, size);
+ xsize = size;
+ if (xqspi->is_dual && !xqspi->is_instr && (size % 2))
+ xsize++;
+ memcpy(xqspi->rxbuf, ((u8 *)&data) + 4 - xsize, size);
xqspi->rxbuf += size;
}
- xqspi->rx_bytes -= size;
- if (xqspi->rx_bytes < 0)
- xqspi->rx_bytes = 0;
+ xqspi->bytes_to_receive -= size;
+ if (xqspi->bytes_to_receive < 0)
+ xqspi->bytes_to_receive = 0;
}
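A worked example of the rounding above, assuming a dual-parallel data phase (is_dual set, is_instr clear) and a request for size == 3: xsize becomes 4, so the copy starts at offset 4 - 4 = 0 and takes only the three requested bytes, discarding the extra byte the flash pair clocked out. On a little-endian CPU:

    u32 data = 0x44332211;              /* hypothetical RX FIFO word */
    memcpy(rxbuf, (u8 *)&data, 3);      /* rxbuf receives 11 22 33 */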
/**
- * zynq_qspi_txfifo_op - Write 1..4 bytes from TX buffer to TxFIFO
+ * zynq_qspi_write_tx_fifo - Write 1..4 bytes from TX buffer to TxFIFO
* @xqspi: Pointer to the zynq_qspi structure
* @size: Number of bytes to be written (1..4)
+ *
+ * In the dual parallel configuration, odd byte counts in read/write
+ * data operations have to be rounded up to even, to avoid a nibble
+ * (of data when programming / of dummy when reading) going to each
+ * individual flash device, where a whole byte is expected.
+ * This check applies only to data, not to commands.
*/
-static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
+static void zynq_qspi_write_tx_fifo(struct zynq_qspi *xqspi, unsigned int size)
{
static const unsigned int offset[4] = {
ZYNQ_QSPI_TXD_00_01_OFFSET, ZYNQ_QSPI_TXD_00_10_OFFSET,
ZYNQ_QSPI_TXD_00_11_OFFSET, ZYNQ_QSPI_TXD_00_00_OFFSET };
+ unsigned int xsize;
u32 data;
if (xqspi->txbuf) {
@@ -273,39 +286,108 @@ static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
data = 0;
}
- xqspi->tx_bytes -= size;
- zynq_qspi_write(xqspi, offset[size - 1], data);
+ xqspi->bytes_to_transfer -= size;
+
+ xsize = size;
+ if (xqspi->is_dual && !xqspi->is_instr && (size % 2))
+ xsize++;
+ zynq_qspi_write(xqspi, offset[xsize - 1], data);
+}
+
+/**
+ * zynq_prepare_transfer_hardware - Prepares hardware for transfer.
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function enables the SPI master controller.
+ *
+ * Return: Always 0
+ */
+static int zynq_prepare_transfer_hardware(struct spi_master *master)
+{
+ struct zynq_qspi *xqspi = spi_master_get_devdata(master);
+
+ clk_enable(xqspi->refclk);
+ clk_enable(xqspi->pclk);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET,
+ ZYNQ_QSPI_ENABLE_ENABLE_MASK);
+
+ return 0;
+}
+
+/**
+ * zynq_unprepare_transfer_hardware - Relaxes hardware after transfer
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function disables the SPI master controller.
+ *
+ * Return: Always 0
+ */
+static int zynq_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct zynq_qspi *xqspi = spi_master_get_devdata(master);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
+ clk_disable(xqspi->refclk);
+ clk_disable(xqspi->pclk);
+
+ return 0;
}
/**
* zynq_qspi_chipselect - Select or deselect the chip select line
- * @spi: Pointer to the spi_device structure
- * @assert: 1 for select or 0 for deselect the chip select line
+ * @qspi: Pointer to the spi_device structure
+ * @is_high: Select (0) or deselect (1) the chip select line
*/
-static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
+static void zynq_qspi_chipselect(struct spi_device *qspi, bool is_high)
{
- struct spi_controller *ctrl = spi->master;
- struct zynq_qspi *xqspi = spi_controller_get_devdata(ctrl);
+ struct zynq_qspi *xqspi = spi_master_get_devdata(qspi->master);
u32 config_reg;
+#ifdef CONFIG_SPI_ZYNQ_QSPI_DUAL_STACKED
+ u32 lqspi_cfg_reg;
+#endif
config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
- if (assert) {
+
+ /* Select upper/lower page before asserting CS */
+#ifdef CONFIG_SPI_ZYNQ_QSPI_DUAL_STACKED
+ lqspi_cfg_reg = zynq_qspi_read(xqspi,
+ ZYNQ_QSPI_LINEAR_CFG_OFFSET);
+ if (qspi->master->flags & SPI_MASTER_U_PAGE)
+ lqspi_cfg_reg |= ZYNQ_QSPI_LCFG_U_PAGE_MASK;
+ else
+ lqspi_cfg_reg &= ~ZYNQ_QSPI_LCFG_U_PAGE_MASK;
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET,
+ lqspi_cfg_reg);
+#endif
+
+ if (is_high) {
+ /* Deselect the slave */
+ config_reg |= ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
+ } else {
/* Select the slave */
config_reg &= ~ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
- config_reg |= (((~(BIT(spi->chip_select))) <<
- ZYNQ_QSPI_SS_SHIFT) &
- ZYNQ_QSPI_CONFIG_SSCTRL_MASK);
- } else {
- config_reg |= ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
+ if (gpio_is_valid(qspi->cs_gpio)) {
+ config_reg |= (((~(BIT(0))) <<
+ ZYNQ_QSPI_SS_SHIFT) &
+ ZYNQ_QSPI_CONFIG_SSCTRL_MASK);
+ } else {
+ config_reg |= (((~(BIT(qspi->chip_select))) <<
+ ZYNQ_QSPI_SS_SHIFT) &
+ ZYNQ_QSPI_CONFIG_SSCTRL_MASK);
+ }
+ xqspi->is_instr = 1;
}
zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
}
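The slave-select encoding above is active-low inside the CR select field. For chip_select == 0 the arithmetic works out as follows, with ZYNQ_QSPI_SS_SHIFT being 10 and ZYNQ_QSPI_CONFIG_SSCTRL_MASK being bit 10 in this driver:

    (~BIT(0)) << 10         == 0xFFFFF800   /* bit 10 clear */
    0xFFFFF800 & 0x00000400 == 0x00000000   /* CS0 driven low */

Deselecting simply sets the masked bit again, releasing the line.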
/**
- * zynq_qspi_config_op - Configure QSPI controller for specified transfer
- * @xqspi: Pointer to the zynq_qspi structure
+ * zynq_qspi_setup_transfer - Configure QSPI controller for specified transfer
* @qspi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides information
+ * about next transfer setup parameters
*
* Sets the operational mode of QSPI controller for the next QSPI transfer and
* sets the requested clock frequency.
@@ -319,22 +401,21 @@ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
* controller the driver will set the highest or lowest frequency supported by
* controller.
*/
-static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
+static int zynq_qspi_setup_transfer(struct spi_device *qspi,
+ struct spi_transfer *transfer)
{
- u32 config_reg, baud_rate_val = 0;
-
- /*
- * Set the clock frequency
- * The baud rate divisor is not a direct mapping to the value written
- * into the configuration register (config_reg[5:3])
- * i.e. 000 - divide by 2
- * 001 - divide by 4
- * ----------------
- * 111 - divide by 256
- */
+ struct zynq_qspi *xqspi = spi_master_get_devdata(qspi->master);
+ u32 config_reg, req_hz, baud_rate_val = 0;
+
+ if (transfer)
+ req_hz = transfer->speed_hz;
+ else
+ req_hz = qspi->max_speed_hz;
+
+ /* Set the clock frequency */
+ /* If req_hz == 0, default to lowest speed */
while ((baud_rate_val < ZYNQ_QSPI_BAUD_DIV_MAX) &&
- (clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) >
- spi->max_speed_hz)
+ (clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) > req_hz)
baud_rate_val++;
config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
@@ -342,13 +423,14 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
/* Set the QSPI clock phase and clock polarity */
config_reg &= (~ZYNQ_QSPI_CONFIG_CPHA_MASK) &
(~ZYNQ_QSPI_CONFIG_CPOL_MASK);
- if (spi->mode & SPI_CPHA)
+ if (qspi->mode & SPI_CPHA)
config_reg |= ZYNQ_QSPI_CONFIG_CPHA_MASK;
- if (spi->mode & SPI_CPOL)
+ if (qspi->mode & SPI_CPOL)
config_reg |= ZYNQ_QSPI_CONFIG_CPOL_MASK;
config_reg &= ~ZYNQ_QSPI_CONFIG_BDRATE_MASK;
config_reg |= (baud_rate_val << ZYNQ_QSPI_BAUD_DIV_SHIFT);
+
zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
return 0;
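The divisor loop maps config_reg[5:3] onto power-of-two dividers: value 0 divides the reference clock by 2, value 1 by 4, and so on up to 7 (divide by 256). A worked example, assuming a 200 MHz ref_clk and a 50 MHz request:

    /* baud_rate_val = 0: 200 MHz / (2 << 0) = 100 MHz > 50 MHz, continue */
    /* baud_rate_val = 1: 200 MHz / (2 << 1) =  50 MHz, stop              */
    config_reg |= 1 << ZYNQ_QSPI_BAUD_DIV_SHIFT;    /* bus runs at 50 MHz */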
@@ -356,49 +438,54 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
/**
* zynq_qspi_setup - Configure the QSPI controller
- * @spi: Pointer to the spi_device structure
+ * @qspi: Pointer to the spi_device structure
*
* Sets the operational mode of QSPI controller for the next QSPI transfer, baud
* rate and divisor value to setup the requested qspi clock.
*
* Return: 0 on success and error value on failure
*/
-static int zynq_qspi_setup_op(struct spi_device *spi)
+static int zynq_qspi_setup(struct spi_device *qspi)
{
- struct spi_controller *ctrl = spi->master;
- struct zynq_qspi *qspi = spi_controller_get_devdata(ctrl);
+ struct device *dev = &qspi->master->dev;
+ int ret;
+
+ if (gpio_is_valid(qspi->cs_gpio)) {
+ ret = devm_gpio_request(dev, qspi->cs_gpio, dev_name(dev));
+ if (ret) {
+ dev_err(dev, "Invalid cs_gpio\n");
+ return ret;
+ }
- if (ctrl->busy)
- return -EBUSY;
+ gpio_direction_output(qspi->cs_gpio,
+ !(qspi->mode & SPI_CS_HIGH));
+ }
- clk_enable(qspi->refclk);
- clk_enable(qspi->pclk);
- zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET,
- ZYNQ_QSPI_ENABLE_ENABLE_MASK);
+ if (qspi->master->busy)
+ return -EBUSY;
- return 0;
+ return zynq_qspi_setup_transfer(qspi, NULL);
}
/**
- * zynq_qspi_write_op - Fills the TX FIFO with as many bytes as possible
+ * zynq_qspi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible
* @xqspi: Pointer to the zynq_qspi structure
* @txcount: Maximum number of words to write
* @txempty: Indicates that TxFIFO is empty
*/
-static void zynq_qspi_write_op(struct zynq_qspi *xqspi, int txcount,
- bool txempty)
+static void zynq_qspi_fill_tx_fifo(struct zynq_qspi *xqspi, int txcount,
+ bool txempty)
{
int count, len, k;
- len = xqspi->tx_bytes;
+ len = xqspi->bytes_to_transfer;
if (len && len < 4) {
/*
* We must empty the TxFIFO between accesses to TXD0,
* TXD1, TXD2, TXD3.
*/
if (txempty)
- zynq_qspi_txfifo_op(xqspi, len);
-
+ zynq_qspi_write_tx_fifo(xqspi, len);
return;
}
@@ -407,44 +494,44 @@ static void zynq_qspi_write_op(struct zynq_qspi *xqspi, int txcount,
count = txcount;
if (xqspi->txbuf) {
- iowrite32_rep(xqspi->regs + ZYNQ_QSPI_TXD_00_00_OFFSET,
- xqspi->txbuf, count);
+ writesl(xqspi->regs + ZYNQ_QSPI_TXD_00_00_OFFSET,
+ xqspi->txbuf, count);
xqspi->txbuf += count * 4;
} else {
for (k = 0; k < count; k++)
writel_relaxed(0, xqspi->regs +
ZYNQ_QSPI_TXD_00_00_OFFSET);
}
-
- xqspi->tx_bytes -= count * 4;
+ xqspi->bytes_to_transfer -= count * 4;
}
/**
- * zynq_qspi_read_op - Drains the RX FIFO by as many bytes as possible
+ * zynq_qspi_drain_rx_fifo - Drains the RX FIFO by as many bytes as possible
* @xqspi: Pointer to the zynq_qspi structure
* @rxcount: Maximum number of words to read
*/
-static void zynq_qspi_read_op(struct zynq_qspi *xqspi, int rxcount)
+static void zynq_qspi_drain_rx_fifo(struct zynq_qspi *xqspi, int rxcount)
{
int count, len, k;
- len = xqspi->rx_bytes - xqspi->tx_bytes;
+ len = xqspi->bytes_to_receive - xqspi->bytes_to_transfer;
count = len / 4;
if (count > rxcount)
count = rxcount;
+
if (xqspi->rxbuf) {
- ioread32_rep(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET,
- xqspi->rxbuf, count);
+ readsl(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET,
+ xqspi->rxbuf, count);
xqspi->rxbuf += count * 4;
} else {
for (k = 0; k < count; k++)
readl_relaxed(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET);
}
- xqspi->rx_bytes -= count * 4;
+ xqspi->bytes_to_receive -= count * 4;
len -= count * 4;
if (len && len < 4 && count < rxcount)
- zynq_qspi_rxfifo_op(xqspi, len);
+ zynq_qspi_read_rx_fifo(xqspi, len);
}
/**
@@ -460,9 +547,10 @@ static void zynq_qspi_read_op(struct zynq_qspi *xqspi, int rxcount)
*/
static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
{
+ struct spi_master *master = dev_id;
+ struct zynq_qspi *xqspi = spi_master_get_devdata(master);
u32 intr_status;
bool txempty;
- struct zynq_qspi *xqspi = (struct zynq_qspi *)dev_id;
intr_status = zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET);
zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, intr_status);
@@ -475,22 +563,25 @@ static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
* so this bit indicates Tx FIFO is empty.
*/
txempty = !!(intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK);
+
/* Read out the data from the RX FIFO */
- zynq_qspi_read_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD);
- if (xqspi->tx_bytes) {
+ zynq_qspi_drain_rx_fifo(xqspi, ZYNQ_QSPI_RX_THRESHOLD);
+
+ if (xqspi->bytes_to_transfer) {
/* There is more data to send */
- zynq_qspi_write_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD,
- txempty);
+ zynq_qspi_fill_tx_fifo(xqspi, ZYNQ_QSPI_RX_THRESHOLD,
+ txempty);
} else {
/*
* If transfer and receive is completed then only send
* complete signal.
*/
- if (!xqspi->rx_bytes) {
+ if (!xqspi->bytes_to_receive) {
zynq_qspi_write(xqspi,
ZYNQ_QSPI_IDIS_OFFSET,
- ZYNQ_QSPI_IXR_RXTX_MASK);
- complete(&xqspi->data_completion);
+ ZYNQ_QSPI_IXR_ALL_MASK);
+ spi_finalize_current_transfer(master);
+ xqspi->is_instr = 0;
}
}
return IRQ_HANDLED;
@@ -500,113 +591,100 @@ static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
}
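The interrupt handler above implements a simple pump: each interrupt drains up to ZYNQ_QSPI_RX_THRESHOLD words from the RX FIFO, refills the TX FIFO while bytes remain to send, and signals the SPI core only once both byte counters have reached zero. In outline (a control-flow sketch, not the literal code):

    drain_rx(ZYNQ_QSPI_RX_THRESHOLD);
    if (bytes_to_transfer)
        fill_tx(ZYNQ_QSPI_RX_THRESHOLD, txempty);
    else if (!bytes_to_receive) {
        disable_interrupts();
        spi_finalize_current_transfer(master);
    }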
/**
- * zynq_qspi_exec_mem_op() - Initiates the QSPI transfer
- * @mem: the SPI memory
- * @op: the memory operation to execute
+ * zynq_qspi_start_transfer - Initiates the QSPI transfer
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ * @qspi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides
+ * information about the next transfer's parameters
*
- * Executes a memory operation.
+ * This function fills the TX FIFO and starts the QSPI transfer; completion
+ * is signalled asynchronously from the interrupt handler.
*
- * This function first selects the chip and starts the memory operation.
+ * Return: Number of bytes transferred in the last transfer
+ */
+static int zynq_qspi_start_transfer(struct spi_master *master,
+ struct spi_device *qspi,
+ struct spi_transfer *transfer)
+{
+ struct zynq_qspi *xqspi = spi_master_get_devdata(master);
+
+ xqspi->txbuf = transfer->tx_buf;
+ xqspi->rxbuf = transfer->rx_buf;
+ xqspi->bytes_to_transfer = transfer->len;
+ xqspi->bytes_to_receive = transfer->len;
+
+ xqspi->is_instr = !transfer->stripe;
+ zynq_qspi_setup_transfer(qspi, transfer);
+
+ zynq_qspi_fill_tx_fifo(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
+ ZYNQ_QSPI_IXR_ALL_MASK);
+
+ return transfer->len;
+}
+
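transfer->stripe is a Xilinx vendor extension to struct spi_transfer (it does not exist in mainline): striped transfers split the payload across the two parallel flashes, while non-striped ones (commands, addresses) are replicated to both, which is why is_instr is set only in the non-striped case. A hypothetical caller in a flash layer might mark a page write like this:

    struct spi_transfer t = {
        .tx_buf = page_buf,
        .len    = page_len,
        .stripe = 1,        /* payload: stripe across both flashes */
    };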
+/**
+ * zynq_qspi_suspend - Suspend method for the QSPI driver
+ * @_dev: Address of the device structure
*
- * Return: 0 in case of success, a negative error code otherwise.
+ * This function stops the QSPI driver queue and disables the QSPI controller.
+ *
+ * Return: Always 0
*/
-static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
- const struct spi_mem_op *op)
+static int __maybe_unused zynq_qspi_suspend(struct device *_dev)
{
- struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
- int err = 0, i;
- u8 *tmpbuf;
-
- dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth);
-
- zynq_qspi_chipselect(mem->spi, true);
- zynq_qspi_config_op(xqspi, mem->spi);
-
- if (op->cmd.opcode) {
- reinit_completion(&xqspi->data_completion);
- xqspi->txbuf = (u8 *)&op->cmd.opcode;
- xqspi->rxbuf = NULL;
- xqspi->tx_bytes = sizeof(op->cmd.opcode);
- xqspi->rx_bytes = sizeof(op->cmd.opcode);
- zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
- zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
- ZYNQ_QSPI_IXR_RXTX_MASK);
- if (!wait_for_completion_timeout(&xqspi->data_completion,
- msecs_to_jiffies(1000)))
- err = -ETIMEDOUT;
- }
+ struct platform_device *pdev = container_of(_dev,
+ struct platform_device, dev);
+ struct spi_master *master = platform_get_drvdata(pdev);
- if (op->addr.nbytes) {
- for (i = 0; i < op->addr.nbytes; i++) {
- xqspi->txbuf[i] = op->addr.val >>
- (8 * (op->addr.nbytes - i - 1));
- }
+ spi_master_suspend(master);
- reinit_completion(&xqspi->data_completion);
- xqspi->rxbuf = NULL;
- xqspi->tx_bytes = op->addr.nbytes;
- xqspi->rx_bytes = op->addr.nbytes;
- zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
- zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
- ZYNQ_QSPI_IXR_RXTX_MASK);
- if (!wait_for_completion_timeout(&xqspi->data_completion,
- msecs_to_jiffies(1000)))
- err = -ETIMEDOUT;
- }
+ zynq_unprepare_transfer_hardware(master);
- if (op->dummy.nbytes) {
- tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
- if (!tmpbuf)
- return -ENOMEM;
-
- memset(tmpbuf, 0xff, op->dummy.nbytes);
- reinit_completion(&xqspi->data_completion);
- xqspi->txbuf = tmpbuf;
- xqspi->rxbuf = NULL;
- xqspi->tx_bytes = op->dummy.nbytes;
- xqspi->rx_bytes = op->dummy.nbytes;
- zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
- zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
- ZYNQ_QSPI_IXR_RXTX_MASK);
- if (!wait_for_completion_timeout(&xqspi->data_completion,
- msecs_to_jiffies(1000)))
- err = -ETIMEDOUT;
-
- kfree(tmpbuf);
- }
+ return 0;
+}
- if (op->data.nbytes) {
- reinit_completion(&xqspi->data_completion);
- if (op->data.dir == SPI_MEM_DATA_OUT) {
- xqspi->txbuf = (u8 *)op->data.buf.out;
- xqspi->tx_bytes = op->data.nbytes;
- xqspi->rxbuf = NULL;
- xqspi->rx_bytes = op->data.nbytes;
- } else {
- xqspi->txbuf = NULL;
- xqspi->rxbuf = (u8 *)op->data.buf.in;
- xqspi->rx_bytes = op->data.nbytes;
- xqspi->tx_bytes = op->data.nbytes;
- }
+/**
+ * zynq_qspi_resume - Resume method for the QSPI driver
+ * @dev: Address of the device structure
+ *
+ * The function starts the QSPI driver queue and initializes the QSPI
+ * controller.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int __maybe_unused zynq_qspi_resume(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct zynq_qspi *xqspi = spi_master_get_devdata(master);
+ int ret = 0;
+
+ ret = clk_enable(xqspi->pclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable APB clock.\n");
+ return ret;
+ }
- zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
- zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
- ZYNQ_QSPI_IXR_RXTX_MASK);
- if (!wait_for_completion_timeout(&xqspi->data_completion,
- msecs_to_jiffies(1000)))
- err = -ETIMEDOUT;
+ ret = clk_enable(xqspi->refclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable device clock.\n");
+ clk_disable(xqspi->pclk);
+ return ret;
}
- zynq_qspi_chipselect(mem->spi, false);
- return err;
+ spi_master_resume(master);
+
+ return 0;
}
-static const struct spi_controller_mem_ops zynq_qspi_mem_ops = {
- .supports_op = zynq_qspi_supports_op,
- .exec_op = zynq_qspi_exec_mem_op,
-};
+static SIMPLE_DEV_PM_OPS(zynq_qspi_dev_pm_ops, zynq_qspi_suspend,
+ zynq_qspi_resume);
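SIMPLE_DEV_PM_OPS wires up the system-sleep callbacks only (suspend/resume, with freeze/thaw/poweroff/restore aliased to the same pair); unlike the AXI SPI driver above, this driver does no runtime PM and instead gates its clocks in the prepare/unprepare hooks. A simplified view of the expansion:

    static const struct dev_pm_ops zynq_qspi_dev_pm_ops = {
        .suspend = zynq_qspi_suspend,
        .resume  = zynq_qspi_resume,
        /* freeze/thaw/poweroff/restore map onto the same callbacks */
    };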
/**
* zynq_qspi_probe - Probe method for the QSPI driver
@@ -619,25 +697,32 @@ static const struct spi_controller_mem_ops zynq_qspi_mem_ops = {
static int zynq_qspi_probe(struct platform_device *pdev)
{
int ret = 0;
- struct spi_controller *ctlr;
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
+ struct spi_master *master;
struct zynq_qspi *xqspi;
+ struct resource *res;
u32 num_cs;
- ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
- if (!ctlr)
+ master = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
+ if (!master)
return -ENOMEM;
- xqspi = spi_controller_get_devdata(ctlr);
- xqspi->dev = dev;
- platform_set_drvdata(pdev, xqspi);
- xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
+ xqspi = spi_master_get_devdata(master);
+ master->dev.of_node = pdev->dev.of_node;
+ platform_set_drvdata(pdev, master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xqspi->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
goto remove_master;
}
+ if (of_property_read_u32(pdev->dev.of_node, "is-dual",
+ &xqspi->is_dual)) {
+ dev_warn(&pdev->dev, "couldn't determine configuration info");
+ dev_warn(&pdev->dev, "about dual memories. defaulting to single memory\n");
+ }
+
xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(xqspi->pclk)) {
dev_err(&pdev->dev, "pclk clock not found.\n");
@@ -645,8 +730,6 @@ static int zynq_qspi_probe(struct platform_device *pdev)
goto remove_master;
}
- init_completion(&xqspi->data_completion);
-
xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
if (IS_ERR(xqspi->refclk)) {
dev_err(&pdev->dev, "ref_clk clock not found.\n");
@@ -672,30 +755,37 @@ static int zynq_qspi_probe(struct platform_device *pdev)
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) {
ret = -ENXIO;
+ dev_err(&pdev->dev, "irq resource not found\n");
goto remove_master;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
- 0, pdev->name, xqspi);
+ 0, pdev->name, master);
if (ret != 0) {
ret = -ENXIO;
dev_err(&pdev->dev, "request_irq failed\n");
goto remove_master;
}
- ret = of_property_read_u32(np, "num-cs",
+ ret = of_property_read_u32(pdev->dev.of_node, "num-cs",
&num_cs);
if (ret < 0)
- ctlr->num_chipselect = ZYNQ_QSPI_DEFAULT_NUM_CS;
+ master->num_chipselect = ZYNQ_QSPI_DEFAULT_NUM_CS;
else
- ctlr->num_chipselect = num_cs;
-
- ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ master->num_chipselect = num_cs;
+
+ master->setup = zynq_qspi_setup;
+ master->set_cs = zynq_qspi_chipselect;
+ master->transfer_one = zynq_qspi_start_transfer;
+ master->prepare_transfer_hardware = zynq_prepare_transfer_hardware;
+ master->unprepare_transfer_hardware = zynq_unprepare_transfer_hardware;
+ master->flags = SPI_MASTER_QUAD_MODE | SPI_MASTER_GPIO_SS;
+
+ master->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
- ctlr->mem_ops = &zynq_qspi_mem_ops;
- ctlr->setup = zynq_qspi_setup_op;
- ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
- ctlr->dev.of_node = np;
- ret = devm_spi_register_controller(&pdev->dev, ctlr);
+
+ ret = spi_register_master(master);
if (ret) {
dev_err(&pdev->dev, "spi_register_master failed\n");
goto clk_dis_all;
@@ -708,8 +798,7 @@ clk_dis_all:
clk_dis_pclk:
clk_disable_unprepare(xqspi->pclk);
remove_master:
- spi_controller_put(ctlr);
-
+ spi_master_put(master);
return ret;
}
@@ -725,13 +814,16 @@ remove_master:
*/
static int zynq_qspi_remove(struct platform_device *pdev)
{
- struct zynq_qspi *xqspi = platform_get_drvdata(pdev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct zynq_qspi *xqspi = spi_master_get_devdata(master);
zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
clk_disable_unprepare(xqspi->refclk);
clk_disable_unprepare(xqspi->pclk);
+ spi_unregister_master(master);
+
return 0;
}
@@ -739,7 +831,6 @@ static const struct of_device_id zynq_qspi_of_match[] = {
{ .compatible = "xlnx,zynq-qspi-1.0", },
{ /* end of table */ }
};
-
MODULE_DEVICE_TABLE(of, zynq_qspi_of_match);
/*
@@ -749,8 +840,9 @@ static struct platform_driver zynq_qspi_driver = {
.probe = zynq_qspi_probe,
.remove = zynq_qspi_remove,
.driver = {
- .name = "zynq-qspi",
+ .name = DRIVER_NAME,
.of_match_table = zynq_qspi_of_match,
+ .pm = &zynq_qspi_dev_pm_ops,
},
};
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 7412a3042a8d..25266cd3f52b 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -33,6 +34,7 @@
#define GQSPI_RXD_OFST 0x00000120
#define GQSPI_TX_THRESHOLD_OFST 0x00000128
#define GQSPI_RX_THRESHOLD_OFST 0x0000012C
+#define IOU_TAPDLY_BYPASS_OFST 0x0000003C
#define GQSPI_LPBK_DLY_ADJ_OFST 0x00000138
#define GQSPI_GEN_FIFO_OFST 0x00000140
#define GQSPI_SEL_OFST 0x00000144
@@ -47,6 +49,7 @@
#define GQSPI_QSPIDMA_DST_I_MASK_OFST 0x00000820
#define GQSPI_QSPIDMA_DST_ADDR_OFST 0x00000800
#define GQSPI_QSPIDMA_DST_ADDR_MSB_OFST 0x00000828
+#define GQSPI_DATA_DLY_ADJ_OFST 0x000001F8
/* GQSPI register bit masks */
#define GQSPI_SEL_MASK 0x00000001
@@ -132,12 +135,45 @@
#define GQSPI_SELECT_MODE_QUADSPI 0x4
#define GQSPI_DMA_UNALIGN 0x3
#define GQSPI_DEFAULT_NUM_CS 1 /* Default number of chip selects */
+#define GQSPI_RX_BUS_WIDTH_QUAD 0x4
+#define GQSPI_RX_BUS_WIDTH_DUAL 0x2
+#define GQSPI_RX_BUS_WIDTH_SINGLE 0x1
+#define GQSPI_TX_BUS_WIDTH_QUAD 0x4
+#define GQSPI_TX_BUS_WIDTH_DUAL 0x2
+#define GQSPI_TX_BUS_WIDTH_SINGLE 0x1
+#define GQSPI_LPBK_DLY_ADJ_LPBK_SHIFT 5
+#define GQSPI_LPBK_DLY_ADJ_DLY_1 0x2
+#define GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT 3
+#define GQSPI_LPBK_DLY_ADJ_DLY_0 0x3
+#define GQSPI_USE_DATA_DLY 0x1
+#define GQSPI_USE_DATA_DLY_SHIFT 31
+#define GQSPI_DATA_DLY_ADJ_VALUE 0x2
+#define GQSPI_DATA_DLY_ADJ_SHIFT 28
+#define TAP_DLY_BYPASS_LQSPI_RX_VALUE 0x1
+#define TAP_DLY_BYPASS_LQSPI_RX_SHIFT 2
+
+/* Set to differentiate Versal from ZynqMP: 1 = Versal, 0 = ZynqMP */
+#define QSPI_QUIRK_HAS_TAPDELAY BIT(0)
+
+#define GQSPI_FREQ_37_5MHZ 37500000
+#define GQSPI_FREQ_40MHZ 40000000
+#define GQSPI_FREQ_100MHZ 100000000
+#define GQSPI_FREQ_150MHZ 150000000
+#define IOU_TAPDLY_BYPASS_MASK 0x7
#define SPI_AUTOSUSPEND_TIMEOUT 3000
enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
static const struct zynqmp_eemi_ops *eemi_ops;
/**
+ * struct qspi_platform_data - ZynqMP QSPI platform data structure
+ * @quirks: Flags used to identify the platform
+ */
+struct qspi_platform_data {
+ u32 quirks;
+};
+
+/**
* struct zynqmp_qspi - Defines qspi driver instance
* @regs: Virtual address of the QSPI controller registers
* @refclk: Pointer to the peripheral clock
@@ -152,8 +188,14 @@ static const struct zynqmp_eemi_ops *eemi_ops;
* @genfifobus: Used to select the upper or lower bus
* @dma_rx_bytes: Remaining bytes to receive by DMA mode
* @dma_addr: DMA address after mapping the kernel buffer
+ * @tx_bus_width: Used to represent number of data wires for tx
+ * @rx_bus_width: Used to represent number of data wires for rx
* @genfifoentry: Used for storing the genfifoentry instruction.
+ * @isinstr: To determine whether the transfer is an instruction
* @mode: Defines the mode in which QSPI is operating
+ * @speed_hz: Current SPI bus clock speed in Hz
+ * @io_mode: Defines the operating mode, either IO or DMA
+ * @has_tapdelay: Set when the tap delay registers are available in the QSPI
*/
struct zynqmp_qspi {
void __iomem *regs;
@@ -169,14 +211,22 @@ struct zynqmp_qspi {
u32 genfifobus;
u32 dma_rx_bytes;
dma_addr_t dma_addr;
+ u32 rx_bus_width;
+ u32 tx_bus_width;
u32 genfifoentry;
+ bool isinstr;
enum mode_type mode;
+ u32 speed_hz;
+ bool io_mode;
+ bool has_tapdelay;
};
/**
- * zynqmp_gqspi_read: For GQSPI controller read operation
+ * zynqmp_gqspi_read - For GQSPI controller read operation
* @xqspi: Pointer to the zynqmp_qspi structure
* @offset: Offset from where to read
+ *
+ * Return: Value read from the qspi register
*/
static u32 zynqmp_gqspi_read(struct zynqmp_qspi *xqspi, u32 offset)
{
@@ -184,7 +234,7 @@ static u32 zynqmp_gqspi_read(struct zynqmp_qspi *xqspi, u32 offset)
}
/**
- * zynqmp_gqspi_write: For GQSPI controller write operation
+ * zynqmp_gqspi_write - For GQSPI controller write operation
* @xqspi: Pointer to the zynqmp_qspi structure
* @offset: Offset where to write
* @val: Value to be written
@@ -196,10 +246,10 @@ static inline void zynqmp_gqspi_write(struct zynqmp_qspi *xqspi, u32 offset,
}
/**
- * zynqmp_gqspi_selectslave: For selection of slave device
+ * zynqmp_gqspi_selectslave - For selection of slave device
* @instanceptr: Pointer to the zynqmp_qspi structure
- * @flashcs: For chip select
- * @flashbus: To check which bus is selected- upper or lower
+ * @slavecs: For chip select
+ * @slavebus: To check which bus is selected - upper or lower
*/
static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
u8 slavecs, u8 slavebus)
@@ -243,7 +293,76 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
}
/**
- * zynqmp_qspi_init_hw: Initialize the hardware
+ * zynqmp_qspi_set_tapdelay - To configure qspi tap delays
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @baudrateval: Baud rate to configure
+ */
+static void zynqmp_qspi_set_tapdelay(struct zynqmp_qspi *xqspi, u32 baudrateval)
+{
+ u32 tapdlybypass = 0, lpbkdlyadj = 0, datadlyadj = 0, clk_rate;
+ u32 reqhz = 0;
+
+ if (!eemi_ops->ioctl)
+ return;
+
+ clk_rate = clk_get_rate(xqspi->refclk);
+ reqhz = (clk_rate / (GQSPI_BAUD_DIV_SHIFT << baudrateval));
+
+ if (!xqspi->has_tapdelay) {
+ if (reqhz <= GQSPI_FREQ_40MHZ) {
+ eemi_ops->ioctl(NODE_QSPI, IOCTL_SET_TAPDELAY_BYPASS,
+ PM_TAPDELAY_QSPI,
+ PM_TAPDELAY_BYPASS_ENABLE,
+ NULL);
+ } else if (reqhz <= GQSPI_FREQ_100MHZ) {
+ eemi_ops->ioctl(NODE_QSPI, IOCTL_SET_TAPDELAY_BYPASS,
+ PM_TAPDELAY_QSPI,
+ PM_TAPDELAY_BYPASS_ENABLE,
+ NULL);
+ lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK);
+ datadlyadj |= ((GQSPI_USE_DATA_DLY <<
+ GQSPI_USE_DATA_DLY_SHIFT)
+ | (GQSPI_DATA_DLY_ADJ_VALUE <<
+ GQSPI_DATA_DLY_ADJ_SHIFT));
+ } else if (reqhz <= GQSPI_FREQ_150MHZ) {
+ lpbkdlyadj |= GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK;
+ }
+ } else {
+ if (reqhz <= GQSPI_FREQ_37_5MHZ) {
+ tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
+ TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
+ } else if (reqhz <= GQSPI_FREQ_100MHZ) {
+ tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
+ TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
+ lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK);
+ datadlyadj |= (GQSPI_USE_DATA_DLY <<
+ GQSPI_USE_DATA_DLY_SHIFT);
+ } else if (reqhz <= GQSPI_FREQ_150MHZ) {
+ lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK
+ | (GQSPI_LPBK_DLY_ADJ_DLY_1 <<
+ GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT));
+ }
+ zynqmp_gqspi_write(xqspi,
+ IOU_TAPDLY_BYPASS_OFST, tapdlybypass);
+ }
+
+ zynqmp_gqspi_write(xqspi, GQSPI_LPBK_DLY_ADJ_OFST, lpbkdlyadj);
+ zynqmp_gqspi_write(xqspi, GQSPI_DATA_DLY_ADJ_OFST, datadlyadj);
+}
+
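reqhz above recovers the actual bus frequency implied by the chosen divisor, clk_rate / (2 << baudrateval), GQSPI_BAUD_DIV_SHIFT being the constant 2 here; the tap-delay programming then just selects a frequency band. A worked example, assuming a 300 MHz reference clock and baudrateval == 1:

    /* reqhz = 300 MHz / (2 << 1) = 75 MHz                               */
    /* 40 MHz < 75 MHz <= 100 MHz: on ZynqMP this selects the tap-delay  */
    /* bypass ioctl plus loopback and data-delay adjustment              */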
+static u32 zynqmp_disable_intr(struct zynqmp_qspi *xqspi)
+{
+ u32 value;
+
+ zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
+ value = zynqmp_gqspi_read(xqspi, GQSPI_IMASK_OFST);
+ zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
+
+ return value;
+}
+
+/**
+ * zynqmp_qspi_init_hw - Initialize the hardware
* @xqspi: Pointer to the zynqmp_qspi structure
*
* The default settings of the QSPI controller's configurable parameters on
@@ -267,9 +386,7 @@ static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi)
/* Select the GQSPI mode */
zynqmp_gqspi_write(xqspi, GQSPI_SEL_OFST, GQSPI_SEL_MASK);
/* Clear and disable interrupts */
- zynqmp_gqspi_write(xqspi, GQSPI_ISR_OFST,
- zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST) |
- GQSPI_ISR_WR_TO_CLR_MASK);
+ zynqmp_disable_intr(xqspi);
/* Clear the DMA STS */
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST,
zynqmp_gqspi_read(xqspi,
@@ -321,17 +438,18 @@ static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi)
zynqmp_gqspi_selectslave(xqspi,
GQSPI_SELECT_FLASH_CS_LOWER,
GQSPI_SELECT_FLASH_BUS_LOWER);
- /* Initialize DMA */
- zynqmp_gqspi_write(xqspi,
+ if (!xqspi->io_mode) {
+ /* Initialize DMA */
+ zynqmp_gqspi_write(xqspi,
GQSPI_QSPIDMA_DST_CTRL_OFST,
GQSPI_QSPIDMA_DST_CTRL_RESET_VAL);
-
+ }
/* Enable the GQSPI */
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
}
/**
- * zynqmp_qspi_copy_read_data: Copy data to RX buffer
+ * zynqmp_qspi_copy_read_data - Copy data to RX buffer
* @xqspi: Pointer to the zynqmp_qspi structure
* @data: The variable where data is stored
* @size: Number of bytes to be copied from data to RX buffer
@@ -345,7 +463,7 @@ static void zynqmp_qspi_copy_read_data(struct zynqmp_qspi *xqspi,
}
/**
- * zynqmp_prepare_transfer_hardware: Prepares hardware for transfer.
+ * zynqmp_prepare_transfer_hardware - Prepares hardware for transfer.
* @master: Pointer to the spi_master structure which provides
* information about the controller.
*
@@ -362,7 +480,7 @@ static int zynqmp_prepare_transfer_hardware(struct spi_master *master)
}
/**
- * zynqmp_unprepare_transfer_hardware: Relaxes hardware after transfer
+ * zynqmp_unprepare_transfer_hardware - Relaxes hardware after transfer
* @master: Pointer to the spi_master structure which provides
* information about the controller.
*
@@ -379,7 +497,7 @@ static int zynqmp_unprepare_transfer_hardware(struct spi_master *master)
}
/**
- * zynqmp_qspi_chipselect: Select or deselect the chip select line
+ * zynqmp_qspi_chipselect - Select or deselect the chip select line
* @qspi: Pointer to the spi_device structure
* @is_high: Select(0) or deselect (1) the chip select line
*/
@@ -390,11 +508,27 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
u32 genfifoentry = 0x0, statusreg;
genfifoentry |= GQSPI_GENFIFO_MODE_SPI;
+
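+	/*
+	 * Pick the flash CS/bus combination: parallel (both flashes),
+	 * stacked upper flash, or the default lower flash.
+	 */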
+ if (qspi->master->flags & SPI_MASTER_BOTH_CS) {
+ zynqmp_gqspi_selectslave(xqspi,
+ GQSPI_SELECT_FLASH_CS_BOTH,
+ GQSPI_SELECT_FLASH_BUS_BOTH);
+ } else if (qspi->master->flags & SPI_MASTER_U_PAGE) {
+ zynqmp_gqspi_selectslave(xqspi,
+ GQSPI_SELECT_FLASH_CS_UPPER,
+ GQSPI_SELECT_FLASH_BUS_LOWER);
+ } else {
+ zynqmp_gqspi_selectslave(xqspi,
+ GQSPI_SELECT_FLASH_CS_LOWER,
+ GQSPI_SELECT_FLASH_BUS_LOWER);
+ }
+
genfifoentry |= xqspi->genfifobus;
if (!is_high) {
genfifoentry |= xqspi->genfifocs;
genfifoentry |= GQSPI_GENFIFO_CS_SETUP;
+ xqspi->isinstr = true;
} else {
genfifoentry |= GQSPI_GENFIFO_CS_HOLD;
}
@@ -415,8 +549,7 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
if ((statusreg & GQSPI_ISR_GENFIFOEMPTY_MASK) &&
(statusreg & GQSPI_ISR_TXEMPTY_MASK))
break;
- else
- cpu_relax();
+ cpu_relax();
} while (!time_after_eq(jiffies, timeout));
if (time_after_eq(jiffies, timeout))
@@ -424,7 +557,7 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
}
/**
- * zynqmp_qspi_setup_transfer: Configure QSPI controller for specified
+ * zynqmp_qspi_setup_transfer - Configure QSPI controller for specified
* transfer
* @qspi: Pointer to the spi_device structure
* @transfer: Pointer to the spi_transfer structure which provides
@@ -457,33 +590,39 @@ static int zynqmp_qspi_setup_transfer(struct spi_device *qspi,
else
req_hz = qspi->max_speed_hz;
- /* Set the clock frequency */
- /* If req_hz == 0, default to lowest speed */
- clk_rate = clk_get_rate(xqspi->refclk);
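+	/*
+	 * Skip reprogramming the baud-rate divider and tap delays when
+	 * the requested frequency matches the cached one.
+	 */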
+ if (xqspi->speed_hz != req_hz) {
+ /* Set the clock frequency */
+ /* If req_hz == 0, default to lowest speed */
+ clk_rate = clk_get_rate(xqspi->refclk);
- while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) &&
- (clk_rate /
- (GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) > req_hz)
- baud_rate_val++;
+ while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) &&
+ (clk_rate /
+ (GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) > req_hz)
+ baud_rate_val++;
- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+ config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- /* Set the QSPI clock phase and clock polarity */
- config_reg &= (~GQSPI_CFG_CLK_PHA_MASK) & (~GQSPI_CFG_CLK_POL_MASK);
+ /* Set the QSPI clock phase and clock polarity */
+ config_reg &= (~GQSPI_CFG_CLK_PHA_MASK) &
+ (~GQSPI_CFG_CLK_POL_MASK);
- if (qspi->mode & SPI_CPHA)
- config_reg |= GQSPI_CFG_CLK_PHA_MASK;
- if (qspi->mode & SPI_CPOL)
- config_reg |= GQSPI_CFG_CLK_POL_MASK;
+ if (qspi->mode & SPI_CPHA)
+ config_reg |= GQSPI_CFG_CLK_PHA_MASK;
+ if (qspi->mode & SPI_CPOL)
+ config_reg |= GQSPI_CFG_CLK_POL_MASK;
+ config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
+ config_reg |= (baud_rate_val << GQSPI_CFG_BAUD_RATE_DIV_SHIFT);
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ xqspi->speed_hz = clk_rate / (GQSPI_BAUD_DIV_SHIFT <<
+ baud_rate_val);
+ zynqmp_qspi_set_tapdelay(xqspi, baud_rate_val);
+ }
- config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
- config_reg |= (baud_rate_val << GQSPI_CFG_BAUD_RATE_DIV_SHIFT);
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
return 0;
}
/**
- * zynqmp_qspi_setup: Configure the QSPI controller
+ * zynqmp_qspi_setup - Configure the QSPI controller
* @qspi: Pointer to the spi_device structure
*
* Sets the operational mode of QSPI controller for the next QSPI transfer,
@@ -499,7 +638,7 @@ static int zynqmp_qspi_setup(struct spi_device *qspi)
}
/**
- * zynqmp_qspi_filltxfifo: Fills the TX FIFO as long as there is room in
+ * zynqmp_qspi_filltxfifo - Fills the TX FIFO as long as there is room in
* the FIFO or the bytes required to be
* transmitted.
* @xqspi: Pointer to the zynqmp_qspi structure
@@ -525,7 +664,7 @@ static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size)
}
/**
- * zynqmp_qspi_readrxfifo: Fills the RX FIFO as long as there is room in
+ * zynqmp_qspi_readrxfifo - Fills the RX FIFO as long as there is room in
* the FIFO.
* @xqspi: Pointer to the zynqmp_qspi structure
* @size: Number of bytes to be copied from RX buffer to RX FIFO
@@ -538,7 +677,7 @@ static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size)
while ((count < size) && (xqspi->bytes_to_receive > 0)) {
if (xqspi->bytes_to_receive >= 4) {
(*(u32 *) xqspi->rxbuf) =
- zynqmp_gqspi_read(xqspi, GQSPI_RXD_OFST);
+ zynqmp_gqspi_read(xqspi, GQSPI_RXD_OFST);
xqspi->rxbuf += 4;
xqspi->bytes_to_receive -= 4;
count += 4;
@@ -553,7 +692,40 @@ static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size)
}
/**
- * zynqmp_process_dma_irq: Handler for DMA done interrupt of QSPI
+ * zynqmp_qspi_preparedummy - Prepares the dummy entry
+ *
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @transfer: Pointer to the structure containing transfer data
+ * @genfifoentry: Pointer to the variable in which the GENFIFO mask is
+ *		  returned to the calling function
+ */
+static void zynqmp_qspi_preparedummy(struct zynqmp_qspi *xqspi,
+ struct spi_transfer *transfer,
+ u32 *genfifoentry)
+{
+	/* For a dummy entry, both the TX and RX buffers are NULL */
+ *genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
+
+ /* SPI mode */
+ *genfifoentry &= ~GQSPI_GENFIFO_MODE_QUADSPI;
+ if (xqspi->rx_bus_width == GQSPI_RX_BUS_WIDTH_QUAD ||
+ xqspi->tx_bus_width == GQSPI_TX_BUS_WIDTH_QUAD)
+ *genfifoentry |= GQSPI_GENFIFO_MODE_QUADSPI;
+ else if (xqspi->rx_bus_width == GQSPI_RX_BUS_WIDTH_DUAL ||
+ xqspi->tx_bus_width == GQSPI_TX_BUS_WIDTH_DUAL)
+ *genfifoentry |= GQSPI_GENFIFO_MODE_DUALSPI;
+ else
+ *genfifoentry |= GQSPI_GENFIFO_MODE_SPI;
+
+ /* Immediate data */
+ *genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
+
+ if (transfer->dummy)
+ *genfifoentry |= transfer->dummy;
+}
+
+/**
+ * zynqmp_process_dma_irq - Handler for DMA done interrupt of QSPI
* controller
* @xqspi: zynqmp_qspi instance pointer
*
@@ -601,7 +773,7 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
}
/**
- * zynqmp_qspi_irq: Interrupt service routine of the QSPI controller
+ * zynqmp_qspi_irq - Interrupt service routine of the QSPI controller
* @irq: IRQ number
* @dev_id: Pointer to the xqspi structure
*
@@ -639,23 +811,29 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) {
zynqmp_process_dma_irq(xqspi);
ret = IRQ_HANDLED;
- } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
- (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
+ } else if ((mask & GQSPI_IER_RXNEMPTY_MASK)) {
+ zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
+ ret = IRQ_HANDLED;
+ }
+ if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
+ (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
ret = IRQ_HANDLED;
}
if ((xqspi->bytes_to_receive == 0) && (xqspi->bytes_to_transfer == 0)
&& ((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) {
- zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
+ zynqmp_disable_intr(xqspi);
+ xqspi->isinstr = false;
spi_finalize_current_transfer(master);
ret = IRQ_HANDLED;
}
+
return ret;
}
/**
- * zynqmp_qspi_selectspimode: Selects SPI mode - x1 or x2 or x4.
+ * zynqmp_qspi_selectspimode - Selects SPI mode - x1 or x2 or x4.
* @xqspi: xqspi is a pointer to the GQSPI instance
* @spimode: spimode - SPI or DUAL or QUAD.
* Return: Mask to set desired SPI mode in GENFIFO entry.
@@ -683,7 +861,7 @@ static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi,
}
/**
- * zynq_qspi_setuprxdma: This function sets up the RX DMA operation
+ * zynq_qspi_setuprxdma - This function sets up the RX DMA operation
* @xqspi: xqspi is a pointer to the GQSPI instance.
*/
static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
@@ -692,8 +870,9 @@ static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
dma_addr_t addr;
u64 dma_align = (u64)(uintptr_t)xqspi->rxbuf;
- if ((xqspi->bytes_to_receive < 8) ||
- ((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) {
+ if (((xqspi->bytes_to_receive < 8) || (xqspi->io_mode)) ||
+ ((dma_align & GQSPI_DMA_UNALIGN) != 0x0) ||
+ is_vmalloc_addr(xqspi->rxbuf)) {
/* Setting to IO mode */
config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
@@ -708,8 +887,10 @@ static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
addr = dma_map_single(xqspi->dev, (void *)xqspi->rxbuf,
rx_bytes, DMA_FROM_DEVICE);
- if (dma_mapping_error(xqspi->dev, addr))
+ if (dma_mapping_error(xqspi->dev, addr)) {
dev_err(xqspi->dev, "ERR:rxdma:memory not mapped\n");
+ return;
+ }
xqspi->dma_rx_bytes = rx_bytes;
xqspi->dma_addr = addr;
@@ -733,7 +914,7 @@ static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
}
/**
- * zynqmp_qspi_txrxsetup: This function checks the TX/RX buffers in
+ * zynqmp_qspi_txrxsetup - This function checks the TX/RX buffers in
* the transfer and sets up the GENFIFO entries,
* TX FIFO as required.
* @xqspi: xqspi is a pointer to the GQSPI instance.
@@ -755,7 +936,7 @@ static void zynqmp_qspi_txrxsetup(struct zynqmp_qspi *xqspi,
*genfifoentry |= GQSPI_GENFIFO_TX;
*genfifoentry |=
zynqmp_qspi_selectspimode(xqspi, transfer->tx_nbits);
- xqspi->bytes_to_transfer = transfer->len;
+ xqspi->bytes_to_transfer = transfer->len - (transfer->dummy/8);
if (xqspi->mode == GQSPI_MODE_DMA) {
config_reg = zynqmp_gqspi_read(xqspi,
GQSPI_CONFIG_OFST);
@@ -784,7 +965,7 @@ static void zynqmp_qspi_txrxsetup(struct zynqmp_qspi *xqspi,
}
/**
- * zynqmp_qspi_start_transfer: Initiates the QSPI transfer
+ * zynqmp_qspi_start_transfer - Initiates the QSPI transfer
* @master: Pointer to the spi_master structure which provides
* information about the controller.
* @qspi: Pointer to the spi_device structure
@@ -811,18 +992,28 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
genfifoentry |= xqspi->genfifocs;
genfifoentry |= xqspi->genfifobus;
+ if (!xqspi->isinstr && (master->flags & SPI_MASTER_DATA_STRIPE)) {
+ if (transfer->stripe)
+ genfifoentry |= GQSPI_GENFIFO_STRIPE;
+ }
zynqmp_qspi_txrxsetup(xqspi, transfer, &genfifoentry);
if (xqspi->mode == GQSPI_MODE_DMA)
transfer_len = xqspi->dma_rx_bytes;
else
- transfer_len = transfer->len;
+ transfer_len = transfer->len - (transfer->dummy/8);
xqspi->genfifoentry = genfifoentry;
if ((transfer_len) < GQSPI_GENFIFO_IMM_DATA_MASK) {
genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
genfifoentry |= transfer_len;
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
+ if (transfer->dummy || transfer->tx_nbits >= 1) {
+ zynqmp_qspi_preparedummy(xqspi, transfer,
+ &genfifoentry);
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST,
+ genfifoentry);
+ }
} else {
int tempcount = transfer_len;
u32 exponent = 8; /* 2^8 = 256 */
@@ -849,6 +1040,12 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
if (imm_data != 0) {
genfifoentry &= ~GQSPI_GENFIFO_EXP;
genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
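+			/*
+			 * Round the residual immediate byte count up to a
+			 * 4-byte multiple, clamping to 0xFF when the rounded
+			 * value would not fit in the 8-bit immediate field.
+			 */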
+ if (imm_data % 4 != 0) {
+ if (((imm_data + 4 - (imm_data % 4)) & 0xFF) == 0x00)
+ imm_data = 0xFF;
+ else
+ imm_data = imm_data + 4 - (imm_data % 4);
+ }
genfifoentry |= (u8) (imm_data & 0xFF);
zynqmp_gqspi_write(xqspi,
GQSPI_GEN_FIFO_OFST, genfifoentry);
@@ -869,7 +1066,6 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
if (xqspi->txbuf != NULL)
/* Enable interrupts for TX */
zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
- GQSPI_IER_TXEMPTY_MASK |
GQSPI_IER_GENFIFOEMPTY_MASK |
GQSPI_IER_TXNOT_FULL_MASK);
@@ -883,8 +1079,7 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
} else {
zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
GQSPI_IER_GENFIFOEMPTY_MASK |
- GQSPI_IER_RXNEMPTY_MASK |
- GQSPI_IER_RXEMPTY_MASK);
+ GQSPI_IER_RXNEMPTY_MASK);
}
}
@@ -892,8 +1087,8 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
}
/**
- * zynqmp_qspi_suspend: Suspend method for the QSPI driver
- * @_dev: Address of the platform_device structure
+ * zynqmp_qspi_suspend - Suspend method for the QSPI driver
+ * @dev: Address of the platform_device structure
*
* This function stops the QSPI driver queue and disables the QSPI controller
*
@@ -911,7 +1106,7 @@ static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
}
/**
- * zynqmp_qspi_resume: Resume method for the QSPI driver
+ * zynqmp_qspi_resume - Resume method for the QSPI driver
* @dev: Address of the platform_device structure
*
* The function starts the QSPI driver queue and initializes the QSPI
@@ -938,10 +1133,12 @@ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
return ret;
}
+ zynqmp_qspi_init_hw(xqspi);
spi_master_resume(master);
clk_disable(xqspi->refclk);
clk_disable(xqspi->pclk);
+
return 0;
}
@@ -994,14 +1191,38 @@ static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
return 0;
}
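+/*
+ * Runtime-idle callback: report -EBUSY while the controller enable bit is
+ * still set, so autosuspend only proceeds once the GQSPI has been disabled.
+ */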
+static int __maybe_unused zynqmp_runtime_idle(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
+ u32 value;
+
+ value = zynqmp_gqspi_read(xqspi, GQSPI_EN_OFST);
+ if (value)
+ return -EBUSY;
+
+ return 0;
+}
+
static const struct dev_pm_ops zynqmp_qspi_dev_pm_ops = {
SET_RUNTIME_PM_OPS(zynqmp_runtime_suspend,
- zynqmp_runtime_resume, NULL)
+ zynqmp_runtime_resume, zynqmp_runtime_idle)
SET_SYSTEM_SLEEP_PM_OPS(zynqmp_qspi_suspend, zynqmp_qspi_resume)
};
+static const struct qspi_platform_data versal_qspi_def = {
+ .quirks = QSPI_QUIRK_HAS_TAPDELAY,
+};
+
+static const struct of_device_id zynqmp_qspi_of_match[] = {
+ { .compatible = "xlnx,zynqmp-qspi-1.0"},
+ { .compatible = "xlnx,versal-qspi-1.0", .data = &versal_qspi_def },
+ { /* End of table */ }
+};
+
/**
- * zynqmp_qspi_probe: Probe method for the QSPI driver
+ * zynqmp_qspi_probe - Probe method for the QSPI driver
* @pdev: Pointer to the platform_device structure
*
* This function initializes the driver data structures and the hardware.
@@ -1014,6 +1235,11 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
struct spi_master *master;
struct zynqmp_qspi *xqspi;
struct device *dev = &pdev->dev;
+ struct device_node *nc;
+ const struct of_device_id *match;
+ u32 num_cs;
+ u32 rx_bus_width;
+ u32 tx_bus_width;
eemi_ops = zynqmp_pm_get_eemi_ops();
if (IS_ERR(eemi_ops))
@@ -1027,6 +1253,14 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, master);
+ match = of_match_node(zynqmp_qspi_of_match, pdev->dev.of_node);
+ if (match) {
+ const struct qspi_platform_data *p_data = match->data;
+
+ if (p_data && (p_data->quirks & QSPI_QUIRK_HAS_TAPDELAY))
+ xqspi->has_tapdelay = true;
+ }
+
xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
@@ -1064,11 +1298,16 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
+
+ if (of_property_read_bool(pdev->dev.of_node, "has-io-mode"))
+ xqspi->io_mode = true;
+
/* QSPI controller initializations */
zynqmp_qspi_init_hw(xqspi);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
+
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) {
ret = -ENXIO;
@@ -1082,8 +1321,37 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
goto clk_dis_all;
}
- master->num_chipselect = GQSPI_DEFAULT_NUM_CS;
+ xqspi->rx_bus_width = GQSPI_RX_BUS_WIDTH_SINGLE;
+ for_each_available_child_of_node(pdev->dev.of_node, nc) {
+ ret = of_property_read_u32(nc, "spi-rx-bus-width",
+ &rx_bus_width);
+ if (!ret) {
+ xqspi->rx_bus_width = rx_bus_width;
+ break;
+ }
+ }
+ if (ret)
+ dev_err(dev, "rx bus width not found\n");
+
+ xqspi->tx_bus_width = GQSPI_TX_BUS_WIDTH_SINGLE;
+ for_each_available_child_of_node(pdev->dev.of_node, nc) {
+ ret = of_property_read_u32(nc, "spi-tx-bus-width",
+ &tx_bus_width);
+ if (!ret) {
+ xqspi->tx_bus_width = tx_bus_width;
+ break;
+ }
+ }
+ if (ret)
+ dev_err(dev, "tx bus width not found\n");
+
+ ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
+ if (ret < 0)
+ master->num_chipselect = GQSPI_DEFAULT_NUM_CS;
+ else
+ master->num_chipselect = num_cs;
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
master->setup = zynqmp_qspi_setup;
master->set_cs = zynqmp_qspi_chipselect;
master->transfer_one = zynqmp_qspi_start_transfer;
@@ -1094,6 +1362,8 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
+ xqspi->speed_hz = master->max_speed_hz;
+ master->auto_runtime_pm = true;
if (master->dev.parent == NULL)
master->dev.parent = &master->dev;
@@ -1117,7 +1387,7 @@ remove_master:
}
/**
- * zynqmp_qspi_remove: Remove method for the QSPI driver
+ * zynqmp_qspi_remove - Remove method for the QSPI driver
* @pdev: Pointer to the platform_device structure
*
* This function is called if a device is physically removed from the system or
@@ -1142,11 +1412,6 @@ static int zynqmp_qspi_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id zynqmp_qspi_of_match[] = {
- { .compatible = "xlnx,zynqmp-qspi-1.0", },
- { /* End of table */ }
-};
-
MODULE_DEVICE_TABLE(of, zynqmp_qspi_of_match);
static struct platform_driver zynqmp_qspi_driver = {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b18ae50db1f5..464c11aba781 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1817,6 +1817,10 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
}
spi->max_speed_hz = value;
+	/* Multi-die flash */
+ if (of_property_read_bool(nc, "multi-die"))
+ spi->multi_die = true;
+
return 0;
}
@@ -3249,6 +3253,14 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
}
}
+ /*
+	 * The data stripe option is valid only when both chip
+	 * selects are enabled
+ */
+ if ((ctlr->flags & SPI_MASTER_DATA_STRIPE)
+ && !(ctlr->flags & SPI_MASTER_BOTH_CS))
+ return -EINVAL;
+
/* Half-duplex links include original MicroWire, and ones with
* only one data pin like SPI_3WIRE (switches direction) or where
* either MOSI or MISO is missing. They can also be caused by
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index be503a0e6ef7..6d6fc7de9cf3 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -373,12 +373,23 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
/* read requests */
case SPI_IOC_RD_MODE:
- retval = put_user(spi->mode & SPI_MODE_MASK,
- (__u8 __user *)arg);
- break;
case SPI_IOC_RD_MODE32:
- retval = put_user(spi->mode & SPI_MODE_MASK,
- (__u32 __user *)arg);
+ tmp = spi->mode;
+
+ {
+ struct spi_controller *ctlr = spi->controller;
+
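+			/*
+			 * When the chip select is driven through a GPIO
+			 * descriptor, gpiolib handles the polarity, so hide
+			 * SPI_CS_HIGH from the mode reported to userspace.
+			 */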
+ if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
+ ctlr->cs_gpiods[spi->chip_select])
+ tmp &= ~SPI_CS_HIGH;
+ }
+
+ if (cmd == SPI_IOC_RD_MODE)
+ retval = put_user(tmp & SPI_MODE_MASK,
+ (__u8 __user *)arg);
+ else
+ retval = put_user(tmp & SPI_MODE_MASK,
+ (__u32 __user *)arg);
break;
case SPI_IOC_RD_LSB_FIRST:
retval = put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
@@ -577,7 +588,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
if (!spidev->tx_buffer) {
spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
if (!spidev->tx_buffer) {
- dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
status = -ENOMEM;
goto err_find_dev;
}
@@ -586,7 +596,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
if (!spidev->rx_buffer) {
spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
if (!spidev->rx_buffer) {
- dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
status = -ENOMEM;
goto err_alloc_rx_buf;
}
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index bbbd311eda03..e6de2aeece8d 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -887,7 +887,8 @@ static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb)
* version 5, there is more than one APID mapped to each PPID.
* The owner field for each of these mappings specifies the EE which is
* allowed to write to the APID. The owner of the last (highest) APID
- * for a given PPID will receive interrupts from the PPID.
+ * which has the IRQ owner bit set for a given PPID will receive
+ * interrupts from the PPID.
*/
for (i = 0; ; i++, apidd++) {
offset = pmic_arb->ver_ops->apid_map_offset(i);
@@ -910,16 +911,16 @@ static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb)
apid = pmic_arb->ppid_to_apid[ppid] & ~PMIC_ARB_APID_VALID;
prev_apidd = &pmic_arb->apid_data[apid];
- if (valid && is_irq_ee &&
- prev_apidd->write_ee == pmic_arb->ee) {
+ if (!valid || apidd->write_ee == pmic_arb->ee) {
+ /* First PPID mapping or one for this EE */
+ pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID;
+ } else if (valid && is_irq_ee &&
+ prev_apidd->write_ee == pmic_arb->ee) {
/*
* Duplicate PPID mapping after the one for this EE;
* override the irq owner
*/
prev_apidd->irq_ee = apidd->irq_ee;
- } else if (!valid || is_irq_ee) {
- /* First PPID mapping or duplicate for another EE */
- pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID;
}
apidd->ppid = ppid;
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index c16b60f645a4..8ca7e004a53d 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -348,7 +348,8 @@ static int spmi_drv_remove(struct device *dev)
const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
pm_runtime_get_sync(dev);
- sdrv->remove(to_spmi_device(dev));
+ if (sdrv->remove)
+ sdrv->remove(to_spmi_device(dev));
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 3861cb659cb9..6c647ba4ba0b 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -119,7 +119,7 @@ void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc,
static enum ssb_clksrc chipco_pctl_get_slowclksrc(struct ssb_chipcommon *cc)
{
struct ssb_bus *bus = cc->dev->bus;
- u32 uninitialized_var(tmp);
+ u32 tmp;
if (cc->dev->id.revision < 6) {
if (bus->bustype == SSB_BUSTYPE_SSB ||
@@ -149,7 +149,7 @@ static enum ssb_clksrc chipco_pctl_get_slowclksrc(struct ssb_chipcommon *cc)
/* Get maximum or minimum (depending on get_max flag) slowclock frequency. */
static int chipco_pctl_clockfreqlimit(struct ssb_chipcommon *cc, int get_max)
{
- int uninitialized_var(limit);
+ int limit;
enum ssb_clksrc clocksrc;
int divisor = 1;
u32 tmp;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 6f1fa4c849a1..614359bc16ab 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -78,6 +78,8 @@ source "drivers/staging/gs_fpgaboot/Kconfig"
source "drivers/staging/unisys/Kconfig"
+source "drivers/staging/apf/Kconfig"
+
source "drivers/staging/clocking-wizard/Kconfig"
source "drivers/staging/fbtft/Kconfig"
@@ -96,6 +98,8 @@ source "drivers/staging/vc04_services/Kconfig"
source "drivers/staging/pi433/Kconfig"
+source "drivers/staging/fclk/Kconfig"
+
source "drivers/staging/mt7621-pci/Kconfig"
source "drivers/staging/mt7621-pci-phy/Kconfig"
@@ -125,4 +129,16 @@ source "drivers/staging/exfat/Kconfig"
source "drivers/staging/qlge/Kconfig"
+source "drivers/staging/xlnx_ctrl_driver/Kconfig"
+
+source "drivers/staging/xlnx_ernic/Kconfig"
+
+source "drivers/staging/xroeframer/Kconfig"
+
+source "drivers/staging/xroetrafficgen/Kconfig"
+
+source "drivers/staging/xlnxsync/Kconfig"
+
+source "drivers/staging/xlnx_tsmux/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index a90f9b308c8d..e3b3163a91a0 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -29,7 +29,9 @@ obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
obj-$(CONFIG_UNISYSSPAR) += unisys/
+obj-$(CONFIG_XILINX_APF) += apf/
obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
+obj-$(CONFIG_XILINX_FCLK) += fclk/
obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/
obj-$(CONFIG_WILC1000) += wilc1000/
@@ -53,3 +55,8 @@ obj-$(CONFIG_UWB) += uwb/
obj-$(CONFIG_USB_WUSB) += wusbcore/
obj-$(CONFIG_EXFAT_FS) += exfat/
obj-$(CONFIG_QLGE) += qlge/
+obj-y += xlnx_ctrl_driver/
+obj-$(CONFIG_ERNIC) += xlnx_ernic/
+obj-$(CONFIG_XROE_FRAMER) += xroeframer/
+obj-$(CONFIG_XLNX_SYNC) += xlnxsync/
+obj-$(CONFIG_XLNX_TSMUX) += xlnx_tsmux/
diff --git a/drivers/staging/apf/Kconfig b/drivers/staging/apf/Kconfig
new file mode 100644
index 000000000000..40d11d02e92d
--- /dev/null
+++ b/drivers/staging/apf/Kconfig
@@ -0,0 +1,20 @@
+#
+# APF driver configuration
+#
+
+menuconfig XILINX_APF
+ tristate "Xilinx APF Accelerator driver"
+ depends on ARCH_ZYNQ || ARCH_ZYNQMP
+ default n
+ select UIO
+ select DMA_SHARED_BUFFER
+ help
+	  Select this option to include the Xilinx APF accelerator driver.
+
+config XILINX_DMA_APF
+ bool "Xilinx APF DMA engines support"
+ depends on XILINX_APF
+ select DMA_ENGINE
+ select DMADEVICES
+ help
+ Enable support for the Xilinx APF DMA controllers.
diff --git a/drivers/staging/apf/Makefile b/drivers/staging/apf/Makefile
new file mode 100644
index 000000000000..bf281a2c16df
--- /dev/null
+++ b/drivers/staging/apf/Makefile
@@ -0,0 +1,9 @@
+# Makefile for the Xilinx APF accelerator drivers
+
+ccflags-$(CONFIG_DEBUG_XILINX_APF) += -DDEBUG
+ccflags-$(CONFIG_XILINX_APF) += -Idrivers/dma
+
+obj-$(CONFIG_XILINX_APF) += xlnk.o
+obj-$(CONFIG_XILINX_APF) += xlnk-eng.o
+obj-$(CONFIG_XILINX_DMA_APF) += xilinx-dma-apf.o
+
diff --git a/drivers/staging/apf/dt-binding.txt b/drivers/staging/apf/dt-binding.txt
new file mode 100644
index 000000000000..fd73725fa589
--- /dev/null
+++ b/drivers/staging/apf/dt-binding.txt
@@ -0,0 +1,17 @@
+* Xilinx APF xlnk driver
+
+Required properties:
+- compatible: Should be "xlnx,xlnk-1.0"
+- clock-names: List of clock names
+- clocks: List of clock sources corresponding to the clock names
+
+The number of elements in the clock-names and clocks lists must be the same.
+If there are no controllable clocks, the xlnk node should be omitted from the
+devicetree.
+
+Example:
+ xlnk {
+ compatible = "xlnx,xlnk-1.0";
+ clock-names = "clk166", "clk150", "clk100", "clk200";
+ clocks = <&clkc 15>, <&clkc 16>, <&clkc 17>, <&clkc 18>;
+ };
diff --git a/drivers/staging/apf/xilinx-dma-apf.c b/drivers/staging/apf/xilinx-dma-apf.c
new file mode 100644
index 000000000000..55913130eafc
--- /dev/null
+++ b/drivers/staging/apf/xilinx-dma-apf.c
@@ -0,0 +1,1232 @@
+/*
+ * Xilinx AXI DMA Engine support
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ * Description:
+ * This driver supports Xilinx AXI DMA engine:
+ * . Axi DMA engine, it does transfers between memory and device. It can be
+ * configured to have one channel or two channels. If configured as two
+ * channels, one is for transmit to device and another is for receive from
+ * device.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/pm.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/dma-buf.h>
+
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/of_irq.h>
+
+#include "xilinx-dma-apf.h"
+
+#include "xlnk.h"
+
+static DEFINE_MUTEX(dma_list_mutex);
+static LIST_HEAD(dma_device_list);
+/* IO accessors */
+#define DMA_OUT_64(addr, val) (writeq(val, addr))
+#define DMA_OUT(addr, val) (iowrite32(val, addr))
+#define DMA_IN(addr) (ioread32(addr))
+
+#define GET_LOW(x) ((u32)((x) & 0xFFFFFFFF))
+#define GET_HI(x) ((u32)((x) / 0x100000000))
+
+static int unpin_user_pages(struct scatterlist *sglist, unsigned int cnt);
+/* Driver functions */
+static void xdma_clean_bd(struct xdma_desc_hw *bd)
+{
+ bd->src_addr = 0x0;
+ bd->control = 0x0;
+ bd->status = 0x0;
+ bd->app[0] = 0x0;
+ bd->app[1] = 0x0;
+ bd->app[2] = 0x0;
+ bd->app[3] = 0x0;
+ bd->app[4] = 0x0;
+ bd->dmahead = 0x0;
+ bd->sw_flag = 0x0;
+}
+
+static int dma_is_running(struct xdma_chan *chan)
+{
+ return !(DMA_IN(&chan->regs->sr) & XDMA_SR_HALTED_MASK) &&
+ (DMA_IN(&chan->regs->cr) & XDMA_CR_RUNSTOP_MASK);
+}
+
+static int dma_is_idle(struct xdma_chan *chan)
+{
+ return DMA_IN(&chan->regs->sr) & XDMA_SR_IDLE_MASK;
+}
+
+static void dma_halt(struct xdma_chan *chan)
+{
+ DMA_OUT(&chan->regs->cr,
+ (DMA_IN(&chan->regs->cr) & ~XDMA_CR_RUNSTOP_MASK));
+}
+
+static void dma_start(struct xdma_chan *chan)
+{
+ DMA_OUT(&chan->regs->cr,
+ (DMA_IN(&chan->regs->cr) | XDMA_CR_RUNSTOP_MASK));
+}
+
+static int dma_init(struct xdma_chan *chan)
+{
+ int loop = XDMA_RESET_LOOP;
+
+ DMA_OUT(&chan->regs->cr,
+ (DMA_IN(&chan->regs->cr) | XDMA_CR_RESET_MASK));
+
+	/* Wait for the hardware to finish the reset */
+ while (loop) {
+ if (!(DMA_IN(&chan->regs->cr) & XDMA_CR_RESET_MASK))
+ break;
+
+ loop -= 1;
+ }
+
+ if (!loop)
+ return 1;
+
+ return 0;
+}
+
+static int xdma_alloc_chan_descriptors(struct xdma_chan *chan)
+{
+ int i;
+ u8 *ptr;
+
+ /*
+	 * The descriptors must be 64-byte aligned to meet the
+	 * Xilinx DMA specification.
+ */
+ ptr = (u8 *)dma_alloc_coherent(chan->dev,
+ (sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT),
+ &chan->bd_phys_addr,
+ GFP_KERNEL);
+
+ if (!ptr) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptor pool\n",
+ chan->id);
+ return -ENOMEM;
+ }
+
+ memset(ptr, 0, (sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT));
+ chan->bd_cur = 0;
+ chan->bd_tail = 0;
+ chan->bd_used = 0;
+ chan->bd_chain_size = sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT;
+
+ /*
+	 * Pre-allocate and chain all the buffer descriptors.
+ */
+ for (i = 0; i < XDMA_MAX_BD_CNT; i++) {
+ chan->bds[i] = (struct xdma_desc_hw *)
+ (ptr + (sizeof(struct xdma_desc_hw) * i));
+ chan->bds[i]->next_desc = chan->bd_phys_addr +
+ (sizeof(struct xdma_desc_hw) *
+ ((i + 1) % XDMA_MAX_BD_CNT));
+ }
+
+	/* The whole BD ring is now free for use */
+ return 0;
+}
+
+static void xdma_free_chan_resources(struct xdma_chan *chan)
+{
+ dev_dbg(chan->dev, "Free all channel resources.\n");
+ dma_free_coherent(chan->dev, (sizeof(struct xdma_desc_hw) *
+ XDMA_MAX_BD_CNT), chan->bds[0], chan->bd_phys_addr);
+}
+
+static void xilinx_chan_desc_reinit(struct xdma_chan *chan)
+{
+ struct xdma_desc_hw *desc;
+ unsigned int start, end;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ start = 0;
+ end = XDMA_MAX_BD_CNT;
+
+ while (start < end) {
+ desc = chan->bds[start];
+ xdma_clean_bd(desc);
+ start++;
+ }
+ /* Re-initialize bd_cur and bd_tail values */
+ chan->bd_cur = 0;
+ chan->bd_tail = 0;
+ chan->bd_used = 0;
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void xilinx_chan_desc_cleanup(struct xdma_chan *chan)
+{
+ struct xdma_head *dmahead;
+ struct xdma_desc_hw *desc;
+ struct completion *cmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+#define XDMA_BD_STS_RXEOF_MASK 0x04000000
+ desc = chan->bds[chan->bd_cur];
+ while (desc->status & XDMA_BD_STS_ALL_MASK) {
+ if ((desc->status & XDMA_BD_STS_RXEOF_MASK) &&
+ !(desc->dmahead)) {
+ pr_info("ERROR: premature EOF on DMA\n");
+ dma_init(chan); /* reset the dma HW */
+ while (!(desc->dmahead)) {
+ xdma_clean_bd(desc);
+ chan->bd_used--;
+ chan->bd_cur++;
+ if (chan->bd_cur >= XDMA_MAX_BD_CNT)
+ chan->bd_cur = 0;
+ desc = chan->bds[chan->bd_cur];
+ }
+ }
+ if (desc->dmahead) {
+ if ((desc->sw_flag & XDMA_BD_SF_POLL_MODE_MASK))
+ if (!(desc->sw_flag & XDMA_BD_SF_SW_DONE_MASK))
+ break;
+
+ dmahead = (struct xdma_head *)desc->dmahead;
+ cmp = (struct completion *)&dmahead->cmp;
+ if (dmahead->nappwords_o)
+ memcpy(dmahead->appwords_o, desc->app,
+ dmahead->nappwords_o * sizeof(u32));
+
+ if (chan->poll_mode)
+ cmp->done = 1;
+ else
+ complete(cmp);
+ }
+ xdma_clean_bd(desc);
+ chan->bd_used--;
+ chan->bd_cur++;
+ if (chan->bd_cur >= XDMA_MAX_BD_CNT)
+ chan->bd_cur = 0;
+ desc = chan->bds[chan->bd_cur];
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void xdma_err_tasklet(unsigned long data)
+{
+ struct xdma_chan *chan = (struct xdma_chan *)data;
+
+ if (chan->err) {
+ /* If reset failed, need to hard reset
+ * Channel is no longer functional
+ */
+ if (!dma_init(chan))
+ chan->err = 0;
+ else
+ dev_err(chan->dev, "DMA channel reset failed, please reset system\n");
+ }
+
+	/* Barrier to ensure the descriptor init has reached memory */
+ rmb();
+ xilinx_chan_desc_cleanup(chan);
+
+ xilinx_chan_desc_reinit(chan);
+}
+
+static void xdma_tasklet(unsigned long data)
+{
+ struct xdma_chan *chan = (struct xdma_chan *)data;
+
+ xilinx_chan_desc_cleanup(chan);
+}
+
+static void dump_cur_bd(struct xdma_chan *chan)
+{
+ u32 index;
+
+ index = (((u32)DMA_IN(&chan->regs->cdr)) - chan->bd_phys_addr) /
+ sizeof(struct xdma_desc_hw);
+
+ dev_err(chan->dev, "cur bd @ %08x\n", (u32)DMA_IN(&chan->regs->cdr));
+ dev_err(chan->dev, " buf = %p\n",
+ (void *)chan->bds[index]->src_addr);
+ dev_err(chan->dev, " ctrl = 0x%08x\n", chan->bds[index]->control);
+ dev_err(chan->dev, " sts = 0x%08x\n", chan->bds[index]->status);
+ dev_err(chan->dev, " next = %p\n",
+ (void *)chan->bds[index]->next_desc);
+}
+
+static irqreturn_t xdma_rx_intr_handler(int irq, void *data)
+{
+ struct xdma_chan *chan = data;
+ u32 stat;
+
+ stat = DMA_IN(&chan->regs->sr);
+
+ if (!(stat & XDMA_XR_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ /* Ack the interrupts */
+ DMA_OUT(&chan->regs->sr, (stat & XDMA_XR_IRQ_ALL_MASK));
+
+ if (stat & XDMA_XR_IRQ_ERROR_MASK) {
+ dev_err(chan->dev, "Channel %s has errors %x, cdr %x tdr %x\n",
+ chan->name, (unsigned int)stat,
+ (unsigned int)DMA_IN(&chan->regs->cdr),
+ (unsigned int)DMA_IN(&chan->regs->tdr));
+
+ dump_cur_bd(chan);
+
+ chan->err = 1;
+ tasklet_schedule(&chan->dma_err_tasklet);
+ }
+
+ if (!(chan->poll_mode) && ((stat & XDMA_XR_IRQ_DELAY_MASK) ||
+ (stat & XDMA_XR_IRQ_IOC_MASK)))
+ tasklet_schedule(&chan->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xdma_tx_intr_handler(int irq, void *data)
+{
+ struct xdma_chan *chan = data;
+ u32 stat;
+
+ stat = DMA_IN(&chan->regs->sr);
+
+ if (!(stat & XDMA_XR_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ /* Ack the interrupts */
+ DMA_OUT(&chan->regs->sr, (stat & XDMA_XR_IRQ_ALL_MASK));
+
+ if (stat & XDMA_XR_IRQ_ERROR_MASK) {
+ dev_err(chan->dev, "Channel %s has errors %x, cdr %x tdr %x\n",
+ chan->name, (unsigned int)stat,
+ (unsigned int)DMA_IN(&chan->regs->cdr),
+ (unsigned int)DMA_IN(&chan->regs->tdr));
+
+ dump_cur_bd(chan);
+
+ chan->err = 1;
+ tasklet_schedule(&chan->dma_err_tasklet);
+ }
+
+ if (!(chan->poll_mode) && ((stat & XDMA_XR_IRQ_DELAY_MASK) ||
+ (stat & XDMA_XR_IRQ_IOC_MASK)))
+ tasklet_schedule(&chan->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void xdma_start_transfer(struct xdma_chan *chan,
+ int start_index,
+ int end_index)
+{
+ xlnk_intptr_type cur_phys;
+ xlnk_intptr_type tail_phys;
+ u32 regval;
+
+ if (chan->err)
+ return;
+
+ cur_phys = chan->bd_phys_addr + (start_index *
+ sizeof(struct xdma_desc_hw));
+ tail_phys = chan->bd_phys_addr + (end_index *
+ sizeof(struct xdma_desc_hw));
+ /* If hardware is busy, move the tail & return */
+ if (dma_is_running(chan) || dma_is_idle(chan)) {
+#if XLNK_SYS_BIT_WIDTH == 32
+ DMA_OUT(&chan->regs->tdr, tail_phys);
+#else
+ DMA_OUT_64(&chan->regs->tdr, tail_phys);
+#endif
+ return;
+ }
+
+#if XLNK_SYS_BIT_WIDTH == 32
+ DMA_OUT(&chan->regs->cdr, cur_phys);
+#else
+ DMA_OUT_64(&chan->regs->cdr, cur_phys);
+#endif
+
+ dma_start(chan);
+
+ /* Enable interrupts */
+ regval = DMA_IN(&chan->regs->cr);
+ regval |= (chan->poll_mode ? XDMA_XR_IRQ_ERROR_MASK
+ : XDMA_XR_IRQ_ALL_MASK);
+ DMA_OUT(&chan->regs->cr, regval);
+
+ /* Update tail ptr register and start the transfer */
+#if XLNK_SYS_BIT_WIDTH == 32
+ DMA_OUT(&chan->regs->tdr, tail_phys);
+#else
+ DMA_OUT_64(&chan->regs->tdr, tail_phys);
+#endif
+}
+
+static int xdma_setup_hw_desc(struct xdma_chan *chan,
+ struct xdma_head *dmahead,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_data_direction direction,
+ unsigned int nappwords_i,
+ u32 *appwords_i)
+{
+ struct xdma_desc_hw *bd = NULL;
+ size_t copy;
+ struct scatterlist *sg;
+ size_t sg_used;
+ dma_addr_t dma_src;
+ int i, start_index = -1, end_index1 = 0, end_index2 = -1;
+ int status;
+ unsigned long flags;
+ unsigned int bd_used_saved;
+
+ if (!chan) {
+ pr_err("Requested transfer on invalid channel\n");
+ return -ENODEV;
+ }
+
+ /* if we almost run out of bd, try to recycle some */
+ if ((chan->poll_mode) && (chan->bd_used >= XDMA_BD_CLEANUP_THRESHOLD))
+ xilinx_chan_desc_cleanup(chan);
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ bd_used_saved = chan->bd_used;
+ /*
+ * Build transactions using information in the scatter gather list
+ */
+ for_each_sg(sgl, sg, sg_len, i) {
+ sg_used = 0;
+
+ /* Loop until the entire scatterlist entry is used */
+ while (sg_used < sg_dma_len(sg)) {
+ /* Allocate the link descriptor from DMA pool */
+ bd = chan->bds[chan->bd_tail];
+ if ((bd->control) & (XDMA_BD_STS_ACTUAL_LEN_MASK)) {
+ end_index2 = chan->bd_tail;
+ status = -ENOMEM;
+				/* If start_index was never set, we
+				 * failed on the very first descriptor,
+				 * so there is nothing to clean up
+				 */
+ if (start_index == -1)
+ goto out_unlock;
+ else
+ goto out_clean;
+ }
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the DMA controller limit
+ */
+ copy = min((size_t)(sg_dma_len(sg) - sg_used),
+ (size_t)chan->max_len);
+ /*
+ * Only the src address for DMA
+ */
+ dma_src = sg_dma_address(sg) + sg_used;
+ bd->src_addr = dma_src;
+
+ /* Fill in the descriptor */
+ bd->control = copy;
+
+ /*
+ * If this is not the first descriptor, chain the
+ * current descriptor after the previous descriptor
+ *
+ * For the first DMA_TO_DEVICE transfer, set SOP
+ */
+ if (start_index == -1) {
+ start_index = chan->bd_tail;
+
+ if (nappwords_i)
+ memcpy(bd->app, appwords_i,
+ nappwords_i * sizeof(u32));
+
+ if (direction == DMA_TO_DEVICE)
+ bd->control |= XDMA_BD_SOP;
+ }
+
+ sg_used += copy;
+ end_index2 = chan->bd_tail;
+ chan->bd_tail++;
+ chan->bd_used++;
+ if (chan->bd_tail >= XDMA_MAX_BD_CNT) {
+ end_index1 = XDMA_MAX_BD_CNT;
+ chan->bd_tail = 0;
+ }
+ }
+ }
+
+ if (start_index == -1) {
+ status = -EINVAL;
+ goto out_unlock;
+ }
+
+ bd->dmahead = (xlnk_intptr_type)dmahead;
+ bd->sw_flag = chan->poll_mode ? XDMA_BD_SF_POLL_MODE_MASK : 0;
+ dmahead->last_bd_index = end_index2;
+
+ if (direction == DMA_TO_DEVICE)
+ bd->control |= XDMA_BD_EOP;
+
+	/* Barrier to ensure the control word writes have committed */
+ wmb();
+
+ xdma_start_transfer(chan, start_index, end_index2);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+
+out_clean:
+ if (!end_index1) {
+ for (i = start_index; i < end_index2; i++)
+ xdma_clean_bd(chan->bds[i]);
+ } else {
+		/* Clean to the end of the BD ring first, then from its start
+		 * up to the second end index
+		 */
+ for (i = start_index; i < end_index1; i++)
+ xdma_clean_bd(chan->bds[i]);
+
+ end_index1 = 0;
+ for (i = end_index1; i < end_index2; i++)
+ xdma_clean_bd(chan->bds[i]);
+ }
+ /* Move the bd_tail back */
+ chan->bd_tail = start_index;
+ chan->bd_used = bd_used_saved;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return status;
+}
+
+/*
+ * Create a minimal-length scatter-gather list for a physically contiguous
+ * buffer that starts at phy_buf and is phy_buf_len bytes long
+ */
+static unsigned int phy_buf_to_sgl(xlnk_intptr_type phy_buf,
+ unsigned int phy_buf_len,
+ struct scatterlist *sgl)
+{
+ unsigned int sgl_cnt = 0;
+ struct scatterlist *sgl_head;
+ unsigned int dma_len;
+ unsigned int num_bd;
+
+ if (!phy_buf || !phy_buf_len) {
+ pr_err("phy_buf is NULL or phy_buf_len = 0\n");
+ return sgl_cnt;
+ }
+
+ num_bd = (phy_buf_len + (XDMA_MAX_TRANS_LEN - 1))
+ / XDMA_MAX_TRANS_LEN;
+ sgl_head = sgl;
+ sg_init_table(sgl, num_bd);
+
+ while (phy_buf_len > 0) {
+ xlnk_intptr_type page_id = phy_buf >> PAGE_SHIFT;
+ unsigned int offset = phy_buf - (page_id << PAGE_SHIFT);
+
+ sgl_cnt++;
+ if (sgl_cnt > XDMA_MAX_BD_CNT)
+ return 0;
+
+ dma_len = (phy_buf_len > XDMA_MAX_TRANS_LEN) ?
+ XDMA_MAX_TRANS_LEN : phy_buf_len;
+
+ sg_set_page(sgl_head, pfn_to_page(page_id), dma_len, offset);
+ sg_dma_address(sgl_head) = (dma_addr_t)phy_buf;
+ sg_dma_len(sgl_head) = dma_len;
+ sgl_head = sg_next(sgl_head);
+
+ phy_buf += dma_len;
+ phy_buf_len -= dma_len;
+ }
+
+ return sgl_cnt;
+}
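
As a worked example of the BD-count arithmetic above — a sketch only, assuming
a hypothetical 16 MiB physically contiguous buffer (XDMA_MAX_TRANS_LEN is
0x7FF000, just under 8 MiB, per xilinx-dma-apf.h below):

	unsigned int phy_buf_len = 16 << 20;	/* 16777216 bytes */
	unsigned int num_bd = (phy_buf_len + (XDMA_MAX_TRANS_LEN - 1))
				/ XDMA_MAX_TRANS_LEN;	/* = 3 BDs */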
+
+/* Merge the sg list sgl (sgl_len entries) into sgl_merged to save DMA BDs */
+static unsigned int sgl_merge(struct scatterlist *sgl,
+ unsigned int sgl_len,
+ struct scatterlist *sgl_merged)
+{
+ struct scatterlist *sghead, *sgend, *sgnext, *sg_merged_head;
+ unsigned int sg_visited_cnt = 0, sg_merged_num = 0;
+ unsigned int dma_len = 0;
+
+ sg_init_table(sgl_merged, sgl_len);
+ sg_merged_head = sgl_merged;
+ sghead = sgl;
+
+ while (sghead && (sg_visited_cnt < sgl_len)) {
+ dma_len = sg_dma_len(sghead);
+ sgend = sghead;
+ sg_visited_cnt++;
+ sgnext = sg_next(sgend);
+
+ while (sgnext && (sg_visited_cnt < sgl_len)) {
+ if ((sg_dma_address(sgend) + sg_dma_len(sgend)) !=
+ sg_dma_address(sgnext))
+ break;
+
+ if (dma_len + sg_dma_len(sgnext) >= XDMA_MAX_TRANS_LEN)
+ break;
+
+ sgend = sgnext;
+ dma_len += sg_dma_len(sgend);
+ sg_visited_cnt++;
+ sgnext = sg_next(sgnext);
+ }
+
+ sg_merged_num++;
+ if (sg_merged_num > XDMA_MAX_BD_CNT)
+ return 0;
+
+ memcpy(sg_merged_head, sghead, sizeof(struct scatterlist));
+
+ sg_dma_len(sg_merged_head) = dma_len;
+
+ sg_merged_head = sg_next(sg_merged_head);
+ sghead = sg_next(sgend);
+ }
+
+ return sg_merged_num;
+}
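
A hedged usage sketch of sgl_merge(); it mirrors the call made from
xdma_submit() further down, where pagelist/pagecnt come from
pin_user_pages():

	struct scatterlist *merged;
	unsigned int sgcnt;

	merged = kmalloc_array(pagecnt, sizeof(*merged), GFP_KERNEL);
	if (merged)
		sgcnt = sgl_merge(pagelist, pagecnt, merged);
	/* sgcnt == 0 means the merged list overflowed XDMA_MAX_BD_CNT */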
+
+static int pin_user_pages(xlnk_intptr_type uaddr,
+ unsigned int ulen,
+ int write,
+ struct scatterlist **scatterpp,
+ unsigned int *cntp,
+ unsigned int user_flags)
+{
+ int status;
+ struct mm_struct *mm = current->mm;
+ unsigned int first_page;
+ unsigned int last_page;
+ unsigned int num_pages;
+ struct scatterlist *sglist;
+ struct page **mapped_pages;
+
+ unsigned int pgidx;
+ unsigned int pglen;
+ unsigned int pgoff;
+ unsigned int sublen;
+
+ first_page = uaddr / PAGE_SIZE;
+ last_page = (uaddr + ulen - 1) / PAGE_SIZE;
+ num_pages = last_page - first_page + 1;
+ mapped_pages = vmalloc(sizeof(*mapped_pages) * num_pages);
+ if (!mapped_pages)
+ return -ENOMEM;
+
+ down_read(&mm->mmap_sem);
+ status = get_user_pages(uaddr, num_pages,
+ (write ? FOLL_WRITE : 0) | FOLL_FORCE,
+ mapped_pages, NULL);
+ up_read(&mm->mmap_sem);
+
+ if (status == num_pages) {
+ sglist = kcalloc(num_pages,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!sglist) {
+ pr_err("%s: kcalloc failed to create sg list\n",
+ __func__);
+ vfree(mapped_pages);
+ return -ENOMEM;
+ }
+ sg_init_table(sglist, num_pages);
+ sublen = 0;
+ for (pgidx = 0; pgidx < status; pgidx++) {
+ if (pgidx == 0 && num_pages != 1) {
+ pgoff = uaddr & (~PAGE_MASK);
+ pglen = PAGE_SIZE - pgoff;
+ } else if (pgidx == 0 && num_pages == 1) {
+ pgoff = uaddr & (~PAGE_MASK);
+ pglen = ulen;
+ } else if (pgidx == num_pages - 1) {
+ pgoff = 0;
+ pglen = ulen - sublen;
+ } else {
+ pgoff = 0;
+ pglen = PAGE_SIZE;
+ }
+
+ sublen += pglen;
+
+ sg_set_page(&sglist[pgidx],
+ mapped_pages[pgidx],
+ pglen, pgoff);
+
+ sg_dma_len(&sglist[pgidx]) = pglen;
+ }
+
+ *scatterpp = sglist;
+ *cntp = num_pages;
+
+ vfree(mapped_pages);
+ return 0;
+ }
+ pr_err("Failed to pin user pages\n");
+ for (pgidx = 0; pgidx < status; pgidx++)
+ put_page(mapped_pages[pgidx]);
+ vfree(mapped_pages);
+ return -ENOMEM;
+}
+
+static int unpin_user_pages(struct scatterlist *sglist, unsigned int cnt)
+{
+ struct page *pg;
+ unsigned int i;
+
+ if (!sglist)
+ return 0;
+
+ for (i = 0; i < cnt; i++) {
+ pg = sg_page(sglist + i);
+ if (pg)
+ put_page(pg);
+ }
+
+ kfree(sglist);
+ return 0;
+}
+
+struct xdma_chan *xdma_request_channel(char *name)
+{
+ int i;
+ struct xdma_device *device, *tmp;
+
+ list_for_each_entry_safe(device, tmp, &dma_device_list, node) {
+ for (i = 0; i < device->channel_count; i++) {
+ if (!strcmp(device->chan[i]->name, name))
+ return device->chan[i];
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(xdma_request_channel);
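
Channel names are composed in xdma_probe() below as
"<platform device name>:<channel id>", so a client might look one up along
these lines (the name string here is hypothetical):

	struct xdma_chan *tx_chan;

	tx_chan = xdma_request_channel("xilinx-axidma.0:0");
	if (!tx_chan)
		return -ENODEV;	/* no matching channel registered */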
+
+void xdma_release_channel(struct xdma_chan *chan)
+{ }
+EXPORT_SYMBOL(xdma_release_channel);
+
+void xdma_release_all_channels(void)
+{
+ int i;
+ struct xdma_device *device, *tmp;
+
+ list_for_each_entry_safe(device, tmp, &dma_device_list, node) {
+ for (i = 0; i < device->channel_count; i++) {
+ if (device->chan[i]->client_count) {
+ dma_halt(device->chan[i]);
+ xilinx_chan_desc_reinit(device->chan[i]);
+ pr_info("%s: chan %s freed\n",
+ __func__,
+ device->chan[i]->name);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(xdma_release_all_channels);
+
+static void xdma_release(struct device *dev)
+{
+}
+
+int xdma_submit(struct xdma_chan *chan,
+ xlnk_intptr_type userbuf,
+ void *kaddr,
+ unsigned int size,
+ unsigned int nappwords_i,
+ u32 *appwords_i,
+ unsigned int nappwords_o,
+ unsigned int user_flags,
+ struct xdma_head **dmaheadpp,
+ struct xlnk_dmabuf_reg *dp)
+{
+ struct xdma_head *dmahead;
+ struct scatterlist *pagelist = NULL;
+ struct scatterlist *sglist = NULL;
+ unsigned int pagecnt = 0;
+ unsigned int sgcnt = 0;
+ enum dma_data_direction dmadir;
+ int status;
+ unsigned long attrs = 0;
+
+ dmahead = kzalloc(sizeof(*dmahead), GFP_KERNEL);
+ if (!dmahead)
+ return -ENOMEM;
+
+ dmahead->chan = chan;
+ dmahead->userbuf = userbuf;
+ dmahead->size = size;
+ dmahead->dmadir = chan->direction;
+ dmahead->userflag = user_flags;
+ dmahead->dmabuf = dp;
+ dmadir = chan->direction;
+
+ if (!(user_flags & CF_FLAG_CACHE_FLUSH_INVALIDATE))
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+ if (dp) {
+ int i;
+ struct scatterlist *sg;
+ unsigned int remaining_size = size;
+
+ if (IS_ERR_OR_NULL(dp->dbuf_sg_table)) {
+ pr_err("%s dmabuf not mapped: %p\n",
+ __func__, dp->dbuf_sg_table);
+ return -EINVAL;
+ }
+ if (dp->dbuf_sg_table->nents == 0) {
+ pr_err("%s: cannot map a scatterlist with 0 entries\n",
+ __func__);
+ return -EINVAL;
+ }
+ sglist = kmalloc_array(dp->dbuf_sg_table->nents,
+ sizeof(*sglist),
+ GFP_KERNEL);
+ if (!sglist)
+ return -ENOMEM;
+
+ sg_init_table(sglist, dp->dbuf_sg_table->nents);
+ sgcnt = 0;
+ for_each_sg(dp->dbuf_sg_table->sgl,
+ sg,
+ dp->dbuf_sg_table->nents,
+ i) {
+ sg_set_page(sglist + i,
+ sg_page(sg),
+ sg_dma_len(sg),
+ sg->offset);
+ sg_dma_address(sglist + i) = sg_dma_address(sg);
+ if (remaining_size == 0) {
+ sg_dma_len(sglist + i) = 0;
+ } else if (sg_dma_len(sg) > remaining_size) {
+ sg_dma_len(sglist + i) = remaining_size;
+ sgcnt++;
+ } else {
+ sg_dma_len(sglist + i) = sg_dma_len(sg);
+ remaining_size -= sg_dma_len(sg);
+ sgcnt++;
+ }
+ }
+ dmahead->userbuf = (xlnk_intptr_type)sglist->dma_address;
+ pagelist = NULL;
+ pagecnt = 0;
+ } else if (user_flags & CF_FLAG_PHYSICALLY_CONTIGUOUS) {
+ size_t elem_cnt;
+
+ elem_cnt = DIV_ROUND_UP(size, XDMA_MAX_TRANS_LEN);
+ sglist = kmalloc_array(elem_cnt, sizeof(*sglist), GFP_KERNEL);
+ sgcnt = phy_buf_to_sgl(userbuf, size, sglist);
+ if (!sgcnt)
+ return -ENOMEM;
+
+ status = get_dma_ops(chan->dev)->map_sg(chan->dev,
+ sglist,
+ sgcnt,
+ dmadir,
+ attrs);
+
+ if (!status) {
+ pr_err("sg contiguous mapping failed\n");
+ return -ENOMEM;
+ }
+ pagelist = NULL;
+ pagecnt = 0;
+ } else {
+ status = pin_user_pages(userbuf,
+ size,
+ dmadir != DMA_TO_DEVICE,
+ &pagelist,
+ &pagecnt,
+ user_flags);
+ if (status < 0) {
+ pr_err("pin_user_pages failed\n");
+ return status;
+ }
+
+ status = get_dma_ops(chan->dev)->map_sg(chan->dev,
+ pagelist,
+ pagecnt,
+ dmadir,
+ attrs);
+ if (!status) {
+ pr_err("dma_map_sg failed\n");
+ unpin_user_pages(pagelist, pagecnt);
+ return -ENOMEM;
+ }
+
+ sglist = kmalloc_array(pagecnt, sizeof(*sglist), GFP_KERNEL);
+ if (sglist)
+ sgcnt = sgl_merge(pagelist, pagecnt, sglist);
+ if (!sgcnt) {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ pagelist,
+ pagecnt,
+ dmadir,
+ attrs);
+ unpin_user_pages(pagelist, pagecnt);
+ kfree(sglist);
+ return -ENOMEM;
+ }
+ }
+ dmahead->sglist = sglist;
+ dmahead->sgcnt = sgcnt;
+ dmahead->pagelist = pagelist;
+ dmahead->pagecnt = pagecnt;
+
+ /* skipping config */
+ init_completion(&dmahead->cmp);
+
+ if (nappwords_i > XDMA_MAX_APPWORDS)
+ nappwords_i = XDMA_MAX_APPWORDS;
+
+ if (nappwords_o > XDMA_MAX_APPWORDS)
+ nappwords_o = XDMA_MAX_APPWORDS;
+
+ dmahead->nappwords_o = nappwords_o;
+
+ status = xdma_setup_hw_desc(chan, dmahead, sglist, sgcnt,
+ dmadir, nappwords_i, appwords_i);
+ if (status) {
+ pr_err("setup hw desc failed\n");
+ if (dmahead->pagelist) {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ pagelist,
+ pagecnt,
+ dmadir,
+ attrs);
+ unpin_user_pages(pagelist, pagecnt);
+ } else if (!dp) {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ sglist,
+ sgcnt,
+ dmadir,
+ attrs);
+ }
+ kfree(dmahead->sglist);
+ return -ENOMEM;
+ }
+
+ *dmaheadpp = dmahead;
+ return 0;
+}
+EXPORT_SYMBOL(xdma_submit);
+
+int xdma_wait(struct xdma_head *dmahead,
+ unsigned int user_flags,
+ unsigned int *operating_flags)
+{
+ struct xdma_chan *chan = dmahead->chan;
+ unsigned long attrs = 0;
+
+ if (chan->poll_mode) {
+ xilinx_chan_desc_cleanup(chan);
+ *operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
+ } else {
+ if (*operating_flags & XDMA_FLAGS_TRYWAIT) {
+ if (!try_wait_for_completion(&dmahead->cmp))
+ return 0;
+ *operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
+ } else {
+ wait_for_completion(&dmahead->cmp);
+ *operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
+ }
+ }
+
+ if (!dmahead->dmabuf) {
+ if (!(user_flags & CF_FLAG_CACHE_FLUSH_INVALIDATE))
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+ if (user_flags & CF_FLAG_PHYSICALLY_CONTIGUOUS) {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ dmahead->sglist,
+ dmahead->sgcnt,
+ dmahead->dmadir,
+ attrs);
+ } else {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ dmahead->pagelist,
+ dmahead->pagecnt,
+ dmahead->dmadir,
+ attrs);
+ unpin_user_pages(dmahead->pagelist, dmahead->pagecnt);
+ }
+ }
+ kfree(dmahead->sglist);
+
+ return 0;
+}
+EXPORT_SYMBOL(xdma_wait);
+
+int xdma_getconfig(struct xdma_chan *chan,
+ unsigned char *irq_thresh,
+ unsigned char *irq_delay)
+{
+ *irq_thresh = (DMA_IN(&chan->regs->cr) >> XDMA_COALESCE_SHIFT) & 0xff;
+ *irq_delay = (DMA_IN(&chan->regs->cr) >> XDMA_DELAY_SHIFT) & 0xff;
+ return 0;
+}
+EXPORT_SYMBOL(xdma_getconfig);
+
+int xdma_setconfig(struct xdma_chan *chan,
+ unsigned char irq_thresh,
+ unsigned char irq_delay)
+{
+ unsigned long val;
+
+ if (dma_is_running(chan))
+ return -EBUSY;
+
+ val = DMA_IN(&chan->regs->cr);
+ val &= ~((0xff << XDMA_COALESCE_SHIFT) |
+ (0xff << XDMA_DELAY_SHIFT));
+ val |= ((irq_thresh << XDMA_COALESCE_SHIFT) |
+ (irq_delay << XDMA_DELAY_SHIFT));
+
+ DMA_OUT(&chan->regs->cr, val);
+ return 0;
+}
+EXPORT_SYMBOL(xdma_setconfig);
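
A sketch of how a client might tune interrupt coalescing with this pair of
helpers; the threshold and delay values below are arbitrary, and
xdma_setconfig() returns -EBUSY while the channel is running:

	unsigned char thresh, delay;

	xdma_getconfig(chan, &thresh, &delay);
	if (xdma_setconfig(chan, 8, XDMA_DELAY_MAX) == -EBUSY)
		pr_warn("channel busy, coalescing unchanged\n");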
+
+static const struct of_device_id gic_match[] = {
+ { .compatible = "arm,cortex-a9-gic", },
+ { .compatible = "arm,cortex-a15-gic", },
+ { },
+};
+
+static struct device_node *gic_node;
+
+unsigned int xlate_irq(unsigned int hwirq)
+{
+ struct of_phandle_args irq_data;
+ unsigned int irq;
+
+ if (!gic_node)
+ gic_node = of_find_matching_node(NULL, gic_match);
+
+ if (WARN_ON(!gic_node))
+ return hwirq;
+
+ irq_data.np = gic_node;
+ irq_data.args_count = 3;
+ irq_data.args[0] = 0;
+#if XLNK_SYS_BIT_WIDTH == 32
+ irq_data.args[1] = hwirq - 32; /* GIC SPI offset */
+#else
+ irq_data.args[1] = hwirq;
+#endif
+ irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;
+
+ irq = irq_create_of_mapping(&irq_data);
+ if (WARN_ON(!irq))
+ irq = hwirq;
+
+ pr_info("%s: hwirq %d, irq %d\n", __func__, hwirq, irq);
+
+ return irq;
+}
+
+/* Brute-force probing for Xilinx DMA */
+static int xdma_probe(struct platform_device *pdev)
+{
+ struct xdma_device *xdev;
+ struct resource *res;
+ int err, i, j;
+ struct xdma_chan *chan;
+ struct xdma_device_config *dma_config;
+ int dma_chan_dir;
+ int dma_chan_reg_offset;
+
+ pr_info("%s: probe dma %p, nres %d, id %d\n", __func__,
+ &pdev->dev, pdev->num_resources, pdev->id);
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(struct xdma_device), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+ xdev->dev = &pdev->dev;
+
+ /* Set this as configurable once HPC works */
+ arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, false);
+ dma_set_mask(&pdev->dev, 0xFFFFFFFFFFFFFFFFull);
+
+ dma_config = (struct xdma_device_config *)xdev->dev->platform_data;
+ if (dma_config->channel_count < 1 || dma_config->channel_count > 2)
+ return -EFAULT;
+
+ /* Get the memory resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xdev->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (!xdev->regs) {
+ dev_err(&pdev->dev, "unable to iomap registers\n");
+ return -EFAULT;
+ }
+
+ dev_info(&pdev->dev, "AXIDMA device %d physical base address=%pa\n",
+ pdev->id, &res->start);
+ dev_info(&pdev->dev, "AXIDMA device %d remapped to %pa\n",
+ pdev->id, &xdev->regs);
+
+ /* Allocate the channels */
+
+ dev_info(&pdev->dev, "has %d channel(s)\n", dma_config->channel_count);
+ for (i = 0; i < dma_config->channel_count; i++) {
+ chan = devm_kzalloc(&pdev->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ dma_chan_dir = strcmp(dma_config->channel_config[i].type,
+ "axi-dma-mm2s-channel") ?
+ DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+ dma_chan_reg_offset = (dma_chan_dir == DMA_TO_DEVICE) ?
+ 0 :
+ 0x30;
+
+ /* Initialize channel parameters */
+ chan->id = i;
+ chan->regs = xdev->regs + dma_chan_reg_offset;
+ chan->dev = xdev->dev;
+ chan->max_len = XDMA_MAX_TRANS_LEN;
+ chan->direction = dma_chan_dir;
+ sprintf(chan->name, "%s:%d", dma_config->name, chan->id);
+ pr_info(" chan %d name: %s\n", chan->id, chan->name);
+ pr_info(" chan %d direction: %s\n", chan->id,
+ dma_chan_dir == DMA_FROM_DEVICE ?
+ "FROM_DEVICE" : "TO_DEVICE");
+
+ spin_lock_init(&chan->lock);
+ tasklet_init(&chan->tasklet,
+ xdma_tasklet,
+ (unsigned long)chan);
+ tasklet_init(&chan->dma_err_tasklet,
+ xdma_err_tasklet,
+ (unsigned long)chan);
+
+ xdev->chan[chan->id] = chan;
+
+ /* The IRQ resource */
+ chan->irq = xlate_irq(dma_config->channel_config[i].irq);
+ if (chan->irq <= 0) {
+ pr_err("get_resource for IRQ for dev %d failed\n",
+ pdev->id);
+ return -ENODEV;
+ }
+
+ err = devm_request_irq(&pdev->dev,
+ chan->irq,
+ dma_chan_dir == DMA_TO_DEVICE ?
+ xdma_tx_intr_handler :
+ xdma_rx_intr_handler,
+ IRQF_SHARED,
+ pdev->name,
+ chan);
+ if (err) {
+ dev_err(&pdev->dev, "unable to request IRQ\n");
+ return err;
+ }
+ pr_info(" chan%d irq: %d\n", chan->id, chan->irq);
+
+ chan->poll_mode = dma_config->channel_config[i].poll_mode;
+ pr_info(" chan%d poll mode: %s\n",
+ chan->id,
+ chan->poll_mode ? "on" : "off");
+
+ /* Allocate channel BD's */
+ err = xdma_alloc_chan_descriptors(xdev->chan[chan->id]);
+ if (err) {
+ dev_err(&pdev->dev, "unable to allocate BD's\n");
+ return -ENOMEM;
+ }
+ pr_info(" chan%d bd ring @ 0x%p (size: 0x%x bytes)\n",
+ chan->id,
+ (void *)chan->bd_phys_addr,
+ chan->bd_chain_size);
+
+ err = dma_init(xdev->chan[chan->id]);
+ if (err) {
+ dev_err(&pdev->dev, "DMA init failed\n");
+ /* FIXME Check this - unregister all chan resources */
+ for (j = 0; j <= i; j++)
+ xdma_free_chan_resources(xdev->chan[j]);
+ return -EIO;
+ }
+ }
+ xdev->channel_count = dma_config->channel_count;
+ pdev->dev.release = xdma_release;
+ /* Add the DMA device to the global list */
+ mutex_lock(&dma_list_mutex);
+ list_add_tail(&xdev->node, &dma_device_list);
+ mutex_unlock(&dma_list_mutex);
+
+ platform_set_drvdata(pdev, xdev);
+
+ return 0;
+}
+
+static int xdma_remove(struct platform_device *pdev)
+{
+ int i;
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+
+ /* Remove the DMA device from the global list */
+ mutex_lock(&dma_list_mutex);
+ list_del(&xdev->node);
+ mutex_unlock(&dma_list_mutex);
+
+ for (i = 0; i < XDMA_MAX_CHANS_PER_DEVICE; i++) {
+ if (xdev->chan[i])
+ xdma_free_chan_resources(xdev->chan[i]);
+ }
+
+ return 0;
+}
+
+static struct platform_driver xdma_driver = {
+ .probe = xdma_probe,
+ .remove = xdma_remove,
+ .driver = {
+ .name = "xilinx-axidma",
+ },
+};
+
+module_platform_driver(xdma_driver);
+
+MODULE_DESCRIPTION("Xilinx DMA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/apf/xilinx-dma-apf.h b/drivers/staging/apf/xilinx-dma-apf.h
new file mode 100644
index 000000000000..8837fec01779
--- /dev/null
+++ b/drivers/staging/apf/xilinx-dma-apf.h
@@ -0,0 +1,234 @@
+/*
+ * Xilinx AXI DMA Engine support
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef __XILINX_DMA_APF_H
+#define __XILINX_DMA_APF_H
+
+/* ioctls */
+#include <linux/ioctl.h>
+
+/* tasklet */
+#include <linux/interrupt.h>
+
+/* dma stuff */
+#include <linux/dma-mapping.h>
+
+/* xlnk structures */
+#include "xlnk.h"
+#include "xlnk-sysdef.h"
+
+#define XDMA_IOC_MAGIC 'X'
+#define XDMA_IOCRESET _IO(XDMA_IOC_MAGIC, 0)
+#define XDMA_IOCREQUEST _IOWR(XDMA_IOC_MAGIC, 1, unsigned long)
+#define XDMA_IOCRELEASE _IOWR(XDMA_IOC_MAGIC, 2, unsigned long)
+#define XDMA_IOCSUBMIT _IOWR(XDMA_IOC_MAGIC, 3, unsigned long)
+#define XDMA_IOCWAIT _IOWR(XDMA_IOC_MAGIC, 4, unsigned long)
+#define XDMA_IOCGETCONFIG _IOWR(XDMA_IOC_MAGIC, 5, unsigned long)
+#define XDMA_IOCSETCONFIG _IOWR(XDMA_IOC_MAGIC, 6, unsigned long)
+#define XDMA_IOC_MAXNR 6
+
+/* Specific hardware configuration-related constants */
+#define XDMA_RESET_LOOP 1000000
+#define XDMA_HALT_LOOP 1000000
+#define XDMA_NO_CHANGE 0xFFFF
+
+/* General register bits definitions */
+#define XDMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */
+#define XDMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA engine */
+
+#define XDMA_SR_HALTED_MASK 0x00000001 /* DMA channel halted */
+#define XDMA_SR_IDLE_MASK 0x00000002 /* DMA channel idle */
+
+#define XDMA_SR_ERR_INTERNAL_MASK 0x00000010 /* Datamover internal err */
+#define XDMA_SR_ERR_SLAVE_MASK 0x00000020 /* Datamover slave err */
+#define XDMA_SR_ERR_DECODE_MASK 0x00000040 /* Datamover decode err */
+#define XDMA_SR_ERR_SG_INT_MASK 0x00000100 /* SG internal err */
+#define XDMA_SR_ERR_SG_SLV_MASK 0x00000200 /* SG slave err */
+#define XDMA_SR_ERR_SG_DEC_MASK 0x00000400 /* SG decode err */
+#define XDMA_SR_ERR_ALL_MASK 0x00000770 /* All errors */
+
+#define XDMA_XR_IRQ_IOC_MASK 0x00001000 /* Completion interrupt */
+#define XDMA_XR_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
+#define XDMA_XR_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
+#define XDMA_XR_IRQ_ALL_MASK 0x00007000 /* All interrupts */
+
+#define XDMA_XR_DELAY_MASK 0xFF000000 /* Delay timeout counter */
+#define XDMA_XR_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
+
+#define XDMA_DELAY_SHIFT 24
+#define XDMA_COALESCE_SHIFT 16
+
+#define XDMA_DELAY_MAX 0xFF /* Maximum delay counter value */
+#define XDMA_COALESCE_MAX 0xFF /* Maximum coalescing counter value */
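+
+/*
+ * Editorial sketch (not part of this patch) of how the coalescing and
+ * delay fields are packed into a control-register value with the masks
+ * above; "xdma_pack_irq_config" is a hypothetical helper name:
+ *
+ *	static inline u32 xdma_pack_irq_config(u32 cr, u8 coalesce, u8 delay)
+ *	{
+ *		cr &= ~(XDMA_XR_COALESCE_MASK | XDMA_XR_DELAY_MASK);
+ *		cr |= ((u32)coalesce << XDMA_COALESCE_SHIFT) &
+ *			XDMA_XR_COALESCE_MASK;
+ *		cr |= ((u32)delay << XDMA_DELAY_SHIFT) & XDMA_XR_DELAY_MASK;
+ *		return cr;
+ *	}
+ */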
+
+/* BD definitions for AXI DMA */
+#define XDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF
+#define XDMA_BD_STS_COMPL_MASK 0x80000000
+#define XDMA_BD_STS_ERR_MASK 0x70000000
+#define XDMA_BD_STS_ALL_MASK 0xF0000000
+
+/* DMA BD special bits definitions */
+#define XDMA_BD_SOP 0x08000000 /* Start of packet bit */
+#define XDMA_BD_EOP 0x04000000 /* End of packet bit */
+
+/* BD software flag definitions for AXI DMA */
+#define XDMA_BD_SF_POLL_MODE_MASK 0x00000002
+#define XDMA_BD_SF_SW_DONE_MASK 0x00000001
+
+/* driver defines */
+#define XDMA_MAX_BD_CNT 16384
+#define XDMA_MAX_CHANS_PER_DEVICE 2
+#define XDMA_MAX_TRANS_LEN 0x7FF000
+#define XDMA_MAX_APPWORDS 5
+#define XDMA_BD_CLEANUP_THRESHOLD ((XDMA_MAX_BD_CNT * 8) / 10)
+
+#define XDMA_FLAGS_WAIT_COMPLETE 1
+#define XDMA_FLAGS_TRYWAIT 2
+
+/*
+ * Platform data definitions, used when a DMA engine is registered at
+ * run time (see xlnk_dmaregister()) rather than described in the
+ * device tree.
+ */
+struct xdma_channel_config {
+ char *type;
+ unsigned int include_dre;
+ unsigned int datawidth;
+ unsigned int max_burst_len;
+ unsigned int irq;
+ unsigned int poll_mode;
+ unsigned int lite_mode;
+};
+
+struct xdma_device_config {
+ char *type;
+ char *name;
+ unsigned int include_sg;
+ unsigned int sg_include_stscntrl_strm; /* dma only */
+ unsigned int channel_count;
+ struct xdma_channel_config *channel_config;
+};
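+
+/*
+ * Editorial sketch of how a board file might instantiate this platform
+ * data (the device name, IRQ numbers and data widths are hypothetical;
+ * at run time xlnk_dmaregister() builds an equivalent structure):
+ *
+ *	static struct xdma_channel_config example_chans[] = {
+ *		{ .type = "axi-dma-mm2s-channel", .datawidth = 32, .irq = 60 },
+ *		{ .type = "axi-dma-s2mm-channel", .datawidth = 32, .irq = 61 },
+ *	};
+ *
+ *	static struct xdma_device_config example_cfg = {
+ *		.type = "axi-dma",
+ *		.name = "example-dma",
+ *		.include_sg = 1,
+ *		.channel_count = 2,
+ *		.channel_config = example_chans,
+ *	};
+ */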
+
+struct xdma_desc_hw {
+ xlnk_intptr_type next_desc; /* 0x00 */
+#if XLNK_SYS_BIT_WIDTH == 32
+ u32 pad1; /* 0x04 */
+#endif
+ xlnk_intptr_type src_addr; /* 0x08 */
+#if XLNK_SYS_BIT_WIDTH == 32
+ u32 pad2; /* 0x0c */
+#endif
+ u32 addr_vsize; /* 0x10 */
+ u32 hsize; /* 0x14 */
+ u32 control; /* 0x18 */
+ u32 status; /* 0x1c */
+ u32 app[5]; /* 0x20 */
+ xlnk_intptr_type dmahead;
+#if XLNK_SYS_BIT_WIDTH == 32
+ u32 Reserved0;
+#endif
+ u32 sw_flag; /* 0x3C */
+} __aligned(64);
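+
+/*
+ * Editorial note: the 64-byte alignment keeps each descriptor on a
+ * 16-word boundary, which the AXI DMA scatter-gather engine expects,
+ * so a contiguous array of xdma_desc_hw can be handed to the hardware
+ * directly as a BD ring.
+ */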
+
+/* shared by all Xilinx DMA engines */
+struct xdma_regs {
+ u32 cr; /* 0x00 Control Register */
+ u32 sr; /* 0x04 Status Register */
+ u32 cdr; /* 0x08 Current Descriptor Register */
+ u32 cdr_hi;
+ u32 tdr; /* 0x10 Tail Descriptor Register */
+ u32 tdr_hi;
+ u32 src; /* 0x18 Source Address Register (cdma) */
+ u32 src_hi;
+ u32 dst; /* 0x20 Destination Address Register (cdma) */
+ u32 dst_hi;
+ u32 btt_ref; /* 0x28 Bytes To Transfer (cdma) or
+ * park_ref (vdma)
+ */
+ u32 version; /* 0x2c version (vdma) */
+};
+
+/* Per DMA specific operations should be embedded in the channel structure */
+struct xdma_chan {
+ char name[64];
+ struct xdma_regs __iomem *regs;
+ struct device *dev; /* The dma device */
+ struct xdma_desc_hw *bds[XDMA_MAX_BD_CNT];
+ dma_addr_t bd_phys_addr;
+ u32 bd_chain_size;
+ int bd_cur;
+ int bd_tail;
+ unsigned int bd_used; /* # of BDs passed to hw chan */
+ enum dma_data_direction direction; /* Transfer direction */
+ int id; /* Channel ID */
+ int irq; /* Channel IRQ */
+ int poll_mode; /* Poll mode turned on? */
+ spinlock_t lock; /* Descriptor operation lock */
+ struct tasklet_struct tasklet; /* Cleanup work after irq */
+ struct tasklet_struct dma_err_tasklet; /* Cleanup work after irq */
+ int max_len; /* Maximum len per transfer */
+ int err; /* Channel has errors */
+ int client_count;
+};
+
+struct xdma_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct list_head node;
+ struct xdma_chan *chan[XDMA_MAX_CHANS_PER_DEVICE];
+ u8 channel_count;
+};
+
+struct xdma_head {
+ xlnk_intptr_type userbuf;
+ unsigned int size;
+ unsigned int dmaflag;
+ enum dma_data_direction dmadir;
+ struct scatterlist *sglist;
+ unsigned int sgcnt;
+ struct scatterlist *pagelist;
+ unsigned int pagecnt;
+ struct completion cmp;
+ struct xdma_chan *chan;
+ unsigned int nappwords_o;
+ u32 appwords_o[XDMA_MAX_APPWORDS];
+ unsigned int userflag;
+ u32 last_bd_index;
+ struct xlnk_dmabuf_reg *dmabuf;
+};
+
+struct xdma_chan *xdma_request_channel(char *name);
+void xdma_release_channel(struct xdma_chan *chan);
+void xdma_release_all_channels(void);
+int xdma_submit(struct xdma_chan *chan,
+ xlnk_intptr_type userbuf,
+ void *kaddr,
+ unsigned int size,
+ unsigned int nappwords_i,
+ u32 *appwords_i,
+ unsigned int nappwords_o,
+ unsigned int user_flags,
+ struct xdma_head **dmaheadpp,
+ struct xlnk_dmabuf_reg *dp);
+int xdma_wait(struct xdma_head *dmahead,
+ unsigned int user_flags,
+ unsigned int *operating_flags);
+int xdma_getconfig(struct xdma_chan *chan,
+ unsigned char *irq_thresh,
+ unsigned char *irq_delay);
+int xdma_setconfig(struct xdma_chan *chan,
+ unsigned char irq_thresh,
+ unsigned char irq_delay);
+unsigned int xlate_irq(unsigned int hwirq);
+
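+/*
+ * Usage sketch for the exported API (editorial; "example-dma" is a
+ * hypothetical channel name and error handling is abbreviated):
+ *
+ *	struct xdma_head *head;
+ *	unsigned int flags = 0;
+ *	struct xdma_chan *chan = xdma_request_channel("example-dma");
+ *
+ *	if (!chan)
+ *		return -ENODEV;
+ *	if (!xdma_submit(chan, user_addr, NULL, size, 0, NULL, 0,
+ *			 XDMA_FLAGS_WAIT_COMPLETE, &head, NULL))
+ *		xdma_wait(head, head->userflag, &flags);
+ *	xdma_release_channel(chan);
+ */
+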
+#endif
diff --git a/drivers/staging/apf/xlnk-eng.c b/drivers/staging/apf/xlnk-eng.c
new file mode 100644
index 000000000000..bc40128e93cf
--- /dev/null
+++ b/drivers/staging/apf/xlnk-eng.c
@@ -0,0 +1,242 @@
+/*
+ * Xilinx XLNK Engine Driver
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/uio_driver.h>
+
+
+#include "xlnk-eng.h"
+
+static DEFINE_MUTEX(xlnk_eng_list_mutex);
+static LIST_HEAD(xlnk_eng_list);
+
+int xlnk_eng_register_device(struct xlnk_eng_device *xlnk_dev)
+{
+ mutex_lock(&xlnk_eng_list_mutex);
+ /* todo: need to add more error checking */
+
+ list_add_tail(&xlnk_dev->global_node, &xlnk_eng_list);
+
+ mutex_unlock(&xlnk_eng_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(xlnk_eng_register_device);
+
+
+void xlnk_eng_unregister_device(struct xlnk_eng_device *xlnk_dev)
+{
+ mutex_lock(&xlnk_eng_list_mutex);
+ /* todo: need to add more error checking */
+
+ list_del(&xlnk_dev->global_node);
+
+ mutex_unlock(&xlnk_eng_list_mutex);
+}
+EXPORT_SYMBOL(xlnk_eng_unregister_device);
+
+struct xlnk_eng_device *xlnk_eng_request_by_name(char *name)
+{
+ struct xlnk_eng_device *device, *_d;
+ int found = 0;
+
+ mutex_lock(&xlnk_eng_list_mutex);
+
+ list_for_each_entry_safe(device, _d, &xlnk_eng_list, global_node) {
+ if (!strcmp(dev_name(device->dev), name)) {
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ device = device->alloc(device);
+ else
+ device = NULL;
+
+ mutex_unlock(&xlnk_eng_list_mutex);
+
+ return device;
+}
+EXPORT_SYMBOL(xlnk_eng_request_by_name);
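+
+/*
+ * Usage sketch (editorial; the device name is hypothetical).  A lookup
+ * that succeeds hands the device back through its ->alloc() hook, which
+ * enforces the single-user claim; ->free() releases that claim:
+ *
+ *	struct xlnk_eng_device *eng;
+ *
+ *	eng = xlnk_eng_request_by_name("f8000000.example-accel");
+ *	if (!eng)
+ *		return -EBUSY;	// unknown name or already claimed
+ *	// ... program the engine through its UIO mapping ...
+ *	eng->free(eng);
+ */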
+
+/**
+ * struct xilinx_xlnk_eng_device - device structure for xilinx_xlnk_eng
+ * @common: common device info
+ * @base: base address for device
+ * @lock: lock used by device
+ * @cnt: usage count
+ * @info: info for registering and unregistering uio device
+ */
+struct xilinx_xlnk_eng_device {
+ struct xlnk_eng_device common;
+ void __iomem *base;
+ spinlock_t lock;
+ int cnt;
+ struct uio_info *info;
+};
+
+static void xlnk_eng_release(struct device *dev)
+{
+ struct xilinx_xlnk_eng_device *xdev;
+ struct xlnk_eng_device *xlnk_dev;
+
+ xdev = dev_get_drvdata(dev);
+ if (!xdev)
+ return;
+ xlnk_dev = &xdev->common;
+
+ xlnk_dev->free(xlnk_dev);
+}
+
+#define DRIVER_NAME "xilinx-xlnk-eng"
+
+#define to_xilinx_xlnk(dev) container_of(dev, \
+ struct xilinx_xlnk_eng_device, common)
+
+static struct xlnk_eng_device *xilinx_xlnk_alloc(
+ struct xlnk_eng_device *xlnkdev)
+{
+ struct xilinx_xlnk_eng_device *xdev;
+ struct xlnk_eng_device *retdev;
+
+ xdev = to_xilinx_xlnk(xlnkdev);
+
+ if (xdev->cnt == 0) {
+ xdev->cnt++;
+ retdev = xlnkdev;
+ } else
+ retdev = NULL;
+
+ return retdev;
+}
+
+static void xilinx_xlnk_free(struct xlnk_eng_device *xlnkdev)
+{
+ struct xilinx_xlnk_eng_device *xdev;
+
+ xdev = to_xilinx_xlnk(xlnkdev);
+
+ xdev->cnt = 0;
+}
+
+static int xlnk_eng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct xilinx_xlnk_eng_device *xdev;
+ struct uio_info *info;
+ char *devname;
+
+ pr_info("xlnk_eng_probe ...\n");
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev) {
+ dev_err(&pdev->dev, "Not enough memory for device\n");
+ return -ENOMEM;
+ }
+
+ /* more error handling */
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "Not enough memory for device\n");
+ return -ENOMEM;
+ }
+ xdev->info = info;
+ devname = devm_kzalloc(&pdev->dev, 64, GFP_KERNEL);
+ if (!devname) {
+ dev_err(&pdev->dev, "Not enough memory for device\n");
+ return -ENOMEM;
+ }
+ snprintf(devname, 64, "%s.%d", DRIVER_NAME, pdev->id);
+ pr_info("uio name %s\n", devname);
+ /* iomap registers */
+
+ /* Get the data from the platform device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xdev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xdev->base)) {
+ dev_err(&pdev->dev, "unable to iomap registers\n");
+ return PTR_ERR(xdev->base);
+ }
+
+ dev_info(&pdev->dev, "physical base : %pa\n", &res->start);
+ dev_info(&pdev->dev, "register range : 0x%lx\n",
+ (unsigned long)resource_size(res));
+ dev_info(&pdev->dev, "base remapped to: 0x%p\n", xdev->base);
+
+ info->mem[0].addr = res->start;
+ info->mem[0].size = resource_size(res);
+ info->mem[0].memtype = UIO_MEM_PHYS;
+ info->mem[0].internal_addr = xdev->base;
+
+ info->name = devname;
+ info->version = "0.0.1";
+
+ info->irq = -1;
+
+ xdev->common.dev = &pdev->dev;
+
+ xdev->common.alloc = xilinx_xlnk_alloc;
+ xdev->common.free = xilinx_xlnk_free;
+ xdev->common.dev->release = xlnk_eng_release;
+
+ dev_set_drvdata(&pdev->dev, xdev);
+
+ spin_lock_init(&xdev->lock);
+
+ xdev->cnt = 0;
+
+ xlnk_eng_register_device(&xdev->common);
+
+ if (uio_register_device(&pdev->dev, info)) {
+ dev_err(&pdev->dev, "uio_register_device failed\n");
+ return -ENODEV;
+ }
+ dev_info(&pdev->dev, "xilinx-xlnk-eng uio registered\n");
+
+ return 0;
+}
+
+static int xlnk_eng_remove(struct platform_device *pdev)
+{
+ struct uio_info *info;
+ struct xilinx_xlnk_eng_device *xdev;
+
+ xdev = dev_get_drvdata(&pdev->dev);
+ info = xdev->info;
+
+ uio_unregister_device(info);
+ dev_info(&pdev->dev, "xilinx-xlnk-eng uio unregistered\n");
+ xlnk_eng_unregister_device(&xdev->common);
+
+ return 0;
+}
+
+static struct platform_driver xlnk_eng_driver = {
+ .probe = xlnk_eng_probe,
+ .remove = xlnk_eng_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(xlnk_eng_driver);
+
+MODULE_DESCRIPTION("Xilinx xlnk engine generic driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/apf/xlnk-eng.h b/drivers/staging/apf/xlnk-eng.h
new file mode 100644
index 000000000000..9f9519664705
--- /dev/null
+++ b/drivers/staging/apf/xlnk-eng.h
@@ -0,0 +1,33 @@
+/*
+ * Xilinx XLNK Engine Driver
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ */
+
+#ifndef XLNK_ENG_H
+#define XLNK_ENG_H
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+
+struct xlnk_eng_device {
+ struct list_head global_node;
+ struct xlnk_eng_device * (*alloc)(struct xlnk_eng_device *xdev);
+ void (*free)(struct xlnk_eng_device *xdev);
+ struct device *dev;
+};
+extern int xlnk_eng_register_device(struct xlnk_eng_device *xlnk_dev);
+extern void xlnk_eng_unregister_device(struct xlnk_eng_device *xlnk_dev);
+extern struct xlnk_eng_device *xlnk_eng_request_by_name(char *name);
+
+#endif
diff --git a/drivers/staging/apf/xlnk-ioctl.h b/drivers/staging/apf/xlnk-ioctl.h
new file mode 100644
index 000000000000..d909fa65459f
--- /dev/null
+++ b/drivers/staging/apf/xlnk-ioctl.h
@@ -0,0 +1,37 @@
+#ifndef _XLNK_IOCTL_H
+#define _XLNK_IOCTL_H
+
+#include <linux/ioctl.h>
+
+#define XLNK_IOC_MAGIC 'X'
+
+#define XLNK_IOCRESET _IO(XLNK_IOC_MAGIC, 0)
+
+#define XLNK_IOCALLOCBUF _IOWR(XLNK_IOC_MAGIC, 2, unsigned long)
+#define XLNK_IOCFREEBUF _IOWR(XLNK_IOC_MAGIC, 3, unsigned long)
+#define XLNK_IOCADDDMABUF _IOWR(XLNK_IOC_MAGIC, 4, unsigned long)
+#define XLNK_IOCCLEARDMABUF _IOWR(XLNK_IOC_MAGIC, 5, unsigned long)
+
+#define XLNK_IOCDMAREQUEST _IOWR(XLNK_IOC_MAGIC, 7, unsigned long)
+#define XLNK_IOCDMASUBMIT _IOWR(XLNK_IOC_MAGIC, 8, unsigned long)
+#define XLNK_IOCDMAWAIT _IOWR(XLNK_IOC_MAGIC, 9, unsigned long)
+#define XLNK_IOCDMARELEASE _IOWR(XLNK_IOC_MAGIC, 10, unsigned long)
+
+#define XLNK_IOCMEMOP _IOWR(XLNK_IOC_MAGIC, 25, unsigned long)
+#define XLNK_IOCDEVREGISTER _IOWR(XLNK_IOC_MAGIC, 16, unsigned long)
+#define XLNK_IOCDMAREGISTER _IOWR(XLNK_IOC_MAGIC, 17, unsigned long)
+#define XLNK_IOCDEVUNREGISTER _IOWR(XLNK_IOC_MAGIC, 18, unsigned long)
+#define XLNK_IOCCDMAREQUEST _IOWR(XLNK_IOC_MAGIC, 19, unsigned long)
+#define XLNK_IOCCDMASUBMIT _IOWR(XLNK_IOC_MAGIC, 20, unsigned long)
+#define XLNK_IOCMCDMAREGISTER _IOWR(XLNK_IOC_MAGIC, 23, unsigned long)
+#define XLNK_IOCCACHECTRL _IOWR(XLNK_IOC_MAGIC, 24, unsigned long)
+
+#define XLNK_IOCIRQREGISTER _IOWR(XLNK_IOC_MAGIC, 35, unsigned long)
+#define XLNK_IOCIRQUNREGISTER _IOWR(XLNK_IOC_MAGIC, 36, unsigned long)
+#define XLNK_IOCIRQWAIT _IOWR(XLNK_IOC_MAGIC, 37, unsigned long)
+
+#define XLNK_IOCSHUTDOWN _IOWR(XLNK_IOC_MAGIC, 100, unsigned long)
+#define XLNK_IOCRECRES _IOWR(XLNK_IOC_MAGIC, 101, unsigned long)
+#define XLNK_IOC_MAXNR 101
+
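+/*
+ * User-space usage sketch (editorial; assumes the /dev/xlnk character
+ * device node and the "union xlnk_args" layout from xlnk.h, plus the
+ * usual <fcntl.h> and <sys/ioctl.h> headers):
+ *
+ *	union xlnk_args args = { 0 };
+ *	int fd = open("/dev/xlnk", O_RDWR);
+ *
+ *	args.allocbuf.len = 4096;
+ *	args.allocbuf.cacheable = 0;
+ *	if (fd < 0 || ioctl(fd, XLNK_IOCALLOCBUF, &args) < 0)
+ *		exit(1);
+ *	// args.allocbuf.id and args.allocbuf.phyaddr are now filled in
+ */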
+#endif
diff --git a/drivers/staging/apf/xlnk-sysdef.h b/drivers/staging/apf/xlnk-sysdef.h
new file mode 100644
index 000000000000..b6334be3b9c4
--- /dev/null
+++ b/drivers/staging/apf/xlnk-sysdef.h
@@ -0,0 +1,34 @@
+#ifndef XLNK_SYSDEF_H
+#define XLNK_SYSDEF_H
+
+#if __SIZEOF_POINTER__ == 4
+ #define XLNK_SYS_BIT_WIDTH 32
+#elif __SIZEOF_POINTER__ == 8
+ #define XLNK_SYS_BIT_WIDTH 64
+#endif
+
+#include <linux/types.h>
+
+#if XLNK_SYS_BIT_WIDTH == 32
+
+ typedef u32 xlnk_intptr_type;
+ typedef s32 xlnk_int_type;
+ typedef u32 xlnk_uint_type;
+ typedef u8 xlnk_byte_type;
+ typedef s8 xlnk_char_type;
+ #define xlnk_enum_type s32
+
+#elif XLNK_SYS_BIT_WIDTH == 64
+
+ typedef u64 xlnk_intptr_type;
+ typedef s32 xlnk_int_type;
+ typedef u32 xlnk_uint_type;
+ typedef u8 xlnk_byte_type;
+ typedef s8 xlnk_char_type;
+ #define xlnk_enum_type s32
+
+#else
+ #error "Please define application bit width and system bit width"
+#endif
+
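+/*
+ * Editorial note: xlnk_intptr_type must be able to round-trip a kernel
+ * pointer.  A hypothetical compile-time check (not in this patch):
+ *
+ *	BUILD_BUG_ON(sizeof(xlnk_intptr_type) != sizeof(void *));
+ */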
+#endif
diff --git a/drivers/staging/apf/xlnk.c b/drivers/staging/apf/xlnk.c
new file mode 100644
index 000000000000..4701898cc5ec
--- /dev/null
+++ b/drivers/staging/apf/xlnk.c
@@ -0,0 +1,1580 @@
+/*
+ * xlnk.c
+ *
+ * Xilinx Accelerator driver support.
+ *
+ * Copyright (C) 2010 Xilinx Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* ----------------------------------- Host OS */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/dma-buf.h>
+
+#include <linux/string.h>
+
+#include <linux/uaccess.h>
+
+#include <linux/dmaengine.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/errno.h> /* error codes */
+#include <linux/dma-mapping.h> /* dma */
+#include <linux/of.h>
+#include <linux/list.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/uio_driver.h>
+#include <asm/cacheflush.h>
+#include <linux/semaphore.h>
+
+#include "xlnk-ioctl.h"
+#include "xlnk-sysdef.h"
+#include "xlnk.h"
+
+#ifdef CONFIG_XILINX_DMA_APF
+#include "xilinx-dma-apf.h"
+#endif
+
+#define DRIVER_NAME "xlnk"
+#define DRIVER_VERSION "0.2"
+
+static struct platform_device *xlnk_pdev;
+static struct device *xlnk_dev;
+
+static struct cdev xlnk_cdev;
+
+static struct class *xlnk_class;
+
+static s32 driver_major;
+
+static char *driver_name = DRIVER_NAME;
+
+static void *xlnk_dev_buf;
+static ssize_t xlnk_dev_size;
+static int xlnk_dev_vmas;
+
+#define XLNK_BUF_POOL_SIZE 4096
+static unsigned int xlnk_bufpool_size = XLNK_BUF_POOL_SIZE;
+static void *xlnk_bufpool[XLNK_BUF_POOL_SIZE];
+static void *xlnk_bufpool_alloc_point[XLNK_BUF_POOL_SIZE];
+static xlnk_intptr_type xlnk_userbuf[XLNK_BUF_POOL_SIZE];
+static int xlnk_buf_process[XLNK_BUF_POOL_SIZE];
+static dma_addr_t xlnk_phyaddr[XLNK_BUF_POOL_SIZE];
+static size_t xlnk_buflen[XLNK_BUF_POOL_SIZE];
+static unsigned int xlnk_bufcacheable[XLNK_BUF_POOL_SIZE];
+static spinlock_t xlnk_buf_lock;
+
+#define XLNK_IRQ_POOL_SIZE 256
+static struct xlnk_irq_control *xlnk_irq_set[XLNK_IRQ_POOL_SIZE];
+static spinlock_t xlnk_irq_lock;
+
+static int xlnk_open(struct inode *ip, struct file *filp);
+static int xlnk_release(struct inode *ip, struct file *filp);
+static long xlnk_ioctl(struct file *filp, unsigned int code,
+ unsigned long args);
+static ssize_t xlnk_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *offp);
+static ssize_t xlnk_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offp);
+static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma);
+static void xlnk_vma_open(struct vm_area_struct *vma);
+static void xlnk_vma_close(struct vm_area_struct *vma);
+
+static int xlnk_init_bufpool(void);
+static void xlnk_init_irqpool(void);
+
+LIST_HEAD(xlnk_dmabuf_list);
+
+static int xlnk_shutdown(unsigned long buf);
+static int xlnk_recover_resource(unsigned long buf);
+
+static const struct file_operations xlnk_fops = {
+ .open = xlnk_open,
+ .release = xlnk_release,
+ .read = xlnk_read,
+ .write = xlnk_write,
+ .unlocked_ioctl = xlnk_ioctl,
+ .mmap = xlnk_mmap,
+};
+
+#define MAX_XLNK_DMAS 128
+
+struct xlnk_device_pack {
+ char name[64];
+ struct platform_device pdev;
+ struct resource res[8];
+ struct uio_info *io_ptr;
+ int refs;
+
+#ifdef CONFIG_XILINX_DMA_APF
+ struct xdma_channel_config dma_chan_cfg[4]; /* Xilinx AXI DMA only */
+ struct xdma_device_config dma_dev_cfg; /* Xilinx AXI DMA only */
+#endif
+};
+
+static struct semaphore xlnk_devpack_sem;
+static struct xlnk_device_pack *xlnk_devpacks[MAX_XLNK_DMAS];
+static void xlnk_devpacks_init(void)
+{
+ unsigned int i;
+
+ sema_init(&xlnk_devpack_sem, 1);
+ for (i = 0; i < MAX_XLNK_DMAS; i++)
+ xlnk_devpacks[i] = NULL;
+}
+
+static struct xlnk_device_pack *xlnk_devpacks_alloc(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_XLNK_DMAS; i++) {
+ if (!xlnk_devpacks[i]) {
+ struct xlnk_device_pack *ret;
+
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+ if (!ret)
+ return NULL;
+ ret->pdev.id = i;
+ xlnk_devpacks[i] = ret;
+
+ return ret;
+ }
+ }
+
+ return NULL;
+}
+
+static void xlnk_devpacks_delete(struct xlnk_device_pack *devpack)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_XLNK_DMAS; i++)
+ if (xlnk_devpacks[i] == devpack)
+ xlnk_devpacks[i] = NULL;
+}
+
+static struct xlnk_device_pack *xlnk_devpacks_find(xlnk_intptr_type base)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_XLNK_DMAS; i++) {
+ if (xlnk_devpacks[i] &&
+ xlnk_devpacks[i]->res[0].start == base)
+ return xlnk_devpacks[i];
+ }
+ return NULL;
+}
+
+static void xlnk_devpacks_free(xlnk_intptr_type base)
+{
+ struct xlnk_device_pack *devpack;
+
+ down(&xlnk_devpack_sem);
+ devpack = xlnk_devpacks_find(base);
+ if (!devpack) {
+ up(&xlnk_devpack_sem);
+ return;
+ }
+ devpack->refs--;
+ if (devpack->refs) {
+ up(&xlnk_devpack_sem);
+ return;
+ }
+ platform_device_unregister(&devpack->pdev);
+ xlnk_devpacks_delete(devpack);
+ kfree(devpack);
+ up(&xlnk_devpack_sem);
+}
+
+static void xlnk_devpacks_free_all(void)
+{
+ struct xlnk_device_pack *devpack;
+ unsigned int i;
+
+ for (i = 0; i < MAX_XLNK_DMAS; i++) {
+ devpack = xlnk_devpacks[i];
+ if (devpack) {
+ if (devpack->io_ptr) {
+ uio_unregister_device(devpack->io_ptr);
+ kfree(devpack->io_ptr);
+ } else {
+ platform_device_unregister(&devpack->pdev);
+ }
+ xlnk_devpacks_delete(devpack);
+ kfree(devpack);
+ }
+ }
+}
+
+static int xlnk_probe(struct platform_device *pdev)
+{
+ int err;
+ dev_t dev = 0;
+
+ xlnk_dev_buf = NULL;
+ xlnk_dev_size = 0;
+ xlnk_dev_vmas = 0;
+
+ /* use 2.6 device model */
+ err = alloc_chrdev_region(&dev, 0, 1, driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "%s: Can't get major %d\n",
+ __func__, driver_major);
+ goto err1;
+ }
+
+ cdev_init(&xlnk_cdev, &xlnk_fops);
+
+ xlnk_cdev.owner = THIS_MODULE;
+
+ err = cdev_add(&xlnk_cdev, dev, 1);
+
+ if (err) {
+ dev_err(&pdev->dev, "%s: Failed to add XLNK device\n",
+ __func__);
+ goto err3;
+ }
+
+ /* udev support */
+ xlnk_class = class_create(THIS_MODULE, "xlnk");
+ if (IS_ERR(xlnk_class)) {
+ dev_err(&pdev->dev, "%s: Error creating xlnk class\n",
+ __func__);
+ err = PTR_ERR(xlnk_class);
+ goto err3;
+ }
+
+ driver_major = MAJOR(dev);
+
+ dev_info(&pdev->dev, "Major %d\n", driver_major);
+
+ device_create(xlnk_class, NULL, MKDEV(driver_major, 0),
+ NULL, "xlnk");
+
+ err = xlnk_init_bufpool();
+ if (err) {
+ dev_err(&pdev->dev, "%s: Failed to allocate buffer pool\n",
+ __func__);
+ goto err3;
+ }
+
+ xlnk_init_irqpool();
+
+ dev_info(&pdev->dev, "%s driver loaded\n", DRIVER_NAME);
+
+ xlnk_pdev = pdev;
+ xlnk_dev = &pdev->dev;
+
+ if (xlnk_pdev)
+ dev_info(&pdev->dev, "xlnk_pdev is not null\n");
+ else
+ dev_info(&pdev->dev, "xlnk_pdev is null\n");
+
+ xlnk_devpacks_init();
+
+ return 0;
+err3:
+ cdev_del(&xlnk_cdev);
+ unregister_chrdev_region(dev, 1);
+err1:
+ return err;
+}
+
+static int xlnk_buf_findnull(void)
+{
+ int i;
+
+ for (i = 1; i < xlnk_bufpool_size; i++) {
+ if (!xlnk_bufpool[i])
+ return i;
+ }
+
+ return 0;
+}
+
+static int xlnk_buf_find_by_phys_addr(xlnk_intptr_type addr)
+{
+ int i;
+
+ for (i = 1; i < xlnk_bufpool_size; i++) {
+ if (xlnk_bufpool[i] &&
+ xlnk_phyaddr[i] <= addr &&
+ xlnk_phyaddr[i] + xlnk_buflen[i] > addr)
+ return i;
+ }
+
+ return 0;
+}
+
+static int xlnk_buf_find_by_user_addr(xlnk_intptr_type addr, int pid)
+{
+ int i;
+
+ for (i = 1; i < xlnk_bufpool_size; i++) {
+ if (xlnk_bufpool[i] &&
+ xlnk_buf_process[i] == pid &&
+ xlnk_userbuf[i] <= addr &&
+ xlnk_userbuf[i] + xlnk_buflen[i] > addr)
+ return i;
+ }
+
+ return 0;
+}
+
+/*
+ * allocate and return an id
+ * id must be a positive number
+ */
+static int xlnk_allocbuf(unsigned int len, unsigned int cacheable)
+{
+ int id;
+ void *kaddr;
+ dma_addr_t phys_addr_anchor;
+ unsigned long attrs;
+
+ attrs = cacheable ? DMA_ATTR_NON_CONSISTENT : 0;
+
+ kaddr = dma_alloc_attrs(xlnk_dev,
+ len,
+ &phys_addr_anchor,
+ GFP_KERNEL | GFP_DMA,
+ attrs);
+ if (!kaddr)
+ return -ENOMEM;
+
+ spin_lock(&xlnk_buf_lock);
+ id = xlnk_buf_findnull();
+ if (id > 0 && id < XLNK_BUF_POOL_SIZE) {
+ xlnk_bufpool_alloc_point[id] = kaddr;
+ xlnk_bufpool[id] = kaddr;
+ xlnk_buflen[id] = len;
+ xlnk_bufcacheable[id] = cacheable;
+ xlnk_phyaddr[id] = phys_addr_anchor;
+ }
+ spin_unlock(&xlnk_buf_lock);
+
+ if (id <= 0 || id >= XLNK_BUF_POOL_SIZE) {
+ dma_free_attrs(xlnk_dev, len, kaddr, phys_addr_anchor, attrs);
+ return -ENOMEM;
+ }
+
+ return id;
+}
+
+static int xlnk_init_bufpool(void)
+{
+ unsigned int i;
+
+ spin_lock_init(&xlnk_buf_lock);
+ xlnk_dev_buf = kmalloc(8192, GFP_KERNEL | GFP_DMA);
+ if (!xlnk_dev_buf) {
+ dev_err(xlnk_dev, "%s: malloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ *((char *)xlnk_dev_buf) = '\0';
+
+ xlnk_bufpool[0] = xlnk_dev_buf;
+ for (i = 1; i < xlnk_bufpool_size; i++)
+ xlnk_bufpool[i] = NULL;
+
+ return 0;
+}
+
+static void xlnk_init_irqpool(void)
+{
+ int i;
+
+ spin_lock_init(&xlnk_irq_lock);
+ for (i = 0; i < XLNK_IRQ_POOL_SIZE; i++)
+ xlnk_irq_set[i] = NULL;
+}
+
+#define XLNK_SUSPEND NULL
+#define XLNK_RESUME NULL
+
+static int xlnk_remove(struct platform_device *pdev)
+{
+ dev_t devno;
+
+ kfree(xlnk_dev_buf);
+ xlnk_dev_buf = NULL;
+
+ devno = MKDEV(driver_major, 0);
+ cdev_del(&xlnk_cdev);
+ unregister_chrdev_region(devno, 1);
+ if (xlnk_class) {
+ /* remove the device from sysfs */
+ device_destroy(xlnk_class, MKDEV(driver_major, 0));
+ class_destroy(xlnk_class);
+ }
+
+ xlnk_devpacks_free_all();
+
+ return 0;
+}
+
+static const struct of_device_id xlnk_match[] = {
+ { .compatible = "xlnx,xlnk-1.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xlnk_match);
+
+static struct platform_driver xlnk_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xlnk_match,
+ },
+ .probe = xlnk_probe,
+ .remove = xlnk_remove,
+ .suspend = XLNK_SUSPEND,
+ .resume = XLNK_RESUME,
+};
+
+static u64 dma_mask = 0xFFFFFFFFFFFFFFFFull;
+
+/*
+ * This function is called when an application opens handle to the
+ * bridge driver.
+ */
+static int xlnk_open(struct inode *ip, struct file *filp)
+{
+ if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
+ xlnk_dev_size = 0;
+
+ return 0;
+}
+
+static ssize_t xlnk_read(struct file *filp,
+ char __user *buf,
+ size_t count,
+ loff_t *offp)
+{
+ ssize_t retval = 0;
+
+ if (*offp >= xlnk_dev_size)
+ goto out;
+
+ if (*offp + count > xlnk_dev_size)
+ count = xlnk_dev_size - *offp;
+
+ if (copy_to_user(buf, xlnk_dev_buf + *offp, count)) {
+ retval = -EFAULT;
+ goto out;
+ }
+ *offp += count;
+ retval = count;
+
+ out:
+ return retval;
+}
+
+static ssize_t xlnk_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ ssize_t retval = 0;
+
+ /* xlnk_dev_buf is a fixed 8192-byte scratch buffer; clamp the write */
+ if (*offp >= 8192)
+ return -ENOSPC;
+ if (*offp + count > 8192)
+ count = 8192 - *offp;
+
+ if (copy_from_user(xlnk_dev_buf + *offp, buf, count)) {
+ retval = -EFAULT;
+ goto out;
+ }
+ *offp += count;
+ retval = count;
+
+ if (xlnk_dev_size < *offp)
+ xlnk_dev_size = *offp;
+
+ out:
+ return retval;
+}
+
+/*
+ * This function is called when an application closes handle to the bridge
+ * driver.
+ */
+static int xlnk_release(struct inode *ip, struct file *filp)
+{
+ return 0;
+}
+
+static int xlnk_devregister(char *name,
+ unsigned int id,
+ xlnk_intptr_type base,
+ unsigned int size,
+ unsigned int *irqs,
+ xlnk_intptr_type *handle)
+{
+ unsigned int nres;
+ unsigned int nirq;
+ unsigned int *irqptr;
+ struct xlnk_device_pack *devpack;
+ unsigned int i;
+ int status;
+
+ down(&xlnk_devpack_sem);
+ devpack = xlnk_devpacks_find(base);
+ if (devpack) {
+ *handle = (xlnk_intptr_type)devpack;
+ devpack->refs++;
+ status = 0;
+ } else {
+ nirq = 0;
+ irqptr = irqs;
+
+ while (nirq < 8 && *irqptr) {
+ nirq++;
+ irqptr++;
+ }
+
+ if (nirq > 7) {
+ up(&xlnk_devpack_sem);
+ return -ENOMEM;
+ }
+
+ nres = nirq + 1;
+
+ devpack = xlnk_devpacks_alloc();
+ if (!devpack) {
+ up(&xlnk_devpack_sem);
+ pr_err("Failed to allocate device %s\n", name);
+ return -ENOMEM;
+ }
+ devpack->io_ptr = NULL;
+ strcpy(devpack->name, name);
+ devpack->pdev.name = devpack->name;
+
+ devpack->pdev.dev.dma_mask = &dma_mask;
+ devpack->pdev.dev.coherent_dma_mask = dma_mask;
+
+ devpack->res[0].start = base;
+ devpack->res[0].end = base + size - 1;
+ devpack->res[0].flags = IORESOURCE_MEM;
+
+ for (i = 0; i < nirq; i++) {
+ devpack->res[i + 1].start = irqs[i];
+ devpack->res[i + 1].end = irqs[i];
+ devpack->res[i + 1].flags = IORESOURCE_IRQ;
+ }
+
+ devpack->pdev.resource = devpack->res;
+ devpack->pdev.num_resources = nres;
+
+ status = platform_device_register(&devpack->pdev);
+ if (status) {
+ xlnk_devpacks_delete(devpack);
+ kfree(devpack);
+ *handle = 0;
+ } else {
+ *handle = (xlnk_intptr_type)devpack;
+ }
+ }
+ up(&xlnk_devpack_sem);
+
+ return status;
+}
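+
+/*
+ * Editorial sketch of a registration (base address, size and IRQ list
+ * are hypothetical).  The zero-terminated IRQ list becomes res[1..n]
+ * with IORESOURCE_IRQ, after res[0] carries the MMIO window:
+ *
+ *	xlnk_intptr_type handle;
+ *	unsigned int irqs[8] = { 89, 0 };
+ *
+ *	xlnk_devregister("example-accel", 0, 0x40000000, 0x10000,
+ *			 irqs, &handle);
+ */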
+
+static int xlnk_dmaregister(char *name,
+ unsigned int id,
+ xlnk_intptr_type base,
+ unsigned int size,
+ unsigned int chan_num,
+ unsigned int chan0_dir,
+ unsigned int chan0_irq,
+ unsigned int chan0_poll_mode,
+ unsigned int chan0_include_dre,
+ unsigned int chan0_data_width,
+ unsigned int chan1_dir,
+ unsigned int chan1_irq,
+ unsigned int chan1_poll_mode,
+ unsigned int chan1_include_dre,
+ unsigned int chan1_data_width,
+ xlnk_intptr_type *handle)
+{
+ int status = 0;
+
+#ifdef CONFIG_XILINX_DMA_APF
+
+ struct xlnk_device_pack *devpack;
+
+ if (chan_num < 1 || chan_num > 2) {
+ pr_err("%s: Expected either 1 or 2 channels, got %d\n",
+ __func__, chan_num);
+ return -EINVAL;
+ }
+
+ down(&xlnk_devpack_sem);
+ devpack = xlnk_devpacks_find(base);
+ if (devpack) {
+ *handle = (xlnk_intptr_type)devpack;
+ devpack->refs++;
+ status = 0;
+ } else {
+ devpack = xlnk_devpacks_alloc();
+ if (!devpack) {
+ up(&xlnk_devpack_sem);
+ return -ENOMEM;
+ }
+ strcpy(devpack->name, name);
+ devpack->pdev.name = "xilinx-axidma";
+
+ devpack->io_ptr = NULL;
+
+ devpack->dma_chan_cfg[0].include_dre = chan0_include_dre;
+ devpack->dma_chan_cfg[0].datawidth = chan0_data_width;
+ devpack->dma_chan_cfg[0].irq = chan0_irq;
+ devpack->dma_chan_cfg[0].poll_mode = chan0_poll_mode;
+ devpack->dma_chan_cfg[0].type =
+ (chan0_dir == XLNK_DMA_FROM_DEVICE) ?
+ "axi-dma-s2mm-channel" :
+ "axi-dma-mm2s-channel";
+
+ if (chan_num > 1) {
+ devpack->dma_chan_cfg[1].include_dre =
+ chan1_include_dre;
+ devpack->dma_chan_cfg[1].datawidth = chan1_data_width;
+ devpack->dma_chan_cfg[1].irq = chan1_irq;
+ devpack->dma_chan_cfg[1].poll_mode = chan1_poll_mode;
+ devpack->dma_chan_cfg[1].type =
+ (chan1_dir == XLNK_DMA_FROM_DEVICE) ?
+ "axi-dma-s2mm-channel" :
+ "axi-dma-mm2s-channel";
+ }
+
+ devpack->dma_dev_cfg.name = devpack->name;
+ devpack->dma_dev_cfg.type = "axi-dma";
+ devpack->dma_dev_cfg.include_sg = 1;
+ devpack->dma_dev_cfg.sg_include_stscntrl_strm = 1;
+ devpack->dma_dev_cfg.channel_count = chan_num;
+ devpack->dma_dev_cfg.channel_config = &devpack->dma_chan_cfg[0];
+
+ devpack->pdev.dev.platform_data = &devpack->dma_dev_cfg;
+
+ devpack->pdev.dev.dma_mask = &dma_mask;
+ devpack->pdev.dev.coherent_dma_mask = dma_mask;
+
+ devpack->res[0].start = base;
+ devpack->res[0].end = base + size - 1;
+ devpack->res[0].flags = IORESOURCE_MEM;
+
+ devpack->pdev.resource = devpack->res;
+ devpack->pdev.num_resources = 1;
+ status = platform_device_register(&devpack->pdev);
+ if (status) {
+ xlnk_devpacks_delete(devpack);
+ kfree(devpack);
+ *handle = 0;
+ } else {
+ *handle = (xlnk_intptr_type)devpack;
+ }
+ }
+ up(&xlnk_devpack_sem);
+
+#endif
+ return status;
+}
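+
+/*
+ * Editorial sketch of a one-channel S2MM registration (addresses and
+ * IRQ number are hypothetical).  The resulting platform device is named
+ * "xilinx-axidma", so it binds to the xdma_probe() driver above:
+ *
+ *	xlnk_intptr_type handle;
+ *
+ *	xlnk_dmaregister("dma0", 0, 0x40400000, 0x10000,
+ *			 1, XLNK_DMA_FROM_DEVICE, 61, 0, 0, 32,
+ *			 0, 0, 0, 0, 0, &handle);
+ */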
+
+static int xlnk_allocbuf_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ xlnk_int_type id;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ id = xlnk_allocbuf(temp_args.allocbuf.len,
+ temp_args.allocbuf.cacheable);
+
+ if (id <= 0)
+ return -ENOMEM;
+
+ temp_args.allocbuf.id = id;
+ temp_args.allocbuf.phyaddr = (xlnk_intptr_type)(xlnk_phyaddr[id]);
+ status = copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(union xlnk_args));
+
+ return status;
+}
+
+static int xlnk_freebuf(int id)
+{
+ void *alloc_point;
+ dma_addr_t p_addr;
+ size_t buf_len;
+ int cacheable;
+ unsigned long attrs;
+
+ if (id <= 0 || id >= xlnk_bufpool_size)
+ return -ENOMEM;
+
+ if (!xlnk_bufpool[id])
+ return -ENOMEM;
+
+ spin_lock(&xlnk_buf_lock);
+ alloc_point = xlnk_bufpool_alloc_point[id];
+ p_addr = xlnk_phyaddr[id];
+ buf_len = xlnk_buflen[id];
+ xlnk_bufpool[id] = NULL;
+ xlnk_phyaddr[id] = 0;
+ xlnk_buflen[id] = 0;
+ cacheable = xlnk_bufcacheable[id];
+ xlnk_bufcacheable[id] = 0;
+ spin_unlock(&xlnk_buf_lock);
+
+ attrs = cacheable ? DMA_ATTR_NON_CONSISTENT : 0;
+
+ dma_free_attrs(xlnk_dev,
+ buf_len,
+ alloc_point,
+ p_addr,
+ attrs);
+
+ return 0;
+}
+
+static void xlnk_free_all_buf(void)
+{
+ int i;
+
+ for (i = 1; i < xlnk_bufpool_size; i++)
+ xlnk_freebuf(i);
+}
+
+static int xlnk_freebuf_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ int id;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ id = temp_args.freebuf.id;
+ return xlnk_freebuf(id);
+}
+
+static int xlnk_adddmabuf_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ struct xlnk_dmabuf_reg *db;
+ int status;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ spin_lock(&xlnk_buf_lock);
+ list_for_each_entry(db, &xlnk_dmabuf_list, list) {
+ if (db->user_vaddr == temp_args.dmabuf.user_addr) {
+ pr_err("Attempting to register DMA-BUF for addr %llx that is already registered\n",
+ (unsigned long long)temp_args.dmabuf.user_addr);
+ spin_unlock(&xlnk_buf_lock);
+ return -EINVAL;
+ }
+ }
+ spin_unlock(&xlnk_buf_lock);
+
+ db = kzalloc(sizeof(*db), GFP_KERNEL);
+ if (!db)
+ return -ENOMEM;
+
+ db->dmabuf_fd = temp_args.dmabuf.dmabuf_fd;
+ db->user_vaddr = temp_args.dmabuf.user_addr;
+ db->dbuf = dma_buf_get(db->dmabuf_fd);
+ if (IS_ERR(db->dbuf)) {
+ pr_err("Failed to get DMA-BUF for fd %d\n", db->dmabuf_fd);
+ kfree(db);
+ return -EINVAL;
+ }
+ db->dbuf_attach = dma_buf_attach(db->dbuf, xlnk_dev);
+ if (IS_ERR(db->dbuf_attach)) {
+ dma_buf_put(db->dbuf);
+ pr_err("Failed DMA-BUF attach\n");
+ kfree(db);
+ return -EINVAL;
+ }
+
+ db->dbuf_sg_table = dma_buf_map_attachment(db->dbuf_attach,
+ DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(db->dbuf_sg_table)) {
+ pr_err("Failed DMA-BUF map_attachment\n");
+ dma_buf_detach(db->dbuf, db->dbuf_attach);
+ dma_buf_put(db->dbuf);
+ kfree(db);
+ return -EINVAL;
+ }
+
+ spin_lock(&xlnk_buf_lock);
+ INIT_LIST_HEAD(&db->list);
+ list_add_tail(&db->list, &xlnk_dmabuf_list);
+ spin_unlock(&xlnk_buf_lock);
+
+ return 0;
+}
+
+static int xlnk_cleardmabuf_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ struct xlnk_dmabuf_reg *dp, *dp_temp;
+ int status;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ spin_lock(&xlnk_buf_lock);
+ list_for_each_entry_safe(dp, dp_temp, &xlnk_dmabuf_list, list) {
+ if (dp->user_vaddr == temp_args.dmabuf.user_addr) {
+ dma_buf_unmap_attachment(dp->dbuf_attach,
+ dp->dbuf_sg_table,
+ DMA_BIDIRECTIONAL);
+ dma_buf_detach(dp->dbuf, dp->dbuf_attach);
+ dma_buf_put(dp->dbuf);
+ list_del(&dp->list);
+ spin_unlock(&xlnk_buf_lock);
+ kfree(dp);
+ return 0;
+ }
+ }
+ spin_unlock(&xlnk_buf_lock);
+ pr_err("Attempting to unregister a DMA-BUF that was not registered at addr %llx\n",
+ (unsigned long long)temp_args.dmabuf.user_addr);
+
+ return -EINVAL;
+}
+
+static int xlnk_dmarequest_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+#ifdef CONFIG_XILINX_DMA_APF
+ union xlnk_args temp_args;
+ int status;
+ struct xdma_chan *chan;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ if (!temp_args.dmarequest.name[0])
+ return 0;
+
+ down(&xlnk_devpack_sem);
+ chan = xdma_request_channel(temp_args.dmarequest.name);
+ up(&xlnk_devpack_sem);
+ if (!chan)
+ return -ENOMEM;
+ temp_args.dmarequest.dmachan = (xlnk_intptr_type)chan;
+ temp_args.dmarequest.bd_space_phys_addr = chan->bd_phys_addr;
+ temp_args.dmarequest.bd_space_size = chan->bd_chain_size;
+
+ if (copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(union xlnk_args)))
+ return -EFAULT;
+
+ return 0;
+#else
+ return -ENODEV;
+#endif
+}
+
+static int xlnk_dmasubmit_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+#ifdef CONFIG_XILINX_DMA_APF
+ union xlnk_args temp_args;
+ struct xdma_head *dmahead;
+ struct xlnk_dmabuf_reg *dp, *cp = NULL;
+ int buf_id;
+ void *kaddr = NULL;
+ int status;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ if (!temp_args.dmasubmit.dmachan)
+ return -ENODEV;
+
+ spin_lock(&xlnk_buf_lock);
+ buf_id = xlnk_buf_find_by_phys_addr(temp_args.dmasubmit.buf);
+ if (buf_id) {
+ xlnk_intptr_type addr_delta =
+ temp_args.dmasubmit.buf -
+ xlnk_phyaddr[buf_id];
+ kaddr = (u8 *)(xlnk_bufpool[buf_id]) + addr_delta;
+ } else {
+ list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
+ if (dp->user_vaddr == temp_args.dmasubmit.buf) {
+ cp = dp;
+ break;
+ }
+ }
+ }
+ spin_unlock(&xlnk_buf_lock);
+
+ status = xdma_submit((struct xdma_chan *)
+ (temp_args.dmasubmit.dmachan),
+ temp_args.dmasubmit.buf,
+ kaddr,
+ temp_args.dmasubmit.len,
+ temp_args.dmasubmit.nappwords_i,
+ temp_args.dmasubmit.appwords_i,
+ temp_args.dmasubmit.nappwords_o,
+ temp_args.dmasubmit.flag,
+ &dmahead,
+ cp);
+
+ if (status)
+ return status;
+
+ temp_args.dmasubmit.dmahandle = (xlnk_intptr_type)dmahead;
+ temp_args.dmasubmit.last_bd_index =
+ (xlnk_intptr_type)dmahead->last_bd_index;
+
+ if (copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(union xlnk_args)))
+ return -EFAULT;
+ return 0;
+#endif
+ return -ENODEV;
+}
+
+static int xlnk_dmawait_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ int status = -1;
+#ifdef CONFIG_XILINX_DMA_APF
+ union xlnk_args temp_args;
+ struct xdma_head *dmahead;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ dmahead = (struct xdma_head *)temp_args.dmawait.dmahandle;
+ status = xdma_wait(dmahead,
+ dmahead->userflag,
+ &temp_args.dmawait.flags);
+ if (temp_args.dmawait.flags & XDMA_FLAGS_WAIT_COMPLETE) {
+ if (temp_args.dmawait.nappwords) {
+ memcpy(temp_args.dmawait.appwords,
+ dmahead->appwords_o,
+ dmahead->nappwords_o * sizeof(u32));
+ }
+ kfree(dmahead);
+ }
+ if (copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(union xlnk_args)))
+ return -EFAULT;
+#endif
+
+ return status;
+}
+
+static int xlnk_dmarelease_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ int status = -1;
+#ifdef CONFIG_XILINX_DMA_APF
+ union xlnk_args temp_args;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+ down(&xlnk_devpack_sem);
+ xdma_release_channel((struct xdma_chan *)
+ (temp_args.dmarelease.dmachan));
+ up(&xlnk_devpack_sem);
+#endif
+
+ return status;
+}
+
+static int xlnk_devregister_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ xlnk_intptr_type handle;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ status = xlnk_devregister(temp_args.devregister.name,
+ temp_args.devregister.id,
+ temp_args.devregister.base,
+ temp_args.devregister.size,
+ temp_args.devregister.irqs,
+ &handle);
+
+ return status;
+}
+
+static int xlnk_dmaregister_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ xlnk_intptr_type handle;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ status = xlnk_dmaregister(temp_args.dmaregister.name,
+ temp_args.dmaregister.id,
+ temp_args.dmaregister.base,
+ temp_args.dmaregister.size,
+ temp_args.dmaregister.chan_num,
+ temp_args.dmaregister.chan0_dir,
+ temp_args.dmaregister.chan0_irq,
+ temp_args.dmaregister.chan0_poll_mode,
+ temp_args.dmaregister.chan0_include_dre,
+ temp_args.dmaregister.chan0_data_width,
+ temp_args.dmaregister.chan1_dir,
+ temp_args.dmaregister.chan1_irq,
+ temp_args.dmaregister.chan1_poll_mode,
+ temp_args.dmaregister.chan1_include_dre,
+ temp_args.dmaregister.chan1_data_width,
+ &handle);
+
+ return status;
+}
+
+static int xlnk_devunregister_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ xlnk_devpacks_free(temp_args.devunregister.base);
+
+ return 0;
+}
+
+static irqreturn_t xlnk_accel_isr(int irq, void *arg)
+{
+ struct xlnk_irq_control *irq_control = (struct xlnk_irq_control *)arg;
+
+ disable_irq_nosync(irq);
+ complete(&irq_control->cmp);
+
+ return IRQ_HANDLED;
+}
+
+static int xlnk_irq_register_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ int i;
+ struct xlnk_irq_control *ctrl;
+ int irq_id = -1;
+ int irq_entry_new = 0;
+
+ status = copy_from_user(&temp_args,
+ (void __user *)args,
+ sizeof(temp_args.irqregister));
+ if (status)
+ return -ENOMEM;
+
+ if (temp_args.irqregister.type !=
+ (XLNK_IRQ_LEVEL | XLNK_IRQ_ACTIVE_HIGH)) {
+ dev_err(xlnk_dev, "Unsupported interrupt type %x\n",
+ temp_args.irqregister.type);
+ return -EINVAL;
+ }
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->irq = xlate_irq(temp_args.irqregister.irq);
+ ctrl->enabled = 0;
+ init_completion(&ctrl->cmp);
+
+ spin_lock(&xlnk_irq_lock);
+ for (i = 0; i < XLNK_IRQ_POOL_SIZE; i++) {
+ if (!xlnk_irq_set[i] && irq_id == -1) {
+ irq_entry_new = 1;
+ irq_id = i;
+ xlnk_irq_set[i] = ctrl;
+ } else if (xlnk_irq_set[i] &&
+ xlnk_irq_set[i]->irq == ctrl->irq) {
+ irq_id = i;
+ break;
+ }
+ }
+ spin_unlock(&xlnk_irq_lock);
+
+ if (irq_id == -1) {
+ kfree(ctrl);
+ return -ENOMEM;
+ }
+
+ if (!irq_entry_new) {
+ kfree(ctrl);
+ } else {
+ status = request_irq(ctrl->irq,
+ xlnk_accel_isr,
+ IRQF_SHARED,
+ "xlnk",
+ ctrl);
+ if (status) {
+ xlnk_irq_set[irq_id] = NULL;
+ kfree(ctrl);
+ return -EINVAL;
+ }
+ disable_irq_nosync(ctrl->irq);
+ }
+
+ temp_args.irqregister.irq_id = irq_id;
+
+ status = copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(temp_args.irqregister));
+
+ return status;
+}
+
+static int xlnk_irq_unregister_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ int irq_id;
+ struct xlnk_irq_control *ctrl;
+
+ status = copy_from_user(&temp_args,
+ (void __user *)args,
+ sizeof(union xlnk_args));
+ if (status)
+ return -ENOMEM;
+
+ irq_id = temp_args.irqunregister.irq_id;
+ if (irq_id < 0 || irq_id >= XLNK_IRQ_POOL_SIZE)
+ return -EINVAL;
+
+ ctrl = xlnk_irq_set[irq_id];
+ if (!ctrl)
+ return -EINVAL;
+
+ xlnk_irq_set[irq_id] = NULL;
+
+ if (ctrl->enabled) {
+ disable_irq_nosync(ctrl->irq);
+ complete(&ctrl->cmp);
+ }
+ free_irq(ctrl->irq, ctrl);
+ kfree(ctrl);
+
+ return 0;
+}
+
+static int xlnk_irq_wait_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ int irq_id;
+ struct xlnk_irq_control *ctrl;
+
+ status = copy_from_user(&temp_args,
+ (void __user *)args,
+ sizeof(temp_args.irqwait));
+ if (status)
+ return -ENOMEM;
+
+ irq_id = temp_args.irqwait.irq_id;
+ if (irq_id < 0 || irq_id >= XLNK_IRQ_POOL_SIZE)
+ return -EINVAL;
+
+ ctrl = xlnk_irq_set[irq_id];
+ if (!ctrl)
+ return -EINVAL;
+
+ if (!ctrl->enabled) {
+ ctrl->enabled = 1;
+ enable_irq(ctrl->irq);
+ }
+
+ if (temp_args.irqwait.polling) {
+ if (!try_wait_for_completion(&ctrl->cmp))
+ temp_args.irqwait.success = 0;
+ else
+ temp_args.irqwait.success = 1;
+ } else {
+ wait_for_completion(&ctrl->cmp);
+ temp_args.irqwait.success = 1;
+ }
+
+ if (temp_args.irqwait.success) {
+ reinit_completion(&ctrl->cmp);
+ ctrl->enabled = 0;
+ }
+
+ status = copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(temp_args.irqwait));
+
+ return status;
+}
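+
+/*
+ * User-space sketch of the register/wait handshake (editorial; the
+ * fabric IRQ number is hypothetical).  Each successful wait leaves the
+ * line disabled, so the next call re-enables it before blocking:
+ *
+ *	union xlnk_args args = { 0 };
+ *	int irq_id;
+ *
+ *	args.irqregister.irq = 121;
+ *	args.irqregister.type = XLNK_IRQ_LEVEL | XLNK_IRQ_ACTIVE_HIGH;
+ *	ioctl(fd, XLNK_IOCIRQREGISTER, &args);
+ *	irq_id = args.irqregister.irq_id;
+ *
+ *	for (;;) {
+ *		args.irqwait.irq_id = irq_id;
+ *		args.irqwait.polling = 0;	// block until the IRQ fires
+ *		ioctl(fd, XLNK_IOCIRQWAIT, &args);
+ *		// ... service the accelerator, then loop to wait again
+ *	}
+ */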
+
+static int xlnk_cachecontrol_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status, size;
+ void *kaddr;
+ xlnk_intptr_type paddr;
+ int buf_id;
+
+ status = copy_from_user(&temp_args,
+ (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status) {
+ dev_err(xlnk_dev, "Error in copy_from_user. status = %d\n",
+ status);
+ return -ENOMEM;
+ }
+
+ if (!(temp_args.cachecontrol.action == 0 ||
+ temp_args.cachecontrol.action == 1)) {
+ dev_err(xlnk_dev, "Illegal action specified to cachecontrol_ioctl: %d\n",
+ temp_args.cachecontrol.action);
+ return -EINVAL;
+ }
+
+ size = temp_args.cachecontrol.size;
+ paddr = temp_args.cachecontrol.phys_addr;
+
+ spin_lock(&xlnk_buf_lock);
+ buf_id = xlnk_buf_find_by_phys_addr(paddr);
+ kaddr = xlnk_bufpool[buf_id];
+ spin_unlock(&xlnk_buf_lock);
+
+ if (buf_id == 0) {
+ pr_err("Illegal cachecontrol on non-sds_alloc memory");
+ return -EINVAL;
+ }
+
+#if XLNK_SYS_BIT_WIDTH == 32
+ __cpuc_flush_dcache_area(kaddr, size);
+ outer_flush_range(paddr, paddr + size);
+ if (temp_args.cachecontrol.action == 1)
+ outer_inv_range(paddr, paddr + size);
+#else
+ if (temp_args.cachecontrol.action == 1)
+ __dma_map_area(kaddr, size, DMA_FROM_DEVICE);
+ else
+ __dma_map_area(kaddr, size, DMA_TO_DEVICE);
+#endif
+ return 0;
+}
+
+static int xlnk_memop_ioctl(struct file *filp, unsigned long arg_addr)
+{
+ union xlnk_args args;
+ xlnk_intptr_type p_addr = 0;
+ int status = 0;
+ int buf_id;
+ struct xlnk_dmabuf_reg *cp = NULL;
+ int cacheable = 1;
+ enum dma_data_direction dmadir;
+ xlnk_intptr_type page_id;
+ unsigned int page_offset;
+ struct scatterlist sg;
+ unsigned long attrs = 0;
+
+ status = copy_from_user(&args,
+ (void __user *)arg_addr,
+ sizeof(union xlnk_args));
+
+ if (status) {
+ pr_err("Error in copy_from_user. status = %d\n", status);
+ return -EFAULT;
+ }
+
+ if (!(args.memop.flags & XLNK_FLAG_MEM_ACQUIRE) &&
+ !(args.memop.flags & XLNK_FLAG_MEM_RELEASE)) {
+ pr_err("memop lacks acquire or release flag\n");
+ return -EINVAL;
+ }
+
+ if ((args.memop.flags & XLNK_FLAG_MEM_ACQUIRE) &&
+ (args.memop.flags & XLNK_FLAG_MEM_RELEASE)) {
+ pr_err("memop has both acquire and release defined\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&xlnk_buf_lock);
+ buf_id = xlnk_buf_find_by_user_addr(args.memop.virt_addr,
+ current->pid);
+ if (buf_id > 0) {
+ cacheable = xlnk_bufcacheable[buf_id];
+ p_addr = xlnk_phyaddr[buf_id] +
+ (args.memop.virt_addr - xlnk_userbuf[buf_id]);
+ } else {
+ struct xlnk_dmabuf_reg *dp;
+
+ list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
+ if (dp->user_vaddr == args.memop.virt_addr) {
+ cp = dp;
+ break;
+ }
+ }
+ }
+ spin_unlock(&xlnk_buf_lock);
+
+ if (buf_id <= 0 && !cp) {
+ pr_err("Error, buffer not found\n");
+ return -EINVAL;
+ }
+
+ dmadir = (enum dma_data_direction)args.memop.dir;
+
+ if ((args.memop.flags & XLNK_FLAG_COHERENT) || !cacheable)
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+ if (buf_id > 0) {
+ page_id = p_addr >> PAGE_SHIFT;
+ page_offset = p_addr - (page_id << PAGE_SHIFT);
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg,
+ pfn_to_page(page_id),
+ args.memop.size,
+ page_offset);
+ sg_dma_len(&sg) = args.memop.size;
+ }
+
+ if (args.memop.flags & XLNK_FLAG_MEM_ACQUIRE) {
+ if (buf_id > 0) {
+ status = get_dma_ops(xlnk_dev)->map_sg(xlnk_dev,
+ &sg,
+ 1,
+ dmadir,
+ attrs);
+ if (!status) {
+ pr_err("Failed to map address\n");
+ return -EINVAL;
+ }
+ args.memop.phys_addr = (xlnk_intptr_type)
+ sg_dma_address(&sg);
+ args.memop.token = (xlnk_intptr_type)
+ sg_dma_address(&sg);
+ status = copy_to_user((void __user *)arg_addr,
+ &args,
+ sizeof(union xlnk_args));
+ if (status)
+ pr_err("Error in copy_to_user. status = %d\n",
+ status);
+ } else {
+ if (cp->dbuf_sg_table->nents != 1) {
+ pr_err("Non-SG-DMA datamovers require physically contiguous DMABUFs. DMABUF is not physically contiguous\n");
+ return -EINVAL;
+ }
+ args.memop.phys_addr = (xlnk_intptr_type)
+ sg_dma_address(cp->dbuf_sg_table->sgl);
+ args.memop.token = 0;
+ status = copy_to_user((void __user *)arg_addr,
+ &args,
+ sizeof(union xlnk_args));
+ if (status)
+ pr_err("Error in copy_to_user. status = %d\n",
+ status);
+ }
+ } else {
+ if (buf_id > 0) {
+ sg_dma_address(&sg) = (dma_addr_t)args.memop.token;
+ get_dma_ops(xlnk_dev)->unmap_sg(xlnk_dev,
+ &sg,
+ 1,
+ dmadir,
+ attrs);
+ }
+ }
+
+ return status;
+}
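+
+/*
+ * User-space sketch of the acquire/release pairing (editorial; fd, buf
+ * and len are hypothetical).  Acquire maps the buffer for the device
+ * and returns a token that must be passed back on release:
+ *
+ *	args.memop.virt_addr = (xlnk_intptr_type)buf;
+ *	args.memop.size = len;
+ *	args.memop.dir = XLNK_DMA_TO_DEVICE;
+ *	args.memop.flags = XLNK_FLAG_MEM_ACQUIRE;
+ *	ioctl(fd, XLNK_IOCMEMOP, &args);
+ *	// ... device DMA against args.memop.phys_addr ...
+ *	args.memop.flags = XLNK_FLAG_MEM_RELEASE;
+ *	// args.memop.token was filled in by the acquire call
+ *	ioctl(fd, XLNK_IOCMEMOP, &args);
+ */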
+
+/* This function provides IO interface to the bridge driver. */
+static long xlnk_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ if (_IOC_TYPE(code) != XLNK_IOC_MAGIC)
+ return -ENOTTY;
+ if (_IOC_NR(code) > XLNK_IOC_MAXNR)
+ return -ENOTTY;
+
+ /* some sanity check */
+ switch (code) {
+ case XLNK_IOCALLOCBUF:
+ return xlnk_allocbuf_ioctl(filp, code, args);
+ case XLNK_IOCFREEBUF:
+ return xlnk_freebuf_ioctl(filp, code, args);
+ case XLNK_IOCADDDMABUF:
+ return xlnk_adddmabuf_ioctl(filp, code, args);
+ case XLNK_IOCCLEARDMABUF:
+ return xlnk_cleardmabuf_ioctl(filp, code, args);
+ case XLNK_IOCDMAREQUEST:
+ return xlnk_dmarequest_ioctl(filp, code, args);
+ case XLNK_IOCDMASUBMIT:
+ return xlnk_dmasubmit_ioctl(filp, code, args);
+ case XLNK_IOCDMAWAIT:
+ return xlnk_dmawait_ioctl(filp, code, args);
+ case XLNK_IOCDMARELEASE:
+ return xlnk_dmarelease_ioctl(filp, code, args);
+ case XLNK_IOCDEVREGISTER:
+ return xlnk_devregister_ioctl(filp, code, args);
+ case XLNK_IOCDMAREGISTER:
+ return xlnk_dmaregister_ioctl(filp, code, args);
+ case XLNK_IOCDEVUNREGISTER:
+ return xlnk_devunregister_ioctl(filp, code, args);
+ case XLNK_IOCCACHECTRL:
+ return xlnk_cachecontrol_ioctl(filp, code, args);
+ case XLNK_IOCIRQREGISTER:
+ return xlnk_irq_register_ioctl(filp, code, args);
+ case XLNK_IOCIRQUNREGISTER:
+ return xlnk_irq_unregister_ioctl(filp, code, args);
+ case XLNK_IOCIRQWAIT:
+ return xlnk_irq_wait_ioctl(filp, code, args);
+ case XLNK_IOCSHUTDOWN:
+ return xlnk_shutdown(args);
+ case XLNK_IOCRECRES:
+ return xlnk_recover_resource(args);
+ case XLNK_IOCMEMOP:
+ return xlnk_memop_ioctl(filp, args);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct vm_operations_struct xlnk_vm_ops = {
+ .open = xlnk_vma_open,
+ .close = xlnk_vma_close,
+};
+
+/* This function maps kernel space memory to user space memory. */
+static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int bufid;
+ int status;
+
+ bufid = vma->vm_pgoff >> (16 - PAGE_SHIFT);
+
+ if (bufid == 0) {
+ unsigned long paddr = virt_to_phys(xlnk_dev_buf);
+
+ status = remap_pfn_range(vma,
+ vma->vm_start,
+ paddr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ } else {
+ if (xlnk_bufcacheable[bufid] == 0)
+ vma->vm_page_prot =
+ pgprot_noncached(vma->vm_page_prot);
+ status = remap_pfn_range(vma, vma->vm_start,
+ xlnk_phyaddr[bufid]
+ >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ xlnk_userbuf[bufid] = vma->vm_start;
+ xlnk_buf_process[bufid] = current->pid;
+ }
+ if (status) {
+ pr_err("%s failed with code %d\n", __func__, status);
+ return status;
+ }
+
+ xlnk_vma_open(vma);
+ vma->vm_ops = &xlnk_vm_ops;
+ vma->vm_private_data = xlnk_bufpool[bufid];
+
+ return 0;
+}
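+
+/*
+ * Editorial note: the buffer id is encoded in the mmap byte offset as
+ * id << 16, which is what the vm_pgoff shift above decodes.  A
+ * user-space mapping of an allocated buffer therefore looks like:
+ *
+ *	void *p = mmap(NULL, args.allocbuf.len, PROT_READ | PROT_WRITE,
+ *		       MAP_SHARED, fd, (off_t)args.allocbuf.id << 16);
+ */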
+
+static void xlnk_vma_open(struct vm_area_struct *vma)
+{
+ xlnk_dev_vmas++;
+}
+
+static void xlnk_vma_close(struct vm_area_struct *vma)
+{
+ xlnk_dev_vmas--;
+}
+
+static int xlnk_shutdown(unsigned long buf)
+{
+ return 0;
+}
+
+static int xlnk_recover_resource(unsigned long buf)
+{
+ xlnk_free_all_buf();
+#ifdef CONFIG_XILINX_DMA_APF
+ xdma_release_all_channels();
+#endif
+ return 0;
+}
+
+module_platform_driver(xlnk_driver);
+
+MODULE_DESCRIPTION("Xilinx APF driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/apf/xlnk.h b/drivers/staging/apf/xlnk.h
new file mode 100644
index 000000000000..cbc2334c2e82
--- /dev/null
+++ b/drivers/staging/apf/xlnk.h
@@ -0,0 +1,175 @@
+#ifndef _XLNK_OS_H
+#define _XLNK_OS_H
+
+#include <linux/stddef.h>
+#include <linux/dmaengine.h>
+#include "xilinx-dma-apf.h"
+#include "xlnk-sysdef.h"
+
+#define XLNK_FLAG_COHERENT 0x00000001
+#define XLNK_FLAG_KERNEL_BUFFER 0x00000002
+#define XLNK_FLAG_DMAPOLLING 0x00000004
+#define XLNK_FLAG_IOMMU_VALID 0x00000008
+#define XLNK_FLAG_PHYSICAL_ADDR 0x00000100
+#define XLNK_FLAG_VIRTUAL_ADDR 0x00000200
+#define XLNK_FLAG_MEM_ACQUIRE 0x00001000
+#define XLNK_FLAG_MEM_RELEASE 0x00002000
+#define CF_FLAG_CACHE_FLUSH_INVALIDATE 0x00000001
+#define CF_FLAG_PHYSICALLY_CONTIGUOUS 0x00000002
+#define CF_FLAG_DMAPOLLING 0x00000004
+#define XLNK_IRQ_LEVEL 0x00000001
+#define XLNK_IRQ_EDGE 0x00000002
+#define XLNK_IRQ_ACTIVE_HIGH 0x00000004
+#define XLNK_IRQ_ACTIVE_LOW 0x00000008
+#define XLNK_IRQ_RESET_REG_VALID 0x00000010
+
+enum xlnk_dma_direction {
+ XLNK_DMA_BI = 0,
+ XLNK_DMA_TO_DEVICE = 1,
+ XLNK_DMA_FROM_DEVICE = 2,
+ XLNK_DMA_NONE = 3,
+};
+
+struct xlnk_dma_transfer_handle {
+ dma_addr_t dma_addr;
+ unsigned long transfer_length;
+ void *kern_addr;
+ unsigned long user_addr;
+ enum dma_data_direction transfer_direction;
+ int sg_effective_length;
+ int flags;
+ struct dma_chan *channel;
+ dma_cookie_t dma_cookie;
+ struct dma_async_tx_descriptor *async_desc;
+ struct completion completion_handle;
+};
+
+struct xlnk_dmabuf_reg {
+ xlnk_int_type dmabuf_fd;
+ xlnk_intptr_type user_vaddr;
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *dbuf_attach;
+ struct sg_table *dbuf_sg_table;
+ int is_mapped;
+ int dma_direction;
+ struct list_head list;
+};
+
+struct xlnk_irq_control {
+ int irq;
+ int enabled;
+ struct completion cmp;
+};
+
+/* CROSSES KERNEL-USER BOUNDARY */
+union xlnk_args {
+ struct __attribute__ ((__packed__)) {
+ xlnk_uint_type len;
+ xlnk_int_type id;
+ xlnk_intptr_type phyaddr;
+ xlnk_byte_type cacheable;
+ } allocbuf;
+ struct __attribute__ ((__packed__)) {
+ xlnk_uint_type id;
+ xlnk_intptr_type buf;
+ } freebuf;
+ struct __attribute__ ((__packed__)) {
+ xlnk_int_type dmabuf_fd;
+ xlnk_intptr_type user_addr;
+ } dmabuf;
+ struct __attribute__ ((__packed__)) {
+ xlnk_char_type name[64];
+ xlnk_intptr_type dmachan;
+ xlnk_uint_type bd_space_phys_addr;
+ xlnk_uint_type bd_space_size;
+ } dmarequest;
+#define XLNK_MAX_APPWORDS 5
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type dmachan;
+ xlnk_intptr_type buf;
+ xlnk_intptr_type buf2;
+ xlnk_uint_type buf_offset;
+ xlnk_uint_type len;
+ xlnk_uint_type bufflag;
+ xlnk_intptr_type sglist;
+ xlnk_uint_type sgcnt;
+ xlnk_enum_type dmadir;
+ xlnk_uint_type nappwords_i;
+ xlnk_uint_type appwords_i[XLNK_MAX_APPWORDS];
+ xlnk_uint_type nappwords_o;
+ xlnk_uint_type flag;
+ xlnk_intptr_type dmahandle; /* return value */
+ xlnk_uint_type last_bd_index;
+ } dmasubmit;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type dmahandle;
+ xlnk_uint_type nappwords;
+ xlnk_uint_type appwords[XLNK_MAX_APPWORDS];
+ /* appwords array we only accept 5 max */
+ xlnk_uint_type flags;
+ } dmawait;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type dmachan;
+ } dmarelease;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type base;
+ xlnk_uint_type size;
+ xlnk_uint_type irqs[8];
+ xlnk_char_type name[32];
+ xlnk_uint_type id;
+ } devregister;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type base;
+ } devunregister;
+ struct __attribute__ ((__packed__)) {
+ xlnk_char_type name[32];
+ xlnk_uint_type id;
+ xlnk_intptr_type base;
+ xlnk_uint_type size;
+ xlnk_uint_type chan_num;
+ xlnk_uint_type chan0_dir;
+ xlnk_uint_type chan0_irq;
+ xlnk_uint_type chan0_poll_mode;
+ xlnk_uint_type chan0_include_dre;
+ xlnk_uint_type chan0_data_width;
+ xlnk_uint_type chan1_dir;
+ xlnk_uint_type chan1_irq;
+ xlnk_uint_type chan1_poll_mode;
+ xlnk_uint_type chan1_include_dre;
+ xlnk_uint_type chan1_data_width;
+ } dmaregister;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type phys_addr;
+ xlnk_uint_type size;
+ xlnk_int_type action;
+ } cachecontrol;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type virt_addr;
+ xlnk_int_type size;
+ xlnk_enum_type dir;
+ xlnk_int_type flags;
+ xlnk_intptr_type phys_addr;
+ xlnk_intptr_type token;
+ } memop;
+ struct __attribute__ ((__packed__)) {
+ xlnk_int_type irq;
+ xlnk_int_type subirq;
+ xlnk_uint_type type;
+ xlnk_intptr_type control_base;
+ xlnk_intptr_type reset_reg_base;
+ xlnk_uint_type reset_offset;
+ xlnk_uint_type reset_valid_high;
+ xlnk_uint_type reset_valid_low;
+ xlnk_int_type irq_id;
+ } irqregister;
+ struct __attribute__ ((__packed__)) {
+ xlnk_int_type irq_id;
+ } irqunregister;
+ struct __attribute__ ((__packed__)) {
+ xlnk_int_type irq_id;
+ xlnk_int_type polling;
+ xlnk_int_type success;
+ } irqwait;
+};
+
+#endif
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index 15b7a82f4b1e..eb329e0cdc84 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -26,19 +26,40 @@
#define WZRD_CLKFBOUT_MULT_SHIFT 8
#define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT)
+#define WZRD_CLKFBOUT_FRAC_SHIFT 16
+#define WZRD_CLKFBOUT_FRAC_MASK (0x3ff << WZRD_CLKFBOUT_FRAC_SHIFT)
#define WZRD_DIVCLK_DIVIDE_SHIFT 0
#define WZRD_DIVCLK_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
#define WZRD_CLKOUT_DIVIDE_SHIFT 0
+#define WZRD_CLKOUT_DIVIDE_WIDTH 8
#define WZRD_CLKOUT_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
+#define WZRD_CLKOUT_FRAC_SHIFT 8
+#define WZRD_CLKOUT_FRAC_MASK 0x3ff
+
+#define WZRD_DR_MAX_INT_DIV_VALUE 255
+#define WZRD_DR_NUM_RETRIES 10000
+#define WZRD_DR_STATUS_REG_OFFSET 0x04
+#define WZRD_DR_LOCK_BIT_MASK 0x00000001
+#define WZRD_DR_INIT_REG_OFFSET 0x25C
+#define WZRD_DR_DIV_TO_PHASE_OFFSET 4
+#define WZRD_DR_BEGIN_DYNA_RECONF 0x03
+
+/* Get the mask from width */
+#define div_mask(width) ((1 << (width)) - 1)
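+/* e.g. div_mask(WZRD_CLKOUT_DIVIDE_WIDTH) == div_mask(8) == 0xff */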
+
+/* Extract divider instance from clock hardware instance */
+#define to_clk_wzrd_divider(_hw) container_of(_hw, struct clk_wzrd_divider, hw)
enum clk_wzrd_int_clks {
wzrd_clk_mul,
wzrd_clk_mul_div,
+ wzrd_clk_mul_frac,
wzrd_clk_int_max
};
/**
- * struct clk_wzrd:
+ * struct clk_wzrd - Clock wizard private data structure
+ *
* @clk_data: Clock data
* @nb: Notifier block
* @base: Memory base
@@ -61,6 +82,29 @@ struct clk_wzrd {
bool suspended;
};
+/**
+ * struct clk_wzrd_divider - clock divider specific to clk_wzrd
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @base: base address of register containing the divider
+ * @offset: offset address of register containing the divider
+ * @shift: shift to the divider bit field
+ * @width: width of the divider bit field
+ * @flags: clk_wzrd divider flags
+ * @table: array of value/divider pairs, last entry should have div = 0
+ * @lock: register lock
+ */
+struct clk_wzrd_divider {
+ struct clk_hw hw;
+ void __iomem *base;
+ u16 offset;
+ u8 shift;
+ u8 width;
+ u8 flags;
+ const struct clk_div_table *table;
+ spinlock_t *lock; /* divider lock */
+};
+
#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)
/* maximum frequencies for input/output clocks per speed grade */
@@ -70,6 +114,319 @@ static const unsigned long clk_wzrd_max_freq[] = {
1066000000UL
};
+/* spin lock variable for clk_wzrd */
+static DEFINE_SPINLOCK(clkwzrd_lock);
+
+static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr = divider->base + divider->offset;
+ unsigned int val;
+
+ val = readl(div_addr) >> divider->shift;
+ val &= div_mask(divider->width);
+
+ return divider_recalc_rate(hw, parent_rate, val, divider->table,
+ divider->flags, divider->width);
+}
+
+static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int err = 0;
+ u16 retries;
+ u32 value;
+ unsigned long flags = 0;
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr = divider->base + divider->offset;
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+ else
+ __acquire(divider->lock);
+
+ value = DIV_ROUND_CLOSEST(parent_rate, rate);
+
+ /* Cap the value to max */
+ if (value > WZRD_DR_MAX_INT_DIV_VALUE)
+ value = WZRD_DR_MAX_INT_DIV_VALUE;
+
+ /* Set divisor and clear phase offset */
+ writel(value, div_addr);
+ writel(0x00, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);
+
+ /* Check status register */
+ retries = WZRD_DR_NUM_RETRIES;
+ while (--retries) {
+ if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+ WZRD_DR_LOCK_BIT_MASK)
+ break;
+ }
+
+ if (retries == 0) {
+ err = -ETIMEDOUT;
+ goto err_reconfig;
+ }
+
+ /* Initiate reconfiguration */
+ writel(WZRD_DR_BEGIN_DYNA_RECONF,
+ divider->base + WZRD_DR_INIT_REG_OFFSET);
+
+ /* Check status register */
+ retries = WZRD_DR_NUM_RETRIES;
+ while (--retries) {
+ if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+ WZRD_DR_LOCK_BIT_MASK)
+ break;
+ }
+
+ if (retries == 0)
+ err = -ETIMEDOUT;
+
+err_reconfig:
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+ else
+ __release(divider->lock);
+
+ return err;
+}
+
+static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u32 div;
+
+ /*
+ * Since we do not change the parent rate, round the requested rate
+ * to the closest rate achievable with an integer divider.
+ */
+ div = DIV_ROUND_CLOSEST(*prate, rate);
+ if (!div)
+ div = 1;
+ else if (div > WZRD_DR_MAX_INT_DIV_VALUE)
+ div = WZRD_DR_MAX_INT_DIV_VALUE;
+
+ return *prate / div;
+}
+
+static const struct clk_ops clk_wzrd_clk_divider_ops = {
+ .round_rate = clk_wzrd_round_rate,
+ .set_rate = clk_wzrd_dynamic_reconfig,
+ .recalc_rate = clk_wzrd_recalc_rate,
+};
+
+static unsigned long clk_wzrd_recalc_ratef(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned int val;
+ u32 div, frac;
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr = divider->base + divider->offset;
+
+ val = readl(div_addr);
+ div = val & div_mask(divider->width);
+ frac = (val >> WZRD_CLKOUT_FRAC_SHIFT) & WZRD_CLKOUT_FRAC_MASK;
+
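+ /*
+ * The effective divider is div + frac/1000; e.g. with a 100 MHz
+ * parent, div = 2 and frac = 500 yield 100000000 * 1000 / 2500,
+ * i.e. 40 MHz.
+ */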
+ return ((parent_rate * 1000) / ((div * 1000) + frac));
+}
+
+static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int err = 0;
+ u16 retries;
+ u32 value, pre;
+ unsigned long flags = 0;
+ unsigned long rate_div, f, clockout0_div;
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr = divider->base + divider->offset;
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+ else
+ __acquire(divider->lock);
+
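+ /*
+ * Split the divider into an integer part and a fractional part in
+ * thousandths: e.g. parent_rate = 100 MHz and rate = 40 MHz give
+ * rate_div = 2500, clockout0_div = 2 and f = 500 (divider 2.500).
+ */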
+ rate_div = ((parent_rate * 1000) / rate);
+ clockout0_div = rate_div / 1000;
+
+ pre = DIV_ROUND_CLOSEST((parent_rate * 1000), rate);
+ f = (u32)(pre - (clockout0_div * 1000));
+ f = f & WZRD_CLKOUT_FRAC_MASK;
+
+ value = ((f << WZRD_CLKOUT_DIVIDE_WIDTH) | (clockout0_div &
+ WZRD_CLKOUT_DIVIDE_MASK));
+
+ /* Set divisor and clear phase offset */
+ writel(value, div_addr);
+ writel(0x0, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);
+
+ /* Check status register */
+ retries = WZRD_DR_NUM_RETRIES;
+ while (--retries) {
+ if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+ WZRD_DR_LOCK_BIT_MASK)
+ break;
+ }
+
+ if (!retries) {
+ err = -ETIMEDOUT;
+ goto err_reconfig;
+ }
+
+ /* Initiate reconfiguration */
+ writel(WZRD_DR_BEGIN_DYNA_RECONF,
+ divider->base + WZRD_DR_INIT_REG_OFFSET);
+
+ /* Check status register */
+ retries = WZRD_DR_NUM_RETRIES;
+ while (--retries) {
+ if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+ WZRD_DR_LOCK_BIT_MASK)
+ break;
+ }
+
+ if (!retries)
+ err = -ETIMEDOUT;
+
+err_reconfig:
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+ else
+ __release(divider->lock);
+
+ return err;
+}
+
+static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return rate;
+}
+
+static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
+ .round_rate = clk_wzrd_round_rate_f,
+ .set_rate = clk_wzrd_dynamic_reconfig_f,
+ .recalc_rate = clk_wzrd_recalc_ratef,
+};
+
+static struct clk *clk_wzrd_register_divf(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *base, u16 offset,
+ u8 shift, u8 width,
+ u8 clk_divider_flags,
+ const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ struct clk_wzrd_divider *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
+ if (width + shift > 16) {
+ pr_warn("divider value exceeds LOWORD field\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* allocate the divider */
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_ro_ops;
+ else
+ init.ops = &clk_wzrd_clk_divider_ops_f;
+
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_divider assignments */
+ div->base = base;
+ div->offset = offset;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+ div->table = table;
+
+ /* register the clock */
+ hw = &div->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(div);
+ return ERR_PTR(ret);
+ }
+
+ return hw->clk;
+}
+
+static struct clk *clk_wzrd_register_divider(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *base, u16 offset,
+ u8 shift, u8 width,
+ u8 clk_divider_flags,
+ const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ struct clk_wzrd_divider *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
+ if (width + shift > 16) {
+ pr_warn("divider value exceeds LOWORD field\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* allocate the divider */
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_ro_ops;
+ else
+ init.ops = &clk_wzrd_clk_divider_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_divider assignments */
+ div->base = base;
+ div->offset = offset;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+ div->table = table;
+
+ /* register the clock */
+ hw = &div->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(div);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw->clk;
+}
+
static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
void *data)
{
@@ -131,7 +488,7 @@ static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
static int clk_wzrd_probe(struct platform_device *pdev)
{
int i, ret;
- u32 reg;
+ u32 reg, reg_f, mult;
unsigned long rate;
const char *clk_name;
struct clk_wzrd *clk_wzrd;
@@ -183,17 +540,13 @@ static int clk_wzrd_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- /* we don't support fractional div/mul yet */
- reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
- WZRD_CLKFBOUT_FRAC_EN;
- reg |= readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2)) &
- WZRD_CLKOUT0_FRAC_EN;
- if (reg)
- dev_warn(&pdev->dev, "fractional div/mul not supported\n");
-
/* register multiplier */
reg = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
WZRD_CLKFBOUT_MULT_MASK) >> WZRD_CLKFBOUT_MULT_SHIFT;
+ reg_f = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
+ WZRD_CLKFBOUT_FRAC_MASK) >> WZRD_CLKFBOUT_FRAC_SHIFT;
+
+ mult = ((reg * 1000) + reg_f);
clk_name = kasprintf(GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
if (!clk_name) {
ret = -ENOMEM;
@@ -202,7 +555,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor
(&pdev->dev, clk_name,
__clk_get_name(clk_wzrd->clk_in1),
- 0, reg, 1);
+ 0, mult, 1000);
kfree(clk_name);
if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
@@ -240,11 +593,24 @@ static int clk_wzrd_probe(struct platform_device *pdev)
ret = -EINVAL;
goto err_rm_int_clks;
}
- reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2) + i * 12);
- reg &= WZRD_CLKOUT_DIVIDE_MASK;
- reg >>= WZRD_CLKOUT_DIVIDE_SHIFT;
- clk_wzrd->clkout[i] = clk_register_fixed_factor
- (&pdev->dev, clkout_name, clk_name, 0, 1, reg);
+ if (!i)
+ clk_wzrd->clkout[i] = clk_wzrd_register_divf
+ (&pdev->dev, clkout_name,
+ clk_name, 0,
+ clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
+ WZRD_CLKOUT_DIVIDE_SHIFT,
+ WZRD_CLKOUT_DIVIDE_WIDTH,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ NULL, &clkwzrd_lock);
+ else
+ clk_wzrd->clkout[i] = clk_wzrd_register_divider
+ (&pdev->dev, clkout_name,
+ clk_name, 0,
+ clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
+ WZRD_CLKOUT_DIVIDE_SHIFT,
+ WZRD_CLKOUT_DIVIDE_WIDTH,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ NULL, &clkwzrd_lock);
if (IS_ERR(clk_wzrd->clkout[i])) {
int j;
diff --git a/drivers/staging/clocking-wizard/dt-binding.txt b/drivers/staging/clocking-wizard/dt-binding.txt
index 723271e93316..0439af67930b 100644
--- a/drivers/staging/clocking-wizard/dt-binding.txt
+++ b/drivers/staging/clocking-wizard/dt-binding.txt
@@ -9,6 +9,7 @@ http://www.xilinx.com/support/documentation/ip_documentation/clk_wiz/v5_1/pg065-
Required properties:
- compatible: Must be 'xlnx,clocking-wizard'
+ - #clock-cells: Number of cells in a clock specifier. Should be 1
- reg: Base and size of the cores register space
- clocks: Handle to input clock
- clock-names: Tuple containing 'clk_in1' and 's_axi_aclk'
@@ -19,12 +20,13 @@ Optional properties:
Example:
clock-generator@40040000 {
+ #clock-cells = <1>;
reg = <0x40040000 0x1000>;
compatible = "xlnx,clocking-wizard";
speed-grade = <1>;
clock-names = "clk_in1", "s_axi_aclk";
clocks = <&clkc 15>, <&clkc 15>;
- clock-output-names = "clk_out0", "clk_out1", "clk_out2",
+ clock-output-names = "clk_out1", "clk_out2",
"clk_out3", "clk_out4", "clk_out5",
"clk_out6", "clk_out7";
};
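+
+A consumer then references an output clock by index (a sketch; the node
+label and the assumption that index 0 maps to clk_out1 are illustrative):
+
+	serial@e0000000 {
+		clocks = <&clock_generator 0>;
+	};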
diff --git a/drivers/staging/comedi/drivers/adv_pci1760.c b/drivers/staging/comedi/drivers/adv_pci1760.c
index f460f21efb90..0f6faf263c82 100644
--- a/drivers/staging/comedi/drivers/adv_pci1760.c
+++ b/drivers/staging/comedi/drivers/adv_pci1760.c
@@ -59,7 +59,7 @@
#define PCI1760_CMD_CLR_IMB2 0x00 /* Clears IMB2 */
#define PCI1760_CMD_SET_DO 0x01 /* Set output state */
#define PCI1760_CMD_GET_DO 0x02 /* Read output status */
-#define PCI1760_CMD_GET_STATUS 0x03 /* Read current status */
+#define PCI1760_CMD_GET_STATUS 0x07 /* Read current status */
#define PCI1760_CMD_GET_FW_VER 0x0e /* Read firmware version */
#define PCI1760_CMD_GET_HW_VER 0x0f /* Read hardware version */
#define PCI1760_CMD_SET_PWM_HI(x) (0x10 + (x) * 2) /* Set "hi" period */
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index cc4c18c3fb36..7d18ad68be26 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -2593,10 +2593,15 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
req->unaligned = false;
if (req->unaligned) {
- if (!ep->virt_buf)
+ if (!ep->virt_buf) {
ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE,
&ep->phys_buf,
GFP_ATOMIC | GFP_DMA);
+ if (!ep->virt_buf) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return -ENOMEM;
+ }
+ }
if (ep->epnum > 0) {
if (ep->direct == USB_DIR_IN)
memcpy(ep->virt_buf, req->req.buf,
diff --git a/drivers/staging/fclk/Kconfig b/drivers/staging/fclk/Kconfig
new file mode 100644
index 000000000000..5f68261a206d
--- /dev/null
+++ b/drivers/staging/fclk/Kconfig
@@ -0,0 +1,9 @@
+#
+# Xilinx PL clk enabler
+#
+
+config XILINX_FCLK
+ tristate "Xilinx PL clock enabler"
+ depends on COMMON_CLK && OF
+ ---help---
+ Support for the Xilinx fclk clock enabler.
diff --git a/drivers/staging/fclk/Makefile b/drivers/staging/fclk/Makefile
new file mode 100644
index 000000000000..71723036c94e
--- /dev/null
+++ b/drivers/staging/fclk/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XILINX_FCLK) += xilinx_fclk.o
diff --git a/drivers/staging/fclk/TODO b/drivers/staging/fclk/TODO
new file mode 100644
index 000000000000..912325fe5f4d
--- /dev/null
+++ b/drivers/staging/fclk/TODO
@@ -0,0 +1,2 @@
+TODO:
+ - Remove this hack and convert all users to the common clock framework.
diff --git a/drivers/staging/fclk/dt-binding.txt b/drivers/staging/fclk/dt-binding.txt
new file mode 100644
index 000000000000..23521608b4a8
--- /dev/null
+++ b/drivers/staging/fclk/dt-binding.txt
@@ -0,0 +1,16 @@
+Binding for Xilinx PL (programmable logic) clocks
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+ - compatible: Must be 'xlnx,fclk'
+ - clocks: Handle to input clock
+
+Example:
+ fclk3: fclk3 {
+ status = "disabled";
+ compatible = "xlnx,fclk";
+ clocks = <&clkc 71>;
+ };
diff --git a/drivers/staging/fclk/xilinx_fclk.c b/drivers/staging/fclk/xilinx_fclk.c
new file mode 100644
index 000000000000..189928b8dd79
--- /dev/null
+++ b/drivers/staging/fclk/xilinx_fclk.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+struct fclk_state {
+ struct device *dev;
+ struct clk *pl;
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id fclk_of_match[] = {
+ { .compatible = "xlnx,fclk",},
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, fclk_of_match);
+
+static ssize_t set_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fclk_state *st = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", clk_get_rate(st->pl));
+}
+
+static ssize_t set_rate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret = 0;
+ unsigned long rate;
+ struct fclk_state *st = dev_get_drvdata(dev);
+
+ ret = kstrtoul(buf, 0, &rate);
+ if (ret)
+ return ret;
+
+ rate = clk_round_rate(st->pl, rate);
+ ret = clk_set_rate(st->pl, rate);
+
+ return ret ? ret : count;
+}
+
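+/*
+ * set_rate is a read-write sysfs attribute: reads return the current
+ * PL clock rate in Hz; writes round the requested rate to the closest
+ * achievable rate and apply it, e.g. "echo 100000000 > set_rate".
+ */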
+static DEVICE_ATTR_RW(set_rate);
+
+static const struct attribute *fclk_ctrl_attrs[] = {
+ &dev_attr_set_rate.attr,
+ NULL,
+};
+
+static const struct attribute_group fclk_ctrl_attr_grp = {
+ .attrs = (struct attribute **)fclk_ctrl_attrs,
+};
+
+static int fclk_probe(struct platform_device *pdev)
+{
+ struct fclk_state *st;
+ int ret;
+ struct device *dev = &pdev->dev;
+
+ st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->dev = dev;
+ platform_set_drvdata(pdev, st);
+
+ st->pl = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(st->pl))
+ return PTR_ERR(st->pl);
+
+ ret = clk_prepare_enable(st->pl);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+
+ ret = sysfs_create_group(&dev->kobj, &fclk_ctrl_attr_grp);
+ if (ret) {
+ clk_disable_unprepare(st->pl);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fclk_remove(struct platform_device *pdev)
+{
+ struct fclk_state *st = platform_get_drvdata(pdev);
+
+ sysfs_remove_group(&pdev->dev.kobj, &fclk_ctrl_attr_grp);
+ clk_disable_unprepare(st->pl);
+ return 0;
+}
+
+static struct platform_driver fclk_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fclk_of_match,
+ },
+ .probe = fclk_probe,
+ .remove = fclk_remove,
+};
+
+module_platform_driver(fclk_driver);
+
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>");
+MODULE_DESCRIPTION("fclk enable");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index d2672b65c3f4..e59bb27236b9 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -100,15 +100,15 @@ static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
static struct gb_channel *get_channel_from_mode(struct gb_light *light,
u32 mode)
{
- struct gb_channel *channel = NULL;
+ struct gb_channel *channel;
int i;
for (i = 0; i < light->channels_count; i++) {
channel = &light->channels[i];
- if (channel && channel->mode == mode)
- break;
+ if (channel->mode == mode)
+ return channel;
}
- return channel;
+ return NULL;
}
static int __gb_lights_flash_intensity_set(struct gb_channel *channel,
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index af0bcf95ee8a..54388660021e 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -624,7 +624,7 @@ static void ad5933_work(struct work_struct *work)
struct ad5933_state, work.work);
struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
__be16 buf[2];
- int val[2];
+ u16 val[2];
unsigned char status;
int ret;
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index ed404355ea4c..a00693c61870 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -101,7 +101,7 @@ struct ad2s1210_state {
static const int ad2s1210_mode_vals[4][2] = {
[MOD_POS] = { 0, 0 },
[MOD_VEL] = { 0, 1 },
- [MOD_CONFIG] = { 1, 0 },
+ [MOD_CONFIG] = { 1, 1 },
};
static inline void ad2s1210_set_mode(enum ad2s1210_mode mode,
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index e61bd8e1d246..b565c26ca72f 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -1584,8 +1584,10 @@ static int ks_wlan_set_encode_ext(struct net_device *dev,
commit |= SME_WEP_FLAG;
}
if (enc->key_len) {
- memcpy(&key->key_val[0], &enc->key[0], enc->key_len);
- key->key_len = enc->key_len;
+ int key_len = clamp_val(enc->key_len, 0, IW_ENCODING_TOKEN_MAX);
+
+ memcpy(&key->key_val[0], &enc->key[0], key_len);
+ key->key_len = key_len;
commit |= (SME_WEP_VAL1 << index);
}
break;
diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
index a15d970adb98..135e1e64c244 100644
--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
+++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
@@ -800,6 +800,7 @@ static int ipu_csc_scaler_release(struct file *file)
dev_dbg(priv->dev, "Releasing instance %p\n", ctx);
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdlr);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
index 8dd1396909d7..a242bbe23ba2 100644
--- a/drivers/staging/media/meson/vdec/vdec.c
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -1074,6 +1074,7 @@ static int vdec_probe(struct platform_device *pdev)
err_vdev_release:
video_device_release(vdev);
+ v4l2_device_unregister(&core->v4l2_dev);
return ret;
}
@@ -1082,6 +1083,7 @@ static int vdec_remove(struct platform_device *pdev)
struct amvdec_core *core = platform_get_drvdata(pdev);
video_unregister_device(core->vdev_dec);
+ v4l2_device_unregister(&core->v4l2_dev);
return 0;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index e80e82a276e9..6ad448899779 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -323,6 +323,8 @@ static int cedrus_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
+ platform_set_drvdata(pdev, dev);
+
dev->vfd = cedrus_video_device;
dev->dev = &pdev->dev;
dev->pdev = pdev;
@@ -392,8 +394,6 @@ static int cedrus_probe(struct platform_device *pdev)
goto err_m2m_mc;
}
- platform_set_drvdata(pdev, dev);
-
return 0;
err_m2m_mc:
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index 64c979155a49..774abedad987 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -47,7 +47,7 @@ MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a powe
static DEFINE_SPINLOCK(dim_lock);
static void dim2_tasklet_fn(unsigned long data);
-static DECLARE_TASKLET(dim2_tasklet, dim2_tasklet_fn, 0);
+static DECLARE_TASKLET_OLD(dim2_tasklet, dim2_tasklet_fn);
/**
* struct hdm_channel - private structure to keep channel specific data
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index a28ade050e34..d84a4cb9dd67 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -41,7 +41,7 @@
#endif
static void cvm_oct_tx_do_cleanup(unsigned long arg);
-static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
+static DECLARE_TASKLET_OLD(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup);
/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index bcbf0c8cd420..be377e75703b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -767,6 +767,7 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
else
netif_wake_queue(dev);
+ priv->bfirst_after_down = false;
return 0;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 20e494186c9e..458ecca00ba1 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -185,7 +185,6 @@ static void _rtl92e_dm_init_fsync(struct net_device *dev);
static void _rtl92e_dm_deinit_fsync(struct net_device *dev);
static void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev);
-static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev);
static void _rtl92e_dm_check_fsync(struct net_device *dev);
static void _rtl92e_dm_check_rf_ctrl_gpio(void *data);
static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t);
@@ -238,8 +237,6 @@ void rtl92e_dm_watchdog(struct net_device *dev)
if (priv->being_init_adapter)
return;
- _rtl92e_dm_check_ac_dc_power(dev);
-
_rtl92e_dm_check_txrateandretrycount(dev);
_rtl92e_dm_check_edca_turbo(dev);
@@ -257,30 +254,6 @@ void rtl92e_dm_watchdog(struct net_device *dev)
_rtl92e_dm_cts_to_self(dev);
}
-static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- static char const ac_dc_script[] = "/etc/acpi/wireless-rtl-ac-dc-power.sh";
- char *argv[] = {(char *)ac_dc_script, DRV_NAME, NULL};
- static char *envp[] = {"HOME=/",
- "TERM=linux",
- "PATH=/usr/bin:/bin",
- NULL};
-
- if (priv->ResetProgress == RESET_TYPE_SILENT) {
- RT_TRACE((COMP_INIT | COMP_POWER | COMP_RF),
- "GPIOChangeRFWorkItemCallBack(): Silent Reset!!!!!!!\n");
- return;
- }
-
- if (priv->rtllib->state != RTLLIB_LINKED)
- return;
- call_usermodehelper(ac_dc_script, argv, envp, UMH_WAIT_PROC);
-
- return;
-};
-
-
void rtl92e_init_adaptive_rate(struct net_device *dev)
{
@@ -1800,10 +1773,6 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
u8 tmp1byte;
enum rt_rf_power_state eRfPowerStateToSet;
bool bActuallySet = false;
- char *argv[3];
- static char const RadioPowerPath[] = "/etc/acpi/events/RadioPower.sh";
- static char *envp[] = {"HOME=/", "TERM=linux", "PATH=/usr/bin:/bin",
- NULL};
bActuallySet = false;
@@ -1835,14 +1804,6 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
mdelay(1000);
priv->bHwRfOffAction = 1;
rtl92e_set_rf_state(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW);
- if (priv->bHwRadioOff)
- argv[1] = "RFOFF";
- else
- argv[1] = "RFON";
-
- argv[0] = (char *)RadioPowerPath;
- argv[2] = NULL;
- call_usermodehelper(RadioPowerPath, argv, envp, UMH_WAIT_PROC);
}
}
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 83c30e2d82f5..a78f914082fe 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -1490,9 +1490,9 @@ static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb,
hdrlen += 4;
}
- rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen);
ieee->stats.rx_packets++;
ieee->stats.rx_bytes += skb->len;
+ rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen);
return 1;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 00e34c392a38..d51f734aca26 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -943,9 +943,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
#endif
if (ieee->iw_mode == IW_MODE_MONITOR) {
+ unsigned int len = skb->len;
+
ieee80211_monitor_rx(ieee, skb, rx_stats);
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += len;
return 1;
}
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index ec33fb9122e9..57badc1e91e3 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -1013,7 +1013,7 @@ typedef struct r8192_priv {
bool bis_any_nonbepkts;
bool bcurrent_turbo_EDCA;
bool bis_cur_rdlstate;
- struct timer_list fsync_timer;
+ struct delayed_work fsync_work;
bool bfsync_processing; /* 500ms Fsync timer is active or not */
u32 rate_record;
u32 rateCountDiffRecord;
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index c23e43b095d9..30b272da36f5 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -2585,19 +2585,20 @@ static void dm_init_fsync(struct net_device *dev)
priv->ieee80211->fsync_seconddiff_ratethreshold = 200;
priv->ieee80211->fsync_state = Default_Fsync;
priv->framesyncMonitor = 1; /* current default 0xc38 monitor on */
- timer_setup(&priv->fsync_timer, dm_fsync_timer_callback, 0);
+ INIT_DELAYED_WORK(&priv->fsync_work, dm_fsync_work_callback);
}
static void dm_deInit_fsync(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- del_timer_sync(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
}
-void dm_fsync_timer_callback(struct timer_list *t)
+void dm_fsync_work_callback(struct work_struct *work)
{
- struct r8192_priv *priv = from_timer(priv, t, fsync_timer);
+ struct r8192_priv *priv =
+ container_of(work, struct r8192_priv, fsync_work.work);
struct net_device *dev = priv->ieee80211->dev;
u32 rate_index, rate_count = 0, rate_count_diff = 0;
bool bSwitchFromCountDiff = false;
@@ -2664,17 +2665,16 @@ void dm_fsync_timer_callback(struct timer_list *t)
}
}
if (bDoubleTimeInterval) {
- if (timer_pending(&priv->fsync_timer))
- del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies +
- msecs_to_jiffies(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval);
- add_timer(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
+ schedule_delayed_work(&priv->fsync_work,
+ msecs_to_jiffies(priv->ieee80211->fsync_time_interval *
+ priv->ieee80211->fsync_multiple_timeinterval));
} else {
- if (timer_pending(&priv->fsync_timer))
- del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies +
- msecs_to_jiffies(priv->ieee80211->fsync_time_interval);
- add_timer(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
+ schedule_delayed_work(&priv->fsync_work,
+ msecs_to_jiffies(priv->ieee80211->fsync_time_interval));
}
} else {
/* Let Register return to default value; */
@@ -2702,7 +2702,7 @@ static void dm_EndSWFsync(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
RT_TRACE(COMP_HALDM, "%s\n", __func__);
- del_timer_sync(&(priv->fsync_timer));
+ cancel_delayed_work_sync(&priv->fsync_work);
/* Let Register return to default value; */
if (priv->bswitch_fsync) {
@@ -2744,11 +2744,9 @@ static void dm_StartSWFsync(struct net_device *dev)
if (priv->ieee80211->fsync_rate_bitmap & rateBitmap)
priv->rate_record += priv->stats.received_rate_histogram[1][rateIndex];
}
- if (timer_pending(&priv->fsync_timer))
- del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies +
- msecs_to_jiffies(priv->ieee80211->fsync_time_interval);
- add_timer(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
+ schedule_delayed_work(&priv->fsync_work,
+ msecs_to_jiffies(priv->ieee80211->fsync_time_interval));
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd);
diff --git a/drivers/staging/rtl8192u/r8192U_dm.h b/drivers/staging/rtl8192u/r8192U_dm.h
index 0b2a1c688597..2159018b4e38 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.h
+++ b/drivers/staging/rtl8192u/r8192U_dm.h
@@ -166,7 +166,7 @@ void dm_force_tx_fw_info(struct net_device *dev,
void dm_init_edca_turbo(struct net_device *dev);
void dm_rf_operation_test_callback(unsigned long data);
void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
-void dm_fsync_timer_callback(struct timer_list *t);
+void dm_fsync_work_callback(struct work_struct *work);
void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
void dm_shadow_init(struct net_device *dev);
void dm_initialize_txpower_tracking(struct net_device *dev);
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 0c3ae8495afb..c0982c13ece7 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -323,6 +323,7 @@ int r8712_init_drv_sw(struct _adapter *padapter)
mp871xinit(padapter);
init_default_value(padapter);
r8712_InitSwLeds(padapter);
+ mutex_init(&padapter->mutex_start);
return ret;
}
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index ff3cb09c57a6..30e965c410ff 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -117,34 +117,6 @@ static void r871x_internal_cmd_hdl(struct _adapter *padapter, u8 *pbuf)
kfree(pdrvcmd->pbuf);
}
-static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
-{
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
-
- /* invoke cmd->callback function */
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
- return H2C_SUCCESS;
-}
-
-static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
-{
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
-
- /* invoke cmd->callback function */
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
- return H2C_SUCCESS;
-}
-
static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
@@ -213,14 +185,6 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter,
pcmd_r = NULL;
switch (pcmd->cmdcode) {
- case GEN_CMD_CODE(_Read_MACREG):
- read_macreg_hdl(padapter, (u8 *)pcmd);
- pcmd_r = pcmd;
- break;
- case GEN_CMD_CODE(_Write_MACREG):
- write_macreg_hdl(padapter, (u8 *)pcmd);
- pcmd_r = pcmd;
- break;
case GEN_CMD_CODE(_Read_BBREG):
read_bbreg_hdl(padapter, (u8 *)pcmd);
break;
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index f7c1258eaa39..fef9233cef42 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -570,7 +570,6 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
if (rtl871x_load_fw(padapter))
goto error;
spin_lock_init(&padapter->lock_rx_ff0_filter);
- mutex_init(&padapter->mutex_start);
return 0;
error:
usb_put_dev(udev);
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index 8d93c2f26890..a82114de21a7 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -165,8 +165,6 @@ No irqsave is necessary.
int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
- int res = 0;
-
init_completion(&pcmdpriv->cmd_queue_comp);
init_completion(&pcmdpriv->terminate_cmdthread_comp);
@@ -178,18 +176,16 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->cmd_allocated_buf = rtw_zmalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ);
- if (!pcmdpriv->cmd_allocated_buf) {
- res = -ENOMEM;
- goto exit;
- }
+ if (!pcmdpriv->cmd_allocated_buf)
+ return -ENOMEM;
pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((SIZE_PTR)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ-1));
pcmdpriv->rsp_allocated_buf = rtw_zmalloc(MAX_RSPSZ + 4);
if (!pcmdpriv->rsp_allocated_buf) {
- res = -ENOMEM;
- goto exit;
+ kfree(pcmdpriv->cmd_allocated_buf);
+ return -ENOMEM;
}
pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((SIZE_PTR)(pcmdpriv->rsp_allocated_buf) & 3);
@@ -197,8 +193,8 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->cmd_issued_cnt = pcmdpriv->cmd_done_cnt = pcmdpriv->rsp_cnt = 0;
mutex_init(&pcmdpriv->sctx_mutex);
-exit:
- return res;
+
+ return 0;
}
static void c2h_wk_callback(_workitem *work);
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 81ecfd1a200d..11c3b47b8a22 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -1781,7 +1781,7 @@ static void speakup_con_update(struct vc_data *vc)
{
unsigned long flags;
- if (!speakup_console[vc->vc_num] || spk_parked)
+ if (!speakup_console[vc->vc_num] || spk_parked || !synth)
return;
if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
/* Speakup output, discard */
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 082302944c37..18284c427b7e 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -560,7 +560,7 @@ err_free_rd:
kfree(desc->rd_info);
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->aRD0Ring[i];
device_free_rx_buf(priv, desc);
kfree(desc->rd_info);
@@ -606,7 +606,7 @@ err_free_rd:
kfree(desc->rd_info);
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->aRD1Ring[i];
device_free_rx_buf(priv, desc);
kfree(desc->rd_info);
@@ -670,7 +670,7 @@ static int device_init_td0_ring(struct vnt_private *priv)
return 0;
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->apTD0Rings[i];
kfree(desc->td_info);
}
@@ -710,7 +710,7 @@ static int device_init_td1_ring(struct vnt_private *priv)
return 0;
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->apTD1Rings[i];
kfree(desc->td_info);
}
diff --git a/drivers/staging/wilc1000/wilc_hif.c b/drivers/staging/wilc1000/wilc_hif.c
index 221e3d93db14..22e02fd068b4 100644
--- a/drivers/staging/wilc1000/wilc_hif.c
+++ b/drivers/staging/wilc1000/wilc_hif.c
@@ -441,38 +441,49 @@ out:
void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct cfg80211_crypto_settings *crypto)
{
- struct wilc_join_bss_param *param;
- struct ieee80211_p2p_noa_attr noa_attr;
- u8 rates_len = 0;
- const u8 *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
+ const u8 *ies_data, *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie;
+ struct ieee80211_p2p_noa_attr noa_attr;
+ const struct cfg80211_bss_ies *ies;
+ struct wilc_join_bss_param *param;
+ u8 rates_len = 0;
+ size_t ies_len;
int ret;
- const struct cfg80211_bss_ies *ies = rcu_dereference(bss->ies);
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
return NULL;
+ rcu_read_lock();
+ ies = rcu_dereference(bss->ies);
+ ies_data = kmemdup(ies->data, ies->len, GFP_ATOMIC);
+ if (!ies_data) {
+ rcu_read_unlock();
+ kfree(param);
+ return NULL;
+ }
+ ies_len = ies->len;
+ rcu_read_unlock();
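+ /* ies_data is now a private copy, safe to parse outside the RCU lock */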
+
param->beacon_period = cpu_to_le16(bss->beacon_interval);
param->cap_info = cpu_to_le16(bss->capability);
param->bss_type = WILC_FW_BSS_TYPE_INFRA;
param->ch = ieee80211_frequency_to_channel(bss->channel->center_freq);
ether_addr_copy(param->bssid, bss->bssid);
- ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
+ ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies_data, ies_len);
if (ssid_elm) {
if (ssid_elm[1] <= IEEE80211_MAX_SSID_LEN)
memcpy(param->ssid, ssid_elm + 2, ssid_elm[1]);
}
- tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len);
+ tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies_data, ies_len);
if (tim_elm && tim_elm[1] >= 2)
param->dtim_period = tim_elm[3];
memset(param->p_suites, 0xFF, 3);
memset(param->akm_suites, 0xFF, 3);
- rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len);
+ rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies_data, ies_len);
if (rates_ie) {
rates_len = rates_ie[1];
if (rates_len > WILC_MAX_RATES_SUPPORTED)
@@ -483,7 +494,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
if (rates_len < WILC_MAX_RATES_SUPPORTED) {
supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
- ies->data, ies->len);
+ ies_data, ies_len);
if (supp_rates_ie) {
u8 ext_rates = supp_rates_ie[1];
@@ -498,11 +509,11 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
}
}
- ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len);
+ ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies_data, ies_len);
if (ht_ie)
param->ht_capable = true;
- ret = cfg80211_get_p2p_attr(ies->data, ies->len,
+ ret = cfg80211_get_p2p_attr(ies_data, ies_len,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *)&noa_attr, sizeof(noa_attr));
if (ret > 0) {
@@ -526,7 +537,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
}
wmm_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
- ies->data, ies->len);
+ ies_data, ies_len);
if (wmm_ie) {
struct ieee80211_wmm_param_ie *ie;
@@ -541,13 +552,13 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
wpa_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
- ies->data, ies->len);
+ ies_data, ies_len);
if (wpa_ie) {
param->mode_802_11i = 1;
param->rsn_found = true;
}
- rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len);
+ rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies_data, ies_len);
if (rsn_ie) {
int offset = 8;
@@ -570,6 +581,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
param->akm_suites[i] = crypto->akm_suites[i] & 0xFF;
}
+ kfree(ies_data);
return (void *)param;
}
diff --git a/drivers/staging/wilc1000/wilc_netdev.c b/drivers/staging/wilc1000/wilc_netdev.c
index 508acb8bb089..f34b1f0d3a80 100644
--- a/drivers/staging/wilc1000/wilc_netdev.c
+++ b/drivers/staging/wilc1000/wilc_netdev.c
@@ -717,14 +717,15 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb->dev != ndev) {
netdev_err(ndev, "Packet not destined to this device\n");
- return 0;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC);
if (!tx_data) {
dev_kfree_skb(skb);
netif_wake_queue(ndev);
- return 0;
+ return NETDEV_TX_OK;
}
tx_data->buff = skb->data;
@@ -748,7 +749,7 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
mutex_unlock(&wilc->vif_mutex);
}
- return 0;
+ return NETDEV_TX_OK;
}
static int wilc_mac_close(struct net_device *ndev)
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index c787c5da8f2b..22a30da011e1 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -20,6 +20,7 @@ static const struct sdio_device_id wilc_sdio_ids[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_WILC, SDIO_DEVICE_ID_WILC) },
{ },
};
+MODULE_DEVICE_TABLE(sdio, wilc_sdio_ids);
#define WILC_SDIO_BLOCK_SIZE 512
diff --git a/drivers/staging/xlnx_ctrl_driver/Kconfig b/drivers/staging/xlnx_ctrl_driver/Kconfig
new file mode 100644
index 000000000000..3bff5e6d1aca
--- /dev/null
+++ b/drivers/staging/xlnx_ctrl_driver/Kconfig
@@ -0,0 +1,15 @@
+config XLNX_CTRL_FRMBUF
+ tristate "FB Control driver"
+ help
+ This driver supports the Xilinx Framebuffer Read and Write IP. It
+ is a simple control-plane driver, driven by ioctls from userspace,
+ and does not use a media framework such as V4L2 or DRM, so it does
+ not need to adhere to their APIs.
+
+config XLNX_CTRL_VPSS
+ tristate "VPSS Control driver"
+ help
+ This driver supports the Xilinx VPSS IP. It is a simple
+ control-plane driver, driven by ioctls from userspace, and does not
+ use a media framework such as V4L2 or DRM, so it does not need to
+ adhere to their APIs.
diff --git a/drivers/staging/xlnx_ctrl_driver/MAINTAINERS b/drivers/staging/xlnx_ctrl_driver/MAINTAINERS
new file mode 100644
index 000000000000..bcfd70d359ec
--- /dev/null
+++ b/drivers/staging/xlnx_ctrl_driver/MAINTAINERS
@@ -0,0 +1,4 @@
+XILINX CONTROL DRIVER
+M: Saurabh Sengar <saurabh.singh@xilinx.com>
+S: Maintained
+F: drivers/staging/xlnx_ctrl_driver
diff --git a/drivers/staging/xlnx_ctrl_driver/Makefile b/drivers/staging/xlnx_ctrl_driver/Makefile
new file mode 100644
index 000000000000..312bd1f5d233
--- /dev/null
+++ b/drivers/staging/xlnx_ctrl_driver/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_XLNX_CTRL_FRMBUF) += xlnx_frmb.o
+obj-$(CONFIG_XLNX_CTRL_VPSS) += xlnx_vpss.o
diff --git a/drivers/staging/xlnx_ctrl_driver/xlnx_frmb.c b/drivers/staging/xlnx_ctrl_driver/xlnx_frmb.c
new file mode 100644
index 000000000000..0b36575e493b
--- /dev/null
+++ b/drivers/staging/xlnx_ctrl_driver/xlnx_frmb.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx Framebuffer read control driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabh.singh@xilinx.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+#include <linux/dmaengine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/xlnx_ctrl.h>
+
+/* TODO: clock framework */
+
+#define XFBWR_FB_CTRL 0x00
+#define XFBWR_FB_WIDTH 0x10
+#define XFBWR_FB_HEIGHT 0x18
+#define XFBWR_FB_STRIDE 0x20
+#define XFBWR_FB_COLOR 0x28
+#define XFBWR_FB_PLANE1 0x30
+#define XFBWR_FB_PLANE2 0x3C
+
+#define XFBWR_FB_CTRL_START BIT(0)
+#define XFBWR_FB_CTRL_IDLE BIT(2)
+#define XFBWR_FB_CTRL_RESTART BIT(7)
+#define XFBWR_FB_CTRL_OFF 0
+
+static u64 dma_mask = DMA_BIT_MASK(64);
+
+struct frmb_dmabuf_reg {
+ s32 dmabuf_fd;
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *dbuf_attach;
+ struct sg_table *dbuf_sg_table;
+};
+
+/**
+ * struct frmb_struct - Xilinx framebuffer ctrl object
+ *
+ * @dev: device structure
+ * @db: framebuffer ctrl driver dmabuf structure
+ * @frmb_miscdev: The misc device registered
+ * @regs: Base address of framebuffer IP
+ * @is_fbrd: true for framebuffer read, false for framebuffer write
+ */
+struct frmb_struct {
+ struct device *dev;
+ struct frmb_dmabuf_reg db;
+ struct miscdevice frmb_miscdev;
+ void __iomem *regs;
+ bool is_fbrd;
+};
+
+struct frmb_data {
+ u32 fd;
+ u32 height;
+ u32 width;
+ u32 stride;
+ u32 color;
+ u32 n_planes;
+ u32 offset;
+};
+
+struct match_struct {
+ char name[8];
+ bool is_read;
+};
+
+static const struct match_struct read_struct = {
+ .name = "fbrd",
+ .is_read = true,
+};
+
+static const struct match_struct write_struct = {
+ .name = "fbwr",
+ .is_read = false,
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id frmb_of_match[] = {
+ { .compatible = "xlnx,ctrl-fbwr-1.0", .data = &write_struct},
+ { .compatible = "xlnx,ctrl-fbrd-1.0", .data = &read_struct},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, frmb_of_match);
+
+static inline struct frmb_struct *to_frmb_struct(struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+
+ return container_of(miscdev, struct frmb_struct, frmb_miscdev);
+}
+
+static inline u32 frmb_ior(void __iomem *lp, off_t offset)
+{
+ return readl(lp + offset);
+}
+
+static inline void frmb_iow(void __iomem *lp, off_t offset, u32 value)
+{
+ writel(value, (lp + offset));
+}
+
+static phys_addr_t frmb_add_dmabuf(u32 fd, struct frmb_struct *frmb_g)
+{
+ frmb_g->db.dbuf = dma_buf_get(fd);
+ if (IS_ERR(frmb_g->db.dbuf)) {
+ dev_err(frmb_g->dev, "Failed to get DMA-BUF for fd %u\n", fd);
+ return -EINVAL;
+ }
+ frmb_g->db.dbuf_attach = dma_buf_attach(frmb_g->db.dbuf, frmb_g->dev);
+ if (IS_ERR(frmb_g->db.dbuf_attach)) {
+ dma_buf_put(frmb_g->db.dbuf);
+ dev_err(frmb_g->dev, "Failed DMA-BUF attach\n");
+ return -EINVAL;
+ }
+
+ frmb_g->db.dbuf_sg_table =
+ dma_buf_map_attachment(frmb_g->db.dbuf_attach, DMA_BIDIRECTIONAL);
+
+ if (IS_ERR(frmb_g->db.dbuf_sg_table)) {
+ dev_err(frmb_g->dev, "Failed DMA-BUF map_attachment\n");
+ dma_buf_detach(frmb_g->db.dbuf, frmb_g->db.dbuf_attach);
+ dma_buf_put(frmb_g->db.dbuf);
+ return -EINVAL;
+ }
+
+ return sg_dma_address(frmb_g->db.dbuf_sg_table->sgl);
+}
+
+static void frmb_clear_dmabuf(struct frmb_struct *frmb_g)
+{
+ dma_buf_unmap_attachment(frmb_g->db.dbuf_attach,
+ frmb_g->db.dbuf_sg_table,
+ DMA_BIDIRECTIONAL);
+ dma_buf_detach(frmb_g->db.dbuf, frmb_g->db.dbuf_attach);
+ dma_buf_put(frmb_g->db.dbuf);
+}
+
+static long frmb_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long retval = 0;
+ struct frmb_data data;
+ phys_addr_t phys_y = 0, phys_uv = 0;
+ struct frmb_struct *frmb_g = to_frmb_struct(file);
+
+ switch (cmd) {
+ case XSET_FB_POLL:
+ retval = frmb_ior(frmb_g->regs, XFBWR_FB_CTRL);
+ retval = (retval & XFBWR_FB_CTRL_IDLE) ? 0 : 1;
+ break;
+ case XSET_FB_ENABLE_SNGL:
+ frmb_iow(frmb_g->regs, XFBWR_FB_CTRL, XFBWR_FB_CTRL_START);
+ break;
+ case XSET_FB_ENABLE:
+ frmb_iow(frmb_g->regs, XFBWR_FB_CTRL, XFBWR_FB_CTRL_START);
+ frmb_iow(frmb_g->regs, XFBWR_FB_CTRL,
+ XFBWR_FB_CTRL_RESTART | XFBWR_FB_CTRL_START);
+ break;
+ case XSET_FB_DISABLE:
+ frmb_iow(frmb_g->regs, XFBWR_FB_CTRL, XFBWR_FB_CTRL_OFF);
+ break;
+ case XSET_FB_CONFIGURE:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ frmb_iow(frmb_g->regs, XFBWR_FB_WIDTH, data.width);
+ frmb_iow(frmb_g->regs, XFBWR_FB_HEIGHT, data.height);
+ frmb_iow(frmb_g->regs, XFBWR_FB_STRIDE, data.stride);
+ frmb_iow(frmb_g->regs, XFBWR_FB_COLOR, data.color);
+ break;
+ case XSET_FB_CAPTURE:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ phys_y = frmb_add_dmabuf(data.fd, frmb_g);
+ frmb_iow(frmb_g->regs, XFBWR_FB_PLANE1, phys_y);
+ if (data.n_planes == 2) {
+ phys_uv = phys_y + data.offset;
+ frmb_iow(frmb_g->regs, XFBWR_FB_PLANE2, phys_uv);
+ }
+ break;
+ case XSET_FB_RELEASE:
+ frmb_clear_dmabuf(frmb_g);
+ break;
+ default:
+ retval = -EINVAL;
+ }
+ return retval;
+}
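+
+/*
+ * Typical userspace usage (a sketch; the device path and the dma-buf
+ * fd are assumptions, the XSET_FB_* codes come from <linux/xlnx_ctrl.h>):
+ *
+ *	int fd = open("/dev/fbwr", O_RDWR);
+ *	struct frmb_data data = {
+ *		.width = 1920, .height = 1080,
+ *		.stride = 1920 * 4, .n_planes = 1,
+ *	};
+ *
+ *	ioctl(fd, XSET_FB_CONFIGURE, &data);
+ *	data.fd = dmabuf_fd;	// dma-buf exported by another driver
+ *	ioctl(fd, XSET_FB_CAPTURE, &data);
+ *	ioctl(fd, XSET_FB_ENABLE, &data);
+ */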
+
+static const struct file_operations frmb_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = frmb_ioctl,
+};
+
+static int frmb_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ int ret;
+ struct resource *res_frmb;
+ const struct of_device_id *match;
+ struct frmb_struct *frmb_g;
+ struct gpio_desc *reset_gpio;
+ const struct match_struct *config;
+
+ pdev->dev.dma_mask = &dma_mask;
+ pdev->dev.coherent_dma_mask = dma_mask;
+
+ frmb_g = devm_kzalloc(&pdev->dev, sizeof(*frmb_g), GFP_KERNEL);
+ if (!frmb_g)
+ return -ENOMEM;
+
+ reset_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio)) {
+ ret = PTR_ERR(reset_gpio);
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(&pdev->dev, "No gpio probed, Deferring...\n");
+ else
+ dev_err(&pdev->dev, "No reset gpio info from dts\n");
+ return ret;
+ }
+ gpiod_set_value_cansleep(reset_gpio, 0);
+
+ platform_set_drvdata(pdev, frmb_g);
+ frmb_g->dev = &pdev->dev;
+ res_frmb = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ frmb_g->regs = devm_ioremap_resource(&pdev->dev, res_frmb);
+ if (IS_ERR(frmb_g->regs))
+ return PTR_ERR(frmb_g->regs);
+
+ match = of_match_node(frmb_of_match, node);
+ if (!match)
+ return -ENODEV;
+
+ config = match->data;
+ frmb_g->frmb_miscdev.name = config->name;
+ frmb_g->is_fbrd = config->is_read;
+
+ frmb_g->frmb_miscdev.minor = MISC_DYNAMIC_MINOR;
+ frmb_g->frmb_miscdev.fops = &frmb_fops;
+ frmb_g->frmb_miscdev.parent = NULL;
+ ret = misc_register(&frmb_g->frmb_miscdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "FrameBuffer control driver registration failed!\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "FrameBuffer control driver success!\n");
+
+ return ret;
+}
+
+static int frmb_remove(struct platform_device *pdev)
+{
+ struct frmb_struct *frmb_g = platform_get_drvdata(pdev);
+
+ misc_deregister(&frmb_g->frmb_miscdev);
+ return 0;
+}
+
+static struct platform_driver frmb_driver = {
+ .probe = frmb_probe,
+ .remove = frmb_remove,
+ .driver = {
+ .name = "xlnx_ctrl-frmb",
+ .of_match_table = frmb_of_match,
+ },
+};
+
+module_platform_driver(frmb_driver);
+
+MODULE_DESCRIPTION("Xilinx Framebuffer control driver");
+MODULE_AUTHOR("Saurabh Sengar");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/xlnx_ctrl_driver/xlnx_vpss.c b/drivers/staging/xlnx_ctrl_driver/xlnx_vpss.c
new file mode 100644
index 000000000000..017ad0a4cffd
--- /dev/null
+++ b/drivers/staging/xlnx_ctrl_driver/xlnx_vpss.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx VPSS control driver.
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabh.singh@xilinx.com>
+ */
+
+/* TODO: clock framework */
+
+#include <linux/fs.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/xlnx_ctrl.h>
+
+/* VPSS block offset */
+#define XHSCALER_OFFSET 0
+#define XSAXIS_RST_OFFSET 0x10000
+#define XVSCALER_OFFSET 0x20000
+
+#define XVPSS_GPIO_CHAN 8
+
+#define XVPSS_MAX_WIDTH 3840
+#define XVPSS_MAX_HEIGHT 2160
+
+#define XVPSS_STEPPREC 65536
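+/* i.e. scaling step ratios are held in 16.16 fixed point (1.0 == 65536) */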
+
+/* Video IP PPC */
+#define XVPSS_PPC_1 1
+#define XVPSS_PPC_2 2
+
+#define XVPSS_MAX_TAPS 12
+#define XVPSS_PHASES 64
+#define XVPSS_TAPS_6 6
+
+/* Mask definitions for Low and high 16 bits in a 32 bit number */
+#define XVPSS_MASK_LOW_16BITS GENMASK(15, 0)
+#define XVPSS_MASK_LOW_32BITS GENMASK(31, 0)
+#define XVPSS_STEP_PRECISION_SHIFT (16)
+#define XVPSS_PHASE_SHIFT_BY_6 (6)
+#define XVPSS_PHASE_MULTIPLIER (9)
+#define XVPSS_BITSHIFT_16 (16)
+
+/* VPSS AP Control Registers */
+#define XVPSS_START BIT(0)
+#define XVPSS_RESTART BIT(7)
+#define XVPSS_STREAM_ON (XVPSS_START | XVPSS_RESTART)
+
+/* H-scaler registers */
+#define XVPSS_H_AP_CTRL (0x0000)
+#define XVPSS_H_GIE (0x0004)
+#define XVPSS_H_IER (0x0008)
+#define XVPSS_H_ISR (0x000c)
+#define XVPSS_H_HEIGHT (0x0010)
+#define XVPSS_H_WIDTHIN (0x0018)
+#define XVPSS_H_WIDTHOUT (0x0020)
+#define XVPSS_H_COLOR (0x0028)
+#define XVPSS_H_PIXELRATE (0x0030)
+#define XVPSS_H_COLOROUT (0X0038)
+#define XVPSS_H_HFLTCOEFF_BASE (0x0800)
+#define XVPSS_H_HFLTCOEFF_HIGH (0x0bff)
+#define XVPSS_H_PHASESH_V_BASE (0x2000)
+#define XVPSS_H_PHASESH_V_HIGH (0x3fff)
+
+/* H-scaler masks */
+#define XVPSS_PHASESH_WR_EN BIT(8)
+
+/* V-scaler registers */
+#define XVPSS_V_AP_CTRL (0x000)
+#define XVPSS_V_GIE (0x004)
+#define XVPSS_V_IER (0x008)
+#define XVPSS_V_ISR (0x00c)
+#define XVPSS_V_HEIGHTIN (0x010)
+#define XVPSS_V_WIDTH (0x018)
+#define XVPSS_V_HEIGHTOUT (0x020)
+#define XVPSS_V_LINERATE (0x028)
+#define XVPSS_V_COLOR (0x030)
+#define XVPSS_V_VFLTCOEFF_BASE (0x800)
+#define XVPSS_V_VFLTCOEFF_HIGH (0xbff)
+
+#define XVPSS_GPIO_RST_SEL 1
+#define XVPSS_GPIO_VIDEO_IN BIT(0)
+#define XVPSS_RST_IP_AXIS BIT(1)
+#define XVPSS_GPIO_MASK_ALL (XVPSS_GPIO_VIDEO_IN | XVPSS_RST_IP_AXIS)
+
+enum xvpss_color {
+ XVPSS_YUV_RGB,
+ XVPSS_YUV_444,
+ XVPSS_YUV_422,
+ XVPSS_YUV_420,
+};
+
+/* VPSS coefficients for 6 tap filters */
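+/* 64 phases x 6 taps; each row sums to 4096, i.e. unity gain in Q12 */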
+static const u16
+xvpss_coeff_taps6[XVPSS_PHASES][XVPSS_TAPS_6] = {
+ { -132, 236, 3824, 236, -132, 64, },
+ { -116, 184, 3816, 292, -144, 64, },
+ { -100, 132, 3812, 348, -160, 64, },
+ { -88, 84, 3808, 404, -176, 64, },
+ { -72, 36, 3796, 464, -192, 64, },
+ { -60, -8, 3780, 524, -208, 68, },
+ { -48, -52, 3768, 588, -228, 68, },
+ { -32, -96, 3748, 652, -244, 68, },
+ { -20, -136, 3724, 716, -260, 72, },
+ { -8, -172, 3696, 784, -276, 72, },
+ { 0, -208, 3676, 848, -292, 72, },
+ { 12, -244, 3640, 920, -308, 76, },
+ { 20, -276, 3612, 988, -324, 76, },
+ { 32, -304, 3568, 1060, -340, 80, },
+ { 40, -332, 3532, 1132, -356, 80, },
+ { 48, -360, 3492, 1204, -372, 84, },
+ { 56, -384, 3448, 1276, -388, 88, },
+ { 64, -408, 3404, 1352, -404, 88, },
+ { 72, -428, 3348, 1428, -416, 92, },
+ { 76, -448, 3308, 1500, -432, 92, },
+ { 84, -464, 3248, 1576, -444, 96, },
+ { 88, -480, 3200, 1652, -460, 96, },
+ { 92, -492, 3140, 1728, -472, 100, },
+ { 96, -504, 3080, 1804, -484, 104, },
+ { 100, -516, 3020, 1880, -492, 104, },
+ { 104, -524, 2956, 1960, -504, 104, },
+ { 104, -532, 2892, 2036, -512, 108, },
+ { 108, -540, 2832, 2108, -520, 108, },
+ { 108, -544, 2764, 2184, -528, 112, },
+ { 112, -544, 2688, 2260, -532, 112, },
+ { 112, -548, 2624, 2336, -540, 112, },
+ { 112, -548, 2556, 2408, -544, 112, },
+ { 112, -544, 2480, 2480, -544, 112, },
+ { 112, -544, 2408, 2556, -548, 112, },
+ { 112, -540, 2336, 2624, -548, 112, },
+ { 112, -532, 2260, 2688, -544, 112, },
+ { 112, -528, 2184, 2764, -544, 108, },
+ { 108, -520, 2108, 2832, -540, 108, },
+ { 108, -512, 2036, 2892, -532, 104, },
+ { 104, -504, 1960, 2956, -524, 104, },
+ { 104, -492, 1880, 3020, -516, 100, },
+ { 104, -484, 1804, 3080, -504, 96, },
+ { 100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+/**
+ * struct xvpss_struct - Xilinx VPSS ctrl object
+ *
+ * @dev: device structure
+ * @xvpss_miscdev: The misc device registered
+ * @regs: Base address of VPSS
+ * @n_taps: number of horizontal/vertical taps
+ * @ppc: Pixels per Clock cycle the IP operates upon
+ * @is_polyphase: True for polyphase, else false
+ * @vpss_coeff: The complete array of H-scaler/V-scaler coefficients
+ * @H_phases: The phases needed to program the H-scaler for different taps
+ * @reset_gpio: GPIO reset line to bring VPSS Scaler out of reset
+ */
+struct xvpss_struct {
+ struct device *dev;
+ struct miscdevice xvpss_miscdev;
+ void __iomem *regs;
+	u32 n_taps;
+	u32 ppc;
+ bool is_polyphase;
+ short vpss_coeff[XVPSS_PHASES][XVPSS_MAX_TAPS];
+ u32 H_phases[XVPSS_MAX_WIDTH];
+ struct gpio_desc *reset_gpio;
+};
+
+struct xvpss_data {
+ u32 height_in;
+ u32 width_in;
+ u32 height_out;
+ u32 width_out;
+ u32 color_in;
+ u32 color_out;
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id xvpss_of_match[] = {
+ { .compatible = "xlnx,ctrl-xvpss-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xvpss_of_match);
+
+static inline struct xvpss_struct *to_xvpss_struct(struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+
+ return container_of(miscdev, struct xvpss_struct, xvpss_miscdev);
+}
+
+static inline u32 xvpss_ior(void __iomem *lp, off_t offset)
+{
+ return readl(lp + offset);
+}
+
+static inline void xvpss_iow(void __iomem *lp, off_t offset, u32 value)
+{
+ writel(value, (lp + offset));
+}
+
+static inline void xvpss_clr(void __iomem *base, u32 offset, u32 clr)
+{
+ xvpss_iow(base, offset, xvpss_ior(base, offset) & ~clr);
+}
+
+static inline void xvpss_set(void __iomem *base, u32 offset, u32 set)
+{
+ xvpss_iow(base, offset, xvpss_ior(base, offset) | set);
+}
+
+static inline void xvpss_disable_block(struct xvpss_struct *xvpss_g,
+ u32 channel, u32 ip_block)
+{
+ xvpss_clr(xvpss_g->regs, ((channel - 1) * XVPSS_GPIO_CHAN) +
+ XSAXIS_RST_OFFSET, ip_block);
+}
+
+static inline void
+xvpss_enable_block(struct xvpss_struct *xvpss_g, u32 channel, u32 ip_block)
+{
+ xvpss_set(xvpss_g->regs, ((channel - 1) * XVPSS_GPIO_CHAN) +
+ XSAXIS_RST_OFFSET, ip_block);
+}
+
+static void xvpss_reset(struct xvpss_struct *xvpss_g)
+{
+ xvpss_disable_block(xvpss_g, XVPSS_GPIO_RST_SEL, XVPSS_GPIO_MASK_ALL);
+ xvpss_enable_block(xvpss_g, XVPSS_GPIO_RST_SEL, XVPSS_RST_IP_AXIS);
+}
+
+static void xvpss_enable(struct xvpss_struct *xvpss_g)
+{
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET +
+ XVPSS_H_AP_CTRL, XVPSS_STREAM_ON);
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET +
+ XVPSS_V_AP_CTRL, XVPSS_STREAM_ON);
+ xvpss_enable_block(xvpss_g, XVPSS_GPIO_RST_SEL, XVPSS_RST_IP_AXIS);
+}
+
+static void xvpss_disable(struct xvpss_struct *xvpss_g)
+{
+ xvpss_disable_block(xvpss_g, XVPSS_GPIO_RST_SEL, XVPSS_GPIO_MASK_ALL);
+}
+
+static void xvpss_set_input(struct xvpss_struct *xvpss_g,
+ u32 width, u32 height, u32 color)
+{
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET + XVPSS_V_HEIGHTIN, height);
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET + XVPSS_V_WIDTH, width);
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_WIDTHIN, width);
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET + XVPSS_V_COLOR, color);
+}
+
+static void xvpss_set_output(struct xvpss_struct *xvpss_g, u32 width,
+ u32 height, u32 color)
+{
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET + XVPSS_V_HEIGHTOUT, height);
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_HEIGHT, height);
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_WIDTHOUT, width);
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_COLOROUT, color);
+}
+
+static void xvpss_load_ext_coeff(struct xvpss_struct *xvpss_g,
+ const short *coeff, u32 ntaps)
+{
+ unsigned int i, j, pad, offset;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XVPSS_MAX_TAPS - ntaps;
+ offset = pad >> 1;
+ /* Load coefficients into vpss coefficient table */
+ for (i = 0; i < XVPSS_PHASES; i++) {
+ for (j = 0; j < ntaps; ++j)
+ xvpss_g->vpss_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+ if (pad) {
+ for (i = 0; i < XVPSS_PHASES; i++) {
+ for (j = 0; j < offset; j++)
+ xvpss_g->vpss_coeff[i][j] = 0;
+ j = ntaps + offset;
+ for (; j < XVPSS_MAX_TAPS; j++)
+ xvpss_g->vpss_coeff[i][j] = 0;
+ }
+ }
+}
+
+static void xvpss_select_coeff(struct xvpss_struct *xvpss_g)
+{
+ const short *coeff;
+ u32 ntaps;
+
+ coeff = &xvpss_coeff_taps6[0][0];
+ ntaps = XVPSS_TAPS_6;
+
+ xvpss_load_ext_coeff(xvpss_g, coeff, ntaps);
+}
+
+static void xvpss_set_coeff(struct xvpss_struct *xvpss_g)
+{
+ u32 nphases = XVPSS_PHASES;
+ u32 ntaps = xvpss_g->n_taps;
+ int val, i, j, offset, rd_indx;
+ u32 v_addr, h_addr;
+
+ offset = (XVPSS_MAX_TAPS - ntaps) / 2;
+ v_addr = XVSCALER_OFFSET + XVPSS_V_VFLTCOEFF_BASE;
+ h_addr = XHSCALER_OFFSET + XVPSS_H_HFLTCOEFF_BASE;
+
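+	/*
+	 * The coefficient RAM is 32 bits wide: two adjacent 16-bit
+	 * coefficients are packed per word, the even tap in the lower
+	 * half-word.
+	 */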
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (xvpss_g->vpss_coeff[i][rd_indx + 1] <<
+ XVPSS_BITSHIFT_16) | (xvpss_g->vpss_coeff[i][rd_indx] &
+ XVPSS_MASK_LOW_16BITS);
+ xvpss_iow(xvpss_g->regs, v_addr +
+ ((i * ntaps / 2 + j) * 4), val);
+ xvpss_iow(xvpss_g->regs, h_addr +
+ ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+static void xvpss_h_calculate_phases(struct xvpss_struct *xvpss_g,
+ u32 width_in, u32 width_out,
+ u32 pixel_rate)
+{
+ unsigned int loop_width, x, s, nphases = XVPSS_PHASES;
+ unsigned int nppc = xvpss_g->ppc;
+ unsigned int shift = XVPSS_STEP_PRECISION_SHIFT - ilog2(nphases);
+	int offset = 0, xwrite_pos = 0, nr_rds = 0, nr_rds_clck;
+ bool output_write_en, get_new_pix;
+ u64 phaseH;
+ u32 array_idx = 0;
+
+ loop_width = max_t(u32, width_in, width_out);
+ loop_width = ALIGN(loop_width + nppc - 1, nppc);
+
+ memset(xvpss_g->H_phases, 0, sizeof(xvpss_g->H_phases));
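+	/*
+	 * Step across the line in Q16.16 fixed point: 'offset' advances by
+	 * pixel_rate for every output pixel written and a new input pixel
+	 * is consumed each time it crosses 1.0 (1 << 16). The top six
+	 * fractional bits select one of the 64 polyphase filter phases.
+	 */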
+ for (x = 0; x < loop_width; x++) {
+ nr_rds_clck = 0;
+ for (s = 0; s < nppc; s++) {
+ phaseH = (offset >> shift) & (nphases - 1);
+ get_new_pix = false;
+ output_write_en = false;
+ if ((offset >> XVPSS_STEP_PRECISION_SHIFT) != 0) {
+ get_new_pix = true;
+ offset -= (1 << XVPSS_STEP_PRECISION_SHIFT);
+ array_idx++;
+ }
+
+ if (((offset >> XVPSS_STEP_PRECISION_SHIFT) == 0) &&
+ xwrite_pos < width_out) {
+ offset += pixel_rate;
+ output_write_en = true;
+ xwrite_pos++;
+ }
+
+ xvpss_g->H_phases[x] |= (phaseH <<
+ (s * XVPSS_PHASE_MULTIPLIER));
+ xvpss_g->H_phases[x] |= (array_idx <<
+ (XVPSS_PHASE_SHIFT_BY_6 +
+ (s * XVPSS_PHASE_MULTIPLIER)));
+ if (output_write_en) {
+ xvpss_g->H_phases[x] |= (XVPSS_PHASESH_WR_EN <<
+ (s * XVPSS_PHASE_MULTIPLIER));
+ }
+
+ if (get_new_pix)
+ nr_rds_clck++;
+ }
+ if (array_idx >= nppc)
+ array_idx &= (nppc - 1);
+
+ nr_rds += nr_rds_clck;
+ if (nr_rds >= nppc)
+ nr_rds -= nppc;
+ }
+}
+
+static void xvpss_h_set_phases(struct xvpss_struct *xvpss_g)
+{
+ u32 loop_width, index, val, offset, i, lsb, msb;
+
+ loop_width = XVPSS_MAX_WIDTH / xvpss_g->ppc;
+ offset = XHSCALER_OFFSET + XVPSS_H_PHASESH_V_BASE;
+
+ switch (xvpss_g->ppc) {
+ case XVPSS_PPC_1:
+ index = 0;
+ for (i = 0; i < loop_width; i += 2) {
+ lsb = xvpss_g->H_phases[i] & XVPSS_MASK_LOW_16BITS;
+ msb = xvpss_g->H_phases[i + 1] & XVPSS_MASK_LOW_16BITS;
+ val = (msb << 16 | lsb);
+ xvpss_iow(xvpss_g->regs, offset +
+ (index * 4), val);
+ ++index;
+ }
+ return;
+ case XVPSS_PPC_2:
+ for (i = 0; i < loop_width; i++) {
+ val = (xvpss_g->H_phases[i] & XVPSS_MASK_LOW_32BITS);
+ xvpss_iow(xvpss_g->regs, offset + (i * 4), val);
+ }
+ return;
+ }
+}
+
+static void xvpss_algo_config(struct xvpss_struct *xvpss_g,
+ struct xvpss_data data)
+{
+ u32 pxl_rate, line_rate;
+ u32 width_in = data.width_in;
+ u32 width_out = data.width_out;
+ u32 height_in = data.height_in;
+ u32 height_out = data.height_out;
+
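+	/* Scaling ratios are Q16.16 fixed point: rate = (in / out) << 16. */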
+ line_rate = (height_in * XVPSS_STEPPREC) / height_out;
+
+ if (xvpss_g->is_polyphase) {
+ xvpss_select_coeff(xvpss_g);
+ xvpss_set_coeff(xvpss_g);
+ }
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET + XVPSS_V_LINERATE, line_rate);
+ pxl_rate = (width_in * XVPSS_STEPPREC) / width_out;
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_PIXELRATE, pxl_rate);
+
+ xvpss_h_calculate_phases(xvpss_g, width_in, width_out, pxl_rate);
+ xvpss_h_set_phases(xvpss_g);
+}
+
+static long xvpss_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long retval = 0;
+ struct xvpss_data data;
+ struct xvpss_struct *xvpss_g = to_xvpss_struct(file);
+ u32 hcol;
+
+ switch (cmd) {
+ case XVPSS_SET_CONFIGURE:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+			retval = -EFAULT;
+			goto end;
+		}
+		if (!data.width_in || !data.height_in ||
+		    !data.width_out || !data.height_out ||
+		    data.width_in > XVPSS_MAX_WIDTH ||
+		    data.width_out > XVPSS_MAX_WIDTH ||
+		    data.height_in > XVPSS_MAX_HEIGHT ||
+		    data.height_out > XVPSS_MAX_HEIGHT) {
+			retval = -EINVAL;
+			goto end;
+		}
+ xvpss_reset(xvpss_g);
+ xvpss_set_input(xvpss_g, data.width_in, data.height_in,
+ data.color_in);
+ hcol = data.color_in;
+ if (hcol == XVPSS_YUV_420)
+ hcol = XVPSS_YUV_422;
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_COLOR, hcol);
+ xvpss_set_output(xvpss_g, data.width_out, data.height_out,
+ data.color_out);
+ xvpss_algo_config(xvpss_g, data);
+ break;
+ case XVPSS_SET_ENABLE:
+ xvpss_enable(xvpss_g);
+ break;
+ case XVPSS_SET_DISABLE:
+ xvpss_disable(xvpss_g);
+ break;
+ default:
+ retval = -EINVAL;
+ }
+end:
+ return retval;
+}
+
+static const struct file_operations xvpss_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = xvpss_ioctl,
+};
+
+static int xvpss_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *res;
+ struct xvpss_struct *xvpss_g;
+ struct device_node *node;
+
+ xvpss_g = devm_kzalloc(&pdev->dev, sizeof(*xvpss_g), GFP_KERNEL);
+ if (!xvpss_g)
+ return -ENOMEM;
+
+ xvpss_g->reset_gpio = devm_gpiod_get(&pdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(xvpss_g->reset_gpio)) {
+ ret = PTR_ERR(xvpss_g->reset_gpio);
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(&pdev->dev, "No gpio probed, Deferring...\n");
+ else
+ dev_err(&pdev->dev, "No reset gpio info from dts\n");
+ return ret;
+ }
+ gpiod_set_value_cansleep(xvpss_g->reset_gpio, 0);
+
+	platform_set_drvdata(pdev, xvpss_g);
+ xvpss_g->dev = &pdev->dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xvpss_g->regs = devm_ioremap_resource(xvpss_g->dev, res);
+ if (IS_ERR(xvpss_g->regs))
+ return PTR_ERR(xvpss_g->regs);
+
+ node = pdev->dev.of_node;
+ ret = of_property_read_u32(node, "xlnx,vpss-taps", &xvpss_g->n_taps);
+ if (ret < 0) {
+ dev_err(xvpss_g->dev, "taps not present in DT\n");
+ return ret;
+ }
+
+ switch (xvpss_g->n_taps) {
+ case 2:
+ case 4:
+ break;
+ case 6:
+ xvpss_g->is_polyphase = true;
+ break;
+ default:
+ dev_err(xvpss_g->dev, "taps value not supported\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,vpss-ppc", &xvpss_g->ppc);
+ if (ret < 0) {
+ dev_err(xvpss_g->dev, "PPC is missing in DT\n");
+ return ret;
+ }
+ if (xvpss_g->ppc != XVPSS_PPC_1 && xvpss_g->ppc != XVPSS_PPC_2) {
+ dev_err(xvpss_g->dev, "Unsupported ppc: %d", xvpss_g->ppc);
+ return -EINVAL;
+ }
+
+ xvpss_g->xvpss_miscdev.minor = MISC_DYNAMIC_MINOR;
+ xvpss_g->xvpss_miscdev.name = "xvpss";
+ xvpss_g->xvpss_miscdev.fops = &xvpss_fops;
+ ret = misc_register(&xvpss_g->xvpss_miscdev);
+ if (ret < 0) {
+ pr_err("Xilinx VPSS registration failed!\n");
+ return ret;
+ }
+
+ dev_info(xvpss_g->dev, "Xlnx VPSS control driver initialized!\n");
+
+ return ret;
+}
+
+static int xvpss_remove(struct platform_device *pdev)
+{
+ struct xvpss_struct *xvpss_g = platform_get_drvdata(pdev);
+
+ misc_deregister(&xvpss_g->xvpss_miscdev);
+ return 0;
+}
+
+static struct platform_driver xvpss_driver = {
+ .probe = xvpss_probe,
+ .remove = xvpss_remove,
+ .driver = {
+ .name = "xlnx_vpss",
+ .of_match_table = xvpss_of_match,
+ },
+};
+
+module_platform_driver(xvpss_driver);
+
+MODULE_DESCRIPTION("Xilinx VPSS control driver");
+MODULE_AUTHOR("Saurabh Sengar");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/xlnx_ernic/Kconfig b/drivers/staging/xlnx_ernic/Kconfig
new file mode 100644
index 000000000000..2d83fea0f3b9
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/Kconfig
@@ -0,0 +1,4 @@
+config ERNIC
+ tristate "Xilinx ERNIC driver"
+ help
+	  Driver for the Xilinx Embedded Remote DMA (RDMA) Enabled NIC.
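+
+	  To compile this driver as a module, choose M here: the module
+	  will be called xernic.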
diff --git a/drivers/staging/xlnx_ernic/MAINTAINERS b/drivers/staging/xlnx_ernic/MAINTAINERS
new file mode 100644
index 000000000000..0355f5d3320f
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/MAINTAINERS
@@ -0,0 +1,4 @@
+XILINX EMBEDDED REMOTE DMA ENABLED NIC
+M: Sandeep Dhanvada <sandeep.dhanvada@xilinx.com>
+S: Maintained
+F: drivers/staging/xlnx_ernic
diff --git a/drivers/staging/xlnx_ernic/Makefile b/drivers/staging/xlnx_ernic/Makefile
new file mode 100644
index 000000000000..564933fa42d7
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/Makefile
@@ -0,0 +1,7 @@
+#TODO: Need to remove these flags and fix compilation warnings.
+ccflags-y := -Wno-incompatible-pointer-types -Wno-packed-bitfield-compat
+
+obj-$(CONFIG_ERNIC) += xernic.o
+obj-$(CONFIG_ERNIC) += xernic_bw_test.o
+
+xernic-objs := xmain.o xcm.o xqp.o xmr.o
diff --git a/drivers/staging/xlnx_ernic/dt-binding.txt b/drivers/staging/xlnx_ernic/dt-binding.txt
new file mode 100644
index 000000000000..2a9d098125b7
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/dt-binding.txt
@@ -0,0 +1,29 @@
+Xilinx Embedded RDMA NIC (ERNIC)
+--------------------------------
+
+The Xilinx Embedded Remote DMA (RDMA) NIC is an implementation of
+RDMA over Converged Ethernet (RoCEv2) enabled NIC functionality.
+
+Features supported by ERNIC are:
+1. Both IPv4 and IPv6.
+2. 100 Gb/s data path.
+3. Incoming and outgoing RDMA READ, RDMA WRITE and RDMA SEND.
+
+Required properties:
+- compatible : Must contain "xlnx,ernic-1.0".
+- interrupts: Contains the interrupt line numbers.
+- reg: Physical base address and length of the registers set for the device.
+
+Example:
+
+ernic_0: ernic@84000000 {
+ compatible = "xlnx,ernic-1.0";
+ interrupts = <4 2
+ 5 2
+ 6 2
+ 7 2
+ 8 2
+ 9 2
+ 10 2
+ 11 2
+ 12 2>;
+ reg = <0x84000000 0x40000>;
+};
diff --git a/drivers/staging/xlnx_ernic/xcm.c b/drivers/staging/xlnx_ernic/xcm.c
new file mode 100644
index 000000000000..64d102e540b4
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xcm.c
@@ -0,0 +1,1962 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#include "xcommon.h"
+
+unsigned int psn_num;
+unsigned int mad_tid = 0x11223344;
+/*****************************************************************************/
+
+/**
+ * xrnic_cm_prepare_mra() - Prepares Message Receipt Acknowledgment packet
+ * @qp_attr: qp info for which mra packet is prepared
+ * @msg : message being MRAed. 0x0- REQ, 0x1-REP, 0x2-LAP
+ * @rq_buf: Buffer to store the message
+ */
+static void xrnic_cm_prepare_mra(struct xrnic_qp_attr *qp_attr,
+ enum xrnic_msg_mra msg, void *rq_buf)
+{
+ struct mra *mra;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ mra = (struct mra *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(MSG_RSP_ACK);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ mra = (struct mra *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(MSG_RSP_ACK);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ mra->local_cm_id = qp_attr->local_cm_id;
+ mra->remote_comm_id = qp_attr->remote_cm_id;
+ pr_info("[%d %s] remote_comm_id 0%x\n", __LINE__, __func__,
+ mra->remote_comm_id);
+ mra->message_mraed = msg;
+ mra->service_timeout = XRNIC_MRA_SERVICE_TIMEOUT;
+	/* 4.096 µs * 2 Service Timeout */
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_prepare_rep() - Prepares Reply packet
+ * @qp_attr: qp info for which reply packet is prepared
+ * @rq_buf: Buffer to store the data indicating the acceptance
+ */
+static void xrnic_cm_prepare_rep(struct xrnic_qp_attr *qp_attr, void *rq_buf)
+{
+ struct rdma_qp_attr *rdma_qp_attr = (struct rdma_qp_attr *)
+ &((struct xrnic_reg_map *)xrnic_dev->xrnic_mmap.xrnic_regs)
+ ->rdma_qp_attr[qp_attr->qp_num - 2];
+ struct ethhdr_t *eth_hdr;
+ struct ipv4hdr *ipv4 = NULL;
+ struct ipv6hdr *ipv6 = NULL;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4 = NULL;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6 = NULL;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+ struct rep *rep;
+ struct req *req;
+ unsigned short temp;
+ unsigned char rq_opcode;
+ unsigned int config_value, start_psn_value;
+ struct xrnic_rdma_cm_id *cm_id = qp_attr->cm_id;
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ rep = (struct rep *)&send_sgl_temp_ipv4->mad.data;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv4);
+ ipv4 = (struct ipv4hdr *)
+ ((char *)recv_qp_pkt_ipv4 + XRNIC_ETH_HLEN);
+ req = (struct req *)&recv_qp_pkt_ipv4->mad.data;
+ temp = htons(CONNECT_REPLY);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ rep = (struct rep *)&send_sgl_temp_ipv6->mad.data;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv6);
+ ipv6 = (struct ipv6hdr *)
+ ((char *)recv_qp_pkt_ipv6 + XRNIC_ETH_HLEN);
+ req = (struct req *)&recv_qp_pkt_ipv6->mad.data;
+ temp = htons(CONNECT_REPLY);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ DEBUG_LOG("qp_num:%x\n", qp_attr->qp_num);
+
+ rep->local_cm_id = qp_attr->local_cm_id;
+ rep->remote_comm_id = qp_attr->remote_cm_id;
+
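+	/* The 24-bit QPN is carried big-endian on the wire; swap its bytes. */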
+ rep->local_qpn = ((qp_attr->qp_num >> 16) & 0xFF) |
+ (((qp_attr->qp_num >> 8) & 0xFF) << 8) |
+ ((qp_attr->qp_num & 0xFF) << 16);
+ DEBUG_LOG("local_qpn %d qp_num %d\n",
+ rep->local_qpn, qp_attr->qp_num);
+
+ memcpy((void *)rep->private_data,
+ (void *)&cm_id->conn_param.private_data,
+ cm_id->conn_param.private_data_len);
+
+ DEBUG_LOG("cm_id->conn_param.private_data_len %d\n",
+ cm_id->conn_param.private_data_len);
+ DEBUG_LOG("cm_id->conn_param.responder_resources %d\n",
+ cm_id->conn_param.responder_resources);
+ DEBUG_LOG("cm_id->conn_param.initiator_depth %d\n",
+ cm_id->conn_param.initiator_depth);
+ DEBUG_LOG("cm_id->conn_param.flow_control %d\n",
+ cm_id->conn_param.flow_control);
+ DEBUG_LOG("cm_id->conn_param.retry_count %d\n",
+ cm_id->conn_param.retry_count);
+ DEBUG_LOG("cm_id->conn_param.rnr_retry_count %d\n",
+ cm_id->conn_param.rnr_retry_count);
+
+	/* Initiator depth is not required for target. */
+ rep->initiator_depth = cm_id->conn_param.initiator_depth;
+ rep->responder_resources = cm_id->conn_param.responder_resources;
+ rep->end_end_flow_control = cm_id->conn_param.flow_control;
+ rep->rnr_retry_count = cm_id->conn_param.rnr_retry_count;
+ rep->target_ack_delay = XRNIC_REP_TARGET_ACK_DELAY;
+ rep->fail_over_accepted = XRNIC_REP_FAIL_OVER_ACCEPTED;
+
+ DEBUG_LOG("req->initiator_depth %x\n", rep->initiator_depth);
+ DEBUG_LOG("rep->responder_resources %x\n", rep->responder_resources);
+
+ rep->sqr = XRNIC_REQ_SRQ;
+ rep->local_ca_guid[0] = 0x7c;
+ rep->local_ca_guid[1] = 0xfe;
+ rep->local_ca_guid[2] = 0x90;
+ rep->local_ca_guid[3] = 0x03;
+ rep->local_ca_guid[4] = 0x00;
+ rep->local_ca_guid[5] = 0xb8;
+ rep->local_ca_guid[6] = 0x57;
+ rep->local_ca_guid[7] = 0x70;
+
+ qp_attr->remote_qpn = req->local_qpn;
+
+ DEBUG_LOG("local_qpn [0x%x] [%d]\n", req->local_qpn,
+ ntohl(req->local_qpn));
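+	/* Convert the wire-order (big-endian) 24-bit QPN to host order. */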
+ config_value = ((req->local_qpn & 0xFF) << 16)
+ | (((req->local_qpn >> 8) & 0xFF) << 8)
+ | ((req->local_qpn >> 16) & 0xFF);
+
+ pr_info("config_value:%d req->local_qpn %d qp_attr->remote_qpn %d\n",
+ config_value, req->local_qpn, qp_attr->remote_qpn);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->dest_qp_conf)));
+
+ /* Set the MAC address */
+ config_value = eth_hdr->h_source[5] | (eth_hdr->h_source[4] << 8) |
+ (eth_hdr->h_source[3] << 16) |
+ (eth_hdr->h_source[2] << 24);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->mac_dest_addr_lsb)));
+ DEBUG_LOG("mac_xrnic_src_addr_lsb->0x%x\n", config_value);
+
+ config_value = eth_hdr->h_source[1] | (eth_hdr->h_source[0] << 8);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->mac_dest_addr_msb)));
+ DEBUG_LOG("mac_xrnic_src_addr_msb->0x%x\n", config_value);
+
+ config_value = 0;
+ DEBUG_LOG("req->start_psn:%x %x %x\n", req->start_psn[0],
+ req->start_psn[1], req->start_psn[2]);
+ config_value = (req->start_psn[2] | (req->start_psn[1] << 8) |
+ (req->start_psn[0] << 16));
+ DEBUG_LOG("req->start psn 0x%x\n", config_value);
+ start_psn_value = config_value;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_psn)));
+ memcpy(rep->start_psn, req->start_psn, 3);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ config_value = ipv4->src_addr;
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr1)));
+ config_value = ioread32((void *)&rdma_qp_attr->ip_dest_addr1);
+ DEBUG_LOG("read ipaddress:%x\n", config_value);
+ } else {
+ config_value = ipv6->saddr.in6_u.u6_addr32[3];
+ DEBUG_LOG("ipaddress1:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr1)));
+ config_value = ipv6->saddr.in6_u.u6_addr32[2];
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr2)));
+ config_value = ipv6->saddr.in6_u.u6_addr32[1];
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr3)));
+ config_value = ipv6->saddr.in6_u.u6_addr32[0];
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr4)));
+ config_value = ioread32((void *)&rdma_qp_attr->qp_conf);
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ DEBUG_LOG("read ipaddress:%x\n", config_value);
+ }
+ rq_opcode = XRNIC_RDMA_READ;
+ config_value = ((start_psn_value - 1) | (rq_opcode << 24));
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->last_rq_req)));
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_prepare_rej() - Prepares Reject packet
+ * @qp_attr: qp info for which reply packet is prepared
+ * @reason: reason for the rejection
+ * @msg: message whose contents cause sender to reject communication
+ * 0x0-REQ, 0x1-REP, 0x2-No message
+ */
+void xrnic_cm_prepare_rej(struct xrnic_qp_attr *qp_attr,
+ enum xrnic_rej_reason reason, enum xrnic_msg_rej msg)
+{
+ struct rej *rej;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ rej = (struct rej *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(CONNECT_REJECT);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ rej = (struct rej *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(CONNECT_REJECT);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ pr_info("Sending rej\n");
+
+ rej->local_cm_id = qp_attr->local_cm_id;
+ rej->remote_comm_id = qp_attr->remote_cm_id;
+ rej->message_rejected = msg;
+ rej->reason = htons(reason);
+ rej->reject_info_length = XRNIC_REJ_INFO_LEN;
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_prepare_initial_headers() - Prepares the response headers from
+ * the received packet
+ * @qp_attr: qp info on which the response is sent
+ * @rq_buf: receive queue buffer
+ */
+void xrnic_prepare_initial_headers(struct xrnic_qp_attr *qp_attr, void *rq_buf)
+{
+ struct mad *mad;
+ unsigned char temp;
+ struct ethhdr_t *eth_hdr;
+ struct ipv4hdr *ipv4;
+ struct ipv6hdr *ipv6;
+ struct udphdr *udp;
+ struct bth *bthp;
+ struct deth *dethp;
+ unsigned short *ipv4_hdr_ptr;
+ unsigned int ipv4_hdr_chksum;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ int i;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv4);
+ ipv4 = (struct ipv4hdr *)
+ ((char *)recv_qp_pkt_ipv4 + XRNIC_ETH_HLEN);
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+		/* In the ethernet header swap source and destination MAC */
+ memcpy(send_sgl_temp_ipv4->eth.h_source,
+ eth_hdr->h_dest, XRNIC_ETH_ALEN);
+ memcpy(send_sgl_temp_ipv4->eth.h_dest,
+ eth_hdr->h_source, XRNIC_ETH_ALEN);
+ /* Copy the ethernet type field */
+ send_sgl_temp_ipv4->eth.eth_type = eth_hdr->eth_type;
+
+		/* In the IP header swap source IP and destination IP */
+ memcpy(&send_sgl_temp_ipv4->ipv4, ipv4,
+ sizeof(struct ipv4hdr));
+ send_sgl_temp_ipv4->ipv4.dest_addr = ipv4->src_addr;
+ send_sgl_temp_ipv4->ipv4.src_addr = ipv4->dest_addr;
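+		/*
+		 * The trailing 4 bytes appear to account for the RoCE ICRC
+		 * (cf. XRNIC_ICRC_SIZE used for the UDP length below).
+		 */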
+ ipv4->total_length = (sizeof(struct ipv4hdr) +
+ sizeof(struct udphdr) + sizeof(struct bth) +
+ sizeof(struct deth) + sizeof(struct mad)) + 4;
+ DEBUG_LOG("ipv4->total_length:%d\n", ipv4->total_length);
+ DEBUG_LOG("ipv4 length:%d\n", sizeof(struct ipv4hdr));
+ DEBUG_LOG("udp length:%d\n", sizeof(struct udphdr));
+ DEBUG_LOG("ethhdr length:%d\n", sizeof(struct ethhdr_t));
+ DEBUG_LOG("bth length:%d\n", sizeof(struct bth));
+ DEBUG_LOG("deth length:%d\n", sizeof(struct deth));
+
+ send_sgl_temp_ipv4->ipv4.total_length =
+ htons(ipv4->total_length);
+ send_sgl_temp_ipv4->ipv4.hdr_chksum = 0;
+ send_sgl_temp_ipv4->ipv4.id = ipv4->id;
+
+ ipv4_hdr_ptr = (unsigned short *)
+ (&send_sgl_temp_ipv4->ipv4);
+ ipv4_hdr_chksum = 0;
+
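+		/*
+		 * RFC 1071 style checksum: one's complement sum of the ten
+		 * 16-bit words of the IPv4 header, carry folded in and the
+		 * result inverted.
+		 */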
+ for (i = 0; i < 10; i++) {
+ ipv4_hdr_chksum += *ipv4_hdr_ptr;
+ ipv4_hdr_ptr++;
+ }
+
+ ipv4_hdr_chksum = ~((ipv4_hdr_chksum & 0x0000FFFF) +
+ (ipv4_hdr_chksum >> 16));
+ send_sgl_temp_ipv4->ipv4.hdr_chksum = ipv4_hdr_chksum;
+ DEBUG_LOG("check sum :%x\n", ipv4_hdr_chksum);
+ udp = (struct udphdr *)((char *)recv_qp_pkt_ipv4 +
+ XRNIC_ETH_HLEN + sizeof(struct ipv4hdr));
+ /* Copy the UDP packets and update length field */
+ send_sgl_temp_ipv4->udp.source = udp->source;
+ send_sgl_temp_ipv4->udp.dest = udp->dest;
+ udp->len = sizeof(struct udphdr) + sizeof(struct bth) +
+ sizeof(struct deth) + sizeof(struct mad) +
+ XRNIC_ICRC_SIZE;
+ DEBUG_LOG("udp total_length:%x\n", udp->len);
+ DEBUG_LOG("mad size:%d\n", sizeof(struct mad));
+ send_sgl_temp_ipv4->udp.len = htons(udp->len);
+ udp->check = 0;
+ send_sgl_temp_ipv4->udp.check = htons(udp->check);
+
+		/* Base Transport header settings */
+ bthp = (struct bth *)((char *)udp + sizeof(struct udphdr));
+
+ /* Fill bth fields */
+ send_sgl_temp_ipv4->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+ send_sgl_temp_ipv4->bth.solicited_event =
+ XRNIC_SET_SOLICT_EVENT;
+ send_sgl_temp_ipv4->bth.migration_req =
+ XRNIC_MIGRATION_REQ;
+ send_sgl_temp_ipv4->bth.pad_count = XRNIC_PAD_COUNT;
+ send_sgl_temp_ipv4->bth.transport_hdr_ver =
+ XRNIC_TRANSPORT_HDR_VER;
+ DEBUG_LOG("bth transport hdr ver:%x\n",
+ bthp->transport_hdr_ver);
+ send_sgl_temp_ipv4->bth.transport_hdr_ver =
+ bthp->transport_hdr_ver;
+ send_sgl_temp_ipv4->bth.destination_qp[0] = 0;
+ send_sgl_temp_ipv4->bth.destination_qp[1] = 0;
+ send_sgl_temp_ipv4->bth.destination_qp[2] =
+ XRNIC_DESTINATION_QP;
+ send_sgl_temp_ipv4->bth.reserved1 = XRNIC_RESERVED1;
+ send_sgl_temp_ipv4->bth.ack_request = XRNIC_ACK_REQ;
+ send_sgl_temp_ipv4->bth.reserved2 = XRNIC_RESERVED2;
+ send_sgl_temp_ipv4->bth.pkt_seq_num = 1;
+ send_sgl_temp_ipv4->bth.partition_key = 65535;
+
+		/* DETH settings */
+ dethp = (struct deth *)((char *)bthp + sizeof(struct bth));
+ send_sgl_temp_ipv4->deth.q_key = dethp->q_key;
+ send_sgl_temp_ipv4->deth.reserved = XRNIC_DETH_RESERVED;
+ send_sgl_temp_ipv4->deth.src_qp = dethp->src_qp;
+
+		/* MAD settings */
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ send_sgl_temp_ipv4->mad.base_ver = XRNIC_MAD_BASE_VER;
+ send_sgl_temp_ipv4->mad.class_version = 2;
+ DEBUG_LOG("class:%x\n", send_sgl_temp_ipv4->mad.class_version);
+ send_sgl_temp_ipv4->mad.mgmt_class = XRNIC_MAD_MGMT_CLASS;
+ temp = (XRNIC_MAD_RESP_BIT << 7) | XRNIC_MAD_COMM_SEND;
+ send_sgl_temp_ipv4->mad.resp_bit_method = temp;
+ DEBUG_LOG("mad method:%x\n",
+ send_sgl_temp_ipv4->mad.resp_bit_method);
+ send_sgl_temp_ipv4->mad.reserved = XRNIC_MAD_RESERVED;
+ send_sgl_temp_ipv4->mad.transaction_id = mad->transaction_id;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv6);
+ ipv6 = (struct ipv6hdr *)
+ ((char *)recv_qp_pkt_ipv6 + XRNIC_ETH_HLEN);
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+		/* In the ethernet header swap source and destination MAC */
+ memcpy(send_sgl_temp_ipv6->eth.h_source,
+ eth_hdr->h_dest, XRNIC_ETH_ALEN);
+ memcpy(send_sgl_temp_ipv6->eth.h_dest,
+ eth_hdr->h_source, XRNIC_ETH_ALEN);
+ send_sgl_temp_ipv6->eth.eth_type = eth_hdr->eth_type;
+ memcpy(&send_sgl_temp_ipv6->ipv6, ipv6,
+ sizeof(struct ipv6hdr));
+		/* In the IP header swap source IP and destination IP */
+ memcpy(&send_sgl_temp_ipv6->ipv6.daddr, &ipv6->saddr,
+ sizeof(struct in6_addr));
+ memcpy(&send_sgl_temp_ipv6->ipv6.saddr, &ipv6->daddr,
+ sizeof(struct in6_addr));
+ udp = (struct udphdr *)((char *)recv_qp_pkt_ipv6 +
+ XRNIC_ETH_HLEN + sizeof(struct ipv6hdr));
+ /* Copy the UDP packets and update length field */
+ send_sgl_temp_ipv6->udp.source = udp->source;
+ send_sgl_temp_ipv6->udp.dest = udp->dest;
+ udp->len = sizeof(struct udphdr) + sizeof(struct bth) +
+ sizeof(struct deth) + sizeof(struct mad) +
+ XRNIC_ICRC_SIZE;
+ DEBUG_LOG("udp total_length:%x\n", udp->len);
+ DEBUG_LOG("mad size:%d\n", sizeof(struct mad));
+ send_sgl_temp_ipv6->udp.len = htons(udp->len);
+ udp->check = 0;
+ send_sgl_temp_ipv6->udp.check = htons(udp->check);
+
+		/* Base Transport header settings */
+ bthp = (struct bth *)((char *)udp + sizeof(struct udphdr));
+
+ /* Fill bth fields */
+ send_sgl_temp_ipv6->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+ send_sgl_temp_ipv6->bth.solicited_event =
+ XRNIC_SET_SOLICT_EVENT;
+ send_sgl_temp_ipv6->bth.migration_req = XRNIC_MIGRATION_REQ;
+ send_sgl_temp_ipv6->bth.pad_count = XRNIC_PAD_COUNT;
+ send_sgl_temp_ipv6->bth.transport_hdr_ver =
+ XRNIC_TRANSPORT_HDR_VER;
+ DEBUG_LOG("bth transport_hdr_ver:%x\n",
+ bthp->transport_hdr_ver);
+ send_sgl_temp_ipv6->bth.transport_hdr_ver =
+ bthp->transport_hdr_ver;
+ send_sgl_temp_ipv6->bth.destination_qp[0] = 0;
+ send_sgl_temp_ipv6->bth.destination_qp[1] = 0;
+ send_sgl_temp_ipv6->bth.destination_qp[2] =
+ XRNIC_DESTINATION_QP;
+ send_sgl_temp_ipv6->bth.reserved1 = XRNIC_RESERVED1;
+ send_sgl_temp_ipv6->bth.ack_request = XRNIC_ACK_REQ;
+ send_sgl_temp_ipv6->bth.reserved2 = XRNIC_RESERVED2;
+ send_sgl_temp_ipv6->bth.pkt_seq_num = 1;
+ send_sgl_temp_ipv6->bth.partition_key = 65535;
+
+		/* DETH settings */
+ dethp = (struct deth *)((char *)bthp + sizeof(struct bth));
+ send_sgl_temp_ipv6->deth.q_key = dethp->q_key;
+ send_sgl_temp_ipv6->deth.reserved = XRNIC_DETH_RESERVED;
+ send_sgl_temp_ipv6->deth.src_qp = dethp->src_qp;
+
+		/* MAD settings */
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ send_sgl_temp_ipv6->mad.base_ver = XRNIC_MAD_BASE_VER;
+ send_sgl_temp_ipv6->mad.class_version = 2;
+ DEBUG_LOG("class:%x\n", send_sgl_temp_ipv6->mad.class_version);
+ send_sgl_temp_ipv6->mad.mgmt_class = XRNIC_MAD_MGMT_CLASS;
+ temp = (XRNIC_MAD_RESP_BIT << 7) | XRNIC_MAD_COMM_SEND;
+ send_sgl_temp_ipv6->mad.resp_bit_method = temp;
+ DEBUG_LOG("mad method:%x\n",
+ send_sgl_temp_ipv6->mad.resp_bit_method);
+ send_sgl_temp_ipv6->mad.reserved = XRNIC_MAD_RESERVED;
+ send_sgl_temp_ipv6->mad.transaction_id = mad->transaction_id;
+ }
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_prepare_dreq() - Prepares Disconnection Request Packet
+ * @qp_attr: qp info to be released
+ */
+static void xrnic_cm_prepare_dreq(struct xrnic_qp_attr *qp_attr)
+{
+ struct dreq *dreq;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ dreq = (struct dreq *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(DISCONNECT_REQUEST);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ dreq = (struct dreq *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(DISCONNECT_REQUEST);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ dreq->local_cm_id = qp_attr->local_cm_id;
+ dreq->remote_comm_id = qp_attr->remote_cm_id;
+ dreq->remote_qpn_eecn = qp_attr->remote_qpn;
+
+ DEBUG_LOG("Exiting %s %d %d\n",
+ __func__, qp_attr->remote_qpn, dreq->remote_qpn_eecn);
+}
+
+/**
+ * xrnic_cm_disconnect_send_handler() - Sends Disconnection Request and frees
+ * all the attributes related to the qp
+ * @qp_attr: qp info to be released by dreq
+ */
+void xrnic_cm_disconnect_send_handler(struct xrnic_qp_attr *qp_attr)
+{
+ int qp1_send_pkt_size;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET)
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ else
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+
+ xrnic_cm_prepare_dreq(qp_attr);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_DREQ_SENT;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
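+	/*
+	 * Re-arm the CM timer; the timeout is
+	 * XRNIC_CM_TIMEOUT << XRNIC_CM_TIMER_TIMEOUT microseconds.
+	 */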
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_prepare_drep() - Prepares disconnect reply packet
+ * @qp_attr: qp info for which drep packet is prepared
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_prepare_drep(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct drep *drep;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Enteing %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ drep = (struct drep *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(DISCONNECT_REPLY);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ drep = (struct drep *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(DISCONNECT_REPLY);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ drep->local_cm_id = qp_attr->local_cm_id;
+ drep->remote_comm_id = qp_attr->remote_cm_id;
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_disconnect_request_handler() - Handles Disconnection Request.
+ * @qp_attr: qp info on which the reply is to be sent
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_disconnect_request_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ int qp1_send_pkt_size;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s qp_num %d\n", __func__, qp_attr->qp_num);
+ if (qp_attr->cm_id) {
+ DEBUG_LOG("cm id is not clean qp_num %d\n", qp_attr->qp_num);
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_DREQ_RCVD;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ qp_attr->cm_id = NULL;
+ } else {
+ pr_err("CM ID is NULL\n");
+ }
+ if (qp_attr->ip_addr_type == AF_INET)
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ else
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+ qp_attr->curr_state = XRNIC_DREQ_RCVD;
+ xrnic_cm_prepare_drep(qp_attr, rq_buf);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->resend_count = 0;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_disconnect_reply_handler() - Handles disconnect reply packets.
+ * @qp_attr: qp info of which qp to be destroyed
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_disconnect_reply_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ DEBUG_LOG("Entering %s\n", __func__);
+ qp_attr->curr_state = XRNIC_DREQ_RCVD;
+	/* Callback to nvmeof. */
+
+	/* TBD: Need to change state while handling with timer. */
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->resend_count = 0;
+
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_connect_reject_handler() - Handles connect reject packets.
+ * @qp_attr: qp info
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_connect_reject_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ struct mad *mad;
+ struct rej *rej;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ rej = (struct rej *)&mad->data;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ rej = (struct rej *)&mad->data;
+ }
+
+ if (rej->message_rejected == XRNIC_REJ_REP ||
+ rej->message_rejected == XRNIC_REJ_REQ ||
+ rej->message_rejected == XRNIC_REJ_OTHERS) {
+ qp_attr->resend_count = 0;
+ qp_attr->remote_cm_id = 0;
+ qp_attr->cm_id = NULL;
+ xrnic_reset_io_qp(qp_attr);
+ memset((void *)&qp_attr->mac_addr, 0, XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ xrnic_qp_app_configuration(qp_attr->qp_num,
+ XRNIC_HW_QP_DISABLE);
+ qp_attr->curr_state = XRNIC_LISTEN;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_REJ_RECV;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ } else {
+ pr_err("%s CM_ID is NULL\n", __func__);
+ }
+ }
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_msg_rsp_ack_handler() - Handles message response packets.
+ * @qp_attr: qp info
+ * @rq_buf: receive queue buffer
+ */
+void xrnic_cm_msg_rsp_ack_handler(struct xrnic_qp_attr *qp_attr, void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ struct mad *mad;
+ struct mra *mra;
+
+ DEBUG_LOG("Enter ing %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ mra = (struct mra *)&mad->data;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ mra = (struct mra *)&mad->data;
+ }
+
+ if (mra->message_mraed == XRNIC_MRA_REP) {
+ qp_attr->curr_state = XRNIC_MRA_RCVD;
+ qp_attr->resend_count = 0;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ }
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_connect_rep_handler() - handles connect reply packets
+ * @qp_attr : qp info
+ * @rq_buf : receive queue buffer
+ */
+static void xrnic_cm_connect_rep_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REP_RCVD;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_REP_RCVD;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ } else {
+ pr_err("%s CM_ID is NULL\n", __func__);
+ }
+ pr_info("Connection Established Local QPn=%#x\n", qp_attr->qp_num);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_ready_to_use_handler() - handles ready to use packets
+ * @qp_attr : qp info
+ * @rq_buf : receive queue buffer
+ */
+static void xrnic_cm_ready_to_use_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_ESTABLISHD;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_ESTABLISHD;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ } else {
+ pr_err("%s CM_ID is NULL\n", __func__);
+ }
+ pr_info("Connection Established Local QPn=%x\n", qp_attr->qp_num);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_create_child_cm() - creates child cm.
+ * @cm_id_info : to update child cm info after creation
+ */
+static void xrnic_create_child_cm(struct xrnic_rdma_cm_id_info *cm_id_info)
+{
+ struct xrnic_rdma_cm_id *ch_cm;
+
+ ch_cm = kzalloc(sizeof(*ch_cm), GFP_ATOMIC);
+ cm_id_info->child_cm_id = ch_cm;
+}
+
+/**
+ * xrnic_cm_connect_request_handler() - handles connect request packets.
+ * @qp_attr : qp info
+ * @rq_buf : receive queue buffer
+ */
+static void xrnic_cm_connect_request_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4 = NULL;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6 = NULL;
+ struct mad *mad = NULL;
+ struct req *req = NULL;
+ int qp1_send_pkt_size, child_qp_num, status;
+ enum xrnic_rej_reason reason = XRNIC_REJ_CONSUMER_REJECT;
+ enum xrnic_msg_rej msg_rej;
+ enum xrnic_msg_mra msg_mra;
+ u16 port_num;
+ void *temp;
+ struct xrnic_rdma_cm_id *child_cm_id;
+ struct xrnic_rdma_cm_id *parent_cm_id;
+ struct xrnic_rdma_cm_id_info *child_cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+ }
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REQ_RCVD;
+
+ DEBUG_LOG("req-> local_cm_resp_tout:%x.\n", req->local_cm_resp_tout);
+ DEBUG_LOG("req-> path_packet_payload_mtu:%x.\n",
+ req->path_packet_payload_mtu);
+ if (req->remote_cm_resp_tout < XRNIC_REQ_REMOTE_CM_RESP_TOUT) {
+ pr_info("remote_cm_resp_tout:%x", req->remote_cm_resp_tout);
+
+ msg_mra = XRNIC_MRA_REQ;
+ xrnic_cm_prepare_mra(qp_attr, msg_mra, rq_buf);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr, qp1_send_pkt_size);
+ qp_attr->curr_state = XRNIC_MRA_SENT;
+ }
+
+ temp = (char *)&req->private_data;
+ temp += 36;
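+	/*
+	 * The RDMA CM service ID carries the port number in its last
+	 * two bytes.
+	 */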
+ port_num = htons(req->service_id[6] | req->service_id[7] << 8);
+ DEBUG_LOG("req-> service_id[0]:%x.\n", req->service_id[0]);
+ DEBUG_LOG("req-> service_id[1]:%x.\n", req->service_id[1]);
+ DEBUG_LOG("req-> service_id[2]:%x.\n", req->service_id[2]);
+ DEBUG_LOG("req-> service_id[3]:%x.\n", req->service_id[3]);
+ DEBUG_LOG("req-> service_id[4]:%x.\n", req->service_id[4]);
+ DEBUG_LOG("req-> service_id[5]:%x.\n", req->service_id[5]);
+ DEBUG_LOG("req-> service_id[6]:%x.\n", req->service_id[6]);
+ DEBUG_LOG("req-> service_id[7]:%x.\n", req->service_id[7]);
+ DEBUG_LOG("req->port_num:%d,%x\n", port_num, port_num);
+
+	if (port_num < 1 || port_num > XRNIC_MAX_PORT_SUPPORT ||
+	    xrnic_dev->port_status[port_num - 1] == XRNIC_PORT_QP_FREE) {
+ pr_err("PORT number is not correct sending rej.\n");
+ reason = XRNIC_REJ_PRIM_LID_PORT_NOT_EXIST;
+ msg_rej = XRNIC_REJ_REQ;
+ goto send_rep_rej;
+ }
+
+ xrnic_create_child_cm(xrnic_dev->cm_id_info[port_num - 1]);
+ child_qp_num =
+ xrnic_dev->cm_id_info[port_num - 1]->parent_cm_id.child_qp_num++;
+	child_cm_id = xrnic_dev->cm_id_info[port_num - 1]->child_cm_id;
+	if (!child_cm_id) {
+		pr_err("Failed to allocate child CM ID, sending rej.\n");
+		msg_rej = XRNIC_REJ_REQ;
+		goto send_rep_rej;
+	}
+ parent_cm_id = &xrnic_dev->cm_id_info[port_num - 1]->parent_cm_id;
+ child_cm_id->cm_id_info = xrnic_dev->cm_id_info[port_num - 1];
+ child_cm_id->cm_context = parent_cm_id->cm_context;
+ child_cm_id->ps = parent_cm_id->ps;
+ child_cm_id->xrnic_cm_handler = parent_cm_id->xrnic_cm_handler;
+ child_cm_id->local_cm_id = qp_attr->local_cm_id;
+ child_cm_id->port_num = port_num;
+ child_cm_id->child_qp_num = child_qp_num + 1;
+ child_cm_id->qp_info.qp_num = qp_attr->qp_num;
+ child_cm_id->qp_status = XRNIC_PORT_QP_FREE;
+ child_cm_id_info = child_cm_id->cm_id_info;
+ child_cm_id_info->conn_event_info.cm_event = XRNIC_REQ_RCVD;
+ child_cm_id_info->conn_event_info.status = 0;
+ child_cm_id_info->conn_event_info.private_data = (void *)temp;
+ child_cm_id_info->conn_event_info.private_data_len = 32;
+ list_add_tail(&child_cm_id->list, &cm_id_list);
+ status = parent_cm_id->xrnic_cm_handler(child_cm_id,
+ &child_cm_id_info->conn_event_info);
+ if (status) {
+ pr_err("xrnic_cm_handler failed sending rej.\n");
+ reason = XRNIC_REJ_CONSUMER_REJECT;
+ msg_rej = XRNIC_REJ_REQ;
+ goto send_rep_rej;
+ }
+
+ qp_attr->remote_cm_id = req->local_cm_id;
+ qp_attr->cm_id = child_cm_id;
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ qp_attr->ipv4_addr = recv_qp_pkt_ipv4->ipv4.src_addr;
+ memcpy(&qp_attr->mac_addr,
+ &recv_qp_pkt_ipv4->eth.h_source, XRNIC_ETH_ALEN);
+ qp_attr->source_qp_num = recv_qp_pkt_ipv4->deth.src_qp;
+ } else {
+ memcpy(&qp_attr->ipv6_addr,
+ &recv_qp_pkt_ipv6->ipv6.saddr,
+ sizeof(struct in6_addr));
+ memcpy(&qp_attr->mac_addr,
+ &recv_qp_pkt_ipv6->eth.h_source, XRNIC_ETH_ALEN);
+ qp_attr->source_qp_num = recv_qp_pkt_ipv6->deth.src_qp;
+ }
+
+ xrnic_cm_prepare_rep(qp_attr, rq_buf);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr, qp1_send_pkt_size);
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REP_SENT;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+ return;
+send_rep_rej:
+
+ qp_attr->remote_cm_id = req->local_cm_id;
+
+	xrnic_cm_prepare_rej(qp_attr, reason, msg_rej);
+	xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+			       qp_attr->qp1_attr, qp1_send_pkt_size);
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REJ_SENT;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s with reject reason [%d]\n", __func__, reason);
+}
+
+/**
+ * fill_cm_rtu_data() - Fills rtu data to send rtu packet.
+ * @cm_id : CM ID
+ * @send_sgl_qp1 : data pointer
+ * @cm_req_size : total header size
+ * @return: send_sgl_qp1 data pointer
+ */
+static char *fill_cm_rtu_data(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ struct cma_rtu *rtu_data;
+
+ SET_CM_HDR(send_sgl_qp1);
+ rtu_data = (struct cma_rtu *)send_sgl_qp1;
+ memset(rtu_data, 0, sizeof(*rtu_data));
+ rtu_data->local_comm_id = cm_id->local_cm_id;
+ rtu_data->remote_comm_id = cm_id->remote_cm_id;
+ return send_sgl_qp1;
+}
+
+/**
+ * fill_cm_req_data() - Fills request data to send in request packet.
+ * @cm_id : CM ID
+ * @send_sgl_qp1 : data pointer
+ * @cm_req_size : total header size
+ * @return: send_sgl_qp1 data pointer
+ */
+static char *fill_cm_req_data(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ struct ernic_cm_req *cm_req;
+ struct cma_hdr data;
+ int val;
+ int sgid, dgid;
+ unsigned int psn;
+ struct sockaddr_in *sin4, *din4;
+
+ sin4 = (struct sockaddr_in *)&cm_id->route.s_addr;
+ din4 = (struct sockaddr_in *)&cm_id->route.d_addr;
+
+ SET_CM_HDR(send_sgl_qp1);
+ cm_req = (struct ernic_cm_req *)send_sgl_qp1;
+ memset(cm_req, 0, sizeof(*cm_req));
+
+ cm_req->local_comm_id = cpu_to_be32(cm_id->local_cm_id);
+ cm_req->service_id = cpu_to_be64((cm_id->ps << 16) |
+ be16_to_cpu(din4->sin_port));
+ ether_addr_copy(&cm_req->local_ca_guid, &cm_id->route.smac);
+ cm_req->local_qkey = 0;
+ cm_req->offset32 = cpu_to_be32((cm_id->local_cm_id << 8) |
+ cm_id->conn_param.responder_resources);
+	cm_req->offset36 = cpu_to_be32(cm_id->conn_param.initiator_depth);
+
+ val = (XRNIC_REQ_LOCAL_CM_RESP_TOUT | (XRNIC_SVC_TYPE_UC << 5) |
+ (cm_id->conn_param.flow_control << 7));
+ cm_req->offset40 = cpu_to_be32(val);
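+	/* IB PSNs are 24 bits wide; draw a random value and keep the low 24. */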
+	get_random_bytes(&psn, sizeof(psn));
+ psn &= 0xFFFFFF;
+ val = ((psn << 8) | XRNIC_REQ_REMOTE_CM_RESP_TOUT |
+ (cm_id->conn_param.retry_count << 5));
+ cm_req->offset44 = cpu_to_be32(val);
+ cm_id->qp_info.starting_psn = psn;
+
+ cm_req->pkey = 0xFFFF;
+ cm_req->offset50 = ((1 << 4) |
+ (cm_id->conn_param.rnr_retry_count << 5));
+ cm_req->offset51 = (1 << 4);
+ cm_req->local_lid = cpu_to_be16(0xFFFF);
+ cm_req->remote_lid = cpu_to_be16(0xFFFF);
+ sgid = sin4->sin_addr.s_addr;
+ dgid = din4->sin_addr.s_addr;
+ val = cpu_to_be32(0xFFFF);
+ memcpy(cm_req->local_gid.raw + 8, &val, 4);
+ memcpy(cm_req->local_gid.raw + 12, &sgid, 4);
+ memcpy(cm_req->remote_gid.raw + 8, &val, 4);
+ memcpy(cm_req->remote_gid.raw + 12, &dgid, 4);
+ cm_req->offset88 = cpu_to_be32(1 << 2);
+ cm_req->traffic_class = 0;
+ cm_req->hop_limit = 0x40;
+ cm_req->offset94 = 0;
+ cm_req->offset95 = 0x18;
+
+ data.cma_version = CMA_VERSION;
+ data.ip_version = (4 << 4);
+ data.port = din4->sin_port;
+ data.src_addr.ip4.addr = sin4->sin_addr.s_addr;
+ data.dst_addr.ip4.addr = din4->sin_addr.s_addr;
+ memcpy(cm_req->private_data, &data, sizeof(data));
+
+ return send_sgl_qp1;
+}
+
+/**
+ * fill_ipv4_cm_req() - fills cm request data for rdma connect.
+ * @cm_id : CM ID
+ * @send_sgl_qp1 : data pointer
+ * @cm_req_size : total header size
+ */
+void fill_ipv4_cm_req(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ send_sgl_qp1 = fill_ipv4_headers(cm_id, send_sgl_qp1, cm_req_size);
+ send_sgl_qp1 = fill_mad_common_header(cm_id, send_sgl_qp1,
+ cm_req_size, CM_REQ_ATTR_ID);
+ send_sgl_qp1 = fill_cm_req_data(cm_id, send_sgl_qp1, cm_req_size);
+}
+
+/**
+ * xrnic_cm_send_rtu() - Sends Ready to use packet.
+ * @cm_id : CM ID
+ * @cm_rep : IPV4 mad data
+ */
+static void xrnic_cm_send_rtu(struct xrnic_rdma_cm_id *cm_id,
+ struct rep *cm_rep)
+{
+ int cm_req_size;
+ char *send_sgl_qp1, *head;
+
+ cm_req_size = sizeof(struct ethhdr) + sizeof(struct iphdr) +
+ sizeof(struct udphdr) + IB_BTH_BYTES + IB_DETH_BYTES +
+ sizeof(struct ib_mad_hdr) + sizeof(struct cma_rtu) +
+ EXTRA_PKT_LEN;
+
+	head = kmalloc(cm_req_size, GFP_ATOMIC);
+	if (!head)
+		return;
+ send_sgl_qp1 = head;
+ send_sgl_qp1 = fill_ipv4_headers(cm_id, send_sgl_qp1, cm_req_size);
+ send_sgl_qp1 = fill_mad_common_header(cm_id, send_sgl_qp1,
+ cm_req_size, CM_RTU_ATTR_ID);
+ send_sgl_qp1 = fill_cm_rtu_data(cm_id, send_sgl_qp1, cm_req_size);
+ xrnic_send_mad(head, cm_req_size - EXTRA_PKT_LEN);
+}
+
+/*
+ * xrnic_rdma_accept() - This function implements incoming connect request
+ * accept functionality.
+ * @cm_id : CM ID of the incoming connect request
+ * @conn_param : Connection parameters
+ * @return: XRNIC_SUCCESS if successfully accepts the connection,
+ * otherwise error representative value
+ */
+int xrnic_rdma_accept(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param)
+{
+ struct xrnic_qp_info *qp_info;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1] !=
+ XRNIC_PORT_QP_IN_USE)
+ return -XRNIC_INVALID_CM_ID;
+
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE)
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_info = &cm_id->qp_info;
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ if (qp_info->sq_depth > XRNIC_MAX_SQ_DEPTH ||
+ qp_info->rq_depth > XRNIC_MAX_RQ_DEPTH ||
+ qp_info->send_sge_size > XRNIC_MAX_SEND_SGL_SIZE ||
+ qp_info->send_pkt_size > XRNIC_MAX_SEND_PKT_SIZE)
+ return -XRNIC_INVALID_QP_INIT_ATTR;
+
+	/* Return an error if an invalid conn_param is supplied. */
+ if (conn_param->private_data_len > XRNIC_CM_PRVATE_DATA_LENGTH ||
+ conn_param->responder_resources > XRNIC_RESPONDER_RESOURCES ||
+ conn_param->initiator_depth > XRNIC_INITIATOR_DEPTH ||
+ conn_param->flow_control > 1 ||
+ conn_param->retry_count > XRNIC_REQ_RETRY_COUNT ||
+ conn_param->rnr_retry_count > XRNIC_REP_RNR_RETRY_COUNT)
+ return -XRNIC_INVALID_QP_CONN_PARAM;
+
+ memcpy((void *)&cm_id->conn_param.private_data,
+ (void *)&conn_param->private_data,
+ conn_param->private_data_len);
+ cm_id->conn_param.private_data_len = conn_param->private_data_len;
+ cm_id->conn_param.responder_resources =
+ conn_param->responder_resources;
+ cm_id->conn_param.initiator_depth = conn_param->initiator_depth;
+ cm_id->conn_param.flow_control = conn_param->flow_control;
+ cm_id->conn_param.retry_count = conn_param->retry_count;
+ cm_id->conn_param.rnr_retry_count = conn_param->rnr_retry_count;
+
+ xrnic_qp_app_configuration(qp_info->qp_num, XRNIC_HW_QP_ENABLE);
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_accept);
+
+/*
+ * xrnic_rdma_disconnect() - Disconnects an RDMA connection.
+ * @cm_id : CM ID to destroy or disconnect
+ * @return: XRNIC_SUCCESS if the disconnect succeeds,
+ * otherwise an error representative value
+ */
+int xrnic_rdma_disconnect(struct xrnic_rdma_cm_id *cm_id)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+ int i;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1]) {
+ if (cm_id->local_cm_id >= 2) {
+ if (cm_id->child_qp_num < 1)
+ return -XRNIC_INVALID_CM_ID;
+
+ if (cm_id->qp_info.qp_num) {
+ pr_err("CM ID of QP is not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ if (cm_id->qp_status == XRNIC_PORT_QP_FREE) {
+ pr_err("CM ID is already destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ pr_info("Free local cm id[%d] ", cm_id->local_cm_id);
+ pr_info("Child qp number [%d] ", cm_id->child_qp_num);
+ pr_info("qp_num [%d]\n", cm_id->qp_info.qp_num);
+ cm_id->qp_status = XRNIC_PORT_QP_FREE;
+ } else if (cm_id->local_cm_id == 1) {
+ if (cm_id->qp_status == XRNIC_PORT_QP_FREE) {
+ pr_err("CM ID is already destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ cm_id_info = (struct xrnic_rdma_cm_id_info *)
+ cm_id->cm_id_info;
+ for (i = 0; i < cm_id_info->num_child; i++) {
+ if (cm_id_info->child_cm_id[i].qp_status ==
+ XRNIC_PORT_QP_IN_USE) {
+ pr_err("child CM IDs not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ }
+ cm_id->qp_status = XRNIC_PORT_QP_FREE;
+ } else {
+ pr_err("Received invalid CM ID\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ } else {
+ pr_err("Received invalid Port ID\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_disconnect);
+
+/*
+ * xrnic_rdma_destroy_id() - Destroys the CM ID of the channel.
+ * @cm_id : CM ID of the incoming connect request
+ * @flag : Flag to indicate whether a disconnect should be sent
+ * @return: XRNIC_SUCCESS if the CM ID is destroyed successfully,
+ * otherwise an error representative value
+ */
+int xrnic_rdma_destroy_id(struct xrnic_rdma_cm_id *cm_id, int flag)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+ int i;
+ u32 local_cm_id = cm_id->local_cm_id;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1]) {
+ if (local_cm_id >= 2) {
+ if (cm_id->child_qp_num < 1)
+ return -XRNIC_INVALID_CM_ID;
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE) {
+ pr_err("CM ID is not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ if (flag)
+ xrnic_cm_disconnect_send_handler
+ (&xrnic_dev->qp_attr[local_cm_id - 2]);
+
+ pr_info("Free local cm id[%d] ", cm_id->local_cm_id);
+ pr_info("Child qp number [%d] ", cm_id->child_qp_num);
+ pr_info("qp_num [%d]\n", cm_id->qp_info.qp_num);
+
+ cm_id_info =
+ xrnic_dev->cm_id_info[cm_id->port_num - 1];
+ cm_id_info->parent_cm_id.child_qp_num--;
+ __list_del_entry(&cm_id->list);
+ kfree(cm_id);
+ } else if (local_cm_id == 1) {
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE) {
+ pr_err("CM ID is already destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+
+ cm_id_info = (struct xrnic_rdma_cm_id_info *)
+ cm_id->cm_id_info;
+ for (i = 0; i < cm_id_info->num_child; i++) {
+ if (cm_id_info->child_cm_id[i].qp_status ==
+ XRNIC_PORT_QP_IN_USE) {
+ pr_err("child CM IDs not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ }
+ xrnic_dev->io_qp_count += cm_id_info->num_child;
+ xrnic_dev->cm_id_info[cm_id->port_num - 1] = NULL;
+ xrnic_dev->port_status[cm_id->port_num - 1] =
+ XRNIC_PORT_QP_FREE;
+ __list_del_entry(&cm_id->list);
+ kfree(cm_id_info->child_cm_id);
+ kfree(cm_id_info);
+ } else {
+ pr_err("Received invalid CM ID\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ } else {
+ return -XRNIC_INVALID_CM_ID;
+ }
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_destroy_id);
+
+/*
+ * xrnic_send_mad() - Initiates sending a management packet on QP1.
+ * @send_buf : buffer holding the MAD packet to send
+ * @size : size of the send buffer
+ */
+void xrnic_send_mad(void *send_buf, u32 size)
+{
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+
+ xrnic_qp1_send_mad_pkt(send_buf, qp1_attr, size);
+}
+EXPORT_SYMBOL(xrnic_send_mad);
+
+/*
+ * xrnic_identify_remote_host() - Searches the internal data structures
+ * for the remote host info.
+ * @rq_buf : received data buffer from the other end
+ * @qp_num : QP number on which the packet has been received
+ * @return: XRNIC_SUCCESS if remote end info is available,
+ * XRNIC_FAILED otherwise
+ */
+int xrnic_identify_remote_host(void *rq_buf, int qp_num)
+{
+ /* First determine which IP version the received packet carries and
+ * compare the IP address as either AF_INET or AF_INET6 accordingly.
+ */
+ /* There are two failure conditions: either this CONNECT_REQUEST is
+ * bypassed because the connection already exists, or there
+ * is no QP free at all.
+ */
+ struct mad *mad;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+
+ if (qp1_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ }
+
+ if (ntohs(mad->attribute_id) == CONNECT_REQUEST) {
+ if (qp1_attr->ip_addr_type == AF_INET6) {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ (!memcmp(&xrnic_dev->qp1_attr.ipv6_addr,
+ &xrnic_dev->qp_attr[qp_num].ipv6_addr,
+ sizeof(struct in6_addr))))
+ return XRNIC_SUCCESS;
+ } else {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ xrnic_dev->qp1_attr.ipv4_addr ==
+ xrnic_dev->qp_attr[qp_num].ipv4_addr)
+ return XRNIC_SUCCESS;
+ }
+ } else {
+ /* Need to compare udp->source_port, ethernet->source_mac,
+ * ip->source_ip, deth->source_qp == 1, local_cm_id is le
+ */
+
+ if (qp1_attr->ip_addr_type == AF_INET6) {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ mad->data[1] ==
+ xrnic_dev->qp_attr[qp_num].local_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ (!memcmp(&xrnic_dev->qp1_attr.ipv6_addr,
+ &xrnic_dev->qp_attr[qp_num].ipv6_addr,
+ sizeof(struct in6_addr))))
+
+ return XRNIC_SUCCESS;
+ } else {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ mad->data[1] ==
+ xrnic_dev->qp_attr[qp_num].local_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ xrnic_dev->qp1_attr.ipv4_addr ==
+ xrnic_dev->qp_attr[qp_num].ipv4_addr)
+
+ return XRNIC_SUCCESS;
+ }
+ }
+ return XRNIC_FAILED;
+}
+
+/*
+ * xrnic_rdma_resolve_addr() - Looks up the route to a destination
+ * address and initiates ARP if required.
+ * @cm_id : CM channel ID which is being used for connection set up
+ * @src_addr : IPV4/IPV6 address of the source
+ * @dst_addr : IPV4/IPV6 address of the destination
+ * @timeout : Address resolve timeout
+ * @return: SUCCESS value if the route is resolved, an error
+ * representative value otherwise
+ */
+int xrnic_rdma_resolve_addr(struct xrnic_rdma_cm_id *cm_id,
+ struct sockaddr *src_addr,
+ struct sockaddr *dst_addr, int timeout)
+{
+ struct flowi4 fl4;
+ struct rtable *rt;
+ struct neighbour *n;
+ int arp_retry = 3;
+ int ret = 0;
+ struct sockaddr_in sin4, *din4;
+ struct net_device *net_dev;
+ struct xrnic_rdma_cm_event_info event;
+
+ net_dev = dev_get_by_name(&init_net, "eth0");
+ if (!net_dev)
+ return -ENODEV;
+ memset(&fl4, 0, sizeof(fl4));
+ din4 = (struct sockaddr_in *)dst_addr;
+ fl4.daddr = din4->sin_addr.s_addr;
+ rt = ip_route_output_key(&init_net, &fl4);
+ if (IS_ERR(rt)) {
+ event.cm_event = XRNIC_CM_EVENT_ADDR_ERROR;
+ event.status = PTR_ERR(rt);
+ cm_id->xrnic_cm_handler(cm_id, &event);
+ ret = PTR_ERR(rt);
+ goto err;
+ }
+
+ event.cm_event = XRNIC_CM_EVENT_ADDR_RESOLVED;
+ event.status = 0;
+ cm_id->xrnic_cm_handler(cm_id, &event);
+
+ sin4.sin_addr.s_addr = fl4.saddr;
+ sin4.sin_port = cpu_to_be16(ERNIC_UDP_SRC_PORT);
+ sin4.sin_family = dst_addr->sa_family;
+
+ /* HACK: ARP is not resolved for the first time, retries are needed */
+ do {
+ n = rt->dst.ops->neigh_lookup(&rt->dst, NULL, &fl4.daddr);
+ } while (arp_retry-- > 0);
+
+ if (IS_ERR(n)) {
+ pr_err("ERNIC neigh lookup failed\n");
+ ret = PTR_ERR(n);
+ goto err;
+ }
+
+ memcpy(&cm_id->route.s_addr, &sin4, sizeof(sin4));
+ memcpy(&cm_id->route.d_addr, dst_addr, sizeof(*dst_addr));
+ ether_addr_copy(cm_id->route.smac, net_dev->dev_addr);
+ ether_addr_copy(cm_id->route.dmac, n->ha);
+ event.cm_event = XRNIC_CM_EVENT_ROUTE_RESOLVED;
+ event.status = 0;
+ cm_id->xrnic_cm_handler(cm_id, &event);
+err:
+ return ret;
+}
+EXPORT_SYMBOL(xrnic_rdma_resolve_addr);
+
+/*
+ * fill_ipv4_headers() - Fills the Ethernet, IPv4 and UDP headers for an
+ * outgoing packet.
+ * @cm_id : CM ID info for addresses
+ * @send_sgl_qp1 : SGL info
+ * @cm_req_size : request size
+ * @return: pointer to SGL info
+ */
+char *fill_ipv4_headers(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ struct udphdr *udph;
+ struct sockaddr_in *sin4, *din4;
+
+ sin4 = (struct sockaddr_in *)&cm_id->route.s_addr;
+ din4 = (struct sockaddr_in *)&cm_id->route.d_addr;
+
+ SET_ETH_HDR(send_sgl_qp1);
+ eth = (struct ethhdr *)send_sgl_qp1;
+ ether_addr_copy(eth->h_dest, cm_id->route.dmac);
+ ether_addr_copy(eth->h_source, cm_id->route.smac);
+ eth->h_proto = cpu_to_be16(ETH_P_IP);
+
+ SET_IP_HDR(send_sgl_qp1);
+ iph = (struct iphdr *)send_sgl_qp1;
+ iph->ihl = 5;
+ iph->version = 4;
+ iph->ttl = 32;
+ iph->tos = 0;
+ iph->protocol = IPPROTO_UDP;
+ iph->saddr = sin4->sin_addr.s_addr;
+ iph->daddr = din4->sin_addr.s_addr;
+ iph->id = 0;
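+ /* 0x2 << 13 (0x4000) sets the Don't Fragment (DF) flag in frag_off */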
+ iph->frag_off = cpu_to_be16(0x2 << 13);
+ iph->tot_len = cpu_to_be16(cm_req_size - ETH_HLEN);
+
+ ip_send_check(iph);
+
+ SET_NET_HDR(send_sgl_qp1);
+ udph = (struct udphdr *)send_sgl_qp1;
+ udph->source = sin4->sin_port;
+ udph->dest = din4->sin_port;
+ udph->len = cpu_to_be16(cm_req_size - ETH_HLEN - (iph->ihl * 4));
+ udph->check = 0;
+
+ return send_sgl_qp1;
+}
+
+/*
+ * fill_mad_common_header() - Fills the BTH, DETH and MAD headers.
+ * @cm_id : CM ID info
+ * @send_sgl_qp1 : SGL info
+ * @cm_req_size : request size
+ * @cm_attr : CM attribute ID
+ * @return: pointer to SGL info
+ */
+char *fill_mad_common_header(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size,
+ int cm_attr)
+{
+ struct ib_bth *bth;
+ struct ib_deth *deth;
+ struct ib_mad_hdr *madh;
+ int val;
+
+ SET_BTH_HDR(send_sgl_qp1);
+ bth = (struct ib_bth *)send_sgl_qp1;
+ memset(bth, 0, sizeof(*bth));
+ val = (BTH_SET(OPCODE, IB_OPCODE_UD_SEND_ONLY) |
+ BTH_SET(SE, XRNIC_SET_SOLICT_EVENT) |
+ BTH_SET(MIG, XRNIC_MIGRATION_REQ) |
+ BTH_SET(PAD, XRNIC_PAD_COUNT) |
+ BTH_SET(TVER, XRNIC_TRANSPORT_HDR_VER) |
+ BTH_SET(PKEY, 65535));
+ bth->offset0 = cpu_to_be32(val);
+ bth->offset4 = cpu_to_be32(BTH_SET(DEST_QP, 1));
+ bth->offset8 = cpu_to_be32(BTH_SET(PSN, psn_num++));
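+ /* psn_num above is a running counter shared by all outgoing QP1 MADs;
+ * each MAD consumes one packet sequence number.
+ */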
+
+ SET_DETH_HDR(send_sgl_qp1);
+ deth = (struct ib_deth *)send_sgl_qp1;
+ deth->offset0 = cpu_to_be32(IB_ENFORCED_QEY);
+ deth->offset4 = cpu_to_be32(DETH_SET(SQP, 2));
+
+ SET_MAD_HDR(send_sgl_qp1);
+ madh = (struct ib_mad_hdr *)send_sgl_qp1;
+ memset(madh, 0, sizeof(*madh));
+ madh->base_version = IB_MGMT_BASE_VERSION;
+ madh->mgmt_class = IB_MGMT_CLASS_CM;
+ madh->class_version = IB_CM_CLASS_VER;
+ madh->method = IB_MGMT_METHOD_SEND;
+ madh->attr_id = cm_attr;
+ madh->tid = cpu_to_be64(mad_tid++);
+ madh->status = 0;
+ madh->class_specific = 0;
+ madh->attr_mod = 0;
+
+ return send_sgl_qp1;
+}
+
+/*
+ * xrnic_rdma_connect() - Initiates the connection process.
+ * @cm_id : CM ID info
+ * @conn_param : Connection parameters for the new connection
+ * @return: XRNIC_SUCCESS on success, -ENOMEM if allocation fails
+ */
+int xrnic_rdma_connect(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param)
+{
+ int cm_req_size;
+ char *send_sgl_qp1, *head;
+
+ cm_req_size = sizeof(struct ethhdr) + sizeof(struct iphdr) +
+ sizeof(struct udphdr) + IB_BTH_BYTES + IB_DETH_BYTES +
+ sizeof(struct ib_mad_hdr) +
+ sizeof(struct ernic_cm_req) + EXTRA_PKT_LEN;
+
+ head = kmalloc(cm_req_size, GFP_ATOMIC);
+ if (!head)
+ return -ENOMEM;
+ send_sgl_qp1 = head;
+ memcpy(&cm_id->conn_param, conn_param, sizeof(*conn_param));
+ fill_ipv4_cm_req(cm_id, send_sgl_qp1, cm_req_size);
+ xrnic_send_mad(head, cm_req_size - EXTRA_PKT_LEN);
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_connect);
+
+/*
+ * xrnic_process_mad_pkt() - Processes a received MAD packet.
+ * @rq_buf : receive queue pointer
+ * @return: XRNIC_SUCCESS if the MAD packet is processed successfully,
+ * XRNIC_FAILED otherwise
+ */
+static int xrnic_process_mad_pkt(void *rq_buf)
+{
+ int ret = 0;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct deth *deth;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+
+ if (qp1_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ deth = (struct deth *)&recv_qp_pkt_ipv4->deth;
+ qp1_attr->ipv4_addr = recv_qp_pkt_ipv4->ipv4.src_addr;
+ memcpy(&qp1_attr->mac_addr,
+ &recv_qp_pkt_ipv4->eth.h_source, XRNIC_ETH_ALEN);
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ deth = (struct deth *)&recv_qp_pkt_ipv6->deth;
+ memcpy(&qp1_attr->ipv6_addr,
+ &recv_qp_pkt_ipv6->ipv6.saddr,
+ sizeof(struct in6_addr));
+ memcpy(&qp1_attr->mac_addr,
+ &recv_qp_pkt_ipv6->eth.h_source,
+ XRNIC_ETH_ALEN);
+ }
+ qp1_attr->source_qp_num = deth->src_qp;
+
+ ret = xrnic_cm_establishment_handler(rq_buf);
+ if (ret) {
+ pr_err("cm establishment failed with ret code %d\n", ret);
+ return XRNIC_FAILED;
+ }
+
+ return XRNIC_SUCCESS;
+}
+
+/*
+ * xrnic_mad_pkt_recv_intr_handler() - Tasklet handler for the MAD packet
+ * received interrupt.
+ * @data : XRNIC device info
+ */
+void xrnic_mad_pkt_recv_intr_handler(unsigned long data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_memory_map *xrnic_mmap = (struct xrnic_memory_map *)
+ qp1_attr->xrnic_mmap;
+ struct rdma_qp1_attr *rdma_qp1_attr = (struct rdma_qp1_attr *)
+ &xrnic_mmap->xrnic_regs->rdma_qp1_attr;
+ u32 config_value = 0;
+ u8 rq_buf[XRNIC_RECV_PKT_SIZE];
+ void *rq_buf_temp, *rq_buf_unaligned;
+ int ret = 0, j, rq_pkt_num = 0, rq_pkt_count = 0;
+ struct ethhdr_t *ethhdr;
+ unsigned long flag;
+
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ rq_buf_unaligned = (void *)rq_buf;
+
+ /* We need to maintain sq_cmpl_db_local as per hardware updates
+ * for the queue specific sq_cmpl_db_local register.
+ * This variable must also be maintained when packets are resent.
+ */
+ config_value = ioread32((char *)xrnic_mmap->rq_wrptr_db_add +
+ (4 * (qp1_attr->qp_num - 1)));
+ pr_info("config_value = %d, db_local = %d\n",
+ config_value, qp1_attr->rq_wrptr_db_local);
+ if (qp1_attr->rq_wrptr_db_local == config_value) {
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+ return;
+ }
+
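+ /* The hardware write pointer wraps at XRNIC_RQ_DEPTH; account for the
+ * wrap when computing the number of pending packets.
+ */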
+ if (qp1_attr->rq_wrptr_db_local > config_value)
+ rq_pkt_count = (config_value + XRNIC_RQ_DEPTH) -
+ qp1_attr->rq_wrptr_db_local;
+ else
+ rq_pkt_count = config_value - qp1_attr->rq_wrptr_db_local;
+
+ DEBUG_LOG("rx pkt count = 0x%x\n", rq_pkt_count);
+ for (j = 0 ; j < rq_pkt_count ; j++) {
+ config_value = ioread32((char *)xrnic_mmap->sq_cmpl_db_add +
+ (4 * (qp1_attr->qp_num - 1)));
+
+ rq_pkt_num = qp1_attr->rq_wrptr_db_local;
+ if (rq_pkt_num >= XRNIC_RQ_DEPTH)
+ rq_pkt_num = rq_pkt_num - XRNIC_RQ_DEPTH;
+
+ ethhdr = (struct ethhdr_t *)((char *)qp1_attr->rq_buf_ba_ca +
+ (rq_pkt_num * XRNIC_RECV_PKT_SIZE));
+
+ rq_buf_temp = (char *)qp1_attr->rq_buf_ba_ca +
+ (rq_pkt_num * XRNIC_RECV_PKT_SIZE);
+ memcpy((char *)rq_buf_unaligned,
+ (char *)rq_buf_temp, XRNIC_RECV_PKT_SIZE);
+ if (ethhdr->eth_type == htons(XRNIC_ETH_P_IP))
+ qp1_attr->ip_addr_type = AF_INET;
+ else
+ qp1_attr->ip_addr_type = AF_INET6;
+ ret = xrnic_process_mad_pkt(rq_buf_unaligned);
+
+ if (ret) {
+ DEBUG_LOG("MAD pkt processing failed for pkt num %d\n",
+ rq_pkt_num);
+ }
+
+ qp1_attr->rq_wrptr_db_local++;
+ config_value = qp1_attr->rq_wrptr_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->rq_ci_db)));
+
+ if (qp1_attr->rq_wrptr_db_local == XRNIC_RQ_DEPTH)
+ qp1_attr->rq_wrptr_db_local = 0;
+ }
+
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_cm_establishment_handler() - Handles the CM messages that drive
+ * connection establishment and teardown.
+ * @rq_buf : receive queue buffer
+ * @return: 0 on success, -1 in case of failure
+ */
+int xrnic_cm_establishment_handler(void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ struct mad *mad;
+ struct req *req;
+ struct rep *rep;
+ struct deth *deth;
+ struct xrnic_qp_attr *qp_attr;
+ int i = 0, ret;
+ enum xrnic_rej_reason reason;
+ enum xrnic_msg_rej msg;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ int qp1_send_pkt_size;
+ struct xrnic_rdma_cm_id *cm_id, *tmp;
+ struct sockaddr_in *din4;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp1_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+ }
+ switch (ntohs(mad->attribute_id)) {
+ case CONNECT_REQUEST:
+ DEBUG_LOG("Connect request recevied\n");
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ ret = xrnic_find_free_qp();
+ DEBUG_LOG("Q pair no:%x, i = %d\n", ret, i);
+ if (ret < 0) {
+ qp_attr = qp1_attr;
+ qp_attr->ip_addr_type = qp1_attr->ip_addr_type;
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ pr_err("no QP is free for connection.\n");
+ reason = XRNIC_REJ_NO_QP_AVAILABLE;
+ msg = XRNIC_REJ_REQ;
+ qp_attr->remote_cm_id = req->local_cm_id;
+ xrnic_cm_prepare_rej(qp_attr, msg, reason);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ return XRNIC_FAILED;
+ }
+ i = ret;
+ }
+
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_LISTEN ||
+ qp_attr->curr_state == XRNIC_MRA_SENT ||
+ qp_attr->curr_state == XRNIC_REJ_SENT ||
+ qp_attr->curr_state == XRNIC_REP_SENT ||
+ qp_attr->curr_state == XRNIC_ESTABLISHD) {
+ qp_attr->ip_addr_type = qp1_attr->ip_addr_type;
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_connect_request_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state for Connect Request\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case READY_TO_USE:
+ DEBUG_LOG("RTU received\n");
+
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no matching QP found for RTU\n");
+ return XRNIC_FAILED;
+ }
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_REP_SENT ||
+ qp_attr->curr_state == XRNIC_MRA_RCVD) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_ready_to_use_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to serve RTU\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case MSG_RSP_ACK:
+ DEBUG_LOG("Message received Ack interrupt\n");
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no matching QP found for MSG RSP ACK\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_REP_SENT) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_msg_rsp_ack_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to serve MSG RSP ACK\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case CONNECT_REPLY:
+ DEBUG_LOG("Connect reply received\n");
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ rep = (struct rep *)&recv_qp_pkt_ipv4->mad.data;
+ deth = (struct deth *)&recv_qp_pkt_ipv4->deth;
+ list_for_each_entry_safe(cm_id, tmp, &cm_id_list, list) {
+ if (cm_id->local_cm_id ==
+ be32_to_cpu(rep->remote_comm_id))
+ break;
+ }
+ /* Something wrong if qp num is 0. Don't send Reply
+ * TODO: Send Reject instead of muting the Reply
+ */
+ if (cm_id->qp_info.qp_num == 0)
+ goto done;
+ cm_id->local_cm_id = rep->remote_comm_id;
+ cm_id->remote_cm_id = rep->local_cm_id;
+ qp_attr = &xrnic_dev->qp_attr[(cm_id->qp_info.qp_num - 2)];
+ qp_attr->local_cm_id = rep->remote_comm_id;
+ qp_attr->remote_cm_id = rep->local_cm_id;
+ qp_attr->remote_qp = (be32_to_cpu(rep->local_qpn) >> 8);
+ qp_attr->source_qp_num = (deth->src_qp);
+ qp_attr->starting_psn = (cm_id->qp_info.starting_psn - 1);
+ qp_attr->rem_starting_psn = (rep->start_psn[2] |
+ rep->start_psn[1] << 8 |
+ rep->start_psn[0] << 16);
+ ether_addr_copy(qp_attr->mac_addr, cm_id->route.dmac);
+ din4 = &cm_id->route.d_addr;
+ cm_id->port_num = be16_to_cpu(din4->sin_port);
+ xrnic_dev->port_status[cm_id->port_num - 1] =
+ XRNIC_PORT_QP_IN_USE;
+ qp_attr->ipv4_addr = din4->sin_addr.s_addr;
+ qp_attr->ip_addr_type = AF_INET;
+ qp_attr->cm_id = cm_id;
+ xrnic_qp_app_configuration(cm_id->qp_info.qp_num,
+ XRNIC_HW_QP_ENABLE);
+ xrnic_cm_connect_rep_handler(qp_attr, NULL);
+ xrnic_cm_send_rtu(cm_id, rep);
+ qp_attr->curr_state = XRNIC_ESTABLISHD;
+done:
+ break;
+
+ case CONNECT_REJECT:
+ DEBUG_LOG("Connect Reject received\n");
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no matching QP found for connect reject\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_MRA_SENT ||
+ qp_attr->curr_state == XRNIC_REP_SENT ||
+ qp_attr->curr_state == XRNIC_MRA_RCVD) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_connect_reject_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to serve connect reject\n");
+ return XRNIC_FAILED;
+ }
+
+ break;
+
+ case DISCONNECT_REQUEST:
+ DEBUG_LOG("Disconnect request received\n");
+ for (i = 0; i < XRNIC_MAX_QP_SUPPORT; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no matching QP found for disconnect request\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_ESTABLISHD ||
+ qp_attr->curr_state == XRNIC_DREQ_SENT ||
+ qp_attr->curr_state == XRNIC_TIMEWAIT) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_disconnect_request_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to for Disconnect request\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case DISCONNECT_REPLY:
+ DEBUG_LOG("Disconnect reply received\n");
+ for (i = 0; i < XRNIC_MAX_QP_SUPPORT; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no matching QP found for disconnect reply\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_DREQ_SENT) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_disconnect_reply_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to for Disconnect reply\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case SERVICE_ID_RESOLUTION_REQ:
+ DEBUG_LOG("Received service ID resolution request\n");
+ pr_err("Not handling service ID resolution request\n");
+ return XRNIC_FAILED;
+
+ case SERVICE_ID_RESOLUTION_REQ_REPLY:
+ DEBUG_LOG("Received service ID resolution reply\n");
+ pr_err("Not handling service ID resolution reply\n");
+ return XRNIC_FAILED;
+
+ case LOAD_ALTERNATE_PATH:
+ DEBUG_LOG("Received Load Alternate Path request\n");
+ pr_err("Not handling Load Alternate Path request\n");
+ return XRNIC_FAILED;
+
+ case ALTERNATE_PATH_RESPONSE:
+ DEBUG_LOG("Received LAP response\n");
+ pr_err("Not handling LAP response\n");
+ return XRNIC_FAILED;
+
+ default:
+ pr_err("default mad attribute 0x%x\n", mad->attribute_id);
+ break;
+ }
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+ return XRNIC_SUCCESS;
+}
diff --git a/drivers/staging/xlnx_ernic/xcm.h b/drivers/staging/xlnx_ernic/xcm.h
new file mode 100644
index 000000000000..6640b83e5166
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xcm.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+#ifndef _CM_H
+#define _CM_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/***************************** Include Files ********************************/
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_cm.h>
+
+/************************** Constant Definitions *****************************/
+
+/* EXTRA Bytes for Invariant CRC */
+#define ERNIC_INV_CRC 4
+/* ERNIC Doesn't have Variant CRC for P2P */
+#define ERNIC_VAR_CRC 0
+#define EXTRA_PKT_LEN (ERNIC_INV_CRC + ERNIC_VAR_CRC)
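+/* Outgoing QP1 buffers are sized including EXTRA_PKT_LEN so the 4-byte
+ * invariant CRC can be appended (presumably by the ERNIC hardware); the
+ * CRC bytes are excluded from the length passed to xrnic_send_mad().
+ */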
+/* As per RoCEv2 Annex17, the UDP source port can be fixed to avoid
+ * packet-ordering issues. So, to keep things simple, ERNIC also uses a
+ * constant UDP source port.
+ */
+#define ERNIC_UDP_SRC_PORT 0xA000
+
+#define SET_VAL(start, size, val) ((((val) & ((1U << (size)) - 1)) << (start)))
+#define GET_VAL(start, size, val) (((val) >> (start)) & ((1U << (size)) - 1))
+#define BTH_SET(FIELD, v) SET_VAL(BTH_##FIELD##_OFF, \
+ BTH_##FIELD##_SZ, v)
+#define DETH_SET(FIELD, v) SET_VAL(DETH_##FIELD##_OFF, \
+ DETH_##FIELD##_SZ, v)
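+/* Example: BTH_SET(OPCODE, val) expands to
+ * SET_VAL(BTH_OPCODE_OFF, BTH_OPCODE_SZ, val), i.e. the 8-bit opcode is
+ * masked and shifted into bits [31:24] of BTH offset0.
+ */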
+
+#define SET_HDR_OFFSET(ptr, off) ((ptr) += (off))
+#define SET_CM_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct ib_mad_hdr))
+#define SET_ETH_HDR(ptr) SET_HDR_OFFSET(ptr, 0)
+#define SET_IP_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct ethhdr))
+#define SET_NET_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct iphdr))
+#define SET_BTH_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct udphdr))
+#define SET_DETH_HDR(ptr) SET_HDR_OFFSET(ptr, IB_BTH_BYTES)
+#define SET_MAD_HDR(ptr) SET_HDR_OFFSET(ptr, IB_DETH_BYTES)
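+/* Applying the SET_*_HDR macros in order walks one buffer pointer through
+ * the QP1 packet layout:
+ * | ethhdr | iphdr | udphdr | BTH | DETH | MAD |
+ */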
+
+#define CMA_VERSION 0
+#define IB_ENFORCED_QEY 0x80010000
+#define IB_CM_CLASS_VER 2
+/*****************************************************************************/
+struct ib_bth {
+ __be32 offset0;
+#define BTH_PKEY_OFF 0
+#define BTH_PKEY_SZ 16
+#define BTH_TVER_OFF 16
+#define BTH_TVER_SZ 4
+#define BTH_PAD_OFF 20
+#define BTH_PAD_SZ 2
+#define BTH_MIG_OFF 22
+#define BTH_MIG_SZ 1
+#define BTH_SE_OFF 23
+#define BTH_SE_SZ 1
+#define BTH_OPCODE_OFF 24
+#define BTH_OPCODE_SZ 8
+ __be32 offset4;
+#define BTH_DEST_QP_OFF 0
+#define BTH_DEST_QP_SZ 24
+ __be32 offset8;
+#define BTH_PSN_OFF 0
+#define BTH_PSN_SZ 24
+#define BTH_ACK_OFF 31
+#define BTH_ACK_SZ 1
+};
+
+struct ib_deth {
+ __be32 offset0;
+#define DETH_QKEY_OFF 0
+#define DETH_QKEY_SZ 32
+ __be32 offset4;
+#define DETH_SQP_OFF 0
+#define DETH_SQP_SZ 24
+};
+
+struct cma_rtu {
+ u32 local_comm_id;
+ u32 remote_comm_id;
+ u8 private_data[224];
+};
+
+union cma_ip_addr {
+ struct in6_addr ip6;
+ struct {
+ __be32 pad[3];
+ __be32 addr;
+ } ip4;
+};
+
+/* CA11-1: IP Addressing CM REQ Message Private Data Format */
+struct cma_hdr {
+ u8 cma_version;
+ u8 ip_version; /* IP version: 7:4 */
+ __be16 port;
+ union cma_ip_addr src_addr;
+ union cma_ip_addr dst_addr;
+};
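+/* The IP version sits in the high nibble of ip_version; e.g. for IPv4:
+ * hdr->ip_version = 4 << 4;
+ */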
+
+enum transport_svc_type {
+ XRNIC_SVC_TYPE_RC = 0,
+ XRNIC_SVC_TYPE_UC,
+ XRNIC_SVC_TYPE_RD,
+ XRNIC_SVC_TYPE_RSVD,
+};
+
+extern struct list_head cm_id_list;
+
+void xrnic_qp1_send_mad_pkt(void *send_sgl_temp,
+ struct xrnic_qp_attr *qp1_attr, u32 send_pkt_size);
+void xrnic_reset_io_qp(struct xrnic_qp_attr *qp_attr);
+void fill_ipv4_cm_req(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size);
+char *fill_ipv4_headers(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size);
+int xrnic_cm_establishment_handler(void *rq_buf);
+char *fill_mad_common_header(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size,
+ int cm_attr);
+void xrnic_prepare_initial_headers(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf);
+void xrnic_cm_msg_rsp_ack_handler(struct xrnic_qp_attr *qp_attr, void *rq_buf);
+void xrnic_cm_disconnect_send_handler(struct xrnic_qp_attr *qp_attr);
+void xrnic_cm_prepare_rej(struct xrnic_qp_attr *qp_attr,
+ enum xrnic_rej_reason reason,
+ enum xrnic_msg_rej msg);
+void xrnic_send_mad(void *send_buf, u32 size);
+int xrnic_identify_remote_host(void *rq_buf, int qp_num);
+void xrnic_mad_pkt_recv_intr_handler(unsigned long data);
+
+struct ernic_cm_req {
+ u32 local_comm_id;
+ u32 rsvd1;
+ __u64 service_id;
+ __u64 local_ca_guid;
+ u32 rsvd2;
+ u32 local_qkey;
+ u32 offset32;
+ u32 offset36;
+ u32 offset40;
+ u32 offset44;
+ u16 pkey;
+ u8 offset50;
+ u8 offset51;
+ u16 local_lid;
+ u16 remote_lid;
+ union ib_gid local_gid;
+ union ib_gid remote_gid;
+ u32 offset88;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 offset94;
+ u8 offset95;
+ u8 rsvd3[45];
+ u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
+} __packed;
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _CM_H*/
diff --git a/drivers/staging/xlnx_ernic/xcommon.h b/drivers/staging/xlnx_ernic/xcommon.h
new file mode 100644
index 000000000000..c7d9ff6c84b6
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xcommon.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef COMMON_INCL_H
+#define COMMON_INCL_H
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include "xif.h"
+#include "xrocev2.h"
+#include "xhw_def.h"
+#include "xqp.h"
+#include "xcm.h"
+#include "xmr.h"
+#include "xmain.h"
+
+#define XRNIC_FAILED -1
+#define XRNIC_SUCCESS 0
+#define DEBUG_LOG(x, ...) do { \
+ if (debug)\
+ pr_info(x, ##__VA_ARGS__); \
+ } while (0)
+
+extern int debug;
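+/* Note: "debug" is assumed to be defined (e.g. as a module parameter) in
+ * the main ERNIC driver; when non-zero, DEBUG_LOG() messages are emitted
+ * via pr_info().
+ */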
+
+struct xrnic_dev_info {
+ struct xrnic_memory_map xrnic_mmap;
+ struct xrnic_qp_attr qp1_attr;
+ /* TODO: Need to allocate qp_attr on heap.
+ * when max Queue Pairs increases in the design, static memory
+ * requirement will be huge.
+ */
+ struct xrnic_qp_attr qp_attr[XRNIC_MAX_QP_SUPPORT];
+ /* DESTINATION ADDR_FAMILY - IPv4/V6 */
+ u16 ip_addr_type;
+ /* DESTINATION addr in NBO */
+ u8 ipv6_addr[16];
+ u32 pmtu;
+ /* IPV4 address */
+ u8 ipv4_addr[4];
+ u32 qp_falat_local_ptr;
+ struct xrnic_rdma_cm_id_info *curr_cm_id_info;
+ /* TODO: Need to allocate cm_id_info and port_status on heap. */
+ struct xrnic_rdma_cm_id_info *cm_id_info[XRNIC_MAX_PORT_SUPPORT];
+ enum xrnic_port_qp_status port_status[XRNIC_MAX_PORT_SUPPORT];
+ /* Interrupt for RNIC */
+ u32 xrnic_irq;
+ struct tasklet_struct mad_pkt_recv_task;
+ struct tasklet_struct qp_pkt_recv_task;
+ struct tasklet_struct qp_fatal_task;
+ struct tasklet_struct wqe_completed_task;
+ u32 io_qp_count;
+ /*Character Driver Interface*/
+ struct device_node *dev_node;
+ struct resource resource;
+ struct cdev cdev;
+ char pkt_buffer[512];
+ struct device *dev;
+};
+
+extern struct xrnic_dev_info *xrnic_dev;
+#ifdef __cplusplus
+ }
+#endif
+#endif
diff --git a/drivers/staging/xlnx_ernic/xernic_bw_test.c b/drivers/staging/xlnx_ernic/xernic_bw_test.c
new file mode 100644
index 000000000000..0f0977660621
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xernic_bw_test.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx RDMA NIC perftest driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <net/addrconf.h>
+#include "xcommon.h"
+#include "xperftest.h"
+
+/* Default Port Number for Perftest and Depths for XRNIC */
+#define PERFTEST_PORT 18515
+#define PERFTEST_SQ_DEPTH 0x80
+#define PERFTEST_RQ_DEPTH 0x40
+/* Admin and IO QPs */
+#define PERFTEST_ADMIN_QPS 1
+#define PERFTEST_IO_QPS 1
+#define PERFTEST_MAX_QPS (PERFTEST_ADMIN_QPS + PERFTEST_IO_QPS)
+#define PERFTEST_DEFAULT_MEM_SIZE (4 * 1024 * 1024)
+
+#define _1MB_BUF_SIZ (1024 * 1024)
+#define PERF_TEST_RQ_BUF_SIZ ((_1MB_BUF_SIZ + XRNIC_RECV_PKT_SIZE) *\
+ PERFTEST_RQ_DEPTH)
+
+struct xrnic_rdma_cm_id *cm_id;
+static char server_ip[32] = "0.0.0.0";
+struct ernic_pd *pd;
+int prev_qpn;
+
+/* TODO: currently, we have single instance.
+ * Need to convert as per-instance context.
+ */
+struct perftest_ctx {
+ struct xrnic_rdma_cm_id *cm_id;
+ struct ernic_pd *pd;
+ struct mr *reg_mr; /*registered MR */
+};
+
+phys_addr_t phys_mem[PERFTEST_MAX_QPS];
+int io_mr_idx;
+struct mr *perftest_io_mr[PERFTEST_IO_QPS];
+
+struct perftest_ctx perf_context[PERFTEST_MAX_QPS];
+
+struct perftest_wr {
+ union ctx ctx;
+ __u8 reserved1[2];
+ __u32 local_offset[2];
+ __u32 length;
+ __u8 opcode;
+ __u8 reserved2[3];
+ __u32 remote_offset[2];
+ __u32 remote_tag;
+ __u32 completion_info[4];
+ __u8 reserved4[16];
+} __packed;
+
+struct xrnic_qp_init_attr qp_attr;
+
+struct perftest_trinfo {
+ phys_addr_t rq_buf_ba_phys;
+ phys_addr_t send_sgl_phys;
+ phys_addr_t sq_ba_phys;
+ phys_addr_t cq_ba_phys;
+ phys_addr_t rq_wptr_db_phys;
+ phys_addr_t sq_cmpl_db_phys;
+ void __iomem *rq_buf_ba;
+ void __iomem *send_sgl;
+ void __iomem *sq_ba;
+ void __iomem *cq_ba;
+};
+
+struct perftest_trinfo trinfo;
+struct xrnic_rdma_conn_param conn_param;
+int rq_ci_db, sq_cmpl_db;
+
+int port = -1;
+module_param_string(server_ip, server_ip, sizeof(server_ip), 0444);
+module_param(port, int, 0444);
+MODULE_PARM_DESC(server_ip, "Target server ip address");
+MODULE_PARM_DESC(port, "Target server port number");
+
+/**
+ * perftest_parse_addr() - Parses the input IP address string.
+ * @s_addr: IP address structure to fill.
+ * @buf: Input IP address string.
+ * @return: 0 if the address parses as either IPv4 or IPv6,
+ * -EINVAL otherwise.
+ */
+int perftest_parse_addr(struct sockaddr_storage *s_addr, char *buf)
+{
+ size_t buflen = strlen(buf);
+ int ret;
+ const char *delim;
+
+ if (buflen <= INET_ADDRSTRLEN) {
+ struct sockaddr_in *sin_addr = (struct sockaddr_in *)s_addr;
+
+ ret = in4_pton(buf, buflen, (u8 *)&sin_addr->sin_addr.s_addr,
+ '\0', NULL);
+ if (!ret)
+ goto fail;
+
+ sin_addr->sin_family = AF_INET;
+ return 0;
+ }
+ if (buflen <= INET6_ADDRSTRLEN) {
+ struct sockaddr_in6 *sin6_addr = (struct sockaddr_in6 *)s_addr;
+
+ ret = in6_pton(buf, buflen,
+ (u8 *)&sin6_addr->sin6_addr.s6_addr,
+ -1, &delim);
+ if (!ret)
+ goto fail;
+
+ sin6_addr->sin6_family = AF_INET6;
+ return 0;
+ }
+fail:
+ return -EINVAL;
+}
+
+/**
+ * rq_handler() - receive packet callback routine.
+ * @rq_count: Rx packet count.
+ * @rq_context: context info.
+ */
+void rq_handler(u32 rq_count, void *rq_context)
+{
+ int i, qp_num, offset;
+ struct ernic_bwtest_struct *rq_buf;
+ struct xrnic_rdma_cm_id *cm_id;
+ struct perftest_wr *sq_wr;
+ struct mr *mem;
+ struct perftest_ctx *ctx;
+
+ ctx = (struct perftest_ctx *)rq_context;
+ cm_id = ctx->cm_id;
+ qp_num = cm_id->child_qp_num;
+ offset = sq_cmpl_db * XRNIC_SEND_SGL_SIZE;
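+ /* QP1 (child_qp_num == 1) acts as the control QP for this test: for
+ * each packet received on it, the rkey/vaddr of the next registered IO
+ * MR is advertised back to the peer with an RDMA SEND, presumably so
+ * the remote perftest peer can target that memory with RDMA READ/WRITE.
+ */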
+ for (i = 0; i < rq_count; i++) {
+ if (qp_num == 1) {
+ rq_buf = (struct ernic_bwtest_struct *)
+ ((char *)cm_id->qp_info.rq_buf_ba_ca +
+ ((qp_num - 1) * rq_ci_db *
+ XRNIC_RECV_PKT_SIZE));
+ if (io_mr_idx >= PERFTEST_IO_QPS)
+ goto done;
+ mem = perftest_io_mr[io_mr_idx];
+
+ rq_buf->rkey = htonl((unsigned int)mem->rkey);
+ rq_buf->vaddr = cpu_to_be64(mem->vaddr);
+
+ memcpy((u8 *)(trinfo.send_sgl + offset),
+ (u8 *)rq_buf,
+ sizeof(struct ernic_bwtest_struct));
+
+ sq_wr = (struct perftest_wr *)trinfo.sq_ba +
+ sq_cmpl_db;
+ sq_wr->ctx.wr_id = sq_cmpl_db;
+ sq_wr->length = sizeof(struct ernic_bwtest_struct);
+ sq_wr->remote_tag = ntohl(0xDEAD);
+ sq_wr->local_offset[0] = trinfo.send_sgl_phys + offset;
+ sq_wr->local_offset[1] = 0;
+
+ sq_wr->remote_offset[0] = 0x12345678;
+ sq_wr->remote_offset[1] = 0xABCDABCD;
+ sq_wr->completion_info[0] = htonl(0x11111111);
+ sq_wr->completion_info[1] = htonl(0x22222222);
+ sq_wr->completion_info[2] = htonl(0x33333333);
+ sq_wr->completion_info[3] = htonl(0x44444444);
+ sq_wr->opcode = XRNIC_SEND_ONLY;
+ }
+ xrnic_post_recv(&cm_id->qp_info, 1);
+ if (qp_num == 1) {
+ xrnic_post_send(&cm_id->qp_info, 1);
+ if (prev_qpn != rq_buf->qp_number) {
+ if (prev_qpn != 0)
+ io_mr_idx++;
+ prev_qpn = rq_buf->qp_number;
+ }
+ }
+
+done:
+ rq_ci_db++;
+
+ if (rq_ci_db >= (PERFTEST_RQ_DEPTH - 20))
+ rq_ci_db = 0;
+ if (qp_num == 1) {
+ sq_cmpl_db++;
+ if (sq_cmpl_db >= PERFTEST_SQ_DEPTH)
+ sq_cmpl_db = 0;
+ }
+ }
+}
+
+/**
+ * sq_handler() - completion call back.
+ * @sq_count: Tx packet count.
+ * @sq_context: context info.
+ */
+void sq_handler(u32 sq_count, void *sq_context)
+{
+/* TODO: This function is just a place holder for now.
+ * This function should handle completions for outgoing
+ * RDMA_SEND, RDMA_READ and RDMA_WRITE.
+ */
+ pr_info("XLNX[%d:%s]\n", __LINE__, __func__);
+}
+
+/**
+ * perftest_fill_wr() - Fills the workrequest in send queue base address.
+ * @sq_ba: send queue base address of the QP.
+ */
+void perftest_fill_wr(void __iomem *sq_ba)
+{
+ struct perftest_wr *sq_wr;
+ int i;
+
+ for (i = 0; i < XRNIC_SQ_DEPTH; i++) {
+ sq_wr = (struct perftest_wr *)sq_ba + i;
+ sq_wr->ctx.wr_id = i;
+ sq_wr->length = 16;
+ sq_wr->completion_info[0] = 0xAAAAAAAA;
+ sq_wr->completion_info[1] = 0xBBBBBBBB;
+ sq_wr->completion_info[2] = 0xCCCCCCCC;
+ sq_wr->completion_info[3] = 0xDDDDDDDD;
+ sq_wr->opcode = XRNIC_SEND_ONLY;
+ }
+}
+
+/**
+ * perftest_cm_handler() - CM handler call back routine.
+ * @cm_id: CM ID on which event received.
+ * @conn_event: Event information on the CM.
+ * @return: 0 on success or error code on failure.
+ */
+static int perftest_cm_handler(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event)
+{
+ int qp_num, per_qp_size;
+ struct perftest_ctx *ctx;
+
+ qp_num = cm_id->child_qp_num;
+ memset(&qp_attr, 0, sizeof(struct xrnic_qp_init_attr));
+ ctx = &perf_context[qp_num - 1];
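+ /* The RQ buffers, SQ and CQ rings for all QPs are carved out of the
+ * single carve-out allocations made in perftest_init(); each QP gets
+ * a fixed-size slice computed below from its 1-based qp_num.
+ */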
+ switch (conn_event->cm_event) {
+ case XRNIC_REQ_RCVD:
+ qp_attr.xrnic_rq_event_handler = rq_handler;
+ qp_attr.xrnic_sq_event_handler = sq_handler;
+ qp_attr.qp_type = XRNIC_QPT_RC;
+ if (qp_num > 1) {
+ qp_attr.recv_pkt_size = _1MB_BUF_SIZ;
+ per_qp_size = (qp_num - 2) * _1MB_BUF_SIZ *
+ PERFTEST_RQ_DEPTH + XRNIC_RECV_PKT_SIZE *
+ PERFTEST_RQ_DEPTH;
+ } else {
+ qp_attr.recv_pkt_size = XRNIC_RECV_PKT_SIZE;
+ per_qp_size = 0;
+ }
+ qp_attr.rq_buf_ba_ca_phys = trinfo.rq_buf_ba_phys +
+ per_qp_size;
+ qp_attr.rq_buf_ba_ca = (char *)trinfo.rq_buf_ba +
+ per_qp_size;
+ per_qp_size = (qp_num - 1) * sizeof(struct perftest_wr) *
+ PERFTEST_SQ_DEPTH;
+ qp_attr.sq_ba_phys = trinfo.sq_ba_phys + per_qp_size;
+ qp_attr.sq_ba = (char *)trinfo.sq_ba + per_qp_size;
+ per_qp_size = (qp_num - 1) * (PERFTEST_SQ_DEPTH * 4);
+ qp_attr.cq_ba_phys = trinfo.cq_ba_phys + per_qp_size;
+ qp_attr.cq_ba = (char *)trinfo.cq_ba + per_qp_size;
+ qp_attr.rq_context = ctx;
+ qp_attr.sq_context = ctx;
+ ctx->cm_id = cm_id;
+ qp_attr.sq_depth = PERFTEST_SQ_DEPTH;
+ qp_attr.rq_depth = PERFTEST_RQ_DEPTH;
+ ctx->reg_mr = reg_phys_mr(pd, phys_mem[qp_num - 1],
+ PERFTEST_DEFAULT_MEM_SIZE,
+ MR_ACCESS_RDWR, NULL);
+ if (qp_num > 1)
+ perftest_io_mr[qp_num - 2] = ctx->reg_mr;
+
+ xrnic_rdma_create_qp(cm_id, ctx->reg_mr->pd,
+ &qp_attr);
+
+ memset(&conn_param, 0, sizeof(conn_param));
+ conn_param.initiator_depth = 16;
+ conn_param.responder_resources = 16;
+ xrnic_rdma_accept(cm_id, &conn_param);
+ break;
+ case XRNIC_ESTABLISHD:
+ if (cm_id->child_qp_num > 1) {
+ perftest_fill_wr((char *)trinfo.sq_ba +
+ ((qp_num - 1) *
+ sizeof(struct perftest_wr) *
+ PERFTEST_SQ_DEPTH));
+ xrnic_hw_hs_reset_sq_cq(&cm_id->qp_info, NULL);
+ }
+ break;
+ case XRNIC_DREQ_RCVD:
+ xrnic_destroy_qp(&cm_id->qp_info);
+ xrnic_rdma_disconnect(cm_id);
+ xrnic_rdma_destroy_id(cm_id, 0);
+ dereg_mr(ctx->reg_mr);
+ io_mr_idx = 0;
+ prev_qpn = 0;
+ rq_ci_db = 0;
+ sq_cmpl_db = 0;
+ break;
+ default:
+ pr_info("Unhandled CM Event: %d\n",
+ conn_event->cm_event);
+ }
+ return 0;
+}
+
+/**
+ * perftest_init() - Perf test init function.
+ * @return: 0 on success or error code on failure.
+ */
+static int __init perftest_init(void)
+{
+ int ret, i;
+ struct sockaddr_storage s_addr;
+ struct sockaddr_in *sin_addr;
+ struct sockaddr_in6 *sin6_addr;
+
+ if (strcmp(server_ip, "0.0.0.0") == 0) {
+ pr_err("server ip module parameter not provided\n");
+ return -EINVAL;
+ }
+
+ /* If port number is not set, then it should point to the default */
+ if (port == -1) {
+ port = PERFTEST_PORT;
+ pr_info("Using app default port number: %d\n", port);
+ } else if (port < 0) {
+ /* Any other negative value.
+ * Some ports are reserved and a few others may be in use;
+ * a check could be added here to validate that the given
+ * port number is free to use.
+ */
+ pr_err("port number should not be a negative value\n");
+ return -EINVAL;
+ }
+ pr_info("Using port number %d\n", port);
+
+ cm_id = xrnic_rdma_create_id(perftest_cm_handler, NULL, XRNIC_PS_TCP,
+ XRNIC_QPT_UC, PERFTEST_MAX_QPS);
+ if (!cm_id)
+ goto err;
+
+ if (perftest_parse_addr(&s_addr, server_ip))
+ goto err;
+
+ if (s_addr.ss_family == AF_INET) {
+ sin_addr = (struct sockaddr_in *)&s_addr;
+ ret = xrnic_rdma_bind_addr(cm_id,
+ (u8 *)&sin_addr->sin_addr.s_addr,
+ port, AF_INET);
+ if (ret < 0) {
+ pr_err("RDMA BIND Failed for IPv4\n");
+ goto err;
+ }
+ }
+ if (s_addr.ss_family == AF_INET6) {
+ sin6_addr = (struct sockaddr_in6 *)&s_addr;
+ ret = xrnic_rdma_bind_addr(cm_id,
+ (u8 *)&sin6_addr->sin6_addr.s6_addr,
+ port, AF_INET6);
+ if (ret < 0) {
+ pr_err("RDMA BIND Failed for IPv6\n");
+ goto err;
+ }
+ }
+
+ if (xrnic_rdma_listen(cm_id, 1) != XRNIC_SUCCESS)
+ goto err;
+
+ trinfo.rq_buf_ba_phys = alloc_mem(NULL, PERF_TEST_RQ_BUF_SIZ);
+ if (-ENOMEM == trinfo.rq_buf_ba_phys)
+ goto err;
+ trinfo.rq_buf_ba =
+ (void __iomem *)(uintptr_t)get_virt_addr
+ (trinfo.rq_buf_ba_phys);
+
+ trinfo.send_sgl_phys = alloc_mem(NULL, 0x400000);
+ if (-ENOMEM == trinfo.send_sgl_phys)
+ goto err;
+ trinfo.send_sgl =
+ (void __iomem *)(uintptr_t)get_virt_addr(trinfo.send_sgl_phys);
+
+ trinfo.sq_ba_phys = alloc_mem(NULL, 0x100000);
+ if (-ENOMEM == trinfo.sq_ba_phys)
+ goto err;
+ trinfo.sq_ba =
+ (void __iomem *)(uintptr_t)get_virt_addr(trinfo.sq_ba_phys);
+
+ trinfo.cq_ba_phys = alloc_mem(NULL, 0x40000);
+ if (-ENOMEM == trinfo.cq_ba_phys)
+ goto err;
+ trinfo.cq_ba =
+ (void __iomem *)(uintptr_t)get_virt_addr(trinfo.cq_ba_phys);
+ trinfo.rq_wptr_db_phys = alloc_mem(NULL, 8);
+ trinfo.sq_cmpl_db_phys = alloc_mem(NULL, 8);
+ pd = alloc_pd();
+ for (i = 0; i < PERFTEST_MAX_QPS; i++) {
+ phys_mem[i] = alloc_mem(pd, PERFTEST_DEFAULT_MEM_SIZE);
+ if (IS_ERR_VALUE(phys_mem[i])) {
+ pr_err("PERFTEST[%d:%s] Mem registration failed: %lld\n",
+ __LINE__, __func__, phys_mem[i]);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+/* free_mem() works only on valid physical addresses returned from
+ * alloc_mem(), and ignores NULL or invalid addresses.
+ * So, even if any of the above allocations fails in the middle,
+ * we can safely call free_mem() on all addresses.
+ *
+ * Carve-out memory is used for the requirements of ERNIC, so
+ * devm_kzalloc() cannot be used: the kernel cannot see these
+ * memories until they are ioremapped.
+ */
+ free_mem(trinfo.rq_buf_ba_phys);
+ free_mem(trinfo.send_sgl_phys);
+ free_mem(trinfo.sq_ba_phys);
+ free_mem(trinfo.cq_ba_phys);
+ free_mem(trinfo.rq_wptr_db_phys);
+ free_mem(trinfo.sq_cmpl_db_phys);
+ for (i = 0; i < PERFTEST_MAX_QPS; i++)
+ free_mem(phys_mem[i]);
+
+ dealloc_pd(pd);
+
+ return -EINVAL;
+}
+
+/**
+ * perftest_exit() - perftest module exit function.
+ */
+static void __exit perftest_exit(void)
+{
+ int i;
+
+ free_mem(trinfo.rq_buf_ba_phys);
+ free_mem(trinfo.send_sgl_phys);
+ free_mem(trinfo.sq_ba_phys);
+ free_mem(trinfo.cq_ba_phys);
+ free_mem(trinfo.rq_wptr_db_phys);
+ free_mem(trinfo.sq_cmpl_db_phys);
+ for (i = 0; i < PERFTEST_MAX_QPS; i++)
+ free_mem(phys_mem[i]);
+
+ dealloc_pd(pd);
+}
+
+/* This driver is an example driver, which uses the APIs exported by the
+ * ernic driver to demonstrate RDMA communication between peers on an
+ * InfiniBand network. The remote peer can be any RDMA enabled NIC.
+ * There is no real device for this driver, so a compatibility string and
+ * probe function are not needed.
+ */
+module_init(perftest_init);
+module_exit(perftest_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Perftest Example driver");
+MODULE_AUTHOR("SDHANVAD");
diff --git a/drivers/staging/xlnx_ernic/xhw_config.h b/drivers/staging/xlnx_ernic/xhw_config.h
new file mode 100644
index 000000000000..7846abd18bec
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xhw_config.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_HW_CONFIG_H
+#define _XRNIC_HW_CONFIG_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+
+#define XRNIC_HW_MAX_QP_ENABLE 30
+#define XRNIC_HW_MAX_QP_SUPPORT 28
+#define XRNIC_HW_FLOW_CONTROL_VALUE 0
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_HW_CONFIG_H*/
diff --git a/drivers/staging/xlnx_ernic/xhw_def.h b/drivers/staging/xlnx_ernic/xhw_def.h
new file mode 100644
index 000000000000..c59f266c03f6
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xhw_def.h
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_HW_DEF_H
+#define _XRNIC_HW_DEF_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include "xhw_config.h"
+
+#define XRNIC_MAX_QP_ENABLE XRNIC_HW_MAX_QP_ENABLE
+#define XRNIC_MAX_QP_SUPPORT XRNIC_HW_MAX_QP_SUPPORT
+#define XRNIC_MAX_PORT_SUPPORT 0xFFFE
+#define XRNIC_REG_WIDTH 32
+#define XRNIC_QPS_ENABLED XRNIC_MAX_QP_ENABLE
+#define XRNIC_QP1_SEND_PKT_SIZE 512
+#define XRNIC_FLOW_CONTROL_VALUE XRNIC_HW_FLOW_CONTROL_VALUE
+#define XRNIC_CONFIG_XRNIC_EN 0x1
+#define XRNIC_UDP_SRC_PORT 0x12B7
+#define XRNIC_CONFIG_IP_VERSION (0x1 << 1)
+#define XRNIC_CONFIG_DEPKT_BYPASS_EN (0x1 << 2)
+#define XRNIC_CONFIG_ERR_BUF_EN (0x1 << 5)
+#define XRNIC_CONFIG_FLOW_CONTROL_EN (XRNIC_FLOW_CONTROL_VALUE << 6)
+#define XRNIC_CONFIG_NUM_QPS_ENABLED (XRNIC_QPS_ENABLED << 8)
+#define XRNIC_CONFIG_UDP_SRC_PORT (XRNIC_UDP_SRC_PORT << 16)
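+/* Example: a typical global configuration word is assembled by OR-ing
+ * these fields, e.g.
+ * XRNIC_CONFIG_XRNIC_EN | XRNIC_CONFIG_NUM_QPS_ENABLED |
+ * XRNIC_CONFIG_UDP_SRC_PORT
+ */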
+
+#define XRNIC_RQ_CQ_INTR_STS_REG_SUPPORTED 1
+
+/* Clear the interrupt by writing that bit to the interrupt status register.*/
+#define RDMA_READ 4
+#define RDMA_SEND 2
+#define RDMA_WRITE 0
+
+#define XRNIC_QP_TIMEOUT_RETRY_CNT 0x3 /*0x3*/
+#define XRNIC_QP_TIMEOUT_RNR_NAK_TVAL 0x1F /*MAX*/
+#define XRNIC_QP_TIMEOUT_CONFIG_TIMEOUT 0x1F /*MAX 0x1f*/
+#define XRNIC_QP_TIMEOUT_CONFIG_RETRY_CNT \
+ (XRNIC_QP_TIMEOUT_RETRY_CNT << 8)
+#define XRNIC_QP_TIMEOUT_CONFIG_RNR_RETRY_CNT \
+ (XRNIC_QP_TIMEOUT_RETRY_CNT << 11)
+#define XRNIC_QP_TIMEOUT_CONFIG_RNR_NAK_TVAL \
+ (XRNIC_QP_TIMEOUT_RNR_NAK_TVAL << 16)
+
+#define XRNIC_QP_PMTU 0x4
+#define XRNIC_QP_MAX_RD_OS 0xFF
+#define XRNIC_QP_RQ_BUFF_SZ 0x2
+#define XRNIC_QP1_RQ_BUFF_SZ 0x02
+#define XRNIC_QP_CONFIG_QP_ENABLE 0x1
+#define XRNIC_QP_CONFIG_ACK_COALSE_EN BIT(1)
+#define XRNIC_QP_CONFIG_RQ_INTR_EN BIT(2)
+#define XRNIC_QP_CONFIG_CQE_INTR_EN BIT(3)
+#define XRNIC_QP_CONFIG_HW_HNDSHK_DIS BIT(4)
+#define XRNIC_QP_CONFIG_CQE_WRITE_EN BIT(5)
+#define XRNIC_QP_CONFIG_UNDER_RECOVERY BIT(6)
+#define XRNIC_QP_CONFIG_IPV6_EN BIT(7)
+#define XRNIC_QP_CONFIG_PMTU (0x4 << 8)
+#define XRNIC_QP_CONFIG_PMTU_256 (0x0 << 8)
+#define XRNIC_QP_CONFIG_PMTU_512 (0x1 << 8)
+#define XRNIC_QP_CONFIG_PMTU_1024 (0x2 << 8)
+#define XRNIC_QP_CONFIG_PMTU_2048 (0x3 << 8)
+#define XRNIC_QP_CONFIG_PMTU_4096 (0x4 << 8)
+#define XRNIC_QP_RQ_BUF_SIZ_DIV (256)
+#define XRNIC_QP_RQ_BUF_CFG_REG_BIT_OFS (16)
+#define XRNIC_QP_CONFIG_RQ_BUFF_SZ(x) (((x) / XRNIC_QP_RQ_BUF_SIZ_DIV)\
+ << XRNIC_QP_RQ_BUF_CFG_REG_BIT_OFS)
+#define XRNIC_QP1_CONFIG_RQ_BUFF_SZ (XRNIC_QP1_RQ_BUFF_SZ << 16)
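+/* Example: a 4 KiB receive buffer programs 4096 / 256 = 16 into the
+ * rq_buf_sz field, i.e. XRNIC_QP_CONFIG_RQ_BUFF_SZ(4096) == (16 << 16).
+ */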
+
+#define XRNIC_QP_PARTITION_KEY 0xFFFF
+#define XRNIC_QP_TIME_TO_LIVE 0x40
+
+#define XRNIC_QP_ADV_CONFIG_TRAFFIC_CLASS 0x3F
+#define XRNIC_QP_ADV_CONFIG_TIME_TO_LIVE (XRNIC_QP_TIME_TO_LIVE << 8)
+#define XRNIC_QP_ADV_CONFIG_PARTITION_KEY (XRNIC_QP_PARTITION_KEY << 16)
+
+#define XRNIC_REJ_RESEND_COUNT 3
+#define XRNIC_REP_RESEND_COUNT 3
+#define XRNIC_DREQ_RESEND_COUNT 3
+
+#define XNVEMEOF_RNIC_IF_RHOST_BASE_ADDRESS 0x8c000000
+#define XRNIC_CONFIG_ENABLE 1
+#define XRNIC_RESERVED_SPACE 0x4000
+#define XRNIC_NUM_OF_TX_HDR 128
+#define XRNIC_SIZE_OF_TX_HDR 128
+#define XRNIC_NUM_OF_TX_SGL 256
+#define XRNIC_SIZE_OF_TX_SGL 64
+#define XRNIC_NUM_OF_BYPASS_BUF 32
+#define XRNIC_SIZE_OF_BYPASS_BUF 512
+#define XRNIC_NUM_OF_ERROR_BUF 64
+#define XRNIC_SIZE_OF_ERROR_BUF 256
+#define XRNIC_OUT_ERRST_Q_NUM_ENTRIES 0x40
+#define XRNIC_OUT_ERRST_Q_WRPTR 0x0
+#define XRNIC_IN_ERRST_Q_NUM_ENTRIES 0x40
+#define XRNIC_IN_ERRST_Q_WRPTR 0x0
+#define XRNIC_NUM_OF_DATA_BUF 4096
+#define XRNIC_SIZE_OF_DATA_BUF 4096
+#define XRNIC_NUM_OF_RESP_ERR_BUF 64
+#define XRNIC_SIZE_OF_RESP_ERR_BUF 256
+#define XRNIC_MAD_HEADER 24
+#define XRNIC_MAD_DATA 232
+#define XRNIC_RECV_PKT_SIZE 512
+#define XRNIC_SEND_PKT_SIZE 64
+#define XRNIC_SEND_SGL_SIZE 4096
+#define XRNIC_MAX_SEND_SGL_SIZE 4096
+#define XRNIC_MAX_SEND_PKT_SIZE 4096
+#define XRNIC_MAX_RECV_PKT_SIZE 4096
+#define XRNIC_MAX_SQ_DEPTH 256
+#define XRNIC_MAX_RQ_DEPTH 256
+#define XRNIC_SQ_DEPTH 128
+#define XRNIC_RQ_DEPTH 64
+#define XRNIC_RQ_WRPTR_DBL 0xBC004000
+#define XRNIC_BYPASS_BUF_WRPTR 0xBC00C000
+#define XRNIC_ERROR_BUF_WRPTR 0xBC010000
+
+#define PKT_VALID_ERR_INTR_EN 0x1
+#define MAD_PKT_RCVD_INTR_EN (0x1 << 1)
+#define BYPASS_PKT_RCVD_INTR_EN (0x1 << 2)
+#define RNR_NACK_GEN_INTR_EN (0x1 << 3)
+#define WQE_COMPLETED_INTR_EN (0x1 << 4)
+#define ILL_OPC_SENDQ_INTR_EN (0x1 << 5)
+#define QP_PKT_RCVD_INTR_EN (0x1 << 6)
+#define FATAL_ERR_INTR_EN (0x1 << 7)
+#define ERNIC_MEM_REGISTER
+
+#define XRNIC_INTR_ENABLE_DEFAULT 0x000000FF
+#define XRNIC_VALID_INTR_ENABLE 0
+
+/* XRNIC Controller global configuration registers */
+
+struct xrnic_conf {
+ __u32 xrnic_en:1;
+ __u32 ip_version:1; //IPv6 or IPv4
+ __u32 depkt_bypass_en:1;
+ __u32 reserved:5;
+ __u32 num_qps_enabled:8;
+ __u32 udp_src_port:16;
+} __packed;
+
+struct tx_hdr_buf_sz {
+ __u32 num_hdrs:16;
+ __u32 buffer_sz:16; //in bytes
+} __packed;
+
+struct tx_sgl_buf_sz {
+ __u32 num_sgls:16;
+ __u32 buffer_sz:16; //in bytes
+} __packed;
+
+struct bypass_buf_sz {
+ __u32 num_bufs:16;
+ __u32 buffer_sz:16;
+} __packed;
+
+struct err_pkt_buf_sz {
+ __u32 num_bufs:16;
+ __u32 buffer_sz:16;
+} __packed;
+
+struct timeout_conf {
+ __u32 timeout:5;
+ __u32 reserved:3;
+ __u32 retry_cnt:3;
+ __u32 retry_cnt_rnr:3;
+ __u32 reserved1:2;
+ __u32 rnr_nak_tval:5;
+ __u32 reserved2:11;
+} __packed;
+
+struct out_errsts_q_sz {
+ __u32 num_entries:16;
+ __u32 reserved:16;
+} __packed;
+
+struct in_errsts_q_sz {
+ __u32 num_entries:16;
+ __u32 reserved:16;
+} __packed;
+
+struct inc_sr_pkt_cnt {
+ __u32 inc_send_cnt:16;
+ __u32 inc_rresp_cnt:16;
+} __packed;
+
+struct inc_am_pkt_cnt {
+ __u32 inc_acknack_cnt:16;
+ __u32 inc_mad_cnt:16;
+} __packed;
+
+struct out_io_pkt_cnt {
+ __u32 inc_send_cnt:16;
+ __u32 inc_rw_cnt:16;
+} __packed;
+
+struct out_am_pkt_cnt {
+ __u32 inc_acknack_cnt:16;
+ __u32 inc_mad_cnt:16;
+} __packed;
+
+struct last_in_pkt {
+ __u32 opcode:8;
+ __u32 qpid:8;
+ __u32 psn_lsb:16;
+} __packed;
+
+struct last_out_pkt {
+ __u32 opcode:8;
+ __u32 qpid:8;
+ __u32 psn_lsb:16;
+} __packed;
+
+/*Interrupt register definition.*/
+struct intr_en {
+ __u32 pkt_valdn_err_intr_en:1;
+ __u32 mad_pkt_rcvd_intr_en:1;
+ __u32 bypass_pkt_rcvd_intr_en:1;
+ __u32 rnr_nack_gen_intr_en:1;
+ __u32 wqe_completed_i:1;
+ __u32 ill_opc_in_sq_intr_en:1;
+ __u32 qp_pkt_rcvd_intr_en:1;
+ __u32 fatal_err_intr_en:1;
+ __u32 reverved:24;
+} __packed;
+
+struct data_buf_sz {
+ __u16 num_bufs;
+ __u16 buffer_sz;
+};
+
+struct resp_err_buf_sz {
+ __u16 num_bufs;
+ __u16 buffer_sz;
+};
+
+/*Global register configuration*/
+struct xrnic_ctrl_config {
+ struct xrnic_conf xrnic_conf;
+ __u32 xrnic_adv_conf;
+ __u32 reserved1[2];
+ __u32 mac_xrnic_src_addr_lsb;
+ __u32 mac_xrnic_src_addr_msb;
+ __u32 reserved2[2];
+ __u32 ip_xrnic_addr1; //0x0020
+ __u32 ip_xrnic_addr2; //0x0024
+ __u32 ip_xrnic_addr3; //0x0028
+ __u32 ip_xrnic_addr4; //0x002C
+ __u32 tx_hdr_buf_ba; //0x0030
+ __u32 reserved_0x34; //0x0034
+ struct tx_hdr_buf_sz tx_hdr_buf_sz; //0x0038
+ __u32 reserved_0x3c;
+
+ __u32 tx_sgl_buf_ba; //0x0040
+ __u32 reserved_0x44; //0x0044
+ struct tx_sgl_buf_sz tx_sgl_buf_sz; //0x0048
+ __u32 reserved_0x4c;
+
+ __u32 bypass_buf_ba; //0x0050
+ __u32 reserved_0x54; //0x0054
+ struct bypass_buf_sz bypass_buf_sz; //0x0058
+ __u32 bypass_buf_wrptr; //0x005C
+ __u32 err_pkt_buf_ba; //0x0060
+ __u32 reserved_0x64; //0x0064
+ struct err_pkt_buf_sz err_pkt_buf_sz; //0x0068
+ __u32 err_buf_wrptr; //0x006C
+ __u32 ipv4_address; //0x0070
+ __u32 reserved_0x74;
+
+ __u32 out_errsts_q_ba; //0x0078
+ __u32 reserved_0x7c;
+ struct out_errsts_q_sz out_errsts_q_sz; //0x0080
+ __u32 out_errsts_q_wrptr; //0x0084
+
+ __u32 in_errsts_q_ba; //0x0088
+ __u32 reserved_0x8c;
+ struct in_errsts_q_sz in_errsts_q_sz; //0x0090
+ __u32 in_errsts_q_wrptr; //0x0094
+
+ __u32 reserved_0x98; //0x0098
+ __u32 reserved_0x9c; //0x009C
+
+ __u32 data_buf_ba; //0x00A0
+ __u32 reserved_0xa4; //0x00A4
+ struct data_buf_sz data_buf_sz; //0x00A8
+
+ __u32 cnct_io_conf; //0x00AC
+
+ __u32 resp_err_pkt_buf_ba; //0x00B0
+ __u32 reserved_0xb4; //0x00B4
+ struct resp_err_buf_sz resp_err_buf_sz; //0x00B8
+
+ __u32 reserved3[17]; //0x00BC
+
+ struct inc_sr_pkt_cnt inc_sr_pkt_cnt;//0x0100
+ struct inc_am_pkt_cnt inc_am_pkt_cnt;//0x0104
+ struct out_io_pkt_cnt out_io_pkt_cnt;//0x108
+ struct out_am_pkt_cnt out_am_pkt_cnt;//0x010c
+ struct last_in_pkt last_in_pkt; //0x0110
+ struct last_out_pkt last_out_pkt; //0x0114
+
+ __u32 inv_dup_pkt_cnt; //0x0118 incoming invalid duplicate
+
+ __u32 rnr_in_pkt_sts; //0x011C
+ __u32 rnr_out_pkt_sts; //0x0120
+
+ __u32 wqe_proc_sts; //0x0124
+
+ __u32 pkt_hdr_vld_sts; //0x0128
+ __u32 qp_mgr_sts; //0x012C
+
+ __u32 incoming_all_drop_count; //0x130
+ __u32 incoming_nack_pkt_count; //0x134
+ __u32 outgoing_nack_pkt_count; //0x138
+ __u32 resp_handler_status; //0x13C
+
+ __u32 reserved4[16];
+
+ struct intr_en intr_en; //0x0180
+ __u32 intr_sts; //0x0184
+ __u32 reserved5[2];
+ __u32 rq_intr_sts_1; //0x0190
+ __u32 rq_intr_sts_2; //0x0194
+ __u32 rq_intr_sts_3; //0x0198
+ __u32 rq_intr_sts_4; //0x019C
+ __u32 rq_intr_sts_5; //0x01A0
+ __u32 rq_intr_sts_6; //0x01A4
+ __u32 rq_intr_sts_7; //0x01A8
+ __u32 rq_intr_sts_8; //0x01AC
+
+ __u32 cq_intr_sts_1; //0x01B0
+ __u32 cq_intr_sts_2; //0x01B4
+ __u32 cq_intr_sts_3; //0x01B8
+ __u32 cq_intr_sts_4; //0x01BC
+ __u32 cq_intr_sts_5; //0x01C0
+ __u32 cq_intr_sts_6; //0x01C4
+ __u32 cq_intr_sts_7; //0x01C8
+ __u32 cq_intr_sts_8; //0x01CC
+
+ __u32 reserved6[12];
+};
+
+struct qp_conf {
+ __u32 qp_enable:1;
+ __u32 ack_coalsc_en:1;
+ __u32 rq_intr_en:1;
+ __u32 cq_intr_en:1;
+ __u32 hw_hndshk_dis:1;
+ __u32 cqe_write_en:1;
+ __u32 qp_under_recovery:1;
+ __u32 ip_version:1;
+ __u32 pmtu :3;
+ __u32 reserved2:5;
+ __u32 rq_buf_sz:16; //RQ buffer size (in multiples of 256B)
+} __packed;
+
+struct qp_adv_conf {
+ __u32 traffic_class:6;
+ __u32 reserved1 :2;
+ __u32 time_to_live:8;
+ __u32 partition_key:16;
+} __packed;
+
+struct time_out {
+ __u32 timeout:5;
+ __u32 reserved1:3;
+ __u32 retry_cnt:3;
+ __u32 reserved2:5;
+ __u32 rnr_nak_tval:5;
+ __u32 reserved3:3;
+ __u32 curr_retry_cnt:3;
+ __u32 reserved4:2;
+ __u32 curr_rnr_nack_cnt:3;
+ __u32 reserved:1;
+} __packed;
+
+struct qp_status {
+ __u32 qp_fatal:1;
+ __u32 rq_ovfl:1;
+ __u32 sq_full:1;
+ __u32 osq_full:1;
+ __u32 cq_full:1;
+ __u32 reserved1:4;
+ __u32 sq_empty:1;
+ __u32 osq_empty:1;
+ __u32 qp_retried:1;
+ __u32 reserved2:4;
+ __u32 nak_syndr_rcvd:7;
+ __u32 reserved3:1;
+ __u32 curr_retry_cnt:3;
+ __u32 reserved4:1;
+ __u32 curr_rnr_nack_cnt:3;
+ __u32 reserved5:1;
+} __packed;
+
+//This structure is applicable to RDMA queue pairs other than QP1.
+struct rq_buf_ba_ca {
+ __u32 reserved:8; //0x308
+ __u32 rq_buf_ba:24;
+} __packed;
+
+struct sq_ba {
+ __u32 reserved1:5; //0x310
+ __u32 sq_ba:27;
+} __packed;
+
+struct cq_ba {
+ __u32 reserved2:5; //0x318
+ __u32 cq_ba:27;
+} __packed;
+
+struct cq_head {
+ __u32 cq_head:16; //0x330
+ __u32 reserved5:16;
+} __packed;
+
+struct rq_ci_db {
+ __u32 rq_ci_db:16; //0x334
+ __u32 reserved6:16;
+} __packed;
+
+struct sq_pi_db {
+ __u32 sq_pi_db:16; //0x338
+ __u32 reserved7:16;
+} __packed;
+
+struct q_depth {
+ __u32 sq_depth:16; //0x33c
+ __u32 cq_depth:16;
+} __packed;
+
+struct sq_psn {
+ __u32 sq_psn:24; //0x340
+ __u32 reserved8:8;
+} __packed;
+
+struct last_rq_req {
+ __u32 rq_psn:24; //0x344
+ __u32 rq_opcode:8;
+} __packed;
+
+struct dest_qp_conf {
+ __u32 dest_qpid:24; //0x348
+ __u32 reserved9:8;
+} __packed;
+
+struct stat_ssn {
+ __u32 exp_ssn:24; //0x380
+ __u32 reserved10:8;
+} __packed;
+
+struct stat_msn {
+ __u32 curr_msn:24; //0x384
+ __u32 reserved11:8;
+
+} __packed;
+
+struct stat_curr_sqptr_pro {
+ __u32 curr_sqptr_proc:16;
+ __u32 reserved12:16;
+} __packed;
+
+struct stat_resp_psn {
+ __u32 exp_resp_psn:24;
+ __u32 reserved:8;
+} __packed;
+
+struct stat_rq_buf_ca {
+ __u32 reserved:8;
+ __u32 rq_buf_ca:24;
+} __packed;
+
+/* QP1 is a special QP for all management packets, as per the RoCEv2 spec */
+struct rdma_qp1_attr {
+ struct qp_conf qp_conf; //0x200
+ struct qp_adv_conf qp_adv_conf; //0x204
+ struct rq_buf_ba_ca rq_buf_ba_ca; //0x208
+ __u32 reserved_0x20c; //0x20c
+ struct sq_ba sq_ba; //0x210
+ __u32 reserved_0x214; //0x214
+ struct cq_ba cq_ba; //0x218
+ __u32 reserved_0x21c; //0x21c
+ __u32 rq_wrptr_db_add; //0x220
+ __u32 reserved_0x224; //0x224
+ __u32 sq_cmpl_db_add; //0x228
+ __u32 reserved_0x22c; //0x22c
+ struct cq_head cq_head; //0x230
+ struct rq_ci_db rq_ci_db; //0x234
+ struct sq_pi_db sq_pi_db; //0x238
+ struct q_depth q_depth; //0x23c
+ __u32 reserved1[2]; //0x240
+ struct dest_qp_conf dest_qp_conf; //0x248
+ struct timeout_conf timeout_conf; //0x24C
+ __u32 mac_dest_addr_lsb; //0x250
+ __u32 mac_dest_addr_msb; //0x254
+ __u32 reserved2[2];
+ __u32 ip_dest_addr1; //0x260
+ __u32 ip_dest_addr2; //0x264
+ __u32 ip_dest_addr3; //0x268
+ __u32 ip_dest_addr4; //0x26C
+ __u32 reserved3[6]; //0x270-0x287(inclusive)
+ struct qp_status qp_status; //0x288
+ __u32 reserved4[2]; //0x28C-0x293(inclusive)
+ struct stat_rq_buf_ca stat_rq_buf_ca;//0x294
+ __u32 reserved5[26]; //0x298-0x2FF(inclusive)
+};
+
+/* General RDMA QP attribute*/
+struct rdma_qp_attr {
+ struct qp_conf qp_conf; //0x300
+ struct qp_adv_conf qp_adv_conf; //0x304
+ struct rq_buf_ba_ca rq_buf_ba_ca;//0x308
+ __u32 reserved_0x30c; //0x30c
+ struct sq_ba sq_ba; //0x310
+ __u32 reserved_0x314; //0x314
+ struct cq_ba cq_ba; //0x318
+ __u32 reserved_0x31c; //0x31c
+ __u32 rq_wrptr_db_add; //0x320
+ __u32 reserved_0x324; //0x324
+ __u32 sq_cmpl_db_add; //0x328
+ __u32 reserved_0x32c; //0x32c
+ struct cq_head cq_head; //0x330
+ struct rq_ci_db rq_ci_db;//0x334
+ struct sq_pi_db sq_pi_db; //0x338
+ struct q_depth q_depth;//0x33c
+ struct sq_psn sq_psn; //0x340
+ struct last_rq_req last_rq_req;//0x344
+ struct dest_qp_conf dest_qp_conf; //0x348
+ struct timeout_conf timeout_conf; //0x34C
+ __u32 mac_dest_addr_lsb; //0x350
+ __u32 mac_dest_addr_msb; //0x354
+ __u32 reserved1[2]; //0x358
+ __u32 ip_dest_addr1; //0x360
+ __u32 ip_dest_addr2; //0x364
+ __u32 ip_dest_addr3; //0x368
+ __u32 ip_dest_addr4; //0x36C
+ __u32 reserved2[4];
+ struct stat_ssn stat_ssn;//0x380
+ struct stat_msn stat_msn;//0x384
+ struct qp_status qp_status; //0x388
+ struct stat_curr_sqptr_pro stat_curr_sqptr_pro;//0x38C
+ struct stat_resp_psn stat_resp_psn; //0x0390
+ struct stat_rq_buf_ca stat_rq_buf_ca;//0x0394
+ __u32 stat_wqe; //0x398
+ __u32 stat_rq_pi_db; //0x39C
+#ifdef ERNIC_MEM_REGISTER
+ __u32 reserved3[4];
+ __u32 pd;
+ __u32 reserved[19];
+#else
+ __u32 reserved3[24];
+#endif
+};
+
+union ctx { // 2 Byte
+ __u16 context;
+ __u16 wr_id;
+} __packed;
+
+//Work request, 64 bytes in size
+struct wr {
+ union ctx ctx; // 2 Byte
+ __u8 reserved1[2];
+ __u32 local_offset[2];
+ __u32 length;
+ __u8 opcode;
+ __u8 reserved2[3];
+ __u32 remote_offset[2];
+ __u32 remote_tag;
+ __u32 completion_info[4];
+ __u8 reserved4[16];
+} __packed;
+
+union ctxe {
+ __u16 context :16;
+ __u16 wr_id:16;
+} __packed;
+
+//Completion queue entry, 4 bytes in size
+struct cqe {
+ union ctxe ctxe; // 2 Byte
+ __u8 opcode;
+ __u8 err_flag;
+} __packed;
+
+struct xrnic_reg_map {
+ struct xrnic_ctrl_config xrnic_ctrl_config;
+ struct rdma_qp1_attr rdma_qp1_attr;
+ struct rdma_qp_attr rdma_qp_attr[255];
+
+};
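+
+/* Overall register map, per the offsets above: the global control/config
+ * block sits at 0x0000, QP1 attributes at 0x200, and each data QP gets a
+ * 0x100-byte attribute window from 0x300 onwards, i.e. the window for QP n
+ * (n >= 2) starts at 0x300 + (n - 2) * 0x100.
+ */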
+
+struct xrnic_memory_map {
+ struct xrnic_reg_map *xrnic_regs;
+ u64 xrnic_regs_phys;
+ void *send_sgl;
+ u64 send_sgl_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ struct wr *sq_ba;
+ u64 sq_ba_phys;
+ void *tx_hdr_buf_ba;
+ u64 tx_hdr_buf_ba_phys;
+ void *tx_sgl_buf_ba;
+ u64 tx_sgl_buf_ba_phys;
+ void *bypass_buf_ba;
+ u64 bypass_buf_ba_phys;
+ void *err_pkt_buf_ba;
+ u64 err_pkt_buf_ba_phys;
+ void *out_errsts_q_ba;
+ u64 out_errsts_q_ba_phys;
+ void *in_errsts_q_ba;
+ u64 in_errsts_q_ba_phys;
+ void *rq_wrptr_db_add;
+ u64 rq_wrptr_db_add_phys;
+ void *sq_cmpl_db_add;
+ u64 sq_cmpl_db_add_phys;
+ void *stat_rq_buf_ca;
+ u64 stat_rq_buf_ca_phys;
+ void *data_buf_ba;
+ u64 data_buf_ba_phys;
+ u64 resp_err_pkt_buf_ba_phys;
+ void *resp_err_pkt_buf_ba;
+ u32 intr_en;
+ u32 cq_intr[8];
+ u32 rq_intr[8];
+ u64 xrnicif_phys;
+};
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_HW_DEF_H*/
diff --git a/drivers/staging/xlnx_ernic/xif.h b/drivers/staging/xlnx_ernic/xif.h
new file mode 100644
index 000000000000..fb5f02d8c08c
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xif.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_IF_H
+#define _XRNIC_IF_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include <linux/udp.h>
+
+#define XRNIC_MAX_CHILD_CM_ID 255
+#define XRNIC_CM_PRVATE_DATA_LENGTH 32
+
+enum xrnic_wc_event {
+ XRNIC_WC_RDMA_WRITE = 0x0,
+ XRNIC_WC_SEND = 0x2,
+ XRNIC_WC_RDMA_READ = 0x4,
+};
+
+union xrnic_ctxe { // 2 Byte
+ __u16 context :16;
+ __u16 wr_id:16;
+} __packed;
+
+struct xrnic_cqe {
+ union xrnic_ctxe ctxe; // 2 Byte
+ __u8 opcode; // 1 Byte
+ __u8 err_flag; // 1 Byte
+} __packed;
+
+enum xrnic_port_space {
+ XRNIC_PS_SDP = 0x0001,
+ XRNIC_PS_IPOIB = 0x0002,
+ XRNIC_PS_IB = 0x013F,
+ XRNIC_PS_TCP = 0x0106,
+ XRNIC_PS_UDP = 0x0111,
+};
+
+enum xrnic_cm_error {
+ XRNIC_INVALID_CM_ID = 2,
+ XRNIC_INVALID_CM_OUTSTANDING = 3,
+ XRNIC_INVALID_QP_ID = 4,
+ XRNIC_INVALID_QP_INIT_ATTR = 5,
+ XRNIC_INVALID_NUM_CHILD = 6,
+ XRNIC_INVALID_CHILD_ID = 7,
+ XRNIC_INVALID_CHILD_NUM = 8,
+ XRNIC_INVALID_QP_TYPE = 9,
+ XRNIC_INVALID_PORT = 10,
+ XRNIC_INVALID_ADDR = 11,
+ XRNIC_INVALID_PKT_CNT = 12,
+ XRNIC_INVALID_ADDR_TYPE = 13,
+ XRNIC_INVALID_QP_CONN_PARAM = 14,
+ XRNIC_INVALID_QP_STATUS = 15,
+};
+
+enum xrnic_qp_type {
+ XRNIC_QPT_RC,
+ XRNIC_QPT_UC,
+ XRNIC_QPT_UD,
+};
+
+enum xrnic_rdma_cm_event_type {
+ XRNIC_LISTEN = 1,
+ XRNIC_REQ_RCVD,
+ XRNIC_MRA_SENT,
+ XRNIC_REJ_SENT,
+ XRNIC_REJ_RECV,
+ XRNIC_REP_SENT,
+ XRNIC_MRA_RCVD,
+ XRNIC_ESTABLISHD,
+ XRNIC_DREQ_RCVD,
+ XRNIC_DREQ_SENT,
+ XRNIC_RTU_TIMEOUT,
+ XRNIC_TIMEWAIT,
+ XRNIC_DREP_TIMEOUT,
+ XRNIC_REP_RCVD,
+ XRNIC_CM_EVENT_ADDR_ERROR,
+ XRNIC_CM_EVENT_ADDR_RESOLVED,
+ XRNIC_CM_EVENT_ROUTE_RESOLVED,
+};
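+
+/* These events follow the usual IB/RoCE CM exchange: the passive side sees
+ * XRNIC_REQ_RCVD, answers with a REP (XRNIC_REP_SENT) and reaches
+ * XRNIC_ESTABLISHD once the RTU arrives; the DREQ/DREP events cover
+ * teardown.
+ */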
+
+struct xrnic_hw_handshake_info {
+ u32 rq_wrptr_db_add;
+ u32 sq_cmpl_db_add;
+ u32 cnct_io_conf_l_16b;
+};
+
+struct xrnic_qp_info {
+ void (*xrnic_rq_event_handler)(u32 rq_count, void *rp_context);
+ void *rq_context;
+ void (*xrnic_sq_event_handler)(u32 cq_head, void *sp_context);
+ void *sq_context;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ void *sq_ba;
+ u64 sq_ba_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 send_sge_size;
+ u32 send_pkt_size;
+ u32 recv_pkt_size;
+ u32 qp_num;
+ u32 starting_psn;
+ struct ernic_pd *pd;
+};
+
+struct xrnic_qp_init_attr {
+ void (*xrnic_rq_event_handler)(u32 rq_count, void *rp_context);
+ void *rq_context;
+ void (*xrnic_sq_event_handler)(u32 cq_head, void *sp_context);
+ void *sq_context;
+ enum xrnic_qp_type qp_type;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ void *sq_ba;
+ u64 sq_ba_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 send_sge_size;
+ u32 send_pkt_size;
+ u32 recv_pkt_size;
+};
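+
+/* Illustrative fill of the init attributes ahead of xrnic_rdma_create_qp();
+ * the handler names, depths and packet sizes are assumptions, not values
+ * mandated by the driver:
+ *
+ *	struct xrnic_qp_init_attr attr = {
+ *		.xrnic_rq_event_handler = my_rq_handler,
+ *		.xrnic_sq_event_handler = my_sq_handler,
+ *		.qp_type = XRNIC_QPT_RC,
+ *		.sq_depth = 64,
+ *		.rq_depth = 64,
+ *		.send_sge_size = 256,
+ *		.send_pkt_size = 4096,
+ *		.recv_pkt_size = 4096,
+ *	};
+ */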
+
+struct xrnic_rdma_route {
+ u8 src_addr[16];
+ u8 dst_addr[16];
+ u16 ip_addr_type;
+ u8 smac[ETH_ALEN];
+ u8 dmac[ETH_ALEN];
+ struct sockaddr_storage s_addr;
+ struct sockaddr_storage d_addr;
+};
+
+enum xrnic_port_qp_status {
+ XRNIC_PORT_QP_FREE,
+ XRNIC_PORT_QP_IN_USE,
+};
+
+struct xrnic_rdma_cm_event_info {
+ enum xrnic_rdma_cm_event_type cm_event;
+ int status;
+ void *private_data;
+ u32 private_data_len;
+};
+
+struct xrnic_rdma_conn_param {
+ u8 private_data[XRNIC_CM_PRVATE_DATA_LENGTH];
+ u8 private_data_len;
+ u8 responder_resources;
+ u8 initiator_depth;
+ u8 flow_control;
+ u8 retry_count;
+ u8 rnr_retry_count;
+ u32 qp_num;
+ u32 srq;
+};
+
+enum xrnic_cm_state {
+ XRNIC_CM_REQ_SENT = 0,
+ XRNIC_CM_REP_RCVD,
+ XRNIC_CM_ESTABLISHED,
+};
+
+struct xrnic_rdma_cm_id {
+ int (*xrnic_cm_handler)(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *event);
+ void *cm_context;
+ u32 local_cm_id;
+ u32 remote_cm_id;
+ struct xrnic_qp_info qp_info;
+ struct xrnic_rdma_route route;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+ enum xrnic_port_space ps;
+ enum xrnic_qp_type qp_type;
+ u16 port_num;
+ u16 child_qp_num;
+ struct xrnic_rdma_conn_param conn_param;
+ enum xrnic_port_qp_status qp_status;
+ int cm_state;
+ struct list_head list;
+};
+
+struct xrnic_rdma_cm_id_info {
+ struct xrnic_rdma_cm_id parent_cm_id;
+ struct xrnic_rdma_cm_id *child_cm_id;
+ u32 num_child;
+ struct xrnic_rdma_cm_event_info conn_event_info;
+};
+
+void xrnic_rq_event_handler (u32 rq_count, void *user_arg);
+void xrnic_sq_event_handler (u32 cq_head, void *user_arg);
+int xrnic_cm_handler (struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event_info);
+
+struct xrnic_rdma_cm_id *xrnic_rdma_create_id
+ (int (*xrnic_cm_handler)(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event_info), void *cm_context,
+ enum xrnic_port_space ps, enum xrnic_qp_type qp_type,
+ int num_child_qp);
+
+int xrnic_rdma_bind_addr(struct xrnic_rdma_cm_id *cm_id,
+ u8 *addr, u16 port_num, u16 ip_addr_type);
+
+int xrnic_rdma_listen(struct xrnic_rdma_cm_id *cm_id, int outstanding);
+int xrnic_rdma_create_qp(struct xrnic_rdma_cm_id *cm_id, struct ernic_pd *pd,
+ struct xrnic_qp_init_attr *init_attr);
+int xrnic_rdma_accept(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param);
+int xrnic_post_recv(struct xrnic_qp_info *qp_info, u32 rq_count);
+int xrnic_post_send(struct xrnic_qp_info *qp_info, u32 sq_count);
+int xrnic_destroy_qp(struct xrnic_qp_info *qp_info);
+int xrnic_rdma_disconnect(struct xrnic_rdma_cm_id *cm_id);
+int xrnic_rdma_destroy_id(struct xrnic_rdma_cm_id *cm_id, int flag);
+int xrnic_hw_hs_reset_sq_cq(struct xrnic_qp_info *qp_info,
+ struct xrnic_hw_handshake_info *hw_hs_info);
+int xrnic_hw_hs_reset_rq(struct xrnic_qp_info *qp_info);
+
+int xrnic_rdma_resolve_addr(struct xrnic_rdma_cm_id *cm_id,
+ struct sockaddr *src_addr,
+ struct sockaddr *dst_addr, int timeout);
+int xrnic_rdma_connect(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param);
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_IF_H*/
diff --git a/drivers/staging/xlnx_ernic/xioctl.h b/drivers/staging/xlnx_ernic/xioctl.h
new file mode 100644
index 000000000000..8c9738e69383
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xioctl.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+#ifndef _XRNIC_IOCTL_H_
+#define _XRNIC_IOCTL_H_
+
+#include <asm/ioctl.h>
+#include "xlog.h"
+
+#define XRNIC_MAGIC 'L'
+
+#define XRNIC_DISPLAY_MMAP_ALL _IOW(XRNIC_MAGIC, 1, uint)
+#define XRNIC_DISPLAY_MMAP_CONFIG _IOW(XRNIC_MAGIC, 2, uint)
+#define XRNIC_DISPLAY_MMAP_QP1 _IOW(XRNIC_MAGIC, 3, uint)
+#define XRNIC_DISPLAY_MMAP_QPX _IOW(XRNIC_MAGIC, 4, uint)
+#define XRNIC_DISPLAY_PKT _IOW(XRNIC_MAGIC, 5, uint)
+
+#define XRNIC_MAX_CMDS 5
+
+#endif /* _XRNIC_IOCTL_H_ */
diff --git a/drivers/staging/xlnx_ernic/xmain.c b/drivers/staging/xlnx_ernic/xmain.c
new file mode 100644
index 000000000000..67d525b51716
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmain.c
@@ -0,0 +1,1592 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ * Author : Sandeep Dhanvada <sandeep.dhanvada@xilinx.com>
+ * : Anjaneyulu Reddy Mule <anjaneyulu.reddy.mule@xilinx.com>
+ * : Srija Malyala <srija.malyala@xilinx.com>
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/inet.h>
+#include <linux/time.h>
+#include <linux/cdev.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <net/addrconf.h>
+#include <linux/types.h>
+#include "xcommon.h"
+
+/* TODO: Remove this macro once all the experimental code is verified;
+ * all the non-experimental code should then be deleted.
+ */
+#define EXPERIMENTAL_CODE
+int debug;
+struct class *xrnic_class;
+/* TODO: Enable this via sysfs. */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none, 1=all)");
+
+#define XRNIC_REG_MAP_NODE 0
+#define cpu_to_be24(x) ((x) << 16)
+
+struct xrnic_conn_param {
+ const void *private_data;
+ u8 private_data_len;
+ u8 responder_resources;
+ u8 initiator_depth;
+ u8 flow_control;
+ u8 retry_count;
+ u8 rnr_retry_count;
+ u8 srq;
+ u8 qp_num;
+};
+
+/* EXTRA Bytes for Invariant CRC */
+#define ERNIC_INV_CRC 4
+/* ERNIC Doesn't have Variant CRC for P2P */
+#define ERNIC_VAR_CRC 0
+#define EXTRA_PKT_LEN (ERNIC_INV_CRC + ERNIC_VAR_CRC)
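+/* e.g. a 1024-byte RoCEv2 payload occupies 1024 + EXTRA_PKT_LEN = 1028
+ * bytes of buffer space: 4 bytes of invariant CRC, no variant CRC.
+ */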
+
+struct xrnic_dev_info *xrnic_dev;
+static dev_t xrnic_dev_number;
+
+/*
+ * To store the IP address of the controller, which is passed as a
+ * module param
+ */
+static char server_ip[16];
+/* To store the port number. This is passed as a module param */
+static unsigned short port_num;
+/* To store the mac_address. This is passed as a module param */
+static ushort mac_address[6] = {0x1, 0x0, 0x0, 0x35, 0x0a, 0x00};
+/* To store the ethernet interface name, which is passed as a module param */
+static char *ifname = "eth0";
+
+module_param(port_num, ushort, 0444);
+MODULE_PARM_DESC(port_num, "network port number");
+
+module_param_array(mac_address, ushort, NULL, 0444);
+MODULE_PARM_DESC(mac_address, "mac address");
+
+module_param_string(server_ip, server_ip, sizeof(server_ip), 0444);
+MODULE_PARM_DESC(server_ip, "Target server ip address");
+
+module_param(ifname, charp, 0444);
+MODULE_PARM_DESC(ifname, "Target server interface name eth0..");
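+
+/* Illustrative load-time usage (module and interface names are assumptions):
+ *
+ *	insmod xrnic.ko ifname=eth1 port_num=4791 \
+ *		mac_address=0x1,0x0,0x0,0x35,0x0a,0x00
+ */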
+
+/**
+ * xrnic_rdma_create_id() - Creates an RDMA CM ID
+ * @xrnic_cm_handler: communication event handler
+ * @cm_context: CM context
+ * @ps: Port space
+ * @qp_type: Queue transport type
+ * @num_child: Max QP count
+ *
+ * @return: CM ID pointer on success, NULL or an ERR_PTR in case of failure
+ */
+struct xrnic_rdma_cm_id *xrnic_rdma_create_id
+ (int (*xrnic_cm_handler)(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event_info), void *cm_context,
+ enum xrnic_port_space ps, enum xrnic_qp_type qp_type, int num_child)
+{
+ struct xrnic_qp_attr *qp1_attr = NULL;
+ struct xrnic_rdma_cm_id *cm_id = NULL;
+ struct xrnic_qp_info *qp_info = NULL;
+ struct xrnic_rdma_cm_id_info *cm_id_info = NULL;
+
+ if (!xrnic_dev) {
+ pr_err("Received NULL pointer\n");
+ return (struct xrnic_rdma_cm_id *)NULL;
+ }
+
+ qp1_attr = &xrnic_dev->qp1_attr;
+ if (xrnic_dev->io_qp_count < num_child ||
+ num_child < 0 || qp_type != qp1_attr->qp_type) {
+ pr_err("Invalid info received\n");
+ return NULL;
+ }
+
+ cm_id_info = kzalloc(sizeof(*cm_id_info), GFP_KERNEL);
+ if (!cm_id_info)
+ return ERR_PTR(-ENOMEM);
+
+ xrnic_dev->curr_cm_id_info = cm_id_info;
+ cm_id = (struct xrnic_rdma_cm_id *)&cm_id_info->parent_cm_id;
+ cm_id->xrnic_cm_handler = xrnic_cm_handler;
+ cm_id->cm_context = cm_context;
+ cm_id->ps = ps;
+ cm_id->qp_type = qp_type;
+ cm_id->cm_id_info = cm_id_info;
+ cm_id->child_qp_num = 0;
+ cm_id->qp_status = XRNIC_PORT_QP_FREE;
+
+ qp_info = &cm_id->qp_info;
+ memset(qp_info, 0, sizeof(*qp_info));
+
+ qp_info->qp_num = qp1_attr->qp_num;
+ list_add_tail(&cm_id->list, &cm_id_list);
+
+ return cm_id;
+}
+EXPORT_SYMBOL(xrnic_rdma_create_id);
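+
+/* Minimal passive-side consumer sketch (illustrative only; the handler
+ * name, address, port and child QP count are assumptions, not part of
+ * this driver):
+ *
+ *	static int my_cm_handler(struct xrnic_rdma_cm_id *cm_id,
+ *				 struct xrnic_rdma_cm_event_info *event)
+ *	{
+ *		if (event->cm_event == XRNIC_REQ_RCVD)
+ *			pr_info("connect request on port %d\n",
+ *				cm_id->port_num);
+ *		return 0;
+ *	}
+ *
+ *	static int my_listen(void)
+ *	{
+ *		u8 addr[4] = { 192, 168, 1, 10 };
+ *		struct xrnic_rdma_cm_id *cm_id;
+ *		int ret;
+ *
+ *		cm_id = xrnic_rdma_create_id(my_cm_handler, NULL,
+ *					     XRNIC_PS_UDP, XRNIC_QPT_RC, 8);
+ *		if (IS_ERR_OR_NULL(cm_id))
+ *			return -ENOMEM;
+ *		ret = xrnic_rdma_bind_addr(cm_id, addr, 1, AF_INET);
+ *		if (ret)
+ *			return ret;
+ *		return xrnic_rdma_listen(cm_id, 1);
+ *	}
+ */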
+
+/**
+ * ipv6_addr_compare() - Compares IPV6 addresses
+ * @addr1: Address 1 to compare (stored byte-reversed relative to @addr2)
+ * @addr2: Address 2 to compare
+ * @size: size of the address
+ *
+ * @return: 0 on a match, -1 in case of a mismatch
+ */
+static int ipv6_addr_compare(u8 *addr1, u8 *addr2, size_t size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (addr1[(size - 1) - i] != addr2[i])
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * xrnic_rdma_bind_addr() - Binds IP-V4/V6 addresses
+ * @cm_id: CM ID to which the address info is bound
+ * @addr: Address to bind to
+ * @port_num: Transport port number
+ * @ip_addr_type: IP-V4/V6
+ *
+ * @return: 0 on success, error indicative value in case of failure
+ */
+int xrnic_rdma_bind_addr(struct xrnic_rdma_cm_id *cm_id,
+ u8 *addr, u16 port_num, u16 ip_addr_type)
+{
+ if (!cm_id || !xrnic_dev) {
+ pr_err("Invalid CM ID or XRNIC device info\n");
+ return -EINVAL;
+ }
+
+ if (xrnic_dev->curr_cm_id_info != cm_id->cm_id_info)
+ return -XRNIC_INVALID_CM_ID;
+
+ if (port_num < 1UL || port_num > XRNIC_MAX_PORT_SUPPORT)
+ return -XRNIC_INVALID_PORT;
+
+ if (cm_id->child_qp_num)
+ return -XRNIC_INVALID_CHILD_NUM;
+
+ if (xrnic_dev->cm_id_info[port_num - 1])
+ return -XRNIC_INVALID_PORT;
+
+ if (xrnic_dev->port_status[port_num - 1] == XRNIC_PORT_QP_IN_USE)
+ return -XRNIC_INVALID_CM_ID;
+
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE)
+ return -XRNIC_INVALID_CM_ID;
+
+ if (ip_addr_type == AF_INET6) {
+ if (ipv6_addr_compare((u8 *)&xrnic_dev->ipv6_addr, addr,
+ sizeof(struct in6_addr)))
+ return -XRNIC_INVALID_ADDR;
+ memcpy((void *)&cm_id->route.src_addr, (void *)addr,
+ sizeof(struct in6_addr));
+ } else if (ip_addr_type == AF_INET) {
+ if (memcmp(&xrnic_dev->ipv4_addr, addr,
+ sizeof(struct in_addr)))
+ return -XRNIC_INVALID_ADDR;
+ memcpy((void *)&cm_id->route.src_addr, (void *)addr,
+ sizeof(struct in_addr));
+ } else {
+ return -XRNIC_INVALID_ADDR_TYPE;
+ }
+ xrnic_dev->cm_id_info[port_num - 1] = cm_id->cm_id_info;
+ cm_id->port_num = port_num;
+ cm_id->route.ip_addr_type = ip_addr_type;
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_bind_addr);
+
+/**
+ * xrnic_rdma_listen() - Initiates listen on the socket
+ * @cm_id: CM ID
+ * @backlog: backlog of pending connection requests
+ *
+ * @return: 0 on success, error indicative value in case of failure
+ */
+int xrnic_rdma_listen(struct xrnic_rdma_cm_id *cm_id, int backlog)
+{
+ if (!cm_id || !xrnic_dev) {
+ pr_err("Rx invalid pointers\n");
+ return -EINVAL;
+ }
+
+ if (xrnic_dev->curr_cm_id_info != cm_id->cm_id_info)
+ return -XRNIC_INVALID_CM_ID;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1] ==
+ XRNIC_PORT_QP_IN_USE)
+ return -XRNIC_INVALID_PORT;
+
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE)
+ return -XRNIC_INVALID_QP_ID;
+
+ xrnic_dev->port_status[cm_id->port_num - 1] = XRNIC_PORT_QP_IN_USE;
+ xrnic_dev->curr_cm_id_info = NULL;
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_listen);
+
+/**
+ * xrnic_hw_hs_reset_sq_cq() - Enables HW Handshake for a given QP
+ * @qp_info: QP which should be enabled for HW Handshake
+ * @hw_hs_info: HW Handshake info with which QP config needs to be updated
+ *
+ * @return: XRNIC_SUCCESS on success, error indicative value in case of failure
+ */
+int xrnic_hw_hs_reset_sq_cq(struct xrnic_qp_info *qp_info,
+ struct xrnic_hw_handshake_info *hw_hs_info)
+{
+ struct xrnic_qp_attr *qp_attr;
+
+ if (!qp_info) {
+ pr_err("Rx invalid qp info\n");
+ return -EINVAL;
+ }
+
+ if (!xrnic_dev) {
+ pr_err("Invalid ERNIC info\n");
+ return -EINVAL;
+ }
+
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ xrnic_reset_io_qp_sq_cq_ptr(qp_attr, hw_hs_info);
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_hw_hs_reset_sq_cq);
+
+/**
+ * xrnic_hw_hs_reset_rq() - Updates HW handshake for RQ
+ * @qp_info: QP which should be enabled for HW Handshake
+ *
+ * @return: XRNIC_SUCCESS on success, error indicative value in case of failure
+ */
+int xrnic_hw_hs_reset_rq(struct xrnic_qp_info *qp_info)
+{
+ struct xrnic_qp_attr *qp_attr;
+
+ if (!qp_info) {
+ pr_err("Rx invalid qp info\n");
+ return -EINVAL;
+ }
+
+ if (!xrnic_dev) {
+ pr_err("Invalid ERNIC info\n");
+ return -EINVAL;
+ }
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ xrnic_reset_io_qp_rq_ptr(qp_attr);
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_hw_hs_reset_rq);
+
+/**
+ * set_ipv4_ipaddress() - Configures XRNIC IP address
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int set_ipv4_ipaddress(void)
+{
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ u32 config_value = 0;
+ u32 ipv4_addr = 0;
+ struct net_device *dev = __dev_get_by_name(&init_net, ifname);
+ struct in_device *inet_dev;
+
+ if (!dev) {
+ pr_err("CMAC interface not configured\n");
+ return XRNIC_FAILED;
+ }
+
+ inet_dev = (struct in_device *)dev->ip_ptr;
+ if (inet_dev && inet_dev->ifa_list) {
+ ipv4_addr = inet_dev->ifa_list->ifa_address;
+ if (!ipv4_addr) {
+ pr_err("cmac ip addr: ifa_address not available\n");
+ return XRNIC_FAILED;
+ }
+ snprintf(server_ip, 16, "%pI4", &ipv4_addr);
+ in4_pton(server_ip, strlen(server_ip), xrnic_dev->ipv4_addr,
+ '\0', NULL);
+ DEBUG_LOG("xcmac ip_address:%s\n", server_ip);
+ } else {
+ pr_info("xcmac ip address: not available at present\n");
+ return 0;
+ }
+
+ switch (dev->mtu) {
+ case 340:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_256;
+ break;
+ case 592:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_512;
+ break;
+ case 1500:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_1024;
+ break;
+ case 2200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_2048;
+ break;
+ case 4200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ break;
+ default:
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ }
+ config_value = (xrnic_dev->ipv4_addr[3] << 0) |
+ (xrnic_dev->ipv4_addr[2] << 8) |
+ (xrnic_dev->ipv4_addr[1] << 16) |
+ (xrnic_dev->ipv4_addr[0] << 24);
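+ /* e.g. 192.168.1.10 packs to config_value 0xC0A8010A (illustrative) */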
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_config->ipv4_address)));
+ DEBUG_LOG("XRNIC IPV4 address [%x]\n", config_value);
+ return 0;
+}
+
+/**
+ * set_ipv6_ipaddress() - Configures XRNIC IPV6 address
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int set_ipv6_ipaddress(void)
+{
+ struct xrnic_ctrl_config *xrnic_ctrl_conf;
+ u32 config_value = 0;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifp, *tmp;
+ u8 i, ip6_set = 0;
+ struct net_device *dev = __dev_get_by_name(&init_net, ifname);
+
+ xrnic_ctrl_conf = &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ if (!dev) {
+ pr_err("CMAC interface not configured\n");
+ return XRNIC_FAILED;
+ }
+
+ idev = __in6_dev_get(dev);
+ if (!idev) {
+ pr_err("ipv6 inet device not found\n");
+ return 0;
+ }
+
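+ /* s6_addr holds the address in network byte order; the driver keeps
+ * ipv6_addr least-significant-byte first, hence the 15 - i reversal below.
+ */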
+ list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
+ DEBUG_LOG("IP=%pI6, MAC=%pM\n", &ifp->addr, dev->dev_addr);
+ for (i = 0; i < 16; i++) {
+ DEBUG_LOG("IP=%x\n", ifp->addr.s6_addr[i]);
+ xrnic_dev->ipv6_addr[15 - i] = ifp->addr.s6_addr[i];
+ }
+ ip6_set = 1;
+ }
+ if (ip6_set == 0) {
+ pr_info("xcmac ipv6 address: not available at present\n");
+ return 0;
+ }
+
+ switch (dev->mtu) {
+ case 340:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_256;
+ break;
+ case 592:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_512;
+ break;
+ case 1500:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_1024;
+ break;
+ case 2200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_2048;
+ break;
+ case 4200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ break;
+ default:
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ }
+ config_value = (xrnic_dev->ipv6_addr[0] << 0) |
+ (xrnic_dev->ipv6_addr[1] << 8) |
+ (xrnic_dev->ipv6_addr[2] << 16) |
+ (xrnic_dev->ipv6_addr[3] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr1)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+
+ config_value = (xrnic_dev->ipv6_addr[4] << 0) |
+ (xrnic_dev->ipv6_addr[5] << 8) |
+ (xrnic_dev->ipv6_addr[6] << 16) |
+ (xrnic_dev->ipv6_addr[7] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr2)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+
+ config_value = (xrnic_dev->ipv6_addr[8] << 0) |
+ (xrnic_dev->ipv6_addr[9] << 8) |
+ (xrnic_dev->ipv6_addr[10] << 16) |
+ (xrnic_dev->ipv6_addr[11] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr3)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+
+ config_value = (xrnic_dev->ipv6_addr[12] << 0) |
+ (xrnic_dev->ipv6_addr[13] << 8) |
+ (xrnic_dev->ipv6_addr[14] << 16) |
+ (xrnic_dev->ipv6_addr[15] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr4)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+ return 0;
+}
+
+/**
+ * cmac_inet6addr_event() - Handles IPV6 events
+ * @notifier: notifier info
+ * @event: Rx event
+ * @data: Event specific data
+ *
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int cmac_inet6addr_event(struct notifier_block *notifier,
+ unsigned long event, void *data)
+{
+ switch (event) {
+ case NETDEV_DOWN:
+ pr_info("Driver link down\r\n");
+ break;
+ case NETDEV_UP:
+ pr_info("Driver link up ipv6\r\n");
+ if (set_ipv6_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ case NETDEV_CHANGEADDR:
+ pr_info("Driver link change address ipv6\r\n");
+ if (set_ipv6_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ }
+ return 0;
+}
+
+/**
+ * cmac_inetaddr_event() - Handles IPV4 events
+ * @notifier: notifier info
+ * @event: Rx event
+ * @data: Event specific data
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int cmac_inetaddr_event(struct notifier_block *notifier,
+ unsigned long event, void *data)
+{
+ struct in_ifaddr *ifa = data;
+ struct net_device *event_netdev = ifa->ifa_dev->dev;
+ struct net_device *dev = __dev_get_by_name(&init_net, ifname);
+
+ if (!dev) {
+ pr_err("CMAC interface not configured\n");
+ return XRNIC_FAILED;
+ }
+
+ if (event_netdev != dev)
+ return 0;
+ pr_info("Xrnic: event = %ld\n", event);
+ switch (event) {
+ case NETDEV_DOWN:
+ pr_info("Xrnic: link down\n");
+ break;
+ case NETDEV_UP:
+ pr_info("Xrnic: link up\n");
+ if (set_ipv4_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ case NETDEV_CHANGEADDR:
+ pr_info("Xrnic: ip address change detected\n");
+ if (set_ipv4_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ }
+ return 0;
+}
+
+struct notifier_block cmac_inetaddr_notifier = {
+ .notifier_call = cmac_inetaddr_event
+};
+
+struct notifier_block cmac_inet6addr_notifier = {
+ .notifier_call = cmac_inet6addr_event
+};
+
+static const struct file_operations xrnic_fops = {
+ /*TODO: Implement read/write/ioctl operations. */
+ .owner = THIS_MODULE, /* Owner */
+};
+
+/**
+ * xrnic_irq_handler() - XRNIC interrupt handler
+ * @irq: Irq number
+ * @data: Pointer to XRNIC device info structure
+ *
+ * @return: IRQ_HANDLED on success, other value in case of failure
+ */
+static irqreturn_t xrnic_irq_handler(int irq, void *data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ u32 config_value = 0;
+ unsigned long flag;
+
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ config_value = ioread32((void *)&xrnic_ctrl_config->intr_sts);
+
+ /* We are checking masked interrupt.*/
+ config_value = config_value & xrnic_dev->xrnic_mmap.intr_en;
+ if (!config_value)
+ pr_err("Rx disabled or masked interrupt\n");
+
+ if (config_value & PKT_VALID_ERR_INTR_EN) {
+ pr_info("Packet validation fail interrupt rx\n");
+ iowrite32(PKT_VALID_ERR_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & MAD_PKT_RCVD_INTR_EN) {
+ DEBUG_LOG("MAD Packet rx interrupt\n");
+ /* Clear the interrupt */
+ iowrite32(MAD_PKT_RCVD_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ /* process the MAD pkt */
+ tasklet_schedule(&xrnic_dev->mad_pkt_recv_task);
+ }
+
+ if (config_value & BYPASS_PKT_RCVD_INTR_EN) {
+ DEBUG_LOG("Bypass packet Rx interrupt\n");
+ iowrite32(BYPASS_PKT_RCVD_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & RNR_NACK_GEN_INTR_EN) {
+ DEBUG_LOG("Rx RNR Nack interrupt\n");
+ iowrite32(RNR_NACK_GEN_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & WQE_COMPLETED_INTR_EN) {
+ DEBUG_LOG("Rx WQE completion interrupt\n");
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en &
+ (~WQE_COMPLETED_INTR_EN);
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ tasklet_schedule(&xrnic_dev->wqe_completed_task);
+ }
+
+ if (config_value & ILL_OPC_SENDQ_INTR_EN) {
+ DEBUG_LOG("Rx illegal opcode interrupt\n");
+ iowrite32(ILL_OPC_SENDQ_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & QP_PKT_RCVD_INTR_EN) {
+ DEBUG_LOG("Rx data packet interrupt\n");
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en &
+ (~QP_PKT_RCVD_INTR_EN);
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ tasklet_schedule(&xrnic_dev->qp_pkt_recv_task);
+ }
+
+ if (config_value & FATAL_ERR_INTR_EN) {
+ pr_info("Rx Fatal error interrupt\n");
+
+ iowrite32(FATAL_ERR_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ /* 0 is an arbitrary placeholder argument */
+ xrnic_qp_fatal_handler(0);
+ }
+
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+ return IRQ_HANDLED;
+}
+
+/**
+ * xrnic_ctrl_hw_configuration() - XRNIC control configuration initialization
+ * @return: 0 on success, other value in case of failure
+ */
+static int xrnic_ctrl_hw_configuration(void)
+{
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct xrnic_ctrl_config *xrnic_ctrl_conf;
+ u32 config_value = 0;
+ struct net_device *dev = NULL;
+
+ xrnic_ctrl_conf = &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+
+ if (!xrnic_dev || !xrnic_dev->xrnic_mmap.xrnic_regs ||
+ !xrnic_ctrl_conf) {
+ pr_err("Invalid device pointers\n");
+ return -EINVAL;
+ }
+
+ xrnic_mmap = &xrnic_dev->xrnic_mmap;
+
+ dev = __dev_get_by_name(&init_net, ifname);
+ if (!dev) {
+ pr_err("Ethernet mac address not configured\n");
+ return XRNIC_FAILED;
+ }
+ /* Set the MAC address */
+ config_value = dev->dev_addr[5] | (dev->dev_addr[4] << 8) |
+ (dev->dev_addr[3] << 16) | (dev->dev_addr[2] << 24);
+ DEBUG_LOG("Source MAC address LSB [%x]\n", config_value);
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->mac_xrnic_src_addr_lsb)));
+
+ DEBUG_LOG("Source MAC address LSB [%x]\n", config_value);
+ config_value = dev->dev_addr[1] | (dev->dev_addr[0] << 8);
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->mac_xrnic_src_addr_msb)));
+ DEBUG_LOG("Source MAC address MSB [%x]\n", config_value);
+
+ if (set_ipv4_ipaddress() == XRNIC_FAILED) {
+ pr_err("ETH0 AF_INET address: ifa_list not available.\n");
+ return XRNIC_FAILED;
+ }
+
+ if (set_ipv6_ipaddress() == XRNIC_FAILED) {
+ pr_err("ETH0 AF_INET6 address: ifa_list not available.\n");
+ return XRNIC_FAILED;
+ }
+
+ /* At present 128 TX headers and each size 128 bytes */
+ config_value = xrnic_mmap->tx_hdr_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_hdr_buf_ba)));
+ DEBUG_LOG("Tx header buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_TX_HDR | (XRNIC_SIZE_OF_TX_HDR << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_hdr_buf_sz)));
+ DEBUG_LOG("Tx header buf size [0x%x]\n", config_value);
+
+ /* At present 256 TX SGL and each size 16 bytes */
+ config_value = xrnic_mmap->tx_sgl_buf_ba_phys & 0xffffffff;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_sgl_buf_ba)));
+ DEBUG_LOG("Tx SGL buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_TX_SGL | (XRNIC_SIZE_OF_TX_SGL << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_sgl_buf_sz)));
+ DEBUG_LOG("Tx SGL buf size [0x%x]\n", config_value);
+
+ /* At present 32 Bypass buffers and each size 512 bytes */
+ config_value = xrnic_mmap->bypass_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->bypass_buf_ba)));
+ DEBUG_LOG("Bypass buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_BYPASS_BUF |
+ (XRNIC_SIZE_OF_BYPASS_BUF << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->bypass_buf_sz)));
+ DEBUG_LOG("Bypass buf size [0x%x]\n", config_value);
+
+ config_value = XRNIC_BYPASS_BUF_WRPTR;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->bypass_buf_wrptr)));
+ DEBUG_LOG("Bypass buffer write pointer [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->err_pkt_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->err_pkt_buf_ba)));
+ DEBUG_LOG("Error packet buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_ERROR_BUF |
+ (XRNIC_SIZE_OF_ERROR_BUF << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->err_pkt_buf_sz)));
+ DEBUG_LOG("Error packet buf size [0x%x]\n", config_value);
+
+ config_value = XRNIC_ERROR_BUF_WRPTR;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->err_buf_wrptr)));
+ DEBUG_LOG("Error pakcet buf write pointer [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->out_errsts_q_ba_phys;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->out_errsts_q_ba)));
+ DEBUG_LOG("Outgoing error status queue base address [0x%x]\n",
+ config_value);
+
+ config_value = XRNIC_OUT_ERRST_Q_NUM_ENTRIES;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->out_errsts_q_sz)));
+ DEBUG_LOG("Outgoing error status queue size [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->in_errsts_q_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->in_errsts_q_ba)));
+ DEBUG_LOG("Incoming error status queue base address [0x%x]\n",
+ config_value);
+
+ config_value = XRNIC_IN_ERRST_Q_NUM_ENTRIES;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->in_errsts_q_sz)));
+ DEBUG_LOG("Incoming error status queue size [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->data_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->data_buf_ba)));
+ DEBUG_LOG("RDMA Outgoing data buf base addr [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_DATA_BUF | (XRNIC_SIZE_OF_DATA_BUF << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->data_buf_sz)));
+ DEBUG_LOG("RDMA Outgoing data buf size [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->resp_err_pkt_buf_ba_phys;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->resp_err_pkt_buf_ba)));
+ DEBUG_LOG("Response error packet buf base address [0x%x]\n",
+ config_value);
+
+ config_value = XRNIC_NUM_OF_RESP_ERR_BUF |
+ (XRNIC_SIZE_OF_RESP_ERR_BUF << 16);
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->resp_err_buf_sz)));
+ DEBUG_LOG("Response error packet buf size [0x%x]\n", config_value);
+
+ /* Enable the RNIC configuration*/
+ config_value = (XRNIC_CONFIG_XRNIC_EN |
+ XRNIC_CONFIG_ERR_BUF_EN |
+ XRNIC_CONFIG_NUM_QPS_ENABLED |
+ XRNIC_CONFIG_FLOW_CONTROL_EN |
+ XRNIC_CONFIG_UDP_SRC_PORT);
+
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->xrnic_conf)));
+ return XRNIC_SUCCESS;
+}
+
+/**
+ * xrnic_ctrl_hw_init() - XRNIC control hardware initialization
+ * @return: 0 on success, other value in case of failure
+ */
+static int xrnic_ctrl_hw_init(void)
+{
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ u32 config_value = 0;
+ int ret = 0, i;
+
+ /* Invoking rnic global initialization configuration */
+ ret = xrnic_ctrl_hw_configuration();
+ if (ret) {
+ pr_err("xrnic hw config failed with ret code [%d]\n", ret);
+ return ret;
+ }
+
+ /* Invoking RDMA QP1 configuration */
+ ret = xrnic_qp1_hw_configuration();
+ if (ret) {
+ pr_err("xrnic qp1 config failed with ret code [%d]\n", ret);
+ return ret;
+ }
+
+ /* Invoking RDMA data path QP configuration for the remaining QPs; no
+ * data path interrupt handler is registered here, so the return value
+ * is not checked.
+ */
+ for (i = 0; i < XRNIC_MAX_QP_SUPPORT; i++)
+ xrnic_qp_hw_configuration(i);
+
+ /* Enabling xrnic interrupts. */
+ config_value = MAD_PKT_RCVD_INTR_EN |
+ RNR_NACK_GEN_INTR_EN |
+ WQE_COMPLETED_INTR_EN | ILL_OPC_SENDQ_INTR_EN |
+ QP_PKT_RCVD_INTR_EN | FATAL_ERR_INTR_EN;
+
+ if (config_value & ~XRNIC_INTR_ENABLE_DEFAULT) {
+ DEBUG_LOG("Setting the default interrupt enable config\n");
+ config_value = XRNIC_INTR_ENABLE_DEFAULT;
+ }
+
+ /*Writing to interrupt enable register.*/
+ xrnic_dev->xrnic_mmap.intr_en = config_value;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_config->intr_en)));
+
+ DEBUG_LOG("Interrupt enable reg value [%#x]\n",
+ ioread32((void __iomem *)&xrnic_ctrl_config->intr_en));
+ return ret;
+}
+
+/**
+ * xrnic_fill_wr() - This function fills the Send queue work request info
+ * @qp_attr: qp config info to fill the WR
+ * @qp_depth: Depth of the Queue
+ */
+void xrnic_fill_wr(struct xrnic_qp_attr *qp_attr, u32 qp_depth)
+{
+ int i;
+ struct wr *sq_wr; /*sq_ba*/
+
+ for (i = 0; i < qp_depth; i++) {
+ sq_wr = (struct wr *)qp_attr->sq_ba + i;
+ sq_wr->ctx.wr_id = i;
+ sq_wr->local_offset[0] = (qp_attr->send_sgl_phys & 0xffffffff)
+ + (i * XRNIC_SEND_SGL_SIZE);
+ sq_wr->local_offset[1] = 0;
+ sq_wr->length = XRNIC_SEND_SGL_SIZE;
+ sq_wr->opcode = XRNIC_SEND_ONLY;
+ sq_wr->remote_offset[0] = 0;
+ sq_wr->remote_offset[1] = 0;
+ sq_wr->remote_tag = 0;
+ }
+}
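+
+/* e.g. xrnic_fill_wr(qp_attr, XRNIC_SQ_DEPTH) pre-populates every SQ slot
+ * with a SEND_ONLY descriptor pointing at its per-entry send SGL chunk
+ * (assuming XRNIC_SQ_DEPTH is the depth used for that QP's SQ).
+ */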
+
+static int xernic_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct device_node *np = NULL;
+ struct resource resource;
+ void __iomem *virt_addr;
+ u64 start_addr;
+ int status;
+ int len;
+/* TODO: Not using pdev; a global data structure xrnic_dev is shared among
+ * all the objects in the ernic driver instead. Need to set xrnic_dev as
+ * the platform private data and have all ernic objects retrieve it from
+ * the platform_device pointer.
+ */
+#ifdef EXPERIMENTAL_CODE
+ int val = 0;
+#endif
+ phys_addr_t phy_addr;
+
+ pr_info("XRNIC driver Version = %s\n", XRNIC_VERSION);
+
+ register_inetaddr_notifier(&cmac_inetaddr_notifier);
+ register_inet6addr_notifier(&cmac_inet6addr_notifier);
+ init_mr(MEMORY_REGION_BASE, MEMORY_REGION_LEN);
+
+ np = of_find_node_by_name(NULL, "ernic");
+ if (!np) {
+ pr_err("xrnic can't find compatible node in device tree.\n");
+ return -ENODEV;
+ }
+
+ xrnic_dev = kzalloc(sizeof(*xrnic_dev), GFP_KERNEL);
+ if (!xrnic_dev)
+ return -ENOMEM;
+ ret = alloc_chrdev_region(&xrnic_dev_number, 0,
+ NUM_XRNIC_DEVS, DEVICE_NAME);
+ if (ret) {
+ DEBUG_LOG("XRNIC:: Failed to register char device\n");
+ goto alloc_failed;
+ } else {
+ DEBUG_LOG(KERN_INFO "XRNIC Registered with :\n");
+ DEBUG_LOG(KERN_INFO "Major : %u || ", MAJOR(xrnic_dev_number));
+ DEBUG_LOG(KERN_INFO "Minor : %u\n", MINOR(xrnic_dev_number));
+ }
+/* TODO: xrnic_class is created but not used. Need to enable debug and
+ * statistic counters through this interface.
+ */
+ xrnic_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(xrnic_class)) {
+ ret = PTR_ERR(xrnic_class);
+ goto class_failed;
+ }
+
+ /* Connect the file operations with the cdev */
+ /* TODO: cdev created but not used. Need to implement when
+ * userspace applications are implemented. Currently all the
+ * callbacks in xrnic_fops are dummy.
+ */
+ cdev_init(&xrnic_dev->cdev, &xrnic_fops);
+ xrnic_dev->cdev.owner = THIS_MODULE;
+
+ /* Connect the major/minor number to the cdev */
+ ret = cdev_add(&xrnic_dev->cdev, xrnic_dev_number, 1);
+ if (ret < 0) {
+ DEBUG_LOG("ERROR: XRNIC cdev allocation failed\n");
+ goto cdev_failed;
+ }
+
+ device_create(xrnic_class, NULL, xrnic_dev_number, NULL,
+ "%s", "xrnic0");
+
+ /* The node offset argument 0 xrnic 0x0 0x84000000 len 128K*/
+ ret = of_address_to_resource(np, XRNIC_REG_MAP_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 0.\n");
+ goto dev_failed;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_REG_MAP_NODE);
+ DEBUG_LOG("xrnic memory 0x%llx of size=%x bytes mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.xrnic_regs_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.xrnic_regs = (struct xrnic_reg_map *)virt_addr;
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (IS_ERR_VALUE(phy_addr)) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.tx_hdr_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory Tx HDR BUF Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys);
+#else
+ /*Mapping for Xrnic TX HEADERS 0x20100000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_TX_HDR_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 5.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_TX_HDR_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory TX header 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (IS_ERR_VALUE(phy_addr)) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.tx_sgl_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory Tx SGL Buf Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys);
+#else
+ /*Mapping for Xrnic TX DMA SGL 0xB4000000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_TX_SGL_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 6.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_TX_SGL_BUF_NODE);
+ DEBUG_LOG("xrnic memory TX SGL 0x%llx of size=%x\n",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba = (void *)(uintptr_t)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (IS_ERR_VALUE(phy_addr)) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.bypass_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.bypass_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.bypass_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory Bypass Buf Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.bypass_buf_ba_phys);
+#else
+ /*Mapping for Xrnic BYPASS PL 0x20120000 to 16 kb.*/
+ /*Mapping for Xrnic BYPASS PS 0x20120000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_BYPASS_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 7.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_BYPASS_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory BYPASS:0x%llx of siz:%xb mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.bypass_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.bypass_buf_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_NUM_OF_ERROR_BUF * XRNIC_SIZE_OF_ERROR_BUF;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.err_pkt_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory ERR PKT Buf Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys);
+#else
+ /*Mapping for Xrnic ERROR-DROPP PL 0x20110000 to 16 kb.*/
+ /*Mapping for Xrnic ERROR-DROPP PS 0x20110000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_ERRPKT_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 8.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_ERRPKT_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory ERROR PKT 0x%llx of size=%x\n",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_OUT_ERRST_Q_NUM_ENTRIES;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.out_errsts_q_ba, 0, len);
+ DEBUG_LOG("xrnic memory OUT ERR STS Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys);
+#else
+ /*Mapping for Xrnic OUT ERR_STS 0x29000000 to 4 kb.*/
+ ret = of_address_to_resource(np, XRNIC_OUTERR_STS_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 9.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_OUTERR_STS_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory 0x%llx of size=%x bytes mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_IN_ERRST_Q_NUM_ENTRIES;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.in_errsts_q_ba, 0, len);
+ DEBUG_LOG("xrnic memory IN ERR STS Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys);
+#else
+ /*Mapping for Xrnic IN ERR_STS PL 0x29001000 to 16 kb.*/
+ /*Mapping for Xrnic IN ERR_STS PS 0x29001000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_INERR_STS_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 10.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_INERR_STS_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory 0x%llx of size=%x bytes mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba = (void *)virt_addr;
+#endif
+
+ /*Mapping for Xrnic RQ WR DBRL PL 0x29002000 to 4 kb.*/
+ /*Mapping for Xrnic RQ WR DBRL PS 0x29002000 to 4 kb.*/
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_NUM_OF_DATA_BUF * XRNIC_SIZE_OF_DATA_BUF;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.data_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.data_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.data_buf_ba, 0, len);
+#else
+ /*Mapping for Xrnic DATA BUFF BA 0x29040000 to 4 kb.*/
+ ret = of_address_to_resource(np, XRNIC_DATA_BUF_BA_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 14.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_DATA_BUF_BA_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory DATA BUFF BA 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.data_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.data_buf_ba = (void *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_NUM_OF_RESP_ERR_BUF * XRNIC_SIZE_OF_RESP_ERR_BUF;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba, 0, len);
+#else
+ /*Mapping for Xrnic RESP ERR PKT BUF 0x20130000 to 16kb.*/
+ ret = of_address_to_resource(np, XRNIC_RESP_ERR_PKT_BUF_BA, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 14.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_RESP_ERR_PKT_BUF_BA);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic response error packet buffer base address [0x%llx]",
+ start_addr);
+ DEBUG_LOG(" of size=%x bytes mapped at 0x%p\n",
+ (u32)resource.end - (u32)resource.start, virt_addr);
+
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba = (void *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_SEND_SGL_SIZE * XRNIC_SQ_DEPTH;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.send_sgl_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.send_sgl =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+
+ memset(xrnic_dev->xrnic_mmap.send_sgl, 0, len);
+ DEBUG_LOG("xrnic memory Send SGL Base Addr = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.send_sgl_phys);
+
+#else /* EXPERIMENTAL_CODE */
+ ret = of_address_to_resource(np, XRNIC_SEND_SGL_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 1.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_SEND_SGL_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+
+ DEBUG_LOG("xrnic memory send sgl 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.send_sgl_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.send_sgl = (void *)virt_addr;
+#endif /* EXPERIMENTAL_CODE */
+
+ DEBUG_LOG("send SGL physical address :%llx\n",
+ xrnic_dev->xrnic_mmap.send_sgl_phys);
+ DEBUG_LOG("xrnic mmap:%p\n", &xrnic_dev->xrnic_mmap);
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_SQ_DEPTH * sizeof(struct xrnic_cqe);
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.cq_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.cq_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.cq_ba, 0, len);
+ DEBUG_LOG("xrnic memory CQ BA Base Addr = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.cq_ba_phys);
+
+#else
+ ret = of_address_to_resource(np, XRNIC_CQ_BA_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 2.\n");
+ goto mem_config_err;
+ }
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_CQ_BA_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory send CQ 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.cq_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.cq_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_RECV_PKT_SIZE * XRNIC_RQ_DEPTH;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+
+ memset(xrnic_dev->xrnic_mmap.rq_buf_ba_ca, 0, len);
+ DEBUG_LOG("xrnic memory Receive Q Buffer = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys);
+
+#else /* EXPERIMENTAL_CODE */
+ ret = of_address_to_resource(np, XRNIC_RQ_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 3.\n");
+ goto mem_config_err;
+ }
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_RQ_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory receive Q Buf 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca = (void *)virt_addr;
+#endif /* EXPERIMENTAL_CODE */
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_SEND_PKT_SIZE * XRNIC_SQ_DEPTH;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.sq_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.sq_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.sq_ba, 0, len);
+ DEBUG_LOG("xrnic memory Send Q Base Addr = %#x, %llx.\n",
+ val, xrnic_dev->xrnic_mmap.sq_ba_phys);
+#else
+ ret = of_address_to_resource(np, XRNIC_SQ_BA_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 4.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_SQ_BA_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory SEND Q 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.sq_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.sq_ba = (struct wr *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.rq_wrptr_db_add, 0, len);
+#else
+ ret = of_address_to_resource(np, XRNIC_RQWR_PTR_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 11.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_RQWR_PTR_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory RQ WPTR 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.sq_cmpl_db_add, 0, len);
+#else
+ ret = of_address_to_resource(np, XRNIC_SQ_CMPL_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 12.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_SQ_CMPL_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory SQ CMPL 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG("bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add = (void *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.stat_rq_buf_ca, 0, len);
+#else
+ ret = of_address_to_resource(np, XRNIC_STAT_XRNIC_RQ_BUF_NODE,
+ &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 13.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_STAT_XRNIC_RQ_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory STAT RQ BUF 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca = (void *)virt_addr;
+#endif
+ xrnic_dev->io_qp_count = XRNIC_MAX_QP_SUPPORT;
+ /* XRNIC controller H/W configuration which includes XRNIC
+ * global configuration, QP1 initialization and interrupt enable.
+ */
+ ret = xrnic_ctrl_hw_init();
+ if (ret < 0) {
+ pr_err("xrnic hw init failed.\n");
+ goto mem_config_err;
+ }
+	/* TODO: Currently, the ERNIC IP exports 8 interrupt lines in the
+	 * DTS, but the IP asserts only the first line for all 8 sources.
+	 * Internally, all 8 lines are logically ORed and delivered as a
+	 * single interrupt, with an interrupt status register indicating
+	 * which source is asserted. So we parse only index 0 of irq_map
+	 * from the DTS, and the interrupt handler routine reads the
+	 * interrupt status register to identify which interrupt is
+	 * asserted.
+	 *
+	 * The design needs fixing to export only 1 interrupt line in DTS.
+	 */
+ xrnic_dev->xrnic_irq = irq_of_parse_and_map(np, 0);
+	if (!xrnic_dev->xrnic_irq) {
+		pr_err("xrnic can't determine irq.\n");
+		ret = XRNIC_FAILED;
+		goto mem_config_err;
+	}
+	status = request_irq(xrnic_dev->xrnic_irq, xrnic_irq_handler, 0,
+			     "xrnic_irq", xrnic_dev);
+	if (status) {
+		pr_err("XRNIC irq request handler failed\n");
+		ret = status;
+		goto err_irq;
+	}
+
+ tasklet_init(&xrnic_dev->mad_pkt_recv_task,
+ xrnic_mad_pkt_recv_intr_handler,
+ (unsigned long)xrnic_dev);
+ tasklet_init(&xrnic_dev->qp_pkt_recv_task,
+ xrnic_qp_pkt_recv_intr_handler, (unsigned long)xrnic_dev);
+ tasklet_init(&xrnic_dev->qp_fatal_task,
+ xrnic_qp_fatal_handler, (unsigned long)xrnic_dev);
+ tasklet_init(&xrnic_dev->wqe_completed_task,
+ xrnic_wqe_completed_intr_handler,
+ (unsigned long)xrnic_dev);
+ INIT_LIST_HEAD(&cm_id_list);
+
+ return XRNIC_SUCCESS;
+err_irq:
+mem_config_err:
+/* free_mem() works only on valid physical addresses returned from
+ * alloc_mem(), and ignores NULL or invalid addresses.
+ * So, even if any of the above allocations failed part-way, it is safe
+ * to call free_mem() on all of the addresses below.
+ *
+ * Carve-out memory is used for the ERNIC requirements, so
+ * devm_kzalloc() cannot be used: the kernel cannot see this memory
+ * until it is ioremapped.
+ */
+ iounmap(xrnic_dev->xrnic_mmap.xrnic_regs);
+ free_mem(xrnic_dev->xrnic_mmap.send_sgl_phys);
+ free_mem(xrnic_dev->xrnic_mmap.cq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.bypass_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.data_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys);
+
+dev_failed:
+ /* Remove the cdev */
+ cdev_del(&xrnic_dev->cdev);
+
+ /* Remove the device node entry */
+ device_destroy(xrnic_class, xrnic_dev_number);
+
+cdev_failed:
+ /* Destroy xrnic_class */
+ class_destroy(xrnic_class);
+
+class_failed:
+ /* Release the major number */
+ unregister_chrdev_region(MAJOR(xrnic_dev_number), 1);
+
+alloc_failed:
+ kfree(xrnic_dev);
+ return ret;
+}
+
+static int xernic_remove(struct platform_device *pdev)
+{
+/* TODO: pdev is unused; the driver instead relies on the global
+ * xrnic_dev structure, which is shared among all objects in the ernic
+ * driver. xrnic_dev should be obtained from the platform_device pointer.
+ */
+ iounmap(xrnic_dev->xrnic_mmap.xrnic_regs);
+ free_mem(xrnic_dev->xrnic_mmap.send_sgl_phys);
+ free_mem(xrnic_dev->xrnic_mmap.cq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.bypass_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.data_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys);
+
+	cdev_del(&xrnic_dev->cdev);
+	device_destroy(xrnic_class, xrnic_dev_number);
+	unregister_chrdev_region(MAJOR(xrnic_dev_number), 1);
+ free_irq(xrnic_dev->xrnic_irq, xrnic_dev);
+ kfree(xrnic_dev);
+ class_destroy(xrnic_class);
+ unregister_inetaddr_notifier(&cmac_inetaddr_notifier);
+ unregister_inet6addr_notifier(&cmac_inet6addr_notifier);
+
+ return 0;
+}
+
+static const struct of_device_id xernic_of_match[] = {
+ { .compatible = "xlnx,ernic-1.0", },
+ { /* end of table*/ }
+};
+MODULE_DEVICE_TABLE(of, xernic_of_match);
+
+static struct platform_driver xernic_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xernic_of_match,
+ },
+ .probe = xernic_probe,
+ .remove = xernic_remove,
+};
+
+module_platform_driver(xernic_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xilinx RNIC driver");
+MODULE_AUTHOR("Sandeep Dhanvada");
diff --git a/drivers/staging/xlnx_ernic/xmain.h b/drivers/staging/xlnx_ernic/xmain.h
new file mode 100644
index 000000000000..2f45f94d2f85
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmain.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XLNX_MAIN_H_
+#define _XLNX_MAIN_H_
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#define XRNIC_VERSION "1.2"
+#define NUM_XRNIC_DEVS 1
+#define DEVICE_NAME "xrnic"
+#define DRIVER_NAME "xrnic"
+
+int xrnic_open(struct inode *inode, struct file *file);
+int xrnic_release(struct inode *inode, struct file *file);
+long xrnic_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+ssize_t xrnic_read(struct file *file, char __user *buf,
+		   size_t count, loff_t *ppos);
+ssize_t xrnic_write(struct file *file, const char __user *buf,
+		    size_t count, loff_t *ppos);
+void xrnic_fill_wr(struct xrnic_qp_attr *qp_attr, u32 qp_depth);
+#ifdef __cplusplus
+ }
+#endif
+
+#endif
diff --git a/drivers/staging/xlnx_ernic/xmr.c b/drivers/staging/xlnx_ernic/xmr.c
new file mode 100644
index 000000000000..4959595d48d0
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmr.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory registration helpers for the RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#include "xcommon.h"
+#include "xhw_config.h"
+
+struct list_head mr_free;
+struct list_head mr_alloc;
+
+atomic_t pd_index = ATOMIC_INIT(0);
+int free_mem_ceil;
+int free_mem_remain;
+void __iomem *mtt_va;
+
+DECLARE_BITMAP(ernic_memtable, XRNIC_HW_MAX_QP_SUPPORT);
+/**
+ * alloc_pool_remove() - removes an entry from the alloc pool
+ * @chunk: memory region to be removed from the alloc pool.
+ * @return: 0 on success.
+ *
+ * TODO: Change the return type to void and drop the return statement.
+ */
+int alloc_pool_remove(struct mr *chunk)
+{
+ struct mr *next, *tmp;
+
+ list_for_each_entry_safe(next, tmp, &mr_alloc, list) {
+ if (next->paddr == chunk->paddr) {
+ __list_del_entry(&next->list);
+ free_mem_remain += chunk->len;
+ }
+ }
+ return 0;
+}
+
+/**
+ * free_pool_insert() - inserts specified memory region in the free pool
+ * @chunk: memory region to be inserted in free pool.
+ * @return: 0 on success. else, returns -ENOMEM.
+ *
+ * Adds the specified memory to the free pool and if possible,
+ * merges it with adjacent regions in free pool.
+ */
+int free_pool_insert(struct mr *chunk)
+{
+ struct mr *next, *dup, *tmp;
+ struct mr *prev = NULL;
+
+	dup = kzalloc(sizeof(*dup), GFP_ATOMIC);
+	if (!dup)
+		return -ENOMEM;
+	memcpy(dup, chunk, sizeof(*dup));
+
+	/* If the list is empty, add the new region to the free pool */
+	if (list_empty(&mr_free)) {
+		list_add_tail(&dup->list, &mr_free);
+		goto done;
+	}
+
+	/* If the new region would exceed the free memory limit,
+	 * return an error.
+	 */
+	if (free_mem_ceil < (free_mem_remain + dup->len)) {
+		kfree(dup);
+		return -ENOMEM;
+	}
+
+ /* For a non-empty list, add the region at a suitable place
+ * in the free pool.
+ */
+ list_for_each_entry_safe(next, tmp, &mr_free, list) {
+ if (dup->paddr < next->paddr) {
+ prev = list_prev_entry(next, list);
+ list_add(&dup->list, &prev->list);
+ goto merge_free_pool;
+ }
+ }
+ /*
+ * If no suitable position to insert within free pool, then,
+ * append at the tail.
+ */
+ list_add_tail(&dup->list, &mr_free);
+
+ /* If possible, merge the region with previous and next regions. */
+merge_free_pool:
+ if (next && (dup->paddr + dup->len == next->paddr)) {
+ dup->len += next->len;
+ __list_del_entry(&next->list);
+ }
+
+ if (prev && (prev->paddr + prev->len == dup->paddr)) {
+ prev->len += dup->len;
+ __list_del_entry(&dup->list);
+ }
+	/* Except for the phys and virt addresses, clear all contents of the
+	 * region. If this region is in the alloc pool, remove it from there.
+	 */
+done:
+ dup->lkey = 0;
+ dup->rkey = 0;
+ dup->vaddr = 0;
+ dup->access = MR_ACCESS_RESVD;
+ alloc_pool_remove(chunk);
+ return 0;
+}
+EXPORT_SYMBOL(free_pool_insert);
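+
+/* Illustrative example of the merge logic above: with a free pool holding
+ * [0xC4000000, len 0x100] and [0xC4000200, len 0x100], inserting
+ * [0xC4000100, len 0x100] places it between the two, both merge checks
+ * fire, and a single region [0xC4000000, len 0x300] remains.
+ */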
+
+/**
+ * alloc_pd() - Allocates a Protection Domain
+ * @return: pointer to an ernic_pd struct, or NULL on allocation failure.
+ */
+struct ernic_pd *alloc_pd(void)
+{
+	struct ernic_pd *new_pd;
+
+	new_pd = kzalloc(sizeof(*new_pd), GFP_ATOMIC);
+	if (!new_pd)
+		return NULL;
+	atomic_inc(&pd_index);
+	atomic_set(&new_pd->id, atomic_read(&pd_index));
+	return new_pd;
+}
+EXPORT_SYMBOL(alloc_pd);
+
+/**
+ * dealloc_pd() - Deallocates a Protection Domain
+ * @pd: protection domain to be deallocated.
+ *
+ */
+void dealloc_pd(struct ernic_pd *pd)
+{
+ atomic_dec(&pd_index);
+ kfree(pd);
+}
+EXPORT_SYMBOL(dealloc_pd);
+
+/**
+ * dereg_mr() - deregisters the memory region from the Channel adapter.
+ * @mr: memory region to be de-registered.
+ *
+ * dereg_mr() de-registers a memory region with the CA and releases its
+ * entry in the ERNIC memory table.
+ */
+void dereg_mr(struct mr *mr)
+{
+ int mtt_idx = (mr->rkey & 0xFF);
+
+ //memset(mtt_va + mtt_offset, 0, sizeof(struct ernic_mtt));
+ clear_bit(mtt_idx, ernic_memtable);
+}
+EXPORT_SYMBOL(dereg_mr);
+
+/**
+ * alloc_mem() - Allocates a Memory Region
+ * @pd: Protection domain mapped to the memory region
+ * @len: Length of the memory region required
+ * @return: on success, returns the physical address.
+ * else, returns -ENOMEM.
+ */
+phys_addr_t alloc_mem(struct ernic_pd *pd, int len)
+{
+ struct mr *next, *new_alloc, *new_free, *tmp;
+ int _len;
+
+ _len = round_up(len, 256);
+	new_alloc = kzalloc(sizeof(*new_alloc), GFP_KERNEL);
+	new_free = kzalloc(sizeof(*new_free), GFP_KERNEL);
+	if (!new_alloc || !new_free)
+		goto err;
+
+	/* Requested more memory than the free pool capacity? */
+	if (free_mem_remain < _len)
+		goto err;
+
+ list_for_each_entry_safe(next, tmp, &mr_free, list) {
+ if (next->len == _len) {
+ new_alloc->paddr = next->paddr;
+ __list_del_entry(&next->list);
+ goto reg_mr;
+ }
+ if (next->len > _len) {
+ __list_del_entry(&next->list);
+ new_alloc->paddr = next->paddr;
+ new_free->paddr = next->paddr + _len;
+ new_free->len = next->len - _len;
+ free_pool_insert(new_free);
+ goto reg_mr;
+ }
+ }
+
+err:
+ /* No free memory of requested size */
+ kfree(new_alloc);
+ kfree(new_free);
+
+ return -ENOMEM;
+reg_mr:
+ free_mem_remain = free_mem_remain - _len;
+ new_alloc->pd = pd;
+ new_alloc->len = _len;
+ new_alloc->vaddr = (u64)(uintptr_t)ioremap(new_alloc->paddr, _len);
+ list_add_tail(&new_alloc->list, &mr_alloc);
+ return new_alloc->paddr;
+}
+EXPORT_SYMBOL(alloc_mem);
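+
+/* Illustrative usage sketch (error handling abbreviated): requested
+ * lengths are rounded up to a multiple of 256 bytes, and the ioremapped
+ * virtual address can be looked up with get_virt_addr():
+ *
+ *	phys_addr_t pa = alloc_mem(NULL, 4096);
+ *
+ *	if (pa == (phys_addr_t)-ENOMEM)
+ *		return -ENOMEM;
+ *	buf = (void *)(uintptr_t)get_virt_addr(pa);
+ *	...
+ *	free_mem(pa);
+ */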
+
+u64 get_virt_addr(phys_addr_t phys_addr)
+{
+ struct mr *next;
+
+ list_for_each_entry(next, &mr_alloc, list) {
+ if (next->paddr == phys_addr)
+ return next->vaddr;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(get_virt_addr);
+
+/**
+ * free_mem() - inserts a memory region in free pool and
+ * removes from alloc pool
+ * @paddr: physical address to be freed.
+ *
+ */
+void free_mem(phys_addr_t paddr)
+{
+ struct mr *next;
+
+ list_for_each_entry(next, &mr_alloc, list) {
+ if (next->paddr == paddr)
+ goto found;
+ }
+ return;
+found:
+ iounmap((void __iomem *)(unsigned long)next->vaddr);
+ free_pool_insert(next);
+}
+EXPORT_SYMBOL(free_mem);
+
+/**
+ * register_mem_to_ca() - Registers a memory region with the Channel Adapter
+ * @mr: memory region to register.
+ * @return: a pointer to struct mr
+ *
+ * register_mem_to_ca() validates the memory region provided and registers
+ * the memory region with the CA and updates the mkey in the registered region.
+ *
+ */
+static struct mr *register_mem_to_ca(struct mr *mr)
+{
+	int bit, mtt_idx;
+	void __iomem *base;
+ struct ernic_mtt mtt;
+
+ bit = find_first_zero_bit(ernic_memtable, XRNIC_HW_MAX_QP_SUPPORT);
+ set_bit(bit, ernic_memtable);
+ mtt_idx = bit;
+ mtt.pa = mr->paddr;
+ mtt.iova = mr->vaddr;
+ mtt.pd = atomic_read(&mr->pd->id);
+ mr->rkey = (mtt_idx << 8) | bit;
+ mtt.rkey = mr->rkey;
+ mtt.access = mr->access;
+ mtt.len = mr->len;
+	base = mtt_va + (mtt_idx * 0x100);
+
+	iowrite32(mtt.pd, base + ERNIC_PD_OFFSET);
+	iowrite32((mtt.iova & 0xFFFFFFFF), base + ERNIC_IOVA_OFFSET);
+	iowrite32(((mtt.iova >> 32) & 0xFFFFFFFF),
+		  base + ERNIC_IOVA_OFFSET + 4);
+	iowrite32((mtt.pa & 0xFFFFFFFF), base + ERNIC_PA_OFFSET);
+	iowrite32(((mtt.pa >> 32) & 0xFFFFFFFF),
+		  base + ERNIC_PA_OFFSET + 4);
+	iowrite32((mtt.rkey & 0xFFFF), base + ERNIC_RKEY_OFFSET);
+	iowrite32(mtt.len, base + ERNIC_LEN_OFFSET);
+	iowrite32(mtt.access, base + ERNIC_ACCESS_OFFSET);
+ return mr;
+}
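+
+/* Each MTT entry occupies 0x100 bytes of the table ioremapped at mtt_va:
+ * entry i lives at mtt_va + i * 0x100, with the PD, IOVA, PA, rkey, length
+ * and access fields at the ERNIC_*_OFFSET offsets defined in xmr.h.
+ * The rkey encodes the MTT index, which dereg_mr() recovers from its
+ * low byte.
+ */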
+
+/**
+ * reg_phys_mr() - Registers a physical address with the Channel Adapter
+ * @pd: Protection domain associated with the physical address.
+ * @phys_addr: The physical address to be registered.
+ * @len: length of the buffer to be registered.
+ * @access: access permissions for the registered buffer.
+ * @va_reg_base: Virtual address. Currently, ERNIC supports neither
+ *		Base Memory Extensions nor Zero Based VA, so this argument
+ *		is ignored for now; it exists only to satisfy the Verbs
+ *		signature.
+ * @return: on success, returns a pointer to struct mr.
+ *	    else, returns a pointer to error.
+ *
+ * reg_phys_mr() validates the memory region provided, registers it with
+ * the CA, and updates the mkey in the registered region.
+ */
+struct mr *reg_phys_mr(struct ernic_pd *pd, phys_addr_t phys_addr,
+ int len, int access, void *va_reg_base)
+{
+ struct mr *phys_mr;
+ struct mr *next;
+
+ list_for_each_entry(next, &mr_alloc, list) {
+ if (next->paddr == phys_addr)
+ goto found;
+ }
+ /* Physical Address of the requested region is invalid */
+ return ERR_PTR(-EINVAL);
+found:
+	phys_mr = kzalloc(sizeof(*phys_mr), GFP_KERNEL);
+	if (!phys_mr)
+		return ERR_PTR(-ENOMEM);
+ phys_mr->paddr = phys_addr;
+ phys_mr->vaddr = next->vaddr;
+ phys_mr->len = len;
+ phys_mr->access = access;
+ phys_mr->pd = pd;
+
+ return register_mem_to_ca(phys_mr);
+}
+EXPORT_SYMBOL(reg_phys_mr);
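+
+/* Illustrative registration flow (a sketch only; error paths omitted):
+ *
+ *	struct ernic_pd *pd = alloc_pd();
+ *	phys_addr_t pa = alloc_mem(pd, len);
+ *	struct mr *mr = reg_phys_mr(pd, pa, len, MR_ACCESS_RDWR, NULL);
+ *
+ *	if (IS_ERR(mr))
+ *		goto fail;
+ *
+ * mr->rkey is the key advertised to the remote peer for RDMA access
+ * (see struct ernic_bwtest_struct in xperftest.h).
+ */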
+
+struct mr *query_mr(struct ernic_pd *pd)
+{
+ struct mr *next, *tmp;
+
+ list_for_each_entry_safe(next, tmp, &mr_alloc, list) {
+ if (atomic_read(&next->pd->id) == atomic_read(&pd->id)) {
+ pr_info("Found MR\n");
+ goto ret;
+ }
+ }
+ return ERR_PTR(-EINVAL);
+ret:
+ return next;
+}
+EXPORT_SYMBOL(query_mr);
+
+/**
+ * dump_list() - prints all the regions for the specified list.
+ * @head: HEAD pointer for the list to be printed.
+ *
+ * dump_list() iterates over the specified list HEAD and prints the
+ * physical address and length of each node in the list.
+ */
+static void dump_list(struct list_head *head)
+{
+ struct mr *next;
+
+ list_for_each_entry(next, head, list) {
+ pr_info("MR [%d:%s] Phys_addr = %#x, vaddr = %llx, len = %d\n",
+ __LINE__, __func__,
+ next->paddr, next->vaddr, next->len);
+ }
+}
+
+/**
+ * dump_free_list() - prints all the regions in the free pool.
+ *
+ * dump_free_list() is a wrapper function for dump_list()
+ * to print free pool data
+ *
+ */
+void dump_free_list(void)
+{
+ dump_list(&mr_free);
+}
+EXPORT_SYMBOL(dump_free_list);
+
+/**
+ * dump_alloc_list() - prints all the regions in the alloc pool.
+ *
+ * dump_alloc_list() is a wrapper function for dump_list()
+ * to print alloc pool data
+ */
+void dump_alloc_list(void)
+{
+ dump_list(&mr_alloc);
+}
+EXPORT_SYMBOL(dump_alloc_list);
+
+/**
+ * init_mr() - Initialization function for memory region.
+ * @addr: Physical Address of the starting memory region.
+ * @length: Length of the region to initialize.
+ * @return: 0 on success.
+ *	    else, -EINVAL or -ENOMEM.
+ *
+ * init_mr() initializes a region of free memory
+ *
+ * Note: This should be called only once by the RNIC driver.
+ */
+int init_mr(phys_addr_t addr, int length)
+{
+	struct mr *reg = kmalloc(sizeof(*reg), GFP_KERNEL);
+
+	if (!reg)
+		return -ENOMEM;
+
+ /* Multiple init_mr() calls? */
+ if (free_mem_ceil > 0)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&mr_free);
+ INIT_LIST_HEAD(&mr_alloc);
+ reg->paddr = addr;
+ reg->len = length;
+ free_pool_insert(reg);
+ free_mem_remain = reg->len;
+ free_mem_ceil = free_mem_remain;
+/* TODO: 0x2000 is the current Protection Domain region length for 255
+ * Protection Domains.
+ * The number of Protection Domains and the length of each should be
+ * retrieved from the DTS, and the overall remap size for all Protection
+ * Domains calculated from them, instead of using a hard-coded value.
+ * Currently, the length of each Protection Domain is not exported in
+ * the DTS.
+ */
+ mtt_va = ioremap(MTT_BASE, 0x2000);
+ return 0;
+}
diff --git a/drivers/staging/xlnx_ernic/xmr.h b/drivers/staging/xlnx_ernic/xmr.h
new file mode 100644
index 000000000000..7c822b22eff9
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmr.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+struct mr {
+ phys_addr_t paddr;
+ u64 vaddr;
+ int len;
+ unsigned int access;
+ struct ernic_pd *pd;
+ int lkey;
+ int rkey;
+ struct list_head list;
+};
+
+struct ernic_pd {
+ atomic_t id;
+};
+
+struct ernic_mtt {
+ unsigned long pd;
+#define ERNIC_PD_OFFSET 0
+ u64 iova;
+#define ERNIC_IOVA_OFFSET 4
+ u64 pa;
+#define ERNIC_PA_OFFSET 12
+ int rkey;
+#define ERNIC_RKEY_OFFSET 20
+ int len;
+#define ERNIC_LEN_OFFSET 24
+ unsigned int access;
+#define ERNIC_ACCESS_OFFSET 28
+};
+
+phys_addr_t alloc_mem(struct ernic_pd *pd, int len);
+void free_mem(phys_addr_t paddr);
+struct mr *query_mr(struct ernic_pd *pd);
+struct ernic_pd *alloc_pd(void);
+void dealloc_pd(struct ernic_pd *pd);
+void dump_free_list(void);
+void dump_alloc_list(void);
+int init_mr(phys_addr_t addr, int len);
+int free_pool_insert(struct mr *chunk);
+void dereg_mr(struct mr *mr);
+u64 get_virt_addr(phys_addr_t phys_addr);
+struct mr *reg_phys_mr(struct ernic_pd *pd, phys_addr_t phys_addr,
+ int len, int access, void *va_reg_base);
+int alloc_pool_remove(struct mr *chunk);
+
+extern void __iomem *mtt_va;
+/* TODO: Get the base address and length from the DTS instead of macros.
+ * Currently, the design targets only MicroBlaze, with a fixed memory
+ * layout in the design.
+ *
+ * MEMORY_REGION_BASE is a carve-out memory region which is ioremapped
+ * as required for the ERNIC configuration and Queue Pairs.
+ */
+#define MEMORY_REGION_BASE 0xC4000000
+#define MEMORY_REGION_LEN 0x3BFFFFFF
+/* TODO: Get MTT_BASE from DTS instead of Macro. */
+#define MTT_BASE 0x84000000
+#define MR_ACCESS_READ 0
+#define MR_ACCESS_WRITE 1
+#define MR_ACCESS_RDWR 2
+#define MR_ACCESS_RESVD 3
diff --git a/drivers/staging/xlnx_ernic/xperftest.h b/drivers/staging/xlnx_ernic/xperftest.h
new file mode 100644
index 000000000000..609469450a9f
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xperftest.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ */
+
+#ifndef _PERF_TEST_H
+#define _PERF_TEST_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+struct ernic_bwtest_struct {
+ u64 reserved1;
+ int qp_number;
+ int reserved2;
+ unsigned long long rkey;
+ unsigned long long vaddr;
+ char reserved3[24];
+};
+
+int perftest_parse_addr(struct sockaddr_storage *s_addr, char *buf);
+void rq_handler(u32 rq_count, void *rq_context);
+void sq_handler(u32 rq_count, void *sq_context);
+void perftest_fill_wr(void __iomem *sq_ba);
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _PERF_TEST_H*/
diff --git a/drivers/staging/xlnx_ernic/xqp.c b/drivers/staging/xlnx_ernic/xqp.c
new file mode 100644
index 000000000000..dae21fda5da6
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xqp.c
@@ -0,0 +1,1310 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#include "xcommon.h"
+
+#define DISPLAY_REGS_ON_DISCONNECT
+#define EXPERIMENTAL_CODE
+
+struct xrnic_conn_param {
+ const void *private_data;
+ u8 private_data_len;
+ u8 responder_resources;
+ u8 initiator_depth;
+ u8 flow_control;
+ u8 retry_count;
+ u8 rnr_retry_count;
+ u8 srq;
+ u8 qp_num;
+};
+
+/* EXTRA Bytes for Invariant CRC */
+#define ERNIC_INV_CRC 4
+/* ERNIC Doesn't have Variant CRC for P2P */
+#define ERNIC_VAR_CRC 0
+#define EXTRA_PKT_LEN (ERNIC_INV_CRC + ERNIC_VAR_CRC)
+
+#define cpu_to_be24(x) ((x) << 16)
+
+#define CMA_VERSION 0
+#define QP_STAT_SQ_EMPTY_BIT_POS (9)
+#define QP_STAT_OUTSTANDG_EMPTY_Q_BIT_POS (10)
+
+int in_err_wr_ptr;
+struct list_head cm_id_list;
+
+/**
+ * xrnic_set_qp_state() - Sets the qp state to the desired state
+ * @qp_num: XRNIC QP number
+ * @state: State to set
+ *
+ * @return: XRNIC_SUCCESS on success, or an error code
+ */
+int xrnic_set_qp_state(int qp_num, int state)
+{
+ if (qp_num < 0)
+ return -XRNIC_INVALID_QP_ID;
+
+ if (state != XRNIC_QP_IN_USE && state != XRNIC_QP_FREE)
+ return -XRNIC_INVALID_QP_STATUS;
+
+ xrnic_dev->qp_attr[qp_num].qp_status = state;
+ return XRNIC_SUCCESS;
+}
+
+/**
+ * xrnic_find_free_qp() - Finds the free qp to use
+ * @return: free QP number, or an error value if no QP is free
+ */
+int xrnic_find_free_qp(void)
+{
+ int i;
+
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+		/* Checking for a QP with zero remote and local cm id */
+ if (xrnic_dev->qp_attr[i].qp_status == XRNIC_QP_FREE)
+ return i;
+ }
+ return XRNIC_FAILED;
+}
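+
+/* Application QPs are numbered from 2 upwards: index 0 of the qp_attr[]
+ * array corresponds to QP number 2, since QP 1 is reserved for
+ * connection-management (MAD) traffic; hence the "+ 2"/"- 2" adjustments
+ * throughout this file.
+ */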
+
+/**
+ * xrnic_rdma_create_qp() - Allocates a free QP and initializes it
+ * @cm_id: CM ID to associate with the QP
+ * @pd: Protection domain to associate the QP with
+ * @init_attr: QP attributes or config values
+ * @return: XRNIC_SUCCESS if successful, otherwise an error code
+ */
+int xrnic_rdma_create_qp(struct xrnic_rdma_cm_id *cm_id, struct ernic_pd *pd,
+ struct xrnic_qp_init_attr *init_attr)
+{
+ struct xrnic_qp_attr *qp_attr;
+ struct xrnic_qp_info *qp_info;
+ int ret;
+
+ if (init_attr->sq_depth > XRNIC_MAX_SQ_DEPTH ||
+ init_attr->rq_depth > XRNIC_MAX_RQ_DEPTH ||
+ init_attr->send_sge_size > XRNIC_MAX_SEND_SGL_SIZE ||
+ init_attr->send_pkt_size > XRNIC_MAX_SEND_PKT_SIZE) {
+ return -XRNIC_INVALID_QP_INIT_ATTR;
+ }
+
+ qp_info = &cm_id->qp_info;
+
+	qp_info->qp_num = xrnic_find_free_qp();
+	if (qp_info->qp_num == XRNIC_FAILED)
+		return -XRNIC_INVALID_QP_ID;
+	qp_info->qp_num += 2;
+
+ ret = xrnic_set_qp_state((qp_info->qp_num - 2), XRNIC_QP_IN_USE);
+ if (ret < 0)
+ return ret;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+
+ if (qp_info->qp_num < 2 || qp_attr->qp_type != init_attr->qp_type)
+ return -XRNIC_INVALID_QP_ID;
+
+ cm_id->qp_type = init_attr->qp_type;
+ cm_id->local_cm_id = (qp_info->qp_num);
+
+ qp_info->xrnic_rq_event_handler = init_attr->xrnic_rq_event_handler;
+ qp_info->rq_context = init_attr->rq_context;
+ qp_info->xrnic_sq_event_handler = init_attr->xrnic_sq_event_handler;
+ qp_info->sq_context = init_attr->sq_context;
+
+ qp_info->rq_buf_ba_ca = init_attr->rq_buf_ba_ca;
+ qp_info->rq_buf_ba_ca_phys = init_attr->rq_buf_ba_ca_phys;
+ qp_info->sq_ba = init_attr->sq_ba;
+ qp_info->sq_ba_phys = init_attr->sq_ba_phys;
+ qp_info->cq_ba = init_attr->cq_ba;
+ qp_info->cq_ba_phys = init_attr->cq_ba_phys;
+
+ qp_info->sq_depth = init_attr->sq_depth;
+ qp_info->rq_depth = init_attr->rq_depth;
+ qp_info->send_sge_size = init_attr->send_sge_size;
+ qp_info->send_pkt_size = init_attr->send_pkt_size;
+ qp_info->recv_pkt_size = init_attr->recv_pkt_size;
+
+ qp_attr->rq_buf_ba_ca = qp_info->rq_buf_ba_ca;
+ qp_attr->rq_buf_ba_ca_phys = qp_info->rq_buf_ba_ca_phys;
+ qp_attr->sq_ba = qp_info->sq_ba;
+ qp_attr->sq_ba_phys = qp_info->sq_ba_phys;
+ qp_attr->cq_ba = qp_info->cq_ba;
+ qp_attr->cq_ba_phys = qp_info->cq_ba_phys;
+
+ qp_attr->sq_depth = qp_info->sq_depth;
+ qp_attr->rq_depth = qp_info->rq_depth;
+ qp_attr->send_sge_size = qp_info->send_sge_size;
+ qp_attr->send_pkt_size = qp_info->send_pkt_size;
+ qp_attr->recv_pkt_size = qp_info->recv_pkt_size;
+#ifdef ERNIC_MEM_REGISTER
+ if (pd)
+ qp_attr->pd = atomic_read(&pd->id);
+#endif
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_create_qp);
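+
+/* Illustrative caller sketch (handler/context names are placeholders;
+ * queue buffer base addresses and sizes are omitted for brevity):
+ *
+ *	struct xrnic_qp_init_attr init_attr = {
+ *		.sq_depth = XRNIC_MAX_SQ_DEPTH,
+ *		.rq_depth = XRNIC_MAX_RQ_DEPTH,
+ *		.xrnic_rq_event_handler = my_rq_handler,
+ *		.rq_context = my_ctx,
+ *	};
+ *
+ *	ret = xrnic_rdma_create_qp(cm_id, pd, &init_attr);
+ */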
+
+/**
+ * xrnic_post_recv() - This function receives an incoming packet
+ * @qp_info: QP info on which packet should be received
+ * @rq_count: Number of packets to receive
+ * @return: XRNIC_SUCCESS if the required number of packets can be
+ *	    received, otherwise an error code
+ */
+int xrnic_post_recv(struct xrnic_qp_info *qp_info, u32 rq_count)
+{
+ struct xrnic_qp_attr *qp_attr;
+ int ret = -XRNIC_INVALID_QP_ID;
+
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ ret = xrnic_qp_recv_pkt(qp_attr, rq_count);
+
+ return ret;
+}
+EXPORT_SYMBOL(xrnic_post_recv);
+
+/**
+ * xrnic_post_send() - This function posts a SEND WR
+ * @qp_info: QP info to post the request
+ * @sq_count: SEND packet count
+ * @return: XRNIC_SUCCESS if the SEND is successfully posted,
+ *	    otherwise an error code
+ */
+int xrnic_post_send(struct xrnic_qp_info *qp_info, u32 sq_count)
+{
+ struct xrnic_qp_attr *qp_attr;
+ int ret = -XRNIC_INVALID_QP_ID;
+
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ ret = xrnic_qp_send_pkt(qp_attr, sq_count);
+
+ return ret;
+}
+EXPORT_SYMBOL(xrnic_post_send);
+
+/**
+ * xrnic_destroy_qp() - Destroys the QP and resets the QP info
+ * @qp_info: QP info or config
+ * @return: XRNIC_SUCCESS if the QP is successfully destroyed,
+ *	    otherwise an error code
+ */
+int xrnic_destroy_qp(struct xrnic_qp_info *qp_info)
+{
+ u32 qp_num;
+ struct xrnic_qp_attr *qp_attr;
+
+	if (qp_info->qp_num < 2 ||
+	    (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2)) {
+		pr_err("Received invalid QP ID\n");
+		return -XRNIC_INVALID_QP_ID;
+	}
+
+	qp_num = qp_info->qp_num;
+	qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+	xrnic_set_qp_state((qp_num - 2), XRNIC_QP_FREE);
+
+	memset((void *)qp_info, 0, sizeof(struct xrnic_qp_info));
+
+	qp_attr->rq_buf_ba_ca = qp_info->rq_buf_ba_ca;
+	qp_attr->rq_buf_ba_ca_phys = qp_info->rq_buf_ba_ca_phys;
+	qp_attr->sq_ba = qp_info->sq_ba;
+	qp_attr->sq_ba_phys = qp_info->sq_ba_phys;
+	qp_attr->cq_ba = qp_info->cq_ba;
+	qp_attr->cq_ba_phys = qp_info->cq_ba_phys;
+
+	qp_attr->sq_depth = qp_info->sq_depth;
+	qp_attr->rq_depth = qp_info->rq_depth;
+	qp_attr->send_sge_size = qp_info->send_sge_size;
+	qp_attr->send_pkt_size = qp_info->send_pkt_size;
+	qp_attr->recv_pkt_size = qp_info->recv_pkt_size;
+	qp_attr->cm_id = NULL;
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_destroy_qp);
+
+/**
+ * xrnic_reset_io_qp() - This function resets the QP config
+ * @qp_attr: QP memory map or config
+ */
+void xrnic_reset_io_qp(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_ctrl_config *xrnic_ctrl_config;
+ struct xrnic_reg_map *reg_map;
+ unsigned long timeout;
+ u32 sq_pi_db_val, cq_head_val;
+ u32 rq_ci_db_val, stat_rq_pi_db_val;
+ u32 config_value;
+ int qp_num = qp_attr->qp_num - 2;
+ struct rdma_qp_attr *rdma_qp_attr;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ reg_map = xrnic_dev->xrnic_mmap.xrnic_regs;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ xrnic_ctrl_config = &reg_map->xrnic_ctrl_config;
+
+ /* 1. WAIT FOR SQ/OSQ EMPTY TO BE SET */
+ while (!((ioread32(&rdma_qp_attr->qp_status) >> 9) & 0x3))
+ ;
+
+ /* 2 WAIT FOR register values SQ_PI_DB == CQ_HEAD */
+ sq_pi_db_val = ioread32(((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ cq_head_val = ioread32(((void *)(&rdma_qp_attr->cq_head)));
+
+ timeout = jiffies;
+ while (!(sq_pi_db_val == cq_head_val)) {
+ sq_pi_db_val = ioread32(((void *)(&rdma_qp_attr->sq_pi_db)));
+ cq_head_val = ioread32(((void *)(&rdma_qp_attr->cq_head)));
+ if (time_after(jiffies, (timeout + 1 * HZ)))
+ break;
+ }
+
+ /* 3. WAIT FOR register values STAT_RQ_PI_DB == RQ_CI_DB */
+ rq_ci_db_val = ioread32(((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ stat_rq_pi_db_val = ioread32(((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+
+ timeout = jiffies;
+ while (!(rq_ci_db_val == stat_rq_pi_db_val)) {
+ rq_ci_db_val = ioread32(((void *)(&rdma_qp_attr->rq_ci_db)));
+ stat_rq_pi_db_val = ioread32(((void *)
+ (&rdma_qp_attr->stat_rq_pi_db)));
+ if (time_after(jiffies, (timeout + 1 * HZ)))
+ break;
+ }
+ /* 4. SET QP_CONF register HW handshake disable to 1 */
+ config_value = ioread32(((void *)(&rdma_qp_attr->qp_conf)));
+ config_value = config_value | XRNIC_QP_CONFIG_HW_HNDSHK_DIS |
+ XRNIC_QP_CONFIG_RQ_INTR_EN | XRNIC_QP_CONFIG_CQE_INTR_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ DEBUG_LOG("QP config value is 0x%x\n", config_value);
+
+	config_value = (xrnic_mmap->rq_wrptr_db_add_phys +
+			(4 * (qp_attr->qp_num - 1))) & 0xffffffff;
+	iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_wrptr_db_add)));
+
+ config_value = (xrnic_mmap->sq_cmpl_db_add_phys +
+ (4 * (qp_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_cmpl_db_add)));
+
+ /* 5. SET QP_CONF register QP ENABLE TO 0 and QP_ADV_CONF register
+ * SW OVERRIDE TO 1
+ */
+ config_value = ioread32(((void *)(&rdma_qp_attr->qp_conf)));
+ config_value = config_value & ~XRNIC_QP_CONFIG_QP_ENABLE;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ /* Enable SW override enable */
+ config_value = 0x1;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+	/* 6. Initialize the QP under reset: */
+ config_value = 0x0;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+
+ config_value = qp_attr->rq_buf_ba_ca_phys & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_buf_ba_ca)));
+
+ config_value = 0x0;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->stat_curr_sqptr_pro)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->cq_head)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_psn)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->last_rq_req)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_msn)));
+
+	/* 7. Initialize Ethernet-side registers:
+	 * not needed here, as this is done during connect initialization.
+	 */
+
+ /* 8. Set QP_CONF register QP ENABLE TO 1 */
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->qp_conf)));
+ config_value = config_value | XRNIC_QP_CONFIG_QP_ENABLE;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ config_value = ioread32((void *)&rdma_qp_attr->qp_conf);
+ config_value = config_value & ~XRNIC_QP_CONFIG_UNDER_RECOVERY;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ /* 9.Set QP_ADV_CONF register SW_OVERRIDE SET TO 0 */
+ /* Disable SW override enable */
+ config_value = 0;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ qp_attr->rq_wrptr_db_local = 0;
+ qp_attr->sq_cmpl_db_local = 0;
+ qp_attr->rq_ci_db_local = 0;
+ qp_attr->sq_pi_db_local = 0;
+ qp_attr->sqhd = 0;
+}
+
+/**
+ * xrnic_reset_io_qp_sq_cq_ptr() - This function resets SQ, CQ pointers of QP
+ * @qp_attr: QP config
+ * @hw_hs_info: QP HW handshake config
+ */
+void xrnic_reset_io_qp_sq_cq_ptr(struct xrnic_qp_attr *qp_attr,
+ struct xrnic_hw_handshake_info *hw_hs_info)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_reg_map *reg_map;
+ struct xrnic_ctrl_config *xrnic_ctrl_config;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = qp_attr->xrnic_mmap;
+ reg_map = xrnic_dev->xrnic_mmap.xrnic_regs;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ xrnic_ctrl_config = &reg_map->xrnic_ctrl_config;
+
+ /* Enable SW override enable */
+ config_value = 0x1;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ if (!hw_hs_info)
+ goto enable_hw_hs;
+
+ config_value = 0;
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->cq_head)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->stat_curr_sqptr_pro)));
+
+ config_value = hw_hs_info->rq_wrptr_db_add;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_wrptr_db_add)));
+
+ config_value = hw_hs_info->sq_cmpl_db_add;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_cmpl_db_add)));
+
+ config_value = ioread32((void *)(&rdma_qp_attr->stat_rq_pi_db));
+
+ config_value = hw_hs_info->cnct_io_conf_l_16b |
+ ((config_value & 0xFFFF) << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_config->cnct_io_conf)));
+enable_hw_hs:
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE |
+ xrnic_dev->pmtu |
+ XRNIC_QP_CONFIG_RQ_BUFF_SZ(qp_attr->recv_pkt_size);
+
+ if (qp_attr->ip_addr_type == AF_INET6)
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ /* Disable SW override enable */
+
+ config_value = 0;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->cq_head)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ config_value = ioread32(((void *)
+ (&rdma_qp_attr->stat_curr_sqptr_pro)));
+
+ qp_attr->rq_wrptr_db_local = 0;
+ qp_attr->sq_cmpl_db_local = 0;
+ qp_attr->rq_ci_db_local = 0;
+ qp_attr->sq_pi_db_local = 0;
+ qp_attr->sqhd = 0;
+}
+
+/**
+ * xrnic_reset_io_qp_rq_ptr() - This function resets RQ pointers of QP
+ * @qp_attr: QP config
+ */
+void xrnic_reset_io_qp_rq_ptr(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_reg_map *reg_map;
+ struct xrnic_ctrl_config *xrnic_ctrl_config;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ reg_map = xrnic_dev->xrnic_mmap.xrnic_regs;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ xrnic_ctrl_config = &reg_map->xrnic_ctrl_config;
+
+ config_value = 0x1;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ config_value = 0x0;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+
+ config_value = qp_attr->rq_buf_ba_ca_phys & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_buf_ba_ca)));
+
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE |
+ XRNIC_QP_CONFIG_CQE_INTR_EN | xrnic_dev->pmtu |
+ XRNIC_QP_CONFIG_RQ_BUFF_SZ(qp_attr->recv_pkt_size) |
+ XRNIC_QP_CONFIG_HW_HNDSHK_DIS |
+ XRNIC_QP_CONFIG_CQE_WRITE_EN;
+ if (qp_attr->ip_addr_type == AF_INET6)
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ /* Disable SW override enable */
+ config_value = 0x0;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->stat_rq_buf_ca)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+}
+
+/**
+ * xrnic_qp_send_pkt() - This function sends packets
+ * @qp_attr: QP config
+ * @sq_pkt_count: Number of packets to send
+ * @return: XRNIC_SUCCESS if successful,
+ *	    otherwise an error code
+ */
+int xrnic_qp_send_pkt(struct xrnic_qp_attr *qp_attr, u32 sq_pkt_count)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0, sq_pkt_count_tmp;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+
+ config_value = ioread32((char *)xrnic_mmap->sq_cmpl_db_add +
+ (4 * (qp_attr->qp_num - 1)));
+ if (config_value == 0)
+ sq_pkt_count_tmp = qp_attr->sq_depth;
+ else if (qp_attr->sq_cmpl_db_local >= config_value)
+ sq_pkt_count_tmp = (config_value + qp_attr->sq_depth) -
+ qp_attr->sq_cmpl_db_local;
+ else
+ sq_pkt_count_tmp = config_value - qp_attr->sq_cmpl_db_local;
+ if (sq_pkt_count_tmp < sq_pkt_count)
+ return -XRNIC_INVALID_PKT_CNT;
+
+	/* sq_cmpl_db_local must be kept in sync with the hardware's
+	 * queue-specific sq_cmpl_db_local register; it is also needed
+	 * when packets are resent.
+	 */
+
+ qp_attr->sq_cmpl_db_local = qp_attr->sq_cmpl_db_local + sq_pkt_count;
+ if (qp_attr->sq_cmpl_db_local > qp_attr->sq_depth)
+ qp_attr->sq_cmpl_db_local = qp_attr->sq_cmpl_db_local
+ - qp_attr->sq_depth;
+ config_value = qp_attr->sq_cmpl_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ return XRNIC_SUCCESS;
+}
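+
+/* Worked example of the credit check above: with sq_depth = 64, a
+ * hardware completion pointer of 10 and sq_cmpl_db_local = 60, the free
+ * slot count is (10 + 64) - 60 = 14, so any sq_pkt_count greater than 14
+ * is rejected with -XRNIC_INVALID_PKT_CNT.
+ */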
+
+/**
+ * xrnic_qp_recv_pkt() - This function receives packets
+ * @qp_attr: QP config
+ * @rq_pkt_count: receive packet count
+ * @return: XRNIC_SUCCESS if successful,
+ *	    otherwise an error code
+ */
+int xrnic_qp_recv_pkt(struct xrnic_qp_attr *qp_attr, u32 rq_pkt_count)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0, rq_pkt_count_tmp;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+
+ config_value = ioread32((char *)xrnic_mmap->rq_wrptr_db_add +
+ (4 * (qp_attr->qp_num - 1)));
+ if (config_value == 0)
+ rq_pkt_count_tmp = qp_attr->rq_depth;
+ else if (qp_attr->rq_wrptr_db_local >= config_value)
+ rq_pkt_count_tmp = (config_value + qp_attr->rq_depth) -
+ qp_attr->rq_wrptr_db_local;
+ else
+ rq_pkt_count_tmp = config_value - qp_attr->rq_wrptr_db_local;
+
+ if (rq_pkt_count_tmp < rq_pkt_count)
+ return -XRNIC_INVALID_PKT_CNT;
+	/* rq_wrptr_db_local must be kept in sync with the hardware's
+	 * queue-specific register; it is also needed when packets are
+	 * resent.
+	 */
+
+ qp_attr->rq_wrptr_db_local = qp_attr->rq_wrptr_db_local + rq_pkt_count;
+ if (qp_attr->rq_wrptr_db_local > qp_attr->rq_depth)
+ qp_attr->rq_wrptr_db_local = qp_attr->rq_wrptr_db_local
+ - qp_attr->rq_depth;
+
+ config_value = qp_attr->rq_wrptr_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ return XRNIC_SUCCESS;
+}
+
+/**
+ * xrnic_qp1_send_mad_pkt() - This function initiates sending a management
+ * datagram packet.
+ * @send_sgl_temp: Scatter gather list
+ * @qp1_attr: QP1 info
+ * @send_pkt_size: Send packet size
+ */
+void xrnic_qp1_send_mad_pkt(void *send_sgl_temp,
+ struct xrnic_qp_attr *qp1_attr, u32 send_pkt_size)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp1_attr *rdma_qp1_attr;
+ u32 config_value = 0;
+ struct wr *sq_wr; /*sq_ba*/
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp1_attr->xrnic_mmap;
+ rdma_qp1_attr = &xrnic_mmap->xrnic_regs->rdma_qp1_attr;
+
+	/* sq_cmpl_db_local must be kept in sync with the hardware's
+	 * queue-specific sq_cmpl_db_local register; it is also needed
+	 * when packets are resent.
+	 */
+ sq_wr = (struct wr *)qp1_attr->sq_ba + qp1_attr->sq_cmpl_db_local;
+	/* All packets will be 4096 bytes; that is mandatory. */
+ sq_wr->length = send_pkt_size;
+ memcpy((void *)((char *)qp1_attr->send_sgl +
+ (qp1_attr->sq_cmpl_db_local * XRNIC_SEND_SGL_SIZE)),
+ (const void *)send_sgl_temp,
+ XRNIC_SEND_SGL_SIZE);
+ qp1_attr->sq_cmpl_db_local = qp1_attr->sq_cmpl_db_local + 1;
+
+ config_value = qp1_attr->sq_cmpl_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->sq_pi_db)));
+
+ if (qp1_attr->sq_cmpl_db_local == XRNIC_SQ_DEPTH)
+ qp1_attr->sq_cmpl_db_local = 0;
+}
+
+/**
+ * xrnic_qp_pkt_recv() - This function process received data packets
+ * @qp_attr: QP info on which data packet has been received
+ */
+static void xrnic_qp_pkt_recv(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap = (struct xrnic_memory_map *)
+ qp_attr->xrnic_mmap;
+ u32 config_value = 0;
+ unsigned long flag;
+ int rq_pkt_count = 0;
+ struct xrnic_rdma_cm_id *cm_id = qp_attr->cm_id;
+
+ spin_lock_irqsave(&qp_attr->qp_lock, flag);
+ config_value = ioread32((char *)xrnic_mmap->rq_wrptr_db_add +
+ (4 * (qp_attr->qp_num - 1)));
+ if (qp_attr->rq_wrptr_db_local == config_value) {
+ spin_unlock_irqrestore(&qp_attr->qp_lock, flag);
+ return;
+ }
+ if (qp_attr->rq_wrptr_db_local > config_value) {
+ rq_pkt_count = (config_value + qp_attr->rq_depth) -
+ qp_attr->rq_wrptr_db_local;
+ } else {
+ rq_pkt_count = config_value - qp_attr->rq_wrptr_db_local;
+ }
+
+ cm_id->qp_info.xrnic_rq_event_handler(rq_pkt_count,
+ cm_id->qp_info.rq_context);
+
+ spin_unlock_irqrestore(&qp_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_wqe_completed() - This function process completion interrupts
+ * @qp_attr: QP info for which completion is received
+ */
+static void xrnic_wqe_completed(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0;
+ unsigned long flag;
+ struct xrnic_rdma_cm_id *cm_id = qp_attr->cm_id;
+ int qp_num = qp_attr->qp_num;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num - 2];
+	/* sq_cmpl_db_local must be kept in sync with the hardware's
+	 * queue-specific sq_cmpl_db_local register; it is also needed
+	 * when packets are resent.
+	 */
+ spin_lock_irqsave(&qp_attr->qp_lock, flag);
+ config_value = ioread32((char *)&rdma_qp_attr->cq_head);
+ cm_id->qp_info.xrnic_sq_event_handler(config_value,
+ cm_id->qp_info.sq_context);
+ spin_unlock_irqrestore(&qp_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_wqe_completed_intr_handler() - Interrupt handler for completion
+ * interrupt type
+ * @data: XRNIC device info
+ */
+void xrnic_wqe_completed_intr_handler(unsigned long data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_qp_attr *qp_attr;
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ unsigned long cq_intr = 0, qp_num, i, j;
+ unsigned long flag;
+
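+	/* Each 32-bit CQ interrupt status register covers 32 QPs: bit j of
+	 * register i maps to QP number (i * 32) + j, hence the
+	 * (i << 5) + j below. Each bit is acked by writing it back before
+	 * the completion is dispatched.
+	 */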
+ for (i = 0 ; i < XRNIC_RQ_CQ_INTR_STS_REG_SUPPORTED ; i++) {
+ cq_intr = ioread32((void __iomem *)
+ ((&xrnic_ctrl_config->cq_intr_sts_1) +
+ (i * 4)));
+
+ if (!cq_intr)
+ continue;
+
+ for (j = find_first_bit(&cq_intr, XRNIC_REG_WIDTH);
+ j < XRNIC_REG_WIDTH;
+ j = find_next_bit(&cq_intr, XRNIC_REG_WIDTH, j + 1)) {
+ qp_num = (i << 5) + j;
+ iowrite32((1 << j), (void __iomem *)
+ ((&xrnic_ctrl_config->cq_intr_sts_1) +
+ (i * 4)));
+ qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ if (qp_attr->cm_id)
+ xrnic_wqe_completed(qp_attr);
+ else
+ pr_err("Received CM ID is NULL\n");
+ }
+ }
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en |
+ WQE_COMPLETED_INTR_EN;
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_qp_pkt_recv_intr_handler() - Interrupt handler for data
+ * packet interrupt
+ * @data: XRNIC device info
+ */
+void xrnic_qp_pkt_recv_intr_handler(unsigned long data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_memory_map *xrnic_mmap =
+ (struct xrnic_memory_map *)&xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_qp_attr *qp_attr;
+ struct rdma_qp_attr *rdma_qp_attr;
+ struct xrnic_reg_map *regs;
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ unsigned long rq_intr = 0, qp_num, i, j, config_value;
+ unsigned long flag;
+
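+	/* The RQ interrupt status registers use the same (i << 5) + j
+	 * QP-number mapping as the CQ path above.
+	 */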
+ for (i = 0 ; i < XRNIC_RQ_CQ_INTR_STS_REG_SUPPORTED ; i++) {
+ rq_intr = ioread32((void __iomem *)
+ (&xrnic_ctrl_config->rq_intr_sts_1 + (i * 4)));
+
+ if (!rq_intr)
+ continue;
+
+ for (j = find_first_bit(&rq_intr, XRNIC_REG_WIDTH);
+ j < XRNIC_REG_WIDTH; j = find_next_bit
+ (&rq_intr, XRNIC_REG_WIDTH, j + 1)) {
+ qp_num = (i << 5) + j;
+			/* TODO: Rework this as a work request, since other
+			 * admin QPs require wait events.
+			 */
+ iowrite32((1 << j), ((void __iomem *)
+ (&xrnic_ctrl_config->rq_intr_sts_1) +
+ (i * 4)));
+ qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ regs = xrnic_mmap->xrnic_regs;
+ rdma_qp_attr = &regs->rdma_qp_attr[qp_num - 2];
+ config_value = ioread32((void *)
+ (&rdma_qp_attr->qp_conf));
+ if (qp_attr->cm_id &&
+ (config_value & XRNIC_QP_CONFIG_HW_HNDSHK_DIS)) {
+ xrnic_qp_pkt_recv(qp_attr);
+			} else {
+				if (!qp_attr->cm_id)
+					pr_err("Received CM ID is NULL\n");
+				else
+					pr_err("HW handshake is enabled\n");
+			}
+ }
+ }
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en |
+ QP_PKT_RCVD_INTR_EN;
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_qp_fatal_handler() - Interrupt handler for QP fatal interrupt type
+ * @data: XRNIC device info
+ */
+void xrnic_qp_fatal_handler(unsigned long data)
+{
+ struct xrnic_memory_map *xrnic_mmap =
+ (struct xrnic_memory_map *)&xrnic_dev->xrnic_mmap;
+ struct xrnic_ctrl_config *xrnic_conf =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ struct rdma_qp_attr *rdma_qp_attr;
+ int i, err_entries;
+ unsigned long timeout;
+ unsigned long config_value, qp_num, qp, sq_pi_db_val, cq_head_val;
+ struct xrnic_qp_attr *qp_attr;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ err_entries = ioread32((void *)&xrnic_conf->in_errsts_q_wrptr);
+ pr_info("No of QPs in Fatal: %d\r\n", err_entries - in_err_wr_ptr);
+ for (i = 0; i < (err_entries - in_err_wr_ptr); i++) {
+ qp_num = ioread32((char *)xrnic_mmap->in_errsts_q_ba +
+ ((8 * in_err_wr_ptr) + (8 * i)));
+ qp_num = (qp_num & 0xFFFF0000) >> 16;
+ qp = qp_num - 2;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp];
+ if (rdma_qp_attr) {
+ while (!((ioread32(&rdma_qp_attr->qp_status) >> 9) &
+ 0x3))
+ DEBUG_LOG("Fatal wait for SQ/OSQ empty\n");
+
+ /* 2 WAIT FOR register values SQ_PI_DB == CQ_HEAD */
+ sq_pi_db_val = ioread32(((void *)
+ (&rdma_qp_attr->sq_pi_db)));
+
+ cq_head_val = ioread32((void *)&rdma_qp_attr->cq_head);
+
+ timeout = jiffies;
+ while (!(sq_pi_db_val == cq_head_val)) {
+ sq_pi_db_val = ioread32(((void *)
+ (&rdma_qp_attr->sq_pi_db)));
+ cq_head_val = ioread32(((void *)
+ (&rdma_qp_attr->cq_head)));
+ if (time_after(jiffies, (timeout + 1 * HZ))) {
+ pr_info("SQ PI != CQ Head\n");
+ break;
+ }
+ }
+
+			/* Poll and wait for register value
+			 * RESP_HNDL_STS.sq_pici_db_check_en == '1'
+			 */
+ while (!((ioread32(&xrnic_conf->resp_handler_status)
+ >> 16) & 0x1))
+ DEBUG_LOG("waiting for RESP_HNDL_STS\n");
+
+ config_value = ioread32((void *)
+ &rdma_qp_attr->qp_conf);
+ config_value = config_value &
+ (~XRNIC_QP_CONFIG_QP_ENABLE);
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->qp_conf)));
+
+ config_value = ioread32((void *)
+ &rdma_qp_attr->qp_conf);
+ config_value = config_value |
+ XRNIC_QP_CONFIG_UNDER_RECOVERY;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->qp_conf)));
+
+ /* Calling CM Handler to disconnect QP.*/
+ qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event =
+ XRNIC_DREQ_RCVD;
+ cm_id_info->conn_event_info.status = 1;
+ cm_id_info->conn_event_info.private_data_len =
+ 0;
+ cm_id_info->conn_event_info.private_data =
+ NULL;
+ qp_attr->cm_id->xrnic_cm_handler
+ (qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ qp_attr->cm_id = NULL;
+ } else {
+ pr_err("Received CM ID is NULL\n");
+ }
+ }
+ in_err_wr_ptr++;
+ }
+}
+
+/**
+ * xrnic_qp1_hw_configuration() - This function configures the QP1 registers
+ * @return: 0 on successful QP1 configuration
+ */
+int xrnic_qp1_hw_configuration(void)
+{
+ struct xrnic_memory_map *xrnic_mmap = (struct xrnic_memory_map *)
+ &xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp1_attr = (struct xrnic_qp_attr *)
+ &xrnic_dev->qp1_attr;
+ struct rdma_qp1_attr *rdma_qp1_attr;
+ u32 config_value = 0;
+
+ qp1_attr->qp_num = 1;
+ rdma_qp1_attr = &xrnic_dev->xrnic_mmap.xrnic_regs->rdma_qp1_attr;
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE | xrnic_dev->pmtu |
+ XRNIC_QP1_CONFIG_RQ_BUFF_SZ |
+ XRNIC_QP_CONFIG_RQ_INTR_EN |
+ XRNIC_QP_CONFIG_HW_HNDSHK_DIS;
+
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->qp_conf)));
+
+ config_value = (xrnic_mmap->rq_buf_ba_ca_phys +
+ ((qp1_attr->qp_num - 1) * XRNIC_RECV_PKT_SIZE *
+ XRNIC_RQ_DEPTH)) & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->rq_buf_ba_ca)));
+
+ qp1_attr->rq_buf_ba_ca = xrnic_mmap->rq_buf_ba_ca +
+ ((qp1_attr->qp_num - 1) *
+ XRNIC_RECV_PKT_SIZE *
+ XRNIC_RQ_DEPTH);
+
+ qp1_attr->rq_buf_ba_ca_phys = config_value;
+
+ config_value = xrnic_mmap->sq_ba_phys + ((qp1_attr->qp_num - 1) *
+ XRNIC_SEND_PKT_SIZE * XRNIC_SQ_DEPTH);
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->sq_ba)));
+
+ qp1_attr->sq_ba = (struct wr *)((void *)xrnic_mmap->sq_ba +
+ ((qp1_attr->qp_num - 1) *
+ XRNIC_SEND_PKT_SIZE *
+ XRNIC_SQ_DEPTH));
+ qp1_attr->sq_ba_phys = config_value;
+
+ qp1_attr->send_sgl_phys = xrnic_mmap->send_sgl_phys +
+ (XRNIC_SEND_SGL_SIZE *
+ XRNIC_SQ_DEPTH *
+ (qp1_attr->qp_num - 1));
+ qp1_attr->send_sgl = xrnic_mmap->send_sgl +
+ (XRNIC_SEND_SGL_SIZE *
+ XRNIC_SQ_DEPTH *
+ (qp1_attr->qp_num - 1));
+
+ xrnic_fill_wr(qp1_attr, XRNIC_SQ_DEPTH);
+
+ config_value = xrnic_mmap->cq_ba_phys + ((qp1_attr->qp_num - 1) *
+ XRNIC_SQ_DEPTH * sizeof(struct cqe));
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->cq_ba)));
+
+ qp1_attr->cq_ba = (struct cqe *)(xrnic_mmap->cq_ba +
+ ((qp1_attr->qp_num - 1) *
+ XRNIC_SQ_DEPTH *
+ sizeof(struct cqe)));
+ config_value = (xrnic_mmap->rq_wrptr_db_add_phys +
+ (4 * (qp1_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp1_attr->rq_wrptr_db_add)));
+
+ config_value = (xrnic_mmap->sq_cmpl_db_add_phys +
+ (4 * (qp1_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp1_attr->sq_cmpl_db_add)));
+
+ config_value = XRNIC_SQ_DEPTH | (XRNIC_RQ_DEPTH << 16);
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->q_depth)));
+
+ config_value = (xrnic_mmap->stat_rq_buf_ca_phys +
+ (4 * (qp1_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp1_attr->stat_rq_buf_ca)));
+
+ config_value = XRNIC_QP_TIMEOUT_CONFIG_TIMEOUT |
+ XRNIC_QP_TIMEOUT_CONFIG_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_NAK_TVAL;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->timeout_conf)));
+ qp1_attr->qp1_attr = (struct xrnic_qp_attr *)&xrnic_dev->qp1_attr;
+ qp1_attr->rq_wrptr_db_local = 0;
+ qp1_attr->sq_cmpl_db_local = 0;
+ qp1_attr->rq_ci_db_local = 0;
+ qp1_attr->sq_pi_db_local = 0;
+
+ qp1_attr->resend_count = 0;
+ qp1_attr->local_cm_id = htonl(qp1_attr->qp_num);
+ qp1_attr->remote_cm_id = 0;
+
+ qp1_attr->curr_state = XRNIC_LISTEN;
+
+ qp1_attr->sqhd = 0;
+ qp1_attr->qp_type = XRNIC_QPT_UC;
+ qp1_attr->ip_addr_type = 0;
+
+ qp1_attr->xrnic_mmap = &xrnic_dev->xrnic_mmap;
+
+ spin_lock_init(&qp1_attr->qp_lock);
+ return 0;
+}
+
+/**
+ * xrnic_display_qp_reg() - This function displays qp register info
+ * @qp_num: QP num for which register dump is required
+ */
+void xrnic_display_qp_reg(int qp_num)
+{
+ int i;
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr =
+ &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num - 2];
+
+ for (i = 0; i < 45; i++)
+ pr_info("0x%X: 0x%08X\n",
+ (0x84020000 + (0x100 * (qp_num + 1)) + (i * 4)),
+ ioread32((void __iomem *)rdma_qp_attr + (i * 4)));
+}
+
+/**
+ * xrnic_qp_timer() - QP timer callback; handles CM timeout states
+ * @data: the timer_list embedded in the QP attribute info
+ */
+void xrnic_qp_timer(struct timer_list *data)
+{
+	struct xrnic_qp_attr *qp_attr = from_timer(qp_attr, data, qp_timer);
+ struct xrnic_qp_attr *qp1_attr = qp_attr->qp1_attr;
+ enum xrnic_rej_reason reason;
+ enum xrnic_msg_rej msg;
+ unsigned long flag;
+ int qp1_send_pkt_size;
+
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ if (qp_attr->ip_addr_type == AF_INET)
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ else
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+ if (qp_attr->curr_state == XRNIC_REJ_SENT) {
+ DEBUG_LOG("REJ SENT\n");
+ if (qp_attr->resend_count < XRNIC_REJ_RESEND_COUNT) {
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = qp_attr->resend_count + 1;
+ qp_attr->curr_state = XRNIC_REJ_SENT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else {
+ qp_attr->resend_count = 0;
+ qp_attr->remote_cm_id = 0;
+ xrnic_reset_io_qp(qp_attr);
+ memset((void *)&qp_attr->mac_addr, 0,
+ XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ xrnic_qp_app_configuration(qp_attr->qp_num,
+ XRNIC_HW_QP_DISABLE);
+ qp_attr->curr_state = XRNIC_LISTEN;
+ }
+ } else if (qp_attr->curr_state == XRNIC_REP_SENT) {
+ DEBUG_LOG("REP SENT\n");
+ if (qp_attr->resend_count < XRNIC_REJ_RESEND_COUNT) {
+ qp_attr->curr_state = XRNIC_RTU_TIMEOUT;
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = qp_attr->resend_count + 1;
+ qp_attr->curr_state = XRNIC_REP_SENT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else {
+ reason = XRNIC_REJ_TIMEOUT;
+ msg = XRNIC_REJ_REP;
+ xrnic_cm_prepare_rej(qp_attr, msg, reason);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ }
+ } else if (qp_attr->curr_state == XRNIC_MRA_RCVD) {
+ DEBUG_LOG("MRA Received\n");
+ qp_attr->curr_state = XRNIC_RTU_TIMEOUT;
+
+ reason = XRNIC_REJ_TIMEOUT;
+ msg = XRNIC_REJ_TIMEOUT;
+ xrnic_cm_prepare_rej(qp_attr, msg, reason);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else if (qp_attr->curr_state == XRNIC_DREQ_SENT) {
+ DEBUG_LOG("Disconnect Req Sent\n");
+ if (qp_attr->resend_count < XRNIC_DREQ_RESEND_COUNT) {
+ qp_attr->curr_state = XRNIC_DREP_TIMEOUT;
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = qp_attr->resend_count + 1;
+ qp_attr->curr_state = XRNIC_DREQ_SENT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else {
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ }
+ } else if (qp_attr->curr_state == XRNIC_TIMEWAIT) {
+ DEBUG_LOG("In time wait state\n");
+ qp_attr->resend_count = 0;
+ qp_attr->remote_cm_id = 0;
+#ifdef DISPLAY_REGS_ON_DISCONNECT
+ xrnic_display_qp_reg(qp_attr->qp_num);
+#endif
+ xrnic_reset_io_qp(qp_attr);
+ memset((void *)&qp_attr->mac_addr, 0, XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ xrnic_qp_app_configuration(qp_attr->qp_num,
+ XRNIC_HW_QP_DISABLE);
+ qp_attr->curr_state = XRNIC_LISTEN;
+ } else {
+ qp_attr->resend_count = 0;
+ qp_attr->qp_timer.expires = 0;
+ }
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_qp_app_configuration() - This function programs the QP registers
+ * @qp_num: QP num to configure
+ * @hw_qp_status: whether to enable or disable the HW QP
+ */
+void xrnic_qp_app_configuration(int qp_num,
+ enum xrnic_hw_qp_status hw_qp_status)
+{
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ struct rdma_qp_attr *rdma_qp_attr =
+ &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num - 2];
+ u32 config_value = 0;
+ int recv_pkt_size = qp_attr->recv_pkt_size;
+
+ /* Host number will directly map to local cm id.*/
+ if (hw_qp_status == XRNIC_HW_QP_ENABLE) {
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE |
+ XRNIC_QP_CONFIG_RQ_INTR_EN |
+ XRNIC_QP_CONFIG_CQE_INTR_EN | xrnic_dev->pmtu |
+ XRNIC_QP_CONFIG_RQ_BUFF_SZ(recv_pkt_size) |
+ XRNIC_QP_CONFIG_HW_HNDSHK_DIS |
+ XRNIC_QP_CONFIG_CQE_WRITE_EN;
+ } else if (hw_qp_status == XRNIC_HW_QP_DISABLE) {
+ /* Clearing QP_ENABLE disables the QP; write an all-zero config */
+ config_value = 0;
+ } else {
+ DEBUG_LOG("Invalid HW QP status\n");
+ }
+ if (qp_attr->ip_addr_type == AF_INET6)
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ config_value = qp_attr->rq_buf_ba_ca_phys;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_buf_ba_ca)));
+
+ config_value = qp_attr->sq_ba_phys;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_ba)));
+
+ config_value = qp_attr->cq_ba_phys;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->cq_ba)));
+
+ config_value = qp_attr->sq_depth | (qp_attr->rq_depth << 16);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->q_depth)));
+
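+ /*
+ * Seed last_rq_req with the starting PSN; the opcode field is preset
+ * to RC SEND-only, which is assumed to be the reset value the ERNIC
+ * HW expects before the first request arrives.
+ */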
+ config_value = (qp_attr->starting_psn |
+ (IB_OPCODE_RC_SEND_ONLY << 24));
+ iowrite32(config_value, (void *)&rdma_qp_attr->last_rq_req);
+
+ config_value = be32_to_cpu(qp_attr->ipv4_addr);
+ iowrite32(config_value, (void *)&rdma_qp_attr->ip_dest_addr1);
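+ /* The destination MAC is split across two registers: bytes 2-5 go
+ * to the LSB register, bytes 0-1 to the MSB register.
+ */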
+ config_value = ((qp_attr->mac_addr[2] << 24) |
+ (qp_attr->mac_addr[3] << 16) |
+ (qp_attr->mac_addr[4] << 8) |
+ qp_attr->mac_addr[5]);
+ iowrite32(config_value, (void *)&rdma_qp_attr->mac_dest_addr_lsb);
+
+ config_value = ((qp_attr->mac_addr[0] << 8) | qp_attr->mac_addr[1]);
+ iowrite32(config_value, (void *)&rdma_qp_attr->mac_dest_addr_msb);
+
+ config_value = qp_attr->remote_qp;
+ iowrite32(config_value, (void *)&rdma_qp_attr->dest_qp_conf);
+
+ iowrite32(qp_attr->rem_starting_psn, (void *)&rdma_qp_attr->sq_psn);
+#ifdef ERNIC_MEM_REGISTER
+ if (qp_attr->pd)
+ iowrite32(qp_attr->pd, ((void *)(&rdma_qp_attr->pd)));
+#endif
+}
+
+/**
+ * xrnic_qp_hw_configuration() - This function configures QP registers
+ * @qp_num: QP num
+ */
+void xrnic_qp_hw_configuration(int qp_num)
+{
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp_attr = &xrnic_dev->qp_attr[qp_num];
+ struct rdma_qp_attr *rdma_qp_attr =
+ &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ u32 config_value = 0;
+
+ /* qp_num starts from 0 while data QPs start from 2 */
+ qp_attr->qp_num = qp_num + 2;
+
+ config_value = XRNIC_QP_ADV_CONFIG_TRAFFIC_CLASS |
+ XRNIC_QP_ADV_CONFIG_TIME_TO_LIVE |
+ XRNIC_QP_ADV_CONFIG_PARTITION_KEY;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_adv_conf)));
+
+ /* DDR addresses for the RQ and SQ doorbells. */
+
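+ /* Each QP owns a 4-byte slot at offset 4 * (qp_num - 1) within the
+ * doorbell areas.
+ */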
+ config_value = xrnic_mmap->rq_wrptr_db_add_phys +
+ (4 * (qp_attr->qp_num - 1));
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_wrptr_db_add)));
+
+ config_value = (xrnic_mmap->sq_cmpl_db_add_phys +
+ (4 * (qp_attr->qp_num - 1)))
+ & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_cmpl_db_add)));
+
+ config_value = (xrnic_mmap->stat_rq_buf_ca_phys +
+ (4 * (qp_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_rq_buf_ca)));
+
+ config_value = XRNIC_QP_TIMEOUT_CONFIG_TIMEOUT |
+ XRNIC_QP_TIMEOUT_CONFIG_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_NAK_TVAL;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->timeout_conf)));
+ qp_attr->qp1_attr = (struct xrnic_qp_attr *)&xrnic_dev->qp1_attr;
+ qp_attr->rq_wrptr_db_local = 0;
+ qp_attr->sq_cmpl_db_local = 0;
+ qp_attr->rq_ci_db_local = 0;
+ qp_attr->sq_pi_db_local = 0;
+ qp_attr->cm_id = NULL;
+ qp_attr->resend_count = 0;
+ qp_attr->local_cm_id = qp_attr->qp_num;
+ qp_attr->remote_cm_id = 0;
+ memset((void *)&qp_attr->mac_addr, 0, XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ qp_attr->sqhd = 0;
+ qp_attr->qp_type = XRNIC_QPT_RC;
+
+ qp_attr->curr_state = XRNIC_LISTEN;
+
+ qp_attr->xrnic_mmap = &xrnic_dev->xrnic_mmap;
+
+ /* State is initialized to XRNIC_LISTEN above; now arm the CM timer */
+ timer_setup(&qp_attr->qp_timer, xrnic_qp_timer, 0);
+
+ spin_lock_init(&qp_attr->qp_lock);
+}
+
+#ifdef EXPERIMENTAL_CODE
+#define XRNIC_REG_MAP_NODE 0
+#define XRNIC_SEND_SGL_NODE 1
+#define XRNIC_CQ_BA_NODE 1
+#define XRNIC_RQ_BUF_NODE 1
+#define XRNIC_SQ_BA_NODE 1
+#define XRNIC_TX_HDR_BUF_NODE 1
+#define XRNIC_TX_SGL_BUF_NODE 1
+#define XRNIC_BYPASS_BUF_NODE 1
+#define XRNIC_ERRPKT_BUF_NODE 1
+#define XRNIC_OUTERR_STS_NODE 1
+
+#define XRNIC_RQWR_PTR_NODE 1
+#define XRNIC_SQ_CMPL_NODE 2
+#define XRNIC_STAT_XRNIC_RQ_BUF_NODE 3
+#else /* ! EXPERIMENTAL_CODE */
+#define XRNIC_REG_MAP_NODE 0
+#define XRNIC_SEND_SGL_NODE 1
+#define XRNIC_CQ_BA_NODE 2
+#define XRNIC_RQ_BUF_NODE 3
+#define XRNIC_SQ_BA_NODE 4
+#define XRNIC_TX_HDR_BUF_NODE 5
+#define XRNIC_TX_SGL_BUF_NODE 6
+#define XRNIC_BYPASS_BUF_NODE 7
+#define XRNIC_ERRPKT_BUF_NODE 8
+#define XRNIC_OUTERR_STS_NODE 9
+#define XRNIC_INERR_STS_NODE 10
+#define XRNIC_RQWR_PTR_NODE 11
+#define XRNIC_SQ_CMPL_NODE 12
+#define XRNIC_STAT_XRNIC_RQ_BUF_NODE 13
+#define XRNIC_DATA_BUF_BA_NODE 14
+#define XRNIC_RESP_ERR_PKT_BUF_BA 15
+#endif /* EXPERIMENTAL_CODE */
diff --git a/drivers/staging/xlnx_ernic/xqp.h b/drivers/staging/xlnx_ernic/xqp.h
new file mode 100644
index 000000000000..442932f66daf
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xqp.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_QP_H
+#define _XRNIC_QP_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/interrupt.h>
+enum qp_type {
+ XRNIC_NOT_ALLOCATED = 1,
+ XRNIC_DISC_CTRL_QP = 2,
+ XRNIC_NVMEOF_CTRL_QP = 3,
+ XRNIC_NVMEOF_IO_QP = 4,
+};
+
+enum ernic_qp_status {
+ XRNIC_QP_FREE,
+ XRNIC_QP_IN_USE,
+};
+
+struct xrnic_qp_attr {
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_qp_attr *qp1_attr;
+ struct xrnic_rdma_cm_id *cm_id;
+ void *send_sgl;
+ u64 send_sgl_phys;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ void *sq_ba;
+ u64 sq_ba_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 send_sge_size;
+ u32 send_pkt_size;
+ u32 recv_pkt_size;
+ u32 qp_num;
+ u32 local_cm_id;
+ u32 remote_cm_id;
+ u32 remote_qpn;
+ u32 qp_status;
+ u32 starting_psn;
+ u32 rem_starting_psn;
+ u8 send_sgl_temp[XRNIC_QP1_SEND_PKT_SIZE];
+ u32 resend_count;
+ u32 rq_wrptr_db_local;
+ u32 sq_cmpl_db_local;
+ u32 rq_ci_db_local;
+ u32 sq_pi_db_local;
+ u16 ip_addr_type; /* DESTINATION ADDR_FAMILY */
+ u32 ipv4_addr; /* DESTINATION IP addr */
+ u8 ipv6_addr[16];
+ u8 mac_addr[6];
+ u32 source_qp_num;
+ /* remote QPN used in active CM; source_qp_num is the source
+ * queue pair number carried in the DETH
+ */
+ u32 remote_qp;
+ enum xrnic_rdma_cm_event_type curr_state;
+ /* DISC or NVMECTRL; the host ID maps directly to a
+ * particular host_no.
+ */
+ enum xrnic_qp_type qp_type;
+ u16 sqhd;
+ /* Direct mapping of host ID to access a particular host_no. */
+ u16 nvmeof_cntlid;
+ u32 nvmeof_qp_id;
+ struct timer_list qp_timer;
+ struct tasklet_struct qp_task;
+ /* kernel locking primitive */
+ spinlock_t qp_lock;
+ char irq_name[32];
+ u32 irq_vect;
+ u32 pd;
+};
+
+enum xrnic_hw_qp_status {
+ XRNIC_HW_QP_ENABLE,
+ XRNIC_HW_QP_DISABLE,
+};
+
+void xrnic_display_qp_reg(int qp_num);
+void xrnic_qp_fatal_handler(unsigned long data);
+void xrnic_qp_timer(struct timer_list *data);
+void xrnic_qp_pkt_recv_intr_handler(unsigned long data);
+void xrnic_qp_task_handler(unsigned long data);
+void xrnic_wqe_completed_intr_handler(unsigned long data);
+
+/* QP Specific function templates */
+int xrnic_qp_recv_pkt(struct xrnic_qp_attr *qp_attr, u32 rq_pkt_count);
+int xrnic_qp_send_pkt(struct xrnic_qp_attr *qp_attr, u32 sq_pkt_count);
+void xrnic_reset_io_qp_rq_ptr(struct xrnic_qp_attr *qp_attr);
+void xrnic_reset_io_qp_sq_cq_ptr(struct xrnic_qp_attr *qp_attr,
+ struct xrnic_hw_handshake_info *hw_hs_info);
+void xrnic_qp_hw_configuration(int qp_num);
+int xrnic_qp1_hw_configuration(void);
+void xrnic_qp_app_configuration(int qp_num,
+ enum xrnic_hw_qp_status hw_qp_status);
+int xrnic_find_free_qp(void);
+int xrnic_set_qp_state(int qp_num, int state);
+
+#ifdef __cplusplus
+ }
+#endif
+#endif /* _XRNIC_QP_H */
diff --git a/drivers/staging/xlnx_ernic/xrocev2.h b/drivers/staging/xlnx_ernic/xrocev2.h
new file mode 100644
index 000000000000..fec90081d094
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xrocev2.h
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_ROCEV2_H
+#define _XRNIC_ROCEV2_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include <linux/udp.h>
+#include <rdma/ib_pack.h>
+
+#define XRNIC_REQ_QPN 0x1
+#define XRNIC_RESPONDER_RESOURCES 0x10
+#define XRNIC_INITIATOR_DEPTH 0x10
+#define XRNIC_REQ_LOCAL_CM_RESP_TOUT 0x11
+#define XRNIC_REQ_REMOTE_CM_RESP_TOUT 0x14
+#define XRNIC_REQ_PATH_PKT_PAYLOAD_MTU 92
+#define XRNIC_REQ_RETRY_COUNT 0x7
+#define XRNIC_REQ_RDC_EXISTS 1
+#define XRNIC_REQ_SRQ 0
+
+#define XRNIC_REJ_INFO_LEN 0
+
+#define XRNIC_MRA_SERVICE_TIMEOUT 0x11
+
+#define XRNIC_REP_END_END_FLOW_CONTROL 0x0
+#define XRNIC_REP_FAIL_OVER_ACCEPTED 0x3
+#define XRNIC_REP_TARGET_ACK_DELAY 0x1F
+#define XRNIC_REP_RNR_RETRY_COUNT 0x7
+
+#define XRNIC_CM_TIMEOUT 0x4
+#define XRNIC_CM_TIMER_TIMEOUT 0x11
+
+enum xrnic_wc_opcod {
+ XRNIC_RDMA_WRITE = 0x0,
+ XRNIC_SEND_ONLY = 0x2,
+ XRNIC_RDMA_READ = 0x4
+};
+
+enum xrnic_msg_rej {
+ XRNIC_REJ_REQ = 0x0,
+ XRNIC_REJ_REP = 0x1,
+ XRNIC_REJ_OTHERS = 0x2,
+};
+
+enum xrnic_msg_mra {
+ XRNIC_MRA_REQ = 0x0,
+ XRNIC_MRA_REP = 0x1,
+ XRNIC_MRA_LAP = 0x2,
+};
+
+enum xrnic_rej_reason {
+ XRNIC_REJ_NO_QP_AVAILABLE = 1,
+ XRNIC_REJ_NO_EE_AVAILABLE = 2,
+ XRNIC_REJ_NO_RESOURCE_AVAILABLE = 3,
+ XRNIC_REJ_TIMEOUT = 4,
+ XRNIC_REJ_UNSUPPORTED_REQ = 5,
+ XRNIC_REJ_INVALID_CM_ID = 6,
+ XRNIC_REJ_INVALID_QPN = 7,
+ XRNIC_REJ_RDC_NOT_EXIST = 11,
+ XRNIC_REJ_PRIM_LID_PORT_NOT_EXIST = 13,
+ XRNIC_REJ_INVALID_MTU = 26,
+ XRNIC_REJ_INSUFFICIENT_RESP_RESOURCE = 27,
+ XRNIC_REJ_CONSUMER_REJECT = 28,
+ XRNIC_REJ_DUPLICATE_LOCAL_CM_ID = 30,
+ XRNIC_REJ_UNSUPPORTED_CLASS_VERSION = 31,
+};
+
+/* MAD common status field */
+struct mad_comm_status {
+ __u8 busy:1;
+ __u8 redir_reqd:1;
+ __u8 invalid_field_code:3;
+ __u8 reserved:3;
+ __u8 class_specific;
+} __packed;
+
+#define XRNIC_MAD_BASE_VER 1
+#define XRNIC_MAD_MGMT_CLASS 0x07
+#define XRNIC_MAD_RESP_BIT 0x0
+#define XRNIC_MAD_COMM_SEND 0x3
+#define XRNIC_MAD_RESERVED 0x0
+
+/* Management Datagram (MAD), 256 bytes in total */
+struct mad {
+ __u8 base_ver;
+ __u8 mgmt_class;
+ __u8 class_version;
+ __u8 resp_bit_method;
+ struct mad_comm_status status; /* 2 bytes */
+ __be16 class_specific;
+ __be64 transaction_id;
+ __be16 attribute_id;
+ __be16 reserved;
+ __be32 attrb_modifier;
+ __be32 data[58];
+} __packed;
+
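+/*
+ * CM REQ message payload. C bit-field ordering is implementation
+ * defined; the layout below assumes the compiler packs the fields to
+ * match the on-wire MAD format.
+ */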
+struct req {
+ __u32 local_cm_id;
+ __u32 reserved1;
+ __u8 service_id[8];
+ __u8 local_ca_guid[8];
+ __u32 reserved2;
+ __u32 local_q_key;
+ __u32 local_qpn:24;
+ __u8 responder_resources:8;
+ __u32 local_eecn:24;
+ __u32 initiator_depth:8;
+ __u32 remote_eecn:24;
+
+ __u32 remote_cm_resp_tout:5;
+ __u32 transport_svc_type:2;
+ __u32 e2e_flow_control:1;
+ __u8 start_psn[3];
+ __u8 local_cm_resp_tout:5;
+ __u8 retry_count: 3;
+ __u16 p_key;
+ __u8 path_packet_payload_mtu:4;
+ __u8 rdc_exists:1;
+ __u8 rnr_retry_count:3;
+ __u8 max_cm_retries:4;
+ __u8 srq:1;
+ __u8 reserved3:3;
+ __u16 primary_local_port_lid;
+ __u16 primary_remote_port_lid;
+ __u64 primary_local_port_gid[2];
+ __u64 primary_remote_port_gid[2];
+ __u32 primary_flow_label:20;
+ __u32 reserved4:6;
+ __u32 primary_packet_rate:6;
+ __u32 primary_traffic_class:8;
+ __u32 primary_hop_limit:8;
+ __u32 primary_sl:4;
+ __u32 primary_subnet_local:1;
+ __u32 reserved5:3;
+ __u32 primary_local_ack_tout:5;
+ __u32 reserved6:3;
+ __u32 alternate_local_port_lid:16;
+ __u32 alternate_remote_port_lid:16;
+ __u64 alternate_local_port_gid[2];
+ __u64 alternate_remote_port_gid[2];
+ __u32 alternate_flow_labe:20;
+ __u32 reserved7:6;
+ __u32 alternate_packet_rate:6;
+ __u32 alternate_traffic_class:8;
+ __u32 alternate_hop_limit:8;
+ __u32 alternate_sl:4;
+ __u32 alternate_subnet_local:1;
+ __u32 reserved8:3;
+ __u32 alternate_local_ack_timeout: 5;
+ __u32 reserved9:3;
+ __u8 private_data[92];
+} __packed;
+
+/* MRA (Message Receipt Acknowledgement) message contents */
+struct mra {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 message_mraed:2;
+ __u8 reserved1:6;
+ __u8 service_timeout:5;
+ __u8 reserved2:3;
+ __u8 private_data[222];
+} __packed;
+
+/* REJ Message contents */
+struct rej {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 message_rejected:2;
+ __u8 reserved1:6;
+ __u8 reject_info_length:7;
+ __u8 reserved2:1;
+ __u16 reason;
+ __u8 additional_reject_info[72];
+ __u8 private_data[148];
+} __packed;
+
+/* REP Message contents */
+struct rep {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u32 local_q_key;
+ __u32 local_qpn:24;
+ __u8 reserved1:8;
+ __u32 local_ee_context:24;
+ __u32 reserved2:8;
+ __u8 start_psn[3];
+ __u8 reserved3;
+ __u8 responder_resources;
+ __u8 initiator_depth;
+ union {
+ __u8 target_fail_end;
+ __u8 target_ack_delay:5;
+ __u8 fail_over_accepted:2;
+ };
+ __u8 end_end_flow_control:1;
+ __u8 rnr_retry_count:3;
+ __u8 sqr:1;
+ __u8 reserved4:4;
+ __u8 local_ca_guid[8];
+ __u8 private_data[196];
+} __packed;
+
+/* RTU indicates that the connection is established,
+ * and that the recipient
+ * may begin transmitting
+ */
+struct rtu {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 private_data[224];
+} __packed;
+
+#define XRNIC_SEND_UD 0x64
+#define XRNIC_SET_SOLICT_EVENT 0x0
+#define XRNIC_RESET_SOLICT_EVENT 0x0
+#define XRNIC_MIGRATION_REQ 0x0
+#define XRNIC_PAD_COUNT 0x0
+#define XRNIC_TRANSPORT_HDR_VER 0x0
+#define XRNIC_DESTINATION_QP 0x1
+#define XRNIC_RESERVED1 0x0
+#define XRNIC_ACK_REQ 0x0
+#define XRNIC_RESERVED2 0x0
+
+struct bth {
+ __u8 opcode;
+ __u8 solicited_event:1;
+ __u8 migration_req:1;
+ __u8 pad_count:2;
+ __u8 transport_hdr_ver:4;
+ __be16 partition_key;
+ __u8 reserved1;
+ __u8 destination_qp[3];
+ __u32 ack_request:1;
+ __u32 reserved2:7;
+ __u32 pkt_seq_num:24;
+} __packed;
+
+#define XRNIC_DETH_RESERVED 0
+struct deth {
+ __be32 q_key;
+ __u8 reserved;
+ __be32 src_qp:24;
+} __packed;
+
+/* DREQ request for communication release*/
+struct dreq {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u32 remote_qpn_eecn:24;
+ __u32 reserved:8;
+ __u8 private_data[220];
+} __packed;
+
+/* DREP - reply to request for communication release */
+struct drep {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 private_data[228];
+} __packed;
+
+/* LAP - load alternate path */
+struct lap {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u32 reserved1;
+ __u32 remote_QPN_EECN:24;
+ __u32 remote_cm_response_timeout:5;
+ __u32 reserved2:3;
+ __u32 reserved3;
+ __u32 alt_local_port_id:16;
+ __u32 alt_remote_port_id:16;
+ __u64 alt_local_port_gid[2];
+ __u64 alt_remote_port_gid[2];
+ __u32 alt_flow_label:20;
+ __u32 reserved4:4;
+ __u32 alt_traffic_class:8;
+ __u32 alt_hope_limit:8;
+ __u32 reserved5:2;
+ __u32 alt_pkt_rate:6;
+ __u32 alt_sl:4;
+ __u32 alt_subnet_local:1;
+ __u32 reserved6:3;
+ __u32 alt_local_ack_timeout:5;
+ __u32 reserved7:3;
+ __u8 private_data[168];
+} __packed;
+
+/* APR - alternate path response */
+struct apr {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 additional_info_length;
+ __u8 ap_status;
+ __u8 reserved1[2];
+ __u8 additional_info[72];
+ __u8 private_data[148];
+} __packed;
+
+enum cm_establishment_states {
+ CLASS_PORT_INFO = 0x1,
+ CONNECT_REQUEST = 0x10, /* Request for connection */
+ MSG_RSP_ACK = 0x11, /* Message Response Ack */
+ CONNECT_REJECT = 0x12, /* Connect Reject */
+ CONNECT_REPLY = 0x13, /* Reply for request communication */
+ READY_TO_USE = 0x14, /* Ready to use */
+ DISCONNECT_REQUEST = 0x15, /* Receive Disconnect req */
+ DISCONNECT_REPLY = 0x16, /* Send Disconnect reply */
+ SERVICE_ID_RESOLUTION_REQ = 0x17,
+ SERVICE_ID_RESOLUTION_REQ_REPLY = 0x18,
+ LOAD_ALTERNATE_PATH = 0x19,
+ ALTERNATE_PATH_RESPONSE = 0x1a,
+};
+
+#define XRNIC_ETH_ALEN 6
+#define XRNIC_ETH_P_IP 0x0800
+#define XRNIC_ETH_P_ARP 0x0806
+#define XRNIC_ETH_HLEN 14
+#define XRNIC_ICRC_SIZE 4
+
+/* Ethernet header */
+struct ethhdr_t {
+ unsigned char h_dest[XRNIC_ETH_ALEN];
+ unsigned char h_source[XRNIC_ETH_ALEN];
+ __be16 eth_type; /* packet type ID field */
+} __packed;
+
+struct ipv4hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 ihl:4, /* Internet Header Length */
+ version:4; /* Version */
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 version:4, /* Version */
+ ihl:4; /* Internet Header Length */
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 tos; /* Type of service */
+ __be16 total_length; /* Total length */
+ __be16 id; /* Identification */
+ __be16 frag_off; /* Fragment offset */
+ __u8 time_to_live; /* Time to live */
+ __u8 protocol; /* Protocol */
+ __be16 hdr_chksum; /* Header checksum */
+ __be32 src_addr; /* Source address */
+ __be32 dest_addr; /* Destination address */
+} __packed;
+
+struct qp_cm_pkt {
+ struct ethhdr_t eth; //14 Byte
+ union {
+ struct ipv4hdr ipv4; //20 bytes
+ struct ipv4hdr ipv6; /* note: reuses the 20-byte ipv4hdr layout */
+ } ip;
+ struct udphdr udp; //8 Byte
+ struct bth bth; //12 Bytes
+ struct deth deth; //8 Byte
+ struct mad mad; //[XRNIC_MAD_HEADER + XRNIC_MAD_DATA]
+} __packed;
+
+/*
+ * RoCEv2 packet for receiver. Duplicated for ease of code readability.
+ */
+struct qp_cm_pkt_hdr_ipv4 {
+ struct ethhdr_t eth; //14 Byte
+ struct ipv4hdr ipv4;
+ struct udphdr udp; //8 Byte
+ struct bth bth;
+ struct deth deth; //8 Byte
+ struct mad mad; //[XRNIC_MAD_HEADER + XRNIC_MAD_DATA]
+} __packed;
+
+struct qp_cm_pkt_hdr_ipv6 {
+ struct ethhdr_t eth; //14 Byte
+ struct ipv6hdr ipv6;
+ struct udphdr udp; //8 Byte
+ struct bth bth;
+ struct deth deth; //8 Byte
+ struct mad mad; //[XRNIC_MAD_HEADER + XRNIC_MAD_DATA]
+} __packed;
+
+/* MAD Packet validation defines */
+#define MAD_BASIC_VER 1
+#define OPCODE_SEND_UD 0x64
+
+#define MAD_SUBNET_CLASS 0x1
+#define MAD_DIRECT_SUBNET_CLASS 0x81
+
+#define MAD_SEND_CM_MSG 0x03
+#define MAD_VERF_FAILED -1
+#define MAD_VERF_SUCCESS 0
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_ROCEV2_H*/
diff --git a/drivers/staging/xlnx_tsmux/Kconfig b/drivers/staging/xlnx_tsmux/Kconfig
new file mode 100644
index 000000000000..0c1d9498e35b
--- /dev/null
+++ b/drivers/staging/xlnx_tsmux/Kconfig
@@ -0,0 +1,11 @@
+config XLNX_TSMUX
+ tristate "Xilinx MPEG2 Transport Stream Muxer"
+ select DMA_SHARED_BUFFER
+ help
+ This driver supports the Xilinx MPEG2 transport stream muxer.
+ It takes multimedia streams from a source kernel subsystem,
+ prepares an MPEG2 transport stream, and forwards it to the
+ sink kernel subsystem.
+
+ To compile this driver as a module, choose M here.
+ If unsure, choose N.
diff --git a/drivers/staging/xlnx_tsmux/MAINTAINERS b/drivers/staging/xlnx_tsmux/MAINTAINERS
new file mode 100644
index 000000000000..cfab4fa55698
--- /dev/null
+++ b/drivers/staging/xlnx_tsmux/MAINTAINERS
@@ -0,0 +1,4 @@
+XILINX MPG2TSMUX DRIVER
+M: Venkateshwar Rao <venkateshwar.rao.gannavarapu@xilinx.com>
+S: Maintained
+F: drivers/staging/xlnx_tsmux
diff --git a/drivers/staging/xlnx_tsmux/Makefile b/drivers/staging/xlnx_tsmux/Makefile
new file mode 100644
index 000000000000..4437068337e7
--- /dev/null
+++ b/drivers/staging/xlnx_tsmux/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XLNX_TSMUX) += xlnx_mpg2tsmux.o
diff --git a/drivers/staging/xlnx_tsmux/dt-binding.txt b/drivers/staging/xlnx_tsmux/dt-binding.txt
new file mode 100644
index 000000000000..e4a7095d92e1
--- /dev/null
+++ b/drivers/staging/xlnx_tsmux/dt-binding.txt
@@ -0,0 +1,28 @@
+The Xilinx mpegtsmux IP reads the elementary streams from memory and
+writes the MPEG2 TS(transport stream) to memory.
+
+The MPEG2 TS muxer follows a DMA-descriptor-based approach. Each DMA
+descriptor carries the buffer properties and buffer address of one
+elementary stream. The IP reads the descriptors one after another,
+generates TS packets from the information in each descriptor, and
+writes the generated TS packets to the output buffer address.
+
+Required properties:
+
+- compatible: must be "xlnx,tsmux-1.0"
+- interrupts: interrupt number
+- interrupt-parent: phandle for interrupt controller
+- reg: base address and size of the IP core
+- clock-names: must contain "ap_clk"
+- clocks: phandle to the AXI Lite interface clock
+
+Example:
+ ts2mux: ts2mux@a0200000 {
+ compatible = "xlnx,tsmux-1.0";
+ interrupt-parent = <&gic>;
+ interrupts = <0 90 4>;
+ reg = <0x0 0xa0200000 0x0 0x30000>;
+ clock-names = "ap_clk";
+ clocks = <&misc_clk_0>;
+ };
diff --git a/drivers/staging/xlnx_tsmux/xlnx_mpg2tsmux.c b/drivers/staging/xlnx_tsmux/xlnx_mpg2tsmux.c
new file mode 100644
index 000000000000..84f4b501570a
--- /dev/null
+++ b/drivers/staging/xlnx_tsmux/xlnx_mpg2tsmux.c
@@ -0,0 +1,1510 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx TS mux driver
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Venkateshwar Rao G <venkateshwar.rao.gannavarapu@xilinx.com>
+ */
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmapool.h>
+#include <linux/dma-buf.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <uapi/linux/xlnx_mpg2tsmux_interface.h>
+
+#define DRIVER_NAME "mpegtsmux-1.0"
+#define DRIVER_CLASS "mpg2mux_ts_cls"
+#define DRIVER_MAX_DEV (10)
+
+/* Register offsets and bit masks */
+#define XTSMUX_RST_CTRL 0x00
+#define XTSMUX_GLBL_IER 0x04
+#define XTSMUX_IER_STAT 0x08
+#define XTSMUX_ISR_STAT 0x0c
+#define XTSMUX_ERR_STAT 0x10
+#define XTSMUX_LAST_NODE_PROCESSED 0x14
+#define XTSMUX_MUXCONTEXT_ADDR 0x20
+#define XTSMUX_STREAMCONTEXT_ADDR 0x30
+#define XTSMUX_NUM_STREAM_IDTBL 0x48
+#define XTSMUX_NUM_DESC 0x70
+#define XTSMUX_STREAM_IDTBL_ADDR 0x78
+#define XTSMUX_CONTEXT_DATA_SIZE 64
+
+#define XTSMUX_RST_CTRL_START_MASK BIT(0)
+#define XTSMUX_GLBL_IER_ENABLE_MASK BIT(0)
+#define XTSMUX_IER_ENABLE_MASK BIT(0)
+
+/* Number of input/output streams supported */
+#define XTSMUX_MAXIN_STRM 112
+#define XTSMUX_MAXIN_PLSTRM 16
+#define XTSMUX_MAXIN_TLSTRM (XTSMUX_MAXIN_STRM + XTSMUX_MAXIN_PLSTRM)
+#define XTSMUX_MAXOUT_STRM 112
+#define XTSMUX_MAXOUT_PLSTRM 16
+#define XTSMUX_MAXOUT_TLSTRM (XTSMUX_MAXOUT_STRM + XTSMUX_MAXOUT_PLSTRM)
+#define XTSMUX_POOL_SIZE 128
+/* Initial version is tested with 256 align only */
+#define XTSMUX_POOL_ALIGN 256
+#define XTSMUX_STRMBL_FREE 0
+#define XTSMUX_STRMBL_BUSY 1
+
+/**
+ * struct stream_context - struct to enqueue a stream context descriptor
+ * @command: stream context type
+ * @is_pcr_stream: flag for PCR (program clock reference) stream
+ * @stream_id: stream identification number
+ * @extended_stream_id: extended stream id
+ * @reserved1: reserved for hardware alignment
+ * @pid: packet id number
+ * @dmabuf_id: 0 for buf allocated by driver, nonzero for external buf
+ * @size_data_in: size in bytes of input buffer
+ * @pts: presentation time stamp
+ * @dts: decoding time stamp
+ * @in_buf_pointer: physical address of src buf address
+ * @reserved2: reserved for hardware alignment
+ * @insert_pcr: inserting pcr in stream context
+ * @reserved3: reserved for hardware alignment
+ * @pcr_extension: pcr extension number
+ * @pcr_base: pcr base number
+ */
+struct stream_context {
+ enum ts_mux_command command;
+ u8 is_pcr_stream;
+ u8 stream_id;
+ u8 extended_stream_id;
+ u8 reserved1;
+ u16 pid;
+ u16 dmabuf_id;
+ u32 size_data_in;
+ u64 pts;
+ u64 dts;
+ u64 in_buf_pointer;
+ u32 reserved2;
+ u8 insert_pcr;
+ u8 reserved3;
+ u16 pcr_extension;
+ u64 pcr_base;
+};
+
+/**
+ * enum node_status_info - status of stream context
+ * @NOT_FILLED: node not filled
+ * @UPDATED_BY_DRIVER: updated by driver
+ * @READ_BY_IP: read by IP
+ * @USED_BY_IP: used by IP
+ * @NODE_INVALID: invalid node
+ */
+enum node_status_info {
+ NOT_FILLED = 0,
+ UPDATED_BY_DRIVER,
+ READ_BY_IP,
+ USED_BY_IP,
+ NODE_INVALID
+};
+
+/**
+ * enum stream_errors - stream context error type
+ * @NO_ERROR: no error
+ * @PARTIAL_FRAME_WRITTEN: partial frame written
+ * @DESCRIPTOR_NOT_READABLE: descriptor not readable
+ */
+enum stream_errors {
+ NO_ERROR = 0,
+ PARTIAL_FRAME_WRITTEN,
+ DESCRIPTOR_NOT_READABLE
+};
+
+/**
+ * struct stream_context_node - struct to describe a stream node in the linked list
+ * @node_number: node number to handle streams
+ * @node_status: status of stream node
+ * @element: stream context info
+ * @error_code: error codes
+ * @reserved1: reserved bits for hardware align
+ * @tail_pointer: physical address of next stream node in linked list
+ * @strm_phy_addr: physical address of stream context
+ * @node: struct of linked list head
+ * @reserved2: reserved for hardware align
+ */
+struct stream_context_node {
+ u32 node_number;
+ enum node_status_info node_status;
+ struct stream_context element;
+ enum stream_errors error_code;
+ u32 reserved1;
+ u64 tail_pointer;
+ u64 strm_phy_addr;
+ struct list_head node;
+ u64 reserved2;
+};
+
+/**
+ * struct stream_info - struct to describe a streamid node in the streamid table
+ * @pid: identification number of stream
+ * @continuity_counter: counter to maintain packet count for a stream
+ * @usageflag: flag indicating whether the streamid node is free or in use
+ * @strmtbl_update: whether to add or remove the streamid in the table
+ */
+struct stream_info {
+ u16 pid;
+ u8 continuity_counter;
+ u8 usageflag;
+ enum strmtbl_cnxt strmtbl_update;
+};
+
+/* Enum for error handling of mux context */
+enum mux_op_errs {
+ MUXER_NO_ERROR = 0,
+ ERROR_OUTPUT_BUFFER_IS_NOT_ACCESIBLE,
+ ERROR_PARTIAL_PACKET_WRITTEN
+};
+
+/**
+ * struct muxer_context - struct to describe mux node in linked list
+ * @node_status: status of mux node
+ * @reserved: reserved for hardware align
+ * @dst_buf_start_addr: physical address of dst buf
+ * @dst_buf_size: size of the output buffer
+ * @dst_buf_written: size of data written in dst buf
+ * @num_of_pkts_written: number of packets in dst buf
+ * @error_code: error status of mux node updated by IP
+ * @mux_phy_addr: physical address of muxer
+ * @node: struct of linked list head
+ */
+struct muxer_context {
+ enum node_status_info node_status;
+ u32 reserved;
+ u64 dst_buf_start_addr;
+ u32 dst_buf_size;
+ u32 dst_buf_written;
+ u32 num_of_pkts_written;
+ enum mux_op_errs error_code;
+ u64 mux_phy_addr;
+ struct list_head node;
+};
+
+/**
+ * struct xlnx_tsmux_dmabufintl - dma buf internal info
+ * @dbuf: reference to a buffer's dmabuf struct
+ * @attach: attachment to the buffer's dmabuf
+ * @sgt: scatterlist info for the buffer's dmabuf
+ * @dmabuf_addr: buffer physical address
+ * @dmabuf_fd: dma buffer fd
+ * @buf_id: dma buffer reference id
+ */
+struct xlnx_tsmux_dmabufintl {
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ dma_addr_t dmabuf_addr;
+ s32 dmabuf_fd;
+ u16 buf_id;
+};
+
+/**
+ * struct xlnx_tsmux - xilinx mpeg2 TS muxer device
+ * @dev: pointer to struct device instance used by the driver
+ * @iomem: base address of the HW/IP
+ * @chdev: char device handle
+ * @user_count: count of users who have opened the device
+ * @lock: spinlock to protect driver data structures
+ * @waitq: wait queue used by the driver
+ * @irq: irq number
+ * @id: device instance ID
+ * @num_inbuf: number of input buffers allocated using the DMA API
+ * @num_outbuf: number of output buffers allocated using the DMA API
+ * @srcbuf_size: size of each source buffer
+ * @dstbuf_size: size of each destination buffer
+ * @strm_node: list containing descriptors of stream context
+ * @mux_node: list containing descriptors of mux context
+ * @stcxt_node_cnt: stream number used for maintaining the list
+ * @num_strmnodes: number of stream nodes in the streamid table
+ * @intn_stream_count: internal count of streams added to stream context
+ * @outbuf_idx: index number to maintain output buffers
+ * @srcbuf_addrs: physical address of source buffer
+ * @dstbuf_addrs: physical address of destination buffer
+ * @src_kaddrs: kernel VA for source buffer allocated by the driver
+ * @dst_kaddrs: kernel VA for destination buffer allocated by the driver
+ * @strm_ctx_pool: dma pool to allocate stream context buffers
+ * @mux_ctx_pool: dma pool to allocate mux context buffers
+ * @strmtbl_addrs: physical address of streamid table
+ * @strmtbl_kaddrs: kernel VA for streamid table
+ * @intn_strmtbl_addrs: physical address of the internal streamid table
+ * @intn_strmtbl_kaddrs: kernel VA for the internal streamid table
+ * @ap_clk: interface clock
+ * @src_dmabufintl: array of src DMA buf allocated by user
+ * @dst_dmabufintl: array of dst DMA buf allocated by user
+ * @outbuf_written: size in bytes written in output buffer
+ * @stream_count: stream count
+ */
+struct xlnx_tsmux {
+ struct device *dev;
+ void __iomem *iomem;
+ struct cdev chdev;
+ atomic_t user_count;
+ /* lock is used to protect access to sync_err and wdg_err */
+ spinlock_t lock;
+ wait_queue_head_t waitq;
+ s32 irq;
+ s32 id;
+ u32 num_inbuf;
+ u32 num_outbuf;
+ size_t srcbuf_size;
+ size_t dstbuf_size;
+ struct list_head strm_node;
+ struct list_head mux_node;
+ u32 stcxt_node_cnt;
+ u32 num_strmnodes;
+ atomic_t intn_stream_count;
+ atomic_t outbuf_idx;
+ dma_addr_t srcbuf_addrs[XTSMUX_MAXIN_TLSTRM];
+ dma_addr_t dstbuf_addrs[XTSMUX_MAXOUT_TLSTRM];
+ void *src_kaddrs[XTSMUX_MAXIN_TLSTRM];
+ void *dst_kaddrs[XTSMUX_MAXOUT_TLSTRM];
+ struct dma_pool *strm_ctx_pool;
+ struct dma_pool *mux_ctx_pool;
+ dma_addr_t strmtbl_addrs;
+ void *strmtbl_kaddrs;
+ dma_addr_t intn_strmtbl_addrs;
+ void *intn_strmtbl_kaddrs;
+ struct clk *ap_clk;
+ struct xlnx_tsmux_dmabufintl src_dmabufintl[XTSMUX_MAXIN_STRM];
+ struct xlnx_tsmux_dmabufintl dst_dmabufintl[XTSMUX_MAXOUT_STRM];
+ s32 outbuf_written;
+ atomic_t stream_count;
+};
+
+static inline u32 xlnx_tsmux_read(const struct xlnx_tsmux *mpgmuxts,
+ const u32 reg)
+{
+ return ioread32(mpgmuxts->iomem + reg);
+}
+
+static inline void xlnx_tsmux_write(const struct xlnx_tsmux *mpgmuxts,
+ const u32 reg, const u32 val)
+{
+ iowrite32(val, (void __iomem *)(mpgmuxts->iomem + reg));
+}
+
+/* TODO: Optimize using iowrite64 call */
+static inline void xlnx_tsmux_write64(const struct xlnx_tsmux *mpgmuxts,
+ const u32 reg, const u64 val)
+{
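+ /* Program the LSB word first; the IP is assumed to latch the full
+ * 64-bit value once the MSB word is written.
+ */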
+ iowrite32(lower_32_bits(val), (void __iomem *)(mpgmuxts->iomem + reg));
+ iowrite32(upper_32_bits(val), (void __iomem *)(mpgmuxts->iomem +
+ reg + 4));
+}
+
+static int xlnx_tsmux_start_muxer(struct xlnx_tsmux *mpgmuxts)
+{
+ struct stream_context_node *new_strm_node;
+ struct muxer_context *new_mux_node;
+
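+ /*
+ * Programming sequence: mux context, head of the stream context
+ * list, descriptor count and stream-id table, then the interrupt
+ * enables and finally the start bit.
+ */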
+ new_mux_node = list_first_entry_or_null(&mpgmuxts->mux_node,
+ struct muxer_context, node);
+ if (!new_mux_node)
+ return -ENXIO;
+
+ xlnx_tsmux_write64(mpgmuxts, XTSMUX_MUXCONTEXT_ADDR,
+ new_mux_node->mux_phy_addr);
+
+ new_strm_node = list_first_entry_or_null(&mpgmuxts->strm_node,
+ struct stream_context_node,
+ node);
+ if (!new_strm_node)
+ return -ENXIO;
+
+ xlnx_tsmux_write64(mpgmuxts, XTSMUX_STREAMCONTEXT_ADDR,
+ new_strm_node->strm_phy_addr);
+
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_NUM_DESC,
+ atomic_read(&mpgmuxts->intn_stream_count));
+
+ xlnx_tsmux_write64(mpgmuxts, XTSMUX_STREAM_IDTBL_ADDR,
+ (u64)mpgmuxts->intn_strmtbl_addrs);
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_NUM_STREAM_IDTBL, 1);
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_GLBL_IER,
+ XTSMUX_GLBL_IER_ENABLE_MASK);
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_IER_STAT,
+ XTSMUX_IER_ENABLE_MASK);
+
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_RST_CTRL,
+ XTSMUX_RST_CTRL_START_MASK);
+
+ return 0;
+}
+
+static void xlnx_tsmux_stop_muxer(const struct xlnx_tsmux *mpgmuxts)
+{
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_GLBL_IER, 0);
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_IER_STAT, 0);
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_RST_CTRL, 0);
+}
+
+static enum xlnx_tsmux_status
+xlnx_tsmux_get_status(const struct xlnx_tsmux *mpgmuxts)
+{
+ u32 status;
+
+ status = xlnx_tsmux_read(mpgmuxts, XTSMUX_RST_CTRL);
+
+ if (!status)
+ return MPG2MUX_ERROR;
+
+ if (status & XTSMUX_RST_CTRL_START_MASK)
+ return MPG2MUX_BUSY;
+
+ return MPG2MUX_READY;
+}
+
+static struct class *xlnx_tsmux_class;
+static dev_t xlnx_tsmux_devt;
+static atomic_t xlnx_tsmux_ndevs = ATOMIC_INIT(0);
+
+static int xlnx_tsmux_open(struct inode *pin, struct file *fptr)
+{
+ struct xlnx_tsmux *mpgtsmux;
+
+ mpgtsmux = container_of(pin->i_cdev, struct xlnx_tsmux, chdev);
+
+ fptr->private_data = mpgtsmux;
+ atomic_inc(&mpgtsmux->user_count);
+ atomic_set(&mpgtsmux->outbuf_idx, 0);
+ mpgtsmux->stcxt_node_cnt = 0;
+
+ return 0;
+}
+
+static int xlnx_tsmux_release(struct inode *pin, struct file *fptr)
+{
+ struct xlnx_tsmux *mpgtsmux = (struct xlnx_tsmux *)fptr->private_data;
+
+ if (!mpgtsmux)
+ return -EIO;
+
+ return 0;
+}
+
+/* TODO: Optimize buf alloc, dealloc API's to accommodate src, dst, strmtbl */
+static int xlnx_tsmux_ioctl_srcbuf_dealloc(struct xlnx_tsmux *mpgmuxts)
+{
+ unsigned int i;
+
+ for (i = 0; i < mpgmuxts->num_inbuf; i++) {
+ if (!mpgmuxts->src_kaddrs[i] || !mpgmuxts->srcbuf_addrs[i])
+ break;
+ dma_free_coherent(mpgmuxts->dev, mpgmuxts->srcbuf_size,
+ mpgmuxts->src_kaddrs[i],
+ mpgmuxts->srcbuf_addrs[i]);
+ mpgmuxts->src_kaddrs[i] = NULL;
+ }
+
+ return 0;
+}
+
+static int xlnx_tsmux_ioctl_srcbuf_alloc(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret;
+ unsigned int i;
+ struct strc_bufs_info buf_data;
+
+ ret = copy_from_user(&buf_data, arg, sizeof(struct strc_bufs_info));
+ if (ret) {
+ /* copy_from_user() returns the number of bytes left uncopied */
+ dev_dbg(mpgmuxts->dev, "Failed to read input buffer info\n");
+ return -EFAULT;
+ }
+
+ if (buf_data.num_buf > XTSMUX_MAXIN_PLSTRM) {
+ dev_dbg(mpgmuxts->dev, "Excessive input payload. supported %d",
+ XTSMUX_MAXIN_PLSTRM);
+ return -EINVAL;
+ }
+
+ mpgmuxts->num_inbuf = buf_data.num_buf;
+ mpgmuxts->srcbuf_size = buf_data.buf_size;
+ /* buf_size & num_buf boundary conditions are handled in application
+ * and initial version of driver tested with 32-bit addressing only
+ */
+ for (i = 0; i < mpgmuxts->num_inbuf; i++) {
+ mpgmuxts->src_kaddrs[i] =
+ dma_alloc_coherent(mpgmuxts->dev,
+ mpgmuxts->srcbuf_size,
+ &mpgmuxts->srcbuf_addrs[i],
+ GFP_KERNEL | GFP_DMA32);
+ if (!mpgmuxts->src_kaddrs[i]) {
+ dev_dbg(mpgmuxts->dev, "dma alloc fail %d buffer", i);
+ goto exit_free;
+ }
+ }
+
+ return 0;
+
+exit_free:
+ xlnx_tsmux_ioctl_srcbuf_dealloc(mpgmuxts);
+
+ return -ENOMEM;
+}
+
+static int xlnx_tsmux_ioctl_dstbuf_dealloc(struct xlnx_tsmux *mpgmuxts)
+{
+ unsigned int i;
+
+ for (i = 0; i < mpgmuxts->num_outbuf; i++) {
+ if (!mpgmuxts->dst_kaddrs[i] || !mpgmuxts->dstbuf_addrs[i])
+ break;
+ dma_free_coherent(mpgmuxts->dev, mpgmuxts->dstbuf_size,
+ mpgmuxts->dst_kaddrs[i],
+ mpgmuxts->dstbuf_addrs[i]);
+ mpgmuxts->dst_kaddrs[i] = NULL;
+ }
+
+ return 0;
+}
+
+static int xlnx_tsmux_ioctl_dstbuf_alloc(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret;
+ unsigned int i;
+ struct strc_bufs_info buf_data;
+
+ ret = copy_from_user(&buf_data, arg, sizeof(struct strc_bufs_info));
+ if (ret) {
+ dev_dbg(mpgmuxts->dev, "%s: Failed to read output buffer info",
+ __func__);
+ return -EFAULT;
+ }
+
+ if (buf_data.num_buf > XTSMUX_MAXOUT_PLSTRM) {
+ dev_dbg(mpgmuxts->dev, "Excessive output payload supported %d",
+ XTSMUX_MAXOUT_PLSTRM);
+ return -EINVAL;
+ }
+
+ mpgmuxts->num_outbuf = buf_data.num_buf;
+ mpgmuxts->dstbuf_size = buf_data.buf_size;
+ /* buf_size & num_buf boundary conditions are handled in application*/
+ for (i = 0; i < mpgmuxts->num_outbuf; i++) {
+ mpgmuxts->dst_kaddrs[i] =
+ dma_alloc_coherent(mpgmuxts->dev,
+ mpgmuxts->dstbuf_size,
+ &mpgmuxts->dstbuf_addrs[i],
+ GFP_KERNEL | GFP_DMA32);
+ if (!mpgmuxts->dst_kaddrs[i]) {
+ dev_dbg(mpgmuxts->dev, "dmamem alloc fail for %d", i);
+ goto exit_free;
+ }
+ }
+
+ return 0;
+
+exit_free:
+ xlnx_tsmux_ioctl_dstbuf_dealloc(mpgmuxts);
+
+ return -ENOMEM;
+}
+
+static int xlnx_tsmux_ioctl_strmtbl_dealloc(struct xlnx_tsmux *mpgmuxts)
+{
+ u32 buf_size;
+
+ buf_size = sizeof(struct stream_info) * mpgmuxts->num_strmnodes;
+ if (!mpgmuxts->strmtbl_kaddrs || !mpgmuxts->strmtbl_addrs)
+ return 0;
+
+ dma_free_coherent(mpgmuxts->dev, buf_size, mpgmuxts->strmtbl_kaddrs,
+ mpgmuxts->strmtbl_addrs);
+ mpgmuxts->strmtbl_kaddrs = NULL;
+
+ if (!mpgmuxts->intn_strmtbl_kaddrs || !mpgmuxts->intn_strmtbl_addrs)
+ return 0;
+ dma_free_coherent(mpgmuxts->dev, buf_size,
+ mpgmuxts->intn_strmtbl_kaddrs,
+ mpgmuxts->intn_strmtbl_addrs);
+ mpgmuxts->intn_strmtbl_kaddrs = NULL;
+
+ return 0;
+}
+
+static int xlnx_tsmux_ioctl_strmtbl_alloc(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret, buf_size;
+ u16 num_nodes;
+
+ ret = copy_from_user(&num_nodes, arg, sizeof(u16));
+ if (ret) {
+ dev_dbg(mpgmuxts->dev, "Failed to read streamid table info");
+ return -EFAULT;
+ }
+ mpgmuxts->num_strmnodes = num_nodes;
+ buf_size = sizeof(struct stream_info) * mpgmuxts->num_strmnodes;
+
+ mpgmuxts->strmtbl_kaddrs =
+ dma_alloc_coherent(mpgmuxts->dev,
+ buf_size, &mpgmuxts->strmtbl_addrs,
+ GFP_KERNEL | GFP_DMA32);
+ if (!mpgmuxts->strmtbl_kaddrs) {
+ dev_dbg(mpgmuxts->dev, "dmamem alloc fail for strm table");
+ return -ENOMEM;
+ }
+
+ /* Allocating memory for internal streamid table */
+ mpgmuxts->intn_strmtbl_kaddrs =
+ dma_alloc_coherent(mpgmuxts->dev,
+ buf_size, &mpgmuxts->intn_strmtbl_addrs,
+ GFP_KERNEL | GFP_DMA32);
+
+ if (!mpgmuxts->intn_strmtbl_kaddrs) {
+ dev_dbg(mpgmuxts->dev, "dmamem alloc fail for intr strm table");
+ goto exist_free;
+ }
+
+ return 0;
+exist_free:
+ xlnx_tsmux_ioctl_strmtbl_dealloc(mpgmuxts);
+
+ return -ENOMEM;
+}
+
+static int xlnx_tsmux_update_strminfo_table(struct xlnx_tsmux *mpgmuxts,
+ struct strc_strminfo new_strm_info)
+{
+ u32 i = 0;
+ struct stream_info *cptr;
+
+ cptr = (struct stream_info *)mpgmuxts->strmtbl_kaddrs;
+
+ if (new_strm_info.strmtbl_ctxt == ADD_TO_TBL) {
+ /* Find a free block and write the new stream info into it */
+ for (i = 0; i < mpgmuxts->num_strmnodes; i++, cptr++) {
+ if (!cptr->usageflag) {
+ cptr->pid = new_strm_info.pid;
+ cptr->continuity_counter = 0;
+ cptr->usageflag = XTSMUX_STRMBL_BUSY;
+ break;
+ }
+ }
+ } else if (new_strm_info.strmtbl_ctxt == DEL_FR_TBL) {
+ for (i = 0; i < mpgmuxts->num_strmnodes; i++, cptr++) {
+ if (cptr->pid == new_strm_info.pid) {
+ cptr->usageflag = XTSMUX_STRMBL_FREE;
+ break;
+ }
+ }
+ }
+
+ if (i == mpgmuxts->num_strmnodes)
+ return -EIO;
+
+ return 0;
+}
+
+static int xlnx_tsmux_ioctl_update_strmtbl(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret;
+ struct strc_strminfo new_strm_info;
+
+ ret = copy_from_user(&new_strm_info, arg, sizeof(struct strc_strminfo));
+ if (ret) {
+ dev_dbg(mpgmuxts->dev, "Reading strmInfo failed");
+ return -EFAULT;
+ }
+
+ return xlnx_tsmux_update_strminfo_table(mpgmuxts, new_strm_info);
+}
+
+static int xlnx_tsmux_enqueue_stream_context(struct xlnx_tsmux *mpgmuxts,
+ struct stream_context_in *stream_data)
+{
+ struct stream_context_node *new_strm_node, *prev_strm_node;
+ void *kaddr_strm_node;
+ dma_addr_t strm_phy_addr;
+ unsigned long flags;
+ u32 i;
+
+ kaddr_strm_node = dma_pool_alloc(mpgmuxts->strm_ctx_pool,
+ GFP_KERNEL | GFP_DMA32,
+ &strm_phy_addr);
+
+ new_strm_node = (struct stream_context_node *)kaddr_strm_node;
+ if (!new_strm_node)
+ return -ENOMEM;
+
+ /* update the stream context node */
+ wmb();
+ new_strm_node->element.command = stream_data->command;
+ new_strm_node->element.is_pcr_stream = stream_data->is_pcr_stream;
+ new_strm_node->element.stream_id = stream_data->stream_id;
+ new_strm_node->element.extended_stream_id =
+ stream_data->extended_stream_id;
+ new_strm_node->element.pid = stream_data->pid;
+ new_strm_node->element.size_data_in = stream_data->size_data_in;
+ new_strm_node->element.pts = stream_data->pts;
+ new_strm_node->element.dts = stream_data->dts;
+ new_strm_node->element.insert_pcr = stream_data->insert_pcr;
+ new_strm_node->element.pcr_base = stream_data->pcr_base;
+ new_strm_node->element.pcr_extension = stream_data->pcr_extension;
+
+ /* Check for external dma buffer */
+ if (!stream_data->is_dmabuf) {
+ new_strm_node->element.in_buf_pointer =
+ mpgmuxts->srcbuf_addrs[stream_data->srcbuf_id];
+ new_strm_node->element.dmabuf_id = 0;
+ } else {
+ for (i = 0; i < XTSMUX_MAXIN_STRM; i++) {
+ /* Searching dma buf info based on srcbuf_id */
+ if (stream_data->srcbuf_id ==
+ mpgmuxts->src_dmabufintl[i].dmabuf_fd) {
+ new_strm_node->element.in_buf_pointer =
+ mpgmuxts->src_dmabufintl[i].dmabuf_addr;
+ new_strm_node->element.dmabuf_id =
+ mpgmuxts->src_dmabufintl[i].buf_id;
+ break;
+ }
+ }
+
+ /* No dma buf found with srcbuf_id */
+ if (i == XTSMUX_MAXIN_STRM) {
+ dev_err(mpgmuxts->dev, "No DMA buffer with %d",
+ stream_data->srcbuf_id);
+ return -ENOMEM;
+ }
+ }
+
+ new_strm_node->strm_phy_addr = (u64)strm_phy_addr;
+ new_strm_node->node_number = mpgmuxts->stcxt_node_cnt + 1;
+ mpgmuxts->stcxt_node_cnt++;
+ new_strm_node->node_status = UPDATED_BY_DRIVER;
+ new_strm_node->error_code = NO_ERROR;
+ new_strm_node->tail_pointer = 0;
+
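+ /*
+ * Descriptors form a singly linked list in DMA memory: append by
+ * pointing the previous node's tail_pointer at this node's physical
+ * address.
+ */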
+ spin_lock_irqsave(&mpgmuxts->lock, flags);
+ /* If it is not first stream in stream node linked list find
+ * physical address of current node and add to last node in list
+ */
+ if (!list_empty_careful(&mpgmuxts->strm_node)) {
+ prev_strm_node = list_last_entry(&mpgmuxts->strm_node,
+ struct stream_context_node,
+ node);
+ prev_strm_node->tail_pointer = new_strm_node->strm_phy_addr;
+ }
+ /* update the list and stream count */
+ wmb();
+ list_add_tail(&new_strm_node->node, &mpgmuxts->strm_node);
+ atomic_inc(&mpgmuxts->stream_count);
+ spin_unlock_irqrestore(&mpgmuxts->lock, flags);
+
+ return 0;
+}
+
+static int xlnx_tsmux_set_stream_desc(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ struct stream_context_in *stream_data;
+ int ret = 0;
+
+ stream_data = kzalloc(sizeof(*stream_data), GFP_KERNEL);
+ if (!stream_data)
+ return -ENOMEM;
+
+ ret = copy_from_user(stream_data, arg,
+ sizeof(struct stream_context_in));
+ if (ret) {
+ dev_err(mpgmuxts->dev, "Failed to copy stream data from user");
+ ret = -EFAULT;
+ goto error_free;
+ }
+
+ ret = xlnx_tsmux_enqueue_stream_context(mpgmuxts, stream_data);
+
+error_free:
+ kfree(stream_data);
+
+ return ret;
+}
+
+static int xlnx_tsmux_ioctl_set_stream_context(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret;
+
+ ret = xlnx_tsmux_set_stream_desc(mpgmuxts, arg);
+ if (ret < 0) {
+ dev_err(mpgmuxts->dev, "Setting stream descripter failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static enum xlnx_tsmux_status
+xlnx_tsmux_get_device_status(struct xlnx_tsmux *mpgmuxts)
+{
+ enum xlnx_tsmux_status ip_status;
+
+ ip_status = xlnx_tsmux_get_status(mpgmuxts);
+
+ if (ip_status == MPG2MUX_ERROR) {
+ dev_err(mpgmuxts->dev, "Failed to get device status");
+ return -EACCES;
+ }
+
+ if (ip_status == MPG2MUX_BUSY)
+ return -EBUSY;
+
+ return MPG2MUX_READY;
+}
+
+static int xlnx_tsmux_ioctl_start(struct xlnx_tsmux *mpgmuxts)
+{
+ enum xlnx_tsmux_status ip_stat;
+ int cnt;
+
+ /* get IP status */
+ ip_stat = xlnx_tsmux_get_device_status(mpgmuxts);
+ if (ip_stat != MPG2MUX_READY) {
+ dev_err(mpgmuxts->dev, "device is busy");
+ return ip_stat;
+ }
+
+ if (list_empty(&mpgmuxts->mux_node) ||
+ list_empty(&mpgmuxts->strm_node)) {
+ dev_err(mpgmuxts->dev, "No stream or mux to start device");
+ return -EIO;
+ }
+
+ cnt = atomic_read(&mpgmuxts->stream_count);
+ atomic_set(&mpgmuxts->intn_stream_count, cnt);
+
+ return xlnx_tsmux_start_muxer(mpgmuxts);
+}
+
+static void xlnx_tsmux_free_dmalloc(struct xlnx_tsmux *mpgmuxts)
+{
+ dma_pool_destroy(mpgmuxts->strm_ctx_pool);
+ dma_pool_destroy(mpgmuxts->mux_ctx_pool);
+}
+
+static int xlnx_tsmux_ioctl_stop(struct xlnx_tsmux *mpgmuxts)
+{
+ enum xlnx_tsmux_status ip_stat;
+ unsigned long flags;
+
+ ip_stat = xlnx_tsmux_get_device_status(mpgmuxts);
+ if (ip_stat != MPG2MUX_READY) {
+ dev_err(mpgmuxts->dev, "device is busy");
+ return ip_stat;
+ }
+
+ /* Free all driver allocated memory and reset linked list
+ * Reset IP registers
+ */
+ xlnx_tsmux_free_dmalloc(mpgmuxts);
+ spin_lock_irqsave(&mpgmuxts->lock, flags);
+ INIT_LIST_HEAD(&mpgmuxts->strm_node);
+ INIT_LIST_HEAD(&mpgmuxts->mux_node);
+ spin_unlock_irqrestore(&mpgmuxts->lock, flags);
+ xlnx_tsmux_stop_muxer(mpgmuxts);
+
+ return 0;
+}
+
+static int xlnx_tsmux_ioctl_get_status(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret;
+ enum xlnx_tsmux_status ip_stat;
+
+ ip_stat = xlnx_tsmux_get_device_status(mpgmuxts);
+
+ ret = copy_to_user(arg, (void *)&ip_stat,
+ (unsigned long)(sizeof(enum xlnx_tsmux_status)));
+ if (ret) {
+ dev_err(mpgmuxts->dev, "Unable to copy device status to user");
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+static int xlnx_tsmux_ioctl_get_outbufinfo(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret;
+ int out_index;
+ struct out_buffer out_info;
+
+ out_info.buf_write = mpgmuxts->outbuf_written;
+ mpgmuxts->outbuf_written = 0;
+ out_index = atomic_read(&mpgmuxts->outbuf_idx);
+ if (out_index)
+ out_info.buf_id = 0;
+ else
+ out_info.buf_id = 1;
+
+ ret = copy_to_user(arg, (void *)&out_info,
+ (unsigned long)(sizeof(struct out_buffer)));
+ if (ret) {
+ dev_err(mpgmuxts->dev, "Unable to copy outbuf info");
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+static int xlnx_tsmux_enqueue_mux_context(struct xlnx_tsmux *mpgmuxts,
+ struct muxer_context_in *mux_data)
+{
+ struct muxer_context *new_mux_node;
+ u32 out_index;
+ void *kaddr_mux_node;
+ dma_addr_t mux_phy_addr;
+ unsigned long flags;
+ s32 i;
+
+ kaddr_mux_node = dma_pool_alloc(mpgmuxts->mux_ctx_pool,
+ GFP_KERNEL | GFP_DMA32,
+ &mux_phy_addr);
+
+ new_mux_node = (struct muxer_context *)kaddr_mux_node;
+ if (!new_mux_node)
+ return -EAGAIN;
+
+ new_mux_node->node_status = UPDATED_BY_DRIVER;
+ new_mux_node->mux_phy_addr = (u64)mux_phy_addr;
+
+ /* Check for external dma buffer */
+ if (!mux_data->is_dmabuf) {
+ out_index = 0;
+ new_mux_node->dst_buf_start_addr =
+ (u64)mpgmuxts->dstbuf_addrs[out_index];
+ new_mux_node->dst_buf_size = mpgmuxts->dstbuf_size;
+ if (out_index)
+ atomic_set(&mpgmuxts->outbuf_idx, 0);
+ else
+ atomic_set(&mpgmuxts->outbuf_idx, 1);
+ } else {
+ for (i = 0; i < XTSMUX_MAXOUT_STRM; i++) {
+ if (mux_data->dstbuf_id ==
+ mpgmuxts->dst_dmabufintl[i].dmabuf_fd) {
+ new_mux_node->dst_buf_start_addr =
+ mpgmuxts->dst_dmabufintl[i].dmabuf_addr;
+ break;
+ }
+ }
+ if (i == XTSMUX_MAXOUT_STRM) {
+ dev_err(mpgmuxts->dev, "No DMA buffer with %d",
+ mux_data->dstbuf_id);
+ return -ENOMEM;
+ }
+ new_mux_node->dst_buf_size = mux_data->dmabuf_size;
+ }
+ new_mux_node->error_code = MUXER_NO_ERROR;
+
+ spin_lock_irqsave(&mpgmuxts->lock, flags);
+ list_add_tail(&new_mux_node->node, &mpgmuxts->mux_node);
+ spin_unlock_irqrestore(&mpgmuxts->lock, flags);
+
+ return 0;
+}
+
+static int xlnx_tsmux_set_mux_desc(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ struct muxer_context_in *mux_data;
+ int ret = 0;
+
+ mux_data = kzalloc(sizeof(*mux_data), GFP_KERNEL);
+ if (!mux_data)
+ return -ENOMEM;
+
+ ret = copy_from_user(mux_data, arg,
+ sizeof(struct muxer_context_in));
+ if (ret) {
+ dev_err(mpgmuxts->dev, "failed to copy muxer data from user");
+ ret = -EFAULT;
+ goto kmem_free;
+ }
+
+ /* mux_data is copied into the mux context node, so free it here too */
+ ret = xlnx_tsmux_enqueue_mux_context(mpgmuxts, mux_data);
+
+kmem_free:
+ kfree(mux_data);
+
+ return ret;
+}
+
+static int xlnx_tsmux_ioctl_set_mux_context(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret;
+
+ ret = xlnx_tsmux_set_mux_desc(mpgmuxts, arg);
+ if (ret < 0)
+ dev_dbg(mpgmuxts->dev, "Setting mux context failed");
+
+ return ret;
+}
+
+static int xlnx_tsmux_ioctl_verify_dmabuf(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct xlnx_tsmux_dmabuf_info *dbuf_info;
+ s32 i;
+ int ret = 0;
+
+ dbuf_info = kzalloc(sizeof(*dbuf_info), GFP_KERNEL);
+ if (!dbuf_info)
+ return -ENOMEM;
+
+ ret = copy_from_user(dbuf_info, arg,
+ sizeof(struct xlnx_tsmux_dmabuf_info));
+ if (ret) {
+ dev_err(mpgmuxts->dev, "Failed to copy from user");
+ ret = -EFAULT;
+ goto dmak_free;
+ }
+ if (dbuf_info->dir != DMA_TO_MPG2MUX &&
+ dbuf_info->dir != DMA_FROM_MPG2MUX) {
+ dev_err(mpgmuxts->dev, "Incorrect DMABUF direction %d",
+ dbuf_info->dir);
+ ret = -EINVAL;
+ goto dmak_free;
+ }
+ dbuf = dma_buf_get(dbuf_info->buf_fd);
+ if (IS_ERR(dbuf)) {
+ dev_err(mpgmuxts->dev, "dma_buf_get fail fd %d direction %d",
+ dbuf_info->buf_fd, dbuf_info->dir);
+ ret = PTR_ERR(dbuf);
+ goto dmak_free;
+ }
+ attach = dma_buf_attach(dbuf, mpgmuxts->dev);
+ if (IS_ERR(attach)) {
+ dev_err(mpgmuxts->dev, "dma_buf_attach fail fd %d dir %d",
+ dbuf_info->buf_fd, dbuf_info->dir);
+ ret = PTR_ERR(attach);
+ goto err_dmabuf_put;
+ }
+ sgt = dma_buf_map_attachment(attach,
+ (enum dma_data_direction)(dbuf_info->dir));
+ if (IS_ERR(sgt)) {
+ dev_err(mpgmuxts->dev, "dma_buf_map_attach fail fd %d dir %d",
+ dbuf_info->buf_fd, dbuf_info->dir);
+ ret = PTR_ERR(sgt);
+ goto err_dmabuf_detach;
+ }
+
+ if (sgt->nents > 1) {
+ ret = -EIO;
+ dev_dbg(mpgmuxts->dev, "Not contig nents %d fd %d direction %d",
+ sgt->nents, dbuf_info->buf_fd, dbuf_info->dir);
+ goto err_dmabuf_unmap_attachment;
+ }
+ dev_dbg(mpgmuxts->dev, "dmabuf %s is physically contiguous",
+ (dbuf_info->dir ==
+ DMA_TO_MPG2MUX ? "Source" : "Destination"));
+
+ if (dbuf_info->dir == DMA_TO_MPG2MUX) {
+ for (i = 0; i < XTSMUX_MAXIN_STRM; i++) {
+ if (!mpgmuxts->src_dmabufintl[i].buf_id) {
+ mpgmuxts->src_dmabufintl[i].dbuf = dbuf;
+ mpgmuxts->src_dmabufintl[i].attach = attach;
+ mpgmuxts->src_dmabufintl[i].sgt = sgt;
+ mpgmuxts->src_dmabufintl[i].dmabuf_addr =
+ sg_dma_address(sgt->sgl);
+ mpgmuxts->src_dmabufintl[i].dmabuf_fd =
+ dbuf_info->buf_fd;
+ mpgmuxts->src_dmabufintl[i].buf_id = i + 1;
+ dev_dbg(mpgmuxts->dev,
+ "%s: phy-addr=0x%llx for src dmabuf=%d",
+ __func__,
+ mpgmuxts->src_dmabufintl[i].dmabuf_addr,
+ mpgmuxts->src_dmabufintl[i].dmabuf_fd);
+ break;
+ }
+ }
+ /* External src streams more than XTSMUX_MAXIN_STRM
+ * can not be handled
+ */
+ if (i == XTSMUX_MAXIN_STRM) {
+ ret = -EIO;
+ dev_dbg(mpgmuxts->dev, "src DMA bufs more than %d",
+ XTSMUX_MAXIN_STRM);
+ goto err_dmabuf_unmap_attachment;
+ }
+ } else {
+ for (i = 0; i < XTSMUX_MAXOUT_STRM; i++) {
+ if (!mpgmuxts->dst_dmabufintl[i].buf_id) {
+ mpgmuxts->dst_dmabufintl[i].dbuf = dbuf;
+ mpgmuxts->dst_dmabufintl[i].attach = attach;
+ mpgmuxts->dst_dmabufintl[i].sgt = sgt;
+ mpgmuxts->dst_dmabufintl[i].dmabuf_addr =
+ sg_dma_address(sgt->sgl);
+ mpgmuxts->dst_dmabufintl[i].dmabuf_fd =
+ dbuf_info->buf_fd;
+ mpgmuxts->dst_dmabufintl[i].buf_id = i + 1;
+ dev_dbg(mpgmuxts->dev,
+ "phy-addr=0x%llx for src dmabuf=%d",
+ mpgmuxts->dst_dmabufintl[i].dmabuf_addr,
+ mpgmuxts->dst_dmabufintl[i].dmabuf_fd);
+ break;
+ }
+ }
+ /* External dst streams more than XTSMUX_MAXOUT_STRM
+ * can not be handled
+ */
+ if (i == XTSMUX_MAXOUT_STRM) {
+ ret = -EIO;
+ dev_dbg(mpgmuxts->dev, "dst DMA bufs more than %d",
+ XTSMUX_MAXOUT_STRM);
+ goto err_dmabuf_unmap_attachment;
+ }
+ }
+
+ return 0;
+
+err_dmabuf_unmap_attachment:
+ dma_buf_unmap_attachment(attach, sgt,
+ (enum dma_data_direction)dbuf_info->dir);
+err_dmabuf_detach:
+ dma_buf_detach(dbuf, attach);
+err_dmabuf_put:
+ dma_buf_put(dbuf);
+dmak_free:
+ kfree(dbuf_info);
+
+ return ret;
+}
+
+static long xlnx_tsmux_ioctl(struct file *fptr,
+ unsigned int cmd, unsigned long data)
+{
+ struct xlnx_tsmux *mpgmuxts;
+ void __user *arg;
+ int ret;
+
+ mpgmuxts = fptr->private_data;
+ if (!mpgmuxts)
+ return -EINVAL;
+
+ arg = (void __user *)data;
+ switch (cmd) {
+ case MPG2MUX_INBUFALLOC:
+ ret = xlnx_tsmux_ioctl_srcbuf_alloc(mpgmuxts, arg);
+ break;
+ case MPG2MUX_INBUFDEALLOC:
+ ret = xlnx_tsmux_ioctl_srcbuf_dealloc(mpgmuxts);
+ break;
+ case MPG2MUX_OUTBUFALLOC:
+ ret = xlnx_tsmux_ioctl_dstbuf_alloc(mpgmuxts, arg);
+ break;
+ case MPG2MUX_OUTBUFDEALLOC:
+ ret = xlnx_tsmux_ioctl_dstbuf_dealloc(mpgmuxts);
+ break;
+ case MPG2MUX_STBLALLOC:
+ ret = xlnx_tsmux_ioctl_strmtbl_alloc(mpgmuxts, arg);
+ break;
+ case MPG2MUX_STBLDEALLOC:
+ ret = xlnx_tsmux_ioctl_strmtbl_dealloc(mpgmuxts);
+ break;
+ case MPG2MUX_TBLUPDATE:
+ ret = xlnx_tsmux_ioctl_update_strmtbl(mpgmuxts, arg);
+ break;
+ case MPG2MUX_SETSTRM:
+ ret = xlnx_tsmux_ioctl_set_stream_context(mpgmuxts, arg);
+ break;
+ case MPG2MUX_START:
+ ret = xlnx_tsmux_ioctl_start(mpgmuxts);
+ break;
+ case MPG2MUX_STOP:
+ ret = xlnx_tsmux_ioctl_stop(mpgmuxts);
+ break;
+ case MPG2MUX_STATUS:
+ ret = xlnx_tsmux_ioctl_get_status(mpgmuxts, arg);
+ break;
+ case MPG2MUX_GETOUTBUF:
+ ret = xlnx_tsmux_ioctl_get_outbufinfo(mpgmuxts, arg);
+ break;
+ case MPG2MUX_SETMUX:
+ ret = xlnx_tsmux_ioctl_set_mux_context(mpgmuxts, arg);
+ break;
+ case MPG2MUX_VDBUF:
+ ret = xlnx_tsmux_ioctl_verify_dmabuf(mpgmuxts, arg);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret < 0)
+ dev_err(mpgmuxts->dev, "ioctl %d failed\n", cmd);
+
+ return ret;
+}
+
+static int xlnx_tsmux_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ struct xlnx_tsmux *mpgmuxts = fp->private_data;
+ int ret, buf_id;
+
+ if (!mpgmuxts)
+ return -ENODEV;
+
+ buf_id = vma->vm_pgoff;
+
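+ /*
+ * vm_pgoff selects the buffer: offsets 0..num_inbuf-1 map the source
+ * buffers, the next num_outbuf offsets map the destination buffers.
+ */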
+ if (buf_id < mpgmuxts->num_inbuf) {
+ if (!mpgmuxts->srcbuf_addrs[buf_id]) {
+ dev_err(mpgmuxts->dev, "Mem not allocated for src %d",
+ buf_id);
+ return -EINVAL;
+ }
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ ret = remap_pfn_range(vma, vma->vm_start,
+ mpgmuxts->srcbuf_addrs[buf_id] >>
+ PAGE_SHIFT, vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ if (ret) {
+ dev_err(mpgmuxts->dev, "mmap fail bufid = %d", buf_id);
+ return -EINVAL;
+ }
+ } else if (buf_id < (mpgmuxts->num_inbuf + mpgmuxts->num_outbuf)) {
+ buf_id -= mpgmuxts->num_inbuf;
+ if (!mpgmuxts->dstbuf_addrs[buf_id]) {
+ dev_err(mpgmuxts->dev, "Mem not allocated fordst %d",
+ buf_id);
+ return -EINVAL;
+ }
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      mpgmuxts->dstbuf_addrs[buf_id] >>
+				      PAGE_SHIFT,
+				      vma->vm_end - vma->vm_start,
+				      vma->vm_page_prot);
+		if (ret) {
+			dev_err(mpgmuxts->dev, "mmap fail buf_id = %d", buf_id);
+			return -EINVAL;
+		}
+	} else {
+		dev_err(mpgmuxts->dev, "Invalid buffer id %d", buf_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static __poll_t xlnx_tsmux_poll(struct file *fptr, poll_table *wait)
+{
+ struct xlnx_tsmux *mpgmuxts = fptr->private_data;
+
+ poll_wait(fptr, &mpgmuxts->waitq, wait);
+
+ if (xlnx_tsmux_read(mpgmuxts, XTSMUX_LAST_NODE_PROCESSED))
+ return EPOLLIN | EPOLLPRI;
+
+ return 0;
+}
+
+static const struct file_operations mpg2mux_fops = {
+ .open = xlnx_tsmux_open,
+ .release = xlnx_tsmux_release,
+ .unlocked_ioctl = xlnx_tsmux_ioctl,
+ .mmap = xlnx_tsmux_mmap,
+ .poll = xlnx_tsmux_poll,
+};
+
+static void xlnx_tsmux_free_dmabufintl(struct xlnx_tsmux_dmabufintl *intl_dmabuf,
+				       u16 dmabuf_id,
+				       enum xlnx_tsmux_dma_dir dir)
+{
+ unsigned int i = dmabuf_id - 1;
+
+ if (intl_dmabuf[i].dmabuf_fd) {
+ dma_buf_unmap_attachment(intl_dmabuf[i].attach,
+ intl_dmabuf[i].sgt,
+ (enum dma_data_direction)dir);
+ dma_buf_detach(intl_dmabuf[i].dbuf, intl_dmabuf[i].attach);
+ dma_buf_put(intl_dmabuf[i].dbuf);
+ intl_dmabuf[i].dmabuf_fd = 0;
+ intl_dmabuf[i].buf_id = 0;
+ }
+}
+
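+/*
+ * Retire the stream-context nodes completed by the hardware, up to and
+ * including the last node it reported as processed, releasing any
+ * imported dma-bufs along the way. Also pop the oldest mux node and
+ * record how many bytes were written to the output buffer.
+ */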
+static int xlnx_tsmux_update_complete(struct xlnx_tsmux *mpgmuxts)
+{
+ struct stream_context_node *tstrm_node;
+ struct muxer_context *temp_mux;
+ u32 num_strm_node, i;
+ u32 num_strms;
+ unsigned long flags;
+
+ num_strm_node = xlnx_tsmux_read(mpgmuxts, XTSMUX_LAST_NODE_PROCESSED);
+ if (num_strm_node == 0)
+ return -1;
+
+ /* Removing completed stream nodes from the list */
+ spin_lock_irqsave(&mpgmuxts->lock, flags);
+ num_strms = atomic_read(&mpgmuxts->intn_stream_count);
+ for (i = 0; i < num_strms; i++) {
+ tstrm_node =
+ list_first_entry(&mpgmuxts->strm_node,
+ struct stream_context_node, node);
+ list_del(&tstrm_node->node);
+ atomic_dec(&mpgmuxts->stream_count);
+		if (tstrm_node->element.dmabuf_id)
+			xlnx_tsmux_free_dmabufintl(mpgmuxts->src_dmabufintl,
+						   tstrm_node->element.dmabuf_id,
+						   DMA_TO_MPG2MUX);
+ if (tstrm_node->node_number == num_strm_node) {
+ dma_pool_free(mpgmuxts->strm_ctx_pool, tstrm_node,
+ tstrm_node->strm_phy_addr);
+ break;
+ }
+ }
+
+ /* Removing completed mux nodes from the list */
+ temp_mux = list_first_entry(&mpgmuxts->mux_node, struct muxer_context,
+ node);
+ mpgmuxts->outbuf_written = temp_mux->dst_buf_written;
+
+ list_del(&temp_mux->node);
+ spin_unlock_irqrestore(&mpgmuxts->lock, flags);
+
+ return 0;
+}
+
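+/*
+ * Interrupt handler: acknowledge the enabled status bits, retire
+ * completed stream/mux nodes and wake the poll queue once output
+ * data has been written.
+ */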
+static irqreturn_t xlnx_tsmux_intr_handler(int irq, void *ctx)
+{
+ u32 status;
+ struct xlnx_tsmux *mpgmuxts = (struct xlnx_tsmux *)ctx;
+
+ status = xlnx_tsmux_read(mpgmuxts, XTSMUX_ISR_STAT);
+ status &= XTSMUX_IER_ENABLE_MASK;
+
+ if (status) {
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_ISR_STAT, status);
+ xlnx_tsmux_update_complete(mpgmuxts);
+ if (mpgmuxts->outbuf_written)
+ wake_up_interruptible(&mpgmuxts->waitq);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int xlnx_tsmux_probe(struct platform_device *pdev)
+{
+ struct xlnx_tsmux *mpgmuxts;
+ struct device *dev = &pdev->dev;
+ struct device *dev_crt;
+ struct resource *dev_resrc;
+ int ret = -1;
+ unsigned long flags;
+
+	/* DRIVER_MAX_DEV limits the number of driver instances; the
+	 * initial version is tested with a single instance only.
+	 * TODO: replace atomic_read with ida_simple_get
+	 */
+	if (atomic_read(&xlnx_tsmux_ndevs) >= DRIVER_MAX_DEV) {
+		dev_err(&pdev->dev, "Limit of %d devices reached",
+			DRIVER_MAX_DEV);
+		return -EIO;
+	}
+
+ mpgmuxts = devm_kzalloc(&pdev->dev, sizeof(struct xlnx_tsmux),
+ GFP_KERNEL);
+ if (!mpgmuxts)
+ return -ENOMEM;
+ mpgmuxts->dev = &pdev->dev;
+ dev_resrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mpgmuxts->iomem = devm_ioremap_resource(mpgmuxts->dev, dev_resrc);
+ if (IS_ERR(mpgmuxts->iomem))
+ return PTR_ERR(mpgmuxts->iomem);
+
+ mpgmuxts->irq = irq_of_parse_and_map(mpgmuxts->dev->of_node, 0);
+ if (!mpgmuxts->irq) {
+ dev_err(mpgmuxts->dev, "Unable to get IRQ");
+ return -EINVAL;
+ }
+
+	mpgmuxts->ap_clk = devm_clk_get(dev, "ap_clk");
+	if (IS_ERR(mpgmuxts->ap_clk)) {
+		ret = PTR_ERR(mpgmuxts->ap_clk);
+		dev_err(dev, "failed to get ap clk %d\n", ret);
+		return ret;
+	}
+	ret = clk_prepare_enable(mpgmuxts->ap_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable ap clk %d\n", ret);
+		return ret;
+	}
+
+ /* Initializing variables used in Muxer */
+ spin_lock_irqsave(&mpgmuxts->lock, flags);
+ INIT_LIST_HEAD(&mpgmuxts->strm_node);
+ INIT_LIST_HEAD(&mpgmuxts->mux_node);
+ spin_unlock_irqrestore(&mpgmuxts->lock, flags);
+ mpgmuxts->strm_ctx_pool = dma_pool_create("strcxt_pool", mpgmuxts->dev,
+ XTSMUX_POOL_SIZE,
+ XTSMUX_POOL_ALIGN,
+ XTSMUX_POOL_SIZE *
+ XTSMUX_MAXIN_TLSTRM);
+	if (!mpgmuxts->strm_ctx_pool) {
+		dev_err(mpgmuxts->dev, "Allocation fail for strm ctx pool");
+		ret = -ENOMEM;
+		goto err_disable_ap_clk;
+	}
+
+ mpgmuxts->mux_ctx_pool = dma_pool_create("muxcxt_pool", mpgmuxts->dev,
+ XTSMUX_POOL_SIZE,
+ XTSMUX_POOL_SIZE,
+ XTSMUX_POOL_SIZE *
+ XTSMUX_MAXIN_TLSTRM);
+
+	if (!mpgmuxts->mux_ctx_pool) {
+		dev_err(mpgmuxts->dev, "Allocation fail for mux ctx pool");
+		ret = -ENOMEM;
+		goto mux_err;
+	}
+
+ init_waitqueue_head(&mpgmuxts->waitq);
+
+ ret = devm_request_irq(mpgmuxts->dev, mpgmuxts->irq,
+ xlnx_tsmux_intr_handler, IRQF_SHARED,
+ DRIVER_NAME, mpgmuxts);
+
+	if (ret < 0) {
+		dev_err(mpgmuxts->dev, "Unable to register IRQ");
+		goto cadd_err;
+	}
+
+ cdev_init(&mpgmuxts->chdev, &mpg2mux_fops);
+ mpgmuxts->chdev.owner = THIS_MODULE;
+ mpgmuxts->id = atomic_read(&xlnx_tsmux_ndevs);
+ ret = cdev_add(&mpgmuxts->chdev, MKDEV(MAJOR(xlnx_tsmux_devt),
+ mpgmuxts->id), 1);
+
+ if (ret < 0) {
+ dev_err(mpgmuxts->dev, "cdev_add failed");
+ goto cadd_err;
+ }
+
+ dev_crt = device_create(xlnx_tsmux_class, mpgmuxts->dev,
+ MKDEV(MAJOR(xlnx_tsmux_devt), mpgmuxts->id),
+ mpgmuxts, "mpgmuxts%d", mpgmuxts->id);
+
+ if (IS_ERR(dev_crt)) {
+ ret = PTR_ERR(dev_crt);
+ dev_err(mpgmuxts->dev, "Unable to create device");
+ goto cdev_err;
+ }
+
+	platform_set_drvdata(pdev, mpgmuxts);
+	dev_info(mpgmuxts->dev,
+		 "Xilinx MPEG2 TS muxer device probe completed");
+
+	atomic_inc(&xlnx_tsmux_ndevs);
+
+	return 0;
+
+cdev_err:
+	cdev_del(&mpgmuxts->chdev);
+cadd_err:
+	dma_pool_destroy(mpgmuxts->mux_ctx_pool);
+mux_err:
+	dma_pool_destroy(mpgmuxts->strm_ctx_pool);
+err_disable_ap_clk:
+	clk_disable_unprepare(mpgmuxts->ap_clk);
+
+	return ret;
+}
+
+static int xlnx_tsmux_remove(struct platform_device *pdev)
+{
+ struct xlnx_tsmux *mpgmuxts;
+
+ mpgmuxts = platform_get_drvdata(pdev);
+ if (!mpgmuxts || !xlnx_tsmux_class)
+ return -EIO;
+ dma_pool_destroy(mpgmuxts->mux_ctx_pool);
+ dma_pool_destroy(mpgmuxts->strm_ctx_pool);
+
+ device_destroy(xlnx_tsmux_class, MKDEV(MAJOR(xlnx_tsmux_devt),
+ mpgmuxts->id));
+ cdev_del(&mpgmuxts->chdev);
+ atomic_dec(&xlnx_tsmux_ndevs);
+ clk_disable_unprepare(mpgmuxts->ap_clk);
+
+ return 0;
+}
+
+static const struct of_device_id xlnx_tsmux_of_match[] = {
+ { .compatible = "xlnx,tsmux-1.0", },
+ { }
+};
+
+static struct platform_driver xlnx_tsmux_driver = {
+ .probe = xlnx_tsmux_probe,
+ .remove = xlnx_tsmux_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xlnx_tsmux_of_match,
+ },
+};
+
+static int __init xlnx_tsmux_mod_init(void)
+{
+ int err;
+
+ xlnx_tsmux_class = class_create(THIS_MODULE, DRIVER_NAME);
+ if (IS_ERR(xlnx_tsmux_class)) {
+ pr_err("%s : Unable to create driver class", __func__);
+ return PTR_ERR(xlnx_tsmux_class);
+ }
+
+ err = alloc_chrdev_region(&xlnx_tsmux_devt, 0, DRIVER_MAX_DEV,
+ DRIVER_NAME);
+ if (err < 0) {
+ pr_err("%s : Unable to get major number", __func__);
+ goto err_class;
+ }
+
+ err = platform_driver_register(&xlnx_tsmux_driver);
+ if (err < 0) {
+ pr_err("%s : Unable to register %s driver", __func__,
+ DRIVER_NAME);
+ goto err_driver;
+ }
+
+ return 0;
+
+err_driver:
+ unregister_chrdev_region(xlnx_tsmux_devt, DRIVER_MAX_DEV);
+err_class:
+ class_destroy(xlnx_tsmux_class);
+
+ return err;
+}
+
+static void __exit xlnx_tsmux_mod_exit(void)
+{
+ platform_driver_unregister(&xlnx_tsmux_driver);
+ unregister_chrdev_region(xlnx_tsmux_devt, DRIVER_MAX_DEV);
+ class_destroy(xlnx_tsmux_class);
+ xlnx_tsmux_class = NULL;
+}
+
+module_init(xlnx_tsmux_mod_init);
+module_exit(xlnx_tsmux_mod_exit);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx MPEG2 transport stream muxer IP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/xlnxsync/Kconfig b/drivers/staging/xlnxsync/Kconfig
new file mode 100644
index 000000000000..08e73384dc94
--- /dev/null
+++ b/drivers/staging/xlnxsync/Kconfig
@@ -0,0 +1,11 @@
+config XLNX_SYNC
+ tristate "Xilinx Synchronizer"
+ depends on ARCH_ZYNQMP
+ help
+	  This driver is developed for the Xilinx Synchronizer IP. It is used
+	  to monitor the AXI addresses of the producer and to initiate the
+	  consumer to start earlier, thereby reducing the latency to process
+	  the data.
+
+	  To compile this driver as a module, choose M here.
+	  If unsure, choose N.
diff --git a/drivers/staging/xlnxsync/MAINTAINERS b/drivers/staging/xlnxsync/MAINTAINERS
new file mode 100644
index 000000000000..e2d720419783
--- /dev/null
+++ b/drivers/staging/xlnxsync/MAINTAINERS
@@ -0,0 +1,4 @@
+XILINX SYNCHRONIZER DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+F: drivers/staging/xlnxsync
diff --git a/drivers/staging/xlnxsync/Makefile b/drivers/staging/xlnxsync/Makefile
new file mode 100644
index 000000000000..b126a36da37c
--- /dev/null
+++ b/drivers/staging/xlnxsync/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XLNX_SYNC) += xlnxsync.o
diff --git a/drivers/staging/xlnxsync/dt-binding.txt b/drivers/staging/xlnxsync/dt-binding.txt
new file mode 100644
index 000000000000..f1ed9d724de8
--- /dev/null
+++ b/drivers/staging/xlnxsync/dt-binding.txt
@@ -0,0 +1,34 @@
+Xilinx Synchronizer
+-------------------
+
+The Xilinx Synchronizer is used for buffer synchronization between
+producer and consumer blocks. It does so by tapping onto the bus where
+the producer block writes frame data to memory and the consumer block
+reads the frame data back from memory.
+
+It can work on the encode path with a maximum of 4 channels, or on the
+decode path with a maximum of 2 channels.
+
+Required properties:
+- compatible : Must contain "xlnx,sync-ip-1.0"
+- reg: Physical base address and length of the registers set for the device.
+- interrupts: Contains the interrupt line number.
+- interrupt-parent: phandle to interrupt controller.
+- clock-names: The input clock names for the axilite, producer and consumer clocks.
+- clocks: References to the clocks that drive the axi interface, producer and consumer.
+- xlnx,num-chan: Range from 1 to 2 for decode.
+ Range from 1 to 4 for encode.
+
+Optional properties:
+- xlnx,encode: Present if the IP is configured for the encode path, else absent.
+
+Example:
+
+v_sync_vcu: subframe_sync_vcu@a00e0000 {
+ compatible = "xlnx,sync-ip-1.0";
+ reg = <0x0 0xa00e0000 0x0 0x10000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 96 4>;
+ clock-names = "s_axi_ctrl_aclk", "s_axi_mm_p_aclk", "s_axi_mm_aclk";
+ clocks = <&vid_s_axi_clk>, <&vid_stream_clk>, <&vid_stream_clk>;
+ xlnx,num-chan = <4>;
+ xlnx,encode;
+};
diff --git a/drivers/staging/xlnxsync/xlnxsync.c b/drivers/staging/xlnxsync/xlnxsync.c
new file mode 100644
index 000000000000..2de6714fe33c
--- /dev/null
+++ b/drivers/staging/xlnxsync/xlnxsync.c
@@ -0,0 +1,1290 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Synchronizer IP driver
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Vishal Sagar <vishal.sagar@xilinx.com>
+ *
+ * This driver is used to control the Xilinx Synchronizer IP
+ * to achieve sub frame latency for encode and decode with VCU.
+ * This is done by monitoring the address lines for specific values.
+ */
+
+#include <linux/cdev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <linux/xlnxsync.h>
+
+/* Register offsets and bit masks */
+#define XLNXSYNC_CTRL_REG 0x00
+#define XLNXSYNC_ISR_REG 0x04
+/* Producer Luma/Chroma Start/End Address */
+#define XLNXSYNC_PL_START_LO_REG 0x08
+#define XLNXSYNC_PL_START_HI_REG 0x0C
+#define XLNXSYNC_PC_START_LO_REG 0x20
+#define XLNXSYNC_PC_START_HI_REG 0x24
+#define XLNXSYNC_PL_END_LO_REG 0x38
+#define XLNXSYNC_PL_END_HI_REG 0x3C
+#define XLNXSYNC_PC_END_LO_REG 0x50
+#define XLNXSYNC_PC_END_HI_REG 0x54
+#define XLNXSYNC_L_MARGIN_REG 0x68
+#define XLNXSYNC_C_MARGIN_REG 0x74
+#define XLNXSYNC_IER_REG 0x80
+#define XLNXSYNC_DBG_REG 0x84
+/* Consumer Luma/Chroma Start/End Address */
+#define XLNXSYNC_CL_START_LO_REG 0x88
+#define XLNXSYNC_CL_START_HI_REG 0x8C
+#define XLNXSYNC_CC_START_LO_REG 0xA0
+#define XLNXSYNC_CC_START_HI_REG 0xA4
+#define XLNXSYNC_CL_END_LO_REG 0xB8
+#define XLNXSYNC_CL_END_HI_REG 0xBC
+#define XLNXSYNC_CC_END_LO_REG 0xD0
+#define XLNXSYNC_CC_END_HI_REG 0xD4
+
+/* Luma/Chroma Core offset registers */
+#define XLNXSYNC_LCOREOFF_REG 0x400
+#define XLNXSYNC_CCOREOFF_REG 0x410
+#define XLNXSYNC_COREOFF_NEXT 0x4
+
+#define XLNXSYNC_CTRL_ENCDEC_MASK BIT(0)
+#define XLNXSYNC_CTRL_ENABLE_MASK BIT(1)
+#define XLNXSYNC_CTRL_INTR_EN_MASK BIT(2)
+#define XLNXSYNC_CTRL_SOFTRESET BIT(3)
+
+#define XLNXSYNC_ISR_SYNC_FAIL_MASK BIT(0)
+#define XLNXSYNC_ISR_WDG_ERR_MASK BIT(1)
+/* Producer related */
+#define XLNXSYNC_ISR_PLDONE_SHIFT (2)
+#define XLNXSYNC_ISR_PLDONE_MASK GENMASK(3, 2)
+#define XLNXSYNC_ISR_PLSKIP_MASK BIT(4)
+#define XLNXSYNC_ISR_PLVALID_MASK BIT(5)
+#define XLNXSYNC_ISR_PCDONE_SHIFT (6)
+#define XLNXSYNC_ISR_PCDONE_MASK GENMASK(7, 6)
+#define XLNXSYNC_ISR_PCSKIP_MASK BIT(8)
+#define XLNXSYNC_ISR_PCVALID_MASK BIT(9)
+/* Consumer related */
+#define XLNXSYNC_ISR_CLDONE_SHIFT (10)
+#define XLNXSYNC_ISR_CLDONE_MASK GENMASK(11, 10)
+#define XLNXSYNC_ISR_CLSKIP_MASK BIT(12)
+#define XLNXSYNC_ISR_CLVALID_MASK BIT(13)
+#define XLNXSYNC_ISR_CCDONE_SHIFT (14)
+#define XLNXSYNC_ISR_CCDONE_MASK GENMASK(15, 14)
+#define XLNXSYNC_ISR_CCSKIP_MASK BIT(16)
+#define XLNXSYNC_ISR_CCVALID_MASK BIT(17)
+
+#define XLNXSYNC_ISR_LDIFF BIT(18)
+#define XLNXSYNC_ISR_CDIFF BIT(19)
+
+/*
+ * The valid flag is bit 44 of the 64-bit start address,
+ * i.e. bit 12 of the HI address register
+ */
+#define XLNXSYNC_FB_VALID_MASK BIT(12)
+#define XLNXSYNC_FB_HI_ADDR_MASK GENMASK(11, 0)
+
+#define XLNXSYNC_IER_SYNC_FAIL_MASK BIT(0)
+#define XLNXSYNC_IER_WDG_ERR_MASK BIT(1)
+/* Producer */
+#define XLNXSYNC_IER_PLVALID_MASK BIT(5)
+#define XLNXSYNC_IER_PCVALID_MASK BIT(9)
+/* Consumer */
+#define XLNXSYNC_IER_CLVALID_MASK BIT(13)
+#define XLNXSYNC_IER_CCVALID_MASK BIT(17)
+/* Diff */
+#define XLNXSYNC_IER_LDIFF BIT(18)
+#define XLNXSYNC_IER_CDIFF BIT(19)
+
+#define XLNXSYNC_IER_ALL_MASK (XLNXSYNC_IER_SYNC_FAIL_MASK |\
+ XLNXSYNC_IER_WDG_ERR_MASK |\
+ XLNXSYNC_IER_PLVALID_MASK |\
+ XLNXSYNC_IER_PCVALID_MASK |\
+ XLNXSYNC_IER_CLVALID_MASK |\
+ XLNXSYNC_IER_CCVALID_MASK |\
+ XLNXSYNC_IER_LDIFF |\
+ XLNXSYNC_IER_CDIFF)
+
+/* Other macros */
+#define XLNXSYNC_CHAN_OFFSET 0x100
+
+#define XLNXSYNC_DEVNAME_LEN (32)
+
+#define XLNXSYNC_DRIVER_NAME "xlnxsync"
+#define XLNXSYNC_DRIVER_VERSION "0.1"
+
+#define XLNXSYNC_DEV_MAX 256
+
+/* Module Parameters */
+static struct class *xlnxsync_class;
+static dev_t xlnxsync_devt;
+/* Used to keep track of sync devices */
+static DEFINE_IDA(xs_ida);
+
+/**
+ * struct xlnxsync_device - Xilinx Synchronizer struct
+ * @chdev: Character device driver struct
+ * @dev: Pointer to device
+ * @iomem: Pointer to the register space
+ * @sync_mutex: Mutex used to serialize ioctl calls
+ * @wq_fbdone: wait queue for frame buffer done events
+ * @wq_error: wait queue for error events
+ * @l_done: Luma done result array
+ * @c_done: Chroma done result array
+ * @sync_err: Capture synchronization error per channel
+ * @wdg_err: Capture watchdog error per channel
+ * @ldiff_err: Luma buffer diff > 1
+ * @cdiff_err: Chroma buffer diff > 1
+ * @axi_clk: Pointer to clock structure for axilite clock
+ * @p_clk: Pointer to clock structure for producer clock
+ * @c_clk: Pointer to clock structure for consumer clock
+ * @user_count: Usage count
+ * @reserved: Channel reserved status
+ * @irq: IRQ number
+ * @irq_lock: Spinlock used to protect access to sync and watchdog error
+ * @minor: device id count
+ * @config: IP config struct
+ *
+ * This structure contains the device driver related parameters
+ */
+struct xlnxsync_device {
+ struct cdev chdev;
+ struct device *dev;
+ void __iomem *iomem;
+ /* sync_mutex is used to serialize ioctl calls */
+ struct mutex sync_mutex;
+ wait_queue_head_t wq_fbdone;
+ wait_queue_head_t wq_error;
+ bool l_done[XLNXSYNC_MAX_ENC_CHAN][XLNXSYNC_BUF_PER_CHAN][XLNXSYNC_IO];
+ bool c_done[XLNXSYNC_MAX_ENC_CHAN][XLNXSYNC_BUF_PER_CHAN][XLNXSYNC_IO];
+ bool sync_err[XLNXSYNC_MAX_ENC_CHAN];
+ bool wdg_err[XLNXSYNC_MAX_ENC_CHAN];
+ bool ldiff_err[XLNXSYNC_MAX_ENC_CHAN];
+ bool cdiff_err[XLNXSYNC_MAX_ENC_CHAN];
+ struct clk *axi_clk;
+ struct clk *p_clk;
+ struct clk *c_clk;
+ atomic_t user_count;
+ bool reserved[XLNXSYNC_MAX_ENC_CHAN];
+ int irq;
+ /* irq_lock is used to protect access to sync_err and wdg_err */
+ spinlock_t irq_lock;
+ int minor;
+ struct xlnxsync_config config;
+};
+
+/**
+ * struct xlnxsync_ctx - Synchronizer context struct
+ * @dev: Xilinx synchronizer device struct
+ * @chan_id: Channel id
+ *
+ * This structure contains the device driver related parameters
+ */
+struct xlnxsync_ctx {
+ struct xlnxsync_device *dev;
+ u32 chan_id;
+};
+
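+/*
+ * Per-channel register accessors: each channel's register bank is
+ * XLNXSYNC_CHAN_OFFSET bytes after the previous one.
+ */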
+static inline u32 xlnxsync_read(struct xlnxsync_device *dev, u32 chan, u32 reg)
+{
+ return ioread32(dev->iomem + (chan * XLNXSYNC_CHAN_OFFSET) + reg);
+}
+
+static inline void xlnxsync_write(struct xlnxsync_device *dev, u32 chan,
+ u32 reg, u32 val)
+{
+ iowrite32(val, dev->iomem + (chan * XLNXSYNC_CHAN_OFFSET) + reg);
+}
+
+static inline void xlnxsync_clr(struct xlnxsync_device *dev, u32 chan, u32 reg,
+ u32 clr)
+{
+ xlnxsync_write(dev, chan, reg, xlnxsync_read(dev, chan, reg) & ~clr);
+}
+
+static inline void xlnxsync_set(struct xlnxsync_device *dev, u32 chan, u32 reg,
+ u32 set)
+{
+ xlnxsync_write(dev, chan, reg, xlnxsync_read(dev, chan, reg) | set);
+}
+
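+/*
+ * A framebuffer slot is considered done when the valid bits in both the
+ * luma and chroma HI start-address registers are clear for the given
+ * channel, buffer and direction (producer/consumer).
+ */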
+static bool xlnxsync_is_buf_done(struct xlnxsync_device *dev,
+ u32 channel, u32 buf, u32 io)
+{
+ u32 luma_valid, chroma_valid;
+ u32 reg_laddr, reg_caddr;
+
+ switch (io) {
+ case XLNXSYNC_PROD:
+ reg_laddr = XLNXSYNC_PL_START_HI_REG;
+ reg_caddr = XLNXSYNC_PC_START_HI_REG;
+ break;
+ case XLNXSYNC_CONS:
+ reg_laddr = XLNXSYNC_CL_START_HI_REG;
+ reg_caddr = XLNXSYNC_CC_START_HI_REG;
+ break;
+ default:
+ return false;
+ }
+
+ luma_valid = xlnxsync_read(dev, channel, reg_laddr + (buf << 3)) &
+ XLNXSYNC_FB_VALID_MASK;
+ chroma_valid = xlnxsync_read(dev, channel, reg_caddr + (buf << 3)) &
+ XLNXSYNC_FB_VALID_MASK;
+ if (!luma_valid && !chroma_valid)
+ return true;
+
+ return false;
+}
+
+static void xlnxsync_reset_chan(struct xlnxsync_device *dev, u32 chan)
+{
+ u8 num_retries = 50;
+
+ xlnxsync_set(dev, chan, XLNXSYNC_CTRL_REG, XLNXSYNC_CTRL_SOFTRESET);
+ /* Wait for a maximum of ~100ms to flush pending transactions */
+ while (num_retries--) {
+ if (!(xlnxsync_read(dev, chan, XLNXSYNC_CTRL_REG) &
+ XLNXSYNC_CTRL_SOFTRESET))
+ break;
+ usleep_range(2000, 2100);
+ }
+}
+
+static void xlnxsync_reset(struct xlnxsync_device *dev)
+{
+ u32 i;
+
+ for (i = 0; i < dev->config.max_channels; i++)
+ xlnxsync_reset_chan(dev, i);
+}
+
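+/*
+ * Resolve a dma-buf fd to the DMA address of its first scatterlist
+ * entry. The attachment is mapped just long enough to read the address
+ * and released before returning; 0 is returned on failure.
+ */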
+static dma_addr_t xlnxsync_get_phy_addr(struct xlnxsync_device *dev,
+ u32 fd)
+{
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ dma_addr_t phy_addr = 0;
+
+ dbuf = dma_buf_get(fd);
+ if (IS_ERR(dbuf)) {
+ dev_err(dev->dev, "%s : Failed to get dma buf\n", __func__);
+ goto get_phy_addr_err;
+ }
+
+ attach = dma_buf_attach(dbuf, dev->dev);
+ if (IS_ERR(attach)) {
+ dev_err(dev->dev, "%s : Failed to attach buf\n", __func__);
+ goto fail_attach;
+ }
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ dev_err(dev->dev, "%s : Failed to attach map\n", __func__);
+ goto fail_map;
+ }
+
+ phy_addr = sg_dma_address(sgt->sgl);
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+
+fail_map:
+ dma_buf_detach(dbuf, attach);
+fail_attach:
+ dma_buf_put(dbuf);
+get_phy_addr_err:
+ return phy_addr;
+}
+
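+/*
+ * Copy the channel configuration from user space, validate the ioctl
+ * header version and channel id, resolve the dma-buf fd to a base
+ * address, then program the per-plane start/end/margin registers for
+ * the requested (or auto-searched) framebuffer slot and set its valid
+ * bits.
+ */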
+static int xlnxsync_config_channel(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ struct xlnxsync_chan_config cfg;
+ int ret, i = 0, j;
+ dma_addr_t phy_start_address;
+ u64 luma_start_address[XLNXSYNC_IO];
+ u64 chroma_start_address[XLNXSYNC_IO];
+ u64 luma_end_address[XLNXSYNC_IO];
+ u64 chroma_end_address[XLNXSYNC_IO];
+
+ ret = copy_from_user(&cfg, arg, sizeof(cfg));
+ if (ret) {
+ dev_err(dev->dev, "%s : Failed to copy from user\n", __func__);
+		return -EFAULT;
+ }
+
+ if (cfg.hdr_ver != XLNXSYNC_IOCTL_HDR_VER) {
+ dev_err(dev->dev, "%s : ioctl version mismatch\n", __func__);
+ dev_err(dev->dev,
+ "ioctl ver = 0x%llx expected ver = 0x%llx\n",
+ cfg.hdr_ver, (u64)XLNXSYNC_IOCTL_HDR_VER);
+ return -EINVAL;
+ }
+
+ /* Calculate luma/chroma physical addresses */
+ phy_start_address = xlnxsync_get_phy_addr(dev, cfg.dma_fd);
+ if (!phy_start_address) {
+ dev_err(dev->dev, "%s : Failed to obtain physical address\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ luma_start_address[XLNXSYNC_PROD] =
+ cfg.luma_start_offset[XLNXSYNC_PROD] + phy_start_address;
+ luma_start_address[XLNXSYNC_CONS] =
+ cfg.luma_start_offset[XLNXSYNC_CONS] + phy_start_address;
+ chroma_start_address[XLNXSYNC_PROD] =
+ cfg.chroma_start_offset[XLNXSYNC_PROD] + phy_start_address;
+	chroma_start_address[XLNXSYNC_CONS] =
+		cfg.chroma_start_offset[XLNXSYNC_CONS] + phy_start_address;
+ luma_end_address[XLNXSYNC_PROD] =
+ cfg.luma_end_offset[XLNXSYNC_PROD] + phy_start_address;
+ luma_end_address[XLNXSYNC_CONS] =
+ cfg.luma_end_offset[XLNXSYNC_CONS] + phy_start_address;
+ chroma_end_address[XLNXSYNC_PROD] =
+ cfg.chroma_end_offset[XLNXSYNC_PROD] + phy_start_address;
+ chroma_end_address[XLNXSYNC_CONS] =
+ cfg.chroma_end_offset[XLNXSYNC_CONS] + phy_start_address;
+
+ if (cfg.channel_id >= dev->config.max_channels) {
+ dev_err(dev->dev, "%s : Incorrect channel id %d\n",
+ __func__, cfg.channel_id);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev->dev, "Channel id = %d", cfg.channel_id);
+ dev_dbg(dev->dev, "Producer address\n");
+ dev_dbg(dev->dev, "Luma Start Addr = 0x%llx End Addr = 0x%llx Margin = 0x%08x\n",
+ luma_start_address[XLNXSYNC_PROD],
+ luma_end_address[XLNXSYNC_PROD], cfg.luma_margin);
+ dev_dbg(dev->dev, "Chroma Start Addr = 0x%llx End Addr = 0x%llx Margin = 0x%08x\n",
+ chroma_start_address[XLNXSYNC_PROD],
+ chroma_end_address[XLNXSYNC_PROD], cfg.chroma_margin);
+ dev_dbg(dev->dev, "FB id = %d IsMono = %d\n",
+ cfg.fb_id[XLNXSYNC_PROD], cfg.ismono[XLNXSYNC_PROD]);
+ dev_dbg(dev->dev, "Consumer address\n");
+ dev_dbg(dev->dev, "Luma Start Addr = 0x%llx End Addr = 0x%llx\n",
+ luma_start_address[XLNXSYNC_CONS],
+ luma_end_address[XLNXSYNC_CONS]);
+ dev_dbg(dev->dev, "Chroma Start Addr = 0x%llx End Addr = 0x%llx\n",
+ chroma_start_address[XLNXSYNC_CONS],
+ chroma_end_address[XLNXSYNC_CONS]);
+ dev_dbg(dev->dev, "FB id = %d IsMono = %d\n",
+ cfg.fb_id[XLNXSYNC_CONS], cfg.ismono[XLNXSYNC_CONS]);
+
+ for (j = 0; j < XLNXSYNC_IO; j++) {
+ u32 l_start_reg, l_end_reg, c_start_reg, c_end_reg;
+
+ if (cfg.fb_id[j] == XLNXSYNC_AUTO_SEARCH) {
+ /*
+ * When fb_id is 0xFF auto search for free fb
+ * in a channel
+ */
+ dev_dbg(dev->dev, "%s : auto search free fb\n",
+ __func__);
+ for (i = 0; i < XLNXSYNC_BUF_PER_CHAN; i++) {
+ if (xlnxsync_is_buf_done(dev, cfg.channel_id, i,
+ j))
+ break;
+ dev_dbg(dev->dev, "Channel %d %s FB %d is busy\n",
+ cfg.channel_id, j ? "prod" : "cons", i);
+ }
+
+ if (i == XLNXSYNC_BUF_PER_CHAN)
+ return -EBUSY;
+
+		} else if (cfg.fb_id[j] >= 0 &&
+			   cfg.fb_id[j] < XLNXSYNC_BUF_PER_CHAN) {
+			/* If fb_id is specified, check its availability */
+			if (!(xlnxsync_is_buf_done(dev, cfg.channel_id,
+						   cfg.fb_id[j], j))) {
+				dev_dbg(dev->dev,
+					"%s : %s FB %d in channel %d is busy!\n",
+					__func__, j ? "prod" : "cons",
+					cfg.fb_id[j], cfg.channel_id);
+				return -EBUSY;
+			}
+			/* Use the requested slot for the register writes below */
+			i = cfg.fb_id[j];
+			dev_dbg(dev->dev, "%s : Configure fb %d\n",
+				__func__, i);
+ } else {
+ /* Invalid fb_id passed */
+ dev_err(dev->dev, "Invalid FB id %d for configuration!\n",
+ cfg.fb_id[j]);
+ return -EINVAL;
+ }
+
+ if (j == XLNXSYNC_PROD) {
+ l_start_reg = XLNXSYNC_PL_START_LO_REG;
+ l_end_reg = XLNXSYNC_PL_END_LO_REG;
+ c_start_reg = XLNXSYNC_PC_START_LO_REG;
+ c_end_reg = XLNXSYNC_PC_END_LO_REG;
+ } else {
+ l_start_reg = XLNXSYNC_CL_START_LO_REG;
+ l_end_reg = XLNXSYNC_CL_END_LO_REG;
+ c_start_reg = XLNXSYNC_CC_START_LO_REG;
+ c_end_reg = XLNXSYNC_CC_END_LO_REG;
+ }
+
+ /* Start Address */
+ xlnxsync_write(dev, cfg.channel_id, l_start_reg + (i << 3),
+ lower_32_bits(luma_start_address[j]));
+
+ xlnxsync_write(dev, cfg.channel_id,
+ (l_start_reg + 4) + (i << 3),
+ upper_32_bits(luma_start_address[j]) &
+ XLNXSYNC_FB_HI_ADDR_MASK);
+
+ /* End Address */
+ xlnxsync_write(dev, cfg.channel_id, l_end_reg + (i << 3),
+ lower_32_bits(luma_end_address[j]));
+ xlnxsync_write(dev, cfg.channel_id, l_end_reg + 4 + (i << 3),
+ upper_32_bits(luma_end_address[j]));
+
+ /* Set margin */
+ xlnxsync_write(dev, cfg.channel_id,
+ XLNXSYNC_L_MARGIN_REG + (i << 2),
+ cfg.luma_margin);
+
+ if (!cfg.ismono[j]) {
+ dev_dbg(dev->dev, "%s : Not monochrome. Program Chroma\n",
+ __func__);
+
+ /* Chroma Start Address */
+ xlnxsync_write(dev, cfg.channel_id,
+ c_start_reg + (i << 3),
+ lower_32_bits(chroma_start_address[j]));
+
+ xlnxsync_write(dev, cfg.channel_id,
+ c_start_reg + 4 + (i << 3),
+ upper_32_bits(chroma_start_address[j]) &
+ XLNXSYNC_FB_HI_ADDR_MASK);
+
+ /* Chroma End Address */
+ xlnxsync_write(dev, cfg.channel_id,
+ c_end_reg + (i << 3),
+ lower_32_bits(chroma_end_address[j]));
+
+ xlnxsync_write(dev, cfg.channel_id,
+ c_end_reg + 4 + (i << 3),
+ upper_32_bits(chroma_end_address[j]));
+
+ /* Chroma Margin */
+ xlnxsync_write(dev, cfg.channel_id,
+ XLNXSYNC_C_MARGIN_REG + (i << 2),
+ cfg.chroma_margin);
+
+ /* Set the Valid bit */
+ xlnxsync_set(dev, cfg.channel_id,
+ c_start_reg + 4 + (i << 3),
+ XLNXSYNC_FB_VALID_MASK);
+ }
+
+ /* Set the Valid bit */
+ xlnxsync_set(dev, cfg.channel_id, l_start_reg + 4 + (i << 3),
+ XLNXSYNC_FB_VALID_MASK);
+ }
+
+ for (i = 0; i < XLNXSYNC_MAX_CORES; i++) {
+ iowrite32(cfg.luma_core_offset[i],
+ dev->iomem + XLNXSYNC_LCOREOFF_REG +
+ (i * XLNXSYNC_COREOFF_NEXT));
+
+ iowrite32(cfg.chroma_core_offset[i],
+ dev->iomem + XLNXSYNC_CCOREOFF_REG +
+ (i * XLNXSYNC_COREOFF_NEXT));
+ }
+
+ return 0;
+}
+
+static int xlnxsync_get_channel_status(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ int ret;
+ u32 i, j, k;
+ unsigned long flags;
+	struct xlnxsync_stat status = {0};
+
+ for (i = 0; i < dev->config.max_channels; i++) {
+ /* Update Buffers status */
+ for (j = 0; j < XLNXSYNC_BUF_PER_CHAN; j++) {
+ for (k = 0; k < XLNXSYNC_IO; k++) {
+ if (xlnxsync_is_buf_done(dev, i, j, k))
+ status.fbdone[i][j][k] = true;
+ else
+ status.fbdone[i][j][k] = false;
+ }
+ }
+
+ /* Update channel enable status */
+ if (xlnxsync_read(dev, i, XLNXSYNC_CTRL_REG) &
+ XLNXSYNC_CTRL_ENABLE_MASK)
+ status.enable[i] = true;
+
+ /* Update channel error status */
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ status.sync_err[i] = dev->sync_err[i];
+ status.wdg_err[i] = dev->wdg_err[i];
+ status.ldiff_err[i] = dev->ldiff_err[i];
+ status.cdiff_err[i] = dev->cdiff_err[i];
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+ }
+
+ status.hdr_ver = XLNXSYNC_IOCTL_HDR_VER;
+
+	ret = copy_to_user(arg, &status, sizeof(status));
+	if (ret) {
+		dev_err(dev->dev, "%s: failed to copy result data to user\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int xlnxsync_enable(struct xlnxsync_device *dev, u32 channel,
+ bool enable)
+{
+ if (dev->config.hdr_ver != XLNXSYNC_IOCTL_HDR_VER) {
+ dev_err(dev->dev, "ioctl not supported!\n");
+ return -EINVAL;
+ }
+
+	/* Validate the channel against the max channel count from DT */
+ if (channel >= dev->config.max_channels) {
+ dev_err(dev->dev, "Invalid channel %d. Max channels = %d!\n",
+ channel, dev->config.max_channels);
+ return -EINVAL;
+ }
+
+ if (enable) {
+ dev_dbg(dev->dev, "Enabling %d channel\n", channel);
+ xlnxsync_set(dev, channel, XLNXSYNC_IER_REG,
+ XLNXSYNC_IER_ALL_MASK);
+ xlnxsync_set(dev, channel, XLNXSYNC_CTRL_REG,
+ XLNXSYNC_CTRL_ENABLE_MASK |
+ XLNXSYNC_CTRL_INTR_EN_MASK);
+ } else {
+ dev_dbg(dev->dev, "Disabling %d channel\n", channel);
+ xlnxsync_reset_chan(dev, channel);
+ xlnxsync_clr(dev, channel, XLNXSYNC_CTRL_REG,
+ XLNXSYNC_CTRL_ENABLE_MASK |
+ XLNXSYNC_CTRL_INTR_EN_MASK);
+ xlnxsync_clr(dev, channel, XLNXSYNC_IER_REG,
+ XLNXSYNC_IER_ALL_MASK);
+ dev->reserved[channel] = false;
+ }
+
+ return 0;
+}
+
+static int xlnxsync_get_config(struct xlnxsync_device *dev, void __user *arg)
+{
+ struct xlnxsync_config cfg;
+ int ret;
+
+ cfg.encode = dev->config.encode;
+ cfg.max_channels = dev->config.max_channels;
+ cfg.hdr_ver = XLNXSYNC_IOCTL_HDR_VER;
+
+ dev_dbg(dev->dev, "IP Config : encode = %d max_channels = %d\n",
+ cfg.encode, cfg.max_channels);
+ dev_dbg(dev->dev, "ioctl version = 0x%llx\n", cfg.hdr_ver);
+ ret = copy_to_user(arg, &cfg, sizeof(cfg));
+ if (ret) {
+ dev_err(dev->dev, "%s: failed to copy result data to user\n",
+ __func__);
+		return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int xlnxsync_clr_chan_err(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ struct xlnxsync_clr_err errcfg;
+ int ret;
+ unsigned long flags;
+
+ ret = copy_from_user(&errcfg, arg, sizeof(errcfg));
+ if (ret) {
+ dev_err(dev->dev, "%s : Failed to copy from user\n", __func__);
+		return -EFAULT;
+ }
+
+ if (errcfg.hdr_ver != XLNXSYNC_IOCTL_HDR_VER) {
+ dev_err(dev->dev, "%s : ioctl version mismatch\n", __func__);
+ dev_err(dev->dev,
+ "ioctl ver = 0x%llx expected ver = 0x%llx\n",
+ errcfg.hdr_ver, (u64)XLNXSYNC_IOCTL_HDR_VER);
+ return -EINVAL;
+ }
+
+ if (errcfg.channel_id >= dev->config.max_channels) {
+ dev_err(dev->dev, "%s : Incorrect channel id %d\n",
+ __func__, errcfg.channel_id);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev->dev, "%s : Clearing %d channel errors\n",
+ __func__, errcfg.channel_id);
+ /* Clear channel error status */
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ if (dev->sync_err[errcfg.channel_id])
+ dev->sync_err[errcfg.channel_id] = false;
+
+ if (dev->wdg_err[errcfg.channel_id])
+ dev->wdg_err[errcfg.channel_id] = false;
+
+ if (dev->ldiff_err[errcfg.channel_id])
+ dev->ldiff_err[errcfg.channel_id] = false;
+
+ if (dev->cdiff_err[errcfg.channel_id])
+ dev->cdiff_err[errcfg.channel_id] = false;
+
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ return 0;
+}
+
+static int xlnxsync_get_fbdone_status(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+	struct xlnxsync_fbdone fbdone_stat = {0};
+ int ret, i, j, k;
+
+ fbdone_stat.hdr_ver = XLNXSYNC_IOCTL_HDR_VER;
+
+ for (i = 0; i < dev->config.max_channels; i++)
+ for (j = 0; j < XLNXSYNC_BUF_PER_CHAN; j++)
+ for (k = 0; k < XLNXSYNC_IO; k++)
+ if (dev->l_done[i][j][k] &&
+ dev->c_done[i][j][k])
+ fbdone_stat.status[i][j][k] = true;
+
+	ret = copy_to_user(arg, &fbdone_stat, sizeof(fbdone_stat));
+	if (ret) {
+		dev_err(dev->dev, "%s: failed to copy result data to user\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int xlnxsync_clr_fbdone_status(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ struct xlnxsync_fbdone fbd;
+ int ret, i, j, k;
+ unsigned long flags;
+
+ ret = copy_from_user(&fbd, arg, sizeof(fbd));
+ if (ret) {
+ dev_err(dev->dev, "%s : Failed to copy from user\n", __func__);
+ return ret;
+ }
+
+ if (fbd.hdr_ver != XLNXSYNC_IOCTL_HDR_VER) {
+ dev_err(dev->dev, "%s : ioctl version mismatch\n", __func__);
+ dev_err(dev->dev,
+ "ioctl ver = 0x%llx expected ver = 0x%llx\n",
+ fbd.hdr_ver, (u64)XLNXSYNC_IOCTL_HDR_VER);
+ return -EINVAL;
+ }
+
+ /* Clear channel error status */
+ spin_lock_irqsave(&dev->irq_lock, flags);
+
+ for (i = 0; i < dev->config.max_channels; i++) {
+ for (j = 0; j < XLNXSYNC_BUF_PER_CHAN; j++) {
+ for (k = 0; k < XLNXSYNC_IO; k++) {
+ fbd.status[i][j][k] = false;
+ dev->l_done[i][j][k] = false;
+ dev->c_done[i][j][k] = false;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ return 0;
+}
+
+static int xlnxsync_reserve_get_channel(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ int ret;
+ u8 i;
+
+ if (dev->config.hdr_ver != XLNXSYNC_IOCTL_HDR_VER) {
+ dev_err(dev->dev, "ioctl not supported!\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dev->config.max_channels; i++) {
+ if (!dev->reserved[i])
+ break;
+ }
+
+ if (i == dev->config.max_channels) {
+ ret = -EBUSY;
+ dev_dbg(dev->dev, "No channel is free!\n");
+ return ret;
+ }
+
+ dev_dbg(dev->dev, "Reserving channel %d\n", i);
+ dev->reserved[i] = true;
+ ret = copy_to_user(arg, &i, sizeof(i));
+ if (ret) {
+ dev_err(dev->dev, "%s: failed to copy result data to user\n",
+ __func__);
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+static long xlnxsync_ioctl(struct file *fptr, unsigned int cmd,
+ unsigned long data)
+{
+ int ret = -EINVAL;
+ u32 channel = data;
+ void __user *arg = (void __user *)data;
+ struct xlnxsync_ctx *ctx = fptr->private_data;
+ struct xlnxsync_device *xlnxsync_dev;
+
+ xlnxsync_dev = ctx->dev;
+ if (!xlnxsync_dev) {
+ pr_err("%s: File op error\n", __func__);
+ return -EIO;
+ }
+
+ dev_dbg(xlnxsync_dev->dev, "ioctl = 0x%08x\n", cmd);
+
+ mutex_lock(&xlnxsync_dev->sync_mutex);
+
+ switch (cmd) {
+ case XLNXSYNC_GET_CFG:
+ ret = xlnxsync_get_config(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_GET_CHAN_STATUS:
+ ret = xlnxsync_get_channel_status(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_SET_CHAN_CONFIG:
+ ret = xlnxsync_config_channel(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_CHAN_ENABLE:
+ ctx->chan_id = channel;
+ ret = xlnxsync_enable(xlnxsync_dev, channel, true);
+ break;
+ case XLNXSYNC_CHAN_DISABLE:
+ ret = xlnxsync_enable(xlnxsync_dev, channel, false);
+ break;
+ case XLNXSYNC_CLR_CHAN_ERR:
+ ret = xlnxsync_clr_chan_err(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_GET_CHAN_FBDONE_STAT:
+ ret = xlnxsync_get_fbdone_status(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_CLR_CHAN_FBDONE_STAT:
+ ret = xlnxsync_clr_fbdone_status(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_RESERVE_GET_CHAN_ID:
+ ret = xlnxsync_reserve_get_channel(xlnxsync_dev, arg);
+ break;
+ }
+
+ mutex_unlock(&xlnxsync_dev->sync_mutex);
+
+ return ret;
+}
+
+static __poll_t xlnxsync_poll(struct file *fptr, poll_table *wait)
+{
+ u32 j, k;
+ bool err_event, framedone_event;
+ __poll_t ret = 0, req_events = poll_requested_events(wait);
+ unsigned long flags;
+ struct xlnxsync_ctx *ctx = fptr->private_data;
+ struct xlnxsync_device *dev;
+
+ dev = ctx->dev;
+ if (!dev) {
+ pr_err("%s: File op error\n", __func__);
+ return -EIO;
+ }
+
+ dev_dbg_ratelimited(dev->dev, "%s : entered req_events = 0x%x!\n",
+ __func__, req_events);
+
+	if (!(req_events & (EPOLLPRI | EPOLLIN)))
+ return 0;
+
+ if (req_events & EPOLLPRI) {
+ poll_wait(fptr, &dev->wq_error, wait);
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ err_event = false;
+ if (dev->sync_err[ctx->chan_id] || dev->wdg_err[ctx->chan_id] ||
+ dev->ldiff_err[ctx->chan_id] ||
+ dev->cdiff_err[ctx->chan_id])
+ err_event = true;
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+		if (err_event) {
+			dev_dbg_ratelimited(dev->dev,
+					    "%s : error event occurred!\n",
+					    __func__);
+			ret |= EPOLLPRI;
+		}
+ }
+
+ if (req_events & EPOLLIN) {
+ poll_wait(fptr, &dev->wq_fbdone, wait);
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ framedone_event = false;
+ for (j = 0; j < XLNXSYNC_BUF_PER_CHAN; j++) {
+ for (k = 0; k < XLNXSYNC_IO; k++) {
+ if (dev->l_done[ctx->chan_id][j][k] &&
+ dev->c_done[ctx->chan_id][j][k])
+ framedone_event = true;
+ }
+ }
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+		if (framedone_event) {
+			dev_dbg_ratelimited(dev->dev,
+					    "%s : framedone event occurred!\n",
+					    __func__);
+			ret |= EPOLLIN;
+		}
+ }
+
+ return ret;
+}
+
+static int xlnxsync_open(struct inode *iptr, struct file *fptr)
+{
+ struct xlnxsync_device *xlnxsync;
+ struct xlnxsync_ctx *ctx;
+
+ xlnxsync = container_of(iptr->i_cdev, struct xlnxsync_device, chdev);
+ if (!xlnxsync) {
+ pr_err("%s: failed to get xlnxsync driver handle\n", __func__);
+ return -EAGAIN;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = xlnxsync;
+ fptr->private_data = ctx;
+
+ atomic_inc(&xlnxsync->user_count);
+ dev_dbg(xlnxsync->dev, "%s: tid=%d Opened with user count = %d\n",
+ __func__, current->pid, atomic_read(&xlnxsync->user_count));
+
+ return 0;
+}
+
+static int xlnxsync_release(struct inode *iptr, struct file *fptr)
+{
+ struct xlnxsync_device *xlnxsync;
+ struct xlnxsync_ctx *ctx = fptr->private_data;
+ unsigned int i, j;
+
+ xlnxsync = container_of(iptr->i_cdev, struct xlnxsync_device, chdev);
+ if (!xlnxsync) {
+ pr_err("%s: failed to get xlnxsync driver handle", __func__);
+ return -EAGAIN;
+ }
+
+ dev_dbg(xlnxsync->dev, "%s: tid=%d user count = %d chan_id = %d\n",
+ __func__, current->pid, atomic_read(&xlnxsync->user_count),
+ ctx->chan_id);
+
+ if (xlnxsync_read(xlnxsync, ctx->chan_id, XLNXSYNC_CTRL_REG) &
+ XLNXSYNC_CTRL_ENABLE_MASK) {
+ dev_dbg(xlnxsync->dev, "Disabling %d channel\n", ctx->chan_id);
+ xlnxsync_reset_chan(xlnxsync, ctx->chan_id);
+ xlnxsync_clr(xlnxsync, ctx->chan_id, XLNXSYNC_CTRL_REG,
+ XLNXSYNC_CTRL_ENABLE_MASK |
+ XLNXSYNC_CTRL_INTR_EN_MASK);
+ xlnxsync_clr(xlnxsync, ctx->chan_id, XLNXSYNC_IER_REG,
+ XLNXSYNC_IER_ALL_MASK);
+ }
+
+ xlnxsync->reserved[ctx->chan_id] = false;
+ xlnxsync->sync_err[ctx->chan_id] = false;
+ xlnxsync->wdg_err[ctx->chan_id] = false;
+ xlnxsync->ldiff_err[ctx->chan_id] = false;
+ xlnxsync->cdiff_err[ctx->chan_id] = false;
+ for (i = 0; i < XLNXSYNC_BUF_PER_CHAN; i++) {
+ for (j = 0; j < XLNXSYNC_IO; j++) {
+ xlnxsync->l_done[ctx->chan_id][i][j] = false;
+ xlnxsync->c_done[ctx->chan_id][i][j] = false;
+ }
+ }
+
+ if (atomic_dec_and_test(&xlnxsync->user_count)) {
+ xlnxsync_reset(xlnxsync);
+ dev_dbg(xlnxsync->dev,
+ "%s: tid=%d Stopping and clearing device",
+ __func__, current->pid);
+ }
+
+ kfree(ctx);
+ return 0;
+}
+
+static const struct file_operations xlnxsync_fops = {
+ .open = xlnxsync_open,
+ .release = xlnxsync_release,
+ .unlocked_ioctl = xlnxsync_ioctl,
+ .poll = xlnxsync_poll,
+};
+
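+/*
+ * Threaded interrupt handler: latch per-channel error and luma/chroma
+ * done flags from each channel's ISR register, then wake any pollers
+ * waiting on the error or framebuffer-done queues.
+ */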
+static irqreturn_t xlnxsync_irq_handler(int irq, void *data)
+{
+ struct xlnxsync_device *xlnxsync = (struct xlnxsync_device *)data;
+ u32 val, i;
+ bool err_event;
+ bool framedone_event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xlnxsync->irq_lock, flags);
+ err_event = false;
+ framedone_event = false;
+ for (i = 0; i < xlnxsync->config.max_channels; i++) {
+ u32 j, k;
+
+ val = xlnxsync_read(xlnxsync, i, XLNXSYNC_ISR_REG);
+ xlnxsync_write(xlnxsync, i, XLNXSYNC_ISR_REG, val);
+
+ if (val & XLNXSYNC_ISR_SYNC_FAIL_MASK)
+ xlnxsync->sync_err[i] = true;
+ if (val & XLNXSYNC_ISR_WDG_ERR_MASK)
+ xlnxsync->wdg_err[i] = true;
+ if (val & XLNXSYNC_ISR_LDIFF)
+ xlnxsync->ldiff_err[i] = true;
+ if (val & XLNXSYNC_ISR_CDIFF)
+ xlnxsync->cdiff_err[i] = true;
+ if (xlnxsync->sync_err[i] || xlnxsync->wdg_err[i] ||
+ xlnxsync->ldiff_err[i] || xlnxsync->cdiff_err[i])
+ err_event = true;
+
+ if (val & XLNXSYNC_ISR_PLDONE_MASK) {
+ j = (val & XLNXSYNC_ISR_PLDONE_MASK) >>
+ XLNXSYNC_ISR_PLDONE_SHIFT;
+
+ xlnxsync->l_done[i][j][XLNXSYNC_PROD] = true;
+ }
+
+ if (val & XLNXSYNC_ISR_PCDONE_MASK) {
+ j = (val & XLNXSYNC_ISR_PCDONE_MASK) >>
+ XLNXSYNC_ISR_PCDONE_SHIFT;
+
+ xlnxsync->c_done[i][j][XLNXSYNC_PROD] = true;
+ }
+
+ if (val & XLNXSYNC_ISR_CLDONE_MASK) {
+ j = (val & XLNXSYNC_ISR_CLDONE_MASK) >>
+ XLNXSYNC_ISR_CLDONE_SHIFT;
+
+ xlnxsync->l_done[i][j][XLNXSYNC_CONS] = true;
+ }
+
+ if (val & XLNXSYNC_ISR_CCDONE_MASK) {
+ j = (val & XLNXSYNC_ISR_CCDONE_MASK) >>
+ XLNXSYNC_ISR_CCDONE_SHIFT;
+
+ xlnxsync->c_done[i][j][XLNXSYNC_CONS] = true;
+ }
+
+ for (j = 0; j < XLNXSYNC_BUF_PER_CHAN; j++) {
+ for (k = 0; k < XLNXSYNC_IO; k++) {
+ if (xlnxsync->l_done[i][j][k] &&
+ xlnxsync->c_done[i][j][k])
+ framedone_event = true;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&xlnxsync->irq_lock, flags);
+
+ if (err_event) {
+ dev_dbg_ratelimited(xlnxsync->dev, "%s : error occurred\n",
+ __func__);
+ wake_up_interruptible(&xlnxsync->wq_error);
+ }
+
+ if (framedone_event) {
+ dev_dbg_ratelimited(xlnxsync->dev, "%s : framedone occurred\n",
+ __func__);
+ wake_up_interruptible(&xlnxsync->wq_fbdone);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int xlnxsync_parse_dt_prop(struct xlnxsync_device *xlnxsync)
+{
+ struct device_node *node = xlnxsync->dev->of_node;
+ int ret;
+
+ xlnxsync->config.encode = of_property_read_bool(node, "xlnx,encode");
+ dev_dbg(xlnxsync->dev, "synchronizer type = %s\n",
+ xlnxsync->config.encode ? "encode" : "decode");
+
+ ret = of_property_read_u32(node, "xlnx,num-chan",
+ (u32 *)&xlnxsync->config.max_channels);
+ if (ret)
+ return ret;
+
+ dev_dbg(xlnxsync->dev, "max channels = %d\n",
+ xlnxsync->config.max_channels);
+
+ if (xlnxsync->config.max_channels == 0 ||
+ xlnxsync->config.max_channels > XLNXSYNC_MAX_ENC_CHAN) {
+ dev_err(xlnxsync->dev, "Number of channels should be 1 to 4.\n");
+ dev_err(xlnxsync->dev, "Invalid number of channels : %d\n",
+ xlnxsync->config.max_channels);
+ return -EINVAL;
+ }
+
+ if (!xlnxsync->config.encode &&
+ xlnxsync->config.max_channels > XLNXSYNC_MAX_DEC_CHAN) {
+ dev_err(xlnxsync->dev, "Decode can't have more than 2 channels.\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int xlnxsync_clk_setup(struct xlnxsync_device *xlnxsync)
+{
+ int ret;
+
+ xlnxsync->axi_clk = devm_clk_get(xlnxsync->dev, "s_axi_ctrl_aclk");
+ if (IS_ERR(xlnxsync->axi_clk)) {
+ ret = PTR_ERR(xlnxsync->axi_clk);
+ dev_err(xlnxsync->dev, "failed to get axi_aclk (%d)\n", ret);
+ return ret;
+ }
+
+ xlnxsync->p_clk = devm_clk_get(xlnxsync->dev, "s_axi_mm_p_aclk");
+ if (IS_ERR(xlnxsync->p_clk)) {
+ ret = PTR_ERR(xlnxsync->p_clk);
+ dev_err(xlnxsync->dev, "failed to get p_aclk (%d)\n", ret);
+ return ret;
+ }
+
+ xlnxsync->c_clk = devm_clk_get(xlnxsync->dev, "s_axi_mm_aclk");
+ if (IS_ERR(xlnxsync->c_clk)) {
+ ret = PTR_ERR(xlnxsync->c_clk);
+ dev_err(xlnxsync->dev, "failed to get axi_mm (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(xlnxsync->axi_clk);
+ if (ret) {
+ dev_err(xlnxsync->dev, "failed to enable axi_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(xlnxsync->p_clk);
+ if (ret) {
+ dev_err(xlnxsync->dev, "failed to enable p_clk (%d)\n", ret);
+ goto err_pclk;
+ }
+
+ ret = clk_prepare_enable(xlnxsync->c_clk);
+ if (ret) {
+ dev_err(xlnxsync->dev, "failed to enable axi_mm (%d)\n", ret);
+ goto err_cclk;
+ }
+
+ return ret;
+
+err_cclk:
+ clk_disable_unprepare(xlnxsync->p_clk);
+err_pclk:
+ clk_disable_unprepare(xlnxsync->axi_clk);
+
+ return ret;
+}
+
+static int xlnxsync_probe(struct platform_device *pdev)
+{
+ struct xlnxsync_device *xlnxsync;
+ struct device *dc;
+ struct resource *res;
+ int ret;
+
+ xlnxsync = devm_kzalloc(&pdev->dev, sizeof(*xlnxsync), GFP_KERNEL);
+ if (!xlnxsync)
+ return -ENOMEM;
+
+ xlnxsync->minor = ida_simple_get(&xs_ida, 0, XLNXSYNC_DEV_MAX,
+ GFP_KERNEL);
+ if (xlnxsync->minor < 0)
+ return xlnxsync->minor;
+
+ xlnxsync->dev = &pdev->dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Failed to get resource.\n");
+		ret = -ENODEV;
+		goto err_ida;
+	}
+
+	xlnxsync->iomem = devm_ioremap_nocache(xlnxsync->dev, res->start,
+					       resource_size(res));
+	if (!xlnxsync->iomem) {
+		dev_err(&pdev->dev, "ip register mapping failed.\n");
+		ret = -ENOMEM;
+		goto err_ida;
+	}
+
+	ret = xlnxsync_parse_dt_prop(xlnxsync);
+	if (ret < 0)
+		goto err_ida;
+
+ xlnxsync->config.hdr_ver = XLNXSYNC_IOCTL_HDR_VER;
+ dev_info(xlnxsync->dev, "ioctl header version = 0x%llx\n",
+ xlnxsync->config.hdr_ver);
+
+	xlnxsync->irq = irq_of_parse_and_map(xlnxsync->dev->of_node, 0);
+	if (!xlnxsync->irq) {
+		dev_err(xlnxsync->dev, "Unable to parse and get irq.\n");
+		ret = -EINVAL;
+		goto err_ida;
+	}
+	ret = devm_request_threaded_irq(xlnxsync->dev, xlnxsync->irq, NULL,
+					xlnxsync_irq_handler, IRQF_ONESHOT,
+					dev_name(xlnxsync->dev), xlnxsync);
+	if (ret) {
+		dev_err(xlnxsync->dev, "Err = %d Interrupt handler reg failed!\n",
+			ret);
+		goto err_ida;
+	}
+
+	ret = xlnxsync_clk_setup(xlnxsync);
+	if (ret) {
+		dev_err(xlnxsync->dev, "clock setup failed!\n");
+		goto err_ida;
+	}
+
+ init_waitqueue_head(&xlnxsync->wq_fbdone);
+ init_waitqueue_head(&xlnxsync->wq_error);
+ spin_lock_init(&xlnxsync->irq_lock);
+ mutex_init(&xlnxsync->sync_mutex);
+
+ cdev_init(&xlnxsync->chdev, &xlnxsync_fops);
+ xlnxsync->chdev.owner = THIS_MODULE;
+ ret = cdev_add(&xlnxsync->chdev,
+ MKDEV(MAJOR(xlnxsync_devt), xlnxsync->minor), 1);
+ if (ret < 0) {
+ dev_err(xlnxsync->dev, "cdev_add failed");
+ goto clk_err;
+ }
+
+	if (!xlnxsync_class) {
+		dev_err(xlnxsync->dev, "xlnxsync device class not created");
+		ret = -EIO;
+		goto cdev_err;
+	}
+ dc = device_create(xlnxsync_class, xlnxsync->dev,
+ MKDEV(MAJOR(xlnxsync_devt), xlnxsync->minor),
+ xlnxsync, "xlnxsync%d", xlnxsync->minor);
+ if (IS_ERR(dc)) {
+ ret = PTR_ERR(dc);
+ dev_err(xlnxsync->dev, "Unable to create device");
+ goto cdev_err;
+ }
+
+ platform_set_drvdata(pdev, xlnxsync);
+ dev_info(xlnxsync->dev, "Xilinx Synchronizer probe successful!\n");
+
+ return 0;
+
+cdev_err:
+	cdev_del(&xlnxsync->chdev);
+clk_err:
+	clk_disable_unprepare(xlnxsync->c_clk);
+	clk_disable_unprepare(xlnxsync->p_clk);
+	clk_disable_unprepare(xlnxsync->axi_clk);
+err_ida:
+	ida_simple_remove(&xs_ida, xlnxsync->minor);
+
+	return ret;
+}
+
+static int xlnxsync_remove(struct platform_device *pdev)
+{
+ struct xlnxsync_device *xlnxsync = platform_get_drvdata(pdev);
+
+ if (!xlnxsync || !xlnxsync_class)
+ return -EIO;
+
+	device_destroy(xlnxsync_class, MKDEV(MAJOR(xlnxsync_devt),
+					     xlnxsync->minor));
+	cdev_del(&xlnxsync->chdev);
+ clk_disable_unprepare(xlnxsync->c_clk);
+ clk_disable_unprepare(xlnxsync->p_clk);
+ clk_disable_unprepare(xlnxsync->axi_clk);
+ ida_simple_remove(&xs_ida, xlnxsync->minor);
+
+ return 0;
+}
+
+static const struct of_device_id xlnxsync_of_match[] = {
+ { .compatible = "xlnx,sync-ip-1.0", },
+ { /* end of table*/ }
+};
+MODULE_DEVICE_TABLE(of, xlnxsync_of_match);
+
+static struct platform_driver xlnxsync_driver = {
+ .driver = {
+ .name = XLNXSYNC_DRIVER_NAME,
+ .of_match_table = xlnxsync_of_match,
+ },
+ .probe = xlnxsync_probe,
+ .remove = xlnxsync_remove,
+};
+
+static int __init xlnxsync_init_mod(void)
+{
+ int err;
+
+ xlnxsync_class = class_create(THIS_MODULE, XLNXSYNC_DRIVER_NAME);
+ if (IS_ERR(xlnxsync_class)) {
+ pr_err("%s : Unable to create xlnxsync class", __func__);
+ return PTR_ERR(xlnxsync_class);
+ }
+ err = alloc_chrdev_region(&xlnxsync_devt, 0,
+ XLNXSYNC_DEV_MAX, XLNXSYNC_DRIVER_NAME);
+ if (err < 0) {
+ pr_err("%s: Unable to get major number for xlnxsync", __func__);
+ goto err_class;
+ }
+ err = platform_driver_register(&xlnxsync_driver);
+ if (err < 0) {
+ pr_err("%s: Unable to register %s driver",
+ __func__, XLNXSYNC_DRIVER_NAME);
+ goto err_pdrv;
+ }
+ return 0;
+err_pdrv:
+ unregister_chrdev_region(xlnxsync_devt, XLNXSYNC_DEV_MAX);
+err_class:
+ class_destroy(xlnxsync_class);
+ return err;
+}
+
+static void __exit xlnxsync_cleanup_mod(void)
+{
+ platform_driver_unregister(&xlnxsync_driver);
+ unregister_chrdev_region(xlnxsync_devt, XLNXSYNC_DEV_MAX);
+ class_destroy(xlnxsync_class);
+ xlnxsync_class = NULL;
+}
+module_init(xlnxsync_init_mod);
+module_exit(xlnxsync_cleanup_mod);
+
+MODULE_AUTHOR("Vishal Sagar");
+MODULE_DESCRIPTION("Xilinx Synchronizer IP Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(XLNXSYNC_DRIVER_VERSION);
diff --git a/drivers/staging/xroeframer/Kconfig b/drivers/staging/xroeframer/Kconfig
new file mode 100644
index 000000000000..16aa1f2c6a78
--- /dev/null
+++ b/drivers/staging/xroeframer/Kconfig
@@ -0,0 +1,18 @@
+#
+# Xilinx Radio over Ethernet Framer driver
+#
+
+config XROE_FRAMER
+ tristate "Xilinx Radio over Ethernet Framer driver"
+ ---help---
+	  The "Radio Over Ethernet Framer" IP (roe_framer) ingests/generates
+	  Ethernet packet data and (de-)multiplexes packets, based on
+	  protocol, into/from various radio antenna data streams.
+
+ It has 2 main, independent, data paths:
+
+ - Downlink, from the BaseBand to the Phone, Ethernet to Antenna,
+ we call this the De-Framer path, or defm on all related IP signals.
+
+ - Uplink, from the Phone to the BaseBand, Antenna to Ethernet,
+ we call this the Framer path, or fram on all related IP signals.
diff --git a/drivers/staging/xroeframer/Makefile b/drivers/staging/xroeframer/Makefile
new file mode 100644
index 000000000000..f7bf07e98243
--- /dev/null
+++ b/drivers/staging/xroeframer/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Radio over Ethernet Framer driver
+#
+obj-$(CONFIG_XROE_FRAMER) := framer.o
+
+framer-objs := xroe_framer.o \
+ sysfs_xroe.o \
+ sysfs_xroe_framer_ipv4.o \
+ sysfs_xroe_framer_ipv6.o \
+ sysfs_xroe_framer_udp.o \
+ sysfs_xroe_framer_stats.o
diff --git a/drivers/staging/xroeframer/README b/drivers/staging/xroeframer/README
new file mode 100644
index 000000000000..505a46c2cf62
--- /dev/null
+++ b/drivers/staging/xroeframer/README
@@ -0,0 +1,47 @@
+Xilinx Radio over Ethernet Framer driver
+=========================================
+
+About the RoE Framer
+
+The "Radio Over Ethernet Framer" IP (roe_framer) ingests/generates Ethernet
+packet data and (de-)multiplexes packets, based on protocol, into/from
+various radio antenna data streams.
+
+It has 2 main, independent data paths:
+
+- Downlink, from the BaseBand to the Phone, Ethernet to Antenna,
+we call this the De-Framer path, or defm on all related IP signals.
+
+- Uplink, from the Phone to the BaseBand, Antenna to Ethernet,
+we call this the Framer path, or fram on all related IP signals.
+
+Key points:
+
+- Apart from the AXI4-Lite configuration port and a handful of strobe/control
+signals, all data interfaces are AXI4-Stream (AXIS).
+- The IP does not contain an Ethernet MAC IP; rather, it routes or creates
+packets based on the direction through the roe_framer.
+- Currently designed to work with
+  - 1, 2 or 4 10G Ethernet AXIS stream ports to/from 1, 2, 4, 8, 16,
+  or 32 antenna ports
+  Note: each Ethernet port is 64-bit data @ 156.25 MHz
+  - 1 or 2 25G Ethernet AXIS stream ports to/from 1, 2, 4, 8, 16,
+  or 32 antenna ports
+  Note: each Ethernet port is 64-bit data @ 390.625 MHz
+- Contains a filter so that all non-protocol packets, or non-hardware-IP
+processed packets, can be forwarded to another block for processing. In
+general this is a microprocessor, specifically the Zynq ARM cores in our
+case. This filter function can move into the optional switch when TSN is
+used.
+
+About the Linux Driver
+
+The RoE Framer Linux driver provides sysfs access to the framer controls. The
+driver is bound to the hardware via a Device Tree node (see "dt-binding.txt"
+for more information). When the driver is loaded, the general controls (such
+as framing mode, enable, restart etc.) are exposed under /sys/kernel/xroe.
+Furthermore, protocol-specific controls can be found under
+/sys/kernel/xroe/framer, covering IPv4, IPv6 & UDP settings.
+
+The framer's register map can also be accessed directly, for both reading and
+writing (where permitted), using ioctl calls.
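+
+As a minimal illustration of the sysfs interface (a sketch only: the
+"framing" attribute name is an assumption here, the actual attribute names
+are defined by the sysfs_xroe*.c files), a control can be read from C like
+any other file:
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        char buf[64];
+        /* Hypothetical attribute path, for illustration only */
+        FILE *f = fopen("/sys/kernel/xroe/framing", "r");
+
+        if (!f)
+            return 1;
+        if (fgets(buf, sizeof(buf), f))
+            printf("framing mode: %s", buf);
+        fclose(f);
+        return 0;
+    }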
diff --git a/drivers/staging/xroeframer/roe_framer_ctrl.h b/drivers/staging/xroeframer/roe_framer_ctrl.h
new file mode 100644
index 000000000000..162c49a9bc3b
--- /dev/null
+++ b/drivers/staging/xroeframer/roe_framer_ctrl.h
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+/*-----------------------------------------------------------------------------
+ * C Header bank BASE definitions
+ *-----------------------------------------------------------------------------
+ */
+#define ROE_FRAMER_V1_0_CFG_BASE_ADDR 0x0 /* 0 */
+#define ROE_FRAMER_V1_0_FRAM_BASE_ADDR 0x2000 /* 8192 */
+#define ROE_FRAMER_V1_0_FRAM_DRP_BASE_ADDR 0x4000 /* 16384 */
+#define ROE_FRAMER_V1_0_DEFM_BASE_ADDR 0x6000 /* 24576 */
+#define ROE_FRAMER_V1_0_DEFM_DRP_BASE_ADDR 0x8000 /* 32768 */
+#define ROE_FRAMER_V1_0_ETH_BASE_ADDR 0xa000 /* 40960 */
+#define ROE_FRAMER_V1_0_STATS_BASE_ADDR 0xc000 /* 49152 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_cfg
+ * with prefix cfg_ @ address 0x0
+ *-----------------------------------------------------------------------------
+ */
+/* Type = roInt */
+#define CFG_MAJOR_REVISION_ADDR 0x0 /* 0 */
+#define CFG_MAJOR_REVISION_MASK 0xff000000 /* 4278190080 */
+#define CFG_MAJOR_REVISION_OFFSET 0x18 /* 24 */
+#define CFG_MAJOR_REVISION_WIDTH 0x8 /* 8 */
+#define CFG_MAJOR_REVISION_DEFAULT 0x1 /* 1 */
+
+/* Type = roInt */
+#define CFG_MINOR_REVISION_ADDR 0x0 /* 0 */
+#define CFG_MINOR_REVISION_MASK 0xff0000 /* 16711680 */
+#define CFG_MINOR_REVISION_OFFSET 0x10 /* 16 */
+#define CFG_MINOR_REVISION_WIDTH 0x8 /* 8 */
+#define CFG_MINOR_REVISION_DEFAULT 0x0 /* 0 */
+
+/* Type = roInt */
+#define CFG_VERSION_REVISION_ADDR 0x0 /* 0 */
+#define CFG_VERSION_REVISION_MASK 0xff00 /* 65280 */
+#define CFG_VERSION_REVISION_OFFSET 0x8 /* 8 */
+#define CFG_VERSION_REVISION_WIDTH 0x8 /* 8 */
+#define CFG_VERSION_REVISION_DEFAULT 0x0 /* 0 */
+
+/* Type = roInt */
+#define CFG_INTERNAL_REVISION_ADDR 0x4 /* 4 */
+#define CFG_INTERNAL_REVISION_MASK 0xffffffff /* 4294967295 */
+#define CFG_INTERNAL_REVISION_OFFSET 0x0 /* 0 */
+#define CFG_INTERNAL_REVISION_WIDTH 0x20 /* 32 */
+#define CFG_INTERNAL_REVISION_DEFAULT 0x12345678 /* 305419896 */
+
+/* Type = rw */
+#define CFG_TIMEOUT_VALUE_ADDR 0x8 /* 8 */
+#define CFG_TIMEOUT_VALUE_MASK 0xfff /* 4095 */
+#define CFG_TIMEOUT_VALUE_OFFSET 0x0 /* 0 */
+#define CFG_TIMEOUT_VALUE_WIDTH 0xc /* 12 */
+#define CFG_TIMEOUT_VALUE_DEFAULT 0x80 /* 128 */
+
+/* Type = rw */
+#define CFG_USER_RW_OUT_ADDR 0xc /* 12 */
+#define CFG_USER_RW_OUT_MASK 0xff /* 255 */
+#define CFG_USER_RW_OUT_OFFSET 0x0 /* 0 */
+#define CFG_USER_RW_OUT_WIDTH 0x8 /* 8 */
+#define CFG_USER_RW_OUT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_USER_RO_IN_ADDR 0xc /* 12 */
+#define CFG_USER_RO_IN_MASK 0xff0000 /* 16711680 */
+#define CFG_USER_RO_IN_OFFSET 0x10 /* 16 */
+#define CFG_USER_RO_IN_WIDTH 0x8 /* 8 */
+#define CFG_USER_RO_IN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_MASTER_INT_ENABLE_ADDR 0x10 /* 16 */
+#define CFG_MASTER_INT_ENABLE_MASK 0x1 /* 1 */
+#define CFG_MASTER_INT_ENABLE_OFFSET 0x0 /* 0 */
+#define CFG_MASTER_INT_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_MASTER_INT_ENABLE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_FRAM_FIFO_OF_ENABLE_ADDR 0x14 /* 20 */
+#define CFG_FRAM_FIFO_OF_ENABLE_MASK 0x1 /* 1 */
+#define CFG_FRAM_FIFO_OF_ENABLE_OFFSET 0x0 /* 0 */
+#define CFG_FRAM_FIFO_OF_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_FIFO_OF_ENABLE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_FRAM_FIFO_UF_ENABLE_ADDR 0x14 /* 20 */
+#define CFG_FRAM_FIFO_UF_ENABLE_MASK 0x2 /* 2 */
+#define CFG_FRAM_FIFO_UF_ENABLE_OFFSET 0x1 /* 1 */
+#define CFG_FRAM_FIFO_UF_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_FIFO_UF_ENABLE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_AXI_TIMEOUT_ENABLE_ADDR 0x14 /* 20 */
+#define CFG_AXI_TIMEOUT_ENABLE_MASK 0x80000000 /* 2147483648 */
+#define CFG_AXI_TIMEOUT_ENABLE_OFFSET 0x1f /* 31 */
+#define CFG_AXI_TIMEOUT_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_AXI_TIMEOUT_ENABLE_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define CFG_INTERRUPT_STATUS_SAMPLE_ADDR 0x1c /* 28 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_MASK 0x1 /* 1 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_OFFSET 0x0 /* 0 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_WIDTH 0x1 /* 1 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_DEFAULT 0x1 /* 1 */
+
+/* Type = roSig */
+#define CFG_FRAM_RESET_STATUS_ADDR 0x18 /* 24 */
+#define CFG_FRAM_RESET_STATUS_MASK 0x1 /* 1 */
+#define CFG_FRAM_RESET_STATUS_OFFSET 0x0 /* 0 */
+#define CFG_FRAM_RESET_STATUS_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_RESET_STATUS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_DEFM_RESET_STATUS_ADDR 0x18 /* 24 */
+#define CFG_DEFM_RESET_STATUS_MASK 0x2 /* 2 */
+#define CFG_DEFM_RESET_STATUS_OFFSET 0x1 /* 1 */
+#define CFG_DEFM_RESET_STATUS_WIDTH 0x1 /* 1 */
+#define CFG_DEFM_RESET_STATUS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ANT_OF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_MASK 0x100 /* 256 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_OFFSET 0x8 /* 8 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ETH_OF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_MASK 0x200 /* 512 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_OFFSET 0x9 /* 9 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ANT_UF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_MASK 0x400 /* 1024 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_OFFSET 0xa /* 10 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ETH_UF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_MASK 0x800 /* 2048 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_OFFSET 0xb /* 11 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_AXI_TIMEOUT_STATUS_ADDR 0x18 /* 24 */
+#define CFG_AXI_TIMEOUT_STATUS_MASK 0x80000000 /* 2147483648 */
+#define CFG_AXI_TIMEOUT_STATUS_OFFSET 0x1f /* 31 */
+#define CFG_AXI_TIMEOUT_STATUS_WIDTH 0x1 /* 1 */
+#define CFG_AXI_TIMEOUT_STATUS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_ADDR 0x20 /* 32 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_MASK 0xffff /* 65535 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_OFFSET 0x0 /* 0 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_WIDTH 0x10 /* 16 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_ADDR 0x20 /* 32 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_MASK 0xffff0000 /* 4294901760 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_OFFSET 0x10 /* 16 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_WIDTH 0x10 /* 16 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_ADDR 0x24 /* 36 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_MASK 0x3ff /* 1023 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_OFFSET 0x0 /* 0 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_WIDTH 0xa /* 10 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_ETH_SPEED_ADDR 0x24 /* 36 */
+#define CFG_CONFIG_ETH_SPEED_MASK 0x3ff0000 /* 67043328 */
+#define CFG_CONFIG_ETH_SPEED_OFFSET 0x10 /* 16 */
+#define CFG_CONFIG_ETH_SPEED_WIDTH 0xa /* 10 */
+#define CFG_CONFIG_ETH_SPEED_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_fram
+ * with prefix fram_ @ address 0x2000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rwpdef */
+#define FRAM_DISABLE_ADDR 0x2000 /* 8192 */
+#define FRAM_DISABLE_MASK 0x1 /* 1 */
+#define FRAM_DISABLE_OFFSET 0x0 /* 0 */
+#define FRAM_DISABLE_WIDTH 0x1 /* 1 */
+#define FRAM_DISABLE_DEFAULT 0x1 /* 1 */
+
+/* Type = roSig */
+#define FRAM_READY_ADDR 0x2000 /* 8192 */
+#define FRAM_READY_MASK 0x2 /* 2 */
+#define FRAM_READY_OFFSET 0x1 /* 1 */
+#define FRAM_READY_WIDTH 0x1 /* 1 */
+#define FRAM_READY_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define FRAM_FIFO_FULL_INDICATOR_ADDR 0x2004 /* 8196 */
+#define FRAM_FIFO_FULL_INDICATOR_MASK 0xffffffff /* 4294967295 */
+#define FRAM_FIFO_FULL_INDICATOR_OFFSET 0x0 /* 0 */
+#define FRAM_FIFO_FULL_INDICATOR_WIDTH 0x20 /* 32 */
+#define FRAM_FIFO_FULL_INDICATOR_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_MIN_ADDR 0x2020 /* 8224 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_MAX_ADDR 0x2024 /* 8228 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_ADDR 0x2028 /* 8232 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_DEFAULT 0x75 /* 117 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_ADDR 0x202c /* 8236 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_ADDR 0x2030 /* 8240 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_ADDR 0x2034 /* 8244 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_ADDR 0x2038 /* 8248 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_ADDR 0x203c /* 8252 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_ADDR 0x2050 /* 8272 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_ADDR 0x2054 /* 8276 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_ADDR 0x2058 /* 8280 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_DEFAULT 0x75 /* 117 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_ADDR 0x205c /* 8284 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_ADDR 0x2060 /* 8288 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_ADDR 0x2064 /* 8292 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_ADDR 0x2068 /* 8296 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_ADDR 0x206c /* 8300 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_PROTOCOL_DEFINITION_ADDR 0x2200 /* 8704 */
+#define FRAM_PROTOCOL_DEFINITION_MASK 0xf /* 15 */
+#define FRAM_PROTOCOL_DEFINITION_OFFSET 0x0 /* 0 */
+#define FRAM_PROTOCOL_DEFINITION_WIDTH 0x4 /* 4 */
+#define FRAM_PROTOCOL_DEFINITION_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_GEN_VLAN_TAG_ADDR 0x2200 /* 8704 */
+#define FRAM_GEN_VLAN_TAG_MASK 0x10 /* 16 */
+#define FRAM_GEN_VLAN_TAG_OFFSET 0x4 /* 4 */
+#define FRAM_GEN_VLAN_TAG_WIDTH 0x1 /* 1 */
+#define FRAM_GEN_VLAN_TAG_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_ADDR 0x2200 /* 8704 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_MASK 0x60 /* 96 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_OFFSET 0x5 /* 5 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_WIDTH 0x2 /* 2 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_fram_drp
+ * with prefix fram_drp @ address 0x4000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rw */
+#define FRAM_DRPFRAM_DATA_PC_ID_ADDR 0x4000 /* 16384 */
+#define FRAM_DRPFRAM_DATA_PC_ID_MASK 0xffff /* 65535 */
+#define FRAM_DRPFRAM_DATA_PC_ID_OFFSET 0x0 /* 0 */
+#define FRAM_DRPFRAM_DATA_PC_ID_WIDTH 0x10 /* 16 */
+#define FRAM_DRPFRAM_DATA_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_ADDR 0x4000 /* 16384 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_MASK 0xff0000 /* 16711680 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_OFFSET 0x10 /* 16 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_ADDR 0x4000 /* 16384 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_MASK 0xff000000 /* 4278190080 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_OFFSET 0x18 /* 24 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_CTRL_PC_ID_ADDR 0x4400 /* 17408 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_MASK 0xffff /* 65535 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_OFFSET 0x0 /* 0 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_WIDTH 0x10 /* 16 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_ADDR 0x4400 /* 17408 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_MASK 0xff0000 /* 16711680 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_OFFSET 0x10 /* 16 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_ADDR 0x4400 /* 17408 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_MASK 0xff000000 /* 4278190080 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_OFFSET 0x18 /* 24 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_defm
+ * with prefix defm_ @ address 0x6000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rw */
+#define DEFM_RESTART_ADDR 0x6000 /* 24576 */
+#define DEFM_RESTART_MASK 0x1 /* 1 */
+#define DEFM_RESTART_OFFSET 0x0 /* 0 */
+#define DEFM_RESTART_WIDTH 0x1 /* 1 */
+#define DEFM_RESTART_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_READY_ADDR 0x6000 /* 24576 */
+#define DEFM_READY_MASK 0x2 /* 2 */
+#define DEFM_READY_OFFSET 0x1 /* 1 */
+#define DEFM_READY_WIDTH 0x1 /* 1 */
+#define DEFM_READY_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_ERR_PACKET_FILTER_ADDR 0x6004 /* 24580 */
+#define DEFM_ERR_PACKET_FILTER_MASK 0x3 /* 3 */
+#define DEFM_ERR_PACKET_FILTER_OFFSET 0x0 /* 0 */
+#define DEFM_ERR_PACKET_FILTER_WIDTH 0x2 /* 2 */
+#define DEFM_ERR_PACKET_FILTER_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_ADDR 0x6008 /* 24584 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_MASK 0xff /* 255 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_OFFSET 0x0 /* 0 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_ADDR 0x600c /* 24588 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_MASK 0xff /* 255 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_OFFSET 0x0 /* 0 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_LOW_CNT_MIN_ADDR 0x6020 /* 24608 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_LOW_CNT_MAX_ADDR 0x6024 /* 24612 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_ADDR 0x602c /* 24620 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_ADDR 0x6030 /* 24624 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_ADDR 0x6034 /* 24628 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_ADDR 0x603c /* 24636 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_ADDR 0x6050 /* 24656 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_ADDR 0x6054 /* 24660 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_ADDR 0x605c /* 24668 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_ADDR 0x6060 /* 24672 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_ADDR 0x6064 /* 24676 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_ADDR 0x606c /* 24684 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W0_31_0_ADDR 0x6100 /* 24832 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W0_63_32_ADDR 0x6104 /* 24836 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W0_95_64_ADDR 0x6108 /* 24840 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W0_127_96_ADDR 0x610c /* 24844 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_DEFAULT 0xfffffeae /* 4294966958 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W0_MASK_ADDR 0x6110 /* 24848 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_DEFAULT 0xcfff /* 53247 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W1_31_0_ADDR 0x6120 /* 24864 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W1_63_32_ADDR 0x6124 /* 24868 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W1_95_64_ADDR 0x6128 /* 24872 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W1_127_96_ADDR 0x612c /* 24876 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W1_MASK_ADDR 0x6130 /* 24880 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_DEFAULT 0xffff /* 65535 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W2_31_0_ADDR 0x6140 /* 24896 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W2_63_32_ADDR 0x6144 /* 24900 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W2_95_64_ADDR 0x6148 /* 24904 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W2_127_96_ADDR 0x614c /* 24908 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W2_MASK_ADDR 0x6150 /* 24912 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_DEFAULT 0xffff /* 65535 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W3_31_0_ADDR 0x6160 /* 24928 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W3_63_32_ADDR 0x6164 /* 24932 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W3_95_64_ADDR 0x6168 /* 24936 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W3_127_96_ADDR 0x616c /* 24940 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W3_MASK_ADDR 0x6170 /* 24944 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_DEFAULT 0xffff /* 65535 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_defm_drp
+ * with prefix defm_drp @ address 0x8000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rw */
+#define DEFM_DRPDEFM_DATA_PC_ID_ADDR 0x8000 /* 32768 */
+#define DEFM_DRPDEFM_DATA_PC_ID_MASK 0xffff /* 65535 */
+#define DEFM_DRPDEFM_DATA_PC_ID_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_DATA_PC_ID_WIDTH 0x10 /* 16 */
+#define DEFM_DRPDEFM_DATA_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_DRPDEFM_CTRL_PC_ID_ADDR 0x8400 /* 33792 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_MASK 0xffff /* 65535 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_WIDTH 0x10 /* 16 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_MASK 0xffffff /* 16777215 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_WIDTH 0x18 /* 24 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_MASK 0x1000000 /* 16777216 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_OFFSET 0x18 /* 24 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_MASK 0x2000000 /* 33554432 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_OFFSET 0x19 /* 25 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_MASK 0x4000000 /* 67108864 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_OFFSET 0x1a /* 26 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_MASK 0x8000000 /* 134217728 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_OFFSET 0x1b /* 27 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_MASK 0xf0000000 /* 4026531840 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_OFFSET 0x1c /* 28 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_WIDTH 0x4 /* 4 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_MASK 0xffffff /* 16777215 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_WIDTH 0x18 /* 24 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_MASK 0x1000000 /* 16777216 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_OFFSET 0x18 /* 24 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_MASK 0x2000000 /* 33554432 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_OFFSET 0x19 /* 25 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_MASK 0x4000000 /* 67108864 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_OFFSET 0x1a /* 26 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_MASK 0x8000000 /* 134217728 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_OFFSET 0x1b /* 27 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_MASK 0xf0000000 /* 4026531840 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_OFFSET 0x1c /* 28 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_WIDTH 0x4 /* 4 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_eth
+ * with prefix eth_ @ address 0xa000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rwpdef */
+#define ETH_DEST_ADDR_31_0_ADDR 0xa000 /* 40960 */
+#define ETH_DEST_ADDR_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_DEST_ADDR_31_0_OFFSET 0x0 /* 0 */
+#define ETH_DEST_ADDR_31_0_WIDTH 0x20 /* 32 */
+#define ETH_DEST_ADDR_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_DEST_ADDR_47_32_ADDR 0xa004 /* 40964 */
+#define ETH_DEST_ADDR_47_32_MASK 0xffff /* 65535 */
+#define ETH_DEST_ADDR_47_32_OFFSET 0x0 /* 0 */
+#define ETH_DEST_ADDR_47_32_WIDTH 0x10 /* 16 */
+#define ETH_DEST_ADDR_47_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_SRC_ADDR_31_0_ADDR 0xa008 /* 40968 */
+#define ETH_SRC_ADDR_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_SRC_ADDR_31_0_OFFSET 0x0 /* 0 */
+#define ETH_SRC_ADDR_31_0_WIDTH 0x20 /* 32 */
+#define ETH_SRC_ADDR_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_SRC_ADDR_47_32_ADDR 0xa00c /* 40972 */
+#define ETH_SRC_ADDR_47_32_MASK 0xffff /* 65535 */
+#define ETH_SRC_ADDR_47_32_OFFSET 0x0 /* 0 */
+#define ETH_SRC_ADDR_47_32_WIDTH 0x10 /* 16 */
+#define ETH_SRC_ADDR_47_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_VLAN_ID_ADDR 0xa010 /* 40976 */
+#define ETH_VLAN_ID_MASK 0xfff /* 4095 */
+#define ETH_VLAN_ID_OFFSET 0x0 /* 0 */
+#define ETH_VLAN_ID_WIDTH 0xc /* 12 */
+#define ETH_VLAN_ID_DEFAULT 0x1 /* 1 */
+
+/* Type = rwpdef */
+#define ETH_VLAN_DEI_ADDR 0xa010 /* 40976 */
+#define ETH_VLAN_DEI_MASK 0x1000 /* 4096 */
+#define ETH_VLAN_DEI_OFFSET 0xc /* 12 */
+#define ETH_VLAN_DEI_WIDTH 0x1 /* 1 */
+#define ETH_VLAN_DEI_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_VLAN_PCP_ADDR 0xa010 /* 40976 */
+#define ETH_VLAN_PCP_MASK 0xe000 /* 57344 */
+#define ETH_VLAN_PCP_OFFSET 0xd /* 13 */
+#define ETH_VLAN_PCP_WIDTH 0x3 /* 3 */
+#define ETH_VLAN_PCP_DEFAULT 0x7 /* 7 */
+
+/* Type = rw */
+#define ETH_IPV4_VERSION_ADDR 0xa030 /* 41008 */
+#define ETH_IPV4_VERSION_MASK 0xf /* 15 */
+#define ETH_IPV4_VERSION_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_VERSION_WIDTH 0x4 /* 4 */
+#define ETH_IPV4_VERSION_DEFAULT 0x4 /* 4 */
+
+/* Type = rw */
+#define ETH_IPV4_IHL_ADDR 0xa030 /* 41008 */
+#define ETH_IPV4_IHL_MASK 0xf0 /* 240 */
+#define ETH_IPV4_IHL_OFFSET 0x4 /* 4 */
+#define ETH_IPV4_IHL_WIDTH 0x4 /* 4 */
+#define ETH_IPV4_IHL_DEFAULT 0x5 /* 5 */
+
+/* Type = rw */
+#define ETH_IPV4_DSCP_ADDR 0xa034 /* 41012 */
+#define ETH_IPV4_DSCP_MASK 0x3f /* 63 */
+#define ETH_IPV4_DSCP_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_DSCP_WIDTH 0x6 /* 6 */
+#define ETH_IPV4_DSCP_DEFAULT 0x2e /* 46 */
+
+/* Type = rw */
+#define ETH_IPV4_ECN_ADDR 0xa034 /* 41012 */
+#define ETH_IPV4_ECN_MASK 0xc0 /* 192 */
+#define ETH_IPV4_ECN_OFFSET 0x6 /* 6 */
+#define ETH_IPV4_ECN_WIDTH 0x2 /* 2 */
+#define ETH_IPV4_ECN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_IPV4_ID_ADDR 0xa038 /* 41016 */
+#define ETH_IPV4_ID_MASK 0xffff /* 65535 */
+#define ETH_IPV4_ID_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_ID_WIDTH 0x10 /* 16 */
+#define ETH_IPV4_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_IPV4_FLAGS_ADDR 0xa03c /* 41020 */
+#define ETH_IPV4_FLAGS_MASK 0x7 /* 7 */
+#define ETH_IPV4_FLAGS_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_FLAGS_WIDTH 0x3 /* 3 */
+#define ETH_IPV4_FLAGS_DEFAULT 0x2 /* 2 */
+
+/* Type = rw */
+#define ETH_IPV4_FRAGMENT_OFFSET_ADDR 0xa03c /* 41020 */
+#define ETH_IPV4_FRAGMENT_OFFSET_MASK 0x1fff8 /* 131064 */
+#define ETH_IPV4_FRAGMENT_OFFSET_OFFSET 0x3 /* 3 */
+#define ETH_IPV4_FRAGMENT_OFFSET_WIDTH 0xe /* 14 */
+#define ETH_IPV4_FRAGMENT_OFFSET_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_IPV4_TIME_TO_LIVE_ADDR 0xa040 /* 41024 */
+#define ETH_IPV4_TIME_TO_LIVE_MASK 0xff /* 255 */
+#define ETH_IPV4_TIME_TO_LIVE_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_TIME_TO_LIVE_WIDTH 0x8 /* 8 */
+#define ETH_IPV4_TIME_TO_LIVE_DEFAULT 0x40 /* 64 */
+
+/* Type = rw */
+#define ETH_IPV4_PROTOCOL_ADDR 0xa044 /* 41028 */
+#define ETH_IPV4_PROTOCOL_MASK 0xff /* 255 */
+#define ETH_IPV4_PROTOCOL_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_PROTOCOL_WIDTH 0x8 /* 8 */
+#define ETH_IPV4_PROTOCOL_DEFAULT 0x11 /* 17 */
+
+/* Type = rwpdef */
+#define ETH_IPV4_SOURCE_ADD_ADDR 0xa048 /* 41032 */
+#define ETH_IPV4_SOURCE_ADD_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV4_SOURCE_ADD_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_SOURCE_ADD_WIDTH 0x20 /* 32 */
+#define ETH_IPV4_SOURCE_ADD_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV4_DESTINATION_ADD_ADDR 0xa04c /* 41036 */
+#define ETH_IPV4_DESTINATION_ADD_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV4_DESTINATION_ADD_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_DESTINATION_ADD_WIDTH 0x20 /* 32 */
+#define ETH_IPV4_DESTINATION_ADD_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_UDP_SOURCE_PORT_ADDR 0xa050 /* 41040 */
+#define ETH_UDP_SOURCE_PORT_MASK 0xffff /* 65535 */
+#define ETH_UDP_SOURCE_PORT_OFFSET 0x0 /* 0 */
+#define ETH_UDP_SOURCE_PORT_WIDTH 0x10 /* 16 */
+#define ETH_UDP_SOURCE_PORT_DEFAULT 0x8000 /* 32768 */
+
+/* Type = rw */
+#define ETH_UDP_DESTINATION_PORT_ADDR 0xa050 /* 41040 */
+#define ETH_UDP_DESTINATION_PORT_MASK 0xffff0000 /* 4294901760 */
+#define ETH_UDP_DESTINATION_PORT_OFFSET 0x10 /* 16 */
+#define ETH_UDP_DESTINATION_PORT_WIDTH 0x10 /* 16 */
+#define ETH_UDP_DESTINATION_PORT_DEFAULT 0xc000 /* 49152 */
+
+/* Type = rw */
+#define ETH_IPV6_V_ADDR 0xa080 /* 41088 */
+#define ETH_IPV6_V_MASK 0xf /* 15 */
+#define ETH_IPV6_V_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_V_WIDTH 0x4 /* 4 */
+#define ETH_IPV6_V_DEFAULT 0x6 /* 6 */
+
+/* Type = rw */
+#define ETH_IPV6_TRAFFIC_CLASS_ADDR 0xa084 /* 41092 */
+#define ETH_IPV6_TRAFFIC_CLASS_MASK 0xff /* 255 */
+#define ETH_IPV6_TRAFFIC_CLASS_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_TRAFFIC_CLASS_WIDTH 0x8 /* 8 */
+#define ETH_IPV6_TRAFFIC_CLASS_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_IPV6_FLOW_LABEL_ADDR 0xa088 /* 41096 */
+#define ETH_IPV6_FLOW_LABEL_MASK 0xfffff /* 1048575 */
+#define ETH_IPV6_FLOW_LABEL_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_FLOW_LABEL_WIDTH 0x14 /* 20 */
+#define ETH_IPV6_FLOW_LABEL_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_IPV6_NEXT_HEADER_ADDR 0xa08c /* 41100 */
+#define ETH_IPV6_NEXT_HEADER_MASK 0xff /* 255 */
+#define ETH_IPV6_NEXT_HEADER_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_NEXT_HEADER_WIDTH 0x8 /* 8 */
+#define ETH_IPV6_NEXT_HEADER_DEFAULT 0x11 /* 17 */
+
+/* Type = rw */
+#define ETH_IPV6_HOP_LIMIT_ADDR 0xa090 /* 41104 */
+#define ETH_IPV6_HOP_LIMIT_MASK 0xff /* 255 */
+#define ETH_IPV6_HOP_LIMIT_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_HOP_LIMIT_WIDTH 0x8 /* 8 */
+#define ETH_IPV6_HOP_LIMIT_DEFAULT 0x40 /* 64 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_SOURCE_ADD_31_0_ADDR 0xa094 /* 41108 */
+#define ETH_IPV6_SOURCE_ADD_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_31_0_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_31_0_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_SOURCE_ADD_63_32_ADDR 0xa098 /* 41112 */
+#define ETH_IPV6_SOURCE_ADD_63_32_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_63_32_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_63_32_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_63_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_SOURCE_ADD_95_64_ADDR 0xa09c /* 41116 */
+#define ETH_IPV6_SOURCE_ADD_95_64_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_95_64_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_95_64_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_95_64_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_SOURCE_ADD_127_96_ADDR 0xa0a0 /* 41120 */
+#define ETH_IPV6_SOURCE_ADD_127_96_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_127_96_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_127_96_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_127_96_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_DEST_ADD_31_0_ADDR 0xa0a4 /* 41124 */
+#define ETH_IPV6_DEST_ADD_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_31_0_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_31_0_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_DEST_ADD_63_32_ADDR 0xa0a8 /* 41128 */
+#define ETH_IPV6_DEST_ADD_63_32_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_63_32_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_63_32_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_63_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_DEST_ADD_95_64_ADDR 0xa0ac /* 41132 */
+#define ETH_IPV6_DEST_ADD_95_64_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_95_64_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_95_64_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_95_64_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_DEST_ADD_127_96_ADDR 0xa0b0 /* 41136 */
+#define ETH_IPV6_DEST_ADD_127_96_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_127_96_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_127_96_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_127_96_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_stats
+ * with prefix stats_ @ address 0xc000
+ *------------------------------------------------------------------------------
+ */
+/* Type = roSig */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_ADDR 0xc000 /* 49152 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_ADDR 0xc004 /* 49156 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_ADDR 0xc008 /* 49160 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_DATA_RX_PACKETS_CNT_ADDR 0xc00c /* 49164 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_ADDR 0xc010 /* 49168 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_ADDR 0xc014 /* 49172 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_ADDR 0xc018 /* 49176 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_ADDR 0xc01c /* 49180 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_ADDR 0xc020 /* 49184 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_ADDR 0xc024 /* 49188 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_ADDR 0xc028 /* 49192 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_DATA_RX_PKTS_RATE_ADDR 0xc02c /* 49196 */
+#define STATS_USER_DATA_RX_PKTS_RATE_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_PKTS_RATE_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_PKTS_RATE_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_PKTS_RATE_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_CTRL_RX_PKTS_RATE_ADDR 0xc030 /* 49200 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_DEFAULT 0x0 /* 0 */
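Each field above is described by an _ADDR/_MASK/_OFFSET/_WIDTH/_DEFAULT tuple: software masks the 32-bit register word and shifts by the offset to isolate the field. A minimal read-modify-write sketch, assuming a hypothetical ioremapped base pointer `base` and an illustrative new value `timeout` (neither appears in this patch):

    /* Read the 12-bit timeout field from register 0x8. */
    u32 reg = ioread32(base + CFG_TIMEOUT_VALUE_ADDR);
    u32 cur = (reg & CFG_TIMEOUT_VALUE_MASK) >> CFG_TIMEOUT_VALUE_OFFSET;

    /* Update it: clear the field, then OR in the shifted new value. */
    reg &= ~CFG_TIMEOUT_VALUE_MASK;
    reg |= (timeout << CFG_TIMEOUT_VALUE_OFFSET) & CFG_TIMEOUT_VALUE_MASK;
    iowrite32(reg, base + CFG_TIMEOUT_VALUE_ADDR);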
diff --git a/drivers/staging/xroeframer/sysfs_xroe.c b/drivers/staging/xroeframer/sysfs_xroe.c
new file mode 100644
index 000000000000..9caf5e50b02f
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+enum { XROE_SIZE_MAX = 15 };
+static int xroe_size;
+static char xroe_tmp[XROE_SIZE_MAX];
+
+/**
+ * version_show - Returns the block's revision number
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the revision string
+ *
+ * Returns the block's major, minor & version revision numbers
+ * in a %u.%u.%u format
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 major_rev;
+ u32 minor_rev;
+ u32 version_rev;
+
+ major_rev = utils_sysfs_show_wrapper(CFG_MAJOR_REVISION_ADDR,
+ CFG_MAJOR_REVISION_OFFSET,
+ CFG_MAJOR_REVISION_MASK, kobj);
+ minor_rev = utils_sysfs_show_wrapper(CFG_MINOR_REVISION_ADDR,
+ CFG_MINOR_REVISION_OFFSET,
+ CFG_MINOR_REVISION_MASK, kobj);
+ version_rev = utils_sysfs_show_wrapper(CFG_VERSION_REVISION_ADDR,
+ CFG_VERSION_REVISION_OFFSET,
+ CFG_VERSION_REVISION_MASK, kobj);
+ sprintf(buff, "%d.%d.%d\n", major_rev, minor_rev, version_rev);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * version_store - Writes to the framer version sysfs entry (not permitted)
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the revision string
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the framer version sysfs entry (not permitted)
+ *
+ * Return: 0
+ */
+static ssize_t version_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ return 0;
+}
+
+/**
+ * enable_show - Returns the framer's enable status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the enable status
+ *
+ * Reads the framer's enable status and writes it to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t enable_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 enable;
+
+ enable = utils_sysfs_show_wrapper(CFG_MASTER_INT_ENABLE_ADDR,
+ CFG_MASTER_INT_ENABLE_OFFSET,
+ CFG_MASTER_INT_ENABLE_MASK, kobj);
+ if (enable)
+ sprintf(buff, "true\n");
+ else
+ sprintf(buff, "false\n");
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * enable_store - Writes to the framer's enable status register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the enable status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the framer's enable status
+ * to the enable register
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t enable_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 enable = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (strncmp(xroe_tmp, "true", xroe_size) == 0)
+ enable = 1;
+ else if (strncmp(xroe_tmp, "false", xroe_size) == 0)
+ enable = 0;
+ utils_sysfs_store_wrapper(CFG_MASTER_INT_ENABLE_ADDR,
+ CFG_MASTER_INT_ENABLE_OFFSET,
+ CFG_MASTER_INT_ENABLE_MASK, enable, kobj);
+ return xroe_size;
+}
+
+/**
+ * framer_restart_show - Returns the framer's restart status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ *
+ * Reads the framer's restart status and writes it to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t framer_restart_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 restart;
+
+ restart = utils_sysfs_show_wrapper(FRAM_DISABLE_ADDR,
+ FRAM_DISABLE_OFFSET,
+ FRAM_DISABLE_MASK, kobj);
+ if (restart)
+ sprintf(buff, "true\n");
+
+ else
+ sprintf(buff, "false\n");
+
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * framer_restart_store - Writes to the framer's restart status register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the framer's restart status
+ * to the framer disable register
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t framer_restart_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 restart = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (strncmp(xroe_tmp, "true", xroe_size) == 0)
+ restart = 0x01;
+ else if (strncmp(xroe_tmp, "false", xroe_size) == 0)
+ restart = 0x00;
+ utils_sysfs_store_wrapper(FRAM_DISABLE_ADDR, FRAM_DISABLE_OFFSET,
+ FRAM_DISABLE_MASK, restart, kobj);
+ return xroe_size;
+}
+
+/**
+ * deframer_restart_show - Returns the deframer's restart status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ *
+ * Reads the deframer's restart status and writes it to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t deframer_restart_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 offset = DEFM_RESTART_OFFSET;
+ u32 mask = DEFM_RESTART_MASK;
+ u32 buffer = 0;
+ u32 restart = 0;
+ void __iomem *working_address = ((u8 *)lp->base_addr
+ + DEFM_RESTART_ADDR);
+
+ buffer = ioread32(working_address);
+ restart = (buffer & mask) >> offset;
+
+ if (restart)
+ sprintf(buff, "true\n");
+
+ else
+ sprintf(buff, "false\n");
+
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * deframer_restart_store - Writes to the deframer's restart status register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the deframer's restart status
+ * to the deframer restart register
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t deframer_restart_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 offset = DEFM_RESTART_OFFSET;
+ u32 mask = DEFM_RESTART_MASK;
+ void __iomem *working_address = ((u8 *)lp->base_addr
+ + DEFM_RESTART_ADDR);
+ u32 restart = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (strncmp(xroe_tmp, "true", xroe_size) == 0) {
+ restart = 0x01;
+ utils_write32withmask(working_address, restart,
+ mask, offset);
+ } else if (strncmp(xroe_tmp, "false", xroe_size) == 0) {
+ restart = 0x00;
+ utils_write32withmask(working_address, restart,
+ mask, offset);
+ }
+
+ return xroe_size;
+}
+
+/**
+ * xxv_reset_show - Returns the XXV's reset status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the reset status
+ *
+ * Reads the XXV's reset status and writes it to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t xxv_reset_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 offset = CFG_USER_RW_OUT_OFFSET;
+ u32 mask = CFG_USER_RW_OUT_MASK;
+ u32 buffer = 0;
+ u32 restart = 0;
+ void __iomem *working_address = ((u8 *)lp->base_addr +
+ CFG_USER_RW_OUT_ADDR);
+
+ buffer = ioread32(working_address);
+ restart = (buffer & mask) >> offset;
+ if (restart)
+ sprintf(buff, "true\n");
+ else
+ sprintf(buff, "false\n");
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * xxv_reset_store - Writes to the XXV's reset register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the reset status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the XXV's reset status
+ * to the reset register
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t xxv_reset_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 offset = CFG_USER_RW_OUT_OFFSET;
+ u32 mask = CFG_USER_RW_OUT_MASK;
+ void __iomem *working_address = ((u8 *)lp->base_addr +
+ CFG_USER_RW_OUT_ADDR);
+ u32 restart = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+
+ if (strncmp(xroe_tmp, "true", xroe_size) == 0) {
+ restart = 0x01;
+ utils_write32withmask(working_address, restart,
+ mask, offset);
+ } else if (strncmp(xroe_tmp, "false", xroe_size) == 0) {
+ restart = 0x00;
+ utils_write32withmask(working_address, restart,
+ mask, offset);
+ }
+ return xroe_size;
+}
+
+/**
+ * framing_show - Returns the current framing
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the reset status
+ *
+ * Reads the current framing type and writes it to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t framing_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 offset = (DEFM_DATA_PKT_MESSAGE_TYPE_ADDR +
+ DEFM_DATA_PKT_MESSAGE_TYPE_OFFSET);
+ u8 buffer = 0;
+ u8 framing = 0xff;
+ void __iomem *working_address = ((u8 *)lp->base_addr + offset);
+
+ buffer = ioread8(working_address);
+ framing = buffer;
+ if (framing == 0)
+ sprintf(buff, "eCPRI\n");
+ else if (framing == 1)
+ sprintf(buff, "1914.3\n");
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * framing_store - Writes to the current framing register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the reset status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the current framing type
+ * to the message type register
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t framing_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 offset = (DEFM_DATA_PKT_MESSAGE_TYPE_ADDR +
+ DEFM_DATA_PKT_MESSAGE_TYPE_OFFSET);
+ void __iomem *working_address = ((u8 *)lp->base_addr + offset);
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (strncmp(xroe_tmp, "eCPRI", xroe_size) == 0)
+ iowrite8(0, working_address);
+ else if (strncmp(xroe_tmp, "1914.3", xroe_size) == 0)
+ iowrite8(1, working_address);
+ return xroe_size;
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+static struct kobj_attribute version_attribute =
+ __ATTR(version, 0444, version_show, version_store);
+
+static struct kobj_attribute enable_attribute =
+ __ATTR(enable, 0660, enable_show, enable_store);
+
+static struct kobj_attribute framer_restart =
+ __ATTR(framer_restart, 0660, framer_restart_show, framer_restart_store);
+
+static struct kobj_attribute deframer_restart =
+ __ATTR(deframer_restart, 0660, deframer_restart_show,
+ deframer_restart_store);
+
+static struct kobj_attribute xxv_reset =
+ __ATTR(xxv_reset, 0660, xxv_reset_show, xxv_reset_store);
+
+static struct kobj_attribute framing_attribute =
+ __ATTR(framing, 0660, framing_show, framing_store);
+
+static struct attribute *attrs[] = {
+ &version_attribute.attr,
+ &enable_attribute.attr,
+ &framer_restart.attr,
+ &deframer_restart.attr,
+ &xxv_reset.attr,
+ &framing_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+struct kobject *root_xroe_kobj;
+
+/**
+ * xroe_sysfs_init - Creates the xroe sysfs directory and entries
+ *
+ * Creates the xroe sysfs directory and entries, as well as the
+ * subdirectories for IPv4, IPv6 & UDP
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ */
+int xroe_sysfs_init(void)
+{
+ int ret;
+
+ root_xroe_kobj = kobject_create_and_add("xroe", kernel_kobj);
+ if (!root_xroe_kobj)
+ return -ENOMEM;
+ ret = sysfs_create_group(root_xroe_kobj, &attr_group);
+ if (ret) {
+ kobject_put(root_xroe_kobj);
+ return ret;
+ }
+ ret = xroe_sysfs_ipv4_init();
+ if (ret)
+ return ret;
+ ret = xroe_sysfs_ipv6_init();
+ if (ret)
+ return ret;
+ ret = xroe_sysfs_udp_init();
+ if (ret)
+ return ret;
+ ret = xroe_sysfs_stats_init();
+ return ret;
+}
+
+/**
+ * xroe_sysfs_exit - Deletes the xroe sysfs directory and entries
+ *
+ * Deletes the xroe sysfs directory and entries, as well as the
+ * subdirectories for IPv4, IPv6 & UDP
+ *
+ */
+void xroe_sysfs_exit(void)
+{
+ int i;
+
+ xroe_sysfs_ipv4_exit();
+ xroe_sysfs_ipv6_exit();
+ xroe_sysfs_udp_exit();
+ xroe_sysfs_stats_exit();
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+ kobject_put(kobj_eth_ports[i]);
+ kobject_put(kobj_framer);
+ kobject_put(root_xroe_kobj);
+}
+
+/**
+ * utils_write32withmask - Writes a masked 32-bit value
+ * @working_address: The starting address to write
+ * @value: The value to be written
+ * @mask: The mask to be used
+ * @offset: The offset from the provided starting address
+ *
+ * Writes a 32-bit value to the provided address with the input mask
+ *
+ * Return: 0 on success
+ */
+int utils_write32withmask(void __iomem *working_address, u32 value,
+ u32 mask, u32 offset)
+{
+ u32 read_register_value = 0;
+ u32 register_value_to_write = 0;
+ u32 delta = 0, buffer = 0;
+
+ read_register_value = ioread32(working_address);
+ buffer = (value << offset);
+ register_value_to_write = read_register_value & ~mask;
+ delta = buffer & mask;
+ register_value_to_write |= delta;
+ iowrite32(register_value_to_write, working_address);
+ return 0;
+}
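+
+/*
+ * Example with hypothetical values: utils_write32withmask(addr, 0x5,
+ * 0x00000F00, 8) reads the register at addr, clears bits 11:8 and
+ * writes back the old value with 0x5 merged into that field, leaving
+ * all other bits untouched.
+ */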
+
+/**
+ * utils_sysfs_path_to_eth_port_num - Get the current ethernet port
+ * @kobj: The kobject of the entry calling the function
+ *
+ * Extracts the number of the current ethernet port instance
+ *
+ * Return: The number of the Ethernet port instance (0 to
+ * MAX_NUM_ETH_PORTS - 1) on success, 0 if the port number cannot be
+ * parsed from the sysfs path
+ */
+static int utils_sysfs_path_to_eth_port_num(struct kobject *kobj)
+{
+ char *current_path = NULL;
+ int port;
+ int ret;
+
+ current_path = kobject_get_path(kobj, GFP_KERNEL);
+ if (!current_path)
+ return 0;
+ ret = sscanf(current_path, "/kernel/xroe/framer/eth_port_%d/", &port);
+ /* if sscanf() returns 0, no fields were assigned, therefore no
+ * adjustments will be made for port number
+ */
+ if (ret == 0)
+ port = 0;
+ kfree(current_path);
+ return port;
+}
+
+/**
+ * utils_sysfs_store_wrapper - Wraps the storing function for sysfs entries
+ * @address: The address of the register to be written
+ * @offset: The offset from the address of the register
+ * @mask: The mask to be used on the value to be written
+ * @value: The value to be written to the register
+ * @kobj: The kobject of the entry calling the function
+ *
+ * Wraps the core functionality of all "store" functions of sysfs entries.
+ * After calculating the ethernet port number (in N/A cases, it's 0), the value
+ * is written to the designated register
+ *
+ */
+void utils_sysfs_store_wrapper(u32 address, u32 offset, u32 mask, u32 value,
+ struct kobject *kobj)
+{
+ int port;
+ void __iomem *working_address;
+
+ port = utils_sysfs_path_to_eth_port_num(kobj);
+ working_address = (void __iomem *)(lp->base_addr +
+ (address + (0x100 * port)));
+ utils_write32withmask(working_address, value, mask, offset);
+}
+
+/**
+ * utils_sysfs_show_wrapper - Wraps the show function for sysfs entries
+ * @address: The address of the register to be read
+ * @offset: The offset from the address of the register
+ * @mask: The mask to be used on the value to be read
+ * @kobj: The kobject of the entry calling the function
+ *
+ * Wraps the core functionality of all "show" functions of sysfs entries.
+ * After calculating the ethernet port number (in N/A cases, it's 0), the value
+ * is read from the designated register and returned.
+ *
+ * Return: The value designated by the address, offset and mask
+ */
+u32 utils_sysfs_show_wrapper(u32 address, u32 offset, u32 mask,
+ struct kobject *kobj)
+{
+ int port;
+ void __iomem *working_address;
+ u32 buffer;
+
+ port = utils_sysfs_path_to_eth_port_num(kobj);
+ working_address = (void __iomem *)(lp->base_addr +
+ (address + (0x100 * port)));
+ buffer = ioread32(working_address);
+ return (buffer & mask) >> offset;
+}
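+
+/*
+ * Both wrappers assume that each Ethernet port's register bank sits at
+ * a 0x100-byte stride from the base address, which is why the same
+ * attribute code can back every eth_port_<N> directory.
+ */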
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c
new file mode 100644
index 000000000000..aaaefb10c597
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c
@@ -0,0 +1,718 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
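+/*
+ * 15 characters is exactly the longest dotted-quad IPv4 address,
+ * i.e. "255.255.255.255"
+ */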
+enum { XROE_SIZE_MAX = 15 };
+static int xroe_size;
+static char xroe_tmp[XROE_SIZE_MAX + 1]; /* +1 for NUL termination */
+
+static void utils_ipv4addr_hextochar(u32 ip, unsigned char *bytes);
+static int utils_ipv4addr_chartohex(char *ip_addr, u32 *p_ip_addr);
+
+/**
+ * ipv4_version_show - Returns the IPv4 version number
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 version number
+ *
+ * Returns the IPv4 version number
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv4_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 version;
+
+ version = utils_sysfs_show_wrapper(ETH_IPV4_VERSION_ADDR,
+ ETH_IPV4_VERSION_OFFSET,
+ ETH_IPV4_VERSION_MASK, kobj);
+ return sprintf(buff, "%d\n", version);
+}
+
+/**
+ * ipv4_version_store - Writes to the IPv4 version number sysfs entry
+ * (not permitted)
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 version
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 version number sysfs entry (not permitted)
+ *
+ * Return: 0
+ */
+static ssize_t ipv4_version_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ return 0;
+}
+
+/**
+ * ipv4_ihl_show - Returns the IPv4 IHL
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 IHL
+ *
+ * Returns the IPv4 IHL
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv4_ihl_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 ihl;
+
+ ihl = utils_sysfs_show_wrapper(ETH_IPV4_IHL_ADDR, ETH_IPV4_IHL_OFFSET,
+ ETH_IPV4_IHL_MASK, kobj);
+ return sprintf(buff, "%d\n", ihl);
+}
+
+/**
+ * ipv4_ihl_store - Writes to the IPv4 IHL sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 IHL
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 IHL sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_ihl_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 ihl;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &ihl);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_IHL_ADDR, ETH_IPV4_IHL_OFFSET,
+ ETH_IPV4_IHL_MASK, ihl, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_dscp_show - Returns the IPv4 DSCP
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 DSCP
+ *
+ * Returns the IPv4 DSCP
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv4_dscp_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 dscp;
+
+ dscp = utils_sysfs_show_wrapper(ETH_IPV4_DSCP_ADDR,
+ ETH_IPV4_DSCP_OFFSET,
+ ETH_IPV4_DSCP_MASK, kobj);
+ return sprintf(buff, "%d\n", dscp);
+}
+
+/**
+ * ipv4_dscp_store - Writes to the IPv4 DSCP sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 DSCP
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 DSCP sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_dscp_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 dscp;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &dscp);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_DSCP_ADDR, ETH_IPV4_DSCP_OFFSET,
+ ETH_IPV4_DSCP_MASK, dscp, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_ecn_show - Returns the IPv4 ECN
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ECN
+ *
+ * Returns the IPv4 ECN
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv4_ecn_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 ecn;
+
+ ecn = utils_sysfs_show_wrapper(ETH_IPV4_ECN_ADDR, ETH_IPV4_ECN_OFFSET,
+ ETH_IPV4_ECN_MASK, kobj);
+ return sprintf(buff, "%d\n", ecn);
+}
+
+/**
+ * ipv4_ecn_store - Writes to the IPv4 ECN sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ECN
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 ECN sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_ecn_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 ecn;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &ecn);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_ECN_ADDR, ETH_IPV4_ECN_OFFSET,
+ ETH_IPV4_ECN_MASK, ecn, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_id_show - Returns the IPv4 ID
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ID
+ *
+ * Returns the IPv4 ID
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv4_id_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 id;
+
+ id = utils_sysfs_show_wrapper(ETH_IPV4_ID_ADDR, ETH_IPV4_ID_OFFSET,
+ ETH_IPV4_ID_MASK, kobj);
+ return sprintf(buff, "%d\n", id);
+}
+
+/**
+ * ipv4_id_store - Writes to the IPv4 ID sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ID
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 ID sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_id_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 id;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &id);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_ID_ADDR, ETH_IPV4_ID_OFFSET,
+ ETH_IPV4_ID_MASK, id, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_flags_show - Returns the IPv4 flags
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 flags
+ *
+ * Returns the IPv4 flags
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv4_flags_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 flags;
+
+ flags = utils_sysfs_show_wrapper(ETH_IPV4_FLAGS_ADDR,
+ ETH_IPV4_FLAGS_OFFSET,
+ ETH_IPV4_FLAGS_MASK, kobj);
+ return sprintf(buff, "%d\n", flags);
+}
+
+/**
+ * ipv4_flags_store - Writes to the IPv4 flags sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 flags
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 flags sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_flags_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 flags;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &flags);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_FLAGS_ADDR, ETH_IPV4_FLAGS_OFFSET,
+ ETH_IPV4_FLAGS_MASK, flags, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_fragment_offset_show - Returns the IPv4 fragment offset
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 fragment offset
+ *
+ * Returns the IPv4 fragment offset
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv4_fragment_offset_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 fragment;
+
+ fragment = utils_sysfs_show_wrapper(ETH_IPV4_FRAGMENT_OFFSET_ADDR,
+ ETH_IPV4_FRAGMENT_OFFSET_OFFSET,
+ ETH_IPV4_FRAGMENT_OFFSET_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", fragment);
+}
+
+/**
+ * ipv4_fragment_offset_store - Writes to the IPv4 fragment offset sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 fragment offset
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 fragment offset sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_fragment_offset_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ int ret;
+ u32 fragment;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &fragment);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_FRAGMENT_OFFSET_ADDR,
+ ETH_IPV4_FRAGMENT_OFFSET_OFFSET,
+ ETH_IPV4_FRAGMENT_OFFSET_MASK, fragment,
+ kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_ttl_show - Returns the IPv4 TTL
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 TTL
+ *
+ * Returns the IPv4 TTL
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv4_ttl_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 ttl;
+
+ ttl = utils_sysfs_show_wrapper(ETH_IPV4_TIME_TO_LIVE_ADDR,
+ ETH_IPV4_TIME_TO_LIVE_OFFSET,
+ ETH_IPV4_TIME_TO_LIVE_MASK, kobj);
+ return sprintf(buff, "%d\n", ttl);
+}
+
+/**
+ * ipv4_ttl_store - Writes to the IPv4 TTL sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 TTL
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 TTL sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_ttl_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 ttl;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &ttl);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_TIME_TO_LIVE_ADDR,
+ ETH_IPV4_TIME_TO_LIVE_OFFSET,
+ ETH_IPV4_TIME_TO_LIVE_MASK, ttl, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_protocol_show - Returns the IPv4 protocol
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 protocol
+ *
+ * Returns the IPv4 protocol
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv4_protocol_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 protocol;
+
+ protocol = utils_sysfs_show_wrapper(ETH_IPV4_PROTOCOL_ADDR,
+ ETH_IPV4_PROTOCOL_OFFSET,
+ ETH_IPV4_PROTOCOL_MASK, kobj);
+ return sprintf(buff, "%d\n", protocol);
+}
+
+/**
+ * ipv4_protocol_store - Writes to the IPv4 protocol sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 protocol
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 protocol sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_protocol_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 protocol;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &protocol);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_PROTOCOL_ADDR,
+ ETH_IPV4_PROTOCOL_OFFSET,
+ ETH_IPV4_PROTOCOL_MASK, protocol, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_source_address_show - Returns the IPv4 source address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 source address
+ *
+ * Returns the IPv4 source address in x.x.x.x format
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv4_source_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 source_add = 0;
+ unsigned char ip_addr_char[4];
+
+ source_add = utils_sysfs_show_wrapper(ETH_IPV4_SOURCE_ADD_ADDR,
+ ETH_IPV4_SOURCE_ADD_OFFSET,
+ ETH_IPV4_SOURCE_ADD_MASK, kobj);
+ utils_ipv4addr_hextochar(source_add, ip_addr_char);
+ return sprintf(buff, "%d.%d.%d.%d\n", ip_addr_char[3], ip_addr_char[2],
+ ip_addr_char[1], ip_addr_char[0]);
+}
+
+/**
+ * ipv4_source_address_store - Writes to the IPv4 source address sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 source address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 source address sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_source_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ u32 source_add = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ xroe_tmp[xroe_size] = '\0'; /* ensure NUL termination for strsep() */
+ if (utils_ipv4addr_chartohex(xroe_tmp, &source_add) == 4)
+ utils_sysfs_store_wrapper(ETH_IPV4_SOURCE_ADD_ADDR,
+ ETH_IPV4_SOURCE_ADD_OFFSET,
+ ETH_IPV4_SOURCE_ADD_MASK, source_add,
+ kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_destination_address_show - Returns the IPv4 destination address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 destination address
+ *
+ * Returns the IPv4 destination address in x.x.x.x format
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv4_destination_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 dest_add = 0;
+ unsigned char ip_addr_char[4];
+
+ dest_add = utils_sysfs_show_wrapper(ETH_IPV4_DESTINATION_ADD_ADDR,
+ ETH_IPV4_DESTINATION_ADD_OFFSET,
+ ETH_IPV4_DESTINATION_ADD_MASK,
+ kobj);
+ utils_ipv4addr_hextochar(dest_add, ip_addr_char);
+ return sprintf(buff, "%d.%d.%d.%d\n", ip_addr_char[3], ip_addr_char[2],
+ ip_addr_char[1], ip_addr_char[0]);
+}
+
+/**
+ * ipv4_destination_address_store - Writes to the IPv4 destination address
+ * sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 destination address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 destination address sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_destination_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ u32 dest_add = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ xroe_tmp[xroe_size] = '\0'; /* ensure NUL termination for strsep() */
+ if (utils_ipv4addr_chartohex(xroe_tmp, &dest_add) == 4)
+ utils_sysfs_store_wrapper(ETH_IPV4_DESTINATION_ADD_ADDR,
+ ETH_IPV4_DESTINATION_ADD_OFFSET,
+ ETH_IPV4_DESTINATION_ADD_MASK,
+ dest_add, kobj);
+ return xroe_size;
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+static struct kobj_attribute version_attribute =
+ __ATTR(version, 0444, ipv4_version_show, ipv4_version_store);
+static struct kobj_attribute ihl_attribute =
+ __ATTR(ihl, 0660, ipv4_ihl_show, ipv4_ihl_store);
+static struct kobj_attribute dscp_attribute =
+ __ATTR(dscp, 0660, ipv4_dscp_show, ipv4_dscp_store);
+static struct kobj_attribute ecn_attribute =
+ __ATTR(ecn, 0660, ipv4_ecn_show, ipv4_ecn_store);
+static struct kobj_attribute id_attribute =
+ __ATTR(id, 0660, ipv4_id_show, ipv4_id_store);
+static struct kobj_attribute flags_attribute =
+ __ATTR(flags, 0660, ipv4_flags_show, ipv4_flags_store);
+static struct kobj_attribute fragment_offset_attribute =
+ __ATTR(fragment_offset, 0660, ipv4_fragment_offset_show,
+ ipv4_fragment_offset_store);
+static struct kobj_attribute ttl_attribute =
+ __ATTR(ttl, 0660, ipv4_ttl_show, ipv4_ttl_store);
+static struct kobj_attribute protocol_attribute =
+ __ATTR(protocol, 0660, ipv4_protocol_show, ipv4_protocol_store);
+static struct kobj_attribute source_add_attribute =
+ __ATTR(source_add, 0660, ipv4_source_address_show,
+ ipv4_source_address_store);
+static struct kobj_attribute destination_add_attribute =
+ __ATTR(dest_add, 0660, ipv4_destination_address_show,
+ ipv4_destination_address_store);
+
+static struct attribute *attrs[] = {
+ &version_attribute.attr,
+ &ihl_attribute.attr,
+ &dscp_attribute.attr,
+ &ecn_attribute.attr,
+ &id_attribute.attr,
+ &flags_attribute.attr,
+ &fragment_offset_attribute.attr,
+ &ttl_attribute.attr,
+ &protocol_attribute.attr,
+ &source_add_attribute.attr,
+ &destination_add_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+struct kobject *kobj_framer;
+static struct kobject *kobj_ipv4[MAX_NUM_ETH_PORTS];
+struct kobject *kobj_eth_ports[MAX_NUM_ETH_PORTS];
+
+/**
+ * xroe_sysfs_ipv4_init - Creates the xroe sysfs "ipv4" subdirectory & entries
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ *
+ * Creates the xroe sysfs "ipv4" subdirectory and entries under "xroe"
+ */
+int xroe_sysfs_ipv4_init(void)
+{
+ int ret;
+ int i;
+ char eth_port_dir_name[16];
+
+ kobj_framer = kobject_create_and_add("framer", root_xroe_kobj);
+ if (!kobj_framer)
+ return -ENOMEM;
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++) {
+ snprintf(eth_port_dir_name, sizeof(eth_port_dir_name),
+ "eth_port_%d", i);
+ kobj_eth_ports[i] = kobject_create_and_add(eth_port_dir_name,
+ kobj_framer);
+ if (!kobj_eth_ports[i])
+ return -ENOMEM;
+ kobj_ipv4[i] = kobject_create_and_add("ipv4",
+ kobj_eth_ports[i]);
+ if (!kobj_ipv4[i])
+ return -ENOMEM;
+ ret = sysfs_create_group(kobj_ipv4[i], &attr_group);
+ if (ret) {
+ kobject_put(kobj_ipv4[i]);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * xroe_sysfs_ipv4_exit - Deletes the xroe sysfs "ipv4" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "ipv4" subdirectory and entries,
+ * under the "xroe" entry
+ */
+void xroe_sysfs_ipv4_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+ kobject_put(kobj_ipv4[i]);
+}
+
+/**
+ * utils_ipv4addr_hextochar - Integer to char array for IPv4 addresses
+ * @ip: The IP address in integer format
+ * @bytes: The IP address in a 4-byte array
+ *
+ * Converts an IPv4 address given in unsigned integer format to a character array
+ */
+static void utils_ipv4addr_hextochar(u32 ip, unsigned char *bytes)
+{
+ bytes[0] = ip & 0xFF;
+ bytes[1] = (ip >> 8) & 0xFF;
+ bytes[2] = (ip >> 16) & 0xFF;
+ bytes[3] = (ip >> 24) & 0xFF;
+}
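+
+/*
+ * For example, ip = 0xC0A80001 gives bytes = {0x01, 0x00, 0xA8, 0xC0};
+ * the show functions print bytes[3] down to bytes[0], i.e. "192.168.0.1"
+ */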
+
+/**
+ * utils_ipv4addr_chartohex - Character array to integer for IPv4 addresses
+ * @ip_addr: The character array containing the IP address
+ * @p_ip_addr: The converted IPv4 address
+ *
+ * Converts an IPv4 address given as a character array to integer format
+ *
+ * Return: 4 (the number of octets parsed) on success, any other value
+ * in case of malformed input
+ */
+static int utils_ipv4addr_chartohex(char *ip_addr, u32 *p_ip_addr)
+{
+ int count = 0, ret = -1;
+ char *string;
+ unsigned char *found;
+ u32 byte_array[4];
+ u32 byte = 0;
+
+ string = ip_addr;
+ while ((found = (unsigned char *)strsep(&string, ".")) != NULL) {
+ if (count < 4) {
+ ret = kstrtouint(found, 10, &byte);
+ if (ret)
+ return ret;
+ byte_array[count] = byte;
+ } else {
+ break;
+ }
+ count++;
+ }
+
+ if (count == 4) {
+ ret = count;
+ *p_ip_addr = byte_array[3] | (byte_array[2] << 8)
+ | (byte_array[1] << 16) | (byte_array[0] << 24);
+ }
+ return ret;
+}
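+
+/*
+ * Note that only a fully dotted quad (e.g. "192.168.0.1") is accepted;
+ * missing or non-numeric fields make kstrtouint() fail, so the caller's
+ * "== 4" check rejects such input.
+ */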
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c
new file mode 100644
index 000000000000..c26eae426cc1
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
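+/*
+ * 60 characters comfortably holds a fully expanded IPv6 address
+ * (39 characters) plus a trailing newline
+ */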
+enum { XROE_SIZE_MAX = 60 };
+static int xroe_size;
+static char xroe_tmp[XROE_SIZE_MAX + 1]; /* +1 for NUL termination */
+
+static void utils_ipv6addr_32to16(u32 *ip32, u16 *ip16);
+static int utils_ipv6addr_chartohex(char *ip_addr, u32 *p_ip_addr);
+
+/**
+ * ipv6_version_show - Returns the IPv6 version number
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 version number
+ *
+ * Returns the IPv6 version number
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv6_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 version;
+
+ version = utils_sysfs_show_wrapper(ETH_IPV6_V_ADDR, ETH_IPV6_V_OFFSET,
+ ETH_IPV6_V_MASK, kobj);
+ return sprintf(buff, "%d\n", version);
+}
+
+/**
+ * ipv6_version_store - Writes to the IPv6 version number sysfs entry
+ * (not permitted)
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 version
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 version number sysfs entry (not permitted)
+ *
+ * Return: 0
+ */
+static ssize_t ipv6_version_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ return 0;
+}
+
+/**
+ * ipv6_traffic_class_show - Returns the IPv6 traffic class
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 traffic class
+ *
+ * Returns the IPv6 traffic class
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv6_traffic_class_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 traffic_class;
+
+ traffic_class = utils_sysfs_show_wrapper(ETH_IPV6_TRAFFIC_CLASS_ADDR,
+ ETH_IPV6_TRAFFIC_CLASS_OFFSET,
+ ETH_IPV6_TRAFFIC_CLASS_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", traffic_class);
+}
+
+/**
+ * ipv6_traffic_class_store - Writes to the IPv6 traffic class
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 traffic class
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 traffic class sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv6_traffic_class_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 traffic_class;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &traffic_class);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV6_TRAFFIC_CLASS_ADDR,
+ ETH_IPV6_TRAFFIC_CLASS_OFFSET,
+ ETH_IPV6_TRAFFIC_CLASS_MASK, traffic_class,
+ kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv6_flow_label_show - Returns the IPv6 flow label
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 flow label
+ *
+ * Returns the IPv6 flow label
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv6_flow_label_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 flow_label;
+
+ flow_label = utils_sysfs_show_wrapper(ETH_IPV6_FLOW_LABEL_ADDR,
+ ETH_IPV6_FLOW_LABEL_OFFSET,
+ ETH_IPV6_FLOW_LABEL_MASK, kobj);
+ return sprintf(buff, "%d\n", flow_label);
+}
+
+/**
+ * ipv6_flow_label_store - Writes to the IPv6 flow label
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 flow label
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 flow label sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv6_flow_label_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 flow_label;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &flow_label);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV6_FLOW_LABEL_ADDR,
+ ETH_IPV6_FLOW_LABEL_OFFSET,
+ ETH_IPV6_FLOW_LABEL_MASK, flow_label, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv6_next_header_show - Returns the IPv6 next header
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 next header
+ *
+ * Returns the IPv6 next header
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv6_next_header_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 next_header;
+
+ next_header = utils_sysfs_show_wrapper(ETH_IPV6_NEXT_HEADER_ADDR,
+ ETH_IPV6_NEXT_HEADER_OFFSET,
+ ETH_IPV6_NEXT_HEADER_MASK, kobj);
+ return sprintf(buff, "%d\n", next_header);
+}
+
+/**
+ * ipv6_next_header_store - Writes to the IPv6 next header
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 next header
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 next header sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv6_next_header_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 next_header;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &next_header);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV6_NEXT_HEADER_ADDR,
+ ETH_IPV6_NEXT_HEADER_OFFSET,
+ ETH_IPV6_NEXT_HEADER_MASK, next_header, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv6_hop_limit_show - Returns the IPv6 hop limit
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 hop limit
+ *
+ * Returns the IPv6 hop limit
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv6_hop_limit_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 hop_limit;
+
+ hop_limit = utils_sysfs_show_wrapper(ETH_IPV6_HOP_LIMIT_ADDR,
+ ETH_IPV6_HOP_LIMIT_OFFSET,
+ ETH_IPV6_HOP_LIMIT_MASK, kobj);
+ return sprintf(buff, "%d\n", hop_limit);
+}
+
+/**
+ * ipv6_hop_limit_store - Writes to the IPv6 hop limit
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 hop limit
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 hop limit sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv6_hop_limit_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ int ret;
+ u32 hop_limit;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &hop_limit);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV6_HOP_LIMIT_ADDR,
+ ETH_IPV6_HOP_LIMIT_OFFSET,
+ ETH_IPV6_HOP_LIMIT_MASK, hop_limit, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv6_source_address_show - Returns the IPv6 source address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 source address
+ *
+ * Returns the IPv6 source address in xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx
+ * format
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv6_source_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 source[4];
+ u16 source_add16[8];
+
+ source[0] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_31_0_ADDR,
+ ETH_IPV6_SOURCE_ADD_31_0_OFFSET,
+ ETH_IPV6_SOURCE_ADD_31_0_MASK,
+ kobj);
+ source[1] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_63_32_ADDR,
+ ETH_IPV6_SOURCE_ADD_63_32_OFFSET,
+ ETH_IPV6_SOURCE_ADD_63_32_MASK,
+ kobj);
+ source[2] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_95_64_ADDR,
+ ETH_IPV6_SOURCE_ADD_95_64_OFFSET,
+ ETH_IPV6_SOURCE_ADD_95_64_MASK,
+ kobj);
+ source[3] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_127_96_ADDR,
+ ETH_IPV6_SOURCE_ADD_127_96_OFFSET,
+ ETH_IPV6_SOURCE_ADD_127_96_MASK,
+ kobj);
+
+ utils_ipv6addr_32to16(source, source_add16);
+ sprintf(buff, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ source_add16[0], source_add16[1], source_add16[2],
+ source_add16[3],
+ source_add16[4], source_add16[5], source_add16[6],
+ source_add16[7]);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_source_address_store - Writes to the IPv6 source address sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 source address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 source address sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv6_source_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ u32 source_add[4];
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ xroe_tmp[xroe_size] = '\0'; /* ensure NUL termination for strsep() */
+ if (utils_ipv6addr_chartohex(xroe_tmp, source_add) == 8) {
+ utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_31_0_ADDR,
+ ETH_IPV6_SOURCE_ADD_31_0_OFFSET,
+ ETH_IPV6_SOURCE_ADD_31_0_MASK,
+ source_add[0], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_63_32_ADDR,
+ ETH_IPV6_SOURCE_ADD_63_32_OFFSET,
+ ETH_IPV6_SOURCE_ADD_63_32_MASK,
+ source_add[1], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_95_64_ADDR,
+ ETH_IPV6_SOURCE_ADD_95_64_OFFSET,
+ ETH_IPV6_SOURCE_ADD_95_64_MASK,
+ source_add[2], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_127_96_ADDR,
+ ETH_IPV6_SOURCE_ADD_127_96_OFFSET,
+ ETH_IPV6_SOURCE_ADD_127_96_MASK,
+ source_add[3], kobj);
+ }
+ return xroe_size;
+}
+
+/**
+ * ipv6_destination_address_show - Returns the IPv6 destination address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 destination address
+ *
+ * Returns the IPv6 destination address in
+ * xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx format
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ipv6_destination_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 dest[4];
+ u16 dest_add16[8];
+
+ dest[0] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_31_0_ADDR,
+ ETH_IPV6_DEST_ADD_31_0_OFFSET,
+ ETH_IPV6_DEST_ADD_31_0_MASK,
+ kobj);
+ dest[1] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_63_32_ADDR,
+ ETH_IPV6_DEST_ADD_63_32_OFFSET,
+ ETH_IPV6_DEST_ADD_63_32_MASK,
+ kobj);
+ dest[2] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_95_64_ADDR,
+ ETH_IPV6_DEST_ADD_95_64_OFFSET,
+ ETH_IPV6_DEST_ADD_95_64_MASK,
+ kobj);
+ dest[3] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_127_96_ADDR,
+ ETH_IPV6_DEST_ADD_127_96_OFFSET,
+ ETH_IPV6_DEST_ADD_127_96_MASK,
+ kobj);
+
+ utils_ipv6addr_32to16(dest, dest_add16);
+ sprintf(buff, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ dest_add16[0], dest_add16[1], dest_add16[2],
+ dest_add16[3],
+ dest_add16[4], dest_add16[5], dest_add16[6],
+ dest_add16[7]);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_destination_address_store - Writes to the IPv6 destination address
+ * sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 destination address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 destination address sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv6_destination_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ u32 dest_add[4];
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ xroe_tmp[xroe_size] = '\0'; /* ensure NUL termination for strsep() */
+ if (utils_ipv6addr_chartohex(xroe_tmp, dest_add) == 8) {
+ utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_31_0_ADDR,
+ ETH_IPV6_DEST_ADD_31_0_OFFSET,
+ ETH_IPV6_DEST_ADD_31_0_MASK,
+ dest_add[0], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_63_32_ADDR,
+ ETH_IPV6_DEST_ADD_63_32_OFFSET,
+ ETH_IPV6_DEST_ADD_63_32_MASK,
+ dest_add[1], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_95_64_ADDR,
+ ETH_IPV6_DEST_ADD_95_64_OFFSET,
+ ETH_IPV6_DEST_ADD_95_64_MASK,
+ dest_add[2], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_127_96_ADDR,
+ ETH_IPV6_DEST_ADD_127_96_OFFSET,
+ ETH_IPV6_DEST_ADD_127_96_MASK,
+ dest_add[3], kobj);
+ }
+ return xroe_size;
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+static struct kobj_attribute version_attribute =
+ __ATTR(version, 0444, ipv6_version_show, ipv6_version_store);
+static struct kobj_attribute traffic_class =
+ __ATTR(traffic_class, 0660, ipv6_traffic_class_show,
+ ipv6_traffic_class_store);
+static struct kobj_attribute flow_label =
+ __ATTR(flow_label, 0660, ipv6_flow_label_show, ipv6_flow_label_store);
+static struct kobj_attribute next_header =
+ __ATTR(next_header, 0660, ipv6_next_header_show,
+ ipv6_next_header_store);
+static struct kobj_attribute hop_limit =
+ __ATTR(hop_limit, 0660, ipv6_hop_limit_show, ipv6_hop_limit_store);
+static struct kobj_attribute source_add_attribute =
+ __ATTR(source_add, 0660, ipv6_source_address_show,
+ ipv6_source_address_store);
+static struct kobj_attribute dest_add_attribute =
+ __ATTR(dest_add, 0660, ipv6_destination_address_show,
+ ipv6_destination_address_store);
+
+static struct attribute *attrs[] = {
+ &version_attribute.attr,
+ &traffic_class.attr,
+ &flow_label.attr,
+ &next_header.attr,
+ &hop_limit.attr,
+ &source_add_attribute.attr,
+ &dest_add_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+static struct kobject *kobj_ipv6[MAX_NUM_ETH_PORTS];
+
+/**
+ * xroe_sysfs_ipv6_init - Creates the xroe sysfs "ipv6" subdirectory & entries
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ *
+ * Creates the xroe sysfs "ipv6" subdirectory and entries under "xroe"
+ */
+int xroe_sysfs_ipv6_init(void)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++) {
+ kobj_ipv6[i] = kobject_create_and_add("ipv6",
+ kobj_eth_ports[i]);
+ if (!kobj_ipv6[i])
+ return -ENOMEM;
+ ret = sysfs_create_group(kobj_ipv6[i], &attr_group);
+ if (ret) {
+ kobject_put(kobj_ipv6[i]);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * xroe_sysfs_ipv6_exit - Deletes the xroe sysfs "ipv6" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "ipv6" subdirectory and entries,
+ * under the "xroe" entry
+ *
+ */
+void xroe_sysfs_ipv6_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+ kobject_put(kobj_ipv6[i]);
+}
+
+/**
+ * utils_ipv6addr_32to16 - u32 to u16 conversion for IPv6 addresses
+ * @ip32: The IPv6 address as four u32 words
+ * @ip16: The IPv6 address as eight u16 groups
+ *
+ * Converts an IPv6 address given as four u32 words to eight u16 groups
+ */
+static void utils_ipv6addr_32to16(u32 *ip32, u16 *ip16)
+{
+ ip16[0] = ip32[0] >> 16;
+ ip16[1] = ip32[0] & 0x0000FFFF;
+ ip16[2] = ip32[1] >> 16;
+ ip16[3] = ip32[1] & 0x0000FFFF;
+ ip16[4] = ip32[2] >> 16;
+ ip16[5] = ip32[2] & 0x0000FFFF;
+ ip16[6] = ip32[3] >> 16;
+ ip16[7] = ip32[3] & 0x0000FFFF;
+}
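+
+/*
+ * For example, ip32[0] = 0xFE800000 is split into ip16[0] = 0xFE80 and
+ * ip16[1] = 0x0000, matching the group order of the printed address.
+ */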
+
+/**
+ * utils_ipv6addr_chartohex - Character array to integer for IPv6 addresses
+ * @ip_addr: The character array containing the IP address
+ * @p_ip_addr: The converted IPv6 address
+ *
+ * Converts an IPv6 address given as a character array to integer format
+ *
+ * Return: 8 (the number of 16-bit groups parsed) on success, any other
+ * value in case of malformed input
+ */
+static int utils_ipv6addr_chartohex(char *ip_addr, u32 *p_ip_addr)
+{
+ int ret;
+ int count;
+ char *string;
+ unsigned char *found;
+ u16 ip_array_16[8];
+ u32 field;
+
+ ret = -1;
+ count = 0;
+ string = ip_addr;
+ while ((found = (unsigned char *)strsep(&string, ":")) != NULL) {
+ if (count < 8) {
+ ret = kstrtouint(found, 16, &field);
+ if (ret)
+ return ret;
+ ip_array_16[count] = (u16)field;
+ } else {
+ break;
+ }
+ count++;
+ }
+ if (count == 8) {
+ p_ip_addr[0] = ip_array_16[1] | (ip_array_16[0] << 16);
+ p_ip_addr[1] = ip_array_16[3] | (ip_array_16[2] << 16);
+ p_ip_addr[2] = ip_array_16[5] | (ip_array_16[4] << 16);
+ p_ip_addr[3] = ip_array_16[7] | (ip_array_16[6] << 16);
+ ret = count;
+ }
+ return ret;
+}
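+
+/*
+ * Note that the address must be fully expanded, e.g.
+ * "fe80:0000:0000:0000:0000:0000:0000:0001"; the "::" shorthand yields
+ * empty fields that kstrtouint() rejects, so the caller's "== 8" check
+ * fails for such input.
+ */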
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_stats.c b/drivers/staging/xroeframer/sysfs_xroe_framer_stats.c
new file mode 100644
index 000000000000..063664bb987a
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_stats.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+/**
+ * total_rx_good_pkt_show - Returns the total good rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total good rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_good_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_TOTAL_RX_GOOD_PKT_CNT_ADDR,
+ STATS_TOTAL_RX_GOOD_PKT_CNT_OFFSET,
+ STATS_TOTAL_RX_GOOD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_pkt_show - Returns the total bad rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_TOTAL_RX_BAD_PKT_CNT_ADDR,
+ STATS_TOTAL_RX_BAD_PKT_CNT_OFFSET,
+ STATS_TOTAL_RX_BAD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_fcs_show - Returns the total bad fcs count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad frame check sequences count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_fcs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_TOTAL_RX_BAD_FCS_CNT_ADDR,
+ STATS_TOTAL_RX_BAD_FCS_CNT_OFFSET,
+ STATS_TOTAL_RX_BAD_FCS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_user_pkt_show - Returns the total user rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total user rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_user_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_PACKETS_CNT_ADDR,
+ STATS_USER_DATA_RX_PACKETS_CNT_OFFSET,
+ STATS_USER_DATA_RX_PACKETS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_good_user_pkt_show - Returns the total good user rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total good user rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_good_user_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_GOOD_PKT_CNT_ADDR,
+ STATS_USER_DATA_RX_GOOD_PKT_CNT_OFFSET,
+ STATS_USER_DATA_RX_GOOD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_pkt_show - Returns the total bad user rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_BAD_PKT_CNT_ADDR,
+ STATS_USER_DATA_RX_BAD_PKT_CNT_OFFSET,
+ STATS_USER_DATA_RX_BAD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_fcs_show - Returns the total bad user rx fcs count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user frame check sequences count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_fcs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_BAD_FCS_CNT_ADDR,
+ STATS_USER_DATA_RX_BAD_FCS_CNT_OFFSET,
+ STATS_USER_DATA_RX_BAD_FCS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_user_ctrl_pkt_show - Returns the total user rx control packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total user rx control packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_user_ctrl_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_PACKETS_CNT_ADDR,
+ STATS_USER_CTRL_RX_PACKETS_CNT_OFFSET,
+ STATS_USER_CTRL_RX_PACKETS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_good_user_ctrl_pkt_show - Returns the total good user rx
+ * control packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total good user rx control packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_good_user_ctrl_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_GOOD_PKT_CNT_ADDR,
+ STATS_USER_CTRL_RX_GOOD_PKT_CNT_OFFSET,
+ STATS_USER_CTRL_RX_GOOD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_ctrl_pkt_show - Returns the total bad user rx
+ * control packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user rx control packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_ctrl_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_BAD_PKT_CNT_ADDR,
+ STATS_USER_CTRL_RX_BAD_PKT_CNT_OFFSET,
+ STATS_USER_CTRL_RX_BAD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_ctrl_fcs_show - Returns the total bad user rx
+ * control fcs count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user control frame check sequences count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_ctrl_fcs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_BAD_FCS_CNT_ADDR,
+ STATS_USER_CTRL_RX_BAD_FCS_CNT_OFFSET,
+ STATS_USER_CTRL_RX_BAD_FCS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * rx_user_pkt_rate_show - Returns the rate of user packets
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the rate of user rx packets
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t rx_user_pkt_rate_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 rate;
+
+ rate = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_PKTS_RATE_ADDR,
+ STATS_USER_DATA_RX_PKTS_RATE_OFFSET,
+ STATS_USER_DATA_RX_PKTS_RATE_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", rate);
+}
+
+/**
+ * rx_user_ctrl_pkt_rate_show - Returns the rate of user control packets
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the rate of user rx control packets
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t rx_user_ctrl_pkt_rate_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 rate;
+
+ rate = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_PKTS_RATE_ADDR,
+ STATS_USER_CTRL_RX_PKTS_RATE_OFFSET,
+ STATS_USER_CTRL_RX_PKTS_RATE_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", rate);
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+static struct kobj_attribute total_rx_good_pkt_attribute =
+ __ATTR(total_rx_good_pkt, 0444, total_rx_good_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_pkt_attribute =
+ __ATTR(total_rx_bad_pkt, 0444, total_rx_bad_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_fcs_attribute =
+ __ATTR(total_rx_bad_fcs, 0444, total_rx_bad_fcs_show, NULL);
+static struct kobj_attribute total_rx_user_pkt_attribute =
+ __ATTR(total_rx_user_pkt, 0444, total_rx_user_pkt_show, NULL);
+static struct kobj_attribute total_rx_good_user_pkt_attribute =
+ __ATTR(total_rx_good_user_pkt, 0444, total_rx_good_user_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_pkt_attribute =
+ __ATTR(total_rx_bad_user_pkt, 0444, total_rx_bad_user_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_fcs_attribute =
+ __ATTR(total_rx_bad_user_fcs, 0444, total_rx_bad_user_fcs_show, NULL);
+static struct kobj_attribute total_rx_user_ctrl_pkt_attribute =
+ __ATTR(total_rx_user_ctrl_pkt, 0444, total_rx_user_ctrl_pkt_show, NULL);
+static struct kobj_attribute total_rx_good_user_ctrl_pkt_attribute =
+ __ATTR(total_rx_good_user_ctrl_pkt, 0444,
+ total_rx_good_user_ctrl_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_ctrl_pkt_attribute =
+ __ATTR(total_rx_bad_user_ctrl_pkt, 0444,
+ total_rx_bad_user_ctrl_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_ctrl_fcs_attribute =
+ __ATTR(total_rx_bad_user_ctrl_fcs, 0444,
+ total_rx_bad_user_ctrl_fcs_show, NULL);
+static struct kobj_attribute rx_user_pkt_rate_attribute =
+ __ATTR(rx_user_pkt_rate, 0444, rx_user_pkt_rate_show, NULL);
+static struct kobj_attribute rx_user_ctrl_pkt_rate_attribute =
+ __ATTR(rx_user_ctrl_pkt_rate, 0444, rx_user_ctrl_pkt_rate_show, NULL);
+
+static struct attribute *attrs[] = {
+ &total_rx_good_pkt_attribute.attr,
+ &total_rx_bad_pkt_attribute.attr,
+ &total_rx_bad_fcs_attribute.attr,
+ &total_rx_user_pkt_attribute.attr,
+ &total_rx_good_user_pkt_attribute.attr,
+ &total_rx_bad_user_pkt_attribute.attr,
+ &total_rx_bad_user_fcs_attribute.attr,
+ &total_rx_user_ctrl_pkt_attribute.attr,
+ &total_rx_good_user_ctrl_pkt_attribute.attr,
+ &total_rx_bad_user_ctrl_pkt_attribute.attr,
+ &total_rx_bad_user_ctrl_fcs_attribute.attr,
+ &rx_user_pkt_rate_attribute.attr,
+ &rx_user_ctrl_pkt_rate_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+struct kobject *kobj_stats;
+
+/**
+ * xroe_sysfs_stats_init - Creates the xroe sysfs "stats" subdirectory & entries
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ *
+ * Creates the xroe sysfs "stats" subdirectory and entries under "xroe"
+ */
+int xroe_sysfs_stats_init(void)
+{
+ int ret;
+
+ kobj_stats = kobject_create_and_add("stats", root_xroe_kobj);
+ if (!kobj_stats)
+ return -ENOMEM;
+
+ ret = sysfs_create_group(kobj_stats, &attr_group);
+ if (ret)
+ kobject_put(kobj_stats);
+
+ return ret;
+}
+
+/**
+ * xroe_sysfs_stats_exit - Deletes the xroe sysfs "stats" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "stats" subdirectory and entries,
+ * under the "xroe" entry
+ */
+void xroe_sysfs_stats_exit(void)
+{
+ kobject_put(kobj_stats);
+}
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_udp.c b/drivers/staging/xroeframer/sysfs_xroe_framer_udp.c
new file mode 100644
index 000000000000..8f8a77b25da7
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_udp.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
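+/* 15 characters is ample for a 16-bit port number in decimal */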
+enum { XROE_SIZE_MAX = 15 };
+static int xroe_size;
+
+/**
+ * udp_source_port_show - Returns the UDP source port
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the UDP source port
+ *
+ * Returns the UDP source port
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t udp_source_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 source_port;
+
+ source_port = utils_sysfs_show_wrapper(ETH_UDP_SOURCE_PORT_ADDR,
+ ETH_UDP_SOURCE_PORT_OFFSET,
+ ETH_UDP_SOURCE_PORT_MASK, kobj);
+ return sprintf(buff, "%d\n", source_port);
+}
+
+/**
+ * udp_source_port_store - Writes to the UDP source port sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the UDP source port
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the UDP source port sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t udp_source_port_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 source_port;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &source_port);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_UDP_SOURCE_PORT_ADDR,
+ ETH_UDP_SOURCE_PORT_OFFSET,
+ ETH_UDP_SOURCE_PORT_MASK, source_port, kobj);
+ return xroe_size;
+}
+
+/**
+ * udp_destination_port_show - Returns the UDP destination port
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the UDP destination port
+ *
+ * Returns the UDP destination port
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t udp_destination_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 dest_port;
+
+ dest_port = utils_sysfs_show_wrapper(ETH_UDP_DESTINATION_PORT_ADDR,
+ ETH_UDP_DESTINATION_PORT_OFFSET,
+ ETH_UDP_DESTINATION_PORT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", dest_port);
+}
+
+/**
+ * udp_destination_port_store - Writes to the UDP destination port sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the UDP destination port
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the UDP destination port sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t udp_destination_port_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 dest_port;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &dest_port);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_UDP_DESTINATION_PORT_ADDR,
+ ETH_UDP_DESTINATION_PORT_OFFSET,
+ ETH_UDP_DESTINATION_PORT_MASK, dest_port,
+ kobj);
+ return xroe_size;
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+static struct kobj_attribute source_port =
+ __ATTR(source_port, 0660, udp_source_port_show,
+ udp_source_port_store);
+static struct kobj_attribute dest_port =
+ __ATTR(dest_port, 0660, udp_destination_port_show,
+ udp_destination_port_store);
+
+static struct attribute *attrs[] = {
+ &source_port.attr,
+ &dest_port.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+static struct kobject *kobj_udp[MAX_NUM_ETH_PORTS];
+
+/**
+ * xroe_sysfs_udp_init - Creates the xroe sysfs "udp" subdirectory and entries
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ *
+ * Creates the xroe sysfs "udp" subdirectory and entries under "xroe"
+ */
+int xroe_sysfs_udp_init(void)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ kobj_udp[i] = kobject_create_and_add("udp", kobj_eth_ports[i]);
+ if (!kobj_udp[i])
+ return -ENOMEM;
+		ret = sysfs_create_group(kobj_udp[i], &attr_group);
+		if (ret) {
+			kobject_put(kobj_udp[i]);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/**
+ * xroe_sysfs_udp_exit - Deletes the xroe sysfs "udp" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "udp" subdirectory and entries,
+ * under the "xroe" entry
+ *
+ */
+void xroe_sysfs_udp_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+ kobject_put(kobj_udp[i]);
+}
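The attributes above are plain kobject attributes under a per-port "udp"
directory. A minimal userspace sketch of how they can be exercised once the
module is loaded; the sysfs path is an assumption based on the kobject names
in this series ("xroe" -> "eth_port_0" -> "udp") and should be verified on
the running target.

/* Hedged example (userspace C): set and read back the UDP source port. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/xroe/eth_port_0/udp/source_port";
	char buf[16] = "8080";
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, buf, strlen(buf)) < 0)	/* parsed by kstrtouint() */
		perror("write");
	lseek(fd, 0, SEEK_SET);
	memset(buf, 0, sizeof(buf));
	if (read(fd, buf, sizeof(buf) - 1) > 0)	/* show() prints "%d\n" */
		printf("source_port = %s", buf);
	close(fd);
	return 0;
}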
diff --git a/drivers/staging/xroeframer/xroe_framer.c b/drivers/staging/xroeframer/xroe_framer.c
new file mode 100644
index 000000000000..dba7c69b010f
--- /dev/null
+++ b/drivers/staging/xroeframer/xroe_framer.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "xroe_framer.h"
+
+#define DRIVER_NAME "framer"
+
+/*
+ * TODO: to be made static as well, so that multiple instances can be used. As
+ * of now, the "lp" structure is shared among the multiple source files
+ */
+struct framer_local *lp;
+static struct platform_driver framer_driver;
+/*
+ * TODO: placeholder for the IRQ once it's been implemented
+ * in the framer block
+ */
+static irqreturn_t framer_irq(int irq, void *lp)
+{
+ return IRQ_HANDLED;
+}
+
+/**
+ * framer_probe - Probes the device tree to locate the framer block
+ * @pdev: The structure containing the device's details
+ *
+ * Probes the device tree to locate the framer block and maps it to
+ * the kernel virtual memory space
+ *
+ * Return: 0 on success or a negative errno on error.
+ */
+static int framer_probe(struct platform_device *pdev)
+{
+ struct resource *r_mem; /* IO mem resources */
+ struct resource *r_irq;
+ struct device *dev = &pdev->dev;
+ int rc = 0;
+
+ dev_dbg(dev, "Device Tree Probing\n");
+ lp = devm_kzalloc(&pdev->dev, sizeof(*lp), GFP_KERNEL);
+ if (!lp)
+ return -ENOMEM;
+
+ /* Get iospace for the device */
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->base_addr = devm_ioremap_resource(&pdev->dev, r_mem);
+ if (IS_ERR(lp->base_addr))
+ return PTR_ERR(lp->base_addr);
+
+ dev_set_drvdata(dev, lp);
+	rc = xroe_sysfs_init();
+	if (rc)
+		return rc;
+ /* Get IRQ for the device */
+ /*
+ * TODO: No IRQ *yet* in the DT from the framer block, as it's still
+ * under development. To be added once it's in the block, and also
+ * replace with platform_get_irq_byname()
+ */
+ r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!r_irq) {
+ dev_info(dev, "no IRQ found\n");
+ /*
+ * TODO: Return non-zero (error) code on no IRQ found.
+ * To be implemented once the IRQ is in the block
+ */
+ return 0;
+ }
+ rc = devm_request_irq(dev, lp->irq, &framer_irq, 0, DRIVER_NAME, lp);
+ if (rc) {
+ dev_err(dev, "testmodule: Could not allocate interrupt %d.\n",
+ lp->irq);
+ /*
+ * TODO: Return non-zero (error) code on no IRQ found.
+ * To be implemented once the IRQ is in the block
+ */
+ return 0;
+ }
+
+ return rc;
+}
+
+/**
+ * framer_init - Registers the driver
+ *
+ * Return: 0 on success, negative errno on registration failure
+ *
+ * Registers the framer platform driver; the sysfs entries are
+ * created when the device is probed.
+ */
+static int __init framer_init(void)
+{
+ int ret;
+
+ pr_debug("XROE framer driver init\n");
+
+ ret = platform_driver_register(&framer_driver);
+
+ return ret;
+}
+
+/**
+ * framer_exit - Unregisters the driver
+ *
+ * Removes the xroe sysfs entries and unregisters the framer
+ * platform driver
+ */
+static void __exit framer_exit(void)
+{
+ xroe_sysfs_exit();
+ platform_driver_unregister(&framer_driver);
+ pr_info("XROE Framer exit\n");
+}
+
+module_init(framer_init);
+module_exit(framer_exit);
+
+static const struct of_device_id framer_of_match[] = {
+ { .compatible = "xlnx,roe-framer-1.0", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, framer_of_match);
+
+static struct platform_driver framer_driver = {
+ .driver = {
+ /*
+ * TODO: .name shouldn't be necessary, though removing
+ * it results in kernel panic. To investigate further
+ */
+ .name = DRIVER_NAME,
+ .of_match_table = framer_of_match,
+ },
+ .probe = framer_probe,
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("framer - Xilinx Radio over Ethernet Framer driver");
diff --git a/drivers/staging/xroeframer/xroe_framer.h b/drivers/staging/xroeframer/xroe_framer.h
new file mode 100644
index 000000000000..03b8bb39095c
--- /dev/null
+++ b/drivers/staging/xroeframer/xroe_framer.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+#include "roe_framer_ctrl.h"
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/stat.h> /* S_IRUSR, S_IWUSR */
+
+/* TODO: Remove hardcoded value of number of Ethernet ports and read the value
+ * from the device tree.
+ */
+#define MAX_NUM_ETH_PORTS 0x4
+/* TODO: to be made static as well, so that multiple instances can be used. As
+ * of now, the following 3 structures are shared among the multiple
+ * source files
+ */
+extern struct framer_local *lp;
+extern struct kobject *root_xroe_kobj;
+extern struct kobject *kobj_framer;
+extern struct kobject *kobj_eth_ports[MAX_NUM_ETH_PORTS];
+struct framer_local {
+ int irq;
+ unsigned long mem_start;
+ unsigned long mem_end;
+ void __iomem *base_addr;
+};
+
+int xroe_sysfs_init(void);
+int xroe_sysfs_ipv4_init(void);
+int xroe_sysfs_ipv6_init(void);
+int xroe_sysfs_udp_init(void);
+int xroe_sysfs_stats_init(void);
+void xroe_sysfs_exit(void);
+void xroe_sysfs_ipv4_exit(void);
+void xroe_sysfs_ipv6_exit(void);
+void xroe_sysfs_udp_exit(void);
+void xroe_sysfs_stats_exit(void);
+int utils_write32withmask(void __iomem *working_address, u32 value,
+ u32 mask, u32 offset);
+int utils_check_address_offset(u32 offset, size_t device_size);
+void utils_sysfs_store_wrapper(u32 address, u32 offset, u32 mask, u32 value,
+ struct kobject *kobj);
+u32 utils_sysfs_show_wrapper(u32 address, u32 offset, u32 mask,
+ struct kobject *kobj);
diff --git a/drivers/staging/xroetrafficgen/Kconfig b/drivers/staging/xroetrafficgen/Kconfig
new file mode 100644
index 000000000000..d2ead1483408
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# Xilinx Radio over Ethernet Traffic Generator driver
+#
+
+config XROE_TRAFFIC_GEN
+ tristate "Xilinx Radio over Ethernet Traffic Generator driver"
+ help
+	  The Traffic Generator is used for testing other RoE IP blocks
+	  (currently the XRoE Framer) and simulates a radio antenna
+	  interface. It generates rolling ramp data for eCPRI antenna
+	  paths. Each path is tagged with the antenna number. The sink
+	  locks to this ramp data, then checks that each subsequent
+	  value is as expected.
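The lock-then-check behaviour the help text describes can be modelled in a
few lines; a software-only sketch of the sink, unrelated to the actual RTL
(all names invented for illustration).

/* Software model of the sink: lock to the incoming ramp, then flag
 * any sample that is not previous + 1. Purely illustrative. */
#include <stdbool.h>
#include <stdint.h>

struct ramp_sink {
	bool locked;
	uint16_t expected;
};

static bool ramp_sink_check(struct ramp_sink *s, uint16_t sample)
{
	if (!s->locked) {		/* first sample: lock to the ramp */
		s->locked = true;
		s->expected = sample + 1;
		return true;
	}
	if (sample != s->expected) {	/* ramp broken: drop lock */
		s->locked = false;
		return false;
	}
	s->expected = sample + 1;	/* rolling: wraps naturally */
	return true;
}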
diff --git a/drivers/staging/xroetrafficgen/Makefile b/drivers/staging/xroetrafficgen/Makefile
new file mode 100644
index 000000000000..e180a9bbc589
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Radio over Ethernet Traffic Generator driver
+#
+obj-$(CONFIG_XROE_TRAFFIC_GEN) := xroe_traffic_gen.o
+
+xroe_traffic_gen-objs := xroe-traffic-gen.o \
+			 xroe-traffic-gen-sysfs.o
diff --git a/drivers/staging/xroetrafficgen/README b/drivers/staging/xroetrafficgen/README
new file mode 100644
index 000000000000..1828426af847
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/README
@@ -0,0 +1,19 @@
+Xilinx Radio over Ethernet Traffic Generator driver
+===================================================
+
+About the RoE Framer Traffic Generator
+
+The Traffic Generator is used for testing other RoE IP blocks (currently
+the XRoE Framer) and simulates a radio antenna interface. It generates rolling
+ramp data for eCPRI antenna paths. Each path is tagged with the antenna number.
+The sink locks to this ramp data, then checks that each subsequent value is as
+expected.
+
+
+About the Linux Driver
+
+The RoE Traffic Generator Linux Driver provides sysfs access to control a
+simulated radio antenna interface.
+The driver binds to the hardware through a Device Tree entry
+(see "dt-binding.txt" for more information). When the driver is loaded, the
+general controls (such as sink lock, enable and loopback) are exposed in the
+device's sysfs directory (under /sys/bus/platform/devices/).
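A minimal example of reading one of those controls from userspace; the
device directory name depends on the Device Tree node and is an assumption
here.

#include <stdio.h>

int main(void)
{
	/* Path assumed: platform device named after the DT node. */
	const char *p =
		"/sys/bus/platform/devices/a0000000.xroe_traffic_gen/radio_id";
	FILE *f = fopen(p, "r");
	unsigned int id;

	if (!f)
		return 1;
	if (fscanf(f, "%u", &id) == 1)	/* radio_id_show() prints "%d\n" */
		printf("radio_id = %u (0x%x)\n", id, id);
	fclose(f);
	return 0;
}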
diff --git a/drivers/staging/xroetrafficgen/roe_radio_ctrl.h b/drivers/staging/xroetrafficgen/roe_radio_ctrl.h
new file mode 100644
index 000000000000..e093386f3e94
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/roe_radio_ctrl.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+/*-----------------------------------------------------------------------------
+ * C Header bank BASE definitions
+ *-----------------------------------------------------------------------------
+ */
+#define ROE_RADIO_CFG_BASE_ADDR 0x0
+#define ROE_RADIO_SOURCE_BASE_ADDR 0x1000
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_radio_cfg
+ * with prefix radio_ @ address 0x0
+ *-----------------------------------------------------------------------------
+ */
+/* Type = roInt */
+#define RADIO_ID_ADDR 0x0
+#define RADIO_ID_MASK 0xffffffff
+#define RADIO_ID_OFFSET 0x0
+#define RADIO_ID_WIDTH 0x20
+#define RADIO_ID_DEFAULT 0x120001
+
+/* Type = rw */
+#define RADIO_TIMEOUT_ENABLE_ADDR 0x4
+#define RADIO_TIMEOUT_ENABLE_MASK 0x1
+#define RADIO_TIMEOUT_ENABLE_OFFSET 0x0
+#define RADIO_TIMEOUT_ENABLE_WIDTH 0x1
+#define RADIO_TIMEOUT_ENABLE_DEFAULT 0x0
+
+/* Type = ro */
+#define RADIO_TIMEOUT_STATUS_ADDR 0x8
+#define RADIO_TIMEOUT_STATUS_MASK 0x1
+#define RADIO_TIMEOUT_STATUS_OFFSET 0x0
+#define RADIO_TIMEOUT_STATUS_WIDTH 0x1
+#define RADIO_TIMEOUT_STATUS_DEFAULT 0x1
+
+/* Type = rw */
+#define RADIO_TIMEOUT_VALUE_ADDR 0xc
+#define RADIO_TIMEOUT_VALUE_MASK 0xfff
+#define RADIO_TIMEOUT_VALUE_OFFSET 0x0
+#define RADIO_TIMEOUT_VALUE_WIDTH 0xc
+#define RADIO_TIMEOUT_VALUE_DEFAULT 0x80
+
+/* Type = rw */
+#define RADIO_GPIO_CDC_LEDMODE2_ADDR 0x10
+#define RADIO_GPIO_CDC_LEDMODE2_MASK 0x1
+#define RADIO_GPIO_CDC_LEDMODE2_OFFSET 0x0
+#define RADIO_GPIO_CDC_LEDMODE2_WIDTH 0x1
+#define RADIO_GPIO_CDC_LEDMODE2_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_GPIO_CDC_LEDGPIO_ADDR 0x10
+#define RADIO_GPIO_CDC_LEDGPIO_MASK 0x30
+#define RADIO_GPIO_CDC_LEDGPIO_OFFSET 0x4
+#define RADIO_GPIO_CDC_LEDGPIO_WIDTH 0x2
+#define RADIO_GPIO_CDC_LEDGPIO_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_GPIO_CDC_DIPSTATUS_ADDR 0x14
+#define RADIO_GPIO_CDC_DIPSTATUS_MASK 0xff
+#define RADIO_GPIO_CDC_DIPSTATUS_OFFSET 0x0
+#define RADIO_GPIO_CDC_DIPSTATUS_WIDTH 0x8
+#define RADIO_GPIO_CDC_DIPSTATUS_DEFAULT 0x0
+
+/* Type = wPlsH */
+#define RADIO_SW_TRIGGER_ADDR 0x20
+#define RADIO_SW_TRIGGER_MASK 0x1
+#define RADIO_SW_TRIGGER_OFFSET 0x0
+#define RADIO_SW_TRIGGER_WIDTH 0x1
+#define RADIO_SW_TRIGGER_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_CDC_ENABLE_ADDR 0x24
+#define RADIO_CDC_ENABLE_MASK 0x1
+#define RADIO_CDC_ENABLE_OFFSET 0x0
+#define RADIO_CDC_ENABLE_WIDTH 0x1
+#define RADIO_CDC_ENABLE_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_ADDR 0x24
+#define RADIO_CDC_ERROR_MASK 0x2
+#define RADIO_CDC_ERROR_OFFSET 0x1
+#define RADIO_CDC_ERROR_WIDTH 0x1
+#define RADIO_CDC_ERROR_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_ADDR 0x24
+#define RADIO_CDC_STATUS_MASK 0x4
+#define RADIO_CDC_STATUS_OFFSET 0x2
+#define RADIO_CDC_STATUS_WIDTH 0x1
+#define RADIO_CDC_STATUS_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_CDC_LOOPBACK_ADDR 0x28
+#define RADIO_CDC_LOOPBACK_MASK 0x1
+#define RADIO_CDC_LOOPBACK_OFFSET 0x0
+#define RADIO_CDC_LOOPBACK_WIDTH 0x1
+#define RADIO_CDC_LOOPBACK_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_SINK_ENABLE_ADDR 0x2c
+#define RADIO_SINK_ENABLE_MASK 0x1
+#define RADIO_SINK_ENABLE_OFFSET 0x0
+#define RADIO_SINK_ENABLE_WIDTH 0x1
+#define RADIO_SINK_ENABLE_DEFAULT 0x1
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_31_0_ADDR 0x30
+#define RADIO_CDC_ERROR_31_0_MASK 0xffffffff
+#define RADIO_CDC_ERROR_31_0_OFFSET 0x0
+#define RADIO_CDC_ERROR_31_0_WIDTH 0x20
+#define RADIO_CDC_ERROR_31_0_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_63_32_ADDR 0x34
+#define RADIO_CDC_ERROR_63_32_MASK 0xffffffff
+#define RADIO_CDC_ERROR_63_32_OFFSET 0x0
+#define RADIO_CDC_ERROR_63_32_WIDTH 0x20
+#define RADIO_CDC_ERROR_63_32_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_95_64_ADDR 0x38
+#define RADIO_CDC_ERROR_95_64_MASK 0xffffffff
+#define RADIO_CDC_ERROR_95_64_OFFSET 0x0
+#define RADIO_CDC_ERROR_95_64_WIDTH 0x20
+#define RADIO_CDC_ERROR_95_64_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_127_96_ADDR 0x3c
+#define RADIO_CDC_ERROR_127_96_MASK 0xffffffff
+#define RADIO_CDC_ERROR_127_96_OFFSET 0x0
+#define RADIO_CDC_ERROR_127_96_WIDTH 0x20
+#define RADIO_CDC_ERROR_127_96_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_31_0_ADDR 0x40
+#define RADIO_CDC_STATUS_31_0_MASK 0xffffffff
+#define RADIO_CDC_STATUS_31_0_OFFSET 0x0
+#define RADIO_CDC_STATUS_31_0_WIDTH 0x20
+#define RADIO_CDC_STATUS_31_0_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_63_32_ADDR 0x44
+#define RADIO_CDC_STATUS_63_32_MASK 0xffffffff
+#define RADIO_CDC_STATUS_63_32_OFFSET 0x0
+#define RADIO_CDC_STATUS_63_32_WIDTH 0x20
+#define RADIO_CDC_STATUS_63_32_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_95_64_ADDR 0x48
+#define RADIO_CDC_STATUS_95_64_MASK 0xffffffff
+#define RADIO_CDC_STATUS_95_64_OFFSET 0x0
+#define RADIO_CDC_STATUS_95_64_WIDTH 0x20
+#define RADIO_CDC_STATUS_95_64_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_127_96_ADDR 0x4c
+#define RADIO_CDC_STATUS_127_96_MASK 0xffffffff
+#define RADIO_CDC_STATUS_127_96_OFFSET 0x0
+#define RADIO_CDC_STATUS_127_96_WIDTH 0x20
+#define RADIO_CDC_STATUS_127_96_DEFAULT 0x0
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_radio_source
+ * with prefix fram_ @ address 0x1000
+ *-----------------------------------------------------------------------------
+ */
+/* Type = rwpdef */
+#define FRAM_PACKET_DATA_SIZE_ADDR 0x1000
+#define FRAM_PACKET_DATA_SIZE_MASK 0x7f
+#define FRAM_PACKET_DATA_SIZE_OFFSET 0x0
+#define FRAM_PACKET_DATA_SIZE_WIDTH 0x7
+#define FRAM_PACKET_DATA_SIZE_DEFAULT 0x0
+
+/* Type = rwpdef */
+#define FRAM_PAUSE_DATA_SIZE_ADDR 0x1004
+#define FRAM_PAUSE_DATA_SIZE_MASK 0x7f
+#define FRAM_PAUSE_DATA_SIZE_OFFSET 0x0
+#define FRAM_PAUSE_DATA_SIZE_WIDTH 0x7
+#define FRAM_PAUSE_DATA_SIZE_DEFAULT 0x0
diff --git a/drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c b/drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c
new file mode 100644
index 000000000000..c9b05866fd78
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c
@@ -0,0 +1,824 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "roe_radio_ctrl.h"
+#include "xroe-traffic-gen.h"
+
+/**
+ * utils_sysfs_store_wrapper - Wraps the storing function for sysfs entries
+ * @dev: The structure containing the device's information
+ * @address: The address of the register to be written
+ * @offset: The offset from the address of the register
+ * @mask: The mask to be used on the value to be written
+ * @value: The value to be written to the register
+ *
+ * Wraps the core functionality of all "store" functions of sysfs entries.
+ */
+static void utils_sysfs_store_wrapper(struct device *dev, u32 address,
+ u32 offset, u32 mask, u32 value)
+{
+ void __iomem *working_address;
+ u32 read_register_value = 0;
+ u32 register_value_to_write = 0;
+ u32 delta = 0;
+ u32 buffer = 0;
+ struct xroe_traffic_gen_local *lp = dev_get_drvdata(dev);
+
+ working_address = (void __iomem *)(lp->base_addr + address);
+ read_register_value = ioread32(working_address);
+ buffer = (value << offset);
+ register_value_to_write = read_register_value & ~mask;
+ delta = buffer & mask;
+ register_value_to_write |= delta;
+ iowrite32(register_value_to_write, working_address);
+}
+
+/**
+ * utils_sysfs_show_wrapper - Wraps the "show" function for sysfs entries
+ * @dev: The structure containing the device's information
+ * @address: The address of the register to be read
+ * @offset: The offset from the address of the register
+ * @mask: The mask to be used on the value to be read
+ *
+ * Wraps the core functionality of all "show" functions of sysfs entries.
+ *
+ * Return: The value designated by the address, offset and mask
+ */
+static u32 utils_sysfs_show_wrapper(struct device *dev, u32 address, u32 offset,
+ u32 mask)
+{
+ void __iomem *working_address;
+ u32 buffer;
+ struct xroe_traffic_gen_local *lp = dev_get_drvdata(dev);
+
+ working_address = (void __iomem *)(lp->base_addr + address);
+ buffer = ioread32(working_address);
+ return (buffer & mask) >> offset;
+}
+
+/**
+ * radio_id_show - Returns the block's ID number
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the ID number string
+ *
+ * Returns the traffic gen's ID (0x120001, i.e. 1179649, by default)
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_id;
+
+ radio_id = utils_sysfs_show_wrapper(dev, RADIO_ID_ADDR,
+ RADIO_ID_OFFSET,
+ RADIO_ID_MASK);
+ return sprintf(buf, "%d\n", radio_id);
+}
+static DEVICE_ATTR_RO(radio_id);
+
+/**
+ * timeout_enable_show - Returns the traffic gen's timeout enable status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads the traffic gen's timeout enable status
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t timeout_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 timeout_enable;
+
+ timeout_enable = utils_sysfs_show_wrapper(dev,
+ RADIO_TIMEOUT_ENABLE_ADDR,
+ RADIO_TIMEOUT_ENABLE_OFFSET,
+ RADIO_TIMEOUT_ENABLE_MASK);
+ return sprintf(buf, "%d\n", timeout_enable);
+}
+
+/**
+ * timeout_enable_store - Writes to the traffic gens's timeout enable
+ * status register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's timeout enable
+ * status to the register
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t timeout_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+	u32 enable;
+
+	if (sysfs_streq(buf, "true"))
+		enable = 1;
+	else if (sysfs_streq(buf, "false"))
+		enable = 0;
+	else
+		return -EINVAL;
+ utils_sysfs_store_wrapper(dev, RADIO_TIMEOUT_ENABLE_ADDR,
+ RADIO_TIMEOUT_ENABLE_OFFSET,
+ RADIO_TIMEOUT_ENABLE_MASK, enable);
+ return count;
+}
+static DEVICE_ATTR_RW(timeout_enable);
+
+/**
+ * timeout_status_show - Returns the timeout status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout status
+ *
+ * Returns the traffic gen's timeout status (0x1 by default)
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t timeout_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 timeout;
+
+ timeout = utils_sysfs_show_wrapper(dev, RADIO_TIMEOUT_STATUS_ADDR,
+ RADIO_TIMEOUT_STATUS_OFFSET,
+ RADIO_TIMEOUT_STATUS_MASK);
+ return sprintf(buf, "%d\n", timeout);
+}
+static DEVICE_ATTR_RO(timeout_status);
+
+/**
+ * timeout_value_show - Returns the traffic gen's timeout value
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ *
+ * Reads the traffic gen's timeout value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t timeout_value_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 timeout_value;
+
+ timeout_value = utils_sysfs_show_wrapper(dev, RADIO_TIMEOUT_VALUE_ADDR,
+ RADIO_TIMEOUT_VALUE_OFFSET,
+ RADIO_TIMEOUT_VALUE_MASK);
+ return sprintf(buf, "%d\n", timeout_value);
+}
+
+/**
+ * timeout_value_store - Writes to the traffic gen's timeout value
+ * register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's timeout value
+ * to the register
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t timeout_value_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 timeout_value;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &timeout_value);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_TIMEOUT_VALUE_ADDR,
+ RADIO_TIMEOUT_VALUE_OFFSET,
+ RADIO_TIMEOUT_VALUE_MASK, timeout_value);
+ return count;
+}
+static DEVICE_ATTR_RW(timeout_value);
+
+/**
+ * ledmode_show - Returns the current LED mode
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the LED mode value
+ *
+ * Reads the traffic gen's LED mode value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ledmode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u32 ledmode;
+
+ ledmode = utils_sysfs_show_wrapper(dev, RADIO_GPIO_CDC_LEDMODE2_ADDR,
+ RADIO_GPIO_CDC_LEDMODE2_OFFSET,
+ RADIO_GPIO_CDC_LEDMODE2_MASK);
+ return sprintf(buf, "%d\n", ledmode);
+}
+
+/**
+ * ledmode_store - Writes to the current LED mode register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the LED mode value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's LED mode value
+ * to the register
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t ledmode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 ledmode;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &ledmode);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_GPIO_CDC_LEDMODE2_ADDR,
+ RADIO_GPIO_CDC_LEDMODE2_OFFSET,
+ RADIO_GPIO_CDC_LEDMODE2_MASK, ledmode);
+ return count;
+}
+static DEVICE_ATTR_RW(ledmode);
+
+/**
+ * ledgpio_show - Returns the current LED gpio
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the LED GPIO value
+ *
+ * Reads the traffic gen's LED GPIO value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ledgpio_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u32 ledgpio;
+
+ ledgpio = utils_sysfs_show_wrapper(dev, RADIO_GPIO_CDC_LEDGPIO_ADDR,
+ RADIO_GPIO_CDC_LEDGPIO_OFFSET,
+ RADIO_GPIO_CDC_LEDGPIO_MASK);
+ return sprintf(buf, "%d\n", ledgpio);
+}
+
+/**
+ * ledgpio_store - Writes to the current LED gpio register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the LED GPIO value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's LED GPIO value
+ * to the register
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t ledgpio_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 ledgpio;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &ledgpio);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_GPIO_CDC_LEDGPIO_ADDR,
+ RADIO_GPIO_CDC_LEDGPIO_OFFSET,
+ RADIO_GPIO_CDC_LEDGPIO_MASK, ledgpio);
+ return count;
+}
+static DEVICE_ATTR_RW(ledgpio);
+
+/**
+ * dip_status_show - Returns the current DIP switch value
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the DIP switch status
+ *
+ * Reads the GPIO DIP switch value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t dip_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 dip_status;
+
+	dip_status = utils_sysfs_show_wrapper(dev,
+					      RADIO_GPIO_CDC_DIPSTATUS_ADDR,
+					      RADIO_GPIO_CDC_DIPSTATUS_OFFSET,
+					      RADIO_GPIO_CDC_DIPSTATUS_MASK);
+ return sprintf(buf, "0x%08x\n", dip_status);
+}
+static DEVICE_ATTR_RO(dip_status);
+
+/**
+ * sw_trigger_show - Returns the current SW trigger status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the SW trigger status
+ *
+ * Reads the traffic gen's SW trigger status
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t sw_trigger_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 sw_trigger;
+
+ sw_trigger = utils_sysfs_show_wrapper(dev, RADIO_SW_TRIGGER_ADDR,
+ RADIO_SW_TRIGGER_OFFSET,
+ RADIO_SW_TRIGGER_MASK);
+ return sprintf(buf, "%d\n", sw_trigger);
+}
+
+/**
+ * sw_trigger_store - Writes to the SW trigger status register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the SW trigger value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's SW trigger
+ * value to the register
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t sw_trigger_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 sw_trigger;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &sw_trigger);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_SW_TRIGGER_ADDR,
+ RADIO_SW_TRIGGER_OFFSET,
+ RADIO_SW_TRIGGER_MASK, sw_trigger);
+ return count;
+}
+static DEVICE_ATTR_RW(sw_trigger);
+
+/**
+ * radio_enable_show - Returns the current radio enable status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads the traffic gen's radio enable value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_enable;
+
+ radio_enable = utils_sysfs_show_wrapper(dev, RADIO_CDC_ENABLE_ADDR,
+ RADIO_CDC_ENABLE_OFFSET,
+ RADIO_CDC_ENABLE_MASK);
+ return sprintf(buf, "%d\n", radio_enable);
+}
+
+/**
+ * radio_enable_store - Writes to the radio enable register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the radio enable value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's radio enable
+ * value to the register
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t radio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 radio_enable;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &radio_enable);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_CDC_ENABLE_ADDR,
+ RADIO_CDC_ENABLE_OFFSET,
+ RADIO_CDC_ENABLE_MASK,
+ radio_enable);
+ return count;
+}
+static DEVICE_ATTR_RW(radio_enable);
+
+/**
+ * radio_error_show - Returns the current radio error status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the error status
+ *
+ * Reads the traffic gen's radio error value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_error;
+
+	radio_error = utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_ADDR,
+					       RADIO_CDC_ERROR_OFFSET,
+					       RADIO_CDC_ERROR_MASK);
+ return sprintf(buf, "%d\n", radio_error);
+}
+static DEVICE_ATTR_RO(radio_error);
+
+/**
+ * radio_status_show - Returns the current radio status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the status
+ *
+ * Reads the traffic gen's radio status value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_status;
+
+ radio_status = utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_ADDR,
+ RADIO_CDC_STATUS_OFFSET,
+ RADIO_CDC_STATUS_MASK);
+ return sprintf(buf, "%d\n", radio_status);
+}
+static DEVICE_ATTR_RO(radio_status);
+
+/**
+ * radio_loopback_show - Returns the current radio loopback status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the loopback status
+ *
+ * Reads the traffic gen's radio loopback value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_loopback_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_loopback;
+
+ radio_loopback = utils_sysfs_show_wrapper(dev,
+ RADIO_CDC_LOOPBACK_ADDR,
+ RADIO_CDC_LOOPBACK_OFFSET,
+ RADIO_CDC_LOOPBACK_MASK);
+ return sprintf(buf, "%d\n", radio_loopback);
+}
+
+/**
+ * radio_loopback_store - Writes to the radio loopback register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the loopback value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's radio loopback
+ * value to the register
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t radio_loopback_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 radio_loopback;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &radio_loopback);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_CDC_LOOPBACK_ADDR,
+ RADIO_CDC_LOOPBACK_OFFSET,
+ RADIO_CDC_LOOPBACK_MASK, radio_loopback);
+ return count;
+}
+static DEVICE_ATTR_RW(radio_loopback);
+
+/**
+ * radio_sink_enable_show - Returns the current radio sink enable status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the sink enable status
+ *
+ * Reads the traffic gen's radio sink enable value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_sink_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 sink_enable;
+
+ sink_enable = utils_sysfs_show_wrapper(dev, RADIO_SINK_ENABLE_ADDR,
+ RADIO_SINK_ENABLE_OFFSET,
+ RADIO_SINK_ENABLE_MASK);
+ return sprintf(buf, "%d\n", sink_enable);
+}
+
+/**
+ * radio_sink_enable_store - Writes to the radio sink enable register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the sink enable value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's radio sink
+ * enable value to the register
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t radio_sink_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 sink_enable;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &sink_enable);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_SINK_ENABLE_ADDR,
+ RADIO_SINK_ENABLE_OFFSET,
+ RADIO_SINK_ENABLE_MASK, sink_enable);
+ return count;
+}
+static DEVICE_ATTR_RW(radio_sink_enable);
+
+/**
+ * antenna_status_show - Returns the status for all antennas
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the antenna status values
+ *
+ * Returns the traffic gen's status for all antennas
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t antenna_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 status_0_31;
+ u32 status_63_32;
+ u32 status_95_64;
+ u32 status_127_96;
+
+ status_0_31 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_31_0_ADDR,
+ RADIO_CDC_STATUS_31_0_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_31_0_MASK));
+ status_63_32 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_63_32_ADDR,
+ RADIO_CDC_STATUS_63_32_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_63_32_MASK));
+ status_95_64 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_95_64_ADDR,
+ RADIO_CDC_STATUS_95_64_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_95_64_MASK));
+ status_127_96 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_127_96_ADDR,
+ RADIO_CDC_STATUS_127_96_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_127_96_MASK));
+
+ return sprintf(buf, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ status_0_31, status_63_32, status_95_64, status_127_96);
+}
+static DEVICE_ATTR_RO(antenna_status);
+
+/**
+ * antenna_error_show - Returns the error for all antennas
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the antenna error values
+ *
+ * Returns the traffic gen's error for all antennas
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t antenna_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 error_0_31;
+ u32 error_63_32;
+ u32 error_95_64;
+ u32 error_127_96;
+
+ error_0_31 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_31_0_ADDR,
+ RADIO_CDC_ERROR_31_0_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_31_0_MASK));
+ error_63_32 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_63_32_ADDR,
+ RADIO_CDC_ERROR_63_32_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_63_32_MASK));
+ error_95_64 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_95_64_ADDR,
+ RADIO_CDC_ERROR_95_64_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_95_64_MASK));
+ error_127_96 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_127_96_ADDR,
+ RADIO_CDC_ERROR_127_96_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_127_96_MASK));
+
+ return sprintf(buf, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ error_0_31, error_63_32, error_95_64, error_127_96);
+}
+static DEVICE_ATTR_RO(antenna_error);
+
+/**
+ * framer_packet_size_show - Returns the size of the framer's packet
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the packet size value
+ *
+ * Reads the traffic gen's framer packet size value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t framer_packet_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 packet_size;
+
+ packet_size = utils_sysfs_show_wrapper(dev, FRAM_PACKET_DATA_SIZE_ADDR,
+ FRAM_PACKET_DATA_SIZE_OFFSET,
+ FRAM_PACKET_DATA_SIZE_MASK);
+ return sprintf(buf, "%d\n", packet_size);
+}
+
+/**
+ * framer_packet_size_store - Writes to the framer's packet size register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the packet size value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's framer packet
+ * size value to the register
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t framer_packet_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 packet_size;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &packet_size);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, FRAM_PACKET_DATA_SIZE_ADDR,
+ FRAM_PACKET_DATA_SIZE_OFFSET,
+ FRAM_PACKET_DATA_SIZE_MASK, packet_size);
+ return count;
+}
+static DEVICE_ATTR_RW(framer_packet_size);
+
+/**
+ * framer_pause_size_show - Returns the size of the framer's pause
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the pause size value
+ *
+ * Reads the traffic gen's framer pause size value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t framer_pause_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 pause_size;
+
+ pause_size = utils_sysfs_show_wrapper(dev, FRAM_PAUSE_DATA_SIZE_ADDR,
+ FRAM_PAUSE_DATA_SIZE_OFFSET,
+ FRAM_PAUSE_DATA_SIZE_MASK);
+ return sprintf(buf, "%d\n", pause_size);
+}
+
+/**
+ * framer_pause_size_store - Writes to the framer's pause size register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the pause size value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's framer pause
+ * size value to the register
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t framer_pause_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 pause_size;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &pause_size);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, FRAM_PAUSE_DATA_SIZE_ADDR,
+ FRAM_PAUSE_DATA_SIZE_OFFSET,
+ FRAM_PAUSE_DATA_SIZE_MASK, pause_size);
+ return count;
+}
+static DEVICE_ATTR_RW(framer_pause_size);
+
+static struct attribute *xroe_traffic_gen_attrs[] = {
+ &dev_attr_radio_id.attr,
+ &dev_attr_timeout_enable.attr,
+ &dev_attr_timeout_status.attr,
+ &dev_attr_timeout_value.attr,
+ &dev_attr_ledmode.attr,
+ &dev_attr_ledgpio.attr,
+ &dev_attr_dip_status.attr,
+ &dev_attr_sw_trigger.attr,
+ &dev_attr_radio_enable.attr,
+ &dev_attr_radio_error.attr,
+ &dev_attr_radio_status.attr,
+ &dev_attr_radio_loopback.attr,
+ &dev_attr_radio_sink_enable.attr,
+ &dev_attr_antenna_status.attr,
+ &dev_attr_antenna_error.attr,
+ &dev_attr_framer_packet_size.attr,
+ &dev_attr_framer_pause_size.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(xroe_traffic_gen);
+
+/**
+ * xroe_traffic_gen_sysfs_init - Creates the xroe sysfs directory and entries
+ * @dev: The device's structure
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ *
+ * Creates the xroetrafficgen sysfs directory and entries
+ */
+int xroe_traffic_gen_sysfs_init(struct device *dev)
+{
+ int ret;
+
+ dev->groups = xroe_traffic_gen_groups;
+ ret = sysfs_create_group(&dev->kobj, *xroe_traffic_gen_groups);
+ if (ret)
+ dev_err(dev, "sysfs creation failed\n");
+
+ return ret;
+}
+
+/**
+ * xroe_traffic_gen_sysfs_exit - Deletes the xroe sysfs directory and entries
+ * @dev: The device's structure
+ *
+ * Deletes the xroetrafficgen sysfs directory and entries
+ */
+void xroe_traffic_gen_sysfs_exit(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, *xroe_traffic_gen_groups);
+}
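Since ATTRIBUTE_GROUPS() above already emits the xroe_traffic_gen_groups
array, a leaner alternative on kernels with dev_groups support (v5.4+)
would hand the array to the driver core and drop the explicit init/exit
helpers entirely; a hedged sketch, not part of this patch:

static struct platform_driver xroe_traffic_gen_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = xroe_traffic_gen_of_match,
		/* driver core creates and removes the attributes */
		.dev_groups = xroe_traffic_gen_groups,
	},
	.probe = xroe_traffic_gen_probe,
	.remove = xroe_traffic_gen_remove,
};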
diff --git a/drivers/staging/xroetrafficgen/xroe-traffic-gen.c b/drivers/staging/xroetrafficgen/xroe-traffic-gen.c
new file mode 100644
index 000000000000..1ed6e488d38d
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/xroe-traffic-gen.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include "xroe-traffic-gen.h"
+
+#define DRIVER_NAME "xroe_traffic_gen"
+
+static struct platform_driver xroe_traffic_gen_driver;
+
+/**
+ * xroe_traffic_gen_probe - Probes the device tree to locate the traffic gen
+ * block
+ * @pdev: The structure containing the device's details
+ *
+ * Probes the device tree to locate the traffic gen block and maps it to
+ * the kernel virtual memory space
+ *
+ * Return: 0 on success or a negative errno on error.
+ */
+static int xroe_traffic_gen_probe(struct platform_device *pdev)
+{
+ struct xroe_traffic_gen_local *lp;
+ struct resource *r_mem; /* IO mem resources */
+ struct device *dev = &pdev->dev;
+
+ lp = devm_kzalloc(&pdev->dev, sizeof(*lp), GFP_KERNEL);
+ if (!lp)
+ return -ENOMEM;
+
+ /* Get iospace for the device */
+ /*
+ * TODO: Use platform_get_resource_byname() instead when the DT entry
+ * of the traffic gen block has been finalised (when it gets out of
+ * the development stage).
+ */
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->base_addr = devm_ioremap_resource(&pdev->dev, r_mem);
+ if (IS_ERR(lp->base_addr))
+ return PTR_ERR(lp->base_addr);
+
+ dev_set_drvdata(dev, lp);
+	return xroe_traffic_gen_sysfs_init(dev);
+}
+
+/**
+ * xroe_traffic_gen_remove - Removes the sysfs entries created by the driver
+ * @pdev: The structure containing the device's details
+ *
+ * Removes the sysfs entries created by the driver
+ *
+ * Return: 0
+ */
+static int xroe_traffic_gen_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ xroe_traffic_gen_sysfs_exit(dev);
+ return 0;
+}
+
+/**
+ * xroe_traffic_gen_init - Registers the driver
+ *
+ * Return: 0 on success, negative errno on registration failure
+ *
+ * Registers the traffic gen platform driver; the sysfs entries are
+ * created when the device is probed
+ */
+static int __init xroe_traffic_gen_init(void)
+{
+ int ret;
+
+ pr_info("XROE traffic generator driver init\n");
+ ret = platform_driver_register(&xroe_traffic_gen_driver);
+ return ret;
+}
+
+/**
+ * xroe_traffic_gen_exit - Destroys the driver
+ *
+ * Unregisters the traffic gen driver
+ */
+static void __exit xroe_traffic_gen_exit(void)
+{
+ platform_driver_unregister(&xroe_traffic_gen_driver);
+ pr_debug("XROE traffic generator driver exit\n");
+}
+
+static const struct of_device_id xroe_traffic_gen_of_match[] = {
+ { .compatible = "xlnx,roe-traffic-gen-1.0", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xroe_traffic_gen_of_match);
+
+static struct platform_driver xroe_traffic_gen_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xroe_traffic_gen_of_match,
+ },
+ .probe = xroe_traffic_gen_probe,
+ .remove = xroe_traffic_gen_remove,
+};
+
+module_init(xroe_traffic_gen_init);
+module_exit(xroe_traffic_gen_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx Radio over Ethernet Traffic Generator driver");
diff --git a/drivers/staging/xroetrafficgen/xroe-traffic-gen.h b/drivers/staging/xroetrafficgen/xroe-traffic-gen.h
new file mode 100644
index 000000000000..55d968d89e10
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/xroe-traffic-gen.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+struct xroe_traffic_gen_local {
+ void __iomem *base_addr;
+};
+
+enum { XROE_SIZE_MAX = 15 };
+
+int xroe_traffic_gen_sysfs_init(struct device *dev);
+void xroe_traffic_gen_sysfs_exit(struct device *dev);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 3403667a9592..ab2f0ceb1e23 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4084,9 +4084,12 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
struct se_cmd *se_cmd = &cmd->se_cmd;
- if (se_cmd->se_tfo != NULL) {
- spin_lock_irq(&se_cmd->t_state_lock);
- if (se_cmd->transport_state & CMD_T_ABORTED) {
+ if (!se_cmd->se_tfo)
+ continue;
+
+ spin_lock_irq(&se_cmd->t_state_lock);
+ if (se_cmd->transport_state & CMD_T_ABORTED) {
+ if (!(se_cmd->transport_state & CMD_T_TAS))
/*
* LIO's abort path owns the cleanup for this,
* so put it back on the list and let
@@ -4094,11 +4097,10 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
*/
list_move_tail(&cmd->i_conn_node,
&conn->conn_cmd_list);
- } else {
- se_cmd->transport_state |= CMD_T_FABRIC_STOP;
- }
- spin_unlock_irq(&se_cmd->t_state_lock);
+ } else {
+ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
}
+ spin_unlock_irq(&se_cmd->t_state_lock);
}
spin_unlock_bh(&conn->cmd_lock);
@@ -4383,6 +4385,9 @@ int iscsit_close_session(struct iscsi_session *sess)
iscsit_stop_time2retain_timer(sess);
spin_unlock_bh(&se_tpg->session_lock);
+ if (sess->sess_ops->ErrorRecoveryLevel == 2)
+ iscsit_free_connection_recovery_entries(sess);
+
/*
* transport_deregister_session_configfs() will clear the
* struct se_node_acl->nacl_sess pointer now as a iscsi_np process context
@@ -4411,9 +4416,6 @@ int iscsit_close_session(struct iscsi_session *sess)
transport_deregister_session(sess->se_sess);
- if (sess->sess_ops->ErrorRecoveryLevel == 2)
- iscsit_free_connection_recovery_entries(sess);
-
iscsit_free_all_ooo_cmdsns(sess);
spin_lock_bh(&se_tpg->session_lock);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 0fa1d57b26fa..3cd671bbb9a4 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -508,102 +508,102 @@ static ssize_t lio_target_nacl_info_show(struct config_item *item, char *page)
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (!se_sess) {
- rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
+ rb += sysfs_emit_at(page, rb, "No active iSCSI Session for Initiator"
" Endpoint: %s\n", se_nacl->initiatorname);
} else {
sess = se_sess->fabric_sess_ptr;
- rb += sprintf(page+rb, "InitiatorName: %s\n",
+ rb += sysfs_emit_at(page, rb, "InitiatorName: %s\n",
sess->sess_ops->InitiatorName);
- rb += sprintf(page+rb, "InitiatorAlias: %s\n",
+ rb += sysfs_emit_at(page, rb, "InitiatorAlias: %s\n",
sess->sess_ops->InitiatorAlias);
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"LIO Session ID: %u ISID: 0x%6ph TSIH: %hu ",
sess->sid, sess->isid, sess->tsih);
- rb += sprintf(page+rb, "SessionType: %s\n",
+ rb += sysfs_emit_at(page, rb, "SessionType: %s\n",
(sess->sess_ops->SessionType) ?
"Discovery" : "Normal");
- rb += sprintf(page+rb, "Session State: ");
+ rb += sysfs_emit_at(page, rb, "Session State: ");
switch (sess->session_state) {
case TARG_SESS_STATE_FREE:
- rb += sprintf(page+rb, "TARG_SESS_FREE\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_FREE\n");
break;
case TARG_SESS_STATE_ACTIVE:
- rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_ACTIVE\n");
break;
case TARG_SESS_STATE_LOGGED_IN:
- rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_LOGGED_IN\n");
break;
case TARG_SESS_STATE_FAILED:
- rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_FAILED\n");
break;
case TARG_SESS_STATE_IN_CONTINUE:
- rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_IN_CONTINUE\n");
break;
default:
- rb += sprintf(page+rb, "ERROR: Unknown Session"
+ rb += sysfs_emit_at(page, rb, "ERROR: Unknown Session"
" State!\n");
break;
}
- rb += sprintf(page+rb, "---------------------[iSCSI Session"
+ rb += sysfs_emit_at(page, rb, "---------------------[iSCSI Session"
" Values]-----------------------\n");
- rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
+ rb += sysfs_emit_at(page, rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
" : MaxCmdSN : ITT : TTT\n");
max_cmd_sn = (u32) atomic_read(&sess->max_cmd_sn);
- rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
+ rb += sysfs_emit_at(page, rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
" 0x%08x 0x%08x\n",
sess->cmdsn_window,
(max_cmd_sn - sess->exp_cmd_sn) + 1,
sess->exp_cmd_sn, max_cmd_sn,
sess->init_task_tag, sess->targ_xfer_tag);
- rb += sprintf(page+rb, "----------------------[iSCSI"
+ rb += sysfs_emit_at(page, rb, "----------------------[iSCSI"
" Connections]-------------------------\n");
spin_lock(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
- rb += sprintf(page+rb, "CID: %hu Connection"
+ rb += sysfs_emit_at(page, rb, "CID: %hu Connection"
" State: ", conn->cid);
switch (conn->conn_state) {
case TARG_CONN_STATE_FREE:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_FREE\n");
break;
case TARG_CONN_STATE_XPT_UP:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_XPT_UP\n");
break;
case TARG_CONN_STATE_IN_LOGIN:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_IN_LOGIN\n");
break;
case TARG_CONN_STATE_LOGGED_IN:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_LOGGED_IN\n");
break;
case TARG_CONN_STATE_IN_LOGOUT:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_IN_LOGOUT\n");
break;
case TARG_CONN_STATE_LOGOUT_REQUESTED:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_LOGOUT_REQUESTED\n");
break;
case TARG_CONN_STATE_CLEANUP_WAIT:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_CLEANUP_WAIT\n");
break;
default:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"ERROR: Unknown Connection State!\n");
break;
}
- rb += sprintf(page+rb, " Address %pISc %s", &conn->login_sockaddr,
+ rb += sysfs_emit_at(page, rb, " Address %pISc %s", &conn->login_sockaddr,
(conn->network_transport == ISCSI_TCP) ?
"TCP" : "SCTP");
- rb += sprintf(page+rb, " StatSN: 0x%08x\n",
+ rb += sysfs_emit_at(page, rb, " StatSN: 0x%08x\n",
conn->stat_sn);
}
spin_unlock(&sess->conn_lock);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index e32d93b92742..4017464a5909 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1053,6 +1053,7 @@ int iscsi_target_locate_portal(
iscsi_target_set_sock_callbacks(conn);
login->np = np;
+ conn->tpg = NULL;
login_req = (struct iscsi_login_req *) login->req;
payload_length = ntoh24(login_req->dlength);
@@ -1122,7 +1123,6 @@ int iscsi_target_locate_portal(
*/
sessiontype = strncmp(s_buf, DISCOVERY, 9);
if (!sessiontype) {
- conn->tpg = iscsit_global->discovery_tpg;
if (!login->leading_connection)
goto get_target;
@@ -1139,9 +1139,11 @@ int iscsi_target_locate_portal(
* Serialize access across the discovery struct iscsi_portal_group to
* process login attempt.
*/
+ conn->tpg = iscsit_global->discovery_tpg;
if (iscsit_access_np(np, conn->tpg) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ conn->tpg = NULL;
ret = -1;
goto out;
}
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 7a461fbb1566..31cd3c02e517 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1262,18 +1262,20 @@ static struct iscsi_param *iscsi_check_key(
return param;
if (!(param->phase & phase)) {
- pr_err("Key \"%s\" may not be negotiated during ",
- param->name);
+ char *phase_name;
+
switch (phase) {
case PHASE_SECURITY:
- pr_debug("Security phase.\n");
+ phase_name = "Security";
break;
case PHASE_OPERATIONAL:
- pr_debug("Operational phase.\n");
+ phase_name = "Operational";
break;
default:
- pr_debug("Unknown phase.\n");
+ phase_name = "Unknown";
}
+ pr_err("Key \"%s\" may not be negotiated during %s phase.\n",
+ param->name, phase_name);
return NULL;
}
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 16d5a4e117a2..5ae5d94c5b93 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -394,6 +394,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host
ret = device_register(&tl_hba->dev);
if (ret) {
pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
+ put_device(&tl_hba->dev);
return -ENODEV;
}
@@ -1072,7 +1073,7 @@ check_len:
*/
ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
if (ret)
- goto out;
+ return ERR_PTR(ret);
sh = tl_hba->sh;
tcm_loop_hba_no_cnt++;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 20fe28703985..edddc66ad133 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -151,7 +151,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
struct se_session *se_sess = se_cmd->se_sess;
struct se_node_acl *nacl = se_sess->se_node_acl;
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
- unsigned long flags;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, unpacked_lun);
@@ -182,10 +181,6 @@ out_unlock:
se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
- spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
- list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
- spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
-
return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
@@ -856,7 +851,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
EXPORT_SYMBOL(target_to_linux_sector);
struct devices_idr_iter {
- struct config_item *prev_item;
int (*fn)(struct se_device *dev, void *data);
void *data;
};
@@ -866,11 +860,9 @@ static int target_devices_idr_iter(int id, void *p, void *data)
{
struct devices_idr_iter *iter = data;
struct se_device *dev = p;
+ struct config_item *item;
int ret;
- config_item_put(iter->prev_item);
- iter->prev_item = NULL;
-
/*
* We add the device early to the idr, so it can be used
* by backend modules during configuration. We do not want
@@ -880,12 +872,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
if (!target_dev_configured(dev))
return 0;
- iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
- if (!iter->prev_item)
+ item = config_item_get_unless_zero(&dev->dev_group.cg_item);
+ if (!item)
return 0;
mutex_unlock(&device_mutex);
ret = iter->fn(dev, iter->data);
+ config_item_put(item);
mutex_lock(&device_mutex);
return ret;
@@ -908,7 +901,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
mutex_lock(&device_mutex);
ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
mutex_unlock(&device_mutex);
- config_item_put(iter.prev_item);
return ret;
}
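
[Editor's note] The reworked iterator no longer carries a reference in iter->prev_item between callback invocations; each call pins the device itself, drops device_mutex so iter->fn() may sleep, and releases the reference before relocking. Consolidated from the hunks above for readability (comments ours), the patched callback reads:

static int target_devices_idr_iter(int id, void *p, void *data)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	struct config_item *item;
	int ret;

	if (!target_dev_configured(dev))
		return 0;

	item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!item)
		return 0;		/* racing with removal: skip */

	mutex_unlock(&device_mutex);	/* iter->fn() may sleep */
	ret = iter->fn(dev, iter->data);
	config_item_put(item);		/* nothing carried to the next call */
	mutex_lock(&device_mutex);
	return ret;
}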
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 7143d03f0e02..18fbbe510d01 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -340,7 +340,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
len += sg->length;
}
- iov_iter_bvec(&iter, READ, bvec, sgl_nents, len);
+ iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
if (is_write)
ret = vfs_iter_write(fd, &iter, &pos, 0);
else
@@ -477,7 +477,7 @@ fd_execute_write_same(struct se_cmd *cmd)
len += se_dev->dev_attrib.block_size;
}
- iov_iter_bvec(&iter, READ, bvec, nolb, len);
+ iov_iter_bvec(&iter, WRITE, bvec, nolb, len);
ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);
kfree(bvec);
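
[Editor's note] Both hunks pass the true data direction to iov_iter_bvec(): the iterator's direction must agree with the vfs_iter_write()/vfs_iter_read() call that consumes it, and a bool is_write maps cleanly onto WRITE/READ. A sketch of the corrected pattern (do_io and its parameters are hypothetical):

#include <linux/fs.h>
#include <linux/uio.h>

/* The iterator direction describes the data flow: WRITE when the kernel
 * buffer is the source, READ when it is the destination. */
static ssize_t do_io(struct file *f, struct bio_vec *bvec,
		     unsigned long nr_segs, size_t len, loff_t *pos,
		     bool is_write)
{
	struct iov_iter iter;

	iov_iter_bvec(&iter, is_write ? WRITE : READ, bvec, nr_segs, len);
	return is_write ? vfs_iter_write(f, &iter, pos, 0)
			: vfs_iter_read(f, &iter, pos, 0);
}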
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index feeba3966617..6928ebf0be9c 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -82,8 +82,8 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
{
struct se_session *sess = se_cmd->se_sess;
- assert_spin_locked(&sess->sess_cmd_lock);
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_held(&sess->sess_cmd_lock);
+
/*
* If command already reached CMD_T_COMPLETE state within
* target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
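
[Editor's note] Replacing the assert_spin_locked()/irqs_disabled() pair with lockdep_assert_held() is both cheaper and stricter: it compiles away entirely without CONFIG_LOCKDEP and verifies that the current context holds the lock, not merely that somebody does. A tiny sketch of how such a precondition is usually documented and enforced (names hypothetical):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(obj_lock);
static int obj_count;

/* The "_locked" suffix documents the precondition; the assert enforces
 * it under CONFIG_LOCKDEP and costs nothing otherwise. */
static void obj_inc_locked(void)
{
	lockdep_assert_held(&obj_lock);
	obj_count++;
}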
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index f52fe4000225..82d9e2659abe 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3392,6 +3392,10 @@ int transport_generic_handle_tmr(
unsigned long flags;
bool aborted = false;
+ spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags);
+ list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list);
+ spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags);
+
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->transport_state & CMD_T_ABORTED) {
aborted = true;
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index acff7dd677d6..71c491cbb0c5 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -89,7 +89,7 @@ static int optee_register_device(const uuid_t *device_uuid, u32 device_id)
rc = device_register(&optee_device->dev);
if (rc) {
pr_err("device registration failed, err: %d\n", rc);
- kfree(optee_device);
+ put_device(&optee_device->dev);
}
return rc;
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index a7ccd4d2bd10..2db144d2d26f 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -182,6 +182,9 @@ tee_ioctl_shm_register(struct tee_context *ctx,
if (data.flags)
return -EINVAL;
+ if (!access_ok((void __user *)(unsigned long)data.addr, data.length))
+ return -EFAULT;
+
shm = tee_shm_register(ctx, data.addr, data.length,
TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
if (IS_ERR(shm))
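
[Editor's note] The added access_ok() pre-check cheaply rejects kernel-space or wrapping ranges before tee_shm_register() pins user pages. It validates the range only, so the eventual copy or pin must still handle faults. A sketch of the check in isolation (shm_args_ok is a hypothetical helper):

#include <linux/types.h>
#include <linux/uaccess.h>

/* access_ok() only validates that [addr, addr + length) lies within the
 * user address range; it does not guarantee the pages are mapped. */
static int shm_args_ok(u64 addr, u64 length)
{
	if (!access_ok((void __user *)(unsigned long)addr, length))
		return -EFAULT;
	return 0;
}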
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 2d26ae80e202..bee6d3524e52 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -435,10 +435,6 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
data->sensor[0].irq_name = "tsensor_a73";
data->sensor[0].data = data;
- data->sensor[1].id = HI3660_LITTLE_SENSOR;
- data->sensor[1].irq_name = "tsensor_a53";
- data->sensor[1].data = data;
-
return 0;
}
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index 8025b21f43fa..b5427579fae5 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -60,7 +60,8 @@ endmenu
config INTEL_BXT_PMIC_THERMAL
tristate "Intel Broxton PMIC thermal driver"
- depends on X86 && INTEL_SOC_PMIC_BXTWC && REGMAP
+ depends on X86 && INTEL_SOC_PMIC_BXTWC
+ select REGMAP
help
Select this driver for Intel Broxton PMIC with ADC channels monitoring
system temperature measurements and alerts.
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index c313c4f0e856..9090f87b4491 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -43,11 +43,13 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
int trip, int *temp)
{
struct int34x_thermal_zone *d = zone->devdata;
- int i;
+ int i, ret = 0;
if (d->override_ops && d->override_ops->get_trip_temp)
return d->override_ops->get_trip_temp(zone, trip, temp);
+ mutex_lock(&d->trip_mutex);
+
if (trip < d->aux_trip_nr)
*temp = d->aux_trips[trip];
else if (trip == d->crt_trip_id)
@@ -65,10 +67,12 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
}
}
if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ mutex_unlock(&d->trip_mutex);
+
+ return ret;
}
static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
@@ -76,11 +80,13 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
enum thermal_trip_type *type)
{
struct int34x_thermal_zone *d = zone->devdata;
- int i;
+ int i, ret = 0;
if (d->override_ops && d->override_ops->get_trip_type)
return d->override_ops->get_trip_type(zone, trip, type);
+ mutex_lock(&d->trip_mutex);
+
if (trip < d->aux_trip_nr)
*type = THERMAL_TRIP_PASSIVE;
else if (trip == d->crt_trip_id)
@@ -98,10 +104,12 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
}
}
if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ mutex_unlock(&d->trip_mutex);
+
+ return ret;
}
static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
@@ -173,6 +181,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
int trip_cnt = int34x_zone->aux_trip_nr;
int i;
+ mutex_lock(&int34x_zone->trip_mutex);
+
int34x_zone->crt_trip_id = -1;
if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT",
&int34x_zone->crt_temp))
@@ -200,6 +210,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
int34x_zone->act_trips[i].valid = true;
}
+ mutex_unlock(&int34x_zone->trip_mutex);
+
return trip_cnt;
}
EXPORT_SYMBOL_GPL(int340x_thermal_read_trips);
@@ -223,6 +235,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
if (!int34x_thermal_zone)
return ERR_PTR(-ENOMEM);
+ mutex_init(&int34x_thermal_zone->trip_mutex);
+
int34x_thermal_zone->adev = adev;
int34x_thermal_zone->override_ops = override_ops;
@@ -269,6 +283,7 @@ err_thermal_zone:
acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
kfree(int34x_thermal_zone->aux_trips);
err_trip_alloc:
+ mutex_destroy(&int34x_thermal_zone->trip_mutex);
kfree(int34x_thermal_zone);
return ERR_PTR(ret);
}
@@ -280,6 +295,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone
thermal_zone_device_unregister(int34x_thermal_zone->zone);
acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
kfree(int34x_thermal_zone->aux_trips);
+ mutex_destroy(&int34x_thermal_zone->trip_mutex);
kfree(int34x_thermal_zone);
}
EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
index 3b4971df1b33..8f9872afd0d3 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
@@ -32,6 +32,7 @@ struct int34x_thermal_zone {
struct thermal_zone_device_ops *override_ops;
void *priv_data;
struct acpi_lpat_conversion_table *lpat_table;
+ struct mutex trip_mutex;
};
struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,
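
[Editor's note] The series of hunks above wraps every reader and the writer of the trip tables in the new trip_mutex, converting early returns into a single unlock path, and pairs mutex_init() in _add() with mutex_destroy() on both the error path and in _remove(). A minimal sketch of that single-exit reader pattern, with a hypothetical struct zone:

#include <linux/errno.h>
#include <linux/mutex.h>

struct zone {
	struct mutex trip_mutex;	/* protects trips[] and ntrips */
	int trips[8];
	int ntrips;
};

static int zone_get_trip_temp(struct zone *z, int trip, int *temp)
{
	int ret = 0;

	mutex_lock(&z->trip_mutex);
	if (trip < 0 || trip >= z->ntrips)
		ret = -EINVAL;		/* one unlock path, no early return */
	else
		*temp = z->trips[trip];
	mutex_unlock(&z->trip_mutex);

	return ret;
}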
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index 53216dcbe173..d24f637a6a1e 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -57,6 +57,7 @@
static unsigned int target_mwait;
static struct dentry *debug_dir;
+static bool poll_pkg_cstate_enable;
/* user selected target */
static unsigned int set_target_ratio;
@@ -265,6 +266,9 @@ static unsigned int get_compensation(int ratio)
{
unsigned int comp = 0;
+ if (!poll_pkg_cstate_enable)
+ return 0;
+
/* we only use compensation if all adjacent ones are good */
if (ratio == 1 &&
cal_data[ratio].confidence >= CONFIDENCE_OK &&
@@ -534,12 +538,11 @@ static int start_power_clamp(void)
get_online_cpus();
/* prefer BSP */
- control_cpu = 0;
- if (!cpu_online(control_cpu))
- control_cpu = smp_processor_id();
+ control_cpu = cpumask_first(cpu_online_mask);
clamping = true;
- schedule_delayed_work(&poll_pkg_cstate_work, 0);
+ if (poll_pkg_cstate_enable)
+ schedule_delayed_work(&poll_pkg_cstate_work, 0);
/* start one kthread worker per online cpu */
for_each_online_cpu(cpu) {
@@ -608,11 +611,15 @@ static int powerclamp_get_max_state(struct thermal_cooling_device *cdev,
static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
- if (true == clamping)
- *state = pkg_cstate_ratio_cur;
- else
+ if (clamping) {
+ if (poll_pkg_cstate_enable)
+ *state = pkg_cstate_ratio_cur;
+ else
+ *state = set_target_ratio;
+ } else {
/* to save power, do not poll idle ratio while not clamping */
*state = -1; /* indicates invalid state */
+ }
return 0;
}
@@ -737,6 +744,9 @@ static int __init powerclamp_init(void)
goto exit_unregister;
}
+ if (topology_max_packages() == 1 && topology_max_die_per_package() == 1)
+ poll_pkg_cstate_enable = true;
+
cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
&powerclamp_cooling_ops);
if (IS_ERR(cooling_dev)) {
diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c
index 5d33b350da1c..ad92d8f0add1 100644
--- a/drivers/thermal/intel/intel_quark_dts_thermal.c
+++ b/drivers/thermal/intel/intel_quark_dts_thermal.c
@@ -440,22 +440,14 @@ MODULE_DEVICE_TABLE(x86cpu, qrk_thermal_ids);
static int __init intel_quark_thermal_init(void)
{
- int err = 0;
-
if (!x86_match_cpu(qrk_thermal_ids) || !iosf_mbi_available())
return -ENODEV;
soc_dts = alloc_soc_dts();
- if (IS_ERR(soc_dts)) {
- err = PTR_ERR(soc_dts);
- goto err_free;
- }
+ if (IS_ERR(soc_dts))
+ return PTR_ERR(soc_dts);
return 0;
-
-err_free:
- free_soc_dts(soc_dts);
- return err;
}
static void __exit intel_quark_thermal_exit(void)
diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c
index 5716b62e0f73..410f13f6cba4 100644
--- a/drivers/thermal/intel/intel_soc_dts_iosf.c
+++ b/drivers/thermal/intel/intel_soc_dts_iosf.c
@@ -396,7 +396,7 @@ struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
{
struct intel_soc_dts_sensors *sensors;
bool notification;
- u32 tj_max;
+ int tj_max;
int ret;
int i;
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 68d0c181ec7b..1f38da5da6e4 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -91,7 +91,7 @@ static int of_thermal_get_temp(struct thermal_zone_device *tz,
{
struct __thermal_zone *data = tz->devdata;
- if (!data->ops->get_temp)
+ if (!data->ops || !data->ops->get_temp)
return -EINVAL;
return data->ops->get_temp(data->sensor_data, temp);
@@ -188,6 +188,9 @@ static int of_thermal_set_emul_temp(struct thermal_zone_device *tz,
{
struct __thermal_zone *data = tz->devdata;
+ if (!data->ops || !data->ops->set_emul_temp)
+ return -EINVAL;
+
return data->ops->set_emul_temp(data->sensor_data, temp);
}
@@ -196,7 +199,7 @@ static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
{
struct __thermal_zone *data = tz->devdata;
- if (!data->ops->get_trend)
+ if (!data->ops || !data->ops->get_trend)
return -EINVAL;
return data->ops->get_trend(data->sensor_data, trip, trend);
@@ -336,7 +339,7 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
if (trip >= data->ntrips || trip < 0)
return -EDOM;
- if (data->ops->set_trip_temp) {
+ if (data->ops && data->ops->set_trip_temp) {
int ret;
ret = data->ops->set_trip_temp(data->sensor_data, trip, temp);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index f4490b812017..450a1eb90af5 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -740,7 +740,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
if (result)
goto release_ida;
- sprintf(dev->attr_name, "cdev%d_trip_point", dev->id);
+ snprintf(dev->attr_name, sizeof(dev->attr_name), "cdev%d_trip_point",
+ dev->id);
sysfs_attr_init(&dev->attr.attr);
dev->attr.attr.name = dev->attr_name;
dev->attr.attr.mode = 0444;
@@ -749,7 +750,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
if (result)
goto remove_symbol_link;
- sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id);
+ snprintf(dev->weight_attr_name, sizeof(dev->weight_attr_name),
+ "cdev%d_weight", dev->id);
sysfs_attr_init(&dev->weight_attr.attr);
dev->weight_attr.attr.name = dev->weight_attr_name;
dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO;
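
[Editor's note] Both hunks bound the format with snprintf() so a pathological cooling-device id can at worst truncate attr_name instead of overflowing it. A small self-contained illustration (struct cdev_attr is hypothetical; the buffer size is arbitrary):

#include <stdio.h>

struct cdev_attr {
	char attr_name[20];
};

/* sprintf() would write past attr_name once id grows enough digits;
 * snprintf() clamps to the buffer and NUL-terminates instead. */
static void set_name(struct cdev_attr *a, int id)
{
	snprintf(a->attr_name, sizeof(a->attr_name), "cdev%d_trip_point", id);
}

int main(void)
{
	struct cdev_attr a;

	set_name(&a, 1234567);	/* would need 23 bytes with sprintf() */
	puts(a.attr_name);	/* truncated: "cdev1234567_trip_po" */
	return 0;
}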
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 4dce4a8f71ed..17b2361bc8f2 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -909,12 +909,13 @@ static const struct attribute_group cooling_device_stats_attr_group = {
static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
{
+ const struct attribute_group *stats_attr_group = NULL;
struct cooling_dev_stats *stats;
unsigned long states;
int var;
if (cdev->ops->get_max_state(cdev, &states))
- return;
+ goto out;
states++; /* Total number of states is highest state + 1 */
@@ -924,7 +925,7 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
stats = kzalloc(var, GFP_KERNEL);
if (!stats)
- return;
+ goto out;
stats->time_in_state = (ktime_t *)(stats + 1);
stats->trans_table = (unsigned int *)(stats->time_in_state + states);
@@ -934,9 +935,12 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
spin_lock_init(&stats->lock);
+ stats_attr_group = &cooling_device_stats_attr_group;
+
+out:
/* Fill the empty slot left in cooling_device_attr_groups */
var = ARRAY_SIZE(cooling_device_attr_groups) - 2;
- cooling_device_attr_groups[var] = &cooling_device_stats_attr_group;
+ cooling_device_attr_groups[var] = stats_attr_group;
}
static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev)
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 2ec1af8f7968..fd074c6ebe0d 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -388,7 +388,7 @@ static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
static int tb_async_error(const struct ctl_pkg *pkg)
{
- const struct cfg_error_pkg *error = (const struct cfg_error_pkg *)pkg;
+ const struct cfg_error_pkg *error = pkg->buffer;
if (pkg->frame.eof != TB_CFG_PKG_ERROR)
return false;
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 73a698eec743..8a117d1c8888 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -40,7 +40,7 @@
#define NHI_MAILBOX_TIMEOUT 500 /* ms */
-static int ring_interrupt_index(struct tb_ring *ring)
+static int ring_interrupt_index(const struct tb_ring *ring)
{
int bit = ring->hop;
if (!ring->is_tx)
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 26581d2456c8..44ffa2b4c9fc 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -3643,7 +3643,7 @@ static int cy_pci_probe(struct pci_dev *pdev,
struct cyclades_card *card;
void __iomem *addr0 = NULL, *addr2 = NULL;
char *card_name = NULL;
- u32 uninitialized_var(mailbox);
+ u32 mailbox;
unsigned int device_id, nchan = 0, card_no, i, j;
unsigned char plx_ver;
int retval, irq;
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 02629a1f193d..8fedd2de1f68 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -6,12 +6,40 @@
#include <asm/dcc.h>
#include <asm/processor.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/serial_core.h>
+
#include "hvc_console.h"
/* DCC Status Bits */
#define DCC_STATUS_RX (1 << 30)
#define DCC_STATUS_TX (1 << 29)
+static void dcc_uart_console_putchar(struct uart_port *port, int ch)
+{
+ while (__dcc_getstatus() & DCC_STATUS_TX)
+ cpu_relax();
+
+ __dcc_putchar(ch);
+}
+
+static void dcc_early_write(struct console *con, const char *s, unsigned n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, dcc_uart_console_putchar);
+}
+
+static int __init dcc_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ device->con->write = dcc_early_write;
+
+ return 0;
+}
+EARLYCON_DECLARE(dcc, dcc_early_console_setup);
+
static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
{
int i;
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 2d2d04c07140..abd68fd7a34d 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -43,6 +43,7 @@ struct xencons_info {
int irq;
int vtermno;
grant_ref_t gntref;
+ spinlock_t ring_lock;
};
static LIST_HEAD(xenconsoles);
@@ -52,17 +53,22 @@ static DEFINE_SPINLOCK(xencons_lock);
static struct xencons_info *vtermno_to_xencons(int vtermno)
{
- struct xencons_info *entry, *n, *ret = NULL;
+ struct xencons_info *entry, *ret = NULL;
+ unsigned long flags;
- if (list_empty(&xenconsoles))
- return NULL;
+ spin_lock_irqsave(&xencons_lock, flags);
+ if (list_empty(&xenconsoles)) {
+ spin_unlock_irqrestore(&xencons_lock, flags);
+ return NULL;
+ }
- list_for_each_entry_safe(entry, n, &xenconsoles, list) {
+ list_for_each_entry(entry, &xenconsoles, list) {
if (entry->vtermno == vtermno) {
ret = entry;
break;
}
}
+ spin_unlock_irqrestore(&xencons_lock, flags);
return ret;
}
@@ -84,12 +90,15 @@ static int __write_console(struct xencons_info *xencons,
XENCONS_RING_IDX cons, prod;
struct xencons_interface *intf = xencons->intf;
int sent = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&xencons->ring_lock, flags);
cons = intf->out_cons;
prod = intf->out_prod;
mb(); /* update queue values before going on */
if ((prod - cons) > sizeof(intf->out)) {
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);
pr_err_once("xencons: Illegal ring page indices");
return -EINVAL;
}
@@ -99,6 +108,7 @@ static int __write_console(struct xencons_info *xencons,
wmb(); /* write ring before updating pointer */
intf->out_prod = prod;
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);
if (sent)
notify_daemon(xencons);
@@ -141,16 +151,19 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
int recv = 0;
struct xencons_info *xencons = vtermno_to_xencons(vtermno);
unsigned int eoiflag = 0;
+ unsigned long flags;
if (xencons == NULL)
return -EINVAL;
intf = xencons->intf;
+ spin_lock_irqsave(&xencons->ring_lock, flags);
cons = intf->in_cons;
prod = intf->in_prod;
mb(); /* get pointers before reading ring */
if ((prod - cons) > sizeof(intf->in)) {
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);
pr_err_once("xencons: Illegal ring page indices");
return -EINVAL;
}
@@ -174,10 +187,13 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
xencons->out_cons = intf->out_cons;
xencons->out_cons_same = 0;
}
+ if (!recv && xencons->out_cons_same++ > 1) {
+ eoiflag = XEN_EOI_FLAG_SPURIOUS;
+ }
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);
+
if (recv) {
notify_daemon(xencons);
- } else if (xencons->out_cons_same++ > 1) {
- eoiflag = XEN_EOI_FLAG_SPURIOUS;
}
xen_irq_lateeoi(xencons->irq, eoiflag);
@@ -223,7 +239,7 @@ static int xen_hvm_console_init(void)
{
int r;
uint64_t v = 0;
- unsigned long gfn;
+ unsigned long gfn, flags;
struct xencons_info *info;
if (!xen_hvm_domain())
@@ -234,6 +250,7 @@ static int xen_hvm_console_init(void)
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ spin_lock_init(&info->ring_lock);
} else if (info->intf != NULL) {
/* already configured */
return 0;
@@ -258,9 +275,9 @@ static int xen_hvm_console_init(void)
goto err;
info->vtermno = HVC_COOKIE;
- spin_lock(&xencons_lock);
+ spin_lock_irqsave(&xencons_lock, flags);
list_add_tail(&info->list, &xenconsoles);
- spin_unlock(&xencons_lock);
+ spin_unlock_irqrestore(&xencons_lock, flags);
return 0;
err:
@@ -270,6 +287,7 @@ err:
static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
{
+ spin_lock_init(&info->ring_lock);
info->evtchn = xen_start_info->console.domU.evtchn;
/* GFN == MFN for PV guest */
info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
@@ -283,6 +301,7 @@ static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
static int xen_pv_console_init(void)
{
struct xencons_info *info;
+ unsigned long flags;
if (!xen_pv_domain())
return -ENODEV;
@@ -299,9 +318,9 @@ static int xen_pv_console_init(void)
/* already configured */
return 0;
}
- spin_lock(&xencons_lock);
+ spin_lock_irqsave(&xencons_lock, flags);
xencons_info_pv_init(info, HVC_COOKIE);
- spin_unlock(&xencons_lock);
+ spin_unlock_irqrestore(&xencons_lock, flags);
return 0;
}
@@ -309,6 +328,7 @@ static int xen_pv_console_init(void)
static int xen_initial_domain_console_init(void)
{
struct xencons_info *info;
+ unsigned long flags;
if (!xen_initial_domain())
return -ENODEV;
@@ -318,14 +338,15 @@ static int xen_initial_domain_console_init(void)
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ spin_lock_init(&info->ring_lock);
}
info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
info->vtermno = HVC_COOKIE;
- spin_lock(&xencons_lock);
+ spin_lock_irqsave(&xencons_lock, flags);
list_add_tail(&info->list, &xenconsoles);
- spin_unlock(&xencons_lock);
+ spin_unlock_irqrestore(&xencons_lock, flags);
return 0;
}
@@ -380,10 +401,12 @@ static void xencons_free(struct xencons_info *info)
static int xen_console_remove(struct xencons_info *info)
{
+ unsigned long flags;
+
xencons_disconnect_backend(info);
- spin_lock(&xencons_lock);
+ spin_lock_irqsave(&xencons_lock, flags);
list_del(&info->list);
- spin_unlock(&xencons_lock);
+ spin_unlock_irqrestore(&xencons_lock, flags);
if (info->xbdev != NULL)
xencons_free(info);
else {
@@ -464,6 +487,7 @@ static int xencons_probe(struct xenbus_device *dev,
{
int ret, devid;
struct xencons_info *info;
+ unsigned long flags;
devid = dev->nodename[strlen(dev->nodename) - 1] - '0';
if (devid == 0)
@@ -472,6 +496,7 @@ static int xencons_probe(struct xenbus_device *dev,
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ spin_lock_init(&info->ring_lock);
dev_set_drvdata(&dev->dev, info);
info->xbdev = dev;
info->vtermno = xenbus_devid_to_vtermno(devid);
@@ -482,9 +507,9 @@ static int xencons_probe(struct xenbus_device *dev,
ret = xencons_connect_backend(dev, info);
if (ret < 0)
goto error;
- spin_lock(&xencons_lock);
+ spin_lock_irqsave(&xencons_lock, flags);
list_add_tail(&info->list, &xenconsoles);
- spin_unlock(&xencons_lock);
+ spin_unlock_irqrestore(&xencons_lock, flags);
return 0;
@@ -562,7 +587,7 @@ static int __init xen_hvc_init(void)
ops = &dom0_hvc_ops;
r = xen_initial_domain_console_init();
if (r < 0)
- return r;
+ goto register_fe;
info = vtermno_to_xencons(HVC_COOKIE);
} else {
ops = &domU_hvc_ops;
@@ -571,7 +596,7 @@ static int __init xen_hvc_init(void)
else
r = xen_pv_console_init();
if (r < 0)
- return r;
+ goto register_fe;
info = vtermno_to_xencons(HVC_COOKIE);
info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
@@ -583,10 +608,12 @@ static int __init xen_hvc_init(void)
info->hvc = hvc_alloc(HVC_COOKIE, info->irq, ops, 256);
if (IS_ERR(info->hvc)) {
+ unsigned long flags;
+
r = PTR_ERR(info->hvc);
- spin_lock(&xencons_lock);
+ spin_lock_irqsave(&xencons_lock, flags);
list_del(&info->list);
- spin_unlock(&xencons_lock);
+ spin_unlock_irqrestore(&xencons_lock, flags);
if (info->irq)
unbind_from_irqhandler(info->irq, NULL);
kfree(info);
@@ -594,6 +621,7 @@ static int __init xen_hvc_init(void)
}
r = 0;
+ register_fe:
#ifdef CONFIG_HVC_XEN_FRONTEND
r = xenbus_register_frontend(&xencons_driver);
#endif
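
[Editor's note] The hvc_xen changes convert xencons_lock and the new per-ring lock to the irqsave variants because these paths can now be entered from interrupt context; a plain spin_lock() taken in task context and again from an IRQ on the same CPU would self-deadlock. A minimal sketch of the idiom (names hypothetical):

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(cons_lock);
static LIST_HEAD(consoles);

/* Usable from task and IRQ context alike: local interrupts are masked
 * while the lock is held, so an interrupt handler that also takes
 * cons_lock cannot deadlock against us on this CPU. */
static void cons_add(struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&cons_lock, flags);
	list_add_tail(node, &consoles);
	spin_unlock_irqrestore(&cons_lock, flags);
}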
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index fc38f96475bf..3b2f9fb01aa0 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -1514,7 +1514,7 @@ static unsigned int card_count;
static int isicom_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- unsigned int uninitialized_var(signature), index;
+ unsigned int signature, index;
int retval = -EPERM;
struct isi_board *board = NULL;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 907a4d0784ac..98c67ddf2fd9 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -411,6 +411,27 @@ static int gsm_read_ea(unsigned int *val, u8 c)
}
/**
+ * gsm_read_ea_val - read a value until EA
+ * @val: variable holding value
+ * @data: buffer of data
+ * @dlen: length of data
+ *
+ * Processes an EA value. Updates the passed variable and
+ * returns the processed data length.
+ */
+static unsigned int gsm_read_ea_val(unsigned int *val, const u8 *data, int dlen)
+{
+ unsigned int len = 0;
+
+ for (; dlen > 0; dlen--) {
+ len++;
+ if (gsm_read_ea(val, *data++))
+ break;
+ }
+ return len;
+}
+
+/**
* gsm_encode_modem - encode modem data bits
* @dlci: DLCI to encode from
*
@@ -658,6 +679,37 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
}
/**
+ * gsm_is_flow_ctrl_msg - checks whether a message is a flow control message
+ * @msg: message to check
+ *
+ * Returns true if the given message is a flow control command of the
+ * control channel. False is returned in any other case.
+ */
+static bool gsm_is_flow_ctrl_msg(struct gsm_msg *msg)
+{
+ unsigned int cmd;
+
+ if (msg->addr > 0)
+ return false;
+
+ switch (msg->ctrl & ~PF) {
+ case UI:
+ case UIH:
+ cmd = 0;
+ if (gsm_read_ea_val(&cmd, msg->data + 2, msg->len - 2) < 1)
+ break;
+ switch (cmd & ~PF) {
+ case CMD_FCOFF:
+ case CMD_FCON:
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+/**
* gsm_data_kick - poke the queue
* @gsm: GSM Mux
*
@@ -675,7 +727,7 @@ static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci)
int len;
list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) {
- if (gsm->constipated && msg->addr)
+ if (gsm->constipated && !gsm_is_flow_ctrl_msg(msg))
continue;
if (gsm->encoding != 0) {
gsm->txframe[0] = GSM1_SOF;
@@ -1330,7 +1382,7 @@ static void gsm_control_retransmit(struct timer_list *t)
spin_lock_irqsave(&gsm->control_lock, flags);
ctrl = gsm->pending_cmd;
if (ctrl) {
- if (gsm->cretries == 0) {
+ if (gsm->cretries == 0 || !gsm->dlci[0] || gsm->dlci[0]->dead) {
gsm->pending_cmd = NULL;
ctrl->error = -ETIMEDOUT;
ctrl->done = 1;
@@ -1361,7 +1413,7 @@ static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
unsigned int command, u8 *data, int clen)
{
struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control),
- GFP_KERNEL);
+ GFP_ATOMIC);
unsigned long flags;
if (ctrl == NULL)
return NULL;
@@ -1482,8 +1534,8 @@ static void gsm_dlci_t1(struct timer_list *t)
switch (dlci->state) {
case DLCI_OPENING:
- dlci->retries--;
if (dlci->retries) {
+ dlci->retries--;
gsm_command(dlci->gsm, dlci->addr, SABM|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else if (!dlci->addr && gsm->control == (DM | PF)) {
@@ -1498,8 +1550,8 @@ static void gsm_dlci_t1(struct timer_list *t)
break;
case DLCI_CLOSING:
- dlci->retries--;
if (dlci->retries) {
+ dlci->retries--;
gsm_command(dlci->gsm, dlci->addr, DISC|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else
@@ -1842,7 +1894,7 @@ static void gsm_queue(struct gsm_mux *gsm)
goto invalid;
#endif
if (dlci == NULL || dlci->state != DLCI_OPEN) {
- gsm_command(gsm, address, DM|PF);
+ gsm_response(gsm, address, DM|PF);
return;
}
dlci->data(dlci, gsm->buf, gsm->len);
@@ -2599,11 +2651,24 @@ static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
- int space = tty_write_room(tty);
+ struct gsm_mux *gsm = tty->disc_data;
+ unsigned long flags;
+ int space;
+ int ret;
+
+ if (!gsm)
+ return -ENODEV;
+
+ ret = -ENOBUFS;
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ space = tty_write_room(tty);
if (space >= nr)
- return tty->ops->write(tty, buf, nr);
- set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- return -ENOBUFS;
+ ret = tty->ops->write(tty, buf, nr);
+ else
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+
+ return ret;
}
/**
@@ -2628,12 +2693,15 @@ static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file,
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
+
+ if (gsm->dead)
+ mask |= EPOLLHUP;
if (tty_hung_up_p(file))
mask |= EPOLLHUP;
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ mask |= EPOLLHUP;
if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0)
mask |= EPOLLOUT | EPOLLWRNORM;
- if (gsm->dead)
- mask |= EPOLLHUP;
return mask;
}
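
[Editor's note] gsm_read_ea_val() walks a 27.010 extension-address (EA) encoded value: each octet contributes seven value bits, the least significant bit marks the final octet when set, and the helper reports how many bytes it consumed. A standalone C sketch of the same decoding (ea_decode is a hypothetical name):

#include <stdio.h>

static unsigned int ea_decode(const unsigned char *data, int dlen,
			      unsigned int *val)
{
	unsigned int used = 0;

	*val = 0;
	while (dlen-- > 0) {
		unsigned char c = data[used++];

		*val = (*val << 7) | (c >> 1);	/* 7 value bits per octet */
		if (c & 1)			/* EA bit set: final octet */
			break;
	}
	return used;				/* bytes consumed */
}

int main(void)
{
	unsigned int v;
	const unsigned char buf[] = { 0x02, 0x81 };
	unsigned int used = ea_decode(buf, sizeof(buf), &v);

	printf("used=%u val=%u\n", used, v);	/* prints: used=2 val=192 */
	return 0;
}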
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 33ad9d6de532..04bf2b297fdc 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -87,7 +87,6 @@ struct serial8250_config {
#define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
#define UART_BUG_NOMSR (1 << 2) /* UART has buggy MSR status bits (Au1x00) */
#define UART_BUG_THRE (1 << 3) /* UART has buggy THRE reassertion */
-#define UART_BUG_PARITY (1 << 4) /* UART mishandles parity if FIFO enabled */
#ifdef CONFIG_SERIAL_8250_SHARE_IRQ
@@ -305,6 +304,13 @@ extern int serial8250_rx_dma(struct uart_8250_port *);
extern void serial8250_rx_dma_flush(struct uart_8250_port *);
extern int serial8250_request_dma(struct uart_8250_port *);
extern void serial8250_release_dma(struct uart_8250_port *);
+
+static inline bool serial8250_tx_dma_running(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+
+ return dma && dma->tx_running;
+}
#else
static inline int serial8250_tx_dma(struct uart_8250_port *p)
{
@@ -320,6 +326,11 @@ static inline int serial8250_request_dma(struct uart_8250_port *p)
return -1;
}
static inline void serial8250_release_dma(struct uart_8250_port *p) { }
+
+static inline bool serial8250_tx_dma_running(struct uart_8250_port *p)
+{
+ return false;
+}
#endif
static inline int ns16550a_goto_highspeed(struct uart_8250_port *up)
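
[Editor's note] serial8250_tx_dma_running() follows the usual idiom for config-gated facilities: a real implementation when the feature is built in and a static inline stub otherwise, so callers such as serial8250_tx_empty() need no #ifdefs. The shape of the idiom, with a hypothetical CONFIG_FOO:

/* Callers compile unchanged whether or not the feature is built in. */
#ifdef CONFIG_FOO
bool foo_active(struct foo_dev *d);		/* real query lives in foo.c */
#else
static inline bool foo_active(struct foo_dev *d)
{
	return false;				/* compiled out: never active */
}
#endif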
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 2675771a03a0..d7afff1e7685 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1138,6 +1138,7 @@ void serial8250_unregister_port(int line)
uart->port.type = PORT_UNKNOWN;
uart->port.dev = &serial8250_isa_devs->dev;
uart->capabilities = 0;
+ serial8250_init_port(uart);
serial8250_apply_quirks(uart);
uart_add_one_port(&serial8250_reg, &uart->port);
} else {
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 890fa7ddaa7f..bdc7bb7a1b92 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -46,19 +46,39 @@ static void __dma_rx_complete(void *param)
struct uart_8250_dma *dma = p->dma;
struct tty_port *tty_port = &p->port.state->port;
struct dma_tx_state state;
+ enum dma_status dma_status;
int count;
- dma->rx_running = 0;
- dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+ /*
+ * New DMA Rx can be started during the completion handler before it
+ * could acquire the port's lock and it might still be ongoing. Don't do
+ * anything in that case.
+ */
+ dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+ if (dma_status == DMA_IN_PROGRESS)
+ return;
count = dma->rx_size - state.residue;
tty_insert_flip_string(tty_port, dma->rx_buf, count);
p->port.icount.rx += count;
+ dma->rx_running = 0;
tty_flip_buffer_push(tty_port);
}
+static void dma_rx_complete(void *param)
+{
+ struct uart_8250_port *p = param;
+ struct uart_8250_dma *dma = p->dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->port.lock, flags);
+ if (dma->rx_running)
+ __dma_rx_complete(p);
+ spin_unlock_irqrestore(&p->port.lock, flags);
+}
+
int serial8250_tx_dma(struct uart_8250_port *p)
{
struct uart_8250_dma *dma = p->dma;
@@ -121,7 +141,7 @@ int serial8250_rx_dma(struct uart_8250_port *p)
return -EBUSY;
dma->rx_running = 1;
- desc->callback = __dma_rx_complete;
+ desc->callback = dma_rx_complete;
desc->callback_param = p;
dma->rx_cookie = dmaengine_submit(desc);
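
[Editor's note] The split into dma_rx_complete()/__dma_rx_complete() closes a race: the dmaengine callback can fire after the UART interrupt path has already reaped the transfer and started a new one. Taking the port lock first and checking rx_running makes the handler idempotent, and __dma_rx_complete() additionally bails out if the engine reports DMA_IN_PROGRESS for the newly started Rx. Condensed from the hunks above (comments ours):

static void dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	unsigned long flags;

	spin_lock_irqsave(&p->port.lock, flags);
	if (p->dma->rx_running)		/* not already reaped by the IRQ path */
		__dma_rx_complete(p);	/* re-checks the engine's status */
	spin_unlock_irqrestore(&p->port.lock, flags);
}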
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 381c5117aec1..2d5a039229ac 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -110,12 +110,15 @@ static void dw8250_check_lcr(struct uart_port *p, int value)
/* Returns once the transmitter is empty or we run out of retries */
static void dw8250_tx_wait_empty(struct uart_port *p)
{
+ struct uart_8250_port *up = up_to_u8250p(p);
unsigned int tries = 20000;
unsigned int delay_threshold = tries - 1000;
unsigned int lsr;
while (tries--) {
lsr = readb (p->membase + (UART_LSR << p->regshift));
+ up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+
if (lsr & UART_LSR_TEMT)
break;
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index 6d6a78eead3e..1cf229cca592 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -80,7 +80,7 @@ static void dw8250_set_divisor(struct uart_port *p, unsigned int baud,
void dw8250_setup_port(struct uart_port *p)
{
struct uart_8250_port *up = up_to_u8250p(p);
- u32 reg;
+ u32 reg, old_dlf;
/*
* If the Component Version Register returns zero, we know that
@@ -93,9 +93,11 @@ void dw8250_setup_port(struct uart_port *p)
dev_dbg(p->dev, "Designware UART version %c.%c%c\n",
(reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);
+ /* Preserve value written by firmware or bootloader */
+ old_dlf = dw8250_readl_ext(p, DW_UART_DLF);
dw8250_writel_ext(p, DW_UART_DLF, ~0U);
reg = dw8250_readl_ext(p, DW_UART_DLF);
- dw8250_writel_ext(p, DW_UART_DLF, 0);
+ dw8250_writel_ext(p, DW_UART_DLF, old_dlf);
if (reg) {
struct dw8250_port_data *d = p->private_data;
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 5cd8c36c8fcc..2acaea5f3232 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -176,6 +176,7 @@ static int __init early_omap8250_setup(struct earlycon_device *device,
OF_EARLYCON_DECLARE(omap8250, "ti,omap2-uart", early_omap8250_setup);
OF_EARLYCON_DECLARE(omap8250, "ti,omap3-uart", early_omap8250_setup);
OF_EARLYCON_DECLARE(omap8250, "ti,omap4-uart", early_omap8250_setup);
+OF_EARLYCON_DECLARE(omap8250, "ti,am654-uart", early_omap8250_setup);
#endif
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index 2a76e22d2ec0..5670c8a267d8 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -102,8 +102,8 @@ static int serial8250_em_probe(struct platform_device *pdev)
memset(&up, 0, sizeof(up));
up.port.mapbase = regs->start;
up.port.irq = irq->start;
- up.port.type = PORT_UNKNOWN;
- up.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_IOREMAP;
+ up.port.type = PORT_16750;
+ up.port.flags = UPF_FIXED_PORT | UPF_IOREMAP | UPF_FIXED_TYPE;
up.port.dev = &pdev->dev;
up.port.private_data = priv;
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 93367dea4d8a..0dfe9ceb032a 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -639,6 +639,7 @@ static void exar_pci_remove(struct pci_dev *pcidev)
for (i = 0; i < priv->nr; i++)
serial8250_unregister_port(priv->line[i]);
+ /* Ensure that every init quirk is properly torn down */
if (priv->board->exit)
priv->board->exit(pcidev);
}
@@ -653,10 +654,6 @@ static int __maybe_unused exar_suspend(struct device *dev)
if (priv->line[i] >= 0)
serial8250_suspend_port(priv->line[i]);
- /* Ensure that every init quirk is properly torn down */
- if (priv->board->exit)
- priv->board->exit(pcidev);
-
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 5f72ef3ea574..406af8b1d87e 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -258,8 +258,13 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port
struct dw_dma_slave *rx_param, *tx_param;
struct device *dev = port->port.dev;
- if (!lpss->dma_param.dma_dev)
+ if (!lpss->dma_param.dma_dev) {
+ dma = port->dma;
+ if (dma)
+ goto out_configuration_only;
+
return 0;
+ }
rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL);
if (!rx_param)
@@ -270,16 +275,18 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port
return -ENOMEM;
*rx_param = lpss->dma_param;
- dma->rxconf.src_maxburst = lpss->dma_maxburst;
-
*tx_param = lpss->dma_param;
- dma->txconf.dst_maxburst = lpss->dma_maxburst;
dma->fn = lpss8250_dma_filter;
dma->rx_param = rx_param;
dma->tx_param = tx_param;
port->dma = dma;
+
+out_configuration_only:
+ dma->rxconf.src_maxburst = lpss->dma_maxburst;
+ dma->txconf.dst_maxburst = lpss->dma_maxburst;
+
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index efe793a2fc65..6bb8bbaa4fdb 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -169,27 +169,10 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void omap_8250_mdr1_errataset(struct uart_8250_port *up,
struct omap8250_priv *priv)
{
- u8 timeout = 255;
-
serial_out(up, UART_OMAP_MDR1, priv->mdr1);
udelay(2);
serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
UART_FCR_CLEAR_RCVR);
- /*
- * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
- * TX_FIFO_E bit is 1.
- */
- while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
- (UART_LSR_THRE | UART_LSR_DR))) {
- timeout--;
- if (!timeout) {
- /* Should *never* happen. we warn and carry on */
- dev_crit(up->port.dev, "Errata i202: timedout %x\n",
- serial_in(up, UART_LSR));
- break;
- }
- udelay(1);
- }
}
static void omap_8250_get_divisor(struct uart_port *port, unsigned int baud,
@@ -268,6 +251,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
{
struct omap8250_priv *priv = up->port.private_data;
struct uart_8250_dma *dma = up->dma;
+ u8 mcr = serial8250_in_MCR(up);
if (dma && dma->tx_running) {
/*
@@ -284,7 +268,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
serial_out(up, UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
- serial8250_out_MCR(up, UART_MCR_TCRTLR);
+ serial8250_out_MCR(up, mcr | UART_MCR_TCRTLR);
serial_out(up, UART_FCR, up->fcr);
omap8250_update_scr(up, priv);
@@ -300,7 +284,8 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
serial_out(up, UART_LCR, 0);
/* drop TCR + TLR access, we setup XON/XOFF later */
- serial8250_out_MCR(up, up->mcr);
+ serial8250_out_MCR(up, mcr);
+
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
@@ -611,7 +596,6 @@ static int omap_8250_startup(struct uart_port *port)
pm_runtime_get_sync(port->dev);
- up->mcr = 0;
serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
serial_out(up, UART_LCR, UART_LCR_WLEN8);
@@ -1290,9 +1274,15 @@ err:
static int omap8250_remove(struct platform_device *pdev)
{
struct omap8250_priv *priv = platform_get_drvdata(pdev);
+ int err;
+
+ err = pm_runtime_resume_and_get(&pdev->dev);
+ if (err)
+ dev_err(&pdev->dev, "Failed to resume hardware\n");
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
+ flush_work(&priv->qos_work);
pm_runtime_disable(&pdev->dev);
serial8250_unregister_port(priv->line);
pm_qos_remove_request(&priv->pm_qos_request);
@@ -1324,25 +1314,35 @@ static int omap8250_suspend(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(priv->line);
+ int err;
serial8250_suspend_port(priv->line);
- pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
+ if (err)
+ return err;
if (!device_may_wakeup(dev))
priv->wer = 0;
serial_out(up, UART_OMAP_WER, priv->wer);
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
-
+ err = pm_runtime_force_suspend(dev);
flush_work(&priv->qos_work);
- return 0;
+
+ return err;
}
static int omap8250_resume(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
+ int err;
+ err = pm_runtime_force_resume(dev);
+ if (err)
+ return err;
serial8250_resume_port(priv->line);
+ /* Paired with pm_runtime_resume_and_get() in omap8250_suspend() */
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
}
#else
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 92e2ee278523..1ff5920e5278 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1068,14 +1068,6 @@ static int pci_oxsemi_tornado_init(struct pci_dev *dev)
return number_uarts;
}
-static int pci_asix_setup(struct serial_private *priv,
- const struct pciserial_board *board,
- struct uart_8250_port *port, int idx)
-{
- port->bugs |= UART_BUG_PARITY;
- return pci_default_setup(priv, board, port, idx);
-}
-
/* Quatech devices have their own extra interface features */
struct quatech_feature {
@@ -1837,6 +1829,8 @@ pci_moxa_setup(struct serial_private *priv,
#define PCI_SUBDEVICE_ID_SIIG_DUAL_30 0x2530
#define PCI_VENDOR_ID_ADVANTECH 0x13fe
#define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66
+#define PCI_DEVICE_ID_ADVANTECH_PCI1600 0x1600
+#define PCI_DEVICE_ID_ADVANTECH_PCI1600_1611 0x1611
#define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620
#define PCI_DEVICE_ID_ADVANTECH_PCI3618 0x3618
#define PCI_DEVICE_ID_ADVANTECH_PCIf618 0xf618
@@ -1870,7 +1864,6 @@ pci_moxa_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_WCH_CH355_4S 0x7173
#define PCI_VENDOR_ID_AGESTAR 0x5372
#define PCI_DEVICE_ID_AGESTAR_9375 0x6872
-#define PCI_VENDOR_ID_ASIX 0x9710
#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
#define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e
@@ -2670,16 +2663,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_wch_ch38x_setup,
},
/*
- * ASIX devices with FIFO bug
- */
- {
- .vendor = PCI_VENDOR_ID_ASIX,
- .device = PCI_ANY_ID,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_asix_setup,
- },
- /*
* Broadcom TruManage (NetXtreme)
*/
{
@@ -4157,6 +4140,9 @@ static SIMPLE_DEV_PM_OPS(pciserial_pm_ops, pciserial_suspend_one,
pciserial_resume_one);
static const struct pci_device_id serial_pci_tbl[] = {
+ { PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI1600,
+ PCI_DEVICE_ID_ADVANTECH_PCI1600_1611, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
/* Advantech use PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */
{ PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3620,
PCI_DEVICE_ID_ADVANTECH_PCI3620, 0x0001, 0, 0,
@@ -5119,6 +5105,12 @@ static const struct pci_device_id serial_pci_tbl[] = {
pbn_b1_bt_1_115200 },
/*
+ * IntaShield IS-100
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0D60,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_1_115200 },
+ /*
* IntaShield IS-200
*/
{ PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS200,
@@ -5145,10 +5137,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_1_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0AA2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_1_115200 },
/*
- * Brainboxes UC-257
+ * Brainboxes UC-253/UC-734
*/
- { PCI_VENDOR_ID_INTASHIELD, 0x0861,
+ { PCI_VENDOR_ID_INTASHIELD, 0x0CA1,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
@@ -5164,6 +5160,66 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
pbn_b2_4_115200 },
/*
+ * Brainboxes UP-189
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0AC1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0AC2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0AC3,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UP-200
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0B21,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0B22,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0B23,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UP-869
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C01,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C02,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C03,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UP-880
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C21,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C22,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C23,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
* Brainboxes UC-268
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0841,
@@ -5184,6 +5240,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x08E2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x08E3,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
/*
* Brainboxes UC-310
*/
@@ -5194,6 +5258,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
/*
* Brainboxes UC-313
*/
+ { PCI_VENDOR_ID_INTASHIELD, 0x08A1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x08A2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x08A3,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
@@ -5208,6 +5280,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
/*
* Brainboxes UC-346
*/
+ { PCI_VENDOR_ID_INTASHIELD, 0x0B01,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0B02,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
@@ -5219,6 +5295,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0A82,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0A83,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
@@ -5231,13 +5311,35 @@ static const struct pci_device_id serial_pci_tbl[] = {
0, 0,
pbn_b2_4_115200 },
/*
- * Brainboxes UC-420/431
+ * Brainboxes UC-420
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0921,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_4_115200 },
/*
+ * Brainboxes UC-607
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x09A1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x09A2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x09A3,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-836
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0D41,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
+ /*
* Perle PCI-RAS cards
*/
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index f8819f72304a..5d8022cdb50a 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -19,6 +19,7 @@
#include <linux/moduleparam.h>
#include <linux/ioport.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
@@ -1820,10 +1821,13 @@ EXPORT_SYMBOL_GPL(serial8250_modem_status);
static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
{
switch (iir & 0x3f) {
+ case UART_IIR_RDI:
+ if (!up->dma->rx_running)
+ break;
+ fallthrough;
+ case UART_IIR_RLSI:
case UART_IIR_RX_TIMEOUT:
serial8250_rx_dma_flush(up);
- /* fall-through */
- case UART_IIR_RLSI:
return true;
}
return up->dma->rx_dma(up);
@@ -1837,6 +1841,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
unsigned char status;
unsigned long flags;
struct uart_8250_port *up = up_to_u8250p(port);
+ struct tty_port *tport = &port->state->port;
bool skip_rx = false;
if (iir & UART_IIR_NO_INT)
@@ -1860,6 +1865,11 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
skip_rx = true;
if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
+ struct irq_data *d;
+
+ d = irq_get_irq_data(port->irq);
+ if (d && irqd_is_wakeup_set(d))
+ pm_wakeup_event(tport->tty->dev, 0);
if (!up->dma || handle_rx_dma(up, iir))
status = serial8250_rx_chars(up, status);
}
@@ -1915,19 +1925,25 @@ static int serial8250_tx_threshold_handle_irq(struct uart_port *port)
static unsigned int serial8250_tx_empty(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned int result = 0;
unsigned long flags;
unsigned int lsr;
serial8250_rpm_get(up);
spin_lock_irqsave(&port->lock, flags);
- lsr = serial_port_in(port, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+ if (!serial8250_tx_dma_running(up)) {
+ lsr = serial_port_in(port, UART_LSR);
+ up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+
+ if ((lsr & BOTH_EMPTY) == BOTH_EMPTY)
+ result = TIOCSER_TEMT;
+ }
spin_unlock_irqrestore(&port->lock, flags);
serial8250_rpm_put(up);
- return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
+ return result;
}
unsigned int serial8250_do_get_mctrl(struct uart_port *port)
@@ -2522,11 +2538,8 @@ static unsigned char serial8250_compute_lcr(struct uart_8250_port *up,
if (c_cflag & CSTOPB)
cval |= UART_LCR_STOP;
- if (c_cflag & PARENB) {
+ if (c_cflag & PARENB)
cval |= UART_LCR_PARITY;
- if (up->bugs & UART_BUG_PARITY)
- up->fifo_bug = true;
- }
if (!(c_cflag & PARODD))
cval |= UART_LCR_EPAR;
#ifdef CMSPAR
@@ -2633,8 +2646,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
up->lcr = cval; /* Save computed LCR */
if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) {
- /* NOTE: If fifo_bug is not set, a user can set RX_trigger. */
- if ((baud < 2400 && !up->dma) || up->fifo_bug) {
+ if (baud < 2400 && !up->dma) {
up->fcr &= ~UART_FCR_TRIGGER_MASK;
up->fcr |= UART_FCR_TRIGGER_1;
}
@@ -2970,8 +2982,7 @@ static int do_set_rxtrig(struct tty_port *port, unsigned char bytes)
struct uart_8250_port *up = up_to_u8250p(uport);
int rxtrig;
- if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1 ||
- up->fifo_bug)
+ if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1)
return -EINVAL;
rxtrig = bytes_to_fcr_rxtrig(up, bytes);
@@ -3127,6 +3138,7 @@ void serial8250_init_port(struct uart_8250_port *up)
struct uart_port *port = &up->port;
spin_lock_init(&port->lock);
+ port->pm = NULL;
port->ops = &serial8250_pops;
up->cur_iotype = 0xFF;
@@ -3180,8 +3192,13 @@ static void serial8250_console_restore(struct uart_8250_port *up)
unsigned int baud, quot, frac = 0;
termios.c_cflag = port->cons->cflag;
- if (port->state->port.tty && termios.c_cflag == 0)
+ termios.c_ispeed = port->cons->ispeed;
+ termios.c_ospeed = port->cons->ospeed;
+ if (port->state->port.tty && termios.c_cflag == 0) {
termios.c_cflag = port->state->port.tty->termios.c_cflag;
+ termios.c_ispeed = port->state->port.tty->termios.c_ispeed;
+ termios.c_ospeed = port->state->port.tty->termios.c_ospeed;
+ }
baud = serial8250_get_baud_rate(port, &termios, NULL);
quot = serial8250_get_divisor(port, baud, &frac);
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 7ef60f8b6e2c..577612ab2a2c 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -108,7 +108,7 @@ config SERIAL_8250_CONSOLE
config SERIAL_8250_GSC
tristate
- depends on SERIAL_8250 && GSC
+ depends on SERIAL_8250 && PARISC
default SERIAL_8250
config SERIAL_8250_DMA
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index a9751a83d5db..ff56d91172bf 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -354,6 +354,7 @@ config SERIAL_MAX310X
depends on SPI_MASTER
select SERIAL_CORE
select REGMAP_SPI if SPI_MASTER
+ select REGMAP_I2C if I2C
help
This selects support for an advanced UART from Maxim (Dallas).
Supported ICs are MAX3107, MAX3108, MAX3109, MAX14830.
@@ -548,15 +549,6 @@ config SERIAL_UARTLITE_CONSOLE
console (the system console is the device which receives all kernel
messages and warnings and which allows logins in single user mode).
-config SERIAL_UARTLITE_NR_UARTS
- int "Maximum number of uartlite serial ports"
- depends on SERIAL_UARTLITE
- range 1 256
- default 1
- help
- Set this to the number of uartlites in your system, or the number
- you think you might implement.
-
config SERIAL_SUNCORE
bool
depends on SPARC
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 0e487ce091ac..d91f76b1d353 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -199,9 +199,8 @@ static void altera_uart_set_termios(struct uart_port *port,
*/
}
-static void altera_uart_rx_chars(struct altera_uart *pp)
+static void altera_uart_rx_chars(struct uart_port *port)
{
- struct uart_port *port = &pp->port;
unsigned char ch, flag;
unsigned short status;
@@ -248,9 +247,8 @@ static void altera_uart_rx_chars(struct altera_uart *pp)
spin_lock(&port->lock);
}
-static void altera_uart_tx_chars(struct altera_uart *pp)
+static void altera_uart_tx_chars(struct uart_port *port)
{
- struct uart_port *port = &pp->port;
struct circ_buf *xmit = &port->state->xmit;
if (port->x_char) {
@@ -274,26 +272,25 @@ static void altera_uart_tx_chars(struct altera_uart *pp)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (xmit->head == xmit->tail) {
- pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK;
- altera_uart_update_ctrl_reg(pp);
- }
+ if (uart_circ_empty(xmit))
+ altera_uart_stop_tx(port);
}
static irqreturn_t altera_uart_interrupt(int irq, void *data)
{
struct uart_port *port = data;
struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
unsigned int isr;
isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr;
- spin_lock(&port->lock);
+ spin_lock_irqsave(&port->lock, flags);
if (isr & ALTERA_UART_STATUS_RRDY_MSK)
- altera_uart_rx_chars(pp);
+ altera_uart_rx_chars(port);
if (isr & ALTERA_UART_STATUS_TRDY_MSK)
- altera_uart_tx_chars(pp);
- spin_unlock(&port->lock);
+ altera_uart_tx_chars(port);
+ spin_unlock_irqrestore(&port->lock, flags);
return IRQ_RETVAL(isr);
}
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 52b7d559b44b..8bf6da0102fa 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -227,17 +227,18 @@ static struct vendor_data vendor_zte = {
/* Deals with DMA transactions */
-struct pl011_sgbuf {
- struct scatterlist sg;
- char *buf;
+struct pl011_dmabuf {
+ dma_addr_t dma;
+ size_t len;
+ char *buf;
};
struct pl011_dmarx_data {
struct dma_chan *chan;
struct completion complete;
bool use_buf_b;
- struct pl011_sgbuf sgbuf_a;
- struct pl011_sgbuf sgbuf_b;
+ struct pl011_dmabuf dbuf_a;
+ struct pl011_dmabuf dbuf_b;
dma_cookie_t cookie;
bool running;
struct timer_list timer;
@@ -250,7 +251,8 @@ struct pl011_dmarx_data {
struct pl011_dmatx_data {
struct dma_chan *chan;
- struct scatterlist sg;
+ dma_addr_t dma;
+ size_t len;
char *buf;
bool queued;
};
@@ -371,32 +373,24 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
-static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
+static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
enum dma_data_direction dir)
{
- dma_addr_t dma_addr;
-
- sg->buf = dma_alloc_coherent(chan->device->dev,
- PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
- if (!sg->buf)
+ db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
+ &db->dma, GFP_KERNEL);
+ if (!db->buf)
return -ENOMEM;
-
- sg_init_table(&sg->sg, 1);
- sg_set_page(&sg->sg, phys_to_page(dma_addr),
- PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
- sg_dma_address(&sg->sg) = dma_addr;
- sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
+ db->len = PL011_DMA_BUFFER_SIZE;
return 0;
}
-static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
+static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
enum dma_data_direction dir)
{
- if (sg->buf) {
+ if (db->buf) {
dma_free_coherent(chan->device->dev,
- PL011_DMA_BUFFER_SIZE, sg->buf,
- sg_dma_address(&sg->sg));
+ PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
}
}
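
The pl011 conversion drops the scatterlist wrapper around the DMA buffers: dma_alloc_coherent() already returns the device-visible address, so wrapping the coherent buffer in a one-entry scatterlist (and hand-filling sg_dma_address()/sg_dma_len()) bought nothing. The (buf, dma, len) triple in the new struct pl011_dmabuf is exactly what the slave-single dmaengine API consumes; a sketch of how such a buffer is queued, mirroring the RX hunks further down (my_rx_callback and priv are hypothetical):

struct dma_async_tx_descriptor *desc;
dma_cookie_t cookie;

desc = dmaengine_prep_slave_single(chan, db->dma, db->len,
				   DMA_DEV_TO_MEM,
				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
	return -EBUSY;

desc->callback = my_rx_callback;	/* hypothetical completion hook */
desc->callback_param = priv;
cookie = dmaengine_submit(desc);
dma_async_issue_pending(chan);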
@@ -557,8 +551,8 @@ static void pl011_dma_tx_callback(void *data)
spin_lock_irqsave(&uap->port.lock, flags);
if (uap->dmatx.queued)
- dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
- DMA_TO_DEVICE);
+ dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
+ dmatx->len, DMA_TO_DEVICE);
dmacr = uap->dmacr;
uap->dmacr = dmacr & ~UART011_TXDMAE;
@@ -644,18 +638,19 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
memcpy(&dmatx->buf[first], &xmit->buf[0], second);
}
- dmatx->sg.length = count;
-
- if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
+ dmatx->len = count;
+ dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
+ DMA_TO_DEVICE);
+ if (dmatx->dma == DMA_MAPPING_ERROR) {
uap->dmatx.queued = false;
dev_dbg(uap->port.dev, "unable to map TX DMA\n");
return -EBUSY;
}
- desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
+ desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
- dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
+ dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
uap->dmatx.queued = false;
/*
* If DMA cannot be used right now, we complete this
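
On the TX side the buffer is streaming-mapped per transfer, so the hunk above pairs dma_map_single() with dma_unmap_single() on every path. It compares the returned handle against DMA_MAPPING_ERROR directly; the generic dma_mapping_error() helper expresses the same check and is the more common idiom. A sketch, assuming a generic dev/buf/count:

dma_addr_t dma;

dma = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
	return -EBUSY;		/* nothing was mapped, nothing to unmap */

/* ... hand (dma, count) to dmaengine_prep_slave_single() ... */

dma_unmap_single(dev, dma, count, DMA_TO_DEVICE);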
@@ -819,8 +814,8 @@ __acquires(&uap->port.lock)
dmaengine_terminate_async(uap->dmatx.chan);
if (uap->dmatx.queued) {
- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
- DMA_TO_DEVICE);
+ dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
+ uap->dmatx.len, DMA_TO_DEVICE);
uap->dmatx.queued = false;
uap->dmacr &= ~UART011_TXDMAE;
pl011_write(uap->dmacr, uap, REG_DMACR);
@@ -834,15 +829,15 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
struct dma_chan *rxchan = uap->dmarx.chan;
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_async_tx_descriptor *desc;
- struct pl011_sgbuf *sgbuf;
+ struct pl011_dmabuf *dbuf;
if (!rxchan)
return -EIO;
/* Start the RX DMA job */
- sgbuf = uap->dmarx.use_buf_b ?
- &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
- desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+ dbuf = uap->dmarx.use_buf_b ?
+ &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+ desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
/*
@@ -882,8 +877,8 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
bool readfifo)
{
struct tty_port *port = &uap->port.state->port;
- struct pl011_sgbuf *sgbuf = use_buf_b ?
- &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ struct pl011_dmabuf *dbuf = use_buf_b ?
+ &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
int dma_count = 0;
u32 fifotaken = 0; /* only used for vdbg() */
@@ -892,7 +887,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
if (uap->dmarx.poll_rate) {
/* The data can be taken by polling */
- dmataken = sgbuf->sg.length - dmarx->last_residue;
+ dmataken = dbuf->len - dmarx->last_residue;
/* Recalculate the pending size */
if (pending >= dmataken)
pending -= dmataken;
@@ -906,7 +901,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
* Note that tty_insert_flip_buf() tries to take as many chars
* as it can.
*/
- dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
pending);
uap->port.icount.rx += dma_count;
@@ -917,7 +912,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
/* Reset the last_residue for Rx DMA poll */
if (uap->dmarx.poll_rate)
- dmarx->last_residue = sgbuf->sg.length;
+ dmarx->last_residue = dbuf->len;
/*
* Only continue with trying to read the FIFO if all DMA chars have
@@ -954,8 +949,8 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_chan *rxchan = dmarx->chan;
- struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
- &dmarx->sgbuf_b : &dmarx->sgbuf_a;
+ struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+ &dmarx->dbuf_b : &dmarx->dbuf_a;
size_t pending;
struct dma_tx_state state;
enum dma_status dmastat;
@@ -977,7 +972,7 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
pl011_write(uap->dmacr, uap, REG_DMACR);
uap->dmarx.running = false;
- pending = sgbuf->sg.length - state.residue;
+ pending = dbuf->len - state.residue;
BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
/* Then we terminate the transfer - we now know our residue */
dmaengine_terminate_all(rxchan);
@@ -1004,8 +999,8 @@ static void pl011_dma_rx_callback(void *data)
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_chan *rxchan = dmarx->chan;
bool lastbuf = dmarx->use_buf_b;
- struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
- &dmarx->sgbuf_b : &dmarx->sgbuf_a;
+ struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+ &dmarx->dbuf_b : &dmarx->dbuf_a;
size_t pending;
struct dma_tx_state state;
int ret;
@@ -1023,7 +1018,7 @@ static void pl011_dma_rx_callback(void *data)
* the DMA irq handler. So we check the residue here.
*/
rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
- pending = sgbuf->sg.length - state.residue;
+ pending = dbuf->len - state.residue;
BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
/* Then we terminate the transfer - we now know our residue */
dmaengine_terminate_all(rxchan);
@@ -1053,6 +1048,9 @@ static void pl011_dma_rx_callback(void *data)
*/
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
+ if (!uap->using_rx_dma)
+ return;
+
/* FIXME. Just disable the DMA enable */
uap->dmacr &= ~UART011_RXDMAE;
pl011_write(uap->dmacr, uap, REG_DMACR);
@@ -1072,16 +1070,16 @@ static void pl011_dma_rx_poll(struct timer_list *t)
unsigned long flags = 0;
unsigned int dmataken = 0;
unsigned int size = 0;
- struct pl011_sgbuf *sgbuf;
+ struct pl011_dmabuf *dbuf;
int dma_count;
struct dma_tx_state state;
- sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
if (likely(state.residue < dmarx->last_residue)) {
- dmataken = sgbuf->sg.length - dmarx->last_residue;
+ dmataken = dbuf->len - dmarx->last_residue;
size = dmarx->last_residue - state.residue;
- dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
size);
if (dma_count == size)
dmarx->last_residue = state.residue;
@@ -1128,7 +1126,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
return;
}
- sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
+ uap->dmatx.len = PL011_DMA_BUFFER_SIZE;
/* The DMA buffer is now the FIFO the TTY subsystem can use */
uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
@@ -1138,7 +1136,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
goto skip_rx;
/* Allocate and map DMA RX buffers */
- ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
DMA_FROM_DEVICE);
if (ret) {
dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
@@ -1146,12 +1144,12 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
goto skip_rx;
}
- ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+ ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
DMA_FROM_DEVICE);
if (ret) {
dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
"RX buffer B", ret);
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
DMA_FROM_DEVICE);
goto skip_rx;
}
@@ -1205,8 +1203,9 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
/* In theory, this should already be done by pl011_dma_flush_buffer */
dmaengine_terminate_all(uap->dmatx.chan);
if (uap->dmatx.queued) {
- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
- DMA_TO_DEVICE);
+ dma_unmap_single(uap->dmatx.chan->device->dev,
+ uap->dmatx.dma, uap->dmatx.len,
+ DMA_TO_DEVICE);
uap->dmatx.queued = false;
}
@@ -1217,8 +1216,8 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
if (uap->using_rx_dma) {
dmaengine_terminate_all(uap->dmarx.chan);
/* Clean up the RX DMA */
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
+ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
if (uap->dmarx.poll_rate)
del_timer_sync(&uap->dmarx.timer);
uap->using_rx_dma = false;
@@ -1768,8 +1767,17 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap)
static void pl011_unthrottle_rx(struct uart_port *port)
{
struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
+ unsigned long flags;
- pl011_enable_interrupts(uap);
+ spin_lock_irqsave(&uap->port.lock, flags);
+
+ uap->im = UART011_RTIM;
+ if (!pl011_dma_rx_running(uap))
+ uap->im |= UART011_RXIM;
+
+ pl011_write(uap->im, uap, REG_IMSC);
+
+ spin_unlock_irqrestore(&uap->port.lock, flags);
}
static int pl011_startup(struct uart_port *port)
@@ -2778,6 +2786,7 @@ static struct platform_driver arm_sbsa_uart_platform_driver = {
.remove = sbsa_uart_remove,
.driver = {
.name = "sbsa-uart",
+ .pm = &pl011_dev_pm_ops,
.of_match_table = of_match_ptr(sbsa_uart_of_match),
.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index d904a3a345e7..dd4be3c8c049 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -613,10 +613,11 @@ static int arc_serial_probe(struct platform_device *pdev)
}
uart->baud = val;
- port->membase = of_iomap(np, 0);
- if (!port->membase)
+ port->membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(port->membase)) {
/* No point of dev_err since UART itself is hosed here */
- return -ENXIO;
+ return PTR_ERR(port->membase);
+ }
port->irq = irq_of_parse_and_map(np, 0);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 3b2c25bd2e06..8d0838c904c8 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -306,16 +306,16 @@ static int atmel_config_rs485(struct uart_port *port,
mode = atmel_uart_readl(port, ATMEL_US_MR);
- /* Resetting serial mode to RS232 (0x0) */
- mode &= ~ATMEL_US_USMODE;
-
- port->rs485 = *rs485conf;
-
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
- atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+ if (rs485conf->flags & SER_RS485_RX_DURING_TX)
+ atmel_port->tx_done_mask = ATMEL_US_TXRDY;
+ else
+ atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+
atmel_uart_writel(port, ATMEL_US_TTGR,
rs485conf->delay_rts_after_send);
+ mode &= ~ATMEL_US_USMODE;
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
@@ -832,7 +832,7 @@ static void atmel_tx_chars(struct uart_port *port)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (port->x_char &&
- (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
+ (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) {
atmel_uart_write_char(port, port->x_char);
port->icount.tx++;
port->x_char = 0;
@@ -840,8 +840,7 @@ static void atmel_tx_chars(struct uart_port *port)
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return;
- while (atmel_uart_readl(port, ATMEL_US_CSR) &
- atmel_port->tx_done_mask) {
+ while (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY) {
atmel_uart_write_char(port, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
@@ -852,10 +851,20 @@ static void atmel_tx_chars(struct uart_port *port)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (!uart_circ_empty(xmit))
+ if (!uart_circ_empty(xmit)) {
+ /* We still have characters to transmit, so we should continue
+ * transmitting them whenever TX is ready, regardless of the
+ * mode or duplex setting.
+ */
+ atmel_port->tx_done_mask |= ATMEL_US_TXRDY;
+
/* Enable interrupts */
atmel_uart_writel(port, ATMEL_US_IER,
atmel_port->tx_done_mask);
+ } else {
+ if (atmel_uart_is_half_duplex(port))
+ atmel_port->tx_done_mask &= ~ATMEL_US_TXRDY;
+ }
}
static void atmel_complete_tx_dma(void *arg)
@@ -875,11 +884,11 @@ static void atmel_complete_tx_dma(void *arg)
port->icount.tx += atmel_port->tx_len;
- spin_lock_irq(&atmel_port->lock_tx);
+ spin_lock(&atmel_port->lock_tx);
async_tx_ack(atmel_port->desc_tx);
atmel_port->cookie_tx = -EINVAL;
atmel_port->desc_tx = NULL;
- spin_unlock_irq(&atmel_port->lock_tx);
+ spin_unlock(&atmel_port->lock_tx);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
@@ -2541,8 +2550,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
* Use TXEMPTY for interrupt when rs485 or ISO7816 else TXRDY or
* ENDTX|TXBUFE
*/
- if (port->rs485.flags & SER_RS485_ENABLED ||
- port->iso7816.flags & SER_ISO7816_ENABLED)
+ if (atmel_uart_is_half_duplex(port))
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
else if (atmel_use_pdc_tx(port)) {
port->fifosize = PDC_BUFFER_SIZE;
@@ -2634,13 +2642,7 @@ static void __init atmel_console_get_options(struct uart_port *port, int *baud,
else if (mr == ATMEL_US_PAR_ODD)
*parity = 'o';
- /*
- * The serial core only rounds down when matching this to a
- * supported baud rate. Make sure we don't end up slightly
- * lower than one of those, as it would make us fall through
- * to a much lower baud rate than we really want.
- */
- *baud = port->uartclk / (16 * (quot - 1));
+ *baud = port->uartclk / (16 * quot);
}
static int __init atmel_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index de6d02f7abe2..c37036fee231 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1267,19 +1267,14 @@ static void cpm_uart_console_write(struct console *co, const char *s,
{
struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
unsigned long flags;
- int nolock = oops_in_progress;
- if (unlikely(nolock)) {
+ if (unlikely(oops_in_progress)) {
local_irq_save(flags);
- } else {
- spin_lock_irqsave(&pinfo->port.lock, flags);
- }
-
- cpm_uart_early_write(pinfo, s, count, true);
-
- if (unlikely(nolock)) {
+ cpm_uart_early_write(pinfo, s, count, true);
local_irq_restore(flags);
} else {
+ spin_lock_irqsave(&pinfo->port.lock, flags);
+ cpm_uart_early_write(pinfo, s, count, true);
spin_unlock_irqrestore(&pinfo->port.lock, flags);
}
}
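
The cpm_uart rewrite keeps a single early/late branch instead of two, but the underlying rule is the standard console-write pattern: while an oops is in progress, the port lock may already be held by the CPU that crashed, so the writer only masks local interrupts instead of spinning on the lock. A minimal sketch with hypothetical names:

static void my_console_write(struct console *co, const char *s, unsigned int n)
{
	struct uart_port *port = &my_ports[co->index].port;
	unsigned long flags;

	if (unlikely(oops_in_progress)) {
		/* Lock may be held by the crashed context; don't deadlock. */
		local_irq_save(flags);
		my_console_putchars(port, s, n);
		local_irq_restore(flags);
	} else {
		spin_lock_irqsave(&port->lock, flags);
		my_console_putchars(port, s, n);
		spin_unlock_irqrestore(&port->lock, flags);
	}
}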
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 4bdc12908146..9230d96ed3cd 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -807,11 +807,17 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
struct lpuart_port, port);
unsigned long stat = lpuart32_read(port, UARTSTAT);
unsigned long sfifo = lpuart32_read(port, UARTFIFO);
+ unsigned long ctrl = lpuart32_read(port, UARTCTRL);
if (sport->dma_tx_in_progress)
return 0;
- if (stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT)
+ /*
+ * LPUART Transmission Complete Flag may never be set while queuing a break
+ * character, so avoid checking for transmission complete when UARTCTRL_SBK
+ * is asserted.
+ */
+ if ((stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT) || ctrl & UARTCTRL_SBK)
return TIOCSER_TEMT;
return 0;
@@ -1017,8 +1023,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
if (sr & (UARTSTAT_PE | UARTSTAT_FE)) {
- /* Read DR to clear the error flags */
- lpuart32_read(&sport->port, UARTDATA);
+ /* Clear the error flags */
+ lpuart32_write(&sport->port, sr, UARTSTAT);
if (sr & UARTSTAT_PE)
sport->port.icount.parity++;
@@ -1166,7 +1172,7 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
* 10ms at any baud rate.
*/
sport->rx_dma_rng_buf_len = (DMA_RX_TIMEOUT * baud / bits / 1000) * 2;
- sport->rx_dma_rng_buf_len = (1 << (fls(sport->rx_dma_rng_buf_len) - 1));
+ sport->rx_dma_rng_buf_len = (1 << fls(sport->rx_dma_rng_buf_len));
if (sport->rx_dma_rng_buf_len < 16)
sport->rx_dma_rng_buf_len = 16;
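
The one-character lpuart change above flips the ring-buffer sizing from rounding down to rounding up: fls() returns the 1-based index of the most significant set bit (fls(1) == 1), so 1 << (fls(n) - 1) is the largest power of two <= n, while 1 << fls(n) is strictly greater than n (a power-of-two input gets doubled). Worked through for a 48-byte request:

unsigned int len = 48;			/* 0x30, computed from the baud rate */
unsigned int before, after;

/* fls(48) == 6 */
before = 1 << (fls(len) - 1);		/* 32: buffer smaller than requested */
after  = 1 << fls(len);			/* 64: buffer at least as large      */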
@@ -1277,9 +1283,9 @@ static int lpuart_config_rs485(struct uart_port *port,
* Note: UART is assumed to be active high.
*/
if (rs485->flags & SER_RS485_RTS_ON_SEND)
- modem &= ~UARTMODEM_TXRTSPOL;
- else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
modem |= UARTMODEM_TXRTSPOL;
+ else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ modem &= ~UARTMODEM_TXRTSPOL;
}
/* Store the new configuration */
@@ -1372,12 +1378,34 @@ static void lpuart32_break_ctl(struct uart_port *port, int break_state)
{
unsigned long temp;
- temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK;
-
- if (break_state != 0)
- temp |= UARTCTRL_SBK;
+ temp = lpuart32_read(port, UARTCTRL);
- lpuart32_write(port, temp, UARTCTRL);
+ /*
+ * The LPUART IP has two known bugs. One is that CTS has higher priority
+ * than the break signal, so a break sent through UARTCTRL_SBK may be
+ * impacted by the CTS input if HW flow control is enabled; this bug exists
+ * on all platforms we support in this driver.
+ * The other is that the i.MX8QM LPUART may send an additional break
+ * character after SBK is cleared.
+ * To avoid both bugs, we use the Transmit Data Inversion function to send
+ * the break signal instead of UARTCTRL_SBK.
+ */
+ if (break_state != 0) {
+ /*
+ * Disable the transmitter to prevent any data from being sent out
+ * during break, then invert the TX line to send break.
+ */
+ temp &= ~UARTCTRL_TE;
+ lpuart32_write(port, temp, UARTCTRL);
+ temp |= UARTCTRL_TXINV;
+ lpuart32_write(port, temp, UARTCTRL);
+ } else {
+ /* Disable the TXINV to turn off break and re-enable transmitter. */
+ temp &= ~UARTCTRL_TXINV;
+ lpuart32_write(port, temp, UARTCTRL);
+ temp |= UARTCTRL_TE;
+ lpuart32_write(port, temp, UARTCTRL);
+ }
}
static void lpuart_setup_watermark(struct lpuart_port *sport)
@@ -1593,6 +1621,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport)
if (sport->lpuart_dma_rx_use) {
del_timer_sync(&sport->lpuart_timer);
lpuart_dma_rx_free(&sport->port);
+ sport->lpuart_dma_rx_use = false;
}
if (sport->lpuart_dma_tx_use) {
@@ -1601,6 +1630,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport)
sport->dma_tx_in_progress = false;
dmaengine_terminate_all(sport->dma_tx_chan);
}
+ sport->lpuart_dma_tx_use = false;
}
}
@@ -1980,8 +2010,15 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
- /* wait transmit engin complete */
- lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
+ /*
+ * LPUART Transmission Complete Flag may never be set while queuing a break
+ * character, so skip waiting for transmission complete when UARTCTRL_SBK is
+ * asserted.
+ */
+ if (!(old_ctrl & UARTCTRL_SBK)) {
+ lpuart32_write(&sport->port, 0, UARTMODIR);
+ lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
+ }
/* disable transmit and receive */
lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
@@ -2372,6 +2409,7 @@ static int __init lpuart32_imx_early_console_setup(struct earlycon_device *devic
OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
+OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8ulp-lpuart", lpuart32_imx_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 8b41a783b37e..80cb72350cea 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2277,7 +2277,7 @@ static int imx_uart_probe(struct platform_device *pdev)
/* For register access, we only need to enable the ipg clock. */
ret = clk_prepare_enable(sport->clk_ipg);
if (ret) {
- dev_err(&pdev->dev, "failed to enable per clk: %d\n", ret);
+ dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
return ret;
}
@@ -2551,6 +2551,7 @@ static const struct dev_pm_ops imx_uart_pm_ops = {
.suspend_noirq = imx_uart_suspend_noirq,
.resume_noirq = imx_uart_resume_noirq,
.freeze_noirq = imx_uart_suspend_noirq,
+ .thaw_noirq = imx_uart_resume_noirq,
.restore_noirq = imx_uart_resume_noirq,
.suspend = imx_uart_suspend,
.resume = imx_uart_resume,
diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index 592e51d8944e..07e9be9865c7 100644
--- a/drivers/tty/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
@@ -212,7 +212,8 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
default:
- return -ENXIO;
+ rc = -ENXIO;
+ goto out_kfree_brd;
}
rc = request_irq(brd->irq, brd->bd_ops->intr, IRQF_SHARED, "JSM", brd);
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index fcbea43dc334..cf7fffe8396e 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -274,6 +274,7 @@ lqasc_err_int(int irq, void *_port)
struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
spin_lock_irqsave(&ltq_port->lock, flags);
+ __raw_writel(ASC_IRNCR_EIR, port->membase + LTQ_ASC_IRNCR);
/* clear any pending interrupts */
asc_update_bits(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE |
ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE);
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 5bf8dd6198bb..5570fd3b84e1 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -14,9 +14,10 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
@@ -72,7 +73,8 @@
#define MAX310X_GLOBALCMD_REG MAX310X_REG_1F /* Global Command (WO) */
/* Extended registers */
-#define MAX310X_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
+#define MAX310X_SPI_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
+#define MAX310X_I2C_REVID_EXTREG (0x25) /* Revision ID */
/* IRQ register bits */
#define MAX310X_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */
@@ -235,6 +237,14 @@
#define MAX310x_REV_MASK (0xf8)
#define MAX310X_WRITE_BIT 0x80
+/* Port startup definitions */
+#define MAX310X_PORT_STARTUP_WAIT_RETRIES 20 /* Number of retries */
+#define MAX310X_PORT_STARTUP_WAIT_DELAY_MS 10 /* Delay between retries */
+
+/* Crystal-related definitions */
+#define MAX310X_XTAL_WAIT_RETRIES 20 /* Number of retries */
+#define MAX310X_XTAL_WAIT_DELAY_MS 10 /* Delay between retries */
+
/* MAX3107 specific */
#define MAX3107_REV_ID (0xa0)
@@ -245,7 +255,17 @@
#define MAX14830_BRGCFG_CLKDIS_BIT (1 << 6) /* Clock Disable */
#define MAX14830_REV_ID (0xb0)
+struct max310x_if_cfg {
+ int (*extended_reg_enable)(struct device *dev, bool enable);
+
+ unsigned int rev_id_reg;
+};
+
struct max310x_devtype {
+ struct {
+ unsigned short min;
+ unsigned short max;
+ } slave_addr;
char name[9];
int nr;
u8 mode1;
@@ -258,16 +278,16 @@ struct max310x_one {
struct work_struct tx_work;
struct work_struct md_work;
struct work_struct rs_work;
+ struct regmap *regmap;
- u8 wr_header;
- u8 rd_header;
u8 rx_buf[MAX310X_FIFO_SIZE];
};
#define to_max310x_port(_port) \
container_of(_port, struct max310x_one, port)
struct max310x_port {
- struct max310x_devtype *devtype;
+ const struct max310x_devtype *devtype;
+ const struct max310x_if_cfg *if_cfg;
struct regmap *regmap;
struct clk *clk;
#ifdef CONFIG_GPIOLIB
@@ -289,26 +309,26 @@ static DECLARE_BITMAP(max310x_lines, MAX310X_UART_NRMAX);
static u8 max310x_port_read(struct uart_port *port, u8 reg)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
+ struct max310x_one *one = to_max310x_port(port);
unsigned int val = 0;
- regmap_read(s->regmap, port->iobase + reg, &val);
+ regmap_read(one->regmap, reg, &val);
return val;
}
static void max310x_port_write(struct uart_port *port, u8 reg, u8 val)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
+ struct max310x_one *one = to_max310x_port(port);
- regmap_write(s->regmap, port->iobase + reg, val);
+ regmap_write(one->regmap, reg, val);
}
static void max310x_port_update(struct uart_port *port, u8 reg, u8 mask, u8 val)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
+ struct max310x_one *one = to_max310x_port(port);
- regmap_update_bits(s->regmap, port->iobase + reg, mask, val);
+ regmap_update_bits(one->regmap, reg, mask, val);
}
static int max3107_detect(struct device *dev)
@@ -357,13 +377,12 @@ static int max3109_detect(struct device *dev)
unsigned int val = 0;
int ret;
- ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
- MAX310X_EXTREG_ENBL);
+ ret = s->if_cfg->extended_reg_enable(dev, true);
if (ret)
return ret;
- regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val);
- regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL);
+ regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
+ s->if_cfg->extended_reg_enable(dev, false);
if (((val & MAX310x_REV_MASK) != MAX3109_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
@@ -388,13 +407,12 @@ static int max14830_detect(struct device *dev)
unsigned int val = 0;
int ret;
- ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
- MAX310X_EXTREG_ENBL);
+ ret = s->if_cfg->extended_reg_enable(dev, true);
if (ret)
return ret;
- regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val);
- regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL);
+ regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
+ s->if_cfg->extended_reg_enable(dev, false);
if (((val & MAX310x_REV_MASK) != MAX14830_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
@@ -419,6 +437,10 @@ static const struct max310x_devtype max3107_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT | MAX310X_MODE1_IRQSEL_BIT,
.detect = max3107_detect,
.power = max310x_power,
+ .slave_addr = {
+ .min = 0x2c,
+ .max = 0x2f,
+ },
};
static const struct max310x_devtype max3108_devtype = {
@@ -427,6 +449,10 @@ static const struct max310x_devtype max3108_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3108_detect,
.power = max310x_power,
+ .slave_addr = {
+ .min = 0x60,
+ .max = 0x6f,
+ },
};
static const struct max310x_devtype max3109_devtype = {
@@ -435,6 +461,10 @@ static const struct max310x_devtype max3109_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3109_detect,
.power = max310x_power,
+ .slave_addr = {
+ .min = 0x60,
+ .max = 0x6f,
+ },
};
static const struct max310x_devtype max14830_devtype = {
@@ -443,11 +473,15 @@ static const struct max310x_devtype max14830_devtype = {
.mode1 = MAX310X_MODE1_IRQSEL_BIT,
.detect = max14830_detect,
.power = max14830_power,
+ .slave_addr = {
+ .min = 0x60,
+ .max = 0x6f,
+ },
};
static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
{
- switch (reg & 0x1f) {
+ switch (reg) {
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
@@ -464,7 +498,7 @@ static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
{
- switch (reg & 0x1f) {
+ switch (reg) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
@@ -486,7 +520,7 @@ static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
static bool max310x_reg_precious(struct device *dev, unsigned int reg)
{
- switch (reg & 0x1f) {
+ switch (reg) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
@@ -499,6 +533,11 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
return false;
}
+static bool max310x_reg_noinc(struct device *dev, unsigned int reg)
+{
+ return reg == MAX310X_RHR_REG;
+}
+
static int max310x_set_baud(struct uart_port *port, int baud)
{
unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0;
@@ -552,7 +591,7 @@ static int max310x_update_best_err(unsigned long f, long *besterr)
return 1;
}
-static int max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
+static s32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
unsigned long freq, bool xtal)
{
unsigned int div, clksrc, pllcfg = 0;
@@ -610,45 +649,37 @@ static int max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
/* Wait for crystal */
if (xtal) {
- unsigned int val;
- msleep(10);
- regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
- if (!(val & MAX310X_STS_CLKREADY_BIT)) {
- dev_warn(dev, "clock is not stable yet\n");
- }
+ bool stable = false;
+ unsigned int try = 0, val = 0;
+
+ do {
+ msleep(MAX310X_XTAL_WAIT_DELAY_MS);
+ regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
+
+ if (val & MAX310X_STS_CLKREADY_BIT)
+ stable = true;
+ } while (!stable && (++try < MAX310X_XTAL_WAIT_RETRIES));
+
+ if (!stable)
+ return dev_err_probe(dev, -EAGAIN,
+ "clock is not stable\n");
}
- return (int)bestfreq;
+ return bestfreq;
}
static void max310x_batch_write(struct uart_port *port, u8 *txbuf, unsigned int len)
{
struct max310x_one *one = to_max310x_port(port);
- struct spi_transfer xfer[] = {
- {
- .tx_buf = &one->wr_header,
- .len = sizeof(one->wr_header),
- }, {
- .tx_buf = txbuf,
- .len = len,
- }
- };
- spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer));
+
+ regmap_noinc_write(one->regmap, MAX310X_THR_REG, txbuf, len);
}
static void max310x_batch_read(struct uart_port *port, u8 *rxbuf, unsigned int len)
{
struct max310x_one *one = to_max310x_port(port);
- struct spi_transfer xfer[] = {
- {
- .tx_buf = &one->rd_header,
- .len = sizeof(one->rd_header),
- }, {
- .rx_buf = rxbuf,
- .len = len,
- }
- };
- spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer));
+
+ regmap_noinc_read(one->regmap, MAX310X_RHR_REG, rxbuf, len);
}
static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
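
The batch helpers above replace hand-built SPI header transfers with regmap_noinc_write()/regmap_noinc_read(), which repeatedly access one register address without incrementing it: exactly the semantics of a UART FIFO data register. For the accessors to accept the transfer, the regmap_config must flag the register as noinc-capable and allow raw transfers of FIFO size, which the config hunk further down does. Condensed, and assuming (as in this driver) that RHR and THR decode to the same register number:

static bool max310x_reg_noinc(struct device *dev, unsigned int reg)
{
	return reg == MAX310X_RHR_REG;	/* also matches THR on the write side */
}

/* in the regmap_config: */
	.writeable_noinc_reg	= max310x_reg_noinc,
	.readable_noinc_reg	= max310x_reg_noinc,
	.max_raw_read		= MAX310X_FIFO_SIZE,
	.max_raw_write		= MAX310X_FIFO_SIZE,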
@@ -1250,16 +1281,18 @@ static int max310x_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
}
#endif
-static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
- struct regmap *regmap, int irq)
+static int max310x_probe(struct device *dev, const struct max310x_devtype *devtype,
+ const struct max310x_if_cfg *if_cfg,
+ struct regmap *regmaps[], int irq)
{
- int i, ret, fmin, fmax, freq, uartclk;
- struct clk *clk_osc, *clk_xtal;
+ int i, ret, fmin, fmax, freq;
struct max310x_port *s;
- bool xtal = false;
+ s32 uartclk = 0;
+ bool xtal;
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
+ for (i = 0; i < devtype->nr; i++)
+ if (IS_ERR(regmaps[i]))
+ return PTR_ERR(regmaps[i]);
/* Alloc port structure */
s = devm_kzalloc(dev, struct_size(s, p, devtype->nr), GFP_KERNEL);
@@ -1268,23 +1301,20 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
return -ENOMEM;
}
- clk_osc = devm_clk_get(dev, "osc");
- clk_xtal = devm_clk_get(dev, "xtal");
- if (!IS_ERR(clk_osc)) {
- s->clk = clk_osc;
- fmin = 500000;
- fmax = 35000000;
- } else if (!IS_ERR(clk_xtal)) {
- s->clk = clk_xtal;
- fmin = 1000000;
- fmax = 4000000;
- xtal = true;
- } else if (PTR_ERR(clk_osc) == -EPROBE_DEFER ||
- PTR_ERR(clk_xtal) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
+ /* Always ask for fixed clock rate from a property. */
+ device_property_read_u32(dev, "clock-frequency", &uartclk);
+
+ s->clk = devm_clk_get_optional(dev, "osc");
+ if (IS_ERR(s->clk))
+ return PTR_ERR(s->clk);
+ if (s->clk) {
+ xtal = false;
} else {
- dev_err(dev, "Cannot get clock\n");
- return -EINVAL;
+ s->clk = devm_clk_get_optional(dev, "xtal");
+ if (IS_ERR(s->clk))
+ return PTR_ERR(s->clk);
+
+ xtal = true;
}
ret = clk_prepare_enable(s->clk);
@@ -1292,14 +1322,31 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
return ret;
freq = clk_get_rate(s->clk);
+ if (freq == 0)
+ freq = uartclk;
+ if (freq == 0) {
+ dev_err(dev, "Cannot get clock rate\n");
+ ret = -EINVAL;
+ goto out_clk;
+ }
+
+ if (xtal) {
+ fmin = 1000000;
+ fmax = 4000000;
+ } else {
+ fmin = 500000;
+ fmax = 35000000;
+ }
+
/* Check frequency limits */
if (freq < fmin || freq > fmax) {
ret = -ERANGE;
goto out_clk;
}
- s->regmap = regmap;
+ s->regmap = regmaps[0];
s->devtype = devtype;
+ s->if_cfg = if_cfg;
dev_set_drvdata(dev, s);
/* Check device to ensure we are talking to what we expect */
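
devm_clk_get_optional() distinguishes "no such clock described" (returns NULL) from a real error such as -EPROBE_DEFER (returns an ERR_PTR), which is what lets the probe above drop the old clk_osc/clk_xtal error juggling. clk_get_rate(NULL) returns 0, so a missing clock falls through to the "clock-frequency" property. The pattern in isolation (the driver also prepares/enables the clock before reading its rate):

struct clk *clk;
unsigned long freq;
u32 freq_prop = 0;

device_property_read_u32(dev, "clock-frequency", &freq_prop);

clk = devm_clk_get_optional(dev, "osc");	/* NULL when not described */
if (IS_ERR(clk))
	return PTR_ERR(clk);			/* includes -EPROBE_DEFER */

freq = clk_get_rate(clk);			/* 0 for a NULL clk */
if (!freq)
	freq = freq_prop;
if (!freq)
	return -EINVAL;				/* no usable clock source */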
@@ -1308,25 +1355,38 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
goto out_clk;
for (i = 0; i < devtype->nr; i++) {
- unsigned int offs = i << 5;
+ bool started = false;
+ unsigned int try = 0, val = 0;
/* Reset port */
- regmap_write(s->regmap, MAX310X_MODE2_REG + offs,
+ regmap_write(regmaps[i], MAX310X_MODE2_REG,
MAX310X_MODE2_RST_BIT);
/* Clear port reset */
- regmap_write(s->regmap, MAX310X_MODE2_REG + offs, 0);
+ regmap_write(regmaps[i], MAX310X_MODE2_REG, 0);
/* Wait for port startup */
do {
- regmap_read(s->regmap,
- MAX310X_BRGDIVLSB_REG + offs, &ret);
- } while (ret != 0x01);
+ msleep(MAX310X_PORT_STARTUP_WAIT_DELAY_MS);
+ regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &val);
+
+ if (val == 0x01)
+ started = true;
+ } while (!started && (++try < MAX310X_PORT_STARTUP_WAIT_RETRIES));
+
+ if (!started) {
+ ret = dev_err_probe(dev, -EAGAIN, "port reset failed\n");
+ goto out_uart;
+ }
- regmap_write(s->regmap, MAX310X_MODE1_REG + offs,
- devtype->mode1);
+ regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1);
}
uartclk = max310x_set_ref_clk(dev, s, freq, xtal);
+ if (uartclk < 0) {
+ ret = uartclk;
+ goto out_uart;
+ }
+
dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
for (i = 0; i < devtype->nr; i++) {
@@ -1346,11 +1406,13 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
s->p[i].port.fifosize = MAX310X_FIFO_SIZE;
s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
s->p[i].port.iotype = UPIO_PORT;
- s->p[i].port.iobase = i * 0x20;
+ s->p[i].port.iobase = i;
s->p[i].port.membase = (void __iomem *)~0;
s->p[i].port.uartclk = uartclk;
s->p[i].port.rs485_config = max310x_rs485_config;
s->p[i].port.ops = &max310x_ops;
+ s->p[i].regmap = regmaps[i];
+
/* Disable all interrupts */
max310x_port_write(&s->p[i].port, MAX310X_IRQEN_REG, 0);
/* Clear IRQ status register */
@@ -1361,10 +1423,6 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
INIT_WORK(&s->p[i].md_work, max310x_md_proc);
/* Initialize queue for changing RS485 mode */
INIT_WORK(&s->p[i].rs_work, max310x_rs_proc);
- /* Initialize SPI-transfer buffers */
- s->p[i].wr_header = (s->p[i].port.iobase + MAX310X_THR_REG) |
- MAX310X_WRITE_BIT;
- s->p[i].rd_header = (s->p[i].port.iobase + MAX310X_RHR_REG);
/* Register port */
ret = uart_add_one_port(&max310x_uart, &s->p[i].port);
@@ -1402,7 +1460,7 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
if (!ret)
return 0;
- dev_err(dev, "Unable to reguest IRQ %i\n", irq);
+ dev_err(dev, "Unable to request IRQ %i\n", irq);
out_uart:
for (i = 0; i < devtype->nr; i++) {
@@ -1451,16 +1509,35 @@ static struct regmap_config regcfg = {
.val_bits = 8,
.write_flag_mask = MAX310X_WRITE_BIT,
.cache_type = REGCACHE_RBTREE,
+ .max_register = MAX310X_REG_1F,
.writeable_reg = max310x_reg_writeable,
.volatile_reg = max310x_reg_volatile,
.precious_reg = max310x_reg_precious,
+ .writeable_noinc_reg = max310x_reg_noinc,
+ .readable_noinc_reg = max310x_reg_noinc,
+ .max_raw_read = MAX310X_FIFO_SIZE,
+ .max_raw_write = MAX310X_FIFO_SIZE,
};
#ifdef CONFIG_SPI_MASTER
+static int max310x_spi_extended_reg_enable(struct device *dev, bool enable)
+{
+ struct max310x_port *s = dev_get_drvdata(dev);
+
+ return regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
+ enable ? MAX310X_EXTREG_ENBL : MAX310X_EXTREG_DSBL);
+}
+
+static const struct max310x_if_cfg __maybe_unused max310x_spi_if_cfg = {
+ .extended_reg_enable = max310x_spi_extended_reg_enable,
+ .rev_id_reg = MAX310X_SPI_REVID_EXTREG,
+};
+
static int max310x_spi_probe(struct spi_device *spi)
{
- struct max310x_devtype *devtype;
- struct regmap *regmap;
+ const struct max310x_devtype *devtype;
+ struct regmap *regmaps[4];
+ unsigned int i;
int ret;
/* Setup SPI bus */
@@ -1471,23 +1548,18 @@ static int max310x_spi_probe(struct spi_device *spi)
if (ret)
return ret;
- if (spi->dev.of_node) {
- const struct of_device_id *of_id =
- of_match_device(max310x_dt_ids, &spi->dev);
- if (!of_id)
- return -ENODEV;
+ devtype = device_get_match_data(&spi->dev);
+ if (!devtype)
+ devtype = (struct max310x_devtype *)spi_get_device_id(spi)->driver_data;
- devtype = (struct max310x_devtype *)of_id->data;
- } else {
- const struct spi_device_id *id_entry = spi_get_device_id(spi);
-
- devtype = (struct max310x_devtype *)id_entry->driver_data;
+ for (i = 0; i < devtype->nr; i++) {
+ u8 port_mask = i * 0x20;
+ regcfg.read_flag_mask = port_mask;
+ regcfg.write_flag_mask = port_mask | MAX310X_WRITE_BIT;
+ regmaps[i] = devm_regmap_init_spi(spi, &regcfg);
}
- regcfg.max_register = devtype->nr * 0x20 - 1;
- regmap = devm_regmap_init_spi(spi, &regcfg);
-
- return max310x_probe(&spi->dev, devtype, regmap, spi->irq);
+ return max310x_probe(&spi->dev, devtype, &max310x_spi_if_cfg, regmaps, spi->irq);
}
static int max310x_spi_remove(struct spi_device *spi)
@@ -1507,7 +1579,7 @@ MODULE_DEVICE_TABLE(spi, max310x_id_table);
static struct spi_driver max310x_spi_driver = {
.driver = {
.name = MAX310X_NAME,
- .of_match_table = of_match_ptr(max310x_dt_ids),
+ .of_match_table = max310x_dt_ids,
.pm = &max310x_pm_ops,
},
.probe = max310x_spi_probe,
@@ -1516,6 +1588,101 @@ static struct spi_driver max310x_spi_driver = {
};
#endif
+#ifdef CONFIG_I2C
+static int max310x_i2c_extended_reg_enable(struct device *dev, bool enable)
+{
+ return 0;
+}
+
+static struct regmap_config regcfg_i2c = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .writeable_reg = max310x_reg_writeable,
+ .volatile_reg = max310x_reg_volatile,
+ .precious_reg = max310x_reg_precious,
+ .max_register = MAX310X_I2C_REVID_EXTREG,
+ .writeable_noinc_reg = max310x_reg_noinc,
+ .readable_noinc_reg = max310x_reg_noinc,
+ .max_raw_read = MAX310X_FIFO_SIZE,
+ .max_raw_write = MAX310X_FIFO_SIZE,
+};
+
+static const struct max310x_if_cfg max310x_i2c_if_cfg = {
+ .extended_reg_enable = max310x_i2c_extended_reg_enable,
+ .rev_id_reg = MAX310X_I2C_REVID_EXTREG,
+};
+
+static unsigned short max310x_i2c_slave_addr(unsigned short addr,
+ unsigned int nr)
+{
+ /*
+ * For MAX14830 and MAX3109, the slave address depends on what the
+ * A0 and A1 pins are tied to.
+ * See the I2C Address Map table in the datasheet.
+ * Based on that table, the following address offsets were derived:
+ * UART1 - UART0 = 0x10
+ * UART2 - UART1 = 0x20 + 0x10
+ * UART3 - UART2 = 0x10
+ */
+
+ addr -= nr * 0x10;
+
+ if (nr >= 2)
+ addr -= 0x20;
+
+ return addr;
+}
+
+static int max310x_i2c_probe(struct i2c_client *client)
+{
+ const struct max310x_devtype *devtype =
+ device_get_match_data(&client->dev);
+ struct i2c_client *port_client;
+ struct regmap *regmaps[4];
+ unsigned int i;
+ u8 port_addr;
+
+ if (client->addr < devtype->slave_addr.min ||
+ client->addr > devtype->slave_addr.max)
+ return dev_err_probe(&client->dev, -EINVAL,
+ "Slave addr 0x%x outside of range [0x%x, 0x%x]\n",
+ client->addr, devtype->slave_addr.min,
+ devtype->slave_addr.max);
+
+ regmaps[0] = devm_regmap_init_i2c(client, &regcfg_i2c);
+
+ for (i = 1; i < devtype->nr; i++) {
+ port_addr = max310x_i2c_slave_addr(client->addr, i);
+ port_client = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter,
+ port_addr);
+
+ regmaps[i] = devm_regmap_init_i2c(port_client, &regcfg_i2c);
+ }
+
+ return max310x_probe(&client->dev, devtype, &max310x_i2c_if_cfg,
+ regmaps, client->irq);
+}
+
+static int max310x_i2c_remove(struct i2c_client *client)
+{
+ max310x_remove(&client->dev);
+
+ return 0;
+}
+
+static struct i2c_driver max310x_i2c_driver = {
+ .driver = {
+ .name = MAX310X_NAME,
+ .of_match_table = max310x_dt_ids,
+ .pm = &max310x_pm_ops,
+ },
+ .probe_new = max310x_i2c_probe,
+ .remove = max310x_i2c_remove,
+};
+#endif
+
static int __init max310x_uart_init(void)
{
int ret;
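
On I2C, every extra UART in the package answers at its own slave address, so the probe above instantiates devm_i2c_new_dummy_device() clients and builds one regmap per port. Working max310x_i2c_slave_addr() through for a hypothetical device at 0x60: port 1 lands at 0x50, port 2 at 0x20 (the extra -0x20 kicks in from nr >= 2 onward), port 3 at 0x10. One thing the hunk does not do is check the dummy-client return value; a sketch with that check added:

port_client = devm_i2c_new_dummy_device(&client->dev, client->adapter,
					max310x_i2c_slave_addr(client->addr, i));
if (IS_ERR(port_client))
	return PTR_ERR(port_client);	/* check not present in the hunk above */

regmaps[i] = devm_regmap_init_i2c(port_client, &regcfg_i2c);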
@@ -1529,15 +1696,35 @@ static int __init max310x_uart_init(void)
#ifdef CONFIG_SPI_MASTER
ret = spi_register_driver(&max310x_spi_driver);
if (ret)
- uart_unregister_driver(&max310x_uart);
+ goto err_spi_register;
+#endif
+
+#ifdef CONFIG_I2C
+ ret = i2c_add_driver(&max310x_i2c_driver);
+ if (ret)
+ goto err_i2c_register;
+#endif
+
+ return 0;
+
+#ifdef CONFIG_I2C
+err_i2c_register:
+ spi_unregister_driver(&max310x_spi_driver);
#endif
+err_spi_register:
+ uart_unregister_driver(&max310x_uart);
+
return ret;
}
module_init(max310x_uart_init);
static void __exit max310x_uart_exit(void)
{
+#ifdef CONFIG_I2C
+ i2c_del_driver(&max310x_i2c_driver);
+#endif
+
#ifdef CONFIG_SPI_MASTER
spi_unregister_driver(&max310x_spi_driver);
#endif
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 849ce8c1ef39..adb0bbcecd24 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -371,10 +371,14 @@ static void meson_uart_set_termios(struct uart_port *port,
else
val |= AML_UART_STOP_BIT_1SB;
- if (cflags & CRTSCTS)
- val &= ~AML_UART_TWO_WIRE_EN;
- else
+ if (cflags & CRTSCTS) {
+ if (port->flags & UPF_HARD_FLOW)
+ val &= ~AML_UART_TWO_WIRE_EN;
+ else
+ termios->c_cflag &= ~CRTSCTS;
+ } else {
val |= AML_UART_TWO_WIRE_EN;
+ }
writel(val, port->membase + AML_UART_CONTROL);
@@ -665,15 +669,19 @@ static int meson_uart_probe_clocks(struct platform_device *pdev,
static int meson_uart_probe(struct platform_device *pdev)
{
- struct resource *res_mem, *res_irq;
+ struct resource *res_mem;
struct uart_port *port;
+ u32 fifosize = 64; /* Default is 64, 128 for EE UART_0 */
int ret = 0;
- int id = -1;
+ int irq;
+ bool has_rtscts;
if (pdev->dev.of_node)
pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
if (pdev->id < 0) {
+ int id;
+
for (id = AML_UART_PORT_OFFSET; id < AML_UART_PORT_NUM; id++) {
if (!meson_ports[id]) {
pdev->id = id;
@@ -689,9 +697,12 @@ static int meson_uart_probe(struct platform_device *pdev)
if (!res_mem)
return -ENODEV;
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_irq)
- return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ of_property_read_u32(pdev->dev.of_node, "fifo-size", &fifosize);
+ has_rtscts = of_property_read_bool(pdev->dev.of_node, "uart-has-rtscts");
if (meson_ports[pdev->id]) {
dev_err(&pdev->dev, "port %d already allocated\n", pdev->id);
@@ -714,14 +725,16 @@ static int meson_uart_probe(struct platform_device *pdev)
port->iotype = UPIO_MEM;
port->mapbase = res_mem->start;
port->mapsize = resource_size(res_mem);
- port->irq = res_irq->start;
+ port->irq = irq;
port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY;
+ if (has_rtscts)
+ port->flags |= UPF_HARD_FLOW;
port->dev = &pdev->dev;
port->line = pdev->id;
port->type = PORT_MESON;
port->x_char = 0;
port->ops = &meson_uart_ops;
- port->fifosize = 64;
+ port->fifosize = fifosize;
meson_ports[pdev->id] = port;
platform_set_drvdata(pdev, port);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 2ce0d05be368..bc5f77ac538e 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -238,6 +238,7 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
struct tty_port *tport = &port->state->port;
unsigned char ch = 0;
char flag = 0;
+ int ret;
do {
if (status & STAT_RX_RDY(port)) {
@@ -250,6 +251,16 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
port->icount.parity++;
}
+ /*
+ * For UART2, error bits are not cleared on buffer read.
+ * This causes an interrupt loop and a system hang.
+ */
+ if (IS_EXTENDED(port) && (status & STAT_BRK_ERR)) {
+ ret = readl(port->membase + UART_STAT);
+ ret |= STAT_BRK_ERR;
+ writel(ret, port->membase + UART_STAT);
+ }
+
if (status & STAT_BRK_DET) {
port->icount.brk++;
status &= ~(STAT_FRM_ERR | STAT_PAR_ERR);
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 77f18445bb98..317067184bfa 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -718,6 +718,7 @@ static void pch_request_dma(struct uart_port *port)
if (!chan) {
dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Tx)\n",
__func__);
+ pci_dev_put(dma_dev);
return;
}
priv->chan_tx = chan;
@@ -734,6 +735,7 @@ static void pch_request_dma(struct uart_port *port)
__func__);
dma_release_channel(priv->chan_tx);
priv->chan_tx = NULL;
+ pci_dev_put(dma_dev);
return;
}
@@ -741,6 +743,8 @@ static void pch_request_dma(struct uart_port *port)
priv->rx_buf_virt = dma_alloc_coherent(port->dev, port->fifosize,
&priv->rx_buf_dma, GFP_KERNEL);
priv->chan_rx = chan;
+
+ pci_dev_put(dma_dev);
}
static void pch_dma_rx_complete(void *arg)
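
The pch_uart hunks fix a reference leak: the pci_get_*() lookup used to find the DMA function (pci_get_slot() in this driver, outside the hunk) takes a reference on the returned device, and every exit path, success included, has to drop it. The balanced shape, with hypothetical bus/slot values:

struct pci_dev *dma_dev;

dma_dev = pci_get_slot(bus, PCI_DEVFN(slot, 0));	/* +1 refcount */
if (!dma_dev)
	return;

/* ... use dma_dev while requesting the DMA channels ... */

pci_dev_put(dma_dev);		/* -1 refcount, on error paths too */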
@@ -772,7 +776,7 @@ static void pch_dma_tx_complete(void *arg)
}
xmit->tail &= UART_XMIT_SIZE - 1;
async_tx_ack(priv->desc_tx);
- dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE);
+ dma_unmap_sg(port->dev, priv->sg_tx_p, priv->orig_nent, DMA_TO_DEVICE);
priv->tx_dma_use = 0;
priv->nent = 0;
priv->orig_nent = 0;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 1df74bad1063..78d97dbfc18a 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -800,11 +800,10 @@ static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port)
if ((ufstat & info->tx_fifomask) != 0 ||
(ufstat & info->tx_fifofull))
return 0;
-
- return 1;
+ return TIOCSER_TEMT;
}
- return s3c24xx_serial_txempty_nofifo(port);
+ return s3c24xx_serial_txempty_nofifo(port) ? TIOCSER_TEMT : 0;
}
/* no modem control lines */
@@ -1199,8 +1198,12 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
continue;
rate = clk_get_rate(clk);
- if (!rate)
+ if (!rate) {
+ dev_err(ourport->port.dev,
+ "Failed to get clock rate for %s.\n", clkname);
+ clk_put(clk);
continue;
+ }
if (ourport->info->has_divslot) {
unsigned long div = rate / req_baud;
@@ -1226,10 +1229,18 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
calc_deviation = -calc_deviation;
if (calc_deviation < deviation) {
+ /*
+ * If we find a better clk, release the previous one, if
+ * any.
+ */
+ if (!IS_ERR(*best_clk))
+ clk_put(*best_clk);
*best_clk = clk;
best_quot = quot;
*clk_num = cnt;
deviation = calc_deviation;
+ } else {
+ clk_put(clk);
}
}
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 7d3ae31cc720..892e27cddb0f 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -24,6 +24,7 @@
#include <linux/tty_flip.h>
#include <linux/spi/spi.h>
#include <linux/uaccess.h>
+#include <linux/units.h>
#include <uapi/linux/sched/types.h>
#define SC16IS7XX_NAME "sc16is7xx"
@@ -694,6 +695,18 @@ static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
case SC16IS7XX_IIR_RTOI_SRC:
case SC16IS7XX_IIR_XOFFI_SRC:
rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);
+
+ /*
+ * There is a silicon bug that makes the chip report a
+ * time-out interrupt but no data in the FIFO. This is
+ * described in errata section 18.1.4.
+ *
+ * When this happens, read one byte from the FIFO to
+ * clear the interrupt.
+ */
+ if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen)
+ rxlen = 1;
+
if (rxlen)
sc16is7xx_handle_rx(port, rxlen, iir);
break;
@@ -1166,9 +1179,18 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
state |= BIT(offset);
else
state &= ~BIT(offset);
- sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state);
+
+ /*
+ * If we write IOSTATE first, and then IODIR, the output value is not
+ * transferred to the corresponding I/O pin.
+ * The datasheet states that each register bit will be transferred to
+ * the corresponding I/O pin programmed as output when writing to
+ * IOSTATE. Therefore, configure direction first with IODIR, and then
+ * set the value with IOSTATE.
+ */
sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset),
BIT(offset));
+ sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state);
return 0;
}
@@ -1261,6 +1283,13 @@ static int sc16is7xx_probe(struct device *dev,
s->p[i].port.type = PORT_SC16IS7XX;
s->p[i].port.fifosize = SC16IS7XX_FIFO_SIZE;
s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
+ s->p[i].port.iobase = i;
+ /*
+ * Use all ones as membase to make sure uart_configure_port() in
+ * serial_core.c does not abort for SPI/I2C devices where the
+ * membase address is not applicable.
+ */
+ s->p[i].port.membase = (void __iomem *)~0;
s->p[i].port.iotype = UPIO_PORT;
s->p[i].port.uartclk = freq;
s->p[i].port.rs485_config = sc16is7xx_config_rs485;
@@ -1384,9 +1413,12 @@ static int sc16is7xx_spi_probe(struct spi_device *spi)
/* Setup SPI bus */
spi->bits_per_word = 8;
- /* only supports mode 0 on SC16IS762 */
+ /* For all variants, only mode 0 is supported */
+ if ((spi->mode & SPI_MODE_X_MASK) != SPI_MODE_0)
+ return dev_err_probe(&spi->dev, -EINVAL, "Unsupported SPI mode\n");
+
spi->mode = spi->mode ? : SPI_MODE_0;
- spi->max_speed_hz = spi->max_speed_hz ? : 15000000;
+ spi->max_speed_hz = spi->max_speed_hz ? : 4 * HZ_PER_MHZ;
ret = spi_setup(spi);
if (ret)
return ret;
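
Two small hardening changes in the sc16is7xx SPI probe: the controller mode is validated up front (SPI_MODE_X_MASK isolates just the CPOL/CPHA bits of spi->mode), and the default bus speed now uses HZ_PER_MHZ from <linux/units.h>, pulled in by the include hunk at the top of this file. In isolation:

if ((spi->mode & SPI_MODE_X_MASK) != SPI_MODE_0)
	return dev_err_probe(&spi->dev, -EINVAL, "Unsupported SPI mode\n");

spi->max_speed_hz = spi->max_speed_hz ? : 4 * HZ_PER_MHZ;	/* 4 MHz */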
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 74c21152367a..2b5d26df5fcf 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -141,6 +141,7 @@ struct tegra_uart_port {
int configured_rate;
bool use_rx_pio;
bool use_tx_pio;
+ bool rx_dma_active;
};
static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
@@ -519,7 +520,7 @@ static void tegra_uart_tx_dma_complete(void *args)
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
spin_lock_irqsave(&tup->uport.lock, flags);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&tup->uport);
@@ -606,18 +607,18 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u)
static void tegra_uart_stop_tx(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
- struct circ_buf *xmit = &tup->uport.state->xmit;
struct dma_tx_state state;
unsigned int count;
if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
return;
- dmaengine_terminate_all(tup->tx_dma_chan);
+ dmaengine_pause(tup->tx_dma_chan);
dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
+ dmaengine_terminate_all(tup->tx_dma_chan);
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
}
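
uart_xmit_advance() is a serial-core helper that replaces the open-coded circular-buffer arithmetic removed above; besides advancing the tail, it also adds the consumed characters to port->icount.tx, which is the TX accounting these DMA completion paths were missing. Roughly what it expands to, assuming the serial_core.h definition:

static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
{
	struct circ_buf *xmit = &up->state->xmit;

	xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
	up->icount.tx += chars;
}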
@@ -693,11 +694,22 @@ static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
}
+static void do_handle_rx_pio(struct tegra_uart_port *tup)
+{
+ struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
+ struct tty_port *port = &tup->uport.state->port;
+
+ tegra_uart_handle_rx_pio(tup, port);
+ if (tty) {
+ tty_flip_buffer_push(port);
+ tty_kref_put(tty);
+ }
+}
+
static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
unsigned int residue)
{
struct tty_port *port = &tup->uport.state->port;
- struct tty_struct *tty = tty_port_tty_get(port);
unsigned int count;
async_tx_ack(tup->rx_dma_desc);
@@ -706,11 +718,7 @@ static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
/* If we are here, DMA is stopped */
tegra_uart_copy_rx_to_tty(tup, port, count);
- tegra_uart_handle_rx_pio(tup, port);
- if (tty) {
- tty_flip_buffer_push(port);
- tty_kref_put(tty);
- }
+ do_handle_rx_pio(tup);
}
static void tegra_uart_rx_dma_complete(void *args)
@@ -734,6 +742,7 @@ static void tegra_uart_rx_dma_complete(void *args)
if (tup->rts_active)
set_rts(tup, false);
+ tup->rx_dma_active = false;
tegra_uart_rx_buffer_push(tup, 0);
tegra_uart_start_rx_dma(tup);
@@ -745,18 +754,30 @@ done:
spin_unlock_irqrestore(&u->lock, flags);
}
-static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
+static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
{
struct dma_tx_state state;
+ if (!tup->rx_dma_active) {
+ do_handle_rx_pio(tup);
+ return;
+ }
+
+ dmaengine_pause(tup->rx_dma_chan);
+ dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
+ dmaengine_terminate_all(tup->rx_dma_chan);
+
+ tegra_uart_rx_buffer_push(tup, state.residue);
+ tup->rx_dma_active = false;
+}
+
+static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
+{
/* Deactivate flow control to stop sender */
if (tup->rts_active)
set_rts(tup, false);
- dmaengine_terminate_all(tup->rx_dma_chan);
- dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
- tegra_uart_rx_buffer_push(tup, state.residue);
- tegra_uart_start_rx_dma(tup);
+ tegra_uart_terminate_rx_dma(tup);
if (tup->rts_active)
set_rts(tup, true);
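
Both the TX-stop hunk and the new tegra_uart_terminate_rx_dma() follow the same ordering: pause the channel, read the residue, then terminate. Once dmaengine_terminate_all() has run, the residue for the cookie is no longer meaningful, so the old read-after-terminate order could under-count and drop bytes the DMA had already written to memory. The ordering in isolation (push_rx_bytes() is a hypothetical consumer):

struct dma_tx_state state;

dmaengine_pause(chan);				/* freeze the transfer */
dmaengine_tx_status(chan, cookie, &state);	/* residue still valid  */
dmaengine_terminate_all(chan);			/* tear the channel down */

push_rx_bytes(buf, len - state.residue);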
@@ -766,6 +787,9 @@ static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
+ if (tup->rx_dma_active)
+ return 0;
+
tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
@@ -774,6 +798,7 @@ static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
return -EIO;
}
+ tup->rx_dma_active = true;
tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
tup->rx_dma_desc->callback_param = tup;
dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
@@ -805,24 +830,13 @@ static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
}
-static void do_handle_rx_pio(struct tegra_uart_port *tup)
-{
- struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
- struct tty_port *port = &tup->uport.state->port;
-
- tegra_uart_handle_rx_pio(tup, port);
- if (tty) {
- tty_flip_buffer_push(port);
- tty_kref_put(tty);
- }
-}
-
static irqreturn_t tegra_uart_isr(int irq, void *data)
{
struct tegra_uart_port *tup = data;
struct uart_port *u = &tup->uport;
unsigned long iir;
unsigned long ier;
+ bool is_rx_start = false;
bool is_rx_int = false;
unsigned long flags;
@@ -835,10 +849,12 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
if (tup->rx_in_progress) {
ier = tup->ier_shadow;
ier |= (UART_IER_RLSI | UART_IER_RTOIE |
- TEGRA_UART_IER_EORD);
+ TEGRA_UART_IER_EORD | UART_IER_RDI);
tup->ier_shadow = ier;
tegra_uart_write(tup, ier, UART_IER);
}
+ } else if (is_rx_start) {
+ tegra_uart_start_rx_dma(tup);
}
spin_unlock_irqrestore(&u->lock, flags);
return IRQ_HANDLED;
@@ -857,17 +873,23 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
case 4: /* End of data */
case 6: /* Rx timeout */
- case 2: /* Receive */
- if (!tup->use_rx_pio && !is_rx_int) {
- is_rx_int = true;
+ if (!tup->use_rx_pio) {
+ is_rx_int = tup->rx_in_progress;
/* Disable Rx interrupts */
ier = tup->ier_shadow;
- ier |= UART_IER_RDI;
- tegra_uart_write(tup, ier, UART_IER);
ier &= ~(UART_IER_RDI | UART_IER_RLSI |
UART_IER_RTOIE | TEGRA_UART_IER_EORD);
tup->ier_shadow = ier;
tegra_uart_write(tup, ier, UART_IER);
+ break;
+ }
+ /* Fall through */
+ case 2: /* Receive */
+ if (!tup->use_rx_pio) {
+ is_rx_start = tup->rx_in_progress;
+ tup->ier_shadow &= ~UART_IER_RDI;
+ tegra_uart_write(tup, tup->ier_shadow,
+ UART_IER);
} else {
do_handle_rx_pio(tup);
}
@@ -889,7 +911,6 @@ static void tegra_uart_stop_rx(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
struct tty_port *port = &tup->uport.state->port;
- struct dma_tx_state state;
unsigned long ier;
if (tup->rts_active)
@@ -906,13 +927,11 @@ static void tegra_uart_stop_rx(struct uart_port *u)
tup->ier_shadow = ier;
tegra_uart_write(tup, ier, UART_IER);
tup->rx_in_progress = 0;
- if (tup->rx_dma_chan && !tup->use_rx_pio) {
- dmaengine_terminate_all(tup->rx_dma_chan);
- dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
- tegra_uart_rx_buffer_push(tup, state.residue);
- } else {
+
+ if (!tup->use_rx_pio)
+ tegra_uart_terminate_rx_dma(tup);
+ else
tegra_uart_handle_rx_pio(tup, port);
- }
}
static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
@@ -981,7 +1000,11 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
tup->ier_shadow = 0;
tup->current_baud = 0;
- clk_prepare_enable(tup->uart_clk);
+ ret = clk_prepare_enable(tup->uart_clk);
+ if (ret) {
+ dev_err(tup->uport.dev, "could not enable clk\n");
+ return ret;
+ }
/* Reset the UART controller to clear all previous status.*/
reset_control_assert(tup->rst);
@@ -1057,12 +1080,6 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
tup->fcr_shadow |= UART_FCR_DMA_SELECT;
tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
-
- ret = tegra_uart_start_rx_dma(tup);
- if (ret < 0) {
- dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
- return ret;
- }
} else {
tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
}
@@ -1072,10 +1089,6 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
* Enable IE_RXS for the receive status interrupts like line errors.
* Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
*
- * If using DMA mode, enable EORD instead of receive interrupt which
- * will interrupt after the UART is done with the receive instead of
- * the interrupt when the FIFO "threshold" is reached.
- *
* EORD is a different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when
* data is sitting in the FIFO and couldn't be transferred to the
* DMA because the DMA size alignment (4 bytes) is not met. EORD will be
@@ -1086,11 +1099,14 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
* both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
* then the EORD.
*/
+ tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
+
+ /*
+ * If using DMA mode, enable EORD interrupt to notify about RX
+ * completion.
+ */
if (!tup->use_rx_pio)
- tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE |
- TEGRA_UART_IER_EORD;
- else
- tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
+ tup->ier_shadow |= TEGRA_UART_IER_EORD;
tegra_uart_write(tup, tup->ier_shadow, UART_IER);
return 0;
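
The net effect of the serial-tegra hunks above is that RLSI, RTOIE and RDI are now always enabled, with EORD layered on top only in DMA mode. A minimal, self-contained sketch of that interrupt-enable logic follows; the bit values are illustrative stand-ins for the kernel's register definitions, and the struct is not the driver's real state:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define UART_IER_RLSI        0x04  /* receiver line status interrupt */
    #define UART_IER_RDI         0x01  /* received data interrupt */
    #define UART_IER_RTOIE       0x10  /* Rx timeout (illustrative value) */
    #define TEGRA_UART_IER_EORD  0x20  /* end-of-receive-data (illustrative) */

    struct uart_state {
        uint32_t ier_shadow;
        bool use_rx_pio;
    };

    static void setup_rx_interrupts(struct uart_state *s)
    {
        /* Baseline interrupts are wanted in both PIO and DMA mode. */
        s->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;

        /* In DMA mode, EORD additionally signals RX completion. */
        if (!s->use_rx_pio)
            s->ier_shadow |= TEGRA_UART_IER_EORD;
    }

    int main(void)
    {
        struct uart_state s = { .use_rx_pio = false };

        setup_rx_interrupts(&s);
        printf("ier = 0x%02x\n", (unsigned)s.ier_shadow); /* 0x35 here */
        return 0;
    }
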
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index c066bb7f07b0..95db67c07c34 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2925,6 +2925,13 @@ static int sci_init_single(struct platform_device *dev,
sci_port->irqs[i] = platform_get_irq(dev, i);
}
+ /*
+ * The fourth interrupt on an SCI port is the transmit end interrupt,
+ * so shuffle the interrupts.
+ */
+ if (p->type == PORT_SCI)
+ swap(sci_port->irqs[SCIx_BRI_IRQ], sci_port->irqs[SCIx_TEI_IRQ]);
+
/* The SCI generates several interrupts. They can be muxed together or
* connected to different interrupt lines. In the muxed case only one
* interrupt resource is specified as there is only one interrupt ID.
@@ -2990,7 +2997,7 @@ static int sci_init_single(struct platform_device *dev,
port->flags = UPF_FIXED_PORT | UPF_BOOT_AUTOCONF | p->flags;
port->fifosize = sci_port->params->fifosize;
- if (port->type == PORT_SCI) {
+ if (port->type == PORT_SCI && !dev->dev.of_node) {
if (sci_port->reg_size >= 0x20)
port->regshift = 2;
else
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 7015632c4990..e11756a8788b 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -821,7 +821,7 @@ static void sifive_serial_console_write(struct console *co, const char *s,
local_irq_restore(flags);
}
-static int __init sifive_serial_console_setup(struct console *co, char *options)
+static int sifive_serial_console_setup(struct console *co, char *options)
{
struct sifive_serial_port *ssp;
int baud = SIFIVE_DEFAULT_BAUD_RATE;
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 07573de70445..8b45b3ab6341 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -371,7 +371,7 @@ static void sprd_rx_free_buf(struct sprd_uart_port *sp)
if (sp->rx_dma.virt)
dma_free_coherent(sp->port.dev, SPRD_UART_RX_SIZE,
sp->rx_dma.virt, sp->rx_dma.phys_addr);
-
+ sp->rx_dma.virt = NULL;
}
static int sprd_rx_dma_config(struct uart_port *port, u32 burst)
@@ -1073,29 +1073,6 @@ static struct uart_driver sprd_uart_driver = {
.cons = SPRD_CONSOLE,
};
-static int sprd_probe_dt_alias(int index, struct device *dev)
-{
- struct device_node *np;
- int ret = index;
-
- if (!IS_ENABLED(CONFIG_OF))
- return ret;
-
- np = dev->of_node;
- if (!np)
- return ret;
-
- ret = of_alias_get_id(np, "serial");
- if (ret < 0)
- ret = index;
- else if (ret >= ARRAY_SIZE(sprd_port) || sprd_port[ret] != NULL) {
- dev_warn(dev, "requested serial port %d not available.\n", ret);
- ret = index;
- }
-
- return ret;
-}
-
static int sprd_remove(struct platform_device *dev)
{
struct sprd_uart_port *sup = platform_get_drvdata(dev);
@@ -1126,7 +1103,7 @@ static bool sprd_uart_is_console(struct uart_port *uport)
static int sprd_clk_init(struct uart_port *uport)
{
struct clk *clk_uart, *clk_parent;
- struct sprd_uart_port *u = sprd_port[uport->line];
+ struct sprd_uart_port *u = container_of(uport, struct sprd_uart_port, port);
clk_uart = devm_clk_get(uport->dev, "uart");
if (IS_ERR(clk_uart)) {
@@ -1169,25 +1146,22 @@ static int sprd_probe(struct platform_device *pdev)
{
struct resource *res;
struct uart_port *up;
+ struct sprd_uart_port *sport;
int irq;
int index;
int ret;
- for (index = 0; index < ARRAY_SIZE(sprd_port); index++)
- if (sprd_port[index] == NULL)
- break;
-
- if (index == ARRAY_SIZE(sprd_port))
- return -EBUSY;
-
- index = sprd_probe_dt_alias(index, &pdev->dev);
+ index = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (index < 0 || index >= UART_NR_MAX) {
+ dev_err(&pdev->dev, "got a wrong serial alias id %d\n", index);
+ return -EINVAL;
+ }
- sprd_port[index] = devm_kzalloc(&pdev->dev, sizeof(*sprd_port[index]),
- GFP_KERNEL);
- if (!sprd_port[index])
+ sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
+ if (!sport)
return -ENOMEM;
- up = &sprd_port[index]->port;
+ up = &sport->port;
up->dev = &pdev->dev;
up->line = index;
up->type = PORT_SPRD;
@@ -1217,7 +1191,7 @@ static int sprd_probe(struct platform_device *pdev)
* Allocate one dma buffer to prepare for the receive transfer, in case
* of memory allocation failure at runtime.
*/
- ret = sprd_rx_alloc_buf(sprd_port[index]);
+ ret = sprd_rx_alloc_buf(sport);
if (ret)
return ret;
@@ -1225,19 +1199,27 @@ static int sprd_probe(struct platform_device *pdev)
ret = uart_register_driver(&sprd_uart_driver);
if (ret < 0) {
pr_err("Failed to register SPRD-UART driver\n");
- return ret;
+ goto free_rx_buf;
}
}
+
sprd_ports_num++;
+ sprd_port[index] = sport;
ret = uart_add_one_port(&sprd_uart_driver, up);
- if (ret) {
- sprd_port[index] = NULL;
- sprd_remove(pdev);
- }
+ if (ret)
+ goto clean_port;
platform_set_drvdata(pdev, up);
+ return 0;
+
+clean_port:
+ sprd_port[index] = NULL;
+ if (--sprd_ports_num == 0)
+ uart_unregister_driver(&sprd_uart_driver);
+free_rx_buf:
+ sprd_rx_free_buf(sport);
return ret;
}
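
The reworked sprd_probe() above follows the usual goto-unwind convention: resources are acquired in order, and on failure they are released in exactly the reverse order. A minimal standalone sketch of the pattern, with stand-in resource functions in place of the driver's helpers:

    #include <stdio.h>
    #include <stdlib.h>

    static int  acquire_buf(void)       { return 0; }
    static void release_buf(void)       { puts("free rx buf"); }
    static int  register_driver(void)   { return 0; }
    static void unregister_driver(void) { puts("unregister driver"); }
    static int  add_port(void)          { return -1; /* force error path */ }

    static int probe(void)
    {
        int ret;

        ret = acquire_buf();
        if (ret)
            return ret;

        ret = register_driver();
        if (ret)
            goto free_buf;

        ret = add_port();
        if (ret)
            goto unregister;

        return 0;

    unregister:
        unregister_driver();        /* undo register_driver() */
    free_buf:
        release_buf();              /* undo acquire_buf() */
        return ret;
    }

    int main(void)
    {
        return probe() ? EXIT_FAILURE : EXIT_SUCCESS;
    }
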
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 72131b5e132e..beca02c30498 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -1140,7 +1140,13 @@ static int __init sunsab_init(void)
}
}
- return platform_driver_register(&sab_driver);
+ err = platform_driver_register(&sab_driver);
+ if (err) {
+ kfree(sunsab_ports);
+ sunsab_ports = NULL;
+ }
+
+ return err;
}
static void __exit sunsab_exit(void)
diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c
index aaf8748a6147..31ae705aa38b 100644
--- a/drivers/tty/serial/tegra-tcu.c
+++ b/drivers/tty/serial/tegra-tcu.c
@@ -101,7 +101,7 @@ static void tegra_tcu_uart_start_tx(struct uart_port *port)
break;
tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uart_xmit_advance(port, count);
}
uart_write_wakeup(port);
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 9a4049c894f7..82313ec6c411 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -27,7 +27,6 @@
#define ULITE_NAME "ttyUL"
#define ULITE_MAJOR 204
#define ULITE_MINOR 187
-#define ULITE_NR_UARTS CONFIG_SERIAL_UARTLITE_NR_UARTS
/* ---------------------------------------------------------------------
* Register definitions
@@ -65,6 +64,7 @@ static struct uart_port *console_port;
struct uartlite_data {
const struct uartlite_reg_ops *reg_ops;
struct clk *clk;
+ int id;
struct uart_driver *ulite_uart_driver;
};
@@ -117,7 +117,6 @@ static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
pdata->reg_ops->out(val, port->membase + offset);
}
-static struct uart_port ulite_ports[ULITE_NR_UARTS];
/* ---------------------------------------------------------------------
* Core UART driver operations
@@ -537,18 +536,6 @@ static int ulite_console_setup(struct console *co, char *options)
return uart_set_options(port, co, baud, parity, bits, flow);
}
-static struct uart_driver ulite_uart_driver;
-
-static struct console ulite_console = {
- .name = ULITE_NAME,
- .write = ulite_console_write,
- .device = uart_console_device,
- .setup = ulite_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1, /* Specified on the cmdline (e.g. console=ttyUL0 ) */
- .data = &ulite_uart_driver,
-};
-
static void early_uartlite_putc(struct uart_port *port, int c)
{
/*
@@ -592,18 +579,6 @@ OF_EARLYCON_DECLARE(uartlite_a, "xlnx,xps-uartlite-1.00.a", early_uartlite_setup
#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */
-static struct uart_driver ulite_uart_driver = {
- .owner = THIS_MODULE,
- .driver_name = "uartlite",
- .dev_name = ULITE_NAME,
- .major = ULITE_MAJOR,
- .minor = ULITE_MINOR,
- .nr = ULITE_NR_UARTS,
-#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
- .cons = &ulite_console,
-#endif
-};
-
/* ---------------------------------------------------------------------
* Port assignment functions (mapping devices to uart_port structures)
*/
@@ -624,24 +599,9 @@ static int ulite_assign(struct device *dev, int id, phys_addr_t base, int irq,
struct uart_port *port;
int rc;
- /* if id = -1; then scan for a free id and use that */
- if (id < 0) {
- for (id = 0; id < ULITE_NR_UARTS; id++)
- if (ulite_ports[id].mapbase == 0)
- break;
- }
- if (id < 0 || id >= ULITE_NR_UARTS) {
- dev_err(dev, "%s%i too large\n", ULITE_NAME, id);
- return -EINVAL;
- }
-
- if ((ulite_ports[id].mapbase) && (ulite_ports[id].mapbase != base)) {
- dev_err(dev, "cannot assign to %s%i; it is already in use\n",
- ULITE_NAME, id);
- return -EBUSY;
- }
-
- port = &ulite_ports[id];
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
spin_lock_init(&port->lock);
port->fifosize = 16;
@@ -655,13 +615,23 @@ static int ulite_assign(struct device *dev, int id, phys_addr_t base, int irq,
port->flags = UPF_BOOT_AUTOCONF;
port->dev = dev;
port->type = PORT_UNKNOWN;
- port->line = id;
port->private_data = pdata;
dev_set_drvdata(dev, port);
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+ /*
+	 * If a console hasn't been found yet, tentatively assign this port,
+	 * because the console setup function requires console_port to be set.
+	 * If register_console() doesn't claim it, the console_port pointer
+	 * is cleared again below.
+ */
+ if (!console_port)
+ console_port = port;
+#endif
+
/* Register the port */
- rc = uart_add_one_port(&ulite_uart_driver, port);
+ rc = uart_add_one_port(pdata->ulite_uart_driver, port);
if (rc) {
dev_err(dev, "uart_add_one_port() failed; err=%i\n", rc);
port->mapbase = 0;
@@ -669,6 +639,13 @@ static int ulite_assign(struct device *dev, int id, phys_addr_t base, int irq,
return rc;
}
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+	/* This is not the port used for the console, so clean it up */
+ if (console_port == port &&
+ !(pdata->ulite_uart_driver->cons->flags & CON_ENABLED))
+ console_port = NULL;
+#endif
+
return 0;
}
@@ -767,11 +744,24 @@ static const struct of_device_id ulite_of_match[] = {
MODULE_DEVICE_TABLE(of, ulite_of_match);
#endif /* CONFIG_OF */
-static int ulite_probe(struct platform_device *pdev)
+/*
+ * Maximum number of instances without alias IDs. If an alias targets
+ * an ID in the "< MAX_UART_INSTANCES" range, that ID can't be reused.
+ */
+#define MAX_UART_INSTANCES 256
+
+/* Stores static aliases list */
+static DECLARE_BITMAP(alias_bitmap, MAX_UART_INSTANCES);
+static int alias_bitmap_initialized;
+
+/* Stores actual bitmap of allocated IDs with alias IDs together */
+static DECLARE_BITMAP(bitmap, MAX_UART_INSTANCES);
+/* Protect bitmap operations to have unique IDs */
+static DEFINE_MUTEX(bitmap_lock);
+
+static int ulite_get_id(struct platform_device *pdev)
{
- struct resource *res;
- struct uartlite_data *pdata;
- int irq, ret;
+ int ret;
int id = pdev->id;
#ifdef CONFIG_OF
const __be32 *prop;
@@ -780,39 +770,158 @@ static int ulite_probe(struct platform_device *pdev)
if (prop)
id = be32_to_cpup(prop);
#endif
- if (id < 0) {
- /* Look for a serialN alias */
- id = of_alias_get_id(pdev->dev.of_node, "serial");
- if (id < 0)
- id = 0;
- }
- if (!ulite_uart_driver.state) {
- dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n");
- ret = uart_register_driver(&ulite_uart_driver);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register driver\n");
+ mutex_lock(&bitmap_lock);
+
+	/* The alias list is stable, so read the alias bitmap only once */
+ if (!alias_bitmap_initialized) {
+ ret = of_alias_get_alias_list(of_match_ptr(ulite_of_match), "serial",
+ alias_bitmap, MAX_UART_INSTANCES);
+ if (ret) {
+ mutex_unlock(&bitmap_lock);
return ret;
}
+
+ alias_bitmap_initialized++;
}
+	/* Make sure an alias ID is not taken by an instance without an alias */
+ bitmap_or(bitmap, bitmap, alias_bitmap, MAX_UART_INSTANCES);
+
+ dev_dbg(&pdev->dev, "Alias bitmap: %*pb\n",
+ MAX_UART_INSTANCES, bitmap);
+
+ /* Look for a serialN alias */
+ if (id < 0)
+ id = of_alias_get_id(pdev->dev.of_node, "serial");
+
+ if (id < 0) {
+ dev_warn(&pdev->dev,
+ "No serial alias passed. Using the first free id\n");
+
+ /*
+	 * Start with id 0 and check that there is no serial0 alias
+	 * pointing to a device compatible with this driver.
+	 * If such an alias exists, try the next free position.
+ */
+ id = 0;
+
+ for (;;) {
+ dev_info(&pdev->dev, "Checking id %d\n", id);
+ id = find_next_zero_bit(bitmap, MAX_UART_INSTANCES, id);
+
+			/* No free instance left */
+ if (id == MAX_UART_INSTANCES) {
+ dev_err(&pdev->dev, "No free ID\n");
+ mutex_unlock(&bitmap_lock);
+ return -EINVAL;
+ }
+
+ dev_dbg(&pdev->dev, "The empty id is %d\n", id);
+			/* Check if the ID is free */
+			if (!test_and_set_bit(id, bitmap)) {
+				/* The free bit was claimed; break the loop */
+ dev_dbg(&pdev->dev,
+ "Selected ID %d allocation passed\n",
+ id);
+ break;
+ }
+ dev_dbg(&pdev->dev,
+ "Selected ID %d allocation failed\n", id);
+			/* If taking the bit fails, try the next one */
+ id++;
+ }
+ }
+
+ mutex_unlock(&bitmap_lock);
+
+ return id;
+}
+
+static int ulite_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct uartlite_data *pdata;
+ int irq, ret;
+ struct uart_driver *ulite_uart_driver;
+ char *driver_name;
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+ struct console *ulite_console;
+#endif
+
pdata = devm_kzalloc(&pdev->dev, sizeof(struct uartlite_data),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
+ ulite_uart_driver = devm_kzalloc(&pdev->dev,
+ sizeof(*ulite_uart_driver),
+ GFP_KERNEL);
+ if (!ulite_uart_driver)
+ return -ENOMEM;
+
+ pdata->id = ulite_get_id(pdev);
+ if (pdata->id < 0)
+ return pdata->id;
+
+	/* A unique driver name is needed for each instance */
+ driver_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s%d",
+ ULITE_NAME, pdata->id);
+ if (!driver_name) {
+ ret = -ENOMEM;
+ goto err_out_id;
+ }
+
+ ulite_uart_driver->owner = THIS_MODULE;
+ ulite_uart_driver->driver_name = driver_name;
+ ulite_uart_driver->dev_name = ULITE_NAME;
+ ulite_uart_driver->major = ULITE_MAJOR;
+ ulite_uart_driver->minor = pdata->id;
+ ulite_uart_driver->nr = 1;
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+ ulite_console = devm_kzalloc(&pdev->dev, sizeof(*ulite_console),
+ GFP_KERNEL);
+ if (!ulite_console) {
+ ret = -ENOMEM;
+ goto err_out_id;
+ }
+
+ strncpy(ulite_console->name, ULITE_NAME,
+ sizeof(ulite_console->name));
+ ulite_console->index = pdata->id;
+ ulite_console->write = ulite_console_write;
+ ulite_console->device = uart_console_device;
+ ulite_console->setup = ulite_console_setup;
+ ulite_console->flags = CON_PRINTBUFFER;
+ ulite_uart_driver->cons = ulite_console;
+ ulite_console->data = ulite_uart_driver;
+#endif
+
+ dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n");
+ ret = uart_register_driver(ulite_uart_driver);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register driver\n");
+ goto err_out_id;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
+ if (!res) {
+ ret = -ENODEV;
+ goto err_out_unregister_driver;
+ }
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -ENXIO;
+ if (irq <= 0) {
+ ret = -ENXIO;
+ goto err_out_unregister_driver;
+ }
pdata->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
if (IS_ERR(pdata->clk)) {
- if (PTR_ERR(pdata->clk) != -ENOENT)
- return PTR_ERR(pdata->clk);
+ if (PTR_ERR(pdata->clk) != -ENOENT) {
+ ret = PTR_ERR(pdata->clk);
+ goto err_out_unregister_driver;
+ }
/*
* Clock framework support is optional, continue on
@@ -821,11 +930,10 @@ static int ulite_probe(struct platform_device *pdev)
pdata->clk = NULL;
}
- pdata->ulite_uart_driver = &ulite_uart_driver;
ret = clk_prepare_enable(pdata->clk);
if (ret) {
dev_err(&pdev->dev, "Failed to prepare clock\n");
- return ret;
+ goto err_out_unregister_driver;
}
pm_runtime_use_autosuspend(&pdev->dev);
@@ -833,11 +941,27 @@ static int ulite_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- ret = ulite_assign(&pdev->dev, id, res->start, irq, pdata);
+ ulite_uart_driver->tty_driver->name_base = pdata->id;
+ pdata->ulite_uart_driver = ulite_uart_driver;
+ ret = ulite_assign(&pdev->dev, pdata->id, res->start, irq, pdata);
+ if (ret < 0)
+ goto err_out_clk_disable;
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
+ return 0;
+err_out_clk_disable:
+ clk_disable_unprepare(pdata->clk);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+err_out_unregister_driver:
+ uart_unregister_driver(ulite_uart_driver);
+err_out_id:
+ mutex_lock(&bitmap_lock);
+ clear_bit(pdata->id, bitmap);
+ mutex_unlock(&bitmap_lock);
return ret;
}
@@ -849,6 +973,16 @@ static int ulite_remove(struct platform_device *pdev)
clk_unprepare(pdata->clk);
rc = ulite_release(&pdev->dev);
+ mutex_lock(&bitmap_lock);
+ clear_bit(pdata->id, bitmap);
+ mutex_unlock(&bitmap_lock);
+
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+ if (console_port == port)
+ console_port = NULL;
+#endif
+
+ uart_unregister_driver(pdata->ulite_uart_driver);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
@@ -874,7 +1008,6 @@ static struct platform_driver ulite_platform_driver = {
static int __init ulite_init(void)
{
-
pr_debug("uartlite: calling platform_driver_register()\n");
return platform_driver_register(&ulite_platform_driver);
}
@@ -882,8 +1015,6 @@ static int __init ulite_init(void)
static void __exit ulite_exit(void)
{
platform_driver_unregister(&ulite_platform_driver);
- if (ulite_uart_driver.state)
- uart_unregister_driver(&ulite_uart_driver);
}
module_init(ulite_init);
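
ulite_get_id() above claims instance IDs from a bitmap: static alias IDs are pre-marked once, and instances without an alias take the first free bit via test_and_set_bit(). A userspace-flavored sketch of that allocation scheme, assuming a simple byte-array bitmap in place of the kernel's bitmap API (and omitting the mutex a real driver needs):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_IDS 256

    static unsigned char bitmap[MAX_IDS / 8];

    /* Emulates the kernel's test_and_set_bit(): returns the old value. */
    static bool test_and_set(int id)
    {
        unsigned char mask = 1u << (id % 8);
        bool was_set = bitmap[id / 8] & mask;

        bitmap[id / 8] |= mask;
        return was_set;
    }

    static int get_free_id(void)
    {
        for (int id = 0; id < MAX_IDS; id++)
            if (!test_and_set(id))
                return id;   /* claimed a free slot */
        return -1;           /* no free ID */
    }

    int main(void)
    {
        test_and_set(0);     /* pretend "serial0" is a static alias */
        printf("allocated id %d\n", get_free_id()); /* prints 1 */
        return 0;
    }
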
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index a0555ae2b1ef..181d55e0c60f 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1141,6 +1141,8 @@ static unsigned int soc_info(unsigned int *rev_h, unsigned int *rev_l)
/* No compatible property, so try the name. */
soc_string = np->name;
+ of_node_put(np);
+
/* Extract the SOC number from the "PowerPC," string */
if ((sscanf(soc_string, "PowerPC,%u", &soc) != 1) || !soc)
return 0;
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index a1409251fbcc..32f693394000 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -368,7 +368,9 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
cdns_uart_handle_tx(dev_id);
isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
}
+ isrstatus &= port->read_status_mask;
+ isrstatus &= ~port->ignore_status_mask;
/*
* Skip RX processing if RX is disabled as RXEMPTY will never be set
* as read bytes will not be removed from the FIFO.
@@ -1130,6 +1134,13 @@ static struct uart_driver cdns_uart_uart_driver;
*/
static void cdns_uart_console_putchar(struct uart_port *port, int ch)
{
+ unsigned int ctrl_reg;
+
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
+ while (ctrl_reg & CDNS_UART_CR_TX_DIS) {
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
+ cpu_relax();
+ }
while (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)
cpu_relax();
writel(ch, port->membase + CDNS_UART_FIFO);
@@ -1552,6 +1563,8 @@ static int cdns_uart_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
port->private_data = cdns_uart_data;
+ port->read_status_mask = CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_RXTRIG |
+ CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_TOUT;
cdns_uart_data->port = port;
platform_set_drvdata(pdev, port);
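
The cdns_uart_console_putchar() change above adds a second polling stage: wait for the transmitter to be enabled before the existing wait for FIFO space. A minimal sketch of that two-stage poll; the register reads and bit positions are illustrative stand-ins for the CDNS_UART_* definitions, not the driver's actual MMIO accessors:

    #include <stdint.h>
    #include <stdio.h>

    #define CR_TX_DIS (1u << 5)  /* illustrative bit positions */
    #define SR_TXFULL (1u << 4)

    static uint32_t fake_cr, fake_sr;   /* stand-ins for MMIO registers */

    static uint32_t read_cr(void) { return fake_cr; }
    static uint32_t read_sr(void) { return fake_sr; }

    static void console_putchar(char ch)
    {
        /* First wait until the transmitter is enabled... */
        while (read_cr() & CR_TX_DIS)
            ;   /* cpu_relax() in the kernel */

        /* ...then wait for room in the TX FIFO before writing. */
        while (read_sr() & SR_TXFULL)
            ;

        putchar(ch);   /* stands in for writel(ch, FIFO) */
    }

    int main(void)
    {
        console_putchar('A');
        console_putchar('\n');
        return 0;
    }
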
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index ddfe873b5fcc..bb14bd4f6e55 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -876,13 +876,13 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
return i;
}
-static void tty_write_unlock(struct tty_struct *tty)
+void tty_write_unlock(struct tty_struct *tty)
{
mutex_unlock(&tty->atomic_write_lock);
wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
}
-static int tty_write_lock(struct tty_struct *tty, int ndelay)
+int tty_write_lock(struct tty_struct *tty, int ndelay)
{
if (!mutex_trylock(&tty->atomic_write_lock)) {
if (ndelay)
@@ -1156,14 +1156,16 @@ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
{
struct tty_struct *tty;
- if (driver->ops->lookup)
+ if (driver->ops->lookup) {
if (!file)
tty = ERR_PTR(-EIO);
else
tty = driver->ops->lookup(driver, file, idx);
- else
+ } else {
+ if (idx >= driver->num)
+ return ERR_PTR(-EINVAL);
tty = driver->ttys[idx];
-
+ }
if (!IS_ERR(tty))
tty_kref_get(tty);
return tty;
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 9245fffdbceb..5d833f0b412d 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -397,21 +397,42 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
tmp_termios.c_ispeed = tty_termios_input_baud_rate(&tmp_termios);
tmp_termios.c_ospeed = tty_termios_baud_rate(&tmp_termios);
- ld = tty_ldisc_ref(tty);
+ if (opt & (TERMIOS_FLUSH|TERMIOS_WAIT)) {
+retry_write_wait:
+ retval = wait_event_interruptible(tty->write_wait, !tty_chars_in_buffer(tty));
+ if (retval < 0)
+ return retval;
- if (ld != NULL) {
- if ((opt & TERMIOS_FLUSH) && ld->ops->flush_buffer)
- ld->ops->flush_buffer(tty);
- tty_ldisc_deref(ld);
- }
+ if (tty_write_lock(tty, 0) < 0)
+ goto retry_write_wait;
- if (opt & TERMIOS_WAIT) {
- tty_wait_until_sent(tty, 0);
- if (signal_pending(current))
- return -ERESTARTSYS;
- }
+ /* Racing writer? */
+ if (tty_chars_in_buffer(tty)) {
+ tty_write_unlock(tty);
+ goto retry_write_wait;
+ }
+
+ ld = tty_ldisc_ref(tty);
+ if (ld != NULL) {
+ if ((opt & TERMIOS_FLUSH) && ld->ops->flush_buffer)
+ ld->ops->flush_buffer(tty);
+ tty_ldisc_deref(ld);
+ }
+
+ if ((opt & TERMIOS_WAIT) && tty->ops->wait_until_sent) {
+ tty->ops->wait_until_sent(tty, 0);
+ if (signal_pending(current)) {
+ tty_write_unlock(tty);
+ return -ERESTARTSYS;
+ }
+ }
+
+ tty_set_termios(tty, &tmp_termios);
- tty_set_termios(tty, &tmp_termios);
+ tty_write_unlock(tty);
+ } else {
+ tty_set_termios(tty, &tmp_termios);
+ }
/* FIXME: Arguably if tmp_termios == tty->termios AND the
actual requested termios was not tmp_termios then we may
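
The set_termios() rework above replaces the old flush-then-wait sequence with a wait/lock/recheck loop: sleep until the buffer looks empty, take the write lock, and recheck emptiness under the lock in case a writer raced in. A compact sketch of that control flow, with trivial stub helpers standing in for the tty internals:

    #include <stdbool.h>
    #include <stdio.h>

    static bool chars_in_buffer(void)  { return false; }
    static int  wait_until_empty(void) { return 0; }
    static int  try_write_lock(void)   { return 0; }
    static void write_unlock(void)     { }
    static void apply_termios(void)    { puts("termios applied"); }

    static int drain_then_apply(void)
    {
        int ret;

    retry:
        ret = wait_until_empty();
        if (ret < 0)
            return ret;              /* interrupted by a signal */

        if (try_write_lock() < 0)
            goto retry;              /* lock contended: wait again */

        if (chars_in_buffer()) {     /* a racing writer slipped in */
            write_unlock();
            goto retry;
        }

        apply_termios();             /* buffer empty, lock held */
        write_unlock();
        return 0;
    }

    int main(void)
    {
        return drain_then_apply();
    }
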
diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
index 813be2c05262..c4bf741533ab 100644
--- a/drivers/tty/tty_jobctrl.c
+++ b/drivers/tty/tty_jobctrl.c
@@ -290,12 +290,7 @@ void disassociate_ctty(int on_exit)
return;
}
- spin_lock_irq(&current->sighand->siglock);
- put_pid(current->signal->tty_old_pgrp);
- current->signal->tty_old_pgrp = NULL;
- tty = tty_kref_get(current->signal->tty);
- spin_unlock_irq(&current->sighand->siglock);
-
+ tty = get_current_tty();
if (tty) {
unsigned long flags;
@@ -310,6 +305,16 @@ void disassociate_ctty(int on_exit)
tty_kref_put(tty);
}
+ /* If tty->ctrl.pgrp is not NULL, it may be assigned to
+	 * current->signal->tty_old_pgrp in a race condition and
+	 * cause a pid memleak. Release current->signal->tty_old_pgrp
+	 * only after tty->ctrl.pgrp has been set to NULL.
+ */
+ spin_lock_irq(&current->sighand->siglock);
+ put_pid(current->signal->tty_old_pgrp);
+ current->signal->tty_old_pgrp = NULL;
+ spin_unlock_irq(&current->sighand->siglock);
+
/* Now clear signal->tty under the lock */
read_lock(&tasklist_lock);
session_clear_tty(task_session(current));
diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
index 9ffd42e333b8..6b2d35ac6e3b 100644
--- a/drivers/tty/vcc.c
+++ b/drivers/tty/vcc.c
@@ -587,18 +587,22 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
return -ENOMEM;
name = kstrdup(dev_name(&vdev->dev), GFP_KERNEL);
+ if (!name) {
+ rv = -ENOMEM;
+ goto free_port;
+ }
rv = vio_driver_init(&port->vio, vdev, VDEV_CONSOLE_CON, vcc_versions,
ARRAY_SIZE(vcc_versions), NULL, name);
if (rv)
- goto free_port;
+ goto free_name;
port->vio.debug = vcc_dbg_vio;
vcc_ldc_cfg.debug = vcc_dbg_ldc;
rv = vio_ldc_alloc(&port->vio, &vcc_ldc_cfg, port);
if (rv)
- goto free_port;
+ goto free_name;
spin_lock_init(&port->lock);
@@ -632,6 +636,11 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto unreg_tty;
}
port->domain = kstrdup(domain, GFP_KERNEL);
+ if (!port->domain) {
+ rv = -ENOMEM;
+ goto unreg_tty;
+ }
+
mdesc_release(hp);
@@ -661,8 +670,9 @@ free_table:
vcc_table_remove(port->index);
free_ldc:
vio_ldc_free(&port->vio);
-free_port:
+free_name:
kfree(name);
+free_port:
kfree(port);
return rv;
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 68643f61f6f9..0da9e0ab045b 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -1241,7 +1241,7 @@ static void kbd_bh(unsigned long dummy)
}
}
-DECLARE_TASKLET_DISABLED(keyboard_tasklet, kbd_bh, 0);
+DECLARE_TASKLET_DISABLED_OLD(keyboard_tasklet, kbd_bh);
#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_ALPHA) ||\
defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) ||\
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 778f83ea2249..a6813e3393ec 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -200,39 +200,47 @@ vcs_vc(struct inode *inode, int *viewed)
return vc_cons[currcons].d;
}
-/*
- * Returns size for VC carried by inode.
+/**
+ * vcs_size -- return size for a VC in @vc
+ * @vc: which VC
+ * @attr: does it use attributes?
+ * @unicode: is it unicode?
+ *
* Must be called with console_lock.
*/
-static int
-vcs_size(struct inode *inode)
+static int vcs_size(const struct vc_data *vc, bool attr, bool unicode)
{
int size;
- struct vc_data *vc;
WARN_CONSOLE_UNLOCKED();
- vc = vcs_vc(inode, NULL);
- if (!vc)
- return -ENXIO;
-
size = vc->vc_rows * vc->vc_cols;
- if (use_attributes(inode)) {
- if (use_unicode(inode))
+ if (attr) {
+ if (unicode)
return -EOPNOTSUPP;
- size = 2*size + HEADER_SIZE;
- } else if (use_unicode(inode))
+
+ size = 2 * size + HEADER_SIZE;
+ } else if (unicode)
size *= 4;
+
return size;
}
static loff_t vcs_lseek(struct file *file, loff_t offset, int orig)
{
+ struct inode *inode = file_inode(file);
+ struct vc_data *vc;
int size;
console_lock();
- size = vcs_size(file_inode(file));
+ vc = vcs_vc(inode, NULL);
+ if (!vc) {
+ console_unlock();
+ return -ENXIO;
+ }
+
+ size = vcs_size(vc, use_attributes(inode), use_unicode(inode));
console_unlock();
if (size < 0)
return size;
@@ -265,10 +273,6 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
uni_mode = use_unicode(inode);
attr = use_attributes(inode);
- ret = -ENXIO;
- vc = vcs_vc(inode, &viewed);
- if (!vc)
- goto unlock_out;
ret = -EINVAL;
if (pos < 0)
@@ -288,16 +292,20 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
ssize_t orig_count;
long p = pos;
+ vc = vcs_vc(inode, &viewed);
+ if (!vc) {
+ ret = -ENXIO;
+ break;
+ }
+
/* Check whether we are above size each round,
* as copy_to_user at the end of this loop
* could sleep.
*/
- size = vcs_size(inode);
+ size = vcs_size(vc, attr, uni_mode);
if (size < 0) {
- if (read)
- break;
ret = size;
- goto unlock_out;
+ break;
}
if (pos >= size)
break;
@@ -476,7 +484,11 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
if (!vc)
goto unlock_out;
- size = vcs_size(inode);
+ size = vcs_size(vc, attr, false);
+ if (size < 0) {
+ ret = size;
+ goto unlock_out;
+ }
ret = -EINVAL;
if (pos < 0 || pos > size)
goto unlock_out;
@@ -511,11 +523,18 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
}
}
- /* The vcs_size might have changed while we slept to grab
- * the user buffer, so recheck.
+ /* The vc might have been freed or vcs_size might have changed
+ * while we slept to grab the user buffer, so recheck.
* Return data written up to now on failure.
*/
- size = vcs_size(inode);
+ vc = vcs_vc(inode, &viewed);
+ if (!vc) {
+ if (written)
+ break;
+ ret = -ENXIO;
+ goto unlock_out;
+ }
+ size = vcs_size(vc, attr, false);
if (size < 0) {
if (written)
break;
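
The refactored vcs_size() above computes the size from explicit attr/unicode flags instead of re-deriving the VC from the inode on every call. The rule restated as a standalone function (the HEADER_SIZE value and the -1 error return are illustrative; the driver returns -EOPNOTSUPP):

    #include <stdio.h>

    #define HEADER_SIZE 4

    static int vcs_size(int rows, int cols, int attr, int unicode)
    {
        int size = rows * cols;

        if (attr) {
            if (unicode)
                return -1;            /* attributes + unicode unsupported */
            return 2 * size + HEADER_SIZE;
        }
        return unicode ? 4 * size : size;
    }

    int main(void)
    {
        printf("%d\n", vcs_size(25, 80, 1, 0)); /* 2*2000 + 4 = 4004 */
        return 0;
    }
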
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 9d9e056f94ac..171e643cf200 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -351,7 +351,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
/* allocate everything in one go */
memsize = cols * rows * sizeof(char32_t);
memsize += rows * sizeof(char32_t *);
- p = vmalloc(memsize);
+ p = vzalloc(memsize);
if (!p)
return NULL;
@@ -2508,7 +2508,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
}
return;
case EScsiignore:
- if (c >= 20 && c <= 0x3f)
+ if (c >= 0x20 && c <= 0x3f)
return;
vc->vc_state = ESnormal;
return;
@@ -4587,9 +4587,11 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
console_lock();
if (vc->vc_mode != KD_TEXT)
rc = -EINVAL;
- else if (vc->vc_sw->con_font_set)
+ else if (vc->vc_sw->con_font_set) {
+ if (vc_is_sel(vc))
+ clear_selection();
rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
- else
+ } else
rc = -ENOSYS;
console_unlock();
kfree(font.data);
@@ -4616,9 +4618,11 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
console_unlock();
return -EINVAL;
}
- if (vc->vc_sw->con_font_default)
+ if (vc->vc_sw->con_font_default) {
+ if (vc_is_sel(vc))
+ clear_selection();
rc = vc->vc_sw->con_font_default(vc, &font, s);
- else
+ } else
rc = -ENOSYS;
console_unlock();
if (!rc) {
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 202ee81cfc2b..bae8e2904c56 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -2,6 +2,7 @@
menuconfig UIO
tristate "Userspace I/O drivers"
depends on MMU
+ select DMA_SHARED_BUFFER
help
Enable this to allow the userspace driver core code to be
built. This code allows userspace programs easy access to
@@ -165,4 +166,27 @@ config UIO_HV_GENERIC
to network and storage devices from userspace.
If you compile this as a module, it will be called uio_hv_generic.
+
+config UIO_XILINX_APM
+ tristate "Xilinx AXI Performance Monitor driver"
+ depends on MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP
+ help
+	  This driver is developed for the AXI Performance Monitor IP, designed
+	  to monitor AXI4 traffic for performance analysis of the AXI bus in
+	  the system. The driver maps HW registers and parameters to userspace.
+
+ To compile this driver as a module, choose M here; the module
+ will be called uio_xilinx_apm.
+
+config UIO_XILINX_AI_ENGINE
+ tristate "Xilinx AI Engine driver"
+ select IRQ_SIM
+ select UIO_DMEM_GENIRQ
+ select UIO_PDRV_GENIRQ
+ help
+	  The driver for the Xilinx AI Engine that utilizes uio_dmem_genirq.
+	  The userspace library will use this to interact with the AI Engine
+	  hardware, as well as for memory allocation.
+ Say 'y' only for platforms with the AI Engine IP.
+
endif
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index c285dd2a4539..622cb4404d1d 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+uio-y := uio_core.o uio_dmabuf.o
+
obj-$(CONFIG_UIO) += uio.o
obj-$(CONFIG_UIO_CIF) += uio_cif.o
obj-$(CONFIG_UIO_PDRV_GENIRQ) += uio_pdrv_genirq.o
@@ -9,5 +11,7 @@ obj-$(CONFIG_UIO_PCI_GENERIC) += uio_pci_generic.o
obj-$(CONFIG_UIO_NETX) += uio_netx.o
obj-$(CONFIG_UIO_PRUSS) += uio_pruss.o
obj-$(CONFIG_UIO_MF624) += uio_mf624.o
+obj-$(CONFIG_UIO_XILINX_APM) += uio_xilinx_apm.o
obj-$(CONFIG_UIO_FSL_ELBC_GPCM) += uio_fsl_elbc_gpcm.o
obj-$(CONFIG_UIO_HV_GENERIC) += uio_hv_generic.o
+obj-$(CONFIG_UIO_XILINX_AI_ENGINE) += uio_xilinx_ai_engine.o
diff --git a/drivers/uio/uio.c b/drivers/uio/uio_core.c
index d91e051d1367..4d926429cbab 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio_core.c
@@ -24,6 +24,12 @@
#include <linux/kobject.h>
#include <linux/cdev.h>
#include <linux/uio_driver.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <uapi/linux/uio/uio.h>
+
+#include "uio_dmabuf.h"
#define UIO_MAX_DEVICES (1U << MINORBITS)
@@ -454,6 +460,8 @@ static irqreturn_t uio_interrupt(int irq, void *dev_id)
struct uio_listener {
struct uio_device *dev;
s32 event_count;
+ struct list_head dbufs;
+ struct mutex dbufs_lock; /* protect @dbufs */
};
static int uio_open(struct inode *inode, struct file *filep)
@@ -464,13 +472,13 @@ static int uio_open(struct inode *inode, struct file *filep)
mutex_lock(&minor_lock);
idev = idr_find(&uio_idr, iminor(inode));
- mutex_unlock(&minor_lock);
if (!idev) {
ret = -ENODEV;
+ mutex_unlock(&minor_lock);
goto out;
}
-
get_device(&idev->dev);
+ mutex_unlock(&minor_lock);
if (!try_module_get(idev->owner)) {
ret = -ENODEV;
@@ -500,6 +508,9 @@ static int uio_open(struct inode *inode, struct file *filep)
if (ret)
goto err_infoopen;
+ INIT_LIST_HEAD(&listener->dbufs);
+ mutex_init(&listener->dbufs_lock);
+
return 0;
err_infoopen:
@@ -529,6 +540,10 @@ static int uio_release(struct inode *inode, struct file *filep)
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
+ ret = uio_dmabuf_cleanup(idev, &listener->dbufs, &listener->dbufs_lock);
+ if (ret)
+ dev_err(&idev->dev, "failed to clean up the dma bufs\n");
+
mutex_lock(&idev->info_lock);
if (idev->info && idev->info->release)
ret = idev->info->release(idev->info, inode);
@@ -652,6 +667,33 @@ out:
return retval ? retval : sizeof(s32);
}
+static long uio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct uio_listener *listener = filep->private_data;
+ struct uio_device *idev = listener->dev;
+ long ret;
+
+ if (!idev->info)
+ return -EIO;
+
+ switch (cmd) {
+ case UIO_IOC_MAP_DMABUF:
+ ret = uio_dmabuf_map(idev, &listener->dbufs,
+ &listener->dbufs_lock, (void __user *)arg);
+ break;
+ case UIO_IOC_UNMAP_DMABUF:
+ ret = uio_dmabuf_unmap(idev, &listener->dbufs,
+ &listener->dbufs_lock,
+ (void __user *)arg);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static int uio_find_mem_index(struct vm_area_struct *vma)
{
struct uio_device *idev = vma->vm_private_data;
@@ -821,6 +863,7 @@ static const struct file_operations uio_fops = {
.write = uio_write,
.mmap = uio_mmap,
.poll = uio_poll,
+ .unlocked_ioctl = uio_ioctl,
.fasync = uio_fasync,
.llseek = noop_llseek,
};
@@ -1024,9 +1067,8 @@ void uio_unregister_device(struct uio_info *info)
wake_up_interruptible(&idev->wait);
kill_fasync(&idev->async_queue, SIGIO, POLL_HUP);
- device_unregister(&idev->dev);
-
uio_free_minor(minor);
+ device_unregister(&idev->dev);
return;
}
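
The new uio_ioctl() entry point above dispatches the dma-buf map/unmap requests this series adds. A hedged userspace sketch of how the map ioctl would be driven; the struct layout, ioctl macro and direction value below are assumptions reconstructed from the driver code, and the real definitions must come from the uapi header the patch references (<uapi/linux/uio/uio.h>):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    /* Stand-in definitions; the real ones live in the patched uapi header. */
    struct uio_dmabuf_args {
        int32_t  dbuf_fd;   /* in:  dma-buf file descriptor */
        uint64_t dma_addr;  /* out: bus address of the mapping */
        uint64_t size;      /* out: size of the dma-buf */
        uint8_t  dir;       /* in:  UIO_DMABUF_DIR_* */
    };
    #define UIO_IOC_MAP_DMABUF   _IOWR('u', 0, struct uio_dmabuf_args)
    #define UIO_DMABUF_DIR_BIDIR 1

    int main(void)
    {
        struct uio_dmabuf_args args = {
            .dbuf_fd = -1,  /* replace with a real dma-buf fd */
            .dir = UIO_DMABUF_DIR_BIDIR,
        };
        int uio_fd = open("/dev/uio0", O_RDWR);

        if (uio_fd < 0 || ioctl(uio_fd, UIO_IOC_MAP_DMABUF, &args) < 0) {
            perror("map dmabuf");
            return 1;
        }
        printf("mapped at dma addr 0x%llx, size %llu\n",
               (unsigned long long)args.dma_addr,
               (unsigned long long)args.size);
        close(uio_fd);
        return 0;
    }
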
diff --git a/drivers/uio/uio_dmabuf.c b/drivers/uio/uio_dmabuf.c
new file mode 100644
index 000000000000..b18f1469f6c8
--- /dev/null
+++ b/drivers/uio/uio_dmabuf.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * DMA buf support for UIO device
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/uio_driver.h>
+#include <linux/slab.h>
+
+#include <uapi/linux/uio/uio.h>
+
+#include "uio_dmabuf.h"
+
+struct uio_dmabuf_mem {
+ int dbuf_fd;
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *dbuf_attach;
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+ struct list_head list;
+};
+
+long uio_dmabuf_map(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock, void __user *user_args)
+{
+ struct uio_dmabuf_args args;
+ struct uio_dmabuf_mem *dbuf_mem;
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *dbuf_attach;
+ enum dma_data_direction dir;
+ struct sg_table *sgt;
+ long ret;
+
+ if (copy_from_user(&args, user_args, sizeof(args))) {
+ ret = -EFAULT;
+ dev_err(dev->dev.parent, "failed to copy from user\n");
+ goto err;
+ }
+
+ dbuf = dma_buf_get(args.dbuf_fd);
+ if (IS_ERR(dbuf)) {
+ dev_err(dev->dev.parent, "failed to get dmabuf\n");
+ return PTR_ERR(dbuf);
+ }
+
+ dbuf_attach = dma_buf_attach(dbuf, dev->dev.parent);
+ if (IS_ERR(dbuf_attach)) {
+ dev_err(dev->dev.parent, "failed to attach dmabuf\n");
+ ret = PTR_ERR(dbuf_attach);
+ goto err_put;
+ }
+
+ switch (args.dir) {
+ case UIO_DMABUF_DIR_BIDIR:
+ dir = DMA_BIDIRECTIONAL;
+ break;
+ case UIO_DMABUF_DIR_TO_DEV:
+ dir = DMA_TO_DEVICE;
+ break;
+ case UIO_DMABUF_DIR_FROM_DEV:
+ dir = DMA_FROM_DEVICE;
+ break;
+ default:
+		/* Not reachable with the current check; kept for any future change */
+ dev_err(dev->dev.parent, "invalid direction\n");
+ ret = -EINVAL;
+ goto err_detach;
+ }
+
+ sgt = dma_buf_map_attachment(dbuf_attach, dir);
+ if (IS_ERR(sgt)) {
+ dev_err(dev->dev.parent, "failed to get dmabuf scatterlist\n");
+ ret = PTR_ERR(sgt);
+ goto err_detach;
+ }
+
+	/* Accept only a contiguous buffer */
+ if (sgt->nents != 1) {
+ dma_addr_t next_addr = sg_dma_address(sgt->sgl);
+ struct scatterlist *s;
+ unsigned int i;
+
+ for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ if (!sg_dma_len(s))
+ continue;
+
+ if (sg_dma_address(s) != next_addr) {
+ dev_err(dev->dev.parent,
+ "dmabuf not contiguous\n");
+ ret = -EINVAL;
+ goto err_unmap;
+ }
+
+ next_addr = sg_dma_address(s) + sg_dma_len(s);
+ }
+ }
+
+ dbuf_mem = kzalloc(sizeof(*dbuf_mem), GFP_KERNEL);
+ if (!dbuf_mem) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ dbuf_mem->dbuf_fd = args.dbuf_fd;
+ dbuf_mem->dbuf = dbuf;
+ dbuf_mem->dbuf_attach = dbuf_attach;
+ dbuf_mem->sgt = sgt;
+ dbuf_mem->dir = dir;
+ args.dma_addr = sg_dma_address(sgt->sgl);
+ args.size = dbuf->size;
+
+ if (copy_to_user(user_args, &args, sizeof(args))) {
+ ret = -EFAULT;
+ dev_err(dev->dev.parent, "failed to copy to user\n");
+ goto err_free;
+ }
+
+ mutex_lock(dbufs_lock);
+ list_add(&dbuf_mem->list, dbufs);
+ mutex_unlock(dbufs_lock);
+
+ return 0;
+
+err_free:
+ kfree(dbuf_mem);
+err_unmap:
+ dma_buf_unmap_attachment(dbuf_attach, sgt, dir);
+err_detach:
+ dma_buf_detach(dbuf, dbuf_attach);
+err_put:
+ dma_buf_put(dbuf);
+err:
+ return ret;
+}
+
+long uio_dmabuf_unmap(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock, void __user *user_args)
+{
+ struct uio_dmabuf_args args;
+ struct uio_dmabuf_mem *dbuf_mem;
+ long ret;
+
+ if (copy_from_user(&args, user_args, sizeof(args))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ mutex_lock(dbufs_lock);
+ list_for_each_entry(dbuf_mem, dbufs, list) {
+ if (dbuf_mem->dbuf_fd == args.dbuf_fd)
+ break;
+ }
+
+ if (dbuf_mem->dbuf_fd != args.dbuf_fd) {
+ dev_err(dev->dev.parent, "failed to find the dmabuf (%d)\n",
+ args.dbuf_fd);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ list_del(&dbuf_mem->list);
+ mutex_unlock(dbufs_lock);
+
+ dma_buf_unmap_attachment(dbuf_mem->dbuf_attach, dbuf_mem->sgt,
+ dbuf_mem->dir);
+ dma_buf_detach(dbuf_mem->dbuf, dbuf_mem->dbuf_attach);
+ dma_buf_put(dbuf_mem->dbuf);
+ kfree(dbuf_mem);
+
+ memset(&args, 0x0, sizeof(args));
+
+ if (copy_to_user(user_args, &args, sizeof(args))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(dbufs_lock);
+err:
+ return ret;
+}
+
+int uio_dmabuf_cleanup(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock)
+{
+ struct uio_dmabuf_mem *dbuf_mem, *next;
+
+ mutex_lock(dbufs_lock);
+ list_for_each_entry_safe(dbuf_mem, next, dbufs, list) {
+ list_del(&dbuf_mem->list);
+ dma_buf_unmap_attachment(dbuf_mem->dbuf_attach, dbuf_mem->sgt,
+ dbuf_mem->dir);
+ dma_buf_detach(dbuf_mem->dbuf, dbuf_mem->dbuf_attach);
+ dma_buf_put(dbuf_mem->dbuf);
+ kfree(dbuf_mem);
+ }
+ mutex_unlock(dbufs_lock);
+
+ return 0;
+}
diff --git a/drivers/uio/uio_dmabuf.h b/drivers/uio/uio_dmabuf.h
new file mode 100644
index 000000000000..30200306d53a
--- /dev/null
+++ b/drivers/uio/uio_dmabuf.h
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * DMA buf support for UIO device
+ *
+ */
+
+#ifndef _UIO_DMABUF_H_
+#define _UIO_DMABUF_H_
+
+struct uio_device;
+struct list_head;
+struct mutex;
+
+long uio_dmabuf_map(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock, void __user *user_args);
+long uio_dmabuf_unmap(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock, void __user *user_args);
+
+int uio_dmabuf_cleanup(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock);
+
+#endif
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 44858f70f5f5..39dbd8c56249 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -110,8 +110,10 @@ static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info)
* remember the state so we can allow user space to enable it later.
*/
+ spin_lock(&priv->lock);
if (!test_and_set_bit(0, &priv->flags))
disable_irq_nosync(irq);
+ spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
@@ -125,20 +127,19 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
* in the interrupt controller, but keep track of the
* state to prevent per-irq depth damage.
*
- * Serialize this operation to support multiple tasks.
+ * Serialize this operation to support multiple tasks and concurrency
+ * with irq handler on SMP systems.
*/
spin_lock_irqsave(&priv->lock, flags);
if (irq_on) {
if (test_and_clear_bit(0, &priv->flags))
enable_irq(dev_info->irq);
- spin_unlock_irqrestore(&priv->lock, flags);
} else {
- if (!test_and_set_bit(0, &priv->flags)) {
- spin_unlock_irqrestore(&priv->lock, flags);
- disable_irq(dev_info->irq);
- }
+ if (!test_and_set_bit(0, &priv->flags))
+ disable_irq_nosync(dev_info->irq);
}
+ spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
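
The uio_dmem_genirq fix above puts both the interrupt handler and the irqcontrol path under the same spinlock, so the "disabled" bit and the irq disable depth can no longer race on SMP. A minimal userspace analog, with a pthread mutex standing in for the spinlock and print statements for the irq calls:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool irq_disabled;

    static void disable_irq_nosync(void) { puts("irq disabled"); }
    static void enable_irq(void)         { puts("irq enabled"); }

    static void irqcontrol(bool irq_on)
    {
        /* The state bit is only ever flipped under the lock. */
        pthread_mutex_lock(&lock);
        if (irq_on) {
            if (irq_disabled) {
                irq_disabled = false;
                enable_irq();
            }
        } else if (!irq_disabled) {
            irq_disabled = true;
            disable_irq_nosync();
        }
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        irqcontrol(false);  /* mask */
        irqcontrol(true);   /* unmask */
        return 0;
    }
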
diff --git a/drivers/uio/uio_xilinx_ai_engine.c b/drivers/uio/uio_xilinx_ai_engine.c
new file mode 100644
index 000000000000..174efa805b52
--- /dev/null
+++ b/drivers/uio/uio_xilinx_ai_engine.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx UIO driver for AI Engine
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/irq_sim.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_data/uio_dmem_genirq.h>
+#include <linux/platform_device.h>
+#include <linux/uio_driver.h>
+
+#define DRIVER_NAME "xilinx-aiengine"
+#define XILINX_AI_ENGINE_MAX_IRQ 4
+
+static uint xilinx_ai_engine_mem_cnt = 1;
+module_param_named(mem_cnt, xilinx_ai_engine_mem_cnt, uint, 0444);
+MODULE_PARM_DESC(mem_cnt, "Dynamic memory allocation count (default: 1)");
+
+static uint xilinx_ai_engine_mem_size = 32 * 1024 * 1024;
+module_param_named(mem_size, xilinx_ai_engine_mem_size, uint, 0444);
+MODULE_PARM_DESC(mem_size,
+ "Dynamic memory allocation size in bytes (default: 32 MB)");
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t xilinx_ai_engine_debugfs_write(struct file *f,
+ const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct irq_sim *irq_sim = file_inode(f)->i_private;
+
+ irq_sim_fire(irq_sim, 1);
+
+ return size;
+}
+
+static const struct file_operations debugfs_ops = {
+ .owner = THIS_MODULE,
+ .write = xilinx_ai_engine_debugfs_write,
+};
+
+/**
+ * xilinx_ai_engine_debugfs_init - Initialize the debugfs for irq sim
+ * @pdev: platform device to simulate irq for
+ * @irq_sim: simulated irq
+ *
+ * Initialize the debugfs for irq simulation. This allows the user to
+ * generate the simulated interrupt.
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+static int xilinx_ai_engine_debugfs_init(struct platform_device *pdev,
+ struct irq_sim *irq_sim)
+{
+ int ret;
+ struct dentry *debugfs_dir, *debugfs_file;
+
+ debugfs_dir = debugfs_create_dir("xilinx-ai-engine", NULL);
+ if (!debugfs_dir)
+ return -ENODEV;
+
+ debugfs_file = debugfs_create_file(dev_name(&pdev->dev), 0644,
+ debugfs_dir, irq_sim, &debugfs_ops);
+ if (!debugfs_file) {
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ debugfs_remove_recursive(debugfs_dir);
+ return ret;
+}
+
+/**
+ * xilinx_ai_engine_simulate_irq - Simulate the irq
+ * @pdev: platform device to simulate irq for
+ *
+ * Simulate the irq so it can be generated from userspace. This is only
+ * for debugging purposes.
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+static int xilinx_ai_engine_simulate_irq(struct platform_device *pdev)
+{
+ struct irq_sim *irq_sim;
+ int irq, ret;
+
+ irq_sim = devm_kzalloc(&pdev->dev, sizeof(*irq_sim), GFP_KERNEL);
+ if (!irq_sim)
+ return -ENOMEM;
+
+ /*
+ * Sometimes, the returned base value is 0, so allocate 2 irqs, and
+ * always use the 2nd one.
+ */
+ irq = devm_irq_sim_init(&pdev->dev, irq_sim, 2);
+ if (irq < 0)
+ return irq;
+
+ ret = xilinx_ai_engine_debugfs_init(pdev, irq_sim);
+ if (ret < 0) {
+		dev_err(&pdev->dev, "failed to create debugfs for sim irq");
+ return ret;
+ }
+
+ return irq_sim_irqnum(irq_sim, 1);
+}
+
+#else
+
+static int xilinx_ai_engine_simulate_irq(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+
+#endif
+
+static int xilinx_ai_engine_mem_index(struct uio_info *info,
+ struct vm_area_struct *vma)
+{
+ if (vma->vm_pgoff < MAX_UIO_MAPS) {
+ if (info->mem[vma->vm_pgoff].size == 0)
+ return -1;
+ return (int)vma->vm_pgoff;
+ }
+ return -1;
+}
+
+static const struct vm_operations_struct xilinx_ai_engine_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys,
+#endif
+};
+
+static int xilinx_ai_engine_mmap(struct uio_info *info,
+ struct vm_area_struct *vma)
+{
+ int mi = xilinx_ai_engine_mem_index(info, vma);
+ struct uio_mem *mem;
+
+ if (mi < 0)
+ return -EINVAL;
+ mem = info->mem + mi;
+
+ if (mem->addr & ~PAGE_MASK)
+ return -ENODEV;
+ if (vma->vm_end - vma->vm_start > mem->size)
+ return -EINVAL;
+
+ vma->vm_ops = &xilinx_ai_engine_vm_ops;
+ /*
+	 * Map the dynamic memory as write-combined. Only the first region
+	 * is the mmio region, which is mapped as noncached.
+ */
+ if (mi < 1)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ /*
+ * We cannot use the vm_iomap_memory() helper here,
+ * because vma->vm_pgoff is the map index we looked
+	 * up above in xilinx_ai_engine_mem_index(), rather than an
+ * actual page offset into the mmap.
+ *
+ * So we just do the physical mmap without a page
+ * offset.
+ */
+ return remap_pfn_range(vma,
+ vma->vm_start,
+ mem->addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+static int xilinx_ai_engine_probe(struct platform_device *pdev)
+{
+ struct platform_device *uio;
+ struct uio_dmem_genirq_pdata *pdata;
+ unsigned int i;
+ static const char * const interrupt_names[] = { "interrupt0",
+ "interrupt1",
+ "interrupt2",
+ "interrupt3" };
+ int ret;
+
+ uio = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
+ if (!uio)
+ return -ENOMEM;
+ uio->driver_override = "uio_dmem_genirq";
+ uio->dev.parent = &pdev->dev;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ pdata->num_dynamic_regions = xilinx_ai_engine_mem_cnt;
+ pdata->dynamic_region_sizes = &xilinx_ai_engine_mem_size;
+ pdata->uioinfo.name = DRIVER_NAME;
+ pdata->uioinfo.version = "devicetree";
+ pdata->uioinfo.mmap = xilinx_ai_engine_mmap;
+	/* Set the offset value as its map index for each memory region */
+ for (i = 0; i < MAX_UIO_MAPS; i++)
+ pdata->uioinfo.mem[i].offs = i << PAGE_SHIFT;
+
+ /* TODO: Only one interrupt is supported out of 4 */
+ for (i = 0; i < XILINX_AI_ENGINE_MAX_IRQ; i++) {
+ ret = platform_get_irq_byname(pdev, interrupt_names[i]);
+ if (ret >= 0) {
+ dev_info(&pdev->dev, "%s is used", interrupt_names[i]);
+ break;
+ }
+ }
+
+ /* Interrupt is optional */
+ if (ret < 0) {
+ ret = xilinx_ai_engine_simulate_irq(pdev);
+ if (ret < 0)
+ ret = UIO_IRQ_CUSTOM;
+ }
+ pdata->uioinfo.irq = ret;
+
+ ret = platform_device_add_data(uio, pdata, sizeof(*pdata));
+ if (ret)
+ goto err_out;
+
+ /* Mirror the parent device resource to uio device */
+ ret = platform_device_add_resources(uio, pdev->resource,
+ pdev->num_resources);
+ if (ret)
+ goto err_out;
+
+ /* Configure the dma for uio device using the parent of_node */
+ uio->dev.bus = &platform_bus_type;
+ ret = of_dma_configure(&uio->dev, of_node_get(pdev->dev.of_node), true);
+ of_node_put(pdev->dev.of_node);
+ if (ret)
+ goto err_out;
+
+ ret = platform_device_add(uio);
+ if (ret)
+ goto err_out;
+ platform_set_drvdata(uio, pdata);
+ platform_set_drvdata(pdev, uio);
+
+ dev_info(&pdev->dev, "Xilinx AI Engine UIO driver probed");
+ return 0;
+
+err_out:
+ platform_device_put(uio);
+ dev_err(&pdev->dev,
+ "failed to probe Xilinx AI Engine UIO driver");
+ return ret;
+}
+
+static int xilinx_ai_engine_remove(struct platform_device *pdev)
+{
+ struct platform_device *uio = platform_get_drvdata(pdev);
+
+ platform_device_unregister(uio);
+ of_node_put(pdev->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_ai_engine_of_match[] = {
+ { .compatible = "xlnx,ai_engine", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_ai_engine_of_match);
+
+static struct platform_driver xilinx_ai_engine_driver = {
+ .probe = xilinx_ai_engine_probe,
+ .remove = xilinx_ai_engine_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xilinx_ai_engine_of_match,
+ },
+};
+
+module_platform_driver(xilinx_ai_engine_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/uio/uio_xilinx_apm.c b/drivers/uio/uio_xilinx_apm.c
new file mode 100644
index 000000000000..90d70a5a9425
--- /dev/null
+++ b/drivers/uio/uio_xilinx_apm.c
@@ -0,0 +1,369 @@
+/*
+ * Xilinx AXI Performance Monitor
+ *
+ * Copyright (C) 2013 Xilinx, Inc. All rights reserved.
+ *
+ * Description:
+ * This driver is developed for the AXI Performance Monitor IP,
+ * designed to monitor AXI4 traffic for performance analysis
+ * of the AXI bus in the system. The driver maps HW registers and
+ * parameters to userspace. Userspace need not clear the IP's
+ * interrupt since the driver clears it.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uio_driver.h>
+
+#define XAPM_IS_OFFSET 0x0038 /* Interrupt Status Register */
+#define DRV_NAME "xilinxapm_uio"
+#define DRV_VERSION "1.0"
+#define UIO_DUMMY_MEMSIZE 4096
+#define XAPM_MODE_ADVANCED 1
+#define XAPM_MODE_PROFILE 2
+#define XAPM_MODE_TRACE 3
+
+/**
+ * struct xapm_param - HW parameters structure
+ * @mode: Mode in which APM is working
+ * @maxslots: Maximum number of Slots in APM
+ * @eventcnt: Event counting enabled in APM
+ * @eventlog: Event logging enabled in APM
+ * @sampledcnt: Sampled metric counters enabled in APM
+ * @numcounters: Number of counters in APM
+ * @metricwidth: Metric Counter width (32/64)
+ * @sampledwidth: Sampled metric counter width
+ * @globalcntwidth: Global Clock counter width
+ * @scalefactor: Scaling factor
+ * @isr: Interrupts info shared to userspace
+ * @is_32bit_filter: Flags for 32bit filter
+ * @clk: Clock handle
+ */
+struct xapm_param {
+ u32 mode;
+ u32 maxslots;
+ u32 eventcnt;
+ u32 eventlog;
+ u32 sampledcnt;
+ u32 numcounters;
+ u32 metricwidth;
+ u32 sampledwidth;
+ u32 globalcntwidth;
+ u32 scalefactor;
+ u32 isr;
+ bool is_32bit_filter;
+ struct clk *clk;
+};
+
+/**
+ * struct xapm_dev - Global driver structure
+ * @info: uio_info structure
+ * @param: xapm_param structure
+ * @regs: IOmapped base address
+ */
+struct xapm_dev {
+ struct uio_info info;
+ struct xapm_param param;
+ void __iomem *regs;
+};
+
+/**
+ * xapm_handler - Interrupt handler for APM
+ * @irq: IRQ number
+ * @info: Pointer to uio_info structure
+ *
+ * Return: Always returns IRQ_HANDLED
+ */
+static irqreturn_t xapm_handler(int irq, struct uio_info *info)
+{
+ struct xapm_dev *xapm = (struct xapm_dev *)info->priv;
+ void *ptr;
+
+ ptr = (unsigned long *)xapm->info.mem[1].addr;
+ /* Clear the interrupt and copy the ISR value to userspace */
+ xapm->param.isr = readl(xapm->regs + XAPM_IS_OFFSET);
+ writel(xapm->param.isr, xapm->regs + XAPM_IS_OFFSET);
+ memcpy(ptr, &xapm->param, sizeof(struct xapm_param));
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * xapm_getprop - Retrieves dts properties to param structure
+ * @pdev: Pointer to platform device
+ * @param: Pointer to param structure
+ *
+ * Returns: '0' on success and failure value on error
+ */
+static int xapm_getprop(struct platform_device *pdev, struct xapm_param *param)
+{
+ u32 mode = 0;
+ int ret;
+ struct device_node *node;
+
+ node = pdev->dev.of_node;
+
+ /* Retrieve required dts properties and fill param structure */
+ ret = of_property_read_u32(node, "xlnx,enable-profile", &mode);
+ if (ret < 0)
+ dev_info(&pdev->dev, "no property xlnx,enable-profile\n");
+ else if (mode)
+ param->mode = XAPM_MODE_PROFILE;
+
+ ret = of_property_read_u32(node, "xlnx,enable-trace", &mode);
+ if (ret < 0)
+ dev_info(&pdev->dev, "no property xlnx,enable-trace\n");
+ else if (mode)
+ param->mode = XAPM_MODE_TRACE;
+
+ ret = of_property_read_u32(node, "xlnx,num-monitor-slots",
+ &param->maxslots);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,num-monitor-slots");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,enable-event-count",
+ &param->eventcnt);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,enable-event-count");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,enable-event-log",
+ &param->eventlog);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,enable-event-log");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,have-sampled-metric-cnt",
+ &param->sampledcnt);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,have-sampled-metric-cnt");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-of-counters",
+ &param->numcounters);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,num-of-counters");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,metric-count-width",
+ &param->metricwidth);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,metric-count-width");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,metrics-sample-count-width",
+ &param->sampledwidth);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property metrics-sample-count-width");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,global-count-width",
+ &param->globalcntwidth);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,global-count-width");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,metric-count-scale",
+ &param->scalefactor);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,metric-count-scale");
+ return ret;
+ }
+
+ param->is_32bit_filter = of_property_read_bool(node,
+ "xlnx,id-filter-32bit");
+
+ return 0;
+}
+
+/**
+ * xapm_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xapm_probe(struct platform_device *pdev)
+{
+ struct xapm_dev *xapm;
+ struct resource *res;
+ int irq;
+ int ret;
+ void *ptr;
+
+ xapm = devm_kzalloc(&pdev->dev, (sizeof(struct xapm_dev)), GFP_KERNEL);
+ if (!xapm)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xapm->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xapm->regs)) {
+ dev_err(&pdev->dev, "unable to iomap registers\n");
+ return PTR_ERR(xapm->regs);
+ }
+
+ xapm->param.clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(xapm->param.clk)) {
+ if (PTR_ERR(xapm->param.clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "axi clock error\n");
+ return PTR_ERR(xapm->param.clk);
+ }
+
+ ret = clk_prepare_enable(xapm->param.clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ /* Default to Advanced mode when the DT specifies no mode */
+ xapm->param.mode = XAPM_MODE_ADVANCED;
+ ret = xapm_getprop(pdev, &xapm->param);
+ if (ret < 0)
+ goto err_clk_dis;
+
+ xapm->info.mem[0].name = "xilinx_apm";
+ xapm->info.mem[0].addr = res->start;
+ xapm->info.mem[0].size = resource_size(res);
+ xapm->info.mem[0].memtype = UIO_MEM_PHYS;
+
+ xapm->info.mem[1].addr = (unsigned long)kzalloc(UIO_DUMMY_MEMSIZE,
+ GFP_KERNEL);
+ if (!xapm->info.mem[1].addr) {
+ ret = -ENOMEM;
+ goto err_clk_dis;
+ }
+ ptr = (void *)xapm->info.mem[1].addr;
+ xapm->info.mem[1].size = UIO_DUMMY_MEMSIZE;
+ xapm->info.mem[1].memtype = UIO_MEM_LOGICAL;
+
+ xapm->info.name = "axi-pmon";
+ xapm->info.version = DRV_VERSION;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "unable to get irq\n");
+ ret = irq;
+ goto err_clk_dis;
+ }
+
+ xapm->info.irq = irq;
+ xapm->info.handler = xapm_handler;
+ xapm->info.priv = xapm;
+ xapm->info.irq_flags = IRQF_SHARED;
+
+ memcpy(ptr, &xapm->param, sizeof(struct xapm_param));
+
+ ret = uio_register_device(&pdev->dev, &xapm->info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to register to UIO\n");
+ goto err_clk_dis;
+ }
+
+ platform_set_drvdata(pdev, xapm);
+
+ dev_info(&pdev->dev, "Probed Xilinx APM\n");
+
+ return 0;
+
+err_clk_dis:
+ clk_disable_unprepare(xapm->param.clk);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ return ret;
+}
+
+/**
+ * xapm_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always returns '0'
+ */
+static int xapm_remove(struct platform_device *pdev)
+{
+ struct xapm_dev *xapm = platform_get_drvdata(pdev);
+
+ uio_unregister_device(&xapm->info);
+ clk_disable_unprepare(xapm->param.clk);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused xapm_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xapm_dev *xapm = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(xapm->param.clk);
+ return 0;
+};
+
+static int __maybe_unused xapm_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xapm_dev *xapm = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_prepare_enable(xapm->param.clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+ return 0;
+};
+
+static const struct dev_pm_ops xapm_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xapm_runtime_suspend, xapm_runtime_resume)
+ SET_RUNTIME_PM_OPS(xapm_runtime_suspend,
+ xapm_runtime_resume, NULL)
+};
+
+static const struct of_device_id xapm_of_match[] = {
+ { .compatible = "xlnx,axi-perf-monitor", },
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, xapm_of_match);
+
+static struct platform_driver xapm_driver = {
+ .driver = {
+ .name = "xilinx-axipmon",
+ .of_match_table = xapm_of_match,
+ .pm = &xapm_dev_pm_ops,
+ },
+ .probe = xapm_probe,
+ .remove = xapm_remove,
+};
+
+module_platform_driver(xapm_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx AXI Performance Monitor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c
index b0a29efe7d31..14878e3f8fbf 100644
--- a/drivers/usb/cdns3/cdns3-pci-wrap.c
+++ b/drivers/usb/cdns3/cdns3-pci-wrap.c
@@ -60,6 +60,11 @@ static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev)
return NULL;
}
+ if (func->devfn != PCI_DEV_FN_HOST_DEVICE &&
+ func->devfn != PCI_DEV_FN_OTG) {
+ return NULL;
+ }
+
return func;
}
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 296f2ee1b680..61283e7e602a 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -549,9 +549,9 @@ static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
trace_cdns3_wa2(priv_ep, "removes eldest request");
kfree(priv_req->request.buf);
+ list_del_init(&priv_req->list);
cdns3_gadget_ep_free_request(&priv_ep->endpoint,
&priv_req->request);
- list_del_init(&priv_req->list);
--priv_ep->wa2_counter;
if (!chain)
@@ -661,7 +661,11 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
return;
}
- if (request->complete) {
+ /*
+ * The zlp request is appended by the driver itself, so there is
+ * no need to call usb_gadget_giveback_request() to notify the
+ * gadget composite driver.
+ */
+ if (request->complete && request->buf != priv_dev->zlp_buf) {
spin_unlock(&priv_dev->lock);
usb_gadget_giveback_request(&priv_ep->endpoint,
request);
@@ -1259,6 +1263,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
ep_cfg &= ~EP_CFG_ENABLE;
writel(ep_cfg, &priv_dev->regs->ep_cfg);
priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
+ priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
}
cdns3_transfer_completed(priv_dev, priv_ep);
} else if (!(priv_ep->flags & EP_STALLED) &&
@@ -1950,11 +1955,11 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep)
while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
+ list_del_init(&priv_req->list);
kfree(priv_req->request.buf);
cdns3_gadget_ep_free_request(&priv_ep->endpoint,
&priv_req->request);
- list_del_init(&priv_req->list);
--priv_ep->wa2_counter;
}
@@ -2166,6 +2171,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
struct usb_request *request;
struct cdns3_request *priv_req;
struct cdns3_trb *trb = NULL;
+ struct cdns3_trb trb_tmp;
int ret;
int val;
@@ -2175,8 +2181,10 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
if (request) {
priv_req = to_cdns3_request(request);
trb = priv_req->trb;
- if (trb)
+ if (trb) {
+ trb_tmp = *trb;
trb->control = trb->control ^ TRB_CYCLE;
+ }
}
writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
@@ -2191,7 +2199,8 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
if (request) {
if (trb)
- trb->control = trb->control ^ TRB_CYCLE;
+ *trb = trb_tmp;
+
cdns3_rearm_transfer(priv_ep, 1);
}
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 6911aef500e9..ff61f88fc867 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -203,6 +203,7 @@ struct hw_bank {
* @in_lpm: if the core in low power mode
* @wakeup_int: if wakeup interrupt occur
* @rev: The revision number for controller
+ * @mutex: protect against concurrent code execution during a role switch
*/
struct ci_hdrc {
struct device *dev;
@@ -256,6 +257,7 @@ struct ci_hdrc {
bool in_lpm;
bool wakeup_int;
enum ci_revision rev;
+ struct mutex mutex;
};
static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 85561b3194a1..0fe545815c5c 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -70,6 +70,10 @@ static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = {
CI_HDRC_PMQOS,
};
+static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = {
+ .flags = CI_HDRC_SUPPORTS_RUNTIME_PM,
+};
+
static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
{ .compatible = "fsl,imx23-usb", .data = &imx23_usb_data},
{ .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
@@ -80,6 +84,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
{ .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data},
{ .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data},
{ .compatible = "fsl,imx7ulp-usb", .data = &imx7ulp_usb_data},
+ { .compatible = "fsl,imx8ulp-usb", .data = &imx8ulp_usb_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index c044fba463e4..83238293e5be 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -30,6 +30,7 @@ static const struct ci_hdrc_platform_data ci_default_pdata = {
static struct ci_hdrc_platform_data ci_zynq_pdata = {
.capoffset = DEF_CAPOFFSET,
+ .flags = CI_HDRC_PHY_VBUS_CONTROL,
};
static const struct of_device_id ci_hdrc_usb2_of_match[] = {
@@ -58,6 +59,10 @@ static int ci_hdrc_usb2_probe(struct platform_device *pdev)
if (match && match->data) {
/* struct copy */
*ci_pdata = *(struct ci_hdrc_platform_data *)match->data;
+ ci_pdata->usb_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy",
+ 0);
+ if (IS_ERR(ci_pdata->usb_phy))
+ return PTR_ERR(ci_pdata->usb_phy);
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 74cdc7ef476a..a07c27004bae 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -541,6 +541,13 @@ static irqreturn_t ci_irq_handler(int irq, void *data)
u32 otgsc = 0;
if (ci->in_lpm) {
+ /*
+ * If a wakeup irq is already pending, just return and
+ * wait for the resume to finish first.
+ */
+ if (ci->wakeup_int)
+ return IRQ_HANDLED;
+
disable_irq_nosync(irq);
ci->wakeup_int = true;
pm_runtime_get(ci->dev);
@@ -960,9 +967,16 @@ static ssize_t role_store(struct device *dev,
strlen(ci->roles[role]->name)))
break;
- if (role == CI_ROLE_END || role == ci->role)
+ if (role == CI_ROLE_END)
return -EINVAL;
+ mutex_lock(&ci->mutex);
+
+ if (role == ci->role) {
+ mutex_unlock(&ci->mutex);
+ return n;
+ }
+
pm_runtime_get_sync(dev);
disable_irq(ci->irq);
ci_role_stop(ci);
@@ -971,6 +985,7 @@ static ssize_t role_store(struct device *dev,
ci_handle_vbus_change(ci);
enable_irq(ci->irq);
pm_runtime_put_sync(dev);
+ mutex_unlock(&ci->mutex);
return (ret == 0) ? n : ret;
}
@@ -1006,6 +1021,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&ci->lock);
+ mutex_init(&ci->mutex);
ci->dev = dev;
ci->platdata = dev_get_platdata(dev);
ci->imx28_write_fix = !!(ci->platdata->flags &
@@ -1075,7 +1091,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ret = ci_usb_phy_init(ci);
if (ret) {
dev_err(dev, "unable to init phy: %d\n", ret);
- return ret;
+ goto ulpi_exit;
}
ci->hw_bank.phys = res->start;
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index f5f56ee07729..21af19a1c1e0 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -57,6 +57,14 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
priv->enabled = enable;
}
+ if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL &&
+ ci->usb_phy && ci->usb_phy->set_vbus) {
+ if (enable)
+ ci->usb_phy->set_vbus(ci->usb_phy, 1);
+ else
+ ci->usb_phy->set_vbus(ci->usb_phy, 0);
+ }
+
if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) {
/*
* Marvell 28nm HSIC PHY requires forcing the port to HS mode.
@@ -65,6 +73,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
hw_port_test_set(ci, 5);
hw_port_test_set(ci, 0);
}
+
return 0;
};
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index fbfb02e05c97..0fcb8d8776cc 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -164,8 +164,10 @@ static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci)
static void ci_handle_id_switch(struct ci_hdrc *ci)
{
- enum ci_role role = ci_otg_role(ci);
+ enum ci_role role;
+ mutex_lock(&ci->mutex);
+ role = ci_otg_role(ci);
if (role != ci->role) {
dev_dbg(ci->dev, "switching from %s to %s\n",
ci_role(ci)->name, ci->roles[role]->name);
@@ -188,6 +190,7 @@ static void ci_handle_id_switch(struct ci_hdrc *ci)
if (role == CI_ROLE_GADGET)
ci_handle_vbus_change(ci);
}
+ mutex_unlock(&ci->mutex);
}
/**
* ci_otg_work - perform otg (vbus/id) event handle
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 6ed4b00dba96..b26c1a0c6de3 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -256,8 +256,10 @@ static void ci_otg_del_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
ci->enabled_otg_timer_bits &= ~(1 << t);
if (ci->next_otg_timer == t) {
if (ci->enabled_otg_timer_bits == 0) {
+ spin_unlock_irqrestore(&ci->lock, flags);
/* No enabled timers after delete it */
hrtimer_cancel(&ci->otg_fsm_hrtimer);
+ spin_lock_irqsave(&ci->lock, flags);
ci->next_otg_timer = NUM_OTG_FSM_TIMERS;
} else {
/* Find the next timer */
@@ -471,6 +473,11 @@ static void ci_otg_drv_vbus(struct otg_fsm *fsm, int on)
return;
}
}
+
+ if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL &&
+ ci->usb_phy && ci->usb_phy->set_vbus)
+ ci->usb_phy->set_vbus(ci->usb_phy, 1);
+
/* Disable data pulse irq */
hw_write_otgsc(ci, OTGSC_DPIE, 0);
@@ -480,6 +487,10 @@ static void ci_otg_drv_vbus(struct otg_fsm *fsm, int on)
if (ci->platdata->reg_vbus)
regulator_disable(ci->platdata->reg_vbus);
+ if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL &&
+ ci->usb_phy && ci->usb_phy->set_vbus)
+ ci->usb_phy->set_vbus(ci->usb_phy, 0);
+
fsm->a_bus_drop = 1;
fsm->a_bus_req = 0;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5dc8827ede7e..edb62b49f572 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1843,6 +1843,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
},
+ { USB_DEVICE(0x0c26, 0x0020), /* Icom ICF3400 Serie */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
{ USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 77b1802f829b..fdfa2da24287 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1898,6 +1898,8 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
if (request.req.wLength > USBTMC_BUFSIZE)
return -EMSGSIZE;
+ if (request.req.wLength == 0) /* Length-0 requests are never IN */
+ request.req.bRequestType &= ~USB_DIR_IN;
is_in = request.req.bRequestType & USB_DIR_IN;
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
index ed204cbb63ea..67d00d0ef621 100644
--- a/drivers/usb/common/usb-conn-gpio.c
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -38,6 +38,7 @@ struct usb_conn_info {
struct gpio_desc *vbus_gpiod;
int id_irq;
int vbus_irq;
+ bool initial_detection;
};
/**
@@ -82,11 +83,13 @@ static void usb_conn_detect_cable(struct work_struct *work)
dev_dbg(info->dev, "role %d/%d, gpios: id %d, vbus %d\n",
info->last_role, role, id, vbus);
- if (info->last_role == role) {
+ if (!info->initial_detection && info->last_role == role) {
dev_warn(info->dev, "repeated role: %d\n", role);
return;
}
+ info->initial_detection = false;
+
if (info->last_role == USB_ROLE_HOST)
regulator_disable(info->vbus);
@@ -206,6 +209,7 @@ static int usb_conn_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
/* Perform initial detection */
+ info->initial_detection = true;
usb_conn_queue_dwork(info, 0);
return 0;
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 6cf22c27f2d2..be8738750948 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -170,3 +170,44 @@ void hcd_buffer_free(
}
dma_free_coherent(hcd->self.sysdev, size, addr, dma);
}
+
+void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
+ size_t size, gfp_t mem_flags, dma_addr_t *dma)
+{
+ if (size == 0)
+ return NULL;
+
+ if (hcd->localmem_pool)
+ return gen_pool_dma_alloc_align(hcd->localmem_pool,
+ size, dma, PAGE_SIZE);
+
+ /* some USB hosts just use PIO */
+ if (!hcd_uses_dma(hcd)) {
+ *dma = DMA_MAPPING_ERROR;
+ return (void *)__get_free_pages(mem_flags,
+ get_order(size));
+ }
+
+ return dma_alloc_coherent(hcd->self.sysdev,
+ size, dma, mem_flags);
+}
+
+void hcd_buffer_free_pages(struct usb_hcd *hcd,
+ size_t size, void *addr, dma_addr_t dma)
+{
+ if (!addr)
+ return;
+
+ if (hcd->localmem_pool) {
+ gen_pool_free(hcd->localmem_pool,
+ (unsigned long)addr, size);
+ return;
+ }
+
+ if (!hcd_uses_dma(hcd)) {
+ free_pages((unsigned long)addr, get_order(size));
+ return;
+ }
+
+ dma_free_coherent(hcd->self.sysdev, size, addr, dma);
+}
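+
+/*
+ * These helpers are meant to be used as a matched pair:
+ * hcd_buffer_free_pages() selects the release path (localmem genpool,
+ * free_pages() or dma_free_coherent()) from the same hcd state that
+ * drove the allocation. A minimal caller sketch:
+ *
+ *	dma_addr_t dma;
+ *	void *mem = hcd_buffer_alloc_pages(hcd, size, GFP_KERNEL, &dma);
+ *
+ *	if (!mem)
+ *		return -ENOMEM;
+ *	...
+ *	hcd_buffer_free_pages(hcd, size, mem, dma);
+ */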
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index d037deb95884..087ab2248855 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -173,6 +173,7 @@ static int connected(struct usb_dev_state *ps)
static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
{
struct usb_dev_state *ps = usbm->ps;
+ struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
unsigned long flags;
spin_lock_irqsave(&ps->lock, flags);
@@ -181,8 +182,8 @@ static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
list_del(&usbm->memlist);
spin_unlock_irqrestore(&ps->lock, flags);
- usb_free_coherent(ps->dev, usbm->size, usbm->mem,
- usbm->dma_handle);
+ hcd_buffer_free_pages(hcd, usbm->size,
+ usbm->mem, usbm->dma_handle);
usbfs_decrease_memory_usage(
usbm->size + sizeof(struct usb_memory));
kfree(usbm);
@@ -221,7 +222,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
size_t size = vma->vm_end - vma->vm_start;
void *mem;
unsigned long flags;
- dma_addr_t dma_handle;
+ dma_addr_t dma_handle = DMA_MAPPING_ERROR;
int ret;
ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory));
@@ -234,8 +235,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
goto error_decrease_mem;
}
- mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN,
- &dma_handle);
+ mem = hcd_buffer_alloc_pages(hcd,
+ size, GFP_USER | __GFP_NOWARN, &dma_handle);
if (!mem) {
ret = -ENOMEM;
goto error_free_usbm;
@@ -251,7 +252,14 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
usbm->vma_use_count = 1;
INIT_LIST_HEAD(&usbm->memlist);
- if (hcd->localmem_pool || !hcd_uses_dma(hcd)) {
+ /*
+ * In DMA-unavailable cases, hcd_buffer_alloc_pages allocates
+ * normal pages and assigns DMA_MAPPING_ERROR to dma_handle. Check
+ * whether we are in such cases, and then use remap_pfn_range (or
+ * dma_mmap_coherent) to map normal (or DMA) pages into the user
+ * space, respectively.
+ */
+ if (dma_handle == DMA_MAPPING_ERROR) {
if (remap_pfn_range(vma, vma->vm_start,
virt_to_phys(usbm->mem) >> PAGE_SHIFT,
size, vma->vm_page_prot) < 0) {
@@ -726,6 +734,7 @@ static int driver_resume(struct usb_interface *intf)
return 0;
}
+#ifdef CONFIG_PM
/* The following routines apply to the entire device, not interfaces */
void usbfs_notify_suspend(struct usb_device *udev)
{
@@ -744,6 +753,7 @@ void usbfs_notify_resume(struct usb_device *udev)
}
mutex_unlock(&usbfs_mutex);
}
+#endif
struct usb_driver usbfs_driver = {
.name = "usbfs",
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index fde211519a97..d58098c2af10 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1688,7 +1688,6 @@ static void usb_giveback_urb_bh(unsigned long param)
spin_lock_irq(&bh->lock);
bh->running = true;
- restart:
list_replace_init(&bh->head, &local_list);
spin_unlock_irq(&bh->lock);
@@ -1702,10 +1701,17 @@ static void usb_giveback_urb_bh(unsigned long param)
bh->completing_ep = NULL;
}
- /* check if there are new URBs to giveback */
+ /*
+ * Give back any remaining URBs in the next tasklet run so
+ * that this function does not run for too long at once.
+ */
spin_lock_irq(&bh->lock);
- if (!list_empty(&bh->head))
- goto restart;
+ if (!list_empty(&bh->head)) {
+ if (bh->high_prio)
+ tasklet_hi_schedule(&bh->bh);
+ else
+ tasklet_schedule(&bh->bh);
+ }
bh->running = false;
spin_unlock_irq(&bh->lock);
}
@@ -1730,7 +1736,7 @@ static void usb_giveback_urb_bh(unsigned long param)
void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct giveback_urb_bh *bh;
- bool running, high_prio_bh;
+ bool running;
/* pass status to tasklet via unlinked */
if (likely(!urb->unlinked))
@@ -1741,13 +1747,10 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
return;
}
- if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe)) {
+ if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe))
bh = &hcd->high_prio_bh;
- high_prio_bh = true;
- } else {
+ else
bh = &hcd->low_prio_bh;
- high_prio_bh = false;
- }
spin_lock(&bh->lock);
list_add_tail(&urb->urb_list, &bh->head);
@@ -1756,7 +1759,7 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
if (running)
;
- else if (high_prio_bh)
+ else if (bh->high_prio)
tasklet_hi_schedule(&bh->bh);
else
tasklet_schedule(&bh->bh);
@@ -2796,6 +2799,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
/* initialize tasklets */
init_giveback_urb_bh(&hcd->high_prio_bh);
+ hcd->high_prio_bh.high_prio = true;
init_giveback_urb_bh(&hcd->low_prio_bh);
/* enable irqs just before we start the controller,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4cf0dc7f330d..238674fab7ce 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -41,8 +41,11 @@
#define USB_PRODUCT_USB5534B 0x5534
#define USB_VENDOR_CYPRESS 0x04b4
#define USB_PRODUCT_CY7C65632 0x6570
-#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
-#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
+#define USB_VENDOR_TEXAS_INSTRUMENTS 0x0451
+#define USB_PRODUCT_TUSB8041_USB3 0x8140
+#define USB_PRODUCT_TUSB8041_USB2 0x8142
+#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND BIT(0)
+#define HUB_QUIRK_DISABLE_AUTOSUSPEND BIT(1)
#define USB_TP_TRANSMISSION_DELAY 40 /* ns */
#define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
@@ -145,6 +148,10 @@ int usb_device_supports_lpm(struct usb_device *udev)
if (udev->quirks & USB_QUIRK_NO_LPM)
return 0;
+ /* Skip if the device BOS descriptor couldn't be read */
+ if (!udev->bos)
+ return 0;
+
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
*/
@@ -321,6 +328,10 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER)
return;
+ /* Skip if the device BOS descriptor couldn't be read */
+ if (!udev->bos)
+ return;
+
hub = usb_hub_to_struct_hub(udev->parent);
/* It doesn't take time to transition the roothub into U0, since it
* doesn't have an upstream link.
@@ -2354,17 +2365,25 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
}
} else if (desc->bLength == sizeof
(struct usb_otg_descriptor)) {
- /* Set a_alt_hnp_support for legacy otg device */
- err = usb_control_msg(udev,
- usb_sndctrlpipe(udev, 0),
- USB_REQ_SET_FEATURE, 0,
- USB_DEVICE_A_ALT_HNP_SUPPORT,
- 0, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
- if (err < 0)
- dev_err(&udev->dev,
- "set a_alt_hnp_support failed: %d\n",
- err);
+ /*
+ * We are operating on a legacy OTG device.
+ * Only tell it that it is attached to the wrong
+ * port when another port that does support HNP
+ * actually exists.
+ */
+ if (bus->otg_port != 0) {
+ /* Set a_alt_hnp_support for legacy otg device */
+ err = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ USB_DEVICE_A_ALT_HNP_SUPPORT,
+ 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ if (err < 0)
+ dev_err(&udev->dev,
+ "set a_alt_hnp_support failed: %d\n",
+ err);
+ }
}
}
#endif
@@ -2376,9 +2395,8 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
* usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
- * This is only called by usb_new_device() and usb_authorize_device()
- * and FIXME -- all comments that apply to them apply here wrt to
- * environment.
+ * This is only called by usb_new_device() -- all comments that apply there
+ * apply here wrt to environment.
*
* If the device is WUSB and not authorized, we don't attempt to read
* the string descriptors, as they will be errored out by the device
@@ -2676,7 +2694,8 @@ out_authorized:
}
/*
- * Return 1 if port speed is SuperSpeedPlus, 0 otherwise
+ * Return 1 if port speed is SuperSpeedPlus, 0 otherwise or if the
+ * capability couldn't be checked.
* check it from the link protocol field of the current speed ID attribute.
* current speed ID is got from ext port status request. Sublink speed attribute
* table is returned with the hub BOS SSP device capability descriptor
@@ -2686,8 +2705,12 @@ static int port_speed_is_ssp(struct usb_device *hdev, int speed_id)
int ssa_count;
u32 ss_attr;
int i;
- struct usb_ssp_cap_descriptor *ssp_cap = hdev->bos->ssp_cap;
+ struct usb_ssp_cap_descriptor *ssp_cap;
+ if (!hdev->bos)
+ return 0;
+
+ ssp_cap = hdev->bos->ssp_cap;
if (!ssp_cap)
return 0;
@@ -4089,8 +4112,15 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
enum usb3_link_state state)
{
int timeout, ret;
- __u8 u1_mel = udev->bos->ss_cap->bU1devExitLat;
- __le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat;
+ __u8 u1_mel;
+ __le16 u2_mel;
+
+ /* Skip if the device BOS descriptor couldn't be read */
+ if (!udev->bos)
+ return;
+
+ u1_mel = udev->bos->ss_cap->bU1devExitLat;
+ u2_mel = udev->bos->ss_cap->bU2DevExitLat;
/* If the device says it doesn't have *any* exit latency to come out of
* U1 or U2, it's probably lying. Assume it doesn't implement that link
@@ -5587,6 +5617,16 @@ static const struct usb_device_id hub_id_table[] = {
.idVendor = USB_VENDOR_GENESYS_LOGIC,
.bInterfaceClass = USB_CLASS_HUB,
.driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND},
+ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ | USB_DEVICE_ID_MATCH_PRODUCT,
+ .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
+ .idProduct = USB_PRODUCT_TUSB8041_USB2,
+ .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ | USB_DEVICE_ID_MATCH_PRODUCT,
+ .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
+ .idProduct = USB_PRODUCT_TUSB8041_USB3,
+ .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
.bDeviceClass = USB_CLASS_HUB},
{ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
@@ -5923,6 +5963,11 @@ re_enumerate_no_bos:
* the reset is over (using their post_reset method).
*
* Return: The same as for usb_reset_and_verify_device().
+ * However, if a reset is already in progress (for instance, if a
+ * driver doesn't have pre_reset() or post_reset() callbacks, and while
+ * being unbound or re-bound during the ongoing reset its disconnect()
+ * or probe() routine tries to perform a second, nested reset), the
+ * routine returns -EINPROGRESS.
*
* Note:
* The caller must own the device lock. For example, it's safe to use
@@ -5956,6 +6001,10 @@ int usb_reset_device(struct usb_device *udev)
return -EISDIR;
}
+ if (udev->reset_in_progress)
+ return -EINPROGRESS;
+ udev->reset_in_progress = 1;
+
port_dev = hub->ports[udev->portnum - 1];
/*
@@ -6020,6 +6069,7 @@ int usb_reset_device(struct usb_device *udev)
usb_autosuspend_device(udev);
memalloc_noio_restore(noio_flag);
+ udev->reset_in_progress = 0;
return ret;
}
EXPORT_SYMBOL_GPL(usb_reset_device);
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index a8f23f8bc6ef..1c455800f7d3 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -141,7 +141,7 @@ static inline int hub_is_superspeedplus(struct usb_device *hdev)
{
return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS &&
le16_to_cpu(hdev->descriptor.bcdUSB) >= 0x0310 &&
- hdev->bos->ssp_cap);
+ hdev->bos && hdev->bos->ssp_cap);
}
static inline unsigned hub_power_on_good_delay(struct usb_hub *hub)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f8f2de7899a9..48cda9b7a8f2 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -362,6 +362,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
{ USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
+ /* Realforce 87U Keyboard */
+ { USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM },
+
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -388,6 +391,15 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Kingston DataTraveler 3.0 */
{ USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
+ /* NVIDIA Jetson devices in Force Recovery mode */
+ { USB_DEVICE(0x0955, 0x7018), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7019), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7418), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7721), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7c18), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7e19), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7f21), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
@@ -425,6 +437,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* novation SoundControl XL */
{ USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Focusrite Scarlett Solo USB */
+ { USB_DEVICE(0x1235, 0x8211), .driver_info =
+ USB_QUIRK_DISCONNECT_SUSPEND },
+
/* Huawei 4G LTE module */
{ USB_DEVICE(0x12d1, 0x15bb), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
@@ -438,6 +454,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1532, 0x0116), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Lenovo ThinkPad OneLink+ Dock twin hub controllers (VIA Labs VL812) */
+ { USB_DEVICE(0x17ef, 0x1018), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x17ef, 0x1019), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Lenovo USB-C to Ethernet Adapter RTL8153-04 */
{ USB_DEVICE(0x17ef, 0x720c), .driver_info = USB_QUIRK_NO_LPM },
@@ -511,6 +531,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ /* Alcor Link AK9563 SC Reader used in 2022 Lenovo ThinkPads */
+ { USB_DEVICE(0x2ce3, 0x9563), .driver_info = USB_QUIRK_NO_LPM },
+
/* DELL USB GEN2 */
{ USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 2f594c88d905..f19694e69f5c 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -889,11 +889,7 @@ read_descriptors(struct file *filp, struct kobject *kobj,
size_t srclen, n;
int cfgno;
void *src;
- int retval;
- retval = usb_lock_device_interruptible(udev);
- if (retval < 0)
- return -EINTR;
/* The binary attribute begins with the device descriptor.
* Following that are the raw descriptor entries for all the
* configurations (config plus subsidiary descriptors).
@@ -918,7 +914,6 @@ read_descriptors(struct file *filp, struct kobject *kobj,
off -= srclen;
}
}
- usb_unlock_device(udev);
return count - nleft;
}
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 9043d7242d67..14274b60b47b 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -37,6 +37,71 @@ bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
}
EXPORT_SYMBOL_GPL(usb_acpi_power_manageable);
+#define UUID_USB_CONTROLLER_DSM "ce2ee385-00e6-48cb-9f05-2edb927c4899"
+#define USB_DSM_DISABLE_U1_U2_FOR_PORT 5
+
+/**
+ * usb_acpi_port_lpm_incapable - check if lpm should be disabled for a port.
+ * @hdev: USB device belonging to the usb hub
+ * @index: zero based port index
+ *
+ * Some USB3 ports may not support USB3 link power management U1/U2 states
+ * due to different retimer setup. ACPI provides _DSM method which returns 0x01
+ * if U1 and U2 states should be disabled. Evaluate _DSM with:
+ * Arg0: UUID = ce2ee385-00e6-48cb-9f05-2edb927c4899
+ * Arg1: Revision ID = 0
+ * Arg2: Function Index = 5
+ * Arg3: (empty)
+ *
+ * Return: 1 if the USB3 port is LPM incapable, negative on error, otherwise 0
+ */
+int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index)
+{
+ union acpi_object *obj;
+ acpi_handle port_handle;
+ int port1 = index + 1;
+ guid_t guid;
+ int ret;
+
+ ret = guid_parse(UUID_USB_CONTROLLER_DSM, &guid);
+ if (ret)
+ return ret;
+
+ port_handle = usb_get_hub_port_acpi_handle(hdev, port1);
+ if (!port_handle) {
+ dev_dbg(&hdev->dev, "port-%d no acpi handle\n", port1);
+ return -ENODEV;
+ }
+
+ if (!acpi_check_dsm(port_handle, &guid, 0,
+ BIT(USB_DSM_DISABLE_U1_U2_FOR_PORT))) {
+ dev_dbg(&hdev->dev, "port-%d no _DSM function %d\n",
+ port1, USB_DSM_DISABLE_U1_U2_FOR_PORT);
+ return -ENODEV;
+ }
+
+ obj = acpi_evaluate_dsm(port_handle, &guid, 0,
+ USB_DSM_DISABLE_U1_U2_FOR_PORT, NULL);
+
+ if (!obj)
+ return -ENODEV;
+
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ dev_dbg(&hdev->dev, "evaluate port-%d _DSM failed\n", port1);
+ ACPI_FREE(obj);
+ return -EINVAL;
+ }
+
+ if (obj->integer.value == 0x01)
+ ret = 1;
+
+ ACPI_FREE(obj);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_acpi_port_lpm_incapable);
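+
+/*
+ * A minimal caller sketch (hypothetical, for illustration): a hub
+ * driver could evaluate the capability once per port and only then
+ * allow U1/U2. The lpm_incapable flag below is an assumed field name:
+ *
+ *	if (usb_acpi_port_lpm_incapable(hdev, port1 - 1) > 0)
+ *		port_dev->lpm_incapable = 1;
+ */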
+
/**
* usb_acpi_set_power_state - control usb port's power via acpi power
* resource
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index f16c26dc079d..502d911f71fa 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -209,6 +209,82 @@ int usb_find_common_endpoints_reverse(struct usb_host_interface *alt,
EXPORT_SYMBOL_GPL(usb_find_common_endpoints_reverse);
/**
+ * usb_find_endpoint() - Given an endpoint address, search for the endpoint's
+ * usb_host_endpoint structure in an interface's current altsetting.
+ * @intf: the interface whose current altsetting should be searched
+ * @ep_addr: the endpoint address (number and direction) to find
+ *
+ * Search the altsetting's list of endpoints for one with the specified address.
+ *
+ * Return: Pointer to the usb_host_endpoint if found, %NULL otherwise.
+ */
+static const struct usb_host_endpoint *usb_find_endpoint(
+ const struct usb_interface *intf, unsigned int ep_addr)
+{
+ int n;
+ const struct usb_host_endpoint *ep;
+
+ n = intf->cur_altsetting->desc.bNumEndpoints;
+ ep = intf->cur_altsetting->endpoint;
+ for (; n > 0; --n, ++ep) {
+ if (ep->desc.bEndpointAddress == ep_addr)
+ return ep;
+ }
+ return NULL;
+}
+
+/**
+ * usb_check_bulk_endpoints - Check whether an interface's current altsetting
+ * contains a set of bulk endpoints with the given addresses.
+ * @intf: the interface whose current altsetting should be searched
+ * @ep_addrs: 0-terminated array of the endpoint addresses (number and
+ * direction) to look for
+ *
+ * Search for endpoints with the specified addresses and check their types.
+ *
+ * Return: %true if all the endpoints are found and are bulk, %false otherwise.
+ */
+bool usb_check_bulk_endpoints(
+ const struct usb_interface *intf, const u8 *ep_addrs)
+{
+ const struct usb_host_endpoint *ep;
+
+ for (; *ep_addrs; ++ep_addrs) {
+ ep = usb_find_endpoint(intf, *ep_addrs);
+ if (!ep || !usb_endpoint_xfer_bulk(&ep->desc))
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL_GPL(usb_check_bulk_endpoints);
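+
+/*
+ * Usage sketch (illustrative, not part of this patch): a class driver
+ * can validate its expected endpoint layout at probe time with a
+ * 0-terminated list of endpoint addresses; usb_check_int_endpoints()
+ * below works the same way for interrupt endpoints:
+ *
+ *	static const u8 bulk_eps[] = { 0x01, 0x81, 0 };
+ *
+ *	if (!usb_check_bulk_endpoints(intf, bulk_eps))
+ *		return -ENODEV;
+ */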
+
+/**
+ * usb_check_int_endpoints - Check whether an interface's current altsetting
+ * contains a set of interrupt endpoints with the given addresses.
+ * @intf: the interface whose current altsetting should be searched
+ * @ep_addrs: 0-terminated array of the endpoint addresses (number and
+ * direction) to look for
+ *
+ * Search for endpoints with the specified addresses and check their types.
+ *
+ * Return: %true if all the endpoints are found and are interrupt,
+ * %false otherwise.
+ */
+bool usb_check_int_endpoints(
+ const struct usb_interface *intf, const u8 *ep_addrs)
+{
+ const struct usb_host_endpoint *ep;
+
+ for (; *ep_addrs; ++ep_addrs) {
+ ep = usb_find_endpoint(intf, *ep_addrs);
+ if (!ep || !usb_endpoint_xfer_int(&ep->desc))
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL_GPL(usb_check_int_endpoints);
+
+/**
* usb_find_alt_setting() - Given a configuration, find the alternate setting
* for the given interface.
* @config: the configuration to search (not necessarily the current config).
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index a412ef67af18..7b8dee3087f3 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4684,8 +4684,8 @@ fail3:
if (qh_allocated && qh->channel && qh->channel->qh == qh)
qh->channel->qh = NULL;
fail2:
- spin_unlock_irqrestore(&hsotg->lock, flags);
urb->hcpriv = NULL;
+ spin_unlock_irqrestore(&hsotg->lock, flags);
kfree(qtd);
fail1:
if (qh_allocated) {
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index d5f4ec1b73b1..08e2792cb732 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -2045,15 +2045,17 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
{
struct dwc2_qtd *qtd;
struct dwc2_host_chan *chan;
- u32 hcint, hcintmsk;
+ u32 hcint, hcintraw, hcintmsk;
chan = hsotg->hc_ptr_array[chnum];
- hcint = dwc2_readl(hsotg, HCINT(chnum));
+ hcintraw = dwc2_readl(hsotg, HCINT(chnum));
hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
+ hcint = hcintraw & hcintmsk;
+ dwc2_writel(hsotg, hcint, HCINT(chnum));
+
if (!chan) {
dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
- dwc2_writel(hsotg, hcint, HCINT(chnum));
return;
}
@@ -2062,11 +2064,9 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
chnum);
dev_vdbg(hsotg->dev,
" hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
- hcint, hcintmsk, hcint & hcintmsk);
+ hcintraw, hcintmsk, hcint);
}
- dwc2_writel(hsotg, hcint, HCINT(chnum));
-
/*
* If we got an interrupt after someone called
* dwc2_hcd_endpoint_disable() we don't want to crash below
@@ -2076,8 +2076,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
return;
}
- chan->hcint = hcint;
- hcint &= hcintmsk;
+ chan->hcint = hcintraw;
/*
* If the channel was halted due to a dequeue, the qtd list might
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 34bb6124f1e2..4f640c0c51b3 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -142,9 +142,9 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
} else if (hsotg->plat && hsotg->plat->phy_init) {
ret = hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
} else {
- ret = phy_power_on(hsotg->phy);
+ ret = phy_init(hsotg->phy);
if (ret == 0)
- ret = phy_init(hsotg->phy);
+ ret = phy_power_on(hsotg->phy);
}
return ret;
@@ -176,9 +176,9 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
} else if (hsotg->plat && hsotg->plat->phy_exit) {
ret = hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
} else {
- ret = phy_exit(hsotg->phy);
+ ret = phy_power_off(hsotg->phy);
if (ret == 0)
- ret = phy_power_off(hsotg->phy);
+ ret = phy_exit(hsotg->phy);
}
if (ret)
return ret;
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 556a876c7896..d072da7cd71b 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -23,6 +23,7 @@ config USB_DWC3_ULPI
choice
bool "DWC3 Mode Selection"
default USB_DWC3_DUAL_ROLE if (USB && USB_GADGET)
+ default USB_DWC3_OTG if (USB && USB_GADGET && USB_OTG && USB_OTG_FSM)
default USB_DWC3_HOST if (USB && !USB_GADGET)
default USB_DWC3_GADGET if (!USB && USB_GADGET)
@@ -48,6 +49,13 @@ config USB_DWC3_DUAL_ROLE
This is the default mode of working of DWC3 controller where
both host and gadget features are enabled.
+config USB_DWC3_OTG
+ bool "Dual Role mode + OTG"
+ depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3) && USB_OTG && USB_OTG_FSM && PM)
+ help
+ This mode of the DWC3 controller enables both host and
+ gadget features together with OTG support.
+
endchoice
comment "Platform Glue Driver Support"
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index ae86da0dc5bd..258bc4bfca87 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -10,12 +10,16 @@ ifneq ($(CONFIG_TRACING),)
dwc3-y += trace.o
endif
-ifneq ($(filter y,$(CONFIG_USB_DWC3_HOST) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
+ifneq ($(filter y,$(CONFIG_USB_DWC3_HOST) $(CONFIG_USB_DWC3_DUAL_ROLE) $(CONFIG_USB_DWC3_OTG)),)
dwc3-y += host.o
endif
-ifneq ($(filter y,$(CONFIG_USB_DWC3_GADGET) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
- dwc3-y += gadget.o ep0.o
+ifneq ($(filter y,$(CONFIG_USB_DWC3_GADGET) $(CONFIG_USB_DWC3_DUAL_ROLE) $(CONFIG_USB_DWC3_OTG)),)
+ dwc3-y += gadget.o ep0.o gadget_hibernation.o
+endif
+
+ifneq ($(CONFIG_USB_DWC3_OTG),)
+ dwc3-y += otg.o
endif
ifneq ($(CONFIG_USB_DWC3_DUAL_ROLE),)
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 8cc81f193e9c..01d67ca03a6f 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -25,6 +25,7 @@
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/of_address.h>
#include <linux/reset.h>
#include <linux/usb/ch9.h>
@@ -227,7 +228,7 @@ u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
* dwc3_core_soft_reset - Issues core soft reset and PHY reset
* @dwc: pointer to our context structure
*/
-static int dwc3_core_soft_reset(struct dwc3 *dwc)
+int dwc3_core_soft_reset(struct dwc3 *dwc)
{
u32 reg;
int retries = 1000;
@@ -250,6 +251,9 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
* XHCI driver will reset the host block. If dwc3 was configured for
* host-only mode, then we can return early.
*/
+ if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->is_hibernated)
+ return 0;
+
if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
return 0;
@@ -307,7 +311,7 @@ static const struct clk_bulk_data dwc3_core_clks[] = {
*/
static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
{
- u32 reg;
+ u32 reg, gfladj;
u32 dft;
if (dwc->revision < DWC3_REVISION_250A)
@@ -316,13 +320,27 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
if (dwc->fladj == 0)
return;
+ /* Save the initial DWC3_GFLADJ register value */
reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
+ gfladj = reg;
+
+ if (dwc->refclk_fladj) {
+ if ((reg & DWC3_GFLADJ_REFCLK_FLADJ) !=
+ (dwc->fladj & DWC3_GFLADJ_REFCLK_FLADJ)) {
+ reg &= ~DWC3_GFLADJ_REFCLK_FLADJ;
+ reg |= (dwc->fladj & DWC3_GFLADJ_REFCLK_FLADJ);
+ }
+ }
+
dft = reg & DWC3_GFLADJ_30MHZ_MASK;
if (dft != dwc->fladj) {
reg &= ~DWC3_GFLADJ_30MHZ_MASK;
reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
- dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}
+
+ /* Update DWC3_GFLADJ if there is any change from initial value */
+ if (reg != gfladj)
+ dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}
/**
@@ -371,7 +389,7 @@ static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
* dwc3_free_event_buffers - frees all allocated event buffers
* @dwc: Pointer to our controller context structure
*/
-static void dwc3_free_event_buffers(struct dwc3 *dwc)
+void dwc3_free_event_buffers(struct dwc3 *dwc)
{
struct dwc3_event_buffer *evt;
@@ -388,7 +406,7 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
* Returns 0 on success otherwise negative errno. In the error case, dwc
* may contain some buffers allocated but not all which were requested.
*/
-static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
+int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
{
struct dwc3_event_buffer *evt;
@@ -412,6 +430,9 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc)
{
struct dwc3_event_buffer *evt;
+ if (dwc->dr_mode == USB_DR_MODE_HOST)
+ return 0;
+
evt = dwc->ev_buf;
evt->lpos = 0;
dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
@@ -442,26 +463,46 @@ void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
static int dwc3_alloc_scratch_buffers(struct dwc3 *dwc)
{
+ u32 size;
+
+ if (dwc->dr_mode == USB_DR_MODE_HOST)
+ return 0;
+
if (!dwc->has_hibernation)
return 0;
if (!dwc->nr_scratch)
return 0;
- dwc->scratchbuf = kmalloc_array(dwc->nr_scratch,
- DWC3_SCRATCHBUF_SIZE, GFP_KERNEL);
+ /* Allocate only if scratchbuf is NULL */
+ if (dwc->scratchbuf)
+ return 0;
+
+ size = dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE;
+
+ dwc->scratchbuf = kzalloc(size, GFP_KERNEL);
+
if (!dwc->scratchbuf)
return -ENOMEM;
+ dwc->scratch_addr = dma_map_single(dwc->dev, dwc->scratchbuf, size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dwc->dev, dwc->scratch_addr)) {
+ dev_err(dwc->dev, "failed to map scratch buffer\n");
+ return -EFAULT;
+ }
+
return 0;
}
static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
{
- dma_addr_t scratch_addr;
u32 param;
int ret;
+ if (dwc->dr_mode == USB_DR_MODE_HOST)
+ return 0;
+
if (!dwc->has_hibernation)
return 0;
@@ -469,28 +510,17 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
return 0;
/* should never fall here */
- if (!WARN_ON(dwc->scratchbuf))
+ if (WARN_ON(!dwc->scratchbuf))
return 0;
- scratch_addr = dma_map_single(dwc->sysdev, dwc->scratchbuf,
- dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dwc->sysdev, scratch_addr)) {
- dev_err(dwc->sysdev, "failed to map scratch buffer\n");
- ret = -EFAULT;
- goto err0;
- }
-
- dwc->scratch_addr = scratch_addr;
-
- param = lower_32_bits(scratch_addr);
+ param = lower_32_bits(dwc->scratch_addr);
ret = dwc3_send_gadget_generic_command(dwc,
DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO, param);
if (ret < 0)
goto err1;
- param = upper_32_bits(scratch_addr);
+ param = upper_32_bits(dwc->scratch_addr);
ret = dwc3_send_gadget_generic_command(dwc,
DWC3_DGCMD_SET_SCRATCHPAD_ADDR_HI, param);
@@ -503,7 +533,6 @@ err1:
dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
-err0:
return ret;
}
@@ -516,7 +545,7 @@ static void dwc3_free_scratch_buffers(struct dwc3 *dwc)
return;
/* should never fall here */
- if (!WARN_ON(dwc->scratchbuf))
+ if (WARN_ON(!dwc->scratchbuf))
return;
dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
@@ -546,6 +575,45 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
}
+static int dwc3_config_soc_bus(struct dwc3 *dwc)
+{
+ int ret;
+
+ /*
+ * CCI is enabled for USB when the node has the
+ * 'dma-coherent' property, in which case
+ * of_dma_is_coherent() returns true.
+ */
+ if (of_dma_is_coherent(dwc->dev->of_node)) {
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
+ reg |= DWC3_GSBUSCFG0_DATRDREQINFO |
+ DWC3_GSBUSCFG0_DESRDREQINFO |
+ DWC3_GSBUSCFG0_DATWRREQINFO |
+ DWC3_GSBUSCFG0_DESWRREQINFO;
+ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, reg);
+ }
+
+ /*
+ * Route the USB DMA traffic through the CCI path instead
+ * of reaching DDR directly. This routing is needed to
+ * make SMMU and CCI work with USB DMA.
+ */
+ if (of_dma_is_coherent(dwc->dev->of_node) || dwc->dev->iommu_group) {
+ ret = dwc3_enable_hw_coherency(dwc->dev);
+ if (ret)
+ return ret;
+ }
+
+ /* Send struct dwc3 to dwc3-of-simple for configuring VBUS
+ * during suspend/resume
+ */
+ dwc3_set_simple_data(dwc);
+
+ return 0;
+}
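+
+/*
+ * Sketch of the DT fragment dwc3_config_soc_bus() checks for
+ * (illustrative; the node label is an assumption):
+ *
+ *	&usb0 {
+ *		dma-coherent;
+ *	};
+ */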
+
static int dwc3_core_ulpi_init(struct dwc3 *dwc)
{
int intf;
@@ -692,17 +760,11 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
static void dwc3_core_exit(struct dwc3 *dwc)
{
- dwc3_event_buffers_cleanup(dwc);
-
usb_phy_shutdown(dwc->usb2_phy);
usb_phy_shutdown(dwc->usb3_phy);
phy_exit(dwc->usb2_generic_phy);
phy_exit(dwc->usb3_generic_phy);
- usb_phy_set_suspend(dwc->usb2_phy, 1);
- usb_phy_set_suspend(dwc->usb3_phy, 1);
- phy_power_off(dwc->usb2_generic_phy);
- phy_power_off(dwc->usb3_generic_phy);
clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks);
reset_control_assert(dwc->reset);
}
@@ -760,8 +822,15 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
reg &= ~DWC3_GCTL_DSBLCLKGTNG;
break;
case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
- /* enable hibernation here */
- dwc->nr_scratch = DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(hwparams4);
+ if (!device_property_read_bool(dwc->dev,
+ "snps,enable-hibernation")) {
+ dev_dbg(dwc->dev, "Hibernation not enabled\n");
+ } else {
+ /* enable hibernation here */
+ dwc->nr_scratch =
+ DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(hwparams4);
+ dwc->has_hibernation = 1;
+ }
/*
* REVISIT Enabling this bit so that host-mode hibernation
@@ -906,7 +975,7 @@ static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
*
* Returns 0 on success otherwise negative errno.
*/
-static int dwc3_core_init(struct dwc3 *dwc)
+int dwc3_core_init(struct dwc3 *dwc)
{
u32 reg;
int ret;
@@ -930,8 +999,13 @@ static int dwc3_core_init(struct dwc3 *dwc)
if (!dwc->ulpi_ready) {
ret = dwc3_core_ulpi_init(dwc);
- if (ret)
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ dwc3_core_soft_reset(dwc);
+ ret = -EPROBE_DEFER;
+ }
goto err0;
+ }
dwc->ulpi_ready = true;
}
@@ -946,18 +1020,36 @@ static int dwc3_core_init(struct dwc3 *dwc)
if (ret)
goto err0a;
+ if (dwc->mask_phy_rst)
+ dwc3_mask_phy_reset(dwc->dev, TRUE);
+
dwc3_core_setup_global_control(dwc);
dwc3_core_num_eps(dwc);
+ if (!dwc->scratchbuf) {
+ ret = dwc3_alloc_scratch_buffers(dwc);
+ if (ret) {
+ dev_err(dwc->dev,
+ "Not enough memory for scratch buffers\n");
+ goto err1;
+ }
+ }
+
ret = dwc3_setup_scratch_buffers(dwc);
- if (ret)
+ if (ret) {
+ dev_err(dwc->dev, "Failed to setup scratch buffers: %d\n", ret);
goto err1;
+ }
/* Adjust Frame Length */
dwc3_frame_length_adjustment(dwc);
dwc3_set_incr_burst_type(dwc);
+ ret = dwc3_config_soc_bus(dwc);
+ if (ret)
+ goto err1;
+
usb_phy_set_suspend(dwc->usb2_phy, 0);
usb_phy_set_suspend(dwc->usb3_phy, 0);
ret = phy_power_on(dwc->usb2_generic_phy);
@@ -974,6 +1066,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
goto err4;
}
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ break;
+ case USB_DR_MODE_HOST:
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+ break;
+ case USB_DR_MODE_OTG:
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
+ break;
+ default:
+ dev_warn(dwc->dev, "Unsupported mode %d\n", dwc->dr_mode);
+ break;
+ }
+
/*
* ENDXFER polling is available on version 3.10a and later of
* the DWC_usb3 controller. It is NOT available in the
@@ -985,6 +1092,32 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
}
+ /* When configured in HOST mode, after issuing a U3/L2 exit the
+ * controller fails to send the proper CRC checksum in the CRC5
+ * field. Because of this behaviour a Transaction Error is
+ * generated, resulting in reset and re-enumeration of the
+ * attached usb device. Enabling bit 10 of GUCTL1 corrects this
+ * problem.
+ */
+ if (dwc->enable_guctl1_resume_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+ reg |= DWC3_GUCTL1_RESUME_QUIRK;
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
+ /* The SNPS controller, when configured in HOST mode, maintains an
+ * Inter Packet Delay (IPD) of ~380ns, which works with most
+ * super-speed hubs except VIA-LAB hubs. With an IPD of ~380ns the
+ * HOST controller fails to enumerate FS/LS devices connected
+ * behind VIA-LAB hubs. Enabling bit 9 of GUCTL1 enables a HW
+ * workaround that reduces the ULPI clock latency by 1 cycle, thus
+ * reducing the IPD (~360ns) and letting the controller enumerate
+ * FS/LS devices behind VIA-LAB hubs.
+ */
+ if (dwc->enable_guctl1_ipd_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+ reg |= DWC3_GUCTL1_IPD_QUIRK;
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
if (dwc->revision >= DWC3_REVISION_250A) {
reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
@@ -1004,22 +1137,6 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
}
- if (dwc->dr_mode == USB_DR_MODE_HOST ||
- dwc->dr_mode == USB_DR_MODE_OTG) {
- reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
-
- /*
- * Enable Auto retry Feature to make the controller operating in
- * Host mode on seeing transaction errors(CRC errors or internal
- * overrun scenerios) on IN transfers to reply to the device
- * with a non-terminating retry ACK (i.e, an ACK transcation
- * packet with Retry=1 & Nump != 0)
- */
- reg |= DWC3_GUCTL_HSTINAUTORETRY;
-
- dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
- }
-
/*
* Must config both number of packets and max burst settings to enable
* RX and/or TX threshold.
@@ -1193,6 +1310,11 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
dev_err(dev, "failed to initialize dual-role\n");
return ret;
}
+
+#if IS_ENABLED(CONFIG_USB_DWC3_OTG)
+ dwc->current_dr_role = 0;
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
+#endif
break;
default:
dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
@@ -1326,8 +1448,16 @@ static void dwc3_get_properties(struct dwc3 *dwc)
device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
&dwc->fladj);
+ dwc->refclk_fladj = device_property_read_bool(dev,
+ "snps,refclk_fladj");
+ dwc->enable_guctl1_resume_quirk = device_property_read_bool(dev,
+ "snps,enable_guctl1_resume_quirk");
+ dwc->enable_guctl1_ipd_quirk = device_property_read_bool(dev,
+ "snps,enable_guctl1_ipd_quirk");
dwc->dis_metastability_quirk = device_property_read_bool(dev,
"snps,dis_metastability_quirk");
+ dwc->mask_phy_rst = device_property_read_bool(dev,
+ "snps,mask_phy_reset");
dwc->dis_split_quirk = device_property_read_bool(dev,
"snps,dis-split-quirk");
@@ -1337,6 +1467,9 @@ static void dwc3_get_properties(struct dwc3 *dwc)
dwc->hird_threshold = hird_threshold;
+ /* Check if extra quirks to be added */
+ dwc3_simple_check_quirks(dwc);
+
dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
dwc->rx_max_burst_prd = rx_max_burst_prd;
@@ -1409,9 +1542,8 @@ static int dwc3_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res, dwc_res;
struct dwc3 *dwc;
-
int ret;
-
+ u32 mdwidth;
void __iomem *regs;
dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
@@ -1490,13 +1622,16 @@ static int dwc3_probe(struct platform_device *pdev)
spin_lock_init(&dwc->lock);
+ /* Set dma coherent mask to DMA BUS data width */
+ mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
+ dev_dbg(dev, "Enabling %d-bit DMA addresses.\n", mdwidth);
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(mdwidth));
+
+ pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- goto err1;
pm_runtime_forbid(dev);
@@ -1511,10 +1646,6 @@ static int dwc3_probe(struct platform_device *pdev)
if (ret)
goto err3;
- ret = dwc3_alloc_scratch_buffers(dwc);
- if (ret)
- goto err3;
-
ret = dwc3_core_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -1531,22 +1662,24 @@ static int dwc3_probe(struct platform_device *pdev)
pm_runtime_put(dev);
+ dma_set_max_seg_size(dev, UINT_MAX);
+
return 0;
err5:
dwc3_debugfs_exit(dwc);
dwc3_event_buffers_cleanup(dwc);
- usb_phy_shutdown(dwc->usb2_phy);
- usb_phy_shutdown(dwc->usb3_phy);
- phy_exit(dwc->usb2_generic_phy);
- phy_exit(dwc->usb3_generic_phy);
-
usb_phy_set_suspend(dwc->usb2_phy, 1);
usb_phy_set_suspend(dwc->usb3_phy, 1);
phy_power_off(dwc->usb2_generic_phy);
phy_power_off(dwc->usb3_generic_phy);
+ usb_phy_shutdown(dwc->usb2_phy);
+ usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
+
dwc3_ulpi_exit(dwc);
err4:
@@ -1556,12 +1689,10 @@ err3:
dwc3_free_event_buffers(dwc);
err2:
- pm_runtime_allow(&pdev->dev);
-
-err1:
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
+ pm_runtime_allow(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
disable_clks:
clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks);
assert_reset:
@@ -1579,9 +1710,11 @@ static int dwc3_remove(struct platform_device *pdev)
dwc3_core_exit_mode(dwc);
dwc3_debugfs_exit(dwc);
+ dwc3_event_buffers_cleanup(dwc);
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);
+ pm_runtime_allow(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
@@ -1673,6 +1806,18 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
break;
}
+ dwc3_event_buffers_cleanup(dwc);
+
+ /* Put the core into D3 state */
+ dwc3_set_usb_core_power(dwc, false);
+
+ /*
+ * To avoid reinit of phy during resume, prevent calling the
+ * dwc3_core_exit() when in D3 state
+ */
+ if (!dwc->is_d3)
+ dwc3_core_exit(dwc);
+
return 0;
}
@@ -1682,6 +1827,13 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
int ret;
u32 reg;
+ /* Bring core to D0 state */
+ dwc3_set_usb_core_power(dwc, true);
+
+ ret = dwc3_core_init(dwc);
+ if (ret)
+ return ret;
+
switch (dwc->current_dr_role) {
case DWC3_GCTL_PRTCAP_DEVICE:
ret = dwc3_core_init_for_resume(dwc);
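
Taken together, the suspend and resume hunks pair the D3/D0 transitions with the core teardown and re-init. A condensed sketch of the intended ordering, with error handling and the role-specific work elided:

static int example_suspend(struct dwc3 *dwc)
{
	dwc3_event_buffers_cleanup(dwc);
	dwc3_set_usb_core_power(dwc, false);	/* enter D3 via the glue layer */

	if (!dwc->is_d3)	/* in D3 the PHYs stay configured */
		dwc3_core_exit(dwc);

	return 0;
}

static int example_resume(struct dwc3 *dwc)
{
	dwc3_set_usb_core_power(dwc, true);	/* leave D3 first */

	return dwc3_core_init(dwc);	/* then reprogram the core */
}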
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index da296f888f45..b6dde5cb34d2 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -165,6 +165,9 @@
/* Bit fields */
+/* Global Status Register */
+#define DWC3_GSTS_CUR_MODE (1 << 0)
+
/* Global SoC Bus Configuration INCRx Register 0 */
#define DWC3_GSBUSCFG0_INCR256BRSTENA (1 << 7) /* INCR256 burst */
#define DWC3_GSBUSCFG0_INCR128BRSTENA (1 << 6) /* INCR128 burst */
@@ -197,6 +200,12 @@
#define DWC3_EVENTQ 7
#define DWC3_AUXEVENTQ 8
+/* Global SoC Bus Configuration Register */
+#define DWC3_GSBUSCFG0_DATRDREQINFO (0xf << 28)
+#define DWC3_GSBUSCFG0_DESRDREQINFO (0xf << 24)
+#define DWC3_GSBUSCFG0_DATWRREQINFO (0xf << 20)
+#define DWC3_GSBUSCFG0_DESWRREQINFO (0xf << 16)
+
/* Global RX Threshold Configuration Register */
#define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19)
#define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24)
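
The new DATRDREQINFO/DESRDREQINFO/DATWRREQINFO/DESWRREQINFO masks cover the AXI request-information nibbles of GSBUSCFG0. A hedged sketch of how a glue layer might set them to tag DMA traffic as cache-coherent; the all-ones nibble value is an assumption, not mandated by these defines:

static void example_enable_coherent_axi(struct dwc3 *dwc)
{
	u32 reg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);

	/* Tag descriptor and data reads/writes with coherent attributes */
	reg |= DWC3_GSBUSCFG0_DATRDREQINFO | DWC3_GSBUSCFG0_DESRDREQINFO |
	       DWC3_GSBUSCFG0_DATWRREQINFO | DWC3_GSBUSCFG0_DESWRREQINFO;
	dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, reg);
}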
@@ -246,9 +255,6 @@
#define DWC3_GCTL_GBLHIBERNATIONEN BIT(1)
#define DWC3_GCTL_DSBLCLKGTNG BIT(0)
-/* Global User Control Register */
-#define DWC3_GUCTL_HSTINAUTORETRY BIT(14)
-
/* Global User Control 1 Register */
#define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17)
#define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28)
@@ -373,6 +379,11 @@
/* Global Frame Length Adjustment Register */
#define DWC3_GFLADJ_30MHZ_SDBND_SEL BIT(7)
#define DWC3_GFLADJ_30MHZ_MASK 0x3f
+#define DWC3_GFLADJ_REFCLK_FLADJ (0x3fff << 8)
+
+/* Global User Control Register 1 */
+#define DWC3_GUCTL1_RESUME_QUIRK (1 << 10)
+#define DWC3_GUCTL1_IPD_QUIRK (1 << 9)
/* Global User Control Register 2 */
#define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
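
A sketch of how the two new GUCTL1 bits pair with the quirk flags added to struct dwc3 later in this patch. The read-modify-write shape mirrors the existing GUCTL1 quirk handling in core.c, but this exact helper is illustrative only:

static void example_apply_guctl1_quirks(struct dwc3 *dwc)
{
	u32 reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);

	if (dwc->enable_guctl1_resume_quirk)
		reg |= DWC3_GUCTL1_RESUME_QUIRK;	/* fix CRC after resume */
	if (dwc->enable_guctl1_ipd_quirk)
		reg |= DWC3_GUCTL1_IPD_QUIRK;	/* reduce inter-packet delay */

	dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
}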
@@ -456,6 +467,7 @@
/* Device Status Register */
#define DWC3_DSTS_DCNRD BIT(29)
+#define DWC3_DSTS_SRE BIT(28)
/* This applies for core versions 1.87a and earlier */
#define DWC3_DSTS_PWRUPREQ BIT(24)
@@ -615,6 +627,9 @@
#define DWC3_OSTS_VBUSVLD BIT(1)
#define DWC3_OSTS_CONIDSTS BIT(0)
+/* Stream timer timeout value in millisecs */
+#define STREAM_TIMEOUT_MS 50
+
/* Structures */
struct dwc3_trb;
@@ -876,6 +891,11 @@ struct dwc3_hwparams {
* @epnum: endpoint number to which this request refers
* @trb: pointer to struct dwc3_trb
* @trb_dma: DMA address of @trb
+ * @stream_timeout_timer: timer used on stream-capable endpoints that can
+ * deadlock with the host: the host may wait for the gadget to issue
+ * ERDY while the gadget waits for the host to issue a prime
+ * transaction. When it expires, the transfer is stopped and re-kicked.
* @num_trbs: number of TRBs used by this request
* @needs_extra_trb: true when request needs one extra TRB (either due to ZLP
* or unaligned OUT)
@@ -891,6 +911,7 @@ struct dwc3_request {
unsigned num_pending_sgs;
unsigned int num_queued_sgs;
+ u8 first_trb_index;
unsigned remaining;
unsigned int status;
@@ -903,6 +924,7 @@ struct dwc3_request {
u8 epnum;
struct dwc3_trb *trb;
dma_addr_t trb_dma;
+ struct timer_list stream_timeout_timer;
unsigned num_trbs;
@@ -946,7 +968,9 @@ struct dwc3_scratchpad_array {
* @regs: base address for our registers
* @regs_size: address space size
* @fladj: frame length adjustment
+ * @refclk_fladj: set if the GFLADJ_REFCLK_FLADJ field should also be updated
* @irq_gadget: peripheral controller's IRQ number
+ * @otg: pointer to the dwc3_otg structure
* @otg_irq: IRQ number for OTG IRQs
* @current_otg_role: current role of operation while using the OTG block
* @desired_otg_role: desired role of operation while using the OTG block
@@ -994,6 +1018,7 @@ struct dwc3_scratchpad_array {
* @tx_max_burst_prd: max periodic ESS transmit burst size
* @hsphy_interface: "utmi" or "ulpi"
* @connected: true when we're connected to a host, false otherwise
+ * @softconnect: true when gadget connect is called, false when disconnect runs
* @delayed_status: true when gadget driver asks for delayed status
* @ep0_bounced: true when we used bounce buffer
* @ep0_expect_in: true when we expect a DATA IN transfer
@@ -1013,6 +1038,7 @@ struct dwc3_scratchpad_array {
* not needed for DWC_usb31 version 1.70a-ea06 and below
* @usb3_lpm_capable: set if hardware supports Link Power Management
* @usb2_lpm_disable: set to disable usb2 lpm
+ * @remote_wakeup: set if host supports Remote Wakeup from Peripheral
* @disable_scramble_quirk: set if we enable the disable scramble quirk
* @u2exit_lfps_quirk: set if we enable u2exit lfps quirk
* @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
@@ -1033,11 +1059,16 @@ struct dwc3_scratchpad_array {
* provide a free-running PHY clock.
* @dis_del_phy_power_chg_quirk: set if we disable delay phy power
* change quirk.
+ * @enable_guctl1_resume_quirk: set if we enable the quirk that fixes
+ * improper CRC generation after resume from suspend.
+ * @enable_guctl1_ipd_quirk: set if we enable the quirk that reduces the
+ * inter-packet delay (IPD).
* @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate
* check during HS transmit.
* @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed
* instances in park mode.
* @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk
+ * @is_hibernated: true when dwc3 is hibernated; abort processing events
* @tx_de_emphasis: Tx de-emphasis value
* 0 - -6dB de-emphasis
* 1 - -3.5dB de-emphasis
@@ -1047,6 +1078,12 @@ struct dwc3_scratchpad_array {
* @dis_split_quirk: set to disable split boundary.
* @imod_interval: set the interrupt moderation interval in 250ns
* increments or 0 to disable.
+ * @is_d3: set if the controller is in d3 state
+ * @saved_regs: registers to be saved/restored during hibernation/wakeup events
+ * @irq_wakeup: wakeup IRQ number, triggered when host asks to wakeup from
+ * hibernation
+ * @force_hiber_wake: flag set when the gadget driver forcefully triggers
+ * a hibernation wakeup event
*/
struct dwc3 {
struct work_struct drd_work;
@@ -1080,6 +1117,8 @@ struct dwc3 {
struct reset_control *reset;
+ struct dwc3_otg *otg;
+
struct usb_phy *usb2_phy;
struct usb_phy *usb3_phy;
@@ -1102,6 +1141,7 @@ struct dwc3 {
enum usb_phy_interface hsphy_mode;
u32 fladj;
+ bool refclk_fladj;
u32 irq_gadget;
u32 otg_irq;
u32 current_otg_role;
@@ -1196,6 +1236,7 @@ struct dwc3 {
const char *hsphy_interface;
unsigned connected:1;
+ unsigned softconnect:1;
unsigned delayed_status:1;
unsigned ep0_bounced:1;
unsigned ep0_expect_in:1;
@@ -1211,6 +1252,7 @@ struct dwc3 {
unsigned dis_start_transfer_quirk:1;
unsigned usb3_lpm_capable:1;
unsigned usb2_lpm_disable:1;
+ unsigned remote_wakeup:1;
unsigned disable_scramble_quirk:1;
unsigned u2exit_lfps_quirk:1;
@@ -1228,17 +1270,25 @@ struct dwc3 {
unsigned dis_rxdet_inp3_quirk:1;
unsigned dis_u2_freeclk_exists_quirk:1;
unsigned dis_del_phy_power_chg_quirk:1;
+ unsigned enable_guctl1_resume_quirk:1;
+ unsigned enable_guctl1_ipd_quirk:1;
unsigned dis_tx_ipgap_linecheck_quirk:1;
unsigned parkmode_disable_ss_quirk:1;
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
+ unsigned is_hibernated:1;
unsigned dis_metastability_quirk:1;
+ unsigned mask_phy_rst:1;
unsigned dis_split_quirk:1;
u16 imod_interval;
+ bool is_d3;
+ u32 *saved_regs;
+ u32 irq_wakeup;
+ bool force_hiber_wake;
};
#define INCRX_BURST_MODE 0
@@ -1415,12 +1465,35 @@ static inline bool dwc3_is_usb31(struct dwc3 *dwc)
return !!(dwc->revision & DWC3_REVISION_IS_DWC31);
}
-bool dwc3_has_imod(struct dwc3 *dwc);
+#if IS_ENABLED(CONFIG_USB_DWC3_OF_SIMPLE)
+int dwc3_enable_hw_coherency(struct device *dev);
+void dwc3_simple_wakeup_capable(struct device *dev, bool wakeup);
+void dwc3_set_simple_data(struct dwc3 *dwc);
+void dwc3_simple_check_quirks(struct dwc3 *dwc);
+int dwc3_set_usb_core_power(struct dwc3 *dwc, bool on);
+void dwc3_mask_phy_reset(struct device *dev, bool mask);
+#else
+static inline int dwc3_enable_hw_coherency(struct device *dev)
+{ return 1; }
+static inline void dwc3_simple_wakeup_capable(struct device *dev, bool wakeup)
+{ }
+static inline void dwc3_set_simple_data(struct dwc3 *dwc)
+{ }
+static inline void dwc3_simple_check_quirks(struct dwc3 *dwc)
+{ }
+static inline int dwc3_set_usb_core_power(struct dwc3 *dwc, bool on)
+{ return 0; }
+static inline void dwc3_mask_phy_reset(struct device *dev, bool mask)
+{ }
+#endif
+bool dwc3_has_imod(struct dwc3 *dwc);
int dwc3_event_buffers_setup(struct dwc3 *dwc);
void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
-#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+int dwc3_core_soft_reset(struct dwc3 *dwc);
+#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)\
+ || IS_ENABLED(CONFIG_USB_DWC3_OTG)
int dwc3_host_init(struct dwc3 *dwc);
void dwc3_host_exit(struct dwc3 *dwc);
#else
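
The conditional block above follows the usual kernel idiom: real prototypes when the glue driver is built in or modular, static inline no-op stubs otherwise, so callers need no #ifdefs of their own. In its general form (the FOO names are placeholders):

#if IS_ENABLED(CONFIG_FOO)
int foo_do_thing(struct device *dev);
#else
static inline int foo_do_thing(struct device *dev)
{
	return 0;	/* report success when the feature is compiled out */
}
#endif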
@@ -1430,7 +1503,8 @@ static inline void dwc3_host_exit(struct dwc3 *dwc)
{ }
#endif
-#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)\
+ || IS_ENABLED(CONFIG_USB_DWC3_OTG)
int dwc3_gadget_init(struct dwc3 *dwc);
void dwc3_gadget_exit(struct dwc3 *dwc);
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
@@ -1439,6 +1513,7 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
struct dwc3_gadget_ep_cmd_params *params);
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param);
+int dwc3_core_init(struct dwc3 *dwc);
#else
static inline int dwc3_gadget_init(struct dwc3 *dwc)
{ return 0; }
@@ -1460,11 +1535,19 @@ static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
{ return 0; }
#endif
+#if IS_ENABLED(CONFIG_USB_DWC3_OTG) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+void dwc3_otg_init(struct dwc3 *dwc);
+void dwc3_otg_exit(struct dwc3 *dwc);
+#else
+static inline void dwc3_otg_init(struct dwc3 *dwc)
+{ }
+static inline void dwc3_otg_exit(struct dwc3 *dwc)
+{ }
+#endif
+
#if IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_drd_init(struct dwc3 *dwc);
void dwc3_drd_exit(struct dwc3 *dwc);
-void dwc3_otg_init(struct dwc3 *dwc);
-void dwc3_otg_exit(struct dwc3 *dwc);
void dwc3_otg_update(struct dwc3 *dwc, bool ignore_idstatus);
void dwc3_otg_host_init(struct dwc3 *dwc);
#else
@@ -1472,10 +1555,6 @@ static inline int dwc3_drd_init(struct dwc3 *dwc)
{ return 0; }
static inline void dwc3_drd_exit(struct dwc3 *dwc)
{ }
-static inline void dwc3_otg_init(struct dwc3 *dwc)
-{ }
-static inline void dwc3_otg_exit(struct dwc3 *dwc)
-{ }
static inline void dwc3_otg_update(struct dwc3 *dwc, bool ignore_idstatus)
{ }
static inline void dwc3_otg_host_init(struct dwc3 *dwc)
@@ -1513,4 +1592,8 @@ static inline void dwc3_ulpi_exit(struct dwc3 *dwc)
{ }
#endif
+int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length);
+void dwc3_free_event_buffers(struct dwc3 *dwc);
+int dwc3_event_buffers_setup(struct dwc3 *dwc);
+
#endif /* __DRIVERS_USB_DWC3_CORE_H */
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 9fb519b2efb4..fcb2e3d35e69 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -327,6 +327,11 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
unsigned int current_mode;
unsigned long flags;
u32 reg;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GSTS);
@@ -345,6 +350,8 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
}
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
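
Every handler in this file gains the same runtime-PM bracket. In isolation the pattern is as follows, assuming only the standard <linux/pm_runtime.h> API; the register read stands in for the body of each show() routine:

static int example_show(struct seq_file *s, void *unused)
{
	struct dwc3 *dwc = s->private;
	int ret;

	/* Wake the controller; on failure the usage count is already dropped */
	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	/* ... read registers under dwc->lock ... */

	pm_runtime_put_sync(dwc->dev);	/* balance the get */
	return 0;
}

Every early return between the get and the put must drop the reference, which is why the link_state handlers below add pm_runtime_put_sync() to their error paths.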
@@ -390,6 +397,11 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = s->private;
unsigned long flags;
u32 reg;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
@@ -409,6 +421,8 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg));
}
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -455,6 +469,11 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = s->private;
unsigned long flags;
u32 reg;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -485,6 +504,8 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
seq_printf(s, "UNKNOWN %d\n", reg);
}
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -501,6 +522,7 @@ static ssize_t dwc3_testmode_write(struct file *file,
unsigned long flags;
u32 testmode = 0;
char buf[32];
+ int ret;
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@@ -518,10 +540,16 @@ static ssize_t dwc3_testmode_write(struct file *file,
else
testmode = 0;
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_set_test_mode(dwc, testmode);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return count;
}
@@ -540,12 +568,18 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
enum dwc3_link_state state;
u32 reg;
u8 speed;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GSTS);
if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
seq_puts(s, "Not available\n");
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
return 0;
}
@@ -558,6 +592,8 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
dwc3_gadget_hs_link_string(state));
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -576,6 +612,7 @@ static ssize_t dwc3_link_state_write(struct file *file,
char buf[32];
u32 reg;
u8 speed;
+ int ret;
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@@ -595,10 +632,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
else
return -EINVAL;
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GSTS);
if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
return -EINVAL;
}
@@ -608,12 +650,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
if (speed < DWC3_DSTS_SUPERSPEED &&
state != DWC3_LINK_STATE_RECOV) {
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
return -EINVAL;
}
dwc3_gadget_set_link_state(dwc, state);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return count;
}
@@ -625,6 +670,53 @@ static const struct file_operations dwc3_link_state_fops = {
.release = single_release,
};
+static int dwc3_hiber_enable_show(struct seq_file *s, void *unused)
+{
+ struct dwc3 *dwc = s->private;
+
+ seq_printf(s, "%s\n", (dwc->has_hibernation ? "Enabled" : "Disabled"));
+
+ return 0;
+}
+
+static int dwc3_hiber_enable_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dwc3_hiber_enable_show, inode->i_private);
+}
+
+static ssize_t dwc3_hiber_enable_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct dwc3 *dwc = s->private;
+ char buf[32];
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ /* Enable or disable the hibernation feature */
+ if (!strncmp(buf, "Enable", 6)) {
+ dwc3_gadget_exit(dwc);
+ dwc->has_hibernation = 1;
+ dwc3_gadget_init(dwc);
+ } else if (!strncmp(buf, "Disable", 7)) {
+ dwc3_gadget_exit(dwc);
+ dwc->has_hibernation = 0;
+ dwc3_gadget_init(dwc);
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations dwc3_hiber_enable_fops = {
+ .open = dwc3_hiber_enable_open,
+ .write = dwc3_hiber_enable_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
struct dwc3_ep_file_map {
const char name[25];
const struct file_operations *const fops;
@@ -636,6 +728,11 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_TXFIFO);
@@ -646,6 +743,8 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -655,6 +754,11 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_RXFIFO);
@@ -665,6 +769,8 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -674,12 +780,19 @@ static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_TXREQQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -689,12 +802,19 @@ static int dwc3_rx_request_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_RXREQQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -704,12 +824,19 @@ static int dwc3_rx_info_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_RXINFOQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -719,12 +846,19 @@ static int dwc3_descriptor_fetch_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_DESCFETCHQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -734,12 +868,19 @@ static int dwc3_event_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_EVENTQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -785,6 +926,11 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
int i;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
if (dep->number <= 1) {
@@ -814,6 +960,8 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
out:
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -826,6 +974,11 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
u32 lower_32_bits;
u32 upper_32_bits;
u32 reg;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = DWC3_GDBGLSPMUX_EPSELECT(dep->number);
@@ -838,6 +991,8 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
seq_printf(s, "0x%016llx\n", ep_info);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -899,6 +1054,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
dwc->regset->regs = dwc3_regs;
dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
+ dwc->regset->dev = dwc->dev;
root = debugfs_create_dir(dev_name(dwc->dev), NULL);
dwc->root = root;
@@ -919,6 +1075,10 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
&dwc3_testmode_fops);
debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root, dwc,
&dwc3_link_state_fops);
+ debugfs_create_file("hiber_enable", S_IRUGO | S_IWUSR, root,
+ dwc, &dwc3_hiber_enable_fops);
+
+ dwc3_debugfs_create_endpoint_dirs(dwc, root);
}
}
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index c1e9ea621f41..c594e3ebdbfe 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -37,15 +37,6 @@ struct dwc3_exynos {
struct regulator *vdd10;
};
-static int dwc3_exynos_remove_child(struct device *dev, void *unused)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- platform_device_unregister(pdev);
-
- return 0;
-}
-
static int dwc3_exynos_probe(struct platform_device *pdev)
{
struct dwc3_exynos *exynos;
@@ -142,7 +133,7 @@ static int dwc3_exynos_remove(struct platform_device *pdev)
struct dwc3_exynos *exynos = platform_get_drvdata(pdev);
int i;
- device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child);
+ of_platform_depopulate(&pdev->dev);
for (i = exynos->num_clks - 1; i >= 0; i--)
clk_disable_unprepare(exynos->clks[i]);
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index d055e00f8180..462edbb7069d 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -21,16 +21,260 @@
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <linux/soc/xilinx/zynqmp/fw.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/slab.h>
+
+#include <linux/phy/phy-zynqmp.h>
+#include <linux/of_address.h>
+
+#include "core.h"
+
+/* USB phy reset mask register */
+#define XLNX_USB_PHY_RST 0x001C
+#define XLNX_PHY_RST_MASK 0x1
+
+/* Xilinx USB 3.0 IP Register */
+#define XLNX_USB_COHERENCY 0x005C
+#define XLNX_USB_COHERENCY_ENABLE 0x1
+
+/* ULPI control registers */
+#define ULPI_OTG_CTRL_SET 0xB
+#define ULPI_OTG_CTRL_CLEAR 0XC
+#define OTG_CTRL_DRVVBUS_OFFSET 5
+
+#define XLNX_USB_CUR_PWR_STATE 0x0000
+#define XLNX_CUR_PWR_STATE_D0 0x00
+#define XLNX_CUR_PWR_STATE_D3 0x0F
+#define XLNX_CUR_PWR_STATE_BITMASK 0x0F
+
+#define XLNX_USB_PME_ENABLE 0x0034
+#define XLNX_PME_ENABLE_SIG_GEN 0x01
+
+#define XLNX_USB_REQ_PWR_STATE 0x003c
+#define XLNX_REQ_PWR_STATE_D0 0x00
+#define XLNX_REQ_PWR_STATE_D3 0x03
+
+/* Retry count and per-try delay (us) for USB power-state transitions */
+#define DWC3_PWR_STATE_RETRIES 1000
+#define DWC3_PWR_TIMEOUT 100
+
+/* Versal USB Node ID */
+#define VERSAL_USB_NODE_ID 0x18224018
+
+/* Versal USB Reset ID */
+#define VERSAL_USB_RESET_ID 0xC104036
+
+#define DWC3_OF_ADDRESS(ADDR) ((ADDR) - DWC3_GLOBALS_REGS_START)
+
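
DWC3_OF_ADDRESS() compensates for dwc->regs pointing DWC3_GLOBALS_REGS_START into the register block, so plain readl()/writel() on dwc->regs can still use the core's global-register offsets. For example, a hypothetical read of GUSB2PHYCFG(0) from this glue code:

static u32 example_read_gusb2phycfg(struct dwc3 *dwc)
{
	/* Re-base the global-register offset for dwc->regs */
	u32 addr = DWC3_OF_ADDRESS(DWC3_GUSB2PHYCFG(0));

	return readl(dwc->regs + addr);
}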
+static const struct zynqmp_eemi_ops *eemi_ops;
struct dwc3_of_simple {
struct device *dev;
struct clk_bulk_data *clks;
int num_clocks;
+ void __iomem *regs;
+ struct dwc3 *dwc;
+ struct phy *phy;
+ bool wakeup_capable;
+ bool dis_u3_susphy_quirk;
+ bool enable_d3_suspend;
+ char soc_rev;
struct reset_control *resets;
bool pulse_resets;
bool need_reset;
};
+int dwc3_enable_hw_coherency(struct device *dev)
+{
+ struct device_node *node = of_get_parent(dev->of_node);
+
+ if (of_device_is_compatible(node, "xlnx,zynqmp-dwc3")) {
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+ void __iomem *regs;
+ u32 reg;
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+ regs = simple->regs;
+
+ reg = readl(regs + XLNX_USB_COHERENCY);
+ reg |= XLNX_USB_COHERENCY_ENABLE;
+ writel(reg, regs + XLNX_USB_COHERENCY);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dwc3_enable_hw_coherency);
+
+void dwc3_mask_phy_reset(struct device *dev, bool mask)
+{
+ struct device_node *node = of_get_parent(dev->of_node);
+
+ /* This is only valid for versal platforms */
+ if (of_device_is_compatible(node, "xlnx,versal-dwc3")) {
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+ u32 reg;
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+
+ reg = readl(simple->regs + XLNX_USB_PHY_RST);
+
+ if (mask)
+ /*
+ * Mask the phy reset signal from the controller
+ * so that it does not reach the ULPI phy. This is
+ * done by writing 0 to the usb2_phy_reset register.
+ */
+ reg &= ~XLNX_PHY_RST_MASK;
+ else
+ /*
+ * Allow phy reset signal from controller to
+ * reset ULPI phy. This can be done by writing
+ * 0x1 into usb2_phy_reset register
+ */
+ reg |= XLNX_PHY_RST_MASK;
+
+ writel(reg, simple->regs + XLNX_USB_PHY_RST);
+ }
+}
+EXPORT_SYMBOL(dwc3_mask_phy_reset);
+
+void dwc3_set_simple_data(struct dwc3 *dwc)
+{
+ struct device_node *node = of_get_parent(dwc->dev->of_node);
+
+ if (node && (of_device_is_compatible(node, "xlnx,zynqmp-dwc3") ||
+ of_device_is_compatible(node, "xlnx,versal-dwc3"))) {
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+
+ /* Set (struct dwc3 *) to simple->dwc for future use */
+ simple->dwc = dwc;
+ }
+}
+EXPORT_SYMBOL(dwc3_set_simple_data);
+
+void dwc3_simple_check_quirks(struct dwc3 *dwc)
+{
+ struct device_node *node = of_get_parent(dwc->dev->of_node);
+
+ if (node && of_device_is_compatible(node, "xlnx,zynqmp-dwc3")) {
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+
+ /* Add snps,dis_u3_susphy_quirk */
+ dwc->dis_u3_susphy_quirk = simple->dis_u3_susphy_quirk;
+ }
+}
+EXPORT_SYMBOL(dwc3_simple_check_quirks);
+
+void dwc3_simple_wakeup_capable(struct device *dev, bool wakeup)
+{
+ struct device_node *node = of_node_get(dev->parent->of_node);
+
+ /* check for valid parent node */
+ while (node) {
+ if (!of_device_is_compatible(node, "xlnx,zynqmp-dwc3") &&
+ !of_device_is_compatible(node, "xlnx,versal-dwc3"))
+ node = of_get_next_parent(node);
+ else
+ break;
+ }
+
+ if (node) {
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+
+ /* Set wakeup capable as true or false */
+ simple->wakeup_capable = wakeup;
+
+ /* Allow the D3 state only if wakeup capable */
+ simple->enable_d3_suspend = wakeup;
+ }
+}
+EXPORT_SYMBOL(dwc3_simple_wakeup_capable);
+
+static int dwc3_simple_set_phydata(struct dwc3_of_simple *simple)
+{
+ struct device *dev = simple->dev;
+ struct device_node *np = dev->of_node;
+ struct phy *phy;
+
+ np = of_get_next_child(np, NULL);
+
+ if (np) {
+ phy = of_phy_get(np, "usb3-phy");
+ if (IS_ERR(phy)) {
+ dev_err(dev, "%s: Can't find usb3-phy\n", __func__);
+ return PTR_ERR(phy);
+ }
+
+ /* Store phy for future usage */
+ simple->phy = phy;
+
+ /* assign USB vendor regs addr to phy platform_data */
+ phy->dev.platform_data = simple->regs;
+
+ phy_put(phy);
+ } else {
+ dev_err(dev, "%s: Can't find child node\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dwc3_dis_u3phy_suspend(struct platform_device *pdev,
+ struct dwc3_of_simple *simple)
+{
+ char *soc_rev;
+
+ /* The below is only valid for ZynqMP SOC */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "xlnx,zynqmp-dwc3")) {
+ /* read Silicon version using nvmem driver */
+ soc_rev = zynqmp_nvmem_get_silicon_version(&pdev->dev,
+ "soc_revision");
+
+ if (PTR_ERR(soc_rev) == -EPROBE_DEFER)
+ /* Do a deferred probe */
+ return -EPROBE_DEFER;
+ else if (!IS_ERR(soc_rev) && *soc_rev < ZYNQMP_SILICON_V4)
+ /* Add snps,dis_u3_susphy_quirk
+ * for SOC revisions less than v4
+ */
+ simple->dis_u3_susphy_quirk = true;
+
+ if (!IS_ERR(soc_rev)) {
+ /* Update soc_rev to simple for future use */
+ simple->soc_rev = *soc_rev;
+
+ /* Free soc_rev only when nvmem returned a valid
+ * pointer; freeing an error pointer would cause
+ * a kernel panic
+ */
+ kfree(soc_rev);
+ } else {
+ /* Return error */
+ return PTR_ERR(soc_rev);
+ }
+ }
+
+ return 0;
+}
+
static int dwc3_of_simple_probe(struct platform_device *pdev)
{
struct dwc3_of_simple *simple;
@@ -44,9 +288,43 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
if (!simple)
return -ENOMEM;
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops)) {
+ dev_err(dev, "Failed to get eemi_ops\n");
+ return PTR_ERR(eemi_ops);
+ }
+
platform_set_drvdata(pdev, simple);
simple->dev = dev;
+ if (of_device_is_compatible(pdev->dev.of_node, "xlnx,zynqmp-dwc3") ||
+ of_device_is_compatible(pdev->dev.of_node, "xlnx,versal-dwc3")) {
+
+ struct resource *res;
+ void __iomem *regs;
+
+ res = platform_get_resource(pdev,
+ IORESOURCE_MEM, 0);
+
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ /* Store the usb control regs into simple for further usage */
+ simple->regs = regs;
+
+ /*
+ * ZynqMP silicon revisions older than v4.0 need suspend
+ * of the USB 3.0 PHY disabled.
+ */
+ ret = dwc3_dis_u3phy_suspend(pdev, simple);
+ if (ret)
+ return ret;
+ }
+
+ /* Set phy data for future use */
+ dwc3_simple_set_phydata(simple);
+
/*
* Some controllers need to toggle the usb3-otg reset before trying to
* initialize the PHY, otherwise the PHY times out.
@@ -132,6 +410,218 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static void dwc3_simple_vbus(struct dwc3 *dwc, bool vbus_off)
+{
+ u32 reg, addr;
+ u8 val;
+
+ if (vbus_off)
+ addr = ULPI_OTG_CTRL_CLEAR;
+ else
+ addr = ULPI_OTG_CTRL_SET;
+
+ val = (1 << OTG_CTRL_DRVVBUS_OFFSET);
+
+ reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_GUSB2PHYACC_ADDR(addr);
+ reg |= DWC3_GUSB2PHYACC_WRITE | val;
+
+ addr = DWC3_OF_ADDRESS(DWC3_GUSB2PHYACC(0));
+ writel(reg, dwc->regs + addr);
+}
+
+static void dwc3_usb2phycfg(struct dwc3 *dwc, bool suspend)
+{
+ u32 addr, reg;
+
+ addr = DWC3_OF_ADDRESS(DWC3_GUSB2PHYCFG(0));
+
+ if (suspend) {
+ reg = readl(dwc->regs + addr);
+ if (!(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
+ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ writel(reg, (dwc->regs + addr));
+ }
+ } else {
+ reg = readl(dwc->regs + addr);
+ if ((reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+ writel(reg, (dwc->regs + addr));
+ }
+ }
+}
+
+static int dwc3_zynqmp_power_req(struct dwc3 *dwc, bool on)
+{
+ u32 reg, retries;
+ void __iomem *reg_base;
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+ struct device_node *node = of_get_parent(dwc->dev->of_node);
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+ reg_base = simple->regs;
+
+ /* Check if entering into D3 state is allowed during suspend */
+ if ((simple->soc_rev < ZYNQMP_SILICON_V4) || !simple->enable_d3_suspend)
+ return 0;
+
+ if (!simple->phy)
+ return 0;
+
+ if (on) {
+ dev_dbg(dwc->dev, "trying to set power state to D0....\n");
+
+ /* Release USB core reset, which was asserted during D3 entry */
+ xpsgtr_usb_crst_release(simple->phy);
+
+ /* change power state to D0 */
+ writel(XLNX_REQ_PWR_STATE_D0,
+ reg_base + XLNX_USB_REQ_PWR_STATE);
+
+ /* wait till current state is changed to D0 */
+ retries = DWC3_PWR_STATE_RETRIES;
+ do {
+ reg = readl(reg_base + XLNX_USB_CUR_PWR_STATE);
+ if ((reg & XLNX_CUR_PWR_STATE_BITMASK) ==
+ XLNX_CUR_PWR_STATE_D0)
+ break;
+
+ udelay(DWC3_PWR_TIMEOUT);
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(dwc->dev, "Failed to set power state to D0\n");
+ return -EIO;
+ }
+
+ dwc->is_d3 = false;
+
+ /* Clear Suspend PHY bit if dis_u2_susphy_quirk is set */
+ if (dwc->dis_u2_susphy_quirk)
+ dwc3_usb2phycfg(dwc, false);
+ } else {
+ dev_dbg(dwc->dev, "Trying to set power state to D3...\n");
+
+ /*
+ * Set Suspend PHY bit before entering D3 if
+ * dis_u2_susphy_quirk is set
+ */
+ if (dwc->dis_u2_susphy_quirk)
+ dwc3_usb2phycfg(dwc, true);
+
+ /* enable PME to wakeup from hibernation */
+ writel(XLNX_PME_ENABLE_SIG_GEN, reg_base + XLNX_USB_PME_ENABLE);
+
+ /* change power state to D3 */
+ writel(XLNX_REQ_PWR_STATE_D3,
+ reg_base + XLNX_USB_REQ_PWR_STATE);
+
+ /* wait till current state is changed to D3 */
+ retries = DWC3_PWR_STATE_RETRIES;
+ do {
+ reg = readl(reg_base + XLNX_USB_CUR_PWR_STATE);
+ if ((reg & XLNX_CUR_PWR_STATE_BITMASK) ==
+ XLNX_CUR_PWR_STATE_D3)
+ break;
+
+ udelay(DWC3_PWR_TIMEOUT);
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(dwc->dev, "Failed to set power state to D3\n");
+ return -EIO;
+ }
+
+ /* Assert USB core reset after entering D3 state */
+ xpsgtr_usb_crst_assert(simple->phy);
+
+ dwc->is_d3 = true;
+ }
+
+ return 0;
+}
+
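
The two D-state waits above are open-coded retry loops; since this path runs in process context, the same wait could be expressed with readl_poll_timeout() from <linux/iopoll.h>. A sketch only, where the timeout arithmetic is an assumption derived from the retry macros:

static int example_wait_for_power_state(void __iomem *reg_base, u32 want)
{
	u32 reg;

	return readl_poll_timeout(reg_base + XLNX_USB_CUR_PWR_STATE, reg,
				  (reg & XLNX_CUR_PWR_STATE_BITMASK) == want,
				  DWC3_PWR_TIMEOUT,
				  DWC3_PWR_TIMEOUT * DWC3_PWR_STATE_RETRIES);
}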
+static int dwc3_versal_power_req(struct dwc3 *dwc, bool on)
+{
+ int ret;
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+ struct device_node *node = of_get_parent(dwc->dev->of_node);
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+
+ if (!eemi_ops->ioctl || !eemi_ops->reset_assert)
+ return -ENOMEM;
+
+ if (on) {
+ dev_dbg(dwc->dev, "Trying to set power state to D0....\n");
+ ret = eemi_ops->reset_assert(VERSAL_USB_RESET_ID,
+ PM_RESET_ACTION_RELEASE);
+ if (ret < 0)
+ dev_err(simple->dev, "failed to De-assert Reset\n");
+
+ ret = eemi_ops->ioctl(VERSAL_USB_NODE_ID, IOCTL_USB_SET_STATE,
+ XLNX_REQ_PWR_STATE_D0,
+ DWC3_PWR_STATE_RETRIES * DWC3_PWR_TIMEOUT,
+ NULL);
+ if (ret < 0)
+ dev_err(simple->dev, "failed to enter D0 state\n");
+
+ dwc->is_d3 = false;
+
+ /* Clear Suspend PHY bit if dis_u2_susphy_quirk is set */
+ if (dwc->dis_u2_susphy_quirk)
+ dwc3_usb2phycfg(dwc, false);
+ } else {
+ dev_dbg(dwc->dev, "Trying to set power state to D3...\n");
+
+ /*
+ * Set Suspend PHY bit before entering D3 if
+ * dis_u2_susphy_quirk is set
+ */
+ if (dwc->dis_u2_susphy_quirk)
+ dwc3_usb2phycfg(dwc, true);
+
+ ret = eemi_ops->ioctl(VERSAL_USB_NODE_ID, IOCTL_USB_SET_STATE,
+ XLNX_REQ_PWR_STATE_D3,
+ DWC3_PWR_STATE_RETRIES * DWC3_PWR_TIMEOUT,
+ NULL);
+ if (ret < 0)
+ dev_err(simple->dev, "failed to enter D3 state\n");
+
+ ret = eemi_ops->reset_assert(VERSAL_USB_RESET_ID,
+ PM_RESET_ACTION_ASSERT);
+ if (ret < 0)
+ dev_err(simple->dev, "failed to assert Reset\n");
+
+ dwc->is_d3 = true;
+ }
+
+ return ret;
+}
+
+int dwc3_set_usb_core_power(struct dwc3 *dwc, bool on)
+{
+ int ret;
+ struct device_node *node = of_get_parent(dwc->dev->of_node);
+
+ if (of_device_is_compatible(node, "xlnx,zynqmp-dwc3"))
+ /* Set D3/D0 state for ZynqMP */
+ ret = dwc3_zynqmp_power_req(dwc, on);
+ else if (of_device_is_compatible(node, "xlnx,versal-dwc3"))
+ /* Set D3/D0 state for Versal */
+ ret = dwc3_versal_power_req(dwc, on);
+ else
+ /* This is only for Xilinx devices */
+ return 0;
+
+ return ret;
+}
+EXPORT_SYMBOL(dwc3_set_usb_core_power);
+#endif
static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
{
@@ -153,6 +643,13 @@ static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
+ if (!simple->wakeup_capable && !simple->dwc->is_d3) {
+ /* Ask ULPI to turn OFF Vbus */
+ dwc3_simple_vbus(simple->dwc, true);
+
+ clk_bulk_disable(simple->num_clocks, simple->clks);
+ }
+
if (simple->need_reset)
reset_control_assert(simple->resets);
@@ -162,11 +659,18 @@ static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
static int __maybe_unused dwc3_of_simple_resume(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
+ int ret;
+
+ if (simple->wakeup_capable || simple->dwc->is_d3)
+ return 0;
+
+ ret = clk_bulk_enable(simple->num_clocks, simple->clks);
+ dwc3_simple_vbus(simple->dwc, false);
if (simple->need_reset)
reset_control_deassert(simple->resets);
- return 0;
+ return ret;
}
static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
@@ -178,6 +682,7 @@ static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
static const struct of_device_id of_dwc3_simple_match[] = {
{ .compatible = "rockchip,rk3399-dwc3" },
{ .compatible = "xlnx,zynqmp-dwc3" },
+ { .compatible = "xlnx,versal-dwc3" },
{ .compatible = "cavium,octeon-7130-usb-uctl" },
{ .compatible = "sprd,sc9860-dwc3" },
{ .compatible = "amlogic,meson-axg-dwc3" },
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 955bf820f410..8d4f1b13f415 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -171,10 +171,12 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
/*
* A lot of BYT devices lack ACPI resource entries for
- * the GPIOs, add a fallback mapping to the reference
+ * the GPIOs. If the ACPI entry for the GPIO controller
+ * is present add a fallback mapping to the reference
* design GPIOs which all boards seem to use.
*/
- gpiod_add_lookup_table(&platform_bytcr_gpios);
+ if (acpi_dev_present("INT33FC", NULL, -1))
+ gpiod_add_lookup_table(&platform_bytcr_gpios);
/*
* These GPIOs will turn on the USB2 PHY. Note that we have to
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 7874b97e3322..742be1e07a01 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -102,7 +102,7 @@ static inline void dwc3_qcom_clrbits(void __iomem *base, u32 offset, u32 val)
readl(base + offset);
}
-static void dwc3_qcom_vbus_overrride_enable(struct dwc3_qcom *qcom, bool enable)
+static void dwc3_qcom_vbus_override_enable(struct dwc3_qcom *qcom, bool enable)
{
if (enable) {
dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL,
@@ -123,7 +123,7 @@ static int dwc3_qcom_vbus_notifier(struct notifier_block *nb,
struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, vbus_nb);
/* enable vbus override for device mode */
- dwc3_qcom_vbus_overrride_enable(qcom, event);
+ dwc3_qcom_vbus_override_enable(qcom, event);
qcom->mode = event ? USB_DR_MODE_PERIPHERAL : USB_DR_MODE_HOST;
return NOTIFY_DONE;
@@ -135,7 +135,7 @@ static int dwc3_qcom_host_notifier(struct notifier_block *nb,
struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, host_nb);
/* disable vbus override in host mode */
- dwc3_qcom_vbus_overrride_enable(qcom, !event);
+ dwc3_qcom_vbus_override_enable(qcom, !event);
qcom->mode = event ? USB_DR_MODE_HOST : USB_DR_MODE_PERIPHERAL;
return NOTIFY_DONE;
@@ -190,50 +190,61 @@ static int dwc3_qcom_register_extcon(struct dwc3_qcom *qcom)
return 0;
}
+/* Only usable in contexts where the role can not change. */
+static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
+{
+ struct dwc3 *dwc;
+
+ /*
+ * FIXME: Fix this layering violation.
+ */
+ dwc = platform_get_drvdata(qcom->dwc3);
+
+ /* Core driver may not have probed yet. */
+ if (!dwc)
+ return false;
+
+ return dwc->xhci;
+}
+
+static void dwc3_qcom_enable_wakeup_irq(int irq)
+{
+ if (!irq)
+ return;
+
+ enable_irq(irq);
+ enable_irq_wake(irq);
+}
+
+static void dwc3_qcom_disable_wakeup_irq(int irq)
+{
+ if (!irq)
+ return;
+
+ disable_irq_wake(irq);
+ disable_irq_nosync(irq);
+}
+
static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
{
- if (qcom->hs_phy_irq) {
- disable_irq_wake(qcom->hs_phy_irq);
- disable_irq_nosync(qcom->hs_phy_irq);
- }
+ dwc3_qcom_disable_wakeup_irq(qcom->hs_phy_irq);
- if (qcom->dp_hs_phy_irq) {
- disable_irq_wake(qcom->dp_hs_phy_irq);
- disable_irq_nosync(qcom->dp_hs_phy_irq);
- }
+ dwc3_qcom_disable_wakeup_irq(qcom->dp_hs_phy_irq);
- if (qcom->dm_hs_phy_irq) {
- disable_irq_wake(qcom->dm_hs_phy_irq);
- disable_irq_nosync(qcom->dm_hs_phy_irq);
- }
+ dwc3_qcom_disable_wakeup_irq(qcom->dm_hs_phy_irq);
- if (qcom->ss_phy_irq) {
- disable_irq_wake(qcom->ss_phy_irq);
- disable_irq_nosync(qcom->ss_phy_irq);
- }
+ dwc3_qcom_disable_wakeup_irq(qcom->ss_phy_irq);
}
static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom)
{
- if (qcom->hs_phy_irq) {
- enable_irq(qcom->hs_phy_irq);
- enable_irq_wake(qcom->hs_phy_irq);
- }
+ dwc3_qcom_enable_wakeup_irq(qcom->hs_phy_irq);
- if (qcom->dp_hs_phy_irq) {
- enable_irq(qcom->dp_hs_phy_irq);
- enable_irq_wake(qcom->dp_hs_phy_irq);
- }
+ dwc3_qcom_enable_wakeup_irq(qcom->dp_hs_phy_irq);
- if (qcom->dm_hs_phy_irq) {
- enable_irq(qcom->dm_hs_phy_irq);
- enable_irq_wake(qcom->dm_hs_phy_irq);
- }
+ dwc3_qcom_enable_wakeup_irq(qcom->dm_hs_phy_irq);
- if (qcom->ss_phy_irq) {
- enable_irq(qcom->ss_phy_irq);
- enable_irq_wake(qcom->ss_phy_irq);
- }
+ dwc3_qcom_enable_wakeup_irq(qcom->ss_phy_irq);
}
static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
@@ -297,7 +308,11 @@ static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data)
if (qcom->pm_suspended)
return IRQ_HANDLED;
- if (dwc->xhci)
+ /*
+ * This is safe as role switching is done from a freezable workqueue
+ * and the wakeup interrupts are disabled as part of resume.
+ */
+ if (dwc3_qcom_is_host(qcom))
pm_runtime_resume(&dwc->xhci->dev);
return IRQ_HANDLED;
@@ -346,7 +361,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ IRQF_ONESHOT,
"qcom_dwc3 HS", qcom);
if (ret) {
dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret);
@@ -361,7 +376,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ IRQF_ONESHOT,
"qcom_dwc3 DP_HS", qcom);
if (ret) {
dev_err(qcom->dev, "dp_hs_phy_irq failed: %d\n", ret);
@@ -376,7 +391,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ IRQF_ONESHOT,
"qcom_dwc3 DM_HS", qcom);
if (ret) {
dev_err(qcom->dev, "dm_hs_phy_irq failed: %d\n", ret);
@@ -391,7 +406,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ IRQF_ONESHOT,
"qcom_dwc3 SS", qcom);
if (ret) {
dev_err(qcom->dev, "ss_phy_irq failed: %d\n", ret);
@@ -538,6 +553,7 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
if (!qcom->dwc3) {
ret = -ENODEV;
dev_err(dev, "failed to get dwc3 platform device\n");
+ of_platform_depopulate(dev);
}
node_put:
@@ -562,6 +578,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct dwc3_qcom *qcom;
struct resource *res, *parent_res = NULL;
+ struct resource local_res;
int ret, i;
bool ignore_pipe_clk;
@@ -612,9 +629,8 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (np) {
parent_res = res;
} else {
- parent_res = kmemdup(res, sizeof(struct resource), GFP_KERNEL);
- if (!parent_res)
- return -ENOMEM;
+ memcpy(&local_res, res, sizeof(struct resource));
+ parent_res = &local_res;
parent_res->start = res->start +
qcom->acpi_pdata->qscratch_base_offset;
@@ -651,14 +667,14 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
- goto depopulate;
+ goto clk_disable;
}
qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev);
/* enable vbus override for device mode */
- if (qcom->mode == USB_DR_MODE_PERIPHERAL)
- dwc3_qcom_vbus_overrride_enable(qcom, true);
+ if (qcom->mode != USB_DR_MODE_HOST)
+ dwc3_qcom_vbus_override_enable(qcom, true);
/* register extcon to override sw_vbus on Vbus change later */
ret = dwc3_qcom_register_extcon(qcom);
@@ -677,7 +693,8 @@ depopulate:
if (np)
of_platform_depopulate(&pdev->dev);
else
- platform_device_put(pdev);
+ platform_device_del(qcom->dwc3);
+ platform_device_put(qcom->dwc3);
clk_disable:
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
@@ -692,10 +709,15 @@ reset_assert:
static int dwc3_qcom_remove(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
int i;
- of_platform_depopulate(dev);
+ if (np)
+ of_platform_depopulate(&pdev->dev);
+ else
+ platform_device_del(qcom->dwc3);
+ platform_device_put(qcom->dwc3);
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 4f28122f1bb8..c47e53ccd7b2 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -236,7 +236,10 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
struct dwc3_request *req;
req = next_request(&dep->pending_list);
- dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ if (!dwc->connected)
+ dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ else
+ dwc3_gadget_giveback(dep, req, -ECONNRESET);
}
dwc->ep0state = EP0_SETUP_PHASE;
@@ -339,6 +342,11 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
usb_status |= 1 << USB_DEV_STAT_U2_ENABLED;
}
+ /* Send the status bit indicating whether remote wakeup
+ * is currently enabled on the device.
+ */
+ usb_status |= dwc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+
break;
case USB_RECIP_INTERFACE:
@@ -457,7 +465,12 @@ static int dwc3_ep0_handle_device(struct dwc3 *dwc,
switch (wValue) {
case USB_DEVICE_REMOTE_WAKEUP:
+ if (set)
+ dwc->remote_wakeup = 1;
+ else
+ dwc->remote_wakeup = 0;
break;
+
/*
* 9.4.1 says only for SS, in AddressState only for
* default control pipe
@@ -474,6 +487,34 @@ static int dwc3_ep0_handle_device(struct dwc3 *dwc,
case USB_DEVICE_TEST_MODE:
ret = dwc3_ep0_handle_test(dwc, state, wIndex, set);
break;
+ case USB_DEVICE_B_HNP_ENABLE:
+ if (set) {
+ if (dwc->gadget.host_request_flag) {
+ struct usb_phy *phy =
+ usb_get_phy(USB_PHY_TYPE_USB3);
+
+ dwc->gadget.b_hnp_enable = 0;
+ dwc->gadget.host_request_flag = 0;
+ otg_start_hnp(phy->otg);
+ usb_put_phy(phy);
+ } else {
+ dwc->gadget.b_hnp_enable = 1;
+ }
+ } else
+ return -EINVAL;
+ break;
+
+ case USB_DEVICE_A_HNP_SUPPORT:
+ /* RH port supports HNP */
+ dev_dbg(dwc->dev,
+ "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n");
+ break;
+
+ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+ /* other RH port does */
+ dev_dbg(dwc->dev,
+ "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
+ break;
default:
ret = -EINVAL;
}
@@ -760,7 +801,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
switch (ctrl->bRequest) {
case USB_REQ_GET_STATUS:
- ret = dwc3_ep0_handle_status(dwc, ctrl);
+ if (le16_to_cpu(ctrl->wIndex) == OTG_STS_SELECTOR)
+ ret = dwc3_ep0_delegate_req(dwc, ctrl);
+ else
+ ret = dwc3_ep0_handle_status(dwc, ctrl);
break;
case USB_REQ_CLEAR_FEATURE:
ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 80fee7ea83ca..7f0236fb7196 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -177,6 +177,7 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
list_del(&req->list);
req->remaining = 0;
req->needs_extra_trb = false;
+ req->num_trbs = 0;
if (req->request.status == -EINPROGRESS)
req->request.status = status;
@@ -207,6 +208,9 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
{
struct dwc3 *dwc = dep->dwc;
+ if (dep->stream_capable && timer_pending(&req->stream_timeout_timer))
+ del_timer(&req->stream_timeout_timer);
+
dwc3_gadget_del_and_unmap_request(dep, req, status);
req->status = DWC3_REQUEST_STATUS_COMPLETED;
@@ -420,8 +424,7 @@ static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
}
-static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
- struct dwc3_trb *trb)
+dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, struct dwc3_trb *trb)
{
u32 offset = (char *) trb - (char *) dep->trb_pool;
@@ -536,6 +539,19 @@ static int dwc3_gadget_start_config(struct dwc3_ep *dep)
return 0;
}
+static void stream_timeout_function(struct timer_list *arg)
+{
+ struct dwc3_request *req = from_timer(req, arg, stream_timeout_timer);
+ struct dwc3_ep *dep = req->dep;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_stop_active_transfer(dep, true, false);
+ __dwc3_gadget_kick_transfer(dep);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+}
+
static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
{
const struct usb_ss_ep_comp_descriptor *comp_desc;
@@ -569,7 +585,8 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
- | DWC3_DEPCFG_STREAM_EVENT_EN;
+ | DWC3_DEPCFG_STREAM_EVENT_EN
+ | DWC3_DEPCFG_XFER_COMPLETE_EN;
dep->stream_capable = true;
}
@@ -622,7 +639,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
* Caller should take care of locking. Execute all necessary commands to
* initialize a HW endpoint so it can be used by a gadget driver.
*/
-static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
+int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
{
const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
@@ -630,7 +647,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
u32 reg;
int ret;
- if (!(dep->flags & DWC3_EP_ENABLED)) {
+ if (!(dep->flags & DWC3_EP_ENABLED) || dwc->is_hibernated) {
ret = dwc3_gadget_start_config(dep);
if (ret)
return ret;
@@ -640,7 +657,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
if (ret)
return ret;
- if (!(dep->flags & DWC3_EP_ENABLED)) {
+ if (!(dep->flags & DWC3_EP_ENABLED) || dwc->is_hibernated) {
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
@@ -654,11 +671,13 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
if (usb_endpoint_xfer_control(desc))
goto out;
- /* Initialize the TRB ring */
- dep->trb_dequeue = 0;
- dep->trb_enqueue = 0;
- memset(dep->trb_pool, 0,
- sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
+ if (!dwc->is_hibernated) {
+ /* Initialize the TRB ring */
+ dep->trb_dequeue = 0;
+ dep->trb_enqueue = 0;
+ memset(dep->trb_pool, 0,
+ sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
+ }
/* Link TRB. The HWO bit is never reset */
trb_st_hw = &dep->trb_pool[0];
@@ -674,8 +693,8 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
* Issue StartTransfer here with no-op TRB so we can always rely on No
* Response Update Transfer command.
*/
- if ((usb_endpoint_xfer_bulk(desc) && !dep->stream_capable) ||
- usb_endpoint_xfer_int(desc)) {
+ if (((usb_endpoint_xfer_bulk(desc) && !dep->stream_capable) ||
+ usb_endpoint_xfer_int(desc)) && !dwc->is_hibernated) {
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_trb *trb;
dma_addr_t trb_dma;
@@ -701,8 +720,6 @@ out:
return 0;
}
-static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
- bool interrupt);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_request *req;
@@ -739,7 +756,7 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
*
* Caller should take care of locking.
*/
-static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
u32 reg;
@@ -981,8 +998,8 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
}
- /* always enable Interrupt on Missed ISOC */
- trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
+ if (!no_interrupt && !chain)
+ trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
break;
case USB_ENDPOINT_XFER_BULK:
@@ -1016,6 +1033,16 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
if (chain)
trb->ctrl |= DWC3_TRB_CTRL_CHN;
+ /*
+ * To start a transfer on a different stream, the previously
+ * acquired transfer resource must be released. There are two
+ * ways to do this: 1. issue an End Transfer command, or
+ * 2. set the LST bit in the last control TRB.
+ *
+ * Using the LST bit avoids the time spent ending the transfer,
+ * and hence improves performance.
+ */
+ else if (dep->stream_capable)
+ trb->ctrl |= DWC3_TRB_CTRL_LST;
if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
@@ -1306,7 +1333,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep);
-static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
+int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_request *req;
@@ -1336,8 +1363,12 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
if (dep->stream_capable)
cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id);
if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
+
} else {
cmd = DWC3_DEPCMD_UPDATETRANSFER |
DWC3_DEPCMD_PARAM(dep->resource_index);
@@ -1362,6 +1393,13 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
return ret;
}
+ if (starting && dep->stream_capable) {
+ req->stream_timeout_timer.expires = jiffies +
+ msecs_to_jiffies(STREAM_TIMEOUT_MS);
+ mod_timer(&req->stream_timeout_timer,
+ req->stream_timeout_timer.expires);
+ }
+
return 0;
}
@@ -1515,11 +1553,13 @@ static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
ret = __dwc3_gadget_kick_transfer(dep);
if (ret != -EAGAIN)
break;
+ dep->flags &= ~DWC3_EP_PENDING_REQUEST;
}
return ret;
}
+static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc);
static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
struct dwc3 *dwc = dep->dwc;
@@ -1544,6 +1584,10 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
req->request.actual = 0;
req->request.status = -EINPROGRESS;
+ if (dep->stream_capable)
+ timer_setup(&req->stream_timeout_timer,
+ stream_timeout_function, 0);
+
trace_dwc3_ep_queue(req);
list_add_tail(&req->list, &dep->pending_list);
@@ -1560,6 +1604,13 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
return 0;
}
+ /* If the core is hibernated, wake it up (remote wakeup) */
+ if (dwc->is_hibernated) {
+ dwc->force_hiber_wake = true;
+ dwc3_gadget_wakeup_interrupt(dwc);
+ dwc->force_hiber_wake = false;
+ }
+
/*
* NOTICE: Isochronous endpoints should NEVER be prestarted. We must
* wait for a XferNotReady event so we will know what's the current
@@ -1570,13 +1621,22 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
*/
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
if (!(dep->flags & DWC3_EP_PENDING_REQUEST) &&
- !(dep->flags & DWC3_EP_TRANSFER_STARTED))
+ !(dep->flags & DWC3_EP_TRANSFER_STARTED))
return 0;
- if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
- if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) {
- return __dwc3_gadget_start_isoc(dep);
+ if (dep->flags & DWC3_EP_PENDING_REQUEST) {
+ if (dep->flags & DWC3_EP_TRANSFER_STARTED) {
+ /*
+ * If there are no entries in the request
+ * list, the PENDING flag is set so that an
+ * End Transfer is issued when an entry is
+ * added to the request list.
+ */
+ dwc3_stop_active_transfer(dep, true, true);
+ dep->flags = DWC3_EP_ENABLED;
}
+ /* The rest is handled on DWC3_DEPEVT_XFERNOTREADY */
+ return 0;
}
}
@@ -1659,6 +1719,9 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
spin_lock_irqsave(&dwc->lock, flags);
+ if (dep->stream_capable && timer_pending(&req->stream_timeout_timer))
+ del_timer(&req->stream_timeout_timer);
+
list_for_each_entry(r, &dep->pending_list, list) {
if (r == req)
break;
@@ -1959,7 +2022,7 @@ static void dwc3_stop_active_transfers(struct dwc3 *dwc)
}
}
-static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
+int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
u32 reg;
u32 timeout = 500;
@@ -2008,14 +2071,42 @@ static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
static void __dwc3_gadget_stop(struct dwc3 *dwc);
static int __dwc3_gadget_start(struct dwc3 *dwc);
+static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->connected = false;
+
+ /*
+ * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
+ * Section 4.1.8 Table 4-7, it states that for a device-initiated
+ * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
+ * command for any active transfers" before clearing the RunStop
+ * bit.
+ */
+ dwc3_stop_active_transfers(dwc);
+ __dwc3_gadget_stop(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ /*
+ * Note: if the GEVNTCOUNT indicates events in the event buffer, the
+ * driver needs to acknowledge them before the controller can halt.
+ * Simply let the interrupt handler acknowledge and handle the
+ * remaining events generated by the controller while polling for
+ * DSTS.DEVCTLHLT.
+ */
+ return dwc3_gadget_run_stop(dwc, false, false);
+}
+
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
struct dwc3 *dwc = gadget_to_dwc(g);
- unsigned long flags;
int ret;
is_on = !!is_on;
+ dwc->softconnect = is_on;
/*
* Per databook, when we want to stop the gadget, if a control transfer
* is still in process, complete it and get the core into setup phase.
@@ -2048,59 +2139,38 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
ret = pm_runtime_get_sync(dwc->dev);
if (!ret || ret < 0) {
pm_runtime_put(dwc->dev);
- return 0;
+ if (ret < 0)
+ pm_runtime_set_suspended(dwc->dev);
+ return ret;
}
- /*
- * Synchronize and disable any further event handling while controller
- * is being enabled/disabled.
- */
- disable_irq(dwc->irq_gadget);
-
- spin_lock_irqsave(&dwc->lock, flags);
+ if (dwc->pullups_connected == is_on) {
+ pm_runtime_put(dwc->dev);
+ return 0;
+ }
if (!is_on) {
- u32 count;
-
- dwc->connected = false;
+ ret = dwc3_gadget_soft_disconnect(dwc);
+ } else {
/*
- * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
- * Section 4.1.8 Table 4-7, it states that for a device-initiated
- * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
- * command for any active transfers" before clearing the RunStop
- * bit.
+ * The Synopsys DWC_usb31 1.90a programming guide, section
+ * 4.1.9, specifies that a reconnect after a
+ * device-initiated disconnect requires a core soft reset
+ * (DCTL.CSftRst) before enabling the run/stop bit.
*/
- dwc3_stop_active_transfers(dwc);
- __dwc3_gadget_stop(dwc);
+ dwc3_core_soft_reset(dwc);
- /*
- * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
- * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the
- * "software needs to acknowledge the events that are generated
- * (by writing to GEVNTCOUNTn) while it is waiting for this bit
- * to be set to '1'."
- */
- count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
- count &= DWC3_GEVNTCOUNT_MASK;
- if (count > 0) {
- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
- dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
- dwc->ev_buf->length;
- }
- } else {
+ dwc3_event_buffers_setup(dwc);
__dwc3_gadget_start(dwc);
+ ret = dwc3_gadget_run_stop(dwc, true, false);
}
- ret = dwc3_gadget_run_stop(dwc, is_on, false);
- spin_unlock_irqrestore(&dwc->lock, flags);
- enable_irq(dwc->irq_gadget);
-
pm_runtime_put(dwc->dev);
return ret;
}
-static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
+void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
u32 reg;
@@ -2114,6 +2184,10 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
DWC3_DEVTEN_USBRSTEN |
DWC3_DEVTEN_DISCONNEVTEN);
+ /* Enable hibernation IRQ */
+ if (dwc->has_hibernation)
+ reg |= DWC3_DEVTEN_HIBERNATIONREQEVTEN;
+
if (dwc->revision < DWC3_REVISION_250A)
reg |= DWC3_DEVTEN_ULSTCNGEN;
@@ -2124,7 +2198,7 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}
-static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
+void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
/* mask all interrupts */
dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
@@ -2208,6 +2282,16 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
dwc3_gadget_setup_nump(dwc);
+ /*
+ * For OTG mode, check if the core is currently in host mode. This is
+ * not an error condition: there are times when the core is working as
+ * a host while the kernel is told to bind a gadget class driver
+ * module. The remaining operations below are handled in the OTG
+ * driver whenever required.
+ */
+ if (dwc3_readl(dwc->regs, DWC3_GSTS) & DWC3_GSTS_CUR_MODE)
+ return 0;
+
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
@@ -2242,6 +2326,7 @@ err0:
return ret;
}
+static irqreturn_t wakeup_interrupt(int irq, void *_dwc);
static int dwc3_gadget_start(struct usb_gadget *g,
struct usb_gadget_driver *driver)
{
@@ -2259,6 +2344,18 @@ static int dwc3_gadget_start(struct usb_gadget *g,
goto err0;
}
+ /* look for wakeup interrupt if hibernation is supported */
+ if (dwc->has_hibernation) {
+ irq = dwc->irq_wakeup;
+ ret = devm_request_irq(dwc->dev, irq, wakeup_interrupt,
+ IRQF_SHARED, "usb-wakeup", dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to request wakeup irq #%d --> %d\n",
+ irq, ret);
+ goto err0;
+ }
+ }
+
spin_lock_irqsave(&dwc->lock, flags);
if (dwc->gadget_driver) {
dev_err(dwc->dev, "%s is already bound to %s\n",
@@ -2275,7 +2372,10 @@ static int dwc3_gadget_start(struct usb_gadget *g,
err1:
spin_unlock_irqrestore(&dwc->lock, flags);
- free_irq(irq, dwc);
+ if (dwc->irq_gadget)
+ free_irq(dwc->irq_gadget, dwc->ev_buf);
+ if (dwc->irq_wakeup)
+ free_irq(dwc->irq_wakeup, dwc);
err0:
return ret;
@@ -2298,6 +2398,7 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
spin_unlock_irqrestore(&dwc->lock, flags);
free_irq(dwc->irq_gadget, dwc->ev_buf);
+ free_irq(dwc->irq_wakeup, dwc);
return 0;
}
@@ -2676,7 +2777,11 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
if (event->status & DEPEVT_STATUS_SHORT && !chain)
return 1;
- if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
+ if ((event->status & DEPEVT_STATUS_IOC) &&
+ (trb->ctrl & DWC3_TRB_CTRL_IOC))
+ return 1;
+
+ if ((event->status & DEPEVT_STATUS_LST) &&
(trb->ctrl & DWC3_TRB_CTRL_LST))
return 1;
@@ -2758,35 +2863,18 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
req->needs_extra_trb = false;
}
- /*
- * The event status only reflects the status of the TRB with IOC set.
- * For the requests that don't set interrupt on completion, the driver
- * needs to check and return the status of the completed TRBs associated
- * with the request. Use the status of the last TRB of the request.
- */
- if (req->request.no_interrupt) {
- struct dwc3_trb *trb;
+ req->request.actual = req->request.length - req->remaining;
- trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue);
- switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) {
- case DWC3_TRBSTS_MISSED_ISOC:
- /* Isoc endpoint only */
- request_status = -EXDEV;
- break;
- case DWC3_TRB_STS_XFER_IN_PROG:
- /* Applicable when End Transfer with ForceRM=0 */
- case DWC3_TRBSTS_SETUP_PENDING:
- /* Control endpoint only */
- case DWC3_TRBSTS_OK:
- default:
- request_status = 0;
- break;
+ if (req->num_pending_sgs) {
+ if (!(event->status &
+ (DEPEVT_STATUS_SHORT | DEPEVT_STATUS_LST))) {
+ __dwc3_gadget_kick_transfer(dep);
+ goto out;
}
- } else {
- request_status = status;
}
- dwc3_gadget_giveback(dep, req, request_status);
+ dwc3_gadget_giveback(dep, req, status);
out:
return ret;
@@ -2844,10 +2932,26 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
if (event->status & DEPEVT_STATUS_BUSERR)
status = -ECONNRESET;
- if (event->status & DEPEVT_STATUS_MISSED_ISOC) {
+ if ((event->status & DEPEVT_STATUS_MISSED_ISOC) &&
+ usb_endpoint_xfer_isoc(dep->endpoint.desc))
status = -EXDEV;
- if (list_empty(&dep->started_list))
+ dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
+
+ if (dep->stream_capable && !list_empty(&dep->started_list))
+ __dwc3_gadget_kick_transfer(dep);
+
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ list_empty(&dep->started_list)) {
+ if (list_empty(&dep->pending_list))
+ /*
+ * If there is no entry in the request list, do
+ * not issue END TRANSFER now. Just set the
+ * PENDING flag, so that END TRANSFER is issued
+ * when an entry is added to the request list.
+ */
+ dep->flags |= DWC3_EP_PENDING_REQUEST;
+ else
stop = true;
}
@@ -2891,6 +2995,28 @@ static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
(void) __dwc3_gadget_start_isoc(dep);
}
+static void dwc3_endpoint_stream_event(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3_ep *dep;
+ struct dwc3_request *req;
+ u8 epnum = event->endpoint_number;
+ u8 stream_id;
+
+ dep = dwc->eps[epnum];
+
+ stream_id = event->parameters;
+
+ /* Check for request matching the streamid and delete the timer */
+ list_for_each_entry(req, &dep->started_list, list) {
+ if (req->request.stream_id == stream_id) {
+ if (timer_pending(&req->stream_timeout_timer))
+ del_timer(&req->stream_timeout_timer);
+ break;
+ }
+ }
+}
+
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
@@ -2915,12 +3041,21 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
}
switch (event->endpoint_event) {
+ case DWC3_DEPEVT_XFERCOMPLETE:
+ if (!dep->stream_capable)
+ break;
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ /* fall through */
case DWC3_DEPEVT_XFERINPROGRESS:
dwc3_gadget_endpoint_transfer_in_progress(dep, event);
break;
case DWC3_DEPEVT_XFERNOTREADY:
dwc3_gadget_endpoint_transfer_not_ready(dep, event);
break;
+ case DWC3_DEPEVT_STREAMEVT:
+ if (event->status == DEPEVT_STREAMEVT_FOUND)
+ dwc3_endpoint_stream_event(dwc, event);
+ break;
case DWC3_DEPEVT_EPCMDCMPLT:
cmd = DEPEVT_PARAMETER_CMD(event->parameters);
@@ -2955,8 +3090,6 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep->flags &= ~DWC3_EP_DELAY_START;
}
break;
- case DWC3_DEPEVT_STREAMEVT:
- case DWC3_DEPEVT_XFERCOMPLETE:
case DWC3_DEPEVT_RXTXFIFOEVT:
break;
}
@@ -3001,7 +3134,7 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
}
}
-static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
+void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
bool interrupt)
{
struct dwc3 *dwc = dep->dwc;
@@ -3053,6 +3186,12 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
WARN_ON_ONCE(ret);
dep->resource_index = 0;
+ /*
+ * When a transfer is stopped with the ForceRM bit false, it can be
+ * restarted by passing resource_index in params; don't lose it
+ */
+ if (force)
+ dep->resource_index = 0;
if (!interrupt)
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
else
@@ -3097,6 +3236,15 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dwc3_disconnect_gadget(dwc);
+ /* In USB 2.0, to avoid a hibernation interrupt at the time of
+ * connection, clear the DWC3_DCTL_KEEP_CONNECT bit.
+ */
+ if (dwc->has_hibernation) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->setup_packet_pending = false;
usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
@@ -3286,6 +3434,16 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
}
/*
+ * In USB 2.0, to avoid a hibernation interrupt at the time of
+ * connection, set the DWC3_DCTL_KEEP_CONNECT bit here.
+ */
+ if (dwc->has_hibernation) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+
+ /*
* Configure PHY via GUSB3PIPECTLn if required.
*
* Update GTXFIFOSIZn
@@ -3308,6 +3466,17 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
}
}
+static irqreturn_t wakeup_interrupt(int irq, void *_dwc)
+{
+ struct dwc3 *dwc = (struct dwc3 *)_dwc;
+
+ spin_lock(&dwc->lock);
+ gadget_wakeup_interrupt(dwc);
+ spin_unlock(&dwc->lock);
+
+ return IRQ_HANDLED;
+}
+
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
unsigned int evtinfo)
{
@@ -3435,10 +3604,12 @@ static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
* STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
* Device Fallback from SuperSpeed
*/
- if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
+ if ((!!is_ss ^ (dwc->speed >= DWC3_DSTS_SUPERSPEED)) &&
+ (!(dwc->has_hibernation)))
return;
/* enter hibernation here */
+ gadget_hibernation_interrupt(dwc);
}
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
@@ -3532,11 +3703,17 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
*/
evt->lpos = (evt->lpos + 4) % evt->length;
left -= 4;
+
+ if (dwc->is_hibernated)
+ break;
}
evt->count = 0;
ret = IRQ_HANDLED;
+ if (dwc->is_hibernated)
+ return ret;
+
/* Unmask interrupt */
reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
reg &= ~DWC3_GEVNTSIZ_INTMASK;
@@ -3577,12 +3754,20 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
u32 reg;
if (pm_runtime_suspended(dwc->dev)) {
+ dwc->pending_events = true;
+ /*
+ * Trigger runtime resume. The get() function will be balanced
+ * after processing the pending events in
+ * dwc3_process_pending_events().
+ */
pm_runtime_get(dwc->dev);
disable_irq_nosync(dwc->irq_gadget);
- dwc->pending_events = true;
return IRQ_HANDLED;
}
+ if (dwc->is_hibernated)
+ return IRQ_HANDLED;
+
/*
* With PCIe legacy interrupt, test shows that top-half irq handler can
* be called again after HW interrupt deassertion. Check if bottom-half
@@ -3626,7 +3811,7 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt)
static int dwc3_gadget_get_irq(struct dwc3 *dwc)
{
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
- int irq;
+ int irq, irq_hiber;
irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral");
if (irq > 0)
@@ -3644,12 +3829,23 @@ static int dwc3_gadget_get_irq(struct dwc3 *dwc)
irq = platform_get_irq(dwc3_pdev, 0);
if (irq > 0)
- goto out;
-
- if (!irq)
- irq = -EINVAL;
+ dwc->irq_gadget = irq;
+ if (irq == -EPROBE_DEFER)
+ return irq;
out:
+ /* look for wakeup interrupt if hibernation is supported */
+ if (dwc->has_hibernation) {
+ irq_hiber = platform_get_irq_byname(dwc3_pdev, "hiber");
+ if (irq_hiber > 0) {
+ dwc->irq_wakeup = irq_hiber;
+ } else {
+ irq_hiber = platform_get_irq(dwc3_pdev, 2);
+ if (irq_hiber > 0)
+ dwc->irq_wakeup = irq_hiber;
+ }
+ }
+
return irq;
}
@@ -3742,6 +3938,28 @@ int dwc3_gadget_init(struct dwc3 *dwc)
dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed);
+ if (dwc->dr_mode == USB_DR_MODE_OTG) {
+ struct usb_phy *phy;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ if (!IS_ERR(phy)) {
+ if (phy && phy->otg) {
+ ret = otg_set_peripheral(phy->otg,
+ &dwc->gadget);
+ if (ret) {
+ dev_err(dwc->dev,
+ "otg_set_peripheral failed\n");
+ usb_put_phy(phy);
+ phy = NULL;
+ goto err4;
+ }
+ } else {
+ usb_put_phy(phy);
+ phy = NULL;
+ }
+ }
+ }
+
return 0;
err4:
@@ -3780,6 +3998,16 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
if (!dwc->gadget_driver)
return 0;
+ if (dwc->is_hibernated) {
+ /*
+ * As we are about to suspend, wake the controller from
+ * D3 & hibernation states
+ */
+ dwc->force_hiber_wake = true;
+ gadget_wakeup_interrupt(dwc);
+ dwc->force_hiber_wake = false;
+ }
+
dwc3_gadget_run_stop(dwc, false, false);
dwc3_disconnect_gadget(dwc);
__dwc3_gadget_stop(dwc);
@@ -3790,8 +4018,9 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
int dwc3_gadget_resume(struct dwc3 *dwc)
{
int ret;
+ u32 reg;
- if (!dwc->gadget_driver)
+ if (!dwc->gadget_driver || !dwc->softconnect)
return 0;
ret = __dwc3_gadget_start(dwc);
@@ -3802,6 +4031,15 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
if (ret < 0)
goto err1;
+ /* In USB 2.0, to avoid a hibernation interrupt at the time of
+ * connection, set the DWC3_DCTL_KEEP_CONNECT bit.
+ */
+ if (dwc->has_hibernation) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+
return 0;
err1:
@@ -3815,6 +4053,8 @@ void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
{
if (dwc->pending_events) {
dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
+ dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf);
+ pm_runtime_put(dwc->dev);
dwc->pending_events = false;
enable_irq(dwc->irq_gadget);
}
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index f207e59c7d03..9122afe6fc43 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -54,6 +54,14 @@ struct dwc3;
/* U2 Device exit Latency */
#define DWC3_DEFAULT_U2_DEV_EXIT_LAT 0x1FF /* Less then 511 microsec */
+/* Below used in hibernation */
+#define DWC3_NON_STICKY_RESTORE_RETRIES 500
+#define DWC3_NON_STICKY_SAVE_RETRIES 500
+#define DWC3_DEVICE_CTRL_READY_RETRIES 20000
+#define DWC3_NON_STICKY_RESTORE_DELAY 100
+#define DWC3_NON_STICKY_SAVE_DELAY 100
+#define DWC3_DEVICE_CTRL_READY_DELAY 5
+
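Taken together with the polling loops in gadget_hibernation.c below (one udelay() per retry), these constants bound the busy waits at roughly 500 * 100 us = 50 ms for the controller save and restore handshakes, and 20000 * 5 us = 100 ms for the device-controller-ready poll.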
/* -------------------------------------------------------------------------- */
#define to_dwc3_request(r) (container_of(r, struct dwc3_request, request))
@@ -106,11 +114,21 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
void dwc3_ep0_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event);
void dwc3_ep0_out_start(struct dwc3 *dwc);
+void dwc3_gadget_enable_irq(struct dwc3 *dwc);
+void dwc3_gadget_disable_irq(struct dwc3 *dwc);
int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
gfp_t gfp_flags);
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
+int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action);
+int __dwc3_gadget_ep_disable(struct dwc3_ep *dep);
+int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep);
+void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt);
+int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend);
+dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, struct dwc3_trb *trb);
+void gadget_hibernation_interrupt(struct dwc3 *dwc);
+void gadget_wakeup_interrupt(struct dwc3 *dwc);
void dwc3_ep0_send_delayed_status(struct dwc3 *dwc);
/**
diff --git a/drivers/usb/dwc3/gadget_hibernation.c b/drivers/usb/dwc3/gadget_hibernation.c
new file mode 100644
index 000000000000..3f6a98150764
--- /dev/null
+++ b/drivers/usb/dwc3/gadget_hibernation.c
@@ -0,0 +1,567 @@
+/**
+ * gadget_hibernation.c - DesignWare USB3 DRD Controller gadget hibernation file
+ *
+ * This file has routines to handle hibernation and wakeup events in gadget mode
+ *
+ * Author: Mayank Adesara <madesara@xilinx.com>
+ * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "core.h"
+#include "gadget.h"
+#include "debug.h"
+#include "io.h"
+
+/* array of registers to save on hibernation and restore them on wakeup */
+static u32 save_reg_addr[] = {
+ DWC3_DCTL,
+ DWC3_DCFG,
+ DWC3_DEVTEN
+};
+
+/*
+ * wait_timeout - Waits until timeout
+ * @wait_time: time to wait in jiffies
+ */
+static void wait_timeout(unsigned long wait_time)
+{
+ unsigned long timeout = jiffies + wait_time;
+
+ while (!time_after_eq(jiffies, timeout))
+ cpu_relax();
+}
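wait_timeout() is a plain busy wait expressed in jiffies. A caller that wants to pause for 10 ms, as gadget_wakeup_interrupt() below does before trusting a wakeup event, writes:

	wait_timeout(msecs_to_jiffies(10));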
+
+/**
+ * save_regs - Saves registers on hibernation
+ * @dwc: pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int save_regs(struct dwc3 *dwc)
+{
+ int i;
+
+ if (!dwc->saved_regs) {
+ dwc->saved_regs = devm_kmalloc(dwc->dev,
+ sizeof(save_reg_addr),
+ GFP_KERNEL);
+ if (!dwc->saved_regs) {
+ dev_err(dwc->dev, "Not enough memory to save regs\n");
+ return -ENOMEM;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(save_reg_addr); i++)
+ dwc->saved_regs[i] = dwc3_readl(dwc->regs,
+ save_reg_addr[i]);
+ return 0;
+}
+
+/**
+ * restore_regs - Restores registers on wakeup
+ * @dwc: pointer to our controller context structure
+ */
+static void restore_regs(struct dwc3 *dwc)
+{
+ int i;
+
+ if (!dwc->saved_regs) {
+ dev_warn(dwc->dev, "Regs not saved\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(save_reg_addr); i++)
+ dwc3_writel(dwc->regs, save_reg_addr[i],
+ dwc->saved_regs[i]);
+}
+
+/**
+ * restart_ep0_trans - Restarts EP0 transfer on wakeup
+ * @dwc: pointer to our controller context structure
+ * epnum: endpoint number
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int restart_ep0_trans(struct dwc3 *dwc, int epnum)
+{
+ struct dwc3_ep *dep = dwc->eps[epnum];
+ struct dwc3_trb *trb = dwc->ep0_trb;
+ struct dwc3_gadget_ep_cmd_params params;
+ int ret;
+ u32 cmd;
+
+ memset(&params, 0, sizeof(params));
+ params.param0 = upper_32_bits(dwc->ep0_trb_addr);
+ params.param1 = lower_32_bits(dwc->ep0_trb_addr);
+
+ /* set HWO bit back to 1 and restart transfer */
+ trb->ctrl |= DWC3_TRB_CTRL_HWO;
+
+ /* Clear the TRBSTS field */
+ trb->size &= ~(0x0F << 28);
+
+ cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_PARAM(0);
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ if (ret < 0) {
+ dev_err(dwc->dev, "failed to restart transfer on %s\n",
+ dep->name);
+ return ret;
+ }
+
+ dwc3_gadget_ep_get_transfer_index(dep);
+
+ return 0;
+}
+
+extern dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
+ struct dwc3_trb *trb);
+/**
+ * restore_eps - Restores non-EP0 endpoints to the state they were in before
+ * hibernation
+ * @dwc: pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int restore_eps(struct dwc3 *dwc)
+{
+ int epnum, ret;
+
+ for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ /* Enable the endpoint */
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ ret = __dwc3_gadget_ep_enable(dep, true);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ return ret;
+ }
+ }
+
+ for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ if (dep->flags & DWC3_EP_STALL) {
+ /* Set stall for the endpoint */
+ struct dwc3_gadget_ep_cmd_params params;
+
+ memset(&params, 0x00, sizeof(params));
+
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
+ &params);
+ if (ret) {
+ dev_err(dwc->dev, "failed to set STALL on %s\n",
+ dep->name);
+ return ret;
+ }
+ } else {
+ u32 cmd;
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_trb *trb;
+ u8 trb_dequeue = dep->trb_dequeue;
+
+ trb = &dep->trb_pool[trb_dequeue];
+
+ /*
+ * Check whether the last processed TRB's TRBSTS field
+ * has value 4 (TRBInProgress); if so, resubmit the TRB
+ */
+ if (DWC3_TRB_SIZE_TRBSTS(trb->size) ==
+ DWC3_TRB_STS_XFER_IN_PROG) {
+ /* Set the HWO bit */
+ trb->ctrl |= DWC3_TRB_CTRL_HWO;
+
+ /* Clear the TRBSTS field */
+ trb->size &= ~(0x0F << 28);
+
+ memset(&params, 0, sizeof(params));
+
+ /* Issue starttransfer */
+ params.param0 =
+ upper_32_bits(dwc3_trb_dma_offset(dep,
+ trb));
+ params.param1 =
+ lower_32_bits(dwc3_trb_dma_offset(dep,
+ trb));
+
+ cmd = DWC3_DEPCMD_STARTTRANSFER |
+ DWC3_DEPCMD_PARAM(0);
+
+ dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+
+ dwc3_gadget_ep_get_transfer_index(dep);
+ } else {
+ ret = __dwc3_gadget_kick_transfer(dep);
+ if (ret) {
+ dev_err(dwc->dev,
+ "%s: restart transfer failed\n",
+ dep->name);
+ return ret;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * restore_ep0 - Restores EP0 to the same state as it was before hibernation
+ * @dwc: pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int restore_ep0(struct dwc3 *dwc)
+{
+ int epnum, ret;
+
+ for (epnum = 0; epnum < 2; epnum++) {
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ ret = __dwc3_gadget_ep_enable(dep, true);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ return ret;
+ }
+
+ if (dep->flags & DWC3_EP_STALL) {
+ struct dwc3_gadget_ep_cmd_params params;
+
+ memset(&params, 0x00, sizeof(params));
+
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
+ &params);
+ if (ret) {
+ dev_err(dwc->dev, "failed to set STALL on %s\n",
+ dep->name);
+ return ret;
+ }
+ } else {
+ if (!dep->resource_index && epnum)
+ continue;
+
+ ret = restart_ep0_trans(dwc, epnum);
+ if (ret) {
+ dev_err(dwc->dev,
+ "failed to restart transfer on: %s\n",
+ dep->name);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * save_endpoint_state - Saves ep state on hibernation
+ * @dep: endpoint to get state
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int save_endpoint_state(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_gadget_ep_cmd_params params;
+ int ret;
+
+ memset(&params, 0, sizeof(params));
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_GETEPSTATE,
+ &params);
+ if (ret) {
+ dev_err(dwc->dev, "Failed to get endpoint state on %s\n",
+ dep->name);
+ return ret;
+ }
+
+ dep->saved_state = dwc3_readl(dep->regs, DWC3_DEPCMDPAR2);
+ return 0;
+}
+
+/**
+ * gadget_hibernation_interrupt - Interrupt handler of hibernation
+ * @dwc: pointer to our controller context structure
+ */
+void gadget_hibernation_interrupt(struct dwc3 *dwc)
+{
+ u32 epnum, reg;
+ int retries, ret;
+
+ /* Check if the link state is valid before hibernating */
+ switch (dwc3_gadget_get_link_state(dwc)) {
+ case DWC3_LINK_STATE_U3:
+ case DWC3_LINK_STATE_SS_DIS:
+ break;
+ default:
+ dev_dbg(dwc->dev,
+ "%s: Got fake hiber event\n", __func__);
+ return;
+ }
+
+ /* stop all active transfers and save endpoint status */
+ for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ if (dep->flags & DWC3_EP_TRANSFER_STARTED)
+ dwc3_stop_active_transfer(dep, false, true);
+
+ save_endpoint_state(dep);
+ }
+
+ /* stop the controller */
+ dwc3_gadget_run_stop(dwc, false, true);
+ dwc->is_hibernated = true;
+
+ /*
+ * Ack the events without processing them; the hardware decrements the
+ * count by the value written
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
+ dwc->ev_buf->count = 0;
+ dwc->ev_buf->flags &= ~DWC3_EVENT_PENDING;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+
+ /* disable keep connect if we are disconnected right now */
+ if (dwc3_gadget_get_link_state(dwc) == DWC3_LINK_STATE_SS_DIS) {
+ reg &= ~DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ } else {
+ reg |= DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+
+ /* save generic registers */
+ save_regs(dwc);
+
+ /* initiate controller save state */
+ reg |= DWC3_DCTL_CSS;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ /* wait till controller saves state */
+ retries = DWC3_NON_STICKY_SAVE_RETRIES;
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ if (!(reg & DWC3_DSTS_SSS))
+ break;
+
+ udelay(DWC3_NON_STICKY_SAVE_DELAY);
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(dwc->dev, "USB core failed to save state\n");
+ goto err;
+ }
+
+ /* Set the controller as wakeup capable */
+ dwc3_simple_wakeup_capable(dwc->dev, true);
+
+ /* set USB core power state to D3 - power down */
+ ret = dwc3_set_usb_core_power(dwc, false);
+ if (ret < 0) {
+ dev_err(dwc->dev, "%s: Failed to hibernate\n", __func__);
+ /* call wakeup handler */
+ gadget_wakeup_interrupt(dwc);
+ return;
+ }
+
+ dev_info(dwc->dev, "Hibernated!\n");
+ return;
+
+err:
+ dev_err(dwc->dev, "Fail in handling Hibernation Interrupt\n");
+}
+
+/**
+ * gadget_wakeup_interrupt - Interrupt handler of wakeup
+ * @dwc: pointer to our controller context structure
+ */
+void gadget_wakeup_interrupt(struct dwc3 *dwc)
+{
+ u32 reg, link_state;
+ int ret, retries;
+ bool enter_hiber = false;
+
+ /* On USB 2.0 we observed back to back wakeup interrupts */
+ if (!dwc->is_hibernated) {
+ dev_err(dwc->dev, "Not in hibernated state\n");
+ goto err;
+ }
+
+ /* Restore power to USB core */
+ if (dwc3_set_usb_core_power(dwc, true)) {
+ dev_err(dwc->dev, "Failed to restore USB core power\n");
+ goto err;
+ }
+
+ /* Clear the controller wakeup capable flag */
+ dwc3_simple_wakeup_capable(dwc->dev, false);
+
+ /* Initialize the core and restore the saved registers */
+ dwc3_core_init(dwc);
+ restore_regs(dwc);
+
+ /* ask the controller to restore the non-sticky registers */
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_CRS;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ /* Wait till non-sticky registers are restored */
+ retries = DWC3_NON_STICKY_RESTORE_RETRIES;
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ if (!(reg & DWC3_DSTS_RSS))
+ break;
+
+ udelay(DWC3_NON_STICKY_RESTORE_DELAY);
+ } while (--retries);
+
+ if (!retries || (reg & DWC3_DSTS_SRE)) {
+ dev_err(dwc->dev, "Failed to restore non-sticky regs\n");
+ goto err;
+ }
+
+ /* restore ep0 endpoints */
+ ret = restore_ep0(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "Failed in restorig EP0 states\n");
+ goto err;
+ }
+
+ /* start the controller */
+ ret = dwc3_gadget_run_stop(dwc, true, false);
+ if (ret < 0) {
+ dev_err(dwc->dev, "USB core failed to start on wakeup\n");
+ goto err;
+ }
+
+ /* Wait until device controller is ready */
+ retries = DWC3_DEVICE_CTRL_READY_RETRIES;
+ while (--retries) {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ if (reg & DWC3_DSTS_DCNRD)
+ udelay(DWC3_DEVICE_CTRL_READY_DELAY);
+ else
+ break;
+ }
+
+ if (!retries) {
+ dev_err(dwc->dev, "USB core failed to restore controller\n");
+ goto err;
+ }
+
+ /*
+ * As some spurious signals can also cause a wakeup event, wait for a
+ * while and check the link state to confirm that the wakeup is real
+ */
+ wait_timeout(msecs_to_jiffies(10));
+
+ link_state = dwc3_gadget_get_link_state(dwc);
+
+ /* check if the link state is in a valid state */
+ switch (link_state) {
+ case DWC3_LINK_STATE_RESET:
+ /* Reset devaddr */
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~(DWC3_DCFG_DEVADDR_MASK);
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+
+ /* issue recovery on the link */
+ ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
+ if (ret < 0) {
+ dev_err(dwc->dev,
+ "Failed to set link state to Recovery\n");
+ goto err;
+ }
+
+ break;
+
+ case DWC3_LINK_STATE_SS_DIS:
+ /* Clear keep-connect so we don't reconnect to the host */
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ /* fall through */
+ case DWC3_LINK_STATE_U3:
+ /* Ignore wakeup event as the link is still in U3 state */
+ dev_dbg(dwc->dev, "False wakeup event %d\n", link_state);
+
+ if (!dwc->force_hiber_wake)
+ enter_hiber = true;
+ break;
+
+ default:
+ /* issue recovery on the link */
+ ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
+ if (ret < 0) {
+ dev_err(dwc->dev,
+ "Failed to set link state to Recovery\n");
+ goto err;
+ }
+
+ break;
+ }
+
+ if (link_state != DWC3_LINK_STATE_SS_DIS) {
+ /* Restore non EP0 EPs */
+ ret = restore_eps(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "Failed restoring non-EP0 states\n");
+ goto err;
+ }
+ }
+
+ /* clear the flag */
+ dwc->is_hibernated = false;
+
+ if (enter_hiber) {
+ /*
+ * As the wakeup was caused by spurious signals,
+ * enter hibernation again
+ */
+ gadget_hibernation_interrupt(dwc);
+ return;
+ }
+
+ dev_info(dwc->dev, "We are back from hibernation!\n");
+ return;
+
+err:
+ dev_err(dwc->dev, "Fail in handling Wakeup Interrupt\n");
+}
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index fa252870c926..b383953f0b26 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -8,9 +8,21 @@
*/
#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/usb/xhci_pdriver.h>
#include "core.h"
+void dwc3_host_wakeup_capable(struct device *dev, bool wakeup)
+{
+ dwc3_simple_wakeup_capable(dev, wakeup);
+}
+EXPORT_SYMBOL(dwc3_host_wakeup_capable);
+
+static const struct xhci_plat_priv dwc3_xhci_plat_priv = {
+ .quirks = XHCI_SKIP_PHY_INIT,
+};
+
static int dwc3_host_get_irq(struct dwc3 *dwc)
{
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
@@ -93,6 +105,10 @@ int dwc3_host_init(struct dwc3 *dwc)
if (dwc->usb2_lpm_disable)
props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable");
+ if (device_property_read_bool(&dwc3_pdev->dev,
+ "snps,xhci-stream-quirk"))
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-stream-quirk");
+
/**
* WORKAROUND: dwc3 revisions <=3.00a have a limitation
* where Port Disable command doesn't work.
@@ -113,6 +129,17 @@ int dwc3_host_init(struct dwc3 *dwc)
}
}
+ if (dwc->dr_mode == USB_DR_MODE_OTG) {
+ struct usb_phy *phy = usb_get_phy(USB_PHY_TYPE_USB3);
+
+ if (!IS_ERR(phy)) {
+ if (phy && phy->otg)
+ otg_set_host(phy->otg,
+ (struct usb_bus *)0xdeadbeef);
+ usb_put_phy(phy);
+ }
+ }
+
ret = platform_device_add(xhci);
if (ret) {
dev_err(dwc->dev, "failed to register xHCI device\n");
@@ -128,4 +155,5 @@ err:
void dwc3_host_exit(struct dwc3 *dwc)
{
platform_device_unregister(dwc->xhci);
+ dwc->xhci = NULL;
}
diff --git a/drivers/usb/dwc3/otg.c b/drivers/usb/dwc3/otg.c
new file mode 100644
index 000000000000..247f942e7078
--- /dev/null
+++ b/drivers/usb/dwc3/otg.c
@@ -0,0 +1,2199 @@
+/**
+ * otg.c - DesignWare USB3 DRD Controller OTG file
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Author: Manish Narani <mnarani@xilinx.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/version.h>
+#include <linux/sysfs.h>
+
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/phy.h>
+
+#include <../drivers/usb/host/xhci.h>
+#include "platform_data.h"
+#include "core.h"
+#include "gadget.h"
+#include "io.h"
+#include "otg.h"
+
+#include <linux/ulpi/regs.h>
+#include <linux/ulpi/driver.h>
+#include "debug.h"
+
+/* Print the hardware registers' values for debugging purposes */
+static void print_debug_regs(struct dwc3_otg *otg)
+{
+ u32 gctl = otg_read(otg, DWC3_GCTL);
+ u32 gsts = otg_read(otg, DWC3_GSTS);
+ u32 gdbgltssm = otg_read(otg, DWC3_GDBGLTSSM);
+ u32 gusb2phycfg0 = otg_read(otg, DWC3_GUSB2PHYCFG(0));
+ u32 gusb3pipectl0 = otg_read(otg, DWC3_GUSB3PIPECTL(0));
+ u32 dcfg = otg_read(otg, DWC3_DCFG);
+ u32 dctl = otg_read(otg, DWC3_DCTL);
+ u32 dsts = otg_read(otg, DWC3_DSTS);
+ u32 ocfg = otg_read(otg, OCFG);
+ u32 octl = otg_read(otg, OCTL);
+ u32 oevt = otg_read(otg, OEVT);
+ u32 oevten = otg_read(otg, OEVTEN);
+ u32 osts = otg_read(otg, OSTS);
+
+ otg_info(otg, "gctl = %08x\n", gctl);
+ otg_info(otg, "gsts = %08x\n", gsts);
+ otg_info(otg, "gdbgltssm = %08x\n", gdbgltssm);
+ otg_info(otg, "gusb2phycfg0 = %08x\n", gusb2phycfg0);
+ otg_info(otg, "gusb3pipectl0 = %08x\n", gusb3pipectl0);
+ otg_info(otg, "dcfg = %08x\n", dcfg);
+ otg_info(otg, "dctl = %08x\n", dctl);
+ otg_info(otg, "dsts = %08x\n", dsts);
+ otg_info(otg, "ocfg = %08x\n", ocfg);
+ otg_info(otg, "octl = %08x\n", octl);
+ otg_info(otg, "oevt = %08x\n", oevt);
+ otg_info(otg, "oevten = %08x\n", oevten);
+ otg_info(otg, "osts = %08x\n", osts);
+}
+
+/* Check whether the hardware supports HNP or not */
+static int hnp_capable(struct dwc3_otg *otg)
+{
+ if (otg->hwparams6 & GHWPARAMS6_HNP_SUPPORT_ENABLED)
+ return 1;
+ return 0;
+}
+
+/* Check whether the hardware supports SRP or not */
+static int srp_capable(struct dwc3_otg *otg)
+{
+ if (otg->hwparams6 & GHWPARAMS6_SRP_SUPPORT_ENABLED)
+ return 1;
+ return 0;
+}
+
+/* Wake up the main thread to execute the OTG flow after an event */
+static void wakeup_main_thread(struct dwc3_otg *otg)
+{
+ if (!otg->main_thread)
+ return;
+
+ otg_vdbg(otg, "\n");
+ /* Tell the main thread that something has happened */
+ otg->main_wakeup_needed = 1;
+ wake_up_interruptible(&otg->main_wq);
+}
+
+/* Sleep main thread for 'msecs' to wait for an event to occur */
+static int sleep_main_thread_timeout(struct dwc3_otg *otg, int msecs)
+{
+ signed long timeout;
+ int rc = msecs;
+
+ if (signal_pending(current)) {
+ otg_dbg(otg, "Main thread signal pending\n");
+ rc = -EINTR;
+ goto done;
+ }
+ if (otg->main_wakeup_needed) {
+ otg_dbg(otg, "Main thread wakeup needed\n");
+ rc = msecs;
+ goto done;
+ }
+
+ timeout = msecs_to_jiffies(msecs);
+ rc = wait_event_freezable_timeout(otg->main_wq,
+ otg->main_wakeup_needed,
+ timeout);
+
+ if (rc > 0)
+ rc = jiffies_to_msecs(rc);
+
+done:
+ otg->main_wakeup_needed = 0;
+ return rc;
+}
+
+/* Sleep main thread to wait for an event to occur */
+static int sleep_main_thread(struct dwc3_otg *otg)
+{
+ int rc;
+
+ do {
+ rc = sleep_main_thread_timeout(otg, 5000);
+ } while (rc == 0);
+
+ return rc;
+}
+
+static void get_events(struct dwc3_otg *otg, u32 *otg_events, u32 *user_events)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&otg->lock, flags);
+
+ if (otg_events)
+ *otg_events = otg->otg_events;
+
+ if (user_events)
+ *user_events = otg->user_events;
+
+ spin_unlock_irqrestore(&otg->lock, flags);
+}
+
+static void get_and_clear_events(struct dwc3_otg *otg, u32 *otg_events,
+ u32 *user_events)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&otg->lock, flags);
+
+ if (otg_events)
+ *otg_events = otg->otg_events;
+
+ if (user_events)
+ *user_events = otg->user_events;
+
+ otg->otg_events = 0;
+ otg->user_events = 0;
+
+ spin_unlock_irqrestore(&otg->lock, flags);
+}
+
+static int check_event(struct dwc3_otg *otg, u32 otg_mask, u32 user_mask)
+{
+ u32 otg_events;
+ u32 user_events;
+
+ get_events(otg, &otg_events, &user_events);
+ if ((otg_events & otg_mask) || (user_events & user_mask)) {
+ otg_dbg(otg, "Event occurred: otg_events=%x, otg_mask=%x, \
+ user_events=%x, user_mask=%x\n", otg_events,
+ otg_mask, user_events, user_mask);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int sleep_until_event(struct dwc3_otg *otg, u32 otg_mask, u32 user_mask,
+ u32 *otg_events, u32 *user_events, int timeout)
+{
+ int rc;
+
+ /* Enable the events */
+ if (otg_mask)
+ otg_write(otg, OEVTEN, otg_mask);
+
+ /* Wait until it occurs, or timeout, or interrupt. */
+ if (timeout) {
+ otg_vdbg(otg, "Waiting for event (timeout=%d)...\n", timeout);
+ rc = sleep_main_thread_until_condition_timeout(otg,
+ check_event(otg, otg_mask, user_mask), timeout);
+ } else {
+ otg_vdbg(otg, "Waiting for event (no timeout)...\n");
+ rc = sleep_main_thread_until_condition(otg,
+ check_event(otg, otg_mask, user_mask));
+ }
+
+ /* Disable the events */
+ otg_write(otg, OEVTEN, 0);
+
+ otg_vdbg(otg, "Woke up rc=%d\n", rc);
+ if (rc >= 0)
+ get_and_clear_events(otg, otg_events, user_events);
+
+ return rc;
+}
+
+static void set_capabilities(struct dwc3_otg *otg)
+{
+ u32 ocfg = 0;
+
+ otg_dbg(otg, "\n");
+ if (srp_capable(otg))
+ ocfg |= OCFG_SRP_CAP;
+
+ if (hnp_capable(otg))
+ ocfg |= OCFG_HNP_CAP;
+
+ otg_write(otg, OCFG, ocfg);
+
+ otg_dbg(otg, "Enabled SRP and HNP capabilities in OCFG\n");
+}
+
+static int otg3_handshake(struct dwc3_otg *otg, u32 reg, u32 mask, u32 done,
+ u32 msec)
+{
+ u32 result;
+ u32 usec = msec * 1000;
+
+ otg_vdbg(otg, "reg=%08x, mask=%08x, value=%08x\n", reg, mask, done);
+ do {
+ result = otg_read(otg, reg);
+ if ((result & mask) == done)
+ return 1;
+ udelay(1);
+ usec -= 1;
+ } while (usec > 0);
+
+ return 0;
+}
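otg3_handshake() polls reg until (value & mask) == done or the millisecond budget expires, returning 1 on success and 0 on timeout. For example, set_peri_mode() below waits up to 100 ms for OSTS to reflect peripheral mode, along the lines of:

	if (!otg3_handshake(otg, OSTS, OSTS_PERIP_MODE, OSTS_PERIP_MODE, 100))
		otg_err(otg, "Failed to set peripheral mode\n");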
+
+static int reset_port(struct dwc3_otg *otg)
+{
+ otg_dbg(otg, "\n");
+ if (!otg->otg.host)
+ return -ENODEV;
+ return usb_bus_start_enum(otg->otg.host, 1);
+}
+
+static int set_peri_mode(struct dwc3_otg *otg, int mode)
+{
+ u32 octl;
+
+ /* Set peri_mode */
+ octl = otg_read(otg, OCTL);
+ if (mode)
+ octl |= OCTL_PERI_MODE;
+ else
+ octl &= ~OCTL_PERI_MODE;
+
+ otg_write(otg, OCTL, octl);
+ otg_dbg(otg, "set OCTL PERI_MODE = %d in OCTL\n", mode);
+
+ if (mode)
+ return otg3_handshake(otg, OSTS, OSTS_PERIP_MODE,
+ OSTS_PERIP_MODE, 100);
+
+ return otg3_handshake(otg, OSTS, OSTS_PERIP_MODE, 0, 100);
+}
+
+static int start_host(struct dwc3_otg *otg)
+{
+ int ret = -ENODEV;
+ int flg;
+ u32 octl;
+ u32 ocfg;
+ u32 dctl;
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ otg_dbg(otg, "\n");
+
+ if (!otg->otg.host)
+ return -ENODEV;
+
+ /*
+ * Prevent the host USBCMD.HCRST from resetting OTG core by setting
+ * OCFG.OTGSftRstMsk
+ */
+ ocfg = otg_read(otg, OCFG);
+ ocfg |= DWC3_OCFG_SFTRSTMASK;
+ otg_write(otg, OCFG, ocfg);
+
+ dctl = otg_read(otg, DCTL);
+ if (dctl & DWC3_DCTL_RUN_STOP) {
+ otg_dbg(otg, "Disabling the RUN/STOP bit\n");
+ dctl &= ~DWC3_DCTL_RUN_STOP;
+ otg_write(otg, DCTL, dctl);
+ }
+
+ if (!set_peri_mode(otg, PERI_MODE_HOST)) {
+ otg_err(otg, "Failed to start host\n");
+ return -EINVAL;
+ }
+
+ hcd = container_of(otg->otg.host, struct usb_hcd, self);
+ xhci = hcd_to_xhci(hcd);
+ otg_dbg(otg, "hcd=%p xhci=%p\n", hcd, xhci);
+
+ if (otg->host_started) {
+ otg_info(otg, "Host already started\n");
+ goto skip;
+ }
+
+ /* Start host driver */
+
+ *(struct xhci_hcd **)hcd->hcd_priv = xhci;
+ ret = usb_add_hcd(hcd, otg->hcd_irq, IRQF_SHARED);
+ if (ret) {
+ otg_err(otg, "%s: failed to start primary hcd, ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ if (xhci->shared_hcd) {
+ *(struct xhci_hcd **)xhci->shared_hcd->hcd_priv = xhci;
+ ret = usb_add_hcd(xhci->shared_hcd, otg->hcd_irq, IRQF_SHARED);
+ if (ret) {
+ otg_err(otg,
+ "%s: failed to start secondary hcd, ret=%d\n",
+ __func__, ret);
+ usb_remove_hcd(hcd);
+ return ret;
+ }
+ }
+
+ otg->host_started = 1;
+skip:
+ hcd->self.otg_port = 1;
+ if (xhci->shared_hcd)
+ xhci->shared_hcd->self.otg_port = 1;
+
+ set_capabilities(otg);
+
+ /* Power the port only for A-host */
+ if (otg->otg.state == OTG_STATE_A_WAIT_VRISE) {
+ /* Spin on xhciPrtPwr bit until it becomes 1 */
+ flg = otg3_handshake(otg, OSTS,
+ OSTS_XHCI_PRT_PWR,
+ OSTS_XHCI_PRT_PWR,
+ 1000);
+ if (flg) {
+ otg_dbg(otg, "Port is powered by xhci-hcd\n");
+ /* Set port power control bit */
+ octl = otg_read(otg, OCTL);
+ octl |= OCTL_PRT_PWR_CTL;
+ otg_write(otg, OCTL, octl);
+ } else {
+ otg_dbg(otg, "Port is not powered by xhci-hcd\n");
+ }
+ }
+
+ return ret;
+}
+
+static int stop_host(struct dwc3_otg *otg)
+{
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ otg_dbg(otg, "\n");
+
+ if (!otg->host_started) {
+ otg_info(otg, "Host already stopped\n");
+ return 1;
+ }
+
+ if (!otg->otg.host)
+ return -ENODEV;
+
+ otg_dbg(otg, "%s: turn off host %s\n",
+ __func__, otg->otg.host->bus_name);
+
+ if (work_pending(&otg->hp_work.work)) {
+ while (!cancel_delayed_work(&otg->hp_work))
+ msleep(20);
+ }
+
+ hcd = container_of(otg->otg.host, struct usb_hcd, self);
+ xhci = hcd_to_xhci(hcd);
+
+ if (xhci->shared_hcd)
+ usb_remove_hcd(xhci->shared_hcd);
+ usb_remove_hcd(hcd);
+
+ otg->host_started = 0;
+ otg->dev_enum = 0;
+ return 0;
+}
+
+int dwc3_otg_host_release(struct usb_hcd *hcd)
+{
+ struct usb_bus *bus;
+ struct usb_device *rh;
+ struct usb_device *udev;
+
+ if (!hcd)
+ return -EINVAL;
+
+ bus = &hcd->self;
+ if (!bus->otg_port)
+ return 0;
+
+ rh = bus->root_hub;
+ udev = usb_hub_find_child(rh, bus->otg_port);
+ if (!udev)
+ return 0;
+
+ if (udev->config && udev->parent == udev->bus->root_hub) {
+ struct usb_otg20_descriptor *desc;
+
+ if (__usb_get_extra_descriptor(udev->rawdescriptors[0],
+ le16_to_cpu(udev->config[0].desc.wTotalLength),
+ USB_DT_OTG, (void **) &desc) == 0) {
+ int err;
+
+ dev_info(&udev->dev, "found OTG descriptor\n");
+ if ((le16_to_cpu(desc->bcdOTG) >= 0x0200) &&
+ (udev->speed == USB_SPEED_HIGH)) {
+ err = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ USB_DEVICE_TEST_MODE,
+ 7 << 8,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (err < 0) {
+ dev_info(&udev->dev,
+ "can't initiate HNP from host: %d\n",
+ err);
+ return -1;
+ }
+ }
+ } else {
+ dev_info(&udev->dev, "didn't find OTG descriptor\n");
+ }
+ } else {
+ dev_info(&udev->dev,
+ "udev->config NULL or udev->parent != udev->bus->root_hub\n");
+ }
+
+ return 0;
+}
+
+/* Sends the host release set feature request */
+static void host_release(struct dwc3_otg *otg)
+{
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ otg_dbg(otg, "\n");
+ if (!otg->otg.host)
+ return;
+ hcd = container_of(otg->otg.host, struct usb_hcd, self);
+ xhci = hcd_to_xhci(hcd);
+ dwc3_otg_host_release(hcd);
+ if (xhci->shared_hcd)
+ dwc3_otg_host_release(xhci->shared_hcd);
+}
+
+static void dwc3_otg_setup_event_buffers(struct dwc3_otg *otg)
+{
+ if (dwc3_readl(otg->dwc->regs, DWC3_GEVNTADRLO(0)) == 0x0) {
+ otg_dbg(otg, "setting up event buffers\n");
+ dwc3_event_buffers_setup(otg->dwc);
+ }
+}
+
+static void start_peripheral(struct dwc3_otg *otg)
+{
+ struct usb_gadget *gadget = otg->otg.gadget;
+ struct dwc3 *dwc = otg->dwc;
+ u32 ocfg;
+
+ otg_dbg(otg, "\n");
+ if (!gadget)
+ return;
+
+ /*
+ * Prevent the gadget DCTL.CSFTRST from resetting OTG core by setting
+ * OCFG.OTGSftRstMsk
+ */
+ ocfg = otg_read(otg, OCFG);
+ ocfg |= DWC3_OCFG_SFTRSTMASK;
+ otg_write(otg, OCFG, ocfg);
+
+ if (!set_peri_mode(otg, PERI_MODE_PERIPHERAL))
+ otg_err(otg, "Failed to set peripheral mode\n");
+
+ if (otg->peripheral_started) {
+ otg_info(otg, "Peripheral already started\n");
+ return;
+ }
+
+ set_capabilities(otg);
+
+ dwc3_otg_setup_event_buffers(otg);
+
+ if (dwc->gadget_driver) {
+ struct dwc3_ep *dep;
+ int ret;
+
+ spin_lock(&otg->lock);
+ dep = dwc->eps[0];
+
+ ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
+ if (ret)
+ goto err0;
+
+ dep = dwc->eps[1];
+
+ ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
+ if (ret)
+ goto err1;
+
+ otg_dbg(otg, "enabled ep in gadget driver\n");
+ /* begin to receive SETUP packets */
+ dwc->ep0state = EP0_SETUP_PHASE;
+ dwc3_ep0_out_start(dwc);
+
+ otg_dbg(otg, "enabled irq\n");
+ dwc3_gadget_enable_irq(dwc);
+
+ otg_write(otg, DCTL, otg_read(otg, DCTL) | DCTL_RUN_STOP);
+ otg_dbg(otg, "Setting DCTL_RUN_STOP to 1 in DCTL\n");
+ spin_unlock(&otg->lock);
+ }
+
+ gadget->b_hnp_enable = 0;
+ gadget->host_request_flag = 0;
+
+ otg->peripheral_started = 1;
+
+ /*
+ * During HNP the bus shouldn't be idle for more than 155 ms, so
+ * give the host enough time to load its stack before it starts
+ * triggering events
+ */
+ msleep(500);
+
+ return;
+err1:
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+
+err0:
+ return;
+}
+
+static void stop_peripheral(struct dwc3_otg *otg)
+{
+ struct usb_gadget *gadget = otg->otg.gadget;
+ struct dwc3 *dwc = otg->dwc;
+
+ otg_dbg(otg, "\n");
+
+ if (!otg->peripheral_started) {
+ otg_info(otg, "Peripheral already stopped\n");
+ return;
+ }
+
+ if (!gadget)
+ return;
+
+ otg_dbg(otg, "disabled ep in gadget driver\n");
+ spin_lock(&otg->lock);
+
+ dwc3_gadget_disable_irq(dwc);
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+ __dwc3_gadget_ep_disable(dwc->eps[1]);
+
+ spin_unlock(&otg->lock);
+
+ otg->peripheral_started = 0;
+ msleep(20);
+}
+
+static void set_b_host(struct dwc3_otg *otg, int val)
+{
+ otg->otg.host->is_b_host = val;
+}
+
+static enum usb_otg_state do_b_idle(struct dwc3_otg *otg);
+
+static int init_b_device(struct dwc3_otg *otg)
+{
+ otg_dbg(otg, "\n");
+ set_capabilities(otg);
+
+ if (!set_peri_mode(otg, PERI_MODE_PERIPHERAL))
+ otg_err(otg, "Failed to start peripheral\n");
+
+ return do_b_idle(otg);
+}
+
+static int init_a_device(struct dwc3_otg *otg)
+{
+ otg_write(otg, OCFG, 0);
+ otg_write(otg, OCTL, 0);
+
+ otg_dbg(otg, "Write 0 to OCFG and OCTL\n");
+ return OTG_STATE_A_IDLE;
+}
+
+static enum usb_otg_state do_connector_id_status(struct dwc3_otg *otg)
+{
+ enum usb_otg_state state;
+ u32 osts;
+
+ otg_dbg(otg, "\n");
+
+ otg_write(otg, OCFG, 0);
+ otg_write(otg, OEVTEN, 0);
+ otg_write(otg, OEVT, 0xffffffff);
+ otg_write(otg, OEVTEN, OEVT_CONN_ID_STS_CHNG_EVNT);
+
+ msleep(60);
+
+ osts = otg_read(otg, OSTS);
+ if (!(osts & OSTS_CONN_ID_STS)) {
+ otg_dbg(otg, "Connector ID is A\n");
+ state = init_a_device(otg);
+ } else {
+ otg_dbg(otg, "Connector ID is B\n");
+ stop_host(otg);
+ state = init_b_device(otg);
+ }
+
+ /* TODO: This is a workaround for the latest hibernation-enabled
+ * bitfiles, which have problems before initializing SRP.
+ */
+ msleep(50);
+
+ return state;
+}
+
+static void reset_hw(struct dwc3_otg *otg)
+{
+ u32 temp;
+
+ otg_dbg(otg, "\n");
+
+ otg_write(otg, OEVTEN, 0);
+ temp = otg_read(otg, OCTL);
+ temp &= OCTL_PERI_MODE;
+ otg_write(otg, OCTL, temp);
+ temp = otg_read(otg, GCTL);
+ temp |= GCTL_PRT_CAP_DIR_OTG << GCTL_PRT_CAP_DIR_SHIFT;
+ otg_write(otg, GCTL, temp);
+}
+
+#define SRP_TIMEOUT 6000
+
+static void start_srp(struct dwc3_otg *otg)
+{
+ u32 octl;
+
+ octl = otg_read(otg, OCTL);
+ octl |= OCTL_SES_REQ;
+ otg_write(otg, OCTL, octl);
+ otg_dbg(otg, "set OCTL_SES_REQ in OCTL\n");
+}
+
+static void start_b_hnp(struct dwc3_otg *otg)
+{
+ u32 octl;
+
+ octl = otg_read(otg, OCTL);
+ octl |= OCTL_HNP_REQ | OCTL_DEV_SET_HNP_EN;
+ otg_write(otg, OCTL, octl);
+ otg_dbg(otg, "set (OCTL_HNP_REQ | OCTL_DEV_SET_HNP_EN) in OCTL\n");
+}
+
+static void stop_b_hnp(struct dwc3_otg *otg)
+{
+ u32 octl;
+
+ octl = otg_read(otg, OCTL);
+ octl &= ~(OCTL_HNP_REQ | OCTL_DEV_SET_HNP_EN);
+ otg_write(otg, OCTL, octl);
+ otg_dbg(otg, "Clear ~(OCTL_HNP_REQ | OCTL_DEV_SET_HNP_EN) in OCTL\n");
+}
+
+static void start_a_hnp(struct dwc3_otg *otg)
+{
+ u32 octl;
+
+ octl = otg_read(otg, OCTL);
+ octl |= OCTL_HST_SET_HNP_EN;
+ otg_write(otg, OCTL, octl);
+ otg_dbg(otg, "set OCTL_HST_SET_HNP_EN in OCTL\n");
+}
+
+static void stop_a_hnp(struct dwc3_otg *otg)
+{
+ u32 octl;
+
+ octl = otg_read(otg, OCTL);
+ octl &= ~OCTL_HST_SET_HNP_EN;
+ otg_write(otg, OCTL, octl);
+ otg_dbg(otg, "clear OCTL_HST_SET_HNP_EN in OCTL\n");
+}
+
+static enum usb_otg_state do_a_hnp_init(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 otg_events = 0;
+
+ otg_dbg(otg, "");
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_A_DEV_HNP_CHNG_EVNT;
+
+ start_a_hnp(otg);
+ rc = 3000;
+
+again:
+ rc = sleep_until_event(otg,
+ otg_mask, 0,
+ &otg_events, NULL, rc);
+ stop_a_hnp(otg);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ /* Higher priority first */
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+
+ } else if (otg_events & OEVT_A_DEV_HNP_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_HNP_CHNG_EVNT\n");
+ if (otg_events & OEVT_HST_NEG_SCS) {
+ otg_dbg(otg, "A-HNP Success\n");
+ return OTG_STATE_A_PERIPHERAL;
+
+ } else {
+ otg_dbg(otg, "A-HNP Failed\n");
+ return OTG_STATE_A_WAIT_VFALL;
+ }
+
+ } else if (rc == 0) {
+ otg_dbg(otg, "A-HNP Failed (Timed out)\n");
+ return OTG_STATE_A_WAIT_VFALL;
+
+ } else {
+ goto again;
+ }
+
+ /* Invalid state */
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_a_host(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 user_mask;
+ u32 otg_events = 0;
+ u32 user_events = 0;
+
+ otg_dbg(otg, "");
+
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_A_DEV_SESS_END_DET_EVNT;
+ user_mask = USER_SRP_EVENT |
+ USER_HNP_EVENT;
+
+ rc = sleep_until_event(otg,
+ otg_mask, user_mask,
+ &otg_events, &user_events, 0);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ /* Higher priority first */
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+
+ } else if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+ return OTG_STATE_A_WAIT_VFALL;
+
+ } else if (user_events & USER_HNP_EVENT) {
+ otg_dbg(otg, "USER_HNP_EVENT\n");
+ return OTG_STATE_A_SUSPEND;
+ }
+
+ /* Invalid state */
+ return OTG_STATE_UNDEFINED;
+}
+
+#define A_WAIT_VFALL_TIMEOUT 1000
+
+static enum usb_otg_state do_a_wait_vfall(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 otg_events = 0;
+
+ otg_dbg(otg, "");
+
+ otg_mask = OEVT_A_DEV_IDLE_EVNT;
+
+ rc = A_WAIT_VFALL_TIMEOUT;
+ rc = sleep_until_event(otg,
+ otg_mask, 0,
+ &otg_events, NULL, rc);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ if (otg_events & OEVT_A_DEV_IDLE_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_IDLE_EVNT\n");
+ return OTG_STATE_A_IDLE;
+
+ } else if (rc == 0) {
+ otg_dbg(otg, "A_WAIT_VFALL_TIMEOUT\n");
+ return OTG_STATE_A_IDLE;
+ }
+
+ /* Invalid state */
+ return OTG_STATE_UNDEFINED;
+
+}
+
+#define A_WAIT_BCON_TIMEOUT 1000
+
+static enum usb_otg_state do_a_wait_bconn(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 otg_events = 0;
+
+ otg_dbg(otg, "");
+
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_A_DEV_SESS_END_DET_EVNT |
+ OEVT_A_DEV_HOST_EVNT;
+
+ rc = A_WAIT_BCON_TIMEOUT;
+ rc = sleep_until_event(otg,
+ otg_mask, 0,
+ &otg_events, NULL, rc);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ /* Higher priority first */
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+
+ } else if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+ return OTG_STATE_A_WAIT_VFALL;
+
+ } else if (otg_events & OEVT_A_DEV_HOST_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_HOST_EVNT\n");
+ return OTG_STATE_A_HOST;
+
+ } else if (rc == 0) {
+ if (otg_read(otg, OCTL) & OCTL_PRT_PWR_CTL)
+ return OTG_STATE_A_HOST;
+ else
+ return OTG_STATE_A_WAIT_VFALL;
+ }
+
+ /* Invalid state */
+ return OTG_STATE_UNDEFINED;
+}
+
+#define A_WAIT_VRISE_TIMEOUT 100
+
+static enum usb_otg_state do_a_wait_vrise(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 otg_events = 0;
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ otg_dbg(otg, "");
+ set_b_host(otg, 0);
+ start_host(otg);
+ hcd = container_of(otg->otg.host, struct usb_hcd, self);
+ xhci = hcd_to_xhci(hcd);
+ usb_kick_hub_wq(hcd->self.root_hub);
+ if (xhci->shared_hcd)
+ usb_kick_hub_wq(xhci->shared_hcd->self.root_hub);
+
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_A_DEV_SESS_END_DET_EVNT;
+
+ rc = A_WAIT_VRISE_TIMEOUT;
+
+ rc = sleep_until_event(otg,
+ otg_mask, 0,
+ &otg_events, NULL, rc);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ /* Higher priority first */
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+
+ } else if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+ return OTG_STATE_A_WAIT_VFALL;
+
+ } else if (rc == 0) {
+ if (otg_read(otg, OCTL) & OCTL_PRT_PWR_CTL)
+ return OTG_STATE_A_WAIT_BCON;
+ else
+ return OTG_STATE_A_WAIT_VFALL;
+ }
+
+ /* Invalid state */
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_a_idle(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 user_mask;
+ u32 otg_events = 0;
+ u32 user_events = 0;
+
+ otg_dbg(otg, "");
+
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT | OEVT_A_DEV_SRP_DET_EVNT;
+ user_mask = USER_SRP_EVENT;
+
+ rc = sleep_until_event(otg,
+ otg_mask, user_mask,
+ &otg_events, &user_events,
+ 0);
+
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+ } else if (otg_events & OEVT_A_DEV_SRP_DET_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_SRP_DET_EVNT\n");
+ return OTG_STATE_A_WAIT_VRISE;
+ } else if (user_events & USER_SRP_EVENT) {
+ otg_dbg(otg, "User initiated VBUS\n");
+ return OTG_STATE_A_WAIT_VRISE;
+ }
+
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_a_peripheral(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 user_mask;
+ u32 otg_events = 0;
+ u32 user_events = 0;
+
+ otg_dbg(otg, "");
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_A_DEV_SESS_END_DET_EVNT |
+ OEVT_A_DEV_B_DEV_HOST_END_EVNT;
+ user_mask = USER_HNP_END_SESSION;
+
+ rc = sleep_until_event(otg,
+ otg_mask, user_mask,
+ &otg_events, &user_events, 0);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+
+ } else if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+ return OTG_STATE_A_WAIT_VFALL;
+
+ } else if (otg_events & OEVT_A_DEV_B_DEV_HOST_END_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_B_DEV_HOST_END_EVNT\n");
+ return OTG_STATE_A_WAIT_VRISE;
+ } else if (user_events & USER_HNP_END_SESSION) {
+ otg_dbg(otg, "USER_HNP_END_SESSION\n");
+ return OTG_STATE_A_WAIT_VRISE;
+ }
+
+ return OTG_STATE_UNDEFINED;
+}
+
+#define HNP_TIMEOUT 4000
+
+static enum usb_otg_state do_b_hnp_init(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 events = 0;
+
+ otg_dbg(otg, "");
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_B_DEV_HNP_CHNG_EVNT |
+ OEVT_B_DEV_VBUS_CHNG_EVNT;
+
+ start_b_hnp(otg);
+ rc = HNP_TIMEOUT;
+
+again:
+ rc = sleep_until_event(otg,
+ otg_mask, 0,
+ &events, NULL, rc);
+ stop_b_hnp(otg);
+
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ if (events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+ } else if (events & OEVT_B_DEV_VBUS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_VBUS_CHNG_EVNT\n");
+ return OTG_STATE_B_IDLE;
+ } else if (events & OEVT_B_DEV_HNP_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_HNP_CHNG_EVNT\n");
+ if (events & OEVT_HST_NEG_SCS) {
+ otg_dbg(otg, "B-HNP Success\n");
+ return OTG_STATE_B_WAIT_ACON;
+
+ } else {
+ otg_err(otg, "B-HNP Failed\n");
+ return OTG_STATE_B_PERIPHERAL;
+ }
+ } else if (rc == 0) {
+ /* Timeout */
+ otg_err(otg, "HNP timed out!\n");
+ return OTG_STATE_B_PERIPHERAL;
+
+ } else {
+ goto again;
+ }
+
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_b_peripheral(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 user_mask;
+ u32 otg_events = 0;
+ u32 user_events = 0;
+
+ otg_dbg(otg, "");
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT | OEVT_B_DEV_VBUS_CHNG_EVNT;
+ user_mask = USER_HNP_EVENT | USER_END_SESSION |
+ USER_SRP_EVENT | INITIAL_SRP;
+
+again:
+ rc = sleep_until_event(otg,
+ otg_mask, user_mask,
+ &otg_events, &user_events, 0);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+ } else if (otg_events & OEVT_B_DEV_VBUS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_VBUS_CHNG_EVNT\n");
+
+ if (otg_events & OEVT_B_SES_VLD_EVT) {
+ otg_dbg(otg, "Session valid\n");
+ goto again;
+ } else {
+ otg_dbg(otg, "Session not valid\n");
+ return OTG_STATE_B_IDLE;
+ }
+
+ } else if (user_events & USER_HNP_EVENT) {
+ otg_dbg(otg, "USER_HNP_EVENT\n");
+ return do_b_hnp_init(otg);
+ } else if (user_events & USER_END_SESSION) {
+ otg_dbg(otg, "USER_END_SESSION\n");
+ return OTG_STATE_B_IDLE;
+ }
+
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_b_wait_acon(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 user_mask = 0;
+ u32 otg_events = 0;
+ u32 user_events = 0;
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ otg_dbg(otg, "");
+ set_b_host(otg, 1);
+ start_host(otg);
+ otg_mask = OEVT_B_DEV_B_HOST_END_EVNT;
+ otg_write(otg, OEVTEN, otg_mask);
+ reset_port(otg);
+
+ hcd = container_of(otg->otg.host, struct usb_hcd, self);
+ xhci = hcd_to_xhci(hcd);
+ usb_kick_hub_wq(hcd->self.root_hub);
+ if (xhci->shared_hcd)
+ usb_kick_hub_wq(xhci->shared_hcd->self.root_hub);
+
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_B_DEV_B_HOST_END_EVNT |
+ OEVT_B_DEV_VBUS_CHNG_EVNT |
+ OEVT_HOST_ROLE_REQ_INIT_EVNT;
+ user_mask = USER_A_CONN_EVENT | USER_HNP_END_SESSION;
+
+again:
+ rc = sleep_until_event(otg,
+ otg_mask, user_mask,
+ &otg_events, &user_events, 0);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ /* Higher priority first */
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+ } else if (otg_events & OEVT_B_DEV_B_HOST_END_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_B_HOST_END_EVNT\n");
+ return OTG_STATE_B_PERIPHERAL;
+ } else if (otg_events & OEVT_B_DEV_VBUS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_VBUS_CHNG_EVNT\n");
+ if (otg_events & OEVT_B_SES_VLD_EVT) {
+ otg_dbg(otg, "Session valid\n");
+ goto again;
+ } else {
+ otg_dbg(otg, "Session not valid\n");
+ return OTG_STATE_B_IDLE;
+ }
+ } else if (user_events & USER_A_CONN_EVENT) {
+ otg_dbg(otg, "A-device connected\n");
+ return OTG_STATE_B_HOST;
+ } else if (user_events & USER_HNP_END_SESSION) {
+ otg_dbg(otg, "USER_HNP_END_SESSION\n");
+ return OTG_STATE_B_PERIPHERAL;
+ }
+
+ /* Invalid state */
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_b_host(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 user_mask = 0;
+ u32 otg_events = 0;
+ u32 user_events = 0;
+
+ otg_dbg(otg, "");
+
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_B_DEV_B_HOST_END_EVNT |
+ OEVT_B_DEV_VBUS_CHNG_EVNT |
+ OEVT_HOST_ROLE_REQ_INIT_EVNT;
+ user_mask = USER_HNP_END_SESSION;
+
+again:
+ rc = sleep_until_event(otg,
+ otg_mask, user_mask,
+ &otg_events, &user_events, 0);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ /* Higher priority first */
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+ } else if (otg_events & OEVT_B_DEV_B_HOST_END_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_B_HOST_END_EVNT\n");
+ return OTG_STATE_B_PERIPHERAL;
+ } else if (otg_events & OEVT_B_DEV_VBUS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_VBUS_CHNG_EVNT\n");
+ if (otg_events & OEVT_B_SES_VLD_EVT) {
+ otg_dbg(otg, "Session valid\n");
+ goto again;
+ } else {
+ otg_dbg(otg, "Session not valid\n");
+ return OTG_STATE_B_IDLE;
+ }
+ } else if (user_events & USER_HNP_END_SESSION) {
+ otg_dbg(otg, "USER_HNP_END_SESSION\n");
+ return OTG_STATE_B_PERIPHERAL;
+ }
+
+ /* Invalid state */
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_b_idle(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 user_mask;
+ u32 otg_events = 0;
+ u32 user_events = 0;
+
+ otg_dbg(otg, "");
+
+ if (!set_peri_mode(otg, PERI_MODE_PERIPHERAL))
+ otg_err(otg, "Failed to set peripheral mode\n");
+
+ dwc3_otg_setup_event_buffers(otg);
+
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_B_DEV_SES_VLD_DET_EVNT |
+ OEVT_B_DEV_VBUS_CHNG_EVNT;
+ user_mask = USER_SRP_EVENT;
+
+again:
+ rc = sleep_until_event(otg,
+ otg_mask, user_mask,
+ &otg_events, &user_events, 0);
+
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+ } else if ((otg_events & OEVT_B_DEV_VBUS_CHNG_EVNT) ||
+ (otg_events & OEVT_B_DEV_SES_VLD_DET_EVNT)) {
+ otg_dbg(otg, "OEVT_B_DEV_VBUS_CHNG_EVNT\n");
+ if (otg_events & OEVT_B_SES_VLD_EVT) {
+ otg_dbg(otg, "Session valid\n");
+ return OTG_STATE_B_PERIPHERAL;
+
+ } else {
+ otg_dbg(otg, "Session not valid\n");
+ goto again;
+ }
+ } else if (user_events & USER_SRP_EVENT) {
+ otg_dbg(otg, "USER_SRP_EVENT\n");
+ return OTG_STATE_B_SRP_INIT;
+ }
+
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_b_srp_init(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 events = 0;
+
+ otg_dbg(otg, "");
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_B_DEV_SES_VLD_DET_EVNT |
+ OEVT_B_DEV_VBUS_CHNG_EVNT;
+
+ otg_write(otg, OEVTEN, otg_mask);
+ start_srp(otg);
+
+ rc = SRP_TIMEOUT;
+
+again:
+ rc = sleep_until_event(otg,
+ otg_mask, 0,
+ &events, NULL, rc);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ if (events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+ } else if (events & OEVT_B_DEV_SES_VLD_DET_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_SES_VLD_DET_EVNT\n");
+ return OTG_STATE_B_PERIPHERAL;
+ } else if (rc == 0) {
+		otg_dbg(otg, "SRP timed out\n");
+		otg_info(otg, "no device response to SRP\n");
+ return OTG_STATE_B_IDLE;
+
+ } else {
+ goto again;
+ }
+
+ return OTG_STATE_UNDEFINED;
+}
+
+int otg_main_thread(void *data)
+{
+ struct dwc3_otg *otg = (struct dwc3_otg *)data;
+ enum usb_otg_state prev = OTG_STATE_UNDEFINED;
+
+#ifdef VERBOSE_DEBUG
+ u32 snpsid = otg_read(otg, 0xc120);
+
+ otg_vdbg(otg, "io_priv=%p\n", otg->regs);
+ otg_vdbg(otg, "c120: %x\n", snpsid);
+#endif
+
+ /* Allow the thread to be killed by a signal, but set the signal mask
+ * to block everything but INT, TERM, KILL, and USR1.
+ */
+ allow_signal(SIGINT);
+ allow_signal(SIGTERM);
+ allow_signal(SIGKILL);
+ allow_signal(SIGUSR1);
+
+ /* Allow the thread to be frozen */
+ set_freezable();
+
+ /* Allow host/peripheral driver load to finish */
+ msleep(100);
+
+ reset_hw(otg);
+
+ stop_host(otg);
+ stop_peripheral(otg);
+
+ otg_dbg(otg, "Thread running\n");
+ while (1) {
+ enum usb_otg_state next = OTG_STATE_UNDEFINED;
+
+ otg_vdbg(otg, "Main thread entering state\n");
+
+ switch (otg->otg.state) {
+ case OTG_STATE_UNDEFINED:
+ otg_dbg(otg, "OTG_STATE_UNDEFINED\n");
+ next = do_connector_id_status(otg);
+ break;
+
+ case OTG_STATE_A_IDLE:
+ otg_dbg(otg, "OTG_STATE_A_IDLE\n");
+ stop_peripheral(otg);
+
+ if (prev == OTG_STATE_UNDEFINED)
+ next = OTG_STATE_A_WAIT_VRISE;
+ else
+ next = do_a_idle(otg);
+ break;
+
+ case OTG_STATE_A_WAIT_VRISE:
+ otg_dbg(otg, "OTG_STATE_A_WAIT_VRISE\n");
+ next = do_a_wait_vrise(otg);
+ break;
+
+ case OTG_STATE_A_WAIT_BCON:
+ otg_dbg(otg, "OTG_STATE_A_WAIT_BCON\n");
+ next = do_a_wait_bconn(otg);
+ break;
+
+ case OTG_STATE_A_HOST:
+ otg_dbg(otg, "OTG_STATE_A_HOST\n");
+ stop_peripheral(otg);
+ next = do_a_host(otg);
+ /* Don't stop the host here if we are going into
+ * A_SUSPEND. We need to delay that until later. It
+ * will be stopped when coming out of A_SUSPEND
+ * state.
+ */
+ if (next != OTG_STATE_A_SUSPEND)
+ stop_host(otg);
+ break;
+
+ case OTG_STATE_A_SUSPEND:
+ otg_dbg(otg, "OTG_STATE_A_SUSPEND\n");
+ next = do_a_hnp_init(otg);
+
+ /* Stop the host. */
+ stop_host(otg);
+ break;
+
+ case OTG_STATE_A_WAIT_VFALL:
+ otg_dbg(otg, "OTG_STATE_A_WAIT_VFALL\n");
+ next = do_a_wait_vfall(otg);
+ stop_host(otg);
+ break;
+
+ case OTG_STATE_A_PERIPHERAL:
+ otg_dbg(otg, "OTG_STATE_A_PERIPHERAL\n");
+ stop_host(otg);
+ start_peripheral(otg);
+ next = do_a_peripheral(otg);
+ stop_peripheral(otg);
+ break;
+
+ case OTG_STATE_B_IDLE:
+ otg_dbg(otg, "OTG_STATE_B_IDLE\n");
+ next = do_b_idle(otg);
+ break;
+
+ case OTG_STATE_B_PERIPHERAL:
+ otg_dbg(otg, "OTG_STATE_B_PERIPHERAL\n");
+ stop_host(otg);
+ start_peripheral(otg);
+ next = do_b_peripheral(otg);
+ stop_peripheral(otg);
+ break;
+
+ case OTG_STATE_B_SRP_INIT:
+ otg_dbg(otg, "OTG_STATE_B_SRP_INIT\n");
+ otg_read(otg, OSTS);
+ next = do_b_srp_init(otg);
+ break;
+
+ case OTG_STATE_B_WAIT_ACON:
+ otg_dbg(otg, "OTG_STATE_B_WAIT_ACON\n");
+ next = do_b_wait_acon(otg);
+ break;
+
+ case OTG_STATE_B_HOST:
+ otg_dbg(otg, "OTG_STATE_B_HOST\n");
+ next = do_b_host(otg);
+ stop_host(otg);
+ break;
+
+ default:
+			otg_err(otg, "Unknown state %d, sleeping...\n",
+				otg->otg.state);
+ sleep_main_thread(otg);
+ break;
+ }
+
+ prev = otg->otg.state;
+ otg->otg.state = next;
+ if (kthread_should_stop())
+ break;
+ }
+
+ otg->main_thread = NULL;
+ otg_dbg(otg, "OTG main thread exiting....\n");
+
+ return 0;
+}
+
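+/*
+ * The state machine runs in its own kthread; it is only started once both
+ * a gadget and a host have been registered with the OTG core.
+ */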
+static void start_main_thread(struct dwc3_otg *otg)
+{
+ if (!otg->main_thread && otg->otg.gadget && otg->otg.host) {
+ otg_dbg(otg, "Starting OTG main thread\n");
+		otg->main_thread = kthread_create(otg_main_thread, otg, "otg");
+		if (IS_ERR(otg->main_thread)) {
+			otg->main_thread = NULL;
+			return;
+		}
+		wake_up_process(otg->main_thread);
+ }
+}
+
+static inline struct dwc3_otg *otg_to_dwc3_otg(struct usb_otg *x)
+{
+ return container_of(x, struct dwc3_otg, otg);
+}
+
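+/*
+ * OTG interrupt handler: snapshot OEVT/OSTS/OCTL/OCFG, acknowledge the
+ * events, and hand the event bits to the main thread, which acts on them
+ * in process context.
+ */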
+static irqreturn_t dwc3_otg_irq(int irq, void *_otg)
+{
+ struct dwc3_otg *otg;
+ u32 oevt;
+ u32 osts;
+ u32 octl;
+ u32 ocfg;
+ u32 oevten;
+ u32 otg_mask = OEVT_ALL;
+
+	if (!_otg)
+		return IRQ_NONE;
+
+ otg = (struct dwc3_otg *)_otg;
+
+ oevt = otg_read(otg, OEVT);
+ osts = otg_read(otg, OSTS);
+ octl = otg_read(otg, OCTL);
+ ocfg = otg_read(otg, OCFG);
+ oevten = otg_read(otg, OEVTEN);
+
+ /* Clear handled events */
+ otg_write(otg, OEVT, oevt);
+
+ otg_vdbg(otg, "\n");
+ otg_vdbg(otg, " oevt = %08x\n", oevt);
+ otg_vdbg(otg, " osts = %08x\n", osts);
+ otg_vdbg(otg, " octl = %08x\n", octl);
+ otg_vdbg(otg, " ocfg = %08x\n", ocfg);
+ otg_vdbg(otg, " oevten = %08x\n", oevten);
+
+ otg_vdbg(otg, "oevt[DeviceMode] = %s\n",
+ oevt & OEVT_DEV_MOD_EVNT ? "Device" : "Host");
+
+ if (oevt & OEVT_CONN_ID_STS_CHNG_EVNT)
+ otg_dbg(otg, "Connector ID Status Change Event\n");
+ if (oevt & OEVT_HOST_ROLE_REQ_INIT_EVNT)
+ otg_dbg(otg, "Host Role Request Init Notification Event\n");
+ if (oevt & OEVT_HOST_ROLE_REQ_CONFIRM_EVNT)
+ otg_dbg(otg, "Host Role Request Confirm Notification Event\n");
+ if (oevt & OEVT_A_DEV_B_DEV_HOST_END_EVNT)
+ otg_dbg(otg, "A-Device B-Host End Event\n");
+ if (oevt & OEVT_A_DEV_HOST_EVNT)
+ otg_dbg(otg, "A-Device Host Event\n");
+ if (oevt & OEVT_A_DEV_HNP_CHNG_EVNT)
+ otg_dbg(otg, "A-Device HNP Change Event\n");
+ if (oevt & OEVT_A_DEV_SRP_DET_EVNT)
+ otg_dbg(otg, "A-Device SRP Detect Event\n");
+ if (oevt & OEVT_A_DEV_SESS_END_DET_EVNT)
+ otg_dbg(otg, "A-Device Session End Detected Event\n");
+ if (oevt & OEVT_B_DEV_B_HOST_END_EVNT)
+ otg_dbg(otg, "B-Device B-Host End Event\n");
+ if (oevt & OEVT_B_DEV_HNP_CHNG_EVNT)
+ otg_dbg(otg, "B-Device HNP Change Event\n");
+ if (oevt & OEVT_B_DEV_SES_VLD_DET_EVNT)
+ otg_dbg(otg, "B-Device Session Valid Detect Event\n");
+ if (oevt & OEVT_B_DEV_VBUS_CHNG_EVNT)
+ otg_dbg(otg, "B-Device VBUS Change Event\n");
+
+ if (oevt & otg_mask) {
+ /* Pass event to main thread */
+ spin_lock(&otg->lock);
+ otg->otg_events |= oevt;
+ wakeup_main_thread(otg);
+ spin_unlock(&otg->lock);
+		return IRQ_HANDLED;
+ }
+
+ return IRQ_HANDLED;
+}
+
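+/*
+ * HNP polling: while we are A-host, periodically read the peripheral's OTG
+ * status selector (GET_STATUS with wIndex 0xf000).  If bit 0 (the host
+ * request flag) is set, the peripheral wants the host role: enable
+ * b_hnp_enable via SET_FEATURE and suspend the port so HNP can proceed.
+ */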
+static void hnp_polling_work(struct work_struct *w)
+{
+ struct dwc3_otg *otg = container_of(w, struct dwc3_otg,
+ hp_work.work);
+ struct usb_bus *bus;
+ struct usb_device *udev;
+ struct usb_hcd *hcd;
+ u8 *otgstatus;
+ int ret;
+ int err;
+
+ hcd = container_of(otg->otg.host, struct usb_hcd, self);
+ if (!hcd)
+ return;
+
+ bus = &hcd->self;
+ if (!bus->otg_port)
+ return;
+
+ udev = usb_hub_find_child(bus->root_hub, bus->otg_port);
+ if (!udev)
+ return;
+
+ otgstatus = kmalloc(sizeof(*otgstatus), GFP_NOIO);
+ if (!otgstatus)
+ return;
+
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ USB_REQ_GET_STATUS, USB_DIR_IN | USB_RECIP_DEVICE,
+ 0, 0xf000, otgstatus, sizeof(*otgstatus),
+ USB_CTRL_GET_TIMEOUT);
+
+ if (ret == sizeof(*otgstatus) && (*otgstatus & 0x1)) {
+ /* enable HNP before suspend, it's simpler */
+
+ udev->bus->b_hnp_enable = 1;
+ err = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ udev->bus->b_hnp_enable
+ ? USB_DEVICE_B_HNP_ENABLE
+ : USB_DEVICE_A_ALT_HNP_SUPPORT,
+ 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+
+ if (err < 0) {
+ /* OTG MESSAGE: report errors here,
+ * customize to match your product.
+ */
+			otg_info(otg, "ERROR: device did not respond\n");
+ dev_info(&udev->dev, "can't set HNP mode: %d\n",
+ err);
+ udev->bus->b_hnp_enable = 0;
+ if (le16_to_cpu(udev->descriptor.idVendor) == 0x1a0a) {
+ if (usb_port_suspend(udev, PMSG_AUTO_SUSPEND)
+ < 0)
+ dev_dbg(&udev->dev, "HNP fail, %d\n",
+ err);
+ }
+ } else {
+ /* Device wants role-switch, suspend the bus. */
+			struct usb_phy *phy;
+
+			phy = usb_get_phy(USB_PHY_TYPE_USB3);
+			if (!IS_ERR_OR_NULL(phy)) {
+				otg_start_hnp(phy->otg);
+				usb_put_phy(phy);
+			}
+
+			err = usb_port_suspend(udev, PMSG_AUTO_SUSPEND);
+			if (err < 0)
+				dev_dbg(&udev->dev, "HNP fail, %d\n", err);
+ }
+ } else if (ret < 0) {
+ udev->bus->b_hnp_enable = 1;
+ err = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ USB_DEVICE_B_HNP_ENABLE,
+ 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (usb_port_suspend(udev, PMSG_AUTO_SUSPEND) < 0)
+ dev_dbg(&udev->dev, "HNP fail, %d\n", err);
+ } else {
+ schedule_delayed_work(&otg->hp_work, 1 * HZ);
+ }
+
+ kfree(otgstatus);
+}
+
+static int dwc3_otg_notify_connect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+{
+ struct usb_bus *bus;
+ struct usb_device *udev;
+ struct usb_hcd *hcd;
+ struct dwc3_otg *otg;
+ int err = 0;
+
+ otg = otg_to_dwc3_otg(phy->otg);
+
+ hcd = container_of(phy->otg->host, struct usb_hcd, self);
+ if (!hcd)
+ return -EINVAL;
+
+ bus = &hcd->self;
+ if (!bus->otg_port)
+ return 0;
+
+ udev = usb_hub_find_child(bus->root_hub, bus->otg_port);
+ if (!udev)
+ return 0;
+
+ /*
+	 * OTG-aware devices on OTG-capable root hubs may be able to use SRP,
+	 * to wake us after we've powered off VBUS, and HNP, switching roles
+	 * from "host" to "peripheral".  The OTG descriptor helps figure this
+	 * out.
+ */
+ if (udev->config && udev->parent == udev->bus->root_hub) {
+ struct usb_otg20_descriptor *desc = NULL;
+
+ /* descriptor may appear anywhere in config */
+ err = __usb_get_extra_descriptor(udev->rawdescriptors[0],
+ le16_to_cpu(udev->config[0].desc.wTotalLength),
+ USB_DT_OTG, (void **) &desc);
+ if (err || !(desc->bmAttributes & USB_OTG_HNP))
+ return 0;
+
+ if (udev->portnum == udev->bus->otg_port) {
+ INIT_DELAYED_WORK(&otg->hp_work,
+ hnp_polling_work);
+ schedule_delayed_work(&otg->hp_work, HZ);
+ }
+
+ }
+
+ return err;
+}
+
+static int dwc3_otg_notify_disconnect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+{
+ struct dwc3_otg *otg;
+
+ otg = otg_to_dwc3_otg(phy->otg);
+
+	cancel_delayed_work_sync(&otg->hp_work);
+ return 0;
+}
+
+void dwc3_otg_set_peripheral(struct usb_otg *_otg, int yes)
+{
+ struct dwc3_otg *otg;
+
+ if (!_otg)
+ return;
+
+ otg = otg_to_dwc3_otg(_otg);
+ otg_dbg(otg, "\n");
+
+ if (yes) {
+ if (otg->hwparams6 == 0xdeadbeef)
+ otg->hwparams6 = otg_read(otg, GHWPARAMS6);
+ stop_host(otg);
+ } else {
+ stop_peripheral(otg);
+ }
+
+ set_peri_mode(otg, yes);
+}
+EXPORT_SYMBOL(dwc3_otg_set_peripheral);
+
+static int dwc3_otg_set_periph(struct usb_otg *_otg, struct usb_gadget *gadget)
+{
+ struct dwc3_otg *otg;
+
+ if (!_otg)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(_otg);
+ otg_dbg(otg, "\n");
+
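+	/*
+	 * A literal 1 appears to be used as a sentinel (the counterpart of
+	 * the 0xdeadbeef host sentinel in dwc3_otg_set_host()) to force
+	 * peripheral mode without registering a gadget.
+	 */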
+ if ((long)gadget == 1) {
+ dwc3_otg_set_peripheral(_otg, 1);
+ return 0;
+ }
+
+ if (!gadget) {
+ otg->otg.gadget = NULL;
+ return -ENODEV;
+ }
+
+ otg->otg.gadget = gadget;
+ otg->otg.gadget->hnp_polling_support = 1;
+ otg->otg.state = OTG_STATE_B_IDLE;
+
+ start_main_thread(otg);
+ return 0;
+}
+
+static int dwc3_otg_set_host(struct usb_otg *_otg, struct usb_bus *host)
+{
+ struct dwc3_otg *otg;
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ if (!_otg)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(_otg);
+ otg_dbg(otg, "\n");
+
+ if (host == (struct usb_bus *)0xdeadbeef) {
+ dwc3_otg_set_peripheral(_otg, 0);
+ return 0;
+ }
+
+ if (!host) {
+ otg->otg.host = NULL;
+ otg->hcd_irq = 0;
+ return -ENODEV;
+ }
+
+ hcd = container_of(host, struct usb_hcd, self);
+ xhci = hcd_to_xhci(hcd);
+ otg_dbg(otg, "hcd=%p xhci=%p\n", hcd, xhci);
+
+ hcd->self.otg_port = 1;
+ if (xhci->shared_hcd) {
+ xhci->shared_hcd->self.otg_port = 1;
+ otg_dbg(otg, "shared_hcd=%p\n", xhci->shared_hcd);
+ }
+
+ otg->otg.host = host;
+ otg->hcd_irq = hcd->irq;
+ otg_dbg(otg, "host=%p irq=%d\n", otg->otg.host, otg->hcd_irq);
+
+ otg->host_started = 1;
+ otg->dev_enum = 0;
+ start_main_thread(otg);
+ return 0;
+}
+
+static int dwc3_otg_start_srp(struct usb_otg *x)
+{
+ unsigned long flags;
+ struct dwc3_otg *otg;
+
+ if (!x)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(x);
+ otg_dbg(otg, "\n");
+
+ if (!otg->otg.host || !otg->otg.gadget)
+ return -ENODEV;
+
+ spin_lock_irqsave(&otg->lock, flags);
+ otg->user_events |= USER_SRP_EVENT;
+ wakeup_main_thread(otg);
+ spin_unlock_irqrestore(&otg->lock, flags);
+ return 0;
+}
+
+static int dwc3_otg_start_hnp(struct usb_otg *x)
+{
+ unsigned long flags;
+ struct dwc3_otg *otg;
+
+ if (!x)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(x);
+ otg_dbg(otg, "\n");
+
+ if (!otg->otg.host || !otg->otg.gadget)
+ return -ENODEV;
+
+ spin_lock_irqsave(&otg->lock, flags);
+ otg->user_events |= USER_HNP_EVENT;
+ wakeup_main_thread(otg);
+ spin_unlock_irqrestore(&otg->lock, flags);
+ return 0;
+}
+
+static int dwc3_otg_end_session(struct usb_otg *x)
+{
+ unsigned long flags;
+ struct dwc3_otg *otg;
+
+ if (!x)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(x);
+ otg_dbg(otg, "\n");
+
+ if (!otg->otg.host || !otg->otg.gadget)
+ return -ENODEV;
+
+ spin_lock_irqsave(&otg->lock, flags);
+ otg->user_events |= USER_END_SESSION;
+ wakeup_main_thread(otg);
+ spin_unlock_irqrestore(&otg->lock, flags);
+ return 0;
+}
+
+int otg_end_session(struct usb_otg *otg)
+{
+ return dwc3_otg_end_session(otg);
+}
+EXPORT_SYMBOL(otg_end_session);
+
+static int dwc3_otg_received_host_release(struct usb_otg *x)
+{
+ struct dwc3_otg *otg;
+ unsigned long flags;
+
+ if (!x)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(x);
+ otg_dbg(otg, "\n");
+
+ if (!otg->otg.host || !otg->otg.gadget)
+ return -ENODEV;
+
+ spin_lock_irqsave(&otg->lock, flags);
+ otg->user_events |= PCD_RECEIVED_HOST_RELEASE_EVENT;
+ wakeup_main_thread(otg);
+ spin_unlock_irqrestore(&otg->lock, flags);
+ return 0;
+}
+
+int otg_host_release(struct usb_otg *otg)
+{
+ return dwc3_otg_received_host_release(otg);
+}
+EXPORT_SYMBOL(otg_host_release);
+
+static void dwc3_otg_enable_irq(struct dwc3_otg *otg)
+{
+ u32 reg;
+
+ /* Enable OTG IRQs */
+ reg = OEVT_ALL;
+
+ otg_write(otg, OEVTEN, reg);
+}
+
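+/*
+ * The sysfs attributes below (srp, end, hnp, hnp_end, a_hnp_reqd,
+ * print_dbg) let user space inject the USER_* events that drive the
+ * state machine.
+ */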
+static ssize_t store_srp(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB3);
+	if (IS_ERR_OR_NULL(phy))
+		return count;
+
+ otg = phy->otg;
+ if (!otg) {
+ usb_put_phy(phy);
+ return count;
+ }
+
+ otg_start_srp(otg);
+ usb_put_phy(phy);
+ return count;
+}
+static DEVICE_ATTR(srp, 0220, NULL, store_srp);
+
+static ssize_t store_end(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB3);
+	if (IS_ERR_OR_NULL(phy))
+		return count;
+
+ otg = phy->otg;
+ if (!otg) {
+ usb_put_phy(phy);
+ return count;
+ }
+
+ otg_end_session(otg);
+ usb_put_phy(phy);
+ return count;
+}
+static DEVICE_ATTR(end, 0220, NULL, store_end);
+
+static ssize_t store_hnp(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct usb_phy *phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ struct usb_otg *otg;
+
+ dev_dbg(dwc->dev, "%s()\n", __func__);
+
+	if (IS_ERR_OR_NULL(phy)) {
+		dev_info(dwc->dev, "no USB3 PHY registered\n");
+		return count;
+	}
+
+ otg = phy->otg;
+ if (!otg) {
+ dev_info(dwc->dev, "NO OTG!!\n");
+ usb_put_phy(phy);
+ return count;
+ }
+
+ dev_info(dev, "b_hnp_enable is FALSE\n");
+ dwc->gadget.host_request_flag = 1;
+
+ usb_put_phy(phy);
+ return count;
+}
+static DEVICE_ATTR(hnp, 0220, NULL, store_hnp);
+
+static ssize_t store_hnp_end(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+ unsigned long flags;
+ struct dwc3_otg *dwc_otg;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB3);
+	if (IS_ERR_OR_NULL(phy))
+		return count;
+
+ otg = phy->otg;
+ if (!otg) {
+ usb_put_phy(phy);
+ return count;
+ }
+
+ dwc_otg = otg_to_dwc3_otg(otg);
+
+ spin_lock_irqsave(&dwc_otg->lock, flags);
+ dwc_otg->user_events |= USER_HNP_END_SESSION;
+ wakeup_main_thread(dwc_otg);
+ spin_unlock_irqrestore(&dwc_otg->lock, flags);
+
+ usb_put_phy(phy);
+ return count;
+}
+static DEVICE_ATTR(hnp_end, 0220, NULL, store_hnp_end);
+
+static ssize_t store_a_hnp_reqd(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct dwc3_otg *otg;
+
+ otg = dwc->otg;
+ host_release(otg);
+ return count;
+}
+static DEVICE_ATTR(a_hnp_reqd, 0220, NULL, store_a_hnp_reqd);
+
+static ssize_t store_print_dbg(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct dwc3_otg *otg;
+
+ otg = dwc->otg;
+ print_debug_regs(otg);
+
+ return count;
+}
+static DEVICE_ATTR(print_dbg, 0220, NULL, store_print_dbg);
+
+void dwc_usb3_remove_dev_files(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_print_dbg);
+ device_remove_file(dev, &dev_attr_a_hnp_reqd);
+ device_remove_file(dev, &dev_attr_end);
+ device_remove_file(dev, &dev_attr_srp);
+ device_remove_file(dev, &dev_attr_hnp);
+ device_remove_file(dev, &dev_attr_hnp_end);
+}
+
+int dwc3_otg_create_dev_files(struct device *dev)
+{
+ int retval;
+
+ retval = device_create_file(dev, &dev_attr_hnp);
+ if (retval)
+ goto fail;
+
+ retval = device_create_file(dev, &dev_attr_hnp_end);
+ if (retval)
+ goto fail;
+
+ retval = device_create_file(dev, &dev_attr_srp);
+ if (retval)
+ goto fail;
+
+ retval = device_create_file(dev, &dev_attr_end);
+ if (retval)
+ goto fail;
+
+ retval = device_create_file(dev, &dev_attr_a_hnp_reqd);
+ if (retval)
+ goto fail;
+
+ retval = device_create_file(dev, &dev_attr_print_dbg);
+ if (retval)
+ goto fail;
+
+ return 0;
+
+fail:
+	dev_err(dev, "failed to create one or more sysfs files\n");
+ return retval;
+}
+
+void dwc3_otg_init(struct dwc3 *dwc)
+{
+ struct dwc3_otg *otg;
+ int err;
+ u32 reg;
+
+ dev_dbg(dwc->dev, "dwc3_otg_init\n");
+
+ /*
+ * GHWPARAMS6[10] bit is SRPSupport.
+ * This bit also reflects DWC_USB3_EN_OTG
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
+ if (!(reg & GHWPARAMS6_SRP_SUPPORT_ENABLED)) {
+		/*
+		 * No OTG support in the HW core.  This is an acceptable
+		 * situation: simply continue probing the dwc3 driver
+		 * without OTG support.
+		 */
+ dev_dbg(dwc->dev, "dwc3_otg address space is not supported\n");
+ return;
+ }
+
+ otg = kzalloc(sizeof(*otg), GFP_KERNEL);
+	if (!otg)
+		return;
+
+ dwc->otg = otg;
+ otg->dev = dwc->dev;
+ otg->dwc = dwc;
+
+ otg->regs = dwc->regs - DWC3_GLOBALS_REGS_START;
+	otg->otg.usb_phy = kzalloc(sizeof(struct usb_phy), GFP_KERNEL);
+	if (!otg->otg.usb_phy) {
+		kfree(otg);
+		return;
+	}
+ otg->otg.usb_phy->dev = otg->dev;
+ otg->otg.usb_phy->label = "dwc3_otg";
+ otg->otg.state = OTG_STATE_UNDEFINED;
+ otg->otg.usb_phy->otg = &otg->otg;
+ otg->otg.usb_phy->notify_connect = dwc3_otg_notify_connect;
+ otg->otg.usb_phy->notify_disconnect = dwc3_otg_notify_disconnect;
+
+ otg->otg.start_srp = dwc3_otg_start_srp;
+ otg->otg.start_hnp = dwc3_otg_start_hnp;
+ otg->otg.set_host = dwc3_otg_set_host;
+ otg->otg.set_peripheral = dwc3_otg_set_periph;
+
+ otg->hwparams6 = reg;
+ otg->state = OTG_STATE_UNDEFINED;
+
+ spin_lock_init(&otg->lock);
+ init_waitqueue_head(&otg->main_wq);
+
+ err = usb_add_phy(otg->otg.usb_phy, USB_PHY_TYPE_USB3);
+ if (err) {
+ dev_err(otg->dev, "can't register transceiver, err: %d\n",
+ err);
+ goto exit;
+ }
+
+	otg->irq = platform_get_irq(to_platform_device(otg->dev), 1);
+	if (otg->irq < 0)
+		goto exit;
+
+ dwc3_otg_create_dev_files(otg->dev);
+
+ /* Set irq handler */
+ err = request_irq(otg->irq, dwc3_otg_irq, IRQF_SHARED, "dwc3_otg", otg);
+ if (err) {
+ dev_err(otg->otg.usb_phy->dev, "failed to request irq #%d --> %d\n",
+ otg->irq, err);
+ goto exit;
+ }
+
+ dwc3_otg_enable_irq(otg);
+
+ err = dwc3_gadget_init(dwc);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(otg->otg.usb_phy->dev,
+ "failed to initialize gadget\n");
+ goto exit;
+ }
+
+ err = dwc3_host_init(dwc);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(otg->otg.usb_phy->dev,
+ "failed to initialize host\n");
+ goto exit;
+ }
+
+ return;
+
+exit:
+ kfree(otg->otg.usb_phy);
+ kfree(otg);
+}
+
+void dwc3_otg_exit(struct dwc3 *dwc)
+{
+ struct dwc3_otg *otg = dwc->otg;
+
+ otg_dbg(otg, "\n");
+ usb_remove_phy(otg->otg.usb_phy);
+ kfree(otg->otg.usb_phy);
+ kfree(otg);
+}
diff --git a/drivers/usb/dwc3/otg.h b/drivers/usb/dwc3/otg.h
new file mode 100644
index 000000000000..25de1e5631a7
--- /dev/null
+++ b/drivers/usb/dwc3/otg.h
@@ -0,0 +1,252 @@
+/**
+ * otg.h - DesignWare USB3 DRD OTG Header
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define otg_dbg(d, fmt, args...) dev_dbg((d)->dev, "%s(): " fmt,\
+ __func__, ## args)
+#define otg_vdbg(d, fmt, args...) dev_vdbg((d)->dev, "%s(): " fmt,\
+ __func__, ## args)
+#define otg_err(d, fmt, args...) dev_err((d)->dev, "%s(): ERROR: " fmt,\
+ __func__, ## args)
+#define otg_warn(d, fmt, args...) dev_warn((d)->dev, "%s(): WARN: " fmt,\
+ __func__, ## args)
+#define otg_info(d, fmt, args...) dev_info((d)->dev, "%s(): INFO: " fmt,\
+ __func__, ## args)
+
+#ifdef VERBOSE_DEBUG
+#define otg_write(o, reg, val) do { \
+ otg_vdbg(o, "OTG_WRITE: reg=0x%05x, val=0x%08x\n", reg, val); \
+		writel(val, (o)->regs + (reg)); \
+ } while (0)
+
+#define otg_read(o, reg) ({ \
+		u32 __r = readl((o)->regs + (reg)); \
+ otg_vdbg(o, "OTG_READ: reg=0x%05x, val=0x%08x\n", reg, __r); \
+ __r; \
+ })
+#else
+#define otg_write(o, reg, val)	writel(val, (o)->regs + (reg))
+#define otg_read(o, reg)	readl((o)->regs + (reg))
+#endif
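+
+/*
+ * Illustrative use of the accessors above (mirrors the checks made in the
+ * state handlers):
+ *
+ *	if (otg_read(otg, OCTL) & OCTL_PRT_PWR_CTL)
+ *		next = OTG_STATE_A_HOST;
+ */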
+
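+/*
+ * These helpers evaluate to the remaining timeout: greater than zero if
+ * the condition became true in time, less than or equal to zero otherwise.
+ */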
+#define sleep_main_thread_until_condition_timeout(otg, condition, msecs) ({ \
+ int __timeout = msecs; \
+ while (!(condition)) { \
+ otg_dbg(otg, " ... sleeping for %d\n", __timeout); \
+ __timeout = sleep_main_thread_timeout(otg, __timeout); \
+ if (__timeout <= 0) { \
+ break; \
+ } \
+ } \
+ __timeout; \
+ })
+
+#define sleep_main_thread_until_condition(otg, condition) ({ \
+ int __rc; \
+ do { \
+ __rc = sleep_main_thread_until_condition_timeout(otg, \
+ condition, 50000); \
+ } while (__rc == 0); \
+ __rc; \
+ })
+
+#define GHWPARAMS6 0xc158
+#define GHWPARAMS6_SRP_SUPPORT_ENABLED 0x0400
+#define GHWPARAMS6_HNP_SUPPORT_ENABLED 0x0800
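+
+/* GHWPARAMS6[10] (SRPSupport) also reflects DWC_USB3_EN_OTG;
+ * dwc3_otg_init() uses it to detect whether the core was built with OTG
+ * support.
+ */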
+
+#define GCTL 0xc110
+#define GCTL_PRT_CAP_DIR 0x3000
+#define GCTL_PRT_CAP_DIR_SHIFT 12
+#define GCTL_PRT_CAP_DIR_HOST 1
+#define GCTL_PRT_CAP_DIR_DEV 2
+#define GCTL_PRT_CAP_DIR_OTG 3
+#define GCTL_GBL_HIBERNATION_EN 0x2
+
+#define OCFG 0xcc00
+#define OCFG_SRP_CAP 0x01
+#define OCFG_SRP_CAP_SHIFT 0
+#define OCFG_HNP_CAP 0x02
+#define OCFG_HNP_CAP_SHIFT 1
+#define OCFG_OTG_VERSION 0x04
+#define OCFG_OTG_VERSION_SHIFT 2
+
+#define OCTL 0xcc04
+#define OCTL_HST_SET_HNP_EN 0x01
+#define OCTL_HST_SET_HNP_EN_SHIFT 0
+#define OCTL_DEV_SET_HNP_EN 0x02
+#define OCTL_DEV_SET_HNP_EN_SHIFT 1
+#define OCTL_TERM_SEL_DL_PULSE 0x04
+#define OCTL_TERM_SEL_DL_PULSE_SHIFT 2
+#define OCTL_SES_REQ 0x08
+#define OCTL_SES_REQ_SHIFT 3
+#define OCTL_HNP_REQ 0x10
+#define OCTL_HNP_REQ_SHIFT 4
+#define OCTL_PRT_PWR_CTL 0x20
+#define OCTL_PRT_PWR_CTL_SHIFT 5
+#define OCTL_PERI_MODE 0x40
+#define OCTL_PERI_MODE_SHIFT 6
+
+#define OEVT 0xcc08
+#define OEVT_ERR 0x00000001
+#define OEVT_ERR_SHIFT 0
+#define OEVT_SES_REQ_SCS 0x00000002
+#define OEVT_SES_REQ_SCS_SHIFT 1
+#define OEVT_HST_NEG_SCS 0x00000004
+#define OEVT_HST_NEG_SCS_SHIFT 2
+#define OEVT_B_SES_VLD_EVT 0x00000008
+#define OEVT_B_SES_VLD_EVT_SHIFT 3
+#define OEVT_B_DEV_VBUS_CHNG_EVNT 0x00000100
+#define OEVT_B_DEV_VBUS_CHNG_EVNT_SHIFT 8
+#define OEVT_B_DEV_SES_VLD_DET_EVNT 0x00000200
+#define OEVT_B_DEV_SES_VLD_DET_EVNT_SHIFT 9
+#define OEVT_B_DEV_HNP_CHNG_EVNT 0x00000400
+#define OEVT_B_DEV_HNP_CHNG_EVNT_SHIFT 10
+#define OEVT_B_DEV_B_HOST_END_EVNT 0x00000800
+#define OEVT_B_DEV_B_HOST_END_EVNT_SHIFT 11
+#define OEVT_A_DEV_SESS_END_DET_EVNT 0x00010000
+#define OEVT_A_DEV_SESS_END_DET_EVNT_SHIFT 16
+#define OEVT_A_DEV_SRP_DET_EVNT 0x00020000
+#define OEVT_A_DEV_SRP_DET_EVNT_SHIFT 17
+#define OEVT_A_DEV_HNP_CHNG_EVNT 0x00040000
+#define OEVT_A_DEV_HNP_CHNG_EVNT_SHIFT 18
+#define OEVT_A_DEV_HOST_EVNT 0x00080000
+#define OEVT_A_DEV_HOST_EVNT_SHIFT 19
+#define OEVT_A_DEV_B_DEV_HOST_END_EVNT 0x00100000
+#define OEVT_A_DEV_B_DEV_HOST_END_EVNT_SHIFT 20
+#define OEVT_A_DEV_IDLE_EVNT 0x00200000
+#define OEVT_A_DEV_IDLE_EVNT_SHIFT 21
+#define OEVT_HOST_ROLE_REQ_INIT_EVNT 0x00400000
+#define OEVT_HOST_ROLE_REQ_INIT_EVNT_SHIFT 22
+#define OEVT_HOST_ROLE_REQ_CONFIRM_EVNT 0x00800000
+#define OEVT_HOST_ROLE_REQ_CONFIRM_EVNT_SHIFT 23
+#define OEVT_CONN_ID_STS_CHNG_EVNT 0x01000000
+#define OEVT_CONN_ID_STS_CHNG_EVNT_SHIFT 24
+#define OEVT_DEV_MOD_EVNT 0x80000000
+#define OEVT_DEV_MOD_EVNT_SHIFT 31
+
+#define OEVTEN 0xcc0c
+
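+/* Every OTG event the driver handles; used by dwc3_otg_enable_irq() and
+ * dwc3_otg_irq() to enable and filter interrupts.
+ */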
+#define OEVT_ALL (OEVT_CONN_ID_STS_CHNG_EVNT | \
+ OEVT_HOST_ROLE_REQ_INIT_EVNT | \
+ OEVT_HOST_ROLE_REQ_CONFIRM_EVNT | \
+ OEVT_A_DEV_B_DEV_HOST_END_EVNT | \
+ OEVT_A_DEV_HOST_EVNT | \
+ OEVT_A_DEV_HNP_CHNG_EVNT | \
+ OEVT_A_DEV_SRP_DET_EVNT | \
+ OEVT_A_DEV_SESS_END_DET_EVNT | \
+ OEVT_B_DEV_B_HOST_END_EVNT | \
+ OEVT_B_DEV_HNP_CHNG_EVNT | \
+ OEVT_B_DEV_SES_VLD_DET_EVNT | \
+ OEVT_B_DEV_VBUS_CHNG_EVNT)
+
+#define OSTS 0xcc10
+#define OSTS_CONN_ID_STS 0x0001
+#define OSTS_CONN_ID_STS_SHIFT 0
+#define OSTS_A_SES_VLD 0x0002
+#define OSTS_A_SES_VLD_SHIFT 1
+#define OSTS_B_SES_VLD 0x0004
+#define OSTS_B_SES_VLD_SHIFT 2
+#define OSTS_XHCI_PRT_PWR 0x0008
+#define OSTS_XHCI_PRT_PWR_SHIFT 3
+#define OSTS_PERIP_MODE 0x0010
+#define OSTS_PERIP_MODE_SHIFT 4
+#define OSTS_OTG_STATES 0x0f00
+#define OSTS_OTG_STATE_SHIFT 8
+
+#define DCTL 0xc704
+#define DCTL_RUN_STOP 0x80000000
+
+#define OTG_STATE_INVALID -1
+#define OTG_STATE_EXIT 14
+#define OTG_STATE_TERMINATED 15
+
+#define PERI_MODE_HOST 0
+#define PERI_MODE_PERIPHERAL 1
+
+/** The main structure to keep track of OTG driver state. */
+struct dwc3_otg {
+
+ /** OTG PHY */
+ struct usb_otg otg;
+ struct device *dev;
+ struct dwc3 *dwc;
+
+ void __iomem *regs;
+
+ int main_wakeup_needed;
+ struct task_struct *main_thread;
+ wait_queue_head_t main_wq;
+
+ spinlock_t lock;
+
+ int otg_srp_reqd;
+
+ /* Events */
+ u32 otg_events;
+
+ u32 user_events;
+
+ /** User initiated SRP.
+ *
+ * Valid in B-device during sensing/probing. Initiates SRP signalling
+ * across the bus.
+ *
+ * Also valid as an A-device during probing. This causes the A-device to
+ * apply V-bus manually and check for a device. Can be used if the
+ * device does not support SRP and the host does not support ADP.
+ */
+#define USER_SRP_EVENT 0x1
+ /** User initiated HNP (only valid in B-peripheral) */
+#define USER_HNP_EVENT 0x2
+ /** User has ended the session (only valid in B-peripheral) */
+#define USER_END_SESSION 0x4
+ /** User initiated VBUS. This will cause the A-device to turn on the
+ * VBUS and see if a device will connect (only valid in A-device during
+ * sensing/probing)
+ */
+#define USER_VBUS_ON 0x8
+ /** User has initiated RSP */
+#define USER_RSP_EVENT 0x10
+ /** Host release event */
+#define PCD_RECEIVED_HOST_RELEASE_EVENT 0x20
+ /** Initial SRP */
+#define INITIAL_SRP 0x40
+ /** A-device connected event*/
+#define USER_A_CONN_EVENT 0x80
+ /** User initiated HNP END Session. This will make the A-device and
+ * B-device to return back to their previous roles before HNP got
+ * initiated
+ */
+#define USER_HNP_END_SESSION 0x100
+
+ /* States */
+ enum usb_otg_state prev;
+ enum usb_otg_state state;
+
+ u32 hwparams6;
+ int hcd_irq;
+ int irq;
+ int host_started;
+ int peripheral_started;
+ int dev_enum;
+
+ struct delayed_work hp_work; /* drives HNP polling */
+
+};
+
+extern int usb_port_suspend(struct usb_device *udev, pm_message_t msg);
+extern void usb_kick_hub_wq(struct usb_device *dev);
diff --git a/drivers/usb/dwc3/platform_data.h b/drivers/usb/dwc3/platform_data.h
new file mode 100644
index 000000000000..ae659e367804
--- /dev/null
+++ b/drivers/usb/dwc3/platform_data.h
@@ -0,0 +1,54 @@
+/**
+ * platform_data.h - USB DWC3 Platform Data Support
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/otg.h>
+
+struct dwc3_platform_data {
+ enum usb_device_speed maximum_speed;
+ enum usb_dr_mode dr_mode;
+ bool usb3_lpm_capable;
+
+ unsigned is_utmi_l1_suspend:1;
+ u8 hird_threshold;
+
+ u8 lpm_nyet_threshold;
+
+ unsigned disable_scramble_quirk:1;
+ unsigned has_lpm_erratum:1;
+ unsigned u2exit_lfps_quirk:1;
+ unsigned u2ss_inp3_quirk:1;
+ unsigned req_p1p2p3_quirk:1;
+ unsigned del_p1p2p3_quirk:1;
+ unsigned del_phy_power_chg_quirk:1;
+ unsigned lfps_filter_quirk:1;
+ unsigned rx_detect_poll_quirk:1;
+ unsigned dis_u3_susphy_quirk:1;
+ unsigned dis_u2_susphy_quirk:1;
+ unsigned dis_enblslpm_quirk:1;
+ unsigned dis_rxdet_inp3_quirk:1;
+
+ unsigned tx_de_emphasis_quirk:1;
+ unsigned tx_de_emphasis:2;
+
+ u32 fladj_value;
+ bool refclk_fladj;
+
+ const char *hsphy_interface;
+};
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 2350e97a1662..6657393c34f4 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -237,6 +237,47 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item,
return len;
}
+static ssize_t gadget_dev_desc_max_speed_show(struct config_item *item,
+ char *page)
+{
+ struct gadget_info *gi = to_gadget_info(item);
+ enum usb_device_speed max_speed = gi->composite.gadget_driver.max_speed;
+
+ return sprintf(page, "%s\n", usb_speed_string(max_speed));
+}
+
+static ssize_t gadget_dev_desc_max_speed_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct gadget_info *gi = to_gadget_info(item);
+ char *name;
+ int ret;
+ const char * const speed_names[] = {
+ [USB_SPEED_UNKNOWN] = "UNKNOWN",
+ [USB_SPEED_LOW] = "low-speed",
+ [USB_SPEED_FULL] = "full-speed",
+ [USB_SPEED_HIGH] = "high-speed",
+ [USB_SPEED_WIRELESS] = "wireless",
+ [USB_SPEED_SUPER] = "super-speed",
+ [USB_SPEED_SUPER_PLUS] = "super-speed-plus",
+ };
+
+ name = kstrdup(page, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ if (name[len - 1] == '\n')
+ name[len - 1] = '\0';
+
+	ret = match_string(speed_names, ARRAY_SIZE(speed_names), name);
+	kfree(name);
+
+	if (ret >= 0) {
+		gi->composite.gadget_driver.max_speed = ret;
+		return len;
+	}
+
+	return ret;
+}
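+
+/* Set through configfs before binding the UDC, e.g. (path illustrative):
+ *
+ *   echo "super-speed" > /sys/kernel/config/usb_gadget/<name>/max_speed
+ */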
+
static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page)
{
struct gadget_info *gi = to_gadget_info(item);
@@ -317,6 +358,7 @@ CONFIGFS_ATTR(gadget_dev_desc_, idVendor);
CONFIGFS_ATTR(gadget_dev_desc_, idProduct);
CONFIGFS_ATTR(gadget_dev_desc_, bcdDevice);
CONFIGFS_ATTR(gadget_dev_desc_, bcdUSB);
+CONFIGFS_ATTR(gadget_dev_desc_, max_speed);
CONFIGFS_ATTR(gadget_dev_desc_, UDC);
static struct configfs_attribute *gadget_root_attrs[] = {
@@ -328,6 +370,7 @@ static struct configfs_attribute *gadget_root_attrs[] = {
&gadget_dev_desc_attr_idProduct,
&gadget_dev_desc_attr_bcdDevice,
&gadget_dev_desc_attr_bcdUSB,
+ &gadget_dev_desc_attr_max_speed,
&gadget_dev_desc_attr_UDC,
NULL,
};
@@ -1560,7 +1603,7 @@ static struct config_group *gadgets_make(
gi->composite.unbind = configfs_do_nothing;
gi->composite.suspend = NULL;
gi->composite.resume = NULL;
- gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
+ gi->composite.max_speed = gi->composite.gadget_driver.max_speed;
spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 5fd4fc49aef9..588a659f245e 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -278,6 +278,11 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
struct usb_request *req = ffs->ep0req;
int ret;
+ if (!req) {
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+ return -EINVAL;
+ }
+
req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
spin_unlock_irq(&ffs->ev.waitq.lock);
@@ -1900,10 +1905,14 @@ static void functionfs_unbind(struct ffs_data *ffs)
ENTER();
if (!WARN_ON(!ffs->gadget)) {
+ /* dequeue before freeing ep0req */
+ usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req);
+ mutex_lock(&ffs->mutex);
usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
ffs->ep0req = NULL;
ffs->gadget = NULL;
clear_bit(FFS_FL_BOUND, &ffs->flags);
+ mutex_unlock(&ffs->mutex);
ffs_data_put(ffs);
}
}
@@ -3619,6 +3628,7 @@ static void ffs_func_unbind(struct usb_configuration *c,
/* Drain any pending AIO completions */
drain_workqueue(ffs->io_completion_wq);
+ ffs_event_add(ffs, FUNCTIONFS_UNBIND);
if (!--opts->refcnt)
functionfs_unbind(ffs);
@@ -3643,7 +3653,6 @@ static void ffs_func_unbind(struct usb_configuration *c,
func->function.ssp_descriptors = NULL;
func->interfaces_nums = NULL;
- ffs_event_add(ffs, FUNCTIONFS_UNBIND);
}
static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index e4d71410a4b1..571560d689c8 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -45,12 +45,25 @@ struct f_hidg {
unsigned short report_desc_length;
char *report_desc;
unsigned short report_length;
+ /*
+ * use_out_ep - if true, the OUT Endpoint (interrupt out method)
+ * will be used to receive reports from the host
+ * using functions with the "intout" suffix.
+ * Otherwise, the OUT Endpoint will not be configured
+ * and the SETUP/SET_REPORT method ("ssreport" suffix)
+ * will be used to receive reports.
+ */
+ bool use_out_ep;
/* recv report */
- struct list_head completed_out_req;
spinlock_t read_spinlock;
wait_queue_head_t read_queue;
+ /* recv report - interrupt out only (use_out_ep == 1) */
+ struct list_head completed_out_req;
unsigned int qlen;
+ /* recv report - setup set_report only (use_out_ep == 0) */
+ char *set_report_buf;
+ unsigned int set_report_length;
/* send report */
spinlock_t write_spinlock;
@@ -58,7 +71,7 @@ struct f_hidg {
wait_queue_head_t write_queue;
struct usb_request *req;
- int minor;
+ struct device dev;
struct cdev cdev;
struct usb_function func;
@@ -71,6 +84,15 @@ static inline struct f_hidg *func_to_hidg(struct usb_function *f)
return container_of(f, struct f_hidg, func);
}
+static void hidg_release(struct device *dev)
+{
+ struct f_hidg *hidg = container_of(dev, struct f_hidg, dev);
+
+ kfree(hidg->report_desc);
+ kfree(hidg->set_report_buf);
+ kfree(hidg);
+}
+
/*-------------------------------------------------------------------------*/
/* Static descriptors */
@@ -79,7 +101,7 @@ static struct usb_interface_descriptor hidg_interface_desc = {
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
.bAlternateSetting = 0,
- .bNumEndpoints = 2,
+ /* .bNumEndpoints = DYNAMIC (depends on use_out_ep) */
.bInterfaceClass = USB_CLASS_HID,
/* .bInterfaceSubClass = DYNAMIC */
/* .bInterfaceProtocol = DYNAMIC */
@@ -140,7 +162,7 @@ static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = {
/* .wBytesPerInterval = DYNAMIC */
};
-static struct usb_descriptor_header *hidg_ss_descriptors[] = {
+static struct usb_descriptor_header *hidg_ss_descriptors_intout[] = {
(struct usb_descriptor_header *)&hidg_interface_desc,
(struct usb_descriptor_header *)&hidg_desc,
(struct usb_descriptor_header *)&hidg_ss_in_ep_desc,
@@ -150,6 +172,14 @@ static struct usb_descriptor_header *hidg_ss_descriptors[] = {
NULL,
};
+static struct usb_descriptor_header *hidg_ss_descriptors_ssreport[] = {
+ (struct usb_descriptor_header *)&hidg_interface_desc,
+ (struct usb_descriptor_header *)&hidg_desc,
+ (struct usb_descriptor_header *)&hidg_ss_in_ep_desc,
+ (struct usb_descriptor_header *)&hidg_ss_in_comp_desc,
+ NULL,
+};
+
/* High-Speed Support */
static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = {
@@ -176,7 +206,7 @@ static struct usb_endpoint_descriptor hidg_hs_out_ep_desc = {
*/
};
-static struct usb_descriptor_header *hidg_hs_descriptors[] = {
+static struct usb_descriptor_header *hidg_hs_descriptors_intout[] = {
(struct usb_descriptor_header *)&hidg_interface_desc,
(struct usb_descriptor_header *)&hidg_desc,
(struct usb_descriptor_header *)&hidg_hs_in_ep_desc,
@@ -184,6 +214,13 @@ static struct usb_descriptor_header *hidg_hs_descriptors[] = {
NULL,
};
+static struct usb_descriptor_header *hidg_hs_descriptors_ssreport[] = {
+ (struct usb_descriptor_header *)&hidg_interface_desc,
+ (struct usb_descriptor_header *)&hidg_desc,
+ (struct usb_descriptor_header *)&hidg_hs_in_ep_desc,
+ NULL,
+};
+
/* Full-Speed Support */
static struct usb_endpoint_descriptor hidg_fs_in_ep_desc = {
@@ -210,7 +247,7 @@ static struct usb_endpoint_descriptor hidg_fs_out_ep_desc = {
*/
};
-static struct usb_descriptor_header *hidg_fs_descriptors[] = {
+static struct usb_descriptor_header *hidg_fs_descriptors_intout[] = {
(struct usb_descriptor_header *)&hidg_interface_desc,
(struct usb_descriptor_header *)&hidg_desc,
(struct usb_descriptor_header *)&hidg_fs_in_ep_desc,
@@ -218,6 +255,13 @@ static struct usb_descriptor_header *hidg_fs_descriptors[] = {
NULL,
};
+static struct usb_descriptor_header *hidg_fs_descriptors_ssreport[] = {
+ (struct usb_descriptor_header *)&hidg_interface_desc,
+ (struct usb_descriptor_header *)&hidg_desc,
+ (struct usb_descriptor_header *)&hidg_fs_in_ep_desc,
+ NULL,
+};
+
/*-------------------------------------------------------------------------*/
/* Strings */
@@ -241,8 +285,8 @@ static struct usb_gadget_strings *ct_func_strings[] = {
/*-------------------------------------------------------------------------*/
/* Char Device */
-static ssize_t f_hidg_read(struct file *file, char __user *buffer,
- size_t count, loff_t *ptr)
+static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ptr)
{
struct f_hidg *hidg = file->private_data;
struct f_hidg_req_list *list;
@@ -258,15 +302,15 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
spin_lock_irqsave(&hidg->read_spinlock, flags);
-#define READ_COND (!list_empty(&hidg->completed_out_req))
+#define READ_COND_INTOUT (!list_empty(&hidg->completed_out_req))
/* wait for at least one buffer to complete */
- while (!READ_COND) {
+ while (!READ_COND_INTOUT) {
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
- if (wait_event_interruptible(hidg->read_queue, READ_COND))
+ if (wait_event_interruptible(hidg->read_queue, READ_COND_INTOUT))
return -ERESTARTSYS;
spin_lock_irqsave(&hidg->read_spinlock, flags);
@@ -316,6 +360,60 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
return count;
}
+#define READ_COND_SSREPORT (hidg->set_report_buf != NULL)
+
+static ssize_t f_hidg_ssreport_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ptr)
+{
+ struct f_hidg *hidg = file->private_data;
+ char *tmp_buf = NULL;
+ unsigned long flags;
+
+ if (!count)
+ return 0;
+
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
+
+ while (!READ_COND_SSREPORT) {
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(hidg->read_queue, READ_COND_SSREPORT))
+ return -ERESTARTSYS;
+
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
+ }
+
+ count = min_t(unsigned int, count, hidg->set_report_length);
+ tmp_buf = hidg->set_report_buf;
+ hidg->set_report_buf = NULL;
+
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
+
+ if (tmp_buf != NULL) {
+ count -= copy_to_user(buffer, tmp_buf, count);
+ kfree(tmp_buf);
+ } else {
+ count = -ENOMEM;
+ }
+
+ wake_up(&hidg->read_queue);
+
+ return count;
+}
+
+static ssize_t f_hidg_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ptr)
+{
+ struct f_hidg *hidg = file->private_data;
+
+ if (hidg->use_out_ep)
+ return f_hidg_intout_read(file, buffer, count, ptr);
+ else
+ return f_hidg_ssreport_read(file, buffer, count, ptr);
+}
+
static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_hidg *hidg = (struct f_hidg *)ep->driver_data;
@@ -439,14 +537,20 @@ static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
if (WRITE_COND)
ret |= EPOLLOUT | EPOLLWRNORM;
- if (READ_COND)
- ret |= EPOLLIN | EPOLLRDNORM;
+ if (hidg->use_out_ep) {
+ if (READ_COND_INTOUT)
+ ret |= EPOLLIN | EPOLLRDNORM;
+ } else {
+ if (READ_COND_SSREPORT)
+ ret |= EPOLLIN | EPOLLRDNORM;
+ }
return ret;
}
#undef WRITE_COND
-#undef READ_COND
+#undef READ_COND_SSREPORT
+#undef READ_COND_INTOUT
static int f_hidg_release(struct inode *inode, struct file *fd)
{
@@ -473,7 +577,7 @@ static inline struct usb_request *hidg_alloc_ep_req(struct usb_ep *ep,
return alloc_ep_req(ep, length);
}
-static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req)
+static void hidg_intout_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_hidg *hidg = (struct f_hidg *) req->context;
struct usb_composite_dev *cdev = hidg->func.config->cdev;
@@ -508,6 +612,37 @@ free_req:
}
}
+static void hidg_ssreport_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_hidg *hidg = (struct f_hidg *)req->context;
+ struct usb_composite_dev *cdev = hidg->func.config->cdev;
+ char *new_buf = NULL;
+ unsigned long flags;
+
+ if (req->status != 0 || req->buf == NULL || req->actual == 0) {
+ ERROR(cdev,
+ "%s FAILED: status=%d, buf=%p, actual=%d\n",
+ __func__, req->status, req->buf, req->actual);
+ return;
+ }
+
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
+
+ new_buf = krealloc(hidg->set_report_buf, req->actual, GFP_ATOMIC);
+ if (new_buf == NULL) {
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
+ return;
+ }
+ hidg->set_report_buf = new_buf;
+
+ hidg->set_report_length = req->actual;
+ memcpy(hidg->set_report_buf, req->buf, req->actual);
+
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
+
+ wake_up(&hidg->read_queue);
+}
+
static int hidg_setup(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
@@ -555,7 +690,11 @@ static int hidg_setup(struct usb_function *f,
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_SET_REPORT):
VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength);
- goto stall;
+ if (hidg->use_out_ep)
+ goto stall;
+ req->complete = hidg_ssreport_complete;
+ req->context = hidg;
+ goto respond;
break;
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
@@ -643,15 +782,18 @@ static void hidg_disable(struct usb_function *f)
unsigned long flags;
usb_ep_disable(hidg->in_ep);
- usb_ep_disable(hidg->out_ep);
- spin_lock_irqsave(&hidg->read_spinlock, flags);
- list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) {
- free_ep_req(hidg->out_ep, list->req);
- list_del(&list->list);
- kfree(list);
+ if (hidg->out_ep) {
+ usb_ep_disable(hidg->out_ep);
+
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
+ list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) {
+ free_ep_req(hidg->out_ep, list->req);
+ list_del(&list->list);
+ kfree(list);
+ }
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
}
- spin_unlock_irqrestore(&hidg->read_spinlock, flags);
spin_lock_irqsave(&hidg->write_spinlock, flags);
if (!hidg->write_pending) {
@@ -697,8 +839,7 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
}
-
- if (hidg->out_ep != NULL) {
+ if (hidg->use_out_ep && hidg->out_ep != NULL) {
/* restart endpoint */
usb_ep_disable(hidg->out_ep);
@@ -723,7 +864,7 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
hidg_alloc_ep_req(hidg->out_ep,
hidg->report_length);
if (req) {
- req->complete = hidg_set_report_complete;
+ req->complete = hidg_intout_complete;
req->context = hidg;
status = usb_ep_queue(hidg->out_ep, req,
GFP_ATOMIC);
@@ -749,7 +890,8 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
return 0;
disable_out_ep:
- usb_ep_disable(hidg->out_ep);
+ if (hidg->out_ep)
+ usb_ep_disable(hidg->out_ep);
free_req_in:
if (req_in)
free_ep_req(hidg->in_ep, req_in);
@@ -777,9 +919,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_ep *ep;
struct f_hidg *hidg = func_to_hidg(f);
struct usb_string *us;
- struct device *device;
int status;
- dev_t dev;
/* maybe allocate device-global string IDs, and patch descriptors */
us = usb_gstrings_attach(c->cdev, ct_func_strings,
@@ -801,14 +941,21 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
hidg->in_ep = ep;
- ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_out_ep_desc);
- if (!ep)
- goto fail;
- hidg->out_ep = ep;
+ hidg->out_ep = NULL;
+ if (hidg->use_out_ep) {
+ ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_out_ep_desc);
+ if (!ep)
+ goto fail;
+ hidg->out_ep = ep;
+ }
+
+ /* used only if use_out_ep == 1 */
+ hidg->set_report_buf = NULL;
/* set descriptor dynamic values */
hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
+ hidg_interface_desc.bNumEndpoints = hidg->use_out_ep ? 2 : 1;
hidg->protocol = HID_REPORT_PROTOCOL;
hidg->idle = 1;
hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
@@ -839,9 +986,19 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
hidg_ss_out_ep_desc.bEndpointAddress =
hidg_fs_out_ep_desc.bEndpointAddress;
- status = usb_assign_descriptors(f, hidg_fs_descriptors,
- hidg_hs_descriptors, hidg_ss_descriptors,
- hidg_ss_descriptors);
+ if (hidg->use_out_ep)
+ status = usb_assign_descriptors(f,
+ hidg_fs_descriptors_intout,
+ hidg_hs_descriptors_intout,
+ hidg_ss_descriptors_intout,
+ hidg_ss_descriptors_intout);
+ else
+ status = usb_assign_descriptors(f,
+ hidg_fs_descriptors_ssreport,
+ hidg_hs_descriptors_ssreport,
+ hidg_ss_descriptors_ssreport,
+ hidg_ss_descriptors_ssreport);
+
if (status)
goto fail;
@@ -855,21 +1012,11 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
/* create char device */
cdev_init(&hidg->cdev, &f_hidg_fops);
- dev = MKDEV(major, hidg->minor);
- status = cdev_add(&hidg->cdev, dev, 1);
+ status = cdev_device_add(&hidg->cdev, &hidg->dev);
if (status)
goto fail_free_descs;
- device = device_create(hidg_class, NULL, dev, NULL,
- "%s%d", "hidg", hidg->minor);
- if (IS_ERR(device)) {
- status = PTR_ERR(device);
- goto del;
- }
-
return 0;
-del:
- cdev_del(&hidg->cdev);
fail_free_descs:
usb_free_all_descriptors(f);
fail:
@@ -956,6 +1103,7 @@ CONFIGFS_ATTR(f_hid_opts_, name)
F_HID_OPT(subclass, 8, 255);
F_HID_OPT(protocol, 8, 255);
+F_HID_OPT(no_out_endpoint, 8, 1);
F_HID_OPT(report_length, 16, 65535);
static ssize_t f_hid_opts_report_desc_show(struct config_item *item, char *page)
@@ -1015,6 +1163,7 @@ CONFIGFS_ATTR_RO(f_hid_opts_, dev);
static struct configfs_attribute *hid_attrs[] = {
&f_hid_opts_attr_subclass,
&f_hid_opts_attr_protocol,
+ &f_hid_opts_attr_no_out_endpoint,
&f_hid_opts_attr_report_length,
&f_hid_opts_attr_report_desc,
&f_hid_opts_attr_dev,
@@ -1098,8 +1247,7 @@ static void hidg_free(struct usb_function *f)
hidg = func_to_hidg(f);
opts = container_of(f->fi, struct f_hid_opts, func_inst);
- kfree(hidg->report_desc);
- kfree(hidg);
+ put_device(&hidg->dev);
mutex_lock(&opts->lock);
--opts->refcnt;
mutex_unlock(&opts->lock);
@@ -1109,8 +1257,7 @@ static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_hidg *hidg = func_to_hidg(f);
- device_destroy(hidg_class, MKDEV(major, hidg->minor));
- cdev_del(&hidg->cdev);
+ cdev_device_del(&hidg->cdev, &hidg->dev);
usb_free_all_descriptors(f);
}
@@ -1119,6 +1266,7 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
{
struct f_hidg *hidg;
struct f_hid_opts *opts;
+ int ret;
/* allocate and initialize one new instance */
hidg = kzalloc(sizeof(*hidg), GFP_KERNEL);
@@ -1130,7 +1278,17 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
mutex_lock(&opts->lock);
++opts->refcnt;
- hidg->minor = opts->minor;
+ device_initialize(&hidg->dev);
+ hidg->dev.release = hidg_release;
+ hidg->dev.class = hidg_class;
+ hidg->dev.devt = MKDEV(major, opts->minor);
+ ret = dev_set_name(&hidg->dev, "hidg%d", opts->minor);
+ if (ret) {
+ --opts->refcnt;
+ mutex_unlock(&opts->lock);
+ return ERR_PTR(ret);
+ }
+
hidg->bInterfaceSubClass = opts->subclass;
hidg->bInterfaceProtocol = opts->protocol;
hidg->report_length = opts->report_length;
@@ -1140,11 +1298,13 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
opts->report_desc_length,
GFP_KERNEL);
if (!hidg->report_desc) {
- kfree(hidg);
+ put_device(&hidg->dev);
+ --opts->refcnt;
mutex_unlock(&opts->lock);
return ERR_PTR(-ENOMEM);
}
}
+ hidg->use_out_ep = !opts->no_out_endpoint;
mutex_unlock(&opts->lock);
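The f_hid changes above replace the split cdev_add()/device_create() pair with
the embedded-device pattern: the struct device owns the object's lifetime, so
error handling collapses to put_device() and teardown to cdev_device_del().
A minimal sketch of that pattern, using hypothetical "foo" names rather than
the actual f_hid symbols:

/* Sketch of the embedded-device char dev pattern (hypothetical driver). */
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>

struct foo {
	struct device dev;	/* owns the lifetime */
	struct cdev cdev;
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo, dev));
}

static int foo_add(struct class *cls, dev_t devt,
		   const struct file_operations *fops)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	int ret;

	if (!foo)
		return -ENOMEM;

	device_initialize(&foo->dev);	/* refcounted from here on */
	foo->dev.class = cls;
	foo->dev.devt = devt;
	foo->dev.release = foo_release;
	ret = dev_set_name(&foo->dev, "foo%d", MINOR(devt));
	if (ret)
		goto out_put;

	cdev_init(&foo->cdev, fops);
	ret = cdev_device_add(&foo->cdev, &foo->dev);	/* one call, one error path */
	if (ret)
		goto out_put;
	return 0;

out_put:
	put_device(&foo->dev);	/* release() frees foo */
	return ret;
}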
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 7c96c4665178..9fee28a35a5e 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -574,21 +574,37 @@ static int start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
+ int rc;
+
if (!fsg_is_set(common))
return false;
bh->state = BUF_STATE_SENDING;
- if (start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq))
+ rc = start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq);
+ if (rc) {
bh->state = BUF_STATE_EMPTY;
+ if (rc == -ESHUTDOWN) {
+ common->running = 0;
+ return false;
+ }
+ }
return true;
}
static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
+ int rc;
+
if (!fsg_is_set(common))
return false;
bh->state = BUF_STATE_RECEIVING;
- if (start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq))
+ rc = start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq);
+ if (rc) {
bh->state = BUF_STATE_FULL;
+ if (rc == -ESHUTDOWN) {
+ common->running = 0;
+ return false;
+ }
+ }
return true;
}
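These two hunks change the error policy of the transfer starters: any queueing
failure restores the buffer state, but only -ESHUTDOWN (the endpoint was
disabled, e.g. on disconnect) also clears common->running and aborts. A compact
userspace sketch of that decision, with hypothetical names:

/* Only a dead endpoint (-ESHUTDOWN) stops the machine; others are retried. */
#include <errno.h>
#include <stdbool.h>

enum buf_state { BUF_EMPTY, BUF_SENDING };

struct common { bool running; };

static bool start_in(struct common *c, enum buf_state *state, int (*queue)(void))
{
	int rc;

	*state = BUF_SENDING;
	rc = queue();
	if (rc) {
		*state = BUF_EMPTY;		/* undo the optimistic state */
		if (rc == -ESHUTDOWN) {		/* endpoint gone: halt */
			c->running = false;
			return false;
		}
	}
	return true;				/* transient error: caller retries */
}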
@@ -950,7 +966,7 @@ static void invalidate_sub(struct fsg_lun *curlun)
{
struct file *filp = curlun->filp;
struct inode *inode = file_inode(filp);
- unsigned long rc;
+ unsigned long __maybe_unused rc;
rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 9b19d6935df9..b8597d4a4864 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -85,7 +85,9 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
/* peak (theoretical) bulk transfer rate in bits-per-second */
static inline unsigned ncm_bitrate(struct usb_gadget *g)
{
- if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
+ if (!g)
+ return 0;
+ else if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
return 4250000000U;
else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
return 3750000000U;
@@ -1178,7 +1180,8 @@ static int ncm_unwrap_ntb(struct gether *port,
struct sk_buff_head *list)
{
struct f_ncm *ncm = func_to_ncm(&port->func);
- __le16 *tmp = (void *) skb->data;
+ unsigned char *ntb_ptr = skb->data;
+ __le16 *tmp;
unsigned index, index2;
int ndp_index;
unsigned dg_len, dg_len2;
@@ -1191,6 +1194,10 @@ static int ncm_unwrap_ntb(struct gether *port,
const struct ndp_parser_opts *opts = ncm->parser_opts;
unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
int dgram_counter;
+ int to_process = skb->len;
+
+parse_ntb:
+ tmp = (__le16 *)ntb_ptr;
/* dwSignature */
if (get_unaligned_le32(tmp) != opts->nth_sign) {
@@ -1237,7 +1244,7 @@ static int ncm_unwrap_ntb(struct gether *port,
* walk through NDP
* dwSignature
*/
- tmp = (void *)(skb->data + ndp_index);
+ tmp = (__le16 *)(ntb_ptr + ndp_index);
if (get_unaligned_le32(tmp) != ncm->ndp_sign) {
INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
goto err;
@@ -1294,11 +1301,11 @@ static int ncm_unwrap_ntb(struct gether *port,
if (ncm->is_crc) {
uint32_t crc, crc2;
- crc = get_unaligned_le32(skb->data +
+ crc = get_unaligned_le32(ntb_ptr +
index + dg_len -
crc_len);
crc2 = ~crc32_le(~0,
- skb->data + index,
+ ntb_ptr + index,
dg_len - crc_len);
if (crc != crc2) {
INFO(port->func.config->cdev,
@@ -1325,7 +1332,7 @@ static int ncm_unwrap_ntb(struct gether *port,
dg_len - crc_len);
if (skb2 == NULL)
goto err;
- skb_put_data(skb2, skb->data + index,
+ skb_put_data(skb2, ntb_ptr + index,
dg_len - crc_len);
skb_queue_tail(list, skb2);
@@ -1338,10 +1345,25 @@ static int ncm_unwrap_ntb(struct gether *port,
} while (ndp_len > 2 * (opts->dgram_item_len * 2));
} while (ndp_index);
- dev_consume_skb_any(skb);
-
VDBG(port->func.config->cdev,
"Parsed NTB with %d frames\n", dgram_counter);
+
+ to_process -= block_len;
+
+ /*
+ * Windows NCM driver avoids USB ZLPs by adding a 1-byte
+ * zero pad as needed.
+ */
+ if (to_process == 1 &&
+ (*(unsigned char *)(ntb_ptr + block_len) == 0x00)) {
+ to_process--;
+ } else if (to_process > 0) {
+ ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
+ goto parse_ntb;
+ }
+
+ dev_consume_skb_any(skb);
+
return 0;
err:
skb_queue_purge(list);
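The unwrap rewrite above handles aggregated skbs carrying more than one NTB:
after parsing a block it subtracts wBlockLength from the bytes left, then
either swallows the single zero byte that Windows appends to avoid a ZLP, or
loops back to parse the next NTB. A simplified standalone sketch of that
control flow (parse_one_ntb() is a hypothetical helper returning the block
length, 0 on error):

#include <stddef.h>

static int unwrap(const unsigned char *data, size_t len,
		  size_t (*parse_one_ntb)(const unsigned char *p))
{
	const unsigned char *p = data;
	size_t to_process = len;

	while (to_process > 0) {
		size_t block_len = parse_one_ntb(p);

		if (block_len == 0 || block_len > to_process)
			return -1;		/* malformed NTB */
		to_process -= block_len;
		p += block_len;

		/* Windows avoids a USB ZLP by appending one zero pad byte. */
		if (to_process == 1 && *p == 0x00)
			to_process = 0;
	}
	return 0;
}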
@@ -1421,7 +1443,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_composite_dev *cdev = c->cdev;
struct f_ncm *ncm = func_to_ncm(f);
struct usb_string *us;
- int status;
+ int status = 0;
struct usb_ep *ep;
struct f_ncm_opts *ncm_opts;
@@ -1439,22 +1461,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
}
- /*
- * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
- * configurations are bound in sequence with list_for_each_entry,
- * in each configuration its functions are bound in sequence
- * with list_for_each_entry, so we assume no race condition
- * with regard to ncm_opts->bound access
- */
- if (!ncm_opts->bound) {
- mutex_lock(&ncm_opts->lock);
- gether_set_gadget(ncm_opts->net, cdev->gadget);
+ mutex_lock(&ncm_opts->lock);
+ gether_set_gadget(ncm_opts->net, cdev->gadget);
+ if (!ncm_opts->bound)
status = gether_register_netdev(ncm_opts->net);
- mutex_unlock(&ncm_opts->lock);
- if (status)
- goto fail;
- ncm_opts->bound = true;
- }
+ mutex_unlock(&ncm_opts->lock);
+
+ if (status)
+ goto fail;
+
+ ncm_opts->bound = true;
+
us = usb_gstrings_attach(cdev, ncm_strings,
ARRAY_SIZE(ncm_string_defs));
if (IS_ERR(us)) {
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 2a1868b2d24c..dd5eb6202fe1 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -87,7 +87,7 @@ struct printer_dev {
u8 printer_cdev_open;
wait_queue_head_t wait;
unsigned q_len;
- char *pnp_string; /* We don't own memory! */
+ char **pnp_string; /* We don't own memory! */
struct usb_function function;
};
@@ -963,16 +963,16 @@ static int printer_func_setup(struct usb_function *f,
if ((wIndex>>8) != dev->interface)
break;
- if (!dev->pnp_string) {
+ if (!*dev->pnp_string) {
value = 0;
break;
}
- value = strlen(dev->pnp_string);
+ value = strlen(*dev->pnp_string);
buf[0] = (value >> 8) & 0xFF;
buf[1] = value & 0xFF;
- memcpy(buf + 2, dev->pnp_string, value);
+ memcpy(buf + 2, *dev->pnp_string, value);
DBG(dev, "1284 PNP String: %x %s\n", value,
- dev->pnp_string);
+ *dev->pnp_string);
break;
case GET_PORT_STATUS: /* Get Port Status */
@@ -1435,7 +1435,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi)
kref_init(&dev->kref);
++opts->refcnt;
dev->minor = opts->minor;
- dev->pnp_string = opts->pnp_string;
+ dev->pnp_string = &opts->pnp_string;
dev->q_len = opts->q_len;
mutex_unlock(&opts->lock);
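Switching dev->pnp_string to char ** makes the function dereference the
configfs-owned slot at request time, so a later write to the pnp_string
attribute (which can free and replace the buffer) cannot leave the bound
function holding a dangling copy. A small standalone illustration of the
aliasing:

/* Why a char ** survives reallocation while a cached char * does not. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *opt = strdup("old id");
	char *cached = opt;	/* stale after the "store" below */
	char **live = &opt;	/* always sees the current buffer */

	free(opt);
	opt = strdup("new id");	/* a configfs store replacing the string */

	printf("live: %s\n", *live);	/* prints "new id" */
	/* printf("%s\n", cached); -- use-after-free, the old bug */
	(void)cached;
	free(opt);
	return 0;
}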
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 41a10bcc2efc..60e40c118d13 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -43,14 +43,17 @@ static inline struct f_uas *to_f_uas(struct usb_function *f)
/* Start bot.c code */
+static struct usbg_cdb *acquire_cmd_request(struct f_uas *fu);
+static void release_cmd_request(struct f_uas *fu, struct usb_request *req);
static int bot_enqueue_cmd_cbw(struct f_uas *fu)
{
int ret;
+	struct usbg_cdb *cmd;
if (fu->flags & USBG_BOT_CMD_PEND)
return 0;
-	ret = usb_ep_queue(fu->ep_out, fu->cmd.req, GFP_ATOMIC);
+	cmd = acquire_cmd_request(fu);
+	if (!cmd)
+		return -ENOMEM;
+	ret = usb_ep_queue(fu->ep_out, cmd->req, GFP_ATOMIC);
if (!ret)
fu->flags |= USBG_BOT_CMD_PEND;
return ret;
@@ -61,6 +64,7 @@ static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
struct usbg_cmd *cmd = req->context;
struct f_uas *fu = cmd->fu;
+ release_cmd_request(fu, req);
transport_generic_free_cmd(&cmd->se_cmd, 0);
if (req->status < 0) {
pr_err("ERR %s(%d)\n", __func__, __LINE__);
@@ -136,7 +140,7 @@ static void bot_send_bad_status(struct usbg_cmd *cmd)
}
req->complete = bot_err_compl;
req->context = cmd;
- req->buf = fu->cmd.buf;
+ req->buf = fu->cmd[0]->buf;
usb_ep_queue(ep, req, GFP_KERNEL);
} else {
bot_enqueue_sense_code(fu, cmd);
@@ -245,7 +249,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct usb_gadget *gadget = fuas_to_gadget(fu);
int ret;
init_completion(&cmd->write_complete);
@@ -256,22 +259,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
return -EINVAL;
}
- if (!gadget->sg_supported) {
- cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
- if (!cmd->data_buf)
- return -ENOMEM;
-
- fu->bot_req_out->buf = cmd->data_buf;
- } else {
- fu->bot_req_out->buf = NULL;
- fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
- fu->bot_req_out->sg = se_cmd->t_data_sg;
- }
-
- fu->bot_req_out->complete = usbg_data_write_cmpl;
- fu->bot_req_out->length = se_cmd->data_length;
- fu->bot_req_out->context = cmd;
-
ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
if (ret)
goto cleanup;
@@ -297,11 +284,84 @@ static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
if (req->status < 0)
return;
+ release_cmd_request(fu, req);
ret = bot_submit_command(fu, req->buf, req->actual);
if (ret)
pr_err("%s(%d): %d\n", __func__, __LINE__, ret);
}
+static struct usbg_cdb *acquire_cmd_request(struct f_uas *fu)
+{
+ int i;
+
+ for (i = 0; i < fu->ncmd; i++) {
+ if (!fu->cmd[i]->claimed) {
+ fu->cmd[i]->claimed = true;
+ return fu->cmd[i];
+ }
+ }
+ return NULL;
+}
+
+static void release_cmd_request(struct f_uas *fu, struct usb_request *req)
+{
+ int i;
+
+ for (i = 0; i < fu->ncmd; i++) {
+ if (fu->cmd[i]->req == req)
+ fu->cmd[i]->claimed = false;
+ }
+}
+
+static void free_cmd_resource(struct f_uas *fu, struct usb_ep *ep)
+{
+ int i;
+
+ for (i = 0; i < fu->ncmd; i++) {
+ if (fu->cmd[i]->req)
+ usb_ep_free_request(ep, fu->cmd[i]->req);
+
+ kfree(fu->cmd[i]->buf);
+ fu->cmd[i]->buf = NULL;
+
+ kfree(fu->cmd[i]);
+ fu->cmd[i] = NULL;
+ }
+}
+
+static int alloc_cmd_resource(struct f_uas *fu, int num, struct usb_ep *ep,
+ void (*complete)(struct usb_ep *ep,
+ struct usb_request *req))
+{
+ int i;
+
+ fu->ncmd = num;
+ for (i = 0; i < fu->ncmd; i++) {
+		fu->cmd[i] = kzalloc(sizeof(struct usbg_cdb),
+				     GFP_KERNEL);
+		if (!fu->cmd[i])
+ goto err_cmd;
+
+ fu->cmd[i]->req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!fu->cmd[i]->req)
+ goto err_cmd;
+
+ fu->cmd[i]->buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
+ if (!fu->cmd[i]->buf)
+ goto err_cmd;
+
+ fu->cmd[i]->req->complete = complete;
+ fu->cmd[i]->req->buf = fu->cmd[i]->buf;
+ fu->cmd[i]->req->length = fu->ep_out->maxpacket;
+ fu->cmd[i]->req->context = fu;
+ }
+
+ return 0;
+err_cmd:
+ free_cmd_resource(fu, ep);
+ return -ENOMEM;
+}
+
static int bot_prepare_reqs(struct f_uas *fu)
{
int ret;
@@ -314,10 +374,6 @@ static int bot_prepare_reqs(struct f_uas *fu)
if (!fu->bot_req_out)
goto err_out;
- fu->cmd.req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
- if (!fu->cmd.req)
- goto err_cmd;
-
fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
if (!fu->bot_status.req)
goto err_sts;
@@ -327,28 +383,20 @@ static int bot_prepare_reqs(struct f_uas *fu)
fu->bot_status.req->complete = bot_status_complete;
fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
- fu->cmd.buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
- if (!fu->cmd.buf)
- goto err_buf;
-
- fu->cmd.req->complete = bot_cmd_complete;
- fu->cmd.req->buf = fu->cmd.buf;
- fu->cmd.req->length = fu->ep_out->maxpacket;
- fu->cmd.req->context = fu;
+ ret = alloc_cmd_resource(fu, BOT_MAX_COMMANDS, fu->ep_out,
+ bot_cmd_complete);
+ if (ret)
+ goto err_cmd;
ret = bot_enqueue_cmd_cbw(fu);
if (ret)
goto err_queue;
return 0;
err_queue:
- kfree(fu->cmd.buf);
- fu->cmd.buf = NULL;
-err_buf:
+ free_cmd_resource(fu, fu->ep_out);
+err_cmd:
usb_ep_free_request(fu->ep_in, fu->bot_status.req);
err_sts:
- usb_ep_free_request(fu->ep_out, fu->cmd.req);
- fu->cmd.req = NULL;
-err_cmd:
usb_ep_free_request(fu->ep_out, fu->bot_req_out);
fu->bot_req_out = NULL;
err_out:
@@ -372,16 +420,13 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
usb_ep_free_request(fu->ep_in, fu->bot_req_in);
usb_ep_free_request(fu->ep_out, fu->bot_req_out);
- usb_ep_free_request(fu->ep_out, fu->cmd.req);
usb_ep_free_request(fu->ep_in, fu->bot_status.req);
- kfree(fu->cmd.buf);
+ free_cmd_resource(fu, fu->ep_out);
fu->bot_req_in = NULL;
fu->bot_req_out = NULL;
- fu->cmd.req = NULL;
fu->bot_status.req = NULL;
- fu->cmd.buf = NULL;
}
static void bot_set_alt(struct f_uas *fu)
@@ -480,14 +525,6 @@ static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
stream->req_status = NULL;
}
-static void uasp_free_cmdreq(struct f_uas *fu)
-{
- usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
- kfree(fu->cmd.buf);
- fu->cmd.req = NULL;
- fu->cmd.buf = NULL;
-}
-
static void uasp_cleanup_old_alt(struct f_uas *fu)
{
int i;
@@ -502,7 +539,7 @@ static void uasp_cleanup_old_alt(struct f_uas *fu)
for (i = 0; i < UASP_SS_EP_COMP_NUM_STREAMS; i++)
uasp_cleanup_one_stream(fu, &fu->stream[i]);
- uasp_free_cmdreq(fu);
+ free_cmd_resource(fu, fu->ep_cmd);
}
static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
@@ -565,6 +602,7 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
struct usbg_cmd *cmd = req->context;
struct uas_stream *stream = cmd->stream;
struct f_uas *fu = cmd->fu;
+ struct usbg_cdb *cmd_cdb;
int ret;
if (req->status < 0)
@@ -599,7 +637,8 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
case UASP_QUEUE_COMMAND:
transport_generic_free_cmd(&cmd->se_cmd, 0);
- usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+ cmd_cdb = acquire_cmd_request(fu);
+ usb_ep_queue(fu->ep_cmd, cmd_cdb->req, GFP_ATOMIC);
break;
default:
@@ -719,11 +758,13 @@ static int usbg_submit_command(struct f_uas *, void *, unsigned int);
static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_uas *fu = req->context;
+ struct usbg_cdb *cmd;
int ret;
if (req->status < 0)
return;
+ release_cmd_request(fu, req);
ret = usbg_submit_command(fu, req->buf, req->actual);
/*
* Once we tune for performance enqueue the command req here again so
@@ -733,7 +774,8 @@ static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
*/
if (!ret)
return;
- usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+ cmd = acquire_cmd_request(fu);
+ usb_ep_queue(fu->ep_cmd, cmd->req, GFP_ATOMIC);
}
static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
@@ -762,28 +804,6 @@ out:
return -ENOMEM;
}
-static int uasp_alloc_cmd(struct f_uas *fu)
-{
- fu->cmd.req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
- if (!fu->cmd.req)
- goto err;
-
- fu->cmd.buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
- if (!fu->cmd.buf)
- goto err_buf;
-
- fu->cmd.req->complete = uasp_cmd_complete;
- fu->cmd.req->buf = fu->cmd.buf;
- fu->cmd.req->length = fu->ep_cmd->maxpacket;
- fu->cmd.req->context = fu;
- return 0;
-
-err_buf:
- usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
-err:
- return -ENOMEM;
-}
-
static void uasp_setup_stream_res(struct f_uas *fu, int max_streams)
{
int i;
@@ -801,12 +821,15 @@ static int uasp_prepare_reqs(struct f_uas *fu)
{
int ret;
int i;
- int max_streams;
+ int max_streams, max_commands;
- if (fu->flags & USBG_USE_STREAMS)
+ if (fu->flags & USBG_USE_STREAMS) {
+ max_commands = UASP_MAX_COMMANDS;
max_streams = UASP_SS_EP_COMP_NUM_STREAMS;
- else
+ } else {
+ max_commands = 1;
max_streams = 1;
+ }
for (i = 0; i < max_streams; i++) {
ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
@@ -814,19 +837,25 @@ static int uasp_prepare_reqs(struct f_uas *fu)
goto err_cleanup;
}
- ret = uasp_alloc_cmd(fu);
+ ret = alloc_cmd_resource(fu, max_commands, fu->ep_cmd,
+ uasp_cmd_complete);
if (ret)
goto err_free_stream;
uasp_setup_stream_res(fu, max_streams);
- ret = usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
- if (ret)
- goto err_free_stream;
+	/* queue the available command requests */
+ for (i = 0; i < fu->ncmd; i++) {
+ struct usbg_cdb *cmd = acquire_cmd_request(fu);
+
+ ret = usb_ep_queue(fu->ep_cmd, cmd->req, GFP_ATOMIC);
+ if (ret)
+ goto err_free_stream;
+ }
return 0;
err_free_stream:
- uasp_free_cmdreq(fu);
+ free_cmd_resource(fu, fu->ep_cmd);
err_cleanup:
if (i) {
@@ -839,16 +868,28 @@ err_cleanup:
return ret;
}
+#define SS_BOT_INTERFACE_DESC_NO 5
static void uasp_set_alt(struct f_uas *fu)
{
struct usb_function *f = &fu->function;
struct usb_gadget *gadget = f->config->cdev->gadget;
+ struct usb_descriptor_header **ss_uasp_backup = f->ss_descriptors;
int ret;
fu->flags = USBG_IS_UAS;
- if (gadget->speed == USB_SPEED_SUPER)
+ if (gadget->speed == USB_SPEED_SUPER) {
fu->flags |= USBG_USE_STREAMS;
+		/* If the device is connected at SuperSpeed, the companion
+		 * descriptor with streams should be attached. Since BOT and
+		 * UAS use the same endpoints, config_ep_by_speed() returns
+		 * the first match: the companion descriptor without streams.
+		 * This is only a workaround; a proper fix still needs to be
+		 * introduced. Advance the ss_descriptors header by the number
+		 * of descriptors present in BOT mode.
+		 */
+ f->ss_descriptors += SS_BOT_INTERFACE_DESC_NO;
+ }
config_ep_by_speed(gadget, f, fu->ep_in);
ret = usb_ep_enable(fu->ep_in);
@@ -874,6 +915,10 @@ static void uasp_set_alt(struct f_uas *fu)
goto err_wq;
fu->flags |= USBG_ENABLED;
+ /* restore ss_descriptors */
+ if (gadget->speed == USB_SPEED_SUPER)
+ f->ss_descriptors = ss_uasp_backup;
+
pr_info("Using the UAS protocol\n");
return;
err_wq:
@@ -885,6 +930,9 @@ err_cmd:
err_b_out:
usb_ep_disable(fu->ep_in);
err_b_in:
+ /* restore ss_descriptors */
+ if (gadget->speed == USB_SPEED_SUPER)
+ f->ss_descriptors = ss_uasp_backup;
fu->flags = 0;
}
@@ -950,6 +998,56 @@ static int get_cmd_dir(const unsigned char *cdb)
return ret;
}
+static void recover_w_length_with_maxpacket(struct usbg_cmd *cmd,
+ struct usb_request *req)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct f_uas *fu = cmd->fu;
+ struct usb_gadget *gadget = fuas_to_gadget(fu);
+ int rem;
+
+ rem = se_cmd->data_length % fu->ep_out->maxpacket;
+ if (rem) {
+ /* recover paded data length */
+ cmd->data_len -= fu->ep_out->maxpacket - rem;
+
+ if (gadget->sg_supported) {
+ struct scatterlist *s = sg_last(se_cmd->t_data_sg,
+ se_cmd->t_data_nents);
+
+ s->length -= fu->ep_out->maxpacket - rem;
+ }
+ }
+}
+
+static void adjust_w_length_with_maxpacket(struct usbg_cmd *cmd,
+ struct usb_request *req)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct f_uas *fu = cmd->fu;
+ struct usb_gadget *gadget = fuas_to_gadget(fu);
+ int rem;
+
+ cmd->data_len = se_cmd->data_length;
+ rem = cmd->data_len % fu->ep_out->maxpacket;
+ if (rem) {
+		/* pad the data length so that the transfer size is a
+		 * multiple of the max packet size
+		 */
+ cmd->data_len += fu->ep_out->maxpacket - rem;
+
+ if (gadget->sg_supported) {
+			/* if sg is supported, the data length in the last
+			 * page also needs to be adjusted to a multiple of
+			 * the max packet size.
+			 */
+ struct scatterlist *s = sg_last(se_cmd->t_data_sg,
+ se_cmd->t_data_nents);
+
+ s->length += fu->ep_out->maxpacket - rem;
+ }
+ }
+}
+
static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
{
struct usbg_cmd *cmd = req->context;
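adjust_w_length_with_maxpacket() rounds a bulk-OUT transfer up to a multiple of
wMaxPacketSize before queueing, and recover_w_length_with_maxpacket() subtracts
the pad again on completion so the SCSI layer sees the original length. The
arithmetic round trip, standalone (a 1024-byte SuperSpeed bulk maxpacket is
assumed):

#include <assert.h>

static unsigned int pad_to_maxpacket(unsigned int len, unsigned int maxp)
{
	unsigned int rem = len % maxp;

	return rem ? len + (maxp - rem) : len;
}

int main(void)
{
	unsigned int maxp = 1024;
	unsigned int len = 4100;
	unsigned int padded = pad_to_maxpacket(len, maxp);

	assert(padded == 5120);				/* next 1024 boundary */
	assert(padded - (maxp - len % maxp) == len);	/* the recover step */
	return 0;
}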
@@ -960,6 +1058,8 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
goto cleanup;
}
+ recover_w_length_with_maxpacket(cmd, req);
+
if (req->num_sgs == 0) {
sg_copy_from_buffer(se_cmd->t_data_sg,
se_cmd->t_data_nents,
@@ -980,8 +1080,10 @@ static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
struct f_uas *fu = cmd->fu;
struct usb_gadget *gadget = fuas_to_gadget(fu);
+ adjust_w_length_with_maxpacket(cmd, req);
+
if (!gadget->sg_supported) {
- cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
+ cmd->data_buf = kmalloc(cmd->data_len, GFP_ATOMIC);
if (!cmd->data_buf)
return -ENOMEM;
@@ -993,7 +1095,7 @@ static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
}
req->complete = usbg_data_write_cmpl;
- req->length = se_cmd->data_length;
+ req->length = cmd->data_len;
req->context = cmd;
return 0;
}
@@ -1186,7 +1288,8 @@ static void bot_cmd_work(struct work_struct *work)
if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
- cmd->data_len, cmd->prio_attr, dir, 0) < 0)
+ cmd->data_len, cmd->prio_attr, dir,
+ TARGET_SCF_ACK_KREF) < 0)
goto out;
return;
@@ -1675,9 +1778,11 @@ static ssize_t tcm_usbg_tpg_nexus_store(struct config_item *item,
CONFIGFS_ATTR(tcm_usbg_tpg_, enable);
CONFIGFS_ATTR(tcm_usbg_tpg_, nexus);
+static struct configfs_attribute tcm_usbg_tpg_attr_maxburst;
static struct configfs_attribute *usbg_base_attrs[] = {
&tcm_usbg_tpg_attr_enable,
&tcm_usbg_tpg_attr_nexus,
+ &tcm_usbg_tpg_attr_maxburst,
NULL,
};
@@ -1985,6 +2090,32 @@ static struct usb_gadget_strings *tcm_strings[] = {
NULL,
};
+static ssize_t tcm_usbg_tpg_maxburst_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", uasp_cmd_comp_desc.bMaxBurst);
+}
+
+static ssize_t tcm_usbg_tpg_maxburst_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ int value;
+ int ret;
+
+ ret = kstrtouint(page, 10, &value);
+ if (ret)
+ return ret;
+
+ uasp_bi_ep_comp_desc.bMaxBurst = value;
+ uasp_bo_ep_comp_desc.bMaxBurst = value;
+ uasp_status_in_ep_comp_desc.bMaxBurst = value;
+ uasp_cmd_comp_desc.bMaxBurst = value;
+ bot_bi_ep_comp_desc.bMaxBurst = value;
+ bot_bo_ep_comp_desc.bMaxBurst = value;
+
+ return count;
+}
+CONFIGFS_ATTR(tcm_usbg_tpg_, maxburst);
+
static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
{
struct f_uas *fu = to_f_uas(f);
@@ -2114,6 +2245,13 @@ static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
return -EOPNOTSUPP;
}
+static int tcm_get_alt(struct usb_function *f, unsigned int intf)
+{
+ struct f_uas *fu = to_f_uas(f);
+
+ return fu->flags & USBG_IS_UAS ? 1 : 0;
+}
+
static void tcm_disable(struct usb_function *f)
{
struct f_uas *fu = to_f_uas(f);
@@ -2302,6 +2440,7 @@ static struct usb_function *tcm_alloc(struct usb_function_instance *fi)
fu->function.bind = tcm_bind;
fu->function.unbind = tcm_unbind;
fu->function.set_alt = tcm_set_alt;
+ fu->function.get_alt = tcm_get_alt;
fu->function.setup = tcm_setup;
fu->function.disable = tcm_disable;
fu->function.free_func = tcm_free;
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index c03b67aab1a8..094a88ff9a67 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -213,8 +213,9 @@ uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req)
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_DATA;
- uvc_event->data.length = req->actual;
- memcpy(&uvc_event->data.data, req->buf, req->actual);
+ uvc_event->data.length = min_t(unsigned int, req->actual,
+ sizeof(uvc_event->data.data));
+ memcpy(&uvc_event->data.data, req->buf, uvc_event->data.length);
v4l2_event_queue(&uvc->vdev, &v4l2_event);
}
}
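The clamp above bounds a host-controlled length (req->actual) by the fixed size
of the event payload before the memcpy, closing a buffer overflow on oversized
control transfers. The same defensive-copy idiom in isolation (the 60-byte
payload size is illustrative):

#include <stdint.h>
#include <string.h>

struct data_event {
	unsigned int length;
	uint8_t data[60];	/* fixed-size event payload */
};

static void fill_event(struct data_event *ev, const void *buf,
		       unsigned int actual)
{
	ev->length = actual < sizeof(ev->data) ? actual : sizeof(ev->data);
	memcpy(ev->data, buf, ev->length);	/* bounded by the clamp above */
}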
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index f7e6c42558eb..021984921f91 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -294,8 +294,10 @@ EXPORT_SYMBOL_GPL(fsg_lun_fsync_sub);
void store_cdrom_address(u8 *dest, int msf, u32 addr)
{
if (msf) {
- /* Convert to Minutes-Seconds-Frames */
- addr >>= 2; /* Convert to 2048-byte frames */
+ /*
+ * Convert to Minutes-Seconds-Frames.
+ * Sector size is already set to 2048 bytes.
+ */
addr += 2*75; /* Lead-in occupies 2 seconds */
dest[3] = addr % 75; /* Frames */
addr /= 75;
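With the CD-ROM sector size already fixed at 2048 bytes, the dropped
"addr >>= 2" rescaled the LBA a second time; the address is now converted to
Minutes-Seconds-Frames directly, after adding the 2-second lead-in. The
conversion, standalone:

/* CD-ROM LBA -> Minutes-Seconds-Frames, 75 frames per second. */
#include <stdio.h>

static void lba_to_msf(unsigned int lba, unsigned char msf[3])
{
	lba += 2 * 75;			/* lead-in occupies 2 seconds */
	msf[2] = lba % 75;		/* frames */
	lba /= 75;
	msf[1] = lba % 60;		/* seconds */
	msf[0] = lba / 60;		/* minutes */
}

int main(void)
{
	unsigned char msf[3];

	lba_to_msf(0, msf);		/* LBA 0 maps to 00:02:00 */
	printf("%02u:%02u:%02u\n", msf[0], msf[1], msf[2]);
	return 0;
}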
diff --git a/drivers/usb/gadget/function/tcm.h b/drivers/usb/gadget/function/tcm.h
index 3cd565794ad7..54ed3bca8add 100644
--- a/drivers/usb/gadget/function/tcm.h
+++ b/drivers/usb/gadget/function/tcm.h
@@ -98,6 +98,7 @@ struct uas_stream {
struct usbg_cdb {
struct usb_request *req;
void *buf;
+ bool claimed;
};
struct bot_status {
@@ -105,6 +106,9 @@ struct bot_status {
struct bulk_cs_wrap csw;
};
+#define UASP_MAX_COMMANDS 6
+#define BOT_MAX_COMMANDS 1
+#define MAX_COMMANDS UASP_MAX_COMMANDS
struct f_uas {
struct usbg_tpg *tpg;
struct usb_function function;
@@ -117,7 +121,8 @@ struct f_uas {
#define USBG_IS_BOT (1 << 3)
#define USBG_BOT_CMD_PEND (1 << 4)
- struct usbg_cdb cmd;
+ u32 ncmd;
+ struct usbg_cdb *cmd[MAX_COMMANDS];
struct usb_ep *ep_in;
struct usb_ep *ep_out;
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 4e01ba0ab8ec..53b30de16a89 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -626,7 +626,7 @@ void g_audio_cleanup(struct g_audio *g_audio)
uac = g_audio->uac;
card = uac->card;
if (card)
- snd_card_free(card);
+ snd_card_free_when_closed(card);
kfree(uac->p_prm.ureq);
kfree(uac->c_prm.ureq);
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 3f053b11e2ce..af3cb3bfdc29 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -17,6 +17,8 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/string_helpers.h>
+#include <linux/usb/composite.h>
#include "u_ether.h"
@@ -102,41 +104,6 @@ static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
/*-------------------------------------------------------------------------*/
-/* REVISIT there must be a better way than having two sets
- * of debug calls ...
- */
-
-#undef DBG
-#undef VDBG
-#undef ERROR
-#undef INFO
-
-#define xprintk(d, level, fmt, args...) \
- printk(level "%s: " fmt , (d)->net->name , ## args)
-
-#ifdef DEBUG
-#undef DEBUG
-#define DBG(dev, fmt, args...) \
- xprintk(dev , KERN_DEBUG , fmt , ## args)
-#else
-#define DBG(dev, fmt, args...) \
- do { } while (0)
-#endif /* DEBUG */
-
-#ifdef VERBOSE_DEBUG
-#define VDBG DBG
-#else
-#define VDBG(dev, fmt, args...) \
- do { } while (0)
-#endif /* DEBUG */
-
-#define ERROR(dev, fmt, args...) \
- xprintk(dev , KERN_ERR , fmt , ## args)
-#define INFO(dev, fmt, args...) \
- xprintk(dev , KERN_INFO , fmt , ## args)
-
-/*-------------------------------------------------------------------------*/
-
/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
@@ -974,6 +941,8 @@ int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
dev = netdev_priv(net);
snprintf(host_addr, len, "%pm", dev->host_mac);
+ string_upper(host_addr, host_addr);
+
return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);
diff --git a/drivers/usb/gadget/function/u_hid.h b/drivers/usb/gadget/function/u_hid.h
index 1594bfa312eb..90d8b1c0f25f 100644
--- a/drivers/usb/gadget/function/u_hid.h
+++ b/drivers/usb/gadget/function/u_hid.h
@@ -20,6 +20,7 @@ struct f_hid_opts {
int minor;
unsigned char subclass;
unsigned char protocol;
+ unsigned char no_out_endpoint;
unsigned short report_length;
unsigned short report_desc_length;
unsigned char *report_desc;
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 5c042f380708..c4212226e01b 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -191,7 +191,7 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
goto requeue;
default:
- uvcg_info(&video->uvc->func,
+ uvcg_warn(&video->uvc->func,
"VS request completed with status %d.\n",
req->status);
uvcg_queue_cancel(queue, 0);
@@ -207,8 +207,8 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
video->encode(req, video, buf);
- ret = uvcg_video_ep_queue(video, req);
spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ ret = uvcg_video_ep_queue(video, req);
if (ret < 0) {
uvcg_queue_cancel(queue, 0);
@@ -332,9 +332,10 @@ int uvcg_video_pump(struct uvc_video *video)
video->encode(req, video, buf);
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+
/* Queue the USB request */
ret = uvcg_video_ep_queue(video, req);
- spin_unlock_irqrestore(&queue->irqlock, flags);
if (ret < 0) {
uvcg_queue_cancel(queue, 0);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 9cd80ad075bd..dd72e75beb82 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -229,6 +229,7 @@ static void put_ep (struct ep_data *data)
*/
static const char *CHIP;
+static DEFINE_MUTEX(sb_mutex); /* Serialize superblock operations */
/*----------------------------------------------------------------------*/
@@ -362,6 +363,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
spin_unlock_irq (&epdata->dev->lock);
DBG (epdata->dev, "endpoint gone\n");
+ wait_for_completion(&done);
epdata->status = -ENODEV;
}
}
@@ -2012,13 +2014,20 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
struct dev_data *dev;
+ int rc;
- if (the_device)
- return -ESRCH;
+ mutex_lock(&sb_mutex);
+
+ if (the_device) {
+ rc = -ESRCH;
+ goto Done;
+ }
CHIP = usb_get_gadget_udc_name();
- if (!CHIP)
- return -ENODEV;
+ if (!CHIP) {
+ rc = -ENODEV;
+ goto Done;
+ }
/* superblock */
sb->s_blocksize = PAGE_SIZE;
@@ -2055,13 +2064,17 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
* from binding to a controller.
*/
the_device = dev;
- return 0;
+ rc = 0;
+ goto Done;
-Enomem:
+ Enomem:
kfree(CHIP);
CHIP = NULL;
+ rc = -ENOMEM;
- return -ENOMEM;
+ Done:
+ mutex_unlock(&sb_mutex);
+ return rc;
}
/* "mount -t gadgetfs path /dev/gadget" ends up here */
@@ -2083,6 +2096,7 @@ static int gadgetfs_init_fs_context(struct fs_context *fc)
static void
gadgetfs_kill_sb (struct super_block *sb)
{
+ mutex_lock(&sb_mutex);
kill_litter_super (sb);
if (the_device) {
put_dev (the_device);
@@ -2090,6 +2104,7 @@ gadgetfs_kill_sb (struct super_block *sb)
}
kfree(CHIP);
CHIP = NULL;
+ mutex_unlock(&sb_mutex);
}
/*----------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
index 2c9eab2b863d..ff970a943347 100644
--- a/drivers/usb/gadget/legacy/webcam.c
+++ b/drivers/usb/gadget/legacy/webcam.c
@@ -293,6 +293,7 @@ static const struct uvc_descriptor_header * const uvc_fs_streaming_cls[] = {
(const struct uvc_descriptor_header *) &uvc_format_yuv,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+ (const struct uvc_descriptor_header *) &uvc_color_matching,
(const struct uvc_descriptor_header *) &uvc_format_mjpg,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
@@ -305,6 +306,7 @@ static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
(const struct uvc_descriptor_header *) &uvc_format_yuv,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+ (const struct uvc_descriptor_header *) &uvc_color_matching,
(const struct uvc_descriptor_header *) &uvc_format_mjpg,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
@@ -317,6 +319,7 @@ static const struct uvc_descriptor_header * const uvc_ss_streaming_cls[] = {
(const struct uvc_descriptor_header *) &uvc_format_yuv,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+ (const struct uvc_descriptor_header *) &uvc_color_matching,
(const struct uvc_descriptor_header *) &uvc_format_mjpg,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index f985bb4a42db..ccf2c736d495 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -311,7 +311,7 @@ source "drivers/usb/gadget/udc/bdc/Kconfig"
config USB_AMD5536UDC
tristate "AMD5536 UDC"
- depends on USB_PCI
+ depends on USB_PCI && HAS_DMA
select USB_SNP_CORE
help
The AMD5536 UDC is part of the AMD Geode CS5536, an x86 southbridge.
diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
index 362284057d30..a3d15c3fb82a 100644
--- a/drivers/usb/gadget/udc/amd5536udc_pci.c
+++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
@@ -171,6 +171,9 @@ static int udc_pci_probe(
retval = -ENODEV;
goto err_probe;
}
+
+ udc = dev;
+
return 0;
err_probe:
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
index 7bfd58c846f7..71ed3a15130f 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_udc.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -151,6 +151,7 @@ static void bdc_uspc_disconnected(struct bdc *bdc, bool reinit)
bdc->delayed_status = false;
bdc->reinit = reinit;
bdc->test_mode = false;
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_NOTATTACHED);
}
/* TNotify wakeup timer */
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index c313d07ec16f..30389e966e42 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -629,10 +629,10 @@ static void fotg210_request_error(struct fotg210_udc *fotg210)
static void fotg210_set_address(struct fotg210_udc *fotg210,
struct usb_ctrlrequest *ctrl)
{
- if (ctrl->wValue >= 0x0100) {
+ if (le16_to_cpu(ctrl->wValue) >= 0x0100) {
fotg210_request_error(fotg210);
} else {
- fotg210_set_dev_addr(fotg210, ctrl->wValue);
+ fotg210_set_dev_addr(fotg210, le16_to_cpu(ctrl->wValue));
fotg210_set_cxdone(fotg210);
}
}
@@ -713,17 +713,17 @@ static void fotg210_get_status(struct fotg210_udc *fotg210,
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
- fotg210->ep0_data = 1 << USB_DEVICE_SELF_POWERED;
+ fotg210->ep0_data = cpu_to_le16(1 << USB_DEVICE_SELF_POWERED);
break;
case USB_RECIP_INTERFACE:
- fotg210->ep0_data = 0;
+ fotg210->ep0_data = cpu_to_le16(0);
break;
case USB_RECIP_ENDPOINT:
epnum = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK;
if (epnum)
fotg210->ep0_data =
- fotg210_is_epnstall(fotg210->ep[epnum])
- << USB_ENDPOINT_HALT;
+ cpu_to_le16(fotg210_is_epnstall(fotg210->ep[epnum])
+ << USB_ENDPOINT_HALT);
else
fotg210_request_error(fotg210);
break;
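wValue and the other setup fields in struct usb_ctrlrequest are little-endian
on the wire, so comparing or storing them on a big-endian CPU without
le16_to_cpu()/cpu_to_le16() silently swaps bytes; on little-endian hosts the
conversions compile away, which is why the bug stays hidden. A self-contained
demonstration of the read side:

#include <assert.h>
#include <stdint.h>

/* Read a wire-format (little-endian) 16-bit field, host-endianness safe. */
static uint16_t le16_to_cpu_(uint16_t v)
{
	const uint8_t *b = (const uint8_t *)&v;

	return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
	uint16_t wire = 0;
	uint8_t *b = (uint8_t *)&wire;

	b[0] = 0x00;
	b[1] = 0x01;	/* wValue = 0x0100 on the wire */
	assert(le16_to_cpu_(wire) == 0x0100);	/* holds on any host */
	return 0;
}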
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 2707be628298..cbd8d6c74c93 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -1950,9 +1950,13 @@ static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
/* Get endpoint status */
int pipe = index & USB_ENDPOINT_NUMBER_MASK;
- struct qe_ep *target_ep = &udc->eps[pipe];
+ struct qe_ep *target_ep;
u16 usep;
+ if (pipe >= USB_MAX_ENDPOINTS)
+ goto stall;
+ target_ep = &udc->eps[pipe];
+
/* stall if endpoint doesn't exist */
if (!target_ep->ep.desc)
goto stall;
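The added guard range-checks the host-supplied endpoint number before it is
used to index udc->eps[], turning an out-of-bounds read into a STALL. The
general shape of the fix (MAX_EPS stands in for USB_MAX_ENDPOINTS):

#include <stddef.h>

#define MAX_EPS 32	/* stand-in for USB_MAX_ENDPOINTS */

struct ep { int in_use; };

static struct ep *lookup_ep(struct ep *eps, unsigned int idx)
{
	if (idx >= MAX_EPS)	/* host-controlled value: range-check first */
		return NULL;
	return &eps[idx];
}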
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 5980540a8fff..de7757260d6c 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2632,7 +2632,7 @@ net2272_plat_probe(struct platform_device *pdev)
goto err_req;
}
- ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
+ ret = net2272_probe_fin(dev, irqflags);
if (ret)
goto err_io;
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 0bbd180022aa..e04acf2dfa65 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2553,6 +2553,7 @@ static int renesas_usb3_remove(struct platform_device *pdev)
debugfs_remove_recursive(usb3->dentry);
device_remove_file(&pdev->dev, &dev_attr_role);
+ cancel_work_sync(&usb3->role_work);
usb_role_switch_unregister(usb3->role_sw);
usb_del_gadget_udc(&usb3->gadget);
diff --git a/drivers/usb/gadget/udc/snps_udc_core.c b/drivers/usb/gadget/udc/snps_udc_core.c
index 3fcded31405a..e76f1a50b0fc 100644
--- a/drivers/usb/gadget/udc/snps_udc_core.c
+++ b/drivers/usb/gadget/udc/snps_udc_core.c
@@ -96,9 +96,7 @@ static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);
/* tasklet for usb disconnect */
-static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
- (unsigned long) &udc);
-
+static DECLARE_TASKLET_OLD(disconnect_tasklet, udc_tasklet_disconnect);
/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
@@ -1661,7 +1659,7 @@ static void usb_disconnect(struct udc *dev)
/* Tasklet for disconnect to be outside of interrupt context */
static void udc_tasklet_disconnect(unsigned long par)
{
- struct udc *dev = (struct udc *)(*((struct udc **) par));
+ struct udc *dev = udc;
u32 tmp;
DBG(dev, "Tasklet disconnect\n");
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index de22dd543653..325f518a991b 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -11,6 +11,7 @@
* USB peripheral controller (at91_udc.c).
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
@@ -171,6 +172,7 @@ struct xusb_ep {
* @addr: the usb device base address
* @lock: instance of spinlock
* @dma_enabled: flag indicating whether the dma is included in the system
+ * @clk: pointer to struct clk
* @read_fn: function pointer to read device registers
* @write_fn: function pointer to write to device registers
*/
@@ -188,8 +190,9 @@ struct xusb_udc {
void __iomem *addr;
spinlock_t lock;
bool dma_enabled;
+ struct clk *clk;
- unsigned int (*read_fn)(void __iomem *);
+ unsigned int (*read_fn)(void __iomem *reg);
void (*write_fn)(void __iomem *, u32, u32);
};
@@ -496,11 +499,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req,
/* Get the Buffer address and copy the transmit data.*/
eprambase = (u32 __force *)(udc->addr + ep->rambase);
if (ep->is_in) {
- memcpy(eprambase, bufferptr, bytestosend);
+ memcpy_toio((void __iomem *)eprambase, bufferptr,
+ bytestosend);
udc->write_fn(udc->addr, ep->offset +
XUSB_EP_BUF0COUNT_OFFSET, bufferlen);
} else {
- memcpy(bufferptr, eprambase, bytestosend);
+			memcpy_fromio(bufferptr, (void __iomem *)eprambase,
+				      bytestosend);
}
/*
* Enable the buffer for transmission.
@@ -514,11 +519,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req,
eprambase = (u32 __force *)(udc->addr + ep->rambase +
ep->ep_usb.maxpacket);
if (ep->is_in) {
- memcpy(eprambase, bufferptr, bytestosend);
+ memcpy_toio((void __iomem *)eprambase, bufferptr,
+ bytestosend);
udc->write_fn(udc->addr, ep->offset +
XUSB_EP_BUF1COUNT_OFFSET, bufferlen);
} else {
- memcpy(bufferptr, eprambase, bytestosend);
+			memcpy_fromio(bufferptr, (void __iomem *)eprambase,
+				      bytestosend);
}
/*
* Enable the buffer for transmission.
@@ -1020,7 +1027,7 @@ static int __xudc_ep0_queue(struct xusb_ep *ep0, struct xusb_req *req)
udc->addr);
length = req->usb_req.actual = min_t(u32, length,
EP0_MAX_PACKET);
- memcpy(corebuf, req->usb_req.buf, length);
+ memcpy_toio((void __iomem *)corebuf, req->usb_req.buf, length);
udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, length);
udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
} else {
@@ -1399,7 +1406,6 @@ err:
/**
* xudc_stop - stops the device.
* @gadget: pointer to the usb gadget structure
- * @driver: pointer to usb gadget driver structure
*
* Return: zero always
*/
@@ -1738,7 +1744,7 @@ static void xudc_set_clear_feature(struct xusb_udc *udc)
*
* Process setup packet and delegate to gadget layer.
*/
-static void xudc_handle_setup(struct xusb_udc *udc)
+static void xudc_handle_setup(struct xusb_udc *udc) __must_hold(&udc->lock)
{
struct xusb_ep *ep0 = &udc->ep[0];
struct usb_ctrlrequest setup;
@@ -1746,7 +1752,7 @@ static void xudc_handle_setup(struct xusb_udc *udc)
/* Load up the chapter 9 command buffer.*/
ep0rambase = (u32 __force *) (udc->addr + XUSB_SETUP_PKT_ADDR_OFFSET);
- memcpy(&setup, ep0rambase, 8);
+	memcpy_fromio(&setup, (void __iomem *)ep0rambase, 8);
udc->setup = setup;
udc->setup.wValue = cpu_to_le16(setup.wValue);
@@ -1833,7 +1839,7 @@ static void xudc_ep0_out(struct xusb_udc *udc)
(ep0->rambase << 2));
buffer = req->usb_req.buf + req->usb_req.actual;
req->usb_req.actual = req->usb_req.actual + bytes_to_rx;
- memcpy(buffer, ep0rambase, bytes_to_rx);
+	memcpy_fromio(buffer, (void __iomem *)ep0rambase, bytes_to_rx);
if (req->usb_req.length == req->usb_req.actual) {
/* Data transfer completed get ready for Status stage */
@@ -1909,7 +1915,7 @@ static void xudc_ep0_in(struct xusb_udc *udc)
(ep0->rambase << 2));
buffer = req->usb_req.buf + req->usb_req.actual;
req->usb_req.actual = req->usb_req.actual + length;
- memcpy(ep0rambase, buffer, length);
+ memcpy_toio((void __iomem *)ep0rambase, buffer, length);
}
udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, count);
udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
@@ -2098,6 +2104,26 @@ static int xudc_probe(struct platform_device *pdev)
udc->gadget.ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO].ep_usb;
udc->gadget.name = driver_name;
+ udc->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(udc->clk)) {
+ if (PTR_ERR(udc->clk) != -ENOENT) {
+ ret = PTR_ERR(udc->clk);
+ goto fail;
+ }
+
+ /*
+		 * Clock framework support is optional; continue anyway
+		 * if we don't find a matching clock.
+ */
+ udc->clk = NULL;
+ }
+
+ ret = clk_prepare_enable(udc->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+
spin_lock_init(&udc->lock);
/* Check for IP endianness */
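The probe treats a missing "s_axi_aclk" (-ENOENT) as "no clock to manage" and
continues with a NULL clk, which the clk API accepts as a no-op; any other
error aborts the probe. On kernels that provide it, devm_clk_get_optional()
expresses the same intent in one call. A hedged sketch of the equivalent logic:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int get_optional_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "s_axi_aclk");

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) != -ENOENT)
			return PTR_ERR(clk);	/* real error: propagate */
		clk = NULL;			/* absent: NULL clk is a no-op */
	}
	*out = clk;
	return clk_prepare_enable(clk);		/* NULL-safe */
}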
@@ -2153,10 +2179,62 @@ static int xudc_remove(struct platform_device *pdev)
struct xusb_udc *udc = platform_get_drvdata(pdev);
usb_del_gadget_udc(&udc->gadget);
+ clk_disable_unprepare(udc->clk);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int xudc_suspend(struct device *dev)
+{
+ struct xusb_udc *udc;
+ u32 crtlreg;
+ unsigned long flags;
+
+ udc = dev_get_drvdata(dev);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ crtlreg = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
+ crtlreg &= ~XUSB_CONTROL_USB_READY_MASK;
+
+ udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, crtlreg);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ if (udc->driver && udc->driver->disconnect)
+ udc->driver->disconnect(&udc->gadget);
+
+ clk_disable(udc->clk);
+
+ return 0;
+}
+
+static int xudc_resume(struct device *dev)
+{
+ struct xusb_udc *udc;
+ u32 crtlreg;
+ unsigned long flags;
+
+ udc = dev_get_drvdata(dev);
+
+ clk_enable(udc->clk);
+ spin_lock_irqsave(&udc->lock, flags);
+
+ crtlreg = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
+ crtlreg |= XUSB_CONTROL_USB_READY_MASK;
+
+ udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, crtlreg);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops xudc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xudc_suspend, xudc_resume)
+};
+
/* Match table for of_platform binding */
static const struct of_device_id usb_of_match[] = {
{ .compatible = "xlnx,usb2-device-4.00.a", },
@@ -2168,6 +2246,7 @@ static struct platform_driver xudc_driver = {
.driver = {
.name = driver_name,
.of_match_table = usb_of_match,
+ .pm = &xudc_pm_ops,
},
.probe = xudc_probe,
.remove = xudc_remove,
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 9e9c232e896f..b66cc64f084c 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -29,7 +29,7 @@
#include "ehci-fsl.h"
#define DRIVER_DESC "Freescale EHCI Host controller driver"
-#define DRV_NAME "ehci-fsl"
+#define DRV_NAME "fsl-ehci"
static struct hc_driver __read_mostly fsl_ehci_hc_driver;
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 6bbaee74f7e7..28a19693c19f 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -148,6 +148,7 @@ static int ehci_hcd_ppc_of_probe(struct platform_device *op)
} else {
ehci->has_amcc_usb23 = 1;
}
+ of_node_put(np);
}
if (of_get_property(dn, "big-endian", NULL)) {
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 67a6ee8cb5d8..3cefa3cb6c5a 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -32,6 +32,8 @@
* There are cases when the host controller fails to enable the port due to,
* for example, insufficient power that can be supplied to the device from
* the USB bus. In those cases, the messages printed here are not helpful.
+ *
+ * Return: Always returns 0
*/
static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
{
@@ -46,11 +48,9 @@ static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
dev_warn(hcd->self.controller,
"Maybe your device is not a high speed device?\n");
dev_warn(hcd->self.controller,
- "The USB host controller does not support full speed "
- "nor low speed devices\n");
+ "USB host controller doesn't support FS/LS devices\n");
dev_warn(hcd->self.controller,
- "You can reconfigure the host controller to have "
- "full speed support\n");
+ "You can reconfigure host controller to support FS\n");
}
return 0;
@@ -112,6 +112,8 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
* host controller. Because the Xilinx USB host controller can be configured
* as HS only or HS/FS only, it checks the configuration in the device tree
* entry, and sets an appropriate value for hcd->has_tt.
+ *
+ * Return: zero on success, 'rv' value on failure
*/
static int ehci_hcd_xilinx_of_probe(struct platform_device *op)
{
@@ -196,6 +198,8 @@ err_irq:
*
* Remove the hcd structure, and release resources that has been requested
* during probe.
+ *
+ * Return: Always returns 0
*/
static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
{
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index 3235d5307403..5c423f240a1f 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -677,7 +677,7 @@ static void process_done_list(unsigned long data)
enable_irq(fhci_to_hcd(fhci)->irq);
}
-DECLARE_TASKLET(fhci_tasklet, process_done_list, 0);
+DECLARE_TASKLET_OLD(fhci_tasklet, process_done_list);
/* transfer completed callback */
u32 fhci_transfer_confirm_callback(struct fhci_hcd *fhci)
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index f457e083a6f8..c0f727e79307 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -428,8 +428,6 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
temp = size;
size -= temp;
next += temp;
- if (temp == size)
- goto done;
}
temp = snprintf(next, size, "\n");
@@ -439,7 +437,6 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
size -= temp;
next += temp;
-done:
*sizep = size;
*nextp = next;
}
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index fc35a7993b7b..4374dba4e384 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -645,7 +645,13 @@ ohci_hcd_at91_drv_resume(struct device *dev)
at91_start_clock(ohci_at91);
- ohci_resume(hcd, false);
+ /*
+ * According to the comment in ohci_hcd_at91_drv_suspend()
+ * we need to do a reset if the 48Mhz clock was stopped,
+ * that is, if ohci_at91->wakeup is clear. Tell ohci_resume()
+ * to reset in this case by setting its "hibernated" flag.
+ */
+ ohci_resume(hcd, !ohci_at91->wakeup);
ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0);
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index c561881d0e79..07cee8c7c25e 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -164,6 +164,7 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
}
isp1301_i2c_client = isp1301_get_client(isp1301_node);
+ of_node_put(isp1301_node);
if (!isp1301_i2c_client)
return -EPROBE_DEFER;
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 45f7cceb6df3..98e46725999e 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -169,6 +169,7 @@ static int ohci_hcd_ppc_of_probe(struct platform_device *op)
release_mem_region(res.start, 0x4);
} else
pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__);
+ of_node_put(np);
}
irq_dispose_mapping(irq);
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 0fa3d72bae26..79a6f1442160 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -119,11 +119,13 @@ static int uhci_pci_init(struct usb_hcd *hcd)
uhci->rh_numports = uhci_count_ports(hcd);
- /* Intel controllers report the OverCurrent bit active on.
- * VIA controllers report it active off, so we'll adjust the
- * bit value. (It's not standardized in the UHCI spec.)
+ /*
+ * Intel controllers report the OverCurrent bit active on. VIA
+ * and ZHAOXIN controllers report it active off, so we'll adjust
+ * the bit value. (It's not standardized in the UHCI spec.)
*/
- if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA)
+ if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA ||
+ to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_ZHAOXIN)
uhci->oc_low = 1;
/* HP's server management chip requires a longer port reset delay. */
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index f5c8e4eb62ae..dc332f5800ad 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -132,6 +132,7 @@ static void xhci_debugfs_regset(struct xhci_hcd *xhci, u32 base,
regset->regs = regs;
regset->nregs = nregs;
regset->base = hcd->regs + base;
+ regset->dev = hcd->self.controller;
debugfs_create_regset32((const char *)rgs->name, 0444, parent, regset);
}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 9c066d1c512b..7ede2d0af0a6 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -566,7 +566,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
* It will release and re-acquire the lock while calling ACPI
* method.
*/
-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
+static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
u16 index, bool on, unsigned long *flags)
{
struct xhci_hub *rhub;
@@ -1555,6 +1555,17 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
status = bus_state->resuming_ports;
+ /*
+	 * SS devices are only visible to the roothub after link training
+	 * completes. Keep polling the roothubs for a grace period after
+	 * xHC start.
+ */
+ if (xhci->run_graceperiod) {
+ if (time_before(jiffies, xhci->run_graceperiod))
+ status = 1;
+ else
+ xhci->run_graceperiod = 0;
+ }
+
mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
/* For each port, did anything change? If so, set that bit in buf. */
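The grace period keeps roothub status polling alive for a while after the
controller starts, because SuperSpeed devices only become visible on the
roothub once link training completes. It is the usual jiffies-deadline idiom;
a sketch with an assumed 500 ms window (this hunk does not show where the
deadline is armed):

#include <linux/jiffies.h>
#include <linux/types.h>

struct hc { unsigned long run_graceperiod; };	/* 0 == inactive */

static void hc_started(struct hc *hc)
{
	hc->run_graceperiod = jiffies + msecs_to_jiffies(500);	/* assumed */
}

static bool force_status_poll(struct hc *hc)
{
	if (!hc->run_graceperiod)
		return false;
	if (time_before(jiffies, hc->run_graceperiod))
		return true;		/* still inside the window */
	hc->run_graceperiod = 0;	/* window elapsed: disarm */
	return false;
}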
@@ -1834,6 +1845,13 @@ int xhci_bus_resume(struct usb_hcd *hcd)
}
}
+	/* After resuming from suspend, the controller may not initiate
+	 * LFPS U3_exit signalling if it is not given a delay after updating
+	 * the link from U3->U0. So let's wait for at least 1 ms.
+	 */
+ if (next_state == XDEV_U0)
+ mdelay(1);
+
/* poll for U0 link state complete, both USB2 and USB3 */
for_each_set_bit(port_index, &bus_state->bus_suspended, BITS_PER_LONG) {
sret = xhci_handshake(ports[port_index]->addr, PORT_PLC,
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ef23a69c6553..489091a28a3e 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -650,7 +650,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
num_stream_ctxs, &stream_info->ctx_array_dma,
mem_flags);
if (!stream_info->stream_ctx_array)
- goto cleanup_ctx;
+ goto cleanup_ring_array;
memset(stream_info->stream_ctx_array, 0,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
@@ -685,6 +685,16 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
cur_stream, (unsigned long long) addr);
+ if (xhci->quirks & XHCI_STREAM_QUIRK) {
+			/* The dwc3 host controller has an issue where it
+			 * doesn't process BULK IN stream rings even after
+			 * ringing the doorbell, so set up a timer to avoid
+			 * a hang condition.
+			 */
+ timer_setup(&cur_ring->stream_timer,
+ xhci_stream_timeout, 0);
+ cur_ring->xhci = xhci;
+ }
+
ret = xhci_update_stream_mapping(cur_ring, mem_flags);
if (ret) {
xhci_ring_free(xhci, cur_ring);
@@ -711,6 +721,11 @@ cleanup_rings:
}
xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
+ xhci_free_stream_ctx(xhci,
+ stream_info->num_stream_ctxs,
+ stream_info->stream_ctx_array,
+ stream_info->ctx_array_dma);
+cleanup_ring_array:
kfree(stream_info->stream_rings);
cleanup_info:
kfree(stream_info);
@@ -771,6 +786,10 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
for (cur_stream = 1; cur_stream < stream_info->num_streams;
cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
+
+ if (xhci->quirks & XHCI_STREAM_QUIRK)
+ del_timer_sync(&cur_ring->stream_timer);
+
if (cur_ring) {
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
@@ -901,15 +920,19 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
if (dev->eps[i].stream_info)
xhci_free_stream_info(xhci,
dev->eps[i].stream_info);
- /* Endpoints on the TT/root port lists should have been removed
- * when usb_disable_device() was called for the device.
- * We can't drop them anyway, because the udev might have gone
- * away by this point, and we can't tell what speed it was.
+ /*
+ * Endpoints are normally deleted from the bandwidth list when
+ * endpoints are dropped, before device is freed.
+ * If host is dying or being removed then endpoints aren't
+ * dropped cleanly, so delete the endpoint from list here.
+ * Only applicable for hosts with software bandwidth checking.
*/
- if (!list_empty(&dev->eps[i].bw_endpoint_list))
- xhci_warn(xhci, "Slot %u endpoint %u "
- "not removed from BW list!\n",
- slot_id, i);
+
+ if (!list_empty(&dev->eps[i].bw_endpoint_list)) {
+ list_del_init(&dev->eps[i].bw_endpoint_list);
+ xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n",
+ slot_id, i);
+ }
}
/* If this is a hub, free the TT(s) from the TT list */
xhci_free_tt_info(xhci, dev, slot_id);
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 8950d1f10a7f..86c4bc9df3b8 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -25,6 +25,13 @@
*/
#define TT_MICROFRAMES_MAX 9
+/* schedule error type */
+#define ESCH_SS_Y6 1001
+#define ESCH_SS_OVERLAP 1002
+#define ESCH_CS_OVERFLOW 1003
+#define ESCH_BW_OVERFLOW 1004
+#define ESCH_FIXME 1005
+
/* mtk scheduler bitmasks */
#define EP_BPKTS(p) ((p) & 0x7f)
#define EP_BCSCOUNT(p) (((p) & 0x7) << 8)
@@ -32,6 +39,24 @@
#define EP_BOFFSET(p) ((p) & 0x3fff)
#define EP_BREPEAT(p) (((p) & 0x7fff) << 16)
+static char *sch_error_string(int err_num)
+{
+ switch (err_num) {
+ case ESCH_SS_Y6:
+ return "Can't schedule Start-Split in Y6";
+ case ESCH_SS_OVERLAP:
+ return "Can't find a suitable Start-Split location";
+ case ESCH_CS_OVERFLOW:
+ return "The last Complete-Split is greater than 7";
+ case ESCH_BW_OVERFLOW:
+ return "Bandwidth exceeds the maximum limit";
+ case ESCH_FIXME:
+ return "FIXME, to be resolved";
+ default:
+ return "Unknown";
+ }
+}
+
static int is_fs_or_ls(enum usb_device_speed speed)
{
return speed == USB_SPEED_FULL || speed == USB_SPEED_LOW;
@@ -375,7 +400,6 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
sch_ep->bw_budget_table[j];
}
}
- sch_ep->allocated = used;
}
static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
@@ -384,19 +408,20 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
u32 num_esit, tmp;
int base;
int i, j;
+ u8 uframes = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
+
+ if (sch_ep->ep_type == INT_IN_EP || sch_ep->ep_type == ISOC_IN_EP)
+ offset++;
+
for (i = 0; i < num_esit; i++) {
base = offset + i * sch_ep->esit;
- /*
- * Compared with hs bus, no matter what ep type,
- * the hub will always delay one uframe to send data
- */
- for (j = 0; j < sch_ep->cs_count; j++) {
+ for (j = 0; j < uframes; j++) {
tmp = tt->fs_bus_bw[base + j] + sch_ep->bw_cost_per_microframe;
if (tmp > FS_PAYLOAD_MAX)
- return -ERANGE;
+ return -ESCH_BW_OVERFLOW;
}
}
@@ -406,15 +431,11 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
static int check_sch_tt(struct usb_device *udev,
struct mu3h_sch_ep_info *sch_ep, u32 offset)
{
- struct mu3h_sch_tt *tt = sch_ep->sch_tt;
u32 extra_cs_count;
- u32 fs_budget_start;
u32 start_ss, last_ss;
u32 start_cs, last_cs;
- int i;
start_ss = offset % 8;
- fs_budget_start = (start_ss + 1) % 8;
if (sch_ep->ep_type == ISOC_OUT_EP) {
last_ss = start_ss + sch_ep->cs_count - 1;
@@ -424,11 +445,7 @@ static int check_sch_tt(struct usb_device *udev,
* must never schedule Start-Split in Y6
*/
if (!(start_ss == 7 || last_ss < 6))
- return -ERANGE;
-
- for (i = 0; i < sch_ep->cs_count; i++)
- if (test_bit(offset + i, tt->ss_bit_map))
- return -ERANGE;
+ return -ESCH_SS_Y6;
} else {
u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
@@ -438,29 +455,24 @@ static int check_sch_tt(struct usb_device *udev,
* must never schedule Start-Split in Y6
*/
if (start_ss == 6)
- return -ERANGE;
+ return -ESCH_SS_Y6;
/* one uframe for ss + one uframe for idle */
start_cs = (start_ss + 2) % 8;
last_cs = start_cs + cs_count - 1;
if (last_cs > 7)
- return -ERANGE;
+ return -ESCH_CS_OVERFLOW;
if (sch_ep->ep_type == ISOC_IN_EP)
extra_cs_count = (last_cs == 7) ? 1 : 2;
else /* ep_type : INTR IN / INTR OUT */
- extra_cs_count = (fs_budget_start == 6) ? 1 : 2;
+ extra_cs_count = 1;
cs_count += extra_cs_count;
if (cs_count > 7)
cs_count = 7; /* HW limit */
- for (i = 0; i < cs_count + 2; i++) {
- if (test_bit(offset + i, tt->ss_bit_map))
- return -ERANGE;
- }
-
sch_ep->cs_count = cs_count;
/* one for ss, the other for idle */
sch_ep->num_budget_microframes = cs_count + 2;
@@ -482,28 +494,24 @@ static void update_sch_tt(struct usb_device *udev,
struct mu3h_sch_tt *tt = sch_ep->sch_tt;
u32 base, num_esit;
int bw_updated;
- int bits;
int i, j;
+ int offset = sch_ep->offset;
+ u8 uframes = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
- bits = (sch_ep->ep_type == ISOC_OUT_EP) ? sch_ep->cs_count : 1;
if (used)
bw_updated = sch_ep->bw_cost_per_microframe;
else
bw_updated = -sch_ep->bw_cost_per_microframe;
- for (i = 0; i < num_esit; i++) {
- base = sch_ep->offset + i * sch_ep->esit;
+ if (sch_ep->ep_type == INT_IN_EP || sch_ep->ep_type == ISOC_IN_EP)
+ offset++;
- for (j = 0; j < bits; j++) {
- if (used)
- set_bit(base + j, tt->ss_bit_map);
- else
- clear_bit(base + j, tt->ss_bit_map);
- }
+ for (i = 0; i < num_esit; i++) {
+ base = offset + i * sch_ep->esit;
- for (j = 0; j < sch_ep->cs_count; j++)
+ for (j = 0; j < uframes; j++)
tt->fs_bus_bw[base + j] += bw_updated;
}
@@ -513,21 +521,48 @@ static void update_sch_tt(struct usb_device *udev,
list_del(&sch_ep->tt_endpoint);
}
+static int load_ep_bw(struct usb_device *udev, struct mu3h_sch_bw_info *sch_bw,
+ struct mu3h_sch_ep_info *sch_ep, bool loaded)
+{
+ if (sch_ep->sch_tt)
+ update_sch_tt(udev, sch_ep, loaded);
+
+ /* update bus bandwidth info */
+ update_bus_bw(sch_bw, sch_ep, loaded);
+ sch_ep->allocated = loaded;
+
+ return 0;
+}
+
+static u32 get_esit_boundary(struct mu3h_sch_ep_info *sch_ep)
+{
+ u32 boundary = sch_ep->esit;
+
+ if (sch_ep->sch_tt) { /* LS/FS with TT */
+ /*
+ * tune for CS: normally esit >= 8 for FS/LS; the boundary is
+ * not extended for other EP types, to avoid out-of-bounds
+ * accesses to the bandwidth array
+ */
+ if (sch_ep->ep_type == ISOC_OUT_EP && boundary > 1)
+ boundary--;
+ }
+
+ return boundary;
+}
+
static int check_sch_bw(struct usb_device *udev,
struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
{
u32 offset;
- u32 esit;
u32 min_bw;
u32 min_index;
u32 worst_bw;
u32 bw_boundary;
+ u32 esit_boundary;
u32 min_num_budget;
u32 min_cs_count;
- bool tt_offset_ok = false;
- int ret;
-
- esit = sch_ep->esit;
+ int ret = 0;
/*
* Search through all possible schedule microframes.
@@ -537,16 +572,15 @@ static int check_sch_bw(struct usb_device *udev,
min_index = 0;
min_cs_count = sch_ep->cs_count;
min_num_budget = sch_ep->num_budget_microframes;
- for (offset = 0; offset < esit; offset++) {
- if (is_fs_or_ls(udev->speed)) {
+ esit_boundary = get_esit_boundary(sch_ep);
+ for (offset = 0; offset < sch_ep->esit; offset++) {
+ if (sch_ep->sch_tt) {
ret = check_sch_tt(udev, sch_ep, offset);
if (ret)
continue;
- else
- tt_offset_ok = true;
}
- if ((offset + sch_ep->num_budget_microframes) > sch_ep->esit)
+ if ((offset + sch_ep->num_budget_microframes) > esit_boundary)
break;
worst_bw = get_max_bw(sch_bw, sch_ep, offset);
@@ -569,35 +603,21 @@ static int check_sch_bw(struct usb_device *udev,
/* check bandwidth */
if (min_bw > bw_boundary)
- return -ERANGE;
+ return ret ? ret : -ESCH_BW_OVERFLOW;
sch_ep->offset = min_index;
sch_ep->cs_count = min_cs_count;
sch_ep->num_budget_microframes = min_num_budget;
- if (is_fs_or_ls(udev->speed)) {
- /* all offset for tt is not ok*/
- if (!tt_offset_ok)
- return -ERANGE;
-
- update_sch_tt(udev, sch_ep, 1);
- }
-
- /* update bus bandwidth info */
- update_bus_bw(sch_bw, sch_ep, 1);
-
- return 0;
+ return load_ep_bw(udev, sch_bw, sch_ep, true);
}
static void destroy_sch_ep(struct usb_device *udev,
struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
{
/* only release bandwidth for eps that passed check_sch_bw() */
- if (sch_ep->allocated) {
- update_bus_bw(sch_bw, sch_ep, 0);
- if (sch_ep->sch_tt)
- update_sch_tt(udev, sch_ep, 0);
- }
+ if (sch_ep->allocated)
+ load_ep_bw(udev, sch_bw, sch_ep, false);
if (sch_ep->sch_tt)
drop_tt(udev);
@@ -760,7 +780,8 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
ret = check_sch_bw(udev, sch_bw, sch_ep);
if (ret) {
- xhci_err(xhci, "Not enough bandwidth!\n");
+ xhci_err(xhci, "Not enough bandwidth! (%s)\n",
+ sch_error_string(-ret));
return -ENOSPC;
}
}
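
The ESCH_* values are positive constants returned negated, so callers keep the usual `if (ret)` failure test while still being able to recover a human-readable reason. A compilable userspace sketch of the same pattern (scheduling logic stubbed out; only one error code shown):

	#include <stdio.h>

	#define ESCH_BW_OVERFLOW 1004

	static const char *sch_error_string(int err_num)
	{
		switch (err_num) {
		case ESCH_BW_OVERFLOW:
			return "Bandwidth exceeds the maximum limit";
		default:
			return "Unknown";
		}
	}

	/* stand-in for check_sch_bw(): fail when the budget is exceeded */
	static int check_bw(int used, int budget)
	{
		return used > budget ? -ESCH_BW_OVERFLOW : 0;
	}

	int main(void)
	{
		int ret = check_bw(1200, 1000);

		if (ret)
			printf("Not enough bandwidth! (%s)\n",
			       sch_error_string(-ret));
		return 0;
	}
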
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 5c0eb35cd007..c56c6a0d6222 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -540,6 +540,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
}
device_init_wakeup(dev, true);
+ dma_set_max_seg_size(dev, UINT_MAX);
xhci = hcd_to_xhci(hcd);
xhci->main_hcd = hcd;
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index 985e7a19f6f6..2f702342de66 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -20,14 +20,12 @@
#define XHCI_MTK_MAX_ESIT 64
/**
- * @ss_bit_map: used to avoid start split microframes overlay
* @fs_bus_bw: array to keep track of bandwidth already used for FS
* @ep_list: Endpoints using this TT
* @usb_tt: usb TT related
* @tt_port: TT port number
*/
struct mu3h_sch_tt {
- DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT);
u32 fs_bus_bw[XHCI_MTK_MAX_ESIT];
struct list_head ep_list;
struct usb_tt *usb_tt;
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
index f27d5c2c42f3..bb70dcc2f84f 100644
--- a/drivers/usb/host/xhci-mvebu.c
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -33,7 +33,7 @@ static void xhci_mvebu_mbus_config(void __iomem *base,
/* Program each DRAM CS in a separate window */
for (win = 0; win < dram->num_cs; win++) {
- const struct mbus_dram_window *cs = dram->cs + win;
+ const struct mbus_dram_window *cs = &dram->cs[win];
writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index e7533787db41..aa4c21a12de7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -79,9 +79,12 @@ static const char hcd_name[] = "xhci_hcd";
static struct hc_driver __read_mostly xhci_pci_hc_driver;
static int xhci_pci_setup(struct usb_hcd *hcd);
+static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags);
static const struct xhci_driver_overrides xhci_pci_overrides __initconst = {
.reset = xhci_pci_setup,
+ .update_hub_device = xhci_pci_update_hub_device,
};
/* called after powerup, by probe or system-pm "wakeup" */
@@ -272,8 +275,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_BROKEN_STREAMS;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
- pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI)
+ pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
+ /*
+ * try to tame the ASMedia 1042 controller which reports 0.96
+ * but appears to behave more like 1.0
+ */
+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
xhci->quirks |= XHCI_BROKEN_STREAMS;
+ }
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) {
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
@@ -330,8 +339,38 @@ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
NULL);
ACPI_FREE(obj);
}
+
+static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_hub *rhub = &xhci->usb3_rhub;
+ int ret;
+ int i;
+
+ /* This is not the usb3 roothub we are looking for */
+ if (hcd != rhub->hcd)
+ return;
+
+ if (hdev->maxchild > rhub->num_ports) {
+ dev_err(&hdev->dev, "USB3 roothub port number mismatch\n");
+ return;
+ }
+
+ for (i = 0; i < hdev->maxchild; i++) {
+ ret = usb_acpi_port_lpm_incapable(hdev, i);
+
+ dev_dbg(&hdev->dev, "port-%d disable U1/U2 _DSM: %d\n", i + 1, ret);
+
+ if (ret >= 0) {
+ rhub->ports[i]->lpm_incapable = ret;
+ continue;
+ }
+ }
+}
+
#else
static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
+static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev) { }
#endif /* CONFIG_ACPI */
/* called during probe() after chip reset completes */
@@ -364,6 +403,16 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
return xhci_pci_reinit(xhci, pdev);
}
+static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags)
+{
+ /* Check if acpi claims some USB3 roothub ports are lpm incapable */
+ if (!hdev->parent)
+ xhci_find_lpm_incapable_ports(hcd, hdev);
+
+ return xhci_update_hub_device(hcd, hdev, tt, mem_flags);
+}
+
/*
* We need to register our own PCI probe function (instead of the USB core's
* function) in order to create a second roothub under xHCI.
@@ -421,6 +470,8 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
pm_runtime_allow(&dev->dev);
+ dma_set_max_seg_size(&dev->dev, UINT_MAX);
+
return 0;
put_usb3_hcd:
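
The override added here is a thin wrapper: the PCI hook does its ACPI scan only for root hubs (a root hub has no parent) and then delegates to the generic xhci_update_hub_device(), which this patch exports for that purpose. The shape of the pattern, with a hypothetical bus-specific helper standing in for the ACPI scan:

	static int my_update_hub_device(struct usb_hcd *hcd,
					struct usb_device *hdev,
					struct usb_tt *tt, gfp_t mem_flags)
	{
		/* bus-specific work first; only root hubs lack a parent */
		if (!hdev->parent)
			my_scan_roothub_ports(hcd, hdev);	/* hypothetical */

		/* then fall through to the generic implementation */
		return xhci_update_hub_device(hcd, hdev, tt, mem_flags);
	}
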
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 84cfa8544285..5791bba89ac7 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -19,6 +19,8 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/usb/of.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/xhci_pdriver.h>
#include "xhci.h"
#include "xhci-plat.h"
@@ -164,6 +166,35 @@ static const struct of_device_id usb_xhci_of_match[] = {
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
#endif
+static int usb_otg_set_host(struct device *dev, struct usb_hcd *hcd, bool yes)
+{
+ int ret = 0;
+
+ hcd->usb_phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ if (!IS_ERR_OR_NULL(hcd->usb_phy) && hcd->usb_phy->otg) {
+ if (yes) {
+ if (otg_set_host(hcd->usb_phy->otg, &hcd->self)) {
+ usb_put_phy(hcd->usb_phy);
+ goto disable_phy;
+ }
+ } else {
+ ret = otg_set_host(hcd->usb_phy->otg, NULL);
+ usb_put_phy(hcd->usb_phy);
+ goto disable_phy;
+ }
+
+ } else {
+ goto disable_phy;
+ }
+
+ return 0;
+
+disable_phy:
+ hcd->usb_phy = NULL;
+
+ return ret;
+}
+
static int xhci_plat_probe(struct platform_device *pdev)
{
const struct xhci_plat_priv *priv_match;
@@ -221,6 +252,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
return ret;
}
+ /* Set the controller as wakeup capable */
+ device_set_wakeup_capable(&pdev->dev, true);
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
@@ -276,7 +310,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
*priv = *priv_match;
}
- device_wakeup_enable(hcd->self.controller);
+ device_set_wakeup_capable(&pdev->dev, true);
xhci->main_hcd = hcd;
xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
@@ -292,6 +326,12 @@ static int xhci_plat_probe(struct platform_device *pdev)
/* Iterate over all parent nodes for finding quirks */
for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) {
+ if (device_property_read_bool(&pdev->dev, "xhci-stream-quirk"))
+ xhci->quirks |= XHCI_STREAM_QUIRK;
+
+ if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped"))
+ xhci->quirks |= XHCI_BROKEN_PORT_PED;
+
if (device_property_read_bool(tmpdev, "usb2-lpm-disable"))
xhci->quirks |= XHCI_HW_LPM_DISABLE;
@@ -340,6 +380,10 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (ret)
goto dealloc_usb2_hcd;
+ ret = usb_otg_set_host(&pdev->dev, hcd, true);
+ if (ret)
+ goto dealloc_usb2_hcd;
+
device_enable_async_suspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
@@ -392,6 +436,8 @@ static int xhci_plat_remove(struct platform_device *dev)
xhci->shared_hcd = NULL;
usb_phy_shutdown(hcd->usb_phy);
+ usb_otg_set_host(&dev->dev, hcd, false);
+
usb_remove_hcd(hcd);
usb_put_hcd(shared_hcd);
@@ -411,6 +457,16 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+#if IS_ENABLED(CONFIG_USB_DWC3_OF_SIMPLE)
+ /* Inform dwc3 driver about the device wakeup capability */
+ if (device_may_wakeup(&hcd->self.root_hub->dev)) {
+ enable_irq_wake(hcd->irq);
+ dwc3_host_wakeup_capable(dev, true);
+ } else {
+ dwc3_host_wakeup_capable(dev, false);
+ }
+#endif
+
/*
* xhci_suspend() needs `do_wakeup` to know whether host is allowed
* to do wakeup during suspend. Since xhci_plat_suspend is currently
@@ -448,6 +504,9 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ if (device_may_wakeup(&hcd->self.root_hub->dev))
+ disable_irq_wake(hcd->irq);
+
return xhci_resume(xhci, 0);
}
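
Note the pairing across the two hunks above: the suspend path takes an IRQ wake reference with enable_irq_wake(hcd->irq) only when the root hub may wake the system, and the runtime-resume path releases it under the same condition, keeping the wake reference count balanced. A condensed sketch of that convention (assuming the same device_may_wakeup() gate on both sides, as in the patch):

	static int my_suspend(struct device *dev)
	{
		struct usb_hcd *hcd = dev_get_drvdata(dev);

		/* take a wake reference only when wakeup is wanted */
		if (device_may_wakeup(&hcd->self.root_hub->dev))
			enable_irq_wake(hcd->irq);
		return 0;
	}

	static int my_resume(struct device *dev)
	{
		struct usb_hcd *hcd = dev_get_drvdata(dev);

		/* mirror the suspend-side condition to stay balanced */
		if (device_may_wakeup(&hcd->self.root_hub->dev))
			disable_irq_wake(hcd->irq);
		return 0;
	}
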
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 3da75b367f95..5d7835299245 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -74,7 +74,6 @@ MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V3);
/* For soc_device_attribute */
#define RCAR_XHCI_FIRMWARE_V2 BIT(0) /* FIRMWARE V2 */
-#define RCAR_XHCI_FIRMWARE_V3 BIT(1) /* FIRMWARE V3 */
static const struct soc_device_attribute rcar_quirks_match[] = {
{
@@ -147,8 +146,6 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd)
if (quirks & RCAR_XHCI_FIRMWARE_V2)
firmware_name = XHCI_RCAR_FIRMWARE_NAME_V2;
- else if (quirks & RCAR_XHCI_FIRMWARE_V3)
- firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3;
else
firmware_name = priv->firmware_name;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6cedae902adf..ab7a2c930643 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -714,7 +714,7 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
struct xhci_ring *ring, struct xhci_td *td)
{
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct xhci_segment *seg = td->bounce_seg;
struct urb *urb = td->urb;
size_t len;
@@ -880,9 +880,21 @@ remove_finished_td:
*/
ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
+
inc_td_cnt(cur_td->urb);
- if (last_td_in_urb(cur_td))
- xhci_giveback_urb_in_irq(xhci, cur_td, 0);
+
+ if (last_td_in_urb(cur_td)) {
+ if ((xhci->quirks & XHCI_STREAM_QUIRK) &&
+ ep_ring->stream_timeout_handler) {
+ /* We get here if the stream timer timed out and a
+ * stop command was issued. Give the urb back with
+ * -EAGAIN status so that it can be resubmitted.
+ */
+ xhci_giveback_urb_in_irq(xhci, cur_td, -EAGAIN);
+ ep_ring->stream_timeout_handler = false;
+ } else {
+ xhci_giveback_urb_in_irq(xhci, cur_td, 0);
+ }
+ }
/* Stop processing the cancelled list if the watchdog timer is
* running.
@@ -921,7 +933,10 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep;
struct xhci_ring *ring;
- ep = &xhci->devs[slot_id]->eps[ep_index];
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep)
+ return;
+
if ((ep->ep_state & EP_HAS_STREAMS) ||
(ep->ep_state & EP_GETTING_NO_STREAMS)) {
int stream_id;
@@ -991,6 +1006,84 @@ void xhci_hc_died(struct xhci_hcd *xhci)
usb_hc_died(xhci_to_hcd(xhci));
}
+/* This function is called when the stream ring timer times out.
+ * The dwc3 host controller has an issue where once in a while it fails
+ * to process the BULK IN stream ring TDs even after the doorbell for
+ * that stream ring has been rung. When that happens the controller
+ * generates no transfer events for the stream ring and the transfer
+ * hangs. xhci_stream_timeout() works around this by issuing a stop
+ * command on the stream ring once the stream timer has timed out.
+ */
+void xhci_stream_timeout(struct timer_list *arg)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_virt_ep *ep;
+ struct xhci_ring *ep_ring;
+ unsigned int slot_id, ep_index, stream_id;
+ struct xhci_td *td = NULL;
+ struct urb *urb = NULL;
+ struct urb_priv *urb_priv;
+ struct xhci_command *command;
+ unsigned long flags;
+ int i;
+
+ ep_ring = from_timer(ep_ring, arg, stream_timer);
+ xhci = ep_ring->xhci;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ if (!list_empty(&ep_ring->td_list)) {
+ td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+ urb = td->urb;
+ urb_priv = urb->hcpriv;
+
+ slot_id = urb->dev->slot_id;
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+ stream_id = ep_ring->stream_id;
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ ep_ring->stream_timeout_handler = true;
+
+ /* Delete the stream ring timer */
+ del_timer(&ep_ring->stream_timer);
+
+ for (i = 0; i < urb_priv->num_tds; i++) {
+ td = &urb_priv->td[i];
+ list_add_tail(&td->cancelled_td_list,
+ &ep->cancelled_td_list);
+ }
+
+ /* Queue a stop endpoint command, but only if this is
+ * the first cancellation to be handled.
+ */
+ if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
+ command = xhci_alloc_command(xhci, false,
+ GFP_ATOMIC);
+ if (!command) {
+ xhci_warn(xhci,
+ "%s: Failed to allocate command\n",
+ __func__);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return;
+ }
+
+ ep->ep_state |= EP_STOP_CMD_PENDING;
+ ep->stop_cmd_timer.expires = jiffies +
+ XHCI_STOP_EP_CMD_TIMEOUT * HZ;
+ add_timer(&ep->stop_cmd_timer);
+ xhci_queue_stop_endpoint(xhci, command,
+ urb->dev->slot_id, ep_index, 0);
+ xhci_ring_cmd_db(xhci);
+ }
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ /* let the SCSI stack take care of it */
+ del_timer(&ep_ring->stream_timer);
+}
+
/* Watchdog timer function for when a stop endpoint command fails to complete.
* In this case, we assume the host controller is broken or dying or dead. The
* host may still be completing some other events, so we have to be careful to
@@ -2445,6 +2538,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td_num++;
}
+ if ((xhci->quirks & XHCI_STREAM_QUIRK) &&
+ (ep->ep_state & EP_HAS_STREAMS))
+ del_timer(&ep_ring->stream_timer);
+
/* Look for common error cases */
switch (trb_comp_code) {
/* Skip codes that require special handling depending on
@@ -3280,7 +3377,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
u32 *trb_buff_len, struct xhci_segment *seg)
{
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
unsigned int unalign;
unsigned int max_pkt;
u32 new_buff_len;
@@ -3500,6 +3597,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
check_trb_math(urb, enqd_len);
+
+ if ((xhci->quirks & XHCI_STREAM_QUIRK) && (urb->stream_id > 0) &&
+ (usb_endpoint_dir_in(&urb->ep->desc) == 1)) {
+ /* Start the stream timer so that xhci_stream_timeout() can be
+ * triggered if xhci is stuck while processing BULK IN streams.
+ */
+ ring->stream_timeout_handler = false;
+ mod_timer(&ring->stream_timer, jiffies + 5 * HZ);
+ }
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb);
return 0;
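
Taken together, the stream quirk has a three-point lifecycle that is easy to lose in the hunks: arm on queue, disarm on progress, recover on expiry. Condensed from the changes above (not a standalone function):

	/* 1. queue time: arm a 5 s watchdog per BULK IN stream ring */
	ring->stream_timeout_handler = false;
	mod_timer(&ring->stream_timer, jiffies + 5 * HZ);

	/* 2. event time: a transfer event proves the ring made progress,
	 * so the watchdog is deleted before the event is handled
	 */
	del_timer(&ep_ring->stream_timer);

	/* 3. expiry: xhci_stream_timeout() queues a stop-endpoint command;
	 * the cancelled TDs are then given back with -EAGAIN so the class
	 * driver can resubmit the same URBs
	 */
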
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 9fe35bb67731..c03f7e40c79a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -9,6 +9,7 @@
*/
#include <linux/pci.h>
+#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
@@ -149,9 +150,11 @@ int xhci_start(struct xhci_hcd *xhci)
xhci_err(xhci, "Host took too long to start, "
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
- if (!ret)
+ if (!ret) {
/* clear state flags. Including dying, halted or removing */
xhci->xhc_state = 0;
+ xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
+ }
return ret;
}
@@ -183,7 +186,11 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
command = readl(&xhci->op_regs->command);
+#ifdef CONFIG_USB_DWC3_OTG
+ command |= CMD_LRESET;
+#else
command |= CMD_RESET;
+#endif
writel(command, &xhci->op_regs->command);
/* Existing Intel xHCI controllers require a delay of 1 mS,
@@ -196,7 +203,13 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
if (xhci->quirks & XHCI_INTEL_HOST)
udelay(1000);
- ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
+ ret = xhci_handshake(&xhci->op_regs->command,
+#ifdef CONFIG_USB_DWC3_OTG
+ CMD_LRESET,
+#else
+ CMD_RESET,
+#endif
+ 0, 10 * 1000 * 1000);
if (ret)
return ret;
@@ -224,6 +237,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ struct iommu_domain *domain;
int err, i;
u64 val;
u32 intrs;
@@ -242,7 +256,9 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
* an iommu. Doing anything when there is no iommu is definitely
* unsafe...
*/
- if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
+ domain = iommu_get_domain_for_dev(dev);
+ if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
+ domain->type == IOMMU_DOMAIN_IDENTITY)
return;
xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
@@ -691,6 +707,7 @@ int xhci_run(struct usb_hcd *hcd)
if (ret)
xhci_free_command(xhci, command);
}
+ set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Finished xhci_run for USB2 roothub");
@@ -720,6 +737,12 @@ static void xhci_stop(struct usb_hcd *hcd)
/* Only halt host and free memory after both hcds are removed */
if (!usb_hcd_is_primary_hcd(hcd)) {
+ /* Remove shared_hcd if no otg ports are present */
+ if (!hcd->self.otg_port) {
+ /* usb core will free this hcd shortly, unset pointer */
+ xhci->shared_hcd = NULL;
+ }
+
mutex_unlock(&xhci->mutex);
return;
}
@@ -775,8 +798,6 @@ static void xhci_stop(struct usb_hcd *hcd)
void xhci_shutdown(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- unsigned long flags;
- int i;
if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
@@ -792,21 +813,12 @@ void xhci_shutdown(struct usb_hcd *hcd)
del_timer_sync(&xhci->shared_hcd->rh_timer);
}
- spin_lock_irqsave(&xhci->lock, flags);
+ spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
-
- /* Power off USB2 ports*/
- for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
- xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags);
-
- /* Power off USB3 ports*/
- for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
- xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags);
-
/* Workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
- spin_unlock_irqrestore(&xhci->lock, flags);
+ spin_unlock_irq(&xhci->lock);
xhci_cleanup_msix(xhci);
@@ -1172,7 +1184,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* re-initialize the HC on Restore Error, or Host Controller Error */
if (temp & (STS_SRE | STS_HCE)) {
reinit_xhc = true;
- xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
+ if (!xhci->broken_suspend)
+ xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
}
if (reinit_xhc) {
@@ -1715,8 +1728,21 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto err_giveback;
}
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+ ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
+ ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ep_ring) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Delete the stream timer */
+ if ((xhci->quirks & XHCI_STREAM_QUIRK) && (urb->stream_id > 0))
+ del_timer(&ep_ring->stream_timer);
+
i = urb_priv->num_tds_done;
if (i < urb_priv->num_tds)
+
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Cancel URB %p, dev %s, ep 0x%x, "
"starting at offset 0x%llx",
@@ -3917,6 +3943,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_virt_device *virt_dev;
struct xhci_slot_ctx *slot_ctx;
+ unsigned long flags;
int i, ret;
/*
@@ -3945,7 +3972,11 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
}
virt_dev->udev = NULL;
xhci_disable_slot(xhci, udev->slot_id);
+
+ spin_lock_irqsave(&xhci->lock, flags);
xhci_free_virt_device(xhci, udev->slot_id);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
}
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
@@ -5002,6 +5033,7 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
struct usb_device *udev, enum usb3_link_state state)
{
struct xhci_hcd *xhci;
+ struct xhci_port *port;
u16 hub_encoded_timeout;
int mel;
int ret;
@@ -5015,6 +5047,13 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
!xhci->devs[udev->slot_id])
return USB3_LPM_DISABLED;
+ /* If connected to root port then check port can handle lpm */
+ if (udev->parent && !udev->parent->parent) {
+ port = xhci->usb3_rhub.ports[udev->portnum - 1];
+ if (port->lpm_incapable)
+ return USB3_LPM_DISABLED;
+ }
+
hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
if (mel < 0) {
@@ -5074,7 +5113,7 @@ static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
* internal data structures for the device.
*/
-static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -5174,6 +5213,7 @@ static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
xhci_free_command(xhci, config_cmd);
return ret;
}
+EXPORT_SYMBOL_GPL(xhci_update_hub_device);
static int xhci_get_frame(struct usb_hcd *hcd)
{
@@ -5443,6 +5483,8 @@ void xhci_init_driver(struct hc_driver *drv,
drv->check_bandwidth = over->check_bandwidth;
if (over->reset_bandwidth)
drv->reset_bandwidth = over->reset_bandwidth;
+ if (over->update_hub_device)
+ drv->update_hub_device = over->update_hub_device;
}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index a9031f494984..9e7d0f700288 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1621,6 +1621,9 @@ struct xhci_ring {
enum xhci_ring_type type;
bool last_td_was_short;
struct radix_tree_root *trb_address_map;
+ struct timer_list stream_timer;
+ bool stream_timeout_handler;
+ struct xhci_hcd *xhci;
};
struct xhci_erst_entry {
@@ -1724,6 +1727,7 @@ struct xhci_port {
int hcd_portnum;
struct xhci_hub *rhub;
struct xhci_port_cap *port_cap;
+ unsigned int lpm_incapable:1;
};
struct xhci_hub {
@@ -1814,7 +1818,7 @@ struct xhci_hcd {
/* Host controller watchdog timer structures */
unsigned int xhc_state;
-
+ unsigned long run_graceperiod;
u32 command;
struct s3_save s3;
/* Host controller is dying - not responding to commands. "I'm not dead yet!"
@@ -1880,6 +1884,7 @@ struct xhci_hcd {
#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
+#define XHCI_STREAM_QUIRK BIT_ULL(36) /* FIXME this is wrong */
#define XHCI_SKIP_PHY_INIT BIT_ULL(37)
#define XHCI_DISABLE_SPARSE BIT_ULL(38)
#define XHCI_NO_SOFT_RETRY BIT_ULL(40)
@@ -1922,6 +1927,8 @@ struct xhci_driver_overrides {
int (*start)(struct usb_hcd *hcd);
int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
+ int (*update_hub_device)(struct usb_hcd *hcd, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags);
};
#define XHCI_CFC_DELAY 10
@@ -2076,6 +2083,8 @@ void xhci_init_driver(struct hc_driver *drv,
const struct xhci_driver_overrides *over);
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags);
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_ext_cap_init(struct xhci_hcd *xhci);
@@ -2134,6 +2143,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, unsigned int stream_id,
struct xhci_td *td);
void xhci_stop_endpoint_command_watchdog(struct timer_list *t);
+void xhci_stream_timeout(struct timer_list *unused);
void xhci_handle_command_timeout(struct work_struct *work);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
@@ -2155,8 +2165,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index,
- bool on, unsigned long *flags);
void xhci_hc_died(struct xhci_hcd *xhci);
@@ -2376,7 +2384,7 @@ static inline const char *xhci_decode_trb(char *str, size_t size,
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_STOP_RING:
- sprintf(str,
+ snprintf(str, size,
"%s: slot %d sp %d ep %d flags %c",
xhci_trb_type_string(type),
TRB_TO_SLOT_ID(field3),
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index bb24527f3c70..ba2b6fbab9b8 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -178,10 +178,6 @@ static int idmouse_create_image(struct usb_idmouse *dev)
bytes_read += bulk_read;
}
- /* reset the device */
-reset:
- ftip_command(dev, FTIP_RELEASE, 0, 0);
-
/* check for valid image */
/* right border should be black (0x00) */
for (bytes_read = sizeof(HEADER)-1 + WIDTH-1; bytes_read < IMGSIZE; bytes_read += WIDTH)
@@ -193,6 +189,10 @@ reset:
if (dev->bulk_in_buffer[bytes_read] != 0xFF)
return -EAGAIN;
+ /* reset the device */
+reset:
+ ftip_command(dev, FTIP_RELEASE, 0, 0);
+
/* should be IMGSIZE == 65040 */
dev_dbg(&dev->interface->dev, "read %d bytes fingerprint data\n",
bytes_read);
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 888defdea546..c366a65fbd80 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -817,7 +817,7 @@ static int iowarrior_probe(struct usb_interface *interface,
break;
case USB_DEVICE_ID_CODEMERCS_IOW100:
- dev->report_size = 13;
+ dev->report_size = 12;
break;
}
}
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 0734e6dd9386..d242b73cfdb2 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3014,6 +3014,20 @@ static int sisusb_probe(struct usb_interface *intf,
struct usb_device *dev = interface_to_usbdev(intf);
struct sisusb_usb_data *sisusb;
int retval = 0, i;
+ static const u8 ep_addresses[] = {
+ SISUSB_EP_GFX_IN | USB_DIR_IN,
+ SISUSB_EP_GFX_OUT | USB_DIR_OUT,
+ SISUSB_EP_GFX_BULK_OUT | USB_DIR_OUT,
+ SISUSB_EP_GFX_LBULK_OUT | USB_DIR_OUT,
+ SISUSB_EP_BRIDGE_IN | USB_DIR_IN,
+ SISUSB_EP_BRIDGE_OUT | USB_DIR_OUT,
+ 0};
+
+ /* Are the expected endpoints present? */
+ if (!usb_check_bulk_endpoints(intf, ep_addresses)) {
+ dev_err(&intf->dev, "Invalid USB2VGA device\n");
+ return -EINVAL;
+ }
dev_info(&dev->dev, "USB2VGA dongle found at address %d\n",
dev->devnum);
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f48a23adbc35..35483217b1f6 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1247,14 +1247,19 @@ static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf)
struct mon_reader_bin *rp = vmf->vma->vm_private_data;
unsigned long offset, chunk_idx;
struct page *pageptr;
+ unsigned long flags;
+ spin_lock_irqsave(&rp->b_lock, flags);
offset = vmf->pgoff << PAGE_SHIFT;
- if (offset >= rp->b_size)
+ if (offset >= rp->b_size) {
+ spin_unlock_irqrestore(&rp->b_lock, flags);
return VM_FAULT_SIGBUS;
+ }
chunk_idx = offset / CHUNK_SIZE;
pageptr = rp->b_vec[chunk_idx].pg;
get_page(pageptr);
vmf->page = pageptr;
+ spin_unlock_irqrestore(&rp->b_lock, flags);
return 0;
}
@@ -1268,6 +1273,11 @@ static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
/* don't do anything here: "fault" will set up page table entries */
vma->vm_ops = &mon_bin_vm_ops;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ vma->vm_flags &= ~VM_MAYWRITE;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = filp->private_data;
mon_bin_vma_open(vma);
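
mon_bin_mmap() now does two related things: it refuses mappings that are writable up front (VM_WRITE) and clears VM_MAYWRITE so a read-only mapping cannot later be upgraded. From userspace the second half looks like this (len and fd are assumed to be a valid length and an open monitor descriptor):

	#include <sys/mman.h>
	#include <stdio.h>

	/* the read-only mapping itself still succeeds ... */
	void *buf = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);

	/* ... but upgrading it is now rejected by the kernel */
	if (mprotect(buf, len, PROT_READ | PROT_WRITE) < 0)
		perror("mprotect");	/* fails with EACCES */
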
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
index 2ea3157ddb6e..e65586147965 100644
--- a/drivers/usb/mtu3/mtu3_qmu.c
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -210,6 +210,7 @@ static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
return ring->enqueue;
}
+/* @dequeue may be NULL if ring is unallocated or freed */
static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
if (ring->dequeue < ring->end)
@@ -484,7 +485,7 @@ static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
__func__, epnum, gpd, gpd_current, ring->enqueue);
- while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
+ while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
mreq = next_request(mep);
@@ -523,7 +524,7 @@ static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
__func__, epnum, gpd, gpd_current, ring->enqueue);
- while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
+ while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
mreq = next_request(mep);
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index b4d6d9bb3239..c545b27ea568 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -1146,7 +1146,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
struct musb_hw_ep *hw_ep = NULL;
u32 rx, tx;
int i, index;
- unsigned long uninitialized_var(flags);
+ unsigned long flags;
cppi = container_of(musb->dma_controller, struct cppi, controller);
if (cppi->irq)
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 0c6204add616..1efd5ce48f89 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -39,7 +39,7 @@ static const struct musb_register_map musb_regmap[] = {
{ "IntrUsbE", MUSB_INTRUSBE, 8 },
{ "DevCtl", MUSB_DEVCTL, 8 },
{ "VControl", 0x68, 32 },
- { "HWVers", 0x69, 16 },
+ { "HWVers", MUSB_HWVERS, 16 },
{ "LinkInfo", MUSB_LINKINFO, 8 },
{ "VPLen", MUSB_VPLEN, 8 },
{ "HS_EOF1", MUSB_HS_EOF1, 8 },
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 4622400ba4dd..b8fc818c154a 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -760,6 +760,9 @@ static void rxstate(struct musb *musb, struct musb_request *req)
musb_writew(epio, MUSB_RXCSR, csr);
buffer_aint_mapped:
+ fifo_count = min_t(unsigned int,
+ request->length - request->actual,
+ (unsigned int)fifo_count);
musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
(request->buf + request->actual));
request->actual += fifo_count;
@@ -1626,8 +1629,6 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
struct musb *musb = gadget_to_musb(gadget);
- if (!musb->xceiv->set_power)
- return -EOPNOTSUPP;
return usb_phy_set_power(musb->xceiv, mA);
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index fa9922c0c910..eb88d52e6d94 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -339,10 +339,16 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
musb_giveback(musb, urb, status);
qh->is_ready = ready;
+ /*
+ * musb->lock was dropped in musb_giveback(), so the qh may
+ * have been freed; look it up again
+ */
+ qh = musb_ep_get_qh(hw_ep, is_in);
+
/* reclaim resources (and bandwidth) ASAP; deschedule it, and
* invalidate qh as soon as list_empty(&hep->urb_list)
*/
- if (list_empty(&qh->hep->urb_list)) {
+ if (qh && list_empty(&qh->hep->urb_list)) {
struct list_head *head;
struct dma_controller *dma = musb->dma_controller;
@@ -2424,6 +2430,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
* and its URB list has emptied, recycle this qh.
*/
if (ready && list_empty(&qh->hep->urb_list)) {
+ musb_ep_set_qh(qh->hw_ep, is_in, NULL);
qh->hep->hcpriv = NULL;
list_del(&qh->ring);
kfree(qh);
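
Both musb hunks close the same hole: musb_giveback() temporarily drops musb->lock, so another context can dequeue the final URB and free the qh while musb_advance_schedule() still holds a pointer to it. The dequeue path therefore clears the endpoint's qh pointer before freeing, and the advance path re-fetches the qh and tolerates NULL:

	musb_giveback(musb, urb, status);	/* drops/retakes musb->lock */

	/* the qh may have been freed while the lock was dropped */
	qh = musb_ep_get_qh(hw_ep, is_in);
	if (qh && list_empty(&qh->hep->urb_list)) {
		/* only now is it safe to recycle the qh's resources */
	}
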
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 24b4f091acb8..6b9114b2a6e6 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -173,6 +173,7 @@ config USB_TEGRA_PHY
config USB_ULPI
bool "Generic ULPI Transceiver Driver"
depends on ARM || ARM64
+ depends on USB_PHY
select USB_ULPI_VIEWPORT
help
Enable this to support ULPI connected USB OTG transceivers which
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 70b8c8248caf..6dfecbd47d7a 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -388,14 +388,7 @@ static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect)
static bool mxs_phy_is_otg_host(struct mxs_phy *mxs_phy)
{
- void __iomem *base = mxs_phy->phy.io_priv;
- u32 phyctrl = readl(base + HW_USBPHY_CTRL);
-
- if (IS_ENABLED(CONFIG_USB_OTG) &&
- !(phyctrl & BM_USBPHY_CTRL_OTG_ID_VALUE))
- return true;
-
- return false;
+ return mxs_phy->phy.last_event == USB_EVENT_ID;
}
static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index a3e043e3e4aa..d0672b671298 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -395,7 +395,7 @@ static int tahvo_usb_probe(struct platform_device *pdev)
tu->irq = ret = platform_get_irq(pdev, 0);
if (ret < 0)
- return ret;
+ goto err_remove_phy;
ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt,
IRQF_ONESHOT,
"tahvo-vbus", tu);
diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
index a43c49369a60..0f7f6eb16041 100644
--- a/drivers/usb/phy/phy-ulpi.c
+++ b/drivers/usb/phy/phy-ulpi.c
@@ -13,9 +13,16 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
+#include <linux/usb/phy.h>
struct ulpi_info {
@@ -39,6 +46,13 @@ static struct ulpi_info ulpi_ids[] = {
ULPI_INFO(ULPI_ID(0x0451, 0x1507), "TI TUSB1210"),
};
+struct ulpi_phy {
+ struct usb_phy *usb_phy;
+ void __iomem *regs;
+ unsigned int vp_offset;
+ unsigned int flags;
+};
+
static int ulpi_set_otg_flags(struct usb_phy *phy)
{
unsigned int flags = ULPI_OTG_CTRL_DP_PULLDOWN |
@@ -240,6 +254,23 @@ static int ulpi_set_vbus(struct usb_otg *otg, bool on)
return usb_phy_io_write(phy, flags, ULPI_OTG_CTRL);
}
+static int usbphy_set_vbus(struct usb_phy *phy, int on)
+{
+ unsigned int flags = usb_phy_io_read(phy, ULPI_OTG_CTRL);
+
+ flags &= ~(ULPI_OTG_CTRL_DRVVBUS | ULPI_OTG_CTRL_DRVVBUS_EXT);
+
+ if (on) {
+ if (phy->flags & ULPI_OTG_DRVVBUS)
+ flags |= ULPI_OTG_CTRL_DRVVBUS;
+
+ if (phy->flags & ULPI_OTG_DRVVBUS_EXT)
+ flags |= ULPI_OTG_CTRL_DRVVBUS_EXT;
+ }
+
+ return usb_phy_io_write(phy, flags, ULPI_OTG_CTRL);
+}
+
struct usb_phy *
otg_ulpi_create(struct usb_phy_io_ops *ops,
unsigned int flags)
@@ -262,6 +293,7 @@ otg_ulpi_create(struct usb_phy_io_ops *ops,
phy->io_ops = ops;
phy->otg = otg;
phy->init = ulpi_init;
+ phy->set_vbus = usbphy_set_vbus;
otg->usb_phy = phy;
otg->set_host = ulpi_set_host;
@@ -271,3 +303,70 @@ otg_ulpi_create(struct usb_phy_io_ops *ops,
}
EXPORT_SYMBOL_GPL(otg_ulpi_create);
+static int ulpi_phy_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+ struct ulpi_phy *uphy;
+ bool flag;
+ int ret;
+
+ uphy = devm_kzalloc(&pdev->dev, sizeof(*uphy), GFP_KERNEL);
+ if (!uphy)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* devm_ioremap() returns NULL on failure, not an ERR_PTR */
+ uphy->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!uphy->regs)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "view-port", &uphy->vp_offset);
+ if (ret) {
+ dev_err(&pdev->dev, "view-port register not specified\n");
+ return ret;
+ }
+
+ flag = of_property_read_bool(np, "drv-vbus");
+ if (flag)
+ uphy->flags |= ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT;
+
+ uphy->usb_phy = otg_ulpi_create(&ulpi_viewport_access_ops, uphy->flags);
+
+ uphy->usb_phy->dev = &pdev->dev;
+
+ uphy->usb_phy->io_priv = uphy->regs + uphy->vp_offset;
+
+ ret = usb_add_phy_dev(uphy->usb_phy);
+ if (ret < 0)
+ return ret;
+
+ /* needed so ulpi_phy_remove() can retrieve the phy */
+ platform_set_drvdata(pdev, uphy);
+
+ return 0;
+}
+
+static int ulpi_phy_remove(struct platform_device *pdev)
+{
+ struct ulpi_phy *uphy = platform_get_drvdata(pdev);
+
+ usb_remove_phy(uphy->usb_phy);
+
+ return 0;
+}
+
+static const struct of_device_id ulpi_phy_table[] = {
+ { .compatible = "ulpi-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ulpi_phy_table);
+
+static struct platform_driver ulpi_phy_driver = {
+ .probe = ulpi_phy_probe,
+ .remove = ulpi_phy_remove,
+ .driver = {
+ .name = "ulpi-phy",
+ .of_match_table = ulpi_phy_table,
+ },
+};
+module_platform_driver(ulpi_phy_driver);
+
+MODULE_DESCRIPTION("ULPI PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/renesas_usbhs/rza.c b/drivers/usb/renesas_usbhs/rza.c
index 24de64edb674..2d77edefb4b3 100644
--- a/drivers/usb/renesas_usbhs/rza.c
+++ b/drivers/usb/renesas_usbhs/rza.c
@@ -23,6 +23,10 @@ static int usbhs_rza1_hardware_init(struct platform_device *pdev)
extal_clk = of_find_node_by_name(NULL, "extal");
of_property_read_u32(usb_x1_clk, "clock-frequency", &freq_usb);
of_property_read_u32(extal_clk, "clock-frequency", &freq_extal);
+
+ of_node_put(usb_x1_clk);
+ of_node_put(extal_clk);
+
if (freq_usb == 0) {
if (freq_extal == 12000000) {
/* Select 12MHz XTAL */
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 97e3d75b19a3..aa4fb7a66dce 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -20,6 +20,7 @@ struct usb_role_switch {
struct device dev;
struct mutex lock; /* device lock*/
enum usb_role role;
+ bool registered;
/* From descriptor */
struct device *usb2_port;
@@ -46,6 +47,9 @@ int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
if (IS_ERR_OR_NULL(sw))
return 0;
+ if (!sw->registered)
+ return -EOPNOTSUPP;
+
mutex_lock(&sw->lock);
ret = sw->set(sw->dev.parent, role);
@@ -69,7 +73,7 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
{
enum usb_role role;
- if (IS_ERR_OR_NULL(sw))
+ if (IS_ERR_OR_NULL(sw) || !sw->registered)
return USB_ROLE_NONE;
mutex_lock(&sw->lock);
@@ -108,10 +112,13 @@ usb_role_switch_is_parent(struct fwnode_handle *fwnode)
struct fwnode_handle *parent = fwnode_get_parent(fwnode);
struct device *dev;
- if (!parent || !fwnode_property_present(parent, "usb-role-switch"))
+ if (!fwnode_property_present(parent, "usb-role-switch")) {
+ fwnode_handle_put(parent);
return NULL;
+ }
dev = class_find_device_by_fwnode(role_class, parent);
+ fwnode_handle_put(parent);
return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
}
@@ -316,6 +323,8 @@ usb_role_switch_register(struct device *parent,
return ERR_PTR(ret);
}
+ sw->registered = true;
+
/* TODO: Symlinks for the host port and the device controller. */
return sw;
@@ -330,8 +339,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_register);
*/
void usb_role_switch_unregister(struct usb_role_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw))
+ if (!IS_ERR_OR_NULL(sw)) {
+ sw->registered = false;
device_unregister(&sw->dev);
+ }
}
EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index f06a09e59d8b..f37bde88eb5d 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -96,7 +96,9 @@ struct ch341_private {
u8 mcr;
u8 msr;
u8 lcr;
+
unsigned long quirks;
+ u8 version;
};
static void ch341_set_termios(struct tty_struct *tty,
@@ -175,13 +177,20 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev,
/*
* CH341A buffers data until a full endpoint-size packet (32 bytes)
* has been received unless bit 7 is set.
+ *
+ * At least one device with version 0x27 appears to have this bit
+ * inverted.
*/
- a |= BIT(7);
+ if (priv->version > 0x27)
+ a |= BIT(7);
r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a);
if (r)
return r;
+ if (priv->version < 0x30)
+ return 0;
+
r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, lcr);
if (r)
return r;
@@ -233,7 +242,9 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size);
if (r < 0)
goto out;
- dev_dbg(&dev->dev, "Chip version: 0x%02x\n", buffer[0]);
+
+ priv->version = buffer[0];
+ dev_dbg(&dev->dev, "Chip version: 0x%02x\n", priv->version);
r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0);
if (r < 0)
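
With the chip version cached at configure time, the baud-rate path can gate its quirks on it. A condensed view of the two version checks added above, with the reasoning spelled out:

	/* BIT(7) disables the 32-byte packet buffering, but at least one
	 * version-0x27 device has the bit inverted, so only set it on
	 * newer parts
	 */
	if (priv->version > 0x27)
		a |= BIT(7);

	r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a);
	if (r)
		return r;

	/* pre-0x30 parts take no separate LCR write; stop here */
	if (priv->version < 0x30)
		return 0;
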
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index d5a1832aa48e..1724cade102e 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+ { USB_DEVICE(0x0908, 0x0070) }, /* Siemens SCALANCE LPE-9000 USB Serial Console */
{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
{ USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
@@ -120,6 +121,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x82AA) }, /* Silicon Labs IFS-USB-DATACABLE used with Quint UPS */
{ USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
{ USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
{ USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
@@ -131,6 +133,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
{ USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
+ { USB_DEVICE(0x10C4, 0x8414) }, /* Decagon USB Cable Adapter */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
@@ -144,6 +147,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+ { USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */
{ USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
{ USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
@@ -195,6 +199,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
{ USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */
{ USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */
+ { USB_DEVICE(0x17A8, 0x0011) }, /* Kamstrup 444 MHz RF sniffer */
+ { USB_DEVICE(0x17A8, 0x0013) }, /* Kamstrup 870 MHz RF sniffer */
{ USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */
{ USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */
{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 43fa1f0716b7..d257d38e3732 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -78,9 +78,6 @@ static u8 const clock_table[] = { F81232_CLK_1_846_MHZ, F81232_CLK_14_77_MHZ,
static int calc_baud_divisor(speed_t baudrate, speed_t clockrate)
{
- if (!baudrate)
- return 0;
-
return DIV_ROUND_CLOSEST(clockrate, baudrate);
}
@@ -423,9 +420,14 @@ static void f81232_set_baudrate(struct tty_struct *tty,
speed_t baud_list[] = { baudrate, old_baudrate, F81232_DEF_BAUDRATE };
for (i = 0; i < ARRAY_SIZE(baud_list); ++i) {
- idx = f81232_find_clk(baud_list[i]);
+ baudrate = baud_list[i];
+ if (baudrate == 0) {
+ tty_encode_baud_rate(tty, 0, 0);
+ return;
+ }
+
+ idx = f81232_find_clk(baudrate);
if (idx >= 0) {
- baudrate = baud_list[i];
tty_encode_baud_rate(tty, baudrate, baudrate);
break;
}
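
The restructured loop makes the zero-baud (B0, hang-up) case explicit: the requested rate, the previous rate, and the driver default are tried in order, and a zero anywhere in that chain is now encoded back as 0 and returned instead of reaching the divisor math. A compilable sketch of the fallback order (names shortened; find_clk() stands in for f81232_find_clk()):

	#include <stdio.h>

	#define DEF_BAUD 9600

	/* returns -1 when no clock source fits the rate */
	static int find_clk(unsigned int baud)
	{
		return (baud && 115200 % baud == 0) ? 0 : -1;
	}

	static void set_baud(unsigned int req, unsigned int old)
	{
		unsigned int list[] = { req, old, DEF_BAUD };
		size_t i;

		for (i = 0; i < sizeof(list) / sizeof(list[0]); i++) {
			if (list[i] == 0) {	/* B0: hang up, program nothing */
				printf("baud 0 -> hang up\n");
				return;
			}
			if (find_clk(list[i]) >= 0) {
				printf("using %u baud\n", list[i]);
				return;
			}
		}
	}

	int main(void)
	{
		set_baud(57600, 19200);	/* first choice fits */
		set_baud(0, 19200);	/* B0 short-circuits */
		return 0;
	}
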
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
index 2b39bda035c7..1b2d2c2e8595 100644
--- a/drivers/usb/serial/f81534.c
+++ b/drivers/usb/serial/f81534.c
@@ -538,9 +538,6 @@ static int f81534_submit_writer(struct usb_serial_port *port, gfp_t mem_flags)
static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate)
{
- if (!baudrate)
- return 0;
-
/* Round to nearest divisor */
return DIV_ROUND_CLOSEST(clockrate, baudrate);
}
@@ -570,9 +567,14 @@ static int f81534_set_port_config(struct usb_serial_port *port,
u32 baud_list[] = {baudrate, old_baudrate, F81534_DEFAULT_BAUD_RATE};
for (i = 0; i < ARRAY_SIZE(baud_list); ++i) {
- idx = f81534_find_clk(baud_list[i]);
+ baudrate = baud_list[i];
+ if (baudrate == 0) {
+ tty_encode_baud_rate(tty, 0, 0);
+ return 0;
+ }
+
+ idx = f81534_find_clk(baudrate);
if (idx >= 0) {
- baudrate = baud_list[i];
tty_encode_baud_rate(tty, baudrate, baudrate);
break;
}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index d671e096594b..c97923858fce 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1011,9 +1011,9 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
{ USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
{ USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
- { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
- { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
- { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
+ { USB_DEVICE(FTDI_VID, ACTISENSE_UID_PID) },
+ { USB_DEVICE(FTDI_VID, ACTISENSE_USA_PID) },
+ { USB_DEVICE(FTDI_VID, ACTISENSE_NGX_PID) },
{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
@@ -1045,6 +1045,8 @@ static const struct usb_device_id id_table_combined[] = {
/* IDS GmbH devices */
{ USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
{ USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
+ /* Omron devices */
+ { USB_DEVICE(OMRON_VID, OMRON_CS1W_CIF31_PID) },
/* U-Blox devices */
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
@@ -1318,8 +1320,7 @@ static u32 get_ftdi_divisor(struct tty_struct *tty,
case 38400: div_value = ftdi_sio_b38400; break;
case 57600: div_value = ftdi_sio_b57600; break;
case 115200: div_value = ftdi_sio_b115200; break;
- } /* baud */
- if (div_value == 0) {
+ default:
dev_dbg(dev, "%s - Baudrate (%d) requested is not supported\n",
__func__, baud);
div_value = ftdi_sio_b9600;
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4e92c165c86b..9a0f9fc99124 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -662,6 +662,12 @@
#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
/*
+ * Omron corporation (https://www.omron.com)
+ */
+#define OMRON_VID 0x0590
+#define OMRON_CS1W_CIF31_PID 0x00b2
+
+/*
* Acton Research Corp.
*/
#define ACTON_VID 0x0647 /* Vendor ID */
@@ -1555,9 +1561,9 @@
#define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */
#define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */
#define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */
-#define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */
-#define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */
-#define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */
+#define ACTISENSE_UID_PID 0xD9AC /* USB Isolating Device */
+#define ACTISENSE_USA_PID 0xD9AD /* USB to Serial Adapter */
+#define ACTISENSE_NGX_PID 0xD9AE /* NGX NMEA2000 Gateway */
#define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */
#define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */
#define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2317ed357d8e..406cfd5ad9f4 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -162,6 +162,8 @@ static void option_instat_callback(struct urb *urb);
#define NOVATELWIRELESS_PRODUCT_G2 0xA010
#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
+#define UBLOX_VENDOR_ID 0x1546
+
/* AMOI PRODUCTS */
#define AMOI_VENDOR_ID 0x1614
#define AMOI_PRODUCT_H01 0x0800
@@ -201,6 +203,9 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5829E_ESIM 0x81e4
#define DELL_PRODUCT_5829E 0x81e6
+#define DELL_PRODUCT_FM101R_ESIM 0x8213
+#define DELL_PRODUCT_FM101R 0x8215
+
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
#define KYOCERA_PRODUCT_KPC680 0x180a
@@ -240,22 +245,38 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_UC15 0x9090
/* These u-blox products use Qualcomm's vendor ID */
#define UBLOX_PRODUCT_R410M 0x90b2
-#define UBLOX_PRODUCT_R6XX 0x90fa
/* These Yuga products use Qualcomm's vendor ID */
#define YUGA_PRODUCT_CLM920_NC5 0x9625
#define QUECTEL_VENDOR_ID 0x2c7c
/* These Quectel products use Quectel's vendor ID */
#define QUECTEL_PRODUCT_EC21 0x0121
+#define QUECTEL_PRODUCT_EM061K_LTA 0x0123
+#define QUECTEL_PRODUCT_EM061K_LMS 0x0124
#define QUECTEL_PRODUCT_EC25 0x0125
+#define QUECTEL_PRODUCT_EM060K_128 0x0128
#define QUECTEL_PRODUCT_EG91 0x0191
#define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM05G 0x030a
+#define QUECTEL_PRODUCT_EM060K 0x030b
+#define QUECTEL_PRODUCT_EM05G_CS 0x030c
+#define QUECTEL_PRODUCT_EM05GV2 0x030e
+#define QUECTEL_PRODUCT_EM05CN_SG 0x0310
+#define QUECTEL_PRODUCT_EM05G_SG 0x0311
+#define QUECTEL_PRODUCT_EM05CN 0x0312
+#define QUECTEL_PRODUCT_EM05G_GR 0x0313
+#define QUECTEL_PRODUCT_EM05G_RS 0x0314
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
+#define QUECTEL_PRODUCT_RM520N 0x0801
+#define QUECTEL_PRODUCT_EC200U 0x0901
+#define QUECTEL_PRODUCT_EG912Y 0x6001
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
+#define QUECTEL_PRODUCT_EC200A 0x6005
+#define QUECTEL_PRODUCT_EM061K_LWW 0x6008
+#define QUECTEL_PRODUCT_EM061K_LCN 0x6009
#define QUECTEL_PRODUCT_EC200T 0x6026
#define QUECTEL_PRODUCT_RM500K 0x7001
@@ -392,6 +413,8 @@ static void option_instat_callback(struct urb *urb);
#define LONGCHEER_VENDOR_ID 0x1c9e
/* 4G Systems products */
+/* This one was sold as the VW and Skoda "Carstick LTE" */
+#define FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE 0x7605
/* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
* It seems to contain a Qualcomm QSC6240/6290 chipset */
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
@@ -438,6 +461,8 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9
#define CINTERION_PRODUCT_MV32_WA 0x00f1
#define CINTERION_PRODUCT_MV32_WB 0x00f2
+#define CINTERION_PRODUCT_MV32_WA_RMNET 0x00f3
+#define CINTERION_PRODUCT_MV32_WB_RMNET 0x00f4
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -573,6 +598,20 @@ static void option_instat_callback(struct urb *urb);
#define WETELECOM_PRODUCT_6802 0x6802
#define WETELECOM_PRODUCT_WMD300 0x6803
+/* OPPO products */
+#define OPPO_VENDOR_ID 0x22d9
+#define OPPO_PRODUCT_R11 0x276c
+
+/* Sierra Wireless products */
+#define SIERRA_VENDOR_ID 0x1199
+#define SIERRA_PRODUCT_EM9191 0x90d3
+
+/* UNISOC (Spreadtrum) products */
+#define UNISOC_VENDOR_ID 0x1782
+/* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
+#define TOZED_PRODUCT_LT70C 0x4055
+/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
+#define LUAT_PRODUCT_AIR720U 0x4e00
/* Device flags */
@@ -1075,6 +1114,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | RSVD(6) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
.driver_info = RSVD(0) | RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R_ESIM, 0xff) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1116,8 +1157,20 @@ static const struct usb_device_id option_ids[] = {
/* u-blox products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
.driver_info = RSVD(1) | RSVD(3) },
- { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX),
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x908b), /* u-blox LARA-R6 00B */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x90fa),
.driver_info = RSVD(3) },
+ /* u-blox products */
+ { USB_DEVICE(UBLOX_VENDOR_ID, 0x1311) }, /* u-blox LARA-R6 01B */
+ { USB_DEVICE(UBLOX_VENDOR_ID, 0x1312), /* u-blox LARA-R6 01B (RMNET) */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(UBLOX_VENDOR_ID, 0x1313, 0xff) }, /* u-blox LARA-R6 01B (ECM) */
+ { USB_DEVICE(UBLOX_VENDOR_ID, 0x1341) }, /* u-blox LARA-L6 */
+ { USB_DEVICE(UBLOX_VENDOR_ID, 0x1342), /* u-blox LARA-L6 (RMNET) */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE(UBLOX_VENDOR_ID, 0x1343), /* u-blox LARA-L6 (ECM) */
+ .driver_info = RSVD(4) },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
@@ -1131,13 +1184,47 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */
+ .driver_info = ZLP },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN, 0xff),
+ .driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN_SG, 0xff),
+ .driver_info = RSVD(6) | ZLP },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
.driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_GR, 0xff),
+ .driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05GV2, 0xff),
+ .driver_info = RSVD(4) | ZLP },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_CS, 0xff),
+ .driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_RS, 0xff),
+ .driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_SG, 0xff),
+ .driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
@@ -1146,11 +1233,20 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */
.driver_info = RSVD(3) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
.driver_info = ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0900, 0xff, 0, 0), /* RM500U-CN */
+ .driver_info = ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200A, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
@@ -1204,6 +1300,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1035, 0xff) }, /* Telit LE910C4-WWX (ECM) */
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
.driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
@@ -1250,6 +1347,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */
.driver_info = RSVD(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990 (MBIM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990 (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1446,7 +1551,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
@@ -1928,6 +2034,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(2) },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE),
+ .driver_info = RSVD(0) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
.driver_info = NCTRL(0) | NCTRL(1) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
@@ -1993,8 +2101,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
.driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA_RMNET, 0xff),
+ .driver_info = RSVD(0) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
.driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB_RMNET, 0xff),
+ .driver_info = RSVD(0) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
@@ -2133,12 +2245,19 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0da, 0xff), /* Foxconn T99W265 MBIM variant */
+ .driver_info = RSVD(3) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */
.driver_info = RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0ee, 0xff), /* Foxconn T99W368 MBIM */
+ .driver_info = RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0f0, 0xff), /* Foxconn T99W373 MBIM */
+ .driver_info = RSVD(3) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0001, 0xff, 0xff, 0xff) }, /* Fibocom L716-EU (ECM/RNDIS mode) */
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
@@ -2147,14 +2266,22 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) }, /* Fibocom MA510 (ECM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
+ { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
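A note on the .driver_info masks used throughout option_ids[] above: RSVD() marks interfaces the serial driver must not claim (they belong to qmi_wwan/cdc_mbim or ADB), NCTRL() marks ports without modem-control lines, and ZLP/NUMEP2 tune transfer quirks. A sketch of the pattern, assuming a simplified flag layout rather than the exact upstream encoding:

#include <linux/bits.h>
#include <linux/types.h>

/* Assumed layout: one bit per interface number, packed into per-quirk
 * byte lanes of .driver_info; the shifts here are an approximation. */
#define RSVD(ifnum)	((BIT(ifnum) & 0xff) << 0)	/* never bind   */
#define NCTRL(ifnum)	((BIT(ifnum) & 0xff) << 8)	/* no modem ctl */

static bool interface_is_reserved(unsigned long device_flags, u8 ifnum)
{
	/* probe() bails out on RSVD() interfaces, leaving them free for
	 * the network (qmi_wwan/cdc_mbim) or ADB function drivers */
	return device_flags & RSVD(ifnum);
}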
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 3f437f07356b..5570c50d005d 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -177,12 +177,15 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81c2)}, /* Dell Wireless 5811e */
{DEVICE_SWI(0x413c, 0x81cb)}, /* Dell Wireless 5816e QDL */
{DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */
{DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
{DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
{DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
{DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */
+ {DEVICE_SWI(0x413c, 0x8217)}, /* Dell Wireless DW5826e */
+ {DEVICE_SWI(0x413c, 0x8218)}, /* Dell Wireless DW5826e QDL */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index a43263a0edd8..891e52bc5002 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -757,7 +757,8 @@ static void sierra_close(struct usb_serial_port *port)
/*
* Need to take susp_lock to make sure port is not already being
- * resumed, but no need to hold it due to initialized
+ * resumed, but no need to hold it due to the tty-port initialized
+ * flag.
*/
spin_lock_irq(&intfdata->susp_lock);
if (--intfdata->open_ports == 0)
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 4c6747889a19..24b8772a345e 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -38,16 +38,6 @@ static struct usb_serial_driver vendor##_device = { \
{ USB_DEVICE(0x0a21, 0x8001) } /* MMT-7305WW */
DEVICE(carelink, CARELINK_IDS);
-/* ZIO Motherboard USB driver */
-#define ZIO_IDS() \
- { USB_DEVICE(0x1CBE, 0x0103) }
-DEVICE(zio, ZIO_IDS);
-
-/* Funsoft Serial USB driver */
-#define FUNSOFT_IDS() \
- { USB_DEVICE(0x1404, 0xcddc) }
-DEVICE(funsoft, FUNSOFT_IDS);
-
/* Infineon Flashloader driver */
#define FLASHLOADER_IDS() \
{ USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
@@ -55,6 +45,11 @@ DEVICE(funsoft, FUNSOFT_IDS);
{ USB_DEVICE(0x8087, 0x0801) }
DEVICE(flashloader, FLASHLOADER_IDS);
+/* Funsoft Serial USB driver */
+#define FUNSOFT_IDS() \
+ { USB_DEVICE(0x1404, 0xcddc) }
+DEVICE(funsoft, FUNSOFT_IDS);
+
/* Google Serial USB SubClass */
#define GOOGLE_IDS() \
{ USB_VENDOR_AND_INTERFACE_INFO(0x18d1, \
@@ -63,16 +58,21 @@ DEVICE(flashloader, FLASHLOADER_IDS);
0x01) }
DEVICE(google, GOOGLE_IDS);
+/* HP4x (48/49) Generic Serial driver */
+#define HP4X_IDS() \
+ { USB_DEVICE(0x03f0, 0x0121) }
+DEVICE(hp4x, HP4X_IDS);
+
+/* KAUFMANN RKS+CAN VCP */
+#define KAUFMANN_IDS() \
+ { USB_DEVICE(0x16d0, 0x0870) }
+DEVICE(kaufmann, KAUFMANN_IDS);
+
/* Libtransistor USB console */
#define LIBTRANSISTOR_IDS() \
{ USB_DEVICE(0x1209, 0x8b00) }
DEVICE(libtransistor, LIBTRANSISTOR_IDS);
-/* ViVOpay USB Serial Driver */
-#define VIVOPAY_IDS() \
- { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
-DEVICE(vivopay, VIVOPAY_IDS);
-
/* Motorola USB Phone driver */
#define MOTO_IDS() \
{ USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */ \
@@ -101,10 +101,10 @@ DEVICE(nokia, NOKIA_IDS);
{ USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
DEVICE_N(novatel_gps, NOVATEL_IDS, 3);
-/* HP4x (48/49) Generic Serial driver */
-#define HP4X_IDS() \
- { USB_DEVICE(0x03f0, 0x0121) }
-DEVICE(hp4x, HP4X_IDS);
+/* Siemens USB/MPI adapter */
+#define SIEMENS_IDS() \
+ { USB_DEVICE(0x908, 0x0004) }
+DEVICE(siemens_mpi, SIEMENS_IDS);
/* Suunto ANT+ USB Driver */
#define SUUNTO_IDS() \
@@ -112,45 +112,52 @@ DEVICE(hp4x, HP4X_IDS);
{ USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */
DEVICE(suunto, SUUNTO_IDS);
-/* Siemens USB/MPI adapter */
-#define SIEMENS_IDS() \
- { USB_DEVICE(0x908, 0x0004) }
-DEVICE(siemens_mpi, SIEMENS_IDS);
+/* ViVOpay USB Serial Driver */
+#define VIVOPAY_IDS() \
+ { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
+DEVICE(vivopay, VIVOPAY_IDS);
+
+/* ZIO Motherboard USB driver */
+#define ZIO_IDS() \
+ { USB_DEVICE(0x1CBE, 0x0103) }
+DEVICE(zio, ZIO_IDS);
/* All of the above structures mushed into two lists */
static struct usb_serial_driver * const serial_drivers[] = {
&carelink_device,
- &zio_device,
- &funsoft_device,
&flashloader_device,
+ &funsoft_device,
&google_device,
+ &hp4x_device,
+ &kaufmann_device,
&libtransistor_device,
- &vivopay_device,
&moto_modem_device,
&motorola_tetra_device,
&nokia_device,
&novatel_gps_device,
- &hp4x_device,
- &suunto_device,
&siemens_mpi_device,
+ &suunto_device,
+ &vivopay_device,
+ &zio_device,
NULL
};
static const struct usb_device_id id_table[] = {
CARELINK_IDS(),
- ZIO_IDS(),
- FUNSOFT_IDS(),
FLASHLOADER_IDS(),
+ FUNSOFT_IDS(),
GOOGLE_IDS(),
+ HP4X_IDS(),
+ KAUFMANN_IDS(),
LIBTRANSISTOR_IDS(),
- VIVOPAY_IDS(),
MOTO_IDS(),
MOTOROLA_TETRA_IDS(),
NOKIA_IDS(),
NOVATEL_IDS(),
- HP4X_IDS(),
- SUUNTO_IDS(),
SIEMENS_IDS(),
+ SUUNTO_IDS(),
+ VIVOPAY_IDS(),
+ ZIO_IDS(),
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
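The churn in this file is an alphabetical reordering of the DEVICE() entries plus the new HP4x, Kaufmann, and Siemens placements; each DEVICE(name, IDS) invocation generates both an id table and a one-port usb_serial_driver. A condensed sketch of that generator (the real macro, partly visible at the top of the hunk, carries a few more fields):

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

/* Condensed approximation of usb-serial-simple's generator macro */
#define DEVICE_SKETCH(vendor, IDS)				\
static const struct usb_device_id vendor##_id_table[] = {	\
	IDS(),							\
	{ },							\
};								\
static struct usb_serial_driver vendor##_device = {		\
	.driver = {						\
		.owner	= THIS_MODULE,				\
		.name	= #vendor,				\
	},							\
	.id_table	= vendor##_id_table,			\
	.num_ports	= 1,					\
};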
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index dc7a65b9ec98..2a2469b76cc5 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -254,7 +254,7 @@ static int serial_open(struct tty_struct *tty, struct file *filp)
*
* Shut down a USB serial port. Serialized against activate by the
* tport mutex and kept to matching open/close pairs
- * of calls by the initialized flag.
+ * of calls by the tty-port initialized flag.
*
* Not called if tty is console.
*/
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index b2285d5a869d..628a75d1232a 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -435,7 +435,8 @@ void usb_wwan_close(struct usb_serial_port *port)
/*
* Need to take susp_lock to make sure port is not already being
- * resumed, but no need to hold it due to initialized
+ * resumed, but no need to hold it due to the tty-port initialized
+ * flag.
*/
spin_lock_irq(&intfdata->susp_lock);
if (--intfdata->open_ports == 0)
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index ddab2cd3d2e7..dcc4778d1ae9 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -318,7 +318,8 @@ static int alauda_get_media_status(struct us_data *us, unsigned char *data)
rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe,
command, 0xc0, 0, 1, data, 2);
- usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]);
+ if (rc == USB_STOR_XFER_GOOD)
+ usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]);
return rc;
}
@@ -438,6 +439,8 @@ static int alauda_init_media(struct us_data *us)
+ MEDIA_INFO(us).blockshift + MEDIA_INFO(us).pageshift);
MEDIA_INFO(us).pba_to_lba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO);
MEDIA_INFO(us).lba_to_pba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO);
+ if (MEDIA_INFO(us).pba_to_lba == NULL || MEDIA_INFO(us).lba_to_pba == NULL)
+ return USB_STOR_TRANSPORT_ERROR;
if (alauda_reset_media(us) != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
@@ -452,10 +455,14 @@ static int alauda_init_media(struct us_data *us)
static int alauda_check_media(struct us_data *us)
{
struct alauda_info *info = (struct alauda_info *) us->extra;
- unsigned char status[2];
+ unsigned char *status = us->iobuf;
int rc;
rc = alauda_get_media_status(us, status);
+ if (rc != USB_STOR_XFER_GOOD) {
+ status[0] = 0xF0; /* Pretend there's no media */
+ status[1] = 0;
+ }
/* Check for no media or door open */
if ((status[0] & 0x80) || ((status[0] & 0x1F) == 0x10)
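Two fixes meet in the alauda.c hunks above: the status bytes now land in us->iobuf rather than an on-stack array (buffers handed to USB transfer routines must be DMA-able, which stack memory is not guaranteed to be), and a failed transfer now yields a synthetic "no media" status instead of uninitialized bytes. The general rule, as a standalone sketch (the request value is a made-up example):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

/* Never pass stack memory to USB transfer functions; use a heap,
 * DMA-able buffer (usb-storage pre-allocates us->iobuf for this). */
static int read_two_status_bytes(struct usb_device *udev, u8 *out)
{
	u8 *buf = kmalloc(2, GFP_NOIO);	/* DMA-able, unlike u8 buf[2] */
	int rc;

	if (!buf)
		return -ENOMEM;

	rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			     0x26 /* assumption: example request */, 0xc0,
			     0, 1, buf, 2, 5000 /* ms timeout */);
	if (rc >= 0)
		memcpy(out, buf, 2);

	kfree(buf);
	return rc < 0 ? rc : 0;
}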
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 9c984f3c7248..b076260a2ca4 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -938,7 +938,7 @@ static int ms_lib_process_bootblock(struct us_data *us, u16 PhyBlock, u8 *PageDa
struct ms_lib_type_extdat ExtraData;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
- PageBuffer = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
+ PageBuffer = kzalloc(MS_BYTES_PER_PAGE * 2, GFP_KERNEL);
if (PageBuffer == NULL)
return (u32)-1;
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 2adcabe060c5..49d17988e2ce 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -407,22 +407,25 @@ static DEF_SCSI_QCMD(queuecommand)
***********************************************************************/
/* Command timeout and abort */
-static int command_abort(struct scsi_cmnd *srb)
+static int command_abort_matching(struct us_data *us, struct scsi_cmnd *srb_match)
{
- struct us_data *us = host_to_us(srb->device->host);
-
- usb_stor_dbg(us, "%s called\n", __func__);
-
/*
* us->srb together with the TIMED_OUT, RESETTING, and ABORTING
* bits are protected by the host lock.
*/
scsi_lock(us_to_host(us));
- /* Is this command still active? */
- if (us->srb != srb) {
+	/* Is there any active pending command to abort? */
+ if (!us->srb) {
scsi_unlock(us_to_host(us));
usb_stor_dbg(us, "-- nothing to abort\n");
+ return SUCCESS;
+ }
+
+	/* Does the command match the passed srb, if any? */
+ if (srb_match && us->srb != srb_match) {
+ scsi_unlock(us_to_host(us));
+ usb_stor_dbg(us, "-- pending command mismatch\n");
return FAILED;
}
@@ -445,6 +448,14 @@ static int command_abort(struct scsi_cmnd *srb)
return SUCCESS;
}
+static int command_abort(struct scsi_cmnd *srb)
+{
+ struct us_data *us = host_to_us(srb->device->host);
+
+ usb_stor_dbg(us, "%s called\n", __func__);
+ return command_abort_matching(us, srb);
+}
+
/*
* This invokes the transport reset mechanism to reset the state of the
* device
@@ -456,6 +467,9 @@ static int device_reset(struct scsi_cmnd *srb)
usb_stor_dbg(us, "%s called\n", __func__);
+ /* abort any pending command before reset */
+ command_abort_matching(us, NULL);
+
/* lock the device pointers and do the reset */
mutex_lock(&(us->dev_mutex));
result = us->transport_reset(us);
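The scsiglue.c refactor splits the old abort handler into command_abort_matching() so device_reset() can flush whatever command is pending first: srb_match == NULL aborts any pending command, while a non-NULL srb_match only proceeds if it is still the active one. The verdict logic, restated as a sketch (this mirrors the hunk and adds nothing):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* What command_abort_matching() decides before touching hardware;
 * SUCCESS/FAILED are the SCSI error-handling verdicts. */
static int abort_verdict_sketch(struct scsi_cmnd *pending,
				struct scsi_cmnd *srb_match)
{
	if (!pending)
		return SUCCESS;		/* nothing to abort */
	if (srb_match && pending != srb_match)
		return FAILED;		/* pending command mismatch */
	return SUCCESS;			/* proceed with the abort */
}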
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index ba955d65eb0e..c8a988d2cfdd 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -554,8 +554,8 @@ static int sddr55_reset(struct us_data *us)
static unsigned long sddr55_get_capacity(struct us_data *us) {
- unsigned char uninitialized_var(manufacturerID);
- unsigned char uninitialized_var(deviceID);
+ unsigned char manufacturerID;
+ unsigned char deviceID;
int result;
struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra;
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index 3734a25e09e5..44f0b78be8a9 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -116,6 +116,19 @@ static int uas_use_uas_driver(struct usb_interface *intf,
if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
flags |= US_FL_NO_ATA_1X;
+	/*
+	 * The HIKSEMI MD202, an RTL9210-based enclosure, reportedly has
+	 * issues with UAS. It is not distinguishable by idVendor and
+	 * idProduct alone, so match on the manufacturer and product
+	 * strings too.
+	 *
+	 * Reported-by: Hongling Zeng <zenghongling@kylinos.cn>
+	 */
+ if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bda &&
+ le16_to_cpu(udev->descriptor.idProduct) == 0x9210 &&
+ (udev->manufacturer && !strcmp(udev->manufacturer, "HIKSEMI")) &&
+ (udev->product && !strcmp(udev->product, "MD202")))
+ flags |= US_FL_IGNORE_UAS;
+
usb_stor_adjust_quirks(udev, &flags);
if (flags & US_FL_IGNORE_UAS) {
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 678903d1ce4d..d70bd5814796 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -80,6 +80,8 @@ static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller);
static void uas_free_streams(struct uas_dev_info *devinfo);
static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
int status);
+static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
+ struct scsi_cmnd *cmnd);
/*
* This driver needs its own workqueue, as we need to control memory allocation.
@@ -296,18 +298,283 @@ static bool uas_evaluate_response_iu(struct response_iu *riu, struct scsi_cmnd *
return response_code == RC_TMF_SUCCEEDED;
}
+static void dummy_scsi_done(struct scsi_cmnd *cmnd)
+{
+ struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
+
+ devinfo->cmnd[cmdinfo->uas_tag - 1] = NULL;
+ kfree(cmnd->request);
+ kfree(cmnd);
+}
+
+static void uas_workaround_cmplt(struct urb *urb)
+{
+ struct scsi_cmnd *cmnd;
+ struct uas_cmd_info *cmdinfo;
+
+ if ((urb->context != NULL) && (urb->status == 0)) {
+ cmnd = urb->context;
+ cmdinfo = (struct uas_cmd_info *)&cmnd->SCp;
+
+ if (cmdinfo->data_in_urb != urb)
+ cmnd->scsi_done(cmnd);
+ }
+
+ usb_free_urb(urb);
+}
+
+static struct urb *uas_workaround_cmnd(struct uas_dev_info *devinfo, gfp_t gfp,
+				       struct scsi_cmnd *cmnd)
+{
+ struct scsi_device *sdev = cmnd->device;
+ struct urb *urb;
+ int err;
+
+ urb = uas_alloc_cmd_urb(devinfo, gfp, cmnd);
+ if (!urb) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate cmnd URB\n", __func__);
+ return NULL;
+ }
+
+ err = usb_submit_urb(urb, gfp);
+ if (err) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to submit cmd, err=%d\n", __func__, err);
+ goto free;
+ }
+ usb_anchor_urb(urb, &devinfo->cmd_urbs);
+ return urb;
+
+free:
+ usb_free_urb(urb);
+ return NULL;
+}
+
+static struct urb *uas_workaround_data(struct uas_dev_info *devinfo, gfp_t gfp,
+				       struct scsi_cmnd *cmnd)
+{
+ struct scsi_device *sdev = cmnd->device;
+ struct usb_device *udev = devinfo->udev;
+ struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct urb *urb = usb_alloc_urb(0, gfp);
+ struct scsi_data_buffer *sdb = NULL;
+ void *temp_buf;
+ unsigned int pipe;
+ int err;
+
+ if (!urb) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate URB\n", __func__);
+ return NULL;
+ }
+
+ cmdinfo->data_in_urb = urb;
+ sdb = &cmnd->sdb;
+ pipe = devinfo->data_in_pipe;
+ temp_buf = kzalloc(sdb->length, GFP_ATOMIC);
+ if (!temp_buf) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate memory\n", __func__);
+ goto free;
+ }
+
+ usb_fill_bulk_urb(urb, udev, pipe, temp_buf, sdb->length,
+ uas_workaround_cmplt, cmnd);
+ if (devinfo->use_streams)
+ urb->stream_id = cmdinfo->uas_tag;
+ urb->transfer_flags |= URB_FREE_BUFFER;
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to submit Data In urb, err = %d\n",
+ __func__, err);
+ goto free;
+ }
+
+ usb_anchor_urb(urb, &devinfo->data_urbs);
+ return urb;
+
+free:
+ usb_free_urb(urb);
+ return NULL;
+}
+
+static struct urb *uas_workaround_sense(struct uas_dev_info *devinfo, gfp_t gfp,
+					struct scsi_cmnd *cmnd)
+{
+ struct scsi_device *sdev = cmnd->device;
+ struct usb_device *udev = devinfo->udev;
+ struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct urb *urb = usb_alloc_urb(0, gfp);
+ struct sense_iu *iu;
+ int err;
+
+ if (!urb) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate URB\n", __func__);
+ return NULL;
+ }
+
+ iu = kzalloc(sizeof(*iu), gfp);
+ if (!iu) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate memory for sense_iu\n",
+ __func__);
+ goto free;
+ }
+
+ usb_fill_bulk_urb(urb, udev, devinfo->status_pipe, iu, sizeof(*iu),
+ uas_workaround_cmplt, cmnd);
+ if (devinfo->use_streams)
+ urb->stream_id = cmdinfo->uas_tag;
+ urb->transfer_flags |= URB_FREE_BUFFER;
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to submit Sense urb, err = %d\n",
+ __func__, err);
+ goto free;
+ }
+ usb_anchor_urb(urb, &devinfo->sense_urbs);
+ return urb;
+
+free:
+ usb_free_urb(urb);
+ return NULL;
+}
+
+/*
+ * This function is called only if the DATA IN stream timer expired, which
+ * means the xHCI host controller has failed to process the TRBs present in
+ * the stream ring. As part of the recovery sequence, this function
+ * re-submits the stopped urb on which the xHCI failed to process data and,
+ * alongside it, prepares and submits sense, data and cmnd urbs for a
+ * standard-inquiry SCSI command carrying the next free stream id tag.
+ * Doing so makes the xHCI start processing the previously stopped urb
+ * together with the urb that holds the standard-inquiry SCSI command.
+ */
+static int uas_workaround(struct urb *urb)
+{
+ struct scsi_cmnd *cmnd = urb->context;
+ struct scsi_device *sdev = cmnd->device;
+ struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
+ struct scsi_cmnd *temp_cmnd;
+ struct uas_cmd_info *temp_cmdinfo;
+ struct urb *sense_urb, *data_urb, *cmnd_urb;
+ struct request *temp_request;
+ unsigned int idx;
+ int err;
+ char inquiry[16] = { 0x12, 0x0, 0x0, 0x0, 0x10, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 };
+
+ /* Find a free uas-tag */
+ for (idx = 0; idx < devinfo->qdepth; idx++) {
+ if (!devinfo->cmnd[idx])
+ break;
+ }
+
+ if (idx == devinfo->qdepth) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to find free tag\n", __func__);
+ err = -EINVAL;
+ goto free;
+ }
+
+ /* Create a scsi_cmnd and send dummy inquiry data on the next
+ * available tag
+ */
+ temp_cmnd = kzalloc(sizeof(struct scsi_cmnd), GFP_ATOMIC);
+ if (!temp_cmnd) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate memory for scsi_cmnd\n",
+ __func__);
+ err = -ENOMEM;
+ goto free;
+ }
+
+	temp_request = kzalloc(sizeof(struct request), GFP_ATOMIC);
+	if (!temp_request) {
+		shost_printk(KERN_INFO, sdev->host,
+			"%s: Failed to allocate memory for request\n",
+			__func__);
+		kfree(temp_cmnd);
+		err = -ENOMEM;
+		goto free;
+	}
+
+ temp_cmnd->device = cmnd->device;
+ temp_cmnd->cmnd = inquiry;
+ temp_cmnd->cmd_len = 16;
+ temp_cmnd->sdb.length = 0x10;
+ temp_cmnd->scsi_done = dummy_scsi_done;
+ temp_request->tag = idx;
+ temp_cmnd->request = temp_request;
+
+ temp_cmdinfo = (struct uas_cmd_info *)&temp_cmnd->SCp;
+ memset(temp_cmdinfo, 0, sizeof(struct uas_cmd_info));
+
+ temp_cmdinfo->uas_tag = idx + 1;
+ devinfo->cmnd[idx] = temp_cmnd;
+
+ /* Submit previously stopped URB first */
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: submit err %d\n", __func__, err);
+ kfree(temp_cmnd);
+ kfree(temp_request);
+ goto free;
+ }
+ usb_anchor_urb(urb, &devinfo->data_urbs);
+
+ /* Allocate and submit SENSE urb for next available tag */
+ sense_urb = uas_workaround_sense(devinfo, GFP_ATOMIC, temp_cmnd);
+ if (!sense_urb) {
+ kfree(temp_request);
+ kfree(temp_cmnd);
+ goto free;
+ }
+
+ /* Allocate and submit DATA IN urb for next available tag */
+ data_urb = uas_workaround_data(devinfo, GFP_ATOMIC, temp_cmnd);
+ if (!data_urb) {
+ /* Kill previously allocated sense urb */
+ sense_urb->context = NULL;
+ usb_kill_urb(sense_urb);
+ usb_put_urb(sense_urb);
+ kfree(temp_request);
+ kfree(temp_cmnd);
+ goto free;
+ }
+
+	/* Allocate and submit CMND urb with dummy inquiry data */
+	cmnd_urb = uas_workaround_cmnd(devinfo, GFP_ATOMIC, temp_cmnd);
+	if (!cmnd_urb) {
+		/* Kill previously allocated sense and data urbs */
+		sense_urb->context = NULL;
+		usb_kill_urb(sense_urb);
+		usb_put_urb(sense_urb);
+		data_urb->context = NULL;
+		usb_kill_urb(data_urb);
+		usb_put_urb(data_urb);
+		kfree(temp_request);
+		kfree(temp_cmnd);
+		err = -ENOMEM;
+	}
+
+free:
+ return err;
+}
+
static void uas_stat_cmplt(struct urb *urb)
{
struct iu *iu = urb->transfer_buffer;
- struct Scsi_Host *shost = urb->context;
- struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)urb->context;
+ struct uas_dev_info *devinfo =
+ (struct uas_dev_info *)cmnd->device->hostdata;
struct urb *data_in_urb = NULL;
struct urb *data_out_urb = NULL;
- struct scsi_cmnd *cmnd;
struct uas_cmd_info *cmdinfo;
unsigned long flags;
unsigned int idx;
int status = urb->status;
+ int err;
bool success;
spin_lock_irqsave(&devinfo->lock, flags);
@@ -316,6 +583,21 @@ static void uas_stat_cmplt(struct urb *urb)
goto out;
if (status) {
+ if (status == -EAGAIN) {
+ /* We get here only if the xhci stream timer expires,
+ * call uas_workaround() with this urb as argument.
+ */
+ err = uas_workaround(urb);
+ if (err != 0) {
+ dev_err(&urb->dev->dev,
+ "%s: uas_workaround() failed, err=%d\n",
+ __func__, err);
+ goto out;
+ }
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ return;
+ }
+
if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
dev_err(&urb->dev->dev, "stat urb: status %d\n", status);
goto out;
@@ -398,10 +680,27 @@ static void uas_data_cmplt(struct urb *urb)
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
struct scsi_data_buffer *sdb = &cmnd->sdb;
unsigned long flags;
+ int err;
int status = urb->status;
spin_lock_irqsave(&devinfo->lock, flags);
+ if ((status == -EAGAIN) && (!devinfo->resetting) &&
+ (cmdinfo->data_in_urb == urb)) {
+ /* We get here only if the xhci stream timer expires,
+ * call uas_workaround() with this urb as argument.
+ */
+ err = uas_workaround(urb);
+ if (err != 0) {
+ dev_err(&urb->dev->dev,
+ "%s: uas_workaround() failed, err=%d\n",
+ __func__, err);
+ goto out;
+ }
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ return;
+ }
+
if (cmdinfo->data_in_urb == urb) {
cmdinfo->state &= ~DATA_IN_URB_INFLIGHT;
cmdinfo->data_in_urb = NULL;
@@ -480,7 +779,7 @@ static struct urb *uas_alloc_sense_urb(struct uas_dev_info *devinfo, gfp_t gfp,
goto free;
usb_fill_bulk_urb(urb, udev, devinfo->status_pipe, iu, sizeof(*iu),
- uas_stat_cmplt, cmnd->device->host);
+ uas_stat_cmplt, cmnd);
if (devinfo->use_streams)
urb->stream_id = cmdinfo->uas_tag;
urb->transfer_flags |= URB_FREE_BUFFER;
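Taken together, the uas.c changes reroute status-urb completions through the scsi_cmnd and, on an -EAGAIN stream timeout, replay the stopped urb plus a throwaway INQUIRY on a spare stream tag. The trigger shape shared by uas_stat_cmplt() and uas_data_cmplt(), as a simplified sketch (locking and anchoring omitted):

#include <linux/errno.h>
#include <linux/usb.h>

static int uas_workaround(struct urb *urb);	/* from the hunk above */

static void completion_sketch(struct urb *urb)
{
	if (urb->status == -EAGAIN) {
		/* The xHC stopped this stream ring: re-submit the
		 * stopped urb and queue a throwaway INQUIRY on a free
		 * stream tag so a fresh doorbell un-wedges the stream;
		 * both steps live in uas_workaround(). */
		if (uas_workaround(urb) == 0)
			return;	/* recovery in flight; skip completion */
	}
	/* otherwise fall through to the normal completion handling */
}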
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index fb99e526cd48..7f7534098d53 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -19,7 +19,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
"Cypress ISD-300LP",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0150, 0x0160,
"Super Top",
"USB 2.0 SATA BRIDGE",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 66e7f5d123c4..b270be141b8e 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1275,12 +1275,6 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
USB_SC_RBC, USB_PR_BULK, NULL,
0 ),
-UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100,
- "Samsung",
- "Flash Drive FIT",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_MAX_SECTORS_64),
-
/* aeb */
UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
"Feiya",
@@ -2294,6 +2288,13 @@ UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+/* Reported by Witold Lipieta <witold.lipieta@thaumatec.com> */
+UNUSUAL_DEV( 0x1fc9, 0x0117, 0x0100, 0x0100,
+ "NXP Semiconductors",
+ "PN7462AU",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* Supplied with some Castlewood ORB removable drives */
UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
"Double-H Technology",
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 2f72753c3e22..fcee9141336e 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -52,6 +52,13 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
+/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+UNUSUAL_DEV(0x090c, 0x2000, 0x0000, 0x9999,
+ "Hiksemi",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/*
* Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
* commands in UAS mode. Observed with the 1.28 firmware; are there others?
@@ -62,6 +69,19 @@ UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
+UNUSUAL_DEV(0x0525, 0xa4a5, 0x0000, 0x9999,
+ "Netchip",
+ "Target Product",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
+/* Reported-by: Tom Hu <huxiaoying@kylinos.cn> */
+UNUSUAL_DEV(0x0b05, 0x1932, 0x0000, 0x9999,
+ "ASUS",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: David Webb <djw@noc.ac.uk> */
UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
"Seagate",
@@ -97,6 +117,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA),
+/* Reported by: Yaroslav Furman <yaro330@gmail.com> */
+UNUSUAL_DEV(0x152d, 0x0583, 0x0000, 0x9999,
+ "JMicron",
+ "JMS583Gen 2",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
+
/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
"PNY",
@@ -111,6 +138,13 @@ UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+UNUSUAL_DEV(0x17ef, 0x3899, 0x0000, 0x9999,
+ "Thinkplus",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
"VIA",
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 4092248a5936..a2a1baabca93 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -87,8 +87,8 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
case DP_STATUS_CON_UFP_D:
case DP_STATUS_CON_BOTH: /* NOTE: First acting as DP source */
conf |= DP_CONF_UFP_U_AS_UFP_D;
- pin_assign = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo) &
- DP_CAP_UFP_D_PIN_ASSIGN(dp->port->vdo);
+ pin_assign = DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo) &
+ DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo);
break;
default:
break;
@@ -100,8 +100,12 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
if (dp->data.status & DP_STATUS_PREFER_MULTI_FUNC &&
pin_assign & DP_PIN_ASSIGN_MULTI_FUNC_MASK)
pin_assign &= DP_PIN_ASSIGN_MULTI_FUNC_MASK;
- else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK)
+ else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK) {
pin_assign &= DP_PIN_ASSIGN_DP_ONLY_MASK;
+ /* Default to pin assign C if available */
+ if (pin_assign & BIT(DP_PIN_ASSIGN_C))
+ pin_assign = BIT(DP_PIN_ASSIGN_C);
+ }
if (!pin_assign)
return -EINVAL;
@@ -407,6 +411,18 @@ static const char * const pin_assignments[] = {
[DP_PIN_ASSIGN_F] = "F",
};
+/*
+ * Helper function to extract a peripheral's currently supported
+ * Pin Assignments from its DisplayPort alternate mode state.
+ */
+static u8 get_current_pin_assignments(struct dp_altmode *dp)
+{
+ if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D)
+ return DP_CAP_PIN_ASSIGN_DFP_D(dp->alt->vdo);
+ else
+ return DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo);
+}
+
static ssize_t
pin_assignment_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
@@ -433,10 +449,7 @@ pin_assignment_store(struct device *dev, struct device_attribute *attr,
goto out_unlock;
}
- if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D)
- assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo);
- else
- assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo);
+ assignments = get_current_pin_assignments(dp);
if (!(DP_CONF_GET_PIN_ASSIGN(conf) & assignments)) {
ret = -EINVAL;
@@ -473,10 +486,7 @@ static ssize_t pin_assignment_show(struct device *dev,
cur = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf));
- if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D)
- assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo);
- else
- assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo);
+ assignments = get_current_pin_assignments(dp);
for (i = 0; assignments; assignments >>= 1, i++) {
if (assignments & 1) {
@@ -491,6 +501,10 @@ static ssize_t pin_assignment_show(struct device *dev,
mutex_unlock(&dp->lock);
+ /* get_current_pin_assignments can return 0 when no matching pin assignments are found */
+ if (len == 0)
+ len++;
+
buf[len - 1] = '\n';
return len;
}
@@ -516,10 +530,10 @@ int dp_altmode_probe(struct typec_altmode *alt)
/* FIXME: Port can only be DFP_U. */
/* Make sure we have compatible pin configurations */
- if (!(DP_CAP_DFP_D_PIN_ASSIGN(port->vdo) &
- DP_CAP_UFP_D_PIN_ASSIGN(alt->vdo)) &&
- !(DP_CAP_UFP_D_PIN_ASSIGN(port->vdo) &
- DP_CAP_DFP_D_PIN_ASSIGN(alt->vdo)))
+ if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) &
+ DP_CAP_PIN_ASSIGN_UFP_D(alt->vdo)) &&
+ !(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) &
+ DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
return -ENODEV;
ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
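The renamed DP_CAP_PIN_ASSIGN_DFP_D()/UFP_D() macros read the capability fields from each role's point of view, and configuration intersects the local and partner masks. A worked sketch of the selection the hunks above implement (mask values invented for the example; per the VESA DP alt mode, C and E are DP-only while B, D and F are multi-function):

#include <linux/bits.h>
#include <linux/types.h>

#define PIN_C	BIT(2)	/* DP-only, 4 lanes */
#define PIN_D	BIT(3)	/* multi-function: 2 lanes DP + USB3 */
#define PIN_E	BIT(4)	/* DP-only */

static u8 pick_pin_assignment_sketch(u8 ours, u8 theirs, bool multi_func)
{
	u8 pin_assign = ours & theirs;	/* e.g. (C|D) & (C|E) == C */

	/* prefer a multi-function assignment when the partner asks */
	if (multi_func && (pin_assign & PIN_D))
		return PIN_D;
	/* otherwise default to C among the DP-only choices, matching
	 * the "Default to pin assign C if available" hunk above */
	if (pin_assign & PIN_C)
		return PIN_C;
	return pin_assign;	/* whatever remains, possibly 0 (-EINVAL) */
}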
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index c950171556d8..052b8fb21344 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -126,7 +126,7 @@ int typec_altmode_exit(struct typec_altmode *adev)
if (!adev || !adev->active)
return 0;
- if (!pdev->ops || !pdev->ops->enter)
+ if (!pdev->ops || !pdev->ops->exit)
return -EOPNOTSUPP;
/* Moving to USB Safe State */
@@ -146,12 +146,20 @@ EXPORT_SYMBOL_GPL(typec_altmode_exit);
*
* Notifies the partner of @adev about Attention command.
*/
-void typec_altmode_attention(struct typec_altmode *adev, u32 vdo)
+int typec_altmode_attention(struct typec_altmode *adev, u32 vdo)
{
- struct typec_altmode *pdev = &to_altmode(adev)->partner->adev;
+ struct altmode *partner = to_altmode(adev)->partner;
+ struct typec_altmode *pdev;
+
+ if (!partner)
+ return -ENODEV;
+
+ pdev = &partner->adev;
if (pdev->ops && pdev->ops->attention)
pdev->ops->attention(pdev, vdo);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(typec_altmode_attention);
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index db926641b79d..371e010e0e85 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -189,11 +189,13 @@ static void typec_altmode_put_partner(struct altmode *altmode)
{
struct altmode *partner = altmode->partner;
struct typec_altmode *adev;
+ struct typec_altmode *partner_adev;
if (!partner)
return;
- adev = &partner->adev;
+ adev = &altmode->adev;
+ partner_adev = &partner->adev;
if (is_typec_plug(adev->dev.parent)) {
struct typec_plug *plug = to_typec_plug(adev->dev.parent);
@@ -202,7 +204,7 @@ static void typec_altmode_put_partner(struct altmode *altmode)
} else {
partner->partner = NULL;
}
- put_device(&adev->dev);
+ put_device(&partner_adev->dev);
}
static void *typec_port_match(struct device_connection *con, int ep, void *data)
@@ -465,7 +467,8 @@ static void typec_altmode_release(struct device *dev)
{
struct altmode *alt = to_altmode(to_typec_altmode(dev));
- typec_altmode_put_partner(alt);
+ if (!is_typec_port(dev->parent))
+ typec_altmode_put_partner(alt);
altmode_id_remove(alt->adev.dev.parent, alt->id);
kfree(alt);
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 6caed68ce1be..ccb72875c8ee 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -379,6 +379,10 @@ static int tcpci_init(struct tcpc_dev *tcpc)
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
+ ret = tcpci_write16(tcpci, TCPC_FAULT_STATUS, TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT);
+ if (ret < 0)
+ return ret;
+
/* Handle vendor init */
if (tcpci->data->init) {
ret = tcpci->data->init(tcpci, tcpci->data);
@@ -551,8 +555,10 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
return ERR_PTR(err);
tcpci->port = tcpm_register_port(tcpci->dev, &tcpci->tcpc);
- if (IS_ERR(tcpci->port))
+ if (IS_ERR(tcpci->port)) {
+ fwnode_handle_put(tcpci->tcpc.fwnode);
return ERR_CAST(tcpci->port);
+ }
return tcpci;
}
@@ -561,6 +567,7 @@ EXPORT_SYMBOL_GPL(tcpci_register_port);
void tcpci_unregister_port(struct tcpci *tcpci)
{
tcpm_unregister_port(tcpci->port);
+ fwnode_handle_put(tcpci->tcpc.fwnode);
}
EXPORT_SYMBOL_GPL(tcpci_unregister_port);
diff --git a/drivers/usb/typec/tcpm/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h
index 303ebde26546..dcf60399f161 100644
--- a/drivers/usb/typec/tcpm/tcpci.h
+++ b/drivers/usb/typec/tcpm/tcpci.h
@@ -72,6 +72,7 @@
#define TCPC_POWER_STATUS_VBUS_PRES BIT(2)
#define TCPC_FAULT_STATUS 0x1f
+#define TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT BIT(7)
#define TCPC_COMMAND 0x23
#define TCPC_CMD_WAKE_I2C 0x11
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index fb18264b702e..07db1f1a1f72 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -159,6 +159,14 @@ enum pd_msg_request {
PD_MSG_DATA_SOURCE_CAP,
};
+enum adev_actions {
+ ADEV_NONE = 0,
+ ADEV_NOTIFY_USB_AND_QUEUE_VDM,
+ ADEV_QUEUE_VDM,
+ ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL,
+ ADEV_ATTENTION,
+};
+
/* Events from low level driver */
#define TCPM_CC_EVENT BIT(0)
@@ -969,16 +977,15 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
port->vdm_state = VDM_STATE_READY;
}
-static void svdm_consume_identity(struct tcpm_port *port, const __le32 *payload,
- int cnt)
+static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
{
- u32 vdo = le32_to_cpu(payload[VDO_INDEX_IDH]);
- u32 product = le32_to_cpu(payload[VDO_INDEX_PRODUCT]);
+ u32 vdo = p[VDO_INDEX_IDH];
+ u32 product = p[VDO_INDEX_PRODUCT];
memset(&port->mode_data, 0, sizeof(port->mode_data));
port->partner_ident.id_header = vdo;
- port->partner_ident.cert_stat = le32_to_cpu(payload[VDO_INDEX_CSTAT]);
+ port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
port->partner_ident.product = product;
typec_partner_set_identity(port->partner);
@@ -988,17 +995,15 @@ static void svdm_consume_identity(struct tcpm_port *port, const __le32 *payload,
PD_PRODUCT_PID(product), product & 0xffff);
}
-static bool svdm_consume_svids(struct tcpm_port *port, const __le32 *payload,
- int cnt)
+static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt)
{
struct pd_mode_data *pmdata = &port->mode_data;
int i;
for (i = 1; i < cnt; i++) {
- u32 p = le32_to_cpu(payload[i]);
u16 svid;
- svid = (p >> 16) & 0xffff;
+ svid = (p[i] >> 16) & 0xffff;
if (!svid)
return false;
@@ -1008,7 +1013,7 @@ static bool svdm_consume_svids(struct tcpm_port *port, const __le32 *payload,
pmdata->svids[pmdata->nsvids++] = svid;
tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
- svid = p & 0xffff;
+ svid = p[i] & 0xffff;
if (!svid)
return false;
@@ -1018,14 +1023,27 @@ static bool svdm_consume_svids(struct tcpm_port *port, const __le32 *payload,
pmdata->svids[pmdata->nsvids++] = svid;
tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
}
- return true;
+
+	/*
+	 * PD3.0 Spec 6.4.4.3.2: The SVIDs are returned 2 per VDO (see Table
+	 * 6-43), and a maximum of 6 VDOs can be returned per response (see
+	 * Figure 6-19). If the Responder supports 12 or more SVIDs then the
+	 * Discover SVIDs Command Shall be executed multiple times until a
+	 * Discover SVIDs VDO is returned ending either with a SVID value of
+	 * 0x0000 in the last part of the last VDO or with a VDO containing
+	 * two SVIDs with values of 0x0000.
+	 *
+	 * However, some odd docks report fewer than 12 SVIDs but omit the
+	 * terminating 0x0000 in the last VDO, so we also need to stop the
+	 * Discover SVIDs requests and return false here once a response
+	 * carries fewer than the maximum of six VDOs.
+	 */
+	return cnt == 7;
abort:
tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
return false;
}
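A quick worked count behind the cnt == 7 cut-off (one VDM header plus at most six SVID VDOs per response, two SVIDs per VDO):

#include <linux/types.h>

/* Worked example for the cnt == 7 cut-off (header + 6 VDOs):
 *
 *   Responder with 13 SVIDs:
 *     reply 1: hdr + 6 VDOs = 12 SVIDs -> cnt == 7, keep discovering
 *     reply 2: hdr + 1 VDO = SVID #13 + 0x0000 pad -> cnt == 2, stop
 *
 *   Odd dock with 4 SVIDs and no 0x0000 terminator:
 *     reply 1: hdr + 2 VDOs -> cnt == 3, stop (the fix above)
 */
static bool more_svids_expected(int cnt)
{
	return cnt == 7;	/* a full response implies more may follow */
}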
-static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
- int cnt)
+static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt)
{
struct pd_mode_data *pmdata = &port->mode_data;
struct typec_altmode_desc *paltmode;
@@ -1042,7 +1060,7 @@ static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
paltmode->svid = pmdata->svids[pmdata->svid_index];
paltmode->mode = i;
- paltmode->vdo = le32_to_cpu(payload[i]);
+ paltmode->vdo = p[i];
tcpm_log(port, " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x",
pmdata->altmodes, paltmode->svid,
@@ -1070,21 +1088,17 @@ static void tcpm_register_partner_altmodes(struct tcpm_port *port)
#define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
-static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
- u32 *response)
+static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
+ const u32 *p, int cnt, u32 *response,
+ enum adev_actions *adev_action)
{
- struct typec_altmode *adev;
struct typec_altmode *pdev;
struct pd_mode_data *modep;
- u32 p[PD_MAX_PAYLOAD];
int rlen = 0;
int cmd_type;
int cmd;
int i;
- for (i = 0; i < cnt; i++)
- p[i] = le32_to_cpu(payload[i]);
-
cmd_type = PD_VDO_CMDT(p[0]);
cmd = PD_VDO_CMD(p[0]);
@@ -1093,9 +1107,6 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
modep = &port->mode_data;
- adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
- PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
-
pdev = typec_match_altmode(port->partner_altmode, ALTMODE_DISCOVERY_MAX,
PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
@@ -1121,8 +1132,7 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
break;
case CMD_ATTENTION:
/* Attention command does not have response */
- if (adev)
- typec_altmode_attention(adev, p[1]);
+ *adev_action = ADEV_ATTENTION;
return 0;
default:
break;
@@ -1145,13 +1155,13 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
switch (cmd) {
case CMD_DISCOVER_IDENT:
/* 6.4.4.3.1 */
- svdm_consume_identity(port, payload, cnt);
+ svdm_consume_identity(port, p, cnt);
response[0] = VDO(USB_SID_PD, 1, CMD_DISCOVER_SVID);
rlen = 1;
break;
case CMD_DISCOVER_SVID:
/* 6.4.4.3.2 */
- if (svdm_consume_svids(port, payload, cnt)) {
+ if (svdm_consume_svids(port, p, cnt)) {
response[0] = VDO(USB_SID_PD, 1,
CMD_DISCOVER_SVID);
rlen = 1;
@@ -1163,7 +1173,7 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
break;
case CMD_DISCOVER_MODES:
/* 6.4.4.3.3 */
- svdm_consume_modes(port, payload, cnt);
+ svdm_consume_modes(port, p, cnt);
modep->svid_index++;
if (modep->svid_index < modep->nsvids) {
u16 svid = modep->svids[modep->svid_index];
@@ -1176,23 +1186,15 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
case CMD_ENTER_MODE:
if (adev && pdev) {
typec_altmode_update_active(pdev, true);
-
- if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
- response[0] = VDO(adev->svid, 1,
- CMD_EXIT_MODE);
- response[0] |= VDO_OPOS(adev->mode);
- return 1;
- }
+ *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
}
return 0;
case CMD_EXIT_MODE:
if (adev && pdev) {
typec_altmode_update_active(pdev, false);
-
/* Back to USB Operation */
- WARN_ON(typec_altmode_notify(adev,
- TYPEC_STATE_USB,
- NULL));
+ *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
+ return 0;
}
break;
default:
@@ -1203,11 +1205,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
switch (cmd) {
case CMD_ENTER_MODE:
/* Back to USB Operation */
- if (adev)
- WARN_ON(typec_altmode_notify(adev,
- TYPEC_STATE_USB,
- NULL));
- break;
+ *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
+ return 0;
default:
break;
}
@@ -1217,24 +1216,30 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
}
/* Informing the alternate mode drivers about everything */
- if (adev)
- typec_altmode_vdm(adev, p[0], &p[1], cnt);
-
+ *adev_action = ADEV_QUEUE_VDM;
return rlen;
}
static void tcpm_handle_vdm_request(struct tcpm_port *port,
const __le32 *payload, int cnt)
{
- int rlen = 0;
+ enum adev_actions adev_action = ADEV_NONE;
+ struct typec_altmode *adev;
+ u32 p[PD_MAX_PAYLOAD];
u32 response[8] = { };
- u32 p0 = le32_to_cpu(payload[0]);
+ int i, rlen = 0;
+
+ for (i = 0; i < cnt; i++)
+ p[i] = le32_to_cpu(payload[i]);
+
+ adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
+ PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
if (port->vdm_state == VDM_STATE_BUSY) {
/* If UFP responded busy retry after timeout */
- if (PD_VDO_CMDT(p0) == CMDT_RSP_BUSY) {
+ if (PD_VDO_CMDT(p[0]) == CMDT_RSP_BUSY) {
port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
- port->vdo_retry = (p0 & ~VDO_CMDT_MASK) |
+ port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
CMDT_INIT;
mod_delayed_work(port->wq, &port->vdm_state_machine,
msecs_to_jiffies(PD_T_VDM_BUSY));
@@ -1243,8 +1248,33 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
port->vdm_state = VDM_STATE_DONE;
}
- if (PD_VDO_SVDM(p0))
- rlen = tcpm_pd_svdm(port, payload, cnt, response);
+ if (PD_VDO_SVDM(p[0]))
+ rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
+
+ if (adev) {
+ switch (adev_action) {
+ case ADEV_NONE:
+ break;
+ case ADEV_NOTIFY_USB_AND_QUEUE_VDM:
+ WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL));
+ typec_altmode_vdm(adev, p[0], &p[1], cnt);
+ break;
+ case ADEV_QUEUE_VDM:
+ typec_altmode_vdm(adev, p[0], &p[1], cnt);
+ break;
+ case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
+ if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
+ response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE);
+ response[0] |= VDO_OPOS(adev->mode);
+ rlen = 1;
+ }
+ break;
+ case ADEV_ATTENTION:
+ if (typec_altmode_attention(adev, p[1]))
+ tcpm_log(port, "typec_altmode_attention no port partner altmode");
+ break;
+ }
+ }
if (rlen > 0) {
tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index 3c6d452e3bf4..4104eea03e80 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -462,8 +462,13 @@ static void stub_disconnect(struct usb_device *udev)
/* release port */
rc = usb_hub_release_port(udev->parent, udev->portnum,
(struct usb_dev_state *) udev);
- if (rc) {
- dev_dbg(&udev->dev, "unable to release port\n");
+ /*
+	 * NOTE: If a HUB disconnect triggered disconnect of the downstream
+	 * device, usb_hub_release_port will return -ENODEV, so we can
+	 * safely ignore that error here.
+ */
+ if (rc && (rc != -ENODEV)) {
+ dev_dbg(&udev->dev, "unable to release port (%i)\n", rc);
return;
}
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index 6f727034679f..46a72fe39719 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -72,12 +72,11 @@ static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
const char **extra_dbg)
{
#ifdef CONFIG_ACPI
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct device *dev = vdev->device;
acpi_handle handle = ACPI_HANDLE(dev);
acpi_status acpi_ret;
- acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, &buffer);
+ acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, NULL);
if (ACPI_FAILURE(acpi_ret)) {
if (extra_dbg)
*extra_dbg = acpi_format_exception(acpi_ret);
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 388597930b64..efd3782ead97 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1802,6 +1802,7 @@ struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
if (!buf) {
kfree(caps->buf);
+ caps->buf = NULL;
caps->size = 0;
return ERR_PTR(-ENOMEM);
}
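
The vfio_info_cap_add() fix is the general krealloc() failure rule: if the error path frees the old buffer, the stored pointer must be cleared too, or a later cleanup frees it a second time. A userspace sketch of the pattern:

  #include <stdlib.h>

  struct caps { char *buf; size_t size; };

  static int caps_grow(struct caps *c, size_t extra)
  {
          char *buf = realloc(c->buf, c->size + extra);

          if (!buf) {
                  free(c->buf);
                  c->buf = NULL;   /* avoid a double free in later cleanup */
                  c->size = 0;
                  return -1;
          }
          c->buf = buf;
          c->size += extra;
          return 0;
  }

  int main(void)
  {
          struct caps c = { NULL, 0 };

          caps_grow(&c, 16);
          free(c.buf);             /* safe even if caps_grow() failed */
          return 0;
  }
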
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 3e0267ead718..b19d60adc606 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1049,7 +1049,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
/* len is always initialized before use since we are always called with
* datalen > 0.
*/
- u32 uninitialized_var(len);
+ u32 len;
while (datalen > 0 && headcount < quota) {
if (unlikely(seg >= UIO_MAXIOV)) {
@@ -1106,7 +1106,7 @@ static void handle_rx(struct vhost_net *net)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_virtqueue *vq = &nvq->vq;
- unsigned uninitialized_var(in), log;
+ unsigned in, log;
struct vhost_log *vq_log;
struct msghdr msg = {
.msg_name = NULL,
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 97be299f0a8d..b2e92927e8a1 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2050,7 +2050,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct vhost_dev *dev = vq->dev;
struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
struct iovec *_iov;
- u64 s = 0;
+ u64 s = 0, last = addr + len - 1;
int ret = 0;
while ((u64)len > s) {
@@ -2061,7 +2061,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
}
node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
- addr, addr + len - 1);
+ addr, last);
if (node == NULL || node->start > addr) {
if (umem != dev->iotlb) {
ret = -EFAULT;
@@ -2583,12 +2583,11 @@ EXPORT_SYMBOL_GPL(vhost_disable_notify);
/* Create a new message. */
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
{
- struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
+ /* Make sure all padding within the structure is initialized. */
+ struct vhost_msg_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return NULL;
- /* Make sure all padding within the structure is initialized. */
- memset(&node->msg, 0, sizeof node->msg);
node->vq = vq;
node->msg.type = type;
return node;
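
In translate_desc() the loop advances addr as ranges are consumed, so recomputing addr + len - 1 each iteration pushed the interval-tree lookup past the caller's range; caching last once at entry fixes the bound. Worked numbers, assuming the first pass consumes 0x1000 bytes:

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          uint64_t addr = 0x1000, len = 0x2000;
          uint64_t last = addr + len - 1;          /* 0x2fff, fixed bound */

          addr += 0x1000;                          /* first range consumed */
          printf("old bound: %#llx\n",
                 (unsigned long long)(addr + len - 1));  /* 0x3fff: too far */
          printf("new bound: %#llx\n",
                 (unsigned long long)last);               /* 0x2fff */
          return 0;
  }
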
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 308df62655dd..64806e562bf6 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -353,7 +353,7 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
return NULL;
}
- pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
+ pkt->buf = kvmalloc(pkt->len, GFP_KERNEL);
if (!pkt->buf) {
kfree(pkt);
return NULL;
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index d344fb03cb86..ba0041194029 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -107,7 +107,7 @@ static int bd6107_backlight_check_fb(struct backlight_device *backlight,
{
struct bd6107 *bd = bl_get_data(backlight);
- return bd->pdata->fbdev == NULL || bd->pdata->fbdev == info->dev;
+ return bd->pdata->fbdev == NULL || bd->pdata->fbdev == info->device;
}
static const struct backlight_ops bd6107_backlight_ops = {
diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c
index 882359dd288c..aa00379392a0 100644
--- a/drivers/video/backlight/da9052_bl.c
+++ b/drivers/video/backlight/da9052_bl.c
@@ -117,6 +117,7 @@ static int da9052_backlight_probe(struct platform_device *pdev)
wleds->led_reg = platform_get_device_id(pdev)->driver_data;
wleds->state = DA9052_WLEDS_OFF;
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = DA9052_MAX_BRIGHTNESS;
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index 18e053e4716c..876c1be74787 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -46,7 +46,7 @@ static int gpio_backlight_check_fb(struct backlight_device *bl,
{
struct gpio_backlight *gbl = bl_get_data(bl);
- return gbl->fbdev == NULL || gbl->fbdev == info->dev;
+ return gbl->fbdev == NULL || gbl->fbdev == info->device;
}
static const struct backlight_ops gpio_backlight_ops = {
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index 8096202fbe5d..8d22ba62d78b 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -227,7 +227,7 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
if (rval < 0)
goto out_i2c_err;
brightness |= rval;
- goto out;
+ return brightness;
}
/* disable sleep */
@@ -238,11 +238,8 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
rval = lm3630a_read(pchip, REG_BRT_A);
if (rval < 0)
goto out_i2c_err;
- brightness = rval;
+ return rval;
-out:
- bl->props.brightness = brightness;
- return bl->props.brightness;
out_i2c_err:
dev_err(pchip->dev, "i2c failed to access register\n");
return 0;
@@ -304,7 +301,7 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
if (rval < 0)
goto out_i2c_err;
brightness |= rval;
- goto out;
+ return brightness;
}
/* disable sleep */
@@ -315,11 +312,8 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
rval = lm3630a_read(pchip, REG_BRT_B);
if (rval < 0)
goto out_i2c_err;
- brightness = rval;
+ return rval;
-out:
- bl->props.brightness = brightness;
- return bl->props.brightness;
out_i2c_err:
dev_err(pchip->dev, "i2c failed to access register\n");
return 0;
@@ -337,6 +331,7 @@ static int lm3630a_backlight_register(struct lm3630a_chip *pchip)
struct backlight_properties props;
const char *label;
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
if (pdata->leda_ctrl != LM3630A_LEDA_DISABLE) {
props.brightness = pdata->leda_init_brt;
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index 48c04155a5f9..bb617f4673e9 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -339,6 +339,7 @@ static int lm3639_probe(struct i2c_client *client,
}
/* backlight */
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.brightness = pdata->init_brt_led;
props.max_brightness = pdata->max_brt_led;
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index ba42f3fe0c73..d9b95dbd40d3 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -191,6 +191,7 @@ static int lp8788_backlight_register(struct lp8788_bl *bl)
int init_brt;
char *name;
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = MAX_BRIGHTNESS;
diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c
index c6ad73a784e2..c139bbc881ff 100644
--- a/drivers/video/backlight/lv5207lp.c
+++ b/drivers/video/backlight/lv5207lp.c
@@ -72,7 +72,7 @@ static int lv5207lp_backlight_check_fb(struct backlight_device *backlight,
{
struct lv5207lp *lv = bl_get_data(backlight);
- return lv->pdata->fbdev == NULL || lv->pdata->fbdev == info->dev;
+ return lv->pdata->fbdev == NULL || lv->pdata->fbdev == info->device;
}
static const struct backlight_ops lv5207lp_backlight_ops = {
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index a7e5f12687b7..0396df868bc7 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2032,7 +2032,7 @@ config FB_COBALT
config FB_SH7760
bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
- depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
+ depends on FB=y && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
|| CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -2243,7 +2243,6 @@ config FB_SSD1307
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_DEFERRED_IO
- select PWM
select FB_BACKLIGHT
help
This driver implements support for the Solomon SSD1307
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 3b7a7c74bf0a..09774ada36fb 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -714,16 +714,18 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
return -ENODEV;
panel = of_graph_get_remote_port_parent(endpoint);
- if (!panel)
- return -ENODEV;
+ if (!panel) {
+ err = -ENODEV;
+ goto out_endpoint_put;
+ }
err = clcdfb_of_get_backlight(panel, fb->panel);
if (err)
- return err;
+ goto out_panel_put;
err = clcdfb_of_get_mode(&fb->dev->dev, panel, fb->panel);
if (err)
- return err;
+ goto out_panel_put;
err = of_property_read_u32(fb->dev->dev.of_node, "max-memory-bandwidth",
&max_bandwidth);
@@ -752,11 +754,21 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
if (of_property_read_u32_array(endpoint,
"arm,pl11x,tft-r0g0b0-pads",
- tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0)
- return -ENOENT;
+ tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0) {
+ err = -ENOENT;
+ goto out_panel_put;
+ }
+
+ of_node_put(panel);
+ of_node_put(endpoint);
return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0],
tft_r0b0g0[1], tft_r0b0g0[2]);
+out_panel_put:
+ of_node_put(panel);
+out_endpoint_put:
+ of_node_put(endpoint);
+ return err;
}
static int clcdfb_of_vram_setup(struct clcd_fb *fb)
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index f940e8b66b85..6b1ad19b8db4 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -778,7 +778,12 @@ static int arkfb_set_par(struct fb_info *info)
return -EINVAL;
}
- ark_set_pixclock(info, (hdiv * info->var.pixclock) / hmul);
+ value = (hdiv * info->var.pixclock) / hmul;
+ if (!value) {
+ fb_dbg(info, "invalid pixclock\n");
+ value = 1;
+ }
+ ark_set_pixclock(info, value);
svga_set_timings(par->state.vgabase, &ark_timing_regs, &(info->var), hmul, hdiv,
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1,
(info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1,
@@ -789,6 +794,8 @@ static int arkfb_set_par(struct fb_info *info)
value = ((value * hmul / hdiv) / 8) - 5;
vga_wcrt(par->state.vgabase, 0x42, (value + 1) / 2);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 6dda5d885a03..bb9ecf12e763 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3410,11 +3410,15 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
}
info->fix.mmio_start = raddr;
+#if defined(__i386__) || defined(__ia64__)
/*
* By using strong UC we force the MTRR to never have an
* effect on the MMIO region on both non-PAT and PAT systems.
*/
par->ati_regbase = ioremap_uc(info->fix.mmio_start, 0x1000);
+#else
+ par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000);
+#endif
if (par->ati_regbase == NULL)
return -ENOMEM;
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 265d3b45efd0..d0335d4d5ab5 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1040,6 +1040,9 @@ static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
u32 pixclock;
int screen_size, plane;
+ if (!var->pixclock)
+ return -EINVAL;
+
plane = fbdev->plane;
/* Make sure that the mode respect all LCD controller and
@@ -1729,6 +1732,9 @@ static int au1200fb_drv_probe(struct platform_device *dev)
/* Now hook interrupt too */
irq = platform_get_irq(dev, 0);
+ if (irq < 0)
+ return irq;
+
ret = request_irq(irq, au1200fb_handle_irq,
IRQF_SHARED, "lcd", (void *)dev);
if (ret) {
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index 80fdd3ee0565..57b1e011d2d3 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -430,6 +430,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
err_release_fb:
framebuffer_release(p);
err_disable:
+ pci_disable_device(dp);
err_out:
return rc;
}
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index 436365efae73..5bb2b07cbe1a 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -247,6 +247,9 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
cursor.set = 0;
+ if (!vc->vc_font.data)
+ return;
+
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 82c20c6047b0..148ef1561c3f 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -78,11 +78,7 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
return 0;
inode_lock(inode);
- /* Kill off the delayed work */
- cancel_delayed_work_sync(&info->deferred_work);
-
- /* Run it immediately */
- schedule_delayed_work(&info->deferred_work, 0);
+ flush_delayed_work(&info->deferred_work);
inode_unlock(inode);
return 0;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 8721b7513136..e48792828939 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -123,8 +123,8 @@ static int logo_lines;
enums. */
static int logo_shown = FBCON_LOGO_CANSHOW;
/* console mappings */
-static int first_fb_vc;
-static int last_fb_vc = MAX_NR_CONSOLES - 1;
+static unsigned int first_fb_vc;
+static unsigned int last_fb_vc = MAX_NR_CONSOLES - 1;
static int fbcon_is_default = 1;
static int primary_device = -1;
static int fbcon_has_console_bind;
@@ -474,10 +474,12 @@ static int __init fb_console_setup(char *this_opt)
options += 3;
if (*options)
first_fb_vc = simple_strtoul(options, &options, 10) - 1;
- if (first_fb_vc < 0)
+ if (first_fb_vc >= MAX_NR_CONSOLES)
first_fb_vc = 0;
if (*options++ == '-')
last_fb_vc = simple_strtoul(options, &options, 10) - 1;
+ if (last_fb_vc < first_fb_vc || last_fb_vc >= MAX_NR_CONSOLES)
+ last_fb_vc = MAX_NR_CONSOLES - 1;
fbcon_is_default = 0;
continue;
}
@@ -602,7 +604,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
if (scr_readw(r) != vc->vc_video_erase_char)
break;
if (r != q && new_rows >= rows + logo_lines) {
- save = kmalloc(array3_size(logo_lines, new_cols, 2),
+ save = kzalloc(array3_size(logo_lines, new_cols, 2),
GFP_KERNEL);
if (save) {
int i = cols < new_cols ? cols : new_cols;
@@ -2495,9 +2497,12 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres))
return -EINVAL;
+ if (font->width > 32 || font->height > 32)
+ return -EINVAL;
+
/* Make sure drawing engine can handle the font */
- if (!(info->pixmap.blit_x & (1 << (font->width - 1))) ||
- !(info->pixmap.blit_y & (1 << (font->height - 1))))
+ if (!(info->pixmap.blit_x & BIT(font->width - 1)) ||
+ !(info->pixmap.blit_y & BIT(font->height - 1)))
return -EINVAL;
/* Make sure driver can handle the font length */
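
Making first_fb_vc and last_fb_vc unsigned lets one comparison reject both wrapped ("negative") and too-large console numbers, where the old int code only checked for negative values. A compact demonstration, using MAX_NR_CONSOLES = 63 as in the kernel:

  #include <stdio.h>

  #define MAX_NR_CONSOLES 63

  int main(void)
  {
          /* "fbcon=vc:0-70": 0 - 1 wraps, 70 - 1 is simply too large */
          unsigned int first = 0 - 1;      /* UINT_MAX */
          unsigned int last = 69;

          if (first >= MAX_NR_CONSOLES)    /* one test covers both cases */
                  first = 0;
          if (last < first || last >= MAX_NR_CONSOLES)
                  last = MAX_NR_CONSOLES - 1;
          printf("clamped to %u-%u\n", first, last);   /* 0-62 */
          return 0;
  }
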
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index 6473e0dfe146..e78ec7f72846 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -257,6 +257,11 @@ static const struct fb_videomode modedb[] = {
{ NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, 0,
FB_VMODE_DOUBLE },
+ /* 1920x1080 @ 60 Hz, 67.3 kHz hsync */
+ { NULL, 60, 1920, 1080, 6734, 148, 88, 36, 4, 44, 5, 0,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED },
+
/* 1920x1200 @ 60 Hz, 74.5 Khz hsync */
{ NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
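
fb_videomode stores pixclock as the pixel period in picoseconds, so the 6734 in the new 1920x1080 entry corresponds to roughly a 148.5 MHz dot clock:

  #include <stdio.h>

  int main(void)
  {
          unsigned long pixclock_ps = 6734;        /* period per pixel */
          double mhz = 1e12 / pixclock_ps / 1e6;

          printf("dot clock: %.1f MHz\n", mhz);    /* ~148.5 MHz */
          return 0;
  }
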
diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
index a4d05b1b17d7..665ef7a0a249 100644
--- a/drivers/video/fbdev/core/sysimgblt.c
+++ b/drivers/video/fbdev/core/sysimgblt.c
@@ -188,23 +188,29 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
{
u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
- u32 bit_mask, end_mask, eorx, shift;
- const char *s = image->data, *src;
+ u32 bit_mask, eorx, shift;
+ const u8 *s = image->data, *src;
u32 *dst;
- const u32 *tab = NULL;
+ const u32 *tab;
+ size_t tablen;
+ u32 colortab[16];
int i, j, k;
switch (bpp) {
case 8:
tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
+ tablen = 16;
break;
case 16:
tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
+ tablen = 4;
break;
case 32:
- default:
tab = cfb_tab32;
+ tablen = 2;
break;
+ default:
+ return;
}
for (i = ppw-1; i--; ) {
@@ -218,20 +224,62 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
eorx = fgx ^ bgx;
k = image->width/ppw;
+ for (i = 0; i < tablen; ++i)
+ colortab[i] = (tab[i] & eorx) ^ bgx;
+
for (i = image->height; i--; ) {
dst = dst1;
shift = 8;
src = s;
- for (j = k; j--; ) {
+ /*
+ * Manually unroll the per-line copying loop for better
+ * performance. This handles everything up to and including
+ * the last completely filled source byte.
+ */
+ switch (ppw) {
+ case 4: /* 8 bpp */
+ for (j = k; j >= 2; j -= 2, ++src) {
+ *dst++ = colortab[(*src >> 4) & bit_mask];
+ *dst++ = colortab[(*src >> 0) & bit_mask];
+ }
+ break;
+ case 2: /* 16 bpp */
+ for (j = k; j >= 4; j -= 4, ++src) {
+ *dst++ = colortab[(*src >> 6) & bit_mask];
+ *dst++ = colortab[(*src >> 4) & bit_mask];
+ *dst++ = colortab[(*src >> 2) & bit_mask];
+ *dst++ = colortab[(*src >> 0) & bit_mask];
+ }
+ break;
+ case 1: /* 32 bpp */
+ for (j = k; j >= 8; j -= 8, ++src) {
+ *dst++ = colortab[(*src >> 7) & bit_mask];
+ *dst++ = colortab[(*src >> 6) & bit_mask];
+ *dst++ = colortab[(*src >> 5) & bit_mask];
+ *dst++ = colortab[(*src >> 4) & bit_mask];
+ *dst++ = colortab[(*src >> 3) & bit_mask];
+ *dst++ = colortab[(*src >> 2) & bit_mask];
+ *dst++ = colortab[(*src >> 1) & bit_mask];
+ *dst++ = colortab[(*src >> 0) & bit_mask];
+ }
+ break;
+ }
+
+ /*
+ * For image widths that are not a multiple of 8, there
+ * are trailing pixels left on the current line. Blit
+ * them as well.
+ */
+ for (; j--; ) {
shift -= ppw;
- end_mask = tab[(*src >> shift) & bit_mask];
- *dst++ = (end_mask & eorx) ^ bgx;
+ *dst++ = colortab[(*src >> shift) & bit_mask];
if (!shift) {
shift = 8;
- src++;
+ ++src;
}
}
+
dst1 += p->fix.line_length;
s += spitch;
}
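
The fast_imageblit() rework rests on the identity (tab & (fgx ^ bgx)) ^ bgx: where the expansion-table entry has 1-bits the result takes fgx bits, elsewhere bgx bits, so precomputing the table once per blit removes an AND/XOR pair from the inner loop. A standalone check of the identity:

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          uint32_t fgx = 0xdeadbeef, bgx = 0x01020304;
          uint32_t eorx = fgx ^ bgx;
          uint32_t tab = 0x00ff00ff;               /* example expansion mask */
          uint32_t out = (tab & eorx) ^ bgx;
          uint32_t ref = (fgx & tab) | (bgx & ~tab);   /* naive bit select */

          printf("%08x %08x %s\n", out, ref,
                 out == ref ? "match" : "mismatch");
          return 0;
  }
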
diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
index d04a047094fc..546a5db5241b 100644
--- a/drivers/video/fbdev/ep93xx-fb.c
+++ b/drivers/video/fbdev/ep93xx-fb.c
@@ -474,7 +474,6 @@ static int ep93xxfb_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- info->dev = &pdev->dev;
platform_set_drvdata(pdev, info);
fbi = info->par;
fbi->mach_info = mach_info;
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index d19f58263b4e..d4c2a6b3839e 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -490,7 +490,7 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
* Workaround for failed writing desc register of planes.
* Needed with MPC5121 DIU rev 2.0 silicon.
*/
-void wr_reg_wa(u32 *reg, u32 val)
+static void wr_reg_wa(u32 *reg, u32 val)
{
do {
out_be32(reg, val);
diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c
index b0f07d676eb3..ffda25089e2c 100644
--- a/drivers/video/fbdev/geode/lxfb_core.c
+++ b/drivers/video/fbdev/geode/lxfb_core.c
@@ -234,6 +234,9 @@ static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
+ if (!var->pixclock)
+ return -EINVAL;
+
if (var->xres > 1920 || var->yres > 1440)
return -EINVAL;
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 347cf8babc3e..1434eb0220e7 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -400,7 +400,7 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
u32 xres, right, hslen, left, xtotal;
u32 yres, lower, vslen, upper, ytotal;
u32 vxres, xoffset, vyres, yoffset;
- u32 bpp, base, dacspeed24, mem;
+ u32 bpp, base, dacspeed24, mem, freq;
u8 r7;
int i;
@@ -643,7 +643,12 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
par->atc[VGA_ATC_OVERSCAN] = 0;
/* Calculate VCLK that most closely matches the requested dot clock */
- i740_calc_vclk((((u32)1e9) / var->pixclock) * (u32)(1e3), par);
+ freq = (((u32)1e9) / var->pixclock) * (u32)(1e3);
+ if (freq < I740_RFREQ_FIX) {
+ fb_dbg(info, "invalid pixclock\n");
+ freq = I740_RFREQ_FIX;
+ }
+ i740_calc_vclk(freq, par);
/* Since we program the clocks ourselves, always use VCLK2. */
par->misc |= 0x0C;
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 5f610d4929dd..3155424cca6b 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1346,7 +1346,7 @@ static struct fb_ops imsttfb_ops = {
.fb_ioctl = imsttfb_ioctl,
};
-static void init_imstt(struct fb_info *info)
+static int init_imstt(struct fb_info *info)
{
struct imstt_par *par = info->par;
__u32 i, tmp, *ip, *end;
@@ -1419,7 +1419,7 @@ static void init_imstt(struct fb_info *info)
|| !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel);
framebuffer_release(info);
- return;
+ return -ENODEV;
}
sprintf(info->fix.id, "IMS TT (%s)", par->ramdac == IBM ? "IBM" : "TVP");
@@ -1455,12 +1455,13 @@ static void init_imstt(struct fb_info *info)
if (register_framebuffer(info) < 0) {
framebuffer_release(info);
- return;
+ return -ENODEV;
}
tmp = (read_reg_le32(par->dc_regs, SSTATUS) & 0x0f00) >> 8;
fb_info(info, "%s frame buffer; %uMB vram; chip version %u\n",
info->fix.id, info->fix.smem_len >> 20, tmp);
+ return 0;
}
static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1469,6 +1470,7 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct imstt_par *par;
struct fb_info *info;
struct device_node *dp;
+ int ret = -ENOMEM;
dp = pci_device_to_OF_node(pdev);
if(dp)
@@ -1487,8 +1489,8 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!request_mem_region(addr, size, "imsttfb")) {
printk(KERN_ERR "imsttfb: Can't reserve memory region\n");
- framebuffer_release(info);
- return -ENODEV;
+ ret = -ENODEV;
+ goto release_info;
}
switch (pdev->device) {
@@ -1504,23 +1506,42 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
default:
printk(KERN_INFO "imsttfb: Device 0x%x unknown, "
"contact maintainer.\n", pdev->device);
- release_mem_region(addr, size);
- framebuffer_release(info);
- return -ENODEV;
+ ret = -ENODEV;
+ goto release_mem_region;
}
info->fix.smem_start = addr;
info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
0x400000 : 0x800000);
+ if (!info->screen_base)
+ goto release_mem_region;
info->fix.mmio_start = addr + 0x800000;
par->dc_regs = ioremap(addr + 0x800000, 0x1000);
+ if (!par->dc_regs)
+ goto unmap_screen_base;
par->cmap_regs_phys = addr + 0x840000;
par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
+ if (!par->cmap_regs)
+ goto unmap_dc_regs;
info->pseudo_palette = par->palette;
- init_imstt(info);
+ ret = init_imstt(info);
+ if (ret)
+ goto unmap_cmap_regs;
pci_set_drvdata(pdev, info);
return 0;
+
+unmap_cmap_regs:
+ iounmap(par->cmap_regs);
+unmap_dc_regs:
+ iounmap(par->dc_regs);
+unmap_screen_base:
+ iounmap(info->screen_base);
+release_mem_region:
+ release_mem_region(addr, size);
+release_info:
+ framebuffer_release(info);
+ return ret;
}
static void imsttfb_remove(struct pci_dev *pdev)
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index ffde3107104b..dbc8808b093a 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -601,10 +601,10 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
if (var->hsync_len < 1 || var->hsync_len > 64)
printk(KERN_ERR "%s: invalid hsync_len %d\n",
info->fix.id, var->hsync_len);
- if (var->left_margin > 255)
+ if (var->left_margin < 3 || var->left_margin > 255)
printk(KERN_ERR "%s: invalid left_margin %d\n",
info->fix.id, var->left_margin);
- if (var->right_margin > 255)
+ if (var->right_margin < 1 || var->right_margin > 255)
printk(KERN_ERR "%s: invalid right_margin %d\n",
info->fix.id, var->right_margin);
if (var->yres < 1 || var->yres > ymax_mask)
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index a76c61512c60..274e6bb4a961 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -1214,6 +1214,9 @@ static int intelfb_check_var(struct fb_var_screeninfo *var,
dinfo = GET_DINFO(info);
+ if (!var->pixclock)
+ return -EINVAL;
+
/* update the pitch */
if (intelfbhw_validate_mode(dinfo, var) != 0)
return -EINVAL;
diff --git a/drivers/video/fbdev/matrox/matroxfb_maven.c b/drivers/video/fbdev/matrox/matroxfb_maven.c
index eda893b7a2e9..9a98c4a6ba33 100644
--- a/drivers/video/fbdev/matrox/matroxfb_maven.c
+++ b/drivers/video/fbdev/matrox/matroxfb_maven.c
@@ -300,7 +300,7 @@ static int matroxfb_mavenclock(const struct matrox_pll_ctl *ctl,
unsigned int* in, unsigned int* feed, unsigned int* post,
unsigned int* htotal2) {
unsigned int fvco;
- unsigned int uninitialized_var(p);
+ unsigned int p;
fvco = matroxfb_PLL_mavenclock(&maven1000_pll, ctl, htotal, vtotal, in, feed, &p, htotal2);
if (!fvco)
@@ -732,8 +732,8 @@ static int maven_find_exact_clocks(unsigned int ht, unsigned int vt,
for (x = 0; x < 8; x++) {
unsigned int c;
- unsigned int uninitialized_var(a), uninitialized_var(b),
- uninitialized_var(h2);
+ unsigned int a, b,
+ h2;
unsigned int h = ht + 2 + x;
if (!matroxfb_mavenclock((m->mode == MATROXFB_OUTPUT_MODE_PAL) ? &maven_PAL : &maven_NTSC, h, vt, &a, &b, &c, &h2)) {
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index 17174cd7a5bb..b02b0bc10613 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -510,7 +510,9 @@ static int mmphw_probe(struct platform_device *pdev)
ret = -ENOENT;
goto failed;
}
- clk_prepare_enable(ctrl->clk);
+ ret = clk_prepare_enable(ctrl->clk);
+ if (ret)
+ goto failed;
/* init global regs */
ctrl_set_default(ctrl);
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index fbeeed5afe35..aa502b3ba25a 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -766,6 +766,8 @@ static int nvidiafb_check_var(struct fb_var_screeninfo *var,
int pitch, err = 0;
NVTRACE_ENTER();
+ if (!var->pixclock)
+ return -EINVAL;
var->transp.offset = 0;
var->transp.length = 0;
diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c
index a75ae0c9b14c..d1cd8785d011 100644
--- a/drivers/video/fbdev/omap/lcd_mipid.c
+++ b/drivers/video/fbdev/omap/lcd_mipid.c
@@ -563,11 +563,15 @@ static int mipid_spi_probe(struct spi_device *spi)
r = mipid_detect(md);
if (r < 0)
- return r;
+ goto free_md;
omapfb_register_panel(&md->panel);
return 0;
+
+free_md:
+ kfree(md);
+ return r;
}
static int mipid_spi_remove(struct spi_device *spi)
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index 1dcf02e12af4..0ec4be2f2e8c 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -616,6 +616,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
return -EINVAL;
}
+ if (!var->pixclock) {
+ DPRINTK("pixclock is zero\n");
+ return -EINVAL;
+ }
+
if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
DPRINTK("pixclock too high (%ldKHz)\n",
PICOS2KHZ(var->pixclock));
@@ -1524,8 +1529,10 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
info = framebuffer_alloc(sizeof(struct pm2fb_par), &pdev->dev);
- if (!info)
- return -ENOMEM;
+ if (!info) {
+ err = -ENOMEM;
+ goto err_exit_disable;
+ }
default_par = info->par;
switch (pdev->device) {
@@ -1706,6 +1713,8 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
release_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len);
err_exit_neither:
framebuffer_release(info);
+ err_exit_disable:
+ pci_disable_device(pdev);
return retval;
}
@@ -1732,6 +1741,7 @@ static void pm2fb_remove(struct pci_dev *pdev)
fb_dealloc_cmap(&info->cmap);
kfree(info->pixmap.addr);
framebuffer_release(info);
+ pci_disable_device(pdev);
}
static const struct pci_device_id pm2fb_id_table[] = {
diff --git a/drivers/video/fbdev/pm3fb.c b/drivers/video/fbdev/pm3fb.c
index 6130aa56a1e9..7bd45334dcac 100644
--- a/drivers/video/fbdev/pm3fb.c
+++ b/drivers/video/fbdev/pm3fb.c
@@ -821,9 +821,9 @@ static void pm3fb_write_mode(struct fb_info *info)
wmb();
{
- unsigned char uninitialized_var(m); /* ClkPreScale */
- unsigned char uninitialized_var(n); /* ClkFeedBackScale */
- unsigned char uninitialized_var(p); /* ClkPostScale */
+ unsigned char m; /* ClkPreScale */
+ unsigned char n; /* ClkFeedBackScale */
+ unsigned char p; /* ClkPostScale */
unsigned long pixclock = PICOS2KHZ(info->var.pixclock);
(void)pm3fb_calculate_clock(pixclock, &m, &n, &p);
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 7c4694d70dac..15162b37f302 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -382,7 +382,7 @@ pxa3xx_gcu_write(struct file *file, const char *buff,
struct pxa3xx_gcu_batch *buffer;
struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);
- int words = count / 4;
+ size_t words = count / 4;
/* Does not need to be atomic. There's a lock in user space,
* but anyhow, this is just for statistics. */
diff --git a/drivers/video/fbdev/riva/riva_hw.c b/drivers/video/fbdev/riva/riva_hw.c
index 0601c13f2105..f90b9327bae7 100644
--- a/drivers/video/fbdev/riva/riva_hw.c
+++ b/drivers/video/fbdev/riva/riva_hw.c
@@ -1245,8 +1245,7 @@ int CalcStateExt
)
{
int pixelDepth;
- int uninitialized_var(VClk),uninitialized_var(m),
- uninitialized_var(n), uninitialized_var(p);
+ int VClk, m, n, p;
/*
* Save mode parameters.
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index be16c349c10f..228e5ee7a547 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -902,6 +902,8 @@ static int s3fb_set_par(struct fb_info *info)
value = clamp((htotal + hsstart + 1) / 2 + 2, hsstart + 4, htotal + 1);
svga_wcrt_multi(par->state.vgabase, s3_dtpc_regs, value);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index d5d22d9c0f56..368500c40140 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -869,6 +869,9 @@ static int savagefb_check_var(struct fb_var_screeninfo *var,
DBG("savagefb_check_var");
+ if (!var->pixclock)
+ return -EINVAL;
+
var->transp.offset = 0;
var->transp.length = 0;
switch (var->bits_per_pixel) {
diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
index fde27feae5d0..d6b2ce95a859 100644
--- a/drivers/video/fbdev/sis/init.c
+++ b/drivers/video/fbdev/sis/init.c
@@ -355,12 +355,12 @@ SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay,
}
break;
case 400:
- if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 800) && (LCDwidth >= 600))) {
+ if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 800) && (LCDheight >= 600))) {
if(VDisplay == 300) ModeIndex = ModeIndex_400x300[Depth];
}
break;
case 512:
- if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 1024) && (LCDwidth >= 768))) {
+ if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 1024) && (LCDheight >= 768))) {
if(VDisplay == 384) ModeIndex = ModeIndex_512x384[Depth];
}
break;
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index b443a8ed4600..2fdd02e51f5f 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -1474,6 +1474,8 @@ sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
vtotal = var->upper_margin + var->lower_margin + var->vsync_len;
+ if (!var->pixclock)
+ return -EINVAL;
pixclock = var->pixclock;
if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index ad60f739c6a1..53fb34f074c7 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -97,7 +97,6 @@ struct ufx_data {
struct kref kref;
int fb_count;
bool virtualized; /* true when physical usb device not present */
- struct delayed_work free_framebuffer_work;
atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */
atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
u8 *edid; /* null until we read edid from hw or get from sysfs */
@@ -137,6 +136,8 @@ static int ufx_submit_urb(struct ufx_data *dev, struct urb * urb, size_t len);
static int ufx_alloc_urb_list(struct ufx_data *dev, int count, size_t size);
static void ufx_free_urb_list(struct ufx_data *dev);
+static DEFINE_MUTEX(disconnect_mutex);
+
/* reads a control register */
static int ufx_reg_read(struct ufx_data *dev, u32 index, u32 *data)
{
@@ -1070,9 +1071,13 @@ static int ufx_ops_open(struct fb_info *info, int user)
if (user == 0 && !console)
return -EBUSY;
+ mutex_lock(&disconnect_mutex);
+
/* If the USB device is gone, we don't accept new opens */
- if (dev->virtualized)
+ if (dev->virtualized) {
+ mutex_unlock(&disconnect_mutex);
return -ENODEV;
+ }
dev->fb_count++;
@@ -1096,6 +1101,8 @@ static int ufx_ops_open(struct fb_info *info, int user)
pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d",
info->node, user, info, dev->fb_count);
+ mutex_unlock(&disconnect_mutex);
+
return 0;
}
@@ -1108,15 +1115,24 @@ static void ufx_free(struct kref *kref)
{
struct ufx_data *dev = container_of(kref, struct ufx_data, kref);
- /* this function will wait for all in-flight urbs to complete */
- if (dev->urbs.count > 0)
- ufx_free_urb_list(dev);
+ kfree(dev);
+}
- pr_debug("freeing ufx_data %p", dev);
+static void ufx_ops_destroy(struct fb_info *info)
+{
+ struct ufx_data *dev = info->par;
+ int node = info->node;
- kfree(dev);
+ /* Assume info structure is freed after this point */
+ framebuffer_release(info);
+
+ pr_debug("fb_info for /dev/fb%d has been freed", node);
+
+ /* release reference taken by kref_init in probe() */
+ kref_put(&dev->kref, ufx_free);
}
+
static void ufx_release_urb_work(struct work_struct *work)
{
struct urb_node *unode = container_of(work, struct urb_node,
@@ -1125,14 +1141,9 @@ static void ufx_release_urb_work(struct work_struct *work)
up(&unode->dev->urbs.limit_sem);
}
-static void ufx_free_framebuffer_work(struct work_struct *work)
+static void ufx_free_framebuffer(struct ufx_data *dev)
{
- struct ufx_data *dev = container_of(work, struct ufx_data,
- free_framebuffer_work.work);
struct fb_info *info = dev->info;
- int node = info->node;
-
- unregister_framebuffer(info);
if (info->cmap.len != 0)
fb_dealloc_cmap(&info->cmap);
@@ -1144,11 +1155,6 @@ static void ufx_free_framebuffer_work(struct work_struct *work)
dev->info = NULL;
- /* Assume info structure is freed after this point */
- framebuffer_release(info);
-
- pr_debug("fb_info for /dev/fb%d has been freed", node);
-
/* ref taken in probe() as part of registering framebuffer */
kref_put(&dev->kref, ufx_free);
}
@@ -1160,11 +1166,13 @@ static int ufx_ops_release(struct fb_info *info, int user)
{
struct ufx_data *dev = info->par;
+ mutex_lock(&disconnect_mutex);
+
dev->fb_count--;
/* We can't free fb_info here - fbmem will touch it when we return */
if (dev->virtualized && (dev->fb_count == 0))
- schedule_delayed_work(&dev->free_framebuffer_work, HZ);
+ ufx_free_framebuffer(dev);
if ((dev->fb_count == 0) && (info->fbdefio)) {
fb_deferred_io_cleanup(info);
@@ -1178,6 +1186,8 @@ static int ufx_ops_release(struct fb_info *info, int user)
kref_put(&dev->kref, ufx_free);
+ mutex_unlock(&disconnect_mutex);
+
return 0;
}
@@ -1284,6 +1294,7 @@ static struct fb_ops ufx_ops = {
.fb_blank = ufx_ops_blank,
.fb_check_var = ufx_ops_check_var,
.fb_set_par = ufx_ops_set_par,
+ .fb_destroy = ufx_ops_destroy,
};
/* Assumes &info->lock held by caller
@@ -1611,7 +1622,7 @@ static int ufx_usb_probe(struct usb_interface *interface,
struct usb_device *usbdev;
struct ufx_data *dev;
struct fb_info *info;
- int retval;
+ int retval = -ENOMEM;
u32 id_rev, fpga_rev;
/* usb initialization */
@@ -1643,15 +1654,17 @@ static int ufx_usb_probe(struct usb_interface *interface,
if (!ufx_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
dev_err(dev->gdev, "ufx_alloc_urb_list failed\n");
- goto e_nomem;
+ goto put_ref;
}
/* We don't register a new USB class. Our client interface is fbdev */
/* allocates framebuffer driver structure, not framebuffer memory */
info = framebuffer_alloc(0, &usbdev->dev);
- if (!info)
- goto e_nomem;
+ if (!info) {
+ dev_err(dev->gdev, "framebuffer_alloc failed\n");
+ goto free_urb_list;
+ }
dev->info = info;
info->par = dev;
@@ -1665,9 +1678,6 @@ static int ufx_usb_probe(struct usb_interface *interface,
goto destroy_modedb;
}
- INIT_DELAYED_WORK(&dev->free_framebuffer_work,
- ufx_free_framebuffer_work);
-
retval = ufx_reg_read(dev, 0x3000, &id_rev);
check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval);
dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev);
@@ -1697,22 +1707,34 @@ static int ufx_usb_probe(struct usb_interface *interface,
check_warn_goto_error(retval, "unable to find common mode for display and adapter");
retval = ufx_reg_set_bits(dev, 0x4000, 0x00000001);
- check_warn_goto_error(retval, "error %d enabling graphics engine", retval);
+ if (retval < 0) {
+ dev_err(dev->gdev, "error %d enabling graphics engine", retval);
+ goto setup_modes;
+ }
/* ready to begin using device */
atomic_set(&dev->usb_active, 1);
dev_dbg(dev->gdev, "checking var");
retval = ufx_ops_check_var(&info->var, info);
- check_warn_goto_error(retval, "error %d ufx_ops_check_var", retval);
+ if (retval < 0) {
+ dev_err(dev->gdev, "error %d ufx_ops_check_var", retval);
+ goto reset_active;
+ }
dev_dbg(dev->gdev, "setting par");
retval = ufx_ops_set_par(info);
- check_warn_goto_error(retval, "error %d ufx_ops_set_par", retval);
+ if (retval < 0) {
+ dev_err(dev->gdev, "error %d ufx_ops_set_par", retval);
+ goto reset_active;
+ }
dev_dbg(dev->gdev, "registering framebuffer");
retval = register_framebuffer(info);
- check_warn_goto_error(retval, "error %d register_framebuffer", retval);
+ if (retval < 0) {
+ dev_err(dev->gdev, "error %d register_framebuffer", retval);
+ goto reset_active;
+ }
dev_info(dev->gdev, "SMSC UDX USB device /dev/fb%d attached. %dx%d resolution."
" Using %dK framebuffer memory\n", info->node,
@@ -1720,28 +1742,34 @@ static int ufx_usb_probe(struct usb_interface *interface,
return 0;
-error:
- fb_dealloc_cmap(&info->cmap);
-destroy_modedb:
+reset_active:
+ atomic_set(&dev->usb_active, 0);
+setup_modes:
fb_destroy_modedb(info->monspecs.modedb);
vfree(info->screen_base);
fb_destroy_modelist(&info->modelist);
+error:
+ fb_dealloc_cmap(&info->cmap);
+destroy_modedb:
framebuffer_release(info);
+free_urb_list:
+ if (dev->urbs.count > 0)
+ ufx_free_urb_list(dev);
put_ref:
kref_put(&dev->kref, ufx_free); /* ref for framebuffer */
kref_put(&dev->kref, ufx_free); /* last ref from kref_init */
return retval;
-
-e_nomem:
- retval = -ENOMEM;
- goto put_ref;
}
static void ufx_usb_disconnect(struct usb_interface *interface)
{
struct ufx_data *dev;
+ struct fb_info *info;
+
+ mutex_lock(&disconnect_mutex);
dev = usb_get_intfdata(interface);
+ info = dev->info;
pr_debug("USB disconnect starting\n");
@@ -1755,12 +1783,17 @@ static void ufx_usb_disconnect(struct usb_interface *interface)
/* if clients still have us open, will be freed on last close */
if (dev->fb_count == 0)
- schedule_delayed_work(&dev->free_framebuffer_work, 0);
+ ufx_free_framebuffer(dev);
- /* release reference taken by kref_init in probe() */
- kref_put(&dev->kref, ufx_free);
+ /* this function will wait for all in-flight urbs to complete */
+ if (dev->urbs.count > 0)
+ ufx_free_urb_list(dev);
+
+ pr_debug("freeing ufx_data %p", dev);
+
+ unregister_framebuffer(info);
- /* consider ufx_data freed */
+ mutex_unlock(&disconnect_mutex);
}
static struct usb_driver ufx_driver = {
diff --git a/drivers/video/fbdev/sticore.h b/drivers/video/fbdev/sticore.h
index fb8f58f9867a..0416e2bc27d8 100644
--- a/drivers/video/fbdev/sticore.h
+++ b/drivers/video/fbdev/sticore.h
@@ -237,7 +237,7 @@ struct sti_rom_font {
u8 height;
u8 font_type; /* language type */
u8 bytes_per_char;
- u32 next_font;
+ s32 next_font; /* note: signed int */
u8 underline_height;
u8 underline_pos;
u8 res008[2];
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 9e88e3f594c2..9c2be0802651 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -922,6 +922,28 @@ SETUP_HCRX(struct stifb_info *fb)
/* ------------------- driver specific functions --------------------------- */
static int
+stifb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct stifb_info *fb = container_of(info, struct stifb_info, info);
+
+ if (var->xres != fb->info.var.xres ||
+ var->yres != fb->info.var.yres ||
+ var->bits_per_pixel != fb->info.var.bits_per_pixel)
+ return -EINVAL;
+
+ var->xres_virtual = var->xres;
+ var->yres_virtual = var->yres;
+ var->xoffset = 0;
+ var->yoffset = 0;
+ var->grayscale = fb->info.var.grayscale;
+ var->red.length = fb->info.var.red.length;
+ var->green.length = fb->info.var.green.length;
+ var->blue.length = fb->info.var.blue.length;
+
+ return 0;
+}
+
+static int
stifb_setcolreg(u_int regno, u_int red, u_int green,
u_int blue, u_int transp, struct fb_info *info)
{
@@ -1103,6 +1125,7 @@ stifb_init_display(struct stifb_info *fb)
static struct fb_ops stifb_ops = {
.owner = THIS_MODULE,
+ .fb_check_var = stifb_check_var,
.fb_setcolreg = stifb_setcolreg,
.fb_blank = stifb_blank,
.fb_fillrect = cfb_fillrect,
@@ -1122,6 +1145,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
struct stifb_info *fb;
struct fb_info *info;
unsigned long sti_rom_address;
+ char modestr[32];
char *dev_name;
int bpp, xres, yres;
@@ -1257,7 +1281,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
/* limit fbsize to max visible screen size */
if (fix->smem_len > yres*fix->line_length)
- fix->smem_len = yres*fix->line_length;
+ fix->smem_len = ALIGN(yres*fix->line_length, 4*1024*1024);
fix->accel = FB_ACCEL_NONE;
@@ -1300,6 +1324,9 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA;
info->pseudo_palette = &fb->pseudo_palette;
+ scnprintf(modestr, sizeof(modestr), "%dx%d-%d", xres, yres, bpp);
+ fb_find_mode(&info->var, info, modestr, NULL, 0, NULL, bpp);
+
/* This has to be done !!! */
if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0))
goto out_err1;
@@ -1344,6 +1371,7 @@ out_err1:
iounmap(info->screen_base);
out_err0:
kfree(fb);
+ sti->info = NULL;
return -ENXIO;
}
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index 286b2371c7dd..eab2b4f87d68 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -166,6 +166,9 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct tga_par *par = (struct tga_par *)info->par;
+ if (!var->pixclock)
+ return -EINVAL;
+
if (par->tga_type == TGA_TYPE_8PLANE) {
if (var->bits_per_pixel != 8)
return -EINVAL;
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 8e8af408998e..24e82fca19ad 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -27,6 +27,8 @@
#include <video/udlfb.h>
#include "edid.h"
+#define OUT_EP_NUM 1 /* The endpoint number we will use */
+
static const struct fb_fix_screeninfo dlfb_fix = {
.id = "udlfb",
.type = FB_TYPE_PACKED_PIXELS,
@@ -1652,7 +1654,7 @@ static int dlfb_usb_probe(struct usb_interface *intf,
struct fb_info *info;
int retval;
struct usb_device *usbdev = interface_to_usbdev(intf);
- struct usb_endpoint_descriptor *out;
+ static u8 out_ep[] = {OUT_EP_NUM + USB_DIR_OUT, 0};
/* usb initialization */
dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
@@ -1666,9 +1668,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
dlfb->udev = usb_get_dev(usbdev);
usb_set_intfdata(intf, dlfb);
- retval = usb_find_common_endpoints(intf->cur_altsetting, NULL, &out, NULL, NULL);
- if (retval) {
- dev_err(&intf->dev, "Device should have at lease 1 bulk endpoint!\n");
+ if (!usb_check_bulk_endpoints(intf, out_ep)) {
+ dev_err(&intf->dev, "Invalid DisplayLink device!\n");
+ retval = -EINVAL;
goto error;
}
@@ -1927,7 +1929,8 @@ retry:
}
/* urb->transfer_buffer_length set to actual before submit */
- usb_fill_bulk_urb(urb, dlfb->udev, usb_sndbulkpipe(dlfb->udev, 1),
+ usb_fill_bulk_urb(urb, dlfb->udev,
+ usb_sndbulkpipe(dlfb->udev, OUT_EP_NUM),
buf, size, dlfb_urb_completion, unode);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 27c92315e01c..8807ea7ad2d1 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1771,6 +1771,7 @@ static int uvesafb_probe(struct platform_device *dev)
out_unmap:
iounmap(info->screen_base);
out_mem:
+ arch_phys_wc_del(par->mtrr_handle);
release_mem_region(info->fix.smem_start, info->fix.smem_len);
out_reg:
release_region(0x3c0, 32);
@@ -1949,10 +1950,10 @@ static void uvesafb_exit(void)
}
}
- cn_del_callback(&uvesafb_cn_id);
driver_remove_file(&uvesafb_driver.driver, &driver_attr_v86d);
platform_device_unregister(uvesafb_device);
platform_driver_unregister(&uvesafb_driver);
+ cn_del_callback(&uvesafb_cn_id);
}
module_exit(uvesafb_exit);
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
index 498038a964ee..ea6671723606 100644
--- a/drivers/video/fbdev/vermilion/vermilion.c
+++ b/drivers/video/fbdev/vermilion/vermilion.c
@@ -277,8 +277,10 @@ static int vmlfb_get_gpu(struct vml_par *par)
mutex_unlock(&vml_mutex);
- if (pci_enable_device(par->gpu) < 0)
+ if (pci_enable_device(par->gpu) < 0) {
+ pci_dev_put(par->gpu);
return -ENODEV;
+ }
return 0;
}
diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c
index ffa2ca2d3f5e..ce366b80bda4 100644
--- a/drivers/video/fbdev/via/via-core.c
+++ b/drivers/video/fbdev/via/via-core.c
@@ -732,7 +732,14 @@ static int __init via_core_init(void)
return ret;
viafb_i2c_init();
viafb_gpio_init();
- return pci_register_driver(&via_driver);
+ ret = pci_register_driver(&via_driver);
+ if (ret) {
+ viafb_gpio_exit();
+ viafb_i2c_exit();
+ return ret;
+ }
+
+ return 0;
}
static void __exit via_core_exit(void)
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index c339a8fbad81..61e2028924a6 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -504,6 +504,8 @@ static int vt8623fb_set_par(struct fb_info *info)
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1, 1,
1, info->node);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 32c2c52f7e84..484c2f09f2ea 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -361,8 +361,8 @@ static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
goto err_vbg_core_exit;
}
- ret = devm_request_irq(dev, pci->irq, vbg_core_isr, IRQF_SHARED,
- DEVICE_NAME, gdev);
+ ret = request_irq(pci->irq, vbg_core_isr, IRQF_SHARED, DEVICE_NAME,
+ gdev);
if (ret) {
vbg_err("vboxguest: Error requesting irq: %d\n", ret);
goto err_vbg_core_exit;
@@ -372,7 +372,7 @@ static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
if (ret) {
vbg_err("vboxguest: Error misc_register %s failed: %d\n",
DEVICE_NAME, ret);
- goto err_vbg_core_exit;
+ goto err_free_irq;
}
ret = misc_register(&gdev->misc_device_user);
@@ -408,6 +408,8 @@ err_unregister_misc_device_user:
misc_deregister(&gdev->misc_device_user);
err_unregister_misc_device:
misc_deregister(&gdev->misc_device);
+err_free_irq:
+ free_irq(pci->irq, gdev);
err_vbg_core_exit:
vbg_core_exit(gdev);
err_disable_pcidev:
@@ -424,6 +426,7 @@ static void vbg_pci_remove(struct pci_dev *pci)
vbg_gdev = NULL;
mutex_unlock(&vbg_gdev_mutex);
+ free_irq(pci->irq, gdev);
device_remove_file(gdev->dev, &dev_attr_host_features);
device_remove_file(gdev->dev, &dev_attr_host_version);
misc_deregister(&gdev->misc_device_user);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 1e444826a66e..4c44bbe1ae7c 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -365,7 +365,11 @@ static inline s64 towards_target(struct virtio_balloon *vb)
if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
num_pages = le32_to_cpu((__force __le32)num_pages);
- target = num_pages;
+ /*
+ * Aligned up to guest page size to avoid inflating and deflating
+ * balloon endlessly.
+ */
+ target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);
return target - vb->num_pages;
}
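
towards_target() now rounds the host's request up to a whole guest page. On a guest whose page spans 16 balloon (4 KiB) pages — hypothetical numbers below — a request that is not a multiple of 16 would otherwise leave inflate and deflate oscillating around an unreachable target:

  #include <stdio.h>

  #define ALIGN(x, a) ((((x) + (a) - 1) / (a)) * (a))

  int main(void)
  {
          unsigned int per_guest_page = 16;   /* 64 KiB / 4 KiB, assumed */
          unsigned int requested = 100;       /* balloon pages host asked for */

          /* the guest can only move whole guest pages */
          printf("target: %u\n", ALIGN(requested, per_guest_page));  /* 112 */
          return 0;
  }
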
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index e781e5e9215f..db1015db07b5 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -542,11 +542,9 @@ static void virtio_mmio_release_dev(struct device *_d)
{
struct virtio_device *vdev =
container_of(_d, struct virtio_device, dev);
- struct virtio_mmio_device *vm_dev =
- container_of(vdev, struct virtio_mmio_device, vdev);
- struct platform_device *pdev = vm_dev->pdev;
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
- devm_kfree(&pdev->dev, vm_dev);
+ kfree(vm_dev);
}
/* Platform device */
@@ -554,19 +552,10 @@ static void virtio_mmio_release_dev(struct device *_d)
static int virtio_mmio_probe(struct platform_device *pdev)
{
struct virtio_mmio_device *vm_dev;
- struct resource *mem;
unsigned long magic;
int rc;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -EINVAL;
-
- if (!devm_request_mem_region(&pdev->dev, mem->start,
- resource_size(mem), pdev->name))
- return -EBUSY;
-
- vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
+ vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
if (!vm_dev)
return -ENOMEM;
@@ -577,15 +566,18 @@ static int virtio_mmio_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&vm_dev->virtqueues);
spin_lock_init(&vm_dev->lock);
- vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
- if (vm_dev->base == NULL)
- return -EFAULT;
+ vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vm_dev->base)) {
+ rc = PTR_ERR(vm_dev->base);
+ goto free_vm_dev;
+ }
/* Check magic value */
magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_vm_dev;
}
/* Check device version */
@@ -593,7 +585,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
if (vm_dev->version < 1 || vm_dev->version > 2) {
dev_err(&pdev->dev, "Version %ld not supported!\n",
vm_dev->version);
- return -ENXIO;
+ rc = -ENXIO;
+ goto free_vm_dev;
}
vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
@@ -602,7 +595,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
* virtio-mmio device with an ID 0 is a (dummy) placeholder
* with no function. End probing now with no error reported.
*/
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_vm_dev;
}
vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
@@ -632,6 +626,10 @@ static int virtio_mmio_probe(struct platform_device *pdev)
put_device(&vm_dev->vdev.dev);
return rc;
+
+free_vm_dev:
+ kfree(vm_dev);
+ return rc;
}
static int virtio_mmio_remove(struct platform_device *pdev)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index deb72fd7ec50..e3c78c9f8458 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -424,7 +424,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
struct vring_virtqueue *vq = to_vvq(_vq);
struct scatterlist *sg;
struct vring_desc *desc;
- unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
+ unsigned int i, n, avail, descs_used, prev, err_idx;
int head;
bool indirect;
@@ -1101,8 +1101,8 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
struct vring_packed_desc *desc;
struct scatterlist *sg;
unsigned int i, n, c, descs_used, err_idx;
- __le16 uninitialized_var(head_flags), flags;
- u16 head, id, uninitialized_var(prev), curr, avail_used_flags;
+ __le16 head_flags, flags;
+ u16 head, id, prev, curr, avail_used_flags;
START_USE(vq);
@@ -1180,7 +1180,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
}
}
- if (i < head)
+ if (i <= head)
vq->packed.avail_wrap_counter ^= 1;
/* We're using some buffers from the free list. */
diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c
index 6a1bc284f297..eae78366eb02 100644
--- a/drivers/vme/bridges/vme_fake.c
+++ b/drivers/vme/bridges/vme_fake.c
@@ -1073,6 +1073,8 @@ static int __init fake_init(void)
/* We need a fake parent device */
vme_root = __root_device_register("vme", THIS_MODULE);
+ if (IS_ERR(vme_root))
+ return PTR_ERR(vme_root);
/* If we want to support more than one bridge at some point, we need to
* dynamically allocate this so we get one per device.
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 7e079d39bd76..f2da16bf1439 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -1771,6 +1771,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
return 0;
err_dma:
+ list_del(&entry->list);
err_dest:
err_source:
err_align:
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index e58c7592008d..e08f40c9d54c 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -1131,6 +1131,8 @@ int w1_process(void *data)
/* remainder if it woke up early */
unsigned long jremain = 0;
+ atomic_inc(&dev->refcnt);
+
for (;;) {
if (!jremain && dev->search_count) {
@@ -1158,8 +1160,10 @@ int w1_process(void *data)
*/
mutex_unlock(&dev->list_mutex);
- if (kthread_should_stop())
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
break;
+ }
/* Only sleep when the search is active. */
if (dev->search_count) {
@@ -1224,10 +1228,10 @@ err_out_exit_init:
static void __exit w1_fini(void)
{
- struct w1_master *dev;
+ struct w1_master *dev, *n;
/* Set netlink removal messages and some cleanup */
- list_for_each_entry(dev, &w1_masters, w1_master_entry)
+ list_for_each_entry_safe(dev, n, &w1_masters, w1_master_entry)
__w1_remove_master_device(dev);
w1_fini_netlink();
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index b3e1792d9c49..3a71c5eb2f83 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -51,10 +51,9 @@ static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
dev->search_count = w1_search_count;
dev->enable_pullup = w1_enable_pullup;
- /* 1 for w1_process to decrement
- * 1 for __w1_remove_master_device to decrement
+ /* For __w1_remove_master_device to decrement
*/
- atomic_set(&dev->refcnt, 2);
+ atomic_set(&dev->refcnt, 1);
INIT_LIST_HEAD(&dev->slist);
INIT_LIST_HEAD(&dev->async_list);
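
The w1 change gives each reference a clear owner: w1_alloc_dev() takes only the reference that __w1_remove_master_device() drops, and the w1_process kthread now takes its own reference as soon as it runs. A rough userspace model of that split (illustrative, not the driver's types):

  #include <stdio.h>

  struct master { int refcnt; };

  static void put_master(struct master *m)
  {
          if (--m->refcnt == 0)
                  printf("free master\n");
  }

  int main(void)
  {
          struct master m = { .refcnt = 1 };  /* ref for remove_master */

          m.refcnt++;                         /* w1_process takes its own */
          put_master(&m);                     /* kthread exits */
          put_master(&m);                     /* __w1_remove_master_device */
          return 0;
  }
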
diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c
index e5dcb26d85f0..dcb3ffda3fad 100644
--- a/drivers/watchdog/armada_37xx_wdt.c
+++ b/drivers/watchdog/armada_37xx_wdt.c
@@ -274,6 +274,8 @@ static int armada_37xx_wdt_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
dev->reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!dev->reg)
+ return -ENOMEM;
/* init clock */
dev->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 292b5a1ca831..fed7be246442 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -206,10 +206,9 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
"min heartbeat and max heartbeat might be too close for the system to handle it correctly\n");
if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
- err = request_irq(wdt->irq, wdt_interrupt,
- IRQF_SHARED | IRQF_IRQPOLL |
- IRQF_NO_SUSPEND,
- pdev->name, wdt);
+ err = devm_request_irq(dev, wdt->irq, wdt_interrupt,
+ IRQF_SHARED | IRQF_IRQPOLL | IRQF_NO_SUSPEND,
+ pdev->name, wdt);
if (err)
return err;
}
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index dec6ca019bea..3a8dec05b591 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -42,6 +42,7 @@
#define SECS_TO_WDOG_TICKS(x) ((x) << 16)
#define WDOG_TICKS_TO_SECS(x) ((x) >> 16)
+#define WDOG_TICKS_TO_MSECS(x) ((x) * 1000 >> 16)
struct bcm2835_wdt {
void __iomem *base;
@@ -140,7 +141,7 @@ static struct watchdog_device bcm2835_wdt_wdd = {
.info = &bcm2835_wdt_info,
.ops = &bcm2835_wdt_ops,
.min_timeout = 1,
- .max_timeout = WDOG_TICKS_TO_SECS(PM_WDOG_TIME_SET),
+ .max_hw_heartbeat_ms = WDOG_TICKS_TO_MSECS(PM_WDOG_TIME_SET),
.timeout = WDOG_TICKS_TO_SECS(PM_WDOG_TIME_SET),
};
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index f8d4e91d0383..3b310768fbbf 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -2,7 +2,7 @@
/*
* Cadence WDT driver - Used by Xilinx Zynq
*
- * Copyright (C) 2010 - 2014 Xilinx, Inc.
+ * Copyright (C) 2010 - 2019 Xilinx, Inc.
*
*/
@@ -328,15 +328,20 @@ static int cdns_wdt_probe(struct platform_device *pdev)
/* Initialize the members of cdns_wdt structure */
cdns_wdt_device->parent = dev;
- watchdog_init_timeout(cdns_wdt_device, wdt_timeout, dev);
+ ret = watchdog_init_timeout(cdns_wdt_device, wdt_timeout, dev);
+ if (ret)
+ dev_warn(&pdev->dev, "unable to set timeout value\n");
+
watchdog_set_nowayout(cdns_wdt_device, nowayout);
watchdog_stop_on_reboot(cdns_wdt_device);
watchdog_set_drvdata(cdns_wdt_device, wdt);
wdt->clk = devm_clk_get(dev, NULL);
if (IS_ERR(wdt->clk)) {
- dev_err(dev, "input clock not found\n");
- return PTR_ERR(wdt->clk);
+ ret = PTR_ERR(wdt->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "input clock not found\n");
+ return ret;
}
ret = clk_prepare_enable(wdt->clk);
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
index aafc8d98bf9f..370f648cb4b1 100644
--- a/drivers/watchdog/diag288_wdt.c
+++ b/drivers/watchdog/diag288_wdt.c
@@ -86,7 +86,7 @@ static int __diag288(unsigned int func, unsigned int timeout,
"1:\n"
EX_TABLE(0b, 1b)
: "+d" (err) : "d"(__func), "d"(__timeout),
- "d"(__action), "d"(__len) : "1", "cc");
+ "d"(__action), "d"(__len) : "1", "cc", "memory");
return err;
}
@@ -272,12 +272,21 @@ static int __init diag288_init(void)
char ebc_begin[] = {
194, 197, 199, 201, 213
};
+ char *ebc_cmd;
watchdog_set_nowayout(&wdt_dev, nowayout_info);
if (MACHINE_IS_VM) {
- if (__diag288_vm(WDT_FUNC_INIT, 15,
- ebc_begin, sizeof(ebc_begin)) != 0) {
+ ebc_cmd = kmalloc(sizeof(ebc_begin), GFP_KERNEL);
+ if (!ebc_cmd) {
+ pr_err("The watchdog cannot be initialized\n");
+ return -ENOMEM;
+ }
+ memcpy(ebc_cmd, ebc_begin, sizeof(ebc_begin));
+ ret = __diag288_vm(WDT_FUNC_INIT, 15,
+ ebc_cmd, sizeof(ebc_begin));
+ kfree(ebc_cmd);
+ if (ret != 0) {
pr_err("The watchdog cannot be initialized\n");
return -EINVAL;
}
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 7d34bcf1c45b..53573c3ddd1a 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -174,7 +174,7 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
"3. OA Forward Progress Log\n"
"4. iLO Event Log";
- if (ilo5 && ulReason == NMI_UNKNOWN && !mynmi)
+ if (ulReason == NMI_UNKNOWN && !mynmi)
return NMI_DONE;
if (ilo5 && !pretimeout && !mynmi)
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index e707c4797f76..134237d8b8fc 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -413,6 +413,20 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev)
return time_left;
}
+/* Returns true if the watchdog was running */
+static bool iTCO_wdt_set_running(struct iTCO_wdt_private *p)
+{
+ u16 val;
+
+ /* Bit 11: TCO Timer Halt -> 0 = The TCO timer is enabled */
+ val = inw(TCO1_CNT(p));
+ if (!(val & BIT(11))) {
+ set_bit(WDOG_HW_RUNNING, &p->wddev.status);
+ return true;
+ }
+ return false;
+}
+
/*
* Kernel Interfaces
*/
@@ -501,9 +515,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
return -ENODEV; /* Cannot reset NO_REBOOT bit */
}
- /* Set the NO_REBOOT bit to prevent later reboots, just for sure */
- p->update_no_reboot_bit(p->no_reboot_priv, true);
-
if (turn_SMI_watchdog_clear_off >= p->iTCO_version) {
/*
* Bit 13: TCO_EN -> 0
@@ -555,8 +566,13 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(&p->wddev, p);
platform_set_drvdata(pdev, p);
- /* Make sure the watchdog is not running */
- iTCO_wdt_stop(&p->wddev);
+ if (!iTCO_wdt_set_running(p)) {
+ /*
+ * If the watchdog was not running set NO_REBOOT now to
+ * prevent later reboots.
+ */
+ p->update_no_reboot_bit(p->no_reboot_priv, true);
+ }
/* Check that the heartbeat value is within its range;
if not reset to the default */
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index 2cdbd37c700c..7ee355c28628 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -181,3 +181,4 @@ module_platform_driver(mid_wdt_driver);
MODULE_AUTHOR("David Cohen <david.a.cohen@linux.intel.com>");
MODULE_DESCRIPTION("Watchdog Driver for Intel MID platform");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:intel_mid_wdt");
diff --git a/drivers/watchdog/menz69_wdt.c b/drivers/watchdog/menz69_wdt.c
index ed18238c5407..96a25d18ab64 100644
--- a/drivers/watchdog/menz69_wdt.c
+++ b/drivers/watchdog/menz69_wdt.c
@@ -98,14 +98,6 @@ static const struct watchdog_ops men_z069_ops = {
.set_timeout = men_z069_wdt_set_timeout,
};
-static struct watchdog_device men_z069_wdt = {
- .info = &men_z069_info,
- .ops = &men_z069_ops,
- .timeout = MEN_Z069_DEFAULT_TIMEOUT,
- .min_timeout = 1,
- .max_timeout = MEN_Z069_WDT_COUNTER_MAX / MEN_Z069_TIMER_FREQ,
-};
-
static int men_z069_probe(struct mcb_device *dev,
const struct mcb_device_id *id)
{
@@ -125,15 +117,19 @@ static int men_z069_probe(struct mcb_device *dev,
goto release_mem;
drv->mem = mem;
+ drv->wdt.info = &men_z069_info;
+ drv->wdt.ops = &men_z069_ops;
+ drv->wdt.timeout = MEN_Z069_DEFAULT_TIMEOUT;
+ drv->wdt.min_timeout = 1;
+ drv->wdt.max_timeout = MEN_Z069_WDT_COUNTER_MAX / MEN_Z069_TIMER_FREQ;
- drv->wdt = men_z069_wdt;
watchdog_init_timeout(&drv->wdt, 0, &dev->dev);
watchdog_set_nowayout(&drv->wdt, nowayout);
watchdog_set_drvdata(&drv->wdt, drv);
drv->wdt.parent = &dev->dev;
mcb_set_drvdata(dev, drv);
- return watchdog_register_device(&men_z069_wdt);
+ return watchdog_register_device(&drv->wdt);
release_mem:
mcb_release_mem(mem);
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 7fe4f7c3f7ce..7e7567acc33a 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -2,7 +2,7 @@
/*
* Watchdog Device Driver for Xilinx axi/xps_timebase_wdt
*
- * (C) Copyright 2013 - 2014 Xilinx, Inc.
+ * (C) Copyright 2013 - 2019 Xilinx, Inc.
* (C) Copyright 2011 (Alejandro Cabrera <aldaya@gmail.com>)
*/
@@ -18,18 +18,37 @@
#include <linux/of_device.h>
#include <linux/of_address.h>
+#define XWT_WWDT_DEFAULT_TIMEOUT 10
+#define XWT_WWDT_MIN_TIMEOUT 1
+#define XWT_WWDT_MAX_TIMEOUT 80
+
/* Register offsets for the Wdt device */
#define XWT_TWCSR0_OFFSET 0x0 /* Control/Status Register0 */
#define XWT_TWCSR1_OFFSET 0x4 /* Control/Status Register1 */
#define XWT_TBR_OFFSET 0x8 /* Timebase Register Offset */
+#define XWT_WWREF_OFFSET 0x1000 /* Refresh Register */
+#define XWT_WWCSR_OFFSET 0x2000 /* Control/Status Register */
+#define XWT_WWOFF_OFFSET 0x2008 /* Offset Register */
+#define XWT_WWCMP0_OFFSET 0x2010 /* Compare Value Register0 */
+#define XWT_WWCMP1_OFFSET 0x2014 /* Compare Value Register1 */
+#define XWT_WWWRST_OFFSET 0x2FD0 /* Warm Reset Register */
/* Control/Status Register Masks */
-#define XWT_CSR0_WRS_MASK 0x00000008 /* Reset status */
-#define XWT_CSR0_WDS_MASK 0x00000004 /* Timer state */
-#define XWT_CSR0_EWDT1_MASK 0x00000002 /* Enable bit 1 */
+#define XWT_CSR0_WRS_MASK BIT(3) /* Reset status */
+#define XWT_CSR0_WDS_MASK BIT(2) /* Timer state */
+#define XWT_CSR0_EWDT1_MASK BIT(1) /* Enable bit 1 */
/* Control/Status Register 0/1 bits */
-#define XWT_CSRX_EWDT2_MASK 0x00000001 /* Enable bit 2 */
+#define XWT_CSRX_EWDT2_MASK BIT(0) /* Enable bit 2 */
+
+/* Refresh Register Masks */
+#define XWT_WWREF_GWRR_MASK BIT(0) /* Refresh and start new period */
+
+/* Generic Control/Status Register Masks */
+#define XWT_WWCSR_GWEN_MASK BIT(0) /* Enable Bit */
+
+/* Warm Reset Register Masks */
+#define XWT_WWRST_GWWRR_MASK BIT(0) /* Warm Reset Register */
/* SelfTest constants */
#define XWT_MAX_SELFTEST_LOOP_COUNT 0x00010000
@@ -37,10 +56,34 @@
#define WATCHDOG_NAME "Xilinx Watchdog"
+static int wdt_timeout;
+
+module_param(wdt_timeout, int, 0644);
+MODULE_PARM_DESC(wdt_timeout,
+ "Watchdog time in seconds. (default="
+ __MODULE_STRING(XWT_WWDT_DEFAULT_TIMEOUT) ")");
+
+/**
+ * enum xwdt_ip_type - WDT IP type.
+ *
+ * @XWDT_WDT: Soft wdt ip.
+ * @XWDT_WWDT: Window wdt ip.
+ */
+enum xwdt_ip_type {
+ XWDT_WDT = 0,
+ XWDT_WWDT,
+};
+
+struct xwdt_devtype_data {
+ enum xwdt_ip_type wdttype;
+ const struct watchdog_ops *xwdt_ops;
+ const struct watchdog_info *xwdt_info;
+};
+
struct xwdt_device {
void __iomem *base;
u32 wdt_interval;
- spinlock_t spinlock;
+ spinlock_t spinlock; /* spinlock for register handling */
struct watchdog_device xilinx_wdt_wdd;
struct clk *clk;
};
@@ -50,6 +93,7 @@ static int xilinx_wdt_start(struct watchdog_device *wdd)
int ret;
u32 control_status_reg;
struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
ret = clk_enable(xdev->clk);
if (ret) {
@@ -70,6 +114,8 @@ static int xilinx_wdt_start(struct watchdog_device *wdd)
spin_unlock(&xdev->spinlock);
+ dev_dbg(xilinx_wdt_wdd->parent, "Watchdog Started!\n");
+
return 0;
}
@@ -77,6 +123,7 @@ static int xilinx_wdt_stop(struct watchdog_device *wdd)
{
u32 control_status_reg;
struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
spin_lock(&xdev->spinlock);
@@ -91,7 +138,7 @@ static int xilinx_wdt_stop(struct watchdog_device *wdd)
clk_disable(xdev->clk);
- pr_info("Stopped!\n");
+ dev_dbg(xilinx_wdt_wdd->parent, "Watchdog Stopped!\n");
return 0;
}
@@ -126,6 +173,126 @@ static const struct watchdog_ops xilinx_wdt_ops = {
.ping = xilinx_wdt_keepalive,
};
+static int xilinx_wwdt_start(struct watchdog_device *wdd)
+{
+ int ret;
+ u32 control_status_reg;
+ u64 count;
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
+
+ unsigned long clock_f = clk_get_rate(xdev->clk);
+
+ /* Calculate timeout count */
+ count = wdd->timeout * clock_f;
+ ret = clk_enable(xdev->clk);
+ if (ret) {
+ dev_err(wdd->parent, "Failed to enable clock\n");
+ return ret;
+ }
+
+ spin_lock(&xdev->spinlock);
+
+ /*
+ * Timeout count is halved as there are two windows:
+ * the first window overflow is ignored (interrupt);
+ * reset is only generated at the second window overflow
+ */
+ count = count >> 1;
+
+ /* Disable the generic watchdog timer */
+ control_status_reg = ioread32(xdev->base + XWT_WWCSR_OFFSET);
+ control_status_reg &= ~(XWT_WWCSR_GWEN_MASK);
+ iowrite32(control_status_reg, xdev->base + XWT_WWCSR_OFFSET);
+
+ /* Set compare and offset registers for generic watchdog timeout */
+ iowrite32((u32)count, xdev->base + XWT_WWCMP0_OFFSET);
+ iowrite32((u32)0, xdev->base + XWT_WWCMP1_OFFSET);
+ iowrite32((u32)count, xdev->base + XWT_WWOFF_OFFSET);
+
+ /* Enable the generic watchdog timer */
+ control_status_reg = ioread32(xdev->base + XWT_WWCSR_OFFSET);
+ control_status_reg |= (XWT_WWCSR_GWEN_MASK);
+ iowrite32(control_status_reg, xdev->base + XWT_WWCSR_OFFSET);
+
+ spin_unlock(&xdev->spinlock);
+
+ dev_dbg(xilinx_wdt_wdd->parent, "Watchdog Started!\n");
+
+ return 0;
+}
+
+static int xilinx_wwdt_stop(struct watchdog_device *wdd)
+{
+ u32 control_status_reg;
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
+
+ spin_lock(&xdev->spinlock);
+
+ /* Disable the generic watchdog timer */
+ control_status_reg = ioread32(xdev->base + XWT_WWCSR_OFFSET);
+ control_status_reg &= ~(XWT_WWCSR_GWEN_MASK);
+ iowrite32(control_status_reg, xdev->base + XWT_WWCSR_OFFSET);
+
+ spin_unlock(&xdev->spinlock);
+
+ clk_disable(xdev->clk);
+
+ dev_dbg(xilinx_wdt_wdd->parent, "Watchdog Stopped!\n");
+
+ return 0;
+}
+
+static int xilinx_wwdt_keepalive(struct watchdog_device *wdd)
+{
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+
+ spin_lock(&xdev->spinlock);
+
+ iowrite32(XWT_WWREF_GWRR_MASK, xdev->base + XWT_WWREF_OFFSET);
+
+ spin_unlock(&xdev->spinlock);
+
+ return 0;
+}
+
+static int xilinx_wwdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int new_time)
+{
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
+
+ if (new_time < XWT_WWDT_MIN_TIMEOUT ||
+ new_time > XWT_WWDT_MAX_TIMEOUT) {
+ dev_warn(xilinx_wdt_wdd->parent,
+ "timeout value must be %d<=x<=%d, using %d\n",
+ XWT_WWDT_MIN_TIMEOUT,
+ XWT_WWDT_MAX_TIMEOUT, new_time);
+ return -EINVAL;
+ }
+
+ wdd->timeout = new_time;
+
+ return xilinx_wwdt_start(wdd);
+}
+
+static const struct watchdog_info xilinx_wwdt_ident = {
+ .options = WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_SETTIMEOUT,
+ .firmware_version = 1,
+ .identity = "xlnx_wwdt watchdog",
+};
+
+static const struct watchdog_ops xilinx_wwdt_ops = {
+ .owner = THIS_MODULE,
+ .start = xilinx_wwdt_start,
+ .stop = xilinx_wwdt_stop,
+ .ping = xilinx_wwdt_keepalive,
+ .set_timeout = xilinx_wwdt_set_timeout,
+};
+
static u32 xwdt_selftest(struct xwdt_device *xdev)
{
int i;
@@ -156,6 +323,29 @@ static void xwdt_clk_disable_unprepare(void *data)
clk_disable_unprepare(data);
}
+static const struct xwdt_devtype_data xwdt_wdt_data = {
+ .wdttype = XWDT_WDT,
+ .xwdt_info = &xilinx_wdt_ident,
+ .xwdt_ops = &xilinx_wdt_ops,
+};
+
+static const struct xwdt_devtype_data xwdt_wwdt_data = {
+ .wdttype = XWDT_WWDT,
+ .xwdt_info = &xilinx_wwdt_ident,
+ .xwdt_ops = &xilinx_wwdt_ops,
+};
+
+static const struct of_device_id xwdt_of_match[] = {
+ { .compatible = "xlnx,xps-timebase-wdt-1.00.a",
+ .data = &xwdt_wdt_data },
+ { .compatible = "xlnx,xps-timebase-wdt-1.01.a",
+ .data = &xwdt_wdt_data },
+ { .compatible = "xlnx,versal-wwdt-1.0",
+ .data = &xwdt_wwdt_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xwdt_of_match);
+
static int xwdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -163,32 +353,49 @@ static int xwdt_probe(struct platform_device *pdev)
u32 pfreq = 0, enable_once = 0;
struct xwdt_device *xdev;
struct watchdog_device *xilinx_wdt_wdd;
+ const struct of_device_id *of_id;
+ enum xwdt_ip_type wdttype;
+ const struct xwdt_devtype_data *devtype;
xdev = devm_kzalloc(dev, sizeof(*xdev), GFP_KERNEL);
if (!xdev)
return -ENOMEM;
xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
- xilinx_wdt_wdd->info = &xilinx_wdt_ident;
- xilinx_wdt_wdd->ops = &xilinx_wdt_ops;
+
+ of_id = of_match_device(xwdt_of_match, &pdev->dev);
+ if (!of_id)
+ return -EINVAL;
+
+ devtype = of_id->data;
+
+ wdttype = devtype->wdttype;
+
+ xilinx_wdt_wdd->info = devtype->xwdt_info;
+ xilinx_wdt_wdd->ops = devtype->xwdt_ops;
xilinx_wdt_wdd->parent = dev;
xdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xdev->base))
return PTR_ERR(xdev->base);
- rc = of_property_read_u32(dev->of_node, "xlnx,wdt-interval",
- &xdev->wdt_interval);
- if (rc)
- dev_warn(dev, "Parameter \"xlnx,wdt-interval\" not found\n");
+ if (wdttype == XWDT_WDT) {
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "xlnx,wdt-interval",
+ &xdev->wdt_interval);
+ if (rc)
+ dev_warn(&pdev->dev,
+ "Parameter \"xlnx,wdt-interval\" not found\n");
- rc = of_property_read_u32(dev->of_node, "xlnx,wdt-enable-once",
- &enable_once);
- if (rc)
- dev_warn(dev,
- "Parameter \"xlnx,wdt-enable-once\" not found\n");
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "xlnx,wdt-enable-once",
+ &enable_once);
+ if (rc)
+ dev_warn(&pdev->dev,
+ "Parameter \"xlnx,wdt-enable-once\" not found\n");
- watchdog_set_nowayout(xilinx_wdt_wdd, enable_once);
+ watchdog_set_nowayout(xilinx_wdt_wdd, enable_once);
+ }
xdev->clk = devm_clk_get(dev, NULL);
if (IS_ERR(xdev->clk)) {
@@ -210,13 +417,26 @@ static int xwdt_probe(struct platform_device *pdev)
pfreq = clk_get_rate(xdev->clk);
}
- /*
- * Twice of the 2^wdt_interval / freq because the first wdt overflow is
- * ignored (interrupt), reset is only generated at second wdt overflow
- */
- if (pfreq && xdev->wdt_interval)
- xilinx_wdt_wdd->timeout = 2 * ((1 << xdev->wdt_interval) /
- pfreq);
+ if (wdttype == XWDT_WDT) {
+ /*
+ * Twice of the 2^wdt_interval / freq because
+ * the first wdt overflow is ignored (interrupt),
+ * reset is only generated at second wdt overflow
+ */
+ if (pfreq && xdev->wdt_interval)
+ xilinx_wdt_wdd->timeout =
+ 2 * ((1 << xdev->wdt_interval) /
+ pfreq);
+ } else {
+ xilinx_wdt_wdd->timeout = XWT_WWDT_DEFAULT_TIMEOUT;
+ xilinx_wdt_wdd->min_timeout = XWT_WWDT_MIN_TIMEOUT;
+ xilinx_wdt_wdd->max_timeout = XWT_WWDT_MAX_TIMEOUT;
+
+ rc = watchdog_init_timeout(xilinx_wdt_wdd,
+ wdt_timeout, &pdev->dev);
+ if (rc)
+ dev_warn(&pdev->dev, "unable to set timeout value\n");
+ }
spin_lock_init(&xdev->spinlock);
watchdog_set_drvdata(xilinx_wdt_wdd, xdev);
@@ -231,10 +451,12 @@ static int xwdt_probe(struct platform_device *pdev)
if (rc)
return rc;
- rc = xwdt_selftest(xdev);
- if (rc == XWT_TIMER_FAILED) {
- dev_err(dev, "SelfTest routine error\n");
- return rc;
+ if (wdttype == XWDT_WDT) {
+ rc = xwdt_selftest(xdev);
+ if (rc == XWT_TIMER_FAILED) {
+ dev_err(dev, "SelfTest routine error\n");
+ return rc;
+ }
}
rc = devm_watchdog_register_device(dev, xilinx_wdt_wdd);
@@ -260,9 +482,10 @@ static int xwdt_probe(struct platform_device *pdev)
static int __maybe_unused xwdt_suspend(struct device *dev)
{
struct xwdt_device *xdev = dev_get_drvdata(dev);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
- if (watchdog_active(&xdev->xilinx_wdt_wdd))
- xilinx_wdt_stop(&xdev->xilinx_wdt_wdd);
+ if (watchdog_active(xilinx_wdt_wdd))
+ xilinx_wdt_wdd->ops->stop(xilinx_wdt_wdd);
return 0;
}
@@ -276,24 +499,17 @@ static int __maybe_unused xwdt_suspend(struct device *dev)
static int __maybe_unused xwdt_resume(struct device *dev)
{
struct xwdt_device *xdev = dev_get_drvdata(dev);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
int ret = 0;
- if (watchdog_active(&xdev->xilinx_wdt_wdd))
- ret = xilinx_wdt_start(&xdev->xilinx_wdt_wdd);
+ if (watchdog_active(xilinx_wdt_wdd))
+ ret = xilinx_wdt_wdd->ops->start(xilinx_wdt_wdd);
return ret;
}
static SIMPLE_DEV_PM_OPS(xwdt_pm_ops, xwdt_suspend, xwdt_resume);
-/* Match table for of_platform binding */
-static const struct of_device_id xwdt_of_match[] = {
- { .compatible = "xlnx,xps-timebase-wdt-1.00.a", },
- { .compatible = "xlnx,xps-timebase-wdt-1.01.a", },
- {},
-};
-MODULE_DEVICE_TABLE(of, xwdt_of_match);
-
static struct platform_driver xwdt_driver = {
.probe = xwdt_probe,
.driver = {
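The reworked probe keys the watchdog ops and info tables off the matched compatible string. The general idiom is to hang a per-variant descriptor on of_device_id.data and fetch it with of_match_device(); a condensed sketch with hypothetical names:

	struct foo_devtype {
		unsigned int max_timeout;	/* per-variant limit */
	};

	static const struct foo_devtype foo_v1 = { .max_timeout = 80 };

	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo-1.0", .data = &foo_v1 },
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(of, foo_of_match);

	static int foo_probe(struct platform_device *pdev)
	{
		const struct of_device_id *id =
			of_match_device(foo_of_match, &pdev->dev);
		const struct foo_devtype *dt;

		if (!id)
			return -EINVAL;
		dt = id->data;	/* variant picked by the compatible */
		dev_info(&pdev->dev, "max timeout %u\n", dt->max_timeout);
		return 0;
	}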
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 6727f8ab2d18..ce94b6f35488 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -325,7 +325,8 @@ static int usb_pcwd_set_heartbeat(struct usb_pcwd_private *usb_pcwd, int t)
static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd,
int *temperature)
{
- unsigned char msb, lsb;
+ unsigned char msb = 0x00;
+ unsigned char lsb = 0x00;
usb_pcwd_send_command(usb_pcwd, CMD_READ_TEMP, &msb, &lsb);
@@ -341,7 +342,8 @@ static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd,
static int usb_pcwd_get_timeleft(struct usb_pcwd_private *usb_pcwd,
int *time_left)
{
- unsigned char msb, lsb;
+ unsigned char msb = 0x00;
+ unsigned char lsb = 0x00;
/* Read the time that's left before rebooting */
/* Note: if the board is not yet armed then we will read 0xFFFF */
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index f0f1e3b2e463..4cbe6ba52754 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -121,6 +121,7 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
wdd->timeout = timeout;
+ timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000);
if (action)
writel(gwdt->clk * timeout,
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 93bd302ae7c5..704d7c527fcb 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -98,6 +98,10 @@ static int tco_timer_start(struct watchdog_device *wdd)
val |= SP5100_WDT_START_STOP_BIT;
writel(val, SP5100_WDT_CONTROL(tco->tcobase));
+ /* This must be a distinct write. */
+ val |= SP5100_WDT_TRIGGER_BIT;
+ writel(val, SP5100_WDT_CONTROL(tco->tcobase));
+
return 0;
}
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index 25188d6bbe15..16dd1aab7c67 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -21,6 +21,8 @@
#include <linux/platform_device.h>
#include <linux/watchdog.h>
+#define DEFAULT_TIMEOUT 10
+
/* IWDG registers */
#define IWDG_KR 0x00 /* Key register */
#define IWDG_PR 0x04 /* Prescaler Register */
@@ -254,6 +256,7 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
wdd->parent = dev;
wdd->info = &stm32_iwdg_info;
wdd->ops = &stm32_iwdg_ops;
+ wdd->timeout = DEFAULT_TIMEOUT;
wdd->min_timeout = DIV_ROUND_UP((RLR_MIN + 1) * PR_MIN, wdt->rate);
wdd->max_hw_heartbeat_ms = ((RLR_MAX + 1) * wdt->data->max_prescaler *
1000) / wdt->rate;
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 8494846ccdc5..6fb860542c86 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -1007,6 +1007,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
/* Fill in the data structures */
cdev_init(&wd_data->cdev, &watchdog_fops);
+ wd_data->cdev.owner = wdd->ops->owner;
/* Add the device */
err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
@@ -1016,13 +1017,11 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
if (wdd->id == 0) {
misc_deregister(&watchdog_miscdev);
old_wd_data = NULL;
- put_device(&wd_data->dev);
}
+ put_device(&wd_data->dev);
return err;
}
- wd_data->cdev.owner = wdd->ops->owner;
-
/* Record time of most recent heartbeat as 'just before now'. */
wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);
watchdog_set_open_deadline(wd_data);
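Two ordering rules drive the watchdog_dev.c hunk: cdev.owner must be populated before cdev_device_add() makes the node visible (or an early open can race module unload), and once a struct device has been initialized its memory may only be released via put_device(), never kfree(). In sketch form ('data' and 'fops' are placeholders):

	cdev_init(&data->cdev, &fops);
	data->cdev.owner = ops->owner;	/* set before the node is visible */

	err = cdev_device_add(&data->cdev, &data->dev);
	if (err) {
		/* drop the ref so ->release() runs; a bare kfree()
		 * would bypass the device refcount */
		put_device(&data->dev);
		return err;
	}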
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 87cfadd70d0d..91806dc1236d 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -84,22 +84,12 @@ const struct evtchn_ops *evtchn_ops;
static DEFINE_MUTEX(irq_mapping_update_lock);
/*
- * Lock protecting event handling loop against removing event channels.
- * Adding of event channels is no issue as the associated IRQ becomes active
- * only after everything is setup (before request_[threaded_]irq() the handler
- * can't be entered for an event, as the event channel will be unmasked only
- * then).
- */
-static DEFINE_RWLOCK(evtchn_rwlock);
-
-/*
* Lock hierarchy:
*
* irq_mapping_update_lock
- * evtchn_rwlock
- * IRQ-desc lock
- * percpu eoi_list_lock
- * irq_info->lock
+ * IRQ-desc lock
+ * percpu eoi_list_lock
+ * irq_info->lock
*/
static LIST_HEAD(xen_irq_list_head);
@@ -214,6 +204,22 @@ static void set_info_for_irq(unsigned int irq, struct irq_info *info)
irq_set_chip_data(irq, info);
}
+static void delayed_free_irq(struct work_struct *work)
+{
+ struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
+ rwork);
+ unsigned int irq = info->irq;
+
+ /* Remove the info pointer only now, with no potential users left. */
+ set_info_for_irq(irq, NULL);
+
+ kfree(info);
+
+ /* Legacy IRQ descriptors are managed by the arch. */
+ if (irq >= nr_legacy_irqs())
+ irq_free_desc(irq);
+}
+
/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
unsigned irq,
@@ -485,7 +491,9 @@ static void lateeoi_list_add(struct irq_info *info)
spin_lock_irqsave(&eoi->eoi_list_lock, flags);
- if (list_empty(&eoi->eoi_list)) {
+ elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
+ eoi_list);
+ if (!elem || info->eoi_time < elem->eoi_time) {
list_add(&info->eoi_list, &eoi->eoi_list);
mod_delayed_work_on(info->eoi_cpu, system_wq,
&eoi->delayed, delay);
@@ -548,33 +556,36 @@ static void xen_irq_lateeoi_worker(struct work_struct *work)
eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
- read_lock_irqsave(&evtchn_rwlock, flags);
+ rcu_read_lock();
while (true) {
- spin_lock(&eoi->eoi_list_lock);
+ spin_lock_irqsave(&eoi->eoi_list_lock, flags);
info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
eoi_list);
- if (info == NULL || now < info->eoi_time) {
- spin_unlock(&eoi->eoi_list_lock);
+ if (info == NULL)
+ break;
+
+ if (now < info->eoi_time) {
+ mod_delayed_work_on(info->eoi_cpu, system_wq,
+ &eoi->delayed,
+ info->eoi_time - now);
break;
}
list_del_init(&info->eoi_list);
- spin_unlock(&eoi->eoi_list_lock);
+ spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
info->eoi_time = 0;
xen_irq_lateeoi_locked(info, false);
}
- if (info)
- mod_delayed_work_on(info->eoi_cpu, system_wq,
- &eoi->delayed, info->eoi_time - now);
+ spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
- read_unlock_irqrestore(&evtchn_rwlock, flags);
+ rcu_read_unlock();
}
static void xen_cpu_init_eoi(unsigned int cpu)
@@ -589,16 +600,15 @@ static void xen_cpu_init_eoi(unsigned int cpu)
void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
{
struct irq_info *info;
- unsigned long flags;
- read_lock_irqsave(&evtchn_rwlock, flags);
+ rcu_read_lock();
info = info_for_irq(irq);
if (info)
xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
- read_unlock_irqrestore(&evtchn_rwlock, flags);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
@@ -617,6 +627,7 @@ static void xen_irq_init(unsigned irq)
info->type = IRQT_UNBOUND;
info->refcnt = -1;
+ INIT_RCU_WORK(&info->rwork, delayed_free_irq);
set_info_for_irq(irq, info);
@@ -669,31 +680,18 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
static void xen_free_irq(unsigned irq)
{
struct irq_info *info = info_for_irq(irq);
- unsigned long flags;
if (WARN_ON(!info))
return;
- write_lock_irqsave(&evtchn_rwlock, flags);
-
if (!list_empty(&info->eoi_list))
lateeoi_list_del(info);
list_del(&info->list);
- set_info_for_irq(irq, NULL);
-
WARN_ON(info->refcnt > 0);
- write_unlock_irqrestore(&evtchn_rwlock, flags);
-
- kfree(info);
-
- /* Legacy IRQ descriptors are managed by the arch. */
- if (irq < nr_legacy_irqs())
- return;
-
- irq_free_desc(irq);
+ queue_rcu_work(system_wq, &info->rwork);
}
static void xen_evtchn_close(unsigned int port)
@@ -1604,7 +1602,14 @@ static void __xen_evtchn_do_upcall(void)
unsigned count;
struct evtchn_loop_ctrl ctrl = { 0 };
- read_lock(&evtchn_rwlock);
+ /*
+ * When closing an event channel the associated IRQ must not be freed
+ * until all cpus have left the event handling loop. This is ensured
+ * by taking the rcu_read_lock() while handling events, as freeing of
+ * the IRQ is handled via queue_rcu_work() _after_ closing the event
+ * channel.
+ */
+ rcu_read_lock();
do {
vcpu_info->evtchn_upcall_pending = 0;
@@ -1621,7 +1626,7 @@ static void __xen_evtchn_do_upcall(void)
} while (count != 1 || vcpu_info->evtchn_upcall_pending);
out:
- read_unlock(&evtchn_rwlock);
+ rcu_read_unlock();
/*
* Increment irq_epoch only now to defer EOIs only for
@@ -2101,8 +2106,8 @@ void xen_callback_vector(void)
void xen_callback_vector(void) {}
#endif
-static bool fifo_events = true;
-module_param(fifo_events, bool, 0);
+bool xen_fifo_events = true;
+module_param_named(fifo_events, xen_fifo_events, bool, 0);
static int xen_evtchn_cpu_prepare(unsigned int cpu)
{
@@ -2131,10 +2136,12 @@ void __init xen_init_IRQ(void)
int ret = -EINVAL;
unsigned int evtchn;
- if (fifo_events)
+ if (xen_fifo_events)
ret = xen_evtchn_fifo_init();
- if (ret < 0)
+ if (ret < 0) {
xen_evtchn_2l_init();
+ xen_fifo_events = false;
+ }
xen_cpu_init_eoi(smp_processor_id());
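The events_base.c conversion replaces the global evtchn_rwlock with RCU: the hot event loop only takes rcu_read_lock(), and xen_free_irq() pushes the actual kfree() past a grace period with queue_rcu_work(). The pattern in isolation, with a hypothetical obj/slot pair:

	struct obj {
		struct rcu_work rwork;
		int payload;
	};

	static struct obj __rcu *slot;

	static void obj_free_deferred(struct work_struct *work)
	{
		struct obj *o = container_of(to_rcu_work(work),
					     struct obj, rwork);

		kfree(o);	/* no reader can still hold this pointer */
	}

	static void reader(void)
	{
		struct obj *o;

		rcu_read_lock();		/* pins the grace period */
		o = rcu_dereference(slot);
		if (o)
			pr_info("payload %d\n", o->payload);
		rcu_read_unlock();
	}

	static void teardown(struct obj *o)
	{
		rcu_assign_pointer(slot, NULL);	/* unpublish first */
		INIT_RCU_WORK(&o->rwork, obj_free_deferred);
		queue_rcu_work(system_wq, &o->rwork);
	}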
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index eb012fbb62e7..db64f48124c2 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -6,6 +6,7 @@
*/
#ifndef __EVENTS_INTERNAL_H__
#define __EVENTS_INTERNAL_H__
+#include <linux/rcupdate.h>
/* Interrupt types. */
enum xen_irq_type {
@@ -31,6 +32,7 @@ enum xen_irq_type {
struct irq_info {
struct list_head list;
struct list_head eoi_list;
+ struct rcu_work rwork;
short refcnt;
short spurious_cnt;
short type; /* type */
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index bba849e5d8a7..4a2a540a0896 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -384,8 +384,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
for (i = 0; i < map->count; i++) {
if (map->map_ops[i].status == GNTST_okay) {
map->unmap_ops[i].handle = map->map_ops[i].handle;
- if (!use_ptemod)
- alloced++;
+ alloced++;
} else if (!err)
err = -EINVAL;
@@ -394,8 +393,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
if (use_ptemod) {
if (map->kmap_ops[i].status == GNTST_okay) {
- if (map->map_ops[i].status == GNTST_okay)
- alloced++;
+ alloced++;
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
} else if (!err)
err = -EINVAL;
@@ -411,20 +409,42 @@ static void __unmap_grant_pages_done(int result,
unsigned int i;
struct gntdev_grant_map *map = data->data;
unsigned int offset = data->unmap_ops - map->unmap_ops;
+ int successful_unmaps = 0;
+ int live_grants;
for (i = 0; i < data->count; i++) {
+ if (map->unmap_ops[offset + i].status == GNTST_okay &&
+ map->unmap_ops[offset + i].handle != -1)
+ successful_unmaps++;
+
WARN_ON(map->unmap_ops[offset+i].status &&
map->unmap_ops[offset+i].handle != -1);
pr_debug("unmap handle=%d st=%d\n",
map->unmap_ops[offset+i].handle,
map->unmap_ops[offset+i].status);
map->unmap_ops[offset+i].handle = -1;
+ if (use_ptemod) {
+ if (map->kunmap_ops[offset + i].status == GNTST_okay &&
+ map->kunmap_ops[offset + i].handle != -1)
+ successful_unmaps++;
+
+ WARN_ON(map->kunmap_ops[offset+i].status &&
+ map->kunmap_ops[offset+i].handle != -1);
+ pr_debug("kunmap handle=%u st=%d\n",
+ map->kunmap_ops[offset+i].handle,
+ map->kunmap_ops[offset+i].status);
+ map->kunmap_ops[offset+i].handle = -1;
+ }
}
+
/*
* Decrease the live-grant counter. This must happen after the loop to
* prevent premature reuse of the grants by gnttab_mmap().
*/
- atomic_sub(data->count, &map->live_grants);
+ live_grants = atomic_sub_return(successful_unmaps, &map->live_grants);
+ if (WARN_ON(live_grants < 0))
+ pr_err("%s: live_grants became negative (%d) after unmapping %d pages!\n",
+ __func__, live_grants, successful_unmaps);
/* Release reference taken by __unmap_grant_pages */
gntdev_put_map(NULL, map);
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index cdc6daa7a9f6..9cf7085a260b 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -228,7 +228,7 @@ static int register_pcpu(struct pcpu *pcpu)
err = device_register(dev);
if (err) {
- pcpu_release(dev);
+ put_device(dev);
return err;
}
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index c45646450135..e1cb277a9e16 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -137,7 +137,7 @@ static int platform_pci_probe(struct pci_dev *pdev,
if (ret) {
dev_warn(&pdev->dev, "Unable to set the evtchn callback "
"err=%d\n", ret);
- goto out;
+ goto irq_out;
}
}
@@ -145,13 +145,16 @@ static int platform_pci_probe(struct pci_dev *pdev,
grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
ret = gnttab_setup_auto_xlat_frames(grant_frames);
if (ret)
- goto out;
+ goto irq_out;
ret = gnttab_init();
if (ret)
goto grant_out;
return 0;
grant_out:
gnttab_free_auto_xlat_frames();
+irq_out:
+ if (!xen_have_vector_callback)
+ free_irq(pdev->irq, pdev);
out:
pci_release_region(pdev, 0);
mem_out:
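The new irq_out label restores the usual probe error-handling rule: each failure path unwinds exactly the steps that already succeeded, in reverse order of setup. The generic shape (step_a()/step_b()/step_c() are placeholders):

	static int setup(void)
	{
		int err;

		err = step_a();
		if (err)
			return err;

		err = step_b();
		if (err)
			goto undo_a;

		err = step_c();
		if (err)
			goto undo_b;

		return 0;

	undo_b:
		undo_step_b();
	undo_a:
		undo_step_a();
		return err;
	}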
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index d4ff944cd16e..c4b0de4a542b 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -766,7 +766,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
goto out;
}
- pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
+ pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
if (!pfns) {
rc = -ENOMEM;
goto out;
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 9439de2ca0e4..6c2afd8bbf52 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -129,13 +129,13 @@ static bool pvcalls_conn_back_read(void *opaque)
if (masked_prod < masked_cons) {
vec[0].iov_base = data->in + masked_prod;
vec[0].iov_len = wanted;
- iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, wanted);
+ iov_iter_kvec(&msg.msg_iter, READ, vec, 1, wanted);
} else {
vec[0].iov_base = data->in + masked_prod;
vec[0].iov_len = array_size - masked_prod;
vec[1].iov_base = data->in;
vec[1].iov_len = wanted - vec[0].iov_len;
- iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, wanted);
+ iov_iter_kvec(&msg.msg_iter, READ, vec, 2, wanted);
}
atomic_set(&map->read, 0);
@@ -188,13 +188,13 @@ static bool pvcalls_conn_back_write(struct sock_mapping *map)
if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
vec[0].iov_len = size;
- iov_iter_kvec(&msg.msg_iter, READ, vec, 1, size);
+ iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, size);
} else {
vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
vec[1].iov_base = data->out;
vec[1].iov_len = size - vec[0].iov_len;
- iov_iter_kvec(&msg.msg_iter, READ, vec, 2, size);
+ iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, size);
}
atomic_set(&map->write, 0);
@@ -321,8 +321,10 @@ static struct sock_mapping *pvcalls_new_active_socket(
void *page;
map = kzalloc(sizeof(*map), GFP_KERNEL);
- if (map == NULL)
+ if (map == NULL) {
+ sock_release(sock);
return NULL;
+ }
map->fedata = fedata;
map->sock = sock;
@@ -414,10 +416,8 @@ static int pvcalls_back_connect(struct xenbus_device *dev,
req->u.connect.ref,
req->u.connect.evtchn,
sock);
- if (!map) {
+ if (!map)
ret = -EFAULT;
- sock_release(sock);
- }
out:
rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
@@ -558,7 +558,6 @@ static void __pvcalls_back_accept(struct work_struct *work)
sock);
if (!map) {
ret = -EFAULT;
- sock_release(sock);
goto out_error;
}
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 06346422f743..486d7978ea97 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -411,7 +411,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
done:
if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
+ xen_dma_sync_for_device(dev_addr, phys, size, dir);
return dev_addr;
}
@@ -431,7 +431,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
BUG_ON(dir == DMA_NONE);
if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);
+ xen_dma_sync_for_cpu(dev_addr, paddr, size, dir);
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr))
@@ -445,7 +445,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
phys_addr_t paddr = xen_bus_to_phys(dma_addr);
if (!dev_is_dma_coherent(dev))
- xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
+ xen_dma_sync_for_cpu(dma_addr, paddr, size, dir);
if (is_xen_swiotlb_buffer(dma_addr))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
@@ -461,7 +461,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
if (!dev_is_dma_coherent(dev))
- xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
+ xen_dma_sync_for_device(dma_addr, paddr, size, dir);
}
/*
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 597af455a522..0792fda49a15 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -128,7 +128,7 @@ static ssize_t xenbus_file_read(struct file *filp,
{
struct xenbus_file_priv *u = filp->private_data;
struct read_buffer *rb;
- unsigned i;
+ ssize_t i;
int ret;
mutex_lock(&u->reply_mutex);
@@ -148,7 +148,7 @@ again:
rb = list_entry(u->read_buffers.next, struct read_buffer, list);
i = 0;
while (i < len) {
- unsigned sz = min((unsigned)len - i, rb->len - rb->cons);
+ size_t sz = min_t(size_t, len - i, rb->len - rb->cons);
ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
diff --git a/fs/Makefile b/fs/Makefile
index 7876ba238244..97375ba820dd 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -131,6 +131,7 @@ obj-$(CONFIG_F2FS_FS) += f2fs/
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
obj-$(CONFIG_EFIVAR_FS) += efivarfs/
+obj-$(CONFIG_YAFFS_FS) += yaffs2/
obj-$(CONFIG_EROFS_FS) += erofs/
obj-$(CONFIG_YAFFS_FS) += yaffs2/
obj-$(CONFIG_AUFS_FS) += aufs/
diff --git a/fs/affs/file.c b/fs/affs/file.c
index ba084b0b214b..82bb38370aa9 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -878,7 +878,7 @@ affs_truncate(struct inode *inode)
if (inode->i_size > AFFS_I(inode)->mmu_private) {
struct address_space *mapping = inode->i_mapping;
struct page *page;
- void *fsdata;
+ void *fsdata = NULL;
loff_t isize = inode->i_size;
int res;
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 296b489861a9..1522fadd8d2d 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -404,10 +404,12 @@ static int afs_update_cell(struct afs_cell *cell)
if (ret == -ENOMEM)
goto out_wake;
- ret = -ENOMEM;
vllist = afs_alloc_vlserver_list(0);
- if (!vllist)
+ if (!vllist) {
+ if (ret >= 0)
+ ret = -ENOMEM;
goto out_wake;
+ }
switch (ret) {
case -ENODATA:
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 3a355a209919..43f5b972fcea 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -1023,7 +1023,7 @@ static int afs_d_revalidate_rcu(struct dentry *dentry)
static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
{
struct afs_vnode *vnode, *dir;
- struct afs_fid uninitialized_var(fid);
+ struct afs_fid fid;
struct dentry *parent;
struct inode *inode;
struct key *key;
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index f07e53ab808e..d06994990fc3 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -20,6 +20,7 @@ static int afs_probe_cell_name(struct dentry *dentry)
struct afs_net *net = afs_d2net(dentry);
const char *name = dentry->d_name.name;
size_t len = dentry->d_name.len;
+ char *result = NULL;
int ret;
/* Names prefixed with a dot are R/W mounts. */
@@ -37,9 +38,22 @@ static int afs_probe_cell_name(struct dentry *dentry)
}
ret = dns_query(net->net, "afsdb", name, len, "srv=1",
- NULL, NULL, false);
- if (ret == -ENODATA)
- ret = -EDESTADDRREQ;
+ &result, NULL, false);
+ if (ret == -ENODATA || ret == -ENOKEY || ret == 0)
+ ret = -ENOENT;
+ if (ret > 0 && ret >= sizeof(struct dns_server_list_v1_header)) {
+ struct dns_server_list_v1_header *v1 = (void *)result;
+
+ if (v1->hdr.zero == 0 &&
+ v1->hdr.content == DNS_PAYLOAD_IS_SERVER_LIST &&
+ v1->hdr.version == 1 &&
+ (v1->status != DNS_LOOKUP_GOOD &&
+ v1->status != DNS_LOOKUP_GOOD_WITH_BAD))
+ return -ENOENT;
+
+ }
+
+ kfree(result);
return ret;
}
@@ -163,20 +177,9 @@ static int afs_dynroot_d_revalidate(struct dentry *dentry, unsigned int flags)
return 1;
}
-/*
- * Allow the VFS to enquire as to whether a dentry should be unhashed (mustn't
- * sleep)
- * - called from dput() when d_count is going to 0.
- * - return 1 to request dentry be unhashed, 0 otherwise
- */
-static int afs_dynroot_d_delete(const struct dentry *dentry)
-{
- return d_really_is_positive(dentry);
-}
-
const struct dentry_operations afs_dynroot_dentry_operations = {
.d_revalidate = afs_dynroot_d_revalidate,
- .d_delete = afs_dynroot_d_delete,
+ .d_delete = always_delete_dentry,
.d_release = afs_d_release,
.d_automount = afs_d_automount,
};
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index d5e5a6ddc847..0fa05998a24d 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -75,7 +75,7 @@ void afs_lock_op_done(struct afs_call *call)
if (call->error == 0) {
spin_lock(&vnode->lock);
trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
- vnode->locked_at = call->reply_time;
+ vnode->locked_at = call->issue_time;
afs_schedule_lock_extension(vnode);
spin_unlock(&vnode->lock);
}
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index 51ee3dd79700..e9282e025912 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -92,8 +92,8 @@ responded:
}
}
- if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
- rtt_us < server->probe.rtt) {
+ rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us);
+ if (rtt_us < server->probe.rtt) {
server->probe.rtt = rtt_us;
alist->preferred = index;
have_result = true;
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 5c2729fc07e5..254580b1dc74 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -136,7 +136,7 @@ bad:
static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
{
- return ktime_divns(call->reply_time, NSEC_PER_SEC) + expiry;
+ return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry;
}
static void xdr_decode_AFSCallBack(const __be32 **_bp,
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 622363af4c1b..fd681eec49aa 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -227,6 +227,7 @@ static void afs_apply_status(struct afs_fs_cursor *fc,
set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
}
change_size = true;
+ data_changed = true;
} else if (vnode->status.type == AFS_FTYPE_DIR) {
/* Expected directory change is handled elsewhere so
* that we can locally edit the directory and save on a
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index c3ad582f9fd0..8d6582713fe7 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -159,7 +159,6 @@ struct afs_call {
bool need_attention; /* T if RxRPC poked us */
bool async; /* T if asynchronous */
bool upgrade; /* T to request service upgrade */
- bool have_reply_time; /* T if have got reply_time */
bool intr; /* T if interruptible */
bool unmarshalling_error; /* T if an unmarshalling error occurred */
u16 service_id; /* Actual service ID (after upgrade) */
@@ -173,7 +172,7 @@ struct afs_call {
} __attribute__((packed));
__be64 tmp64;
};
- ktime_t reply_time; /* Time of first reply packet */
+ ktime_t issue_time; /* Time of issue of operation */
};
struct afs_call_type {
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index 5334f1bd2bca..5171d6d99031 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -69,6 +69,7 @@ int afs_abort_to_error(u32 abort_code)
/* Unified AFS error table */
case UAEPERM: return -EPERM;
case UAENOENT: return -ENOENT;
+ case UAEAGAIN: return -EAGAIN;
case UAEACCES: return -EACCES;
case UAEBUSY: return -EBUSY;
case UAEEXIST: return -EEXIST;
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 6adab30a8399..30952f073fb4 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -428,6 +428,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
if (call->max_lifespan)
rxrpc_kernel_set_max_life(call->net->socket, rxcall,
call->max_lifespan);
+ call->issue_time = ktime_get_real();
/* send the request */
iov[0].iov_base = call->request;
@@ -489,7 +490,7 @@ error_kill_call:
if (call->async) {
if (cancel_work_sync(&call->async_work))
afs_put_call(call);
- afs_put_call(call);
+ afs_set_call_complete(call, ret, 0);
}
ac->error = ret;
@@ -532,12 +533,6 @@ static void afs_deliver_to_call(struct afs_call *call)
return;
}
- if (!call->have_reply_time &&
- rxrpc_kernel_get_reply_time(call->net->socket,
- call->rxcall,
- &call->reply_time))
- call->have_reply_time = true;
-
ret = call->type->deliver(call);
state = READ_ONCE(call->state);
if (ret == 0 && call->unmarshalling_error)
diff --git a/fs/afs/security.c b/fs/afs/security.c
index ce9de1e6742b..207a54ce4540 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -401,7 +401,7 @@ int afs_check_permit(struct afs_vnode *vnode, struct key *key,
int afs_permission(struct inode *inode, int mask)
{
struct afs_vnode *vnode = AFS_FS_I(inode);
- afs_access_t uninitialized_var(access);
+ afs_access_t access;
struct key *key;
int ret = 0;
diff --git a/fs/afs/server.c b/fs/afs/server.c
index d3a9288f7556..44985ca6602e 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -35,7 +35,7 @@ struct afs_server *afs_find_server(struct afs_net *net,
const struct afs_addr_list *alist;
struct afs_server *server = NULL;
unsigned int i;
- int seq = 0, diff;
+ int seq = 1, diff;
rcu_read_lock();
@@ -43,6 +43,7 @@ struct afs_server *afs_find_server(struct afs_net *net,
if (server)
afs_put_server(net, server, afs_server_trace_put_find_rsq);
server = NULL;
+ seq++; /* 2 on the 1st/lockless path, otherwise odd */
read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
if (srx->transport.family == AF_INET6) {
@@ -98,7 +99,7 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu
{
struct afs_server *server = NULL;
struct rb_node *p;
- int diff, seq = 0;
+ int diff, seq = 1;
_enter("%pU", uuid);
@@ -110,7 +111,7 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu
if (server)
afs_put_server(net, server, afs_server_trace_put_uuid_rsq);
server = NULL;
-
+ seq++; /* 2 on the 1st/lockless path, otherwise odd */
read_seqbegin_or_lock(&net->fs_lock, &seq);
p = net->fs_servers.rb_node;
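The seq = 1 initialisation plus the seq++ at the top of each pass is how read_seqbegin_or_lock() is meant to be driven: an even seq requests a lockless read, an odd seq takes the lock, so the first retry escalates instead of looping locklessly forever. A generic sketch:

	static DEFINE_SEQLOCK(some_seqlock);

	static void lookup(void)
	{
		int seq = 1;

		do {
			seq++;	/* 2 (even) on the first, lockless pass;
				 * odd on retry, which makes
				 * read_seqbegin_or_lock() take the lock */
			read_seqbegin_or_lock(&some_seqlock, &seq);
			/* racy read of the protected data goes here */
		} while (need_seqretry(&some_seqlock, seq));
		done_seqretry(&some_seqlock, seq);
	}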
diff --git a/fs/afs/super.c b/fs/afs/super.c
index eb04dcc54328..554119068ea4 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -391,6 +391,8 @@ static int afs_validate_fc(struct fs_context *fc)
return PTR_ERR(volume);
ctx->volume = volume;
+ if (volume->type != AFSVL_RWVOL)
+ ctx->flock_mode = afs_flock_mode_local;
}
return 0;
diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c
index 081b7e5b13f5..163069d9fc16 100644
--- a/fs/afs/vl_probe.c
+++ b/fs/afs/vl_probe.c
@@ -92,8 +92,8 @@ responded:
}
}
- if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
- rtt_us < server->probe.rtt) {
+ rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us);
+ if (rtt_us < server->probe.rtt) {
server->probe.rtt = rtt_us;
alist->preferred = index;
have_result = true;
diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
index 9a5ce9687779..370c27cae2e6 100644
--- a/fs/afs/vl_rotate.c
+++ b/fs/afs/vl_rotate.c
@@ -58,6 +58,12 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
}
/* Status load is ordered after lookup counter load */
+ if (cell->dns_status == DNS_LOOKUP_GOT_NOT_FOUND) {
+ pr_warn("No record of cell %s\n", cell->name);
+ vc->error = -ENOENT;
+ return false;
+ }
+
if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
vc->error = -EDESTADDRREQ;
return false;
@@ -276,6 +282,7 @@ failed:
*/
static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
{
+ struct afs_cell *cell = vc->cell;
static int count;
int i;
@@ -285,6 +292,9 @@ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
rcu_read_lock();
pr_notice("EDESTADDR occurred\n");
+ pr_notice("CELL: %s err=%d\n", cell->name, cell->error);
+ pr_notice("DNS: src=%u st=%u lc=%x\n",
+ cell->dns_source, cell->dns_status, cell->dns_lookup_count);
pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n",
vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error);
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 4310336b9bb8..4225bc766d46 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -221,7 +221,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
{
struct afs_server_list *new, *old, *discard;
struct afs_vldb_entry *vldb;
- char idbuf[16];
+ char idbuf[24];
int ret, idsz;
_enter("");
@@ -229,7 +229,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
/* We look up an ID by passing it as a decimal string in the
* operation's name parameter.
*/
- idsz = sprintf(idbuf, "%llu", volume->vid);
+ idsz = snprintf(idbuf, sizeof(idbuf), "%llu", volume->vid);
vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz);
if (IS_ERR(vldb)) {
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 3b19b009452a..fa85b359f325 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -241,8 +241,7 @@ static void xdr_decode_YFSCallBack(const __be32 **_bp,
struct afs_callback *cb = &scb->callback;
ktime_t cb_expiry;
- cb_expiry = call->reply_time;
- cb_expiry = ktime_add(cb_expiry, xdr_to_u64(x->expiration_time) * 100);
+ cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100);
cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC);
scb->have_cb = true;
*_bp += xdr_size(x);
diff --git a/fs/aio.c b/fs/aio.c
index fb92c32a6f1e..7a50c97cffc0 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -336,6 +336,9 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
spin_lock(&mm->ioctx_lock);
rcu_read_lock();
table = rcu_dereference(mm->ioctx_table);
+ if (!table)
+ goto out_unlock;
+
for (i = 0; i < table->nr; i++) {
struct kioctx *ctx;
@@ -349,6 +352,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
}
}
+out_unlock:
rcu_read_unlock();
spin_unlock(&mm->ioctx_lock);
return res;
@@ -566,6 +570,13 @@ void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
struct kioctx *ctx = req->ki_ctx;
unsigned long flags;
+ /*
+ * kiocb didn't come from aio or is neither a read nor a write, hence
+ * ignore it.
+ */
+ if (!(iocb->ki_flags & IOCB_AIO_RW))
+ return;
+
if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
return;
@@ -1451,7 +1462,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
req->ki_complete = aio_complete_rw;
req->private = NULL;
req->ki_pos = iocb->aio_offset;
- req->ki_flags = iocb_flags(req->ki_filp);
+ req->ki_flags = iocb_flags(req->ki_filp) | IOCB_AIO_RW;
if (iocb->aio_flags & IOCB_FLAG_RESFD)
req->ki_flags |= IOCB_EVENTFD;
req->ki_hint = ki_hint_validate(file_write_hint(req->ki_filp));
diff --git a/fs/attr.c b/fs/attr.c
index b4bbdbd4c8ca..95bbd49f75c8 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -134,6 +134,8 @@ EXPORT_SYMBOL(setattr_prepare);
*/
int inode_newsize_ok(const struct inode *inode, loff_t offset)
{
+ if (offset < 0)
+ return -EINVAL;
if (inode->i_size < offset) {
unsigned long limit;
@@ -251,9 +253,25 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de
}
if ((ia_valid & ATTR_MODE)) {
- umode_t amode = attr->ia_mode;
+ /*
+ * Don't allow changing the mode of symlinks:
+ *
+ * (1) The vfs doesn't take the mode of symlinks into account
+ * during permission checking.
+ * (2) This has never worked correctly. Most major filesystems
+ * did return EOPNOTSUPP due to interactions with POSIX ACLs
+ * but still updated the mode of the symlink.
+ * This inconsistency led system call wrapper providers such
+ * as libc to block changing the mode of symlinks with
+ * EOPNOTSUPP already.
+ * (3) To even do this in the first place one would have to use
+ * specific file descriptors and quite some effort.
+ */
+ if (S_ISLNK(inode->i_mode))
+ return -EOPNOTSUPP;
+
/* Flag setting protected by i_mutex */
- if (is_sxid(amode))
+ if (is_sxid(attr->ia_mode))
inode->i_flags &= ~S_NOSEC;
}
diff --git a/fs/autofs/waitq.c b/fs/autofs/waitq.c
index b04c528b19d3..1230bdf32989 100644
--- a/fs/autofs/waitq.c
+++ b/fs/autofs/waitq.c
@@ -32,8 +32,9 @@ void autofs_catatonic_mode(struct autofs_sb_info *sbi)
wq->status = -ENOENT; /* Magic is gone - report failure */
kfree(wq->name.name);
wq->name.name = NULL;
- wq->wait_ctr--;
wake_up_interruptible(&wq->queue);
+ if (!--wq->wait_ctr)
+ kfree(wq);
wq = nwq;
}
fput(sbi->pipe); /* Close the pipe */
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 8e8346a81723..ace587b66904 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -162,6 +162,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
set_personality(PER_LINUX);
#endif
setup_new_exec(bprm);
+ install_exec_creds(bprm);
current->mm->end_code = ex.a_text +
(current->mm->start_code = N_TXTADDR(ex));
@@ -174,7 +175,6 @@ static int load_aout_binary(struct linux_binprm * bprm)
if (retval < 0)
return retval;
- install_exec_creds(bprm);
if (N_MAGIC(ex) == OMAGIC) {
unsigned long text_addr, map_size;
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index d86ebd0dcc3d..28aef31a6e6f 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -345,14 +345,14 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
/* there's now no turning back... the old userspace image is dead,
* defunct, deceased, etc.
*/
+ SET_PERSONALITY(exec_params.hdr);
if (elf_check_fdpic(&exec_params.hdr))
- set_personality(PER_LINUX_FDPIC);
- else
- set_personality(PER_LINUX);
+ current->personality |= PER_LINUX_FDPIC;
if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
setup_new_exec(bprm);
+ install_exec_creds(bprm);
set_binfmt(&elf_fdpic_format);
@@ -434,9 +434,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
current->mm->start_stack = current->mm->start_brk + stack_size;
#endif
- install_exec_creds(bprm);
- if (create_elf_fdpic_tables(bprm, current->mm,
- &exec_params, &interp_params) < 0)
+ retval = create_elf_fdpic_tables(bprm, current->mm, &exec_params,
+ &interp_params);
+ if (retval < 0)
goto error;
kdebug("- start_code %lx", current->mm->start_code);
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index c999bc0c0691..22a7d7547a91 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -565,6 +565,7 @@ static int load_flat_file(struct linux_binprm *bprm,
/* OK, This is the point of no return */
set_personality(PER_LINUX_32BIT);
setup_new_exec(bprm);
+ install_exec_creds(bprm);
}
/*
@@ -992,8 +993,6 @@ static int load_flat_binary(struct linux_binprm *bprm)
}
}
- install_exec_creds(bprm);
-
set_binfmt(&flat_format);
#ifdef CONFIG_MMU
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 056a68292e15..23b563ff0dd7 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -44,10 +44,10 @@ static LIST_HEAD(entries);
static int enabled = 1;
enum {Enabled, Magic};
-#define MISC_FMT_PRESERVE_ARGV0 (1 << 31)
-#define MISC_FMT_OPEN_BINARY (1 << 30)
-#define MISC_FMT_CREDENTIALS (1 << 29)
-#define MISC_FMT_OPEN_FILE (1 << 28)
+#define MISC_FMT_PRESERVE_ARGV0 (1UL << 31)
+#define MISC_FMT_OPEN_BINARY (1UL << 30)
+#define MISC_FMT_CREDENTIALS (1UL << 29)
+#define MISC_FMT_OPEN_FILE (1UL << 28)
typedef struct {
struct list_head list;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index fa329c7eddf0..e528ad860143 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -2114,21 +2114,26 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
if ((start | len) & (bdev_logical_block_size(bdev) - 1))
return -EINVAL;
- /* Invalidate the page cache, including dirty pages. */
+ /*
+ * Invalidate the page cache, including dirty pages, for valid
+ * de-allocate mode calls to fallocate().
+ */
mapping = bdev->bd_inode->i_mapping;
- truncate_inode_pages_range(mapping, start, end);
switch (mode) {
case FALLOC_FL_ZERO_RANGE:
case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
+ truncate_inode_pages_range(mapping, start, end);
error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
break;
case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
+ truncate_inode_pages_range(mapping, start, end);
error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK);
break;
case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
+ truncate_inode_pages_range(mapping, start, end);
error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
GFP_KERNEL, 0);
break;
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index c701a19fac53..89411e96dcc0 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -136,6 +136,7 @@ struct share_check {
u64 root_objectid;
u64 inum;
int share_count;
+ bool have_delayed_delete_refs;
};
static inline int extent_is_shared(struct share_check *sc)
@@ -286,8 +287,10 @@ static void prelim_release(struct preftree *preftree)
struct prelim_ref *ref, *next_ref;
rbtree_postorder_for_each_entry_safe(ref, next_ref,
- &preftree->root.rb_root, rbnode)
+ &preftree->root.rb_root, rbnode) {
+ free_inode_elem_list(ref->inode_list);
free_pref(ref);
+ }
preftree->root = RB_ROOT_CACHED;
preftree->count = 0;
@@ -428,6 +431,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
u64 wanted_disk_byte = ref->wanted_disk_byte;
u64 count = 0;
u64 data_offset;
+ u8 type;
if (level != 0) {
eb = path->nodes[level];
@@ -482,6 +486,9 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
continue;
}
fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+ type = btrfs_file_extent_type(eb, fi);
+ if (type == BTRFS_FILE_EXTENT_INLINE)
+ goto next;
disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
data_offset = btrfs_file_extent_offset(eb, fi);
@@ -641,6 +648,18 @@ unode_aux_to_inode_list(struct ulist_node *node)
return (struct extent_inode_elem *)(uintptr_t)node->aux;
}
+static void free_leaf_list(struct ulist *ulist)
+{
+ struct ulist_node *node;
+ struct ulist_iterator uiter;
+
+ ULIST_ITER_INIT(&uiter);
+ while ((node = ulist_next(ulist, &uiter)))
+ free_inode_elem_list(unode_aux_to_inode_list(node));
+
+ ulist_free(ulist);
+}
+
/*
* We maintain three separate rbtrees: one for direct refs, one for
* indirect refs which have a key, and one for indirect refs which do not
@@ -755,7 +774,11 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
cond_resched();
}
out:
- ulist_free(parents);
+ /*
+ * We may have inode lists attached to refs in the parents ulist, so we
+ * must free them before freeing the ulist and its refs.
+ */
+ free_leaf_list(parents);
return ret;
}
@@ -812,16 +835,11 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
struct preftrees *preftrees, struct share_check *sc)
{
struct btrfs_delayed_ref_node *node;
- struct btrfs_delayed_extent_op *extent_op = head->extent_op;
struct btrfs_key key;
- struct btrfs_key tmp_op_key;
struct rb_node *n;
int count;
int ret = 0;
- if (extent_op && extent_op->update_key)
- btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
-
spin_lock(&head->lock);
for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
node = rb_entry(n, struct btrfs_delayed_ref_node,
@@ -847,10 +865,16 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
case BTRFS_TREE_BLOCK_REF_KEY: {
/* NORMAL INDIRECT METADATA backref */
struct btrfs_delayed_tree_ref *ref;
+ struct btrfs_key *key_ptr = NULL;
+
+ if (head->extent_op && head->extent_op->update_key) {
+ btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
+ key_ptr = &key;
+ }
ref = btrfs_delayed_node_to_tree_ref(node);
ret = add_indirect_ref(fs_info, preftrees, ref->root,
- &tmp_op_key, ref->level + 1,
+ key_ptr, ref->level + 1,
node->bytenr, count, sc,
GFP_ATOMIC);
break;
@@ -876,13 +900,22 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
key.offset = ref->offset;
/*
- * Found a inum that doesn't match our known inum, we
- * know it's shared.
+ * If we have a share check context and a reference for
+ * another inode, we can't exit immediately. This is
+ * because even if this is a BTRFS_ADD_DELAYED_REF
+ * reference we may find next a BTRFS_DROP_DELAYED_REF
+ * which cancels out this ADD reference.
+ *
+ * If this is a DROP reference and there was no previous
+ * ADD reference, then we need to signal that when we
+ * process references from the extent tree (through
+ * add_inline_refs() and add_keyed_refs()), we should
+ * not exit early if we find a reference for another
+ * inode, because one of the delayed DROP references
+ * may cancel that reference in the extent tree.
*/
- if (sc && sc->inum && ref->objectid != sc->inum) {
- ret = BACKREF_FOUND_SHARED;
- goto out;
- }
+ if (sc && count < 0)
+ sc->have_delayed_delete_refs = true;
ret = add_indirect_ref(fs_info, preftrees, ref->root,
&key, 0, node->bytenr, count, sc,
@@ -912,7 +945,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
}
if (!ret)
ret = extent_is_shared(sc);
-out:
+
spin_unlock(&head->lock);
return ret;
}
@@ -1015,7 +1048,8 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = btrfs_extent_data_ref_offset(leaf, dref);
- if (sc && sc->inum && key.objectid != sc->inum) {
+ if (sc && sc->inum && key.objectid != sc->inum &&
+ !sc->have_delayed_delete_refs) {
ret = BACKREF_FOUND_SHARED;
break;
}
@@ -1025,6 +1059,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
ret = add_indirect_ref(fs_info, preftrees, root,
&key, 0, bytenr, count,
sc, GFP_NOFS);
+
break;
}
default:
@@ -1114,7 +1149,8 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info,
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = btrfs_extent_data_ref_offset(leaf, dref);
- if (sc && sc->inum && key.objectid != sc->inum) {
+ if (sc && sc->inum && key.objectid != sc->inum &&
+ !sc->have_delayed_delete_refs) {
ret = BACKREF_FOUND_SHARED;
break;
}
@@ -1353,6 +1389,12 @@ again:
if (ret < 0)
goto out;
ref->inode_list = eie;
+ /*
+ * We transferred the list ownership to the ref,
+ * so set to NULL to avoid a double free in case
+ * an error happens after this.
+ */
+ eie = NULL;
}
ret = ulist_add_merge_ptr(refs, ref->parent,
ref->inode_list,
@@ -1378,6 +1420,14 @@ again:
eie->next = ref->inode_list;
}
eie = NULL;
+ /*
+ * We have transferred the inode list ownership from
+ * this ref to the ref we added to the 'refs' ulist.
+ * So set this ref's inode list to NULL to avoid
+ * use-after-free when our caller uses it or double
+ * frees in case an error happens before we return.
+ */
+ ref->inode_list = NULL;
}
cond_resched();
}
@@ -1394,24 +1444,6 @@ out:
return ret;
}
-static void free_leaf_list(struct ulist *blocks)
-{
- struct ulist_node *node = NULL;
- struct extent_inode_elem *eie;
- struct ulist_iterator uiter;
-
- ULIST_ITER_INIT(&uiter);
- while ((node = ulist_next(blocks, &uiter))) {
- if (!node->aux)
- continue;
- eie = unode_aux_to_inode_list(node);
- free_inode_elem_list(eie);
- node->aux = 0;
- }
-
- ulist_free(blocks);
-}
-
/*
* Finds all leafs with a reference to the specified combination of bytenr and
* offset. key_list_head will point to a list of corresponding keys (caller must
@@ -1537,6 +1569,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
.root_objectid = root->root_key.objectid,
.inum = inum,
.share_count = 0,
+ .have_delayed_delete_refs = false,
};
ulist_init(roots);
@@ -1571,6 +1604,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
break;
bytenr = node->val;
shared.share_count = 0;
+ shared.have_delayed_delete_refs = false;
cond_resched();
}
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index bcf19dfb0af3..278933cd3a09 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -2938,6 +2938,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
* attempt.
*/
wait_for_alloc = true;
+ force = CHUNK_ALLOC_NO_FORCE;
spin_unlock(&space_info->lock);
mutex_lock(&fs_info->chunk_mutex);
mutex_unlock(&fs_info->chunk_mutex);
diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
index 343400d49bd1..63205d2f4d84 100644
--- a/fs/btrfs/block-rsv.c
+++ b/fs/btrfs/block-rsv.c
@@ -29,7 +29,8 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
} else {
num_bytes = 0;
}
- if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
+ if (qgroup_to_release_ret &&
+ block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
qgroup_to_release = block_rsv->qgroup_rsv_reserved -
block_rsv->qgroup_rsv_size;
block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
@@ -391,7 +392,7 @@ struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
block_rsv = get_block_rsv(trans, root);
- if (unlikely(block_rsv->size == 0))
+ if (unlikely(btrfs_block_rsv_size(block_rsv) == 0))
goto try_reserve;
again:
ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
diff --git a/fs/btrfs/block-rsv.h b/fs/btrfs/block-rsv.h
index d1428bb73fc5..69770360917c 100644
--- a/fs/btrfs/block-rsv.h
+++ b/fs/btrfs/block-rsv.h
@@ -98,4 +98,20 @@ static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info,
btrfs_block_rsv_release(fs_info, block_rsv, 0);
}
+/*
+ * Get the size of a block reserve in a context where getting a stale value is
+ * acceptable, instead of accessing it directly and triggering a data race
+ * warning from KCSAN.
+ */
+static inline u64 btrfs_block_rsv_size(struct btrfs_block_rsv *rsv)
+{
+ u64 ret;
+
+ spin_lock(&rsv->lock);
+ ret = rsv->size;
+ spin_unlock(&rsv->lock);
+
+ return ret;
+}
+
#endif /* BTRFS_BLOCK_RSV_H */
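btrfs_block_rsv_size() trades a raw load of rsv->size for a short critical section: the value may already be stale when the lock is dropped, but the read itself is no longer a data race, so KCSAN stays quiet and the snapshot is consistent with concurrent updaters. A user-space analog of the same pattern:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct rsv {
	pthread_mutex_t lock;
	uint64_t size;
};

/*
 * Take the writers' lock just long enough to read. The caller still
 * only gets a point-in-time snapshot, but race detectors (TSan here,
 * KCSAN in the kernel) see a properly synchronized access.
 */
static uint64_t rsv_size(struct rsv *r)
{
	uint64_t ret;

	pthread_mutex_lock(&r->lock);
	ret = r->size;
	pthread_mutex_unlock(&r->lock);
	return ret;
}

int main(void)
{
	struct rsv r = { PTHREAD_MUTEX_INITIALIZER, 4096 };

	printf("%llu\n", (unsigned long long)rsv_size(&r));
	return 0;
}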
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 822c615840e8..608e41b61689 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -3589,6 +3589,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
ret = tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
if (ret) {
+ btrfs_tree_unlock(split);
+ free_extent_buffer(split);
btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -5138,10 +5140,12 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
struct btrfs_key key;
+ struct btrfs_key orig_key;
struct btrfs_disk_key found_key;
int ret;
btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
+ orig_key = key;
if (key.offset > 0) {
key.offset--;
@@ -5158,8 +5162,36 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
btrfs_release_path(path);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
+ if (ret <= 0)
return ret;
+
+ /*
+ * Previous key not found. Even if we were at slot 0 of the leaf we had
+ * before releasing the path and calling btrfs_search_slot(), we now may
+ * be in a slot pointing to the same original key - this can happen if
+ * after we released the path, one or more items were moved from a
+ * sibling leaf into the front of the leaf we had due to an insertion
+ * (see push_leaf_right()).
+ * If we hit this case and our slot is > 0, just decrement the slot
+ * so that the caller does not process the same key again, which may or
+ * may not break the caller, depending on its logic.
+ */
+ if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
+ btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
+ ret = comp_keys(&found_key, &orig_key);
+ if (ret == 0) {
+ if (path->slots[0] > 0) {
+ path->slots[0]--;
+ return 0;
+ }
+ /*
+ * At slot 0, same key as before, it means orig_key is
+ * the lowest, leftmost, key in the tree. We're done.
+ */
+ return 1;
+ }
+ }
+
btrfs_item_key(path->nodes[0], &found_key, 0);
ret = comp_keys(&found_key, &key);
/*
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index cd77c0621a55..b141a7ba4507 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -482,8 +482,6 @@ struct btrfs_swapfile_pin {
bool is_block_group;
};
-bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
-
enum {
BTRFS_FS_BARRIER,
BTRFS_FS_CLOSING_START,
@@ -2727,7 +2725,7 @@ struct btrfs_dir_item *
btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 dir,
- u64 objectid, const char *name, int name_len,
+ u64 index, const char *name, int name_len,
int mod);
struct btrfs_dir_item *
btrfs_search_dir_index_item(struct btrfs_root *root,
diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
index f4f531c4aa96..d3f16dc33d0f 100644
--- a/fs/btrfs/delalloc-space.c
+++ b/fs/btrfs/delalloc-space.c
@@ -324,9 +324,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
} else {
if (current->journal_info)
flush = BTRFS_RESERVE_FLUSH_LIMIT;
-
- if (btrfs_transaction_in_commit(fs_info))
- schedule_timeout(1);
}
if (delalloc_lock)
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index e96890475bac..eacc020b1419 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1175,20 +1175,33 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
ret = __btrfs_commit_inode_delayed_items(trans, path,
curr_node);
if (ret) {
- btrfs_release_delayed_node(curr_node);
- curr_node = NULL;
btrfs_abort_transaction(trans, ret);
break;
}
prev_node = curr_node;
curr_node = btrfs_next_delayed_node(curr_node);
+ /*
+ * See the comment below about releasing the path before releasing
+ * the node. If the commit of delayed items was successful the path
+ * should always be released, but in case of an error, it may
+ * point to locked extent buffers (a leaf at the very least).
+ */
+ ASSERT(path->nodes[0] == NULL);
btrfs_release_delayed_node(prev_node);
}
+ /*
+ * Release the path to avoid a potential deadlock and lockdep splat when
+ * releasing the delayed node, as that requires taking the delayed node's
+ * mutex. If another task starts running delayed items before we take
+ * the mutex, it will first lock the mutex and then it may try to lock
+ * the same btree path (leaf).
+ */
+ btrfs_free_path(path);
+
if (curr_node)
btrfs_release_delayed_node(curr_node);
- btrfs_free_path(path);
trans->block_rsv = block_rsv;
return ret;
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 1cb7f5d79765..4abc0db6527e 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -125,7 +125,7 @@ no_valid_dev_replace_entry_found:
if (btrfs_find_device(fs_info->fs_devices,
BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) {
btrfs_err(fs_info,
- "replace devid present without an active replace item");
+"replace without active item, run 'device scan --forget' on the target device");
ret = -EUCLEAN;
} else {
dev_replace->srcdev = NULL;
@@ -535,6 +535,23 @@ leave:
return ret;
}
+static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args)
+{
+ if (args->start.srcdevid == 0) {
+ if (memchr(args->start.srcdev_name, 0,
+ sizeof(args->start.srcdev_name)) == NULL)
+ return -ENAMETOOLONG;
+ } else {
+ args->start.srcdev_name[0] = 0;
+ }
+
+ if (memchr(args->start.tgtdev_name, 0,
+ sizeof(args->start.tgtdev_name)) == NULL)
+ return -ENAMETOOLONG;
+
+ return 0;
+}
+
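btrfs_check_replace_dev_names() relies on the memchr(buf, 0, sizeof(buf)) == NULL idiom: for a fixed-size name buffer copied from user space, the only safe way to treat it as a C string is to first prove that a NUL terminator exists somewhere inside the buffer. A small user-space demonstration:

#include <stdio.h>
#include <string.h>

#define NAME_LEN 8

/* 0 if buf contains a terminated string, -1 (reject) otherwise. */
static int check_name(const char *buf)
{
	if (memchr(buf, 0, NAME_LEN) == NULL)
		return -1;	/* no NUL within NAME_LEN bytes */
	return 0;
}

int main(void)
{
	char ok[NAME_LEN] = "sda";	/* remaining bytes are zeroed */
	char bad[NAME_LEN];

	memset(bad, 'x', sizeof(bad));	/* deliberately unterminated */
	printf("%d %d\n", check_name(ok), check_name(bad));	/* 0 -1 */
	return 0;
}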
int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args)
{
@@ -547,10 +564,9 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
default:
return -EINVAL;
}
-
- if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') ||
- args->start.tgtdev_name[0] == '\0')
- return -EINVAL;
+ ret = btrfs_check_replace_dev_names(args);
+ if (ret < 0)
+ return ret;
ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
args->start.srcdevid,
@@ -918,8 +934,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
up_write(&dev_replace->rwsem);
/* Scrub for replace must not be running in suspended state */
- ret = btrfs_scrub_cancel(fs_info);
- ASSERT(ret != -ENOTCONN);
+ btrfs_scrub_cancel(fs_info);
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 863367c2c620..98c6faa8ce15 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -171,10 +171,40 @@ out_free:
return 0;
}
+static struct btrfs_dir_item *btrfs_lookup_match_dir(
+ struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *key, const char *name,
+ int name_len, int mod)
+{
+ const int ins_len = (mod < 0 ? -1 : 0);
+ const int cow = (mod != 0);
+ int ret;
+
+ ret = btrfs_search_slot(trans, root, key, path, ins_len, cow);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ if (ret > 0)
+ return ERR_PTR(-ENOENT);
+
+ return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
+}
+
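The new helper gives all four lookup paths one calling convention: a real error comes back as ERR_PTR(-errno), "not found" as ERR_PTR(-ENOENT), and success as a pointer into the leaf. Callers that treat a missing item as a normal outcome then translate -ENOENT to NULL, as the hunks below do; a sketch of that caller-side pattern (fragment, kernel context assumed):

	di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
	if (IS_ERR(di)) {
		if (PTR_ERR(di) == -ENOENT)
			return NULL;	/* absence is not an error here */
		return di;		/* propagate real failures unchanged */
	}
	/* di points at the matching dir item within path->nodes[0]. */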
/*
- * lookup a directory item based on name. 'dir' is the objectid
- * we're searching in, and 'mod' tells us if you plan on deleting the
- * item (use mod < 0) or changing the options (use mod > 0)
+ * Lookup for a directory item by name.
+ *
+ * @trans: The transaction handle to use. Can be NULL if @mod is 0.
+ * @root: The root of the target tree.
+ * @path: Path to use for the search.
+ * @dir: The inode number (objectid) of the directory.
+ * @name: The name associated to the directory entry we are looking for.
+ * @name_len: The length of the name.
+ * @mod: Used to indicate if the tree search is meant for a read only
+ * lookup, for a modification lookup or for a deletion lookup, so
+ * its value should be 0, 1 or -1, respectively.
+ *
+ * Returns: NULL if the dir item does not exist, an error pointer if an error
+ * happened, or a pointer to a dir item if a dir item exists for the given name.
*/
struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -182,23 +212,18 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
const char *name, int name_len,
int mod)
{
- int ret;
struct btrfs_key key;
- int ins_len = mod < 0 ? -1 : 0;
- int cow = mod != 0;
+ struct btrfs_dir_item *di;
key.objectid = dir;
key.type = BTRFS_DIR_ITEM_KEY;
-
key.offset = btrfs_name_hash(name, name_len);
- ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
- if (ret < 0)
- return ERR_PTR(ret);
- if (ret > 0)
+ di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+ if (IS_ERR(di) && PTR_ERR(di) == -ENOENT)
return NULL;
- return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
+ return di;
}
int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
@@ -212,7 +237,6 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
int slot;
struct btrfs_path *path;
-
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -221,20 +245,20 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
key.type = BTRFS_DIR_ITEM_KEY;
key.offset = btrfs_name_hash(name, name_len);
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-
- /* return back any errors */
- if (ret < 0)
- goto out;
+ di = btrfs_lookup_match_dir(NULL, root, path, &key, name, name_len, 0);
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ /* Nothing found, we're safe */
+ if (ret == -ENOENT) {
+ ret = 0;
+ goto out;
+ }
- /* nothing found, we're safe */
- if (ret > 0) {
- ret = 0;
- goto out;
+ if (ret < 0)
+ goto out;
}
/* we found an item, look for our name in the item */
- di = btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
if (di) {
/* our exact name was found */
ret = -EEXIST;
@@ -261,35 +285,42 @@ out:
}
/*
- * lookup a directory item based on index. 'dir' is the objectid
- * we're searching in, and 'mod' tells us if you plan on deleting the
- * item (use mod < 0) or changing the options (use mod > 0)
+ * Lookup for a directory index item by name and index number.
+ *
+ * @trans: The transaction handle to use. Can be NULL if @mod is 0.
+ * @root: The root of the target tree.
+ * @path: Path to use for the search.
+ * @dir: The inode number (objectid) of the directory.
+ * @index: The index number.
+ * @name: The name associated to the directory entry we are looking for.
+ * @name_len: The length of the name.
+ * @mod: Used to indicate if the tree search is meant for a read only
+ * lookup, for a modification lookup or for a deletion lookup, so
+ * its value should be 0, 1 or -1, respectively.
*
- * The name is used to make sure the index really points to the name you were
- * looking for.
+ * Returns: NULL if the dir index item does not exist, an error pointer if an
+ * error happened, or a pointer to a dir item if the dir index item exists and
+ * matches the criteria (name and index number).
*/
struct btrfs_dir_item *
btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 dir,
- u64 objectid, const char *name, int name_len,
+ u64 index, const char *name, int name_len,
int mod)
{
- int ret;
+ struct btrfs_dir_item *di;
struct btrfs_key key;
- int ins_len = mod < 0 ? -1 : 0;
- int cow = mod != 0;
key.objectid = dir;
key.type = BTRFS_DIR_INDEX_KEY;
- key.offset = objectid;
+ key.offset = index;
- ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
- if (ret < 0)
- return ERR_PTR(ret);
- if (ret > 0)
- return ERR_PTR(-ENOENT);
- return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
+ di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+ if (di == ERR_PTR(-ENOENT))
+ return NULL;
+
+ return di;
}
struct btrfs_dir_item *
@@ -346,21 +377,18 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
const char *name, u16 name_len,
int mod)
{
- int ret;
struct btrfs_key key;
- int ins_len = mod < 0 ? -1 : 0;
- int cow = mod != 0;
+ struct btrfs_dir_item *di;
key.objectid = dir;
key.type = BTRFS_XATTR_ITEM_KEY;
key.offset = btrfs_name_hash(name, name_len);
- ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
- if (ret < 0)
- return ERR_PTR(ret);
- if (ret > 0)
+
+ di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+ if (IS_ERR(di) && PTR_ERR(di) == -ENOENT)
return NULL;
- return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
+ return di;
}
/*
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a4b3e6f6bf02..b4ed11b5f148 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2239,6 +2239,23 @@ static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
fs_info->csum_shash = csum_shash;
+ /*
+ * Check if the checksum implementation is a fast accelerated one.
+ * As-is this is a bit of a hack and should be replaced once the csum
+ * implementations provide that information themselves.
+ */
+ switch (csum_type) {
+ case BTRFS_CSUM_TYPE_CRC32:
+ if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
+ set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
+ break;
+ default:
+ break;
+ }
+
+ btrfs_info(fs_info, "using %s (%s) checksum algorithm",
+ btrfs_super_csum_name(csum_type),
+ crypto_shash_driver_name(csum_shash));
return 0;
}
@@ -2463,21 +2480,18 @@ static int validate_super(struct btrfs_fs_info *fs_info,
ret = -EINVAL;
}
- if (memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
- BTRFS_FSID_SIZE)) {
+ if (memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
btrfs_err(fs_info,
"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
- fs_info->super_copy->fsid, fs_info->fs_devices->fsid);
+ sb->fsid, fs_info->fs_devices->fsid);
ret = -EINVAL;
}
- if (btrfs_fs_incompat(fs_info, METADATA_UUID) &&
- memcmp(fs_info->fs_devices->metadata_uuid,
- fs_info->super_copy->metadata_uuid, BTRFS_FSID_SIZE)) {
+ if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb),
+ BTRFS_FSID_SIZE) != 0) {
btrfs_err(fs_info,
"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
- fs_info->super_copy->metadata_uuid,
- fs_info->fs_devices->metadata_uuid);
+ btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
ret = -EINVAL;
}
@@ -2970,6 +2984,20 @@ int open_ctree(struct super_block *sb,
err = -EINVAL;
goto fail_csum;
}
+ /*
+ * We have unsupported RO compat features. Although mounted read-only,
+ * we must not cause any metadata writes, including log replay, or we
+ * could break whatever the new feature requires.
+ */
+ if (unlikely(features && btrfs_super_log_root(disk_super) &&
+ !btrfs_test_opt(fs_info, NOLOGREPLAY))) {
+ btrfs_err(fs_info,
+"cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
+ features);
+ err = -EINVAL;
+ goto fail_alloc;
+ }
+
ret = btrfs_init_workqueues(fs_info, fs_devices);
if (ret) {
@@ -4075,6 +4103,11 @@ void close_ctree(struct btrfs_fs_info *fs_info)
ASSERT(list_empty(&fs_info->delayed_iputs));
set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
+ if (btrfs_check_quota_leak(fs_info)) {
+ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ btrfs_err(fs_info, "qgroup reserved space leaked");
+ }
+
btrfs_free_qgroup_config(fs_info);
ASSERT(list_empty(&fs_info->delalloc_roots));
@@ -4377,7 +4410,11 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
*/
inode = igrab(&btrfs_inode->vfs_inode);
if (inode) {
+ unsigned int nofs_flag;
+
+ nofs_flag = memalloc_nofs_save();
invalidate_inode_pages2(inode->i_mapping);
+ memalloc_nofs_restore(nofs_flag);
iput(inode);
}
spin_lock(&root->delalloc_lock);
@@ -4495,7 +4532,12 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
inode = cache->io_ctl.inode;
if (inode) {
+ unsigned int nofs_flag;
+
+ nofs_flag = memalloc_nofs_save();
invalidate_inode_pages2(inode->i_mapping);
+ memalloc_nofs_restore(nofs_flag);
+
BTRFS_I(inode)->generation = 0;
cache->io_ctl.inode = NULL;
iput(inode);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 93cceeba484c..6e4727304b7b 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -58,7 +58,7 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
}
struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
- u64 root_objectid, u32 generation,
+ u64 root_objectid, u64 generation,
int check_generation)
{
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h
index f32f4113c976..5afb7ca42828 100644
--- a/fs/btrfs/export.h
+++ b/fs/btrfs/export.h
@@ -19,7 +19,7 @@ struct btrfs_fid {
} __attribute__ ((packed));
struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
- u64 root_objectid, u32 generation,
+ u64 root_objectid, u64 generation,
int check_generation);
struct dentry *btrfs_get_parent(struct dentry *child);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 19d2104c0462..a28b0eafb65a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -895,6 +895,11 @@ again:
err = -ENOENT;
goto out;
} else if (WARN_ON(ret)) {
+ btrfs_print_leaf(path->nodes[0]);
+ btrfs_err(fs_info,
+"extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu",
+ bytenr, num_bytes, parent, root_objectid, owner,
+ offset);
err = -EIO;
goto out;
}
@@ -1238,7 +1243,8 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
u64 bytes_left, end;
u64 aligned_start = ALIGN(start, 1 << 9);
- if (WARN_ON(start != aligned_start)) {
+ /* Adjust the range to be aligned to 512B sectors if necessary. */
+ if (start != aligned_start) {
len -= aligned_start - start;
len = round_down(len, 1 << 9);
start = aligned_start;
@@ -1676,12 +1682,12 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
parent = ref->parent;
ref_root = ref->root;
- if (node->ref_mod != 1) {
+ if (unlikely(node->ref_mod != 1)) {
btrfs_err(trans->fs_info,
- "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
+ "btree block %llu has %d references rather than 1: action %d ref_root %llu parent %llu",
node->bytenr, node->ref_mod, node->action, ref_root,
parent);
- return -EIO;
+ return -EUCLEAN;
}
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
BUG_ON(!extent_op || !extent_op->update_flags);
@@ -3989,8 +3995,11 @@ have_block_group:
ret = 0;
}
- if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
+ if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) {
+ if (!cache_block_group_error)
+ cache_block_group_error = -EIO;
goto loop;
+ }
/*
* Ok we want to try and use the cluster allocator, so
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 95ddeb477797..04788940afaf 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4024,11 +4024,12 @@ retry:
free_extent_buffer(eb);
/*
- * the filesystem may choose to bump up nr_to_write.
+ * The filesystem may choose to bump up nr_to_write.
* We have to make sure to honor the new nr_to_write
- * at any time
+ * at any time.
*/
- nr_to_write_done = wbc->nr_to_write <= 0;
+ nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
+ wbc->nr_to_write <= 0);
}
pagevec_release(&pvec);
cond_resched();
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 61b82c69eed5..1a7183cdfe95 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -499,7 +499,9 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
bytes_left), GFP_KERNEL);
memalloc_nofs_restore(nofs_flag);
- BUG_ON(!sums); /* -ENOMEM */
+ if (!sums)
+ return BLK_STS_RESOURCE;
+
sums->len = bytes_left;
ordered = btrfs_lookup_ordered_extent(inode,
offset);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index d2d32fed8f2e..0cb93f73acb2 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -784,15 +784,16 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
}
spin_lock(&ctl->tree_lock);
ret = link_free_space(ctl, e);
- ctl->total_bitmaps++;
- ctl->op->recalc_thresholds(ctl);
- spin_unlock(&ctl->tree_lock);
if (ret) {
+ spin_unlock(&ctl->tree_lock);
btrfs_err(fs_info,
"Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
}
+ ctl->total_bitmaps++;
+ ctl->op->recalc_thresholds(ctl);
+ spin_unlock(&ctl->tree_lock);
list_add_tail(&e->list, &bitmaps);
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 7755a0362a3a..c89e85a7da7d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6830,7 +6830,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
if (IS_ERR(trans))
return PTR_ERR(trans);
- err = btrfs_find_free_ino(root, &objectid);
+ err = btrfs_find_free_objectid(root, &objectid);
if (err)
goto out_unlock;
@@ -6894,7 +6894,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
if (IS_ERR(trans))
return PTR_ERR(trans);
- err = btrfs_find_free_ino(root, &objectid);
+ err = btrfs_find_free_objectid(root, &objectid);
if (err)
goto out_unlock;
@@ -7039,7 +7039,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (IS_ERR(trans))
return PTR_ERR(trans);
- err = btrfs_find_free_ino(root, &objectid);
+ err = btrfs_find_free_objectid(root, &objectid);
if (err)
goto out_fail;
@@ -9751,8 +9751,6 @@ static int btrfs_rename_exchange(struct inode *old_dir,
/* force full log commit if subvolume involved. */
btrfs_set_log_full_commit(trans);
} else {
- btrfs_pin_log_trans(root);
- root_log_pinned = true;
ret = btrfs_insert_inode_ref(trans, dest,
new_dentry->d_name.name,
new_dentry->d_name.len,
@@ -9768,8 +9766,6 @@ static int btrfs_rename_exchange(struct inode *old_dir,
/* force full log commit if subvolume involved. */
btrfs_set_log_full_commit(trans);
} else {
- btrfs_pin_log_trans(dest);
- dest_log_pinned = true;
ret = btrfs_insert_inode_ref(trans, root,
old_dentry->d_name.name,
old_dentry->d_name.len,
@@ -9797,6 +9793,29 @@ static int btrfs_rename_exchange(struct inode *old_dir,
BTRFS_I(new_inode), 1);
}
+ /*
+ * Now pin the logs of the roots. We do it to ensure that no other task
+ * can sync the logs while we are in progress with the rename, because
+ * that could result in an inconsistency in case any of the inodes that
+ * are part of this rename operation were logged before.
+ *
+ * We pin the logs even if at this precise moment none of the inodes was
+ * logged before. This is because right after we checked for that, some
+ * other task fsyncing some other inode not involved with this rename
+ * operation could log that one of our inodes exists.
+ *
+ * We don't need to pin the logs before the above calls to
+ * btrfs_insert_inode_ref(), since those don't ever need to change a log.
+ */
+ if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
+ btrfs_pin_log_trans(root);
+ root_log_pinned = true;
+ }
+ if (new_ino != BTRFS_FIRST_FREE_OBJECTID) {
+ btrfs_pin_log_trans(dest);
+ dest_log_pinned = true;
+ }
+
/* src is a subvolume */
if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
@@ -9911,7 +9930,7 @@ static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
u64 objectid;
u64 index;
- ret = btrfs_find_free_ino(root, &objectid);
+ ret = btrfs_find_free_objectid(root, &objectid);
if (ret)
return ret;
@@ -10046,8 +10065,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
/* force full log commit if subvolume involved. */
btrfs_set_log_full_commit(trans);
} else {
- btrfs_pin_log_trans(root);
- log_pinned = true;
ret = btrfs_insert_inode_ref(trans, dest,
new_dentry->d_name.name,
new_dentry->d_name.len,
@@ -10071,6 +10088,25 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
} else {
+ /*
+ * Now pin the log. We do it to ensure that no other task can
+ * sync the log while we are in progress with the rename, as
+ * that could result in an inconsistency in case any of the
+ * inodes that are part of this rename operation were logged
+ * before.
+ *
+ * We pin the log even if at this precise moment none of the
+ * inodes was logged before. This is because right after we
+ * checked for that, some other task fsyncing some other inode
+ * not involved with this rename operation could log that one of
+ * our inodes exists.
+ *
+ * We don't need to pin the logs before the above call to
+ * btrfs_insert_inode_ref(), since that does not need to change
+ * a log.
+ */
+ btrfs_pin_log_trans(root);
+ log_pinned = true;
ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
BTRFS_I(d_inode(old_dentry)),
old_dentry->d_name.name,
@@ -10380,7 +10416,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
if (IS_ERR(trans))
return PTR_ERR(trans);
- err = btrfs_find_free_ino(root, &objectid);
+ err = btrfs_find_free_objectid(root, &objectid);
if (err)
goto out_unlock;
@@ -10663,7 +10699,7 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_find_free_ino(root, &objectid);
+ ret = btrfs_find_free_objectid(root, &objectid);
if (ret)
goto out;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 675112aa998f..874a441dc8b5 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1847,6 +1847,15 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
* are limited to own subvolumes only
*/
ret = -EPERM;
+ } else if (btrfs_ino(BTRFS_I(src_inode)) != BTRFS_FIRST_FREE_OBJECTID) {
+ /*
+ * Snapshots must be made with the src_inode referring
+ * to the subvolume inode, otherwise the permission
+ * checking above is useless because we may have
+ * permission on a lower directory but not the subvol
+ * itself.
+ */
+ ret = -EINVAL;
} else {
ret = btrfs_mksubvol(&file->f_path, name, namelen,
BTRFS_I(src_inode)->root,
@@ -2110,7 +2119,7 @@ static noinline int key_in_sk(struct btrfs_key *key,
static noinline int copy_to_sk(struct btrfs_path *path,
struct btrfs_key *key,
struct btrfs_ioctl_search_key *sk,
- size_t *buf_size,
+ u64 *buf_size,
char __user *ubuf,
unsigned long *sk_offset,
int *num_found)
@@ -2242,7 +2251,7 @@ out:
static noinline int search_ioctl(struct inode *inode,
struct btrfs_ioctl_search_key *sk,
- size_t *buf_size,
+ u64 *buf_size,
char __user *ubuf)
{
struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
@@ -2314,7 +2323,7 @@ static noinline int btrfs_ioctl_tree_search(struct file *file,
struct btrfs_ioctl_search_key sk;
struct inode *inode;
int ret;
- size_t buf_size;
+ u64 buf_size;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -2348,8 +2357,8 @@ static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
struct btrfs_ioctl_search_args_v2 args;
struct inode *inode;
int ret;
- size_t buf_size;
- const size_t buf_limit = SZ_16M;
+ u64 buf_size;
+ const u64 buf_limit = SZ_16M;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
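The size_t to u64 switch matters on 32-bit kernels, where size_t is only 4 bytes: btrfs_ioctl_search_args_v2 carries buf_size as a 64-bit field, and storing it in a size_t truncates it on such kernels, defeating the later limit checks. A minimal illustration of the truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t buf_size = 0x100000001ULL;	/* 4 GiB + 1, from userspace */
	uint32_t as_size_t32 = (uint32_t)buf_size; /* what a 32-bit size_t keeps */

	/* Prints: 4294967297 truncates to 1 */
	printf("%llu truncates to %u\n",
	       (unsigned long long)buf_size, as_size_t32);
	return 0;
}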
@@ -2824,6 +2833,8 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
}
}
+ btrfs_free_path(path);
+ path = NULL;
if (copy_to_user(argp, subvol_info, sizeof(*subvol_info)))
ret = -EFAULT;
@@ -2914,6 +2925,8 @@ static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp)
}
out:
+ btrfs_free_path(path);
+
if (!ret || ret == -EOVERFLOW) {
rootrefs->num_items = found;
/* update min_treeid for next search */
@@ -2925,7 +2938,6 @@ out:
}
kfree(rootrefs);
- btrfs_free_path(path);
return ret;
}
@@ -3090,6 +3102,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
kfree(range);
goto out;
}
+ if (range->flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
/* compression requires us to start the IO */
if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
@@ -3296,13 +3312,10 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
di_args->bytes_used = btrfs_device_get_bytes_used(dev);
di_args->total_bytes = btrfs_device_get_total_bytes(dev);
memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
- if (dev->name) {
- strncpy(di_args->path, rcu_str_deref(dev->name),
- sizeof(di_args->path) - 1);
- di_args->path[sizeof(di_args->path) - 1] = 0;
- } else {
+ if (dev->name)
+ strscpy(di_args->path, rcu_str_deref(dev->name), sizeof(di_args->path));
+ else
di_args->path[0] = '\0';
- }
out:
rcu_read_unlock();
@@ -4143,7 +4156,7 @@ static void get_block_group_info(struct list_head *groups_list,
static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
void __user *arg)
{
- struct btrfs_ioctl_space_args space_args;
+ struct btrfs_ioctl_space_args space_args = { 0 };
struct btrfs_ioctl_space_info space;
struct btrfs_ioctl_space_info *dest;
struct btrfs_ioctl_space_info *dest_orig;
@@ -4339,6 +4352,11 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
if (IS_ERR(sa))
return PTR_ERR(sa);
+ if (sa->flags & ~BTRFS_SCRUB_SUPPORTED_FLAGS) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
ret = mnt_want_write_file(file);
if (ret)
@@ -4515,6 +4533,8 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
ipath->fspath->val[i] = rel_ptr;
}
+ btrfs_free_path(path);
+ path = NULL;
ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
ipath->fspath, size);
if (ret) {
@@ -4585,21 +4605,20 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
size = min_t(u32, loi->size, SZ_16M);
}
- path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
-
inodes = init_data_container(size);
if (IS_ERR(inodes)) {
ret = PTR_ERR(inodes);
- inodes = NULL;
- goto out;
+ goto out_loi;
}
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
build_ino_list, inodes, ignore_offset);
+ btrfs_free_path(path);
if (ret == -EINVAL)
ret = -ENOENT;
if (ret < 0)
@@ -4611,7 +4630,6 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
ret = -EFAULT;
out:
- btrfs_free_path(path);
kvfree(inodes);
out_loi:
kfree(loi);
@@ -4912,7 +4930,9 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
}
/* update qgroup status and info */
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
err = btrfs_run_qgroups(trans);
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
if (err < 0)
btrfs_handle_fs_error(fs_info, err,
"failed to update qgroup status and info");
@@ -4954,6 +4974,11 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
goto out;
}
+ if (sa->create && is_fstree(sa->qgroupid)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
@@ -5508,7 +5533,7 @@ static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat)
if (compat) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
- struct btrfs_ioctl_send_args_32 args32;
+ struct btrfs_ioctl_send_args_32 args32 = { 0 };
ret = copy_from_user(&args32, argp, sizeof(args32));
if (ret)
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index f4edadf1067f..cec6c283a691 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -109,10 +109,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
pr_cont("shared data backref parent %llu count %u\n",
offset, btrfs_shared_data_ref_count(eb, sref));
/*
- * offset is supposed to be a tree block which
- * must be aligned to nodesize.
+ * Offset is supposed to be a tree block which must be
+ * aligned to sectorsize.
*/
- if (!IS_ALIGNED(offset, eb->fs_info->nodesize))
+ if (!IS_ALIGNED(offset, eb->fs_info->sectorsize))
pr_info(
"\t\t\t(parent %llu not aligned to sectorsize %u)\n",
offset, eb->fs_info->sectorsize);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5a3006c75d63..42412a4554d3 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -504,6 +504,49 @@ out:
return ret < 0 ? ret : 0;
}
+static u64 btrfs_qgroup_subvolid(u64 qgroupid)
+{
+ return (qgroupid & ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1));
+}
+
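A qgroup ID packs a 16-bit level and a 48-bit subvolume ID into one u64 (BTRFS_QGROUP_LEVEL_SHIFT is 48), which is why tools print qgroups as level/subvolid and why the helper masks off the low 48 bits. A quick round-trip under that layout:

#include <inttypes.h>
#include <stdio.h>

#define QGROUP_LEVEL_SHIFT 48	/* mirrors BTRFS_QGROUP_LEVEL_SHIFT */

static uint64_t qgroup_level(uint64_t qgroupid)
{
	return qgroupid >> QGROUP_LEVEL_SHIFT;
}

static uint64_t qgroup_subvolid(uint64_t qgroupid)
{
	return qgroupid & ((1ULL << QGROUP_LEVEL_SHIFT) - 1);
}

int main(void)
{
	/* "1/100" as printed by btrfs-progs. */
	uint64_t id = (1ULL << QGROUP_LEVEL_SHIFT) | 100;

	printf("%" PRIu64 "/%" PRIu64 "\n", qgroup_level(id), qgroup_subvolid(id));
	return 0;
}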
+/*
+ * Called in close_ctree() when quota is still enabled. This verifies we don't
+ * leak some reserved space.
+ *
+ * Return false if no reserved space is left.
+ * Return true if some reserved space is leaked.
+ */
+bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
+{
+ struct rb_node *node;
+ bool ret = false;
+
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ return ret;
+ /*
+ * Since we're unmounting, there is no race and no need to grab qgroup
+ * lock. We also iterate in-order rather than post-order to provide a
+ * more user-friendly, sorted result.
+ */
+ for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
+ struct btrfs_qgroup *qgroup;
+ int i;
+
+ qgroup = rb_entry(node, struct btrfs_qgroup, node);
+ for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
+ if (qgroup->rsv.values[i]) {
+ ret = true;
+ btrfs_warn(fs_info,
+ "qgroup %llu/%llu has unreleased space, type %d rsv %llu",
+ btrfs_qgroup_level(qgroup->qgroupid),
+ btrfs_qgroup_subvolid(qgroup->qgroupid),
+ i, qgroup->rsv.values[i]);
+ }
+ }
+ }
+ return ret;
+}
+
/*
* This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
* first two are in single-threaded paths. And for the third one, we have set
@@ -1075,6 +1118,21 @@ out_add_root:
fs_info->qgroup_rescan_running = true;
btrfs_queue_work(fs_info->qgroup_rescan_workers,
&fs_info->qgroup_rescan_work);
+ } else {
+ /*
+ * We have set both BTRFS_FS_QUOTA_ENABLED and
+ * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
+ * -EINPROGRESS. That can happen because someone started the
+ * rescan worker by calling quota rescan ioctl before we
+ * attempted to initialize the rescan worker. Failure due to
+ * quotas disabled in the meanwhile is not possible, because
+ * we are holding a write lock on fs_info->subvol_sem, which
+ * is also acquired when disabling quotas.
+ * Ignore such an error; any other error would need to undo
+ * everything we did in the transaction we just committed.
+ */
+ ASSERT(ret == -EINPROGRESS);
+ ret = 0;
}
out_free_path:
@@ -1106,12 +1164,23 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
int ret = 0;
/*
- * We need to have subvol_sem write locked, to prevent races between
- * concurrent tasks trying to disable quotas, because we will unlock
- * and relock qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
+ * We need to have subvol_sem write locked to prevent races with
+ * snapshot creation.
*/
lockdep_assert_held_write(&fs_info->subvol_sem);
+ /*
+ * Lock the cleaner mutex to prevent races with concurrent relocation,
+ * because relocation may be building backrefs for blocks of the quota
+ * root while we are deleting the root. This is like dropping fs roots
+ * of deleted snapshots/subvolumes, we need the same protection.
+ *
+ * This also prevents races between concurrent tasks trying to disable
+ * quotas, because we will unlock and relock qgroup_ioctl_lock across
+ * BTRFS_FS_QUOTA_ENABLED changes.
+ */
+ mutex_lock(&fs_info->cleaner_mutex);
+
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root)
goto out;
@@ -1174,7 +1243,9 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
goto out;
}
+ spin_lock(&fs_info->trans_lock);
list_del(&quota_root->dirty_list);
+ spin_unlock(&fs_info->trans_lock);
btrfs_tree_lock(quota_root->node);
btrfs_clean_tree_block(quota_root->node);
@@ -1191,6 +1262,7 @@ out:
btrfs_end_transaction(trans);
else if (trans)
ret = btrfs_end_transaction(trans);
+ mutex_unlock(&fs_info->cleaner_mutex);
return ret;
}
@@ -1323,7 +1395,6 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
u64 dst)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *quota_root;
struct btrfs_qgroup *parent;
struct btrfs_qgroup *member;
struct btrfs_qgroup_list *list;
@@ -1339,9 +1410,8 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
return -ENOMEM;
mutex_lock(&fs_info->qgroup_ioctl_lock);
- quota_root = fs_info->quota_root;
- if (!quota_root) {
- ret = -EINVAL;
+ if (!fs_info->quota_root) {
+ ret = -ENOTCONN;
goto out;
}
member = find_qgroup_rb(fs_info, src);
@@ -1387,7 +1457,6 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
u64 dst)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *quota_root;
struct btrfs_qgroup *parent;
struct btrfs_qgroup *member;
struct btrfs_qgroup_list *list;
@@ -1400,9 +1469,8 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
if (!tmp)
return -ENOMEM;
- quota_root = fs_info->quota_root;
- if (!quota_root) {
- ret = -EINVAL;
+ if (!fs_info->quota_root) {
+ ret = -ENOTCONN;
goto out;
}
@@ -1467,11 +1535,11 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
int ret = 0;
mutex_lock(&fs_info->qgroup_ioctl_lock);
- quota_root = fs_info->quota_root;
- if (!quota_root) {
- ret = -EINVAL;
+ if (!fs_info->quota_root) {
+ ret = -ENOTCONN;
goto out;
}
+ quota_root = fs_info->quota_root;
qgroup = find_qgroup_rb(fs_info, qgroupid);
if (qgroup) {
ret = -EEXIST;
@@ -1493,18 +1561,25 @@ out:
return ret;
}
+static bool qgroup_has_usage(struct btrfs_qgroup *qgroup)
+{
+ return (qgroup->rfer > 0 || qgroup->rfer_cmpr > 0 ||
+ qgroup->excl > 0 || qgroup->excl_cmpr > 0 ||
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] > 0 ||
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] > 0 ||
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > 0);
+}
+
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
struct btrfs_qgroup_list *list;
int ret = 0;
mutex_lock(&fs_info->qgroup_ioctl_lock);
- quota_root = fs_info->quota_root;
- if (!quota_root) {
- ret = -EINVAL;
+ if (!fs_info->quota_root) {
+ ret = -ENOTCONN;
goto out;
}
@@ -1514,6 +1589,11 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
goto out;
}
+ if (is_fstree(qgroupid) && qgroup_has_usage(qgroup)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
/* Check if there are no children of this qgroup */
if (!list_empty(&qgroup->members)) {
ret = -EBUSY;
@@ -1545,7 +1625,6 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
struct btrfs_qgroup_limit *limit)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
int ret = 0;
/* Sometimes we would want to clear the limit on this qgroup.
@@ -1555,9 +1634,8 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
const u64 CLEAR_VALUE = -1;
mutex_lock(&fs_info->qgroup_ioctl_lock);
- quota_root = fs_info->quota_root;
- if (!quota_root) {
- ret = -EINVAL;
+ if (!fs_info->quota_root) {
+ ret = -ENOTCONN;
goto out;
}
@@ -2657,15 +2735,23 @@ cleanup:
}
/*
- * called from commit_transaction. Writes all changed qgroups to disk.
+ * Writes all changed qgroups to disk.
+ * Called by the transaction commit path and the qgroup assign ioctl.
*/
int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *quota_root = fs_info->quota_root;
int ret = 0;
- if (!quota_root)
+ /*
+ * In case we are called from the qgroup assign ioctl, assert that we
+ * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
+ * disable operation (ioctl) and access a freed quota root.
+ */
+ if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
+ lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
+
+ if (!fs_info->quota_root)
return ret;
spin_lock(&fs_info->qgroup_lock);
@@ -2809,14 +2895,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
dstgroup->rsv_excl = inherit->lim.rsv_excl;
- ret = update_qgroup_limit_item(trans, dstgroup);
- if (ret) {
- fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
- btrfs_info(fs_info,
- "unable to update quota limit for %llu",
- dstgroup->qgroupid);
- goto unlock;
- }
+ qgroup_dirty(fs_info, dstgroup);
}
if (srcid) {
@@ -2935,7 +3014,6 @@ static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
enum btrfs_qgroup_rsv_type type)
{
- struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
struct btrfs_fs_info *fs_info = root->fs_info;
u64 ref_root = root->root_key.objectid;
@@ -2954,8 +3032,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
enforce = false;
spin_lock(&fs_info->qgroup_lock);
- quota_root = fs_info->quota_root;
- if (!quota_root)
+ if (!fs_info->quota_root)
goto out;
qgroup = find_qgroup_rb(fs_info, ref_root);
@@ -3022,7 +3099,6 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
u64 ref_root, u64 num_bytes,
enum btrfs_qgroup_rsv_type type)
{
- struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
struct ulist_node *unode;
struct ulist_iterator uiter;
@@ -3040,8 +3116,7 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
}
spin_lock(&fs_info->qgroup_lock);
- quota_root = fs_info->quota_root;
- if (!quota_root)
+ if (!fs_info->quota_root)
goto out;
qgroup = find_qgroup_rb(fs_info, ref_root);
@@ -3188,7 +3263,8 @@ out:
static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
{
return btrfs_fs_closing(fs_info) ||
- test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+ test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state) ||
+ !test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
}
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
@@ -3200,6 +3276,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
int err = -ENOMEM;
int ret = 0;
bool stopped = false;
+ bool did_leaf_rescans = false;
path = btrfs_alloc_path();
if (!path)
@@ -3218,11 +3295,10 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
err = PTR_ERR(trans);
break;
}
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
- err = -EINTR;
- } else {
- err = qgroup_rescan_leaf(trans, path);
- }
+
+ err = qgroup_rescan_leaf(trans, path);
+ did_leaf_rescans = true;
+
if (err > 0)
btrfs_commit_transaction(trans);
else
@@ -3236,22 +3312,29 @@ out:
if (err > 0 &&
fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
- } else if (err < 0) {
+ } else if (err < 0 || stopped) {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
}
mutex_unlock(&fs_info->qgroup_rescan_lock);
/*
- * only update status, since the previous part has already updated the
- * qgroup info.
+ * Only update status, since the previous part has already updated the
+ * qgroup info, and only if we did any actual work. This also prevents a
+ * race with a concurrent quota disable, which has already set
+ * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
+ * btrfs_quota_disable().
*/
- trans = btrfs_start_transaction(fs_info->quota_root, 1);
- if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ if (did_leaf_rescans) {
+ trans = btrfs_start_transaction(fs_info->quota_root, 1);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+ trans = NULL;
+ btrfs_err(fs_info,
+ "fail to start transaction for status update: %d",
+ err);
+ }
+ } else {
trans = NULL;
- btrfs_err(fs_info,
- "fail to start transaction for status update: %d",
- err);
}
mutex_lock(&fs_info->qgroup_rescan_lock);
@@ -3924,7 +4007,6 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
int num_bytes)
{
- struct btrfs_root *quota_root = fs_info->quota_root;
struct btrfs_qgroup *qgroup;
struct ulist_node *unode;
struct ulist_iterator uiter;
@@ -3932,7 +4014,7 @@ static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
if (num_bytes == 0)
return;
- if (!quota_root)
+ if (!fs_info->quota_root)
return;
spin_lock(&fs_info->qgroup_lock);
@@ -4267,4 +4349,5 @@ void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
ulist_free(entry->old_roots);
kfree(entry);
}
+ *root = RB_ROOT;
}
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 0a2659685ad6..94bdfb89505e 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -416,5 +416,6 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *eb);
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
+bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info);
#endif
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 7ac679ed2b6c..226a17a335da 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -334,6 +334,9 @@ static void merge_rbio(struct btrfs_raid_bio *dest,
{
bio_list_merge(&dest->bio_list, &victim->bio_list);
dest->bio_list_bytes += victim->bio_list_bytes;
+ /* Also inherit the bitmaps from @victim. */
+ bitmap_or(dest->dbitmap, victim->dbitmap, dest->dbitmap,
+ dest->stripe_npages);
dest->generic_bio_cnt += victim->generic_bio_cnt;
bio_list_init(&victim->bio_list);
}
@@ -878,6 +881,12 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
if (rbio->generic_bio_cnt)
btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
+ /*
+ * Clear the data bitmap, as the rbio may be cached for later usage.
+ * Do this before unlock_stripe() so there will be no new bio
+ * for this rbio.
+ */
+ bitmap_clear(rbio->dbitmap, 0, rbio->stripe_npages);
/*
* At this moment, rbio->bio_list is empty, however since rbio does not
@@ -1212,6 +1221,9 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
else
BUG();
+ /* We should have at least one data sector. */
+ ASSERT(bitmap_weight(rbio->dbitmap, rbio->stripe_npages));
+
/* at this point we either have a full stripe,
* or we've read the full stripe from the drive.
* recalculate the parity and write the new results.
@@ -1285,6 +1297,11 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
struct page *page;
+
+ /* This vertical stripe has no data, skip it. */
+ if (!test_bit(pagenr, rbio->dbitmap))
+ continue;
+
if (stripe < rbio->nr_data) {
page = page_in_rbio(rbio, stripe, pagenr, 1);
if (!page)
@@ -1309,6 +1326,11 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
struct page *page;
+
+ /* This vertical stripe has no data, skip it. */
+ if (!test_bit(pagenr, rbio->dbitmap))
+ continue;
+
if (stripe < rbio->nr_data) {
page = page_in_rbio(rbio, stripe, pagenr, 1);
if (!page)
@@ -1748,6 +1770,33 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
run_plug(plug);
}
+/* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
+static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
+{
+ const struct btrfs_fs_info *fs_info = rbio->fs_info;
+ const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ const u64 full_stripe_start = rbio->bbio->raid_map[0];
+ const u32 orig_len = orig_bio->bi_iter.bi_size;
+ const u32 sectorsize = fs_info->sectorsize;
+ u64 cur_logical;
+
+ ASSERT(orig_logical >= full_stripe_start &&
+ orig_logical + orig_len <= full_stripe_start +
+ rbio->nr_data * rbio->stripe_len);
+
+ bio_list_add(&rbio->bio_list, orig_bio);
+ rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
+
+ /* Update the dbitmap. */
+ for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
+ cur_logical += sectorsize) {
+ int bit = ((u32)(cur_logical - full_stripe_start) >>
+ PAGE_SHIFT) % rbio->stripe_npages;
+
+ set_bit(bit, rbio->dbitmap);
+ }
+}
+
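The bit computation in rbio_add_bio() maps a logical address to a page slot within the vertical stripe: offset into the full stripe, shifted down to a page index, then taken modulo stripe_npages so all data devices share one bitmap of per-page slots. Worked numbers, assuming 4 KiB pages and a 64 KiB stripe_len (so stripe_npages == 16):

#include <stdio.h>

int main(void)
{
	/* Assumed geometry: 4 KiB pages, 64 KiB per-device stripes. */
	const unsigned page_shift = 12;
	const unsigned stripe_npages = 16;	/* 64 KiB / 4 KiB */

	/* A sector 68 KiB into the full stripe lands on the second data
	 * device, but in the same vertical page slot as offset 4 KiB.
	 */
	unsigned long off = 68 * 1024;
	unsigned bit = (unsigned)(off >> page_shift) % stripe_npages;

	printf("bit %u\n", bit);	/* prints: bit 1 */
	return 0;
}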
/*
* our main entry point for writes from the rest of the FS.
*/
@@ -1764,9 +1813,8 @@ int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
btrfs_put_bbio(bbio);
return PTR_ERR(rbio);
}
- bio_list_add(&rbio->bio_list, bio);
- rbio->bio_list_bytes = bio->bi_iter.bi_size;
rbio->operation = BTRFS_RBIO_WRITE;
+ rbio_add_bio(rbio, bio);
btrfs_bio_counter_inc_noblocked(fs_info);
rbio->generic_bio_cnt = 1;
@@ -2068,9 +2116,12 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
atomic_set(&rbio->error, 0);
/*
- * read everything that hasn't failed. Thanks to the
- * stripe cache, it is possible that some or all of these
- * pages are going to be uptodate.
+	 * Read everything that hasn't failed. However, this time we will
+	 * not trust any cached sector: we may have read stale data that the
+	 * higher layer is not going to consume.
+	 *
+	 * So always re-read everything in the recovery path.
*/
for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
if (rbio->faila == stripe || rbio->failb == stripe) {
@@ -2079,16 +2130,6 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
}
for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
- struct page *p;
-
- /*
- * the rmw code may have already read this
- * page in
- */
- p = rbio_stripe_page(rbio, stripe, pagenr);
- if (PageUptodate(p))
- continue;
-
ret = rbio_add_io_page(rbio, &bio_list,
rbio_stripe_page(rbio, stripe, pagenr),
stripe, pagenr, rbio->stripe_len);
@@ -2170,8 +2211,7 @@ int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
}
rbio->operation = BTRFS_RBIO_READ_REBUILD;
- bio_list_add(&rbio->bio_list, bio);
- rbio->bio_list_bytes = bio->bi_iter.bi_size;
+ rbio_add_bio(rbio, bio);
rbio->faila = find_logical_bio_stripe(rbio, bio);
if (rbio->faila == -1) {
diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h
index a97dc74a4d3d..02f15321cecc 100644
--- a/fs/btrfs/rcu-string.h
+++ b/fs/btrfs/rcu-string.h
@@ -18,7 +18,11 @@ static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask)
(len * sizeof(char)), mask);
if (!ret)
return ret;
- strncpy(ret->str, src, len);
+ /* Warn if the source got unexpectedly truncated. */
+ if (WARN_ON(strscpy(ret->str, src, len) < 0)) {
+ kfree(ret);
+ return NULL;
+ }
return ret;
}
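
The switch to strscpy() above leans on its return-value contract: unlike strncpy(), strscpy() always NUL-terminates and returns the number of characters copied, or -E2BIG if the source did not fit. Because the buffer was sized from strlen(src) just beforehand, a negative return can only mean the source string changed underneath us, which is what the WARN_ON() flags. A userspace model of that contract (a simplified stand-in, not the kernel implementation):

    #include <assert.h>
    #include <string.h>

    #define E2BIG 7

    /* Simplified model of the kernel strscpy() return contract. */
    static long strscpy_model(char *dest, const char *src, size_t count)
    {
        size_t len = strlen(src);

        if (count == 0)
            return -E2BIG;
        if (len >= count) {
            memcpy(dest, src, count - 1);
            dest[count - 1] = '\0';
            return -E2BIG;              /* source was truncated */
        }
        memcpy(dest, src, len + 1);
        return (long)len;               /* characters copied, NUL excluded */
    }

    int main(void)
    {
        char buf[8];

        assert(strscpy_model(buf, "btrfs", sizeof(buf)) == 5);
        assert(strscpy_model(buf, "toolongstring", sizeof(buf)) == -E2BIG);
        assert(buf[sizeof(buf) - 1] == '\0');   /* still NUL-terminated */
        return 0;
    }
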
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index bbd63535965c..d59e89ef3251 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -888,8 +888,10 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
out_unlock:
spin_unlock(&fs_info->ref_verify_lock);
out:
- if (ret)
+ if (ret) {
+ btrfs_free_ref_cache(fs_info);
btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+ }
return ret;
}
@@ -1018,8 +1020,8 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
}
}
if (ret) {
- btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
btrfs_free_ref_cache(fs_info);
+ btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
}
btrfs_free_path(path);
return ret;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ba68b0b41dff..e603cc8c141e 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2511,7 +2511,7 @@ again:
list_splice(&reloc_roots, &rc->reloc_roots);
if (!err)
- btrfs_commit_transaction(trans);
+ err = btrfs_commit_transaction(trans);
else
btrfs_end_transaction(trans);
return err;
@@ -4102,8 +4102,12 @@ int prepare_to_relocate(struct reloc_control *rc)
*/
return PTR_ERR(trans);
}
- btrfs_commit_transaction(trans);
- return 0;
+
+ ret = btrfs_commit_transaction(trans);
+ if (ret)
+ unset_reloc_control(rc);
+
+ return ret;
}
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
@@ -4263,7 +4267,9 @@ restart:
err = PTR_ERR(trans);
goto out_free;
}
- btrfs_commit_transaction(trans);
+ ret = btrfs_commit_transaction(trans);
+ if (ret && !err)
+ err = ret;
out_free:
ret = clean_dirty_subvols(rc);
if (ret < 0 && !err)
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 0d07ebe511e7..ba4e198811a4 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -371,9 +371,10 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
key.offset = ref_id;
again:
ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
- if (ret < 0)
+ if (ret < 0) {
+ err = ret;
goto out;
- if (ret == 0) {
+ } else if (ret == 0) {
leaf = path->nodes[0];
ref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_root_ref);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index e5db948daa12..45809f75692e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3849,6 +3849,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
int ret;
struct btrfs_device *dev;
unsigned int nofs_flag;
+ bool need_commit = false;
if (btrfs_fs_closing(fs_info))
return -EAGAIN;
@@ -3961,6 +3962,12 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
*/
nofs_flag = memalloc_nofs_save();
if (!is_dev_replace) {
+ u64 old_super_errors;
+
+ spin_lock(&sctx->stat_lock);
+ old_super_errors = sctx->stat.super_errors;
+ spin_unlock(&sctx->stat_lock);
+
btrfs_info(fs_info, "scrub: started on devid %llu", devid);
/*
* by holding device list mutex, we can
@@ -3969,6 +3976,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
mutex_lock(&fs_info->fs_devices->device_list_mutex);
ret = scrub_supers(sctx, dev);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+
+ spin_lock(&sctx->stat_lock);
+ /*
+		 * Super block errors found, but we cannot commit a transaction
+		 * in the current context, since btrfs_commit_transaction()
+		 * would need to pause the running scrub (which we ourselves hold).
+ */
+ if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
+ need_commit = true;
+ spin_unlock(&sctx->stat_lock);
}
if (!ret)
@@ -3995,6 +4012,25 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
scrub_workers_put(fs_info);
scrub_put_ctx(sctx);
+ /*
+ * We found some super block errors before, now try to force a
+ * transaction commit, as scrub has finished.
+ */
+ if (need_commit) {
+ struct btrfs_trans_handle *trans;
+
+ trans = btrfs_start_transaction(fs_info->tree_root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ btrfs_err(fs_info,
+ "scrub: failed to start transaction to fix super block errors: %d", ret);
+ return ret;
+ }
+ ret = btrfs_commit_transaction(trans);
+ if (ret < 0)
+ btrfs_err(fs_info,
+ "scrub: failed to commit transaction to fix super block errors: %d", ret);
+ }
return ret;
out:
scrub_workers_put(fs_info);
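
The need_commit logic above is a snapshot-and-compare pattern: record the error counter under the lock before scrubbing the supers, compare afterwards under the same lock, and defer the corrective transaction commit until scrub has fully torn down, since committing earlier would deadlock on the scrub we still hold. A stripped-down userspace sketch of the pattern (the stats structure is a stand-in; the real code takes sctx->stat_lock around both accesses):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct scrub_stats {
        uint64_t super_errors;
    };

    /* Stand-in for scrub_supers(): pretend one super block was bad. */
    static void scrub_supers(struct scrub_stats *stat)
    {
        stat->super_errors++;
    }

    int main(void)
    {
        struct scrub_stats stat = { .super_errors = 3 };
        bool need_commit = false;

        /* Snapshot the counter first. */
        uint64_t old_super_errors = stat.super_errors;

        scrub_supers(&stat);

        /* Compare afterwards; only record the decision here. */
        if (stat.super_errors > old_super_errors)
            need_commit = true;

        /* Act later, once it is safe to commit a transaction. */
        if (need_commit)
            printf("would commit a transaction to rewrite the supers\n");
        return 0;
    }
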
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index e258fc484cea..0dfa88ac0139 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5405,6 +5405,7 @@ static int clone_range(struct send_ctx *sctx,
u64 ext_len;
u64 clone_len;
u64 clone_data_offset;
+ bool crossed_src_i_size = false;
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(clone_root->root, path);
@@ -5461,8 +5462,10 @@ static int clone_range(struct send_ctx *sctx,
if (key.offset >= clone_src_i_size)
break;
- if (key.offset + ext_len > clone_src_i_size)
+ if (key.offset + ext_len > clone_src_i_size) {
ext_len = clone_src_i_size - key.offset;
+ crossed_src_i_size = true;
+ }
clone_data_offset = btrfs_file_extent_offset(leaf, ei);
if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
@@ -5522,6 +5525,25 @@ static int clone_range(struct send_ctx *sctx,
ret = send_clone(sctx, offset, clone_len,
clone_root);
}
+ } else if (crossed_src_i_size && clone_len < len) {
+ /*
+ * If we are at i_size of the clone source inode and we
+ * can not clone from it, terminate the loop. This is
+ * to avoid sending two write operations, one with a
+ * length matching clone_len and the final one after
+ * this loop with a length of len - clone_len.
+ *
+ * When using encoded writes (BTRFS_SEND_FLAG_COMPRESSED
+ * was passed to the send ioctl), this helps avoid
+ * sending an encoded write for an offset that is not
+ * sector size aligned, in case the i_size of the source
+ * inode is not sector size aligned. That will make the
+ * receiver fallback to decompression of the data and
+ * writing it using regular buffered IO, therefore while
+			 * not incorrect, it's not optimal due to decompression and
+ * possible re-compression at the receiver.
+ */
+ break;
} else {
ret = send_extent_data(sctx, offset, clone_len);
}
@@ -7325,10 +7347,10 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
/*
* Check that we don't overflow at later allocations, we request
* clone_sources_count + 1 items, and compare to unsigned long inside
- * access_ok.
+ * access_ok. Also set an upper limit for allocation size so this can't
+ * easily exhaust memory. Max number of clone sources is about 200K.
*/
- if (arg->clone_sources_count >
- ULONG_MAX / sizeof(struct clone_root) - 1) {
+ if (arg->clone_sources_count > SZ_8M / sizeof(struct clone_root)) {
ret = -EINVAL;
goto out;
}
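
The new bound is easy to sanity-check by arithmetic: with SZ_8M = 8 MiB and a struct clone_root of a few dozen bytes, the cap lands in the low hundreds of thousands, matching the "about 200K" in the comment. A quick check, assuming a 40-byte struct (the real size is in that neighborhood; the exact value is not given in this hunk):

    #include <stdio.h>

    #define SZ_8M (8UL * 1024 * 1024)

    int main(void)
    {
        /* Assumed size of struct clone_root, for illustration only. */
        const unsigned long clone_root_size = 40;

        printf("max clone sources: %lu\n", SZ_8M / clone_root_size);
        /* -> 209715, i.e. "about 200K" as the comment says. */
        return 0;
    }
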
@@ -7341,7 +7363,7 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
}
if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
- ret = -EINVAL;
+ ret = -EOPNOTSUPP;
goto out;
}
@@ -7359,7 +7381,7 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
sctx->flags = arg->flags;
sctx->send_filp = fget(arg->send_fd);
- if (!sctx->send_filp) {
+ if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) {
ret = -EBADF;
goto out;
}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 1a69bdb96fb2..ea8b5b2d859d 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1567,8 +1567,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
} else {
snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
btrfs_sb(s)->bdev_holder = fs_type;
- if (!strstr(crc32c_impl(), "generic"))
- set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
error = btrfs_fill_super(s, fs_devices, data);
}
if (!error)
@@ -2137,7 +2135,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
* calculated f_bavail.
*/
if (!mixed && block_rsv->space_info->full &&
- total_free_meta - thresh < block_rsv->size)
+ (total_free_meta < thresh || total_free_meta - thresh < block_rsv->size))
buf->f_bavail = 0;
buf->f_type = BTRFS_SUPER_MAGIC;
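
The statfs change guards an unsigned subtraction: when total_free_meta < thresh, the old expression total_free_meta - thresh wrapped around to a huge u64 and the comparison silently failed to fire. A minimal demonstration of the wrap-around and of why the extra guard fixes it:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t total_free_meta = 100;
        uint64_t thresh = 200;
        uint64_t rsv_size = 50;

        /* Old check: 100 - 200 wraps to 2^64 - 100, never < 50. */
        if (total_free_meta - thresh < rsv_size)
            printf("old check fires\n");
        else
            printf("old check misses: %" PRIu64 "\n",
                   total_free_meta - thresh);

        /* Fixed check catches the underflow case explicitly. */
        if (total_free_meta < thresh ||
            total_free_meta - thresh < rsv_size)
            printf("fixed check fires\n");
        return 0;
    }
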
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 5c299e1f2297..356f2274c8eb 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -1154,8 +1154,11 @@ int __init btrfs_init_sysfs(void)
#ifdef CONFIG_BTRFS_DEBUG
ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_debug_feature_attr_group);
- if (ret)
- goto out2;
+ if (ret) {
+ sysfs_unmerge_group(&btrfs_kset->kobj,
+ &btrfs_static_feature_attr_group);
+ goto out_remove_group;
+ }
#endif
return 0;
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index 98f9684e7ffc..8e413c82190e 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -189,7 +189,7 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
void btrfs_free_dummy_root(struct btrfs_root *root)
{
- if (!root)
+ if (IS_ERR_OR_NULL(root))
return;
/* Will be freed by btrfs_free_fs_roots */
if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state)))
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index ac035a6fa003..f6169bece7c0 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -230,21 +230,21 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
false);
if (ret) {
- ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
BTRFS_FS_TREE_OBJECTID);
- if (ret)
+ if (ret) {
+ ulist_free(old_roots);
return ret;
+ }
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
false);
if (ret) {
ulist_free(old_roots);
- ulist_free(new_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
@@ -256,31 +256,33 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
return ret;
}
+ /* btrfs_qgroup_account_extent() always frees the ulists passed to it. */
+ old_roots = NULL;
+ new_roots = NULL;
+
if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
nodesize, nodesize)) {
test_err("qgroup counts didn't match expected values");
return -EINVAL;
}
- old_roots = NULL;
- new_roots = NULL;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
false);
if (ret) {
- ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
ret = remove_extent_item(root, nodesize, nodesize);
- if (ret)
+ if (ret) {
+ ulist_free(old_roots);
return -EINVAL;
+ }
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
false);
if (ret) {
ulist_free(old_roots);
- ulist_free(new_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
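
All of the ulist_free() fixes in these tests follow from the single ownership rule stated in the comment above: btrfs_qgroup_account_extent() consumes both ulists whether it succeeds or fails, so the caller frees them only on the paths where they were never handed over. A minimal sketch of that ownership-transfer pattern (all names here are illustrative):

    #include <stdlib.h>

    struct ulist { int dummy; };

    /* Consumes both lists on success and failure alike, as the comment
     * says btrfs_qgroup_account_extent() does. */
    static int account_extent(struct ulist *old_roots, struct ulist *new_roots)
    {
        free(old_roots);
        free(new_roots);
        return 0;
    }

    static int run_test(void)
    {
        struct ulist *old_roots = malloc(sizeof(*old_roots));
        struct ulist *new_roots;

        if (!old_roots)
            return -1;

        new_roots = malloc(sizeof(*new_roots));
        if (!new_roots) {
            free(old_roots);    /* not handed over yet: free it here */
            return -1;
        }

        /* After this call both pointers are owned by the callee. */
        return account_extent(old_roots, new_roots);
    }

    int main(void) { return run_test(); }
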
@@ -331,21 +333,21 @@ static int test_multiple_refs(struct btrfs_root *root,
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
false);
if (ret) {
- ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
BTRFS_FS_TREE_OBJECTID);
- if (ret)
+ if (ret) {
+ ulist_free(old_roots);
return ret;
+ }
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
false);
if (ret) {
ulist_free(old_roots);
- ulist_free(new_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
@@ -366,21 +368,21 @@ static int test_multiple_refs(struct btrfs_root *root,
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
false);
if (ret) {
- ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
ret = add_tree_ref(root, nodesize, nodesize, 0,
BTRFS_FIRST_FREE_OBJECTID);
- if (ret)
+ if (ret) {
+ ulist_free(old_roots);
return ret;
+ }
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
false);
if (ret) {
ulist_free(old_roots);
- ulist_free(new_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
@@ -407,21 +409,21 @@ static int test_multiple_refs(struct btrfs_root *root,
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
false);
if (ret) {
- ulist_free(old_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
ret = remove_extent_ref(root, nodesize, nodesize, 0,
BTRFS_FIRST_FREE_OBJECTID);
- if (ret)
+ if (ret) {
+ ulist_free(old_roots);
return ret;
+ }
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
false);
if (ret) {
ulist_free(old_roots);
- ulist_free(new_roots);
test_err("couldn't find old roots: %d", ret);
return ret;
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index e6cb95b81787..89ffc0255406 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -190,10 +190,11 @@ loop:
spin_unlock(&fs_info->trans_lock);
/*
- * If we are ATTACH, we just want to catch the current transaction,
- * and commit it. If there is no transaction, just return ENOENT.
+ * If we are ATTACH or TRANS_JOIN_NOSTART, we just want to catch the
+ * current transaction, and commit it. If there is no transaction, just
+ * return ENOENT.
*/
- if (type == TRANS_ATTACH)
+ if (type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART)
return -ENOENT;
/*
@@ -706,8 +707,13 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
trans = start_transaction(root, 0, TRANS_ATTACH,
BTRFS_RESERVE_NO_FLUSH, true);
- if (trans == ERR_PTR(-ENOENT))
- btrfs_wait_for_commit(root->fs_info, 0);
+ if (trans == ERR_PTR(-ENOENT)) {
+ int ret;
+
+ ret = btrfs_wait_for_commit(root->fs_info, 0);
+ if (ret)
+ return ERR_PTR(ret);
+ }
return trans;
}
@@ -771,6 +777,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
}
wait_for_commit(cur_trans);
+ ret = cur_trans->aborted;
btrfs_put_transaction(cur_trans);
out:
return ret;
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 368c43c6cbd0..597362eaf300 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1019,7 +1019,8 @@ static void extent_err(const struct extent_buffer *eb, int slot,
}
static int check_extent_item(struct extent_buffer *leaf,
- struct btrfs_key *key, int slot)
+ struct btrfs_key *key, int slot,
+ struct btrfs_key *prev_key)
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_extent_item *ei;
@@ -1164,7 +1165,7 @@ static int check_extent_item(struct extent_buffer *leaf,
if (ptr + btrfs_extent_inline_ref_size(inline_type) > end) {
extent_err(leaf, slot,
"inline ref item overflows extent item, ptr %lu iref size %u end %lu",
- ptr, inline_type, end);
+ ptr, btrfs_extent_inline_ref_size(inline_type), end);
return -EUCLEAN;
}
@@ -1230,6 +1231,26 @@ static int check_extent_item(struct extent_buffer *leaf,
total_refs, inline_refs);
return -EUCLEAN;
}
+
+ if ((prev_key->type == BTRFS_EXTENT_ITEM_KEY) ||
+ (prev_key->type == BTRFS_METADATA_ITEM_KEY)) {
+ u64 prev_end = prev_key->objectid;
+
+ if (prev_key->type == BTRFS_METADATA_ITEM_KEY)
+ prev_end += fs_info->nodesize;
+ else
+ prev_end += prev_key->offset;
+
+ if (unlikely(prev_end > key->objectid)) {
+ extent_err(leaf, slot,
+ "previous extent [%llu %u %llu] overlaps current extent [%llu %u %llu]",
+ prev_key->objectid, prev_key->type,
+ prev_key->offset, key->objectid, key->type,
+ key->offset);
+ return -EUCLEAN;
+ }
+ }
+
return 0;
}
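
The overlap check derives the previous item's end address from its key: a METADATA_ITEM spans objectid..objectid+nodesize (its offset field stores the tree level, not a size), while an EXTENT_ITEM spans objectid..objectid+offset. A small standalone model of the same computation (the key-type constants match the on-disk values; the 16K nodesize is an assumption):

    #include <assert.h>
    #include <stdint.h>

    #define BTRFS_EXTENT_ITEM_KEY   168
    #define BTRFS_METADATA_ITEM_KEY 169

    struct key {
        uint64_t objectid;
        uint8_t type;
        uint64_t offset;
    };

    static int extents_overlap(const struct key *prev, const struct key *cur,
                               uint32_t nodesize)
    {
        uint64_t prev_end = prev->objectid;

        if (prev->type == BTRFS_METADATA_ITEM_KEY)
            prev_end += nodesize;   /* offset holds the level, not a size */
        else
            prev_end += prev->offset;

        return prev_end > cur->objectid;
    }

    int main(void)
    {
        struct key prev = { 4096, BTRFS_EXTENT_ITEM_KEY, 8192 };
        struct key bad  = { 8192, BTRFS_EXTENT_ITEM_KEY, 4096 };
        struct key good = { 12288, BTRFS_EXTENT_ITEM_KEY, 4096 };

        assert(extents_overlap(&prev, &bad, 16384));    /* 4096+8192 > 8192 */
        assert(!extents_overlap(&prev, &good, 16384));
        return 0;
    }
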
@@ -1343,7 +1364,7 @@ static int check_leaf_item(struct extent_buffer *leaf,
break;
case BTRFS_EXTENT_ITEM_KEY:
case BTRFS_METADATA_ITEM_KEY:
- ret = check_extent_item(leaf, key, slot);
+ ret = check_extent_item(leaf, key, slot, prev_key);
break;
case BTRFS_TREE_BLOCK_REF_KEY:
case BTRFS_SHARED_DATA_REF_KEY:
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index b7bfecfc2ea3..f75333d7b78a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -918,8 +918,7 @@ static noinline int inode_in_dir(struct btrfs_root *root,
di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
index, name, name_len, 0);
if (IS_ERR(di)) {
- if (PTR_ERR(di) != -ENOENT)
- ret = PTR_ERR(di);
+ ret = PTR_ERR(di);
goto out;
} else if (di) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
@@ -1100,7 +1099,9 @@ again:
extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
inode_objectid, parent_objectid, 0,
0);
- if (!IS_ERR_OR_NULL(extref)) {
+ if (IS_ERR(extref)) {
+ return PTR_ERR(extref);
+ } else if (extref) {
u32 item_size;
u32 cur_offset = 0;
unsigned long base;
@@ -1169,8 +1170,7 @@ next:
di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
ref_index, name, namelen, 0);
if (IS_ERR(di)) {
- if (PTR_ERR(di) != -ENOENT)
- return PTR_ERR(di);
+ return PTR_ERR(di);
} else if (di) {
ret = drop_one_dir_item(trans, root, path, dir, di);
if (ret)
@@ -2020,9 +2020,6 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
goto out;
}
- if (dst_di == ERR_PTR(-ENOENT))
- dst_di = NULL;
-
if (IS_ERR(dst_di)) {
ret = PTR_ERR(dst_di);
goto out;
@@ -2307,7 +2304,7 @@ again:
dir_key->offset,
name, name_len, 0);
}
- if (!log_di || log_di == ERR_PTR(-ENOENT)) {
+ if (!log_di) {
btrfs_dir_item_key_to_cpu(eb, di, &location);
btrfs_release_path(path);
btrfs_release_path(log_path);
@@ -3520,8 +3517,7 @@ out_unlock:
if (err == -ENOSPC) {
btrfs_set_log_full_commit(trans);
err = 0;
- } else if (err < 0 && err != -ENOENT) {
- /* ENOENT can be returned if the entry hasn't been fsynced yet */
+ } else if (err < 0) {
btrfs_abort_transaction(trans, err);
}
@@ -4287,7 +4283,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
int slot;
int ins_nr = 0;
- int start_slot;
+ int start_slot = 0;
int ret;
if (!(inode->flags & BTRFS_INODE_PREALLOC))
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c7706a769de1..e11c8da9a560 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -354,6 +354,7 @@ void btrfs_free_device(struct btrfs_device *device)
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *device;
+
WARN_ON(fs_devices->opened);
while (!list_empty(&fs_devices->devices)) {
device = list_entry(fs_devices->devices.next,
@@ -713,15 +714,47 @@ static void pending_bios_fn(struct btrfs_work *work)
run_scheduled_bios(device);
}
-static bool device_path_matched(const char *path, struct btrfs_device *device)
+/*
+ * Check if the device at @path matches the given struct btrfs_device.
+ *
+ * Returns:
+ * true if it is the same device.
+ * false if it is not the same device or on error.
+ */
+static bool device_matched(const struct btrfs_device *device, const char *path)
{
- int found;
+ char *device_name;
+ struct block_device *bdev_old;
+ struct block_device *bdev_new;
+
+ /*
+	 * Skip devices without a name (missing devices): there is no
+	 * path to look up, so they can never match.
+ */
+ if (!device->name)
+ return false;
+
+ device_name = kzalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
+ if (!device_name)
+ return false;
rcu_read_lock();
- found = strcmp(rcu_str_deref(device->name), path);
+ scnprintf(device_name, BTRFS_PATH_NAME_MAX, "%s", rcu_str_deref(device->name));
rcu_read_unlock();
- return found == 0;
+ bdev_old = lookup_bdev(device_name);
+ kfree(device_name);
+ if (IS_ERR(bdev_old))
+ return false;
+
+ bdev_new = lookup_bdev(path);
+ if (IS_ERR(bdev_new))
+ return false;
+
+ if (bdev_old == bdev_new)
+ return true;
+
+ return false;
}
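
device_matched() above deliberately resolves both paths to a block device and compares the results, so two different spellings of the same device (say, /dev/sda1 and a udev symlink to it) are treated as equal, which the old strcmp() of the stored name would miss. A runnable userspace analogue of the comparison using stat(2), where matching st_rdev values play the role of the kernel's block_device pointer comparison:

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/stat.h>

    static bool paths_name_same_bdev(const char *a, const char *b)
    {
        struct stat sa, sb;

        if (stat(a, &sa) || stat(b, &sb))
            return false;   /* treat errors as "not the same device" */
        if (!S_ISBLK(sa.st_mode) || !S_ISBLK(sb.st_mode))
            return false;
        return sa.st_rdev == sb.st_rdev;
    }

    int main(int argc, char **argv)
    {
        if (argc == 3)
            printf("%s\n", paths_name_same_bdev(argv[1], argv[2]) ?
                   "same device" : "different");
        return 0;
    }
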
/*
@@ -754,9 +787,7 @@ static int btrfs_free_stale_devices(const char *path,
&fs_devices->devices, dev_list) {
if (skip_device && skip_device == device)
continue;
- if (path && !device->name)
- continue;
- if (path && !device_path_matched(path, device))
+ if (path && !device_matched(device, path))
continue;
if (fs_devices->opened) {
/* for an already deleted device return 0 */
@@ -864,6 +895,14 @@ error_brelse:
return -EINVAL;
}
+u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb)
+{
+ bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
+ BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
+
+ return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
+}
+
/*
* Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
* being created with a disk that has already completed its fsid change.
@@ -1371,6 +1410,17 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
if (!fs_devices->opened) {
seed_devices = fs_devices->seed;
fs_devices->seed = NULL;
+
+ /*
+ * If the struct btrfs_fs_devices is not assembled with any
+ * other device, it can be re-initialized during the next mount
+		 * without needing the device-scan step. Therefore, it can be
+ * fully freed.
+ */
+ if (fs_devices->num_devices == 1) {
+ list_del(&fs_devices->fs_list);
+ free_fs_devices(fs_devices);
+ }
}
mutex_unlock(&uuid_mutex);
@@ -1537,8 +1587,17 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
* later supers, using BTRFS_SUPER_MIRROR_MAX instead
*/
bytenr = btrfs_sb_offset(0);
- flags |= FMODE_EXCL;
+ /*
+	 * Avoid an exclusive open here (flags |= FMODE_EXCL), as systemd-udev
+	 * may initiate a device scan that races with the user's mount or mkfs
+	 * command, resulting in failure.
+ * Since the device scan is solely for reading purposes, there is
+ * no need for FMODE_EXCL. Additionally, the devices are read again
+ * during the mount process. It is ok to get some inconsistent
+ * values temporarily, as the device paths of the fsid are the only
+ * required information for assembling the volume.
+ */
bdev = blkdev_get_by_path(path, flags, holder);
if (IS_ERR(bdev))
return ERR_CAST(bdev);
@@ -1671,7 +1730,7 @@ again:
goto out;
}
- while (1) {
+ while (search_start < search_end) {
l = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(l)) {
@@ -1694,6 +1753,9 @@ again:
if (key.type != BTRFS_DEV_EXTENT_KEY)
goto next;
+ if (key.offset > search_end)
+ break;
+
if (key.offset > search_start) {
hole_size = key.offset - search_start;
@@ -1764,6 +1826,7 @@ next:
else
ret = 0;
+ ASSERT(max_hole_start + max_hole_size <= search_end);
out:
btrfs_free_path(path);
*start = max_hole_start;
@@ -3027,15 +3090,16 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
read_unlock(&em_tree->lock);
if (!em) {
- btrfs_crit(fs_info, "unable to find logical %llu length %llu",
+ btrfs_crit(fs_info,
+ "unable to find chunk map for logical %llu length %llu",
logical, length);
return ERR_PTR(-EINVAL);
}
- if (em->start > logical || em->start + em->len < logical) {
+ if (em->start > logical || em->start + em->len <= logical) {
btrfs_crit(fs_info,
- "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
- logical, length, em->start, em->start + em->len);
+ "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
+ logical, logical + length, em->start, em->start + em->len);
free_extent_map(em);
return ERR_PTR(-EINVAL);
}
@@ -4503,8 +4567,7 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
}
}
- BUG_ON(fs_info->balance_ctl ||
- test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
+ ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
atomic_dec(&fs_info->balance_cancel_req);
mutex_unlock(&fs_info->balance_mutex);
return 0;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index aa6a6d7b2978..762c0a375498 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -581,4 +581,7 @@ int btrfs_bg_type_to_factor(u64 flags);
const char *btrfs_bg_type_to_raid_name(u64 flags);
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
+bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
+u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb);
+
#endif
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 48858510739b..cd7ddf24157a 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -387,6 +387,9 @@ static int btrfs_xattr_handler_set(const struct xattr_handler *handler,
const char *name, const void *buffer,
size_t size, int flags)
{
+ if (btrfs_root_readonly(BTRFS_I(inode)->root))
+ return -EROFS;
+
name = xattr_full_name(handler, name);
return btrfs_setxattr_trans(inode, name, buffer, size, flags);
}
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index df1aace5df50..9385d0bb276d 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -74,7 +74,7 @@ static struct list_head *zlib_alloc_workspace(unsigned int level)
workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
zlib_inflate_workspacesize());
- workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
+ workspace->strm.workspace = kvzalloc(workspacesize, GFP_KERNEL);
workspace->level = level;
workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!workspace->strm.workspace || !workspace->buf)
diff --git a/fs/buffer.c b/fs/buffer.c
index 0d7bd7712076..4ec88d08d04e 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2318,7 +2318,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
- void *fsdata;
+ void *fsdata = NULL;
int err;
err = inode_newsize_ok(inode, size);
@@ -2344,7 +2344,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
struct inode *inode = mapping->host;
unsigned int blocksize = i_blocksize(inode);
struct page *page;
- void *fsdata;
+ void *fsdata = NULL;
pgoff_t index, curidx;
loff_t curpos;
unsigned zerofrom, offset, len;
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
index dfb14dbddf51..3b39552c2365 100644
--- a/fs/cachefiles/bind.c
+++ b/fs/cachefiles/bind.c
@@ -245,6 +245,8 @@ error_open_root:
kmem_cache_free(cachefiles_object_jar, fsdef);
error_root_object:
cachefiles_end_secure(cache, saved_cred);
+ put_cred(cache->cache_cred);
+ cache->cache_cred = NULL;
pr_err("Failed to register: %d\n", ret);
return ret;
}
@@ -265,6 +267,7 @@ void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
dput(cache->graveyard);
mntput(cache->mnt);
+ put_cred(cache->cache_cred);
kfree(cache->rootdirname);
kfree(cache->secctx);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 06e9b26bf277..45b8f6741f8d 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1558,6 +1558,7 @@ void ceph_flush_snaps(struct ceph_inode_info *ci,
struct inode *inode = &ci->vfs_inode;
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
struct ceph_mds_session *session = NULL;
+ bool need_put = false;
int mds;
dout("ceph_flush_snaps %p\n", inode);
@@ -1609,8 +1610,13 @@ out:
}
/* we flushed them all; remove this inode from the queue */
spin_lock(&mdsc->snap_flush_lock);
+ if (!list_empty(&ci->i_snap_flush_item))
+ need_put = true;
list_del_init(&ci->i_snap_flush_item);
spin_unlock(&mdsc->snap_flush_lock);
+
+ if (need_put)
+ iput(inode);
}
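
The need_put logic pairs with the ihold() added in fs/ceph/snap.c further down in this diff: a reference is taken exactly when the inode is first queued on snap_flush_list, and whoever removes it from the list drops that reference. A small sketch of this hold-while-queued pattern (the types are stand-ins for the real inode and list handling):

    #include <assert.h>
    #include <stdbool.h>

    struct inode {
        int refcount;
        bool queued;
    };

    static void ihold(struct inode *in) { in->refcount++; }
    static void iput(struct inode *in)  { in->refcount--; }

    /* Queue side: take a reference only when actually adding to the list. */
    static void queue_for_flush(struct inode *in)
    {
        if (!in->queued) {
            ihold(in);
            in->queued = true;
        }
    }

    /* Flush side: drop the reference only if the inode really was queued. */
    static void flush_and_unqueue(struct inode *in)
    {
        bool need_put = in->queued;

        in->queued = false;
        if (need_put)
            iput(in);
    }

    int main(void)
    {
        struct inode in = { .refcount = 1 };

        queue_for_flush(&in);
        assert(in.refcount == 2);
        flush_and_unqueue(&in);
        assert(in.refcount == 1);
        return 0;
    }
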
/*
@@ -2784,7 +2790,19 @@ int ceph_get_caps(struct file *filp, int need, int want,
if (ret == -EAGAIN)
continue;
if (!ret) {
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct cap_wait cw;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+ cw.ino = inode->i_ino;
+ cw.tgid = current->tgid;
+ cw.need = need;
+ cw.want = want;
+
+ spin_lock(&mdsc->caps_list_lock);
+ list_add(&cw.list, &mdsc->cap_wait_list);
+ spin_unlock(&mdsc->caps_list_lock);
+
add_wait_queue(&ci->i_cap_wq, &wait);
flags |= NON_BLOCKING;
@@ -2798,6 +2816,11 @@ int ceph_get_caps(struct file *filp, int need, int want,
}
remove_wait_queue(&ci->i_cap_wq, &wait);
+
+ spin_lock(&mdsc->caps_list_lock);
+ list_del(&cw.list);
+ spin_unlock(&mdsc->caps_list_lock);
+
if (ret == -EAGAIN)
continue;
}
@@ -3334,6 +3357,15 @@ static void handle_cap_grant(struct inode *inode,
}
BUG_ON(cap->issued & ~cap->implemented);
+ /* don't let check_caps skip sending a response to MDS for revoke msgs */
+ if (le32_to_cpu(grant->op) == CEPH_CAP_OP_REVOKE) {
+ cap->mds_wanted = 0;
+ if (cap == ci->i_auth_cap)
+ check_caps = 1; /* check auth cap only */
+ else
+ check_caps = 2; /* check all caps */
+ }
+
if (extra_info->inline_version > 0 &&
extra_info->inline_version >= ci->i_inline_version) {
ci->i_inline_version = extra_info->inline_version;
@@ -4272,12 +4304,14 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
struct inode *dir,
int mds, int drop, int unless)
{
- struct dentry *parent = NULL;
struct ceph_mds_request_release *rel = *p;
struct ceph_dentry_info *di = ceph_dentry(dentry);
int force = 0;
int ret;
+ /* This shouldn't happen */
+ BUG_ON(!dir);
+
/*
	 * force a record for the directory caps if we have a dentry lease.
* this is racy (can't take i_ceph_lock and d_lock together), but it
@@ -4287,14 +4321,9 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
spin_lock(&dentry->d_lock);
if (di->lease_session && di->lease_session->s_mds == mds)
force = 1;
- if (!dir) {
- parent = dget(dentry->d_parent);
- dir = d_inode(parent);
- }
spin_unlock(&dentry->d_lock);
ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
- dput(parent);
spin_lock(&dentry->d_lock);
if (ret && di->lease_session && di->lease_session->s_mds == mds) {
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index facb387c2735..c281f32b54f7 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -139,6 +139,7 @@ static int caps_show(struct seq_file *s, void *p)
struct ceph_fs_client *fsc = s->private;
struct ceph_mds_client *mdsc = fsc->mdsc;
int total, avail, used, reserved, min, i;
+ struct cap_wait *cw;
ceph_reservation_status(fsc, &total, &avail, &used, &reserved, &min);
seq_printf(s, "total\t\t%d\n"
@@ -166,6 +167,18 @@ static int caps_show(struct seq_file *s, void *p)
}
mutex_unlock(&mdsc->mutex);
+ seq_printf(s, "\n\nWaiters:\n--------\n");
+ seq_printf(s, "tgid ino need want\n");
+ seq_printf(s, "-----------------------------------------------------\n");
+
+ spin_lock(&mdsc->caps_list_lock);
+ list_for_each_entry(cw, &mdsc->cap_wait_list, list) {
+ seq_printf(s, "%-13d0x%-17lx%-17s%-17s\n", cw->tgid, cw->ino,
+ ceph_cap_string(cw->need),
+ ceph_cap_string(cw->want));
+ }
+ spin_unlock(&mdsc->caps_list_lock);
+
return 0;
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index aa1eac6d89f2..83122fc5f813 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -452,6 +452,12 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
if (dentry->d_name.len > NAME_MAX)
return -ENAMETOOLONG;
+ /*
+ * Do not truncate the file, since atomic_open is called before the
+ * permission check. The caller will do the truncation afterward.
+ */
+ flags &= ~O_TRUNC;
+
if (flags & O_CREAT) {
if (ceph_quota_is_max_files_exceeded(dir))
return -EDQUOT;
@@ -490,9 +496,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
req->r_parent = dir;
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
- err = ceph_mdsc_do_request(mdsc,
- (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
- req);
+ err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req);
err = ceph_handle_snapdir(req, dentry, err);
if (err)
goto out_req;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index af85a7237604..a08ddd4e26d9 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -619,9 +619,7 @@ int ceph_fill_file_size(struct inode *inode, int issued,
ci->i_truncate_seq = truncate_seq;
/* the MDS should have revoked these caps */
- WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
- CEPH_CAP_FILE_RD |
- CEPH_CAP_FILE_WR |
+ WARN_ON_ONCE(issued & (CEPH_CAP_FILE_RD |
CEPH_CAP_FILE_LAZYIO));
/*
* If we hold relevant caps, or in the case where we're
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 37fb71797b34..f7acf9680c9b 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -3151,6 +3151,12 @@ static void handle_session(struct ceph_mds_session *session,
break;
case CEPH_SESSION_FLUSHMSG:
+ /* flush cap releases */
+ spin_lock(&session->s_cap_lock);
+ if (session->s_num_cap_releases)
+ ceph_flush_cap_releases(mdsc, session);
+ spin_unlock(&session->s_cap_lock);
+
send_flushmsg_ack(mdsc, session, seq);
break;
@@ -4068,7 +4074,7 @@ static void delayed_work(struct work_struct *work)
dout("mdsc delayed_work\n");
- if (mdsc->stopping)
+ if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED)
return;
mutex_lock(&mdsc->mutex);
@@ -4168,6 +4174,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
mdsc->last_renew_caps = jiffies;
INIT_LIST_HEAD(&mdsc->cap_delay_list);
+ INIT_LIST_HEAD(&mdsc->cap_wait_list);
spin_lock_init(&mdsc->cap_delay_lock);
INIT_LIST_HEAD(&mdsc->snap_flush_list);
spin_lock_init(&mdsc->snap_flush_lock);
@@ -4239,7 +4246,7 @@ static void wait_requests(struct ceph_mds_client *mdsc)
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
dout("pre_umount\n");
- mdsc->stopping = 1;
+ mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN;
lock_unlock_sessions(mdsc);
ceph_flush_dirty_caps(mdsc);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 5cd131b41d84..4fbbc33023c9 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -340,6 +340,19 @@ struct ceph_quotarealm_inode {
struct inode *inode;
};
+struct cap_wait {
+ struct list_head list;
+ unsigned long ino;
+ pid_t tgid;
+ int need;
+ int want;
+};
+
+enum {
+ CEPH_MDSC_STOPPING_BEGIN = 1,
+ CEPH_MDSC_STOPPING_FLUSHED = 2,
+};
+
/*
* mds client state
*/
@@ -416,6 +429,7 @@ struct ceph_mds_client {
spinlock_t caps_list_lock;
struct list_head caps_list; /* unused (reserved or
unreserved) */
+ struct list_head cap_wait_list;
int caps_total_count; /* total caps allocated */
int caps_use_count; /* in use */
int caps_use_max; /* max used caps */
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index e1b9b224fcb2..4df068bdba68 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -644,8 +644,10 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
capsnap->size);
spin_lock(&mdsc->snap_flush_lock);
- if (list_empty(&ci->i_snap_flush_item))
+ if (list_empty(&ci->i_snap_flush_item)) {
+ ihold(inode);
list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
+ }
spin_unlock(&mdsc->snap_flush_lock);
return 1; /* caller may want to ceph_flush_snaps */
}
@@ -694,9 +696,10 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
struct ceph_mds_snap_realm *ri; /* encoded */
__le64 *snaps; /* encoded */
__le64 *prior_parent_snaps; /* encoded */
- struct ceph_snap_realm *realm = NULL;
+ struct ceph_snap_realm *realm;
struct ceph_snap_realm *first_realm = NULL;
- int invalidate = 0;
+ struct ceph_snap_realm *realm_to_rebuild = NULL;
+ int rebuild_snapcs;
int err = -ENOMEM;
LIST_HEAD(dirty_realms);
@@ -704,6 +707,8 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
dout("update_snap_trace deletion=%d\n", deletion);
more:
+ realm = NULL;
+ rebuild_snapcs = 0;
ceph_decode_need(&p, e, sizeof(*ri), bad);
ri = p;
p += sizeof(*ri);
@@ -727,7 +732,7 @@ more:
err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
if (err < 0)
goto fail;
- invalidate += err;
+ rebuild_snapcs += err;
if (le64_to_cpu(ri->seq) > realm->seq) {
dout("update_snap_trace updating %llx %p %lld -> %lld\n",
@@ -752,22 +757,30 @@ more:
if (realm->seq > mdsc->last_snap_seq)
mdsc->last_snap_seq = realm->seq;
- invalidate = 1;
+ rebuild_snapcs = 1;
} else if (!realm->cached_context) {
dout("update_snap_trace %llx %p seq %lld new\n",
realm->ino, realm, realm->seq);
- invalidate = 1;
+ rebuild_snapcs = 1;
} else {
dout("update_snap_trace %llx %p seq %lld unchanged\n",
realm->ino, realm, realm->seq);
}
- dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
- realm, invalidate, p, e);
+ dout("done with %llx %p, rebuild_snapcs=%d, %p %p\n", realm->ino,
+ realm, rebuild_snapcs, p, e);
+
+ /*
+	 * This will always track the uppermost parent realm from which
+	 * we need to rebuild the snapshot contexts _downward_ in the
+	 * hierarchy.
+ */
+ if (rebuild_snapcs)
+ realm_to_rebuild = realm;
- /* invalidate when we reach the _end_ (root) of the trace */
- if (invalidate && p >= e)
- rebuild_snap_realms(realm, &dirty_realms);
+ /* rebuild_snapcs when we reach the _end_ (root) of the trace */
+ if (realm_to_rebuild && p >= e)
+ rebuild_snap_realms(realm_to_rebuild, &dirty_realms);
if (!first_realm)
first_realm = realm;
@@ -994,6 +1007,19 @@ skip_inode:
continue;
adjust_snap_realm_parent(mdsc, child, realm->ino);
}
+ } else {
+ /*
+ * In the non-split case both 'num_split_inos' and
+ * 'num_split_realms' should be 0, making this a no-op.
+		 * However, the MDS happens to populate the 'split_realms' list
+ * in one of the UPDATE op cases by mistake.
+ *
+ * Skip both lists just in case to ensure that 'p' is
+ * positioned at the start of realm info, as expected by
+ * ceph_update_snap_trace().
+ */
+ p += sizeof(u64) * num_split_inos;
+ p += sizeof(u64) * num_split_realms;
}
/*
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index d40658d5e808..0e38678d5add 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -1174,14 +1174,23 @@ out_final:
static void ceph_kill_sb(struct super_block *s)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(s);
- dev_t dev = s->s_dev;
dout("kill_sb %p\n", s);
ceph_mdsc_pre_umount(fsc->mdsc);
flush_fs_workqueues(fsc);
- generic_shutdown_super(s);
+ /*
+	 * Though kill_anon_super() will eventually trigger
+	 * sync_filesystem() anyway, we still need to do it here and
+	 * then bump the shutdown stage, so that the work queue is
+	 * stopped as early as possible.
+ */
+ sync_filesystem(s);
+
+ fsc->mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
+
+ kill_anon_super(s);
fsc->client->extra_mon_dispatch = NULL;
ceph_fs_debugfs_cleanup(fsc);
@@ -1189,7 +1198,6 @@ static void ceph_kill_sb(struct super_block *s)
ceph_fscache_unregister_fs(fsc);
destroy_fs_client(fsc);
- free_anon_bdev(dev);
}
static struct file_system_type ceph_fs_type = {
diff --git a/fs/char_dev.c b/fs/char_dev.c
index c5e6eff5a381..36479b72d278 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -544,7 +544,7 @@ int cdev_device_add(struct cdev *cdev, struct device *dev)
}
rc = device_add(dev);
- if (rc)
+ if (rc && dev->devt)
cdev_del(cdev);
return rc;
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 7f01c6e60791..6eb65988321f 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -76,8 +76,8 @@ struct key_type cifs_spnego_key_type = {
* strlen(";sec=ntlmsspi") */
#define MAX_MECH_STR_LEN 13
-/* strlen of "host=" */
-#define HOST_KEY_LEN 5
+/* strlen of ";host=" */
+#define HOST_KEY_LEN 6
/* strlen of ";ip4=" or ";ip6=" */
#define IP_KEY_LEN 5
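
The corrected value counts the leading ';' in ";host=", i.e. strlen(";host=") == 6. A compile-time guard along these lines (hypothetical, not part of the patch) would have caught the off-by-one:

    #include <assert.h>

    #define HOST_KEY_LEN 6  /* strlen of ";host=" */
    #define IP_KEY_LEN   5  /* strlen of ";ip4=" or ";ip6=" */

    /* sizeof on a string literal includes the NUL, hence the -1. */
    static_assert(sizeof(";host=") - 1 == HOST_KEY_LEN, "HOST_KEY_LEN wrong");
    static_assert(sizeof(";ip4=") - 1 == IP_KEY_LEN, "IP_KEY_LEN wrong");

    int main(void) { return 0; }
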
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 1f55072aa302..1766d6d077f2 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -1056,7 +1056,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
struct cifs_ntsd *pntsd = NULL;
int oplock = 0;
unsigned int xid;
- int rc, create_options = 0;
+ int rc;
struct cifs_tcon *tcon;
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
struct cifs_fid fid;
@@ -1068,13 +1068,10 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
tcon = tlink_tcon(tlink);
xid = get_xid();
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = READ_CONTROL;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.disposition = FILE_OPEN;
oparms.path = path;
oparms.fid = &fid;
@@ -1119,7 +1116,7 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
{
int oplock = 0;
unsigned int xid;
- int rc, access_flags, create_options = 0;
+ int rc, access_flags;
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
@@ -1132,9 +1129,6 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
tcon = tlink_tcon(tlink);
xid = get_xid();
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
access_flags = WRITE_OWNER;
else
@@ -1143,7 +1137,7 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = access_flags;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.disposition = FILE_OPEN;
oparms.path = path;
oparms.fid = &fid;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 79a18692b84c..12f632287d16 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -275,7 +275,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_ffree = 0; /* unlimited */
if (server->ops->queryfs)
- rc = server->ops->queryfs(xid, tcon, buf);
+ rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
free_xid(xid);
return rc;
@@ -609,9 +609,15 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_printf(s, ",echo_interval=%lu",
tcon->ses->server->echo_interval / HZ);
- /* Only display max_credits if it was overridden on mount */
+ /* Only display the following if overridden on mount */
if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
+ if (tcon->ses->server->tcp_nodelay)
+ seq_puts(s, ",tcpnodelay");
+ if (tcon->ses->server->noautotune)
+ seq_puts(s, ",noautotune");
+ if (tcon->ses->server->noblocksnd)
+ seq_puts(s, ",noblocksend");
if (tcon->snapshot_time)
seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
@@ -732,11 +738,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
struct inode *dir = d_inode(dentry);
struct dentry *child;
- if (!dir) {
- dput(dentry);
- dentry = ERR_PTR(-ENOENT);
- break;
- }
if (!S_ISDIR(dir->i_mode)) {
dput(dentry);
dentry = ERR_PTR(-ENOTDIR);
@@ -753,7 +754,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
while (*s && *s != sep)
s++;
- child = lookup_one_len_unlocked(p, dentry, s - p);
+ child = lookup_positive_unlocked(p, dentry, s - p);
dput(dentry);
dentry = child;
} while (!IS_ERR(dentry));
@@ -1061,6 +1062,7 @@ const struct inode_operations cifs_file_inode_ops = {
const struct inode_operations cifs_symlink_inode_ops = {
.get_link = cifs_get_link,
+ .setattr = cifs_setattr,
.permission = cifs_permission,
.listxattr = cifs_listxattr,
};
@@ -1077,7 +1079,9 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
unsigned int xid;
int rc;
- if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
+ if (remap_flags & REMAP_FILE_DEDUP)
+ return -EOPNOTSUPP;
+ if (remap_flags & ~REMAP_FILE_ADVISORY)
return -EINVAL;
cifs_dbg(FYI, "clone range\n");
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index bc4ca94137f2..7b155e19df4e 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -125,7 +125,10 @@ extern const struct dentry_operations cifs_ci_dentry_ops;
#ifdef CONFIG_CIFS_DFS_UPCALL
extern struct vfsmount *cifs_dfs_d_automount(struct path *path);
#else
-#define cifs_dfs_d_automount NULL
+static inline struct vfsmount *cifs_dfs_d_automount(struct path *path)
+{
+ return ERR_PTR(-EREMOTE);
+}
#endif
/* Functions related to symlinks */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 414936989255..253321adc266 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -22,6 +22,8 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/mm.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include "cifs_fs_sb.h"
@@ -30,6 +32,7 @@
#include <linux/scatterlist.h>
#include <uapi/linux/cifs/cifs_mount.h>
#include "smb2pdu.h"
+#include "smb2glob.h"
#define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
@@ -297,7 +300,8 @@ struct smb_version_operations {
const char *, struct dfs_info3_param **,
unsigned int *, const struct nls_table *, int);
/* informational QFS call */
- void (*qfs_tcon)(const unsigned int, struct cifs_tcon *);
+ void (*qfs_tcon)(const unsigned int, struct cifs_tcon *,
+ struct cifs_sb_info *);
/* check if a path is accessible or not */
int (*is_path_accessible)(const unsigned int, struct cifs_tcon *,
struct cifs_sb_info *, const char *);
@@ -405,7 +409,7 @@ struct smb_version_operations {
struct cifsInodeInfo *);
/* query remote filesystem */
int (*queryfs)(const unsigned int, struct cifs_tcon *,
- struct kstatfs *);
+ struct cifs_sb_info *, struct kstatfs *);
/* send mandatory brlock to the server */
int (*mand_lock)(const unsigned int, struct cifsFileInfo *, __u64,
__u64, __u32, int, int, bool);
@@ -486,6 +490,7 @@ struct smb_version_operations {
/* ioctl passthrough for query_info */
int (*ioctl_query_info)(const unsigned int xid,
struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
__le16 *path, int is_dir,
unsigned long p);
/* make unix special files (block, char, fifo, socket) */
@@ -1955,4 +1960,71 @@ extern struct smb_version_values smb302_values;
#define ALT_SMB311_VERSION_STRING "3.11"
extern struct smb_version_operations smb311_operations;
extern struct smb_version_values smb311_values;
+
+static inline unsigned int cifs_get_num_sgs(const struct smb_rqst *rqst,
+ int num_rqst,
+ const u8 *sig)
+{
+ unsigned int len, skip;
+ unsigned int nents = 0;
+ unsigned long addr;
+ int i, j;
+
+ /* Assumes the first rqst has a transform header as the first iov.
+ * I.e.
+ * rqst[0].rq_iov[0] is transform header
+ * rqst[0].rq_iov[1+] data to be encrypted/decrypted
+ * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
+ */
+ for (i = 0; i < num_rqst; i++) {
+ /*
+ * The first rqst has a transform header where the
+ * first 20 bytes are not part of the encrypted blob.
+ */
+ for (j = 0; j < rqst[i].rq_nvec; j++) {
+ struct kvec *iov = &rqst[i].rq_iov[j];
+
+ skip = (i == 0) && (j == 0) ? 20 : 0;
+ addr = (unsigned long)iov->iov_base + skip;
+ if (unlikely(is_vmalloc_addr((void *)addr))) {
+ len = iov->iov_len - skip;
+ nents += DIV_ROUND_UP(offset_in_page(addr) + len,
+ PAGE_SIZE);
+ } else {
+ nents++;
+ }
+ }
+ nents += rqst[i].rq_npages;
+ }
+ nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE);
+ return nents;
+}
+
+/* We cannot use the normal sg_set_buf() as we will sometimes pass a
+ * stack object as buf.
+ */
+static inline struct scatterlist *cifs_sg_set_buf(struct scatterlist *sg,
+ const void *buf,
+ unsigned int buflen)
+{
+ unsigned long addr = (unsigned long)buf;
+ unsigned int off = offset_in_page(addr);
+
+ addr &= PAGE_MASK;
+ if (unlikely(is_vmalloc_addr((void *)addr))) {
+ do {
+ unsigned int len = min_t(unsigned int, buflen, PAGE_SIZE - off);
+
+ sg_set_page(sg++, vmalloc_to_page((void *)addr), len, off);
+
+ off = 0;
+ addr += PAGE_SIZE;
+ buflen -= len;
+ } while (buflen);
+ } else {
+ sg_set_page(sg++, virt_to_page(addr), buflen, off);
+ }
+ return sg;
+}
+
#endif /* _CIFS_GLOB_H */
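
The nents arithmetic in cifs_get_num_sgs() above charges one scatterlist entry per page a vmalloc'd buffer touches, including partial first and last pages, which is exactly DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE); the same rule is applied once more for the signature at the end. A small userspace model of that count (4K pages assumed):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /*
     * Model of the nents math in cifs_get_num_sgs(): a buffer that is not
     * page-contiguous (vmalloc'd) needs one scatterlist entry per page it
     * touches, partial first and last pages included.
     */
    static unsigned long sg_entries_for(unsigned long addr, unsigned long len)
    {
        unsigned long off = addr & (PAGE_SIZE - 1);

        return DIV_ROUND_UP(off + len, PAGE_SIZE);
    }

    int main(void)
    {
        /* 100 bytes at a page-aligned address: one entry. */
        assert(sg_entries_for(0x10000, 100) == 1);
        /* 100 bytes straddling a page boundary: two entries. */
        assert(sg_entries_for(0x10FC0, 100) == 2);
        /* Two full pages starting mid-page: three entries. */
        assert(sg_entries_for(0x10800, 2 * PAGE_SIZE) == 3);
        printf("sg entry counts ok\n");
        return 0;
    }

cifs_sg_set_buf() then populates the entries the same way: a vmalloc address is walked page by page with vmalloc_to_page(), while a linearly-mapped buffer gets a single virt_to_page() entry.
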
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f18da99a6b55..2dde83a96968 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -583,8 +583,8 @@ int cifs_alloc_hash(const char *name, struct crypto_shash **shash,
struct sdesc **sdesc);
void cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc);
-extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
- unsigned int *len, unsigned int *offset);
+void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
+ unsigned int *len, unsigned int *offset);
void extract_unc_hostname(const char *unc, const char **h, size_t *len);
int copy_path_name(char *dst, const char *src);
@@ -600,4 +600,12 @@ static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
}
#endif
+static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
+{
+ if (cifs_sb && (backup_cred(cifs_sb)))
+ return options | CREATE_OPEN_BACKUP_INTENT;
+ else
+ return options;
+}
+
#endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 4b8632eda2bd..f924f05b1829 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -4933,8 +4933,13 @@ CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses,
return -ENODEV;
getDFSRetry:
- rc = smb_init(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc, (void **) &pSMB,
- (void **) &pSMBr);
+ /*
+ * Use smb_init_no_reconnect() instead of smb_init() as
+	 * CIFSGetDFSRefer() may be called from cifs_reconnect_tcon(), which
+	 * would otherwise cause infinite recursion.
+ */
+ rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc,
+ (void **)&pSMB, (void **)&pSMBr);
if (rc)
return rc;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 86bdebd2ece6..d8d9d9061544 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -469,8 +469,7 @@ static void reconn_inval_dfs_target(struct TCP_Server_Info *server,
}
static inline int reconn_setup_dfs_targets(struct cifs_sb_info *cifs_sb,
- struct dfs_cache_tgt_list *tl,
- struct dfs_cache_tgt_iterator **it)
+ struct dfs_cache_tgt_list *tl)
{
if (!cifs_sb->origin_fullpath)
return -EOPNOTSUPP;
@@ -514,11 +513,13 @@ cifs_reconnect(struct TCP_Server_Info *server)
sb = NULL;
} else {
cifs_sb = CIFS_SB(sb);
-
- rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list, &tgt_it);
- if (rc && (rc != -EOPNOTSUPP)) {
- cifs_server_dbg(VFS, "%s: no target servers for DFS failover\n",
- __func__);
+ rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list);
+ if (rc) {
+ cifs_sb = NULL;
+ if (rc != -EOPNOTSUPP) {
+ cifs_server_dbg(VFS, "%s: no target servers for DFS failover\n",
+ __func__);
+ }
} else {
server->nr_targets = dfs_cache_get_nr_tgts(&tgt_list);
}
@@ -791,9 +792,6 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
int length = 0;
int total_read;
- smb_msg->msg_control = NULL;
- smb_msg->msg_controllen = 0;
-
for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
try_to_freeze();
@@ -844,7 +842,7 @@ int
cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
unsigned int to_read)
{
- struct msghdr smb_msg;
+ struct msghdr smb_msg = {};
struct kvec iov = {.iov_base = buf, .iov_len = to_read};
iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read);
@@ -855,7 +853,7 @@ int
cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
unsigned int page_offset, unsigned int to_read)
{
- struct msghdr smb_msg;
+ struct msghdr smb_msg = {};
struct bio_vec bv = {
.bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read);
@@ -3238,7 +3236,7 @@ cifs_set_cifscreds(struct smb_vol *vol __attribute__((unused)),
struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
{
- int rc = -ENOMEM;
+ int rc = 0;
unsigned int xid;
struct cifs_ses *ses;
struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
@@ -3280,6 +3278,8 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
return ses;
}
+ rc = -ENOMEM;
+
cifs_dbg(FYI, "Existing smb sess not found\n");
ses = sesInfoAlloc();
if (ses == NULL)
@@ -4320,7 +4320,7 @@ static int mount_get_conns(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
/* do not care if a following call succeed - informational */
if (!tcon->pipe && server->ops->qfs_tcon) {
- server->ops->qfs_tcon(*xid, tcon);
+ server->ops->qfs_tcon(*xid, tcon, cifs_sb);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
if (tcon->fsDevInfo.DeviceCharacteristics &
cpu_to_le32(FILE_READ_ONLY_DEVICE))
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index cf6cec59696c..cf9267cf42e7 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -22,67 +22,69 @@
#include "dfs_cache.h"
-#define DFS_CACHE_HTABLE_SIZE 32
-#define DFS_CACHE_MAX_ENTRIES 64
+#define CACHE_HTABLE_SIZE 32
+#define CACHE_MAX_ENTRIES 64
#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
DFSREF_STORAGE_SERVER))
-struct dfs_cache_tgt {
- char *t_name;
- struct list_head t_list;
+struct cache_dfs_tgt {
+ char *name;
+ struct list_head list;
};
-struct dfs_cache_entry {
- struct hlist_node ce_hlist;
- const char *ce_path;
- int ce_ttl;
- int ce_srvtype;
- int ce_flags;
- struct timespec64 ce_etime;
- int ce_path_consumed;
- int ce_numtgts;
- struct list_head ce_tlist;
- struct dfs_cache_tgt *ce_tgthint;
- struct rcu_head ce_rcu;
+struct cache_entry {
+ struct hlist_node hlist;
+ const char *path;
+ int ttl;
+ int srvtype;
+ int flags;
+ struct timespec64 etime;
+ int path_consumed;
+ int numtgts;
+ struct list_head tlist;
+ struct cache_dfs_tgt *tgthint;
+ struct rcu_head rcu;
};
-static struct kmem_cache *dfs_cache_slab __read_mostly;
-
-struct dfs_cache_vol_info {
- char *vi_fullpath;
- struct smb_vol vi_vol;
- char *vi_mntdata;
- struct list_head vi_list;
+struct vol_info {
+ char *fullpath;
+ spinlock_t smb_vol_lock;
+ struct smb_vol smb_vol;
+ char *mntdata;
+ struct list_head list;
+ struct list_head rlist;
+ struct kref refcnt;
};
-struct dfs_cache {
- struct mutex dc_lock;
- struct nls_table *dc_nlsc;
- struct list_head dc_vol_list;
- int dc_ttl;
- struct delayed_work dc_refresh;
-};
+static struct kmem_cache *cache_slab __read_mostly;
+static struct workqueue_struct *dfscache_wq __read_mostly;
+
+static int cache_ttl;
+static DEFINE_SPINLOCK(cache_ttl_lock);
-static struct dfs_cache dfs_cache;
+static struct nls_table *cache_nlsc;
/*
* Number of entries in the cache
*/
-static size_t dfs_cache_count;
+static size_t cache_count;
-static DEFINE_MUTEX(dfs_cache_list_lock);
-static struct hlist_head dfs_cache_htable[DFS_CACHE_HTABLE_SIZE];
+static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
+static DEFINE_MUTEX(list_lock);
+
+static LIST_HEAD(vol_list);
+static DEFINE_SPINLOCK(vol_list_lock);
static void refresh_cache_worker(struct work_struct *work);
-static inline bool is_path_valid(const char *path)
-{
- return path && (strchr(path + 1, '\\') || strchr(path + 1, '/'));
-}
+static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
-static inline int get_normalized_path(const char *path, char **npath)
+static int get_normalized_path(const char *path, char **npath)
{
+ if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
+ return -EINVAL;
+
if (*path == '\\') {
*npath = (char *)path;
} else {
@@ -100,42 +102,42 @@ static inline void free_normalized_path(const char *path, char *npath)
kfree(npath);
}
-static inline bool cache_entry_expired(const struct dfs_cache_entry *ce)
+static inline bool cache_entry_expired(const struct cache_entry *ce)
{
struct timespec64 ts;
ktime_get_coarse_real_ts64(&ts);
- return timespec64_compare(&ts, &ce->ce_etime) >= 0;
+ return timespec64_compare(&ts, &ce->etime) >= 0;
}
-static inline void free_tgts(struct dfs_cache_entry *ce)
+static inline void free_tgts(struct cache_entry *ce)
{
- struct dfs_cache_tgt *t, *n;
+ struct cache_dfs_tgt *t, *n;
- list_for_each_entry_safe(t, n, &ce->ce_tlist, t_list) {
- list_del(&t->t_list);
- kfree(t->t_name);
+ list_for_each_entry_safe(t, n, &ce->tlist, list) {
+ list_del(&t->list);
+ kfree(t->name);
kfree(t);
}
}
static void free_cache_entry(struct rcu_head *rcu)
{
- struct dfs_cache_entry *ce = container_of(rcu, struct dfs_cache_entry,
- ce_rcu);
- kmem_cache_free(dfs_cache_slab, ce);
+ struct cache_entry *ce = container_of(rcu, struct cache_entry, rcu);
+
+ kmem_cache_free(cache_slab, ce);
}
-static inline void flush_cache_ent(struct dfs_cache_entry *ce)
+static inline void flush_cache_ent(struct cache_entry *ce)
{
- if (hlist_unhashed(&ce->ce_hlist))
+ if (hlist_unhashed(&ce->hlist))
return;
- hlist_del_init_rcu(&ce->ce_hlist);
- kfree_const(ce->ce_path);
+ hlist_del_init_rcu(&ce->hlist);
+ kfree(ce->path);
free_tgts(ce);
- dfs_cache_count--;
- call_rcu(&ce->ce_rcu, free_cache_entry);
+ cache_count--;
+ call_rcu(&ce->rcu, free_cache_entry);
}
static void flush_cache_ents(void)
@@ -143,11 +145,11 @@ static void flush_cache_ents(void)
int i;
rcu_read_lock();
- for (i = 0; i < DFS_CACHE_HTABLE_SIZE; i++) {
- struct hlist_head *l = &dfs_cache_htable[i];
- struct dfs_cache_entry *ce;
+ for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
+ struct hlist_head *l = &cache_htable[i];
+ struct cache_entry *ce;
- hlist_for_each_entry_rcu(ce, l, ce_hlist)
+ hlist_for_each_entry_rcu(ce, l, hlist)
flush_cache_ent(ce);
}
rcu_read_unlock();
@@ -159,35 +161,35 @@ static void flush_cache_ents(void)
static int dfscache_proc_show(struct seq_file *m, void *v)
{
int bucket;
- struct dfs_cache_entry *ce;
- struct dfs_cache_tgt *t;
+ struct cache_entry *ce;
+ struct cache_dfs_tgt *t;
seq_puts(m, "DFS cache\n---------\n");
- mutex_lock(&dfs_cache_list_lock);
+ mutex_lock(&list_lock);
rcu_read_lock();
- hash_for_each_rcu(dfs_cache_htable, bucket, ce, ce_hlist) {
+ hash_for_each_rcu(cache_htable, bucket, ce, hlist) {
seq_printf(m,
"cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
"interlink=%s,path_consumed=%d,expired=%s\n",
- ce->ce_path,
- ce->ce_srvtype == DFS_TYPE_ROOT ? "root" : "link",
- ce->ce_ttl, ce->ce_etime.tv_nsec,
- IS_INTERLINK_SET(ce->ce_flags) ? "yes" : "no",
- ce->ce_path_consumed,
+ ce->path,
+ ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
+ ce->ttl, ce->etime.tv_nsec,
+ IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
+ ce->path_consumed,
cache_entry_expired(ce) ? "yes" : "no");
- list_for_each_entry(t, &ce->ce_tlist, t_list) {
+ list_for_each_entry(t, &ce->tlist, list) {
seq_printf(m, " %s%s\n",
- t->t_name,
- ce->ce_tgthint == t ? " (target hint)" : "");
+ t->name,
+ ce->tgthint == t ? " (target hint)" : "");
}
}
rcu_read_unlock();
- mutex_unlock(&dfs_cache_list_lock);
+ mutex_unlock(&list_lock);
return 0;
}
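dfscache_proc_show() above is the usual seq_file shape for dumping an RCU-protected hash table: hold the mutex so target lists cannot change underneath, then walk the buckets inside rcu_read_lock(). Stripped to its core, with illustrative names:

    #include <linux/hashtable.h>
    #include <linux/seq_file.h>

    static DEFINE_HASHTABLE(demo_htable, 5);       /* 32 buckets */

    struct demo_entry {
            struct hlist_node hlist;
            const char *path;
            int ttl;
    };

    static int demo_proc_show(struct seq_file *m, void *v)
    {
            struct demo_entry *e;
            int bucket;

            rcu_read_lock();
            hash_for_each_rcu(demo_htable, bucket, e, hlist)
                    seq_printf(m, "path=%s,ttl=%d\n", e->path, e->ttl);
            rcu_read_unlock();
            return 0;
    }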
@@ -205,9 +207,9 @@ static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
return -EINVAL;
cifs_dbg(FYI, "clearing dfs cache");
- mutex_lock(&dfs_cache_list_lock);
+ mutex_lock(&list_lock);
flush_cache_ents();
- mutex_unlock(&dfs_cache_list_lock);
+ mutex_unlock(&list_lock);
return count;
}
@@ -226,25 +228,25 @@ const struct file_operations dfscache_proc_fops = {
};
#ifdef CONFIG_CIFS_DEBUG2
-static inline void dump_tgts(const struct dfs_cache_entry *ce)
+static inline void dump_tgts(const struct cache_entry *ce)
{
- struct dfs_cache_tgt *t;
+ struct cache_dfs_tgt *t;
cifs_dbg(FYI, "target list:\n");
- list_for_each_entry(t, &ce->ce_tlist, t_list) {
- cifs_dbg(FYI, " %s%s\n", t->t_name,
- ce->ce_tgthint == t ? " (target hint)" : "");
+ list_for_each_entry(t, &ce->tlist, list) {
+ cifs_dbg(FYI, " %s%s\n", t->name,
+ ce->tgthint == t ? " (target hint)" : "");
}
}
-static inline void dump_ce(const struct dfs_cache_entry *ce)
+static inline void dump_ce(const struct cache_entry *ce)
{
cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
- "interlink=%s,path_consumed=%d,expired=%s\n", ce->ce_path,
- ce->ce_srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ce_ttl,
- ce->ce_etime.tv_nsec,
- IS_INTERLINK_SET(ce->ce_flags) ? "yes" : "no",
- ce->ce_path_consumed,
+ "interlink=%s,path_consumed=%d,expired=%s\n", ce->path,
+ ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
+ ce->etime.tv_nsec,
+ IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
+ ce->path_consumed,
cache_entry_expired(ce) ? "yes" : "no");
dump_tgts(ce);
}
@@ -284,25 +286,33 @@ static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
*/
int dfs_cache_init(void)
{
+ int rc;
int i;
- dfs_cache_slab = kmem_cache_create("cifs_dfs_cache",
- sizeof(struct dfs_cache_entry), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!dfs_cache_slab)
+ dfscache_wq = alloc_workqueue("cifs-dfscache",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
+ if (!dfscache_wq)
return -ENOMEM;
- for (i = 0; i < DFS_CACHE_HTABLE_SIZE; i++)
- INIT_HLIST_HEAD(&dfs_cache_htable[i]);
+ cache_slab = kmem_cache_create("cifs_dfs_cache",
+ sizeof(struct cache_entry), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!cache_slab) {
+ rc = -ENOMEM;
+ goto out_destroy_wq;
+ }
- INIT_LIST_HEAD(&dfs_cache.dc_vol_list);
- mutex_init(&dfs_cache.dc_lock);
- INIT_DELAYED_WORK(&dfs_cache.dc_refresh, refresh_cache_worker);
- dfs_cache.dc_ttl = -1;
- dfs_cache.dc_nlsc = load_nls_default();
+ for (i = 0; i < CACHE_HTABLE_SIZE; i++)
+ INIT_HLIST_HEAD(&cache_htable[i]);
+
+ cache_nlsc = load_nls_default();
cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
return 0;
+
+out_destroy_wq:
+ destroy_workqueue(dfscache_wq);
+ return rc;
}
static inline unsigned int cache_entry_hash(const void *data, int size)
@@ -310,7 +320,7 @@ static inline unsigned int cache_entry_hash(const void *data, int size)
unsigned int h;
h = jhash(data, size, 0);
- return h & (DFS_CACHE_HTABLE_SIZE - 1);
+ return h & (CACHE_HTABLE_SIZE - 1);
}
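cache_entry_hash() reduces a jhash value to a bucket index with a mask, which is only valid because CACHE_HTABLE_SIZE is a power of two. The same computation in isolation (size chosen for illustration):

    #include <linux/jhash.h>

    #define DEMO_HTABLE_SIZE 32    /* must stay a power of two for the mask */

    static unsigned int demo_hash(const void *data, int size)
    {
            /* jhash spreads the bits; the mask folds them into a bucket */
            return jhash(data, size, 0) & (DEMO_HTABLE_SIZE - 1);
    }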
/* Check whether second path component of @path is SYSVOL or NETLOGON */
@@ -325,11 +335,11 @@ static inline bool is_sysvol_or_netlogon(const char *path)
}
/* Return target hint of a DFS cache entry */
-static inline char *get_tgt_name(const struct dfs_cache_entry *ce)
+static inline char *get_tgt_name(const struct cache_entry *ce)
{
- struct dfs_cache_tgt *t = ce->ce_tgthint;
+ struct cache_dfs_tgt *t = ce->tgthint;
- return t ? t->t_name : ERR_PTR(-ENOENT);
+ return t ? t->name : ERR_PTR(-ENOENT);
}
/* Return expire time out of a new entry's TTL */
@@ -346,19 +356,19 @@ static inline struct timespec64 get_expire_time(int ttl)
}
/* Allocate a new DFS target */
-static inline struct dfs_cache_tgt *alloc_tgt(const char *name)
+static inline struct cache_dfs_tgt *alloc_tgt(const char *name)
{
- struct dfs_cache_tgt *t;
+ struct cache_dfs_tgt *t;
t = kmalloc(sizeof(*t), GFP_KERNEL);
if (!t)
return ERR_PTR(-ENOMEM);
- t->t_name = kstrndup(name, strlen(name), GFP_KERNEL);
- if (!t->t_name) {
+ t->name = kstrndup(name, strlen(name), GFP_KERNEL);
+ if (!t->name) {
kfree(t);
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&t->t_list);
+ INIT_LIST_HEAD(&t->list);
return t;
}
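alloc_tgt() uses the kernel's ERR_PTR convention: on failure it encodes the errno in the returned pointer rather than returning NULL, so the caller can propagate the exact error. The caller side, schematically:

    struct cache_dfs_tgt *t;

    t = alloc_tgt(name);
    if (IS_ERR(t))
            return PTR_ERR(t);     /* e.g. -ENOMEM, preserved exactly */
    /* t is a valid pointer from here on */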
@@ -367,63 +377,63 @@ static inline struct dfs_cache_tgt *alloc_tgt(const char *name)
* target hint.
*/
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
- struct dfs_cache_entry *ce, const char *tgthint)
+ struct cache_entry *ce, const char *tgthint)
{
int i;
- ce->ce_ttl = refs[0].ttl;
- ce->ce_etime = get_expire_time(ce->ce_ttl);
- ce->ce_srvtype = refs[0].server_type;
- ce->ce_flags = refs[0].ref_flag;
- ce->ce_path_consumed = refs[0].path_consumed;
+ ce->ttl = refs[0].ttl;
+ ce->etime = get_expire_time(ce->ttl);
+ ce->srvtype = refs[0].server_type;
+ ce->flags = refs[0].ref_flag;
+ ce->path_consumed = refs[0].path_consumed;
for (i = 0; i < numrefs; i++) {
- struct dfs_cache_tgt *t;
+ struct cache_dfs_tgt *t;
t = alloc_tgt(refs[i].node_name);
if (IS_ERR(t)) {
free_tgts(ce);
return PTR_ERR(t);
}
- if (tgthint && !strcasecmp(t->t_name, tgthint)) {
- list_add(&t->t_list, &ce->ce_tlist);
+ if (tgthint && !strcasecmp(t->name, tgthint)) {
+ list_add(&t->list, &ce->tlist);
tgthint = NULL;
} else {
- list_add_tail(&t->t_list, &ce->ce_tlist);
+ list_add_tail(&t->list, &ce->tlist);
}
- ce->ce_numtgts++;
+ ce->numtgts++;
}
- ce->ce_tgthint = list_first_entry_or_null(&ce->ce_tlist,
- struct dfs_cache_tgt, t_list);
+ ce->tgthint = list_first_entry_or_null(&ce->tlist,
+ struct cache_dfs_tgt, list);
return 0;
}
/* Allocate a new cache entry */
-static struct dfs_cache_entry *
-alloc_cache_entry(const char *path, const struct dfs_info3_param *refs,
- int numrefs)
+static struct cache_entry *alloc_cache_entry(const char *path,
+ const struct dfs_info3_param *refs,
+ int numrefs)
{
- struct dfs_cache_entry *ce;
+ struct cache_entry *ce;
int rc;
- ce = kmem_cache_zalloc(dfs_cache_slab, GFP_KERNEL);
+ ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
if (!ce)
return ERR_PTR(-ENOMEM);
- ce->ce_path = kstrdup_const(path, GFP_KERNEL);
- if (!ce->ce_path) {
- kmem_cache_free(dfs_cache_slab, ce);
+ ce->path = kstrndup(path, strlen(path), GFP_KERNEL);
+ if (!ce->path) {
+ kmem_cache_free(cache_slab, ce);
return ERR_PTR(-ENOMEM);
}
- INIT_HLIST_NODE(&ce->ce_hlist);
- INIT_LIST_HEAD(&ce->ce_tlist);
+ INIT_HLIST_NODE(&ce->hlist);
+ INIT_LIST_HEAD(&ce->tlist);
rc = copy_ref_data(refs, numrefs, ce, NULL);
if (rc) {
- kfree_const(ce->ce_path);
- kmem_cache_free(dfs_cache_slab, ce);
+ kfree(ce->path);
+ kmem_cache_free(cache_slab, ce);
ce = ERR_PTR(rc);
}
return ce;
@@ -432,13 +442,13 @@ alloc_cache_entry(const char *path, const struct dfs_info3_param *refs,
static void remove_oldest_entry(void)
{
int bucket;
- struct dfs_cache_entry *ce;
- struct dfs_cache_entry *to_del = NULL;
+ struct cache_entry *ce;
+ struct cache_entry *to_del = NULL;
rcu_read_lock();
- hash_for_each_rcu(dfs_cache_htable, bucket, ce, ce_hlist) {
- if (!to_del || timespec64_compare(&ce->ce_etime,
- &to_del->ce_etime) < 0)
+ hash_for_each_rcu(cache_htable, bucket, ce, hlist) {
+ if (!to_del || timespec64_compare(&ce->etime,
+ &to_del->etime) < 0)
to_del = ce;
}
if (!to_del) {
@@ -453,94 +463,96 @@ out:
}
/* Add a new DFS cache entry */
-static inline struct dfs_cache_entry *
+static inline struct cache_entry *
add_cache_entry(unsigned int hash, const char *path,
const struct dfs_info3_param *refs, int numrefs)
{
- struct dfs_cache_entry *ce;
+ struct cache_entry *ce;
ce = alloc_cache_entry(path, refs, numrefs);
if (IS_ERR(ce))
return ce;
- hlist_add_head_rcu(&ce->ce_hlist, &dfs_cache_htable[hash]);
+ hlist_add_head_rcu(&ce->hlist, &cache_htable[hash]);
- mutex_lock(&dfs_cache.dc_lock);
- if (dfs_cache.dc_ttl < 0) {
- dfs_cache.dc_ttl = ce->ce_ttl;
- queue_delayed_work(cifsiod_wq, &dfs_cache.dc_refresh,
- dfs_cache.dc_ttl * HZ);
+ spin_lock(&cache_ttl_lock);
+ if (!cache_ttl) {
+ cache_ttl = ce->ttl;
+ queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
} else {
- dfs_cache.dc_ttl = min_t(int, dfs_cache.dc_ttl, ce->ce_ttl);
- mod_delayed_work(cifsiod_wq, &dfs_cache.dc_refresh,
- dfs_cache.dc_ttl * HZ);
+ cache_ttl = min_t(int, cache_ttl, ce->ttl);
+ mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
}
- mutex_unlock(&dfs_cache.dc_lock);
+ spin_unlock(&cache_ttl_lock);
return ce;
}
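add_cache_entry() keeps the refresh period pinned to the smallest TTL seen so far, and the two workqueue calls above are deliberately different: queue_delayed_work() does nothing if the work item is already pending, whereas mod_delayed_work() cancels the pending timer and re-arms it with the new, possibly shorter, timeout:

    queue_delayed_work(wq, &task, delay);  /* no-op if already pending */
    mod_delayed_work(wq, &task, delay);    /* re-arms pending work with the new delay */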
-static struct dfs_cache_entry *__find_cache_entry(unsigned int hash,
- const char *path)
+/*
+ * Find a DFS cache entry in hash table and optionally check prefix path against
+ * @path.
+ * Use whole path components in the match.
+ * Return ERR_PTR(-ENOENT) if the entry is not found.
+ */
+static struct cache_entry *lookup_cache_entry(const char *path,
+ unsigned int *hash)
{
- struct dfs_cache_entry *ce;
+ struct cache_entry *ce;
+ unsigned int h;
bool found = false;
- rcu_read_lock();
- hlist_for_each_entry_rcu(ce, &dfs_cache_htable[hash], ce_hlist) {
- if (!strcasecmp(path, ce->ce_path)) {
-#ifdef CONFIG_CIFS_DEBUG2
- char *name = get_tgt_name(ce);
+ h = cache_entry_hash(path, strlen(path));
- if (IS_ERR(name)) {
- rcu_read_unlock();
- return ERR_CAST(name);
- }
- cifs_dbg(FYI, "%s: cache hit\n", __func__);
- cifs_dbg(FYI, "%s: target hint: %s\n", __func__, name);
-#endif
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(ce, &cache_htable[h], hlist) {
+ if (!strcasecmp(path, ce->path)) {
found = true;
+ dump_ce(ce);
break;
}
}
rcu_read_unlock();
- return found ? ce : ERR_PTR(-ENOENT);
-}
-/*
- * Find a DFS cache entry in hash table and optionally check prefix path against
- * @path.
- * Use whole path components in the match.
- * Return ERR_PTR(-ENOENT) if the entry is not found.
- */
-static inline struct dfs_cache_entry *find_cache_entry(const char *path,
- unsigned int *hash)
-{
- *hash = cache_entry_hash(path, strlen(path));
- return __find_cache_entry(*hash, path);
+ if (!found)
+ ce = ERR_PTR(-ENOENT);
+ if (hash)
+ *hash = h;
+
+ return ce;
}
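lookup_cache_entry() is the read side of a standard RCU hash list. The write side, which runs under list_lock elsewhere in this file, pairs it with the _rcu list primitives and defers the actual free past a grace period. The pairing, reduced to a sketch:

    /* writer, with list_lock held */
    hlist_add_head_rcu(&ce->hlist, &cache_htable[hash]);
    /* later, on removal: */
    hlist_del_init_rcu(&ce->hlist);
    call_rcu(&ce->rcu, free_cache_entry);  /* freed once all readers drain */

    /* reader */
    rcu_read_lock();
    hlist_for_each_entry_rcu(ce, &cache_htable[h], hlist) {
            /* ce cannot be freed before rcu_read_unlock() */
    }
    rcu_read_unlock();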
static inline void destroy_slab_cache(void)
{
rcu_barrier();
- kmem_cache_destroy(dfs_cache_slab);
+ kmem_cache_destroy(cache_slab);
}
-static inline void free_vol(struct dfs_cache_vol_info *vi)
+static void __vol_release(struct vol_info *vi)
{
- list_del(&vi->vi_list);
- kfree(vi->vi_fullpath);
- kfree(vi->vi_mntdata);
- cifs_cleanup_volume_info_contents(&vi->vi_vol);
+ kfree(vi->fullpath);
+ kfree(vi->mntdata);
+ cifs_cleanup_volume_info_contents(&vi->smb_vol);
kfree(vi);
}
+static void vol_release(struct kref *kref)
+{
+ struct vol_info *vi = container_of(kref, struct vol_info, refcnt);
+
+ spin_lock(&vol_list_lock);
+ list_del(&vi->list);
+ spin_unlock(&vol_list_lock);
+ __vol_release(vi);
+}
+
static inline void free_vol_list(void)
{
- struct dfs_cache_vol_info *vi, *nvi;
+ struct vol_info *vi, *nvi;
- list_for_each_entry_safe(vi, nvi, &dfs_cache.dc_vol_list, vi_list)
- free_vol(vi);
+ list_for_each_entry_safe(vi, nvi, &vol_list, list) {
+ list_del_init(&vi->list);
+ __vol_release(vi);
+ }
}
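struct vol_info is now reference counted: vol_release(), the kref_put callback, unlinks the entry from vol_list and frees it, while free_vol_list() above bypasses the kref at teardown because it already owns the whole list. The lookup-and-pin idiom this enables (used by dfs_cache_update_vol() further down):

    spin_lock(&vol_list_lock);
    vi = find_vol(fullpath);
    if (!IS_ERR(vi))
            kref_get(&vi->refcnt);         /* pin before dropping the lock */
    spin_unlock(&vol_list_lock);

    if (!IS_ERR(vi)) {
            /* safe to use vi without vol_list_lock held */
            kref_put(&vi->refcnt, vol_release);
    }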
/**
@@ -548,40 +560,38 @@ static inline void free_vol_list(void)
*/
void dfs_cache_destroy(void)
{
- cancel_delayed_work_sync(&dfs_cache.dc_refresh);
- unload_nls(dfs_cache.dc_nlsc);
+ cancel_delayed_work_sync(&refresh_task);
+ unload_nls(cache_nlsc);
free_vol_list();
- mutex_destroy(&dfs_cache.dc_lock);
-
flush_cache_ents();
destroy_slab_cache();
- mutex_destroy(&dfs_cache_list_lock);
+ destroy_workqueue(dfscache_wq);
cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}
-static inline struct dfs_cache_entry *
+static inline struct cache_entry *
__update_cache_entry(const char *path, const struct dfs_info3_param *refs,
int numrefs)
{
int rc;
unsigned int h;
- struct dfs_cache_entry *ce;
+ struct cache_entry *ce;
char *s, *th = NULL;
- ce = find_cache_entry(path, &h);
+ ce = lookup_cache_entry(path, &h);
if (IS_ERR(ce))
return ce;
- if (ce->ce_tgthint) {
- s = ce->ce_tgthint->t_name;
+ if (ce->tgthint) {
+ s = ce->tgthint->name;
th = kstrndup(s, strlen(s), GFP_KERNEL);
if (!th)
return ERR_PTR(-ENOMEM);
}
free_tgts(ce);
- ce->ce_numtgts = 0;
+ ce->numtgts = 0;
rc = copy_ref_data(refs, numrefs, ce, th);
kfree(th);
@@ -593,10 +603,10 @@ __update_cache_entry(const char *path, const struct dfs_info3_param *refs,
}
/* Update an expired cache entry by getting a new DFS referral from server */
-static struct dfs_cache_entry *
+static struct cache_entry *
update_cache_entry(const unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_codepage, int remap,
- const char *path, struct dfs_cache_entry *ce)
+ const char *path, struct cache_entry *ce)
{
int rc;
struct dfs_info3_param *refs = NULL;
@@ -636,20 +646,20 @@ update_cache_entry(const unsigned int xid, struct cifs_ses *ses,
* For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
* handle them properly.
*/
-static struct dfs_cache_entry *
+static struct cache_entry *
do_dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_codepage, int remap,
const char *path, bool noreq)
{
int rc;
unsigned int h;
- struct dfs_cache_entry *ce;
+ struct cache_entry *ce;
struct dfs_info3_param *nrefs;
int numnrefs;
cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
- ce = find_cache_entry(path, &h);
+ ce = lookup_cache_entry(path, &h);
if (IS_ERR(ce)) {
cifs_dbg(FYI, "%s: cache miss\n", __func__);
/*
@@ -690,9 +700,9 @@ do_dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
cifs_dbg(FYI, "%s: new cache entry\n", __func__);
- if (dfs_cache_count >= DFS_CACHE_MAX_ENTRIES) {
+ if (cache_count >= CACHE_MAX_ENTRIES) {
cifs_dbg(FYI, "%s: reached max cache size (%d)",
- __func__, DFS_CACHE_MAX_ENTRIES);
+ __func__, CACHE_MAX_ENTRIES);
remove_oldest_entry();
}
ce = add_cache_entry(h, path, nrefs, numnrefs);
@@ -701,7 +711,7 @@ do_dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
if (IS_ERR(ce))
return ce;
- dfs_cache_count++;
+ cache_count++;
}
dump_ce(ce);
@@ -723,7 +733,7 @@ do_dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
}
/* Set up a new DFS referral from a given cache entry */
-static int setup_ref(const char *path, const struct dfs_cache_entry *ce,
+static int setup_ref(const char *path, const struct cache_entry *ce,
struct dfs_info3_param *ref, const char *tgt)
{
int rc;
@@ -736,7 +746,7 @@ static int setup_ref(const char *path, const struct dfs_cache_entry *ce,
if (!ref->path_name)
return -ENOMEM;
- ref->path_consumed = ce->ce_path_consumed;
+ ref->path_consumed = ce->path_consumed;
ref->node_name = kstrndup(tgt, strlen(tgt), GFP_KERNEL);
if (!ref->node_name) {
@@ -744,9 +754,9 @@ static int setup_ref(const char *path, const struct dfs_cache_entry *ce,
goto err_free_path;
}
- ref->ttl = ce->ce_ttl;
- ref->server_type = ce->ce_srvtype;
- ref->ref_flag = ce->ce_flags;
+ ref->ttl = ce->ttl;
+ ref->server_type = ce->srvtype;
+ ref->ref_flag = ce->flags;
return 0;
@@ -757,25 +767,25 @@ err_free_path:
}
/* Return target list of a DFS cache entry */
-static int get_tgt_list(const struct dfs_cache_entry *ce,
+static int get_tgt_list(const struct cache_entry *ce,
struct dfs_cache_tgt_list *tl)
{
int rc;
struct list_head *head = &tl->tl_list;
- struct dfs_cache_tgt *t;
+ struct cache_dfs_tgt *t;
struct dfs_cache_tgt_iterator *it, *nit;
memset(tl, 0, sizeof(*tl));
INIT_LIST_HEAD(head);
- list_for_each_entry(t, &ce->ce_tlist, t_list) {
+ list_for_each_entry(t, &ce->tlist, list) {
it = kzalloc(sizeof(*it), GFP_KERNEL);
if (!it) {
rc = -ENOMEM;
goto err_free_it;
}
- it->it_name = kstrndup(t->t_name, strlen(t->t_name),
+ it->it_name = kstrndup(t->name, strlen(t->name),
GFP_KERNEL);
if (!it->it_name) {
kfree(it);
@@ -783,12 +793,12 @@ static int get_tgt_list(const struct dfs_cache_entry *ce,
goto err_free_it;
}
- if (ce->ce_tgthint == t)
+ if (ce->tgthint == t)
list_add(&it->it_list, head);
else
list_add_tail(&it->it_list, head);
}
- tl->tl_numtgts = ce->ce_numtgts;
+ tl->tl_numtgts = ce->numtgts;
return 0;
@@ -829,16 +839,13 @@ int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
{
int rc;
char *npath;
- struct dfs_cache_entry *ce;
-
- if (unlikely(!is_path_valid(path)))
- return -EINVAL;
+ struct cache_entry *ce;
rc = get_normalized_path(path, &npath);
if (rc)
return rc;
- mutex_lock(&dfs_cache_list_lock);
+ mutex_lock(&list_lock);
ce = do_dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
if (!IS_ERR(ce)) {
if (ref)
@@ -850,7 +857,7 @@ int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
} else {
rc = PTR_ERR(ce);
}
- mutex_unlock(&dfs_cache_list_lock);
+ mutex_unlock(&list_lock);
free_normalized_path(path, npath);
return rc;
}
@@ -876,16 +883,13 @@ int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
{
int rc;
char *npath;
- struct dfs_cache_entry *ce;
-
- if (unlikely(!is_path_valid(path)))
- return -EINVAL;
+ struct cache_entry *ce;
rc = get_normalized_path(path, &npath);
if (rc)
return rc;
- mutex_lock(&dfs_cache_list_lock);
+ mutex_lock(&list_lock);
ce = do_dfs_cache_find(0, NULL, NULL, 0, npath, true);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
@@ -899,7 +903,7 @@ int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
if (!rc && tgt_list)
rc = get_tgt_list(ce, tgt_list);
out:
- mutex_unlock(&dfs_cache_list_lock);
+ mutex_unlock(&list_lock);
free_normalized_path(path, npath);
return rc;
}
@@ -929,11 +933,8 @@ int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
{
int rc;
char *npath;
- struct dfs_cache_entry *ce;
- struct dfs_cache_tgt *t;
-
- if (unlikely(!is_path_valid(path)))
- return -EINVAL;
+ struct cache_entry *ce;
+ struct cache_dfs_tgt *t;
rc = get_normalized_path(path, &npath);
if (rc)
@@ -941,7 +942,7 @@ int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
- mutex_lock(&dfs_cache_list_lock);
+ mutex_lock(&list_lock);
ce = do_dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
@@ -950,14 +951,14 @@ int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
rc = 0;
- t = ce->ce_tgthint;
+ t = ce->tgthint;
- if (likely(!strcasecmp(it->it_name, t->t_name)))
+ if (likely(!strcasecmp(it->it_name, t->name)))
goto out;
- list_for_each_entry(t, &ce->ce_tlist, t_list) {
- if (!strcasecmp(t->t_name, it->it_name)) {
- ce->ce_tgthint = t;
+ list_for_each_entry(t, &ce->tlist, list) {
+ if (!strcasecmp(t->name, it->it_name)) {
+ ce->tgthint = t;
cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
it->it_name);
break;
@@ -965,7 +966,7 @@ int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
}
out:
- mutex_unlock(&dfs_cache_list_lock);
+ mutex_unlock(&list_lock);
free_normalized_path(path, npath);
return rc;
}
@@ -989,10 +990,10 @@ int dfs_cache_noreq_update_tgthint(const char *path,
{
int rc;
char *npath;
- struct dfs_cache_entry *ce;
- struct dfs_cache_tgt *t;
+ struct cache_entry *ce;
+ struct cache_dfs_tgt *t;
- if (unlikely(!is_path_valid(path)) || !it)
+ if (!it)
return -EINVAL;
rc = get_normalized_path(path, &npath);
@@ -1001,7 +1002,7 @@ int dfs_cache_noreq_update_tgthint(const char *path,
cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
- mutex_lock(&dfs_cache_list_lock);
+ mutex_lock(&list_lock);
ce = do_dfs_cache_find(0, NULL, NULL, 0, npath, true);
if (IS_ERR(ce)) {
@@ -1011,14 +1012,14 @@ int dfs_cache_noreq_update_tgthint(const char *path,
rc = 0;
- t = ce->ce_tgthint;
+ t = ce->tgthint;
- if (unlikely(!strcasecmp(it->it_name, t->t_name)))
+ if (unlikely(!strcasecmp(it->it_name, t->name)))
goto out;
- list_for_each_entry(t, &ce->ce_tlist, t_list) {
- if (!strcasecmp(t->t_name, it->it_name)) {
- ce->ce_tgthint = t;
+ list_for_each_entry(t, &ce->tlist, list) {
+ if (!strcasecmp(t->name, it->it_name)) {
+ ce->tgthint = t;
cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
it->it_name);
break;
@@ -1026,7 +1027,7 @@ int dfs_cache_noreq_update_tgthint(const char *path,
}
out:
- mutex_unlock(&dfs_cache_list_lock);
+ mutex_unlock(&list_lock);
free_normalized_path(path, npath);
return rc;
}
@@ -1047,13 +1048,11 @@ int dfs_cache_get_tgt_referral(const char *path,
{
int rc;
char *npath;
- struct dfs_cache_entry *ce;
+ struct cache_entry *ce;
unsigned int h;
if (!it || !ref)
return -EINVAL;
- if (unlikely(!is_path_valid(path)))
- return -EINVAL;
rc = get_normalized_path(path, &npath);
if (rc)
@@ -1061,9 +1060,9 @@ int dfs_cache_get_tgt_referral(const char *path,
cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
- mutex_lock(&dfs_cache_list_lock);
+ mutex_lock(&list_lock);
- ce = find_cache_entry(npath, &h);
+ ce = lookup_cache_entry(npath, &h);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
goto out;
@@ -1074,7 +1073,7 @@ int dfs_cache_get_tgt_referral(const char *path,
rc = setup_ref(path, ce, ref, it->it_name);
out:
- mutex_unlock(&dfs_cache_list_lock);
+ mutex_unlock(&list_lock);
free_normalized_path(path, npath);
return rc;
}
@@ -1085,7 +1084,7 @@ static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
if (vol->username) {
new->username = kstrndup(vol->username, strlen(vol->username),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!new->username)
return -ENOMEM;
}
@@ -1103,7 +1102,7 @@ static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
}
if (vol->domainname) {
new->domainname = kstrndup(vol->domainname,
- strlen(vol->domainname), GFP_KERNEL);
+ strlen(vol->domainname), GFP_KERNEL);
if (!new->domainname)
goto err_free_unc;
}
@@ -1150,7 +1149,7 @@ err_free_username:
int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
{
int rc;
- struct dfs_cache_vol_info *vi;
+ struct vol_info *vi;
if (!vol || !fullpath || !mntdata)
return -EINVAL;
@@ -1161,38 +1160,41 @@ int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
if (!vi)
return -ENOMEM;
- vi->vi_fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
- if (!vi->vi_fullpath) {
+ vi->fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
+ if (!vi->fullpath) {
rc = -ENOMEM;
goto err_free_vi;
}
- rc = dup_vol(vol, &vi->vi_vol);
+ rc = dup_vol(vol, &vi->smb_vol);
if (rc)
goto err_free_fullpath;
- vi->vi_mntdata = mntdata;
+ vi->mntdata = mntdata;
+ spin_lock_init(&vi->smb_vol_lock);
+ kref_init(&vi->refcnt);
+
+ spin_lock(&vol_list_lock);
+ list_add_tail(&vi->list, &vol_list);
+ spin_unlock(&vol_list_lock);
- mutex_lock(&dfs_cache.dc_lock);
- list_add_tail(&vi->vi_list, &dfs_cache.dc_vol_list);
- mutex_unlock(&dfs_cache.dc_lock);
return 0;
err_free_fullpath:
- kfree(vi->vi_fullpath);
+ kfree(vi->fullpath);
err_free_vi:
kfree(vi);
return rc;
}
-static inline struct dfs_cache_vol_info *find_vol(const char *fullpath)
+/* Must be called with vol_list_lock held */
+static struct vol_info *find_vol(const char *fullpath)
{
- struct dfs_cache_vol_info *vi;
+ struct vol_info *vi;
- list_for_each_entry(vi, &dfs_cache.dc_vol_list, vi_list) {
- cifs_dbg(FYI, "%s: vi->vi_fullpath: %s\n", __func__,
- vi->vi_fullpath);
- if (!strcasecmp(vi->vi_fullpath, fullpath))
+ list_for_each_entry(vi, &vol_list, list) {
+ cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
+ if (!strcasecmp(vi->fullpath, fullpath))
return vi;
}
return ERR_PTR(-ENOENT);
@@ -1208,30 +1210,31 @@ static inline struct dfs_cache_vol_info *find_vol(const char *fullpath)
*/
int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
{
- int rc;
- struct dfs_cache_vol_info *vi;
+ struct vol_info *vi;
if (!fullpath || !server)
return -EINVAL;
cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
- mutex_lock(&dfs_cache.dc_lock);
-
+ spin_lock(&vol_list_lock);
vi = find_vol(fullpath);
if (IS_ERR(vi)) {
- rc = PTR_ERR(vi);
- goto out;
+ spin_unlock(&vol_list_lock);
+ return PTR_ERR(vi);
}
+ kref_get(&vi->refcnt);
+ spin_unlock(&vol_list_lock);
cifs_dbg(FYI, "%s: updating volume info\n", __func__);
- memcpy(&vi->vi_vol.dstaddr, &server->dstaddr,
- sizeof(vi->vi_vol.dstaddr));
- rc = 0;
+ spin_lock(&vi->smb_vol_lock);
+ memcpy(&vi->smb_vol.dstaddr, &server->dstaddr,
+ sizeof(vi->smb_vol.dstaddr));
+ spin_unlock(&vi->smb_vol_lock);
-out:
- mutex_unlock(&dfs_cache.dc_lock);
- return rc;
+ kref_put(&vi->refcnt, vol_release);
+
+ return 0;
}
/**
@@ -1241,18 +1244,18 @@ out:
*/
void dfs_cache_del_vol(const char *fullpath)
{
- struct dfs_cache_vol_info *vi;
+ struct vol_info *vi;
if (!fullpath || !*fullpath)
return;
cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
- mutex_lock(&dfs_cache.dc_lock);
+ spin_lock(&vol_list_lock);
vi = find_vol(fullpath);
- if (!IS_ERR(vi))
- free_vol(vi);
- mutex_unlock(&dfs_cache.dc_lock);
+ spin_unlock(&vol_list_lock);
+
+ kref_put(&vi->refcnt, vol_release);
}
/* Get all tcons that are within a DFS namespace and can be refreshed */
@@ -1280,7 +1283,7 @@ static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
spin_unlock(&cifs_tcp_ses_lock);
}
-static inline bool is_dfs_link(const char *path)
+static bool is_dfs_link(const char *path)
{
char *s;
@@ -1290,7 +1293,7 @@ static inline bool is_dfs_link(const char *path)
return !!strchr(s + 1, '\\');
}
-static inline char *get_dfs_root(const char *path)
+static char *get_dfs_root(const char *path)
{
char *s, *npath;
@@ -1309,9 +1312,34 @@ static inline char *get_dfs_root(const char *path)
return npath;
}
+static inline void put_tcp_server(struct TCP_Server_Info *server)
+{
+ cifs_put_tcp_session(server, 0);
+}
+
+static struct TCP_Server_Info *get_tcp_server(struct smb_vol *vol)
+{
+ struct TCP_Server_Info *server;
+
+ server = cifs_find_tcp_session(vol);
+ if (IS_ERR_OR_NULL(server))
+ return NULL;
+
+ spin_lock(&GlobalMid_Lock);
+ if (server->tcpStatus != CifsGood) {
+ spin_unlock(&GlobalMid_Lock);
+ put_tcp_server(server);
+ return NULL;
+ }
+ spin_unlock(&GlobalMid_Lock);
+
+ return server;
+}
+
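get_tcp_server()/put_tcp_server() fold the find-then-check-status dance into one helper: a caller either receives a referenced server that was in CifsGood state at the time of the check, or NULL, never a half-valid pointer. A typical call site, schematically:

    server = get_tcp_server(&vi->smb_vol);
    if (!server)
            return;        /* session gone or not currently usable */
    /* ... issue requests against server ... */
    put_tcp_server(server);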
/* Find root SMB session out of a DFS link path */
-static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
- struct cifs_tcon *tcon, const char *path)
+static struct cifs_ses *find_root_ses(struct vol_info *vi,
+ struct cifs_tcon *tcon,
+ const char *path)
{
char *rpath;
int rc;
@@ -1333,8 +1361,7 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
goto out;
}
- mdata = cifs_compose_mount_options(vi->vi_mntdata, rpath, &ref,
- &devname);
+ mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref, &devname);
free_dfs_info_param(&ref);
if (IS_ERR(mdata)) {
@@ -1351,13 +1378,8 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
goto out;
}
- server = cifs_find_tcp_session(&vol);
- if (IS_ERR_OR_NULL(server)) {
- ses = ERR_PTR(-EHOSTDOWN);
- goto out;
- }
- if (server->tcpStatus != CifsGood) {
- cifs_put_tcp_session(server, 0);
+ server = get_tcp_server(&vol);
+ if (!server) {
ses = ERR_PTR(-EHOSTDOWN);
goto out;
}
@@ -1373,14 +1395,13 @@ out:
}
/* Refresh DFS cache entry from a given tcon */
-static void do_refresh_tcon(struct dfs_cache *dc, struct dfs_cache_vol_info *vi,
- struct cifs_tcon *tcon)
+static void refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
{
int rc = 0;
unsigned int xid;
char *path, *npath;
unsigned int h;
- struct dfs_cache_entry *ce;
+ struct cache_entry *ce;
struct dfs_info3_param *refs = NULL;
int numrefs = 0;
struct cifs_ses *root_ses = NULL, *ses;
@@ -1393,9 +1414,9 @@ static void do_refresh_tcon(struct dfs_cache *dc, struct dfs_cache_vol_info *vi,
if (rc)
goto out;
- mutex_lock(&dfs_cache_list_lock);
- ce = find_cache_entry(npath, &h);
- mutex_unlock(&dfs_cache_list_lock);
+ mutex_lock(&list_lock);
+ ce = lookup_cache_entry(npath, &h);
+ mutex_unlock(&list_lock);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
@@ -1421,12 +1442,12 @@ static void do_refresh_tcon(struct dfs_cache *dc, struct dfs_cache_vol_info *vi,
rc = -EOPNOTSUPP;
} else {
rc = ses->server->ops->get_dfs_refer(xid, ses, path, &refs,
- &numrefs, dc->dc_nlsc,
+ &numrefs, cache_nlsc,
tcon->remap);
if (!rc) {
- mutex_lock(&dfs_cache_list_lock);
+ mutex_lock(&list_lock);
ce = __update_cache_entry(npath, refs, numrefs);
- mutex_unlock(&dfs_cache_list_lock);
+ mutex_unlock(&list_lock);
dump_refs(refs, numrefs);
free_dfs_info_array(refs, numrefs);
if (IS_ERR(ce))
@@ -1448,30 +1469,52 @@ out:
*/
static void refresh_cache_worker(struct work_struct *work)
{
- struct dfs_cache *dc = container_of(work, struct dfs_cache,
- dc_refresh.work);
- struct dfs_cache_vol_info *vi;
+ struct vol_info *vi, *nvi;
struct TCP_Server_Info *server;
- LIST_HEAD(list);
+ LIST_HEAD(vols);
+ LIST_HEAD(tcons);
struct cifs_tcon *tcon, *ntcon;
- mutex_lock(&dc->dc_lock);
-
- list_for_each_entry(vi, &dc->dc_vol_list, vi_list) {
- server = cifs_find_tcp_session(&vi->vi_vol);
- if (IS_ERR_OR_NULL(server))
+ /*
+ * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
+ * for refreshing.
+ */
+ spin_lock(&vol_list_lock);
+ list_for_each_entry(vi, &vol_list, list) {
+ server = get_tcp_server(&vi->smb_vol);
+ if (!server)
continue;
- if (server->tcpStatus != CifsGood)
- goto next;
- get_tcons(server, &list);
- list_for_each_entry_safe(tcon, ntcon, &list, ulist) {
- do_refresh_tcon(dc, vi, tcon);
+
+ kref_get(&vi->refcnt);
+ list_add_tail(&vi->rlist, &vols);
+ put_tcp_server(server);
+ }
+ spin_unlock(&vol_list_lock);
+
+ /* Walk through all TCONs and refresh any expired cache entry */
+ list_for_each_entry_safe(vi, nvi, &vols, rlist) {
+ spin_lock(&vi->smb_vol_lock);
+ server = get_tcp_server(&vi->smb_vol);
+ spin_unlock(&vi->smb_vol_lock);
+
+ if (!server)
+ goto next_vol;
+
+ get_tcons(server, &tcons);
+ list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
+ refresh_tcon(vi, tcon);
list_del_init(&tcon->ulist);
cifs_put_tcon(tcon);
}
-next:
- cifs_put_tcp_session(server, 0);
+
+ put_tcp_server(server);
+
+next_vol:
+ list_del_init(&vi->rlist);
+ kref_put(&vi->refcnt, vol_release);
}
- queue_delayed_work(cifsiod_wq, &dc->dc_refresh, dc->dc_ttl * HZ);
- mutex_unlock(&dc->dc_lock);
+
+ spin_lock(&cache_ttl_lock);
+ queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
+ spin_unlock(&cache_ttl_lock);
}
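The rewritten refresh_cache_worker() follows the standard two-phase shape for workers that must block: phase one snapshots the candidates under the spinlock, pinning each one with a kref and a worker-private rlist linkage; phase two performs the slow network refresh with no locks held. Its skeleton:

    LIST_HEAD(snapshot);

    /* phase 1: collect under the lock, taking references */
    spin_lock(&vol_list_lock);
    list_for_each_entry(vi, &vol_list, list) {
            kref_get(&vi->refcnt);
            list_add_tail(&vi->rlist, &snapshot);
    }
    spin_unlock(&vol_list_lock);

    /* phase 2: blocking work, lock-free */
    list_for_each_entry_safe(vi, nvi, &snapshot, rlist) {
            /* ... refresh tcons for vi ... */
            list_del_init(&vi->rlist);
            kref_put(&vi->refcnt, vol_release);
    }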
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 9ae9a514676c..548047a709bf 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -357,13 +357,10 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
create_options |= CREATE_OPTION_READONLY;
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = desired_access;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, create_options);
oparms.disposition = disposition;
oparms.path = full_path;
oparms.fid = fid;
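The dir.c hunk above, and most of the hunks that follow, drop the repeated `if (backup_cred(cifs_sb)) create_options |= CREATE_OPEN_BACKUP_INTENT;` boilerplate in favour of cifs_create_options(). The helper's definition is not part of this excerpt; presumably it is a one-liner along these lines:

    /* sketch only -- the real definition lives in a header not shown here */
    static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
    {
            if (cifs_sb && backup_cred(cifs_sb))
                    return options | CREATE_OPEN_BACKUP_INTENT;
            return options;
    }

Centralizing the check keeps the backup-intent flag applied consistently across every open path, and the `cifs_sb &&` guard lets paths without a superblock pass NULL.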
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 03c85beecec1..a0b99c5e0721 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -223,9 +223,6 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
if (!buf)
return -ENOMEM;
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
/* O_SYNC also has bit for O_DSYNC so following check picks up either */
if (f_flags & O_SYNC)
create_options |= CREATE_WRITE_THROUGH;
@@ -236,7 +233,7 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = desired_access;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, create_options);
oparms.disposition = disposition;
oparms.path = full_path;
oparms.fid = fid;
@@ -751,9 +748,6 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
desired_access = cifs_convert_flags(cfile->f_flags);
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
/* O_SYNC also has bit for O_DSYNC so following check picks up either */
if (cfile->f_flags & O_SYNC)
create_options |= CREATE_WRITE_THROUGH;
@@ -767,7 +761,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = desired_access;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, create_options);
oparms.disposition = disposition;
oparms.path = full_path;
oparms.fid = &cfile->fid;
@@ -3194,6 +3188,9 @@ static ssize_t __cifs_writev(
ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
{
+ struct file *file = iocb->ki_filp;
+
+ cifs_revalidate_mapping(file->f_inode);
return __cifs_writev(iocb, from, true);
}
@@ -3874,6 +3871,15 @@ static ssize_t __cifs_readv(
len = ctx->len;
}
+ if (direct) {
+ rc = filemap_write_and_wait_range(file->f_inode->i_mapping,
+ offset, offset + len - 1);
+ if (rc) {
+ kref_put(&ctx->refcount, cifs_aio_ctx_release);
+ return -EAGAIN;
+ }
+ }
+
/* grab a lock here due to read response handlers can access ctx */
mutex_lock(&ctx->aio_mutex);
@@ -4504,9 +4510,9 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
io_error:
kunmap(page);
- unlock_page(page);
read_complete:
+ unlock_page(page);
return rc;
}
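The cifs_readpage_worker() hunk fixes a page-lock imbalance: the io_error path unlocked the page, but callers that jumped directly to read_complete returned with it still locked. Hoisting unlock_page() under the shared label gives every exit exactly one unlock:

    io_error:
            kunmap(page);
    read_complete:
            unlock_page(page);     /* reached by all paths, success or error */
            return rc;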
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index fd9e289f3e72..af0980c720c7 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -472,9 +472,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_READ;
- oparms.create_options = CREATE_NOT_DIR;
- if (backup_cred(cifs_sb))
- oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_OPEN;
oparms.path = path;
oparms.fid = &fid;
@@ -1225,7 +1223,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = DELETE | FILE_WRITE_ATTRIBUTES;
- oparms.create_options = CREATE_NOT_DIR;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_OPEN;
oparms.path = full_path;
oparms.fid = &fid;
@@ -1763,7 +1761,7 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
oparms.cifs_sb = cifs_sb;
/* open the file to be renamed -- we need DELETE perms */
oparms.desired_access = DELETE;
- oparms.create_options = CREATE_NOT_DIR;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_OPEN;
oparms.path = from_path;
oparms.fid = &fid;
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 1a01e108d75e..bc08d856ee05 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -65,7 +65,7 @@ static long cifs_ioctl_query_info(unsigned int xid, struct file *filep,
if (tcon->ses->server->ops->ioctl_query_info)
rc = tcon->ses->server->ops->ioctl_query_info(
- xid, tcon, utf16_path,
+ xid, tcon, cifs_sb, utf16_path,
filep->private_data ? 0 : 1, p);
else
rc = -EOPNOTSUPP;
@@ -191,7 +191,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
rc = put_user(ExtAttrBits &
FS_FL_USER_VISIBLE,
(int __user *)arg);
- if (rc != EOPNOTSUPP)
+ if (rc != -EOPNOTSUPP)
break;
}
#endif /* CONFIG_CIFS_POSIX */
@@ -220,7 +220,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
* pSMBFile->fid.netfid,
* extAttrBits,
* &ExtAttrMask);
- * if (rc != EOPNOTSUPP)
+ * if (rc != -EOPNOTSUPP)
* break;
*/
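The two ioctl.c changes fix a sign bug: in-kernel call paths return errnos negated, so a comparison against the positive EOPNOTSUPP constant (95) is always true and the conditional break fired unconditionally. The corrected idiom:

    rc = some_operation();         /* hypothetical: 0 on success, negative errno on failure */
    if (rc != -EOPNOTSUPP)         /* the positive constant could never match */
            break;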
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index a24bcbbb5033..02949a2f2860 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -318,7 +318,7 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_READ;
- oparms.create_options = CREATE_NOT_DIR;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_OPEN;
oparms.path = path;
oparms.fid = &fid;
@@ -356,15 +356,11 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid fid;
struct cifs_open_parms oparms;
struct cifs_io_parms io_parms;
- int create_options = CREATE_NOT_DIR;
-
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_WRITE;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_CREATE;
oparms.path = path;
oparms.fid = &fid;
@@ -405,9 +401,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_READ;
- oparms.create_options = CREATE_NOT_DIR;
- if (backup_cred(cifs_sb))
- oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_OPEN;
oparms.fid = &fid;
oparms.reconnect = false;
@@ -460,14 +454,10 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid fid;
struct cifs_open_parms oparms;
struct cifs_io_parms io_parms;
- int create_options = CREATE_NOT_DIR;
__le16 *utf16_path;
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct kvec iov[2];
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
@@ -477,10 +467,11 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_WRITE;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_CREATE;
oparms.fid = &fid;
oparms.reconnect = false;
+ oparms.mode = 0644;
rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
NULL);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 40ca394fd5de..db1fcdedf289 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -349,6 +349,10 @@ checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
cifs_dbg(VFS, "Length less than smb header size\n");
}
return -EIO;
+ } else if (total_read < sizeof(*smb) + 2 * smb->WordCount) {
+ cifs_dbg(VFS, "%s: can't read BCC due to invalid WordCount(%u)\n",
+ __func__, smb->WordCount);
+ return -EIO;
}
/* otherwise, there is enough to get to the BCC */
@@ -972,8 +976,8 @@ cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
* Input: rqst - a smb_rqst, page - a page index for rqst
* Output: *len - the length for this page, *offset - the offset for this page
*/
-void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
- unsigned int *len, unsigned int *offset)
+void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
+ unsigned int *len, unsigned int *offset)
{
*len = rqst->rq_pagesz;
*offset = (page == 0) ? rqst->rq_offset : 0;
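The checkSMB() hunk adds a bound that comes straight from the SMB1 wire layout: WordCount counts the 16-bit parameter words sitting between the fixed header and the 2-byte byte count (BCC), so the BCC's offset is only within the received data once at least sizeof(header) + 2 * WordCount bytes have arrived. A crafted WordCount would otherwise push the BCC read past the receive buffer. The arithmetic as a sketch, field names taken from the hunk:

    /* the BCC sits right after WordCount 16-bit parameter words */
    if (total_read < sizeof(*smb) + 2 * smb->WordCount)
            return -EIO;   /* BCC would lie beyond the bytes received */
    bcc = (__le16 *)((u8 *)smb + sizeof(*smb) + 2 * smb->WordCount);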
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index e523c05a4487..b130efaf8feb 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -504,7 +504,8 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
}
static void
-cifs_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
+cifs_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb)
{
CIFSSMBQFSDeviceInfo(xid, tcon);
CIFSSMBQFSAttributeInfo(xid, tcon);
@@ -565,7 +566,7 @@ cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = FILE_READ_ATTRIBUTES;
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.disposition = FILE_OPEN;
oparms.path = full_path;
oparms.fid = &fid;
@@ -793,7 +794,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = SYNCHRONIZE | FILE_WRITE_ATTRIBUTES;
- oparms.create_options = CREATE_NOT_DIR;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_OPEN;
oparms.path = full_path;
oparms.fid = &fid;
@@ -872,7 +873,7 @@ cifs_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
static int
cifs_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
- struct kstatfs *buf)
+ struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
{
int rc = -EOPNOTSUPP;
@@ -970,7 +971,8 @@ cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = FILE_READ_ATTRIBUTES;
- oparms.create_options = OPEN_REPARSE_POINT;
+ oparms.create_options = cifs_create_options(cifs_sb,
+ OPEN_REPARSE_POINT);
oparms.disposition = FILE_OPEN;
oparms.path = full_path;
oparms.fid = &fid;
@@ -1029,7 +1031,6 @@ cifs_make_node(unsigned int xid, struct inode *inode,
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct inode *newinode = NULL;
int rc = -EPERM;
- int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
FILE_ALL_INFO *buf = NULL;
struct cifs_io_parms io_parms;
__u32 oplock = 0;
@@ -1090,13 +1091,11 @@ cifs_make_node(unsigned int xid, struct inode *inode,
goto out;
}
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_WRITE;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
+ CREATE_OPTION_SPECIAL);
oparms.disposition = FILE_CREATE;
oparms.path = full_path;
oparms.fid = &fid;
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index f2a6f7f28340..c9abda93d65b 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -98,9 +98,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = desired_access;
oparms.disposition = create_disposition;
- oparms.create_options = create_options;
- if (backup_cred(cifs_sb))
- oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
+ oparms.create_options = cifs_create_options(cifs_sb, create_options);
oparms.fid = &fid;
oparms.reconnect = false;
oparms.mode = mode;
@@ -456,7 +454,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
/* If it is a root and its handle is cached then use it */
if (!strlen(full_path) && !no_cached_open) {
- rc = open_shroot(xid, tcon, &fid);
+ rc = open_shroot(xid, tcon, cifs_sb, &fid);
if (rc)
goto out;
@@ -473,9 +471,6 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
goto out;
}
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
cifs_get_readable_path(tcon, full_path, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_READ_ATTRIBUTES, FILE_OPEN, create_options,
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 7177720e822e..d3d5d2c6c401 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -302,6 +302,9 @@ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
char *
smb2_get_data_area_len(int *off, int *len, struct smb2_sync_hdr *shdr)
{
+ const int max_off = 4096;
+ const int max_len = 128 * 1024;
+
*off = 0;
*len = 0;
@@ -369,29 +372,20 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_sync_hdr *shdr)
* Invalid length or offset probably means data area is invalid, but
* we have little choice but to ignore the data area in this case.
*/
- if (*off > 4096) {
- cifs_dbg(VFS, "offset %d too large, data area ignored\n", *off);
- *len = 0;
- *off = 0;
- } else if (*off < 0) {
- cifs_dbg(VFS, "negative offset %d to data invalid ignore data area\n",
- *off);
+ if (unlikely(*off < 0 || *off > max_off ||
+ *len < 0 || *len > max_len)) {
+ cifs_dbg(VFS, "%s: invalid data area (off=%d len=%d)\n",
+ __func__, *off, *len);
*off = 0;
*len = 0;
- } else if (*len < 0) {
- cifs_dbg(VFS, "negative data length %d invalid, data area ignored\n",
- *len);
- *len = 0;
- } else if (*len > 128 * 1024) {
- cifs_dbg(VFS, "data area larger than 128K: %d\n", *len);
+ } else if (*off == 0) {
*len = 0;
}
/* return pointer to beginning of data area, ie offset from SMB start */
- if ((*off != 0) && (*len != 0))
+ if (*off > 0 && *len > 0)
return (char *)shdr + *off;
- else
- return NULL;
+ return NULL;
}
/*
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 57164563eec6..5fea15a5f341 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -79,6 +79,7 @@ smb2_add_credits(struct TCP_Server_Info *server,
*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
}
+ WARN_ON_ONCE(server->in_flight == 0);
server->in_flight--;
if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
rc = change_conf(server);
@@ -587,7 +588,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
if (rc == -EOPNOTSUPP) {
cifs_dbg(FYI,
"server does not support query network interfaces\n");
- goto out;
+ ret_data_len = 0;
} else if (rc != 0) {
cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
goto out;
@@ -644,7 +645,8 @@ smb2_cached_lease_break(struct work_struct *work)
/*
* Open the directory at the root of a share
*/
-int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
+int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, struct cifs_fid *pfid)
{
struct cifs_ses *ses = tcon->ses;
struct TCP_Server_Info *server = ses->server;
@@ -696,7 +698,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
oparms.tcon = tcon;
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
oparms.fid = pfid;
@@ -812,7 +814,8 @@ oshr_free:
}
static void
-smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
+smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb)
{
int rc;
__le16 srch_path = 0; /* Null - open root of share */
@@ -821,18 +824,19 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
struct cifs_fid fid;
bool no_cached_open = tcon->nohandlecache;
- oparms.tcon = tcon;
- oparms.desired_access = FILE_READ_ATTRIBUTES;
- oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
- oparms.fid = &fid;
- oparms.reconnect = false;
+ oparms = (struct cifs_open_parms) {
+ .tcon = tcon,
+ .desired_access = FILE_READ_ATTRIBUTES,
+ .disposition = FILE_OPEN,
+ .create_options = cifs_create_options(cifs_sb, 0),
+ .fid = &fid,
+ };
if (no_cached_open)
rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
NULL);
else
- rc = open_shroot(xid, tcon, &fid);
+ rc = open_shroot(xid, tcon, cifs_sb, &fid);
if (rc)
return;
@@ -854,7 +858,8 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
}
static void
-smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
+smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb)
{
int rc;
__le16 srch_path = 0; /* Null - open root of share */
@@ -865,7 +870,7 @@ smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -900,10 +905,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
- if (backup_cred(cifs_sb))
- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
- else
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -960,9 +962,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
size_t name_len, value_len, user_name_len;
while (src_size > 0) {
- name = &src->ea_data[0];
name_len = (size_t)src->ea_name_length;
- value = &src->ea_data[src->ea_name_length + 1];
value_len = (size_t)le16_to_cpu(src->ea_value_length);
if (name_len == 0)
@@ -974,6 +974,9 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
goto out;
}
+ name = &src->ea_data[0];
+ value = &src->ea_data[src->ea_name_length + 1];
+
if (ea_name) {
if (ea_name_len == name_len &&
memcmp(ea_name, name, name_len) == 0) {
@@ -1178,10 +1181,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_WRITE_EA;
oparms.disposition = FILE_OPEN;
- if (backup_cred(cifs_sb))
- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
- else
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -1215,6 +1215,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
COMPOUND_FID, current->tgid,
FILE_FULL_EA_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
+ if (rc)
+ goto sea_exit;
smb2_set_next_command(tcon, &rqst[1]);
smb2_set_related(&rqst[1]);
@@ -1411,6 +1413,7 @@ req_res_key_exit:
static int
smb2_ioctl_query_info(const unsigned int xid,
struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
__le16 *path, int is_dir,
unsigned long p)
{
@@ -1436,6 +1439,7 @@ smb2_ioctl_query_info(const unsigned int xid,
struct kvec close_iov[1];
unsigned int size[2];
void *data[2];
+ int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
memset(rqst, 0, sizeof(rqst));
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
@@ -1471,10 +1475,7 @@ smb2_ioctl_query_info(const unsigned int xid,
memset(&oparms, 0, sizeof(oparms));
oparms.tcon = tcon;
oparms.disposition = FILE_OPEN;
- if (is_dir)
- oparms.create_options = CREATE_NOT_FILE;
- else
- oparms.create_options = CREATE_NOT_DIR;
+ oparms.create_options = cifs_create_options(cifs_sb, create_options);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -1678,7 +1679,7 @@ smb2_copychunk_range(const unsigned int xid,
pcchunk->SourceOffset = cpu_to_le64(src_off);
pcchunk->TargetOffset = cpu_to_le64(dest_off);
pcchunk->Length =
- cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
+ cpu_to_le32(min_t(u64, len, tcon->max_bytes_chunk));
/* Request server copy to target from src identified by key */
kfree(retbuf);
@@ -2071,10 +2072,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
oparms.disposition = FILE_OPEN;
- if (backup_cred(cifs_sb))
- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
- else
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = fid;
oparms.reconnect = false;
@@ -2275,10 +2273,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = desired_access;
oparms.disposition = FILE_OPEN;
- if (cifs_sb && backup_cred(cifs_sb))
- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
- else
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -2334,7 +2329,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
static int
smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
- struct kstatfs *buf)
+ struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
{
struct smb2_query_info_rsp *rsp;
struct smb2_fs_full_size_info *info = NULL;
@@ -2349,7 +2344,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
FS_FULL_SIZE_INFORMATION,
SMB2_O_INFO_FILESYSTEM,
sizeof(struct smb2_fs_full_size_info),
- &rsp_iov, &buftype, NULL);
+ &rsp_iov, &buftype, cifs_sb);
if (rc)
goto qfs_exit;
@@ -2371,7 +2366,7 @@ qfs_exit:
static int
smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
- struct kstatfs *buf)
+ struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
{
int rc;
__le16 srch_path = 0; /* Null - open root of share */
@@ -2380,12 +2375,12 @@ smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid fid;
if (!tcon->posix_extensions)
- return smb2_queryfs(xid, tcon, buf);
+ return smb2_queryfs(xid, tcon, cifs_sb, buf);
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -2503,6 +2498,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
(char **)&dfs_rsp, &dfs_rsp_size);
} while (rc == -EAGAIN);
+ if (!rc && !dfs_rsp)
+ rc = -EIO;
if (rc) {
if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
cifs_tcon_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
@@ -2654,6 +2651,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
struct smb2_create_rsp *create_rsp;
struct smb2_ioctl_rsp *ioctl_rsp;
struct reparse_data_buffer *reparse_buf;
+ int create_options = is_reparse_point ? OPEN_REPARSE_POINT : 0;
u32 plen;
cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
@@ -2680,14 +2678,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
-
- if (backup_cred(cifs_sb))
- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
- else
- oparms.create_options = 0;
- if (is_reparse_point)
- oparms.create_options = OPEN_REPARSE_POINT;
-
+ oparms.create_options = cifs_create_options(cifs_sb, create_options);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -2866,11 +2857,6 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
tcon = tlink_tcon(tlink);
xid = get_xid();
- if (backup_cred(cifs_sb))
- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
- else
- oparms.create_options = 0;
-
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path) {
rc = -ENOMEM;
@@ -2881,6 +2867,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
oparms.tcon = tcon;
oparms.desired_access = READ_CONTROL;
oparms.disposition = FILE_OPEN;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -2922,11 +2909,6 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
tcon = tlink_tcon(tlink);
xid = get_xid();
- if (backup_cred(cifs_sb))
- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
- else
- oparms.create_options = 0;
-
if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
access_flags = WRITE_OWNER;
else
@@ -2941,6 +2923,7 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
oparms.tcon = tcon;
oparms.desired_access = access_flags;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.disposition = FILE_OPEN;
oparms.path = path;
oparms.fid = &fid;
@@ -3050,7 +3033,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
loff_t offset, loff_t len)
{
- struct inode *inode;
+ struct inode *inode = file_inode(file);
struct cifsFileInfo *cfile = file->private_data;
struct file_zero_data_information fsctl_buf;
long rc;
@@ -3059,14 +3042,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
xid = get_xid();
- inode = d_inode(cfile->dentry);
-
+ inode_lock(inode);
/* Need to make file sparse, if not already, before freeing range. */
/* Consider adding equivalent for compressed since it could also work */
if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
rc = -EOPNOTSUPP;
- free_xid(xid);
- return rc;
+ goto out;
}
/*
@@ -3085,6 +3066,8 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
true /* is_fctl */, (char *)&fsctl_buf,
sizeof(struct file_zero_data_information),
CIFSMaxBufSize, NULL, NULL);
+out:
+ inode_unlock(inode);
free_xid(xid);
return rc;
}
@@ -3622,69 +3605,82 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
}
-/* We can not use the normal sg_set_buf() as we will sometimes pass a
- * stack object as buf.
- */
-static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
- unsigned int buflen)
+static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst,
+ int num_rqst, const u8 *sig, u8 **iv,
+ struct aead_request **req, struct scatterlist **sgl,
+ unsigned int *num_sgs)
{
- void *addr;
- /*
- * VMAP_STACK (at least) puts stack into the vmalloc address space
- */
- if (is_vmalloc_addr(buf))
- addr = vmalloc_to_page(buf);
- else
- addr = virt_to_page(buf);
- sg_set_page(sg, addr, buflen, offset_in_page(buf));
+ unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
+ unsigned int iv_size = crypto_aead_ivsize(tfm);
+ unsigned int len;
+ u8 *p;
+
+ *num_sgs = cifs_get_num_sgs(rqst, num_rqst, sig);
+
+ len = iv_size;
+ len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
+ len = ALIGN(len, crypto_tfm_ctx_alignment());
+ len += req_size;
+ len = ALIGN(len, __alignof__(struct scatterlist));
+ len += *num_sgs * sizeof(**sgl);
+
+ p = kmalloc(len, GFP_ATOMIC);
+ if (!p)
+ return NULL;
+
+ *iv = (u8 *)PTR_ALIGN(p, crypto_aead_alignmask(tfm) + 1);
+ *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
+ crypto_tfm_ctx_alignment());
+ *sgl = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
+ __alignof__(struct scatterlist));
+ return p;
}
-/* Assumes the first rqst has a transform header as the first iov.
- * I.e.
- * rqst[0].rq_iov[0] is transform header
- * rqst[0].rq_iov[1+] data to be encrypted/decrypted
- * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
- */
-static struct scatterlist *
-init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
+static void *smb2_get_aead_req(struct crypto_aead *tfm, const struct smb_rqst *rqst,
+ int num_rqst, const u8 *sig, u8 **iv,
+ struct aead_request **req, struct scatterlist **sgl)
{
- unsigned int sg_len;
+ unsigned int off, len, skip;
struct scatterlist *sg;
- unsigned int i;
- unsigned int j;
- unsigned int idx = 0;
- int skip;
-
- sg_len = 1;
- for (i = 0; i < num_rqst; i++)
- sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;
+ unsigned int num_sgs;
+ unsigned long addr;
+ int i, j;
+ void *p;
- sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
- if (!sg)
+ p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, sgl, &num_sgs);
+ if (!p)
return NULL;
- sg_init_table(sg, sg_len);
+ sg_init_table(*sgl, num_sgs);
+ sg = *sgl;
+
+ /* Assumes the first rqst has a transform header as the first iov.
+ * I.e.
+ * rqst[0].rq_iov[0] is transform header
+ * rqst[0].rq_iov[1+] data to be encrypted/decrypted
+ * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
+ */
for (i = 0; i < num_rqst; i++) {
+ /*
+ * The first rqst has a transform header where the
+ * first 20 bytes are not part of the encrypted blob.
+ */
for (j = 0; j < rqst[i].rq_nvec; j++) {
- /*
- * The first rqst has a transform header where the
- * first 20 bytes are not part of the encrypted blob
- */
- skip = (i == 0) && (j == 0) ? 20 : 0;
- smb2_sg_set_buf(&sg[idx++],
- rqst[i].rq_iov[j].iov_base + skip,
- rqst[i].rq_iov[j].iov_len - skip);
- }
+ struct kvec *iov = &rqst[i].rq_iov[j];
+ skip = (i == 0) && (j == 0) ? 20 : 0;
+ addr = (unsigned long)iov->iov_base + skip;
+ len = iov->iov_len - skip;
+ sg = cifs_sg_set_buf(sg, (void *)addr, len);
+ }
for (j = 0; j < rqst[i].rq_npages; j++) {
- unsigned int len, offset;
-
- rqst_page_get_length(&rqst[i], j, &len, &offset);
- sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
+ rqst_page_get_length(&rqst[i], j, &len, &off);
+ sg_set_page(sg++, rqst[i].rq_pages[j], len, off);
}
}
- smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
- return sg;
+ cifs_sg_set_buf(sg, sig, SMB2_SIGNATURE_SIZE);
+
+ return p;
}
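
A minimal userspace sketch of the single-allocation layout smb2_get_aead_req() builds above — IV, aead_request and scatterlist carved out of one buffer so a single kfree() releases everything. ALIGN_UP/PTR_ALIGN are re-implemented locally and every size below is a stand-in, not a real crypto API value:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))
#define PTR_ALIGN(p, a)  ((void *)ALIGN_UP((uintptr_t)(p), (a)))

int main(void)
{
	size_t iv_size = 12, req_size = 64, num_sgs = 8; /* placeholders */
	size_t sg_size = 16, ctx_align = 8;

	/* total length: iv | request (ctx-aligned) | sg array */
	size_t len = iv_size;
	len = ALIGN_UP(len, ctx_align);
	len += req_size;
	len = ALIGN_UP(len, sizeof(void *));
	len += num_sgs * sg_size;

	uint8_t *p = malloc(len);
	if (!p)
		return 1;

	/* carve the three regions out of the one buffer */
	uint8_t *iv = p;
	void *req = PTR_ALIGN(iv + iv_size, ctx_align);
	void *sgl = PTR_ALIGN((uint8_t *)req + req_size, sizeof(void *));

	printf("buf=%p iv=%p req=%p sgl=%p (one free() releases all)\n",
	       (void *)p, (void *)iv, req, sgl);
	free(p);
	return 0;
}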
static int
@@ -3726,11 +3722,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
u8 sign[SMB2_SIGNATURE_SIZE] = {};
u8 key[SMB3_SIGN_KEY_SIZE];
struct aead_request *req;
- char *iv;
- unsigned int iv_len;
+ u8 *iv;
DECLARE_CRYPTO_WAIT(wait);
struct crypto_aead *tfm;
unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
+ void *creq;
rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
if (rc) {
@@ -3759,32 +3755,15 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
return rc;
}
- req = aead_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
+ creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg);
+ if (unlikely(!creq))
return -ENOMEM;
- }
if (!enc) {
memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
crypt_len += SMB2_SIGNATURE_SIZE;
}
- sg = init_sg(num_rqst, rqst, sign);
- if (!sg) {
- cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
- rc = -ENOMEM;
- goto free_req;
- }
-
- iv_len = crypto_aead_ivsize(tfm);
- iv = kzalloc(iv_len, GFP_KERNEL);
- if (!iv) {
- cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
- rc = -ENOMEM;
- goto free_sg;
- }
-
if (server->cipher_type == SMB2_ENCRYPTION_AES128_GCM)
memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
else {
@@ -3792,6 +3771,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
}
+ aead_request_set_tfm(req, tfm);
aead_request_set_crypt(req, sg, sg, crypt_len, iv);
aead_request_set_ad(req, assoc_data_len);
@@ -3804,11 +3784,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
if (!rc && enc)
memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
- kfree(iv);
-free_sg:
- kfree(sg);
-free_req:
- kfree(req);
+ kfree(creq);
return rc;
}
@@ -4485,7 +4461,6 @@ smb2_make_node(unsigned int xid, struct inode *inode,
{
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
int rc = -EPERM;
- int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
FILE_ALL_INFO *buf = NULL;
struct cifs_io_parms io_parms;
__u32 oplock = 0;
@@ -4521,13 +4496,11 @@ smb2_make_node(unsigned int xid, struct inode *inode,
goto out;
}
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_WRITE;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
+ CREATE_OPTION_SPECIAL);
oparms.disposition = FILE_CREATE;
oparms.path = full_path;
oparms.fid = &fid;
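
The hunks above replace the per-call-site backup_cred() checks with cifs_create_options(); a plausible reconstruction of that helper as a self-contained sketch — the struct is reduced to one field, the flag values are illustrative, and the in-tree backup_cred() additionally compares the caller's fsuid/fsgid:

#include <stdbool.h>

#define CIFS_MOUNT_BACKUP_UID     0x01   /* illustrative values */
#define CIFS_MOUNT_BACKUP_GID     0x02
#define CREATE_OPEN_BACKUP_INTENT 0x00004000

struct cifs_sb_info { unsigned int mnt_cifs_flags; };

/* simplified: the real helper also checks the caller's fsuid/fsgid */
static bool backup_cred(struct cifs_sb_info *cifs_sb)
{
	return cifs_sb->mnt_cifs_flags &
	       (CIFS_MOUNT_BACKUP_UID | CIFS_MOUNT_BACKUP_GID);
}

static int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
{
	/* fold the backup-intent bit in at one place, instead of at
	 * every open site as in the removed lines above */
	if (cifs_sb && backup_cred(cifs_sb))
		return options | CREATE_OPEN_BACKUP_INTENT;
	return options;
}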
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 0857eb7a95e2..da1a1b531ca5 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1100,9 +1100,9 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
pneg_inbuf->Dialects[0] =
cpu_to_le16(server->vals->protocol_id);
pneg_inbuf->DialectCount = cpu_to_le16(1);
- /* structure is big enough for 3 dialects, sending only 1 */
+ /* structure is big enough for 4 dialects, sending only 1 */
inbuflen = sizeof(*pneg_inbuf) -
- sizeof(pneg_inbuf->Dialects[0]) * 2;
+ sizeof(pneg_inbuf->Dialects[0]) * 3;
}
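
A tiny self-contained check of the size arithmetic in the hunk above: the request structure carries room for 4 dialects, and sending one means trimming 3 trailing array slots. The struct below is a stand-in for smb2_validate_negotiate_info_req:

#include <stdint.h>
#include <stdio.h>

/* stand-in: 4 dialect slots, like the real negotiate request */
struct vneg_inbuf {
	uint32_t capabilities;
	uint8_t  guid[16];
	uint16_t security_mode;
	uint16_t dialect_count;
	uint16_t dialects[4];
};

int main(void)
{
	/* sending a single dialect: drop the 3 unused trailing slots */
	size_t inbuflen = sizeof(struct vneg_inbuf)
			  - sizeof(uint16_t) * 3;

	printf("full=%zu trimmed=%zu\n", sizeof(struct vneg_inbuf), inbuflen);
	return 0;
}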
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
@@ -3639,12 +3639,15 @@ smb2_readv_callback(struct mid_q_entry *mid)
(struct smb2_sync_hdr *)rdata->iov[0].iov_base;
struct cifs_credits credits = { .value = 0, .instance = 0 };
struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
- .rq_nvec = 1,
- .rq_pages = rdata->pages,
- .rq_offset = rdata->page_offset,
- .rq_npages = rdata->nr_pages,
- .rq_pagesz = rdata->pagesz,
- .rq_tailsz = rdata->tailsz };
+ .rq_nvec = 1, };
+
+ if (rdata->got_bytes) {
+ rqst.rq_pages = rdata->pages;
+ rqst.rq_offset = rdata->page_offset;
+ rqst.rq_npages = rdata->nr_pages;
+ rqst.rq_pagesz = rdata->pagesz;
+ rqst.rq_tailsz = rdata->tailsz;
+ }
cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
__func__, mid->mid, mid->mid_state, rdata->result,
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 739556e385be..297f5e455a34 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -651,7 +651,7 @@ struct smb2_tree_disconnect_rsp {
#define SMB2_CREATE_SD_BUFFER "SecD" /* security descriptor */
#define SMB2_CREATE_DURABLE_HANDLE_REQUEST "DHnQ"
#define SMB2_CREATE_DURABLE_HANDLE_RECONNECT "DHnC"
-#define SMB2_CREATE_ALLOCATION_SIZE "AISi"
+#define SMB2_CREATE_ALLOCATION_SIZE "AlSi"
#define SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST "MxAc"
#define SMB2_CREATE_TIMEWARP_REQUEST "TWrp"
#define SMB2_CREATE_QUERY_ON_DISK_ID "QFid"
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 57f7075a3587..4d4c0faa3d8a 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -67,7 +67,7 @@ extern int smb3_handle_read_data(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
extern int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_fid *pfid);
+ struct cifs_sb_info *cifs_sb, struct cifs_fid *pfid);
extern void close_shroot(struct cached_fid *cfid);
extern void move_smb2_info_to_cifs(FILE_ALL_INFO *dst,
struct smb2_file_all_info *src);
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 5b1b97e9e0c9..0842a1af0b98 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -607,8 +607,13 @@ static struct rdma_cm_id *smbd_create_id(
log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
goto out;
}
- wait_for_completion_interruptible_timeout(
+ rc = wait_for_completion_interruptible_timeout(
&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+ /* e.g. if interrupted returns -ERESTARTSYS */
+ if (rc < 0) {
+ log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc);
+ goto out;
+ }
rc = info->ri_rc;
if (rc) {
log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
@@ -621,8 +626,13 @@ static struct rdma_cm_id *smbd_create_id(
log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
goto out;
}
- wait_for_completion_interruptible_timeout(
+ rc = wait_for_completion_interruptible_timeout(
&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+ /* e.g. if interrupted returns -ERESTARTSYS */
+ if (rc < 0) {
+ log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc);
+ goto out;
+ }
rc = info->ri_rc;
if (rc) {
log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
@@ -1478,6 +1488,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
destroy_workqueue(info->workqueue);
log_rdma_event(INFO, "rdma session destroyed\n");
kfree(info);
+ server->smbd_conn = NULL;
}
/*
@@ -1783,6 +1794,7 @@ static struct smbd_connection *_smbd_get_connection(
allocate_mr_failed:
/* At this point, we need to do a full transport shutdown */
+ server->smbd_conn = info;
smbd_destroy(server);
return NULL;
@@ -2346,6 +2358,7 @@ static int allocate_mr_list(struct smbd_connection *info)
atomic_set(&info->mr_ready_count, 0);
atomic_set(&info->mr_used_count, 0);
init_waitqueue_head(&info->wait_for_mr_cleanup);
+ INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
/* Allocate more MRs (2x) than hardware responder_resources */
for (i = 0; i < info->responder_resources * 2; i++) {
smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
@@ -2374,13 +2387,13 @@ static int allocate_mr_list(struct smbd_connection *info)
list_add_tail(&smbdirect_mr->list, &info->mr_list);
atomic_inc(&info->mr_ready_count);
}
- INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
return 0;
out:
kfree(smbdirect_mr);
list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
+ list_del(&smbdirect_mr->list);
ib_dereg_mr(smbdirect_mr->mr);
kfree(smbdirect_mr->sgl);
kfree(smbdirect_mr);
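
wait_for_completion_interruptible_timeout() returns a negative error when interrupted, 0 on timeout, and the remaining jiffies on completion — which is why the smbd_create_id() hunks above branch only on rc < 0 and then fall through to info->ri_rc. A toy rehearsal of that three-way contract, with fake_wait() standing in for the kernel API:

#include <stdio.h>

/* stand-in for wait_for_completion_interruptible_timeout():
 * < 0 interrupted, == 0 timed out, > 0 jiffies left on completion */
static long fake_wait(long scripted_outcome)
{
	return scripted_outcome;
}

static int resolve_step(long outcome, int handler_rc)
{
	long rc = fake_wait(outcome);

	if (rc < 0)        /* e.g. -ERESTARTSYS: bail out early */
		return (int)rc;

	/* timeout (0) and completion (>0) both fall through: the
	 * status recorded by the event handler makes the call */
	return handler_rc;
}

int main(void)
{
	printf("interrupted: %d\n", resolve_step(-512, 0));
	printf("timed out:   %d\n", resolve_step(0, -110));
	printf("completed:   %d\n", resolve_step(5, 0));
	return 0;
}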
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 61e7df4d9cb1..60141a7468b0 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -209,10 +209,6 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
*sent = 0;
- smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
- smb_msg->msg_namelen = sizeof(struct sockaddr);
- smb_msg->msg_control = NULL;
- smb_msg->msg_controllen = 0;
if (server->noblocksnd)
smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
else
@@ -316,7 +312,7 @@ static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
struct smb_rqst *rqst)
{
- int rc = 0;
+ int rc;
struct kvec *iov;
int n_vec;
unsigned int send_length = 0;
@@ -324,10 +320,11 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
sigset_t mask, oldmask;
size_t total_len = 0, sent, size;
struct socket *ssocket = server->ssocket;
- struct msghdr smb_msg;
+ struct msghdr smb_msg = {};
int val = 1;
__be32 rfc1002_marker;
+ cifs_in_send_inc(server);
if (cifs_rdma_enabled(server)) {
/* return -EAGAIN when connecting or reconnecting */
rc = -EAGAIN;
@@ -336,14 +333,17 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
goto smbd_done;
}
+ rc = -EAGAIN;
if (ssocket == NULL)
- return -EAGAIN;
+ goto out;
+ rc = -ERESTARTSYS;
if (fatal_signal_pending(current)) {
cifs_dbg(FYI, "signal pending before send request\n");
- return -ERESTARTSYS;
+ goto out;
}
+ rc = 0;
/* cork the socket */
kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
(char *)&val, sizeof(val));
@@ -457,7 +457,8 @@ smbd_done:
rc);
else if (rc > 0)
rc = 0;
-
+out:
+ cifs_in_send_dec(server);
return rc;
}
@@ -834,9 +835,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
* I/O response may come back and free the mid entry on another thread.
*/
cifs_save_when_sent(mid);
- cifs_in_send_inc(server);
rc = smb_send_rqst(server, 1, rqst, flags);
- cifs_in_send_dec(server);
if (rc < 0) {
revert_current_mid(server, mid->credits);
@@ -1099,9 +1098,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
else
midQ[i]->callback = cifs_compound_last_callback;
}
- cifs_in_send_inc(server);
rc = smb_send_rqst(server, num_rqst, rqst, flags);
- cifs_in_send_dec(server);
for (i = 0; i < num_rqst; i++)
cifs_save_when_sent(midQ[i]);
@@ -1336,9 +1333,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
midQ->mid_state = MID_REQUEST_SUBMITTED;
- cifs_in_send_inc(server);
rc = smb_send(server, in_buf, len);
- cifs_in_send_dec(server);
cifs_save_when_sent(midQ);
if (rc < 0)
@@ -1475,9 +1470,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
}
midQ->mid_state = MID_REQUEST_SUBMITTED;
- cifs_in_send_inc(server);
rc = smb_send(server, in_buf, len);
- cifs_in_send_dec(server);
cifs_save_when_sent(midQ);
if (rc < 0)
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index eb3b1898da46..610484c90260 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -790,7 +790,7 @@ static int coda_upcall(struct venus_comm *vcp,
sig_req = kmalloc(sizeof(struct upc_req), GFP_KERNEL);
if (!sig_req) goto exit;
- sig_inputArgs = kvzalloc(sizeof(struct coda_in_hdr), GFP_KERNEL);
+ sig_inputArgs = kvzalloc(sizeof(*sig_inputArgs), GFP_KERNEL);
if (!sig_inputArgs) {
kfree(sig_req);
goto exit;
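
The coda fix above swaps sizeof(struct coda_in_hdr) for sizeof(*sig_inputArgs), the usual idiom for keeping an allocation's size tied to the pointer's actual type. A minimal illustration:

#include <stdlib.h>

struct small { int a; };
struct big   { int a; double b[8]; };

int main(void)
{
	struct big *p;

	/* fragile: still compiles if p's type changes later,
	 * silently under-allocating */
	p = malloc(sizeof(struct small));
	free(p);

	/* robust: the size always tracks the pointee's type */
	p = malloc(sizeof(*p));
	free(p);
	return 0;
}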
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 8fcc53d83af2..22f7dc6688de 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -994,8 +994,7 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
if (!f.file)
goto out;
- /* RED-PEN how should LSM module know it's handling 32bit? */
- error = security_file_ioctl(f.file, cmd, arg);
+ error = security_file_ioctl_compat(f.file, cmd, arg);
if (error)
goto out_fput;
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index d73d88d9c259..bc27e3ad97ff 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -317,6 +317,7 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
return 0;
out_remove:
+ configfs_put(dentry->d_fsdata);
configfs_remove_dirent(dentry);
return PTR_ERR(inode);
}
@@ -383,6 +384,7 @@ int configfs_create_link(struct configfs_dirent *target, struct dentry *parent,
return 0;
out_remove:
+ configfs_put(dentry->d_fsdata);
configfs_remove_dirent(dentry);
return PTR_ERR(inode);
}
diff --git a/fs/dcache.c b/fs/dcache.c
index c08d8b922770..76517330103b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -740,12 +740,12 @@ static inline bool fast_dput(struct dentry *dentry)
*/
if (unlikely(ret < 0)) {
spin_lock(&dentry->d_lock);
- if (dentry->d_lockref.count > 1) {
- dentry->d_lockref.count--;
+ if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) {
spin_unlock(&dentry->d_lock);
return true;
}
- return false;
+ dentry->d_lockref.count--;
+ goto locked;
}
/*
@@ -796,6 +796,7 @@ static inline bool fast_dput(struct dentry *dentry)
* else could have killed it and marked it dead. Either way, we
* don't need to do anything else.
*/
+locked:
if (dentry->d_lockref.count) {
spin_unlock(&dentry->d_lock);
return true;
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index da87615ad69a..3547388ad60b 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/device.h>
+#include <linux/pm_runtime.h>
#include <linux/poll.h>
#include <linux/security.h>
@@ -377,8 +378,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
}
EXPORT_SYMBOL_GPL(debugfs_attr_read);
-ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
+static ssize_t debugfs_attr_write_xsigned(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos, bool is_signed)
{
struct dentry *dentry = F_DENTRY(file);
ssize_t ret;
@@ -386,12 +387,28 @@ ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
ret = debugfs_file_get(dentry);
if (unlikely(ret))
return ret;
- ret = simple_attr_write(file, buf, len, ppos);
+ if (is_signed)
+ ret = simple_attr_write_signed(file, buf, len, ppos);
+ else
+ ret = simple_attr_write(file, buf, len, ppos);
debugfs_file_put(dentry);
return ret;
}
+
+ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return debugfs_attr_write_xsigned(file, buf, len, ppos, false);
+}
EXPORT_SYMBOL_GPL(debugfs_attr_write);
+ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return debugfs_attr_write_xsigned(file, buf, len, ppos, true);
+}
+EXPORT_SYMBOL_GPL(debugfs_attr_write_signed);
+
static struct dentry *debugfs_create_mode_unsafe(const char *name, umode_t mode,
struct dentry *parent, void *value,
const struct file_operations *fops,
@@ -784,11 +801,11 @@ static int debugfs_atomic_t_get(void *data, u64 *val)
*val = atomic_read((atomic_t *)data);
return 0;
}
-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get,
+DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t, debugfs_atomic_t_get,
debugfs_atomic_t_set, "%lld\n");
-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_ro, debugfs_atomic_t_get, NULL,
+DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_ro, debugfs_atomic_t_get, NULL,
"%lld\n");
-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_wo, NULL, debugfs_atomic_t_set,
+DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_wo, NULL, debugfs_atomic_t_set,
"%lld\n");
/**
@@ -1103,7 +1120,14 @@ static int debugfs_show_regset32(struct seq_file *s, void *data)
{
struct debugfs_regset32 *regset = s->private;
+ if (regset->dev)
+ pm_runtime_get_sync(regset->dev);
+
debugfs_print_regs32(s, regset->regs, regset->nregs, regset->base, "");
+
+ if (regset->dev)
+ pm_runtime_put(regset->dev);
+
return 0;
}
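
The debugfs split earlier in this file matters because the unsigned write path rejects inputs like "-1" that the signed path must accept. A self-contained userspace analogue of the one-worker/two-wrappers shape, with sscanf-based stand-ins for kstrtoll/kstrtoull:

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for the kernel's kstrtoll/kstrtoull parsers */
static int parse_signed(const char *s, long long *v)
{
	return sscanf(s, "%lld", v) == 1 ? 0 : -1;
}

static int parse_unsigned(const char *s, long long *v)
{
	unsigned long long u;

	/* reject a leading '-': sscanf's %llu would silently wrap it */
	if (s[0] == '-' || sscanf(s, "%llu", &u) != 1)
		return -1;
	*v = (long long)u;
	return 0;
}

/* one shared worker plus two thin wrappers, mirroring the
 * debugfs_attr_write()/debugfs_attr_write_signed() split */
static int attr_write_xsigned(const char *buf, long long *v, bool is_signed)
{
	return is_signed ? parse_signed(buf, v) : parse_unsigned(buf, v);
}

static int attr_write(const char *buf, long long *v)
{
	return attr_write_xsigned(buf, v, false);
}

static int attr_write_signed(const char *buf, long long *v)
{
	return attr_write_xsigned(buf, v, true);
}

int main(void)
{
	long long v = 0;
	int rc_u = attr_write("-1", &v);        /* rejected */
	int rc_s = attr_write_signed("-1", &v); /* accepted, v == -1 */

	printf("unsigned rc=%d, signed rc=%d, v=%lld\n", rc_u, rc_s, v);
	return 0;
}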
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index e595a29bf46e..258230f4e485 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -299,13 +299,9 @@ struct dentry *debugfs_lookup(const char *name, struct dentry *parent)
if (!parent)
parent = debugfs_mount->mnt_root;
- dentry = lookup_one_len_unlocked(name, parent, strlen(name));
+ dentry = lookup_positive_unlocked(name, parent, strlen(name));
if (IS_ERR(dentry))
return NULL;
- if (!d_really_is_positive(dentry)) {
- dput(dentry);
- return NULL;
- }
return dentry;
}
EXPORT_SYMBOL_GPL(debugfs_lookup);
@@ -743,6 +739,28 @@ void debugfs_remove(struct dentry *dentry)
EXPORT_SYMBOL_GPL(debugfs_remove);
/**
+ * debugfs_lookup_and_remove - lookup a directory or file and recursively remove it
+ * @name: a pointer to a string containing the name of the item to look up.
+ * @parent: a pointer to the parent dentry of the item.
+ *
+ * This is the equivalent of doing something like
+ * debugfs_remove(debugfs_lookup(..)) but with the proper reference counting
+ * handled for the directory being looked up.
+ */
+void debugfs_lookup_and_remove(const char *name, struct dentry *parent)
+{
+ struct dentry *dentry;
+
+ dentry = debugfs_lookup(name, parent);
+ if (!dentry)
+ return;
+
+ debugfs_remove(dentry);
+ dput(dentry);
+}
+EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove);
+
+/**
* debugfs_remove_recursive - recursively removes a directory
* @dentry: a pointer to a the dentry of the directory to be removed. If this
* parameter is NULL or an error value, nothing will be done.
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 283c7b94edda..ca06069e95c8 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -198,13 +198,13 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
if (!prev_seq) {
kref_get(&lkb->lkb_ref);
+ mutex_lock(&ls->ls_cb_mutex);
if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
- mutex_lock(&ls->ls_cb_mutex);
list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
- mutex_unlock(&ls->ls_cb_mutex);
} else {
queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
}
+ mutex_unlock(&ls->ls_cb_mutex);
}
out:
mutex_unlock(&lkb->lkb_cb_mutex);
@@ -284,7 +284,9 @@ void dlm_callback_stop(struct dlm_ls *ls)
void dlm_callback_suspend(struct dlm_ls *ls)
{
+ mutex_lock(&ls->ls_cb_mutex);
set_bit(LSFL_CB_DELAY, &ls->ls_flags);
+ mutex_unlock(&ls->ls_cb_mutex);
if (ls->ls_callback_wq)
flush_workqueue(ls->ls_callback_wq);
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 9165bf56c6e8..86d645d02d55 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1856,7 +1856,7 @@ static void del_timeout(struct dlm_lkb *lkb)
void dlm_scan_timeout(struct dlm_ls *ls)
{
struct dlm_rsb *r;
- struct dlm_lkb *lkb;
+ struct dlm_lkb *lkb = NULL, *iter;
int do_cancel, do_warn;
s64 wait_us;
@@ -1867,27 +1867,28 @@ void dlm_scan_timeout(struct dlm_ls *ls)
do_cancel = 0;
do_warn = 0;
mutex_lock(&ls->ls_timeout_mutex);
- list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
+ list_for_each_entry(iter, &ls->ls_timeout, lkb_time_list) {
wait_us = ktime_to_us(ktime_sub(ktime_get(),
- lkb->lkb_timestamp));
+ iter->lkb_timestamp));
- if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
- wait_us >= (lkb->lkb_timeout_cs * 10000))
+ if ((iter->lkb_exflags & DLM_LKF_TIMEOUT) &&
+ wait_us >= (iter->lkb_timeout_cs * 10000))
do_cancel = 1;
- if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
+ if ((iter->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
wait_us >= dlm_config.ci_timewarn_cs * 10000)
do_warn = 1;
if (!do_cancel && !do_warn)
continue;
- hold_lkb(lkb);
+ hold_lkb(iter);
+ lkb = iter;
break;
}
mutex_unlock(&ls->ls_timeout_mutex);
- if (!do_cancel && !do_warn)
+ if (!lkb)
break;
r = lkb->lkb_resource;
@@ -2888,24 +2889,24 @@ static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_args *args)
{
- int rv = -EINVAL;
+ int rv = -EBUSY;
if (args->flags & DLM_LKF_CONVERT) {
- if (lkb->lkb_flags & DLM_IFL_MSTCPY)
+ if (lkb->lkb_status != DLM_LKSTS_GRANTED)
goto out;
- if (args->flags & DLM_LKF_QUECVT &&
- !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
+ if (lkb->lkb_wait_type)
goto out;
- rv = -EBUSY;
- if (lkb->lkb_status != DLM_LKSTS_GRANTED)
+ if (is_overlap(lkb))
goto out;
- if (lkb->lkb_wait_type)
+ rv = -EINVAL;
+ if (lkb->lkb_flags & DLM_IFL_MSTCPY)
goto out;
- if (is_overlap(lkb))
+ if (args->flags & DLM_LKF_QUECVT &&
+ !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
goto out;
}
@@ -5241,21 +5242,18 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
{
- struct dlm_lkb *lkb;
- int found = 0;
+ struct dlm_lkb *lkb = NULL, *iter;
mutex_lock(&ls->ls_waiters_mutex);
- list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
- if (lkb->lkb_flags & DLM_IFL_RESEND) {
- hold_lkb(lkb);
- found = 1;
+ list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
+ if (iter->lkb_flags & DLM_IFL_RESEND) {
+ hold_lkb(iter);
+ lkb = iter;
break;
}
}
mutex_unlock(&ls->ls_waiters_mutex);
- if (!found)
- lkb = NULL;
return lkb;
}
@@ -5914,37 +5912,36 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
int mode, uint32_t flags, void *name, unsigned int namelen,
unsigned long timeout_cs, uint32_t *lkid)
{
- struct dlm_lkb *lkb;
+ struct dlm_lkb *lkb = NULL, *iter;
struct dlm_user_args *ua;
int found_other_mode = 0;
- int found = 0;
int rv = 0;
mutex_lock(&ls->ls_orphans_mutex);
- list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
- if (lkb->lkb_resource->res_length != namelen)
+ list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
+ if (iter->lkb_resource->res_length != namelen)
continue;
- if (memcmp(lkb->lkb_resource->res_name, name, namelen))
+ if (memcmp(iter->lkb_resource->res_name, name, namelen))
continue;
- if (lkb->lkb_grmode != mode) {
+ if (iter->lkb_grmode != mode) {
found_other_mode = 1;
continue;
}
- found = 1;
- list_del_init(&lkb->lkb_ownqueue);
- lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
- *lkid = lkb->lkb_id;
+ lkb = iter;
+ list_del_init(&iter->lkb_ownqueue);
+ iter->lkb_flags &= ~DLM_IFL_ORPHAN;
+ *lkid = iter->lkb_id;
break;
}
mutex_unlock(&ls->ls_orphans_mutex);
- if (!found && found_other_mode) {
+ if (!lkb && found_other_mode) {
rv = -EAGAIN;
goto out;
}
- if (!found) {
+ if (!lkb) {
rv = -ENOENT;
goto out;
}
diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c
index e7f550327d5d..e338c407cb75 100644
--- a/fs/dlm/netlink.c
+++ b/fs/dlm/netlink.c
@@ -113,7 +113,7 @@ static void fill_data(struct dlm_lock_data *data, struct dlm_lkb *lkb)
void dlm_timeout_warn(struct dlm_lkb *lkb)
{
- struct sk_buff *uninitialized_var(send_skb);
+ struct sk_buff *send_skb;
struct dlm_lock_data *data;
size_t size;
int rv;
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index a10d2bcfe75a..5f2e2fa2ba09 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -19,20 +19,20 @@ static struct list_head recv_list;
static wait_queue_head_t send_wq;
static wait_queue_head_t recv_wq;
-struct plock_op {
- struct list_head list;
- int done;
- struct dlm_plock_info info;
- int (*callback)(struct file_lock *fl, int result);
-};
-
-struct plock_xop {
- struct plock_op xop;
+struct plock_async_data {
void *fl;
void *file;
struct file_lock flc;
+ int (*callback)(struct file_lock *fl, int result);
};
+struct plock_op {
+ struct list_head list;
+ int done;
+ struct dlm_plock_info info;
+ /* if set, indicates async handling */
+ struct plock_async_data *data;
+};
static inline void set_version(struct dlm_plock_info *info)
{
@@ -58,6 +58,12 @@ static int check_version(struct dlm_plock_info *info)
return 0;
}
+static void dlm_release_plock_op(struct plock_op *op)
+{
+ kfree(op->data);
+ kfree(op);
+}
+
static void send_op(struct plock_op *op)
{
set_version(&op->info);
@@ -74,8 +80,7 @@ static void send_op(struct plock_op *op)
abandoned waiter. So, we have to insert the unlock-close when the
lock call is interrupted. */
-static void do_unlock_close(struct dlm_ls *ls, u64 number,
- struct file *file, struct file_lock *fl)
+static void do_unlock_close(const struct dlm_plock_info *info)
{
struct plock_op *op;
@@ -84,15 +89,12 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number,
return;
op->info.optype = DLM_PLOCK_OP_UNLOCK;
- op->info.pid = fl->fl_pid;
- op->info.fsid = ls->ls_global_id;
- op->info.number = number;
+ op->info.pid = info->pid;
+ op->info.fsid = info->fsid;
+ op->info.number = info->number;
op->info.start = 0;
op->info.end = OFFSET_MAX;
- if (fl->fl_lmops && fl->fl_lmops->lm_grant)
- op->info.owner = (__u64) fl->fl_pid;
- else
- op->info.owner = (__u64)(long) fl->fl_owner;
+ op->info.owner = info->owner;
op->info.flags |= DLM_PLOCK_FL_CLOSE;
send_op(op);
@@ -101,22 +103,21 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number,
int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
int cmd, struct file_lock *fl)
{
+ struct plock_async_data *op_data;
struct dlm_ls *ls;
struct plock_op *op;
- struct plock_xop *xop;
int rv;
ls = dlm_find_lockspace_local(lockspace);
if (!ls)
return -EINVAL;
- xop = kzalloc(sizeof(*xop), GFP_NOFS);
- if (!xop) {
+ op = kzalloc(sizeof(*op), GFP_NOFS);
+ if (!op) {
rv = -ENOMEM;
goto out;
}
- op = &xop->xop;
op->info.optype = DLM_PLOCK_OP_LOCK;
op->info.pid = fl->fl_pid;
op->info.ex = (fl->fl_type == F_WRLCK);
@@ -125,35 +126,45 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
op->info.number = number;
op->info.start = fl->fl_start;
op->info.end = fl->fl_end;
+ /* async handling */
if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
+ op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
+ if (!op_data) {
+ dlm_release_plock_op(op);
+ rv = -ENOMEM;
+ goto out;
+ }
+
/* fl_owner is lockd which doesn't distinguish
processes on the nfs client */
op->info.owner = (__u64) fl->fl_pid;
- op->callback = fl->fl_lmops->lm_grant;
- locks_init_lock(&xop->flc);
- locks_copy_lock(&xop->flc, fl);
- xop->fl = fl;
- xop->file = file;
+ op_data->callback = fl->fl_lmops->lm_grant;
+ locks_init_lock(&op_data->flc);
+ locks_copy_lock(&op_data->flc, fl);
+ op_data->fl = fl;
+ op_data->file = file;
+
+ op->data = op_data;
+
+ send_op(op);
+ rv = FILE_LOCK_DEFERRED;
+ goto out;
} else {
op->info.owner = (__u64)(long) fl->fl_owner;
}
send_op(op);
- if (!op->callback) {
- rv = wait_event_interruptible(recv_wq, (op->done != 0));
- if (rv == -ERESTARTSYS) {
- log_debug(ls, "dlm_posix_lock: wait killed %llx",
- (unsigned long long)number);
- spin_lock(&ops_lock);
- list_del(&op->list);
- spin_unlock(&ops_lock);
- kfree(xop);
- do_unlock_close(ls, number, file, fl);
- goto out;
- }
- } else {
- rv = FILE_LOCK_DEFERRED;
+ rv = wait_event_killable(recv_wq, (op->done != 0));
+ if (rv == -ERESTARTSYS) {
+ spin_lock(&ops_lock);
+ list_del(&op->list);
+ spin_unlock(&ops_lock);
+ log_debug(ls, "%s: wait interrupted %x %llx pid %d",
+ __func__, ls->ls_global_id,
+ (unsigned long long)number, op->info.pid);
+ dlm_release_plock_op(op);
+ do_unlock_close(&op->info);
goto out;
}
@@ -173,7 +184,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
(unsigned long long)number);
}
- kfree(xop);
+ dlm_release_plock_op(op);
out:
dlm_put_lockspace(ls);
return rv;
@@ -183,11 +194,11 @@ EXPORT_SYMBOL_GPL(dlm_posix_lock);
/* Returns failure iff a successful lock operation should be canceled */
static int dlm_plock_callback(struct plock_op *op)
{
+ struct plock_async_data *op_data = op->data;
struct file *file;
struct file_lock *fl;
struct file_lock *flc;
int (*notify)(struct file_lock *fl, int result) = NULL;
- struct plock_xop *xop = (struct plock_xop *)op;
int rv = 0;
spin_lock(&ops_lock);
@@ -199,10 +210,10 @@ static int dlm_plock_callback(struct plock_op *op)
spin_unlock(&ops_lock);
/* check if the following 2 are still valid or make a copy */
- file = xop->file;
- flc = &xop->flc;
- fl = xop->fl;
- notify = op->callback;
+ file = op_data->file;
+ flc = &op_data->flc;
+ fl = op_data->fl;
+ notify = op_data->callback;
if (op->info.rv) {
notify(fl, op->info.rv);
@@ -233,7 +244,7 @@ static int dlm_plock_callback(struct plock_op *op)
}
out:
- kfree(xop);
+ dlm_release_plock_op(op);
return rv;
}
@@ -303,7 +314,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
rv = 0;
out_free:
- kfree(op);
+ dlm_release_plock_op(op);
out:
dlm_put_lockspace(ls);
fl->fl_flags = fl_flags;
@@ -363,13 +374,15 @@ int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
locks_init_lock(fl);
fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
fl->fl_flags = FL_POSIX;
- fl->fl_pid = -op->info.pid;
+ fl->fl_pid = op->info.pid;
+ if (op->info.nodeid != dlm_our_nodeid())
+ fl->fl_pid = -fl->fl_pid;
fl->fl_start = op->info.start;
fl->fl_end = op->info.end;
rv = 0;
}
- kfree(op);
+ dlm_release_plock_op(op);
out:
dlm_put_lockspace(ls);
return rv;
@@ -392,7 +405,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
if (op->info.flags & DLM_PLOCK_FL_CLOSE)
list_del(&op->list);
else
- list_move(&op->list, &recv_list);
+ list_move_tail(&op->list, &recv_list);
memcpy(&info, &op->info, sizeof(info));
}
spin_unlock(&ops_lock);
@@ -405,7 +418,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
(the process did not make an unlock call). */
if (op->info.flags & DLM_PLOCK_FL_CLOSE)
- kfree(op);
+ dlm_release_plock_op(op);
if (copy_to_user(u, &info, sizeof(info)))
return -EFAULT;
@@ -417,9 +430,9 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
loff_t *ppos)
{
+ struct plock_op *op = NULL, *iter;
struct dlm_plock_info info;
- struct plock_op *op;
- int found = 0, do_callback = 0;
+ int do_callback = 0;
if (count != sizeof(info))
return -EINVAL;
@@ -430,31 +443,63 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
if (check_version(&info))
return -EINVAL;
+ /*
+ * The results for waiting ops (SETLKW) can be returned in any
+ * order, so match all fields to find the op. The results for
+ * non-waiting ops are returned in the order that they were sent
+ * to userspace, so match the result with the first non-waiting op.
+ */
spin_lock(&ops_lock);
- list_for_each_entry(op, &recv_list, list) {
- if (op->info.fsid == info.fsid &&
- op->info.number == info.number &&
- op->info.owner == info.owner) {
- list_del_init(&op->list);
- memcpy(&op->info, &info, sizeof(info));
- if (op->callback)
- do_callback = 1;
- else
- op->done = 1;
- found = 1;
- break;
+ if (info.wait) {
+ list_for_each_entry(iter, &recv_list, list) {
+ if (iter->info.fsid == info.fsid &&
+ iter->info.number == info.number &&
+ iter->info.owner == info.owner &&
+ iter->info.pid == info.pid &&
+ iter->info.start == info.start &&
+ iter->info.end == info.end &&
+ iter->info.ex == info.ex &&
+ iter->info.wait) {
+ op = iter;
+ break;
+ }
}
+ } else {
+ list_for_each_entry(iter, &recv_list, list) {
+ if (!iter->info.wait &&
+ iter->info.fsid == info.fsid) {
+ op = iter;
+ break;
+ }
+ }
+ }
+
+ if (op) {
+ /* Sanity check that op and info match. */
+ if (info.wait)
+ WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK);
+ else
+ WARN_ON(op->info.number != info.number ||
+ op->info.owner != info.owner ||
+ op->info.optype != info.optype);
+
+ list_del_init(&op->list);
+ memcpy(&op->info, &info, sizeof(info));
+ if (op->data)
+ do_callback = 1;
+ else
+ op->done = 1;
}
spin_unlock(&ops_lock);
- if (found) {
+ if (op) {
if (do_callback)
dlm_plock_callback(op);
else
wake_up(&recv_wq);
} else
- log_print("dev_write no op %x %llx", info.fsid,
- (unsigned long long)info.number);
+ log_print("%s: no op %x %llx", __func__,
+ info.fsid, (unsigned long long)info.number);
return count;
}
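
A compact model of the matching policy described in the dev_write() comment above: waiting (SETLKW) results are matched on the full identity of the request, while non-waiting results pair with the first queued non-waiting op for that filesystem. Types and fields are reduced stand-ins:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pinfo {
	uint32_t fsid;
	uint64_t number, owner, start, end;
	int pid, ex, wait;
};

/* find the op a userspace result belongs to, per the dev_write() rule */
static int match_op(const struct pinfo *pending, size_t n,
		    const struct pinfo *res)
{
	for (size_t i = 0; i < n; i++) {
		const struct pinfo *op = &pending[i];

		if (res->wait) {
			/* waiting ops can complete out of order:
			 * match the full identity of the request */
			if (op->wait &&
			    op->fsid == res->fsid &&
			    op->number == res->number &&
			    op->owner == res->owner &&
			    op->pid == res->pid &&
			    op->start == res->start &&
			    op->end == res->end &&
			    op->ex == res->ex)
				return (int)i;
		} else {
			/* non-waiting results arrive in send order:
			 * first queued non-waiting op for this fs wins */
			if (!op->wait && op->fsid == res->fsid)
				return (int)i;
		}
	}
	return -1;
}

int main(void)
{
	struct pinfo q[] = {
		{ .fsid = 1, .number = 7, .wait = 0 },
		{ .fsid = 1, .number = 9, .wait = 1, .owner = 3 },
	};
	struct pinfo res = { .fsid = 1, .number = 9, .wait = 1, .owner = 3 };

	printf("matched index: %d\n", match_op(q, 2, &res)); /* -> 1 */
	return 0;
}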
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index 8928e99dfd47..df18f38a0273 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -732,10 +732,9 @@ void dlm_recovered_lock(struct dlm_rsb *r)
static void recover_lvb(struct dlm_rsb *r)
{
- struct dlm_lkb *lkb, *high_lkb = NULL;
+ struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL;
uint32_t high_seq = 0;
int lock_lvb_exists = 0;
- int big_lock_exists = 0;
int lvblen = r->res_ls->ls_lvblen;
if (!rsb_flag(r, RSB_NEW_MASTER2) &&
@@ -751,37 +750,37 @@ static void recover_lvb(struct dlm_rsb *r)
/* we are the new master, so figure out if VALNOTVALID should
be set, and set the rsb lvb from the best lkb available. */
- list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
- if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
+ list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) {
+ if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
continue;
lock_lvb_exists = 1;
- if (lkb->lkb_grmode > DLM_LOCK_CR) {
- big_lock_exists = 1;
+ if (iter->lkb_grmode > DLM_LOCK_CR) {
+ big_lkb = iter;
goto setflag;
}
- if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
- high_lkb = lkb;
- high_seq = lkb->lkb_lvbseq;
+ if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
+ high_lkb = iter;
+ high_seq = iter->lkb_lvbseq;
}
}
- list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
- if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
+ list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) {
+ if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
continue;
lock_lvb_exists = 1;
- if (lkb->lkb_grmode > DLM_LOCK_CR) {
- big_lock_exists = 1;
+ if (iter->lkb_grmode > DLM_LOCK_CR) {
+ big_lkb = iter;
goto setflag;
}
- if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
- high_lkb = lkb;
- high_seq = lkb->lkb_lvbseq;
+ if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
+ high_lkb = iter;
+ high_seq = iter->lkb_lvbseq;
}
}
@@ -790,7 +789,7 @@ static void recover_lvb(struct dlm_rsb *r)
goto out;
/* lvb is invalidated if only NL/CR locks remain */
- if (!big_lock_exists)
+ if (!big_lkb)
rsb_set_flag(r, RSB_VALNOTVALID);
if (!r->res_lvbptr) {
@@ -799,9 +798,9 @@ static void recover_lvb(struct dlm_rsb *r)
goto out;
}
- if (big_lock_exists) {
- r->res_lvbseq = lkb->lkb_lvbseq;
- memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
+ if (big_lkb) {
+ r->res_lvbseq = big_lkb->lkb_lvbseq;
+ memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen);
} else if (high_lkb) {
r->res_lvbseq = high_lkb->lkb_lvbseq;
memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index e23752d9a79f..c867a0d62f36 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -76,6 +76,14 @@ static struct inode *__ecryptfs_get_inode(struct inode *lower_inode,
if (lower_inode->i_sb != ecryptfs_superblock_to_lower(sb))
return ERR_PTR(-EXDEV);
+
+ /* Reject dealing with casefold directories. */
+ if (IS_CASEFOLDED(lower_inode)) {
+ pr_err_ratelimited("%s: Can't handle casefolded directory.\n",
+ __func__);
+ return ERR_PTR(-EREMOTE);
+ }
+
if (!igrab(lower_inode))
return ERR_PTR(-ESTALE);
inode = iget5_locked(sb, (unsigned long)lower_inode,
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index 23b74b8e8f96..38eeec5e3032 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -56,14 +56,18 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
if (page) {
__clear_bit(j, bounced);
- if (kaddr) {
- if (kaddr + PAGE_SIZE == page_address(page))
+ if (!PageHighMem(page)) {
+ if (!i) {
+ kaddr = page_address(page);
+ continue;
+ }
+ if (kaddr &&
+ kaddr + PAGE_SIZE == page_address(page)) {
kaddr += PAGE_SIZE;
- else
- kaddr = NULL;
- } else if (!i) {
- kaddr = page_address(page);
+ continue;
+ }
}
+ kaddr = NULL;
continue;
}
kaddr = NULL;
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 544a453f3076..cc7a42682814 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -226,7 +226,7 @@ struct erofs_inode {
unsigned char datalayout;
unsigned char inode_isize;
- unsigned short xattr_isize;
+ unsigned int xattr_isize;
unsigned int xattr_shared_count;
unsigned int *xattr_shared_xattrs;
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index fdd18c250811..fb718c3e3ebd 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -636,9 +636,11 @@ hitted:
tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
- cur = end - min_t(unsigned int, offset + end - map->m_la, end);
+ cur = end - min_t(erofs_off_t, offset + end - map->m_la, end);
if (!(map->m_flags & EROFS_MAP_MAPPED)) {
zero_user_segment(page, cur, end);
+ ++spiltted;
+ tight = false;
goto next_part;
}
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index fff574100721..6553f58fb289 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -179,6 +179,10 @@ static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
m->clusterofs = le16_to_cpu(di->di_clusterofs);
+ if (m->clusterofs >= 1 << vi->z_logical_clusterbits) {
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
m->pblk = le32_to_cpu(di->di_u.blkaddr);
break;
default:
@@ -211,7 +215,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
int i;
u8 *in, type;
- if (1 << amortizedshift == 4)
+ if (1 << amortizedshift == 4 && lclusterbits <= 14)
vcnt = 2;
else if (1 << amortizedshift == 2 && lclusterbits == 12)
vcnt = 16;
@@ -269,7 +273,6 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
{
struct inode *const inode = m->inode;
struct erofs_inode *const vi = EROFS_I(inode);
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
vi->inode_isize + vi->xattr_isize, 8) +
sizeof(struct z_erofs_map_header);
@@ -279,9 +282,6 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
erofs_off_t pos;
int err;
- if (lclusterbits != 12)
- return -EOPNOTSUPP;
-
if (lcn >= totalidx)
return -EINVAL;
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 8b9bd6fb08cd..1d45cedeeb43 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -182,11 +182,14 @@ static __poll_t eventfd_poll(struct file *file, poll_table *wait)
return events;
}
-static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
+void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
- *cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
+ lockdep_assert_held(&ctx->wqh.lock);
+
+ *cnt = ((ctx->flags & EFD_SEMAPHORE) && ctx->count) ? 1 : ctx->count;
ctx->count -= *cnt;
}
+EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);
/**
* eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue.
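
A runnable userspace illustration of the EFD_SEMAPHORE semantics the eventfd hunk above adjusts: each read returns 1 and decrements the count, and once the count hits 0 there is nothing left to hand out — the patched in-kernel helper now reports 0 in that case rather than 1. Linux-only:

#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	/* initial count 2, semaphore semantics, non-blocking reads */
	int fd = eventfd(2, EFD_SEMAPHORE | EFD_NONBLOCK);
	uint64_t v;

	if (fd < 0)
		return 1;

	while (read(fd, &v, sizeof(v)) == sizeof(v))
		printf("got %llu\n", (unsigned long long)v); /* 1, then 1 */

	/* count is now 0: read fails with EAGAIN instead of returning 1 */
	perror("read at zero");
	close(fd);
	return 0;
}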
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 339453ac834c..8c0e94183186 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1803,6 +1803,25 @@ static inline struct timespec64 ep_set_mstimeout(long ms)
return timespec64_add_safe(now, ts);
}
+/*
+ * autoremove_wake_function, but remove even on failure to wake up, because we
+ * know that default_wake_function/ttwu will only fail if the thread is already
+ * woken, and in that case the ep_poll loop will remove the entry anyways, not
+ * try to reuse it.
+ */
+static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,
+ unsigned int mode, int sync, void *key)
+{
+ int ret = default_wake_function(wq_entry, mode, sync, key);
+
+ /*
+ * Pairs with list_empty_careful in ep_poll, and ensures future loop
+ * iterations see the cause of this wakeup.
+ */
+ list_del_init_careful(&wq_entry->entry);
+ return ret;
+}
+
/**
* ep_poll - Retrieves ready events, and delivers them to the caller supplied
* event buffer.
@@ -1880,56 +1899,77 @@ fetch_events:
* normal wakeup path no need to call __remove_wait_queue()
* explicitly, thus ep->lock is not taken, which halts the
* event delivery.
+ *
+ * In fact, we now use an even more aggressive function that
+ * unconditionally removes, because we don't reuse the wait
+ * entry between loop iterations. This lets us also avoid the
+ * performance issue if a process is killed, causing all of its
+ * threads to wake up without being removed normally.
*/
init_wait(&wait);
+ wait.func = ep_autoremove_wake_function;
write_lock_irq(&ep->lock);
- __add_wait_queue_exclusive(&ep->wq, &wait);
- write_unlock_irq(&ep->lock);
-
/*
- * We don't want to sleep if the ep_poll_callback() sends us
- * a wakeup in between. That's why we set the task state
- * to TASK_INTERRUPTIBLE before doing the checks.
+ * Barrierless variant, waitqueue_active() is called under
+ * the same lock on wakeup ep_poll_callback() side, so it
+ * is safe to avoid an explicit barrier.
*/
- set_current_state(TASK_INTERRUPTIBLE);
+ __set_current_state(TASK_INTERRUPTIBLE);
+
/*
- * Always short-circuit for fatal signals to allow
- * threads to make a timely exit without the chance of
- * finding more events available and fetching
- * repeatedly.
+ * Do the final check under the lock. ep_scan_ready_list()
+ * plays with two lists (->rdllist and ->ovflist) and there
+ * is always a race when both lists are empty for short
+ * period of time although events are pending, so lock is
+ * important.
*/
- if (fatal_signal_pending(current)) {
- res = -EINTR;
- break;
- }
-
eavail = ep_events_available(ep);
- if (eavail)
- break;
- if (signal_pending(current)) {
- res = -EINTR;
- break;
+ if (!eavail) {
+ if (signal_pending(current))
+ res = -EINTR;
+ else
+ __add_wait_queue_exclusive(&ep->wq, &wait);
}
+ write_unlock_irq(&ep->lock);
- if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) {
- timed_out = 1;
- break;
- }
+ if (!eavail && !res)
+ timed_out = !schedule_hrtimeout_range(to, slack,
+ HRTIMER_MODE_ABS);
- /* We were woken up, thus go and try to harvest some events */
+ /*
+ * We were woken up, thus go and try to harvest some events.
+ * If timed out and still on the wait queue, recheck eavail
+ * carefully under lock, below.
+ */
eavail = 1;
-
} while (0);
__set_current_state(TASK_RUNNING);
if (!list_empty_careful(&wait.entry)) {
write_lock_irq(&ep->lock);
+ /*
+ * If the thread timed out and is not on the wait queue, it
+ * means that the thread was woken up after its timeout expired
+ * before it could reacquire the lock. Thus, when wait.entry is
+ * empty, it needs to harvest events.
+ */
+ if (timed_out)
+ eavail = list_empty(&wait.entry);
__remove_wait_queue(&ep->wq, &wait);
write_unlock_irq(&ep->lock);
}
send_events:
+ if (fatal_signal_pending(current)) {
+ /*
+ * Always short-circuit for fatal signals to allow
+ * threads to make a timely exit without the chance of
+ * finding more events available and fetching
+ * repeatedly.
+ */
+ res = -EINTR;
+ }
/*
* Try to transfer events to user space. In case we get 0 events and
* there's still timeout left over, we go trying again in search of
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 10ab238de9a6..94e39a28bee2 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -68,10 +68,7 @@ struct mb_cache;
* second extended-fs super-block data in memory
*/
struct ext2_sb_info {
- unsigned long s_frag_size; /* Size of a fragment in bytes */
- unsigned long s_frags_per_block;/* Number of fragments per block */
unsigned long s_inodes_per_block;/* Number of inodes per block */
- unsigned long s_frags_per_group;/* Number of fragments in a group */
unsigned long s_blocks_per_group;/* Number of blocks in a group */
unsigned long s_inodes_per_group;/* Number of inodes in a group */
unsigned long s_itb_per_group; /* Number of inode table blocks per group */
@@ -177,6 +174,7 @@ static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
#define EXT2_MIN_BLOCK_SIZE 1024
#define EXT2_MAX_BLOCK_SIZE 4096
#define EXT2_MIN_BLOCK_LOG_SIZE 10
+#define EXT2_MAX_BLOCK_LOG_SIZE 16
#define EXT2_BLOCK_SIZE(s) ((s)->s_blocksize)
#define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (__u32))
#define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
@@ -185,15 +183,6 @@ static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
#define EXT2_FIRST_INO(s) (EXT2_SB(s)->s_first_ino)
/*
- * Macro-instructions used to manage fragments
- */
-#define EXT2_MIN_FRAG_SIZE 1024
-#define EXT2_MAX_FRAG_SIZE 4096
-#define EXT2_MIN_FRAG_LOG_SIZE 10
-#define EXT2_FRAG_SIZE(s) (EXT2_SB(s)->s_frag_size)
-#define EXT2_FRAGS_PER_BLOCK(s) (EXT2_SB(s)->s_frags_per_block)
-
-/*
* Structure of a blocks group descriptor
*/
struct ext2_group_desc
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index db403c01d4d5..a787f50732b8 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -681,10 +681,9 @@ static int ext2_setup_super (struct super_block * sb,
es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
le16_add_cpu(&es->s_mnt_count, 1);
if (test_opt (sb, DEBUG))
- ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
+ ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, gc=%lu, "
"bpg=%lu, ipg=%lu, mo=%04lx]",
EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize,
- sbi->s_frag_size,
sbi->s_groups_count,
EXT2_BLOCKS_PER_GROUP(sb),
EXT2_INODES_PER_GROUP(sb),
@@ -967,6 +966,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
+ if (le32_to_cpu(es->s_log_block_size) >
+ (EXT2_MAX_BLOCK_LOG_SIZE - BLOCK_SIZE_BITS)) {
+ ext2_msg(sb, KERN_ERR,
+ "Invalid log block size: %u",
+ le32_to_cpu(es->s_log_block_size));
+ goto failed_mount;
+ }
blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
if (test_opt(sb, DAX)) {
@@ -1024,14 +1030,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
}
}
- sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
- le32_to_cpu(es->s_log_frag_size);
- if (sbi->s_frag_size == 0)
- goto cantfind_ext2;
- sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;
-
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
- sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
@@ -1057,11 +1056,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
- if (sb->s_blocksize != sbi->s_frag_size) {
+ if (es->s_log_frag_size != es->s_log_block_size) {
ext2_msg(sb, KERN_ERR,
- "error: fragsize %lu != blocksize %lu"
- "(not supported yet)",
- sbi->s_frag_size, sb->s_blocksize);
+ "error: fragsize log %u != blocksize log %u",
+ le32_to_cpu(es->s_log_frag_size), sb->s_blocksize_bits);
goto failed_mount;
}
@@ -1071,15 +1069,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_blocks_per_group);
goto failed_mount;
}
- if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
+ if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+ sbi->s_inodes_per_group > sb->s_blocksize * 8) {
ext2_msg(sb, KERN_ERR,
- "error: #fragments per group too big: %lu",
- sbi->s_frags_per_group);
- goto failed_mount;
- }
- if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
- ext2_msg(sb, KERN_ERR,
- "error: #inodes per group too big: %lu",
+ "error: invalid #inodes per group: %lu",
sbi->s_inodes_per_group);
goto failed_mount;
}
@@ -1089,6 +1082,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
le32_to_cpu(es->s_first_data_block) - 1)
/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
+ if ((u64)sbi->s_groups_count * sbi->s_inodes_per_group !=
+ le32_to_cpu(es->s_inodes_count)) {
+ ext2_msg(sb, KERN_ERR, "error: invalid #inodes: %u vs computed %llu",
+ le32_to_cpu(es->s_inodes_count),
+ (u64)sbi->s_groups_count * sbi->s_inodes_per_group);
+ goto failed_mount;
+ }
db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
EXT2_DESC_PER_BLOCK(sb);
sbi->s_group_desc = kmalloc_array (db_count,
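
The new ext2 mount-time checks reduce to simple arithmetic on superblock fields; a self-contained rehearsal of the three validations added above, with stand-in values:

#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE_BITS          10   /* 1024-byte base block */
#define EXT2_MAX_BLOCK_LOG_SIZE  16

int main(void)
{
	uint32_t s_log_block_size = 2;   /* stand-in: 4 KiB blocks */
	uint32_t groups = 8, inodes_per_group = 2048, inodes_count = 16384;
	unsigned long blocksize;

	/* 1) bound the shift before using it */
	if (s_log_block_size > EXT2_MAX_BLOCK_LOG_SIZE - BLOCK_SIZE_BITS)
		return 1;
	blocksize = 1024UL << s_log_block_size;

	/* 2) inodes per group must fit the group's inode bitmap
	 * (the real check also bounds it from below by inodes per block) */
	if (inodes_per_group == 0 || inodes_per_group > blocksize * 8)
		return 1;

	/* 3) total inode count must be consistent with the group count */
	if ((uint64_t)groups * inodes_per_group != inodes_count)
		return 1;

	printf("blocksize=%lu: superblock fields consistent\n", blocksize);
	return 0;
}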
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 62acbe27d8bf..ca7ff31a1f19 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -690,10 +690,10 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
/* We need to allocate a new block */
ext2_fsblk_t goal = ext2_group_first_block_no(sb,
EXT2_I(inode)->i_block_group);
- int block = ext2_new_block(inode, goal, &error);
+ ext2_fsblk_t block = ext2_new_block(inode, goal, &error);
if (error)
goto cleanup;
- ea_idebug(inode, "creating block %d", block);
+ ea_idebug(inode, "creating block %lu", block);
new_bh = sb_getblk(sb, block);
if (unlikely(!new_bh)) {
diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
index 9b63f5416a2f..7f3b25b3fa6d 100644
--- a/fs/ext4/acl.h
+++ b/fs/ext4/acl.h
@@ -67,6 +67,11 @@ extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);
static inline int
ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
{
+ /* usually, the umask is applied by posix_acl_create(), but if
+ ext4 ACL support is disabled at compile time, we need to do
+ it here, because posix_acl_create() will never be called */
+ inode->i_mode &= ~current_umask();
+
return 0;
}
#endif /* CONFIG_EXT4_FS_POSIX_ACL */
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 031ff3f19018..b68cee75f5c5 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -303,6 +303,22 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
return desc;
}
+static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb,
+ ext4_group_t block_group,
+ struct buffer_head *bh)
+{
+ ext4_grpblk_t next_zero_bit;
+ unsigned long bitmap_size = sb->s_blocksize * 8;
+ unsigned int offset = num_clusters_in_group(sb, block_group);
+
+ if (bitmap_size <= offset)
+ return 0;
+
+ next_zero_bit = ext4_find_next_zero_bit(bh->b_data, bitmap_size, offset);
+
+ return (next_zero_bit < bitmap_size ? next_zero_bit : 0);
+}
+
/*
* Return the block number which was discovered to be invalid, or 0 if
* the block bitmap is valid.
@@ -395,6 +411,15 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
EXT4_GROUP_INFO_BBITMAP_CORRUPT);
return -EFSCORRUPTED;
}
+ blk = ext4_valid_block_bitmap_padding(sb, block_group, bh);
+ if (unlikely(blk != 0)) {
+ ext4_unlock_group(sb, block_group);
+ ext4_error(sb, "bg %u: block %llu: padding at end of block bitmap is not set",
+ block_group, blk);
+ ext4_mark_group_bitmap_corrupted(sb, block_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+ return -EFSCORRUPTED;
+ }
set_buffer_verified(bh);
verified:
ext4_unlock_group(sb, block_group);
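
The ext4_valid_block_bitmap_padding() check above insists that every bit past the last real cluster in a group's block bitmap is set; a miniature version over a byte-array bitmap, with a hand-rolled find_next_zero_bit():

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* return the first zero bit in [from, size), or size if none */
static size_t find_next_zero_bit(const unsigned char *map,
				 size_t size, size_t from)
{
	for (size_t i = from; i < size; i++)
		if (!(map[i / 8] & (1u << (i % 8))))
			return i;
	return size;
}

int main(void)
{
	unsigned char bitmap[4];              /* 32-bit "block bitmap" */
	size_t bitmap_size = 32, nr_clusters = 20;

	memset(bitmap, 0xff, sizeof(bitmap)); /* padding correctly set */
	bitmap[2] &= ~(1u << 6);              /* corrupt: clear bit 22 */

	size_t zero = find_next_zero_bit(bitmap, bitmap_size, nr_clusters);
	if (zero < bitmap_size)
		printf("padding bit %zu unset: bitmap corrupt\n", zero);
	else
		printf("padding intact\n");
	return 0;
}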
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index e932e8482371..4d02116193de 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -501,7 +501,7 @@ enum {
*
* It's not paranoia if the Murphy's Law really *is* out to get you. :-)
*/
-#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG))
+#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1U << EXT4_INODE_##FLAG))
#define CHECK_FLAG_VALUE(FLAG) BUILD_BUG_ON(!TEST_FLAG_VALUE(FLAG))
static inline void ext4_check_flag_values(void)
@@ -938,11 +938,13 @@ do { \
* where the second inode has larger inode number
* than the first
* I_DATA_SEM_QUOTA - Used for quota inodes only
+ * I_DATA_SEM_EA - Used for ea_inodes only
*/
enum {
I_DATA_SEM_NORMAL = 0,
I_DATA_SEM_OTHER,
I_DATA_SEM_QUOTA,
+ I_DATA_SEM_EA
};
@@ -1439,7 +1441,7 @@ struct ext4_sb_info {
unsigned long s_commit_interval;
u32 s_max_batch_time;
u32 s_min_batch_time;
- struct block_device *journal_bdev;
+ struct block_device *s_journal_bdev;
#ifdef CONFIG_QUOTA
/* Names of quota files with journalled quota */
char __rcu *s_qf_names[EXT4_MAXQUOTAS];
@@ -1527,7 +1529,7 @@ struct ext4_sb_info {
struct task_struct *s_mmp_tsk;
/* record the last minlen when FITRIM is called. */
- atomic_t s_last_trim_minblks;
+ unsigned long s_last_trim_minblks;
/* Reference to checksum algorithm driver via cryptoapi */
struct crypto_shash *s_chksum_driver;
@@ -2608,7 +2610,9 @@ int do_journal_get_write_access(handle_t *handle,
typedef enum {
EXT4_IGET_NORMAL = 0,
EXT4_IGET_SPECIAL = 0x0001, /* OK to iget a system inode */
- EXT4_IGET_HANDLE = 0x0002 /* Inode # is from a handle */
+ EXT4_IGET_HANDLE = 0x0002, /* Inode # is from a handle */
+ EXT4_IGET_BAD = 0x0004, /* Allow to iget a bad inode */
+ EXT4_IGET_EA_INODE = 0x0008 /* Inode should contain an EA value */
} ext4_iget_flags;
extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index d5e649e578cb..98e1b1ddb4ec 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -500,6 +500,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
error_msg = "invalid eh_entries";
goto corrupted;
}
+ if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
+ error_msg = "eh_entries is 0 but eh_depth is > 0";
+ goto corrupted;
+ }
if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
error_msg = "invalid extent entries";
goto corrupted;
@@ -1032,6 +1036,11 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
ix = curp->p_idx;
}
+ if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
+ EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
+ return -EFSCORRUPTED;
+ }
+
len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
BUG_ON(len < 0);
if (len > 0) {
@@ -1041,11 +1050,6 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
}
- if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
- EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
- return -EFSCORRUPTED;
- }
-
ix->ei_block = cpu_to_le32(logical);
ext4_idx_store_pblock(ix, ptr);
le16_add_cpu(&curp->p_hdr->eh_entries, 1);
@@ -6018,6 +6022,15 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
struct ext4_extent *extent;
ext4_lblk_t first_lblk, first_lclu, last_lclu;
+ /*
+ * if data can be stored inline, the logical cluster isn't
+ * mapped - no physical clusters have been allocated, and the
+ * file has no extents
+ */
+ if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) ||
+ ext4_has_inline_data(inode))
+ return 0;
+
/* search for the extent closest to the first block in the cluster */
path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
if (IS_ERR(path)) {
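
Two of the extents.c changes above are defensive orderings: an extent header claiming depth > 0 with zero entries is now rejected outright, and the `ix > EXT_MAX_INDEX` check in ext4_ext_insert_index() runs before the memmove() rather than after it, so a corrupted insert position is caught before any memory is shifted. A userspace sketch of that validate-before-mutate ordering, using an invented fixed-capacity array:

    #include <string.h>
    #include <stdio.h>

    #define CAP 4

    /* Insert v at pos, shifting the tail right; reject bad input
     * before touching memory, mirroring the relocated bounds check. */
    static int insert_at(unsigned int *arr, int *len, int pos, unsigned int v)
    {
        if (pos < 0 || pos > *len || *len >= CAP)
            return -1;                  /* no memmove() has happened yet */
        memmove(&arr[pos + 1], &arr[pos], (*len - pos) * sizeof(*arr));
        arr[pos] = v;
        (*len)++;
        return 0;
    }

    int main(void)
    {
        unsigned int arr[CAP] = { 10, 20, 30 };
        int len = 3;

        if (insert_at(arr, &len, 1, 15) == 0)
            for (int i = 0; i < len; i++)
                printf("%u ", arr[i]);  /* 10 15 20 30 */
        printf("\n");
        return 0;
    }
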
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 43fba01da6c3..3346a9252063 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -144,14 +144,17 @@
static struct kmem_cache *ext4_es_cachep;
static struct kmem_cache *ext4_pending_cachep;
-static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
+static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
+ struct extent_status *prealloc);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t end, int *reserved);
+ ext4_lblk_t end, int *reserved,
+ struct extent_status *prealloc);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
struct ext4_inode_info *locked_ei);
-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len);
+static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len,
+ struct pending_reservation **prealloc);
int __init ext4_init_es(void)
{
@@ -269,14 +272,12 @@ static void __es_find_extent_range(struct inode *inode,
/* see if the extent has been cached */
es->es_lblk = es->es_len = es->es_pblk = 0;
- if (tree->cache_es) {
- es1 = tree->cache_es;
- if (in_range(lblk, es1->es_lblk, es1->es_len)) {
- es_debug("%u cached by [%u/%u) %llu %x\n",
- lblk, es1->es_lblk, es1->es_len,
- ext4_es_pblock(es1), ext4_es_status(es1));
- goto out;
- }
+ es1 = READ_ONCE(tree->cache_es);
+ if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
+ es_debug("%u cached by [%u/%u) %llu %x\n",
+ lblk, es1->es_lblk, es1->es_len,
+ ext4_es_pblock(es1), ext4_es_status(es1));
+ goto out;
}
es1 = __es_tree_search(&tree->root, lblk);
@@ -295,7 +296,7 @@ out:
}
if (es1 && matching_fn(es1)) {
- tree->cache_es = es1;
+ WRITE_ONCE(tree->cache_es, es1);
es->es_lblk = es1->es_lblk;
es->es_len = es1->es_len;
es->es_pblk = es1->es_pblk;
@@ -441,22 +442,49 @@ static void ext4_es_list_del(struct inode *inode)
spin_unlock(&sbi->s_es_lock);
}
-static struct extent_status *
-ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
- ext4_fsblk_t pblk)
+static inline struct pending_reservation *__alloc_pending(bool nofail)
+{
+ if (!nofail)
+ return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
+
+ return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL);
+}
+
+static inline void __free_pending(struct pending_reservation *pr)
+{
+ kmem_cache_free(ext4_pending_cachep, pr);
+}
+
+/*
+ * Returns true if this extent_status entry must be kept: allocating it
+ * must not fail and it cannot be reclaimed until its status changes.
+ */
+static inline bool ext4_es_must_keep(struct extent_status *es)
+{
+ /* fiemap, bigalloc, and seek_data/hole need to use it. */
+ if (ext4_es_is_delayed(es))
+ return true;
+
+ return false;
+}
+
+static inline struct extent_status *__es_alloc_extent(bool nofail)
+{
+ if (!nofail)
+ return kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
+
+ return kmem_cache_zalloc(ext4_es_cachep, GFP_KERNEL | __GFP_NOFAIL);
+}
+
+static void ext4_es_init_extent(struct inode *inode, struct extent_status *es,
+ ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk)
{
- struct extent_status *es;
- es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
- if (es == NULL)
- return NULL;
es->es_lblk = lblk;
es->es_len = len;
es->es_pblk = pblk;
- /*
- * We don't count delayed extent because we never try to reclaim them
- */
- if (!ext4_es_is_delayed(es)) {
+ /* We never try to reclaim a must-keep extent, so we don't count it. */
+ if (!ext4_es_must_keep(es)) {
if (!EXT4_I(inode)->i_es_shk_nr++)
ext4_es_list_add(inode);
percpu_counter_inc(&EXT4_SB(inode->i_sb)->
@@ -465,8 +493,11 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
EXT4_I(inode)->i_es_all_nr++;
percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
+}
- return es;
+static inline void __es_free_extent(struct extent_status *es)
+{
+ kmem_cache_free(ext4_es_cachep, es);
}
static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
@@ -474,8 +505,8 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
EXT4_I(inode)->i_es_all_nr--;
percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
- /* Decrease the shrink counter when this es is not delayed */
- if (!ext4_es_is_delayed(es)) {
+ /* Decrease the shrink counter when we can reclaim the extent. */
+ if (!ext4_es_must_keep(es)) {
BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
if (!--EXT4_I(inode)->i_es_shk_nr)
ext4_es_list_del(inode);
@@ -483,7 +514,7 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
s_es_stats.es_stats_shk_cnt);
}
- kmem_cache_free(ext4_es_cachep, es);
+ __es_free_extent(es);
}
/*
@@ -745,7 +776,8 @@ static inline void ext4_es_insert_extent_check(struct inode *inode,
}
#endif
-static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
+static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
+ struct extent_status *prealloc)
{
struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
struct rb_node **p = &tree->root.rb_node;
@@ -785,10 +817,15 @@ static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
}
}
- es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
- newes->es_pblk);
+ if (prealloc)
+ es = prealloc;
+ else
+ es = __es_alloc_extent(false);
if (!es)
return -ENOMEM;
+ ext4_es_init_extent(inode, es, newes->es_lblk, newes->es_len,
+ newes->es_pblk);
+
rb_link_node(&es->rb_node, parent, p);
rb_insert_color(&es->rb_node, &tree->root);
@@ -809,8 +846,12 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
{
struct extent_status newes;
ext4_lblk_t end = lblk + len - 1;
- int err = 0;
+ int err1 = 0, err2 = 0, err3 = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct extent_status *es1 = NULL;
+ struct extent_status *es2 = NULL;
+ struct pending_reservation *pr = NULL;
+ bool revise_pending = false;
es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
lblk, len, pblk, status, inode->i_ino);
@@ -835,29 +876,57 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_es_insert_extent_check(inode, &newes);
+ revise_pending = sbi->s_cluster_ratio > 1 &&
+ test_opt(inode->i_sb, DELALLOC) &&
+ (status & (EXTENT_STATUS_WRITTEN |
+ EXTENT_STATUS_UNWRITTEN));
+retry:
+ if (err1 && !es1)
+ es1 = __es_alloc_extent(true);
+ if ((err1 || err2) && !es2)
+ es2 = __es_alloc_extent(true);
+ if ((err1 || err2 || err3) && revise_pending && !pr)
+ pr = __alloc_pending(true);
write_lock(&EXT4_I(inode)->i_es_lock);
- err = __es_remove_extent(inode, lblk, end, NULL);
- if (err != 0)
+
+ err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
+ if (err1 != 0)
goto error;
-retry:
- err = __es_insert_extent(inode, &newes);
- if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
- 128, EXT4_I(inode)))
- goto retry;
- if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
- err = 0;
+ /* Free preallocated extent if it didn't get used. */
+ if (es1) {
+ if (!es1->es_len)
+ __es_free_extent(es1);
+ es1 = NULL;
+ }
- if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
- (status & EXTENT_STATUS_WRITTEN ||
- status & EXTENT_STATUS_UNWRITTEN))
- __revise_pending(inode, lblk, len);
+ err2 = __es_insert_extent(inode, &newes, es2);
+ if (err2 == -ENOMEM && !ext4_es_must_keep(&newes))
+ err2 = 0;
+ if (err2 != 0)
+ goto error;
+ /* Free preallocated extent if it didn't get used. */
+ if (es2) {
+ if (!es2->es_len)
+ __es_free_extent(es2);
+ es2 = NULL;
+ }
+ if (revise_pending) {
+ err3 = __revise_pending(inode, lblk, len, &pr);
+ if (err3 != 0)
+ goto error;
+ if (pr) {
+ __free_pending(pr);
+ pr = NULL;
+ }
+ }
error:
write_unlock(&EXT4_I(inode)->i_es_lock);
+ if (err1 || err2 || err3)
+ goto retry;
ext4_es_print_tree(inode);
-
- return err;
+ return 0;
}
/*
@@ -887,7 +956,7 @@ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
if (!es || es->es_lblk > end)
- __es_insert_extent(inode, &newes);
+ __es_insert_extent(inode, &newes, NULL);
write_unlock(&EXT4_I(inode)->i_es_lock);
}
@@ -916,14 +985,12 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
/* find extent in cache firstly */
es->es_lblk = es->es_len = es->es_pblk = 0;
- if (tree->cache_es) {
- es1 = tree->cache_es;
- if (in_range(lblk, es1->es_lblk, es1->es_len)) {
- es_debug("%u cached by [%u/%u)\n",
- lblk, es1->es_lblk, es1->es_len);
- found = 1;
- goto out;
- }
+ es1 = READ_ONCE(tree->cache_es);
+ if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
+ es_debug("%u cached by [%u/%u)\n",
+ lblk, es1->es_lblk, es1->es_len);
+ found = 1;
+ goto out;
}
node = tree->root.rb_node;
@@ -1257,7 +1324,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
rc->ndelonly--;
node = rb_next(&pr->rb_node);
rb_erase(&pr->rb_node, &tree->root);
- kmem_cache_free(ext4_pending_cachep, pr);
+ __free_pending(pr);
if (!node)
break;
pr = rb_entry(node, struct pending_reservation,
@@ -1276,6 +1343,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
* @lblk - first block in range
* @end - last block in range
* @reserved - number of cluster reservations released
+ * @prealloc - pre-allocated es to avoid memory allocation failures
*
* If @reserved is not NULL and delayed allocation is enabled, counts
* block/cluster reservations freed by removing range and if bigalloc
@@ -1283,7 +1351,8 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
* error code on failure.
*/
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t end, int *reserved)
+ ext4_lblk_t end, int *reserved,
+ struct extent_status *prealloc)
{
struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
struct rb_node *node;
@@ -1291,14 +1360,12 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
struct extent_status orig_es;
ext4_lblk_t len1, len2;
ext4_fsblk_t block;
- int err;
+ int err = 0;
bool count_reserved = true;
struct rsvd_count rc;
if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
count_reserved = false;
-retry:
- err = 0;
es = __es_tree_search(&tree->root, lblk);
if (!es)
@@ -1332,14 +1399,13 @@ retry:
orig_es.es_len - len2;
ext4_es_store_pblock_status(&newes, block,
ext4_es_status(&orig_es));
- err = __es_insert_extent(inode, &newes);
+ err = __es_insert_extent(inode, &newes, prealloc);
if (err) {
+ if (!ext4_es_must_keep(&newes))
+ return 0;
+
es->es_lblk = orig_es.es_lblk;
es->es_len = orig_es.es_len;
- if ((err == -ENOMEM) &&
- __es_shrink(EXT4_SB(inode->i_sb),
- 128, EXT4_I(inode)))
- goto retry;
goto out;
}
} else {
@@ -1352,9 +1418,9 @@ retry:
}
}
if (count_reserved)
- count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
- &orig_es, &rc);
- goto out;
+ count_rsvd(inode, orig_es.es_lblk + len1,
+ orig_es.es_len - len1 - len2, &orig_es, &rc);
+ goto out_get_reserved;
}
if (len1 > 0) {
@@ -1396,6 +1462,7 @@ retry:
}
}
+out_get_reserved:
if (count_reserved)
*reserved = get_rsvd(inode, end, es, &rc);
out:
@@ -1418,6 +1485,7 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t end;
int err = 0;
int reserved = 0;
+ struct extent_status *es = NULL;
trace_ext4_es_remove_extent(inode, lblk, len);
es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
@@ -1429,17 +1497,29 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
end = lblk + len - 1;
BUG_ON(end < lblk);
+retry:
+ if (err && !es)
+ es = __es_alloc_extent(true);
/*
* ext4_clear_inode() depends on us taking i_es_lock unconditionally
* so that we are sure __es_shrink() is done with the inode before it
* is reclaimed.
*/
write_lock(&EXT4_I(inode)->i_es_lock);
- err = __es_remove_extent(inode, lblk, end, &reserved);
+ err = __es_remove_extent(inode, lblk, end, &reserved, es);
+ /* Free preallocated extent if it didn't get used. */
+ if (es) {
+ if (!es->es_len)
+ __es_free_extent(es);
+ es = NULL;
+ }
write_unlock(&EXT4_I(inode)->i_es_lock);
+ if (err)
+ goto retry;
+
ext4_es_print_tree(inode);
ext4_da_release_space(inode, reserved);
- return err;
+ return 0;
}
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
@@ -1686,11 +1766,8 @@ static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
(*nr_to_scan)--;
node = rb_next(&es->rb_node);
- /*
- * We can't reclaim delayed extent from status tree because
- * fiemap, bigallic, and seek_data/hole need to use it.
- */
- if (ext4_es_is_delayed(es))
+
+ if (ext4_es_must_keep(es))
goto next;
if (ext4_es_is_referenced(es)) {
ext4_es_clear_referenced(es);
@@ -1754,7 +1831,7 @@ void ext4_clear_inode_es(struct inode *inode)
while (node) {
es = rb_entry(node, struct extent_status, rb_node);
node = rb_next(node);
- if (!ext4_es_is_delayed(es)) {
+ if (!ext4_es_must_keep(es)) {
rb_erase(&es->rb_node, &tree->root);
ext4_es_free_extent(inode, es);
}
@@ -1841,11 +1918,13 @@ static struct pending_reservation *__get_pending(struct inode *inode,
*
* @inode - file containing the cluster
* @lblk - logical block in the cluster to be added
+ * @prealloc - preallocated pending entry
*
* Returns 0 on successful insertion and -ENOMEM on failure. If the
* pending reservation is already in the set, returns successfully.
*/
-static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
+static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
+ struct pending_reservation **prealloc)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
@@ -1871,10 +1950,15 @@ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
}
}
- pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
- if (pr == NULL) {
- ret = -ENOMEM;
- goto out;
+ if (likely(*prealloc == NULL)) {
+ pr = __alloc_pending(false);
+ if (!pr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ } else {
+ pr = *prealloc;
+ *prealloc = NULL;
}
pr->lclu = lclu;
@@ -1904,7 +1988,7 @@ static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
if (pr != NULL) {
tree = &EXT4_I(inode)->i_pending_tree;
rb_erase(&pr->rb_node, &tree->root);
- kmem_cache_free(ext4_pending_cachep, pr);
+ __free_pending(pr);
}
}
@@ -1965,7 +2049,10 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
bool allocated)
{
struct extent_status newes;
- int err = 0;
+ int err1 = 0, err2 = 0, err3 = 0;
+ struct extent_status *es1 = NULL;
+ struct extent_status *es2 = NULL;
+ struct pending_reservation *pr = NULL;
es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
lblk, inode->i_ino);
@@ -1977,29 +2064,52 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
ext4_es_insert_extent_check(inode, &newes);
+retry:
+ if (err1 && !es1)
+ es1 = __es_alloc_extent(true);
+ if ((err1 || err2) && !es2)
+ es2 = __es_alloc_extent(true);
+ if ((err1 || err2 || err3) && allocated && !pr)
+ pr = __alloc_pending(true);
write_lock(&EXT4_I(inode)->i_es_lock);
- err = __es_remove_extent(inode, lblk, lblk, NULL);
- if (err != 0)
- goto error;
-retry:
- err = __es_insert_extent(inode, &newes);
- if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
- 128, EXT4_I(inode)))
- goto retry;
- if (err != 0)
+ err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
+ if (err1 != 0)
goto error;
+ /* Free preallocated extent if it didn't get used. */
+ if (es1) {
+ if (!es1->es_len)
+ __es_free_extent(es1);
+ es1 = NULL;
+ }
- if (allocated)
- __insert_pending(inode, lblk);
+ err2 = __es_insert_extent(inode, &newes, es2);
+ if (err2 != 0)
+ goto error;
+ /* Free preallocated extent if it didn't get used. */
+ if (es2) {
+ if (!es2->es_len)
+ __es_free_extent(es2);
+ es2 = NULL;
+ }
+ if (allocated) {
+ err3 = __insert_pending(inode, lblk, &pr);
+ if (err3 != 0)
+ goto error;
+ if (pr) {
+ __free_pending(pr);
+ pr = NULL;
+ }
+ }
error:
write_unlock(&EXT4_I(inode)->i_es_lock);
+ if (err1 || err2 || err3)
+ goto retry;
ext4_es_print_tree(inode);
ext4_print_pending_tree(inode);
-
- return err;
+ return 0;
}
/*
@@ -2100,21 +2210,24 @@ unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
* @inode - file containing the range
* @lblk - logical block defining the start of range
* @len - length of range in blocks
+ * @prealloc - preallocated pending entry
*
* Used after a newly allocated extent is added to the extents status tree.
* Requires that the extents in the range have either written or unwritten
* status. Must be called while holding i_es_lock.
*/
-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len)
+static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len,
+ struct pending_reservation **prealloc)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t end = lblk + len - 1;
ext4_lblk_t first, last;
bool f_del = false, l_del = false;
+ int ret = 0;
if (len == 0)
- return;
+ return 0;
/*
* Two cases - block range within single cluster and block range
@@ -2135,7 +2248,9 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
f_del = __es_scan_range(inode, &ext4_es_is_delonly,
first, lblk - 1);
if (f_del) {
- __insert_pending(inode, first);
+ ret = __insert_pending(inode, first, prealloc);
+ if (ret < 0)
+ goto out;
} else {
last = EXT4_LBLK_CMASK(sbi, end) +
sbi->s_cluster_ratio - 1;
@@ -2143,9 +2258,11 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
l_del = __es_scan_range(inode,
&ext4_es_is_delonly,
end + 1, last);
- if (l_del)
- __insert_pending(inode, last);
- else
+ if (l_del) {
+ ret = __insert_pending(inode, last, prealloc);
+ if (ret < 0)
+ goto out;
+ } else
__remove_pending(inode, last);
}
} else {
@@ -2153,18 +2270,24 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
if (first != lblk)
f_del = __es_scan_range(inode, &ext4_es_is_delonly,
first, lblk - 1);
- if (f_del)
- __insert_pending(inode, first);
- else
+ if (f_del) {
+ ret = __insert_pending(inode, first, prealloc);
+ if (ret < 0)
+ goto out;
+ } else
__remove_pending(inode, first);
last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
if (last != end)
l_del = __es_scan_range(inode, &ext4_es_is_delonly,
end + 1, last);
- if (l_del)
- __insert_pending(inode, last);
- else
+ if (l_del) {
+ ret = __insert_pending(inode, last, prealloc);
+ if (ret < 0)
+ goto out;
+ } else
__remove_pending(inode, last);
}
+out:
+ return ret;
}
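
Every rewritten path in extents_status.c above follows the same scheme: the first attempt still allocates atomically under i_es_lock, but on -ENOMEM the lock is dropped and the needed extent_status / pending_reservation objects are preallocated with GFP_KERNEL | __GFP_NOFAIL before retrying, so the second pass cannot fail; any preallocation the retry did not consume is freed. A condensed userspace model of that control flow — locking is reduced to comments and every name is a stand-in, not ext4's:

    #include <stdlib.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct es { int consumed; };

    /* Stand-in for __es_alloc_extent(): nofail models GFP_KERNEL |
     * __GFP_NOFAIL (never fails), the fast path models GFP_ATOMIC. */
    static struct es *alloc_es(bool nofail)
    {
        if (!nofail && rand() % 2)      /* simulate atomic alloc failure */
            return NULL;
        return calloc(1, sizeof(struct es));
    }

    /* Stand-in for __es_insert_extent(): prefer the preallocated
     * object, else try a fast allocation; mark whatever got used. */
    static int insert(struct es *prealloc)
    {
        struct es *e = prealloc ? prealloc : alloc_es(false);

        if (!e)
            return -1;                  /* -ENOMEM: caller retries */
        e->consumed = 1;                /* now owned by the "tree" */
        return 0;
    }

    int main(void)
    {
        struct es *es1 = NULL;
        int err = 0;

    retry:
        if (err && !es1)
            es1 = alloc_es(true);       /* preallocate outside the lock */
        /* write_lock(&i_es_lock); */
        err = insert(es1);
        if (es1) {
            if (!es1->consumed)         /* preallocation went unused */
                free(es1);
            es1 = NULL;                 /* consumed objects stay in the
                                         * tree (leaked in this model) */
        }
        /* write_unlock(&i_es_lock); */
        if (err)
            goto retry;                 /* second pass cannot fail */
        printf("inserted\n");
        return 0;
    }
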
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 1513e90fb6d2..7ac55d071eb2 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -505,6 +505,12 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
inode_unlock_shared(inode);
break;
}
+ /*
+ * Make sure inline data cannot be created anymore since we are going
+ * to allocate blocks for DIO. We know the inode does not have any
+ * inline data now because ext4_dio_supported() checked for that.
+ */
+ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
if (offset < 0)
return offset;
diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
index 37347ba868b7..d18c4cd4c63f 100644
--- a/fs/ext4/fsmap.c
+++ b/fs/ext4/fsmap.c
@@ -486,6 +486,8 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
keys[0].fmr_physical = bofs;
if (keys[1].fmr_physical >= eofs)
keys[1].fmr_physical = eofs - 1;
+ if (keys[1].fmr_physical < keys[0].fmr_physical)
+ return 0;
start_fsb = keys[0].fmr_physical;
end_fsb = keys[1].fmr_physical;
@@ -574,8 +576,8 @@ static bool ext4_getfsmap_is_valid_device(struct super_block *sb,
if (fm->fmr_device == 0 || fm->fmr_device == UINT_MAX ||
fm->fmr_device == new_encode_dev(sb->s_bdev->bd_dev))
return true;
- if (EXT4_SB(sb)->journal_bdev &&
- fm->fmr_device == new_encode_dev(EXT4_SB(sb)->journal_bdev->bd_dev))
+ if (EXT4_SB(sb)->s_journal_bdev &&
+ fm->fmr_device == new_encode_dev(EXT4_SB(sb)->s_journal_bdev->bd_dev))
return true;
return false;
}
@@ -645,9 +647,9 @@ int ext4_getfsmap(struct super_block *sb, struct ext4_fsmap_head *head,
memset(handlers, 0, sizeof(handlers));
handlers[0].gfd_dev = new_encode_dev(sb->s_bdev->bd_dev);
handlers[0].gfd_fn = ext4_getfsmap_datadev;
- if (EXT4_SB(sb)->journal_bdev) {
+ if (EXT4_SB(sb)->s_journal_bdev) {
handlers[1].gfd_dev = new_encode_dev(
- EXT4_SB(sb)->journal_bdev->bd_dev);
+ EXT4_SB(sb)->s_journal_bdev->bd_dev);
handlers[1].gfd_fn = ext4_getfsmap_logdev;
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 83846cc81485..cbde5a096c7b 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -500,7 +500,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
goto fallback;
}
- max_dirs = ndirs / ngroups + inodes_per_group / 16;
+ max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16;
min_inodes = avefreei - inodes_per_group*flex_size / 4;
if (min_inodes < 1)
min_inodes = 1;
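
The one-line find_group_orlov() fix above scales the directory cap the same way the min_inodes line below it already does: with flex_bg, Orlov's "group" is really a flex group of flex_size block groups, so a per-group limit computed from inodes_per_group alone is flex_size times too small. A quick numeric illustration with made-up figures:

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative numbers only. */
        unsigned ndirs = 640, ngroups = 64;
        unsigned inodes_per_group = 8192, flex_size = 16;

        unsigned old_max = ndirs / ngroups + inodes_per_group / 16;
        unsigned new_max = ndirs / ngroups +
                           inodes_per_group * flex_size / 16;

        printf("old max_dirs=%u, new max_dirs=%u\n", old_max, new_max);
        return 0;
    }
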
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 36699a131168..25532bac7777 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -148,6 +148,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
struct super_block *sb = inode->i_sb;
Indirect *p = chain;
struct buffer_head *bh;
+ unsigned int key;
int ret = -EIO;
*err = 0;
@@ -156,7 +157,13 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
if (!p->key)
goto no_block;
while (--depth) {
- bh = sb_getblk(sb, le32_to_cpu(p->key));
+ key = le32_to_cpu(p->key);
+ if (key > ext4_blocks_count(EXT4_SB(sb)->s_es)) {
+ /* the block was out of range */
+ ret = -EFSCORRUPTED;
+ goto failure;
+ }
+ bh = sb_getblk(sb, key);
if (unlikely(!bh)) {
ret = -ENOMEM;
goto failure;
@@ -629,6 +636,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
ext4_update_inode_fsync_trans(handle, inode, 1);
count = ar.len;
+
+ /*
+ * Update reserved blocks/metadata blocks after successful block
+ * allocation which had been deferred till now.
+ */
+ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+ ext4_da_update_reserve_space(inode, count, 1);
+
got_it:
map->m_flags |= EXT4_MAP_MAPPED;
map->m_pblk = le32_to_cpu(chain[depth-1].key);
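
The ext4_get_branch() hunk above range-checks a block pointer read from an indirect block before passing it to sb_getblk(), so on-disk corruption surfaces as -EFSCORRUPTED rather than an attempt to read a buffer beyond the device. A minimal sketch of the same check, names illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the new check: a pointer loaded from disk is invalid
     * if it is zero or lies beyond the filesystem's block count. */
    static int valid_block(uint32_t key, uint64_t blocks_count)
    {
        return key != 0 && key <= blocks_count;
    }

    int main(void)
    {
        uint64_t blocks_count = 1u << 20;       /* 1M-block filesystem */
        uint32_t keys[] = { 12345, 0x7fffffff };

        for (int i = 0; i < 2; i++)
            printf("block %u: %s\n", keys[i],
                   valid_block(keys[i], blocks_count) ? "ok"
                                                      : "-EFSCORRUPTED");
        return 0;
    }
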
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 62384ae77a78..549ceac9099c 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -32,8 +32,12 @@ static int get_max_inline_xattr_value_size(struct inode *inode,
struct ext4_xattr_ibody_header *header;
struct ext4_xattr_entry *entry;
struct ext4_inode *raw_inode;
+ void *end;
int free, min_offs;
+ if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
+ return 0;
+
min_offs = EXT4_SB(inode->i_sb)->s_inode_size -
EXT4_GOOD_OLD_INODE_SIZE -
EXT4_I(inode)->i_extra_isize -
@@ -52,14 +56,23 @@ static int get_max_inline_xattr_value_size(struct inode *inode,
raw_inode = ext4_raw_inode(iloc);
header = IHDR(inode, raw_inode);
entry = IFIRST(header);
+ end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
/* Compute min_offs. */
- for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
+ while (!IS_LAST_ENTRY(entry)) {
+ void *next = EXT4_XATTR_NEXT(entry);
+
+ if (next >= end) {
+ EXT4_ERROR_INODE(inode,
+ "corrupt xattr in inline inode");
+ return 0;
+ }
if (!entry->e_value_inum && entry->e_value_size) {
size_t offs = le16_to_cpu(entry->e_value_offs);
if (offs < min_offs)
min_offs = offs;
}
+ entry = next;
}
free = min_offs -
((void *)entry - (void *)IFIRST(header)) - sizeof(__u32);
@@ -154,7 +167,6 @@ int ext4_find_inline_data_nolock(struct inode *inode)
(void *)ext4_raw_inode(&is.iloc));
EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
le32_to_cpu(is.s.here->e_value_size);
- ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
}
out:
brelse(is.iloc.bh);
@@ -204,7 +216,7 @@ out:
/*
* write the buffer to the inline inode.
* If 'create' is set, we don't need to do the extra copy in the xattr
- * value since it is already handled by ext4_xattr_ibody_inline_set.
+ * value since it is already handled by ext4_xattr_ibody_set.
* That saves us one memcpy.
*/
static void ext4_write_inline_data(struct inode *inode, struct ext4_iloc *iloc,
@@ -286,7 +298,7 @@ static int ext4_create_inline_data(handle_t *handle,
BUG_ON(!is.s.not_found);
- error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
+ error = ext4_xattr_ibody_set(handle, inode, &i, &is);
if (error) {
if (error == -ENOSPC)
ext4_clear_inode_state(inode,
@@ -346,7 +358,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
value, len);
- if (error == -ENODATA)
+ if (error < 0)
goto out;
BUFFER_TRACE(is.iloc.bh, "get_write_access");
@@ -358,7 +370,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
i.value = value;
i.value_len = len;
- error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
+ error = ext4_xattr_ibody_set(handle, inode, &i, &is);
if (error)
goto out;
@@ -431,7 +443,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
if (error)
goto out;
- error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
+ error = ext4_xattr_ibody_set(handle, inode, &i, &is);
if (error)
goto out;
@@ -1170,6 +1182,7 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
ext4_initialize_dirent_tail(dir_block,
inode->i_sb->s_blocksize);
set_buffer_uptodate(dir_block);
+ unlock_buffer(dir_block);
err = ext4_handle_dirty_dirblock(handle, inode, dir_block);
if (err)
return err;
@@ -1243,6 +1256,7 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
if (!S_ISDIR(inode->i_mode)) {
memcpy(data_bh->b_data, buf, inline_size);
set_buffer_uptodate(data_bh);
+ unlock_buffer(data_bh);
error = ext4_handle_dirty_metadata(handle,
inode, data_bh);
} else {
@@ -1250,7 +1264,6 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
buf, inline_size);
}
- unlock_buffer(data_bh);
out_restore:
if (error)
ext4_restore_inline_data(handle, inode, iloc, buf, inline_size);
@@ -1967,8 +1980,7 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
i.value = value;
i.value_len = i_size > EXT4_MIN_INLINE_DATA_SIZE ?
i_size - EXT4_MIN_INLINE_DATA_SIZE : 0;
- err = ext4_xattr_ibody_inline_set(handle, inode,
- &i, &is);
+ err = ext4_xattr_ibody_set(handle, inode, &i, &is);
if (err)
goto out_error;
}
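
The rewritten loop in get_max_inline_xattr_value_size() above carries an explicit end pointer because EXT4_XATTR_NEXT() advances by an on-disk length: with a corrupted entry, the old for-loop could walk past the in-inode xattr area before IS_LAST_ENTRY() ever matched. A userspace sketch of the same bounded walk over length-prefixed records (the record format is invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* Walk records of the form <len byte><len payload bytes>, but never
     * trust len to stay in bounds: compute next first and compare it
     * against end, as the new EXT4_XATTR_NEXT()/end check does. */
    static int walk(const uint8_t *buf, size_t size)
    {
        const uint8_t *p = buf, *end = buf + size;

        while (p < end && *p != 0) {           /* zero length terminates */
            const uint8_t *next = p + 1 + *p;

            if (next >= end)
                return -1;                     /* corrupt: would escape */
            printf("entry of %u bytes\n", *p);
            p = next;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t good[] = { 3, 'a', 'b', 'c', 2, 'x', 'y', 0 };
        uint8_t bad[]  = { 200, 'a', 0 };      /* length runs off the end */

        printf("good: %d\n", walk(good, sizeof(good)));
        printf("bad:  %d\n", walk(bad, sizeof(bad)));
        return 0;
    }
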
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index d8fee911d4f4..8a0bca3b653b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -207,6 +207,8 @@ void ext4_evict_inode(struct inode *inode)
trace_ext4_evict_inode(inode);
+ if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
+ ext4_evict_ea_inode(inode);
if (inode->i_nlink) {
/*
* When journalling data dirty buffers are tracked only in the
@@ -667,16 +669,6 @@ found:
*/
ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
}
-
- /*
- * Update reserved blocks/metadata blocks after successful
- * block allocation which had been deferred till now. We don't
- * support fallocate for non extent files. So we can update
- * reserve space here.
- */
- if ((retval > 0) &&
- (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
- ext4_da_update_reserve_space(inode, retval, 1);
}
if (retval > 0) {
@@ -1317,6 +1309,13 @@ retry_grab:
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
+ /*
+ * As with the page allocation above, we preallocate buffer heads
+ * before starting the handle.
+ */
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, inode->i_sb->s_blocksize, 0);
+
unlock_page(page);
retry_journal:
@@ -1430,7 +1429,8 @@ static int ext4_write_end(struct file *file,
bool verity = ext4_verity_in_progress(inode);
trace_ext4_write_end(inode, pos, len, copied);
- if (inline_data) {
+ if (inline_data &&
+ ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
ret = ext4_write_inline_data_end(inode, pos, len,
copied, page);
if (ret < 0) {
@@ -1717,7 +1717,14 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
ext4_lblk_t start, last;
start = index << (PAGE_SHIFT - inode->i_blkbits);
last = end << (PAGE_SHIFT - inode->i_blkbits);
+
+ /*
+ * avoid racing with extent status tree scans made by
+ * ext4_insert_delayed_block()
+ */
+ down_write(&EXT4_I(inode)->i_data_sem);
ext4_es_remove_extent(inode, start, last - start + 1);
+ up_write(&EXT4_I(inode)->i_data_sem);
}
pagevec_init(&pvec);
@@ -4523,7 +4530,7 @@ int ext4_truncate(struct inode *inode)
trace_ext4_truncate_enter(inode);
if (!ext4_can_truncate(inode))
- return 0;
+ goto out_trace;
ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
@@ -4534,16 +4541,15 @@ int ext4_truncate(struct inode *inode)
int has_inline = 1;
err = ext4_inline_data_truncate(inode, &has_inline);
- if (err)
- return err;
- if (has_inline)
- return 0;
+ if (err || has_inline)
+ goto out_trace;
}
/* If we zero-out tail of the page, we have to create jinode for jbd2 */
if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
- if (ext4_inode_attach_jinode(inode) < 0)
- return 0;
+ err = ext4_inode_attach_jinode(inode);
+ if (err)
+ goto out_trace;
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
@@ -4552,8 +4558,10 @@ int ext4_truncate(struct inode *inode)
credits = ext4_blocks_for_truncate(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto out_trace;
+ }
if (inode->i_size & (inode->i_sb->s_blocksize - 1))
ext4_block_truncate_page(handle, mapping, inode->i_size);
@@ -4602,6 +4610,7 @@ out_stop:
ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
+out_trace:
trace_ext4_truncate_exit(inode);
return err;
}
@@ -4638,9 +4647,17 @@ static int __ext4_get_inode_loc(struct inode *inode,
inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
inode_offset = ((inode->i_ino - 1) %
EXT4_INODES_PER_GROUP(sb));
- block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
+ block = ext4_inode_table(sb, gdp);
+ if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) ||
+ (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) {
+ ext4_error(sb, "Invalid inode table block %llu in "
+ "block_group %u", block, iloc->block_group);
+ return -EFSCORRUPTED;
+ }
+ block += (inode_offset / inodes_per_block);
+
bh = sb_getblk(sb, block);
if (unlikely(!bh))
return -ENOMEM;
@@ -4834,11 +4851,15 @@ static inline int ext4_iget_extra_inode(struct inode *inode,
__le32 *magic = (void *)raw_inode +
EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
- if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
- EXT4_INODE_SIZE(inode->i_sb) &&
+ if (EXT4_INODE_HAS_XATTR_SPACE(inode) &&
*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
+ int err;
+
ext4_set_inode_state(inode, EXT4_STATE_XATTR);
- return ext4_find_inline_data_nolock(inode);
+ err = ext4_find_inline_data_nolock(inode);
+ if (!err && ext4_has_inline_data(inode))
+ ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+ return err;
} else
EXT4_I(inode)->i_inline_off = 0;
return 0;
@@ -4872,6 +4893,24 @@ static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
return inode_peek_iversion(inode);
}
+static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
+{
+ if (flags & EXT4_IGET_EA_INODE) {
+ if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
+ return "missing EA_INODE flag";
+ if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
+ EXT4_I(inode)->i_file_acl)
+ return "ea_inode with extended attributes";
+ } else {
+ if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
+ return "unexpected EA_INODE flag";
+ }
+ if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD))
+ return "unexpected bad inode w/o EXT4_IGET_BAD";
+ return NULL;
+}
+
struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ext4_iget_flags flags, const char *function,
unsigned int line)
@@ -4880,6 +4919,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
struct ext4_inode *raw_inode;
struct ext4_inode_info *ei;
struct inode *inode;
+ const char *err_str;
journal_t *journal = EXT4_SB(sb)->s_journal;
long ret;
loff_t size;
@@ -4903,8 +4943,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode->i_state & I_NEW)) {
+ if ((err_str = check_igot_inode(inode, flags)) != NULL) {
+ ext4_error_inode(inode, function, line, 0, err_str);
+ iput(inode);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
return inode;
+ }
ei = EXT4_I(inode);
iloc.bh = NULL;
@@ -4914,13 +4960,6 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
goto bad_inode;
raw_inode = ext4_raw_inode(&iloc);
- if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
- ext4_error_inode(inode, function, line, 0,
- "iget: root inode unallocated");
- ret = -EFSCORRUPTED;
- goto bad_inode;
- }
-
if ((flags & EXT4_IGET_HANDLE) &&
(raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
ret = -ESTALE;
@@ -4991,11 +5030,16 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
* NeilBrown 1999oct15
*/
if (inode->i_nlink == 0) {
- if ((inode->i_mode == 0 ||
+ if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL ||
!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
ino != EXT4_BOOT_LOADER_INO) {
- /* this inode is deleted */
- ret = -ESTALE;
+ /* this inode is deleted or unallocated */
+ if (flags & EXT4_IGET_SPECIAL) {
+ ext4_error_inode(inode, function, line, 0,
+ "iget: special inode unallocated");
+ ret = -EFSCORRUPTED;
+ } else
+ ret = -ESTALE;
goto bad_inode;
}
/* The only unlinked inodes we let through here have
@@ -5171,8 +5215,13 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
ext4_error_inode(inode, function, line, 0,
"casefold flag without casefold feature");
- brelse(iloc.bh);
+ if ((err_str = check_igot_inode(inode, flags)) != NULL) {
+ ext4_error_inode(inode, function, line, 0, err_str);
+ ret = -EFSCORRUPTED;
+ goto bad_inode;
+ }
+ brelse(iloc.bh);
unlock_new_inode(inode);
return inode;
@@ -6032,6 +6081,14 @@ static int __ext4_expand_extra_isize(struct inode *inode,
return 0;
}
+ /*
+ * We may need to allocate external xattr block so we need quotas
+ * initialized. Here we can be called with various locks held so we
+ * cannot afford to initialize quotas ourselves. So just bail.
+ */
+ if (dquot_initialize_needed(inode))
+ return -EAGAIN;
+
/* try to expand with EAs present */
error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
raw_inode, handle);
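
check_igot_inode(), added above, is applied on both iget paths — the early return for an already-cached inode and the freshly read one — so a caller asking for an EA inode never receives a regular inode, and vice versa, no matter which path runs. A toy model of the check; types and flag values are stand-ins:

    #include <stdio.h>
    #include <stdbool.h>

    enum iget_flags { IGET_NORMAL = 0, IGET_EA_INODE = 0x8 };

    struct toy_inode { bool ea_flag; bool has_xattrs; };

    /* Mirrors check_igot_inode(): the EA_INODE flag on the inode must
     * match what the caller asked for. */
    static const char *check_igot(const struct toy_inode *i, int flags)
    {
        if (flags & IGET_EA_INODE) {
            if (!i->ea_flag)
                return "missing EA_INODE flag";
            if (i->has_xattrs)
                return "ea_inode with extended attributes";
        } else if (i->ea_flag) {
            return "unexpected EA_INODE flag";
        }
        return NULL;
    }

    int main(void)
    {
        struct toy_inode cached = { .ea_flag = true };
        const char *err = check_igot(&cached, IGET_NORMAL);

        printf("%s\n", err ? err : "ok");   /* unexpected EA_INODE flag */
        return 0;
    }
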
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 9fa20f9ba52b..ae47505964c4 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -121,7 +121,8 @@ static long swap_inode_boot_loader(struct super_block *sb,
blkcnt_t blocks;
unsigned short bytes;
- inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
+ inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO,
+ EXT4_IGET_SPECIAL | EXT4_IGET_BAD);
if (IS_ERR(inode_bl))
return PTR_ERR(inode_bl);
ei_bl = EXT4_I(inode_bl);
@@ -169,7 +170,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
/* Protect extent tree against block allocations via delalloc */
ext4_double_down_write_data_sem(inode, inode_bl);
- if (inode_bl->i_nlink == 0) {
+ if (is_bad_inode(inode_bl) || !S_ISREG(inode_bl->i_mode)) {
/* this inode has never been used as a BOOT_LOADER */
set_nlink(inode_bl, 1);
i_uid_write(inode_bl, 0);
@@ -178,6 +179,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
ei_bl->i_flags = 0;
inode_set_iversion(inode_bl, 1);
i_size_write(inode_bl, 0);
+ EXT4_I(inode_bl)->i_disksize = inode_bl->i_size;
inode_bl->i_mode = S_IFREG;
if (ext4_has_feature_extents(sb)) {
ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS);
@@ -460,6 +462,10 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
if (ext4_is_quota_file(inode))
return err;
+ err = dquot_initialize(inode);
+ if (err)
+ return err;
+
err = ext4_get_inode_loc(inode, &iloc);
if (err)
return err;
@@ -475,10 +481,6 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
brelse(iloc.bh);
}
- err = dquot_initialize(inode);
- if (err)
- return err;
-
handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
EXT4_QUOTA_INIT_BLOCKS(sb) +
EXT4_QUOTA_DEL_BLOCKS(sb) + 3);
@@ -571,6 +573,7 @@ static int ext4_shutdown(struct super_block *sb, unsigned long arg)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
__u32 flags;
+ struct super_block *ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -589,7 +592,9 @@ static int ext4_shutdown(struct super_block *sb, unsigned long arg)
switch (flags) {
case EXT4_GOING_FLAGS_DEFAULT:
- freeze_bdev(sb->s_bdev);
+ ret = freeze_bdev(sb->s_bdev);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
thaw_bdev(sb->s_bdev, sb);
break;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 3c3166ba4364..e823731110e3 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
+#include <linux/freezer.h>
#include <trace/events/ext4.h>
#ifdef CONFIG_EXT4_DEBUG
@@ -1801,6 +1802,9 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
return err;
ext4_lock_group(ac->ac_sb, group);
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+ goto out;
+
max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
if (max > 0) {
@@ -1808,6 +1812,7 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
ext4_mb_use_best_found(ac, e4b);
}
+out:
ext4_unlock_group(ac->ac_sb, group);
ext4_mb_unload_buddy(e4b);
@@ -1834,12 +1839,10 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
if (err)
return err;
- if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
- ext4_mb_unload_buddy(e4b);
- return 0;
- }
-
ext4_lock_group(ac->ac_sb, group);
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+ goto out;
+
max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
ac->ac_g_ex.fe_len, &ex);
ex.fe_logical = 0xDEADFA11; /* debug value */
@@ -1872,6 +1875,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
ac->ac_b_ex = ex;
ext4_mb_use_best_found(ac, e4b);
}
+out:
ext4_unlock_group(ac->ac_sb, group);
ext4_mb_unload_buddy(e4b);
@@ -3091,9 +3095,9 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
struct ext4_allocation_request *ar)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ struct ext4_super_block *es = sbi->s_es;
int bsbits, max;
- ext4_lblk_t end;
- loff_t size, start_off;
+ loff_t size, start_off, end;
loff_t orig_size __maybe_unused;
ext4_lblk_t start;
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
@@ -3122,7 +3126,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
/* first, let's learn actual file size
* given current request is allocated */
- size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
+ size = extent_logical_end(sbi, &ac->ac_o_ex);
size = size << bsbits;
if (size < i_size_read(ac->ac_inode))
size = i_size_read(ac->ac_inode);
@@ -3181,6 +3185,10 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
start = max(start, rounddown(ac->ac_o_ex.fe_logical,
(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
+ /* avoid unnecessary preallocation that may trigger assertions */
+ if (start + size > EXT_MAX_BLOCKS)
+ size = EXT_MAX_BLOCKS - start;
+
/* don't cover already allocated blocks in selected range */
if (ar->pleft && start <= ar->lleft) {
size -= ar->lleft + 1 - start;
@@ -3201,7 +3209,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
/* check we don't cross already preallocated blocks */
rcu_read_lock();
list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
- ext4_lblk_t pa_end;
+ loff_t pa_end;
if (pa->pa_deleted)
continue;
@@ -3211,8 +3219,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
continue;
}
- pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
- pa->pa_len);
+ pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);
/* PA must not overlap original request */
BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
@@ -3241,12 +3248,11 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
/* XXX: extra loop to check we really don't overlap preallocations */
rcu_read_lock();
list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
- ext4_lblk_t pa_end;
+ loff_t pa_end;
spin_lock(&pa->pa_lock);
if (pa->pa_deleted == 0) {
- pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
- pa->pa_len);
+ pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);
BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
}
spin_unlock(&pa->pa_lock);
@@ -3271,18 +3277,21 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
/* define goal start in order to merge */
- if (ar->pright && (ar->lright == (start + size))) {
+ if (ar->pright && (ar->lright == (start + size)) &&
+ ar->pright >= size &&
+ ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
/* merge to the right */
ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
- &ac->ac_f_ex.fe_group,
- &ac->ac_f_ex.fe_start);
+ &ac->ac_g_ex.fe_group,
+ &ac->ac_g_ex.fe_start);
ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
}
- if (ar->pleft && (ar->lleft + 1 == start)) {
+ if (ar->pleft && (ar->lleft + 1 == start) &&
+ ar->pleft + 1 < ext4_blocks_count(es)) {
/* merge to the left */
ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
- &ac->ac_f_ex.fe_group,
- &ac->ac_f_ex.fe_start);
+ &ac->ac_g_ex.fe_group,
+ &ac->ac_g_ex.fe_start);
ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
}
@@ -3374,6 +3383,7 @@ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
BUG_ON(start < pa->pa_pstart);
BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
BUG_ON(pa->pa_free < len);
+ BUG_ON(ac->ac_b_ex.fe_len <= 0);
pa->pa_free -= len;
mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
@@ -3456,8 +3466,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
/* all fields in this condition don't change,
* so we can skip locking for them */
if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
- ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
- EXT4_C2B(sbi, pa->pa_len)))
+ ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, pa))
continue;
/* non-extent files can't have physical blocks past 2^32 */
@@ -3678,10 +3687,11 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
return -ENOMEM;
if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
- int winl;
- int wins;
- int win;
- int offs;
+ struct ext4_free_extent ex = {
+ .fe_logical = ac->ac_g_ex.fe_logical,
+ .fe_len = ac->ac_g_ex.fe_len,
+ };
+ loff_t orig_goal_end = extent_logical_end(sbi, &ex);
/* we can't allocate as much as normalizer wants.
* so, found space must get proper lstart
@@ -3689,26 +3699,34 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
- /* we're limited by original request in that
- * logical block must be covered any way
- * winl is window we can move our chunk within */
- winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
+ /*
+ * Use the below logic for adjusting best extent as it keeps
+ * fragmentation in check while ensuring logical range of best
+ * extent doesn't overflow out of goal extent:
+ *
+ * 1. Check if best ex can be kept at end of goal and still
+ * cover original start
+ * 2. Else, check if best ex can be kept at start of goal and
+ * still cover original start
+ * 3. Else, keep the best ex at start of original request.
+ */
+ ex.fe_len = ac->ac_b_ex.fe_len;
- /* also, we should cover whole original request */
- wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
+ ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
+ if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
+ goto adjust_bex;
- /* the smallest one defines real window */
- win = min(winl, wins);
+ ex.fe_logical = ac->ac_g_ex.fe_logical;
+ if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
+ goto adjust_bex;
- offs = ac->ac_o_ex.fe_logical %
- EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
- if (offs && offs < win)
- win = offs;
+ ex.fe_logical = ac->ac_o_ex.fe_logical;
+adjust_bex:
+ ac->ac_b_ex.fe_logical = ex.fe_logical;
- ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
- EXT4_NUM_B2C(sbi, win);
BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
+ BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
}
/* preallocation can change ac_b_ex, thus we store actually
@@ -3895,7 +3913,11 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
trace_ext4_mb_release_group_pa(sb, pa);
BUG_ON(pa->pa_deleted == 0);
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
- BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
+ if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
+ ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
+ e4b->bd_group, group, pa->pa_pstart);
+ return 0;
+ }
mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
@@ -4212,7 +4234,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
return;
- size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
+ size = extent_logical_end(sbi, &ac->ac_o_ex);
isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
>> bsbits;
@@ -4929,8 +4951,8 @@ do_more:
* them with group lock_held
*/
if (test_opt(sb, DISCARD)) {
- err = ext4_issue_discard(sb, block_group, bit, count,
- NULL);
+ err = ext4_issue_discard(sb, block_group, bit,
+ count_clusters, NULL);
if (err && err != -EOPNOTSUPP)
ext4_msg(sb, KERN_WARNING, "discard request in"
" group:%d block:%d count:%lu failed"
@@ -5138,19 +5160,19 @@ error_return:
* @sb: super block for the file system
* @start: starting block of the free extent in the alloc. group
* @count: number of blocks to TRIM
- * @group: alloc. group we are working with
* @e4b: ext4 buddy for the group
*
* Trim "count" blocks starting at "start" in the "group". To assure that no
* one will allocate those blocks, mark it as used in buddy bitmap. This must
* be called under the group lock.
*/
-static int ext4_trim_extent(struct super_block *sb, int start, int count,
- ext4_group_t group, struct ext4_buddy *e4b)
+static int ext4_trim_extent(struct super_block *sb,
+ int start, int count, struct ext4_buddy *e4b)
__releases(bitlock)
__acquires(bitlock)
{
struct ext4_free_extent ex;
+ ext4_group_t group = e4b->bd_group;
int ret = 0;
trace_ext4_trim_extent(sb, group, start, count);
@@ -5173,6 +5195,81 @@ __acquires(bitlock)
return ret;
}
+static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
+ ext4_group_t grp)
+{
+ unsigned long nr_clusters_in_group;
+
+ if (grp < (ext4_get_groups_count(sb) - 1))
+ nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
+ else
+ nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
+ ext4_group_first_block_no(sb, grp))
+ >> EXT4_CLUSTER_BITS(sb);
+
+ return nr_clusters_in_group - 1;
+}
+
+static bool ext4_trim_interrupted(void)
+{
+ return fatal_signal_pending(current) || freezing(current);
+}
+
+static int ext4_try_to_trim_range(struct super_block *sb,
+ struct ext4_buddy *e4b, ext4_grpblk_t start,
+ ext4_grpblk_t max, ext4_grpblk_t minblocks)
+{
+ ext4_grpblk_t next, count, free_count, last, origin_start;
+ bool set_trimmed = false;
+ void *bitmap;
+
+ last = ext4_last_grp_cluster(sb, e4b->bd_group);
+ bitmap = e4b->bd_bitmap;
+ if (start == 0 && max >= last)
+ set_trimmed = true;
+ origin_start = start;
+ start = max(e4b->bd_info->bb_first_free, start);
+ count = 0;
+ free_count = 0;
+
+ while (start <= max) {
+ start = mb_find_next_zero_bit(bitmap, max + 1, start);
+ if (start > max)
+ break;
+
+ next = mb_find_next_bit(bitmap, last + 1, start);
+ if (origin_start == 0 && next >= last)
+ set_trimmed = true;
+
+ if ((next - start) >= minblocks) {
+ int ret = ext4_trim_extent(sb, start, next - start, e4b);
+
+ if (ret && ret != -EOPNOTSUPP)
+ return count;
+ count += next - start;
+ }
+ free_count += next - start;
+ start = next + 1;
+
+ if (ext4_trim_interrupted())
+ return count;
+
+ if (need_resched()) {
+ ext4_unlock_group(sb, e4b->bd_group);
+ cond_resched();
+ ext4_lock_group(sb, e4b->bd_group);
+ }
+
+ if ((e4b->bd_info->bb_free - free_count) < minblocks)
+ break;
+ }
+
+ if (set_trimmed)
+ EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
+
+ return count;
+}
+
/**
* ext4_trim_all_free -- function to trim all free space in alloc. group
* @sb: super block for file system
@@ -5196,10 +5293,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ext4_grpblk_t start, ext4_grpblk_t max,
ext4_grpblk_t minblocks)
{
- void *bitmap;
- ext4_grpblk_t next, count = 0, free_count = 0;
struct ext4_buddy e4b;
- int ret = 0;
+ int ret;
trace_ext4_trim_all_free(sb, group, start, max);
@@ -5209,58 +5304,20 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ret, group);
return ret;
}
- bitmap = e4b.bd_bitmap;
ext4_lock_group(sb, group);
- if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
- minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
- goto out;
-
- start = (e4b.bd_info->bb_first_free > start) ?
- e4b.bd_info->bb_first_free : start;
-
- while (start <= max) {
- start = mb_find_next_zero_bit(bitmap, max + 1, start);
- if (start > max)
- break;
- next = mb_find_next_bit(bitmap, max + 1, start);
- if ((next - start) >= minblocks) {
- ret = ext4_trim_extent(sb, start,
- next - start, group, &e4b);
- if (ret && ret != -EOPNOTSUPP)
- break;
- ret = 0;
- count += next - start;
- }
- free_count += next - start;
- start = next + 1;
-
- if (fatal_signal_pending(current)) {
- count = -ERESTARTSYS;
- break;
- }
-
- if (need_resched()) {
- ext4_unlock_group(sb, group);
- cond_resched();
- ext4_lock_group(sb, group);
- }
-
- if ((e4b.bd_info->bb_free - free_count) < minblocks)
- break;
- }
+ if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
+ minblocks < EXT4_SB(sb)->s_last_trim_minblks)
+ ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
+ else
+ ret = 0;
- if (!ret) {
- ret = count;
- EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
- }
-out:
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
ext4_debug("trimmed %d blocks in the group %d\n",
- count, group);
+ ret, group);
return ret;
}
@@ -5305,7 +5362,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
goto out;
}
- if (end >= max_blks)
+ if (end >= max_blks - 1)
end = max_blks - 1;
if (end <= first_data_blk)
goto out;
@@ -5322,6 +5379,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
for (group = first_group; group <= last_group; group++) {
+ if (ext4_trim_interrupted())
+ break;
grp = ext4_get_group_info(sb, group);
/* We only do this if the grp has never been initialized */
if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
@@ -5338,10 +5397,9 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
*/
if (group == last_group)
end = last_cluster;
-
if (grp->bb_free >= minlen) {
cnt = ext4_trim_all_free(sb, group, first_cluster,
- end, minlen);
+ end, minlen);
if (cnt < 0) {
ret = cnt;
break;
@@ -5357,7 +5415,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
}
if (!ret)
- atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
+ EXT4_SB(sb)->s_last_trim_minblks = minlen;
out:
range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
@@ -5386,8 +5444,7 @@ ext4_mballoc_query_range(
ext4_lock_group(sb, group);
- start = (e4b.bd_info->bb_first_free > start) ?
- e4b.bd_info->bb_first_free : start;
+ start = max(e4b.bd_info->bb_first_free, start);
if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
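
In the trim rework above, ext4_last_grp_cluster() exists because the final block group is usually partial: clamping the scan to EXT4_CLUSTERS_PER_GROUP(sb) - 1 there would run past the end of the device. A standalone sketch of the computation with made-up geometry:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors ext4_last_grp_cluster(): full groups hold
     * clusters_per_group clusters; the last group holds whatever
     * blocks remain, converted to clusters. */
    static uint64_t last_cluster(uint64_t total_blocks,
                                 uint64_t group_first_block,
                                 unsigned cluster_bits,
                                 unsigned clusters_per_group,
                                 int is_last_group)
    {
        uint64_t n = is_last_group
            ? (total_blocks - group_first_block) >> cluster_bits
            : clusters_per_group;
        return n - 1;
    }

    int main(void)
    {
        /* 100000 blocks, 32768 blocks/group, 1 block per cluster:
         * the last group holds 100000 - 3 * 32768 = 1696 blocks. */
        printf("full group: %llu\n", (unsigned long long)
               last_cluster(100000, 0, 0, 32768, 0));
        printf("last group: %llu\n", (unsigned long long)
               last_cluster(100000, 98304, 0, 32768, 1));
        return 0;
    }
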
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 88c98f17e3d9..4e442bad6d73 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -199,6 +199,20 @@ static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
(fex->fe_start << EXT4_SB(sb)->s_cluster_bits);
}
+static inline loff_t extent_logical_end(struct ext4_sb_info *sbi,
+ struct ext4_free_extent *fex)
+{
+ /* Use loff_t to avoid end exceeding ext4_lblk_t max. */
+ return (loff_t)fex->fe_logical + EXT4_C2B(sbi, fex->fe_len);
+}
+
+static inline loff_t pa_logical_end(struct ext4_sb_info *sbi,
+ struct ext4_prealloc_space *pa)
+{
+ /* Use loff_t to avoid end exceeding ext4_lblk_t max. */
+ return (loff_t)pa->pa_lstart + EXT4_C2B(sbi, pa->pa_len);
+}
+
typedef int (*ext4_mballoc_query_range_fn)(
struct super_block *sb,
ext4_group_t agno,
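
The two helpers above widen to loff_t before adding because a preallocation or extent ending near EXT_MAX_BLOCKS overflows 32-bit ext4_lblk_t arithmetic and would compare as a tiny value. A short demonstration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t lstart = 0xfffffff0u;  /* logical start near the limit */
        uint32_t len    = 0x20;

        uint32_t narrow = lstart + len;          /* wraps to 0x10 */
        int64_t  wide   = (int64_t)lstart + len; /* what the helpers do */

        printf("32-bit end: 0x%x\n", narrow);
        printf("64-bit end: 0x%llx\n", (unsigned long long)wide);
        return 0;
    }
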
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index c5b2ea1a9372..dbba3c3a2f06 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -435,7 +435,7 @@ int ext4_ext_migrate(struct inode *inode)
struct inode *tmp_inode = NULL;
struct migrate_struct lb;
unsigned long max_entries;
- __u32 goal;
+ __u32 goal, tmp_csum_seed;
uid_t owner[2];
/*
@@ -443,7 +443,8 @@ int ext4_ext_migrate(struct inode *inode)
* already is extent-based, error out.
*/
if (!ext4_has_feature_extents(inode->i_sb) ||
- (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+ ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
+ ext4_has_inline_data(inode))
return -EINVAL;
if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
@@ -483,6 +484,7 @@ int ext4_ext_migrate(struct inode *inode)
* the migration.
*/
ei = EXT4_I(inode);
+ tmp_csum_seed = EXT4_I(tmp_inode)->i_csum_seed;
EXT4_I(tmp_inode)->i_csum_seed = ei->i_csum_seed;
i_size_write(tmp_inode, i_size_read(inode));
/*
@@ -593,6 +595,7 @@ err_out:
* the inode is not visible to user space.
*/
tmp_inode->i_blocks = 0;
+ EXT4_I(tmp_inode)->i_csum_seed = tmp_csum_seed;
/* Reset the extent details */
ext4_ext_tree_init(handle, tmp_inode);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 30ce3dc69378..e71ab64f1df5 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -615,6 +615,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
goto out;
o_end = o_start + len;
+ *moved_len = 0;
while (o_start < o_end) {
struct ext4_extent *ex;
ext4_lblk_t cur_blk, next_blk;
@@ -670,7 +671,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
*/
ext4_double_up_write_data_sem(orig_inode, donor_inode);
/* Swap original branches with new branches */
- move_extent_per_page(o_filp, donor_inode,
+ *moved_len += move_extent_per_page(o_filp, donor_inode,
orig_page_index, donor_page_index,
offset_in_page, cur_len,
unwritten, &ret);
@@ -680,9 +681,6 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
o_start += cur_len;
d_start += cur_len;
}
- *moved_len = o_start - orig_blk;
- if (*moved_len > len)
- *moved_len = len;
out:
if (*moved_len) {
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index d151892db8b0..87c6d619a564 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -54,6 +54,7 @@ static struct buffer_head *ext4_append(handle_t *handle,
struct inode *inode,
ext4_lblk_t *block)
{
+ struct ext4_map_blocks map;
struct buffer_head *bh;
int err;
@@ -63,6 +64,21 @@ static struct buffer_head *ext4_append(handle_t *handle,
return ERR_PTR(-ENOSPC);
*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
+ map.m_lblk = *block;
+ map.m_len = 1;
+
+ /*
+ * We're appending a new directory block. Make sure the block is not
+ * allocated yet, otherwise we will end up corrupting the
+ * directory.
+ */
+ err = ext4_map_blocks(NULL, inode, &map, 0);
+ if (err < 0)
+ return ERR_PTR(err);
+ if (err) {
+ EXT4_ERROR_INODE(inode, "Logical block already allocated");
+ return ERR_PTR(-EFSCORRUPTED);
+ }
bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
if (IS_ERR(bh))
@@ -309,17 +325,17 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
struct buffer_head *bh)
{
struct ext4_dir_entry_tail *t;
+ int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
#ifdef PARANOID
struct ext4_dir_entry *d, *top;
d = (struct ext4_dir_entry *)bh->b_data;
top = (struct ext4_dir_entry *)(bh->b_data +
- (EXT4_BLOCK_SIZE(inode->i_sb) -
- sizeof(struct ext4_dir_entry_tail)));
- while (d < top && d->rec_len)
+ (blocksize - sizeof(struct ext4_dir_entry_tail)));
+ while (d < top && ext4_rec_len_from_disk(d->rec_len, blocksize))
d = (struct ext4_dir_entry *)(((void *)d) +
- le16_to_cpu(d->rec_len));
+ ext4_rec_len_from_disk(d->rec_len, blocksize));
if (d != top)
return NULL;
@@ -330,7 +346,8 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
#endif
if (t->det_reserved_zero1 ||
- le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
+ (ext4_rec_len_from_disk(t->det_rec_len, blocksize) !=
+ sizeof(struct ext4_dir_entry_tail)) ||
t->det_reserved_zero2 ||
t->det_reserved_ft != EXT4_FT_DIR_CSUM)
return NULL;
@@ -411,13 +428,14 @@ static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
struct ext4_dir_entry *dp;
struct dx_root_info *root;
int count_offset;
+ int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
+ unsigned int rlen = ext4_rec_len_from_disk(dirent->rec_len, blocksize);
- if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
+ if (rlen == blocksize)
count_offset = 8;
- else if (le16_to_cpu(dirent->rec_len) == 12) {
+ else if (rlen == 12) {
dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
- if (le16_to_cpu(dp->rec_len) !=
- EXT4_BLOCK_SIZE(inode->i_sb) - 12)
+ if (ext4_rec_len_from_disk(dp->rec_len, blocksize) != blocksize - 12)
return NULL;
root = (struct dx_root_info *)(((void *)dp + 12));
if (root->reserved_zero ||
@@ -1230,6 +1248,7 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh,
unsigned int buflen = bh->b_size;
char *base = bh->b_data;
struct dx_hash_info h = *hinfo;
+ int blocksize = EXT4_BLOCK_SIZE(dir->i_sb);
if (ext4_has_metadata_csum(dir->i_sb))
buflen -= sizeof(struct ext4_dir_entry_tail);
@@ -1243,11 +1262,12 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh,
map_tail--;
map_tail->hash = h.hash;
map_tail->offs = ((char *) de - base)>>2;
- map_tail->size = le16_to_cpu(de->rec_len);
+ map_tail->size = ext4_rec_len_from_disk(de->rec_len,
+ blocksize);
count++;
cond_resched();
}
- de = ext4_next_entry(de, dir->i_sb->s_blocksize);
+ de = ext4_next_entry(de, blocksize);
}
return count;
}
@@ -1486,11 +1506,10 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
int has_inline_data = 1;
ret = ext4_find_inline_entry(dir, fname, res_dir,
&has_inline_data);
- if (has_inline_data) {
- if (inlined)
- *inlined = 1;
+ if (inlined)
+ *inlined = has_inline_data;
+ if (has_inline_data)
goto cleanup_and_exit;
- }
}
if ((namelen <= 2) && (name[0] == '.') &&
@@ -2125,8 +2144,16 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
memcpy(data2, de, len);
de = (struct ext4_dir_entry_2 *) data2;
top = data2 + len;
- while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top)
+ while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) {
+ if (ext4_check_dir_entry(dir, NULL, de, bh2, data2, len,
+ (data2 + (blocksize - csum_size) -
+ (char *) de))) {
+ brelse(bh2);
+ brelse(bh);
+ return -EFSCORRUPTED;
+ }
de = de2;
+ }
de->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) -
(char *) de, blocksize);
@@ -2913,11 +2940,8 @@ bool ext4_empty_dir(struct inode *inode)
de = (struct ext4_dir_entry_2 *) (bh->b_data +
(offset & (sb->s_blocksize - 1)));
if (ext4_check_dir_entry(inode, NULL, de, bh,
- bh->b_data, bh->b_size, offset)) {
- offset = (offset | (sb->s_blocksize - 1)) + 1;
- continue;
- }
- if (le32_to_cpu(de->inode)) {
+ bh->b_data, bh->b_size, offset) ||
+ le32_to_cpu(de->inode)) {
brelse(bh);
return false;
}
@@ -3609,7 +3633,8 @@ static void ext4_resetent(handle_t *handle, struct ext4_renament *ent,
* so the old->de may no longer be valid and we need to find it again
* before resetting the old inode info.
*/
- old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
+ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de,
+ &old.inlined);
if (IS_ERR(old.bh))
retval = PTR_ERR(old.bh);
if (!old.bh)
@@ -3759,6 +3784,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
retval = dquot_initialize(old.dir);
if (retval)
return retval;
+ retval = dquot_initialize(old.inode);
+ if (retval)
+ return retval;
retval = dquot_initialize(new.dir);
if (retval)
return retval;
@@ -3771,9 +3799,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
return retval;
}
- old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
+ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de,
+ &old.inlined);
if (IS_ERR(old.bh))
return PTR_ERR(old.bh);
+
/*
* Check for inode number is _not_ due to possible IO errors.
* We might rmdir the source, keep it as pwd of some process
@@ -3932,6 +3962,7 @@ release_bh:
brelse(old.dir_bh);
brelse(old.bh);
brelse(new.bh);
+
return retval;
}
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index b66b335a0ca6..5858fb9b5cd5 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -380,7 +380,8 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
static int io_submit_add_bh(struct ext4_io_submit *io,
struct inode *inode,
- struct page *page,
+ struct page *pagecache_page,
+ struct page *bounce_page,
struct buffer_head *bh)
{
int ret;
@@ -395,10 +396,11 @@ submit_and_retry:
return ret;
io->io_bio->bi_write_hint = inode->i_write_hint;
}
- ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
+ ret = bio_add_page(io->io_bio, bounce_page ?: pagecache_page,
+ bh->b_size, bh_offset(bh));
if (ret != bh->b_size)
goto submit_and_retry;
- wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
+ wbc_account_cgroup_owner(io->io_wbc, pagecache_page, bh->b_size);
io->io_next_block++;
return 0;
}
@@ -511,7 +513,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
do {
if (!buffer_async_write(bh))
continue;
- ret = io_submit_add_bh(io, inode, bounce_page ?: page, bh);
+ ret = io_submit_add_bh(io, inode, page, bounce_page, bh);
if (ret) {
/*
* We only get here on ENOMEM. Not much else
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 6410c1e098d3..409b4ad28e71 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -227,17 +227,24 @@ struct ext4_new_flex_group_data {
in the flex group */
__u16 *bg_flags; /* block group flags of groups
in @groups */
+ ext4_group_t resize_bg; /* number of allocated
+ new_group_data */
ext4_group_t count; /* number of groups in @groups
*/
};
/*
+ * Avoid memory allocation failures by limiting how many groups are added at a time.
+ */
+#define MAX_RESIZE_BG 16384
+
+/*
* alloc_flex_gd() allocates a ext4_new_flex_group_data with size of
* @flexbg_size.
*
* Returns NULL on failure otherwise address of the allocated structure.
*/
-static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
+static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned int flexbg_size)
{
struct ext4_new_flex_group_data *flex_gd;
@@ -245,17 +252,18 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
if (flex_gd == NULL)
goto out3;
- if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
- goto out2;
- flex_gd->count = flexbg_size;
+ if (unlikely(flexbg_size > MAX_RESIZE_BG))
+ flex_gd->resize_bg = MAX_RESIZE_BG;
+ else
+ flex_gd->resize_bg = flexbg_size;
- flex_gd->groups = kmalloc_array(flexbg_size,
+ flex_gd->groups = kmalloc_array(flex_gd->resize_bg,
sizeof(struct ext4_new_group_data),
GFP_NOFS);
if (flex_gd->groups == NULL)
goto out2;
- flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16),
+ flex_gd->bg_flags = kmalloc_array(flex_gd->resize_bg, sizeof(__u16),
GFP_NOFS);
if (flex_gd->bg_flags == NULL)
goto out1;
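
The new resize_bg field caps how many group descriptors are allocated per round at MAX_RESIZE_BG, so a huge flexbg no longer turns into one oversized kmalloc_array() that can fail; the resize loop simply runs more iterations. A small sketch of the clamp, with illustrative values:

#include <stdio.h>

#define MAX_RESIZE_BG 16384               /* cap taken from the patch */

static unsigned int clamp_resize_bg(unsigned int flexbg_size)
{
	return flexbg_size > MAX_RESIZE_BG ? MAX_RESIZE_BG : flexbg_size;
}

int main(void)
{
	printf("%u -> %u\n", 65536u, clamp_resize_bg(65536));  /* -> 16384 */
	printf("%u -> %u\n",   256u, clamp_resize_bg(256));    /* ->   256 */
	return 0;
}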
@@ -292,7 +300,7 @@ static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
*/
static int ext4_alloc_group_tables(struct super_block *sb,
struct ext4_new_flex_group_data *flex_gd,
- int flexbg_size)
+ unsigned int flexbg_size)
{
struct ext4_new_group_data *group_data = flex_gd->groups;
ext4_fsblk_t start_blk;
@@ -393,12 +401,12 @@ next_group:
group = group_data[0].group;
printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
- "%d groups, flexbg size is %d:\n", flex_gd->count,
+ "%u groups, flexbg size is %u:\n", flex_gd->count,
flexbg_size);
for (i = 0; i < flex_gd->count; i++) {
ext4_debug(
- "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
+ "adding %s group %u: %u blocks (%u free, %u mdata blocks)\n",
ext4_bg_has_super(sb, group + i) ? "normal" :
"no-super", group + i,
group_data[i].blocks_count,
@@ -572,13 +580,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
goto handle_itb;
- if (meta_bg == 1) {
- ext4_group_t first_group;
- first_group = ext4_meta_bg_first_group(sb, group);
- if (first_group != group + 1 &&
- first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
- goto handle_itb;
- }
+ if (meta_bg == 1)
+ goto handle_itb;
block = start + ext4_bg_has_super(sb, group);
/* Copy all of the GDT blocks into the backup in this group */
@@ -1483,6 +1486,7 @@ static void ext4_update_super(struct super_block *sb,
* Update the fs overhead information
*/
ext4_calculate_overhead(sb);
+ es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
if (test_opt(sb, DEBUG))
printk(KERN_DEBUG "EXT4-fs: added group %u:"
@@ -1564,10 +1568,12 @@ exit_journal:
int gdb_num_end = ((group + flex_gd->count - 1) /
EXT4_DESC_PER_BLOCK(sb));
int meta_bg = ext4_has_feature_meta_bg(sb);
+ sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
+ ext4_group_first_block_no(sb, 0);
sector_t old_gdb = 0;
- update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
- sizeof(struct ext4_super_block), 0);
+ update_backups(sb, ext4_group_first_block_no(sb, 0),
+ (char *)es, sizeof(struct ext4_super_block), 0);
for (; gdb_num <= gdb_num_end; gdb_num++) {
struct buffer_head *gdb_bh;
@@ -1575,8 +1581,8 @@ exit_journal:
gdb_num);
if (old_gdb == gdb_bh->b_blocknr)
continue;
- update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
- gdb_bh->b_size, meta_bg);
+ update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
+ gdb_bh->b_data, gdb_bh->b_size, meta_bg);
old_gdb = gdb_bh->b_blocknr;
}
}
@@ -1586,8 +1592,7 @@ exit:
static int ext4_setup_next_flex_gd(struct super_block *sb,
struct ext4_new_flex_group_data *flex_gd,
- ext4_fsblk_t n_blocks_count,
- unsigned long flexbg_size)
+ ext4_fsblk_t n_blocks_count)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
@@ -1611,7 +1616,7 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
BUG_ON(last);
ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
- last_group = group | (flexbg_size - 1);
+ last_group = group | (flex_gd->resize_bg - 1);
if (last_group > n_group)
last_group = n_group;
@@ -1774,7 +1779,7 @@ errout:
if (test_opt(sb, DEBUG))
printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
"blocks\n", ext4_blocks_count(es));
- update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
+ update_backups(sb, ext4_group_first_block_no(sb, 0),
(char *)es, sizeof(struct ext4_super_block), 0);
}
return err;
@@ -1937,9 +1942,7 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
errout:
ret = ext4_journal_stop(handle);
- if (!err)
- err = ret;
- return ret;
+ return err ? err : ret;
invalid_resize_inode:
ext4_error(sb, "corrupted/inconsistent resize inode");
@@ -1967,8 +1970,9 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
ext4_fsblk_t o_blocks_count;
ext4_fsblk_t n_blocks_count_retry = 0;
unsigned long last_update_time = 0;
- int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
+ int err = 0;
int meta_bg;
+ unsigned int flexbg_size = ext4_flex_bg_size(sbi);
/* See if the device is actually as big as what was requested */
bh = sb_bread(sb, n_blocks_count - 1);
@@ -1978,6 +1982,16 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
}
brelse(bh);
+ /*
+ * For bigalloc, trim the requested size to the nearest cluster
+ * boundary to avoid creating an unusable filesystem. We do this
+ * silently, instead of returning an error, to avoid breaking
+ * callers that blindly resize the filesystem to the full size of
+ * the underlying block device.
+ */
+ if (ext4_has_feature_bigalloc(sb))
+ n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);
+
retry:
o_blocks_count = ext4_blocks_count(es);
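
The bigalloc trim above is plain bit masking: with EXT4_CLUSTER_BITS(sb) == 4 (an assumed value; the real shift comes from the superblock) a cluster is 16 blocks, and clearing the low 4 bits rounds the requested count down to a cluster boundary. A worked example:

#include <stdio.h>

int main(void)
{
	unsigned int cluster_bits = 4;                /* assumed; read from sb */
	unsigned long long n_blocks_count = 1000005ULL;

	/* clear the low cluster_bits bits: round down to a cluster */
	n_blocks_count &= ~((1ULL << cluster_bits) - 1);
	printf("%llu\n", n_blocks_count);             /* prints 1000000 */
	return 0;
}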
@@ -2079,7 +2093,7 @@ retry:
goto out;
}
- if (ext4_blocks_count(es) == n_blocks_count)
+ if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0)
goto out;
err = ext4_alloc_flex_bg_array(sb, n_group + 1);
@@ -2099,8 +2113,7 @@ retry:
/* Add flex groups. Note that a regular group is a
* flex group with 1 group.
*/
- while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
- flexbg_size)) {
+ while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count)) {
if (jiffies - last_update_time > HZ * 10) {
if (last_update_time)
ext4_msg(sb, KERN_INFO,
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index eba2506f4399..8ad3de7846c5 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -906,10 +906,16 @@ static void ext4_blkdev_put(struct block_device *bdev)
static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
struct block_device *bdev;
- bdev = sbi->journal_bdev;
+ bdev = sbi->s_journal_bdev;
if (bdev) {
+ /*
+ * Invalidate the journal device's buffers. We don't want them
+ * floating about in memory - the physical journal device may
+ * be hotswapped, and it breaks the `ro-after' testing code.
+ */
+ invalidate_bdev(bdev);
ext4_blkdev_put(bdev);
- sbi->journal_bdev = NULL;
+ sbi->s_journal_bdev = NULL;
}
}
@@ -1034,14 +1040,8 @@ static void ext4_put_super(struct super_block *sb)
sync_blockdev(sb->s_bdev);
invalidate_bdev(sb->s_bdev);
- if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
- /*
- * Invalidate the journal device's buffers. We don't want them
- * floating about in memory - the physical journal device may
- * hotswapped, and it breaks the `ro-after' testing code.
- */
- sync_blockdev(sbi->journal_bdev);
- invalidate_bdev(sbi->journal_bdev);
+ if (sbi->s_journal_bdev && sbi->s_journal_bdev != sb->s_bdev) {
+ sync_blockdev(sbi->s_journal_bdev);
ext4_blkdev_remove(sbi);
}
@@ -1085,6 +1085,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
return NULL;
inode_set_iversion(&ei->vfs_inode, 1);
+ ei->i_flags = 0;
spin_lock_init(&ei->i_raw_lock);
INIT_LIST_HEAD(&ei->i_prealloc_list);
spin_lock_init(&ei->i_prealloc_lock);
@@ -2474,11 +2475,9 @@ static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
crc = crc16(crc, (__u8 *)gdp, offset);
offset += sizeof(gdp->bg_checksum); /* skip checksum */
/* for checksum of struct ext4_group_desc do the rest...*/
- if (ext4_has_feature_64bit(sb) &&
- offset < le16_to_cpu(sbi->s_es->s_desc_size))
+ if (ext4_has_feature_64bit(sb) && offset < sbi->s_desc_size)
crc = crc16(crc, (__u8 *)gdp + offset,
- le16_to_cpu(sbi->s_es->s_desc_size) -
- offset);
+ sbi->s_desc_size - offset);
out:
return cpu_to_le16(crc);
@@ -3157,6 +3156,7 @@ static int ext4_lazyinit_thread(void *arg)
unsigned long next_wakeup, cur;
BUG_ON(NULL == eli);
+ set_freezable();
cont_thread:
while (true) {
@@ -3582,7 +3582,7 @@ int ext4_calculate_overhead(struct super_block *sb)
* Add the internal journal blocks whether the journal has been
* loaded or not
*/
- if (sbi->s_journal && !sbi->journal_bdev)
+ if (sbi->s_journal && !sbi->s_journal_bdev)
overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
/* j_inum for internal journal is non-zero */
@@ -4387,30 +4387,31 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
ext4_has_feature_journal_needs_recovery(sb)) {
ext4_msg(sb, KERN_ERR, "required journal recovery "
"suppressed and not mounted read-only");
- goto failed_mount_wq;
+ goto failed_mount3a;
} else {
/* Nojournal mode, all journal mount options are illegal */
- if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "journal_checksum, fs mounted w/o journal");
- goto failed_mount_wq;
- }
if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"journal_async_commit, fs mounted w/o journal");
- goto failed_mount_wq;
+ goto failed_mount3a;
+ }
+
+ if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "journal_checksum, fs mounted w/o journal");
+ goto failed_mount3a;
}
if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"commit=%lu, fs mounted w/o journal",
sbi->s_commit_interval / HZ);
- goto failed_mount_wq;
+ goto failed_mount3a;
}
if (EXT4_MOUNT_DATA_FLAGS &
(sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"data=, fs mounted w/o journal");
- goto failed_mount_wq;
+ goto failed_mount3a;
}
sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
clear_opt(sb, JOURNAL_CHECKSUM);
@@ -4776,6 +4777,7 @@ failed_mount:
ext4_blkdev_remove(sbi);
brelse(bh);
out_fail:
+ invalidate_bdev(sb->s_bdev);
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
out_free_base:
@@ -4834,7 +4836,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
journal_inode, journal_inode->i_size);
- if (!S_ISREG(journal_inode->i_mode)) {
+ if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) {
ext4_msg(sb, KERN_ERR, "invalid journal inode");
iput(journal_inode);
return NULL;
@@ -4951,7 +4953,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
be32_to_cpu(journal->j_superblock->s_nr_users));
goto out_journal;
}
- EXT4_SB(sb)->journal_bdev = bdev;
+ EXT4_SB(sb)->s_journal_bdev = bdev;
ext4_init_journal_params(sb, journal);
return journal;
@@ -5604,9 +5606,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
#ifdef CONFIG_QUOTA
- /* Release old quota file names */
- for (i = 0; i < EXT4_MAXQUOTAS; i++)
- kfree(old_opts.s_qf_names[i]);
if (enable_quota) {
if (sb_any_quota_suspended(sb))
dquot_resume(sb, -1);
@@ -5616,6 +5615,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
}
+ /* Release old quota file names */
+ for (i = 0; i < EXT4_MAXQUOTAS; i++)
+ kfree(old_opts.s_qf_names[i]);
#endif
if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
ext4_release_system_zone(sb);
@@ -5632,6 +5634,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
return 0;
restore_opts:
+ /*
+ * If there was a failing r/w to ro transition, we may need to
+ * re-enable quota.
+ */
+ if ((sb->s_flags & SB_RDONLY) && !(old_sb_flags & SB_RDONLY) &&
+ sb_any_quota_suspended(sb))
+ dquot_resume(sb, -1);
sb->s_flags = old_sb_flags;
sbi->s_mount_opt = old_opts.s_mount_opt;
sbi->s_mount_opt2 = old_opts.s_mount_opt2;
@@ -5836,7 +5845,7 @@ static int ext4_write_info(struct super_block *sb, int type)
handle_t *handle;
/* Data block + inode block */
- handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
+ handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2);
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = dquot_commit_info(sb, type);
@@ -5953,6 +5962,20 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
return err;
}
+static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum)
+{
+ switch (type) {
+ case USRQUOTA:
+ return qf_inum == EXT4_USR_QUOTA_INO;
+ case GRPQUOTA:
+ return qf_inum == EXT4_GRP_QUOTA_INO;
+ case PRJQUOTA:
+ return qf_inum >= EXT4_GOOD_OLD_FIRST_INO;
+ default:
+ BUG();
+ }
+}
+
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
unsigned int flags)
{
@@ -5969,9 +5992,16 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
if (!qf_inums[type])
return -EPERM;
+ if (!ext4_check_quota_inum(type, qf_inums[type])) {
+ ext4_error(sb, "Bad quota inum: %lu, type: %d",
+ qf_inums[type], type);
+ return -EUCLEAN;
+ }
+
qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
if (IS_ERR(qf_inode)) {
- ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
+ ext4_error(sb, "Bad quota inode: %lu, type: %d",
+ qf_inums[type], type);
return PTR_ERR(qf_inode);
}
@@ -6010,8 +6040,9 @@ static int ext4_enable_quotas(struct super_block *sb)
if (err) {
ext4_warning(sb,
"Failed to enable quota tracking "
- "(type=%d, err=%d). Please run "
- "e2fsck to fix.", type, err);
+ "(type=%d, err=%d, ino=%lu). "
+ "Please run e2fsck to fix.", type,
+ err, qf_inums[type]);
for (type--; type >= 0; type--) {
struct inode *inode;
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 9394360ff137..7768d7146d73 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -383,6 +383,11 @@ static void ext4_sb_release(struct kobject *kobj)
complete(&sbi->s_kobj_unregister);
}
+static void ext4_feat_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
static const struct sysfs_ops ext4_attr_ops = {
.show = ext4_attr_show,
.store = ext4_attr_store,
@@ -397,7 +402,7 @@ static struct kobj_type ext4_sb_ktype = {
static struct kobj_type ext4_feat_ktype = {
.default_groups = ext4_feat_groups,
.sysfs_ops = &ext4_attr_ops,
- .release = (void (*)(struct kobject *))kfree,
+ .release = ext4_feat_release,
};
static struct kobject *ext4_root;
diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
index 6a30e54c1128..9879ea046e5a 100644
--- a/fs/ext4/verity.c
+++ b/fs/ext4/verity.c
@@ -79,8 +79,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
size_t n = min_t(size_t, count,
PAGE_SIZE - offset_in_page(pos));
struct page *page;
- void *fsdata;
- void *addr;
+ void *fsdata = NULL;
int res;
res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0,
@@ -88,9 +87,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
if (res)
return res;
- addr = kmap_atomic(page);
- memcpy(addr + offset_in_page(pos), buf, n);
- kunmap_atomic(addr);
+ memcpy_to_page(page, offset_in_page(pos), buf, n);
res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n,
page, fsdata);
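
memcpy_to_page()/memcpy_from_page() bundle the map-copy-flush-unmap sequence that the removed kmap_atomic() code spelled out by hand. A userspace model of the helper, with a plain buffer standing in for the page (toy names, not the kernel API):

#include <stdio.h>
#include <string.h>

/* The real helper also does the kmap/kunmap and dcache flushing. */
struct toy_page { char data[4096]; };

static void toy_memcpy_to_page(struct toy_page *p, size_t off,
			       const void *from, size_t len)
{
	memcpy(p->data + off, from, len);  /* kernel: map, copy, flush, unmap */
}

int main(void)
{
	struct toy_page pg = { { 0 } };

	toy_memcpy_to_page(&pg, 5, "verity", 6);
	printf("%.6s\n", pg.data + 5);     /* prints "verity" */
	return 0;
}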
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 20e40cac819e..cd371ac25f84 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -121,7 +121,11 @@ ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
#ifdef CONFIG_LOCKDEP
void ext4_xattr_inode_set_class(struct inode *ea_inode)
{
+ struct ext4_inode_info *ei = EXT4_I(ea_inode);
+
lockdep_set_subclass(&ea_inode->i_rwsem, 1);
+ (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */
+ lockdep_set_subclass(&ei->i_data_sem, I_DATA_SEM_EA);
}
#endif
@@ -384,7 +388,18 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
struct inode *inode;
int err;
- inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
+ /*
+ * We have to check for this corruption early as otherwise
+ * iget_locked() could wait indefinitely for the state of our
+ * parent inode.
+ */
+ if (parent->i_ino == ea_ino) {
+ ext4_error(parent->i_sb,
+ "Parent and EA inode have the same ino %lu", ea_ino);
+ return -EFSCORRUPTED;
+ }
+
+ inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_EA_INODE);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
ext4_error(parent->i_sb,
@@ -392,23 +407,6 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
err);
return err;
}
-
- if (is_bad_inode(inode)) {
- ext4_error(parent->i_sb,
- "error while reading EA inode %lu is_bad_inode",
- ea_ino);
- err = -EIO;
- goto error;
- }
-
- if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
- ext4_error(parent->i_sb,
- "EA inode %lu does not have EXT4_EA_INODE_FL flag",
- ea_ino);
- err = -EINVAL;
- goto error;
- }
-
ext4_xattr_inode_set_class(inode);
/*
@@ -429,9 +427,21 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
*ea_inode = inode;
return 0;
-error:
- iput(inode);
- return err;
+}
+
+/* Remove entry from mbcache when EA inode is getting evicted */
+void ext4_evict_ea_inode(struct inode *inode)
+{
+ struct mb_cache_entry *oe;
+
+ if (!EA_INODE_CACHE(inode))
+ return;
+ /* Wait for entry to get unused so that we can remove it */
+ while ((oe = mb_cache_entry_delete_or_get(EA_INODE_CACHE(inode),
+ ext4_xattr_inode_get_hash(inode), inode->i_ino))) {
+ mb_cache_entry_wait_unused(oe);
+ mb_cache_entry_put(EA_INODE_CACHE(inode), oe);
+ }
}
static int
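
The eviction loop above relies on the delete-or-get contract: mb_cache_entry_delete_or_get() either removes the entry outright (returning NULL) or hands back a reference the caller waits on before retrying. A toy model of that contract, with a bare refcount standing in for the cache entry:

#include <stdio.h>

struct toy_entry { int refs; };

/* NULL means the entry was removed; non-NULL means it is still in use
 * and the caller received a temporary reference to wait on. */
static struct toy_entry *delete_or_get(struct toy_entry *e)
{
	return e->refs ? e : NULL;
}

static void wait_unused(struct toy_entry *e)
{
	e->refs = 0;                       /* stand-in for the real wait */
}

int main(void)
{
	struct toy_entry ent = { .refs = 2 };
	struct toy_entry *oe;

	while ((oe = delete_or_get(&ent))) {
		wait_unused(oe);           /* mb_cache_entry_wait_unused() */
		/* mb_cache_entry_put() would drop the temporary ref here */
	}
	printf("entry gone, refs=%d\n", ent.refs);
	return 0;
}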
@@ -1019,10 +1029,8 @@ static int ext4_xattr_ensure_credits(handle_t *handle, struct inode *inode,
static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
int ref_change)
{
- struct mb_cache *ea_inode_cache = EA_INODE_CACHE(ea_inode);
struct ext4_iloc iloc;
s64 ref_count;
- u32 hash;
int ret;
inode_lock(ea_inode);
@@ -1045,14 +1053,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
set_nlink(ea_inode, 1);
ext4_orphan_del(handle, ea_inode);
-
- if (ea_inode_cache) {
- hash = ext4_xattr_inode_get_hash(ea_inode);
- mb_cache_entry_create(ea_inode_cache,
- GFP_NOFS, hash,
- ea_inode->i_ino,
- true /* reusable */);
- }
}
} else {
WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
@@ -1065,12 +1065,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
clear_nlink(ea_inode);
ext4_orphan_add(handle, ea_inode);
-
- if (ea_inode_cache) {
- hash = ext4_xattr_inode_get_hash(ea_inode);
- mb_cache_entry_delete(ea_inode_cache, hash,
- ea_inode->i_ino);
- }
}
}
@@ -1249,6 +1243,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
if (error)
goto out;
+retry_ref:
lock_buffer(bh);
hash = le32_to_cpu(BHDR(bh)->h_hash);
ref = le32_to_cpu(BHDR(bh)->h_refcount);
@@ -1258,9 +1253,18 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
* This must happen under buffer lock for
* ext4_xattr_block_set() to reliably detect freed block
*/
- if (ea_block_cache)
- mb_cache_entry_delete(ea_block_cache, hash,
- bh->b_blocknr);
+ if (ea_block_cache) {
+ struct mb_cache_entry *oe;
+
+ oe = mb_cache_entry_delete_or_get(ea_block_cache, hash,
+ bh->b_blocknr);
+ if (oe) {
+ unlock_buffer(bh);
+ mb_cache_entry_wait_unused(oe);
+ mb_cache_entry_put(ea_block_cache, oe);
+ goto retry_ref;
+ }
+ }
get_bh(bh);
unlock_buffer(bh);
@@ -1284,7 +1288,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
ce = mb_cache_entry_get(ea_block_cache, hash,
bh->b_blocknr);
if (ce) {
- ce->e_reusable = 1;
+ set_bit(MBE_REUSABLE_B, &ce->e_flags);
mb_cache_entry_put(ea_block_cache, ce);
}
}
@@ -1423,6 +1427,13 @@ static struct inode *ext4_xattr_inode_create(handle_t *handle,
uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) };
int err;
+ if (inode->i_sb->s_root == NULL) {
+ ext4_warning(inode->i_sb,
+ "refuse to create EA inode when umounting");
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
+ }
+
/*
* Let the next inode be the goal, so we try and allocate the EA inode
* in the same group, or nearby one.
@@ -1442,6 +1453,9 @@ static struct inode *ext4_xattr_inode_create(handle_t *handle,
if (!err)
err = ext4_inode_attach_jinode(ea_inode);
if (err) {
+ if (ext4_xattr_inode_dec_ref(handle, ea_inode))
+ ext4_warning_inode(ea_inode,
+ "cleanup dec ref error %d", err);
iput(ea_inode);
return ERR_PTR(err);
}
@@ -1487,11 +1501,11 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
while (ce) {
ea_inode = ext4_iget(inode->i_sb, ce->e_value,
- EXT4_IGET_NORMAL);
- if (!IS_ERR(ea_inode) &&
- !is_bad_inode(ea_inode) &&
- (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
- i_size_read(ea_inode) == value_len &&
+ EXT4_IGET_EA_INODE);
+ if (IS_ERR(ea_inode))
+ goto next_entry;
+ ext4_xattr_inode_set_class(ea_inode);
+ if (i_size_read(ea_inode) == value_len &&
!ext4_xattr_inode_read(ea_inode, ea_data, value_len) &&
!ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data,
value_len) &&
@@ -1501,9 +1515,8 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
kvfree(ea_data);
return ea_inode;
}
-
- if (!IS_ERR(ea_inode))
- iput(ea_inode);
+ iput(ea_inode);
+ next_entry:
ce = mb_cache_entry_find_next(ea_inode_cache, ce);
}
kvfree(ea_data);
@@ -1729,6 +1742,20 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
memmove(here, (void *)here + size,
(void *)last - (void *)here + sizeof(__u32));
memset(last, 0, size);
+
+ /*
+ * Update i_inline_off - the moved ibody region might contain
+ * the system.data attribute. Handling a failure here won't
+ * cause other complications for setting an xattr.
+ */
+ if (!is_block && ext4_has_inline_data(inode)) {
+ ret = ext4_find_inline_data_nolock(inode);
+ if (ret) {
+ ext4_warning_inode(inode,
+ "unable to update i_inline_off");
+ goto out;
+ }
+ }
} else if (s->not_found) {
/* Insert new name. */
size_t size = EXT4_XATTR_LEN(name_len);
@@ -1868,6 +1895,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
#define header(x) ((struct ext4_xattr_header *)(x))
if (s->base) {
+ int offset = (char *)s->here - bs->bh->b_data;
+
BUFFER_TRACE(bs->bh, "get_write_access");
error = ext4_journal_get_write_access(handle, bs->bh);
if (error)
@@ -1882,9 +1911,20 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
* ext4_xattr_block_set() to reliably detect modified
* block
*/
- if (ea_block_cache)
- mb_cache_entry_delete(ea_block_cache, hash,
- bs->bh->b_blocknr);
+ if (ea_block_cache) {
+ struct mb_cache_entry *oe;
+
+ oe = mb_cache_entry_delete_or_get(ea_block_cache,
+ hash, bs->bh->b_blocknr);
+ if (oe) {
+ /*
+ * Xattr block is getting reused. Leave
+ * it alone.
+ */
+ mb_cache_entry_put(ea_block_cache, oe);
+ goto clone_block;
+ }
+ }
ea_bdebug(bs->bh, "modifying in-place");
error = ext4_xattr_set_entry(i, s, handle, inode,
true /* is_block */);
@@ -1899,50 +1939,47 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
if (error)
goto cleanup;
goto inserted;
- } else {
- int offset = (char *)s->here - bs->bh->b_data;
+ }
+clone_block:
+ unlock_buffer(bs->bh);
+ ea_bdebug(bs->bh, "cloning");
+ s->base = kmemdup(BHDR(bs->bh), bs->bh->b_size, GFP_NOFS);
+ error = -ENOMEM;
+ if (s->base == NULL)
+ goto cleanup;
+ s->first = ENTRY(header(s->base)+1);
+ header(s->base)->h_refcount = cpu_to_le32(1);
+ s->here = ENTRY(s->base + offset);
+ s->end = s->base + bs->bh->b_size;
- unlock_buffer(bs->bh);
- ea_bdebug(bs->bh, "cloning");
- s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
- error = -ENOMEM;
- if (s->base == NULL)
+ /*
+ * If existing entry points to an xattr inode, we need
+ * to prevent ext4_xattr_set_entry() from decrementing
+ * ref count on it because the reference belongs to the
+ * original block. In this case, make the entry look
+ * like it has an empty value.
+ */
+ if (!s->not_found && s->here->e_value_inum) {
+ ea_ino = le32_to_cpu(s->here->e_value_inum);
+ error = ext4_xattr_inode_iget(inode, ea_ino,
+ le32_to_cpu(s->here->e_hash),
+ &tmp_inode);
+ if (error)
goto cleanup;
- memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
- s->first = ENTRY(header(s->base)+1);
- header(s->base)->h_refcount = cpu_to_le32(1);
- s->here = ENTRY(s->base + offset);
- s->end = s->base + bs->bh->b_size;
-
- /*
- * If existing entry points to an xattr inode, we need
- * to prevent ext4_xattr_set_entry() from decrementing
- * ref count on it because the reference belongs to the
- * original block. In this case, make the entry look
- * like it has an empty value.
- */
- if (!s->not_found && s->here->e_value_inum) {
- ea_ino = le32_to_cpu(s->here->e_value_inum);
- error = ext4_xattr_inode_iget(inode, ea_ino,
- le32_to_cpu(s->here->e_hash),
- &tmp_inode);
- if (error)
- goto cleanup;
- if (!ext4_test_inode_state(tmp_inode,
- EXT4_STATE_LUSTRE_EA_INODE)) {
- /*
- * Defer quota free call for previous
- * inode until success is guaranteed.
- */
- old_ea_inode_quota = le32_to_cpu(
- s->here->e_value_size);
- }
- iput(tmp_inode);
-
- s->here->e_value_inum = 0;
- s->here->e_value_size = 0;
+ if (!ext4_test_inode_state(tmp_inode,
+ EXT4_STATE_LUSTRE_EA_INODE)) {
+ /*
+ * Defer quota free call for previous
+ * inode until success is guaranteed.
+ */
+ old_ea_inode_quota = le32_to_cpu(
+ s->here->e_value_size);
}
+ iput(tmp_inode);
+
+ s->here->e_value_inum = 0;
+ s->here->e_value_size = 0;
}
} else {
/* Allocate a buffer where we construct the new block. */
@@ -1993,8 +2030,9 @@ inserted:
else {
u32 ref;
+#ifdef EXT4_XATTR_DEBUG
WARN_ON_ONCE(dquot_initialize_needed(inode));
-
+#endif
/* The old block is released after updating
the inode. */
error = dquot_alloc_block(inode,
@@ -2009,18 +2047,13 @@ inserted:
lock_buffer(new_bh);
/*
* We have to be careful about races with
- * freeing, rehashing or adding references to
- * xattr block. Once we hold buffer lock xattr
- * block's state is stable so we can check
- * whether the block got freed / rehashed or
- * not. Since we unhash mbcache entry under
- * buffer lock when freeing / rehashing xattr
- * block, checking whether entry is still
- * hashed is reliable. Same rules hold for
- * e_reusable handling.
+ * adding references to xattr block. Once we
+ * hold the buffer lock, the xattr block's state is
+ * stable, so we can check whether the additional
+ * reference fits.
*/
- if (hlist_bl_unhashed(&ce->e_hash_list) ||
- !ce->e_reusable) {
+ ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
+ if (ref > EXT4_XATTR_REFCOUNT_MAX) {
/*
* Undo everything and check mbcache
* again.
@@ -2035,10 +2068,9 @@ inserted:
new_bh = NULL;
goto inserted;
}
- ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
- if (ref >= EXT4_XATTR_REFCOUNT_MAX)
- ce->e_reusable = 0;
+ if (ref == EXT4_XATTR_REFCOUNT_MAX)
+ clear_bit(MBE_REUSABLE_B, &ce->e_flags);
ea_bdebug(new_bh, "reusing; refcount now=%d",
ref);
ext4_xattr_block_csum_set(inode, new_bh);
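
With the buffer lock held, the refcount in the block header is stable, so the whole reuse decision reduces to one bound check: does one more reference still fit below EXT4_XATTR_REFCOUNT_MAX? A sketch of that check (the limit value here is illustrative; the real one is defined in fs/ext4/xattr.h):

#include <stdio.h>

#define EXT4_XATTR_REFCOUNT_MAX 1024       /* illustrative value only */

/* Reuse succeeds iff the incremented refcount still fits the limit;
 * anything else means undo and search the cache again. */
static int try_reuse(unsigned int h_refcount)
{
	return h_refcount + 1 <= EXT4_XATTR_REFCOUNT_MAX;
}

int main(void)
{
	printf("%d\n", try_reuse(3));                        /* 1: reused */
	printf("%d\n", try_reuse(EXT4_XATTR_REFCOUNT_MAX));  /* 0: retry  */
	return 0;
}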
@@ -2062,23 +2094,16 @@ inserted:
/* We need to allocate a new block */
ext4_fsblk_t goal, block;
+#ifdef EXT4_XATTR_DEBUG
WARN_ON_ONCE(dquot_initialize_needed(inode));
-
+#endif
goal = ext4_group_first_block_no(sb,
EXT4_I(inode)->i_block_group);
-
- /* non-extent files can't have physical blocks past 2^32 */
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
-
block = ext4_new_meta_blocks(handle, inode, goal, 0,
NULL, &error);
if (error)
goto cleanup;
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
-
ea_idebug(inode, "creating block %llu",
(unsigned long long)block);
@@ -2184,8 +2209,9 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
struct ext4_inode *raw_inode;
int error;
- if (EXT4_I(inode)->i_extra_isize == 0)
+ if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
return 0;
+
raw_inode = ext4_raw_inode(&is->iloc);
header = IHDR(inode, raw_inode);
is->s.base = is->s.first = IFIRST(header);
@@ -2205,7 +2231,7 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
return 0;
}
-int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
+int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
struct ext4_xattr_info *i,
struct ext4_xattr_ibody_find *is)
{
@@ -2213,32 +2239,9 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
struct ext4_xattr_search *s = &is->s;
int error;
- if (EXT4_I(inode)->i_extra_isize == 0)
+ if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
return -ENOSPC;
- error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
- if (error)
- return error;
- header = IHDR(inode, ext4_raw_inode(&is->iloc));
- if (!IS_LAST_ENTRY(s->first)) {
- header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
- ext4_set_inode_state(inode, EXT4_STATE_XATTR);
- } else {
- header->h_magic = cpu_to_le32(0);
- ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
- }
- return 0;
-}
-static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
- struct ext4_xattr_info *i,
- struct ext4_xattr_ibody_find *is)
-{
- struct ext4_xattr_ibody_header *header;
- struct ext4_xattr_search *s = &is->s;
- int error;
-
- if (EXT4_I(inode)->i_extra_isize == 0)
- return -ENOSPC;
error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
if (error)
return error;
@@ -2565,13 +2568,13 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
.in_inode = !!entry->e_value_inum,
};
struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
+ int needs_kvfree = 0;
int error;
is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
- buffer = kmalloc(value_size, GFP_NOFS);
b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
- if (!is || !bs || !buffer || !b_entry_name) {
+ if (!is || !bs || !b_entry_name) {
error = -ENOMEM;
goto out;
}
@@ -2583,12 +2586,18 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
/* Save the entry name and the entry value */
if (entry->e_value_inum) {
+ buffer = kvmalloc(value_size, GFP_NOFS);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+ needs_kvfree = 1;
error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
if (error)
goto out;
} else {
size_t value_offs = le16_to_cpu(entry->e_value_offs);
- memcpy(buffer, (void *)IFIRST(header) + value_offs, value_size);
+ buffer = (void *)IFIRST(header) + value_offs;
}
memcpy(b_entry_name, entry->e_name, entry->e_name_len);
@@ -2603,25 +2612,26 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
if (error)
goto out;
- /* Remove the chosen entry from the inode */
- error = ext4_xattr_ibody_set(handle, inode, &i, is);
- if (error)
- goto out;
-
i.value = buffer;
i.value_len = value_size;
error = ext4_xattr_block_find(inode, &i, bs);
if (error)
goto out;
- /* Add entry which was removed from the inode into the block */
+ /* Move ea entry from the inode into the block */
error = ext4_xattr_block_set(handle, inode, &i, bs);
if (error)
goto out;
- error = 0;
+
+ /* Remove the chosen entry from the inode */
+ i.value = NULL;
+ i.value_len = 0;
+ error = ext4_xattr_ibody_set(handle, inode, &i, is);
+
out:
kfree(b_entry_name);
- kfree(buffer);
+ if (needs_kvfree && buffer)
+ kvfree(buffer);
if (is)
brelse(is->iloc.bh);
if (bs)
@@ -2796,6 +2806,9 @@ shift:
(void *)header, total_ino);
EXT4_I(inode)->i_extra_isize = new_extra_isize;
+ if (ext4_has_inline_data(inode))
+ error = ext4_find_inline_data_nolock(inode);
+
cleanup:
if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) {
ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.",
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index f39cad2abe2a..66911f8a11f8 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -95,6 +95,19 @@ struct ext4_xattr_entry {
#define EXT4_ZERO_XATTR_VALUE ((void *)-1)
+/*
+ * If we want to add an xattr to the inode, we should make sure that
+ * i_extra_isize is not 0 and that the inode size is not less than
+ * EXT4_GOOD_OLD_INODE_SIZE + extra_isize + header + pad.
+ * EXT4_GOOD_OLD_INODE_SIZE extra_isize header entry pad data
+ * |--------------------------|------------|------|---------|---|-------|
+ */
+#define EXT4_INODE_HAS_XATTR_SPACE(inode) \
+ ((EXT4_I(inode)->i_extra_isize != 0) && \
+ (EXT4_GOOD_OLD_INODE_SIZE + EXT4_I(inode)->i_extra_isize + \
+ sizeof(struct ext4_xattr_ibody_header) + EXT4_XATTR_PAD <= \
+ EXT4_INODE_SIZE((inode)->i_sb)))
+
struct ext4_xattr_info {
const char *name;
const void *value;
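
Plugging assumed sizes into EXT4_INODE_HAS_XATTR_SPACE() shows how the condition works: with a 256-byte on-disk inode, the 128-byte good-old inode, 32 bytes of extra_isize, a 4-byte ibody header and 4 bytes of xattr padding, 128 + 32 + 4 + 4 = 168 <= 256, so the inode has room for in-inode xattrs. The structure sizes here are typical values, not taken from this diff:

#include <stdio.h>

int main(void)
{
	unsigned inode_size = 256;        /* assumed on-disk inode size */
	unsigned good_old = 128;          /* EXT4_GOOD_OLD_INODE_SIZE */
	unsigned extra_isize = 32, header = 4, pad = 4;

	int has_space = extra_isize != 0 &&
			good_old + extra_isize + header + pad <= inode_size;
	printf("%s\n", has_space ? "has xattr space" : "no xattr space");
	return 0;
}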
@@ -177,6 +190,7 @@ extern void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *array);
extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
struct ext4_inode *raw_inode, handle_t *handle);
+extern void ext4_evict_ea_inode(struct inode *inode);
extern const struct xattr_handler *ext4_xattr_handlers[];
@@ -185,9 +199,9 @@ extern int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
extern int ext4_xattr_ibody_get(struct inode *inode, int name_index,
const char *name,
void *buffer, size_t buffer_size);
-extern int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
- struct ext4_xattr_info *i,
- struct ext4_xattr_ibody_find *is);
+extern int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
+ struct ext4_xattr_info *i,
+ struct ext4_xattr_ibody_find *is);
extern struct mb_cache *ext4_xattr_create_cache(void);
extern void ext4_xattr_destroy_cache(struct mb_cache *);
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 44c5110e18f0..84e98dacd452 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -137,7 +137,7 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
unsigned int segno, offset;
bool exist;
- if (type != DATA_GENERIC_ENHANCE && type != DATA_GENERIC_ENHANCE_READ)
+ if (type == DATA_GENERIC)
return true;
segno = GET_SEGNO(sbi, blkaddr);
@@ -145,6 +145,13 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
se = get_seg_entry(sbi, segno);
exist = f2fs_test_bit(offset, se->cur_valid_map);
+ if (exist && type == DATA_GENERIC_ENHANCE_UPDATE) {
+ f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
+ blkaddr, exist);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ return exist;
+ }
+
if (!exist && type == DATA_GENERIC_ENHANCE) {
f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
blkaddr, exist);
@@ -182,6 +189,7 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
case DATA_GENERIC:
case DATA_GENERIC_ENHANCE:
case DATA_GENERIC_ENHANCE_READ:
+ case DATA_GENERIC_ENHANCE_UPDATE:
if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
blkaddr < MAIN_BLKADDR(sbi))) {
f2fs_warn(sbi, "access invalid blkaddr:%u",
@@ -298,8 +306,15 @@ static int __f2fs_write_meta_page(struct page *page,
trace_f2fs_writepage(page, META);
- if (unlikely(f2fs_cp_error(sbi)))
+ if (unlikely(f2fs_cp_error(sbi))) {
+ if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
+ ClearPageUptodate(page);
+ dec_page_count(sbi, F2FS_DIRTY_META);
+ unlock_page(page);
+ return 0;
+ }
goto redirty_out;
+ }
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
@@ -1269,7 +1284,8 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
if (!get_pages(sbi, type))
break;
- if (unlikely(f2fs_cp_error(sbi)))
+ if (unlikely(f2fs_cp_error(sbi) &&
+ !is_sbi_flag_set(sbi, SBI_IS_CLOSE)))
break;
io_schedule_timeout(HZ/50);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 773028921c48..8f78050c935d 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -496,7 +496,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
}
if (fio->io_wbc && !is_read_io(fio->op))
- wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
+ wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
bio_set_op_attrs(bio, fio->op, fio->op_flags);
@@ -575,7 +575,7 @@ alloc_new:
}
if (fio->io_wbc)
- wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
+ wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
inc_page_count(fio->sbi, WB_DATA_TYPE(page));
@@ -652,7 +652,7 @@ alloc_new:
}
if (fio->io_wbc)
- wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);
+ wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
io->last_block_in_bio = fio->new_blkaddr;
f2fs_trace_ios(fio, 0);
@@ -2130,7 +2130,8 @@ static int __write_data_page(struct page *page, bool *submitted,
* don't drop any dirty dentry pages, to keep the latest
* directory structure.
*/
- if (S_ISDIR(inode->i_mode))
+ if (S_ISDIR(inode->i_mode) &&
+ !is_sbi_flag_set(sbi, SBI_IS_CLOSE))
goto redirty_out;
goto out;
}
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index e60078460ad1..dd53c9294ad9 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -381,7 +381,8 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
struct extent_node *en;
bool ret = false;
- f2fs_bug_on(sbi, !et);
+ if (!et)
+ return false;
trace_f2fs_lookup_extent_tree_start(inode, pgofs);
@@ -729,9 +730,8 @@ void f2fs_drop_extent_tree(struct inode *inode)
if (!f2fs_may_extent_tree(inode))
return;
- set_inode_flag(inode, FI_NO_EXTENT);
-
write_lock(&et->lock);
+ set_inode_flag(inode, FI_NO_EXTENT);
__free_extent_tree(sbi, et);
if (et->largest.len) {
et->largest.len = 0;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c73a1638c18b..f75d25682734 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -225,6 +225,10 @@ enum {
* condition of read on truncated area
* by extent_cache
*/
+ DATA_GENERIC_ENHANCE_UPDATE, /*
+ * strong check on range and segment
+ * bitmap for update case
+ */
META_GENERIC,
};
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index ef08ef017030..2330600dbe02 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -2821,15 +2821,16 @@ int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
struct dquot *transfer_to[MAXQUOTAS] = {};
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb;
- int err = 0;
+ int err;
transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
- if (!IS_ERR(transfer_to[PRJQUOTA])) {
- err = __dquot_transfer(inode, transfer_to);
- if (err)
- set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
- dqput(transfer_to[PRJQUOTA]);
- }
+ if (IS_ERR(transfer_to[PRJQUOTA]))
+ return PTR_ERR(transfer_to[PRJQUOTA]);
+
+ err = __dquot_transfer(inode, transfer_to);
+ if (err)
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+ dqput(transfer_to[PRJQUOTA]);
return err;
}
@@ -3116,6 +3117,7 @@ int f2fs_precache_extents(struct inode *inode)
return -EOPNOTSUPP;
map.m_lblk = 0;
+ map.m_pblk = 0;
map.m_next_pgofs = NULL;
map.m_next_extent = &m_next_extent;
map.m_seg_type = NO_CHECK_TYPE;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 68d5c73c5ed1..d26b3fe38bd9 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -612,7 +612,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
{
struct page *node_page;
nid_t nid;
- unsigned int ofs_in_node;
+ unsigned int ofs_in_node, max_addrs, base;
block_t source_blkaddr;
nid = le32_to_cpu(sum->nid);
@@ -638,6 +638,21 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
return false;
}
+ if (IS_INODE(node_page)) {
+ base = offset_in_addr(F2FS_INODE(node_page));
+ max_addrs = DEF_ADDRS_PER_INODE;
+ } else {
+ base = 0;
+ max_addrs = DEF_ADDRS_PER_BLOCK;
+ }
+
+ if (base + ofs_in_node >= max_addrs) {
+ f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
+ base, ofs_in_node, max_addrs, dni->ino, dni->nid);
+ f2fs_put_page(node_page, 1);
+ return false;
+ }
+
*nofs = ofs_of_node(node_page);
source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
f2fs_put_page(node_page, 1);
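
The bounds check added to is_alive() guards the summary-derived offset: base + ofs_in_node must stay below the number of data addresses the node can hold. A worked example with DEF_ADDRS_PER_INODE assumed to be 923 (the usual value for 4K blocks):

#include <stdio.h>

#define DEF_ADDRS_PER_INODE 923            /* assumed; typical for 4K blocks */

int main(void)
{
	unsigned int base = 0, ofs_in_node = 950;

	if (base + ofs_in_node >= DEF_ADDRS_PER_INODE) {
		printf("Inconsistent blkaddr offset: base:%u ofs:%u max:%u\n",
		       base, ofs_in_node, DEF_ADDRS_PER_INODE);
		return 1;                  /* GC treats the block as dead */
	}
	return 0;
}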
@@ -1054,7 +1069,8 @@ next_step:
if (phase == 3) {
inode = f2fs_iget(sb, dni.ino);
- if (IS_ERR(inode) || is_bad_inode(inode))
+ if (IS_ERR(inode) || is_bad_inode(inode) ||
+ special_file(inode->i_mode))
continue;
if (!down_write_trylock(
@@ -1245,8 +1261,9 @@ freed:
seg_freed++;
migrated++;
- if (__is_large_section(sbi) && segno + 1 < end_segno)
- sbi->next_victim_seg[gc_type] = segno + 1;
+ if (__is_large_section(sbi))
+ sbi->next_victim_seg[gc_type] =
+ (segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
skip:
f2fs_put_page(sum_page, 0);
}
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index c6bd669f4b4e..a700df05cd18 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -43,7 +43,6 @@ bool f2fs_may_inline_dentry(struct inode *inode)
void f2fs_do_read_inline_data(struct page *page, struct page *ipage)
{
struct inode *inode = page->mapping->host;
- void *src_addr, *dst_addr;
if (PageUptodate(page))
return;
@@ -53,11 +52,8 @@ void f2fs_do_read_inline_data(struct page *page, struct page *ipage)
zero_user_segment(page, MAX_INLINE_DATA(inode), PAGE_SIZE);
/* Copy the whole inline data block */
- src_addr = inline_data_addr(inode, ipage);
- dst_addr = kmap_atomic(page);
- memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
- flush_dcache_page(page);
- kunmap_atomic(dst_addr);
+ memcpy_to_page(page, 0, inline_data_addr(inode, ipage),
+ MAX_INLINE_DATA(inode));
if (!PageUptodate(page))
SetPageUptodate(page);
}
@@ -224,7 +220,6 @@ out:
int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
- void *src_addr, *dst_addr;
struct dnode_of_data dn;
int err;
@@ -241,10 +236,8 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
f2fs_bug_on(F2FS_I_SB(inode), page->index);
f2fs_wait_on_page_writeback(dn.inode_page, NODE, true, true);
- src_addr = kmap_atomic(page);
- dst_addr = inline_data_addr(inode, dn.inode_page);
- memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
- kunmap_atomic(src_addr);
+ memcpy_from_page(inline_data_addr(inode, dn.inode_page),
+ page, 0, MAX_INLINE_DATA(inode));
set_page_dirty(dn.inode_page);
f2fs_clear_page_cache_dirty_tag(page);
@@ -402,18 +395,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
dentry_blk = page_address(page);
+ /*
+ * Start by zeroing the full block, to ensure that all unused space is
+ * zeroed and no uninitialized memory is leaked to disk.
+ */
+ memset(dentry_blk, 0, F2FS_BLKSIZE);
+
make_dentry_ptr_inline(dir, &src, inline_dentry);
make_dentry_ptr_block(dir, &dst, dentry_blk);
/* copy data from inline dentry block to new dentry block */
memcpy(dst.bitmap, src.bitmap, src.nr_bitmap);
- memset(dst.bitmap + src.nr_bitmap, 0, dst.nr_bitmap - src.nr_bitmap);
- /*
- * we do not need to zero out remainder part of dentry and filename
- * field, since we have used bitmap for marking the usage status of
- * them, besides, we can also ignore copying/zeroing reserved space
- * of dentry block, because them haven't been used so far.
- */
memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max);
memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN);
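
Zeroing the whole destination block first, then copying only the live regions, guarantees that every gap the copies skip goes to disk as zeroes rather than stale memory. A minimal demonstration of the pattern with toy sizes:

#include <stdio.h>
#include <string.h>

#define BLKSIZE 4096                       /* stand-in for F2FS_BLKSIZE */

int main(void)
{
	static char dentry_blk[BLKSIZE];
	char bitmap[8] = { 0x03 };         /* toy stand-in for src.bitmap */

	memset(dentry_blk, 0, BLKSIZE);             /* the added first step */
	memcpy(dentry_blk, bitmap, sizeof(bitmap)); /* copy live regions only */
	printf("trailing byte: %d\n", dentry_blk[BLKSIZE - 1]);  /* 0 */
	return 0;
}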
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index ed95c27e9302..99a91c746b39 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -1009,7 +1009,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
if (old_dir_entry) {
- if (old_dir != new_dir && !whiteout)
+ if (old_dir != new_dir)
f2fs_set_link(old_inode, old_dir_entry,
old_dir_page, new_dir);
else
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 3dc7cc3d6ac6..8256a2dedae8 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -889,8 +889,10 @@ static int truncate_dnode(struct dnode_of_data *dn)
dn->ofs_in_node = 0;
f2fs_truncate_data_blocks(dn);
err = truncate_node(dn);
- if (err)
+ if (err) {
+ f2fs_put_page(page, 1);
return err;
+ }
return 1;
}
@@ -1240,7 +1242,11 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
dec_valid_node_count(sbi, dn->inode, !ofs);
goto fail;
}
- f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
+ if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
+ err = -EFSCORRUPTED;
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ goto fail;
+ }
#endif
new_ni.nid = dn->nid;
new_ni.ino = dn->inode->i_ino;
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 5f230e981c48..7e30326b296c 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -406,7 +406,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
struct dnode_of_data tdn = *dn;
nid_t ino, nid;
struct inode *inode;
- unsigned int offset;
+ unsigned int offset, ofs_in_node, max_addrs;
block_t bidx;
int i;
@@ -432,15 +432,24 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
got_it:
/* Use the locked dnode page and inode */
nid = le32_to_cpu(sum.nid);
+ ofs_in_node = le16_to_cpu(sum.ofs_in_node);
+
+ max_addrs = ADDRS_PER_PAGE(dn->node_page, dn->inode);
+ if (ofs_in_node >= max_addrs) {
+ f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%lu, nid:%u, max:%u",
+ ofs_in_node, dn->inode->i_ino, nid, max_addrs);
+ return -EFSCORRUPTED;
+ }
+
if (dn->inode->i_ino == nid) {
tdn.nid = nid;
if (!dn->inode_page_locked)
lock_page(dn->inode_page);
tdn.node_page = dn->inode_page;
- tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
+ tdn.ofs_in_node = ofs_in_node;
goto truncate_out;
} else if (dn->nid == nid) {
- tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
+ tdn.ofs_in_node = ofs_in_node;
goto truncate_out;
}
@@ -602,7 +611,16 @@ retry_dn:
*/
if (dest == NEW_ADDR) {
f2fs_truncate_data_blocks_range(&dn, 1);
- f2fs_reserve_new_block(&dn);
+ do {
+ err = f2fs_reserve_new_block(&dn);
+ if (err == -ENOSPC) {
+ f2fs_bug_on(sbi, 1);
+ break;
+ }
+ } while (err &&
+ IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION));
+ if (err)
+ goto err;
continue;
}
@@ -610,12 +628,14 @@ retry_dn:
if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
if (src == NULL_ADDR) {
- err = f2fs_reserve_new_block(&dn);
- while (err &&
- IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
+ do {
err = f2fs_reserve_new_block(&dn);
- /* We should not get -ENOSPC */
- f2fs_bug_on(sbi, err);
+ if (err == -ENOSPC) {
+ f2fs_bug_on(sbi, 1);
+ break;
+ }
+ } while (err &&
+ IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION));
if (err)
goto err;
}
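
Both reworked loops follow the same shape: retry f2fs_reserve_new_block() only while fault injection keeps failing it, and break out on -ENOSPC so the error is reported instead of looping (the old code BUGed on any error). A userspace model of that control flow, with a fake allocator that fails twice:

#include <stdio.h>

#define ENOSPC 28
#define FAULT_INJECTION 1                  /* CONFIG_F2FS_FAULT_INJECTION=y */

static int attempts;

static int reserve_new_block(void)         /* fails twice, then succeeds */
{
	return ++attempts < 3 ? -11 : 0;
}

int main(void)
{
	int err;

	do {
		err = reserve_new_block();
		if (err == -ENOSPC)
			break;             /* report instead of BUG/spin */
	} while (err && FAULT_INJECTION);

	printf("err=%d after %d attempts\n", err, attempts);
	return err ? 1 : 0;
}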
@@ -630,6 +650,14 @@ retry_prev:
goto err;
}
+ if (f2fs_is_valid_blkaddr(sbi, dest,
+ DATA_GENERIC_ENHANCE_UPDATE)) {
+ f2fs_err(sbi, "Inconsistent dest blkaddr:%u, ino:%lu, ofs:%u",
+ dest, inode->i_ino, dn.ofs_in_node);
+ err = -EFSCORRUPTED;
+ goto err;
+ }
+
/* write dummy data page */
f2fs_replace_block(sbi, &dn, src, dest,
ni.version, false, false);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 7759323bd775..e43b57755a7f 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1486,7 +1486,7 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
if (i + 1 < dpolicy->granularity)
break;
- if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
+ if (i + 1 < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
return __issue_discard_cmd_orderly(sbi, dpolicy);
pend_list = &dcc->pend_list[i];
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 232c99e4a1ee..401bc0e7f6eb 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -255,10 +255,10 @@ static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
- block_t limit = min((sbi->user_block_count << 1) / 1000,
+ block_t limit = min((sbi->user_block_count >> 3),
sbi->user_block_count - sbi->reserved_blocks);
- /* limit is 0.2% */
+ /* limit is 12.5% */
if (test_opt(sbi, RESERVE_ROOT) &&
F2FS_OPTION(sbi).root_reserved_blocks > limit) {
F2FS_OPTION(sbi).root_reserved_blocks = limit;
@@ -1824,7 +1824,6 @@ static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
size_t toread;
loff_t i_size = i_size_read(inode);
struct page *page;
- char *kaddr;
if (off > i_size)
return 0;
@@ -1857,9 +1856,7 @@ repeat:
return -EIO;
}
- kaddr = kmap_atomic(page);
- memcpy(data, kaddr + offset, tocopy);
- kunmap_atomic(kaddr);
+ memcpy_from_page(data, page, offset, tocopy);
f2fs_put_page(page, 1);
offset = 0;
@@ -1881,7 +1878,6 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
size_t towrite = len;
struct page *page;
void *fsdata = NULL;
- char *kaddr;
int err = 0;
int tocopy;
@@ -1900,10 +1896,7 @@ retry:
break;
}
- kaddr = kmap_atomic(page);
- memcpy(kaddr + offset, data, tocopy);
- kunmap_atomic(kaddr);
- flush_dcache_page(page);
+ memcpy_to_page(page, offset, data, tocopy);
a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
page, fsdata);
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
index 7944a08a3977..c4bd5a154252 100644
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -45,16 +45,13 @@ static int pagecache_read(struct inode *inode, void *buf, size_t count,
size_t n = min_t(size_t, count,
PAGE_SIZE - offset_in_page(pos));
struct page *page;
- void *addr;
page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT,
NULL);
if (IS_ERR(page))
return PTR_ERR(page);
- addr = kmap_atomic(page);
- memcpy(buf, addr + offset_in_page(pos), n);
- kunmap_atomic(addr);
+ memcpy_from_page(buf, page, offset_in_page(pos), n);
put_page(page);
@@ -79,8 +76,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
size_t n = min_t(size_t, count,
PAGE_SIZE - offset_in_page(pos));
struct page *page;
- void *fsdata;
- void *addr;
+ void *fsdata = NULL;
int res;
res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0,
@@ -88,9 +84,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
if (res)
return res;
- addr = kmap_atomic(page);
- memcpy(addr + offset_in_page(pos), buf, n);
- kunmap_atomic(addr);
+ memcpy_to_page(page, offset_in_page(pos), buf, n);
res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n,
page, fsdata);
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index cf1bfed1e662..cef2825ff069 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -722,6 +722,12 @@ static int __f2fs_setxattr(struct inode *inode, int index,
memcpy(pval, value, size);
last->e_value_size = cpu_to_le16(size);
new_hsize += newsize;
+ /*
+ * Explicitly add the null terminator. The unused xattr space
+ * is supposed to always be zeroed, which would make this
+ * unnecessary, but don't depend on that.
+ */
+ *(u32 *)((u8 *)last + newsize) = 0;
}
error = write_all_xattrs(inode, new_hsize, base_addr, ipage);
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 549d91f5245c..16795955acad 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -1303,7 +1303,7 @@ int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
struct super_block *sb = dir->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bh, *prev, *bhs[3]; /* 32*slots (672bytes) */
- struct msdos_dir_entry *uninitialized_var(de);
+ struct msdos_dir_entry *de;
int err, free_slots, i, nr_bhs;
loff_t pos, i_pos;
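Several hunks in this patch (fat, fuse, gfs2, hfsplus, isofs, jffs2) drop the old uninitialized_var() annotation. If memory serves, the macro was defined roughly as below (the exact form varied across compiler headers), and it only silenced -Wmaybe-uninitialized rather than initializing anything:

/* Historical definition, approximately: */
#define uninitialized_var(x) x = x

/* The self-assignment convinces the compiler the variable is "used
 * initialized", which hides real bugs as well as false positives; that is
 * why the macro was removed tree-wide in favor of plain declarations. */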
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 01263ffbc4c0..9a5f153c8919 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -37,7 +37,7 @@ static long do_sys_name_to_handle(struct path *path,
if (f_handle.handle_bytes > MAX_HANDLE_SZ)
return -EINVAL;
- handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
+ handle = kzalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
GFP_KERNEL);
if (!handle)
return -ENOMEM;
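The fhandle change switches kmalloc() to kzalloc() so the handle buffer later copied to userspace never carries stale heap bytes. A userspace analogue of the leak (malloc vs calloc; reading the uninitialized byte is formally undefined and shown only to illustrate):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* First allocation: fill with a recognizable pattern, then free. */
	unsigned char *a = malloc(64);
	memset(a, 0xAA, 64);
	free(a);

	/* malloc() may hand back the same block with old bytes intact;
	 * calloc() guarantees zeroed memory, like kzalloc(). */
	unsigned char *b = malloc(64);
	unsigned char *c = calloc(1, 64);
	printf("malloc'd byte: 0x%02x (may be stale)\n", b[0]);
	printf("calloc'd byte: 0x%02x (always zero)\n", c[0]);
	free(b);
	free(c);
	return 0;
}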
diff --git a/fs/file.c b/fs/file.c
index 51f53a7dc221..e56059fa1b30 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -654,6 +654,7 @@ int __close_fd_get_file(unsigned int fd, struct file **res)
fdt = files_fdtable(files);
if (fd >= fdt->max_fds)
goto out_unlock;
+ fd = array_index_nospec(fd, fdt->max_fds);
file = fdt->fd[fd];
if (!file)
goto out_unlock;
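__close_fd_get_file() gains an array_index_nospec() clamp so that a bounds check mispredicted under speculation cannot be used to read past fdt->fd[] (Spectre v1). A userspace model of the branchless mask, patterned on the kernel's generic fallback and assuming a 64-bit long with arithmetic right shift (per-arch kernel variants differ):

#include <stdio.h>

/* Mask is all-ones when index < size and zero otherwise, computed with no
 * conditional branch the CPU could mispredict. */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> 63;
}

int main(void)
{
	unsigned long size = 16;
	unsigned long ok = 5, bad = 1000;

	printf("%lu -> %lu\n", ok, ok & index_mask(ok, size));		/* 5 */
	printf("%lu -> %lu\n", bad, bad & index_mask(bad, size));	/* 0 */
	return 0;
}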
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 5b3a288e0f14..3ec2c16abece 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -700,7 +700,7 @@ void wbc_detach_inode(struct writeback_control *wbc)
* is okay. The main goal is avoiding keeping an inode on
* the wrong wb for an extended period of time.
*/
- if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
+ if (hweight16(history) > WB_FRN_HIST_THR_SLOTS)
inode_switch_wbs(inode, max_id);
}
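The writeback hunk narrows hweight32() to hweight16() to match the width of the foreign-node history field, which holds 16 slots. hweight is simply a population count; a userspace equivalent via the GCC/Clang builtin:

#include <stdio.h>

int main(void)
{
	unsigned short history = 0xA5A5;	/* example 16-slot history bitmap */

	printf("slots set: %d\n", __builtin_popcount(history));	/* 8 */
	return 0;
}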
diff --git a/fs/fs_context.c b/fs/fs_context.c
index e492a83fa100..412712eb59ee 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -598,7 +598,8 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
return -ENOMEM;
}
- ctx->legacy_data[size++] = ',';
+ if (size)
+ ctx->legacy_data[size++] = ',';
len = strlen(param->key);
memcpy(ctx->legacy_data + size, param->key, len);
size += len;
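The fs_context fix only inserts the separating comma once data already exists, so the first parameter no longer yields an option string with a leading ','. A minimal userspace reconstruction of the append logic (buffer size and parameter names are made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char legacy_data[64] = "";
	size_t size = 0;
	const char *params[] = { "noatime", "ro" };

	for (int i = 0; i < 2; i++) {
		if (size)	/* the fix: no comma before the first param */
			legacy_data[size++] = ',';
		size_t len = strlen(params[i]);
		memcpy(legacy_data + size, params[i], len);
		size += len;
	}
	legacy_data[size] = '\0';
	printf("%s\n", legacy_data);	/* "noatime,ro", not ",noatime,ro" */
	return 0;
}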
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index c23f6f243ad4..7092958c43ed 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -120,7 +120,7 @@ static ssize_t fuse_conn_max_background_write(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned uninitialized_var(val);
+ unsigned val;
ssize_t ret;
ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
@@ -162,7 +162,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned uninitialized_var(val);
+ unsigned val;
struct fuse_conn *fc;
ssize_t ret;
@@ -265,7 +265,7 @@ int fuse_ctl_add_conn(struct fuse_conn *fc)
struct dentry *parent;
char name[32];
- if (!fuse_control_sb)
+ if (!fuse_control_sb || fc->no_control)
return 0;
parent = fuse_control_sb->s_root;
@@ -303,7 +303,7 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc)
{
int i;
- if (!fuse_control_sb)
+ if (!fuse_control_sb || fc->no_control)
return;
for (i = fc->ctl_ndents - 1; i >= 0; i--) {
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index e51b7019e887..072b68fbf8f4 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -270,7 +270,7 @@ static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp)
static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo)
{
char *end = p + len;
- char *uninitialized_var(key), *uninitialized_var(val);
+ char *key, *val;
int rc;
while (true) {
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 34487bf1d791..b2f37809fa9b 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -246,7 +246,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
spin_unlock(&fi->lock);
}
kfree(forget);
- if (ret == -ENOMEM)
+ if (ret == -ENOMEM || ret == -EINTR)
goto out;
if (ret || fuse_invalid_attr(&outarg.attr) ||
(outarg.attr.mode ^ inode->i_mode) & S_IFMT)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index efb2a4871291..f6c499dbfd06 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2994,7 +2994,7 @@ static void fuse_register_polled_file(struct fuse_conn *fc,
{
spin_lock(&fc->lock);
if (RB_EMPTY_NODE(&ff->polled_node)) {
- struct rb_node **link, *uninitialized_var(parent);
+ struct rb_node **link, *parent;
link = fuse_find_polled_node(fc, ff->kh, &parent);
BUG_ON(*link);
@@ -3212,24 +3212,19 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
.mode = mode
};
int err;
- bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
- (mode & FALLOC_FL_PUNCH_HOLE);
-
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
if (fc->no_fallocate)
return -EOPNOTSUPP;
- if (lock_inode) {
- inode_lock(inode);
- if (mode & FALLOC_FL_PUNCH_HOLE) {
- loff_t endbyte = offset + length - 1;
+ inode_lock(inode);
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ loff_t endbyte = offset + length - 1;
- err = fuse_writeback_range(inode, offset, endbyte);
- if (err)
- goto out;
- }
+ err = fuse_writeback_range(inode, offset, endbyte);
+ if (err)
+ goto out;
}
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
@@ -3239,6 +3234,10 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
goto out;
}
+ err = file_modified(file);
+ if (err)
+ goto out;
+
if (!(mode & FALLOC_FL_KEEP_SIZE))
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
@@ -3272,8 +3271,7 @@ out:
if (!(mode & FALLOC_FL_KEEP_SIZE))
clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
- if (lock_inode)
- inode_unlock(inode);
+ inode_unlock(inode);
return err;
}
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index aa1d5cf1bc3a..f3d712decb57 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -162,6 +162,12 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
inode->i_uid = make_kuid(fc->user_ns, attr->uid);
inode->i_gid = make_kgid(fc->user_ns, attr->gid);
inode->i_blocks = attr->blocks;
+
+ /* Sanitize nsecs */
+ attr->atimensec = min_t(u32, attr->atimensec, NSEC_PER_SEC - 1);
+ attr->mtimensec = min_t(u32, attr->mtimensec, NSEC_PER_SEC - 1);
+ attr->ctimensec = min_t(u32, attr->ctimensec, NSEC_PER_SEC - 1);
+
inode->i_atime.tv_sec = attr->atime;
inode->i_atime.tv_nsec = attr->atimensec;
/* mtime from server may be stale due to local buffered write */
diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
index 70f685b61e3a..512609da8590 100644
--- a/fs/fuse/readdir.c
+++ b/fs/fuse/readdir.c
@@ -238,8 +238,16 @@ retry:
dput(dentry);
dentry = alias;
}
- if (IS_ERR(dentry))
+ if (IS_ERR(dentry)) {
+ if (!IS_ERR(inode)) {
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ spin_lock(&fi->lock);
+ fi->nlookup--;
+ spin_unlock(&fi->lock);
+ }
return PTR_ERR(dentry);
+ }
}
if (fc->readdirplus_auto)
set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index b9fe975d7625..b1ce0061f127 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -156,7 +156,6 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
{
struct inode *inode = page->mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_sbd *sdp = GFS2_SB(inode);
if (PageChecked(page)) {
ClearPageChecked(page);
@@ -164,7 +163,7 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
create_empty_buffers(page, inode->i_sb->s_blocksize,
BIT(BH_Dirty)|BIT(BH_Uptodate));
}
- gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
+ gfs2_page_add_databufs(ip, page, 0, PAGE_SIZE);
}
return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}
@@ -337,7 +336,7 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
int done = 0;
struct pagevec pvec;
int nr_pages;
- pgoff_t uninitialized_var(writeback_index);
+ pgoff_t writeback_index;
pgoff_t index;
pgoff_t end;
pgoff_t done_index;
@@ -457,8 +456,6 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
return error;
kaddr = kmap_atomic(page);
- if (dsize > gfs2_max_stuffed_size(ip))
- dsize = gfs2_max_stuffed_size(ip);
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
kunmap_atomic(kaddr);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 77a497a4b236..63e925aa12a7 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -70,9 +70,6 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
void *kaddr = kmap(page);
u64 dsize = i_size_read(inode);
- if (dsize > gfs2_max_stuffed_size(ip))
- dsize = gfs2_max_stuffed_size(ip);
-
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
kunmap(page);
@@ -1766,7 +1763,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
__u16 start_list[GFS2_MAX_META_HEIGHT];
__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
- unsigned int start_aligned, uninitialized_var(end_aligned);
+ unsigned int start_aligned, end_aligned;
unsigned int strip_h = ip->i_height - 1;
u32 btotal = 0;
int ret, state;
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index d5b9274662db..092223a8b120 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -362,6 +362,7 @@ static int inode_go_demote_ok(const struct gfs2_glock *gl)
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
const struct gfs2_dinode *str = buf;
struct timespec64 atime;
u16 height, depth;
@@ -401,7 +402,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
gfs2_set_inode_flags(&ip->i_inode);
height = be16_to_cpu(str->di_height);
- if (unlikely(height > GFS2_MAX_META_HEIGHT))
+ if (unlikely(height > sdp->sd_max_height))
goto corrupt;
ip->i_height = (u8)height;
@@ -411,6 +412,9 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
ip->i_depth = (u8)depth;
ip->i_entries = be32_to_cpu(str->di_entries);
+ if (gfs2_is_stuffed(ip) && ip->i_inode.i_size > gfs2_max_stuffed_size(ip))
+ goto corrupt;
+
if (S_ISREG(ip->i_inode.i_mode))
gfs2_set_aops(&ip->i_inode);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index d2ed4dc4434c..4aaaf3a4203c 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -414,7 +414,7 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
struct page *page)
{
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
- struct gfs2_log_header_host uninitialized_var(lh);
+ struct gfs2_log_header_host lh;
void *kaddr = kmap_atomic(page);
unsigned int offset;
bool ret = false;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 29b27d769860..2112ff7a0172 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -180,7 +180,10 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
pr_warn("Invalid superblock size\n");
return -EINVAL;
}
-
+ if (sb->sb_bsize_shift != ffs(sb->sb_bsize) - 1) {
+ pr_warn("Invalid block size shift\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -377,8 +380,10 @@ static int init_names(struct gfs2_sbd *sdp, int silent)
if (!table[0])
table = sdp->sd_vfs->s_id;
- strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN);
- strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN);
+ BUILD_BUG_ON(GFS2_LOCKNAME_LEN > GFS2_FSNAME_LEN);
+
+ strscpy(sdp->sd_proto_name, proto, GFS2_LOCKNAME_LEN);
+ strscpy(sdp->sd_table_name, table, GFS2_LOCKNAME_LEN);
table = sdp->sd_table_name;
while ((table = strchr(table, '/')))
@@ -1349,13 +1354,13 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
switch (o) {
case Opt_lockproto:
- strlcpy(args->ar_lockproto, param->string, GFS2_LOCKNAME_LEN);
+ strscpy(args->ar_lockproto, param->string, GFS2_LOCKNAME_LEN);
break;
case Opt_locktable:
- strlcpy(args->ar_locktable, param->string, GFS2_LOCKNAME_LEN);
+ strscpy(args->ar_locktable, param->string, GFS2_LOCKNAME_LEN);
break;
case Opt_hostdata:
- strlcpy(args->ar_hostdata, param->string, GFS2_LOCKNAME_LEN);
+ strscpy(args->ar_hostdata, param->string, GFS2_LOCKNAME_LEN);
break;
case Opt_spectator:
args->ar_spectator = 1;
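gfs2 moves from strlcpy() to strscpy(), and the BUILD_BUG_ON documents that the destination fields hold at least GFS2_LOCKNAME_LEN bytes. The practical difference, modeled in userspace (strscpy_model() illustrates the documented semantics and is not the kernel source):

#include <stdio.h>

/* strlcpy() returns strlen(src) even on truncation and always reads all of
 * src; strscpy() returns the number of bytes copied, or -E2BIG on
 * truncation, and never reads past the buffer bound. */
static long strscpy_model(char *dst, const char *src, unsigned long size)
{
	unsigned long i;

	if (!size)
		return -7;	/* -E2BIG */
	for (i = 0; i < size - 1 && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';
	return src[i] ? -7 : (long)i;
}

int main(void)
{
	char buf[8];

	printf("%ld\n", strscpy_model(buf, "lock_dlm", sizeof(buf)));	 /* -7, truncated */
	printf("%ld %s\n", strscpy_model(buf, "fsname", sizeof(buf)), buf); /* 6 fsname */
	return 0;
}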
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index cbee745169b8..ce3d65787e01 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -431,6 +431,17 @@ static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
(sync_gen && (qd->qd_sync_gen >= *sync_gen)))
return 0;
+ /*
+ * If qd_change is 0 it means a pending quota change was negated.
+ * We should not sync it, but we still have a qd reference and slot
+ * reference taken by gfs2_quota_change -> do_qc that need to be put.
+ */
+ if (!qd->qd_change && test_and_clear_bit(QDF_CHANGE, &qd->qd_flags)) {
+ slot_put(qd);
+ qd_put(qd);
+ return 0;
+ }
+
if (!lockref_get_not_dead(&qd->qd_lockref))
return 0;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 8153a3eac540..f0a135a48cc3 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -2275,7 +2275,7 @@ void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_glock *gl,
(unsigned long long)rgd->rd_addr, rgd->rd_flags,
rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
rgd->rd_reserved, rgd->rd_extfail_pt);
- if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
+ if (rgd->rd_sbd->sd_args.ar_rgrplvb && rgd->rd_rgl) {
struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
gfs2_print_dbg(seq, "%s L: f:%02x b:%u i:%u\n", fs_id_buf,
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 9c593fd50c6a..15e757f76380 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1046,7 +1046,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
struct gfs2_args *args = &sdp->sd_args;
- int val;
+ unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;
+
+ spin_lock(&sdp->sd_tune.gt_spin);
+ logd_secs = sdp->sd_tune.gt_logd_secs;
+ quota_quantum = sdp->sd_tune.gt_quota_quantum;
+ statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
+ statfs_slow = sdp->sd_tune.gt_statfs_slow;
+ spin_unlock(&sdp->sd_tune.gt_spin);
if (is_ancestor(root, sdp->sd_master_dir))
seq_puts(s, ",meta");
@@ -1101,17 +1108,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
}
if (args->ar_discard)
seq_puts(s, ",discard");
- val = sdp->sd_tune.gt_logd_secs;
- if (val != 30)
- seq_printf(s, ",commit=%d", val);
- val = sdp->sd_tune.gt_statfs_quantum;
- if (val != 30)
- seq_printf(s, ",statfs_quantum=%d", val);
- else if (sdp->sd_tune.gt_statfs_slow)
+ if (logd_secs != 30)
+ seq_printf(s, ",commit=%d", logd_secs);
+ if (statfs_quantum != 30)
+ seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
+ else if (statfs_slow)
seq_puts(s, ",statfs_quantum=0");
- val = sdp->sd_tune.gt_quota_quantum;
- if (val != 60)
- seq_printf(s, ",quota_quantum=%d", val);
+ if (quota_quantum != 60)
+ seq_printf(s, ",quota_quantum=%d", quota_quantum);
if (args->ar_statfs_percent)
seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
@@ -1258,6 +1262,14 @@ static void gfs2_evict_inode(struct inode *inode)
if (inode->i_nlink || sb_rdonly(sb))
goto out;
+ /*
+ * In case of an incomplete mount, gfs2_evict_inode() may be called for
+ * system files without having an active journal to write to. In that
+ * case, skip the filesystem evict.
+ */
+ if (!sdp->sd_jdesc)
+ goto out;
+
if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
gfs2_holder_mark_uninitialized(&gh);
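The show_options change snapshots all four sd_tune fields under a single gt_spin acquisition instead of reading them piecemeal while another task may be updating them. The pattern, sketched with pthreads (names and values are illustrative; build with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct tune {
	pthread_spinlock_t lock;
	unsigned int logd_secs, quota_quantum;
};

/* Take one consistent snapshot instead of several racy reads. */
static void show_options(struct tune *t)
{
	unsigned int logd_secs, quota_quantum;

	pthread_spin_lock(&t->lock);
	logd_secs = t->logd_secs;
	quota_quantum = t->quota_quantum;
	pthread_spin_unlock(&t->lock);

	if (logd_secs != 30)
		printf(",commit=%u", logd_secs);
	if (quota_quantum != 60)
		printf(",quota_quantum=%u", quota_quantum);
	printf("\n");
}

int main(void)
{
	struct tune t = { .logd_secs = 20, .quota_quantum = 60 };

	pthread_spin_init(&t.lock, PTHREAD_PROCESS_PRIVATE);
	show_options(&t);	/* prints ",commit=20" */
	return 0;
}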
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index c0a73a6ffb28..397e02a56697 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -281,6 +281,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
tree->node_hash[hash] = node;
tree->node_hash_cnt++;
} else {
+ hfs_bnode_get(node2);
spin_unlock(&tree->hash_lock);
kfree(node);
wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index da243c84e93b..ee2ea5532e69 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -453,14 +453,16 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
/* panic? */
return -EIO;
+ res = -EIO;
+ if (HFS_I(main_inode)->cat_key.CName.len > HFS_NAMELEN)
+ goto out;
fd.search_key->cat = HFS_I(main_inode)->cat_key;
if (hfs_brec_find(&fd))
- /* panic? */
goto out;
if (S_ISDIR(main_inode->i_mode)) {
if (fd.entrylength < sizeof(struct hfs_cat_dir))
- /* panic? */;
+ goto out;
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_dir));
if (rec.type != HFS_CDR_DIR ||
@@ -473,6 +475,8 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_dir));
} else if (HFS_IS_RSRC(inode)) {
+ if (fd.entrylength < sizeof(struct hfs_cat_file))
+ goto out;
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_file));
hfs_inode_write_fork(inode, rec.file.RExtRec,
@@ -481,7 +485,7 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
sizeof(struct hfs_cat_file));
} else {
if (fd.entrylength < sizeof(struct hfs_cat_file))
- /* panic? */;
+ goto out;
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_file));
if (rec.type != HFS_CDR_FIL ||
@@ -498,9 +502,10 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_file));
}
+ res = 0;
out:
hfs_find_exit(&fd);
- return 0;
+ return res;
}
static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c
index 39f5e343bf4d..fdb0edb8a607 100644
--- a/fs/hfs/trans.c
+++ b/fs/hfs/trans.c
@@ -109,7 +109,7 @@ void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, const struct qstr
if (nls_io) {
wchar_t ch;
- while (srclen > 0) {
+ while (srclen > 0 && dstlen > 0) {
size = nls_io->char2uni(src, srclen, &ch);
if (size < 0) {
ch = '?';
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index b8471bf05def..9580179ff535 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -198,6 +198,8 @@ struct hfsplus_sb_info {
#define HFSPLUS_SB_HFSX 3
#define HFSPLUS_SB_CASEFOLD 4
#define HFSPLUS_SB_NOBARRIER 5
+#define HFSPLUS_SB_UID 6
+#define HFSPLUS_SB_GID 7
static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb)
{
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index d131c8ea7eb6..15c14a6a9f7f 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -187,11 +187,11 @@ static void hfsplus_get_perms(struct inode *inode,
mode = be16_to_cpu(perms->mode);
i_uid_write(inode, be32_to_cpu(perms->owner));
- if (!i_uid_read(inode) && !mode)
+ if ((test_bit(HFSPLUS_SB_UID, &sbi->flags)) || (!i_uid_read(inode) && !mode))
inode->i_uid = sbi->uid;
i_gid_write(inode, be32_to_cpu(perms->group));
- if (!i_gid_read(inode) && !mode)
+ if ((test_bit(HFSPLUS_SB_GID, &sbi->flags)) || (!i_gid_read(inode) && !mode))
inode->i_gid = sbi->gid;
if (dir) {
@@ -497,8 +497,11 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
if (type == HFSPLUS_FOLDER) {
struct hfsplus_cat_folder *folder = &entry.folder;
- if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
- /* panic? */;
+ if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) {
+ pr_err("bad catalog folder entry\n");
+ res = -EIO;
+ goto out;
+ }
hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
sizeof(struct hfsplus_cat_folder));
hfsplus_get_perms(inode, &folder->permissions, 1);
@@ -518,8 +521,11 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
} else if (type == HFSPLUS_FILE) {
struct hfsplus_cat_file *file = &entry.file;
- if (fd->entrylength < sizeof(struct hfsplus_cat_file))
- /* panic? */;
+ if (fd->entrylength < sizeof(struct hfsplus_cat_file)) {
+ pr_err("bad catalog file entry\n");
+ res = -EIO;
+ goto out;
+ }
hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
sizeof(struct hfsplus_cat_file));
@@ -550,6 +556,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
pr_err("bad catalog entry used to create inode\n");
res = -EIO;
}
+out:
return res;
}
@@ -558,6 +565,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
struct inode *main_inode = inode;
struct hfs_find_data fd;
hfsplus_cat_entry entry;
+ int res = 0;
if (HFSPLUS_IS_RSRC(inode))
main_inode = HFSPLUS_I(inode)->rsrc_inode;
@@ -576,8 +584,11 @@ int hfsplus_cat_write_inode(struct inode *inode)
if (S_ISDIR(main_inode->i_mode)) {
struct hfsplus_cat_folder *folder = &entry.folder;
- if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
- /* panic? */;
+ if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
+ pr_err("bad catalog folder entry\n");
+ res = -EIO;
+ goto out;
+ }
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_folder));
/* simple node checks? */
@@ -602,8 +613,11 @@ int hfsplus_cat_write_inode(struct inode *inode)
} else {
struct hfsplus_cat_file *file = &entry.file;
- if (fd.entrylength < sizeof(struct hfsplus_cat_file))
- /* panic? */;
+ if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
+ pr_err("bad catalog file entry\n");
+ res = -EIO;
+ goto out;
+ }
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_file));
hfsplus_inode_write_fork(inode, &file->data_fork);
@@ -624,5 +638,5 @@ int hfsplus_cat_write_inode(struct inode *inode)
set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
out:
hfs_find_exit(&fd);
- return 0;
+ return res;
}
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index 047e05c57560..c94a58762ad6 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -140,6 +140,8 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
if (!uid_valid(sbi->uid)) {
pr_err("invalid uid specified\n");
return 0;
+ } else {
+ set_bit(HFSPLUS_SB_UID, &sbi->flags);
}
break;
case opt_gid:
@@ -151,6 +153,8 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
if (!gid_valid(sbi->gid)) {
pr_err("invalid gid specified\n");
return 0;
+ } else {
+ set_bit(HFSPLUS_SB_GID, &sbi->flags);
}
break;
case opt_part:
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 2b9e5743105e..29a39afe2653 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -295,11 +295,11 @@ static void hfsplus_put_super(struct super_block *sb)
hfsplus_sync_fs(sb, 1);
}
+ iput(sbi->alloc_file);
+ iput(sbi->hidden_dir);
hfs_btree_close(sbi->attr_tree);
hfs_btree_close(sbi->cat_tree);
hfs_btree_close(sbi->ext_tree);
- iput(sbi->alloc_file);
- iput(sbi->hidden_dir);
kfree(sbi->s_vhdr_buf);
kfree(sbi->s_backup_vhdr_buf);
unload_nls(sbi->nls);
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index c8d1b2be7854..73342c925a4b 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -398,7 +398,7 @@ int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str)
astr = str->name;
len = str->len;
while (len > 0) {
- int uninitialized_var(dsize);
+ int dsize;
size = asc2unichar(sb, astr, len, &c);
astr += size;
len -= size;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 7d039ba5ae28..47b292f9b4f8 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1205,6 +1205,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
{
struct hugetlbfs_fs_context *ctx = fc->fs_private;
struct fs_parse_result result;
+ struct hstate *h;
char *rest;
unsigned long ps;
int opt;
@@ -1232,7 +1233,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
case Opt_size:
/* memparse() will accept a K/M/G without a digit */
- if (!isdigit(param->string[0]))
+ if (!param->string || !isdigit(param->string[0]))
goto bad_val;
ctx->max_size_opt = memparse(param->string, &rest);
ctx->max_val_type = SIZE_STD;
@@ -1242,23 +1243,24 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
case Opt_nr_inodes:
/* memparse() will accept a K/M/G without a digit */
- if (!isdigit(param->string[0]))
+ if (!param->string || !isdigit(param->string[0]))
goto bad_val;
ctx->nr_inodes = memparse(param->string, &rest);
return 0;
case Opt_pagesize:
ps = memparse(param->string, &rest);
- ctx->hstate = size_to_hstate(ps);
- if (!ctx->hstate) {
+ h = size_to_hstate(ps);
+ if (!h) {
pr_err("Unsupported page size %lu MB\n", ps >> 20);
return -EINVAL;
}
+ ctx->hstate = h;
return 0;
case Opt_min_size:
/* memparse() will accept a K/M/G without a digit */
- if (!isdigit(param->string[0]))
+ if (!param->string || !isdigit(param->string[0]))
goto bad_val;
ctx->min_size_opt = memparse(param->string, &rest);
ctx->min_val_type = SIZE_STD;
diff --git a/fs/inode.c b/fs/inode.c
index 52cc64adb75c..78d7aba27ebd 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -167,8 +167,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_wb_frn_history = 0;
#endif
- if (security_inode_alloc(inode))
- goto out;
spin_lock_init(&inode->i_lock);
lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
@@ -199,11 +197,12 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_fsnotify_mask = 0;
#endif
inode->i_flctx = NULL;
+
+ if (unlikely(security_inode_alloc(inode)))
+ return -ENOMEM;
this_cpu_inc(nr_inodes);
return 0;
-out:
- return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);
@@ -1014,6 +1013,48 @@ void discard_new_inode(struct inode *inode)
EXPORT_SYMBOL(discard_new_inode);
/**
+ * lock_two_inodes - lock two inodes (may be regular files but also dirs)
+ *
+ * Lock any non-NULL argument. The caller must make sure that if they are
+ * passing in two directories, one is not an ancestor of the other. Zero,
+ * one or two objects may be locked by this function.
+ *
+ * @inode1: first inode to lock
+ * @inode2: second inode to lock
+ * @subclass1: inode lock subclass for the first lock obtained
+ * @subclass2: inode lock subclass for the second lock obtained
+ */
+void lock_two_inodes(struct inode *inode1, struct inode *inode2,
+ unsigned subclass1, unsigned subclass2)
+{
+ if (!inode1 || !inode2) {
+ /*
+ * Make sure @subclass1 will be used for the acquired lock.
+ * This is not strictly necessary (no current caller cares) but
+ * let's keep things consistent.
+ */
+ if (!inode1)
+ swap(inode1, inode2);
+ goto lock;
+ }
+
+ /*
+ * If one object is directory and the other is not, we must make sure
+ * to lock directory first as the other object may be its child.
+ */
+ if (S_ISDIR(inode2->i_mode) == S_ISDIR(inode1->i_mode)) {
+ if (inode1 > inode2)
+ swap(inode1, inode2);
+ } else if (!S_ISDIR(inode1->i_mode))
+ swap(inode1, inode2);
+lock:
+ if (inode1)
+ inode_lock_nested(inode1, subclass1);
+ if (inode2 && inode2 != inode1)
+ inode_lock_nested(inode2, subclass2);
+}
+
+/**
* lock_two_nondirectories - take two i_mutexes on non-directory objects
*
* Lock any non-NULL argument that is not a directory.
@@ -2060,10 +2101,6 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
/* Directories are special, and always inherit S_ISGID */
if (S_ISDIR(mode))
mode |= S_ISGID;
- else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
- !in_group_p(inode->i_gid) &&
- !capable_wrt_inode_uidgid(dir, CAP_FSETID))
- mode &= ~S_ISGID;
} else
inode->i_gid = current_fsgid();
inode->i_mode = mode;
@@ -2319,3 +2356,31 @@ int vfs_ioc_fssetxattr_check(struct inode *inode, const struct fsxattr *old_fa,
return 0;
}
EXPORT_SYMBOL(vfs_ioc_fssetxattr_check);
+
+/**
+ * mode_strip_sgid - handle the sgid bit for non-directories
+ * @dir: parent directory inode
+ * @mode: mode of the file to be created in @dir
+ *
+ * If the @mode of the new file has both the S_ISGID and S_IXGRP bit
+ * raised and @dir has the S_ISGID bit raised, ensure that the caller is
+ * either in the group of the parent directory or they have CAP_FSETID
+ * in their user namespace and are privileged over the parent directory.
+ * In all other cases, strip the S_ISGID bit from @mode.
+ *
+ * Return: the new mode to use for the file
+ */
+umode_t mode_strip_sgid(const struct inode *dir, umode_t mode)
+{
+ if ((mode & (S_ISGID | S_IXGRP)) != (S_ISGID | S_IXGRP))
+ return mode;
+ if (S_ISDIR(mode) || !dir || !(dir->i_mode & S_ISGID))
+ return mode;
+ if (in_group_p(dir->i_gid))
+ return mode;
+ if (capable_wrt_inode_uidgid(dir, CAP_FSETID))
+ return mode;
+
+ return mode & ~S_ISGID;
+}
+EXPORT_SYMBOL(mode_strip_sgid);
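mode_strip_sgid() centralizes the setgid stripping previously open-coded in inode_init_owner(). A userspace model of its decision table, where in_group/capable stand in for the kernel's in_group_p()/capable_wrt_inode_uidgid() checks:

#include <stdio.h>
#include <sys/stat.h>

/* Strip S_ISGID from a new non-directory's mode unless the creator is in
 * the parent's group or is suitably privileged. */
static mode_t strip_sgid_model(mode_t dir_mode, mode_t mode,
			       int in_group, int capable)
{
	if ((mode & (S_ISGID | S_IXGRP)) != (S_ISGID | S_IXGRP))
		return mode;
	if (S_ISDIR(mode) || !(dir_mode & S_ISGID))
		return mode;
	if (in_group || capable)
		return mode;
	return mode & ~S_ISGID;
}

int main(void)
{
	mode_t dir = S_IFDIR | S_ISGID | 0775;
	mode_t file = S_IFREG | S_ISGID | S_IXGRP | 0755;

	printf("%o\n", (unsigned)strip_sgid_model(dir, file, 0, 0)); /* 100755: sgid stripped */
	printf("%o\n", (unsigned)strip_sgid_model(dir, file, 1, 0)); /* 102755: sgid kept */
	return 0;
}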
diff --git a/fs/internal.h b/fs/internal.h
index 61aed95f83d1..377f984e9226 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -138,6 +138,8 @@ extern int vfs_open(const struct path *, struct file *);
extern long prune_icache_sb(struct super_block *sb, struct shrink_control *sc);
extern void inode_add_lru(struct inode *inode);
extern int dentry_needs_remove_privs(struct dentry *dentry);
+void lock_two_inodes(struct inode *inode1, struct inode *inode2,
+ unsigned subclass1, unsigned subclass2);
/*
* fs-writeback.c
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e73969fa96bc..2c793e4ccf09 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -63,7 +63,6 @@
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
-#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
@@ -264,10 +263,6 @@ struct io_ring_ctx {
struct async_list pending_async[2];
-#if defined(CONFIG_UNIX)
- struct socket *ring_sock;
-#endif
-
struct list_head task_list;
spinlock_t task_lock;
};
@@ -381,19 +376,6 @@ static struct kmem_cache *req_cachep;
static const struct file_operations io_uring_fops;
-struct sock *io_uring_get_socket(struct file *file)
-{
-#if defined(CONFIG_UNIX)
- if (file->f_op == &io_uring_fops) {
- struct io_ring_ctx *ctx = file->private_data;
-
- return ctx->ring_sock->sk;
- }
-#endif
- return NULL;
-}
-EXPORT_SYMBOL(io_uring_get_socket);
-
static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
@@ -551,7 +533,8 @@ static void io_kill_timeout(struct io_kiocb *req)
atomic_inc(&req->ctx->cq_timeouts);
list_del(&req->list);
io_cqring_fill_event(req->ctx, req->user_data, 0);
- __io_free_req(req);
+ if (refcount_dec_and_test(&req->refs))
+ __io_free_req(req);
}
}
@@ -1255,7 +1238,7 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
*/
const struct bio_vec *bvec = imu->bvec;
- if (offset <= bvec->bv_len) {
+ if (offset < bvec->bv_len) {
iov_iter_advance(iter, offset);
} else {
unsigned long seg_skip;
@@ -1908,6 +1891,9 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
__poll_t mask;
u16 events;
+ if (req->file->f_op->may_pollfree)
+ return -EOPNOTSUPP;
+
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
@@ -2076,12 +2062,12 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req->sequence -= span;
add:
list_add(&req->list, entry);
- spin_unlock_irq(&ctx->completion_lock);
hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
req->timeout.timer.function = io_timeout_fn;
hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts),
HRTIMER_MODE_REL);
+ spin_unlock_irq(&ctx->completion_lock);
return 0;
}
@@ -3076,20 +3062,10 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
-#if defined(CONFIG_UNIX)
- if (ctx->ring_sock) {
- struct sock *sock = ctx->ring_sock->sk;
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
- kfree_skb(skb);
- }
-#else
int i;
for (i = 0; i < ctx->nr_user_files; i++)
fput(ctx->user_files[i]);
-#endif
}
static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
@@ -3133,100 +3109,6 @@ static void io_finish_async(struct io_ring_ctx *ctx)
}
}
-#if defined(CONFIG_UNIX)
-static void io_destruct_skb(struct sk_buff *skb)
-{
- struct io_ring_ctx *ctx = skb->sk->sk_user_data;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++)
- if (ctx->sqo_wq[i])
- flush_workqueue(ctx->sqo_wq[i]);
-
- unix_destruct_scm(skb);
-}
-
-/*
- * Ensure the UNIX gc is aware of our file set, so we are certain that
- * the io_uring can be safely unregistered on process exit, even if we have
- * loops in the file referencing.
- */
-static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
-{
- struct sock *sk = ctx->ring_sock->sk;
- struct scm_fp_list *fpl;
- struct sk_buff *skb;
- int i;
-
- fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
- if (!fpl)
- return -ENOMEM;
-
- skb = alloc_skb(0, GFP_KERNEL);
- if (!skb) {
- kfree(fpl);
- return -ENOMEM;
- }
-
- skb->sk = sk;
- skb->destructor = io_destruct_skb;
-
- fpl->user = get_uid(ctx->user);
- for (i = 0; i < nr; i++) {
- fpl->fp[i] = get_file(ctx->user_files[i + offset]);
- unix_inflight(fpl->user, fpl->fp[i]);
- }
-
- fpl->max = fpl->count = nr;
- UNIXCB(skb).fp = fpl;
- refcount_add(skb->truesize, &sk->sk_wmem_alloc);
- skb_queue_head(&sk->sk_receive_queue, skb);
-
- for (i = 0; i < nr; i++)
- fput(fpl->fp[i]);
-
- return 0;
-}
-
-/*
- * If UNIX sockets are enabled, fd passing can cause a reference cycle which
- * causes regular reference counting to break down. We rely on the UNIX
- * garbage collection to take care of this problem for us.
- */
-static int io_sqe_files_scm(struct io_ring_ctx *ctx)
-{
- unsigned left, total;
- int ret = 0;
-
- total = 0;
- left = ctx->nr_user_files;
- while (left) {
- unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
-
- ret = __io_sqe_files_scm(ctx, this_files, total);
- if (ret)
- break;
- left -= this_files;
- total += this_files;
- }
-
- if (!ret)
- return 0;
-
- while (total < ctx->nr_user_files) {
- fput(ctx->user_files[total]);
- total++;
- }
-
- return ret;
-}
-#else
-static int io_sqe_files_scm(struct io_ring_ctx *ctx)
-{
- return 0;
-}
-#endif
-
static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args)
{
@@ -3280,11 +3162,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
return ret;
}
- ret = io_sqe_files_scm(ctx);
- if (ret)
- io_sqe_files_unregister(ctx);
-
- return ret;
+ return 0;
}
static int io_sq_offload_start(struct io_ring_ctx *ctx,
@@ -3682,13 +3560,6 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_sqe_files_unregister(ctx);
io_eventfd_unregister(ctx);
-#if defined(CONFIG_UNIX)
- if (ctx->ring_sock) {
- ctx->ring_sock->file = NULL; /* so that iput() is called */
- sock_release(ctx->ring_sock);
- }
-#endif
-
io_mem_free(ctx->rings);
io_mem_free(ctx->sq_sqes);
@@ -3734,9 +3605,6 @@ static void io_cancel_async_work(struct io_ring_ctx *ctx,
{
struct io_kiocb *req;
- if (list_empty(&ctx->task_list))
- return;
-
spin_lock_irq(&ctx->task_lock);
list_for_each_entry(req, &ctx->task_list, task_list) {
@@ -3891,6 +3759,11 @@ static const struct file_operations io_uring_fops = {
.fasync = io_uring_fasync,
};
+bool io_is_uring_fops(struct file *file)
+{
+ return file->f_op == &io_uring_fops;
+}
+
static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
struct io_uring_params *p)
{
@@ -3938,45 +3811,26 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
/*
* Allocate an anonymous fd, this is what constitutes the application
* visible backing of an io_uring instance. The application mmaps this
- * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
- * we have to tie this fd to a socket for file garbage collection purposes.
+ * fd to gain access to the SQ/CQ ring details.
*/
static int io_uring_get_fd(struct io_ring_ctx *ctx)
{
struct file *file;
int ret;
-#if defined(CONFIG_UNIX)
- ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
- &ctx->ring_sock);
- if (ret)
- return ret;
-#endif
-
ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
if (ret < 0)
- goto err;
+ return ret;
file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
O_RDWR | O_CLOEXEC);
if (IS_ERR(file)) {
put_unused_fd(ret);
- ret = PTR_ERR(file);
- goto err;
+ return PTR_ERR(file);
}
-#if defined(CONFIG_UNIX)
- ctx->ring_sock->file = file;
- ctx->ring_sock->sk->sk_user_data = ctx;
-#endif
fd_install(ret, file);
return ret;
-err:
-#if defined(CONFIG_UNIX)
- sock_release(ctx->ring_sock);
- ctx->ring_sock = NULL;
-#endif
- return ret;
}
static int io_uring_create(unsigned entries, struct io_uring_params *p)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 53cd7b2bb580..c28ba474a25e 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -23,6 +23,7 @@ static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
struct iomap_page *iop = to_iomap_page(page);
+ unsigned int nr_blocks = PAGE_SIZE / i_blocksize(inode);
if (iop || i_blocksize(inode) == PAGE_SIZE)
return iop;
@@ -32,6 +33,8 @@ iomap_page_create(struct inode *inode, struct page *page)
atomic_set(&iop->write_count, 0);
spin_lock_init(&iop->uptodate_lock);
bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
+ if (PageUptodate(page))
+ bitmap_fill(iop->uptodate, nr_blocks);
/*
* migrate_page_move_mapping() assumes that pages with private data have
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index 558e7c51ce0d..58f80e1b3ac0 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -153,8 +153,8 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry,
struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
int found;
- unsigned long uninitialized_var(block);
- unsigned long uninitialized_var(offset);
+ unsigned long block;
+ unsigned long offset;
struct inode *inode;
struct page *page;
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 5ef99b9ec8be..edb17822f8e6 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -58,28 +58,6 @@ static inline void __buffer_unlink(struct journal_head *jh)
}
/*
- * Move a buffer from the checkpoint list to the checkpoint io list
- *
- * Called with j_list_lock held
- */
-static inline void __buffer_relink_io(struct journal_head *jh)
-{
- transaction_t *transaction = jh->b_cp_transaction;
-
- __buffer_unlink_first(jh);
-
- if (!transaction->t_checkpoint_io_list) {
- jh->b_cpnext = jh->b_cpprev = jh;
- } else {
- jh->b_cpnext = transaction->t_checkpoint_io_list;
- jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev;
- jh->b_cpprev->b_cpnext = jh;
- jh->b_cpnext->b_cpprev = jh;
- }
- transaction->t_checkpoint_io_list = jh;
-}
-
-/*
* Try to release a checkpointed buffer from its transaction.
* Returns 1 if we released it and 2 if we also released the
* whole transaction.
@@ -91,8 +69,7 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
int ret = 0;
struct buffer_head *bh = jh2bh(jh);
- if (jh->b_transaction == NULL && !buffer_locked(bh) &&
- !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
+ if (!jh->b_transaction && !buffer_locked(bh) && !buffer_dirty(bh)) {
JBUFFER_TRACE(jh, "remove from checkpoint list");
ret = __jbd2_journal_remove_checkpoint(jh) + 1;
}
@@ -191,6 +168,7 @@ __flush_batch(journal_t *journal, int *batch_count)
struct buffer_head *bh = journal->j_chkpt_bhs[i];
BUFFER_TRACE(bh, "brelse");
__brelse(bh);
+ journal->j_chkpt_bhs[i] = NULL;
}
*batch_count = 0;
}
@@ -228,7 +206,6 @@ int jbd2_log_do_checkpoint(journal_t *journal)
* OK, we need to start writing disk blocks. Take one transaction
* and write it.
*/
- result = 0;
spin_lock(&journal->j_list_lock);
if (!journal->j_checkpoint_transactions)
goto out;
@@ -251,15 +228,6 @@ restart:
jh = transaction->t_checkpoint_list;
bh = jh2bh(jh);
- if (buffer_locked(bh)) {
- get_bh(bh);
- spin_unlock(&journal->j_list_lock);
- wait_on_buffer(bh);
- /* the journal_head may have gone by now */
- BUFFER_TRACE(bh, "brelse");
- __brelse(bh);
- goto retry;
- }
if (jh->b_transaction != NULL) {
transaction_t *t = jh->b_transaction;
tid_t tid = t->t_tid;
@@ -294,32 +262,50 @@ restart:
spin_lock(&journal->j_list_lock);
goto restart;
}
- if (!buffer_dirty(bh)) {
- if (unlikely(buffer_write_io_error(bh)) && !result)
- result = -EIO;
+ if (!trylock_buffer(bh)) {
+ /*
+ * The buffer is locked: it may be under writeback,
+ * still flushing out from the last couple of cycles,
+ * or being re-added to a new transaction, so we need
+ * to check it again once it is unlocked.
+ */
+ get_bh(bh);
+ spin_unlock(&journal->j_list_lock);
+ wait_on_buffer(bh);
+ /* the journal_head may have gone by now */
+ BUFFER_TRACE(bh, "brelse");
+ __brelse(bh);
+ goto retry;
+ } else if (!buffer_dirty(bh)) {
+ unlock_buffer(bh);
BUFFER_TRACE(bh, "remove from checkpoint");
- if (__jbd2_journal_remove_checkpoint(jh))
- /* The transaction was released; we're done */
+ /*
+ * If the transaction was released or the checkpoint
+ * list was empty, we're done.
+ */
+ if (__jbd2_journal_remove_checkpoint(jh) ||
+ !transaction->t_checkpoint_list)
goto out;
- continue;
+ } else {
+ unlock_buffer(bh);
+ /*
+ * We are about to write the buffer; once we release
+ * the j_list_lock it could be raced by some other
+ * transaction shrink or buffer re-log logic, so leave
+ * it on the checkpoint list and check its status
+ * again to make sure it's clean.
+ */
+ BUFFER_TRACE(bh, "queue");
+ get_bh(bh);
+ J_ASSERT_BH(bh, !buffer_jwrite(bh));
+ journal->j_chkpt_bhs[batch_count++] = bh;
+ transaction->t_chp_stats.cs_written++;
+ transaction->t_checkpoint_list = jh->b_cpnext;
}
- /*
- * Important: we are about to write the buffer, and
- * possibly block, while still holding the journal
- * lock. We cannot afford to let the transaction
- * logic start messing around with this buffer before
- * we write it to disk, as that would break
- * recoverability.
- */
- BUFFER_TRACE(bh, "queue");
- get_bh(bh);
- J_ASSERT_BH(bh, !buffer_jwrite(bh));
- journal->j_chkpt_bhs[batch_count++] = bh;
- __buffer_relink_io(jh);
- transaction->t_chp_stats.cs_written++;
+
if ((batch_count == JBD2_NR_BATCH) ||
- need_resched() ||
- spin_needbreak(&journal->j_list_lock))
+ need_resched() || spin_needbreak(&journal->j_list_lock) ||
+ jh2bh(transaction->t_checkpoint_list) == journal->j_chkpt_bhs[0])
goto unlock_and_flush;
}
@@ -333,46 +319,9 @@ restart:
goto restart;
}
- /*
- * Now we issued all of the transaction's buffers, let's deal
- * with the buffers that are out for I/O.
- */
-restart2:
- /* Did somebody clean up the transaction in the meanwhile? */
- if (journal->j_checkpoint_transactions != transaction ||
- transaction->t_tid != this_tid)
- goto out;
-
- while (transaction->t_checkpoint_io_list) {
- jh = transaction->t_checkpoint_io_list;
- bh = jh2bh(jh);
- if (buffer_locked(bh)) {
- get_bh(bh);
- spin_unlock(&journal->j_list_lock);
- wait_on_buffer(bh);
- /* the journal_head may have gone by now */
- BUFFER_TRACE(bh, "brelse");
- __brelse(bh);
- spin_lock(&journal->j_list_lock);
- goto restart2;
- }
- if (unlikely(buffer_write_io_error(bh)) && !result)
- result = -EIO;
-
- /*
- * Now in whatever state the buffer currently is, we
- * know that it has been written out and so we can
- * drop it from the list
- */
- if (__jbd2_journal_remove_checkpoint(jh))
- break;
- }
out:
spin_unlock(&journal->j_list_lock);
- if (result < 0)
- jbd2_journal_abort(journal, result);
- else
- result = jbd2_cleanup_journal_tail(journal);
+ result = jbd2_cleanup_journal_tail(journal);
return (result < 0) ? result : 0;
}
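The rewritten checkpoint loop above replaces the separate "io list" drain with a trylock_buffer()-based flow: if the buffer is locked, drop the list lock and wait; if clean, remove the checkpoint entry; otherwise batch it for writeback. The control skeleton, reduced to a runnable pthread sketch (the real code drops j_list_lock before sleeping):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static int dirty = 1;

static void checkpoint_one(void)
{
	if (pthread_mutex_trylock(&buf_lock) != 0) {
		/* contended: in jbd2 this is "unlock j_list_lock,
		 * wait_on_buffer(), retry from the top" */
		printf("locked: wait and retry\n");
		return;
	}
	if (!dirty)
		printf("clean: remove from checkpoint list\n");
	else
		printf("dirty: queue for batched writeback\n");
	pthread_mutex_unlock(&buf_lock);
}

int main(void)
{
	checkpoint_one();	/* "dirty: queue for batched writeback" */
	dirty = 0;
	checkpoint_one();	/* "clean: remove from checkpoint list" */
	return 0;
}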
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index d45ceb2e2149..7bd4b1d4224e 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -514,13 +514,13 @@ void jbd2_journal_commit_transaction(journal_t *journal)
*/
jbd2_journal_switch_revoke_table(journal);
+ write_lock(&journal->j_state_lock);
/*
* Reserved credits cannot be claimed anymore, free them
*/
atomic_sub(atomic_read(&journal->j_reserved_credits),
&commit_transaction->t_outstanding_credits);
- write_lock(&journal->j_state_lock);
trace_jbd2_commit_flushing(journal, commit_transaction);
stats.run.rs_flushing = jiffies;
stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
@@ -531,7 +531,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
journal->j_running_transaction = NULL;
start_time = ktime_get();
commit_transaction->t_log_start = journal->j_head;
- wake_up(&journal->j_wait_transaction_locked);
+ wake_up_all(&journal->j_wait_transaction_locked);
write_unlock(&journal->j_state_lock);
jbd_debug(3, "JBD2: commit phase 2a\n");
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index b7c5819bfc41..81bd7b29a10b 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -562,12 +562,14 @@ static int __jbd2_journal_force_commit(journal_t *journal)
}
/**
- * Force and wait upon a commit if the calling process is not within
- * transaction. This is used for forcing out undo-protected data which contains
- * bitmaps, when the fs is running out of space.
+ * jbd2_journal_force_commit_nested - Force and wait upon a commit if the
+ * calling process is not within transaction.
*
* @journal: journal to force
* Returns true if progress was made.
+ *
+ * This is used for forcing out undo-protected data which contains
+ * bitmaps, when the fs is running out of space.
*/
int jbd2_journal_force_commit_nested(journal_t *journal)
{
@@ -578,7 +580,7 @@ int jbd2_journal_force_commit_nested(journal_t *journal)
}
/**
- * int journal_force_commit() - force any uncommitted transactions
+ * jbd2_journal_force_commit() - force any uncommitted transactions
* @journal: journal to force
*
* Caller want unconditional commit. We can only force the running transaction
@@ -1266,7 +1268,7 @@ journal_t *jbd2_journal_init_inode(struct inode *inode)
* superblock as being NULL to prevent the journal destroy from writing
* back a bogus superblock.
*/
-static void journal_fail_superblock (journal_t *journal)
+static void journal_fail_superblock(journal_t *journal)
{
struct buffer_head *bh = journal->j_sb_buffer;
brelse(bh);
@@ -1353,9 +1355,11 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
return -EIO;
}
- trace_jbd2_write_superblock(journal, write_flags);
if (!(journal->j_flags & JBD2_BARRIER))
write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
+
+ trace_jbd2_write_superblock(journal, write_flags);
+
if (buffer_write_io_error(bh)) {
/*
* Oh, dear. A previous attempt to write the journal
@@ -1634,7 +1638,7 @@ static int load_superblock(journal_t *journal)
/**
- * int jbd2_journal_load() - Read journal from disk.
+ * jbd2_journal_load() - Read journal from disk.
* @journal: Journal to act on.
*
* Given a journal_t structure which tells us which disk blocks contain
@@ -1704,7 +1708,7 @@ recovery_error:
}
/**
- * void jbd2_journal_destroy() - Release a journal_t structure.
+ * jbd2_journal_destroy() - Release a journal_t structure.
* @journal: Journal to act on.
*
* Release a journal_t structure once it is no longer in use by the
@@ -1780,7 +1784,7 @@ int jbd2_journal_destroy(journal_t *journal)
/**
- *int jbd2_journal_check_used_features () - Check if features specified are used.
+ * jbd2_journal_check_used_features() - Check if features specified are used.
* @journal: Journal to check.
* @compat: bitmask of compatible features
* @ro: bitmask of features that force read-only mount
@@ -1790,7 +1794,7 @@ int jbd2_journal_destroy(journal_t *journal)
* features. Return true (non-zero) if it does.
**/
-int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat,
+int jbd2_journal_check_used_features(journal_t *journal, unsigned long compat,
unsigned long ro, unsigned long incompat)
{
journal_superblock_t *sb;
@@ -1815,7 +1819,7 @@ int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat,
}
/**
- * int jbd2_journal_check_available_features() - Check feature set in journalling layer
+ * jbd2_journal_check_available_features() - Check feature set in journalling layer
* @journal: Journal to check.
* @compat: bitmask of compatible features
* @ro: bitmask of features that force read-only mount
@@ -1825,7 +1829,7 @@ int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat,
* all of a given set of features on this journal. Return true
* (non-zero) if it can. */
-int jbd2_journal_check_available_features (journal_t *journal, unsigned long compat,
+int jbd2_journal_check_available_features(journal_t *journal, unsigned long compat,
unsigned long ro, unsigned long incompat)
{
if (!compat && !ro && !incompat)
@@ -1847,7 +1851,7 @@ int jbd2_journal_check_available_features (journal_t *journal, unsigned long com
}
/**
- * int jbd2_journal_set_features () - Mark a given journal feature in the superblock
+ * jbd2_journal_set_features() - Mark a given journal feature in the superblock
* @journal: Journal to act on.
* @compat: bitmask of compatible features
* @ro: bitmask of features that force read-only mount
@@ -1858,7 +1862,7 @@ int jbd2_journal_check_available_features (journal_t *journal, unsigned long com
*
*/
-int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+int jbd2_journal_set_features(journal_t *journal, unsigned long compat,
unsigned long ro, unsigned long incompat)
{
#define INCOMPAT_FEATURE_ON(f) \
@@ -1929,7 +1933,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
}
/*
- * jbd2_journal_clear_features () - Clear a given journal feature in the
+ * jbd2_journal_clear_features() - Clear a given journal feature in the
* superblock
* @journal: Journal to act on.
* @compat: bitmask of compatible features
@@ -1956,7 +1960,7 @@ void jbd2_journal_clear_features(journal_t *journal, unsigned long compat,
EXPORT_SYMBOL(jbd2_journal_clear_features);
/**
- * int jbd2_journal_flush () - Flush journal
+ * jbd2_journal_flush() - Flush journal
* @journal: Journal to act on.
*
* Flush all data for a given journal to disk and empty the journal.
@@ -2031,7 +2035,7 @@ out:
}
/**
- * int jbd2_journal_wipe() - Wipe journal contents
+ * jbd2_journal_wipe() - Wipe journal contents
* @journal: Journal to act on.
* @write: flag (see below)
*
@@ -2072,7 +2076,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
}
/**
- * void jbd2_journal_abort () - Shutdown the journal immediately.
+ * jbd2_journal_abort () - Shutdown the journal immediately.
* @journal: the journal to shutdown.
* @errno: an error number to record in the journal indicating
* the reason for the shutdown.
@@ -2158,7 +2162,7 @@ void jbd2_journal_abort(journal_t *journal, int errno)
}
/**
- * int jbd2_journal_errno () - returns the journal's error state.
+ * jbd2_journal_errno() - returns the journal's error state.
* @journal: journal to examine.
*
* This is the errno number set with jbd2_journal_abort(), the last
@@ -2182,7 +2186,7 @@ int jbd2_journal_errno(journal_t *journal)
}
/**
- * int jbd2_journal_clear_err () - clears the journal's error state
+ * jbd2_journal_clear_err() - clears the journal's error state
* @journal: journal to act on.
*
* An error must be cleared or acked to take a FS out of readonly
@@ -2202,7 +2206,7 @@ int jbd2_journal_clear_err(journal_t *journal)
}
/**
- * void jbd2_journal_ack_err() - Ack journal err.
+ * jbd2_journal_ack_err() - Ack journal err.
* @journal: journal to act on.
*
* An error must be cleared or acked to take a FS out of readonly
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index a4967b27ffb6..ed923a9765c2 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -247,6 +247,8 @@ int jbd2_journal_recover(journal_t *journal)
journal_superblock_t * sb;
struct recovery_info info;
+ errseq_t wb_err;
+ struct address_space *mapping;
memset(&info, 0, sizeof(info));
sb = journal->j_superblock;
@@ -264,6 +266,9 @@ int jbd2_journal_recover(journal_t *journal)
return 0;
}
+ wb_err = 0;
+ mapping = journal->j_fs_dev->bd_inode->i_mapping;
+ errseq_check_and_advance(&mapping->wb_err, &wb_err);
err = do_one_pass(journal, &info, PASS_SCAN);
if (!err)
err = do_one_pass(journal, &info, PASS_REVOKE);
@@ -284,6 +289,9 @@ int jbd2_journal_recover(journal_t *journal)
err2 = sync_blockdev(journal->j_fs_dev);
if (!err)
err = err2;
+ err2 = errseq_check_and_advance(&mapping->wb_err, &wb_err);
+ if (!err)
+ err = err2;
/* Make sure all replayed data is on permanent storage */
if (journal->j_flags & JBD2_BARRIER) {
err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
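The recovery change samples the block device's writeback error sequence before replay and checks it again after sync_blockdev(), so an I/O error during replay fails recovery even if another opener of the device consumed the error first. A simplified userspace model of the errseq idea (a bare counter stands in for the kernel's errseq_t encoding):

#include <stdio.h>

/* Toy errseq: each recorded error bumps a sequence; a reader who sampled
 * the sequence earlier can later detect "an error happened since then". */
struct toy_errseq { unsigned seq; int err; };

static void toy_set_err(struct toy_errseq *e, int err)
{
	e->err = err;
	e->seq++;
}

static int toy_check_and_advance(struct toy_errseq *e, unsigned *since)
{
	int err = (e->seq != *since) ? e->err : 0;

	*since = e->seq;	/* advance the caller's cursor */
	return err;
}

int main(void)
{
	struct toy_errseq wb = { 0, 0 };
	unsigned cursor = 0;

	toy_check_and_advance(&wb, &cursor);	/* sample before replay */
	toy_set_err(&wb, -5);			/* -EIO during replay */
	printf("err=%d\n", toy_check_and_advance(&wb, &cursor));	/* -5 */
	return 0;
}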
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index be05fb96757c..91c2d3f6d1b3 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -149,7 +149,7 @@ static void wait_transaction_locked(journal_t *journal)
int need_to_start;
tid_t tid = journal->j_running_transaction->t_tid;
- prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
+ prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait,
TASK_UNINTERRUPTIBLE);
need_to_start = !tid_geq(journal->j_commit_request, tid);
read_unlock(&journal->j_state_lock);
@@ -175,7 +175,7 @@ static void wait_transaction_switching(journal_t *journal)
read_unlock(&journal->j_state_lock);
return;
}
- prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
+ prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait,
TASK_UNINTERRUPTIBLE);
read_unlock(&journal->j_state_lock);
/*
@@ -490,7 +490,7 @@ EXPORT_SYMBOL(jbd2__journal_start);
/**
- * handle_t *jbd2_journal_start() - Obtain a new handle.
+ * jbd2_journal_start() - Obtain a new handle.
* @journal: Journal to start transaction on.
* @nblocks: number of block buffer we might modify
*
@@ -525,7 +525,7 @@ void jbd2_journal_free_reserved(handle_t *handle)
EXPORT_SYMBOL(jbd2_journal_free_reserved);
/**
- * int jbd2_journal_start_reserved() - start reserved handle
+ * jbd2_journal_start_reserved() - start reserved handle
* @handle: handle to start
* @type: for handle statistics
* @line_no: for handle statistics
@@ -579,7 +579,7 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
EXPORT_SYMBOL(jbd2_journal_start_reserved);
/**
- * int jbd2_journal_extend() - extend buffer credits.
+ * jbd2_journal_extend() - extend buffer credits.
* @handle: handle to 'extend'
* @nblocks: nr blocks to try to extend by.
*
@@ -659,7 +659,7 @@ error_out:
/**
- * int jbd2_journal_restart() - restart a handle .
+ * jbd2__journal_restart() - restart a handle .
* @handle: handle to restart
* @nblocks: nr credits requested
* @gfp_mask: memory allocation flags (for start_this_handle)
@@ -736,7 +736,7 @@ int jbd2_journal_restart(handle_t *handle, int nblocks)
EXPORT_SYMBOL(jbd2_journal_restart);
/**
- * void jbd2_journal_lock_updates () - establish a transaction barrier.
+ * jbd2_journal_lock_updates () - establish a transaction barrier.
* @journal: Journal to establish a barrier on.
*
* This locks out any further updates from being started, and blocks
@@ -795,7 +795,7 @@ void jbd2_journal_lock_updates(journal_t *journal)
}
/**
- * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier
+ * jbd2_journal_unlock_updates () - release barrier
* @journal: Journal to release the barrier on.
*
* Release a transaction barrier obtained with jbd2_journal_lock_updates().
@@ -810,7 +810,7 @@ void jbd2_journal_unlock_updates (journal_t *journal)
write_lock(&journal->j_state_lock);
--journal->j_barrier_count;
write_unlock(&journal->j_state_lock);
- wake_up(&journal->j_wait_transaction_locked);
+ wake_up_all(&journal->j_wait_transaction_locked);
}
static void warn_dirty_buffer(struct buffer_head *bh)
@@ -1103,7 +1103,8 @@ out:
}
/**
- * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
+ * jbd2_journal_get_write_access() - notify intent to modify a buffer
+ * for metadata (not data) update.
* @handle: transaction to add buffer modifications to
* @bh: bh to be used for metadata writes
*
@@ -1147,7 +1148,7 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
* unlocked buffer beforehand. */
/**
- * int jbd2_journal_get_create_access () - notify intent to use newly created bh
+ * jbd2_journal_get_create_access () - notify intent to use newly created bh
* @handle: transaction to new buffer to
* @bh: new buffer.
*
@@ -1227,7 +1228,7 @@ out:
}
/**
- * int jbd2_journal_get_undo_access() - Notify intent to modify metadata with
+ * jbd2_journal_get_undo_access() - Notify intent to modify metadata with
* non-rewindable consequences
* @handle: transaction
* @bh: buffer to undo
@@ -1304,7 +1305,7 @@ out:
}
/**
- * void jbd2_journal_set_triggers() - Add triggers for commit writeout
+ * jbd2_journal_set_triggers() - Add triggers for commit writeout
* @bh: buffer to trigger on
* @type: struct jbd2_buffer_trigger_type containing the trigger(s).
*
@@ -1346,7 +1347,7 @@ void jbd2_buffer_abort_trigger(struct journal_head *jh,
}
/**
- * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
+ * jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
* @handle: transaction to add buffer to.
* @bh: buffer to mark
*
@@ -1375,8 +1376,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
struct journal_head *jh;
int ret = 0;
- if (is_handle_aborted(handle))
- return -EROFS;
if (!buffer_jbd(bh))
return -EUCLEAN;
@@ -1423,6 +1422,18 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
journal = transaction->t_journal;
jbd_lock_bh_state(bh);
+ if (is_handle_aborted(handle)) {
+ /*
+ * Check journal aborting with @jh->b_state_lock locked,
+ * since 'jh->b_transaction' could be replaced with
+ * 'jh->b_next_transaction' during old transaction
+ * committing if journal aborted, which may fail
+ * assertion on 'jh->b_frozen_data == NULL'.
+ */
+ ret = -EROFS;
+ goto out_unlock_bh;
+ }
+
if (jh->b_modified == 0) {
/*
* This buffer's got modified and becoming part
@@ -1514,7 +1525,7 @@ out:
}
/**
- * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
+ * jbd2_journal_forget() - bforget() for potentially-journaled buffers.
* @handle: transaction handle
* @bh: bh to 'forget'
*
@@ -1689,7 +1700,7 @@ not_jbd:
}
/**
- * int jbd2_journal_stop() - complete a transaction
+ * jbd2_journal_stop() - complete a transaction
* @handle: transaction to complete.
*
* All done for a particular handle.
@@ -2037,7 +2048,7 @@ out:
}
/**
- * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
+ * jbd2_journal_try_to_free_buffers() - try to free page buffers.
* @journal: journal for operation
* @page: to try and free
* @gfp_mask: we use the mask to detect how hard should we try to release
@@ -2376,7 +2387,7 @@ zap_buffer_unlocked:
}
/**
- * void jbd2_journal_invalidatepage()
+ * jbd2_journal_invalidatepage()
* @journal: journal to use for flush...
* @page: page to flush
* @offset: start of the range to invalidate
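All of the jbd2 hunks above enforce the same kernel-doc rule: the opening line names the function, with no leading return type, and the return value is documented separately. A minimal sketch of the convention (the function name here is hypothetical):

/**
 * jbd2_journal_example() - one-line summary, name and parentheses only
 * @handle: handle to operate on
 *
 * Leading return types such as "int" or "void" on the summary line
 * confuse the kernel-doc parser, which expects "name() - summary".
 *
 * Return: 0 on success, a negative errno on failure.
 */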
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index 837cd55fd4c5..6ae9d6fefb86 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -211,7 +211,10 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
ic->scan_dents = NULL;
cond_resched();
}
- jffs2_build_xattr_subsystem(c);
+ ret = jffs2_build_xattr_subsystem(c);
+ if (ret)
+ goto exit;
+
c->flags &= ~JFFS2_SB_FLAG_BUILDING;
dbg_fsbuild("FS build complete\n");
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 83b8f06b4a64..7e9abdb89712 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -401,7 +401,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
{
size_t retlen;
int ret;
- uint32_t uninitialized_var(bad_offset);
+ uint32_t bad_offset;
switch (jffs2_block_check_erase(c, jeb, &bad_offset)) {
case -EAGAIN: goto refile;
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 34880a4c2173..94bd4bbd3787 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -137,19 +137,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
pgoff_t index = pos >> PAGE_SHIFT;
- uint32_t pageofs = index << PAGE_SHIFT;
int ret = 0;
jffs2_dbg(1, "%s()\n", __func__);
- if (pageofs > inode->i_size) {
- /* Make new hole frag from old EOF to new page */
+ if (pos > inode->i_size) {
+ /* Make new hole frag from old EOF to new position */
struct jffs2_raw_inode ri;
struct jffs2_full_dnode *fn;
uint32_t alloc_len;
- jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
- (unsigned int)inode->i_size, pageofs);
+ jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new position\n",
+ (unsigned int)inode->i_size, (uint32_t)pos);
ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
@@ -169,10 +168,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
ri.mode = cpu_to_jemode(inode->i_mode);
ri.uid = cpu_to_je16(i_uid_read(inode));
ri.gid = cpu_to_je16(i_gid_read(inode));
- ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
+ ri.isize = cpu_to_je32((uint32_t)pos);
ri.atime = ri.ctime = ri.mtime = cpu_to_je32(JFFS2_NOW());
ri.offset = cpu_to_je32(inode->i_size);
- ri.dsize = cpu_to_je32(pageofs - inode->i_size);
+ ri.dsize = cpu_to_je32((uint32_t)pos - inode->i_size);
ri.csize = cpu_to_je32(0);
ri.compr = JFFS2_COMPR_ZERO;
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
@@ -202,7 +201,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
goto out_err;
}
jffs2_complete_reservation(c);
- inode->i_size = pageofs;
+ inode->i_size = pos;
mutex_unlock(&f->sem);
}
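The switch from a page-aligned offset to the raw write position matters whenever the write does not start on a page boundary. A worked example with illustrative values (PAGE_SHIFT == 12):

/*
 *   pos     = 0x1234;               write starts here
 *   index   = pos >> PAGE_SHIFT;    == 1
 *   pageofs = index << PAGE_SHIFT;  == 0x1000
 *
 * With i_size == 0x0800, sizing the hole from pageofs gives
 * dsize == 0x1000 - 0x0800 == 0x0800 and ends the hole at 0x1000,
 * short of the write; using pos extends it by the full
 * 0x1234 - 0x0800 == 0x0A34 bytes, so isize and the hole frag agree
 * with where the data actually lands.
 */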
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index da3e18503c65..acb4492f5970 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -772,10 +772,10 @@ void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c)
}
#define XREF_TMPHASH_SIZE (128)
-void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
+int jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
{
struct jffs2_xattr_ref *ref, *_ref;
- struct jffs2_xattr_ref *xref_tmphash[XREF_TMPHASH_SIZE];
+ struct jffs2_xattr_ref **xref_tmphash;
struct jffs2_xattr_datum *xd, *_xd;
struct jffs2_inode_cache *ic;
struct jffs2_raw_node_ref *raw;
@@ -784,9 +784,12 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
+ xref_tmphash = kcalloc(XREF_TMPHASH_SIZE,
+ sizeof(struct jffs2_xattr_ref *), GFP_KERNEL);
+ if (!xref_tmphash)
+ return -ENOMEM;
+
/* Phase.1 : Merge same xref */
- for (i=0; i < XREF_TMPHASH_SIZE; i++)
- xref_tmphash[i] = NULL;
for (ref=c->xref_temp; ref; ref=_ref) {
struct jffs2_xattr_ref *tmp;
@@ -884,6 +887,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
"%u of xref (%u dead, %u orphan) found.\n",
xdatum_count, xdatum_unchecked_count, xdatum_orphan_count,
xref_count, xref_dead_count, xref_orphan_count);
+ kfree(xref_tmphash);
+ return 0;
}
struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h
index 720007b2fd65..1b5030a3349d 100644
--- a/fs/jffs2/xattr.h
+++ b/fs/jffs2/xattr.h
@@ -71,7 +71,7 @@ static inline int is_xattr_ref_dead(struct jffs2_xattr_ref *ref)
#ifdef CONFIG_JFFS2_FS_XATTR
extern void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c);
-extern void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c);
+extern int jffs2_build_xattr_subsystem(struct jffs2_sb_info *c);
extern void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c);
extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
@@ -103,7 +103,7 @@ extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t);
#else
#define jffs2_init_xattr_subsystem(c)
-#define jffs2_build_xattr_subsystem(c)
+#define jffs2_build_xattr_subsystem(c) (0)
#define jffs2_clear_xattr_subsystem(c)
#define jffs2_xattr_do_crccheck_inode(c, ic)
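Since callers now check the result, the CONFIG_JFFS2_FS_XATTR=n stub must expand to a value rather than to nothing. The general shape of the pattern, with hypothetical names:

#ifdef CONFIG_FOO
int foo_build(struct foo *f);		/* real implementation, may fail */
#else
#define foo_build(f) (0)		/* config-off stub must still yield 0 */
#endif

	/* a caller can then test unconditionally under either config: */
	err = foo_build(f);
	if (err)
		goto exit;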
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index d3cb27487c70..deb54efb5601 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -63,10 +63,10 @@
*/
static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
-static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval);
-static int dbBackSplit(dmtree_t * tp, int leafno);
-static int dbJoin(dmtree_t * tp, int leafno, int newval);
-static void dbAdjTree(dmtree_t * tp, int leafno, int newval);
+static void dbSplit(dmtree_t *tp, int leafno, int splitsz, int newval, bool is_ctl);
+static int dbBackSplit(dmtree_t *tp, int leafno, bool is_ctl);
+static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl);
+static void dbAdjTree(dmtree_t *tp, int leafno, int newval, bool is_ctl);
static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
int level);
static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
@@ -87,7 +87,7 @@ static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
static int dbFindBits(u32 word, int l2nb);
static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
-static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
+static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl);
static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
@@ -155,7 +155,7 @@ int dbMount(struct inode *ipbmap)
struct bmap *bmp;
struct dbmap_disk *dbmp_le;
struct metapage *mp;
- int i;
+ int i, err;
/*
* allocate/initialize the in-memory bmap descriptor
@@ -170,30 +170,53 @@ int dbMount(struct inode *ipbmap)
BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
PSIZE, 0);
if (mp == NULL) {
- kfree(bmp);
- return -EIO;
+ err = -EIO;
+ goto err_kfree_bmp;
}
/* copy the on-disk bmap descriptor to its in-memory version. */
dbmp_le = (struct dbmap_disk *) mp->data;
bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
+
bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
+ if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE ||
+ bmp->db_l2nbperpage < 0) {
+ err = -EINVAL;
+ goto err_release_metapage;
+ }
+
bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
if (!bmp->db_numag) {
- release_metapage(mp);
- kfree(bmp);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_release_metapage;
}
bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
+ if (bmp->db_maxag >= MAXAG || bmp->db_maxag < 0 ||
+ bmp->db_agpref >= MAXAG || bmp->db_agpref < 0) {
+ err = -EINVAL;
+ goto err_release_metapage;
+ }
+
bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
+ if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG ||
+ bmp->db_agl2size < 0) {
+ err = -EINVAL;
+ goto err_release_metapage;
+ }
+
+ if (((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
+ err = -EINVAL;
+ goto err_release_metapage;
+ }
+
for (i = 0; i < MAXAG; i++)
bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]);
bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize);
@@ -214,6 +237,12 @@ int dbMount(struct inode *ipbmap)
BMAP_LOCK_INIT(bmp);
return (0);
+
+err_release_metapage:
+ release_metapage(mp);
+err_kfree_bmp:
+ kfree(bmp);
+ return err;
}
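The dbMount() rewrite replaces repeated release/kfree/return triples with the kernel's usual unwind ladder, which keeps each newly added validity check down to two lines. Its shape in isolation (all names hypothetical):

static int example_mount(void)
{
	struct a *a;
	struct b *b;
	int err;

	a = alloc_a();
	if (!a)
		return -ENOMEM;

	b = grab_b();
	if (!b) {
		err = -EIO;
		goto err_free_a;
	}

	if (!sane(b)) {		/* each added check reuses the labels */
		err = -EINVAL;
		goto err_release_b;
	}
	return 0;

err_release_b:
	release_b(b);
err_free_a:
	free_a(a);
	return err;
}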
@@ -247,6 +276,7 @@ int dbUnmount(struct inode *ipbmap, int mounterror)
/* free the memory for the in-memory bmap. */
kfree(bmp);
+ JFS_SBI(ipbmap->i_sb)->bmap = NULL;
return (0);
}
@@ -1755,7 +1785,7 @@ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
* dbFindLeaf() returns the index of the leaf at which
* free space was found.
*/
- rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx);
+ rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx, true);
/* release the buffer.
*/
@@ -2002,9 +2032,12 @@ dbAllocDmapLev(struct bmap * bmp,
* free space. if sufficient free space is found, dbFindLeaf()
* returns the index of the leaf at which free space was found.
*/
- if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
+ if (dbFindLeaf((dmtree_t *) &dp->tree, l2nb, &leafidx, false))
return -ENOSPC;
+ if (leafidx < 0)
+ return -EIO;
+
/* determine the block number within the file system corresponding
* to the leaf at which free space was found.
*/
@@ -2138,7 +2171,7 @@ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
* system.
*/
if (dp->tree.stree[word] == NOFREE)
- dbBackSplit((dmtree_t *) & dp->tree, word);
+ dbBackSplit((dmtree_t *)&dp->tree, word, false);
dbAllocBits(bmp, dp, blkno, nblocks);
}
@@ -2224,7 +2257,7 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
* the binary system of the leaves if need be.
*/
dbSplit(tp, word, BUDMIN,
- dbMaxBud((u8 *) & dp->wmap[word]));
+ dbMaxBud((u8 *)&dp->wmap[word]), false);
word += 1;
} else {
@@ -2264,7 +2297,7 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
* system of the leaves to reflect the current
* allocation (size).
*/
- dbSplit(tp, word, size, NOFREE);
+ dbSplit(tp, word, size, NOFREE, false);
/* get the number of dmap words handled */
nw = BUDSIZE(size, BUDMIN);
@@ -2371,7 +2404,7 @@ static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
/* update the leaf for this dmap word.
*/
rc = dbJoin(tp, word,
- dbMaxBud((u8 *) & dp->wmap[word]));
+ dbMaxBud((u8 *)&dp->wmap[word]), false);
if (rc)
return rc;
@@ -2404,7 +2437,7 @@ static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
/* update the leaf.
*/
- rc = dbJoin(tp, word, size);
+ rc = dbJoin(tp, word, size, false);
if (rc)
return rc;
@@ -2556,14 +2589,14 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
* that it is at the front of a binary buddy system.
*/
if (oldval == NOFREE) {
- rc = dbBackSplit((dmtree_t *) dcp, leafno);
+ rc = dbBackSplit((dmtree_t *)dcp, leafno, true);
if (rc)
return rc;
oldval = dcp->stree[ti];
}
- dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval);
+ dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval, true);
} else {
- rc = dbJoin((dmtree_t *) dcp, leafno, newval);
+ rc = dbJoin((dmtree_t *) dcp, leafno, newval, true);
if (rc)
return rc;
}
@@ -2592,7 +2625,7 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
*/
if (alloc) {
dbJoin((dmtree_t *) dcp, leafno,
- oldval);
+ oldval, true);
} else {
/* the dbJoin() above might have
* caused a larger binary buddy system
@@ -2602,9 +2635,9 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
*/
if (dcp->stree[ti] == NOFREE)
dbBackSplit((dmtree_t *)
- dcp, leafno);
+ dcp, leafno, true);
dbSplit((dmtree_t *) dcp, leafno,
- dcp->budmin, oldval);
+ dcp->budmin, oldval, true);
}
/* release the buffer and return the error.
@@ -2652,7 +2685,7 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
-static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
+static void dbSplit(dmtree_t *tp, int leafno, int splitsz, int newval, bool is_ctl)
{
int budsz;
int cursz;
@@ -2674,7 +2707,7 @@ static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
while (cursz >= splitsz) {
/* update the buddy's leaf with its new value.
*/
- dbAdjTree(tp, leafno ^ budsz, cursz);
+ dbAdjTree(tp, leafno ^ budsz, cursz, is_ctl);
/* on to the next size and buddy.
*/
@@ -2686,7 +2719,7 @@ static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
/* adjust the dmap tree to reflect the specified leaf's new
* value.
*/
- dbAdjTree(tp, leafno, newval);
+ dbAdjTree(tp, leafno, newval, is_ctl);
}
@@ -2717,7 +2750,7 @@ static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
-static int dbBackSplit(dmtree_t * tp, int leafno)
+static int dbBackSplit(dmtree_t *tp, int leafno, bool is_ctl)
{
int budsz, bud, w, bsz, size;
int cursz;
@@ -2768,7 +2801,7 @@ static int dbBackSplit(dmtree_t * tp, int leafno)
* system in two.
*/
cursz = leaf[bud] - 1;
- dbSplit(tp, bud, cursz, cursz);
+ dbSplit(tp, bud, cursz, cursz, is_ctl);
break;
}
}
@@ -2796,7 +2829,7 @@ static int dbBackSplit(dmtree_t * tp, int leafno)
*
* RETURN VALUES: none
*/
-static int dbJoin(dmtree_t * tp, int leafno, int newval)
+static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl)
{
int budsz, buddy;
s8 *leaf;
@@ -2851,12 +2884,12 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
if (leafno < buddy) {
/* leafno is the left buddy.
*/
- dbAdjTree(tp, buddy, NOFREE);
+ dbAdjTree(tp, buddy, NOFREE, is_ctl);
} else {
/* buddy is the left buddy and becomes
* leafno.
*/
- dbAdjTree(tp, leafno, NOFREE);
+ dbAdjTree(tp, leafno, NOFREE, is_ctl);
leafno = buddy;
}
@@ -2869,7 +2902,7 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
/* update the leaf value.
*/
- dbAdjTree(tp, leafno, newval);
+ dbAdjTree(tp, leafno, newval, is_ctl);
return 0;
}
@@ -2890,15 +2923,20 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
*
* RETURN VALUES: none
*/
-static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
+static void dbAdjTree(dmtree_t *tp, int leafno, int newval, bool is_ctl)
{
int lp, pp, k;
- int max;
+ int max, size;
+
+ size = is_ctl ? CTLTREESIZE : TREESIZE;
/* pick up the index of the leaf for this leafno.
*/
lp = leafno + le32_to_cpu(tp->dmt_leafidx);
+ if (WARN_ON_ONCE(lp >= size || lp < 0))
+ return;
+
/* is the current value the same as the old value ? if so,
* there is nothing to do.
*/
@@ -2959,14 +2997,18 @@ static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
* leafidx - return pointer to be set to the index of the leaf
* describing at least l2nb free blocks if sufficient
* free blocks are found.
+ * is_ctl - determines if the tree is of type ctl
*
* RETURN VALUES:
* 0 - success
* -ENOSPC - insufficient free blocks.
*/
-static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
+static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl)
{
int ti, n = 0, k, x = 0;
+ int max_size;
+
+ max_size = is_ctl ? CTLTREESIZE : TREESIZE;
/* first check the root of the tree to see if there is
* sufficient free space.
@@ -2987,6 +3029,8 @@ static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
/* sufficient free space found. move to the next
* level (or quit if this is the last level).
*/
+ if (x + n > max_size)
+ return -ENOSPC;
if (l2nb <= tp->dmt_stree[x + n])
break;
}
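The is_ctl plumbing exists so the bounds guards can use the right limit: a dmap tree has TREESIZE stree slots while a dmapctl tree has CTLTREESIZE, and an index valid for one can overrun the other. Reduced to its essentials, the guard added to dbAdjTree() is:

	size = is_ctl ? CTLTREESIZE : TREESIZE;
	lp = leafno + le32_to_cpu(tp->dmt_leafidx);
	if (WARN_ON_ONCE(lp < 0 || lp >= size))
		return;		/* corrupt on-disk map; refuse to index stree[] */

so a crafted image with an out-of-range dmt_leafidx warns once instead of writing past the array.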
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 3acc954f7c04..077a87e53020 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -633,6 +633,11 @@ int dtSearch(struct inode *ip, struct component_name * key, ino_t * data,
for (base = 0, lim = p->header.nextindex; lim; lim >>= 1) {
index = base + (lim >> 1);
+ if (stbl[index] < 0) {
+ rc = -EIO;
+ goto out;
+ }
+
if (p->header.flag & BT_LEAF) {
/* uppercase leaf name to compare */
cmp =
@@ -1970,7 +1975,7 @@ static int dtSplitRoot(tid_t tid,
do {
f = &rp->slot[fsi];
fsi = f->next;
- } while (fsi != -1);
+ } while (fsi >= 0);
f->next = n;
}
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index f65bd6b35412..d4e063dbb9a0 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -508,6 +508,11 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
* blocks in the map. in that case, we'll start off with the
* maximum free.
*/
+
+ /* give up if no space left */
+ if (bmp->db_maxfreebud == -1)
+ return -ENOSPC;
+
max = (s64) 1 << bmp->db_maxfreebud;
if (*nblocks >= max && *nblocks > nbperpage)
nb = nblks = (max > nbperpage) ? max : nbperpage;
diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h
index b5d702df7111..33ef13a0b110 100644
--- a/fs/jfs/jfs_filsys.h
+++ b/fs/jfs/jfs_filsys.h
@@ -122,7 +122,9 @@
#define NUM_INODE_PER_IAG INOSPERIAG
#define MINBLOCKSIZE 512
+#define L2MINBLOCKSIZE 9
#define MAXBLOCKSIZE 4096
+#define L2MAXBLOCKSIZE 12
#define MAXFILESIZE ((s64)1 << 52)
#define JFS_LINK_MAX 0xffffffff
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 937ca07b58b1..b0965f3ef186 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -195,6 +195,7 @@ int diUnmount(struct inode *ipimap, int mounterror)
* free in-memory control structure
*/
kfree(imap);
+ JFS_IP(ipimap)->i_imap = NULL;
return (0);
}
@@ -1321,7 +1322,7 @@ diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
int diAlloc(struct inode *pip, bool dir, struct inode *ip)
{
int rc, ino, iagno, addext, extno, bitno, sword;
- int nwords, rem, i, agno;
+ int nwords, rem, i, agno, dn_numag;
u32 mask, inosmap, extsmap;
struct inode *ipimap;
struct metapage *mp;
@@ -1357,6 +1358,9 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
/* get the ag number of this iag */
agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
+ dn_numag = JFS_SBI(pip->i_sb)->bmap->db_numag;
+ if (agno < 0 || agno > dn_numag)
+ return -EIO;
if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
/*
@@ -2177,6 +2181,9 @@ static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
/* get the ag and iag numbers for this iag.
*/
agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi);
+ if (agno >= MAXAG || agno < 0)
+ return -EIO;
+
iagno = le32_to_cpu(iagp->iagnum);
/* check if this is the last free extent within the
diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
index d41733540df9..459324f3570a 100644
--- a/fs/jfs/jfs_mount.c
+++ b/fs/jfs/jfs_mount.c
@@ -171,15 +171,15 @@ int jfs_mount(struct super_block *sb)
}
jfs_info("jfs_mount: ipimap:0x%p", ipimap);
- /* map further access of per fileset inodes by the fileset inode */
- sbi->ipimap = ipimap;
-
/* initialize fileset inode allocation map */
if ((rc = diMount(ipimap))) {
jfs_err("jfs_mount: diMount failed w/rc = %d", rc);
goto err_ipimap;
}
+ /* map further access of per fileset inodes by the fileset inode */
+ sbi->ipimap = ipimap;
+
return rc;
/*
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index c8ce7f1bc594..6f6a5b9203d3 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -354,6 +354,11 @@ tid_t txBegin(struct super_block *sb, int flag)
jfs_info("txBegin: flag = 0x%x", flag);
log = JFS_SBI(sb)->log;
+ if (!log) {
+ jfs_error(sb, "read-only filesystem\n");
+ return 0;
+ }
+
TXN_LOCK();
INCREMENT(TxStat.txBegin);
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 7a55d14cc1af..f155ad6650bd 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -798,6 +798,11 @@ static int jfs_link(struct dentry *old_dentry,
if (rc)
goto out;
+ if (isReadOnly(ip)) {
+ jfs_error(ip->i_sb, "read-only filesystem\n");
+ return -EROFS;
+ }
+
tid = txBegin(ip->i_sb, 0);
mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 99ee657596b5..d3a602ea795b 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -702,6 +702,18 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
{
struct kernfs_node *kn;
+ if (parent->mode & S_ISGID) {
+ /* this code block imitates inode_init_owner() for
+ * kernfs
+ */
+
+ if (parent->iattr)
+ gid = parent->iattr->ia_gid;
+
+ if (flags & KERNFS_DIR)
+ mode |= S_ISGID;
+ }
+
kn = __kernfs_new_node(kernfs_root(parent), parent,
name, mode, uid, gid, flags);
if (kn) {
@@ -1515,8 +1527,11 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
mutex_lock(&kernfs_mutex);
kn = kernfs_find_ns(parent, name, ns);
- if (kn)
+ if (kn) {
+ kernfs_get(kn);
__kernfs_remove(kn);
+ kernfs_put(kn);
+ }
mutex_unlock(&kernfs_mutex);
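The S_ISGID block above imitates what inode_init_owner() does for disk filesystems, since kernfs builds nodes without going through that helper. A simplified sketch of the behaviour being copied (the real helper also handles clearing S_ISGID for non-group-members):

	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;	/* inherit group from parent */
		if (S_ISDIR(mode))
			mode |= S_ISGID;	/* directories propagate setgid */
	}
	inode->i_mode = mode;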
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index 6c12fac2c287..d62cec6d838d 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -200,7 +200,7 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
dput(dentry);
return ERR_PTR(-EINVAL);
}
- dtmp = lookup_one_len_unlocked(kntmp->name, dentry,
+ dtmp = lookup_positive_unlocked(kntmp->name, dentry,
strlen(kntmp->name));
dput(dentry);
if (IS_ERR(dtmp))
diff --git a/fs/libfs.c b/fs/libfs.c
index 247b58a68240..e6f986da2a65 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -883,8 +883,8 @@ out:
EXPORT_SYMBOL_GPL(simple_attr_read);
/* interpret the buffer as a number to call the set function with */
-ssize_t simple_attr_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
+static ssize_t simple_attr_write_xsigned(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos, bool is_signed)
{
struct simple_attr *attr;
unsigned long long val;
@@ -905,7 +905,10 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
goto out;
attr->set_buf[size] = '\0';
- ret = kstrtoull(attr->set_buf, 0, &val);
+ if (is_signed)
+ ret = kstrtoll(attr->set_buf, 0, &val);
+ else
+ ret = kstrtoull(attr->set_buf, 0, &val);
if (ret)
goto out;
ret = attr->set(attr->data, val);
@@ -915,8 +918,21 @@ out:
mutex_unlock(&attr->mutex);
return ret;
}
+
+ssize_t simple_attr_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return simple_attr_write_xsigned(file, buf, len, ppos, false);
+}
EXPORT_SYMBOL_GPL(simple_attr_write);
+ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return simple_attr_write_xsigned(file, buf, len, ppos, true);
+}
+EXPORT_SYMBOL_GPL(simple_attr_write_signed);
+
/**
* generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
* @sb: filesystem to do the file handle conversion on
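Splitting the write path lets a signed variant accept input such as "-1", which kstrtoull() would otherwise reject. A sketch of how a user might wire this up, assuming the DEFINE_SIMPLE_ATTRIBUTE_SIGNED() helper introduced by the same series (all other names hypothetical):

static s64 my_threshold;	/* value exposed through debugfs */

static int my_get(void *data, u64 *val)
{
	*val = *(s64 *)data;
	return 0;
}

static int my_set(void *data, u64 val)
{
	*(s64 *)data = (s64)val;	/* "-1" now survives the round trip */
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE_SIGNED(my_fops, my_get, my_set, "%lld\n");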
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 1eabd91870e6..d770a41f8569 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -276,6 +276,9 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
{
struct nsm_handle *new;
+ if (!hostname)
+ return NULL;
+
new = kzalloc(sizeof(*new) + hostname_len + 1, GFP_KERNEL);
if (unlikely(new == NULL))
return NULL;
diff --git a/fs/locks.c b/fs/locks.c
index b8a31c1c4fff..90f92784aa55 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1338,6 +1338,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
out:
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
+ trace_posix_lock_inode(inode, request, error);
/*
* Free any unused locks.
*/
@@ -1346,7 +1347,6 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
if (new_fl2)
locks_free_lock(new_fl2);
locks_dispose_list(&dispose);
- trace_posix_lock_inode(inode, request, error);
return error;
}
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 97c54d3a2227..95b047256d09 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -11,7 +11,7 @@
/*
* Mbcache is a simple key-value store. Keys need not be unique, however
* key-value pairs are expected to be unique (we use this fact in
- * mb_cache_entry_delete()).
+ * mb_cache_entry_delete_or_get()).
*
* Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
* Ext4 also uses it for deduplication of xattr values stored in inodes.
@@ -90,12 +90,19 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
return -ENOMEM;
INIT_LIST_HEAD(&entry->e_list);
- /* One ref for hash, one ref returned */
- atomic_set(&entry->e_refcnt, 1);
+ /*
+ * We create entry with two references. One reference is kept by the
+ * hash table, the other reference is used to protect us from
+ * mb_cache_entry_delete_or_get() until the entry is fully setup. This
+ * avoids nesting of cache->c_list_lock into hash table bit locks which
+ * is problematic for RT.
+ */
+ atomic_set(&entry->e_refcnt, 2);
entry->e_key = key;
entry->e_value = value;
- entry->e_reusable = reusable;
- entry->e_referenced = 0;
+ entry->e_flags = 0;
+ if (reusable)
+ set_bit(MBE_REUSABLE_B, &entry->e_flags);
head = mb_cache_entry_head(cache, key);
hlist_bl_lock(head);
hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
@@ -107,24 +114,41 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
}
hlist_bl_add_head(&entry->e_hash_list, head);
hlist_bl_unlock(head);
-
spin_lock(&cache->c_list_lock);
list_add_tail(&entry->e_list, &cache->c_list);
- /* Grab ref for LRU list */
- atomic_inc(&entry->e_refcnt);
cache->c_entry_count++;
spin_unlock(&cache->c_list_lock);
+ mb_cache_entry_put(cache, entry);
return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);
-void __mb_cache_entry_free(struct mb_cache_entry *entry)
+void __mb_cache_entry_free(struct mb_cache *cache, struct mb_cache_entry *entry)
{
+ struct hlist_bl_head *head;
+
+ head = mb_cache_entry_head(cache, entry->e_key);
+ hlist_bl_lock(head);
+ hlist_bl_del(&entry->e_hash_list);
+ hlist_bl_unlock(head);
kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);
+/*
+ * mb_cache_entry_wait_unused - wait to be the last user of the entry
+ *
+ * @entry - entry to work on
+ *
+ * Wait to be the last user of the entry.
+ */
+void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
+{
+ wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 2);
+}
+EXPORT_SYMBOL(mb_cache_entry_wait_unused);
+
static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
struct mb_cache_entry *entry,
u32 key)
@@ -142,10 +166,10 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
while (node) {
entry = hlist_bl_entry(node, struct mb_cache_entry,
e_hash_list);
- if (entry->e_key == key && entry->e_reusable) {
- atomic_inc(&entry->e_refcnt);
+ if (entry->e_key == key &&
+ test_bit(MBE_REUSABLE_B, &entry->e_flags) &&
+ atomic_inc_not_zero(&entry->e_refcnt))
goto out;
- }
node = node->next;
}
entry = NULL;
@@ -205,10 +229,9 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
head = mb_cache_entry_head(cache, key);
hlist_bl_lock(head);
hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
- if (entry->e_key == key && entry->e_value == value) {
- atomic_inc(&entry->e_refcnt);
+ if (entry->e_key == key && entry->e_value == value &&
+ atomic_inc_not_zero(&entry->e_refcnt))
goto out;
- }
}
entry = NULL;
out:
@@ -217,7 +240,7 @@ out:
}
EXPORT_SYMBOL(mb_cache_entry_get);
-/* mb_cache_entry_delete - remove a cache entry
+/* mb_cache_entry_delete - try to remove a cache entry
* @cache - cache we work with
* @key - key
* @value - value
@@ -254,6 +277,43 @@ void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
}
EXPORT_SYMBOL(mb_cache_entry_delete);
+/* mb_cache_entry_delete_or_get - remove a cache entry if it has no users
+ * @cache - cache we work with
+ * @key - key
+ * @value - value
+ *
+ * Remove entry from cache @cache with key @key and value @value. The removal
+ * happens only if the entry is unused. The function returns NULL if the entry
+ * was successfully removed or if there is no such entry in the cache. Otherwise
+ * it grabs a reference to the entry that could not be deleted because it still
+ * has users, and returns it.
+ */
+struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
+ u32 key, u64 value)
+{
+ struct mb_cache_entry *entry;
+
+ entry = mb_cache_entry_get(cache, key, value);
+ if (!entry)
+ return NULL;
+
+ /*
+ * Drop the ref we got from mb_cache_entry_get() and the initial hash
+ * ref if we are the last user
+ */
+ if (atomic_cmpxchg(&entry->e_refcnt, 2, 0) != 2)
+ return entry;
+
+ spin_lock(&cache->c_list_lock);
+ if (!list_empty(&entry->e_list))
+ list_del_init(&entry->e_list);
+ cache->c_entry_count--;
+ spin_unlock(&cache->c_list_lock);
+ __mb_cache_entry_free(cache, entry);
+ return NULL;
+}
+EXPORT_SYMBOL(mb_cache_entry_delete_or_get);
+
/* mb_cache_entry_touch - cache entry got used
* @cache - cache the entry belongs to
* @entry - entry that got used
@@ -263,7 +323,7 @@ EXPORT_SYMBOL(mb_cache_entry_delete);
void mb_cache_entry_touch(struct mb_cache *cache,
struct mb_cache_entry *entry)
{
- entry->e_referenced = 1;
+ set_bit(MBE_REFERENCED_B, &entry->e_flags);
}
EXPORT_SYMBOL(mb_cache_entry_touch);
@@ -281,34 +341,24 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
unsigned long nr_to_scan)
{
struct mb_cache_entry *entry;
- struct hlist_bl_head *head;
unsigned long shrunk = 0;
spin_lock(&cache->c_list_lock);
while (nr_to_scan-- && !list_empty(&cache->c_list)) {
entry = list_first_entry(&cache->c_list,
struct mb_cache_entry, e_list);
- if (entry->e_referenced) {
- entry->e_referenced = 0;
+ /* Drop initial hash reference if there is no user */
+ if (test_bit(MBE_REFERENCED_B, &entry->e_flags) ||
+ atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) {
+ clear_bit(MBE_REFERENCED_B, &entry->e_flags);
list_move_tail(&entry->e_list, &cache->c_list);
continue;
}
list_del_init(&entry->e_list);
cache->c_entry_count--;
- /*
- * We keep LRU list reference so that entry doesn't go away
- * from under us.
- */
spin_unlock(&cache->c_list_lock);
- head = mb_cache_entry_head(cache, entry->e_key);
- hlist_bl_lock(head);
- if (!hlist_bl_unhashed(&entry->e_hash_list)) {
- hlist_bl_del_init(&entry->e_hash_list);
- atomic_dec(&entry->e_refcnt);
- }
- hlist_bl_unlock(head);
- if (mb_cache_entry_put(cache, entry))
- shrunk++;
+ __mb_cache_entry_free(cache, entry);
+ shrunk++;
cond_resched();
spin_lock(&cache->c_list_lock);
}
@@ -400,11 +450,6 @@ void mb_cache_destroy(struct mb_cache *cache)
* point.
*/
list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
- if (!hlist_bl_unhashed(&entry->e_hash_list)) {
- hlist_bl_del_init(&entry->e_hash_list);
- atomic_dec(&entry->e_refcnt);
- } else
- WARN_ON(1);
list_del(&entry->e_list);
WARN_ON(atomic_read(&entry->e_refcnt) != 1);
mb_cache_entry_put(cache, entry);
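After this rework the reference count directly encodes the entry's state: the hash table owns one reference and the LRU list none, so an unused entry sits at exactly 1 (2 while being created or while a lookup holds it). "Delete if unused" then collapses into one atomic transition, roughly:

	entry = mb_cache_entry_get(cache, key, value);	/* +1 ref, now >= 2 */
	if (entry && atomic_cmpxchg(&entry->e_refcnt, 2, 0) == 2) {
		/* ours was the only user reference: both it and the hash
		 * reference drop in one step, no lookup can succeed again
		 * (atomic_inc_not_zero fails on 0), safe to free */
	}

which is exactly the shape of mb_cache_entry_delete_or_get() above; the shrinker's cmpxchg from 1 to 0 applies the same idea to entries nobody holds.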
diff --git a/fs/namei.c b/fs/namei.c
index b952ecbd49c2..a4cba6991a4d 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -52,8 +52,8 @@
* The new code replaces the old recursive symlink resolution with
* an iterative one (in case of non-nested symlink chains). It does
* this with calls to <fs>_follow_link().
- * As a side effect, dir_namei(), _namei() and follow_link() are now
- * replaced with a single function lookup_dentry() that can handle all
+ * As a side effect, dir_namei(), _namei() and follow_link() are now
+ * replaced with a single function lookup_dentry() that can handle all
* the special cases of the former code.
*
* With the new dcache, the pathname is stored at each inode, at least as
@@ -2565,6 +2565,26 @@ struct dentry *lookup_one_len_unlocked(const char *name,
}
EXPORT_SYMBOL(lookup_one_len_unlocked);
+/*
+ * Like lookup_one_len_unlocked(), except that it yields ERR_PTR(-ENOENT)
+ * on negatives. Returns known positive or ERR_PTR(); that's what
+ * most of the users want. Note that pinned negative with unlocked parent
+ * _can_ become positive at any time, so callers of lookup_one_len_unlocked()
+ * need to be very careful; pinned positives have ->d_inode stable, so
+ * this one avoids such problems.
+ */
+struct dentry *lookup_positive_unlocked(const char *name,
+ struct dentry *base, int len)
+{
+ struct dentry *ret = lookup_one_len_unlocked(name, base, len);
+ if (!IS_ERR(ret) && d_is_negative(ret)) {
+ dput(ret);
+ ret = ERR_PTR(-ENOENT);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(lookup_positive_unlocked);
+
#ifdef CONFIG_UNIX98_PTYS
int path_pts(struct path *path)
{
@@ -2583,7 +2603,7 @@ int path_pts(struct path *path)
this.name = "pts";
this.len = 3;
child = d_hash_and_lookup(parent, &this);
- if (!child)
+ if (IS_ERR_OR_NULL(child))
return -ENOENT;
path->dentry = child;
@@ -2859,20 +2879,14 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
p = d_ancestor(p2, p1);
if (p) {
inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
- inode_lock_nested(p1->d_inode, I_MUTEX_CHILD);
+ inode_lock_nested(p1->d_inode, I_MUTEX_PARENT2);
return p;
}
p = d_ancestor(p1, p2);
- if (p) {
- inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
- inode_lock_nested(p2->d_inode, I_MUTEX_CHILD);
- return p;
- }
-
inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2);
- return NULL;
+ return p;
}
EXPORT_SYMBOL(lock_rename);
@@ -2886,6 +2900,63 @@ void unlock_rename(struct dentry *p1, struct dentry *p2)
}
EXPORT_SYMBOL(unlock_rename);
+/**
+ * mode_strip_umask - handle vfs umask stripping
+ * @dir: parent directory of the new inode
+ * @mode: mode of the new inode to be created in @dir
+ *
+ * Umask stripping depends on whether or not the filesystem supports POSIX
+ * ACLs. If the filesystem doesn't support it umask stripping is done directly
+ * in here. If the filesystem does support POSIX ACLs umask stripping is
+ * deferred until the filesystem calls posix_acl_create().
+ *
+ * Returns: mode
+ */
+static inline umode_t mode_strip_umask(const struct inode *dir, umode_t mode)
+{
+ if (!IS_POSIXACL(dir))
+ mode &= ~current_umask();
+ return mode;
+}
+
+/**
+ * vfs_prepare_mode - prepare the mode to be used for a new inode
+ * @dir: parent directory of the new inode
+ * @mode: mode of the new inode
+ * @mask_perms: allowed permission by the vfs
+ * @type: type of file to be created
+ *
+ * This helper consolidates and enforces vfs restrictions on the @mode of a new
+ * object to be created.
+ *
+ * Umask stripping depends on whether the filesystem supports POSIX ACLs (see
+ * the kernel documentation for mode_strip_umask()). Moving umask stripping
+ * after setgid stripping allows the same ordering for both non-POSIX ACL and
+ * POSIX ACL supporting filesystems.
+ *
+ * Note that it's currently valid for @type to be 0 if a directory is created.
+ * Filesystems raise that flag individually and we need to check whether each
+ * filesystem can deal with receiving S_IFDIR from the vfs before we enforce a
+ * non-zero type.
+ *
+ * Returns: mode to be passed to the filesystem
+ */
+static inline umode_t vfs_prepare_mode(const struct inode *dir, umode_t mode,
+ umode_t mask_perms, umode_t type)
+{
+ mode = mode_strip_sgid(dir, mode);
+ mode = mode_strip_umask(dir, mode);
+
+ /*
+ * Apply the vfs mandated allowed permission mask and set the type of
+ * file to be created before we call into the filesystem.
+ */
+ mode &= (mask_perms & ~S_IFMT);
+ mode |= (type & S_IFMT);
+
+ return mode;
+}
+
int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool want_excl)
{
@@ -2895,8 +2966,8 @@ int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
if (!dir->i_op->create)
return -EACCES; /* shouldn't it be ENOSYS? */
- mode &= S_IALLUGO;
- mode |= S_IFREG;
+
+ mode = vfs_prepare_mode(dir, mode, S_IALLUGO, S_IFREG);
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
@@ -3166,8 +3237,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
* O_EXCL open we want to return EEXIST not EROFS).
*/
if (open_flag & O_CREAT) {
- if (!IS_POSIXACL(dir->d_inode))
- mode &= ~current_umask();
+ mode = vfs_prepare_mode(dir->d_inode, mode, mode, mode);
if (unlikely(!got_write)) {
create_error = -EROFS;
open_flag &= ~O_CREAT;
@@ -3443,6 +3513,7 @@ struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag)
child = d_alloc(dentry, &slash_name);
if (unlikely(!child))
goto out_err;
+ mode = vfs_prepare_mode(dir, mode, mode, mode);
error = dir->i_op->tmpfile(dir, child, mode);
if (error)
goto out_err;
@@ -3701,6 +3772,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
if (!dir->i_op->mknod)
return -EPERM;
+ mode = vfs_prepare_mode(dir, mode, mode, mode);
error = devcgroup_inode_mknod(mode, dev);
if (error)
return error;
@@ -3749,9 +3821,8 @@ retry:
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (!IS_POSIXACL(path.dentry->d_inode))
- mode &= ~current_umask();
- error = security_path_mknod(&path, dentry, mode, dev);
+ error = security_path_mknod(&path, dentry,
+ mode_strip_umask(path.dentry->d_inode, mode), dev);
if (error)
goto out;
switch (mode & S_IFMT) {
@@ -3799,7 +3870,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (!dir->i_op->mkdir)
return -EPERM;
- mode &= (S_IRWXUGO|S_ISVTX);
+ mode = vfs_prepare_mode(dir, mode, S_IRWXUGO | S_ISVTX, 0);
error = security_inode_mkdir(dir, dentry, mode);
if (error)
return error;
@@ -3826,9 +3897,8 @@ retry:
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (!IS_POSIXACL(path.dentry->d_inode))
- mode &= ~current_umask();
- error = security_path_mkdir(&path, dentry, mode);
+ error = security_path_mkdir(&path, dentry,
+ mode_strip_umask(path.dentry->d_inode, mode));
if (!error)
error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
done_path_create(&path, dentry);
@@ -4361,11 +4431,12 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname
*
* a) we can get into loop creation.
* b) race potential - two innocent renames can create a loop together.
- * That's where 4.4 screws up. Current fix: serialization on
+ * That's where 4.4BSD screws up. Current fix: serialization on
* sb->s_vfs_rename_mutex. We might be more accurate, but that's another
* story.
- * c) we have to lock _four_ objects - parents and victim (if it exists),
- * and source (if it is not a directory).
+ * c) we may have to lock up to _four_ objects - parents and victim (if it exists),
+ * and source (if it's a non-directory or a subdirectory that moves to
+ * different parent).
* And that - after we got ->i_mutex on parents (until then we don't know
* whether the target exists). Solution: try to be smart with locking
* order for inodes. We rely on the fact that tree topology may change
@@ -4394,6 +4465,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
bool new_is_dir = false;
unsigned max_links = new_dir->i_sb->s_max_links;
struct name_snapshot old_name;
+ bool lock_old_subdir, lock_new_subdir;
if (source == target)
return 0;
@@ -4442,10 +4514,33 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
take_dentry_name_snapshot(&old_name, old_dentry);
dget(new_dentry);
- if (!is_dir || (flags & RENAME_EXCHANGE))
+ /*
+ * Lock children.
+ * The source subdirectory needs to be locked on cross-directory
+ * rename or cross-directory exchange since its parent changes.
+ * The target subdirectory needs to be locked on cross-directory
+ * exchange due to parent change and on any rename due to becoming
+ * a victim.
+ * Non-directories need locking in all cases (for NFS reasons);
+ * they get locked after any subdirectories (in inode address order).
+ *
+ * NOTE: WE ONLY LOCK UNRELATED DIRECTORIES IN CROSS-DIRECTORY CASE.
+ * NEVER, EVER DO THAT WITHOUT ->s_vfs_rename_mutex.
+ */
+ lock_old_subdir = new_dir != old_dir;
+ lock_new_subdir = new_dir != old_dir || !(flags & RENAME_EXCHANGE);
+ if (is_dir) {
+ if (lock_old_subdir)
+ inode_lock_nested(source, I_MUTEX_CHILD);
+ if (target && (!new_is_dir || lock_new_subdir))
+ inode_lock(target);
+ } else if (new_is_dir) {
+ if (lock_new_subdir)
+ inode_lock_nested(target, I_MUTEX_CHILD);
+ inode_lock(source);
+ } else {
lock_two_nondirectories(source, target);
- else if (target)
- inode_lock(target);
+ }
error = -EBUSY;
if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry))
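Spelling out lock_old_subdir and lock_new_subdir, the child-locking matrix the comment describes works out as follows (illustrative summary for directory children only; non-directories are always locked, after any subdirectory and in address order):

	/*
	 *   same-dir rename:     lock the target if it exists (victim)
	 *   same-dir exchange:   lock neither subdirectory
	 *   cross-dir rename:    lock source and, if it exists, target
	 *   cross-dir exchange:  lock both subdirectories
	 */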
@@ -4489,9 +4584,9 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
d_exchange(old_dentry, new_dentry);
}
out:
- if (!is_dir || (flags & RENAME_EXCHANGE))
- unlock_two_nondirectories(source, target);
- else if (target)
+ if (!is_dir || lock_old_subdir)
+ inode_unlock(source);
+ if (target && (!new_is_dir || lock_new_subdir))
inode_unlock(target);
dput(new_dentry);
if (!error) {
@@ -4817,7 +4912,7 @@ int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
- void *fsdata;
+ void *fsdata = NULL;
int err;
unsigned int flags = 0;
if (nofs)
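A worked example for the vfs_prepare_mode() helper added earlier in this file (illustrative numbers):

	/* Non-POSIX-ACL parent, creating a regular file:
	 *
	 *   requested: mode = 0666, type = S_IFREG, current_umask() = 022
	 *
	 *   mode_strip_sgid():  0666            (no setgid bit requested)
	 *   mode_strip_umask(): 0666 & ~022  == 0644
	 *   mask/type step:     (0644 & S_IALLUGO) | S_IFREG == 0100644
	 *
	 * On an IS_POSIXACL() parent the umask step is skipped here and
	 * deferred to posix_acl_create() instead.
	 */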
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 690221747b47..9f10b90debec 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -604,6 +604,8 @@ retry:
nfs4_delete_deviceid(node->ld, node->nfs_client, id);
goto retry;
}
+
+ nfs4_put_deviceid_node(node);
return ERR_PTR(-ENODEV);
}
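The one-line fix restores the usual get/put balance: the lookup took a reference, so the ENODEV failure path must drop it like every other exit. The pattern in miniature (hypothetical names):

	node = lookup_node(id);			/* takes a reference */
	if (!usable(node)) {
		put_node(node);			/* this put was missing */
		return ERR_PTR(-ENODEV);
	}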
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index dec5880ac6de..6e3a14fdff9c 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -422,7 +422,7 @@ bl_parse_concat(struct nfs_server *server, struct pnfs_block_dev *d,
int ret, i;
d->children = kcalloc(v->concat.volumes_count,
- sizeof(struct pnfs_block_dev), GFP_KERNEL);
+ sizeof(struct pnfs_block_dev), gfp_mask);
if (!d->children)
return -ENOMEM;
@@ -451,7 +451,7 @@ bl_parse_stripe(struct nfs_server *server, struct pnfs_block_dev *d,
int ret, i;
d->children = kcalloc(v->stripe.volumes_count,
- sizeof(struct pnfs_block_dev), GFP_KERNEL);
+ sizeof(struct pnfs_block_dev), gfp_mask);
if (!d->children)
return -ENOMEM;
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 98b74cdabb99..8c72718d9fe0 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -837,6 +837,12 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
return &fl->generic_hdr;
}
+static bool
+filelayout_lseg_is_striped(const struct nfs4_filelayout_segment *flseg)
+{
+ return flseg->num_fh > 1;
+}
+
/*
* filelayout_pg_test(). Called by nfs_can_coalesce_requests()
*
@@ -857,6 +863,8 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
size = pnfs_generic_pg_test(pgio, prev, req);
if (!size)
return 0;
+ else if (!filelayout_lseg_is_striped(FILELAYOUT_LSEG(pgio->pg_lseg)))
+ return size;
/* see if req and prev are in the same stripe */
if (prev) {
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index fa1c920afb49..1b88b78f40be 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1280,6 +1280,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
case -EPFNOSUPPORT:
case -EPROTONOSUPPORT:
case -EOPNOTSUPP:
+ case -EINVAL:
case -ECONNREFUSED:
case -ECONNRESET:
case -EHOSTDOWN:
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 53f3374acd0d..d4453d97c789 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -414,7 +414,9 @@ extern int __init register_nfs_fs(void);
extern void __exit unregister_nfs_fs(void);
extern bool nfs_sb_active(struct super_block *sb);
extern void nfs_sb_deactive(struct super_block *sb);
-
+extern int nfs_client_for_each_server(struct nfs_client *clp,
+ int (*fn)(struct nfs_server *, void *),
+ void *data);
/* io.c */
extern void nfs_start_io_read(struct inode *inode);
extern void nfs_end_io_read(struct inode *inode);
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index af557dc2cfe1..6b783e2d2855 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -953,7 +953,7 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
error = decode_filename_inline(xdr, &entry->name, &entry->len);
if (unlikely(error))
- return -EAGAIN;
+ return error == -ENAMETOOLONG ? -ENAMETOOLONG : -EAGAIN;
/*
* The type (size and byte order) of nfscookie isn't defined in
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 84369d51353a..6d8768ce370d 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -1991,7 +1991,7 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
error = decode_inline_filename3(xdr, &entry->name, &entry->len);
if (unlikely(error))
- return -EAGAIN;
+ return error == -ENAMETOOLONG ? -ENAMETOOLONG : -EAGAIN;
error = decode_cookie3(xdr, &new_cookie);
if (unlikely(error))
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 3671a51fe5eb..1f4bdcda3fda 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -346,6 +346,7 @@ int nfs40_init_client(struct nfs_client *clp)
ret = nfs4_setup_slot_table(tbl, NFS4_MAX_SLOT_TABLE,
"NFSv4.0 transport Slot table");
if (ret) {
+ nfs4_shutdown_slot_table(tbl);
kfree(tbl);
return ret;
}
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index 1e7296395d71..4e6dd267ac50 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -560,22 +560,20 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap,
return true;
}
-static void
-nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret)
+static void nfs_idmap_complete_pipe_upcall(struct idmap_legacy_upcalldata *data,
+ int ret)
{
- struct key *authkey = idmap->idmap_upcall_data->authkey;
-
- kfree(idmap->idmap_upcall_data);
- idmap->idmap_upcall_data = NULL;
- complete_request_key(authkey, ret);
- key_put(authkey);
+ complete_request_key(data->authkey, ret);
+ key_put(data->authkey);
+ kfree(data);
}
-static void
-nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret)
+static void nfs_idmap_abort_pipe_upcall(struct idmap *idmap,
+ struct idmap_legacy_upcalldata *data,
+ int ret)
{
- if (idmap->idmap_upcall_data != NULL)
- nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
+ if (cmpxchg(&idmap->idmap_upcall_data, data, NULL) == data)
+ nfs_idmap_complete_pipe_upcall(data, ret);
}
static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
@@ -612,7 +610,7 @@ static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
if (ret < 0)
- nfs_idmap_abort_pipe_upcall(idmap, ret);
+ nfs_idmap_abort_pipe_upcall(idmap, data, ret);
return ret;
out2:
@@ -668,6 +666,7 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
struct request_key_auth *rka;
struct rpc_inode *rpci = RPC_I(file_inode(filp));
struct idmap *idmap = (struct idmap *)rpci->private;
+ struct idmap_legacy_upcalldata *data;
struct key *authkey;
struct idmap_msg im;
size_t namelen_in;
@@ -677,10 +676,11 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
* will have been woken up and someone else may now have used
* idmap_key_cons - so after this point we may no longer touch it.
*/
- if (idmap->idmap_upcall_data == NULL)
+ data = xchg(&idmap->idmap_upcall_data, NULL);
+ if (data == NULL)
goto out_noupcall;
- authkey = idmap->idmap_upcall_data->authkey;
+ authkey = data->authkey;
rka = get_request_key_auth(authkey);
if (mlen != sizeof(im)) {
@@ -702,18 +702,17 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
if (namelen_in == 0 || namelen_in == IDMAP_NAMESZ) {
ret = -EINVAL;
goto out;
-}
+ }
- ret = nfs_idmap_read_and_verify_message(&im,
- &idmap->idmap_upcall_data->idmap_msg,
- rka->target_key, authkey);
+ ret = nfs_idmap_read_and_verify_message(&im, &data->idmap_msg,
+ rka->target_key, authkey);
if (ret >= 0) {
key_set_timeout(rka->target_key, nfs_idmap_cache_timeout);
ret = mlen;
}
out:
- nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
+ nfs_idmap_complete_pipe_upcall(data, ret);
out_noupcall:
return ret;
}
@@ -727,7 +726,7 @@ idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)
struct idmap *idmap = data->idmap;
if (msg->errno)
- nfs_idmap_abort_pipe_upcall(idmap, msg->errno);
+ nfs_idmap_abort_pipe_upcall(idmap, data, msg->errno);
}
static void
@@ -735,8 +734,11 @@ idmap_release_pipe(struct inode *inode)
{
struct rpc_inode *rpci = RPC_I(inode);
struct idmap *idmap = (struct idmap *)rpci->private;
+ struct idmap_legacy_upcalldata *data;
- nfs_idmap_abort_pipe_upcall(idmap, -EPIPE);
+ data = xchg(&idmap->idmap_upcall_data, NULL);
+ if (data)
+ nfs_idmap_complete_pipe_upcall(data, -EPIPE);
}
int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, kuid_t *uid)
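The xchg()/cmpxchg() conversion turns idmap->idmap_upcall_data into a claim-once handoff: whichever path atomically swaps the pointer out first owns the data and is the only one allowed to complete and free it, removing the need for a lock around the NULL check. The pattern in isolation (names hypothetical):

	struct upcall_data *data;

	data = xchg(&ctx->pending, NULL);	/* atomic claim */
	if (!data)
		return;			/* another path already completed it */
	complete_request(data, err);
	kfree(data);

The abort path's cmpxchg(&ctx->pending, data, NULL) == data variant is the same claim, but it succeeds only if its own request is still the pending one.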
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ba4a03a69fbf..31503bade335 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -121,6 +121,11 @@ nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
return NULL;
+ label->lfs = 0;
+ label->pi = 0;
+ label->len = 0;
+ label->label = NULL;
+
err = security_dentry_init_security(dentry, sattr->ia_mode,
&dentry->d_name, (void **)&label->label, &label->len);
if (err == 0)
@@ -163,6 +168,7 @@ static int nfs4_map_errors(int err)
case -NFS4ERR_RESOURCE:
case -NFS4ERR_LAYOUTTRYLATER:
case -NFS4ERR_RECALLCONFLICT:
+ case -NFS4ERR_RETURNCONFLICT:
return -EREMOTEIO;
case -NFS4ERR_WRONGSEC:
case -NFS4ERR_WRONG_CRED:
@@ -547,6 +553,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
case -NFS4ERR_GRACE:
case -NFS4ERR_LAYOUTTRYLATER:
case -NFS4ERR_RECALLCONFLICT:
+ case -NFS4ERR_RETURNCONFLICT:
exception->delay = 1;
return 0;
@@ -779,10 +786,9 @@ static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
slot->seq_nr_highest_sent = seqnr;
}
-static void nfs4_slot_sequence_acked(struct nfs4_slot *slot,
- u32 seqnr)
+static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
{
- slot->seq_nr_highest_sent = seqnr;
+ nfs4_slot_sequence_record_sent(slot, seqnr);
slot->seq_nr_last_acked = seqnr;
}
@@ -849,7 +855,6 @@ static int nfs41_sequence_process(struct rpc_task *task,
__func__,
slot->slot_nr,
slot->seq_nr);
- nfs4_slot_sequence_acked(slot, slot->seq_nr);
goto out_retry;
case -NFS4ERR_RETRY_UNCACHED_REP:
case -NFS4ERR_SEQ_FALSE_RETRY:
@@ -912,6 +917,7 @@ out:
out_noaction:
return ret;
session_recover:
+ set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
nfs4_schedule_session_recovery(session, status);
dprintk("%s ERROR: %d Reset session\n", __func__, status);
nfs41_sequence_free_slot(res);
@@ -1931,8 +1937,7 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
if (!data->rpc_done) {
if (data->rpc_status)
return ERR_PTR(data->rpc_status);
- /* cached opens have already been processed */
- goto update;
+ return nfs4_try_open_cached(data);
}
ret = nfs_refresh_inode(inode, &data->f_attr);
@@ -1941,7 +1946,7 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
if (data->o_res.delegation_type != 0)
nfs4_opendata_check_deleg(data, state);
-update:
+
if (!update_open_stateid(state, &data->o_res.stateid,
NULL, data->o_arg.fmode))
return ERR_PTR(-EAGAIN);
@@ -2082,18 +2087,18 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
}
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
- fmode_t fmode)
+ fmode_t fmode)
{
struct nfs4_state *newstate;
+ struct nfs_server *server = NFS_SB(opendata->dentry->d_sb);
+ int openflags = opendata->o_arg.open_flags;
int ret;
if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
return 0;
- opendata->o_arg.open_flags = 0;
opendata->o_arg.fmode = fmode;
- opendata->o_arg.share_access = nfs4_map_atomic_open_share(
- NFS_SB(opendata->dentry->d_sb),
- fmode, 0);
+ opendata->o_arg.share_access =
+ nfs4_map_atomic_open_share(server, fmode, openflags);
memset(&opendata->o_res, 0, sizeof(opendata->o_res));
memset(&opendata->c_res, 0, sizeof(opendata->c_res));
nfs4_init_opendata_res(opendata);
@@ -2668,10 +2673,15 @@ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *s
struct nfs4_opendata *opendata;
int ret;
- opendata = nfs4_open_recoverdata_alloc(ctx, state,
- NFS4_OPEN_CLAIM_FH);
+ opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH);
if (IS_ERR(opendata))
return PTR_ERR(opendata);
+ /*
+ * We're not recovering a delegation, so ask for no delegation.
+ * Otherwise the recovery thread could deadlock with an outstanding
+ * delegation return.
+ */
+ opendata->o_arg.open_flags = O_DIRECT;
ret = nfs4_open_recover(opendata, state);
if (ret == -ESTALE)
d_drop(ctx->dentry);
@@ -3041,12 +3051,13 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
}
out:
- if (opendata->lgp) {
- nfs4_lgopen_release(opendata->lgp);
- opendata->lgp = NULL;
- }
- if (!opendata->cancelled)
+ if (!opendata->cancelled) {
+ if (opendata->lgp) {
+ nfs4_lgopen_release(opendata->lgp);
+ opendata->lgp = NULL;
+ }
nfs4_sequence_free_slot(&opendata->o_res.seq_res);
+ }
return ret;
}
@@ -3743,7 +3754,7 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
int open_flags, struct iattr *attr, int *opened)
{
struct nfs4_state *state;
- struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
+ struct nfs4_label l, *label;
label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
@@ -4498,7 +4509,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
int flags)
{
struct nfs_server *server = NFS_SERVER(dir);
- struct nfs4_label l, *ilabel = NULL;
+ struct nfs4_label l, *ilabel;
struct nfs_open_context *ctx;
struct nfs4_state *state;
int status = 0;
@@ -4851,7 +4862,7 @@ static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
struct nfs4_exception exception = {
.interruptible = true,
};
- struct nfs4_label l, *label = NULL;
+ struct nfs4_label l, *label;
int err;
label = nfs4_label_init_security(dir, dentry, sattr, &l);
@@ -4892,7 +4903,7 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
struct nfs4_exception exception = {
.interruptible = true,
};
- struct nfs4_label l, *label = NULL;
+ struct nfs4_label l, *label;
int err;
label = nfs4_label_init_security(dir, dentry, sattr, &l);
@@ -5013,7 +5024,7 @@ static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
struct nfs4_exception exception = {
.interruptible = true,
};
- struct nfs4_label l, *label = NULL;
+ struct nfs4_label l, *label;
int err;
label = nfs4_label_init_security(dir, dentry, sattr, &l);
@@ -5360,7 +5371,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
- nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
+ nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
}
static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
@@ -5401,7 +5412,8 @@ static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_mess
data->res.server = server;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
- nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
+ nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
+ NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
}
static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
@@ -6855,6 +6867,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
{
struct nfs4_lockdata *data = calldata;
struct nfs4_lock_state *lsp = data->lsp;
+ struct nfs_server *server = NFS_SERVER(d_inode(data->ctx->dentry));
dprintk("%s: begin!\n", __func__);
@@ -6864,8 +6877,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
data->rpc_status = task->tk_status;
switch (task->tk_status) {
case 0:
- renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
- data->timestamp);
+ renew_lease(server, data->timestamp);
if (data->arg.new_lock && !data->cancelled) {
data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
@@ -6878,14 +6890,23 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
} else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
goto out_restart;
break;
- case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OLD_STATEID:
+ if (data->arg.new_lock_owner != 0 &&
+ nfs4_refresh_open_old_stateid(&data->arg.open_stateid,
+ lsp->ls_state))
+ goto out_restart;
+ if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp))
+ goto out_restart;
+ fallthrough;
+ case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
if (data->arg.new_lock_owner != 0) {
if (!nfs4_stateid_match(&data->arg.open_stateid,
&lsp->ls_state->open_stateid))
goto out_restart;
+ else if (nfs4_async_handle_error(task, server, lsp->ls_state, NULL) == -EAGAIN)
+ goto out_restart;
} else if (!nfs4_stateid_match(&data->arg.lock_stateid,
&lsp->ls_stateid))
goto out_restart;
@@ -8988,6 +9009,9 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
rpc_delay(task, NFS4_POLL_RETRY_MAX);
/* fall through */
case -NFS4ERR_RETRY_UNCACHED_REP:
+ case -EACCES:
+ dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n",
+ __func__, task->tk_status, clp->cl_hostname);
return -EAGAIN;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_DEADSESSION:
@@ -9137,6 +9161,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
status = -EBUSY;
break;
case -NFS4ERR_RECALLCONFLICT:
+ case -NFS4ERR_RETURNCONFLICT:
status = -ERECALLCONFLICT;
break;
case -NFS4ERR_DELEG_REVOKED:
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 1d2b81a233bb..1aacb0aa07f0 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -61,11 +61,14 @@
#include "nfs4session.h"
#include "pnfs.h"
#include "netns.h"
+#include "nfs4trace.h"
#define NFSDBG_FACILITY NFSDBG_STATE
#define OPENOWNER_POOL_SIZE 8
+static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp);
+
const nfs4_stateid zero_stateid = {
{ .data = { 0 } },
.type = NFS4_SPECIAL_STATEID_TYPE,
@@ -329,6 +332,8 @@ do_confirm:
status = nfs4_proc_create_session(clp, cred);
if (status != 0)
goto out;
+ if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R))
+ nfs4_state_start_reclaim_reboot(clp);
nfs41_finish_session_reset(clp);
nfs_mark_client_ready(clp, NFS_CS_READY);
out:
@@ -1224,6 +1229,8 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
if (IS_ERR(task)) {
printk(KERN_ERR "%s: kthread_run: %ld\n",
__func__, PTR_ERR(task));
+ if (!nfs_client_init_is_complete(clp))
+ nfs_mark_client_ready(clp, PTR_ERR(task));
nfs4_clear_state_manager_bit(clp);
nfs_put_client(clp);
module_put(THIS_MODULE);
@@ -1743,6 +1750,7 @@ static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
{
+ set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
/* Mark all delegations for reclaim */
nfs_delegation_mark_reclaim(clp);
nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
@@ -2518,6 +2526,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
/* Ensure exclusive access to NFSv4 state */
do {
+ trace_nfs4_state_mgr(clp);
clear_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
section = "purge state";
@@ -2588,6 +2597,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
if (status < 0)
goto out_error;
nfs4_state_end_reclaim_reboot(clp);
+ continue;
}
/* Detect expired delegations... */
@@ -2613,6 +2623,13 @@ static void nfs4_state_manager(struct nfs_client *clp)
nfs4_end_drain_session(clp);
nfs4_clear_state_manager_bit(clp);
+ if (test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
+ !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING,
+ &clp->cl_state)) {
+ memflags = memalloc_nofs_save();
+ continue;
+ }
+
if (!test_and_set_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state)) {
if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
nfs_client_return_marked_delegations(clp);
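The memflags re-save above pairs with the memalloc_nofs_restore() on the state manager's exit path: the whole loop runs in a scope where allocations implicitly behave as GFP_NOFS. A minimal sketch of the scoped-allocation idiom (memalloc_nofs_save()/restore() are the real APIs from <linux/sched/mm.h>; the worker call is a schematic placeholder):

    unsigned int memflags;

    memflags = memalloc_nofs_save();
    /*
     * All allocations in this scope are implicitly GFP_NOFS, so the
     * thread cannot recurse into filesystem reclaim while it holds
     * state-recovery locks.
     */
    do_state_recovery();                /* schematic placeholder */
    memalloc_nofs_restore(memflags);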
@@ -2633,6 +2650,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
out_error:
if (strlen(section))
section_sep = ": ";
+ trace_nfs4_state_mgr_failed(clp, section, status);
pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s"
" with error %d\n", section_sep, section,
clp->cl_hostname, -status);
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 2295a934a154..010ee5e6fa32 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -563,6 +563,99 @@ TRACE_EVENT(nfs4_setup_sequence,
)
);
+TRACE_DEFINE_ENUM(NFS4CLNT_MANAGER_RUNNING);
+TRACE_DEFINE_ENUM(NFS4CLNT_CHECK_LEASE);
+TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_EXPIRED);
+TRACE_DEFINE_ENUM(NFS4CLNT_RECLAIM_REBOOT);
+TRACE_DEFINE_ENUM(NFS4CLNT_RECLAIM_NOGRACE);
+TRACE_DEFINE_ENUM(NFS4CLNT_DELEGRETURN);
+TRACE_DEFINE_ENUM(NFS4CLNT_SESSION_RESET);
+TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_CONFIRM);
+TRACE_DEFINE_ENUM(NFS4CLNT_SERVER_SCOPE_MISMATCH);
+TRACE_DEFINE_ENUM(NFS4CLNT_PURGE_STATE);
+TRACE_DEFINE_ENUM(NFS4CLNT_BIND_CONN_TO_SESSION);
+TRACE_DEFINE_ENUM(NFS4CLNT_MOVED);
+TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_MOVED);
+TRACE_DEFINE_ENUM(NFS4CLNT_DELEGATION_EXPIRED);
+TRACE_DEFINE_ENUM(NFS4CLNT_RUN_MANAGER);
+TRACE_DEFINE_ENUM(NFS4CLNT_DELEGRETURN_RUNNING);
+
+#define show_nfs4_clp_state(state) \
+ __print_flags(state, "|", \
+ { NFS4CLNT_MANAGER_RUNNING, "MANAGER_RUNNING" }, \
+ { NFS4CLNT_CHECK_LEASE, "CHECK_LEASE" }, \
+ { NFS4CLNT_LEASE_EXPIRED, "LEASE_EXPIRED" }, \
+ { NFS4CLNT_RECLAIM_REBOOT, "RECLAIM_REBOOT" }, \
+ { NFS4CLNT_RECLAIM_NOGRACE, "RECLAIM_NOGRACE" }, \
+ { NFS4CLNT_DELEGRETURN, "DELEGRETURN" }, \
+ { NFS4CLNT_SESSION_RESET, "SESSION_RESET" }, \
+ { NFS4CLNT_LEASE_CONFIRM, "LEASE_CONFIRM" }, \
+ { NFS4CLNT_SERVER_SCOPE_MISMATCH, \
+ "SERVER_SCOPE_MISMATCH" }, \
+ { NFS4CLNT_PURGE_STATE, "PURGE_STATE" }, \
+ { NFS4CLNT_BIND_CONN_TO_SESSION, \
+ "BIND_CONN_TO_SESSION" }, \
+ { NFS4CLNT_MOVED, "MOVED" }, \
+ { NFS4CLNT_LEASE_MOVED, "LEASE_MOVED" }, \
+ { NFS4CLNT_DELEGATION_EXPIRED, "DELEGATION_EXPIRED" }, \
+ { NFS4CLNT_RUN_MANAGER, "RUN_MANAGER" }, \
+ { NFS4CLNT_DELEGRETURN_RUNNING, "DELEGRETURN_RUNNING" })
+
+TRACE_EVENT(nfs4_state_mgr,
+ TP_PROTO(
+ const struct nfs_client *clp
+ ),
+
+ TP_ARGS(clp),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __string(hostname, clp->cl_hostname)
+ ),
+
+ TP_fast_assign(
+ __entry->state = clp->cl_state;
+ __assign_str(hostname, clp->cl_hostname)
+ ),
+
+ TP_printk(
+ "hostname=%s clp state=%s", __get_str(hostname),
+ show_nfs4_clp_state(__entry->state)
+ )
+)
+
+TRACE_EVENT(nfs4_state_mgr_failed,
+ TP_PROTO(
+ const struct nfs_client *clp,
+ const char *section,
+ int status
+ ),
+
+ TP_ARGS(clp, section, status),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, error)
+ __field(unsigned long, state)
+ __string(hostname, clp->cl_hostname)
+ __string(section, section)
+ ),
+
+ TP_fast_assign(
+ __entry->error = status;
+ __entry->state = clp->cl_state;
+ __assign_str(hostname, clp->cl_hostname);
+ __assign_str(section, section);
+ ),
+
+ TP_printk(
+ "hostname=%s clp state=%s error=%ld (%s) section=%s",
+ __get_str(hostname),
+ show_nfs4_clp_state(__entry->state), -__entry->error,
+ show_nfsv4_errors(__entry->error), __get_str(section)
+ )
+)
+
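The TRACE_DEFINE_ENUM() block above is load-bearing: __print_flags() is evaluated when user space reads the trace buffer, where C enum names no longer exist, so each value must be exported to the tracefs eval map. A schematic of the pairing (names hypothetical):

/* Export the numeric flag value so trace readers can decode it. */
TRACE_DEFINE_ENUM(MY_FLAG_BUSY);

#define show_my_flags(f) \
    __print_flags(f, "|", { MY_FLAG_BUSY, "BUSY" })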
TRACE_EVENT(nfs4_xdr_status,
TP_PROTO(
const struct xdr_stream *xdr,
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 2b7741fe42ea..a3592becae4a 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -4169,19 +4169,17 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap,
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
return -EIO;
+ bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
if (len < NFS4_MAXLABELLEN) {
- if (label) {
- if (label->len) {
- if (label->len < len)
- return -ERANGE;
- memcpy(label->label, p, len);
- }
+ if (label && label->len) {
+ if (label->len < len)
+ return -ERANGE;
+ memcpy(label->label, p, len);
label->len = len;
label->pi = pi;
label->lfs = lfs;
status = NFS_ATTR_FATTR_V4_SECURITY_LABEL;
}
- bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
} else
printk(KERN_WARNING "%s: label too long (%u)!\n",
__func__, len);
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index effaa4247b91..c0f2e1751c33 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -169,10 +169,10 @@ static int __init root_nfs_cat(char *dest, const char *src,
size_t len = strlen(dest);
if (len && dest[len - 1] != ',')
- if (strlcat(dest, ",", destlen) > destlen)
+ if (strlcat(dest, ",", destlen) >= destlen)
return -1;
- if (strlcat(dest, src, destlen) > destlen)
+ if (strlcat(dest, src, destlen) >= destlen)
return -1;
return 0;
}
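The switch from `>` to `>=` follows from strlcat()'s return convention: it reports the total length of the string it tried to create, so a result equal to the buffer size already means the last character was dropped. A standalone userspace illustration (my_strlcat is a local stand-in with the usual OpenBSD semantics):

#include <stdio.h>
#include <string.h>

/*
 * Minimal strlcat(): returns strlen(dst) + strlen(src), the length of
 * the string it tried to create, truncating the copy to fit dsize.
 */
static size_t my_strlcat(char *dst, const char *src, size_t dsize)
{
    size_t dlen = strlen(dst);
    size_t slen = strlen(src);
    size_t copy;

    if (dlen >= dsize)
        return dsize + slen;            /* no room at all */
    copy = dsize - dlen - 1;            /* space left for src */
    if (copy > slen)
        copy = slen;
    memcpy(dst + dlen, src, copy);
    dst[dlen + copy] = '\0';
    return dlen + slen;
}

int main(void)
{
    char buf[8] = "abc";
    size_t ret = my_strlcat(buf, "defgh", sizeof(buf));

    /* ret == 8: "abcdefgh" was wanted, but buf holds "abcdefg". */
    printf("ret=%zu buf=\"%s\"\n", ret, buf);
    /* "ret > sizeof(buf)" is false here, yet truncation happened;
     * only "ret >= sizeof(buf)" catches this off-by-one case. */
    return 0;
}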
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index 537b80d693f1..d4829f3f2293 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -152,7 +152,7 @@ nfs4_get_device_info(struct nfs_server *server,
set_bit(NFS_DEVICEID_NOCACHE, &d->flags);
out_free_pages:
- for (i = 0; i < max_pages; i++)
+ while (--i >= 0)
__free_page(pages[i]);
kfree(pages);
out_free_pdev:
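The rewritten loop unwinds only what was actually allocated: on the error path `i` holds the index of the first failed allocation, so `while (--i >= 0)` walks back over the filled slots, whereas iterating from 0 to max_pages would also touch entries that were never assigned. The same pattern in plain C (alloc_bufs is a hypothetical helper):

#include <stdlib.h>

static char **alloc_bufs(int n, size_t sz)
{
    char **bufs = calloc(n, sizeof(*bufs));
    int i;

    if (!bufs)
        return NULL;
    for (i = 0; i < n; i++) {
        bufs[i] = malloc(sz);
        if (!bufs[i])
            goto out_free;
    }
    return bufs;

out_free:
    while (--i >= 0)            /* frees exactly slots [0, i) */
        free(bufs[i]);
    free(bufs);
    return NULL;
}

int main(void)
{
    char **bufs = alloc_bufs(4, 16);
    int i;

    if (!bufs)
        return 1;
    for (i = 0; i < 4; i++)
        free(bufs[i]);
    free(bufs);
    return 0;
}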
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 999b4162abba..e7e779cfdbaf 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -440,6 +440,41 @@ void nfs_sb_deactive(struct super_block *sb)
}
EXPORT_SYMBOL_GPL(nfs_sb_deactive);
+static int __nfs_list_for_each_server(struct list_head *head,
+ int (*fn)(struct nfs_server *, void *),
+ void *data)
+{
+ struct nfs_server *server, *last = NULL;
+ int ret = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, head, client_link) {
+ if (!(server->super && nfs_sb_active(server->super)))
+ continue;
+ rcu_read_unlock();
+ if (last)
+ nfs_sb_deactive(last->super);
+ last = server;
+ ret = fn(server, data);
+ if (ret)
+ goto out;
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+out:
+ if (last)
+ nfs_sb_deactive(last->super);
+ return ret;
+}
+
+int nfs_client_for_each_server(struct nfs_client *clp,
+ int (*fn)(struct nfs_server *, void *),
+ void *data)
+{
+ return __nfs_list_for_each_server(&clp->cl_superblocks, fn, data);
+}
+EXPORT_SYMBOL_GPL(nfs_client_for_each_server);
+
/*
* Deliver file system statistics to userspace
*/
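__nfs_list_for_each_server() drops the RCU read lock around the callback so that fn() may sleep; the `last` bookkeeping keeps the previous superblock active until the walk has resumed, which in turn keeps the list cursor valid. A hypothetical caller, with the call site shown as a fragment (count_servers is not a kernel function; the callback runs with the superblock pinned):

static int count_servers(struct nfs_server *server, void *data)
{
    (*(unsigned int *)data)++;
    return 0;           /* non-zero return stops the walk */
}

    unsigned int n = 0;

    nfs_client_for_each_server(clp, count_servers, &n);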
@@ -2405,22 +2440,31 @@ void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
if (data && data->bsize)
sb->s_blocksize = nfs_block_size(data->bsize, &sb->s_blocksize_bits);
- if (server->nfs_client->rpc_ops->version != 2) {
- /* The VFS shouldn't apply the umask to mode bits. We will do
- * so ourselves when necessary.
+ switch (server->nfs_client->rpc_ops->version) {
+ case 2:
+ sb->s_time_gran = 1000;
+ sb->s_time_min = 0;
+ sb->s_time_max = U32_MAX;
+ break;
+ case 3:
+ /*
+ * The VFS shouldn't apply the umask to mode bits.
+ * We will do so ourselves when necessary.
*/
sb->s_flags |= SB_POSIXACL;
sb->s_time_gran = 1;
- sb->s_export_op = &nfs_export_ops;
- } else
- sb->s_time_gran = 1000;
-
- if (server->nfs_client->rpc_ops->version != 4) {
sb->s_time_min = 0;
sb->s_time_max = U32_MAX;
- } else {
+ sb->s_export_op = &nfs_export_ops;
+ break;
+ case 4:
+ sb->s_flags |= SB_POSIXACL;
+ sb->s_time_gran = 1;
sb->s_time_min = S64_MIN;
sb->s_time_max = S64_MAX;
+ if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
+ sb->s_export_op = &nfs_export_ops;
+ break;
}
nfs_initialise_sb(sb);
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
index 442543304930..2455dc8be18a 100644
--- a/fs/nfsd/blocklayoutxdr.c
+++ b/fs/nfsd/blocklayoutxdr.c
@@ -82,6 +82,15 @@ nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
int len = sizeof(__be32), ret, i;
__be32 *p;
+ /*
+ * See paragraph 5 of RFC 8881 S18.40.3.
+ */
+ if (!gdp->gd_maxcount) {
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ return nfserr_resource;
+ return nfs_ok;
+ }
+
p = xdr_reserve_space(xdr, len + sizeof(__be32));
if (!p)
return nfserr_resource;
diff --git a/fs/nfsd/flexfilelayoutxdr.c b/fs/nfsd/flexfilelayoutxdr.c
index e81d2a5cf381..bb205328e043 100644
--- a/fs/nfsd/flexfilelayoutxdr.c
+++ b/fs/nfsd/flexfilelayoutxdr.c
@@ -85,6 +85,15 @@ nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
int addr_len;
__be32 *p;
+ /*
+ * See paragraph 5 of RFC 8881 S18.40.3.
+ */
+ if (!gdp->gd_maxcount) {
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ return nfserr_resource;
+ return nfs_ok;
+ }
+
/* len + padding for two strings */
addr_len = 16 + da->netaddr.netid_len + da->netaddr.addr_len;
ver_len = 20;
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index ed53e206a299..82329b5102c6 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -42,9 +42,6 @@ struct nfsd_net {
bool grace_ended;
time_t boot_time;
- /* internal mount of the "nfsd" pseudofilesystem: */
- struct vfsmount *nfsd_mnt;
-
struct dentry *nfsd_client_dir;
/*
@@ -121,6 +118,9 @@ struct nfsd_net {
wait_queue_head_t ntf_wq;
atomic_t ntf_refcnt;
+ /* Allow umount to wait for nfsd state cleanup */
+ struct completion nfsd_shutdown_complete;
+
/*
* clientid and stateid data for construction of net unique COPY
* stateids.
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 8f077e66e613..03e8c45a52f3 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -868,13 +868,11 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
} else
dchild = dget(dparent);
} else
- dchild = lookup_one_len_unlocked(name, dparent, namlen);
+ dchild = lookup_positive_unlocked(name, dparent, namlen);
if (IS_ERR(dchild))
return rv;
if (d_mountpoint(dchild))
goto out;
- if (d_really_is_negative(dchild))
- goto out;
if (dchild->d_inode->i_ino != ino)
goto out;
rv = fh_compose(fhp, exp, dchild, &cd->fh);
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 3c50d18fe8a9..771733396eab 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -840,8 +840,8 @@ static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct r
if (!kcred)
return NULL;
- kcred->uid = ses->se_cb_sec.uid;
- kcred->gid = ses->se_cb_sec.gid;
+ kcred->fsuid = ses->se_cb_sec.uid;
+ kcred->fsgid = ses->se_cb_sec.gid;
return kcred;
}
}
@@ -880,7 +880,6 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
} else {
if (!conn->cb_xprt)
return -EINVAL;
- clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
clp->cl_cb_session = ses;
args.bc_xprt = conn->cb_xprt;
args.prognumber = clp->cl_cb_session->se_cb_prog;
@@ -900,6 +899,9 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
rpc_shutdown_client(client);
return -ENOMEM;
}
+
+ if (clp->cl_minorversion != 0)
+ clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
clp->cl_cb_client = client;
clp->cl_cb_cred = cred;
return 0;
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index e12409eca7cc..215362144aec 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -322,11 +322,11 @@ nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
if (ls->ls_recalled)
goto out_unlock;
- ls->ls_recalled = true;
- atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
if (list_empty(&ls->ls_layouts))
goto out_unlock;
+ ls->ls_recalled = true;
+ atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
trace_nfsd_layout_recall(&ls->ls_stid.sc_stateid);
refcount_inc(&ls->ls_stid.sc_count);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 452ed633a2c7..e38f873f98a7 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -865,8 +865,8 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
rename->rn_tname, rename->rn_tnamelen);
if (status)
return status;
- set_change_info(&rename->rn_sinfo, &cstate->current_fh);
- set_change_info(&rename->rn_tinfo, &cstate->save_fh);
+ set_change_info(&rename->rn_sinfo, &cstate->save_fh);
+ set_change_info(&rename->rn_tinfo, &cstate->current_fh);
return nfs_ok;
}
@@ -1059,8 +1059,10 @@ out:
return status;
out_put_dst:
nfsd_file_put(*dst);
+ *dst = NULL;
out_put_src:
nfsd_file_put(*src);
+ *src = NULL;
goto out;
}
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 7d408957ed62..14463e107918 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -825,8 +825,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
princhash.data = memdup_user(
&ci->cc_princhash.cp_data,
princhashlen);
- if (IS_ERR_OR_NULL(princhash.data))
+ if (IS_ERR_OR_NULL(princhash.data)) {
+ kfree(name.data);
return -EFAULT;
+ }
princhash.len = princhashlen;
} else
princhash.len = 0;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 228c2b0753dc..a0aa7e63739d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -508,15 +508,26 @@ find_any_file(struct nfs4_file *f)
return ret;
}
-static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
+static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
{
- struct nfsd_file *ret = NULL;
+ lockdep_assert_held(&f->fi_lock);
+
+ if (f->fi_fds[O_RDWR])
+ return f->fi_fds[O_RDWR];
+ if (f->fi_fds[O_WRONLY])
+ return f->fi_fds[O_WRONLY];
+ if (f->fi_fds[O_RDONLY])
+ return f->fi_fds[O_RDONLY];
+ return NULL;
+}
+
+static struct nfsd_file *find_deleg_file_locked(struct nfs4_file *f)
+{
+ lockdep_assert_held(&f->fi_lock);
- spin_lock(&f->fi_lock);
if (f->fi_deleg_file)
- ret = nfsd_file_get(f->fi_deleg_file);
- spin_unlock(&f->fi_lock);
- return ret;
+ return f->fi_deleg_file;
+ return NULL;
}
static atomic_long_t num_delegations;
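Unlike find_any_file(), the new `_locked` variants return a borrowed pointer without taking a reference, so the caller must hold fi_lock across every use of the result; lockdep_assert_held() turns a violated contract into a splat on lockdep-enabled builds. The resulting calling pattern, schematically:

    struct nfsd_file *file;

    spin_lock(&nf->fi_lock);
    file = find_any_file_locked(nf);    /* borrowed, no refcount */
    if (file)
        use_file(file);  /* must finish before unlock (schematic) */
    spin_unlock(&nf->fi_lock);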
@@ -1085,9 +1096,9 @@ static void revoke_delegation(struct nfs4_delegation *dp)
WARN_ON(!list_empty(&dp->dl_recall_lru));
if (clp->cl_minorversion) {
+ spin_lock(&clp->cl_lock);
dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
refcount_inc(&dp->dl_stid.sc_count);
- spin_lock(&clp->cl_lock);
list_add(&dp->dl_recall_lru, &clp->cl_revoked);
spin_unlock(&clp->cl_lock);
}
@@ -2402,9 +2413,11 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
ols = openlockstateid(st);
oo = ols->st_stateowner;
nf = st->sc_file;
- file = find_any_file(nf);
+
+ spin_lock(&nf->fi_lock);
+ file = find_any_file_locked(nf);
if (!file)
- return 0;
+ goto out;
seq_printf(s, "- 0x%16phN: { type: open, ", &st->sc_stateid);
@@ -2422,8 +2435,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
seq_printf(s, ", ");
nfs4_show_owner(s, oo);
seq_printf(s, " }\n");
- nfsd_file_put(file);
-
+out:
+ spin_unlock(&nf->fi_lock);
return 0;
}
@@ -2437,9 +2450,10 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
ols = openlockstateid(st);
oo = ols->st_stateowner;
nf = st->sc_file;
- file = find_any_file(nf);
+ spin_lock(&nf->fi_lock);
+ file = find_any_file_locked(nf);
if (!file)
- return 0;
+ goto out;
seq_printf(s, "- 0x%16phN: { type: lock, ", &st->sc_stateid);
@@ -2455,8 +2469,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
seq_printf(s, ", ");
nfs4_show_owner(s, oo);
seq_printf(s, " }\n");
- nfsd_file_put(file);
-
+out:
+ spin_unlock(&nf->fi_lock);
return 0;
}
@@ -2468,9 +2482,10 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
ds = delegstateid(st);
nf = st->sc_file;
- file = find_deleg_file(nf);
+ spin_lock(&nf->fi_lock);
+ file = find_deleg_file_locked(nf);
if (!file)
- return 0;
+ goto out;
seq_printf(s, "- 0x%16phN: { type: deleg, ", &st->sc_stateid);
@@ -2482,8 +2497,8 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
nfs4_show_superblock(s, file);
seq_printf(s, " }\n");
- nfsd_file_put(file);
-
+out:
+ spin_unlock(&nf->fi_lock);
return 0;
}
@@ -2556,7 +2571,7 @@ static int client_opens_release(struct inode *inode, struct file *file)
/* XXX: alternatively, we could get/drop in seq start/stop */
drop_client(clp);
- return 0;
+ return seq_release(inode, file);
}
static const struct file_operations client_states_fops = {
@@ -5498,15 +5513,6 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
return status;
- /* Client debugging aid. */
- if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
- char addr_str[INET6_ADDRSTRLEN];
- rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
- sizeof(addr_str));
- pr_warn_ratelimited("NFSD: client %s testing state ID "
- "with incorrect client ID\n", addr_str);
- return status;
- }
spin_lock(&cl->cl_lock);
s = find_stateid_locked(cl, stateid);
if (!s)
@@ -7739,14 +7745,9 @@ nfs4_state_start_net(struct net *net)
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int ret;
- ret = get_nfsdfs(net);
- if (ret)
- return ret;
ret = nfs4_state_create_net(net);
- if (ret) {
- mntput(nn->nfsd_mnt);
+ if (ret)
return ret;
- }
locks_start_grace(net, &nn->nfsd4_manager);
nfsd4_client_tracking_init(net);
if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
@@ -7815,7 +7816,6 @@ nfs4_state_shutdown_net(struct net *net)
nfsd4_client_tracking_exit(net);
nfs4_state_destroy_net(net);
- mntput(nn->nfsd_mnt);
}
void
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index e61d9c435957..1d24fff2709c 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2991,18 +2991,9 @@ nfsd4_encode_dirent_fattr(struct xdr_stream *xdr, struct nfsd4_readdir *cd,
__be32 nfserr;
int ignore_crossmnt = 0;
- dentry = lookup_one_len_unlocked(name, cd->rd_fhp->fh_dentry, namlen);
+ dentry = lookup_positive_unlocked(name, cd->rd_fhp->fh_dentry, namlen);
if (IS_ERR(dentry))
return nfserrno(PTR_ERR(dentry));
- if (d_really_is_negative(dentry)) {
- /*
- * we're not holding the i_mutex here, so there's
- * a window where this directory entry could have gone
- * away.
- */
- dput(dentry);
- return nfserr_noent;
- }
exp_get(exp);
/*
@@ -3109,6 +3100,17 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
case nfserr_noent:
xdr_truncate_encode(xdr, start_offset);
goto skip_entry;
+ case nfserr_jukebox:
+ /*
+ * The pseudoroot should only display dentries that lead to
+ * exports. If we get EJUKEBOX here, then we can't tell whether
+ * this entry should be included. Just fail the whole READDIR
+ * with NFS4ERR_DELAY in that case, and hope that the situation
+ * will resolve itself by the client's next attempt.
+ */
+ if (cd->rd_fhp->fh_export->ex_flags & NFSEXP_V4ROOT)
+ goto fail;
+ fallthrough;
default:
/*
* If the client requested the RDATTR_ERROR attribute,
@@ -3398,7 +3400,7 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op
p = xdr_reserve_space(xdr, 32);
if (!p)
return nfserr_resource;
- *p++ = cpu_to_be32(0);
+ *p++ = cpu_to_be32(open->op_recall);
/*
* TODO: space_limit's in delegations
@@ -3600,7 +3602,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
if (resp->xdr.buf->page_len &&
test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags)) {
WARN_ON_ONCE(1);
- return nfserr_resource;
+ return nfserr_serverfault;
}
xdr_commit_encode(xdr);
@@ -4124,20 +4126,17 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
*p++ = cpu_to_be32(gdev->gd_layout_type);
- /* If maxcount is 0 then just update notifications */
- if (gdev->gd_maxcount != 0) {
- ops = nfsd4_layout_ops[gdev->gd_layout_type];
- nfserr = ops->encode_getdeviceinfo(xdr, gdev);
- if (nfserr) {
- /*
- * We don't bother to burden the layout drivers with
- * enforcing gd_maxcount, just tell the client to
- * come back with a bigger buffer if it's not enough.
- */
- if (xdr->buf->len + 4 > gdev->gd_maxcount)
- goto toosmall;
- return nfserr;
- }
+ ops = nfsd4_layout_ops[gdev->gd_layout_type];
+ nfserr = ops->encode_getdeviceinfo(xdr, gdev);
+ if (nfserr) {
+ /*
+ * We don't bother to burden the layout drivers with
+ * enforcing gd_maxcount, just tell the client to
+ * come back with a bigger buffer if it's not enough.
+ */
+ if (xdr->buf->len + 4 > gdev->gd_maxcount)
+ goto toosmall;
+ return nfserr;
}
if (gdev->gd_notify_types) {
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 670e97dd67f0..80c90fc231a5 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -20,8 +20,7 @@
#include "nfsd.h"
#include "cache.h"
-
-#define NFSDDBG_FACILITY NFSDDBG_REPCACHE
+#include "trace.h"
/*
* We use this value to determine the number of hash buckets from the max
@@ -324,8 +323,10 @@ nfsd_cache_key_cmp(const struct svc_cacherep *key,
const struct svc_cacherep *rp, struct nfsd_net *nn)
{
if (key->c_key.k_xid == rp->c_key.k_xid &&
- key->c_key.k_csum != rp->c_key.k_csum)
+ key->c_key.k_csum != rp->c_key.k_csum) {
++nn->payload_misses;
+ trace_nfsd_drc_mismatch(nn, key, rp);
+ }
return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}
@@ -378,15 +379,22 @@ out:
return ret;
}
-/*
+/**
+ * nfsd_cache_lookup - Find an entry in the duplicate reply cache
+ * @rqstp: Incoming Call to find
+ *
* Try to find an entry matching the current call in the cache. When none
* is found, we try to grab the oldest expired entry off the LRU list. If
* a suitable one isn't there, then drop the cache_lock and allocate a
* new one, then search again in case one got inserted while this thread
* didn't hold the lock.
+ *
+ * Return values:
+ * %RC_DOIT: Process the request normally
+ * %RC_REPLY: Reply from cache
+ * %RC_DROPIT: Do not process the request further
*/
-int
-nfsd_cache_lookup(struct svc_rqst *rqstp)
+int nfsd_cache_lookup(struct svc_rqst *rqstp)
{
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct svc_cacherep *rp, *found;
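The documented return values map directly onto the caller's dispatch decision; nfsd_dispatch() does, schematically:

    switch (nfsd_cache_lookup(rqstp)) {
    case RC_DOIT:
        break;          /* execute the procedure, then call
                         * nfsd_cache_update() with the reply */
    case RC_REPLY:
        return 1;       /* send the cached reply as-is */
    case RC_DROPIT:
        return 0;       /* say nothing; client retransmits */
    }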
@@ -400,7 +408,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
rqstp->rq_cacherep = NULL;
if (type == RC_NOCACHE) {
nfsdstats.rcnocache++;
- return rtn;
+ goto out;
}
csum = nfsd_cache_csum(rqstp);
@@ -410,10 +418,8 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
* preallocate an entry.
*/
rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
- if (!rp) {
- dprintk("nfsd: unable to allocate DRC entry!\n");
- return rtn;
- }
+ if (!rp)
+ goto out;
spin_lock(&b->cache_lock);
found = nfsd_cache_insert(b, rp, nn);
@@ -432,8 +438,10 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
/* go ahead and prune the cache */
prune_bucket(b, nn);
- out:
+
+out_unlock:
spin_unlock(&b->cache_lock);
+out:
return rtn;
found_entry:
@@ -443,13 +451,13 @@ found_entry:
/* Request being processed */
if (rp->c_state == RC_INPROG)
- goto out;
+ goto out_trace;
/* From the hall of fame of impractical attacks:
* Is this a user who tries to snoop on the cache? */
rtn = RC_DOIT;
if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
- goto out;
+ goto out_trace;
/* Compose RPC reply header */
switch (rp->c_type) {
@@ -461,20 +469,26 @@ found_entry:
break;
case RC_REPLBUFF:
if (!nfsd_cache_append(rqstp, &rp->c_replvec))
- goto out; /* should not happen */
+ goto out_unlock; /* should not happen */
rtn = RC_REPLY;
break;
default:
WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
}
- goto out;
+out_trace:
+ trace_nfsd_drc_found(nn, rqstp, rtn);
+ goto out_unlock;
}
-/*
- * Update a cache entry. This is called from nfsd_dispatch when
- * the procedure has been executed and the complete reply is in
- * rqstp->rq_res.
+/**
+ * nfsd_cache_update - Update an entry in the duplicate reply cache.
+ * @rqstp: svc_rqst with a finished Reply
+ * @cachetype: which cache to update
+ * @statp: Reply's status code
+ *
+ * This is called from nfsd_dispatch when the procedure has been
+ * executed and the complete reply is in rqstp->rq_res.
*
* We're copying around data here rather than swapping buffers because
* the toplevel loop requires max-sized buffers, which would be a waste
@@ -487,8 +501,7 @@ found_entry:
* nfsd failed to encode a reply that otherwise would have been cached.
* In this case, nfsd_cache_update is called with statp == NULL.
*/
-void
-nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct svc_cacherep *rp = rqstp->rq_cacherep;
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 055cc0458f27..ce72869315ad 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -351,7 +351,7 @@ static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size)
static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
{
char *dname, *path;
- int uninitialized_var(maxsize);
+ int maxsize;
char *mesg = buf;
int len;
struct auth_domain *dom;
@@ -1417,6 +1417,8 @@ static void nfsd_umount(struct super_block *sb)
{
struct net *net = sb->s_fs_info;
+ nfsd_shutdown_threads(net);
+
kill_litter_super(sb);
put_net(net);
}
@@ -1429,18 +1431,6 @@ static struct file_system_type nfsd_fs_type = {
};
MODULE_ALIAS_FS("nfsd");
-int get_nfsdfs(struct net *net)
-{
- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- struct vfsmount *mnt;
-
- mnt = vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL);
- if (IS_ERR(mnt))
- return PTR_ERR(mnt);
- nn->nfsd_mnt = mnt;
- return 0;
-}
-
#ifdef CONFIG_PROC_FS
static int create_proc_exports_entry(void)
{
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 4ff0c5318a02..3ae9811c0bb9 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -84,11 +84,10 @@ int nfsd_get_nrthreads(int n, int *, struct net *);
int nfsd_set_nrthreads(int n, int *, struct net *);
int nfsd_pool_stats_open(struct inode *, struct file *);
int nfsd_pool_stats_release(struct inode *, struct file *);
+void nfsd_shutdown_threads(struct net *net);
void nfsd_destroy(struct net *net);
-int get_nfsdfs(struct net *);
-
struct nfsdfs_client {
struct kref cl_ref;
void (*cl_release)(struct kref *kref);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index d63cdda1782d..969a227186fa 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -420,8 +420,8 @@ static void nfsd_shutdown_net(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- nfsd_file_cache_shutdown_net(net);
nfs4_state_shutdown_net(net);
+ nfsd_file_cache_shutdown_net(net);
if (nn->lockd_up) {
lockd_down(net);
nn->lockd_up = 0;
@@ -594,6 +594,37 @@ static const struct svc_serv_ops nfsd_thread_sv_ops = {
.svo_module = THIS_MODULE,
};
+static void nfsd_complete_shutdown(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ WARN_ON(!mutex_is_locked(&nfsd_mutex));
+
+ nn->nfsd_serv = NULL;
+ complete(&nn->nfsd_shutdown_complete);
+}
+
+void nfsd_shutdown_threads(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv;
+
+ mutex_lock(&nfsd_mutex);
+ serv = nn->nfsd_serv;
+ if (serv == NULL) {
+ mutex_unlock(&nfsd_mutex);
+ return;
+ }
+
+ svc_get(serv);
+ /* Kill outstanding nfsd threads */
+ serv->sv_ops->svo_setup(serv, NULL, 0);
+ nfsd_destroy(net);
+ mutex_unlock(&nfsd_mutex);
+ /* Wait for shutdown of nfsd_serv to complete */
+ wait_for_completion(&nn->nfsd_shutdown_complete);
+}
+
int nfsd_create_serv(struct net *net)
{
int error;
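The completion closes a gap between "no more nfsd threads" and "nn->nfsd_serv actually gone": the final svc_destroy() may run from another task, and umount must not proceed until it has. The generic shape of the pattern, reduced to a hypothetical miniature:

#include <linux/completion.h>

struct my_serv {
    struct completion shutdown_complete;
    /* ... server state ... */
};

static void my_serv_init(struct my_serv *s)
{
    init_completion(&s->shutdown_complete);
}

/* Runs wherever the final reference is dropped, possibly another task. */
static void my_serv_last_put(struct my_serv *s)
{
    complete(&s->shutdown_complete);
}

/* umount-style caller: trigger teardown, then block until it is done. */
static void my_serv_shutdown_wait(struct my_serv *s)
{
    wait_for_completion(&s->shutdown_complete);
}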
@@ -611,11 +642,13 @@ int nfsd_create_serv(struct net *net)
&nfsd_thread_sv_ops);
if (nn->nfsd_serv == NULL)
return -ENOMEM;
+ init_completion(&nn->nfsd_shutdown_complete);
nn->nfsd_serv->sv_maxconn = nn->max_connections;
error = svc_bind(nn->nfsd_serv, net);
if (error < 0) {
svc_destroy(nn->nfsd_serv);
+ nfsd_complete_shutdown(net);
return error;
}
@@ -664,7 +697,7 @@ void nfsd_destroy(struct net *net)
svc_shutdown_net(nn->nfsd_serv, net);
svc_destroy(nn->nfsd_serv);
if (destroy)
- nn->nfsd_serv = NULL;
+ nfsd_complete_shutdown(net);
}
int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 127db5351d01..9d37d09d7ca8 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -166,6 +166,12 @@ DEFINE_STATEID_EVENT(layout_recall_done);
DEFINE_STATEID_EVENT(layout_recall_fail);
DEFINE_STATEID_EVENT(layout_recall_release);
+TRACE_DEFINE_ENUM(NFSD_FILE_HASHED);
+TRACE_DEFINE_ENUM(NFSD_FILE_PENDING);
+TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_READ);
+TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_WRITE);
+TRACE_DEFINE_ENUM(NFSD_FILE_REFERENCED);
+
#define show_nf_flags(val) \
__print_flags(val, "|", \
{ 1 << NFSD_FILE_HASHED, "HASHED" }, \
@@ -304,6 +310,65 @@ TRACE_EVENT(nfsd_file_fsnotify_handle_event,
__entry->nlink, __entry->mode, __entry->mask)
);
+#include "cache.h"
+
+TRACE_DEFINE_ENUM(RC_DROPIT);
+TRACE_DEFINE_ENUM(RC_REPLY);
+TRACE_DEFINE_ENUM(RC_DOIT);
+
+#define show_drc_retval(x) \
+ __print_symbolic(x, \
+ { RC_DROPIT, "DROPIT" }, \
+ { RC_REPLY, "REPLY" }, \
+ { RC_DOIT, "DOIT" })
+
+TRACE_EVENT(nfsd_drc_found,
+ TP_PROTO(
+ const struct nfsd_net *nn,
+ const struct svc_rqst *rqstp,
+ int result
+ ),
+ TP_ARGS(nn, rqstp, result),
+ TP_STRUCT__entry(
+ __field(unsigned long long, boot_time)
+ __field(unsigned long, result)
+ __field(u32, xid)
+ ),
+ TP_fast_assign(
+ __entry->boot_time = nn->boot_time;
+ __entry->result = result;
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ ),
+ TP_printk("boot_time=%16llx xid=0x%08x result=%s",
+ __entry->boot_time, __entry->xid,
+ show_drc_retval(__entry->result))
+
+);
+
+TRACE_EVENT(nfsd_drc_mismatch,
+ TP_PROTO(
+ const struct nfsd_net *nn,
+ const struct svc_cacherep *key,
+ const struct svc_cacherep *rp
+ ),
+ TP_ARGS(nn, key, rp),
+ TP_STRUCT__entry(
+ __field(unsigned long long, boot_time)
+ __field(u32, xid)
+ __field(u32, cached)
+ __field(u32, ingress)
+ ),
+ TP_fast_assign(
+ __entry->boot_time = nn->boot_time;
+ __entry->xid = be32_to_cpu(key->c_key.k_xid);
+ __entry->cached = (__force u32)key->c_key.k_csum;
+ __entry->ingress = (__force u32)rp->c_key.k_csum;
+ ),
+ TP_printk("boot_time=%16llx xid=0x%08x cached-csum=0x%08x ingress-csum=0x%08x",
+ __entry->boot_time, __entry->xid, __entry->cached,
+ __entry->ingress)
+);
+
#endif /* _NFSD_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index b6f4b552c9af..6aa968bee0ce 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1689,6 +1689,12 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
goto out;
+ err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
+ if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
+ goto out;
+ if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
+ goto out;
+
retry:
host_err = fh_want_write(ffhp);
if (host_err) {
@@ -1723,12 +1729,6 @@ retry:
if (ndentry == trap)
goto out_dput_new;
- host_err = -EXDEV;
- if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
- goto out_dput_new;
- if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
- goto out_dput_new;
-
if (nfsd_has_cached_files(ndentry)) {
has_cached = true;
goto out_dput_old;
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 235b959fc2b3..bbd82f650e93 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -205,7 +205,8 @@ static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
int ret;
spin_lock(lock);
- if (prev->bh && blkoff == prev->blkoff) {
+ if (prev->bh && blkoff == prev->blkoff &&
+ likely(buffer_uptodate(prev->bh))) {
get_bh(prev->bh);
*bhp = prev->bh;
spin_unlock(lock);
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index fb5a9a8a13cf..2ba57e4b4f0a 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -67,20 +67,28 @@ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
down_read(&bmap->b_sem);
ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp);
- if (ret < 0) {
- ret = nilfs_bmap_convert_error(bmap, __func__, ret);
+ if (ret < 0)
goto out;
- }
+
if (NILFS_BMAP_USE_VBN(bmap)) {
ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp,
&blocknr);
if (!ret)
*ptrp = blocknr;
+ else if (ret == -ENOENT) {
+ /*
+ * If there was no valid entry in DAT for the block
+ * address obtained by b_ops->bop_lookup, then pass
+ * internal code -EINVAL to nilfs_bmap_convert_error
+ * to treat it as metadata corruption.
+ */
+ ret = -EINVAL;
+ }
}
out:
up_read(&bmap->b_sem);
- return ret;
+ return nilfs_bmap_convert_error(bmap, __func__, ret);
}
int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp,
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index e00e184b1261..1776121677e2 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -285,6 +285,14 @@ void nilfs_btnode_abort_change_key(struct address_space *btnc,
if (nbh == NULL) { /* blocksize == pagesize */
xa_erase_irq(&btnc->i_pages, newkey);
unlock_page(ctxt->bh->b_page);
- } else
- brelse(nbh);
+ } else {
+ /*
+ * When canceling a buffer that a prepare operation has
+ * allocated to copy a node block to another location, use
+ * nilfs_btnode_delete() to initialize and release the buffer
+ * so that the buffer flags will not be in an inconsistent
+ * state when it is reallocated.
+ */
+ nilfs_btnode_delete(nbh);
+ }
}
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 919d1238ce45..a0e37530dcf3 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -480,9 +480,18 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
ret = nilfs_btnode_submit_block(btnc, ptr, 0, REQ_OP_READ, 0, &bh,
&submit_ptr);
if (ret) {
- if (ret != -EEXIST)
- return ret;
- goto out_check;
+ if (likely(ret == -EEXIST))
+ goto out_check;
+ if (ret == -ENOENT) {
+ /*
+ * Block address translation failed due to invalid
+ * value of 'ptr'. In this case, return internal code
+ * -EINVAL (broken bmap) to notify bmap layer of fatal
+ * metadata corruption.
+ */
+ ret = -EINVAL;
+ }
+ return ret;
}
if (ra) {
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index a3523a243e11..b9c759addd50 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -40,8 +40,21 @@ static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
static int nilfs_dat_prepare_entry(struct inode *dat,
struct nilfs_palloc_req *req, int create)
{
- return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
- create, &req->pr_entry_bh);
+ int ret;
+
+ ret = nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
+ create, &req->pr_entry_bh);
+ if (unlikely(ret == -ENOENT)) {
+ nilfs_msg(dat->i_sb, KERN_ERR,
+ "DAT doesn't have a block to manage vblocknr = %llu",
+ (unsigned long long)req->pr_entry_nr);
+ /*
+ * Return internal code -EINVAL to notify bmap layer of
+ * metadata corruption.
+ */
+ ret = -EINVAL;
+ }
+ return ret;
}
static void nilfs_dat_commit_entry(struct inode *dat,
@@ -111,16 +124,19 @@ static void nilfs_dat_commit_free(struct inode *dat,
kunmap_atomic(kaddr);
nilfs_dat_commit_entry(dat, req);
+
+ if (unlikely(req->pr_desc_bh == NULL || req->pr_bitmap_bh == NULL)) {
+ nilfs_error(dat->i_sb,
+ "state inconsistency probably due to duplicate use of vblocknr = %llu",
+ (unsigned long long)req->pr_entry_nr);
+ return;
+ }
nilfs_palloc_commit_free_entry(dat, req);
}
int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
- int ret;
-
- ret = nilfs_dat_prepare_entry(dat, req, 0);
- WARN_ON(ret == -ENOENT);
- return ret;
+ return nilfs_dat_prepare_entry(dat, req, 0);
}
void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
@@ -147,10 +163,8 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
int ret;
ret = nilfs_dat_prepare_entry(dat, req, 0);
- if (ret < 0) {
- WARN_ON(ret == -ENOENT);
+ if (ret < 0)
return ret;
- }
kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 64bc81363c6c..3802b42e1cb4 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -105,7 +105,13 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
nilfs_transaction_commit(inode->i_sb);
mapped:
- wait_for_stable_page(page);
+ /*
+ * Since checksumming including data blocks is performed to determine
+ * the validity of the log to be written and used for recovery, it is
+ * necessary to wait for writeback to finish here, regardless of the
+ * stable write requirement of the backing device.
+ */
+ wait_on_page_writeback(page);
out:
sb_end_pagefault(inode->i_sb);
return block_page_mkwrite_return(ret);
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 114774ac2185..cef46650102e 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -73,10 +73,8 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn);
- if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */
- brelse(bh);
+ if (unlikely(err)) /* -EIO, -ENOMEM, -ENOENT */
goto failed;
- }
}
lock_buffer(bh);
@@ -102,6 +100,8 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
failed:
unlock_page(bh->b_page);
put_page(bh->b_page);
+ if (unlikely(err))
+ brelse(bh);
return err;
}
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 35b0bfe9324f..ea94dc21af0c 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -340,6 +340,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
struct inode *inode;
struct nilfs_inode_info *ii;
struct nilfs_root *root;
+ struct buffer_head *bh;
int err = -ENOMEM;
ino_t ino;
@@ -355,11 +356,26 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
ii->i_state = BIT(NILFS_I_NEW);
ii->i_root = root;
- err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
+ err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
if (unlikely(err))
goto failed_ifile_create_inode;
/* reference count of i_bh inherits from nilfs_mdt_read_block() */
+ if (unlikely(ino < NILFS_USER_INO)) {
+ nilfs_msg(sb, KERN_WARNING,
+ "inode bitmap is inconsistent for reserved inodes");
+ do {
+ brelse(bh);
+ err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
+ if (unlikely(err))
+ goto failed_ifile_create_inode;
+ } while (ino < NILFS_USER_INO);
+
+ nilfs_msg(sb, KERN_INFO,
+ "repaired inode bitmap for reserved inodes");
+ }
+ ii->i_bh = bh;
+
atomic64_inc(&root->inodes_count);
inode_init_owner(inode, dir, mode);
inode->i_ino = ino;
@@ -451,6 +467,8 @@ int nilfs_read_inode_common(struct inode *inode,
inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
+ if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode))
+ return -EIO; /* this inode is for metadata and corrupted */
if (inode->i_nlink == 0)
return -ESTALE; /* this inode is deleted */
@@ -912,6 +930,7 @@ void nilfs_evict_inode(struct inode *inode)
struct nilfs_transaction_info ti;
struct super_block *sb = inode->i_sb;
struct nilfs_inode_info *ii = NILFS_I(inode);
+ struct the_nilfs *nilfs;
int ret;
if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
@@ -924,6 +943,23 @@ void nilfs_evict_inode(struct inode *inode)
truncate_inode_pages_final(&inode->i_data);
+ nilfs = sb->s_fs_info;
+ if (unlikely(sb_rdonly(sb) || !nilfs->ns_writer)) {
+ /*
+ * If this inode is about to be disposed after the file system
+ * has been degraded to read-only due to file system corruption
+ * or after the writer has been detached, do not make any
+ * changes that cause writes, just clear it.
+ * Do this check after read-locking ns_segctor_sem by
+ * nilfs_transaction_begin() in order to avoid a race with
+ * the writer detach operation.
+ */
+ clear_inode(inode);
+ nilfs_clear_inode(inode);
+ nilfs_transaction_abort(sb);
+ return;
+ }
+
/* TODO: some of the following operations may fail. */
nilfs_truncate_bmap(ii, 0);
nilfs_mark_inode_dirty(inode);
@@ -1000,7 +1036,7 @@ int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
int err;
spin_lock(&nilfs->ns_inode_lock);
- if (ii->i_bh == NULL) {
+ if (ii->i_bh == NULL || unlikely(!buffer_uptodate(ii->i_bh))) {
spin_unlock(&nilfs->ns_inode_lock);
err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
inode->i_ino, pbh);
@@ -1009,7 +1045,10 @@ int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
spin_lock(&nilfs->ns_inode_lock);
if (ii->i_bh == NULL)
ii->i_bh = *pbh;
- else {
+ else if (unlikely(!buffer_uptodate(ii->i_bh))) {
+ __brelse(ii->i_bh);
+ ii->i_bh = *pbh;
+ } else {
brelse(*pbh);
*pbh = ii->i_bh;
}
@@ -1076,9 +1115,17 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
+ struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
struct buffer_head *ibh;
int err;
+ /*
+ * Do not dirty inodes after the log writer has been detached
+ * and its nilfs_root struct has been freed.
+ */
+ if (unlikely(nilfs_purging(nilfs)))
+ return 0;
+
err = nilfs_load_inode_block(inode, &ibh);
if (unlikely(err)) {
nilfs_msg(inode->i_sb, KERN_WARNING,
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 91b9dac6b2cc..83926b9ab4b5 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -70,7 +70,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
if (argv->v_index > ~(__u64)0 - argv->v_nmembs)
return -EINVAL;
- buf = (void *)__get_free_pages(GFP_NOFS, 0);
+ buf = (void *)get_zeroed_page(GFP_NOFS);
if (unlikely(!buf))
return -ENOMEM;
maxmembs = PAGE_SIZE / argv->v_size;
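get_zeroed_page() is the hardening half of this hunk: the buffer is copied back to user space a record at a time, and any slack that the copy loop never overwrites must not leak stale kernel memory. The idiom in isolation:

    void *buf;

    /*
     * Equivalent to __get_free_pages(GFP_NOFS | __GFP_ZERO, 0):
     * the page arrives zero-filled, so partially-used tail space
     * copied to user space cannot expose old kernel data.
     */
    buf = (void *)get_zeroed_page(GFP_NOFS);
    if (unlikely(!buf))
        return -ENOMEM;
    /* ... fill and copy out ... */
    free_page((unsigned long)buf);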
@@ -1130,7 +1130,14 @@ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
minseg = range[0] + segbytes - 1;
do_div(minseg, segbytes);
+
+ if (range[1] < 4096)
+ goto out;
+
maxseg = NILFS_SB2_OFFSET_BYTES(range[1]);
+ if (maxseg < segbytes)
+ goto out;
+
do_div(maxseg, segbytes);
maxseg--;
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 98d21ad9a073..108e4528fccd 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -370,7 +370,15 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
struct page *page = pvec.pages[i];
lock_page(page);
- nilfs_clear_dirty_page(page, silent);
+
+ /*
+ * This page may have been removed from the address
+ * space by truncation or invalidation when the lock
+ * was acquired. Skip processing in that case.
+ */
+ if (likely(page->mapping == mapping))
+ nilfs_clear_dirty_page(page, silent);
+
unlock_page(page);
}
pagevec_release(&pvec);
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 140b663e91c7..18feb9c7c706 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -472,9 +472,10 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
struct nilfs_recovery_block *rb,
- struct page *page)
+ loff_t pos, struct page *page)
{
struct buffer_head *bh_org;
+ size_t from = pos & ~PAGE_MASK;
void *kaddr;
bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
@@ -482,7 +483,7 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
return -EIO;
kaddr = kmap_atomic(page);
- memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
+ memcpy(kaddr + from, bh_org->b_data, bh_org->b_size);
kunmap_atomic(kaddr);
brelse(bh_org);
return 0;
@@ -521,7 +522,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
goto failed_inode;
}
- err = nilfs_recovery_copy_block(nilfs, rb, page);
+ err = nilfs_recovery_copy_block(nilfs, rb, pos, page);
if (unlikely(err))
goto failed_page;
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 20c479b5e41b..e72466fc8ca9 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -101,6 +101,12 @@ int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
if (unlikely(!bh))
return -ENOMEM;
+ lock_buffer(bh);
+ if (!buffer_uptodate(bh)) {
+ memset(bh->b_data, 0, bh->b_size);
+ set_buffer_uptodate(bh);
+ }
+ unlock_buffer(bh);
nilfs_segbuf_add_segsum_buffer(segbuf, bh);
return 0;
}
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index eb3ac7619088..7d1860d33723 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -322,7 +322,7 @@ void nilfs_relax_pressure_in_lock(struct super_block *sb)
struct the_nilfs *nilfs = sb->s_fs_info;
struct nilfs_sc_info *sci = nilfs->ns_writer;
- if (!sci || !sci->sc_flush_request)
+ if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request)
return;
set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
@@ -435,6 +435,23 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
return 0;
}
+/**
+ * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
+ * @sci: segment constructor object
+ *
+ * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
+ * the current segment summary block.
+ */
+static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
+{
+ struct nilfs_segsum_pointer *ssp;
+
+ ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
+ if (ssp->offset < ssp->bh->b_size)
+ memset(ssp->bh->b_data + ssp->offset, 0,
+ ssp->bh->b_size - ssp->offset);
+}
+
static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
@@ -443,6 +460,7 @@ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
* The current segment is filled up
* (internal code)
*/
+ nilfs_segctor_zeropad_segsum(sci);
sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
return nilfs_segctor_reset_segment_buffer(sci);
}
@@ -547,6 +565,7 @@ static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
goto retry;
}
if (unlikely(required)) {
+ nilfs_segctor_zeropad_segsum(sci);
err = nilfs_segbuf_extend_segsum(segbuf);
if (unlikely(err))
goto failed;
@@ -711,6 +730,11 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
struct page *page = pvec.pages[i];
lock_page(page);
+ if (unlikely(page->mapping != mapping)) {
+ /* Exclude pages removed from the address space */
+ unlock_page(page);
+ continue;
+ }
if (!page_has_buffers(page))
create_empty_buffers(page, i_blocksize(inode), 0);
unlock_page(page);
@@ -880,9 +904,11 @@ static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
nilfs_cpfile_put_checkpoint(
nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
- } else
- WARN_ON(err == -EINVAL || err == -ENOENT);
-
+ } else if (err == -EINVAL || err == -ENOENT) {
+ nilfs_error(sci->sc_super,
+ "checkpoint creation failed due to metadata corruption.");
+ err = -EIO;
+ }
return err;
}
@@ -896,7 +922,11 @@ static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
&raw_cp, &bh_cp);
if (unlikely(err)) {
- WARN_ON(err == -EINVAL || err == -ENOENT);
+ if (err == -EINVAL || err == -ENOENT) {
+ nilfs_error(sci->sc_super,
+ "checkpoint finalization failed due to metadata corruption.");
+ err = -EIO;
+ }
goto failed_ibh;
}
raw_cp->cp_snapshot_list.ssl_next = 0;
@@ -959,10 +989,13 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
unsigned int isz, srsz;
bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
+
+ lock_buffer(bh_sr);
raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
isz = nilfs->ns_inode_size;
srsz = NILFS_SR_BYTES(isz);
+ raw_sr->sr_sum = 0; /* Ensure initialization within this update */
raw_sr->sr_bytes = cpu_to_le16(srsz);
raw_sr->sr_nongc_ctime
= cpu_to_le64(nilfs_doing_gc() ?
@@ -976,6 +1009,8 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
NILFS_SR_SUFILE_OFFSET(isz), 1);
memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
+ set_buffer_uptodate(bh_sr);
+ unlock_buffer(bh_sr);
}
static void nilfs_redirty_inodes(struct list_head *head)
@@ -1525,6 +1560,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
sci->sc_stage = prev_stage;
}
+ nilfs_segctor_zeropad_segsum(sci);
nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
return 0;
@@ -1666,7 +1702,6 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
list_for_each_entry(bh, &segbuf->sb_payload_buffers,
b_assoc_buffers) {
- set_buffer_async_write(bh);
if (bh == segbuf->sb_super_root) {
if (bh->b_page != bd_page) {
lock_page(bd_page);
@@ -1677,6 +1712,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
}
break;
}
+ set_buffer_async_write(bh);
if (bh->b_page != fs_page) {
nilfs_begin_page_io(fs_page);
fs_page = bh->b_page;
@@ -1752,6 +1788,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
list_for_each_entry(segbuf, logs, sb_list) {
list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
b_assoc_buffers) {
+ clear_buffer_uptodate(bh);
if (bh->b_page != bd_page) {
if (bd_page)
end_page_writeback(bd_page);
@@ -1761,14 +1798,15 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
list_for_each_entry(bh, &segbuf->sb_payload_buffers,
b_assoc_buffers) {
- clear_buffer_async_write(bh);
if (bh == segbuf->sb_super_root) {
+ clear_buffer_uptodate(bh);
if (bh->b_page != bd_page) {
end_page_writeback(bd_page);
bd_page = bh->b_page;
}
break;
}
+ clear_buffer_async_write(bh);
if (bh->b_page != fs_page) {
nilfs_end_page_io(fs_page, err);
fs_page = bh->b_page;
@@ -1856,8 +1894,9 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
BIT(BH_NILFS_Redirected));
- set_mask_bits(&bh->b_state, clear_bits, set_bits);
if (bh == segbuf->sb_super_root) {
+ set_buffer_uptodate(bh);
+ clear_buffer_dirty(bh);
if (bh->b_page != bd_page) {
end_page_writeback(bd_page);
bd_page = bh->b_page;
@@ -1865,6 +1904,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
update_sr = true;
break;
}
+ set_mask_bits(&bh->b_state, clear_bits, set_bits);
if (bh->b_page != fs_page) {
nilfs_end_page_io(fs_page, 0);
fs_page = bh->b_page;
@@ -2013,6 +2053,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
int err;
+ if (sb_rdonly(sci->sc_super))
+ return -EROFS;
+
nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
sci->sc_cno = nilfs->ns_cno;
@@ -2237,7 +2280,7 @@ int nilfs_construct_segment(struct super_block *sb)
struct nilfs_transaction_info *ti;
int err;
- if (!sci)
+ if (sb_rdonly(sb) || unlikely(!sci))
return -EROFS;
/* A call inside transactions causes a deadlock. */
@@ -2276,7 +2319,7 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
struct nilfs_transaction_info ti;
int err = 0;
- if (!sci)
+ if (sb_rdonly(sb) || unlikely(!sci))
return -EROFS;
nilfs_transaction_lock(sb, &ti, 0);
@@ -2603,11 +2646,10 @@ static int nilfs_segctor_thread(void *arg)
goto loop;
end_thread:
- spin_unlock(&sci->sc_state_lock);
-
/* end sync. */
sci->sc_task = NULL;
wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
+ spin_unlock(&sci->sc_state_lock);
return 0;
}
@@ -2699,7 +2741,7 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
flush_work(&sci->sc_iput_work);
- } while (ret && retrycount-- > 0);
+ } while (ret && ret != -EROFS && retrycount-- > 0);
}
/**
@@ -2772,11 +2814,12 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
if (nilfs->ns_writer) {
/*
- * This happens if the filesystem was remounted
- * read/write after nilfs_error degenerated it into a
- * read-only mount.
+ * This happens if the filesystem is made read-only by
+ * __nilfs_error or nilfs_remount and then remounted
+ * read/write. In these cases, reuse the existing
+ * writer.
*/
- nilfs_detach_log_writer(sb);
+ return 0;
}
nilfs->ns_writer = nilfs_segctor_new(sb, root);
@@ -2786,10 +2829,9 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);
err = nilfs_segctor_start_thread(nilfs->ns_writer);
- if (err) {
- kfree(nilfs->ns_writer);
- nilfs->ns_writer = NULL;
- }
+ if (unlikely(err))
+ nilfs_detach_log_writer(sb);
+
return err;
}
@@ -2810,6 +2852,7 @@ void nilfs_detach_log_writer(struct super_block *sb)
nilfs_segctor_destroy(nilfs->ns_writer);
nilfs->ns_writer = NULL;
}
+ set_nilfs_purging(nilfs);
/* Force to free the list of dirty files */
spin_lock(&nilfs->ns_inode_lock);
@@ -2822,4 +2865,5 @@ void nilfs_detach_log_writer(struct super_block *sb)
up_write(&nilfs->ns_segctor_sem);
nilfs_dispose_list(nilfs, &garbage_list, 1);
+ clear_nilfs_purging(nilfs);
}
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index bf3f8f05c89b..4626540008c1 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -498,14 +498,45 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
struct buffer_head *bh;
+ void *kaddr;
+ struct nilfs_segment_usage *su;
int ret;
+ down_write(&NILFS_MDT(sufile)->mi_sem);
ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
- if (!ret) {
+ if (ret)
+ goto out_sem;
+
+ kaddr = kmap_atomic(bh->b_page);
+ su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+ if (unlikely(nilfs_segment_usage_error(su))) {
+ struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
+
+ kunmap_atomic(kaddr);
+ brelse(bh);
+ if (nilfs_segment_is_active(nilfs, segnum)) {
+ nilfs_error(sufile->i_sb,
+ "active segment %llu is erroneous",
+ (unsigned long long)segnum);
+ } else {
+ /*
+ * Segments marked erroneous are never allocated by
+ * nilfs_sufile_alloc(); only active segments, i.e.,
+ * the segments indexed by ns_segnum or ns_nextnum,
+ * can be erroneous here.
+ */
+ WARN_ON_ONCE(1);
+ }
+ ret = -EIO;
+ } else {
+ nilfs_segment_usage_set_dirty(su);
+ kunmap_atomic(kaddr);
mark_buffer_dirty(bh);
nilfs_mdt_mark_dirty(sufile);
brelse(bh);
}
+out_sem:
+ up_write(&NILFS_MDT(sufile)->mi_sem);
return ret;
}
@@ -531,9 +562,14 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
kaddr = kmap_atomic(bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
- WARN_ON(nilfs_segment_usage_error(su));
- if (modtime)
+ if (modtime) {
+ /*
+ * Check segusage error and set su_lastmod only when updating
+ * this entry with a valid timestamp, not for cancellation.
+ */
+ WARN_ON_ONCE(nilfs_segment_usage_error(su));
su->su_lastmod = cpu_to_le64(modtime);
+ }
su->su_nblocks = cpu_to_le32(nblocks);
kunmap_atomic(kaddr);
@@ -774,6 +810,15 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
goto out_header;
sui->ncleansegs -= nsegs - newnsegs;
+
+ /*
+ * If the sufile is successfully truncated, immediately adjust
+ * the segment allocation space while locking the semaphore
+ * "mi_sem" so that nilfs_sufile_alloc() never allocates
+ * segments in the truncated space.
+ */
+ sui->allocmax = newnsegs - 1;
+ sui->allocmin = 0;
}
kaddr = kmap_atomic(header_bh->b_page);
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index b5bc9f0c6a40..2263ebedf9d5 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -367,10 +367,31 @@ static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
goto out;
}
nsbp = (void *)nsbh->b_data + offset;
- memset(nsbp, 0, nilfs->ns_blocksize);
+ lock_buffer(nsbh);
if (sb2i >= 0) {
+ /*
+ * The position of the second superblock only changes in
+ * 4KiB units, which is larger than the maximum superblock
+ * data size (= 1KiB), so there is no need to use memmove()
+ * to allow overlap between source and destination.
+ */
memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize);
+
+ /*
+ * Zero fill after copy to avoid overwriting in case of move
+ * within the same block.
+ */
+ memset(nsbh->b_data, 0, offset);
+ memset((void *)nsbp + nilfs->ns_sbsize, 0,
+ nsbh->b_size - offset - nilfs->ns_sbsize);
+ } else {
+ memset(nsbh->b_data, 0, nsbh->b_size);
+ }
+ set_buffer_uptodate(nsbh);
+ unlock_buffer(nsbh);
+
+ if (sb2i >= 0) {
brelse(nilfs->ns_sbh[sb2i]);
nilfs->ns_sbh[sb2i] = nsbh;
nilfs->ns_sbp[sb2i] = nsbp;
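The hunk above orders the operations as copy-then-zero so that, when the superblock moves within the same block, the zeroing can never clobber not-yet-copied payload. A small self-contained sketch of that ordering (block and payload sizes are illustrative):

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE	4096
#define SB_SIZE		1024	/* illustrative superblock payload size */

static void move_sb_in_block(char *block, size_t off, const char *sb)
{
	char *dst = block + off;

	memcpy(dst, sb, SB_SIZE);		/* payload first */
	memset(block, 0, off);			/* zero before ... */
	memset(dst + SB_SIZE, 0,		/* ... and after it */
	       BLOCK_SIZE - off - SB_SIZE);
}

int main(void)
{
	static char block[BLOCK_SIZE];
	static char sb[SB_SIZE] = "superblock";

	move_sb_in_block(block, 2048, sb);
	printf("%s\n", block + 2048);	/* prints "superblock" */
	return 0;
}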
@@ -404,6 +425,15 @@ int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
goto out;
/*
+ * Prevent underflow in second superblock position calculation.
+ * The exact minimum size check is done in nilfs_sufile_resize().
+ */
+ if (newsize < 4096) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /*
* Write lock is required to protect some functions depending
* on the number of segments, the number of reserved segments,
* and so forth.
@@ -468,6 +498,7 @@ static void nilfs_put_super(struct super_block *sb)
up_write(&nilfs->ns_sem);
}
+ nilfs_sysfs_delete_device_group(nilfs);
iput(nilfs->ns_sufile);
iput(nilfs->ns_cpfile);
iput(nilfs->ns_dat);
@@ -1094,6 +1125,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
nilfs_put_root(fsroot);
failed_unload:
+ nilfs_sysfs_delete_device_group(nilfs);
iput(nilfs->ns_sufile);
iput(nilfs->ns_cpfile);
iput(nilfs->ns_dat);
@@ -1131,8 +1163,6 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
goto out;
if (*flags & SB_RDONLY) {
- /* Shutting down log writer */
- nilfs_detach_log_writer(sb);
sb->s_flags |= SB_RDONLY;
/*
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 931870768556..c8d869bc25b0 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -13,6 +13,7 @@
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/random.h>
+#include <linux/log2.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
@@ -86,7 +87,6 @@ void destroy_nilfs(struct the_nilfs *nilfs)
{
might_sleep();
if (nilfs_init(nilfs)) {
- nilfs_sysfs_delete_device_group(nilfs);
brelse(nilfs->ns_sbh[0]);
brelse(nilfs->ns_sbh[1]);
}
@@ -274,6 +274,10 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
goto failed;
}
+ err = nilfs_sysfs_create_device_group(sb);
+ if (unlikely(err))
+ goto sysfs_error;
+
if (valid_fs)
goto skip_recovery;
@@ -335,6 +339,9 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
goto failed;
failed_unload:
+ nilfs_sysfs_delete_device_group(nilfs);
+
+ sysfs_error:
iput(nilfs->ns_cpfile);
iput(nilfs->ns_sufile);
iput(nilfs->ns_dat);
@@ -368,6 +375,18 @@ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
100));
}
+/**
+ * nilfs_max_segment_count - calculate the maximum number of segments
+ * @nilfs: nilfs object
+ */
+static u64 nilfs_max_segment_count(struct the_nilfs *nilfs)
+{
+ u64 max_count = U64_MAX;
+
+ do_div(max_count, nilfs->ns_blocks_per_segment);
+ return min_t(u64, max_count, ULONG_MAX);
+}
+
void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
{
nilfs->ns_nsegments = nsegs;
@@ -377,6 +396,8 @@ void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
struct nilfs_super_block *sbp)
{
+ u64 nsegments, nblocks;
+
if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
nilfs_msg(nilfs->ns_sb, KERN_ERR,
"unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). Please check the version of mkfs.nilfs(2).",
@@ -423,7 +444,35 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
return -EINVAL;
}
- nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
+ nsegments = le64_to_cpu(sbp->s_nsegments);
+ if (nsegments > nilfs_max_segment_count(nilfs)) {
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "segment count %llu exceeds upper limit (%llu segments)",
+ (unsigned long long)nsegments,
+ (unsigned long long)nilfs_max_segment_count(nilfs));
+ return -EINVAL;
+ }
+
+ nblocks = (u64)i_size_read(nilfs->ns_sb->s_bdev->bd_inode) >>
+ nilfs->ns_sb->s_blocksize_bits;
+ if (nblocks) {
+ u64 min_block_count = nsegments * nilfs->ns_blocks_per_segment;
+ /*
+ * To avoid failing to mount early device images without a
+ * second superblock, exclude that block count from the
+ * "min_block_count" calculation.
+ */
+
+ if (nblocks < min_block_count) {
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "total number of segment blocks %llu exceeds device size (%llu blocks)",
+ (unsigned long long)min_block_count,
+ (unsigned long long)nblocks);
+ return -EINVAL;
+ }
+ }
+
+ nilfs_set_nsegments(nilfs, nsegments);
nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
return 0;
}
@@ -448,11 +497,33 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
return crc == le32_to_cpu(sbp->s_sum);
}
-static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
+/**
+ * nilfs_sb2_bad_offset - check the location of the second superblock
+ * @sbp: superblock raw data buffer
+ * @offset: byte offset of second superblock calculated from device size
+ *
+ * nilfs_sb2_bad_offset() checks if the position of the second
+ * superblock is valid or not based on the filesystem parameters
+ * stored in @sbp. If @offset points to a location within the segment
+ * area, or if the parameters themselves are not normal, it is
+ * determined to be invalid.
+ *
+ * Return Value: true if invalid, false if valid.
+ */
+static bool nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
{
- return offset < ((le64_to_cpu(sbp->s_nsegments) *
- le32_to_cpu(sbp->s_blocks_per_segment)) <<
- (le32_to_cpu(sbp->s_log_block_size) + 10));
+ unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size);
+ u32 blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
+ u64 nsegments = le64_to_cpu(sbp->s_nsegments);
+ u64 index;
+
+ if (blocks_per_segment < NILFS_SEG_MIN_BLOCKS ||
+ shift_bits > ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS)
+ return true;
+
+ index = offset >> (shift_bits + BLOCK_SIZE_BITS);
+ do_div(index, blocks_per_segment);
+ return index < nsegments;
}
static void nilfs_release_super_block(struct the_nilfs *nilfs)
@@ -494,9 +565,15 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
{
struct nilfs_super_block **sbp = nilfs->ns_sbp;
struct buffer_head **sbh = nilfs->ns_sbh;
- u64 sb2off = NILFS_SB2_OFFSET_BYTES(nilfs->ns_bdev->bd_inode->i_size);
+ u64 sb2off, devsize = nilfs->ns_bdev->bd_inode->i_size;
int valid[2], swp = 0;
+ if (devsize < NILFS_SEG_MIN_BLOCKS * NILFS_MIN_BLOCK_SIZE + 4096) {
+ nilfs_msg(sb, KERN_ERR, "device size too small");
+ return -EINVAL;
+ }
+ sb2off = NILFS_SB2_OFFSET_BYTES(devsize);
+
sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize,
&sbh[0]);
sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]);
@@ -611,7 +688,11 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
goto failed_sbh;
}
nilfs_release_super_block(nilfs);
- sb_set_blocksize(sb, blocksize);
+ if (!sb_set_blocksize(sb, blocksize)) {
+ nilfs_msg(sb, KERN_ERR, "bad blocksize %d", blocksize);
+ err = -EINVAL;
+ goto out;
+ }
err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
if (err)
@@ -639,10 +720,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
if (err)
goto failed_sbh;
- err = nilfs_sysfs_create_device_group(sb);
- if (err)
- goto failed_sbh;
-
set_nilfs_init(nilfs);
err = 0;
out:
@@ -695,9 +772,7 @@ int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
{
unsigned long ncleansegs;
- down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
- up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
*nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
return 0;
}
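The reworked nilfs_sb2_bad_offset() above replaces a multiply-then-shift expression, which could overflow 64 bits with crafted superblock parameters, by a shift-then-divide that stays bounded. A userspace sketch of the same arithmetic, with illustrative stand-ins for the kernel constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLK_SIZE_BITS	10U	/* 1 KiB base block, as in the kernel */
#define SEG_MIN_BLOCKS	16U	/* illustrative minimum segment length */
#define MAX_SHIFT	6U	/* illustrative: blocks of up to 64 KiB */

static bool sb2_bad_offset(uint64_t offset, uint32_t shift_bits,
			   uint32_t blocks_per_segment, uint64_t nsegments)
{
	/* reject implausible parameters before doing any arithmetic */
	if (blocks_per_segment < SEG_MIN_BLOCKS || shift_bits > MAX_SHIFT)
		return true;

	/*
	 * Shift first, divide second: every intermediate value stays
	 * within 64 bits, unlike the old "(nsegments *
	 * blocks_per_segment) << shift" form, which a crafted
	 * superblock could overflow.
	 */
	uint64_t index = offset >> (shift_bits + BLK_SIZE_BITS);

	index /= blocks_per_segment;
	return index < nsegments;	/* inside the segment area: invalid */
}

int main(void)
{
	/* crafted nsegments that used to overflow the old check */
	printf("crafted: %s\n",
	       sb2_bad_offset(1ULL << 40, 2, 128, UINT64_MAX / 2) ? "bad" : "ok");
	printf("sane:    %s\n",
	       sb2_bad_offset(1ULL << 40, 2, 128, 512) ? "bad" : "ok");
	return 0;
}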
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 380a543c5b19..de6e24d80eb6 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -29,6 +29,7 @@ enum {
THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */
THE_NILFS_GC_RUNNING, /* gc process is running */
THE_NILFS_SB_DIRTY, /* super block is dirty */
+ THE_NILFS_PURGING, /* disposing dirty files for cleanup */
};
/**
@@ -208,6 +209,7 @@ THE_NILFS_FNS(INIT, init)
THE_NILFS_FNS(DISCONTINUED, discontinued)
THE_NILFS_FNS(GC_RUNNING, gc_running)
THE_NILFS_FNS(SB_DIRTY, sb_dirty)
+THE_NILFS_FNS(PURGING, purging)
/*
* Mount option operations
diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
index 52ccd34b1e79..a026dbd3593f 100644
--- a/fs/nls/nls_base.c
+++ b/fs/nls/nls_base.c
@@ -272,7 +272,7 @@ int unregister_nls(struct nls_table * nls)
return -EINVAL;
}
-static struct nls_table *find_nls(char *charset)
+static struct nls_table *find_nls(const char *charset)
{
struct nls_table *nls;
spin_lock(&nls_lock);
@@ -288,7 +288,7 @@ static struct nls_table *find_nls(char *charset)
return nls;
}
-struct nls_table *load_nls(char *charset)
+struct nls_table *load_nls(const char *charset)
{
return try_then_request_module(find_nls(charset), "nls_%s", charset);
}
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 8508ab575017..ec4eadf459ae 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -928,8 +928,11 @@ static int fanotify_test_fid(struct path *path, __kernel_fsid_t *fsid)
return 0;
}
-static int fanotify_events_supported(struct path *path, __u64 mask)
+static int fanotify_events_supported(struct path *path, __u64 mask,
+ unsigned int flags)
{
+ unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
+
/*
* Some filesystems such as 'proc' acquire unusual locks when opening
* files. For them fanotify permission events have high chances of
@@ -941,6 +944,21 @@ static int fanotify_events_supported(struct path *path, __u64 mask)
if (mask & FANOTIFY_PERM_EVENTS &&
path->mnt->mnt_sb->s_type->fs_flags & FS_DISALLOW_NOTIFY_PERM)
return -EINVAL;
+
+ /*
+ * mount and sb marks are not allowed on kernel internal pseudo fs,
+ * like pipe_mnt, because that would subscribe to events on all the
+ * anonymous pipes in the system.
+ *
+ * SB_NOUSER covers all of the internal pseudo fs whose objects are not
+ * exposed to user's mount namespace, but there are other SB_KERNMOUNT
+ * fs, like nsfs, debugfs, for which the value of allowing sb and mount
+ * mark is questionable. For now we leave them alone.
+ */
+ if (mark_type != FAN_MARK_INODE &&
+ path->mnt->mnt_sb->s_flags & SB_NOUSER)
+ return -EINVAL;
+
return 0;
}
@@ -1050,7 +1068,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
goto fput_and_out;
if (flags & FAN_MARK_ADD) {
- ret = fanotify_events_supported(&path, mask);
+ ret = fanotify_events_supported(&path, mask, flags);
if (ret)
goto path_put_and_out;
}
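A hedged userspace probe of the new restriction: on kernels carrying this patch, a mount mark on a kernel-internal (SB_NOUSER) filesystem such as pipefs should fail with EINVAL. It needs CAP_SYS_ADMIN for fanotify_init(), and the exact outcome depends on the running kernel:

#include <sys/fanotify.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fan, pfd[2];

	fan = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);
	if (fan < 0) {
		perror("fanotify_init (needs CAP_SYS_ADMIN)");
		return 1;
	}
	if (pipe(pfd) < 0) {
		perror("pipe");
		return 1;
	}

	/* NULL pathname: mark the object that pfd[0] itself refers to */
	if (fanotify_mark(fan, FAN_MARK_ADD | FAN_MARK_MOUNT, FAN_ACCESS,
			  pfd[0], NULL) < 0)
		printf("mount mark on pipefs rejected: %s\n", strerror(errno));
	else
		printf("mount mark succeeded (kernel without this patch?)\n");
	return 0;
}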
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 589dee962993..a3cbfd38f1f6 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -66,7 +66,7 @@ int inotify_handle_event(struct fsnotify_group *group,
struct inotify_event_info *event;
struct fsnotify_event *fsn_event;
int ret;
- int len = 0;
+ int len = 0, wd;
int alloc_len = sizeof(struct inotify_event_info);
if (WARN_ON(fsnotify_iter_vfsmount_mark(iter_info)))
@@ -91,6 +91,13 @@ int inotify_handle_event(struct fsnotify_group *group,
fsn_mark);
/*
+ * We can be racing with mark being detached. Don't report event with
+ * invalid wd.
+ */
+ wd = READ_ONCE(i_mark->wd);
+ if (wd == -1)
+ return 0;
+ /*
* Whoever is interested in the event, pays for the allocation. Do not
* trigger OOM killer in the target monitoring memcg as it may have
* security repercussion.
@@ -120,7 +127,7 @@ int inotify_handle_event(struct fsnotify_group *group,
fsn_event = &event->fse;
fsnotify_init_event(fsn_event, (unsigned long)inode);
event->mask = mask;
- event->wd = i_mark->wd;
+ event->wd = wd;
event->sync_cookie = cookie;
event->name_len = len;
if (len)
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 914e99173130..c0881d39d36a 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -594,17 +594,37 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
u8 *mrec_end = (u8 *)ctx->mrec +
le32_to_cpu(ctx->mrec->bytes_allocated);
- u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
- a->name_length * sizeof(ntfschar);
- if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end ||
- name_end > mrec_end)
+ u8 *name_end;
+
+ /* check whether ATTR_RECORD wraps */
+ if ((u8 *)a < (u8 *)ctx->mrec)
+ break;
+
+ /* check whether Attribute Record Header is within bounds */
+ if ((u8 *)a > mrec_end ||
+ (u8 *)a + sizeof(ATTR_RECORD) > mrec_end)
+ break;
+
+ /* check whether ATTR_RECORD's name is within bounds */
+ name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
+ a->name_length * sizeof(ntfschar);
+ if (name_end > mrec_end)
break;
+
ctx->attr = a;
if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
a->type == AT_END))
return -ENOENT;
if (unlikely(!a->length))
break;
+
+ /* check whether ATTR_RECORD's length wraps */
+ if ((u8 *)a + le32_to_cpu(a->length) < (u8 *)a)
+ break;
+ /* check whether ATTR_RECORD's length is within bounds */
+ if ((u8 *)a + le32_to_cpu(a->length) > mrec_end)
+ break;
+
if (a->type != type)
continue;
/*
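The hardened loop above validates every derived pointer before using it: record start, header end, name end, and record length are each checked against the mapped record bounds. A self-contained userspace sketch of the same defensive pattern for variable-length records (structure and names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec {			/* illustrative on-disk record header */
	uint32_t length;	/* total record length, untrusted */
	uint32_t payload;
};

static void walk(const uint8_t *buf, size_t size)
{
	size_t off = 0;

	while (size - off >= sizeof(struct rec)) {	/* header in bounds? */
		struct rec r;

		memcpy(&r, buf + off, sizeof(r));	/* alignment-safe read */
		if (r.length == 0)			/* refuse zero length */
			break;
		/*
		 * One unsigned comparison covers both failure modes the
		 * kernel checks separately: a length that wraps the
		 * pointer and a length that runs past the buffer end.
		 */
		if (r.length > size - off)
			break;
		printf("record payload %u\n", r.payload);
		off += r.length;
	}
}

int main(void)
{
	uint8_t buf[32] = { 0 };
	struct rec r = { .length = 16, .payload = 7 };

	memcpy(buf, &r, sizeof(r));
	r.length = 0xffffffffu;		/* corrupt the second record */
	r.payload = 9;
	memcpy(buf + 16, &r, sizeof(r));

	walk(buf, sizeof(buf));		/* prints 7, then stops safely */
	return 0;
}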
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index cf222c9225d6..645c4b1b23de 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -1829,6 +1829,13 @@ int ntfs_read_inode_mount(struct inode *vi)
goto err_out;
}
+ /* Sanity check offset to the first attribute */
+ if (le16_to_cpu(m->attrs_offset) >= le32_to_cpu(m->bytes_allocated)) {
+ ntfs_error(sb, "Incorrect mft offset to the first attribute %u in superblock.",
+ le16_to_cpu(m->attrs_offset));
+ goto err_out;
+ }
+
/* Need this to sanity check attribute list references to $MFT. */
vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 7dc3bc604f78..8f33382a00ea 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2092,7 +2092,8 @@ get_ctx_vol_failed:
// TODO: Initialize security.
/* Get the extended system files' directory inode. */
vol->extend_ino = ntfs_iget(sb, FILE_Extend);
- if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino)) {
+ if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino) ||
+ !S_ISDIR(vol->extend_ino->i_mode)) {
if (!IS_ERR(vol->extend_ino))
iput(vol->extend_ino);
ntfs_error(sb, "Failed to load $Extend.");
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 4db87b26cf7b..52ccd8d4ed82 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -4708,7 +4708,7 @@ int ocfs2_insert_extent(handle_t *handle,
struct ocfs2_alloc_context *meta_ac)
{
int status;
- int uninitialized_var(free_records);
+ int free_records;
struct buffer_head *last_eb_bh = NULL;
struct ocfs2_insert_type insert = {0, };
struct ocfs2_extent_rec rec;
@@ -7052,7 +7052,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
int need_free = 0;
u32 bit_off, num;
handle_t *handle;
- u64 uninitialized_var(block);
+ u64 block;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 7f66e3342475..91702ebffe84 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1990,11 +1990,25 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
}
if (unlikely(copied < len) && wc->w_target_page) {
+ loff_t new_isize;
+
if (!PageUptodate(wc->w_target_page))
copied = 0;
- ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
- start+len);
+ new_isize = max_t(loff_t, i_size_read(inode), pos + copied);
+ if (new_isize > page_offset(wc->w_target_page))
+ ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
+ start+len);
+ else {
+ /*
+ * When page is fully beyond new isize (data copy
+ * failed), do not bother zeroing the page. Invalidate
+ * it instead so that writeback does not get confused and
+ * put page & buffer dirty bits into an inconsistent
+ * state.
+ */
+ block_invalidatepage(wc->w_target_page, 0, PAGE_SIZE);
+ }
}
if (wc->w_target_page)
flush_dcache_page(wc->w_target_page);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index bdef72c0f099..49b9c61459c5 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -848,9 +848,9 @@ static int ocfs2_dx_dir_lookup(struct inode *inode,
u64 *ret_phys_blkno)
{
int ret = 0;
- unsigned int cend, uninitialized_var(clen);
- u32 uninitialized_var(cpos);
- u64 uninitialized_var(blkno);
+ unsigned int cend, clen;
+ u32 cpos;
+ u64 blkno;
u32 name_hash = hinfo->major_hash;
ret = ocfs2_dx_dir_lookup_rec(inode, el, name_hash, &cpos, &blkno,
@@ -894,7 +894,7 @@ static int ocfs2_dx_dir_search(const char *name, int namelen,
struct ocfs2_dir_lookup_result *res)
{
int ret, i, found;
- u64 uninitialized_var(phys);
+ u64 phys;
struct buffer_head *dx_leaf_bh = NULL;
struct ocfs2_dx_leaf *dx_leaf;
struct ocfs2_dx_entry *dx_entry = NULL;
@@ -4393,9 +4393,9 @@ out:
int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh)
{
int ret;
- unsigned int uninitialized_var(clen);
- u32 major_hash = UINT_MAX, p_cpos, uninitialized_var(cpos);
- u64 uninitialized_var(blkno);
+ unsigned int clen;
+ u32 major_hash = UINT_MAX, p_cpos, cpos;
+ u64 blkno;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
struct buffer_head *dx_root_bh = NULL;
struct ocfs2_dx_root_block *dx_root;
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index aaf24548b02a..0463dce65bb2 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -688,10 +688,6 @@ struct dlm_begin_reco
__be32 pad2;
};
-
-#define BITS_PER_BYTE 8
-#define BITS_TO_BYTES(bits) (((bits)+BITS_PER_BYTE-1)/BITS_PER_BYTE)
-
struct dlm_query_join_request
{
u8 node_idx;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 207ec61569ea..bcc4b5d3e54e 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3396,10 +3396,12 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
- ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
- osb->cconn = NULL;
+ if (osb->cconn) {
+ ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
+ osb->cconn = NULL;
- ocfs2_dlm_shutdown_debug(osb);
+ ocfs2_dlm_shutdown_debug(osb);
+ }
}
static int ocfs2_drop_lock(struct ocfs2_super *osb,
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index e3e2d1b2af51..f3926765c3f0 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -403,7 +403,7 @@ static int ocfs2_get_clusters_nocache(struct inode *inode,
{
int i, ret, tree_height, len;
struct ocfs2_dinode *di;
- struct ocfs2_extent_block *uninitialized_var(eb);
+ struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *el;
struct ocfs2_extent_rec *rec;
struct buffer_head *eb_bh = NULL;
@@ -599,7 +599,7 @@ int ocfs2_get_clusters(struct inode *inode, u32 v_cluster,
unsigned int *extent_flags)
{
int ret;
- unsigned int uninitialized_var(hole_len), flags = 0;
+ unsigned int hole_len, flags = 0;
struct buffer_head *di_bh = NULL;
struct ocfs2_extent_rec rec;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index c30747bdfb12..6fea3f46a9e3 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2103,14 +2103,20 @@ static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
struct ocfs2_space_resv sr;
int change_size = 1;
int cmd = OCFS2_IOC_RESVSP64;
+ int ret = 0;
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
if (!ocfs2_writes_unwritten_extents(osb))
return -EOPNOTSUPP;
- if (mode & FALLOC_FL_KEEP_SIZE)
+ if (mode & FALLOC_FL_KEEP_SIZE) {
change_size = 0;
+ } else {
+ ret = inode_newsize_ok(inode, offset + len);
+ if (ret)
+ return ret;
+ }
if (mode & FALLOC_FL_PUNCH_HOLE)
cmd = OCFS2_IOC_UNRESVSP64;
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 900e4ef686bf..da95ed79c12e 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -159,7 +159,7 @@ static void ocfs2_queue_replay_slots(struct ocfs2_super *osb,
replay_map->rm_state = REPLAY_DONE;
}
-static void ocfs2_free_replay_slots(struct ocfs2_super *osb)
+void ocfs2_free_replay_slots(struct ocfs2_super *osb)
{
struct ocfs2_replay_map *replay_map = osb->replay_map;
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index bfe611ed1b1d..eb7a21bac71e 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -152,6 +152,7 @@ int ocfs2_recovery_init(struct ocfs2_super *osb);
void ocfs2_recovery_exit(struct ocfs2_super *osb);
int ocfs2_compute_replay_slots(struct ocfs2_super *osb);
+void ocfs2_free_replay_slots(struct ocfs2_super *osb);
/*
* Journal Control:
* Initialize, Load, Shutdown, Wipe a journal.
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 758d9661ef1e..98e77ea957ff 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -107,14 +107,6 @@ static int __ocfs2_move_extent(handle_t *handle,
*/
replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;
- ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
- context->et.et_root_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
ret = ocfs2_split_extent(handle, &context->et, path, index,
&replace_rec, context->meta_ac,
&context->dealloc);
@@ -123,8 +115,6 @@ static int __ocfs2_move_extent(handle_t *handle,
goto out;
}
- ocfs2_journal_dirty(handle, context->et.et_root_bh);
-
context->new_phys_cpos = new_p_cpos;
/*
@@ -446,7 +436,7 @@ static int ocfs2_find_victim_alloc_group(struct inode *inode,
bg = (struct ocfs2_group_desc *)gd_bh->b_data;
if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
- le16_to_cpu(bg->bg_bits))) {
+ (le16_to_cpu(bg->bg_bits) << bits_per_unit))) {
*ret_bh = gd_bh;
*vict_bit = (vict_blkno - blkno) >>
@@ -561,6 +551,7 @@ static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh,
last_free_bits++;
if (last_free_bits == move_len) {
+ i -= move_len;
*goal_bit = i;
*phys_cpos = base_cpos + i;
break;
@@ -1032,18 +1023,19 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
context->range = &range;
+ /*
+ * OK, the default threshold for defragmentation
+ * is 1M, since our maximum cluster size is also 1M.
+ */
+ if (!range.me_threshold)
+ range.me_threshold = 1024 * 1024;
+
+ if (range.me_threshold > i_size_read(inode))
+ range.me_threshold = i_size_read(inode);
+
if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
context->auto_defrag = 1;
- /*
- * ok, the default theshold for the defragmentation
- * is 1M, since our maximum clustersize was 1M also.
- * any thought?
- */
- if (!range.me_threshold)
- range.me_threshold = 1024 * 1024;
-
- if (range.me_threshold > i_size_read(inode))
- range.me_threshold = i_size_read(inode);
if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
context->partial = 1;
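The bounds fix in ocfs2_find_victim_alloc_group() above corrects a unit mismatch: bg_bits counts allocation units while vict_blkno is a block number, so the group size must be shifted by bits_per_unit before the comparison. A tiny illustration of the corrected check (names are stand-ins, not the ocfs2 structures):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool blkno_in_group(uint64_t blkno, uint64_t group_blkno,
			   uint16_t group_bits, int bits_per_unit)
{
	/*
	 * Convert the group's size from allocation units to blocks
	 * before comparing it against a block number.
	 */
	return blkno >= group_blkno &&
	       blkno < group_blkno + ((uint64_t)group_bits << bits_per_unit);
}

int main(void)
{
	/* 2048 units of 8 blocks each, starting at block 10000 */
	printf("%d\n", blkno_in_group(10000 + 2048 * 8 - 1, 10000, 2048, 3));	/* 1 */
	printf("%d\n", blkno_in_group(10000 + 2048 * 8, 10000, 2048, 3));	/* 0 */
	return 0;
}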
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 8ea51cf27b97..ba574fade6f0 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -198,6 +198,7 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
* callers. */
if (S_ISDIR(mode))
set_nlink(inode, 2);
+ mode = mode_strip_sgid(dir, mode);
inode_init_owner(inode, dir, mode);
status = dquot_initialize(inode);
if (status)
@@ -231,6 +232,7 @@ static int ocfs2_mknod(struct inode *dir,
handle_t *handle = NULL;
struct ocfs2_super *osb;
struct ocfs2_dinode *dirfe;
+ struct ocfs2_dinode *fe = NULL;
struct buffer_head *new_fe_bh = NULL;
struct inode *inode = NULL;
struct ocfs2_alloc_context *inode_ac = NULL;
@@ -381,6 +383,7 @@ static int ocfs2_mknod(struct inode *dir,
goto leave;
}
+ fe = (struct ocfs2_dinode *) new_fe_bh->b_data;
if (S_ISDIR(mode)) {
status = ocfs2_fill_new_dir(osb, handle, dir, inode,
new_fe_bh, data_ac, meta_ac);
@@ -446,8 +449,11 @@ static int ocfs2_mknod(struct inode *dir,
leave:
if (status < 0 && did_quota_inode)
dquot_free_inode(inode);
- if (handle)
+ if (handle) {
+ if (status < 0 && fe)
+ ocfs2_set_links_count(fe, 0);
ocfs2_commit_trans(osb, handle);
+ }
ocfs2_inode_unlock(dir, 1);
if (did_block_signals)
@@ -625,18 +631,9 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
return status;
}
- status = __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
+ return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
parent_fe_bh, handle, inode_ac,
fe_blkno, suballoc_loc, suballoc_bit);
- if (status < 0) {
- u64 bg_blkno = ocfs2_which_suballoc_group(fe_blkno, suballoc_bit);
- int tmp = ocfs2_free_suballoc_bits(handle, inode_ac->ac_inode,
- inode_ac->ac_bh, suballoc_bit, bg_blkno, 1);
- if (tmp)
- mlog_errno(tmp);
- }
-
- return status;
}
static int ocfs2_mkdir(struct inode *dir,
@@ -1528,6 +1525,10 @@ static int ocfs2_rename(struct inode *old_dir,
status = ocfs2_add_entry(handle, new_dentry, old_inode,
OCFS2_I(old_inode)->ip_blkno,
new_dir_bh, &target_insert);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
}
old_inode->i_ctime = current_time(old_inode);
@@ -2017,8 +2018,11 @@ bail:
ocfs2_clusters_to_bytes(osb->sb, 1));
if (status < 0 && did_quota_inode)
dquot_free_inode(inode);
- if (handle)
+ if (handle) {
+ if (status < 0 && fe)
+ ocfs2_set_links_count(fe, 0);
ocfs2_commit_trans(osb, handle);
+ }
ocfs2_inode_unlock(dir, 1);
if (did_block_signals)
@@ -2492,7 +2496,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
struct buffer_head *new_di_bh = NULL;
struct ocfs2_alloc_context *inode_ac = NULL;
struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
- u64 uninitialized_var(di_blkno), suballoc_loc;
+ u64 di_blkno, suballoc_loc;
u16 suballoc_bit;
status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index ee43e51188be..3f812d348d6b 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -1061,7 +1061,7 @@ static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
struct buffer_head **ret_bh)
{
int ret = 0, i, found;
- u32 low_cpos, uninitialized_var(cpos_end);
+ u32 low_cpos, cpos_end;
struct ocfs2_extent_list *el;
struct ocfs2_extent_rec *rec = NULL;
struct ocfs2_extent_block *eb = NULL;
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 188038760136..9f0326672af6 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -707,6 +707,8 @@ static struct ctl_table_header *ocfs2_table_header;
static int __init ocfs2_stack_glue_init(void)
{
+ int ret;
+
strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB);
ocfs2_table_header = register_sysctl_table(ocfs2_root_table);
@@ -716,7 +718,11 @@ static int __init ocfs2_stack_glue_init(void)
return -ENOMEM; /* or something. */
}
- return ocfs2_sysfs_init();
+ ret = ocfs2_sysfs_init();
+ if (ret)
+ unregister_sysctl_table(ocfs2_table_header);
+
+ return ret;
}
static void __exit ocfs2_stack_glue_exit(void)
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index c1cf67b24c19..052abfa31d38 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -955,8 +955,10 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
if (!sb_has_quota_loaded(sb, type))
continue;
- oinfo = sb_dqinfo(sb, type)->dqi_priv;
- cancel_delayed_work_sync(&oinfo->dqi_sync_work);
+ if (!sb_has_quota_suspended(sb, type)) {
+ oinfo = sb_dqinfo(sb, type)->dqi_priv;
+ cancel_delayed_work_sync(&oinfo->dqi_sync_work);
+ }
inode = igrab(sb->s_dquot.files[type]);
/* Turn off quotas. This will remove all dquot structures from
* memory and so they will be automatically synced to global
@@ -984,28 +986,27 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) {
status = -EINVAL;
- goto read_super_error;
+ goto out;
}
/* probe for superblock */
status = ocfs2_sb_probe(sb, &bh, &sector_size, &stats);
if (status < 0) {
mlog(ML_ERROR, "superblock probe failed!\n");
- goto read_super_error;
+ goto out;
}
status = ocfs2_initialize_super(sb, bh, sector_size, &stats);
- osb = OCFS2_SB(sb);
- if (status < 0) {
- mlog_errno(status);
- goto read_super_error;
- }
brelse(bh);
bh = NULL;
+ if (status < 0)
+ goto out;
+
+ osb = OCFS2_SB(sb);
if (!ocfs2_check_set_options(sb, &parsed_options)) {
status = -EINVAL;
- goto read_super_error;
+ goto out_super;
}
osb->s_mount_opt = parsed_options.mount_opt;
osb->s_atime_quantum = parsed_options.atime_quantum;
@@ -1022,7 +1023,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
status = ocfs2_verify_userspace_stack(osb, &parsed_options);
if (status)
- goto read_super_error;
+ goto out_super;
sb->s_magic = OCFS2_SUPER_MAGIC;
@@ -1036,7 +1037,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
status = -EACCES;
mlog(ML_ERROR, "Readonly device detected but readonly "
"mount was not specified.\n");
- goto read_super_error;
+ goto out_super;
}
/* You should not be able to start a local heartbeat
@@ -1045,7 +1046,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
status = -EROFS;
mlog(ML_ERROR, "Local heartbeat specified on readonly "
"device.\n");
- goto read_super_error;
+ goto out_super;
}
status = ocfs2_check_journals_nolocks(osb);
@@ -1054,9 +1055,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
mlog(ML_ERROR, "Recovery required on readonly "
"file system, but write access is "
"unavailable.\n");
- else
- mlog_errno(status);
- goto read_super_error;
+ goto out_super;
}
ocfs2_set_ro_flag(osb, 1);
@@ -1072,10 +1071,8 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
}
status = ocfs2_verify_heartbeat(osb);
- if (status < 0) {
- mlog_errno(status);
- goto read_super_error;
- }
+ if (status < 0)
+ goto out_super;
osb->osb_debug_root = debugfs_create_dir(osb->uuid_str,
ocfs2_debugfs_root);
@@ -1089,15 +1086,14 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
status = ocfs2_mount_volume(sb);
if (status < 0)
- goto read_super_error;
+ goto out_debugfs;
if (osb->root_inode)
inode = igrab(osb->root_inode);
if (!inode) {
status = -EIO;
- mlog_errno(status);
- goto read_super_error;
+ goto out_dismount;
}
osb->osb_dev_kset = kset_create_and_add(sb->s_id, NULL,
@@ -1105,7 +1101,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
if (!osb->osb_dev_kset) {
status = -ENOMEM;
mlog(ML_ERROR, "Unable to create device kset %s.\n", sb->s_id);
- goto read_super_error;
+ goto out_dismount;
}
/* Create filecheck sysfs related directories/files at
@@ -1114,14 +1110,13 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
status = -ENOMEM;
mlog(ML_ERROR, "Unable to create filecheck sysfs directory at "
"/sys/fs/ocfs2/%s/filecheck.\n", sb->s_id);
- goto read_super_error;
+ goto out_dismount;
}
root = d_make_root(inode);
if (!root) {
status = -ENOMEM;
- mlog_errno(status);
- goto read_super_error;
+ goto out_dismount;
}
sb->s_root = root;
@@ -1168,17 +1163,22 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
return status;
-read_super_error:
- brelse(bh);
-
- if (status)
- mlog_errno(status);
+out_dismount:
+ atomic_set(&osb->vol_state, VOLUME_DISABLED);
+ wake_up(&osb->osb_mount_event);
+ ocfs2_free_replay_slots(osb);
+ ocfs2_dismount_volume(sb, 1);
+ goto out;
- if (osb) {
- atomic_set(&osb->vol_state, VOLUME_DISABLED);
- wake_up(&osb->osb_mount_event);
- ocfs2_dismount_volume(sb, 1);
- }
+out_debugfs:
+ debugfs_remove_recursive(osb->osb_debug_root);
+out_super:
+ ocfs2_release_system_inodes(osb);
+ kfree(osb->recovery_map);
+ ocfs2_delete_osb(osb);
+ kfree(osb);
+out:
+ mlog_errno(status);
return status;
}
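The rewritten error path above converts a single catch-all label into the kernel's usual unwind ladder: one label per acquired resource, released in reverse order, so each failure site jumps to exactly the teardown it needs. A generic, self-contained sketch of the pattern (resources here are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *a, *b;
	FILE *f;
	int err = -1;

	a = malloc(64);
	if (!a)
		goto out;

	b = malloc(64);
	if (!b)
		goto out_free_a;

	f = fopen("/dev/null", "w");
	if (!f)
		goto out_free_b;

	/* ... use a, b and f ... */
	fclose(f);
	free(b);
	free(a);
	return 0;

out_free_b:
	free(b);
out_free_a:
	free(a);
out:
	return err;
}

int main(void)
{
	printf("setup: %d\n", setup());
	return 0;
}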
@@ -1787,11 +1787,10 @@ static int ocfs2_get_sector(struct super_block *sb,
static int ocfs2_mount_volume(struct super_block *sb)
{
int status = 0;
- int unlock_super = 0;
struct ocfs2_super *osb = OCFS2_SB(sb);
if (ocfs2_is_hard_readonly(osb))
- goto leave;
+ goto out;
mutex_init(&osb->obs_trim_fs_mutex);
@@ -1801,44 +1800,58 @@ static int ocfs2_mount_volume(struct super_block *sb)
if (status == -EBADR && ocfs2_userspace_stack(osb))
mlog(ML_ERROR, "couldn't mount because cluster name on"
" disk does not match the running cluster name.\n");
- goto leave;
+ goto out;
}
status = ocfs2_super_lock(osb, 1);
if (status < 0) {
mlog_errno(status);
- goto leave;
+ goto out_dlm;
}
- unlock_super = 1;
/* This will load up the node map and add ourselves to it. */
status = ocfs2_find_slot(osb);
if (status < 0) {
mlog_errno(status);
- goto leave;
+ goto out_super_lock;
}
/* load all node-local system inodes */
status = ocfs2_init_local_system_inodes(osb);
if (status < 0) {
mlog_errno(status);
- goto leave;
+ goto out_super_lock;
}
status = ocfs2_check_volume(osb);
if (status < 0) {
mlog_errno(status);
- goto leave;
+ goto out_system_inodes;
}
status = ocfs2_truncate_log_init(osb);
- if (status < 0)
+ if (status < 0) {
mlog_errno(status);
+ goto out_check_volume;
+ }
-leave:
- if (unlock_super)
- ocfs2_super_unlock(osb, 1);
+ ocfs2_super_unlock(osb, 1);
+ return 0;
+out_check_volume:
+ ocfs2_free_replay_slots(osb);
+out_system_inodes:
+ if (osb->local_alloc_state == OCFS2_LA_ENABLED)
+ ocfs2_shutdown_local_alloc(osb);
+ ocfs2_release_system_inodes(osb);
+ /* before journal shutdown, we should release slot_info */
+ ocfs2_free_slot_info(osb);
+ ocfs2_journal_shutdown(osb);
+out_super_lock:
+ ocfs2_super_unlock(osb, 1);
+out_dlm:
+ ocfs2_dlm_shutdown(osb, 0);
+out:
return status;
}
@@ -1911,8 +1924,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
!ocfs2_is_hard_readonly(osb))
hangup_needed = 1;
- if (osb->cconn)
- ocfs2_dlm_shutdown(osb, hangup_needed);
+ ocfs2_dlm_shutdown(osb, hangup_needed);
ocfs2_blockcheck_stats_debugfs_remove(&osb->osb_ecc_stats);
debugfs_remove_recursive(osb->osb_debug_root);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 90c830e3758e..9ccd19d8f7b1 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1211,7 +1211,7 @@ static int ocfs2_xattr_block_get(struct inode *inode,
struct ocfs2_xattr_value_root *xv;
size_t size;
int ret = -ENODATA, name_offset, name_len, i;
- int uninitialized_var(block_off);
+ int block_off;
xs->bucket = ocfs2_xattr_bucket_new(inode);
if (!xs->bucket) {
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index d640b9388238..b08881b35ac3 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -220,7 +220,7 @@ static int omfs_get_block(struct inode *inode, sector_t block,
struct buffer_head *bh;
sector_t next, offset;
int ret;
- u64 uninitialized_var(new_block);
+ u64 new_block;
u32 max_extents;
int extent_count;
struct omfs_extent *oe;
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 29eaa4544372..1b508f543384 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -194,15 +194,10 @@ void orangefs_debugfs_init(int debug_mask)
*/
static void orangefs_kernel_debug_init(void)
{
- int rc = -ENOMEM;
- char *k_buffer = NULL;
+ static char k_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { };
gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__);
- k_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
- if (!k_buffer)
- goto out;
-
if (strlen(kernel_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) {
strcpy(k_buffer, kernel_debug_string);
strcat(k_buffer, "\n");
@@ -213,15 +208,14 @@ static void orangefs_kernel_debug_init(void)
debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE, 0444, debug_dir, k_buffer,
&kernel_debug_fops);
-
-out:
- gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc);
}
void orangefs_debugfs_cleanup(void)
{
debugfs_remove_recursive(debug_dir);
+ kfree(debug_help_string);
+ debug_help_string = NULL;
}
/* open ORANGEFS_KMOD_DEBUG_HELP_FILE */
@@ -297,18 +291,13 @@ static int help_show(struct seq_file *m, void *v)
/*
* initialize the client-debug file.
*/
-static int orangefs_client_debug_init(void)
+static void orangefs_client_debug_init(void)
{
- int rc = -ENOMEM;
- char *c_buffer = NULL;
+ static char c_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { };
gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__);
- c_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
- if (!c_buffer)
- goto out;
-
if (strlen(client_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) {
strcpy(c_buffer, client_debug_string);
strcat(c_buffer, "\n");
@@ -322,13 +311,6 @@ static int orangefs_client_debug_init(void)
debug_dir,
c_buffer,
&kernel_debug_fops);
-
- rc = 0;
-
-out:
-
- gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc);
- return rc;
}
/* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/
@@ -671,6 +653,7 @@ int orangefs_prepare_debugfs_help_string(int at_boot)
memset(debug_help_string, 0, DEBUG_HELP_STRING_SIZE);
strlcat(debug_help_string, new, string_size);
mutex_unlock(&orangefs_help_file_lock);
+ kfree(new);
}
rc = 0;
diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c
index c010c1fddafc..6aa7a23b04df 100644
--- a/fs/orangefs/orangefs-mod.c
+++ b/fs/orangefs/orangefs-mod.c
@@ -141,7 +141,7 @@ static int __init orangefs_init(void)
gossip_err("%s: could not initialize device subsystem %d!\n",
__func__,
ret);
- goto cleanup_device;
+ goto cleanup_sysfs;
}
ret = register_filesystem(&orangefs_fs_type);
@@ -153,11 +153,11 @@ static int __init orangefs_init(void)
goto out;
}
- orangefs_sysfs_exit();
-
-cleanup_device:
orangefs_dev_cleanup();
+cleanup_sysfs:
+ orangefs_sysfs_exit();
+
sysfs_init_failed:
orangefs_debugfs_cleanup();
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 143c52de97ec..9e5223108362 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -192,7 +192,7 @@ static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
{
struct iattr attr = {
.ia_valid =
- ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET,
+ ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_CTIME,
.ia_atime = stat->atime,
.ia_mtime = stat->mtime,
};
@@ -741,7 +741,7 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
struct path upperpath, datapath;
int err;
char *capability = NULL;
- ssize_t uninitialized_var(cap_size);
+ ssize_t cap_size;
ovl_path_upper(c->dentry, &upperpath);
if (WARN_ON(upperpath.dentry == NULL))
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 8c89eaea1583..fe88ac18ad93 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -557,28 +557,42 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
goto out_revert_creds;
}
- err = -ENOMEM;
- override_cred = prepare_creds();
- if (override_cred) {
+ if (!attr->hardlink) {
+ err = -ENOMEM;
+ override_cred = prepare_creds();
+ if (!override_cred)
+ goto out_revert_creds;
+ /*
+ * In the creation cases (create, mkdir, mknod, symlink),
+ * ovl should transfer current's fs{u,g}id to the underlying
+ * fs, because the underlying fs wants to initialize the new
+ * inode's owner from current's fs{u,g}id. In these cases,
+ * @inode is a new inode whose i_{u,g}id inode_init_owner()
+ * set to current's fs{u,g}id, so use the inode's i_{u,g}id
+ * to override the cred's fs{u,g}id.
+ *
+ * In the hardlink case, by contrast, ovl_link() does not
+ * create a new inode, so just use the ovl mounter's
+ * fs{u,g}id.
+ */
override_cred->fsuid = inode->i_uid;
override_cred->fsgid = inode->i_gid;
- if (!attr->hardlink) {
- err = security_dentry_create_files_as(dentry,
- attr->mode, &dentry->d_name, old_cred,
- override_cred);
- if (err) {
- put_cred(override_cred);
- goto out_revert_creds;
- }
+ err = security_dentry_create_files_as(dentry,
+ attr->mode, &dentry->d_name, old_cred,
+ override_cred);
+ if (err) {
+ put_cred(override_cred);
+ goto out_revert_creds;
}
put_cred(override_creds(override_cred));
put_cred(override_cred);
-
- if (!ovl_dentry_is_whiteout(dentry))
- err = ovl_create_upper(dentry, inode, attr);
- else
- err = ovl_create_over_whiteout(dentry, inode, attr);
}
+
+ if (!ovl_dentry_is_whiteout(dentry))
+ err = ovl_create_upper(dentry, inode, attr);
+ else
+ err = ovl_create_over_whiteout(dentry, inode, attr);
+
out_revert_creds:
revert_creds(old_cred);
return err;
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 19574ef17470..984df246962e 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -274,7 +274,7 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len,
return FILEID_INVALID;
dentry = d_find_any_alias(inode);
- if (WARN_ON(!dentry))
+ if (!dentry)
return FILEID_INVALID;
type = ovl_dentry_to_fh(dentry, fid, max_len);
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index f47c591402d7..625da4bc8d0f 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -200,7 +200,7 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
int err;
bool last_element = !post[0];
- this = lookup_one_len_unlocked(name, base, namelen);
+ this = lookup_positive_unlocked(name, base, namelen);
if (IS_ERR(this)) {
err = PTR_ERR(this);
this = NULL;
@@ -208,8 +208,6 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
goto out;
goto out_err;
}
- if (!this->d_inode)
- goto put_and_out;
if (ovl_dentry_weird(this)) {
/* Don't support traversing automounts and other weirdness */
@@ -659,7 +657,7 @@ struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh)
if (err)
return ERR_PTR(err);
- index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len);
+ index = lookup_positive_unlocked(name.name, ofs->indexdir, name.len);
kfree(name.name);
if (IS_ERR(index)) {
if (PTR_ERR(index) == -ENOENT)
@@ -667,9 +665,7 @@ struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh)
return index;
}
- if (d_is_negative(index))
- err = 0;
- else if (ovl_is_whiteout(index))
+ if (ovl_is_whiteout(index))
err = -ESTALE;
else if (ovl_dentry_weird(index))
err = -EIO;
@@ -693,7 +689,7 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
if (err)
return ERR_PTR(err);
- index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len);
+ index = lookup_positive_unlocked(name.name, ofs->indexdir, name.len);
if (IS_ERR(index)) {
err = PTR_ERR(index);
if (err == -ENOENT) {
@@ -708,9 +704,7 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
}
inode = d_inode(index);
- if (d_is_negative(index)) {
- goto out_dput;
- } else if (ovl_is_whiteout(index) && !verify) {
+ if (ovl_is_whiteout(index) && !verify) {
/*
* When index lookup is called with !verify for decoding an
* overlay file handle, a whiteout index implies that decode
@@ -1139,7 +1133,7 @@ bool ovl_lower_positive(struct dentry *dentry)
struct dentry *this;
struct dentry *lowerdir = poe->lowerstack[i].dentry;
- this = lookup_one_len_unlocked(name->name, lowerdir,
+ this = lookup_positive_unlocked(name->name, lowerdir,
name->len);
if (IS_ERR(this)) {
switch (PTR_ERR(this)) {
@@ -1156,10 +1150,8 @@ bool ovl_lower_positive(struct dentry *dentry)
break;
}
} else {
- if (this->d_inode) {
- positive = !ovl_is_whiteout(this);
- done = true;
- }
+ positive = !ovl_is_whiteout(this);
+ done = true;
dput(this);
}
}
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 28348c44ea5b..8d81e88f1d1e 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -27,6 +27,7 @@ struct ovl_sb {
};
struct ovl_layer {
+ /* ovl_free_fs() relies on @mnt being the first member! */
struct vfsmount *mnt;
/* Trap in ovl inode cache */
struct inode *trap;
@@ -37,6 +38,14 @@ struct ovl_layer {
int fsid;
};
+/*
+ * ovl_free_fs() relies on @mnt being the first member when unmounting
+ * the private mounts created for each layer. Let's check both the
+ * offset and type.
+ */
+static_assert(offsetof(struct ovl_layer, mnt) == 0);
+static_assert(__same_type(typeof_member(struct ovl_layer, mnt), struct vfsmount *));
+
struct ovl_path {
struct ovl_layer *layer;
struct dentry *dentry;
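The static_assert() pair above turns the layout assumption into a build failure. A runnable miniature of the same idea (portable C11; the kernel's __same_type() check is a GCC typeof extension with no direct standard equivalent, so only the offset check appears here, and the types are illustrative):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct vfsmount_stub { int dummy; };	/* illustrative */

struct layer {
	struct vfsmount_stub *mnt;	/* must stay the first member */
	int fsid;
};

static_assert(offsetof(struct layer, mnt) == 0,
	      "code casts struct layer * to its first member");

int main(void)
{
	struct layer l = { 0 };
	/* the cast that the assertion keeps safe */
	struct vfsmount_stub **mntp = (struct vfsmount_stub **)&l;

	printf("mnt via cast: %p\n", (void *)*mntp);
	return 0;
}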
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index f5cf0938f298..fcf453f7f4ae 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -263,8 +263,8 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
return 0;
/*
- * If this is a sync(2) call or an emergency sync, all the super blocks
- * will be iterated, including upper_sb, so no need to do anything.
+ * Not called for a sync(2) call or an emergency sync (SB_I_SKIP_SYNC).
+ * All the super blocks will be iterated, including upper_sb.
*
* If this is a syncfs(2) call, then we do need to call
* sync_filesystem() on upper_sb, but enough if we do it when being
@@ -1710,6 +1710,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
sb->s_xattr = ovl_xattr_handlers;
sb->s_fs_info = ofs;
sb->s_flags |= SB_POSIXACL;
+ sb->s_iflags |= SB_I_SKIP_SYNC;
err = -ENOMEM;
root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, 0));
diff --git a/fs/pnode.c b/fs/pnode.c
index 1106137c747a..468e4e65a615 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -244,7 +244,7 @@ static int propagate_one(struct mount *m)
}
do {
struct mount *parent = last_source->mnt_parent;
- if (last_source == first_source)
+ if (peers(last_source, first_source))
break;
done = parent->mnt_master == p;
if (done && peers(n, parent))
diff --git a/fs/proc/base.c b/fs/proc/base.c
index cd4f16b0de67..03c3fff5a159 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3412,7 +3412,8 @@ static int proc_tid_comm_permission(struct inode *inode, int mask)
}
static const struct inode_operations proc_tid_comm_inode_operations = {
- .permission = proc_tid_comm_permission,
+ .setattr = proc_setattr,
+ .permission = proc_tid_comm_permission,
};
/*
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index d80989b6c344..f4264dd4ea31 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bpf-cgroup.h>
+#include <linux/kmemleak.h>
#include "internal.h"
static const struct dentry_operations proc_sys_dentry_operations;
@@ -1397,6 +1398,38 @@ struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *tab
}
EXPORT_SYMBOL(register_sysctl);
+/**
+ * __register_sysctl_init() - register sysctl table to path
+ * @path: path name for sysctl base
+ * @table: This is the sysctl table that needs to be registered to the path
+ * @table_name: The name of the sysctl table, used only for logging when
+ * registration fails
+ *
+ * The sysctl interface is used by userspace to query or modify at runtime
+ * a predefined value set on a variable. These variables however have default
+ * values pre-set. Code which depends on these variables will always work even
+ * if register_sysctl() fails. If register_sysctl() fails you'd just lose the
+ * ability to query or modify the sysctls dynamically at run time. Chances of
+ * register_sysctl() failing on init are extremely low, and so for both reasons
+ * this function does not return any error as it is used by initialization code.
+ *
+ * Context: Can only be called after your respective sysctl base path has been
+ * registered. So for instance, most base directories are registered early on
+ * init before init levels are processed through proc_sys_init() and
+ * sysctl_init().
+ */
+void __init __register_sysctl_init(const char *path, struct ctl_table *table,
+ const char *table_name)
+{
+ struct ctl_table_header *hdr = register_sysctl(path, table);
+
+ if (unlikely(!hdr)) {
+ pr_err("failed when register_sysctl %s to %s\n", table_name, path);
+ return;
+ }
+ kmemleak_not_leak(hdr);
+}
+
static char *append_path(const char *path, char *pos, const char *name)
{
int namelen;
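A userspace analog of the log-and-continue policy that __register_sysctl_init() documents above: registration failure costs only the runtime knob, never the boot, because the variable behind it keeps its compiled-in default (names are illustrative, not the kernel API):

#include <stdio.h>

static int register_knob(const char *path)
{
	(void)path;
	return -1;	/* stand-in for a failing register_sysctl() */
}

static void register_knob_init(const char *path, const char *name)
{
	if (register_knob(path) < 0) {
		/*
		 * Log and carry on: only runtime tuning is lost, so
		 * initialization code has nothing useful to do with an
		 * error return.
		 */
		fprintf(stderr, "failed to register %s under %s\n",
			name, path);
	}
}

int main(void)
{
	register_knob_init("kernel", "my_table");
	puts("init continued despite the registration failure");
	return 0;
}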
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 328e3eff890e..865b4d30741e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -726,9 +726,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
page = device_private_entry_to_page(swpent);
}
if (page) {
- int mapcount = page_mapcount(page);
-
- if (mapcount >= 2)
+ if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
else
mss->private_hugetlb += huge_page_size(hstate_vma(vma));
@@ -887,7 +885,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
last_vma_end = vma->vm_end;
}
- show_vma_header_prefix(m, priv->mm->mmap->vm_start,
+ show_vma_header_prefix(m, priv->mm->mmap ? priv->mm->mmap->vm_start : 0,
last_vma_end, 0, 0, 0, 0);
seq_pad(m, ' ');
seq_puts(m, "[rollup]\n");
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 8f0369aad22a..9fe46cc26403 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -118,6 +118,7 @@ config PSTORE_CONSOLE
config PSTORE_PMSG
bool "Log user space messages"
depends on PSTORE
+ select RT_MUTEXES
help
When the option is enabled, pstore will export a character
interface /dev/pmsg0 to log user space messages. On reboot
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 013486b5125e..9f83d8eba0e6 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -563,6 +563,7 @@ static int ramoops_init_przs(const char *name,
}
zone_sz = mem_sz / *cnt;
+ zone_sz = ALIGN_DOWN(zone_sz, 2);
if (!zone_sz) {
dev_err(dev, "%s zone size == 0\n", name);
goto fail;
@@ -759,6 +760,7 @@ static int ramoops_probe(struct platform_device *pdev)
/* Make sure we didn't get bogus platform data pointer. */
if (!pdata) {
pr_err("NULL platform data\n");
+ err = -EINVAL;
goto fail_out;
}
@@ -766,6 +768,7 @@ static int ramoops_probe(struct platform_device *pdev)
!pdata->ftrace_size && !pdata->pmsg_size)) {
pr_err("The memory size and the record/console size must be "
"non-zero\n");
+ err = -EINVAL;
goto fail_out;
}
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 1f4d8c06f9be..679b250a3912 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -190,7 +190,7 @@ static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
{
int numerr;
struct persistent_ram_buffer *buffer = prz->buffer;
- int ecc_blocks;
+ size_t ecc_blocks;
size_t ecc_total;
if (!ecc_info || !ecc_info->ecc_size)
@@ -427,7 +427,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
phys_addr_t addr = page_start + i * PAGE_SIZE;
pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
}
- vaddr = vmap(pages, page_count, VM_MAP, prot);
+ /*
+ * VM_IOREMAP used here to bypass this region during vread()
+ * and kmap_atomic() (i.e. kcore) to avoid __va() failures.
+ */
+ vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot);
kfree(pages);
/*
@@ -502,7 +506,7 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
sig ^= PERSISTENT_RAM_SIG;
if (prz->buffer->sig == sig) {
- if (buffer_size(prz) == 0) {
+ if (buffer_size(prz) == 0 && buffer_start(prz) == 0) {
pr_debug("found existing empty buffer\n");
return 0;
}
@@ -575,6 +579,8 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
raw_spin_lock_init(&prz->buffer_lock);
prz->flags = flags;
prz->label = kstrdup(label, GFP_KERNEL);
+ if (!prz->label)
+ goto err;
ret = persistent_ram_buffer_map(start, size, prz, memtype);
if (ret)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index dc5f8654b277..a7ddb874912d 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -223,18 +223,26 @@ static void put_quota_format(struct quota_format_type *fmt)
/*
* Dquot List Management:
- * The quota code uses four lists for dquot management: the inuse_list,
- * free_dquots, dqi_dirty_list, and dquot_hash[] array. A single dquot
- * structure may be on some of those lists, depending on its current state.
+ * The quota code uses five lists for dquot management: the inuse_list,
+ * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
+ * A single dquot structure may be on some of those lists, depending on
+ * its current state.
*
* All dquots are placed to the end of inuse_list when first created, and this
* list is used for invalidate operation, which must look at every dquot.
*
- * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
- * and this list is searched whenever we need an available dquot. Dquots are
- * removed from the list as soon as they are used again, and
- * dqstats.free_dquots gives the number of dquots on the list. When
- * dquot is invalidated it's completely released from memory.
+ * When the last reference of a dquot is dropped, the dquot is added to
+ * releasing_dquots. We'll then queue a work item which will call
+ * synchronize_srcu() and after that perform the final cleanup of all the
+ * dquots on the list. Each cleaned up dquot is moved to free_dquots list.
+ * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
+ * struct.
+ *
+ * Unused and cleaned up dquots are in the free_dquots list and this list is
+ * searched whenever we need an available dquot. Dquots are removed from the
+ * list as soon as they are used again and dqstats.free_dquots gives the number
+ * of dquots on the list. When dquot is invalidated it's completely released
+ * from memory.
*
* Dirty dquots are added to the dqi_dirty_list of quota_info when marked
* dirty, and this list is searched when writing dirty dquots back to
@@ -248,6 +256,7 @@ static void put_quota_format(struct quota_format_type *fmt)
static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
+static LIST_HEAD(releasing_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;
@@ -258,6 +267,9 @@ static qsize_t inode_get_rsv_space(struct inode *inode);
static qsize_t __inode_get_rsv_space(struct inode *inode);
static int __dquot_initialize(struct inode *inode, int type);
+static void quota_release_workfn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);
+
static inline unsigned int
hashfn(const struct super_block *sb, struct kqid qid)
{
@@ -305,12 +317,21 @@ static inline void put_dquot_last(struct dquot *dquot)
dqstats_inc(DQST_FREE_DQUOTS);
}
+static inline void put_releasing_dquots(struct dquot *dquot)
+{
+ list_add_tail(&dquot->dq_free, &releasing_dquots);
+ set_bit(DQ_RELEASING_B, &dquot->dq_flags);
+}
+
static inline void remove_free_dquot(struct dquot *dquot)
{
if (list_empty(&dquot->dq_free))
return;
list_del_init(&dquot->dq_free);
- dqstats_dec(DQST_FREE_DQUOTS);
+ if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
+ dqstats_dec(DQST_FREE_DQUOTS);
+ else
+ clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
}
static inline void put_inuse(struct dquot *dquot)
@@ -336,6 +357,11 @@ static void wait_on_dquot(struct dquot *dquot)
mutex_unlock(&dquot->dq_lock);
}
+static inline int dquot_active(struct dquot *dquot)
+{
+ return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+}
+
static inline int dquot_dirty(struct dquot *dquot)
{
return test_bit(DQ_MOD_B, &dquot->dq_flags);
@@ -351,14 +377,14 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
{
int ret = 1;
- if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
+ if (!dquot_active(dquot))
return 0;
if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);
/* If quota is dirty already, we don't have to acquire dq_list_lock */
- if (test_bit(DQ_MOD_B, &dquot->dq_flags))
+ if (dquot_dirty(dquot))
return 1;
spin_lock(&dq_list_lock);
@@ -373,15 +399,17 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
/* Dirtify all the dquots - this can block when journalling */
-static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
+static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
{
int ret, err, cnt;
+ struct dquot *dquot;
ret = err = 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (dquot[cnt])
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (dquot)
/* Even in case of error we have to continue */
- ret = mark_dquot_dirty(dquot[cnt]);
+ ret = mark_dquot_dirty(dquot);
if (!err)
err = ret;
}
@@ -438,7 +466,7 @@ int dquot_acquire(struct dquot *dquot)
smp_mb__before_atomic();
set_bit(DQ_READ_B, &dquot->dq_flags);
/* Instantiate dquot if needed */
- if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
+ if (!dquot_active(dquot) && !dquot->dq_off) {
ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
/* Write the info if needed */
if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
@@ -477,7 +505,7 @@ int dquot_commit(struct dquot *dquot)
goto out_lock;
/* Inactive dquot can only exist if there was an error during read/init
* => we'd better not write it */
- if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
+ if (dquot_active(dquot))
ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
else
ret = -EIO;
@@ -538,6 +566,8 @@ static void invalidate_dquots(struct super_block *sb, int type)
struct dquot *dquot, *tmp;
restart:
+ flush_delayed_work(&quota_release_work);
+
spin_lock(&dq_list_lock);
list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
if (dquot->dq_sb != sb)
@@ -546,7 +576,7 @@ restart:
continue;
/* Wait for dquot users */
if (atomic_read(&dquot->dq_count)) {
- dqgrab(dquot);
+ atomic_inc(&dquot->dq_count);
spin_unlock(&dq_list_lock);
/*
* Once dqput() wakes us up, we know it's time to free
@@ -565,6 +595,15 @@ restart:
goto restart;
}
/*
+ * The last user already dropped its reference but the dquot hasn't
+ * been fully cleaned up yet. Restart the scan, which flushes the
+ * work that cleans up released dquots.
+ */
+ if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
+ spin_unlock(&dq_list_lock);
+ goto restart;
+ }
+ /*
* Quota now has no users and it has been written on last
* dqput()
*/
@@ -588,14 +627,13 @@ int dquot_scan_active(struct super_block *sb,
spin_lock(&dq_list_lock);
list_for_each_entry(dquot, &inuse_list, dq_inuse) {
- if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
+ if (!dquot_active(dquot))
continue;
if (dquot->dq_sb != sb)
continue;
/* Now we have active dquot so we can just increase use count */
atomic_inc(&dquot->dq_count);
spin_unlock(&dq_list_lock);
- dqstats_inc(DQST_LOOKUPS);
dqput(old_dquot);
old_dquot = dquot;
/*
@@ -604,7 +642,7 @@ int dquot_scan_active(struct super_block *sb,
* outstanding call and recheck the DQ_ACTIVE_B after that.
*/
wait_on_dquot(dquot);
- if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+ if (dquot_active(dquot)) {
ret = fn(dquot, priv);
if (ret < 0)
goto out;
@@ -620,6 +658,18 @@ out:
}
EXPORT_SYMBOL(dquot_scan_active);
+static inline int dquot_write_dquot(struct dquot *dquot)
+{
+ int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
+ if (ret < 0) {
+ quota_error(dquot->dq_sb, "Can't write quota structure "
+ "(error %d). Quota may get out of sync!", ret);
+ /* Clear dirty bit anyway to avoid infinite loop. */
+ clear_dquot_dirty(dquot);
+ }
+ return ret;
+}
+
/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
@@ -643,24 +693,23 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
dquot = list_first_entry(&dirty, struct dquot,
dq_dirty);
- WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
+ WARN_ON(!dquot_active(dquot));
+ /* If the dquot is being released we should not touch it */
+ if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
+ spin_unlock(&dq_list_lock);
+ flush_delayed_work(&quota_release_work);
+ spin_lock(&dq_list_lock);
+ continue;
+ }
/* Now we have active dquot from which someone is
* holding reference so we can safely just increase
* use count */
dqgrab(dquot);
spin_unlock(&dq_list_lock);
- dqstats_inc(DQST_LOOKUPS);
- err = sb->dq_op->write_dquot(dquot);
- if (err) {
- /*
- * Clear dirty bit anyway to avoid infinite
- * loop here.
- */
- clear_dquot_dirty(dquot);
- if (!ret)
- ret = err;
- }
+ err = dquot_write_dquot(dquot);
+ if (err && !ret)
+ ret = err;
dqput(dquot);
spin_lock(&dq_list_lock);
}
@@ -754,12 +803,52 @@ static struct shrinker dqcache_shrinker = {
};
/*
+ * Safely release dquot and put reference to dquot.
+ */
+static void quota_release_workfn(struct work_struct *work)
+{
+ struct dquot *dquot;
+ struct list_head rls_head;
+
+ spin_lock(&dq_list_lock);
+ /* Exchange the list head to avoid livelock. */
+ list_replace_init(&releasing_dquots, &rls_head);
+ spin_unlock(&dq_list_lock);
+ synchronize_srcu(&dquot_srcu);
+
+restart:
+ spin_lock(&dq_list_lock);
+ while (!list_empty(&rls_head)) {
+ dquot = list_first_entry(&rls_head, struct dquot, dq_free);
+ WARN_ON_ONCE(atomic_read(&dquot->dq_count));
+ /*
+ * Note that DQ_RELEASING_B protects us from racing with
+ * invalidate_dquots() calls so we are safe to work with the
+ * dquot even after we drop dq_list_lock.
+ */
+ if (dquot_dirty(dquot)) {
+ spin_unlock(&dq_list_lock);
+ /* Commit dquot before releasing */
+ dquot_write_dquot(dquot);
+ goto restart;
+ }
+ if (dquot_active(dquot)) {
+ spin_unlock(&dq_list_lock);
+ dquot->dq_sb->dq_op->release_dquot(dquot);
+ goto restart;
+ }
+ /* Dquot is inactive and clean, now move it to free list */
+ remove_free_dquot(dquot);
+ put_dquot_last(dquot);
+ }
+ spin_unlock(&dq_list_lock);
+}
+
+/*
* Put reference to dquot
*/
void dqput(struct dquot *dquot)
{
- int ret;
-
if (!dquot)
return;
#ifdef CONFIG_QUOTA_DEBUG
@@ -771,7 +860,7 @@ void dqput(struct dquot *dquot)
}
#endif
dqstats_inc(DQST_DROPS);
-we_slept:
+
spin_lock(&dq_list_lock);
if (atomic_read(&dquot->dq_count) > 1) {
/* We have more than one user... nothing to do */
@@ -783,35 +872,16 @@ we_slept:
spin_unlock(&dq_list_lock);
return;
}
+
/* Need to release dquot? */
- if (dquot_dirty(dquot)) {
- spin_unlock(&dq_list_lock);
- /* Commit dquot before releasing */
- ret = dquot->dq_sb->dq_op->write_dquot(dquot);
- if (ret < 0) {
- quota_error(dquot->dq_sb, "Can't write quota structure"
- " (error %d). Quota may get out of sync!",
- ret);
- /*
- * We clear dirty bit anyway, so that we avoid
- * infinite loop here
- */
- clear_dquot_dirty(dquot);
- }
- goto we_slept;
- }
- if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
- spin_unlock(&dq_list_lock);
- dquot->dq_sb->dq_op->release_dquot(dquot);
- goto we_slept;
- }
- atomic_dec(&dquot->dq_count);
#ifdef CONFIG_QUOTA_DEBUG
/* sanity check */
BUG_ON(!list_empty(&dquot->dq_free));
#endif
- put_dquot_last(dquot);
+ put_releasing_dquots(dquot);
+ atomic_dec(&dquot->dq_count);
spin_unlock(&dq_list_lock);
+ queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
}
EXPORT_SYMBOL(dqput);
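
With the rework above, dqput() never frees inline: the last reference only parks the dquot and kicks the delayed worker. A hedged userspace sketch of that last-put handoff, where push_releasing() and kick_worker() are hypothetical stand-ins for put_releasing_dquots() and queue_delayed_work():

    #include <stdatomic.h>

    struct obj { atomic_int refs; struct obj *next; };

    /* hypothetical helpers standing in for the kernel's list/workqueue calls */
    void push_releasing(struct obj *o);
    void kick_worker(void);

    void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub_explicit(&o->refs, 1, memory_order_acq_rel) != 1)
            return;              /* not the last reference: nothing to do */
        push_releasing(o);       /* park it; the worker frees it later */
        kick_worker();
    }
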
@@ -898,10 +968,10 @@ we_slept:
dqstats_inc(DQST_LOOKUPS);
}
/* Wait for dq_lock - after this we know that either dquot_release() is
- * already finished or it will be canceled due to dq_count > 1 test */
+ * already finished or it will be canceled due to dq_count > 0 test */
wait_on_dquot(dquot);
/* Read the dquot / allocate space in quota file */
- if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+ if (!dquot_active(dquot)) {
int err;
err = sb->dq_op->acquire_dquot(dquot);
@@ -927,14 +997,15 @@ out:
}
EXPORT_SYMBOL(dqget);
-static inline struct dquot **i_dquot(struct inode *inode)
+static inline struct dquot __rcu **i_dquot(struct inode *inode)
{
- return inode->i_sb->s_op->get_dquots(inode);
+ /* Force __rcu for now until filesystems are fixed */
+ return (struct dquot __rcu **)inode->i_sb->s_op->get_dquots(inode);
}
static int dqinit_needed(struct inode *inode, int type)
{
- struct dquot * const *dquots;
+ struct dquot __rcu * const *dquots;
int cnt;
if (IS_NOQUOTA(inode))
@@ -1007,59 +1078,7 @@ out:
return err;
}
-/*
- * Remove references to dquots from inode and add dquot to list for freeing
- * if we have the last reference to dquot
- */
-static void remove_inode_dquot_ref(struct inode *inode, int type,
- struct list_head *tofree_head)
-{
- struct dquot **dquots = i_dquot(inode);
- struct dquot *dquot = dquots[type];
-
- if (!dquot)
- return;
-
- dquots[type] = NULL;
- if (list_empty(&dquot->dq_free)) {
- /*
- * The inode still has reference to dquot so it can't be in the
- * free list
- */
- spin_lock(&dq_list_lock);
- list_add(&dquot->dq_free, tofree_head);
- spin_unlock(&dq_list_lock);
- } else {
- /*
- * Dquot is already in a list to put so we won't drop the last
- * reference here.
- */
- dqput(dquot);
- }
-}
-
-/*
- * Free list of dquots
- * Dquots are removed from inodes and no new references can be got so we are
- * the only ones holding reference
- */
-static void put_dquot_list(struct list_head *tofree_head)
-{
- struct list_head *act_head;
- struct dquot *dquot;
-
- act_head = tofree_head->next;
- while (act_head != tofree_head) {
- dquot = list_entry(act_head, struct dquot, dq_free);
- act_head = act_head->next;
- /* Remove dquot from the list so we won't have problems... */
- list_del_init(&dquot->dq_free);
- dqput(dquot);
- }
-}
-
-static void remove_dquot_ref(struct super_block *sb, int type,
- struct list_head *tofree_head)
+static void remove_dquot_ref(struct super_block *sb, int type)
{
struct inode *inode;
#ifdef CONFIG_QUOTA_DEBUG
@@ -1076,11 +1095,18 @@ static void remove_dquot_ref(struct super_block *sb, int type,
*/
spin_lock(&dq_data_lock);
if (!IS_NOQUOTA(inode)) {
+ struct dquot __rcu **dquots = i_dquot(inode);
+ struct dquot *dquot = srcu_dereference_check(
+ dquots[type], &dquot_srcu,
+ lockdep_is_held(&dq_data_lock));
+
#ifdef CONFIG_QUOTA_DEBUG
if (unlikely(inode_get_rsv_space(inode) > 0))
reserved = 1;
#endif
- remove_inode_dquot_ref(inode, type, tofree_head);
+ rcu_assign_pointer(dquots[type], NULL);
+ if (dquot)
+ dqput(dquot);
}
spin_unlock(&dq_data_lock);
}
@@ -1097,13 +1123,8 @@ static void remove_dquot_ref(struct super_block *sb, int type,
/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
- LIST_HEAD(tofree_head);
-
- if (sb->dq_op) {
- remove_dquot_ref(sb, type, &tofree_head);
- synchronize_srcu(&dquot_srcu);
- put_dquot_list(&tofree_head);
- }
+ if (sb->dq_op)
+ remove_dquot_ref(sb, type);
}
static inline
@@ -1418,7 +1439,7 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
return QUOTA_NL_NOWARN;
}
-static int dquot_active(const struct inode *inode)
+static int inode_quota_active(const struct inode *inode)
{
struct super_block *sb = inode->i_sb;
@@ -1436,12 +1457,13 @@ static int dquot_active(const struct inode *inode)
static int __dquot_initialize(struct inode *inode, int type)
{
int cnt, init_needed = 0;
- struct dquot **dquots, *got[MAXQUOTAS] = {};
+ struct dquot __rcu **dquots;
+ struct dquot *got[MAXQUOTAS] = {};
struct super_block *sb = inode->i_sb;
qsize_t rsv;
int ret = 0;
- if (!dquot_active(inode))
+ if (!inode_quota_active(inode))
return 0;
dquots = i_dquot(inode);
@@ -1511,7 +1533,7 @@ static int __dquot_initialize(struct inode *inode, int type)
if (!got[cnt])
continue;
if (!dquots[cnt]) {
- dquots[cnt] = got[cnt];
+ rcu_assign_pointer(dquots[cnt], got[cnt]);
got[cnt] = NULL;
/*
* Make quota reservation system happy if someone
@@ -1519,12 +1541,16 @@ static int __dquot_initialize(struct inode *inode, int type)
*/
rsv = inode_get_rsv_space(inode);
if (unlikely(rsv)) {
+ struct dquot *dquot = srcu_dereference_check(
+ dquots[cnt], &dquot_srcu,
+ lockdep_is_held(&dq_data_lock));
+
spin_lock(&inode->i_lock);
/* Get reservation again under proper lock */
rsv = __inode_get_rsv_space(inode);
- spin_lock(&dquots[cnt]->dq_dqb_lock);
- dquots[cnt]->dq_dqb.dqb_rsvspace += rsv;
- spin_unlock(&dquots[cnt]->dq_dqb_lock);
+ spin_lock(&dquot->dq_dqb_lock);
+ dquot->dq_dqb.dqb_rsvspace += rsv;
+ spin_unlock(&dquot->dq_dqb_lock);
spin_unlock(&inode->i_lock);
}
}
@@ -1546,10 +1572,10 @@ EXPORT_SYMBOL(dquot_initialize);
bool dquot_initialize_needed(struct inode *inode)
{
- struct dquot **dquots;
+ struct dquot __rcu **dquots;
int i;
- if (!dquot_active(inode))
+ if (!inode_quota_active(inode))
return false;
dquots = i_dquot(inode);
@@ -1571,13 +1597,14 @@ EXPORT_SYMBOL(dquot_initialize_needed);
static void __dquot_drop(struct inode *inode)
{
int cnt;
- struct dquot **dquots = i_dquot(inode);
+ struct dquot __rcu **dquots = i_dquot(inode);
struct dquot *put[MAXQUOTAS];
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- put[cnt] = dquots[cnt];
- dquots[cnt] = NULL;
+ put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu,
+ lockdep_is_held(&dq_data_lock));
+ rcu_assign_pointer(dquots[cnt], NULL);
}
spin_unlock(&dq_data_lock);
dqput_all(put);
@@ -1585,7 +1612,7 @@ static void __dquot_drop(struct inode *inode)
void dquot_drop(struct inode *inode)
{
- struct dquot * const *dquots;
+ struct dquot __rcu * const *dquots;
int cnt;
if (IS_NOQUOTA(inode))
@@ -1658,9 +1685,10 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
int cnt, ret = 0, index;
struct dquot_warn warn[MAXQUOTAS];
int reserve = flags & DQUOT_SPACE_RESERVE;
- struct dquot **dquots;
+ struct dquot __rcu **dquots;
+ struct dquot *dquot;
- if (!dquot_active(inode)) {
+ if (!inode_quota_active(inode)) {
if (reserve) {
spin_lock(&inode->i_lock);
*inode_reserved_space(inode) += number;
@@ -1678,27 +1706,26 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
index = srcu_read_lock(&dquot_srcu);
spin_lock(&inode->i_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (!dquots[cnt])
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (!dquot)
continue;
if (reserve) {
- ret = dquot_add_space(dquots[cnt], 0, number, flags,
- &warn[cnt]);
+ ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]);
} else {
- ret = dquot_add_space(dquots[cnt], number, 0, flags,
- &warn[cnt]);
+ ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]);
}
if (ret) {
/* Back out changes we already did */
for (cnt--; cnt >= 0; cnt--) {
- if (!dquots[cnt])
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (!dquot)
continue;
- spin_lock(&dquots[cnt]->dq_dqb_lock);
+ spin_lock(&dquot->dq_dqb_lock);
if (reserve)
- dquot_free_reserved_space(dquots[cnt],
- number);
+ dquot_free_reserved_space(dquot, number);
else
- dquot_decr_space(dquots[cnt], number);
- spin_unlock(&dquots[cnt]->dq_dqb_lock);
+ dquot_decr_space(dquot, number);
+ spin_unlock(&dquot->dq_dqb_lock);
}
spin_unlock(&inode->i_lock);
goto out_flush_warn;
@@ -1728,9 +1755,10 @@ int dquot_alloc_inode(struct inode *inode)
{
int cnt, ret = 0, index;
struct dquot_warn warn[MAXQUOTAS];
- struct dquot * const *dquots;
+ struct dquot __rcu * const *dquots;
+ struct dquot *dquot;
- if (!dquot_active(inode))
+ if (!inode_quota_active(inode))
return 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warn[cnt].w_type = QUOTA_NL_NOWARN;
@@ -1739,17 +1767,19 @@ int dquot_alloc_inode(struct inode *inode)
index = srcu_read_lock(&dquot_srcu);
spin_lock(&inode->i_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (!dquots[cnt])
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (!dquot)
continue;
- ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]);
+ ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
if (ret) {
for (cnt--; cnt >= 0; cnt--) {
- if (!dquots[cnt])
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (!dquot)
continue;
/* Back out changes we already did */
- spin_lock(&dquots[cnt]->dq_dqb_lock);
- dquot_decr_inodes(dquots[cnt], 1);
- spin_unlock(&dquots[cnt]->dq_dqb_lock);
+ spin_lock(&dquot->dq_dqb_lock);
+ dquot_decr_inodes(dquot, 1);
+ spin_unlock(&dquot->dq_dqb_lock);
}
goto warn_put_all;
}
@@ -1770,10 +1800,11 @@ EXPORT_SYMBOL(dquot_alloc_inode);
*/
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
- struct dquot **dquots;
+ struct dquot __rcu **dquots;
+ struct dquot *dquot;
int cnt, index;
- if (!dquot_active(inode)) {
+ if (!inode_quota_active(inode)) {
spin_lock(&inode->i_lock);
*inode_reserved_space(inode) -= number;
__inode_add_bytes(inode, number);
@@ -1786,9 +1817,8 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
spin_lock(&inode->i_lock);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (dquots[cnt]) {
- struct dquot *dquot = dquots[cnt];
-
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (dquot) {
spin_lock(&dquot->dq_dqb_lock);
if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
number = dquot->dq_dqb.dqb_rsvspace;
@@ -1812,10 +1842,11 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty);
*/
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
- struct dquot **dquots;
+ struct dquot __rcu **dquots;
+ struct dquot *dquot;
int cnt, index;
- if (!dquot_active(inode)) {
+ if (!inode_quota_active(inode)) {
spin_lock(&inode->i_lock);
*inode_reserved_space(inode) += number;
__inode_sub_bytes(inode, number);
@@ -1828,9 +1859,8 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
spin_lock(&inode->i_lock);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (dquots[cnt]) {
- struct dquot *dquot = dquots[cnt];
-
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (dquot) {
spin_lock(&dquot->dq_dqb_lock);
if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
number = dquot->dq_dqb.dqb_curspace;
@@ -1856,10 +1886,11 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
unsigned int cnt;
struct dquot_warn warn[MAXQUOTAS];
- struct dquot **dquots;
+ struct dquot __rcu **dquots;
+ struct dquot *dquot;
int reserve = flags & DQUOT_SPACE_RESERVE, index;
- if (!dquot_active(inode)) {
+ if (!inode_quota_active(inode)) {
if (reserve) {
spin_lock(&inode->i_lock);
*inode_reserved_space(inode) -= number;
@@ -1877,17 +1908,18 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
int wtype;
warn[cnt].w_type = QUOTA_NL_NOWARN;
- if (!dquots[cnt])
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (!dquot)
continue;
- spin_lock(&dquots[cnt]->dq_dqb_lock);
- wtype = info_bdq_free(dquots[cnt], number);
+ spin_lock(&dquot->dq_dqb_lock);
+ wtype = info_bdq_free(dquot, number);
if (wtype != QUOTA_NL_NOWARN)
- prepare_warning(&warn[cnt], dquots[cnt], wtype);
+ prepare_warning(&warn[cnt], dquot, wtype);
if (reserve)
- dquot_free_reserved_space(dquots[cnt], number);
+ dquot_free_reserved_space(dquot, number);
else
- dquot_decr_space(dquots[cnt], number);
- spin_unlock(&dquots[cnt]->dq_dqb_lock);
+ dquot_decr_space(dquot, number);
+ spin_unlock(&dquot->dq_dqb_lock);
}
if (reserve)
*inode_reserved_space(inode) -= number;
@@ -1911,10 +1943,11 @@ void dquot_free_inode(struct inode *inode)
{
unsigned int cnt;
struct dquot_warn warn[MAXQUOTAS];
- struct dquot * const *dquots;
+ struct dquot __rcu * const *dquots;
+ struct dquot *dquot;
int index;
- if (!dquot_active(inode))
+ if (!inode_quota_active(inode))
return;
dquots = i_dquot(inode);
@@ -1922,16 +1955,16 @@ void dquot_free_inode(struct inode *inode)
spin_lock(&inode->i_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
int wtype;
-
warn[cnt].w_type = QUOTA_NL_NOWARN;
- if (!dquots[cnt])
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (!dquot)
continue;
- spin_lock(&dquots[cnt]->dq_dqb_lock);
- wtype = info_idq_free(dquots[cnt], 1);
+ spin_lock(&dquot->dq_dqb_lock);
+ wtype = info_idq_free(dquot, 1);
if (wtype != QUOTA_NL_NOWARN)
- prepare_warning(&warn[cnt], dquots[cnt], wtype);
- dquot_decr_inodes(dquots[cnt], 1);
- spin_unlock(&dquots[cnt]->dq_dqb_lock);
+ prepare_warning(&warn[cnt], dquot, wtype);
+ dquot_decr_inodes(dquot, 1);
+ spin_unlock(&dquot->dq_dqb_lock);
}
spin_unlock(&inode->i_lock);
mark_all_dquot_dirty(dquots);
@@ -1957,8 +1990,9 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
qsize_t cur_space;
qsize_t rsv_space = 0;
qsize_t inode_usage = 1;
+ struct dquot __rcu **dquots;
struct dquot *transfer_from[MAXQUOTAS] = {};
- int cnt, ret = 0;
+ int cnt, index, ret = 0;
char is_valid[MAXQUOTAS] = {};
struct dquot_warn warn_to[MAXQUOTAS];
struct dquot_warn warn_from_inodes[MAXQUOTAS];
@@ -1989,6 +2023,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
}
cur_space = __inode_get_bytes(inode);
rsv_space = __inode_get_rsv_space(inode);
+ dquots = i_dquot(inode);
/*
* Build the transfer_from list, check limits, and update usage in
* the target structures.
@@ -2003,7 +2038,8 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
if (!sb_has_quota_active(inode->i_sb, cnt))
continue;
is_valid[cnt] = 1;
- transfer_from[cnt] = i_dquot(inode)[cnt];
+ transfer_from[cnt] = srcu_dereference_check(dquots[cnt],
+ &dquot_srcu, lockdep_is_held(&dq_data_lock));
ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
&warn_to[cnt]);
if (ret)
@@ -2042,13 +2078,21 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
rsv_space);
spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
}
- i_dquot(inode)[cnt] = transfer_to[cnt];
+ rcu_assign_pointer(dquots[cnt], transfer_to[cnt]);
}
spin_unlock(&inode->i_lock);
spin_unlock(&dq_data_lock);
- mark_all_dquot_dirty(transfer_from);
- mark_all_dquot_dirty(transfer_to);
+ /*
+ * These arrays are local and we hold dquot references so we don't need
+ * the srcu protection but still take dquot_srcu to avoid warning in
+ * mark_all_dquot_dirty().
+ */
+ index = srcu_read_lock(&dquot_srcu);
+ mark_all_dquot_dirty((struct dquot __rcu **)transfer_from);
+ mark_all_dquot_dirty((struct dquot __rcu **)transfer_to);
+ srcu_read_unlock(&dquot_srcu, index);
+
flush_warnings(warn_to);
flush_warnings(warn_from_inodes);
flush_warnings(warn_from_space);
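
The conversion throughout this file follows the standard SRCU publish/retire pattern: readers bracket access with srcu_read_lock()/srcu_read_unlock() and fetch pointers via srcu_dereference(), while updaters publish with rcu_assign_pointer() and wait out readers with synchronize_srcu(). A condensed, non-compilable sketch of the two sides as used in the hunks above:

    /* reader side (e.g. __dquot_alloc_space) */
    idx = srcu_read_lock(&dquot_srcu);
    dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
    if (dquot)
        dquot_add_space(dquot, ...);  /* dquot can't be freed under us */
    srcu_read_unlock(&dquot_srcu, idx);

    /* updater side (e.g. remove_dquot_ref + the release worker) */
    rcu_assign_pointer(dquots[cnt], NULL);
    synchronize_srcu(&dquot_srcu);    /* all readers of the old pointer are done */
    /* ...only now perform the final cleanup of the old dquot... */
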
@@ -2085,7 +2129,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
struct super_block *sb = inode->i_sb;
int ret;
- if (!dquot_active(inode))
+ if (!inode_quota_active(inode))
return 0;
if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)){
@@ -2306,28 +2350,76 @@ EXPORT_SYMBOL(dquot_quota_off);
* Turn quotas on on a device
*/
-/*
- * Helper function to turn quotas on when we already have the inode of
- * quota file and no quota information is loaded.
- */
-static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
+static int vfs_setup_quota_inode(struct inode *inode, int type)
+{
+ struct super_block *sb = inode->i_sb;
+ struct quota_info *dqopt = sb_dqopt(sb);
+
+ if (is_bad_inode(inode))
+ return -EUCLEAN;
+ if (!S_ISREG(inode->i_mode))
+ return -EACCES;
+ if (IS_RDONLY(inode))
+ return -EROFS;
+ if (sb_has_quota_loaded(sb, type))
+ return -EBUSY;
+
+ /*
+ * Quota files should never be encrypted. They should be thought of as
+ * filesystem metadata, not user data. New-style internal quota files
+ * cannot be encrypted by users anyway, but old-style external quota
+ * files could potentially be incorrectly created in an encrypted
+ * directory, hence this explicit check. Some reasons why encrypted
+ * quota files don't work include: (1) some filesystems that support
+ * encryption don't handle it in their quota_read and quota_write, and
+ * (2) cleaning up encrypted quota files at unmount would need special
+ * consideration, as quota files are cleaned up later than user files.
+ */
+ if (IS_ENCRYPTED(inode))
+ return -EINVAL;
+
+ dqopt->files[type] = igrab(inode);
+ if (!dqopt->files[type])
+ return -EIO;
+ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
+ /* We don't want quota and atime on quota files (deadlocks
+ * possible). Also nobody should write to the file - we use
+ * special IO operations which ignore the immutable bit. */
+ inode_lock(inode);
+ inode->i_flags |= S_NOQUOTA;
+ inode_unlock(inode);
+ /*
+ * When S_NOQUOTA is set, remove dquot references as no more
+ * references can be added
+ */
+ __dquot_drop(inode);
+ }
+ return 0;
+}
+
+static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ struct inode *inode = dqopt->files[type];
+
+ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
+ inode_lock(inode);
+ inode->i_flags &= ~S_NOQUOTA;
+ inode_unlock(inode);
+ }
+ dqopt->files[type] = NULL;
+ iput(inode);
+}
+
+int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
unsigned int flags)
{
struct quota_format_type *fmt = find_quota_format(format_id);
- struct super_block *sb = inode->i_sb;
struct quota_info *dqopt = sb_dqopt(sb);
int error;
if (!fmt)
return -ESRCH;
- if (!S_ISREG(inode->i_mode)) {
- error = -EACCES;
- goto out_fmt;
- }
- if (IS_RDONLY(inode)) {
- error = -EROFS;
- goto out_fmt;
- }
if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
(type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
error = -EINVAL;
@@ -2359,27 +2451,9 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
invalidate_bdev(sb->s_bdev);
}
- if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
- /* We don't want quota and atime on quota files (deadlocks
- * possible) Also nobody should write to the file - we use
- * special IO operations which ignore the immutable bit. */
- inode_lock(inode);
- inode->i_flags |= S_NOQUOTA;
- inode_unlock(inode);
- /*
- * When S_NOQUOTA is set, remove dquot references as no more
- * references can be added
- */
- __dquot_drop(inode);
- }
-
- error = -EIO;
- dqopt->files[type] = igrab(inode);
- if (!dqopt->files[type])
- goto out_file_flags;
error = -EINVAL;
if (!fmt->qf_ops->check_quota_file(sb, type))
- goto out_file_init;
+ goto out_fmt;
dqopt->ops[type] = fmt->qf_ops;
dqopt->info[type].dqi_format = fmt;
@@ -2387,7 +2461,7 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
error = dqopt->ops[type]->read_file_info(sb, type);
if (error < 0)
- goto out_file_init;
+ goto out_fmt;
if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
spin_lock(&dq_data_lock);
dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
@@ -2399,21 +2473,34 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
error = add_dquot_ref(sb, type);
if (error)
- dquot_disable(sb, type, flags);
+ dquot_disable(sb, type,
+ DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
return error;
-out_file_init:
- dqopt->files[type] = NULL;
- iput(inode);
-out_file_flags:
- inode_lock(inode);
- inode->i_flags &= ~S_NOQUOTA;
- inode_unlock(inode);
out_fmt:
put_quota_format(fmt);
return error;
}
+EXPORT_SYMBOL(dquot_load_quota_sb);
+
+/*
+ * Helper function to turn quotas on when we already have the inode of
+ * quota file and no quota information is loaded.
+ */
+static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
+ unsigned int flags)
+{
+ int err;
+
+ err = vfs_setup_quota_inode(inode, type);
+ if (err < 0)
+ return err;
+ err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
+ if (err < 0)
+ vfs_cleanup_quota_inode(inode->i_sb, type);
+ return err;
+}
/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
@@ -2514,21 +2601,15 @@ int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
struct dentry *dentry;
int error;
- dentry = lookup_one_len_unlocked(qf_name, sb->s_root, strlen(qf_name));
+ dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name));
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (d_really_is_negative(dentry)) {
- error = -ENOENT;
- goto out;
- }
-
error = security_quota_on(dentry);
if (!error)
error = vfs_load_quota_inode(d_inode(dentry), type, format_id,
DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
-out:
dput(dentry);
return error;
}
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index 1a188fbdf34e..07948f6ac84e 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -80,6 +80,35 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
return ret;
}
+static inline int do_check_range(struct super_block *sb, const char *val_name,
+ uint val, uint min_val, uint max_val)
+{
+ if (val < min_val || val > max_val) {
+ quota_error(sb, "Getting %s %u out of range %u-%u",
+ val_name, val, min_val, max_val);
+ return -EUCLEAN;
+ }
+
+ return 0;
+}
+
+static int check_dquot_block_header(struct qtree_mem_dqinfo *info,
+ struct qt_disk_dqdbheader *dh)
+{
+ int err = 0;
+
+ err = do_check_range(info->dqi_sb, "dqdh_next_free",
+ le32_to_cpu(dh->dqdh_next_free), 0,
+ info->dqi_blocks - 1);
+ if (err)
+ return err;
+ err = do_check_range(info->dqi_sb, "dqdh_prev_free",
+ le32_to_cpu(dh->dqdh_prev_free), 0,
+ info->dqi_blocks - 1);
+
+ return err;
+}
+
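
check_dquot_block_header() is an instance of validating untrusted on-disk indices before they are used to address blocks. A minimal userspace sketch of the same guard, assuming a u32 field read from disk:

    #include <stdint.h>
    #include <stdio.h>

    static int check_range(const char *name, uint32_t val,
                           uint32_t min, uint32_t max)
    {
        if (val < min || val > max) {
            fprintf(stderr, "%s %u out of range %u-%u\n",
                    name, val, min, max);
            return -1;             /* treat as corruption, don't use the value */
        }
        return 0;
    }

    /* usage: reject a free-list pointer past the end of the quota file, e.g.
       if (check_range("next_free", next_free, 0, nr_blocks - 1)) ... */
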
/* Remove empty block from list and return it */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
@@ -94,6 +123,9 @@ static int get_free_dqblk(struct qtree_mem_dqinfo *info)
ret = read_blk(info, blk, buf);
if (ret < 0)
goto out_buf;
+ ret = check_dquot_block_header(info, dh);
+ if (ret)
+ goto out_buf;
info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
}
else {
@@ -241,6 +273,9 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
*err = read_blk(info, blk, buf);
if (*err < 0)
goto out_buf;
+ *err = check_dquot_block_header(info, dh);
+ if (*err)
+ goto out_buf;
} else {
blk = get_free_dqblk(info);
if ((int)blk < 0) {
@@ -433,6 +468,9 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
goto out_buf;
}
dh = (struct qt_disk_dqdbheader *)buf;
+ ret = check_dquot_block_header(info, dh);
+ if (ret)
+ goto out_buf;
le16_add_cpu(&dh->dqdh_entries, -1);
if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? */
ret = remove_free_dqentry(info, buf, blk);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 09ad022a78a5..b8277a5dfe40 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2327,7 +2327,7 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
int i, j;
bh = __getblk(dev, block, bufsize);
- if (buffer_uptodate(bh))
+ if (!bh || buffer_uptodate(bh))
return (bh);
if (block + BUFNR > max_block) {
@@ -2337,6 +2337,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
j = 1;
for (i = 1; i < blocks; i++) {
bh = __getblk(dev, block + i, bufsize);
+ if (!bh)
+ break;
if (buffer_uptodate(bh)) {
brelse(bh);
break;
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 959a066b7bb0..2843b7cf4d7a 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -695,6 +695,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod
out_failed:
reiserfs_write_unlock(dir->i_sb);
+ reiserfs_security_free(&security);
return retval;
}
@@ -778,6 +779,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode
out_failed:
reiserfs_write_unlock(dir->i_sb);
+ reiserfs_security_free(&security);
return retval;
}
@@ -876,6 +878,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
retval = journal_end(&th);
out_failed:
reiserfs_write_unlock(dir->i_sb);
+ reiserfs_security_free(&security);
return retval;
}
@@ -1191,6 +1194,7 @@ static int reiserfs_symlink(struct inode *parent_dir,
retval = journal_end(&th);
out_failed:
reiserfs_write_unlock(parent_dir->i_sb);
+ reiserfs_security_free(&security);
return retval;
}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 913f5af9bf24..0ebb6e684908 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1437,7 +1437,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
unsigned long safe_mask = 0;
unsigned int commit_max_age = (unsigned int)-1;
struct reiserfs_journal *journal = SB_JOURNAL(s);
- char *new_opts;
int err;
char *qf_names[REISERFS_MAXQUOTAS];
unsigned int qfmt = 0;
@@ -1445,10 +1444,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
int i;
#endif
- new_opts = kstrdup(arg, GFP_KERNEL);
- if (arg && !new_opts)
- return -ENOMEM;
-
sync_filesystem(s);
reiserfs_write_lock(s);
@@ -1599,7 +1594,6 @@ out_ok_unlocked:
out_err_unlock:
reiserfs_write_unlock(s);
out_err:
- kfree(new_opts);
return err;
}
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index 20be9a0e5870..159af6c26f4b 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -49,6 +49,7 @@ int reiserfs_security_init(struct inode *dir, struct inode *inode,
int error;
sec->name = NULL;
+ sec->value = NULL;
/* Don't add selinux attributes on xattrs - they'll never get used */
if (IS_PRIVATE(dir))
@@ -80,11 +81,15 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th,
struct inode *inode,
struct reiserfs_security_handle *sec)
{
+ char xattr_name[XATTR_NAME_MAX + 1] = XATTR_SECURITY_PREFIX;
int error;
- if (strlen(sec->name) < sizeof(XATTR_SECURITY_PREFIX))
+
+ if (XATTR_SECURITY_PREFIX_LEN + strlen(sec->name) > XATTR_NAME_MAX)
return -EINVAL;
- error = reiserfs_xattr_set_handle(th, inode, sec->name, sec->value,
+ strlcat(xattr_name, sec->name, sizeof(xattr_name));
+
+ error = reiserfs_xattr_set_handle(th, inode, xattr_name, sec->value,
sec->length, XATTR_CREATE);
if (error == -ENODATA || error == -EOPNOTSUPP)
error = 0;
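
The fix builds the full "security." attribute name on the stack and rejects, rather than truncates, names that exceed XATTR_NAME_MAX. A userspace sketch of the same check using snprintf() in place of strlcat() (the 255 limit mirrors the kernel's XATTR_NAME_MAX but is an assumption here):

    #include <stdio.h>
    #include <string.h>

    #define XATTR_NAME_MAX 255

    static int build_xattr_name(char *dst, size_t dstsz, const char *suffix)
    {
        const char *prefix = "security.";

        if (strlen(prefix) + strlen(suffix) > XATTR_NAME_MAX ||
            strlen(prefix) + strlen(suffix) >= dstsz)
            return -1;            /* too long: refuse, never truncate */
        snprintf(dst, dstsz, "%s%s", prefix, suffix);
        return 0;
    }
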
@@ -94,7 +99,6 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th,
void reiserfs_security_free(struct reiserfs_security_handle *sec)
{
- kfree(sec->name);
kfree(sec->value);
sec->name = NULL;
sec->value = NULL;
diff --git a/fs/select.c b/fs/select.c
index 7716d9d5be1e..f405dc5adf3c 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -475,7 +475,7 @@ static inline void wait_key_set(poll_table *wait, unsigned long in,
wait->_key |= POLLOUT_SET;
}
-static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
+static noinline_for_stack int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
ktime_t expire, *to = NULL;
struct poll_wqueues table;
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 3e94d181930f..c3415d969ecf 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -248,6 +248,7 @@ static const struct file_operations signalfd_fops = {
.poll = signalfd_poll,
.read = signalfd_read,
.llseek = noop_llseek,
+ .may_pollfree = true,
};
static int do_signalfd4(int ufd, sigset_t *mask, int flags)
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 236664d69141..ccfaf8c81958 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -183,7 +183,7 @@ static inline int squashfs_block_size(__le32 raw)
#define SQUASHFS_ID_BLOCK_BYTES(A) (SQUASHFS_ID_BLOCKS(A) *\
sizeof(u64))
/* xattr id lookup table defines */
-#define SQUASHFS_XATTR_BYTES(A) ((A) * sizeof(struct squashfs_xattr_id))
+#define SQUASHFS_XATTR_BYTES(A) (((u64) (A)) * sizeof(struct squashfs_xattr_id))
#define SQUASHFS_XATTR_BLOCK(A) (SQUASHFS_XATTR_BYTES(A) / \
SQUASHFS_METADATA_SIZE)
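
The cast matters because it widens the operand before the multiply; without it, a 32-bit multiplication can wrap for large id counts and yield a tiny (even zero) byte count that defeats later sanity checks. A small demonstration, with a 16-byte stand-in for struct squashfs_xattr_id:

    #include <stdint.h>
    #include <stdio.h>

    struct xattr_id { uint64_t xattr; uint32_t count, size; };  /* 16 bytes */

    int main(void)
    {
        uint32_t ids  = 0x10000000;                              /* hostile id count */
        uint32_t bad  = ids * (uint32_t)sizeof(struct xattr_id); /* wraps to 0 */
        uint64_t good = (uint64_t)ids * sizeof(struct xattr_id);

        printf("bad=%u good=%llu\n", bad, (unsigned long long)good);
        return 0;
    }
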
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index 166e98806265..8f9445e290e7 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -63,7 +63,7 @@ struct squashfs_sb_info {
long long bytes_used;
unsigned int inodes;
unsigned int fragments;
- int xattr_ids;
+ unsigned int xattr_ids;
unsigned int ids;
};
#endif
diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h
index d8a270d3ac4c..f1a463d8bfa0 100644
--- a/fs/squashfs/xattr.h
+++ b/fs/squashfs/xattr.h
@@ -10,12 +10,12 @@
#ifdef CONFIG_SQUASHFS_XATTR
extern __le64 *squashfs_read_xattr_id_table(struct super_block *, u64,
- u64 *, int *);
+ u64 *, unsigned int *);
extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
unsigned int *, unsigned long long *);
#else
static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
- u64 start, u64 *xattr_table_start, int *xattr_ids)
+ u64 start, u64 *xattr_table_start, unsigned int *xattr_ids)
{
struct squashfs_xattr_id_table *id_table;
diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
index 087cab8c78f4..c8469c656e0d 100644
--- a/fs/squashfs/xattr_id.c
+++ b/fs/squashfs/xattr_id.c
@@ -56,7 +56,7 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
* Read uncompressed xattr id lookup table indexes from disk into memory
*/
__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
- u64 *xattr_table_start, int *xattr_ids)
+ u64 *xattr_table_start, unsigned int *xattr_ids)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
unsigned int len, indexes;
diff --git a/fs/statfs.c b/fs/statfs.c
index 2616424012ea..4960c69cc47c 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -128,6 +128,7 @@ static int do_statfs_native(struct kstatfs *st, struct statfs __user *p)
if (sizeof(buf) == sizeof(*st))
memcpy(&buf, st, sizeof(*st));
else {
+ memset(&buf, 0, sizeof(buf));
if (sizeof buf.f_blocks == 4) {
if ((st->f_blocks | st->f_bfree | st->f_bavail |
st->f_bsize | st->f_frsize) &
@@ -156,7 +157,6 @@ static int do_statfs_native(struct kstatfs *st, struct statfs __user *p)
buf.f_namelen = st->f_namelen;
buf.f_frsize = st->f_frsize;
buf.f_flags = st->f_flags;
- memset(buf.f_spare, 0, sizeof(buf.f_spare));
}
if (copy_to_user(p, &buf, sizeof(buf)))
return -EFAULT;
@@ -169,6 +169,7 @@ static int do_statfs64(struct kstatfs *st, struct statfs64 __user *p)
if (sizeof(buf) == sizeof(*st))
memcpy(&buf, st, sizeof(*st));
else {
+ memset(&buf, 0, sizeof(buf));
buf.f_type = st->f_type;
buf.f_bsize = st->f_bsize;
buf.f_blocks = st->f_blocks;
@@ -180,7 +181,6 @@ static int do_statfs64(struct kstatfs *st, struct statfs64 __user *p)
buf.f_namelen = st->f_namelen;
buf.f_frsize = st->f_frsize;
buf.f_flags = st->f_flags;
- memset(buf.f_spare, 0, sizeof(buf.f_spare));
}
if (copy_to_user(p, &buf, sizeof(buf)))
return -EFAULT;
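
Moving from memset(buf.f_spare, ...) to zeroing the whole struct closes a classic info leak: when the compat path copies fields one by one, any member or padding byte it doesn't assign is stack garbage that copy_to_user() would ship to userspace. A sketch of the safe zero-then-assign shape, with a made-up struct:

    #include <string.h>

    struct stat_buf { long blocks, bfree; long spare[4]; };

    static void fill(struct stat_buf *buf)
    {
        memset(buf, 0, sizeof(*buf)); /* covers spare[], padding, skipped fields */
        buf->blocks = 100;
        buf->bfree  = 42;
        /* ...then copy the whole struct to userspace... */
    }
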
diff --git a/fs/super.c b/fs/super.c
index e255c18fa2c8..47ca7dc0e6c3 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -905,6 +905,7 @@ int reconfigure_super(struct fs_context *fc)
struct super_block *sb = fc->root->d_sb;
int retval;
bool remount_ro = false;
+ bool remount_rw = false;
bool force = fc->sb_flags & SB_FORCE;
if (fc->sb_flags_mask & ~MS_RMT_MASK)
@@ -921,7 +922,7 @@ int reconfigure_super(struct fs_context *fc)
if (!(fc->sb_flags & SB_RDONLY) && bdev_read_only(sb->s_bdev))
return -EACCES;
#endif
-
+ remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
}
@@ -951,6 +952,14 @@ int reconfigure_super(struct fs_context *fc)
if (retval)
return retval;
}
+ } else if (remount_rw) {
+ /*
+ * We set s_readonly_remount here to protect the filesystem's
+ * reconfigure code from userspace writes until
+ * reconfigure finishes.
+ */
+ sb->s_readonly_remount = 1;
+ smp_wmb();
}
if (fc->ops->reconfigure) {
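
The s_readonly_remount flag plus smp_wmb() is a publish pattern: the flag must be visible to write paths before reconfigure starts touching filesystem state, with a paired read barrier on the observing side. In portable C11 terms the same ordering would be release/acquire (an analogy, not the kernel's exact primitives):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool readonly_remount;

    /* remount path: publish the flag before reconfiguring */
    static void begin_remount_rw(void)
    {
        atomic_store_explicit(&readonly_remount, true, memory_order_release);
        /* ...run the filesystem's reconfigure code... */
    }

    /* write path: observe the flag before starting a write */
    static bool may_write(void)
    {
        return !atomic_load_explicit(&readonly_remount, memory_order_acquire);
    }
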
diff --git a/fs/sync.c b/fs/sync.c
index 67c66358f3fe..034ecb4ac3e3 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -77,7 +77,8 @@ static void sync_inodes_one_sb(struct super_block *sb, void *arg)
static void sync_fs_one_sb(struct super_block *sb, void *arg)
{
- if (!sb_rdonly(sb) && sb->s_op->sync_fs)
+ if (!sb_rdonly(sb) && !(sb->s_iflags & SB_I_SKIP_SYNC) &&
+ sb->s_op->sync_fs)
sb->s_op->sync_fs(sb, *(int *)arg);
}
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index bcb67b0cabe7..e3d1673b8ec9 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -145,6 +145,10 @@ static int alloc_branch(struct inode *inode,
*/
parent = block_to_cpu(SYSV_SB(inode->i_sb), branch[n-1].key);
bh = sb_getblk(inode->i_sb, parent);
+ if (!bh) {
+ sysv_free_block(inode->i_sb, branch[n].key);
+ break;
+ }
lock_buffer(bh);
memset(bh->b_data, 0, blocksize);
branch[n].bh = bh;
@@ -438,7 +442,7 @@ static unsigned sysv_nblocks(struct super_block *s, loff_t size)
res += blocks;
direct = 1;
}
- return blocks;
+ return res;
}
int sysv_getattr(const struct path *path, struct kstat *stat,
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 7878f145bf1b..90ce45c8ecac 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -139,6 +139,8 @@ struct tracefs_mount_opts {
kuid_t uid;
kgid_t gid;
umode_t mode;
+ /* Opt_* bitfield. */
+ unsigned int opts;
};
enum {
@@ -239,6 +241,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
kgid_t gid;
char *p;
+ opts->opts = 0;
opts->mode = TRACEFS_DEFAULT_MODE;
while ((p = strsep(&data, ",")) != NULL) {
@@ -273,24 +276,36 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
* but traditionally tracefs has ignored all mount options
*/
}
+
+ opts->opts |= BIT(token);
}
return 0;
}
-static int tracefs_apply_options(struct super_block *sb)
+static int tracefs_apply_options(struct super_block *sb, bool remount)
{
struct tracefs_fs_info *fsi = sb->s_fs_info;
struct inode *inode = sb->s_root->d_inode;
struct tracefs_mount_opts *opts = &fsi->mount_opts;
- inode->i_mode &= ~S_IALLUGO;
- inode->i_mode |= opts->mode;
+ /*
+ * On remount, only reset mode/uid/gid if they were provided as mount
+ * options.
+ */
- inode->i_uid = opts->uid;
+ if (!remount || opts->opts & BIT(Opt_mode)) {
+ inode->i_mode &= ~S_IALLUGO;
+ inode->i_mode |= opts->mode;
+ }
- /* Set all the group ids to the mount option */
- set_gid(sb->s_root, opts->gid);
+ if (!remount || opts->opts & BIT(Opt_uid))
+ inode->i_uid = opts->uid;
+
+ if (!remount || opts->opts & BIT(Opt_gid)) {
+ /* Set all the group ids to the mount option */
+ set_gid(sb->s_root, opts->gid);
+ }
return 0;
}
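
Recording each parsed token in the Opt_* bitfield lets the remount path distinguish "option explicitly given" from "option left at its previous value". The shape of that check, sketched with stand-in token values:

    enum { Opt_uid, Opt_gid, Opt_mode };      /* token values, as in tracefs */

    #define BIT(n) (1u << (n))

    static void apply(unsigned int seen_opts, int remount)
    {
        /* only touch the mode on remount if the user actually passed mode= */
        if (!remount || (seen_opts & BIT(Opt_mode))) {
            /* ...reset the root inode mode from opts->mode... */
        }
    }
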
@@ -305,7 +320,7 @@ static int tracefs_remount(struct super_block *sb, int *flags, char *data)
if (err)
goto fail;
- tracefs_apply_options(sb);
+ tracefs_apply_options(sb, true);
fail:
return err;
@@ -357,7 +372,7 @@ static int trace_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &tracefs_super_operations;
- tracefs_apply_options(sb);
+ tracefs_apply_options(sb, false);
return 0;
@@ -536,6 +551,9 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
*/
struct dentry *tracefs_create_dir(const char *name, struct dentry *parent)
{
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
return __create_dir(name, parent, &simple_dir_inode_operations);
}
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index c0b84e960b20..9cb05ef9b9dd 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -212,11 +212,10 @@ long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs)
subtract_lebs += 1;
/*
- * The GC journal head LEB is not really accessible. And since
- * different write types go to different heads, we may count only on
- * one head's space.
+ * Since different write types go to different heads, we should
+ * reserve one LEB for each head.
*/
- subtract_lebs += c->jhead_cnt - 1;
+ subtract_lebs += c->jhead_cnt;
/* We also reserve one LEB for deletions, which bypass budgeting */
subtract_lebs += 1;
@@ -403,7 +402,7 @@ static int calc_dd_growth(const struct ubifs_info *c,
dd_growth = req->dirtied_page ? c->bi.page_budget : 0;
if (req->dirtied_ino)
- dd_growth += c->bi.inode_budget << (req->dirtied_ino - 1);
+ dd_growth += c->bi.inode_budget * req->dirtied_ino;
if (req->mod_dent)
dd_growth += c->bi.dent_budget;
dd_growth += req->dirtied_ino_d;
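
The dd_growth fix replaces an exponential scale with the intended linear one: budget << (n - 1) computes budget * 2^(n-1), which only matches budget * n at n = 1 and n = 2. A two-line check:

    unsigned int budget = 100, n = 3;
    unsigned int old_growth = budget << (n - 1);  /* 400 = 100 * 2^(3-1) */
    unsigned int new_growth = budget * n;         /* 300, the intended charge */
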
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index ad292c5a43a9..b5cdac9b0368 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -552,11 +552,11 @@ out:
*/
int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
- int lnum, offs, len, err = 0, uninitialized_var(last_level), child_cnt;
+ int lnum, offs, len, err = 0, last_level, child_cnt;
int first = 1, iip;
struct ubifs_debug_info *d = c->dbg;
- union ubifs_key uninitialized_var(lower_key), upper_key, l_key, u_key;
- unsigned long long uninitialized_var(last_sqnum);
+ union ubifs_key lower_key, upper_key, l_key, u_key;
+ unsigned long long last_sqnum;
struct ubifs_idx_node *idx;
struct list_head list;
struct idx_node *i;
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 83a173feb698..c63baff39ba9 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -433,6 +433,7 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
mutex_unlock(&dir_ui->ui_mutex);
ubifs_release_budget(c, &req);
+ fscrypt_free_filename(&nm);
return 0;
@@ -1125,7 +1126,6 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
int err, sz_change, len = strlen(symname);
struct fscrypt_str disk_link;
struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
- .new_ino_d = ALIGN(len, 8),
.dirtied_ino = 1 };
struct fscrypt_name nm;
@@ -1141,6 +1141,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
* Budget request settings: new inode, new direntry and changing parent
* directory inode.
*/
+ req.new_ino_d = ALIGN(disk_link.len - 1, 8);
err = ubifs_budget_space(c, &req);
if (err)
return err;
@@ -1205,6 +1206,8 @@ out_cancel:
dir_ui->ui_size = dir->i_size;
mutex_unlock(&dir_ui->ui_mutex);
out_inode:
+ /* Free inode->i_link before inode is marked as bad. */
+ fscrypt_free_inode(inode);
make_bad_inode(inode);
iput(inode);
out_fname:
@@ -1277,7 +1280,7 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
struct ubifs_budget_req ino_req = { .dirtied_ino = 1,
.dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
struct timespec64 time;
- unsigned int uninitialized_var(saved_nlink);
+ unsigned int saved_nlink;
struct fscrypt_name old_nm, new_nm;
/*
@@ -1296,6 +1299,8 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlink) {
ubifs_assert(c, inode_is_locked(new_inode));
+ /* Budget for old inode's data when its nlink > 1. */
+ req.dirtied_ino_d = ALIGN(ubifs_inode(new_inode)->data_len, 8);
err = ubifs_purge_xattrs(new_inode);
if (err)
return err;
@@ -1538,6 +1543,10 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
return err;
}
+ err = ubifs_budget_space(c, &req);
+ if (err)
+ goto out;
+
lock_4_inodes(old_dir, new_dir, NULL, NULL);
time = current_time(old_dir);
@@ -1563,6 +1572,7 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
unlock_4_inodes(old_dir, new_dir, NULL, NULL);
ubifs_release_budget(c, &req);
+out:
fscrypt_free_filename(&fst_nm);
fscrypt_free_filename(&snd_nm);
return err;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 6069c63d833a..4d3a5cb6e9b0 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -222,7 +222,7 @@ static int write_begin_slow(struct address_space *mapping,
struct ubifs_info *c = inode->i_sb->s_fs_info;
pgoff_t index = pos >> PAGE_SHIFT;
struct ubifs_budget_req req = { .new_page = 1 };
- int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
+ int err, appending = !!(pos + len > inode->i_size);
struct page *page;
dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
@@ -426,7 +426,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
struct ubifs_info *c = inode->i_sb->s_fs_info;
struct ubifs_inode *ui = ubifs_inode(inode);
pgoff_t index = pos >> PAGE_SHIFT;
- int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
+ int err, appending = !!(pos + len > inode->i_size);
int skipped_read = 0;
struct page *page;
@@ -1031,7 +1031,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
if (page->index >= synced_i_size >> PAGE_SHIFT) {
err = inode->i_sb->s_op->write_inode(inode, NULL);
if (err)
- goto out_unlock;
+ goto out_redirty;
/*
* The inode has been written, but the write-buffer has
* not been synchronized, so in case of an unclean
@@ -1059,11 +1059,17 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
if (i_size > synced_i_size) {
err = inode->i_sb->s_op->write_inode(inode, NULL);
if (err)
- goto out_unlock;
+ goto out_redirty;
}
return do_writepage(page, len);
-
+out_redirty:
+ /*
+ * redirty_page_for_writepage() won't call ubifs_dirty_inode() because
+ * it passes the I_DIRTY_PAGES flag to __mark_inode_dirty(), so
+ * there is no need to budget space for the dirty inode.
+ */
+ redirty_page_for_writepage(wbc, page);
out_unlock:
unlock_page(page);
return err;
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index f78c3e3ef931..99f391293c5e 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -1225,7 +1225,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
int last_reference = !!(new_inode && new_inode->i_nlink == 0);
int move = (old_dir != new_dir);
- struct ubifs_inode *uninitialized_var(new_ui);
+ struct ubifs_inode *new_ui;
u8 hash_old_dir[UBIFS_HASH_ARR_SZ];
u8 hash_new_dir[UBIFS_HASH_ARR_SZ];
u8 hash_new_inode[UBIFS_HASH_ARR_SZ];
@@ -1511,7 +1511,7 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
union ubifs_key key, to_key;
struct ubifs_ino_node *ino;
struct ubifs_trun_node *trun;
- struct ubifs_data_node *uninitialized_var(dn);
+ struct ubifs_data_node *dn;
int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode);
struct ubifs_inode *ui = ubifs_inode(inode);
ino_t inum = inode->i_ino;
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index e21abf250951..6e0a153b7194 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -275,7 +275,7 @@ uint32_t ubifs_unpack_bits(const struct ubifs_info *c, uint8_t **addr, int *pos,
const int k = 32 - nrbits;
uint8_t *p = *addr;
int b = *pos;
- uint32_t uninitialized_var(val);
+ uint32_t val;
const int bytes = (nrbits + b + 7) >> 3;
ubifs_assert(c, nrbits > 0);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index b37c6b1e3325..29526040ad77 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -815,7 +815,7 @@ static int alloc_wbufs(struct ubifs_info *c)
INIT_LIST_HEAD(&c->jheads[i].buds_list);
err = ubifs_wbuf_init(c, &c->jheads[i].wbuf);
if (err)
- return err;
+ goto out_wbuf;
c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback;
c->jheads[i].wbuf.jhead = i;
@@ -823,7 +823,7 @@ static int alloc_wbufs(struct ubifs_info *c)
c->jheads[i].log_hash = ubifs_hash_get_desc(c);
if (IS_ERR(c->jheads[i].log_hash)) {
err = PTR_ERR(c->jheads[i].log_hash);
- goto out;
+ goto out_log_hash;
}
}
@@ -836,9 +836,18 @@ static int alloc_wbufs(struct ubifs_info *c)
return 0;
-out:
- while (i--)
+out_log_hash:
+ kfree(c->jheads[i].wbuf.buf);
+ kfree(c->jheads[i].wbuf.inodes);
+
+out_wbuf:
+ while (i--) {
+ kfree(c->jheads[i].wbuf.buf);
+ kfree(c->jheads[i].wbuf.inodes);
kfree(c->jheads[i].log_hash);
+ }
+ kfree(c->jheads);
+ c->jheads = NULL;
return err;
}
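
The error paths above follow the standard partial-initialization unwind: on failure at element i, first free whatever element i itself half-allocated, then walk backwards with while (i--) to tear down every fully initialized predecessor, and finally drop the array. A generic sketch:

    #include <stdlib.h>

    static int init_all(void **arr, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            arr[i] = malloc(64);
            if (!arr[i])
                goto unwind;
        }
        return 0;

    unwind:
        while (i--)              /* free only fully set-up predecessors */
            free(arr[i]);
        return -1;
    }
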
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 33742ee3945b..c8b99a86b63f 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -44,6 +44,33 @@ enum {
NOT_ON_MEDIA = 3,
};
+static void do_insert_old_idx(struct ubifs_info *c,
+ struct ubifs_old_idx *old_idx)
+{
+ struct ubifs_old_idx *o;
+ struct rb_node **p, *parent = NULL;
+
+ p = &c->old_idx.rb_node;
+ while (*p) {
+ parent = *p;
+ o = rb_entry(parent, struct ubifs_old_idx, rb);
+ if (old_idx->lnum < o->lnum)
+ p = &(*p)->rb_left;
+ else if (old_idx->lnum > o->lnum)
+ p = &(*p)->rb_right;
+ else if (old_idx->offs < o->offs)
+ p = &(*p)->rb_left;
+ else if (old_idx->offs > o->offs)
+ p = &(*p)->rb_right;
+ else {
+ ubifs_err(c, "old idx added twice!");
+ kfree(old_idx);
+ }
+ }
+ rb_link_node(&old_idx->rb, parent, p);
+ rb_insert_color(&old_idx->rb, &c->old_idx);
+}
+
/**
* insert_old_idx - record an index node obsoleted since the last commit start.
* @c: UBIFS file-system description object
@@ -69,35 +96,15 @@ enum {
*/
static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
{
- struct ubifs_old_idx *old_idx, *o;
- struct rb_node **p, *parent = NULL;
+ struct ubifs_old_idx *old_idx;
old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
if (unlikely(!old_idx))
return -ENOMEM;
old_idx->lnum = lnum;
old_idx->offs = offs;
+ do_insert_old_idx(c, old_idx);
- p = &c->old_idx.rb_node;
- while (*p) {
- parent = *p;
- o = rb_entry(parent, struct ubifs_old_idx, rb);
- if (lnum < o->lnum)
- p = &(*p)->rb_left;
- else if (lnum > o->lnum)
- p = &(*p)->rb_right;
- else if (offs < o->offs)
- p = &(*p)->rb_left;
- else if (offs > o->offs)
- p = &(*p)->rb_right;
- else {
- ubifs_err(c, "old idx added twice!");
- kfree(old_idx);
- return 0;
- }
- }
- rb_link_node(&old_idx->rb, parent, p);
- rb_insert_color(&old_idx->rb, &c->old_idx);
return 0;
}
@@ -199,23 +206,6 @@ static struct ubifs_znode *copy_znode(struct ubifs_info *c,
__set_bit(DIRTY_ZNODE, &zn->flags);
__clear_bit(COW_ZNODE, &zn->flags);
- ubifs_assert(c, !ubifs_zn_obsolete(znode));
- __set_bit(OBSOLETE_ZNODE, &znode->flags);
-
- if (znode->level != 0) {
- int i;
- const int n = zn->child_cnt;
-
- /* The children now have new parent */
- for (i = 0; i < n; i++) {
- struct ubifs_zbranch *zbr = &zn->zbranch[i];
-
- if (zbr->znode)
- zbr->znode->parent = zn;
- }
- }
-
- atomic_long_inc(&c->dirty_zn_cnt);
return zn;
}
@@ -234,6 +224,42 @@ static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
}
/**
+ * replace_znode - replace old znode with new znode.
+ * @c: UBIFS file-system description object
+ * @new_zn: new znode
+ * @old_zn: old znode
+ * @zbr: the branch of parent znode
+ *
+ * Replace old znode with new znode in TNC.
+ */
+static void replace_znode(struct ubifs_info *c, struct ubifs_znode *new_zn,
+ struct ubifs_znode *old_zn, struct ubifs_zbranch *zbr)
+{
+ ubifs_assert(c, !ubifs_zn_obsolete(old_zn));
+ __set_bit(OBSOLETE_ZNODE, &old_zn->flags);
+
+ if (old_zn->level != 0) {
+ int i;
+ const int n = new_zn->child_cnt;
+
+ /* The children now have new parent */
+ for (i = 0; i < n; i++) {
+ struct ubifs_zbranch *child = &new_zn->zbranch[i];
+
+ if (child->znode)
+ child->znode->parent = new_zn;
+ }
+ }
+
+ zbr->znode = new_zn;
+ zbr->lnum = 0;
+ zbr->offs = 0;
+ zbr->len = 0;
+
+ atomic_long_inc(&c->dirty_zn_cnt);
+}
+
+/**
* dirty_cow_znode - ensure a znode is not being committed.
* @c: UBIFS file-system description object
* @zbr: branch of znode to check
@@ -265,21 +291,32 @@ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
return zn;
if (zbr->len) {
- err = insert_old_idx(c, zbr->lnum, zbr->offs);
- if (unlikely(err))
- return ERR_PTR(err);
+ struct ubifs_old_idx *old_idx;
+
+ old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
+ if (unlikely(!old_idx)) {
+ err = -ENOMEM;
+ goto out;
+ }
+ old_idx->lnum = zbr->lnum;
+ old_idx->offs = zbr->offs;
+
err = add_idx_dirt(c, zbr->lnum, zbr->len);
- } else
- err = 0;
+ if (err) {
+ kfree(old_idx);
+ goto out;
+ }
- zbr->znode = zn;
- zbr->lnum = 0;
- zbr->offs = 0;
- zbr->len = 0;
+ do_insert_old_idx(c, old_idx);
+ }
+
+ replace_znode(c, zn, znode, zbr);
- if (unlikely(err))
- return ERR_PTR(err);
return zn;
+
+out:
+ kfree(zn);
+ return ERR_PTR(err);
}
/**
@@ -892,7 +929,7 @@ static int fallible_resolve_collision(struct ubifs_info *c,
int adding)
{
struct ubifs_znode *o_znode = NULL, *znode = *zn;
- int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n;
+ int o_n, err, cmp, unsure = 0, nn = *n;
cmp = fallible_matches_name(c, &znode->zbranch[nn], nm);
if (unlikely(cmp < 0))
@@ -1514,8 +1551,8 @@ out:
*/
int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
{
- int n, err = 0, lnum = -1, uninitialized_var(offs);
- int uninitialized_var(len);
+ int n, err = 0, lnum = -1, offs;
+ int len;
unsigned int block = key_block(c, &bu->key);
struct ubifs_znode *znode;
@@ -3054,6 +3091,21 @@ static void tnc_destroy_cnext(struct ubifs_info *c)
cnext = cnext->cnext;
if (ubifs_zn_obsolete(znode))
kfree(znode);
+ else if (!ubifs_zn_cow(znode)) {
+ /*
+ * Don't forget to update the clean znode count after a
+ * failed commit, because ubifs checks this count while
+ * closing the tnc. A non-obsolete znode could be re-dirtied
+ * during the commit, so the dirty flag is not trustworthy.
+ * The 'COW_ZNODE' flag is set for each dirty znode before
+ * committing and cleared once the znode becomes clean, so
+ * clean znodes can be counted from this flag.
+ */
+ atomic_long_inc(&c->clean_zn_cnt);
+ atomic_long_inc(&ubifs_clean_zn_cnt);
+ }
} while (cnext && cnext != c->cnext);
}
diff --git a/fs/ubifs/tnc_misc.c b/fs/ubifs/tnc_misc.c
index 49cb34c3f324..ccaf94ea5be3 100644
--- a/fs/ubifs/tnc_misc.c
+++ b/fs/ubifs/tnc_misc.c
@@ -126,8 +126,8 @@ int ubifs_search_zbranch(const struct ubifs_info *c,
const struct ubifs_znode *znode,
const union ubifs_key *key, int *n)
{
- int beg = 0, end = znode->child_cnt, uninitialized_var(mid);
- int uninitialized_var(cmp);
+ int beg = 0, end = znode->child_cnt, mid;
+ int cmp;
const struct ubifs_zbranch *zbr = &znode->zbranch[0];
ubifs_assert(c, end > beg);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index b3b7e3576e98..85e84138649b 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1593,8 +1593,13 @@ static inline int ubifs_check_hmac(const struct ubifs_info *c,
return crypto_memneq(expected, got, c->hmac_desc_len);
}
+#ifdef CONFIG_UBIFS_FS_AUTHENTICATION
void ubifs_bad_hash(const struct ubifs_info *c, const void *node,
const u8 *hash, int lnum, int offs);
+#else
+static inline void ubifs_bad_hash(const struct ubifs_info *c, const void *node,
+ const u8 *hash, int lnum, int offs) {}
+#endif
int __ubifs_node_check_hash(const struct ubifs_info *c, const void *buf,
const u8 *expected);
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 02f03fadb75b..f416b7fe092f 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -36,18 +36,41 @@ static int read_block_bitmap(struct super_block *sb,
unsigned long bitmap_nr)
{
struct buffer_head *bh = NULL;
- int retval = 0;
+ int i;
+ int max_bits, off, count;
struct kernel_lb_addr loc;
loc.logicalBlockNum = bitmap->s_extPosition;
loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
+ bitmap->s_block_bitmap[bitmap_nr] = bh;
if (!bh)
- retval = -EIO;
+ return -EIO;
- bitmap->s_block_bitmap[bitmap_nr] = bh;
- return retval;
+ /* Check consistency of Space Bitmap buffer. */
+ max_bits = sb->s_blocksize * 8;
+ if (!bitmap_nr) {
+ off = sizeof(struct spaceBitmapDesc) << 3;
+ count = min(max_bits - off, bitmap->s_nr_groups);
+ } else {
+ /*
+ * Rough check if bitmap number is too big to have any bitmap
+ * blocks reserved.
+ */
+ if (bitmap_nr >
+ (bitmap->s_nr_groups >> (sb->s_blocksize_bits + 3)) + 2)
+ return 0;
+ off = 0;
+ count = bitmap->s_nr_groups - bitmap_nr * max_bits +
+ (sizeof(struct spaceBitmapDesc) << 3);
+ count = min(count, max_bits);
+ }
+
+ for (i = 0; i < count; i++)
+ if (udf_test_bit(i + off, bh->b_data))
+ return -EFSCORRUPTED;
+ return 0;
}
static int __load_block_bitmap(struct super_block *sb,
@@ -564,7 +587,7 @@ static udf_pblk_t udf_table_new_block(struct super_block *sb,
udf_pblk_t newblock = 0;
uint32_t adsize;
uint32_t elen, goal_elen = 0;
- struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
+ struct kernel_lb_addr eloc, goal_eloc;
struct extent_position epos, goal_epos;
int8_t etype;
struct udf_inode_info *iinfo = UDF_I(table);
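The new consistency check in read_block_bitmap() above scans a window of count bits starting at off in each Space Bitmap block and treats any set bit there as on-disk corruption. A hedged userspace rendering of that scan for the first bitmap block, with made-up sizes (BLOCKSIZE, the descriptor header) and little-endian byte/bit indexing standing in for udf_test_bit():

#include <stdio.h>
#include <string.h>

#define BLOCKSIZE	2048		/* assumed UDF block size */
#define DESC_BITS	(24 * 8)	/* assumed spaceBitmapDesc size, in bits */

/* Scan 'count' bits from 'off'; any set bit means corruption. */
static int check_first_bitmap_block(const unsigned char *data, int nr_groups)
{
	int max_bits = BLOCKSIZE * 8;
	int off = DESC_BITS;
	int count = nr_groups < max_bits - off ? nr_groups : max_bits - off;
	int i;

	for (i = 0; i < count; i++)
		if (data[(i + off) / 8] & (1u << ((i + off) % 8)))
			return -1;	/* -EFSCORRUPTED in the kernel */
	return 0;
}

int main(void)
{
	unsigned char block[BLOCKSIZE];

	memset(block, 0, sizeof(block));
	printf("clean: %d\n", check_first_bitmap_block(block, 1000));
	block[DESC_BITS / 8] |= 1;	/* flip a bit the check expects clear */
	printf("corrupt: %d\n", check_first_bitmap_block(block, 1000));
	return 0;
}

Note the hunk also stores bh into s_block_bitmap[] before any error return, so the caller's teardown can release the buffer even when -EFSCORRUPTED is reported.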
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 628941a6b79a..3b3729771915 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -148,26 +148,24 @@ static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto out;
down_write(&iinfo->i_data_sem);
- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
- loff_t end = iocb->ki_pos + iov_iter_count(from);
-
- if (inode->i_sb->s_blocksize <
- (udf_file_entry_alloc_offset(inode) + end)) {
- err = udf_expand_file_adinicb(inode);
- if (err) {
- inode_unlock(inode);
- udf_debug("udf_expand_adinicb: err=%d\n", err);
- return err;
- }
- } else {
- iinfo->i_lenAlloc = max(end, inode->i_size);
- up_write(&iinfo->i_data_sem);
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
+ inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
+ iocb->ki_pos + iov_iter_count(from))) {
+ err = udf_expand_file_adinicb(inode);
+ if (err) {
+ inode_unlock(inode);
+ udf_debug("udf_expand_adinicb: err=%d\n", err);
+ return err;
}
} else
up_write(&iinfo->i_data_sem);
retval = __generic_file_write_iter(iocb, from);
out:
+ down_write(&iinfo->i_data_sem);
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && retval > 0)
+ iinfo->i_lenAlloc = inode->i_size;
+ up_write(&iinfo->i_data_sem);
inode_unlock(inode);
if (retval > 0) {
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 639aabf30eaf..fef6e5e06e3f 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -57,15 +57,15 @@ static int udf_update_inode(struct inode *, int);
static int udf_sync_inode(struct inode *inode);
static int udf_alloc_i_data(struct inode *inode, size_t size);
static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
-static int8_t udf_insert_aext(struct inode *, struct extent_position,
- struct kernel_lb_addr, uint32_t);
+static int udf_insert_aext(struct inode *, struct extent_position,
+ struct kernel_lb_addr, uint32_t);
static void udf_split_extents(struct inode *, int *, int, udf_pblk_t,
struct kernel_long_ad *, int *);
static void udf_prealloc_extents(struct inode *, int, int,
struct kernel_long_ad *, int *);
static void udf_merge_extents(struct inode *, struct kernel_long_ad *, int *);
-static void udf_update_extents(struct inode *, struct kernel_long_ad *, int,
- int, struct extent_position *);
+static int udf_update_extents(struct inode *, struct kernel_long_ad *, int,
+ int, struct extent_position *);
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
static void __udf_clear_extent_cache(struct inode *inode)
@@ -441,6 +441,12 @@ static int udf_get_block(struct inode *inode, sector_t block,
iinfo->i_next_alloc_goal++;
}
+ /*
+ * Block beyond EOF and prealloc extents? Just discard preallocation
+ * as it is not useful and complicates things.
+ */
+ if (((loff_t)block) << inode->i_blkbits > iinfo->i_lenExtents)
+ udf_discard_prealloc(inode);
udf_clear_extent_cache(inode);
phys = inode_getblk(inode, block, &err, &new);
if (!phys)
@@ -490,8 +496,6 @@ static int udf_do_extend_file(struct inode *inode,
uint32_t add;
int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
struct super_block *sb = inode->i_sb;
- struct kernel_lb_addr prealloc_loc = {};
- uint32_t prealloc_len = 0;
struct udf_inode_info *iinfo;
int err;
@@ -512,19 +516,6 @@ static int udf_do_extend_file(struct inode *inode,
~(sb->s_blocksize - 1);
}
- /* Last extent are just preallocated blocks? */
- if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
- EXT_NOT_RECORDED_ALLOCATED) {
- /* Save the extent so that we can reattach it to the end */
- prealloc_loc = last_ext->extLocation;
- prealloc_len = last_ext->extLength;
- /* Mark the extent as a hole */
- last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
- (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
- last_ext->extLocation.logicalBlockNum = 0;
- last_ext->extLocation.partitionReferenceNum = 0;
- }
-
/* Can we merge with the previous extent? */
if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
EXT_NOT_RECORDED_NOT_ALLOCATED) {
@@ -537,8 +528,10 @@ static int udf_do_extend_file(struct inode *inode,
}
if (fake) {
- udf_add_aext(inode, last_pos, &last_ext->extLocation,
- last_ext->extLength, 1);
+ err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
+ last_ext->extLength, 1);
+ if (err < 0)
+ goto out_err;
count++;
} else {
struct kernel_lb_addr tmploc;
@@ -552,7 +545,7 @@ static int udf_do_extend_file(struct inode *inode,
* more extents, we may need to enter possible following
* empty indirect extent.
*/
- if (new_block_bytes || prealloc_len)
+ if (new_block_bytes)
udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
}
@@ -572,7 +565,7 @@ static int udf_do_extend_file(struct inode *inode,
err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
if (err)
- return err;
+ goto out_err;
count++;
}
if (new_block_bytes) {
@@ -581,22 +574,11 @@ static int udf_do_extend_file(struct inode *inode,
err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
if (err)
- return err;
+ goto out_err;
count++;
}
out:
- /* Do we have some preallocated blocks saved? */
- if (prealloc_len) {
- err = udf_add_aext(inode, last_pos, &prealloc_loc,
- prealloc_len, 1);
- if (err)
- return err;
- last_ext->extLocation = prealloc_loc;
- last_ext->extLength = prealloc_len;
- count++;
- }
-
/* last_pos should point to the last written extent... */
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
last_pos->offset -= sizeof(struct short_ad);
@@ -606,19 +588,28 @@ out:
return -EIO;
return count;
+out_err:
+ /* Remove extents we've created so far */
+ udf_clear_extent_cache(inode);
+ udf_truncate_extents(inode);
+ return err;
}
/* Extend the final block of the file to final_block_len bytes */
static void udf_do_extend_final_block(struct inode *inode,
struct extent_position *last_pos,
struct kernel_long_ad *last_ext,
- uint32_t final_block_len)
+ uint32_t new_elen)
{
- struct super_block *sb = inode->i_sb;
uint32_t added_bytes;
- added_bytes = final_block_len -
- (last_ext->extLength & (sb->s_blocksize - 1));
+ /*
+ * Extent already large enough? It may be already rounded up to block
+ * size...
+ */
+ if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK))
+ return;
+ added_bytes = new_elen - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
last_ext->extLength += added_bytes;
UDF_I(inode)->i_lenExtents += added_bytes;
@@ -635,12 +626,12 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
int8_t etype;
struct super_block *sb = inode->i_sb;
sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
- unsigned long partial_final_block;
+ loff_t new_elen;
int adsize;
struct udf_inode_info *iinfo = UDF_I(inode);
struct kernel_long_ad extent;
int err = 0;
- int within_final_block;
+ bool within_last_ext;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
adsize = sizeof(struct short_ad);
@@ -649,8 +640,17 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
else
BUG();
+ /*
+ * When creating hole in file, just don't bother with preserving
+ * preallocation. It likely won't be very useful anyway.
+ */
+ udf_discard_prealloc(inode);
+
etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
- within_final_block = (etype != -1);
+ within_last_ext = (etype != -1);
+ /* We don't expect extents past EOF... */
+ WARN_ON_ONCE(within_last_ext &&
+ elen > ((loff_t)offset + 1) << inode->i_blkbits);
if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
(epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
@@ -666,19 +666,17 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
extent.extLength |= etype << 30;
}
- partial_final_block = newsize & (sb->s_blocksize - 1);
+ new_elen = ((loff_t)offset << inode->i_blkbits) |
+ (newsize & (sb->s_blocksize - 1));
/* File has extent covering the new size (could happen when extending
* inside a block)?
*/
- if (within_final_block) {
+ if (within_last_ext) {
/* Extending file within the last file block */
- udf_do_extend_final_block(inode, &epos, &extent,
- partial_final_block);
+ udf_do_extend_final_block(inode, &epos, &extent, new_elen);
} else {
- loff_t add = ((loff_t)offset << sb->s_blocksize_bits) |
- partial_final_block;
- err = udf_do_extend_file(inode, &epos, &extent, add);
+ err = udf_do_extend_file(inode, &epos, &extent, new_elen);
}
if (err < 0)
@@ -700,7 +698,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
struct kernel_lb_addr eloc, tmpeloc;
int c = 1;
loff_t lbcount = 0, b_off = 0;
- udf_pblk_t newblocknum, newblock;
+ udf_pblk_t newblocknum, newblock = 0;
sector_t offset = 0;
int8_t etype;
struct udf_inode_info *iinfo = UDF_I(inode);
@@ -779,10 +777,11 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
goto out_free;
}
- /* Are we beyond EOF? */
+ /* Are we beyond EOF and preallocated extent? */
if (etype == -1) {
int ret;
loff_t hole_len;
+
isBeyondEOF = true;
if (count) {
if (c)
@@ -802,25 +801,22 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
if (ret < 0) {
*err = ret;
- newblock = 0;
goto out_free;
}
c = 0;
offset = 0;
count += ret;
- /* We are not covered by a preallocated extent? */
- if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
- EXT_NOT_RECORDED_ALLOCATED) {
- /* Is there any real extent? - otherwise we overwrite
- * the fake one... */
- if (count)
- c = !c;
- laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
- inode->i_sb->s_blocksize;
- memset(&laarr[c].extLocation, 0x00,
- sizeof(struct kernel_lb_addr));
- count++;
- }
+ /*
+ * Is there any real extent? - otherwise we overwrite the fake
+ * one...
+ */
+ if (count)
+ c = !c;
+ laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
+ inode->i_sb->s_blocksize;
+ memset(&laarr[c].extLocation, 0x00,
+ sizeof(struct kernel_lb_addr));
+ count++;
endnum = c + 1;
lastblock = 1;
} else {
@@ -867,7 +863,6 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
goal, err);
if (!newblocknum) {
*err = -ENOSPC;
- newblock = 0;
goto out_free;
}
if (isBeyondEOF)
@@ -893,7 +888,9 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
/* write back the new extents, inserting new extents if the new number
* of extents is greater than the old number, and deleting extents if
* the new number of extents is less than the old number */
- udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
+ *err = udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
+ if (*err < 0)
+ goto out_free;
newblock = udf_get_pblock(inode->i_sb, newblocknum,
iinfo->i_location.partitionReferenceNum, 0);
@@ -1097,23 +1094,8 @@ static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr,
blocksize - 1) >> blocksize_bits)))) {
if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
- (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
- blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
- lip1->extLength = (lip1->extLength -
- (li->extLength &
- UDF_EXTENT_LENGTH_MASK) +
- UDF_EXTENT_LENGTH_MASK) &
- ~(blocksize - 1);
- li->extLength = (li->extLength &
- UDF_EXTENT_FLAG_MASK) +
- (UDF_EXTENT_LENGTH_MASK + 1) -
- blocksize;
- lip1->extLocation.logicalBlockNum =
- li->extLocation.logicalBlockNum +
- ((li->extLength &
- UDF_EXTENT_LENGTH_MASK) >>
- blocksize_bits);
- } else {
+ (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
+ blocksize - 1) <= UDF_EXTENT_LENGTH_MASK) {
li->extLength = lip1->extLength +
(((li->extLength &
UDF_EXTENT_LENGTH_MASK) +
@@ -1176,21 +1158,30 @@ static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr,
}
}
-static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr,
- int startnum, int endnum,
- struct extent_position *epos)
+static int udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr,
+ int startnum, int endnum,
+ struct extent_position *epos)
{
int start = 0, i;
struct kernel_lb_addr tmploc;
uint32_t tmplen;
+ int err;
if (startnum > endnum) {
for (i = 0; i < (startnum - endnum); i++)
udf_delete_aext(inode, *epos);
} else if (startnum < endnum) {
for (i = 0; i < (endnum - startnum); i++) {
- udf_insert_aext(inode, *epos, laarr[i].extLocation,
- laarr[i].extLength);
+ err = udf_insert_aext(inode, *epos,
+ laarr[i].extLocation,
+ laarr[i].extLength);
+ /*
+ * If we fail here, we are likely corrupting the extent
+ * list and leaking blocks. At least stop early to
+ * limit the damage.
+ */
+ if (err < 0)
+ return err;
udf_next_aext(inode, epos, &laarr[i].extLocation,
&laarr[i].extLength, 1);
start++;
@@ -1202,6 +1193,7 @@ static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr
udf_write_aext(inode, epos, &laarr[i].extLocation,
laarr[i].extLength, 1);
}
+ return 0;
}
struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
@@ -1404,6 +1396,7 @@ reread:
ret = -EIO;
goto out;
}
+ iinfo->i_hidden = hidden_inode;
iinfo->i_unique = 0;
iinfo->i_lenEAttr = 0;
iinfo->i_lenExtents = 0;
@@ -1739,8 +1732,12 @@ static int udf_update_inode(struct inode *inode, int do_sync)
if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
- else
- fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
+ else {
+ if (iinfo->i_hidden)
+ fe->fileLinkCount = cpu_to_le16(0);
+ else
+ fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
+ }
fe->informationLength = cpu_to_le64(inode->i_size);
@@ -1911,8 +1908,13 @@ struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
if (!inode)
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+ if (!(inode->i_state & I_NEW)) {
+ if (UDF_I(inode)->i_hidden != hidden_inode) {
+ iput(inode);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
return inode;
+ }
memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
err = udf_read_inode(inode, hidden_inode);
@@ -2226,12 +2228,13 @@ int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
return etype;
}
-static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
- struct kernel_lb_addr neloc, uint32_t nelen)
+static int udf_insert_aext(struct inode *inode, struct extent_position epos,
+ struct kernel_lb_addr neloc, uint32_t nelen)
{
struct kernel_lb_addr oeloc;
uint32_t oelen;
int8_t etype;
+ int err;
if (epos.bh)
get_bh(epos.bh);
@@ -2241,10 +2244,10 @@ static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
neloc = oeloc;
nelen = (etype << 30) | oelen;
}
- udf_add_aext(inode, &epos, &neloc, nelen, 1);
+ err = udf_add_aext(inode, &epos, &neloc, nelen, 1);
brelse(epos.bh);
- return (nelen >> 30);
+ return err;
}
int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 3c009562375d..c062b41a1e70 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -241,7 +241,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
poffset - lfi);
else {
if (!copy_name) {
- copy_name = kmalloc(UDF_NAME_LEN,
+ copy_name = kmalloc(UDF_NAME_LEN_CS0,
GFP_NOFS);
if (!copy_name) {
fi = ERR_PTR(-ENOMEM);
@@ -1091,8 +1091,9 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
return -EINVAL;
ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
- if (IS_ERR(ofi)) {
- retval = PTR_ERR(ofi);
+ if (!ofi || IS_ERR(ofi)) {
+ if (IS_ERR(ofi))
+ retval = PTR_ERR(ofi);
goto end_rename;
}
@@ -1101,8 +1102,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
brelse(ofibh.sbh);
tloc = lelb_to_cpu(ocfi.icb.extLocation);
- if (!ofi || udf_get_lb_pblock(old_dir->i_sb, &tloc, 0)
- != old_inode->i_ino)
+ if (udf_get_lb_pblock(old_dir->i_sb, &tloc, 0) != old_inode->i_ino)
goto end_rename;
nfi = udf_find_entry(new_dir, &new_dentry->d_name, &nfibh, &ncfi);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 5193b94c0683..0f8b3cb35585 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -147,6 +147,7 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
ei->i_next_alloc_goal = 0;
ei->i_strat4096 = 0;
ei->i_streamdir = 0;
+ ei->i_hidden = 0;
init_rwsem(&ei->i_data_sem);
ei->cached_extent.lstart = -1;
spin_lock_init(&ei->i_extent_cache_lock);
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index 63a47f1e1d52..fd3e4a8671f0 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -120,60 +120,42 @@ void udf_truncate_tail_extent(struct inode *inode)
void udf_discard_prealloc(struct inode *inode)
{
- struct extent_position epos = { NULL, 0, {0, 0} };
+ struct extent_position epos = {};
+ struct extent_position prev_epos = {};
struct kernel_lb_addr eloc;
uint32_t elen;
uint64_t lbcount = 0;
int8_t etype = -1, netype;
- int adsize;
struct udf_inode_info *iinfo = UDF_I(inode);
+ int bsize = 1 << inode->i_blkbits;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ||
- inode->i_size == iinfo->i_lenExtents)
+ ALIGN(inode->i_size, bsize) == ALIGN(iinfo->i_lenExtents, bsize))
return;
- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- adsize = sizeof(struct short_ad);
- else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- adsize = sizeof(struct long_ad);
- else
- adsize = 0;
-
epos.block = iinfo->i_location;
/* Find the last extent in the file */
- while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
- etype = netype;
+ while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 0)) != -1) {
+ brelse(prev_epos.bh);
+ prev_epos = epos;
+ if (prev_epos.bh)
+ get_bh(prev_epos.bh);
+
+ etype = udf_next_aext(inode, &epos, &eloc, &elen, 1);
lbcount += elen;
}
if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
- epos.offset -= adsize;
lbcount -= elen;
- extent_trunc(inode, &epos, &eloc, etype, elen, 0);
- if (!epos.bh) {
- iinfo->i_lenAlloc =
- epos.offset -
- udf_file_entry_alloc_offset(inode);
- mark_inode_dirty(inode);
- } else {
- struct allocExtDesc *aed =
- (struct allocExtDesc *)(epos.bh->b_data);
- aed->lengthAllocDescs =
- cpu_to_le32(epos.offset -
- sizeof(struct allocExtDesc));
- if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
- UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
- udf_update_tag(epos.bh->b_data, epos.offset);
- else
- udf_update_tag(epos.bh->b_data,
- sizeof(struct allocExtDesc));
- mark_buffer_dirty_inode(epos.bh, inode);
- }
+ udf_delete_aext(inode, prev_epos);
+ udf_free_blocks(inode->i_sb, inode, &eloc, 0,
+ DIV_ROUND_UP(elen, 1 << inode->i_blkbits));
}
/* This inode entry is in-memory only and thus we don't have to mark
* the inode dirty */
iinfo->i_lenExtents = lbcount;
brelse(epos.bh);
+ brelse(prev_epos.bh);
}
static void udf_update_alloc_ext_desc(struct inode *inode,
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index 4245d1f63258..0372d0d14f72 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -44,7 +44,8 @@ struct udf_inode_info {
unsigned i_use : 1; /* unallocSpaceEntry */
unsigned i_strat4096 : 1;
unsigned i_streamdir : 1;
- unsigned reserved : 25;
+ unsigned i_hidden : 1; /* hidden system inode */
+ unsigned reserved : 24;
union {
struct short_ad *i_sad;
struct long_ad *i_lad;
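The udf_i.h hunk above carves the new 1-bit i_hidden flag out of the reserved pad (25 -> 24 bits), so the bitfield word keeps its size. A quick check of that invariant with a trimmed-down stand-in struct (the field set is reduced for brevity and is not the real udf_inode_info):

#include <stdio.h>

struct udf_flags {			/* trimmed stand-in, not the real struct */
	unsigned i_use : 1;
	unsigned i_strat4096 : 1;
	unsigned i_streamdir : 1;
	unsigned i_hidden : 1;		/* hidden system inode */
	unsigned reserved : 24;
};

int main(void)
{
	/* 28 bits still pack into a single 32-bit storage unit */
	printf("sizeof = %zu\n", sizeof(struct udf_flags));
	return 0;
}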
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 8eace7a633d3..d91cbfb08983 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -51,6 +51,8 @@
#define MF_DUPLICATE_MD 0x01
#define MF_MIRROR_FE_LOADED 0x02
+#define EFSCORRUPTED EUCLEAN
+
struct udf_meta_data {
__u32 s_meta_file_loc;
__u32 s_mirror_file_loc;
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 622569007b53..2142cbd1dde2 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -247,7 +247,7 @@ static int udf_name_from_CS0(struct super_block *sb,
}
if (translate) {
- if (str_o_len <= 2 && str_o[0] == '.' &&
+ if (str_o_len > 0 && str_o_len <= 2 && str_o[0] == '.' &&
(str_o_len == 1 || str_o[1] == '.'))
needsCRC = 1;
if (needsCRC) {
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ec57bbb6bb05..740853465356 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1018,7 +1018,7 @@ static int resolve_userfault_fork(struct userfaultfd_ctx *ctx,
int fd;
fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, new,
- O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS));
+ O_RDONLY | (new->flags & UFFD_SHARED_FCNTL_FLAGS));
if (fd < 0)
return fd;
@@ -1969,7 +1969,7 @@ SYSCALL_DEFINE1(userfaultfd, int, flags)
mmgrab(ctx->mm);
fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, ctx,
- O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
+ O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS));
if (fd < 0) {
mmdrop(ctx->mm);
kmem_cache_free(userfaultfd_ctx_cachep, ctx);
diff --git a/fs/verity/enable.c b/fs/verity/enable.c
index 1370bfd17e87..39459b1eff75 100644
--- a/fs/verity/enable.c
+++ b/fs/verity/enable.c
@@ -350,25 +350,27 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
goto out_drop_write;
err = enable_verity(filp, &arg);
- if (err)
- goto out_allow_write_access;
/*
- * Some pages of the file may have been evicted from pagecache after
- * being used in the Merkle tree construction, then read into pagecache
- * again by another process reading from the file concurrently. Since
- * these pages didn't undergo verification against the file measurement
- * which fs-verity now claims to be enforcing, we have to wipe the
- * pagecache to ensure that all future reads are verified.
+ * We no longer drop the inode's pagecache after enabling verity. This
+ * used to be done to try to avoid a race condition where pages could be
+ * evicted after being used in the Merkle tree construction, then
+ * re-instantiated by a concurrent read. Such pages are unverified, and
+ * the backing storage could have filled them with different content, so
+ * they shouldn't be used to fulfill reads once verity is enabled.
+ *
+ * But, dropping the pagecache has a big performance impact, and it
+ * doesn't fully solve the race condition anyway. So for those reasons,
+ * and also because this race condition isn't very important relatively
+ * speaking (especially for small-ish files, where the chance of a page
+ * being used, evicted, *and* re-instantiated all while enabling verity
+ * is quite small), we no longer drop the inode's pagecache.
*/
- filemap_write_and_wait(inode->i_mapping);
- invalidate_inode_pages2(inode->i_mapping);
/*
* allow_write_access() is needed to pair with deny_write_access().
* Regardless, the filesystem won't allow writing to verity files.
*/
-out_allow_write_access:
allow_write_access(filp);
out_drop_write:
mnt_drop_write_file(filp);
diff --git a/fs/verity/signature.c b/fs/verity/signature.c
index c8b255232de5..cd8cbc895a4f 100644
--- a/fs/verity/signature.c
+++ b/fs/verity/signature.c
@@ -58,6 +58,22 @@ int fsverity_verify_signature(const struct fsverity_info *vi,
return -EBADMSG;
}
+ if (fsverity_keyring->keys.nr_leaves_on_tree == 0) {
+ /*
+ * The ".fs-verity" keyring is empty, due to builtin signatures
+ * being supported by the kernel but not actually being used.
+ * In this case, verify_pkcs7_signature() would always return an
+ * error, usually ENOKEY. It could also be EBADMSG if the
+ * PKCS#7 is malformed, but that isn't very important to
+ * distinguish. So, just skip to ENOKEY to avoid the attack
+ * surface of the PKCS#7 parser, which would otherwise be
+ * reachable by any task able to execute FS_IOC_ENABLE_VERITY.
+ */
+ fsverity_err(inode,
+ "fs-verity keyring is empty, rejecting signed file!");
+ return -ENOKEY;
+ }
+
d = kzalloc(sizeof(*d) + hash_alg->digest_size, GFP_KERNEL);
if (!d)
return -ENOMEM;
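The keyring check above is a fail-closed guard: with zero trusted keys, no signature can possibly verify, so the code returns -ENOKEY without ever handing attacker-controllable PKCS#7 data to the parser. A minimal sketch of the shape of that guard (the keyring struct and verify step are stand-ins, not the kernel's):

#include <stdio.h>

struct keyring { int nr_keys; };

static int verify_signature(const struct keyring *kr,
			    const unsigned char *sig, unsigned long sig_len)
{
	if (kr->nr_keys == 0)
		return -1;	/* -ENOKEY: skip the parser entirely */
	/* ...PKCS#7 parsing/verification would only run past this point... */
	(void)sig;
	(void)sig_len;
	return 0;
}

int main(void)
{
	struct keyring empty = { 0 };
	unsigned char sig[16] = { 0 };

	printf("%d\n", verify_signature(&empty, sig, sizeof(sig)));
	return 0;
}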
diff --git a/fs/verity/verify.c b/fs/verity/verify.c
index 3e8f2de44667..a7e24f949e3d 100644
--- a/fs/verity/verify.c
+++ b/fs/verity/verify.c
@@ -259,15 +259,15 @@ EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
int __init fsverity_init_workqueue(void)
{
/*
- * Use an unbound workqueue to allow bios to be verified in parallel
- * even when they happen to complete on the same CPU. This sacrifices
- * locality, but it's worthwhile since hashing is CPU-intensive.
+ * Use a high-priority workqueue to prioritize verification work, which
+ * blocks reads from completing, over regular application tasks.
*
- * Also use a high-priority workqueue to prioritize verification work,
- * which blocks reads from completing, over regular application tasks.
+ * For performance reasons, don't use an unbound workqueue. Using an
+ * unbound workqueue for crypto operations causes excessive scheduler
+ * latency on ARM64.
*/
fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
- WQ_UNBOUND | WQ_HIGHPRI,
+ WQ_HIGHPRI,
num_online_cpus());
if (!fsverity_read_workqueue)
return -ENOMEM;
diff --git a/fs/xattr.c b/fs/xattr.c
index 494313bce9cc..abd49f462a7d 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -1014,7 +1014,7 @@ static int xattr_list_one(char **buffer, ssize_t *remaining_size,
ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
char *buffer, size_t size)
{
- bool trusted = capable(CAP_SYS_ADMIN);
+ bool trusted = ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
struct simple_xattr *xattr;
ssize_t remaining_size = size;
int err = 0;
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 436f686a9891..1193fd6e4bad 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -684,8 +684,10 @@ xfs_alloc_update_counters(
xfs_trans_agblocks_delta(tp, len);
if (unlikely(be32_to_cpu(agf->agf_freeblks) >
- be32_to_cpu(agf->agf_length)))
+ be32_to_cpu(agf->agf_length))) {
+ xfs_buf_mark_corrupt(agbp);
return -EFSCORRUPTED;
+ }
xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
return 0;
@@ -751,6 +753,7 @@ xfs_alloc_ag_vextent_small(
bp = xfs_btree_get_bufs(args->mp, args->tp, args->agno, fbno);
if (!bp) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, args->mp);
error = -EFSCORRUPTED;
goto error;
}
@@ -1995,24 +1998,32 @@ xfs_alloc_longest_free_extent(
return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
}
+/*
+ * Compute the minimum length of the AGFL in the given AG. If @pag is NULL,
+ * return the largest possible minimum length.
+ */
unsigned int
xfs_alloc_min_freelist(
struct xfs_mount *mp,
struct xfs_perag *pag)
{
+ /* AG btrees have at least 1 level. */
+ static const uint8_t fake_levels[XFS_BTNUM_AGF] = {1, 1, 1};
+ const uint8_t *levels = pag ? pag->pagf_levels : fake_levels;
unsigned int min_free;
+ ASSERT(mp->m_ag_maxlevels > 0);
+
/* space needed by-bno freespace btree */
- min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1,
+ min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1,
mp->m_ag_maxlevels);
/* space needed by-size freespace btree */
- min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
+ min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1,
mp->m_ag_maxlevels);
/* space needed reverse mapping used space btree */
if (xfs_sb_version_hasrmapbt(&mp->m_sb))
- min_free += min_t(unsigned int,
- pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
- mp->m_rmap_maxlevels);
+ min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1,
+ mp->m_rmap_maxlevels);
return min_free;
}
@@ -2087,8 +2098,10 @@ xfs_free_agfl_block(
return error;
bp = xfs_btree_get_bufs(tp->t_mountp, tp, agno, agbno);
- if (!bp)
+ if (!bp) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, tp->t_mountp);
return -EFSCORRUPTED;
+ }
xfs_trans_binval(tp, bp);
return 0;
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 510ca6974604..c83ff610ecb6 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -1007,7 +1007,7 @@ restart:
* The INCOMPLETE flag means that we will find the "old"
* attr, not the "new" one.
*/
- args->flags |= XFS_ATTR_INCOMPLETE;
+ args->op_flags |= XFS_DA_OP_INCOMPLETE;
state = xfs_da_state_alloc();
state->args = args;
state->mp = mp;
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index de33efc9b4f9..2b74b6e9a354 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -251,14 +251,6 @@ xfs_attr3_leaf_verify(
return fa;
/*
- * In recovery there is a transient state where count == 0 is valid
- * because we may have transitioned an empty shortform attr to a leaf
- * if the attr didn't fit in shortform.
- */
- if (!xfs_log_in_recovery(mp) && ichdr.count == 0)
- return __this_address;
-
- /*
* firstused is the block offset of the first name info structure.
* Make sure it doesn't go off the block or crash into the header.
*/
@@ -443,7 +435,7 @@ xfs_attr_copy_value(
*========================================================================*/
/*
- * Query whether the requested number of additional bytes of extended
+ * Query whether the total requested number of attr fork bytes of extended
* attribute space will be able to fit inline.
*
* Returns zero if not, else the di_forkoff fork offset to be used in the
@@ -463,8 +455,14 @@ xfs_attr_shortform_bytesfit(
int maxforkoff;
int offset;
+ /*
+ * Check if the new size could fit at all first:
+ */
+ if (bytes > XFS_LITINO(mp))
+ return 0;
+
/* rounded down */
- offset = (XFS_LITINO(mp, dp->i_d.di_version) - bytes) >> 3;
+ offset = (XFS_LITINO(mp) - bytes) >> 3;
if (dp->i_d.di_format == XFS_DINODE_FMT_DEV) {
minforkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
@@ -531,8 +529,7 @@ xfs_attr_shortform_bytesfit(
minforkoff = roundup(minforkoff, 8) >> 3;
/* attr fork btree root can have at least this many key/ptr pairs */
- maxforkoff = XFS_LITINO(mp, dp->i_d.di_version) -
- XFS_BMDR_SPACE_CALC(MINABTPTRS);
+ maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
maxforkoff = maxforkoff >> 3; /* rounded down */
if (offset >= maxforkoff)
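The early bytes > XFS_LITINO(mp) test added above makes the impossible case explicit before offset is computed; otherwise (XFS_LITINO(mp) - bytes) >> 3 would produce a negative intermediate that the later comparisons would have to catch. A small sketch of the guard, with made-up numbers standing in for the literal inode area size:

#include <stdio.h>

/* Sketch: 'litino' plays the role of XFS_LITINO(mp). */
static int bytesfit(int litino, int bytes)
{
	if (bytes > litino)
		return 0;		/* cannot fit inline at all */
	return (litino - bytes) >> 3;	/* candidate fork offset, rounded down */
}

int main(void)
{
	printf("%d\n", bytesfit(1872, 100000));	/* 0: rejected up front */
	printf("%d\n", bytesfit(1872, 200));	/* plausible offset */
	return 0;
}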
@@ -2287,8 +2284,10 @@ xfs_attr3_leaf_lookup_int(
leaf = bp->b_addr;
xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
- if (ichdr.count >= args->geo->blksize / 8)
+ if (ichdr.count >= args->geo->blksize / 8) {
+ xfs_buf_mark_corrupt(bp);
return -EFSCORRUPTED;
+ }
/*
* Binary search. (note: small blocks will skip this loop)
@@ -2304,10 +2303,14 @@ xfs_attr3_leaf_lookup_int(
else
break;
}
- if (!(probe >= 0 && (!ichdr.count || probe < ichdr.count)))
+ if (!(probe >= 0 && (!ichdr.count || probe < ichdr.count))) {
+ xfs_buf_mark_corrupt(bp);
return -EFSCORRUPTED;
- if (!(span <= 4 || be32_to_cpu(entry->hashval) == hashval))
+ }
+ if (!(span <= 4 || be32_to_cpu(entry->hashval) == hashval)) {
+ xfs_buf_mark_corrupt(bp);
return -EFSCORRUPTED;
+ }
/*
* Since we may have duplicate hashval's, find the first matching
@@ -2339,8 +2342,8 @@ xfs_attr3_leaf_lookup_int(
* If we are looking for INCOMPLETE entries, show only those.
* If we are looking for complete entries, show only those.
*/
- if ((args->flags & XFS_ATTR_INCOMPLETE) !=
- (entry->flags & XFS_ATTR_INCOMPLETE)) {
+ if (!!(args->op_flags & XFS_DA_OP_INCOMPLETE) !=
+ !!(entry->flags & XFS_ATTR_INCOMPLETE)) {
continue;
}
if (entry->flags & XFS_ATTR_LOCAL) {
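The !! normalization above matters because XFS_DA_OP_INCOMPLETE (0x0040, an op flag) and XFS_ATTR_INCOMPLETE (an on-disk entry flag in a different bit position) can never be equal as raw masked values; only their truth values are comparable. A compilable illustration, with the entry-flag value assumed for the sketch:

#include <stdio.h>

#define XFS_DA_OP_INCOMPLETE	0x0040	/* from the hunk above */
#define XFS_ATTR_INCOMPLETE	0x80	/* assumed entry-flag bit for this sketch */

static int incomplete_state_matches(unsigned int op_flags,
				    unsigned int entry_flags)
{
	/* !! turns "which bit" into "is it set", making the compare valid */
	return !!(op_flags & XFS_DA_OP_INCOMPLETE) ==
	       !!(entry_flags & XFS_ATTR_INCOMPLETE);
}

int main(void)
{
	printf("%d\n", incomplete_state_matches(XFS_DA_OP_INCOMPLETE, 0x80)); /* 1 */
	printf("%d\n", incomplete_state_matches(0, 0));			      /* 1 */
	printf("%d\n", incomplete_state_matches(XFS_DA_OP_INCOMPLETE, 0));   /* 0 */
	return 0;
}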
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index 7b74e18becff..38c05d6ae2aa 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -17,13 +17,27 @@ struct xfs_inode;
struct xfs_trans;
/*
- * Used to keep a list of "remote value" extents when unlinking an inode.
+ * Incore version of the attribute leaf header.
*/
-typedef struct xfs_attr_inactive_list {
- xfs_dablk_t valueblk; /* block number of value bytes */
- int valuelen; /* number of bytes in value */
-} xfs_attr_inactive_list_t;
-
+struct xfs_attr3_icleaf_hdr {
+ uint32_t forw;
+ uint32_t back;
+ uint16_t magic;
+ uint16_t count;
+ uint16_t usedbytes;
+ /*
+ * Firstused is 32-bit here instead of 16-bit like the on-disk variant
+ * to support maximum fsb size of 64k without overflow issues throughout
+ * the attr code. Instead, the overflow condition is handled on
+ * conversion to/from disk.
+ */
+ uint32_t firstused;
+ __u8 holes;
+ struct {
+ uint16_t base;
+ uint16_t size;
+ } freemap[XFS_ATTR_LEAF_MAPSIZE];
+};
/*========================================================================
* Function prototypes for the kernel.
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 3e39b7d40f25..de9096b8a47c 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -25,6 +25,23 @@
#define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */
/*
+ * Remote Attribute Values
+ * =======================
+ *
+ * Remote extended attribute values are conceptually simple -- they're written
+ * to data blocks mapped by an inode's attribute fork, and they have an upper
+ * size limit of 64k. Setting a value does not involve the XFS log.
+ *
+ * However, on a v5 filesystem, maximally sized remote attr values require one
+ * block more than 64k worth of space to hold both the remote attribute value
+ * and its header (64 bytes). On a 4k block filesystem this results in a 68k buffer;
+ * on a 64k block filesystem, this would be a 128k buffer. Note that the log
+ * format can only handle a dirty buffer of XFS_MAX_BLOCKSIZE length (64k).
+ * Therefore, we /must/ ensure that remote attribute value buffers never touch
+ * the logging system and therefore never have a log item.
+ */
+
+/*
* Each contiguous block has a header, so it is not just a simple attribute
* length to FSB conversion.
*/
@@ -400,17 +417,25 @@ xfs_attr_rmtval_get(
(map[i].br_startblock != HOLESTARTBLOCK));
dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
- error = xfs_trans_read_buf(mp, args->trans,
- mp->m_ddev_targp,
- dblkno, dblkcnt, 0, &bp,
- &xfs_attr3_rmt_buf_ops);
- if (error)
+ bp = xfs_buf_read(mp->m_ddev_targp, dblkno, dblkcnt, 0,
+ &xfs_attr3_rmt_buf_ops);
+ if (!bp)
+ return -ENOMEM;
+ error = bp->b_error;
+ if (error) {
+ xfs_buf_ioerror_alert(bp, __func__);
+ xfs_buf_relse(bp);
+
+ /* bad CRC means corrupted metadata */
+ if (error == -EFSBADCRC)
+ error = -EFSCORRUPTED;
return error;
+ }
error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino,
&offset, &valuelen,
&dst);
- xfs_trans_brelse(args->trans, bp);
+ xfs_buf_relse(bp);
if (error)
return error;
@@ -551,6 +576,32 @@ xfs_attr_rmtval_set(
return 0;
}
+/* Mark stale any incore buffers for the remote value. */
+int
+xfs_attr_rmtval_stale(
+ struct xfs_inode *ip,
+ struct xfs_bmbt_irec *map,
+ xfs_buf_flags_t incore_flags)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_buf *bp;
+
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+ ASSERT((map->br_startblock != DELAYSTARTBLOCK) &&
+ (map->br_startblock != HOLESTARTBLOCK));
+
+ bp = xfs_buf_incore(mp->m_ddev_targp,
+ XFS_FSB_TO_DADDR(mp, map->br_startblock),
+ XFS_FSB_TO_BB(mp, map->br_blockcount), incore_flags);
+ if (bp) {
+ xfs_buf_stale(bp);
+ xfs_buf_relse(bp);
+ }
+
+ return 0;
+}
+
/*
* Remove the value associated with an attribute by deleting the
* out-of-line buffer that it is stored on.
@@ -559,7 +610,6 @@ int
xfs_attr_rmtval_remove(
struct xfs_da_args *args)
{
- struct xfs_mount *mp = args->dp->i_mount;
xfs_dablk_t lblkno;
int blkcnt;
int error;
@@ -574,9 +624,6 @@ xfs_attr_rmtval_remove(
blkcnt = args->rmtblkcnt;
while (blkcnt > 0) {
struct xfs_bmbt_irec map;
- struct xfs_buf *bp;
- xfs_daddr_t dblkno;
- int dblkcnt;
int nmap;
/*
@@ -588,21 +635,9 @@ xfs_attr_rmtval_remove(
if (error)
return error;
ASSERT(nmap == 1);
- ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
- (map.br_startblock != HOLESTARTBLOCK));
-
- dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
- dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
-
- /*
- * If the "remote" value is in the cache, remove it.
- */
- bp = xfs_buf_incore(mp->m_ddev_targp, dblkno, dblkcnt, XBF_TRYLOCK);
- if (bp) {
- xfs_buf_stale(bp);
- xfs_buf_relse(bp);
- bp = NULL;
- }
+ error = xfs_attr_rmtval_stale(args->dp, &map, XBF_TRYLOCK);
+ if (error)
+ return error;
lblkno += map.br_blockcount;
blkcnt -= map.br_blockcount;
diff --git a/fs/xfs/libxfs/xfs_attr_remote.h b/fs/xfs/libxfs/xfs_attr_remote.h
index 9d20b66ad379..6fb4572845ce 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.h
+++ b/fs/xfs/libxfs/xfs_attr_remote.h
@@ -11,5 +11,7 @@ int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
int xfs_attr_rmtval_get(struct xfs_da_args *args);
int xfs_attr_rmtval_set(struct xfs_da_args *args);
int xfs_attr_rmtval_remove(struct xfs_da_args *args);
+int xfs_attr_rmtval_stale(struct xfs_inode *ip, struct xfs_bmbt_irec *map,
+ xfs_buf_flags_t incore_flags);
#endif /* __XFS_ATTR_REMOTE_H__ */
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index c114d24be619..1e0fab62cd7d 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -192,14 +192,12 @@ xfs_default_attroffset(
struct xfs_mount *mp = ip->i_mount;
uint offset;
- if (mp->m_sb.sb_inodesize == 256) {
- offset = XFS_LITINO(mp, ip->i_d.di_version) -
- XFS_BMDR_SPACE_CALC(MINABTPTRS);
- } else {
+ if (mp->m_sb.sb_inodesize == 256)
+ offset = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
+ else
offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
- }
- ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
+ ASSERT(offset < XFS_LITINO(mp));
return offset;
}
@@ -729,6 +727,7 @@ xfs_bmap_extents_to_btree(
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
abp = xfs_btree_get_bufl(mp, tp, args.fsbno);
if (!abp) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto out_unreserve_dquot;
}
@@ -1084,6 +1083,7 @@ xfs_bmap_add_attrfork(
if (XFS_IFORK_Q(ip))
goto trans_cancel;
if (ip->i_d.di_anextents != 0) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto trans_cancel;
}
@@ -1374,7 +1374,8 @@ xfs_bmap_last_before(
case XFS_DINODE_FMT_EXTENTS:
break;
default:
- return -EIO;
+ ASSERT(0);
+ return -EFSCORRUPTED;
}
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
@@ -1474,8 +1475,10 @@ xfs_bmap_last_offset(
return 0;
if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
- return -EIO;
+ XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) {
+ ASSERT(0);
+ return -EFSCORRUPTED;
+ }
error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
if (error || is_empty)
@@ -5871,8 +5874,9 @@ xfs_bmap_insert_extents(
XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
del_cursor);
- if (stop_fsb >= got.br_startoff + got.br_blockcount) {
- error = -EIO;
+ if (stop_fsb > got.br_startoff) {
+ ASSERT(0);
+ error = -EFSCORRUPTED;
goto del_cursor;
}
@@ -5919,8 +5923,8 @@ del_cursor:
* @split_fsb is a block where the extents is split. If split_fsb lies in a
* hole or the first block of extents, just return 0.
*/
-STATIC int
-xfs_bmap_split_extent_at(
+int
+xfs_bmap_split_extent(
struct xfs_trans *tp,
struct xfs_inode *ip,
xfs_fileoff_t split_fsb)
@@ -6031,34 +6035,6 @@ del_cursor:
return error;
}
-int
-xfs_bmap_split_extent(
- struct xfs_inode *ip,
- xfs_fileoff_t split_fsb)
-{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_trans *tp;
- int error;
-
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
- XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
- if (error)
- return error;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
- error = xfs_bmap_split_extent_at(tp, ip, split_fsb);
- if (error)
- goto out;
-
- return xfs_trans_commit(tp);
-
-out:
- xfs_trans_cancel(tp);
- return error;
-}
-
/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 093716a074fb..a51c2b13a51a 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -158,17 +158,22 @@ static inline int xfs_bmapi_whichfork(int bmapi_flags)
{ BMAP_ATTRFORK, "ATTR" }, \
{ BMAP_COWFORK, "COW" }
+/* Return true if the extent is an allocated extent, written or not. */
+static inline bool xfs_bmap_is_real_extent(struct xfs_bmbt_irec *irec)
+{
+ return irec->br_startblock != HOLESTARTBLOCK &&
+ irec->br_startblock != DELAYSTARTBLOCK &&
+ !isnullstartblock(irec->br_startblock);
+}
/*
* Return true if the extent is a real, allocated extent, or false if it is a
* delayed allocation, and unwritten extent or a hole.
*/
-static inline bool xfs_bmap_is_real_extent(struct xfs_bmbt_irec *irec)
+static inline bool xfs_bmap_is_written_extent(struct xfs_bmbt_irec *irec)
{
- return irec->br_state != XFS_EXT_UNWRITTEN &&
- irec->br_startblock != HOLESTARTBLOCK &&
- irec->br_startblock != DELAYSTARTBLOCK &&
- !isnullstartblock(irec->br_startblock);
+ return xfs_bmap_is_real_extent(irec) &&
+ irec->br_state != XFS_EXT_UNWRITTEN;
}
/*
@@ -222,7 +227,8 @@ int xfs_bmap_can_insert_extents(struct xfs_inode *ip, xfs_fileoff_t off,
int xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
bool *done, xfs_fileoff_t stop_fsb);
-int xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_offset);
+int xfs_bmap_split_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+ xfs_fileoff_t split_offset);
int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
xfs_fileoff_t off, xfs_filblks_t len, xfs_filblks_t prealloc,
struct xfs_bmbt_irec *got, struct xfs_iext_cursor *cur,
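The xfs_bmap.h hunk above splits the old predicate in two: xfs_bmap_is_real_extent() now accepts any allocated extent, and xfs_bmap_is_written_extent() layers the unwritten-state check on top. A userspace sketch of that relationship, with stand-in constants rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define HOLESTARTBLOCK	(-2LL)		/* stand-ins, not the kernel values */
#define DELAYSTARTBLOCK	(-1LL)

enum ext_state { XFS_EXT_NORM, XFS_EXT_UNWRITTEN };

struct irec {
	long long br_startblock;
	enum ext_state br_state;
};

/* allocated, written or not (isnullstartblock() omitted in this sketch) */
static bool is_real_extent(const struct irec *i)
{
	return i->br_startblock != HOLESTARTBLOCK &&
	       i->br_startblock != DELAYSTARTBLOCK;
}

/* allocated and not unwritten */
static bool is_written_extent(const struct irec *i)
{
	return is_real_extent(i) && i->br_state != XFS_EXT_UNWRITTEN;
}

int main(void)
{
	struct irec unwritten = { 100, XFS_EXT_UNWRITTEN };

	printf("real=%d written=%d\n",
	       is_real_extent(&unwritten), is_written_extent(&unwritten));
	return 0;
}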
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 71de937f9e64..121251651fea 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -354,20 +354,17 @@ xfs_btree_free_block(
*/
void
xfs_btree_del_cursor(
- xfs_btree_cur_t *cur, /* btree cursor */
- int error) /* del because of error */
+ struct xfs_btree_cur *cur, /* btree cursor */
+ int error) /* del because of error */
{
- int i; /* btree level */
+ int i; /* btree level */
/*
- * Clear the buffer pointers, and release the buffers.
- * If we're doing this in the face of an error, we
- * need to make sure to inspect all of the entries
- * in the bc_bufs array for buffers to be unlocked.
- * This is because some of the btree code works from
- * level n down to 0, and if we get an error along
- * the way we won't have initialized all the entries
- * down to 0.
+ * Clear the buffer pointers and release the buffers. If we're doing
+ * this because of an error, inspect all of the entries in the bc_bufs
+ * array for buffers to be unlocked. This is because some of the btree
+ * code works from level n down to 0, and if we get an error along the
+ * way we won't have initialized all the entries down to 0.
*/
for (i = 0; i < cur->bc_nlevels; i++) {
if (cur->bc_bufs[i])
@@ -375,15 +372,10 @@ xfs_btree_del_cursor(
else if (!error)
break;
}
- /*
- * Can't free a bmap cursor without having dealt with the
- * allocated indirect blocks' accounting.
- */
+
ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP ||
- cur->bc_private.b.allocated == 0);
- /*
- * Free the cursor.
- */
+ cur->bc_private.b.allocated == 0 ||
+ XFS_FORCED_SHUTDOWN(cur->bc_mp));
kmem_zone_free(xfs_btree_cur_zone, cur);
}
@@ -1820,6 +1812,7 @@ xfs_btree_lookup_get_block(
out_bad:
*blkp = NULL;
+ xfs_buf_mark_corrupt(bp);
xfs_trans_brelse(cur->bc_tp, bp);
return -EFSCORRUPTED;
}
@@ -1867,8 +1860,10 @@ xfs_btree_lookup(
XFS_BTREE_STATS_INC(cur, lookup);
/* No such thing as a zero-level tree. */
- if (cur->bc_nlevels == 0)
+ if (cur->bc_nlevels == 0) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, cur->bc_mp);
return -EFSCORRUPTED;
+ }
block = NULL;
keyno = 0;
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 4fd1223c1bd5..12ef16c157dc 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -504,6 +504,7 @@ xfs_da3_split(
node = oldblk->bp->b_addr;
if (node->hdr.info.forw) {
if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
+ xfs_buf_mark_corrupt(oldblk->bp);
error = -EFSCORRUPTED;
goto out;
}
@@ -516,6 +517,7 @@ xfs_da3_split(
node = oldblk->bp->b_addr;
if (node->hdr.info.back) {
if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
+ xfs_buf_mark_corrupt(oldblk->bp);
error = -EFSCORRUPTED;
goto out;
}
@@ -1541,8 +1543,10 @@ xfs_da3_node_lookup_int(
break;
}
- if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC)
+ if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
+ xfs_buf_mark_corrupt(blk->bp);
return -EFSCORRUPTED;
+ }
blk->magic = XFS_DA_NODE_MAGIC;
@@ -1554,15 +1558,18 @@ xfs_da3_node_lookup_int(
btree = dp->d_ops->node_tree_p(node);
/* Tree taller than we can handle; bail out! */
- if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
+ if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
+ xfs_buf_mark_corrupt(blk->bp);
return -EFSCORRUPTED;
+ }
/* Check the level from the root. */
if (blkno == args->geo->leafblk)
expected_level = nodehdr.level - 1;
- else if (expected_level != nodehdr.level)
+ else if (expected_level != nodehdr.level) {
+ xfs_buf_mark_corrupt(blk->bp);
return -EFSCORRUPTED;
- else
+ } else
expected_level--;
max = nodehdr.count;
@@ -1612,12 +1619,17 @@ xfs_da3_node_lookup_int(
}
/* We can't point back to the root. */
- if (blkno == args->geo->leafblk)
+ if (blkno == args->geo->leafblk) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
+ dp->i_mount);
return -EFSCORRUPTED;
+ }
}
- if (expected_level != 0)
+ if (expected_level != 0) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, dp->i_mount);
return -EFSCORRUPTED;
+ }
/*
* A leaf block that ends in the hashval that we are interested in
diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h
index ae0bbd20d9ca..588e4674e931 100644
--- a/fs/xfs/libxfs/xfs_da_btree.h
+++ b/fs/xfs/libxfs/xfs_da_btree.h
@@ -82,6 +82,7 @@ typedef struct xfs_da_args {
#define XFS_DA_OP_OKNOENT 0x0008 /* lookup/add op, ENOENT ok, else die */
#define XFS_DA_OP_CILOOKUP 0x0010 /* lookup to return CI name if found */
#define XFS_DA_OP_ALLOCVAL 0x0020 /* lookup to alloc buffer if found */
+#define XFS_DA_OP_INCOMPLETE 0x0040 /* lookup INCOMPLETE attr keys */
#define XFS_DA_OP_FLAGS \
{ XFS_DA_OP_JUSTCHECK, "JUSTCHECK" }, \
@@ -89,7 +90,8 @@ typedef struct xfs_da_args {
{ XFS_DA_OP_ADDNAME, "ADDNAME" }, \
{ XFS_DA_OP_OKNOENT, "OKNOENT" }, \
{ XFS_DA_OP_CILOOKUP, "CILOOKUP" }, \
- { XFS_DA_OP_ALLOCVAL, "ALLOCVAL" }
+ { XFS_DA_OP_ALLOCVAL, "ALLOCVAL" }, \
+ { XFS_DA_OP_INCOMPLETE, "INCOMPLETE" }
/*
* Storage for holding state during Btree searches and split/join ops.
@@ -125,6 +127,19 @@ typedef struct xfs_da_state {
} xfs_da_state_t;
/*
+ * In-core version of the node header to abstract the differences in the v2 and
+ * v3 disk format of the headers. Callers need to convert to/from disk format as
+ * appropriate.
+ */
+struct xfs_da3_icnode_hdr {
+ uint32_t forw;
+ uint32_t back;
+ uint16_t magic;
+ uint16_t count;
+ uint16_t level;
+};
+
+/*
* Utility macros to aid in logging changed structure fields.
*/
#define XFS_DA_LOGOFF(BASE, ADDR) ((char *)(ADDR) - (char *)(BASE))
diff --git a/fs/xfs/libxfs/xfs_da_format.c b/fs/xfs/libxfs/xfs_da_format.c
index b1ae572496b6..31bb250c1899 100644
--- a/fs/xfs/libxfs/xfs_da_format.c
+++ b/fs/xfs/libxfs/xfs_da_format.c
@@ -13,6 +13,7 @@
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
+#include "xfs_dir2_priv.h"
/*
* Shortform directory ops
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index ae654e06b2fb..222ee48da5e8 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -94,19 +94,6 @@ struct xfs_da3_intnode {
};
/*
- * In-core version of the node header to abstract the differences in the v2 and
- * v3 disk format of the headers. Callers need to convert to/from disk format as
- * appropriate.
- */
-struct xfs_da3_icnode_hdr {
- uint32_t forw;
- uint32_t back;
- uint16_t magic;
- uint16_t count;
- uint16_t level;
-};
-
-/*
* Directory version 2.
*
* There are 4 possible formats:
@@ -434,14 +421,6 @@ struct xfs_dir3_leaf_hdr {
__be32 pad; /* 64 bit alignment */
};
-struct xfs_dir3_icleaf_hdr {
- uint32_t forw;
- uint32_t back;
- uint16_t magic;
- uint16_t count;
- uint16_t stale;
-};
-
/*
* Leaf block entry.
*/
@@ -521,19 +500,6 @@ struct xfs_dir3_free {
#define XFS_DIR3_FREE_CRC_OFF offsetof(struct xfs_dir3_free, hdr.hdr.crc)
/*
- * In core version of the free block header, abstracted away from on-disk format
- * differences. Use this in the code, and convert to/from the disk version using
- * xfs_dir3_free_hdr_from_disk/xfs_dir3_free_hdr_to_disk.
- */
-struct xfs_dir3_icfree_hdr {
- uint32_t magic;
- uint32_t firstdb;
- uint32_t nvalid;
- uint32_t nused;
-
-};
-
-/*
* Single block format.
*
* The single block format looks like the following drawing on disk:
@@ -710,29 +676,6 @@ struct xfs_attr3_leafblock {
};
/*
- * incore, neutral version of the attribute leaf header
- */
-struct xfs_attr3_icleaf_hdr {
- uint32_t forw;
- uint32_t back;
- uint16_t magic;
- uint16_t count;
- uint16_t usedbytes;
- /*
- * firstused is 32-bit here instead of 16-bit like the on-disk variant
- * to support maximum fsb size of 64k without overflow issues throughout
- * the attr code. Instead, the overflow condition is handled on
- * conversion to/from disk.
- */
- uint32_t firstused;
- __u8 holes;
- struct {
- uint16_t base;
- uint16_t size;
- } freemap[XFS_ATTR_LEAF_MAPSIZE];
-};
-
-/*
* Special value to represent fs block size in the leaf header firstused field.
* Only used when block size overflows the 2-bytes available on disk.
*/
@@ -740,8 +683,6 @@ struct xfs_attr3_icleaf_hdr {
/*
* Flags used in the leaf_entry[i].flags field.
- * NOTE: the INCOMPLETE bit must not collide with the flags bits specified
- * on the system call, they are "or"ed together for various operations.
*/
#define XFS_ATTR_LOCAL_BIT 0 /* attr is stored locally */
#define XFS_ATTR_ROOT_BIT 1 /* limit access to trusted attrs */
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 22557527cfdb..3a78a189ea01 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -16,6 +16,8 @@
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
+#include "xfs_icache.h"
+#include "xfs_log.h"
/*
* Deferred Operations in XFS
@@ -178,6 +180,19 @@ static const struct xfs_defer_op_type *defer_op_types[] = {
[XFS_DEFER_OPS_TYPE_AGFL_FREE] = &xfs_agfl_free_defer_type,
};
+static void
+xfs_defer_create_intent(
+ struct xfs_trans *tp,
+ struct xfs_defer_pending *dfp,
+ bool sort)
+{
+ const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type];
+
+ if (!dfp->dfp_intent)
+ dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work,
+ dfp->dfp_count, sort);
+}
+
/*
* For each pending item in the intake list, log its intent item and the
* associated extents, then add the entire intake list to the end of
@@ -187,17 +202,11 @@ STATIC void
xfs_defer_create_intents(
struct xfs_trans *tp)
{
- struct list_head *li;
struct xfs_defer_pending *dfp;
- const struct xfs_defer_op_type *ops;
list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
- ops = defer_op_types[dfp->dfp_type];
- dfp->dfp_intent = ops->create_intent(tp, dfp->dfp_count);
trace_xfs_defer_create_intent(tp->t_mountp, dfp);
- list_sort(tp->t_mountp, &dfp->dfp_work, ops->diff_items);
- list_for_each(li, &dfp->dfp_work)
- ops->log_item(tp, dfp->dfp_intent, li);
+ xfs_defer_create_intent(tp, dfp, true);
}
}
@@ -234,10 +243,13 @@ xfs_defer_trans_roll(
struct xfs_log_item *lip;
struct xfs_buf *bplist[XFS_DEFER_OPS_NR_BUFS];
struct xfs_inode *iplist[XFS_DEFER_OPS_NR_INODES];
+ unsigned int ordered = 0; /* bitmap */
int bpcount = 0, ipcount = 0;
int i;
int error;
+ BUILD_BUG_ON(NBBY * sizeof(ordered) < XFS_DEFER_OPS_NR_BUFS);
+
list_for_each_entry(lip, &tp->t_items, li_trans) {
switch (lip->li_type) {
case XFS_LI_BUF:
@@ -248,7 +260,10 @@ xfs_defer_trans_roll(
ASSERT(0);
return -EFSCORRUPTED;
}
- xfs_trans_dirty_buf(tp, bli->bli_buf);
+ if (bli->bli_flags & XFS_BLI_ORDERED)
+ ordered |= (1U << bpcount);
+ else
+ xfs_trans_dirty_buf(tp, bli->bli_buf);
bplist[bpcount++] = bli->bli_buf;
}
break;
@@ -289,6 +304,8 @@ xfs_defer_trans_roll(
/* Rejoin the buffers and dirty them so the log moves forward. */
for (i = 0; i < bpcount; i++) {
xfs_trans_bjoin(tp, bplist[i]);
+ if (ordered & (1U << i))
+ xfs_trans_ordered_buf(tp, bplist[i]);
xfs_trans_bhold(tp, bplist[i]);
}
@@ -346,6 +363,106 @@ xfs_defer_cancel_list(
}
/*
+ * Prevent a log intent item from pinning the tail of the log by logging a
+ * done item to release the intent item; and then log a new intent item.
+ * The caller should provide a fresh transaction and roll it after we're done.
+ */
+static int
+xfs_defer_relog(
+ struct xfs_trans **tpp,
+ struct list_head *dfops)
+{
+ struct xlog *log = (*tpp)->t_mountp->m_log;
+ struct xfs_defer_pending *dfp;
+ xfs_lsn_t threshold_lsn = NULLCOMMITLSN;
+
+ ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);
+
+ list_for_each_entry(dfp, dfops, dfp_list) {
+ /*
+ * If the log intent item for this deferred op is not a part of
+ * the current log checkpoint, relog the intent item to keep
+ * the log tail moving forward. We're ok with this being racy
+ * because an incorrect decision means we'll be a little slower
+ * at pushing the tail.
+ */
+ if (dfp->dfp_intent == NULL ||
+ xfs_log_item_in_current_chkpt(dfp->dfp_intent))
+ continue;
+
+ /*
+ * Figure out where we need the tail to be in order to maintain
+ * the minimum required free space in the log. Only sample
+ * the log threshold once per call.
+ */
+ if (threshold_lsn == NULLCOMMITLSN) {
+ threshold_lsn = xlog_grant_push_threshold(log, 0);
+ if (threshold_lsn == NULLCOMMITLSN)
+ break;
+ }
+ if (XFS_LSN_CMP(dfp->dfp_intent->li_lsn, threshold_lsn) >= 0)
+ continue;
+
+ trace_xfs_defer_relog_intent((*tpp)->t_mountp, dfp);
+ XFS_STATS_INC((*tpp)->t_mountp, defer_relog);
+ dfp->dfp_intent = xfs_trans_item_relog(dfp->dfp_intent, *tpp);
+ }
+
+ if ((*tpp)->t_flags & XFS_TRANS_DIRTY)
+ return xfs_defer_trans_roll(tpp);
+ return 0;
+}
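After skipping intents already in the current checkpoint, the policy inside xfs_defer_relog() reduces to one comparison per pending intent: if the intent's commit LSN sits behind the grant-push threshold, relog it so it stops pinning the log tail. Boiled down to a sketch with plain integers standing in for xfs_lsn_t and XFS_LSN_CMP():

#include <stdio.h>

typedef long long lsn_t;	/* plain integers stand in for xfs_lsn_t */

static int needs_relog(lsn_t intent_lsn, lsn_t threshold_lsn)
{
	/* behind the push threshold: relog so the tail can move forward */
	return intent_lsn < threshold_lsn;
}

int main(void)
{
	printf("%d\n", needs_relog(100, 250));	/* 1: stale, relog it */
	printf("%d\n", needs_relog(300, 250));	/* 0: recent enough */
	return 0;
}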
+
+/*
+ * Log an intent-done item for the first pending intent, and finish the work
+ * items.
+ */
+static int
+xfs_defer_finish_one(
+ struct xfs_trans *tp,
+ struct xfs_defer_pending *dfp)
+{
+ const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type];
+ void *state = NULL;
+ struct list_head *li, *n;
+ int error;
+
+ trace_xfs_defer_pending_finish(tp->t_mountp, dfp);
+
+ dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count);
+ list_for_each_safe(li, n, &dfp->dfp_work) {
+ list_del(li);
+ dfp->dfp_count--;
+ error = ops->finish_item(tp, li, dfp->dfp_done, &state);
+ if (error == -EAGAIN) {
+ /*
+ * Caller wants a fresh transaction; put the work item
+ * back on the list and log a new log intent item to
+ * replace the old one. See "Requesting a Fresh
+ * Transaction while Finishing Deferred Work" above.
+ */
+ list_add(li, &dfp->dfp_work);
+ dfp->dfp_count++;
+ dfp->dfp_done = NULL;
+ dfp->dfp_intent = NULL;
+ xfs_defer_create_intent(tp, dfp, false);
+ }
+
+ if (error)
+ goto out;
+ }
+
+ /* Done with the dfp, free it. */
+ list_del(&dfp->dfp_list);
+ kmem_free(dfp);
+out:
+ if (ops->finish_cleanup)
+ ops->finish_cleanup(tp, state, error);
+ return error;
+}
+
+/*
* Finish all the pending work. This involves logging intent items for
* any work items that wandered in since the last transaction roll (if
* one has even happened), rolling the transaction, and finishing the
@@ -358,11 +475,7 @@ xfs_defer_finish_noroll(
struct xfs_trans **tp)
{
struct xfs_defer_pending *dfp;
- struct list_head *li;
- struct list_head *n;
- void *state;
int error = 0;
- const struct xfs_defer_op_type *ops;
LIST_HEAD(dop_pending);
ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
@@ -371,87 +484,44 @@ xfs_defer_finish_noroll(
/* Until we run out of pending work to finish... */
while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
- /* log intents and pull in intake items */
- xfs_defer_create_intents(*tp);
- list_splice_tail_init(&(*tp)->t_dfops, &dop_pending);
-
/*
- * Roll the transaction.
+ * Deferred items that are created in the process of finishing
+ * other deferred work items should be queued at the head of
+ * the pending list, which puts them ahead of the deferred work
+ * that was created by the caller. This keeps the number of
+ * pending work items to a minimum, which decreases the amount
+ * of time that any one intent item can stick around in memory,
+ * pinning the log tail.
*/
+ xfs_defer_create_intents(*tp);
+ list_splice_init(&(*tp)->t_dfops, &dop_pending);
+
error = xfs_defer_trans_roll(tp);
if (error)
- goto out;
+ goto out_shutdown;
+
+ /* Possibly relog intent items to keep the log moving. */
+ error = xfs_defer_relog(tp, &dop_pending);
+ if (error)
+ goto out_shutdown;
- /* Log an intent-done item for the first pending item. */
dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
dfp_list);
- ops = defer_op_types[dfp->dfp_type];
- trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
- dfp->dfp_done = ops->create_done(*tp, dfp->dfp_intent,
- dfp->dfp_count);
-
- /* Finish the work items. */
- state = NULL;
- list_for_each_safe(li, n, &dfp->dfp_work) {
- list_del(li);
- dfp->dfp_count--;
- error = ops->finish_item(*tp, li, dfp->dfp_done,
- &state);
- if (error == -EAGAIN) {
- /*
- * Caller wants a fresh transaction;
- * put the work item back on the list
- * and jump out.
- */
- list_add(li, &dfp->dfp_work);
- dfp->dfp_count++;
- break;
- } else if (error) {
- /*
- * Clean up after ourselves and jump out.
- * xfs_defer_cancel will take care of freeing
- * all these lists and stuff.
- */
- if (ops->finish_cleanup)
- ops->finish_cleanup(*tp, state, error);
- goto out;
- }
- }
- if (error == -EAGAIN) {
- /*
- * Caller wants a fresh transaction, so log a
- * new log intent item to replace the old one
- * and roll the transaction. See "Requesting
- * a Fresh Transaction while Finishing
- * Deferred Work" above.
- */
- dfp->dfp_intent = ops->create_intent(*tp,
- dfp->dfp_count);
- dfp->dfp_done = NULL;
- list_for_each(li, &dfp->dfp_work)
- ops->log_item(*tp, dfp->dfp_intent, li);
- } else {
- /* Done with the dfp, free it. */
- list_del(&dfp->dfp_list);
- kmem_free(dfp);
- }
-
- if (ops->finish_cleanup)
- ops->finish_cleanup(*tp, state, error);
- }
-
-out:
- if (error) {
- xfs_defer_trans_abort(*tp, &dop_pending);
- xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
- trace_xfs_defer_finish_error(*tp, error);
- xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
- xfs_defer_cancel(*tp);
- return error;
+ error = xfs_defer_finish_one(*tp, dfp);
+ if (error && error != -EAGAIN)
+ goto out_shutdown;
}
trace_xfs_defer_finish_done(*tp, _RET_IP_);
return 0;
+
+out_shutdown:
+ xfs_defer_trans_abort(*tp, &dop_pending);
+ xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
+ trace_xfs_defer_finish_error(*tp, error);
+ xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
+ xfs_defer_cancel(*tp);
+ return error;
}
int
@@ -552,3 +622,137 @@ xfs_defer_move(
xfs_defer_reset(stp);
}
+
+/*
+ * Prepare a chain of fresh deferred ops work items to be completed later. Log
+ * recovery requires the ability to put off until later the actual finishing
+ * work so that it can process unfinished items recovered from the log in
+ * correct order.
+ *
+ * Create and log intent items for all the work that we're capturing so that we
+ * can be assured that the items will get replayed if the system goes down
+ * before log recovery gets a chance to finish the work it put off. The entire
+ * deferred ops state is transferred to the capture structure and the
+ * transaction is then ready for the caller to commit it. If there are no
+ * intent items to capture, this function returns NULL.
+ *
+ * If capture_ip is not NULL, the capture structure will obtain an extra
+ * reference to the inode.
+ */
+static struct xfs_defer_capture *
+xfs_defer_ops_capture(
+ struct xfs_trans *tp,
+ struct xfs_inode *capture_ip)
+{
+ struct xfs_defer_capture *dfc;
+
+ if (list_empty(&tp->t_dfops))
+ return NULL;
+
+ /* Create an object to capture the defer ops. */
+ dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS);
+ INIT_LIST_HEAD(&dfc->dfc_list);
+ INIT_LIST_HEAD(&dfc->dfc_dfops);
+
+ xfs_defer_create_intents(tp);
+
+ /* Move the dfops chain and transaction state to the capture struct. */
+ list_splice_init(&tp->t_dfops, &dfc->dfc_dfops);
+ dfc->dfc_tpflags = tp->t_flags & XFS_TRANS_LOWMODE;
+ tp->t_flags &= ~XFS_TRANS_LOWMODE;
+
+ /* Capture the remaining block reservations along with the dfops. */
+ dfc->dfc_blkres = tp->t_blk_res - tp->t_blk_res_used;
+ dfc->dfc_rtxres = tp->t_rtx_res - tp->t_rtx_res_used;
+
+ /* Preserve the log reservation size. */
+ dfc->dfc_logres = tp->t_log_res;
+
+ /*
+ * Grab an extra reference to this inode and attach it to the capture
+ * structure.
+ */
+ if (capture_ip) {
+ ihold(VFS_I(capture_ip));
+ dfc->dfc_capture_ip = capture_ip;
+ }
+
+ return dfc;
+}
+
+/* Release all resources that we used to capture deferred ops. */
+void
+xfs_defer_ops_release(
+ struct xfs_mount *mp,
+ struct xfs_defer_capture *dfc)
+{
+ xfs_defer_cancel_list(mp, &dfc->dfc_dfops);
+ if (dfc->dfc_capture_ip)
+ xfs_irele(dfc->dfc_capture_ip);
+ kmem_free(dfc);
+}
+
+/*
+ * Capture any deferred ops and commit the transaction. This is the last step
+ * needed to finish a log intent item that we recovered from the log. If any
+ * of the deferred ops operate on an inode, the caller must pass in that inode
+ * so that the reference can be transferred to the capture structure. The
+ * caller must hold ILOCK_EXCL on the inode, and must unlock it before calling
+ * xfs_defer_ops_continue.
+ */
+int
+xfs_defer_ops_capture_and_commit(
+ struct xfs_trans *tp,
+ struct xfs_inode *capture_ip,
+ struct list_head *capture_list)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_defer_capture *dfc;
+ int error;
+
+ ASSERT(!capture_ip || xfs_isilocked(capture_ip, XFS_ILOCK_EXCL));
+
+ /* If we don't capture anything, commit the transaction and exit. */
+ dfc = xfs_defer_ops_capture(tp, capture_ip);
+ if (!dfc)
+ return xfs_trans_commit(tp);
+
+ /* Commit the transaction and add the capture structure to the list. */
+ error = xfs_trans_commit(tp);
+ if (error) {
+ xfs_defer_ops_release(mp, dfc);
+ return error;
+ }
+
+ list_add_tail(&dfc->dfc_list, capture_list);
+ return 0;
+}
+
+/*
+ * Attach a chain of captured deferred ops to a new transaction and free the
+ * capture structure. If an inode was captured, it will be passed back to the
+ * caller with ILOCK_EXCL held and joined to the transaction with lockflags==0.
+ * The caller now owns the inode reference.
+ */
+void
+xfs_defer_ops_continue(
+ struct xfs_defer_capture *dfc,
+ struct xfs_trans *tp,
+ struct xfs_inode **captured_ipp)
+{
+ ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+ ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY));
+
+ /* Lock and join the captured inode to the new transaction. */
+ if (dfc->dfc_capture_ip) {
+ xfs_ilock(dfc->dfc_capture_ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, dfc->dfc_capture_ip, 0);
+ }
+ *captured_ipp = dfc->dfc_capture_ip;
+
+ /* Move captured dfops chain and state to the transaction. */
+ list_splice_init(&dfc->dfc_dfops, &tp->t_dfops);
+ tp->t_flags |= dfc->dfc_tpflags;
+
+ kmem_free(dfc);
+}
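Aside, not part of the patch: a sketch of the expected capture/continue lifecycle in log recovery, with caller-side names invented and error handling elided:

	LIST_HEAD(capture_list);
	struct xfs_defer_capture *dfc, *next;
	struct xfs_inode *ip;

	/* While replaying an intent: stash its dfops instead of finishing. */
	error = xfs_defer_ops_capture_and_commit(tp, ip, &capture_list);

	/* After all intents have been replayed: finish each captured chain. */
	list_for_each_entry_safe(dfc, next, &capture_list, dfc_list) {
		list_del_init(&dfc->dfc_list);
		/* ...allocate a fresh transaction into tp... */
		xfs_defer_ops_continue(dfc, tp, &ip);
		/* ...xfs_defer_finish(&tp), commit, then unlock and release ip... */
	}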
diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
index 7c28d7608ac6..4c3248d47a35 100644
--- a/fs/xfs/libxfs/xfs_defer.h
+++ b/fs/xfs/libxfs/xfs_defer.h
@@ -7,6 +7,7 @@
#define __XFS_DEFER_H__
struct xfs_defer_op_type;
+struct xfs_defer_capture;
/*
* Header for deferred operation list.
@@ -28,7 +29,7 @@ enum xfs_defer_ops_type {
struct xfs_defer_pending {
struct list_head dfp_list; /* pending items */
struct list_head dfp_work; /* work items */
- void *dfp_intent; /* log intent item */
+ struct xfs_log_item *dfp_intent; /* log intent item */
void *dfp_done; /* log done item */
unsigned int dfp_count; /* # extent items */
enum xfs_defer_ops_type dfp_type;
@@ -43,15 +44,15 @@ void xfs_defer_move(struct xfs_trans *dtp, struct xfs_trans *stp);
/* Description of a deferred type. */
struct xfs_defer_op_type {
- void (*abort_intent)(void *);
- void *(*create_done)(struct xfs_trans *, void *, unsigned int);
+ struct xfs_log_item *(*create_intent)(struct xfs_trans *tp,
+ struct list_head *items, unsigned int count, bool sort);
+ void (*abort_intent)(struct xfs_log_item *intent);
+ void *(*create_done)(struct xfs_trans *tp, struct xfs_log_item *intent,
+ unsigned int count);
int (*finish_item)(struct xfs_trans *, struct list_head *, void *,
void **);
void (*finish_cleanup)(struct xfs_trans *, void *, int);
void (*cancel_item)(struct list_head *);
- int (*diff_items)(void *, struct list_head *, struct list_head *);
- void *(*create_intent)(struct xfs_trans *, uint);
- void (*log_item)(struct xfs_trans *, void *, struct list_head *);
unsigned int max_items;
};
@@ -61,4 +62,40 @@ extern const struct xfs_defer_op_type xfs_rmap_update_defer_type;
extern const struct xfs_defer_op_type xfs_extent_free_defer_type;
extern const struct xfs_defer_op_type xfs_agfl_free_defer_type;
+/*
+ * This structure enables a dfops user to detach the chain of deferred
+ * operations from a transaction so that they can be continued later.
+ */
+struct xfs_defer_capture {
+ /* List of other capture structures. */
+ struct list_head dfc_list;
+
+ /* Deferred ops state saved from the transaction. */
+ struct list_head dfc_dfops;
+ unsigned int dfc_tpflags;
+
+ /* Block reservations for the data and rt devices. */
+ unsigned int dfc_blkres;
+ unsigned int dfc_rtxres;
+
+ /* Log reservation saved from the transaction. */
+ unsigned int dfc_logres;
+
+ /*
+ * An inode reference that must be maintained to complete the deferred
+ * work.
+ */
+ struct xfs_inode *dfc_capture_ip;
+};
+
+/*
+ * Functions to capture a chain of deferred operations and continue them later.
+ * This doesn't normally happen except during log recovery.
+ */
+int xfs_defer_ops_capture_and_commit(struct xfs_trans *tp,
+ struct xfs_inode *capture_ip, struct list_head *capture_list);
+void xfs_defer_ops_continue(struct xfs_defer_capture *d, struct xfs_trans *tp,
+ struct xfs_inode **captured_ipp);
+void xfs_defer_ops_release(struct xfs_mount *mp, struct xfs_defer_capture *d);
+
#endif /* __XFS_DEFER_H__ */
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 867c5dee0751..452d04ae10ce 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -600,8 +600,10 @@ xfs_dir2_isblock(
if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
return rval;
rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
- if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize)
+ if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, args->dp->i_mount);
return -EFSCORRUPTED;
+ }
*vp = rval;
return 0;
}
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index f54244779492..e170792c0acc 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -18,6 +18,8 @@ struct xfs_dir2_sf_entry;
struct xfs_dir2_data_hdr;
struct xfs_dir2_data_entry;
struct xfs_dir2_data_unused;
+struct xfs_dir3_icfree_hdr;
+struct xfs_dir3_icleaf_hdr;
extern struct xfs_name xfs_name_dotdot;
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index 49e4bc39e7bb..d034d661957c 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -114,6 +114,23 @@ const struct xfs_buf_ops xfs_dir3_block_buf_ops = {
.verify_struct = xfs_dir3_block_verify,
};
+static xfs_failaddr_t
+xfs_dir3_block_header_check(
+ struct xfs_inode *dp,
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = dp->i_mount;
+
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
+
+ if (be64_to_cpu(hdr3->owner) != dp->i_ino)
+ return __this_address;
+ }
+
+ return NULL;
+}
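Aside, not part of the patch: the owner check lives outside the buffer verifier because a verifier sees only the buffer contents, not which inode the read was issued for. The caller does have the inode, so it compares hdr3->owner against dp->i_ino after a verified read; the dir data and free block paths below follow the same pattern.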
+
int
xfs_dir3_block_read(
struct xfs_trans *tp,
@@ -121,12 +138,24 @@ xfs_dir3_block_read(
struct xfs_buf **bpp)
{
struct xfs_mount *mp = dp->i_mount;
+ xfs_failaddr_t fa;
int err;
err = xfs_da_read_buf(tp, dp, mp->m_dir_geo->datablk, -1, bpp,
XFS_DATA_FORK, &xfs_dir3_block_buf_ops);
- if (!err && tp && *bpp)
- xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_BLOCK_BUF);
+ if (err || !*bpp)
+ return err;
+
+ /* Check things that we can't do in the verifier. */
+ fa = xfs_dir3_block_header_check(dp, *bpp);
+ if (fa) {
+ __xfs_buf_mark_corrupt(*bpp, fa);
+ xfs_trans_brelse(tp, *bpp);
+ *bpp = NULL;
+ return -EFSCORRUPTED;
+ }
+
+ xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_BLOCK_BUF);
return err;
}
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index 2c79be4c3153..2d92bcd8c801 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -348,6 +348,22 @@ static const struct xfs_buf_ops xfs_dir3_data_reada_buf_ops = {
.verify_write = xfs_dir3_data_write_verify,
};
+static xfs_failaddr_t
+xfs_dir3_data_header_check(
+ struct xfs_inode *dp,
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = dp->i_mount;
+
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_dir3_data_hdr *hdr3 = bp->b_addr;
+
+ if (be64_to_cpu(hdr3->hdr.owner) != dp->i_ino)
+ return __this_address;
+ }
+
+ return NULL;
+}
int
xfs_dir3_data_read(
@@ -357,12 +373,24 @@ xfs_dir3_data_read(
xfs_daddr_t mapped_bno,
struct xfs_buf **bpp)
{
+ xfs_failaddr_t fa;
int err;
err = xfs_da_read_buf(tp, dp, bno, mapped_bno, bpp,
XFS_DATA_FORK, &xfs_dir3_data_buf_ops);
- if (!err && tp && *bpp)
- xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_DATA_BUF);
+ if (err || !*bpp)
+ return err;
+
+ /* Check things that we can't do in the verifier. */
+ fa = xfs_dir3_data_header_check(dp, *bpp);
+ if (fa) {
+ __xfs_buf_mark_corrupt(*bpp, fa);
+ xfs_trans_brelse(tp, *bpp);
+ *bpp = NULL;
+ return -EFSCORRUPTED;
+ }
+
+ xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_DATA_BUF);
return err;
}
diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
index a53e4585a2f3..c8ee3250b749 100644
--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
+++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
@@ -1343,8 +1343,10 @@ xfs_dir2_leaf_removename(
oldbest = be16_to_cpu(bf[0].length);
ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
bestsp = xfs_dir2_leaf_bests_p(ltp);
- if (be16_to_cpu(bestsp[db]) != oldbest)
+ if (be16_to_cpu(bestsp[db]) != oldbest) {
+ xfs_buf_mark_corrupt(lbp);
return -EFSCORRUPTED;
+ }
/*
* Mark the former data entry unused.
*/
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index 99d5b2ed67f2..c8c3c3af539f 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -208,7 +208,7 @@ __xfs_dir3_free_read(
/* Check things that we can't do in the verifier. */
fa = xfs_dir3_free_header_check(dp, fbno, *bpp);
if (fa) {
- xfs_verifier_error(*bpp, -EFSCORRUPTED, fa);
+ __xfs_buf_mark_corrupt(*bpp, fa);
xfs_trans_brelse(tp, *bpp);
*bpp = NULL;
return -EFSCORRUPTED;
@@ -374,8 +374,10 @@ xfs_dir2_leaf_to_node(
leaf = lbp->b_addr;
ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
if (be32_to_cpu(ltp->bestcount) >
- (uint)dp->i_d.di_size / args->geo->blksize)
+ (uint)dp->i_d.di_size / args->geo->blksize) {
+ xfs_buf_mark_corrupt(lbp);
return -EFSCORRUPTED;
+ }
/*
* Copy freespace entries from the leaf block to the new block.
@@ -446,8 +448,10 @@ xfs_dir2_leafn_add(
* Quick check just to make sure we are not going to index
* into other people's memory
*/
- if (index < 0)
+ if (index < 0) {
+ xfs_buf_mark_corrupt(bp);
return -EFSCORRUPTED;
+ }
/*
* If there are already the maximum number of leaf entries in
@@ -740,8 +744,10 @@ xfs_dir2_leafn_lookup_for_entry(
ents = dp->d_ops->leaf_ents_p(leaf);
xfs_dir3_leaf_check(dp, bp);
- if (leafhdr.count <= 0)
+ if (leafhdr.count <= 0) {
+ xfs_buf_mark_corrupt(bp);
return -EFSCORRUPTED;
+ }
/*
* Look up the hash value in the leaf entries.
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index 59f9fb2241a5..d2eaea663e7f 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -8,6 +8,25 @@
struct dir_context;
+/*
+ * In-core versions of the leaf and free block headers that abstract the
+ * differences between the v2 and v3 on-disk header formats.
+ */
+struct xfs_dir3_icleaf_hdr {
+ uint32_t forw;
+ uint32_t back;
+ uint16_t magic;
+ uint16_t count;
+ uint16_t stale;
+};
+
+struct xfs_dir3_icfree_hdr {
+ uint32_t magic;
+ uint32_t firstdb;
+ uint32_t nvalid;
+ uint32_t nused;
+};
+
/* xfs_dir2.c */
extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
xfs_dir2_db_t *dbp);
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index ae16ca7c422a..f980c3f3d2f6 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -945,6 +945,27 @@ xfs_dir2_sf_removename(
}
/*
+ * Check whether the sf dir replace operation needs more blocks.
+ */
+static bool
+xfs_dir2_sf_replace_needblock(
+ struct xfs_inode *dp,
+ xfs_ino_t inum)
+{
+ int newsize;
+ struct xfs_dir2_sf_hdr *sfp;
+
+ if (dp->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+ return false;
+
+ sfp = (struct xfs_dir2_sf_hdr *)dp->i_df.if_u1.if_data;
+ newsize = dp->i_df.if_bytes + (sfp->count + 1) * XFS_INO64_DIFF;
+
+ return inum > XFS_DIR2_MAX_SHORT_INUM &&
+ sfp->i8count == 0 && newsize > XFS_IFORK_DSIZE(dp);
+}
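Aside, not part of the patch: the newsize projection above models converting every inode number in the shortform directory, plus the parent pointer kept in the header (hence count + 1), from the 4-byte to the 8-byte encoding; XFS_INO64_DIFF is the per-number growth. For example, a shortform directory with 10 entries would grow by (10 + 1) * 4 = 44 bytes, assuming XFS_INO64_DIFF is 4.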
+
+/*
* Replace the inode number of an entry in a shortform directory.
*/
int /* error */
@@ -980,17 +1001,14 @@ xfs_dir2_sf_replace(
*/
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->i8count == 0) {
int error; /* error return value */
- int newsize; /* new inode size */
- newsize = dp->i_df.if_bytes + (sfp->count + 1) * XFS_INO64_DIFF;
/*
* Won't fit as shortform, convert to block then do replace.
*/
- if (newsize > XFS_IFORK_DSIZE(dp)) {
+ if (xfs_dir2_sf_replace_needblock(dp, args->inumber)) {
error = xfs_dir2_sf_to_block(args);
- if (error) {
+ if (error)
return error;
- }
return xfs_dir2_block_replace(args);
}
/*
diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
index e8bd688a4073..bedc1e752b60 100644
--- a/fs/xfs/libxfs/xfs_dquot_buf.c
+++ b/fs/xfs/libxfs/xfs_dquot_buf.c
@@ -35,10 +35,10 @@ xfs_calc_dquots_per_chunk(
xfs_failaddr_t
xfs_dquot_verify(
- struct xfs_mount *mp,
- xfs_disk_dquot_t *ddq,
- xfs_dqid_t id,
- uint type) /* used only during quotacheck */
+ struct xfs_mount *mp,
+ struct xfs_disk_dquot *ddq,
+ xfs_dqid_t id,
+ uint type) /* used only during quotacheck */
{
/*
* We can encounter an uninitialized dquot buffer for 2 reasons:
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index c968b60cee15..31fa9ab2ab61 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -497,6 +497,23 @@ static inline bool xfs_sb_version_hascrc(struct xfs_sb *sbp)
return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
}
+/*
+ * v5 file systems support v3 inodes only; earlier file systems support
+ * v1 and v2 inodes.
+ */
+static inline bool xfs_sb_version_has_v3inode(struct xfs_sb *sbp)
+{
+ return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
+}
+
+static inline bool xfs_dinode_good_version(struct xfs_sb *sbp,
+ uint8_t version)
+{
+ if (xfs_sb_version_has_v3inode(sbp))
+ return version == 3;
+ return version == 1 || version == 2;
+}
+
static inline bool xfs_sb_version_has_pquotino(struct xfs_sb *sbp)
{
return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
@@ -946,8 +963,12 @@ typedef enum xfs_dinode_fmt {
/*
* Inode size for given fs.
*/
-#define XFS_LITINO(mp, version) \
- ((int)(((mp)->m_sb.sb_inodesize) - xfs_dinode_size(version)))
+#define XFS_DINODE_SIZE(sbp) \
+ (xfs_sb_version_has_v3inode(sbp) ? \
+ sizeof(struct xfs_dinode) : \
+ offsetof(struct xfs_dinode, di_crc))
+#define XFS_LITINO(mp) \
+ ((mp)->m_sb.sb_inodesize - XFS_DINODE_SIZE(&(mp)->m_sb))
/*
* Inode data & attribute fork sizes, per inode.
@@ -956,13 +977,9 @@ typedef enum xfs_dinode_fmt {
#define XFS_DFORK_BOFF(dip) ((int)((dip)->di_forkoff << 3))
#define XFS_DFORK_DSIZE(dip,mp) \
- (XFS_DFORK_Q(dip) ? \
- XFS_DFORK_BOFF(dip) : \
- XFS_LITINO(mp, (dip)->di_version))
+ (XFS_DFORK_Q(dip) ? XFS_DFORK_BOFF(dip) : XFS_LITINO(mp))
#define XFS_DFORK_ASIZE(dip,mp) \
- (XFS_DFORK_Q(dip) ? \
- XFS_LITINO(mp, (dip)->di_version) - XFS_DFORK_BOFF(dip) : \
- 0)
+ (XFS_DFORK_Q(dip) ? XFS_LITINO(mp) - XFS_DFORK_BOFF(dip) : 0)
#define XFS_DFORK_SIZE(dip,mp,w) \
((w) == XFS_DATA_FORK ? \
XFS_DFORK_DSIZE(dip, mp) : \
@@ -1144,11 +1161,11 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
/*
* This is the main portion of the on-disk representation of quota
- * information for a user. This is the q_core of the xfs_dquot_t that
+ * information for a user. This is the q_core of the struct xfs_dquot that
* is kept in kernel memory. We pad this with some more expansion room
* to construct the on disk structure.
*/
-typedef struct xfs_disk_dquot {
+struct xfs_disk_dquot {
__be16 d_magic; /* dquot magic = XFS_DQUOT_MAGIC */
__u8 d_version; /* dquot version */
__u8 d_flags; /* XFS_DQ_USER/PROJ/GROUP */
@@ -1171,15 +1188,15 @@ typedef struct xfs_disk_dquot {
__be32 d_rtbtimer; /* similar to above; for RT disk blocks */
__be16 d_rtbwarns; /* warnings issued wrt RT disk blocks */
__be16 d_pad;
-} xfs_disk_dquot_t;
+};
/*
* This is what goes on disk. This is separated from the xfs_disk_dquot because
* carrying the unnecessary padding would be a waste of memory.
*/
typedef struct xfs_dqblk {
- xfs_disk_dquot_t dd_diskdq; /* portion that lives incore as well */
- char dd_fill[4]; /* filling for posterity */
+ struct xfs_disk_dquot dd_diskdq; /* portion living incore as well */
+ char dd_fill[4]; /* filling for posterity */
/*
* These two are only present on filesystems with the CRC bits set.
@@ -1540,6 +1557,13 @@ typedef struct xfs_bmdr_block {
#define BMBT_BLOCKCOUNT_BITLEN 21
#define BMBT_STARTOFF_MASK ((1ULL << BMBT_STARTOFF_BITLEN) - 1)
+#define BMBT_BLOCKCOUNT_MASK ((1ULL << BMBT_BLOCKCOUNT_BITLEN) - 1)
+
+/*
+ * bmbt records have a file offset (block) field that is 54 bits wide, so this
+ * is the largest xfs_fileoff_t that we ever expect to see.
+ */
+#define XFS_MAX_FILEOFF (BMBT_STARTOFF_MASK + BMBT_BLOCKCOUNT_MASK)
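Aside, not part of the patch: numerically, XFS_MAX_FILEOFF = (2^54 - 1) + (2^21 - 1) = 2^54 + 2^21 - 2, i.e. the sum of the startoff and blockcount masks defined above.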
typedef struct xfs_bmbt_rec {
__be64 l0, l1;
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 443cf33f6666..391e441d43a0 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -303,7 +303,7 @@ xfs_ialloc_inode_init(
* That means for v3 inode we log the entire buffer rather than just the
* inode cores.
*/
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
version = 3;
ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));
@@ -339,7 +339,7 @@ xfs_ialloc_inode_init(
xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
int ioffset = i << mp->m_sb.sb_inodelog;
- uint isize = xfs_dinode_size(version);
+ uint isize = XFS_DINODE_SIZE(&mp->m_sb);
free = xfs_make_iptr(mp, fbuf, i);
free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
@@ -2818,7 +2818,7 @@ xfs_ialloc_setup_geometry(
* cannot change the behavior.
*/
igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
int new_size = igeo->inode_cluster_size_raw;
new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
@@ -2854,3 +2854,67 @@ xfs_ialloc_setup_geometry(
else
igeo->ialloc_align = 0;
}
+
+/* Compute the location of the root directory inode that is laid out by mkfs. */
+xfs_ino_t
+xfs_ialloc_calc_rootino(
+ struct xfs_mount *mp,
+ int sunit)
+{
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
+ xfs_agblock_t first_bno;
+
+ /*
+ * Pre-calculate the geometry of AG 0. We know what it looks like
+ * because libxfs knows how to create allocation groups now.
+ *
+ * first_bno is the first block in which mkfs could possibly have
+ * allocated the root directory inode, once we factor in the metadata
+ * that mkfs formats before it. Namely, the four AG headers...
+ */
+ first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize);
+
+ /* ...the two free space btree roots... */
+ first_bno += 2;
+
+ /* ...the inode btree root... */
+ first_bno += 1;
+
+ /* ...the initial AGFL... */
+ first_bno += xfs_alloc_min_freelist(mp, NULL);
+
+ /* ...the free inode btree root... */
+ if (xfs_sb_version_hasfinobt(&mp->m_sb))
+ first_bno++;
+
+ /* ...the reverse mapping btree root... */
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ first_bno++;
+
+ /* ...the reference count btree... */
+ if (xfs_sb_version_hasreflink(&mp->m_sb))
+ first_bno++;
+
+ /*
+ * ...and the log, if it is allocated in the first allocation group.
+ *
+ * This can happen with filesystems that only have a single
+ * allocation group, or very odd geometries created by old mkfs
+ * versions on very small filesystems.
+ */
+ if (mp->m_sb.sb_logstart &&
+ XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == 0)
+ first_bno += mp->m_sb.sb_logblocks;
+
+ /*
+ * Now round first_bno up to whatever allocation alignment is given
+ * by the filesystem or was passed in.
+ */
+ if (xfs_sb_version_hasdalign(&mp->m_sb) && igeo->ialloc_align > 0)
+ first_bno = roundup(first_bno, sunit);
+ else if (xfs_sb_version_hasalign(&mp->m_sb) &&
+ mp->m_sb.sb_inoalignmt > 1)
+ first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt);
+
+ return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno));
+}
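Aside, not part of the patch: a worked instance of the first step, assuming common 512-byte sectors and 4096-byte blocks:

	/* howmany(a, b) is a divided by b, rounded up: (a + b - 1) / b */
	first_bno = howmany(4 * 512, 4096);	/* == 1; 2048 bytes of AG
						 * headers fit in one block */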
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index 323592d563d5..72b3468b97b1 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -152,5 +152,6 @@ int xfs_inobt_insert_rec(struct xfs_btree_cur *cur, uint16_t holemask,
int xfs_ialloc_cluster_alignment(struct xfs_mount *mp);
void xfs_ialloc_setup_geometry(struct xfs_mount *mp);
+xfs_ino_t xfs_ialloc_calc_rootino(struct xfs_mount *mp, int sunit);
#endif /* __XFS_IALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 28ab3c5255e1..962e95dcdbff 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -44,17 +44,6 @@ xfs_inobp_check(
}
#endif
-bool
-xfs_dinode_good_version(
- struct xfs_mount *mp,
- __u8 version)
-{
- if (xfs_sb_version_hascrc(&mp->m_sb))
- return version == 3;
-
- return version == 1 || version == 2;
-}
-
/*
* If we are doing readahead on an inode buffer, we might be in log recovery
* reading an inode allocation buffer that hasn't yet been replayed, and hence
@@ -93,7 +82,7 @@ xfs_inode_buf_verify(
dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
di_ok = xfs_verify_magic16(bp, dip->di_magic) &&
- xfs_dinode_good_version(mp, dip->di_version) &&
+ xfs_dinode_good_version(&mp->m_sb, dip->di_version) &&
xfs_verify_agino_or_null(mp, agno, unlinked_ino);
if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
XFS_ERRTAG_ITOBP_INOTOBP))) {
@@ -205,26 +194,23 @@ xfs_inode_from_disk(
struct xfs_icdinode *to = &ip->i_d;
struct inode *inode = VFS_I(ip);
-
/*
* Convert v1 inodes immediately to v2 inode format as this is the
* minimum inode version we support in the rest of the code.
+ * They will also be unconditionally written back to disk as v2 inodes.
*/
- to->di_version = from->di_version;
- if (to->di_version == 1) {
+ if (unlikely(from->di_version == 1)) {
set_nlink(inode, be16_to_cpu(from->di_onlink));
- to->di_projid_lo = 0;
- to->di_projid_hi = 0;
- to->di_version = 2;
+ to->di_projid = 0;
} else {
set_nlink(inode, be32_to_cpu(from->di_nlink));
- to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
- to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
+ to->di_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
+ be16_to_cpu(from->di_projid_lo);
}
to->di_format = from->di_format;
- to->di_uid = be32_to_cpu(from->di_uid);
- to->di_gid = be32_to_cpu(from->di_gid);
+ i_uid_write(inode, be32_to_cpu(from->di_uid));
+ i_gid_write(inode, be32_to_cpu(from->di_gid));
to->di_flushiter = be16_to_cpu(from->di_flushiter);
/*
@@ -253,7 +239,7 @@ xfs_inode_from_disk(
to->di_dmstate = be16_to_cpu(from->di_dmstate);
to->di_flags = be16_to_cpu(from->di_flags);
- if (to->di_version == 3) {
+ if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
inode_set_iversion_queried(inode,
be64_to_cpu(from->di_changecount));
to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
@@ -275,12 +261,11 @@ xfs_inode_to_disk(
to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
to->di_onlink = 0;
- to->di_version = from->di_version;
to->di_format = from->di_format;
- to->di_uid = cpu_to_be32(from->di_uid);
- to->di_gid = cpu_to_be32(from->di_gid);
- to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
- to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
+ to->di_uid = cpu_to_be32(i_uid_read(inode));
+ to->di_gid = cpu_to_be32(i_gid_read(inode));
+ to->di_projid_lo = cpu_to_be16(from->di_projid & 0xffff);
+ to->di_projid_hi = cpu_to_be16(from->di_projid >> 16);
memset(to->di_pad, 0, sizeof(to->di_pad));
to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
@@ -304,7 +289,8 @@ xfs_inode_to_disk(
to->di_dmstate = cpu_to_be16(from->di_dmstate);
to->di_flags = cpu_to_be16(from->di_flags);
- if (from->di_version == 3) {
+ if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
+ to->di_version = 3;
to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
@@ -316,6 +302,7 @@ xfs_inode_to_disk(
uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
to->di_flushiter = 0;
} else {
+ to->di_version = 2;
to->di_flushiter = cpu_to_be16(from->di_flushiter);
}
}
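Aside, not part of the patch: the 32-bit project id is stored on disk as two 16-bit halves, and the merge and split above are exact inverses. A standalone check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t projid = 0x00012345;
	uint16_t hi = projid >> 16;	/* 0x0001 */
	uint16_t lo = projid & 0xffff;	/* 0x2345 */

	assert(((uint32_t)hi << 16 | lo) == projid);
	return 0;
}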
@@ -429,7 +416,7 @@ xfs_dinode_verify_forkoff(
case XFS_DINODE_FMT_LOCAL: /* fall through ... */
case XFS_DINODE_FMT_EXTENTS: /* fall through ... */
case XFS_DINODE_FMT_BTREE:
- if (dip->di_forkoff >= (XFS_LITINO(mp, dip->di_version) >> 3))
+ if (dip->di_forkoff >= (XFS_LITINO(mp) >> 3))
return __this_address;
break;
default:
@@ -455,7 +442,7 @@ xfs_dinode_verify(
/* Verify v3 integrity information first */
if (dip->di_version >= 3) {
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_sb_version_has_v3inode(&mp->m_sb))
return __this_address;
if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
XFS_DINODE_CRC_OFF))
@@ -630,12 +617,11 @@ xfs_iread(
/* shortcut IO on inode allocation if possible */
if ((iget_flags & XFS_IGET_CREATE) &&
- xfs_sb_version_hascrc(&mp->m_sb) &&
+ xfs_sb_version_has_v3inode(&mp->m_sb) &&
!(mp->m_flags & XFS_MOUNT_IKEEP)) {
/* initialise the on-disk inode core */
memset(&ip->i_d, 0, sizeof(ip->i_d));
VFS_I(ip)->i_generation = prandom_u32();
- ip->i_d.di_version = 3;
return 0;
}
@@ -677,7 +663,6 @@ xfs_iread(
* Partial initialisation of the in-core inode. Just the bits
* that xfs_ialloc won't overwrite or relies on being correct.
*/
- ip->i_d.di_version = dip->di_version;
VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
@@ -691,7 +676,6 @@ xfs_iread(
VFS_I(ip)->i_mode = 0;
}
- ASSERT(ip->i_d.di_version >= 2);
ip->i_delayed_blks = 0;
/*
diff --git a/fs/xfs/libxfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h
index ab0f84165317..80b574579a21 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.h
+++ b/fs/xfs/libxfs/xfs_inode_buf.h
@@ -16,13 +16,9 @@ struct xfs_dinode;
* format specific structures at the appropriate time.
*/
struct xfs_icdinode {
- int8_t di_version; /* inode version */
int8_t di_format; /* format of di_c data */
uint16_t di_flushiter; /* incremented on flush */
- uint32_t di_uid; /* owner's user id */
- uint32_t di_gid; /* owner's group id */
- uint16_t di_projid_lo; /* lower part of owner's project id */
- uint16_t di_projid_hi; /* higher part of owner's project id */
+ uint32_t di_projid; /* owner's project id */
xfs_fsize_t di_size; /* number of bytes in file */
xfs_rfsblock_t di_nblocks; /* # of direct & btree blocks used */
xfs_extlen_t di_extsize; /* basic/minimum extent size for file */
@@ -62,8 +58,6 @@ void xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from);
void xfs_log_dinode_to_disk(struct xfs_log_dinode *from,
struct xfs_dinode *to);
-bool xfs_dinode_good_version(struct xfs_mount *mp, __u8 version);
-
#if defined(DEBUG)
void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
#else
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 8fdd0424070e..e758d74b2b62 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -75,11 +75,15 @@ xfs_iformat_fork(
error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
break;
default:
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
+ dip, sizeof(*dip), __this_address);
return -EFSCORRUPTED;
}
break;
default:
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
+ sizeof(*dip), __this_address);
return -EFSCORRUPTED;
}
if (error)
@@ -110,6 +114,8 @@ xfs_iformat_fork(
error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
break;
default:
+ xfs_inode_verifier_error(ip, error, __func__, dip,
+ sizeof(*dip), __this_address);
error = -EFSCORRUPTED;
break;
}
@@ -177,7 +183,7 @@ xfs_iformat_local(
*/
if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
xfs_warn(ip->i_mount,
- "corrupt inode %Lu (bad size %d for local fork, size = %d).",
+ "corrupt inode %Lu (bad size %d for local fork, size = %zd).",
(unsigned long long) ip->i_ino, size,
XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
xfs_inode_verifier_error(ip, -EFSCORRUPTED,
@@ -586,7 +592,7 @@ void
xfs_iflush_fork(
xfs_inode_t *ip,
xfs_dinode_t *dip,
- xfs_inode_log_item_t *iip,
+ struct xfs_inode_log_item *iip,
int whichfork)
{
char *cp;
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 7b845c052fb4..a84a1557d11c 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -46,14 +46,9 @@ struct xfs_ifork {
(ip)->i_afp : \
(ip)->i_cowfp))
#define XFS_IFORK_DSIZE(ip) \
- (XFS_IFORK_Q(ip) ? \
- XFS_IFORK_BOFF(ip) : \
- XFS_LITINO((ip)->i_mount, (ip)->i_d.di_version))
+ (XFS_IFORK_Q(ip) ? XFS_IFORK_BOFF(ip) : XFS_LITINO((ip)->i_mount))
#define XFS_IFORK_ASIZE(ip) \
- (XFS_IFORK_Q(ip) ? \
- XFS_LITINO((ip)->i_mount, (ip)->i_d.di_version) - \
- XFS_IFORK_BOFF(ip) : \
- 0)
+ (XFS_IFORK_Q(ip) ? XFS_LITINO((ip)->i_mount) - XFS_IFORK_BOFF(ip) : 0)
#define XFS_IFORK_SIZE(ip,w) \
((w) == XFS_DATA_FORK ? \
XFS_IFORK_DSIZE(ip) : \
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index e5f97c69b320..d3b255f42789 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -424,12 +424,10 @@ struct xfs_log_dinode {
/* structure must be padded to 64 bit alignment */
};
-static inline uint xfs_log_dinode_size(int version)
-{
- if (version == 3)
- return sizeof(struct xfs_log_dinode);
- return offsetof(struct xfs_log_dinode, di_next_unlinked);
-}
+#define xfs_log_dinode_size(mp) \
+ (xfs_sb_version_has_v3inode(&(mp)->m_sb) ? \
+ sizeof(struct xfs_log_dinode) : \
+ offsetof(struct xfs_log_dinode, di_next_unlinked))
/*
* Buffer Log Format definitions
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 9a7fadb1361c..78236bd6c64f 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1591,8 +1591,10 @@ xfs_refcount_recover_extent(
struct list_head *debris = priv;
struct xfs_refcount_recovery *rr;
- if (be32_to_cpu(rec->refc.rc_refcount) != 1)
+ if (be32_to_cpu(rec->refc.rc_refcount) != 1) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, cur->bc_mp);
return -EFSCORRUPTED;
+ }
rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), 0);
xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 42085e70c01a..cf99e4cab627 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -15,7 +15,7 @@
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_rtalloc.h"
-
+#include "xfs_error.h"
/*
* Realtime allocator bitmap functions shared with userspace.
@@ -70,8 +70,10 @@ xfs_rtbuf_get(
if (error)
return error;
- if (nmap == 0 || !xfs_bmap_is_real_extent(&map))
+ if (nmap == 0 || !xfs_bmap_is_written_extent(&map)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
+ }
ASSERT(map.br_startblock != NULLFSBLOCK);
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index c45acbd3add9..708feb8eac76 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -65,6 +65,7 @@ void xfs_log_get_max_trans_res(struct xfs_mount *mp,
#define XFS_TRANS_DQ_DIRTY 0x10 /* at least one dquot in trx dirty */
#define XFS_TRANS_RESERVE 0x20 /* OK to use reserved data blocks */
#define XFS_TRANS_NO_WRITECOUNT 0x40 /* do not elevate SB writecount */
+#define XFS_TRANS_RES_FDBLKS 0x80 /* reserve newly freed blocks */
/*
* LOWMODE is used by the allocator to activate the lowspace algorithm - when
* free space is running low the extent allocator may choose to allocate an
diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
index 0ba7368b9a5f..1d0e78e0099d 100644
--- a/fs/xfs/libxfs/xfs_trans_inode.c
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
@@ -27,7 +27,7 @@ xfs_trans_ijoin(
struct xfs_inode *ip,
uint lock_flags)
{
- xfs_inode_log_item_t *iip;
+ struct xfs_inode_log_item *iip;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
if (ip->i_itemp == NULL)
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index b3584cd2cc16..8ece346def97 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -187,7 +187,7 @@ xfs_calc_inode_chunk_res(
XFS_FSB_TO_B(mp, 1));
if (alloc) {
/* icreate tx uses ordered buffers */
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_sb_version_has_v3inode(&mp->m_sb))
return res;
size = XFS_FSB_TO_B(mp, 1);
}
@@ -776,7 +776,7 @@ xfs_calc_clear_agi_bucket_reservation(
/*
* Adjusting quota limits.
- * the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot)
+ * the disk quota buffer: sizeof(struct xfs_disk_dquot)
*/
STATIC uint
xfs_calc_qm_setqlim_reservation(void)
@@ -800,7 +800,7 @@ xfs_calc_qm_dqalloc_reservation(
/*
* Turning off quotas.
- * the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
+ * the quota off logitems: sizeof(struct xfs_qoff_logitem) * 2
* the superblock for the quota flags: sector size
*/
STATIC uint
@@ -813,7 +813,7 @@ xfs_calc_qm_quotaoff_reservation(
/*
* End of turning off quotas.
- * the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
+ * the quota off logitems: sizeof(struct xfs_qoff_logitem) * 2
*/
STATIC uint
xfs_calc_qm_quotaoff_end_reservation(void)
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 96d7071cfa46..6788b0ca85eb 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -12,6 +12,7 @@
#include "xfs_inode.h"
#include "xfs_attr.h"
#include "xfs_trace.h"
+#include "xfs_error.h"
#include <linux/posix_acl_xattr.h>
@@ -23,6 +24,7 @@
STATIC struct posix_acl *
xfs_acl_from_disk(
+ struct xfs_mount *mp,
const struct xfs_acl *aclp,
int len,
int max_entries)
@@ -32,11 +34,18 @@ xfs_acl_from_disk(
const struct xfs_acl_entry *ace;
unsigned int count, i;
- if (len < sizeof(*aclp))
+ if (len < sizeof(*aclp)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
+ len);
return ERR_PTR(-EFSCORRUPTED);
+ }
+
count = be32_to_cpu(aclp->acl_cnt);
- if (count > max_entries || XFS_ACL_SIZE(count) != len)
+ if (count > max_entries || XFS_ACL_SIZE(count) != len) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
+ len);
return ERR_PTR(-EFSCORRUPTED);
+ }
acl = posix_acl_alloc(count, GFP_KERNEL);
if (!acl)
@@ -57,10 +66,12 @@ xfs_acl_from_disk(
switch (acl_e->e_tag) {
case ACL_USER:
- acl_e->e_uid = xfs_uid_to_kuid(be32_to_cpu(ace->ae_id));
+ acl_e->e_uid = make_kuid(&init_user_ns,
+ be32_to_cpu(ace->ae_id));
break;
case ACL_GROUP:
- acl_e->e_gid = xfs_gid_to_kgid(be32_to_cpu(ace->ae_id));
+ acl_e->e_gid = make_kgid(&init_user_ns,
+ be32_to_cpu(ace->ae_id));
break;
case ACL_USER_OBJ:
case ACL_GROUP_OBJ:
@@ -93,10 +104,12 @@ xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
ace->ae_tag = cpu_to_be32(acl_e->e_tag);
switch (acl_e->e_tag) {
case ACL_USER:
- ace->ae_id = cpu_to_be32(xfs_kuid_to_uid(acl_e->e_uid));
+ ace->ae_id = cpu_to_be32(
+ from_kuid(&init_user_ns, acl_e->e_uid));
break;
case ACL_GROUP:
- ace->ae_id = cpu_to_be32(xfs_kgid_to_gid(acl_e->e_gid));
+ ace->ae_id = cpu_to_be32(
+ from_kgid(&init_user_ns, acl_e->e_gid));
break;
default:
ace->ae_id = cpu_to_be32(ACL_UNDEFINED_ID);
@@ -145,7 +158,7 @@ xfs_get_acl(struct inode *inode, int type)
if (error != -ENOATTR)
acl = ERR_PTR(error);
} else {
- acl = xfs_acl_from_disk(xfs_acl, len,
+ acl = xfs_acl_from_disk(ip->i_mount, xfs_acl, len,
XFS_ACL_MAX_ENTRIES(ip->i_mount));
kmem_free(xfs_acl);
}
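Aside, not part of the patch: the replacement code calls the generic user-namespace mapping helpers directly; in &init_user_ns the mapping is the identity, so the on-disk id round-trips. Reusing names from the hunk above:

	kuid_t kuid = make_kuid(&init_user_ns, be32_to_cpu(ace->ae_id));
	uint32_t id = from_kuid(&init_user_ns, kuid);	/* == be32_to_cpu(ace->ae_id) */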
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index f16d5f196c6b..5d9f8e4c4cde 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -495,7 +495,7 @@ xfs_map_blocks(
ssize_t count = i_blocksize(inode);
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count);
- xfs_fileoff_t cow_fsb = NULLFILEOFF;
+ xfs_fileoff_t cow_fsb;
struct xfs_bmbt_irec imap;
struct xfs_iext_cursor icur;
int retries = 0;
@@ -529,6 +529,8 @@ xfs_map_blocks(
* landed in a hole and we skip the block.
*/
retry:
+ cow_fsb = NULLFILEOFF;
+ wpc->fork = XFS_DATA_FORK;
xfs_ilock(ip, XFS_ILOCK_SHARED);
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
(ip->i_df.if_flags & XFS_IFEXTENTS));
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index a640a285cc52..f052de128fa1 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -22,24 +22,21 @@
#include "xfs_attr_leaf.h"
#include "xfs_quota.h"
#include "xfs_dir2.h"
+#include "xfs_error.h"
/*
- * Look at all the extents for this logical region,
- * invalidate any buffers that are incore/in transactions.
+ * Invalidate any incore buffers associated with this remote attribute value
+ * extent. We never log remote attribute value buffers, which means that they
+ * won't be attached to a transaction and are therefore safe to mark stale.
+ * The actual bunmapi will be taken care of later.
*/
STATIC int
-xfs_attr3_leaf_freextent(
- struct xfs_trans **trans,
+xfs_attr3_rmt_stale(
struct xfs_inode *dp,
xfs_dablk_t blkno,
int blkcnt)
{
struct xfs_bmbt_irec map;
- struct xfs_buf *bp;
- xfs_dablk_t tblkno;
- xfs_daddr_t dblkno;
- int tblkcnt;
- int dblkcnt;
int nmap;
int error;
@@ -47,47 +44,28 @@ xfs_attr3_leaf_freextent(
* Roll through the "value", invalidating the attribute value's
* blocks.
*/
- tblkno = blkno;
- tblkcnt = blkcnt;
- while (tblkcnt > 0) {
+ while (blkcnt > 0) {
/*
* Try to remember where we decided to put the value.
*/
nmap = 1;
- error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt,
+ error = xfs_bmapi_read(dp, (xfs_fileoff_t)blkno, blkcnt,
&map, &nmap, XFS_BMAPI_ATTRFORK);
- if (error) {
+ if (error)
return error;
- }
ASSERT(nmap == 1);
- ASSERT(map.br_startblock != DELAYSTARTBLOCK);
/*
- * If it's a hole, these are already unmapped
- * so there's nothing to invalidate.
+ * Mark any incore buffers for the remote value as stale. We
+ * never log remote attr value buffers, so the buffer should be
+ * easy to kill.
*/
- if (map.br_startblock != HOLESTARTBLOCK) {
-
- dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
- map.br_startblock);
- dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
- map.br_blockcount);
- bp = xfs_trans_get_buf(*trans,
- dp->i_mount->m_ddev_targp,
- dblkno, dblkcnt, 0);
- if (!bp)
- return -ENOMEM;
- xfs_trans_binval(*trans, bp);
- /*
- * Roll to next transaction.
- */
- error = xfs_trans_roll_inode(trans, dp);
- if (error)
- return error;
- }
+ error = xfs_attr_rmtval_stale(dp, &map, 0);
+ if (error)
+ return error;
- tblkno += map.br_blockcount;
- tblkcnt -= map.br_blockcount;
+ blkno += map.br_blockcount;
+ blkcnt -= map.br_blockcount;
}
return 0;
@@ -101,86 +79,45 @@ xfs_attr3_leaf_freextent(
*/
STATIC int
xfs_attr3_leaf_inactive(
- struct xfs_trans **trans,
- struct xfs_inode *dp,
- struct xfs_buf *bp)
+ struct xfs_trans **trans,
+ struct xfs_inode *dp,
+ struct xfs_buf *bp)
{
- struct xfs_attr_leafblock *leaf;
- struct xfs_attr3_icleaf_hdr ichdr;
- struct xfs_attr_leaf_entry *entry;
+ struct xfs_attr3_icleaf_hdr ichdr;
+ struct xfs_mount *mp = bp->b_mount;
+ struct xfs_attr_leafblock *leaf = bp->b_addr;
+ struct xfs_attr_leaf_entry *entry;
struct xfs_attr_leaf_name_remote *name_rmt;
- struct xfs_attr_inactive_list *list;
- struct xfs_attr_inactive_list *lp;
- int error;
- int count;
- int size;
- int tmp;
- int i;
- struct xfs_mount *mp = bp->b_mount;
+ int error = 0;
+ int i;
- leaf = bp->b_addr;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
/*
- * Count the number of "remote" value extents.
+ * Find the remote value extents for this leaf and invalidate their
+ * incore buffers.
*/
- count = 0;
entry = xfs_attr3_leaf_entryp(leaf);
for (i = 0; i < ichdr.count; entry++, i++) {
- if (be16_to_cpu(entry->nameidx) &&
- ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
- name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
- if (name_rmt->valueblk)
- count++;
- }
- }
-
- /*
- * If there are no "remote" values, we're done.
- */
- if (count == 0) {
- xfs_trans_brelse(*trans, bp);
- return 0;
- }
+ int blkcnt;
- /*
- * Allocate storage for a list of all the "remote" value extents.
- */
- size = count * sizeof(xfs_attr_inactive_list_t);
- list = kmem_alloc(size, 0);
-
- /*
- * Identify each of the "remote" value extents.
- */
- lp = list;
- entry = xfs_attr3_leaf_entryp(leaf);
- for (i = 0; i < ichdr.count; entry++, i++) {
- if (be16_to_cpu(entry->nameidx) &&
- ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
- name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
- if (name_rmt->valueblk) {
- lp->valueblk = be32_to_cpu(name_rmt->valueblk);
- lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
- be32_to_cpu(name_rmt->valuelen));
- lp++;
- }
- }
- }
- xfs_trans_brelse(*trans, bp); /* unlock for trans. in freextent() */
+ if (!entry->nameidx || (entry->flags & XFS_ATTR_LOCAL))
+ continue;
- /*
- * Invalidate each of the "remote" value extents.
- */
- error = 0;
- for (lp = list, i = 0; i < count; i++, lp++) {
- tmp = xfs_attr3_leaf_freextent(trans, dp,
- lp->valueblk, lp->valuelen);
+ name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
+ if (!name_rmt->valueblk)
+ continue;
- if (error == 0)
- error = tmp; /* save only the 1st errno */
+ blkcnt = xfs_attr3_rmt_blocks(dp->i_mount,
+ be32_to_cpu(name_rmt->valuelen));
+ error = xfs_attr3_rmt_stale(dp,
+ be32_to_cpu(name_rmt->valueblk), blkcnt);
+ if (error)
+ goto err;
}
- kmem_free(list);
+ xfs_trans_brelse(*trans, bp);
+err:
return error;
}
@@ -208,8 +145,9 @@ xfs_attr3_node_inactive(
* Since this code is recursive (gasp!) we must protect ourselves.
*/
if (level > XFS_DA_NODE_MAXDEPTH) {
+ xfs_buf_mark_corrupt(bp);
xfs_trans_brelse(*trans, bp); /* no locks for later trans */
- return -EIO;
+ return -EFSCORRUPTED;
}
node = bp->b_addr;
@@ -258,8 +196,9 @@ xfs_attr3_node_inactive(
error = xfs_attr3_leaf_inactive(trans, dp, child_bp);
break;
default:
- error = -EIO;
+ xfs_buf_mark_corrupt(child_bp);
xfs_trans_brelse(*trans, child_bp);
+ error = -EFSCORRUPTED;
break;
}
if (error)
@@ -341,7 +280,8 @@ xfs_attr3_root_inactive(
error = xfs_attr3_leaf_inactive(trans, dp, bp);
break;
default:
- error = -EIO;
+ error = -EFSCORRUPTED;
+ xfs_buf_mark_corrupt(bp);
xfs_trans_brelse(*trans, bp);
break;
}
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 00758fdc2fec..8c0972834449 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -258,8 +258,10 @@ xfs_attr_node_list_lookup(
return 0;
/* We can't point back to the root. */
- if (cursor->blkno == 0)
+ if (cursor->blkno == 0) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
+ }
}
if (expected_level != 0)
@@ -269,6 +271,7 @@ xfs_attr_node_list_lookup(
return 0;
out_corruptbuf:
+ xfs_buf_mark_corrupt(bp);
xfs_trans_brelse(tp, bp);
return -EFSCORRUPTED;
}
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index 83d24e983d4c..7b0c4d9679d9 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -21,7 +21,8 @@
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
-
+#include "xfs_error.h"
+#include "xfs_quota.h"
kmem_zone_t *xfs_bui_zone;
kmem_zone_t *xfs_bud_zone;
@@ -124,34 +125,6 @@ xfs_bui_item_release(
xfs_bui_release(BUI_ITEM(lip));
}
-static const struct xfs_item_ops xfs_bui_item_ops = {
- .iop_size = xfs_bui_item_size,
- .iop_format = xfs_bui_item_format,
- .iop_unpin = xfs_bui_item_unpin,
- .iop_release = xfs_bui_item_release,
-};
-
-/*
- * Allocate and initialize an bui item with the given number of extents.
- */
-struct xfs_bui_log_item *
-xfs_bui_init(
- struct xfs_mount *mp)
-
-{
- struct xfs_bui_log_item *buip;
-
- buip = kmem_zone_zalloc(xfs_bui_zone, 0);
-
- xfs_log_item_init(mp, &buip->bui_item, XFS_LI_BUI, &xfs_bui_item_ops);
- buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS;
- buip->bui_format.bui_id = (uintptr_t)(void *)buip;
- atomic_set(&buip->bui_next_extent, 0);
- atomic_set(&buip->bui_refcount, 2);
-
- return buip;
-}
-
static inline struct xfs_bud_log_item *BUD_ITEM(struct xfs_log_item *lip)
{
return container_of(lip, struct xfs_bud_log_item, bud_item);
@@ -278,27 +251,6 @@ xfs_bmap_update_diff_items(
return ba->bi_owner->i_ino - bb->bi_owner->i_ino;
}
-/* Get an BUI. */
-STATIC void *
-xfs_bmap_update_create_intent(
- struct xfs_trans *tp,
- unsigned int count)
-{
- struct xfs_bui_log_item *buip;
-
- ASSERT(count == XFS_BUI_MAX_FAST_EXTENTS);
- ASSERT(tp != NULL);
-
- buip = xfs_bui_init(tp->t_mountp);
- ASSERT(buip != NULL);
-
- /*
- * Get a log_item_desc to point at the new item.
- */
- xfs_trans_add_item(tp, &buip->bui_item);
- return buip;
-}
-
/* Set the map extent flags for this mapping. */
static void
xfs_trans_set_bmap_flags(
@@ -326,16 +278,12 @@ xfs_trans_set_bmap_flags(
STATIC void
xfs_bmap_update_log_item(
struct xfs_trans *tp,
- void *intent,
- struct list_head *item)
+ struct xfs_bui_log_item *buip,
+ struct xfs_bmap_intent *bmap)
{
- struct xfs_bui_log_item *buip = intent;
- struct xfs_bmap_intent *bmap;
uint next_extent;
struct xfs_map_extent *map;
- bmap = container_of(item, struct xfs_bmap_intent, bi_list);
-
tp->t_flags |= XFS_TRANS_DIRTY;
set_bit(XFS_LI_DIRTY, &buip->bui_item.li_flags);
@@ -355,14 +303,35 @@ xfs_bmap_update_log_item(
bmap->bi_bmap.br_state);
}
+static struct xfs_log_item *
+xfs_bmap_update_create_intent(
+ struct xfs_trans *tp,
+ struct list_head *items,
+ unsigned int count,
+ bool sort)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_bui_log_item *buip = xfs_bui_init(mp);
+ struct xfs_bmap_intent *bmap;
+
+ ASSERT(count == XFS_BUI_MAX_FAST_EXTENTS);
+
+ xfs_trans_add_item(tp, &buip->bui_item);
+ if (sort)
+ list_sort(mp, items, xfs_bmap_update_diff_items);
+ list_for_each_entry(bmap, items, bi_list)
+ xfs_bmap_update_log_item(tp, buip, bmap);
+ return &buip->bui_item;
+}
+
/* Get a BUD so we can process all the deferred bmap updates. */
STATIC void *
xfs_bmap_update_create_done(
struct xfs_trans *tp,
- void *intent,
+ struct xfs_log_item *intent,
unsigned int count)
{
- return xfs_trans_get_bud(tp, intent);
+ return xfs_trans_get_bud(tp, BUI_ITEM(intent));
}
/* Process a deferred bmap update. */
@@ -398,9 +367,9 @@ xfs_bmap_update_finish_item(
/* Abort all pending BUIs. */
STATIC void
xfs_bmap_update_abort_intent(
- void *intent)
+ struct xfs_log_item *intent)
{
- xfs_bui_release(intent);
+ xfs_bui_release(BUI_ITEM(intent));
}
/* Cancel a deferred bmap update. */
@@ -416,10 +385,8 @@ xfs_bmap_update_cancel_item(
const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
.max_items = XFS_BUI_MAX_FAST_EXTENTS,
- .diff_items = xfs_bmap_update_diff_items,
.create_intent = xfs_bmap_update_create_intent,
.abort_intent = xfs_bmap_update_abort_intent,
- .log_item = xfs_bmap_update_log_item,
.create_done = xfs_bmap_update_create_done,
.finish_item = xfs_bmap_update_finish_item,
.cancel_item = xfs_bmap_update_cancel_item,
@@ -431,8 +398,8 @@ const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
*/
int
xfs_bui_recover(
- struct xfs_trans *parent_tp,
- struct xfs_bui_log_item *buip)
+ struct xfs_bui_log_item *buip,
+ struct list_head *capture_list)
{
int error = 0;
unsigned int bui_type;
@@ -440,15 +407,13 @@ xfs_bui_recover(
xfs_fsblock_t startblock_fsb;
xfs_fsblock_t inode_fsb;
xfs_filblks_t count;
- bool op_ok;
struct xfs_bud_log_item *budp;
- enum xfs_bmap_intent_type type;
int whichfork;
xfs_exntst_t state;
struct xfs_trans *tp;
struct xfs_inode *ip = NULL;
struct xfs_bmbt_irec irec;
- struct xfs_mount *mp = parent_tp->t_mountp;
+ struct xfs_mount *mp = buip->bui_item.li_mountp;
ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));
@@ -456,7 +421,7 @@ xfs_bui_recover(
if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
xfs_bui_release(buip);
- return -EIO;
+ return -EFSCORRUPTED;
}
/*
@@ -468,16 +433,19 @@ xfs_bui_recover(
XFS_FSB_TO_DADDR(mp, bmap->me_startblock));
inode_fsb = XFS_BB_TO_FSB(mp, XFS_FSB_TO_DADDR(mp,
XFS_INO_TO_FSB(mp, bmap->me_owner)));
- switch (bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK) {
+ state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
+ XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
+ whichfork = (bmap->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ?
+ XFS_ATTR_FORK : XFS_DATA_FORK;
+ bui_type = bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK;
+ switch (bui_type) {
case XFS_BMAP_MAP:
case XFS_BMAP_UNMAP:
- op_ok = true;
break;
default:
- op_ok = false;
- break;
+ return -EFSCORRUPTED;
}
- if (!op_ok || startblock_fsb == 0 ||
+ if (startblock_fsb == 0 ||
bmap->me_len == 0 ||
inode_fsb == 0 ||
startblock_fsb >= mp->m_sb.sb_dblocks ||
@@ -490,54 +458,40 @@ xfs_bui_recover(
*/
set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
xfs_bui_release(buip);
- return -EIO;
+ return -EFSCORRUPTED;
}
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
- XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
+ /* Grab the inode. */
+ error = xfs_iget(mp, NULL, bmap->me_owner, 0, 0, &ip);
if (error)
return error;
- /*
- * Recovery stashes all deferred ops during intent processing and
- * finishes them on completion. Transfer current dfops state to this
- * transaction and transfer the result back before we return.
- */
- xfs_defer_move(tp, parent_tp);
- budp = xfs_trans_get_bud(tp, buip);
- /* Grab the inode. */
- error = xfs_iget(mp, tp, bmap->me_owner, 0, XFS_ILOCK_EXCL, &ip);
+ error = xfs_qm_dqattach(ip);
if (error)
- goto err_inode;
+ goto err_rele;
if (VFS_I(ip)->i_nlink == 0)
xfs_iflags_set(ip, XFS_IRECOVERY);
- /* Process deferred bmap item. */
- state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
- XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
- whichfork = (bmap->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ?
- XFS_ATTR_FORK : XFS_DATA_FORK;
- bui_type = bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK;
- switch (bui_type) {
- case XFS_BMAP_MAP:
- case XFS_BMAP_UNMAP:
- type = bui_type;
- break;
- default:
- error = -EFSCORRUPTED;
- goto err_inode;
- }
+ /* Allocate transaction and do the work. */
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
+ XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
+ if (error)
+ goto err_rele;
+
+ budp = xfs_trans_get_bud(tp, buip);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
count = bmap->me_len;
- error = xfs_trans_log_finish_bmap_update(tp, budp, type, ip, whichfork,
- bmap->me_startoff, bmap->me_startblock, &count, state);
+ error = xfs_trans_log_finish_bmap_update(tp, budp, bui_type, ip,
+ whichfork, bmap->me_startoff, bmap->me_startblock,
+ &count, state);
if (error)
- goto err_inode;
+ goto err_cancel;
if (count > 0) {
- ASSERT(type == XFS_BMAP_UNMAP);
+ ASSERT(bui_type == XFS_BMAP_UNMAP);
irec.br_startblock = bmap->me_startblock;
irec.br_blockcount = count;
irec.br_startoff = bmap->me_startoff;
@@ -546,19 +500,78 @@ xfs_bui_recover(
}
set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
- xfs_defer_move(parent_tp, tp);
- error = xfs_trans_commit(tp);
+ /*
+	 * Commit the transaction, which frees it and saves the inode for
+	 * later replay activities.
+ */
+ error = xfs_defer_ops_capture_and_commit(tp, ip, capture_list);
+ if (error)
+ goto err_unlock;
+
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_irele(ip);
+ return 0;
- return error;
-
-err_inode:
- xfs_defer_move(parent_tp, tp);
+err_cancel:
xfs_trans_cancel(tp);
- if (ip) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- xfs_irele(ip);
- }
+err_unlock:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+err_rele:
+ xfs_irele(ip);
return error;
}
+
+/* Relog an intent item to push the log tail forward. */
+static struct xfs_log_item *
+xfs_bui_item_relog(
+ struct xfs_log_item *intent,
+ struct xfs_trans *tp)
+{
+ struct xfs_bud_log_item *budp;
+ struct xfs_bui_log_item *buip;
+ struct xfs_map_extent *extp;
+ unsigned int count;
+
+ count = BUI_ITEM(intent)->bui_format.bui_nextents;
+ extp = BUI_ITEM(intent)->bui_format.bui_extents;
+
+ tp->t_flags |= XFS_TRANS_DIRTY;
+ budp = xfs_trans_get_bud(tp, BUI_ITEM(intent));
+ set_bit(XFS_LI_DIRTY, &budp->bud_item.li_flags);
+
+ buip = xfs_bui_init(tp->t_mountp);
+ memcpy(buip->bui_format.bui_extents, extp, count * sizeof(*extp));
+ atomic_set(&buip->bui_next_extent, count);
+ xfs_trans_add_item(tp, &buip->bui_item);
+ set_bit(XFS_LI_DIRTY, &buip->bui_item.li_flags);
+ return &buip->bui_item;
+}
+
+static const struct xfs_item_ops xfs_bui_item_ops = {
+ .iop_size = xfs_bui_item_size,
+ .iop_format = xfs_bui_item_format,
+ .iop_unpin = xfs_bui_item_unpin,
+ .iop_release = xfs_bui_item_release,
+ .iop_relog = xfs_bui_item_relog,
+};
+
+/*
+ * Allocate and initialize a bui item with the given number of extents.
+ */
+struct xfs_bui_log_item *
+xfs_bui_init(
+ struct xfs_mount *mp)
+
+{
+ struct xfs_bui_log_item *buip;
+
+ buip = kmem_zone_zalloc(xfs_bui_zone, 0);
+
+ xfs_log_item_init(mp, &buip->bui_item, XFS_LI_BUI, &xfs_bui_item_ops);
+ buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS;
+ buip->bui_format.bui_id = (uintptr_t)(void *)buip;
+ atomic_set(&buip->bui_next_extent, 0);
+ atomic_set(&buip->bui_refcount, 2);
+
+ return buip;
+}
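With the parent transaction gone, xfs_bui_recover() now commits its own transaction through xfs_defer_ops_capture_and_commit() and parks any follow-on deferred work on capture_list for the caller to replay later. Below is a minimal sketch of how a recovery driver could consume that list; example_finish_capture() is a hypothetical stand-in for the real log-recovery code:

	static int
	example_recover_one_bui(
		struct xfs_bui_log_item	*buip)
	{
		LIST_HEAD(capture_list);
		int			error;

		/* Replays the intent; stashes new dfops on capture_list. */
		error = xfs_bui_recover(buip, &capture_list);
		if (error)
			return error;

		/* Hypothetical: finish each captured chain in its own tp. */
		return example_finish_capture(buip->bui_item.li_mountp,
				&capture_list);
	}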
diff --git a/fs/xfs/xfs_bmap_item.h b/fs/xfs/xfs_bmap_item.h
index ad479cc73de8..a95e99c26979 100644
--- a/fs/xfs/xfs_bmap_item.h
+++ b/fs/xfs/xfs_bmap_item.h
@@ -77,6 +77,7 @@ extern struct kmem_zone *xfs_bud_zone;
struct xfs_bui_log_item *xfs_bui_init(struct xfs_mount *);
void xfs_bui_item_free(struct xfs_bui_log_item *);
void xfs_bui_release(struct xfs_bui_log_item *);
-int xfs_bui_recover(struct xfs_trans *parent_tp, struct xfs_bui_log_item *buip);
+int xfs_bui_recover(struct xfs_bui_log_item *buip,
+ struct list_head *capture_list);
#endif /* __XFS_BMAP_ITEM_H__ */
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index d6d78e127625..32170ce2984c 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -125,7 +125,7 @@ xfs_bmap_rtalloc(
* pick an extent that will space things out in the rt area.
*/
if (ap->eof && ap->offset == 0) {
- xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
+ xfs_rtblock_t rtx; /* realtime extent no */
error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
if (error)
@@ -1167,6 +1167,7 @@ xfs_prepare_shift(
struct xfs_inode *ip,
loff_t offset)
{
+ struct xfs_mount *mp = ip->i_mount;
int error;
/*
@@ -1180,6 +1181,17 @@ xfs_prepare_shift(
}
/*
+ * Shift operations must stabilize the start block offset boundary along
+ * with the full range of the operation. If we don't, a COW writeback
+ * completion could race with an insert, front merge with the start
+ * extent (after split) during the shift and corrupt the file. Start
+ * with the block just prior to the start to stabilize the boundary.
+ */
+ offset = round_down(offset, 1 << mp->m_sb.sb_blocklog);
+ if (offset)
+ offset -= (1 << mp->m_sb.sb_blocklog);
+
+ /*
* Writeback and invalidate cache for the remainder of the file as we're
* about to shift down every extent from offset to EOF.
*/
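A quick worked example of the rounding above, assuming 4096-byte filesystem blocks (sb_blocklog == 12):

	loff_t offset = 10000;

	offset = round_down(offset, 1 << 12);	/* -> 8192: containing block */
	if (offset)
		offset -= 1 << 12;		/* -> 4096: one block earlier */

so the writeback and invalidation that follow also cover the extent straddling the start boundary.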
@@ -1225,7 +1237,6 @@ xfs_collapse_file_space(
int error;
xfs_fileoff_t next_fsb = XFS_B_TO_FSB(mp, offset + len);
xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
- uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
bool done = false;
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
@@ -1241,32 +1252,34 @@ xfs_collapse_file_space(
if (error)
return error;
- while (!error && !done) {
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
- &tp);
- if (error)
- break;
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
+ if (error)
+ return error;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
- ip->i_gdquot, ip->i_pdquot, resblks, 0,
- XFS_QMOPT_RES_REGBLKS);
- if (error)
- goto out_trans_cancel;
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
+ while (!done) {
error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
&done);
if (error)
goto out_trans_cancel;
+ if (done)
+ break;
- error = xfs_trans_commit(tp);
+ /* finish any deferred frees and roll the transaction */
+ error = xfs_defer_finish(&tp);
+ if (error)
+ goto out_trans_cancel;
}
+ error = xfs_trans_commit(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
out_trans_cancel:
xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
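Note the reworked loop shape: one transaction allocated up front, the ILOCK held across the whole collapse, and xfs_defer_finish() rolling the transaction between steps instead of a commit per iteration. Stripped down to a sketch (the step function is a hypothetical placeholder), the pattern is:

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);	/* 0: commit must not drop the ILOCK */
	while (!done) {
		error = example_shift_step(tp, ip, &done);  /* hypothetical */
		if (error)
			goto out_trans_cancel;
		if (done)
			break;
		error = xfs_defer_finish(&tp);	/* finish dfops, roll tp */
		if (error)
			goto out_trans_cancel;
	}
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);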
@@ -1309,35 +1322,41 @@ xfs_insert_file_space(
if (error)
return error;
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
+ XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
+ if (error)
+ return error;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
+
/*
* The extent shifting code works on extent granularity. So, if stop_fsb
* is not the starting block of extent, we need to split the extent at
* stop_fsb.
*/
- error = xfs_bmap_split_extent(ip, stop_fsb);
+ error = xfs_bmap_split_extent(tp, ip, stop_fsb);
if (error)
- return error;
+ goto out_trans_cancel;
- while (!error && !done) {
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
- &tp);
+ do {
+ error = xfs_defer_finish(&tp);
if (error)
- break;
+ goto out_trans_cancel;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
&done, stop_fsb);
if (error)
goto out_trans_cancel;
+ } while (!done);
- error = xfs_trans_commit(tp);
- }
-
+ error = xfs_trans_commit(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
out_trans_cancel:
xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
@@ -1605,12 +1624,12 @@ xfs_swap_extent_forks(
* event of a crash. Set the owner change log flags now and leave the
* bmbt scan as the last step.
*/
- if (ip->i_d.di_version == 3 &&
- ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
- (*target_log_flags) |= XFS_ILOG_DOWNER;
- if (tip->i_d.di_version == 3 &&
- tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
- (*src_log_flags) |= XFS_ILOG_DOWNER;
+ if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
+ if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+ (*target_log_flags) |= XFS_ILOG_DOWNER;
+ if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+ (*src_log_flags) |= XFS_ILOG_DOWNER;
+ }
/*
* Swap the data forks of the inodes
@@ -1645,7 +1664,7 @@ xfs_swap_extent_forks(
(*src_log_flags) |= XFS_ILOG_DEXT;
break;
case XFS_DINODE_FMT_BTREE:
- ASSERT(ip->i_d.di_version < 3 ||
+ ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
(*src_log_flags & XFS_ILOG_DOWNER));
(*src_log_flags) |= XFS_ILOG_DBROOT;
break;
@@ -1657,7 +1676,7 @@ xfs_swap_extent_forks(
break;
case XFS_DINODE_FMT_BTREE:
(*target_log_flags) |= XFS_ILOG_DBROOT;
- ASSERT(tip->i_d.di_version < 3 ||
+ ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
(*target_log_flags & XFS_ILOG_DOWNER));
break;
}
@@ -1721,6 +1740,7 @@ xfs_swap_extents(
int lock_flags;
uint64_t f;
int resblks = 0;
+ unsigned int flags = 0;
/*
* Lock the inodes against other IO, page faults and truncate to
@@ -1776,17 +1796,16 @@ xfs_swap_extents(
resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
/*
- * Handle the corner case where either inode might straddle the
- * btree format boundary. If so, the inode could bounce between
- * btree <-> extent format on unmap -> remap cycles, freeing and
- * allocating a bmapbt block each time.
+ * If either inode straddles a bmapbt block allocation boundary,
+ * the rmapbt algorithm triggers repeated allocs and frees as
+ * extents are remapped. This can exhaust the block reservation
+ * prematurely and cause shutdown. Return freed blocks to the
+ * transaction reservation to counter this behavior.
*/
- if (ipnext == (XFS_IFORK_MAXEXT(ip, w) + 1))
- resblks += XFS_IFORK_MAXEXT(ip, w);
- if (tipnext == (XFS_IFORK_MAXEXT(tip, w) + 1))
- resblks += XFS_IFORK_MAXEXT(tip, w);
+ flags |= XFS_TRANS_RES_FDBLKS;
}
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
+ &tp);
if (error)
goto out_unlock;
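XFS_TRANS_RES_FDBLKS tells the transaction to fold blocks it frees back into its own block reservation rather than returning them straight to the global pool, so the free/alloc ping-pong described in the comment cannot drain resblks mid-swap. A sketch of the opt-in, on the assumption that it is only needed when the rmapbt is enabled:

	unsigned int	flags = 0;

	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		flags |= XFS_TRANS_RES_FDBLKS;	/* keep freed blocks in tp */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
			&tp);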
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 1264ac63e4e5..4f18457aae2a 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1547,6 +1547,28 @@ xfs_buf_zero(
}
/*
+ * Log a message about and stale a buffer that a caller has decided is corrupt.
+ *
+ * This function should be called for the kinds of metadata corruption that
+ * cannot be detected by a verifier, such as incorrect inter-block relationship
+ * data. Do /not/ call this function from a verifier function.
+ *
+ * The buffer must be XBF_DONE prior to the call. Afterwards, the buffer will
+ * be marked stale, but b_error will not be set. The caller is responsible for
+ * releasing the buffer or fixing it.
+ */
+void
+__xfs_buf_mark_corrupt(
+ struct xfs_buf *bp,
+ xfs_failaddr_t fa)
+{
+ ASSERT(bp->b_flags & XBF_DONE);
+
+ xfs_buf_corruption_error(bp, fa);
+ xfs_buf_stale(bp);
+}
+
+/*
* Handling of buffer targets (buftargs).
*/
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index f6ce17d8d848..621467ab17c8 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -270,6 +270,8 @@ static inline int xfs_buf_submit(struct xfs_buf *bp)
}
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
+void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
+#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)
/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
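A hedged sketch of the intended call site: a reader whose verifier passed but whose cross-block check did not (the ops and check below are hypothetical names). The helper stales the buffer without setting b_error, so the caller still releases the buffer and picks the errno:

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, numblks,
			0, &bp, &example_buf_ops);	/* hypothetical ops */
	if (error)
		return error;
	if (!example_check_sibling_pointers(bp)) {	/* hypothetical */
		xfs_buf_mark_corrupt(bp);  /* log + stale, b_error untouched */
		xfs_trans_brelse(tp, bp);
		return -EFSCORRUPTED;
	}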
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index d74fbd1e9d3e..f98260ed6d51 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -328,7 +328,7 @@ xfs_buf_item_format(
* occurs during recovery.
*/
if (bip->bli_flags & XFS_BLI_INODE_BUF) {
- if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
+ if (xfs_sb_version_has_v3inode(&lip->li_mountp->m_sb) ||
!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
xfs_log_item_in_current_chkpt(lip)))
bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
@@ -956,7 +956,7 @@ xfs_buf_item_relse(
struct xfs_buf_log_item *bip = bp->b_log_item;
trace_xfs_buf_item_relse(bp, _RET_IP_);
- ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
+ ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));
bp->b_log_item = NULL;
if (list_empty(&bp->b_li_list))
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 3cbf248af51f..672286f1762f 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -48,7 +48,7 @@ static struct lock_class_key xfs_dquot_project_class;
*/
void
xfs_qm_dqdestroy(
- xfs_dquot_t *dqp)
+ struct xfs_dquot *dqp)
{
ASSERT(list_empty(&dqp->q_lru));
@@ -113,8 +113,8 @@ xfs_qm_adjust_dqlimits(
*/
void
xfs_qm_adjust_dqtimers(
- xfs_mount_t *mp,
- xfs_disk_dquot_t *d)
+ struct xfs_mount *mp,
+ struct xfs_disk_dquot *d)
{
ASSERT(d->d_id);
@@ -205,16 +205,18 @@ xfs_qm_adjust_dqtimers(
*/
STATIC void
xfs_qm_init_dquot_blk(
- xfs_trans_t *tp,
- xfs_mount_t *mp,
- xfs_dqid_t id,
- uint type,
- xfs_buf_t *bp)
+ struct xfs_trans *tp,
+ struct xfs_mount *mp,
+ xfs_dqid_t id,
+ uint type,
+ struct xfs_buf *bp)
{
struct xfs_quotainfo *q = mp->m_quotainfo;
- xfs_dqblk_t *d;
- xfs_dqid_t curid;
- int i;
+ struct xfs_dqblk *d;
+ xfs_dqid_t curid;
+ unsigned int qflag;
+ unsigned int blftype;
+ int i;
ASSERT(tp);
ASSERT(xfs_buf_islocked(bp));
@@ -238,11 +240,39 @@ xfs_qm_init_dquot_blk(
}
}
- xfs_trans_dquot_buf(tp, bp,
- (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
- ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
- XFS_BLF_GDQUOT_BUF)));
- xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
+ if (type & XFS_DQ_USER) {
+ qflag = XFS_UQUOTA_CHKD;
+ blftype = XFS_BLF_UDQUOT_BUF;
+ } else if (type & XFS_DQ_PROJ) {
+ qflag = XFS_PQUOTA_CHKD;
+ blftype = XFS_BLF_PDQUOT_BUF;
+ } else {
+ qflag = XFS_GQUOTA_CHKD;
+ blftype = XFS_BLF_GDQUOT_BUF;
+ }
+
+ xfs_trans_dquot_buf(tp, bp, blftype);
+
+ /*
+ * quotacheck uses delayed writes to update all the dquots on disk in an
+ * efficient manner instead of logging the individual dquot changes as
+ * they are made. However if we log the buffer allocated here and crash
+ * after quotacheck while the logged initialisation is still in the
+ * active region of the log, log recovery can replay the dquot buffer
+ * initialisation over the top of the checked dquots and corrupt quota
+ * accounting.
+ *
+ * To avoid this problem, quotacheck cannot log the initialised buffer.
+ * We must still dirty the buffer and write it back before the
+ * allocation transaction clears the log. Therefore, mark the buffer as
+ * ordered instead of logging it directly. This is safe for quotacheck
+ * because it detects and repairs allocated but uninitialized dquot blocks
+ * in the quota inodes.
+ */
+ if (!(mp->m_qflags & qflag))
+ xfs_trans_ordered_buf(tp, bp);
+ else
+ xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
/*
@@ -497,7 +527,7 @@ xfs_dquot_from_disk(
struct xfs_disk_dquot *ddqp = bp->b_addr + dqp->q_bufoffset;
/* copy everything from disk dquot to the incore dquot */
- memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
+ memcpy(&dqp->q_core, ddqp, sizeof(struct xfs_disk_dquot));
/*
* Reservation counters are defined as reservation plus current usage
@@ -829,11 +859,11 @@ xfs_qm_id_for_quotatype(
{
switch (type) {
case XFS_DQ_USER:
- return ip->i_d.di_uid;
+ return i_uid_read(VFS_I(ip));
case XFS_DQ_GROUP:
- return ip->i_d.di_gid;
+ return i_gid_read(VFS_I(ip));
case XFS_DQ_PROJ:
- return xfs_get_projid(ip);
+ return ip->i_d.di_projid;
}
ASSERT(0);
return 0;
@@ -989,7 +1019,7 @@ xfs_qm_dqput(
*/
void
xfs_qm_dqrele(
- xfs_dquot_t *dqp)
+ struct xfs_dquot *dqp)
{
if (!dqp)
return;
@@ -1018,8 +1048,8 @@ xfs_qm_dqflush_done(
struct xfs_buf *bp,
struct xfs_log_item *lip)
{
- xfs_dq_logitem_t *qip = (struct xfs_dq_logitem *)lip;
- xfs_dquot_t *dqp = qip->qli_dquot;
+ struct xfs_dq_logitem *qip = (struct xfs_dq_logitem *)lip;
+ struct xfs_dquot *dqp = qip->qli_dquot;
struct xfs_ail *ailp = lip->li_ailp;
/*
@@ -1105,8 +1135,8 @@ xfs_qm_dqflush(
* Get the buffer containing the on-disk dquot
*/
error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
- mp->m_quotainfo->qi_dqchunklen, 0, &bp,
- &xfs_dquot_buf_ops);
+ mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
+ &bp, &xfs_dquot_buf_ops);
if (error)
goto out_unlock;
@@ -1125,11 +1155,11 @@ xfs_qm_dqflush(
xfs_buf_relse(bp);
xfs_dqfunlock(dqp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- return -EIO;
+ return -EFSCORRUPTED;
}
/* This is the only portion of data that needs to persist */
- memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
+ memcpy(ddqp, &dqp->q_core, sizeof(struct xfs_disk_dquot));
/*
* Clear the dirty field and remember the flush lsn for later use.
@@ -1176,7 +1206,7 @@ xfs_qm_dqflush(
out_unlock:
xfs_dqfunlock(dqp);
- return -EIO;
+ return error;
}
/*
@@ -1187,8 +1217,8 @@ out_unlock:
*/
void
xfs_dqlock2(
- xfs_dquot_t *d1,
- xfs_dquot_t *d2)
+ struct xfs_dquot *d1,
+ struct xfs_dquot *d2)
{
if (d1 && d2) {
ASSERT(d1 != d2);
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index 4fe85709d55d..fe3e46df604b 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -30,33 +30,36 @@ enum {
/*
* The incore dquot structure
*/
-typedef struct xfs_dquot {
- uint dq_flags; /* various flags (XFS_DQ_*) */
- struct list_head q_lru; /* global free list of dquots */
- struct xfs_mount*q_mount; /* filesystem this relates to */
- uint q_nrefs; /* # active refs from inodes */
- xfs_daddr_t q_blkno; /* blkno of dquot buffer */
- int q_bufoffset; /* off of dq in buffer (# dquots) */
- xfs_fileoff_t q_fileoffset; /* offset in quotas file */
-
- xfs_disk_dquot_t q_core; /* actual usage & quotas */
- xfs_dq_logitem_t q_logitem; /* dquot log item */
- xfs_qcnt_t q_res_bcount; /* total regular nblks used+reserved */
- xfs_qcnt_t q_res_icount; /* total inos allocd+reserved */
- xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */
- xfs_qcnt_t q_prealloc_lo_wmark;/* prealloc throttle wmark */
- xfs_qcnt_t q_prealloc_hi_wmark;/* prealloc disabled wmark */
- int64_t q_low_space[XFS_QLOWSP_MAX];
- struct mutex q_qlock; /* quota lock */
- struct completion q_flush; /* flush completion queue */
- atomic_t q_pincount; /* dquot pin count */
- wait_queue_head_t q_pinwait; /* dquot pinning wait queue */
-} xfs_dquot_t;
+struct xfs_dquot {
+ uint dq_flags;
+ struct list_head q_lru;
+ struct xfs_mount *q_mount;
+ uint q_nrefs;
+ xfs_daddr_t q_blkno;
+ int q_bufoffset;
+ xfs_fileoff_t q_fileoffset;
+
+ struct xfs_disk_dquot q_core;
+ struct xfs_dq_logitem q_logitem;
+ /* total regular nblks used+reserved */
+ xfs_qcnt_t q_res_bcount;
+ /* total inos allocd+reserved */
+ xfs_qcnt_t q_res_icount;
+ /* total realtime blks used+reserved */
+ xfs_qcnt_t q_res_rtbcount;
+ xfs_qcnt_t q_prealloc_lo_wmark;
+ xfs_qcnt_t q_prealloc_hi_wmark;
+ int64_t q_low_space[XFS_QLOWSP_MAX];
+ struct mutex q_qlock;
+ struct completion q_flush;
+ atomic_t q_pincount;
+ struct wait_queue_head q_pinwait;
+};
/*
* Lock hierarchy for q_qlock:
* XFS_QLOCK_NORMAL is the implicit default,
- * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2
+ * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2
*/
enum {
XFS_QLOCK_NORMAL = 0,
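The NESTED subclass exists so lockdep will tolerate two q_qlocks held at once; the expectation is that xfs_dqlock2() sorts by dquot id and takes the higher-id lock in the nested class. A sketch of that assumed ordering discipline:

	if (be32_to_cpu(d1->q_core.d_id) > be32_to_cpu(d2->q_core.d_id))
		swap(d1, d2);			/* lower id locked first */
	mutex_lock(&d1->q_qlock);
	mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);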
@@ -64,21 +67,21 @@ enum {
};
/*
- * Manage the q_flush completion queue embedded in the dquot. This completion
+ * Manage the q_flush completion queue embedded in the dquot. This completion
* queue synchronizes processes attempting to flush the in-core dquot back to
* disk.
*/
-static inline void xfs_dqflock(xfs_dquot_t *dqp)
+static inline void xfs_dqflock(struct xfs_dquot *dqp)
{
wait_for_completion(&dqp->q_flush);
}
-static inline bool xfs_dqflock_nowait(xfs_dquot_t *dqp)
+static inline bool xfs_dqflock_nowait(struct xfs_dquot *dqp)
{
return try_wait_for_completion(&dqp->q_flush);
}
-static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
+static inline void xfs_dqfunlock(struct xfs_dquot *dqp)
{
complete(&dqp->q_flush);
}
@@ -112,7 +115,7 @@ static inline int xfs_this_quota_on(struct xfs_mount *mp, int type)
}
}
-static inline xfs_dquot_t *xfs_inode_dquot(struct xfs_inode *ip, int type)
+static inline struct xfs_dquot *xfs_inode_dquot(struct xfs_inode *ip, int type)
{
switch (type & XFS_DQ_ALLTYPES) {
case XFS_DQ_USER:
@@ -147,31 +150,30 @@ static inline bool xfs_dquot_lowsp(struct xfs_dquot *dqp)
#define XFS_QM_ISPDQ(dqp) ((dqp)->dq_flags & XFS_DQ_PROJ)
#define XFS_QM_ISGDQ(dqp) ((dqp)->dq_flags & XFS_DQ_GROUP)
-extern void xfs_qm_dqdestroy(xfs_dquot_t *);
-extern int xfs_qm_dqflush(struct xfs_dquot *, struct xfs_buf **);
-extern void xfs_qm_dqunpin_wait(xfs_dquot_t *);
-extern void xfs_qm_adjust_dqtimers(xfs_mount_t *,
- xfs_disk_dquot_t *);
-extern void xfs_qm_adjust_dqlimits(struct xfs_mount *,
- struct xfs_dquot *);
-extern xfs_dqid_t xfs_qm_id_for_quotatype(struct xfs_inode *ip,
- uint type);
-extern int xfs_qm_dqget(struct xfs_mount *mp, xfs_dqid_t id,
+void xfs_qm_dqdestroy(struct xfs_dquot *dqp);
+int xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf **bpp);
+void xfs_qm_dqunpin_wait(struct xfs_dquot *dqp);
+void xfs_qm_adjust_dqtimers(struct xfs_mount *mp,
+ struct xfs_disk_dquot *d);
+void xfs_qm_adjust_dqlimits(struct xfs_mount *mp,
+ struct xfs_dquot *d);
+xfs_dqid_t xfs_qm_id_for_quotatype(struct xfs_inode *ip, uint type);
+int xfs_qm_dqget(struct xfs_mount *mp, xfs_dqid_t id,
uint type, bool can_alloc,
struct xfs_dquot **dqpp);
-extern int xfs_qm_dqget_inode(struct xfs_inode *ip, uint type,
- bool can_alloc,
- struct xfs_dquot **dqpp);
-extern int xfs_qm_dqget_next(struct xfs_mount *mp, xfs_dqid_t id,
+int xfs_qm_dqget_inode(struct xfs_inode *ip, uint type,
+ bool can_alloc,
+ struct xfs_dquot **dqpp);
+int xfs_qm_dqget_next(struct xfs_mount *mp, xfs_dqid_t id,
uint type, struct xfs_dquot **dqpp);
-extern int xfs_qm_dqget_uncached(struct xfs_mount *mp,
- xfs_dqid_t id, uint type,
- struct xfs_dquot **dqpp);
-extern void xfs_qm_dqput(xfs_dquot_t *);
+int xfs_qm_dqget_uncached(struct xfs_mount *mp,
+ xfs_dqid_t id, uint type,
+ struct xfs_dquot **dqpp);
+void xfs_qm_dqput(struct xfs_dquot *dqp);
-extern void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);
+void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);
-extern void xfs_dquot_set_prealloc_limits(struct xfs_dquot *);
+void xfs_dquot_set_prealloc_limits(struct xfs_dquot *);
static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
{
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index d60647d7197b..baad1748d0d1 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -189,7 +189,8 @@ xfs_qm_dquot_logitem_push(
if (!xfs_buf_delwri_queue(bp, buffer_list))
rval = XFS_ITEM_FLUSHING;
xfs_buf_relse(bp);
- }
+ } else if (error == -EAGAIN)
+ rval = XFS_ITEM_LOCKED;
spin_lock(&lip->li_ailp->ail_lock);
out_unlock:
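With xfs_qm_dqflush() now taking the dquot buffer with XBF_TRYLOCK (see the xfs_dquot.c hunk above), a contended buffer surfaces as -EAGAIN instead of blocking xfsaild. Restated as a sketch, the push handler's contract becomes:

	error = xfs_qm_dqflush(dqp, &bp);	/* nonblocking buffer read */
	if (!error) {
		if (!xfs_buf_delwri_queue(bp, buffer_list))
			rval = XFS_ITEM_FLUSHING;
		xfs_buf_relse(bp);
	} else if (error == -EAGAIN) {
		rval = XFS_ITEM_LOCKED;	/* buffer busy: retry on next push */
	}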
@@ -307,36 +308,62 @@ xfs_qm_qoffend_logitem_committed(
{
struct xfs_qoff_logitem *qfe = QOFF_ITEM(lip);
struct xfs_qoff_logitem *qfs = qfe->qql_start_lip;
- struct xfs_ail *ailp = qfs->qql_item.li_ailp;
- /*
- * Delete the qoff-start logitem from the AIL.
- * xfs_trans_ail_delete() drops the AIL lock.
- */
- spin_lock(&ailp->ail_lock);
- xfs_trans_ail_delete(ailp, &qfs->qql_item, SHUTDOWN_LOG_IO_ERROR);
+ xfs_qm_qoff_logitem_relse(qfs);
- kmem_free(qfs->qql_item.li_lv_shadow);
kmem_free(lip->li_lv_shadow);
- kmem_free(qfs);
kmem_free(qfe);
return (xfs_lsn_t)-1;
}
+STATIC void
+xfs_qm_qoff_logitem_release(
+ struct xfs_log_item *lip)
+{
+ struct xfs_qoff_logitem *qoff = QOFF_ITEM(lip);
+
+ if (test_bit(XFS_LI_ABORTED, &lip->li_flags)) {
+ if (qoff->qql_start_lip)
+ xfs_qm_qoff_logitem_relse(qoff->qql_start_lip);
+ xfs_qm_qoff_logitem_relse(qoff);
+ }
+}
+
static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
.iop_size = xfs_qm_qoff_logitem_size,
.iop_format = xfs_qm_qoff_logitem_format,
.iop_committed = xfs_qm_qoffend_logitem_committed,
.iop_push = xfs_qm_qoff_logitem_push,
+ .iop_release = xfs_qm_qoff_logitem_release,
};
static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
.iop_size = xfs_qm_qoff_logitem_size,
.iop_format = xfs_qm_qoff_logitem_format,
.iop_push = xfs_qm_qoff_logitem_push,
+ .iop_release = xfs_qm_qoff_logitem_release,
};
/*
+ * Delete the quotaoff intent from the AIL and free it. On success,
+ * this should only be called for the start item. It can be used on
+ * either shutdown or abort.
+ */
+void
+xfs_qm_qoff_logitem_relse(
+ struct xfs_qoff_logitem *qoff)
+{
+ struct xfs_log_item *lip = &qoff->qql_item;
+
+ ASSERT(test_bit(XFS_LI_IN_AIL, &lip->li_flags) ||
+ test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
+ XFS_FORCED_SHUTDOWN(lip->li_mountp));
+ xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
+ kmem_free(lip->li_lv_shadow);
+ kmem_free(qoff);
+}
+
+/*
 * Allocate and initialize a quotaoff item of the correct quota type(s).
*/
struct xfs_qoff_logitem *
diff --git a/fs/xfs/xfs_dquot_item.h b/fs/xfs/xfs_dquot_item.h
index 1aed34ccdabc..2b86a43d7ce2 100644
--- a/fs/xfs/xfs_dquot_item.h
+++ b/fs/xfs/xfs_dquot_item.h
@@ -11,25 +11,28 @@ struct xfs_trans;
struct xfs_mount;
struct xfs_qoff_logitem;
-typedef struct xfs_dq_logitem {
- struct xfs_log_item qli_item; /* common portion */
- struct xfs_dquot *qli_dquot; /* dquot ptr */
- xfs_lsn_t qli_flush_lsn; /* lsn at last flush */
-} xfs_dq_logitem_t;
+struct xfs_dq_logitem {
+ struct xfs_log_item qli_item; /* common portion */
+ struct xfs_dquot *qli_dquot; /* dquot ptr */
+ xfs_lsn_t qli_flush_lsn; /* lsn at last flush */
+};
-typedef struct xfs_qoff_logitem {
- struct xfs_log_item qql_item; /* common portion */
- struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */
+struct xfs_qoff_logitem {
+ struct xfs_log_item qql_item; /* common portion */
+ struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */
unsigned int qql_flags;
-} xfs_qoff_logitem_t;
+};
-extern void xfs_qm_dquot_logitem_init(struct xfs_dquot *);
-extern xfs_qoff_logitem_t *xfs_qm_qoff_logitem_init(struct xfs_mount *,
- struct xfs_qoff_logitem *, uint);
-extern xfs_qoff_logitem_t *xfs_trans_get_qoff_item(struct xfs_trans *,
- struct xfs_qoff_logitem *, uint);
-extern void xfs_trans_log_quotaoff_item(struct xfs_trans *,
- struct xfs_qoff_logitem *);
+void xfs_qm_dquot_logitem_init(struct xfs_dquot *dqp);
+struct xfs_qoff_logitem *xfs_qm_qoff_logitem_init(struct xfs_mount *mp,
+ struct xfs_qoff_logitem *start,
+ uint flags);
+void xfs_qm_qoff_logitem_relse(struct xfs_qoff_logitem *);
+struct xfs_qoff_logitem *xfs_trans_get_qoff_item(struct xfs_trans *tp,
+ struct xfs_qoff_logitem *startqoff,
+ uint flags);
+void xfs_trans_log_quotaoff_item(struct xfs_trans *tp,
+ struct xfs_qoff_logitem *qlp);
#endif /* __XFS_DQUOT_ITEM_H__ */
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 849fd4476950..182b70464b71 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -329,19 +329,43 @@ xfs_corruption_error(
const char *tag,
int level,
struct xfs_mount *mp,
- void *buf,
+ const void *buf,
size_t bufsize,
const char *filename,
int linenum,
xfs_failaddr_t failaddr)
{
- if (level <= xfs_error_level)
+ if (buf && level <= xfs_error_level)
xfs_hex_dump(buf, bufsize);
xfs_error_report(tag, level, mp, filename, linenum, failaddr);
xfs_alert(mp, "Corruption detected. Unmount and run xfs_repair");
}
/*
+ * Complain about the kinds of metadata corruption that we can't detect from a
+ * verifier, such as incorrect inter-block relationship data. Does not set
+ * bp->b_error.
+ *
+ * Call xfs_buf_mark_corrupt, not this function.
+ */
+void
+xfs_buf_corruption_error(
+ struct xfs_buf *bp,
+ xfs_failaddr_t fa)
+{
+ struct xfs_mount *mp = bp->b_mount;
+
+ xfs_alert_tag(mp, XFS_PTAG_VERIFIER_ERROR,
+ "Metadata corruption detected at %pS, %s block 0x%llx",
+ fa, bp->b_ops->name, bp->b_bn);
+
+ xfs_alert(mp, "Unmount and run xfs_repair");
+
+ if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
+ xfs_stack_trace();
+}
+
+/*
* Warnings specifically for verifier errors. Differentiate CRC vs. invalid
* values, and omit the stack trace unless the error level is tuned high.
*/
@@ -350,7 +374,7 @@ xfs_buf_verifier_error(
struct xfs_buf *bp,
int error,
const char *name,
- void *buf,
+ const void *buf,
size_t bufsz,
xfs_failaddr_t failaddr)
{
@@ -402,7 +426,7 @@ xfs_inode_verifier_error(
struct xfs_inode *ip,
int error,
const char *name,
- void *buf,
+ const void *buf,
size_t bufsz,
xfs_failaddr_t failaddr)
{
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 602aa7d62b66..c6bb7d7a2161 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -12,16 +12,17 @@ extern void xfs_error_report(const char *tag, int level, struct xfs_mount *mp,
const char *filename, int linenum,
xfs_failaddr_t failaddr);
extern void xfs_corruption_error(const char *tag, int level,
- struct xfs_mount *mp, void *buf, size_t bufsize,
+ struct xfs_mount *mp, const void *buf, size_t bufsize,
const char *filename, int linenum,
xfs_failaddr_t failaddr);
+void xfs_buf_corruption_error(struct xfs_buf *bp, xfs_failaddr_t fa);
extern void xfs_buf_verifier_error(struct xfs_buf *bp, int error,
- const char *name, void *buf, size_t bufsz,
+ const char *name, const void *buf, size_t bufsz,
xfs_failaddr_t failaddr);
extern void xfs_verifier_error(struct xfs_buf *bp, int error,
xfs_failaddr_t failaddr);
extern void xfs_inode_verifier_error(struct xfs_inode *ip, int error,
- const char *name, void *buf, size_t bufsz,
+ const char *name, const void *buf, size_t bufsz,
xfs_failaddr_t failaddr);
#define XFS_ERROR_REPORT(e, lvl, mp) \
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index f1372f9046e3..5a4b0119143a 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -15,7 +15,6 @@
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
-#include "xfs_log.h"
#include "xfs_pnfs.h"
/*
@@ -221,18 +220,7 @@ STATIC int
xfs_fs_nfs_commit_metadata(
struct inode *inode)
{
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
- xfs_lsn_t lsn = 0;
-
- xfs_ilock(ip, XFS_ILOCK_SHARED);
- if (xfs_ipincount(ip))
- lsn = ip->i_itemp->ili_last_lsn;
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
- if (!lsn)
- return 0;
- return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
+ return xfs_log_force_inode(XFS_I(inode));
}
const struct export_operations xfs_export_operations = {
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index 2183d87be4cf..ef17c1f6db32 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -344,7 +344,6 @@ xfs_extent_busy_trim(
ASSERT(*len > 0);
spin_lock(&args->pag->pagb_lock);
-restart:
fbno = *bno;
flen = *len;
rbp = args->pag->pagb_tree.rb_node;
@@ -363,19 +362,6 @@ restart:
continue;
}
- /*
- * If this is a metadata allocation, try to reuse the busy
- * extent instead of trimming the allocation.
- */
- if (!xfs_alloc_is_userdata(args->datatype) &&
- !(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
- if (!xfs_extent_busy_update_extent(args->mp, args->pag,
- busyp, fbno, flen,
- false))
- goto restart;
- continue;
- }
-
if (bbno <= fbno) {
/* start overlap */
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index e44efc41a041..de3cdce892fd 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -21,7 +21,7 @@
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_trace.h"
-
+#include "xfs_error.h"
kmem_zone_t *xfs_efi_zone;
kmem_zone_t *xfs_efd_zone;
@@ -139,44 +139,6 @@ xfs_efi_item_release(
xfs_efi_release(EFI_ITEM(lip));
}
-static const struct xfs_item_ops xfs_efi_item_ops = {
- .iop_size = xfs_efi_item_size,
- .iop_format = xfs_efi_item_format,
- .iop_unpin = xfs_efi_item_unpin,
- .iop_release = xfs_efi_item_release,
-};
-
-
-/*
- * Allocate and initialize an efi item with the given number of extents.
- */
-struct xfs_efi_log_item *
-xfs_efi_init(
- struct xfs_mount *mp,
- uint nextents)
-
-{
- struct xfs_efi_log_item *efip;
- uint size;
-
- ASSERT(nextents > 0);
- if (nextents > XFS_EFI_MAX_FAST_EXTENTS) {
- size = (uint)(sizeof(xfs_efi_log_item_t) +
- ((nextents - 1) * sizeof(xfs_extent_t)));
- efip = kmem_zalloc(size, 0);
- } else {
- efip = kmem_zone_zalloc(xfs_efi_zone, 0);
- }
-
- xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
- efip->efi_format.efi_nextents = nextents;
- efip->efi_format.efi_id = (uintptr_t)(void *)efip;
- atomic_set(&efip->efi_next_extent, 0);
- atomic_set(&efip->efi_refcount, 2);
-
- return efip;
-}
-
/*
* Copy an EFI format buffer from the given buf, and into the destination
* EFI format structure.
@@ -228,6 +190,7 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
}
return 0;
}
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
return -EFSCORRUPTED;
}
@@ -411,41 +374,16 @@ xfs_extent_free_diff_items(
XFS_FSB_TO_AGNO(mp, rb->xefi_startblock);
}
-/* Get an EFI. */
-STATIC void *
-xfs_extent_free_create_intent(
- struct xfs_trans *tp,
- unsigned int count)
-{
- struct xfs_efi_log_item *efip;
-
- ASSERT(tp != NULL);
- ASSERT(count > 0);
-
- efip = xfs_efi_init(tp->t_mountp, count);
- ASSERT(efip != NULL);
-
- /*
- * Get a log_item_desc to point at the new item.
- */
- xfs_trans_add_item(tp, &efip->efi_item);
- return efip;
-}
-
/* Log a free extent to the intent item. */
STATIC void
xfs_extent_free_log_item(
struct xfs_trans *tp,
- void *intent,
- struct list_head *item)
+ struct xfs_efi_log_item *efip,
+ struct xfs_extent_free_item *free)
{
- struct xfs_efi_log_item *efip = intent;
- struct xfs_extent_free_item *free;
uint next_extent;
struct xfs_extent *extp;
- free = container_of(item, struct xfs_extent_free_item, xefi_list);
-
tp->t_flags |= XFS_TRANS_DIRTY;
set_bit(XFS_LI_DIRTY, &efip->efi_item.li_flags);
@@ -461,14 +399,35 @@ xfs_extent_free_log_item(
extp->ext_len = free->xefi_blockcount;
}
+static struct xfs_log_item *
+xfs_extent_free_create_intent(
+ struct xfs_trans *tp,
+ struct list_head *items,
+ unsigned int count,
+ bool sort)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_efi_log_item *efip = xfs_efi_init(mp, count);
+ struct xfs_extent_free_item *free;
+
+ ASSERT(count > 0);
+
+ xfs_trans_add_item(tp, &efip->efi_item);
+ if (sort)
+ list_sort(mp, items, xfs_extent_free_diff_items);
+ list_for_each_entry(free, items, xefi_list)
+ xfs_extent_free_log_item(tp, efip, free);
+ return &efip->efi_item;
+}
+
/* Get an EFD so we can process all the free extents. */
STATIC void *
xfs_extent_free_create_done(
struct xfs_trans *tp,
- void *intent,
+ struct xfs_log_item *intent,
unsigned int count)
{
- return xfs_trans_get_efd(tp, intent, count);
+ return xfs_trans_get_efd(tp, EFI_ITEM(intent), count);
}
/* Process a free extent. */
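create_intent now owns sorting and logging the whole item list, which is why the standalone diff_items/log_item hooks drop out of the defer op types below. The comparator handed to list_sort() receives the priv pointer (here the mount) and two list heads, and orders deferred frees by AG number so AGF locks are taken in ascending order. A sketch of that comparator contract, assuming this era's list_sort() signature:

	static int
	example_diff_items(
		void			*priv,
		struct list_head	*a,
		struct list_head	*b)
	{
		struct xfs_mount		*mp = priv;
		struct xfs_extent_free_item	*ra, *rb;

		ra = container_of(a, struct xfs_extent_free_item, xefi_list);
		rb = container_of(b, struct xfs_extent_free_item, xefi_list);
		/* <0 / 0 / >0: ascending AG number */
		return XFS_FSB_TO_AGNO(mp, ra->xefi_startblock) -
		       XFS_FSB_TO_AGNO(mp, rb->xefi_startblock);
	}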
@@ -494,9 +453,9 @@ xfs_extent_free_finish_item(
/* Abort all pending EFIs. */
STATIC void
xfs_extent_free_abort_intent(
- void *intent)
+ struct xfs_log_item *intent)
{
- xfs_efi_release(intent);
+ xfs_efi_release(EFI_ITEM(intent));
}
/* Cancel a free extent. */
@@ -512,10 +471,8 @@ xfs_extent_free_cancel_item(
const struct xfs_defer_op_type xfs_extent_free_defer_type = {
.max_items = XFS_EFI_MAX_FAST_EXTENTS,
- .diff_items = xfs_extent_free_diff_items,
.create_intent = xfs_extent_free_create_intent,
.abort_intent = xfs_extent_free_abort_intent,
- .log_item = xfs_extent_free_log_item,
.create_done = xfs_extent_free_create_done,
.finish_item = xfs_extent_free_finish_item,
.cancel_item = xfs_extent_free_cancel_item,
@@ -578,10 +535,8 @@ xfs_agfl_free_finish_item(
/* sub-type with special handling for AGFL deferred frees */
const struct xfs_defer_op_type xfs_agfl_free_defer_type = {
.max_items = XFS_EFI_MAX_FAST_EXTENTS,
- .diff_items = xfs_extent_free_diff_items,
.create_intent = xfs_extent_free_create_intent,
.abort_intent = xfs_extent_free_abort_intent,
- .log_item = xfs_extent_free_log_item,
.create_done = xfs_extent_free_create_done,
.finish_item = xfs_agfl_free_finish_item,
.cancel_item = xfs_extent_free_cancel_item,
@@ -593,9 +548,10 @@ const struct xfs_defer_op_type xfs_agfl_free_defer_type = {
*/
int
xfs_efi_recover(
- struct xfs_mount *mp,
- struct xfs_efi_log_item *efip)
+ struct xfs_efi_log_item *efip,
+ struct list_head *capture_list)
{
+ struct xfs_mount *mp = efip->efi_item.li_mountp;
struct xfs_efd_log_item *efdp;
struct xfs_trans *tp;
int i;
@@ -624,7 +580,7 @@ xfs_efi_recover(
*/
set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
xfs_efi_release(efip);
- return -EIO;
+ return -EFSCORRUPTED;
}
}
@@ -644,10 +600,76 @@ xfs_efi_recover(
}
set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
- error = xfs_trans_commit(tp);
- return error;
+
+ return xfs_defer_ops_capture_and_commit(tp, NULL, capture_list);
abort_error:
xfs_trans_cancel(tp);
return error;
}
+
+/* Relog an intent item to push the log tail forward. */
+static struct xfs_log_item *
+xfs_efi_item_relog(
+ struct xfs_log_item *intent,
+ struct xfs_trans *tp)
+{
+ struct xfs_efd_log_item *efdp;
+ struct xfs_efi_log_item *efip;
+ struct xfs_extent *extp;
+ unsigned int count;
+
+ count = EFI_ITEM(intent)->efi_format.efi_nextents;
+ extp = EFI_ITEM(intent)->efi_format.efi_extents;
+
+ tp->t_flags |= XFS_TRANS_DIRTY;
+ efdp = xfs_trans_get_efd(tp, EFI_ITEM(intent), count);
+ efdp->efd_next_extent = count;
+ memcpy(efdp->efd_format.efd_extents, extp, count * sizeof(*extp));
+ set_bit(XFS_LI_DIRTY, &efdp->efd_item.li_flags);
+
+ efip = xfs_efi_init(tp->t_mountp, count);
+ memcpy(efip->efi_format.efi_extents, extp, count * sizeof(*extp));
+ atomic_set(&efip->efi_next_extent, count);
+ xfs_trans_add_item(tp, &efip->efi_item);
+ set_bit(XFS_LI_DIRTY, &efip->efi_item.li_flags);
+ return &efip->efi_item;
+}
+
+static const struct xfs_item_ops xfs_efi_item_ops = {
+ .iop_size = xfs_efi_item_size,
+ .iop_format = xfs_efi_item_format,
+ .iop_unpin = xfs_efi_item_unpin,
+ .iop_release = xfs_efi_item_release,
+ .iop_relog = xfs_efi_item_relog,
+};
+
+/*
+ * Allocate and initialize an efi item with the given number of extents.
+ */
+struct xfs_efi_log_item *
+xfs_efi_init(
+ struct xfs_mount *mp,
+ uint nextents)
+
+{
+ struct xfs_efi_log_item *efip;
+ uint size;
+
+ ASSERT(nextents > 0);
+ if (nextents > XFS_EFI_MAX_FAST_EXTENTS) {
+ size = (uint)(sizeof(struct xfs_efi_log_item) +
+ ((nextents - 1) * sizeof(xfs_extent_t)));
+ efip = kmem_zalloc(size, 0);
+ } else {
+ efip = kmem_zone_zalloc(xfs_efi_zone, 0);
+ }
+
+ xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
+ efip->efi_format.efi_nextents = nextents;
+ efip->efi_format.efi_id = (uintptr_t)(void *)efip;
+ atomic_set(&efip->efi_next_extent, 0);
+ atomic_set(&efip->efi_refcount, 2);
+
+ return efip;
+}
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index 16aaab06d4ec..883f0f1d8989 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -50,25 +50,25 @@ struct kmem_zone;
* of commit failure or log I/O errors. Note that the EFD is not inserted in the
* AIL, so at this point both the EFI and EFD are freed.
*/
-typedef struct xfs_efi_log_item {
+struct xfs_efi_log_item {
struct xfs_log_item efi_item;
atomic_t efi_refcount;
atomic_t efi_next_extent;
unsigned long efi_flags; /* misc flags */
xfs_efi_log_format_t efi_format;
-} xfs_efi_log_item_t;
+};
/*
* This is the "extent free done" log item. It is used to log
* the fact that some extents earlier mentioned in an efi item
* have been freed.
*/
-typedef struct xfs_efd_log_item {
+struct xfs_efd_log_item {
struct xfs_log_item efd_item;
- xfs_efi_log_item_t *efd_efip;
+ struct xfs_efi_log_item *efd_efip;
uint efd_next_extent;
xfs_efd_log_format_t efd_format;
-} xfs_efd_log_item_t;
+};
/*
* Max number of extents in fast allocation path.
@@ -78,13 +78,13 @@ typedef struct xfs_efd_log_item {
extern struct kmem_zone *xfs_efi_zone;
extern struct kmem_zone *xfs_efd_zone;
-xfs_efi_log_item_t *xfs_efi_init(struct xfs_mount *, uint);
+struct xfs_efi_log_item *xfs_efi_init(struct xfs_mount *, uint);
int xfs_efi_copy_format(xfs_log_iovec_t *buf,
xfs_efi_log_format_t *dst_efi_fmt);
-void xfs_efi_item_free(xfs_efi_log_item_t *);
+void xfs_efi_item_free(struct xfs_efi_log_item *);
void xfs_efi_release(struct xfs_efi_log_item *);
-int xfs_efi_recover(struct xfs_mount *mp,
- struct xfs_efi_log_item *efip);
+int xfs_efi_recover(struct xfs_efi_log_item *efip,
+ struct list_head *capture_list);
#endif /* __XFS_EXTFREE_ITEM_H__ */
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 203065a64765..b651715da8c6 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -80,19 +80,9 @@ xfs_dir_fsync(
int datasync)
{
struct xfs_inode *ip = XFS_I(file->f_mapping->host);
- struct xfs_mount *mp = ip->i_mount;
- xfs_lsn_t lsn = 0;
trace_xfs_dir_fsync(ip);
-
- xfs_ilock(ip, XFS_ILOCK_SHARED);
- if (xfs_ipincount(ip))
- lsn = ip->i_itemp->ili_last_lsn;
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
- if (!lsn)
- return 0;
- return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
+ return xfs_log_force_inode(ip);
}
STATIC int
@@ -187,7 +177,12 @@ xfs_file_dio_aio_read(
file_accessed(iocb->ki_filp);
- xfs_ilock(ip, XFS_IOLOCK_SHARED);
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
+ return -EAGAIN;
+ } else {
+ xfs_ilock(ip, XFS_IOLOCK_SHARED);
+ }
ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
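The IOCB_NOWAIT branch is what lets preadv2(2) with RWF_NOWAIT (and io_uring) issue direct reads that fail fast rather than sleep on the IOLOCK. From userspace the contract looks roughly like this sketch, with fd, iov and off assumed already set up:

	ssize_t n = preadv2(fd, &iov, 1, off, RWF_NOWAIT);
	if (n < 0 && errno == EAGAIN) {
		/* Lock or data not immediately available; fall back to a
		 * blocking read, or requeue to a worker instead. */
		n = preadv2(fd, &iov, 1, off, 0);
	}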
@@ -995,6 +990,21 @@ xfs_file_fadvise(
return ret;
}
+/* Does this file, inode, or mount want synchronous writes? */
+static inline bool xfs_file_sync_writes(struct file *filp)
+{
+ struct xfs_inode *ip = XFS_I(file_inode(filp));
+
+ if (ip->i_mount->m_flags & XFS_MOUNT_WSYNC)
+ return true;
+ if (filp->f_flags & (__O_SYNC | O_DSYNC))
+ return true;
+ if (IS_SYNC(file_inode(filp)))
+ return true;
+
+ return false;
+}
+
STATIC loff_t
xfs_file_remap_range(
struct file *file_in,
@@ -1049,7 +1059,11 @@ xfs_file_remap_range(
ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
remap_flags);
+ if (ret)
+ goto out_unlock;
+ if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
+ xfs_log_force_inode(dest);
out_unlock:
xfs_reflink_remap_unlock(file_in, file_out);
if (ret)
@@ -1253,10 +1267,23 @@ xfs_filemap_pfn_mkwrite(
return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}
+static void
+xfs_filemap_map_pages(
+ struct vm_fault *vmf,
+ pgoff_t start_pgoff,
+ pgoff_t end_pgoff)
+{
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+
+ xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+ filemap_map_pages(vmf, start_pgoff, end_pgoff);
+ xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+}
+
static const struct vm_operations_struct xfs_file_vm_ops = {
.fault = xfs_filemap_fault,
.huge_fault = xfs_filemap_huge_fault,
- .map_pages = filemap_map_pages,
+ .map_pages = xfs_filemap_map_pages,
.page_mkwrite = xfs_filemap_page_mkwrite,
.pfn_mkwrite = xfs_filemap_pfn_mkwrite,
};
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 01c0933a4d10..79e8af8f4669 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -146,6 +146,7 @@ xfs_fsmap_owner_from_rmap(
dest->fmr_owner = XFS_FMR_OWN_FREE;
break;
default:
+ ASSERT(0);
return -EFSCORRUPTED;
}
return 0;
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index a1135b86e79f..f1451642ce38 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -289,6 +289,8 @@ xfs_reinit_inode(
uint64_t version = inode_peek_iversion(inode);
umode_t mode = inode->i_mode;
dev_t dev = inode->i_rdev;
+ kuid_t uid = inode->i_uid;
+ kgid_t gid = inode->i_gid;
error = inode_init_always(mp->m_super, inode);
@@ -297,6 +299,8 @@ xfs_reinit_inode(
inode_set_iversion_queried(inode, version);
inode->i_mode = mode;
inode->i_rdev = dev;
+ inode->i_uid = uid;
+ inode->i_gid = gid;
return error;
}
@@ -1430,7 +1434,7 @@ xfs_inode_match_id(
return 0;
if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
- xfs_get_projid(ip) != eofb->eof_prid)
+ ip->i_d.di_projid != eofb->eof_prid)
return 0;
return 1;
@@ -1454,7 +1458,7 @@ xfs_inode_match_id_union(
return 1;
if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
- xfs_get_projid(ip) == eofb->eof_prid)
+ ip->i_d.di_projid == eofb->eof_prid)
return 1;
return 0;
diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c
index 3ebd1b7f49d8..7d940b289db5 100644
--- a/fs/xfs/xfs_icreate_item.c
+++ b/fs/xfs/xfs_icreate_item.c
@@ -10,6 +10,7 @@
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_icreate_item.h"
+#include "xfs_log_priv.h"
#include "xfs_log.h"
kmem_zone_t *xfs_icreate_zone; /* inode create item zone */
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 7a9048c4c2f9..568a9332efd2 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -750,6 +750,7 @@ xfs_ialloc(
xfs_buf_t **ialloc_context,
xfs_inode_t **ipp)
{
+ struct inode *dir = pip ? VFS_I(pip) : NULL;
struct xfs_mount *mp = tp->t_mountp;
xfs_ino_t ino;
xfs_inode_t *ip;
@@ -795,26 +796,17 @@ xfs_ialloc(
return error;
ASSERT(ip != NULL);
inode = VFS_I(ip);
-
- /*
- * We always convert v1 inodes to v2 now - we only support filesystems
- * with >= v2 inode capability, so there is no reason for ever leaving
- * an inode in v1 format.
- */
- if (ip->i_d.di_version == 1)
- ip->i_d.di_version = 2;
-
- inode->i_mode = mode;
set_nlink(inode, nlink);
- ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
- ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
inode->i_rdev = rdev;
- xfs_set_projid(ip, prid);
+ ip->i_d.di_projid = prid;
- if (pip && XFS_INHERIT_GID(pip)) {
- ip->i_d.di_gid = pip->i_d.di_gid;
- if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
- inode->i_mode |= S_ISGID;
+ if (dir && !(dir->i_mode & S_ISGID) &&
+ (mp->m_flags & XFS_MOUNT_GRPID)) {
+ inode->i_uid = current_fsuid();
+ inode->i_gid = dir->i_gid;
+ inode->i_mode = mode;
+ } else {
+ inode_init_owner(inode, dir, mode);
}
/*
@@ -822,9 +814,8 @@ xfs_ialloc(
* ID or one of the supplementary group IDs, the S_ISGID bit is cleared
* (and only if the irix_sgid_inherit compatibility variable is set).
*/
- if ((irix_sgid_inherit) &&
- (inode->i_mode & S_ISGID) &&
- (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
+ if (irix_sgid_inherit &&
+ (inode->i_mode & S_ISGID) && !in_group_p(inode->i_gid))
inode->i_mode &= ~S_ISGID;
ip->i_d.di_size = 0;
@@ -841,7 +832,7 @@ xfs_ialloc(
ip->i_d.di_dmstate = 0;
ip->i_d.di_flags = 0;
- if (ip->i_d.di_version == 3) {
+ if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
inode_set_iversion(inode, 1);
ip->i_d.di_flags2 = 0;
ip->i_d.di_cowextsize = 0;
@@ -849,7 +840,6 @@ xfs_ialloc(
ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec;
}
-
flags = XFS_ILOG_CORE;
switch (mode & S_IFMT) {
case S_IFIFO:
@@ -902,20 +892,13 @@ xfs_ialloc(
ip->i_d.di_flags |= di_flags;
}
- if (pip &&
- (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
- pip->i_d.di_version == 3 &&
- ip->i_d.di_version == 3) {
- uint64_t di_flags2 = 0;
-
+ if (pip && (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY)) {
if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
- di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
+ ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
}
if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
- di_flags2 |= XFS_DIFLAG2_DAX;
-
- ip->i_d.di_flags2 |= di_flags2;
+ ip->i_d.di_flags2 |= XFS_DIFLAG2_DAX;
}
/* FALLTHROUGH */
case S_IFLNK:
@@ -1117,7 +1100,6 @@ xfs_bumplink(
{
xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
- ASSERT(ip->i_d.di_version > 1);
inc_nlink(VFS_I(ip));
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
@@ -1153,8 +1135,7 @@ xfs_create(
/*
* Make sure that we have allocated dquot(s) on disk.
*/
- error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
- xfs_kgid_to_gid(current_fsgid()), prid,
+ error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
&udqp, &gdqp, &pdqp);
if (error)
@@ -1304,8 +1285,7 @@ xfs_create_tmpfile(
/*
* Make sure that we have allocated dquot(s) on disk.
*/
- error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
- xfs_kgid_to_gid(current_fsgid()), prid,
+ error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
&udqp, &gdqp, &pdqp);
if (error)
@@ -1418,7 +1398,7 @@ xfs_link(
* the tree quota mechanism could be circumvented.
*/
if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
- (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
+ tdp->i_d.di_projid != sip->i_d.di_projid)) {
error = -EXDEV;
goto error_return;
}
@@ -1513,10 +1493,8 @@ xfs_itruncate_extents_flags(
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp = *tpp;
xfs_fileoff_t first_unmap_block;
- xfs_fileoff_t last_block;
xfs_filblks_t unmap_len;
int error = 0;
- int done = 0;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
@@ -1536,21 +1514,22 @@ xfs_itruncate_extents_flags(
* the end of the file (in a crash where the space is allocated
* but the inode size is not yet updated), simply remove any
* blocks which show up between the new EOF and the maximum
- * possible file size. If the first block to be removed is
- * beyond the maximum file size (ie it is the same as last_block),
- * then there is nothing to do.
+ * possible file size.
+ *
+ * We have to free all the blocks to the bmbt maximum offset, even if
+ * the page cache can't scale that far.
*/
first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
- last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
- if (first_unmap_block == last_block)
+ if (first_unmap_block >= XFS_MAX_FILEOFF) {
+ WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
return 0;
+ }
- ASSERT(first_unmap_block < last_block);
- unmap_len = last_block - first_unmap_block + 1;
- while (!done) {
+ unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
+ while (unmap_len > 0) {
ASSERT(tp->t_firstblock == NULLFSBLOCK);
- error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
- XFS_ITRUNC_MAX_EXTENTS, &done);
+ error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
+ flags, XFS_ITRUNC_MAX_EXTENTS);
if (error)
goto out;
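The done flag goes away because __xfs_bunmapi() updates the length in place: each call removes up to XFS_ITRUNC_MAX_EXTENTS extents and shrinks *unmap_len to what is still mapped, so the remaining length itself drives the loop. Sketched with those assumed semantics:

	xfs_filblks_t	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;

	while (unmap_len > 0) {
		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
				flags, XFS_ITRUNC_MAX_EXTENTS);
		if (error)
			goto out;
		/* defer finish / transaction roll elided, as above */
	}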
@@ -1570,7 +1549,7 @@ xfs_itruncate_extents_flags(
if (whichfork == XFS_DATA_FORK) {
/* Remove all pending CoW reservations. */
error = xfs_reflink_cancel_cow_blocks(ip, &tp,
- first_unmap_block, last_block, true);
+ first_unmap_block, XFS_MAX_FILEOFF, true);
if (error)
goto out;
@@ -2149,8 +2128,10 @@ xfs_iunlink_update_bucket(
* passed in because either we're adding or removing ourselves from the
* head of the list.
*/
- if (old_value == new_agino)
+ if (old_value == new_agino) {
+ xfs_buf_mark_corrupt(agibp);
return -EFSCORRUPTED;
+ }
agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
offset = offsetof(struct xfs_agi, agi_unlinked) +
@@ -2213,6 +2194,8 @@ xfs_iunlink_update_inode(
/* Make sure the old pointer isn't garbage. */
old_value = be32_to_cpu(dip->di_next_unlinked);
if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
+ sizeof(*dip), __this_address);
error = -EFSCORRUPTED;
goto out;
}
@@ -2224,8 +2207,11 @@ xfs_iunlink_update_inode(
*/
*old_next_agino = old_value;
if (old_value == next_agino) {
- if (next_agino != NULLAGINO)
+ if (next_agino != NULLAGINO) {
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
+ dip, sizeof(*dip), __this_address);
error = -EFSCORRUPTED;
+ }
goto out;
}
@@ -2276,8 +2262,10 @@ xfs_iunlink(
*/
next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
if (next_agino == agino ||
- !xfs_verify_agino_or_null(mp, agno, next_agino))
+ !xfs_verify_agino_or_null(mp, agno, next_agino)) {
+ xfs_buf_mark_corrupt(agibp);
return -EFSCORRUPTED;
+ }
if (next_agino != NULLAGINO) {
struct xfs_perag *pag;
@@ -2547,7 +2535,7 @@ xfs_ifree_cluster(
xfs_daddr_t blkno;
xfs_buf_t *bp;
xfs_inode_t *ip;
- xfs_inode_log_item_t *iip;
+ struct xfs_inode_log_item *iip;
struct xfs_log_item *lip;
struct xfs_perag *pag;
struct xfs_ino_geometry *igeo = M_IGEO(mp);
@@ -2584,8 +2572,10 @@ xfs_ifree_cluster(
mp->m_bsize * igeo->blocks_per_cluster,
XBF_UNMAPPED);
- if (!bp)
+ if (!bp) {
+ xfs_perag_put(pag);
return -ENOMEM;
+ }
/*
* This buffer may not have been correctly initialised as we
@@ -2607,7 +2597,7 @@ xfs_ifree_cluster(
*/
list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
if (lip->li_type == XFS_LI_INODE) {
- iip = (xfs_inode_log_item_t *)lip;
+ iip = (struct xfs_inode_log_item *)lip;
ASSERT(iip->ili_logged == 1);
lip->li_cb = xfs_istale_done;
xfs_trans_ail_copy_lsn(mp->m_ail,
@@ -3215,6 +3205,7 @@ xfs_rename(
struct xfs_trans *tp;
struct xfs_inode *wip = NULL; /* whiteout inode */
struct xfs_inode *inodes[__XFS_SORT_INODES];
+ int i;
int num_inodes = __XFS_SORT_INODES;
bool new_parent = (src_dp != target_dp);
bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
@@ -3288,7 +3279,7 @@ xfs_rename(
* tree quota mechanism would be circumvented.
*/
if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
- (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
+ target_dp->i_d.di_projid != src_ip->i_d.di_projid)) {
error = -EXDEV;
goto out_trans_cancel;
}
@@ -3327,6 +3318,30 @@ xfs_rename(
}
/*
+ * Lock the AGI buffers we need to handle bumping the nlink of the
+ * whiteout inode off the unlinked list and to handle dropping the
+ * nlink of the target inode. Per locking order rules, do this in
+ * increasing AG order and before directory block allocation tries to
+ * grab AGFs because we grab AGIs before AGFs.
+ *
+ * The (vfs) caller must ensure that if src is a directory then
+ * target_ip is either null or an empty directory.
+ */
+ for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
+ if (inodes[i] == wip ||
+ (inodes[i] == target_ip &&
+ (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
+ struct xfs_buf *bp;
+ xfs_agnumber_t agno;
+
+ agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino);
+ error = xfs_read_agi(mp, tp, agno, &bp);
+ if (error)
+ goto out_trans_cancel;
+ }
+ }
+
+ /*
* Directory entry creation below may acquire the AGF. Remove
* the whiteout from the unlinked list first to preserve correct
* AGI/AGF locking order. This dirties the transaction so failures
@@ -3796,7 +3811,6 @@ xfs_iflush_int(
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
ASSERT(iip != NULL && iip->ili_fields != 0);
- ASSERT(ip->i_d.di_version > 1);
/* set *dip = inode's place in the buffer */
dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
@@ -3857,7 +3871,7 @@ xfs_iflush_int(
* backwards compatibility with old kernels that predate logging all
* inode changes.
*/
- if (ip->i_d.di_version < 3)
+ if (!xfs_sb_version_has_v3inode(&mp->m_sb))
ip->i_d.di_flushiter++;
/* Check the inline fork data before we write out. */
@@ -3940,3 +3954,22 @@ xfs_irele(
trace_xfs_irele(ip, _RET_IP_);
iput(VFS_I(ip));
}
+
+/*
+ * Ensure all committed transactions touching the inode are written to the log.
+ */
+int
+xfs_log_force_inode(
+ struct xfs_inode *ip)
+{
+ xfs_lsn_t lsn = 0;
+
+ xfs_ilock(ip, XFS_ILOCK_SHARED);
+ if (xfs_ipincount(ip))
+ lsn = ip->i_itemp->ili_last_lsn;
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+ if (!lsn)
+ return 0;
+ return xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC, NULL);
+}
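This folds the open-coded "force the inode's last commit to the log" idiom out of xfs_dir_fsync() and xfs_fs_nfs_commit_metadata() (see the hunks above). Any caller that needs a just-committed change on stable storage reduces to a sketch like:

	/* No-op when the inode is not pinned by a committed transaction. */
	error = xfs_log_force_inode(ip);
	if (error)
		return error;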
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 558173f95a03..62b963d3b23d 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -177,30 +177,11 @@ xfs_iflags_test_and_set(xfs_inode_t *ip, unsigned short flags)
return ret;
}
-/*
- * Project quota id helpers (previously projid was 16bit only
- * and using two 16bit values to hold new 32bit projid was chosen
- * to retain compatibility with "old" filesystems).
- */
-static inline prid_t
-xfs_get_projid(struct xfs_inode *ip)
-{
- return (prid_t)ip->i_d.di_projid_hi << 16 | ip->i_d.di_projid_lo;
-}
-
-static inline void
-xfs_set_projid(struct xfs_inode *ip,
- prid_t projid)
-{
- ip->i_d.di_projid_hi = (uint16_t) (projid >> 16);
- ip->i_d.di_projid_lo = (uint16_t) (projid & 0xffff);
-}
-
static inline prid_t
xfs_get_initial_prid(struct xfs_inode *dp)
{
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
- return xfs_get_projid(dp);
+ return dp->i_d.di_projid;
return XFS_PROJID_DEFAULT;
}
@@ -441,6 +422,7 @@ int xfs_itruncate_extents_flags(struct xfs_trans **,
struct xfs_inode *, int, xfs_fsize_t, int);
void xfs_iext_realloc(xfs_inode_t *, int, int);
+int xfs_log_force_inode(struct xfs_inode *ip);
void xfs_iunpin_wait(xfs_inode_t *);
#define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index bb8f076805b9..83bf96b6cf5d 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -17,6 +17,7 @@
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
+#include "xfs_error.h"
#include <linux/iversion.h>
@@ -124,7 +125,7 @@ xfs_inode_item_size(
*nvecs += 2;
*nbytes += sizeof(struct xfs_inode_log_format) +
- xfs_log_dinode_size(ip->i_d.di_version);
+ xfs_log_dinode_size(ip->i_mount);
xfs_inode_item_data_fork_size(iip, nvecs, nbytes);
if (XFS_IFORK_Q(ip))
@@ -304,13 +305,11 @@ xfs_inode_to_log_dinode(
struct inode *inode = VFS_I(ip);
to->di_magic = XFS_DINODE_MAGIC;
-
- to->di_version = from->di_version;
to->di_format = from->di_format;
- to->di_uid = from->di_uid;
- to->di_gid = from->di_gid;
- to->di_projid_lo = from->di_projid_lo;
- to->di_projid_hi = from->di_projid_hi;
+ to->di_uid = i_uid_read(inode);
+ to->di_gid = i_gid_read(inode);
+ to->di_projid_lo = from->di_projid & 0xffff;
+ to->di_projid_hi = from->di_projid >> 16;
memset(to->di_pad, 0, sizeof(to->di_pad));
memset(to->di_pad3, 0, sizeof(to->di_pad3));
@@ -338,7 +337,8 @@ xfs_inode_to_log_dinode(
/* log a dummy value to ensure log structure is fully initialised */
to->di_next_unlinked = NULLAGINO;
- if (from->di_version == 3) {
+ if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
+ to->di_version = 3;
to->di_changecount = inode_peek_iversion(inode);
to->di_crtime.t_sec = from->di_crtime.t_sec;
to->di_crtime.t_nsec = from->di_crtime.t_nsec;
@@ -350,6 +350,7 @@ xfs_inode_to_log_dinode(
uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
to->di_flushiter = 0;
} else {
+ to->di_version = 2;
to->di_flushiter = from->di_flushiter;
}
}
@@ -369,7 +370,7 @@ xfs_inode_item_format_core(
dic = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_ICORE);
xfs_inode_to_log_dinode(ip, dic, ip->i_itemp->ili_item.li_lsn);
- xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_d.di_version));
+ xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_mount));
}
/*
@@ -394,8 +395,6 @@ xfs_inode_item_format(
struct xfs_log_iovec *vecp = NULL;
struct xfs_inode_log_format *ilf;
- ASSERT(ip->i_d.di_version > 1);
-
ilf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_IFORMAT);
ilf->ilf_type = XFS_LI_INODE;
ilf->ilf_ino = ip->i_ino;
@@ -731,29 +730,27 @@ xfs_iflush_done(
* holding the lock before removing the inode from the AIL.
*/
if (need_ail) {
- bool mlip_changed = false;
+ xfs_lsn_t tail_lsn = 0;
/* this is an opencoded batch version of xfs_trans_ail_delete */
spin_lock(&ailp->ail_lock);
list_for_each_entry(blip, &tmp, li_bio_list) {
if (INODE_ITEM(blip)->ili_logged &&
- blip->li_lsn == INODE_ITEM(blip)->ili_flush_lsn)
- mlip_changed |= xfs_ail_delete_one(ailp, blip);
- else {
+ blip->li_lsn == INODE_ITEM(blip)->ili_flush_lsn) {
+ /*
+ * xfs_ail_update_finish() only cares about the
+ * lsn of the first tail item removed; any
+ * others will be at the same or higher lsn so
+ * we just ignore them.
+ */
+ xfs_lsn_t lsn = xfs_ail_delete_one(ailp, blip);
+ if (!tail_lsn && lsn)
+ tail_lsn = lsn;
+ } else {
xfs_clear_li_failed(blip);
}
}
-
- if (mlip_changed) {
- if (!XFS_FORCED_SHUTDOWN(ailp->ail_mount))
- xlog_assign_tail_lsn_locked(ailp->ail_mount);
- if (list_empty(&ailp->ail_head))
- wake_up_all(&ailp->ail_empty);
- }
- spin_unlock(&ailp->ail_lock);
-
- if (mlip_changed)
- xfs_log_space_wake(ailp->ail_mount);
+ xfs_ail_update_finish(ailp, tail_lsn);
}
/*
@@ -782,7 +779,7 @@ xfs_iflush_abort(
xfs_inode_t *ip,
bool stale)
{
- xfs_inode_log_item_t *iip = ip->i_itemp;
+ struct xfs_inode_log_item *iip = ip->i_itemp;
if (iip) {
if (test_bit(XFS_LI_IN_AIL, &iip->ili_item.li_flags)) {
@@ -828,8 +825,10 @@ xfs_inode_item_format_convert(
{
struct xfs_inode_log_format_32 *in_f32 = buf->i_addr;
- if (buf->i_len != sizeof(*in_f32))
+ if (buf->i_len != sizeof(*in_f32)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
return -EFSCORRUPTED;
+ }
in_f->ilf_type = in_f32->ilf_type;
in_f->ilf_size = in_f32->ilf_size;
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index 07a60e74c39c..ad667fd4ae62 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -13,7 +13,7 @@ struct xfs_bmbt_rec;
struct xfs_inode;
struct xfs_mount;
-typedef struct xfs_inode_log_item {
+struct xfs_inode_log_item {
struct xfs_log_item ili_item; /* common portion */
struct xfs_inode *ili_inode; /* inode ptr */
xfs_lsn_t ili_flush_lsn; /* lsn at last flush */
@@ -23,7 +23,7 @@ typedef struct xfs_inode_log_item {
unsigned int ili_last_fields; /* fields when flushed */
unsigned int ili_fields; /* fields to be logged */
unsigned int ili_fsync_fields; /* logged since last fsync */
-} xfs_inode_log_item_t;
+};
static inline int xfs_inode_clean(xfs_inode_t *ip)
{
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 7b7a009425e2..e7356e527260 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1144,7 +1144,7 @@ xfs_fill_fsxattr(
fa->fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
fa->fsx_cowextsize = ip->i_d.di_cowextsize <<
ip->i_mount->m_sb.sb_blocklog;
- fa->fsx_projid = xfs_get_projid(ip);
+ fa->fsx_projid = ip->i_d.di_projid;
if (attr) {
if (ip->i_afp) {
@@ -1299,7 +1299,7 @@ xfs_ioctl_setattr_xflags(
/* diflags2 only valid for v3 inodes. */
di_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
- if (di_flags2 && ip->i_d.di_version < 3)
+ if (di_flags2 && !xfs_sb_version_has_v3inode(&mp->m_sb))
return -EINVAL;
ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags);
@@ -1510,8 +1510,7 @@ xfs_ioctl_setattr_check_cowextsize(
if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
return 0;
- if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb) ||
- ip->i_d.di_version != 3)
+ if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb))
return -EINVAL;
if (fa->fsx_cowextsize == 0)
@@ -1572,9 +1571,9 @@ xfs_ioctl_setattr(
* because the i_*dquot fields will get updated anyway.
*/
if (XFS_IS_QUOTA_ON(mp)) {
- code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
- ip->i_d.di_gid, fa->fsx_projid,
- XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
+ code = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid,
+ VFS_I(ip)->i_gid, fa->fsx_projid,
+ XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
if (code)
return code;
}
@@ -1597,7 +1596,7 @@ xfs_ioctl_setattr(
}
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
- xfs_get_projid(ip) != fa->fsx_projid) {
+ ip->i_d.di_projid != fa->fsx_projid) {
code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0);
if (code) /* out of quota */
@@ -1634,13 +1633,12 @@ xfs_ioctl_setattr(
VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);
/* Change the ownerships and register project quota modifications */
- if (xfs_get_projid(ip) != fa->fsx_projid) {
+ if (ip->i_d.di_projid != fa->fsx_projid) {
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
olddquot = xfs_qm_vop_chown(tp, ip,
&ip->i_pdquot, pdqp);
}
- ASSERT(ip->i_d.di_version > 1);
- xfs_set_projid(ip, fa->fsx_projid);
+ ip->i_d.di_projid = fa->fsx_projid;
}
/*
@@ -1652,7 +1650,7 @@ xfs_ioctl_setattr(
ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
else
ip->i_d.di_extsize = 0;
- if (ip->i_d.di_version == 3 &&
+ if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
(ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
ip->i_d.di_cowextsize = fa->fsx_cowextsize >>
mp->m_sb.sb_blocklog;
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 239c9548b156..70880422057d 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -765,6 +765,11 @@ xfs_iomap_write_unwritten(
*/
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
+ /* Attach dquots so that bmbt splits are accounted correctly. */
+ error = xfs_qm_dqattach(ip);
+ if (error)
+ return error;
+
do {
/*
* Set up a transaction to convert the range of extents
@@ -783,6 +788,11 @@ xfs_iomap_write_unwritten(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
+ error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
+ XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES);
+ if (error)
+ goto error_on_bmapi_transaction;
+
/*
* Modify the unwritten extent state of the buffer.
*/
@@ -1055,6 +1065,13 @@ xfs_file_iomap_begin(
trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
out_finish:
+ /*
+ * Writes that span EOF might trigger an IO size update on completion,
+ * so consider them to be dirty for the purposes of O_DSYNC even if
+ * there are no other metadata changes pending or made here.
+ */
+ if ((flags & IOMAP_WRITE) && offset + length > i_size_read(inode))
+ iomap->flags |= IOMAP_F_DIRTY;
return xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
out_found:
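The IOMAP_F_DIRTY hunk above hinges on a single predicate: a write whose end crosses the current inode size will need an on-disk size update at I/O completion, so O_DSYNC must treat it as dirty even when no other metadata changed. A standalone sketch of that test, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

/* True if the write [offset, offset + length) extends past isize. */
static bool demo_needs_size_update(long long offset, long long length,
				   long long isize)
{
	return offset + length > isize;
}

int main(void)
{
	printf("%d\n", demo_needs_size_update(4096, 8192, 8192));	/* 1 */
	printf("%d\n", demo_needs_size_update(0, 4096, 8192));		/* 0 */
	return 0;
}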
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ca8c763902b9..a7efc8896e5e 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -20,6 +20,7 @@
#include "xfs_symlink.h"
#include "xfs_dir2.h"
#include "xfs_iomap.h"
+#include "xfs_error.h"
#include <linux/xattr.h>
#include <linux/posix_acl.h>
@@ -470,17 +471,20 @@ xfs_vn_get_link_inline(
struct inode *inode,
struct delayed_call *done)
{
+ struct xfs_inode *ip = XFS_I(inode);
char *link;
- ASSERT(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE);
+ ASSERT(ip->i_df.if_flags & XFS_IFINLINE);
/*
* The VFS crashes on a NULL pointer, so return -EFSCORRUPTED if
* if_data is junk.
*/
- link = XFS_I(inode)->i_df.if_u1.if_data;
- if (!link)
+ link = ip->i_df.if_u1.if_data;
+ if (!link) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, ip->i_mount);
return ERR_PTR(-EFSCORRUPTED);
+ }
return link;
}
@@ -513,7 +517,7 @@ xfs_vn_getattr(
stat->blocks =
XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
- if (ip->i_d.di_version == 3) {
+ if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
if (request_mask & STATX_BTIME) {
stat->result_mask |= STATX_BTIME;
stat->btime.tv_sec = ip->i_d.di_crtime.t_sec;
@@ -662,9 +666,7 @@ xfs_setattr_nonsize(
*/
ASSERT(udqp == NULL);
ASSERT(gdqp == NULL);
- error = xfs_qm_vop_dqalloc(ip, xfs_kuid_to_uid(uid),
- xfs_kgid_to_gid(gid),
- xfs_get_projid(ip),
+ error = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_d.di_projid,
qflags, &udqp, &gdqp, NULL);
if (error)
return error;
@@ -733,7 +735,6 @@ xfs_setattr_nonsize(
olddquot1 = xfs_qm_vop_chown(tp, ip,
&ip->i_udquot, udqp);
}
- ip->i_d.di_uid = xfs_kuid_to_uid(uid);
inode->i_uid = uid;
}
if (!gid_eq(igid, gid)) {
@@ -745,7 +746,6 @@ xfs_setattr_nonsize(
olddquot2 = xfs_qm_vop_chown(tp, ip,
&ip->i_gdquot, gdqp);
}
- ip->i_d.di_gid = xfs_kgid_to_gid(gid);
inode->i_gid = gid;
}
}
@@ -1284,9 +1284,6 @@ xfs_setup_inode(
/* make the inode look hashed for the writeback code */
inode_fake_hash(inode);
- inode->i_uid = xfs_uid_to_kuid(ip->i_d.di_uid);
- inode->i_gid = xfs_gid_to_kgid(ip->i_d.di_gid);
-
i_size_write(inode, ip->i_d.di_size);
xfs_diflags_to_iflags(inode, ip);
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 884950adbd16..42e93779374c 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -84,10 +84,10 @@ xfs_bulkstat_one_int(
/* xfs_iget returns the following without needing
* further change.
*/
- buf->bs_projectid = xfs_get_projid(ip);
+ buf->bs_projectid = ip->i_d.di_projid;
buf->bs_ino = ino;
- buf->bs_uid = dic->di_uid;
- buf->bs_gid = dic->di_gid;
+ buf->bs_uid = i_uid_read(inode);
+ buf->bs_gid = i_gid_read(inode);
buf->bs_size = dic->di_size;
buf->bs_nlink = inode->i_nlink;
@@ -110,7 +110,7 @@ xfs_bulkstat_one_int(
buf->bs_forkoff = XFS_IFORK_BOFF(ip);
buf->bs_version = XFS_BULKSTAT_VERSION_V5;
- if (dic->di_version == 3) {
+ if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
if (dic->di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
buf->bs_cowextsize_blks = dic->di_cowextsize;
}
diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c
index aa375cf53021..cc5c0c835884 100644
--- a/fs/xfs/xfs_iwalk.c
+++ b/fs/xfs/xfs_iwalk.c
@@ -55,6 +55,9 @@ struct xfs_iwalk_ag {
/* Where do we start the traversal? */
xfs_ino_t startino;
+ /* What was the last inode number we saw when iterating the inobt? */
+ xfs_ino_t lastino;
+
/* Array of inobt records we cache. */
struct xfs_inobt_rec_incore *recs;
@@ -300,6 +303,9 @@ xfs_iwalk_ag_start(
return error;
XFS_WANT_CORRUPTED_RETURN(mp, *has_more == 1);
+ iwag->lastino = XFS_AGINO_TO_INO(mp, agno,
+ irec->ir_startino + XFS_INODES_PER_CHUNK - 1);
+
/*
* If the LE lookup yielded an inobt record before the cursor position,
* skip it and see if there's another one after it.
@@ -346,15 +352,17 @@ xfs_iwalk_run_callbacks(
struct xfs_mount *mp = iwag->mp;
struct xfs_trans *tp = iwag->tp;
struct xfs_inobt_rec_incore *irec;
- xfs_agino_t restart;
+ xfs_agino_t next_agino;
int error;
+ next_agino = XFS_INO_TO_AGINO(mp, iwag->lastino) + 1;
+
ASSERT(iwag->nr_recs > 0);
/* Delete cursor but remember the last record we cached... */
xfs_iwalk_del_inobt(tp, curpp, agi_bpp, 0);
irec = &iwag->recs[iwag->nr_recs - 1];
- restart = irec->ir_startino + XFS_INODES_PER_CHUNK - 1;
+ ASSERT(next_agino >= irec->ir_startino + XFS_INODES_PER_CHUNK);
error = xfs_iwalk_ag_recs(iwag);
if (error)
@@ -371,7 +379,7 @@ xfs_iwalk_run_callbacks(
if (error)
return error;
- return xfs_inobt_lookup(*curpp, restart, XFS_LOOKUP_GE, has_more);
+ return xfs_inobt_lookup(*curpp, next_agino, XFS_LOOKUP_GE, has_more);
}
/* Walk all inodes in a single AG, from @iwag->startino to the end of the AG. */
@@ -395,6 +403,7 @@ xfs_iwalk_ag(
while (!error && has_more) {
struct xfs_inobt_rec_incore *irec;
+ xfs_ino_t rec_fsino;
cond_resched();
if (xfs_pwork_want_abort(&iwag->pwork))
@@ -406,6 +415,15 @@ xfs_iwalk_ag(
if (error || !has_more)
break;
+ /* Make sure that we always move forward. */
+ rec_fsino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino);
+ if (iwag->lastino != NULLFSINO && iwag->lastino >= rec_fsino) {
+ ASSERT(iwag->lastino < rec_fsino);
+ error = -EFSCORRUPTED;
+ goto out;
+ }
+ iwag->lastino = rec_fsino + XFS_INODES_PER_CHUNK - 1;
+
/* No allocated inodes in this chunk; skip it. */
if (iwag->skip_empty && irec->ir_freecount == irec->ir_count) {
error = xfs_btree_increment(cur, 0, &has_more);
@@ -534,6 +552,7 @@ xfs_iwalk(
.trim_start = 1,
.skip_empty = 1,
.pwork = XFS_PWORK_SINGLE_THREADED,
+ .lastino = NULLFSINO,
};
xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino);
int error;
@@ -622,6 +641,7 @@ xfs_iwalk_threaded(
iwag->data = data;
iwag->startino = startino;
iwag->sz_recs = xfs_iwalk_prefetch(inode_records);
+ iwag->lastino = NULLFSINO;
xfs_pwork_queue(&pctl, &iwag->pwork);
startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
if (flags & XFS_INOBT_WALK_SAME_AG)
@@ -695,6 +715,7 @@ xfs_inobt_walk(
.startino = startino,
.sz_recs = xfs_inobt_walk_prefetch(inobt_records),
.pwork = XFS_PWORK_SINGLE_THREADED,
+ .lastino = NULLFSINO,
};
xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino);
int error;
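The lastino tracking added above guarantees forward progress through the inobt: any record that does not start beyond the last inode already visited is treated as corruption rather than looping forever. A standalone model of that invariant, using hypothetical demo_* names:

#include <assert.h>
#include <stdint.h>

#define DEMO_NULLINO	((uint64_t)-1)

/* Reject a record that fails to move the walk forward. */
static int demo_advance(uint64_t *lastino, uint64_t rec_start,
			uint64_t rec_len)
{
	if (*lastino != DEMO_NULLINO && *lastino >= rec_start)
		return -1;	/* stands in for -EFSCORRUPTED */
	*lastino = rec_start + rec_len - 1;
	return 0;
}

int main(void)
{
	uint64_t last = DEMO_NULLINO;

	assert(demo_advance(&last, 64, 64) == 0);	/* first chunk */
	assert(demo_advance(&last, 64, 64) == -1);	/* repeat: rejected */
	assert(demo_advance(&last, 128, 64) == 0);	/* moves forward */
	return 0;
}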
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index ca15105681ca..4f6f09157f0d 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -163,32 +163,6 @@ struct xstats {
extern struct xstats xfsstats;
-/* Kernel uid/gid conversion. These are used to convert to/from the on disk
- * uid_t/gid_t types to the kuid_t/kgid_t types that the kernel uses internally.
- * The conversion here is type only, the value will remain the same since we
- * are converting to the init_user_ns. The uid is later mapped to a particular
- * user namespace value when crossing the kernel/user boundary.
- */
-static inline uint32_t xfs_kuid_to_uid(kuid_t uid)
-{
- return from_kuid(&init_user_ns, uid);
-}
-
-static inline kuid_t xfs_uid_to_kuid(uint32_t uid)
-{
- return make_kuid(&init_user_ns, uid);
-}
-
-static inline uint32_t xfs_kgid_to_gid(kgid_t gid)
-{
- return from_kgid(&init_user_ns, gid);
-}
-
-static inline kgid_t xfs_gid_to_kgid(uint32_t gid)
-{
- return make_kgid(&init_user_ns, gid);
-}
-
static inline dev_t xfs_to_linux_dev_t(xfs_dev_t dev)
{
return MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev));
@@ -243,6 +217,12 @@ int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
#endif /* XFS_WARN */
#endif /* DEBUG */
+#define XFS_IS_CORRUPT(mp, expr) \
+ (unlikely(expr) ? xfs_corruption_error(#expr, XFS_ERRLEVEL_LOW, (mp), \
+ NULL, 0, __FILE__, __LINE__, \
+ __this_address), \
+ true : false)
+
#define STATIC static noinline
#ifdef CONFIG_XFS_RT
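The new XFS_IS_CORRUPT() macro above uses the comma operator so the corruption report fires before the whole expression evaluates to true, letting callers collapse the report-then-return pattern into one branch. A standalone user-space model of the same pattern, hypothetical DEMO_* names only:

#include <stdbool.h>
#include <stdio.h>

/* Report once, then evaluate to true so the caller can branch to an error. */
#define DEMO_IS_CORRUPT(expr) \
	((expr) ? (fprintf(stderr, "corruption: %s\n", #expr), true) : false)

static int demo_check(int len, int max)
{
	if (DEMO_IS_CORRUPT(len > max))
		return -1;	/* stands in for -EFSCORRUPTED */
	return 0;
}

int main(void)
{
	if (demo_check(3, 4) != 0)	/* in range: no report */
		return 1;
	if (demo_check(10, 4) == 0)	/* out of range: reported */
		return 1;
	return 0;
}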
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 7b0d9ad8cb1a..03a52b3919b8 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -369,6 +369,25 @@ xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
tic->t_res_num++;
}
+bool
+xfs_log_writable(
+ struct xfs_mount *mp)
+{
+ /*
+ * Never write to the log on norecovery mounts, if the block device is
+ * read-only, or if the filesystem is shut down. Read-only mounts still
+ * allow internal writes for log recovery and unmount purposes, so don't
+ * restrict that case here.
+ */
+ if (mp->m_flags & XFS_MOUNT_NORECOVERY)
+ return false;
+ if (xfs_readonly_buftarg(mp->m_log->l_targ))
+ return false;
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return false;
+ return true;
+}
+
/*
* Replenish the byte reservation required by moving the grant write head.
*/
@@ -837,19 +856,6 @@ xfs_log_write_unmount_record(
if (error)
goto out_err;
- /*
- * If we think the summary counters are bad, clear the unmount header
- * flag in the unmount record so that the summary counters will be
- * recalculated during log recovery at next mount. Refer to
- * xlog_check_unmount_rec for more details.
- */
- if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp,
- XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
- xfs_alert(mp, "%s: will fix summary counters at next mount",
- __func__);
- flags &= ~XLOG_UNMOUNT_TRANS;
- }
-
/* remove inited flag, and account for space used */
tic->t_flags = 0;
tic->t_curr_res -= sizeof(magic);
@@ -908,15 +914,8 @@ xfs_log_unmount_write(xfs_mount_t *mp)
#endif
int error;
- /*
- * Don't write out unmount record on norecovery mounts or ro devices.
- * Or, if we are doing a forced umount (typically because of IO errors).
- */
- if (mp->m_flags & XFS_MOUNT_NORECOVERY ||
- xfs_readonly_buftarg(log->l_targ)) {
- ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
+ if (!xfs_log_writable(mp))
return 0;
- }
error = xfs_log_force(mp, XFS_LOG_SYNC);
ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
@@ -932,6 +931,19 @@ xfs_log_unmount_write(xfs_mount_t *mp)
} while (iclog != first_iclog);
#endif
if (! (XLOG_FORCED_SHUTDOWN(log))) {
+ /*
+ * If we think the summary counters are bad, avoid writing the
+ * unmount record to force log recovery at next mount, after
+ * which the summary counters will be recalculated. Refer to
+ * xlog_check_unmount_rec for more details.
+ */
+ if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS),
+ mp, XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
+ xfs_alert(mp,
+ "%s: will fix summary counters at next mount",
+ __func__);
+ return 0;
+ }
xfs_log_write_unmount_record(mp);
} else {
/*
@@ -1537,14 +1549,14 @@ xlog_commit_record(
}
/*
- * Push on the buffer cache code if we ever use more than 75% of the on-disk
- * log space. This code pushes on the lsn which would supposedly free up
- * the 25% which we want to leave free. We may need to adopt a policy which
- * pushes on an lsn which is further along in the log once we reach the high
- * water mark. In this manner, we would be creating a low water mark.
+ * Compute the LSN that we'd need to push the log tail towards in order to have
+ * (a) enough on-disk log space to log the number of bytes specified, (b) at
+ * least 25% of the log space free, and (c) at least 256 blocks free. If the
+ * log free space already meets all three thresholds, this function returns
+ * NULLCOMMITLSN.
*/
-STATIC void
-xlog_grant_push_ail(
+xfs_lsn_t
+xlog_grant_push_threshold(
struct xlog *log,
int need_bytes)
{
@@ -1570,7 +1582,7 @@ xlog_grant_push_ail(
free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
free_threshold = max(free_threshold, 256);
if (free_blocks >= free_threshold)
- return;
+ return NULLCOMMITLSN;
xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
&threshold_block);
@@ -1590,13 +1602,33 @@ xlog_grant_push_ail(
if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
threshold_lsn = last_sync_lsn;
+ return threshold_lsn;
+}
+
+/*
+ * Push the tail of the log if we need to do so to maintain the free log space
+ * thresholds set out by xlog_grant_push_threshold. We may need to adopt a
+ * policy which pushes on an lsn which is further along in the log once we
+ * reach the high water mark. In this manner, we would be creating a low water
+ * mark.
+ */
+STATIC void
+xlog_grant_push_ail(
+ struct xlog *log,
+ int need_bytes)
+{
+ xfs_lsn_t threshold_lsn;
+
+ threshold_lsn = xlog_grant_push_threshold(log, need_bytes);
+ if (threshold_lsn == NULLCOMMITLSN || XLOG_FORCED_SHUTDOWN(log))
+ return;
+
/*
* Get the transaction layer to kick the dirty buffers out to
* disk asynchronously. No point in trying to do this if
* the filesystem is shutting down.
*/
- if (!XLOG_FORCED_SHUTDOWN(log))
- xfs_ail_push(log->l_ailp, threshold_lsn);
+ xfs_ail_push(log->l_ailp, threshold_lsn);
}
/*
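The rewritten comment above names three conditions for the push target: room for the requested bytes, at least 25% of the log free, and at least 256 blocks free. A standalone sketch of that max-of-three computation, working in 512-byte basic blocks with illustrative names:

#include <stdio.h>

static int demo_free_threshold(int need_bb, int log_size_bb)
{
	int t = need_bb;			/* space for this request */

	if (t < (log_size_bb >> 2))
		t = log_size_bb >> 2;		/* keep >= 25% of the log free */
	if (t < 256)
		t = 256;			/* never less than 256 blocks */
	return t;
}

int main(void)
{
	/* 64MiB log (131072 BB): the 25% rule (32768 BB) dominates here. */
	printf("%d\n", demo_free_threshold(1024, 131072));
	return 0;
}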
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 84e06805160f..dc9229e7ddaa 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -132,6 +132,7 @@ int xfs_log_reserve(struct xfs_mount *mp,
int xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
void xfs_log_unmount(struct xfs_mount *mp);
int xfs_log_force_umount(struct xfs_mount *mp, int logerror);
+bool xfs_log_writable(struct xfs_mount *mp);
struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void xfs_log_ticket_put(struct xlog_ticket *ticket);
@@ -146,4 +147,6 @@ void xfs_log_quiesce(struct xfs_mount *mp);
bool xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);
bool xfs_log_in_recovery(struct xfs_mount *);
+xfs_lsn_t xlog_grant_push_threshold(struct xlog *log, int need_bytes);
+
#endif /* __XFS_LOG_H__ */
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index ef652abd112c..ae9b8efcfa54 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -671,6 +671,12 @@ xlog_cil_push(
ASSERT(push_seq <= ctx->sequence);
/*
+ * Wake up any background push waiters now this context is being pushed.
+ */
+ if (ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
+ wake_up_all(&cil->xc_push_wait);
+
+ /*
* Check if we've anything to push. If there is nothing, then we don't
* move on to a new sequence number and so we have to be able to push
* this sequence again later.
@@ -740,7 +746,7 @@ xlog_cil_push(
/*
* initialise the new context and attach it to the CIL. Then attach
- * the current context to the CIL committing lsit so it can be found
+ * the current context to the CIL committing list so it can be found
* during log forces to extract the commit lsn of the sequence that
* needs to be forced.
*/
@@ -900,7 +906,7 @@ xlog_cil_push_work(
*/
static void
xlog_cil_push_background(
- struct xlog *log)
+ struct xlog *log) __releases(cil->xc_ctx_lock)
{
struct xfs_cil *cil = log->l_cilp;
@@ -914,14 +920,36 @@ xlog_cil_push_background(
* don't do a background push if we haven't used up all the
* space available yet.
*/
- if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
+ if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) {
+ up_read(&cil->xc_ctx_lock);
return;
+ }
spin_lock(&cil->xc_push_lock);
if (cil->xc_push_seq < cil->xc_current_sequence) {
cil->xc_push_seq = cil->xc_current_sequence;
queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
}
+
+ /*
+ * Drop the context lock now; we can't hold it if we need to sleep
+ * because we are over the blocking threshold. The push_lock is still
+ * held, so blocking threshold sleep/wakeup is still correctly
+ * serialised here.
+ */
+ up_read(&cil->xc_ctx_lock);
+
+ /*
+ * If we are well over the space limit, throttle the work that is being
+ * done until the push work on this context has begun.
+ */
+ if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) {
+ trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
+ ASSERT(cil->xc_ctx->space_used < log->l_logsize);
+ xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
+ return;
+ }
+
spin_unlock(&cil->xc_push_lock);
}
@@ -1038,9 +1066,9 @@ xfs_log_commit_cil(
if (lip->li_ops->iop_committing)
lip->li_ops->iop_committing(lip, xc_commit_lsn);
}
- xlog_cil_push_background(log);
- up_read(&cil->xc_ctx_lock);
+ /* xlog_cil_push_background() releases cil->xc_ctx_lock */
+ xlog_cil_push_background(log);
}
/*
@@ -1150,21 +1178,19 @@ out_shutdown:
*/
bool
xfs_log_item_in_current_chkpt(
- struct xfs_log_item *lip)
+ struct xfs_log_item *lip)
{
- struct xfs_cil_ctx *ctx;
+ struct xfs_cil *cil = lip->li_mountp->m_log->l_cilp;
if (list_empty(&lip->li_cil))
return false;
- ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;
-
/*
* li_seq is written on the first commit of a log item to record the
* first checkpoint it is written to. Hence if it is different to the
* current sequence, we're in a new checkpoint.
*/
- if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
+ if (XFS_LSN_CMP(lip->li_seq, READ_ONCE(cil->xc_current_sequence)) != 0)
return false;
return true;
}
@@ -1194,6 +1220,7 @@ xlog_cil_init(
INIT_LIST_HEAD(&cil->xc_committing);
spin_lock_init(&cil->xc_cil_lock);
spin_lock_init(&cil->xc_push_lock);
+ init_waitqueue_head(&cil->xc_push_wait);
init_rwsem(&cil->xc_ctx_lock);
init_waitqueue_head(&cil->xc_commit_wait);
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index b880c23cb6e4..3a5d7fb09c43 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -280,6 +280,7 @@ struct xfs_cil {
wait_queue_head_t xc_commit_wait;
xfs_lsn_t xc_current_sequence;
struct work_struct xc_push_work;
+ wait_queue_head_t xc_push_wait; /* background push throttle */
} ____cacheline_aligned_in_smp;
/*
@@ -323,13 +324,53 @@ struct xfs_cil {
* tries to keep 25% of the log free, so we need to keep below that limit or we
* risk running out of free log space to start any new transactions.
*
- * In order to keep background CIL push efficient, we will set a lower
- * threshold at which background pushing is attempted without blocking current
- * transaction commits. A separate, higher bound defines when CIL pushes are
- * enforced to ensure we stay within our maximum checkpoint size bounds.
- * threshold, yet give us plenty of space for aggregation on large logs.
+ * In order to keep background CIL push efficient, we only need to ensure the
+ * CIL is large enough to maintain sufficient in-memory relogging to avoid
+ * repeated physical writes of frequently modified metadata. If we allow the CIL
+ * to grow to a substantial fraction of the log, then we may be pinning hundreds
+ * of megabytes of metadata in memory until the CIL flushes. This can cause
+ * issues when we are running low on memory - pinned memory cannot be reclaimed,
+ * and the CIL consumes a lot of memory. Hence we need to set an upper physical
+ * size limit for the CIL that limits the maximum amount of memory pinned by the
+ * CIL but does not limit performance by reducing relogging efficiency
+ * significantly.
+ *
+ * As such, the CIL push threshold ends up being the smaller of two thresholds:
+ * - a threshold large enough that it allows CIL to be pushed and progress to be
+ * made without excessive blocking of incoming transaction commits. This is
+ * defined to be 12.5% of the log space - half the 25% push threshold of the
+ * AIL.
+ * - small enough that it doesn't pin excessive amounts of memory but maintains
+ * close to peak relogging efficiency. This is defined to be 16x the iclog
+ * buffer window (32MB) as measurements have shown this to be roughly the
+ * point of diminishing performance increases under highly concurrent
+ * modification workloads.
+ *
+ * To prevent the CIL from overflowing upper commit size bounds, we introduce a
+ * new threshold at which we block committing transactions until the background
+ * CIL commit commences and switches to a new context. While this is not a hard
+ * limit, it forces the process committing a transaction to the CIL to block and
+ * yield the CPU, giving the CIL push work a chance to be scheduled and start
+ * work. This prevents a process running lots of transactions from overfilling
+ * the CIL because it is not yielding the CPU. We set the blocking limit at
+ * twice the background push space threshold so we keep in line with the AIL
+ * push thresholds.
+ *
+ * Note: this is not a -hard- limit as blocking is applied after the transaction
+ * is inserted into the CIL and the push has been triggered. It is largely a
+ * throttling mechanism that allows the CIL push to be scheduled and run. A hard
+ * limit will be difficult to implement without introducing global serialisation
+ * in the CIL commit fast path, and it's not at all clear that we actually need
+ * such hard limits given the ~7 years we've run without a hard limit before
+ * finding the first situation where a checkpoint size overflow actually
+ * occurred. Hence the simple throttle, and an ASSERT check to tell us that
+ * we've overrun the max size.
*/
-#define XLOG_CIL_SPACE_LIMIT(log) (log->l_logsize >> 3)
+#define XLOG_CIL_SPACE_LIMIT(log) \
+ min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4)
+
+#define XLOG_CIL_BLOCKING_SPACE_LIMIT(log) \
+ (XLOG_CIL_SPACE_LIMIT(log) * 2)
/*
* ticket grant locks, queues and accounting have their own cachlines
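For concreteness, a standalone sketch of the two limits defined above under assumed geometry; the 2 GiB log and 2 MiB iclog window are illustrative numbers, not values taken from this patch. With them, the 16x iclog cap wins over the 12.5% rule, so background pushes start at 32 MiB and committers begin throttling at 64 MiB:

#include <stdio.h>

int main(void)
{
	long long logsize = 2048LL << 20;	/* 2 GiB log, assumed */
	long long window = 2LL << 20;		/* 2 MiB iclog window, assumed */
	long long bg = logsize >> 3;		/* 12.5% of the log: 256 MiB */
	long long cap = window << 4;		/* 16x the window: 32 MiB */
	long long limit = bg < cap ? bg : cap;	/* background push threshold */

	/* Prints "push at 32 MiB, block at 64 MiB". */
	printf("push at %lld MiB, block at %lld MiB\n",
	       limit >> 20, (limit * 2) >> 20);
	return 0;
}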
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index c1a514ffff55..d9b906d75dfa 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -471,7 +471,7 @@ xlog_find_verify_log_record(
xfs_warn(log->l_mp,
"Log inconsistent (didn't find previous header)");
ASSERT(0);
- error = -EIO;
+ error = -EFSCORRUPTED;
goto out;
}
@@ -1347,10 +1347,11 @@ xlog_find_tail(
error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
&rhead_blk, &rhead, &wrapped);
if (error < 0)
- return error;
+ goto done;
if (!error) {
xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
- return -EIO;
+ error = -EFSCORRUPTED;
+ goto done;
}
*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
@@ -2205,6 +2206,8 @@ xlog_recover_get_buf_lsn(
case XFS_ABTC_MAGIC:
case XFS_RMAP_CRC_MAGIC:
case XFS_REFC_CRC_MAGIC:
+ case XFS_FIBT_CRC_MAGIC:
+ case XFS_FIBT_MAGIC:
case XFS_IBT_CRC_MAGIC:
case XFS_IBT_MAGIC: {
struct xfs_btree_block *btb = blk;
@@ -2576,6 +2579,7 @@ xlog_recover_do_reg_buffer(
int bit;
int nbits;
xfs_failaddr_t fa;
+ const size_t size_disk_dquot = sizeof(struct xfs_disk_dquot);
trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
@@ -2618,7 +2622,7 @@ xlog_recover_do_reg_buffer(
"XFS: NULL dquot in %s.", __func__);
goto next;
}
- if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
+ if (item->ri_buf[i].i_len < size_disk_dquot) {
xfs_alert(mp,
"XFS: dquot too small (%d) in %s.",
item->ri_buf[i].i_len, __func__);
@@ -2779,6 +2783,16 @@ xlog_recover_buffer_pass2(
if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
trace_xfs_log_recover_buf_skip(log, buf_f);
xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
+
+ /*
+ * We're skipping replay of this buffer log item due to the log
+ * item LSN being behind the ondisk buffer. Verify the buffer
+ * contents since we aren't going to run the write verifier.
+ */
+ if (bp->b_ops) {
+ bp->b_ops->verify_read(bp);
+ error = bp->b_error;
+ }
goto out_release;
}
@@ -2875,8 +2889,8 @@ xfs_recover_inode_owner_change(
return -ENOMEM;
/* instantiate the inode */
+ ASSERT(dip->di_version >= 3);
xfs_inode_from_disk(ip, dip);
- ASSERT(ip->i_d.di_version >= 3);
error = xfs_iformat_fork(ip, dip);
if (error)
@@ -3014,7 +3028,7 @@ xlog_recover_inode_pass2(
* superblock flag to determine whether we need to look at di_flushiter
* to skip replay when the on disk inode is newer than the log one
*/
- if (!xfs_sb_version_hascrc(&mp->m_sb) &&
+ if (!xfs_sb_version_has_v3inode(&mp->m_sb) &&
ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
/*
* Deal with the wrap case, DI_MAX_FLUSH is less
@@ -3085,7 +3099,7 @@ xlog_recover_inode_pass2(
error = -EFSCORRUPTED;
goto out_release;
}
- isize = xfs_log_dinode_size(ldip->di_version);
+ isize = xfs_log_dinode_size(mp);
if (unlikely(item->ri_buf[1].i_len > isize)) {
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
XFS_ERRLEVEL_LOW, mp, ldip,
@@ -3166,7 +3180,7 @@ xlog_recover_inode_pass2(
default:
xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
ASSERT(0);
- error = -EIO;
+ error = -EFSCORRUPTED;
goto out_release;
}
}
@@ -3247,12 +3261,12 @@ xlog_recover_dquot_pass2(
recddq = item->ri_buf[1].i_addr;
if (recddq == NULL) {
xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
- return -EIO;
+ return -EFSCORRUPTED;
}
- if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
+ if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot)) {
xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
item->ri_buf[1].i_len, __func__);
- return -EIO;
+ return -EFSCORRUPTED;
}
/*
@@ -3279,7 +3293,7 @@ xlog_recover_dquot_pass2(
if (fa) {
xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS",
dq_f->qlf_id, fa);
- return -EIO;
+ return -EFSCORRUPTED;
}
ASSERT(dq_f->qlf_len == 1);
@@ -3382,7 +3396,7 @@ xlog_recover_efd_pass2(
struct xlog_recover_item *item)
{
xfs_efd_log_format_t *efd_formatp;
- xfs_efi_log_item_t *efip = NULL;
+ struct xfs_efi_log_item *efip = NULL;
struct xfs_log_item *lip;
uint64_t efi_id;
struct xfs_ail_cursor cur;
@@ -3403,7 +3417,7 @@ xlog_recover_efd_pass2(
lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
while (lip != NULL) {
if (lip->li_type == XFS_LI_EFI) {
- efip = (xfs_efi_log_item_t *)lip;
+ efip = (struct xfs_efi_log_item *)lip;
if (efip->efi_format.efi_id == efi_id) {
/*
* Drop the EFD reference to the EFI. This
@@ -3537,6 +3551,7 @@ xfs_cui_copy_format(
memcpy(dst_cui_fmt, src_cui_fmt, len);
return 0;
}
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
return -EFSCORRUPTED;
}
@@ -3601,8 +3616,10 @@ xlog_recover_cud_pass2(
struct xfs_ail *ailp = log->l_ailp;
cud_formatp = item->ri_buf[0].i_addr;
- if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format))
+ if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
return -EFSCORRUPTED;
+ }
cui_id = cud_formatp->cud_cui_id;
/*
@@ -3654,6 +3671,7 @@ xfs_bui_copy_format(
memcpy(dst_bui_fmt, src_bui_fmt, len);
return 0;
}
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
return -EFSCORRUPTED;
}
@@ -3677,8 +3695,10 @@ xlog_recover_bui_pass2(
bui_formatp = item->ri_buf[0].i_addr;
- if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
+ if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
return -EFSCORRUPTED;
+ }
buip = xfs_bui_init(mp);
error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
if (error) {
@@ -3720,8 +3740,10 @@ xlog_recover_bud_pass2(
struct xfs_ail *ailp = log->l_ailp;
bud_formatp = item->ri_buf[0].i_addr;
- if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format))
+ if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
return -EFSCORRUPTED;
+ }
bui_id = bud_formatp->bud_bui_id;
/*
@@ -4018,7 +4040,7 @@ xlog_recover_commit_pass1(
xfs_warn(log->l_mp, "%s: invalid item type (%d)",
__func__, ITEM_TYPE(item));
ASSERT(0);
- return -EIO;
+ return -EFSCORRUPTED;
}
}
@@ -4066,7 +4088,7 @@ xlog_recover_commit_pass2(
xfs_warn(log->l_mp, "%s: invalid item type (%d)",
__func__, ITEM_TYPE(item));
ASSERT(0);
- return -EIO;
+ return -EFSCORRUPTED;
}
}
@@ -4187,7 +4209,7 @@ xlog_recover_add_to_cont_trans(
ASSERT(len <= sizeof(struct xfs_trans_header));
if (len > sizeof(struct xfs_trans_header)) {
xfs_warn(log->l_mp, "%s: bad header length", __func__);
- return -EIO;
+ return -EFSCORRUPTED;
}
xlog_recover_add_item(&trans->r_itemq);
@@ -4243,13 +4265,13 @@ xlog_recover_add_to_trans(
xfs_warn(log->l_mp, "%s: bad header magic number",
__func__);
ASSERT(0);
- return -EIO;
+ return -EFSCORRUPTED;
}
if (len > sizeof(struct xfs_trans_header)) {
xfs_warn(log->l_mp, "%s: bad header length", __func__);
ASSERT(0);
- return -EIO;
+ return -EFSCORRUPTED;
}
/*
@@ -4285,7 +4307,7 @@ xlog_recover_add_to_trans(
in_f->ilf_size);
ASSERT(0);
kmem_free(ptr);
- return -EIO;
+ return -EFSCORRUPTED;
}
item->ri_total = in_f->ilf_size;
@@ -4293,7 +4315,16 @@ xlog_recover_add_to_trans(
kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
0);
}
- ASSERT(item->ri_total > item->ri_cnt);
+
+ if (item->ri_total <= item->ri_cnt) {
+ xfs_warn(log->l_mp,
+ "log item region count (%d) overflowed size (%d)",
+ item->ri_cnt, item->ri_total);
+ ASSERT(0);
+ kmem_free(ptr);
+ return -EFSCORRUPTED;
+ }
+
/* Description region is ri_buf[0] */
item->ri_buf[item->ri_cnt].i_addr = ptr;
item->ri_buf[item->ri_cnt].i_len = len;
@@ -4380,7 +4411,7 @@ xlog_recovery_process_trans(
default:
xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
ASSERT(0);
- error = -EIO;
+ error = -EFSCORRUPTED;
break;
}
if (error || freeit)
@@ -4460,7 +4491,7 @@ xlog_recover_process_ophdr(
xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
__func__, ohead->oh_clientid);
ASSERT(0);
- return -EIO;
+ return -EFSCORRUPTED;
}
/*
@@ -4470,7 +4501,7 @@ xlog_recover_process_ophdr(
if (dp + len > end) {
xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
WARN_ON(1);
- return -EIO;
+ return -EFSCORRUPTED;
}
trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
@@ -4566,9 +4597,9 @@ xlog_recover_process_data(
/* Recover the EFI if necessary. */
STATIC int
xlog_recover_process_efi(
- struct xfs_mount *mp,
struct xfs_ail *ailp,
- struct xfs_log_item *lip)
+ struct xfs_log_item *lip,
+ struct list_head *capture_list)
{
struct xfs_efi_log_item *efip;
int error;
@@ -4581,7 +4612,7 @@ xlog_recover_process_efi(
return 0;
spin_unlock(&ailp->ail_lock);
- error = xfs_efi_recover(mp, efip);
+ error = xfs_efi_recover(efip, capture_list);
spin_lock(&ailp->ail_lock);
return error;
@@ -4606,9 +4637,9 @@ xlog_recover_cancel_efi(
/* Recover the RUI if necessary. */
STATIC int
xlog_recover_process_rui(
- struct xfs_mount *mp,
struct xfs_ail *ailp,
- struct xfs_log_item *lip)
+ struct xfs_log_item *lip,
+ struct list_head *capture_list)
{
struct xfs_rui_log_item *ruip;
int error;
@@ -4621,7 +4652,7 @@ xlog_recover_process_rui(
return 0;
spin_unlock(&ailp->ail_lock);
- error = xfs_rui_recover(mp, ruip);
+ error = xfs_rui_recover(ruip, capture_list);
spin_lock(&ailp->ail_lock);
return error;
@@ -4646,9 +4677,9 @@ xlog_recover_cancel_rui(
/* Recover the CUI if necessary. */
STATIC int
xlog_recover_process_cui(
- struct xfs_trans *parent_tp,
struct xfs_ail *ailp,
- struct xfs_log_item *lip)
+ struct xfs_log_item *lip,
+ struct list_head *capture_list)
{
struct xfs_cui_log_item *cuip;
int error;
@@ -4661,7 +4692,7 @@ xlog_recover_process_cui(
return 0;
spin_unlock(&ailp->ail_lock);
- error = xfs_cui_recover(parent_tp, cuip);
+ error = xfs_cui_recover(cuip, capture_list);
spin_lock(&ailp->ail_lock);
return error;
@@ -4686,9 +4717,9 @@ xlog_recover_cancel_cui(
/* Recover the BUI if necessary. */
STATIC int
xlog_recover_process_bui(
- struct xfs_trans *parent_tp,
struct xfs_ail *ailp,
- struct xfs_log_item *lip)
+ struct xfs_log_item *lip,
+ struct list_head *capture_list)
{
struct xfs_bui_log_item *buip;
int error;
@@ -4701,7 +4732,7 @@ xlog_recover_process_bui(
return 0;
spin_unlock(&ailp->ail_lock);
- error = xfs_bui_recover(parent_tp, buip);
+ error = xfs_bui_recover(buip, capture_list);
spin_lock(&ailp->ail_lock);
return error;
@@ -4740,37 +4771,66 @@ static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
/* Take all the collected deferred ops and finish them in order. */
static int
xlog_finish_defer_ops(
- struct xfs_trans *parent_tp)
+ struct xfs_mount *mp,
+ struct list_head *capture_list)
{
- struct xfs_mount *mp = parent_tp->t_mountp;
+ struct xfs_defer_capture *dfc, *next;
struct xfs_trans *tp;
- int64_t freeblks;
- uint resblks;
- int error;
+ struct xfs_inode *ip;
+ int error = 0;
- /*
- * We're finishing the defer_ops that accumulated as a result of
- * recovering unfinished intent items during log recovery. We
- * reserve an itruncate transaction because it is the largest
- * permanent transaction type. Since we're the only user of the fs
- * right now, take 93% (15/16) of the available free blocks. Use
- * weird math to avoid a 64-bit division.
- */
- freeblks = percpu_counter_sum(&mp->m_fdblocks);
- if (freeblks <= 0)
- return -ENOSPC;
- resblks = min_t(int64_t, UINT_MAX, freeblks);
- resblks = (resblks * 15) >> 4;
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
- 0, XFS_TRANS_RESERVE, &tp);
- if (error)
- return error;
- /* transfer all collected dfops to this transaction */
- xfs_defer_move(tp, parent_tp);
+ list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
+ struct xfs_trans_res resv;
+
+ /*
+ * Create a new transaction reservation from the captured
+ * information. Set logcount to 1 to force the new transaction
+ * to regrant every roll so that we can make forward progress
+ * in recovery no matter how full the log might be.
+ */
+ resv.tr_logres = dfc->dfc_logres;
+ resv.tr_logcount = 1;
+ resv.tr_logflags = XFS_TRANS_PERM_LOG_RES;
+
+ error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
+ dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
+ if (error)
+ return error;
- return xfs_trans_commit(tp);
+ /*
+ * Transfer to this new transaction all the dfops we captured
+ * from recovering a single intent item.
+ */
+ list_del_init(&dfc->dfc_list);
+ xfs_defer_ops_continue(dfc, tp, &ip);
+
+ error = xfs_trans_commit(tp);
+ if (ip) {
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ xfs_irele(ip);
+ }
+ if (error)
+ return error;
+ }
+
+ ASSERT(list_empty(capture_list));
+ return 0;
}
+/* Release all the captured defer ops and capture structures in this list. */
+static void
+xlog_abort_defer_ops(
+ struct xfs_mount *mp,
+ struct list_head *capture_list)
+{
+ struct xfs_defer_capture *dfc;
+ struct xfs_defer_capture *next;
+
+ list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
+ list_del_init(&dfc->dfc_list);
+ xfs_defer_ops_release(mp, dfc);
+ }
+}
/*
* When this is called, all of the log intent items which did not have
* corresponding log done items should be in the AIL. What we do now
@@ -4791,35 +4851,23 @@ STATIC int
xlog_recover_process_intents(
struct xlog *log)
{
- struct xfs_trans *parent_tp;
+ LIST_HEAD(capture_list);
struct xfs_ail_cursor cur;
struct xfs_log_item *lip;
struct xfs_ail *ailp;
- int error;
+ int error = 0;
#if defined(DEBUG) || defined(XFS_WARN)
xfs_lsn_t last_lsn;
#endif
- /*
- * The intent recovery handlers commit transactions to complete recovery
- * for individual intents, but any new deferred operations that are
- * queued during that process are held off until the very end. The
- * purpose of this transaction is to serve as a container for deferred
- * operations. Each intent recovery handler must transfer dfops here
- * before its local transaction commits, and we'll finish the entire
- * list below.
- */
- error = xfs_trans_alloc_empty(log->l_mp, &parent_tp);
- if (error)
- return error;
-
ailp = log->l_ailp;
spin_lock(&ailp->ail_lock);
- lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
#if defined(DEBUG) || defined(XFS_WARN)
last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
#endif
- while (lip != NULL) {
+ for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
+ lip != NULL;
+ lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
/*
* We're done when we see something other than an intent.
* There should be no intents left in the AIL now.
@@ -4841,35 +4889,40 @@ xlog_recover_process_intents(
/*
* NOTE: If your intent processing routine can create more
- * deferred ops, you /must/ attach them to the dfops in this
- * routine or else those subsequent intents will get
+ * deferred ops, you /must/ attach them to the capture list in
+ * the recover routine or else those subsequent intents will be
* replayed in the wrong order!
*/
switch (lip->li_type) {
case XFS_LI_EFI:
- error = xlog_recover_process_efi(log->l_mp, ailp, lip);
+ error = xlog_recover_process_efi(ailp, lip, &capture_list);
break;
case XFS_LI_RUI:
- error = xlog_recover_process_rui(log->l_mp, ailp, lip);
+ error = xlog_recover_process_rui(ailp, lip, &capture_list);
break;
case XFS_LI_CUI:
- error = xlog_recover_process_cui(parent_tp, ailp, lip);
+ error = xlog_recover_process_cui(ailp, lip, &capture_list);
break;
case XFS_LI_BUI:
- error = xlog_recover_process_bui(parent_tp, ailp, lip);
+ error = xlog_recover_process_bui(ailp, lip, &capture_list);
break;
}
if (error)
- goto out;
- lip = xfs_trans_ail_cursor_next(ailp, &cur);
+ break;
}
-out:
+
xfs_trans_ail_cursor_done(&cur);
spin_unlock(&ailp->ail_lock);
- if (!error)
- error = xlog_finish_defer_ops(parent_tp);
- xfs_trans_cancel(parent_tp);
+ if (error)
+ goto err;
+
+ error = xlog_finish_defer_ops(log->l_mp, &capture_list);
+ if (error)
+ goto err;
+ return 0;
+err:
+ xlog_abort_defer_ops(log->l_mp, &capture_list);
return error;
}
@@ -5172,8 +5225,10 @@ xlog_recover_process(
* If the filesystem is CRC enabled, this mismatch becomes a
* fatal log corruption failure.
*/
- if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
+ if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
return -EFSCORRUPTED;
+ }
}
xlog_unpack_data(rhead, dp, log);
@@ -5200,7 +5255,7 @@ xlog_valid_rec_header(
(be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
__func__, be32_to_cpu(rhead->h_version));
- return -EIO;
+ return -EFSCORRUPTED;
}
/* LR body must have data or it wouldn't have been written */
@@ -5296,8 +5351,12 @@ xlog_do_recovery_pass(
"invalid iclog size (%d bytes), using lsunit (%d bytes)",
h_size, log->l_mp->m_logbsize);
h_size = log->l_mp->m_logbsize;
- } else
- return -EFSCORRUPTED;
+ } else {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
+ log->l_mp);
+ error = -EFSCORRUPTED;
+ goto bread_err1;
+ }
}
if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c
index 9804efe525a9..c57e8ad39712 100644
--- a/fs/xfs/xfs_message.c
+++ b/fs/xfs/xfs_message.c
@@ -105,7 +105,7 @@ assfail(char *expr, char *file, int line)
}
void
-xfs_hex_dump(void *p, int length)
+xfs_hex_dump(const void *p, int length)
{
print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1);
}
diff --git a/fs/xfs/xfs_message.h b/fs/xfs/xfs_message.h
index 34447dca97d1..7f040b04b739 100644
--- a/fs/xfs/xfs_message.h
+++ b/fs/xfs/xfs_message.h
@@ -60,6 +60,6 @@ do { \
extern void assfail(char *expr, char *f, int l);
extern void asswarn(char *expr, char *f, int l);
-extern void xfs_hex_dump(void *p, int length);
+extern void xfs_hex_dump(const void *p, int length);
#endif /* __XFS_MESSAGE_H */
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 5a0ce0c2c4bb..2277f21c4f14 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -31,7 +31,7 @@
#include "xfs_reflink.h"
#include "xfs_extent_busy.h"
#include "xfs_health.h"
-
+#include "xfs_trace.h"
static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
@@ -365,66 +365,119 @@ release_buf:
}
/*
- * Update alignment values based on mount options and sb values
+ * If the sunit/swidth change would move the precomputed root inode value, we
+ * must reject the ondisk change because repair will stumble over that.
+ * However, we allow the mount to proceed because we never rejected this
+ * combination before. Sets *update_sb to true if the superblock should be updated.
+ */
+static inline int
+xfs_check_new_dalign(
+ struct xfs_mount *mp,
+ int new_dalign,
+ bool *update_sb)
+{
+ struct xfs_sb *sbp = &mp->m_sb;
+ xfs_ino_t calc_ino;
+
+ calc_ino = xfs_ialloc_calc_rootino(mp, new_dalign);
+ trace_xfs_check_new_dalign(mp, new_dalign, calc_ino);
+
+ if (sbp->sb_rootino == calc_ino) {
+ *update_sb = true;
+ return 0;
+ }
+
+ xfs_warn(mp,
+"Cannot change stripe alignment; would require moving root inode.");
+
+ /*
+ * XXX: Next time we add a new incompat feature, this should start
+ * returning -EINVAL to fail the mount. Until then, spit out a warning
+ * that we're ignoring the administrator's instructions.
+ */
+ xfs_warn(mp, "Skipping superblock stripe alignment update.");
+ *update_sb = false;
+ return 0;
+}
+
+/*
+ * If we were provided with new sunit/swidth values as mount options, make sure
+ * that they pass basic alignment and superblock feature checks, and convert
+ * them into the same units (FSB) that everything else expects. This step
+ * /must/ be done before computing the inode geometry.
*/
STATIC int
-xfs_update_alignment(xfs_mount_t *mp)
+xfs_validate_new_dalign(
+ struct xfs_mount *mp)
{
- xfs_sb_t *sbp = &(mp->m_sb);
+ if (mp->m_dalign == 0)
+ return 0;
- if (mp->m_dalign) {
+ /*
+ * If stripe unit and stripe width are not multiples
+ * of the fs blocksize turn off alignment.
+ */
+ if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
+ (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
+ xfs_warn(mp,
+ "alignment check failed: sunit/swidth vs. blocksize(%d)",
+ mp->m_sb.sb_blocksize);
+ return -EINVAL;
+ } else {
/*
- * If stripe unit and stripe width are not multiples
- * of the fs blocksize turn off alignment.
+ * Convert the stripe unit and width to FSBs.
*/
- if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
- (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
+ mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
+ if (mp->m_dalign && (mp->m_sb.sb_agblocks % mp->m_dalign)) {
xfs_warn(mp,
- "alignment check failed: sunit/swidth vs. blocksize(%d)",
- sbp->sb_blocksize);
+ "alignment check failed: sunit/swidth vs. agsize(%d)",
+ mp->m_sb.sb_agblocks);
return -EINVAL;
- } else {
- /*
- * Convert the stripe unit and width to FSBs.
- */
- mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
- if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
- xfs_warn(mp,
- "alignment check failed: sunit/swidth vs. agsize(%d)",
- sbp->sb_agblocks);
- return -EINVAL;
- } else if (mp->m_dalign) {
- mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
- } else {
- xfs_warn(mp,
- "alignment check failed: sunit(%d) less than bsize(%d)",
- mp->m_dalign, sbp->sb_blocksize);
- return -EINVAL;
- }
- }
-
- /*
- * Update superblock with new values
- * and log changes
- */
- if (xfs_sb_version_hasdalign(sbp)) {
- if (sbp->sb_unit != mp->m_dalign) {
- sbp->sb_unit = mp->m_dalign;
- mp->m_update_sb = true;
- }
- if (sbp->sb_width != mp->m_swidth) {
- sbp->sb_width = mp->m_swidth;
- mp->m_update_sb = true;
- }
+ } else if (mp->m_dalign) {
+ mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
} else {
xfs_warn(mp,
- "cannot change alignment: superblock does not support data alignment");
+ "alignment check failed: sunit(%d) less than bsize(%d)",
+ mp->m_dalign, mp->m_sb.sb_blocksize);
return -EINVAL;
}
+ }
+
+ if (!xfs_sb_version_hasdalign(&mp->m_sb)) {
+ xfs_warn(mp,
+"cannot change alignment: superblock does not support data alignment");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Update alignment values based on mount options and sb values. */
+STATIC int
+xfs_update_alignment(
+ struct xfs_mount *mp)
+{
+ struct xfs_sb *sbp = &mp->m_sb;
+
+ if (mp->m_dalign) {
+ bool update_sb;
+ int error;
+
+ if (sbp->sb_unit == mp->m_dalign &&
+ sbp->sb_width == mp->m_swidth)
+ return 0;
+
+ error = xfs_check_new_dalign(mp, mp->m_dalign, &update_sb);
+ if (error || !update_sb)
+ return error;
+
+ sbp->sb_unit = mp->m_dalign;
+ sbp->sb_width = mp->m_swidth;
+ mp->m_update_sb = true;
} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
xfs_sb_version_hasdalign(&mp->m_sb)) {
- mp->m_dalign = sbp->sb_unit;
- mp->m_swidth = sbp->sb_width;
+ mp->m_dalign = sbp->sb_unit;
+ mp->m_swidth = sbp->sb_width;
}
return 0;
@@ -622,6 +675,47 @@ xfs_check_summary_counts(
}
/*
+ * Flush and reclaim dirty inodes in preparation for unmount. Inodes and
+ * internal inode structures can be sitting in the CIL and AIL at this point,
+ * so we need to unpin them, write them back and/or reclaim them before unmount
+ * can proceed.
+ *
+ * An inode cluster that has been freed can have its buffer still pinned in
+ * memory because the transaction is still sitting in an iclog. The stale inodes
+ * on that buffer will be pinned to the buffer until the transaction hits the
+ * disk and the callbacks run. Pushing the AIL will skip the stale inodes and
+ * may never see the pinned buffer, so nothing will push out the iclog and
+ * unpin the buffer.
+ *
+ * Hence we need to force the log to unpin everything first. However, log
+ * forces don't wait for the discards they issue to complete, so we have to
+ * explicitly wait for them to complete here as well.
+ *
+ * Then we can tell the world we are unmounting so that error handling knows
+ * that the filesystem is going away and we should error out anything that we
+ * have been retrying in the background. This will prevent never-ending
+ * retries in AIL pushing from hanging the unmount.
+ *
+ * Finally, we can push the AIL to clean all the remaining dirty objects, then
+ * reclaim the remaining inodes that are still in memory at this point in time.
+ */
+static void
+xfs_unmount_flush_inodes(
+ struct xfs_mount *mp)
+{
+ xfs_log_force(mp, XFS_LOG_SYNC);
+ xfs_extent_busy_wait_all(mp);
+ flush_workqueue(xfs_discard_wq);
+
+ mp->m_flags |= XFS_MOUNT_UNMOUNTING;
+
+ xfs_ail_push_all_sync(mp->m_ail);
+ cancel_delayed_work_sync(&mp->m_reclaim_work);
+ xfs_reclaim_inodes(mp, SYNC_WAIT);
+ xfs_health_unmount(mp);
+}
+
+/*
* This function does the following on an initial mount of a file system:
* - reads the superblock from disk and init the mount struct
* - if we're a 32-bit kernel, do a size check on the superblock
@@ -692,12 +786,12 @@ xfs_mountfs(
}
/*
- * Check if sb_agblocks is aligned at stripe boundary
- * If sb_agblocks is NOT aligned turn off m_dalign since
- * allocator alignment is within an ag, therefore ag has
- * to be aligned at stripe boundary.
+ * If we were given new sunit/swidth options, do some basic validation
+ * checks and convert the incore dalign and swidth values to the
+ * same units (FSB) that everything else uses. This /must/ happen
+ * before computing the inode geometry.
*/
- error = xfs_update_alignment(mp);
+ error = xfs_validate_new_dalign(mp);
if (error)
goto out;
@@ -708,6 +802,17 @@ xfs_mountfs(
xfs_rmapbt_compute_maxlevels(mp);
xfs_refcountbt_compute_maxlevels(mp);
+ /*
+ * Check if sb_agblocks is aligned at stripe boundary. If sb_agblocks
+ * is NOT aligned turn off m_dalign since allocator alignment is within
+ * an ag, therefore ag has to be aligned at stripe boundary. Note that
+ * we must compute the free space and rmap btree geometry before doing
+ * this.
+ */
+ error = xfs_update_alignment(mp);
+ if (error)
+ goto out;
+
/* enable fail_at_unmount as default */
mp->m_fail_unmount = true;
@@ -983,7 +1088,7 @@ xfs_mountfs(
/* Clean out dquots that might be in memory after quotacheck. */
xfs_qm_unmount(mp);
/*
- * Cancel all delayed reclaim work and reclaim the inodes directly.
+ * Flush all inode reclamation work and flush the log.
* We have to do this /after/ rtunmount and qm_unmount because those
* two will have scheduled delayed reclaim for the rt/quota inodes.
*
@@ -993,11 +1098,8 @@ xfs_mountfs(
* qm_unmount_quotas and therefore rely on qm_unmount to release the
* quota inodes.
*/
- cancel_delayed_work_sync(&mp->m_reclaim_work);
- xfs_reclaim_inodes(mp, SYNC_WAIT);
- xfs_health_unmount(mp);
+ xfs_unmount_flush_inodes(mp);
out_log_dealloc:
- mp->m_flags |= XFS_MOUNT_UNMOUNTING;
xfs_log_mount_cancel(mp);
out_fail_wait:
if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
@@ -1038,47 +1140,7 @@ xfs_unmountfs(
xfs_rtunmount_inodes(mp);
xfs_irele(mp->m_rootip);
- /*
- * We can potentially deadlock here if we have an inode cluster
- * that has been freed has its buffer still pinned in memory because
- * the transaction is still sitting in a iclog. The stale inodes
- * on that buffer will have their flush locks held until the
- * transaction hits the disk and the callbacks run. the inode
- * flush takes the flush lock unconditionally and with nothing to
- * push out the iclog we will never get that unlocked. hence we
- * need to force the log first.
- */
- xfs_log_force(mp, XFS_LOG_SYNC);
-
- /*
- * Wait for all busy extents to be freed, including completion of
- * any discard operation.
- */
- xfs_extent_busy_wait_all(mp);
- flush_workqueue(xfs_discard_wq);
-
- /*
- * We now need to tell the world we are unmounting. This will allow
- * us to detect that the filesystem is going away and we should error
- * out anything that we have been retrying in the background. This will
- * prevent neverending retries in AIL pushing from hanging the unmount.
- */
- mp->m_flags |= XFS_MOUNT_UNMOUNTING;
-
- /*
- * Flush all pending changes from the AIL.
- */
- xfs_ail_push_all_sync(mp->m_ail);
-
- /*
- * And reclaim all inodes. At this point there should be no dirty
- * inodes and none should be pinned or locked, but use synchronous
- * reclaim just to be sure. We can stop background inode reclaim
- * here as well if it is still running.
- */
- cancel_delayed_work_sync(&mp->m_reclaim_work);
- xfs_reclaim_inodes(mp, SYNC_WAIT);
- xfs_health_unmount(mp);
+ xfs_unmount_flush_inodes(mp);
xfs_qm_unmount(mp);
@@ -1154,8 +1216,7 @@ xfs_fs_writable(
int
xfs_log_sbcount(xfs_mount_t *mp)
{
- /* allow this to proceed during the freeze sequence... */
- if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
+ if (!xfs_log_writable(mp))
return 0;
/*
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index fdb60e09a9c5..ca7e0c656cee 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -179,6 +179,11 @@ typedef struct xfs_mount {
struct xfs_error_cfg m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
struct xstats m_stats; /* per-fs stats */
+ /*
+ * Workqueue item so that we can coalesce multiple inode flush attempts
+ * into a single flush.
+ */
+ struct work_struct m_flush_inodes_work;
struct workqueue_struct *m_buf_workqueue;
struct workqueue_struct *m_unwritten_workqueue;
struct workqueue_struct *m_cil_workqueue;
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index f63fe8d924a3..058af699e046 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -147,11 +147,11 @@ xfs_fs_map_blocks(
if (error)
goto out_unlock;
+ ASSERT(!nimaps || imap.br_startblock != DELAYSTARTBLOCK);
+
if (write) {
enum xfs_prealloc_flags flags = 0;
- ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
-
if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) {
/*
* xfs_iomap_write_direct() expects to take ownership of
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index ecd8ce152ab1..6b108a4de08f 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -22,6 +22,7 @@
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
+#include "xfs_error.h"
/*
* The global quota manager. There is only one of these for the entire
@@ -120,12 +121,11 @@ xfs_qm_dqpurge(
{
struct xfs_mount *mp = dqp->q_mount;
struct xfs_quotainfo *qi = mp->m_quotainfo;
+ int error = -EAGAIN;
xfs_dqlock(dqp);
- if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
- xfs_dqunlock(dqp);
- return -EAGAIN;
- }
+ if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0)
+ goto out_unlock;
dqp->dq_flags |= XFS_DQ_FREEING;
@@ -138,7 +138,6 @@ xfs_qm_dqpurge(
*/
if (XFS_DQ_IS_DIRTY(dqp)) {
struct xfs_buf *bp = NULL;
- int error;
/*
* We don't care about getting disk errors here. We need
@@ -148,6 +147,9 @@ xfs_qm_dqpurge(
if (!error) {
error = xfs_bwrite(bp);
xfs_buf_relse(bp);
+ } else if (error == -EAGAIN) {
+ dqp->dq_flags &= ~XFS_DQ_FREEING;
+ goto out_unlock;
}
xfs_dqflock(dqp);
}
@@ -173,6 +175,10 @@ xfs_qm_dqpurge(
xfs_qm_dqdestroy(dqp);
return 0;
+
+out_unlock:
+ xfs_dqunlock(dqp);
+ return error;
}
/*
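
The restructured xfs_qm_dqpurge() above funnels its early-return cases through a single out_unlock label so the dquot lock is dropped in exactly one place. A self-contained userspace sketch of the same single-unlock-exit shape; the names are illustrative, not XFS APIs:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int freeing, nrefs;

/* Fail with an EAGAIN-style code unless the object is idle. */
static int try_purge(void)
{
	int error = -1;

	pthread_mutex_lock(&lock);
	if (freeing || nrefs != 0)
		goto out_unlock;	/* busy: bail through the one unlock */
	freeing = 1;			/* claim the object for teardown */
	error = 0;
out_unlock:
	pthread_mutex_unlock(&lock);
	return error;
}

int main(void)
{
	nrefs = 1;
	printf("busy purge -> %d\n", try_purge());	/* -1 */
	nrefs = 0;
	printf("idle purge -> %d\n", try_purge());	/* 0 */
	return 0;
}
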
@@ -243,14 +249,14 @@ xfs_qm_unmount_quotas(
STATIC int
xfs_qm_dqattach_one(
- xfs_inode_t *ip,
- xfs_dqid_t id,
- uint type,
- bool doalloc,
- xfs_dquot_t **IO_idqpp)
+ struct xfs_inode *ip,
+ xfs_dqid_t id,
+ uint type,
+ bool doalloc,
+ struct xfs_dquot **IO_idqpp)
{
- xfs_dquot_t *dqp;
- int error;
+ struct xfs_dquot *dqp;
+ int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
error = 0;
@@ -325,23 +331,23 @@ xfs_qm_dqattach_locked(
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
- error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
- doalloc, &ip->i_udquot);
+ error = xfs_qm_dqattach_one(ip, i_uid_read(VFS_I(ip)),
+ XFS_DQ_USER, doalloc, &ip->i_udquot);
if (error)
goto done;
ASSERT(ip->i_udquot);
}
if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
- error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
- doalloc, &ip->i_gdquot);
+ error = xfs_qm_dqattach_one(ip, i_gid_read(VFS_I(ip)),
+ XFS_DQ_GROUP, doalloc, &ip->i_gdquot);
if (error)
goto done;
ASSERT(ip->i_gdquot);
}
if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
- error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
+ error = xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
doalloc, &ip->i_pdquot);
if (error)
goto done;
@@ -543,8 +549,8 @@ xfs_qm_set_defquota(
uint type,
xfs_quotainfo_t *qinf)
{
- xfs_dquot_t *dqp;
- struct xfs_def_quota *defq;
+ struct xfs_dquot *dqp;
+ struct xfs_def_quota *defq;
struct xfs_disk_dquot *ddqp;
int error;
@@ -754,11 +760,19 @@ xfs_qm_qino_alloc(
if ((flags & XFS_QMOPT_PQUOTA) &&
(mp->m_sb.sb_gquotino != NULLFSINO)) {
ino = mp->m_sb.sb_gquotino;
- ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
+ if (mp->m_sb.sb_pquotino != NULLFSINO) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
+ mp);
+ return -EFSCORRUPTED;
+ }
} else if ((flags & XFS_QMOPT_GQUOTA) &&
(mp->m_sb.sb_pquotino != NULLFSINO)) {
ino = mp->m_sb.sb_pquotino;
- ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
+ if (mp->m_sb.sb_gquotino != NULLFSINO) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
+ mp);
+ return -EFSCORRUPTED;
+ }
}
if (ino != NULLFSINO) {
error = xfs_iget(mp, NULL, ino, 0, 0, ip);
@@ -866,12 +880,20 @@ xfs_qm_reset_dqcounts(
ddq->d_bcount = 0;
ddq->d_icount = 0;
ddq->d_rtbcount = 0;
- ddq->d_btimer = 0;
- ddq->d_itimer = 0;
- ddq->d_rtbtimer = 0;
- ddq->d_bwarns = 0;
- ddq->d_iwarns = 0;
- ddq->d_rtbwarns = 0;
+
+ /*
+ * dquot id 0 stores the default grace period and the maximum
+ * warning limit that were set by the administrator, so we
+ * should not reset them.
+ */
+ if (ddq->d_id != 0) {
+ ddq->d_btimer = 0;
+ ddq->d_itimer = 0;
+ ddq->d_rtbtimer = 0;
+ ddq->d_bwarns = 0;
+ ddq->d_iwarns = 0;
+ ddq->d_rtbwarns = 0;
+ }
if (xfs_sb_version_hascrc(&mp->m_sb)) {
xfs_update_cksum((char *)&dqb[j],
@@ -1608,8 +1630,8 @@ xfs_qm_dqfree_one(
int
xfs_qm_vop_dqalloc(
struct xfs_inode *ip,
- xfs_dqid_t uid,
- xfs_dqid_t gid,
+ kuid_t uid,
+ kgid_t gid,
prid_t prid,
uint flags,
struct xfs_dquot **O_udqpp,
@@ -1617,6 +1639,8 @@ xfs_qm_vop_dqalloc(
struct xfs_dquot **O_pdqpp)
{
struct xfs_mount *mp = ip->i_mount;
+ struct inode *inode = VFS_I(ip);
+ struct user_namespace *user_ns = inode->i_sb->s_user_ns;
struct xfs_dquot *uq = NULL;
struct xfs_dquot *gq = NULL;
struct xfs_dquot *pq = NULL;
@@ -1630,7 +1654,7 @@ xfs_qm_vop_dqalloc(
xfs_ilock(ip, lockflags);
if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
- gid = ip->i_d.di_gid;
+ gid = inode->i_gid;
/*
* Attach the dquot(s) to this inode, doing a dquot allocation
@@ -1645,7 +1669,7 @@ xfs_qm_vop_dqalloc(
}
if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
- if (ip->i_d.di_uid != uid) {
+ if (!uid_eq(inode->i_uid, uid)) {
/*
* What we need is the dquot that has this uid, and
* if we send the inode to dqget, the uid of the inode
@@ -1656,7 +1680,8 @@ xfs_qm_vop_dqalloc(
* holding ilock.
*/
xfs_iunlock(ip, lockflags);
- error = xfs_qm_dqget(mp, uid, XFS_DQ_USER, true, &uq);
+ error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
+ XFS_DQ_USER, true, &uq);
if (error) {
ASSERT(error != -ENOENT);
return error;
@@ -1677,9 +1702,10 @@ xfs_qm_vop_dqalloc(
}
}
if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
- if (ip->i_d.di_gid != gid) {
+ if (!gid_eq(inode->i_gid, gid)) {
xfs_iunlock(ip, lockflags);
- error = xfs_qm_dqget(mp, gid, XFS_DQ_GROUP, true, &gq);
+ error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
+ XFS_DQ_GROUP, true, &gq);
if (error) {
ASSERT(error != -ENOENT);
goto error_rele;
@@ -1693,7 +1719,7 @@ xfs_qm_vop_dqalloc(
}
}
if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
- if (xfs_get_projid(ip) != prid) {
+ if (ip->i_d.di_projid != prid) {
xfs_iunlock(ip, lockflags);
error = xfs_qm_dqget(mp, (xfs_dqid_t)prid, XFS_DQ_PROJ,
true, &pq);
@@ -1737,14 +1763,14 @@ error_rele:
* Actually transfer ownership, and do dquot modifications.
* These were already reserved.
*/
-xfs_dquot_t *
+struct xfs_dquot *
xfs_qm_vop_chown(
- xfs_trans_t *tp,
- xfs_inode_t *ip,
- xfs_dquot_t **IO_olddq,
- xfs_dquot_t *newdq)
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ struct xfs_dquot **IO_olddq,
+ struct xfs_dquot *newdq)
{
- xfs_dquot_t *prevdq;
+ struct xfs_dquot *prevdq;
uint bfield = XFS_IS_REALTIME_INODE(ip) ?
XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
@@ -1805,7 +1831,7 @@ xfs_qm_vop_chown_reserve(
XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
if (XFS_IS_UQUOTA_ON(mp) && udqp &&
- ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
+ i_uid_read(VFS_I(ip)) != be32_to_cpu(udqp->q_core.d_id)) {
udq_delblks = udqp;
/*
* If there are delayed allocation blocks, then we have to
@@ -1818,7 +1844,7 @@ xfs_qm_vop_chown_reserve(
}
}
if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
- ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
+ i_gid_read(VFS_I(ip)) != be32_to_cpu(gdqp->q_core.d_id)) {
gdq_delblks = gdqp;
if (delblks) {
ASSERT(ip->i_gdquot);
@@ -1827,7 +1853,7 @@ xfs_qm_vop_chown_reserve(
}
if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
- xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
+ ip->i_d.di_projid != be32_to_cpu(pdqp->q_core.d_id)) {
prjflags = XFS_QMOPT_ENOSPC;
pdq_delblks = pdqp;
if (delblks) {
@@ -1915,20 +1941,21 @@ xfs_qm_vop_create_dqattach(
if (udqp && XFS_IS_UQUOTA_ON(mp)) {
ASSERT(ip->i_udquot == NULL);
- ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
+ ASSERT(i_uid_read(VFS_I(ip)) == be32_to_cpu(udqp->q_core.d_id));
ip->i_udquot = xfs_qm_dqhold(udqp);
xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
}
if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
ASSERT(ip->i_gdquot == NULL);
- ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
+ ASSERT(i_gid_read(VFS_I(ip)) == be32_to_cpu(gdqp->q_core.d_id));
+
ip->i_gdquot = xfs_qm_dqhold(gdqp);
xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
}
if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
ASSERT(ip->i_pdquot == NULL);
- ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
+ ASSERT(ip->i_d.di_projid == be32_to_cpu(pdqp->q_core.d_id));
ip->i_pdquot = xfs_qm_dqhold(pdqp);
xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index 5d72e88598b4..fc2fa418919f 100644
--- a/fs/xfs/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
@@ -54,13 +54,13 @@ xfs_fill_statvfs_from_dquot(
*/
void
xfs_qm_statvfs(
- xfs_inode_t *ip,
+ struct xfs_inode *ip,
struct kstatfs *statp)
{
- xfs_mount_t *mp = ip->i_mount;
- xfs_dquot_t *dqp;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_dquot *dqp;
- if (!xfs_qm_dqget(mp, xfs_get_projid(ip), XFS_DQ_PROJ, false, &dqp)) {
+ if (!xfs_qm_dqget(mp, ip->i_d.di_projid, XFS_DQ_PROJ, false, &dqp)) {
xfs_fill_statvfs_from_dquot(statp, dqp);
xfs_qm_dqput(dqp);
}
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index da7ad0383037..5d5ac65aa1cc 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -19,9 +19,71 @@
#include "xfs_qm.h"
#include "xfs_icache.h"
-STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
-STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
- uint);
+STATIC int
+xfs_qm_log_quotaoff(
+ struct xfs_mount *mp,
+ struct xfs_qoff_logitem **qoffstartp,
+ uint flags)
+{
+ struct xfs_trans *tp;
+ int error;
+ struct xfs_qoff_logitem *qoffi;
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
+ if (error)
+ goto out;
+
+ qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
+ xfs_trans_log_quotaoff_item(tp, qoffi);
+
+ spin_lock(&mp->m_sb_lock);
+ mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
+ spin_unlock(&mp->m_sb_lock);
+
+ xfs_log_sb(tp);
+
+ /*
+ * We have to make sure that the transaction is secure on disk before we
+ * return and actually stop quota accounting. So, make it synchronous.
+ * We don't care about quotaoff's performance.
+ */
+ xfs_trans_set_sync(tp);
+ error = xfs_trans_commit(tp);
+ if (error)
+ goto out;
+
+ *qoffstartp = qoffi;
+out:
+ return error;
+}
+
+STATIC int
+xfs_qm_log_quotaoff_end(
+ struct xfs_mount *mp,
+ struct xfs_qoff_logitem **startqoff,
+ uint flags)
+{
+ struct xfs_trans *tp;
+ int error;
+ struct xfs_qoff_logitem *qoffi;
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
+ if (error)
+ return error;
+
+ qoffi = xfs_trans_get_qoff_item(tp, *startqoff,
+ flags & XFS_ALL_QUOTA_ACCT);
+ xfs_trans_log_quotaoff_item(tp, qoffi);
+ *startqoff = NULL;
+
+ /*
+ * We have to make sure that the transaction is secure on disk before we
+ * return and actually stop quota accounting. So, make it synchronous.
+ * We don't care about quotaoff's performance.
+ */
+ xfs_trans_set_sync(tp);
+ return xfs_trans_commit(tp);
+}
/*
* Turn off quota accounting and/or enforcement for all udquots and/or
@@ -40,7 +102,7 @@ xfs_qm_scall_quotaoff(
uint dqtype;
int error;
uint inactivate_flags;
- xfs_qoff_logitem_t *qoffstart;
+ struct xfs_qoff_logitem *qoffstart = NULL;
/*
* No file system can have quotas enabled on disk but not in core.
@@ -165,7 +227,7 @@ xfs_qm_scall_quotaoff(
* So, we have QUOTAOFF start and end logitems; the start
* logitem won't get overwritten until the end logitem appears...
*/
- error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
+ error = xfs_qm_log_quotaoff_end(mp, &qoffstart, flags);
if (error) {
/* We're screwed now. Shutdown is the only option. */
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
@@ -198,6 +260,8 @@ xfs_qm_scall_quotaoff(
}
out_unlock:
+ if (error && qoffstart)
+ xfs_qm_qoff_logitem_relse(qoffstart);
mutex_unlock(&q->qi_quotaofflock);
return error;
}
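
The reworked exit path above also plugs a leak: if quotaoff fails after logging the start item but before the end item consumes it, the start item is now explicitly released. A tiny userspace analogue of that pairing rule, using hypothetical names rather than XFS APIs:

#include <stdio.h>
#include <stdlib.h>

struct qoff_item { int flags; };

static struct qoff_item *log_quotaoff_start(int flags)
{
	struct qoff_item *it = malloc(sizeof(*it));

	if (it)
		it->flags = flags;
	return it;
}

/* Logging the end item consumes and clears the start item. */
static void log_quotaoff_end(struct qoff_item **start)
{
	free(*start);
	*start = NULL;
}

static int quotaoff(int flags, int fail_midway)
{
	struct qoff_item *start = log_quotaoff_start(flags);
	int error = start ? 0 : -1;

	if (!error && fail_midway)
		error = -5;	/* simulated failure before the end item */
	if (!error)
		log_quotaoff_end(&start);
	if (error && start)
		free(start);	/* the release the patch adds */
	return error;
}

int main(void)
{
	printf("ok=%d failed=%d\n", quotaoff(1, 0), quotaoff(1, 1));
	return 0;
}
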
@@ -538,74 +602,6 @@ out_unlock:
return error;
}
-STATIC int
-xfs_qm_log_quotaoff_end(
- xfs_mount_t *mp,
- xfs_qoff_logitem_t *startqoff,
- uint flags)
-{
- xfs_trans_t *tp;
- int error;
- xfs_qoff_logitem_t *qoffi;
-
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
- if (error)
- return error;
-
- qoffi = xfs_trans_get_qoff_item(tp, startqoff,
- flags & XFS_ALL_QUOTA_ACCT);
- xfs_trans_log_quotaoff_item(tp, qoffi);
-
- /*
- * We have to make sure that the transaction is secure on disk before we
- * return and actually stop quota accounting. So, make it synchronous.
- * We don't care about quotoff's performance.
- */
- xfs_trans_set_sync(tp);
- return xfs_trans_commit(tp);
-}
-
-
-STATIC int
-xfs_qm_log_quotaoff(
- xfs_mount_t *mp,
- xfs_qoff_logitem_t **qoffstartp,
- uint flags)
-{
- xfs_trans_t *tp;
- int error;
- xfs_qoff_logitem_t *qoffi;
-
- *qoffstartp = NULL;
-
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
- if (error)
- goto out;
-
- qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
- xfs_trans_log_quotaoff_item(tp, qoffi);
-
- spin_lock(&mp->m_sb_lock);
- mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
- spin_unlock(&mp->m_sb_lock);
-
- xfs_log_sb(tp);
-
- /*
- * We have to make sure that the transaction is secure on disk before we
- * return and actually stop quota accounting. So, make it synchronous.
- * We don't care about quotoff's performance.
- */
- xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp);
- if (error)
- goto out;
-
- *qoffstartp = qoffi;
-out:
- return error;
-}
-
/* Fill out the quota context. */
static void
xfs_qm_scall_getquota_fill_qc(
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index efe42ae7a2f3..aa8fc1f55fbd 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -86,7 +86,7 @@ extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
struct xfs_mount *, struct xfs_dquot *,
struct xfs_dquot *, struct xfs_dquot *, int64_t, long, uint);
-extern int xfs_qm_vop_dqalloc(struct xfs_inode *, xfs_dqid_t, xfs_dqid_t,
+extern int xfs_qm_vop_dqalloc(struct xfs_inode *, kuid_t, kgid_t,
prid_t, uint, struct xfs_dquot **, struct xfs_dquot **,
struct xfs_dquot **);
extern void xfs_qm_vop_create_dqattach(struct xfs_trans *, struct xfs_inode *,
@@ -109,7 +109,7 @@ extern void xfs_qm_unmount_quotas(struct xfs_mount *);
#else
static inline int
-xfs_qm_vop_dqalloc(struct xfs_inode *ip, xfs_dqid_t uid, xfs_dqid_t gid,
+xfs_qm_vop_dqalloc(struct xfs_inode *ip, kuid_t kuid, kgid_t kgid,
prid_t prid, uint flags, struct xfs_dquot **udqp,
struct xfs_dquot **gdqp, struct xfs_dquot **pdqp)
{
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index 2328268e6245..fa1018a6e677 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -17,7 +17,7 @@
#include "xfs_refcount_item.h"
#include "xfs_log.h"
#include "xfs_refcount.h"
-
+#include "xfs_error.h"
kmem_zone_t *xfs_cui_zone;
kmem_zone_t *xfs_cud_zone;
@@ -123,40 +123,6 @@ xfs_cui_item_release(
xfs_cui_release(CUI_ITEM(lip));
}
-static const struct xfs_item_ops xfs_cui_item_ops = {
- .iop_size = xfs_cui_item_size,
- .iop_format = xfs_cui_item_format,
- .iop_unpin = xfs_cui_item_unpin,
- .iop_release = xfs_cui_item_release,
-};
-
-/*
- * Allocate and initialize an cui item with the given number of extents.
- */
-struct xfs_cui_log_item *
-xfs_cui_init(
- struct xfs_mount *mp,
- uint nextents)
-
-{
- struct xfs_cui_log_item *cuip;
-
- ASSERT(nextents > 0);
- if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
- cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents),
- 0);
- else
- cuip = kmem_zone_zalloc(xfs_cui_zone, 0);
-
- xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
- cuip->cui_format.cui_nextents = nextents;
- cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
- atomic_set(&cuip->cui_next_extent, 0);
- atomic_set(&cuip->cui_refcount, 2);
-
- return cuip;
-}
-
static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
return container_of(lip, struct xfs_cud_log_item, cud_item);
@@ -284,27 +250,6 @@ xfs_refcount_update_diff_items(
XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
}
-/* Get an CUI. */
-STATIC void *
-xfs_refcount_update_create_intent(
- struct xfs_trans *tp,
- unsigned int count)
-{
- struct xfs_cui_log_item *cuip;
-
- ASSERT(tp != NULL);
- ASSERT(count > 0);
-
- cuip = xfs_cui_init(tp->t_mountp, count);
- ASSERT(cuip != NULL);
-
- /*
- * Get a log_item_desc to point at the new item.
- */
- xfs_trans_add_item(tp, &cuip->cui_item);
- return cuip;
-}
-
/* Set the phys extent flags for this reverse mapping. */
static void
xfs_trans_set_refcount_flags(
@@ -328,16 +273,12 @@ xfs_trans_set_refcount_flags(
STATIC void
xfs_refcount_update_log_item(
struct xfs_trans *tp,
- void *intent,
- struct list_head *item)
+ struct xfs_cui_log_item *cuip,
+ struct xfs_refcount_intent *refc)
{
- struct xfs_cui_log_item *cuip = intent;
- struct xfs_refcount_intent *refc;
uint next_extent;
struct xfs_phys_extent *ext;
- refc = container_of(item, struct xfs_refcount_intent, ri_list);
-
tp->t_flags |= XFS_TRANS_DIRTY;
set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
@@ -354,14 +295,35 @@ xfs_refcount_update_log_item(
xfs_trans_set_refcount_flags(ext, refc->ri_type);
}
+static struct xfs_log_item *
+xfs_refcount_update_create_intent(
+ struct xfs_trans *tp,
+ struct list_head *items,
+ unsigned int count,
+ bool sort)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_cui_log_item *cuip = xfs_cui_init(mp, count);
+ struct xfs_refcount_intent *refc;
+
+ ASSERT(count > 0);
+
+ xfs_trans_add_item(tp, &cuip->cui_item);
+ if (sort)
+ list_sort(mp, items, xfs_refcount_update_diff_items);
+ list_for_each_entry(refc, items, ri_list)
+ xfs_refcount_update_log_item(tp, cuip, refc);
+ return &cuip->cui_item;
+}
+
 /* Get a CUD so we can process all the deferred refcount updates. */
STATIC void *
xfs_refcount_update_create_done(
struct xfs_trans *tp,
- void *intent,
+ struct xfs_log_item *intent,
unsigned int count)
{
- return xfs_trans_get_cud(tp, intent);
+ return xfs_trans_get_cud(tp, CUI_ITEM(intent));
}
/* Process a deferred refcount update. */
@@ -411,9 +373,9 @@ xfs_refcount_update_finish_cleanup(
/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
- void *intent)
+ struct xfs_log_item *intent)
{
- xfs_cui_release(intent);
+ xfs_cui_release(CUI_ITEM(intent));
}
/* Cancel a deferred refcount update. */
@@ -429,10 +391,8 @@ xfs_refcount_update_cancel_item(
const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
.max_items = XFS_CUI_MAX_FAST_EXTENTS,
- .diff_items = xfs_refcount_update_diff_items,
.create_intent = xfs_refcount_update_create_intent,
.abort_intent = xfs_refcount_update_abort_intent,
- .log_item = xfs_refcount_update_log_item,
.create_done = xfs_refcount_update_create_done,
.finish_item = xfs_refcount_update_finish_item,
.finish_cleanup = xfs_refcount_update_finish_cleanup,
@@ -445,8 +405,8 @@ const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
*/
int
xfs_cui_recover(
- struct xfs_trans *parent_tp,
- struct xfs_cui_log_item *cuip)
+ struct xfs_cui_log_item *cuip,
+ struct list_head *capture_list)
{
int i;
int error = 0;
@@ -462,7 +422,7 @@ xfs_cui_recover(
xfs_extlen_t new_len;
struct xfs_bmbt_irec irec;
bool requeue_only = false;
- struct xfs_mount *mp = parent_tp->t_mountp;
+ struct xfs_mount *mp = cuip->cui_item.li_mountp;
ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags));
@@ -497,7 +457,7 @@ xfs_cui_recover(
*/
set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
xfs_cui_release(cuip);
- return -EIO;
+ return -EFSCORRUPTED;
}
}
@@ -517,12 +477,7 @@ xfs_cui_recover(
mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
if (error)
return error;
- /*
- * Recovery stashes all deferred ops during intent processing and
- * finishes them on completion. Transfer current dfops state to this
- * transaction and transfer the result back before we return.
- */
- xfs_defer_move(tp, parent_tp);
+
cudp = xfs_trans_get_cud(tp, cuip);
for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
@@ -536,6 +491,7 @@ xfs_cui_recover(
type = refc_type;
break;
default:
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto abort_error;
}
@@ -579,13 +535,71 @@ xfs_cui_recover(
xfs_refcount_finish_one_cleanup(tp, rcur, error);
set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
- xfs_defer_move(parent_tp, tp);
- error = xfs_trans_commit(tp);
- return error;
+ return xfs_defer_ops_capture_and_commit(tp, NULL, capture_list);
abort_error:
xfs_refcount_finish_one_cleanup(tp, rcur, error);
- xfs_defer_move(parent_tp, tp);
xfs_trans_cancel(tp);
return error;
}
+
+/* Relog an intent item to push the log tail forward. */
+static struct xfs_log_item *
+xfs_cui_item_relog(
+ struct xfs_log_item *intent,
+ struct xfs_trans *tp)
+{
+ struct xfs_cud_log_item *cudp;
+ struct xfs_cui_log_item *cuip;
+ struct xfs_phys_extent *extp;
+ unsigned int count;
+
+ count = CUI_ITEM(intent)->cui_format.cui_nextents;
+ extp = CUI_ITEM(intent)->cui_format.cui_extents;
+
+ tp->t_flags |= XFS_TRANS_DIRTY;
+ cudp = xfs_trans_get_cud(tp, CUI_ITEM(intent));
+ set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);
+
+ cuip = xfs_cui_init(tp->t_mountp, count);
+ memcpy(cuip->cui_format.cui_extents, extp, count * sizeof(*extp));
+ atomic_set(&cuip->cui_next_extent, count);
+ xfs_trans_add_item(tp, &cuip->cui_item);
+ set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
+ return &cuip->cui_item;
+}
+
+static const struct xfs_item_ops xfs_cui_item_ops = {
+ .iop_size = xfs_cui_item_size,
+ .iop_format = xfs_cui_item_format,
+ .iop_unpin = xfs_cui_item_unpin,
+ .iop_release = xfs_cui_item_release,
+ .iop_relog = xfs_cui_item_relog,
+};
+
+/*
+ * Allocate and initialize a CUI item with the given number of extents.
+ */
+struct xfs_cui_log_item *
+xfs_cui_init(
+ struct xfs_mount *mp,
+ uint nextents)
+
+{
+ struct xfs_cui_log_item *cuip;
+
+ ASSERT(nextents > 0);
+ if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
+ cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents),
+ 0);
+ else
+ cuip = kmem_zone_zalloc(xfs_cui_zone, 0);
+
+ xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
+ cuip->cui_format.cui_nextents = nextents;
+ cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
+ atomic_set(&cuip->cui_next_extent, 0);
+ atomic_set(&cuip->cui_refcount, 2);
+
+ return cuip;
+}
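
The new iop_relog method above retires the stale intent with a done item and logs a fresh copy of its payload at the current log head, letting the tail move forward. A compact userspace model of the copy-and-replace step; the struct here is hypothetical, standing in for the CUI format:

#include <stdio.h>
#include <string.h>

struct intent {
	unsigned int	nextents;
	unsigned long	extents[4];	/* payload carried forward */
};

/* Relog: the old intent is marked done; return a copy to log anew. */
static struct intent relog(const struct intent *old)
{
	struct intent fresh;

	fresh.nextents = old->nextents;
	memcpy(fresh.extents, old->extents,
	       old->nextents * sizeof(old->extents[0]));
	return fresh;
}

int main(void)
{
	struct intent old = { 2, { 100, 200 } };
	struct intent new = relog(&old);

	printf("relogged %u extents, first at %lu\n",
	       new.nextents, new.extents[0]);
	return 0;
}
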
diff --git a/fs/xfs/xfs_refcount_item.h b/fs/xfs/xfs_refcount_item.h
index e47530f30489..de5f48ff4f74 100644
--- a/fs/xfs/xfs_refcount_item.h
+++ b/fs/xfs/xfs_refcount_item.h
@@ -80,6 +80,7 @@ extern struct kmem_zone *xfs_cud_zone;
struct xfs_cui_log_item *xfs_cui_init(struct xfs_mount *, uint);
void xfs_cui_item_free(struct xfs_cui_log_item *);
void xfs_cui_release(struct xfs_cui_log_item *);
-int xfs_cui_recover(struct xfs_trans *parent_tp, struct xfs_cui_log_item *cuip);
+int xfs_cui_recover(struct xfs_cui_log_item *cuip,
+ struct list_head *capture_list);
#endif /* __XFS_REFCOUNT_ITEM_H__ */
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 904d8285c226..01191ff46647 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -181,7 +181,7 @@ xfs_reflink_trim_around_shared(
int error = 0;
/* Holes, unwritten, and delalloc extents cannot be shared */
- if (!xfs_is_cow_inode(ip) || !xfs_bmap_is_real_extent(irec)) {
+ if (!xfs_is_cow_inode(ip) || !xfs_bmap_is_written_extent(irec)) {
*shared = false;
return 0;
}
@@ -657,7 +657,7 @@ xfs_reflink_end_cow_extent(
* preallocations can leak into the range we are called upon, and we
* need to skip them.
*/
- if (!xfs_bmap_is_real_extent(&got)) {
+ if (!xfs_bmap_is_written_extent(&got)) {
*end_fsb = del.br_startoff;
goto out_cancel;
}
@@ -986,41 +986,28 @@ xfs_reflink_ag_has_free_space(
}
/*
- * Unmap a range of blocks from a file, then map other blocks into the hole.
- * The range to unmap is (destoff : destoff + srcioff + irec->br_blockcount).
- * The extent irec is mapped into dest at irec->br_startoff.
+ * Remap the given extent into the file. The dmap blockcount will be set to
+ * the number of blocks that were actually remapped.
*/
STATIC int
xfs_reflink_remap_extent(
struct xfs_inode *ip,
- struct xfs_bmbt_irec *irec,
- xfs_fileoff_t destoff,
+ struct xfs_bmbt_irec *dmap,
xfs_off_t new_isize)
{
+ struct xfs_bmbt_irec smap;
struct xfs_mount *mp = ip->i_mount;
- bool real_extent = xfs_bmap_is_real_extent(irec);
struct xfs_trans *tp;
- unsigned int resblks;
- struct xfs_bmbt_irec uirec;
- xfs_filblks_t rlen;
- xfs_filblks_t unmap_len;
xfs_off_t newlen;
- int64_t qres;
+ int64_t qres, qdelta;
+ unsigned int resblks;
+ bool smap_real;
+ bool dmap_written = xfs_bmap_is_written_extent(dmap);
+ int nimaps;
int error;
- unmap_len = irec->br_startoff + irec->br_blockcount - destoff;
- trace_xfs_reflink_punch_range(ip, destoff, unmap_len);
-
- /* No reflinking if we're low on space */
- if (real_extent) {
- error = xfs_reflink_ag_has_free_space(mp,
- XFS_FSB_TO_AGNO(mp, irec->br_startblock));
- if (error)
- goto out;
- }
-
/* Start a rolling transaction to switch the mappings */
- resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
+ resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
if (error)
goto out;
@@ -1029,92 +1016,121 @@ xfs_reflink_remap_extent(
xfs_trans_ijoin(tp, ip, 0);
/*
- * Reserve quota for this operation. We don't know if the first unmap
- * in the dest file will cause a bmap btree split, so we always reserve
- * at least enough blocks for that split. If the extent being mapped
- * in is written, we need to reserve quota for that too.
+ * Read what's currently mapped in the destination file into smap.
+ * If smap isn't a hole, we will have to remove it before we can add
+ * dmap to the destination file.
*/
- qres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
- if (real_extent)
- qres += irec->br_blockcount;
- error = xfs_trans_reserve_quota_nblks(tp, ip, qres, 0,
- XFS_QMOPT_RES_REGBLKS);
+ nimaps = 1;
+ error = xfs_bmapi_read(ip, dmap->br_startoff, dmap->br_blockcount,
+ &smap, &nimaps, 0);
if (error)
goto out_cancel;
+ ASSERT(nimaps == 1 && smap.br_startoff == dmap->br_startoff);
+ smap_real = xfs_bmap_is_real_extent(&smap);
- trace_xfs_reflink_remap(ip, irec->br_startoff,
- irec->br_blockcount, irec->br_startblock);
+ /*
+ * We can only remap as many blocks as the smaller of the two extent
+ * maps, because we can only remap one extent at a time.
+ */
+ dmap->br_blockcount = min(dmap->br_blockcount, smap.br_blockcount);
+ ASSERT(dmap->br_blockcount == smap.br_blockcount);
- /* Unmap the old blocks in the data fork. */
- rlen = unmap_len;
- while (rlen) {
- ASSERT(tp->t_firstblock == NULLFSBLOCK);
- error = __xfs_bunmapi(tp, ip, destoff, &rlen, 0, 1);
+ trace_xfs_reflink_remap_extent_dest(ip, &smap);
+
+ /* No reflinking if the AG of the dest mapping is low on space. */
+ if (dmap_written) {
+ error = xfs_reflink_ag_has_free_space(mp,
+ XFS_FSB_TO_AGNO(mp, dmap->br_startblock));
if (error)
goto out_cancel;
+ }
+ /*
+ * Compute quota reservation if we think the quota block counter for
+ * this file could increase.
+ *
+ * We start by reserving enough blocks to handle a bmbt split.
+ *
+ * If we are mapping a written extent into the file, we need to have
+ * enough quota block count reservation to handle the blocks in that
+ * extent.
+ *
+ * Note that if we're replacing a delalloc reservation with a written
+ * extent, we have to take the full quota reservation because removing
+ * the delalloc reservation gives the block count back to the quota
+ * count. This is suboptimal, but the VFS flushed the dest range
+ * before we started. That should have removed all the delalloc
+ * reservations, but we code defensively.
+ */
+ qdelta = 0;
+ qres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
+ if (dmap_written)
+ qres += dmap->br_blockcount;
+ error = xfs_trans_reserve_quota_nblks(tp, ip, qres, 0,
+ XFS_QMOPT_RES_REGBLKS);
+ if (error)
+ goto out_cancel;
+
+ if (smap_real) {
/*
- * Trim the extent to whatever got unmapped.
- * Remember, bunmapi works backwards.
+ * If the extent we're unmapping is backed by storage (written
+ * or not), unmap the extent and drop its refcount.
*/
- uirec.br_startblock = irec->br_startblock + rlen;
- uirec.br_startoff = irec->br_startoff + rlen;
- uirec.br_blockcount = unmap_len - rlen;
- uirec.br_state = irec->br_state;
- unmap_len = rlen;
-
- /* If this isn't a real mapping, we're done. */
- if (!real_extent || uirec.br_blockcount == 0)
- goto next_extent;
-
- trace_xfs_reflink_remap(ip, uirec.br_startoff,
- uirec.br_blockcount, uirec.br_startblock);
+ xfs_bmap_unmap_extent(tp, ip, &smap);
+ xfs_refcount_decrease_extent(tp, &smap);
+ qdelta -= smap.br_blockcount;
+ } else if (smap.br_startblock == DELAYSTARTBLOCK) {
+ xfs_filblks_t len = smap.br_blockcount;
- /* Update the refcount tree */
- xfs_refcount_increase_extent(tp, &uirec);
-
- /* Map the new blocks into the data fork. */
- xfs_bmap_map_extent(tp, ip, &uirec);
+ /*
+ * If the extent we're unmapping is a delalloc reservation,
+ * we can use the regular bunmapi function to release the
+ * incore state. Dropping the delalloc reservation takes care
+ * of the quota reservation for us.
+ */
+ error = __xfs_bunmapi(NULL, ip, smap.br_startoff, &len, 0, 1);
+ if (error)
+ goto out_cancel;
+ ASSERT(len == 0);
+ }
- /* Update quota accounting. */
- xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT,
- uirec.br_blockcount);
+ /*
+ * If the extent we're sharing is backed by written storage, increase
+ * its refcount and map it into the file.
+ */
+ if (dmap_written) {
+ xfs_refcount_increase_extent(tp, dmap);
+ xfs_bmap_map_extent(tp, ip, dmap);
+ qdelta += dmap->br_blockcount;
+ }
- /* Update dest isize if needed. */
- newlen = XFS_FSB_TO_B(mp,
- uirec.br_startoff + uirec.br_blockcount);
- newlen = min_t(xfs_off_t, newlen, new_isize);
- if (newlen > i_size_read(VFS_I(ip))) {
- trace_xfs_reflink_update_inode_size(ip, newlen);
- i_size_write(VFS_I(ip), newlen);
- ip->i_d.di_size = newlen;
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- }
+ xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, qdelta);
-next_extent:
- /* Process all the deferred stuff. */
- error = xfs_defer_finish(&tp);
- if (error)
- goto out_cancel;
+ /* Update dest isize if needed. */
+ newlen = XFS_FSB_TO_B(mp, dmap->br_startoff + dmap->br_blockcount);
+ newlen = min_t(xfs_off_t, newlen, new_isize);
+ if (newlen > i_size_read(VFS_I(ip))) {
+ trace_xfs_reflink_update_inode_size(ip, newlen);
+ i_size_write(VFS_I(ip), newlen);
+ ip->i_d.di_size = newlen;
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
+ /* Commit everything and unlock. */
error = xfs_trans_commit(tp);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- if (error)
- goto out;
- return 0;
+ goto out_unlock;
out_cancel:
xfs_trans_cancel(tp);
+out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
- trace_xfs_reflink_remap_extent_error(ip, error, _RET_IP_);
+ if (error)
+ trace_xfs_reflink_remap_extent_error(ip, error, _RET_IP_);
return error;
}
-/*
- * Iteratively remap one file's extents (and holes) to another's.
- */
+/* Remap a range of one file to the other. */
int
xfs_reflink_remap_blocks(
struct xfs_inode *src,
@@ -1125,25 +1141,22 @@ xfs_reflink_remap_blocks(
loff_t *remapped)
{
struct xfs_bmbt_irec imap;
- xfs_fileoff_t srcoff;
- xfs_fileoff_t destoff;
+ struct xfs_mount *mp = src->i_mount;
+ xfs_fileoff_t srcoff = XFS_B_TO_FSBT(mp, pos_in);
+ xfs_fileoff_t destoff = XFS_B_TO_FSBT(mp, pos_out);
xfs_filblks_t len;
- xfs_filblks_t range_len;
xfs_filblks_t remapped_len = 0;
xfs_off_t new_isize = pos_out + remap_len;
int nimaps;
int error = 0;
- destoff = XFS_B_TO_FSBT(src->i_mount, pos_out);
- srcoff = XFS_B_TO_FSBT(src->i_mount, pos_in);
- len = XFS_B_TO_FSB(src->i_mount, remap_len);
+ len = min_t(xfs_filblks_t, XFS_B_TO_FSB(mp, remap_len),
+ XFS_MAX_FILEOFF);
- /* drange = (destoff, destoff + len); srange = (srcoff, srcoff + len) */
- while (len) {
- uint lock_mode;
+ trace_xfs_reflink_remap_blocks(src, srcoff, len, dest, destoff);
- trace_xfs_reflink_remap_blocks_loop(src, srcoff, len,
- dest, destoff);
+ while (len > 0) {
+ unsigned int lock_mode;
/* Read extent from the source file */
nimaps = 1;
@@ -1152,18 +1165,25 @@ xfs_reflink_remap_blocks(
xfs_iunlock(src, lock_mode);
if (error)
break;
- ASSERT(nimaps == 1);
-
- trace_xfs_reflink_remap_imap(src, srcoff, len, XFS_DATA_FORK,
- &imap);
+ /*
+ * The caller supposedly flushed all dirty pages in the source
+ * file range, which means that writeback should have allocated
+ * or deleted all delalloc reservations in that range. If we
+ * find one, that's a good sign that something is seriously
+ * wrong here.
+ */
+ ASSERT(nimaps == 1 && imap.br_startoff == srcoff);
+ if (imap.br_startblock == DELAYSTARTBLOCK) {
+ ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+ error = -EFSCORRUPTED;
+ break;
+ }
- /* Translate imap into the destination file. */
- range_len = imap.br_startoff + imap.br_blockcount - srcoff;
- imap.br_startoff += destoff - srcoff;
+ trace_xfs_reflink_remap_extent_src(src, &imap);
- /* Clear dest from destoff to the end of imap and map it in. */
- error = xfs_reflink_remap_extent(dest, &imap, destoff,
- new_isize);
+ /* Remap into the destination file at the given offset. */
+ imap.br_startoff = destoff;
+ error = xfs_reflink_remap_extent(dest, &imap, new_isize);
if (error)
break;
@@ -1173,10 +1193,10 @@ xfs_reflink_remap_blocks(
}
/* Advance drange/srange */
- srcoff += range_len;
- destoff += range_len;
- len -= range_len;
- remapped_len += range_len;
+ srcoff += imap.br_blockcount;
+ destoff += imap.br_blockcount;
+ len -= imap.br_blockcount;
+ remapped_len += imap.br_blockcount;
}
if (error)
@@ -1427,7 +1447,7 @@ xfs_reflink_dirty_extents(
goto out;
if (nmaps == 0)
break;
- if (!xfs_bmap_is_real_extent(&map[0]))
+ if (!xfs_bmap_is_written_extent(&map[0]))
goto next;
map[1] = map[0];
@@ -1544,7 +1564,8 @@ xfs_reflink_clear_inode_flag(
* We didn't find any shared blocks so turn off the reflink flag.
* First, get rid of any leftover CoW mappings.
*/
- error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF, true);
+ error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, XFS_MAX_FILEOFF,
+ true);
if (error)
return error;
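
The rewritten remap loop above works one source extent per iteration, trimming the mapping to the shorter of the source and destination extents and advancing all three cursors by the same block count. The cursor arithmetic in isolation, with made-up extent lengths:

#include <stdio.h>

/* Stand-in for a bmapi-style read: extent length at a given offset. */
static unsigned long long extent_len_at(unsigned long long off)
{
	return (off % 3) + 1;	/* 1-3 blocks, just to vary the steps */
}

int main(void)
{
	unsigned long long srcoff = 10, destoff = 50, len = 7, remapped = 0;

	while (len > 0) {
		unsigned long long step = extent_len_at(srcoff);

		if (step > len)
			step = len;	/* trim to the remaining range */
		/* ... remap [srcoff, srcoff + step) to destoff here ... */
		srcoff += step;
		destoff += step;
		len -= step;
		remapped += step;
	}
	printf("remapped %llu blocks; src cursor ends at %llu\n",
	       remapped, srcoff);
	return 0;
}
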
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index 8939e0ea09cd..ba1dbb6c4063 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -17,7 +17,7 @@
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
-
+#include "xfs_error.h"
kmem_zone_t *xfs_rui_zone;
kmem_zone_t *xfs_rud_zone;
@@ -122,39 +122,6 @@ xfs_rui_item_release(
xfs_rui_release(RUI_ITEM(lip));
}
-static const struct xfs_item_ops xfs_rui_item_ops = {
- .iop_size = xfs_rui_item_size,
- .iop_format = xfs_rui_item_format,
- .iop_unpin = xfs_rui_item_unpin,
- .iop_release = xfs_rui_item_release,
-};
-
-/*
- * Allocate and initialize an rui item with the given number of extents.
- */
-struct xfs_rui_log_item *
-xfs_rui_init(
- struct xfs_mount *mp,
- uint nextents)
-
-{
- struct xfs_rui_log_item *ruip;
-
- ASSERT(nextents > 0);
- if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
- ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
- else
- ruip = kmem_zone_zalloc(xfs_rui_zone, 0);
-
- xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
- ruip->rui_format.rui_nextents = nextents;
- ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
- atomic_set(&ruip->rui_next_extent, 0);
- atomic_set(&ruip->rui_refcount, 2);
-
- return ruip;
-}
-
/*
* Copy an RUI format buffer from the given buf, and into the destination
* RUI format structure. The RUI/RUD items were designed not to need any
@@ -171,8 +138,10 @@ xfs_rui_copy_format(
src_rui_fmt = buf->i_addr;
len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);
- if (buf->i_len != len)
+ if (buf->i_len != len) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
return -EFSCORRUPTED;
+ }
memcpy(dst_rui_fmt, src_rui_fmt, len);
return 0;
@@ -350,41 +319,16 @@ xfs_rmap_update_diff_items(
XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}
-/* Get an RUI. */
-STATIC void *
-xfs_rmap_update_create_intent(
- struct xfs_trans *tp,
- unsigned int count)
-{
- struct xfs_rui_log_item *ruip;
-
- ASSERT(tp != NULL);
- ASSERT(count > 0);
-
- ruip = xfs_rui_init(tp->t_mountp, count);
- ASSERT(ruip != NULL);
-
- /*
- * Get a log_item_desc to point at the new item.
- */
- xfs_trans_add_item(tp, &ruip->rui_item);
- return ruip;
-}
-
/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
struct xfs_trans *tp,
- void *intent,
- struct list_head *item)
+ struct xfs_rui_log_item *ruip,
+ struct xfs_rmap_intent *rmap)
{
- struct xfs_rui_log_item *ruip = intent;
- struct xfs_rmap_intent *rmap;
uint next_extent;
struct xfs_map_extent *map;
- rmap = container_of(item, struct xfs_rmap_intent, ri_list);
-
tp->t_flags |= XFS_TRANS_DIRTY;
set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);
@@ -404,14 +348,35 @@ xfs_rmap_update_log_item(
rmap->ri_bmap.br_state);
}
+static struct xfs_log_item *
+xfs_rmap_update_create_intent(
+ struct xfs_trans *tp,
+ struct list_head *items,
+ unsigned int count,
+ bool sort)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_rui_log_item *ruip = xfs_rui_init(mp, count);
+ struct xfs_rmap_intent *rmap;
+
+ ASSERT(count > 0);
+
+ xfs_trans_add_item(tp, &ruip->rui_item);
+ if (sort)
+ list_sort(mp, items, xfs_rmap_update_diff_items);
+ list_for_each_entry(rmap, items, ri_list)
+ xfs_rmap_update_log_item(tp, ruip, rmap);
+ return &ruip->rui_item;
+}
+
/* Get an RUD so we can process all the deferred rmap updates. */
STATIC void *
xfs_rmap_update_create_done(
struct xfs_trans *tp,
- void *intent,
+ struct xfs_log_item *intent,
unsigned int count)
{
- return xfs_trans_get_rud(tp, intent);
+ return xfs_trans_get_rud(tp, RUI_ITEM(intent));
}
/* Process a deferred rmap update. */
@@ -453,9 +418,9 @@ xfs_rmap_update_finish_cleanup(
/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
- void *intent)
+ struct xfs_log_item *intent)
{
- xfs_rui_release(intent);
+ xfs_rui_release(RUI_ITEM(intent));
}
/* Cancel a deferred rmap update. */
@@ -471,10 +436,8 @@ xfs_rmap_update_cancel_item(
const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
.max_items = XFS_RUI_MAX_FAST_EXTENTS,
- .diff_items = xfs_rmap_update_diff_items,
.create_intent = xfs_rmap_update_create_intent,
.abort_intent = xfs_rmap_update_abort_intent,
- .log_item = xfs_rmap_update_log_item,
.create_done = xfs_rmap_update_create_done,
.finish_item = xfs_rmap_update_finish_item,
.finish_cleanup = xfs_rmap_update_finish_cleanup,
@@ -487,9 +450,10 @@ const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
*/
int
xfs_rui_recover(
- struct xfs_mount *mp,
- struct xfs_rui_log_item *ruip)
+ struct xfs_rui_log_item *ruip,
+ struct list_head *capture_list)
{
+ struct xfs_mount *mp = ruip->rui_item.li_mountp;
int i;
int error = 0;
struct xfs_map_extent *rmap;
@@ -539,7 +503,7 @@ xfs_rui_recover(
*/
set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
xfs_rui_release(ruip);
- return -EIO;
+ return -EFSCORRUPTED;
}
}
@@ -581,6 +545,7 @@ xfs_rui_recover(
type = XFS_RMAP_FREE;
break;
default:
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
error = -EFSCORRUPTED;
goto abort_error;
}
@@ -595,11 +560,70 @@ xfs_rui_recover(
xfs_rmap_finish_one_cleanup(tp, rcur, error);
set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
- error = xfs_trans_commit(tp);
- return error;
+ return xfs_defer_ops_capture_and_commit(tp, NULL, capture_list);
abort_error:
xfs_rmap_finish_one_cleanup(tp, rcur, error);
xfs_trans_cancel(tp);
return error;
}
+
+/* Relog an intent item to push the log tail forward. */
+static struct xfs_log_item *
+xfs_rui_item_relog(
+ struct xfs_log_item *intent,
+ struct xfs_trans *tp)
+{
+ struct xfs_rud_log_item *rudp;
+ struct xfs_rui_log_item *ruip;
+ struct xfs_map_extent *extp;
+ unsigned int count;
+
+ count = RUI_ITEM(intent)->rui_format.rui_nextents;
+ extp = RUI_ITEM(intent)->rui_format.rui_extents;
+
+ tp->t_flags |= XFS_TRANS_DIRTY;
+ rudp = xfs_trans_get_rud(tp, RUI_ITEM(intent));
+ set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);
+
+ ruip = xfs_rui_init(tp->t_mountp, count);
+ memcpy(ruip->rui_format.rui_extents, extp, count * sizeof(*extp));
+ atomic_set(&ruip->rui_next_extent, count);
+ xfs_trans_add_item(tp, &ruip->rui_item);
+ set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);
+ return &ruip->rui_item;
+}
+
+static const struct xfs_item_ops xfs_rui_item_ops = {
+ .iop_size = xfs_rui_item_size,
+ .iop_format = xfs_rui_item_format,
+ .iop_unpin = xfs_rui_item_unpin,
+ .iop_release = xfs_rui_item_release,
+ .iop_relog = xfs_rui_item_relog,
+};
+
+/*
+ * Allocate and initialize an RUI item with the given number of extents.
+ */
+struct xfs_rui_log_item *
+xfs_rui_init(
+ struct xfs_mount *mp,
+ uint nextents)
+
+{
+ struct xfs_rui_log_item *ruip;
+
+ ASSERT(nextents > 0);
+ if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
+ ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
+ else
+ ruip = kmem_zone_zalloc(xfs_rui_zone, 0);
+
+ xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
+ ruip->rui_format.rui_nextents = nextents;
+ ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
+ atomic_set(&ruip->rui_next_extent, 0);
+ atomic_set(&ruip->rui_refcount, 2);
+
+ return ruip;
+}
diff --git a/fs/xfs/xfs_rmap_item.h b/fs/xfs/xfs_rmap_item.h
index 8708e4a5aa5c..5cf4acb0e915 100644
--- a/fs/xfs/xfs_rmap_item.h
+++ b/fs/xfs/xfs_rmap_item.h
@@ -82,6 +82,7 @@ int xfs_rui_copy_format(struct xfs_log_iovec *buf,
struct xfs_rui_log_format *dst_rui_fmt);
void xfs_rui_item_free(struct xfs_rui_log_item *);
void xfs_rui_release(struct xfs_rui_log_item *);
-int xfs_rui_recover(struct xfs_mount *mp, struct xfs_rui_log_item *ruip);
+int xfs_rui_recover(struct xfs_rui_log_item *ruip,
+ struct list_head *capture_list);
#endif /* __XFS_RMAP_ITEM_H__ */
diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c
index 113883c4f202..20e0534a772c 100644
--- a/fs/xfs/xfs_stats.c
+++ b/fs/xfs/xfs_stats.c
@@ -23,6 +23,7 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
uint64_t xs_xstrat_bytes = 0;
uint64_t xs_write_bytes = 0;
uint64_t xs_read_bytes = 0;
+ uint64_t defer_relog = 0;
static const struct xstats_entry {
char *desc;
@@ -57,24 +58,27 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
/* Loop over all stats groups */
for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
- len += snprintf(buf + len, PATH_MAX - len, "%s",
+ len += scnprintf(buf + len, PATH_MAX - len, "%s",
xstats[i].desc);
/* inner loop does each group */
for (; j < xstats[i].endpoint; j++)
- len += snprintf(buf + len, PATH_MAX - len, " %u",
+ len += scnprintf(buf + len, PATH_MAX - len, " %u",
counter_val(stats, j));
- len += snprintf(buf + len, PATH_MAX - len, "\n");
+ len += scnprintf(buf + len, PATH_MAX - len, "\n");
}
/* extra precision counters */
for_each_possible_cpu(i) {
xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
+ defer_relog += per_cpu_ptr(stats, i)->s.defer_relog;
}
- len += snprintf(buf + len, PATH_MAX-len, "xpc %Lu %Lu %Lu\n",
+ len += scnprintf(buf + len, PATH_MAX-len, "xpc %Lu %Lu %Lu\n",
xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
- len += snprintf(buf + len, PATH_MAX-len, "debug %u\n",
+ len += scnprintf(buf + len, PATH_MAX-len, "defer_relog %llu\n",
+ defer_relog);
+ len += scnprintf(buf + len, PATH_MAX-len, "debug %u\n",
#if defined(DEBUG)
1);
#else
diff --git a/fs/xfs/xfs_stats.h b/fs/xfs/xfs_stats.h
index 34d704f703d2..43ffba74f045 100644
--- a/fs/xfs/xfs_stats.h
+++ b/fs/xfs/xfs_stats.h
@@ -137,6 +137,7 @@ struct __xfsstats {
uint64_t xs_xstrat_bytes;
uint64_t xs_write_bytes;
uint64_t xs_read_bytes;
+ uint64_t defer_relog;
};
#define xfsstats_offset(f) (offsetof(struct __xfsstats, f)/sizeof(uint32_t))
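
The xfs_stats_format() changes above swap snprintf() for scnprintf(): snprintf() returns the length the output would have had, so accumulating it into len can walk buf + len past the end of the buffer, while scnprintf() returns the bytes actually written. A userspace demonstration using a local wrapper in place of the kernel's scnprintf():

#include <stdarg.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's scnprintf(). */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);
	if (n < 0)
		return 0;
	return (size_t)n < size ? n : (int)(size - 1);
}

int main(void)
{
	char buf[8];
	int a = snprintf(buf, sizeof(buf), "0123456789");	/* 10 */
	int b = my_scnprintf(buf, sizeof(buf), "0123456789");	/* 7 */

	printf("snprintf len %d, scnprintf len %d\n", a, b);
	return 0;
}
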
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 8d1df9f8be07..e802cbc9daad 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -490,10 +490,12 @@ xfs_showargs(
seq_printf(m, ",swidth=%d",
(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
- if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
- seq_puts(m, ",usrquota");
- else if (mp->m_qflags & XFS_UQUOTA_ACCT)
- seq_puts(m, ",uqnoenforce");
+ if (mp->m_qflags & XFS_UQUOTA_ACCT) {
+ if (mp->m_qflags & XFS_UQUOTA_ENFD)
+ seq_puts(m, ",usrquota");
+ else
+ seq_puts(m, ",uqnoenforce");
+ }
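
The old condition `m_qflags & (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD)` is true when either bit is set, so an enforcement bit without the accounting bit would still print ",usrquota"; the nested form above reports enforcement only when accounting is actually enabled. The predicate difference in two lines, with illustrative flag values:

#include <stdio.h>

#define ACCT 0x1
#define ENFD 0x2

int main(void)
{
	unsigned int qflags = ENFD;	/* enforcement set, accounting off */

	printf("old test: %d\n", !!(qflags & (ACCT | ENFD)));		  /* 1 */
	printf("new test: %d\n", !!((qflags & ACCT) && (qflags & ENFD))); /* 0 */
	return 0;
}
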
if (mp->m_qflags & XFS_PQUOTA_ACCT) {
if (mp->m_qflags & XFS_PQUOTA_ENFD)
@@ -512,32 +514,6 @@ xfs_showargs(
seq_puts(m, ",noquota");
}
-static uint64_t
-xfs_max_file_offset(
- unsigned int blockshift)
-{
- unsigned int pagefactor = 1;
- unsigned int bitshift = BITS_PER_LONG - 1;
-
- /* Figure out maximum filesize, on Linux this can depend on
- * the filesystem blocksize (on 32 bit platforms).
- * __block_write_begin does this in an [unsigned] long long...
- * page->index << (PAGE_SHIFT - bbits)
- * So, for page sized blocks (4K on 32 bit platforms),
- * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
- * (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
- * but for smaller blocksizes it is less (bbits = log2 bsize).
- */
-
-#if BITS_PER_LONG == 32
- ASSERT(sizeof(sector_t) == 8);
- pagefactor = PAGE_SIZE;
- bitshift = BITS_PER_LONG;
-#endif
-
- return (((uint64_t)pagefactor) << bitshift) - 1;
-}
-
/*
* Set parameters for inode allocation heuristics, taking into account
* filesystem size and inode32/inode64 mount options; i.e. specifically
@@ -866,6 +842,20 @@ xfs_destroy_mount_workqueues(
destroy_workqueue(mp->m_buf_workqueue);
}
+static void
+xfs_flush_inodes_worker(
+ struct work_struct *work)
+{
+ struct xfs_mount *mp = container_of(work, struct xfs_mount,
+ m_flush_inodes_work);
+ struct super_block *sb = mp->m_super;
+
+ if (down_read_trylock(&sb->s_umount)) {
+ sync_inodes_sb(sb);
+ up_read(&sb->s_umount);
+ }
+}
+
/*
* Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
* or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
@@ -876,12 +866,15 @@ void
xfs_flush_inodes(
struct xfs_mount *mp)
{
- struct super_block *sb = mp->m_super;
+ /*
+ * If flush_work() returns true then that means we waited for a flush
+ * which was already in progress. Don't bother running another scan.
+ */
+ if (flush_work(&mp->m_flush_inodes_work))
+ return;
- if (down_read_trylock(&sb->s_umount)) {
- sync_inodes_sb(sb);
- up_read(&sb->s_umount);
- }
+ queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
+ flush_work(&mp->m_flush_inodes_work);
}
/* Catch misguided souls that try to use this interface on XFS */
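
xfs_flush_inodes() above coalesces concurrent flush requests: a caller first waits on any flush already running and returns if there was one, and only otherwise queues and waits for a fresh run, so a burst of callers triggers at most a couple of scans instead of one scan each. The shape of the pattern with the real workqueue API; this fragment assumes kernel context and is not buildable standalone:

#include <linux/workqueue.h>

static void flush_worker(struct work_struct *work)
{
	/* the expensive sync scan runs here, once per queued run */
}

static DECLARE_WORK(coalesced_flush, flush_worker);

void request_flush(struct workqueue_struct *wq)
{
	/*
	 * flush_work() returns true if it had to wait for a run that
	 * was already queued or executing; that run serves this caller.
	 */
	if (flush_work(&coalesced_flush))
		return;

	queue_work(wq, &coalesced_flush);
	flush_work(&coalesced_flush);
}
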
@@ -1237,6 +1230,10 @@ xfs_fs_remount(
char *p;
int error;
+ /* version 5 superblocks always support version counters. */
+ if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
+ *flags |= SB_I_VERSION;
+
/* First, check for complete junk; i.e. invalid options */
error = xfs_test_remount_options(sb, options);
if (error)
@@ -1558,6 +1555,7 @@ xfs_mount_alloc(
spin_lock_init(&mp->m_perag_lock);
mutex_init(&mp->m_growlock);
atomic_set(&mp->m_active_trans, 0);
+ INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
@@ -1650,6 +1648,26 @@ xfs_fs_fill_super(
if (error)
goto out_free_sb;
+ /*
+ * XFS block mappings use 54 bits to store the logical block offset.
+ * This should suffice to handle the maximum file size that the VFS
+ * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
+ * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
+ * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
+ * to check this assertion.
+ *
+ * Avoid integer overflow by comparing the maximum bmbt offset to the
+ * maximum pagecache offset in units of fs blocks.
+ */
+ if (XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE) > XFS_MAX_FILEOFF) {
+ xfs_warn(mp,
+"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
+ XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
+ XFS_MAX_FILEOFF);
+ error = -EINVAL;
+ goto out_free_sb;
+ }
+
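
The check above compares the largest pagecache offset, expressed in filesystem blocks, against the largest logical block the extent format can record. The same arithmetic as a standalone program; XFS_MAX_FILEOFF is assumed here to be 2^54 - 1, taken from the 54-bit figure in the comment:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_lfs_filesize = INT64_MAX;		/* 2^63 - 1, 64-bit VFS */
	unsigned int blocklog = 12;			/* 4096-byte blocks */
	uint64_t max_fileoff = (1ULL << 54) - 1;	/* assumed 54-bit limit */
	uint64_t max_pgoff_fsb = max_lfs_filesize >> blocklog;

	printf("pagecache max fsb %llu vs bmbt max fsb %llu -> %s\n",
	       (unsigned long long)max_pgoff_fsb,
	       (unsigned long long)max_fileoff,
	       max_pgoff_fsb > max_fileoff ? "reject mount" : "ok");
	return 0;
}
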
error = xfs_filestream_mount(mp);
if (error)
goto out_free_sb;
@@ -1661,7 +1679,7 @@ xfs_fs_fill_super(
sb->s_magic = XFS_SUPER_MAGIC;
sb->s_blocksize = mp->m_sb.sb_blocksize;
sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
- sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_max_links = XFS_MAXLINK;
sb->s_time_gran = 1;
sb->s_time_min = S32_MIN;
@@ -1898,13 +1916,13 @@ xfs_init_zones(void)
if (!xfs_buf_item_zone)
goto out_destroy_trans_zone;
- xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
+ xfs_efd_zone = kmem_zone_init((sizeof(struct xfs_efd_log_item) +
((XFS_EFD_MAX_FAST_EXTENTS - 1) *
sizeof(xfs_extent_t))), "xfs_efd_item");
if (!xfs_efd_zone)
goto out_destroy_buf_item_zone;
- xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
+ xfs_efi_zone = kmem_zone_init((sizeof(struct xfs_efi_log_item) +
((XFS_EFI_MAX_FAST_EXTENTS - 1) *
sizeof(xfs_extent_t))), "xfs_efi_item");
if (!xfs_efi_zone)
@@ -1918,8 +1936,8 @@ xfs_init_zones(void)
goto out_destroy_efi_zone;
xfs_ili_zone =
- kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
- KM_ZONE_SPREAD, NULL);
+ kmem_zone_init_flags(sizeof(struct xfs_inode_log_item),
+ "xfs_ili", KM_ZONE_SPREAD, NULL);
if (!xfs_ili_zone)
goto out_destroy_inode_zone;
xfs_icreate_zone = kmem_zone_init(sizeof(struct xfs_icreate_item),
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index ed66fd2de327..a2037e22ebda 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -191,9 +191,7 @@ xfs_symlink(
/*
* Make sure that we have allocated dquot(s) on disk.
*/
- error = xfs_qm_vop_dqalloc(dp,
- xfs_kuid_to_uid(current_fsuid()),
- xfs_kgid_to_gid(current_fsgid()), prid,
+ error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
&udqp, &gdqp, &pdqp);
if (error)
@@ -203,7 +201,7 @@ xfs_symlink(
* The symlink will fit into the inode data fork?
* There can't be any attributes so we get the whole variable part.
*/
- if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version))
+ if (pathlen <= XFS_LITINO(mp))
fs_blocks = 0;
else
fs_blocks = xfs_symlink_blocks(mp, pathlen);
@@ -316,6 +314,7 @@ xfs_symlink(
}
ASSERT(pathlen == 0);
}
+ i_size_write(VFS_I(ip), ip->i_d.di_size);
/*
* Create the directory entry for the symlink.
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index eaae275ed430..4b5818395406 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -1011,6 +1011,7 @@ DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub);
+DEFINE_LOGGRANT_EVENT(xfs_log_cil_wait);
DECLARE_EVENT_CLASS(xfs_log_item_class,
TP_PROTO(struct xfs_log_item *lip),
@@ -2417,6 +2418,7 @@ DEFINE_DEFER_PENDING_EVENT(xfs_defer_create_intent);
DEFINE_DEFER_PENDING_EVENT(xfs_defer_cancel_list);
DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_finish);
DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_abort);
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_relog_intent);
#define DEFINE_BMAP_FREE_DEFERRED_EVENT DEFINE_PHYS_EXTENT_DEFERRED_EVENT
DEFINE_BMAP_FREE_DEFERRED_EVENT(xfs_bmap_free_defer);
@@ -3077,8 +3079,7 @@ DEFINE_EVENT(xfs_inode_irec_class, name, \
DEFINE_INODE_EVENT(xfs_reflink_set_inode_flag);
DEFINE_INODE_EVENT(xfs_reflink_unset_inode_flag);
DEFINE_ITRUNC_EVENT(xfs_reflink_update_inode_size);
-DEFINE_IMAP_EVENT(xfs_reflink_remap_imap);
-TRACE_EVENT(xfs_reflink_remap_blocks_loop,
+TRACE_EVENT(xfs_reflink_remap_blocks,
TP_PROTO(struct xfs_inode *src, xfs_fileoff_t soffset,
xfs_filblks_t len, struct xfs_inode *dest,
xfs_fileoff_t doffset),
@@ -3109,59 +3110,14 @@ TRACE_EVENT(xfs_reflink_remap_blocks_loop,
__entry->dest_ino,
__entry->dest_lblk)
);
-TRACE_EVENT(xfs_reflink_punch_range,
- TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t lblk,
- xfs_extlen_t len),
- TP_ARGS(ip, lblk, len),
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(xfs_ino_t, ino)
- __field(xfs_fileoff_t, lblk)
- __field(xfs_extlen_t, len)
- ),
- TP_fast_assign(
- __entry->dev = VFS_I(ip)->i_sb->s_dev;
- __entry->ino = ip->i_ino;
- __entry->lblk = lblk;
- __entry->len = len;
- ),
- TP_printk("dev %d:%d ino 0x%llx lblk 0x%llx len 0x%x",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ino,
- __entry->lblk,
- __entry->len)
-);
-TRACE_EVENT(xfs_reflink_remap,
- TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t lblk,
- xfs_extlen_t len, xfs_fsblock_t new_pblk),
- TP_ARGS(ip, lblk, len, new_pblk),
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(xfs_ino_t, ino)
- __field(xfs_fileoff_t, lblk)
- __field(xfs_extlen_t, len)
- __field(xfs_fsblock_t, new_pblk)
- ),
- TP_fast_assign(
- __entry->dev = VFS_I(ip)->i_sb->s_dev;
- __entry->ino = ip->i_ino;
- __entry->lblk = lblk;
- __entry->len = len;
- __entry->new_pblk = new_pblk;
- ),
- TP_printk("dev %d:%d ino 0x%llx lblk 0x%llx len 0x%x new_pblk %llu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ino,
- __entry->lblk,
- __entry->len,
- __entry->new_pblk)
-);
DEFINE_DOUBLE_IO_EVENT(xfs_reflink_remap_range);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_range_error);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_set_inode_flag_error);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_update_inode_size_error);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_blocks_error);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_extent_error);
+DEFINE_INODE_IREC_EVENT(xfs_reflink_remap_extent_src);
+DEFINE_INODE_IREC_EVENT(xfs_reflink_remap_extent_dest);
/* dedupe tracepoints */
DEFINE_DOUBLE_IO_EVENT(xfs_reflink_compare_extents);
@@ -3609,6 +3565,27 @@ DEFINE_KMEM_EVENT(kmem_alloc_large);
DEFINE_KMEM_EVENT(kmem_realloc);
DEFINE_KMEM_EVENT(kmem_zone_alloc);
+TRACE_EVENT(xfs_check_new_dalign,
+ TP_PROTO(struct xfs_mount *mp, int new_dalign, xfs_ino_t calc_rootino),
+ TP_ARGS(mp, new_dalign, calc_rootino),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, new_dalign)
+ __field(xfs_ino_t, sb_rootino)
+ __field(xfs_ino_t, calc_rootino)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->new_dalign = new_dalign;
+ __entry->sb_rootino = mp->m_sb.sb_rootino;
+ __entry->calc_rootino = calc_rootino;
+ ),
+ TP_printk("dev %d:%d new_dalign %d sb_rootino %llu calc_rootino %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->new_dalign, __entry->sb_rootino,
+ __entry->calc_rootino)
+)
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index b32a66452d44..47acf4096022 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -107,7 +107,8 @@ xfs_trans_dup(
ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
(tp->t_flags & XFS_TRANS_RESERVE) |
- (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
+ (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
+ (tp->t_flags & XFS_TRANS_RES_FDBLKS);
/* We gave our writer reference to the new transaction */
tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
@@ -273,6 +274,8 @@ xfs_trans_alloc(
*/
WARN_ON(resp->tr_logres > 0 &&
mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
+ ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
+ xfs_sb_version_haslazysbcount(&mp->m_sb));
atomic_inc(&mp->m_active_trans);
tp->t_magic = XFS_TRANS_HEADER_MAGIC;
@@ -368,6 +371,20 @@ xfs_trans_mod_sb(
tp->t_blk_res_used += (uint)-delta;
if (tp->t_blk_res_used > tp->t_blk_res)
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ } else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
+ int64_t blkres_delta;
+
+ /*
+ * Return freed blocks directly to the reservation
+ * instead of the global pool, being careful not to
+ * overflow the trans counter. This is used to preserve
+ * reservation across chains of transaction rolls that
+ * repeatedly free and allocate blocks.
+ */
+ blkres_delta = min_t(int64_t, delta,
+ UINT_MAX - tp->t_blk_res);
+ tp->t_blk_res += blkres_delta;
+ delta -= blkres_delta;
}
tp->t_fdblocks_delta += delta;
if (xfs_sb_version_haslazysbcount(&mp->m_sb))
@@ -532,57 +549,9 @@ xfs_trans_apply_sb_deltas(
sizeof(sbp->sb_frextents) - 1);
}
-STATIC int
-xfs_sb_mod8(
- uint8_t *field,
- int8_t delta)
-{
- int8_t counter = *field;
-
- counter += delta;
- if (counter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- *field = counter;
- return 0;
-}
-
-STATIC int
-xfs_sb_mod32(
- uint32_t *field,
- int32_t delta)
-{
- int32_t counter = *field;
-
- counter += delta;
- if (counter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- *field = counter;
- return 0;
-}
-
-STATIC int
-xfs_sb_mod64(
- uint64_t *field,
- int64_t delta)
-{
- int64_t counter = *field;
-
- counter += delta;
- if (counter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- *field = counter;
- return 0;
-}
-
/*
- * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
- * and apply superblock counter changes to the in-core superblock. The
+ * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations and
+ * apply superblock counter changes to the in-core superblock. The
* t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
* applied to the in-core superblock. The idea is that that has already been
* done.
@@ -627,20 +596,17 @@ xfs_trans_unreserve_and_mod_sb(
/* apply the per-cpu counters */
if (blkdelta) {
error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
- if (error)
- goto out;
+ ASSERT(!error);
}
if (idelta) {
error = xfs_mod_icount(mp, idelta);
- if (error)
- goto out_undo_fdblocks;
+ ASSERT(!error);
}
if (ifreedelta) {
error = xfs_mod_ifree(mp, ifreedelta);
- if (error)
- goto out_undo_icount;
+ ASSERT(!error);
}
if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
@@ -648,95 +614,23 @@ xfs_trans_unreserve_and_mod_sb(
/* apply remaining deltas */
spin_lock(&mp->m_sb_lock);
- if (rtxdelta) {
- error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
- if (error)
- goto out_undo_ifree;
- }
-
- if (tp->t_dblocks_delta != 0) {
- error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
- if (error)
- goto out_undo_frextents;
- }
- if (tp->t_agcount_delta != 0) {
- error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
- if (error)
- goto out_undo_dblocks;
- }
- if (tp->t_imaxpct_delta != 0) {
- error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
- if (error)
- goto out_undo_agcount;
- }
- if (tp->t_rextsize_delta != 0) {
- error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
- tp->t_rextsize_delta);
- if (error)
- goto out_undo_imaxpct;
- }
- if (tp->t_rbmblocks_delta != 0) {
- error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
- tp->t_rbmblocks_delta);
- if (error)
- goto out_undo_rextsize;
- }
- if (tp->t_rblocks_delta != 0) {
- error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
- if (error)
- goto out_undo_rbmblocks;
- }
- if (tp->t_rextents_delta != 0) {
- error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
- tp->t_rextents_delta);
- if (error)
- goto out_undo_rblocks;
- }
- if (tp->t_rextslog_delta != 0) {
- error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
- tp->t_rextslog_delta);
- if (error)
- goto out_undo_rextents;
- }
+ mp->m_sb.sb_frextents += rtxdelta;
+ mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
+ mp->m_sb.sb_agcount += tp->t_agcount_delta;
+ mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
+ mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
+ mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
+ mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
+ mp->m_sb.sb_rextents += tp->t_rextents_delta;
+ mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
spin_unlock(&mp->m_sb_lock);
- return;
-out_undo_rextents:
- if (tp->t_rextents_delta)
- xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
-out_undo_rblocks:
- if (tp->t_rblocks_delta)
- xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
-out_undo_rbmblocks:
- if (tp->t_rbmblocks_delta)
- xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
-out_undo_rextsize:
- if (tp->t_rextsize_delta)
- xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
-out_undo_imaxpct:
- if (tp->t_rextsize_delta)
- xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
-out_undo_agcount:
- if (tp->t_agcount_delta)
- xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
-out_undo_dblocks:
- if (tp->t_dblocks_delta)
- xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
-out_undo_frextents:
- if (rtxdelta)
- xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
-out_undo_ifree:
- spin_unlock(&mp->m_sb_lock);
- if (ifreedelta)
- xfs_mod_ifree(mp, -ifreedelta);
-out_undo_icount:
- if (idelta)
- xfs_mod_icount(mp, -idelta);
-out_undo_fdblocks:
- if (blkdelta)
- xfs_mod_fdblocks(mp, -blkdelta, rsvd);
-out:
- ASSERT(error == 0);
+ /*
+ * Debug checks outside of the spinlock so they don't lock up the
+ * machine if they fail.
+ */
+ ASSERT(mp->m_sb.sb_imax_pct >= 0);
+ ASSERT(mp->m_sb.sb_rextslog >= 0);
return;
}
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 64d7f171ebd3..941647027f00 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -77,6 +77,8 @@ struct xfs_item_ops {
void (*iop_release)(struct xfs_log_item *);
xfs_lsn_t (*iop_committed)(struct xfs_log_item *, xfs_lsn_t);
void (*iop_error)(struct xfs_log_item *, xfs_buf_t *);
+ struct xfs_log_item *(*iop_relog)(struct xfs_log_item *intent,
+ struct xfs_trans *tp);
};
/*
@@ -244,4 +246,12 @@ void xfs_trans_buf_copy_type(struct xfs_buf *dst_bp,
extern kmem_zone_t *xfs_trans_zone;
+static inline struct xfs_log_item *
+xfs_trans_item_relog(
+ struct xfs_log_item *lip,
+ struct xfs_trans *tp)
+{
+ return lip->li_ops->iop_relog(lip, tp);
+}
+
#endif /* __XFS_TRANS_H__ */
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 812108f6cc89..a41ba155d3a3 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -108,17 +108,25 @@ xfs_ail_next(
* We need the AIL lock in order to get a coherent read of the lsn of the last
* item in the AIL.
*/
+static xfs_lsn_t
+__xfs_ail_min_lsn(
+ struct xfs_ail *ailp)
+{
+ struct xfs_log_item *lip = xfs_ail_min(ailp);
+
+ if (lip)
+ return lip->li_lsn;
+ return 0;
+}
+
xfs_lsn_t
xfs_ail_min_lsn(
struct xfs_ail *ailp)
{
- xfs_lsn_t lsn = 0;
- struct xfs_log_item *lip;
+ xfs_lsn_t lsn;
spin_lock(&ailp->ail_lock);
- lip = xfs_ail_min(ailp);
- if (lip)
- lsn = lip->li_lsn;
+ lsn = __xfs_ail_min_lsn(ailp);
spin_unlock(&ailp->ail_lock);
return lsn;
@@ -394,16 +402,10 @@ xfsaild_push(
target = ailp->ail_target;
ailp->ail_target_prev = target;
+ /* we're done if the AIL is empty or our push has reached the end */
lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->ail_last_pushed_lsn);
- if (!lip) {
- /*
- * If the AIL is empty or our push has reached the end we are
- * done now.
- */
- xfs_trans_ail_cursor_done(&cur);
- spin_unlock(&ailp->ail_lock);
+ if (!lip)
goto out_done;
- }
XFS_STATS_INC(mp, xs_push_ail);
@@ -485,6 +487,8 @@ xfsaild_push(
break;
lsn = lip->li_lsn;
}
+
+out_done:
xfs_trans_ail_cursor_done(&cur);
spin_unlock(&ailp->ail_lock);
@@ -492,7 +496,6 @@ xfsaild_push(
ailp->ail_log_flush++;
if (!count || XFS_LSN_CMP(lsn, target) >= 0) {
-out_done:
/*
* We reached the target or the AIL is empty, so wait a bit
* longer for I/O to complete and remove pushed items from the
@@ -584,7 +587,8 @@ xfsaild(
*/
smp_rmb();
if (!xfs_ail_min(ailp) &&
- ailp->ail_target == ailp->ail_target_prev) {
+ ailp->ail_target == ailp->ail_target_prev &&
+ list_empty(&ailp->ail_buf_list)) {
spin_unlock(&ailp->ail_lock);
freezable_schedule();
tout = 0;
@@ -680,6 +684,28 @@ xfs_ail_push_all_sync(
finish_wait(&ailp->ail_empty, &wait);
}
+void
+xfs_ail_update_finish(
+ struct xfs_ail *ailp,
+ xfs_lsn_t old_lsn) __releases(ailp->ail_lock)
+{
+ struct xfs_mount *mp = ailp->ail_mount;
+
+ /* if the tail lsn hasn't changed, don't do updates or wakeups. */
+ if (!old_lsn || old_lsn == __xfs_ail_min_lsn(ailp)) {
+ spin_unlock(&ailp->ail_lock);
+ return;
+ }
+
+ if (!XFS_FORCED_SHUTDOWN(mp))
+ xlog_assign_tail_lsn_locked(mp);
+
+ if (list_empty(&ailp->ail_head))
+ wake_up_all(&ailp->ail_empty);
+ spin_unlock(&ailp->ail_lock);
+ xfs_log_space_wake(mp);
+}
+
/*
* xfs_trans_ail_update - bulk AIL insertion operation.
*
@@ -711,7 +737,7 @@ xfs_trans_ail_update_bulk(
xfs_lsn_t lsn) __releases(ailp->ail_lock)
{
struct xfs_log_item *mlip;
- int mlip_changed = 0;
+ xfs_lsn_t tail_lsn = 0;
int i;
LIST_HEAD(tmp);
@@ -726,9 +752,10 @@ xfs_trans_ail_update_bulk(
continue;
trace_xfs_ail_move(lip, lip->li_lsn, lsn);
+ if (mlip == lip && !tail_lsn)
+ tail_lsn = lip->li_lsn;
+
xfs_ail_delete(ailp, lip);
- if (mlip == lip)
- mlip_changed = 1;
} else {
trace_xfs_ail_insert(lip, 0, lsn);
}
@@ -739,23 +766,23 @@ xfs_trans_ail_update_bulk(
if (!list_empty(&tmp))
xfs_ail_splice(ailp, cur, &tmp, lsn);
- if (mlip_changed) {
- if (!XFS_FORCED_SHUTDOWN(ailp->ail_mount))
- xlog_assign_tail_lsn_locked(ailp->ail_mount);
- spin_unlock(&ailp->ail_lock);
-
- xfs_log_space_wake(ailp->ail_mount);
- } else {
- spin_unlock(&ailp->ail_lock);
- }
+ xfs_ail_update_finish(ailp, tail_lsn);
}
-bool
+/*
+ * Delete one log item from the AIL.
+ *
+ * If this item was at the tail of the AIL, return the LSN of the log item so
+ * that we can use it to check if the LSN of the tail of the log has moved
+ * when finishing up the AIL delete process in xfs_ail_update_finish().
+ */
+xfs_lsn_t
xfs_ail_delete_one(
struct xfs_ail *ailp,
struct xfs_log_item *lip)
{
struct xfs_log_item *mlip = xfs_ail_min(ailp);
+ xfs_lsn_t lsn = lip->li_lsn;
trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
xfs_ail_delete(ailp, lip);
@@ -763,7 +790,9 @@ xfs_ail_delete_one(
clear_bit(XFS_LI_IN_AIL, &lip->li_flags);
lip->li_lsn = 0;
- return mlip == lip;
+ if (mlip == lip)
+ return lsn;
+ return 0;
}
/**
@@ -791,10 +820,10 @@ void
xfs_trans_ail_delete(
struct xfs_ail *ailp,
struct xfs_log_item *lip,
- int shutdown_type) __releases(ailp->ail_lock)
+ int shutdown_type)
{
struct xfs_mount *mp = ailp->ail_mount;
- bool mlip_changed;
+ xfs_lsn_t tail_lsn;
if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
spin_unlock(&ailp->ail_lock);
@@ -807,17 +836,8 @@ xfs_trans_ail_delete(
return;
}
- mlip_changed = xfs_ail_delete_one(ailp, lip);
- if (mlip_changed) {
- if (!XFS_FORCED_SHUTDOWN(mp))
- xlog_assign_tail_lsn_locked(mp);
- if (list_empty(&ailp->ail_head))
- wake_up_all(&ailp->ail_empty);
- }
-
- spin_unlock(&ailp->ail_lock);
- if (mlip_changed)
- xfs_log_space_wake(ailp->ail_mount);
+ tail_lsn = xfs_ail_delete_one(ailp, lip);
+ xfs_ail_update_finish(ailp, tail_lsn);
}
int
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 904780dd74aa..4e43d415161d 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -15,6 +15,7 @@
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
+#include "xfs_error.h"
STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *);
@@ -25,8 +26,8 @@ STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *);
*/
void
xfs_trans_dqjoin(
- xfs_trans_t *tp,
- xfs_dquot_t *dqp)
+ struct xfs_trans *tp,
+ struct xfs_dquot *dqp)
{
ASSERT(XFS_DQ_IS_LOCKED(dqp));
ASSERT(dqp->q_logitem.qli_dquot == dqp);
@@ -49,8 +50,8 @@ xfs_trans_dqjoin(
*/
void
xfs_trans_log_dquot(
- xfs_trans_t *tp,
- xfs_dquot_t *dqp)
+ struct xfs_trans *tp,
+ struct xfs_dquot *dqp)
{
ASSERT(XFS_DQ_IS_LOCKED(dqp));
@@ -486,12 +487,12 @@ xfs_trans_apply_dquot_deltas(
*/
void
xfs_trans_unreserve_and_mod_dquots(
- xfs_trans_t *tp)
+ struct xfs_trans *tp)
{
int i, j;
- xfs_dquot_t *dqp;
+ struct xfs_dquot *dqp;
struct xfs_dqtrx *qtrx, *qa;
- bool locked;
+ bool locked;
if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
return;
@@ -571,21 +572,21 @@ xfs_quota_warn(
*/
STATIC int
xfs_trans_dqresv(
- xfs_trans_t *tp,
- xfs_mount_t *mp,
- xfs_dquot_t *dqp,
- int64_t nblks,
- long ninos,
- uint flags)
+ struct xfs_trans *tp,
+ struct xfs_mount *mp,
+ struct xfs_dquot *dqp,
+ int64_t nblks,
+ long ninos,
+ uint flags)
{
- xfs_qcnt_t hardlimit;
- xfs_qcnt_t softlimit;
- time_t timer;
- xfs_qwarncnt_t warns;
- xfs_qwarncnt_t warnlimit;
- xfs_qcnt_t total_count;
- xfs_qcnt_t *resbcountp;
- xfs_quotainfo_t *q = mp->m_quotainfo;
+ xfs_qcnt_t hardlimit;
+ xfs_qcnt_t softlimit;
+ time_t timer;
+ xfs_qwarncnt_t warns;
+ xfs_qwarncnt_t warnlimit;
+ xfs_qcnt_t total_count;
+ xfs_qcnt_t *resbcountp;
+ xfs_quotainfo_t *q = mp->m_quotainfo;
struct xfs_def_quota *defq;
@@ -700,9 +701,14 @@ xfs_trans_dqresv(
XFS_TRANS_DQ_RES_INOS,
ninos);
}
- ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
- ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
- ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));
+
+ if (XFS_IS_CORRUPT(mp,
+ dqp->q_res_bcount < be64_to_cpu(dqp->q_core.d_bcount)) ||
+ XFS_IS_CORRUPT(mp,
+ dqp->q_res_rtbcount < be64_to_cpu(dqp->q_core.d_rtbcount)) ||
+ XFS_IS_CORRUPT(mp,
+ dqp->q_res_icount < be64_to_cpu(dqp->q_core.d_icount)))
+ goto error_corrupt;
xfs_dqunlock(dqp);
return 0;
@@ -712,6 +718,10 @@ error_return:
if (flags & XFS_QMOPT_ENOSPC)
return -ENOSPC;
return -EDQUOT;
+error_corrupt:
+ xfs_dqunlock(dqp);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ return -EFSCORRUPTED;
}
@@ -756,7 +766,8 @@ xfs_trans_reserve_quota_bydquots(
}
if (gdqp) {
- error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
+ error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos,
+ (flags & ~XFS_QMOPT_ENOSPC));
if (error)
goto unwind_usr;
}
@@ -824,13 +835,13 @@ xfs_trans_reserve_quota_nblks(
/*
* This routine is called to allocate a quotaoff log item.
*/
-xfs_qoff_logitem_t *
+struct xfs_qoff_logitem *
xfs_trans_get_qoff_item(
- xfs_trans_t *tp,
- xfs_qoff_logitem_t *startqoff,
+ struct xfs_trans *tp,
+ struct xfs_qoff_logitem *startqoff,
uint flags)
{
- xfs_qoff_logitem_t *q;
+ struct xfs_qoff_logitem *q;
ASSERT(tp != NULL);
@@ -852,8 +863,8 @@ xfs_trans_get_qoff_item(
*/
void
xfs_trans_log_quotaoff_item(
- xfs_trans_t *tp,
- xfs_qoff_logitem_t *qlp)
+ struct xfs_trans *tp,
+ struct xfs_qoff_logitem *qlp)
{
tp->t_flags |= XFS_TRANS_DIRTY;
set_bit(XFS_LI_DIRTY, &qlp->qql_item.li_flags);
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 2e073c1c4614..35655eac01a6 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -91,9 +91,11 @@ xfs_trans_ail_update(
xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
}
-bool xfs_ail_delete_one(struct xfs_ail *ailp, struct xfs_log_item *lip);
+xfs_lsn_t xfs_ail_delete_one(struct xfs_ail *ailp, struct xfs_log_item *lip);
+void xfs_ail_update_finish(struct xfs_ail *ailp, xfs_lsn_t old_lsn)
+ __releases(ailp->ail_lock);
void xfs_trans_ail_delete(struct xfs_ail *ailp, struct xfs_log_item *lip,
- int shutdown_type) __releases(ailp->ail_lock);
+ int shutdown_type);
static inline void
xfs_trans_ail_remove(
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index a6a9373ab863..d9417abf4cd0 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -16,7 +16,7 @@
#include <acpi/pcc.h>
#include <acpi/processor.h>
-/* Support CPPCv2 and CPPCv3 */
+/* CPPCv2 and CPPCv3 support */
#define CPPC_V2_REV 2
#define CPPC_V3_REV 3
#define CPPC_V2_NUM_ENT 21
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 0e7316a86240..21aa26e7c988 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -35,9 +35,6 @@ static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- if (READ_ONCE(*p) & mask)
- return 1;
-
old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
return !!(old & mask);
}
@@ -48,9 +45,6 @@ static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- if (!(READ_ONCE(*p) & mask))
- return 0;
-
old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
return !!(old & mask);
}
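Both hunks drop the same plain-load fast path. The rationale is inferred here, since the diff carries no commentary: with the early return, a failed test_and_set_bit() or test_and_clear_bit() executed only a READ_ONCE() and therefore provided no memory ordering, while these operations are documented to be fully ordered even when the bit does not change; callers such as bit spinlocks rely on that guarantee.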
diff --git a/include/asm-generic/bugs.h b/include/asm-generic/bugs.h
deleted file mode 100644
index 69021830f078..000000000000
--- a/include/asm-generic/bugs.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_GENERIC_BUGS_H
-#define __ASM_GENERIC_BUGS_H
-/*
- * This file is included by 'init/main.c' to check for
- * architecture-dependent bugs.
- */
-
-static inline void check_bugs(void) { }
-
-#endif /* __ASM_GENERIC_BUGS_H */
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index d02806513670..3dd3416f1df0 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -190,7 +190,7 @@ static inline u64 readq(const volatile void __iomem *addr)
u64 val;
__io_br();
- val = __le64_to_cpu(__raw_readq(addr));
+ val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
__io_ar(val);
return val;
}
@@ -233,7 +233,7 @@ static inline void writel(u32 value, volatile void __iomem *addr)
static inline void writeq(u64 value, volatile void __iomem *addr)
{
__io_bw();
- __raw_writeq(__cpu_to_le64(value), addr);
+ __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
__io_aw();
}
#endif
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index fde943d180e0..6dc2269a5398 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -38,7 +38,7 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
*/
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
- return !atomic_read(&lock.val);
+ return !lock.val.counter;
}
/**
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 66397ed10acb..69ab5942bd14 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -114,7 +114,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt,
/**
* memory_intersects - checks if the region occupied by an object intersects
* with another memory region
- * @begin: virtual address of the beginning of the memory regien
+ * @begin: virtual address of the beginning of the memory region
* @end: virtual address of the end of the memory region
* @virt: virtual address of the memory object
* @size: size of the memory object
@@ -127,7 +127,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt,
{
void *vend = virt + size;
- return (virt >= begin && virt < end) || (vend >= begin && vend < end);
+ if (virt < end && vend > begin)
+ return true;
+
+ return false;
}
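A worked case shows what the old test missed: with begin = 0x1000, end = 0x2000 and an object at virt = 0x0800 of size 0x2000 (so vend = 0x2800), the object completely covers the region, yet both (virt >= begin && virt < end) and (vend >= begin && vend < end) are false. The replacement "virt < end && vend > begin" is the standard interval-overlap test and returns true here; it also stops counting vend == begin, where the object merely abuts the region, as an intersection.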
/**
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 268674c1d568..b06240b67199 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -190,12 +190,16 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#define tlb_needs_table_invalidate() (true)
#endif
+void tlb_remove_table_sync_one(void);
+
#else
#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires HAVE_RCU_TABLE_FREE
#endif
+static inline void tlb_remove_table_sync_one(void) { }
+
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 77d428181557..a68535f36d13 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -273,6 +273,7 @@
#define DATA_DATA \
*(.xiptext) \
*(DATA_MAIN) \
+ *(.data..decrypted) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data*) \
@@ -824,7 +825,12 @@
#define TRACEDATA
#endif
+/*
+ * Discard .note.GNU-stack, which is emitted as PROGBITS by the compiler.
+ * Otherwise, the type of the .notes section would become PROGBITS instead of NOTES.
+ */
#define NOTES \
+ /DISCARD/ : { *(.note.GNU-stack) } \
.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
__start_notes = .; \
KEEP(*(.note.*)) \
@@ -883,7 +889,6 @@
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION \
. = ALIGN(PAGE_SIZE); \
- *(.data..decrypted) \
*(.data..percpu..decrypted) \
. = ALIGN(PAGE_SIZE);
#else
@@ -900,10 +905,17 @@
* section definitions so that such archs put those in earlier section
* definitions.
*/
+#ifdef RUNTIME_DISCARD_EXIT
+#define EXIT_DISCARDS
+#else
+#define EXIT_DISCARDS \
+ EXIT_TEXT \
+ EXIT_DATA
+#endif
+
#define DISCARDS \
/DISCARD/ : { \
- EXIT_TEXT \
- EXIT_DATA \
+ EXIT_DISCARDS \
EXIT_CALL \
*(.discard) \
*(.discard.*) \
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
index 20c93f08c993..95a1d214108a 100644
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -38,7 +38,7 @@ static inline long find_zero(unsigned long mask)
return (mask >> 8) ? byte : byte + 1;
}
-static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
{
unsigned long rhs = val | c->low_bits;
*data = rhs;
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index c1a8d4a41bb1..77cc014addf7 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -46,6 +46,8 @@ struct af_alg_type {
void *(*bind)(const char *name, u32 type, u32 mask);
void (*release)(void *private);
int (*setkey)(void *private, const u8 *key, unsigned int keylen);
+ int (*setkeytype)(void *private, const u8 *keytype,
+ unsigned int keylen);
int (*accept)(void *private, struct sock *sk);
int (*accept_nokey)(void *private, struct sock *sk);
int (*setauthsize)(void *private, unsigned int authsize);
@@ -137,6 +139,7 @@ struct af_alg_async_req {
* recvmsg is invoked.
* @init: True if metadata has been sent.
* @len: Length of memory allocated for this data structure.
+ * @inflight: Non-zero when AIO requests are in flight.
*/
struct af_alg_ctx {
struct list_head tsgl_list;
@@ -155,6 +158,8 @@ struct af_alg_ctx {
bool init;
unsigned int len;
+
+ unsigned int inflight;
};
int af_alg_register_type(const struct af_alg_type *type);
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 0bce6005d325..29fac0600ad2 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -37,6 +37,8 @@ struct skcipher_request {
struct crypto_skcipher {
int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
+ int (*setkeytype)(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
int (*encrypt)(struct skcipher_request *req);
int (*decrypt)(struct skcipher_request *req);
@@ -411,6 +413,12 @@ static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
return crypto_skcipher_setkey(&tfm->base, key, keylen);
}
+static inline int crypto_skcipher_setkeytype(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return tfm->setkeytype(tfm, key, keylen);
+}
+
static inline unsigned int crypto_skcipher_default_keysize(
struct crypto_skcipher *tfm)
{
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 7616f6562fe4..64172de5029d 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -162,7 +162,7 @@ struct drm_bridge_funcs {
* or &drm_encoder_helper_funcs.dpms hook.
*
* The bridge must assume that the display pipe (i.e. clocks and timing
- * singals) feeding it is no longer running when this callback is
+ * signals) feeding it is no longer running when this callback is
* called.
*
* The post_disable callback is optional.
@@ -254,9 +254,10 @@ struct drm_bridge_funcs {
* there is one) when this callback is called.
*
* Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_pre_enable. It
- * would be prudent to also provide an implementation of @pre_enable if
- * you are expecting driver calls into &drm_bridge_pre_enable.
+ * atomic commit. It will not be invoked from
+ * &drm_bridge_chain_pre_enable. It would be prudent to also provide an
+ * implementation of @pre_enable if you are expecting driver calls into
+ * &drm_bridge_chain_pre_enable.
*
* The @atomic_pre_enable callback is optional.
*/
@@ -279,9 +280,9 @@ struct drm_bridge_funcs {
* chain if there is one.
*
* Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_enable. It
- * would be prudent to also provide an implementation of @enable if
- * you are expecting driver calls into &drm_bridge_enable.
+ * atomic commit. It will not be invoked from &drm_bridge_chain_enable.
+ * It would be prudent to also provide an implementation of @enable if
+ * you are expecting driver calls into &drm_bridge_chain_enable.
*
* The enable callback is optional.
*/
@@ -301,9 +302,10 @@ struct drm_bridge_funcs {
* signals) feeding it is still running when this callback is called.
*
* Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_disable. It
- * would be prudent to also provide an implementation of @disable if
- * you are expecting driver calls into &drm_bridge_disable.
+ * atomic commit. It will not be invoked from
+ * &drm_bridge_chain_disable. It would be prudent to also provide an
+ * implementation of @disable if you are expecting driver calls into
+ * &drm_bridge_chain_disable.
*
* The disable callback is optional.
*/
@@ -325,10 +327,11 @@ struct drm_bridge_funcs {
* called.
*
* Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_post_disable.
+ * atomic commit. It will not be invoked from
+ * &drm_bridge_chain_post_disable.
* It would be prudent to also provide an implementation of
* @post_disable if you are expecting driver calls into
- * &drm_bridge_post_disable.
+ * &drm_bridge_chain_post_disable.
*
* The post_disable callback is optional.
*/
@@ -406,27 +409,41 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np);
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
struct drm_bridge *previous);
-bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
- const struct drm_display_mode *mode);
-void drm_bridge_disable(struct drm_bridge *bridge);
-void drm_bridge_post_disable(struct drm_bridge *bridge);
-void drm_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode);
-void drm_bridge_pre_enable(struct drm_bridge *bridge);
-void drm_bridge_enable(struct drm_bridge *bridge);
+/**
+ * drm_bridge_get_next_bridge() - Get the next bridge in the chain
+ * @bridge: bridge object
+ *
+ * RETURNS:
+ * the next bridge in the chain after @bridge, or NULL if @bridge is the last.
+ */
+static inline struct drm_bridge *
+drm_bridge_get_next_bridge(struct drm_bridge *bridge)
+{
+ return bridge->next;
+}
-void drm_atomic_bridge_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
-void drm_atomic_bridge_post_disable(struct drm_bridge *bridge,
+bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+enum drm_mode_status
+drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode);
+void drm_bridge_chain_disable(struct drm_bridge *bridge);
+void drm_bridge_chain_post_disable(struct drm_bridge *bridge);
+void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode);
+void drm_bridge_chain_pre_enable(struct drm_bridge *bridge);
+void drm_bridge_chain_enable(struct drm_bridge *bridge);
+
+void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state);
-void drm_atomic_bridge_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
-void drm_atomic_bridge_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
#ifdef CONFIG_DRM_PANEL_BRIDGE
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 408b6f4e63c0..ebcce95f9da6 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -175,12 +175,25 @@ struct drm_crtc_state {
* @no_vblank:
*
* Reflects the ability of a CRTC to send VBLANK events. This state
- * usually depends on the pipeline configuration, and the main usuage
- * is CRTCs feeding a writeback connector operating in oneshot mode.
- * In this case the VBLANK event is only generated when a job is queued
- * to the writeback connector, and we want the core to fake VBLANK
- * events when this part of the pipeline hasn't changed but others had
- * or when the CRTC and connectors are being disabled.
+ * usually depends on the pipeline configuration. If set to true, DRM
+ * atomic helpers will send out a fake VBLANK event during display
+ * updates after all hardware changes have been committed. This is
+ * implemented in drm_atomic_helper_fake_vblank().
+ *
+ * One usage is for drivers and/or hardware without support for VBLANK
+ * interrupts. Such drivers typically do not initialize vblanking
+ * (i.e., call drm_vblank_init() with the number of CRTCs). For CRTCs
+ * without initialized vblanking, this field is set to true in
+ * drm_atomic_helper_check_modeset(), and a fake VBLANK event will be
+ * sent out on each update of the display pipeline by
+ * drm_atomic_helper_fake_vblank().
+ *
+ * Another usage is CRTCs feeding a writeback connector operating in
+ * oneshot mode. In this case the fake VBLANK event is only generated
+ * when a job is queued to the writeback connector, and we want the
+ * core to fake VBLANK events when this part of the pipeline hasn't
+ * changed but others have, or when the CRTC and connectors are being
+ * disabled.
*
* __drm_atomic_helper_crtc_duplicate_state() will not reset the value
* from the current state, the CRTC driver is then responsible for
@@ -336,7 +349,14 @@ struct drm_crtc_state {
* - Events for disabled CRTCs are not allowed, and drivers can ignore
* that case.
*
- * This can be handled by the drm_crtc_send_vblank_event() function,
+ * For very simple hardware without VBLANK interrupt, enabling
+ * &struct drm_crtc_state.no_vblank makes DRM's atomic commit helpers
+ * send a fake VBLANK event at the end of the display update after all
+ * hardware changes have been applied. See
+ * drm_atomic_helper_fake_vblank().
+ *
+ * For more complex hardware this
+ * can be handled by the drm_crtc_send_vblank_event() function,
* which the driver should call on the provided event upon completion of
* the atomic commit. Note that if the driver supports vblank signalling
* and timestamping the vblank counters and timestamps must agree with
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 8364502f92cf..8eaf640d4680 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1052,7 +1052,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
-#define DP_DSC_RECEIVER_CAP_SIZE 0xf
+#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 3
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
index a09864f6d684..4059e4486f84 100644
--- a/include/drm/drm_encoder_slave.h
+++ b/include/drm/drm_encoder_slave.h
@@ -27,6 +27,7 @@
#ifndef __DRM_ENCODER_SLAVE_H__
#define __DRM_ENCODER_SLAVE_H__
+#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
@@ -159,6 +160,29 @@ static inline void drm_i2c_encoder_unregister(struct drm_i2c_encoder_driver *dri
void drm_i2c_encoder_destroy(struct drm_encoder *encoder);
+/**
+ * struct drm_platform_encoder_driver
+ * @platform_driver: platform device driver
+ * @encoder_init: callback to initialize the slave encoder
+ *
+ * Describes a device driver for an encoder connected through a platform
+ * bus. In addition to the entry points in @platform_driver
+ * an @encoder_init function should be provided. It will be called to
+ * give the driver an opportunity to allocate any per-encoder data
+ * structures and to initialize the @slave_funcs and (optionally)
+ * @slave_priv members of @encoder.
+ */
+struct drm_platform_encoder_driver {
+ struct platform_driver platform_driver;
+
+ int (*encoder_init)(struct platform_device *pdev,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder);
+
+};
+#define to_drm_platform_encoder_driver(x) container_of((x), \
+ struct drm_platform_encoder_driver, \
+ platform_driver)
/*
* Wrapper fxns which can be plugged in to drm_encoder_helper_funcs:
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index 553210c02ee0..627efa56e59f 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -88,7 +88,7 @@ static inline int drm_fixp2int(s64 a)
static inline int drm_fixp2int_ceil(s64 a)
{
- if (a > 0)
+ if (a >= 0)
return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
else
return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
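The relaxed comparison only changes the a == 0 case. Assuming the usual 32.32 layout in drm_fixed.h, where DRM_FIXED_ALMOST_ONE is one less than the fixed-point one and drm_fixp2int() is an arithmetic shift right by 32: the old "a > 0" sent zero down the else branch, yielding drm_fixp2int(0 - DRM_FIXED_ALMOST_ONE) = -1, although ceil(0) is 0; with "a >= 0" the result is drm_fixp2int(0 + DRM_FIXED_ALMOST_ONE) = 0.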
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 306d1efeb5e0..2cd68710e953 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -122,7 +122,23 @@ struct drm_format_info {
* drm_format_info_block_height()
*/
u8 block_h[3];
-
+ /**
+ * @pixels_per_macropixel:
+ * Number of pixels per macro-pixel (per plane). A macro-pixel is
+ * composed of multiple pixels, and there can be extra bits between
+ * pixels. This must be used along with @bytes_per_macropixel, only
+ * when the size of a single pixel is not byte-aligned. In this case, @cpp
+ * is not valid and should be 0.
+ */
+ u8 pixels_per_macropixel[3];
+ /**
+ * @bytes_per_macropixel:
+ * Number of bytes per macro-pixel (per plane). A macro-pixel is
+ * composed of multiple pixels. The size of a single macro-pixel should
+ * be byte-aligned. This should be used with @pixels_per_macropixel,
+ * and @cpp should be 0.
+ */
+ u8 bytes_per_macropixel[3];
/** @hsub: Horizontal chroma subsampling factor */
u8 hsub;
/** @vsub: Vertical chroma subsampling factor */
@@ -318,6 +334,8 @@ unsigned int drm_format_info_block_height(const struct drm_format_info *info,
int plane);
uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
int plane, unsigned int buffer_width);
+int drm_format_plane_width_bytes(const struct drm_format_info *info,
+ int plane, int width);
const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
#endif /* __DRM_FOURCC_H__ */
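As a worked illustration of the new macro-pixel fields (the numbers are assumed, matching common 10-bit packed YUV layouts): a format that packs 3 ten-bit pixels into 4 bytes would set pixels_per_macropixel = 3, bytes_per_macropixel = 4 and cpp = 0, so a plane row of w pixels would presumably need DIV_ROUND_UP(w, 3) * 4 bytes from drm_format_plane_width_bytes().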
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 13cf2ae59f6c..46a84d6b531d 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -160,6 +160,7 @@ struct mipi_dsi_device_info {
* struct mipi_dsi_device - DSI peripheral device
* @host: DSI host for this peripheral
* @dev: driver model device node for this peripheral
+ * @attached: the DSI device has been successfully attached
* @name: DSI peripheral chip type
* @channel: virtual channel assigned to the peripheral
* @format: pixel format for video mode
@@ -175,6 +176,7 @@ struct mipi_dsi_device_info {
struct mipi_dsi_device {
struct mipi_dsi_host *host;
struct device dev;
+ bool attached;
char name[DSI_DEV_NAME_SIZE];
unsigned int channel;
@@ -279,6 +281,10 @@ int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
u16 brightness);
int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
u16 *brightness);
+int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi,
+ u16 brightness);
+int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
+ u16 *brightness);
/**
* struct mipi_dsi_driver - DSI driver
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 624bd15ecfab..ce8da64022b4 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -140,6 +140,15 @@ struct drm_panel {
const struct drm_panel_funcs *funcs;
/**
+ * @connector_type:
+ *
+ * Type of the panel as a DRM_MODE_CONNECTOR_* value. This is used to
+ * initialise the drm_connector corresponding to the panel with the
+ * correct connector type.
+ */
+ int connector_type;
+
+ /**
* @list:
*
* Panel entry in registry.
@@ -147,7 +156,9 @@ struct drm_panel {
struct list_head list;
};
-void drm_panel_init(struct drm_panel *panel);
+void drm_panel_init(struct drm_panel *panel, struct device *dev,
+ const struct drm_panel_funcs *funcs,
+ int connector_type);
int drm_panel_add(struct drm_panel *panel);
void drm_panel_remove(struct drm_panel *panel);
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 4d89cd0a60db..df615eb92b09 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -100,8 +100,11 @@ struct drm_simple_display_pipe_funcs {
* This is the function drivers should submit the
* &drm_pending_vblank_event from. Using either
* drm_crtc_arm_vblank_event(), when the driver supports vblank
- * interrupt handling, or drm_crtc_send_vblank_event() directly in case
- * the hardware lacks vblank support entirely.
+ * interrupt handling, or drm_crtc_send_vblank_event() for more
+ * complex cases. In case the hardware lacks vblank support entirely,
+ * drivers can set &struct drm_crtc_state.no_vblank in
+ * &struct drm_simple_display_pipe_funcs.check and let DRM's
+ * atomic helper fake a vblank event.
*/
void (*update)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_plane_state);
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index 9fe4ba8bc622..2559fb921869 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -195,6 +195,7 @@ struct drm_vblank_crtc {
};
int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
+bool drm_dev_has_vblank(const struct drm_device *dev);
u64 drm_crtc_vblank_count(struct drm_crtc *crtc);
u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
ktime_t *vblanktime);
diff --git a/include/dt-bindings/clock/xlnx-versal-clk.h b/include/dt-bindings/clock/xlnx-versal-clk.h
new file mode 100644
index 000000000000..264d634d226e
--- /dev/null
+++ b/include/dt-bindings/clock/xlnx-versal-clk.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Xilinx Inc.
+ *
+ */
+
+#ifndef _DT_BINDINGS_CLK_VERSAL_H
+#define _DT_BINDINGS_CLK_VERSAL_H
+
+#define PMC_PLL 1
+#define APU_PLL 2
+#define RPU_PLL 3
+#define CPM_PLL 4
+#define NOC_PLL 5
+#define PLL_MAX 6
+#define PMC_PRESRC 7
+#define PMC_POSTCLK 8
+#define PMC_PLL_OUT 9
+#define PPLL 10
+#define NOC_PRESRC 11
+#define NOC_POSTCLK 12
+#define NOC_PLL_OUT 13
+#define NPLL 14
+#define APU_PRESRC 15
+#define APU_POSTCLK 16
+#define APU_PLL_OUT 17
+#define APLL 18
+#define RPU_PRESRC 19
+#define RPU_POSTCLK 20
+#define RPU_PLL_OUT 21
+#define RPLL 22
+#define CPM_PRESRC 23
+#define CPM_POSTCLK 24
+#define CPM_PLL_OUT 25
+#define CPLL 26
+#define PPLL_TO_XPD 27
+#define NPLL_TO_XPD 28
+#define APLL_TO_XPD 29
+#define RPLL_TO_XPD 30
+#define EFUSE_REF 31
+#define SYSMON_REF 32
+#define IRO_SUSPEND_REF 33
+#define USB_SUSPEND 34
+#define SWITCH_TIMEOUT 35
+#define RCLK_PMC 36
+#define RCLK_LPD 37
+#define WDT 38
+#define TTC0 39
+#define TTC1 40
+#define TTC2 41
+#define TTC3 42
+#define GEM_TSU 43
+#define GEM_TSU_LB 44
+#define MUXED_IRO_DIV2 45
+#define MUXED_IRO_DIV4 46
+#define PSM_REF 47
+#define GEM0_RX 48
+#define GEM0_TX 49
+#define GEM1_RX 50
+#define GEM1_TX 51
+#define CPM_CORE_REF 52
+#define CPM_LSBUS_REF 53
+#define CPM_DBG_REF 54
+#define CPM_AUX0_REF 55
+#define CPM_AUX1_REF 56
+#define QSPI_REF 57
+#define OSPI_REF 58
+#define SDIO0_REF 59
+#define SDIO1_REF 60
+#define PMC_LSBUS_REF 61
+#define I2C_REF 62
+#define TEST_PATTERN_REF 63
+#define DFT_OSC_REF 64
+#define PMC_PL0_REF 65
+#define PMC_PL1_REF 66
+#define PMC_PL2_REF 67
+#define PMC_PL3_REF 68
+#define CFU_REF 69
+#define SPARE_REF 70
+#define NPI_REF 71
+#define HSM0_REF 72
+#define HSM1_REF 73
+#define SD_DLL_REF 74
+#define FPD_TOP_SWITCH 75
+#define FPD_LSBUS 76
+#define ACPU 77
+#define DBG_TRACE 78
+#define DBG_FPD 79
+#define LPD_TOP_SWITCH 80
+#define ADMA 81
+#define LPD_LSBUS 82
+#define CPU_R5 83
+#define CPU_R5_CORE 84
+#define CPU_R5_OCM 85
+#define CPU_R5_OCM2 86
+#define IOU_SWITCH 87
+#define GEM0_REF 88
+#define GEM1_REF 89
+#define GEM_TSU_REF 90
+#define USB0_BUS_REF 91
+#define UART0_REF 92
+#define UART1_REF 93
+#define SPI0_REF 94
+#define SPI1_REF 95
+#define CAN0_REF 96
+#define CAN1_REF 97
+#define I2C0_REF 98
+#define I2C1_REF 99
+#define DBG_LPD 100
+#define TIMESTAMP_REF 101
+#define DBG_TSTMP 102
+#define CPM_TOPSW_REF 103
+#define USB3_DUAL_REF 104
+#define OUTCLK_MAX 105
+#define REF_CLK 106
+#define PL_ALT_REF_CLK 107
+#define MUXED_IRO 108
+#define PL_EXT 109
+#define PL_LB 110
+#define MIO_50_OR_51 111
+#define MIO_24_OR_25 112
+
+#endif
diff --git a/include/dt-bindings/drm/mipi-dsi.h b/include/dt-bindings/drm/mipi-dsi.h
new file mode 100644
index 000000000000..c6f37ec661fe
--- /dev/null
+++ b/include/dt-bindings/drm/mipi-dsi.h
@@ -0,0 +1,11 @@
+#ifndef __DT_BINDINGS_DRM__
+#define __DT_BINDINGS_DRM__
+/*
+ * MIPI DSI pixel formats, as defined in include/drm/drm_mipi_dsi.h
+ */
+#define MIPI_DSI_FMT_RGB888 0
+#define MIPI_DSI_FMT_RGB666 1
+#define MIPI_DSI_FMT_RGB666_PACKED 2
+#define MIPI_DSI_FMT_RGB565 3
+
+#endif /* __DT_BINDINGS_DRM__ */
diff --git a/include/dt-bindings/media/xilinx-vip.h b/include/dt-bindings/media/xilinx-vip.h
index 94ed3edfcc70..beb50a7483bc 100644
--- a/include/dt-bindings/media/xilinx-vip.h
+++ b/include/dt-bindings/media/xilinx-vip.h
@@ -32,5 +32,11 @@
#define XVIP_VF_CUSTOM2 13
#define XVIP_VF_CUSTOM3 14
#define XVIP_VF_CUSTOM4 15
+#define XVIP_VF_VUY_422 16
+#define XVIP_VF_BGRX 17
+#define XVIP_VF_YUVX 18
+#define XVIP_VF_XBGR 19
+#define XVIP_VF_Y_GREY 20
+#define XVIP_VF_XRGB 21
#endif /* __DT_BINDINGS_MEDIA_XILINX_VIP_H__ */
diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
index b6a1eaf1b339..f6bc83b66ae9 100644
--- a/include/dt-bindings/phy/phy.h
+++ b/include/dt-bindings/phy/phy.h
@@ -16,5 +16,7 @@
#define PHY_TYPE_USB2 3
#define PHY_TYPE_USB3 4
#define PHY_TYPE_UFS 5
+#define PHY_TYPE_DP 6
+#define PHY_TYPE_SGMII 7
#endif /* _DT_BINDINGS_PHY */
diff --git a/include/dt-bindings/pinctrl/pinctrl-zynqmp.h b/include/dt-bindings/pinctrl/pinctrl-zynqmp.h
new file mode 100644
index 000000000000..65522a1f032d
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-zynqmp.h
@@ -0,0 +1,36 @@
+/*
+ * MIO pin configuration defines for Xilinx ZynqMP
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ * Author: Chirag Parekh <chirag.parekh@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_ZYNQMP_H
+#define _DT_BINDINGS_PINCTRL_ZYNQMP_H
+
+/* Bit value for IO standards */
+#define IO_STANDARD_LVCMOS33 0
+#define IO_STANDARD_LVCMOS18 1
+
+/* Bit values for Slew Rates */
+#define SLEW_RATE_FAST 0
+#define SLEW_RATE_SLOW 1
+
+/* Bit values for Pin inputs */
+#define PIN_INPUT_TYPE_CMOS 0
+#define PIN_INPUT_TYPE_SCHMITT 1
+
+/* Bit values for drive control */
+#define DRIVE_STRENGTH_2MA 2
+#define DRIVE_STRENGTH_4MA 4
+#define DRIVE_STRENGTH_8MA 8
+#define DRIVE_STRENGTH_12MA 12
+
+#endif /* _DT_BINDINGS_PINCTRL_ZYNQMP_H */
diff --git a/include/dt-bindings/power/xlnx-versal-power.h b/include/dt-bindings/power/xlnx-versal-power.h
new file mode 100644
index 000000000000..a869194b965a
--- /dev/null
+++ b/include/dt-bindings/power/xlnx-versal-power.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ */
+
+#ifndef _DT_BINDINGS_VERSAL_POWER_H
+#define _DT_BINDINGS_VERSAL_POWER_H
+
+#define PM_DEV_USB_0 (0x18224018U)
+#define PM_DEV_GEM_0 (0x18224019U)
+#define PM_DEV_GEM_1 (0x1822401aU)
+#define PM_DEV_SPI_0 (0x1822401bU)
+#define PM_DEV_SPI_1 (0x1822401cU)
+#define PM_DEV_I2C_0 (0x1822401dU)
+#define PM_DEV_I2C_1 (0x1822401eU)
+#define PM_DEV_CAN_FD_0 (0x1822401fU)
+#define PM_DEV_CAN_FD_1 (0x18224020U)
+#define PM_DEV_UART_0 (0x18224021U)
+#define PM_DEV_UART_1 (0x18224022U)
+#define PM_DEV_GPIO (0x18224023U)
+#define PM_DEV_TTC_0 (0x18224024U)
+#define PM_DEV_TTC_1 (0x18224025U)
+#define PM_DEV_TTC_2 (0x18224026U)
+#define PM_DEV_TTC_3 (0x18224027U)
+#define PM_DEV_SWDT_FPD (0x18224029U)
+#define PM_DEV_OSPI (0x1822402aU)
+#define PM_DEV_QSPI (0x1822402bU)
+#define PM_DEV_SDIO_0 (0x1822402eU)
+#define PM_DEV_SDIO_1 (0x1822402fU)
+#define PM_DEV_RTC (0x18224034U)
+#define PM_DEV_ADMA_0 (0x18224035U)
+#define PM_DEV_ADMA_1 (0x18224036U)
+#define PM_DEV_ADMA_2 (0x18224037U)
+#define PM_DEV_ADMA_3 (0x18224038U)
+#define PM_DEV_ADMA_4 (0x18224039U)
+#define PM_DEV_ADMA_5 (0x1822403aU)
+#define PM_DEV_ADMA_6 (0x1822403bU)
+#define PM_DEV_ADMA_7 (0x1822403cU)
+
+#endif
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 8e7e2ec37f1b..cce2cbd4f301 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -21,6 +21,7 @@
*/
#define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */
#define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */
+#define IORT_SMMU_V3_PMCG_HISI_HIP09 0x00000002 /* HiSilicon HIP09 PMCG */
int iort_register_domain_token(int trans_id, phys_addr_t base,
struct fwnode_handle *fw_node);
@@ -28,8 +29,9 @@ void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
#ifdef CONFIG_ACPI_IORT
void acpi_iort_init(void);
-u32 iort_msi_map_rid(struct device *dev, u32 req_id);
-struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
+u32 iort_msi_map_id(struct device *dev, u32 id);
+struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
+ enum irq_domain_bus_token bus_token);
void acpi_configure_pmsi_domain(struct device *dev);
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
/* IOMMU interface */
@@ -38,10 +40,10 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev);
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head);
#else
static inline void acpi_iort_init(void) { }
-static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
-{ return req_id; }
-static inline struct irq_domain *iort_get_device_domain(struct device *dev,
- u32 req_id)
+static inline u32 iort_msi_map_id(struct device *dev, u32 id)
+{ return id; }
+static inline struct irq_domain *iort_get_device_domain(
+ struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
{ return NULL; }
static inline void acpi_configure_pmsi_domain(struct device *dev) { }
/* IOMMU interface */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 6e67aded28f8..6d2d31b03b4d 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -565,6 +565,18 @@ struct ata_bmdma_prd {
((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
((id)[ATA_ID_FEATURE_SUPP] & (1 << 2)))
+#define ata_id_has_devslp(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)))
+#define ata_id_has_ncq_autosense(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7)))
+#define ata_id_has_dipm(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 3)))
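The three macros above share one guard, written out below as a helper purely for readability (illustrative; the header keeps these as macros): word 76, ATA_ID_SATA_CAPABILITY, reads as 0x0000 or 0xffff on devices that do not implement the SATA words, and in that case the feature bits in word 78, ATA_ID_FEATURE_SUPP, must not be trusted.

	static inline bool ata_id_sata_words_valid(const u16 *id)
	{
		/* 0x0000/0xffff mean the SATA capability words are absent. */
		return id[ATA_ID_SATA_CAPABILITY] != 0x0000 &&
		       id[ATA_ID_SATA_CAPABILITY] != 0xffff;
	}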
#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10))
#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11))
#define ata_id_u32(id,n) \
@@ -577,9 +589,6 @@ struct ata_bmdma_prd {
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
-#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
- ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id)
{
@@ -591,17 +600,6 @@ static inline bool ata_id_has_hipm(const u16 *id)
return val & (1 << 9);
}
-static inline bool ata_id_has_dipm(const u16 *id)
-{
- u16 val = id[ATA_ID_FEATURE_SUPP];
-
- if (val == 0 || val == 0xffff)
- return false;
-
- return val & (1 << 3);
-}
-
-
static inline bool ata_id_has_fua(const u16 *id)
{
if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000)
@@ -770,16 +768,21 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
static inline bool ata_id_has_sense_reporting(const u16 *id)
{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ if (!(id[ATA_ID_CFS_ENABLE_2] & BIT(15)))
+ return false;
+ if ((id[ATA_ID_COMMAND_SET_3] & (BIT(15) | BIT(14))) != BIT(14))
return false;
- return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
+ return id[ATA_ID_COMMAND_SET_3] & BIT(6);
}
static inline bool ata_id_sense_reporting_enabled(const u16 *id)
{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ if (!ata_id_has_sense_reporting(id))
+ return false;
+ /* ata_id_has_sense_reporting() == true, word 86 must have bit 15 set */
+ if ((id[ATA_ID_COMMAND_SET_4] & (BIT(15) | BIT(14))) != BIT(14))
return false;
- return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
+ return id[ATA_ID_COMMAND_SET_4] & BIT(6);
}
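The added (BIT(15) | BIT(14)) != BIT(14) tests encode the ATA validity convention (assumed here from the ACS specifications): a support/enable word is only meaningful when bit 14 is set and bit 15 is clear, so identify-data words reading all-zero or all-one no longer count as advertising or enabling sense reporting, and the enabled check now implies the support check.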
/**
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 4f0e62cbf2ff..e9e74af163fa 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -13,6 +13,7 @@
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5705cda3c4c4..6107b537245a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -49,7 +49,11 @@ struct bpf_map_ops {
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
int fd);
- void (*map_fd_put_ptr)(void *ptr);
+ /* If need_defer is true, the implementation should guarantee that
+ * the to-be-put element is still alive before the bpf program, which
+ * may manipulate it, exits.
+ */
+ void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
u32 (*map_fd_sys_lookup_elem)(void *ptr);
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index b56cc825f64d..0575ad84cc55 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -117,7 +117,6 @@ static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
* of the form "mark_buffer_foo()". These are higher-level functions which
* do something in addition to setting a b_state bit.
*/
-BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
@@ -135,6 +134,41 @@ BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
+static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
+{
+ /*
+ * If somebody else already set this uptodate, they will
+ * have done the memory barrier, and a reader will thus
+ * see *some* valid buffer state.
+ *
+ * Any other serialization (with IO errors or whatever that
+ * might clear the bit) has to come from other state (eg BH_Lock).
+ */
+ if (test_bit(BH_Uptodate, &bh->b_state))
+ return;
+
+ /*
+ * make it consistent with folio_mark_uptodate
+ * pairs with smp_load_acquire in buffer_uptodate
+ */
+ smp_mb__before_atomic();
+ set_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
+{
+ clear_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline int buffer_uptodate(const struct buffer_head *bh)
+{
+ /*
+ * make it consistent with folio_test_uptodate
+ * pairs with smp_mb__before_atomic in set_buffer_uptodate
+ */
+ return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
+}
+
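The barrier comments above pair up across CPUs; a minimal sketch of the intended ordering (illustrative, not kernel code):

	/*
	 *   CPU0 (writer)                      CPU1 (reader)
	 *   write bh->b_data                   if (buffer_uptodate(bh))  <- acquire
	 *   set_buffer_uptodate(bh)                read bh->b_data
	 *     = smp_mb__before_atomic()
	 *       + set_bit()  (release-like)
	 *
	 * If CPU1's acquire load observes BH_Uptodate, all data writes CPU0
	 * made before its barrier are guaranteed to be visible to CPU1.
	 */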
#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
/* If we *know* page->private refers to buffer_heads */
diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h
index 5755ae5a4712..6a869682c120 100644
--- a/include/linux/can/platform/sja1000.h
+++ b/include/linux/can/platform/sja1000.h
@@ -14,7 +14,7 @@
#define OCR_MODE_TEST 0x01
#define OCR_MODE_NORMAL 0x02
#define OCR_MODE_CLOCK 0x03
-#define OCR_MODE_MASK 0x07
+#define OCR_MODE_MASK 0x03
#define OCR_TX0_INVERT 0x04
#define OCR_TX0_PULLDOWN 0x08
#define OCR_TX0_PULLUP 0x10
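The narrowing from 0x07 to 0x03 follows from the field layout visible above: the output-control mode occupies only bits 1:0 (values 0x00 through 0x03), and bit 2 is already OCR_TX0_INVERT, so the old 0x07 mask wrongly folded the TX0 invert flag into the mode.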
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 202852383ae9..522aa6b6d9b0 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -69,6 +69,7 @@ struct css_task_iter {
struct list_head iters_node; /* css_set->task_iters */
};
+extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 2fdfe8061363..01d3298a7591 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -826,6 +826,7 @@ unsigned long clk_hw_get_flags(const struct clk_hw *hw);
#define clk_hw_can_set_rate_parent(hw) \
(clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT)
+unsigned int clk_get_children(char *name);
bool clk_hw_is_prepared(const struct clk_hw *hw);
bool clk_hw_rate_is_protected(const struct clk_hw *hw);
bool clk_hw_is_enabled(const struct clk_hw *hw);
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 18b7b95a8253..562859ee24f4 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -172,6 +172,39 @@ int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
*/
bool clk_is_match(const struct clk *p, const struct clk *q);
+/**
+ * clk_rate_exclusive_get - get exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows drivers to get exclusive control over the rate of a
+ * provider. It prevents any other consumer to execute, even indirectly,
+ * opereation which could alter the rate of the provider or cause glitches
+ *
+ * If exlusivity is claimed more than once on clock, even by the same driver,
+ * the rate effectively gets locked as exclusivity can't be preempted.
+ *
+ * Must not be called from within atomic context.
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_rate_exclusive_get(struct clk *clk);
+
+/**
+ * clk_rate_exclusive_put - release exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows drivers to release the exclusivity it previously got
+ * from clk_rate_exclusive_get()
+ *
+ * The caller must balance the number of clk_rate_exclusive_get() and
+ * clk_rate_exclusive_put() calls.
+ *
+ * Must not be called from within atomic context.
+ */
+void clk_rate_exclusive_put(struct clk *clk);
+
#else
static inline int clk_notifier_register(struct clk *clk,
@@ -218,6 +251,13 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
return p == q;
}
+static inline int clk_rate_exclusive_get(struct clk *clk)
+{
+ return 0;
+}
+
+static inline void clk_rate_exclusive_put(struct clk *clk) {}
+
#endif
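A hedged usage sketch for the consumer API declared above, for a driver that must not see its clock rate change while active (the rate and function name are illustrative; error handling is trimmed):

	static int example_start_streaming(struct clk *pixclk)
	{
		int ret;

		ret = clk_rate_exclusive_get(pixclk);	/* lock out other consumers */
		if (ret)
			return ret;

		/* The exclusivity holder itself may still change the rate. */
		ret = clk_set_rate(pixclk, 148500000);
		if (ret)
			clk_rate_exclusive_put(pixclk);
		return ret;
	}

The matching clk_rate_exclusive_put() belongs in the stop path so that the get/put counts balance.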
/**
@@ -419,6 +459,47 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk *devm_clk_get(struct device *dev, const char *id);
/**
+ * devm_clk_get_prepared - devm_clk_get() + clk_prepare()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * The returned clk (if valid) is prepared. Drivers must however assume
+ * that the clock is not enabled.
+ *
+ * The clock will automatically be unprepared and freed when the device
+ * is unbound from the bus.
+ */
+struct clk *devm_clk_get_prepared(struct device *dev, const char *id);
+
+/**
+ * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * The returned clk (if valid) is prepared and enabled.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_enabled(struct device *dev, const char *id);
+
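A hedged probe() sketch using the managed helper declared above: lookup, prepare and enable collapse into one call, and teardown happens automatically on unbind, so neither remove() nor the error paths need any clock bookkeeping (the driver and clock-id names are illustrative):

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *clk;

		clk = devm_clk_get_enabled(&pdev->dev, "bus");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* clk is prepared and enabled here; nothing to undo later. */
		return 0;
	}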
+/**
* devm_clk_get_optional - lookup and obtain a managed reference to an optional
* clock producer.
* @dev: device for clock "consumer"
@@ -430,53 +511,65 @@ struct clk *devm_clk_get(struct device *dev, const char *id);
struct clk *devm_clk_get_optional(struct device *dev, const char *id);
/**
- * devm_get_clk_from_child - lookup and obtain a managed reference to a
- * clock producer from child node.
+ * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare()
* @dev: device for clock "consumer"
- * @np: pointer to clock consumer node
- * @con_id: clock consumer ID
+ * @id: clock consumer ID
*
- * This function parses the clocks, and uses them to look up the
- * struct clk from the registered list of clock providers by using
- * @np and @con_id
+ * Context: May sleep.
*
- * The clock will automatically be freed when the device is unbound
- * from the bus.
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_prepared().
+ *
+ * The returned clk (if valid) is prepared. Drivers must however
+ * assume that the clock is not enabled.
+ *
+ * The clock will automatically be unprepared and freed when the
+ * device is unbound from the bus.
*/
-struct clk *devm_get_clk_from_child(struct device *dev,
- struct device_node *np, const char *con_id);
+struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);
+
/**
- * clk_rate_exclusive_get - get exclusivity over the rate control of a
- * producer
- * @clk: clock source
+ * devm_clk_get_optional_enabled - devm_clk_get_optional() +
+ * clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
*
- * This function allows drivers to get exclusive control over the rate of a
- * provider. It prevents any other consumer to execute, even indirectly,
- * opereation which could alter the rate of the provider or cause glitches
+ * Context: May sleep.
*
- * If exlusivity is claimed more than once on clock, even by the same driver,
- * the rate effectively gets locked as exclusivity can't be preempted.
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_enabled().
*
- * Must not be called from within atomic context.
+ * The returned clk (if valid) is prepared and enabled.
*
- * Returns success (0) or negative errno.
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
*/
-int clk_rate_exclusive_get(struct clk *clk);
+struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
/**
- * clk_rate_exclusive_put - release exclusivity over the rate control of a
- * producer
- * @clk: clock source
- *
- * This function allows drivers to release the exclusivity it previously got
- * from clk_rate_exclusive_get()
+ * devm_get_clk_from_child - lookup and obtain a managed reference to a
+ * clock producer from child node.
+ * @dev: device for clock "consumer"
+ * @np: pointer to clock consumer node
+ * @con_id: clock consumer ID
*
- * The caller must balance the number of clk_rate_exclusive_get() and
- * clk_rate_exclusive_put() calls.
+ * This function parses the clocks, and uses them to look up the
+ * struct clk from the registered list of clock providers by using
+ * @np and @con_id
*
- * Must not be called from within atomic context.
+ * The clock will automatically be freed when the device is unbound
+ * from the bus.
*/
-void clk_rate_exclusive_put(struct clk *clk);
+struct clk *devm_get_clk_from_child(struct device *dev,
+ struct device_node *np, const char *con_id);
/**
* clk_enable - inform the system when the clock source should be running.
@@ -770,12 +863,36 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id)
return NULL;
}
+static inline struct clk *devm_clk_get_prepared(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_enabled(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
static inline struct clk *devm_clk_get_optional(struct device *dev,
const char *id)
{
return NULL;
}
+static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks)
{
@@ -809,14 +926,6 @@ static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
-
-static inline int clk_rate_exclusive_get(struct clk *clk)
-{
- return 0;
-}
-
-static inline void clk_rate_exclusive_put(struct clk *clk) {}
-
static inline int clk_enable(struct clk *clk)
{
return 0;
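A minimal consumer sketch of the new helpers, assuming a platform driver with an "axi" clock (both the driver shape and the clock name are illustrative): devm_clk_get_enabled() ties prepare/enable to the device's lifetime, and the exclusive get/put pair pins the rate while the hardware is glitch-sensitive.

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get_enabled(&pdev->dev, "axi");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_rate_exclusive_get(clk);	/* may sleep */
	if (ret)
		return ret;

	/* ... program the hardware at the now-fixed rate ... */

	clk_rate_exclusive_put(clk);		/* balance the get */
	return 0;
}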
diff --git a/include/linux/clk/zynq.h b/include/linux/clk/zynq.h
index a198dd9255a4..d1135756aedf 100644
--- a/include/linux/clk/zynq.h
+++ b/include/linux/clk/zynq.h
@@ -9,6 +9,10 @@
#include <linux/spinlock.h>
+int zynq_clk_suspend_early(void);
+void zynq_clk_resume_late(void);
+void zynq_clk_topswitch_enable(void);
+void zynq_clk_topswitch_disable(void);
void zynq_clock_init(void);
struct clk *clk_register_zynq_pll(const char *name, const char *parent,
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 29a6fa2f518d..782491dd1999 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -68,6 +68,8 @@ extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr,
extern ssize_t cpu_show_mmio_stale_data(struct device *dev,
struct device_attribute *attr,
char *buf);
+extern ssize_t cpu_show_retbleed(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -191,6 +193,12 @@ void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);
+#ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
+void arch_cpu_finalize_init(void);
+#else
+static inline void arch_cpu_finalize_init(void) { }
+#endif
+
int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 15835f37bd5f..206d7ac411b8 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -111,7 +111,6 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
- CPUHP_AP_ARM_SDEI_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
@@ -142,6 +141,7 @@ enum cpuhp_state {
CPUHP_AP_ARM_CORESIGHT_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
+ CPUHP_AP_HRTIMERS_DYING,
CPUHP_AP_X86_TBOOT_DYING,
CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
CPUHP_AP_ONLINE,
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 18639c069263..d35e4867d5cc 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -109,7 +109,7 @@ static inline int groups_search(const struct group_info *group_info, kgid_t grp)
* same context as task->real_cred.
*/
struct cred {
- atomic_t usage;
+ atomic_long_t usage;
#ifdef CONFIG_DEBUG_CREDENTIALS
atomic_t subscribers; /* number of processes subscribed */
void *put_addr;
@@ -227,7 +227,7 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
*/
static inline struct cred *get_new_cred(struct cred *cred)
{
- atomic_inc(&cred->usage);
+ atomic_long_inc(&cred->usage);
return cred;
}
@@ -259,7 +259,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return NULL;
- if (!atomic_inc_not_zero(&nonconst_cred->usage))
+ if (!atomic_long_inc_not_zero(&nonconst_cred->usage))
return NULL;
validate_creds(cred);
nonconst_cred->non_rcu = 0;
@@ -283,7 +283,7 @@ static inline void put_cred(const struct cred *_cred)
if (cred) {
validate_creds(cred);
- if (atomic_dec_and_test(&(cred)->usage))
+ if (atomic_long_dec_and_test(&(cred)->usage))
__put_cred(cred);
}
}
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 0c720a2982ae..8f9042b3f48e 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -257,6 +257,8 @@ struct ablkcipher_alg {
struct blkcipher_alg {
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
+ int (*setkeytype)(struct crypto_tfm *tfm, const u8 *keytype,
+ unsigned int keylen);
int (*encrypt)(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes);
@@ -734,6 +736,8 @@ struct blkcipher_tfm {
void *iv;
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
+ int (*setkeytype)(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes);
int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
@@ -1466,6 +1470,14 @@ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
key, keylen);
}
+static inline int crypto_blkcipher_setkeytype(struct crypto_blkcipher *tfm,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto_blkcipher_crt(tfm)->setkeytype(crypto_blkcipher_tfm(tfm),
+ key, keylen);
+}
+
/**
* crypto_blkcipher_encrypt() - encrypt plaintext
* @desc: reference to the block cipher handle with meta data
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 798f0b9b43ae..8565c8842cde 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -35,11 +35,12 @@ struct debugfs_regset32 {
const struct debugfs_reg32 *regs;
int nregs;
void __iomem *base;
+ struct device *dev; /* Optional device for Runtime PM */
};
extern struct dentry *arch_debugfs_dir;
-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+#define DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
@@ -50,10 +51,16 @@ static const struct file_operations __fops = { \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
- .write = debugfs_attr_write, \
+ .write = (__is_signed) ? debugfs_attr_write_signed : debugfs_attr_write, \
.llseek = no_llseek, \
}
+#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
+
+#define DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
+ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
+
typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
#if defined(CONFIG_DEBUG_FS)
@@ -85,6 +92,8 @@ struct dentry *debugfs_create_automount(const char *name,
void debugfs_remove(struct dentry *dentry);
void debugfs_remove_recursive(struct dentry *dentry);
+void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
+
const struct file_operations *debugfs_real_fops(const struct file *filp);
int debugfs_file_get(struct dentry *dentry);
@@ -94,6 +103,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos);
ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
+ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, const char *new_name);
@@ -216,6 +227,10 @@ static inline void debugfs_remove(struct dentry *dentry)
static inline void debugfs_remove_recursive(struct dentry *dentry)
{ }
+static inline void debugfs_lookup_and_remove(const char *name,
+ struct dentry *parent)
+{ }
+
const struct file_operations *debugfs_real_fops(const struct file *filp);
static inline int debugfs_file_get(struct dentry *dentry)
@@ -239,6 +254,13 @@ static inline ssize_t debugfs_attr_write(struct file *file,
return -ENODEV;
}
+static inline ssize_t debugfs_attr_write_signed(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, char *new_name)
{
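A short sketch of the new signed attribute variant, with made-up names and a driver-private s64; the "%lld" format is what distinguishes it from the unsigned macro, and debugfs_create_file_unsafe() is the usual pairing for these fops:

static s64 foo_offset;

static int foo_offset_get(void *data, u64 *val)
{
	*val = *(s64 *)data;		/* printed as signed via "%lld" */
	return 0;
}

static int foo_offset_set(void *data, u64 val)
{
	*(s64 *)data = (s64)val;	/* negative input now parses correctly */
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(foo_offset_fops, foo_offset_get,
				foo_offset_set, "%lld\n");

/* debugfs_create_file_unsafe("offset", 0644, parent, &foo_offset,
 *				&foo_offset_fops); */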
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 2bae9ed3c783..7535f860d45d 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -121,10 +121,10 @@ struct devfreq_dev_profile {
* devfreq.nb to the corresponding register notifier call chain.
* @work: delayed work for load monitoring.
* @previous_freq: previously configured frequency value.
- * @data: Private data of the governor. The devfreq framework does not
- * touch this.
- * @min_freq: Limit minimum frequency requested by user (0: none)
- * @max_freq: Limit maximum frequency requested by user (0: none)
+ * @data: private data the devfreq driver passes to governors; governors should not change it.
+ * @governor_data: private data for governors, devfreq core doesn't touch it.
+ * @min_freq: Limit minimum frequency requested by user (0: none)
+ * @max_freq: Limit maximum frequency requested by user (0: none)
* @scaling_min_freq: Limit minimum frequency requested by OPP interface
* @scaling_max_freq: Limit maximum frequency requested by OPP interface
* @stop_polling: devfreq polling status of a device.
@@ -159,7 +159,8 @@ struct devfreq {
unsigned long previous_freq;
struct devfreq_dev_status last_status;
- void *data; /* private data for governors */
+ void *data;
+ void *governor_data;
unsigned long min_freq;
unsigned long max_freq;
diff --git a/include/linux/device.h b/include/linux/device.h
index 3414b5a67b46..af4ecbf88910 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -422,6 +422,8 @@ extern int __must_check driver_create_file(struct device_driver *driver,
extern void driver_remove_file(struct device_driver *driver,
const struct driver_attribute *attr);
+int driver_set_override(struct device *dev, const char **override,
+ const char *s, size_t len);
extern int __must_check driver_for_each_device(struct device_driver *drv,
struct device *start,
void *data,
@@ -1528,6 +1530,7 @@ extern int device_online(struct device *dev);
extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
+void device_set_node(struct device *dev, struct fwnode_handle *fwnode);
static inline int dev_num_vf(struct device *dev)
{
@@ -1870,6 +1873,9 @@ do { \
WARN_ONCE(condition, "%s %s: " format, \
dev_driver_string(dev), dev_name(dev), ## arg)
+extern __printf(3, 4)
+int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
+
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
diff --git a/include/linux/dim.h b/include/linux/dim.h
index 2571da63877c..ad5f219ce2ff 100644
--- a/include/linux/dim.h
+++ b/include/linux/dim.h
@@ -233,8 +233,9 @@ void dim_park_tired(struct dim *dim);
*
* Calculate the delta between two samples (in data rates).
* Takes into consideration counter wrap-around.
+ * Returned boolean indicates whether curr_stats are reliable.
*/
-void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
struct dim_stats *curr_stats);
/**
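Callers should now honor the new return value; a minimal sketch with illustrative variable names:

	struct dim_stats curr;

	if (!dim_calc_stats(&start, &end, &curr))
		return;	/* delta not reliable (e.g. no time elapsed); skip this round */
	/* ... feed curr into the moderation decision ... */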
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4d450672b7d6..87cbae4b051f 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -612,6 +612,86 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
+/**
+ * dma_map_sgtable - Map the given buffer for DMA
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ * @attrs: Optional DMA attributes for the map operation
+ *
+ * Maps a buffer described by a scatterlist stored in the given sg_table
+ * object for the @dir DMA operation by the @dev device. After success the
+ * ownership for the buffer is transferred to the DMA domain. One has to
+ * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
+ * ownership of the buffer back to the CPU domain before touching the
+ * buffer by the CPU.
+ *
+ * Returns 0 on success or -EINVAL on error during mapping the buffer.
+ */
+static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ int nents;
+
+ nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
+ if (nents <= 0)
+ return -EINVAL;
+ sgt->nents = nents;
+ return 0;
+}
+
+/**
+ * dma_unmap_sgtable - Unmap the given buffer for DMA
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ * @attrs: Optional DMA attributes for the unmap operation
+ *
+ * Unmaps a buffer described by a scatterlist stored in the given sg_table
+ * object for the @dir DMA operation by the @dev device. After this function
+ * the ownership of the buffer is transferred back to the CPU domain.
+ */
+static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
+}
+
+/**
+ * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ *
+ * Performs the needed cache synchronization and moves the ownership of the
+ * buffer back to the CPU domain, so it is safe to perform any access to it
+ * by the CPU. Before doing any further DMA operations, one has to transfer
+ * the ownership of the buffer back to the DMA domain by calling the
+ * dma_sync_sgtable_for_device().
+ */
+static inline void dma_sync_sgtable_for_cpu(struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
+}
+
+/**
+ * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ *
+ * Performs the needed cache synchronization and moves the ownership of the
+ * buffer back to the DMA domain, so it is safe to perform the DMA operation.
+ * Once finished, one has to call dma_sync_sgtable_for_cpu() or
+ * dma_unmap_sgtable().
+ */
+static inline void dma_sync_sgtable_for_device(struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
+}
+
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
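Typical ownership flow with the new sg_table wrappers, sketched for a device-bound buffer with error handling trimmed; the helpers always pass orig_nents internally, so callers no longer juggle the mapped-nents bookkeeping themselves:

	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		return ret;		/* buffer stays CPU-owned on failure */

	/* device owns the buffer; run the DMA ... */

	dma_sync_sgtable_for_cpu(dev, sgt, DMA_TO_DEVICE);
	/* CPU may inspect or refill the buffer, then hand it back ... */
	dma_sync_sgtable_for_device(dev, sgt, DMA_TO_DEVICE);

	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);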
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index dd3de6d88fc0..47d483063662 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -75,29 +75,29 @@ static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir);
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
#else
-static inline void arch_sync_dma_for_device(struct device *dev,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir);
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
#else
-static inline void arch_sync_dma_for_cpu(struct device *dev,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
-void arch_sync_dma_for_cpu_all(struct device *dev);
+void arch_sync_dma_for_cpu_all(void);
#else
-static inline void arch_sync_dma_for_cpu_all(struct device *dev)
+static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
diff --git a/include/linux/dma/xilinx_frmbuf.h b/include/linux/dma/xilinx_frmbuf.h
new file mode 100644
index 000000000000..0116155663d0
--- /dev/null
+++ b/include/linux/dma/xilinx_frmbuf.h
@@ -0,0 +1,205 @@
+/*
+ * Xilinx Framebuffer DMA support header file
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __XILINX_FRMBUF_DMA_H
+#define __XILINX_FRMBUF_DMA_H
+
+#include <linux/dmaengine.h>
+
+/* Modes to enable early callback */
+/* To avoid first frame delay */
+#define EARLY_CALLBACK BIT(1)
+/* Give callback at start of descriptor processing */
+#define EARLY_CALLBACK_START_DESC BIT(2)
+/**
+ * enum vid_frmwork_type - Linux video framework type
+ * @XDMA_DRM: fourcc is of type DRM
+ * @XDMA_V4L2: fourcc is of type V4L2
+ */
+enum vid_frmwork_type {
+ XDMA_DRM = 0,
+ XDMA_V4L2,
+};
+
+/**
+ * enum operation_mode - FB IP control register field settings to select mode
+ * @DEFAULT : Use default mode, No explicit bit field settings required.
+ * @AUTO_RESTART : Use auto-restart mode by setting BIT(7) of control register.
+ */
+enum operation_mode {
+ DEFAULT = 0x0,
+ AUTO_RESTART = BIT(7),
+};
+
+#if IS_ENABLED(CONFIG_XILINX_FRMBUF)
+/**
+ * xilinx_xdma_set_mode - Set operation mode for framebuffer IP
+ * @chan: dma channel instance
+ * @mode: Framebuffer IP operation mode.
+ * This routine is used when utilizing "video format aware" Xilinx DMA IP
+ * (such as Video Framebuffer Read or Video Framebuffer Write). This call
+ * must be made prior to dma_async_issue_pending(). This routine should be
+ * called by the client driver to set the operation mode for the framebuffer
+ * IP based upon the use case: for non-streaming use cases (e.g. MEM2MEM) the
+ * default mode is more appropriate, whereas streaming use cases suit the
+ * auto-restart (free running) mode.
+ */
+void xilinx_xdma_set_mode(struct dma_chan *chan, enum operation_mode mode);
+
+/**
+ * xilinx_xdma_drm_config - configure video format in video aware DMA
+ * @chan: dma channel instance
+ * @drm_fourcc: DRM fourcc code describing the memory layout of video data
+ *
+ * This routine is used when utilizing "video format aware" Xilinx DMA IP
+ * (such as Video Framebuffer Read or Video Framebuffer Write). This call
+ * must be made prior to dma_async_issue_pending() to establish the video
+ * data memory format within the hardware DMA.
+ */
+void xilinx_xdma_drm_config(struct dma_chan *chan, u32 drm_fourcc);
+
+/**
+ * xilinx_xdma_v4l2_config - configure video format in video aware DMA
+ * @chan: dma channel instance
+ * @v4l2_fourcc: V4L2 fourcc code describing the memory layout of video data
+ *
+ * This routine is used when utilizing "video format aware" Xilinx DMA IP
+ * (such as Video Framebuffer Read or Video Framebuffer Write). This call
+ * must be made prior to dma_async_issue_pending() to establish the video
+ * data memory format within the hardware DMA.
+ */
+void xilinx_xdma_v4l2_config(struct dma_chan *chan, u32 v4l2_fourcc);
+
+/**
+ * xilinx_xdma_get_drm_vid_fmts - obtain list of supported DRM mem formats
+ * @chan: dma channel instance
+ * @fmt_cnt: Output param - total count of supported DRM fourcc codes
+ * @fmts: Output param - pointer to array of DRM fourcc codes (not a copy)
+ *
+ * Return: a reference to an array of DRM fourcc codes supported by this
+ * instance of the Video Framebuffer Driver
+ */
+int xilinx_xdma_get_drm_vid_fmts(struct dma_chan *chan, u32 *fmt_cnt,
+ u32 **fmts);
+
+/**
+ * xilinx_xdma_get_v4l2_vid_fmts - obtain list of supported V4L2 mem formats
+ * @chan: dma channel instance
+ * @fmt_cnt: Output param - total count of supported V4L2 fourcc codes
+ * @fmts: Output param - pointer to array of V4L2 fourcc codes (not a copy)
+ *
+ * Return: a reference to an array of V4L2 fourcc codes supported by this
+ * instance of the Video Framebuffer Driver
+ */
+int xilinx_xdma_get_v4l2_vid_fmts(struct dma_chan *chan, u32 *fmt_cnt,
+ u32 **fmts);
+
+/**
+ * xilinx_xdma_get_fid - Get the Field ID of the buffer received.
+ * This function should be called from the callback function registered
+ * per descriptor in prep_interleaved.
+ *
+ * @chan: dma channel instance
+ * @async_tx: descriptor whose parent structure contains fid.
+ * @fid: Output param - Field ID of the buffer. 0 - even, 1 - odd.
+ *
+ * Return: 0 on success, -EINVAL in case of invalid chan
+ */
+int xilinx_xdma_get_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx, u32 *fid);
+
+/**
+ * xilinx_xdma_set_fid - Set the Field ID of the buffer to be transmitted
+ * @chan: dma channel instance
+ * @async_tx: dma async tx descriptor for the buffer
+ * @fid: Field ID of the buffer. 0 - even, 1 - odd.
+ *
+ * Return: 0 on success, -EINVAL in case of invalid chan
+ */
+int xilinx_xdma_set_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx, u32 fid);
+
+/**
+ * xilinx_xdma_get_earlycb - Get info if early callback has been enabled.
+ *
+ * @chan: dma channel instance
+ * @async_tx: descriptor whose parent structure contains fid.
+ * @earlycb: Output param - Early callback mode
+ *
+ * Return: 0 on success, -EINVAL in case of invalid chan
+ */
+int xilinx_xdma_get_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 *earlycb);
+
+/**
+ * xilinx_xdma_set_earlycb - Enable/Disable early callback
+ * @chan: dma channel instance
+ * @async_tx: dma async tx descriptor for the buffer
+ * @earlycb: Enable early callback mode for descriptor
+ *
+ * Return: 0 on success, -EINVAL in case of invalid chan
+ */
+int xilinx_xdma_set_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 earlycb);
+#else
+static inline void xilinx_xdma_drm_config(struct dma_chan *chan, u32 drm_fourcc)
+{ }
+
+static inline void xilinx_xdma_v4l2_config(struct dma_chan *chan,
+ u32 v4l2_fourcc)
+{ }
+
+static inline int xilinx_xdma_get_drm_vid_fmts(struct dma_chan *chan,
+					       u32 *fmt_cnt, u32 **fmts)
+{
+ return -ENODEV;
+}
+
+static inline int xilinx_xdma_get_v4l2_vid_fmts(struct dma_chan *chan,
+						u32 *fmt_cnt, u32 **fmts)
+{
+ return -ENODEV;
+}
+
+static inline int xilinx_xdma_get_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 *fid)
+{
+ return -ENODEV;
+}
+
+static inline int xilinx_xdma_set_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 fid)
+{
+ return -ENODEV;
+}
+
+static inline int xilinx_xdma_get_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *atx,
+ u32 *earlycb)
+{
+ return -ENODEV;
+}
+
+static inline int xilinx_xdma_set_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *atx,
+ u32 earlycb)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /*__XILINX_FRMBUF_DMA_H*/
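A hedged usage sketch for a "video format aware" write channel, following the kernel-doc above; the fourcc choice and the streaming assumption are illustrative, and DRM_FORMAT_YUYV comes from <drm/drm_fourcc.h>:

	xilinx_xdma_set_mode(chan, AUTO_RESTART);	/* streaming use case */
	xilinx_xdma_drm_config(chan, DRM_FORMAT_YUYV);
	/* ... prep and submit an interleaved descriptor ... */
	dma_async_issue_pending(chan);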
diff --git a/include/linux/dma/xilinx_ps_pcie_dma.h b/include/linux/dma/xilinx_ps_pcie_dma.h
new file mode 100644
index 000000000000..7c9912bd490e
--- /dev/null
+++ b/include/linux/dma/xilinx_ps_pcie_dma.h
@@ -0,0 +1,69 @@
+/*
+ * Xilinx PS PCIe DMA Engine support header file
+ *
+ * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#ifndef __DMA_XILINX_PS_PCIE_H
+#define __DMA_XILINX_PS_PCIE_H
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+
+#define XLNX_PLATFORM_DRIVER_NAME "xlnx-platform-dma-driver"
+
+#define ZYNQMP_DMA_DEVID (0xD024)
+#define ZYNQMP_RC_DMA_DEVID (0xD021)
+
+#define MAX_ALLOWED_CHANNELS_IN_HW 4
+
+#define MAX_NUMBER_OF_CHANNELS MAX_ALLOWED_CHANNELS_IN_HW
+
+#define DEFAULT_DMA_QUEUES 4
+#define TWO_DMA_QUEUES 2
+
+#define NUMBER_OF_BUFFER_DESCRIPTORS 1999
+#define MAX_DESCRIPTORS 65536
+
+#define CHANNEL_COAELSE_COUNT 0
+
+#define CHANNEL_POLL_TIMER_FREQUENCY 1000 /* in milliseconds */
+
+#define PCIE_AXI_DIRECTION DMA_TO_DEVICE
+#define AXI_PCIE_DIRECTION DMA_FROM_DEVICE
+
+/**
+ * struct BAR_PARAMS - PCIe Bar Parameters
+ * @BAR_PHYS_ADDR: PCIe BAR Physical address
+ * @BAR_LENGTH: Length of PCIe BAR
+ * @BAR_VIRT_ADDR: Virtual Address to access PCIe BAR
+ */
+struct BAR_PARAMS {
+ dma_addr_t BAR_PHYS_ADDR; /**< Base physical address of BAR memory */
+ unsigned long BAR_LENGTH; /**< Length of BAR memory window */
+ void *BAR_VIRT_ADDR; /**< Virtual Address of mapped BAR memory */
+};
+
+/**
+ * struct ps_pcie_dma_channel_match - Match structure for dma clients
+ * @pci_vendorid: PCIe Vendor id of PS PCIe DMA device
+ * @pci_deviceid: PCIe Device id of PS PCIe DMA device
+ * @board_number: Unique id to identify individual device in a system
+ * @channel_number: Unique channel number of the device
+ * @direction: DMA channel direction
+ * @bar_params: Pointer to BAR_PARAMS for accessing application specific data
+ */
+struct ps_pcie_dma_channel_match {
+ u16 pci_vendorid;
+ u16 pci_deviceid;
+ u16 board_number;
+ u16 channel_number;
+ enum dma_data_direction direction;
+ struct BAR_PARAMS *bar_params;
+};
+
+#endif
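A sketch of how a dma client might describe the channel it wants; the values and my_filter_fn (which would compare this match structure against a candidate channel) are illustrative, and PCI_VENDOR_ID_XILINX is from <linux/pci_ids.h>:

	struct ps_pcie_dma_channel_match match = {
		.pci_vendorid	= PCI_VENDOR_ID_XILINX,
		.pci_deviceid	= ZYNQMP_DMA_DEVID,
		.board_number	= 0,
		.channel_number	= 0,
		.direction	= PCIE_AXI_DIRECTION,
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, my_filter_fn, &match);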
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8013562751a5..a2fa4c61dd84 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -56,6 +56,7 @@ enum dma_transaction_type {
DMA_MEMSET,
DMA_MEMSET_SG,
DMA_INTERRUPT,
+ DMA_SG,
DMA_PRIVATE,
DMA_ASYNC_TX,
DMA_SLAVE,
@@ -775,6 +776,11 @@ struct dma_device {
unsigned int nents, int value, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl,
@@ -815,7 +821,8 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
- return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+ return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) ||
+ (direction == DMA_DEV_TO_DEV);
}
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
@@ -904,6 +911,19 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
len, flags);
}
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
+ return NULL;
+
+ return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
+ src_sg, src_nents, flags);
+}
+
/**
* dmaengine_terminate_all() - Terminate all active DMA transfers
* @chan: The channel for which to terminate the transfers
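Submitting a memory-to-memory scatter/gather copy with the restored DMA_SG operation, sketched with pre-mapped source and destination sg lists assumed:

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_sg(chan, dst_sg, dst_nents,
				   src_sg, src_nents, DMA_PREP_INTERRUPT);
	if (!tx)
		return -EIO;	/* channel doesn't implement DMA_SG */

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);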
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 4cf02ecd67de..65575143c89f 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -191,7 +191,7 @@ static inline int ddebug_remove_module(const char *mod)
static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
const char *modname)
{
- if (strstr(param, "dyndbg")) {
+ if (!strcmp(param, "dyndbg")) {
/* avoid pr_warn(), which wants pr_fmt() fully defined */
printk(KERN_WARNING "dyndbg param is supported only in "
"CONFIG_DYNAMIC_DEBUG builds\n");
diff --git a/include/linux/efi.h b/include/linux/efi.h
index f9b9f9a2fd4a..880cd86c829d 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1715,7 +1715,7 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
void *priv,
efi_exit_boot_map_processing priv_func);
-#define EFI_RANDOM_SEED_SIZE 64U
+#define EFI_RANDOM_SEED_SIZE 32U // BLAKE2S_HASH_SIZE
struct linux_efi_random_seed {
u32 size;
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 0f1e95240c0c..66b89189a1e2 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -289,6 +289,18 @@ static inline void ether_addr_copy(u8 *dst, const u8 *src)
}
/**
+ * eth_hw_addr_set - Assign Ethernet address to a net_device
+ * @dev: pointer to net_device structure
+ * @addr: address to assign
+ *
+ * Assign given address to the net_device, addr_assign_type is not changed.
+ */
+static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
+{
+ ether_addr_copy(dev->dev_addr, addr);
+}
+
+/**
* eth_hw_addr_inherit - Copy dev_addr from another net_device
* @dst: pointer to net_device to copy dev_addr to
* @src: pointer to net_device to copy dev_addr from
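Typical use of the new setter in a probe path (the helper pairing is illustrative): read the address from firmware or platform data and assign it without touching addr_assign_type, falling back to a random MAC otherwise:

	u8 addr[ETH_ALEN];

	if (!eth_platform_get_mac_address(&pdev->dev, addr))
		eth_hw_addr_set(ndev, addr);
	else
		eth_hw_addr_random(ndev);	/* sets addr_assign_type itself */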
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index e7684d768e3f..c24a845c85f3 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -44,6 +44,7 @@ struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
+void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
DECLARE_PER_CPU(int, eventfd_wake_count);
@@ -64,7 +65,7 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
return ERR_PTR(-ENOSYS);
}
-static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
+static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
return -ENOSYS;
}
@@ -85,6 +86,11 @@ static inline bool eventfd_signal_count(void)
return false;
}
+static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
+{
+}
+
#endif
#endif /* _LINUX_EVENTFD_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 440014875acf..0bec300b2e51 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -476,23 +476,27 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
u64, __ur_3, u64, __ur_4, u64, __ur_5)
-#define BPF_CALL_x(x, name, ...) \
+#define BPF_CALL_x(x, attr, name, ...) \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
+ typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
{ \
- return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
+ return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
} \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
-#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__)
-#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__)
-#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__)
-#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__)
-#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__)
-#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__)
+#define __NOATTR
+#define BPF_CALL_0(name, ...) BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_1(name, ...) BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_2(name, ...) BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_3(name, ...) BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_4(name, ...) BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_5(name, ...) BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__)
+
+#define NOTRACE_BPF_CALL_1(name, ...) BPF_CALL_x(1, notrace, name, __VA_ARGS__)
#define bpf_ctx_range(TYPE, MEMBER) \
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
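The new notrace variant is used just like the plain BPF_CALL_x macros; a sketch with a made-up helper name and semantics, where notrace keeps ftrace from recursing into helpers invoked on the tracing path:

NOTRACE_BPF_CALL_1(bpf_foo_cookie, u64, cookie)
{
	return cookie + 1;
}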
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 778abbbc7d94..9411e738d7ae 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -2,7 +2,7 @@
/*
* Xilinx Zynq MPSoC Firmware layer
*
- * Copyright (C) 2014-2018 Xilinx
+ * Copyright (C) 2014-2019 Xilinx
*
* Michal Simek <michal.simek@xilinx.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -13,6 +13,8 @@
#ifndef __FIRMWARE_ZYNQMP_H__
#define __FIRMWARE_ZYNQMP_H__
+#include <linux/device.h>
+
#define ZYNQMP_PM_VERSION_MAJOR 1
#define ZYNQMP_PM_VERSION_MINOR 0
@@ -27,25 +29,41 @@
/* SMC SIP service Call Function Identifier Prefix */
#define PM_SIP_SVC 0xC2000000
+
+/* ATF only commands */
#define PM_GET_TRUSTZONE_VERSION 0xa03
#define PM_SET_SUSPEND_MODE 0xa02
#define GET_CALLBACK_DATA 0xa01
+/* Loader commands */
+#define PM_LOAD_PDI 0x701
+
/* Number of 32bits values in payload */
#define PAYLOAD_ARG_CNT 4U
/* Number of arguments for a callback */
-#define CB_ARG_CNT 4
+#define CB_ARG_CNT 4
/* Payload size (consists of callback API ID + arguments) */
-#define CB_PAYLOAD_SIZE (CB_ARG_CNT + 1)
+#define CB_PAYLOAD_SIZE (CB_ARG_CNT + 1)
+
+#define ZYNQMP_PM_MAX_LATENCY (~0U)
+#define ZYNQMP_PM_MAX_QOS 100U
-#define ZYNQMP_PM_MAX_QOS 100U
+/* Usage status, returned by PmGetNodeStatus */
+#define PM_USAGE_NO_MASTER 0x0U
+#define PM_USAGE_CURRENT_MASTER 0x1U
+#define PM_USAGE_OTHER_MASTER 0x2U
+#define PM_USAGE_BOTH_MASTERS (PM_USAGE_CURRENT_MASTER | \
+ PM_USAGE_OTHER_MASTER)
+
+#define GSS_NUM_REGS (4)
/* Node capabilities */
#define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U
#define ZYNQMP_PM_CAPABILITY_CONTEXT 0x2U
#define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U
+#define ZYNQMP_PM_CAPABILITY_UNUSABLE 0x8U
/*
* Firmware FPGA Manager flags
@@ -55,19 +73,54 @@
#define XILINX_ZYNQMP_PM_FPGA_FULL 0x0U
#define XILINX_ZYNQMP_PM_FPGA_PARTIAL BIT(0)
+/* Feature check status */
+#define PM_FEATURE_INVALID -1
+#define PM_FEATURE_UNCHECKED 0
+
enum pm_api_id {
PM_GET_API_VERSION = 1,
- PM_REQUEST_NODE = 13,
+ PM_SET_CONFIGURATION,
+ PM_GET_NODE_STATUS,
+ PM_GET_OPERATING_CHARACTERISTIC,
+ PM_REGISTER_NOTIFIER,
+ /* API for suspending of PUs: */
+ PM_REQUEST_SUSPEND,
+ PM_SELF_SUSPEND,
+ PM_FORCE_POWERDOWN,
+ PM_ABORT_SUSPEND,
+ PM_REQUEST_WAKEUP,
+ PM_SET_WAKEUP_SOURCE,
+ PM_SYSTEM_SHUTDOWN,
+ /* API for managing PM slaves: */
+ PM_REQUEST_NODE,
PM_RELEASE_NODE,
PM_SET_REQUIREMENT,
- PM_RESET_ASSERT = 17,
+ PM_SET_MAX_LATENCY,
+ /* Direct control API functions: */
+ PM_RESET_ASSERT,
PM_RESET_GET_STATUS,
- PM_PM_INIT_FINALIZE = 21,
+ PM_MMIO_WRITE,
+ PM_MMIO_READ,
+ PM_PM_INIT_FINALIZE,
PM_FPGA_LOAD,
PM_FPGA_GET_STATUS,
PM_GET_CHIPID = 24,
- PM_IOCTL = 34,
+	/* ID 25 is used by U-Boot to process secure boot images */
+ /* Secure library generic API functions */
+ PM_SECURE_SHA = 26,
+ PM_SECURE_RSA,
+ /* Pin control API functions */
+ PM_PINCTRL_REQUEST,
+ PM_PINCTRL_RELEASE,
+ PM_PINCTRL_GET_FUNCTION,
+ PM_PINCTRL_SET_FUNCTION,
+ PM_PINCTRL_CONFIG_PARAM_GET,
+ PM_PINCTRL_CONFIG_PARAM_SET,
+ /* PM IOCTL API */
+ PM_IOCTL,
+ /* API to query information from firmware */
PM_QUERY_DATA,
+ /* Clock control API functions */
PM_CLOCK_ENABLE,
PM_CLOCK_DISABLE,
PM_CLOCK_GETSTATE,
@@ -77,24 +130,58 @@ enum pm_api_id {
PM_CLOCK_GETRATE,
PM_CLOCK_SETPARENT,
PM_CLOCK_GETPARENT,
+ PM_SECURE_IMAGE,
+ PM_FPGA_READ = 46,
+ PM_SECURE_AES,
+ /* PM_REGISTER_ACCESS API */
+ PM_REGISTER_ACCESS = 52,
+ PM_EFUSE_ACCESS,
+ PM_FEATURE_CHECK = 63,
+ PM_API_MAX,
};
/* PMU-FW return status codes */
enum pm_ret_status {
XST_PM_SUCCESS = 0,
+ XST_PM_NO_FEATURE = 19,
XST_PM_INTERNAL = 2000,
XST_PM_CONFLICT,
XST_PM_NO_ACCESS,
XST_PM_INVALID_NODE,
XST_PM_DOUBLE_REQ,
XST_PM_ABORT_SUSPEND,
+ XST_PM_MULT_USER = 2008,
};
enum pm_ioctl_id {
- IOCTL_SET_PLL_FRAC_MODE = 8,
+ IOCTL_GET_RPU_OPER_MODE,
+ IOCTL_SET_RPU_OPER_MODE,
+ IOCTL_RPU_BOOT_ADDR_CONFIG,
+ IOCTL_TCM_COMB_CONFIG,
+ IOCTL_SET_TAPDELAY_BYPASS,
+ IOCTL_SET_SGMII_MODE,
+ IOCTL_SD_DLL_RESET,
+ IOCTL_SET_SD_TAPDELAY,
+ /* Ioctl for clock driver */
+ IOCTL_SET_PLL_FRAC_MODE,
IOCTL_GET_PLL_FRAC_MODE,
IOCTL_SET_PLL_FRAC_DATA,
IOCTL_GET_PLL_FRAC_DATA,
+ IOCTL_WRITE_GGS,
+ IOCTL_READ_GGS,
+ IOCTL_WRITE_PGGS,
+ IOCTL_READ_PGGS,
+ /* IOCTL for ULPI reset */
+ IOCTL_ULPI_RESET,
+ /* Set healthy bit value*/
+ IOCTL_SET_BOOT_HEALTH_STATUS,
+ IOCTL_AFI,
+ /* Probe counter read/write */
+ IOCTL_PROBE_COUNTER_READ,
+ IOCTL_PROBE_COUNTER_WRITE,
+ IOCTL_OSPI_MUX_SELECT,
+ /* IOCTL for USB power request */
+ IOCTL_USB_SET_STATE,
};
enum pm_query_id {
@@ -104,7 +191,14 @@ enum pm_query_id {
PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS,
PM_QID_CLOCK_GET_PARENTS,
PM_QID_CLOCK_GET_ATTRIBUTES,
- PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
+ PM_QID_PINCTRL_GET_NUM_PINS,
+ PM_QID_PINCTRL_GET_NUM_FUNCTIONS,
+ PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS,
+ PM_QID_PINCTRL_GET_FUNCTION_NAME,
+ PM_QID_PINCTRL_GET_FUNCTION_GROUPS,
+ PM_QID_PINCTRL_GET_PIN_GROUPS,
+ PM_QID_CLOCK_GET_NUM_CLOCKS,
+ PM_QID_CLOCK_GET_MAX_DIVISOR,
};
enum zynqmp_pm_reset_action {
@@ -238,6 +332,13 @@ enum zynqmp_pm_reset {
ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3
};
+enum zynqmp_pm_abort_reason {
+ ZYNQMP_PM_ABORT_REASON_WAKEUP_EVENT = 100,
+ ZYNQMP_PM_ABORT_REASON_POWER_UNIT_BUSY,
+ ZYNQMP_PM_ABORT_REASON_NO_POWERDOWN,
+ ZYNQMP_PM_ABORT_REASON_UNKNOWN,
+};
+
enum zynqmp_pm_suspend_reason {
SUSPEND_POWER_REQUEST = 201,
SUSPEND_ALERT,
@@ -250,6 +351,197 @@ enum zynqmp_pm_request_ack {
ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING,
};
+enum zynqmp_pm_opchar_type {
+ ZYNQMP_PM_OPERATING_CHARACTERISTIC_POWER = 1,
+ ZYNQMP_PM_OPERATING_CHARACTERISTIC_ENERGY,
+ ZYNQMP_PM_OPERATING_CHARACTERISTIC_TEMPERATURE,
+};
+
+enum zynqmp_pm_shutdown_type {
+ ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN,
+ ZYNQMP_PM_SHUTDOWN_TYPE_RESET,
+ ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+};
+
+enum zynqmp_pm_shutdown_subtype {
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+};
+
+enum pm_node_id {
+ NODE_UNKNOWN = 0,
+ NODE_APU,
+ NODE_APU_0,
+ NODE_APU_1,
+ NODE_APU_2,
+ NODE_APU_3,
+ NODE_RPU,
+ NODE_RPU_0,
+ NODE_RPU_1,
+ NODE_PLD,
+ NODE_FPD,
+ NODE_OCM_BANK_0,
+ NODE_OCM_BANK_1,
+ NODE_OCM_BANK_2,
+ NODE_OCM_BANK_3,
+ NODE_TCM_0_A,
+ NODE_TCM_0_B,
+ NODE_TCM_1_A,
+ NODE_TCM_1_B,
+ NODE_L2,
+ NODE_GPU_PP_0,
+ NODE_GPU_PP_1,
+ NODE_USB_0,
+ NODE_USB_1,
+ NODE_TTC_0,
+ NODE_TTC_1,
+ NODE_TTC_2,
+ NODE_TTC_3,
+ NODE_SATA,
+ NODE_ETH_0,
+ NODE_ETH_1,
+ NODE_ETH_2,
+ NODE_ETH_3,
+ NODE_UART_0,
+ NODE_UART_1,
+ NODE_SPI_0,
+ NODE_SPI_1,
+ NODE_I2C_0,
+ NODE_I2C_1,
+ NODE_SD_0,
+ NODE_SD_1,
+ NODE_DP,
+ NODE_GDMA,
+ NODE_ADMA,
+ NODE_NAND,
+ NODE_QSPI,
+ NODE_GPIO,
+ NODE_CAN_0,
+ NODE_CAN_1,
+ NODE_EXTERN,
+ NODE_APLL,
+ NODE_VPLL,
+ NODE_DPLL,
+ NODE_RPLL,
+ NODE_IOPLL,
+ NODE_DDR,
+ NODE_IPI_APU,
+ NODE_IPI_RPU_0,
+ NODE_GPU,
+ NODE_PCIE,
+ NODE_PCAP,
+ NODE_RTC,
+ NODE_LPD,
+ NODE_VCU,
+ NODE_IPI_RPU_1,
+ NODE_IPI_PL_0,
+ NODE_IPI_PL_1,
+ NODE_IPI_PL_2,
+ NODE_IPI_PL_3,
+ NODE_PL,
+ NODE_GEM_TSU,
+ NODE_SWDT_0,
+ NODE_SWDT_1,
+ NODE_CSU,
+ NODE_PJTAG,
+ NODE_TRACE,
+ NODE_TESTSCAN,
+ NODE_PMU,
+ NODE_MAX,
+};
+
+enum pm_pinctrl_config_param {
+ PM_PINCTRL_CONFIG_SLEW_RATE,
+ PM_PINCTRL_CONFIG_BIAS_STATUS,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS,
+ PM_PINCTRL_CONFIG_DRIVE_STRENGTH,
+ PM_PINCTRL_CONFIG_VOLTAGE_STATUS,
+ PM_PINCTRL_CONFIG_TRI_STATE,
+ PM_PINCTRL_CONFIG_MAX,
+};
+
+enum pm_pinctrl_slew_rate {
+ PM_PINCTRL_SLEW_RATE_FAST,
+ PM_PINCTRL_SLEW_RATE_SLOW,
+};
+
+enum pm_pinctrl_bias_status {
+ PM_PINCTRL_BIAS_DISABLE,
+ PM_PINCTRL_BIAS_ENABLE,
+};
+
+enum pm_pinctrl_pull_ctrl {
+ PM_PINCTRL_BIAS_PULL_DOWN,
+ PM_PINCTRL_BIAS_PULL_UP,
+};
+
+enum pm_pinctrl_schmitt_cmos {
+ PM_PINCTRL_INPUT_TYPE_CMOS,
+ PM_PINCTRL_INPUT_TYPE_SCHMITT,
+};
+
+enum pm_pinctrl_drive_strength {
+ PM_PINCTRL_DRIVE_STRENGTH_2MA,
+ PM_PINCTRL_DRIVE_STRENGTH_4MA,
+ PM_PINCTRL_DRIVE_STRENGTH_8MA,
+ PM_PINCTRL_DRIVE_STRENGTH_12MA,
+};
+
+enum pm_pinctrl_tri_state {
+ PM_PINCTRL_TRI_STATE_DISABLE = 0,
+ PM_PINCTRL_TRI_STATE_ENABLE,
+};
+
+enum rpu_oper_mode {
+ PM_RPU_MODE_LOCKSTEP,
+ PM_RPU_MODE_SPLIT,
+};
+
+enum rpu_boot_mem {
+ PM_RPU_BOOTMEM_LOVEC,
+ PM_RPU_BOOTMEM_HIVEC,
+};
+
+enum rpu_tcm_comb {
+ PM_RPU_TCM_SPLIT,
+ PM_RPU_TCM_COMB,
+};
+
+enum tap_delay_signal_type {
+ PM_TAPDELAY_NAND_DQS_IN,
+ PM_TAPDELAY_NAND_DQS_OUT,
+ PM_TAPDELAY_QSPI,
+ PM_TAPDELAY_MAX,
+};
+
+enum tap_delay_bypass_ctrl {
+ PM_TAPDELAY_BYPASS_DISABLE,
+ PM_TAPDELAY_BYPASS_ENABLE,
+};
+
+enum sgmii_mode {
+ PM_SGMII_DISABLE,
+ PM_SGMII_ENABLE,
+};
+
+enum tap_delay_type {
+ PM_TAPDELAY_INPUT,
+ PM_TAPDELAY_OUTPUT,
+};
+
+enum dll_reset_type {
+ PM_DLL_RESET_ASSERT,
+ PM_DLL_RESET_RELEASE,
+ PM_DLL_RESET_PULSE,
+};
+
+enum pm_register_access_id {
+ CONFIG_REG_WRITE,
+ CONFIG_REG_READ,
+};
+
/**
* struct zynqmp_pm_query_data - PM query data
* @qid: query ID
@@ -269,6 +561,8 @@ struct zynqmp_eemi_ops {
int (*get_chipid)(u32 *idcode, u32 *version);
int (*fpga_load)(const u64 address, const u32 size, const u32 flags);
int (*fpga_get_status)(u32 *value);
+ int (*fpga_read)(const u32 reg_numframes, const u64 phys_address,
+ u32 readback_type, u32 *value);
int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
int (*clock_enable)(u32 clock_id);
int (*clock_disable)(u32 clock_id);
@@ -294,11 +588,48 @@ struct zynqmp_eemi_ops {
const u32 capabilities,
const u32 qos,
const enum zynqmp_pm_request_ack ack);
+ int (*sha_hash)(const u64 address, const u32 size, const u32 flags);
+ int (*rsa)(const u64 address, const u32 size, const u32 flags);
+ int (*request_suspend)(const u32 node,
+ const enum zynqmp_pm_request_ack ack,
+ const u32 latency,
+ const u32 state);
+ int (*force_powerdown)(const u32 target,
+ const enum zynqmp_pm_request_ack ack);
+ int (*request_wakeup)(const u32 node,
+ const bool set_addr,
+ const u64 address,
+ const enum zynqmp_pm_request_ack ack);
+ int (*set_wakeup_source)(const u32 target,
+ const u32 wakeup_node,
+ const u32 enable);
+ int (*system_shutdown)(const u32 type, const u32 subtype);
+ int (*set_max_latency)(const u32 node, const u32 latency);
+ int (*set_configuration)(const u32 physical_addr);
+ int (*get_node_status)(const u32 node, u32 *const status,
+ u32 *const requirements, u32 *const usage);
+ int (*get_operating_characteristic)(const u32 node,
+ const enum zynqmp_pm_opchar_type
+ type, u32 *const result);
+ int (*pinctrl_request)(const u32 pin);
+ int (*pinctrl_release)(const u32 pin);
+ int (*pinctrl_get_function)(const u32 pin, u32 *id);
+ int (*pinctrl_set_function)(const u32 pin, const u32 id);
+ int (*pinctrl_get_config)(const u32 pin, const u32 param, u32 *value);
+ int (*pinctrl_set_config)(const u32 pin, const u32 param, u32 value);
+ int (*register_access)(u32 register_access_id, u32 address,
+ u32 mask, u32 value, u32 *out);
+ int (*aes)(const u64 address, u32 *out);
+ int (*efuse_access)(const u64 address, u32 *out);
+ int (*secure_image)(const u64 src_addr, u64 key_addr, u64 *dst);
+ int (*pdi_load)(const u32 src, const u64 address);
};
int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
u32 arg2, u32 arg3, u32 *ret_payload);
+int zynqmp_pm_ggs_init(struct kobject *parent_kobj);
+
#if IS_REACHABLE(CONFIG_ARCH_ZYNQMP)
const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void);
#else
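Callers reach the expanded op table through zynqmp_pm_get_eemi_ops(); a defensive sketch, since individual ops may be absent depending on the firmware:

	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
	u32 idcode, version;
	int ret;

	if (IS_ERR(eemi_ops))
		return PTR_ERR(eemi_ops);	/* may be -EPROBE_DEFER */
	if (!eemi_ops->get_chipid)
		return -ENXIO;

	ret = eemi_ops->get_chipid(&idcode, &version);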
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index e8ca62b2cb5b..d1f1c3ea3fc3 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -11,6 +11,9 @@
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#define ENCRYPTED_KEY_LEN 64 /* Bytes */
+#define ENCRYPTED_IV_LEN 24 /* Bytes */
+
struct fpga_manager;
struct sg_table;
@@ -73,7 +76,6 @@ enum fpga_mgr_states {
#define FPGA_MGR_ENCRYPTED_BITSTREAM BIT(2)
#define FPGA_MGR_BITSTREAM_LSB_FIRST BIT(3)
#define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4)
-
/**
* struct fpga_image_info - information specific to a FPGA image
* @flags: boolean flags as defined above
@@ -82,6 +84,8 @@ enum fpga_mgr_states {
* @config_complete_timeout_us: maximum time for FPGA to switch to operating
* status in the write_complete op.
* @firmware_name: name of FPGA image firmware file
+ * @key: user key used when loading an encrypted bitstream
+ * @iv: initialization vector (IV) used when loading an encrypted bitstream
* @sgt: scatter/gather table containing FPGA image
* @buf: contiguous buffer containing FPGA image
* @count: size of buf
@@ -95,6 +99,8 @@ struct fpga_image_info {
u32 disable_timeout_us;
u32 config_complete_timeout_us;
char *firmware_name;
+ char key[ENCRYPTED_KEY_LEN];
+ char iv[ENCRYPTED_IV_LEN];
struct sg_table *sgt;
const char *buf;
size_t count;
@@ -114,6 +120,7 @@ struct fpga_image_info {
* @write: write count bytes of configuration data to the FPGA
* @write_sg: write the scatter list of configuration data to the FPGA
* @write_complete: set FPGA to operating state after writing is done
+ * @read: optional: read FPGA configuration information
* @fpga_remove: optional: Set FPGA into a specific state during driver remove
* @groups: optional attribute groups.
*
@@ -132,6 +139,7 @@ struct fpga_manager_ops {
int (*write_sg)(struct fpga_manager *mgr, struct sg_table *sgt);
int (*write_complete)(struct fpga_manager *mgr,
struct fpga_image_info *info);
+ int (*read)(struct fpga_manager *mgr, struct seq_file *s);
void (*fpga_remove)(struct fpga_manager *mgr);
const struct attribute_group **groups;
};
@@ -142,6 +150,12 @@ struct fpga_manager_ops {
#define FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR BIT(2)
#define FPGA_MGR_STATUS_IP_PROTOCOL_ERR BIT(3)
#define FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR BIT(4)
+#define FPGA_MGR_STATUS_SECURITY_ERR BIT(5)
+#define FPGA_MGR_STATUS_DEVICE_INIT_ERR BIT(6)
+#define FPGA_MGR_STATUS_SIGNAL_ERR BIT(7)
+#define FPGA_MGR_STATUS_HIGH_Z_STATE_ERR BIT(8)
+#define FPGA_MGR_STATUS_EOS_ERR BIT(9)
+#define FPGA_MGR_STATUS_FIRMWARE_REQ_ERR BIT(10)
/**
* struct fpga_compat_id - id for compatibility check
@@ -157,21 +171,31 @@ struct fpga_compat_id {
/**
* struct fpga_manager - fpga manager structure
* @name: name of low level fpga manager
+ * @flags: flags that determine the bitstream type
+ * @key: user key used when loading an encrypted bitstream
+ * @iv: initialization vector (IV) used when loading an encrypted bitstream
* @dev: fpga manager device
* @ref_mutex: only allows one reference to fpga manager
* @state: state of fpga manager
* @compat_id: FPGA manager id for compatibility check.
* @mops: pointer to struct of fpga manager ops
 * @priv: low level driver private data
+ * @dir: debugfs image directory
*/
struct fpga_manager {
const char *name;
+ long int flags;
+ char key[ENCRYPTED_KEY_LEN];
+ char iv[ENCRYPTED_IV_LEN];
struct device dev;
struct mutex ref_mutex;
enum fpga_mgr_states state;
struct fpga_compat_id *compat_id;
const struct fpga_manager_ops *mops;
void *priv;
+#ifdef CONFIG_FPGA_MGR_DEBUG_FS
+ struct dentry *dir;
+#endif
};
#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev)
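A sketch of decoding the new status bits in a low-level driver; where the raw status word comes from is device specific:

	if (status & FPGA_MGR_STATUS_SECURITY_ERR)
		dev_err(&mgr->dev, "bitstream security check failed\n");
	if (status & FPGA_MGR_STATUS_DEVICE_INIT_ERR)
		dev_err(&mgr->dev, "device initialization failed\n");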
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 50808643187f..b71bfdbdff8b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -314,6 +314,8 @@ enum rw_hint {
#define IOCB_SYNC (1 << 5)
#define IOCB_WRITE (1 << 6)
#define IOCB_NOWAIT (1 << 7)
+/* kiocb is a read or write operation submitted by fs/aio.c. */
+#define IOCB_AIO_RW (1 << 23)
struct kiocb {
struct file *ki_filp;
@@ -1361,28 +1363,28 @@ extern int send_sigurg(struct fown_struct *fown);
* sb->s_flags. Note that these mirror the equivalent MS_* flags where
* represented in both.
*/
-#define SB_RDONLY 1 /* Mount read-only */
-#define SB_NOSUID 2 /* Ignore suid and sgid bits */
-#define SB_NODEV 4 /* Disallow access to device special files */
-#define SB_NOEXEC 8 /* Disallow program execution */
-#define SB_SYNCHRONOUS 16 /* Writes are synced at once */
-#define SB_MANDLOCK 64 /* Allow mandatory locks on an FS */
-#define SB_DIRSYNC 128 /* Directory modifications are synchronous */
-#define SB_NOATIME 1024 /* Do not update access times. */
-#define SB_NODIRATIME 2048 /* Do not update directory access times */
-#define SB_SILENT 32768
-#define SB_POSIXACL (1<<16) /* VFS does not apply the umask */
-#define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */
-#define SB_I_VERSION (1<<23) /* Update inode I_version field */
-#define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
+#define SB_RDONLY BIT(0) /* Mount read-only */
+#define SB_NOSUID BIT(1) /* Ignore suid and sgid bits */
+#define SB_NODEV BIT(2) /* Disallow access to device special files */
+#define SB_NOEXEC BIT(3) /* Disallow program execution */
+#define SB_SYNCHRONOUS BIT(4) /* Writes are synced at once */
+#define SB_MANDLOCK BIT(6) /* Allow mandatory locks on an FS */
+#define SB_DIRSYNC BIT(7) /* Directory modifications are synchronous */
+#define SB_NOATIME BIT(10) /* Do not update access times. */
+#define SB_NODIRATIME BIT(11) /* Do not update directory access times */
+#define SB_SILENT BIT(15)
+#define SB_POSIXACL BIT(16) /* VFS does not apply the umask */
+#define SB_KERNMOUNT BIT(22) /* this is a kern_mount call */
+#define SB_I_VERSION BIT(23) /* Update inode I_version field */
+#define SB_LAZYTIME BIT(25) /* Update the on-disk [acm]times lazily */
/* These sb flags are internal to the kernel */
-#define SB_SUBMOUNT (1<<26)
-#define SB_FORCE (1<<27)
-#define SB_NOSEC (1<<28)
-#define SB_BORN (1<<29)
-#define SB_ACTIVE (1<<30)
-#define SB_NOUSER (1<<31)
+#define SB_SUBMOUNT BIT(26)
+#define SB_FORCE BIT(27)
+#define SB_NOSEC BIT(28)
+#define SB_BORN BIT(29)
+#define SB_ACTIVE BIT(30)
+#define SB_NOUSER BIT(31)
/*
* Umount options
@@ -1405,6 +1407,8 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
#define SB_I_UNTRUSTED_MOUNTER 0x00000040
+#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
+
/* Possible states of 'frozen' field */
enum {
SB_UNFROZEN = 0, /* FS is unfrozen */
@@ -1742,6 +1746,7 @@ extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
extern void inode_init_owner(struct inode *inode, const struct inode *dir,
umode_t mode);
extern bool may_open_dev(const struct path *path);
+umode_t mode_strip_sgid(const struct inode *dir, umode_t mode);
/*
* VFS FS_IOC_FIEMAP helper definitions.
*/
@@ -1861,6 +1866,7 @@ struct file_operations {
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
int (*fadvise)(struct file *, loff_t, loff_t, int);
+ bool may_pollfree;
} __randomize_layout;
struct inode_operations {
@@ -3486,7 +3492,7 @@ void simple_transaction_set(struct file *file, size_t n);
* All attributes contain a text representation of a numeric value
* that are accessed with the get() and set() functions.
*/
-#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
+#define DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
@@ -3497,10 +3503,16 @@ static const struct file_operations __fops = { \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = simple_attr_read, \
- .write = simple_attr_write, \
+ .write = (__is_signed) ? simple_attr_write_signed : simple_attr_write, \
.llseek = generic_file_llseek, \
}
+#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
+ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
+
+#define DEFINE_SIMPLE_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
+ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
+
static inline __printf(1, 2)
void __simple_attr_check_format(const char *fmt, ...)
{
@@ -3515,6 +3527,8 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos);
ssize_t simple_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
+ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
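The signed variant exists because simple_attr_write() parses input with an unsigned conversion and rejects strings such as "-1", while simple_attr_write_signed() parses with a signed conversion. A minimal sketch of a caller, assuming the usual debugfs attribute pattern (my_val, my_get, my_set and my_attr_fops are illustrative names, not part of the patch):

	static s64 my_val = -1;

	static int my_get(void *data, u64 *val)
	{
		*val = my_val;		/* sign-extended into the u64 */
		return 0;
	}

	static int my_set(void *data, u64 val)
	{
		my_val = (s64)val;
		return 0;
	}

	/* "%lld\n" prints the value as signed; writes may carry a leading '-' */
	DEFINE_SIMPLE_ATTRIBUTE_SIGNED(my_attr_fops, my_get, my_set, "%lld\n");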
struct ctl_table;
int proc_nr_files(struct ctl_table *table, int write,
@@ -3610,11 +3624,11 @@ extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
int advice);
#if defined(CONFIG_IO_URING)
-extern struct sock *io_uring_get_socket(struct file *file);
+bool io_is_uring_fops(struct file *file);
#else
-static inline struct sock *io_uring_get_socket(struct file *file)
+static inline bool io_is_uring_fops(struct file *file)
{
- return NULL;
+ return false;
}
#endif
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9141f2263286..52c1c6feef89 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -712,7 +712,7 @@ static inline void __ftrace_enabled_restore(int enabled)
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
-static inline unsigned long get_lock_parent_ip(void)
+static __always_inline unsigned long get_lock_parent_ip(void)
{
unsigned long addr = CALLER_ADDR0;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 61f2f6ff9467..c89f8456f18d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -471,8 +471,8 @@ static inline int gfp_zonelist(gfp_t flags)
* There are two zonelists per node, one for all zones with memory and
* one containing just zones from the node the zonelist belongs to.
*
- * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
- * optimized to &contig_page_data at compile-time.
+ * For the case of non-NUMA systems the NODE_DATA() gets optimized to
+ * &contig_page_data at compile-time.
*/
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d5f9bbf8afa5..af73e8c815af 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -622,8 +622,13 @@ struct hid_device { /* device report descriptor */
struct list_head debug_list;
spinlock_t debug_list_lock;
wait_queue_head_t debug_wait;
+ struct kref ref;
+
+ unsigned int id; /* system unique id */
};
+void hiddev_free(struct kref *ref);
+
#define to_hid_device(pdev) \
container_of(pdev, struct hid_device, dev)
@@ -796,6 +801,7 @@ struct hid_driver {
* @raw_request: send raw report request to device (e.g. feature report)
* @output_report: send output report to device
* @idle: send idle request to device
+ * @max_buffer_size: override the maximum data buffer size (default: HID_MAX_BUFFER_SIZE)
*/
struct hid_ll_driver {
int (*start)(struct hid_device *hdev);
@@ -820,6 +826,8 @@ struct hid_ll_driver {
int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len);
int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
+
+ unsigned int max_buffer_size;
};
extern struct hid_ll_driver i2c_hid_ll_driver;
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index ea5cdbd8c2c3..900f224bb640 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -276,4 +276,22 @@ static inline void copy_highpage(struct page *to, struct page *from)
#endif
+static inline void memcpy_from_page(char *to, struct page *page,
+ size_t offset, size_t len)
+{
+ char *from = kmap_atomic(page);
+
+ memcpy(to, from + offset, len);
+ kunmap_atomic(from);
+}
+
+static inline void memcpy_to_page(struct page *page, size_t offset,
+ const char *from, size_t len)
+{
+ char *to = kmap_atomic(page);
+
+ memcpy(to + offset, from, len);
+ kunmap_atomic(to);
+}
+
#endif /* _LINUX_HIGHMEM_H */
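The memcpy_from_page()/memcpy_to_page() helpers added above wrap the recurring kmap_atomic()/memcpy()/kunmap_atomic() sequence so callers cannot forget the unmap or hold the atomic mapping longer than needed. A hedged usage sketch (buf and the page allocation are illustrative):

	char buf[64] = "example payload";
	struct page *page = alloc_page(GFP_KERNEL);

	if (page) {
		memcpy_to_page(page, 0, buf, sizeof(buf));	/* copy in */
		memcpy_from_page(buf, page, 0, sizeof(buf));	/* copy back out */
		__free_page(page);
	}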
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index e6eea45e1154..0b9d59807e5e 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -314,7 +314,8 @@ int host1x_client_unregister(struct host1x_client *client);
struct tegra_mipi_device;
-struct tegra_mipi_device *tegra_mipi_request(struct device *device);
+struct tegra_mipi_device *tegra_mipi_request(struct device *device,
+ struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 48be92aded5e..7bbcb64eeecf 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -196,6 +196,7 @@ enum hrtimer_base_type {
* @max_hang_time: Maximum time spent in hrtimer_interrupt
* @softirq_expiry_lock: Lock which is taken while softirq based hrtimers are
* expired
* @online: CPU is online from an hrtimer's point of view
* @timer_waiters: A hrtimer_cancel() invocation waits for the timer
* callback to finish.
* @expires_next: absolute time of the next event, is required for remote
@@ -218,7 +219,8 @@ struct hrtimer_cpu_base {
unsigned int hres_active : 1,
in_hrtirq : 1,
hang_detected : 1,
- softirq_activated : 1;
+ softirq_activated : 1,
+ online : 1;
#ifdef CONFIG_HIGH_RES_TIMERS
unsigned int nr_events;
unsigned short nr_retries;
@@ -526,9 +528,9 @@ extern void sysrq_timer_list_show(void);
int hrtimers_prepare_cpu(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
-int hrtimers_dead_cpu(unsigned int cpu);
+int hrtimers_cpu_dying(unsigned int cpu);
#else
-#define hrtimers_dead_cpu NULL
+#define hrtimers_cpu_dying NULL
#endif
#endif
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index cef70d6e1657..311dd8e92182 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -7,6 +7,7 @@
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
+#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>
@@ -127,8 +128,8 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
struct page *follow_huge_pd(struct vm_area_struct *vma,
unsigned long address, hugepd_t hpd,
int flags, int pdshift);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int flags);
+struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
+ int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
@@ -175,7 +176,7 @@ static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
-#define follow_huge_pmd(mm, addr, pmd, flags) NULL
+#define follow_huge_pmd_pte(vma, addr, flags) NULL
#define follow_huge_pud(mm, addr, pud, flags) NULL
#define follow_huge_pgd(mm, addr, pgd, flags) NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
@@ -396,7 +397,10 @@ static inline struct hstate *hstate_sizelog(int page_size_log)
if (!page_size_log)
return &default_hstate;
- return size_to_hstate(1UL << page_size_log);
+ if (page_size_log < BITS_PER_LONG)
+ return size_to_hstate(1UL << page_size_log);
+
+ return NULL;
}
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
@@ -744,4 +748,16 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
return ptl;
}
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+static inline bool hugetlb_pmd_shared(pte_t *pte)
+{
+ return page_count(virt_to_page(pte)) > 1;
+}
+#else
+static inline bool hugetlb_pmd_shared(pte_t *pte)
+{
+ return false;
+}
+#endif
+
#endif /* _LINUX_HUGETLB_H */
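hugetlb_pmd_shared() above relies on how huge_pmd_share() shares a PMD page table page: each mm mapping the page takes a page reference, so a count above one means another process still shares it. A sketch of a caller, assuming a hugetlb VMA and hstate are in hand (the huge_pte_offset() lookup is illustrative):

	pte_t *pte = huge_pte_offset(vma->vm_mm, addr, huge_page_size(h));

	if (pte && hugetlb_pmd_shared(pte))
		pr_debug("hugetlb PMD at %#lx is shared with another mm\n", addr);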
diff --git a/include/linux/idr.h b/include/linux/idr.h
index ac6e946b6767..bbfd934cab22 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -200,7 +200,7 @@ static inline void idr_preload_end(void)
*/
#define idr_for_each_entry_ul(idr, entry, tmp, id) \
for (tmp = 0, id = 0; \
- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
tmp = id, ++id)
/**
@@ -224,10 +224,12 @@ static inline void idr_preload_end(void)
* @id: Entry ID.
*
* Continue to iterate over entries, continuing after the current position.
+ * After normal termination @entry is left with the value NULL. This
+ * is convenient for a "not found" value.
*/
#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
for (tmp = id; \
- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
tmp = id, ++id)
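With the rewritten loop condition, @entry is guaranteed to be NULL after normal termination, so callers can use the cursor itself as a found/not-found flag. A sketch (my_idr, struct my_obj and target are illustrative):

	struct my_obj *obj;
	unsigned long tmp, id;

	idr_for_each_entry_ul(&my_idr, obj, tmp, id) {
		if (obj->key == target)
			break;			/* found: obj stays non-NULL */
	}
	if (!obj)
		pr_debug("no entry with key %lu\n", target);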
/*
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index e147ea679467..91db78e67edc 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -52,6 +52,10 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
case ARPHRD_NONE:
case ARPHRD_RAWIP:
case ARPHRD_PIMREG:
+ /* PPP adds its l2 header automatically in ppp_start_xmit().
+ * This makes it look like an l3 device to __bpf_redirect() and tcf_mirred_init().
+ */
+ case ARPHRD_PPP:
return false;
default:
return true;
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index ec7e4bd07f82..4182fa746d49 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -192,6 +192,8 @@ struct team {
struct net_device *dev; /* associated netdevice */
struct team_pcpu_stats __percpu *pcpu_stats;
+ const struct header_ops *header_ops_cache;
+
struct mutex lock; /* used for overall locking, e.g. port lists write */
/*
@@ -211,6 +213,7 @@ struct team {
bool queue_override_enabled;
struct list_head *qom_lists; /* array of queue override mapping lists */
bool port_mtu_change_allowed;
+ bool notifier_ctx;
struct {
unsigned int count;
unsigned int interval; /* in ms */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 41a518336673..4e7e72f3da5b 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -626,6 +626,23 @@ static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
return __vlan_get_protocol(skb, skb->protocol, NULL);
}
+/* This version of __vlan_get_protocol() also pulls mac header in skb->head */
+static inline __be16 vlan_get_protocol_and_depth(struct sk_buff *skb,
+ __be16 type, int *depth)
+{
+ int maclen;
+
+ type = __vlan_get_protocol(skb, type, &maclen);
+
+ if (type) {
+ if (!pskb_may_pull(skb, maclen))
+ type = 0;
+ else if (depth)
+ *depth = maclen;
+ }
+ return type;
+}
+
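Unlike plain vlan_get_protocol(), the helper above guarantees that the reported MAC header is linear in skb->head before the caller dereferences it. A hedged sketch:

	__be16 proto;
	int depth;

	proto = vlan_get_protocol_and_depth(skb, skb->protocol, &depth);
	if (proto)
		/* 'depth' bytes of MAC header are now safely readable */
		pr_debug("proto 0x%04x after %d header bytes\n",
			 ntohs(proto), depth);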
/* A getter for the SKB protocol field which will handle VLAN tags consistently
* whether VLAN acceleration is enabled or not.
*/
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 1c37f17f7203..5d52478c22c4 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -17,7 +17,8 @@ extern int ima_bprm_check(struct linux_binprm *bprm);
extern int ima_file_check(struct file *file, int mask);
extern void ima_post_create_tmpfile(struct inode *inode);
extern void ima_file_free(struct file *file);
-extern int ima_file_mmap(struct file *file, unsigned long prot);
+extern int ima_file_mmap(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags);
extern int ima_load_data(enum kernel_load_data_id id);
extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
@@ -64,7 +65,8 @@ static inline void ima_file_free(struct file *file)
return;
}
-static inline int ima_file_mmap(struct file *file, unsigned long prot)
+static inline int ima_file_mmap(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
{
return 0;
}
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
index d70a914cba11..1e0dd0541b1e 100644
--- a/include/linux/interconnect.h
+++ b/include/linux/interconnect.h
@@ -29,6 +29,8 @@ struct icc_path *icc_get(struct device *dev, const int src_id,
const int dst_id);
struct icc_path *of_icc_get(struct device *dev, const char *name);
void icc_put(struct icc_path *path);
+int icc_enable(struct icc_path *path);
+int icc_disable(struct icc_path *path);
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
void icc_set_tag(struct icc_path *path, u32 tag);
@@ -50,6 +52,16 @@ static inline void icc_put(struct icc_path *path)
{
}
+static inline int icc_enable(struct icc_path *path)
+{
+ return 0;
+}
+
+static inline int icc_disable(struct icc_path *path)
+{
+ return 0;
+}
+
static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
return 0;
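The enable/disable pair lets a consumer park a configured bandwidth vote across low-power transitions instead of tearing the path down. A sketch of the usual consumer flow (the "dma-mem" path name is illustrative):

	struct icc_path *path = of_icc_get(dev, "dma-mem");

	if (!IS_ERR_OR_NULL(path)) {
		icc_set_bw(path, 1000, 2000);	/* average/peak bandwidth vote */
		icc_disable(path);		/* e.g. on runtime suspend */
		icc_enable(path);		/* restore the vote on resume */
		icc_put(path);
	}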
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 89fc59dab57d..01517747214a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -61,6 +61,9 @@
* interrupt handler after suspending interrupts. For system
* wakeup devices users need to implement wakeup detection in
* their interrupt handlers.
+ * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
+ * Users will enable it explicitly by enable_irq() or enable_nmi()
+ * later.
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -74,6 +77,7 @@
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
+#define IRQF_NO_AUTOEN 0x00080000
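Without the flag, request_irq() enables the line immediately, which can race with a device that is not yet fully initialized. A sketch of the intended pattern (my_handler and "mydev" are illustrative):

	ret = request_irq(irq, my_handler, IRQF_NO_AUTOEN, "mydev", dev);
	if (ret)
		return ret;

	/* ... finish bringing the device up ... */

	enable_irq(irq);	/* opt in once interrupts can be handled */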
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
@@ -598,12 +602,17 @@ struct tasklet_struct
unsigned long data;
};
-#define DECLARE_TASKLET(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
-
-#define DECLARE_TASKLET_DISABLED(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+#define DECLARE_TASKLET_OLD(name, _func) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(0), \
+ .func = _func, \
+}
+#define DECLARE_TASKLET_DISABLED_OLD(name, _func) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(1), \
+ .func = _func, \
+}
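The _OLD variants drop the data argument at the declaration site and rely on designated initializers; the callback keeps the old unsigned long signature and simply receives 0. A sketch:

	static void my_tasklet_fn(unsigned long data)
	{
		/* data is always 0 for DECLARE_TASKLET_OLD() users */
	}

	DECLARE_TASKLET_OLD(my_tasklet, my_tasklet_fn);

	/* typically queued from an interrupt handler: */
	tasklet_schedule(&my_tasklet);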
enum
{
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 53b16f104081..74e05e7b67f5 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -32,6 +32,8 @@ struct vm_fault;
*
* IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access
* written data and requires fdatasync to commit them to persistent storage.
+ * This needs to take into account metadata changes that *may* be made at IO
+ * completion, such as file size updates from direct IO.
*/
#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
#define IOMAP_F_DIRTY 0x02 /* uncommitted metadata */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 29bac5345563..6ca3fb2873d7 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -256,7 +256,7 @@ struct iommu_ops {
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size, struct iommu_iotlb_gather *iotlb_gather);
void (*flush_iotlb_all)(struct iommu_domain *domain);
@@ -421,6 +421,8 @@ extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
+extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
@@ -428,6 +430,9 @@ extern size_t iommu_unmap_fast(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg,unsigned int nents, int prot);
+extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
@@ -662,6 +667,13 @@ static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
}
+static inline int iommu_map_atomic(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
+{
+ return -ENODEV;
+}
+
static inline size_t iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
@@ -682,6 +694,13 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain,
return 0;
}
+static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot)
+{
+ return 0;
+}
+
static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
}
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index fdc201d61460..d94db8d6df52 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -300,6 +300,13 @@ struct resource *devm_request_free_mem_region(struct device *dev,
struct resource *request_free_mem_region(struct resource *base,
unsigned long size, const char *name);
+static inline void irqresource_disabled(struct resource *res, u32 irq)
+{
+ res->start = irq;
+ res->end = irq;
+ res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
+}
+
#ifdef CONFIG_IO_STRICT_DEVMEM
void revoke_devmem(struct resource *res);
#else
diff --git a/include/linux/iova.h b/include/linux/iova.h
index a0637abffee8..6c19b09e9663 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -132,7 +132,7 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
return iova >> iova_shift(iovad);
}
-#if IS_ENABLED(CONFIG_IOMMU_IOVA)
+#if IS_REACHABLE(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 5686711b0f40..905be6c0a527 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -134,6 +134,9 @@ struct gic_chip_data;
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
int gic_cpu_if_down(unsigned int gic_nr);
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
+
+void gic_set_cpu(unsigned int cpu, unsigned int irq);
void gic_cpu_save(struct gic_chip_data *gic);
void gic_cpu_restore(struct gic_chip_data *gic);
void gic_dist_save(struct gic_chip_data *gic);
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 824d7a19dd66..2552f66a7a89 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -254,7 +254,7 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
}
void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
irq_hw_number_t hwirq_max, int direct_max,
const struct irq_domain_ops *ops,
void *host_data);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index b0e97e5de8ca..b60adc4210b5 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -415,7 +415,7 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
#define JI_WAIT_DATA (1 << __JI_WAIT_DATA)
/**
- * struct jbd_inode - The jbd_inode type is the structure linking inodes in
+ * struct jbd2_inode - The jbd2_inode type is the structure linking inodes in
* ordered mode present in a transaction so that we can sync them during commit.
*/
struct jbd2_inode {
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 77c86a2236da..1fdb251947ed 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -321,6 +321,7 @@ extern long (*panic_blink)(int state);
__printf(1, 2)
void panic(const char *fmt, ...) __noreturn __cold;
void nmi_panic(struct pt_regs *regs, const char *msg);
+void check_panic_on_warn(const char *origin);
extern void oops_enter(void);
extern void oops_exit(void);
void print_oops_end_marker(void);
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 7ee2bb43b251..f7f20cf1bd3b 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -73,7 +73,7 @@ extern unsigned int kstat_irqs_usr(unsigned int irq);
/*
* Number of interrupts per cpu, since bootup
*/
-static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
+static inline unsigned long kstat_cpu_irqs_sum(unsigned int cpu)
{
return kstat_cpu(cpu).irqs_sum;
}
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index fc4b0b10210f..d3e0f9dc2587 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -615,7 +615,7 @@ __kfifo_uint_must_check_helper( \
* writer, you don't need extra locking to use these macro.
*/
#define kfifo_to_user(fifo, to, len, copied) \
-__kfifo_uint_must_check_helper( \
+__kfifo_int_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
void __user *__to = (to); \
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index c7764d9e6f39..0be024dffa0c 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -318,6 +318,8 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
size_t *length, loff_t *ppos);
#endif
extern void wait_for_kprobe_optimizer(void);
+bool optprobe_queued_unopt(struct optimized_kprobe *op);
+bool kprobe_disarmed(struct kprobe *p);
#else
static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 19e8344c51a8..ee7d57478a45 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -146,6 +146,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_PENDING_TIMER 2
#define KVM_REQ_UNHALT 3
+#define KVM_REQ_VM_BUGGED (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQUEST_ARCH_BASE 8
#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
@@ -502,6 +503,7 @@ struct kvm {
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
pid_t userspace_pid;
+ bool vm_bugged;
};
#define kvm_err(fmt, ...) \
@@ -530,6 +532,31 @@ struct kvm {
#define vcpu_err(vcpu, fmt, ...) \
kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+static inline void kvm_vm_bugged(struct kvm *kvm)
+{
+ kvm->vm_bugged = true;
+ kvm_make_all_cpus_request(kvm, KVM_REQ_VM_BUGGED);
+}
+
+#define KVM_BUG(cond, kvm, fmt...) \
+({ \
+ int __ret = (cond); \
+ \
+ if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
+
+#define KVM_BUG_ON(cond, kvm) \
+({ \
+ int __ret = (cond); \
+ \
+ if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
+
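KVM_BUG_ON() warns once, marks the VM bugged so every vCPU is kicked out of guest mode via KVM_REQ_VM_BUGGED, and evaluates to the condition so the caller can bail out. A sketch (invariant_broken stands in for any should-never-happen check):

	if (KVM_BUG_ON(invariant_broken, kvm))
		return -EIO;	/* VM is marked dead; refuse further work */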
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
@@ -790,7 +817,6 @@ void kvm_reload_remote_mmus(struct kvm *kvm);
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
unsigned long *vcpu_bitmap, cpumask_var_t tmp);
-bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -929,7 +955,7 @@ static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}
-static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
+static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
return false;
}
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 2e448d65a04c..c0ecda7e7f08 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -261,6 +261,10 @@ enum {
ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */
ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */
+ ATA_HOST_NO_PART = (1 << 4), /* Host does not support partial */
+ ATA_HOST_NO_SSC = (1 << 5), /* Host does not support slumber */
+ ATA_HOST_NO_DEVSLP = (1 << 6), /* Host does not support devslp */
+
/* bits 24:31 of host->flags are reserved for LLD specific flags */
/* various lengths of time */
@@ -294,7 +298,7 @@ enum {
* advised to wait only for the following duration before
* doing SRST.
*/
- ATA_TMOUT_PMP_SRST_WAIT = 5000,
+ ATA_TMOUT_PMP_SRST_WAIT = 10000,
/* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
* be a spurious PHY event, so ignore the first PHY event that
diff --git a/include/linux/list.h b/include/linux/list.h
index ce19c6b632a5..231ff089f7d1 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -269,6 +269,24 @@ static inline int list_empty(const struct list_head *head)
}
/**
+ * list_del_init_careful - deletes entry from list and reinitializes it.
+ * @entry: the element to delete from the list.
+ *
+ * This is the same as list_del_init(), except designed to be used
+ * together with list_empty_careful() in a way to guarantee ordering
+ * of other memory operations.
+ *
+ * Any memory operations done before a list_del_init_careful() are
+ * guaranteed to be visible after a list_empty_careful() test.
+ */
+static inline void list_del_init_careful(struct list_head *entry)
+{
+ __list_del_entry(entry);
+ entry->prev = entry;
+ smp_store_release(&entry->next, entry);
+}
+
+/**
* list_empty_careful - tests whether a list is empty and not being modified
* @head: the list to test
*
@@ -283,7 +301,7 @@ static inline int list_empty(const struct list_head *head)
*/
static inline int list_empty_careful(const struct list_head *head)
{
- struct list_head *next = head->next;
+ struct list_head *next = smp_load_acquire(&head->next);
return (next == head) && (next == head->prev);
}
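The release in list_del_init_careful() pairs with the acquire in list_empty_careful(), so the two form a lightweight completion flag: stores made before the delete are visible to any CPU that then observes the list empty. A sketch of the waker/waiter pattern (struct waiter is illustrative):

	struct waiter {
		struct list_head entry;
		int done;
	};

	/* waker side */
	static void complete_waiter(struct waiter *w)
	{
		w->done = 1;				/* plain store ... */
		list_del_init_careful(&w->entry);	/* ... published by the release */
	}

	/* waiter side */
	static bool waiter_done(struct waiter *w)
	{
		/* acquire load pairs with the release store above */
		return list_empty_careful(&w->entry) && w->done;
	}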
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index f452521f2e05..410243038add 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -405,6 +405,10 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
} while (0)
+#define lockdep_assert_none_held_once() do { \
+ WARN_ON_ONCE(debug_locks && current->lockdep_depth); \
+ } while (0)
+
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
@@ -482,6 +486,7 @@ struct lockdep_map { };
#define lockdep_assert_held_write(l) do { (void)(l); } while (0)
#define lockdep_assert_held_read(l) do { (void)(l); } while (0)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
+#define lockdep_assert_none_held_once() do { } while (0)
#define lockdep_recursing(tsk) (0)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index a21dc5413653..0f4897e97c70 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -498,6 +498,12 @@
* simple integer value. When @arg represents a user space pointer, it
* should never be used by the security module.
* Return 0 if permission is granted.
+ * @file_ioctl_compat:
+ * @file contains the file structure.
+ * @cmd contains the operation to perform.
+ * @arg contains the operational arguments.
+ * Check permission for a compat ioctl operation on @file.
+ * Return 0 if permission is granted.
* @mmap_addr :
* Check permissions for a mmap operation at @addr.
* @addr contains virtual address that will be used for the operation.
@@ -1602,6 +1608,8 @@ union security_list_options {
void (*file_free_security)(struct file *file);
int (*file_ioctl)(struct file *file, unsigned int cmd,
unsigned long arg);
+ int (*file_ioctl_compat)(struct file *file, unsigned int cmd,
+ unsigned long arg);
int (*mmap_addr)(unsigned long addr);
int (*mmap_file)(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags);
@@ -1907,6 +1915,7 @@ struct security_hook_heads {
struct hlist_head file_alloc_security;
struct hlist_head file_free_security;
struct hlist_head file_ioctl;
+ struct hlist_head file_ioctl_compat;
struct hlist_head mmap_addr;
struct hlist_head mmap_file;
struct hlist_head file_mprotect;
diff --git a/include/linux/mailbox/zynqmp-ipi-message.h b/include/linux/mailbox/zynqmp-ipi-message.h
index 9542b41eacfd..f86cc6c13511 100644
--- a/include/linux/mailbox/zynqmp-ipi-message.h
+++ b/include/linux/mailbox/zynqmp-ipi-message.h
@@ -3,13 +3,16 @@
#ifndef _LINUX_ZYNQMP_IPI_MESSAGE_H_
#define _LINUX_ZYNQMP_IPI_MESSAGE_H_
+/* IPI buffer MAX length */
+#define IPI_BUF_LEN_MAX 32U
+
/**
* struct zynqmp_ipi_message - ZynqMP IPI message structure
* @len: Length of message
* @data: message payload
*
* This is the structure for data used in mbox_send_message;
- * the maximum length of data buffer is fixed to 12 bytes.
+ * the maximum length of the data buffer is fixed to 32 bytes.
* Client is supposed to be aware of this.
*/
struct zynqmp_ipi_message {
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 20f1e3ff6013..591bc4cefe1d 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -10,16 +10,29 @@
struct mb_cache;
+/* Cache entry flags */
+enum {
+ MBE_REFERENCED_B = 0,
+ MBE_REUSABLE_B
+};
+
struct mb_cache_entry {
/* List of entries in cache - protected by cache->c_list_lock */
struct list_head e_list;
- /* Hash table list - protected by hash chain bitlock */
+ /*
+ * Hash table list - protected by hash chain bitlock. The entry is
+ * guaranteed to be hashed while e_refcnt > 0.
+ */
struct hlist_bl_node e_hash_list;
+ /*
+ * Entry refcount. Once it reaches zero, entry is unhashed and freed.
+ * While refcount > 0, the entry is guaranteed to stay in the hash and
+ * e.g. mb_cache_entry_delete_or_get() will fail.
+ */
atomic_t e_refcnt;
/* Key in hash - stable during lifetime of the entry */
u32 e_key;
- u32 e_referenced:1;
- u32 e_reusable:1;
+ unsigned long e_flags;
/* User provided value - stable during lifetime of the entry */
u64 e_value;
};
@@ -29,16 +42,24 @@ void mb_cache_destroy(struct mb_cache *cache);
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
u64 value, bool reusable);
-void __mb_cache_entry_free(struct mb_cache_entry *entry);
-static inline int mb_cache_entry_put(struct mb_cache *cache,
- struct mb_cache_entry *entry)
+void __mb_cache_entry_free(struct mb_cache *cache,
+ struct mb_cache_entry *entry);
+void mb_cache_entry_wait_unused(struct mb_cache_entry *entry);
+static inline void mb_cache_entry_put(struct mb_cache *cache,
+ struct mb_cache_entry *entry)
{
- if (!atomic_dec_and_test(&entry->e_refcnt))
- return 0;
- __mb_cache_entry_free(entry);
- return 1;
+ unsigned int cnt = atomic_dec_return(&entry->e_refcnt);
+
+ if (cnt > 0) {
+ if (cnt <= 2)
+ wake_up_var(&entry->e_refcnt);
+ return;
+ }
+ __mb_cache_entry_free(cache, entry);
}
+struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
+ u32 key, u64 value);
void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value);
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
u64 value);
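With the reworked refcount, an entry stays hashed while references remain, and a deleter can either remove an unused entry outright or take a reference and wait for the users. A sketch of the delete path, mirroring how a filesystem might drop a stale entry (cache, key and value are illustrative):

	struct mb_cache_entry *entry;

	entry = mb_cache_entry_delete_or_get(cache, key, value);
	if (entry) {
		/* entry was busy: wait until unused, then drop our reference */
		mb_cache_entry_wait_unused(entry);
		mb_cache_entry_put(cache, entry);
	}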
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index 71dd10a3d928..01fd26170e6b 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -63,7 +63,6 @@ static inline struct mcb_bus *to_mcb_bus(struct device *dev)
struct mcb_device {
struct device dev;
struct mcb_bus *bus;
- bool is_added;
struct mcb_driver *driver;
u16 id;
int inst;
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 0f1f784de80e..2ca46ac897ad 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -40,6 +40,9 @@ struct mdio_device {
struct reset_control *reset_ctrl;
unsigned int reset_assert_delay;
unsigned int reset_deassert_delay;
+
+ /* private data pointer for use by MDIO devices */
+ void *priv;
};
#define to_mdio_device(d) container_of(d, struct mdio_device, dev)
diff --git a/drivers/net/phy/mdio-i2c.h b/include/linux/mdio/mdio-i2c.h
index 751dab281f57..751dab281f57 100644
--- a/drivers/net/phy/mdio-i2c.h
+++ b/include/linux/mdio/mdio-i2c.h
diff --git a/drivers/net/phy/mdio-xgene.h b/include/linux/mdio/mdio-xgene.h
index b1f5ccb4ad9c..b1f5ccb4ad9c 100644
--- a/drivers/net/phy/mdio-xgene.h
+++ b/include/linux/mdio/mdio-xgene.h
diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h
index 69632c1b07bd..ae3e7a5c5219 100644
--- a/include/linux/mfd/t7l66xb.h
+++ b/include/linux/mfd/t7l66xb.h
@@ -12,7 +12,6 @@
struct t7l66xb_platform_data {
int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
int (*suspend)(struct platform_device *dev);
int (*resume)(struct platform_device *dev);
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2b65ffb3bd76..18fd0a030584 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -56,6 +56,8 @@
#include <linux/ptp_clock_kernel.h>
#include <net/devlink.h>
+#define MLX5_ADEV_NAME "mlx5_core"
+
enum {
MLX5_BOARD_ID_LEN = 64,
};
@@ -904,7 +906,7 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
struct mlx5_async_ctx {
struct mlx5_core_dev *dev;
atomic_t num_inflight;
- struct wait_queue_head wait;
+ struct completion inflight_done;
};
struct mlx5_async_work;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index deca67353c6f..10313557c10c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -37,6 +37,8 @@ struct user_struct;
struct writeback_control;
struct bdi_writeback;
+extern int sysctl_page_lock_unfairness;
+
void init_mm_internals(void);
#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index a0e1cf1bef4e..68880c2b4fe0 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -397,6 +397,7 @@ struct mmc_host {
unsigned int use_blk_mq:1; /* use blk-mq */
unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */
unsigned int can_dma_map_merge:1; /* merging can be used */
+ unsigned int vqmmc_enabled:1; /* vqmmc regulator is enabled */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
@@ -534,6 +535,8 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
#endif
int mmc_regulator_get_supply(struct mmc_host *mmc);
+int mmc_regulator_enable_vqmmc(struct mmc_host *mmc);
+void mmc_regulator_disable_vqmmc(struct mmc_host *mmc);
static inline int mmc_card_is_removable(struct mmc_host *host)
{
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 897a87c4c827..77192c3fc544 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -434,7 +434,7 @@ static inline bool mmc_op_multi(u32 opcode)
#define MMC_SECURE_TRIM1_ARG 0x80000001
#define MMC_SECURE_TRIM2_ARG 0x80008000
#define MMC_SECURE_ARGS 0x80000000
-#define MMC_TRIM_ARGS 0x00008001
+#define MMC_TRIM_OR_DISCARD_ARGS 0x00008003
#define mmc_driver_type_mask(n) (1 << (n))
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 5d0767cb424a..4ed52879ce55 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -38,7 +38,7 @@ void dump_mm(const struct mm_struct *mm);
} \
} while (0)
#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \
- static bool __section(".data.once") __warned; \
+ static bool __section(.data.once) __warned; \
int __ret_warn_once = !!(cond); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 4c56404e53a7..8265b99d6d55 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -672,9 +672,7 @@ struct x86_cpu_id {
__u16 steppings;
};
-#define X86_FEATURE_MATCH(x) \
- { X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, x }
-
+/* Wild cards for x86_cpu_id::vendor, family, model and feature */
#define X86_VENDOR_ANY 0xffff
#define X86_FAMILY_ANY 0
#define X86_MODEL_ANY 0
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 249e8d9bfbcd..375aa7c58899 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -72,7 +72,8 @@ struct mtd_oob_ops {
};
#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
-#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
+#define MTD_MAX_ECCPOS_ENTRIES_LARGE 1260
+
/**
* struct mtd_oob_region - oob region definition
* @offset: region offset
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
index 339ac798568e..0837ab74b2e7 100644
--- a/include/linux/mtd/onfi.h
+++ b/include/linux/mtd/onfi.h
@@ -161,8 +161,10 @@ struct onfi_ext_param_page {
* @tR: Page read time
* @tCCS: Change column setup time
* @async_timing_mode: Supported asynchronous timing mode
+ * @src_sync_timing_mode: Supported synchronous timing mode
* @vendor_revision: Vendor specific revision number
* @vendor: Vendor specific data
+ * @jedec_id: JEDEC ID of the NAND flash device
*/
struct onfi_params {
int version;
@@ -171,8 +173,10 @@ struct onfi_params {
u16 tR;
u16 tCCS;
u16 async_timing_mode;
+ u16 src_sync_timing_mode;
u16 vendor_revision;
u8 vendor[88];
+ u8 jedec_id;
};
#endif /* __LINUX_MTD_ONFI_H */
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 4ab9bccfcde0..c1d601bc9479 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -438,6 +438,7 @@ struct nand_ecc_ctrl {
* @tWHR_min: WE# high to RE# low
* @tWP_min: WE# pulse width
* @tWW_min: WP# transition to WE# low
+ * @mode: SDR timing mode value
*/
struct nand_sdr_timings {
u64 tBERS_max;
@@ -478,6 +479,7 @@ struct nand_sdr_timings {
u32 tWHR_min;
u32 tWP_min;
u32 tWW_min;
+ u8 mode;
};
/**
@@ -1405,4 +1407,13 @@ static inline void *nand_get_data_buf(struct nand_chip *chip)
return chip->data_buf;
}
+/* Return the supported source synchronous timing mode. */
+static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
+{
+ if (!chip->parameters.onfi)
+ return ONFI_TIMING_MODE_UNKNOWN;
+
+ return le16_to_cpu(chip->parameters.onfi->src_sync_timing_mode);
+}
+
#endif /* __LINUX_MTD_RAWNAND_H */
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index fc0b4b19c900..c5d674c09f89 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -26,6 +26,7 @@
#define SNOR_MFR_SPANSION CFI_MFR_AMD
#define SNOR_MFR_SST CFI_MFR_SST
#define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */
+#define SNOR_MFR_ISSI CFI_MFR_PMC
/*
* Note on opcode nomenclature: some opcodes have a format like
@@ -66,6 +67,7 @@
#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
#define SPINOR_OP_RDEAR 0xc8 /* Read Extended Address Register */
#define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */
+#define SPINOR_OP_WRCR 0x81 /* Write Configuration register */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
@@ -99,6 +101,8 @@
#define SPINOR_OP_WRDI 0x04 /* Write disable */
#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
+#define GLOBAL_BLKPROT_UNLK 0x98 /* Clear global write protection bits */
+
/* Used for S3AN flashes only */
#define SPINOR_OP_XSE 0x50 /* Sector erase */
#define SPINOR_OP_XPP 0x82 /* Page program */
@@ -107,19 +111,22 @@
#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */
#define XSR_RDY BIT(7) /* Ready */
-
/* Used for Macronix and Winbond flashes. */
#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */
#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */
/* Used for Spansion flashes only. */
#define SPINOR_OP_BRWR 0x17 /* Bank register write */
+#define SPINOR_OP_BRRD 0x16 /* Bank register read */
#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
/* Used for Micron flashes only. */
#define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */
#define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */
+/* For Micron flashes only */
+#define SPINOR_VCR_OCTAL_DDR 0xE7 /* VCR BYTE0 value for Octal DDR mode */
+
/* Status Register bits. */
#define SR_WIP BIT(0) /* Write in progress */
#define SR_WEL BIT(1) /* Write enable latch */
@@ -127,8 +134,17 @@
#define SR_BP0 BIT(2) /* Block protect 0 */
#define SR_BP1 BIT(3) /* Block protect 1 */
#define SR_BP2 BIT(4) /* Block protect 2 */
+#define SR_BP_BIT_OFFSET 2 /* Offset to Block protect 0 */
+#define SR_BP_BIT_MASK (SR_BP2 | SR_BP1 | SR_BP0)
+
#define SR_TB BIT(5) /* Top/Bottom protect */
#define SR_SRWD BIT(7) /* SR write protect */
+#define SR_BP3 0x40
+/* Bit to determine whether protection starts from top or bottom */
+#define SR_BP_TB 0x20
+#define BP_BITS_FROM_SR(sr) (((sr) & SR_BP_BIT_MASK) >> SR_BP_BIT_OFFSET)
+#define M25P_MAX_LOCKABLE_SECTORS 64
+
/* Spansion/Cypress specific status bits */
#define SR_E_ERR BIT(5)
#define SR_P_ERR BIT(6)
@@ -147,6 +163,16 @@
/* Configuration Register bits. */
#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
+/* Extended/Bank Address Register bits */
+#define EAR_SEGMENT_MASK 0x7 /* 128 Mb segment mask */
+
+enum read_mode {
+ SPI_NOR_NORMAL = 0,
+ SPI_NOR_FAST,
+ SPI_NOR_DUAL,
+ SPI_NOR_QUAD,
+};
+
/* Status Register 2 bits. */
#define SR2_QUAD_EN_BIT7 BIT(7)
@@ -225,6 +251,7 @@ static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto)
return spi_nor_get_protocol_data_nbits(proto);
}
+#define SPI_NOR_MAX_ID_LEN 6
enum spi_nor_ops {
SPI_NOR_OPS_READ = 0,
SPI_NOR_OPS_WRITE,
@@ -243,6 +270,7 @@ enum spi_nor_option_flags {
SNOR_F_4B_OPCODES = BIT(6),
SNOR_F_HAS_4BAIT = BIT(7),
SNOR_F_HAS_LOCK = BIT(8),
+ SNOR_F_BROKEN_OCTAL_DDR = BIT(9),
};
/**
@@ -575,6 +603,7 @@ struct spi_nor {
struct spi_mem *spimem;
u8 *bouncebuf;
size_t bouncebuf_size;
+ struct spi_device *spi;
const struct flash_info *info;
u32 page_size;
u8 addr_width;
@@ -582,11 +611,21 @@ struct spi_nor {
u8 read_opcode;
u8 read_dummy;
u8 program_opcode;
+ enum read_mode flash_read;
+ u32 jedec_id;
+ u16 curbank;
+ u16 n_sectors;
+ u32 sector_size;
enum spi_nor_protocol read_proto;
enum spi_nor_protocol write_proto;
enum spi_nor_protocol reg_proto;
bool sst_write_second;
+ bool shift;
+ bool isparallel;
+ bool isstacked;
u32 flags;
+ bool is_lock;
+ u8 device_id[SPI_NOR_MAX_ID_LEN];
int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
@@ -663,9 +702,20 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps);
/**
+ * spi_nor_shutdown() - prepare for reboot
+ * @mem: the spi_mem structure
+ *
+ * Drivers can use this function to get the flash address back to 0,
+ * as required for a ROM boot.
+ */
+void spi_nor_shutdown(struct spi_mem *mem);
+
+/**
* spi_nor_restore_addr_mode() - restore the status of SPI NOR
* @nor: the spi_nor structure
*/
void spi_nor_restore(struct spi_nor *nor);
+int spi_nor_wait_till_ready(struct spi_nor *nor);
+
#endif
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 397a08ade6a2..7fe7b87a3ded 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -60,6 +60,7 @@ extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
+extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int);
extern int follow_down_one(struct path *);
extern int follow_down(struct path *);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c70b79dba1dc..a6bb64dccb88 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -160,31 +160,38 @@ static inline bool dev_xmit_complete(int rc)
* (unsigned long) so they can be read and written atomically.
*/
+#define NET_DEV_STAT(FIELD) \
+ union { \
+ unsigned long FIELD; \
+ atomic_long_t __##FIELD; \
+ }
+
struct net_device_stats {
- unsigned long rx_packets;
- unsigned long tx_packets;
- unsigned long rx_bytes;
- unsigned long tx_bytes;
- unsigned long rx_errors;
- unsigned long tx_errors;
- unsigned long rx_dropped;
- unsigned long tx_dropped;
- unsigned long multicast;
- unsigned long collisions;
- unsigned long rx_length_errors;
- unsigned long rx_over_errors;
- unsigned long rx_crc_errors;
- unsigned long rx_frame_errors;
- unsigned long rx_fifo_errors;
- unsigned long rx_missed_errors;
- unsigned long tx_aborted_errors;
- unsigned long tx_carrier_errors;
- unsigned long tx_fifo_errors;
- unsigned long tx_heartbeat_errors;
- unsigned long tx_window_errors;
- unsigned long rx_compressed;
- unsigned long tx_compressed;
+ NET_DEV_STAT(rx_packets);
+ NET_DEV_STAT(tx_packets);
+ NET_DEV_STAT(rx_bytes);
+ NET_DEV_STAT(tx_bytes);
+ NET_DEV_STAT(rx_errors);
+ NET_DEV_STAT(tx_errors);
+ NET_DEV_STAT(rx_dropped);
+ NET_DEV_STAT(tx_dropped);
+ NET_DEV_STAT(multicast);
+ NET_DEV_STAT(collisions);
+ NET_DEV_STAT(rx_length_errors);
+ NET_DEV_STAT(rx_over_errors);
+ NET_DEV_STAT(rx_crc_errors);
+ NET_DEV_STAT(rx_frame_errors);
+ NET_DEV_STAT(rx_fifo_errors);
+ NET_DEV_STAT(rx_missed_errors);
+ NET_DEV_STAT(tx_aborted_errors);
+ NET_DEV_STAT(tx_carrier_errors);
+ NET_DEV_STAT(tx_fifo_errors);
+ NET_DEV_STAT(tx_heartbeat_errors);
+ NET_DEV_STAT(tx_window_errors);
+ NET_DEV_STAT(rx_compressed);
+ NET_DEV_STAT(tx_compressed);
};
+#undef NET_DEV_STAT
#include <linux/cache.h>
@@ -258,9 +265,11 @@ struct hh_cache {
* relationship HH alignment <= LL alignment.
*/
#define LL_RESERVED_SPACE(dev) \
- ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
+ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
- ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
+ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
struct header_ops {
int (*create) (struct sk_buff *skb, struct net_device *dev,
@@ -723,8 +732,11 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
/* We only give a hint, preemption can change CPU under us */
val |= raw_smp_processor_id();
- if (table->ents[index] != val)
- table->ents[index] = val;
+ /* The following WRITE_ONCE() is paired with the READ_ONCE()
+ * here, and another one in get_rps_cpu().
+ */
+ if (READ_ONCE(table->ents[index]) != val)
+ WRITE_ONCE(table->ents[index], val);
}
}
@@ -1683,7 +1695,6 @@ enum netdev_ml_priv_type {
* @tipc_ptr: TIPC specific data
* @atalk_ptr: AppleTalk link
* @ip_ptr: IPv4 specific data
- * @dn_ptr: DECnet specific data
* @ip6_ptr: IPv6 specific data
* @ax25_ptr: AX.25 specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
@@ -1929,9 +1940,6 @@ struct net_device {
void *atalk_ptr;
#endif
struct in_device __rcu *ip_ptr;
-#if IS_ENABLED(CONFIG_DECNET)
- struct dn_dev __rcu *dn_ptr;
-#endif
struct inet6_dev __rcu *ip6_ptr;
#if IS_ENABLED(CONFIG_AX25)
void *ax25_ptr;
@@ -1977,7 +1985,7 @@ struct net_device {
struct netdev_queue *_tx ____cacheline_aligned_in_smp;
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
- struct Qdisc *qdisc;
+ struct Qdisc __rcu *qdisc;
#ifdef CONFIG_NET_SCHED
DECLARE_HASHTABLE (qdisc_hash, 4);
#endif
@@ -3755,7 +3763,7 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
return NET_RX_DROP;
}
- skb_scrub_packet(skb, true);
+ skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
skb->priority = 0;
return 0;
}
@@ -4181,6 +4189,24 @@ void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
+static inline void
+__dev_addr_set(struct net_device *dev, const u8 *addr, size_t len)
+{
+ memcpy(dev->dev_addr, addr, len);
+}
+
+static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
+{
+ __dev_addr_set(dev, addr, dev->addr_len);
+}
+
+static inline void
+dev_addr_mod(struct net_device *dev, unsigned int offset,
+ const u8 *addr, size_t len)
+{
+ memcpy(&dev->dev_addr[offset], addr, len);
+}
+
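The setters above centralize all writes to dev->dev_addr so drivers stop poking the array directly, paving the way for tracking or constifying the address later. A sketch:

	u8 mac[ETH_ALEN];

	eth_random_addr(mac);		/* any valid source of an address */
	dev_addr_set(dev, mac);		/* not memcpy(dev->dev_addr, mac, ETH_ALEN) */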
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
@@ -4936,4 +4962,10 @@ do { \
extern struct net_device *blackhole_netdev;
+/* Note: Avoid these macros in the fast path; prefer per-cpu or per-queue counters. */
+#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
+#define DEV_STATS_ADD(DEV, FIELD, VAL) \
+ atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
+#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
+
#endif /* _LINUX_NETDEVICE_H */
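The NET_DEV_STAT() union keeps the legacy unsigned long layout for existing readers while letting concurrent writers go through the overlapping atomic_long_t, avoiding torn updates on these rarely written counters. A usage sketch of the macros (the counters touched are illustrative):

	/* writer, e.g. a slow error path in a tunnel driver: */
	DEV_STATS_INC(dev, rx_dropped);
	DEV_STATS_ADD(dev, rx_errors, 1);

	/* reader wanting the atomic view: */
	unsigned long dropped = DEV_STATS_READ(dev, rx_dropped);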
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 4c0e6539effd..92d916d24c76 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -241,11 +241,6 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
#endif
break;
-#if IS_ENABLED(CONFIG_DECNET)
- case NFPROTO_DECNET:
- hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
- break;
-#endif
default:
WARN_ON_ONCE(1);
break;
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 32658749e9db..35342fb48866 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -188,6 +188,8 @@ struct ip_set_type_variant {
/* Return true if "b" set is the same as "a"
* according to the create set parameters */
bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+ /* Cancel ongoing garbage collectors before destroying the set */
+ void (*cancel_gc)(struct ip_set *set);
/* Region-locking is used */
bool region_lock;
};
@@ -236,6 +238,8 @@ extern void ip_set_type_unregister(struct ip_set_type *set_type);
/* A generic IP set */
struct ip_set {
+ /* For call_rcu in destroy */
+ struct rcu_head rcu;
/* The name of the set */
char name[IPSET_MAXNAMELEN];
/* Lock protecting the set data */
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
index 625f491b95de..fb31312825ae 100644
--- a/include/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -9,6 +9,7 @@ struct ip_ct_sctp {
enum sctp_conntrack state;
__be32 vtag[IP_CT_DIR_MAX];
+ u8 init[IP_CT_DIR_MAX];
u8 last_dir;
u8 flags;
};
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index f6267e2883f2..0518ca72b761 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -39,7 +39,6 @@ struct nfnetlink_subsystem {
int (*commit)(struct net *net, struct sk_buff *skb);
int (*abort)(struct net *net, struct sk_buff *skb,
enum nfnl_abort_action action);
- void (*cleanup)(struct net *net);
bool (*valid_genid)(struct net *net, u32 genid);
};
@@ -57,6 +56,33 @@ static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type)
return subsys << 8 | msg_type;
}
+static inline void nfnl_fill_hdr(struct nlmsghdr *nlh, u8 family, u8 version,
+ __be16 res_id)
+{
+ struct nfgenmsg *nfmsg;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = family;
+ nfmsg->version = version;
+ nfmsg->res_id = res_id;
+}
+
+static inline struct nlmsghdr *nfnl_msg_put(struct sk_buff *skb, u32 portid,
+ u32 seq, int type, int flags,
+ u8 family, u8 version,
+ __be16 res_id)
+{
+ struct nlmsghdr *nlh;
+
+ nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg), flags);
+ if (!nlh)
+ return NULL;
+
+ nfnl_fill_hdr(nlh, family, version, res_id);
+
+ return nlh;
+}
+
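nfnl_msg_put() folds together the nlmsg_put() call and the struct nfgenmsg fill that every nfnetlink message previously open-coded. A sketch of how a dump callback might start a message (the conntrack subsystem/type constants are illustrative):

	struct nlmsghdr *nlh;

	nlh = nfnl_msg_put(skb, portid, seq,
			   nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW),
			   NLM_F_MULTI, AF_INET, NFNETLINK_V0, 0);
	if (!nlh)
		return -EMSGSIZE;

	/* ... nla_put() attributes, then nlmsg_end(skb, nlh) ... */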
void nfnl_lock(__u8 subsys_id);
void nfnl_unlock(__u8 subsys_id);
#ifdef CONFIG_PROVE_LOCKING
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index db472c9cd8e9..f0d846df3a42 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -94,10 +94,6 @@ struct ebt_table {
struct ebt_replace_kernel *table;
unsigned int valid_hooks;
rwlock_t lock;
- /* e.g. could be the table explicitly only allows certain
- * matches, targets, ... 0 == let it in */
- int (*check)(const struct ebt_table_info *info,
- unsigned int valid_hooks);
/* the data used by the kernel */
struct ebt_table_info *private;
struct module *me;
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
index 8dddfb151f00..a5f7bef1b3a4 100644
--- a/include/linux/netfilter_defs.h
+++ b/include/linux/netfilter_defs.h
@@ -7,14 +7,6 @@
/* in/out/forward only */
#define NF_ARP_NUMHOOKS 3
-/* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */
-#define NF_DN_NUMHOOKS 7
-
-#if IS_ENABLED(CONFIG_DECNET)
-/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
-#define NF_MAX_HOOKS NF_DN_NUMHOOKS
-#else
#define NF_MAX_HOOKS NF_INET_NUMHOOKS
-#endif
#endif
diff --git a/include/linux/nls.h b/include/linux/nls.h
index 499e486b3722..e0bf8367b274 100644
--- a/include/linux/nls.h
+++ b/include/linux/nls.h
@@ -47,7 +47,7 @@ enum utf16_endian {
/* nls_base.c */
extern int __register_nls(struct nls_table *, struct module *);
extern int unregister_nls(struct nls_table *);
-extern struct nls_table *load_nls(char *);
+extern struct nls_table *load_nls(const char *charset);
extern void unload_nls(struct nls_table *);
extern struct nls_table *load_nls_default(void);
#define register_nls(nls) __register_nls((nls), THIS_MODULE)
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 9003e29cde46..6cb593d9ed08 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -122,6 +122,8 @@ int watchdog_nmi_probe(void);
int watchdog_nmi_enable(unsigned int cpu);
void watchdog_nmi_disable(unsigned int cpu);
+void lockup_detector_reconfigure(void);
+
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
*
@@ -195,7 +197,7 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh);
#endif
#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
- defined(CONFIG_HARDLOCKUP_DETECTOR)
+ defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
void watchdog_update_hrtimer_threshold(u64 period);
#else
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index 0c5ef54fd416..207ef2a20e48 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -9,6 +9,10 @@
struct task_struct;
+#ifndef barrier_nospec
+# define barrier_nospec() do { } while (0)
+#endif
+
/**
* array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
* @index: array element index
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
index 959e0bd9a913..73364ae91689 100644
--- a/include/linux/nvme-tcp.h
+++ b/include/linux/nvme-tcp.h
@@ -114,8 +114,9 @@ struct nvme_tcp_icresp_pdu {
struct nvme_tcp_term_pdu {
struct nvme_tcp_hdr hdr;
__le16 fes;
- __le32 fei;
- __u8 rsvd[8];
+ __le16 feil;
+ __le16 feiu;
+ __u8 rsvd[10];
};
/**
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index a260cd754f28..dd2801c28b99 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -7,6 +7,7 @@
#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H
+#include <linux/bits.h>
#include <linux/types.h>
#include <linux/uuid.h>
@@ -107,8 +108,22 @@ enum {
NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
- NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
+ NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
+ NVME_REG_BPINFO = 0x0040, /* Boot Partition Information */
+ NVME_REG_BPRSEL = 0x0044, /* Boot Partition Read Select */
+ NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer
+ * Location
+ */
+ NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
+ NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
+ NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
+ NVME_REG_PMREBS = 0x0e0c, /* Persistent Memory Region Elasticity
+ * Buffer Size
+ */
+ NVME_REG_PMRSWTP = 0x0e10, /* Persistent Memory Region Sustained
+ * Write Throughput
+ */
NVME_REG_DBS = 0x1000, /* SQ 0 Tail Doorbell */
};
@@ -295,6 +310,14 @@ enum {
NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
+ NVME_CTRL_CTRATT_128_ID = 1 << 0,
+ NVME_CTRL_CTRATT_NON_OP_PSP = 1 << 1,
+ NVME_CTRL_CTRATT_NVM_SETS = 1 << 2,
+ NVME_CTRL_CTRATT_READ_RECV_LVLS = 1 << 3,
+ NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 1 << 4,
+ NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5,
+ NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7,
+ NVME_CTRL_CTRATT_UUID_LIST = 1 << 9,
};
struct nvme_lbaf {
@@ -352,6 +375,9 @@ enum {
NVME_ID_CNS_NS_PRESENT = 0x11,
NVME_ID_CNS_CTRL_NS_LIST = 0x12,
NVME_ID_CNS_CTRL_LIST = 0x13,
+ NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15,
+ NVME_ID_CNS_NS_GRANULARITY = 0x16,
+ NVME_ID_CNS_UUID_LIST = 0x17,
};
enum {
@@ -409,7 +435,8 @@ struct nvme_smart_log {
__u8 avail_spare;
__u8 spare_thresh;
__u8 percent_used;
- __u8 rsvd6[26];
+ __u8 endu_grp_crit_warn_sumry;
+ __u8 rsvd7[25];
__u8 data_units_read[16];
__u8 data_units_written[16];
__u8 host_reads[16];
@@ -423,7 +450,11 @@ struct nvme_smart_log {
__le32 warning_temp_time;
__le32 critical_comp_time;
__le16 temp_sensor[8];
- __u8 rsvd216[296];
+ __le32 thm_temp1_trans_count;
+ __le32 thm_temp2_trans_count;
+ __le32 thm_temp1_total_time;
+ __le32 thm_temp2_total_time;
+ __u8 rsvd232[280];
};
struct nvme_fw_slot_info_log {
@@ -439,7 +470,8 @@ enum {
NVME_CMD_EFFECTS_NCC = 1 << 2,
NVME_CMD_EFFECTS_NIC = 1 << 3,
NVME_CMD_EFFECTS_CCC = 1 << 4,
- NVME_CMD_EFFECTS_CSE_MASK = 3 << 16,
+ NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16),
+ NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
};
struct nvme_effects_log {
@@ -491,6 +523,10 @@ enum {
};
enum {
+ NVME_AER_ERROR_PERSIST_INT_ERR = 0x03,
+};
+
+enum {
NVME_AER_NOTICE_NS_CHANGED = 0x00,
NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
NVME_AER_NOTICE_ANA = 0x03,
@@ -563,6 +599,7 @@ enum nvme_opcode {
nvme_cmd_compare = 0x05,
nvme_cmd_write_zeroes = 0x08,
nvme_cmd_dsm = 0x09,
+ nvme_cmd_verify = 0x0c,
nvme_cmd_resv_register = 0x0d,
nvme_cmd_resv_report = 0x0e,
nvme_cmd_resv_acquire = 0x11,
@@ -806,10 +843,14 @@ enum nvme_admin_opcode {
nvme_admin_ns_mgmt = 0x0d,
nvme_admin_activate_fw = 0x10,
nvme_admin_download_fw = 0x11,
+ nvme_admin_dev_self_test = 0x14,
nvme_admin_ns_attach = 0x15,
nvme_admin_keep_alive = 0x18,
nvme_admin_directive_send = 0x19,
nvme_admin_directive_recv = 0x1a,
+ nvme_admin_virtual_mgmt = 0x1c,
+ nvme_admin_nvme_mi_send = 0x1d,
+ nvme_admin_nvme_mi_recv = 0x1e,
nvme_admin_dbbuf = 0x7C,
nvme_admin_format_nvm = 0x80,
nvme_admin_security_send = 0x81,
@@ -873,6 +914,7 @@ enum {
NVME_FEAT_PLM_CONFIG = 0x13,
NVME_FEAT_PLM_WINDOW = 0x14,
NVME_FEAT_HOST_BEHAVIOR = 0x16,
+ NVME_FEAT_SANITIZE = 0x17,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
@@ -883,6 +925,10 @@ enum {
NVME_LOG_FW_SLOT = 0x03,
NVME_LOG_CHANGED_NS = 0x04,
NVME_LOG_CMD_EFFECTS = 0x05,
+ NVME_LOG_DEVICE_SELF_TEST = 0x06,
+ NVME_LOG_TELEMETRY_HOST = 0x07,
+ NVME_LOG_TELEMETRY_CTRL = 0x08,
+ NVME_LOG_ENDURANCE_GROUP = 0x09,
NVME_LOG_ANA = 0x0c,
NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
@@ -1290,7 +1336,11 @@ enum {
NVME_SC_SGL_INVALID_OFFSET = 0x16,
NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
+ NVME_SC_SANITIZE_FAILED = 0x1C,
+ NVME_SC_SANITIZE_IN_PROGRESS = 0x1D,
+
NVME_SC_NS_WRITE_PROTECTED = 0x20,
+ NVME_SC_CMD_INTERRUPTED = 0x21,
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
@@ -1328,6 +1378,8 @@ enum {
NVME_SC_NS_NOT_ATTACHED = 0x11a,
NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
NVME_SC_CTRL_LIST_INVALID = 0x11c,
+ NVME_SC_BP_WRITE_PROHIBITED = 0x11e,
+ NVME_SC_PMR_SAN_PROHIBITED = 0x123,
/*
* I/O Command Set Specific - NVM commands:
diff --git a/include/linux/of.h b/include/linux/of.h
index a7621e2b440a..6f6e68bc7c5a 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -351,6 +351,8 @@ extern const void *of_get_property(const struct device_node *node,
int *lenp);
extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
extern struct device_node *of_get_next_cpu_node(struct device_node *prev);
+extern struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+ int index);
#define for_each_property_of_node(dn, pp) \
for (pp = dn->properties; pp != NULL; pp = pp->next)
@@ -422,12 +424,14 @@ extern int of_detach_node(struct device_node *);
* @sz: number of array elements to read
*
* Search for a property in a device node and read 8-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
+ * it.
*
* dts entry of array should be like:
- * property = /bits/ 8 <0x50 0x60 0x70>;
+ * ``property = /bits/ 8 <0x50 0x60 0x70>;``
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
*
* The out_values is modified only if a valid u8 value can be decoded.
*/
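
A usage sketch matching the dts entry above (the node pointer and property name are invented):

#include <linux/kernel.h>
#include <linux/of.h>

static int read_levels(struct device_node *np)
{
        u8 levels[3];
        int ret;

        /* Matches: property = /bits/ 8 <0x50 0x60 0x70>; */
        ret = of_property_read_u8_array(np, "vendor,levels", levels,
                                        ARRAY_SIZE(levels));
        if (ret)
                return ret;     /* -EINVAL, -ENODATA or -EOVERFLOW */
        return levels[0];
}
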
@@ -452,12 +456,14 @@ static inline int of_property_read_u8_array(const struct device_node *np,
* @sz: number of array elements to read
*
* Search for a property in a device node and read 16-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
+ * it.
*
* dts entry of array should be like:
- * property = /bits/ 16 <0x5000 0x6000 0x7000>;
+ * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;``
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
*
* The out_values is modified only if a valid u16 value can be decoded.
*/
@@ -483,7 +489,9 @@ static inline int of_property_read_u16_array(const struct device_node *np,
* @sz: number of array elements to read
*
* Search for a property in a device node and read 32-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
* -ENODATA if property does not have a value, and -EOVERFLOW if the
* property data isn't large enough.
*
@@ -511,7 +519,9 @@ static inline int of_property_read_u32_array(const struct device_node *np,
* @sz: number of array elements to read
*
* Search for a property in a device node and read 64-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
* -ENODATA if property does not have a value, and -EOVERFLOW if the
* property data isn't large enough.
*
@@ -552,7 +562,7 @@ bool of_console_check(struct device_node *dn, char *name, int index);
extern int of_cpu_node_to_id(struct device_node *np);
-int of_map_rid(struct device_node *np, u32 rid,
+int of_map_id(struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out);
@@ -765,6 +775,12 @@ static inline struct device_node *of_get_next_cpu_node(struct device_node *prev)
return NULL;
}
+static inline struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+ int index)
+{
+ return NULL;
+}
+
static inline int of_n_addr_cells(struct device_node *np)
{
return 0;
@@ -970,7 +986,7 @@ static inline int of_cpu_node_to_id(struct device_node *np)
return -ENODEV;
}
-static inline int of_map_rid(struct device_node *np, u32 rid,
+static inline int of_map_id(struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out)
{
@@ -1038,7 +1054,9 @@ static inline bool of_node_is_type(const struct device_node *np, const char *typ
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u8 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u8 and -ENODATA if the
* property does not have a value.
*/
@@ -1055,7 +1073,9 @@ static inline int of_property_count_u8_elems(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u16 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u16 and -ENODATA if the
* property does not have a value.
*/
@@ -1072,7 +1092,9 @@ static inline int of_property_count_u16_elems(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u32 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u32 and -ENODATA if the
* property does not have a value.
*/
@@ -1089,7 +1111,9 @@ static inline int of_property_count_u32_elems(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u64 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u64 and -ENODATA if the
* property does not have a value.
*/
@@ -1110,7 +1134,7 @@ static inline int of_property_count_u64_elems(const struct device_node *np,
* Search for a property in a device tree node and retrieve a list of
* terminated string values (pointer to data, not a copy) in that property.
*
- * If @out_strs is NULL, the number of strings in the property is returned.
+ * Return: If @out_strs is NULL, the number of strings in the property is returned.
*/
static inline int of_property_read_string_array(const struct device_node *np,
const char *propname, const char **out_strs,
@@ -1126,10 +1150,11 @@ static inline int of_property_read_string_array(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device tree node and retrieve the number of null
- * terminated string contain in it. Returns the number of strings on
- * success, -EINVAL if the property does not exist, -ENODATA if property
- * does not have a value, and -EILSEQ if the string is not null-terminated
- * within the length of the property data.
+ * terminated strings contained in it.
+ *
+ * Return: The number of strings on success, -EINVAL if the property does not
+ * exist, -ENODATA if property does not have a value, and -EILSEQ if the string
+ * is not null-terminated within the length of the property data.
*/
static inline int of_property_count_strings(const struct device_node *np,
const char *propname)
@@ -1149,7 +1174,8 @@ static inline int of_property_count_strings(const struct device_node *np,
* Search for a property in a device tree node and retrieve a null
* terminated string value (pointer to data, not a copy) in the list of strings
* contained in that property.
- * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist, -ENODATA if
* property does not have a value, and -EILSEQ if the string is not
* null-terminated within the length of the property data.
*
@@ -1169,7 +1195,8 @@ static inline int of_property_read_string_index(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node.
- * Returns true if the property exists false otherwise.
+ *
+ * Return: true if the property exists, false otherwise.
*/
static inline bool of_property_read_bool(const struct device_node *np,
const char *propname)
@@ -1347,6 +1374,8 @@ enum of_reconfig_change {
};
#ifdef CONFIG_OF_DYNAMIC
+#include <linux/slab.h>
+
extern int of_reconfig_notifier_register(struct notifier_block *);
extern int of_reconfig_notifier_unregister(struct notifier_block *);
extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd);
@@ -1390,6 +1419,23 @@ static inline int of_changeset_update_property(struct of_changeset *ocs,
{
return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop);
}
+
+struct device_node *of_changeset_create_device_nodev(
+ struct of_changeset *ocs, struct device_node *parent,
+ const char *fmt, va_list vargs);
+
+__printf(3, 4) struct device_node *
+of_changeset_create_device_node(struct of_changeset *ocs,
+ struct device_node *parent, const char *fmt, ...);
+
+int __of_changeset_add_update_property_copy(struct of_changeset *ocs,
+ struct device_node *np, const char *name, const void *value,
+ int length, bool update);
+
+int __of_changeset_add_update_property_string_list(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char **strs, int count, bool update);
+
#else /* CONFIG_OF_DYNAMIC */
static inline int of_reconfig_notifier_register(struct notifier_block *nb)
{
@@ -1409,13 +1455,327 @@ static inline int of_reconfig_get_state_change(unsigned long action,
{
return -EINVAL;
}
+
+static inline struct device_node *of_changeset_create_device_nodev(
+ struct of_changeset *ocs, struct device_node *parent,
+ const char *fmt, va_list vargs)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline __printf(3, 4) struct device_node *
+of_changeset_create_device_node(struct of_changeset *ocs,
+ struct device_node *parent, const char *fmt, ...)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int __of_changeset_add_update_property_copy(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const void *value, int length, bool update)
+{
+ return -EINVAL;
+}
+
+static inline __printf(4, 5) int of_changeset_add_property_stringf(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char *fmt, ...)
+{
+ return -EINVAL;
+}
+
+static inline int of_changeset_update_property_stringf(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char *fmt, ...)
+{
+ return -EINVAL;
+}
+
+static inline int __of_changeset_add_update_property_string_list(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char **strs, int count, bool update)
+{
+ return -EINVAL;
+}
+
#endif /* CONFIG_OF_DYNAMIC */
+#ifdef CONFIG_OF_DYNAMIC
+/**
+ * of_changeset_add_property_copy - Create a new property copying name & value
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @value: pointer to the value data
+ * @length: length of the value in bytes
+ *
+ * Adds a property to the changeset by making copies of the name & value
+ * entries.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_add_property_copy(struct of_changeset *ocs,
+ struct device_node *np, const char *name,
+ const void *value, int length)
+{
+ return __of_changeset_add_update_property_copy(ocs, np, name, value,
+ length, false);
+}
+
+/**
+ * of_changeset_update_property_copy - Update a property copying name & value
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @value: pointer to the value data
+ * @length: length of the value in bytes
+ *
+ * Updates a property in the changeset by making copies of the name & value
+ * entries.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_update_property_copy(struct of_changeset *ocs,
+ struct device_node *np, const char *name,
+ const void *value, int length)
+{
+ return __of_changeset_add_update_property_copy(ocs, np, name, value,
+ length, true);
+}
+
+/**
+ * __of_changeset_add_update_property_string - Create/update a string property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @str: string property value
+ * @update: True on update operation
+ *
+ * Adds/updates a string property to the changeset by making copies of the name
+ * and the given value. The @update parameter controls whether an add or
+ * update takes place.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int __of_changeset_add_update_property_string(
+ struct of_changeset *ocs, struct device_node *np, const char *name,
+ const char *str, bool update)
+{
+ return __of_changeset_add_update_property_copy(ocs, np, name, str,
+ strlen(str) + 1, update);
+}
+
+/**
+ * __of_changeset_add_update_property_stringv - Create/update a formatted
+ * string property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @fmt: format of string property
+ * @vargs: arguments of the format string
+ * @update: True on update operation
+ *
+ * Adds/updates a string property to the changeset by making copies of the name
+ * and the formatted value. The @update parameter controls whether an add or
+ * update takes place.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int __of_changeset_add_update_property_stringv(
+ struct of_changeset *ocs, struct device_node *np, const char *name,
+ const char *fmt, va_list vargs, bool update)
+{
+ char *str;
+ int ret;
+
+ str = kvasprintf(GFP_KERNEL, fmt, vargs);
+ if (!str)
+ return -ENOMEM;
+ ret = __of_changeset_add_update_property_string(ocs, np, name, str,
+ update);
+ kfree(str);
+
+ return ret;
+}
+
+/**
+ * of_changeset_add_property_string_list - Create a new string list property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @strs: pointer to the string list
+ * @count: string count
+ *
+ * Adds a string list property to the changeset.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_add_property_string_list(
+ struct of_changeset *ocs, struct device_node *np, const char *name,
+ const char **strs, int count)
+{
+ return __of_changeset_add_update_property_string_list(ocs, np, name,
+ strs, count, false);
+}
+
+/**
+ * of_changeset_update_property_string_list - Update string list property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @strs: pointer to the string list
+ * @count: string count
+ *
+ * Updates a string list property in the changeset.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_update_property_string_list(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char **strs, int count)
+{
+ return __of_changeset_add_update_property_string_list(ocs, np, name,
+ strs, count, true);
+}
+
+/**
+ * of_changeset_add_property_string - Adds a string property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @str: string property
+ *
+ * Adds a string property to the changeset by making copies of the name
+ * and the string value.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_add_property_string(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char *str)
+{
+ return __of_changeset_add_update_property_string(ocs, np, name, str,
+ false);
+}
+
+/**
+ * of_changeset_update_property_string - Update a string property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @str: string property
+ *
+ * Updates a string property in the changeset by making copies of the name
+ * and the string value.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_update_property_string(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char *str)
+{
+ return __of_changeset_add_update_property_string(ocs, np, name, str,
+ true);
+}
+
+/**
+ * of_changeset_add_property_u32 - Create a new u32 property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @val: value in host endian format
+ *
+ * Adds a u32 property to the changeset.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_add_property_u32(struct of_changeset *ocs,
+ struct device_node *np, const char *name, u32 val)
+{
+ val = cpu_to_be32(val);
+ return __of_changeset_add_update_property_copy(ocs, np, name, &val,
+ sizeof(val), false);
+}
+
+/**
+ * of_changeset_update_property_u32 - Update u32 property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @val: value in host endian format
+ *
+ * Updates a u32 property in the changeset.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_update_property_u32(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, u32 val)
+{
+ val = cpu_to_be32(val);
+ return __of_changeset_add_update_property_copy(ocs, np, name, &val,
+ sizeof(val), true);
+}
+
+/**
+ * of_changeset_add_property_bool - Create a new bool property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ *
+ * Adds a bool property to the changeset. Note that there is
+ * no option to set the value to false, since the presence of
+ * the property itself represents true.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_add_property_bool(
+ struct of_changeset *ocs, struct device_node *np, const char *name)
+{
+ return __of_changeset_add_update_property_copy(ocs, np, name, "", 0,
+ false);
+}
+
+/**
+ * of_changeset_update_property_bool - Update a bool property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ *
+ * Updates a bool property in the changeset. Note that there is
+ * no option to set the value to false, since the presence of
+ * the property itself represents true.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_update_property_bool(struct of_changeset *ocs,
+ struct device_node *np, const char *name)
+{
+ return __of_changeset_add_update_property_copy(ocs, np, name, "", 0,
+ true);
+}
+#endif
+
+/* CONFIG_OF_RESOLVE api */
+extern int of_resolve_phandles(struct device_node *tree);
+
/**
* of_device_is_system_power_controller - Tells if system-power-controller is found for device_node
* @np: Pointer to the given device_node
*
- * return true if present false otherwise
+ * Return: true if present, false otherwise
*/
static inline bool of_device_is_system_power_controller(const struct device_node *np)
{
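
A hedged sketch tying the new changeset property helpers together with the existing of_changeset_init()/of_changeset_apply() API (the node pointer, property names, and values are invented):

#include <linux/of.h>

static int update_board_props(struct device_node *np)
{
        struct of_changeset ocs;
        int ret;

        of_changeset_init(&ocs);
        ret = of_changeset_add_property_string(&ocs, np, "status", "okay");
        if (!ret)
                ret = of_changeset_add_property_u32(&ocs, np, "vendor,mode", 3);
        if (!ret)
                ret = of_changeset_apply(&ocs);
        if (ret)
                of_changeset_destroy(&ocs);
        return ret;
}
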
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 1214cabb2247..e8b78139f78c 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -52,9 +52,10 @@ extern struct irq_domain *of_msi_get_domain(struct device *dev,
struct device_node *np,
enum irq_domain_bus_token token);
extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
- u32 rid);
+ u32 id,
+ u32 bus_token);
extern void of_msi_configure(struct device *dev, struct device_node *np);
-u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
+u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
#else
static inline int of_irq_count(struct device_node *dev)
{
@@ -85,17 +86,17 @@ static inline struct irq_domain *of_msi_get_domain(struct device *dev,
return NULL;
}
static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
- u32 rid)
+ u32 id, u32 bus_token)
{
return NULL;
}
static inline void of_msi_configure(struct device *dev, struct device_node *np)
{
}
-static inline u32 of_msi_map_rid(struct device *dev,
- struct device_node *msi_np, u32 rid_in)
+static inline u32 of_msi_map_id(struct device *dev,
+ struct device_node *msi_np, u32 id_in)
{
- return rid_in;
+ return id_in;
}
#endif
diff --git a/include/linux/once.h b/include/linux/once.h
index ae6f4eb41cbe..3a6671d961b9 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -5,10 +5,18 @@
#include <linux/types.h>
#include <linux/jump_label.h>
+/* Helpers used from arbitrary contexts.
+ * Hard irqs are blocked, be cautious.
+ */
bool __do_once_start(bool *done, unsigned long *flags);
void __do_once_done(bool *done, struct static_key_true *once_key,
unsigned long *flags, struct module *mod);
+/* Variant for process contexts only. */
+bool __do_once_slow_start(bool *done);
+void __do_once_slow_done(bool *done, struct static_key_true *once_key,
+ struct module *mod);
+
/* Call a function exactly once. The idea of DO_ONCE() is to perform
* a function call such as initialization of random seeds, etc, only
* once, where DO_ONCE() can live in the fast-path. After @func has
@@ -52,9 +60,29 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
___ret; \
})
+/* Variant of DO_ONCE() for process/sleepable contexts. */
+#define DO_ONCE_SLOW(func, ...) \
+ ({ \
+ bool ___ret = false; \
+ static bool __section(.data.once) ___done = false; \
+ static DEFINE_STATIC_KEY_TRUE(___once_key); \
+ if (static_branch_unlikely(&___once_key)) { \
+ ___ret = __do_once_slow_start(&___done); \
+ if (unlikely(___ret)) { \
+ func(__VA_ARGS__); \
+ __do_once_slow_done(&___done, &___once_key, \
+ THIS_MODULE); \
+ } \
+ } \
+ ___ret; \
+ })
+
#define get_random_once(buf, nbytes) \
DO_ONCE(get_random_bytes, (buf), (nbytes))
#define get_random_once_wait(buf, nbytes) \
DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) \
+#define get_random_slow_once(buf, nbytes) \
+ DO_ONCE_SLOW(get_random_bytes, (buf), (nbytes))
+
#endif /* _LINUX_ONCE_H */
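
A short sketch of the sleepable variant, which must only be called from process context (the buffer and wrapper are invented):

#include <linux/once.h>
#include <linux/random.h>

static u8 foo_secret[16];

static void foo_init_secret(void)
{
        /* Process context only: DO_ONCE_SLOW() serializes on a mutex. */
        get_random_slow_once(foo_secret, sizeof(foo_secret));
}
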
diff --git a/include/linux/pci.h b/include/linux/pci.h
index fc343d123127..1cd5caa567cf 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1687,6 +1687,7 @@ static inline struct pci_dev *pci_get_class(unsigned int class,
#define pci_dev_put(dev) do { } while (0)
static inline void pci_set_master(struct pci_dev *dev) { }
+static inline void pci_clear_master(struct pci_dev *dev) { }
static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
static inline void pci_disable_device(struct pci_dev *dev) { }
static inline int pci_assign_resource(struct pci_dev *dev, int i)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 42588645478d..d8b188643a87 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -59,6 +59,8 @@
#define PCI_CLASS_BRIDGE_EISA 0x0602
#define PCI_CLASS_BRIDGE_MC 0x0603
#define PCI_CLASS_BRIDGE_PCI 0x0604
+#define PCI_CLASS_BRIDGE_PCI_NORMAL 0x060400
+#define PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE 0x060401
#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
#define PCI_CLASS_BRIDGE_NUBUS 0x0606
#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
@@ -552,6 +554,7 @@
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
+#define PCI_DEVICE_ID_AMD_VANGOGH_USB 0x163a
#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
@@ -3008,6 +3011,7 @@
#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0
#define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2
#define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601
+#define PCI_DEVICE_ID_INTEL_HDA_ARL 0x7728
#define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119
#define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a
#define PCI_DEVICE_ID_INTEL_E6XX_CU 0x8183
@@ -3022,6 +3026,8 @@
#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
+#define PCI_VENDOR_ID_WANGXUN 0x8088
+
#define PCI_VENDOR_ID_SCALEMP 0x8686
#define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010
@@ -3102,6 +3108,8 @@
#define PCI_VENDOR_ID_3COM_2 0xa727
+#define PCI_VENDOR_ID_SOLIDRUN 0xd063
+
#define PCI_VENDOR_ID_DIGIUM 0xd161
#define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b7ac395513c0..9e0e20c11aa3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -607,6 +607,7 @@ struct perf_event {
/* The cumulative AND of all event_caps for events in this group. */
int group_caps;
+ unsigned int group_generation;
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
@@ -701,6 +702,8 @@ struct perf_event {
struct pid_namespace *ns;
u64 id;
+ atomic64_t lost_samples;
+
u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
@@ -1018,15 +1021,31 @@ extern int perf_event_output(struct perf_event *event,
struct pt_regs *regs);
static inline bool
-is_default_overflow_handler(struct perf_event *event)
+__is_default_overflow_handler(perf_overflow_handler_t overflow_handler)
{
- if (likely(event->overflow_handler == perf_event_output_forward))
+ if (likely(overflow_handler == perf_event_output_forward))
return true;
- if (unlikely(event->overflow_handler == perf_event_output_backward))
+ if (unlikely(overflow_handler == perf_event_output_backward))
return true;
return false;
}
+#define is_default_overflow_handler(event) \
+ __is_default_overflow_handler((event)->overflow_handler)
+
+#ifdef CONFIG_BPF_SYSCALL
+static inline bool uses_default_overflow_handler(struct perf_event *event)
+{
+ if (likely(is_default_overflow_handler(event)))
+ return true;
+
+ return __is_default_overflow_handler(event->orig_overflow_handler);
+}
+#else
+#define uses_default_overflow_handler(event) \
+ is_default_overflow_handler(event)
+#endif
+
extern void
perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
diff --git a/include/linux/phy/phy-zynqmp.h b/include/linux/phy/phy-zynqmp.h
new file mode 100644
index 000000000000..0a25fa85527e
--- /dev/null
+++ b/include/linux/phy/phy-zynqmp.h
@@ -0,0 +1,60 @@
+/*
+ * Xilinx ZynqMP PHY header
+ *
+ * Copyright (C) 2016 Xilinx, Inc.
+ *
+ * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PHY_ZYNQMP_H_
+#define _PHY_ZYNQMP_H_
+
+struct phy;
+
+#if defined(CONFIG_PHY_XILINX_ZYNQMP)
+
+extern int xpsgtr_override_deemph(struct phy *phy, u8 plvl, u8 vlvl);
+extern int xpsgtr_margining_factor(struct phy *phy, u8 plvl, u8 vlvl);
+extern int xpsgtr_wait_pll_lock(struct phy *phy);
+int xpsgtr_usb_crst_assert(struct phy *phy);
+int xpsgtr_usb_crst_release(struct phy *phy);
+#else
+
+static inline int xpsgtr_override_deemph(struct phy *base, u8 plvl, u8 vlvl)
+{
+ return -ENODEV;
+}
+
+static inline int xpsgtr_margining_factor(struct phy *base, u8 plvl, u8 vlvl)
+{
+ return -ENODEV;
+}
+
+static inline int xpsgtr_wait_pll_lock(struct phy *phy)
+{
+ return -ENODEV;
+}
+
+static inline int xpsgtr_usb_crst_assert(struct phy *phy)
+{
+ return -ENODEV;
+}
+
+static inline int xpsgtr_usb_crst_release(struct phy *phy)
+{
+ return -ENODEV;
+}
+
+#endif
+
+#endif /* _PHY_ZYNQMP_H_ */
diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h
index 022bcea9edec..99a9b09dc839 100644
--- a/include/linux/platform_data/x86/pmc_atom.h
+++ b/include/linux/platform_data/x86/pmc_atom.h
@@ -7,6 +7,8 @@
#ifndef PMC_ATOM_H
#define PMC_ATOM_H
+#include <linux/bits.h>
+
/* ValleyView Power Control Unit PCI Device ID */
#define PCI_DEVICE_ID_VLV_PMC 0x0F1C
/* CherryTrail Power Control Unit PCI Device ID */
@@ -139,9 +141,9 @@
#define ACPI_MMIO_REG_LEN 0x100
#define PM1_CNT 0x4
-#define SLEEP_TYPE_MASK 0xFFFFECFF
+#define SLEEP_TYPE_MASK GENMASK(12, 10)
#define SLEEP_TYPE_S5 0x1C00
-#define SLEEP_ENABLE 0x2000
+#define SLEEP_ENABLE BIT(13)
extern int pmc_atom_read(int offset, u32 *value);
extern int pmc_atom_write(int offset, u32 value);
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 569f446502be..c7bd8a1a6097 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -29,7 +29,11 @@ struct platform_device {
struct resource *resource;
const struct platform_device_id *id_entry;
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
/* MFD cell pointer */
struct mfd_cell *mfd_cell;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index baf02ff91a31..4476994d40c9 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -95,8 +95,8 @@ struct generic_pm_domain {
struct device dev;
struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */
- struct list_head master_links; /* Links with PM domain as a master */
- struct list_head slave_links; /* Links with PM domain as a slave */
+ struct list_head parent_links; /* Links with PM domain as a parent */
+ struct list_head child_links; /* Links with PM domain as a child */
struct list_head dev_list; /* List of devices */
struct dev_power_governor *gov;
struct work_struct power_off_work;
@@ -151,10 +151,10 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
}
struct gpd_link {
- struct generic_pm_domain *master;
- struct list_head master_node;
- struct generic_pm_domain *slave;
- struct list_head slave_node;
+ struct generic_pm_domain *parent;
+ struct list_head parent_node;
+ struct generic_pm_domain *child;
+ struct list_head child_node;
/* Sub-domain's per-master domain performance state */
unsigned int performance_state;
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 7145795b4b9d..3efa6ed2aaca 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -38,7 +38,7 @@ extern int pm_runtime_force_resume(struct device *dev);
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
-extern int pm_runtime_get_if_in_use(struct device *dev);
+extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
@@ -59,6 +59,13 @@ extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device_link *link);
+static inline int pm_runtime_get_if_in_use(struct device *dev)
+{
+ return pm_runtime_get_if_active(dev, false);
+}
+
+extern int devm_pm_runtime_enable(struct device *dev);
+
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
dev->power.ignore_children = enable;
@@ -142,6 +149,11 @@ static inline int pm_runtime_get_if_in_use(struct device *dev)
{
return -EINVAL;
}
+static inline int pm_runtime_get_if_active(struct device *dev,
+ bool ign_usage_count)
+{
+ return -EINVAL;
+}
static inline int __pm_runtime_set_status(struct device *dev,
unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
@@ -150,6 +162,8 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}
+static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }
+
static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
@@ -286,6 +300,11 @@ static inline void pm_runtime_disable(struct device *dev)
__pm_runtime_disable(dev, true);
}
+/**
+ * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend()
+ * at driver exit time unless your driver initially enabled pm_runtime
+ * with devm_pm_runtime_enable() (which handles it for you).
+ */
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
__pm_runtime_use_autosuspend(dev, true);
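
A probe-time sketch combining the new devm helper with the generalized getter (the probe function is hypothetical):

#include <linux/pm_runtime.h>

static int foo_probe(struct device *dev)
{
        int ret;

        /* The matching pm_runtime_disable() is handled by devres. */
        ret = devm_pm_runtime_enable(dev);
        if (ret)
                return ret;

        /* Take a reference only if the device is already runtime-active. */
        if (pm_runtime_get_if_active(dev, true) > 0) {
                /* ... touch hardware ... */
                pm_runtime_put(dev);
        }
        return 0;
}
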
diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
index cd5b62db9084..e63a63aa47a3 100644
--- a/include/linux/pm_wakeirq.h
+++ b/include/linux/pm_wakeirq.h
@@ -17,8 +17,8 @@
#ifdef CONFIG_PM
extern int dev_pm_set_wake_irq(struct device *dev, int irq);
-extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
- int irq);
+extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq);
+extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq);
extern void dev_pm_clear_wake_irq(struct device *dev);
extern void dev_pm_enable_wake_irq(struct device *dev);
extern void dev_pm_disable_wake_irq(struct device *dev);
@@ -35,6 +35,11 @@ static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
return 0;
}
+static inline int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
+{
+ return 0;
+}
+
static inline void dev_pm_clear_wake_irq(struct device *dev)
{
}
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 1cdc32b1f1b0..7e0fdcf905d2 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -16,11 +16,7 @@
extern struct ctl_table epoll_table[]; /* for sysctl */
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
additional memory. */
-#ifdef __clang__
-#define MAX_STACK_ALLOC 768
-#else
#define MAX_STACK_ALLOC 832
-#endif
#define FRONTEND_STACK_ALLOC 256
#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 7413779484d5..3baabcd7fa3e 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -64,6 +64,7 @@ struct bq27xxx_device_info {
struct bq27xxx_access_methods bus;
struct bq27xxx_reg_cache cache;
int charge_design_full;
+ bool removed;
unsigned long last_update;
struct delayed_work work;
struct power_supply *bat;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 28413f737e7d..f4aad9554de2 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -383,8 +383,9 @@ extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info,
int ocv, int temp);
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
-extern int power_supply_set_input_current_limit_from_supplier(
- struct power_supply *psy);
+int power_supply_get_property_from_supplier(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
extern int power_supply_set_battery_charged(struct power_supply *psy);
#ifdef CONFIG_POWER_SUPPLY
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index bbb68dba37cc..bc3f1aecaa19 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -322,4 +322,34 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
#endif
+/**
+ * migrate_disable - Prevent migration of the current task
+ *
+ * Maps to preempt_disable() which also disables preemption. Use
+ * migrate_disable() to annotate that the intent is to prevent migration,
+ * but not necessarily preemption.
+ *
+ * Can be invoked nested like preempt_disable() and needs the corresponding
+ * number of migrate_enable() invocations.
+ */
+static __always_inline void migrate_disable(void)
+{
+ preempt_disable();
+}
+
+/**
+ * migrate_enable - Allow migration of the current task
+ *
+ * Counterpart to migrate_disable().
+ *
+ * As migrate_disable() can be invoked nested, only the outermost invocation
+ * reenables migration.
+ *
+ * Currently mapped to preempt_enable().
+ */
+static __always_inline void migrate_enable(void)
+{
+ preempt_enable();
+}
+
#endif /* __LINUX_PREEMPT_H */
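
A minimal sketch of the annotation in use (the per-CPU counter is invented; under this mapping the section also runs with preemption disabled):

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, foo_hits);

static void foo_count(void)
{
        migrate_disable();              /* stay on this CPU ... */
        __this_cpu_inc(foo_hits);       /* safe: cannot migrate here */
        migrate_enable();               /* ... until here */
}
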
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 3b5cb66d8bc1..eb2a093ba042 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -529,4 +529,23 @@ static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true)
+#ifdef CONFIG_PRINTK
+extern void __printk_safe_enter(void);
+extern void __printk_safe_exit(void);
+/*
+ * The printk_deferred_enter/exit macros are available only as a hack for
+ * some code paths that need to defer all printk console printing. Interrupts
+ * must be disabled for the deferred duration.
+ */
+#define printk_deferred_enter __printk_safe_enter
+#define printk_deferred_exit __printk_safe_exit
+#else
+static inline void printk_deferred_enter(void)
+{
+}
+static inline void printk_deferred_exit(void)
+{
+}
+#endif
+
#endif
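
A sketch of the deferred-printing bracket (the lock and message are invented; note that interrupts stay disabled across the whole deferred span, as the comment above requires):

#include <linux/printk.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(foo_lock);

static void foo_report(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&foo_lock, flags);
        printk_deferred_enter();        /* irqs are off for the span */
        pr_warn("foo: dumping state under foo_lock\n");
        printk_deferred_exit();
        raw_spin_unlock_irqrestore(&foo_lock, flags);
}
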
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 865d02c224ad..b8d41d0e7b46 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -127,8 +127,10 @@ static inline void proc_remove(struct proc_dir_entry *de) {}
static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { return 0; }
#define proc_create_net_data(name, mode, parent, ops, state_size, data) ({NULL;})
+#define proc_create_net_data_write(name, mode, parent, ops, write, state_size, data) ({NULL;})
#define proc_create_net(name, mode, parent, state_size, ops) ({NULL;})
#define proc_create_net_single(name, mode, parent, show, data) ({NULL;})
+#define proc_create_net_single_write(name, mode, parent, show, write, data) ({NULL;})
static inline struct pid *tgid_pidfd_to_pid(const struct file *file)
{
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index b2c9c460947d..d1c26f5174e5 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -44,8 +44,8 @@ struct pwm_args {
};
enum {
- PWMF_REQUESTED = 1 << 0,
- PWMF_EXPORTED = 1 << 1,
+ PWMF_REQUESTED = 0,
+ PWMF_EXPORTED = 1,
};
/*
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 27aab84fcbaa..b93cb93d1956 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -285,7 +285,9 @@ static inline void dqstats_dec(unsigned int type)
#define DQ_FAKE_B 3 /* no limits only usage */
#define DQ_READ_B 4 /* dquot was read into memory */
#define DQ_ACTIVE_B 5 /* dquot is active (dquot_release not called) */
-#define DQ_LASTSET_B 6 /* Following 6 bits (see QIF_) are reserved\
+#define DQ_RELEASING_B 6 /* dquot is in releasing_dquots list waiting
+ * to be cleaned up */
+#define DQ_LASTSET_B 7 /* Following 6 bits (see QIF_) are reserved\
* for the mask of entries set via SETQUOTA\
* quotactl. They are set under dq_data_lock\
* and the quota format handling dquot can\
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 91e0b7624053..844b5836d11d 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -59,7 +59,7 @@ static inline bool dquot_is_busy(struct dquot *dquot)
{
if (test_bit(DQ_MOD_B, &dquot->dq_flags))
return true;
- if (atomic_read(&dquot->dq_count) > 1)
+ if (atomic_read(&dquot->dq_count) > 0)
return true;
return false;
}
@@ -99,6 +99,8 @@ int dquot_file_open(struct inode *inode, struct file *file);
int dquot_enable(struct inode *inode, int type, int format_id,
unsigned int flags);
+int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
+ unsigned int flags);
int dquot_quota_on(struct super_block *sb, int type, int format_id,
const struct path *path);
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h
index 5cdfcb873a8f..772d45b2a60a 100644
--- a/include/linux/raid_class.h
+++ b/include/linux/raid_class.h
@@ -77,7 +77,3 @@ DEFINE_RAID_ATTRIBUTE(enum raid_state, state)
struct raid_template *raid_class_attach(struct raid_function_template *);
void raid_class_release(struct raid_template *);
-
-int __must_check raid_component_add(struct raid_template *, struct device *,
- struct device *);
-
diff --git a/include/linux/random.h b/include/linux/random.h
index 3feafab498ad..ed75fb2b0ca9 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -19,14 +19,14 @@ void add_input_randomness(unsigned int type, unsigned int code,
void add_interrupt_randomness(int irq) __latent_entropy;
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
-#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
static inline void add_latent_entropy(void)
{
+#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
-}
#else
-static inline void add_latent_entropy(void) { }
+ add_device_randomness(NULL, 0);
#endif
+}
void get_random_bytes(void *buf, size_t len);
size_t __must_check get_random_bytes_arch(void *buf, size_t len);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 09e6ac4b669b..bc4b5c905a60 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -164,6 +164,37 @@ do { \
cond_resched(); \
} while (0)
+/**
+ * rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states
+ * @old_ts: jiffies at start of processing.
+ *
+ * This helper is for long-running softirq handlers, such as NAPI threads in
+ * networking. The caller should initialize the variable passed in as @old_ts
+ * at the beginning of the softirq handler. When invoked frequently, this macro
+ * will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will
+ * provide both RCU and RCU-Tasks quiescent states. Note that this macro
+ * modifies its old_ts argument.
+ *
+ * Because regions of code that have disabled softirq act as RCU read-side
+ * critical sections, this macro should be invoked with softirq (and
+ * preemption) enabled.
+ *
+ * The macro is not needed when CONFIG_PREEMPT_RT is defined. RT kernels have
+ * more opportunities to invoke schedule() and thus to provide the necessary
+ * quiescent states. By contrast, cond_resched() alone does not achieve the
+ * same effect because it does not provide RCU-Tasks quiescent states.
+ */
+#define rcu_softirq_qs_periodic(old_ts) \
+do { \
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && \
+ time_after(jiffies, (old_ts) + HZ / 10)) { \
+ preempt_disable(); \
+ rcu_softirq_qs(); \
+ preempt_enable(); \
+ (old_ts) = jiffies; \
+ } \
+} while (0)
+
/*
* Infrastructure to implement the synchronize_() primitives in
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
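
A kthread-style usage sketch for rcu_softirq_qs_periodic() (the poll function is hypothetical):

#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>

extern void foo_poll_once(void *data);  /* hypothetical busy work */

static int foo_poll_thread(void *data)
{
        unsigned long last_qs = jiffies;

        while (!kthread_should_stop()) {
                foo_poll_once(data);
                /* Reports RCU/RCU-Tasks quiescent states every ~100ms. */
                rcu_softirq_qs_periodic(last_qs);
                cond_resched();
        }
        return 0;
}
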
@@ -385,6 +416,24 @@ do { \
} while (0)
/**
+ * rcu_replace_pointer() - replace an RCU pointer, returning its old value
+ * @rcu_ptr: RCU pointer, whose old value is returned
+ * @ptr: regular pointer
+ * @c: the lockdep conditions under which the dereference will take place
+ *
+ * Perform a replacement, where @rcu_ptr is an RCU-annotated
+ * pointer and @c is the lockdep argument that is passed to the
+ * rcu_dereference_protected() call used to read that pointer. The old
+ * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr.
+ */
+#define rcu_replace_pointer(rcu_ptr, ptr, c) \
+({ \
+ typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \
+ rcu_assign_pointer((rcu_ptr), (ptr)); \
+ __tmp; \
+})
+
+/**
* rcu_swap_protected() - swap an RCU and a regular pointer
* @rcu_ptr: RCU pointer
* @ptr: regular pointer
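
A locking sketch for the new rcu_replace_pointer() helper (the structure, pointer, and lock are invented):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct foo_cfg { int level; };

static struct foo_cfg __rcu *foo_cfg_ptr;
static DEFINE_SPINLOCK(foo_cfg_lock);

/* Publish @newcfg and return the old config for deferred freeing. */
static struct foo_cfg *foo_swap_cfg(struct foo_cfg *newcfg)
{
        struct foo_cfg *old;

        spin_lock(&foo_cfg_lock);
        old = rcu_replace_pointer(foo_cfg_ptr, newcfg,
                                  lockdep_is_held(&foo_cfg_lock));
        spin_unlock(&foo_cfg_lock);
        return old;     /* free with kfree_rcu() or after synchronize_rcu() */
}
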
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index dfe493ac692d..edc095434787 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -295,6 +295,17 @@ typedef void (*regmap_unlock)(void *);
* read operation on a bus such as SPI, I2C, etc. Most of the
* devices do not need this.
* @reg_write: Same as above for writing.
+ * @reg_update_bits: Optional callback that if filled will be used to perform
+ * all the update_bits (read-modify-write) operations. Should
+ * only be provided if the device requires special lock and
+ * register handling and the operation cannot be represented as
+ * a simple update_bits operation on a bus such as SPI, I2C, etc.
+ * @read: Optional callback that if filled will be used to perform all the
+ * bulk reads from the registers. Data is returned in the buffer used
+ * to transmit data.
+ * @write: Same as above for writing.
+ * @max_raw_read: Max raw read size that can be used on the device.
+ * @max_raw_write: Max raw write size that can be used on the device.
* @fast_io: Register IO is fast. Use a spinlock instead of a mutex
* to perform locking. This field is ignored if custom lock/unlock
* functions are used (see fields lock/unlock of struct regmap_config).
@@ -371,6 +382,14 @@ struct regmap_config {
int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+ int (*reg_update_bits)(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val);
+ /* Bulk read/write */
+ int (*read)(void *context, const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size);
+ int (*write)(void *context, const void *data, size_t count);
+ size_t max_raw_read;
+ size_t max_raw_write;
bool fast_io;
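
A hedged regmap_config sketch wiring up the new fields (the read-modify-write helper is invented; a real driver would point it at the device's native RMW operation):

#include <linux/regmap.h>

/* Hypothetical: the device exposes a native read-modify-write command. */
extern int foo_hw_rmw(void *ctx, unsigned int reg, unsigned int mask,
                      unsigned int val);

static int foo_reg_update_bits(void *context, unsigned int reg,
                               unsigned int mask, unsigned int val)
{
        return foo_hw_rmw(context, reg, mask, val);
}

static const struct regmap_config foo_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .reg_update_bits = foo_reg_update_bits,
        .max_raw_read = 256,
        .max_raw_write = 256,
};
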
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 16ad66683ad0..56fa63d27e95 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -358,6 +358,8 @@ enum rsc_handling_status {
* @start: power on the device and boot it
* @stop: power off the device
* @kick: kick a virtqueue (virtqueue id given as a parameter)
+ * @peek_remote_kick: check whether the remote side has kicked
+ * @ack_remote_kick: acknowledge the remote kick
* @da_to_va: optional platform hook to perform address translations
* @parse_fw: parse firmware to extract information (e.g. resource table)
* @handle_rsc: optional platform hook to handle vendor resources. Should return
@@ -374,6 +376,8 @@ struct rproc_ops {
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
void (*kick)(struct rproc *rproc, int vqid);
+ bool (*peek_remote_kick)(struct rproc *rproc, char *buf, size_t *len);
+ void (*ack_remote_kick)(struct rproc *rproc);
void * (*da_to_va)(struct rproc *rproc, u64 da, int len);
int (*parse_fw)(struct rproc *rproc, const struct firmware *fw);
int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc,
@@ -481,6 +485,7 @@ struct rproc_dump_segment {
* @auto_boot: flag to indicate if remote processor should be auto-started
* @dump_segments: list of segments in the firmware
* @nb_vdev: number of vdev currently handled by rproc
+ * @sysfs_kick: allow kicking the remote processor from sysfs
*/
struct rproc {
struct list_head node;
@@ -514,6 +519,7 @@ struct rproc {
bool auto_boot;
struct list_head dump_segments;
int nb_vdev;
+ int sysfs_kick;
};
/**
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 1a40277b512c..b73950772299 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -99,7 +99,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
- struct file *filp, poll_table *poll_table);
+ struct file *filp, poll_table *poll_table, int full);
#define RING_BUFFER_ALL_CPUS -1
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 91ccae946716..c80bd129e939 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -39,12 +39,15 @@ struct anon_vma {
atomic_t refcount;
/*
- * Count of child anon_vmas and VMAs which points to this anon_vma.
+ * Count of child anon_vmas. Equal to the count of all anon_vmas that
+ * have ->parent pointing to this one, including itself.
*
* This counter is used for making decision about reusing anon_vma
* instead of forking new one. See comments in function anon_vma_clone.
*/
- unsigned degree;
+ unsigned long num_children;
+ /* Count of VMAs whose ->anon_vma pointer points to this object. */
+ unsigned long num_active_vmas;
struct anon_vma *parent; /* Parent of this anon_vma */
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index a68972b097b7..267533fecbdd 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -41,7 +41,9 @@ struct rpmsg_channel_info {
* rpmsg_device - device that belong to the rpmsg bus
* @dev: the device struct
* @id: device id (used to match between rpmsg drivers and devices)
- * @driver_override: driver name to force a match
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
* @src: local address
* @dst: destination address
* @ept: the rpmsg endpoint of this channel
@@ -50,7 +52,7 @@ struct rpmsg_channel_info {
struct rpmsg_device {
struct device dev;
struct rpmsg_device_id id;
- char *driver_override;
+ const char *driver_override;
u32 src;
u32 dst;
struct rpmsg_endpoint *ept;
@@ -113,6 +115,8 @@ struct rpmsg_driver {
#if IS_ENABLED(CONFIG_RPMSG)
+int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+ const char *driver_override);
int register_rpmsg_device(struct rpmsg_device *dev);
void unregister_rpmsg_device(struct rpmsg_device *dev);
int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner);
@@ -137,6 +141,12 @@ __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
#else
+static inline int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+ const char *driver_override)
+{
+ return -ENXIO;
+}
+
static inline int register_rpmsg_device(struct rpmsg_device *dev)
{
return -ENXIO;
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 6eec50fb36c8..4f922afb607a 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -151,6 +151,20 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
#define for_each_sg(sglist, sg, nr, __i) \
for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
+/*
+ * Loop over each sg element in the given sg_table object.
+ */
+#define for_each_sgtable_sg(sgt, sg, i) \
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
+
+/*
+ * Loop over each sg element in the given *DMA mapped* sg_table object.
+ * Please use sg_dma_address(sg) and sg_dma_len(sg) to extract DMA addresses
+ * of each element.
+ */
+#define for_each_sgtable_dma_sg(sgt, sg, i) \
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+
/**
* sg_chain - Chain two sglists together
* @prv: First scatterlist
@@ -401,9 +415,10 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
* @sglist: sglist to iterate over
* @piter: page iterator to hold current page, sg, sg_pgoffset
* @nents: maximum number of sg entries to iterate over
- * @pgoffset: starting page offset
+ * @pgoffset: starting page offset (in pages)
*
* Callers may use sg_page_iter_page() to get each page pointer.
+ * Each iteration operates on a PAGE_SIZE unit.
*/
#define for_each_sg_page(sglist, piter, nents, pgoffset) \
for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
@@ -412,18 +427,47 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
/**
* for_each_sg_dma_page - iterate over the pages of the given sg list
* @sglist: sglist to iterate over
- * @dma_iter: page iterator to hold current page
+ * @dma_iter: DMA page iterator to hold current page
* @dma_nents: maximum number of sg entries to iterate over, this is the value
* returned from dma_map_sg
- * @pgoffset: starting page offset
+ * @pgoffset: starting page offset (in pages)
*
* Callers may use sg_page_iter_dma_address() to get each page's DMA address.
+ * Each iteration operates on a PAGE_SIZE unit.
*/
#define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset) \
for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents, \
pgoffset); \
__sg_page_iter_dma_next(dma_iter);)
+/**
+ * for_each_sgtable_page - iterate over all pages in the sg_table object
+ * @sgt: sg_table object to iterate over
+ * @piter: page iterator to hold current page
+ * @pgoffset: starting page offset (in pages)
+ *
+ * Iterates over all the memory pages in the buffer described by
+ * a scatterlist stored in the given sg_table object.
+ * See also for_each_sg_page(). Each iteration operates on a PAGE_SIZE unit.
+ */
+#define for_each_sgtable_page(sgt, piter, pgoffset) \
+ for_each_sg_page(sgt->sgl, piter, sgt->orig_nents, pgoffset)
+
+/**
+ * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
+ * @sgt: sg_table object to iterate over
+ * @dma_iter: DMA page iterator to hold current page
+ * @pgoffset: starting page offset (in pages)
+ *
+ * Iterates over all the DMA mapped pages in the buffer described by
+ * a scatterlist stored in the given sg_table object.
+ * See also for_each_sg_dma_page(). Each iteration operates on a PAGE_SIZE
+ * unit.
+ */
+#define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset) \
+ for_each_sg_dma_page(sgt->sgl, dma_iter, sgt->nents, pgoffset)
+
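Unlike the per-element walkers, these iterate in fixed PAGE_SIZE steps regardless of how large each segment is. A sketch under the same assumptions as above:

	struct sg_page_iter piter;
	struct sg_dma_page_iter diter;

	for_each_sgtable_page(sgt, &piter, 0) {
		struct page *page = sg_page_iter_page(&piter);

		pr_debug("page %p\n", page);	/* one PAGE_SIZE unit per pass */
	}

	for_each_sgtable_dma_page(sgt, &diter, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&diter);

		pr_debug("dma page at %pad\n", &addr);
	}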
/*
* Mapping sg iterator
*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 171cb7475b45..d0e639497b10 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -526,10 +526,6 @@ struct sched_dl_entity {
* task has to wait for a replenishment to be performed at the
* next firing of dl_timer.
*
- * @dl_boosted tells if we are boosted due to DI. If so we are
- * outside bandwidth enforcement mechanism (but only until we
- * exit the critical section);
- *
* @dl_yielded tells if task gave up the CPU before consuming
* all its available runtime during the last job.
*
@@ -544,7 +540,6 @@ struct sched_dl_entity {
* overruns.
*/
unsigned int dl_throttled : 1;
- unsigned int dl_boosted : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
@@ -563,6 +558,15 @@ struct sched_dl_entity {
* time.
*/
struct hrtimer inactive_timer;
+
+#ifdef CONFIG_RT_MUTEXES
+ /*
+ * Priority Inheritance. When a DEADLINE scheduling entity is boosted
+ * pi_se points to the donor, otherwise points to the dl_se it belongs
+ * to (the original one/itself).
+ */
+ struct sched_dl_entity *pi_se;
+#endif
};
#ifdef CONFIG_UCLAMP_TASK
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index b3f88470cbb5..2f355c3c0d15 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -123,7 +123,7 @@ struct signal_struct {
#ifdef CONFIG_POSIX_TIMERS
/* POSIX.1b Interval Timers */
- int posix_timer_id;
+ unsigned int next_posix_timer_id;
struct list_head posix_timers;
/* ITIMER_REAL timer for the process */
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 36f3011ab601..e3c20a4f81f5 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -51,7 +51,9 @@ extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);
void __noreturn do_task_dead(void);
+void __noreturn make_task_dead(int signr);
+extern void mm_cache_init(void);
extern void proc_caches_init(void);
extern void fork_init(void);
@@ -92,7 +94,6 @@ extern long _do_fork(struct kernel_clone_args *kargs);
extern bool legacy_clone_args_valid(const struct kernel_clone_args *kargs);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
-struct mm_struct *copy_init_mm(void);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
@@ -112,10 +113,36 @@ static inline struct task_struct *get_task_struct(struct task_struct *t)
}
extern void __put_task_struct(struct task_struct *t);
+extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);
static inline void put_task_struct(struct task_struct *t)
{
- if (refcount_dec_and_test(&t->usage))
+ if (!refcount_dec_and_test(&t->usage))
+ return;
+
+ /*
+ * Under PREEMPT_RT, we can't call __put_task_struct
+ * in atomic context because it will indirectly
+ * acquire sleeping locks.
+ *
+ * call_rcu() will schedule __put_task_struct_rcu_cb()
+ * to be called in process context.
+ *
+ * __put_task_struct() is called when
+ * refcount_dec_and_test(&t->usage) succeeds.
+ *
+ * This means that it can't "conflict" with
+ * put_task_struct_rcu_user() which abuses ->rcu the same
+ * way; rcu_users has a reference so task->usage can't be
+ * zero after rcu_users 1 -> 0 transition.
+ *
+ * delayed_free_task() also uses ->rcu, but it is only called
+ * when it fails to fork a process. Therefore, there is no
+ * way it can conflict with put_task_struct().
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible())
+ call_rcu(&t->rcu, __put_task_struct_rcu_cb);
+ else
__put_task_struct(t);
}
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index d10150587d81..879a5c8f930b 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -16,14 +16,14 @@
* try_get_task_stack() instead. task_stack_page will return a pointer
* that could get freed out from under you.
*/
-static inline void *task_stack_page(const struct task_struct *task)
+static __always_inline void *task_stack_page(const struct task_struct *task)
{
return task->stack;
}
#define setup_thread_stack(new,old) do { } while(0)
-static inline unsigned long *end_of_stack(const struct task_struct *task)
+static __always_inline unsigned long *end_of_stack(const struct task_struct *task)
{
#ifdef CONFIG_STACK_GROWSUP
return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
diff --git a/include/linux/security.h b/include/linux/security.h
index aa5c7141c8d1..1a99958b850b 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -362,6 +362,8 @@ int security_file_permission(struct file *file, int mask);
int security_file_alloc(struct file *file);
void security_file_free(struct file *file);
int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int security_file_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg);
int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags);
int security_mmap_addr(unsigned long addr);
@@ -907,6 +909,13 @@ static inline int security_file_ioctl(struct file *file, unsigned int cmd,
return 0;
}
+static inline int security_file_ioctl_compat(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ return 0;
+}
+
static inline int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags)
{
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index bb2bc99388ca..432fdabd00e4 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -95,7 +95,6 @@ struct uart_8250_port {
struct list_head list; /* ports on this IRQ */
u32 capabilities; /* port capabilities */
unsigned short bugs; /* port bugs */
- bool fifo_bug; /* min RX trigger if enabled */
unsigned int tx_loadsz; /* transmit fifo load size */
unsigned char acr;
unsigned char fcr;
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 2b78cc734719..10f209d54f18 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -297,6 +297,23 @@ struct uart_state {
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
+/**
+ * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars
+ * @up: uart_port structure describing the port
+ * @chars: number of characters sent
+ *
+ * This function advances the tail of circular xmit buffer by the number of
+ * @chars transmitted and handles accounting of transmitted bytes (into
+ * @up's icount.tx).
+ */
+static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
+{
+ struct circ_buf *xmit = &up->state->xmit;
+
+ xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
+ up->icount.tx += chars;
+}
+
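A hedged sketch of the intended call site, a driver's TX path; the register offset and FIFO depth are made up for illustration:

	struct circ_buf *xmit = &port->state->xmit;
	unsigned int fifo_space = 16;		/* assumed hardware FIFO depth */

	while (!uart_circ_empty(xmit) && fifo_space--) {
		/* MY_TX_FIFO is a hypothetical register offset */
		writeb(xmit->buf[xmit->tail], port->membase + MY_TX_FIFO);
		uart_xmit_advance(port, 1);	/* move tail, bump icount.tx */
	}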
struct module;
struct tty_driver;
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index c255273b0281..37ad81058d6a 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -97,7 +97,10 @@ struct intc_hw_desc {
unsigned int nr_subgroups;
};
-#define _INTC_ARRAY(a) a, __same_type(a, NULL) ? 0 : sizeof(a)/sizeof(*a)
+#define _INTC_SIZEOF_OR_ZERO(a) (_Generic(a, \
+ typeof(NULL): 0, \
+ default: sizeof(a)))
+#define _INTC_ARRAY(a) a, _INTC_SIZEOF_OR_ZERO(a)/sizeof(*a)
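The _Generic selection returns a literal 0 for the untyped NULL, so the count operand folds to a constant without the sizeof(pointer)/sizeof(element) division pattern that the old __same_type() ternary spelled out and that newer compilers flag. A sketch of both expansions, with vectors[] an assumed local array:

	/* _INTC_ARRAY(vectors) -> vectors, sizeof(vectors)/sizeof(*vectors) */
	/* _INTC_ARRAY(NULL)    -> NULL, 0/sizeof(*NULL), i.e. a constant 0  */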
#define INTC_HW_DESC(vectors, groups, mask_regs, \
prio_regs, sense_regs, ack_regs) \
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b04b5bd43f54..302a2ad67980 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -257,6 +257,7 @@ struct nf_bridge_info {
u8 pkt_otherhost:1;
u8 in_prerouting:1;
u8 bridged_dnat:1;
+ u8 sabotage_in_done:1;
__u16 frag_max_size;
struct net_device *physindev;
@@ -659,6 +660,7 @@ typedef unsigned char *sk_buff_data_t;
* @wifi_acked: whether frame was acked on wifi or not
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
* @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
+ * @scm_io_uring: SKB holds io_uring registered files
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
* @napi_id: id of the NAPI struct this skb came from
@@ -824,6 +826,7 @@ struct sk_buff {
#ifdef CONFIG_TLS_DEVICE
__u8 decrypted:1;
#endif
+ __u8 scm_io_uring:1;
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
@@ -2201,6 +2204,14 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+static inline void skb_assert_len(struct sk_buff *skb)
+{
+#ifdef CONFIG_DEBUG_NET
+ if (WARN_ONCE(!skb->len, "%s\n", __func__))
+ DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
+#endif /* CONFIG_DEBUG_NET */
+}
+
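A sketch of the kind of boundary check this helper is meant for, since a zero-length skb handed to a driver indicates a bug; it compiles away without CONFIG_DEBUG_NET, and the surrounding variables are assumed from a core xmit path:

	skb_assert_len(skb);	/* warn once and dump the skb if skb->len == 0 */
	return netdev_start_xmit(skb, dev, txq, false);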
/*
* Add data to an sk_buff
*/
@@ -4216,7 +4227,7 @@ static inline void nf_reset_ct(struct sk_buff *skb)
static inline void nf_reset_trace(struct sk_buff *skb)
{
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
skb->nf_trace = 0;
#endif
}
@@ -4236,7 +4247,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
dst->_nfct = src->_nfct;
nf_conntrack_get(skb_nfct(src));
#endif
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
if (copy)
dst->nf_trace = src->nf_trace;
#endif
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index eb71a50b8afc..d5cad6f7953c 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -39,18 +39,27 @@ struct llcc_slice_desc {
/**
* llcc_slice_config - Data associated with the llcc slice
- * @usecase_id: usecase id for which the llcc slice is used
- * @slice_id: llcc slice id assigned to each slice
- * @max_cap: maximum capacity of the llcc slice
- * @priority: priority of the llcc slice
- * @fixed_size: whether the llcc slice can grow beyond its size
- * @bonus_ways: bonus ways associated with llcc slice
- * @res_ways: reserved ways associated with llcc slice
- * @cache_mode: mode of the llcc slice
- * @probe_target_ways: Probe only reserved and bonus ways on a cache miss
- * @dis_cap_alloc: Disable capacity based allocation
- * @retain_on_pc: Retain through power collapse
- * @activate_on_init: activate the slice on init
+ * @usecase_id: Unique id for the client's use case
+ * @slice_id: llcc slice id for each client
+ * @max_cap: The maximum capacity of the cache slice provided in KB
+ * @priority: Priority of the client used to select victim line for replacement
+ * @fixed_size: Boolean indicating if the slice has a fixed capacity
+ * @bonus_ways: Bonus ways are additional ways to be used for any slice,
+ * if a client ends up using more than its reserved cache ways. Bonus
+ * ways are allocated only if they are not reserved for some
+ * other client.
+ * @res_ways: Reserved ways for the cache slice, the reserved ways cannot
+ * be used by any client other than the one they are assigned to.
+ * @cache_mode: Each slice operates as a cache, this controls the mode of the
+ * slice: normal or TCM (Tightly Coupled Memory)
+ * @probe_target_ways: Determines what ways to probe for access hit. When
+ * configured to 1, only bonus and reserved ways are probed.
+ * When configured to 0, all ways in the LLCC are probed.
+ * @dis_cap_alloc: Disable capacity based allocation for a client
+ * @retain_on_pc: If this bit is set and the client has maintained an active
+ * vote, then the ways assigned to this client are not flushed
+ * on power collapse.
+ * @activate_on_init: Activate the slice immediately after it is programmed
*/
struct llcc_slice_config {
u32 usecase_id;
@@ -154,20 +163,6 @@ int llcc_slice_activate(struct llcc_slice_desc *desc);
*/
int llcc_slice_deactivate(struct llcc_slice_desc *desc);
-/**
- * qcom_llcc_probe - program the sct table
- * @pdev: platform device pointer
- * @table: soc sct table
- * @sz: Size of the config table
- */
-int qcom_llcc_probe(struct platform_device *pdev,
- const struct llcc_slice_config *table, u32 sz);
-
-/**
- * qcom_llcc_remove - remove the sct table
- * @pdev: Platform device pointer
- */
-int qcom_llcc_remove(struct platform_device *pdev);
#else
static inline struct llcc_slice_desc *llcc_slice_getd(u32 uid)
{
@@ -197,16 +192,6 @@ static inline int llcc_slice_deactivate(struct llcc_slice_desc *desc)
{
return -EINVAL;
}
-static inline int qcom_llcc_probe(struct platform_device *pdev,
- const struct llcc_slice_config *table, u32 sz)
-{
- return -ENODEV;
-}
-
-static inline int qcom_llcc_remove(struct platform_device *pdev)
-{
- return -ENODEV;
-}
#endif
#endif
diff --git a/include/linux/soc/ti/ti_sci_inta_msi.h b/include/linux/soc/ti/ti_sci_inta_msi.h
index 11fb5048f5f6..e3aa8b14612e 100644
--- a/include/linux/soc/ti/ti_sci_inta_msi.h
+++ b/include/linux/soc/ti/ti_sci_inta_msi.h
@@ -2,7 +2,7 @@
/*
* Texas Instruments' K3 TI SCI INTA MSI helper
*
- * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
* Lokesh Vutla <lokeshvutla@ti.com>
*/
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h
index 9531ec823298..0fc452dd96d4 100644
--- a/include/linux/soc/ti/ti_sci_protocol.h
+++ b/include/linux/soc/ti/ti_sci_protocol.h
@@ -2,7 +2,7 @@
/*
* Texas Instruments System Control Interface Protocol
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
diff --git a/include/linux/soc/xilinx/zynqmp/fw.h b/include/linux/soc/xilinx/zynqmp/fw.h
new file mode 100644
index 000000000000..98165fa2d1ca
--- /dev/null
+++ b/include/linux/soc/xilinx/zynqmp/fw.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ */
+
+#ifndef __SOC_ZYNQMP_FW_H__
+#define __SOC_ZYNQMP_FW_H__
+
+#include <linux/nvmem-consumer.h>
+
+enum {
+ ZYNQMP_SILICON_V1 = 0,
+ ZYNQMP_SILICON_V2,
+ ZYNQMP_SILICON_V3,
+ ZYNQMP_SILICON_V4,
+};
+
+static inline char *zynqmp_nvmem_get_silicon_version(struct device *dev,
+ const char *cname)
+{
+ struct nvmem_cell *cell;
+ size_t data;
+ char *ret;
+
+ cell = nvmem_cell_get(dev, cname);
+ if (IS_ERR(cell))
+ return ERR_CAST(cell);
+
+ ret = nvmem_cell_read(cell, &data);
+ nvmem_cell_put(cell);
+
+ return ret;
+}
+
+#endif /* __SOC_ZYNQMP_FW_H__ */
diff --git a/include/linux/soc/xilinx/zynqmp/tap_delays.h b/include/linux/soc/xilinx/zynqmp/tap_delays.h
new file mode 100644
index 000000000000..5f2ef35c0d8e
--- /dev/null
+++ b/include/linux/soc/xilinx/zynqmp/tap_delays.h
@@ -0,0 +1,32 @@
+/*
+ * Xilinx Zynq MPSoC Power Management
+ *
+ * Copyright (C) 2016 - 2018, Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#ifdef CONFIG_ARCH_ZYNQMP
+/* API for programming the tap delays */
+void arasan_zynqmp_set_tap_delay(u8 deviceid, u8 itap_delay, u8 otap_delay);
+
+/* API to reset the DLL */
+void zynqmp_dll_reset(u8 deviceid);
+#else
+static inline void arasan_zynqmp_set_tap_delay(u8 deviceid, u8 itap_delay,
+ u8 otap_delay) {}
+static inline void zynqmp_dll_reset(u8 deviceid) {}
+#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 7067f85cef0b..ee930fb0b6c1 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -123,6 +123,7 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
* the spi_master.
* @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
* not using a GPIO line)
+ * @multi_die: Flash device with multiple dies.
* @word_delay_usecs: microsecond delay to be inserted between consecutive
* words of a transfer
*
@@ -152,6 +153,7 @@ struct spi_device {
#define SPI_MODE_1 (0|SPI_CPHA)
#define SPI_MODE_2 (SPI_CPOL|0)
#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)
+#define SPI_MODE_X_MASK (SPI_CPOL|SPI_CPHA)
#define SPI_CS_HIGH 0x04 /* chipselect active high? */
#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */
#define SPI_3WIRE 0x10 /* SI/SO signals shared */
@@ -172,6 +174,7 @@ struct spi_device {
char modalias[SPI_NAME_SIZE];
const char *driver_override;
int cs_gpio; /* LEGACY: chip select gpio */
+ bool multi_die; /* flash with multiple dies */
struct gpio_desc *cs_gpiod; /* chip select gpio desc */
uint8_t word_delay_usecs; /* inter-word delay */
@@ -466,6 +469,19 @@ struct spi_controller {
#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
+ /* Controller may support the data stripe feature when more than one
+ * chip is present.
+ * Setting data stripe will send data in the following manner:
+ * -> even bytes, i.e. 0, 2, 4, ..., are transmitted on the lower data bus
+ * -> odd bytes, i.e. 1, 3, 5, ..., are transmitted on the upper data bus
+ */
+#define SPI_MASTER_QUAD_MODE BIT(6) /* support quad mode */
+#define SPI_MASTER_DATA_STRIPE BIT(7) /* support data stripe */
+ /* Controller may support asserting more than one chip select at once.
+ * This flag will enable that feature.
+ */
+#define SPI_MASTER_BOTH_CS BIT(8) /* assert both chip selects */
+#define SPI_MASTER_U_PAGE BIT(9) /* select upper flash */
/* flag indicating this is a non-devres managed controller */
bool devm_allocated;
@@ -756,6 +772,7 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @len: size of rx and tx buffers (in bytes)
* @speed_hz: Select a speed other than the device default for this
* transfer. If 0 the default (from @spi_device) is used.
+ * @dummy: number of dummy cycles.
* @bits_per_word: select a bits_per_word other than the device default
* for this transfer. If 0 the default (from @spi_device) is used.
* @cs_change: affects chipselect after this transfer completes
@@ -775,6 +792,7 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @transfer_list: transfers are sequenced through @spi_message.transfers
* @tx_sg: Scatterlist for transmit, currently not for client use
* @rx_sg: Scatterlist for receive, currently not for client use
+ * @stripe: true to enable data striping, false to disable it.
*
* SPI transfers always write the same number of bytes as they read.
* Protocol drivers should always provide @rx_buf and/or @tx_buf.
@@ -861,7 +879,8 @@ struct spi_transfer {
#define SPI_DELAY_UNIT_SCK 2
u32 speed_hz;
u16 word_delay;
-
+ u32 dummy;
+ bool stripe;
u32 effective_speed_hz;
struct list_head transfer_list;
@@ -1337,7 +1356,6 @@ struct spi_board_info {
/* slower signaling on noisy or low voltage boards */
u32 max_speed_hz;
-
/* bus_num is board specific and matches the bus_num of some
* spi_controller that will probably be registered later.
*
@@ -1417,6 +1435,9 @@ of_find_spi_device_by_node(struct device_node *node)
#endif /* IS_ENABLED(CONFIG_OF) */
+bool update_stripe(const u8 opcode);
+
/* Compatibility layer */
#define spi_master spi_controller
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 0b35747c9837..88b107e20fc7 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -178,6 +178,7 @@ struct plat_stmmacenet_data {
int rss_en;
int mac_port_sel_speed;
bool en_tx_lpi_clockgating;
+ bool rx_clk_runs_in_lpi;
int has_xgmac;
bool sph_disable;
};
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index c28955132234..86f150c2a6b6 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_STRING_HELPERS_H_
#define _LINUX_STRING_HELPERS_H_
+#include <linux/ctype.h>
#include <linux/types.h>
struct file;
@@ -75,6 +76,20 @@ static inline int string_escape_str_any_np(const char *src, char *dst,
return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only);
}
+static inline void string_upper(char *dst, const char *src)
+{
+ do {
+ *dst++ = toupper(*src);
+ } while (*src++);
+}
+
+static inline void string_lower(char *dst, const char *src)
+{
+ do {
+ *dst++ = tolower(*src);
+ } while (*src++);
+}
+
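Both helpers copy up to and including the terminating NUL and are not bounded, so dst must provide at least strlen(src) + 1 bytes. A minimal sketch:

	char name[8];

	string_upper(name, "sdhci");	/* name = "SDHCI" */
	string_lower(name, "SDHCI");	/* name = "sdhci" */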
char *kstrdup_quotable(const char *src, gfp_t gfp);
char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
char *kstrdup_quotable_file(struct file *file, gfp_t gfp);
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index e90b9bd99ded..396de2ef8767 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -94,6 +94,11 @@ extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *,
char __user *, size_t);
extern int rpc_queue_upcall(struct rpc_pipe *, struct rpc_pipe_msg *);
+/* returns true if the msg is in-flight, i.e., already eaten by the peer */
+static inline bool rpc_msg_is_inflight(const struct rpc_pipe_msg *msg)
+{
+ return (msg->copied != 0 && list_empty(&msg->list));
+}
+
struct rpc_clnt;
extern struct dentry *rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *);
extern int rpc_remove_client_dir(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index a6ef35184ef1..5c37fabdec10 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -90,8 +90,7 @@ struct rpc_task {
#endif
unsigned char tk_priority : 2,/* Task priority */
tk_garb_retry : 2,
- tk_cred_retry : 2,
- tk_rebind_retry : 2;
+ tk_cred_retry : 2;
};
typedef void (*rpc_action)(struct rpc_task *);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index f7c561c4dcdd..36d57654f178 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -120,6 +120,7 @@ struct clone_args;
#define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL))
#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
#define __SC_CAST(t, a) (__force t) a
+#define __SC_TYPE(t, a) t
#define __SC_ARGS(t, a) a
#define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 6df477329b76..aa615a0863f5 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -208,6 +208,9 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
void unregister_sysctl_table(struct ctl_table_header * table);
extern int sysctl_init(void);
+extern void __register_sysctl_init(const char *path, struct ctl_table *table,
+ const char *table_name);
+#define register_sysctl_init(path, table) __register_sysctl_init(path, table, #table)
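The macro stringifies the table name so registration failures can be reported; it is intended for boot-time tables that are never unregistered. A sketch, with all names hypothetical:

	static int demo_knob;

	static struct ctl_table demo_table[] = {
		{
			.procname	= "demo_knob",
			.data		= &demo_knob,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};

	register_sysctl_init("kernel", demo_table);	/* passes "demo_table" as table_name */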
extern struct ctl_table sysctl_mount_point[];
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 358deb4ff830..68dacc199437 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -258,7 +258,7 @@ struct tcp_sock {
u32 packets_out; /* Packets which are "in flight" */
u32 retrans_out; /* Retransmitted packets out */
u32 max_packets_out; /* max packets_out in last window */
- u32 max_packets_seq; /* right edge of max_packets_out flight */
+ u32 cwnd_usage_seq; /* right edge of cwnd usage tracking flight */
u16 urg_data; /* Saved octet of OOB data and control flags */
u8 ecn_flags; /* ECN status bits. */
@@ -458,7 +458,7 @@ static inline void fastopen_queue_tune(struct sock *sk, int backlog)
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);
- queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
+ WRITE_ONCE(queue->fastopenq.max_qlen, min_t(unsigned int, backlog, somaxconn));
}
static inline void tcp_move_syn(struct tcp_sock *tp,
diff --git a/include/linux/tick.h b/include/linux/tick.h
index f92a10b5e112..cf6c92060929 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -108,7 +108,8 @@ enum tick_dep_bits {
TICK_DEP_BIT_POSIX_TIMER = 0,
TICK_DEP_BIT_PERF_EVENTS = 1,
TICK_DEP_BIT_SCHED = 2,
- TICK_DEP_BIT_CLOCK_UNSTABLE = 3
+ TICK_DEP_BIT_CLOCK_UNSTABLE = 3,
+ TICK_DEP_BIT_RCU = 4
};
#define TICK_DEP_MASK_NONE 0
@@ -116,6 +117,7 @@ enum tick_dep_bits {
#define TICK_DEP_MASK_PERF_EVENTS (1 << TICK_DEP_BIT_PERF_EVENTS)
#define TICK_DEP_MASK_SCHED (1 << TICK_DEP_BIT_SCHED)
#define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
+#define TICK_DEP_MASK_RCU (1 << TICK_DEP_BIT_RCU)
#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
@@ -206,6 +208,7 @@ extern void tick_nohz_dep_set_signal(struct signal_struct *signal,
enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
enum tick_dep_bits bit);
+extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);
/*
* The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
@@ -268,6 +271,10 @@ static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
+static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
+static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
+static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }
+
static inline void tick_dep_set(enum tick_dep_bits bit) { }
static inline void tick_dep_clear(enum tick_dep_bits bit) { }
static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
index 93884086f392..adc80e29168e 100644
--- a/include/linux/timerqueue.h
+++ b/include/linux/timerqueue.h
@@ -35,7 +35,7 @@ struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
{
struct rb_node *leftmost = rb_first_cached(&head->rb_root);
- return rb_entry(leftmost, struct timerqueue_node, node);
+ return rb_entry_safe(leftmost, struct timerqueue_node, node);
}
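With rb_entry_safe(), an empty queue now yields NULL rather than a bogus pointer computed from the NULL leftmost node, so callers can test the result directly. A sketch, assuming a struct timerqueue_head head:

	struct timerqueue_node *next = timerqueue_getnext(&head);
	ktime_t expires;

	if (!next)
		return;		/* empty queue now maps to NULL */
	expires = next->expires;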
static inline void timerqueue_init(struct timerqueue_node *node)
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
index f3caeeb7a0d0..6c79070ec11a 100644
--- a/include/linux/tpm_eventlog.h
+++ b/include/linux/tpm_eventlog.h
@@ -157,7 +157,7 @@ struct tcg_algorithm_info {
* Return: size of the event on success, 0 on failure
*/
-static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
struct tcg_pcr_event *event_header,
bool do_mapping)
{
@@ -198,8 +198,8 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
* The loop below will unmap these fields if the log is larger than
* one page, so save them here for reference:
*/
- count = READ_ONCE(event->count);
- event_type = READ_ONCE(event->event_type);
+ count = event->count;
+ event_type = event->event_type;
/* Verify that it's the log header */
if (event_header->pcr_idx != 0 ||
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 30a8cdcfd4a4..b8b87e7ba93f 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -341,6 +341,7 @@ enum {
EVENT_FILE_FL_TRIGGER_COND_BIT,
EVENT_FILE_FL_PID_FILTER_BIT,
EVENT_FILE_FL_WAS_ENABLED_BIT,
+ EVENT_FILE_FL_FREED_BIT,
};
/*
@@ -357,6 +358,7 @@ enum {
* TRIGGER_COND - When set, one or more triggers has an associated filter
* PID_FILTER - When set, the event is filtered based on pid
* WAS_ENABLED - Set when enabled to know to clear trace on module removal
+ * FREED - File descriptor is freed, all fields should be considered invalid
*/
enum {
EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
@@ -370,6 +372,7 @@ enum {
EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
+ EVENT_FILE_FL_FREED = (1 << EVENT_FILE_FL_FREED_BIT),
};
struct trace_event_file {
@@ -398,6 +401,7 @@ struct trace_event_file {
* caching and such. Which is mostly OK ;-)
*/
unsigned long flags;
+ atomic_t ref; /* ref count for opened files */
atomic_t sm_ref; /* soft-mode reference counter */
atomic_t tm_ref; /* trigger-mode reference counter */
};
@@ -594,7 +598,8 @@ extern int perf_uprobe_init(struct perf_event *event,
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
u32 *fd_type, const char **filename,
- u64 *probe_offset, bool perf_type_tracepoint);
+ u64 *probe_offset, u64 *probe_addr,
+ bool perf_type_tracepoint);
#endif
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index eede1a7c8195..bee7573f40f5 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -231,12 +231,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* not add unwanted padding between the beginning of the section and the
* structure. Force alignment to the same alignment as the section start.
*
- * When lockdep is enabled, we make sure to always do the RCU portions of
- * the tracepoint code, regardless of whether tracing is on. However,
- * don't check if the condition is false, due to interaction with idle
- * instrumentation. This lets us find RCU issues triggered with tracepoints
- * even when this tracepoint is off. This code has no purpose other than
- * poking RCU a bit.
+ * When lockdep is enabled, we make sure to always test if RCU is
+ * "watching", regardless of whether the tracepoint is enabled. Tracepoints
+ * require RCU to be active, and it should always warn at the tracepoint
+ * site if it is not watching, as it will need to be active when the
+ * tracepoint is enabled.
*/
#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
extern struct tracepoint __tracepoint_##name; \
@@ -248,9 +247,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
TP_ARGS(data_args), \
TP_CONDITION(cond), 0); \
if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
- rcu_read_lock_sched_notrace(); \
- rcu_dereference_sched(__tracepoint_##name.funcs);\
- rcu_read_unlock_sched_notrace(); \
+ WARN_ON_ONCE(!rcu_is_watching()); \
} \
} \
__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \
diff --git a/include/linux/tty.h b/include/linux/tty.h
index eb33d948788c..8be25caa97f6 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -480,6 +480,8 @@ extern void __stop_tty(struct tty_struct *tty);
extern void stop_tty(struct tty_struct *tty);
extern void __start_tty(struct tty_struct *tty);
extern void start_tty(struct tty_struct *tty);
+void tty_write_unlock(struct tty_struct *tty);
+int tty_write_lock(struct tty_struct *tty, int ndelay);
extern int tty_register_driver(struct tty_driver *driver);
extern int tty_unregister_driver(struct tty_driver *driver);
extern struct device *tty_register_device(struct tty_driver *driver,
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 38555435a64a..70941f49d66e 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -287,6 +287,10 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
size_t size = min(ksize, usize);
size_t rest = max(ksize, usize) - size;
+ /* Double-check that ksize is not larger than the known object size. */
+ if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
+ return -E2BIG;
+
/* Deal with trailing bytes. */
if (usize < ksize) {
memset(dst + size, 0, rest);
diff --git a/include/linux/units.h b/include/linux/units.h
new file mode 100644
index 000000000000..a0af6d2ef4e5
--- /dev/null
+++ b/include/linux/units.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UNITS_H
+#define _LINUX_UNITS_H
+
+#include <linux/kernel.h>
+
+#define HZ_PER_KHZ 1000UL
+#define KHZ_PER_MHZ 1000UL
+#define HZ_PER_MHZ 1000000UL
+
+#define MILLIWATT_PER_WATT 1000UL
+#define MICROWATT_PER_MILLIWATT 1000UL
+#define MICROWATT_PER_WATT 1000000UL
+
+#define ABSOLUTE_ZERO_MILLICELSIUS -273150
+
+static inline long milli_kelvin_to_millicelsius(long t)
+{
+ return t + ABSOLUTE_ZERO_MILLICELSIUS;
+}
+
+static inline long millicelsius_to_milli_kelvin(long t)
+{
+ return t - ABSOLUTE_ZERO_MILLICELSIUS;
+}
+
+#define MILLIDEGREE_PER_DEGREE 1000
+#define MILLIDEGREE_PER_DECIDEGREE 100
+
+static inline long kelvin_to_millicelsius(long t)
+{
+ return milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long millicelsius_to_kelvin(long t)
+{
+ t = millicelsius_to_milli_kelvin(t);
+
+ return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long deci_kelvin_to_celsius(long t)
+{
+ t = milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DECIDEGREE);
+
+ return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long celsius_to_deci_kelvin(long t)
+{
+ t = millicelsius_to_milli_kelvin(t * MILLIDEGREE_PER_DEGREE);
+
+ return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DECIDEGREE);
+}
+
+/**
+ * deci_kelvin_to_millicelsius_with_offset - convert Kelvin to Celsius
+ * @t: temperature value in decidegrees Kelvin
+ * @offset: difference between Kelvin and Celsius in millidegrees
+ *
+ * Return: temperature value in millidegrees Celsius
+ */
+static inline long deci_kelvin_to_millicelsius_with_offset(long t, long offset)
+{
+ return t * MILLIDEGREE_PER_DECIDEGREE - offset;
+}
+
+static inline long deci_kelvin_to_millicelsius(long t)
+{
+ return milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DECIDEGREE);
+}
+
+static inline long millicelsius_to_deci_kelvin(long t)
+{
+ t = millicelsius_to_milli_kelvin(t);
+
+ return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DECIDEGREE);
+}
+
+static inline long kelvin_to_celsius(long t)
+{
+ return t + DIV_ROUND_CLOSEST(ABSOLUTE_ZERO_MILLICELSIUS,
+ MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long celsius_to_kelvin(long t)
+{
+ return t - DIV_ROUND_CLOSEST(ABSOLUTE_ZERO_MILLICELSIUS,
+ MILLIDEGREE_PER_DEGREE);
+}
+
+#endif /* _LINUX_UNITS_H */
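Worked example of the rounding behaviour (DIV_ROUND_CLOSEST rounds to the nearest unit):

	/* 3731 dK = 373100 mK = 99950 m°C, which rounds to 100 °C */
	long c = deci_kelvin_to_celsius(3731);	/* c == 100 */

	/* whole-degree Kelvin/Celsius conversions use the rounded offset 273 */
	long k = celsius_to_kelvin(27);		/* k == 300 */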
diff --git a/include/linux/usb.h b/include/linux/usb.h
index e656e7b4b1e4..abcf1ce9bb06 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -279,6 +279,11 @@ void usb_put_intf(struct usb_interface *intf);
#define USB_MAXINTERFACES 32
#define USB_MAXIADS (USB_MAXINTERFACES/2)
+bool usb_check_bulk_endpoints(
+ const struct usb_interface *intf, const u8 *ep_addrs);
+bool usb_check_int_endpoints(
+ const struct usb_interface *intf, const u8 *ep_addrs);
+
/*
* USB Resume Timer: Every Host controller driver should drive the resume
* signalling on the bus for the amount of time defined by this macro.
@@ -580,6 +585,7 @@ struct usb3_lpm_parameters {
* @devaddr: device address, XHCI: assigned by HW, others: same as devnum
* @can_submit: URBs may be submitted
* @persist_enabled: USB_PERSIST enabled for this device
+ * @reset_in_progress: the device is being reset
* @have_langid: whether string_langid is valid
* @authorized: policy has said we can use it;
* (user space) policy determines if we authorize this device to be
@@ -665,6 +671,7 @@ struct usb_device {
unsigned can_submit:1;
unsigned persist_enabled:1;
+ unsigned reset_in_progress:1;
unsigned have_langid:1;
unsigned authorized:1;
unsigned authenticated:1;
@@ -749,11 +756,14 @@ extern void usb_queue_reset_device(struct usb_interface *dev);
extern int usb_acpi_set_power_state(struct usb_device *hdev, int index,
bool enable);
extern bool usb_acpi_power_manageable(struct usb_device *hdev, int index);
+extern int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index);
#else
static inline int usb_acpi_set_power_state(struct usb_device *hdev, int index,
bool enable) { return 0; }
static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
{ return true; }
+static inline int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index)
+ { return 0; }
#endif
/* USB autosuspend and autoresume */
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 1646c06989df..0ce4377545f8 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -46,9 +46,12 @@
#define CDC_NCM_DATA_ALTSETTING_NCM 1
#define CDC_NCM_DATA_ALTSETTING_MBIM 2
-/* CDC NCM subclass 3.2.1 */
+/* CDC NCM subclass 3.3.1 */
#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
+/* CDC NCM subclass 3.3.2 */
+#define USB_CDC_NCM_NDP32_LENGTH_MIN 0x20
+
/* Maximum NTB length */
#define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */
#define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */
@@ -84,7 +87,7 @@
/* Driver flags */
#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
#define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE 0x04 /* Avoid altsetting toggle during init */
-#define CDC_NCM_FLAG_RESET_NTB16 0x08 /* set NDP16 one more time after altsetting switch */
+#define CDC_NCM_FLAG_PREFER_NTB32 0x08 /* prefer NDP32 over NDP16 */
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
@@ -113,7 +116,11 @@ struct cdc_ncm_ctx {
u32 timer_interval;
u32 max_ndp_size;
- struct usb_cdc_ncm_ndp16 *delayed_ndp16;
+ u8 is_ndp16;
+ union {
+ struct usb_cdc_ncm_ndp16 *delayed_ndp16;
+ struct usb_cdc_ncm_ndp32 *delayed_ndp32;
+ };
u32 tx_timer_pending;
u32 tx_curr_frame_num;
@@ -150,6 +157,8 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset);
+int cdc_ncm_rx_verify_nth32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
+int cdc_ncm_rx_verify_ndp32(struct sk_buff *skb_in, int ndpoffset);
struct sk_buff *
cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags);
int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in);
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index edd89b7c8f18..ddc9d1dc9e06 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -62,6 +62,7 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13)
#define CI_HDRC_IMX_IS_HSIC BIT(14)
#define CI_HDRC_PMQOS BIT(15)
+#define CI_HDRC_PHY_VBUS_CONTROL BIT(16)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index c0eb85b2981e..0cfd540e7d06 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -66,6 +66,7 @@
struct giveback_urb_bh {
bool running;
+ bool high_prio;
spinlock_t lock;
struct list_head head;
struct tasklet_struct bh;
@@ -502,6 +503,11 @@ void *hcd_buffer_alloc(struct usb_bus *bus, size_t size,
void hcd_buffer_free(struct usb_bus *bus, size_t size,
void *addr, dma_addr_t dma);
+void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
+ size_t size, gfp_t mem_flags, dma_addr_t *dma);
+void hcd_buffer_free_pages(struct usb_hcd *hcd,
+ size_t size, void *addr, dma_addr_t dma);
+
/* generic bus glue, needed for host controllers that don't use PCI */
extern irqreturn_t usb_hcd_irq(int irq, void *__hcd);
diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
index 9a88c74a1d0d..969b7c504087 100644
--- a/include/linux/usb/typec_altmode.h
+++ b/include/linux/usb/typec_altmode.h
@@ -67,7 +67,7 @@ struct typec_altmode_ops {
int typec_altmode_enter(struct typec_altmode *altmode);
int typec_altmode_exit(struct typec_altmode *altmode);
-void typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
+int typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
int typec_altmode_vdm(struct typec_altmode *altmode,
const u32 header, const u32 *vdo, int count);
int typec_altmode_notify(struct typec_altmode *altmode, unsigned long conf,
diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
index fc4c7edb2e8a..296909ea04f2 100644
--- a/include/linux/usb/typec_dp.h
+++ b/include/linux/usb/typec_dp.h
@@ -73,6 +73,11 @@ enum {
#define DP_CAP_USB BIT(7)
#define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8)
#define DP_CAP_UFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(23, 16)) >> 16)
+/* Get pin assignment taking plug & receptacle into consideration */
+#define DP_CAP_PIN_ASSIGN_UFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
+ DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : DP_CAP_DFP_D_PIN_ASSIGN(_cap_))
+#define DP_CAP_PIN_ASSIGN_DFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
+ DP_CAP_DFP_D_PIN_ASSIGN(_cap_) : DP_CAP_UFP_D_PIN_ASSIGN(_cap_))
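A hedged sketch of reading a partner's DisplayPort Capability VDO without caring whether the partner exposes a plug or a receptacle; the origin and value of cap are assumed:

	u32 cap = partner_dp_vdo;			/* assumed: Capability VDO from discovery */
	u8 dfp_pins = DP_CAP_PIN_ASSIGN_DFP_D(cap);	/* bitmask of supported pin assignments */
	u8 ufp_pins = DP_CAP_PIN_ASSIGN_UFP_D(cap);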
/* DisplayPort Status Update VDO bits */
#define DP_STATUS_CONNECTION(_status_) ((_status_) & 3)
diff --git a/include/linux/usb/xhci_pdriver.h b/include/linux/usb/xhci_pdriver.h
new file mode 100644
index 000000000000..af8043181395
--- /dev/null
+++ b/include/linux/usb/xhci_pdriver.h
@@ -0,0 +1,29 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __USB_CORE_XHCI_PDRIVER_H
+#define __USB_CORE_XHCI_PDRIVER_H
+
+/* Call dwc3_host_wakeup_capable() only for dwc3 DRD mode or HOST only mode */
+#if (IS_REACHABLE(CONFIG_USB_DWC3_HOST) || \
+ (IS_REACHABLE(CONFIG_USB_DWC3_OF_SIMPLE) && \
+ !IS_REACHABLE(CONFIG_USB_DWC3_GADGET)))
+
+ /* Let the dwc3 driver know about device wakeup capability */
+void dwc3_host_wakeup_capable(struct device *dev, bool wakeup);
+
+#else
+static inline void dwc3_host_wakeup_capable(struct device *dev, bool wakeup)
+{ }
+#endif
+
+#endif /* __USB_CORE_XHCI_PDRIVER_H */
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index a960de68ac69..6047058d6703 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -148,6 +148,10 @@ retry:
if (gso_type & SKB_GSO_UDP)
nh_off -= thlen;
+ /* Kernel has a special handling for GSO_BY_FRAGS. */
+ if (gso_size == GSO_BY_FRAGS)
+ return -EINVAL;
+
/* Too small packets are not really GSO ones. */
if (skb->len - nh_off > gso_size) {
shinfo->gso_size = gso_size;
diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h
index 848db1b1569f..919d999a8c1d 100644
--- a/include/linux/vt_buffer.h
+++ b/include/linux/vt_buffer.h
@@ -16,7 +16,7 @@
#include <linux/string.h>
-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
+#if IS_ENABLED(CONFIG_VGA_CONSOLE) || IS_ENABLED(CONFIG_MDA_CONSOLE)
#include <asm/vga.h>
#endif
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 5903b1d17c92..03bff85e365f 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -20,6 +20,8 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int
#define WQ_FLAG_EXCLUSIVE 0x01
#define WQ_FLAG_WOKEN 0x02
#define WQ_FLAG_BOOKMARK 0x04
+#define WQ_FLAG_CUSTOM 0x08
+#define WQ_FLAG_DONE 0x10
/*
* A single wait-queue entry structure:
@@ -529,10 +531,11 @@ do { \
\
hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
HRTIMER_MODE_REL); \
- if ((timeout) != KTIME_MAX) \
- hrtimer_start_range_ns(&__t.timer, timeout, \
- current->timer_slack_ns, \
- HRTIMER_MODE_REL); \
+ if ((timeout) != KTIME_MAX) { \
+ hrtimer_set_expires_range_ns(&__t.timer, timeout, \
+ current->timer_slack_ns); \
+ hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \
+ } \
\
__ret = ___wait_event(wq_head, condition, state, 0, 0, \
if (!__t.task) { \
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4261d1c6e87b..887f0b94d6e9 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -73,7 +73,6 @@ enum {
WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,
__WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE,
- WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING),
/*
* When a work item is off queue, its high bits point to the last
@@ -84,12 +83,6 @@ enum {
WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
- WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1,
-
- /* convenience constants */
- WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
- WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
- WORK_STRUCT_NO_POOL = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,
/* bit mask for work_busy() return values */
WORK_BUSY_PENDING = 1 << 0,
@@ -99,6 +92,14 @@ enum {
WORKER_DESC_LEN = 24,
};
+/* Convenience constants - of type 'unsigned long', not 'enum'! */
+#define WORK_OFFQ_CANCELING (1ul << __WORK_OFFQ_CANCELING)
+#define WORK_OFFQ_POOL_NONE ((1ul << WORK_OFFQ_POOL_BITS) - 1)
+#define WORK_STRUCT_NO_POOL (WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
+
+#define WORK_STRUCT_FLAG_MASK ((1ul << WORK_STRUCT_FLAG_BITS) - 1)
+#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
+
struct work_struct {
atomic_long_t data;
struct list_head entry;
diff --git a/include/linux/xilinx_phy.h b/include/linux/xilinx_phy.h
new file mode 100644
index 000000000000..34a048f7dbe6
--- /dev/null
+++ b/include/linux/xilinx_phy.h
@@ -0,0 +1,20 @@
+#ifndef _XILINX_PHY_H
+#define _XILINX_PHY_H
+
+/* Mask used for ID comparisons */
+#define XILINX_PHY_ID_MASK 0xfffffff0
+
+/* Known PHY IDs */
+#define XILINX_PHY_ID 0x01740c00
+
+/* struct phy_device dev_flags definitions */
+#define XAE_PHY_TYPE_MII 0
+#define XAE_PHY_TYPE_GMII 1
+#define XAE_PHY_TYPE_RGMII_1_3 2
+#define XAE_PHY_TYPE_RGMII_2_0 3
+#define XAE_PHY_TYPE_SGMII 4
+#define XAE_PHY_TYPE_1000BASE_X 5
+#define XAE_PHY_TYPE_2500 6
+#define XXE_PHY_TYPE_USXGMII 7
+
+#endif /* _XILINX_PHY_H */
diff --git a/include/media/dvb_net.h b/include/media/dvb_net.h
index 5e31d37f25fa..cc01dffcc9f3 100644
--- a/include/media/dvb_net.h
+++ b/include/media/dvb_net.h
@@ -41,6 +41,9 @@
* @exit: flag to indicate when the device is being removed.
* @demux: pointer to &struct dmx_demux.
* @ioctl_mutex: protect access to this struct.
+ * @remove_mutex: mutex that avoids a race condition between a callback
+ * called when the hardware is disconnected and the
+ * file_operations of dvb_net.
*
* Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network
* devices.
@@ -53,6 +56,7 @@ struct dvb_net {
unsigned int exit:1;
struct dmx_demux *demux;
struct mutex ioctl_mutex;
+ struct mutex remove_mutex;
};
/**
diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h
index 551325858de3..3745a13f6e08 100644
--- a/include/media/dvbdev.h
+++ b/include/media/dvbdev.h
@@ -126,6 +126,7 @@ struct dvb_adapter {
* struct dvb_device - represents a DVB device node
*
* @list_head: List head with all DVB devices
+ * @ref: reference counter
* @fops: pointer to struct file_operations
* @adapter: pointer to the adapter that holds this device node
* @type: type of the device, as defined by &enum dvb_device_type.
@@ -156,6 +157,7 @@ struct dvb_adapter {
*/
struct dvb_device {
struct list_head list_head;
+ struct kref ref;
const struct file_operations *fops;
struct dvb_adapter *adapter;
enum dvb_device_type type;
@@ -188,6 +190,35 @@ struct dvb_device {
};
/**
+ * struct dvbdevfops_node - fops nodes registered in dvbdevfops_list
+ *
+ * @fops: Dynamically allocated fops for ->owner registration
+ * @type: type of dvb_device
+ * @template: dvb_device used for registration
+ * @list_head: list_head for dvbdevfops_list
+ */
+struct dvbdevfops_node {
+ struct file_operations *fops;
+ enum dvb_device_type type;
+ const struct dvb_device *template;
+ struct list_head list_head;
+};
+
+/**
+ * dvb_device_get - Increase dvb_device reference
+ *
+ * @dvbdev: pointer to struct dvb_device
+ */
+struct dvb_device *dvb_device_get(struct dvb_device *dvbdev);
+
+/**
+ * dvb_device_put - Decrease dvb_device reference
+ *
+ * @dvbdev: pointer to struct dvb_device
+ */
+void dvb_device_put(struct dvb_device *dvbdev);
+
+/**
* dvb_register_adapter - Registers a new DVB adapter
*
* @adap: pointer to struct dvb_adapter
@@ -231,29 +262,17 @@ int dvb_register_device(struct dvb_adapter *adap,
/**
* dvb_remove_device - Remove a registered DVB device
*
- * This does not free memory. To do that, call dvb_free_device().
+ * This does not free memory. dvb_free_device() will do that when
+ * the reference counter drops to zero.
*
* @dvbdev: pointer to struct dvb_device
*/
void dvb_remove_device(struct dvb_device *dvbdev);
-/**
- * dvb_free_device - Free memory occupied by a DVB device.
- *
- * Call dvb_unregister_device() before calling this function.
- *
- * @dvbdev: pointer to struct dvb_device
- */
-void dvb_free_device(struct dvb_device *dvbdev);
/**
* dvb_unregister_device - Unregisters a DVB device
*
- * This is a combination of dvb_remove_device() and dvb_free_device().
- * Using this function is usually a mistake, and is often an indicator
- * for a use-after-free bug (when a userspace process keeps a file
- * handle to a detached device).
- *
* @dvbdev: pointer to struct dvb_device
*/
void dvb_unregister_device(struct dvb_device *dvbdev);
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 8cb2c504a05c..96310de47a25 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -205,6 +205,9 @@ struct media_pad {
* @link_validate: Return whether a link is valid from the entity point of
* view. The media_pipeline_start() function
* validates all links by calling this operation. Optional.
+ * @has_route: Return whether a route exists inside the entity between
+ * two given pads. Optional. If the operation isn't
+ * implemented, all pads will be considered connected.
*
* .. note::
*
@@ -217,6 +220,8 @@ struct media_entity_operations {
const struct media_pad *local,
const struct media_pad *remote, u32 flags);
int (*link_validate)(struct media_link *link);
+ bool (*has_route)(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1);
};
/**
@@ -879,6 +884,9 @@ int media_entity_get_fwnode_pad(struct media_entity *entity,
struct fwnode_handle *fwnode,
unsigned long direction_flags);
+bool media_entity_has_route(struct media_entity *entity, unsigned int sink,
+ unsigned int source);
+
/**
* media_graph_walk_init - Allocate resources used by graph walk.
*
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index c070d8ae11e5..c2e9660c4d75 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -174,7 +174,8 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
*
* @sd: pointer to &struct v4l2_subdev
* @client: pointer to struct i2c_client
- * @devname: the name of the device; if NULL, the I²C device's name will be used
+ * @devname: the name of the device; if NULL, the I²C device driver's name
+ * will be used
* @postfix: sub-device specific string to put right after the I²C device name;
* may be NULL
*/
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 48531e57cc5a..5e7c0f8acd05 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -24,7 +24,8 @@
/**
* enum vfl_devnode_type - type of V4L2 device node
*
- * @VFL_TYPE_GRABBER: for video input/output devices
+ * @VFL_TYPE_VIDEO: for video input/output devices
+ * @VFL_TYPE_GRABBER: deprecated, same as VFL_TYPE_VIDEO
* @VFL_TYPE_VBI: for vertical blank data (i.e. closed captions, teletext)
* @VFL_TYPE_RADIO: for radio tuners
* @VFL_TYPE_SUBDEV: for V4L2 subdevices
@@ -33,7 +34,8 @@
* @VFL_TYPE_MAX: number of VFL types, must always be last in the enum
*/
enum vfl_devnode_type {
- VFL_TYPE_GRABBER = 0,
+ VFL_TYPE_VIDEO,
+ VFL_TYPE_GRABBER = VFL_TYPE_VIDEO,
VFL_TYPE_VBI,
VFL_TYPE_RADIO,
VFL_TYPE_SUBDEV,
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 0b9c3a287061..57b48c33f56c 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -401,7 +401,14 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
- return m2m_ctx->out_q_ctx.num_rdy;
+ unsigned int num_buf_rdy;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+ num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+
+ return num_buf_rdy;
}
/**
@@ -413,7 +420,14 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
- return m2m_ctx->cap_q_ctx.num_rdy;
+ unsigned int num_buf_rdy;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
+ num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
+ spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
+
+ return num_buf_rdy;
}
/**
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index d4ac251b34fe..ff0605238ad3 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -714,6 +714,10 @@ struct v4l2_subdev_pad_ops {
struct v4l2_mbus_frame_desc *fd);
int (*set_frame_desc)(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_mbus_frame_desc *fd);
+ int (*get_routing)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_routing *route);
+ int (*set_routing)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_routing *route);
};
/**
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 880e609b7352..32685eaba28f 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -31,17 +31,22 @@ struct prefix_info {
__u8 length;
__u8 prefix_len;
+ union __packed {
+ __u8 flags;
+ struct __packed {
#if defined(__BIG_ENDIAN_BITFIELD)
- __u8 onlink : 1,
+ __u8 onlink : 1,
autoconf : 1,
reserved : 6;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 reserved : 6,
+ __u8 reserved : 6,
autoconf : 1,
onlink : 1;
#else
#error "Please fix <asm/byteorder.h>"
#endif
+ };
+ };
__be32 valid;
__be32 prefered;
__be32 reserved2;
@@ -49,6 +54,9 @@ struct prefix_info {
struct in6_addr prefix;
};
+/* rfc4861 4.6.2: IPv6 PIO is 32 bytes in size */
+static_assert(sizeof(struct prefix_info) == 32);
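The anonymous union lets ndisc code read the whole flags octet in one access while keeping the existing bitfield names valid. A sketch, with pinfo assumed to point at a received, validated prefix_info:

	u8 raw = pinfo->flags;		/* whole octet at once */
	bool onlink = pinfo->onlink;	/* same storage, bitfield view */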
+
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/if_inet6.h>
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 3426d6dacc45..6cb5026cf727 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -43,12 +43,6 @@ struct unix_skb_parms {
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
-#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
-#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
-#define unix_state_lock_nested(s) \
- spin_lock_nested(&unix_sk(s)->lock, \
- SINGLE_DEPTH_NESTING)
-
/* The AF_UNIX socket */
struct unix_sock {
/* WARNING: sk has to be the first member */
@@ -72,6 +66,20 @@ static inline struct unix_sock *unix_sk(const struct sock *sk)
return (struct unix_sock *)sk;
}
+#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
+#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
+enum unix_socket_lock_class {
+ U_LOCK_NORMAL,
+ U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */
+ U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
+};
+
+static inline void unix_state_lock_nested(struct sock *sk,
+ enum unix_socket_lock_class subclass)
+{
+ spin_lock_nested(&unix_sk(sk)->lock, subclass);
+}
+
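The explicit subclass replaces the old blanket SINGLE_DEPTH_NESTING. When two sockets must be locked, the pointer comparison fixes the order and the second lock takes U_LOCK_SECOND, the pattern used by unix_state_double_lock():

	if (sk1 > sk2)
		swap(sk1, sk2);

	unix_state_lock(sk1);
	unix_state_lock_nested(sk2, U_LOCK_SECOND);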
#define peer_wait peer_wq.wait
long unix_inq_len(struct sock *sk);
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index ecad25900ad7..26983d26af19 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -219,7 +219,7 @@ struct hci_dev {
struct list_head list;
struct mutex lock;
- char name[8];
+ const char *name;
unsigned long flags;
__u16 id;
__u8 bus;
@@ -673,7 +673,6 @@ void hci_inquiry_cache_flush(struct hci_dev *hdev);
/* ----- HCI Connections ----- */
enum {
HCI_CONN_AUTH_PEND,
- HCI_CONN_REAUTH_PEND,
HCI_CONN_ENCRYPT_PEND,
HCI_CONN_RSWITCH_PEND,
HCI_CONN_MODE_CHANGE_PEND,
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 240786b04a46..e22c289503b5 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -54,7 +54,7 @@ struct hci_mon_new_index {
__u8 type;
__u8 bus;
bdaddr_t bdaddr;
- char name[8];
+ char name[8] __nonstring;
} __packed;
#define HCI_MON_NEW_INDEX_SIZE 16
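
Marking hci_mon_new_index.name as __nonstring documents that the field is a fixed-width record slot which may legitimately lack a NUL terminator, which in turn silences -Wstringop-truncation on the strncpy that fills it. A hedged userspace sketch; the nonstring attribute is a GCC 8+ extension, and the struct below is illustrative rather than the real monitor record:

#include <stdio.h>
#include <string.h>

#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 8
#define __nonstring __attribute__((nonstring))
#else
#define __nonstring
#endif

struct mon_new_index {
    unsigned char type;
    char name[8] __nonstring;    /* padded, not necessarily NUL-terminated */
};

int main(void)
{
    struct mon_new_index idx = { .type = 0 };

    /* strncpy may drop the terminator; for a wire record that is fine. */
    strncpy(idx.name, "verylongname", sizeof(idx.name));

    /* Always print with an explicit bound instead of trusting a NUL. */
    printf("name=%.*s\n", (int)sizeof(idx.name), idx.name);
    return 0;
}
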
diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h
index 2d3c48281886..b4234b0300c7 100644
--- a/include/net/bond_alb.h
+++ b/include/net/bond_alb.h
@@ -156,8 +156,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave);
void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
-int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
-int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
void bond_alb_monitor(struct work_struct *);
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 1bee8fdff7db..9e9ccbade3b5 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -205,8 +205,9 @@ struct bonding {
struct slave __rcu *curr_active_slave;
struct slave __rcu *current_arp_slave;
struct slave __rcu *primary_slave;
- struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
+ struct bond_up_slave __rcu *usable_slaves; /* Array of usable slaves */
bool force_primary;
+ bool notifier_ctx;
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
int (*recv_probe)(const struct sk_buff *, struct bonding *,
struct slave *);
@@ -685,37 +686,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
}
/* Caller must hold rcu_read_lock() for read */
-static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
- const u8 *mac)
+static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac)
{
struct list_head *iter;
struct slave *tmp;
bond_for_each_slave_rcu(bond, tmp, iter)
if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
- return tmp;
-
- return NULL;
-}
-
-/* Caller must hold rcu_read_lock() for read */
-static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
-{
- struct list_head *iter;
- struct slave *tmp;
- struct netdev_hw_addr *ha;
-
- bond_for_each_slave_rcu(bond, tmp, iter)
- if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
return true;
-
- if (netdev_uc_empty(bond->dev))
- return false;
-
- netdev_for_each_uc_addr(ha, bond->dev)
- if (ether_addr_equal_64bits(mac, ha->addr))
- return true;
-
return false;
}
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 9899b9af7f22..16258c0c7319 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -31,7 +31,7 @@ extern unsigned int sysctl_net_busy_poll __read_mostly;
static inline bool net_busy_loop_on(void)
{
- return sysctl_net_busy_poll;
+ return READ_ONCE(sysctl_net_busy_poll);
}
static inline bool sk_can_busy_loop(const struct sock *sk)
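
net_busy_loop_on() now reads the sysctl through READ_ONCE() because sysctl writers update it with no lock held; the annotation forbids the compiler from tearing, duplicating, or caching the load. A rough userspace equivalent is a relaxed C11 atomic load (the tunable and helper below are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned int busy_poll_usecs;   /* updated by another thread */

static bool busy_loop_on(void)
{
    /* One well-defined load: never torn, never refetched. */
    return atomic_load_explicit(&busy_poll_usecs, memory_order_relaxed) != 0;
}

int main(void)
{
    atomic_store_explicit(&busy_poll_usecs, 50, memory_order_relaxed);
    printf("busy polling %s\n", busy_loop_on() ? "on" : "off");
    return 0;
}
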
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 69b9ccbe1ad0..2dfa3331604e 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -436,6 +436,9 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
return NULL;
+ if (iftype == NL80211_IFTYPE_AP_VLAN)
+ iftype = NL80211_IFTYPE_AP;
+
for (i = 0; i < sband->n_iftype_data; i++) {
const struct ieee80211_sband_iftype_data *data =
&sband->iftype_data[i];
diff --git a/include/net/dn.h b/include/net/dn.h
deleted file mode 100644
index 56ab0726c641..000000000000
--- a/include/net/dn.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_H
-#define _NET_DN_H
-
-#include <linux/dn.h>
-#include <net/sock.h>
-#include <net/flow.h>
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
-
-struct dn_scp /* Session Control Port */
-{
- unsigned char state;
-#define DN_O 1 /* Open */
-#define DN_CR 2 /* Connect Receive */
-#define DN_DR 3 /* Disconnect Reject */
-#define DN_DRC 4 /* Discon. Rej. Complete*/
-#define DN_CC 5 /* Connect Confirm */
-#define DN_CI 6 /* Connect Initiate */
-#define DN_NR 7 /* No resources */
-#define DN_NC 8 /* No communication */
-#define DN_CD 9 /* Connect Delivery */
-#define DN_RJ 10 /* Rejected */
-#define DN_RUN 11 /* Running */
-#define DN_DI 12 /* Disconnect Initiate */
-#define DN_DIC 13 /* Disconnect Complete */
-#define DN_DN 14 /* Disconnect Notificat */
-#define DN_CL 15 /* Closed */
-#define DN_CN 16 /* Closed Notification */
-
- __le16 addrloc;
- __le16 addrrem;
- __u16 numdat;
- __u16 numoth;
- __u16 numoth_rcv;
- __u16 numdat_rcv;
- __u16 ackxmt_dat;
- __u16 ackxmt_oth;
- __u16 ackrcv_dat;
- __u16 ackrcv_oth;
- __u8 flowrem_sw;
- __u8 flowloc_sw;
-#define DN_SEND 2
-#define DN_DONTSEND 1
-#define DN_NOCHANGE 0
- __u16 flowrem_dat;
- __u16 flowrem_oth;
- __u16 flowloc_dat;
- __u16 flowloc_oth;
- __u8 services_rem;
- __u8 services_loc;
- __u8 info_rem;
- __u8 info_loc;
-
- __u16 segsize_rem;
- __u16 segsize_loc;
-
- __u8 nonagle;
- __u8 multi_ireq;
- __u8 accept_mode;
- unsigned long seg_total; /* Running total of current segment */
-
- struct optdata_dn conndata_in;
- struct optdata_dn conndata_out;
- struct optdata_dn discdata_in;
- struct optdata_dn discdata_out;
- struct accessdata_dn accessdata;
-
- struct sockaddr_dn addr; /* Local address */
- struct sockaddr_dn peer; /* Remote address */
-
- /*
- * In this case the RTT estimation is not specified in the
- * docs, nor is any back off algorithm. Here we follow well
- * known tcp algorithms with a few small variations.
- *
- * snd_window: Max number of packets we send before we wait for
- * an ack to come back. This will become part of a
- * more complicated scheme when we support flow
- * control.
- *
- * nsp_srtt: Round-Trip-Time (x8) in jiffies. This is a rolling
- * average.
- * nsp_rttvar: Round-Trip-Time-Varience (x4) in jiffies. This is the
- * varience of the smoothed average (but calculated in
- * a simpler way than for normal statistical varience
- * calculations).
- *
- * nsp_rxtshift: Backoff counter. Value is zero normally, each time
- * a packet is lost is increases by one until an ack
- * is received. Its used to index an array of backoff
- * multipliers.
- */
-#define NSP_MIN_WINDOW 1
-#define NSP_MAX_WINDOW (0x07fe)
- unsigned long max_window;
- unsigned long snd_window;
-#define NSP_INITIAL_SRTT (HZ)
- unsigned long nsp_srtt;
-#define NSP_INITIAL_RTTVAR (HZ*3)
- unsigned long nsp_rttvar;
-#define NSP_MAXRXTSHIFT 12
- unsigned long nsp_rxtshift;
-
- /*
- * Output queues, one for data, one for otherdata/linkservice
- */
- struct sk_buff_head data_xmit_queue;
- struct sk_buff_head other_xmit_queue;
-
- /*
- * Input queue for other data
- */
- struct sk_buff_head other_receive_queue;
- int other_report;
-
- /*
- * Stuff to do with the slow timer
- */
- unsigned long stamp; /* time of last transmit */
- unsigned long persist;
- int (*persist_fxn)(struct sock *sk);
- unsigned long keepalive;
- void (*keepalive_fxn)(struct sock *sk);
-
-};
-
-static inline struct dn_scp *DN_SK(struct sock *sk)
-{
- return (struct dn_scp *)(sk + 1);
-}
-
-/*
- * src,dst : Source and Destination DECnet addresses
- * hops : Number of hops through the network
- * dst_port, src_port : NSP port numbers
- * services, info : Useful data extracted from conninit messages
- * rt_flags : Routing flags byte
- * nsp_flags : NSP layer flags byte
- * segsize : Size of segment
- * segnum : Number, for data, otherdata and linkservice
- * xmit_count : Number of times we've transmitted this skb
- * stamp : Time stamp of most recent transmission, used in RTT calculations
- * iif: Input interface number
- *
- * As a general policy, this structure keeps all addresses in network
- * byte order, and all else in host byte order. Thus dst, src, dst_port
- * and src_port are in network order. All else is in host order.
- *
- */
-#define DN_SKB_CB(skb) ((struct dn_skb_cb *)(skb)->cb)
-struct dn_skb_cb {
- __le16 dst;
- __le16 src;
- __u16 hops;
- __le16 dst_port;
- __le16 src_port;
- __u8 services;
- __u8 info;
- __u8 rt_flags;
- __u8 nsp_flags;
- __u16 segsize;
- __u16 segnum;
- __u16 xmit_count;
- unsigned long stamp;
- int iif;
-};
-
-static inline __le16 dn_eth2dn(unsigned char *ethaddr)
-{
- return get_unaligned((__le16 *)(ethaddr + 4));
-}
-
-static inline __le16 dn_saddr2dn(struct sockaddr_dn *saddr)
-{
- return *(__le16 *)saddr->sdn_nodeaddr;
-}
-
-static inline void dn_dn2eth(unsigned char *ethaddr, __le16 addr)
-{
- __u16 a = le16_to_cpu(addr);
- ethaddr[0] = 0xAA;
- ethaddr[1] = 0x00;
- ethaddr[2] = 0x04;
- ethaddr[3] = 0x00;
- ethaddr[4] = (__u8)(a & 0xff);
- ethaddr[5] = (__u8)(a >> 8);
-}
-
-static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp)
-{
- fld->fld_sport = scp->addrloc;
- fld->fld_dport = scp->addrrem;
-}
-
-unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
-void dn_register_sysctl(void);
-void dn_unregister_sysctl(void);
-
-#define DN_MENUVER_ACC 0x01
-#define DN_MENUVER_USR 0x02
-#define DN_MENUVER_PRX 0x04
-#define DN_MENUVER_UIC 0x08
-
-struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
-struct sock *dn_find_by_skb(struct sk_buff *skb);
-#define DN_ASCBUF_LEN 9
-char *dn_addr2asc(__u16, char *);
-int dn_destroy_timer(struct sock *sk);
-
-int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf,
- unsigned char type);
-int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr,
- unsigned char *type);
-
-void dn_start_slow_timer(struct sock *sk);
-void dn_stop_slow_timer(struct sock *sk);
-
-extern __le16 decnet_address;
-extern int decnet_debug_level;
-extern int decnet_time_wait;
-extern int decnet_dn_count;
-extern int decnet_di_count;
-extern int decnet_dr_count;
-extern int decnet_no_fc_max_cwnd;
-
-extern long sysctl_decnet_mem[3];
-extern int sysctl_decnet_wmem[3];
-extern int sysctl_decnet_rmem[3];
-
-#endif /* _NET_DN_H */
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
deleted file mode 100644
index 595b4f6c1eb1..000000000000
--- a/include/net/dn_dev.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_DEV_H
-#define _NET_DN_DEV_H
-
-
-struct dn_dev;
-
-struct dn_ifaddr {
- struct dn_ifaddr __rcu *ifa_next;
- struct dn_dev *ifa_dev;
- __le16 ifa_local;
- __le16 ifa_address;
- __u32 ifa_flags;
- __u8 ifa_scope;
- char ifa_label[IFNAMSIZ];
- struct rcu_head rcu;
-};
-
-#define DN_DEV_S_RU 0 /* Run - working normally */
-#define DN_DEV_S_CR 1 /* Circuit Rejected */
-#define DN_DEV_S_DS 2 /* Data Link Start */
-#define DN_DEV_S_RI 3 /* Routing Layer Initialize */
-#define DN_DEV_S_RV 4 /* Routing Layer Verify */
-#define DN_DEV_S_RC 5 /* Routing Layer Complete */
-#define DN_DEV_S_OF 6 /* Off */
-#define DN_DEV_S_HA 7 /* Halt */
-
-
-/*
- * The dn_dev_parms structure contains the set of parameters
- * for each device (hence inclusion in the dn_dev structure)
- * and an array is used to store the default types of supported
- * device (in dn_dev.c).
- *
- * The type field matches the ARPHRD_ constants and is used in
- * searching the list for supported devices when new devices
- * come up.
- *
- * The mode field is used to find out if a device is broadcast,
- * multipoint, or pointopoint. Please note that DECnet thinks
- * different ways about devices to the rest of the kernel
- * so the normal IFF_xxx flags are invalid here. For devices
- * which can be any combination of the previously mentioned
- * attributes, you can set this on a per device basis by
- * installing an up() routine.
- *
- * The device state field, defines the initial state in which the
- * device will come up. In the dn_dev structure, it is the actual
- * state.
- *
- * Things have changed here. I've killed timer1 since it's a user space
- * issue for a user space routing deamon to sort out. The kernel does
- * not need to be bothered with it.
- *
- * Timers:
- * t2 - Rate limit timer, min time between routing and hello messages
- * t3 - Hello timer, send hello messages when it expires
- *
- * Callbacks:
- * up() - Called to initialize device, return value can veto use of
- * device with DECnet.
- * down() - Called to turn device off when it goes down
- * timer3() - Called once for each ifaddr when timer 3 goes off
- *
- * sysctl - Hook for sysctl things
- *
- */
-struct dn_dev_parms {
- int type; /* ARPHRD_xxx */
- int mode; /* Broadcast, Unicast, Mulitpoint */
-#define DN_DEV_BCAST 1
-#define DN_DEV_UCAST 2
-#define DN_DEV_MPOINT 4
- int state; /* Initial state */
- int forwarding; /* 0=EndNode, 1=L1Router, 2=L2Router */
- unsigned long t2; /* Default value of t2 */
- unsigned long t3; /* Default value of t3 */
- int priority; /* Priority to be a router */
- char *name; /* Name for sysctl */
- int (*up)(struct net_device *);
- void (*down)(struct net_device *);
- void (*timer3)(struct net_device *, struct dn_ifaddr *ifa);
- void *sysctl;
-};
-
-
-struct dn_dev {
- struct dn_ifaddr __rcu *ifa_list;
- struct net_device *dev;
- struct dn_dev_parms parms;
- char use_long;
- struct timer_list timer;
- unsigned long t3;
- struct neigh_parms *neigh_parms;
- __u8 addr[ETH_ALEN];
- struct neighbour *router; /* Default router on circuit */
- struct neighbour *peer; /* Peer on pointopoint links */
- unsigned long uptime; /* Time device went up in jiffies */
-};
-
-struct dn_short_packet {
- __u8 msgflg;
- __le16 dstnode;
- __le16 srcnode;
- __u8 forward;
-} __packed;
-
-struct dn_long_packet {
- __u8 msgflg;
- __u8 d_area;
- __u8 d_subarea;
- __u8 d_id[6];
- __u8 s_area;
- __u8 s_subarea;
- __u8 s_id[6];
- __u8 nl2;
- __u8 visit_ct;
- __u8 s_class;
- __u8 pt;
-} __packed;
-
-/*------------------------- DRP - Routing messages ---------------------*/
-
-struct endnode_hello_message {
- __u8 msgflg;
- __u8 tiver[3];
- __u8 id[6];
- __u8 iinfo;
- __le16 blksize;
- __u8 area;
- __u8 seed[8];
- __u8 neighbor[6];
- __le16 timer;
- __u8 mpd;
- __u8 datalen;
- __u8 data[2];
-} __packed;
-
-struct rtnode_hello_message {
- __u8 msgflg;
- __u8 tiver[3];
- __u8 id[6];
- __u8 iinfo;
- __le16 blksize;
- __u8 priority;
- __u8 area;
- __le16 timer;
- __u8 mpd;
-} __packed;
-
-
-void dn_dev_init(void);
-void dn_dev_cleanup(void);
-
-int dn_dev_ioctl(unsigned int cmd, void __user *arg);
-
-void dn_dev_devices_off(void);
-void dn_dev_devices_on(void);
-
-void dn_dev_init_pkt(struct sk_buff *skb);
-void dn_dev_veri_pkt(struct sk_buff *skb);
-void dn_dev_hello(struct sk_buff *skb);
-
-void dn_dev_up(struct net_device *);
-void dn_dev_down(struct net_device *);
-
-int dn_dev_set_default(struct net_device *dev, int force);
-struct net_device *dn_dev_get_default(void);
-int dn_dev_bind_default(__le16 *addr);
-
-int register_dnaddr_notifier(struct notifier_block *nb);
-int unregister_dnaddr_notifier(struct notifier_block *nb);
-
-static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
-{
- struct dn_dev *dn_db;
- struct dn_ifaddr *ifa;
- int res = 0;
-
- rcu_read_lock();
- dn_db = rcu_dereference(dev->dn_ptr);
- if (dn_db == NULL) {
- printk(KERN_DEBUG "dn_dev_islocal: Called for non DECnet device\n");
- goto out;
- }
-
- for (ifa = rcu_dereference(dn_db->ifa_list);
- ifa != NULL;
- ifa = rcu_dereference(ifa->ifa_next))
- if ((addr ^ ifa->ifa_local) == 0) {
- res = 1;
- break;
- }
-out:
- rcu_read_unlock();
- return res;
-}
-
-#endif /* _NET_DN_DEV_H */
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
deleted file mode 100644
index 6dd2213c5eb2..000000000000
--- a/include/net/dn_fib.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_FIB_H
-#define _NET_DN_FIB_H
-
-#include <linux/netlink.h>
-#include <linux/refcount.h>
-
-extern const struct nla_policy rtm_dn_policy[];
-
-struct dn_fib_res {
- struct fib_rule *r;
- struct dn_fib_info *fi;
- unsigned char prefixlen;
- unsigned char nh_sel;
- unsigned char type;
- unsigned char scope;
-};
-
-struct dn_fib_nh {
- struct net_device *nh_dev;
- unsigned int nh_flags;
- unsigned char nh_scope;
- int nh_weight;
- int nh_power;
- int nh_oif;
- __le16 nh_gw;
-};
-
-struct dn_fib_info {
- struct dn_fib_info *fib_next;
- struct dn_fib_info *fib_prev;
- int fib_treeref;
- refcount_t fib_clntref;
- int fib_dead;
- unsigned int fib_flags;
- int fib_protocol;
- __le16 fib_prefsrc;
- __u32 fib_priority;
- __u32 fib_metrics[RTAX_MAX];
- int fib_nhs;
- int fib_power;
- struct dn_fib_nh fib_nh[0];
-#define dn_fib_dev fib_nh[0].nh_dev
-};
-
-
-#define DN_FIB_RES_RESET(res) ((res).nh_sel = 0)
-#define DN_FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
-
-#define DN_FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : __dn_fib_res_prefsrc(&res))
-#define DN_FIB_RES_GW(res) (DN_FIB_RES_NH(res).nh_gw)
-#define DN_FIB_RES_DEV(res) (DN_FIB_RES_NH(res).nh_dev)
-#define DN_FIB_RES_OIF(res) (DN_FIB_RES_NH(res).nh_oif)
-
-typedef struct {
- __le16 datum;
-} dn_fib_key_t;
-
-typedef struct {
- __le16 datum;
-} dn_fib_hash_t;
-
-typedef struct {
- __u16 datum;
-} dn_fib_idx_t;
-
-struct dn_fib_node {
- struct dn_fib_node *fn_next;
- struct dn_fib_info *fn_info;
-#define DN_FIB_INFO(f) ((f)->fn_info)
- dn_fib_key_t fn_key;
- u8 fn_type;
- u8 fn_scope;
- u8 fn_state;
-};
-
-
-struct dn_fib_table {
- struct hlist_node hlist;
- u32 n;
-
- int (*insert)(struct dn_fib_table *t, struct rtmsg *r,
- struct nlattr *attrs[], struct nlmsghdr *n,
- struct netlink_skb_parms *req);
- int (*delete)(struct dn_fib_table *t, struct rtmsg *r,
- struct nlattr *attrs[], struct nlmsghdr *n,
- struct netlink_skb_parms *req);
- int (*lookup)(struct dn_fib_table *t, const struct flowidn *fld,
- struct dn_fib_res *res);
- int (*flush)(struct dn_fib_table *t);
- int (*dump)(struct dn_fib_table *t, struct sk_buff *skb, struct netlink_callback *cb);
-
- unsigned char data[0];
-};
-
-#ifdef CONFIG_DECNET_ROUTER
-/*
- * dn_fib.c
- */
-void dn_fib_init(void);
-void dn_fib_cleanup(void);
-
-int dn_fib_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
-struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r,
- struct nlattr *attrs[],
- const struct nlmsghdr *nlh, int *errp);
-int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
- const struct flowidn *fld, struct dn_fib_res *res);
-void dn_fib_release_info(struct dn_fib_info *fi);
-void dn_fib_flush(void);
-void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res);
-
-/*
- * dn_tables.c
- */
-struct dn_fib_table *dn_fib_get_table(u32 n, int creat);
-struct dn_fib_table *dn_fib_empty_table(void);
-void dn_fib_table_init(void);
-void dn_fib_table_cleanup(void);
-
-/*
- * dn_rules.c
- */
-void dn_fib_rules_init(void);
-void dn_fib_rules_cleanup(void);
-unsigned int dnet_addr_type(__le16 addr);
-int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
-
-int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
-
-void dn_fib_free_info(struct dn_fib_info *fi);
-
-static inline void dn_fib_info_put(struct dn_fib_info *fi)
-{
- if (refcount_dec_and_test(&fi->fib_clntref))
- dn_fib_free_info(fi);
-}
-
-static inline void dn_fib_res_put(struct dn_fib_res *res)
-{
- if (res->fi)
- dn_fib_info_put(res->fi);
- if (res->r)
- fib_rule_put(res->r);
-}
-
-#else /* Endnode */
-
-#define dn_fib_init() do { } while(0)
-#define dn_fib_cleanup() do { } while(0)
-
-#define dn_fib_lookup(fl, res) (-ESRCH)
-#define dn_fib_info_put(fi) do { } while(0)
-#define dn_fib_select_multipath(fl, res) do { } while(0)
-#define dn_fib_rules_policy(saddr,res,flags) (0)
-#define dn_fib_res_put(res) do { } while(0)
-
-#endif /* CONFIG_DECNET_ROUTER */
-
-static inline __le16 dnet_make_mask(int n)
-{
- if (n)
- return cpu_to_le16(~((1 << (16 - n)) - 1));
- return cpu_to_le16(0);
-}
-
-#endif /* _NET_DN_FIB_H */
diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h
deleted file mode 100644
index 2e3e7793973a..000000000000
--- a/include/net/dn_neigh.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_NEIGH_H
-#define _NET_DN_NEIGH_H
-
-/*
- * The position of the first two fields of
- * this structure are critical - SJW
- */
-struct dn_neigh {
- struct neighbour n;
- __le16 addr;
- unsigned long flags;
-#define DN_NDFLAG_R1 0x0001 /* Router L1 */
-#define DN_NDFLAG_R2 0x0002 /* Router L2 */
-#define DN_NDFLAG_P3 0x0004 /* Phase III Node */
- unsigned long blksize;
- __u8 priority;
-};
-
-void dn_neigh_init(void);
-void dn_neigh_cleanup(void);
-int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
-int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
-void dn_neigh_pointopoint_hello(struct sk_buff *skb);
-int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
-int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb);
-
-extern struct neigh_table dn_neigh_table;
-
-#endif /* _NET_DN_NEIGH_H */
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
deleted file mode 100644
index f83932b864a9..000000000000
--- a/include/net/dn_nsp.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _NET_DN_NSP_H
-#define _NET_DN_NSP_H
-/******************************************************************************
- (c) 1995-1998 E.M. Serrat emserrat@geocities.com
-
-*******************************************************************************/
-/* dn_nsp.c functions prototyping */
-
-void dn_nsp_send_data_ack(struct sock *sk);
-void dn_nsp_send_oth_ack(struct sock *sk);
-void dn_send_conn_ack(struct sock *sk);
-void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
-void dn_nsp_send_disc(struct sock *sk, unsigned char type,
- unsigned short reason, gfp_t gfp);
-void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
- unsigned short reason);
-void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
-void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
-
-void dn_nsp_output(struct sock *sk);
-int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb,
- struct sk_buff_head *q, unsigned short acknum);
-void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp,
- int oob);
-unsigned long dn_nsp_persist(struct sock *sk);
-int dn_nsp_xmit_timeout(struct sock *sk);
-
-int dn_nsp_rx(struct sk_buff *);
-int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
-
-struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock,
- long timeo, int *err);
-
-#define NSP_REASON_OK 0 /* No error */
-#define NSP_REASON_NR 1 /* No resources */
-#define NSP_REASON_UN 2 /* Unrecognised node name */
-#define NSP_REASON_SD 3 /* Node shutting down */
-#define NSP_REASON_ID 4 /* Invalid destination end user */
-#define NSP_REASON_ER 5 /* End user lacks resources */
-#define NSP_REASON_OB 6 /* Object too busy */
-#define NSP_REASON_US 7 /* Unspecified error */
-#define NSP_REASON_TP 8 /* Third-Party abort */
-#define NSP_REASON_EA 9 /* End user has aborted the link */
-#define NSP_REASON_IF 10 /* Invalid node name format */
-#define NSP_REASON_LS 11 /* Local node shutdown */
-#define NSP_REASON_LL 32 /* Node lacks logical-link resources */
-#define NSP_REASON_LE 33 /* End user lacks logical-link resources */
-#define NSP_REASON_UR 34 /* Unacceptable RQSTRID or PASSWORD field */
-#define NSP_REASON_UA 36 /* Unacceptable ACCOUNT field */
-#define NSP_REASON_TM 38 /* End user timed out logical link */
-#define NSP_REASON_NU 39 /* Node unreachable */
-#define NSP_REASON_NL 41 /* No-link message */
-#define NSP_REASON_DC 42 /* Disconnect confirm */
-#define NSP_REASON_IO 43 /* Image data field overflow */
-
-#define NSP_DISCINIT 0x38
-#define NSP_DISCCONF 0x48
-
-/*------------------------- NSP - messages ------------------------------*/
-/* Data Messages */
-/*---------------*/
-
-/* Data Messages (data segment/interrupt/link service) */
-
-struct nsp_data_seg_msg {
- __u8 msgflg;
- __le16 dstaddr;
- __le16 srcaddr;
-} __packed;
-
-struct nsp_data_opt_msg {
- __le16 acknum;
- __le16 segnum;
- __le16 lsflgs;
-} __packed;
-
-struct nsp_data_opt_msg1 {
- __le16 acknum;
- __le16 segnum;
-} __packed;
-
-
-/* Acknowledgment Message (data/other data) */
-struct nsp_data_ack_msg {
- __u8 msgflg;
- __le16 dstaddr;
- __le16 srcaddr;
- __le16 acknum;
-} __packed;
-
-/* Connect Acknowledgment Message */
-struct nsp_conn_ack_msg {
- __u8 msgflg;
- __le16 dstaddr;
-} __packed;
-
-
-/* Connect Initiate/Retransmit Initiate/Connect Confirm */
-struct nsp_conn_init_msg {
- __u8 msgflg;
-#define NSP_CI 0x18 /* Connect Initiate */
-#define NSP_RCI 0x68 /* Retrans. Conn Init */
- __le16 dstaddr;
- __le16 srcaddr;
- __u8 services;
-#define NSP_FC_NONE 0x00 /* Flow Control None */
-#define NSP_FC_SRC 0x04 /* Seg Req. Count */
-#define NSP_FC_SCMC 0x08 /* Sess. Control Mess */
-#define NSP_FC_MASK 0x0c /* FC type mask */
- __u8 info;
- __le16 segsize;
-} __packed;
-
-/* Disconnect Initiate/Disconnect Confirm */
-struct nsp_disconn_init_msg {
- __u8 msgflg;
- __le16 dstaddr;
- __le16 srcaddr;
- __le16 reason;
-} __packed;
-
-
-
-struct srcobj_fmt {
- __u8 format;
- __u8 task;
- __le16 grpcode;
- __le16 usrcode;
- __u8 dlen;
-} __packed;
-
-/*
- * A collection of functions for manipulating the sequence
- * numbers used in NSP. Similar in operation to the functions
- * of the same name in TCP.
- */
-static __inline__ int dn_before(__u16 seq1, __u16 seq2)
-{
- seq1 &= 0x0fff;
- seq2 &= 0x0fff;
-
- return (int)((seq1 - seq2) & 0x0fff) > 2048;
-}
-
-
-static __inline__ int dn_after(__u16 seq1, __u16 seq2)
-{
- seq1 &= 0x0fff;
- seq2 &= 0x0fff;
-
- return (int)((seq2 - seq1) & 0x0fff) > 2048;
-}
-
-static __inline__ int dn_equal(__u16 seq1, __u16 seq2)
-{
- return ((seq1 ^ seq2) & 0x0fff) == 0;
-}
-
-static __inline__ int dn_before_or_equal(__u16 seq1, __u16 seq2)
-{
- return (dn_before(seq1, seq2) || dn_equal(seq1, seq2));
-}
-
-static __inline__ void seq_add(__u16 *seq, __u16 off)
-{
- (*seq) += off;
- (*seq) &= 0x0fff;
-}
-
-static __inline__ int seq_next(__u16 seq1, __u16 seq2)
-{
- return dn_equal(seq1 + 1, seq2);
-}
-
-/*
- * Can we delay the ack ?
- */
-static __inline__ int sendack(__u16 seq)
-{
- return (int)((seq & 0x1000) ? 0 : 1);
-}
-
-/*
- * Is socket congested ?
- */
-static __inline__ int dn_congested(struct sock *sk)
-{
- return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
-}
-
-#define DN_MAX_NSP_DATA_HEADER (11)
-
-#endif /* _NET_DN_NSP_H */
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
deleted file mode 100644
index 6f1e94ac0bdf..000000000000
--- a/include/net/dn_route.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _NET_DN_ROUTE_H
-#define _NET_DN_ROUTE_H
-
-/******************************************************************************
- (c) 1995-1998 E.M. Serrat emserrat@geocities.com
-
-*******************************************************************************/
-
-struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *,
- struct sock *sk, int flags);
-int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
-void dn_rt_cache_flush(int delay);
-int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev);
-
-/* Masks for flags field */
-#define DN_RT_F_PID 0x07 /* Mask for packet type */
-#define DN_RT_F_PF 0x80 /* Padding Follows */
-#define DN_RT_F_VER 0x40 /* Version =0 discard packet if ==1 */
-#define DN_RT_F_IE 0x20 /* Intra Ethernet, Reserved in short pkt */
-#define DN_RT_F_RTS 0x10 /* Packet is being returned to sender */
-#define DN_RT_F_RQR 0x08 /* Return packet to sender upon non-delivery */
-
-/* Mask for types of routing packets */
-#define DN_RT_PKT_MSK 0x06
-/* Types of routing packets */
-#define DN_RT_PKT_SHORT 0x02 /* Short routing packet */
-#define DN_RT_PKT_LONG 0x06 /* Long routing packet */
-
-/* Mask for control/routing selection */
-#define DN_RT_PKT_CNTL 0x01 /* Set to 1 if a control packet */
-/* Types of control packets */
-#define DN_RT_CNTL_MSK 0x0f /* Mask for control packets */
-#define DN_RT_PKT_INIT 0x01 /* Initialisation packet */
-#define DN_RT_PKT_VERI 0x03 /* Verification Message */
-#define DN_RT_PKT_HELO 0x05 /* Hello and Test Message */
-#define DN_RT_PKT_L1RT 0x07 /* Level 1 Routing Message */
-#define DN_RT_PKT_L2RT 0x09 /* Level 2 Routing Message */
-#define DN_RT_PKT_ERTH 0x0b /* Ethernet Router Hello */
-#define DN_RT_PKT_EEDH 0x0d /* Ethernet EndNode Hello */
-
-/* Values for info field in hello message */
-#define DN_RT_INFO_TYPE 0x03 /* Type mask */
-#define DN_RT_INFO_L1RT 0x02 /* L1 Router */
-#define DN_RT_INFO_L2RT 0x01 /* L2 Router */
-#define DN_RT_INFO_ENDN 0x03 /* EndNode */
-#define DN_RT_INFO_VERI 0x04 /* Verification Reqd. */
-#define DN_RT_INFO_RJCT 0x08 /* Reject Flag, Reserved */
-#define DN_RT_INFO_VFLD 0x10 /* Verification Failed, Reserved */
-#define DN_RT_INFO_NOML 0x20 /* No Multicast traffic accepted */
-#define DN_RT_INFO_BLKR 0x40 /* Blocking Requested */
-
-/*
- * The fl structure is what we used to look up the route.
- * The rt_saddr & rt_daddr entries are the same as key.saddr & key.daddr
- * except for local input routes, where the rt_saddr = fl.fld_dst and
- * rt_daddr = fl.fld_src to allow the route to be used for returning
- * packets to the originating host.
- */
-struct dn_route {
- struct dst_entry dst;
- struct dn_route __rcu *dn_next;
-
- struct neighbour *n;
-
- struct flowidn fld;
-
- __le16 rt_saddr;
- __le16 rt_daddr;
- __le16 rt_gateway;
- __le16 rt_local_src; /* Source used for forwarding packets */
- __le16 rt_src_map;
- __le16 rt_dst_map;
-
- unsigned int rt_flags;
- unsigned int rt_type;
-};
-
-static inline bool dn_is_input_route(struct dn_route *rt)
-{
- return rt->fld.flowidn_iif != 0;
-}
-
-static inline bool dn_is_output_route(struct dn_route *rt)
-{
- return rt->fld.flowidn_iif == 0;
-}
-
-void dn_route_init(void);
-void dn_route_cleanup(void);
-
-#include <net/sock.h>
-#include <linux/if_arp.h>
-
-static inline void dn_rt_send(struct sk_buff *skb)
-{
- dev_queue_xmit(skb);
-}
-
-static inline void dn_rt_finish_output(struct sk_buff *skb, char *dst, char *src)
-{
- struct net_device *dev = skb->dev;
-
- if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
- dst = NULL;
-
- if (dev_hard_header(skb, dev, ETH_P_DNA_RT, dst, src, skb->len) >= 0)
- dn_rt_send(skb);
- else
- kfree_skb(skb);
-}
-
-#endif /* _NET_DN_ROUTE_H */
diff --git a/include/net/dst.h b/include/net/dst.h
index 433f7c1ce8a9..37c540e6394c 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -236,12 +236,6 @@ static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
}
}
-static inline void dst_hold_and_use(struct dst_entry *dst, unsigned long time)
-{
- dst_hold(dst);
- dst_use_noref(dst, time);
-}
-
static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
if (dst)
@@ -357,9 +351,8 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
struct net *net)
{
- /* TODO : stats should be SMP safe */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ DEV_STATS_INC(dev, rx_packets);
+ DEV_STATS_ADD(dev, rx_bytes, skb->len);
__skb_tunnel_rx(skb, dev, net);
}
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 443863c7b8da..632086b2f644 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -16,7 +16,7 @@ struct dst_ops {
unsigned short family;
unsigned int gc_thresh;
- int (*gc)(struct dst_ops *ops);
+ void (*gc)(struct dst_ops *ops);
struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
unsigned int (*default_advmss)(const struct dst_entry *);
unsigned int (*mtu)(const struct dst_entry *);
@@ -53,9 +53,11 @@ static inline int dst_entries_get_slow(struct dst_ops *dst)
return percpu_counter_sum_positive(&dst->pcpuc_entries);
}
+#define DST_PERCPU_COUNTER_BATCH 32
static inline void dst_entries_add(struct dst_ops *dst, int val)
{
- percpu_counter_add(&dst->pcpuc_entries, val);
+ percpu_counter_add_batch(&dst->pcpuc_entries, val,
+ DST_PERCPU_COUNTER_BATCH);
}
static inline int dst_entries_init(struct dst_ops *dst)
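
Switching dst_entries_add() to percpu_counter_add_batch() keeps updates in the per-CPU counter until they exceed a batch of 32, trading a little accuracy for far fewer touches of the shared counter. A thread-local sketch of the batching idea in C11 (constants and names illustrative, not the kernel implementation):

#include <stdatomic.h>
#include <stdio.h>

#define DST_BATCH 32

static atomic_long global_entries;          /* shared, expensive to touch */
static _Thread_local long local_entries;    /* private, cheap to touch */

/* Fold into the shared counter only once the local drift hits the batch. */
static void entries_add(long val)
{
    local_entries += val;
    if (local_entries >= DST_BATCH || local_entries <= -DST_BATCH) {
        atomic_fetch_add(&global_entries, local_entries);
        local_entries = 0;
    }
}

int main(void)
{
    for (int i = 0; i < 40; i++)
        entries_add(1);
    /* 32 got folded in; the remaining 8 still sit in the local counter. */
    printf("global=%ld local=%ld\n",
           (long)atomic_load(&global_entries), local_entries);
    return 0;
}
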
diff --git a/include/net/flow.h b/include/net/flow.h
index d058e63fb59a..21e781c74ac5 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -39,8 +39,8 @@ struct flowi_common {
#define FLOWI_FLAG_SKIP_NH_OIF 0x04
__u32 flowic_secid;
kuid_t flowic_uid;
- struct flowi_tunnel flowic_tun_key;
__u32 flowic_multipath_hash;
+ struct flowi_tunnel flowic_tun_key;
};
union flowi_uli {
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 2d9e67a69cbe..2d52776def52 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -11,9 +11,12 @@
/**
* struct genl_multicast_group - generic netlink multicast group
* @name: name of the multicast group, names are per-family
+ * @cap_sys_admin: whether %CAP_SYS_ADMIN is required for binding
*/
struct genl_multicast_group {
char name[GENL_NAMSIZ];
+ u8 flags;
+ u8 cap_sys_admin:1;
};
struct genl_ops;
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index d0d188c3294b..03b64bf876a4 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -15,6 +15,22 @@
#ifndef IEEE802154_NETDEVICE_H
#define IEEE802154_NETDEVICE_H
+#define IEEE802154_REQUIRED_SIZE(struct_type, member) \
+ (offsetof(typeof(struct_type), member) + \
+ sizeof(((typeof(struct_type) *)(NULL))->member))
+
+#define IEEE802154_ADDR_OFFSET \
+ offsetof(typeof(struct sockaddr_ieee802154), addr)
+
+#define IEEE802154_MIN_NAMELEN (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, addr_type))
+
+#define IEEE802154_NAMELEN_SHORT (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, short_addr))
+
+#define IEEE802154_NAMELEN_LONG (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, hwaddr))
+
#include <net/af_ieee802154.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
@@ -165,6 +181,33 @@ static inline void ieee802154_devaddr_to_raw(void *raw, __le64 addr)
memcpy(raw, &temp, IEEE802154_ADDR_LEN);
}
+static inline int
+ieee802154_sockaddr_check_size(struct sockaddr_ieee802154 *daddr, int len)
+{
+ struct ieee802154_addr_sa *sa;
+ int ret = 0;
+
+ sa = &daddr->addr;
+ if (len < IEEE802154_MIN_NAMELEN)
+ return -EINVAL;
+ switch (sa->addr_type) {
+ case IEEE802154_ADDR_NONE:
+ break;
+ case IEEE802154_ADDR_SHORT:
+ if (len < IEEE802154_NAMELEN_SHORT)
+ ret = -EINVAL;
+ break;
+ case IEEE802154_ADDR_LONG:
+ if (len < IEEE802154_NAMELEN_LONG)
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
static inline void ieee802154_addr_from_sa(struct ieee802154_addr *a,
const struct ieee802154_addr_sa *sa)
{
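
ieee802154_sockaddr_check_size() rejects sockaddrs that are long enough for the address-type byte but too short for the address that type implies, using offsetof()+sizeof() "required size" macros. A self-contained sketch with a stand-in sockaddr layout (the struct and constants are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct addr_sa {
    int addr_type;
    uint16_t pan_id;
    union {
        uint16_t short_addr;
        uint8_t  hwaddr[8];
    };
};

#define REQUIRED_SIZE(type, member) \
    (offsetof(type, member) + sizeof(((type *)0)->member))

enum { ADDR_NONE, ADDR_SHORT, ADDR_LONG };

/* Fail closed: the length must cover every field the addr_type implies. */
static int sockaddr_check_size(const struct addr_sa *sa, size_t len)
{
    if (len < REQUIRED_SIZE(struct addr_sa, addr_type))
        return -1;
    switch (sa->addr_type) {
    case ADDR_NONE:
        return 0;
    case ADDR_SHORT:
        return len < REQUIRED_SIZE(struct addr_sa, short_addr) ? -1 : 0;
    case ADDR_LONG:
        return len < REQUIRED_SIZE(struct addr_sa, hwaddr) ? -1 : 0;
    default:
        return -1;
    }
}

int main(void)
{
    struct addr_sa sa = { .addr_type = ADDR_LONG };

    printf("full length: %d\n", sockaddr_check_size(&sa, sizeof(sa)));
    printf("truncated:   %d\n", sockaddr_check_size(&sa, 5));
    return 0;
}
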
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index f6d614926e9e..601bedda91c0 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -22,10 +22,6 @@
#define IF_RS_SENT 0x10
#define IF_READY 0x80000000
-/* prefix flags */
-#define IF_PREFIX_ONLINK 0x01
-#define IF_PREFIX_AUTOCONF 0x02
-
enum {
INET6_IFADDR_STATE_PREDAD,
INET6_IFADDR_STATE_DAD,
diff --git a/include/net/ip.h b/include/net/ip.h
index db841ab388c0..0ebe5385efca 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -73,6 +73,7 @@ struct ipcm_cookie {
__be32 addr;
int oif;
struct ip_options_rcu *opt;
+ __u8 protocol;
__u8 ttl;
__s16 tos;
char priority;
@@ -93,6 +94,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
ipcm->sockc.tsflags = inet->sk.sk_tsflags;
ipcm->oif = inet->sk.sk_bound_dev_if;
ipcm->addr = inet->inet_saddr;
+ ipcm->protocol = inet->inet_num;
}
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 028eaea1c854..42d50856fcf2 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -57,7 +57,7 @@ struct ip6_tnl {
/* These fields used only by GRE */
__u32 i_seqno; /* The last seen seqno */
- __u32 o_seqno; /* The last output seqno */
+ atomic_t o_seqno; /* The last output seqno */
int hlen; /* tun_hlen + encap_hlen */
int tun_hlen; /* Precalculated header length */
int encap_hlen; /* Encap header length (FOU,GUE) */
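
Turning the GRE o_seqno into an atomic_t lets concurrent transmit paths draw sequence numbers locklessly without losing increments. The same pattern in C11, assuming relaxed ordering suffices because the value only needs to be unique and monotonic (names illustrative):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t o_seqno;

/* Each transmitter atomically claims the next output sequence number. */
static uint32_t next_tx_seqno(void)
{
    return atomic_fetch_add_explicit(&o_seqno, 1, memory_order_relaxed) + 1;
}

int main(void)
{
    printf("seq %u\n", next_tx_seqno());
    printf("seq %u\n", next_tx_seqno());
    return 0;
}
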
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 56deb2501e96..36376f8b84da 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -113,7 +113,7 @@ struct ip_tunnel {
/* These four fields used only by GRE */
u32 i_seqno; /* The last seen seqno */
- u32 o_seqno; /* The last output seqno */
+ atomic_t o_seqno; /* The last output seqno */
int tun_hlen; /* Precalculated header length */
/* These four fields used only by ERSPAN */
@@ -374,9 +374,11 @@ static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
const struct sk_buff *skb)
{
- if (skb->protocol == htons(ETH_P_IP))
+ __be16 payload_protocol = skb_protocol(skb, true);
+
+ if (payload_protocol == htons(ETH_P_IP))
return iph->tos;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ else if (payload_protocol == htons(ETH_P_IPV6))
return ipv6_get_dsfield((const struct ipv6hdr *)iph);
else
return 0;
@@ -385,9 +387,11 @@ static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
const struct sk_buff *skb)
{
- if (skb->protocol == htons(ETH_P_IP))
+ __be16 payload_protocol = skb_protocol(skb, true);
+
+ if (payload_protocol == htons(ETH_P_IP))
return iph->ttl;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ else if (payload_protocol == htons(ETH_P_IPV6))
return ((const struct ipv6hdr *)iph)->hop_limit;
else
return 0;
@@ -445,15 +449,14 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
tstats->tx_packets++;
u64_stats_update_end(&tstats->syncp);
put_cpu_ptr(tstats);
+ return;
+ }
+
+ if (pkt_len < 0) {
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_aborted_errors);
} else {
- struct net_device_stats *err_stats = &dev->stats;
-
- if (pkt_len < 0) {
- err_stats->tx_errors++;
- err_stats->tx_aborted_errors++;
- } else {
- err_stats->tx_dropped++;
- }
+ DEV_STATS_INC(dev, tx_dropped);
}
}
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index b59b3dae0f71..8c454509f299 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -660,12 +660,8 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a)
/* more secured version of ipv6_addr_hash() */
static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
{
- u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
-
- return jhash_3words(v,
- (__force u32)a->s6_addr32[2],
- (__force u32)a->s6_addr32[3],
- initval);
+ return jhash2((__force const u32 *)a->s6_addr32,
+ ARRAY_SIZE(a->s6_addr32), initval);
}
static inline bool ipv6_addr_loopback(const struct in6_addr *a)
@@ -1100,6 +1096,8 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
+void inet6_cleanup_sock(struct sock *sk);
+void inet6_sock_destruct(struct sock *sk);
int inet6_release(struct socket *sock);
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
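
The old __ipv6_addr_jhash() XOR-folded the first two 32-bit words before hashing, discarding entropy; the new version feeds all four words of the address to jhash2() unchanged. A toy word-array mixer in the same shape (the mixing constants below are arbitrary and are not Bob Jenkins' actual jhash2):

#include <stdint.h>
#include <stdio.h>

static uint32_t hash_words(const uint32_t *w, unsigned int n, uint32_t seed)
{
    uint32_t h = seed ^ (n * 0x9e3779b9u);

    while (n--) {            /* every input word reaches the hash state */
        h ^= *w++;
        h *= 0x85ebca6bu;
        h ^= h >> 13;
    }
    return h;
}

int main(void)
{
    /* All four words of a 128-bit address contribute, no pre-folding. */
    uint32_t v6[4] = { 0x20010db8u, 0x0, 0x0, 0x1 };

    printf("%08x\n", hash_words(v6, 4, 0xdeadbeefu));
    return 0;
}
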
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 49aa79c7b278..581cd37aa98b 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -262,8 +262,7 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
*/
static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
{
- if (skb->protocol == htons(ETH_P_802_2))
- memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
+ memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
}
/**
@@ -275,8 +274,7 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
*/
static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
{
- if (skb->protocol == htons(ETH_P_802_2))
- memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
+ memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
}
/**
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index 5d6c5b1fc695..ed1cd431e2b3 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -16,9 +16,12 @@
#define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1)
#define LWTUNNEL_STATE_XMIT_REDIRECT BIT(2)
+/* LWTUNNEL_XMIT_CONTINUE should be distinguishable from dst_output return
+ * values (NET_XMIT_xxx and NETDEV_TX_xxx in linux/netdevice.h) for safety.
+ */
enum {
LWTUNNEL_XMIT_DONE,
- LWTUNNEL_XMIT_CONTINUE,
+ LWTUNNEL_XMIT_CONTINUE = 0x100,
};
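
Moving LWTUNNEL_XMIT_CONTINUE from 1 to 0x100 keeps it disjoint from the NET_XMIT_xxx and NETDEV_TX_xxx codes that flow back through the same int, exactly as the new comment explains. A small demo of the disjoint-sentinel idea (values other than 0x100 are illustrative):

#include <stdio.h>

enum {
    XMIT_DONE     = 0x0,
    XMIT_CONTINUE = 0x100,   /* above every NET_XMIT_xxx / NETDEV_TX_xxx */
};

/* Low codes come from the device layer; the sentinel cannot collide. */
static const char *classify(int ret)
{
    if (ret == XMIT_CONTINUE)
        return "continue with regular output";
    return ret == XMIT_DONE ? "transmitted" : "device drop/error code";
}

int main(void)
{
    printf("%s\n", classify(XMIT_CONTINUE));
    printf("%s\n", classify(0x10));   /* a NETDEV_TX_BUSY-like value */
    return 0;
}
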
diff --git a/include/net/mrp.h b/include/net/mrp.h
index ef58b4a07190..c6c53370e390 100644
--- a/include/net/mrp.h
+++ b/include/net/mrp.h
@@ -120,6 +120,7 @@ struct mrp_applicant {
struct sk_buff *pdu;
struct rb_root mad;
struct rcu_head rcu;
+ bool active;
};
struct mrp_port {
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index b6494e87c897..93c5e113b36b 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -174,7 +174,7 @@ struct pneigh_entry {
struct net_device *dev;
u8 flags;
u8 protocol;
- u8 key[0];
+ u32 key[];
};
/*
@@ -260,11 +260,6 @@ static inline void *neighbour_priv(const struct neighbour *n)
extern const struct nla_policy nda_policy[];
-static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
-{
- return *(const u16 *)n->primary_key == *(const u16 *)pkey;
-}
-
static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
{
return *(const u32 *)n->primary_key == *(const u32 *)pkey;
@@ -314,8 +309,6 @@ void neigh_table_init(int index, struct neigh_table *tbl);
int neigh_table_clear(int index, struct neigh_table *tbl);
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
struct net_device *dev);
-struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
- const void *pkey);
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
struct net_device *dev, bool want_ref);
static inline struct neighbour *neigh_create(struct neigh_table *tbl,
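
pneigh_entry's key moves from the GNU zero-length u8 key[0] idiom to a C99 flexible array member typed u32, matching how the key is really accessed and letting bounds checkers reason about it. A self-contained sketch of allocating and filling such a trailing array (the entry layout is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pentry {
    unsigned int flags;
    unsigned int key_words;
    unsigned int key[];      /* flexible array member, sized at alloc time */
};

static struct pentry *pentry_alloc(const unsigned int *key, unsigned int words)
{
    struct pentry *p = malloc(sizeof(*p) + words * sizeof(p->key[0]));

    if (!p)
        return NULL;
    p->flags = 0;
    p->key_words = words;
    memcpy(p->key, key, words * sizeof(p->key[0]));
    return p;
}

int main(void)
{
    unsigned int k[4] = { 1, 2, 3, 4 };
    struct pentry *p = pentry_alloc(k, 4);

    if (p) {
        printf("key[3]=%u\n", p->key[3]);
        free(p);
    }
    return 0;
}
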
diff --git a/include/net/netfilter/nf_nat_redirect.h b/include/net/netfilter/nf_nat_redirect.h
index 2418653a66db..279380de904c 100644
--- a/include/net/netfilter/nf_nat_redirect.h
+++ b/include/net/netfilter/nf_nat_redirect.h
@@ -6,8 +6,7 @@
#include <uapi/linux/netfilter/nf_nat.h>
unsigned int
-nf_nat_redirect_ipv4(struct sk_buff *skb,
- const struct nf_nat_ipv4_multi_range_compat *mr,
+nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_range2 *range,
unsigned int hooknum);
unsigned int
nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 886866bee8b2..cf314ce2fd17 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -130,9 +130,9 @@ static inline u16 nft_reg_load16(u32 *sreg)
return *(u16 *)sreg;
}
-static inline void nft_reg_store64(u32 *dreg, u64 val)
+static inline void nft_reg_store64(u64 *dreg, u64 val)
{
- put_unaligned(val, (u64 *)dreg);
+ put_unaligned(val, dreg);
}
static inline u64 nft_reg_load64(u32 *sreg)
@@ -205,14 +205,13 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
}
int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
-unsigned int nft_parse_register(const struct nlattr *attr);
int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
-int nft_validate_register_load(enum nft_registers reg, unsigned int len);
-int nft_validate_register_store(const struct nft_ctx *ctx,
- enum nft_registers reg,
- const struct nft_data *data,
- enum nft_data_types type, unsigned int len);
+int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len);
+int nft_parse_register_store(const struct nft_ctx *ctx,
+ const struct nlattr *attr, u8 *dreg,
+ const struct nft_data *data,
+ enum nft_data_types type, unsigned int len);
/**
* struct nft_userdata - user defined data associated with an object
@@ -240,6 +239,10 @@ struct nft_set_elem {
u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
struct nft_data val;
} key;
+ union {
+ u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
+ struct nft_data val;
+ } data;
void *priv;
};
@@ -368,7 +371,8 @@ struct nft_set_ops {
int (*init)(const struct nft_set *set,
const struct nft_set_desc *desc,
const struct nlattr * const nla[]);
- void (*destroy)(const struct nft_set *set);
+ void (*destroy)(const struct nft_ctx *ctx,
+ const struct nft_set *set);
void (*gc_init)(const struct nft_set *set);
unsigned int elemsize;
@@ -398,6 +402,7 @@ void nft_unregister_set(struct nft_set_type *type);
*
* @list: table set list node
* @bindings: list of set bindings
+ * @refs: internal refcounting for async set destruction
* @table: table this set belongs to
* @net: netnamespace this set belongs to
* @name: name of the set
@@ -424,6 +429,7 @@ void nft_unregister_set(struct nft_set_type *type);
struct nft_set {
struct list_head list;
struct list_head bindings;
+ refcount_t refs;
struct nft_table *table;
possible_net_t net;
char *name;
@@ -442,7 +448,8 @@ struct nft_set {
unsigned char *udata;
/* runtime data below here */
const struct nft_set_ops *ops ____cacheline_aligned;
- u16 flags:14,
+ u16 flags:13,
+ dead:1,
genmask:2;
u8 klen;
u8 dlen;
@@ -460,6 +467,11 @@ static inline void *nft_set_priv(const struct nft_set *set)
return (void *)set->data;
}
+static inline bool nft_set_gc_is_pending(const struct nft_set *s)
+{
+ return refcount_read(&s->refs) != 1;
+}
+
static inline struct nft_set *nft_set_container_of(const void *priv)
{
return (void *)priv - offsetof(struct nft_set, data);
@@ -493,6 +505,7 @@ struct nft_set_binding {
};
enum nft_trans_phase;
+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set);
void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding,
enum nft_trans_phase phase);
@@ -661,62 +674,8 @@ void *nft_set_elem_init(const struct nft_set *set,
u64 timeout, u64 expiration, gfp_t gfp);
void nft_set_elem_destroy(const struct nft_set *set, void *elem,
bool destroy_expr);
-
-/**
- * struct nft_set_gc_batch_head - nf_tables set garbage collection batch
- *
- * @rcu: rcu head
- * @set: set the elements belong to
- * @cnt: count of elements
- */
-struct nft_set_gc_batch_head {
- struct rcu_head rcu;
- const struct nft_set *set;
- unsigned int cnt;
-};
-
-#define NFT_SET_GC_BATCH_SIZE ((PAGE_SIZE - \
- sizeof(struct nft_set_gc_batch_head)) / \
- sizeof(void *))
-
-/**
- * struct nft_set_gc_batch - nf_tables set garbage collection batch
- *
- * @head: GC batch head
- * @elems: garbage collection elements
- */
-struct nft_set_gc_batch {
- struct nft_set_gc_batch_head head;
- void *elems[NFT_SET_GC_BATCH_SIZE];
-};
-
-struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
- gfp_t gfp);
-void nft_set_gc_batch_release(struct rcu_head *rcu);
-
-static inline void nft_set_gc_batch_complete(struct nft_set_gc_batch *gcb)
-{
- if (gcb != NULL)
- call_rcu(&gcb->head.rcu, nft_set_gc_batch_release);
-}
-
-static inline struct nft_set_gc_batch *
-nft_set_gc_batch_check(const struct nft_set *set, struct nft_set_gc_batch *gcb,
- gfp_t gfp)
-{
- if (gcb != NULL) {
- if (gcb->head.cnt + 1 < ARRAY_SIZE(gcb->elems))
- return gcb;
- nft_set_gc_batch_complete(gcb);
- }
- return nft_set_gc_batch_alloc(set, gfp);
-}
-
-static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
- void *elem)
-{
- gcb->elems[gcb->head.cnt++] = elem;
-}
+void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
+ const struct nft_set *set, void *elem);
struct nft_expr_ops;
/**
@@ -752,6 +711,7 @@ struct nft_expr_type {
enum nft_trans_phase {
NFT_TRANS_PREPARE,
+ NFT_TRANS_PREPARE_ERROR,
NFT_TRANS_ABORT,
NFT_TRANS_COMMIT,
NFT_TRANS_RELEASE
@@ -1008,6 +968,29 @@ int __nft_release_basechain(struct nft_ctx *ctx);
unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
+static inline bool nft_use_inc(u32 *use)
+{
+ if (*use == UINT_MAX)
+ return false;
+
+ (*use)++;
+
+ return true;
+}
+
+static inline void nft_use_dec(u32 *use)
+{
+ WARN_ON_ONCE((*use)-- == 0);
+}
+
+/* For error and abort path: restore use counter to previous state. */
+static inline void nft_use_inc_restore(u32 *use)
+{
+ WARN_ON_ONCE(!nft_use_inc(use));
+}
+
+#define nft_use_dec_restore nft_use_dec
+
/**
* struct nft_table - nf_tables table
*
@@ -1077,8 +1060,8 @@ struct nft_object {
struct list_head list;
struct rhlist_head rhlhead;
struct nft_object_hash_key key;
- u32 genmask:2,
- use:30;
+ u32 genmask:2;
+ u32 use;
u64 handle;
/* runtime data below here */
const struct nft_object_ops *ops ____cacheline_aligned;
@@ -1180,8 +1163,8 @@ struct nft_flowtable {
int hooknum;
int priority;
int ops_len;
- u32 genmask:2,
- use:30;
+ u32 genmask:2;
+ u32 use;
u64 handle;
/* runtime data below here */
struct nf_hook_ops *ops ____cacheline_aligned;
@@ -1320,45 +1303,37 @@ static inline void nft_set_elem_change_active(const struct net *net,
#endif /* IS_ENABLED(CONFIG_NF_TABLES) */
-/*
- * We use a free bit in the genmask field to indicate the element
- * is busy, meaning it is currently being processed either by
- * the netlink API or GC.
- *
- * Even though the genmask is only a single byte wide, this works
- * because the extension structure if fully constant once initialized,
- * so there are no non-atomic write accesses unless it is already
- * marked busy.
- */
-#define NFT_SET_ELEM_BUSY_MASK (1 << 2)
+#define NFT_SET_ELEM_DEAD_MASK (1 << 2)
#if defined(__LITTLE_ENDIAN_BITFIELD)
-#define NFT_SET_ELEM_BUSY_BIT 2
+#define NFT_SET_ELEM_DEAD_BIT 2
#elif defined(__BIG_ENDIAN_BITFIELD)
-#define NFT_SET_ELEM_BUSY_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2)
+#define NFT_SET_ELEM_DEAD_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2)
#else
#error
#endif
-static inline int nft_set_elem_mark_busy(struct nft_set_ext *ext)
+static inline void nft_set_elem_dead(struct nft_set_ext *ext)
{
unsigned long *word = (unsigned long *)ext;
BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
- return test_and_set_bit(NFT_SET_ELEM_BUSY_BIT, word);
+ set_bit(NFT_SET_ELEM_DEAD_BIT, word);
}
-static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
+static inline int nft_set_elem_is_dead(const struct nft_set_ext *ext)
{
unsigned long *word = (unsigned long *)ext;
- clear_bit(NFT_SET_ELEM_BUSY_BIT, word);
+ BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
+ return test_bit(NFT_SET_ELEM_DEAD_BIT, word);
}
/**
* struct nft_trans - nf_tables object update in transaction
*
* @list: used internally
+ * @binding_list: list of objects with possible bindings
* @msg_type: message type
* @put_net: ctx->net needs to be put
* @ctx: transaction context
@@ -1366,6 +1341,7 @@ static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
*/
struct nft_trans {
struct list_head list;
+ struct list_head binding_list;
int msg_type;
bool put_net;
struct nft_ctx ctx;
@@ -1416,13 +1392,10 @@ struct nft_trans_chain {
struct nft_trans_table {
bool update;
- bool enable;
};
#define nft_trans_table_update(trans) \
(((struct nft_trans_table *)trans->data)->update)
-#define nft_trans_table_enable(trans) \
- (((struct nft_trans_table *)trans->data)->enable)
struct nft_trans_elem {
struct nft_set *set;
@@ -1457,6 +1430,35 @@ struct nft_trans_flowtable {
#define nft_trans_flowtable(trans) \
(((struct nft_trans_flowtable *)trans->data)->flowtable)
+#define NFT_TRANS_GC_BATCHCOUNT 256
+
+struct nft_trans_gc {
+ struct list_head list;
+ struct net *net;
+ struct nft_set *set;
+ u32 seq;
+ u16 count;
+ void *priv[NFT_TRANS_GC_BATCHCOUNT];
+ struct rcu_head rcu;
+};
+
+struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
+ unsigned int gc_seq, gfp_t gfp);
+void nft_trans_gc_destroy(struct nft_trans_gc *trans);
+
+struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc,
+ unsigned int gc_seq, gfp_t gfp);
+void nft_trans_gc_queue_async_done(struct nft_trans_gc *gc);
+
+struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp);
+void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans);
+
+void nft_trans_gc_elem_add(struct nft_trans_gc *gc, void *priv);
+
+void nft_setelem_data_deactivate(const struct net *net,
+ const struct nft_set *set,
+ struct nft_set_elem *elem);
+
int __init nft_chain_filter_init(void);
void nft_chain_filter_fini(void);
@@ -1468,4 +1470,16 @@ void nf_tables_trans_destroy_flush_work(void);
int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result);
__be64 nf_jiffies64_to_msecs(u64 input);
+struct nftables_pernet {
+ struct list_head tables;
+ struct list_head commit_list;
+ struct list_head binding_list;
+ struct list_head module_list;
+ struct list_head notify_list;
+ struct mutex commit_mutex;
+ unsigned int base_seq;
+ u8 validate_state;
+ unsigned int gc_seq;
+};
+
#endif /* _NET_NF_TABLES_H */
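
With use widened from a 30-bit bitfield to a full u32, nft_use_inc() can refuse to increment at UINT_MAX instead of silently wrapping, and the _restore variants re-apply a counter change on the abort path. A minimal sketch of the saturating counter pair (stderr stands in for the kernel's WARN_ON_ONCE):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static bool use_inc(unsigned int *use)
{
    if (*use == UINT_MAX)
        return false;        /* refuse to overflow; caller must bail out */
    (*use)++;
    return true;
}

static void use_dec(unsigned int *use)
{
    if (*use == 0) {
        fprintf(stderr, "use count underflow\n");
        return;
    }
    (*use)--;
}

int main(void)
{
    unsigned int use = UINT_MAX - 1;

    printf("inc ok=%d use=%u\n", use_inc(&use), use);   /* succeeds */
    printf("inc ok=%d use=%u\n", use_inc(&use), use);   /* refused  */
    use_dec(&use);
    printf("after dec use=%u\n", use);
    return 0;
}
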
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 7281895fa6d9..6a3fd54c69c1 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -25,13 +25,13 @@ void nf_tables_core_module_exit(void);
struct nft_cmp_fast_expr {
u32 data;
- enum nft_registers sreg:8;
+ u8 sreg;
u8 len;
};
struct nft_immediate_expr {
struct nft_data data;
- enum nft_registers dreg:8;
+ u8 dreg;
u8 dlen;
};
@@ -51,14 +51,14 @@ struct nft_payload {
enum nft_payload_bases base:8;
u8 offset;
u8 len;
- enum nft_registers dreg:8;
+ u8 dreg;
};
struct nft_payload_set {
enum nft_payload_bases base:8;
u8 offset;
u8 len;
- enum nft_registers sreg:8;
+ u8 sreg;
u8 csum_type;
u8 csum_offset;
u8 csum_flags;
diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
index 82d0e41b76f2..faa108b1ba67 100644
--- a/include/net/netfilter/nf_tproxy.h
+++ b/include/net/netfilter/nf_tproxy.h
@@ -17,6 +17,13 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
return false;
}
+static inline void nf_tproxy_twsk_deschedule_put(struct inet_timewait_sock *tw)
+{
+ local_bh_disable();
+ inet_twsk_deschedule_put(tw);
+ local_bh_enable();
+}
+
/* assign a socket to the skb -- consumes sk */
static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
{
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index 628b6fa579cd..237f3757637e 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -5,7 +5,7 @@
#include <net/netfilter/nf_tables.h>
struct nft_fib {
- enum nft_registers dreg:8;
+ u8 dreg;
u8 result;
u32 flags;
};
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
index 07e2fd507963..2dce55c736f4 100644
--- a/include/net/netfilter/nft_meta.h
+++ b/include/net/netfilter/nft_meta.h
@@ -7,8 +7,8 @@
struct nft_meta {
enum nft_meta_keys key:8;
union {
- enum nft_registers dreg:8;
- enum nft_registers sreg:8;
+ u8 dreg;
+ u8 sreg;
};
};
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 022a0fd1a5a4..bf9c23fac74f 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -78,8 +78,8 @@ struct netns_ipv6 {
struct dst_ops ip6_dst_ops;
rwlock_t fib6_walker_lock;
spinlock_t fib6_gc_lock;
- unsigned int ip6_rt_gc_expire;
- unsigned long ip6_rt_last_gc;
+ atomic_t ip6_rt_gc_expire;
+ unsigned long ip6_rt_last_gc;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
unsigned int fib6_rules_require_fldissect;
bool fib6_has_custom_rules;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index ca043342c0eb..2e57312ac589 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -25,9 +25,6 @@ struct netns_nf {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
struct nf_hook_entries __rcu *hooks_bridge[NF_INET_NUMHOOKS];
#endif
-#if IS_ENABLED(CONFIG_DECNET)
- struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS];
-#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
bool defrag_ipv4;
#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index a1a8d45adb42..8c77832d0240 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -5,13 +5,7 @@
#include <linux/list.h>
struct netns_nftables {
- struct list_head tables;
- struct list_head commit_list;
- struct list_head module_list;
- struct mutex commit_mutex;
- unsigned int base_seq;
u8 gencursor;
- u8 validate_state;
};
#endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index b59d73d529ba..0e1cef938ba4 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -49,6 +49,7 @@ struct netns_xfrm {
struct list_head policy_all;
struct hlist_head *policy_byidx;
unsigned int policy_idx_hmask;
+ unsigned int idx_generator;
struct hlist_head policy_inexact[XFRM_POLICY_MAX];
struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX];
unsigned int policy_count[XFRM_POLICY_MAX * 2];
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 5d277d68fd8d..c55e72474eb2 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -266,7 +266,7 @@ struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp);
int nfc_set_remote_general_bytes(struct nfc_dev *dev,
- u8 *gt, u8 gt_len);
+ const u8 *gt, u8 gt_len);
u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len);
int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
@@ -280,7 +280,7 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
u8 comm_mode, u8 rf_mode);
int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode,
- u8 *gb, size_t gb_len);
+ const u8 *gb, size_t gb_len);
int nfc_tm_deactivated(struct nfc_dev *dev);
int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb);
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index d1585b54fb0b..fd99650a2e22 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -124,12 +124,14 @@ static inline void qdisc_run(struct Qdisc *q)
}
}
+extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+
/* Calculate maximal size of packet seen by hard_start_xmit
routine of this device.
*/
static inline unsigned int psched_mtu(const struct net_device *dev)
{
- return dev->mtu + dev->hard_header_len;
+ return READ_ONCE(dev->mtu) + dev->hard_header_len;
}
static inline struct net *qdisc_net(struct Qdisc *q)
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 2b778e1d2d8f..0fd2df844fc7 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -35,8 +35,6 @@
/* This is used to register protocols. */
struct net_protocol {
- int (*early_demux)(struct sk_buff *skb);
- int (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
/* This returns an error if we weren't able to handle the error. */
@@ -53,8 +51,6 @@ struct net_protocol {
#if IS_ENABLED(CONFIG_IPV6)
struct inet6_protocol {
- void (*early_demux)(struct sk_buff *skb);
- void (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
/* This returns an error if we weren't able to handle the error. */
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 4da61c950e93..5c2a73bbfabe 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -166,8 +166,8 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
int rtnl_delete_link(struct net_device *dev);
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
- struct netlink_ext_ack *exterr);
+int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
+ struct netlink_ext_ack *exterr);
struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid);
#define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 1ee396ce0eda..e8034756cbf8 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -1334,4 +1334,11 @@ static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
qstats_overlimit_inc(res->qstats);
}
+/* Make sure qdisc is no longer in SCHED state. */
+static inline void qdisc_synchronize(const struct Qdisc *q)
+{
+ while (test_bit(__QDISC_STATE_SCHED, &q->state))
+ msleep(1);
+}
+
#endif
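
qdisc_synchronize() sleeps in short intervals until the qdisc's __QDISC_STATE_SCHED bit clears. A hedged sketch of the intended use in a teardown path:

    static void example_quiesce_qdisc(struct Qdisc *q)
    {
            qdisc_synchronize(q);   /* wait until no softirq still has it scheduled */
            /* safe to reset or destroy the qdisc's state from here */
    }
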
diff --git a/include/net/scm.h b/include/net/scm.h
index 1ce365f4c256..585adc1346bd 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -105,16 +105,27 @@ static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct sc
}
}
}
+
+static inline bool scm_has_secdata(struct socket *sock)
+{
+ return test_bit(SOCK_PASSSEC, &sock->flags);
+}
#else
static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
{ }
+
+static inline bool scm_has_secdata(struct socket *sock)
+{
+ return false;
+}
#endif /* CONFIG_SECURITY_NETWORK */
static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
struct scm_cookie *scm, int flags)
{
if (!msg->msg_control) {
- if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp)
+ if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp ||
+ scm_has_secdata(sock))
msg->msg_flags |= MSG_CTRUNC;
scm_destroy(scm);
return;
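
With scm_has_secdata(), recvmsg() now reports MSG_CTRUNC when the caller supplies no control buffer but SELinux security data (SO_PASSSEC) was pending, matching the existing behaviour for passed credentials and file descriptors. A small userspace sketch that detects the truncation:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    static void example_check_ctrunc(int fd)
    {
            char data[128];
            struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
            struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
            /* msg_control deliberately left NULL */

            if (recvmsg(fd, &msg, 0) >= 0 && (msg.msg_flags & MSG_CTRUNC))
                    fprintf(stderr, "ancillary data was truncated\n");
    }
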
diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h
index 01a70b27e026..65058faea4db 100644
--- a/include/net/sctp/stream_sched.h
+++ b/include/net/sctp/stream_sched.h
@@ -26,6 +26,8 @@ struct sctp_sched_ops {
int (*init)(struct sctp_stream *stream);
/* Init a stream */
int (*init_sid)(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
+ /* Free a stream */
+ void (*free_sid)(struct sctp_stream *stream, __u16 sid);
/* Frees the entire thing */
void (*free)(struct sctp_stream *stream);
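
The new free_sid callback lets a scheduler release per-stream state for a single stream (e.g. on stream reset) instead of only on whole-stream teardown; the users counter added to struct sctp_stream_priorities in the following structs.h hunk supports refcounting the shared priority buckets. A hedged sketch of what a priority scheduler's free_sid might look like; example_prio_put() is a hypothetical helper, not the upstream code:

    static void example_prio_put(struct sctp_stream_priorities *p); /* hypothetical */

    static void example_sched_free_sid(struct sctp_stream *stream, __u16 sid)
    {
            struct sctp_stream_out_ext *ext = SCTP_SO(stream, sid)->ext;

            if (ext)
                    example_prio_put(ext->prio_head); /* drop the bucket reference */
    }
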
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index cb05e503c9cd..48cbf3352042 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1400,6 +1400,7 @@ struct sctp_stream_priorities {
/* The next stream in line */
struct sctp_stream_out_ext *next;
__u16 prio;
+ __u16 users;
};
struct sctp_stream_out_ext {
diff --git a/include/net/sock.h b/include/net/sock.h
index 9d687070d272..5293f2b65fb5 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -300,7 +300,7 @@ struct bpf_sk_storage;
* @sk_tskey: counter to disambiguate concurrent tstamp requests
* @sk_zckey: counter to order MSG_ZEROCOPY notifications
* @sk_socket: Identd and reporting IO signals
- * @sk_user_data: RPC layer private data
+ * @sk_user_data: RPC layer private data. Write-protected by @sk_callback_lock.
* @sk_frag: cached page frag
* @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
@@ -309,6 +309,7 @@ struct bpf_sk_storage;
* @sk_cgrp_data: cgroup data for this cgroup
* @sk_memcg: this socket's memory cgroup association
* @sk_write_pending: a write to stream socket waits to start
+ * @sk_wait_pending: number of threads blocked on this socket
* @sk_state_change: callback to indicate change in the state of the sock
* @sk_data_ready: callback to indicate there is data to be processed
* @sk_write_space: callback to indicate there is buffer sending space available
@@ -390,6 +391,7 @@ struct sock {
unsigned int sk_napi_id;
#endif
int sk_rcvbuf;
+ int sk_wait_pending;
struct sk_filter __rcu *sk_filter;
union {
@@ -399,7 +401,7 @@ struct sock {
#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
#endif
- struct dst_entry *sk_rx_dst;
+ struct dst_entry __rcu *sk_rx_dst;
struct dst_entry __rcu *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
@@ -995,8 +997,12 @@ static inline void sock_rps_record_flow(const struct sock *sk)
* OR an additional socket flag
* [1] : sk_state and sk_prot are in the same cache line.
*/
- if (sk->sk_state == TCP_ESTABLISHED)
- sock_rps_record_flow_hash(sk->sk_rxhash);
+ if (sk->sk_state == TCP_ESTABLISHED) {
+ /* This READ_ONCE() is paired with the WRITE_ONCE()
+ * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
+ */
+ sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
+ }
}
#endif
}
@@ -1005,20 +1011,25 @@ static inline void sock_rps_save_rxhash(struct sock *sk,
const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
- if (unlikely(sk->sk_rxhash != skb->hash))
- sk->sk_rxhash = skb->hash;
+ /* The following WRITE_ONCE() is paired with the READ_ONCE()
+ * here, and another one in sock_rps_record_flow().
+ */
+ if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
+ WRITE_ONCE(sk->sk_rxhash, skb->hash);
#endif
}
static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
- sk->sk_rxhash = 0;
+ /* Paired with READ_ONCE() in sock_rps_record_flow() */
+ WRITE_ONCE(sk->sk_rxhash, 0);
#endif
}
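
The sk_rxhash annotations above follow the kernel's lockless data-race idiom: every racy reader uses READ_ONCE() and every racy writer WRITE_ONCE(), which stops the compiler from tearing, fusing or re-reading the access and documents the race for KCSAN. A minimal sketch of the pattern in isolation:

    static u32 example_hash;                /* shared, updated without a lock */

    static void example_writer(u32 h)
    {
            if (READ_ONCE(example_hash) != h)       /* racy test is intentional */
                    WRITE_ONCE(example_hash, h);
    }

    static u32 example_reader(void)
    {
            return READ_ONCE(example_hash);
    }
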
#define sk_wait_event(__sk, __timeo, __condition, __wait) \
({ int __rc; \
+ __sk->sk_wait_pending++; \
release_sock(__sk); \
__rc = __condition; \
if (!__rc) { \
@@ -1028,6 +1039,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
} \
sched_annotate_sleep(); \
lock_sock(__sk); \
+ __sk->sk_wait_pending--; \
__rc = __condition; \
__rc; \
})
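
The sk_wait_pending counter bracketing sk_wait_event() lets teardown paths detect threads still sleeping on the socket; in this series it is used so paths like tcp_disconnect() can bail out while waiters remain. A hedged sketch of such a check:

    static bool example_has_sleepers(const struct sock *sk)
    {
            /* non-zero while some thread sleeps inside sk_wait_event() */
            return READ_ONCE(sk->sk_wait_pending) != 0;
    }
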
@@ -1149,6 +1161,7 @@ struct proto {
/*
* Pressure flag: try to collapse.
* Technical note: it is used by multiple contexts non-atomically.
+ * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes.
* All the __sk_mem_schedule() is of this nature: accounting
* is strict, actions are advisory and have some latency.
*/
@@ -1262,6 +1275,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
return sk->sk_prot->memory_pressure != NULL;
}
+static inline bool sk_under_global_memory_pressure(const struct sock *sk)
+{
+ return sk->sk_prot->memory_pressure &&
+ !!READ_ONCE(*sk->sk_prot->memory_pressure);
+}
+
static inline bool sk_under_memory_pressure(const struct sock *sk)
{
if (!sk->sk_prot->memory_pressure)
@@ -1271,7 +1290,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
mem_cgroup_under_socket_pressure(sk->sk_memcg))
return true;
- return !!*sk->sk_prot->memory_pressure;
+ return !!READ_ONCE(*sk->sk_prot->memory_pressure);
}
static inline long
@@ -1325,7 +1344,7 @@ proto_memory_pressure(struct proto *prot)
{
if (!prot->memory_pressure)
return false;
- return !!*prot->memory_pressure;
+ return !!READ_ONCE(*prot->memory_pressure);
}
@@ -1706,7 +1725,12 @@ void sk_common_release(struct sock *sk);
* Default socket callbacks and setup code
*/
-/* Initialise core socket variables */
+/* Initialise core socket variables using an explicit uid. */
+void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid);
+
+/* Initialise core socket variables.
+ * Assumes struct socket *sock is embedded in a struct socket_alloc.
+ */
void sock_init_data(struct socket *sock, struct sock *sk);
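
sock_init_data() derives the owning uid from the socket inode, which is wrong for callers whose struct socket is not embedded in a struct socket_alloc and so has no inode behind it. A hedged sketch of the new variant in such a driver; using current_fsuid() is illustrative, a caller may equally pass an inode's i_uid:

    static void example_init_sock(struct socket *sock, struct sock *sk)
    {
            /* no backing inode: supply the opener's uid explicitly */
            sock_init_data_uid(sock, sk, current_fsuid());
    }
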
/*
@@ -1758,21 +1782,33 @@ static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
/* sk_tx_queue_mapping accept only upto a 16-bit value */
if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
return;
- sk->sk_tx_queue_mapping = tx_queue;
+ /* Paired with READ_ONCE() in sk_tx_queue_get() and
+ * other WRITE_ONCE() because the socket lock might not be held.
+ */
+ WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
}
#define NO_QUEUE_MAPPING USHRT_MAX
static inline void sk_tx_queue_clear(struct sock *sk)
{
- sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
+ /* Paired with READ_ONCE() in sk_tx_queue_get() and
+ * other WRITE_ONCE() because the socket lock might not be held.
+ */
+ WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
}
static inline int sk_tx_queue_get(const struct sock *sk)
{
- if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
- return sk->sk_tx_queue_mapping;
+ if (sk) {
+ /* Paired with WRITE_ONCE() in sk_tx_queue_clear()
+ * and sk_tx_queue_set().
+ */
+ int val = READ_ONCE(sk->sk_tx_queue_mapping);
+ if (val != NO_QUEUE_MAPPING)
+ return val;
+ }
return -1;
}
@@ -1846,6 +1882,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
}
kuid_t sock_i_uid(struct sock *sk);
+unsigned long __sock_i_ino(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);
static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
@@ -1904,7 +1941,7 @@ static inline void dst_negative_advice(struct sock *sk)
if (ndst != dst) {
rcu_assign_pointer(sk->sk_dst_cache, ndst);
sk_tx_queue_clear(sk);
- sk->sk_dst_pending_confirm = 0;
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
}
}
}
@@ -1915,7 +1952,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
struct dst_entry *old_dst;
sk_tx_queue_clear(sk);
- sk->sk_dst_pending_confirm = 0;
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
old_dst = rcu_dereference_protected(sk->sk_dst_cache,
lockdep_sock_is_held(sk));
rcu_assign_pointer(sk->sk_dst_cache, dst);
@@ -1928,7 +1965,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
struct dst_entry *old_dst;
sk_tx_queue_clear(sk);
- sk->sk_dst_pending_confirm = 0;
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
dst_release(old_dst);
}
@@ -2167,11 +2204,26 @@ static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struc
return false;
}
+static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk)
+{
+ skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
+ if (skb) {
+ if (sk_rmem_schedule(sk, skb, skb->truesize)) {
+ skb_set_owner_r(skb, sk);
+ return skb;
+ }
+ __kfree_skb(skb);
+ }
+ return NULL;
+}
+
void sk_reset_timer(struct sock *sk, struct timer_list *timer,
unsigned long expires);
void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
+
int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
struct sk_buff *skb, unsigned int flags,
void (*destructor)(struct sock *sk,
@@ -2428,7 +2480,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
__sock_recv_ts_and_drops(msg, sk, skb);
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
sock_write_timestamp(sk, skb->tstamp);
- else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
+ else if (unlikely(sock_read_timestamp(sk) == SK_DEFAULT_STAMP))
sock_write_timestamp(sk, 0);
}
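
skb_clone_and_charge_r() only hands back a clone if the destination socket can absorb the receive-memory charge, so owner assignment and accounting can no longer diverge. A hedged usage sketch (a real caller holds the receive-queue lock):

    static int example_queue_clone(struct sk_buff *skb, struct sock *sk)
    {
            struct sk_buff *clone = skb_clone_and_charge_r(skb, sk);

            if (!clone)
                    return -ENOMEM;         /* clone failed or rmem exhausted */
            __skb_queue_tail(&sk->sk_receive_queue, clone);
            return 0;
    }
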
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 8459145497b7..164ba7b77bd9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -125,6 +125,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
* to combine FIN-WAIT-2 timeout with
* TIME-WAIT timer.
*/
+#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
#define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
#if HZ >= 100
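
TCP_FIN_TIMEOUT_MAX caps the TCP_LINGER2 socket option so oversized values cannot feed into the FIN-WAIT-2 timer arithmetic; the setsockopt path clamps rather than rejects. A small userspace sketch:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    static void example_set_linger2(int fd)
    {
            int secs = 600; /* effectively clamped to 120 (TCP_FIN_TIMEOUT_MAX / HZ) */

            setsockopt(fd, IPPROTO_TCP, TCP_LINGER2, &secs, sizeof(secs));
    }
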
@@ -137,6 +138,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCP_RTO_MAX ((unsigned)(120*HZ))
#define TCP_RTO_MIN ((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */
+
+#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
+
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
* used as a fallback RTO for the
@@ -342,13 +346,14 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
-void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
-static inline void tcp_dec_quickack_mode(struct sock *sk,
- const unsigned int pkts)
+static inline void tcp_dec_quickack_mode(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_ack.quick) {
+ /* How many ACKs S/ACKing new data have we sent? */
+ const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
+
if (pkts >= icsk->icsk_ack.quick) {
icsk->icsk_ack.quick = 0;
/* Leaving quickack mode we deflate ATO. */
@@ -386,6 +391,7 @@ void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
+void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op);
@@ -548,7 +554,7 @@ static inline u32 tcp_cookie_time(void)
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
-u64 cookie_init_timestamp(struct request_sock *req);
+u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
@@ -770,10 +776,16 @@ static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}
+/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
+static inline u64 tcp_ns_to_ts(u64 ns)
+{
+ return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
+}
+
/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
static inline u32 tcp_time_stamp_raw(void)
{
- return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
+ return tcp_ns_to_ts(tcp_clock_ns());
}
void tcp_mstamp_refresh(struct tcp_sock *tp);
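
tcp_ns_to_ts() factors out the nanosecond-to-TSval conversion now shared by tcp_time_stamp_raw() and tcp_skb_timestamp(). With TCP_TS_HZ == 1000 the divisor NSEC_PER_SEC / TCP_TS_HZ is 1,000,000, i.e. one TSval tick per millisecond:

    /* Worked example: 1.5 s expressed in ns maps to 1500 TSval ticks. */
    static void example_ts_conversion(void)
    {
            u64 ns = 1500000000ULL;                 /* 1.5 s */
            u64 tsval = tcp_ns_to_ts(ns);           /* 1500000000 / 1000000 = 1500 */
            (void)tsval;
    }
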
@@ -785,7 +797,7 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
- return div_u64(skb->skb_mstamp_ns, NSEC_PER_SEC / TCP_TS_HZ);
+ return tcp_ns_to_ts(skb->skb_mstamp_ns);
}
/* provide the departure time in us unit */
@@ -922,6 +934,8 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb)
#endif
return 0;
}
+
+void tcp_v6_early_demux(struct sk_buff *skb);
#endif
static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
@@ -1258,11 +1272,14 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
+ if (tp->is_cwnd_limited)
+ return true;
+
/* If in slow start, ensure cwnd grows to twice what was ACKed. */
if (tcp_in_slow_start(tp))
return tp->snd_cwnd < 2 * tp->max_packets_out;
- return tp->is_cwnd_limited;
+ return false;
}
/* BBR congestion control needs pacing.
@@ -1947,7 +1964,11 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
- return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
+ u32 val;
+
+ val = READ_ONCE(tp->notsent_lowat);
+
+ return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
}
/* @wake is one when sk_stream_write_space() calls us.
diff --git a/include/net/udp.h b/include/net/udp.h
index e66854e767dc..7323f72fed70 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -169,6 +169,7 @@ typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
struct udphdr *uh, udp_lookup_t lookup);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
+void udp_v6_early_demux(struct sk_buff *skb);
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
netdev_features_t features);
@@ -260,7 +261,7 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
}
/* net/ipv4/udp.c */
-void udp_destruct_sock(struct sock *sk);
+void udp_destruct_common(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 9185e45b997f..c59ba86668af 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -24,14 +24,6 @@ static __inline__ int udplite_getfrag(void *from, char *to, int offset,
return copy_from_iter_full(to, len, &msg->msg_iter) ? 0 : -EFAULT;
}
-/* Designate sk as UDP-Lite socket */
-static inline int udplite_sk_init(struct sock *sk)
-{
- udp_init_sock(sk);
- udp_sk(sk)->pcflag = UDPLITE_BIT;
- return 0;
-}
-
/*
* Checksumming routines
*/
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 373aadcfea21..d79ee21d7932 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -323,10 +323,15 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
return features;
}
-/* IP header + UDP + VXLAN + Ethernet header */
-#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
-/* IPv6 header + UDP + VXLAN + Ethernet header */
-#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
+static inline int vxlan_headroom(u32 flags)
+{
+ /* VXLAN: IP4/6 header + UDP + VXLAN + Ethernet header */
+ /* VXLAN-GPE: IP4/6 header + UDP + VXLAN */
+ return (flags & VXLAN_F_IPV6 ? sizeof(struct ipv6hdr) :
+ sizeof(struct iphdr)) +
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr) +
+ (flags & VXLAN_F_GPE ? 0 : ETH_HLEN);
+}
static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb)
{
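
vxlan_headroom() reproduces the constants it replaces and fixes VXLAN-GPE, which carries no inner Ethernet header. The arithmetic, spelled out:

    /*
     *   vxlan_headroom(0)                          = 20 + 8 + 8 + 14 = 50  (old VXLAN_HEADROOM)
     *   vxlan_headroom(VXLAN_F_IPV6)               = 40 + 8 + 8 + 14 = 70  (old VXLAN6_HEADROOM)
     *   vxlan_headroom(VXLAN_F_GPE)                = 20 + 8 + 8      = 36
     *   vxlan_headroom(VXLAN_F_IPV6 | VXLAN_F_GPE) = 40 + 8 + 8      = 56
     */
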
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 91bd749a02f7..dafde3d764d0 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -143,6 +143,12 @@ struct scsi_cmnd {
unsigned char tag; /* SCSI-II queued command tag */
};
+/* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. */
+static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
+{
+ return blk_mq_rq_from_pdu(scmd);
+}
+
/*
* Return the driver private allocation behind the command.
* Only works if cmd_size is set in the host template.
@@ -204,7 +210,7 @@ static inline int scsi_get_resid(struct scsi_cmnd *cmd)
for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
static inline int scsi_sg_copy_from_buffer(struct scsi_cmnd *cmd,
- void *buf, int buflen)
+ const void *buf, int buflen)
{
return sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
buf, buflen);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 31e0d6ca1eba..4488c3468c6d 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -744,7 +744,7 @@ extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
-extern struct Scsi_Host *scsi_host_lookup(unsigned short);
+extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum);
extern const char *scsi_host_state_name(enum scsi_host_state);
static inline int __must_check scsi_add_host(struct Scsi_Host *host,
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 7800e12ee042..cf6e58fa5266 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -140,7 +140,10 @@ int rpi_firmware_property(struct rpi_firmware *fw,
u32 tag, void *data, size_t len);
int rpi_firmware_property_list(struct rpi_firmware *fw,
void *data, size_t tag_size);
+void rpi_firmware_put(struct rpi_firmware *fw);
struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
+struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+ struct device_node *firmware_node);
#else
static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
void *data, size_t len)
@@ -154,10 +157,17 @@ static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
return -ENOSYS;
}
+static inline void rpi_firmware_put(struct rpi_firmware *fw) { }
static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
{
return NULL;
}
+
+static inline struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+ struct device_node *firmware_node)
+{
+ return NULL;
+}
#endif
#endif /* __SOC_RASPBERRY_FIRMWARE_H__ */
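
devm_rpi_firmware_get() ties the firmware reference to the consumer device's lifetime, so the new rpi_firmware_put() need not be called on error or remove paths. A hedged probe sketch; the "firmware" phandle name follows the existing bcm2835 binding convention but is an assumption here:

    static int example_probe(struct platform_device *pdev)
    {
            struct device_node *fwnode;
            struct rpi_firmware *fw;

            fwnode = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
            if (!fwnode)
                    return -ENOENT;

            fw = devm_rpi_firmware_get(&pdev->dev, fwnode);
            of_node_put(fwnode);
            if (!fw)
                    return -EPROBE_DEFER;   /* firmware driver not bound yet */
            return 0;
    }
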
diff --git a/include/soc/xilinx/xlnx_vcu.h b/include/soc/xilinx/xlnx_vcu.h
new file mode 100644
index 000000000000..ff03ede993ed
--- /dev/null
+++ b/include/soc/xilinx/xlnx_vcu.h
@@ -0,0 +1,39 @@
+/*
+ * Xilinx VCU Init
+ *
+ * Copyright (C) 2016 - 2017 Xilinx, Inc.
+ *
+ * Contact: Dhaval Shah <dshah@xilinx.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#ifndef _XILINX_VCU_H_
+#define _XILINX_VCU_H_
+
+#define VCU_GASKET_INIT 0x74
+#define VCU_GASKET_VALUE 0x03
+
+/**
+ * struct xvcu_device - Xilinx VCU init device structure
+ * @dev: Platform device
+ * @pll_ref: pll ref clock source
+ * @reset_gpio: vcu reset gpio
+ * @aclk: axi clock source
+ * @logicore_reg_ba: logicore reg base address
+ * @vcu_slcr_ba: vcu_slcr Register base address
+ */
+struct xvcu_device {
+ struct device *dev;
+ struct clk *pll_ref;
+ struct clk *aclk;
+ struct gpio_desc *reset_gpio;
+ void __iomem *logicore_reg_ba;
+ void __iomem *vcu_slcr_ba;
+};
+
+u32 xvcu_get_color_depth(struct xvcu_device *xvcu);
+u32 xvcu_get_memory_depth(struct xvcu_device *xvcu);
+u32 xvcu_get_clock_frequency(struct xvcu_device *xvcu);
+u32 xvcu_get_num_cores(struct xvcu_device *xvcu);
+
+#endif /* _XILINX_VCU_H_ */
diff --git a/include/sound/core.h b/include/sound/core.h
index ee238f100f73..e4b24dcb4b19 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -119,6 +119,9 @@ struct snd_card {
bool registered; /* card_dev is registered? */
wait_queue_head_t remove_sleep;
+ size_t total_pcm_alloc_bytes; /* total amount of allocated buffers */
+ struct mutex memory_mutex; /* protection for the above */
+
#ifdef CONFIG_PM
unsigned int power_state; /* power state */
wait_queue_head_t power_sleep;
@@ -440,4 +443,12 @@ snd_pci_quirk_lookup_id(u16 vendor, u16 device,
}
#endif
+/* async signal helpers */
+struct snd_fasync;
+
+int snd_fasync_helper(int fd, struct file *file, int on,
+ struct snd_fasync **fasyncp);
+void snd_kill_fasync(struct snd_fasync *fasync, int signal, int poll);
+void snd_fasync_free(struct snd_fasync *fasync);
+
#endif /* __SOUND_CORE_H */
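
The snd_fasync helpers exist because kill_fasync() on a raw fasync_struct can race with fasync_helper(); drivers store an opaque struct snd_fasync instead, and the signalling is deferred to a safe context. A hedged usage sketch (snd_fasync_free() would be called from the driver's release path):

    struct example_dev {
            struct snd_fasync *fasync;
    };

    static int example_fasync(int fd, struct file *file, int on,
                              struct example_dev *dev)
    {
            return snd_fasync_helper(fd, file, on, &dev->fasync);
    }

    static void example_notify(struct example_dev *dev)
    {
            snd_kill_fasync(dev->fasync, SIGIO, POLL_IN);   /* atomic-safe */
    }
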
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 44e57bcc4a57..749b1bce9fc6 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -555,6 +555,8 @@ int snd_hdac_stream_set_params(struct hdac_stream *azx_dev,
void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start);
void snd_hdac_stream_clear(struct hdac_stream *azx_dev);
void snd_hdac_stream_stop(struct hdac_stream *azx_dev);
+void snd_hdac_stop_streams(struct hdac_bus *bus);
+void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus);
void snd_hdac_stream_reset(struct hdac_stream *azx_dev);
void snd_hdac_stream_sync_trigger(struct hdac_stream *azx_dev, bool set,
unsigned int streams, unsigned int reg);
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 23dc8deac344..91440476319a 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -92,7 +92,6 @@ void snd_hdac_ext_stream_decouple_locked(struct hdac_bus *bus,
struct hdac_ext_stream *azx_dev, bool decouple);
void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
struct hdac_ext_stream *azx_dev, bool decouple);
-void snd_hdac_ext_stop_streams(struct hdac_bus *bus);
int snd_hdac_ext_stream_set_spib(struct hdac_bus *bus,
struct hdac_ext_stream *stream, u32 value);
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index f0045f842a60..299e35458863 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -104,24 +104,24 @@ struct snd_pcm_ops {
#define SNDRV_PCM_POS_XRUN ((snd_pcm_uframes_t)-1)
/* If you change this don't forget to change rates[] table in pcm_native.c */
-#define SNDRV_PCM_RATE_5512 (1<<0) /* 5512Hz */
-#define SNDRV_PCM_RATE_8000 (1<<1) /* 8000Hz */
-#define SNDRV_PCM_RATE_11025 (1<<2) /* 11025Hz */
-#define SNDRV_PCM_RATE_16000 (1<<3) /* 16000Hz */
-#define SNDRV_PCM_RATE_22050 (1<<4) /* 22050Hz */
-#define SNDRV_PCM_RATE_32000 (1<<5) /* 32000Hz */
-#define SNDRV_PCM_RATE_44100 (1<<6) /* 44100Hz */
-#define SNDRV_PCM_RATE_48000 (1<<7) /* 48000Hz */
-#define SNDRV_PCM_RATE_64000 (1<<8) /* 64000Hz */
-#define SNDRV_PCM_RATE_88200 (1<<9) /* 88200Hz */
-#define SNDRV_PCM_RATE_96000 (1<<10) /* 96000Hz */
-#define SNDRV_PCM_RATE_176400 (1<<11) /* 176400Hz */
-#define SNDRV_PCM_RATE_192000 (1<<12) /* 192000Hz */
-#define SNDRV_PCM_RATE_352800 (1<<13) /* 352800Hz */
-#define SNDRV_PCM_RATE_384000 (1<<14) /* 384000Hz */
-
-#define SNDRV_PCM_RATE_CONTINUOUS (1<<30) /* continuous range */
-#define SNDRV_PCM_RATE_KNOT (1<<31) /* supports more non-continuos rates */
+#define SNDRV_PCM_RATE_5512 (1U<<0) /* 5512Hz */
+#define SNDRV_PCM_RATE_8000 (1U<<1) /* 8000Hz */
+#define SNDRV_PCM_RATE_11025 (1U<<2) /* 11025Hz */
+#define SNDRV_PCM_RATE_16000 (1U<<3) /* 16000Hz */
+#define SNDRV_PCM_RATE_22050 (1U<<4) /* 22050Hz */
+#define SNDRV_PCM_RATE_32000 (1U<<5) /* 32000Hz */
+#define SNDRV_PCM_RATE_44100 (1U<<6) /* 44100Hz */
+#define SNDRV_PCM_RATE_48000 (1U<<7) /* 48000Hz */
+#define SNDRV_PCM_RATE_64000 (1U<<8) /* 64000Hz */
+#define SNDRV_PCM_RATE_88200 (1U<<9) /* 88200Hz */
+#define SNDRV_PCM_RATE_96000 (1U<<10) /* 96000Hz */
+#define SNDRV_PCM_RATE_176400 (1U<<11) /* 176400Hz */
+#define SNDRV_PCM_RATE_192000 (1U<<12) /* 192000Hz */
+#define SNDRV_PCM_RATE_352800 (1U<<13) /* 352800Hz */
+#define SNDRV_PCM_RATE_384000 (1U<<14) /* 384000Hz */
+
+#define SNDRV_PCM_RATE_CONTINUOUS (1U<<30) /* continuous range */
+#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuous rates */
#define SNDRV_PCM_RATE_8000_44100 (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_11025|\
SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_22050|\
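
The switch to 1U literals matters only for bit 31: shifting a signed 1 into the sign bit of int is undefined behaviour, while the unsigned shift is well defined. The same fix recurs below for AUDIT_BIT and CAP_TO_MASK:

    unsigned int knot = 1U << 31;   /* well defined: 0x80000000 */
    /* int bad = 1 << 31;              undefined: shifts into int's sign bit */
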
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 6e8a31225383..659400d40873 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -16,6 +16,9 @@
#include <sound/asoc.h>
struct device;
+struct snd_pcm_substream;
+struct snd_soc_pcm_runtime;
+struct soc_enum;
/* widget has no PM register bit */
#define SND_SOC_NOPM -1
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index a7613efc271a..88266a7fbad2 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -499,7 +499,7 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(nid_t, nid[3])
+ __array(nid_t, nid, 3)
__field(int, depth)
__field(int, err)
),
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index 2310b259329f..c9fb7b987a3a 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -40,7 +40,7 @@ DECLARE_EVENT_CLASS(jbd2_commit,
TP_STRUCT__entry(
__field( dev_t, dev )
__field( char, sync_commit )
- __field( int, transaction )
+ __field( tid_t, transaction )
),
TP_fast_assign(
@@ -49,7 +49,7 @@ DECLARE_EVENT_CLASS(jbd2_commit,
__entry->transaction = commit_transaction->t_tid;
),
- TP_printk("dev %d,%d transaction %d sync %d",
+ TP_printk("dev %d,%d transaction %u sync %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->transaction, __entry->sync_commit)
);
@@ -97,8 +97,8 @@ TRACE_EVENT(jbd2_end_commit,
TP_STRUCT__entry(
__field( dev_t, dev )
__field( char, sync_commit )
- __field( int, transaction )
- __field( int, head )
+ __field( tid_t, transaction )
+ __field( tid_t, head )
),
TP_fast_assign(
@@ -108,7 +108,7 @@ TRACE_EVENT(jbd2_end_commit,
__entry->head = journal->j_tail_sequence;
),
- TP_printk("dev %d,%d transaction %d sync %d head %d",
+ TP_printk("dev %d,%d transaction %u sync %d head %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->transaction, __entry->sync_commit, __entry->head)
);
@@ -134,14 +134,14 @@ TRACE_EVENT(jbd2_submit_inode_data,
);
TRACE_EVENT(jbd2_handle_start,
- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
unsigned int line_no, int requested_blocks),
TP_ARGS(dev, tid, type, line_no, requested_blocks),
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( unsigned long, tid )
+ __field( tid_t, tid )
__field( unsigned int, type )
__field( unsigned int, line_no )
__field( int, requested_blocks)
@@ -155,14 +155,14 @@ TRACE_EVENT(jbd2_handle_start,
__entry->requested_blocks = requested_blocks;
),
- TP_printk("dev %d,%d tid %lu type %u line_no %u "
+ TP_printk("dev %d,%d tid %u type %u line_no %u "
"requested_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
__entry->type, __entry->line_no, __entry->requested_blocks)
);
TRACE_EVENT(jbd2_handle_extend,
- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
unsigned int line_no, int buffer_credits,
int requested_blocks),
@@ -170,7 +170,7 @@ TRACE_EVENT(jbd2_handle_extend,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( unsigned long, tid )
+ __field( tid_t, tid )
__field( unsigned int, type )
__field( unsigned int, line_no )
__field( int, buffer_credits )
@@ -186,7 +186,7 @@ TRACE_EVENT(jbd2_handle_extend,
__entry->requested_blocks = requested_blocks;
),
- TP_printk("dev %d,%d tid %lu type %u line_no %u "
+ TP_printk("dev %d,%d tid %u type %u line_no %u "
"buffer_credits %d requested_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
__entry->type, __entry->line_no, __entry->buffer_credits,
@@ -194,7 +194,7 @@ TRACE_EVENT(jbd2_handle_extend,
);
TRACE_EVENT(jbd2_handle_stats,
- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
unsigned int line_no, int interval, int sync,
int requested_blocks, int dirtied_blocks),
@@ -203,7 +203,7 @@ TRACE_EVENT(jbd2_handle_stats,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( unsigned long, tid )
+ __field( tid_t, tid )
__field( unsigned int, type )
__field( unsigned int, line_no )
__field( int, interval )
@@ -223,7 +223,7 @@ TRACE_EVENT(jbd2_handle_stats,
__entry->dirtied_blocks = dirtied_blocks;
),
- TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d "
+ TP_printk("dev %d,%d tid %u type %u line_no %u interval %d "
"sync %d requested_blocks %d dirtied_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
__entry->type, __entry->line_no, __entry->interval,
@@ -232,14 +232,14 @@ TRACE_EVENT(jbd2_handle_stats,
);
TRACE_EVENT(jbd2_run_stats,
- TP_PROTO(dev_t dev, unsigned long tid,
+ TP_PROTO(dev_t dev, tid_t tid,
struct transaction_run_stats_s *stats),
TP_ARGS(dev, tid, stats),
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( unsigned long, tid )
+ __field( tid_t, tid )
__field( unsigned long, wait )
__field( unsigned long, request_delay )
__field( unsigned long, running )
@@ -265,7 +265,7 @@ TRACE_EVENT(jbd2_run_stats,
__entry->blocks_logged = stats->rs_blocks_logged;
),
- TP_printk("dev %d,%d tid %lu wait %u request_delay %u running %u "
+ TP_printk("dev %d,%d tid %u wait %u request_delay %u running %u "
"locked %u flushing %u logging %u handle_count %u "
"blocks %u blocks_logged %u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
@@ -280,14 +280,14 @@ TRACE_EVENT(jbd2_run_stats,
);
TRACE_EVENT(jbd2_checkpoint_stats,
- TP_PROTO(dev_t dev, unsigned long tid,
+ TP_PROTO(dev_t dev, tid_t tid,
struct transaction_chp_stats_s *stats),
TP_ARGS(dev, tid, stats),
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( unsigned long, tid )
+ __field( tid_t, tid )
__field( unsigned long, chp_time )
__field( __u32, forced_to_close )
__field( __u32, written )
@@ -303,7 +303,7 @@ TRACE_EVENT(jbd2_checkpoint_stats,
__entry->dropped = stats->cs_dropped;
),
- TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u "
+ TP_printk("dev %d,%d tid %u chp_time %u forced_to_close %u "
"written %u dropped %u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
jiffies_to_msecs(__entry->chp_time),
diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h
index 62bb17516713..5ade62ac49b4 100644
--- a/include/trace/events/neigh.h
+++ b/include/trace/events/neigh.h
@@ -39,7 +39,6 @@ TRACE_EVENT(neigh_create,
),
TP_fast_assign(
- struct in6_addr *pin6;
__be32 *p32;
__entry->family = tbl->family;
@@ -47,7 +46,6 @@ TRACE_EVENT(neigh_create,
__entry->entries = atomic_read(&tbl->gc_entries);
__entry->created = n != NULL;
__entry->gc_exempt = exempt_from_gc;
- pin6 = (struct in6_addr *)__entry->primary_key6;
p32 = (__be32 *)__entry->primary_key4;
if (tbl->family == AF_INET)
@@ -57,6 +55,8 @@ TRACE_EVENT(neigh_create,
#if IS_ENABLED(CONFIG_IPV6)
if (tbl->family == AF_INET6) {
+ struct in6_addr *pin6;
+
pin6 = (struct in6_addr *)__entry->primary_key6;
*pin6 = *(struct in6_addr *)pkey;
}
diff --git a/include/trace/events/rpm.h b/include/trace/events/rpm.h
index 26927a560eab..3c716214dab1 100644
--- a/include/trace/events/rpm.h
+++ b/include/trace/events/rpm.h
@@ -74,6 +74,12 @@ DEFINE_EVENT(rpm_internal, rpm_idle,
TP_ARGS(dev, flags)
);
+DEFINE_EVENT(rpm_internal, rpm_usage,
+
+ TP_PROTO(struct device *dev, int flags),
+
+ TP_ARGS(dev, flags)
+);
TRACE_EVENT(rpm_return_int,
TP_PROTO(struct device *dev, unsigned long ip, int ret),
diff --git a/include/trace/events/spmi.h b/include/trace/events/spmi.h
index 8b60efe18ba6..a6819fd85cdf 100644
--- a/include/trace/events/spmi.h
+++ b/include/trace/events/spmi.h
@@ -21,15 +21,15 @@ TRACE_EVENT(spmi_write_begin,
__field ( u8, sid )
__field ( u16, addr )
__field ( u8, len )
- __dynamic_array ( u8, buf, len + 1 )
+ __dynamic_array ( u8, buf, len )
),
TP_fast_assign(
__entry->opcode = opcode;
__entry->sid = sid;
__entry->addr = addr;
- __entry->len = len + 1;
- memcpy(__get_dynamic_array(buf), buf, len + 1);
+ __entry->len = len;
+ memcpy(__get_dynamic_array(buf), buf, len);
),
TP_printk("opc=%d sid=%02d addr=0x%04x len=%d buf=0x[%*phD]",
@@ -92,7 +92,7 @@ TRACE_EVENT(spmi_read_end,
__field ( u16, addr )
__field ( int, ret )
__field ( u8, len )
- __dynamic_array ( u8, buf, len + 1 )
+ __dynamic_array ( u8, buf, len )
),
TP_fast_assign(
@@ -100,8 +100,8 @@ TRACE_EVENT(spmi_read_end,
__entry->sid = sid;
__entry->addr = addr;
__entry->ret = ret;
- __entry->len = len + 1;
- memcpy(__get_dynamic_array(buf), buf, len + 1);
+ __entry->len = len;
+ memcpy(__get_dynamic_array(buf), buf, len);
),
TP_printk("opc=%d sid=%02d addr=0x%04x ret=%d len=%02d buf=0x[%*phD]",
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index b7a904825e7d..1b5371f0317a 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -156,7 +156,11 @@ DEFINE_EVENT(timer_class, timer_cancel,
{ HRTIMER_MODE_ABS_SOFT, "ABS|SOFT" }, \
{ HRTIMER_MODE_REL_SOFT, "REL|SOFT" }, \
{ HRTIMER_MODE_ABS_PINNED_SOFT, "ABS|PINNED|SOFT" }, \
- { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" })
+ { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" }, \
+ { HRTIMER_MODE_ABS_HARD, "ABS|HARD" }, \
+ { HRTIMER_MODE_REL_HARD, "REL|HARD" }, \
+ { HRTIMER_MODE_ABS_PINNED_HARD, "ABS|PINNED|HARD" }, \
+ { HRTIMER_MODE_REL_PINNED_HARD, "REL|PINNED|HARD" })
/**
* hrtimer_init - called when the hrtimer is initialized
@@ -367,7 +371,8 @@ TRACE_EVENT(itimer_expire,
tick_dep_name(POSIX_TIMER) \
tick_dep_name(PERF_EVENTS) \
tick_dep_name(SCHED) \
- tick_dep_name_end(CLOCK_UNSTABLE)
+ tick_dep_name(CLOCK_UNSTABLE) \
+ tick_dep_name_end(RCU)
#undef tick_dep_name
#undef tick_dep_mask_name
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 011e8faa608b..b70c32e4a491 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(writeback_page_template,
strscpy_pad(__entry->name,
bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
NULL), 32);
- __entry->ino = mapping ? mapping->host->i_ino : 0;
+ __entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
__entry->index = page->index;
),
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 3feeaa3f987a..3193c3c3f5c0 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -145,6 +145,14 @@ extern "C" {
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
/*
+ * 2 plane 10 bit per component YCbCr
+ * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian
+ * index 1 = Cb:Cr plane, [63:0] x:Cb2:Cr2:Cb1:x:Cr1:Cb0:Cr0 2:10:10:10:2:10:10:10 little endian
+ */
+#define DRM_FORMAT_XV15 fourcc_code('X', 'V', '1', '5') /* 2x2 subsampled Cb:Cr plane 2:10:10:10 */
+#define DRM_FORMAT_XV20 fourcc_code('X', 'V', '2', '0') /* 2x1 subsampled Cb:Cr plane 2:10:10:10 */
+
+/*
* Floating point 64bpp RGB
* IEEE 754-2008 binary16 half-precision float
* [15:0] sign:exponent:mantissa 1:5:10
@@ -209,6 +217,13 @@ extern "C" {
#define DRM_FORMAT_YUV420_8BIT fourcc_code('Y', 'U', '0', '8')
#define DRM_FORMAT_YUV420_10BIT fourcc_code('Y', 'U', '1', '0')
+#define DRM_FORMAT_AVUY fourcc_code('A', 'V', 'U', 'Y') /* [31:0] A:Cr:Cb:Y 8:8:8:8 little endian */
+#define DRM_FORMAT_XVUY8888 fourcc_code('X', 'V', '2', '4') /* [31:0] x:Cr:Cb:Y 8:8:8:8 little endian */
+#define DRM_FORMAT_XVUY2101010 fourcc_code('X', 'V', '3', '0') /* [31:0] x:Cr:Cb:Y 2:10:10:10 little endian */
+
+/* Grey scale */
+#define DRM_FORMAT_Y8 fourcc_code('G', 'R', 'E', 'Y') /* 8 Greyscale */
+#define DRM_FORMAT_Y10 fourcc_code('Y', '1', '0', ' ') /* 10 Greyscale */
/*
* 2 plane RGB + A
* index 0 = RGB plane, same format as the corresponding non _A8 format has
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 735c8cfdaaa1..5254c393c9c9 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -485,6 +485,8 @@ struct drm_mode_fb_cmd {
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
+#define DRM_MODE_FB_ALTERNATE_TOP (1<<2) /* for alternate top field */
+#define DRM_MODE_FB_ALTERNATE_BOTTOM (1<<3) /* for alternate bottom field */
struct drm_mode_fb_cmd2 {
__u32 fb_id;
diff --git a/include/uapi/linux/affs_hardblocks.h b/include/uapi/linux/affs_hardblocks.h
index 5e2fb8481252..a5aff2eb5f70 100644
--- a/include/uapi/linux/affs_hardblocks.h
+++ b/include/uapi/linux/affs_hardblocks.h
@@ -7,42 +7,42 @@
/* Just the needed definitions for the RDB of an Amiga HD. */
struct RigidDiskBlock {
- __u32 rdb_ID;
+ __be32 rdb_ID;
__be32 rdb_SummedLongs;
- __s32 rdb_ChkSum;
- __u32 rdb_HostID;
+ __be32 rdb_ChkSum;
+ __be32 rdb_HostID;
__be32 rdb_BlockBytes;
- __u32 rdb_Flags;
- __u32 rdb_BadBlockList;
+ __be32 rdb_Flags;
+ __be32 rdb_BadBlockList;
__be32 rdb_PartitionList;
- __u32 rdb_FileSysHeaderList;
- __u32 rdb_DriveInit;
- __u32 rdb_Reserved1[6];
- __u32 rdb_Cylinders;
- __u32 rdb_Sectors;
- __u32 rdb_Heads;
- __u32 rdb_Interleave;
- __u32 rdb_Park;
- __u32 rdb_Reserved2[3];
- __u32 rdb_WritePreComp;
- __u32 rdb_ReducedWrite;
- __u32 rdb_StepRate;
- __u32 rdb_Reserved3[5];
- __u32 rdb_RDBBlocksLo;
- __u32 rdb_RDBBlocksHi;
- __u32 rdb_LoCylinder;
- __u32 rdb_HiCylinder;
- __u32 rdb_CylBlocks;
- __u32 rdb_AutoParkSeconds;
- __u32 rdb_HighRDSKBlock;
- __u32 rdb_Reserved4;
+ __be32 rdb_FileSysHeaderList;
+ __be32 rdb_DriveInit;
+ __be32 rdb_Reserved1[6];
+ __be32 rdb_Cylinders;
+ __be32 rdb_Sectors;
+ __be32 rdb_Heads;
+ __be32 rdb_Interleave;
+ __be32 rdb_Park;
+ __be32 rdb_Reserved2[3];
+ __be32 rdb_WritePreComp;
+ __be32 rdb_ReducedWrite;
+ __be32 rdb_StepRate;
+ __be32 rdb_Reserved3[5];
+ __be32 rdb_RDBBlocksLo;
+ __be32 rdb_RDBBlocksHi;
+ __be32 rdb_LoCylinder;
+ __be32 rdb_HiCylinder;
+ __be32 rdb_CylBlocks;
+ __be32 rdb_AutoParkSeconds;
+ __be32 rdb_HighRDSKBlock;
+ __be32 rdb_Reserved4;
char rdb_DiskVendor[8];
char rdb_DiskProduct[16];
char rdb_DiskRevision[4];
char rdb_ControllerVendor[8];
char rdb_ControllerProduct[16];
char rdb_ControllerRevision[4];
- __u32 rdb_Reserved5[10];
+ __be32 rdb_Reserved5[10];
};
#define IDNAME_RIGIDDISK 0x5244534B /* "RDSK" */
@@ -50,16 +50,16 @@ struct RigidDiskBlock {
struct PartitionBlock {
__be32 pb_ID;
__be32 pb_SummedLongs;
- __s32 pb_ChkSum;
- __u32 pb_HostID;
+ __be32 pb_ChkSum;
+ __be32 pb_HostID;
__be32 pb_Next;
- __u32 pb_Flags;
- __u32 pb_Reserved1[2];
- __u32 pb_DevFlags;
+ __be32 pb_Flags;
+ __be32 pb_Reserved1[2];
+ __be32 pb_DevFlags;
__u8 pb_DriveName[32];
- __u32 pb_Reserved2[15];
+ __be32 pb_Reserved2[15];
__be32 pb_Environment[17];
- __u32 pb_EReserved[15];
+ __be32 pb_EReserved[15];
};
#define IDNAME_PARTITION 0x50415254 /* "PART" */
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index c89c6495983d..a79f8c285a10 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -179,7 +179,7 @@
#define AUDIT_MAX_KEY_LEN 256
#define AUDIT_BITMASK_SIZE 64
#define AUDIT_WORD(nr) ((__u32)((nr)/32))
-#define AUDIT_BIT(nr) (1 << ((nr) - AUDIT_WORD(nr)*32))
+#define AUDIT_BIT(nr) (1U << ((nr) - AUDIT_WORD(nr)*32))
#define AUDIT_SYSCALL_CLASSES 16
#define AUDIT_CLASS_DIR_WRITE 0
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index 498eec813494..94649130e3d7 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -51,13 +51,13 @@ enum blk_zone_type {
*
* The Zone Condition state machine in the ZBC/ZAC standards maps the above
* definitions as:
- * - ZC1: Empty | BLK_ZONE_EMPTY
+ * - ZC1: Empty | BLK_ZONE_COND_EMPTY
* - ZC2: Implicit Open | BLK_ZONE_COND_IMP_OPEN
* - ZC3: Explicit Open | BLK_ZONE_COND_EXP_OPEN
- * - ZC4: Closed | BLK_ZONE_CLOSED
- * - ZC5: Full | BLK_ZONE_FULL
- * - ZC6: Read Only | BLK_ZONE_READONLY
- * - ZC7: Offline | BLK_ZONE_OFFLINE
+ * - ZC4: Closed | BLK_ZONE_COND_CLOSED
+ * - ZC5: Full | BLK_ZONE_COND_FULL
+ * - ZC6: Read Only | BLK_ZONE_COND_READONLY
+ * - ZC7: Offline | BLK_ZONE_COND_OFFLINE
*
* Conditions 0x5 to 0xC are reserved by the current ZBC/ZAC spec and should
* be considered invalid.
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index cb0631098f91..e5250a9b813d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -780,7 +780,9 @@ union bpf_attr {
* performed again, if the helper is used in combination with
* direct packet access.
* Return
- * 0 on success, or a negative error in case of failure.
+ * 0 on success, or a negative error in case of failure. Positive
+ * error indicates a potential drop or congestion in the target
+ * device. The particular positive error codes are not defined.
*
* u64 bpf_get_current_pid_tgid(void)
* Return
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 3ee0678c0a83..140722320698 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -162,6 +162,7 @@ struct btrfs_scrub_progress {
};
#define BTRFS_SCRUB_READONLY 1
+#define BTRFS_SCRUB_SUPPORTED_FLAGS (BTRFS_SCRUB_READONLY)
struct btrfs_ioctl_scrub_args {
__u64 devid; /* in */
__u64 start; /* in */
@@ -538,6 +539,9 @@ struct btrfs_ioctl_clone_range_args {
*/
#define BTRFS_DEFRAG_RANGE_COMPRESS 1
#define BTRFS_DEFRAG_RANGE_START_IO 2
+#define BTRFS_DEFRAG_RANGE_FLAGS_SUPP (BTRFS_DEFRAG_RANGE_COMPRESS | \
+ BTRFS_DEFRAG_RANGE_START_IO)
+
struct btrfs_ioctl_defrag_range_args {
/* start of the defrag operation */
__u64 start;
diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h
index 34633283de64..a1000cb63063 100644
--- a/include/uapi/linux/can/error.h
+++ b/include/uapi/linux/can/error.h
@@ -120,6 +120,9 @@
#define CAN_ERR_TRX_CANL_SHORT_TO_GND 0x70 /* 0111 0000 */
#define CAN_ERR_TRX_CANL_SHORT_TO_CANH 0x80 /* 1000 0000 */
-/* controller specific additional information / data[5..7] */
+/* data[5] is reserved (do not use) */
+
+/* TX error counter / data[6] */
+/* RX error counter / data[7] */
#endif /* _UAPI_CAN_ERROR_H */
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 240fdb9a60f6..6e0d68e841cd 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -376,7 +376,7 @@ struct vfs_ns_cap_data {
*/
#define CAP_TO_INDEX(x) ((x) >> 5) /* 1 << 5 == bits in __u32 */
-#define CAP_TO_MASK(x) (1 << ((x) & 31)) /* mask for indexed __u32 */
+#define CAP_TO_MASK(x) (1U << ((x) & 31)) /* mask for indexed __u32 */
#endif /* _UAPI_LINUX_CAPABILITY_H */
diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
index af2a44c08683..a429381e7ca5 100644
--- a/include/uapi/linux/const.h
+++ b/include/uapi/linux/const.h
@@ -28,7 +28,7 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
diff --git a/include/uapi/linux/dn.h b/include/uapi/linux/dn.h
deleted file mode 100644
index 36ca71bd8bbe..000000000000
--- a/include/uapi/linux/dn.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _LINUX_DN_H
-#define _LINUX_DN_H
-
-#include <linux/ioctl.h>
-#include <linux/types.h>
-#include <linux/if_ether.h>
-
-/*
-
- DECnet Data Structures and Constants
-
-*/
-
-/*
- * DNPROTO_NSP can't be the same as SOL_SOCKET,
- * so increment each by one (compared to ULTRIX)
- */
-#define DNPROTO_NSP 2 /* NSP protocol number */
-#define DNPROTO_ROU 3 /* Routing protocol number */
-#define DNPROTO_NML 4 /* Net mgt protocol number */
-#define DNPROTO_EVL 5 /* Evl protocol number (usr) */
-#define DNPROTO_EVR 6 /* Evl protocol number (evl) */
-#define DNPROTO_NSPT 7 /* NSP trace protocol number */
-
-
-#define DN_ADDL 2
-#define DN_MAXADDL 2 /* ULTRIX headers have 20 here, but pathworks has 2 */
-#define DN_MAXOPTL 16
-#define DN_MAXOBJL 16
-#define DN_MAXACCL 40
-#define DN_MAXALIASL 128
-#define DN_MAXNODEL 256
-#define DNBUFSIZE 65023
-
-/*
- * SET/GET Socket options - must match the DSO_ numbers below
- */
-#define SO_CONDATA 1
-#define SO_CONACCESS 2
-#define SO_PROXYUSR 3
-#define SO_LINKINFO 7
-
-#define DSO_CONDATA 1 /* Set/Get connect data */
-#define DSO_DISDATA 10 /* Set/Get disconnect data */
-#define DSO_CONACCESS 2 /* Set/Get connect access data */
-#define DSO_ACCEPTMODE 4 /* Set/Get accept mode */
-#define DSO_CONACCEPT 5 /* Accept deferred connection */
-#define DSO_CONREJECT 6 /* Reject deferred connection */
-#define DSO_LINKINFO 7 /* Set/Get link information */
-#define DSO_STREAM 8 /* Set socket type to stream */
-#define DSO_SEQPACKET 9 /* Set socket type to sequenced packet */
-#define DSO_MAXWINDOW 11 /* Maximum window size allowed */
-#define DSO_NODELAY 12 /* Turn off nagle */
-#define DSO_CORK 13 /* Wait for more data! */
-#define DSO_SERVICES 14 /* NSP Services field */
-#define DSO_INFO 15 /* NSP Info field */
-#define DSO_MAX 15 /* Maximum option number */
-
-
-/* LINK States */
-#define LL_INACTIVE 0
-#define LL_CONNECTING 1
-#define LL_RUNNING 2
-#define LL_DISCONNECTING 3
-
-#define ACC_IMMED 0
-#define ACC_DEFER 1
-
-#define SDF_WILD 1 /* Wild card object */
-#define SDF_PROXY 2 /* Addr eligible for proxy */
-#define SDF_UICPROXY 4 /* Use uic-based proxy */
-
-/* Structures */
-
-
-struct dn_naddr {
- __le16 a_len;
- __u8 a_addr[DN_MAXADDL]; /* Two bytes little endian */
-};
-
-struct sockaddr_dn {
- __u16 sdn_family;
- __u8 sdn_flags;
- __u8 sdn_objnum;
- __le16 sdn_objnamel;
- __u8 sdn_objname[DN_MAXOBJL];
- struct dn_naddr sdn_add;
-};
-#define sdn_nodeaddrl sdn_add.a_len /* Node address length */
-#define sdn_nodeaddr sdn_add.a_addr /* Node address */
-
-
-
-/*
- * DECnet set/get DSO_CONDATA, DSO_DISDATA (optional data) structure
- */
-struct optdata_dn {
- __le16 opt_status; /* Extended status return */
-#define opt_sts opt_status
- __le16 opt_optl; /* Length of user data */
- __u8 opt_data[16]; /* User data */
-};
-
-struct accessdata_dn {
- __u8 acc_accl;
- __u8 acc_acc[DN_MAXACCL];
- __u8 acc_passl;
- __u8 acc_pass[DN_MAXACCL];
- __u8 acc_userl;
- __u8 acc_user[DN_MAXACCL];
-};
-
-/*
- * DECnet logical link information structure
- */
-struct linkinfo_dn {
- __u16 idn_segsize; /* Segment size for link */
- __u8 idn_linkstate; /* Logical link state */
-};
-
-/*
- * Ethernet address format (for DECnet)
- */
-union etheraddress {
- __u8 dne_addr[ETH_ALEN]; /* Full ethernet address */
- struct {
- __u8 dne_hiord[4]; /* DECnet HIORD prefix */
- __u8 dne_nodeaddr[2]; /* DECnet node address */
- } dne_remote;
-};
-
-
-/*
- * DECnet physical socket address format
- */
-struct dn_addr {
- __le16 dna_family; /* AF_DECnet */
- union etheraddress dna_netaddr; /* DECnet ethernet address */
-};
-
-#define DECNET_IOCTL_BASE 0x89 /* PROTOPRIVATE range */
-
-#define SIOCSNETADDR _IOW(DECNET_IOCTL_BASE, 0xe0, struct dn_naddr)
-#define SIOCGNETADDR _IOR(DECNET_IOCTL_BASE, 0xe1, struct dn_naddr)
-#define OSIOCSNETADDR _IOW(DECNET_IOCTL_BASE, 0xe0, int)
-#define OSIOCGNETADDR _IOR(DECNET_IOCTL_BASE, 0xe1, int)
-
-#endif /* _LINUX_DN_H */
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
index c7d66755d212..78bd62014efd 100644
--- a/include/uapi/linux/gtp.h
+++ b/include/uapi/linux/gtp.h
@@ -30,6 +30,6 @@ enum gtp_attrs {
GTPA_PAD,
__GTPA_MAX,
};
-#define GTPA_MAX (__GTPA_MAX + 1)
+#define GTPA_MAX (__GTPA_MAX - 1)
#endif /* _UAPI_LINUX_GTP_H_ */
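
The GTPA_MAX fix restores the standard netlink sentinel pattern: the enum's trailing __FOO_MAX is one past the last valid attribute, so the public maximum is the sentinel minus one and policy arrays are sized FOO_MAX + 1:

    enum example_attrs {
            EXAMPLE_A_UNSPEC,
            EXAMPLE_A_FOO,
            __EXAMPLE_A_MAX,
    };
    #define EXAMPLE_A_MAX (__EXAMPLE_A_MAX - 1)     /* == EXAMPLE_A_FOO */

    static const struct nla_policy example_policy[EXAMPLE_A_MAX + 1] = {
            [EXAMPLE_A_FOO] = { .type = NLA_U32 },
    };
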
diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h
index 769050771423..a0f4f6e1135d 100644
--- a/include/uapi/linux/if_alg.h
+++ b/include/uapi/linux/if_alg.h
@@ -51,6 +51,7 @@ struct af_alg_iv {
#define ALG_SET_OP 3
#define ALG_SET_AEAD_ASSOCLEN 4
#define ALG_SET_AEAD_AUTHSIZE 5
+#define ALG_SET_KEY_TYPE 6
/* Operations */
#define ALG_OP_DECRYPT 0
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 60e1241d4b77..f5ac290c2250 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -154,6 +154,8 @@ struct in_addr {
#define MCAST_MSFILTER 48
#define IP_MULTICAST_ALL 49
#define IP_UNICAST_IF 50
+#define IP_LOCAL_PORT_RANGE 51
+#define IP_PROTOCOL 52
#define MCAST_EXCLUDE 0
#define MCAST_INCLUDE 1
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 16c1fa2d89a4..b93b3da3cc84 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -34,7 +34,7 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x101d */
+/* RGB - next is 0x101b */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
@@ -155,4 +155,27 @@
/* HSV - next is 0x6002 */
#define MEDIA_BUS_FMT_AHSV8888_1X32 0x6001
+/* RGB: Xilinx Specific - next is 0x1103 */
+#define MEDIA_BUS_FMT_RBG101010_1X30 0x1100
+#define MEDIA_BUS_FMT_RBG121212_1X36 0x1101
+#define MEDIA_BUS_FMT_RBG161616_1X48 0x1102
+
+/* YUV: Xilinx Specific - next is 0x2109 */
+#define MEDIA_BUS_FMT_VYYUYY8_1X24 0x2100
+#define MEDIA_BUS_FMT_VYYUYY10_4X20 0x2101
+#define MEDIA_BUS_FMT_VUY10_1X30 0x2102
+#define MEDIA_BUS_FMT_UYYVYY12_4X24 0x2103
+#define MEDIA_BUS_FMT_VUY12_1X36 0x2104
+#define MEDIA_BUS_FMT_Y16_1X16 0x2105
+#define MEDIA_BUS_FMT_UYYVYY16_4X32 0x2106
+#define MEDIA_BUS_FMT_VUY16_1X48 0x2107
+#define MEDIA_BUS_FMT_UYVY16_2X32 0x2108
+/*
+ * This format should be used when the same driver handles
+ * both sides of the link and the bus format is a fixed
+ * metadata format that is not configurable from userspace.
+ * Width and height will be set to 0 for this format.
+ */
+#define MEDIA_BUS_FMT_METADATA_FIXED 0x7001
+
#endif /* __LINUX_MEDIA_BUS_FORMAT_H */
diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
index edc6ddab0de6..2d6f80d75ae7 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
@@ -15,7 +15,7 @@ enum sctp_conntrack {
SCTP_CONNTRACK_SHUTDOWN_RECD,
SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
SCTP_CONNTRACK_HEARTBEAT_SENT,
- SCTP_CONNTRACK_HEARTBEAT_ACKED,
+ SCTP_CONNTRACK_HEARTBEAT_ACKED, /* no longer used */
SCTP_CONNTRACK_MAX
};
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 0a995403172c..bc70d580e8d6 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -162,6 +162,7 @@ enum nft_hook_attributes {
enum nft_table_flags {
NFT_TABLE_F_DORMANT = 0x1,
};
+#define NFT_TABLE_F_MASK (NFT_TABLE_F_DORMANT)
/**
* enum nft_table_attributes - nf_tables table netlink attributes
@@ -242,9 +243,11 @@ enum nft_rule_attributes {
/**
* enum nft_rule_compat_flags - nf_tables rule compat flags
*
+ * @NFT_RULE_COMPAT_F_UNUSED: unused
* @NFT_RULE_COMPAT_F_INV: invert the check result
*/
enum nft_rule_compat_flags {
+ NFT_RULE_COMPAT_F_UNUSED = (1 << 0),
NFT_RULE_COMPAT_F_INV = (1 << 1),
NFT_RULE_COMPAT_F_MASK = NFT_RULE_COMPAT_F_INV,
};
diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
index 6b20fb22717b..aa805e6d4e28 100644
--- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
@@ -94,7 +94,7 @@ enum ctattr_timeout_sctp {
CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
- CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
+ CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED, /* no longer used */
__CTA_TIMEOUT_SCTP_MAX
};
#define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
diff --git a/include/uapi/linux/netfilter_decnet.h b/include/uapi/linux/netfilter_decnet.h
deleted file mode 100644
index 3c77f54560f2..000000000000
--- a/include/uapi/linux/netfilter_decnet.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __LINUX_DECNET_NETFILTER_H
-#define __LINUX_DECNET_NETFILTER_H
-
-/* DECnet-specific defines for netfilter.
- * This file (C) Steve Whitehouse 1999 derived from the
- * ipv4 netfilter header file which is
- * (C)1998 Rusty Russell -- This code is GPL.
- */
-
-#include <linux/netfilter.h>
-
-/* only for userspace compatibility */
-#ifndef __KERNEL__
-
-#include <limits.h> /* for INT_MIN, INT_MAX */
-
-/* kernel define is in netfilter_defs.h */
-#define NF_DN_NUMHOOKS 7
-#endif /* ! __KERNEL__ */
-
-/* DECnet Hooks */
-/* After promisc drops, checksum checks. */
-#define NF_DN_PRE_ROUTING 0
-/* If the packet is destined for this box. */
-#define NF_DN_LOCAL_IN 1
-/* If the packet is destined for another interface. */
-#define NF_DN_FORWARD 2
-/* Packets coming from a local process. */
-#define NF_DN_LOCAL_OUT 3
-/* Packets about to hit the wire. */
-#define NF_DN_POST_ROUTING 4
-/* Input Hello Packets */
-#define NF_DN_HELLO 5
-/* Input Routing Packets */
-#define NF_DN_ROUTE 6
-
-enum nf_dn_hook_priorities {
- NF_DN_PRI_FIRST = INT_MIN,
- NF_DN_PRI_CONNTRACK = -200,
- NF_DN_PRI_MANGLE = -150,
- NF_DN_PRI_NAT_DST = -100,
- NF_DN_PRI_FILTER = 0,
- NF_DN_PRI_NAT_SRC = 100,
- NF_DN_PRI_DNRTMSG = 200,
- NF_DN_PRI_LAST = INT_MAX,
-};
-
-struct nf_dn_rtmsg {
- int nfdn_ifindex;
-};
-
-#define NFDN_RTMSG(r) ((unsigned char *)(r) + NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg)))
-
-#ifndef __KERNEL__
-/* backwards compatibility for userspace */
-#define DNRMG_L1_GROUP 0x01
-#define DNRMG_L2_GROUP 0x02
-#endif
-
-enum {
- DNRNG_NLGRP_NONE,
-#define DNRNG_NLGRP_NONE DNRNG_NLGRP_NONE
- DNRNG_NLGRP_L1,
-#define DNRNG_NLGRP_L1 DNRNG_NLGRP_L1
- DNRNG_NLGRP_L2,
-#define DNRNG_NLGRP_L2 DNRNG_NLGRP_L2
- __DNRNG_NLGRP_MAX
-};
-#define DNRNG_NLGRP_MAX (__DNRNG_NLGRP_MAX - 1)
-
-#endif /*__LINUX_DECNET_NETFILTER_H*/
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 622c78c821aa..cf4e4836338f 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -20,7 +20,7 @@
#define NETLINK_CONNECTOR 11
#define NETLINK_NETFILTER 12 /* netfilter subsystem */
#define NETLINK_IP6_FW 13
-#define NETLINK_DNRTMSG 14 /* DECnet routing messages */
+#define NETLINK_DNRTMSG 14 /* DECnet routing messages (obsolete) */
#define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */
#define NETLINK_GENERIC 16
/* leave room for NETLINK_DM (DM Events) */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index b485d8b0d5a7..5d830a95daf2 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -679,6 +679,8 @@
#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCTL2_TLS_32_0GT 0x0005 /* Supported Speed 32GT/s */
+#define PCI_EXP_LNKCTL2_ENTER_COMP 0x0010 /* Enter Compliance */
+#define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index ceccd980ffcf..19c143a293be 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -273,6 +273,7 @@ enum {
* { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 id; } && PERF_FORMAT_ID
+ * { u64 lost; } && PERF_FORMAT_LOST
* } && !PERF_FORMAT_GROUP
*
* { u64 nr;
@@ -280,6 +281,7 @@ enum {
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 value;
* { u64 id; } && PERF_FORMAT_ID
+ * { u64 lost; } && PERF_FORMAT_LOST
* } cntr[nr];
* } && PERF_FORMAT_GROUP
* };
@@ -289,8 +291,9 @@ enum perf_event_read_format {
PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
PERF_FORMAT_ID = 1U << 2,
PERF_FORMAT_GROUP = 1U << 3,
+ PERF_FORMAT_LOST = 1U << 4,
- PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
+ PERF_FORMAT_MAX = 1U << 5, /* non-ABI */
};
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
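
With PERF_FORMAT_LOST set, read() returns one extra u64 per counter, exactly as the updated comment block lays out. A sketch of the non-group case, assuming the fd was opened with attr.read_format = PERF_FORMAT_ID | PERF_FORMAT_LOST:

#include <stdint.h>
#include <unistd.h>

/* Mirrors the !PERF_FORMAT_GROUP layout documented above. */
struct read_format {
	uint64_t value;	/* counter value */
	uint64_t id;	/* && PERF_FORMAT_ID */
	uint64_t lost;	/* && PERF_FORMAT_LOST */
};

static int read_counter(int fd, uint64_t *value, uint64_t *lost)
{
	struct read_format rf;

	if (read(fd, &rf, sizeof(rf)) != sizeof(rf))
		return -1;
	*value = rf.value;
	*lost = rf.lost;	/* samples dropped from the ring buffer */
	return 0;
}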
diff --git a/include/uapi/linux/resource.h b/include/uapi/linux/resource.h
index cc00fd079631..74ef57b38f9f 100644
--- a/include/uapi/linux/resource.h
+++ b/include/uapi/linux/resource.h
@@ -22,8 +22,8 @@
#define RUSAGE_THREAD 1 /* only the calling thread */
struct rusage {
- struct timeval ru_utime; /* user time used */
- struct timeval ru_stime; /* system time used */
+ struct __kernel_old_timeval ru_utime; /* user time used */
+ struct __kernel_old_timeval ru_stime; /* system time used */
__kernel_long_t ru_maxrss; /* maximum resident set size */
__kernel_long_t ru_ixrss; /* integral shared memory size */
__kernel_long_t ru_idrss; /* integral unshared data size */
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index 7272f85d6d6a..3736f2fe1541 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -3,7 +3,7 @@
#define _UAPI_LINUX_SWAB_H
#include <linux/types.h>
-#include <linux/compiler.h>
+#include <linux/stddef.h>
#include <asm/bitsperlong.h>
#include <asm/swab.h>
diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h
index ee2dcfb3d660..d7f7c04a6e0c 100644
--- a/include/uapi/linux/sync_file.h
+++ b/include/uapi/linux/sync_file.h
@@ -52,7 +52,7 @@ struct sync_fence_info {
* @name: name of fence
* @status: status of fence. 1: signaled 0:active <0:error
* @flags: sync_file_info flags
- * @num_fences number of fences in the sync_file
+ * @num_fences: number of fences in the sync_file
* @pad: padding for 64-bit alignment, should always be zero
* @sync_fence_info: pointer to array of structs sync_fence_info with all
* fences in the sync_file
diff --git a/include/uapi/linux/uio/uio.h b/include/uapi/linux/uio/uio.h
new file mode 100644
index 000000000000..db92d311c85f
--- /dev/null
+++ b/include/uapi/linux/uio/uio.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * The header for the UIO driver
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ */
+
+#ifndef _UAPI_UIO_UIO_H_
+#define _UAPI_UIO_UIO_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * enum uio_dmabuf_dir - list of dma directions for mapping management
+ * @UIO_DMABUF_DIR_BIDIR: Bidirectional DMA. To and from device
+ * @UIO_DMABUF_DIR_TO_DEV: DMA to device
+ * @UIO_DMABUF_DIR_FROM_DEV: DMA from device
+ * @UIO_DMABUF_DIR_NONE: Direction not specified
+ */
+enum uio_dmabuf_dir {
+ UIO_DMABUF_DIR_BIDIR = 1,
+ UIO_DMABUF_DIR_TO_DEV = 2,
+ UIO_DMABUF_DIR_FROM_DEV = 3,
+ UIO_DMABUF_DIR_NONE = 4,
+};
+
+/**
+ * struct uio_dmabuf_args - arguments from userspace to map / unmap dmabuf
+ * @dbuf_fd: The fd of the dma buf
+ * @dma_addr: The dma address of dmabuf @dbuf_fd
+ * @size: The size of dmabuf @dbuf_fd
+ * @dir: direction of dma transfer of dmabuf @dbuf_fd
+ */
+struct uio_dmabuf_args {
+ __s32 dbuf_fd;
+ __u64 dma_addr;
+ __u64 size;
+ __u8 dir;
+};
+
+#define UIO_IOC_BASE 'U'
+
+/**
+ * DOC: UIO_IOC_MAP_DMABUF - Map the dma buf to userspace uio application
+ *
+ * This takes uio_dmabuf_args, and maps the given dmabuf @dbuf_fd and returns
+ * information to userspace.
+ * FIXME: This is experimental and may change at any time. Do not consider
+ * this a stable ABI.
+ */
+#define UIO_IOC_MAP_DMABUF _IOWR(UIO_IOC_BASE, 0x1, struct uio_dmabuf_args)
+
+/**
+ * DOC: UIO_IOC_UNMAP_DMABUF - Unmap the dma buf
+ *
+ * This takes uio_dmabuf_args, and unmaps the previous mapped dmabuf @dbuf_fd.
+ * FIXME: This is experimental and may change at any time. Do not consider
+ * this a stable ABI.
+ */
+#define UIO_IOC_UNMAP_DMABUF _IOWR(UIO_IOC_BASE, 0x2, struct uio_dmabuf_args)
+
+#endif
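
Both ioctls are flagged experimental by the header itself, so callers must be prepared for the ABI to move. A sketch of the map path, where uio_fd is assumed to be an open /dev/uioN and dbuf_fd a dma-buf exported by another driver:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/uio/uio.h>

static int uio_map_dmabuf(int uio_fd, int dbuf_fd,
			  unsigned long long *dma_addr)
{
	struct uio_dmabuf_args args;

	memset(&args, 0, sizeof(args));
	args.dbuf_fd = dbuf_fd;
	args.dir = UIO_DMABUF_DIR_BIDIR;
	if (ioctl(uio_fd, UIO_IOC_MAP_DMABUF, &args))
		return -1;
	*dma_addr = args.dma_addr;	/* filled in by the driver */
	return 0;
}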
diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
index bfdae12cdacf..c58854fb7d94 100644
--- a/include/uapi/linux/usb/video.h
+++ b/include/uapi/linux/usb/video.h
@@ -179,6 +179,36 @@
#define UVC_CONTROL_CAP_AUTOUPDATE (1 << 3)
#define UVC_CONTROL_CAP_ASYNCHRONOUS (1 << 4)
+/* 3.9.2.6 Color Matching Descriptor Values */
+enum uvc_color_primaries_values {
+ UVC_COLOR_PRIMARIES_UNSPECIFIED,
+ UVC_COLOR_PRIMARIES_BT_709_SRGB,
+ UVC_COLOR_PRIMARIES_BT_470_2_M,
+ UVC_COLOR_PRIMARIES_BT_470_2_B_G,
+ UVC_COLOR_PRIMARIES_SMPTE_170M,
+ UVC_COLOR_PRIMARIES_SMPTE_240M,
+};
+
+enum uvc_transfer_characteristics_values {
+ UVC_TRANSFER_CHARACTERISTICS_UNSPECIFIED,
+ UVC_TRANSFER_CHARACTERISTICS_BT_709,
+ UVC_TRANSFER_CHARACTERISTICS_BT_470_2_M,
+ UVC_TRANSFER_CHARACTERISTICS_BT_470_2_B_G,
+ UVC_TRANSFER_CHARACTERISTICS_SMPTE_170M,
+ UVC_TRANSFER_CHARACTERISTICS_SMPTE_240M,
+ UVC_TRANSFER_CHARACTERISTICS_LINEAR,
+ UVC_TRANSFER_CHARACTERISTICS_SRGB,
+};
+
+enum uvc_matrix_coefficients {
+ UVC_MATRIX_COEFFICIENTS_UNSPECIFIED,
+ UVC_MATRIX_COEFFICIENTS_BT_709,
+ UVC_MATRIX_COEFFICIENTS_FCC,
+ UVC_MATRIX_COEFFICIENTS_BT_470_2_B_G,
+ UVC_MATRIX_COEFFICIENTS_SMPTE_170M,
+ UVC_MATRIX_COEFFICIENTS_SMPTE_240M,
+};
+
/* ------------------------------------------------------------------------
* UVC structures
*/
diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h
index f80f05b3c423..214092366193 100644
--- a/include/uapi/linux/uvcvideo.h
+++ b/include/uapi/linux/uvcvideo.h
@@ -86,7 +86,7 @@ struct uvc_xu_control_query {
* struct. The first two fields are added by the driver, they can be used for
* clock synchronisation. The rest is an exact copy of a UVC payload header.
* Only complete objects with complete buffers are included. Therefore it's
- * always sizeof(meta->ts) + sizeof(meta->sof) + meta->length bytes large.
+ * always sizeof(meta->ns) + sizeof(meta->sof) + meta->length bytes large.
*/
struct uvc_meta_buf {
__u64 ns;
diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h
index 123a231001a8..325c985ed06f 100644
--- a/include/uapi/linux/v4l2-mediabus.h
+++ b/include/uapi/linux/v4l2-mediabus.h
@@ -68,6 +68,8 @@ enum v4l2_mbus_pixelcode {
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_BE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_LE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(ARGB8888_1X32),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RBG888_1X24),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_1X32_PADHI),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(UV8_1X8),
@@ -104,6 +106,7 @@ enum v4l2_mbus_pixelcode {
V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY12_1X24),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV12_1X24),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU12_1X24),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VUY8_1X24),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG8_1X8),
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index 03970ce30741..b76f9b4afe05 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -155,6 +155,27 @@ struct v4l2_subdev_selection {
__u32 reserved[8];
};
+
+/**
+ * struct v4l2_subdev_route - A signal route inside a subdev
+ * @sink: the sink pad
+ * @source: the source pad
+ */
+struct v4l2_subdev_route {
+ __u32 sink;
+ __u32 source;
+};
+
+/**
+ * struct v4l2_subdev_routing - Routing information
+ * @num_routes: the total number of routes in the routes array
+ * @routes: the routes array
+ */
+struct v4l2_subdev_routing {
+ __u32 num_routes;
+ struct v4l2_subdev_route *routes;
+};
+
/* Backwards compatibility define --- to be removed */
#define v4l2_subdev_edid v4l2_edid
@@ -181,5 +202,7 @@ struct v4l2_subdev_selection {
#define VIDIOC_SUBDEV_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings)
#define VIDIOC_SUBDEV_QUERY_DV_TIMINGS _IOR('V', 99, struct v4l2_dv_timings)
#define VIDIOC_SUBDEV_DV_TIMINGS_CAP _IOWR('V', 100, struct v4l2_dv_timings_cap)
+#define VIDIOC_SUBDEV_G_ROUTING _IOWR('V', 38, struct v4l2_subdev_routing)
+#define VIDIOC_SUBDEV_S_ROUTING _IOWR('V', 39, struct v4l2_subdev_routing)
#endif
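
The header does not spell out the calling convention for the new routing ioctls; a plausible sketch is a caller-allocated array whose length is passed in and corrected on return. MAX_ROUTES is an arbitrary bound chosen for the example:

#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

#define MAX_ROUTES 16	/* arbitrary upper bound for this sketch */

static int get_routes(int fd, struct v4l2_subdev_route *routes,
		      unsigned int *count)
{
	struct v4l2_subdev_routing routing = {
		.num_routes = MAX_ROUTES,
		.routes = routes,
	};

	if (ioctl(fd, VIDIOC_SUBDEV_G_ROUTING, &routing))
		return -1;
	*count = routing.num_routes;	/* entries actually filled in */
	return 0;
}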
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 3210b3c82a4a..581f2d39505a 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -545,20 +545,25 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */
#define V4L2_PIX_FMT_ABGR32 v4l2_fourcc('A', 'R', '2', '4') /* 32 BGRA-8-8-8-8 */
#define V4L2_PIX_FMT_XBGR32 v4l2_fourcc('X', 'R', '2', '4') /* 32 BGRX-8-8-8-8 */
-#define V4L2_PIX_FMT_BGRA32 v4l2_fourcc('R', 'A', '2', '4') /* 32 ABGR-8-8-8-8 */
-#define V4L2_PIX_FMT_BGRX32 v4l2_fourcc('R', 'X', '2', '4') /* 32 XBGR-8-8-8-8 */
#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R', 'G', 'B', '4') /* 32 RGB-8-8-8-8 */
#define V4L2_PIX_FMT_RGBA32 v4l2_fourcc('A', 'B', '2', '4') /* 32 RGBA-8-8-8-8 */
#define V4L2_PIX_FMT_RGBX32 v4l2_fourcc('X', 'B', '2', '4') /* 32 RGBX-8-8-8-8 */
#define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4') /* 32 ARGB-8-8-8-8 */
+#define V4L2_PIX_FMT_BGRA32 v4l2_fourcc('A', 'B', 'G', 'R') /* 32 ABGR-8-8-8-8 */
#define V4L2_PIX_FMT_XRGB32 v4l2_fourcc('B', 'X', '2', '4') /* 32 XRGB-8-8-8-8 */
+#define V4L2_PIX_FMT_BGRX32 v4l2_fourcc('X', 'B', 'G', 'R') /* 32 XBGR-8-8-8-8 */
+#define V4L2_PIX_FMT_XBGR30 v4l2_fourcc('R', 'X', '3', '0') /* 32 XBGR-2-10-10-10 */
+#define V4L2_PIX_FMT_XBGR40 v4l2_fourcc('R', 'X', '4', '0') /* 40 XBGR-4-12-12-12 */
+#define V4L2_PIX_FMT_BGR48 v4l2_fourcc('R', 'G', '4', '8') /* 32 BGR-16-16-16 */
/* Grey formats */
#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
#define V4L2_PIX_FMT_Y4 v4l2_fourcc('Y', '0', '4', ' ') /* 4 Greyscale */
#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */
#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
+#define V4L2_PIX_FMT_XY10 v4l2_fourcc('X', 'Y', '1', '0') /* 10 Greyscale 2-10-10-10 */
#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */
+#define V4L2_PIX_FMT_XY12 v4l2_fourcc('X', 'Y', '1', '2') /* 12 Greyscale 4-12-12-12 */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
#define V4L2_PIX_FMT_Y16_BE v4l2_fourcc_be('Y', '1', '6', ' ') /* 16 Greyscale BE */
@@ -580,6 +585,9 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_VYUY v4l2_fourcc('V', 'Y', 'U', 'Y') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */
#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */
+#define V4L2_PIX_FMT_XVUY32 v4l2_fourcc('X', 'V', '3', '2') /* 32 XVUY 8:8:8:8 */
+#define V4L2_PIX_FMT_AVUY32 v4l2_fourcc('A', 'V', '3', '2') /* 32 AVUY 8:8:8:8 */
+#define V4L2_PIX_FMT_VUY24 v4l2_fourcc('V', 'U', '2', '4') /* 24 VUY 8:8:8 */
#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */
#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */
#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */
@@ -590,6 +598,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */
#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */
#define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */
+#define V4L2_PIX_FMT_XVUY10 v4l2_fourcc('X', '4', '1', '0') /* 32 XVUY 2-10-10-10 */
/* two planes -- one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
@@ -598,6 +607,14 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
#define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */
#define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */
+#define V4L2_PIX_FMT_XV20 v4l2_fourcc('X', 'V', '2', '0') /* 32 XY/UV 4:2:2 10-bit */
+#define V4L2_PIX_FMT_XV15 v4l2_fourcc('X', 'V', '1', '5') /* 32 XY/UV 4:2:0 10-bit */
+#define V4L2_PIX_FMT_X012 v4l2_fourcc('X', '0', '1', '2') /* 40 XY/UV 4:2:0 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X212 v4l2_fourcc('X', '2', '1', '2') /* 40 XY/UV 4:2:2 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X412 v4l2_fourcc('X', '4', '1', '2') /* 40 XY/UV 4:4:4 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X016 v4l2_fourcc('X', '0', '1', '6') /* 32 XY/UV 4:2:0 16-bit */
+#define V4L2_PIX_FMT_X216 v4l2_fourcc('X', '2', '1', '6') /* 32 XY/UV 4:2:2 16-bit */
+#define V4L2_PIX_FMT_X416 v4l2_fourcc('X', '4', '1', '6') /* 32 XY/UV 4:4:4 16-bit */
/* two non contiguous planes - one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */
@@ -605,6 +622,14 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV16M v4l2_fourcc('N', 'M', '1', '6') /* 16 Y/CbCr 4:2:2 */
#define V4L2_PIX_FMT_NV61M v4l2_fourcc('N', 'M', '6', '1') /* 16 Y/CrCb 4:2:2 */
#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 macroblocks */
+#define V4L2_PIX_FMT_XV20M v4l2_fourcc('X', 'M', '2', '0') /* 32 XY/UV 4:2:2 10-bit */
+#define V4L2_PIX_FMT_XV15M v4l2_fourcc('X', 'M', '1', '5') /* 32 XY/UV 4:2:0 10-bit */
+#define V4L2_PIX_FMT_X012M v4l2_fourcc('M', '0', '1', '2') /* 40 XY/UV 4:2:0 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X212M v4l2_fourcc('M', '2', '1', '2') /* 40 XY/UV 4:2:2 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X412M v4l2_fourcc('M', '4', '1', '2') /* 40 XY/UV 4:4:4 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X016M v4l2_fourcc('M', '0', '1', '6') /* 32 XY/UV 4:2:0 16-bit */
+#define V4L2_PIX_FMT_X216M v4l2_fourcc('M', '2', '1', '6') /* 32 XY/UV 4:2:2 16-bit */
+#define V4L2_PIX_FMT_X416M v4l2_fourcc('M', '4', '1', '6') /* 32 XY/UV 4:4:4 16-bit */
#define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 macroblocks */
/* three planes - Y Cb, Cr */
@@ -1496,7 +1521,8 @@ struct v4l2_bt_timings {
((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
#define V4L2_DV_BT_BLANKING_HEIGHT(bt) \
((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \
- (bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch)
+ ((bt)->interlaced ? \
+ ((bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) : 0))
#define V4L2_DV_BT_FRAME_HEIGHT(bt) \
((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
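
The blanking-height change makes the il_* fields count only for interlaced timings, so a progressive mode carrying stale il_* values no longer over-reports its frame height. A standalone check with made-up numbers:

#include <stdio.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_bt_timings bt = {
		.height = 1080,
		.vfrontporch = 4, .vsync = 5, .vbackporch = 36,
		.interlaced = 0,	/* progressive */
		.il_vfrontporch = 2, .il_vsync = 5, .il_vbackporch = 15,
	};

	/* prints 1125; before the fix the il_* fields leaked in */
	printf("frame height: %u\n", V4L2_DV_BT_FRAME_HEIGHT(&bt));
	return 0;
}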
@@ -1587,7 +1613,7 @@ struct v4l2_input {
__u8 name[32]; /* Label */
__u32 type; /* Type of input */
__u32 audioset; /* Associated audios (bitfield) */
- __u32 tuner; /* enum v4l2_tuner_type */
+ __u32 tuner; /* Tuner index */
v4l2_std_id std;
__u32 status;
__u32 capabilities;
diff --git a/include/uapi/linux/virtio_crypto.h b/include/uapi/linux/virtio_crypto.h
index 50cdc8aebfcf..05330284eb59 100644
--- a/include/uapi/linux/virtio_crypto.h
+++ b/include/uapi/linux/virtio_crypto.h
@@ -408,6 +408,7 @@ struct virtio_crypto_op_data_req {
#define VIRTIO_CRYPTO_BADMSG 2
#define VIRTIO_CRYPTO_NOTSUPP 3
#define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */
+#define VIRTIO_CRYPTO_NOSPC 5 /* no free session ID */
/* The accelerator hardware is ready */
#define VIRTIO_CRYPTO_S_HW_READY (1 << 0)
diff --git a/include/uapi/linux/watch_queue.h b/include/uapi/linux/watch_queue.h
new file mode 100644
index 000000000000..5f3d21e8a34b
--- /dev/null
+++ b/include/uapi/linux/watch_queue.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_WATCH_QUEUE_H
+#define _UAPI_LINUX_WATCH_QUEUE_H
+
+#include <linux/types.h>
+
+enum watch_notification_type {
+ WATCH_TYPE_META = 0, /* Special record */
+ WATCH_TYPE__NR = 1
+};
+
+enum watch_meta_notification_subtype {
+ WATCH_META_REMOVAL_NOTIFICATION = 0, /* Watched object was removed */
+ WATCH_META_LOSS_NOTIFICATION = 1, /* Data loss occurred */
+};
+
+/*
+ * Notification record header. This is aligned to 64-bits so that subclasses
+ * can contain __u64 fields.
+ */
+struct watch_notification {
+ __u32 type:24; /* enum watch_notification_type */
+ __u32 subtype:8; /* Type-specific subtype (filterable) */
+ __u32 info;
+#define WATCH_INFO_LENGTH 0x0000007f /* Length of record */
+#define WATCH_INFO_LENGTH__SHIFT 0
+#define WATCH_INFO_ID 0x0000ff00 /* ID of watchpoint */
+#define WATCH_INFO_ID__SHIFT 8
+#define WATCH_INFO_TYPE_INFO 0xffff0000 /* Type-specific info */
+#define WATCH_INFO_TYPE_INFO__SHIFT 16
+#define WATCH_INFO_FLAG_0 0x00010000 /* Type-specific info, flag bit 0 */
+#define WATCH_INFO_FLAG_1 0x00020000 /* ... */
+#define WATCH_INFO_FLAG_2 0x00040000
+#define WATCH_INFO_FLAG_3 0x00080000
+#define WATCH_INFO_FLAG_4 0x00100000
+#define WATCH_INFO_FLAG_5 0x00200000
+#define WATCH_INFO_FLAG_6 0x00400000
+#define WATCH_INFO_FLAG_7 0x00800000
+};
+
+
+/*
+ * Extended watch removal notification. This is used optionally if the type
+ * wants to indicate an identifier for the object being watched, if there is
+ * one. It can be distinguished by the record length.
+ *
+ * type -> WATCH_TYPE_META
+ * subtype -> WATCH_META_REMOVAL_NOTIFICATION
+ */
+struct watch_notification_removal {
+ struct watch_notification watch;
+ __u64 id; /* Type-dependent identifier */
+};
+
+#endif /* _UAPI_LINUX_WATCH_QUEUE_H */
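
The info field packs the record length and watchpoint ID behind the masks above; small helpers make the extraction explicit (a sketch, not part of the header):

#include <linux/watch_queue.h>

static inline unsigned int watch_len(const struct watch_notification *n)
{
	return (n->info & WATCH_INFO_LENGTH) >> WATCH_INFO_LENGTH__SHIFT;
}

static inline unsigned int watch_id(const struct watch_notification *n)
{
	return (n->info & WATCH_INFO_ID) >> WATCH_INFO_ID__SHIFT;
}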
diff --git a/include/uapi/linux/xilinx-csi2rxss.h b/include/uapi/linux/xilinx-csi2rxss.h
new file mode 100644
index 000000000000..df64ddc5eed4
--- /dev/null
+++ b/include/uapi/linux/xilinx-csi2rxss.h
@@ -0,0 +1,18 @@
+#ifndef __UAPI_XILINX_CSI2RXSS_H__
+#define __UAPI_XILINX_CSI2RXSS_H__
+
+#include <linux/videodev2.h>
+
+/*
+ * Events
+ *
+ * V4L2_EVENT_XLNXCSIRX_SPKT: Short packet received
+ * V4L2_EVENT_XLNXCSIRX_SPKT_OVF: Short packet FIFO overflow
+ * V4L2_EVENT_XLNXCSIRX_SLBF: Stream line buffer full
+ */
+#define V4L2_EVENT_XLNXCSIRX_CLASS (V4L2_EVENT_PRIVATE_START | 0x100)
+#define V4L2_EVENT_XLNXCSIRX_SPKT (V4L2_EVENT_XLNXCSIRX_CLASS | 0x1)
+#define V4L2_EVENT_XLNXCSIRX_SPKT_OVF (V4L2_EVENT_XLNXCSIRX_CLASS | 0x2)
+#define V4L2_EVENT_XLNXCSIRX_SLBF (V4L2_EVENT_XLNXCSIRX_CLASS | 0x3)
+
+#endif /* __UAPI_XILINX_CSI2RXSS_H__ */
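
These are standard V4L2 private events, so userspace opts in with VIDIOC_SUBSCRIBE_EVENT and then waits with VIDIOC_DQEVENT. A sketch, assuming fd is an open node on the CSI-2 Rx pipeline:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/xilinx-csi2rxss.h>

static int subscribe_spkt(int fd)
{
	struct v4l2_event_subscription sub;

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_XLNXCSIRX_SPKT;	/* short packet received */
	return ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
}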
diff --git a/include/uapi/linux/xilinx-hls.h b/include/uapi/linux/xilinx-hls.h
new file mode 100644
index 000000000000..a7f6447927e0
--- /dev/null
+++ b/include/uapi/linux/xilinx-hls.h
@@ -0,0 +1,21 @@
+#ifndef __UAPI_XILINX_HLS_H__
+#define __UAPI_XILINX_HLS_H__
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+struct xilinx_axi_hls_register {
+ __u32 offset;
+ __u32 value;
+};
+
+struct xilinx_axi_hls_registers {
+ __u32 num_regs;
+ struct xilinx_axi_hls_register __user *regs;
+};
+
+#define XILINX_AXI_HLS_READ _IOWR('V', BASE_VIDIOC_PRIVATE+0, struct xilinx_axi_hls_registers)
+#define XILINX_AXI_HLS_WRITE _IOW('V', BASE_VIDIOC_PRIVATE+1, struct xilinx_axi_hls_registers)
+
+#endif /* __UAPI_XILINX_HLS_H__ */
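
A sketch of a single-register read through the private ioctl; the register offset is core-specific and fd is assumed to be the HLS subdev node:

#include <sys/ioctl.h>
#include <linux/xilinx-hls.h>

static int hls_read_reg(int fd, __u32 offset, __u32 *value)
{
	struct xilinx_axi_hls_register reg = { .offset = offset };
	struct xilinx_axi_hls_registers regs = {
		.num_regs = 1,
		.regs = &reg,
	};

	if (ioctl(fd, XILINX_AXI_HLS_READ, &regs))
		return -1;
	*value = reg.value;
	return 0;
}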
diff --git a/include/uapi/linux/xilinx-sdirxss.h b/include/uapi/linux/xilinx-sdirxss.h
new file mode 100644
index 000000000000..1731f8b71fe8
--- /dev/null
+++ b/include/uapi/linux/xilinx-sdirxss.h
@@ -0,0 +1,64 @@
+#ifndef __UAPI_XILINX_SDIRXSS_H__
+#define __UAPI_XILINX_SDIRXSS_H__
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+/*
+ * Events
+ *
+ * V4L2_EVENT_XLNXSDIRX_UNDERFLOW: Video in to AXI4 Stream core underflowed
+ * V4L2_EVENT_XLNXSDIRX_OVERFLOW: Video in to AXI4 Stream core overflowed
+ */
+#define V4L2_EVENT_XLNXSDIRX_CLASS (V4L2_EVENT_PRIVATE_START | 0x200)
+#define V4L2_EVENT_XLNXSDIRX_UNDERFLOW (V4L2_EVENT_XLNXSDIRX_CLASS | 0x1)
+#define V4L2_EVENT_XLNXSDIRX_OVERFLOW (V4L2_EVENT_XLNXSDIRX_CLASS | 0x2)
+
+/*
+ * This enum is used to prepare the bitmask
+ * of modes to be detected
+ */
+enum {
+ XSDIRX_MODE_SD_OFFSET = 0,
+ XSDIRX_MODE_HD_OFFSET,
+ XSDIRX_MODE_3G_OFFSET,
+ XSDIRX_MODE_6G_OFFSET,
+ XSDIRX_MODE_12GI_OFFSET,
+ XSDIRX_MODE_12GF_OFFSET,
+ XSDIRX_MODE_NUM_SUPPORTED,
+};
+
+#define XSDIRX_DETECT_ALL_MODES (BIT(XSDIRX_MODE_SD_OFFSET) | \
+ BIT(XSDIRX_MODE_HD_OFFSET) | \
+ BIT(XSDIRX_MODE_3G_OFFSET) | \
+ BIT(XSDIRX_MODE_6G_OFFSET) | \
+ BIT(XSDIRX_MODE_12GI_OFFSET) | \
+ BIT(XSDIRX_MODE_12GF_OFFSET))
+
+/*
+ * EDH Error Types
+ * ANC - Ancillary Data Packet Errors
+ * FF - Full Field Errors
+ * AP - Active Portion Errors
+ */
+
+#define XSDIRX_EDH_ERRCNT_ANC_EDH_ERR BIT(0)
+#define XSDIRX_EDH_ERRCNT_ANC_EDA_ERR BIT(1)
+#define XSDIRX_EDH_ERRCNT_ANC_IDH_ERR BIT(2)
+#define XSDIRX_EDH_ERRCNT_ANC_IDA_ERR BIT(3)
+#define XSDIRX_EDH_ERRCNT_ANC_UES_ERR BIT(4)
+#define XSDIRX_EDH_ERRCNT_FF_EDH_ERR BIT(5)
+#define XSDIRX_EDH_ERRCNT_FF_EDA_ERR BIT(6)
+#define XSDIRX_EDH_ERRCNT_FF_IDH_ERR BIT(7)
+#define XSDIRX_EDH_ERRCNT_FF_IDA_ERR BIT(8)
+#define XSDIRX_EDH_ERRCNT_FF_UES_ERR BIT(9)
+#define XSDIRX_EDH_ERRCNT_AP_EDH_ERR BIT(10)
+#define XSDIRX_EDH_ERRCNT_AP_EDA_ERR BIT(11)
+#define XSDIRX_EDH_ERRCNT_AP_IDH_ERR BIT(12)
+#define XSDIRX_EDH_ERRCNT_AP_IDA_ERR BIT(13)
+#define XSDIRX_EDH_ERRCNT_AP_UES_ERR BIT(14)
+#define XSDIRX_EDH_ERRCNT_PKT_CHKSUM_ERR BIT(15)
+
+#define XSDIRX_EDH_ALLERR_MASK 0xFFFF
+
+#endif /* __UAPI_XILINX_SDIRXSS_H__ */
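
The mode offsets are meant to be folded into a bitmask for the SDI Rx search-modes control added in xilinx-v4l2-controls.h below. A sketch restricting detection to HD and 3G; note the header leans on BIT(), which a userspace build has to supply, and fd is assumed to be an open subdev node:

#include <sys/ioctl.h>
#include <linux/videodev2.h>
#ifndef BIT
#define BIT(n) (1U << (n))	/* the header relies on BIT() */
#endif
#include <linux/xilinx-sdirxss.h>
#include <linux/xilinx-v4l2-controls.h>

static int sdirx_search_hd_3g(int fd)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_XILINX_SDIRX_SEARCH_MODES,
		.value = BIT(XSDIRX_MODE_HD_OFFSET) |
			 BIT(XSDIRX_MODE_3G_OFFSET),
	};

	return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}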
diff --git a/include/uapi/linux/xilinx-v4l2-controls.h b/include/uapi/linux/xilinx-v4l2-controls.h
index b6441fe705c5..23d1574c6d55 100644
--- a/include/uapi/linux/xilinx-v4l2-controls.h
+++ b/include/uapi/linux/xilinx-v4l2-controls.h
@@ -70,5 +70,166 @@
#define V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH (V4L2_CID_XILINX_TPG + 16)
/* Noise level */
#define V4L2_CID_XILINX_TPG_NOISE_GAIN (V4L2_CID_XILINX_TPG + 17)
+/* Foreground pattern (HLS) */
+#define V4L2_CID_XILINX_TPG_HLS_FG_PATTERN (V4L2_CID_XILINX_TPG + 18)
+/*
+ * Xilinx CRESAMPLE Video IP
+ */
+
+#define V4L2_CID_XILINX_CRESAMPLE (V4L2_CID_USER_BASE + 0xc020)
+
+/* The field parity for interlaced video */
+#define V4L2_CID_XILINX_CRESAMPLE_FIELD_PARITY (V4L2_CID_XILINX_CRESAMPLE + 1)
+/* Specify if the first line of video contains the Chroma information */
+#define V4L2_CID_XILINX_CRESAMPLE_CHROMA_PARITY (V4L2_CID_XILINX_CRESAMPLE + 2)
+
+/*
+ * Xilinx RGB2YUV Video IPs
+ */
+
+#define V4L2_CID_XILINX_RGB2YUV (V4L2_CID_USER_BASE + 0xc040)
+
+/* Maximum Luma(Y) value */
+#define V4L2_CID_XILINX_RGB2YUV_YMAX (V4L2_CID_XILINX_RGB2YUV + 1)
+/* Minimum Luma(Y) value */
+#define V4L2_CID_XILINX_RGB2YUV_YMIN (V4L2_CID_XILINX_RGB2YUV + 2)
+/* Maximum Cb Chroma value */
+#define V4L2_CID_XILINX_RGB2YUV_CBMAX (V4L2_CID_XILINX_RGB2YUV + 3)
+/* Minimum Cb Chroma value */
+#define V4L2_CID_XILINX_RGB2YUV_CBMIN (V4L2_CID_XILINX_RGB2YUV + 4)
+/* Maximum Cr Chroma value */
+#define V4L2_CID_XILINX_RGB2YUV_CRMAX (V4L2_CID_XILINX_RGB2YUV + 5)
+/* Minimum Cr Chroma value */
+#define V4L2_CID_XILINX_RGB2YUV_CRMIN (V4L2_CID_XILINX_RGB2YUV + 6)
+/* The offset compensation value for Luma(Y) */
+#define V4L2_CID_XILINX_RGB2YUV_YOFFSET (V4L2_CID_XILINX_RGB2YUV + 7)
+/* The offset compensation value for Cb Chroma */
+#define V4L2_CID_XILINX_RGB2YUV_CBOFFSET (V4L2_CID_XILINX_RGB2YUV + 8)
+/* The offset compensation value for Cr Chroma */
+#define V4L2_CID_XILINX_RGB2YUV_CROFFSET (V4L2_CID_XILINX_RGB2YUV + 9)
+
+/* Y = CA * R + (1 - CA - CB) * G + CB * B */
+
+/* CA coefficient */
+#define V4L2_CID_XILINX_RGB2YUV_ACOEF (V4L2_CID_XILINX_RGB2YUV + 10)
+/* CB coefficient */
+#define V4L2_CID_XILINX_RGB2YUV_BCOEF (V4L2_CID_XILINX_RGB2YUV + 11)
+/* CC coefficient */
+#define V4L2_CID_XILINX_RGB2YUV_CCOEF (V4L2_CID_XILINX_RGB2YUV + 12)
+/* CD coefficient */
+#define V4L2_CID_XILINX_RGB2YUV_DCOEF (V4L2_CID_XILINX_RGB2YUV + 13)
+
+/*
+ * Xilinx HLS Video IP
+ */
+
+#define V4L2_CID_XILINX_HLS (V4L2_CID_USER_BASE + 0xc060)
+
+/* The IP model */
+#define V4L2_CID_XILINX_HLS_MODEL (V4L2_CID_XILINX_HLS + 1)
+
+/*
+ * Xilinx MIPI CSI2 Rx Subsystem
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_MIPICSISS (V4L2_CID_USER_BASE + 0xc080)
+
+/* Active Lanes */
+#define V4L2_CID_XILINX_MIPICSISS_ACT_LANES (V4L2_CID_XILINX_MIPICSISS + 1)
+/* Frames received since streaming is set */
+#define V4L2_CID_XILINX_MIPICSISS_FRAME_COUNTER (V4L2_CID_XILINX_MIPICSISS + 2)
+/* Reset all event counters */
+#define V4L2_CID_XILINX_MIPICSISS_RESET_COUNTERS (V4L2_CID_XILINX_MIPICSISS + 3)
+
+/*
+ * Xilinx Gamma Correction IP
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_GAMMA_CORR (V4L2_CID_USER_BASE + 0xc0c0)
+/* Adjust Red Gamma */
+#define V4L2_CID_XILINX_GAMMA_CORR_RED_GAMMA (V4L2_CID_XILINX_GAMMA_CORR + 1)
+/* Adjust Blue Gamma */
+#define V4L2_CID_XILINX_GAMMA_CORR_BLUE_GAMMA (V4L2_CID_XILINX_GAMMA_CORR + 2)
+/* Adjust Green Gamma */
+#define V4L2_CID_XILINX_GAMMA_CORR_GREEN_GAMMA (V4L2_CID_XILINX_GAMMA_CORR + 3)
+
+/*
+ * Xilinx Color Space Converter (CSC) VPSS
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_CSC (V4L2_CID_USER_BASE + 0xc0a0)
+/* Adjust Brightness */
+#define V4L2_CID_XILINX_CSC_BRIGHTNESS (V4L2_CID_XILINX_CSC + 1)
+/* Adjust Contrast */
+#define V4L2_CID_XILINX_CSC_CONTRAST (V4L2_CID_XILINX_CSC + 2)
+/* Adjust Red Gain */
+#define V4L2_CID_XILINX_CSC_RED_GAIN (V4L2_CID_XILINX_CSC + 3)
+/* Adjust Green Gain */
+#define V4L2_CID_XILINX_CSC_GREEN_GAIN (V4L2_CID_XILINX_CSC + 4)
+/* Adjust Blue Gain */
+#define V4L2_CID_XILINX_CSC_BLUE_GAIN (V4L2_CID_XILINX_CSC + 5)
+
+/*
+ * Xilinx SDI Rx Subsystem
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_SDIRX (V4L2_CID_USER_BASE + 0xc100)
+
+/* Framer Control */
+#define V4L2_CID_XILINX_SDIRX_FRAMER (V4L2_CID_XILINX_SDIRX + 1)
+/* Video Lock Window Control */
+#define V4L2_CID_XILINX_SDIRX_VIDLOCK_WINDOW (V4L2_CID_XILINX_SDIRX + 2)
+/* EDH Error Mask Control */
+#define V4L2_CID_XILINX_SDIRX_EDH_ERRCNT_ENABLE (V4L2_CID_XILINX_SDIRX + 3)
+/* Mode search Control */
+#define V4L2_CID_XILINX_SDIRX_SEARCH_MODES (V4L2_CID_XILINX_SDIRX + 4)
+/* Get Detected Mode control */
+#define V4L2_CID_XILINX_SDIRX_MODE_DETECT (V4L2_CID_XILINX_SDIRX + 5)
+/* Get CRC error status */
+#define V4L2_CID_XILINX_SDIRX_CRC (V4L2_CID_XILINX_SDIRX + 6)
+/* Get EDH error count control */
+#define V4L2_CID_XILINX_SDIRX_EDH_ERRCNT (V4L2_CID_XILINX_SDIRX + 7)
+/* Get EDH status control */
+#define V4L2_CID_XILINX_SDIRX_EDH_STATUS (V4L2_CID_XILINX_SDIRX + 8)
+/* Get Transport Interlaced status */
+#define V4L2_CID_XILINX_SDIRX_TS_IS_INTERLACED (V4L2_CID_XILINX_SDIRX + 9)
+/* Get Active Streams count */
+#define V4L2_CID_XILINX_SDIRX_ACTIVE_STREAMS (V4L2_CID_XILINX_SDIRX + 10)
+/* Is Mode 3GB */
+#define V4L2_CID_XILINX_SDIRX_IS_3GB (V4L2_CID_XILINX_SDIRX + 11)
+
+/*
+ * Xilinx VIP
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_VIP (V4L2_CID_USER_BASE + 0xc120)
+
+/* Low latency mode */
+#define V4L2_CID_XILINX_LOW_LATENCY (V4L2_CID_XILINX_VIP + 1)
+
+/* Control values to enable/disable low latency capture mode */
+#define XVIP_LOW_LATENCY_ENABLE BIT(1)
+#define XVIP_LOW_LATENCY_DISABLE BIT(2)
+
+/* Control value to start DMA */
+#define XVIP_START_DMA BIT(3)
+
+/*
+ * Xilinx SCD
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_SCD (V4L2_CID_USER_BASE + 0xc140)
+
+/*
+ * SCD Threshold
+ * User can pass percentage as an integer to tune threshold value
+ */
+#define V4L2_CID_XILINX_SCD_THRESHOLD (V4L2_CID_XILINX_SCD + 1)
#endif /* __UAPI_XILINX_V4L2_CONTROLS_H__ */
diff --git a/include/uapi/linux/xilinx-v4l2-events.h b/include/uapi/linux/xilinx-v4l2-events.h
new file mode 100644
index 000000000000..e31e998eba67
--- /dev/null
+++ b/include/uapi/linux/xilinx-v4l2-events.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx V4L2 SCD Driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ *
+ */
+
+#ifndef __UAPI_XILINX_V4L2_EVENTS_H__
+#define __UAPI_XILINX_V4L2_EVENTS_H__
+
+#include <linux/videodev2.h>
+
+/*
+ * Events
+ *
+ * V4L2_EVENT_XLNXSCD: Scene Change Detection
+ */
+#define V4L2_EVENT_XLNXSCD_CLASS (V4L2_EVENT_PRIVATE_START | 0x300)
+#define V4L2_EVENT_XLNXSCD (V4L2_EVENT_XLNXSCD_CLASS | 0x1)
+
+#endif /* __UAPI_XILINX_V4L2_EVENTS_H__ */
diff --git a/include/uapi/linux/xlnx_ctrl.h b/include/uapi/linux/xlnx_ctrl.h
new file mode 100644
index 000000000000..35ff1fdbf65b
--- /dev/null
+++ b/include/uapi/linux/xlnx_ctrl.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Xilinx Controls Header
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Contacts: Saurabh Sengar <saurabh.singh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __UAPI_XLNX_CTRL_H__
+#define __UAPI_XLNX_CTRL_H__
+
+#define XSET_FB_CAPTURE 16
+#define XSET_FB_CONFIGURE 17
+#define XSET_FB_ENABLE 18
+#define XSET_FB_DISABLE 19
+#define XSET_FB_RELEASE 20
+#define XSET_FB_ENABLE_SNGL 21
+#define XSET_FB_POLL 22
+#define XVPSS_SET_CONFIGURE 16
+#define XVPSS_SET_ENABLE 17
+#define XVPSS_SET_DISABLE 18
+
+#endif /* __UAPI_XLNX_CTRL_H__ */
+
diff --git a/include/uapi/linux/xlnx_mpg2tsmux_interface.h b/include/uapi/linux/xlnx_mpg2tsmux_interface.h
new file mode 100644
index 000000000000..356a627a6899
--- /dev/null
+++ b/include/uapi/linux/xlnx_mpg2tsmux_interface.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx mpeg2 transport stream muxer ioctl calls
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Venkateshwar Rao G <venkateshwar.rao.gannava@xilinx.com>
+ */
+
+#ifndef __XLNX_MPG2TSMUX_INTERFACE_H__
+#define __XLNX_MPG2TSMUX_INTERFACE_H__
+
+#include <linux/ioctl.h>
+
+/**
+ * enum ts_mux_command - command for stream context
+ * @CREATE_TS_MISC: create misc
+ * @CREATE_TS_VIDEO_KEYFRAME: create video key frame
+ * @CREATE_TS_VIDEO_NON_KEYFRAME: create non key frame
+ * @CREATE_TS_AUDIO: create audio
+ * @WRITE_PAT: write pat
+ * @WRITE_PMT: write pmt
+ * @WRITE_SI: write si
+ * @INVALID: invalid
+ */
+enum ts_mux_command {
+ CREATE_TS_MISC = 0,
+ CREATE_TS_VIDEO_KEYFRAME,
+ CREATE_TS_VIDEO_NON_KEYFRAME,
+ CREATE_TS_AUDIO,
+ WRITE_PAT,
+ WRITE_PMT,
+ WRITE_SI,
+ INVALID
+};
+
+/**
+ * struct stream_context_in - struct to enqueue a stream context descriptor
+ * @command: stream context type
+ * @stream_id: stream identification number
+ * @extended_stream_id: extended stream id
+ * @is_pcr_stream: flag for pcr stream
+ * @is_valid_pts: flag for valid pts
+ * @is_valid_dts: flag for valid dts
+ * @is_dmabuf: flag to set if external src buffer is DMA allocated
+ * @pid: packet id number
+ * @size_data_in: size in bytes of input buffer
+ * @pts: presentation time stamp
+ * @dts: decoding time stamp
+ * @srcbuf_id: source buffer id after mmap
+ * @insert_pcr: flag for inserting pcr in stream context
+ * @pcr_extension: pcr extension number
+ * @pcr_base: pcr base number
+ */
+struct stream_context_in {
+ enum ts_mux_command command;
+ u8 stream_id;
+ u8 extended_stream_id;
+ u8 is_pcr_stream;
+ u8 is_valid_pts;
+ u8 is_valid_dts;
+ u8 is_dmabuf;
+ u16 pid;
+ u64 size_data_in;
+ u64 pts;
+ u64 dts;
+ u32 srcbuf_id;
+ u8 insert_pcr;
+ u16 pcr_extension;
+ u64 pcr_base;
+};
+
+/**
+ * struct muxer_context_in - struct to enqueue a mux context descriptor
+ * @is_dmabuf: flag to set if external src buffer is DMA allocated
+ * @dstbuf_id: destination buffer id after mmap
+ * @dmabuf_size: size in bytes of output buffer
+ */
+struct muxer_context_in {
+ u8 is_dmabuf;
+ u32 dstbuf_id;
+ u32 dmabuf_size;
+};
+
+/**
+ * enum xlnx_tsmux_status - ip status
+ * @MPG2MUX_BUSY: device busy
+ * @MPG2MUX_READY: device ready
+ * @MPG2MUX_ERROR: error state
+ */
+enum xlnx_tsmux_status {
+ MPG2MUX_BUSY = 0,
+ MPG2MUX_READY,
+ MPG2MUX_ERROR
+};
+
+/**
+ * struct strc_bufs_info - struct to specify buffer requirements
+ * @num_buf: number of buffers
+ * @buf_size: size of each buffer
+ */
+struct strc_bufs_info {
+ u32 num_buf;
+ u32 buf_size;
+};
+
+/**
+ * struct out_buffer - struct to get output buffer info
+ * @buf_id: buf id into which output is written
+ * @buf_write: output bytes written in buf
+ */
+struct out_buffer {
+ u32 buf_id;
+ u32 buf_write;
+};
+
+/**
+ * enum strmtbl_cnxt - streamid table operation
+ * @NO_UPDATE: no table update
+ * @ADD_TO_TBL: add the entry to table
+ * @DEL_FR_TBL: delete the entry from table
+ */
+enum strmtbl_cnxt {
+ NO_UPDATE = 0,
+ ADD_TO_TBL,
+ DEL_FR_TBL,
+};
+
+/**
+ * struct strc_strminfo - struct to enqueue/dequeue streamid in table
+ * @strmtbl_ctxt: enqueue/dequeue stream id
+ * @pid: stream id
+ */
+struct strc_strminfo {
+ enum strmtbl_cnxt strmtbl_ctxt;
+ u16 pid;
+};
+
+/**
+ * enum xlnx_tsmux_dma_dir - dma direction
+ * @DMA_TO_MPG2MUX: memory to device
+ * @DMA_FROM_MPG2MUX: device to memory
+ */
+enum xlnx_tsmux_dma_dir {
+ DMA_TO_MPG2MUX = 1,
+ DMA_FROM_MPG2MUX,
+};
+
+/**
+ * enum xlnx_tsmux_dmabuf_flags - dma buffer handling
+ * @DMABUF_ERROR: buffer error
+ * @DMABUF_CONTIG: contig buffer
+ * @DMABUF_NON_CONTIG: non contiguous buffer
+ * @DMABUF_ATTACHED: buffer attached
+ */
+enum xlnx_tsmux_dmabuf_flags {
+ DMABUF_ERROR = 1,
+ DMABUF_CONTIG = 2,
+ DMABUF_NON_CONTIG = 4,
+ DMABUF_ATTACHED = 8,
+};
+
+/**
+ * struct xlnx_tsmux_dmabuf_info - struct to verify dma buf before enqueue
+ * @buf_fd: file descriptor
+ * @dir: direction of the dma buffer
+ * @flags: flags returned by the driver
+ */
+struct xlnx_tsmux_dmabuf_info {
+ int buf_fd;
+ enum xlnx_tsmux_dma_dir dir;
+ enum xlnx_tsmux_dmabuf_flags flags;
+};
+
+/* MPG2MUX IOCTL CALL LIST */
+
+#define MPG2MUX_MAGIC 'M'
+
+/**
+ * MPG2MUX_INBUFALLOC - src buffer allocation
+ */
+#define MPG2MUX_INBUFALLOC _IOWR(MPG2MUX_MAGIC, 1, struct strc_bufs_info *)
+
+/**
+ * MPG2MUX_INBUFDEALLOC - deallocates all the src buffers
+ */
+#define MPG2MUX_INBUFDEALLOC _IO(MPG2MUX_MAGIC, 2)
+
+/**
+ * MPG2MUX_OUTBUFALLOC - allocates DMA able memory for dst
+ */
+#define MPG2MUX_OUTBUFALLOC _IOWR(MPG2MUX_MAGIC, 3, struct strc_bufs_info *)
+
+/**
+ * MPG2MUX_OUTBUFDEALLOC - deallocates all the allocated dst buffers
+ */
+#define MPG2MUX_OUTBUFDEALLOC _IO(MPG2MUX_MAGIC, 4)
+
+/**
+ * MPG2MUX_STBLALLOC - allocates DMA able memory for streamid table
+ */
+#define MPG2MUX_STBLALLOC _IOW(MPG2MUX_MAGIC, 5, unsigned short *)
+
+/**
+ * MPG2MUX_STBLDEALLOC - deallocates streamid table memory
+ */
+#define MPG2MUX_STBLDEALLOC _IO(MPG2MUX_MAGIC, 6)
+
+/**
+ * MPG2MUX_TBLUPDATE - enqueue or dequeue in streamid table
+ */
+#define MPG2MUX_TBLUPDATE _IOW(MPG2MUX_MAGIC, 7, struct strc_strminfo *)
+
+/**
+ * MPG2MUX_SETSTRM - enqueue a stream descriptor in stream context
+ * linked list along with src buf address
+ */
+#define MPG2MUX_SETSTRM _IOW(MPG2MUX_MAGIC, 8, struct stream_context_in *)
+
+/**
+ * MPG2MUX_START - starts muxer IP after configuring stream
+ * and mux context registers
+ */
+#define MPG2MUX_START _IO(MPG2MUX_MAGIC, 9)
+
+/**
+ * MPG2MUX_STOP - stops the muxer IP
+ */
+#define MPG2MUX_STOP _IO(MPG2MUX_MAGIC, 10)
+
+/**
+ * MPG2MUX_STATUS - command to get the status of IP
+ */
+#define MPG2MUX_STATUS _IOR(MPG2MUX_MAGIC, 11, unsigned short *)
+
+/**
+ * MPG2MUX_GETOUTBUF - get the output buffer id with size of output data
+ */
+#define MPG2MUX_GETOUTBUF _IOW(MPG2MUX_MAGIC, 12, struct out_buffer *)
+
+/**
+ * MPG2MUX_SETMUX - enqueue a mux descriptor with dst buf address
+ */
+#define MPG2MUX_SETMUX _IOW(MPG2MUX_MAGIC, 13, struct muxer_context_in *)
+
+/**
+ * MPG2MUX_VDBUF - status of a given dma buffer fd
+ */
+#define MPG2MUX_VDBUF _IOWR(MPG2MUX_MAGIC, 14, struct xlnx_tsmux_dmabuf_info *)
+
+#endif
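
The header uses kernel-internal type names (u8/u16/u32/u64), so a userspace consumer has to define them before including it. A status-poll sketch under that assumption, with fd an open muxer node:

#include <stdint.h>
#include <sys/ioctl.h>

typedef uint8_t u8;	/* the header assumes kernel typedefs */
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
#include <linux/xlnx_mpg2tsmux_interface.h>

/* Returns 1 when the muxer is ready, 0 when busy, -1 on error. */
static int tsmux_ready(int fd)
{
	unsigned short status = 0;

	if (ioctl(fd, MPG2MUX_STATUS, &status))
		return -1;
	return status == MPG2MUX_READY;
}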
diff --git a/include/uapi/linux/xlnxsync.h b/include/uapi/linux/xlnxsync.h
new file mode 100644
index 000000000000..d5f1062f2369
--- /dev/null
+++ b/include/uapi/linux/xlnxsync.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __XLNXSYNC_H__
+#define __XLNXSYNC_H__
+
+#define XLNXSYNC_IOCTL_HDR_VER 0x10001
+
+/*
+ * This is set in the fb_id of struct xlnxsync_chan_config when
+ * configuring the channel. This makes the driver auto search for
+ * a free framebuffer slot.
+ */
+#define XLNXSYNC_AUTO_SEARCH 0xFF
+
+#define XLNXSYNC_MAX_ENC_CHAN 4
+#define XLNXSYNC_MAX_DEC_CHAN 2
+#define XLNXSYNC_BUF_PER_CHAN 3
+
+#define XLNXSYNC_PROD 0
+#define XLNXSYNC_CONS 1
+#define XLNXSYNC_IO 2
+
+#define XLNXSYNC_MAX_CORES 4
+/**
+ * struct xlnxsync_chan_config - Synchronizer channel configuration struct
+ * @hdr_ver: IOCTL header version
+ * @luma_start_offset: Start offset of Luma buffer
+ * @chroma_start_offset: Start offset of Chroma buffer
+ * @luma_end_offset: End offset of Luma buffer
+ * @chroma_end_offset: End offset of Chroma buffer
+ * @luma_margin: Margin for Luma buffer
+ * @chroma_margin: Margin for Chroma buffer
+ * @luma_core_offset: Array of 4 offsets for luma
+ * @chroma_core_offset: Array of 4 offsets for chroma
+ * @dma_fd: File descriptor of dma
+ * @fb_id: Framebuffer index. Valid values 0/1/2/XLNXSYNC_AUTO_SEARCH
+ * @ismono: Flag to indicate if buffer is Luma only.
+ * @channel_id: Channel index to be configured.
+ * Valid values are 0..3 and XLNXSYNC_AUTO_SEARCH
+ *
+ * This structure contains the configuration for monitoring a particular
+ * framebuffer on a particular channel.
+ */
+struct xlnxsync_chan_config {
+ u64 hdr_ver;
+ u64 luma_start_offset[XLNXSYNC_IO];
+ u64 chroma_start_offset[XLNXSYNC_IO];
+ u64 luma_end_offset[XLNXSYNC_IO];
+ u64 chroma_end_offset[XLNXSYNC_IO];
+ u32 luma_margin;
+ u32 chroma_margin;
+ u32 luma_core_offset[XLNXSYNC_MAX_CORES];
+ u32 chroma_core_offset[XLNXSYNC_MAX_CORES];
+ u32 dma_fd;
+ u8 fb_id[XLNXSYNC_IO];
+ u8 ismono[XLNXSYNC_IO];
+ u8 channel_id;
+};
+
+/**
+ * struct xlnxsync_clr_err - Clear channel error
+ * @hdr_ver: IOCTL header version
+ * @channel_id: Channel id whose error needs to be cleared
+ * @sync_err: Set this to clear sync error
+ * @wdg_err: Set this to clear watchdog error
+ * @ldiff_err: Set this to clear luma difference error
+ * @cdiff_err: Set this to clear chroma difference error
+ */
+struct xlnxsync_clr_err {
+ u64 hdr_ver;
+ u8 channel_id;
+ u8 sync_err;
+ u8 wdg_err;
+ u8 ldiff_err;
+ u8 cdiff_err;
+};
+
+/**
+ * struct xlnxsync_fbdone - Framebuffer Done
+ * @hdr_ver: IOCTL header version
+ * @status: Framebuffer Done status
+ */
+struct xlnxsync_fbdone {
+ u64 hdr_ver;
+ u8 status[XLNXSYNC_MAX_ENC_CHAN][XLNXSYNC_BUF_PER_CHAN][XLNXSYNC_IO];
+};
+
+/**
+ * struct xlnxsync_config - Synchronizer IP configuration
+ * @hdr_ver: IOCTL header version
+ * @encode: true if encoder type, false for decoder type
+ * @max_channels: Maximum channels this IP supports
+ */
+struct xlnxsync_config {
+ u64 hdr_ver;
+ u8 encode;
+ u8 max_channels;
+};
+
+/**
+ * struct xlnxsync_stat - Sync IP status
+ * @hdr_ver: IOCTL header version
+ * @fbdone: done status for each luma/chroma buffer pair, per producer/consumer
+ * @enable: channel enable
+ * @sync_err: Synchronization error
+ * @wdg_err: Watchdog error
+ * @ldiff_err: Luma difference > 1 for channel
+ * @cdiff_err: Chroma difference > 1 for channel
+ */
+struct xlnxsync_stat {
+ u64 hdr_ver;
+ u8 fbdone[XLNXSYNC_MAX_ENC_CHAN][XLNXSYNC_BUF_PER_CHAN][XLNXSYNC_IO];
+ u8 enable[XLNXSYNC_MAX_ENC_CHAN];
+ u8 sync_err[XLNXSYNC_MAX_ENC_CHAN];
+ u8 wdg_err[XLNXSYNC_MAX_ENC_CHAN];
+ u8 ldiff_err[XLNXSYNC_MAX_ENC_CHAN];
+ u8 cdiff_err[XLNXSYNC_MAX_ENC_CHAN];
+};
+
+#define XLNXSYNC_MAGIC 'X'
+
+/*
+ * This ioctl is used to get the IP config (i.e. encode / decode)
+ * and max number of channels
+ */
+#define XLNXSYNC_GET_CFG _IOR(XLNXSYNC_MAGIC, 1,\
+ struct xlnxsync_config *)
+/* This ioctl is used to get the channel status */
+#define XLNXSYNC_GET_CHAN_STATUS _IOR(XLNXSYNC_MAGIC, 2, u32 *)
+/* This is used to set the framebuffer address for a channel */
+#define XLNXSYNC_SET_CHAN_CONFIG _IOW(XLNXSYNC_MAGIC, 3,\
+ struct xlnxsync_chan_config *)
+/* Enable a channel. The argument is channel number between 0 and 3 */
+#define XLNXSYNC_CHAN_ENABLE _IOR(XLNXSYNC_MAGIC, 4, u8)
+/* Disable a channel. The argument is channel number between 0 and 3 */
+#define XLNXSYNC_CHAN_DISABLE _IOR(XLNXSYNC_MAGIC, 5, u8)
+/* This is used to clear the Sync and Watchdog errors for a channel */
+#define XLNXSYNC_CLR_CHAN_ERR _IOW(XLNXSYNC_MAGIC, 6,\
+ struct xlnxsync_clr_err *)
+/* This is used to get the framebuffer done status for a channel */
+#define XLNXSYNC_GET_CHAN_FBDONE_STAT _IOR(XLNXSYNC_MAGIC, 7,\
+ struct xlnxsync_fbdone *)
+/* This is used to clear the framebuffer done status for a channel */
+#define XLNXSYNC_CLR_CHAN_FBDONE_STAT _IOW(XLNXSYNC_MAGIC, 8,\
+ struct xlnxsync_fbdone *)
+/* Reserve channel */
+#define XLNXSYNC_RESERVE_GET_CHAN_ID _IOR(XLNXSYNC_MAGIC, 9, u8 *)
+
+#endif
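
This header has the same reliance on kernel typedefs as the muxer interface above. A configuration-query sketch under that assumption, with fd an open synchronizer node:

#include <stdint.h>
#include <sys/ioctl.h>

typedef uint8_t u8;	/* the header assumes kernel typedefs */
typedef uint32_t u32;
typedef uint64_t u64;
#include <linux/xlnxsync.h>

static int sync_get_cfg(int fd, struct xlnxsync_config *cfg)
{
	cfg->hdr_ver = XLNXSYNC_IOCTL_HDR_VER;
	return ioctl(fd, XLNXSYNC_GET_CFG, cfg);
}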
diff --git a/include/uapi/linux/zocl_ioctl.h b/include/uapi/linux/zocl_ioctl.h
new file mode 100644
index 000000000000..ee1f1e289cd8
--- /dev/null
+++ b/include/uapi/linux/zocl_ioctl.h
@@ -0,0 +1,125 @@
+/*
+ * A GEM style CMA backed memory manager for ZynQ based OpenCL accelerators.
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Authors:
+ * Sonal Santan <sonal.santan@xilinx.com>
+ * Umang Parekh <umang.parekh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XCL_ZOCL_IOCTL_H_
+#define _XCL_ZOCL_IOCTL_H_
+
+enum {
+ DRM_ZOCL_CREATE_BO = 0,
+ DRM_ZOCL_MAP_BO,
+ DRM_ZOCL_SYNC_BO,
+ DRM_ZOCL_INFO_BO,
+ DRM_ZOCL_PWRITE_BO,
+ DRM_ZOCL_PREAD_BO,
+ DRM_ZOCL_NUM_IOCTLS
+};
+
+enum drm_zocl_sync_bo_dir {
+ DRM_ZOCL_SYNC_BO_TO_DEVICE,
+ DRM_ZOCL_SYNC_BO_FROM_DEVICE
+};
+
+#define DRM_ZOCL_BO_FLAGS_COHERENT 0x00000001
+#define DRM_ZOCL_BO_FLAGS_CMA 0x00000002
+
+struct drm_zocl_create_bo {
+ uint64_t size;
+ uint32_t handle;
+ uint32_t flags;
+};
+
+struct drm_zocl_map_bo {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t offset;
+};
+
+/**
+ * struct drm_zocl_sync_bo - used for SYNC_BO IOCTL
+ * @handle: GEM object handle
+ * @dir: DRM_ZOCL_SYNC_BO_XXX
+ * @offset: Offset into the object to sync
+ * @size: Length of data to sync
+ */
+struct drm_zocl_sync_bo {
+ uint32_t handle;
+ enum drm_zocl_sync_bo_dir dir;
+ uint64_t offset;
+ uint64_t size;
+};
+
+/**
+ * struct drm_zocl_info_bo - used for INFO_BO IOCTL
+ * @handle: GEM object handle
+ * @size: Size of BO
+ * @paddr: physical address
+ */
+struct drm_zocl_info_bo {
+ uint32_t handle;
+ uint64_t size;
+ uint64_t paddr;
+};
+
+/**
+ * struct drm_zocl_pwrite_bo - used for PWRITE_BO IOCTL
+ * @handle: GEM object handle
+ * @pad: Padding
+ * @offset: Offset into the object to write to
+ * @size: Length of data to write
+ * @data_ptr: Pointer to read the data from (pointers not 32/64 compatible)
+ */
+struct drm_zocl_pwrite_bo {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t offset;
+ uint64_t size;
+ uint64_t data_ptr;
+};
+
+/**
+ * struct drm_zocl_pread_bo - used for PREAD_BO IOCTL
+ * @handle: GEM object handle
+ * @pad: Padding
+ * @offset: Offset into the object to read from
+ * @size: Length of data to read
+ * @data_ptr: Pointer to write the data into (pointers not 32/64 compatible)
+ */
+struct drm_zocl_pread_bo {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t offset;
+ uint64_t size;
+ uint64_t data_ptr;
+};
+
+#define DRM_IOCTL_ZOCL_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_CREATE_BO, \
+ struct drm_zocl_create_bo)
+#define DRM_IOCTL_ZOCL_MAP_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_MAP_BO, struct drm_zocl_map_bo)
+#define DRM_IOCTL_ZOCL_SYNC_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_SYNC_BO, struct drm_zocl_sync_bo)
+#define DRM_IOCTL_ZOCL_INFO_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_INFO_BO, struct drm_zocl_info_bo)
+#define DRM_IOCTL_ZOCL_PWRITE_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_PWRITE_BO, \
+ struct drm_zocl_pwrite_bo)
+#define DRM_IOCTL_ZOCL_PREAD_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_PREAD_BO, struct drm_zocl_pread_bo)
+#endif
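
The ioctl numbers build on DRM_COMMAND_BASE, so drm.h has to be included first. A create-and-map sketch, where fd is assumed to be an open DRM node bound to zocl; the caller checks for MAP_FAILED:

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <linux/zocl_ioctl.h>

static void *zocl_alloc_map(int fd, uint64_t size)
{
	struct drm_zocl_create_bo create = {
		.size = size,
		.flags = DRM_ZOCL_BO_FLAGS_CMA,
	};
	struct drm_zocl_map_bo map = { 0 };

	if (ioctl(fd, DRM_IOCTL_ZOCL_CREATE_BO, &create))
		return MAP_FAILED;
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_ZOCL_MAP_BO, &map))
		return MAP_FAILED;
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
		    map.offset);
}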
diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h
index a75e14edc957..dbd60f48b4b0 100644
--- a/include/uapi/sound/asequencer.h
+++ b/include/uapi/sound/asequencer.h
@@ -344,10 +344,10 @@ typedef int __bitwise snd_seq_client_type_t;
#define KERNEL_CLIENT ((__force snd_seq_client_type_t) 2)
/* event filter flags */
-#define SNDRV_SEQ_FILTER_BROADCAST (1<<0) /* accept broadcast messages */
-#define SNDRV_SEQ_FILTER_MULTICAST (1<<1) /* accept multicast messages */
-#define SNDRV_SEQ_FILTER_BOUNCE (1<<2) /* accept bounce event in error */
-#define SNDRV_SEQ_FILTER_USE_EVENT (1<<31) /* use event filter */
+#define SNDRV_SEQ_FILTER_BROADCAST (1U<<0) /* accept broadcast messages */
+#define SNDRV_SEQ_FILTER_MULTICAST (1U<<1) /* accept multicast messages */
+#define SNDRV_SEQ_FILTER_BOUNCE (1U<<2) /* accept bounce event in error */
+#define SNDRV_SEQ_FILTER_USE_EVENT (1U<<31) /* use event filter */
struct snd_seq_client_info {
int client; /* client number to inquire */
diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
index a93c0decfdd5..215ce16b37d2 100644
--- a/include/uapi/sound/skl-tplg-interface.h
+++ b/include/uapi/sound/skl-tplg-interface.h
@@ -66,7 +66,8 @@ enum skl_ch_cfg {
SKL_CH_CFG_DUAL_MONO = 9,
SKL_CH_CFG_I2S_DUAL_STEREO_0 = 10,
SKL_CH_CFG_I2S_DUAL_STEREO_1 = 11,
- SKL_CH_CFG_4_CHANNEL = 12,
+ SKL_CH_CFG_7_1 = 12,
+ SKL_CH_CFG_4_CHANNEL = SKL_CH_CFG_7_1,
SKL_CH_CFG_INVALID
};
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index d71380f6ed0b..ffc0d3902b71 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -4,10 +4,10 @@
#include <linux/swiotlb.h>
-void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir);
-void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir);
+void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
+void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
extern int xen_swiotlb_init(int verbose, bool early);
extern const struct dma_map_ops xen_swiotlb_dma_ops;
diff --git a/init/Kconfig b/init/Kconfig
index 74f44b753d61..f641518f4ac5 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -36,7 +36,7 @@ config CC_HAS_ASM_GOTO
config CC_HAS_ASM_GOTO_TIED_OUTPUT
depends on CC_HAS_ASM_GOTO_OUTPUT
# Detect buggy gcc and clang, fixed in gcc-11 clang-14.
- def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .\n": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null)
+ def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null)
config CC_HAS_ASM_GOTO_OUTPUT
depends on CC_HAS_ASM_GOTO
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 15469d362129..69a08cf36545 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -647,7 +647,10 @@ struct file_system_type rootfs_fs_type = {
void __init init_rootfs(void)
{
- if (IS_ENABLED(CONFIG_TMPFS) && !saved_root_name[0] &&
- (!root_fs_names || strstr(root_fs_names, "tmpfs")))
- is_tmpfs = true;
+ if (IS_ENABLED(CONFIG_TMPFS)) {
+ if (!saved_root_name[0] && !root_fs_names)
+ is_tmpfs = true;
+ else if (root_fs_names && !!strstr(root_fs_names, "tmpfs"))
+ is_tmpfs = true;
+ }
}
diff --git a/init/main.c b/init/main.c
index 7c1004ccd4da..1c786bd8ca47 100644
--- a/init/main.c
+++ b/init/main.c
@@ -93,10 +93,8 @@
#include <linux/cache.h>
#include <linux/rodata_test.h>
#include <linux/jump_label.h>
-#include <linux/mem_encrypt.h>
#include <asm/io.h>
-#include <asm/bugs.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
@@ -504,8 +502,6 @@ void __init __weak thread_stack_cache_init(void)
}
#endif
-void __init __weak mem_encrypt_init(void) { }
-
void __init __weak poking_init(void) { }
void __init __weak pgtable_cache_init(void) { }
@@ -567,6 +563,7 @@ static void __init mm_init(void)
init_espfix_bsp();
/* Should be run after espfix64 is set up. */
pti_init();
+ mm_cache_init();
}
void __init __weak arch_call_rest_init(void)
@@ -627,7 +624,7 @@ asmlinkage __visible void __init start_kernel(void)
sort_main_extable();
trap_init();
mm_init();
-
+ poking_init();
ftrace_init();
/* trace_printk can be enabled here */
@@ -721,14 +718,6 @@ asmlinkage __visible void __init start_kernel(void)
*/
locking_selftest();
- /*
- * This needs to be called before any devices perform DMA
- * operations that might use the SWIOTLB bounce buffers. It will
- * mark the bounce buffers as decrypted so that their usage will
- * not cause "plain-text" data to be decrypted when accessed.
- */
- mem_encrypt_init();
-
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start && !initrd_below_start_ok &&
page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
@@ -745,6 +734,9 @@ asmlinkage __visible void __init start_kernel(void)
late_time_init();
sched_clock_init();
calibrate_delay();
+
+ arch_cpu_finalize_init();
+
pid_idr_init();
anon_vma_init();
#ifdef CONFIG_X86
@@ -771,9 +763,6 @@ asmlinkage __visible void __init start_kernel(void)
taskstats_init_early();
delayacct_init();
- poking_init();
- check_bugs();
-
acpi_subsystem_init();
arch_post_acpi_subsys_init();
sfi_init_late();
diff --git a/ipc/msg.c b/ipc/msg.c
index 767587ab45a3..46a870e31e25 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -137,7 +137,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
key_t key = params->key;
int msgflg = params->flg;
- msq = kvmalloc(sizeof(*msq), GFP_KERNEL);
+ msq = kvmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT);
if (unlikely(!msq))
return -ENOMEM;
diff --git a/ipc/sem.c b/ipc/sem.c
index fe12ea8dd2b3..31cfe0f5fa8d 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -492,7 +492,7 @@ static struct sem_array *sem_alloc(size_t nsems)
if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
return NULL;
- sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
+ sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
if (unlikely(!sma))
return NULL;
@@ -1835,7 +1835,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
undo_list = current->sysvsem.undo_list;
if (!undo_list) {
- undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
+ undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
if (undo_list == NULL)
return -ENOMEM;
spin_lock_init(&undo_list->lock);
@@ -1920,7 +1920,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
rcu_read_unlock();
/* step 2: allocate new undo structure */
- new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+ new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL_ACCOUNT);
if (!new) {
ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
return ERR_PTR(-ENOMEM);
@@ -2171,6 +2171,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
* scenarios where we were awakened externally, during the
* window between wake_q_add() and wake_up_q().
*/
+ rcu_read_lock();
error = READ_ONCE(queue.status);
if (error != -EINTR) {
/*
@@ -2180,10 +2181,10 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
* overwritten by the previous owner of the semaphore.
*/
smp_mb();
+ rcu_read_unlock();
goto out_free;
}
- rcu_read_lock();
locknum = sem_lock(sma, sops, nsops);
if (!ipc_valid_object(&sma->sem_perm))
diff --git a/ipc/shm.c b/ipc/shm.c
index 984addb5aeb5..0145767da1c1 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -711,7 +711,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
ns->shm_tot + numpages > ns->shm_ctlall)
return -ENOSPC;
- shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
+ shp = kvmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
if (unlikely(!shp))
return -ENOMEM;
diff --git a/kernel/acct.c b/kernel/acct.c
index 81f9831a7859..6d98aed403ba 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -331,6 +331,8 @@ static comp_t encode_comp_t(unsigned long value)
exp++;
}
+ if (exp > (((comp_t) ~0U) >> MANTSIZE))
+ return (comp_t) ~0U;
/*
* Clean it up and polish it off.
*/
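
The added check caps the exponent so encode_comp_t() saturates instead of letting mantissa bits shift into the exponent field. A simplified standalone copy (rounding dropped) that shows the behaviour:

#include <stdio.h>

typedef unsigned short comp_t;	/* 3-bit base-8 exponent, 13-bit mantissa */
#define MANTSIZE 13
#define EXPSIZE 3
#define MAXFRACT ((1 << MANTSIZE) - 1)

static comp_t encode_comp_t(unsigned long value)
{
	int exp = 0;

	while (value > MAXFRACT) {
		value >>= EXPSIZE;
		exp++;
	}
	if (exp > (((comp_t) ~0U) >> MANTSIZE))
		return (comp_t) ~0U;	/* saturate, as the fix does */
	return (exp << MANTSIZE) + value;
}

int main(void)
{
	printf("%#x\n", (unsigned int)encode_comp_t(~0UL));	/* 0xffff */
	return 0;
}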
diff --git a/kernel/async.c b/kernel/async.c
index 74660f611b97..1746cd65e271 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -111,7 +111,7 @@ static void async_run_entry_fn(struct work_struct *work)
struct async_entry *entry =
container_of(work, struct async_entry, work);
unsigned long flags;
- ktime_t uninitialized_var(calltime), delta, rettime;
+ ktime_t calltime, delta, rettime;
/* 1) run (and print duration) */
if (initcall_debug && system_state < SYSTEM_RUNNING) {
@@ -284,7 +284,7 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
*/
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
- ktime_t uninitialized_var(starttime), delta, endtime;
+ ktime_t starttime, delta, endtime;
if (initcall_debug && system_state < SYSTEM_RUNNING) {
pr_debug("async_waiting @ %i\n", task_pid_nr(current));
diff --git a/kernel/audit.c b/kernel/audit.c
index db8141866cea..39e84d65d253 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -484,15 +484,19 @@ static void auditd_conn_free(struct rcu_head *rcu)
* @pid: auditd PID
* @portid: auditd netlink portid
* @net: auditd network namespace pointer
+ * @skb: the netlink command from the audit daemon
+ * @ack: netlink ack flag, cleared if ack'd here
*
* Description:
* This function will obtain and drop network namespace references as
* necessary. Returns zero on success, negative values on failure.
*/
-static int auditd_set(struct pid *pid, u32 portid, struct net *net)
+static int auditd_set(struct pid *pid, u32 portid, struct net *net,
+ struct sk_buff *skb, bool *ack)
{
unsigned long flags;
struct auditd_connection *ac_old, *ac_new;
+ struct nlmsghdr *nlh;
if (!pid || !net)
return -EINVAL;
@@ -504,6 +508,13 @@ static int auditd_set(struct pid *pid, u32 portid, struct net *net)
ac_new->portid = portid;
ac_new->net = get_net(net);
+ /* send the ack now to avoid a race with the queue backlog */
+ if (*ack) {
+ nlh = nlmsg_hdr(skb);
+ netlink_ack(skb, nlh, 0, NULL);
+ *ack = false;
+ }
+
spin_lock_irqsave(&auditd_conn_lock, flags);
ac_old = rcu_dereference_protected(auditd_conn,
lockdep_is_held(&auditd_conn_lock));
@@ -1198,7 +1209,8 @@ static int audit_replace(struct pid *pid)
return auditd_send_unicast_skb(skb);
}
-static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+ bool *ack)
{
u32 seq;
void *data;
@@ -1290,7 +1302,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
/* register a new auditd connection */
err = auditd_set(req_pid,
NETLINK_CB(skb).portid,
- sock_net(NETLINK_CB(skb).sk));
+ sock_net(NETLINK_CB(skb).sk),
+ skb, ack);
if (audit_enabled != AUDIT_OFF)
audit_log_config_change("audit_pid",
new_pid,
@@ -1529,9 +1542,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
* Parse the provided skb and deal with any messages that may be present,
* malformed skbs are discarded.
*/
static void audit_receive(struct sk_buff *skb)
{
struct nlmsghdr *nlh;
+ bool ack;
/*
* len MUST be signed for nlmsg_next to be able to dec it below 0
* if the nlmsg_len was not aligned
@@ -1544,9 +1558,12 @@ static void audit_receive(struct sk_buff *skb)
audit_ctl_lock();
while (nlmsg_ok(nlh, len)) {
- err = audit_receive_msg(skb, nlh);
- /* if err or if this message says it wants a response */
- if (err || (nlh->nlmsg_flags & NLM_F_ACK))
+ ack = nlh->nlmsg_flags & NLM_F_ACK;
+ err = audit_receive_msg(skb, nlh, &ack);
+
+ /* send an ack if the user asked for one and audit_receive_msg
+ * didn't already do it, or if there was an error. */
+ if (ack || err)
netlink_ack(skb, nlh, err, NULL);
nlh = nlmsg_next(nlh, &len);
@@ -1796,7 +1813,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
{
struct audit_buffer *ab;
struct timespec64 t;
- unsigned int uninitialized_var(serial);
+ unsigned int serial;
if (audit_initialized != AUDIT_INITIALIZED)
return NULL;
diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c
index f0d243318452..297387816296 100644
--- a/kernel/audit_fsnotify.c
+++ b/kernel/audit_fsnotify.c
@@ -102,6 +102,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa
ret = fsnotify_add_inode_mark(&audit_mark->mark, inode, true);
if (ret < 0) {
+ audit_mark->path = NULL;
fsnotify_put_mark(&audit_mark->mark);
audit_mark = ERR_PTR(ret);
}
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 8a8fd732ff6d..2874ad5d06e4 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -542,11 +542,18 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
unsigned long ino;
dev_t dev;
- exe_file = get_task_exe_file(tsk);
+ /* only do exe filtering if we are recording @current events/records */
+ if (tsk != current)
+ return 0;
+
+ if (!current->mm)
+ return 0;
+ exe_file = get_mm_exe_file(current->mm);
if (!exe_file)
return 0;
ino = file_inode(exe_file)->i_ino;
dev = file_inode(exe_file)->i_sb->s_dev;
fput(exe_file);
+
return audit_mark_compare(mark, ino, dev);
}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index e8e90c0c4936..fc4223f30e84 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -2161,6 +2161,8 @@ void __audit_inode_child(struct inode *parent,
}
}
+ cond_resched();
+
/* is there a matching child entry? */
list_for_each_entry(n, &context->names_list, list) {
/* can only match entries that have a name */
diff --git a/kernel/backtracetest.c b/kernel/backtracetest.c
index a2a97fa3071b..370217dd7e39 100644
--- a/kernel/backtracetest.c
+++ b/kernel/backtracetest.c
@@ -29,7 +29,7 @@ static void backtrace_test_irq_callback(unsigned long data)
complete(&backtrace_work);
}
-static DECLARE_TASKLET(backtrace_tasklet, &backtrace_test_irq_callback, 0);
+static DECLARE_TASKLET_OLD(backtrace_tasklet, &backtrace_test_irq_callback);
static void backtrace_test_irq(void)
{
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 1c65ce0098a9..81ed9b79f401 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -542,7 +542,7 @@ int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
old_ptr = xchg(array->ptrs + index, new_ptr);
if (old_ptr)
- map->ops->map_fd_put_ptr(old_ptr);
+ map->ops->map_fd_put_ptr(map, old_ptr, true);
return 0;
}
@@ -558,7 +558,7 @@ static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
old_ptr = xchg(array->ptrs + index, NULL);
if (old_ptr) {
- map->ops->map_fd_put_ptr(old_ptr);
+ map->ops->map_fd_put_ptr(map, old_ptr, true);
return 0;
} else {
return -ENOENT;
@@ -582,8 +582,9 @@ static void *prog_fd_array_get_ptr(struct bpf_map *map,
return prog;
}
-static void prog_fd_array_put_ptr(void *ptr)
+static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
+ /* bpf_prog is freed after one RCU or tasks trace grace period */
bpf_prog_put(ptr);
}
@@ -694,8 +695,9 @@ err_out:
return ee;
}
-static void perf_event_fd_array_put_ptr(void *ptr)
+static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
+ /* bpf_perf_event is freed after one RCU grace period */
bpf_event_entry_free_rcu(ptr);
}
@@ -736,7 +738,7 @@ static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
return cgroup_get_from_fd(fd);
}
-static void cgroup_fd_array_put_ptr(void *ptr)
+static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
/* cgroup_put free cgrp after a rcu grace period */
cgroup_put(ptr);
diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c
index d99e89f113c4..3dabdd137d10 100644
--- a/kernel/bpf/bpf_lru_list.c
+++ b/kernel/bpf/bpf_lru_list.c
@@ -41,7 +41,12 @@ static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l)
/* bpf_lru_node helpers */
static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
{
- return node->ref;
+ return READ_ONCE(node->ref);
+}
+
+static void bpf_lru_node_clear_ref(struct bpf_lru_node *node)
+{
+ WRITE_ONCE(node->ref, 0);
}
static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
@@ -89,7 +94,7 @@ static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
bpf_lru_list_count_inc(l, tgt_type);
node->type = tgt_type;
- node->ref = 0;
+ bpf_lru_node_clear_ref(node);
list_move(&node->list, &l->lists[tgt_type]);
}
@@ -110,7 +115,7 @@ static void __bpf_lru_node_move(struct bpf_lru_list *l,
bpf_lru_list_count_inc(l, tgt_type);
node->type = tgt_type;
}
- node->ref = 0;
+ bpf_lru_node_clear_ref(node);
/* If the moving node is the next_inactive_rotation candidate,
* move the next_inactive_rotation pointer also.
@@ -353,7 +358,7 @@ static void __local_list_add_pending(struct bpf_lru *lru,
*(u32 *)((void *)node + lru->hash_offset) = hash;
node->cpu = cpu;
node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
- node->ref = 0;
+ bpf_lru_node_clear_ref(node);
list_add(&node->list, local_pending_list(loc_l));
}
@@ -419,7 +424,7 @@ static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
if (!list_empty(free_list)) {
node = list_first_entry(free_list, struct bpf_lru_node, list);
*(u32 *)((void *)node + lru->hash_offset) = hash;
- node->ref = 0;
+ bpf_lru_node_clear_ref(node);
__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
}
@@ -522,7 +527,7 @@ static void bpf_common_lru_push_free(struct bpf_lru *lru,
}
node->type = BPF_LRU_LOCAL_LIST_T_FREE;
- node->ref = 0;
+ bpf_lru_node_clear_ref(node);
list_move(&node->list, local_free_list(loc_l));
raw_spin_unlock_irqrestore(&loc_l->lock, flags);
@@ -568,7 +573,7 @@ static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
node = (struct bpf_lru_node *)(buf + node_offset);
node->type = BPF_LRU_LIST_T_FREE;
- node->ref = 0;
+ bpf_lru_node_clear_ref(node);
list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
buf += elem_size;
}
@@ -594,7 +599,7 @@ again:
node = (struct bpf_lru_node *)(buf + node_offset);
node->cpu = cpu;
node->type = BPF_LRU_LIST_T_FREE;
- node->ref = 0;
+ bpf_lru_node_clear_ref(node);
list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
i++;
buf += elem_size;
diff --git a/kernel/bpf/bpf_lru_list.h b/kernel/bpf/bpf_lru_list.h
index f02504640e18..41f8fea530c8 100644
--- a/kernel/bpf/bpf_lru_list.h
+++ b/kernel/bpf/bpf_lru_list.h
@@ -63,11 +63,8 @@ struct bpf_lru {
static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node)
{
- /* ref is an approximation on access frequency. It does not
- * have to be very accurate. Hence, no protection is used.
- */
- if (!node->ref)
- node->ref = 1;
+ if (!READ_ONCE(node->ref))
+ WRITE_ONCE(node->ref, 1);
}
int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
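
node->ref is set by lookups via bpf_lru_node_set_ref() and cleared by the list-rotation code with no common lock, so the previous plain loads and stores were a C data race: the compiler was free to tear, fuse or reorder them. Wrapping every access in READ_ONCE()/WRITE_ONCE() keeps the intentionally approximate reference bit while making each access single-copy atomic. The general pattern, sketched on a hypothetical structure:

    struct obj { int flag; };   /* shared between CPUs, lockless */

    static void set_flag(struct obj *o)
    {
        /* test first so hot readers don't keep dirtying the cache line */
        if (!READ_ONCE(o->flag))
            WRITE_ONCE(o->flag, 1);
    }

    static bool test_flag(const struct obj *o)
    {
        return READ_ONCE(o->flag);  /* no torn or refetched reads */
    }
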
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index b03087f110eb..5189bc5ebd89 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -2148,7 +2148,7 @@ static int btf_struct_resolve(struct btf_verifier_env *env,
if (v->next_member) {
const struct btf_type *last_member_type;
const struct btf_member *last_member;
- u16 last_member_type_id;
+ u32 last_member_type_id;
last_member = btf_type_member(v->t) + v->next_member - 1;
last_member_type_id = last_member->type;
@@ -2719,6 +2719,7 @@ static int btf_datasec_resolve(struct btf_verifier_env *env,
struct btf *btf = env->btf;
u16 i;
+ env->resolve_mode = RESOLVE_TBD;
for_each_vsi_from(i, v->next_member, v->t, vsi) {
u32 var_type_id = vsi->type, type_id, type_size = 0;
const struct btf_type *var_type = btf_type_by_id(env->btf,
@@ -2849,6 +2850,11 @@ static int btf_func_proto_check(struct btf_verifier_env *env,
break;
}
+ if (btf_type_is_resolve_source_only(arg_type)) {
+ btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
+ return -EINVAL;
+ }
+
if (args[i].name_off &&
(!btf_name_offset_valid(btf, args[i].name_off) ||
!btf_name_valid_identifier(btf, args[i].name_off))) {
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index c2f0aa818b7a..08c1246c758e 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1131,7 +1131,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
goto out;
}
- if (ctx.optlen > max_optlen || ctx.optlen < 0) {
+ if (optval && (ctx.optlen > max_optlen || ctx.optlen < 0)) {
ret = -EFAULT;
goto out;
}
@@ -1145,8 +1145,11 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
}
if (ctx.optlen != 0) {
- if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
- put_user(ctx.optlen, optlen)) {
+ if (optval && copy_to_user(optval, ctx.optval, ctx.optlen)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (put_user(ctx.optlen, optlen)) {
ret = -EFAULT;
goto out;
}
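
Both hunks above exist to keep the NULL-optval convention working when a cgroup getsockopt program is attached: some socket options let userspace pass optval == NULL so the kernel only reports the required length. A hedged userspace sketch of that probe-then-fetch convention (netlink's membership list is one in-tree user of it):

    socklen_t len = 0;
    /* first call: NULL buffer, kernel fills in the required length */
    getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, NULL, &len);
    unsigned int *groups = malloc(len);
    /* second call: fetch the data for real */
    getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, groups, &len);

With the fix, only the put_user() of the length must succeed on the first call; the copy_to_user() is skipped for a NULL buffer instead of failing the whole call with -EFAULT.
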
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 6b33a8a148b8..dde21d23f220 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -30,6 +30,7 @@
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
+#include <linux/nospec.h>
#include <asm/barrier.h>
#include <asm/unaligned.h>
@@ -763,7 +764,7 @@ static int __init bpf_jit_charge_init(void)
{
/* Only used as heuristic here to derive limit. */
bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
- bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
+ bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
PAGE_SIZE), LONG_MAX);
return 0;
}
@@ -1567,9 +1568,7 @@ out:
* reuse preexisting logic from Spectre v1 mitigation that
* happens to produce the required code on x86 for v4 as well.
*/
-#ifdef CONFIG_X86
barrier_nospec();
-#endif
CONT;
#define LDST(SIZEOP, SIZE) \
STX_MEM_##SIZEOP: \
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index a367fc850393..19be747f4e5a 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -249,6 +249,7 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
static int cpu_map_kthread_run(void *data)
{
struct bpf_cpu_map_entry *rcpu = data;
+ unsigned long last_qs = jiffies;
set_current_state(TASK_INTERRUPTIBLE);
@@ -271,10 +272,12 @@ static int cpu_map_kthread_run(void *data)
if (__ptr_ring_empty(rcpu->queue)) {
schedule();
sched = 1;
+ last_qs = jiffies;
} else {
__set_current_state(TASK_RUNNING);
}
} else {
+ rcu_softirq_qs_periodic(last_qs);
sched = cond_resched();
}
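
Under sustained load this kthread can poll the ring for a long time without sleeping, and cond_resched() alone does not always translate into the RCU quiescent state the grace-period machinery is waiting for, so RCU stalls were observed. rcu_softirq_qs_periodic() rate-limits an explicit QS report; its contract amounts to roughly the following (a sketch, not the verbatim macro):

    /* report a quiescent state at most every ~100ms of busy polling */
    if (time_after(jiffies, last_qs + HZ / 10)) {
        rcu_softirq_qs();
        last_qs = jiffies;
    }
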
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 03a67583f6fb..34c4f709b1ed 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -327,7 +327,13 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
num_possible_cpus());
}
- /* hash table size must be power of 2 */
+ /* hash table size must be power of 2; roundup_pow_of_two() can overflow
+ * into UB on 32-bit arches, so check that first
+ */
+ err = -E2BIG;
+ if (htab->map.max_entries > 1UL << 31)
+ goto free_htab;
+
htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
htab->elem_size = sizeof(struct htab_elem) +
@@ -337,10 +343,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
else
htab->elem_size += round_up(htab->map.value_size, 8);
- err = -E2BIG;
- /* prevent zero size kmalloc and check for u32 overflow */
- if (htab->n_buckets == 0 ||
- htab->n_buckets > U32_MAX / sizeof(struct bucket))
+ /* check for u32 overflow */
+ if (htab->n_buckets > U32_MAX / sizeof(struct bucket))
goto free_htab;
cost = (u64) htab->n_buckets * sizeof(struct bucket) +
@@ -674,7 +678,7 @@ static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
if (map->ops->map_fd_put_ptr) {
ptr = fd_htab_map_get_ptr(map, l);
- map->ops->map_fd_put_ptr(ptr);
+ map->ops->map_fd_put_ptr(map, ptr, true);
}
}
@@ -1426,7 +1430,7 @@ static void fd_htab_map_free(struct bpf_map *map)
hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
void *ptr = fd_htab_map_get_ptr(map, l);
- map->ops->map_fd_put_ptr(ptr);
+ map->ops->map_fd_put_ptr(map, ptr, false);
}
}
@@ -1467,7 +1471,7 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
ret = htab_map_update_elem(map, key, &ptr, map_flags);
if (ret)
- map->ops->map_fd_put_ptr(ptr);
+ map->ops->map_fd_put_ptr(map, ptr, false);
return ret;
}
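
The reordering matters because roundup_pow_of_two() expands to a shift of an unsigned long: with max_entries just above 2^31 on a 32-bit arch it becomes 1UL << 32, which is undefined behaviour, so its result could not be trusted by the old n_buckets sanity checks. The safe order, in isolation:

    if (max_entries > 1UL << 31)    /* reject before rounding ... */
        return -E2BIG;
    /* ... so the rounded result stays at or below 1UL << 31 */
    n_buckets = roundup_pow_of_two(max_entries);

The stack map allocator further down receives the same treatment.
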
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index a77d2814cac5..9bfb4685d068 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -262,13 +262,18 @@ static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
static DEFINE_PER_CPU(unsigned long, irqsave_flags);
-notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
+static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
unsigned long flags;
local_irq_save(flags);
__bpf_spin_lock(lock);
__this_cpu_write(irqsave_flags, flags);
+}
+
+NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
+{
+ __bpf_spin_lock_irqsave(lock);
return 0;
}
@@ -279,13 +284,18 @@ const struct bpf_func_proto bpf_spin_lock_proto = {
.arg1_type = ARG_PTR_TO_SPIN_LOCK,
};
-notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
+static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
unsigned long flags;
flags = __this_cpu_read(irqsave_flags);
__bpf_spin_unlock(lock);
local_irq_restore(flags);
+}
+
+NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
+{
+ __bpf_spin_unlock_irqrestore(lock);
return 0;
}
@@ -306,9 +316,9 @@ void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
else
lock = dst + map->spin_lock_off;
preempt_disable();
- ____bpf_spin_lock(lock);
+ __bpf_spin_lock_irqsave(lock);
copy_map_value(map, dst, src);
- ____bpf_spin_unlock(lock);
+ __bpf_spin_unlock_irqrestore(lock);
preempt_enable();
}
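
Extracting __bpf_spin_lock_irqsave()/__bpf_spin_unlock_irqrestore() lets copy_map_value_locked() share the helpers' flags bookkeeping instead of open-coding ____bpf_spin_lock(). Stashing the flags in a per-CPU variable is safe only because interrupts stay disabled for the whole critical section, so nothing else can run on this CPU and overwrite the slot before unlock. Condensed to its invariant (a sketch, not the full helper bodies):

    static DEFINE_PER_CPU(unsigned long, saved_flags);

    /* lock side: IRQs go off before the slot is written ... */
    local_irq_save(flags);
    __bpf_spin_lock(lock);
    __this_cpu_write(saved_flags, flags);

    /* ... unlock side: slot is read back while IRQs are still off */
    flags = __this_cpu_read(saved_flags);
    __bpf_spin_unlock(lock);
    local_irq_restore(flags);
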
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 56e6c75d354d..d78c1afe1273 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -230,6 +230,9 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
struct lpm_trie_node *node, *found = NULL;
struct bpf_lpm_trie_key *key = _key;
+ if (key->prefixlen > trie->max_prefixlen)
+ return NULL;
+
/* Start walking the trie from the root node ... */
for (node = rcu_dereference(trie->root); node;) {
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index fab4fb134547..7fe5a73aff07 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -106,7 +106,7 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
return inner_map;
}
-void bpf_map_fd_put_ptr(void *ptr)
+void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
/* ptr->ops->map_free() has to go through one
* rcu grace period by itself.
diff --git a/kernel/bpf/map_in_map.h b/kernel/bpf/map_in_map.h
index a507bf6ef8b9..d296890813cc 100644
--- a/kernel/bpf/map_in_map.h
+++ b/kernel/bpf/map_in_map.h
@@ -15,7 +15,7 @@ bool bpf_map_meta_equal(const struct bpf_map *meta0,
const struct bpf_map *meta1);
void *bpf_map_fd_get_ptr(struct bpf_map *map, struct file *map_file,
int ufd);
-void bpf_map_fd_put_ptr(void *ptr);
+void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer);
u32 bpf_map_fd_sys_lookup_elem(void *ptr);
#endif
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index f697647ceb54..26ba7cb01136 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -118,7 +118,12 @@ static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
int err = 0;
void *ptr;
- raw_spin_lock_irqsave(&qs->lock, flags);
+ if (in_nmi()) {
+ if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+ return -EBUSY;
+ } else {
+ raw_spin_lock_irqsave(&qs->lock, flags);
+ }
if (queue_stack_map_is_empty(qs)) {
memset(value, 0, qs->map.value_size);
@@ -148,7 +153,12 @@ static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
void *ptr;
u32 index;
- raw_spin_lock_irqsave(&qs->lock, flags);
+ if (in_nmi()) {
+ if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+ return -EBUSY;
+ } else {
+ raw_spin_lock_irqsave(&qs->lock, flags);
+ }
if (queue_stack_map_is_empty(qs)) {
memset(value, 0, qs->map.value_size);
@@ -213,7 +223,12 @@ static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
if (flags & BPF_NOEXIST || flags > BPF_EXIST)
return -EINVAL;
- raw_spin_lock_irqsave(&qs->lock, irq_flags);
+ if (in_nmi()) {
+ if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
+ return -EBUSY;
+ } else {
+ raw_spin_lock_irqsave(&qs->lock, irq_flags);
+ }
if (queue_stack_map_is_full(qs)) {
if (!replace) {
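
All three hunks in this file apply the same rule: code that may run in NMI context (for instance a BPF program attached to a perf event) must never spin on a lock, because the NMI may have interrupted the very context on this CPU that already holds qs->lock. The pattern, in isolation:

    if (in_nmi()) {
        /* trylock only: spinning here could deadlock the CPU */
        if (!raw_spin_trylock_irqsave(&lock, flags))
            return -EBUSY;  /* caller just sees a plain error */
    } else {
        raw_spin_lock_irqsave(&lock, flags);
    }
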
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 811071c227f1..bd8516d96745 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -111,11 +111,14 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
} else if (value_size / 8 > sysctl_perf_event_max_stack)
return ERR_PTR(-EINVAL);
- /* hash table size must be power of 2 */
- n_buckets = roundup_pow_of_two(attr->max_entries);
- if (!n_buckets)
+ /* hash table size must be power of 2; roundup_pow_of_two() can overflow
+ * into UB on 32-bit arches, so check that first
+ */
+ if (attr->max_entries > 1UL << 31)
return ERR_PTR(-E2BIG);
+ n_buckets = roundup_pow_of_two(attr->max_entries);
+
cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
err = bpf_map_charge_init(&mem, cost + attr->max_entries *
(sizeof(struct stack_map_bucket) + (u64)value_size));
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 9ebdcdaa5f16..de788761b708 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2787,7 +2787,9 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
if (attr->task_fd_query.flags != 0)
return -EINVAL;
+ rcu_read_lock();
task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
+ rcu_read_unlock();
if (!task)
return -ENOENT;
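
find_vpid() returns a struct pid that is only guaranteed to stay alive inside an RCU read-side critical section; get_pid_task() then takes a real task reference, which is what keeps the pointer valid after rcu_read_unlock(). The canonical lookup pattern:

    rcu_read_lock();
    task = get_pid_task(find_vpid(pid), PIDTYPE_PID);   /* grabs a ref */
    rcu_read_unlock();
    if (!task)
        return -ENOENT;
    /* ... use task ... */
    put_task_struct(task);  /* drop the reference when done */
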
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 34262d83dce1..0901911b42b5 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1469,7 +1469,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
if (class == BPF_ALU || class == BPF_ALU64) {
if (!(*reg_mask & dreg))
return 0;
- if (opcode == BPF_MOV) {
+ if (opcode == BPF_END || opcode == BPF_NEG) {
+ /* sreg is reserved and unused
+ * dreg still needs precision before this insn
+ */
+ return 0;
+ } else if (opcode == BPF_MOV) {
if (BPF_SRC(insn->code) == BPF_X) {
/* dreg = sreg
* dreg needs precision after this insn
@@ -1563,6 +1568,21 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
}
} else if (opcode == BPF_EXIT) {
return -ENOTSUPP;
+ } else if (BPF_SRC(insn->code) == BPF_X) {
+ if (!(*reg_mask & (dreg | sreg)))
+ return 0;
+ /* dreg <cond> sreg
+ * Both dreg and sreg need precision before
+ * this insn. If only sreg was marked precise
+ * before it would be equally necessary to
+ * propagate it to dreg.
+ */
+ *reg_mask |= (sreg | dreg);
+ /* else dreg <cond> K
+ * Only dreg still needs precision before
+ * this insn, so for the K-based conditional
+ * there is nothing new to be marked.
+ */
}
} else if (class == BPF_LD) {
if (!(*reg_mask & dreg))
@@ -1924,7 +1944,9 @@ static int check_stack_write(struct bpf_verifier_env *env,
bool sanitize = reg && is_spillable_regtype(reg->type);
for (i = 0; i < size; i++) {
- if (state->stack[spi].slot_type[i] == STACK_INVALID) {
+ u8 type = state->stack[spi].slot_type[i];
+
+ if (type != STACK_MISC && type != STACK_ZERO) {
sanitize = true;
break;
}
@@ -5083,6 +5105,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
coerce_reg_to_size(dst_reg, 4);
}
+ __update_reg_bounds(dst_reg);
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
return 0;
@@ -5139,6 +5162,11 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
return err;
return adjust_ptr_min_max_vals(env, insn,
dst_reg, src_reg);
+ } else if (dst_reg->precise) {
+ /* if dst_reg is precise, src_reg should be precise as well */
+ err = mark_chain_precision(env, insn->src_reg);
+ if (err)
+ return err;
}
} else {
/* Pretend the src is a reg with a known value, since we only
@@ -8943,7 +8971,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
insn->dst_reg,
shift);
- insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
+ insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
(1ULL << size * 8) - 1);
}
}
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index 236f290224aa..803989eae99e 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -169,7 +169,6 @@ extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;
-extern struct file_system_type cgroup_fs_type;
/* iterate across the hierarchies */
#define for_each_root(root) \
@@ -250,9 +249,10 @@ int cgroup_migrate(struct task_struct *leader, bool threadgroup,
int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
bool threadgroup);
-struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
+struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
+ bool *locked)
__acquires(&cgroup_threadgroup_rwsem);
-void cgroup_procs_write_finish(struct task_struct *task)
+void cgroup_procs_write_finish(struct task_struct *task, bool locked)
__releases(&cgroup_threadgroup_rwsem);
void cgroup_lock_and_drain_offline(struct cgroup *cgrp);
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 117d70098cd4..6fcabeacb7d8 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -15,6 +15,7 @@
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>
+#include <linux/cpu.h>
#include <trace/events/cgroup.h>
@@ -62,6 +63,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
int retval = 0;
mutex_lock(&cgroup_mutex);
+ cpus_read_lock();
percpu_down_write(&cgroup_threadgroup_rwsem);
for_each_root(root) {
struct cgroup *from_cgrp;
@@ -78,6 +80,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
break;
}
percpu_up_write(&cgroup_threadgroup_rwsem);
+ cpus_read_unlock();
mutex_unlock(&cgroup_mutex);
return retval;
@@ -364,10 +367,9 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
}
css_task_iter_end(&it);
length = n;
- /* now sort & (if procs) strip out duplicates */
+ /* now sort & strip out duplicates (tgids or recycled thread PIDs) */
sort(array, length, sizeof(pid_t), cmppid, NULL);
- if (type == CGROUP_FILE_PROCS)
- length = pidlist_uniq(array, length);
+ length = pidlist_uniq(array, length);
l = cgroup_pidlist_find_create(cgrp, type);
if (!l) {
@@ -498,12 +500,13 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
struct task_struct *task;
const struct cred *cred, *tcred;
ssize_t ret;
+ bool locked;
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
return -ENODEV;
- task = cgroup_procs_write_start(buf, threadgroup);
+ task = cgroup_procs_write_start(buf, threadgroup, &locked);
ret = PTR_ERR_OR_ZERO(task);
if (ret)
goto out_unlock;
@@ -526,7 +529,7 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
ret = cgroup_attach_task(cgrp, task, threadgroup);
out_finish:
- cgroup_procs_write_finish(task);
+ cgroup_procs_write_finish(task, locked);
out_unlock:
cgroup_kn_unlock(of->kn);
@@ -940,6 +943,9 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
for_each_subsys(ss, i) {
if (strcmp(param->key, ss->legacy_name))
continue;
+ if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
+ return invalf(fc, "Disabled controller '%s'",
+ param->key);
ctx->subsys_mask |= (1 << i);
return 0;
}
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 23f0db2900e4..62a7a5075014 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -30,6 +30,7 @@
#include "cgroup-internal.h"
+#include <linux/cpu.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/init_task.h>
@@ -1722,7 +1723,7 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
{
struct cgroup *dcgrp = &dst_root->cgrp;
struct cgroup_subsys *ss;
- int ssid, i, ret;
+ int ssid, ret;
u16 dfl_disable_ss_mask = 0;
lockdep_assert_held(&cgroup_mutex);
@@ -1766,7 +1767,8 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
struct cgroup_root *src_root = ss->root;
struct cgroup *scgrp = &src_root->cgrp;
struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
- struct css_set *cset;
+ struct css_set *cset, *cset_pos;
+ struct css_task_iter *it;
WARN_ON(!css || cgroup_css(dcgrp, ss));
@@ -1784,9 +1786,22 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
css->cgroup = dcgrp;
spin_lock_irq(&css_set_lock);
- hash_for_each(css_set_table, i, cset, hlist)
+ WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
+ list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
+ e_cset_node[ss->id]) {
list_move_tail(&cset->e_cset_node[ss->id],
&dcgrp->e_csets[ss->id]);
+ /*
+ * All css_sets of scgrp are moved to dcgrp in the same order;
+ * patch in-flight iterators to preserve correct iteration.
+ * Since an iterator is always advanced right away and finishes
+ * when it->cset_pos meets it->cset_head, updating it->cset_head
+ * is enough here.
+ */
+ list_for_each_entry(it, &cset->task_iters, iters_node)
+ if (it->cset_head == &scgrp->e_csets[ss->id])
+ it->cset_head = &dcgrp->e_csets[ss->id];
+ }
spin_unlock_irq(&css_set_lock);
/* default hierarchy doesn't enable controllers by default */
@@ -2377,6 +2392,47 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
EXPORT_SYMBOL_GPL(task_cgroup_path);
/**
+ * cgroup_attach_lock - Lock for ->attach()
+ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
+ *
+ * cgroup migration sometimes needs to stabilize threadgroups against forks and
+ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
+ * implementations (e.g. cpuset) also need to disable CPU hotplug.
+ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
+ * lead to deadlocks.
+ *
+ * Bringing up a CPU may involve creating and destroying tasks which requires
+ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
+ * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
+ * write-locking threadgroup_rwsem, the locking order is reversed and we end up
+ * waiting for an on-going CPU hotplug operation which in turn is waiting for
+ * the threadgroup_rwsem to be released to create new tasks. For more details:
+ *
+ * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
+ *
+ * Resolve the situation by always acquiring cpus_read_lock() before optionally
+ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
+ * CPU hotplug is disabled on entry.
+ */
+static void cgroup_attach_lock(bool lock_threadgroup)
+{
+ cpus_read_lock();
+ if (lock_threadgroup)
+ percpu_down_write(&cgroup_threadgroup_rwsem);
+}
+
+/**
+ * cgroup_attach_unlock - Undo cgroup_attach_lock()
+ * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
+ */
+static void cgroup_attach_unlock(bool lock_threadgroup)
+{
+ if (lock_threadgroup)
+ percpu_up_write(&cgroup_threadgroup_rwsem);
+ cpus_read_unlock();
+}
+
+/**
* cgroup_migrate_add_task - add a migration target task to a migration context
* @task: target task
* @mgctx: target migration context
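
The comment above pins the lock order: cpus_read_lock() first, then cgroup_threadgroup_rwsem. The inversion it forecloses, as a schematic timeline rather than literal code:

    attach path                          CPU hotplug path
    -----------                          ----------------
    percpu_down_write(&tg_rwsem)         cpus_write_lock()
    ->attach() takes cpus_read_lock()    bringup creates tasks, which
        ... blocks on hotplug ...        read-locks tg_rwsem
                                             ... blocks on the writer ...

Each side waits on the other forever. Acquiring cpus_read_lock() before the rwsem on the attach side removes the cycle, and it is also why cpuset_attach() in the cpuset hunk below can replace its own cpus_read_lock() with lockdep_assert_cpus_held().
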
@@ -2856,8 +2912,8 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
return ret;
}
-struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
- __acquires(&cgroup_threadgroup_rwsem)
+struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
+ bool *threadgroup_locked)
{
struct task_struct *tsk;
pid_t pid;
@@ -2865,7 +2921,17 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
return ERR_PTR(-EINVAL);
- percpu_down_write(&cgroup_threadgroup_rwsem);
+ /*
+ * If we migrate a single thread, we don't care about threadgroup
+ * stability. If the thread is `current`, it won't exit(2) under our
+ * hands or change PID through exec(2). We exclude
+ * cgroup_update_dfl_csses and other cgroup_{proc,thread}s_write
+ * callers by cgroup_mutex.
+ * Therefore, we can skip the global lock.
+ */
+ lockdep_assert_held(&cgroup_mutex);
+ *threadgroup_locked = pid || threadgroup;
+ cgroup_attach_lock(*threadgroup_locked);
rcu_read_lock();
if (pid) {
@@ -2896,14 +2962,14 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
goto out_unlock_rcu;
out_unlock_threadgroup:
- percpu_up_write(&cgroup_threadgroup_rwsem);
+ cgroup_attach_unlock(*threadgroup_locked);
+ *threadgroup_locked = false;
out_unlock_rcu:
rcu_read_unlock();
return tsk;
}
-void cgroup_procs_write_finish(struct task_struct *task)
- __releases(&cgroup_threadgroup_rwsem)
+void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked)
{
struct cgroup_subsys *ss;
int ssid;
@@ -2911,7 +2977,8 @@ void cgroup_procs_write_finish(struct task_struct *task)
/* release reference from cgroup_procs_write_start() */
put_task_struct(task);
- percpu_up_write(&cgroup_threadgroup_rwsem);
+ cgroup_attach_unlock(threadgroup_locked);
+
for_each_subsys(ss, ssid)
if (ss->post_attach)
ss->post_attach();
@@ -2966,12 +3033,11 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
struct cgroup_subsys_state *d_css;
struct cgroup *dsct;
struct css_set *src_cset;
+ bool has_tasks;
int ret;
lockdep_assert_held(&cgroup_mutex);
- percpu_down_write(&cgroup_threadgroup_rwsem);
-
/* look up all csses currently attached to @cgrp's subtree */
spin_lock_irq(&css_set_lock);
cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
@@ -2982,6 +3048,15 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
}
spin_unlock_irq(&css_set_lock);
+ /*
+ * We need to write-lock threadgroup_rwsem while migrating tasks.
+ * However, if there are no source csets for @cgrp, changing its
+ * controllers isn't gonna produce any task migrations and the
+ * write-locking can be skipped safely.
+ */
+ has_tasks = !list_empty(&mgctx.preloaded_src_csets);
+ cgroup_attach_lock(has_tasks);
+
/* NULL dst indicates self on default hierarchy */
ret = cgroup_migrate_prepare_dst(&mgctx);
if (ret)
@@ -3001,7 +3076,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
ret = cgroup_migrate_execute(&mgctx);
out_finish:
cgroup_migrate_finish(&mgctx);
- percpu_up_write(&cgroup_threadgroup_rwsem);
+ cgroup_attach_unlock(has_tasks);
return ret;
}
@@ -4830,12 +4905,13 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
struct task_struct *task;
const struct cred *saved_cred;
ssize_t ret;
+ bool threadgroup_locked;
dst_cgrp = cgroup_kn_lock_live(of->kn, false);
if (!dst_cgrp)
return -ENODEV;
- task = cgroup_procs_write_start(buf, true);
+ task = cgroup_procs_write_start(buf, true, &threadgroup_locked);
ret = PTR_ERR_OR_ZERO(task);
if (ret)
goto out_unlock;
@@ -4861,7 +4937,7 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
ret = cgroup_attach_task(dst_cgrp, task, true);
out_finish:
- cgroup_procs_write_finish(task);
+ cgroup_procs_write_finish(task, threadgroup_locked);
out_unlock:
cgroup_kn_unlock(of->kn);
@@ -4881,6 +4957,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
struct task_struct *task;
const struct cred *saved_cred;
ssize_t ret;
+ bool locked;
buf = strstrip(buf);
@@ -4888,7 +4965,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
if (!dst_cgrp)
return -ENODEV;
- task = cgroup_procs_write_start(buf, false);
+ task = cgroup_procs_write_start(buf, false, &locked);
ret = PTR_ERR_OR_ZERO(task);
if (ret)
goto out_unlock;
@@ -4919,7 +4996,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
ret = cgroup_attach_task(dst_cgrp, task, false);
out_finish:
- cgroup_procs_write_finish(task);
+ cgroup_procs_write_finish(task, locked);
out_unlock:
cgroup_kn_unlock(of->kn);
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index b02eca235ba3..5d50ac2f2652 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -33,6 +33,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
+#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
@@ -1057,10 +1058,18 @@ static void update_tasks_cpumask(struct cpuset *cs)
{
struct css_task_iter it;
struct task_struct *task;
+ bool top_cs = cs == &top_cpuset;
css_task_iter_start(&cs->css, 0, &it);
- while ((task = css_task_iter_next(&it)))
+ while ((task = css_task_iter_next(&it))) {
+ /*
+ * Percpu kthreads in top_cpuset are ignored
+ */
+ if (top_cs && (task->flags & PF_KTHREAD) &&
+ kthread_is_per_cpu(task))
+ continue;
set_cpus_allowed_ptr(task, cs->effective_cpus);
+ }
css_task_iter_end(&it);
}
@@ -2014,12 +2023,7 @@ static int update_prstate(struct cpuset *cs, int val)
update_flag(CS_CPU_EXCLUSIVE, cs, 0);
}
- /*
- * Update cpumask of parent's tasks except when it is the top
- * cpuset as some system daemons cannot be mapped to other CPUs.
- */
- if (parent != &top_cpuset)
- update_tasks_cpumask(parent);
+ update_tasks_cpumask(parent);
if (parent->child_ecpus_count)
update_sibling_cpumasks(parent, cs, &tmp);
@@ -2176,11 +2180,15 @@ out_unlock:
static void cpuset_cancel_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
+ struct cpuset *cs;
cgroup_taskset_first(tset, &css);
+ cs = css_cs(css);
percpu_down_write(&cpuset_rwsem);
- css_cs(css)->attach_in_progress--;
+ cs->attach_in_progress--;
+ if (!cs->attach_in_progress)
+ wake_up(&cpuset_attach_wq);
percpu_up_write(&cpuset_rwsem);
}
@@ -2204,7 +2212,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
cgroup_taskset_first(tset, &css);
cs = css_cs(css);
- cpus_read_lock();
+ lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
percpu_down_write(&cpuset_rwsem);
/* prepare for attach */
@@ -2260,7 +2268,6 @@ static void cpuset_attach(struct cgroup_taskset *tset)
wake_up(&cpuset_attach_wq);
percpu_up_write(&cpuset_rwsem);
- cpus_read_unlock();
}
/* The various types of files and directories in a cpuset file system */
diff --git a/kernel/compat.c b/kernel/compat.c
index a2bc1d6ceb57..241516f326c0 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -240,7 +240,7 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t, pid, unsigned int, len,
if (len & (sizeof(compat_ulong_t)-1))
return -EINVAL;
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
ret = sched_getaffinity(pid, mask);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index c08456af0c7f..ba579bb6b897 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1473,7 +1473,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
[CPUHP_HRTIMERS_PREPARE] = {
.name = "hrtimers:prepare",
.startup.single = hrtimers_prepare_cpu,
- .teardown.single = hrtimers_dead_cpu,
+ .teardown.single = NULL,
},
[CPUHP_SMPCFD_PREPARE] = {
.name = "smpcfd:prepare",
@@ -1540,6 +1540,12 @@ static struct cpuhp_step cpuhp_hp_states[] = {
.startup.single = NULL,
.teardown.single = smpcfd_dying_cpu,
},
+ [CPUHP_AP_HRTIMERS_DYING] = {
+ .name = "hrtimers:dying",
+ .startup.single = NULL,
+ .teardown.single = hrtimers_cpu_dying,
+ },
+
/* Entry state on starting. Interrupts enabled from here on. Transient
* state for synchronization */
[CPUHP_AP_ONLINE] = {
diff --git a/kernel/cred.c b/kernel/cred.c
index 809a985b1793..77d6f115d944 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -98,17 +98,17 @@ static void put_cred_rcu(struct rcu_head *rcu)
#ifdef CONFIG_DEBUG_CREDENTIALS
if (cred->magic != CRED_MAGIC_DEAD ||
- atomic_read(&cred->usage) != 0 ||
+ atomic_long_read(&cred->usage) != 0 ||
read_cred_subscribers(cred) != 0)
panic("CRED: put_cred_rcu() sees %p with"
- " mag %x, put %p, usage %d, subscr %d\n",
+ " mag %x, put %p, usage %ld, subscr %d\n",
cred, cred->magic, cred->put_addr,
- atomic_read(&cred->usage),
+ atomic_long_read(&cred->usage),
read_cred_subscribers(cred));
#else
- if (atomic_read(&cred->usage) != 0)
- panic("CRED: put_cred_rcu() sees %p with usage %d\n",
- cred, atomic_read(&cred->usage));
+ if (atomic_long_read(&cred->usage) != 0)
+ panic("CRED: put_cred_rcu() sees %p with usage %ld\n",
+ cred, atomic_long_read(&cred->usage));
#endif
security_cred_free(cred);
@@ -131,11 +131,11 @@ static void put_cred_rcu(struct rcu_head *rcu)
*/
void __put_cred(struct cred *cred)
{
- kdebug("__put_cred(%p{%d,%d})", cred,
- atomic_read(&cred->usage),
+ kdebug("__put_cred(%p{%ld,%d})", cred,
+ atomic_long_read(&cred->usage),
read_cred_subscribers(cred));
- BUG_ON(atomic_read(&cred->usage) != 0);
+ BUG_ON(atomic_long_read(&cred->usage) != 0);
#ifdef CONFIG_DEBUG_CREDENTIALS
BUG_ON(read_cred_subscribers(cred) != 0);
cred->magic = CRED_MAGIC_DEAD;
@@ -158,8 +158,8 @@ void exit_creds(struct task_struct *tsk)
{
struct cred *cred;
- kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
- atomic_read(&tsk->cred->usage),
+ kdebug("exit_creds(%u,%p,%p,{%ld,%d})", tsk->pid, tsk->real_cred, tsk->cred,
+ atomic_long_read(&tsk->cred->usage),
read_cred_subscribers(tsk->cred));
cred = (struct cred *) tsk->real_cred;
@@ -218,7 +218,7 @@ struct cred *cred_alloc_blank(void)
if (!new)
return NULL;
- atomic_set(&new->usage, 1);
+ atomic_long_set(&new->usage, 1);
#ifdef CONFIG_DEBUG_CREDENTIALS
new->magic = CRED_MAGIC;
#endif
@@ -265,7 +265,7 @@ struct cred *prepare_creds(void)
memcpy(new, old, sizeof(struct cred));
new->non_rcu = 0;
- atomic_set(&new->usage, 1);
+ atomic_long_set(&new->usage, 1);
set_cred_subscribers(new, 0);
get_group_info(new->group_info);
get_uid(new->user);
@@ -345,8 +345,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
p->real_cred = get_cred(p->cred);
get_cred(p->cred);
alter_cred_subscribers(p->cred, 2);
- kdebug("share_creds(%p{%d,%d})",
- p->cred, atomic_read(&p->cred->usage),
+ kdebug("share_creds(%p{%ld,%d})",
+ p->cred, atomic_long_read(&p->cred->usage),
read_cred_subscribers(p->cred));
atomic_inc(&p->cred->user->processes);
return 0;
@@ -436,8 +436,8 @@ int commit_creds(struct cred *new)
struct task_struct *task = current;
const struct cred *old = task->real_cred;
- kdebug("commit_creds(%p{%d,%d})", new,
- atomic_read(&new->usage),
+ kdebug("commit_creds(%p{%ld,%d})", new,
+ atomic_long_read(&new->usage),
read_cred_subscribers(new));
BUG_ON(task->cred != old);
@@ -446,7 +446,7 @@ int commit_creds(struct cred *new)
validate_creds(old);
validate_creds(new);
#endif
- BUG_ON(atomic_read(&new->usage) < 1);
+ BUG_ON(atomic_long_read(&new->usage) < 1);
get_cred(new); /* we will require a ref for the subj creds too */
@@ -519,14 +519,14 @@ EXPORT_SYMBOL(commit_creds);
*/
void abort_creds(struct cred *new)
{
- kdebug("abort_creds(%p{%d,%d})", new,
- atomic_read(&new->usage),
+ kdebug("abort_creds(%p{%ld,%d})", new,
+ atomic_long_read(&new->usage),
read_cred_subscribers(new));
#ifdef CONFIG_DEBUG_CREDENTIALS
BUG_ON(read_cred_subscribers(new) != 0);
#endif
- BUG_ON(atomic_read(&new->usage) < 1);
+ BUG_ON(atomic_long_read(&new->usage) < 1);
put_cred(new);
}
EXPORT_SYMBOL(abort_creds);
@@ -542,8 +542,8 @@ const struct cred *override_creds(const struct cred *new)
{
const struct cred *old = current->cred;
- kdebug("override_creds(%p{%d,%d})", new,
- atomic_read(&new->usage),
+ kdebug("override_creds(%p{%ld,%d})", new,
+ atomic_long_read(&new->usage),
read_cred_subscribers(new));
validate_creds(old);
@@ -565,8 +565,8 @@ const struct cred *override_creds(const struct cred *new)
rcu_assign_pointer(current->cred, new);
alter_cred_subscribers(old, -1);
- kdebug("override_creds() = %p{%d,%d}", old,
- atomic_read(&old->usage),
+ kdebug("override_creds() = %p{%ld,%d}", old,
+ atomic_long_read(&old->usage),
read_cred_subscribers(old));
return old;
}
@@ -583,8 +583,8 @@ void revert_creds(const struct cred *old)
{
const struct cred *override = current->cred;
- kdebug("revert_creds(%p{%d,%d})", old,
- atomic_read(&old->usage),
+ kdebug("revert_creds(%p{%ld,%d})", old,
+ atomic_long_read(&old->usage),
read_cred_subscribers(old));
validate_creds(old);
@@ -698,7 +698,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
*new = *old;
new->non_rcu = 0;
- atomic_set(&new->usage, 1);
+ atomic_long_set(&new->usage, 1);
set_cred_subscribers(new, 0);
get_uid(new->user);
get_user_ns(new->user_ns);
@@ -808,8 +808,8 @@ static void dump_invalid_creds(const struct cred *cred, const char *label,
cred == tsk->cred ? "[eff]" : "");
printk(KERN_ERR "CRED: ->magic=%x, put_addr=%p\n",
cred->magic, cred->put_addr);
- printk(KERN_ERR "CRED: ->usage=%d, subscr=%d\n",
- atomic_read(&cred->usage),
+ printk(KERN_ERR "CRED: ->usage=%ld, subscr=%d\n",
+ atomic_long_read(&cred->usage),
read_cred_subscribers(cred));
printk(KERN_ERR "CRED: ->*uid = { %d,%d,%d,%d }\n",
from_kuid_munged(&init_user_ns, cred->uid),
@@ -881,9 +881,9 @@ EXPORT_SYMBOL(__validate_process_creds);
*/
void validate_creds_for_do_exit(struct task_struct *tsk)
{
- kdebug("validate_creds_for_do_exit(%p,%p{%d,%d})",
+ kdebug("validate_creds_for_do_exit(%p,%p{%ld,%d})",
tsk->real_cred, tsk->cred,
- atomic_read(&tsk->cred->usage),
+ atomic_long_read(&tsk->cred->usage),
read_cred_subscribers(tsk->cred));
__validate_process_creds(tsk, __FILE__, __LINE__);
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 565987557ad8..1ab2e9703486 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -945,6 +945,9 @@ void kgdb_panic(const char *msg)
if (panic_timeout)
return;
+ debug_locks_off();
+ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
+
if (dbg_kdb_mode)
kdb_printf("PANIC: %s\n", msg);
@@ -1043,7 +1046,7 @@ static void kgdb_tasklet_bpt(unsigned long ing)
atomic_set(&kgdb_break_tasklet_var, 0);
}
-static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
+static DECLARE_TASKLET_OLD(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt);
void kgdb_schedule_breakpoint(void)
{
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 7c96bf9a6c2c..51cfb8618205 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -453,6 +453,13 @@ int kdb_set(int argc, const char **argv)
return KDB_ARGCOUNT;
/*
+ * Censor sensitive variables
+ */
+ if (strcmp(argv[1], "PROMPT") == 0 &&
+ !kdb_check_flags(KDB_ENABLE_MEM_READ, kdb_cmd_enabled, false))
+ return KDB_NOPERM;
+
+ /*
* Check for internal variables
*/
if (strcmp(argv[1], "KDBDEBUG") == 0) {
@@ -1355,14 +1362,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
*(cmd_hist[cmd_head]) = '\0';
do_full_getstr:
-#if defined(CONFIG_SMP)
+ /* PROMPT can only be set if we have MEM_READ permission. */
snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
raw_smp_processor_id());
-#else
- snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"));
-#endif
- if (defcmd_in_progress)
- strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
/*
* Fetch command from keyboard
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 2a0c4985f38e..d164b3dbcd93 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -323,8 +323,10 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
static void rmem_dma_device_release(struct reserved_mem *rmem,
struct device *dev)
{
- if (dev)
+ if (dev) {
dev->dma_mem = NULL;
+ }
}
static const struct reserved_mem_ops rmem_dma_ops = {
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 9a4837b68e18..5ecd52a4bb21 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -887,7 +887,7 @@ static int device_dma_allocations(struct device *dev, struct dma_debug_entry **o
static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
struct device *dev = data;
- struct dma_debug_entry *uninitialized_var(entry);
+ struct dma_debug_entry *entry;
int count;
if (dma_debug_disabled())
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index f04cfc2e9e01..4c21cdc15d1b 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -232,7 +232,7 @@ void dma_direct_sync_single_for_device(struct device *dev,
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
if (!dev_is_dma_coherent(dev))
- arch_sync_dma_for_device(dev, paddr, size, dir);
+ arch_sync_dma_for_device(paddr, size, dir);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_device);
@@ -250,7 +250,7 @@ void dma_direct_sync_sg_for_device(struct device *dev,
dir, SYNC_FOR_DEVICE);
if (!dev_is_dma_coherent(dev))
- arch_sync_dma_for_device(dev, paddr, sg->length,
+ arch_sync_dma_for_device(paddr, sg->length,
dir);
}
}
@@ -266,8 +266,8 @@ void dma_direct_sync_single_for_cpu(struct device *dev,
phys_addr_t paddr = dma_to_phys(dev, addr);
if (!dev_is_dma_coherent(dev)) {
- arch_sync_dma_for_cpu(dev, paddr, size, dir);
- arch_sync_dma_for_cpu_all(dev);
+ arch_sync_dma_for_cpu(paddr, size, dir);
+ arch_sync_dma_for_cpu_all();
}
if (unlikely(is_swiotlb_buffer(paddr)))
@@ -285,7 +285,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
if (!dev_is_dma_coherent(dev))
- arch_sync_dma_for_cpu(dev, paddr, sg->length, dir);
+ arch_sync_dma_for_cpu(paddr, sg->length, dir);
if (unlikely(is_swiotlb_buffer(paddr)))
swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
@@ -293,7 +293,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
}
if (!dev_is_dma_coherent(dev))
- arch_sync_dma_for_cpu_all(dev);
+ arch_sync_dma_for_cpu_all();
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);
@@ -345,7 +345,7 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
}
if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_device(dev, phys, size, dir);
+ arch_sync_dma_for_device(phys, size, dir);
return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_page);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0a54780e0942..f18a5bbc66ef 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1134,6 +1134,11 @@ static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
return 0;
}
+static int perf_mux_hrtimer_restart_ipi(void *arg)
+{
+ return perf_mux_hrtimer_restart(arg);
+}
+
void perf_pmu_disable(struct pmu *pmu)
{
int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -1706,28 +1711,34 @@ static inline void perf_event__state_init(struct perf_event *event)
PERF_EVENT_STATE_INACTIVE;
}
-static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
+static int __perf_event_read_size(u64 read_format, int nr_siblings)
{
int entry = sizeof(u64); /* value */
int size = 0;
int nr = 1;
- if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
size += sizeof(u64);
- if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
size += sizeof(u64);
- if (event->attr.read_format & PERF_FORMAT_ID)
+ if (read_format & PERF_FORMAT_ID)
+ entry += sizeof(u64);
+
+ if (read_format & PERF_FORMAT_LOST)
entry += sizeof(u64);
- if (event->attr.read_format & PERF_FORMAT_GROUP) {
+ if (read_format & PERF_FORMAT_GROUP) {
nr += nr_siblings;
size += sizeof(u64);
}
- size += entry * nr;
- event->read_size = size;
+ /*
+ * Since perf_event_validate_size() limits this to 16k and inhibits
+ * adding more siblings, this will never overflow.
+ */
+ return size + nr * entry;
}
static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
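
__perf_event_read_size() now returns size + nr * entry, where entry grows by 8 bytes for each of PERF_FORMAT_ID and PERF_FORMAT_LOST and nr counts the group members. A worked example, for read_format = GROUP | ID | LOST | TOTAL_TIME_ENABLED | TOTAL_TIME_RUNNING and 100 siblings:

    entry = 8 (value) + 8 (id) + 8 (lost)          = 24
    size  = 8 + 8 (times) + 8 (nr, from GROUP)     = 24
    nr    = 1 + 100                                = 101
    read_size = 24 + 101 * 24                      = 2448 bytes

so the 16k cap enforced by perf_event_validate_size() below is only reached around 680 siblings with every format bit set.
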
@@ -1768,8 +1779,9 @@ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
*/
static void perf_event__header_size(struct perf_event *event)
{
- __perf_event_read_size(event,
- event->group_leader->nr_siblings);
+ event->read_size =
+ __perf_event_read_size(event->attr.read_format,
+ event->group_leader->nr_siblings);
__perf_event_header_size(event, event->attr.sample_type);
}
@@ -1800,23 +1812,44 @@ static void perf_event__id_header_size(struct perf_event *event)
event->id_header_size = size;
}
+/*
+ * Check that adding an event to the group does not result in anybody
+ * overflowing the 64k event limit imposed by the output buffer.
+ *
+ * Specifically, check that the read_size for the event does not exceed 16k,
+ * read_size being the one term that grows with group size. Since read_size
+ * depends on per-event read_format, also (re)check the existing events.
+ *
+ * This leaves 48k for the constant size fields and things like callchains,
+ * branch stacks and register sets.
+ */
static bool perf_event_validate_size(struct perf_event *event)
{
- /*
- * The values computed here will be over-written when we actually
- * attach the event.
- */
- __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
- __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
- perf_event__id_header_size(event);
+ struct perf_event *sibling, *group_leader = event->group_leader;
+
+ if (__perf_event_read_size(event->attr.read_format,
+ group_leader->nr_siblings + 1) > 16*1024)
+ return false;
+
+ if (__perf_event_read_size(group_leader->attr.read_format,
+ group_leader->nr_siblings + 1) > 16*1024)
+ return false;
/*
- * Sum the lot; should not exceed the 64k limit we have on records.
- * Conservative limit to allow for callchains and other variable fields.
+ * When creating a new group leader, group_leader->ctx is initialized
+ * after the size has been validated, but we cannot safely use
+ * for_each_sibling_event() until group_leader->ctx is set. A new group
+ * leader cannot have any siblings yet, so we can safely skip checking
+ * the non-existent siblings.
*/
- if (event->read_size + event->header_size +
- event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
- return false;
+ if (event == group_leader)
+ return true;
+
+ for_each_sibling_event(sibling, group_leader) {
+ if (__perf_event_read_size(sibling->attr.read_format,
+ group_leader->nr_siblings + 1) > 16*1024)
+ return false;
+ }
return true;
}
@@ -1844,6 +1877,7 @@ static void perf_group_attach(struct perf_event *event)
list_add_tail(&event->sibling_list, &group_leader->sibling_list);
group_leader->nr_siblings++;
+ group_leader->group_generation++;
perf_event__header_size(group_leader);
@@ -1999,6 +2033,7 @@ static void perf_group_detach(struct perf_event *event)
if (event->group_leader != event) {
list_del_init(&event->sibling_list);
event->group_leader->nr_siblings--;
+ event->group_leader->group_generation++;
goto out;
}
@@ -4849,7 +4884,7 @@ static int __perf_read_group_add(struct perf_event *leader,
u64 read_format, u64 *values)
{
struct perf_event_context *ctx = leader->ctx;
- struct perf_event *sub;
+ struct perf_event *sub, *parent;
unsigned long flags;
int n = 1; /* skip @nr */
int ret;
@@ -4859,6 +4894,33 @@ static int __perf_read_group_add(struct perf_event *leader,
return ret;
raw_spin_lock_irqsave(&ctx->lock, flags);
+ /*
+ * Verify the grouping between the parent and child (inherited)
+ * events is still intact.
+ *
+ * Specifically:
+ * - leader->ctx->lock pins leader->sibling_list
+ * - parent->child_mutex pins parent->child_list
+ * - parent->ctx->mutex pins parent->sibling_list
+ *
+ * Because parent->ctx != leader->ctx (and child_list nests inside
+ * ctx->mutex), group destruction is not atomic between children, also
+ * see perf_event_release_kernel(). Additionally, parent can grow the
+ * group.
+ *
+ * Therefore it is possible to have parent and child groups in a
+ * different configuration and summing over such a beast makes no sense
+ * whatsoever.
+ *
+ * Reject this.
+ */
+ parent = leader->parent;
+ if (parent &&
+ (parent->group_generation != leader->group_generation ||
+ parent->nr_siblings != leader->nr_siblings)) {
+ ret = -ECHILD;
+ goto unlock;
+ }
/*
* Since we co-schedule groups, {enabled,running} times of siblings
@@ -4881,15 +4943,20 @@ static int __perf_read_group_add(struct perf_event *leader,
values[n++] += perf_event_count(leader);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
+ if (read_format & PERF_FORMAT_LOST)
+ values[n++] = atomic64_read(&leader->lost_samples);
for_each_sibling_event(sub, leader) {
values[n++] += perf_event_count(sub);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
+ if (read_format & PERF_FORMAT_LOST)
+ values[n++] = atomic64_read(&sub->lost_samples);
}
+unlock:
raw_spin_unlock_irqrestore(&ctx->lock, flags);
- return 0;
+ return ret;
}
static int perf_read_group(struct perf_event *event,
@@ -4908,10 +4975,6 @@ static int perf_read_group(struct perf_event *event,
values[0] = 1 + leader->nr_siblings;
- /*
- * By locking the child_mutex of the leader we effectively
- * lock the child list of all siblings.. XXX explain how.
- */
mutex_lock(&leader->child_mutex);
ret = __perf_read_group_add(leader, read_format, values);
@@ -4942,7 +5005,7 @@ static int perf_read_one(struct perf_event *event,
u64 read_format, char __user *buf)
{
u64 enabled, running;
- u64 values[4];
+ u64 values[5];
int n = 0;
values[n++] = __perf_event_read_value(event, &enabled, &running);
@@ -4952,6 +5015,8 @@ static int perf_read_one(struct perf_event *event,
values[n++] = running;
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
+ if (read_format & PERF_FORMAT_LOST)
+ values[n++] = atomic64_read(&event->lost_samples);
if (copy_to_user(buf, values, n * sizeof(u64)))
return -EFAULT;
@@ -6281,7 +6346,7 @@ static void perf_output_read_one(struct perf_output_handle *handle,
u64 enabled, u64 running)
{
u64 read_format = event->attr.read_format;
- u64 values[4];
+ u64 values[5];
int n = 0;
values[n++] = perf_event_count(event);
@@ -6295,6 +6360,8 @@ static void perf_output_read_one(struct perf_output_handle *handle,
}
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
+ if (read_format & PERF_FORMAT_LOST)
+ values[n++] = atomic64_read(&event->lost_samples);
__output_copy(handle, values, n * sizeof(u64));
}
@@ -6305,7 +6372,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
{
struct perf_event *leader = event->group_leader, *sub;
u64 read_format = event->attr.read_format;
- u64 values[5];
+ u64 values[6];
int n = 0;
values[n++] = 1 + leader->nr_siblings;
@@ -6323,6 +6390,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
values[n++] = perf_event_count(leader);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
+ if (read_format & PERF_FORMAT_LOST)
+ values[n++] = atomic64_read(&leader->lost_samples);
__output_copy(handle, values, n * sizeof(u64));
@@ -6336,6 +6405,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
values[n++] = perf_event_count(sub);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
+ if (read_format & PERF_FORMAT_LOST)
+ values[n++] = atomic64_read(&sub->lost_samples);
__output_copy(handle, values, n * sizeof(u64));
}
@@ -8166,8 +8237,8 @@ __perf_event_account_interrupt(struct perf_event *event, int throttle)
hwc->interrupts = 1;
} else {
hwc->interrupts++;
- if (unlikely(throttle
- && hwc->interrupts >= max_samples_per_tick)) {
+ if (unlikely(throttle &&
+ hwc->interrupts > max_samples_per_tick)) {
__this_cpu_inc(perf_throttled_count);
tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
hwc->interrupts = MAX_INTERRUPTS;
@@ -9997,8 +10068,7 @@ perf_event_mux_interval_ms_store(struct device *dev,
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
- cpu_function_call(cpu,
- (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
+ cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpuctx);
}
cpus_read_unlock();
mutex_unlock(&mux_interval_mutex);
@@ -10010,9 +10080,30 @@ static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
static struct attribute *pmu_dev_attrs[] = {
&dev_attr_type.attr,
&dev_attr_perf_event_mux_interval_ms.attr,
+ &dev_attr_nr_addr_filters.attr,
+ NULL,
+};
+
+static umode_t pmu_dev_is_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pmu *pmu = dev_get_drvdata(dev);
+
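+ /* index 2 in pmu_dev_attrs[] is nr_addr_filters; hide it if the PMU has no address filters */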
+ if (n == 2 && !pmu->nr_addr_filters)
+ return 0;
+
+ return a->mode;
+}
+
+static struct attribute_group pmu_dev_attr_group = {
+ .is_visible = pmu_dev_is_visible,
+ .attrs = pmu_dev_attrs,
+};
+
+static const struct attribute_group *pmu_dev_groups[] = {
+ &pmu_dev_attr_group,
NULL,
};
-ATTRIBUTE_GROUPS(pmu_dev);
static int pmu_bus_running;
static struct bus_type pmu_bus = {
@@ -10035,29 +10126,24 @@ static int pmu_dev_alloc(struct pmu *pmu)
pmu->dev->groups = pmu->attr_groups;
device_initialize(pmu->dev);
- ret = dev_set_name(pmu->dev, "%s", pmu->name);
- if (ret)
- goto free_dev;
dev_set_drvdata(pmu->dev, pmu);
pmu->dev->bus = &pmu_bus;
pmu->dev->release = pmu_dev_release;
- ret = device_add(pmu->dev);
+
+ ret = dev_set_name(pmu->dev, "%s", pmu->name);
if (ret)
goto free_dev;
- /* For PMUs with address filters, throw in an extra attribute: */
- if (pmu->nr_addr_filters)
- ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
-
+ ret = device_add(pmu->dev);
if (ret)
- goto del_dev;
+ goto free_dev;
- if (pmu->attr_update)
+ if (pmu->attr_update) {
ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update);
-
- if (ret)
- goto del_dev;
+ if (ret)
+ goto del_dev;
+ }
out:
return ret;
@@ -10796,7 +10882,7 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
/*
* If it's not a per-cpu rb, it must be the same task.
*/
- if (output_event->cpu == -1 && output_event->ctx != event->ctx)
+ if (output_event->cpu == -1 && output_event->hw.target != event->hw.target)
goto out;
/*
@@ -10940,7 +11026,7 @@ SYSCALL_DEFINE5(perf_event_open,
struct perf_event *group_leader = NULL, *output_event = NULL;
struct perf_event *event, *sibling;
struct perf_event_attr attr;
- struct perf_event_context *ctx, *uninitialized_var(gctx);
+ struct perf_event_context *ctx, *gctx;
struct file *event_file = NULL;
struct fd group = {NULL, 0};
struct task_struct *task = NULL;
@@ -11983,6 +12069,8 @@ static int inherit_group(struct perf_event *parent_event,
!perf_get_aux_event(child_ctr, leader))
return -EINVAL;
}
+ if (leader)
+ leader->group_generation = parent_event->group_generation;
return 0;
}
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index ffb59a4ef4ff..679cc87b40f4 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -171,8 +171,10 @@ __perf_output_begin(struct perf_output_handle *handle,
goto out;
if (unlikely(rb->paused)) {
- if (rb->nr_pages)
+ if (rb->nr_pages) {
local_inc(&rb->lost);
+ atomic64_inc(&event->lost_samples);
+ }
goto out;
}
@@ -255,6 +257,7 @@ __perf_output_begin(struct perf_output_handle *handle,
fail:
local_inc(&rb->lost);
+ atomic64_inc(&event->lost_samples);
perf_output_put_handle(handle);
out:
rcu_read_unlock();
@@ -653,6 +656,12 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
max_order--;
}
+ /*
+ * kcalloc_node() cannot allocate a buffer larger than
+ * PAGE_SIZE << MAX_ORDER, so bail out directly in that case.
+ */
+ if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_ORDER)
+ return -ENOMEM;
rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
node);
if (!rb->aux_pages)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index a793bd23fe56..1799427a384a 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -2195,7 +2195,7 @@ static void handle_swbp(struct pt_regs *regs)
{
struct uprobe *uprobe;
unsigned long bp_vaddr;
- int uninitialized_var(is_swbp);
+ int is_swbp;
bp_vaddr = uprobe_get_swbp_addr(regs);
if (bp_vaddr == get_trampoline_vaddr())
diff --git a/kernel/exit.c b/kernel/exit.c
index ece64771a31f..c764d16328f6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -63,12 +63,59 @@
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>
+#include <linux/sysfs.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
+/*
+ * The default value should be high enough to not crash a system that randomly
+ * crashes its kernel from time to time, but low enough to at least not permit
+ * overflowing 32-bit refcounts or the ldsem writer count.
+ */
+static unsigned int oops_limit = 10000;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table kern_exit_table[] = {
+ {
+ .procname = "oops_limit",
+ .data = &oops_limit,
+ .maxlen = sizeof(oops_limit),
+ .mode = 0644,
+ .proc_handler = proc_douintvec,
+ },
+ { }
+};
+
+static __init int kernel_exit_sysctls_init(void)
+{
+ register_sysctl_init("kernel", kern_exit_table);
+ return 0;
+}
+late_initcall(kernel_exit_sysctls_init);
+#endif
+
+static atomic_t oops_count = ATOMIC_INIT(0);
+
+#ifdef CONFIG_SYSFS
+static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *page)
+{
+ return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
+}
+
+static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);
+
+static __init int kernel_exit_sysfs_init(void)
+{
+ sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
+ return 0;
+}
+late_initcall(kernel_exit_sysfs_init);
+#endif
+
static void __unhash_process(struct task_struct *p, bool group_dead)
{
nr_threads--;
@@ -94,7 +141,7 @@ static void __exit_signal(struct task_struct *tsk)
struct signal_struct *sig = tsk->signal;
bool group_dead = thread_group_leader(tsk);
struct sighand_struct *sighand;
- struct tty_struct *uninitialized_var(tty);
+ struct tty_struct *tty;
u64 utime, stime;
sighand = rcu_dereference_check(tsk->sighand,
@@ -864,6 +911,31 @@ void __noreturn do_exit(long code)
}
EXPORT_SYMBOL_GPL(do_exit);
+void __noreturn make_task_dead(int signr)
+{
+ /*
+ * Take the task off the cpu after something catastrophic has
+ * happened.
+ */
+ unsigned int limit;
+
+ /*
+ * If an oops happens while a reference to an object is held, that
+ * reference leaks. If the oops doesn't also leak the memory itself,
+ * repeated oopsing can cause reference counters to wrap around (if
+ * they're not using refcount_t). Repeated oopsing can therefore turn
+ * unexploitable-looking bugs into exploitable ones.
+ * To make sure this can't happen, place an upper bound on how often the
+ * kernel may oops without panic().
+ */
+ limit = READ_ONCE(oops_limit);
+ if (atomic_inc_return(&oops_count) >= limit && limit)
+ panic("Oopsed too often (kernel.oops_limit is %d)", limit);
+
+ do_exit(signr);
+}
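The resulting counter and limit are visible from userspace through the new sysfs and sysctl entries; a minimal sketch of reading them:

	#include <stdio.h>

	int main(void)
	{
		char buf[32];
		FILE *f;

		/* count of oopses since boot */
		f = fopen("/sys/kernel/oops_count", "r");
		if (f && fgets(buf, sizeof(buf), f))
			printf("oops_count: %s", buf);
		if (f)
			fclose(f);

		/* panic threshold; 0 disables the limit */
		f = fopen("/proc/sys/kernel/oops_limit", "r");
		if (f && fgets(buf, sizeof(buf), f))
			printf("oops_limit: %s", buf);
		if (f)
			fclose(f);
		return 0;
	}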
+
void complete_and_exit(struct completion *comp, long code)
{
if (comp)
diff --git a/kernel/extable.c b/kernel/extable.c
index f6c9406eec7d..14bda7cab65f 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -34,7 +34,8 @@ u32 __initdata __visible main_extable_sort_needed = 1;
/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
- if (main_extable_sort_needed && __stop___ex_table > __start___ex_table) {
+ if (main_extable_sort_needed &&
+ &__stop___ex_table > &__start___ex_table) {
pr_notice("Sorting __ex_table...\n");
sort_extable(__start___ex_table, __stop___ex_table);
}
diff --git a/kernel/fail_function.c b/kernel/fail_function.c
index b0b1ad93fa95..8f3795d8ac5b 100644
--- a/kernel/fail_function.c
+++ b/kernel/fail_function.c
@@ -163,10 +163,7 @@ static void fei_debugfs_add_attr(struct fei_attr *attr)
static void fei_debugfs_remove_attr(struct fei_attr *attr)
{
- struct dentry *dir;
-
- dir = debugfs_lookup(attr->kp.symbol_name, fei_debugfs_dir);
- debugfs_remove_recursive(dir);
+ debugfs_lookup_and_remove(attr->kp.symbol_name, fei_debugfs_dir);
}
static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
diff --git a/kernel/fork.c b/kernel/fork.c
index b77823cb1164..f2cd6afbde0d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -749,6 +749,14 @@ void __put_task_struct(struct task_struct *tsk)
}
EXPORT_SYMBOL_GPL(__put_task_struct);
+void __put_task_struct_rcu_cb(struct rcu_head *rhp)
+{
+ struct task_struct *task = container_of(rhp, struct task_struct, rcu);
+
+ __put_task_struct(task);
+}
+EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb);
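A minimal sketch of the intended use, dropping a task reference from a context that cannot block (the wrapper name here is illustrative):

	static inline void put_task_struct_deferred(struct task_struct *t)
	{
		/* defer the potentially sleeping teardown to an RCU callback */
		if (refcount_dec_and_test(&t->usage))
			call_rcu(&t->rcu, __put_task_struct_rcu_cb);
	}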
+
void __init __weak arch_task_cache_init(void) { }
/*
@@ -2335,11 +2343,6 @@ struct task_struct *fork_idle(int cpu)
return task;
}
-struct mm_struct *copy_init_mm(void)
-{
- return dup_mm(NULL, &init_mm);
-}
-
/*
* Ok, this is the main fork-routine.
*
@@ -2710,10 +2713,27 @@ static void sighand_ctor(void *data)
init_waitqueue_head(&sighand->signalfd_wqh);
}
-void __init proc_caches_init(void)
+void __init mm_cache_init(void)
{
unsigned int mm_size;
+ /*
+ * The mm_cpumask is located at the end of mm_struct, and is
+ * dynamically sized based on the maximum CPU number this system
+ * can have, taking hotplug into account (nr_cpu_ids).
+ */
+ mm_size = sizeof(struct mm_struct) + cpumask_size();
+
+ mm_cachep = kmem_cache_create_usercopy("mm_struct",
+ mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
+ offsetof(struct mm_struct, saved_auxv),
+ sizeof_field(struct mm_struct, saved_auxv),
+ NULL);
+}
+
+void __init proc_caches_init(void)
+{
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
@@ -2731,19 +2751,6 @@ void __init proc_caches_init(void)
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
NULL);
- /*
- * The mm_cpumask is located at the end of mm_struct, and is
- * dynamically sized based on the maximum CPU number this system
- * can have, taking hotplug into account (nr_cpu_ids).
- */
- mm_size = sizeof(struct mm_struct) + cpumask_size();
-
- mm_cachep = kmem_cache_create_usercopy("mm_struct",
- mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
- offsetof(struct mm_struct, saved_auxv),
- sizeof_field(struct mm_struct, saved_auxv),
- NULL);
vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
mmap_init();
nsproxy_cache_init();
diff --git a/kernel/futex.c b/kernel/futex.c
index f82879ae6577..d51bbb398142 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1416,7 +1416,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
int err;
- u32 uninitialized_var(curval);
+ u32 curval;
if (unlikely(should_fail_futex(true)))
return -EFAULT;
@@ -1586,7 +1586,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
*/
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
{
- u32 uninitialized_var(curval), newval;
+ u32 curval, newval;
struct task_struct *new_owner;
bool postunlock = false;
DEFINE_WAKE_Q(wake_q);
@@ -3085,7 +3085,7 @@ uaddr_faulted:
*/
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
- u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
+ u32 curval, uval, vpid = task_pid_vnr(current);
union futex_key key = FUTEX_KEY_INIT;
struct futex_hash_bucket *hb;
struct futex_q *top_waiter;
@@ -3553,7 +3553,7 @@ err_unlock:
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
bool pi, bool pending_op)
{
- u32 uval, uninitialized_var(nval), mval;
+ u32 uval, nval, mval;
int err;
/* Futex address must be 32bit aligned */
@@ -3683,7 +3683,7 @@ static void exit_robust_list(struct task_struct *curr)
struct robust_list_head __user *head = curr->robust_list;
struct robust_list __user *entry, *next_entry, *pending;
unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
- unsigned int uninitialized_var(next_pi);
+ unsigned int next_pi;
unsigned long futex_offset;
int rc;
@@ -3982,7 +3982,7 @@ static void compat_exit_robust_list(struct task_struct *curr)
struct compat_robust_list_head __user *head = curr->compat_robust_list;
struct robust_list __user *entry, *next_entry, *pending;
unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
- unsigned int uninitialized_var(next_pi);
+ unsigned int next_pi;
compat_uptr_t uentry, next_uentry, upending;
compat_long_t futex_offset;
int rc;
diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c
index c466c7fbdece..ea6b45d0fa0d 100644
--- a/kernel/gcov/clang.c
+++ b/kernel/gcov/clang.c
@@ -327,6 +327,8 @@ void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
for (i = 0; i < sfn_ptr->num_counters; i++)
dfn_ptr->counters[i] += sfn_ptr->counters[i];
+
+ sfn_ptr = list_next_entry(sfn_ptr, head);
}
}
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index 60c7be5ff5c8..0f8a7af5d591 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -33,6 +33,13 @@
#define GCOV_TAG_FUNCTION_LENGTH 3
+/* Since GCC 12.1 sizes are in BYTES and not in WORDS (4B). */
+#if (__GNUC__ >= 12)
+#define GCOV_UNIT_SIZE 4
+#else
+#define GCOV_UNIT_SIZE 1
+#endif
+
static struct gcov_info *gcov_info_head;
/**
@@ -78,6 +85,7 @@ struct gcov_fn_info {
* @version: gcov version magic indicating the gcc version used for compilation
* @next: list head for a singly-linked list
* @stamp: uniquifying time stamp
+ * @checksum: unique object checksum
* @filename: name of the associated gcov data file
* @merge: merge functions (null for unused counter type)
* @n_functions: number of instrumented functions
@@ -90,6 +98,10 @@ struct gcov_info {
unsigned int version;
struct gcov_info *next;
unsigned int stamp;
+ /* Since GCC 12.1 a checksum field is added. */
+#if (__GNUC__ >= 12)
+ unsigned int checksum;
+#endif
const char *filename;
void (*merge[GCOV_COUNTERS])(gcov_type *, unsigned int);
unsigned int n_functions;
@@ -451,12 +463,18 @@ static size_t convert_to_gcda(char *buffer, struct gcov_info *info)
pos += store_gcov_u32(buffer, pos, info->version);
pos += store_gcov_u32(buffer, pos, info->stamp);
+#if (__GNUC__ >= 12)
+ /* Use zero as checksum of the compilation unit. */
+ pos += store_gcov_u32(buffer, pos, 0);
+#endif
+
for (fi_idx = 0; fi_idx < info->n_functions; fi_idx++) {
fi_ptr = info->functions[fi_idx];
/* Function record. */
pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION);
- pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION_LENGTH);
+ pos += store_gcov_u32(buffer, pos,
+ GCOV_TAG_FUNCTION_LENGTH * GCOV_UNIT_SIZE);
pos += store_gcov_u32(buffer, pos, fi_ptr->ident);
pos += store_gcov_u32(buffer, pos, fi_ptr->lineno_checksum);
pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
@@ -470,7 +488,8 @@ static size_t convert_to_gcda(char *buffer, struct gcov_info *info)
/* Counter record. */
pos += store_gcov_u32(buffer, pos,
GCOV_TAG_FOR_COUNTER(ct_idx));
- pos += store_gcov_u32(buffer, pos, ci_ptr->num * 2);
+ pos += store_gcov_u32(buffer, pos,
+ ci_ptr->num * 2 * GCOV_UNIT_SIZE);
for (cv_idx = 0; cv_idx < ci_ptr->num; cv_idx++) {
pos += store_gcov_u64(buffer, pos,
ci_ptr->values[cv_idx]);
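As a worked example of the unit change: a function record carries three u32 fields (ident, lineno_checksum, cfg_checksum), so its announced length is 3 with GCC < 12 (words) but 3 * 4 = 12 with GCC >= 12 (bytes); likewise a counter record with num 64-bit counters is announced as num * 2 before GCC 12 and num * 2 * 4 from GCC 12.1 on.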
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 856f0297dc73..521121c2666c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1484,7 +1484,8 @@ int irq_chip_request_resources_parent(struct irq_data *data)
if (data->chip->irq_request_resources)
return data->chip->irq_request_resources(data);
- return -ENOSYS;
+ /* no error on missing optional irq_chip::irq_request_resources */
+ return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index e2999a070a99..4195e7ad1ff2 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -537,21 +537,34 @@ EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
unsigned int clr, unsigned int set)
{
- unsigned int i = gc->irq_base;
+ unsigned int i, virq;
raw_spin_lock(&gc_lock);
list_del(&gc->list);
raw_spin_unlock(&gc_lock);
- for (; msk; msk >>= 1, i++) {
+ for (i = 0; msk; msk >>= 1, i++) {
if (!(msk & 0x01))
continue;
+ /*
+ * Interrupt domain based chips store the base hardware
+ * interrupt number in gc::irq_base. Otherwise gc::irq_base
+ * contains the base Linux interrupt number.
+ */
+ if (gc->domain) {
+ virq = irq_find_mapping(gc->domain, gc->irq_base + i);
+ if (!virq)
+ continue;
+ } else {
+ virq = gc->irq_base + i;
+ }
+
/* Remove handler first. That will mask the irq line */
- irq_set_handler(i, NULL);
- irq_set_chip(i, &no_irq_chip);
- irq_set_chip_data(i, NULL);
- irq_modify_status(i, clr, set);
+ irq_set_handler(virq, NULL);
+ irq_set_chip(virq, &no_irq_chip);
+ irq_set_chip_data(virq, NULL);
+ irq_modify_status(virq, clr, set);
}
}
EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index ba4d742c1c65..7057b60afabe 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -52,6 +52,7 @@ enum {
* IRQS_PENDING - irq is pending and replayed later
* IRQS_SUSPENDED - irq is suspended
* IRQS_NMI - irq line is used to deliver NMIs
+ * IRQS_SYSFS - descriptor has been added to sysfs
*/
enum {
IRQS_AUTODETECT = 0x00000001,
@@ -64,6 +65,7 @@ enum {
IRQS_SUSPENDED = 0x00000800,
IRQS_TIMINGS = 0x00001000,
IRQS_NMI = 0x00002000,
+ IRQS_SYSFS = 0x00004000,
};
#include "debug.h"
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 172b5e6bc4c2..0272a2e36ae6 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -288,22 +288,25 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
if (irq_kobj_base) {
/*
* Continue even in case of failure as this is nothing
- * crucial.
+ * crucial and failures in the late irq_sysfs_init()
+ * cannot be rolled back.
*/
if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
pr_warn("Failed to add kobject for irq %d\n", irq);
+ else
+ desc->istate |= IRQS_SYSFS;
}
}
static void irq_sysfs_del(struct irq_desc *desc)
{
/*
- * If irq_sysfs_init() has not yet been invoked (early boot), then
- * irq_kobj_base is NULL and the descriptor was never added.
- * kobject_del() complains about a object with no parent, so make
- * it conditional.
+ * Only invoke kobject_del() when kobject_add() was successfully
+ * invoked for the descriptor. This covers both early boot, where
+ * sysfs is not initialized yet, and the case of a failed
+ * kobject_add() invocation.
*/
- if (irq_kobj_base)
+ if (desc->istate & IRQS_SYSFS)
kobject_del(&desc->kobj);
}
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 5e03cbee70d6..a5f1dd7b6dc3 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -25,6 +25,9 @@ static DEFINE_MUTEX(irq_domain_mutex);
static struct irq_domain *irq_default_domain;
+static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
+ unsigned int nr_irqs, int node, void *arg,
+ bool realloc, const struct irq_affinity_desc *affinity);
static void irq_domain_check_hierarchy(struct irq_domain *domain);
struct irqchip_fwid {
@@ -114,23 +117,12 @@ void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
-/**
- * __irq_domain_add() - Allocate a new irq_domain data structure
- * @fwnode: firmware node for the interrupt controller
- * @size: Size of linear map; 0 for radix mapping only
- * @hwirq_max: Maximum number of interrupts supported by controller
- * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
- * direct mapping
- * @ops: domain callbacks
- * @host_data: Controller private data pointer
- *
- * Allocates and initializes an irq_domain structure.
- * Returns pointer to IRQ domain, or NULL on failure.
- */
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
- irq_hw_number_t hwirq_max, int direct_max,
- const struct irq_domain_ops *ops,
- void *host_data)
+static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
+ unsigned int size,
+ irq_hw_number_t hwirq_max,
+ int direct_max,
+ const struct irq_domain_ops *ops,
+ void *host_data)
{
struct device_node *of_node = to_of_node(fwnode);
struct irqchip_fwid *fwid;
@@ -222,12 +214,44 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
domain->revmap_direct_max_irq = direct_max;
irq_domain_check_hierarchy(domain);
+ return domain;
+}
+
+static void __irq_domain_publish(struct irq_domain *domain)
+{
mutex_lock(&irq_domain_mutex);
debugfs_add_domain_dir(domain);
list_add(&domain->link, &irq_domain_list);
mutex_unlock(&irq_domain_mutex);
pr_debug("Added domain %s\n", domain->name);
+}
+
+/**
+ * __irq_domain_add() - Allocate a new irq_domain data structure
+ * @fwnode: firmware node for the interrupt controller
+ * @size: Size of linear map; 0 for radix mapping only
+ * @hwirq_max: Maximum number of interrupts supported by controller
+ * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
+ * direct mapping
+ * @ops: domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Allocates and initializes an irq_domain structure.
+ * Returns pointer to IRQ domain, or NULL on failure.
+ */
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
+ irq_hw_number_t hwirq_max, int direct_max,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain *domain;
+
+ domain = __irq_domain_create(fwnode, size, hwirq_max, direct_max,
+ ops, host_data);
+ if (domain)
+ __irq_domain_publish(domain);
+
return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);
@@ -509,6 +533,9 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
return;
hwirq = irq_data->hwirq;
+
+ mutex_lock(&irq_domain_mutex);
+
irq_set_status_flags(irq, IRQ_NOREQUEST);
/* remove chip and handler */
@@ -528,10 +555,12 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
/* Clear reverse map for this hwirq */
irq_domain_clear_mapping(domain, hwirq);
+
+ mutex_unlock(&irq_domain_mutex);
}
-int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hwirq)
+static int irq_domain_associate_locked(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
int ret;
@@ -544,7 +573,6 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
return -EINVAL;
- mutex_lock(&irq_domain_mutex);
irq_data->hwirq = hwirq;
irq_data->domain = domain;
if (domain->ops->map) {
@@ -561,7 +589,6 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
}
irq_data->domain = NULL;
irq_data->hwirq = 0;
- mutex_unlock(&irq_domain_mutex);
return ret;
}
@@ -572,12 +599,23 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
domain->mapcount++;
irq_domain_set_mapping(domain, hwirq, irq_data);
- mutex_unlock(&irq_domain_mutex);
irq_clear_status_flags(virq, IRQ_NOREQUEST);
return 0;
}
+
+int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ int ret;
+
+ mutex_lock(&irq_domain_mutex);
+ ret = irq_domain_associate_locked(domain, virq, hwirq);
+ mutex_unlock(&irq_domain_mutex);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(irq_domain_associate);
void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
@@ -637,6 +675,34 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
+static unsigned int irq_create_mapping_affinity_locked(struct irq_domain *domain,
+ irq_hw_number_t hwirq,
+ const struct irq_affinity_desc *affinity)
+{
+ struct device_node *of_node = irq_domain_get_of_node(domain);
+ int virq;
+
+ pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
+
+ /* Allocate a virtual interrupt number */
+ virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
+ affinity);
+ if (virq <= 0) {
+ pr_debug("-> virq allocation failed\n");
+ return 0;
+ }
+
+ if (irq_domain_associate_locked(domain, virq, hwirq)) {
+ irq_free_desc(virq);
+ return 0;
+ }
+
+ pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
+ hwirq, of_node_full_name(of_node), virq);
+
+ return virq;
+}
+
/**
* irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space
* @domain: domain owning this hardware interrupt or NULL for default domain
@@ -649,47 +715,31 @@ EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
* on the number returned from that call.
*/
unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
- irq_hw_number_t hwirq,
- const struct irq_affinity_desc *affinity)
+ irq_hw_number_t hwirq,
+ const struct irq_affinity_desc *affinity)
{
- struct device_node *of_node;
int virq;
- pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
-
- /* Look for default domain if nececssary */
+ /* Look for default domain if necessary */
if (domain == NULL)
domain = irq_default_domain;
if (domain == NULL) {
WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
return 0;
}
- pr_debug("-> using domain @%p\n", domain);
- of_node = irq_domain_get_of_node(domain);
+ mutex_lock(&irq_domain_mutex);
/* Check if mapping already exists */
virq = irq_find_mapping(domain, hwirq);
if (virq) {
- pr_debug("-> existing mapping on virq %d\n", virq);
- return virq;
- }
-
- /* Allocate a virtual interrupt number */
- virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
- affinity);
- if (virq <= 0) {
- pr_debug("-> virq allocation failed\n");
- return 0;
- }
-
- if (irq_domain_associate(domain, virq, hwirq)) {
- irq_free_desc(virq);
- return 0;
+ pr_debug("existing mapping on virq %d\n", virq);
+ goto out;
}
- pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
- hwirq, of_node_full_name(of_node), virq);
+ virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity);
+out:
+ mutex_unlock(&irq_domain_mutex);
return virq;
}
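From a driver's point of view nothing changes; the usual call is still, sketched with hypothetical names:

	/* map hwirq 5 of our controller; returns 0 on failure */
	unsigned int virq = irq_create_mapping(mydev->domain, 5);
	if (!virq)
		return -ENXIO;

The difference is that the existing-mapping check and the descriptor allocation now happen under a single irq_domain_mutex hold, so two racing callers can no longer create duplicate mappings for the same hwirq.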
@@ -793,6 +843,8 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
type &= IRQ_TYPE_SENSE_MASK;
+ mutex_lock(&irq_domain_mutex);
+
/*
* If we've already configured this interrupt,
* don't do it again, or hell will break loose.
@@ -805,7 +857,7 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
* interrupt number.
*/
if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
- return virq;
+ goto out;
/*
* If the trigger type has not been set yet, then set
@@ -813,40 +865,45 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
*/
if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
irq_data = irq_get_irq_data(virq);
- if (!irq_data)
- return 0;
+ if (!irq_data) {
+ virq = 0;
+ goto out;
+ }
irqd_set_trigger_type(irq_data, type);
- return virq;
+ goto out;
}
pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
- return 0;
+ virq = 0;
+ goto out;
}
if (irq_domain_is_hierarchy(domain)) {
- virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
- if (virq <= 0)
- return 0;
+ virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE,
+ fwspec, false, NULL);
+ if (virq <= 0) {
+ virq = 0;
+ goto out;
+ }
} else {
/* Create mapping */
- virq = irq_create_mapping(domain, hwirq);
+ virq = irq_create_mapping_affinity_locked(domain, hwirq, NULL);
if (!virq)
- return virq;
+ goto out;
}
irq_data = irq_get_irq_data(virq);
- if (!irq_data) {
- if (irq_domain_is_hierarchy(domain))
- irq_domain_free_irqs(virq, 1);
- else
- irq_dispose_mapping(virq);
- return 0;
+ if (WARN_ON(!irq_data)) {
+ virq = 0;
+ goto out;
}
/* Store trigger type */
irqd_set_trigger_type(irq_data, type);
+out:
+ mutex_unlock(&irq_domain_mutex);
return virq;
}
@@ -1059,12 +1116,15 @@ struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
struct irq_domain *domain;
if (size)
- domain = irq_domain_create_linear(fwnode, size, ops, host_data);
+ domain = __irq_domain_create(fwnode, size, size, 0, ops, host_data);
else
- domain = irq_domain_create_tree(fwnode, ops, host_data);
+ domain = __irq_domain_create(fwnode, 0, ~0, 0, ops, host_data);
+
if (domain) {
domain->parent = parent;
domain->flags |= flags;
+
+ __irq_domain_publish(domain);
}
return domain;
@@ -1311,6 +1371,45 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
}
+static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
+ unsigned int nr_irqs, int node, void *arg,
+ bool realloc, const struct irq_affinity_desc *affinity)
+{
+ int i, ret, virq;
+
+ if (realloc && irq_base >= 0) {
+ virq = irq_base;
+ } else {
+ virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
+ affinity);
+ if (virq < 0) {
+ pr_debug("cannot allocate IRQ(base %d, count %d)\n",
+ irq_base, nr_irqs);
+ return virq;
+ }
+ }
+
+ if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
+ pr_debug("cannot allocate memory for IRQ%d\n", virq);
+ ret = -ENOMEM;
+ goto out_free_desc;
+ }
+
+ ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
+ if (ret < 0)
+ goto out_free_irq_data;
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_insert_irq(virq + i);
+
+ return virq;
+
+out_free_irq_data:
+ irq_domain_free_irq_data(virq, nr_irqs);
+out_free_desc:
+ irq_free_descs(virq, nr_irqs);
+ return ret;
+}
+
/**
* __irq_domain_alloc_irqs - Allocate IRQs from domain
* @domain: domain to allocate from
@@ -1337,7 +1436,7 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
bool realloc, const struct irq_affinity_desc *affinity)
{
- int i, ret, virq;
+ int ret;
if (domain == NULL) {
domain = irq_default_domain;
@@ -1345,40 +1444,11 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
return -EINVAL;
}
- if (realloc && irq_base >= 0) {
- virq = irq_base;
- } else {
- virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
- affinity);
- if (virq < 0) {
- pr_debug("cannot allocate IRQ(base %d, count %d)\n",
- irq_base, nr_irqs);
- return virq;
- }
- }
-
- if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
- pr_debug("cannot allocate memory for IRQ%d\n", virq);
- ret = -ENOMEM;
- goto out_free_desc;
- }
-
mutex_lock(&irq_domain_mutex);
- ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
- if (ret < 0) {
- mutex_unlock(&irq_domain_mutex);
- goto out_free_irq_data;
- }
- for (i = 0; i < nr_irqs; i++)
- irq_domain_insert_irq(virq + i);
+ ret = irq_domain_alloc_irqs_locked(domain, irq_base, nr_irqs, node, arg,
+ realloc, affinity);
mutex_unlock(&irq_domain_mutex);
- return virq;
-
-out_free_irq_data:
- irq_domain_free_irq_data(virq, nr_irqs);
-out_free_desc:
- irq_free_descs(virq, nr_irqs);
return ret;
}
@@ -1739,6 +1809,13 @@ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
irq_set_handler_data(virq, handler_data);
}
+static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
+ unsigned int nr_irqs, int node, void *arg,
+ bool realloc, const struct irq_affinity_desc *affinity)
+{
+ return -EINVAL;
+}
+
static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 79214f983624..2a8a5e1779c9 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1610,7 +1610,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
- if (irq_settings_can_autoenable(desc)) {
+ if (!(new->flags & IRQF_NO_AUTOEN) &&
+ irq_settings_can_autoenable(desc)) {
irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
} else {
/*
@@ -2041,10 +2042,15 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
* which interrupt is which (messes up the interrupt freeing
* logic etc).
*
+ * Also, shared interrupts do not go well with disabling auto-enable:
+ * a second sharer might request the interrupt while it is still
+ * disabled and then wait for interrupts forever.
+ *
* Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
* it cannot be set along with IRQF_NO_SUSPEND.
*/
if (((irqflags & IRQF_SHARED) && !dev_id) ||
+ ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
(!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
return -EINVAL;
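A minimal sketch of the IRQF_NO_AUTOEN pattern this check protects (device names hypothetical):

	/* keep the line masked until the device is fully initialized */
	ret = request_irq(irq, mydev_handler, IRQF_NO_AUTOEN, "mydev", mydev);
	if (ret)
		return ret;
	mydev_hw_init(mydev);
	enable_irq(irq);	/* unmask once it is safe to take interrupts */

Combining IRQF_SHARED with IRQF_NO_AUTOEN is now rejected for the reason given above: another sharer could request the line while it is still disabled.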
@@ -2200,7 +2206,8 @@ int request_nmi(unsigned int irq, irq_handler_t handler,
desc = irq_to_desc(irq);
- if (!desc || irq_settings_can_autoenable(desc) ||
+ if (!desc || (irq_settings_can_autoenable(desc) &&
+ !(irqflags & IRQF_NO_AUTOEN)) ||
!irq_settings_can_request(desc) ||
WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
!irq_supports_nmi(desc))
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 8e586858bcf4..d25edbb87119 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -466,16 +466,16 @@ unsigned int irq_matrix_reserved(struct irq_matrix *m)
}
/**
- * irq_matrix_allocated - Get the number of allocated irqs on the local cpu
+ * irq_matrix_allocated - Get the number of allocated non-managed irqs on the local CPU
* @m: Pointer to the matrix to search
*
- * This returns number of allocated irqs
+ * This returns number of allocated non-managed interrupts.
*/
unsigned int irq_matrix_allocated(struct irq_matrix *m)
{
struct cpumap *cm = this_cpu_ptr(m->maps);
- return cm->allocated;
+ return cm->allocated - cm->managed_allocated;
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 98c04ca5fa43..b7af39e36341 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -45,7 +45,7 @@ static void resend_irqs(unsigned long arg)
}
/* Tasklet to handle resend: */
-static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
+static DECLARE_TASKLET_OLD(resend_tasklet, resend_irqs);
#endif
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index d65b0fc8fb48..3694d90c3722 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -1019,6 +1019,7 @@ int crash_shrink_memory(unsigned long new_size)
start = crashk_res.start;
end = crashk_res.end;
old_size = (end == 0) ? 0 : end - start + 1;
+ new_size = roundup(new_size, KEXEC_CRASH_MEM_ALIGN);
if (new_size >= old_size) {
ret = (new_size == old_size) ? 0 : -EINVAL;
goto unlock;
@@ -1030,9 +1031,7 @@ int crash_shrink_memory(unsigned long new_size)
goto unlock;
}
- start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
- end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
-
+ end = start + new_size;
crash_free_reserved_phys_range(end, crashk_res.end);
if ((start == end) && (crashk_res.parent != NULL))
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 5b58149bcd90..e3e41e3c0b51 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -901,10 +901,22 @@ static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi,
}
offset = ALIGN(offset, align);
+
+ /*
+ * Check if the segment contains the entry point, if so,
+ * calculate the value of image->start based on it.
+ * If the compiler has produced more than one .text section
+ * (e.g. .text.hot), they are generally placed after the main .text
+ * section, and they shall not be used to calculate
+ * image->start. So do not re-calculate image->start if it
+ * is not set to the initial value, and warn the user so they
+ * have a chance to fix their purgatory's linker script.
+ */
if (sechdrs[i].sh_flags & SHF_EXECINSTR &&
pi->ehdr->e_entry >= sechdrs[i].sh_addr &&
pi->ehdr->e_entry < (sechdrs[i].sh_addr
- + sechdrs[i].sh_size)) {
+ + sechdrs[i].sh_size) &&
+ !WARN_ON(kbuf->image->start != pi->ehdr->e_entry)) {
kbuf->image->start -= sechdrs[i].sh_addr;
kbuf->image->start += kbuf->mem + offset;
}
diff --git a/kernel/kheaders.c b/kernel/kheaders.c
index 8f69772af77b..42163c9e94e5 100644
--- a/kernel/kheaders.c
+++ b/kernel/kheaders.c
@@ -26,15 +26,15 @@ asm (
" .popsection \n"
);
-extern char kernel_headers_data;
-extern char kernel_headers_data_end;
+extern char kernel_headers_data[];
+extern char kernel_headers_data_end[];
static ssize_t
ikheaders_read(struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t len)
{
- memcpy(buf, &kernel_headers_data + off, len);
+ memcpy(buf, &kernel_headers_data[off], len);
return len;
}
@@ -48,8 +48,8 @@ static struct bin_attribute kheaders_attr __ro_after_init = {
static int __init ikheaders_init(void)
{
- kheaders_attr.size = (&kernel_headers_data_end -
- &kernel_headers_data);
+ kheaders_attr.size = (kernel_headers_data_end -
+ kernel_headers_data);
return sysfs_create_bin_file(kernel_kobj, &kheaders_attr);
}
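The array form matters because these are linker-provided symbols, not C objects; a sketch of the distinction, using the same names (only one of the two declarations can exist at a time):

	extern char kernel_headers_data;	/* declares a one-byte object; offsets
						   past it look out of bounds to the
						   compiler and can be miscompiled */
	extern char kernel_headers_data[];	/* incomplete array: only the address
						   is known, so indexing and pointer
						   arithmetic are well defined */

With the array form the size computation is also plain pointer arithmetic rather than a difference of address-of expressions.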
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c93340bae3ac..aecf4342f67c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -406,8 +406,8 @@ static inline int kprobe_optready(struct kprobe *p)
return 0;
}
-/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
-static inline int kprobe_disarmed(struct kprobe *p)
+/* Return true if the kprobe is disarmed. Note: p must be on hash list */
+bool kprobe_disarmed(struct kprobe *p)
{
struct optimized_kprobe *op;
@@ -614,7 +614,7 @@ void wait_for_kprobe_optimizer(void)
mutex_unlock(&kprobe_mutex);
}
-static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+bool optprobe_queued_unopt(struct optimized_kprobe *op)
{
struct optimized_kprobe *_op;
@@ -1594,7 +1594,9 @@ static int check_kprobe_address_safe(struct kprobe *p,
preempt_disable();
/* Ensure it is not in reserved area nor out of text */
- if (!kernel_text_address((unsigned long) p->addr) ||
+ if (!(core_kernel_text((unsigned long) p->addr) ||
+ is_module_text_address((unsigned long) p->addr)) ||
+ in_gate_area_no_mm((unsigned long) p->addr) ||
within_kprobe_blacklist((unsigned long) p->addr) ||
jump_label_text_reserved(p->addr, p->addr) ||
find_bug((unsigned long)p->addr)) {
@@ -1736,11 +1738,12 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
/* Try to disarm and disable this/parent probe */
if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
/*
- * If kprobes_all_disarmed is set, orig_p
- * should have already been disarmed, so
- * skip unneed disarming process.
+ * Don't be lazy here. Even if 'kprobes_all_disarmed'
+ * is false, 'orig_p' might not have been armed yet.
+ * Note arm_all_kprobes() __tries__ to arm all kprobes
+ * on a best-effort basis.
*/
- if (!kprobes_all_disarmed) {
+ if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
ret = disarm_kprobe(orig_p, true);
if (ret) {
p->flags &= ~KPROBE_FLAG_DISABLED;
@@ -1789,7 +1792,13 @@ static int __unregister_kprobe_top(struct kprobe *p)
if ((list_p != p) && (list_p->post_handler))
goto noclean;
}
- ap->post_handler = NULL;
+ /*
+ * For the kprobe-on-ftrace case, we keep the
+ * post_handler setting to identify that this aggrprobe
+ * is armed with kprobe_ipmodify_ops.
+ */
+ if (!kprobe_ftrace(ap))
+ ap->post_handler = NULL;
}
noclean:
/*
@@ -2196,8 +2205,11 @@ int enable_kprobe(struct kprobe *kp)
if (!kprobes_all_disarmed && kprobe_disabled(p)) {
p->flags &= ~KPROBE_FLAG_DISABLED;
ret = arm_kprobe(p);
- if (ret)
+ if (ret) {
p->flags |= KPROBE_FLAG_DISABLED;
+ if (p != kp)
+ kp->flags |= KPROBE_FLAG_DISABLED;
+ }
}
out:
mutex_unlock(&kprobe_mutex);
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index cdf318d86dd6..1fe7c458fe8b 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -611,9 +611,23 @@ void klp_reverse_transition(void)
/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
- child->patch_state = current->patch_state;
- /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
+ /*
+ * The parent process may have gone through a KLP transition since
+ * the thread flag was copied in setup_thread_stack() earlier. Bring
+ * the child's flag up to date with the parent here.
+ *
+ * The operation is serialized against all klp_*_transition()
+ * operations by the tasklist_lock. The only exception is
+ * klp_update_patch_state(current), but we cannot race with
+ * that because we are current.
+ */
+ if (test_tsk_thread_flag(current, TIF_PATCH_PENDING))
+ set_tsk_thread_flag(child, TIF_PATCH_PENDING);
+ else
+ clear_tsk_thread_flag(child, TIF_PATCH_PENDING);
+
+ child->patch_state = current->patch_state;
}
/*
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index ed532a844c43..fc7edd5f69d7 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1707,7 +1707,7 @@ static int noop_count(struct lock_list *entry, void *data)
static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
{
unsigned long count = 0;
- struct lock_list *uninitialized_var(target_entry);
+ struct lock_list *target_entry;
__bfs_forwards(this, (void *)&count, noop_count, &target_entry);
@@ -1735,7 +1735,7 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
unsigned long count = 0;
- struct lock_list *uninitialized_var(target_entry);
+ struct lock_list *target_entry;
__bfs_backwards(this, (void *)&count, noop_count, &target_entry);
@@ -1792,7 +1792,7 @@ check_noncircular(struct held_lock *src, struct held_lock *target,
struct lock_trace **const trace)
{
int ret;
- struct lock_list *uninitialized_var(target_entry);
+ struct lock_list *target_entry;
struct lock_list src_entry = {
.class = hlock_class(src),
.parent = NULL,
@@ -1830,7 +1830,7 @@ static noinline int
check_redundant(struct held_lock *src, struct held_lock *target)
{
int ret;
- struct lock_list *uninitialized_var(target_entry);
+ struct lock_list *target_entry;
struct lock_list src_entry = {
.class = hlock_class(src),
.parent = NULL,
@@ -2336,8 +2336,8 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
{
unsigned long usage_mask = 0, forward_mask, backward_mask;
enum lock_usage_bit forward_bit = 0, backward_bit = 0;
- struct lock_list *uninitialized_var(target_entry1);
- struct lock_list *uninitialized_var(target_entry);
+ struct lock_list *target_entry1;
+ struct lock_list *target_entry;
struct lock_list this, that;
int ret;
@@ -3338,7 +3338,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
{
int ret;
struct lock_list root;
- struct lock_list *uninitialized_var(target_entry);
+ struct lock_list *target_entry;
root.parent = NULL;
root.class = hlock_class(this);
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 3e82f449b4ff..da36997d8742 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -426,7 +426,6 @@ retry:
} while (!time_after(jiffies, stress->timeout));
kfree(order);
- kfree(stress);
}
struct reorder_lock {
@@ -491,7 +490,6 @@ out:
list_for_each_entry_safe(ll, ln, &locks, link)
kfree(ll);
kfree(order);
- kfree(stress);
}
static void stress_one_work(struct work_struct *work)
@@ -512,8 +510,6 @@ static void stress_one_work(struct work_struct *work)
break;
}
} while (!time_after(jiffies, stress->timeout));
-
- kfree(stress);
}
#define STRESS_INORDER BIT(0)
@@ -524,15 +520,24 @@ static void stress_one_work(struct work_struct *work)
static int stress(int nlocks, int nthreads, unsigned int flags)
{
struct ww_mutex *locks;
- int n;
+ struct stress *stress_array;
+ int n, count;
locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
if (!locks)
return -ENOMEM;
+ stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
+ GFP_KERNEL);
+ if (!stress_array) {
+ kfree(locks);
+ return -ENOMEM;
+ }
+
for (n = 0; n < nlocks; n++)
ww_mutex_init(&locks[n], &ww_class);
+ count = 0;
for (n = 0; nthreads; n++) {
struct stress *stress;
void (*fn)(struct work_struct *work);
@@ -556,9 +561,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
if (!fn)
continue;
- stress = kmalloc(sizeof(*stress), GFP_KERNEL);
- if (!stress)
- break;
+ stress = &stress_array[count++];
INIT_WORK(&stress->work, fn);
stress->locks = locks;
@@ -573,6 +576,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
for (n = 0; n < nlocks; n++)
ww_mutex_destroy(&locks[n]);
+ kfree(stress_array);
kfree(locks);
return 0;
diff --git a/kernel/module.c b/kernel/module.c
index 7c724356aca3..ba1df1fb1c5d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2291,15 +2291,26 @@ static void free_module(struct module *mod)
void *__symbol_get(const char *symbol)
{
struct module *owner;
+ enum mod_license license;
const struct kernel_symbol *sym;
preempt_disable();
- sym = find_symbol(symbol, &owner, NULL, NULL, true, true);
- if (sym && strong_try_module_get(owner))
+ sym = find_symbol(symbol, &owner, NULL, &license, true, true);
+ if (!sym)
+ goto fail;
+ if (license != GPL_ONLY) {
+ pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n",
+ symbol);
+ goto fail;
+ }
+ if (strong_try_module_get(owner))
sym = NULL;
preempt_enable();
return sym ? (void *)kernel_symbol_value(sym) : NULL;
+fail:
+ preempt_enable();
+ return NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);
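A minimal sketch of the caller-visible change (the exported symbol name here is hypothetical):

	/* succeeds only if my_gpl_export was exported with EXPORT_SYMBOL_GPL */
	void (*fn)(void) = symbol_get(my_gpl_export);
	if (fn) {
		fn();
		symbol_put(my_gpl_export);
	}

symbol_get() on a plain EXPORT_SYMBOL symbol now returns NULL and logs the "failing symbol_get of non-GPLONLY symbol" warning added above.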
@@ -3654,7 +3665,8 @@ static bool finished_loading(const char *name)
sched_annotate_sleep();
mutex_lock(&module_mutex);
mod = find_module_all(name, strlen(name), true);
- ret = !mod || mod->state == MODULE_STATE_LIVE;
+ ret = !mod || mod->state == MODULE_STATE_LIVE
+ || mod->state == MODULE_STATE_GOING;
mutex_unlock(&module_mutex);
return ret;
@@ -3820,20 +3832,35 @@ static int add_unformed_module(struct module *mod)
mod->state = MODULE_STATE_UNFORMED;
-again:
mutex_lock(&module_mutex);
old = find_module_all(mod->name, strlen(mod->name), true);
if (old != NULL) {
- if (old->state != MODULE_STATE_LIVE) {
+ if (old->state == MODULE_STATE_COMING
+ || old->state == MODULE_STATE_UNFORMED) {
/* Wait in case it fails to load. */
mutex_unlock(&module_mutex);
err = wait_event_interruptible(module_wq,
finished_loading(mod->name));
if (err)
goto out_unlocked;
- goto again;
+
+ /* The module might have gone in the meantime. */
+ mutex_lock(&module_mutex);
+ old = find_module_all(mod->name, strlen(mod->name),
+ true);
}
- err = -EEXIST;
+
+ /*
+ * We are here only when the same module was already being
+ * loaded. Do not try to load it again right now. This prevents
+ * long delays caused by serialized module load failures, which
+ * can happen when several devices of the same type trigger a
+ * load of the same module.
+ */
+ if (old && old->state == MODULE_STATE_LIVE)
+ err = -EEXIST;
+ else
+ err = -EBUSY;
goto out;
}
mod_update_bounds(mod);
diff --git a/kernel/padata.c b/kernel/padata.c
index 92a4867e8adc..a544da60014c 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -130,7 +130,7 @@ int padata_do_parallel(struct padata_shell *ps,
*cb_cpu = cpu;
}
- err = -EBUSY;
+ err = -EBUSY;
if ((pinst->flags & PADATA_RESET))
goto out;
diff --git a/kernel/panic.c b/kernel/panic.c
index f4f3633f4f69..2973c34d6d09 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -31,6 +31,7 @@
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
+#include <linux/sysfs.h>
#include <asm/sections.h>
#define PANIC_TIMER_STEP 100
@@ -44,6 +45,7 @@ static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;
+static unsigned int warn_limit __read_mostly;
int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);
@@ -60,6 +62,45 @@ ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
EXPORT_SYMBOL(panic_notifier_list);
+#ifdef CONFIG_SYSCTL
+static struct ctl_table kern_panic_table[] = {
+ {
+ .procname = "warn_limit",
+ .data = &warn_limit,
+ .maxlen = sizeof(warn_limit),
+ .mode = 0644,
+ .proc_handler = proc_douintvec,
+ },
+ { }
+};
+
+static __init int kernel_panic_sysctls_init(void)
+{
+ register_sysctl_init("kernel", kern_panic_table);
+ return 0;
+}
+late_initcall(kernel_panic_sysctls_init);
+#endif
+
+static atomic_t warn_count = ATOMIC_INIT(0);
+
+#ifdef CONFIG_SYSFS
+static ssize_t warn_count_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *page)
+{
+ return sysfs_emit(page, "%d\n", atomic_read(&warn_count));
+}
+
+static struct kobj_attribute warn_count_attr = __ATTR_RO(warn_count);
+
+static __init int kernel_panic_sysfs_init(void)
+{
+ sysfs_add_file_to_group(kernel_kobj, &warn_count_attr.attr, NULL);
+ return 0;
+}
+late_initcall(kernel_panic_sysfs_init);
+#endif
+
static long no_blink(int state)
{
return 0;
@@ -156,6 +197,19 @@ static void panic_print_sys_info(void)
ftrace_dump(DUMP_ALL);
}
+void check_panic_on_warn(const char *origin)
+{
+ unsigned int limit;
+
+ if (panic_on_warn)
+ panic("%s: panic_on_warn set ...\n", origin);
+
+ limit = READ_ONCE(warn_limit);
+ if (atomic_inc_return(&warn_count) >= limit && limit)
+ panic("%s: system warned too often (kernel.warn_limit is %d)",
+ origin, limit);
+}
+
/**
* panic - halt the system
* @fmt: The text string to print
@@ -173,6 +227,16 @@ void panic(const char *fmt, ...)
int old_cpu, this_cpu;
bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;
+ if (panic_on_warn) {
+ /*
+ * This thread may hit another WARN() in the panic path.
+ * Resetting this prevents additional WARN() from panicking the
+ * system on this thread. Other threads are blocked by the
+ * panic_mutex in panic().
+ */
+ panic_on_warn = 0;
+ }
+
/*
* Disable local interrupts. This will prevent panic_smp_self_stop
* from deadlocking the first cpu that invokes the panic, since
@@ -571,16 +635,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
if (args)
vprintk(args->fmt, args->args);
- if (panic_on_warn) {
- /*
- * This thread may hit another WARN() in the panic path.
- * Resetting this prevents additional WARN() from panicking the
- * system on this thread. Other threads are blocked by the
- * panic_mutex in panic().
- */
- panic_on_warn = 0;
- panic("panic_on_warn set ...\n");
- }
+ check_panic_on_warn("kernel");
print_modules();
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 406b4cbbec5e..f8934f9746e6 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -625,7 +625,7 @@ static void power_down(void)
int error;
if (hibernation_mode == HIBERNATION_SUSPEND) {
- error = suspend_devices_and_enter(PM_SUSPEND_MEM);
+ error = suspend_devices_and_enter(mem_sleep_current);
if (error) {
hibernation_mode = hibernation_ops ?
HIBERNATION_PLATFORM :
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 46455aa7951e..336e56e97b8d 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1680,8 +1680,8 @@ static unsigned long minimum_image_size(unsigned long saveable)
* /sys/power/reserved_size, respectively). To make this happen, we compute the
* total number of available page frames and allocate at least
*
- * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
- * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
+ * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
+ * - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
*
* of them, which corresponds to the maximum size of a hibernation image.
*
@@ -2377,8 +2377,9 @@ static void *get_highmem_page_buffer(struct page *page,
pbe->copy_page = tmp;
} else {
/* Copy of the page will be stored in normal memory */
- kaddr = safe_pages_list;
- safe_pages_list = safe_pages_list->next;
+ kaddr = __get_safe_page(ca->gfp_mask);
+ if (!kaddr)
+ return ERR_PTR(-ENOMEM);
pbe->copy_page = virt_to_page(kaddr);
}
pbe->next = highmem_pblist;
@@ -2558,8 +2559,9 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
return ERR_PTR(-ENOMEM);
}
pbe->orig_address = page_address(page);
- pbe->address = safe_pages_list;
- safe_pages_list = safe_pages_list->next;
+ pbe->address = __get_safe_page(ca->gfp_mask);
+ if (!pbe->address)
+ return ERR_PTR(-ENOMEM);
pbe->next = restore_pblist;
restore_pblist = pbe;
return pbe->address;
@@ -2590,8 +2592,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
return 0;
- handle->sync_read = 1;
-
if (!handle->cur) {
if (!buffer)
/* This makes the buffer be freed by swsusp_free() */
@@ -2632,7 +2632,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
memory_bm_position_reset(&orig_bm);
restore_pblist = NULL;
handle->buffer = get_buffer(&orig_bm, &ca);
- handle->sync_read = 0;
if (IS_ERR(handle->buffer))
return PTR_ERR(handle->buffer);
}
@@ -2644,9 +2643,8 @@ int snapshot_write_next(struct snapshot_handle *handle)
handle->buffer = get_buffer(&orig_bm, &ca);
if (IS_ERR(handle->buffer))
return PTR_ERR(handle->buffer);
- if (handle->buffer != buffer)
- handle->sync_read = 0;
}
+ handle->sync_read = (handle->buffer == buffer);
handle->cur++;
return PAGE_SIZE;
}
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index bcc9769e8a3b..85949b86f097 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -594,11 +594,11 @@ static int crc32_threadfn(void *data)
unsigned i;
while (1) {
- wait_event(d->go, atomic_read(&d->ready) ||
+ wait_event(d->go, atomic_read_acquire(&d->ready) ||
kthread_should_stop());
if (kthread_should_stop()) {
d->thr = NULL;
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
break;
}
@@ -607,7 +607,7 @@ static int crc32_threadfn(void *data)
for (i = 0; i < d->run_threads; i++)
*d->crc32 = crc32_le(*d->crc32,
d->unc[i], *d->unc_len[i]);
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
}
return 0;
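The ordering contract being introduced here, sketched abstractly against the shared data:

	/* producer */
	d->unc_len = n;				/* fill the payload */
	atomic_set_release(&d->ready, 1);	/* publish: payload before flag */
	wake_up(&d->go);

	/* consumer (kthread) */
	wait_event(d->go, atomic_read_acquire(&d->ready) || kthread_should_stop());
	/* acquire pairs with the release above, so the payload is visible here */

A plain atomic_set()/atomic_read() pair gives no such ordering, so the woken thread could previously observe stale payload data.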
@@ -637,12 +637,12 @@ static int lzo_compress_threadfn(void *data)
struct cmp_data *d = data;
while (1) {
- wait_event(d->go, atomic_read(&d->ready) ||
+ wait_event(d->go, atomic_read_acquire(&d->ready) ||
kthread_should_stop());
if (kthread_should_stop()) {
d->thr = NULL;
d->ret = -1;
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
break;
}
@@ -651,7 +651,7 @@ static int lzo_compress_threadfn(void *data)
d->ret = lzo1x_1_compress(d->unc, d->unc_len,
d->cmp + LZO_HEADER, &d->cmp_len,
d->wrk);
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
}
return 0;
@@ -789,7 +789,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
data[thr].unc_len = off;
- atomic_set(&data[thr].ready, 1);
+ atomic_set_release(&data[thr].ready, 1);
wake_up(&data[thr].go);
}
@@ -797,12 +797,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
break;
crc->run_threads = thr;
- atomic_set(&crc->ready, 1);
+ atomic_set_release(&crc->ready, 1);
wake_up(&crc->go);
for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
wait_event(data[thr].done,
- atomic_read(&data[thr].stop));
+ atomic_read_acquire(&data[thr].stop));
atomic_set(&data[thr].stop, 0);
ret = data[thr].ret;
@@ -841,7 +841,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
}
}
- wait_event(crc->done, atomic_read(&crc->stop));
+ wait_event(crc->done, atomic_read_acquire(&crc->stop));
atomic_set(&crc->stop, 0);
}
@@ -1121,12 +1121,12 @@ static int lzo_decompress_threadfn(void *data)
struct dec_data *d = data;
while (1) {
- wait_event(d->go, atomic_read(&d->ready) ||
+ wait_event(d->go, atomic_read_acquire(&d->ready) ||
kthread_should_stop());
if (kthread_should_stop()) {
d->thr = NULL;
d->ret = -1;
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
break;
}
@@ -1139,7 +1139,7 @@ static int lzo_decompress_threadfn(void *data)
flush_icache_range((unsigned long)d->unc,
(unsigned long)d->unc + d->unc_len);
- atomic_set(&d->stop, 1);
+ atomic_set_release(&d->stop, 1);
wake_up(&d->done);
}
return 0;
@@ -1327,7 +1327,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
}
if (crc->run_threads) {
- wait_event(crc->done, atomic_read(&crc->stop));
+ wait_event(crc->done, atomic_read_acquire(&crc->stop));
atomic_set(&crc->stop, 0);
crc->run_threads = 0;
}
@@ -1363,7 +1363,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
pg = 0;
}
- atomic_set(&data[thr].ready, 1);
+ atomic_set_release(&data[thr].ready, 1);
wake_up(&data[thr].go);
}
@@ -1382,7 +1382,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
wait_event(data[thr].done,
- atomic_read(&data[thr].stop));
+ atomic_read_acquire(&data[thr].stop));
atomic_set(&data[thr].stop, 0);
ret = data[thr].ret;
@@ -1413,7 +1413,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
ret = snapshot_write_next(snapshot);
if (ret <= 0) {
crc->run_threads = thr + 1;
- atomic_set(&crc->ready, 1);
+ atomic_set_release(&crc->ready, 1);
wake_up(&crc->go);
goto out_finish;
}
@@ -1421,13 +1421,13 @@ static int load_image_lzo(struct swap_map_handle *handle,
}
crc->run_threads = thr;
- atomic_set(&crc->ready, 1);
+ atomic_set_release(&crc->ready, 1);
wake_up(&crc->go);
}
out_finish:
if (crc->run_threads) {
- wait_event(crc->done, atomic_read(&crc->stop));
+ wait_event(crc->done, atomic_read_acquire(&crc->stop));
atomic_set(&crc->stop, 0);
}
stop = ktime_get();
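The atomic_set()/atomic_read() pairs converted above form a producer/consumer handshake: the release store must publish the payload (compressed buffers, lengths, CRC state) before the flag becomes visible, and the acquire load guarantees the waiter sees that payload once it observes the flag. A minimal userspace analogue of the pattern, sketched with C11 atomics (names are illustrative, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static int payload;      /* data handed from producer to consumer */
static atomic_int ready; /* handshake flag, 0 = not yet published */

static void producer(void)
{
	payload = 42;                            /* write the data first */
	atomic_store_explicit(&ready, 1,
			      memory_order_release); /* then publish the flag */
}

static void consumer(void)
{
	while (!atomic_load_explicit(&ready, memory_order_acquire))
		;                                /* spin until published */
	printf("%d\n", payload);                 /* guaranteed to print 42 */
}

int main(void) { producer(); consumer(); return 0; }

With plain relaxed accesses in place of the release/acquire pair, nothing orders the payload against the flag, which is precisely the gap the hunks above close.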
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 77438954cc2b..672d4e28fa8a 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -26,6 +26,7 @@
#include "power.h"
+static bool need_wait;
#define SNAPSHOT_MINOR 231
@@ -79,7 +80,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
* Resuming. We may need to wait for the image device to
* appear.
*/
- wait_for_device_probe();
+ need_wait = true;
data->swap = -1;
data->mode = O_WRONLY;
@@ -171,6 +172,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
ssize_t res;
loff_t pg_offp = *offp & ~PAGE_MASK;
+ if (need_wait) {
+ wait_for_device_probe();
+ need_wait = false;
+ }
+
lock_system_sleep();
data = filp->private_data;
@@ -206,6 +212,11 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
loff_t size;
sector_t offset;
+ if (need_wait) {
+ wait_for_device_probe();
+ need_wait = false;
+ }
+
if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
return -ENOTTY;
if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR)
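The new need_wait flag turns wait_for_device_probe() into a deferred, one-shot step: opening /dev/snapshot no longer blocks on device probing, and the wait happens at the first write() or ioctl() instead. A sketch of the deferral pattern (hypothetical names; no locking is shown because the real snapshot device is effectively single-open):

#include <stdbool.h>

static bool need_wait;            /* armed at open time, consumed on first use */

static void expensive_wait(void)  /* stand-in for wait_for_device_probe() */
{
}

static void first_use(void)       /* stand-in for snapshot_write()/ioctl() */
{
	if (need_wait) {          /* run the deferred step exactly once */
		expensive_wait();
		need_wait = false;
	}
	/* ... normal request handling follows ... */
}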
diff --git a/kernel/profile.c b/kernel/profile.c
index e97e42aaf202..b5ce18b6f1b9 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -109,6 +109,13 @@ int __ref profile_init(void)
/* only text is profiled */
prof_len = (_etext - _stext) >> prof_shift;
+
+ if (!prof_len) {
+ pr_warn("profiling shift: %u too large\n", prof_shift);
+ prof_on = 0;
+ return -EINVAL;
+ }
+
buffer_bytes = prof_len*sizeof(atomic_t);
if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
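The guard matters because prof_len is a plain right shift of the kernel text size: any prof_shift at or above log2(_etext - _stext) collapses it to zero, leaving a zero-sized profiling buffer for the allocation below. Illustrated with a hypothetical 16 MiB text section:

#include <stdio.h>

int main(void)
{
	unsigned long text_bytes = 1UL << 24;  /* hypothetical _etext - _stext */
	unsigned int prof_shift = 25;          /* larger than log2(text size) */
	unsigned long prof_len = text_bytes >> prof_shift;

	printf("prof_len=%lu\n", prof_len);    /* 0: nothing left to profile */
	return 0;
}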
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 5797cf2909b0..615283404d9d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2317,7 +2317,7 @@ void rcu_force_quiescent_state(void)
struct rcu_node *rnp_old = NULL;
/* Funnel through hierarchy to reduce memory contention. */
- rnp = __this_cpu_read(rcu_data.mynode);
+ rnp = raw_cpu_read(rcu_data.mynode);
for (; rnp != NULL; rnp = rnp->parent) {
ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
!raw_spin_trylock(&rnp->fqslock);
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 173e3ce60790..eca3df7f041c 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -523,7 +523,9 @@ static void synchronize_sched_expedited_wait(void)
mask = leaf_node_cpu_bit(rnp, cpu);
if (!(READ_ONCE(rnp->expmask) & mask))
continue;
+ preempt_disable(); // For smp_processor_id() in dump_cpu_task().
dump_cpu_task(cpu);
+ preempt_enable();
}
}
jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
diff --git a/kernel/reboot.c b/kernel/reboot.c
index ac19159d7158..a9f23d91025d 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -64,6 +64,7 @@ EXPORT_SYMBOL_GPL(pm_power_off_prepare);
void emergency_restart(void)
{
kmsg_dump(KMSG_DUMP_EMERG);
+ system_state = SYSTEM_RESTART;
machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
diff --git a/kernel/relay.c b/kernel/relay.c
index d3940becf2fc..1e11199c7d7c 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -163,13 +163,13 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
{
struct rchan_buf *buf;
- if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
+ if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t))
return NULL;
buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
if (!buf)
return NULL;
- buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t *),
+ buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t),
GFP_KERNEL);
if (!buf->padding)
goto free_buf;
@@ -997,14 +997,14 @@ static void relay_file_read_consume(struct rchan_buf *buf,
/*
* relay_file_read_avail - boolean, are there unconsumed bytes available?
*/
-static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
+static int relay_file_read_avail(struct rchan_buf *buf)
{
size_t subbuf_size = buf->chan->subbuf_size;
size_t n_subbufs = buf->chan->n_subbufs;
size_t produced = buf->subbufs_produced;
size_t consumed = buf->subbufs_consumed;
- relay_file_read_consume(buf, read_pos, 0);
+ relay_file_read_consume(buf, 0, 0);
consumed = buf->subbufs_consumed;
@@ -1065,23 +1065,21 @@ static size_t relay_file_read_subbuf_avail(size_t read_pos,
/**
* relay_file_read_start_pos - find the first available byte to read
- * @read_pos: file read position
* @buf: relay channel buffer
*
- * If the @read_pos is in the middle of padding, return the
+ * If the read_pos is in the middle of padding, return the
* position of the first actually available byte, otherwise
* return the original value.
*/
-static size_t relay_file_read_start_pos(size_t read_pos,
- struct rchan_buf *buf)
+static size_t relay_file_read_start_pos(struct rchan_buf *buf)
{
size_t read_subbuf, padding, padding_start, padding_end;
size_t subbuf_size = buf->chan->subbuf_size;
size_t n_subbufs = buf->chan->n_subbufs;
size_t consumed = buf->subbufs_consumed % n_subbufs;
+ size_t read_pos = (consumed * subbuf_size + buf->bytes_consumed)
+ % (n_subbufs * subbuf_size);
- if (!read_pos)
- read_pos = consumed * subbuf_size + buf->bytes_consumed;
read_subbuf = read_pos / subbuf_size;
padding = buf->padding[read_subbuf];
padding_start = (read_subbuf + 1) * subbuf_size - padding;
@@ -1137,10 +1135,10 @@ static ssize_t relay_file_read(struct file *filp,
do {
void *from;
- if (!relay_file_read_avail(buf, *ppos))
+ if (!relay_file_read_avail(buf))
break;
- read_start = relay_file_read_start_pos(*ppos, buf);
+ read_start = relay_file_read_start_pos(buf);
avail = relay_file_read_subbuf_avail(read_start, buf);
if (!avail)
break;
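With read_pos now derived from the buffer's own consumed counters, every reader computes a consistent position instead of trusting a caller-supplied *ppos that may be stale after sub-buffer consumption. A worked example of the new computation (values are illustrative):

#include <assert.h>
#include <stddef.h>

int main(void)
{
	size_t subbuf_size = 4096, n_subbufs = 4;
	size_t subbufs_consumed = 5, bytes_consumed = 100;

	size_t consumed = subbufs_consumed % n_subbufs;        /* 1 */
	size_t read_pos = (consumed * subbuf_size + bytes_consumed)
			  % (n_subbufs * subbuf_size);         /* 4196 */

	assert(read_pos == 1 * 4096 + 100);  /* offset 100 into sub-buffer 1 */
	return 0;
}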
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5befdecefe94..51ac62637e4e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1411,6 +1411,9 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
+ if (task_on_rq_migrating(p))
+ flags |= ENQUEUE_MIGRATED;
+
if (task_contributes_to_load(p))
rq->nr_uninterruptible--;
@@ -3964,8 +3967,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
print_ip_sym(preempt_disable_ip);
pr_cont("\n");
}
- if (panic_on_warn)
- panic("scheduling while atomic\n");
+ check_panic_on_warn("scheduling while atomic");
dump_stack();
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
@@ -4554,20 +4556,21 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
if (!dl_prio(p->normal_prio) ||
(pi_task && dl_prio(pi_task->prio) &&
dl_entity_preempt(&pi_task->dl, &p->dl))) {
- p->dl.dl_boosted = 1;
+ p->dl.pi_se = pi_task->dl.pi_se;
queue_flag |= ENQUEUE_REPLENISH;
- } else
- p->dl.dl_boosted = 0;
+ } else {
+ p->dl.pi_se = &p->dl;
+ }
p->sched_class = &dl_sched_class;
} else if (rt_prio(prio)) {
if (dl_prio(oldprio))
- p->dl.dl_boosted = 0;
+ p->dl.pi_se = &p->dl;
if (oldprio < prio)
queue_flag |= ENQUEUE_HEAD;
p->sched_class = &rt_sched_class;
} else {
if (dl_prio(oldprio))
- p->dl.dl_boosted = 0;
+ p->dl.pi_se = &p->dl;
if (rt_prio(oldprio))
p->rt.timeout = 0;
p->sched_class = &fair_sched_class;
@@ -5658,14 +5661,14 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
if (len & (sizeof(unsigned long)-1))
return -EINVAL;
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
ret = sched_getaffinity(pid, mask);
if (ret == 0) {
unsigned int retlen = min(len, cpumask_size());
- if (copy_to_user(user_mask_ptr, mask, retlen))
+ if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
ret = -EFAULT;
else
ret = retlen;
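Pairing zalloc_cpumask_var() with a copy of cpumask_bits() guarantees that the bytes between nr_cpu_ids and retlen which reach userspace are zero rather than stale allocator contents. A userspace-flavoured sketch of why the zeroing matters (calloc() stands in for zalloc_cpumask_var(); all sizes are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t copied = 128;  /* bytes handed back to userspace (retlen) */
	size_t filled = 16;   /* bytes the kernel actually wrote */

	/* calloc() zeroes the tail that the fill step never touches */
	unsigned char *mask = calloc(1, copied);
	if (!mask)
		return 1;
	memset(mask, 0xff, filled);  /* stand-in for sched_getaffinity() */

	printf("last byte = %d\n", mask[copied - 1]);  /* 0: no stale data */
	free(mask);
	return 0;
}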
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 2bda9fdba31c..ba3d7c223999 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -43,6 +43,28 @@ static inline int on_dl_rq(struct sched_dl_entity *dl_se)
return !RB_EMPTY_NODE(&dl_se->rb_node);
}
+#ifdef CONFIG_RT_MUTEXES
+static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
+{
+ return dl_se->pi_se;
+}
+
+static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
+{
+ return pi_of(dl_se) != dl_se;
+}
+#else
+static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
+{
+ return dl_se;
+}
+
+static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
+{
+ return false;
+}
+#endif
+
#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
@@ -657,7 +679,7 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
- WARN_ON(dl_se->dl_boosted);
+ WARN_ON(is_dl_boosted(dl_se));
WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
/*
@@ -695,21 +717,20 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
 * could happen are, typically, an entity voluntarily trying to overcome its
* runtime, or it just underestimated it during sched_setattr().
*/
-static void replenish_dl_entity(struct sched_dl_entity *dl_se,
- struct sched_dl_entity *pi_se)
+static void replenish_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
- BUG_ON(pi_se->dl_runtime <= 0);
+ BUG_ON(pi_of(dl_se)->dl_runtime <= 0);
/*
* This could be the case for a !-dl task that is boosted.
* Just go with full inherited parameters.
*/
if (dl_se->dl_deadline == 0) {
- dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
- dl_se->runtime = pi_se->dl_runtime;
+ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
+ dl_se->runtime = pi_of(dl_se)->dl_runtime;
}
if (dl_se->dl_yielded && dl_se->runtime > 0)
@@ -722,8 +743,8 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
* arbitrary large.
*/
while (dl_se->runtime <= 0) {
- dl_se->deadline += pi_se->dl_period;
- dl_se->runtime += pi_se->dl_runtime;
+ dl_se->deadline += pi_of(dl_se)->dl_period;
+ dl_se->runtime += pi_of(dl_se)->dl_runtime;
}
/*
@@ -737,8 +758,8 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
*/
if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
printk_deferred_once("sched: DL replenish lagged too much\n");
- dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
- dl_se->runtime = pi_se->dl_runtime;
+ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
+ dl_se->runtime = pi_of(dl_se)->dl_runtime;
}
if (dl_se->dl_yielded)
@@ -771,8 +792,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
* task with deadline equal to period this is the same of using
* dl_period instead of dl_deadline in the equation above.
*/
-static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
- struct sched_dl_entity *pi_se, u64 t)
+static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
{
u64 left, right;
@@ -794,9 +814,9 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
* of anything below microseconds resolution is actually fiction
* (but still we want to give the user that illusion >;).
*/
- left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
+ left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
right = ((dl_se->deadline - t) >> DL_SCALE) *
- (pi_se->dl_runtime >> DL_SCALE);
+ (pi_of(dl_se)->dl_runtime >> DL_SCALE);
return dl_time_before(right, left);
}
@@ -881,24 +901,23 @@ static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
* Please refer to the comments update_dl_revised_wakeup() function to find
* more about the Revised CBS rule.
*/
-static void update_dl_entity(struct sched_dl_entity *dl_se,
- struct sched_dl_entity *pi_se)
+static void update_dl_entity(struct sched_dl_entity *dl_se)
{
struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
struct rq *rq = rq_of_dl_rq(dl_rq);
if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
- dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
+ dl_entity_overflow(dl_se, rq_clock(rq))) {
if (unlikely(!dl_is_implicit(dl_se) &&
!dl_time_before(dl_se->deadline, rq_clock(rq)) &&
- !dl_se->dl_boosted)){
+ !is_dl_boosted(dl_se))) {
update_dl_revised_wakeup(dl_se, rq);
return;
}
- dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
- dl_se->runtime = pi_se->dl_runtime;
+ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
+ dl_se->runtime = pi_of(dl_se)->dl_runtime;
}
}
@@ -997,7 +1016,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
* The task might have been boosted by someone else and might be in the
 * boosting/deboosting path, it's not throttled.
*/
- if (dl_se->dl_boosted)
+ if (is_dl_boosted(dl_se))
goto unlock;
/*
@@ -1025,7 +1044,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
* but do not enqueue -- wait for our wakeup to do that.
*/
if (!task_on_rq_queued(p)) {
- replenish_dl_entity(dl_se, dl_se);
+ replenish_dl_entity(dl_se);
goto unlock;
}
@@ -1115,7 +1134,7 @@ static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
- if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
+ if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
return;
dl_se->dl_throttled = 1;
if (dl_se->runtime > 0)
@@ -1246,7 +1265,7 @@ throttle:
dl_se->dl_overrun = 1;
__dequeue_task_dl(rq, curr, 0);
- if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
+ if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
if (!is_leftmost(curr, &rq->dl))
@@ -1440,8 +1459,7 @@ static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
}
static void
-enqueue_dl_entity(struct sched_dl_entity *dl_se,
- struct sched_dl_entity *pi_se, int flags)
+enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
{
BUG_ON(on_dl_rq(dl_se));
@@ -1452,9 +1470,9 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
*/
if (flags & ENQUEUE_WAKEUP) {
task_contending(dl_se, flags);
- update_dl_entity(dl_se, pi_se);
+ update_dl_entity(dl_se);
} else if (flags & ENQUEUE_REPLENISH) {
- replenish_dl_entity(dl_se, pi_se);
+ replenish_dl_entity(dl_se);
} else if ((flags & ENQUEUE_RESTORE) &&
dl_time_before(dl_se->deadline,
rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
@@ -1471,28 +1489,40 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
- struct task_struct *pi_task = rt_mutex_get_top_task(p);
- struct sched_dl_entity *pi_se = &p->dl;
-
- /*
- * Use the scheduling parameters of the top pi-waiter task if:
- * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
- * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
- * smaller than our deadline OR we are a !SCHED_DEADLINE task getting
- * boosted due to a SCHED_DEADLINE pi-waiter).
- * Otherwise we keep our runtime and deadline.
- */
- if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
- pi_se = &pi_task->dl;
+ if (is_dl_boosted(&p->dl)) {
+ /*
+ * Because of delays in the detection of the overrun of a
+ * thread's runtime, it might be the case that a thread
+ * goes to sleep in a rt mutex with negative runtime. As
+ * a consequence, the thread will be throttled.
+ *
+ * While waiting for the mutex, this thread can also be
+ * boosted via PI, resulting in a thread that is throttled
+ * and boosted at the same time.
+ *
+ * In this case, the boost overrides the throttle.
+ */
+ if (p->dl.dl_throttled) {
+ /*
+ * The replenish timer needs to be canceled. No
+ * problem if it fires concurrently: boosted threads
+ * are ignored in dl_task_timer().
+ */
+ hrtimer_try_to_cancel(&p->dl.dl_timer);
+ p->dl.dl_throttled = 0;
+ }
} else if (!dl_prio(p->normal_prio)) {
/*
- * Special case in which we have a !SCHED_DEADLINE task
- * that is going to be deboosted, but exceeds its
- * runtime while doing so. No point in replenishing
- * it, as it's going to return back to its original
- * scheduling class after this.
+ * Special case in which we have a !SCHED_DEADLINE task that is going
+ * to be deboosted, but exceeds its runtime while doing so. No point in
+ * replenishing it, as it's going to return to its original
+ * scheduling class after this. If it has been throttled, we need to
+ * clear the flag, otherwise the task may wake up as throttled after
+ * being boosted again with no means to replenish the runtime and clear
+ * the throttle.
*/
- BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
+ p->dl.dl_throttled = 0;
+ BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
return;
}
@@ -1529,7 +1559,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
return;
}
- enqueue_dl_entity(&p->dl, pi_se, flags);
+ enqueue_dl_entity(&p->dl, flags);
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
@@ -1763,8 +1793,7 @@ static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
deadline_queue_push_tasks(rq);
}
-static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
- struct dl_rq *dl_rq)
+static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
{
struct rb_node *left = rb_first_cached(&dl_rq->root);
@@ -1786,7 +1815,7 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
if (!sched_dl_runnable(rq))
return NULL;
- dl_se = pick_next_dl_entity(rq, dl_rq);
+ dl_se = pick_next_dl_entity(dl_rq);
BUG_ON(!dl_se);
p = dl_task_of(dl_se);
set_next_task_dl(rq, p, true);
@@ -2698,11 +2727,14 @@ void __dl_clear_params(struct task_struct *p)
dl_se->dl_bw = 0;
dl_se->dl_density = 0;
- dl_se->dl_boosted = 0;
dl_se->dl_throttled = 0;
dl_se->dl_yielded = 0;
dl_se->dl_non_contending = 0;
dl_se->dl_overrun = 0;
+
+#ifdef CONFIG_RT_MUTEXES
+ dl_se->pi_se = dl_se;
+#endif
}
bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
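The net effect of the deadline.c rework is that the priority-inheritance donor is reached through one pointer, p->dl.pi_se, instead of being threaded through every call chain as a pi_se argument. A self-contained model of the invariant (the helper name mirrors the patch; the struct is mocked):

#include <assert.h>

struct dl_entity {
	unsigned long long dl_runtime;
	struct dl_entity *pi_se;  /* self when not boosted, donor when boosted */
};

static struct dl_entity *pi_of(struct dl_entity *dl_se)
{
	return dl_se->pi_se;
}

int main(void)
{
	struct dl_entity task  = { .dl_runtime = 10 };
	struct dl_entity donor = { .dl_runtime = 50 };

	task.pi_se = &task;                      /* __dl_clear_params() default */
	assert(pi_of(&task)->dl_runtime == 10);  /* own parameters in effect */

	task.pi_se = &donor;                     /* rt_mutex_setprio() boosting */
	assert(pi_of(&task)->dl_runtime == 50);  /* donor's parameters inherited */
	return 0;
}

is_dl_boosted() then reduces to pi_of(dl_se) != dl_se, which is why the old dl_boosted bit can be dropped from __dl_clear_params() above.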
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d2a68ae7596e..2680216234ff 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3877,6 +3877,29 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
#endif
}
+static inline bool entity_is_long_sleeper(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq;
+ u64 sleep_time;
+
+ if (se->exec_start == 0)
+ return false;
+
+ cfs_rq = cfs_rq_of(se);
+
+ sleep_time = rq_clock_task(rq_of(cfs_rq));
+
+ /* Can happen while migrating because of clock task divergence */
+ if (sleep_time <= se->exec_start)
+ return false;
+
+ sleep_time -= se->exec_start;
+ if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD)))
+ return true;
+
+ return false;
+}
+
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
@@ -3905,8 +3928,29 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
vruntime -= thresh;
}
- /* ensure we never gain time by being placed backwards. */
- se->vruntime = max_vruntime(se->vruntime, vruntime);
+ /*
+ * Pull vruntime of the entity being placed to the base level of
+ * cfs_rq, to prevent boosting it if placed backwards.
+ * However, min_vruntime can advance much faster than real time, with
+ * the extreme being when an entity with the minimal weight always runs
+ * on the cfs_rq. If the waking entity slept for a long time, its
+ * vruntime difference from min_vruntime may overflow s64 and their
+ * comparison may get inversed, so ignore the entity's original
+ * vruntime in that case.
+ * The maximal vruntime speedup is given by the ratio of normal to
+ * minimal weight: scale_load_down(NICE_0_LOAD) / MIN_SHARES.
+ * When placing a migrated waking entity, its exec_start has been set
+ * from a different rq. In order to take into account a possible
+ * divergence between new and prev rq's clocks task because of irq and
+ * stolen time, we take an additional margin.
+ * So, cutting off on the sleep time of
+ * 2^63 / scale_load_down(NICE_0_LOAD) ~ 104 days
+ * should be safe.
+ */
+ if (entity_is_long_sleeper(se))
+ se->vruntime = vruntime;
+ else
+ se->vruntime = max_vruntime(se->vruntime, vruntime);
}
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
@@ -4002,6 +4046,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
if (flags & ENQUEUE_WAKEUP)
place_entity(cfs_rq, se, 0);
+ /* Entity has migrated, no longer consider this task hot */
+ if (flags & ENQUEUE_MIGRATED)
+ se->exec_start = 0;
check_schedstat_required();
update_stats_enqueue(cfs_rq, se, flags);
@@ -6627,9 +6674,6 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
/* Tell new CPU we are migrated */
p->se.avg.last_update_time = 0;
- /* We have migrated, no longer consider this task hot */
- p->se.exec_start = 0;
-
update_scan_period(p, new_cpu);
}
@@ -8894,7 +8938,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
.sd = sd,
.dst_cpu = this_cpu,
.dst_rq = this_rq,
- .dst_grpmask = sched_group_span(sd->groups),
+ .dst_grpmask = group_balance_mask(sd->groups),
.idle = idle,
.loop_break = sched_nr_migrate_break,
.cpus = cpus,
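The cutoff in entity_is_long_sleeper() works out as promised by the comment: taking scale_load_down(NICE_0_LOAD) = 1024, as on a standard 64-bit build, 2^63 ns / 1024 = 2^53 ns, roughly 104 days:

#include <stdio.h>

int main(void)
{
	unsigned long long cutoff_ns = (1ULL << 63) / 1024;

	printf("%llu ns ~= %.1f days\n", cutoff_ns,
	       cutoff_ns / 1e9 / 86400.0);  /* 9007199254740992 ns ~= 104.2 days */
	return 0;
}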
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 46c142b69598..ca52041d5899 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -25,6 +25,8 @@
| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \
| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
+static DEFINE_MUTEX(membarrier_ipi_mutex);
+
static void ipi_mb(void *info)
{
smp_mb(); /* IPIs should be serializing but paranoid. */
@@ -97,6 +99,7 @@ static int membarrier_global_expedited(void)
if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
return -ENOMEM;
+ mutex_lock(&membarrier_ipi_mutex);
cpus_read_lock();
rcu_read_lock();
for_each_online_cpu(cpu) {
@@ -143,6 +146,8 @@ static int membarrier_global_expedited(void)
* rq->curr modification in scheduler.
*/
smp_mb(); /* exit from system call is not a mb */
+ mutex_unlock(&membarrier_ipi_mutex);
+
return 0;
}
@@ -178,6 +183,7 @@ static int membarrier_private_expedited(int flags)
if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
return -ENOMEM;
+ mutex_lock(&membarrier_ipi_mutex);
cpus_read_lock();
rcu_read_lock();
for_each_online_cpu(cpu) {
@@ -212,6 +218,7 @@ static int membarrier_private_expedited(int flags)
* rq->curr modification in scheduler.
*/
smp_mb(); /* exit from system call is not a mb */
+ mutex_unlock(&membarrier_ipi_mutex);
return 0;
}
@@ -253,6 +260,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
* between threads which are users of @mm has its membarrier state
* updated.
*/
+ mutex_lock(&membarrier_ipi_mutex);
cpus_read_lock();
rcu_read_lock();
for_each_online_cpu(cpu) {
@@ -269,6 +277,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
free_cpumask_var(tmpmask);
cpus_read_unlock();
+ mutex_unlock(&membarrier_ipi_mutex);
return 0;
}
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 9dd83eb74a9d..8c19a5438d0e 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -1092,10 +1092,11 @@ void psi_trigger_destroy(struct psi_trigger *t)
group = t->group;
/*
- * Wakeup waiters to stop polling. Can happen if cgroup is deleted
- * from under a polling process.
+ * Wakeup waiters to stop polling and clear the queue to prevent it from
+ * being accessed later. Can happen if cgroup is deleted from under a
+ * polling process.
*/
- wake_up_interruptible(&t->event_wait);
+ wake_up_pollfree(&t->event_wait);
mutex_lock(&group->trigger_lock);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 28c82dee13ea..873f364cb3c6 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -8,7 +8,7 @@
#include "pelt.h"
int sched_rr_timeslice = RR_TIMESLICE;
-int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
+int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;
@@ -437,7 +437,7 @@ static inline void rt_queue_push_tasks(struct rq *rq)
#endif /* CONFIG_SMP */
static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
-static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
+static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
@@ -519,7 +519,7 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
rt_se = rt_rq->tg->rt_se[cpu];
if (!rt_se) {
- dequeue_top_rt_rq(rt_rq);
+ dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
}
@@ -605,7 +605,7 @@ static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
- dequeue_top_rt_rq(rt_rq);
+ dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
}
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
@@ -1004,7 +1004,7 @@ static void update_curr_rt(struct rq *rq)
}
static void
-dequeue_top_rt_rq(struct rt_rq *rt_rq)
+dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
@@ -1015,7 +1015,7 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq)
BUG_ON(!rq->nr_running);
- sub_nr_running(rq, rt_rq->rt_nr_running);
+ sub_nr_running(rq, count);
rt_rq->rt_queued = 0;
}
@@ -1294,18 +1294,21 @@ static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flag
static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct sched_rt_entity *back = NULL;
+ unsigned int rt_nr_running;
for_each_sched_rt_entity(rt_se) {
rt_se->back = back;
back = rt_se;
}
- dequeue_top_rt_rq(rt_rq_of_se(back));
+ rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
for (rt_se = back; rt_se; rt_se = rt_se->back) {
if (on_rt_rq(rt_se))
__dequeue_rt_entity(rt_se, flags);
}
+
+ dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
}
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
@@ -1547,8 +1550,7 @@ static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool f
rt_queue_push_tasks(rq);
}
-static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
- struct rt_rq *rt_rq)
+static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
{
struct rt_prio_array *array = &rt_rq->active;
struct sched_rt_entity *next = NULL;
@@ -1559,6 +1561,8 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
BUG_ON(idx >= MAX_RT_PRIO);
queue = array->queue + idx;
+ if (SCHED_WARN_ON(list_empty(queue)))
+ return NULL;
next = list_entry(queue->next, struct sched_rt_entity, run_list);
return next;
@@ -1570,8 +1574,9 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
struct rt_rq *rt_rq = &rq->rt;
do {
- rt_se = pick_next_rt_entity(rq, rt_rq);
- BUG_ON(!rt_se);
+ rt_se = pick_next_rt_entity(rt_rq);
+ if (unlikely(!rt_se))
+ return NULL;
rt_rq = group_rt_rq(rt_se);
} while (rt_rq);
@@ -2654,9 +2659,6 @@ static int sched_rt_global_constraints(void)
static int sched_rt_global_validate(void)
{
- if (sysctl_sched_rt_period <= 0)
- return -EINVAL;
-
if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
((u64)sysctl_sched_rt_runtime *
@@ -2688,7 +2690,7 @@ int sched_rt_handler(struct ctl_table *table, int write,
old_period = sysctl_sched_rt_period;
old_runtime = sysctl_sched_rt_runtime;
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (!ret && write) {
ret = sched_rt_global_validate();
@@ -2733,6 +2735,9 @@ int sched_rr_handler(struct ctl_table *table, int write,
sched_rr_timeslice =
sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
msecs_to_jiffies(sysctl_sched_rr_timeslice);
+
+ if (sysctl_sched_rr_timeslice <= 0)
+ sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
}
mutex_unlock(&mutex);
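The sysctl_sched_rr_timeslice change is a pure integer-rounding fix: dividing MSEC_PER_SEC by HZ before multiplying truncates whenever HZ does not divide 1000 evenly. With HZ=300, for instance, a 100 ms timeslice used to be reported as 90 ms:

#include <stdio.h>

int main(void)
{
	int HZ = 300;
	int RR_TIMESLICE = 100 * HZ / 1000;      /* 30 jiffies for 100 ms */

	int old = (1000 / HZ) * RR_TIMESLICE;    /* 3 * 30 = 90 ms (truncated) */
	int fixed = (1000 * RR_TIMESLICE) / HZ;  /* 30000 / 300 = 100 ms */

	printf("old=%d ms, fixed=%d ms\n", old, fixed);
	return 0;
}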
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 7d668b31dbc6..c76fe1d4d91e 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -384,7 +384,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
int ret = default_wake_function(wq_entry, mode, sync, key);
if (ret)
- list_del_init(&wq_entry->entry);
+ list_del_init_careful(&wq_entry->entry);
return ret;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index b075fe84eb5a..23e88587df87 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1534,6 +1534,8 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
if (resource >= RLIM_NLIMITS)
return -EINVAL;
+ resource = array_index_nospec(resource, RLIM_NLIMITS);
+
if (new_rlim) {
if (new_rlim->rlim_cur > new_rlim->rlim_max)
return -EINVAL;
@@ -1709,74 +1711,87 @@ void getrusage(struct task_struct *p, int who, struct rusage *r)
struct task_struct *t;
unsigned long flags;
u64 tgutime, tgstime, utime, stime;
- unsigned long maxrss = 0;
+ unsigned long maxrss;
+ struct mm_struct *mm;
+ struct signal_struct *sig = p->signal;
+ unsigned int seq = 0;
- memset((char *)r, 0, sizeof (*r));
+retry:
+ memset(r, 0, sizeof(*r));
utime = stime = 0;
+ maxrss = 0;
if (who == RUSAGE_THREAD) {
task_cputime_adjusted(current, &utime, &stime);
accumulate_thread_rusage(p, r);
- maxrss = p->signal->maxrss;
- goto out;
+ maxrss = sig->maxrss;
+ goto out_thread;
}
- if (!lock_task_sighand(p, &flags))
- return;
+ flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
switch (who) {
case RUSAGE_BOTH:
case RUSAGE_CHILDREN:
- utime = p->signal->cutime;
- stime = p->signal->cstime;
- r->ru_nvcsw = p->signal->cnvcsw;
- r->ru_nivcsw = p->signal->cnivcsw;
- r->ru_minflt = p->signal->cmin_flt;
- r->ru_majflt = p->signal->cmaj_flt;
- r->ru_inblock = p->signal->cinblock;
- r->ru_oublock = p->signal->coublock;
- maxrss = p->signal->cmaxrss;
+ utime = sig->cutime;
+ stime = sig->cstime;
+ r->ru_nvcsw = sig->cnvcsw;
+ r->ru_nivcsw = sig->cnivcsw;
+ r->ru_minflt = sig->cmin_flt;
+ r->ru_majflt = sig->cmaj_flt;
+ r->ru_inblock = sig->cinblock;
+ r->ru_oublock = sig->coublock;
+ maxrss = sig->cmaxrss;
if (who == RUSAGE_CHILDREN)
break;
/* fall through */
case RUSAGE_SELF:
- thread_group_cputime_adjusted(p, &tgutime, &tgstime);
- utime += tgutime;
- stime += tgstime;
- r->ru_nvcsw += p->signal->nvcsw;
- r->ru_nivcsw += p->signal->nivcsw;
- r->ru_minflt += p->signal->min_flt;
- r->ru_majflt += p->signal->maj_flt;
- r->ru_inblock += p->signal->inblock;
- r->ru_oublock += p->signal->oublock;
- if (maxrss < p->signal->maxrss)
- maxrss = p->signal->maxrss;
- t = p;
- do {
+ r->ru_nvcsw += sig->nvcsw;
+ r->ru_nivcsw += sig->nivcsw;
+ r->ru_minflt += sig->min_flt;
+ r->ru_majflt += sig->maj_flt;
+ r->ru_inblock += sig->inblock;
+ r->ru_oublock += sig->oublock;
+ if (maxrss < sig->maxrss)
+ maxrss = sig->maxrss;
+
+ rcu_read_lock();
+ __for_each_thread(sig, t)
accumulate_thread_rusage(t, r);
- } while_each_thread(p, t);
+ rcu_read_unlock();
+
break;
default:
BUG();
}
- unlock_task_sighand(p, &flags);
-out:
- r->ru_utime = ns_to_timeval(utime);
- r->ru_stime = ns_to_timeval(stime);
+ if (need_seqretry(&sig->stats_lock, seq)) {
+ seq = 1;
+ goto retry;
+ }
+ done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
- if (who != RUSAGE_CHILDREN) {
- struct mm_struct *mm = get_task_mm(p);
+ if (who == RUSAGE_CHILDREN)
+ goto out_children;
- if (mm) {
- setmax_mm_hiwater_rss(&maxrss, mm);
- mmput(mm);
- }
+ thread_group_cputime_adjusted(p, &tgutime, &tgstime);
+ utime += tgutime;
+ stime += tgstime;
+
+out_thread:
+ mm = get_task_mm(p);
+ if (mm) {
+ setmax_mm_hiwater_rss(&maxrss, mm);
+ mmput(mm);
}
+
+out_children:
r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
+ r->ru_utime = ns_to_kernel_old_timeval(utime);
+ r->ru_stime = ns_to_kernel_old_timeval(stime);
}
SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
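array_index_nospec() closes a Spectre-v1 window in do_prlimit(): the resource >= RLIM_NLIMITS branch can be mispredicted, letting a transiently out-of-bounds index feed a cache side channel before the check retires. The underlying idea is a branchless clamp; a simplified sketch of the masking (the kernel's array_index_mask_nospec() is arch-specific and deliberately avoids trusting the compiler's code generation for the comparison):

#include <stddef.h>

/* Branchless clamp: returns idx when idx < size, else 0. Because no
 * branch is involved, a misspeculated path cannot see a value >= size. */
static size_t index_nospec(size_t idx, size_t size)
{
	size_t mask = (size_t)0 - (idx < size);  /* all-ones or all-zeros */

	return idx & mask;
}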
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 34b76895b81e..189eed03e4e3 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -268,6 +268,7 @@ COND_SYSCALL_COMPAT(keyctl);
/* mm/fadvise.c */
COND_SYSCALL(fadvise64_64);
+COND_SYSCALL_COMPAT(fadvise64_64);
/* mm/, CONFIG_MMU only */
COND_SYSCALL(swapon);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 6f971807bf79..865e539c7354 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -465,6 +465,8 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = sched_rt_handler,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = SYSCTL_INT_MAX,
},
{
.procname = "sched_rt_runtime_us",
@@ -472,6 +474,8 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = sched_rt_handler,
+ .extra1 = &neg_one,
+ .extra2 = SYSCTL_INT_MAX,
},
{
.procname = "sched_rr_timeslice_ms",
@@ -1563,6 +1567,14 @@ static struct ctl_table vm_table[] = {
.proc_handler = percpu_pagelist_fraction_sysctl_handler,
.extra1 = SYSCTL_ZERO,
},
+ {
+ .procname = "page_lock_unfairness",
+ .data = &sysctl_page_lock_unfairness,
+ .maxlen = sizeof(sysctl_page_lock_unfairness),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ },
#ifdef CONFIG_MMU
{
.procname = "max_map_count",
@@ -2156,13 +2168,14 @@ int proc_dostring(struct ctl_table *table, int write,
(char __user *)buffer, lenp, ppos);
}
-static size_t proc_skip_spaces(char **buf)
+static void proc_skip_spaces(char **buf, size_t *size)
{
- size_t ret;
- char *tmp = skip_spaces(*buf);
- ret = tmp - *buf;
- *buf = tmp;
- return ret;
+ while (*size) {
+ if (!isspace(**buf))
+ break;
+ (*size)--;
+ (*buf)++;
+ }
}
static void proc_skip_char(char **buf, size_t *size, const char v)
@@ -2231,13 +2244,12 @@ static int proc_get_long(char **buf, size_t *size,
unsigned long *val, bool *neg,
const char *perm_tr, unsigned perm_tr_len, char *tr)
{
- int len;
char *p, tmp[TMPBUFLEN];
+ ssize_t len = *size;
- if (!*size)
+ if (len <= 0)
return -EINVAL;
- len = *size;
if (len > TMPBUFLEN - 1)
len = TMPBUFLEN - 1;
@@ -2400,7 +2412,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
bool neg;
if (write) {
- left -= proc_skip_spaces(&p);
+ proc_skip_spaces(&p, &left);
if (!left)
break;
@@ -2431,7 +2443,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
if (!write && !first && left && !err)
err = proc_put_char(&buffer, &left, '\n');
if (write && !err && left)
- left -= proc_skip_spaces(&p);
+ proc_skip_spaces(&p, &left);
if (write) {
kfree(kbuf);
if (first)
@@ -2480,7 +2492,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data,
if (IS_ERR(kbuf))
return -EINVAL;
- left -= proc_skip_spaces(&p);
+ proc_skip_spaces(&p, &left);
if (!left) {
err = -EINVAL;
goto out_free;
@@ -2500,7 +2512,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data,
}
if (!err && left)
- left -= proc_skip_spaces(&p);
+ proc_skip_spaces(&p, &left);
out_free:
kfree(kbuf);
@@ -2914,7 +2926,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
if (write) {
bool neg;
- left -= proc_skip_spaces(&p);
+ proc_skip_spaces(&p, &left);
if (!left)
break;
@@ -2947,7 +2959,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
if (!write && !first && left && !err)
err = proc_put_char(&buffer, &left, '\n');
if (write && !err)
- left -= proc_skip_spaces(&p);
+ proc_skip_spaces(&p, &left);
if (write) {
kfree(kbuf);
if (first)
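The rewritten proc_skip_spaces() honors the remaining byte count instead of calling skip_spaces(), which scans for a NUL terminator and can walk past a buffer that is not terminated; it also retires the error-prone "left -= ..." bookkeeping at every call site. A self-contained check of the bounded behaviour, assuming a deliberately unterminated buffer:

#include <assert.h>
#include <ctype.h>
#include <stddef.h>

static void proc_skip_spaces(char **buf, size_t *size)
{
	while (*size) {
		if (!isspace((unsigned char)**buf))
			break;
		(*size)--;
		(*buf)++;
	}
}

int main(void)
{
	char data[3] = { ' ', ' ', ' ' };  /* all spaces, no trailing NUL */
	char *p = data;
	size_t left = sizeof(data);

	proc_skip_spaces(&p, &left);
	assert(left == 0 && p == data + 3);  /* stops exactly at the boundary */
	return 0;
}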
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 0e96c38204a8..9bd4e492823b 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -479,11 +479,35 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
}
EXPORT_SYMBOL_GPL(alarm_forward);
-u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
+static u64 __alarm_forward_now(struct alarm *alarm, ktime_t interval, bool throttle)
{
struct alarm_base *base = &alarm_bases[alarm->type];
+ ktime_t now = base->gettime();
+
+ if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && throttle) {
+ /*
+ * Same issue as with posix_timer_fn(). Timers which are
+ * periodic but the signal is ignored can starve the system
+ * with a very small interval. The real fix which was
+ * promised in the context of posix_timer_fn() never
+ * materialized, but someone should really work on it.
+ *
+ * To prevent DOS, fake @now to be 1 jiffie out, which keeps
+ * the overrun accounting correct but creates an
+ * inconsistency vs. timer_gettime(2).
+ */
+ ktime_t kj = NSEC_PER_SEC / HZ;
+
+ if (interval < kj)
+ now = ktime_add(now, kj);
+ }
+
+ return alarm_forward(alarm, now, interval);
+}
- return alarm_forward(alarm, base->gettime(), interval);
+u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
+{
+ return __alarm_forward_now(alarm, interval, false);
}
EXPORT_SYMBOL_GPL(alarm_forward_now);
@@ -557,9 +581,10 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
if (posix_timer_event(ptr, si_private) && ptr->it_interval) {
/*
* Handle ignored signals and rearm the timer. This will go
- * away once we handle ignored signals proper.
+ * away once we handle ignored signals properly. Ensure that
+ * small intervals cannot starve the system.
*/
- ptr->it_overrun += alarm_forward_now(alarm, ptr->it_interval);
+ ptr->it_overrun += __alarm_forward_now(alarm, ptr->it_interval, true);
++ptr->it_requeue_pending;
ptr->it_active = 1;
result = ALARMTIMER_RESTART;
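The throttle only engages for intervals shorter than one jiffy (kj = NSEC_PER_SEC / HZ). Pushing the fake now one jiffy ahead caps the rearm rate at HZ per second while alarm_forward() still credits every elapsed period as an overrun; with HZ=1000 and a pathological 10 ns period, for example:

#include <stdio.h>

int main(void)
{
	long HZ = 1000;
	long long kj = 1000000000LL / HZ;  /* one jiffy = 1,000,000 ns */
	long long interval = 10;           /* absurdly small timer period */

	/* each (throttled) expiry still accounts all elapsed periods */
	printf("overruns per expiry ~= %lld\n", kj / interval);  /* ~100000 */
	return 0;
}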
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index e1e8d5dab0c5..1b301dd1692b 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -981,6 +981,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
enum hrtimer_mode mode)
{
debug_activate(timer, mode);
+ WARN_ON_ONCE(!base->cpu_base->online);
base->cpu_base->active_bases |= 1 << base->index;
@@ -2020,6 +2021,7 @@ SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
if (!timespec64_valid(&tu))
return -EINVAL;
+ current->restart_block.fn = do_no_restart_syscall;
current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
current->restart_block.nanosleep.rmtp = rmtp;
return hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
@@ -2040,6 +2042,7 @@ SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
if (!timespec64_valid(&tu))
return -EINVAL;
+ current->restart_block.fn = do_no_restart_syscall;
current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
current->restart_block.nanosleep.compat_rmtp = rmtp;
return hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
@@ -2067,6 +2070,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
cpu_base->softirq_next_timer = NULL;
cpu_base->expires_next = KTIME_MAX;
cpu_base->softirq_expires_next = KTIME_MAX;
+ cpu_base->online = 1;
hrtimer_cpu_base_init_expiry_lock(cpu_base);
return 0;
}
@@ -2103,29 +2107,22 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
}
}
-int hrtimers_dead_cpu(unsigned int scpu)
+int hrtimers_cpu_dying(unsigned int dying_cpu)
{
struct hrtimer_cpu_base *old_base, *new_base;
- int i;
+ int i, ncpu = cpumask_first(cpu_active_mask);
- BUG_ON(cpu_online(scpu));
- tick_cancel_sched_timer(scpu);
+ tick_cancel_sched_timer(dying_cpu);
+
+ old_base = this_cpu_ptr(&hrtimer_bases);
+ new_base = &per_cpu(hrtimer_bases, ncpu);
- /*
- * this BH disable ensures that raise_softirq_irqoff() does
- * not wakeup ksoftirqd (and acquire the pi-lock) while
- * holding the cpu_base lock
- */
- local_bh_disable();
- local_irq_disable();
- old_base = &per_cpu(hrtimer_bases, scpu);
- new_base = this_cpu_ptr(&hrtimer_bases);
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
*/
- raw_spin_lock(&new_base->lock);
- raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&old_base->lock);
+ raw_spin_lock_nested(&new_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
migrate_hrtimer_list(&old_base->clock_base[i],
@@ -2136,15 +2133,14 @@ int hrtimers_dead_cpu(unsigned int scpu)
* The migration might have changed the first expiring softirq
* timer on this CPU. Update it.
*/
- hrtimer_update_softirq_timer(new_base, false);
+ __hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT);
+ /* Tell the other CPU to retrigger the next event */
+ smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
- raw_spin_unlock(&old_base->lock);
raw_spin_unlock(&new_base->lock);
+ old_base->online = 0;
+ raw_spin_unlock(&old_base->lock);
- /* Check, if we got expired work to do */
- __hrtimer_peek_ahead_timers();
- local_irq_enable();
- local_bh_enable();
return 0;
}
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index d23b434c2ca7..eddcf4970444 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -58,7 +58,8 @@ static struct clocksource clocksource_jiffies = {
.max_cycles = 10,
};
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
+__cacheline_aligned_in_smp seqcount_t jiffies_seq;
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
@@ -67,9 +68,9 @@ u64 get_jiffies_64(void)
u64 ret;
do {
- seq = read_seqbegin(&jiffies_lock);
+ seq = read_seqcount_begin(&jiffies_seq);
ret = jiffies_64;
- } while (read_seqretry(&jiffies_lock, seq));
+ } while (read_seqcount_retry(&jiffies_seq, seq));
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
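Splitting the seqlock into a raw spinlock plus a bare seqcount lets the writer nest jiffies_lock under other raw locks while readers keep the retry loop visible in get_jiffies_64() above. A minimal C11 model of that reader/writer protocol (pedagogical only: it uses default sequentially consistent atomics for simplicity, whereas the kernel's seqcount is far more finely tuned):

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint seq;         /* even = stable, odd = writer active */
static _Atomic uint64_t value;  /* the protected 64-bit counter */

static void write_side(uint64_t v)  /* writers serialized by a lock */
{
	atomic_fetch_add(&seq, 1);  /* seq becomes odd: readers back off */
	atomic_store(&value, v);
	atomic_fetch_add(&seq, 1);  /* seq even again: publish */
}

static uint64_t read_side(void)
{
	unsigned int s;
	uint64_t v;

	do {
		s = atomic_load(&seq);  /* snapshot before reading */
		v = atomic_load(&value);
	} while ((s & 1) || s != atomic_load(&seq));  /* retry if torn */

	return v;
}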
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
index 67df65f887ac..8ddb35d9779c 100644
--- a/kernel/time/posix-stubs.c
+++ b/kernel/time/posix-stubs.c
@@ -142,6 +142,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
return -EINVAL;
if (flags & TIMER_ABSTIME)
rmtp = NULL;
+ current->restart_block.fn = do_no_restart_syscall;
current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
current->restart_block.nanosleep.rmtp = rmtp;
return hrtimer_nanosleep(&t, flags & TIMER_ABSTIME ?
@@ -228,6 +229,7 @@ SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
return -EINVAL;
if (flags & TIMER_ABSTIME)
rmtp = NULL;
+ current->restart_block.fn = do_no_restart_syscall;
current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
current->restart_block.nanosleep.compat_rmtp = rmtp;
return hrtimer_nanosleep(&t, flags & TIMER_ABSTIME ?
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 97d4a9dcf339..f3b8313475ac 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -138,25 +138,30 @@ static struct k_itimer *posix_timer_by_id(timer_t id)
static int posix_timer_add(struct k_itimer *timer)
{
struct signal_struct *sig = current->signal;
- int first_free_id = sig->posix_timer_id;
struct hlist_head *head;
- int ret = -ENOENT;
+ unsigned int cnt, id;
- do {
+ /*
+ * FIXME: Replace this by a per signal struct xarray once there is
+ * a plan to handle the resulting CRIU regression gracefully.
+ */
+ for (cnt = 0; cnt <= INT_MAX; cnt++) {
spin_lock(&hash_lock);
- head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
- if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
+ id = sig->next_posix_timer_id;
+
+ /* Write the next ID back. Clamp it to the positive space */
+ sig->next_posix_timer_id = (id + 1) & INT_MAX;
+
+ head = &posix_timers_hashtable[hash(sig, id)];
+ if (!__posix_timers_find(head, sig, id)) {
hlist_add_head_rcu(&timer->t_hash, head);
- ret = sig->posix_timer_id;
+ spin_unlock(&hash_lock);
+ return id;
}
- if (++sig->posix_timer_id < 0)
- sig->posix_timer_id = 0;
- if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
- /* Loop over all possible ids completed */
- ret = -EAGAIN;
spin_unlock(&hash_lock);
- } while (ret == -ENOENT);
- return ret;
+ }
+ /* POSIX return code when no timer ID could be allocated */
+ return -EAGAIN;
}
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
@@ -1224,6 +1229,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
return -EINVAL;
if (flags & TIMER_ABSTIME)
rmtp = NULL;
+ current->restart_block.fn = do_no_restart_syscall;
current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
current->restart_block.nanosleep.rmtp = rmtp;
@@ -1251,6 +1257,7 @@ SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
return -EINVAL;
if (flags & TIMER_ABSTIME)
rmtp = NULL;
+ current->restart_block.fn = do_no_restart_syscall;
current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
current->restart_block.nanosleep.compat_rmtp = rmtp;
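In posix_timer_add() above, the (id + 1) & INT_MAX step keeps next_posix_timer_id in the non-negative range, and bounding the loop at INT_MAX + 1 iterations guarantees every candidate ID is probed once before the function gives up with -EAGAIN. The wraparound in isolation:

#include <assert.h>
#include <limits.h>

int main(void)
{
	unsigned int id = INT_MAX;               /* 0x7fffffff */
	unsigned int next = (id + 1) & INT_MAX;  /* 0x80000000 & 0x7fffffff */

	assert(next == 0);  /* wraps cleanly to 0, never goes negative */
	return 0;
}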
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index e51778c312f1..ce7339ff10d2 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -331,7 +331,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
bc_local = tick_do_periodic_broadcast();
if (clockevent_state_oneshot(dev)) {
- ktime_t next = ktime_add(dev->next_event, tick_period);
+ ktime_t next = ktime_add_ns(dev->next_event, TICK_NSEC);
clockevents_program_event(dev, next, true);
}
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 7e5d3524e924..e883d12dcb0d 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -30,7 +30,6 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
* Tick next event: keeps track of the tick time
*/
ktime_t tick_next_period;
-ktime_t tick_period;
/*
* tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
@@ -84,13 +83,15 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
- write_seqlock(&jiffies_lock);
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);
/* Keep track of the next tick event */
- tick_next_period = ktime_add(tick_next_period, tick_period);
+ tick_next_period = ktime_add_ns(tick_next_period, TICK_NSEC);
do_timer(1);
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
@@ -125,7 +126,7 @@ void tick_handle_periodic(struct clock_event_device *dev)
* Setup the next period for devices, which do not have
* periodic mode:
*/
- next = ktime_add(next, tick_period);
+ next = ktime_add_ns(next, TICK_NSEC);
if (!clockevents_program_event(dev, next, false))
return;
@@ -162,16 +163,16 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
ktime_t next;
do {
- seq = read_seqbegin(&jiffies_lock);
+ seq = read_seqcount_begin(&jiffies_seq);
next = tick_next_period;
- } while (read_seqretry(&jiffies_lock, seq));
+ } while (read_seqcount_retry(&jiffies_seq, seq));
clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
for (;;) {
if (!clockevents_program_event(dev, next, false))
return;
- next = ktime_add(next, tick_period);
+ next = ktime_add_ns(next, TICK_NSEC);
}
}
}
@@ -216,9 +217,7 @@ static void tick_setup_device(struct tick_device *td,
*/
if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
tick_do_timer_cpu = cpu;
-
tick_next_period = ktime_get();
- tick_period = NSEC_PER_SEC / HZ;
#ifdef CONFIG_NO_HZ_FULL
/*
* The boot CPU may be nohz_full, in which case set
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 5294f5b1f955..e61c1244e7d4 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -15,7 +15,6 @@
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
extern ktime_t tick_next_period;
-extern ktime_t tick_period;
extern int tick_do_timer_cpu __read_mostly;
extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 5eb04bb59802..c7b9b1e4af7d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -53,48 +53,69 @@ static ktime_t last_jiffies_update;
*/
static void tick_do_update_jiffies64(ktime_t now)
{
- unsigned long ticks = 0;
+ unsigned long ticks = 1;
ktime_t delta;
/*
- * Do a quick check without holding jiffies_lock:
- * The READ_ONCE() pairs with two updates done later in this function.
+ * Do a quick check without holding jiffies_lock. The READ_ONCE()
+ * pairs with the update done later in this function.
+ *
+ * This is also an intentional data race which is even safe on
+ * 32bit in theory. If there is a concurrent update then the check
+ * might give a random answer. It does not matter because if it
+ * returns then the concurrent update is already taking care of it; if it
+ * falls through then it will pointlessly contend on jiffies_lock.
+ *
+ * Though there is one nasty case on 32bit due to store tearing of
+ * the 64bit value. If the first 32bit store makes the quick check
+ * return on all other CPUs and the writing CPU context gets
+ * delayed to complete the second store (scheduled out on virt)
+ * then jiffies can become stale for up to ~2^32 nanoseconds
+ * without noticing. After that point all CPUs will wait for
+ * jiffies lock.
+ *
+ * OTOH, this is not any different than the situation with NOHZ=off
+ * where one CPU is responsible for updating jiffies and
+ * timekeeping. If that CPU goes out for lunch then all other CPUs
+ * will operate on stale jiffies until it decides to come back.
*/
- delta = ktime_sub(now, READ_ONCE(last_jiffies_update));
- if (delta < tick_period)
+ if (ktime_before(now, READ_ONCE(tick_next_period)))
return;
/* Reevaluate with jiffies_lock held */
- write_seqlock(&jiffies_lock);
-
- delta = ktime_sub(now, last_jiffies_update);
- if (delta >= tick_period) {
-
- delta = ktime_sub(delta, tick_period);
- /* Pairs with the lockless read in this function. */
- WRITE_ONCE(last_jiffies_update,
- ktime_add(last_jiffies_update, tick_period));
+ raw_spin_lock(&jiffies_lock);
+ if (ktime_before(now, tick_next_period)) {
+ raw_spin_unlock(&jiffies_lock);
+ return;
+ }
- /* Slow path for long timeouts */
- if (unlikely(delta >= tick_period)) {
- s64 incr = ktime_to_ns(tick_period);
+ write_seqcount_begin(&jiffies_seq);
- ticks = ktime_divns(delta, incr);
+ delta = ktime_sub(now, tick_next_period);
+ if (unlikely(delta >= TICK_NSEC)) {
+ /* Slow path for long idle sleep times */
+ s64 incr = TICK_NSEC;
- /* Pairs with the lockless read in this function. */
- WRITE_ONCE(last_jiffies_update,
- ktime_add_ns(last_jiffies_update,
- incr * ticks));
- }
- do_timer(++ticks);
+ ticks += ktime_divns(delta, incr);
- /* Keep the tick_next_period variable up to date */
- tick_next_period = ktime_add(last_jiffies_update, tick_period);
+ last_jiffies_update = ktime_add_ns(last_jiffies_update,
+ incr * ticks);
} else {
- write_sequnlock(&jiffies_lock);
- return;
+ last_jiffies_update = ktime_add_ns(last_jiffies_update,
+ TICK_NSEC);
}
- write_sequnlock(&jiffies_lock);
+
+ do_timer(ticks);
+
+ /*
+ * Keep the tick_next_period variable up to date. WRITE_ONCE()
+ * pairs with the READ_ONCE() in the lockless quick check above.
+ */
+ WRITE_ONCE(tick_next_period,
+ ktime_add_ns(last_jiffies_update, TICK_NSEC));
+
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
@@ -105,12 +126,25 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
- write_seqlock(&jiffies_lock);
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);
/* Did we start the jiffies update yet ? */
- if (last_jiffies_update == 0)
+ if (last_jiffies_update == 0) {
+ u32 rem;
+
+ /*
+ * Ensure that the tick is aligned to a multiple of
+ * TICK_NSEC.
+ */
+ div_u64_rem(tick_next_period, TICK_NSEC, &rem);
+ if (rem)
+ tick_next_period += TICK_NSEC - rem;
+
last_jiffies_update = tick_next_period;
+ }
period = last_jiffies_update;
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
return period;
}
@@ -202,6 +236,11 @@ static bool check_tick_dependency(atomic_t *dep)
return true;
}
+ if (val & TICK_DEP_MASK_RCU) {
+ trace_tick_stop(0, TICK_DEP_MASK_RCU);
+ return true;
+ }
+
return false;
}
@@ -328,6 +367,7 @@ void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
preempt_enable();
}
}
+EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);
void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
@@ -335,6 +375,7 @@ void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
+EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);
/*
* Set a per-task tick dependency. Posix CPU timers need this in order to elapse
@@ -402,7 +443,7 @@ void __init tick_nohz_full_setup(cpumask_var_t cpumask)
tick_nohz_full_running = true;
}
-static int tick_nohz_cpu_down(unsigned int cpu)
+bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
{
/*
* The tick_do_timer_cpu CPU handles housekeeping duty (unbound
@@ -410,8 +451,13 @@ static int tick_nohz_cpu_down(unsigned int cpu)
* CPUs. It must remain online when nohz full is enabled.
*/
if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
- return -EBUSY;
- return 0;
+ return false;
+ return true;
+}
+
+static int tick_nohz_cpu_down(unsigned int cpu)
+{
+ return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
}
void __init tick_nohz_init(void)
@@ -636,7 +682,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
/* Forward the time to expire in the future */
- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start_expires(&ts->sched_timer,
@@ -665,10 +711,10 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
/* Read jiffies and the time when jiffies were updated last */
do {
- seq = read_seqbegin(&jiffies_lock);
+ seq = read_seqcount_begin(&jiffies_seq);
basemono = last_jiffies_update;
basejiff = jiffies;
- } while (read_seqretry(&jiffies_lock, seq));
+ } while (read_seqcount_retry(&jiffies_seq, seq));
ts->last_jiffies = basejiff;
ts->timer_expires_base = basemono;
@@ -1198,7 +1244,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
if (unlikely(ts->tick_stopped))
return;
- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}
@@ -1235,7 +1281,7 @@ static void tick_nohz_switch_to_nohz(void)
next = tick_init_jiffy_update();
hrtimer_set_expires(&ts->sched_timer, next);
- hrtimer_forward_now(&ts->sched_timer, tick_period);
+ hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}
@@ -1301,7 +1347,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
if (unlikely(ts->tick_stopped))
return HRTIMER_NORESTART;
- hrtimer_forward(timer, now, tick_period);
+ hrtimer_forward(timer, now, TICK_NSEC);
return HRTIMER_RESTART;
}
@@ -1335,13 +1381,13 @@ void tick_setup_sched_timer(void)
/* Offset the tick to avert jiffies_lock contention. */
if (sched_skew_tick) {
- u64 offset = ktime_to_ns(tick_period) >> 1;
+ u64 offset = TICK_NSEC >> 1;
do_div(offset, num_possible_cpus());
offset *= smp_processor_id();
hrtimer_add_expires_ns(&ts->sched_timer, offset);
}
- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
@@ -1351,13 +1397,23 @@ void tick_setup_sched_timer(void)
void tick_cancel_sched_timer(int cpu)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t idle_sleeptime, iowait_sleeptime;
+ unsigned long idle_calls, idle_sleeps;
# ifdef CONFIG_HIGH_RES_TIMERS
if (ts->sched_timer.base)
hrtimer_cancel(&ts->sched_timer);
# endif
+ idle_sleeptime = ts->idle_sleeptime;
+ iowait_sleeptime = ts->iowait_sleeptime;
+ idle_calls = ts->idle_calls;
+ idle_sleeps = ts->idle_sleeps;
memset(ts, 0, sizeof(*ts));
+ ts->idle_sleeptime = idle_sleeptime;
+ ts->iowait_sleeptime = iowait_sleeptime;
+ ts->idle_calls = idle_calls;
+ ts->idle_sleeps = idle_sleeps;
}
#endif
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e23c9e765a5f..c202dbd87860 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -23,6 +23,7 @@
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>
#include <linux/audit.h>
+#include <linux/random.h>
#include "tick-internal.h"
#include "ntp_internal.h"
@@ -1092,13 +1093,15 @@ static int adjust_historical_crosststamp(struct system_time_snapshot *history,
}
/*
- * cycle_between - true if test occurs chronologically between before and after
+ * timestamp_in_interval - true if ts is chronologically in [start, end]
+ *
+ * True if ts occurs chronologically at or after start, and before or at end.
*/
-static bool cycle_between(u64 before, u64 test, u64 after)
+static bool timestamp_in_interval(u64 start, u64 end, u64 ts)
{
- if (test > before && test < after)
+ if (ts >= start && ts <= end)
return true;
- if (test < before && before > after)
+ if (start > end && (ts >= start || ts <= end))
return true;
return false;
}
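
The second branch above is the wraparound case: when the counter overflows between the two boundary reads, start ends up greater than end, and membership means lying after start or before end rather than between them. A self-contained sketch of the same predicate, assuming free-running u64 counters:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* True if ts lies in [start, end], allowing the interval to wrap past 0. */
static bool in_interval(uint64_t start, uint64_t end, uint64_t ts)
{
	if (ts >= start && ts <= end)
		return true;
	return start > end && (ts >= start || ts <= end);
}

int main(void)
{
	assert(in_interval(100, 200, 150));         /* plain interval */
	assert(in_interval(UINT64_MAX - 5, 10, 3)); /* wrapped past 0 */
	assert(!in_interval(100, 200, 250));        /* outside */
	return 0;
}
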
@@ -1158,7 +1161,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
*/
now = tk_clock_read(&tk->tkr_mono);
interval_start = tk->tkr_mono.cycle_last;
- if (!cycle_between(interval_start, cycles, now)) {
+ if (!timestamp_in_interval(interval_start, now, cycles)) {
clock_was_set_seq = tk->clock_was_set_seq;
cs_was_changed_seq = tk->cs_was_changed_seq;
cycles = interval_start;
@@ -1171,10 +1174,8 @@ int get_device_system_crosststamp(int (*get_time_fn)
tk_core.timekeeper.offs_real);
base_raw = tk->tkr_raw.base;
- nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
- system_counterval.cycles);
- nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
- system_counterval.cycles);
+ nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, cycles);
+ nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, cycles);
} while (read_seqcount_retry(&tk_core.seq, seq));
xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
@@ -1189,13 +1190,13 @@ int get_device_system_crosststamp(int (*get_time_fn)
bool discontinuity;
/*
- * Check that the counter value occurs after the provided
+ * Check that the counter value is not before the provided
* history reference and that the history doesn't cross a
* clocksource change
*/
if (!history_begin ||
- !cycle_between(history_begin->cycles,
- system_counterval.cycles, cycles) ||
+ !timestamp_in_interval(history_begin->cycles,
+ cycles, system_counterval.cycles) ||
history_begin->cs_was_changed_seq != cs_was_changed_seq)
return -EINVAL;
partial_history_cycles = cycles - system_counterval.cycles;
@@ -1256,8 +1257,10 @@ out:
/* signal hrtimers about time change */
clock_was_set();
- if (!ret)
+ if (!ret) {
audit_tk_injoffset(ts_delta);
+ add_device_randomness(ts, sizeof(*ts));
+ }
return ret;
}
@@ -2336,6 +2339,7 @@ int do_adjtimex(struct __kernel_timex *txc)
ret = timekeeping_validate_timex(txc);
if (ret)
return ret;
+ add_device_randomness(txc, sizeof(*txc));
if (txc->modes & ADJ_SETOFFSET) {
struct timespec64 delta;
@@ -2353,6 +2357,7 @@ int do_adjtimex(struct __kernel_timex *txc)
audit_ntp_init(&ad);
ktime_get_real_ts64(&ts);
+ add_device_randomness(&ts, sizeof(ts));
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
@@ -2410,8 +2415,10 @@ EXPORT_SYMBOL(hardpps);
*/
void xtime_update(unsigned long ticks)
{
- write_seqlock(&jiffies_lock);
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);
do_timer(ticks);
- write_sequnlock(&jiffies_lock);
+ write_seqcount_end(&jiffies_seq);
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
index 141ab3ab0354..099737f6f10c 100644
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
@@ -25,7 +25,8 @@ static inline void sched_clock_resume(void) { }
extern void do_timer(unsigned long ticks);
extern void update_wall_time(void);
-extern seqlock_t jiffies_lock;
+extern raw_spinlock_t jiffies_lock;
+extern seqcount_t jiffies_seq;
#define CS_NAME_LEN 32
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 749b27851f45..abf5cbbb743b 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1589,7 +1589,8 @@ blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
- if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
+ if ((iter->ent->type != TRACE_BLK) ||
+ !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
return TRACE_TYPE_UNHANDLED;
return print_one_line(iter, true);
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 4d9f81802911..4a31763a8c5d 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -649,6 +649,9 @@ BPF_CALL_1(bpf_send_signal, u32, sig)
return -EPERM;
if (unlikely(!nmi_uaccess_okay()))
return -EPERM;
+ /* Do not signal the init task (pid 1); killing it would panic the kernel. */
+ if (unlikely(is_global_init(current)))
+ return -EPERM;
if (irqs_disabled()) {
/* Do an early check on signal validity. Otherwise,
@@ -1450,7 +1453,7 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
#ifdef CONFIG_UPROBE_EVENTS
if (flags & TRACE_EVENT_FL_UPROBE)
err = bpf_get_uprobe_info(event, fd_type, buf,
- probe_offset,
+ probe_offset, probe_addr,
event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7719d444bda1..412505d94865 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1100,7 +1100,7 @@ struct ftrace_page {
struct ftrace_page *next;
struct dyn_ftrace *records;
int index;
- int size;
+ int order;
};
#define ENTRY_SIZE sizeof(struct dyn_ftrace)
@@ -1307,6 +1307,7 @@ static int ftrace_add_mod(struct trace_array *tr,
if (!ftrace_mod)
return -ENOMEM;
+ INIT_LIST_HEAD(&ftrace_mod->list);
ftrace_mod->func = kstrdup(func, GFP_KERNEL);
ftrace_mod->module = kstrdup(module, GFP_KERNEL);
ftrace_mod->enable = enable;
@@ -1556,7 +1557,8 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
key.flags = end; /* overload flags, as it is unsigned long */
for (pg = ftrace_pages_start; pg; pg = pg->next) {
- if (end < pg->records[0].ip ||
+ if (pg->index == 0 ||
+ end < pg->records[0].ip ||
start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
continue;
rec = bsearch(&key, pg->records, pg->index,
@@ -2732,6 +2734,16 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
ftrace_startup_enable(command);
+ /*
+ * If ftrace is in an undefined state, we just remove the ops from the
+ * list to prevent a NULL pointer dereference, instead of rolling it all
+ * the way back and freeing the trampoline, because those actions could
+ * cause further damage.
+ */
+ if (unlikely(ftrace_disabled)) {
+ __unregister_ftrace_function(ops);
+ return -ENODEV;
+ }
+
ops->flags &= ~FTRACE_OPS_FL_ADDING;
return 0;
@@ -2887,6 +2899,8 @@ static void ftrace_shutdown_sysctl(void)
static u64 ftrace_update_time;
unsigned long ftrace_update_tot_cnt;
+unsigned long ftrace_number_of_pages;
+unsigned long ftrace_number_of_groups;
static inline int ops_traces_mod(struct ftrace_ops *ops)
{
@@ -3007,12 +3021,15 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
/* if we can't allocate this size, try something smaller */
if (!order)
return -ENOMEM;
- order >>= 1;
+ order--;
goto again;
}
+ ftrace_number_of_pages += 1 << order;
+ ftrace_number_of_groups++;
+
cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
- pg->size = cnt;
+ pg->order = order;
if (cnt > count)
cnt = count;
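
The switch from "order >>= 1" to "order--" matters under memory pressure: halving skips orders (6 -> 3 -> 1 -> 0) and can miss a size that would have succeeded, while decrementing retries every smaller power of two. A minimal sketch of the retry loop, with malloc() as a hypothetical stand-in for alloc_pages():

#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Try progressively smaller power-of-two blocks, one order at a time. */
static void *alloc_backoff(int order, int *got_order)
{
	while (order >= 0) {
		void *p = malloc(PAGE_SIZE << order);
		if (p) {
			*got_order = order;
			return p;
		}
		order--; /* step down one order; don't halve */
	}
	return NULL;
}
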
@@ -3020,12 +3037,27 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
return cnt;
}
+static void ftrace_free_pages(struct ftrace_page *pages)
+{
+ struct ftrace_page *pg = pages;
+
+ while (pg) {
+ if (pg->records) {
+ free_pages((unsigned long)pg->records, pg->order);
+ ftrace_number_of_pages -= 1 << pg->order;
+ }
+ pages = pg->next;
+ kfree(pg);
+ pg = pages;
+ ftrace_number_of_groups--;
+ }
+}
+
static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
struct ftrace_page *start_pg;
struct ftrace_page *pg;
- int order;
int cnt;
if (!num_to_init)
@@ -3059,14 +3091,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
return start_pg;
free_pages:
- pg = start_pg;
- while (pg) {
- order = get_count_order(pg->size / ENTRIES_PER_PAGE);
- free_pages((unsigned long)pg->records, order);
- start_pg = pg->next;
- kfree(pg);
- pg = start_pg;
- }
+ ftrace_free_pages(start_pg);
pr_info("ftrace: FAILED to allocate memory for functions\n");
return NULL;
}
@@ -5074,8 +5099,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
if (filter_hash) {
orig_hash = &iter->ops->func_hash->filter_hash;
- if (iter->tr && !list_empty(&iter->tr->mod_trace))
- iter->hash->flags |= FTRACE_HASH_FL_MOD;
+ if (iter->tr) {
+ if (list_empty(&iter->tr->mod_trace))
+ iter->hash->flags &= ~FTRACE_HASH_FL_MOD;
+ else
+ iter->hash->flags |= FTRACE_HASH_FL_MOD;
+ }
} else
orig_hash = &iter->ops->func_hash->notrace_hash;
@@ -5604,9 +5633,11 @@ static int ftrace_process_locs(struct module *mod,
unsigned long *start,
unsigned long *end)
{
+ struct ftrace_page *pg_unuse = NULL;
struct ftrace_page *start_pg;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
+ unsigned long skipped = 0;
unsigned long count;
unsigned long *p;
unsigned long addr;
@@ -5652,6 +5683,7 @@ static int ftrace_process_locs(struct module *mod,
p = start;
pg = start_pg;
while (p < end) {
+ unsigned long end_offset;
addr = ftrace_call_adjust(*p++);
/*
* Some architecture linkers will pad between
@@ -5659,10 +5691,13 @@ static int ftrace_process_locs(struct module *mod,
* object files to satisfy alignments.
* Skip any NULL pointers.
*/
- if (!addr)
+ if (!addr) {
+ skipped++;
continue;
+ }
- if (pg->index == pg->size) {
+ end_offset = (pg->index+1) * sizeof(pg->records[0]);
+ if (end_offset > PAGE_SIZE << pg->order) {
/* We should have allocated enough */
if (WARN_ON(!pg->next))
break;
@@ -5673,8 +5708,10 @@ static int ftrace_process_locs(struct module *mod,
rec->ip = addr;
}
- /* We should have used all pages */
- WARN_ON(pg->next);
+ if (pg->next) {
+ pg_unuse = pg->next;
+ pg->next = NULL;
+ }
/* Assign the last page to ftrace_pages */
ftrace_pages = pg;
@@ -5696,6 +5733,11 @@ static int ftrace_process_locs(struct module *mod,
out:
mutex_unlock(&ftrace_lock);
+ /* We should have used all pages unless we skipped some */
+ if (pg_unuse) {
+ WARN_ON(!skipped);
+ ftrace_free_pages(pg_unuse);
+ }
return ret;
}
@@ -5802,7 +5844,6 @@ void ftrace_release_mod(struct module *mod)
struct ftrace_page **last_pg;
struct ftrace_page *tmp_page = NULL;
struct ftrace_page *pg;
- int order;
mutex_lock(&ftrace_lock);
@@ -5853,10 +5894,13 @@ void ftrace_release_mod(struct module *mod)
/* Needs to be called outside of ftrace_lock */
clear_mod_from_hashes(pg);
- order = get_count_order(pg->size / ENTRIES_PER_PAGE);
- free_pages((unsigned long)pg->records, order);
+ if (pg->records) {
+ free_pages((unsigned long)pg->records, pg->order);
+ ftrace_number_of_pages -= 1 << pg->order;
+ }
tmp_page = pg->next;
kfree(pg);
+ ftrace_number_of_groups--;
}
}
@@ -6158,7 +6202,6 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
struct ftrace_mod_map *mod_map = NULL;
struct ftrace_init_func *func, *func_next;
struct list_head clear_hash;
- int order;
INIT_LIST_HEAD(&clear_hash);
@@ -6196,8 +6239,11 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
ftrace_update_tot_cnt--;
if (!pg->index) {
*last_pg = pg->next;
- order = get_count_order(pg->size / ENTRIES_PER_PAGE);
- free_pages((unsigned long)pg->records, order);
+ if (pg->records) {
+ free_pages((unsigned long)pg->records, pg->order);
+ ftrace_number_of_pages -= 1 << pg->order;
+ }
+ ftrace_number_of_groups--;
kfree(pg);
pg = container_of(last_pg, struct ftrace_page, next);
if (!(*last_pg))
@@ -6245,7 +6291,7 @@ void __init ftrace_init(void)
}
pr_info("ftrace: allocating %ld entries in %ld pages\n",
- count, count / ENTRIES_PER_PAGE + 1);
+ count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
last_ftrace_enabled = ftrace_enabled = 1;
@@ -6253,6 +6299,9 @@ void __init ftrace_init(void)
__start_mcount_loc,
__stop_mcount_loc);
+ pr_info("ftrace: allocated %ld pages with %ld groups\n",
+ ftrace_number_of_pages, ftrace_number_of_groups);
+
set_ftrace_early_filters();
return;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5e1b9f6e77f3..ed505c6de7ca 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -477,6 +477,7 @@ struct ring_buffer_per_cpu {
local_t committing;
local_t commits;
local_t pages_touched;
+ local_t pages_lost;
local_t pages_read;
long last_pages_touch;
size_t shortest_full;
@@ -484,6 +485,8 @@ struct ring_buffer_per_cpu {
unsigned long read_bytes;
u64 write_stamp;
u64 read_stamp;
+ /* pages removed since last reset */
+ unsigned long pages_removed;
/* ring buffer pages to update, > 0 to add, < 0 to remove */
long nr_pages_to_update;
struct list_head new_pages; /* new pages to add */
@@ -519,6 +522,7 @@ struct ring_buffer_iter {
struct buffer_page *head_page;
struct buffer_page *cache_reader_page;
unsigned long cache_read;
+ unsigned long cache_pages_removed;
u64 read_stamp;
};
@@ -544,10 +548,18 @@ size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
{
size_t read;
+ size_t lost;
size_t cnt;
read = local_read(&buffer->buffers[cpu]->pages_read);
+ lost = local_read(&buffer->buffers[cpu]->pages_lost);
cnt = local_read(&buffer->buffers[cpu]->pages_touched);
+
+ if (WARN_ON_ONCE(cnt < lost))
+ return 0;
+
+ cnt -= lost;
+
/* The reader can read an empty page, but not more than that */
if (cnt < read) {
WARN_ON_ONCE(read > cnt + 1);
@@ -557,6 +569,26 @@ size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
return cnt - read;
}
+static __always_inline bool full_hit(struct ring_buffer *buffer, int cpu, int full)
+{
+ struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+ size_t nr_pages;
+ size_t dirty;
+
+ nr_pages = cpu_buffer->nr_pages;
+ if (!nr_pages || !full)
+ return true;
+
+ /*
+ * Add one as dirty will never equal nr_pages, as the sub-buffer
+ * that the writer is on is not counted as dirty.
+ * This is needed if "buffer_percent" is set to 100.
+ */
+ dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;
+
+ return (dirty * 100) >= (full * nr_pages);
+}
+
/*
* rb_wake_up_waiters - wake up tasks waiting for ring buffer input
*
@@ -568,8 +600,9 @@ static void rb_wake_up_waiters(struct irq_work *work)
struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
wake_up_all(&rbwork->waiters);
- if (rbwork->wakeup_full) {
+ if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
rbwork->wakeup_full = false;
+ rbwork->full_waiters_pending = false;
wake_up_all(&rbwork->full_waiters);
}
}
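
The +1 in full_hit() is what makes a 100% watermark reachable: the sub-buffer the writer sits on is never counted dirty, so with 8 pages a fully loaded ring reports dirty = 7, and only (7 + 1) * 100 >= 100 * 8 lets the waiter wake. A tiny model of the comparison:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Simplified model of full_hit(); dirty excludes the writer's sub-buffer. */
static bool full_hit_model(size_t nr_pages, size_t dirty, int full)
{
	if (!nr_pages || !full)
		return true;
	return ((dirty + 1) * 100) >= ((size_t)full * nr_pages);
}

int main(void)
{
	assert(full_hit_model(8, 7, 100)); /* 100% reachable thanks to the +1 */
	assert(!full_hit_model(8, 3, 75)); /* 4 of 8 pages is below 75% */
	return 0;
}
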
@@ -586,7 +619,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
*/
int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
{
- struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
+ struct ring_buffer_per_cpu *cpu_buffer;
DEFINE_WAIT(wait);
struct rb_irq_work *work;
int ret = 0;
@@ -651,22 +684,20 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
!ring_buffer_empty_cpu(buffer, cpu)) {
unsigned long flags;
bool pagebusy;
- size_t nr_pages;
- size_t dirty;
+ bool done;
if (!full)
break;
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
- nr_pages = cpu_buffer->nr_pages;
- dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+ done = !pagebusy && full_hit(buffer, cpu, full);
+
if (!cpu_buffer->shortest_full ||
- cpu_buffer->shortest_full < full)
+ cpu_buffer->shortest_full > full)
cpu_buffer->shortest_full = full;
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
- if (!pagebusy &&
- (!nr_pages || (dirty * 100) > full * nr_pages))
+ if (done)
break;
}
@@ -687,6 +718,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
* @cpu: the cpu buffer to wait on
* @filp: the file descriptor
* @poll_table: The poll descriptor
+ * @full: wait until this percentage of the buffer's pages are full, if @cpu != RING_BUFFER_ALL_CPUS
*
* If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
* as data is added to any of the @buffer's cpu buffers. Otherwise
@@ -696,23 +728,33 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
* zero otherwise.
*/
__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
- struct file *filp, poll_table *poll_table)
+ struct file *filp, poll_table *poll_table, int full)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct rb_irq_work *work;
- if (cpu == RING_BUFFER_ALL_CPUS)
+ if (cpu == RING_BUFFER_ALL_CPUS) {
work = &buffer->irq_work;
- else {
+ full = 0;
+ } else {
if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return -EINVAL;
+ return EPOLLERR;
cpu_buffer = buffer->buffers[cpu];
work = &cpu_buffer->irq_work;
}
- poll_wait(filp, &work->waiters, poll_table);
- work->waiters_pending = true;
+ if (full) {
+ poll_wait(filp, &work->full_waiters, poll_table);
+ work->full_waiters_pending = true;
+ if (!cpu_buffer->shortest_full ||
+ cpu_buffer->shortest_full > full)
+ cpu_buffer->shortest_full = full;
+ } else {
+ poll_wait(filp, &work->waiters, poll_table);
+ work->waiters_pending = true;
+ }
+
/*
* There's a tight race between setting the waiters_pending and
* checking if the ring buffer is empty. Once the waiters_pending bit
@@ -728,6 +770,9 @@ __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
*/
smp_mb();
+ if (full)
+ return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;
+
if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
(cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
return EPOLLIN | EPOLLRDNORM;
@@ -1359,11 +1404,13 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
struct list_head *head = cpu_buffer->pages;
struct buffer_page *bpage, *tmp;
- free_buffer_page(cpu_buffer->reader_page);
+ irq_work_sync(&cpu_buffer->irq_work.work);
- rb_head_page_deactivate(cpu_buffer);
+ free_buffer_page(cpu_buffer->reader_page);
if (head) {
+ rb_head_page_deactivate(cpu_buffer);
+
list_for_each_entry_safe(bpage, tmp, head, list) {
list_del_init(&bpage->list);
free_buffer_page(bpage);
@@ -1372,6 +1419,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
free_buffer_page(bpage);
}
+ free_page((unsigned long)cpu_buffer->free_page);
+
kfree(cpu_buffer);
}
@@ -1464,6 +1513,8 @@ ring_buffer_free(struct ring_buffer *buffer)
cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
+ irq_work_sync(&buffer->irq_work.work);
+
for_each_buffer_cpu(buffer, cpu)
rb_free_cpu_buffer(buffer->buffers[cpu]);
@@ -1543,6 +1594,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
to_remove = rb_list_head(to_remove)->next;
head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
}
+ /* Read iterators need to reset themselves when some pages are removed */
+ cpu_buffer->pages_removed += nr_removed;
next_page = rb_list_head(to_remove)->next;
@@ -1564,12 +1617,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
cpu_buffer->head_page = list_entry(next_page,
struct buffer_page, list);
- /*
- * change read pointer to make sure any read iterators reset
- * themselves
- */
- cpu_buffer->read = 0;
-
/* pages are removed, resume tracing and then free the pages */
atomic_dec(&cpu_buffer->record_disabled);
raw_spin_unlock_irq(&cpu_buffer->reader_lock);
@@ -1598,6 +1645,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
*/
local_add(page_entries, &cpu_buffer->overrun);
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+ local_inc(&cpu_buffer->pages_lost);
}
/*
@@ -1783,6 +1831,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
err = -ENOMEM;
goto out_err;
}
+
+ cond_resched();
}
get_online_cpus();
@@ -2022,6 +2072,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
*/
local_add(entries, &cpu_buffer->overrun);
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+ local_inc(&cpu_buffer->pages_lost);
/*
* The entries will be zeroed out when we move the
@@ -2190,6 +2241,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
/* Mark the rest of the page with padding */
rb_event_set_padding(event);
+ /* Make sure the padding is visible before the write update */
+ smp_wmb();
+
/* Set the write back to the previous setting */
local_sub(length, &tail_page->write);
return;
@@ -2201,6 +2255,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
/* time delta must be non zero */
event->time_delta = 1;
+ /* Make sure the padding is visible before the tail_page->write update */
+ smp_wmb();
+
/* Set write to end of buffer */
length = (tail + length) - BUF_PAGE_SIZE;
local_sub(length, &tail_page->write);
@@ -2490,6 +2547,10 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
if (RB_WARN_ON(cpu_buffer,
rb_is_reader_page(cpu_buffer->tail_page)))
return;
+ /*
+ * No need for a memory barrier here, as the update
+ * of the tail_page did it for this page.
+ */
local_set(&cpu_buffer->commit_page->page->commit,
rb_page_write(cpu_buffer->commit_page));
rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
@@ -2503,6 +2564,8 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
while (rb_commit_index(cpu_buffer) !=
rb_page_write(cpu_buffer->commit_page)) {
+ /* Make sure the readers see the content of what is committed. */
+ smp_wmb();
local_set(&cpu_buffer->commit_page->page->commit,
rb_page_write(cpu_buffer->commit_page));
RB_WARN_ON(cpu_buffer,
@@ -2622,10 +2685,6 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
static __always_inline void
rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
{
- size_t nr_pages;
- size_t dirty;
- size_t full;
-
if (buffer->irq_work.waiters_pending) {
buffer->irq_work.waiters_pending = false;
/* irq_work_queue() supplies its own memory barriers */
@@ -2649,10 +2708,7 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
- full = cpu_buffer->shortest_full;
- nr_pages = cpu_buffer->nr_pages;
- dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
- if (full && nr_pages && (dirty * 100) <= full * nr_pages)
+ if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
return;
cpu_buffer->irq_work.wakeup_full = true;
@@ -2920,6 +2976,12 @@ rb_reserve_next_event(struct ring_buffer *buffer,
int nr_loops = 0;
u64 diff;
+ /* ring buffer does cmpxchg, make sure it is safe in NMI context */
+ if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
+ (unlikely(in_nmi()))) {
+ return NULL;
+ }
+
rb_start_commit(cpu_buffer);
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
@@ -3614,6 +3676,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
iter->cache_reader_page = iter->head_page;
iter->cache_read = cpu_buffer->read;
+ iter->cache_pages_removed = cpu_buffer->pages_removed;
if (iter->head)
iter->read_stamp = cpu_buffer->read_stamp;
@@ -3863,6 +3926,38 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
arch_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
+ /*
+ * The writer has preemption disabled; wait for it, but not forever.
+ * Although, 1 second is pretty much "forever".
+ */
+#define USECS_WAIT 1000000
+ for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
+ /* If the write is past the end of page, a writer is still updating it */
+ if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE))
+ break;
+
+ udelay(1);
+
+ /* Get the latest version of the reader write value */
+ smp_rmb();
+ }
+
+ /* The writer is not moving forward? Something is wrong */
+ if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
+ reader = NULL;
+
+ /*
+ * Make sure we see any padding after the write update
+ * (see rb_reset_tail()).
+ *
+ * In addition, a writer may be writing on the reader page
+ * if the page has not been fully filled, so the read barrier
+ * is also needed to make sure we see the content of what is
+ * committed by the writer (see rb_set_commit_to_write()).
+ */
+ smp_rmb();
+
return reader;
}
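
The smp_rmb() at the end of rb_get_reader_page() pairs with the smp_wmb() calls added in rb_reset_tail() and rb_set_commit_to_write(): the writer publishes padding and committed data before updating the write index, and the reader orders its loads the same way. The classic publish/consume shape, as a hedged C11 analogue of the two barriers:

#include <stdatomic.h>

static int payload;          /* data filled in before publication */
static atomic_int published;

/* Writer: make the payload visible before the flag (smp_wmb() analogue). */
static void publish(int v)
{
	payload = v;
	atomic_store_explicit(&published, 1, memory_order_release);
}

/* Reader: observe the flag, then read the payload (smp_rmb() analogue). */
static int consume(void)
{
	while (!atomic_load_explicit(&published, memory_order_acquire))
		; /* the kernel loop above also bounds this wait with udelay(1) */
	return payload;
}
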
@@ -4024,12 +4119,13 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
buffer = cpu_buffer->buffer;
/*
- * Check if someone performed a consuming read to
- * the buffer. A consuming read invalidates the iterator
- * and we need to reset the iterator in this case.
+ * Check if someone performed a consuming read to the buffer
+ * or removed some pages from the buffer. In these cases,
+ * the iterator was invalidated and we need to reset it.
*/
if (unlikely(iter->cache_read != cpu_buffer->read ||
- iter->cache_reader_page != cpu_buffer->reader_page))
+ iter->cache_reader_page != cpu_buffer->reader_page ||
+ iter->cache_pages_removed != cpu_buffer->pages_removed))
rb_iter_reset(iter);
again:
@@ -4410,28 +4506,34 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
}
EXPORT_SYMBOL_GPL(ring_buffer_size);
+static void rb_clear_buffer_page(struct buffer_page *page)
+{
+ local_set(&page->write, 0);
+ local_set(&page->entries, 0);
+ rb_init_page(page->page);
+ page->read = 0;
+}
+
static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
+ struct buffer_page *page;
+
rb_head_page_deactivate(cpu_buffer);
cpu_buffer->head_page
= list_entry(cpu_buffer->pages, struct buffer_page, list);
- local_set(&cpu_buffer->head_page->write, 0);
- local_set(&cpu_buffer->head_page->entries, 0);
- local_set(&cpu_buffer->head_page->page->commit, 0);
-
- cpu_buffer->head_page->read = 0;
+ rb_clear_buffer_page(cpu_buffer->head_page);
+ list_for_each_entry(page, cpu_buffer->pages, list) {
+ rb_clear_buffer_page(page);
+ }
cpu_buffer->tail_page = cpu_buffer->head_page;
cpu_buffer->commit_page = cpu_buffer->head_page;
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
INIT_LIST_HEAD(&cpu_buffer->new_pages);
- local_set(&cpu_buffer->reader_page->write, 0);
- local_set(&cpu_buffer->reader_page->entries, 0);
- local_set(&cpu_buffer->reader_page->page->commit, 0);
- cpu_buffer->reader_page->read = 0;
+ rb_clear_buffer_page(cpu_buffer->reader_page);
local_set(&cpu_buffer->entries_bytes, 0);
local_set(&cpu_buffer->overrun, 0);
@@ -4441,6 +4543,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
local_set(&cpu_buffer->committing, 0);
local_set(&cpu_buffer->commits, 0);
local_set(&cpu_buffer->pages_touched, 0);
+ local_set(&cpu_buffer->pages_lost, 0);
local_set(&cpu_buffer->pages_read, 0);
cpu_buffer->last_pages_touch = 0;
cpu_buffer->shortest_full = 0;
@@ -4454,6 +4557,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->last_overrun = 0;
rb_head_page_activate(cpu_buffer);
+ cpu_buffer->pages_removed = 0;
}
/**
@@ -4706,11 +4810,16 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
*/
void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
{
- struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+ struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_data_page *bpage = data;
struct page *page = virt_to_page(bpage);
unsigned long flags;
+ if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
+ return;
+
+ cpu_buffer = buffer->buffers[cpu];
+
/* If the page is still in use someplace else, we can't reuse it */
if (page_ref_count(page) > 1)
goto out;
@@ -4825,7 +4934,15 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
unsigned int pos = 0;
unsigned int size;
- if (full)
+ /*
+ * If a full page is expected, it can still be returned if there
+ * has been a previous partial read, the rest of the page can now
+ * be read, and the commit page has moved off the reader page.
+ */
+ if (full &&
+ (!read || (len < (commit - read)) ||
+ cpu_buffer->reader_page == cpu_buffer->commit_page))
goto out_unlock;
if (len > (commit - read))
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 55da88f18342..6fd7dca57dd9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
+#include <linux/kmemleak.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
@@ -1015,12 +1016,14 @@ void *tracing_cond_snapshot_data(struct trace_array *tr)
{
void *cond_data = NULL;
+ local_irq_disable();
arch_spin_lock(&tr->max_lock);
if (tr->cond_snapshot)
cond_data = tr->cond_snapshot->cond_data;
arch_spin_unlock(&tr->max_lock);
+ local_irq_enable();
return cond_data;
}
@@ -1156,9 +1159,11 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
goto fail_unlock;
}
+ local_irq_disable();
arch_spin_lock(&tr->max_lock);
tr->cond_snapshot = cond_snapshot;
arch_spin_unlock(&tr->max_lock);
+ local_irq_enable();
mutex_unlock(&trace_types_lock);
@@ -1185,6 +1190,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
{
int ret = 0;
+ local_irq_disable();
arch_spin_lock(&tr->max_lock);
if (!tr->cond_snapshot)
@@ -1195,6 +1201,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
}
arch_spin_unlock(&tr->max_lock);
+ local_irq_enable();
return ret;
}
@@ -1925,10 +1932,12 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
}
/* Must have trace_types_lock held */
-void tracing_reset_all_online_cpus(void)
+void tracing_reset_all_online_cpus_unlocked(void)
{
struct trace_array *tr;
+ lockdep_assert_held(&trace_types_lock);
+
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (!tr->clear_trace)
continue;
@@ -1940,6 +1949,13 @@ void tracing_reset_all_online_cpus(void)
}
}
+void tracing_reset_all_online_cpus(void)
+{
+ mutex_lock(&trace_types_lock);
+ tracing_reset_all_online_cpus_unlocked();
+ mutex_unlock(&trace_types_lock);
+}
+
/*
* The tgid_map array maps from pid to tgid; i.e. the value stored at index i
* is the tgid last observed corresponding to pid=i.
@@ -1951,13 +1967,18 @@ static size_t tgid_map_max;
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
+/*
+ * Preemption must be disabled before acquiring trace_cmdline_lock.
+ * The various trace_arrays' max_lock must be acquired in a context
+ * where interrupts are disabled.
+ */
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
unsigned *map_cmdline_to_pid;
unsigned cmdline_num;
int cmdline_idx;
- char *saved_cmdlines;
+ char saved_cmdlines[];
};
static struct saved_cmdlines_buffer *savedcmd;
@@ -1971,47 +1992,60 @@ static inline void set_cmdline(int idx, const char *cmdline)
strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
-static int allocate_cmdlines_buffer(unsigned int val,
- struct saved_cmdlines_buffer *s)
+static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
+{
+ int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN);
+
+ kfree(s->map_cmdline_to_pid);
+ kmemleak_free(s);
+ free_pages((unsigned long)s, order);
+}
+
+static struct saved_cmdlines_buffer *allocate_cmdlines_buffer(unsigned int val)
{
+ struct saved_cmdlines_buffer *s;
+ struct page *page;
+ int orig_size, size;
+ int order;
+
+ /* Figure out how much is needed to hold the given number of cmdlines */
+ orig_size = sizeof(*s) + val * TASK_COMM_LEN;
+ order = get_order(orig_size);
+ size = 1 << (order + PAGE_SHIFT);
+ page = alloc_pages(GFP_KERNEL, order);
+ if (!page)
+ return NULL;
+
+ s = page_address(page);
+ kmemleak_alloc(s, size, 1, GFP_KERNEL);
+ memset(s, 0, sizeof(*s));
+
+ /* Round up to actual allocation */
+ val = (size - sizeof(*s)) / TASK_COMM_LEN;
+ s->cmdline_num = val;
+
s->map_cmdline_to_pid = kmalloc_array(val,
sizeof(*s->map_cmdline_to_pid),
GFP_KERNEL);
- if (!s->map_cmdline_to_pid)
- return -ENOMEM;
-
- s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
- if (!s->saved_cmdlines) {
- kfree(s->map_cmdline_to_pid);
- return -ENOMEM;
+ if (!s->map_cmdline_to_pid) {
+ free_saved_cmdlines_buffer(s);
+ return NULL;
}
s->cmdline_idx = 0;
- s->cmdline_num = val;
memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
sizeof(s->map_pid_to_cmdline));
memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
val * sizeof(*s->map_cmdline_to_pid));
- return 0;
+ return s;
}
static int trace_create_savedcmd(void)
{
- int ret;
-
- savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
- if (!savedcmd)
- return -ENOMEM;
-
- ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
- if (ret < 0) {
- kfree(savedcmd);
- savedcmd = NULL;
- return -ENOMEM;
- }
+ savedcmd = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT);
- return 0;
+ return savedcmd ? 0 : -ENOMEM;
}
int is_tracing_stopped(void)
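
Worked numbers for the round-up, assuming 4 KiB pages and PID_MAX_DEFAULT = 32768: sizeof(*s) is dominated by map_pid_to_cmdline[] at roughly 128 KiB, so for val = 128 orig_size comes to about 130 KiB, get_order() returns 6 (a 256 KiB block), and recomputing val as (size - sizeof(*s)) / TASK_COMM_LEN yields roughly 8190 usable cmdline slots; without the recompute, the tail of the power-of-two block beyond the requested 128 entries would simply be wasted.
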
@@ -2163,6 +2197,9 @@ static int trace_save_cmdline(struct task_struct *tsk)
* the lock, but we also don't want to spin
* nor do we want to disable interrupts,
* so if we miss here, then better luck next time.
+ *
+ * This is called from within the scheduler and wakeup paths, so
+ * interrupts had better be disabled and the run queue lock held.
*/
if (!arch_spin_trylock(&trace_cmdline_lock))
return 0;
@@ -2415,8 +2452,11 @@ void trace_buffered_event_enable(void)
for_each_tracing_cpu(cpu) {
page = alloc_pages_node(cpu_to_node(cpu),
GFP_KERNEL | __GFP_NORETRY, 0);
- if (!page)
- goto failed;
+ /* This is just an optimization and can handle failures */
+ if (!page) {
+ pr_err("Failed to allocate event buffer\n");
+ break;
+ }
event = page_address(page);
memset(event, 0, sizeof(*event));
@@ -2430,10 +2470,6 @@ void trace_buffered_event_enable(void)
WARN_ON_ONCE(1);
preempt_enable();
}
-
- return;
- failed:
- trace_buffered_event_disable();
}
static void enable_trace_buffered_event(void *data)
@@ -2468,11 +2504,9 @@ void trace_buffered_event_disable(void)
if (--trace_buffered_event_ref)
return;
- preempt_disable();
/* For each CPU, set the buffer as used. */
- smp_call_function_many(tracing_buffer_mask,
- disable_trace_buffered_event, NULL, 1);
- preempt_enable();
+ on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
+ NULL, true);
/* Wait for all current users to finish */
synchronize_rcu();
@@ -2481,17 +2515,19 @@ void trace_buffered_event_disable(void)
free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
per_cpu(trace_buffered_event, cpu) = NULL;
}
+
/*
- * Make sure trace_buffered_event is NULL before clearing
- * trace_buffered_event_cnt.
+ * Wait for all CPUs that may have started checking whether they can use
+ * their event buffer only after the previous synchronize_rcu() call,
+ * while still reading a valid pointer from trace_buffered_event. They
+ * must not see a cleared trace_buffered_event_cnt, or they could wrongly
+ * decide to use the pointed-to buffer, which is now freed.
*/
- smp_wmb();
+ synchronize_rcu();
- preempt_disable();
- /* Do the work on each cpu */
- smp_call_function_many(tracing_buffer_mask,
- enable_trace_buffered_event, NULL, 1);
- preempt_enable();
+ /* For each CPU, relinquish the buffer */
+ on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
+ true);
}
static struct ring_buffer *temp_buffer;
@@ -3476,8 +3512,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
* will point to the same string as current_trace->name.
*/
mutex_lock(&trace_types_lock);
- if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
+ if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
+ /* Close iter->trace before switching to the new current tracer */
+ if (iter->trace->close)
+ iter->trace->close(iter);
*iter->trace = *tr->current_trace;
+ /* Reopen the new current tracer */
+ if (iter->trace->open)
+ iter->trace->open(iter);
+ }
mutex_unlock(&trace_types_lock);
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -4049,7 +4092,11 @@ static int s_show(struct seq_file *m, void *v)
iter->leftover = ret;
} else {
- print_trace_line(iter);
+ ret = print_trace_line(iter);
+ if (ret == TRACE_TYPE_PARTIAL_LINE) {
+ iter->seq.full = 0;
+ trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
+ }
ret = trace_print_seq(m, &iter->seq);
/*
* If we overflow the seq_file buffer, then it will
@@ -4214,6 +4261,48 @@ int tracing_open_generic_tr(struct inode *inode, struct file *filp)
return 0;
}
+/*
+ * The private pointer of the inode is the trace_event_file.
+ * Update the tr ref count associated with it.
+ */
+int tracing_open_file_tr(struct inode *inode, struct file *filp)
+{
+ struct trace_event_file *file = inode->i_private;
+ int ret;
+
+ ret = tracing_check_open_get_tr(file->tr);
+ if (ret)
+ return ret;
+
+ mutex_lock(&event_mutex);
+
+ /* Fail if the file is marked for removal */
+ if (file->flags & EVENT_FILE_FL_FREED) {
+ trace_array_put(file->tr);
+ ret = -ENODEV;
+ } else {
+ event_file_get(file);
+ }
+
+ mutex_unlock(&event_mutex);
+ if (ret)
+ return ret;
+
+ filp->private_data = inode->i_private;
+
+ return 0;
+}
+
+int tracing_release_file_tr(struct inode *inode, struct file *filp)
+{
+ struct trace_event_file *file = inode->i_private;
+
+ trace_array_put(file->tr);
+ event_file_put(file);
+
+ return 0;
+}
+
static int tracing_release(struct inode *inode, struct file *file)
{
struct trace_array *tr = inode->i_private;
@@ -5199,37 +5288,29 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
char buf[64];
int r;
+ preempt_disable();
arch_spin_lock(&trace_cmdline_lock);
r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
arch_spin_unlock(&trace_cmdline_lock);
+ preempt_enable();
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
-static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
-{
- kfree(s->saved_cmdlines);
- kfree(s->map_cmdline_to_pid);
- kfree(s);
-}
-
static int tracing_resize_saved_cmdlines(unsigned int val)
{
struct saved_cmdlines_buffer *s, *savedcmd_temp;
- s = kmalloc(sizeof(*s), GFP_KERNEL);
+ s = allocate_cmdlines_buffer(val);
if (!s)
return -ENOMEM;
- if (allocate_cmdlines_buffer(val, s) < 0) {
- kfree(s);
- return -ENOMEM;
- }
-
+ preempt_disable();
arch_spin_lock(&trace_cmdline_lock);
savedcmd_temp = savedcmd;
savedcmd = s;
arch_spin_unlock(&trace_cmdline_lock);
+ preempt_enable();
free_saved_cmdlines_buffer(savedcmd_temp);
return 0;
@@ -5522,8 +5603,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
return ret;
#ifdef CONFIG_TRACER_MAX_TRACE
- if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
- !tr->current_trace->use_max_tr)
+ if (!tr->current_trace->use_max_tr)
goto out;
ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
@@ -5684,10 +5764,12 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
#ifdef CONFIG_TRACER_SNAPSHOT
if (t->use_max_tr) {
+ local_irq_disable();
arch_spin_lock(&tr->max_lock);
if (tr->cond_snapshot)
ret = -EBUSY;
arch_spin_unlock(&tr->max_lock);
+ local_irq_enable();
if (ret)
goto out;
}
@@ -5973,7 +6055,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
return EPOLLIN | EPOLLRDNORM;
else
return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
- filp, poll_table);
+ filp, poll_table, iter->tr->buffer_percent);
}
static __poll_t
@@ -6081,7 +6163,20 @@ waitagain:
ret = print_trace_line(iter);
if (ret == TRACE_TYPE_PARTIAL_LINE) {
- /* don't print partial lines */
+ /*
+ * If one print_trace_line() fills the entire trace_seq in one shot,
+ * trace_seq_to_user() will return -EBUSY because save_len == 0.
+ * In this case, consume the event; otherwise the loop will peek at
+ * the same event next time, resulting in an infinite loop.
+ */
+ if (save_len == 0) {
+ iter->seq.full = 0;
+ trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
+ trace_consume(iter);
+ break;
+ }
+
+ /* In other cases, don't print partial lines */
iter->seq.seq.len = save_len;
break;
}
@@ -6742,6 +6837,11 @@ out:
return ret;
}
+static void tracing_swap_cpu_buffer(void *tr)
+{
+ update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
+}
+
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
@@ -6767,10 +6867,12 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
goto out;
}
+ local_irq_disable();
arch_spin_lock(&tr->max_lock);
if (tr->cond_snapshot)
ret = -EBUSY;
arch_spin_unlock(&tr->max_lock);
+ local_irq_enable();
if (ret)
goto out;
@@ -6798,13 +6900,15 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
ret = tracing_alloc_snapshot_instance(tr);
if (ret < 0)
break;
- local_irq_disable();
/* Now, we're going to swap */
- if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
+ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
+ local_irq_disable();
update_max_tr(tr, current, smp_processor_id(), NULL);
- else
- update_max_tr_single(tr, current, iter->cpu_file);
- local_irq_enable();
+ local_irq_enable();
+ } else {
+ smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
+ (void *)tr, 1);
+ }
break;
default:
if (tr->allocated_snapshot) {
@@ -6893,10 +6997,11 @@ static const struct file_operations tracing_max_lat_fops = {
#endif
static const struct file_operations set_tracer_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.read = tracing_set_trace_read,
.write = tracing_set_trace_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_generic_tr,
};
static const struct file_operations tracing_pipe_fops = {
@@ -7219,7 +7324,7 @@ static const struct file_operations tracing_err_log_fops = {
.open = tracing_err_log_open,
.write = tracing_err_log_write,
.read = seq_read,
- .llseek = seq_lseek,
+ .llseek = tracing_lseek,
.release = tracing_err_log_release,
};
@@ -7618,14 +7723,23 @@ static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- unsigned long *p = filp->private_data;
- char buf[64]; /* Not too big for a shallow stack */
+ ssize_t ret;
+ char *buf;
int r;
- r = scnprintf(buf, 63, "%ld", *p);
- buf[r++] = '\n';
+ /* 256 should be plenty to hold the amount needed */
+ buf = kmalloc(256, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+ r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
+ ftrace_update_tot_cnt,
+ ftrace_number_of_pages,
+ ftrace_number_of_groups);
+
+ ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+ kfree(buf);
+ return ret;
}
static const struct file_operations tracing_dyn_info_fops = {
@@ -7928,12 +8042,33 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
return cnt;
}
+static int tracing_open_options(struct inode *inode, struct file *filp)
+{
+ struct trace_option_dentry *topt = inode->i_private;
+ int ret;
+
+ ret = tracing_check_open_get_tr(topt->tr);
+ if (ret)
+ return ret;
+
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static int tracing_release_options(struct inode *inode, struct file *file)
+{
+ struct trace_option_dentry *topt = file->private_data;
+
+ trace_array_put(topt->tr);
+ return 0;
+}
static const struct file_operations trace_options_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_options,
.read = trace_options_read,
.write = trace_options_write,
.llseek = generic_file_llseek,
+ .release = tracing_release_options,
};
/*
@@ -8263,9 +8398,6 @@ buffer_percent_write(struct file *filp, const char __user *ubuf,
if (val > 100)
return -EINVAL;
- if (!val)
- val = 1;
-
tr->buffer_percent = val;
(*ppos)++;
@@ -8510,6 +8642,7 @@ static int __remove_instance(struct trace_array *tr)
ftrace_destroy_function_files(tr);
tracefs_remove_recursive(tr->dir);
free_trace_buffers(tr);
+ clear_tracing_err_log(tr);
for (i = 0; i < tr->nr_topts; i++) {
kfree(tr->topts[i].topts);
@@ -8847,7 +8980,7 @@ static __init int tracer_init_tracefs(void)
#ifdef CONFIG_DYNAMIC_FTRACE
trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
- &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
+ NULL, &tracing_dyn_info_fops);
#endif
create_trace_instances(d_tracer);
@@ -9282,6 +9415,8 @@ void __init early_trace_init(void)
static_key_enable(&tracepoint_printk_key.key);
}
tracer_alloc_buffers();
+
+ init_events();
}
void __init trace_init(void)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 1d514a1a3155..40644e06536c 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -677,8 +677,11 @@ int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
+void tracing_reset_all_online_cpus_unlocked(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
+int tracing_open_file_tr(struct inode *inode, struct file *filp);
+int tracing_release_file_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
@@ -800,6 +803,8 @@ extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
+extern unsigned long ftrace_number_of_pages;
+extern unsigned long ftrace_number_of_groups;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
@@ -1590,6 +1595,7 @@ extern void trace_event_enable_cmd_record(bool enable);
extern void trace_event_enable_tgid_record(bool enable);
extern int event_trace_init(void);
+extern int init_events(void);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);
@@ -1690,6 +1696,9 @@ extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);
extern int register_trigger_hist_enable_disable_cmds(void);
+extern void event_file_get(struct trace_event_file *file);
+extern void event_file_put(struct trace_event_file *file);
+
/**
* struct event_trigger_ops - callbacks for trace event triggers
*
diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
index 89779eb84a07..2dafeab6a270 100644
--- a/kernel/trace/trace_dynevent.c
+++ b/kernel/trace/trace_dynevent.c
@@ -70,6 +70,7 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
if (ret)
break;
}
+ tracing_reset_all_online_cpus();
mutex_unlock(&event_mutex);
return ret;
@@ -165,6 +166,7 @@ int dyn_events_release_all(struct dyn_event_operations *type)
break;
}
out:
+ tracing_reset_all_online_cpus();
mutex_unlock(&event_mutex);
return ret;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 4acc77e049e5..958789fe4cef 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -166,6 +166,7 @@ static int trace_define_generic_fields(void)
__generic_field(int, CPU, FILTER_CPU);
__generic_field(int, cpu, FILTER_CPU);
+ __generic_field(int, common_cpu, FILTER_CPU);
__generic_field(char *, COMM, FILTER_COMM);
__generic_field(char *, comm, FILTER_COMM);
@@ -364,7 +365,6 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
{
struct trace_event_call *call = file->event_call;
struct trace_array *tr = file->tr;
- unsigned long file_flags = file->flags;
int ret = 0;
int disable;
@@ -388,6 +388,8 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
break;
disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
+ /* Disable use of trace_buffered_event */
+ trace_buffered_event_disable();
} else
disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
@@ -426,6 +428,8 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
if (atomic_inc_return(&file->sm_ref) > 1)
break;
set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
+ /* Enable use of trace_buffered_event */
+ trace_buffered_event_enable();
}
if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
@@ -465,15 +469,6 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
break;
}
- /* Enable or disable use of trace_buffered_event */
- if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
- (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
- if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
- trace_buffered_event_enable();
- else
- trace_buffered_event_disable();
- }
-
return ret;
}
@@ -703,21 +698,33 @@ static void remove_subsystem(struct trace_subsystem_dir *dir)
}
}
+void event_file_get(struct trace_event_file *file)
+{
+ atomic_inc(&file->ref);
+}
+
+void event_file_put(struct trace_event_file *file)
+{
+ if (WARN_ON_ONCE(!atomic_read(&file->ref))) {
+ if (file->flags & EVENT_FILE_FL_FREED)
+ kmem_cache_free(file_cachep, file);
+ return;
+ }
+
+ if (atomic_dec_and_test(&file->ref)) {
+ /* Count should only go to zero when it is freed */
+ if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
+ return;
+ kmem_cache_free(file_cachep, file);
+ }
+}
+
static void remove_event_file_dir(struct trace_event_file *file)
{
struct dentry *dir = file->dir;
- struct dentry *child;
-
- if (dir) {
- spin_lock(&dir->d_lock); /* probably unneeded */
- list_for_each_entry(child, &dir->d_subdirs, d_child) {
- if (d_really_is_positive(child)) /* probably unneeded */
- d_inode(child)->i_private = NULL;
- }
- spin_unlock(&dir->d_lock);
+ if (dir)
tracefs_remove_recursive(dir);
- }
list_del(&file->list);
remove_subsystem(file->system);
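
event_file_get()/event_file_put() follow the usual refcount lifetime rule: creation takes the initial reference, each open takes another, and the memory is freed only once the count reaches zero and EVENT_FILE_FL_FREED marks the file as torn down. A generic sketch of that rule with hypothetical types (the kernel version adds the WARN_ON_ONCE() sanity checks seen above):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	atomic_int ref;
	bool freed; /* set by the owner once the object is torn down */
};

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->ref, 1);
}

static void obj_put(struct obj *o)
{
	/* Free only when the last reference drops on a torn-down object. */
	if (atomic_fetch_sub(&o->ref, 1) == 1 && o->freed)
		free(o);
}
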
@@ -1038,7 +1045,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
flags = file->flags;
mutex_unlock(&event_mutex);
- if (!file)
+ if (!file || flags & EVENT_FILE_FL_FREED)
return -ENODEV;
if (flags & EVENT_FILE_FL_ENABLED &&
@@ -1076,7 +1083,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
ret = -ENODEV;
mutex_lock(&event_mutex);
file = event_file_data(filp);
- if (likely(file))
+ if (likely(file && !(file->flags & EVENT_FILE_FL_FREED)))
ret = ftrace_event_enable_disable(file, val);
mutex_unlock(&event_mutex);
break;
@@ -1345,7 +1352,7 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
mutex_lock(&event_mutex);
file = event_file_data(filp);
- if (file)
+ if (file && !(file->flags & EVENT_FILE_FL_FREED))
print_event_filter(file, s);
mutex_unlock(&event_mutex);
@@ -1704,9 +1711,10 @@ static const struct file_operations ftrace_set_event_pid_fops = {
};
static const struct file_operations ftrace_enable_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_file_tr,
.read = event_enable_read,
.write = event_enable_write,
+ .release = tracing_release_file_tr,
.llseek = default_llseek,
};
@@ -1723,9 +1731,10 @@ static const struct file_operations ftrace_event_id_fops = {
};
static const struct file_operations ftrace_event_filter_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_file_tr,
.read = event_filter_read,
.write = event_filter_write,
+ .release = tracing_release_file_tr,
.llseek = default_llseek,
};
@@ -2239,6 +2248,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
update_event_printk(call, map[i]);
}
}
+ cond_resched();
}
up_write(&trace_event_sem);
}
@@ -2266,6 +2276,7 @@ trace_create_new_event(struct trace_event_call *call,
atomic_set(&file->tm_ref, 0);
INIT_LIST_HEAD(&file->triggers);
list_add(&file->list, &tr->events);
+ event_file_get(file);
return file;
}
@@ -2350,7 +2361,10 @@ static int probe_remove_event_call(struct trace_event_call *call)
* TRACE_REG_UNREGISTER.
*/
if (file->flags & EVENT_FILE_FL_ENABLED)
- return -EBUSY;
+ goto busy;
+
+ if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
+ tr->clear_trace = true;
/*
* The do_for_each_event_file_safe() is
* a double loop. After finding the call for this
@@ -2363,6 +2377,12 @@ static int probe_remove_event_call(struct trace_event_call *call)
__trace_remove_event_call(call);
return 0;
+ busy:
+ /* No need to clear the trace now */
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ tr->clear_trace = false;
+ }
+ return -EBUSY;
}
/* Remove an event_call */
@@ -2430,7 +2450,7 @@ static void trace_module_remove_events(struct module *mod)
* over from this module may be passed to the new module events and
* unexpected results may occur.
*/
- tracing_reset_all_online_cpus();
+ tracing_reset_all_online_cpus_unlocked();
}
static int trace_module_notify(struct notifier_block *self,
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index bf44f6bbd0c3..bad8cf24837e 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1800,6 +1800,9 @@ int apply_event_filter(struct trace_event_file *file, char *filter_string)
struct event_filter *filter = NULL;
int err;
+ if (file->flags & EVENT_FILE_FL_FREED)
+ return -ENODEV;
+
if (!strcmp(strstrip(filter_string), "0")) {
filter_disable(file);
filter = event_filter(file);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index b8f1f0eadd2e..1ede6d41ab8d 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -479,7 +479,7 @@ struct action_data {
* event param, and is passed to the synthetic event
* invocation.
*/
- unsigned int var_ref_idx[TRACING_MAP_VARS_MAX];
+ unsigned int var_ref_idx[SYNTH_FIELDS_MAX];
struct synth_event *synth_event;
bool use_trace_keyword;
char *synth_event_name;
@@ -1996,6 +1996,9 @@ static const char *hist_field_name(struct hist_field *field,
{
const char *field_name = "";
+ if (WARN_ON_ONCE(!field))
+ return field_name;
+
if (level > 1)
return field_name;
@@ -2552,6 +2555,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
hist_field->fn = hist_field_log2;
hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
+ if (!hist_field->operands[0])
+ goto free;
hist_field->size = hist_field->operands[0]->size;
hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
if (!hist_field->type)
@@ -2752,7 +2757,9 @@ static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
return ref_field;
}
}
-
+ /* Sanity check to avoid an out-of-bounds write on 'hist_data->var_refs' */
+ if (hist_data->n_var_refs >= TRACING_MAP_VARS_MAX)
+ return NULL;
ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
if (ref_field) {
if (init_var_ref(ref_field, var_field, system, event_name)) {
@@ -4014,6 +4021,7 @@ static int parse_action_params(struct trace_array *tr, char *params,
while (params) {
if (data->n_params >= SYNTH_FIELDS_MAX) {
hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
+ ret = -EINVAL;
goto out;
}
@@ -4338,6 +4346,10 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
lockdep_assert_held(&event_mutex);
+ /* Sanity check to avoid an out-of-bounds write on 'data->var_ref_idx' */
+ if (data->n_params > SYNTH_FIELDS_MAX)
+ return -EINVAL;
+
if (data->use_trace_keyword)
synth_event_name = data->synth_event_name;
else
@@ -6411,13 +6423,16 @@ static int event_hist_trigger_func(struct event_command *cmd_ops,
if (get_named_trigger_data(trigger_data))
goto enable;
- if (has_hist_vars(hist_data))
- save_hist_vars(hist_data);
-
ret = create_actions(hist_data);
if (ret)
goto out_unreg;
+ if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
+ ret = save_hist_vars(hist_data);
+ if (ret)
+ goto out_unreg;
+ }
+
ret = tracing_map_init(hist_data->map);
if (ret)
goto out_unreg;
@@ -6433,7 +6448,7 @@ enable:
/* Just return zero, not the number of registered triggers */
ret = 0;
out:
- if (ret == 0)
+ if (ret == 0 && glob[0])
hist_err_clear();
return ret;
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 82580f7ffad9..634d120eab2b 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -1140,8 +1140,10 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
struct event_trigger_data *data,
struct trace_event_file *file)
{
- if (tracing_alloc_snapshot_instance(file->tr) != 0)
- return 0;
+ int ret = tracing_alloc_snapshot_instance(file->tr);
+
+ if (ret < 0)
+ return ret;
return register_trigger(glob, ops, data, file);
}
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index a745b0cee5d3..07557904dab8 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -228,7 +228,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter)
{
if (is_graph(iter->tr))
graph_trace_open(iter);
-
+ else
+ iter->private = NULL;
}
static void irqsoff_trace_close(struct trace_iterator *iter)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index a422cf6a0358..0b95277396fc 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1134,9 +1134,10 @@ probe_mem_read_user(void *dest, void *src, size_t size)
/* Note that we don't verify it, since the code does not come from user space */
static int
-process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
+process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
void *base)
{
+ struct pt_regs *regs = rec;
unsigned long val;
retry:
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index a0a45901dc02..9ffe54ff3edb 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1291,11 +1291,12 @@ static enum print_line_t trace_print_print(struct trace_iterator *iter,
{
struct print_entry *field;
struct trace_seq *s = &iter->seq;
+ int max = iter->ent_size - offsetof(struct print_entry, buf);
trace_assign_type(field, iter->ent);
seq_print_ip_sym(s, field->ip, flags);
- trace_seq_printf(s, ": %s", field->buf);
+ trace_seq_printf(s, ": %.*s", max, field->buf);
return trace_handle_return(s);
}
@@ -1304,10 +1305,11 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
struct trace_event *event)
{
struct print_entry *field;
+ int max = iter->ent_size - offsetof(struct print_entry, buf);
trace_assign_type(field, iter->ent);
- trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
+ trace_seq_printf(&iter->seq, "# %lx %.*s", field->ip, max, field->buf);
return trace_handle_return(&iter->seq);
}
@@ -1366,7 +1368,7 @@ static struct trace_event *events[] __initdata = {
NULL
};
-__init static int init_events(void)
+__init int init_events(void)
{
struct trace_event *event;
int i, ret;
@@ -1384,4 +1386,3 @@ __init static int init_events(void)
return 0;
}
-early_initcall(init_events);
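
The "%.*s" hunks in trace_output.c bound the print with a precision computed from the entry size, so a print_entry whose buffer is not NUL-terminated can no longer be read past its end. A self-contained demonstration of that printf feature:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8];
	memcpy(buf, "ABCDEFGH", 8);		/* deliberately not NUL-terminated */
	int max = (int)sizeof(buf);

	/* "%.*s" stops after max bytes even with no terminator in sight */
	printf(": %.*s\n", max, buf);
	return 0;
}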
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index 26b06b09c9f6..e9645f829b94 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -56,14 +56,14 @@ __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
this_cpu_write(tracing_irq_cpu, 0);
}
- lockdep_hardirqs_on(CALLER_ADDR0);
+ lockdep_hardirqs_on(caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
NOKPROBE_SYMBOL(trace_hardirqs_on_caller);
__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
- lockdep_hardirqs_off(CALLER_ADDR0);
+ lockdep_hardirqs_off(caller_addr);
if (!this_cpu_read(tracing_irq_cpu)) {
this_cpu_write(tracing_irq_cpu, 1);
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 23e85cb15134..476a685c6b6c 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -300,7 +300,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
}
} else
goto inval_var;
- } else if (strcmp(arg, "comm") == 0) {
+ } else if (strcmp(arg, "comm") == 0 || strcmp(arg, "COMM") == 0) {
code->op = FETCH_OP_COMM;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
} else if (((flags & TPARG_FL_MASK) ==
@@ -595,7 +595,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
* Since $comm and immediate string cannot be dereferenced,
* we can find those by strcmp.
*/
- if (strcmp(arg, "$comm") == 0 || strncmp(arg, "\\\"", 2) == 0) {
+ if (strcmp(arg, "$comm") == 0 || strcmp(arg, "$COMM") == 0 ||
+ strncmp(arg, "\\\"", 2) == 0) {
/* The type of $comm must be "string", and not an array. */
if (parg->count || (t && strcmp(t, "string")))
return -EINVAL;
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index bab9e0dba9af..dc19d5d185d4 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -302,7 +302,7 @@ trace_probe_primary_from_call(struct trace_event_call *call)
{
struct trace_probe_event *tpe = trace_probe_event_from_call(call);
- return list_first_entry(&tpe->probes, struct trace_probe, list);
+ return list_first_entry_or_null(&tpe->probes, struct trace_probe, list);
}
static inline struct list_head *trace_probe_probe_list(struct trace_probe *tp)
diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h
index e5282828f4a6..cf14a37dff8c 100644
--- a/kernel/trace/trace_probe_tmpl.h
+++ b/kernel/trace/trace_probe_tmpl.h
@@ -54,7 +54,7 @@ fetch_apply_bitfield(struct fetch_insn *code, void *buf)
* If dest is NULL, don't store result and return required dynamic data size.
*/
static int
-process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs,
+process_fetch_insn(struct fetch_insn *code, void *rec,
void *dest, void *base);
static nokprobe_inline int fetch_store_strlen(unsigned long addr);
static nokprobe_inline int
@@ -143,6 +143,8 @@ stage3:
array:
/* the last stage: Loop on array */
if (code->op == FETCH_OP_LP_ARRAY) {
+ if (ret < 0)
+ ret = 0;
total += ret;
if (++i < code->param) {
code = s3;
@@ -188,7 +190,7 @@ __get_data_size(struct trace_probe *tp, struct pt_regs *regs)
/* Store the value of each argument */
static nokprobe_inline void
-store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
+store_trace_args(void *data, struct trace_probe *tp, void *rec,
int header_size, int maxlen)
{
struct probe_arg *arg;
@@ -203,12 +205,14 @@ store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
/* Point the dynamic data area if needed */
if (unlikely(arg->dynamic))
*dl = make_data_loc(maxlen, dyndata - base);
- ret = process_fetch_insn(arg->code, regs, dl, base);
- if (unlikely(ret < 0 && arg->dynamic)) {
- *dl = make_data_loc(0, dyndata - base);
- } else {
- dyndata += ret;
- maxlen -= ret;
+ ret = process_fetch_insn(arg->code, rec, dl, base);
+ if (arg->dynamic) {
+ if (unlikely(ret < 0)) {
+ *dl = make_data_loc(0, dyndata - base);
+ } else {
+ dyndata += ret;
+ maxlen -= ret;
+ }
}
}
}
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 617e297f46dc..7b2d8f776ae2 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -171,6 +171,8 @@ static void wakeup_trace_open(struct trace_iterator *iter)
{
if (is_graph(iter->tr))
graph_trace_open(iter);
+ else
+ iter->private = NULL;
}
static void wakeup_trace_close(struct trace_iterator *iter)
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index efb51a23a14f..e924c04af627 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -217,9 +217,10 @@ static unsigned long translate_user_vaddr(unsigned long file_offset)
/* Note that we don't verify it, since the code does not come from user space */
static int
-process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
+process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
void *base)
{
+ struct pt_regs *regs = rec;
unsigned long val;
/* 1st stage: get value from context */
@@ -1401,7 +1402,7 @@ static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
const char **filename, u64 *probe_offset,
- bool perf_type_tracepoint)
+ u64 *probe_addr, bool perf_type_tracepoint)
{
const char *pevent = trace_event_name(event->tp_event);
const char *group = event->tp_event->class->system;
@@ -1418,6 +1419,7 @@ int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
: BPF_FD_TYPE_UPROBE;
*filename = tu->filename;
*probe_offset = tu->offset;
+ *probe_addr = 0;
return 0;
}
#endif /* CONFIG_PERF_EVENTS */
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index 83c2a0598c64..33c463967bb3 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -574,7 +574,12 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
}
memcpy(elt->key, key, map->key_size);
- entry->val = elt;
+ /*
+ * Ensure the initialization is visible and
+ * publish the elt.
+ */
+ smp_wmb();
+ WRITE_ONCE(entry->val, elt);
atomic64_inc(&map->hits);
return entry->val;
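
The tracing_map change orders element initialization before the store that publishes it, so a lock-free reader can never see a half-built elt. smp_wmb()+WRITE_ONCE() is the kernel spelling; in portable C11 the same idiom is a release store paired with an acquire load, as in this sketch:

#include <stdatomic.h>

struct elt { int key; int val; };

static _Atomic(struct elt *) slot;

void publish(struct elt *e, int key, int val)
{
	e->key = key;				/* initialize first ... */
	e->val = val;
	/* ... then publish: release orders the plain stores above before it */
	atomic_store_explicit(&slot, e, memory_order_release);
}

struct elt *lookup(void)
{
	/* acquire pairs with the release store in publish() */
	return atomic_load_explicit(&slot, memory_order_acquire);
}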
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index cbd3cf503c90..a3d0e928305c 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -568,7 +568,7 @@ int lockup_detector_offline_cpu(unsigned int cpu)
return 0;
}
-static void lockup_detector_reconfigure(void)
+static void __lockup_detector_reconfigure(void)
{
cpus_read_lock();
watchdog_nmi_stop();
@@ -588,6 +588,13 @@ static void lockup_detector_reconfigure(void)
__lockup_detector_cleanup();
}
+void lockup_detector_reconfigure(void)
+{
+ mutex_lock(&watchdog_mutex);
+ __lockup_detector_reconfigure();
+ mutex_unlock(&watchdog_mutex);
+}
+
/*
* Create the watchdog thread infrastructure and configure the detector(s).
*
@@ -608,13 +615,13 @@ static __init void lockup_detector_setup(void)
return;
mutex_lock(&watchdog_mutex);
- lockup_detector_reconfigure();
+ __lockup_detector_reconfigure();
softlockup_initialized = true;
mutex_unlock(&watchdog_mutex);
}
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
-static void lockup_detector_reconfigure(void)
+static void __lockup_detector_reconfigure(void)
{
cpus_read_lock();
watchdog_nmi_stop();
@@ -622,9 +629,13 @@ static void lockup_detector_reconfigure(void)
watchdog_nmi_start();
cpus_read_unlock();
}
+void lockup_detector_reconfigure(void)
+{
+ __lockup_detector_reconfigure();
+}
static inline void lockup_detector_setup(void)
{
- lockup_detector_reconfigure();
+ __lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
@@ -664,7 +675,7 @@ static void proc_watchdog_update(void)
{
/* Remove impossible cpus to keep sysctl output clean. */
cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
- lockup_detector_reconfigure();
+ __lockup_detector_reconfigure();
}
/*
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 247bf0b1582c..1e8a49dc956e 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -114,14 +114,14 @@ static void watchdog_overflow_callback(struct perf_event *event,
/* Ensure the watchdog never gets throttled */
event->hw.interrupts = 0;
+ if (!watchdog_check_timestamp())
+ return;
+
if (__this_cpu_read(watchdog_nmi_touch) == true) {
__this_cpu_write(watchdog_nmi_touch, false);
return;
}
- if (!watchdog_check_timestamp())
- return;
-
/* check for a hardlockup
* This is done by making sure our timer interrupt
* is incrementing. The timer interrupt should have
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e90f37e22202..a2c4de2a1132 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -684,12 +684,17 @@ static void clear_work_data(struct work_struct *work)
set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}
+static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
+{
+ return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
+}
+
static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
unsigned long data = atomic_long_read(&work->data);
if (data & WORK_STRUCT_PWQ)
- return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
+ return work_struct_pwq(data);
else
return NULL;
}
@@ -717,8 +722,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
assert_rcu_or_pool_mutex();
if (data & WORK_STRUCT_PWQ)
- return ((struct pool_workqueue *)
- (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
+ return work_struct_pwq(data)->pool;
pool_id = data >> WORK_OFFQ_POOL_SHIFT;
if (pool_id == WORK_OFFQ_POOL_NONE)
@@ -739,8 +743,7 @@ static int get_work_pool_id(struct work_struct *work)
unsigned long data = atomic_long_read(&work->data);
if (data & WORK_STRUCT_PWQ)
- return ((struct pool_workqueue *)
- (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
+ return work_struct_pwq(data)->pool->id;
return data >> WORK_OFFQ_POOL_SHIFT;
}
@@ -3049,10 +3052,8 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
if (WARN_ON(!work->func))
return false;
- if (!from_cancel) {
- lock_map_acquire(&work->lockdep_map);
- lock_map_release(&work->lockdep_map);
- }
+ lock_map_acquire(&work->lockdep_map);
+ lock_map_release(&work->lockdep_map);
if (start_flush_work(work, &barr, from_cancel)) {
wait_for_completion(&barr.done);
@@ -5272,9 +5273,13 @@ static int workqueue_apply_unbound_cpumask(void)
list_for_each_entry(wq, &workqueues, list) {
if (!(wq->flags & WQ_UNBOUND))
continue;
+
/* creating multiple pwqs breaks ordering guarantee */
- if (wq->flags & __WQ_ORDERED)
- continue;
+ if (!list_empty(&wq->pwqs)) {
+ if (wq->flags & __WQ_ORDERED_EXPLICIT)
+ continue;
+ wq->flags &= ~__WQ_ORDERED;
+ }
ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
if (!ctx) {
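
work_struct_pwq() gives the mask-and-cast that recovers a pool_workqueue pointer from work->data a single definition instead of three open-coded copies. A user-space sketch of the underlying tagged-pointer idiom, with an illustrative bit layout rather than the kernel's:

#include <stddef.h>
#include <stdint.h>

#define FLAG_PWQ	0x1UL		/* low bit: data currently holds a pointer */
#define WQ_DATA_MASK	(~0x3UL)	/* pointer is at least 4-byte aligned */

struct pool_workqueue { int pool_id; };

static inline struct pool_workqueue *data_to_pwq(unsigned long data)
{
	return (struct pool_workqueue *)(data & WQ_DATA_MASK);
}

static inline struct pool_workqueue *get_pwq(unsigned long data)
{
	return (data & FLAG_PWQ) ? data_to_pwq(data) : NULL;
}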
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dd7044aa1a12..6d79e7c3219c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -253,8 +253,10 @@ config FRAME_WARN
int "Warn for stack frames larger than (needs gcc 4.4)"
range 0 8192
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
- default 1280 if (!64BIT && PARISC)
- default 1024 if (!64BIT && !PARISC)
+ default 2048 if PARISC
+ default 1536 if (!64BIT && XTENSA)
+ default 1280 if KASAN && !64BIT
+ default 1024 if !64BIT
default 2048 if 64BIT
help
Tell gcc to warn at build time for stack frames larger than this.
@@ -1525,8 +1527,14 @@ config NETDEV_NOTIFIER_ERROR_INJECT
If unsure, say N.
config FUNCTION_ERROR_INJECTION
- def_bool y
+ bool "Fault-injections of functions"
depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
+ help
+ Add fault injections into various functions that are annotated with
+ ALLOW_ERROR_INJECTION() in the kernel. BPF may also modify the return
+ value of these functions. This is useful to test error paths of code.
+
+ If unsure, say N.
config FAULT_INJECTION
bool "Fault-injection framework"
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
index 0d3a686b5ba2..fb8c0c5c2bd2 100644
--- a/lib/clz_ctz.c
+++ b/lib/clz_ctz.c
@@ -28,36 +28,16 @@ int __weak __clzsi2(int val)
}
EXPORT_SYMBOL(__clzsi2);
-int __weak __clzdi2(long val);
-int __weak __ctzdi2(long val);
-#if BITS_PER_LONG == 32
-
-int __weak __clzdi2(long val)
+int __weak __clzdi2(u64 val);
+int __weak __clzdi2(u64 val)
{
- return 32 - fls((int)val);
+ return 64 - fls64(val);
}
EXPORT_SYMBOL(__clzdi2);
-int __weak __ctzdi2(long val)
+int __weak __ctzdi2(u64 val);
+int __weak __ctzdi2(u64 val)
{
- return __ffs((u32)val);
+ return __ffs64(val);
}
EXPORT_SYMBOL(__ctzdi2);
-
-#elif BITS_PER_LONG == 64
-
-int __weak __clzdi2(long val)
-{
- return 64 - fls64((u64)val);
-}
-EXPORT_SYMBOL(__clzdi2);
-
-int __weak __ctzdi2(long val)
-{
- return __ffs64((u64)val);
-}
-EXPORT_SYMBOL(__ctzdi2);
-
-#else
-#error BITS_PER_LONG not 32 or 64
-#endif
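
The clz_ctz.c rewrite makes __clzdi2()/__ctzdi2() operate on a u64 regardless of BITS_PER_LONG, collapsing the 32/64-bit #if split. The identity it leans on, clz(v) == 64 - fls64(v), can be spot-checked against the compiler builtins these libgcc helpers shadow (valid only for v != 0, where the builtins are defined):

#include <assert.h>
#include <stdint.h>

static int fls64(uint64_t v)	/* position of highest set bit, 1-based; 0 if v == 0 */
{
	return v ? 64 - __builtin_clzll(v) : 0;
}

int main(void)
{
	uint64_t samples[] = { 1, 0x80, 0xdeadbeefULL, UINT64_MAX };
	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint64_t v = samples[i];
		assert(64 - fls64(v) == __builtin_clzll(v));	/* __clzdi2 */
		assert(((v >> __builtin_ctzll(v)) & 1) == 1);	/* __ctzdi2 finds lowest set bit */
	}
	return 0;
}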
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index 075f3788bbe4..51621fde0b48 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -232,7 +232,8 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
for (index = 0; index < rmap->used; index++) {
glue = rmap->obj[index];
- irq_set_affinity_notifier(glue->notify.irq, NULL);
+ if (glue)
+ irq_set_affinity_notifier(glue->notify.irq, NULL);
}
cpu_rmap_put(rmap);
@@ -267,6 +268,7 @@ static void irq_cpu_rmap_release(struct kref *ref)
struct irq_glue *glue =
container_of(ref, struct irq_glue, notify.kref);
+ glue->rmap->obj[glue->index] = NULL;
cpu_rmap_put(glue->rmap);
kfree(glue);
}
@@ -297,6 +299,7 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
rc = irq_set_affinity_notifier(irq, &glue->notify);
if (rc) {
cpu_rmap_put(glue->rmap);
+ rmap->obj[glue->index] = NULL;
kfree(glue);
}
return rc;
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 48054dbf1b51..b0e4301d7495 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -128,7 +128,7 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
static void fill_pool(void)
{
- gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
+ gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
struct debug_obj *obj;
unsigned long flags;
@@ -218,10 +218,6 @@ static struct debug_obj *__alloc_object(struct hlist_head *list)
return obj;
}
-/*
- * Allocate a new object. If the pool is empty, switch off the debugger.
- * Must be called with interrupts disabled.
- */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
@@ -478,6 +474,15 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
struct debug_obj_descr *descr = obj->descr;
static int limit;
+ /*
+ * Don't report if lookup_object_or_alloc() by the current thread
+ * failed because lookup_object_or_alloc()/debug_objects_oom() by a
+ * concurrent thread turned off debug_objects_enabled and cleared
+ * the hash buckets.
+ */
+ if (!debug_objects_enabled)
+ return;
+
if (limit < 5 && descr != descr_test) {
void *hint = descr->debug_hint ?
descr->debug_hint(obj->object) : NULL;
@@ -528,31 +533,74 @@ static void debug_object_is_on_stack(void *addr, int onstack)
WARN_ON(1);
}
+static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
+ struct debug_obj_descr *descr,
+ bool onstack, bool alloc_ifstatic)
+{
+ struct debug_obj *obj = lookup_object(addr, b);
+ enum debug_obj_state state = ODEBUG_STATE_NONE;
+
+ if (likely(obj))
+ return obj;
+
+ /*
+ * debug_object_init() unconditionally allocates untracked
+ * objects. It does not matter whether it is a static object or
+ * not.
+ *
+ * debug_object_assert_init() and debug_object_activate() allow
+ * allocation only if the descriptor callback confirms that the
+ * object is static and considered initialized. For non-static
+ * objects the allocation needs to be done from the fixup callback.
+ */
+ if (unlikely(alloc_ifstatic)) {
+ if (!descr->is_static_object || !descr->is_static_object(addr))
+ return ERR_PTR(-ENOENT);
+ /* Statically allocated objects are considered initialized */
+ state = ODEBUG_STATE_INIT;
+ }
+
+ obj = alloc_object(addr, b, descr);
+ if (likely(obj)) {
+ obj->state = state;
+ debug_object_is_on_stack(addr, onstack);
+ return obj;
+ }
+
+ /* Out of memory. Do the cleanup outside of the locked region */
+ debug_objects_enabled = 0;
+ return NULL;
+}
+
+static void debug_objects_fill_pool(void)
+{
+ /*
+ * On RT enabled kernels the pool refill must happen in preemptible
+ * context:
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+ fill_pool();
+}
+
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
enum debug_obj_state state;
- bool check_stack = false;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
- fill_pool();
+ debug_objects_fill_pool();
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
- obj = lookup_object(addr, db);
- if (!obj) {
- obj = alloc_object(addr, db, descr);
- if (!obj) {
- debug_objects_enabled = 0;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_objects_oom();
- return;
- }
- check_stack = true;
+ obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
+ if (unlikely(!obj)) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_oom();
+ return;
}
switch (obj->state) {
@@ -578,8 +626,6 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- if (check_stack)
- debug_object_is_on_stack(addr, onstack);
}
/**
@@ -619,24 +665,24 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
*/
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
enum debug_obj_state state;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
int ret;
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
if (!debug_objects_enabled)
return 0;
+ debug_objects_fill_pool();
+
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
- obj = lookup_object(addr, db);
- if (obj) {
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ if (likely(!IS_ERR_OR_NULL(obj))) {
bool print_object = false;
switch (obj->state) {
@@ -669,24 +715,16 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
raw_spin_unlock_irqrestore(&db->lock, flags);
- /*
- * We are here when a static object is activated. We
- * let the type specific code confirm whether this is
- * true or not. if true, we just make sure that the
- * static object is tracked in the object tracker. If
- * not, this must be a bug, so we try to fix it up.
- */
- if (descr->is_static_object && descr->is_static_object(addr)) {
- /* track this static object */
- debug_object_init(addr, descr);
- debug_object_activate(addr, descr);
- } else {
- debug_print_object(&o, "activate");
- ret = debug_object_fixup(descr->fixup_activate, addr,
- ODEBUG_STATE_NOTAVAILABLE);
- return ret ? 0 : -EINVAL;
+ /* If NULL the allocation has hit OOM */
+ if (!obj) {
+ debug_objects_oom();
+ return 0;
}
- return 0;
+
+ /* Object is neither static nor tracked. It's not initialized */
+ debug_print_object(&o, "activate");
+ ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
+ return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
@@ -840,6 +878,7 @@ EXPORT_SYMBOL_GPL(debug_object_free);
*/
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
@@ -847,34 +886,25 @@ void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
if (!debug_objects_enabled)
return;
+ debug_objects_fill_pool();
+
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (likely(!IS_ERR_OR_NULL(obj)))
+ return;
- obj = lookup_object(addr, db);
+ /* If NULL the allocation has hit OOM */
if (!obj) {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- raw_spin_unlock_irqrestore(&db->lock, flags);
- /*
- * Maybe the object is static, and we let the type specific
- * code confirm. Track this static object if true, else invoke
- * fixup.
- */
- if (descr->is_static_object && descr->is_static_object(addr)) {
- /* Track this static object */
- debug_object_init(addr, descr);
- } else {
- debug_print_object(&o, "assert_init");
- debug_object_fixup(descr->fixup_assert_init, addr,
- ODEBUG_STATE_NOTAVAILABLE);
- }
+ debug_objects_oom();
return;
}
- raw_spin_unlock_irqrestore(&db->lock, flags);
+ /* Object is neither tracked nor static. It's not initialized. */
+ debug_print_object(&o, "assert_init");
+ debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
diff --git a/lib/dim/dim.c b/lib/dim/dim.c
index 38045d6d0538..e89aaf07bde5 100644
--- a/lib/dim/dim.c
+++ b/lib/dim/dim.c
@@ -54,7 +54,7 @@ void dim_park_tired(struct dim *dim)
}
EXPORT_SYMBOL(dim_park_tired);
-void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
struct dim_stats *curr_stats)
{
/* u32 holds up to 71 minutes, should be enough */
@@ -66,7 +66,7 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
start->comp_ctr);
if (!delta_us)
- return;
+ return false;
curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
@@ -79,5 +79,6 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
else
curr_stats->cpe_ratio = 0;
+ return true;
}
EXPORT_SYMBOL(dim_calc_stats);
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
index dae3b51ac3d9..0e4f3a686f1d 100644
--- a/lib/dim/net_dim.c
+++ b/lib/dim/net_dim.c
@@ -227,7 +227,8 @@ void net_dim(struct dim *dim, struct dim_sample end_sample)
dim->start_sample.event_ctr);
if (nevents < DIM_NEVENTS)
break;
- dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats);
+ if (!dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats))
+ break;
if (net_dim_decision(&curr_stats, dim)) {
dim->state = DIM_APPLY_NEW_PROFILE;
schedule_work(&dim->work);
diff --git a/lib/dim/rdma_dim.c b/lib/dim/rdma_dim.c
index f7e26c7b4749..d32c8b105adc 100644
--- a/lib/dim/rdma_dim.c
+++ b/lib/dim/rdma_dim.c
@@ -88,7 +88,8 @@ void rdma_dim(struct dim *dim, u64 completions)
nevents = curr_sample->event_ctr - dim->start_sample.event_ctr;
if (nevents < DIM_NEVENTS)
break;
- dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats);
+ if (!dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats))
+ break;
if (rdma_dim_decision(&curr_stats, dim)) {
dim->state = DIM_APPLY_NEW_PROFILE;
schedule_work(&dim->work);
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index ccf05719b1ad..1ce2fa9dc5ad 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -329,10 +329,6 @@ static int ddebug_parse_query(char *words[], int nwords,
}
memset(query, 0, sizeof(*query));
- if (modname)
- /* support $modname.dyndbg=<multiple queries> */
- query->module = modname;
-
for (i = 0; i < nwords; i += 2) {
if (!strcmp(words[i], "func")) {
rc = check_set(&query->function, words[i+1], "func");
@@ -381,6 +377,13 @@ static int ddebug_parse_query(char *words[], int nwords,
if (rc)
return rc;
}
+ if (!query->module && modname)
+ /*
+ * support $modname.dyndbg=<multiple queries>, when
+ * not given in the query itself
+ */
+ query->module = modname;
+
vpr_info_dq(query, "parsed");
return 0;
}
@@ -1012,7 +1015,7 @@ static int __init dynamic_debug_init(void)
int n = 0, entries = 0, modct = 0;
int verbose_bytes = 0;
- if (__start___verbose == __stop___verbose) {
+ if (&__start___verbose == &__stop___verbose) {
pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
return 1;
}
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
index e7258d8c252b..4da9707ad33d 100644
--- a/lib/fonts/fonts.c
+++ b/lib/fonts/fonts.c
@@ -132,8 +132,8 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
if (res > 20)
c += 20 - res;
- if ((font_w & (1 << (f->width - 1))) &&
- (font_h & (1 << (f->height - 1))))
+ if ((font_w & (1U << (f->width - 1))) &&
+ (font_h & (1U << (f->height - 1))))
c += 1000;
if (c > cc) {
diff --git a/lib/idr.c b/lib/idr.c
index b2bc190431dd..a90bd348ba03 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(idr_alloc);
* @end: The maximum ID (exclusive).
* @gfp: Memory allocation flags.
*
- * Allocates an unused ID in the range specified by @nextid and @end. If
+ * Allocates an unused ID in the range specified by @start and @end. If
* @end is <= 0, it is treated as one larger than %INT_MAX. This allows
* callers to use @start + N as @end as long as N is within integer range.
* The search for an unused ID will start at the last ID allocated and will
@@ -506,7 +506,7 @@ void ida_free(struct ida *ida, unsigned int id)
goto delete;
xas_store(&xas, xa_mk_value(v));
} else {
- if (!test_bit(bit, bitmap->bitmap))
+ if (!bitmap || !test_bit(bit, bitmap->bitmap))
goto err;
__clear_bit(bit, bitmap->bitmap);
xas_set_mark(&xas, XA_FREE_MARK);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 9d3bda3d49fe..5c6a0b8a2adb 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -455,20 +455,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
}
EXPORT_SYMBOL(iov_iter_init);
-static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
-{
- char *from = kmap_atomic(page);
- memcpy(to, from + offset, len);
- kunmap_atomic(from);
-}
-
-static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
-{
- char *to = kmap_atomic(page);
- memcpy(to + offset, from, len);
- kunmap_atomic(to);
-}
-
static void memzero_page(struct page *page, size_t offset, size_t len)
{
char *addr = kmap_atomic(page);
diff --git a/lib/kobject.c b/lib/kobject.c
index 0c6d17503a11..6666c48f125c 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -144,7 +144,7 @@ static int get_kobj_path_length(struct kobject *kobj)
return length;
}
-static void fill_kobj_path(struct kobject *kobj, char *path, int length)
+static int fill_kobj_path(struct kobject *kobj, char *path, int length)
{
struct kobject *parent;
@@ -153,12 +153,16 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
int cur = strlen(kobject_name(parent));
/* back up enough to print this name with '/' */
length -= cur;
+ if (length <= 0)
+ return -EINVAL;
memcpy(path + length, kobject_name(parent), cur);
*(path + --length) = '/';
}
pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj),
kobj, __func__, path);
+
+ return 0;
}
/**
@@ -173,13 +177,17 @@ char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
char *path;
int len;
+retry:
len = get_kobj_path_length(kobj);
if (len == 0)
return NULL;
path = kzalloc(len, gfp_mask);
if (!path)
return NULL;
- fill_kobj_path(kobj, path, len);
+ if (fill_kobj_path(kobj, path, len)) {
+ kfree(path);
+ goto retry;
+ }
return path;
}
@@ -877,6 +885,11 @@ int kset_register(struct kset *k)
if (!k)
return -EINVAL;
+ if (!k->kobj.ktype) {
+ pr_err("must have a ktype to be initialized properly!\n");
+ return -EINVAL;
+ }
+
kset_init(k);
err = kobject_add_internal(&k->kobj);
if (err)
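
kobject_get_path() measures the path, allocates, then fills; if a concurrent rename grows the path between those steps, fill_kobj_path() now reports the would-be overflow and the caller retries with a fresh length instead of writing past the buffer. A user-space sketch of the measure/alloc/fill/retry loop; current_name() is a hypothetical stand-in for state another thread may change:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical: in the kernel this is the kobject name, which can grow. */
const char *current_name(void) { return "/devices/platform/serial8250"; }

char *get_path(void)
{
	for (;;) {
		size_t len = strlen(current_name()) + 1;	/* measure */
		char *path = malloc(len);			/* allocate */
		if (!path)
			return NULL;
		/* fill: snprintf reports truncation instead of overflowing */
		if ((size_t)snprintf(path, len, "%s", current_name()) < len)
			return path;
		free(path);		/* lost the race: retry with a new length */
	}
}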
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 5d5424b51b74..413daa72a3d8 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -20,7 +20,11 @@
bool __list_add_valid(struct list_head *new, struct list_head *prev,
struct list_head *next)
{
- if (CHECK_DATA_CORRUPTION(next->prev != prev,
+ if (CHECK_DATA_CORRUPTION(prev == NULL,
+ "list_add corruption. prev is NULL.\n") ||
+ CHECK_DATA_CORRUPTION(next == NULL,
+ "list_add corruption. next is NULL.\n") ||
+ CHECK_DATA_CORRUPTION(next->prev != prev,
"list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n",
prev, next->prev, next) ||
CHECK_DATA_CORRUPTION(prev->next != next,
@@ -42,7 +46,11 @@ bool __list_del_entry_valid(struct list_head *entry)
prev = entry->prev;
next = entry->next;
- if (CHECK_DATA_CORRUPTION(next == LIST_POISON1,
+ if (CHECK_DATA_CORRUPTION(next == NULL,
+ "list_del corruption, %px->next is NULL\n", entry) ||
+ CHECK_DATA_CORRUPTION(prev == NULL,
+ "list_del corruption, %px->prev is NULL\n", entry) ||
+ CHECK_DATA_CORRUPTION(next == LIST_POISON1,
"list_del corruption, %px->next is LIST_POISON1 (%px)\n",
entry, LIST_POISON1) ||
CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
diff --git a/lib/lockref.c b/lib/lockref.c
index 5b34bbd3eba8..81ac5f355242 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -24,7 +24,6 @@
} \
if (!--retry) \
break; \
- cpu_relax(); \
} \
} while (0)
diff --git a/lib/mpi/mpi-cmp.c b/lib/mpi/mpi-cmp.c
index d25e9e96c310..ceaebe181cd7 100644
--- a/lib/mpi/mpi-cmp.c
+++ b/lib/mpi/mpi-cmp.c
@@ -25,8 +25,12 @@ int mpi_cmp_ui(MPI u, unsigned long v)
mpi_limb_t limb = v;
mpi_normalize(u);
- if (!u->nlimbs && !limb)
- return 0;
+ if (u->nlimbs == 0) {
+ if (v == 0)
+ return 0;
+ else
+ return -1;
+ }
if (u->sign)
return -1;
if (u->nlimbs > 1)
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index eead4b339466..4f73db248009 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -397,7 +397,8 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
while (sg_miter_next(&miter)) {
buff = miter.addr;
- len = miter.length;
+ len = min_t(unsigned, miter.length, nbytes);
+ nbytes -= len;
for (x = 0; x < len; x++) {
a <<= 8;
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 0d84f79cb4b5..b5ce5e46c06e 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
+#include <linux/nospec.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -169,6 +170,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
if (type <= 0 || type > maxtype)
return 0;
+ type = array_index_nospec(type, maxtype + 1);
pt = &policy[type];
BUG_ON(pt->type > NLA_TYPE_MAX);
@@ -377,6 +379,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
}
continue;
}
+ type = array_index_nospec(type, maxtype + 1);
if (policy) {
int err = validate_nla(nla, maxtype, policy,
validate, extack);
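
Both nlattr.c hunks clamp the attribute type with array_index_nospec() after the range check, so a mispredicted "type > maxtype" branch cannot speculatively index policy[] out of bounds. A simplified, portable analogue of the mask-based clamp (the real helper is architecture-tuned; this only shows the idea, and assumes idx and size stay below 2^63):

#include <stddef.h>

/* Returns idx when idx < size, else 0, with no data-dependent branch. */
static inline size_t index_nospec(size_t idx, size_t size)
{
	/* all-ones when idx < size (idx - size wraps), all-zeroes otherwise */
	size_t mask = (size_t)0 - ((idx - size) >> (sizeof(size_t) * 8 - 1));
	return idx & mask;
}

/* usage mirroring the hunk above: type = index_nospec(type, maxtype + 1); */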
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
index 21016b32d313..2b24ea6c9497 100644
--- a/lib/notifier-error-inject.c
+++ b/lib/notifier-error-inject.c
@@ -15,7 +15,7 @@ static int debugfs_errno_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set,
+DEFINE_SIMPLE_ATTRIBUTE_SIGNED(fops_errno, debugfs_errno_get, debugfs_errno_set,
"%lld\n");
static struct dentry *debugfs_create_errno(const char *name, umode_t mode,
diff --git a/lib/once.c b/lib/once.c
index 59149bf3bfb4..351f66aad310 100644
--- a/lib/once.c
+++ b/lib/once.c
@@ -66,3 +66,33 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
once_disable_jump(once_key, mod);
}
EXPORT_SYMBOL(__do_once_done);
+
+static DEFINE_MUTEX(once_mutex);
+
+bool __do_once_slow_start(bool *done)
+ __acquires(once_mutex)
+{
+ mutex_lock(&once_mutex);
+ if (*done) {
+ mutex_unlock(&once_mutex);
+ /* Keep sparse happy by restoring an even lock count on
+ * this mutex. In case we return here, we don't call into
+ * __do_once_done but return early in the DO_ONCE_SLOW() macro.
+ */
+ __acquire(once_mutex);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(__do_once_slow_start);
+
+void __do_once_slow_done(bool *done, struct static_key_true *once_key,
+ struct module *mod)
+ __releases(once_mutex)
+{
+ *done = true;
+ mutex_unlock(&once_mutex);
+ once_disable_jump(once_key, mod);
+}
+EXPORT_SYMBOL(__do_once_slow_done);
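
__do_once_slow_start()/__do_once_slow_done() implement a sleepable run-once: the mutex serializes racers, the first caller runs the body and marks *done, and later callers observe *done under the lock and bail out. A pthread sketch of the same start/done split (pthread_once() exists, but spelling the protocol out mirrors the kernel shape):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t once_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Returns true if the caller should run the once-body; mutex stays held. */
static bool do_once_start(bool *done)
{
	pthread_mutex_lock(&once_mutex);
	if (*done) {
		pthread_mutex_unlock(&once_mutex);
		return false;		/* someone else already ran it */
	}
	return true;
}

static void do_once_done(bool *done)
{
	*done = true;			/* still under the mutex */
	pthread_mutex_unlock(&once_mutex);
}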
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index c8fa1d274530..4121aab98b06 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1039,7 +1039,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
{
struct radix_tree_node *node, *parent;
unsigned long maxindex;
- int uninitialized_var(offset);
+ int offset;
radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex)
@@ -1144,7 +1144,6 @@ static void set_iter_tags(struct radix_tree_iter *iter,
void __rcu **radix_tree_iter_resume(void __rcu **slot,
struct radix_tree_iter *iter)
{
- slot++;
iter->index = __radix_tree_iter_add(iter, 1);
iter->next_index = iter->index;
iter->tags = 0;
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index e01a93f46f83..ce945c17980b 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -26,10 +26,16 @@
*/
int ___ratelimit(struct ratelimit_state *rs, const char *func)
{
+ /* Paired with WRITE_ONCE() in .proc_handler().
+ * Changing the two values separately could be inconsistent
+ * and some messages could be lost. (See: net_ratelimit_state).
+ */
+ int interval = READ_ONCE(rs->interval);
+ int burst = READ_ONCE(rs->burst);
unsigned long flags;
int ret;
- if (!rs->interval)
+ if (!interval)
return 1;
/*
@@ -44,7 +50,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
if (!rs->begin)
rs->begin = jiffies;
- if (time_is_before_jiffies(rs->begin + rs->interval)) {
+ if (time_is_before_jiffies(rs->begin + interval)) {
if (rs->missed) {
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
printk_deferred(KERN_WARNING
@@ -56,7 +62,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
rs->begin = jiffies;
rs->printed = 0;
}
- if (rs->burst && rs->burst > rs->printed) {
+ if (burst && burst > rs->printed) {
rs->printed++;
ret = 1;
} else {
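
___ratelimit() now snapshots interval and burst exactly once at entry, so a .proc_handler() writer flipping both tunables mid-call can no longer leave the function mixing an old interval with a new burst. A C11 sketch of the snapshot-at-entry pattern, with illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int rs_interval = 5;	/* both written by a config handler */
static _Atomic int rs_burst = 10;

bool ratelimit_allow(int *printed)
{
	/* read each tunable once; use only the local copies below */
	int interval = atomic_load_explicit(&rs_interval, memory_order_relaxed);
	int burst = atomic_load_explicit(&rs_burst, memory_order_relaxed);

	if (!interval)
		return true;		/* rate limiting disabled */
	if (burst && burst > *printed) {
		(*printed)++;
		return true;
	}
	return false;
}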
diff --git a/lib/test_blackhole_dev.c b/lib/test_blackhole_dev.c
index 4c40580a99a3..f247089d63c0 100644
--- a/lib/test_blackhole_dev.c
+++ b/lib/test_blackhole_dev.c
@@ -29,7 +29,6 @@ static int __init test_blackholedev_init(void)
{
struct ipv6hdr *ip6h;
struct sk_buff *skb;
- struct ethhdr *ethh;
struct udphdr *uh;
int data_len;
int ret;
@@ -61,7 +60,7 @@ static int __init test_blackholedev_init(void)
ip6h->saddr = in6addr_loopback;
ip6h->daddr = in6addr_loopback;
/* Ether */
- ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr));
+ skb_push(skb, sizeof(struct ethhdr));
skb_set_mac_header(skb, 0);
skb->protocol = htons(ETH_P_IPV6);
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 251213c872b5..dd3850ec1dfa 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -38,6 +38,7 @@ struct test_batched_req {
bool sent;
const struct firmware *fw;
const char *name;
+ const char *fw_buf;
struct completion completion;
struct task_struct *task;
struct device *dev;
@@ -134,8 +135,14 @@ static void __test_release_all_firmware(void)
for (i = 0; i < test_fw_config->num_requests; i++) {
req = &test_fw_config->reqs[i];
- if (req->fw)
+ if (req->fw) {
+ if (req->fw_buf) {
+ kfree_const(req->fw_buf);
+ req->fw_buf = NULL;
+ }
release_firmware(req->fw);
+ req->fw = NULL;
+ }
}
vfree(test_fw_config->reqs);
@@ -166,7 +173,7 @@ static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
{
*dst = kstrndup(name, count, gfp);
if (!*dst)
- return -ENOSPC;
+ return -ENOMEM;
return count;
}
@@ -294,16 +301,26 @@ static ssize_t config_test_show_str(char *dst,
return len;
}
-static int test_dev_config_update_bool(const char *buf, size_t size,
- bool *cfg)
+static inline int __test_dev_config_update_bool(const char *buf, size_t size,
+ bool *cfg)
{
int ret;
- mutex_lock(&test_fw_mutex);
if (strtobool(buf, cfg) < 0)
ret = -EINVAL;
else
ret = size;
+
+ return ret;
+}
+
+static int test_dev_config_update_bool(const char *buf, size_t size,
+ bool *cfg)
+{
+ int ret;
+
+ mutex_lock(&test_fw_mutex);
+ ret = __test_dev_config_update_bool(buf, size, cfg);
mutex_unlock(&test_fw_mutex);
return ret;
@@ -333,7 +350,7 @@ static ssize_t test_dev_config_show_int(char *buf, int cfg)
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+static inline int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
{
int ret;
long new;
@@ -345,14 +362,23 @@ static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
if (new > U8_MAX)
return -EINVAL;
- mutex_lock(&test_fw_mutex);
*(u8 *)cfg = new;
- mutex_unlock(&test_fw_mutex);
/* Always return full write size even if we didn't consume all */
return size;
}
+static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+{
+ int ret;
+
+ mutex_lock(&test_fw_mutex);
+ ret = __test_dev_config_update_u8(buf, size, cfg);
+ mutex_unlock(&test_fw_mutex);
+
+ return ret;
+}
+
static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
{
u8 val;
@@ -385,10 +411,10 @@ static ssize_t config_num_requests_store(struct device *dev,
mutex_unlock(&test_fw_mutex);
goto out;
}
- mutex_unlock(&test_fw_mutex);
- rc = test_dev_config_update_u8(buf, count,
- &test_fw_config->num_requests);
+ rc = __test_dev_config_update_u8(buf, count,
+ &test_fw_config->num_requests);
+ mutex_unlock(&test_fw_mutex);
out:
return rc;
@@ -483,12 +509,14 @@ static ssize_t trigger_request_store(struct device *dev,
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
- return -ENOSPC;
+ return -ENOMEM;
pr_info("loading '%s'\n", name);
mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
+ if (test_fw_config->reqs)
+ __test_release_all_firmware();
test_firmware = NULL;
rc = request_firmware(&test_firmware, name, dev);
if (rc) {
@@ -524,13 +552,15 @@ static ssize_t trigger_async_request_store(struct device *dev,
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
- return -ENOSPC;
+ return -ENOMEM;
pr_info("loading '%s'\n", name);
mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
test_firmware = NULL;
+ if (test_fw_config->reqs)
+ __test_release_all_firmware();
rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL,
NULL, trigger_async_request_cb);
if (rc) {
@@ -567,12 +597,14 @@ static ssize_t trigger_custom_fallback_store(struct device *dev,
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
- return -ENOSPC;
+ return -ENOMEM;
pr_info("loading '%s' using custom fallback mechanism\n", name);
mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
+ if (test_fw_config->reqs)
+ __test_release_all_firmware();
test_firmware = NULL;
rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
dev, GFP_KERNEL, NULL,
@@ -616,7 +648,7 @@ static int test_fw_run_batch_request(void *data)
test_buf = kzalloc(TEST_FIRMWARE_BUF_SIZE, GFP_KERNEL);
if (!test_buf)
- return -ENOSPC;
+ return -ENOMEM;
req->rc = request_firmware_into_buf(&req->fw,
req->name,
@@ -625,6 +657,8 @@ static int test_fw_run_batch_request(void *data)
TEST_FIRMWARE_BUF_SIZE);
if (!req->fw)
kfree(test_buf);
+ else
+ req->fw_buf = test_buf;
} else {
req->rc = test_fw_config->req_firmware(&req->fw,
req->name,
@@ -664,6 +698,11 @@ static ssize_t trigger_batched_requests_store(struct device *dev,
mutex_lock(&test_fw_mutex);
+ if (test_fw_config->reqs) {
+ rc = -EBUSY;
+ goto out_bail;
+ }
+
test_fw_config->reqs =
vzalloc(array3_size(sizeof(struct test_batched_req),
test_fw_config->num_requests, 2));
@@ -680,6 +719,7 @@ static ssize_t trigger_batched_requests_store(struct device *dev,
req->fw = NULL;
req->idx = i;
req->name = test_fw_config->name;
+ req->fw_buf = NULL;
req->dev = dev;
init_completion(&req->completion);
req->task = kthread_run(test_fw_run_batch_request, req,
@@ -762,6 +802,11 @@ ssize_t trigger_batched_requests_async_store(struct device *dev,
mutex_lock(&test_fw_mutex);
+ if (test_fw_config->reqs) {
+ rc = -EBUSY;
+ goto out_bail;
+ }
+
test_fw_config->reqs =
vzalloc(array3_size(sizeof(struct test_batched_req),
test_fw_config->num_requests, 2));
@@ -779,6 +824,7 @@ ssize_t trigger_batched_requests_async_store(struct device *dev,
for (i = 0; i < test_fw_config->num_requests; i++) {
req = &test_fw_config->reqs[i];
req->name = test_fw_config->name;
+ req->fw_buf = NULL;
req->fw = NULL;
req->idx = i;
init_completion(&req->completion);
@@ -940,6 +986,7 @@ static int __init test_firmware_init(void)
rc = misc_register(&test_fw_misc_device);
if (rc) {
+ __test_firmware_config_free();
kfree(test_fw_config);
pr_err("could not register misc device: %d\n", rc);
return rc;
diff --git a/lib/test_ida.c b/lib/test_ida.c
index b06880625961..55105baa19da 100644
--- a/lib/test_ida.c
+++ b/lib/test_ida.c
@@ -150,6 +150,45 @@ static void ida_check_conv(struct ida *ida)
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
+/*
+ * Check various situations where we attempt to free an ID we don't own.
+ */
+static void ida_check_bad_free(struct ida *ida)
+{
+ unsigned long i;
+
+ printk("vvv Ignore \"not allocated\" warnings\n");
+ /* IDA is empty; all of these will fail */
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+
+ /* IDA contains a single value entry */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 3, GFP_KERNEL) != 3);
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+
+ /* IDA contains a single bitmap */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 1023, GFP_KERNEL) != 1023);
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+
+ /* IDA contains a tree */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, (1 << 20) - 1, GFP_KERNEL) != (1 << 20) - 1);
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+ printk("^^^ \"not allocated\" warnings over\n");
+
+ ida_free(ida, 3);
+ ida_free(ida, 1023);
+ ida_free(ida, (1 << 20) - 1);
+
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
static DEFINE_IDA(ida);
static int ida_checks(void)
@@ -162,6 +201,7 @@ static int ida_checks(void)
ida_check_leaf(&ida, 1024 * 64);
ida_check_max(&ida);
ida_check_conv(&ida);
+ ida_check_bad_free(&ida);
printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
return (tests_run != tests_passed) ? 0 : -EINVAL;
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index ab00c79423a5..470f8260d4e0 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -86,7 +86,7 @@ static int __init test_pages(int *total_failures)
int failures = 0, num_tests = 0;
int i;
- for (i = 0; i < 10; i++)
+ for (i = 0; i < MAX_ORDER; i++)
num_tests += do_alloc_pages_order(i, &failures);
REPORT_FAILURES_IN_FN();
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index b352903c50e3..0a22ae48af61 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -60,10 +60,12 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
struct ts_bm *bm = ts_config_priv(conf);
unsigned int i, text_len, consumed = state->offset;
const u8 *text;
- int shift = bm->patlen - 1, bs;
+ int bs;
const u8 icase = conf->flags & TS_IGNORECASE;
for (;;) {
+ int shift = bm->patlen - 1;
+
text_len = conf->get_next_block(consumed, &text, conf, state);
if (unlikely(text_len == 0))
diff --git a/lib/usercopy.c b/lib/usercopy.c
index cbb4d9ec00f2..82881e0bcaa1 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/uaccess.h>
#include <linux/bitops.h>
+#include <linux/nospec.h>
/* out-of-line parts */
@@ -10,6 +11,12 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
unsigned long res = n;
might_fault();
if (likely(access_ok(from, n))) {
+ /*
+ * Ensure that bad access_ok() speculation will not
+ * lead to nasty side effects *after* the copy is
+ * finished:
+ */
+ barrier_nospec();
kasan_check_write(to, n);
res = raw_copy_from_user(to, from, n);
}
diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile
index c415a685d61b..e814061d6aa0 100644
--- a/lib/vdso/Makefile
+++ b/lib/vdso/Makefile
@@ -17,6 +17,6 @@ $(error ARCH_REL_TYPE_ABS is not set)
endif
quiet_cmd_vdso_check = VDSOCHK $@
- cmd_vdso_check = if $(OBJDUMP) -R $@ | egrep -h "$(ARCH_REL_TYPE_ABS)"; \
+ cmd_vdso_check = if $(OBJDUMP) -R $@ | grep -E -h "$(ARCH_REL_TYPE_ABS)"; \
then (echo >&2 "$@: dynamic relocations are not supported"; \
rm -f $@; /bin/false); fi
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 45f57fd2db64..5667fb746a1f 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -38,7 +38,7 @@ u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
}
#endif
-static int do_hres(const struct vdso_data *vd, clockid_t clk,
+static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
struct __kernel_timespec *ts)
{
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
@@ -68,8 +68,8 @@ static int do_hres(const struct vdso_data *vd, clockid_t clk,
return 0;
}
-static void do_coarse(const struct vdso_data *vd, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
+ struct __kernel_timespec *ts)
{
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
u32 seq;
@@ -79,6 +79,8 @@ static void do_coarse(const struct vdso_data *vd, clockid_t clk,
ts->tv_sec = vdso_ts->sec;
ts->tv_nsec = vdso_ts->nsec;
} while (unlikely(vdso_read_retry(vd, seq)));
+
+ return 0;
}
static __maybe_unused int
@@ -96,15 +98,16 @@ __cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
* clocks are handled in the VDSO directly.
*/
msk = 1U << clock;
- if (likely(msk & VDSO_HRES)) {
- return do_hres(&vd[CS_HRES_COARSE], clock, ts);
- } else if (msk & VDSO_COARSE) {
- do_coarse(&vd[CS_HRES_COARSE], clock, ts);
- return 0;
- } else if (msk & VDSO_RAW) {
- return do_hres(&vd[CS_RAW], clock, ts);
- }
- return -1;
+ if (likely(msk & VDSO_HRES))
+ vd = &vd[CS_HRES_COARSE];
+ else if (msk & VDSO_COARSE)
+ return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
+ else if (msk & VDSO_RAW)
+ vd = &vd[CS_RAW];
+ else
+ return -1;
+
+ return do_hres(vd, clock, ts);
}
static __maybe_unused int
diff --git a/mm/cma.c b/mm/cma.c
index 7de520c0a1db..a9635a560094 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -481,7 +481,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
*/
if (page) {
for (i = 0; i < count; i++)
- page_kasan_tag_reset(page + i);
+ page_kasan_tag_reset(nth_page(page, i));
}
if (ret && !no_warn) {
diff --git a/mm/compaction.c b/mm/compaction.c
index 0758afd6325d..7a2675dbf3cc 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1219,7 +1219,7 @@ move_freelist_tail(struct list_head *freelist, struct page *freepage)
}
static void
-fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
+fast_isolate_around(struct compact_control *cc, unsigned long pfn)
{
unsigned long start_pfn, end_pfn;
struct page *page = pfn_to_page(pfn);
@@ -1236,21 +1236,13 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long
start_pfn = pageblock_start_pfn(pfn);
end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1;
- /* Scan before */
- if (start_pfn != pfn) {
- isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
- if (cc->nr_freepages >= cc->nr_migratepages)
- return;
- }
-
- /* Scan after */
- start_pfn = pfn + nr_isolated;
- if (start_pfn < end_pfn)
- isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
+ isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
/* Skip this pageblock in the future as it's full or nearly full */
if (cc->nr_freepages < cc->nr_migratepages)
set_pageblock_skip(page);
+
+ return;
}
/* Search orders in round-robin fashion */
@@ -1422,7 +1414,7 @@ fast_isolate_freepages(struct compact_control *cc)
return cc->free_pfn;
low_pfn = page_to_pfn(page);
- fast_isolate_around(cc, low_pfn, nr_isolated);
+ fast_isolate_around(cc, low_pfn);
return low_pfn;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index c89e9be5c6bd..7ec4fcd8a34d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1044,8 +1044,43 @@ struct wait_page_queue {
wait_queue_entry_t wait;
};
+/*
+ * The page wait code treats the "wait->flags" somewhat unusually, because
+ * we have multiple different kinds of waits, not just the usual "exclusive"
+ * one.
+ *
+ * We have:
+ *
+ * (a) no special bits set:
+ *
+ * We're just waiting for the bit to be released, and when a waker
+ * calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
+ * and remove it from the wait queue.
+ *
+ * Simple and straightforward.
+ *
+ * (b) WQ_FLAG_EXCLUSIVE:
+ *
+ * The waiter is waiting to get the lock, and only one waiter should
+ * be woken up to avoid any thundering herd behavior. We'll set the
+ * WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
+ *
+ * This is the traditional exclusive wait.
+ *
+ * (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
+ *
+ * The waiter is waiting to get the bit, and additionally wants the
+ * lock to be transferred to it for fair lock behavior. If the lock
+ * cannot be taken, we stop walking the wait queue without waking
+ * the waiter.
+ *
+ * This is the "fair lock handoff" case, and in addition to setting
+ * WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
+ * that it now has the lock.
+ */
static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
{
+ unsigned int flags;
struct wait_page_key *key = arg;
struct wait_page_queue *wait_page
= container_of(wait, struct wait_page_queue, wait);
@@ -1058,17 +1093,44 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
return 0;
/*
- * Stop walking if it's locked.
- * Is this safe if put_and_wait_on_page_locked() is in use?
- * Yes: the waker must hold a reference to this page, and if PG_locked
- * has now already been set by another task, that task must also hold
- * a reference to the *same usage* of this page; so there is no need
- * to walk on to wake even the put_and_wait_on_page_locked() callers.
+ * If it's a lock handoff wait, we get the bit for it, and
+ * stop walking (and do not wake it up) if we can't.
*/
- if (test_bit(key->bit_nr, &key->page->flags))
- return -1;
+ flags = wait->flags;
+ if (flags & WQ_FLAG_EXCLUSIVE) {
+ if (test_bit(key->bit_nr, &key->page->flags))
+ return -1;
+ if (flags & WQ_FLAG_CUSTOM) {
+ if (test_and_set_bit(key->bit_nr, &key->page->flags))
+ return -1;
+ flags |= WQ_FLAG_DONE;
+ }
+ }
- return autoremove_wake_function(wait, mode, sync, key);
+ /*
+ * We are holding the wait-queue lock, but the waiter that
+ * is waiting for this will be checking the flags without
+ * any locking.
+ *
+ * So update the flags atomically, and wake up the waiter
+ * afterwards to avoid any races. This store-release pairs
+ * with the load-acquire in wait_on_page_bit_common().
+ */
+ smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
+ wake_up_state(wait->private, mode);
+
+ /*
+ * Ok, we have successfully done what we're waiting for,
+ * and we can unconditionally remove the wait entry.
+ *
+ * Note that this pairs with the "finish_wait()" in the
+ * waiter, and has to be the absolute last thing we do.
+ * After this list_del_init(&wait->entry) the wait entry
+ * might be de-allocated and the process might even have
+ * exited.
+ */
+ list_del_init_careful(&wait->entry);
+ return (flags & WQ_FLAG_EXCLUSIVE) != 0;
}
static void wake_up_page_bit(struct page *page, int bit_nr)
@@ -1147,16 +1209,35 @@ enum behavior {
*/
};
+/*
+ * Attempt to check (or get) the page bit, and mark us done
+ * if successful.
+ */
+static inline bool trylock_page_bit_common(struct page *page, int bit_nr,
+ struct wait_queue_entry *wait)
+{
+ if (wait->flags & WQ_FLAG_EXCLUSIVE) {
+ if (test_and_set_bit(bit_nr, &page->flags))
+ return false;
+ } else if (test_bit(bit_nr, &page->flags))
+ return false;
+
+ wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
+ return true;
+}
+
+/* How many times do we accept lock stealing from under a waiter? */
+int sysctl_page_lock_unfairness = 5;
+
static inline int wait_on_page_bit_common(wait_queue_head_t *q,
struct page *page, int bit_nr, int state, enum behavior behavior)
{
+ int unfairness = sysctl_page_lock_unfairness;
struct wait_page_queue wait_page;
wait_queue_entry_t *wait = &wait_page.wait;
- bool bit_is_set;
bool thrashing = false;
bool delayacct = false;
unsigned long pflags;
- int ret = 0;
if (bit_nr == PG_locked &&
!PageUptodate(page) && PageWorkingset(page)) {
@@ -1169,55 +1250,97 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
}
init_wait(wait);
- wait->flags = behavior == EXCLUSIVE ? WQ_FLAG_EXCLUSIVE : 0;
wait->func = wake_page_function;
wait_page.page = page;
wait_page.bit_nr = bit_nr;
- for (;;) {
- spin_lock_irq(&q->lock);
-
- if (likely(list_empty(&wait->entry))) {
- __add_wait_queue_entry_tail(q, wait);
- SetPageWaiters(page);
- }
+repeat:
+ wait->flags = 0;
+ if (behavior == EXCLUSIVE) {
+ wait->flags = WQ_FLAG_EXCLUSIVE;
+ if (--unfairness < 0)
+ wait->flags |= WQ_FLAG_CUSTOM;
+ }
- set_current_state(state);
+ /*
+ * Do one last check whether we can get the
+ * page bit synchronously.
+ *
+ * Do the SetPageWaiters() marking before that
+ * to let any waker we _just_ missed know they
+ * need to wake us up (otherwise they'll never
+ * even go to the slow case that looks at the
+ * page queue), and add ourselves to the wait
+ * queue if we need to sleep.
+ *
+ * This part needs to be done under the queue
+ * lock to avoid races.
+ */
+ spin_lock_irq(&q->lock);
+ SetPageWaiters(page);
+ if (!trylock_page_bit_common(page, bit_nr, wait))
+ __add_wait_queue_entry_tail(q, wait);
+ spin_unlock_irq(&q->lock);
- spin_unlock_irq(&q->lock);
+ /*
+ * From now on, all the logic will be based on
+ * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
+ * see whether the page bit testing has already
+ * been done by the wake function.
+ *
+ * We can drop our reference to the page.
+ */
+ if (behavior == DROP)
+ put_page(page);
- bit_is_set = test_bit(bit_nr, &page->flags);
- if (behavior == DROP)
- put_page(page);
+ /*
+ * Note that until the "finish_wait()", or until
+ * we see the WQ_FLAG_WOKEN flag, we need to
+ * be very careful with the 'wait->flags', because
+ * we may race with a waker that sets them.
+ */
+ for (;;) {
+ unsigned int flags;
- if (likely(bit_is_set))
- io_schedule();
+ set_current_state(state);
- if (behavior == EXCLUSIVE) {
- if (!test_and_set_bit_lock(bit_nr, &page->flags))
- break;
- } else if (behavior == SHARED) {
- if (!test_bit(bit_nr, &page->flags))
+ /* Loop until we've been woken or interrupted */
+ flags = smp_load_acquire(&wait->flags);
+ if (!(flags & WQ_FLAG_WOKEN)) {
+ if (signal_pending_state(state, current))
break;
+
+ io_schedule();
+ continue;
}
- if (signal_pending_state(state, current)) {
- ret = -EINTR;
+ /* If we were non-exclusive, we're done */
+ if (behavior != EXCLUSIVE)
break;
- }
- if (behavior == DROP) {
- /*
- * We can no longer safely access page->flags:
- * even if CONFIG_MEMORY_HOTREMOVE is not enabled,
- * there is a risk of waiting forever on a page reused
- * for something that keeps it locked indefinitely.
- * But best check for -EINTR above before breaking.
- */
+ /* If the waker got the lock for us, we're done */
+ if (flags & WQ_FLAG_DONE)
break;
- }
+
+ /*
+ * Otherwise, it's our turn to take the lock, so we need to
+ * try to get it ourselves.
+ *
+ * And if that fails, we'll have to retry all of this.
+ */
+ if (unlikely(test_and_set_bit(bit_nr, &page->flags)))
+ goto repeat;
+
+ wait->flags |= WQ_FLAG_DONE;
+ break;
}
+ /*
+ * If a signal happened, this 'finish_wait()' may remove the last
+ * waiter from the wait-queues, but the PageWaiters bit will remain
+ * set. That's ok. The next wakeup will take care of it, and trying
+ * to do it here would be difficult and prone to races.
+ */
finish_wait(q, wait);
if (thrashing) {
@@ -1227,14 +1350,22 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
}
/*
- * A signal could leave PageWaiters set. Clearing it here if
- * !waitqueue_active would be possible (by open-coding finish_wait),
- * but still fail to catch it in the case of wait hash collision. We
- * already can fail to clear wait hash collision cases, so don't
- * bother with signals either.
+ * NOTE! The wait->flags weren't stable until we've done the
+ * 'finish_wait()', and we could have exited the loop above due
+ * to a signal, and had a wakeup event happen after the signal
+ * test but before the 'finish_wait()'.
+ *
+ * So only after the finish_wait() can we reliably determine
+ * if we got woken up or not, so we can now figure out the final
+ * return value based on that state without races.
+ *
+ * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
+ * waiter, but an exclusive one requires WQ_FLAG_DONE.
*/
+ if (behavior == EXCLUSIVE)
+ return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
- return ret;
+ return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
}
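
Stripped to its essentials, the rewrite above is a one-word handshake: the waker publishes WQ_FLAG_WOKEN (plus WQ_FLAG_DONE when it hands over the lock) with release semantics, and the waiter reads the flags with acquire semantics before deciding whether to sleep, finish, or retry. A minimal userspace sketch of that handshake in C11 atomics follows; the flag values and function names are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdbool.h>

#define WOKEN 0x01              /* waker has run for this waiter */
#define DONE  0x02              /* waker also handed us the lock */

struct waiter {
        _Atomic unsigned int flags;
};

/* Waker side: wake the waiter, optionally transferring the lock. */
static void wake(struct waiter *w, bool transferred_lock)
{
        unsigned int f = WOKEN | (transferred_lock ? DONE : 0);

        atomic_store_explicit(&w->flags, f, memory_order_release);
}

/* Exclusive waiter: woken without DONE means "take the lock yourself". */
static bool exclusive_waiter_done(struct waiter *w)
{
        unsigned int f = atomic_load_explicit(&w->flags, memory_order_acquire);

        if (!(f & WOKEN))
                return false;   /* not woken yet: keep sleeping */
        return f & DONE;        /* false: try the bit, else "goto repeat" */
}
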
void wait_on_page_bit(struct page *page, int bit_nr)
@@ -1355,11 +1486,19 @@ void end_page_writeback(struct page *page)
rotate_reclaimable_page(page);
}
+ /*
+ * Writeback does not hold a page reference of its own, relying
+ * on truncation to wait for the clearing of PG_writeback.
+ * But here we must make sure that the page is not freed and
+ * reused before the wake_up_page().
+ */
+ get_page(page);
if (!test_clear_page_writeback(page))
BUG();
smp_mb__after_atomic();
wake_up_page(page, PG_writeback);
+ put_page(page);
}
EXPORT_SYMBOL(end_page_writeback);
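
The get_page()/put_page() pair added above pins the page across the wakeup: writeback holds no page reference of its own, so without the pin the page could be freed and reused between clearing PG_writeback and wake_up_page(). A hedged kernel-style sketch of the same pin-across-wakeup pattern on a generic refcounted object (the struct, bit number, and names are illustrative):

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/refcount.h>
#include <linux/wait_bit.h>

struct obj {
        refcount_t ref;
        unsigned long flags;            /* bit 0: "busy", waiters sleep on it */
};

static void obj_finish_busy(struct obj *o)
{
        refcount_inc(&o->ref);          /* o may be freed once the bit clears */
        clear_bit_unlock(0, &o->flags);
        smp_mb__after_atomic();         /* order the clear before the wakeup */
        wake_up_bit(&o->flags, 0);
        refcount_dec(&o->ref);          /* drop the pin; a real put path would
                                           free the object when it hits zero */
}
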
@@ -3281,7 +3420,7 @@ ssize_t generic_perform_write(struct file *file,
unsigned long offset; /* Offset into pagecache page */
unsigned long bytes; /* Bytes to write to page */
size_t copied; /* Bytes copied from user */
- void *fsdata;
+ void *fsdata = NULL;
offset = (pos & (PAGE_SIZE - 1));
bytes = min_t(unsigned long, PAGE_SIZE - offset,
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index c431ca81dad5..5779c1bc6f44 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -37,7 +37,6 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int ret = 0;
- int err;
int locked;
if (nr_frames == 0)
@@ -74,32 +73,14 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
vec->is_pfns = false;
ret = get_user_pages_locked(start, nr_frames,
gup_flags, (struct page **)(vec->ptrs), &locked);
- goto out;
+ if (likely(ret > 0))
+ goto out;
}
- vec->got_ref = false;
- vec->is_pfns = true;
- do {
- unsigned long *nums = frame_vector_pfns(vec);
-
- while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {
- err = follow_pfn(vma, start, &nums[ret]);
- if (err) {
- if (ret == 0)
- ret = err;
- goto out;
- }
- start += PAGE_SIZE;
- ret++;
- }
- /*
- * We stop if we have enough pages or if VMA doesn't completely
- * cover the tail page.
- */
- if (ret >= nr_frames || start < vma->vm_end)
- break;
- vma = find_vma_intersection(mm, start, start + 1);
- } while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP));
+ /* This used to (racily) return non-refcounted pfns. Let people know */
+ WARN_ONCE(1, "get_vaddr_frames() cannot follow VM_IO mapping");
+ vec->nr_frames = 0;
+
out:
if (locked)
up_read(&mm->mmap_sem);
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 60bb20e8a951..fd3337702513 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -446,7 +446,7 @@ static int __frontswap_shrink(unsigned long target_pages,
void frontswap_shrink(unsigned long target_pages)
{
unsigned long pages_to_unuse = 0;
- int uninitialized_var(type), ret;
+ int type, ret;
/*
* we don't want to hold swap_lock while doing a very
diff --git a/mm/gup.c b/mm/gup.c
index 3ef769529548..2cff8b4d412c 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -188,6 +188,17 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
spinlock_t *ptl;
pte_t *ptep, pte;
+ /*
+ * Handle PTE-level hugetlb mappings here, such as the
+ * contiguous-PTE hugetlb pages on the ARM64 architecture.
+ */
+ if (is_vm_hugetlb_page(vma)) {
+ page = follow_huge_pmd_pte(vma, address, flags);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
+
retry:
if (unlikely(pmd_bad(*pmd)))
return no_page_table(vma, flags);
@@ -333,7 +344,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
if (pmd_none(pmdval))
return no_page_table(vma, flags);
if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
- page = follow_huge_pmd(mm, address, pmd, flags);
+ page = follow_huge_pmd_pte(vma, address, flags);
if (page)
return page;
return no_page_table(vma, flags);
@@ -2240,7 +2251,7 @@ static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned lo
next = pud_addr_end(addr, end);
if (pud_none(pud))
return 0;
- if (unlikely(pud_huge(pud))) {
+ if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
if (!gup_huge_pud(pud, pudp, addr, next, flags,
pages, nr))
return 0;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e50799d7002e..03b57323c53b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2912,6 +2912,9 @@ void deferred_split_huge_page(struct page *page)
if (PageSwapCache(page))
return;
+ if (!list_empty(page_deferred_list(page)))
+ return;
+
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
if (list_empty(page_deferred_list(page))) {
count_vm_event(THP_DEFERRED_SPLIT_PAGE);
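
The early list_empty() test added above is classic double-checked locking: the unlocked read is only a hint that skips the lock on the common already-queued path, and the decision is re-made under the lock before any list manipulation. A minimal sketch of the pattern, with illustrative names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct queue {
        spinlock_t lock;
        struct list_head head;
};

struct item {
        struct list_head node;          /* INIT_LIST_HEAD'ed when unqueued */
};

static void maybe_queue(struct queue *q, struct item *it)
{
        if (!list_empty(&it->node))     /* unlocked hint: probably queued */
                return;

        spin_lock(&q->lock);
        if (list_empty(&it->node))      /* authoritative check under lock */
                list_add_tail(&it->node, &q->head);
        spin_unlock(&q->lock);
}
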
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b6f029a1059f..e4478b62d0f7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2218,11 +2218,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
if (!page)
goto out_uncharge_cgroup;
+ spin_lock(&hugetlb_lock);
if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
SetPagePrivate(page);
h->resv_huge_pages--;
}
- spin_lock(&hugetlb_lock);
list_move(&page->lru, &h->hugepage_activelist);
/* Fall through */
}
@@ -5157,30 +5157,30 @@ follow_huge_pd(struct vm_area_struct *vma,
}
struct page * __weak
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int flags)
+follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags)
{
+ struct hstate *h = hstate_vma(vma);
+ struct mm_struct *mm = vma->vm_mm;
struct page *page = NULL;
spinlock_t *ptl;
- pte_t pte;
+ pte_t *ptep, pte;
+
retry:
- ptl = pmd_lockptr(mm, pmd);
- spin_lock(ptl);
- /*
- * make sure that the address range covered by this pmd is not
- * unmapped from other threads.
- */
- if (!pmd_huge(*pmd))
- goto out;
- pte = huge_ptep_get((pte_t *)pmd);
+ ptep = huge_pte_offset(mm, address, huge_page_size(h));
+ if (!ptep)
+ return NULL;
+
+ ptl = huge_pte_lock(h, mm, ptep);
+ pte = huge_ptep_get(ptep);
if (pte_present(pte)) {
- page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ page = pte_page(pte) +
+ ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
if (flags & FOLL_GET)
get_page(page);
} else {
if (is_hugetlb_entry_migration(pte)) {
spin_unlock(ptl);
- __migration_entry_wait(mm, (pte_t *)pmd, ptl);
+ __migration_entry_wait(mm, ptep, ptl);
goto retry;
}
/*
@@ -5188,7 +5188,7 @@ retry:
* follow_page_mask().
*/
}
-out:
+
spin_unlock(ptl);
return page;
}
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 621782100eaa..4d87df96acc1 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -92,8 +92,8 @@ static void end_report(unsigned long *flags)
pr_err("==================================================================\n");
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irqrestore(&report_lock, *flags);
- if (panic_on_warn)
- panic("panic_on_warn set ...\n");
+ if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
+ check_panic_on_warn("KASAN");
kasan_enable_current();
}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 3c2326568193..f1f98305433e 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1060,6 +1060,7 @@ static void collapse_huge_page(struct mm_struct *mm,
_pmd = pmdp_collapse_flush(vma, address, pmd);
spin_unlock(pmd_ptl);
mmu_notifier_invalidate_range_end(&range);
+ tlb_remove_table_sync_one();
spin_lock(pte_ptl);
isolated = __collapse_huge_page_isolate(vma, address, pte);
@@ -1312,6 +1313,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
spinlock_t *ptl;
int count = 0;
int i;
+ struct mmu_notifier_range range;
if (!vma || !vma->vm_file ||
vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
@@ -1338,6 +1340,19 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
if (!pmd)
goto drop_hpage;
+ /*
+ * We need to lock the mapping so that from here on, only GUP-fast and
+ * hardware page walks can access the parts of the page tables that
+ * we're operating on.
+ */
+ i_mmap_lock_write(vma->vm_file->f_mapping);
+
+ /*
+ * This spinlock should be unnecessary: Nobody else should be accessing
+ * the page tables under spinlock protection here, only
+ * lockless_pages_from_mm() and the hardware page walker can access page
+ * tables while all the high-level locks are held in write mode.
+ */
start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
/* step 1: check all mapped PTEs are to the right huge page */
@@ -1384,12 +1399,23 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
}
/* step 4: collapse pmd */
- ptl = pmd_lock(vma->vm_mm, pmd);
+ /* we make no change to anon, but protect concurrent anon page lookup */
+ if (vma->anon_vma)
+ anon_vma_lock_write(vma->anon_vma);
+
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, haddr,
+ haddr + HPAGE_PMD_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
_pmd = pmdp_collapse_flush(vma, haddr, pmd);
- spin_unlock(ptl);
mm_dec_nr_ptes(mm);
+ tlb_remove_table_sync_one();
+ mmu_notifier_invalidate_range_end(&range);
pte_free(mm, pmd_pgtable(_pmd));
+ if (vma->anon_vma)
+ anon_vma_unlock_write(vma->anon_vma);
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
+
drop_hpage:
unlock_page(hpage);
put_page(hpage);
@@ -1397,6 +1423,7 @@ drop_hpage:
abort:
pte_unmap_unlock(start_pte, ptl);
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
goto drop_hpage;
}
@@ -1446,7 +1473,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* An alternative would be drop the check, but check that page
* table is clear before calling pmdp_collapse_flush() under
* ptl. It has higher chance to recover THP for the VMA, but
- * has higher cost too.
+ * has higher cost too. It would also probably require locking
+ * the anon_vma.
*/
if (vma->anon_vma)
continue;
@@ -1468,12 +1496,19 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
*/
if (down_write_trylock(&mm->mmap_sem)) {
if (!khugepaged_test_exit(mm)) {
- spinlock_t *ptl = pmd_lock(mm, pmd);
+ struct mmu_notifier_range range;
+
+ mmu_notifier_range_init(&range,
+ MMU_NOTIFY_CLEAR, 0,
+ NULL, mm, addr,
+ addr + HPAGE_PMD_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
/* assume page table is clear */
_pmd = pmdp_collapse_flush(vma, addr, pmd);
- spin_unlock(ptl);
mm_dec_nr_ptes(mm);
+ tlb_remove_table_sync_one();
pte_free(mm, pmd_pgtable(_pmd));
+ mmu_notifier_invalidate_range_end(&range);
}
up_write(&mm->mmap_sem);
} else {
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 3761c79137b1..d8cde7292bf9 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1123,7 +1123,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
gfp_t gfp)
{
- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
@@ -1137,7 +1137,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
*/
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);
@@ -1149,7 +1149,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
*/
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);
@@ -1161,7 +1161,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
*/
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);
diff --git a/mm/ksm.c b/mm/ksm.c
index 0bbae78aaaa0..93e1898cd705 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2388,7 +2388,7 @@ next_mm:
static void ksm_do_scan(unsigned int scan_npages)
{
struct rmap_item *rmap_item;
- struct page *uninitialized_var(page);
+ struct page *page;
while (scan_npages-- && likely(!freezing(current))) {
cond_resched();
diff --git a/mm/madvise.c b/mm/madvise.c
index 1107e99e498b..ac8d68c488b5 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -428,8 +428,11 @@ regular_page:
continue;
}
- /* Do not interfere with other mappings of this page */
- if (page_mapcount(page) != 1)
+ /*
+ * Do not interfere with other mappings of this page,
+ * and skip non-LRU pages.
+ */
+ if (!PageLRU(page) || page_mapcount(page) != 1)
continue;
VM_BUG_ON_PAGE(PageTransCompound(page), page);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8fc663545498..0cc7ee52d6bd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1044,7 +1044,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup *prev,
struct mem_cgroup_reclaim_cookie *reclaim)
{
- struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
+ struct mem_cgroup_reclaim_iter *iter;
struct cgroup_subsys_state *css = NULL;
struct mem_cgroup *memcg = NULL;
struct mem_cgroup *pos = NULL;
@@ -2214,6 +2214,9 @@ static void drain_stock(struct memcg_stock_pcp *stock)
{
struct mem_cgroup *old = stock->cached;
+ if (!old)
+ return;
+
if (stock->nr_pages) {
page_counter_uncharge(&old->memory, stock->nr_pages);
if (do_memsw_account())
@@ -2221,6 +2224,8 @@ static void drain_stock(struct memcg_stock_pcp *stock)
css_put_many(&old->css, stock->nr_pages);
stock->nr_pages = 0;
}
+
+ css_put(&old->css);
stock->cached = NULL;
}
@@ -2256,6 +2261,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached != memcg) { /* reset if necessary */
drain_stock(stock);
+ css_get(&memcg->css);
stock->cached = memcg;
}
stock->nr_pages += nr_pages;
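
The css_get()/css_put() pair added to refill_stock()/drain_stock() makes the per-CPU stock own a reference to whatever it caches, so the cached memcg can no longer be freed out from under a later drain. A hedged sketch of the general rule, with obj_get()/obj_put() standing in for css_get()/css_put():

struct obj;
static void obj_get(struct obj *o);
static void obj_put(struct obj *o);

struct cache {
        struct obj *cached;
        unsigned int batched;           /* resources charged to "cached" */
};

static void cache_drain(struct cache *c)
{
        struct obj *old = c->cached;

        if (!old)
                return;                 /* nothing cached, nothing to put */
        /* ...return any batched resources to "old" here... */
        c->batched = 0;
        obj_put(old);                   /* drop the cache's own reference */
        c->cached = NULL;
}

static void cache_install(struct cache *c, struct obj *o)
{
        if (c->cached != o) {
                cache_drain(c);
                obj_get(o);             /* the cache now owns one reference */
                c->cached = o;
        }
}
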
@@ -3775,6 +3781,10 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
+ "Please report your usecase to linux-mm@kvack.org if you "
+ "depend on this functionality.\n");
+
if (val & ~MOVE_MASK)
return -EINVAL;
@@ -4709,6 +4719,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
unsigned int efd, cfd;
struct fd efile;
struct fd cfile;
+ struct dentry *cdentry;
const char *name;
char *endp;
int ret;
@@ -4760,6 +4771,16 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
goto out_put_cfile;
/*
+ * The control file must be a regular cgroup1 file. As a regular cgroup
+ * file can't be renamed, it's safe to access its name afterwards.
+ */
+ cdentry = cfile.file->f_path.dentry;
+ if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
+ ret = -EINVAL;
+ goto out_put_cfile;
+ }
+
+ /*
* Determine the event callbacks and set them in @event. This used
* to be done via struct cftype but cgroup core no longer knows
* about these events. The following is crude but the whole thing
@@ -4767,7 +4788,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
*
* DO NOT ADD NEW FILES.
*/
- name = cfile.file->f_path.dentry->d_name.name;
+ name = cdentry->d_name.name;
if (!strcmp(name, "memory.usage_in_bytes")) {
event->register_event = mem_cgroup_usage_register_event;
@@ -4791,7 +4812,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
* automatically removed on cgroup destruction but the removal is
* asynchronous, so take an extra ref on @css.
*/
- cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
+ cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
&memory_cgrp_subsys);
ret = -EINVAL;
if (IS_ERR(cfile_css))
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9030ab0d9d97..c6453f9ffd4d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -986,7 +986,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
* This check implies we don't kill processes if their pages
* are in the swap cache early. Those are always late kills.
*/
- if (!page_mapped(hpage))
+ if (!page_mapped(p))
return true;
if (PageKsm(p)) {
@@ -1030,10 +1030,10 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
if (kill)
collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
- unmap_success = try_to_unmap(hpage, ttu);
+ unmap_success = try_to_unmap(p, ttu);
if (!unmap_success)
pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
- pfn, page_mapcount(hpage));
+ pfn, page_mapcount(p));
/*
* try_to_unmap() might put mlocked page in lru cache, so call
diff --git a/mm/memory.c b/mm/memory.c
index d416e329442d..d514f69d9717 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2872,8 +2872,8 @@ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows)
{
- pgoff_t hba = holebegin >> PAGE_SHIFT;
- pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
+ pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* Check for overflow. */
if (sizeof(holelen) > sizeof(hlen)) {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f7b231f67156..2bf4ab7b2713 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -571,7 +571,8 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
goto unlock;
/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
if (flags & (MPOL_MF_MOVE_ALL) ||
- (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
+ (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
+ !hugetlb_pmd_shared(pte)))
isolate_huge_page(page, qp->pagelist);
unlock:
spin_unlock(ptl);
@@ -1159,7 +1160,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
static struct page *new_page(struct page *page, unsigned long start)
{
struct vm_area_struct *vma;
- unsigned long uninitialized_var(address);
+ unsigned long address;
vma = find_vma(current->mm, start);
while (vma) {
@@ -1554,7 +1555,7 @@ static int kernel_get_mempolicy(int __user *policy,
unsigned long flags)
{
int err;
- int uninitialized_var(pval);
+ int pval;
nodemask_t nodes;
addr = untagged_addr(addr);
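
Several hunks in this patch (frontswap, ksm, mempolicy, percpu, slub, swap, memcontrol) drop the old uninitialized_var() annotation. The macro only silenced -Wmaybe-uninitialized; it never made the value defined, so removing it restores the compiler's ability to flag real bugs. A minimal sketch of what the removed idiom did:

/* The old, now-removed definition was roughly this self-assignment. */
#define uninitialized_var(x) x = x

int pick(int cond)
{
        int uninitialized_var(v);       /* warning hidden, bug not fixed */

        if (cond)
                v = 42;
        return v;                       /* still garbage when !cond */
}
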
diff --git a/mm/migrate.c b/mm/migrate.c
index 6948d6ec0fd0..034b0662fd3b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2343,13 +2343,14 @@ next:
migrate->dst[migrate->npages] = 0;
migrate->src[migrate->npages++] = mpfn;
}
- arch_leave_lazy_mmu_mode();
- pte_unmap_unlock(ptep - 1, ptl);
/* Only flush the TLB if we actually modified any entries */
if (unmapped)
flush_tlb_range(walk->vma, start, end);
+ arch_leave_lazy_mmu_mode();
+ pte_unmap_unlock(ptep - 1, ptl);
+
return 0;
}
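
The reorder above moves the TLB flush back under the page-table lock: once pte_unmap_unlock() runs, a racing thread can act on the modified PTEs while stale TLB entries on other CPUs still permit access through the old mapping. A minimal sketch of the required ordering (the function and its parameters are illustrative, mirroring the walker above):

static void finish_pte_walk(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end, pte_t *ptep, spinlock_t *ptl,
                            bool unmapped)
{
        if (unmapped)
                flush_tlb_range(vma, start, end);       /* still under ptl */
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(ptep - 1, ptl);                /* only now unlock */
}
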
diff --git a/mm/mmap.c b/mm/mmap.c
index f30f2a49ff2f..dad2ca2d27fc 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1679,8 +1679,12 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
return 0;
- /* Do we need to track softdirty? */
- if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
+ /*
+ * Do we need to track softdirty? hugetlb does not support softdirty
+ * tracking yet.
+ */
+ if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY) &&
+ !is_vm_hugetlb_page(vma))
return 1;
/* Specialty mapping? */
@@ -1860,7 +1864,6 @@ unmap_and_free_vma:
/* Undo any partial mapping done by a device driver. */
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
- charged = 0;
if (vm_flags & VM_SHARED)
mapping_unmap_writable(file->f_mapping);
allow_write_and_free_vma:
@@ -2602,11 +2605,28 @@ static void unmap_region(struct mm_struct *mm,
{
struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
struct mmu_gather tlb;
+ struct vm_area_struct *cur_vma;
lru_add_drain();
tlb_gather_mmu(&tlb, mm, start, end);
update_hiwater_rss(mm);
unmap_vmas(&tlb, vma, start, end);
+
+ /*
+ * Ensure we have no stale TLB entries by the time this mapping is
+ * removed from the rmap.
+ * Note that we don't have to worry about nested flushes here because
+ * we're holding the mm semaphore for removing the mapping - so any
+ * concurrent flush in this region has to be coming through the rmap,
+ * and we synchronize against that using the rmap lock.
+ */
+ for (cur_vma = vma; cur_vma; cur_vma = cur_vma->vm_next) {
+ if ((cur_vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0) {
+ tlb_flush_mmu(&tlb);
+ break;
+ }
+ }
+
free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
next ? next->vm_start : USER_PGTABLES_CEILING);
tlb_finish_mmu(&tlb, start, end);
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 7c1b8f67af7b..341aa036b03c 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -117,6 +117,11 @@ static void tlb_remove_table_smp_sync(void *arg)
/* Simply deliver the interrupt */
}
+void tlb_remove_table_sync_one(void)
+{
+ smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+}
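
An empty IPI works as a barrier here because GUP-fast walks page tables with interrupts disabled: a CPU can only take the interrupt after any in-flight lockless walk has finished, so a completed broadcast with wait=1 proves no such walk still sees the detached table. A minimal sketch of the idea, with illustrative names:

#include <linux/smp.h>

static void noop_ipi(void *info)
{
        /* Delivering the interrupt is the whole point. */
}

/* Returns only after every other CPU has taken the IPI (wait == 1). */
static void wait_for_lockless_walkers(void)
{
        smp_call_function(noop_ipi, NULL, 1);
}
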
+
static void tlb_remove_table_one(void *table)
{
/*
diff --git a/mm/mremap.c b/mm/mremap.c
index 8ce1b7632fbb..f6b8c009dd05 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -293,12 +293,10 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
*/
bool moved;
- if (need_rmap_locks)
- take_rmap_locks(vma);
+ take_rmap_locks(vma);
moved = move_normal_pmd(vma, old_addr, new_addr,
old_end, old_pmd, new_pmd);
- if (need_rmap_locks)
- drop_rmap_locks(vma);
+ drop_rmap_locks(vma);
if (moved)
continue;
#endif
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2d658b208319..fdebfaf1873c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1530,7 +1530,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
*/
dtc->wb_thresh = __wb_calc_thresh(dtc);
dtc->wb_bg_thresh = dtc->thresh ?
- div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
+ div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
/*
* In order to avoid the stacked BDI deadlock we need
@@ -2746,12 +2746,6 @@ int test_clear_page_writeback(struct page *page)
} else {
ret = TestClearPageWriteback(page);
}
- /*
- * NOTE: Page might be free now! Writeback doesn't hold a page
- * reference on its own, it relies on truncation to wait for
- * the clearing of PG_writeback. The below can only access
- * page state that is static across allocation cycles.
- */
if (ret) {
dec_lruvec_state(lruvec, NR_WRITEBACK);
dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
@@ -2817,7 +2811,7 @@ EXPORT_SYMBOL(__test_set_page_writeback);
*/
void wait_on_page_writeback(struct page *page)
{
- if (PageWriteback(page)) {
+ while (PageWriteback(page)) {
trace_wait_on_page_writeback(page, page_mapping(page));
wait_on_page_bit(page, PG_writeback);
}
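
The if-to-while change above encodes the basic rule for bit-waiting: a wakeup only means the bit was clear at some instant, and it can be set again before the woken task runs, so the condition must be re-tested after every wakeup. A minimal sketch of the same loop on a generic flag word (bit number and names are illustrative):

#include <linux/sched.h>
#include <linux/wait_bit.h>

#define FLAG_BUSY 0

static void wait_until_idle(unsigned long *flags)
{
        /* A wakeup is a hint, not a guarantee: re-test after each one. */
        while (test_bit(FLAG_BUSY, flags))
                wait_on_bit(flags, FLAG_BUSY, TASK_UNINTERRUPTIBLE);
}
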
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f08ce248af2a..7cadcfe37a51 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4123,6 +4123,30 @@ void fs_reclaim_release(gfp_t gfp_mask)
EXPORT_SYMBOL_GPL(fs_reclaim_release);
#endif
+/*
+ * Zonelists may change due to hotplug during allocation. Detect when zonelists
+ * have been rebuilt so the allocation can be retried. The reader side does
+ * not lock and simply retries the allocation if the zonelist changes. The
+ * writer side is protected by the embedded spin_lock.
+ */
+static DEFINE_SEQLOCK(zonelist_update_seq);
+
+static unsigned int zonelist_iter_begin(void)
+{
+ if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
+ return read_seqbegin(&zonelist_update_seq);
+
+ return 0;
+}
+
+static unsigned int check_retry_zonelist(unsigned int seq)
+{
+ if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
+ return read_seqretry(&zonelist_update_seq, seq);
+
+ return seq;
+}
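
These two helpers give readers the usual seqlock retry pattern: sample the sequence, do the zonelist walk, and redo it if a writer rebuilt the zonelists in between; readers never block. A hedged sketch of the reader side (try_alloc() is hypothetical and stands in for one pass of the real slowpath):

static struct page *alloc_with_zonelist_retry(gfp_t gfp, unsigned int order)
{
        struct page *page;
        unsigned int seq;

        do {
                seq = zonelist_iter_begin();
                page = try_alloc(gfp, order);
        } while (!page && check_retry_zonelist(seq));

        return page;
}
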
+
/* Perform direct synchronous page reclaim */
static int
__perform_reclaim(gfp_t gfp_mask, unsigned int order,
@@ -4430,6 +4454,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
int compaction_retries;
int no_progress_loops;
unsigned int cpuset_mems_cookie;
+ unsigned int zonelist_iter_cookie;
int reserve_flags;
/*
@@ -4440,11 +4465,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
gfp_mask &= ~__GFP_ATOMIC;
-retry_cpuset:
+restart:
compaction_retries = 0;
no_progress_loops = 0;
compact_priority = DEF_COMPACT_PRIORITY;
cpuset_mems_cookie = read_mems_allowed_begin();
+ zonelist_iter_cookie = zonelist_iter_begin();
/*
* The fast path uses conservative alloc_flags to succeed only until
@@ -4617,9 +4643,13 @@ retry:
goto retry;
- /* Deal with possible cpuset update races before we start OOM killing */
- if (check_retry_cpuset(cpuset_mems_cookie, ac))
- goto retry_cpuset;
+ /*
+ * Deal with possible cpuset update races or zonelist updates to avoid
+ * an unnecessary OOM kill.
+ */
+ if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
+ check_retry_zonelist(zonelist_iter_cookie))
+ goto restart;
/* Reclaim has failed us, start killing things */
page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
@@ -4639,9 +4669,13 @@ retry:
}
nopage:
- /* Deal with possible cpuset update races before we fail */
- if (check_retry_cpuset(cpuset_mems_cookie, ac))
- goto retry_cpuset;
+ /*
+ * Deal with possible cpuset update races or zonelist updates to avoid
+ * an unnecessary OOM kill.
+ */
+ if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
+ check_retry_zonelist(zonelist_iter_cookie))
+ goto restart;
/*
* Make sure that __GFP_NOFAIL request doesn't leak out and make sure
@@ -4945,6 +4979,18 @@ refill:
/* reset page count bias and offset to start of new frag */
nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
offset = size - fragsz;
+ if (unlikely(offset < 0)) {
+ /*
+ * The caller is trying to allocate a fragment
+ * with fragsz > PAGE_SIZE but the cache isn't big
+ * enough to satisfy the request, this may
+ * happen in low memory conditions.
+ * We don't release the cache page because
+ * it could make memory pressure worse
+ * so we simply return NULL here.
+ */
+ return NULL;
+ }
}
nc->pagecnt_bias--;
@@ -5770,9 +5816,22 @@ static void __build_all_zonelists(void *data)
int nid;
int __maybe_unused cpu;
pg_data_t *self = data;
- static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
- spin_lock(&lock);
+ /*
+ * Explicitly disable this CPU's interrupts before taking seqlock
+ * to prevent any IRQ handler from calling into the page allocator
+ * (e.g. GFP_ATOMIC) that could hit zonelist_iter_begin and livelock.
+ */
+ local_irq_save(flags);
+ /*
+ * Explicitly disable this CPU's synchronous printk() before taking
+ * seqlock to prevent any printk() from trying to hold port->lock, for
+ * tty_insert_flip_string_and_push_buffer() on other CPU might be
+ * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
+ */
+ printk_deferred_enter();
+ write_seqlock(&zonelist_update_seq);
#ifdef CONFIG_NUMA
memset(node_load, 0, sizeof(node_load));
@@ -5805,7 +5864,9 @@ static void __build_all_zonelists(void *data)
#endif
}
- spin_unlock(&lock);
+ write_sequnlock(&zonelist_update_seq);
+ printk_deferred_exit();
+ local_irq_restore(flags);
}
static noinline void __init
@@ -8521,7 +8582,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
/* Make sure the range is really isolated. */
if (test_pages_isolated(outer_start, end, false)) {
- pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
+ pr_debug("%s: [%lx, %lx) PFNs busy\n",
__func__, outer_start, end);
ret = -EBUSY;
goto done;
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 4eb09e089881..ec41e7552f37 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -38,7 +38,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
do {
again:
next = pmd_addr_end(addr, end);
- if (pmd_none(*pmd) || !walk->vma) {
+ if (pmd_none(*pmd)) {
if (ops->pte_hole)
err = ops->pte_hole(addr, next, walk);
if (err)
@@ -84,7 +84,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
do {
again:
next = pud_addr_end(addr, end);
- if (pud_none(*pud) || !walk->vma) {
+ if (pud_none(*pud)) {
if (ops->pte_hole)
err = ops->pte_hole(addr, next, walk);
if (err)
@@ -254,7 +254,7 @@ static int __walk_page_range(unsigned long start, unsigned long end,
int err = 0;
struct vm_area_struct *vma = walk->vma;
- if (vma && is_vm_hugetlb_page(vma)) {
+ if (is_vm_hugetlb_page(vma)) {
if (walk->ops->hugetlb_entry)
err = walk_hugetlb_range(start, end, walk);
} else
@@ -324,9 +324,13 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
if (!vma) { /* after the last vma */
walk.vma = NULL;
next = end;
+ if (ops->pte_hole)
+ err = ops->pte_hole(start, next, &walk);
} else if (start < vma->vm_start) { /* outside vma */
walk.vma = NULL;
next = min(end, vma->vm_start);
+ if (ops->pte_hole)
+ err = ops->pte_hole(start, next, &walk);
} else { /* inside vma */
walk.vma = vma;
next = min(end, vma->vm_end);
@@ -344,9 +348,8 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
}
if (err < 0)
break;
- }
- if (walk.vma || walk.ops->pte_hole)
err = __walk_page_range(start, next, &walk);
+ }
if (err)
break;
} while (start = next, start < end);
diff --git a/mm/percpu.c b/mm/percpu.c
index 806bc16f88eb..2e4b286c88c8 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2536,7 +2536,7 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
const size_t static_size = __per_cpu_end - __per_cpu_start;
int nr_groups = 1, nr_units = 0;
size_t size_sum, min_unit_size, alloc_size;
- int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
+ int upa, max_upa, best_upa; /* units_per_alloc */
int last_allocs, group, unit;
unsigned int cpu, tcpu;
struct pcpu_alloc_info *ai;
diff --git a/mm/readahead.c b/mm/readahead.c
index 2fe72cd29b47..95fbc02cae6a 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -592,7 +592,8 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
*/
ret = -EINVAL;
if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
- !S_ISREG(file_inode(f.file)->i_mode))
+ (!S_ISREG(file_inode(f.file)->i_mode) &&
+ !S_ISBLK(file_inode(f.file)->i_mode)))
goto out;
ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
diff --git a/mm/rmap.c b/mm/rmap.c
index 6d80e92688fe..c64da910bb73 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -83,7 +83,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
if (anon_vma) {
atomic_set(&anon_vma->refcount, 1);
- anon_vma->degree = 1; /* Reference for first vma */
+ anon_vma->num_children = 0;
+ anon_vma->num_active_vmas = 0;
anon_vma->parent = anon_vma;
/*
* Initialise the anon_vma root to point to itself. If called
@@ -191,6 +192,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
anon_vma = anon_vma_alloc();
if (unlikely(!anon_vma))
goto out_enomem_free_avc;
+ anon_vma->num_children++; /* self-parent link for new root */
allocated = anon_vma;
}
@@ -200,8 +202,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
if (likely(!vma->anon_vma)) {
vma->anon_vma = anon_vma;
anon_vma_chain_link(vma, avc, anon_vma);
- /* vma reference or self-parent link for new root */
- anon_vma->degree++;
+ anon_vma->num_active_vmas++;
allocated = NULL;
avc = NULL;
}
@@ -280,19 +281,19 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
anon_vma_chain_link(dst, avc, anon_vma);
/*
- * Reuse existing anon_vma if its degree lower than two,
- * that means it has no vma and only one anon_vma child.
+ * Reuse existing anon_vma if it has no vma and only one
+ * anon_vma child.
*
- * Do not chose parent anon_vma, otherwise first child
- * will always reuse it. Root anon_vma is never reused:
+ * Root anon_vma is never reused:
* it has self-parent reference and at least one child.
*/
- if (!dst->anon_vma && anon_vma != src->anon_vma &&
- anon_vma->degree < 2)
+ if (!dst->anon_vma &&
+ anon_vma->num_children < 2 &&
+ anon_vma->num_active_vmas == 0)
dst->anon_vma = anon_vma;
}
if (dst->anon_vma)
- dst->anon_vma->degree++;
+ dst->anon_vma->num_active_vmas++;
unlock_anon_vma_root(root);
return 0;
@@ -342,6 +343,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
anon_vma = anon_vma_alloc();
if (!anon_vma)
goto out_error;
+ anon_vma->num_active_vmas++;
avc = anon_vma_chain_alloc(GFP_KERNEL);
if (!avc)
goto out_error_free_anon_vma;
@@ -362,7 +364,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
vma->anon_vma = anon_vma;
anon_vma_lock_write(anon_vma);
anon_vma_chain_link(vma, avc, anon_vma);
- anon_vma->parent->degree++;
+ anon_vma->parent->num_children++;
anon_vma_unlock_write(anon_vma);
return 0;
@@ -394,7 +396,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
* to free them outside the lock.
*/
if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
- anon_vma->parent->degree--;
+ anon_vma->parent->num_children--;
continue;
}
@@ -402,7 +404,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
anon_vma_chain_free(avc);
}
if (vma->anon_vma)
- vma->anon_vma->degree--;
+ vma->anon_vma->num_active_vmas--;
unlock_anon_vma_root(root);
/*
@@ -413,7 +415,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = avc->anon_vma;
- VM_WARN_ON(anon_vma->degree);
+ VM_WARN_ON(anon_vma->num_children);
+ VM_WARN_ON(anon_vma->num_active_vmas);
put_anon_vma(anon_vma);
list_del(&avc->same_vma);
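
The rename is more than cosmetic: the old degree counter mixed "VMAs currently using this anon_vma" with "child anon_vmas", which made the reuse heuristic in anon_vma_clone() subtly wrong. With the two quantities tracked separately, the condition becomes explicit. A minimal sketch of the split and the reuse test it enables (types and names are illustrative):

struct anon_vma_counts {
        unsigned int num_children;      /* child anon_vmas; root self-links */
        unsigned int num_active_vmas;   /* VMAs currently using this one */
};

static bool reusable_for_new_vma(const struct anon_vma_counts *c)
{
        /* no users and at most one child, as in anon_vma_clone() above */
        return c->num_active_vmas == 0 && c->num_children < 2;
}
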
diff --git a/mm/shmem.c b/mm/shmem.c
index aae2f408f905..264229680ad7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3417,6 +3417,8 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
unsigned long long size;
char *rest;
int opt;
+ kuid_t kuid;
+ kgid_t kgid;
opt = fs_parse(fc, &shmem_fs_parameters, param, &result);
if (opt < 0)
@@ -3452,14 +3454,32 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
ctx->mode = result.uint_32 & 07777;
break;
case Opt_uid:
- ctx->uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(ctx->uid))
+ kuid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(kuid))
goto bad_value;
+
+ /*
+ * The requested uid must be representable in the
+ * filesystem's idmapping.
+ */
+ if (!kuid_has_mapping(fc->user_ns, kuid))
+ goto bad_value;
+
+ ctx->uid = kuid;
break;
case Opt_gid:
- ctx->gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(ctx->gid))
+ kgid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(kgid))
goto bad_value;
+
+ /*
+ * The requested gid must be representable in the
+ * filesystem's idmapping.
+ */
+ if (!kgid_has_mapping(fc->user_ns, kgid))
+ goto bad_value;
+
+ ctx->gid = kgid;
break;
case Opt_huge:
ctx->huge = result.uint_32;
diff --git a/mm/slub.c b/mm/slub.c
index 5211496f6d24..e978f647e92a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1198,7 +1198,7 @@ static noinline int free_debug_processing(
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
void *object = head;
int cnt = 0;
- unsigned long uninitialized_var(flags);
+ unsigned long flags;
int ret = 0;
spin_lock_irqsave(&n->list_lock, flags);
@@ -2888,7 +2888,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
struct page new;
unsigned long counters;
struct kmem_cache_node *n = NULL;
- unsigned long uninitialized_var(flags);
+ unsigned long flags;
stat(s, FREE_SLOWPATH);
@@ -5743,7 +5743,8 @@ static char *create_unique_id(struct kmem_cache *s)
char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
char *p = name;
- BUG_ON(!name);
+ if (!name)
+ return ERR_PTR(-ENOMEM);
*p++ = ':';
/*
@@ -5825,6 +5826,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
* for the symlinks.
*/
name = create_unique_id(s);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
}
s->kobj.kset = kset;
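
Replacing BUG_ON(!name) with an ERR_PTR() return converts an allocation failure from a crash into an error the caller can propagate. A minimal sketch of the ERR_PTR/IS_ERR/PTR_ERR convention used above (names are illustrative):

#include <linux/err.h>
#include <linux/slab.h>

static char *make_unique_id(size_t len)
{
        char *name = kmalloc(len, GFP_KERNEL);

        if (!name)
                return ERR_PTR(-ENOMEM);        /* errno rides in the pointer */
        name[0] = ':';
        return name;
}

static int register_thing(size_t len)
{
        char *name = make_unique_id(len);

        if (IS_ERR(name))
                return PTR_ERR(name);           /* decode back to -ENOMEM */
        /* ...use name for the sysfs registration... */
        kfree(name);
        return 0;
}
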
diff --git a/mm/swap.c b/mm/swap.c
index 38c3fa4308e2..1c5021b57e2c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -763,8 +763,8 @@ void release_pages(struct page **pages, int nr)
LIST_HEAD(pages_to_free);
struct pglist_data *locked_pgdat = NULL;
struct lruvec *lruvec;
- unsigned long uninitialized_var(flags);
- unsigned int uninitialized_var(lock_batch);
+ unsigned long flags;
+ unsigned int lock_batch;
for (i = 0; i < nr; i++) {
struct page *page = pages[i];
diff --git a/mm/swapfile.c b/mm/swapfile.c
index f6964212c6c8..5e444be061a8 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -672,6 +672,7 @@ static void __del_from_avail_list(struct swap_info_struct *p)
{
int nid;
+ assert_spin_locked(&p->lock);
for_each_node(nid)
plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
}
@@ -1061,6 +1062,7 @@ start_over:
goto check_out;
pr_debug("scan_swap_map of si %d failed to find offset\n",
si->type);
+ cond_resched();
spin_lock(&swap_avail_lock);
nextsi:
@@ -1950,10 +1952,14 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
pte_unmap(pte);
swap_map = &si->swap_map[offset];
- vmf.vma = vma;
- vmf.address = addr;
- vmf.pmd = pmd;
- page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, &vmf);
+ page = lookup_swap_cache(entry, vma, addr);
+ if (!page) {
+ vmf.vma = vma;
+ vmf.address = addr;
+ vmf.pmd = pmd;
+ page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
+ &vmf);
+ }
if (!page) {
if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
goto try_next;
@@ -2574,8 +2580,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
spin_unlock(&swap_lock);
goto out_dput;
}
- del_from_avail_list(p);
spin_lock(&p->lock);
+ del_from_avail_list(p);
if (p->prio < 0) {
struct swap_info_struct *si = p;
int nid;
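
Moving del_from_avail_list() under p->lock, together with the new assert_spin_locked() in __del_from_avail_list(), turns the locking rule into something the kernel can enforce at runtime instead of a silent ordering bug. A minimal sketch of the idiom (struct and names are illustrative):

#include <linux/plist.h>
#include <linux/spinlock.h>

struct swapdev {
        spinlock_t lock;
        struct plist_node avail;
};

static PLIST_HEAD(avail_head);

/* Callers must hold d->lock; the assertion makes the rule self-enforcing. */
static void __remove_from_avail(struct swapdev *d)
{
        assert_spin_locked(&d->lock);
        plist_del(&d->avail, &avail_head);
}
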
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 6fa66e2111ea..e8758304a9f3 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -177,6 +177,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
unsigned long dst_start,
unsigned long src_start,
unsigned long len,
+ bool *mmap_changing,
bool zeropage)
{
int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
@@ -308,6 +309,15 @@ retry:
goto out;
}
down_read(&dst_mm->mmap_sem);
+ /*
+ * If memory mappings are changing because of a non-cooperative
+ * operation (e.g. mremap) running in parallel, bail out and
+ * ask the user to retry later.
+ */
+ if (mmap_changing && READ_ONCE(*mmap_changing)) {
+ err = -EAGAIN;
+ break;
+ }
dst_vma = NULL;
goto retry;
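
The re-check above is the standard revalidation step after reacquiring a lock: everything observed before mmap_sem was dropped may be stale, and a non-cooperative event flags that through *mmap_changing so the copy loop can bail with -EAGAIN instead of operating on a moved mapping. A minimal sketch of the step in isolation:

#include <linux/compiler.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>

static int relock_and_revalidate(struct mm_struct *mm, bool *mmap_changing)
{
        down_read(&mm->mmap_sem);
        if (mmap_changing && READ_ONCE(*mmap_changing)) {
                up_read(&mm->mmap_sem);
                return -EAGAIN;         /* caller retries the whole request */
        }
        return 0;                       /* still locked: re-lookup the VMA */
}
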
@@ -389,6 +399,7 @@ extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
unsigned long dst_start,
unsigned long src_start,
unsigned long len,
+ bool *mmap_changing,
bool zeropage);
#endif /* CONFIG_HUGETLB_PAGE */
@@ -506,7 +517,8 @@ retry:
*/
if (is_vm_hugetlb_page(dst_vma))
return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
- src_start, len, zeropage);
+ src_start, len, mmap_changing,
+ zeropage);
if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
goto out_unlock;
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 5b804dbe2d08..486becf6c78d 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -606,7 +606,10 @@ static void mrp_join_timer(struct timer_list *t)
spin_unlock(&app->lock);
mrp_queue_xmit(app);
- mrp_join_timer_arm(app);
+ spin_lock(&app->lock);
+ if (likely(app->active))
+ mrp_join_timer_arm(app);
+ spin_unlock(&app->lock);
}
static void mrp_periodic_timer_arm(struct mrp_applicant *app)
@@ -620,11 +623,12 @@ static void mrp_periodic_timer(struct timer_list *t)
struct mrp_applicant *app = from_timer(app, t, periodic_timer);
spin_lock(&app->lock);
- mrp_mad_event(app, MRP_EVENT_PERIODIC);
- mrp_pdu_queue(app);
+ if (likely(app->active)) {
+ mrp_mad_event(app, MRP_EVENT_PERIODIC);
+ mrp_pdu_queue(app);
+ mrp_periodic_timer_arm(app);
+ }
spin_unlock(&app->lock);
-
- mrp_periodic_timer_arm(app);
}
static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
@@ -872,6 +876,7 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
app->dev = dev;
app->app = appl;
app->mad = RB_ROOT;
+ app->active = true;
spin_lock_init(&app->lock);
skb_queue_head_init(&app->queue);
rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
@@ -900,6 +905,9 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
RCU_INIT_POINTER(port->applicants[appl->type], NULL);
+ spin_lock_bh(&app->lock);
+ app->active = false;
+ spin_unlock_bh(&app->lock);
/* Delete timer and generate a final TX event to flush out
* all pending messages before the applicant is gone.
*/
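
The app->active flag establishes the shutdown-safe timer pattern: the handler only re-arms while active is true, and teardown clears the flag under the same lock before del_timer_sync(), so no new timer can be queued once the applicant is going away. A minimal sketch of the pattern (struct and names are illustrative):

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct app {
        spinlock_t lock;
        bool active;
        unsigned long interval;
        struct timer_list timer;
};

static void do_work(struct app *a)
{
        /* ...queue and transmit PDUs... */
}

static void periodic(struct timer_list *t)
{
        struct app *a = from_timer(a, t, timer);

        spin_lock(&a->lock);
        if (a->active) {
                do_work(a);
                mod_timer(&a->timer, jiffies + a->interval);    /* re-arm */
        }
        spin_unlock(&a->lock);
}

static void teardown(struct app *a)
{
        spin_lock_bh(&a->lock);
        a->active = false;              /* no re-arm after this point */
        spin_unlock_bh(&a->lock);
        del_timer_sync(&a->timer);      /* wait out a running handler */
}
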
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index a313165e7a67..4d2d501991b1 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -407,6 +407,8 @@ int vlan_vids_add_by_dev(struct net_device *dev,
return 0;
list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+ if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
+ continue;
err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
if (err)
goto unwind;
@@ -417,6 +419,8 @@ unwind:
list_for_each_entry_continue_reverse(vid_info,
&vlan_info->vid_list,
list) {
+ if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
+ continue;
vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
@@ -436,8 +440,11 @@ void vlan_vids_del_by_dev(struct net_device *dev,
if (!vlan_info)
return;
- list_for_each_entry(vid_info, &vlan_info->vid_list, list)
+ list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+ if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
+ continue;
vlan_vid_del(dev, vid_info->proto, vid_info->vid);
+ }
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 589615ec490b..0a3a16791621 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -109,8 +109,8 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
* NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
* OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
*/
- if (veth->h_vlan_proto != vlan->vlan_proto ||
- vlan->flags & VLAN_FLAG_REORDER_HDR) {
+ if (vlan->flags & VLAN_FLAG_REORDER_HDR ||
+ veth->h_vlan_proto != vlan->vlan_proto) {
u16 vlan_tci;
vlan_tci = vlan->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
@@ -366,7 +366,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCSHWTSTAMP:
- if (!net_eq(dev_net(dev), &init_net))
+ if (!net_eq(dev_net(dev), dev_net(real_dev)))
break;
/* fall through */
case SIOCGMIIPHY:
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 0db85aeb119b..99b277775257 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -118,12 +118,16 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
}
if (data[IFLA_VLAN_INGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
+ if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
+ continue;
m = nla_data(attr);
vlan_dev_set_ingress_priority(dev, m->to, m->from);
}
}
if (data[IFLA_VLAN_EGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
+ if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
+ continue;
m = nla_data(attr);
err = vlan_dev_set_egress_priority(dev, m->from, m->to);
if (err)
diff --git a/net/9p/client.c b/net/9p/client.c
index b03fce66eb8d..9368a1c0196d 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -893,16 +893,13 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt)
struct p9_fid *fid;
p9_debug(P9_DEBUG_FID, "clnt %p\n", clnt);
- fid = kmalloc(sizeof(struct p9_fid), GFP_KERNEL);
+ fid = kzalloc(sizeof(struct p9_fid), GFP_KERNEL);
if (!fid)
return NULL;
- memset(&fid->qid, 0, sizeof(struct p9_qid));
fid->mode = -1;
fid->uid = current_fsuid();
fid->clnt = clnt;
- fid->rdir = NULL;
- fid->fid = 0;
idr_preload(GFP_KERNEL);
spin_lock_irq(&clnt->lock);
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 03593eb240d8..ef0a82dbbe04 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -228,6 +228,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
uint16_t *nwname = va_arg(ap, uint16_t *);
char ***wnames = va_arg(ap, char ***);
+ *wnames = NULL;
+
errcode = p9pdu_readf(pdu, proto_version,
"w", nwname);
if (!errcode) {
@@ -237,6 +239,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
GFP_NOFS);
if (!*wnames)
errcode = -ENOMEM;
+ else
+ (*wnames)[0] = NULL;
}
if (!errcode) {
@@ -248,8 +252,10 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
proto_version,
"s",
&(*wnames)[i]);
- if (errcode)
+ if (errcode) {
+ (*wnames)[i] = NULL;
break;
+ }
}
}
@@ -257,11 +263,14 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
if (*wnames) {
int i;
- for (i = 0; i < *nwname; i++)
+ for (i = 0; i < *nwname; i++) {
+ if (!(*wnames)[i])
+ break;
kfree((*wnames)[i]);
+ }
+ kfree(*wnames);
+ *wnames = NULL;
}
- kfree(*wnames);
- *wnames = NULL;
}
}
break;
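
The wnames fix establishes a cleanup discipline for partially built arrays: slots are NULL from the start, a failed read leaves its slot NULL, and the error path frees only up to the first never-filled slot. A hedged sketch of the same discipline using kcalloc() (read_string() is hypothetical; it fills *slot on success and leaves it NULL on error):

#include <linux/slab.h>

static int read_string(char **slot);

static int read_strings(char ***out, unsigned int n)
{
        char **names = kcalloc(n, sizeof(*names), GFP_KERNEL);  /* all NULL */
        unsigned int i;
        int err = 0;

        if (!names)
                return -ENOMEM;

        for (i = 0; i < n && !err; i++)
                err = read_string(&names[i]);

        if (err) {
                for (i = 0; i < n && names[i]; i++)
                        kfree(names[i]);        /* only what was built */
                kfree(names);
                names = NULL;
        }
        *out = names;
        return err;
}
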
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 60eb9a2b209b..872568cca926 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -118,7 +118,7 @@ struct p9_conn {
struct list_head unsent_req_list;
struct p9_req_t *rreq;
struct p9_req_t *wreq;
- char tmp_buf[7];
+ char tmp_buf[P9_HDRSZ];
struct p9_fcall rc;
int wpos;
int wsize;
@@ -200,11 +200,15 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
list_move(&req->req_list, &cancel_list);
+ req->status = REQ_STATUS_ERROR;
}
list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
list_move(&req->req_list, &cancel_list);
+ req->status = REQ_STATUS_ERROR;
}
+ spin_unlock(&m->client->lock);
+
list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
list_del(&req->req_list);
@@ -212,7 +216,6 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
req->t_err = err;
p9_client_cb(m->client, req, REQ_STATUS_ERROR);
}
- spin_unlock(&m->client->lock);
}
static __poll_t
@@ -288,7 +291,7 @@ static void p9_read_work(struct work_struct *work)
if (!m->rc.sdata) {
m->rc.sdata = m->tmp_buf;
m->rc.offset = 0;
- m->rc.capacity = 7; /* start by reading header */
+ m->rc.capacity = P9_HDRSZ; /* start by reading header */
}
clear_bit(Rpending, &m->wsched);
@@ -311,7 +314,7 @@ static void p9_read_work(struct work_struct *work)
p9_debug(P9_DEBUG_TRANS, "got new header\n");
/* Header size */
- m->rc.size = 7;
+ m->rc.size = P9_HDRSZ;
err = p9_parse_header(&m->rc, &m->rc.size, NULL, NULL, 0);
if (err) {
p9_debug(P9_DEBUG_ERROR,
@@ -820,11 +823,14 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
goto out_free_ts;
if (!(ts->rd->f_mode & FMODE_READ))
goto out_put_rd;
+ /* prevent workers from hanging on IO when fd is a pipe */
+ ts->rd->f_flags |= O_NONBLOCK;
ts->wr = fget(wfd);
if (!ts->wr)
goto out_put_rd;
if (!(ts->wr->f_mode & FMODE_WRITE))
goto out_put_wr;
+ ts->wr->f_flags |= O_NONBLOCK;
client->trans = ts;
client->status = Connected;
@@ -846,8 +852,10 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
struct file *file;
p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
- if (!p)
+ if (!p) {
+ sock_release(csocket);
return -ENOMEM;
+ }
csocket->sk->sk_allocation = GFP_NOIO;
file = sock_alloc_file(csocket, 0, NULL);
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index b21c3c209815..c00e965c082b 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -385,6 +385,7 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
struct p9_trans_rdma *rdma = client->trans;
struct ib_recv_wr wr;
struct ib_sge sge;
+ int ret;
c->busa = ib_dma_map_single(rdma->cm_id->device,
c->rc.sdata, client->msize,
@@ -402,7 +403,12 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
wr.wr_cqe = &c->cqe;
wr.sg_list = &sge;
wr.num_sge = 1;
- return ib_post_recv(rdma->qp, &wr, NULL);
+
+ ret = ib_post_recv(rdma->qp, &wr, NULL);
+ if (ret)
+ ib_dma_unmap_single(rdma->cm_id->device, c->busa,
+ client->msize, DMA_FROM_DEVICE);
+ return ret;
error:
p9_debug(P9_DEBUG_ERROR, "EIO\n");
@@ -499,7 +505,7 @@ dont_need_post_recv:
if (down_interruptible(&rdma->sq_sem)) {
err = -EINTR;
- goto send_error;
+ goto dma_unmap;
}
/* Mark request as `sent' *before* we actually send it,
@@ -509,11 +515,14 @@ dont_need_post_recv:
req->status = REQ_STATUS_SENT;
err = ib_post_send(rdma->qp, &wr, NULL);
if (err)
- goto send_error;
+ goto dma_unmap;
/* Success */
return 0;
+dma_unmap:
+ ib_dma_unmap_single(rdma->cm_id->device, c->busa,
+ c->req->tc.size, DMA_TO_DEVICE);
/* Handle errors that happened during or while preparing the send: */
send_error:
req->status = REQ_STATUS_ERROR;
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index f582351d84ec..36b5f72e2165 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -394,7 +394,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
struct page **in_pages = NULL, **out_pages = NULL;
struct virtio_chan *chan = client->trans;
struct scatterlist *sgs[4];
- size_t offs;
+ size_t offs = 0;
int need_drop = 0;
int kicked = 0;
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 2779ec1053a0..4a16c5dd1576 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -230,6 +230,14 @@ static void p9_xen_response(struct work_struct *work)
continue;
}
+ if (h.size > req->rc.capacity) {
+ dev_warn(&priv->dev->dev,
+ "requested packet size too big: %d for tag %d with capacity %zd\n",
+ h.size, h.tag, req->rc.capacity);
+ req->status = REQ_STATUS_ERROR;
+ goto recv_error;
+ }
+
memcpy(&req->rc, &h, sizeof(h));
req->rc.offset = 0;
@@ -239,6 +247,7 @@ static void p9_xen_response(struct work_struct *work)
masked_prod, &masked_cons,
XEN_9PFS_RING_SIZE);
+recv_error:
virt_mb();
cons += h.size;
ring->intf->in_cons = cons;
@@ -290,6 +299,10 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
write_unlock(&xen_9pfs_lock);
for (i = 0; i < priv->num_rings; i++) {
+ struct xen_9pfs_dataring *ring = &priv->rings[i];
+
+ cancel_work_sync(&ring->work);
+
if (!priv->rings[i].intf)
break;
if (priv->rings[i].irq > 0)
@@ -380,19 +393,24 @@ out:
return ret;
}
-static int xen_9pfs_front_probe(struct xenbus_device *dev,
- const struct xenbus_device_id *id)
+static int xen_9pfs_front_init(struct xenbus_device *dev)
{
int ret, i;
struct xenbus_transaction xbt;
- struct xen_9pfs_front_priv *priv = NULL;
- char *versions;
+ struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);
+ char *versions, *v;
unsigned int max_rings, max_ring_order, len = 0;
versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
if (IS_ERR(versions))
return PTR_ERR(versions);
- if (strcmp(versions, "1")) {
+ for (v = versions; *v; v++) {
+ if (simple_strtoul(v, &v, 10) == 1) {
+ v = NULL;
+ break;
+ }
+ }
+ if (v) {
kfree(versions);
return -EINVAL;
}
@@ -405,11 +423,6 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev,
if (max_ring_order < XEN_9PFS_RING_ORDER)
return -EINVAL;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->dev = dev;
priv->num_rings = XEN_9PFS_NUM_RINGS;
priv->rings = kcalloc(priv->num_rings, sizeof(*priv->rings),
GFP_KERNEL);
@@ -467,23 +480,35 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev,
goto error;
}
- write_lock(&xen_9pfs_lock);
- list_add_tail(&priv->list, &xen_9pfs_devs);
- write_unlock(&xen_9pfs_lock);
- dev_set_drvdata(&dev->dev, priv);
- xenbus_switch_state(dev, XenbusStateInitialised);
-
return 0;
error_xenbus:
xenbus_transaction_end(xbt, 1);
xenbus_dev_fatal(dev, ret, "writing xenstore");
error:
- dev_set_drvdata(&dev->dev, NULL);
xen_9pfs_front_free(priv);
return ret;
}
+static int xen_9pfs_front_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ struct xen_9pfs_front_priv *priv = NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ dev_set_drvdata(&dev->dev, priv);
+
+ write_lock(&xen_9pfs_lock);
+ list_add_tail(&priv->list, &xen_9pfs_devs);
+ write_unlock(&xen_9pfs_lock);
+
+ return 0;
+}
+
static int xen_9pfs_front_resume(struct xenbus_device *dev)
{
dev_warn(&dev->dev, "suspend/resume unsupported\n");
@@ -502,6 +527,8 @@ static void xen_9pfs_front_changed(struct xenbus_device *dev,
break;
case XenbusStateInitWait:
+ if (!xen_9pfs_front_init(dev))
+ xenbus_switch_state(dev, XenbusStateInitialised);
break;
case XenbusStateConnected:
diff --git a/net/Kconfig b/net/Kconfig
index 0b2fecc83452..48ed37cdd22f 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -203,7 +203,6 @@ config BRIDGE_NETFILTER
source "net/netfilter/Kconfig"
source "net/ipv4/netfilter/Kconfig"
source "net/ipv6/netfilter/Kconfig"
-source "net/decnet/netfilter/Kconfig"
source "net/bridge/netfilter/Kconfig"
endif
@@ -220,7 +219,6 @@ source "net/802/Kconfig"
source "net/bridge/Kconfig"
source "net/dsa/Kconfig"
source "net/8021q/Kconfig"
-source "net/decnet/Kconfig"
source "net/llc/Kconfig"
source "drivers/net/appletalk/Kconfig"
source "net/x25/Kconfig"
diff --git a/net/Makefile b/net/Makefile
index 449fc0b221f8..177b6fbac29c 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -39,7 +39,6 @@ obj-$(CONFIG_AF_KCM) += kcm/
obj-$(CONFIG_STREAM_PARSER) += strparser/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_L2TP) += l2tp/
-obj-$(CONFIG_DECNET) += decnet/
obj-$(CONFIG_PHONET) += phonet/
ifneq ($(CONFIG_VLAN_8021Q),)
obj-y += 8021q/
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 4610c352849b..70cd5f55628d 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1803,15 +1803,14 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
break;
}
case TIOCINQ: {
- /*
- * These two are safe on a single CPU system as only
- * user tasks fiddle here
- */
- struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
+ struct sk_buff *skb;
long amount = 0;
+ spin_lock_irq(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
if (skb)
amount = skb->len - sizeof(struct ddpehdr);
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
rc = put_user(amount, (int __user *)argp);
break;
}
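
The locked peek above (and the matching SIOCINQ hunk in net/atm/ioctl.c just below) closes a use-after-free window: without sk_receive_queue.lock, the skb returned by skb_peek() can be dequeued and freed by a reader before skb->len is loaded. A minimal sketch of the safe pattern (hdr_len stands in for the protocol header subtracted in the DDP case):

#include <net/sock.h>

static long queued_payload(struct sock *sk, size_t hdr_len)
{
        struct sk_buff *skb;
        long amount = 0;

        spin_lock_irq(&sk->sk_receive_queue.lock);
        skb = skb_peek(&sk->sk_receive_queue);
        if (skb)
                amount = skb->len - hdr_len;    /* skb can't be freed here */
        spin_unlock_irq(&sk->sk_receive_queue.lock);
        return amount;
}
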
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index d955b683aa7c..3a2198aabc36 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -71,14 +71,17 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
case SIOCINQ:
{
struct sk_buff *skb;
+ int amount;
if (sock->state != SS_CONNECTED) {
error = -EINVAL;
goto done;
}
+ spin_lock_irq(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
- error = put_user(skb ? skb->len : 0,
- (int __user *)argp) ? -EFAULT : 0;
+ amount = skb ? skb->len : 0;
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
+ error = put_user(amount, (int __user *)argp) ? -EFAULT : 0;
goto done;
}
case ATM_SETSC:
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 46d6cd9a36ae..c4e9538ac144 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -222,11 +222,12 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
if (!page)
return -ENOMEM;
- for (p = page, len = 0; len < nbytes; p++, len++) {
+ for (p = page, len = 0; len < nbytes; p++) {
if (get_user(*p, buff++)) {
free_page((unsigned long)page);
return -EFAULT;
}
+ len += 1;
if (*p == '\0' || *p == '\n')
break;
}
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 45d8e1d5d033..579b66da1d95 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -393,7 +393,7 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
* Each PPPoATM instance has its own tasklet - this is just a
* prototypical one used to initialize them
*/
- static const DECLARE_TASKLET(tasklet_proto, pppoatm_wakeup_sender, 0);
+ static const DECLARE_TASKLET_OLD(tasklet_proto, pppoatm_wakeup_sender);
if (copy_from_user(&be, arg, sizeof be))
return -EFAULT;
if (be.encaps != PPPOATM_ENCAPS_AUTODETECT &&
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 889349c6d90d..04b2235c5c26 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -443,6 +443,7 @@ done:
return error;
}
+#ifdef CONFIG_PROC_FS
void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
{
mutex_lock(&atm_dev_mutex);
@@ -458,3 +459,4 @@ void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_list_next(v, &atm_devs, pos);
}
+#endif
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index a39af0eefad3..aae73f94b2c8 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -501,7 +501,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb,
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_elp_packet *elp_packet;
struct batadv_hard_iface *primary_if;
- struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ struct ethhdr *ethhdr;
bool res;
int ret = NET_RX_DROP;
@@ -509,6 +509,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb,
if (!res)
goto free_skb;
+ ethhdr = eth_hdr(skb);
if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
goto free_skb;
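Editor's note: the fix here (and the matching one in bat_v_ogm.c below) defers the eth_hdr() lookup until after the packet check, which internally calls pskb_may_pull() and can reallocate skb->head, leaving any earlier header pointer dangling. A minimal sketch of the general rule:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Sketch: never cache header pointers across a pskb_may_pull(), since
 * pulling can move skb->head and invalidate the old pointer. */
static int rx_example(struct sk_buff *skb)
{
	struct ethhdr *eth;

	if (!pskb_may_pull(skb, ETH_HLEN))
		return -EINVAL;

	eth = eth_hdr(skb);	/* fetch only after the pull succeeded */
	return is_multicast_ether_addr(eth->h_dest);
}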
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 3165f6ff8ee7..f13a779b8656 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -122,8 +122,10 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- if (hard_iface->if_status != BATADV_IF_ACTIVE)
+ if (hard_iface->if_status != BATADV_IF_ACTIVE) {
+ kfree_skb(skb);
return;
+ }
batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
@@ -994,7 +996,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_ogm2_packet *ogm_packet;
- struct ethhdr *ethhdr = eth_hdr(skb);
+ struct ethhdr *ethhdr;
int ogm_offset;
u8 *packet_pos;
int ret = NET_RX_DROP;
@@ -1008,6 +1010,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN))
goto free_skb;
+ ethhdr = eth_hdr(skb);
if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
goto free_skb;
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index ec7bf5a4a9fc..dda62dcd59c8 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -102,7 +102,6 @@ static void batadv_dat_purge(struct work_struct *work);
*/
static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
{
- INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work,
msecs_to_jiffies(10000));
}
@@ -817,6 +816,7 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
if (!bat_priv->dat.hash)
return -ENOMEM;
+ INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
batadv_dat_start_timer(bat_priv);
batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1,
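Editor's note: INIT_DELAYED_WORK() must run exactly once at setup; re-initializing from the timer-arming path, as the removed line did, can corrupt a work item that is already queued. A minimal sketch of the init-once, queue-many pattern (hypothetical names):

#include <linux/workqueue.h>

static void my_purge_fn(struct work_struct *work);

struct my_priv {
	struct delayed_work purge_work;
};

static void my_init(struct my_priv *p)
{
	INIT_DELAYED_WORK(&p->purge_work, my_purge_fn);	/* once, at setup */
}

static void my_arm(struct my_priv *p)
{
	/* safe to call repeatedly; only (re)queues the work item */
	queue_delayed_work(system_wq, &p->purge_work,
			   msecs_to_jiffies(10000));
}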
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 5f44c94ad707..073019f2e451 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -632,7 +632,19 @@ out:
*/
void batadv_update_min_mtu(struct net_device *soft_iface)
{
- soft_iface->mtu = batadv_hardif_min_mtu(soft_iface);
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ int limit_mtu;
+ int mtu;
+
+ mtu = batadv_hardif_min_mtu(soft_iface);
+
+ if (bat_priv->mtu_set_by_user)
+ limit_mtu = bat_priv->mtu_set_by_user;
+ else
+ limit_mtu = ETH_DATA_LEN;
+
+ mtu = min(mtu, limit_mtu);
+ dev_set_mtu(soft_iface, mtu);
/* Check if the local translate table should be cleaned up to match a
* new (and smaller) MTU.
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index e59c5aa27ee0..f3e102a1201b 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -496,7 +496,10 @@ static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info)
attr = info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED];
atomic_set(&bat_priv->fragmentation, !!nla_get_u8(attr));
+
+ rtnl_lock();
batadv_update_min_mtu(bat_priv->soft_iface);
+ rtnl_unlock();
}
if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN]) {
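Editor's note: batadv_update_min_mtu() now ends in dev_set_mtu() (see the hard-interface.c hunk above), and dev_set_mtu() expects its caller to hold RTNL, so this netlink handler must take it explicitly. A minimal sketch, assuming only that the core helper requires RTNL:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Sketch: any path that ends in dev_set_mtu() must hold RTNL. */
static void set_iface_mtu(struct net_device *dev, int mtu)
{
	rtnl_lock();
	dev_set_mtu(dev, mtu);
	rtnl_unlock();
}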
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 504e3cb67bed..bd06d5b5314e 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -156,11 +156,14 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+
/* check ranges */
if (new_mtu < 68 || new_mtu > batadv_hardif_min_mtu(dev))
return -EINVAL;
dev->mtu = new_mtu;
+ bat_priv->mtu_set_by_user = new_mtu;
return 0;
}
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 515205d7b650..a01b0277bdb1 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -780,7 +780,6 @@ check_roaming:
if (roamed_back) {
batadv_tt_global_free(bat_priv, tt_global,
"Roaming canceled");
- tt_global = NULL;
} else {
/* The global entry has to be marked as ROAMING and
* has to be kept for consistency purpose
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 4d7f1baee7b7..9fdf1be9b99b 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1564,6 +1564,12 @@ struct batadv_priv {
struct net_device *soft_iface;
/**
+ * @mtu_set_by_user: MTU was set once by user
+ * protected by rtnl_lock
+ */
+ int mtu_set_by_user;
+
+ /**
* @bat_counters: mesh internal traffic statistic counters (see
* batadv_counters)
*/
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 52fb6d6d6d58..bccad8c048da 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -1002,6 +1002,7 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
hci_dev_lock(hdev);
hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
hci_dev_unlock(hdev);
+ hci_dev_put(hdev);
if (!hcon)
return -ENOENT;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 5f508c50649d..8031526eeeee 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -735,7 +735,7 @@ static int __init bt_init(void)
err = bt_sysfs_init();
if (err < 0)
- return err;
+ goto cleanup_led;
err = sock_register(&bt_sock_family_ops);
if (err)
@@ -771,6 +771,8 @@ unregister_socket:
sock_unregister(PF_BLUETOOTH);
cleanup_sysfs:
bt_sysfs_cleanup();
+cleanup_led:
+ bt_leds_cleanup();
return err;
}
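Editor's note: the fix extends the standard error-unwind goto ladder so that a bt_sysfs_init() failure also tears down the LED setup done earlier. Each label undoes exactly the steps that succeeded before the failure point, in reverse order. A minimal sketch with hypothetical step functions:

/* Sketch: reverse-order unwind with goto labels. */
int step_a_init(void); void step_a_exit(void);
int step_b_init(void); void step_b_exit(void);
int step_c_init(void);

static int __init demo_init(void)
{
	int err;

	err = step_a_init();
	if (err)
		return err;

	err = step_b_init();
	if (err)
		goto cleanup_a;	/* undo only step A */

	err = step_c_init();
	if (err)
		goto cleanup_b;	/* undo steps B, then A */

	return 0;

cleanup_b:
	step_b_exit();
cleanup_a:
	step_a_exit();
	return err;
}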
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index ee57fa20bac3..d55973cb5b54 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -125,13 +125,11 @@ static void hci_conn_cleanup(struct hci_conn *conn)
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
- hci_conn_del_sysfs(conn);
-
debugfs_remove_recursive(conn->debugfs);
- hci_dev_put(hdev);
+ hci_conn_del_sysfs(conn);
- hci_conn_put(conn);
+ hci_dev_put(hdev);
}
static void le_scan_cleanup(struct work_struct *work)
@@ -1207,6 +1205,15 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
return ERR_PTR(-EOPNOTSUPP);
}
+ /* Reject outgoing connection to device with same BD ADDR against
+ * CVE-2020-26555
+ */
+ if (!bacmp(&hdev->bdaddr, dst)) {
+ bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
+ dst);
+ return ERR_PTR(-ECONNREFUSED);
+ }
+
acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (!acl) {
acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
@@ -1334,12 +1341,10 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
sizeof(cp), &cp);
- /* If we're already encrypted set the REAUTH_PEND flag,
- * otherwise set the ENCRYPT_PEND.
+ /* Set the ENCRYPT_PEND to trigger encryption after
+ * authentication.
*/
- if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
- set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
- else
+ if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
}
@@ -1382,34 +1387,41 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
if (!test_bit(HCI_CONN_AUTH, &conn->flags))
goto auth;
- /* An authenticated FIPS approved combination key has sufficient
- * security for security level 4. */
- if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
- sec_level == BT_SECURITY_FIPS)
- goto encrypt;
-
- /* An authenticated combination key has sufficient security for
- security level 3. */
- if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
- conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
- sec_level == BT_SECURITY_HIGH)
- goto encrypt;
-
- /* An unauthenticated combination key has sufficient security for
- security level 1 and 2. */
- if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
- conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
- (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
- goto encrypt;
-
- /* A combination key has always sufficient security for the security
- levels 1 or 2. High security level requires the combination key
- is generated using maximum PIN code length (16).
- For pre 2.1 units. */
- if (conn->key_type == HCI_LK_COMBINATION &&
- (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
- conn->pin_length == 16))
- goto encrypt;
+ switch (conn->key_type) {
+ case HCI_LK_AUTH_COMBINATION_P256:
+ /* An authenticated FIPS approved combination key has
+ * sufficient security for security level 4 or lower.
+ */
+ if (sec_level <= BT_SECURITY_FIPS)
+ goto encrypt;
+ break;
+ case HCI_LK_AUTH_COMBINATION_P192:
+ /* An authenticated combination key has sufficient security for
+ * security level 3 or lower.
+ */
+ if (sec_level <= BT_SECURITY_HIGH)
+ goto encrypt;
+ break;
+ case HCI_LK_UNAUTH_COMBINATION_P192:
+ case HCI_LK_UNAUTH_COMBINATION_P256:
+ /* An unauthenticated combination key has sufficient security
+ * for security level 2 or lower.
+ */
+ if (sec_level <= BT_SECURITY_MEDIUM)
+ goto encrypt;
+ break;
+ case HCI_LK_COMBINATION:
+ /* A combination key has always sufficient security for the
+ * security levels 2 or lower. High security level requires the
+ * combination key is generated using maximum PIN code length
+ * (16). For pre 2.1 units.
+ */
+ if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
+ goto encrypt;
+ break;
+ default:
+ break;
+ }
auth:
if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2ebb6480b6ec..c60204b639ab 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1582,6 +1582,7 @@ setup_failed:
hdev->flush(hdev);
if (hdev->sent_cmd) {
+ cancel_delayed_work_sync(&hdev->cmd_timer);
kfree_skb(hdev->sent_cmd);
hdev->sent_cmd = NULL;
}
@@ -2128,7 +2129,7 @@ int hci_get_dev_info(void __user *arg)
else
flags = hdev->flags;
- strcpy(di.name, hdev->name);
+ strscpy(di.name, hdev->name, sizeof(di.name));
di.bdaddr = hdev->bdaddr;
di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
di.flags = flags;
@@ -2271,6 +2272,7 @@ static void hci_error_reset(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
+ hci_dev_hold(hdev);
BT_DBG("%s", hdev->name);
if (hdev->hw_error)
@@ -2278,10 +2280,10 @@ static void hci_error_reset(struct work_struct *work)
else
bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
- if (hci_dev_do_close(hdev))
- return;
+ if (!hci_dev_do_close(hdev))
+ hci_dev_do_open(hdev);
- hci_dev_do_open(hdev);
+ hci_dev_put(hdev);
}
void hci_uuids_clear(struct hci_dev *hdev)
@@ -2571,10 +2573,10 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
- struct smp_ltk *k;
+ struct smp_ltk *k, *tmp;
int removed = 0;
- list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
+ list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
continue;
@@ -2590,9 +2592,9 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
- struct smp_irk *k;
+ struct smp_irk *k, *tmp;
- list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
+ list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
continue;
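Editor's note: these two hunks swap list_for_each_entry_rcu(), which is only valid for lockless readers, for the _safe variant, which keeps a lookahead pointer so the body may free the current node. A minimal sketch of deleting while iterating (hypothetical structure names):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct key_entry {
	struct list_head list;
	struct rcu_head rcu;
};

/* Sketch: the _safe variant caches the next node before the body runs,
 * so freeing the current entry cannot break the traversal. */
static void remove_all(struct list_head *head)
{
	struct key_entry *k, *tmp;

	list_for_each_entry_safe(k, tmp, head, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);	/* freed after a grace period */
	}
}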
@@ -3316,7 +3318,11 @@ int hci_register_dev(struct hci_dev *hdev)
if (id < 0)
return id;
- snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
+ error = dev_set_name(&hdev->dev, "hci%u", id);
+ if (error)
+ return error;
+
+ hdev->name = dev_name(&hdev->dev);
hdev->id = id;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
@@ -3338,8 +3344,6 @@ int hci_register_dev(struct hci_dev *hdev)
if (!IS_ERR_OR_NULL(bt_debugfs))
hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
- dev_set_name(&hdev->dev, "%s", hdev->name);
-
error = device_add(&hdev->dev);
if (error < 0)
goto err_wqueue;
@@ -4455,7 +4459,7 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
else
*req_complete = bt_cb(skb)->hci.req_complete;
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
}
spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index ff6625493c9f..2125421ad746 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -25,6 +25,8 @@
/* Bluetooth HCI event handling. */
#include <asm/unaligned.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -1783,7 +1785,8 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
return;
}
- set_bit(HCI_INQUIRY, &hdev->flags);
+ if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
+ set_bit(HCI_INQUIRY, &hdev->flags);
}
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
@@ -2594,6 +2597,16 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
ev->link_type);
+ /* Reject incoming connection from device with same BD ADDR against
+ * CVE-2020-26555
+ */
+ if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
+ bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
+ &ev->bdaddr);
+ hci_reject_conn(hdev, &ev->bdaddr);
+ return;
+ }
+
mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
&flags);
@@ -2793,14 +2806,8 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (!ev->status) {
clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
-
- if (!hci_conn_ssp_enabled(conn) &&
- test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
- bt_dev_info(hdev, "re-auth of legacy device is not possible.");
- } else {
- set_bit(HCI_CONN_AUTH, &conn->flags);
- conn->sec_level = conn->pending_sec_level;
- }
+ set_bit(HCI_CONN_AUTH, &conn->flags);
+ conn->sec_level = conn->pending_sec_level;
} else {
if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
@@ -2809,7 +2816,6 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
}
clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
- clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
if (conn->state == BT_CONFIG) {
if (!ev->status && hci_conn_ssp_enabled(conn)) {
@@ -2855,8 +2861,6 @@ static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s", hdev->name);
- hci_conn_check_pending(hdev);
-
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
@@ -3935,6 +3939,15 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (!conn)
goto unlock;
+ /* Ignore NULL link key against CVE-2020-26555 */
+ if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
+ bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
+ &ev->bdaddr);
+ hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
+ hci_conn_drop(conn);
+ goto unlock;
+ }
+
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
hci_conn_drop(conn);
@@ -4177,6 +4190,19 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
+ switch (ev->link_type) {
+ case SCO_LINK:
+ case ESCO_LINK:
+ break;
+ default:
+ /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
+ * for HCI_Synchronous_Connection_Complete is limited to
+ * either SCO or eSCO
+ */
+ bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
+ return;
+ }
+
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -4413,8 +4439,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
* available, then do not declare that OOB data is
* present.
*/
- if (!memcmp(data->rand256, ZERO_KEY, 16) ||
- !memcmp(data->hash256, ZERO_KEY, 16))
+ if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
+ !crypto_memneq(data->hash256, ZERO_KEY, 16))
return 0x00;
return 0x02;
@@ -4424,8 +4450,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
* not supported by the hardware, then check that if
* P-192 data values are present.
*/
- if (!memcmp(data->rand192, ZERO_KEY, 16) ||
- !memcmp(data->hash192, ZERO_KEY, 16))
+ if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
+ !crypto_memneq(data->hash192, ZERO_KEY, 16))
return 0x00;
return 0x01;
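Editor's note: memcmp() can leak, through timing, how many leading bytes matched; crypto_memneq() compares in constant time and returns nonzero iff the buffers differ, so the "is this value all zeroes?" checks become !crypto_memneq(buf, ZERO_KEY, len). A minimal sketch:

#include <crypto/algapi.h>
#include <linux/types.h>

/* Sketch: constant-time "is this key all zeroes?" check.
 * crypto_memneq() has no early exit on the first mismatching byte. */
static bool key_is_zero(const u8 *key, const u8 *zero_key, size_t len)
{
	return !crypto_memneq(key, zero_key, len);
}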
@@ -4441,9 +4467,12 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (!conn)
+ if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
goto unlock;
+ /* Assume remote supports SSP since it has triggered this event */
+ set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
+
hci_conn_hold(conn);
if (!hci_dev_test_flag(hdev, HCI_MGMT))
@@ -4676,7 +4705,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (!conn)
+ if (!conn || !hci_conn_ssp_enabled(conn))
goto unlock;
/* Reset the authentication requirement to unknown */
@@ -5738,6 +5767,10 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_UNKNOWN_CONN_ID);
+ if (max > hcon->le_conn_max_interval)
+ return send_conn_param_neg_reply(hdev, handle,
+ HCI_ERROR_INVALID_LL_PARAMS);
+
if (hci_check_conn_params(min, max, latency, timeout))
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_INVALID_LL_PARAMS);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index befab857a39b..1d08f7b78fa0 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -430,7 +430,8 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
ni->type = hdev->dev_type;
ni->bus = hdev->bus;
bacpy(&ni->bdaddr, &hdev->bdaddr);
- memcpy(ni->name, hdev->name, 8);
+ memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
+ strnlen(hdev->name, sizeof(ni->name)), '\0');
opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
break;
@@ -881,10 +882,6 @@ static int hci_sock_release(struct socket *sock)
}
sock_orphan(sk);
-
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
-
release_sock(sk);
sock_put(sk);
return 0;
@@ -977,6 +974,34 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
BT_DBG("cmd %x arg %lx", cmd, arg);
+ /* Make sure the cmd is valid before doing anything */
+ switch (cmd) {
+ case HCIGETDEVLIST:
+ case HCIGETDEVINFO:
+ case HCIGETCONNLIST:
+ case HCIDEVUP:
+ case HCIDEVDOWN:
+ case HCIDEVRESET:
+ case HCIDEVRESTAT:
+ case HCISETSCAN:
+ case HCISETAUTH:
+ case HCISETENCRYPT:
+ case HCISETPTYPE:
+ case HCISETLINKPOL:
+ case HCISETLINKMODE:
+ case HCISETACLMTU:
+ case HCISETSCOMTU:
+ case HCIINQUIRY:
+ case HCISETRAW:
+ case HCIGETCONNINFO:
+ case HCIGETAUTHINFO:
+ case HCIBLOCKADDR:
+ case HCIUNBLOCKADDR:
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
lock_sock(sk);
if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
@@ -993,7 +1018,14 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
if (hci_sock_gen_cookie(sk)) {
struct sk_buff *skb;
- if (capable(CAP_NET_ADMIN))
+ /* Perform careful checks before setting the HCI_SOCK_TRUSTED
+ * flag. Make sure that not only the current task but also
+ * the socket opener has the required capability, since
+ * privileged programs can be tricked into making ioctl calls
+ * on HCI sockets, and the socket should not be marked as
+ * trusted simply because the ioctl caller is privileged.
+ */
+ if (sk_capable(sk, CAP_NET_ADMIN))
hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
/* Send event to monitor */
@@ -1985,6 +2017,12 @@ done:
return err;
}
+static void hci_sock_destruct(struct sock *sk)
+{
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+}
+
static const struct proto_ops hci_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
@@ -2035,6 +2073,7 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
sock->state = SS_UNCONNECTED;
sk->sk_state = BT_OPEN;
+ sk->sk_destruct = hci_sock_destruct;
bt_sock_link(&hci_sk_list, sk);
return 0;
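Editor's note: moving the skb_queue_purge() calls out of release() and into sk->sk_destruct means the queues are emptied only once the last socket reference is dropped, rather than while another thread may still be receiving. A minimal sketch of the pattern (hypothetical names):

#include <linux/skbuff.h>
#include <net/sock.h>

/* Sketch: purge queues in sk_destruct, which runs at final sock_put(),
 * not in release(), where concurrent readers may still touch them. */
static void demo_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Installed once at socket creation:
 *	sk->sk_destruct = demo_sock_destruct;
 */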
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index b69d88b88d2e..266112c960ee 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -33,7 +33,7 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("conn %p", conn);
+ bt_dev_dbg(hdev, "conn %p", conn);
conn->dev.type = &bt_link;
conn->dev.class = bt_class;
@@ -46,24 +46,30 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("conn %p", conn);
+ bt_dev_dbg(hdev, "conn %p", conn);
+
+ if (device_is_registered(&conn->dev))
+ return;
dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
- if (device_add(&conn->dev) < 0) {
+ if (device_add(&conn->dev) < 0)
bt_dev_err(hdev, "failed to register connection device");
- return;
- }
-
- hci_dev_hold(hdev);
}
void hci_conn_del_sysfs(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- if (!device_is_registered(&conn->dev))
+ bt_dev_dbg(hdev, "conn %p", conn);
+
+ if (!device_is_registered(&conn->dev)) {
+ /* If device_add() has *not* succeeded, use *only* put_device()
+ * to drop the reference count.
+ */
+ put_device(&conn->dev);
return;
+ }
while (1) {
struct device *dev;
@@ -75,9 +81,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
put_device(dev);
}
- device_del(&conn->dev);
-
- hci_dev_put(hdev);
+ device_unregister(&conn->dev);
}
static void bt_host_release(struct device *dev)
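Editor's note: the teardown rule this hunk encodes: once device_initialize() has run, the embedded kobject holds a reference that must be dropped with put_device() even if device_add() failed; after a successful device_add(), teardown is device_del() plus put_device(), which device_unregister() combines. A minimal sketch:

#include <linux/device.h>

/* Sketch: correct struct device teardown depends on whether
 * device_add() succeeded. */
static void demo_teardown(struct device *dev)
{
	if (!device_is_registered(dev)) {
		put_device(dev);	/* drop only the init reference */
		return;
	}
	device_unregister(dev);		/* device_del() + put_device() */
}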
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index ac98e3b37ab4..a94ec229ad2e 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -433,7 +433,7 @@ static void hidp_set_timer(struct hidp_session *session)
static void hidp_del_timer(struct hidp_session *session)
{
if (session->idle_to > 0)
- del_timer(&session->timer);
+ del_timer_sync(&session->timer);
}
static void hidp_process_report(struct hidp_session *session, int type,
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 286fca6a9ab2..9c06f5ffd1b5 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -60,6 +60,9 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff_head *skbs, u8 event);
+static void l2cap_retrans_timeout(struct work_struct *work);
+static void l2cap_monitor_timeout(struct work_struct *work);
+static void l2cap_ack_timeout(struct work_struct *work);
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
@@ -475,6 +478,9 @@ struct l2cap_chan *l2cap_chan_create(void)
write_unlock(&chan_list_lock);
INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
+ INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
+ INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
+ INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
chan->state = BT_OPEN;
@@ -1813,11 +1819,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
bdaddr_t *dst,
u8 link_type)
{
- struct l2cap_chan *c, *c1 = NULL;
+ struct l2cap_chan *c, *tmp, *c1 = NULL;
read_lock(&chan_list_lock);
- list_for_each_entry(c, &chan_list, global_l) {
+ list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
if (state && c->state != state)
continue;
@@ -1827,7 +1833,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
continue;
- if (c->psm == psm) {
+ if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
int src_match, dst_match;
int src_any, dst_any;
@@ -1835,8 +1841,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
src_match = !bacmp(&c->src, src);
dst_match = !bacmp(&c->dst, dst);
if (src_match && dst_match) {
- c = l2cap_chan_hold_unless_zero(c);
- if (!c)
+ if (!l2cap_chan_hold_unless_zero(c))
continue;
read_unlock(&chan_list_lock);
@@ -2521,14 +2526,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
if (IS_ERR(skb))
return PTR_ERR(skb);
- /* Channel lock is released before requesting new skb and then
- * reacquired thus we need to recheck channel state.
- */
- if (chan->state != BT_CONNECTED) {
- kfree_skb(skb);
- return -ENOTCONN;
- }
-
l2cap_do_send(chan, skb);
return len;
}
@@ -2572,14 +2569,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
if (IS_ERR(skb))
return PTR_ERR(skb);
- /* Channel lock is released before requesting new skb and then
- * reacquired thus we need to recheck channel state.
- */
- if (chan->state != BT_CONNECTED) {
- kfree_skb(skb);
- return -ENOTCONN;
- }
-
l2cap_do_send(chan, skb);
err = len;
break;
@@ -2600,14 +2589,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
*/
err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
- /* The channel could have been closed while segmenting,
- * check that it is still connected.
- */
- if (chan->state != BT_CONNECTED) {
- __skb_queue_purge(&seg_queue);
- err = -ENOTCONN;
- }
-
if (err)
break;
@@ -3164,10 +3145,6 @@ int l2cap_ertm_init(struct l2cap_chan *chan)
chan->rx_state = L2CAP_RX_STATE_RECV;
chan->tx_state = L2CAP_TX_STATE_XMIT;
- INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
- INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
- INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
-
skb_queue_head_init(&chan->srej_q);
err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
@@ -3559,7 +3536,8 @@ done:
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
- if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ if (remote_efs &&
+ test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
chan->remote_id = efs.id;
chan->remote_stype = efs.stype;
chan->remote_msdu = le16_to_cpu(efs.msdu);
@@ -4046,6 +4024,10 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
result = __le16_to_cpu(rsp->result);
status = __le16_to_cpu(rsp->status);
+ if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
+ dcid > L2CAP_CID_DYN_END))
+ return -EPROTO;
+
BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
dcid, scid, result, status);
@@ -4065,12 +4047,23 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
}
}
+ chan = l2cap_chan_hold_unless_zero(chan);
+ if (!chan) {
+ err = -EBADSLT;
+ goto unlock;
+ }
+
err = 0;
l2cap_chan_lock(chan);
switch (result) {
case L2CAP_CR_SUCCESS:
+ if (__l2cap_get_chan_by_dcid(conn, dcid)) {
+ err = -EBADSLT;
+ break;
+ }
+
l2cap_state_change(chan, BT_CONFIG);
chan->ident = 0;
chan->dcid = dcid;
@@ -4094,6 +4087,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
}
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
unlock:
mutex_unlock(&conn->chan_lock);
@@ -4201,7 +4195,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
chan->ident = cmd->ident;
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
- chan->num_conf_rsp++;
+ if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
+ chan->num_conf_rsp++;
/* Reset config buffer. */
chan->conf_len = 0;
@@ -4382,33 +4377,29 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
- mutex_lock(&conn->chan_lock);
-
- chan = __l2cap_get_chan_by_scid(conn, dcid);
+ chan = l2cap_get_chan_by_scid(conn, dcid);
if (!chan) {
- mutex_unlock(&conn->chan_lock);
cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
return 0;
}
- l2cap_chan_hold(chan);
- l2cap_chan_lock(chan);
-
rsp.dcid = cpu_to_le16(chan->scid);
rsp.scid = cpu_to_le16(chan->dcid);
l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
chan->ops->set_shutdown(chan);
+ l2cap_chan_unlock(chan);
+ mutex_lock(&conn->chan_lock);
+ l2cap_chan_lock(chan);
l2cap_chan_del(chan, ECONNRESET);
+ mutex_unlock(&conn->chan_lock);
chan->ops->close(chan);
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
- mutex_unlock(&conn->chan_lock);
-
return 0;
}
@@ -4428,33 +4419,28 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
- mutex_lock(&conn->chan_lock);
-
- chan = __l2cap_get_chan_by_scid(conn, scid);
+ chan = l2cap_get_chan_by_scid(conn, scid);
if (!chan) {
- mutex_unlock(&conn->chan_lock);
return 0;
}
- l2cap_chan_hold(chan);
- l2cap_chan_lock(chan);
-
if (chan->state != BT_DISCONN) {
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
- mutex_unlock(&conn->chan_lock);
return 0;
}
+ l2cap_chan_unlock(chan);
+ mutex_lock(&conn->chan_lock);
+ l2cap_chan_lock(chan);
l2cap_chan_del(chan, 0);
+ mutex_unlock(&conn->chan_lock);
chan->ops->close(chan);
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
- mutex_unlock(&conn->chan_lock);
-
return 0;
}
@@ -5345,7 +5331,13 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
memset(&rsp, 0, sizeof(rsp));
- err = hci_check_conn_params(min, max, latency, to_multiplier);
+ if (max > hcon->le_conn_max_interval) {
+ BT_DBG("requested connection interval exceeds current bounds.");
+ err = -EINVAL;
+ } else {
+ err = hci_check_conn_params(min, max, latency, to_multiplier);
+ }
+
if (err)
rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
else
@@ -5562,6 +5554,19 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
scid, mtu, mps);
+ /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
+ * page 1059:
+ *
+ * Valid range: 0x0001-0x00ff
+ *
+ * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
+ */
+ if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
+ result = L2CAP_CR_LE_BAD_PSM;
+ chan = NULL;
+ goto response;
+ }
+
/* Check if we have socket listening on psm */
pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
&conn->hcon->dst, LE_LINK);
@@ -5724,9 +5729,14 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
if (!chan)
goto done;
+ chan = l2cap_chan_hold_unless_zero(chan);
+ if (!chan)
+ goto done;
+
l2cap_chan_lock(chan);
l2cap_chan_del(chan, ECONNREFUSED);
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
done:
mutex_unlock(&conn->chan_lock);
@@ -6265,6 +6275,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
struct l2cap_ctrl *control,
struct sk_buff *skb, u8 event)
{
+ struct l2cap_ctrl local_control;
int err = 0;
bool skb_in_use = false;
@@ -6289,15 +6300,32 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
chan->buffer_seq = chan->expected_tx_seq;
skb_in_use = true;
+ /* l2cap_reassemble_sdu may free skb, hence invalidate
+ * control, so make a copy in advance to use it after
+ * l2cap_reassemble_sdu returns and to avoid the race
+ * condition, for example:
+ *
+ * The current thread calls:
+ * l2cap_reassemble_sdu
+ * chan->ops->recv == l2cap_sock_recv_cb
+ * __sock_queue_rcv_skb
+ * Another thread calls:
+ * bt_sock_recvmsg
+ * skb_recv_datagram
+ * skb_free_datagram
+ * Then the current thread tries to access control, but
+ * it was freed by skb_free_datagram.
+ */
+ local_control = *control;
err = l2cap_reassemble_sdu(chan, skb, control);
if (err)
break;
- if (control->final) {
+ if (local_control.final) {
if (!test_and_clear_bit(CONN_REJ_ACT,
&chan->conn_state)) {
- control->final = 0;
- l2cap_retransmit_all(chan, control);
+ local_control.final = 0;
+ l2cap_retransmit_all(chan, &local_control);
l2cap_ertm_send(chan);
}
}
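Editor's note: the long comment above states the invariant precisely; the generalizable pattern is copy-before-consume: any field of `control` needed after l2cap_reassemble_sdu() must be read from a stack copy, because the skb backing `control` can be freed by a concurrent receiver once queued. A minimal sketch with entirely hypothetical types and helpers:

/* Sketch: copy-before-consume. Once a buffer is handed to a consumer
 * that may free it, metadata living inside that buffer must be used
 * from a stack copy taken beforehand. */
struct chan;
struct buf;
struct ctrl { u16 txseq; bool final; };

int hand_off(struct chan *c, struct buf *b, struct ctrl *control);
void retransmit_all(struct chan *c, struct ctrl *control);

static int consume(struct chan *c, struct ctrl *control, struct buf *b)
{
	struct ctrl local = *control;	/* copy while still valid */
	int err;

	err = hand_off(c, b, control);	/* may free b, and control with it */
	if (err)
		return err;

	if (local.final)		/* safe: reads the stack copy */
		retransmit_all(c, &local);
	return 0;
}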
@@ -6677,11 +6705,27 @@ static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff *skb)
{
+ /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
+ * the txseq field in advance to use it after l2cap_reassemble_sdu
+ * returns and to avoid the race condition, for example:
+ *
+ * The current thread calls:
+ * l2cap_reassemble_sdu
+ * chan->ops->recv == l2cap_sock_recv_cb
+ * __sock_queue_rcv_skb
+ * Another thread calls:
+ * bt_sock_recvmsg
+ * skb_recv_datagram
+ * skb_free_datagram
+ * Then the current thread tries to access control, but it was freed by
+ * skb_free_datagram.
+ */
+ u16 txseq = control->txseq;
+
BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
chan->rx_state);
- if (l2cap_classify_txseq(chan, control->txseq) ==
- L2CAP_TXSEQ_EXPECTED) {
+ if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
l2cap_pass_to_tx(chan, control);
BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
@@ -6704,8 +6748,8 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
}
}
- chan->last_acked_seq = control->txseq;
- chan->expected_tx_seq = __next_seq(chan, control->txseq);
+ chan->last_acked_seq = txseq;
+ chan->expected_tx_seq = __next_seq(chan, txseq);
return 0;
}
@@ -6959,6 +7003,7 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
return;
}
+ l2cap_chan_hold(chan);
l2cap_chan_lock(chan);
} else {
BT_DBG("unknown cid 0x%4.4x", cid);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 08e9f332adad..712bffa4e8b4 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -45,6 +45,7 @@ static const struct proto_ops l2cap_sock_ops;
static void l2cap_sock_init(struct sock *sk, struct sock *parent);
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
int proto, gfp_t prio, int kern);
+static void l2cap_sock_cleanup_listen(struct sock *parent);
bool l2cap_is_socket(struct socket *sock)
{
@@ -1224,6 +1225,7 @@ static int l2cap_sock_release(struct socket *sock)
if (!sk)
return 0;
+ l2cap_sock_cleanup_listen(sk);
bt_sock_unlink(&l2cap_sk_list, sk);
err = l2cap_sock_shutdown(sock, 2);
@@ -1433,6 +1435,14 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
if (!skb)
return ERR_PTR(err);
+ /* Channel lock is released before requesting new skb and then
+ * reacquired thus we need to recheck channel state.
+ */
+ if (chan->state != BT_CONNECTED) {
+ kfree_skb(skb);
+ return ERR_PTR(-ENOTCONN);
+ }
+
skb->priority = sk->sk_priority;
bt_cb(skb)->l2cap.chan = chan;
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 83a8c48dfaa8..596fa3172642 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -594,7 +594,7 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
ret = rfcomm_dlc_send_frag(d, frag);
if (ret < 0) {
- kfree_skb(frag);
+ dev_kfree_skb_irq(frag);
goto unlock;
}
@@ -1940,7 +1940,7 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
/* Get data directly from socket receive queue without copying it. */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- if (!skb_linearize(skb)) {
+ if (!skb_linearize(skb) && sk->sk_state != BT_CLOSED) {
s = rfcomm_recv_frame(s, skb);
if (!s)
break;
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 1153bbcdff72..591d146a5308 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -114,6 +114,7 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
return ERR_PTR(-EINVAL);
+ size = SKB_DATA_ALIGN(size);
data = kzalloc(size + headroom + tailroom, GFP_USER);
if (!data)
return ERR_PTR(-ENOMEM);
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index f085b1648e66..501f77f0f480 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -35,6 +35,8 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
const unsigned char *dest;
u16 vid = 0;
+ memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
+
rcu_read_lock();
nf_ops = rcu_dereference(nf_br_ops);
if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 86637000f275..4f8eb83976f1 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -43,7 +43,7 @@ int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb
skb->protocol == htons(ETH_P_8021AD))) {
int depth;
- if (!__vlan_get_protocol(skb, skb->protocol, &depth))
+ if (!vlan_get_protocol_and_depth(skb, skb->protocol, &depth))
goto drop;
skb_set_network_header(skb, depth);
@@ -118,7 +118,7 @@ static int deliver_clone(const struct net_bridge_port *prev,
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb) {
- dev->stats.tx_dropped++;
+ DEV_STATS_INC(dev, tx_dropped);
return -ENOMEM;
}
@@ -255,7 +255,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
skb = skb_copy(skb, GFP_ATOMIC);
if (!skb) {
- dev->stats.tx_dropped++;
+ DEV_STATS_INC(dev, tx_dropped);
return;
}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index e2a999890d05..6b650dfc084d 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -157,8 +157,9 @@ void br_manage_promisc(struct net_bridge *br)
* This lets us disable promiscuous mode and write
* this config to hw.
*/
- if (br->auto_cnt == 0 ||
- (br->auto_cnt == 1 && br_auto_port(p)))
+ if ((p->dev->priv_flags & IFF_UNICAST_FLT) &&
+ (br->auto_cnt == 0 ||
+ (br->auto_cnt == 1 && br_auto_port(p))))
br_port_clear_promisc(p);
else
br_port_set_promisc(p);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 464f6a619444..3d07dedd93bd 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -141,12 +141,12 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
if ((mdst && mdst->host_joined) ||
br_multicast_is_router(br)) {
local_rcv = true;
- br->dev->stats.multicast++;
+ DEV_STATS_INC(br->dev, multicast);
}
mcast_hit = true;
} else {
local_rcv = true;
- br->dev->stats.multicast++;
+ DEV_STATS_INC(br->dev, multicast);
}
break;
case BR_PKT_UNICAST:
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 19726d81025d..277b6fb92ac5 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -384,6 +384,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
/* - Bridged-and-DNAT'ed traffic doesn't
* require ip_forwarding. */
if (rt->dst.dev == dev) {
+ skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
goto bridged_dnat;
}
@@ -413,6 +414,7 @@ bridged_dnat:
kfree_skb(skb);
return 0;
}
+ skb_dst_drop(skb);
skb_dst_set_noref(skb, &rt->dst);
}
@@ -866,11 +868,17 @@ static unsigned int ip_sabotage_in(void *priv,
{
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
- if (nf_bridge && !nf_bridge->in_prerouting &&
- !netif_is_l3_master(skb->dev) &&
- !netif_is_l3_slave(skb->dev)) {
- state->okfn(state->net, state->sk, skb);
- return NF_STOLEN;
+ if (nf_bridge) {
+ if (nf_bridge->sabotage_in_done)
+ return NF_ACCEPT;
+
+ if (!nf_bridge->in_prerouting &&
+ !netif_is_l3_master(skb->dev) &&
+ !netif_is_l3_slave(skb->dev)) {
+ nf_bridge->sabotage_in_done = 1;
+ state->okfn(state->net, state->sk, skb);
+ return NF_STOLEN;
+ }
}
return NF_ACCEPT;
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index e4e0c836c3f5..6b07f30675bb 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -197,6 +197,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
kfree_skb(skb);
return 0;
}
+ skb_dst_drop(skb);
skb_dst_set_noref(skb, &rt->dst);
}
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index 32bc2821027f..57f91efce0f7 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -36,18 +36,10 @@ static struct ebt_replace_kernel initial_table = {
.entries = (char *)&initial_chain,
};
-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
-{
- if (valid_hooks & ~(1 << NF_BR_BROUTING))
- return -EINVAL;
- return 0;
-}
-
static const struct ebt_table broute_table = {
.name = "broute",
.table = &initial_table,
.valid_hooks = 1 << NF_BR_BROUTING,
- .check = check,
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index bcf982e12f16..7f2e620f4978 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = {
.entries = (char *)initial_chains,
};
-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
-{
- if (valid_hooks & ~FILTER_VALID_HOOKS)
- return -EINVAL;
- return 0;
-}
-
static const struct ebt_table frame_filter = {
.name = "filter",
.table = &initial_table,
.valid_hooks = FILTER_VALID_HOOKS,
- .check = check,
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 0d092773f816..1743a105485c 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = {
.entries = (char *)initial_chains,
};
-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
-{
- if (valid_hooks & ~NAT_VALID_HOOKS)
- return -EINVAL;
- return 0;
-}
-
static const struct ebt_table frame_nat = {
.name = "nat",
.table = &initial_table,
.valid_hooks = NAT_VALID_HOOKS,
- .check = check,
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index d9375c52f50e..f6853fc0fcc0 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -999,9 +999,10 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
goto free_iterate;
}
- /* the table doesn't like it */
- if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
+ if (repl->valid_hooks != t->valid_hooks) {
+ ret = -EINVAL;
goto free_unlock;
+ }
if (repl->num_counters && repl->num_counters != t->private->nentries) {
ret = -EINVAL;
@@ -1193,11 +1194,6 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
if (ret != 0)
goto free_chainstack;
- if (table->check && table->check(newinfo, table->valid_hooks)) {
- ret = -EINVAL;
- goto free_chainstack;
- }
-
table->private = newinfo;
rwlock_init(&table->lock);
mutex_lock(&ebt_mutex);
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
index fdbed3158555..d14b2dbbd1df 100644
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
@@ -36,7 +36,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
ktime_t tstamp = skb->tstamp;
struct ip_frag_state state;
struct iphdr *iph;
- int err;
+ int err = 0;
/* for offloaded checksums cleanup checksum before fragmentation */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index 7c9e92b2f806..0c28fa4647b7 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -87,9 +87,8 @@ static int nft_meta_bridge_get_init(const struct nft_ctx *ctx,
return nft_meta_get_init(ctx, expr, tb);
}
- priv->dreg = nft_parse_register(tb[NFTA_META_DREG]);
- return nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, len);
+ return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE, len);
}
static struct nft_expr_type nft_meta_bridge_type;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 8fa98c62c4fc..53f19ee5642f 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -1022,6 +1022,7 @@ static void caif_sock_destructor(struct sock *sk)
return;
}
sk_stream_kill_queues(&cf_sk->sk);
+ WARN_ON(sk->sk_forward_alloc);
caif_free_client(&cf_sk->layer);
}
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index 46c62dd1479b..862226be2286 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -134,6 +134,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
struct usb_device *usbdev;
int res;
+ if (what == NETDEV_UNREGISTER && dev->reg_state >= NETREG_UNREGISTERED)
+ return 0;
+
/* Check whether we have a NCM device, and find its VID/PID. */
if (!(dev->dev.parent && dev->dev.parent->driver &&
strcmp(dev->dev.parent->driver->name, "cdc_ncm") == 0))
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 2809cbd6b7f7..d8cb4b2a076b 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -269,11 +269,15 @@ int cfctrl_linkup_request(struct cflayer *layer,
default:
pr_warn("Request setup of bad link type = %d\n",
param->linktype);
+ cfpkt_destroy(pkt);
return -EINVAL;
}
req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
+ if (!req) {
+ cfpkt_destroy(pkt);
return -ENOMEM;
+ }
+
req->client_layer = user_layer;
req->cmd = CFCTRL_CMD_LINK_SETUP;
req->param = *param;
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 910f164dd20c..4dfac31f9466 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -314,9 +314,6 @@ static int chnl_net_open(struct net_device *dev)
if (result == 0) {
pr_debug("connect timeout\n");
- caif_disconnect_client(dev_net(dev), &priv->chnl);
- priv->state = CAIF_DISCONNECTED;
- pr_debug("state disconnected\n");
result = -ETIMEDOUT;
goto error;
}
diff --git a/net/can/af_can.c b/net/can/af_can.c
index c758a12ffe46..b396c23561d6 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -450,7 +450,7 @@ int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
/* insert new receiver (dev,canid,mask) -> (func,data) */
- if (dev && dev->type != ARPHRD_CAN)
+ if (dev && (dev->type != ARPHRD_CAN || !can_get_ml_priv(dev)))
return -ENODEV;
if (dev && !net_eq(net, dev_net(dev)))
@@ -678,7 +678,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
- if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU)) {
+ if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || skb->len != CAN_MTU)) {
pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n",
dev->type, skb->len);
goto free_skb;
@@ -704,7 +704,7 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
- if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU)) {
+ if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || skb->len != CANFD_MTU)) {
pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n",
dev->type, skb->len);
goto free_skb;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 63d81147fb4e..5cb4b6129263 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -276,6 +276,7 @@ static void bcm_can_tx(struct bcm_op *op)
struct sk_buff *skb;
struct net_device *dev;
struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;
+ int err;
/* no target device? => exit */
if (!op->ifindex)
@@ -300,11 +301,11 @@ static void bcm_can_tx(struct bcm_op *op)
/* send with loopback */
skb->dev = dev;
can_skb_set_owner(skb, op->sk);
- can_send(skb, 1);
+ err = can_send(skb, 1);
+ if (!err)
+ op->frames_abs++;
- /* update statistics */
op->currframe++;
- op->frames_abs++;
/* reached last frame? */
if (op->currframe >= op->nframes)
@@ -937,6 +938,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
cf = op->frames + op->cfsiz * i;
err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
+ if (err < 0)
+ goto free_op;
if (op->flags & CAN_FD_FRAME) {
if (cf->len > 64)
@@ -946,12 +949,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
err = -EINVAL;
}
- if (err < 0) {
- if (op->frames != &op->sframe)
- kfree(op->frames);
- kfree(op);
- return err;
- }
+ if (err < 0)
+ goto free_op;
if (msg_head->flags & TX_CP_CAN_ID) {
/* copy can_id into frame */
@@ -1022,6 +1021,12 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
bcm_tx_start_timer(op);
return msg_head->nframes * op->cfsiz + MHSIZ;
+
+free_op:
+ if (op->frames != &op->sframe)
+ kfree(op->frames);
+ kfree(op);
+ return err;
}
/*
@@ -1518,6 +1523,12 @@ static int bcm_release(struct socket *sock)
lock_sock(sk);
+#if IS_ENABLED(CONFIG_PROC_FS)
+ /* remove procfs entry */
+ if (net->can.bcmproc_dir && bo->bcm_proc_read)
+ remove_proc_entry(bo->procname, net->can.bcmproc_dir);
+#endif /* CONFIG_PROC_FS */
+
list_for_each_entry_safe(op, next, &bo->tx_ops, list)
bcm_remove_op(op);
@@ -1553,12 +1564,6 @@ static int bcm_release(struct socket *sock)
list_for_each_entry_safe(op, next, &bo->rx_ops, list)
bcm_remove_op(op);
-#if IS_ENABLED(CONFIG_PROC_FS)
- /* remove procfs entry */
- if (net->can.bcmproc_dir && bo->bcm_proc_read)
- remove_proc_entry(bo->procname, net->can.bcmproc_dir);
-#endif /* CONFIG_PROC_FS */
-
/* remove device reference */
if (bo->bound) {
bo->bound = 0;
diff --git a/net/can/j1939/address-claim.c b/net/can/j1939/address-claim.c
index f33c47327927..ca4ad6cdd5cb 100644
--- a/net/can/j1939/address-claim.c
+++ b/net/can/j1939/address-claim.c
@@ -165,6 +165,46 @@ static void j1939_ac_process(struct j1939_priv *priv, struct sk_buff *skb)
* leaving this function.
*/
ecu = j1939_ecu_get_by_name_locked(priv, name);
+
+ if (ecu && ecu->addr == skcb->addr.sa) {
+ /* The ISO 11783-5 standard, in "4.5.2 - Address claim
+ * requirements", states:
+ * d) No CF shall begin, or resume, transmission on the
+ * network until 250 ms after it has successfully claimed
+ * an address except when responding to a request for
+ * address-claimed.
+ *
+ * But "Figure 6" and "Figure 7" in "4.5.4.2 - Address-claim
+ * prioritization" show that the CF begins the transmission
+ * after 250 ms from the first AC (address-claimed) message
+ * even if it sends another AC message during that time window
+ * to resolve the address contention with another CF.
+ *
+ * As stated in "4.4.2.3 - Address-claimed message":
+ * In order to successfully claim an address, the CF sending
+ * an address claimed message shall not receive a contending
+ * claim from another CF for at least 250 ms.
+ *
+ * As stated in "4.4.3.2 - NAME management (NM) message":
+ * 1) A commanding CF can
+ * d) request that a CF with a specified NAME transmit
+ * the address-claimed message with its current NAME.
+ * 2) A target CF shall
+ * d) send an address-claimed message in response to a
+ * request for a matching NAME
+ *
+ * Taking the above arguments into account, the 250 ms wait is
+ * requested only during network initialization.
+ *
+ * Do not restart the timer on AC message if both the NAME and
+ * the address match and so if the address has already been
+ * claimed (timer has expired) or the AC message has been sent
+ * to resolve the contention with another CF (timer is still
+ * running).
+ */
+ goto out_ecu_put;
+ }
+
if (!ecu && j1939_address_is_unicast(skcb->addr.sa))
ecu = j1939_ecu_create_locked(priv, name);
diff --git a/net/can/j1939/j1939-priv.h b/net/can/j1939/j1939-priv.h
index cea712fb2a9e..9ac2a10b1826 100644
--- a/net/can/j1939/j1939-priv.h
+++ b/net/can/j1939/j1939-priv.h
@@ -297,6 +297,7 @@ struct j1939_sock {
int ifindex;
struct j1939_addr addr;
+ spinlock_t filters_lock;
struct j1939_filter *filters;
int nfilters;
pgn_t pgn_rx_filter;
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index ca75d1b8f415..9169ef174ff0 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -122,7 +122,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
#define J1939_CAN_ID CAN_EFF_FLAG
#define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG)
-static DEFINE_SPINLOCK(j1939_netdev_lock);
+static DEFINE_MUTEX(j1939_netdev_lock);
static struct j1939_priv *j1939_priv_create(struct net_device *ndev)
{
@@ -216,7 +216,7 @@ static void __j1939_rx_release(struct kref *kref)
j1939_can_rx_unregister(priv);
j1939_ecu_unmap_all(priv);
j1939_priv_set(priv->ndev, NULL);
- spin_unlock(&j1939_netdev_lock);
+ mutex_unlock(&j1939_netdev_lock);
}
/* get pointer to priv without increasing ref counter */
@@ -244,9 +244,9 @@ static struct j1939_priv *j1939_priv_get_by_ndev(struct net_device *ndev)
{
struct j1939_priv *priv;
- spin_lock(&j1939_netdev_lock);
+ mutex_lock(&j1939_netdev_lock);
priv = j1939_priv_get_by_ndev_locked(ndev);
- spin_unlock(&j1939_netdev_lock);
+ mutex_unlock(&j1939_netdev_lock);
return priv;
}
@@ -256,14 +256,14 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
struct j1939_priv *priv, *priv_new;
int ret;
- spin_lock(&j1939_netdev_lock);
+ mutex_lock(&j1939_netdev_lock);
priv = j1939_priv_get_by_ndev_locked(ndev);
if (priv) {
kref_get(&priv->rx_kref);
- spin_unlock(&j1939_netdev_lock);
+ mutex_unlock(&j1939_netdev_lock);
return priv;
}
- spin_unlock(&j1939_netdev_lock);
+ mutex_unlock(&j1939_netdev_lock);
priv = j1939_priv_create(ndev);
if (!priv)
@@ -273,29 +273,31 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
spin_lock_init(&priv->j1939_socks_lock);
INIT_LIST_HEAD(&priv->j1939_socks);
- spin_lock(&j1939_netdev_lock);
+ mutex_lock(&j1939_netdev_lock);
priv_new = j1939_priv_get_by_ndev_locked(ndev);
if (priv_new) {
/* Someone was faster than us, use their priv and roll
* back ours.
*/
kref_get(&priv_new->rx_kref);
- spin_unlock(&j1939_netdev_lock);
+ mutex_unlock(&j1939_netdev_lock);
dev_put(ndev);
kfree(priv);
return priv_new;
}
j1939_priv_set(ndev, priv);
- spin_unlock(&j1939_netdev_lock);
ret = j1939_can_rx_register(priv);
if (ret < 0)
goto out_priv_put;
+ mutex_unlock(&j1939_netdev_lock);
return priv;
out_priv_put:
j1939_priv_set(ndev, NULL);
+ mutex_unlock(&j1939_netdev_lock);
+
dev_put(ndev);
kfree(priv);
@@ -304,7 +306,7 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
void j1939_netdev_stop(struct j1939_priv *priv)
{
- kref_put_lock(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
+ kref_put_mutex(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
j1939_priv_put(priv);
}
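Editor's note: the spinlock-to-mutex conversion is needed because code running under j1939_netdev_lock (such as j1939_can_rx_register()) can sleep. kref_put_mutex() takes the mutex only when the refcount actually drops to zero, so the release callback runs with the lock held and can unpublish the object safely; the callback is responsible for unlocking. A minimal sketch with hypothetical names:

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref ref;
};

void unpublish(struct demo_obj *obj);	/* remove from lookup structures */

static DEFINE_MUTEX(registry_lock);

/* Sketch: release() is entered with registry_lock held by
 * kref_put_mutex() and must unlock it itself. */
static void demo_release(struct kref *kref)
{
	struct demo_obj *obj = container_of(kref, struct demo_obj, ref);

	unpublish(obj);
	mutex_unlock(&registry_lock);
	kfree(obj);
}

static void demo_put(struct demo_obj *obj)
{
	kref_put_mutex(&obj->ref, demo_release, &registry_lock);
}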
@@ -332,6 +334,9 @@ int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb)
/* re-claim the CAN_HDR from the SKB */
cf = skb_push(skb, J1939_CAN_HDR);
+ /* initialize header structure */
+ memset(cf, 0, J1939_CAN_HDR);
+
/* make it a full can frame again */
skb_put(skb, J1939_CAN_FTR + (8 - dlc));
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 51bfb220fad8..2e3dc74fea21 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -178,7 +178,10 @@ activate_next:
if (!first)
return;
- if (WARN_ON_ONCE(j1939_session_activate(first))) {
+ if (j1939_session_activate(first)) {
+ netdev_warn_once(first->priv->ndev,
+ "%s: 0x%p: Identical session is already activated.\n",
+ __func__, first);
first->err = -EBUSY;
goto activate_next;
} else {
@@ -259,12 +262,17 @@ static bool j1939_sk_match_dst(struct j1939_sock *jsk,
static bool j1939_sk_match_filter(struct j1939_sock *jsk,
const struct j1939_sk_buff_cb *skcb)
{
- const struct j1939_filter *f = jsk->filters;
- int nfilter = jsk->nfilters;
+ const struct j1939_filter *f;
+ int nfilter;
+
+ spin_lock_bh(&jsk->filters_lock);
+
+ f = jsk->filters;
+ nfilter = jsk->nfilters;
if (!nfilter)
/* receive all when no filters are assigned */
- return true;
+ goto filter_match_found;
for (; nfilter; ++f, --nfilter) {
if ((skcb->addr.pgn & f->pgn_mask) != f->pgn)
@@ -273,9 +281,15 @@ static bool j1939_sk_match_filter(struct j1939_sock *jsk,
continue;
if ((skcb->addr.src_name & f->name_mask) != f->name)
continue;
- return true;
+ goto filter_match_found;
}
+
+ spin_unlock_bh(&jsk->filters_lock);
return false;
+
+filter_match_found:
+ spin_unlock_bh(&jsk->filters_lock);
+ return true;
}
static bool j1939_sk_recv_match_one(struct j1939_sock *jsk,
@@ -398,6 +412,7 @@ static int j1939_sk_init(struct sock *sk)
atomic_set(&jsk->skb_pending, 0);
spin_lock_init(&jsk->sk_session_queue_lock);
INIT_LIST_HEAD(&jsk->sk_session_queue);
+ spin_lock_init(&jsk->filters_lock);
/* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
sock_set_flag(sk, SOCK_RCU_FREE);
@@ -700,9 +715,11 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
}
lock_sock(&jsk->sk);
+ spin_lock_bh(&jsk->filters_lock);
ofilters = jsk->filters;
jsk->filters = filters;
jsk->nfilters = count;
+ spin_unlock_bh(&jsk->filters_lock);
release_sock(&jsk->sk);
kfree(ofilters);
return 0;
@@ -795,7 +812,7 @@ static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
struct j1939_sk_buff_cb *skcb;
int ret = 0;
- if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE))
+ if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
return -EINVAL;
if (flags & MSG_ERRQUEUE)
@@ -1010,6 +1027,11 @@ void j1939_sk_errqueue(struct j1939_session *session,
void j1939_sk_send_loop_abort(struct sock *sk, int err)
{
+ struct j1939_sock *jsk = j1939_sk(sk);
+
+ if (jsk->state & J1939_SOCK_ERRQUEUE)
+ return;
+
sk->sk_err = err;
sk->sk_error_report(sk);
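
The filters_lock introduced above closes a race between j1939_sk_setsockopt() swapping the filter array and j1939_sk_match_filter() walking it from softirq context. The pattern is a plain BH-disabling spinlock held on both sides; a condensed sketch (field names as in the patch, match() is a hypothetical stand-in for the per-filter tests):

	/* writer: publish the new array and count atomically w.r.t. readers */
	spin_lock_bh(&jsk->filters_lock);
	ofilters = jsk->filters;
	jsk->filters = filters;
	jsk->nfilters = count;
	spin_unlock_bh(&jsk->filters_lock);
	kfree(ofilters);	/* safe: no reader can still hold a pointer */

	/* reader (softirq): hold the lock across the whole walk so the
	 * writer's kfree() cannot free an array that is still in use
	 */
	spin_lock_bh(&jsk->filters_lock);
	for (f = jsk->filters, n = jsk->nfilters; n; ++f, --n)
		if (match(f, skcb))
			break;
	spin_unlock_bh(&jsk->filters_lock);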
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index 22f4b798d385..42fb83605d4c 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -260,6 +260,8 @@ static void __j1939_session_drop(struct j1939_session *session)
static void j1939_session_destroy(struct j1939_session *session)
{
+ struct sk_buff *skb;
+
if (session->err)
j1939_sk_errqueue(session, J1939_ERRQUEUE_ABORT);
else
@@ -270,7 +272,11 @@ static void j1939_session_destroy(struct j1939_session *session)
WARN_ON_ONCE(!list_empty(&session->sk_session_queue_entry));
WARN_ON_ONCE(!list_empty(&session->active_session_list_entry));
- skb_queue_purge(&session->skb_queue);
+ while ((skb = skb_dequeue(&session->skb_queue)) != NULL) {
+ /* drop ref taken in j1939_session_skb_queue() */
+ skb_unref(skb);
+ kfree_skb(skb);
+ }
__j1939_session_drop(session);
j1939_priv_put(session->priv);
kfree(session);
@@ -332,10 +338,12 @@ static void j1939_session_skb_drop_old(struct j1939_session *session)
__skb_unlink(do_skb, &session->skb_queue);
/* drop ref taken in j1939_session_skb_queue() */
skb_unref(do_skb);
+ spin_unlock_irqrestore(&session->skb_queue.lock, flags);
kfree_skb(do_skb);
+ } else {
+ spin_unlock_irqrestore(&session->skb_queue.lock, flags);
}
- spin_unlock_irqrestore(&session->skb_queue.lock, flags);
}
void j1939_session_skb_queue(struct j1939_session *session,
@@ -592,7 +600,10 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv,
/* reserve CAN header */
skb_reserve(skb, offsetof(struct can_frame, data));
- memcpy(skb->cb, re_skcb, sizeof(skb->cb));
+ /* skb->cb must be large enough to hold a j1939_sk_buff_cb structure */
+ BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*re_skcb));
+
+ memcpy(skb->cb, re_skcb, sizeof(*re_skcb));
skcb = j1939_skb_to_cb(skb);
if (swap_src_dst)
j1939_skbcb_swap(skcb);
@@ -1079,10 +1090,6 @@ static bool j1939_session_deactivate(struct j1939_session *session)
bool active;
j1939_session_list_lock(priv);
- /* This function should be called with a session ref-count of at
- * least 2.
- */
- WARN_ON_ONCE(kref_read(&session->kref) < 2);
active = j1939_session_deactivate_locked(session);
j1939_session_list_unlock(priv);
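
The j1939_session_destroy() hunk replaces skb_queue_purge() with an open-coded loop because every skb on session->skb_queue carries an extra reference taken at queue time (per the patch comment); a plain purge would drop only one reference per skb and leak the buffers. The shape of the fix:

	while ((skb = skb_dequeue(&session->skb_queue)) != NULL) {
		skb_unref(skb);		/* drop the ref taken when queued */
		kfree_skb(skb);		/* drop the last ref and free */
	}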
diff --git a/net/can/raw.c b/net/can/raw.c
index bb837019d172..2f500d8a0af2 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -770,6 +770,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct raw_sock *ro = raw_sk(sk);
+ struct sockcm_cookie sockc;
struct sk_buff *skb;
struct net_device *dev;
int ifindex;
@@ -815,11 +816,20 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (err < 0)
goto free_skb;
- skb_setup_tx_timestamp(skb, sk->sk_tsflags);
+ sockcm_init(&sockc, sk);
+ if (msg->msg_controllen) {
+ err = sock_cmsg_send(sk, msg, &sockc);
+ if (unlikely(err))
+ goto free_skb;
+ }
skb->dev = dev;
skb->sk = sk;
skb->priority = sk->sk_priority;
+ skb->mark = sk->sk_mark;
+ skb->tstamp = sockc.transmit_time;
+
+ skb_setup_tx_timestamp(skb, sockc.tsflags);
err = can_send(skb, ro->loopback);
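
With sock_cmsg_send() wired into raw_sendmsg(), CAN_RAW now honours per-message control data (SO_MARK, SCM_TXTIME, timestamping flags) instead of only the socket-wide settings. A hypothetical userspace sketch of sending one frame with a per-packet mark, assuming <sys/socket.h> and <linux/can.h> (SO_MARK normally requires CAP_NET_ADMIN):

	char cbuf[CMSG_SPACE(sizeof(__u32))] = {};
	struct can_frame cf = { .can_id = 0x123, .can_dlc = 2, .data = {1, 2} };
	struct iovec iov = { .iov_base = &cf, .iov_len = sizeof(cf) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type = SO_MARK;
	cm->cmsg_len = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cm) = 42;
	sendmsg(fd, &msg, 0);	/* fd: a bound CAN_RAW socket */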
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 49726c378aab..db6320076e23 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -477,8 +477,8 @@ static int ceph_tcp_connect(struct ceph_connection *con)
dout("connect %s\n", ceph_pr_addr(&con->peer_addr));
con_sock_state_connecting(con);
- ret = sock->ops->connect(sock, (struct sockaddr *)&ss, sizeof(ss),
- O_NONBLOCK);
+ ret = kernel_connect(sock, (struct sockaddr *)&ss, sizeof(ss),
+ O_NONBLOCK);
if (ret == -EINPROGRESS) {
dout("connect %s EINPROGRESS sk_state = %u\n",
ceph_pr_addr(&con->peer_addr),
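
The switch from sock->ops->connect() to the kernel_connect() wrapper matters once BPF cgroup connect hooks are in play: the wrapper operates on its own copy of the sockaddr, so a hook that rewrites the destination cannot scribble over the 'ss' buffer on libceph's stack (the wrapper's contract as understood here, not spelled out in the hunk itself):

	/* only kernel_connect()'s internal copy may be rewritten */
	ret = kernel_connect(sock, (struct sockaddr *)&ss, sizeof(ss),
			     O_NONBLOCK);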
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index a8481da37f1a..7412bcc94d1d 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -3249,17 +3249,24 @@ static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
int ret;
dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
- ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
+ ret = wait_for_completion_killable(&lreq->reg_commit_wait);
return ret ?: lreq->reg_commit_error;
}
-static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
+static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq,
+ unsigned long timeout)
{
- int ret;
+ long left;
dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
- ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
- return ret ?: lreq->notify_finish_error;
+ left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait,
+ ceph_timeout_jiffies(timeout));
+ if (left <= 0)
+ left = left ?: -ETIMEDOUT;
+ else
+ left = lreq->notify_finish_error; /* completed */
+
+ return left;
}
/*
@@ -4875,7 +4882,8 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
linger_submit(lreq);
ret = linger_reg_commit_wait(lreq);
if (!ret)
- ret = linger_notify_finish_wait(lreq);
+ ret = linger_notify_finish_wait(lreq,
+ msecs_to_jiffies(2 * timeout * MSEC_PER_SEC));
else
dout("lreq %p failed to initiate notify %d\n", lreq, ret);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b0488f30f2c4..a5fc44448d60 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -778,18 +778,21 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
{
struct sock *sk = sock->sk;
__poll_t mask;
+ u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
/* exceptional events? */
- if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+ if (READ_ONCE(sk->sk_err) ||
+ !skb_queue_empty_lockless(&sk->sk_error_queue))
mask |= EPOLLERR |
(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
- if (sk->sk_shutdown & RCV_SHUTDOWN)
+ shutdown = READ_ONCE(sk->sk_shutdown);
+ if (shutdown & RCV_SHUTDOWN)
mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
- if (sk->sk_shutdown == SHUTDOWN_MASK)
+ if (shutdown == SHUTDOWN_MASK)
mask |= EPOLLHUP;
/* readable? */
@@ -798,10 +801,12 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
/* Connection-based need to check for termination and startup */
if (connection_based(sk)) {
- if (sk->sk_state == TCP_CLOSE)
+ int state = READ_ONCE(sk->sk_state);
+
+ if (state == TCP_CLOSE)
mask |= EPOLLHUP;
/* connection hasn't started yet? */
- if (sk->sk_state == TCP_SYN_SENT)
+ if (state == TCP_SYN_SENT)
return mask;
}
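
datagram_poll() runs locklessly, so the hunk above snapshots sk_shutdown once: two separate plain reads could observe different values while shutdown() runs concurrently and, for example, report EPOLLHUP without EPOLLRDHUP. The shape of the fix:

	u8 shutdown = READ_ONCE(sk->sk_shutdown);	/* one coherent snapshot */

	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (shutdown == SHUTDOWN_MASK)			/* both directions shut */
		mask |= EPOLLHUP;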
diff --git a/net/core/dev.c b/net/core/dev.c
index a03036456221..4f39d9720981 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1940,7 +1940,7 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
again:
list_for_each_entry_rcu(ptype, ptype_list, list) {
- if (ptype->ignore_outgoing)
+ if (READ_ONCE(ptype->ignore_outgoing))
continue;
/* Never send packets back to the socket
@@ -2244,6 +2244,8 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
bool active = false;
unsigned int nr_ids;
+ WARN_ON_ONCE(index >= dev->num_tx_queues);
+
if (dev->num_tc) {
/* Do not allow XPS on subordinate device directly */
num_tc = dev->num_tc;
@@ -2735,8 +2737,10 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
if (in_irq() || irqs_disabled())
__dev_kfree_skb_irq(skb, reason);
+ else if (unlikely(reason == SKB_REASON_DROPPED))
+ kfree_skb(skb);
else
- dev_kfree_skb(skb);
+ consume_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
@@ -2934,7 +2938,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
type = eth->h_proto;
}
- return __vlan_get_protocol(skb, type, depth);
+ return vlan_get_protocol_and_depth(skb, type, depth);
}
/**
@@ -3136,6 +3140,14 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
if (gso_segs > dev->gso_max_segs)
return features & ~NETIF_F_GSO_MASK;
+ if (unlikely(skb->len >= READ_ONCE(dev->gso_max_size)))
+ return features & ~NETIF_F_GSO_MASK;
+
+ if (!skb_shinfo(skb)->gso_type) {
+ skb_warn_bad_offload(skb);
+ return features & ~NETIF_F_GSO_MASK;
+ }
+
/* Support for GSO partial features requires software
* intervention before we can actually process the packets
* so we need to strip support for any partial features now
@@ -3712,6 +3724,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
bool again = false;
skb_reset_mac_header(skb);
+ skb_assert_len(skb);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
@@ -3997,8 +4010,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
u32 next_cpu;
u32 ident;
- /* First check into global flow table if there is a match */
- ident = sock_flow_table->ents[hash & sock_flow_table->mask];
+ /* First check the global flow table for a match.
+ * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
+ */
+ ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
if ((ident ^ hash) & ~rps_cpu_mask)
goto try_rps;
@@ -4411,7 +4426,7 @@ static int netif_rx_internal(struct sk_buff *skb)
{
int ret;
- net_timestamp_check(netdev_tstamp_prequeue, skb);
+ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
trace_netif_rx(skb);
@@ -4753,7 +4768,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
int ret = NET_RX_DROP;
__be16 type;
- net_timestamp_check(!netdev_tstamp_prequeue, skb);
+ net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
trace_netif_receive_skb(skb);
@@ -5135,7 +5150,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
{
int ret;
- net_timestamp_check(netdev_tstamp_prequeue, skb);
+ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
if (skb_defer_rx_timestamp(skb))
return NET_RX_SUCCESS;
@@ -5165,7 +5180,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
- net_timestamp_check(netdev_tstamp_prequeue, skb);
+ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
skb_list_del_init(skb);
if (!skb_defer_rx_timestamp(skb))
list_add_tail(&skb->list, &sublist);
@@ -5892,7 +5907,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
net_rps_action_and_irq_enable(sd);
}
- napi->weight = dev_rx_weight;
+ napi->weight = READ_ONCE(dev_rx_weight);
while (again) {
struct sk_buff *skb;
@@ -6393,8 +6408,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies +
- usecs_to_jiffies(netdev_budget_usecs);
- int budget = netdev_budget;
+ usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
+ int budget = READ_ONCE(netdev_budget);
LIST_HEAD(list);
LIST_HEAD(repoll);
@@ -9433,9 +9448,7 @@ void netdev_run_todo(void)
BUG_ON(!list_empty(&dev->ptype_specific));
WARN_ON(rcu_access_pointer(dev->ip_ptr));
WARN_ON(rcu_access_pointer(dev->ip6_ptr));
-#if IS_ENABLED(CONFIG_DECNET)
- WARN_ON(dev->dn_ptr);
-#endif
+
if (dev->priv_destructor)
dev->priv_destructor(dev);
if (dev->needs_free_netdev)
@@ -9460,24 +9473,16 @@ void netdev_run_todo(void)
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
const struct net_device_stats *netdev_stats)
{
-#if BITS_PER_LONG == 64
- BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
- memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
- /* zero out counters that only exist in rtnl_link_stats64 */
- memset((char *)stats64 + sizeof(*netdev_stats), 0,
- sizeof(*stats64) - sizeof(*netdev_stats));
-#else
- size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
- const unsigned long *src = (const unsigned long *)netdev_stats;
+ size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
+ const atomic_long_t *src = (atomic_long_t *)netdev_stats;
u64 *dst = (u64 *)stats64;
BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
for (i = 0; i < n; i++)
- dst[i] = src[i];
+ dst[i] = (unsigned long)atomic_long_read(&src[i]);
/* zero out counters that only exist in rtnl_link_stats64 */
memset((char *)stats64 + n * sizeof(u64), 0,
sizeof(*stats64) - n * sizeof(u64));
-#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);
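
In the __dev_kfree_skb_any() hunk the split between kfree_skb() and consume_skb() is about tracing, not memory: both free the skb, but kfree_skb() fires the kfree_skb tracepoint that drop-monitoring tools watch, while consume_skb() marks a normal end of life:

	if (unlikely(reason == SKB_REASON_DROPPED))
		kfree_skb(skb);		/* counted as a packet drop */
	else
		consume_skb(skb);	/* normal completion, not a drop */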
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 0ac02cab3087..5bd6330ab427 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -6299,7 +6299,10 @@ EXPORT_SYMBOL_GPL(devlink_free);
static void devlink_port_type_warn(struct work_struct *work)
{
- WARN(true, "Type was not set for devlink port.");
+ struct devlink_port *port = container_of(to_delayed_work(work),
+ struct devlink_port,
+ type_warn_dw);
+ dev_warn(port->devlink->dev, "Type was not set for devlink port.");
}
static bool devlink_port_type_should_warn(struct devlink_port *devlink_port)
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index e8e8389ddc96..feb946c954b6 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -180,7 +180,7 @@ out:
}
static const struct genl_multicast_group dropmon_mcgrps[] = {
- { .name = "events", },
+ { .name = "events", .cap_sys_admin = 1 },
};
static void send_dm_alert(struct work_struct *work)
@@ -1539,11 +1539,13 @@ static const struct genl_ops dropmon_ops[] = {
.cmd = NET_DM_CMD_START,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = net_dm_cmd_trace,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NET_DM_CMD_STOP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = net_dm_cmd_trace,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NET_DM_CMD_CONFIG_GET,
diff --git a/net/core/dst.c b/net/core/dst.c
index 193af526e908..107aea25a564 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -81,14 +81,10 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
{
struct dst_entry *dst;
- if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
- if (ops->gc(ops)) {
- printk_ratelimited(KERN_NOTICE "Route cache is full: "
- "consider increasing sysctl "
- "net.ipv[4|6].route.max_size.\n");
- return NULL;
- }
- }
+ if (ops->gc &&
+ !(flags & DST_NOCOUNT) &&
+ dst_entries_get_fast(ops) > ops->gc_thresh)
+ ops->gc(ops);
dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
if (!dst)
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index cbd1885f2459..9ae38c3e2bf0 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1947,7 +1947,8 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
return n_stats;
if (n_stats > S32_MAX / sizeof(u64))
return -ENOMEM;
- WARN_ON_ONCE(!n_stats);
+ if (WARN_ON_ONCE(!n_stats))
+ return -EOPNOTSUPP;
if (copy_from_user(&stats, useraddr, sizeof(stats)))
return -EFAULT;
diff --git a/net/core/filter.c b/net/core/filter.c
index 72bf78032f45..3c4dcdc7217e 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2071,8 +2071,17 @@ static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
{
unsigned int mlen = skb_network_offset(skb);
+ if (unlikely(skb->len <= mlen)) {
+ kfree_skb(skb);
+ return -ERANGE;
+ }
+
if (mlen) {
__skb_pull(skb, mlen);
+ if (unlikely(!skb->len)) {
+ kfree_skb(skb);
+ return -ERANGE;
+ }
/* At ingress, the mac header has already been pulled once.
* At egress, skb_postpull_rcsum has to be done in case that
@@ -2092,7 +2101,7 @@ static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
u32 flags)
{
/* Verify that a link layer header is carried */
- if (unlikely(skb->mac_header >= skb->network_header)) {
+ if (unlikely(skb->mac_header >= skb->network_header || skb->len == 0)) {
kfree_skb(skb);
return -ERANGE;
}
@@ -2210,6 +2219,22 @@ BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
return 0;
}
+static void sk_msg_reset_curr(struct sk_msg *msg)
+{
+ u32 i = msg->sg.start;
+ u32 len = 0;
+
+ do {
+ len += sk_msg_elem(msg, i)->length;
+ sk_msg_iter_var_next(i);
+ if (len >= msg->sg.size)
+ break;
+ } while (i != msg->sg.end);
+
+ msg->sg.curr = i;
+ msg->sg.copybreak = 0;
+}
+
static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
.func = bpf_msg_cork_bytes,
.gpl_only = false,
@@ -2329,6 +2354,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
msg->sg.end - shift + NR_MSG_FRAG_IDS :
msg->sg.end - shift;
out:
+ sk_msg_reset_curr(msg);
msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
msg->data_end = msg->data + bytes;
return 0;
@@ -2462,6 +2488,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
msg->sg.data[new] = rsge;
}
+ sk_msg_reset_curr(msg);
sk_msg_compute_data_pointers(msg);
return 0;
}
@@ -2633,6 +2660,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
sk_mem_uncharge(msg->sk, len - pop);
msg->sg.size -= (len - pop);
+ sk_msg_reset_curr(msg);
sk_msg_compute_data_pointers(msg);
return 0;
}
@@ -2786,15 +2814,18 @@ static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
{
+ void *old_data;
+
/* skb_ensure_writable() is not needed here, as we're
* already working on an uncloned skb.
*/
if (unlikely(!pskb_may_pull(skb, off + len)))
return -ENOMEM;
- skb_postpull_rcsum(skb, skb->data + off, len);
- memmove(skb->data + len, skb->data, off);
+ old_data = skb->data;
__skb_pull(skb, len);
+ skb_postpull_rcsum(skb, old_data + off, len);
+ memmove(skb->data, old_data, off);
return 0;
}
@@ -4605,7 +4636,6 @@ static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
memcpy(params->smac, dev->dev_addr, ETH_ALEN);
params->h_vlan_TCI = 0;
params->h_vlan_proto = 0;
- params->ifindex = dev->ifindex;
return 0;
}
@@ -4702,6 +4732,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
dev = nhc->nhc_dev;
params->rt_metric = res.fi->fib_priority;
+ params->ifindex = dev->ifindex;
/* xdp and cls_bpf programs are run in RCU-bh so
* rcu_read_lock_bh is not needed here
@@ -4720,7 +4751,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
}
- if (!neigh)
+ if (!neigh || !(neigh->nud_state & NUD_VALID))
return BPF_FIB_LKUP_RET_NO_NEIGH;
return bpf_fib_set_fwd_params(params, neigh, dev);
@@ -4827,12 +4858,13 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
dev = res.nh->fib_nh_dev;
params->rt_metric = res.f6i->fib6_metric;
+ params->ifindex = dev->ifindex;
/* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
* not needed here.
*/
neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
- if (!neigh)
+ if (!neigh || !(neigh->nud_state & NUD_VALID))
return BPF_FIB_LKUP_RET_NO_NEIGH;
return bpf_fib_set_fwd_params(params, neigh, dev);
@@ -6196,6 +6228,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skb_adjust_room_proto;
case BPF_FUNC_skb_change_tail:
return &bpf_skb_change_tail_proto;
+ case BPF_FUNC_skb_change_head:
+ return &bpf_skb_change_head_proto;
case BPF_FUNC_skb_get_tunnel_key:
return &bpf_skb_get_tunnel_key_proto;
case BPF_FUNC_skb_set_tunnel_key:
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 4dac27c98623..5daa72a930a9 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1564,8 +1564,7 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
memset(&keys, 0, sizeof(keys));
__skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
- &keys, NULL, 0, 0, 0,
- FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+ &keys, NULL, 0, 0, 0, 0);
return __flow_hash_from_keys(&keys, &hashrnd);
}
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index bf270b6a99b4..017ed82611a8 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -59,9 +59,8 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
ret = BPF_OK;
} else {
skb_reset_mac_header(skb);
- ret = skb_do_redirect(skb);
- if (ret == 0)
- ret = BPF_REDIRECT;
+ skb_do_redirect(skb);
+ ret = BPF_REDIRECT;
}
break;
@@ -254,7 +253,7 @@ static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
if (unlikely(err))
- return err;
+ return net_xmit_errno(err);
/* ip[6]_finish_output2 understand LWTUNNEL_XMIT_DONE */
return LWTUNNEL_XMIT_DONE;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8b6140e67e7f..e571007d083c 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -224,10 +224,13 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
static int neigh_forced_gc(struct neigh_table *tbl)
{
- int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
+ int max_clean = atomic_read(&tbl->gc_entries) -
+ READ_ONCE(tbl->gc_thresh2);
+ u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
unsigned long tref = jiffies - 5 * HZ;
struct neighbour *n, *tmp;
int shrunk = 0;
+ int loop = 0;
NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
@@ -242,7 +245,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
(n->nud_state == NUD_NOARP) ||
(tbl->is_multicast &&
tbl->is_multicast(n->primary_key)) ||
- time_after(tref, n->updated))
+ !time_in_range(n->updated, tref, jiffies))
remove = true;
write_unlock(&n->lock);
@@ -250,11 +253,16 @@ static int neigh_forced_gc(struct neigh_table *tbl)
shrunk++;
if (shrunk >= max_clean)
break;
+ if (++loop == 16) {
+ if (ktime_get_ns() > tmax)
+ goto unlock;
+ loop = 0;
+ }
}
}
- tbl->last_flush = jiffies;
-
+ WRITE_ONCE(tbl->last_flush, jiffies);
+unlock:
write_unlock_bh(&tbl->lock);
return shrunk;
@@ -262,7 +270,17 @@ static int neigh_forced_gc(struct neigh_table *tbl)
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
+ /* Keep a safe distance from the jiffies - LONG_MAX wrap point
+ * while the timer is running in DELAY/PROBE state, but still
+ * report large times in the past to user space.
+ */
+ unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);
+
neigh_hold(n);
+ if (!time_in_range(n->confirmed, mint, jiffies))
+ n->confirmed = mint;
+ if (time_before(n->used, n->confirmed))
+ n->used = n->confirmed;
if (unlikely(mod_timer(&n->timer, when))) {
printk("NEIGH: BUG, double timer add, state is %x\n",
n->nud_state);
@@ -280,11 +298,26 @@ static int neigh_del_timer(struct neighbour *n)
return 0;
}
-static void pneigh_queue_purge(struct sk_buff_head *list)
+static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
{
+ struct sk_buff_head tmp;
+ unsigned long flags;
struct sk_buff *skb;
- while ((skb = skb_dequeue(list)) != NULL) {
+ skb_queue_head_init(&tmp);
+ spin_lock_irqsave(&list->lock, flags);
+ skb = skb_peek(list);
+ while (skb != NULL) {
+ struct sk_buff *skb_next = skb_peek_next(skb, list);
+ if (net == NULL || net_eq(dev_net(skb->dev), net)) {
+ __skb_unlink(skb, list);
+ __skb_queue_tail(&tmp, skb);
+ }
+ skb = skb_next;
+ }
+ spin_unlock_irqrestore(&list->lock, flags);
+
+ while ((skb = __skb_dequeue(&tmp))) {
dev_put(skb->dev);
kfree_skb(skb);
}
@@ -358,9 +391,9 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
write_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev, skip_perm);
pneigh_ifdown_and_unlock(tbl, dev);
-
- del_timer_sync(&tbl->proxy_timer);
- pneigh_queue_purge(&tbl->proxy_queue);
+ pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL);
+ if (skb_queue_empty_lockless(&tbl->proxy_queue))
+ del_timer_sync(&tbl->proxy_timer);
return 0;
}
@@ -384,17 +417,17 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl,
{
struct neighbour *n = NULL;
unsigned long now = jiffies;
- int entries;
+ int entries, gc_thresh3;
if (exempt_from_gc)
goto do_alloc;
entries = atomic_inc_return(&tbl->gc_entries) - 1;
- if (entries >= tbl->gc_thresh3 ||
- (entries >= tbl->gc_thresh2 &&
- time_after(now, tbl->last_flush + 5 * HZ))) {
- if (!neigh_forced_gc(tbl) &&
- entries >= tbl->gc_thresh3) {
+ gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
+ if (entries >= gc_thresh3 ||
+ (entries >= READ_ONCE(tbl->gc_thresh2) &&
+ time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
+ if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
net_info_ratelimited("%s: neighbor table overflow!\n",
tbl->id);
NEIGH_CACHE_STAT_INC(tbl, table_fulls);
@@ -546,37 +579,6 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
}
EXPORT_SYMBOL(neigh_lookup);
-struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
- const void *pkey)
-{
- struct neighbour *n;
- unsigned int key_len = tbl->key_len;
- u32 hash_val;
- struct neigh_hash_table *nht;
-
- NEIGH_CACHE_STAT_INC(tbl, lookups);
-
- rcu_read_lock_bh();
- nht = rcu_dereference_bh(tbl->nht);
- hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
-
- for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
- n != NULL;
- n = rcu_dereference_bh(n->next)) {
- if (!memcmp(n->primary_key, pkey, key_len) &&
- net_eq(dev_net(n->dev), net)) {
- if (!refcount_inc_not_zero(&n->refcnt))
- n = NULL;
- NEIGH_CACHE_STAT_INC(tbl, hits);
- break;
- }
- }
-
- rcu_read_unlock_bh();
- return n;
-}
-EXPORT_SYMBOL(neigh_lookup_nodev);
-
static struct neighbour *
___neigh_create(struct neigh_table *tbl, const void *pkey,
struct net_device *dev, u8 flags,
@@ -908,13 +910,14 @@ static void neigh_periodic_work(struct work_struct *work)
if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
struct neigh_parms *p;
- tbl->last_rand = jiffies;
+
+ WRITE_ONCE(tbl->last_rand, jiffies);
list_for_each_entry(p, &tbl->parms_list, list)
p->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
}
- if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
+ if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
goto out;
for (i = 0 ; i < (1 << nht->hash_shift); i++) {
@@ -933,13 +936,17 @@ static void neigh_periodic_work(struct work_struct *work)
goto next_elt;
}
- if (time_before(n->used, n->confirmed))
+ if (time_before(n->used, n->confirmed) &&
+ time_is_before_eq_jiffies(n->confirmed))
n->used = n->confirmed;
if (refcount_read(&n->refcnt) == 1 &&
(state == NUD_FAILED ||
- time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
- *np = n->next;
+ !time_in_range_open(jiffies, n->used,
+ n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(n->next,
+ lockdep_is_held(&tbl->lock)));
neigh_mark_dead(n);
write_unlock(&n->lock);
neigh_cleanup_and_release(n);
@@ -1741,7 +1748,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
/* It is not clean... Fix it to unload IPv6 module safely */
cancel_delayed_work_sync(&tbl->gc_work);
del_timer_sync(&tbl->proxy_timer);
- pneigh_queue_purge(&tbl->proxy_queue);
+ pneigh_queue_purge(&tbl->proxy_queue, NULL);
neigh_ifdown(tbl, NULL);
if (atomic_read(&tbl->entries))
pr_crit("neighbour leakage\n");
@@ -1773,9 +1780,6 @@ static struct neigh_table *neigh_find_table(int family)
case AF_INET6:
tbl = neigh_tables[NEIGH_ND_TABLE];
break;
- case AF_DECnet:
- tbl = neigh_tables[NEIGH_DN_TABLE];
- break;
}
return tbl;
@@ -2055,15 +2059,16 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
ndtmsg->ndtm_pad2 = 0;
if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
- nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
- nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
- nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
- nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
+ nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
+ NDTA_PAD) ||
+ nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
+ nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
+ nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
goto nla_put_failure;
{
unsigned long now = jiffies;
- long flush_delta = now - tbl->last_flush;
- long rand_delta = now - tbl->last_rand;
+ long flush_delta = now - READ_ONCE(tbl->last_flush);
+ long rand_delta = now - READ_ONCE(tbl->last_rand);
struct neigh_hash_table *nht;
struct ndt_config ndc = {
.ndtc_key_len = tbl->key_len,
@@ -2071,7 +2076,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
.ndtc_entries = atomic_read(&tbl->entries),
.ndtc_last_flush = jiffies_to_msecs(flush_delta),
.ndtc_last_rand = jiffies_to_msecs(rand_delta),
- .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
+ .ndtc_proxy_qlen = READ_ONCE(tbl->proxy_queue.qlen),
};
rcu_read_lock_bh();
@@ -2094,17 +2099,17 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
struct neigh_statistics *st;
st = per_cpu_ptr(tbl->stats, cpu);
- ndst.ndts_allocs += st->allocs;
- ndst.ndts_destroys += st->destroys;
- ndst.ndts_hash_grows += st->hash_grows;
- ndst.ndts_res_failed += st->res_failed;
- ndst.ndts_lookups += st->lookups;
- ndst.ndts_hits += st->hits;
- ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
- ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
- ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
- ndst.ndts_forced_gc_runs += st->forced_gc_runs;
- ndst.ndts_table_fulls += st->table_fulls;
+ ndst.ndts_allocs += READ_ONCE(st->allocs);
+ ndst.ndts_destroys += READ_ONCE(st->destroys);
+ ndst.ndts_hash_grows += READ_ONCE(st->hash_grows);
+ ndst.ndts_res_failed += READ_ONCE(st->res_failed);
+ ndst.ndts_lookups += READ_ONCE(st->lookups);
+ ndst.ndts_hits += READ_ONCE(st->hits);
+ ndst.ndts_rcv_probes_mcast += READ_ONCE(st->rcv_probes_mcast);
+ ndst.ndts_rcv_probes_ucast += READ_ONCE(st->rcv_probes_ucast);
+ ndst.ndts_periodic_gc_runs += READ_ONCE(st->periodic_gc_runs);
+ ndst.ndts_forced_gc_runs += READ_ONCE(st->forced_gc_runs);
+ ndst.ndts_table_fulls += READ_ONCE(st->table_fulls);
}
if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
@@ -2328,16 +2333,16 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
goto errout_tbl_lock;
if (tb[NDTA_THRESH1])
- tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
+ WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
if (tb[NDTA_THRESH2])
- tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
+ WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
if (tb[NDTA_THRESH3])
- tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
+ WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
if (tb[NDTA_GC_INTERVAL])
- tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
+ WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
err = 0;
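
Several neighbour.c hunks replace time_after() tests with time_in_range() because n->updated or n->confirmed can legitimately appear "in the future" once jiffies wraps; time_in_range(v, lo, hi) only accepts v inside [lo, hi], rejecting both stale and wrapped values. The forced-gc test, spelled out:

	unsigned long tref = jiffies - 5 * HZ;

	/* old: time_after(tref, n->updated) -- confused by wraparound
	 * new: require 'updated' to fall within the last five seconds
	 */
	if (!time_in_range(n->updated, tref, jiffies))
		remove = true;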
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 62a972f04cef..534a53124d14 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -120,6 +120,7 @@ static int net_assign_generic(struct net *net, unsigned int id, void *data)
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
+ struct net_generic *ng;
int err = -ENOMEM;
void *data = NULL;
@@ -138,6 +139,12 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
if (!err)
return 0;
+ if (ops->id && ops->size) {
+ ng = rcu_dereference_protected(net->gen,
+ lockdep_is_held(&pernet_ops_rwsem));
+ ng->ptr[*ops->id] = NULL;
+ }
+
cleanup:
kfree(data);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 78bbb912e502..9b263a5c0f36 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -136,6 +136,20 @@ static void queue_process(struct work_struct *work)
}
}
+static int netif_local_xmit_active(struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+ if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
+ return 1;
+ }
+
+ return 0;
+}
+
static void poll_one_napi(struct napi_struct *napi)
{
int work;
@@ -182,7 +196,10 @@ void netpoll_poll_dev(struct net_device *dev)
if (!ni || down_trylock(&ni->dev_lock))
return;
- if (!netif_running(dev)) {
+ /* Some drivers take the same locks in the poll and xmit paths, so
+ * we must not poll while the local CPU is inside xmit.
+ */
+ if (!netif_running(dev) || netif_local_xmit_active(dev)) {
up(&ni->dev_lock);
return;
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1d20dd70879b..5e9bd9d80b39 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -645,19 +645,19 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
seq_puts(seq, " Flags: ");
for (i = 0; i < NR_PKT_FLAGS; i++) {
- if (i == F_FLOW_SEQ)
+ if (i == FLOW_SEQ_SHIFT)
if (!pkt_dev->cflows)
continue;
- if (pkt_dev->flags & (1 << i))
+ if (pkt_dev->flags & (1 << i)) {
seq_printf(seq, "%s ", pkt_flag_names[i]);
- else if (i == F_FLOW_SEQ)
- seq_puts(seq, "FLOW_RND ");
-
#ifdef CONFIG_XFRM
- if (i == F_IPSEC && pkt_dev->spi)
- seq_printf(seq, "spi:%u", pkt_dev->spi);
+ if (i == IPSEC_SHIFT && pkt_dev->spi)
+ seq_printf(seq, "spi:%u ", pkt_dev->spi);
#endif
+ } else if (i == FLOW_SEQ_SHIFT) {
+ seq_puts(seq, "FLOW_RND ");
+ }
}
seq_puts(seq, "\n");
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index dbc9b2f53649..ee599636f817 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -922,24 +922,27 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
nla_total_size(sizeof(struct ifla_vf_rate)) +
nla_total_size(sizeof(struct ifla_vf_link_state)) +
nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
- nla_total_size(0) + /* nest IFLA_VF_STATS */
- /* IFLA_VF_STATS_RX_PACKETS */
- nla_total_size_64bit(sizeof(__u64)) +
- /* IFLA_VF_STATS_TX_PACKETS */
- nla_total_size_64bit(sizeof(__u64)) +
- /* IFLA_VF_STATS_RX_BYTES */
- nla_total_size_64bit(sizeof(__u64)) +
- /* IFLA_VF_STATS_TX_BYTES */
- nla_total_size_64bit(sizeof(__u64)) +
- /* IFLA_VF_STATS_BROADCAST */
- nla_total_size_64bit(sizeof(__u64)) +
- /* IFLA_VF_STATS_MULTICAST */
- nla_total_size_64bit(sizeof(__u64)) +
- /* IFLA_VF_STATS_RX_DROPPED */
- nla_total_size_64bit(sizeof(__u64)) +
- /* IFLA_VF_STATS_TX_DROPPED */
- nla_total_size_64bit(sizeof(__u64)) +
nla_total_size(sizeof(struct ifla_vf_trust)));
+ if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
+ size += num_vfs *
+ (nla_total_size(0) + /* nest IFLA_VF_STATS */
+ /* IFLA_VF_STATS_RX_PACKETS */
+ nla_total_size_64bit(sizeof(__u64)) +
+ /* IFLA_VF_STATS_TX_PACKETS */
+ nla_total_size_64bit(sizeof(__u64)) +
+ /* IFLA_VF_STATS_RX_BYTES */
+ nla_total_size_64bit(sizeof(__u64)) +
+ /* IFLA_VF_STATS_TX_BYTES */
+ nla_total_size_64bit(sizeof(__u64)) +
+ /* IFLA_VF_STATS_BROADCAST */
+ nla_total_size_64bit(sizeof(__u64)) +
+ /* IFLA_VF_STATS_MULTICAST */
+ nla_total_size_64bit(sizeof(__u64)) +
+ /* IFLA_VF_STATS_RX_DROPPED */
+ nla_total_size_64bit(sizeof(__u64)) +
+ /* IFLA_VF_STATS_TX_DROPPED */
+ nla_total_size_64bit(sizeof(__u64)));
+ }
return size;
} else
return 0;
@@ -1189,7 +1192,8 @@ static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
struct net_device *dev,
int vfs_num,
- struct nlattr *vfinfo)
+ struct nlattr *vfinfo,
+ u32 ext_filter_mask)
{
struct ifla_vf_rss_query_en vf_rss_query_en;
struct nlattr *vf, *vfstats, *vfvlanlist;
@@ -1279,33 +1283,35 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
goto nla_put_vf_failure;
}
nla_nest_end(skb, vfvlanlist);
- memset(&vf_stats, 0, sizeof(vf_stats));
- if (dev->netdev_ops->ndo_get_vf_stats)
- dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
- &vf_stats);
- vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
- if (!vfstats)
- goto nla_put_vf_failure;
- if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
- vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
- nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
- vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
- nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
- vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
- nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
- vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
- nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
- vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
- nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
- vf_stats.multicast, IFLA_VF_STATS_PAD) ||
- nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
- vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
- nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
- vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
- nla_nest_cancel(skb, vfstats);
- goto nla_put_vf_failure;
+ if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
+ memset(&vf_stats, 0, sizeof(vf_stats));
+ if (dev->netdev_ops->ndo_get_vf_stats)
+ dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
+ &vf_stats);
+ vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
+ if (!vfstats)
+ goto nla_put_vf_failure;
+ if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
+ vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
+ nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
+ vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
+ nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
+ vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
+ nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
+ vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
+ nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
+ vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
+ nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
+ vf_stats.multicast, IFLA_VF_STATS_PAD) ||
+ nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
+ vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
+ nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
+ vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
+ nla_nest_cancel(skb, vfstats);
+ goto nla_put_vf_failure;
+ }
+ nla_nest_end(skb, vfstats);
}
- nla_nest_end(skb, vfstats);
nla_nest_end(skb, vf);
return 0;
@@ -1338,7 +1344,7 @@ static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
return -EMSGSIZE;
for (i = 0; i < num_vfs; i++) {
- if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
+ if (rtnl_fill_vfinfo(skb, dev, i, vfinfo, ext_filter_mask))
return -EMSGSIZE;
}
@@ -1593,6 +1599,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
{
struct ifinfomsg *ifm;
struct nlmsghdr *nlh;
+ struct Qdisc *qdisc;
ASSERT_RTNL();
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
@@ -1610,6 +1617,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
goto nla_put_failure;
+ qdisc = rtnl_dereference(dev->qdisc);
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
nla_put_u8(skb, IFLA_OPERSTATE,
@@ -1628,8 +1636,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
#endif
put_master_ifindex(skb, dev) ||
nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
- (dev->qdisc &&
- nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
+ (qdisc &&
+ nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
nla_put_ifalias(skb, dev) ||
nla_put_u32(skb, IFLA_CARRIER_CHANGES,
atomic_read(&dev->carrier_up_count) +
@@ -2026,13 +2034,27 @@ out_err:
return err;
}
-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
- struct netlink_ext_ack *exterr)
+int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
+ struct netlink_ext_ack *exterr)
{
- return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
+ const struct ifinfomsg *ifmp;
+ const struct nlattr *attrs;
+ size_t len;
+
+ ifmp = nla_data(nla_peer);
+ attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
+ len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
+
+ if (ifmp->ifi_index < 0) {
+ NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
+ "ifindex can't be negative");
+ return -EINVAL;
+ }
+
+ return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
exterr);
}
-EXPORT_SYMBOL(rtnl_nla_parse_ifla);
+EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
{
@@ -3054,9 +3076,12 @@ replay:
ifname[0] = '\0';
ifm = nlmsg_data(nlh);
- if (ifm->ifi_index > 0)
+ if (ifm->ifi_index > 0) {
dev = __dev_get_by_index(net, ifm->ifi_index);
- else {
+ } else if (ifm->ifi_index < 0) {
+ NL_SET_ERR_MSG(extack, "ifindex can't be negative");
+ return -EINVAL;
+ } else {
if (ifname[0])
dev = __dev_get_by_name(net, ifname);
else
@@ -3578,7 +3603,7 @@ static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
ndm->ndm_ifindex = dev->ifindex;
ndm->ndm_state = ndm_state;
- if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
+ if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
goto nla_put_failure;
if (vid)
if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
@@ -3592,10 +3617,10 @@ nla_put_failure:
return -EMSGSIZE;
}
-static inline size_t rtnl_fdb_nlmsg_size(void)
+static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
{
return NLMSG_ALIGN(sizeof(struct ndmsg)) +
- nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
+ nla_total_size(dev->addr_len) + /* NDA_LLADDR */
nla_total_size(sizeof(u16)) + /* NDA_VLAN */
0;
}
@@ -3607,7 +3632,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
struct sk_buff *skb;
int err = -ENOBUFS;
- skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
+ skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
if (!skb)
goto errout;
@@ -4561,10 +4586,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net *net = sock_net(skb->sk);
struct ifinfomsg *ifm;
struct net_device *dev;
- struct nlattr *br_spec, *attr = NULL;
+ struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
int rem, err = -EOPNOTSUPP;
u16 flags = 0;
- bool have_flags = false;
if (nlmsg_len(nlh) < sizeof(*ifm))
return -EINVAL;
@@ -4582,13 +4606,17 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (br_spec) {
nla_for_each_nested(attr, br_spec, rem) {
- if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+ if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
if (nla_len(attr) < sizeof(flags))
return -EINVAL;
- have_flags = true;
+ br_flags_attr = attr;
flags = nla_get_u16(attr);
- break;
+ }
+
+ if (nla_type(attr) == IFLA_BRIDGE_MODE) {
+ if (nla_len(attr) < sizeof(u16))
+ return -EINVAL;
}
}
}
@@ -4626,8 +4654,8 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
}
}
- if (have_flags)
- memcpy(nla_data(attr), &flags, sizeof(flags));
+ if (br_flags_attr)
+ memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
out:
return err;
}
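
The rtnetlink.c hunks gate the IFLA_VF_STATS nest (and its size estimate) on "~ext_filter_mask & RTEXT_FILTER_SKIP_STATS", an idiom worth decoding: the expression is non-zero exactly when the flag is clear, i.e. when the dump requester did not ask to skip VF stats. Spelled out equivalently:

	if (!(ext_filter_mask & RTEXT_FILTER_SKIP_STATS)) {
		/* caller wants stats: emit the IFLA_VF_STATS nest */
	}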
diff --git a/net/core/scm.c b/net/core/scm.c
index 31a38239c92f..a442bf63cd48 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -26,6 +26,7 @@
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
+#include <linux/io_uring.h>
#include <linux/uaccess.h>
@@ -103,6 +104,11 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
if (fd < 0 || !(file = fget_raw(fd)))
return -EBADF;
+ /* don't allow io_uring files */
+ if (io_is_uring_fops(file)) {
+ fput(file);
+ return -EINVAL;
+ }
*fpp++ = file;
fpl->count++;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5bdb3cd20d61..82be36c87eb6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2115,6 +2115,9 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
insp = list;
} else {
/* Eaten partially. */
+ if (skb_is_gso(skb) && !list->head_frag &&
+ skb_headlen(list))
+ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
if (skb_shared(list)) {
/* Sucks! We need to fork list. :-( */
@@ -3683,40 +3686,41 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
struct sk_buff *segs = NULL;
struct sk_buff *tail = NULL;
struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
- skb_frag_t *frag = skb_shinfo(head_skb)->frags;
unsigned int mss = skb_shinfo(head_skb)->gso_size;
unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
- struct sk_buff *frag_skb = head_skb;
unsigned int offset = doffset;
unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
unsigned int partial_segs = 0;
unsigned int headroom;
unsigned int len = head_skb->len;
+ struct sk_buff *frag_skb;
+ skb_frag_t *frag;
__be16 proto;
bool csum, sg;
- int nfrags = skb_shinfo(head_skb)->nr_frags;
int err = -ENOMEM;
int i = 0;
- int pos;
+ int nfrags, pos;
int dummy;
- if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
- (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
- /* gso_size is untrusted, and we have a frag_list with a linear
- * non head_frag head.
- *
- * (we assume checking the first list_skb member suffices;
- * i.e if either of the list_skb members have non head_frag
- * head, then the first one has too).
- *
- * If head_skb's headlen does not fit requested gso_size, it
- * means that the frag_list members do NOT terminate on exact
- * gso_size boundaries. Hence we cannot perform skb_frag_t page
- * sharing. Therefore we must fallback to copying the frag_list
- * skbs; we do so by disabling SG.
- */
- if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
- features &= ~NETIF_F_SG;
+ if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
+ mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
+ struct sk_buff *check_skb;
+
+ for (check_skb = list_skb; check_skb; check_skb = check_skb->next) {
+ if (skb_headlen(check_skb) && !check_skb->head_frag) {
+ /* gso_size is untrusted, and we have a frag_list with
+ * a linear non head_frag item.
+ *
+ * If head_skb's headlen does not fit requested gso_size,
+ * it means that the frag_list members do NOT terminate
+ * on exact gso_size boundaries. Hence we cannot perform
+ * skb_frag_t page sharing. Therefore we must fallback to
+ * copying the frag_list skbs; we do so by disabling SG.
+ */
+ features &= ~NETIF_F_SG;
+ break;
+ }
+ }
}
__skb_push(head_skb, doffset);
@@ -3761,8 +3765,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
/* GSO partial only requires that we trim off any excess that
* doesn't fit into an MSS sized block, so take care of that
* now.
+ * Cap len to not accidentally hit GSO_BY_FRAGS.
*/
- partial_segs = len / mss;
+ partial_segs = min(len, GSO_BY_FRAGS - 1U) / mss;
if (partial_segs > 1)
mss *= partial_segs;
else
@@ -3773,6 +3778,13 @@ normal:
headroom = skb_headroom(head_skb);
pos = skb_headlen(head_skb);
+ if (skb_orphan_frags(head_skb, GFP_ATOMIC))
+ return ERR_PTR(-ENOMEM);
+
+ nfrags = skb_shinfo(head_skb)->nr_frags;
+ frag = skb_shinfo(head_skb)->frags;
+ frag_skb = head_skb;
+
do {
struct sk_buff *nskb;
skb_frag_t *nskb_frag;
@@ -3797,6 +3809,10 @@ normal:
(skb_headlen(list_skb) == len || sg)) {
BUG_ON(skb_headlen(list_skb) > len);
+ nskb = skb_clone(list_skb, GFP_ATOMIC);
+ if (unlikely(!nskb))
+ goto err;
+
i = 0;
nfrags = skb_shinfo(list_skb)->nr_frags;
frag = skb_shinfo(list_skb)->frags;
@@ -3815,12 +3831,8 @@ normal:
frag++;
}
- nskb = skb_clone(list_skb, GFP_ATOMIC);
list_skb = list_skb->next;
- if (unlikely(!nskb))
- goto err;
-
if (unlikely(pskb_trim(nskb, len))) {
kfree_skb(nskb);
goto err;
@@ -3885,12 +3897,16 @@ normal:
skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
SKBTX_SHARED_FRAG;
- if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
- skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
+ if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
goto err;
while (pos < offset + len) {
if (i >= nfrags) {
+ if (skb_orphan_frags(list_skb, GFP_ATOMIC) ||
+ skb_zerocopy_clone(nskb, list_skb,
+ GFP_ATOMIC))
+ goto err;
+
i = 0;
nfrags = skb_shinfo(list_skb)->nr_frags;
frag = skb_shinfo(list_skb)->frags;
@@ -3904,10 +3920,6 @@ normal:
i--;
frag--;
}
- if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
- skb_zerocopy_clone(nskb, frag_skb,
- GFP_ATOMIC))
- goto err;
list_skb = list_skb->next;
}
@@ -4564,7 +4576,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
{
bool ret;
- if (likely(sysctl_tstamp_allow_data || tsonly))
+ if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
return true;
read_lock_bh(&sk->sk_callback_lock);
@@ -4627,6 +4639,11 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
skb = alloc_skb(0, GFP_ATOMIC);
} else {
skb = skb_clone(orig_skb, GFP_ATOMIC);
+
+ if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) {
+ kfree_skb(skb);
+ return;
+ }
}
if (!skb)
return;
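
In skb_segment(), GSO_BY_FRAGS (0xffff) is a sentinel gso_size meaning "segment at frag_list boundaries", so after 'mss *= partial_segs' the product must never land on the sentinel. Capping the dividend guarantees it:

	partial_segs = min(len, GSO_BY_FRAGS - 1U) / mss;
	if (partial_segs > 1)
		mss *= partial_segs;	/* <= GSO_BY_FRAGS - 1 by construction */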
diff --git a/net/core/sock.c b/net/core/sock.c
index c84f68bff7f5..daeb0e69c71b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -545,7 +545,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
sk_tx_queue_clear(sk);
- sk->sk_dst_pending_confirm = 0;
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
dst_release(dst);
return NULL;
@@ -701,7 +701,8 @@ bool sk_mc_loop(struct sock *sk)
return false;
if (!sk)
return true;
- switch (sk->sk_family) {
+ /* IPV6_ADDRFORM can change sk->sk_family under us. */
+ switch (READ_ONCE(sk->sk_family)) {
case AF_INET:
return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
@@ -1117,7 +1118,8 @@ set_rcvbuf:
cmpxchg(&sk->sk_pacing_status,
SK_PACING_NONE,
SK_PACING_NEEDED);
- sk->sk_max_pacing_rate = ulval;
+ /* Pairs with READ_ONCE() from sk_getsockopt() */
+ WRITE_ONCE(sk->sk_max_pacing_rate, ulval);
sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
break;
}
@@ -1257,11 +1259,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_SNDBUF:
- v.val = sk->sk_sndbuf;
+ v.val = READ_ONCE(sk->sk_sndbuf);
break;
case SO_RCVBUF:
- v.val = sk->sk_rcvbuf;
+ v.val = READ_ONCE(sk->sk_rcvbuf);
break;
case SO_REUSEADDR:
@@ -1349,7 +1351,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_RCVLOWAT:
- v.val = sk->sk_rcvlowat;
+ v.val = READ_ONCE(sk->sk_rcvlowat);
break;
case SO_SNDLOWAT:
@@ -1443,7 +1445,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
if (!sock->ops->set_peek_off)
return -EOPNOTSUPP;
- v.val = sk->sk_peek_off;
+ v.val = READ_ONCE(sk->sk_peek_off);
break;
case SO_NOFCS:
v.val = sock_flag(sk, SOCK_NOFCS);
@@ -1473,17 +1475,19 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
#ifdef CONFIG_NET_RX_BUSY_POLL
case SO_BUSY_POLL:
- v.val = sk->sk_ll_usec;
+ v.val = READ_ONCE(sk->sk_ll_usec);
break;
#endif
case SO_MAX_PACING_RATE:
+ /* The READ_ONCE() pairs with the WRITE_ONCE() in sk_setsockopt() */
if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
lv = sizeof(v.ulval);
- v.ulval = sk->sk_max_pacing_rate;
+ v.ulval = READ_ONCE(sk->sk_max_pacing_rate);
} else {
/* 32bit version */
- v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
+ v.val = min_t(unsigned long, ~0U,
+ READ_ONCE(sk->sk_max_pacing_rate));
}
break;
@@ -1938,7 +1942,6 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
u32 max_segs = 1;
- sk_dst_set(sk, dst);
sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
if (sk->sk_route_caps & NETIF_F_GSO)
sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
@@ -1953,6 +1956,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
}
}
sk->sk_gso_max_segs = max_segs;
+ sk_dst_set(sk, dst);
}
EXPORT_SYMBOL_GPL(sk_setup_caps);
@@ -2085,13 +2089,24 @@ kuid_t sock_i_uid(struct sock *sk)
}
EXPORT_SYMBOL(sock_i_uid);
-unsigned long sock_i_ino(struct sock *sk)
+unsigned long __sock_i_ino(struct sock *sk)
{
unsigned long ino;
- read_lock_bh(&sk->sk_callback_lock);
+ read_lock(&sk->sk_callback_lock);
ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
- read_unlock_bh(&sk->sk_callback_lock);
+ read_unlock(&sk->sk_callback_lock);
+ return ino;
+}
+EXPORT_SYMBOL(__sock_i_ino);
+
+unsigned long sock_i_ino(struct sock *sk)
+{
+ unsigned long ino;
+
+ local_bh_disable();
+ ino = __sock_i_ino(sk);
+ local_bh_enable();
return ino;
}
EXPORT_SYMBOL(sock_i_ino);
@@ -2208,9 +2223,9 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
break;
- if (sk->sk_shutdown & SEND_SHUTDOWN)
+ if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
break;
- if (sk->sk_err)
+ if (READ_ONCE(sk->sk_err))
break;
timeo = schedule_timeout(timeo);
}
@@ -2238,7 +2253,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
goto failure;
err = -EPIPE;
- if (sk->sk_shutdown & SEND_SHUTDOWN)
+ if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
goto failure;
if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
@@ -2288,6 +2303,7 @@ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
sockc->mark = *(u32 *)CMSG_DATA(cmsg);
break;
case SO_TIMESTAMPING_OLD:
+ case SO_TIMESTAMPING_NEW:
if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
return -EINVAL;
@@ -2617,7 +2633,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount)
if (mem_cgroup_sockets_enabled && sk->sk_memcg)
mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
- if (sk_under_memory_pressure(sk) &&
+ if (sk_under_global_memory_pressure(sk) &&
(sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
sk_leave_memory_pressure(sk);
}
@@ -2638,7 +2654,7 @@ EXPORT_SYMBOL(__sk_mem_reclaim);
int sk_set_peek_off(struct sock *sk, int val)
{
- sk->sk_peek_off = val;
+ WRITE_ONCE(sk->sk_peek_off, val);
return 0;
}
EXPORT_SYMBOL_GPL(sk_set_peek_off);
@@ -2882,7 +2898,14 @@ void sk_stop_timer(struct sock *sk, struct timer_list* timer)
}
EXPORT_SYMBOL(sk_stop_timer);
-void sock_init_data(struct socket *sock, struct sock *sk)
+void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
+{
+ if (del_timer_sync(timer))
+ __sock_put(sk);
+}
+EXPORT_SYMBOL(sk_stop_timer_sync);
+
+void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid)
{
sk_init_common(sk);
sk->sk_send_head = NULL;
@@ -2901,11 +2924,10 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_type = sock->type;
RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
sock->sk = sk;
- sk->sk_uid = SOCK_INODE(sock)->i_uid;
} else {
RCU_INIT_POINTER(sk->sk_wq, NULL);
- sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0);
}
+ sk->sk_uid = uid;
rwlock_init(&sk->sk_callback_lock);
if (sk->sk_kern_sock)
@@ -2946,7 +2968,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
#ifdef CONFIG_NET_RX_BUSY_POLL
sk->sk_napi_id = 0;
- sk->sk_ll_usec = sysctl_net_busy_read;
+ sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read);
#endif
sk->sk_max_pacing_rate = ~0UL;
@@ -2963,6 +2985,16 @@ void sock_init_data(struct socket *sock, struct sock *sk)
refcount_set(&sk->sk_refcnt, 1);
atomic_set(&sk->sk_drops, 0);
}
+EXPORT_SYMBOL(sock_init_data_uid);
+
+void sock_init_data(struct socket *sock, struct sock *sk)
+{
+ kuid_t uid = sock ?
+ SOCK_INODE(sock)->i_uid :
+ make_kuid(sock_net(sk)->user_ns, 0);
+
+ sock_init_data_uid(sock, sk, uid);
+}
EXPORT_SYMBOL(sock_init_data);
void lock_sock_nested(struct sock *sk, int subclass)
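
Two patterns recur in the sock.c hunks above. First, sock_init_data() is split so callers that create a struct sock without a backing struct socket inode can still attribute it to a specific uid via sock_init_data_uid(), with sock_init_data() kept as a thin wrapper. Second, fields read by the lockless getsockopt() path gain paired annotations: the setter (under the socket lock) uses WRITE_ONCE() and the reader uses READ_ONCE(), documenting the benign data race for tools like KCSAN. Condensed:

	/* setsockopt path, socket lock held */
	WRITE_ONCE(sk->sk_max_pacing_rate, ulval);

	/* getsockopt path, no lock taken */
	v.ulval = READ_ONCE(sk->sk_max_pacing_rate);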
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index c13ffbd33d8d..315a5200d170 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -188,7 +188,7 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
if (sock_diag_handlers[hndl->family])
err = -EBUSY;
else
- sock_diag_handlers[hndl->family] = hndl;
+ WRITE_ONCE(sock_diag_handlers[hndl->family], hndl);
mutex_unlock(&sock_diag_table_mutex);
return err;
@@ -204,7 +204,7 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
mutex_lock(&sock_diag_table_mutex);
BUG_ON(sock_diag_handlers[family] != hnld);
- sock_diag_handlers[family] = NULL;
+ WRITE_ONCE(sock_diag_handlers[family], NULL);
mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);
@@ -222,7 +222,7 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;
req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX);
- if (sock_diag_handlers[req->sdiag_family] == NULL)
+ if (READ_ONCE(sock_diag_handlers[req->sdiag_family]) == NULL)
sock_load_diag_module(req->sdiag_family, 0);
mutex_lock(&sock_diag_table_mutex);
@@ -281,12 +281,12 @@ static int sock_diag_bind(struct net *net, int group)
switch (group) {
case SKNLGRP_INET_TCP_DESTROY:
case SKNLGRP_INET_UDP_DESTROY:
- if (!sock_diag_handlers[AF_INET])
+ if (!READ_ONCE(sock_diag_handlers[AF_INET]))
sock_load_diag_module(AF_INET, 0);
break;
case SKNLGRP_INET6_TCP_DESTROY:
case SKNLGRP_INET6_UDP_DESTROY:
- if (!sock_diag_handlers[AF_INET6])
+ if (!READ_ONCE(sock_diag_handlers[AF_INET6]))
sock_load_diag_module(AF_INET6, 0);
break;
}
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 2646e8f98f67..5b82ff0e2680 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -115,7 +115,6 @@ static void sock_map_sk_acquire(struct sock *sk)
__acquires(&sk->sk_lock.slock)
{
lock_sock(sk);
- preempt_disable();
rcu_read_lock();
}
@@ -123,7 +122,6 @@ static void sock_map_sk_release(struct sock *sk)
__releases(&sk->sk_lock.slock)
{
rcu_read_unlock();
- preempt_enable();
release_sock(sk);
}
@@ -279,11 +277,13 @@ static void sock_map_free(struct bpf_map *map)
sk = xchg(psk, NULL);
if (sk) {
+ sock_hold(sk);
lock_sock(sk);
rcu_read_lock();
sock_map_unref(sk, psk);
rcu_read_unlock();
release_sock(sk);
+ sock_put(sk);
}
}
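Two distinct problems are fixed in net/core/sock_map.c: the preempt_disable()/preempt_enable() pair is dropped because lock_sock() may sleep and so cannot run with preemption disabled, and sock_map_free() now pins the socket before working on it, since after the xchg() the map no longer holds a reference and the socket could otherwise be freed underneath lock_sock(). The shape of the fix, restated as a sketch:

	sk = xchg(psk, NULL);		/* detach from the map        */
	if (sk) {
		sock_hold(sk);		/* our reference from here on */
		lock_sock(sk);		/* may sleep                  */
		sock_map_unref(sk, psk);
		release_sock(sk);
		sock_put(sk);		/* last put may free sk       */
	}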
diff --git a/net/core/stream.c b/net/core/stream.c
index a166a32b411f..cd60746877b1 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -159,7 +159,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
*timeo_p = current_timeo;
}
out:
- remove_wait_queue(sk_sleep(sk), &wait);
+ if (!sock_flag(sk, SOCK_DEAD))
+ remove_wait_queue(sk_sleep(sk), &wait);
return err;
do_error:
@@ -195,6 +196,12 @@ void sk_stream_kill_queues(struct sock *sk)
/* First the read buffer. */
__skb_queue_purge(&sk->sk_receive_queue);
+ /* Next, the error queue.
+ * We need to take the queue lock, because other threads might
+ * add packets to the queue without holding the socket lock.
+ */
+ skb_queue_purge(&sk->sk_error_queue);
+
/* Next, the write queue. */
WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
@@ -202,7 +209,6 @@ void sk_stream_kill_queues(struct sock *sk)
sk_mem_reclaim(sk);
WARN_ON(sk->sk_wmem_queued);
- WARN_ON(sk->sk_forward_alloc);
/* It is _impossible_ for the backlog to contain anything
* when we get here. All user references to this socket
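Two fixes also land in net/core/stream.c: sk_stream_wait_memory() no longer touches sk_sleep(sk) once the socket is marked SOCK_DEAD, because the wait-queue head can already be gone by then, and sk_stream_kill_queues() purges sk_error_queue with the locking variant, since error-queue producers do not run under the socket lock. The locked versus unlocked purge, side by side as a sketch:

	/* unlocked: safe only while the socket lock excludes all writers */
	__skb_queue_purge(&sk->sk_receive_queue);

	/* locked: takes the queue spinlock, so lockless producers are OK */
	skb_queue_purge(&sk->sk_error_queue);

The removed WARN_ON(sk->sk_forward_alloc) is related: skbs still accounted to the error queue could leave sk_forward_alloc transiently nonzero here, so the warning could fire on correct behaviour.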
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 48041f50ecfb..586598887095 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -238,14 +238,17 @@ static int set_default_qdisc(struct ctl_table *table, int write,
static int proc_do_dev_weight(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- int ret;
+ static DEFINE_MUTEX(dev_weight_mutex);
+ int ret, weight;
+ mutex_lock(&dev_weight_mutex);
ret = proc_dointvec(table, write, buffer, lenp, ppos);
- if (ret != 0)
- return ret;
-
- dev_rx_weight = weight_p * dev_weight_rx_bias;
- dev_tx_weight = weight_p * dev_weight_tx_bias;
+ if (!ret && write) {
+ weight = READ_ONCE(weight_p);
+ WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
+ WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
+ }
+ mutex_unlock(&dev_weight_mutex);
return ret;
}
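proc_do_dev_weight() could previously run concurrently with itself, letting two writers interleave and compute dev_rx_weight and dev_tx_weight from different weight_p values. The handler is now serialized with a function-local mutex, snapshots weight_p once, and only recomputes on a successful write. Reduced to a sketch:

	static DEFINE_MUTEX(update_mutex);

	static void recompute_weights(int rx_bias, int tx_bias)
	{
		int w;

		mutex_lock(&update_mutex);
		w = READ_ONCE(weight_p);		/* one snapshot     */
		WRITE_ONCE(dev_rx_weight, w * rx_bias);	/* derived together */
		WRITE_ONCE(dev_tx_weight, w * tx_bias);
		mutex_unlock(&update_mutex);
	}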
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index b53d5e1d026f..71e97e2a3684 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -946,7 +946,7 @@ static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
return -EOPNOTSUPP;
ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX,
- tb[DCB_ATTR_BCN], dcbnl_pfc_up_nest,
+ tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
NULL);
if (ret)
return ret;
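The dcbnl.c one-liner is a validation fix: the DCB_ATTR_BCN nest was being parsed against dcbnl_pfc_up_nest, the policy table for a different attribute set, so BCN attributes were never checked against their own policy. The invariant is that the policy passed to nla_parse_nested_deprecated() must describe the nest actually being parsed, as in the corrected call:

	ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX,
					  tb[DCB_ATTR_BCN],	/* BCN nest   */
					  dcbnl_bcn_nest,	/* BCN policy */
					  NULL);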
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index cb818617699c..8c9a63ef7585 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -288,6 +288,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
const struct dccp_hdr *dh, const unsigned int len);
+void dccp_destruct_common(struct sock *sk);
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
void dccp_destroy_sock(struct sock *sk);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 7cf903f9e29a..944cc34f707d 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -130,6 +130,8 @@ failure:
* This unhashes the socket and releases the local port, if necessary.
*/
dccp_set_state(sk, DCCP_CLOSED);
+ if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
+ inet_reset_saddr(sk);
ip_rt_put(rt);
sk->sk_route_caps = 0;
inet->inet_dport = 0;
@@ -241,12 +243,12 @@ static int dccp_v4_err(struct sk_buff *skb, u32 info)
int err;
struct net *net = dev_net(skb->dev);
- /* Only need dccph_dport & dccph_sport which are the first
- * 4 bytes in dccp header.
- * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
- */
- BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
- BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
+ if (!pskb_may_pull(skb, offset + sizeof(*dh)))
+ return -EINVAL;
+ dh = (struct dccp_hdr *)(skb->data + offset);
+ if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
+ return -EINVAL;
+ iph = (struct iphdr *)skb->data;
dh = (struct dccp_hdr *)(skb->data + offset);
sk = __inet_lookup_established(net, &dccp_hashinfo,
@@ -609,9 +611,6 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (dccp_parse_options(sk, dreq, skb))
goto drop_and_free;
- if (security_inet_conn_request(sk, skb, req))
- goto drop_and_free;
-
ireq = inet_rsk(req);
sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
@@ -619,6 +618,9 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
ireq->ireq_family = AF_INET;
ireq->ir_iif = sk->sk_bound_dev_if;
+ if (security_inet_conn_request(sk, skb, req))
+ goto drop_and_free;
+
/*
* Step 3: Process LISTEN state
*
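Three changes overlap in net/dccp/ipv4.c. The failed-connect path resets the cached source address unless the user bound one explicitly, so a later connect() does not reuse a stale saddr. dccp_v4_err() stops relying on the ICMP core having pulled 8 bytes and validates the header itself with pskb_may_pull(). And security_inet_conn_request() moves below the inet_request_sock initialization, so security modules see the request's addresses and family filled in rather than uninitialized. One subtlety in the pull pattern is worth spelling out: pskb_may_pull() may reallocate the skb head, so pointers into the old data must be recomputed after every pull, which is why dh is derived twice above. The idiom, as a sketch:

	if (!pskb_may_pull(skb, offset + sizeof(*dh)))
		return -EINVAL;
	dh = (struct dccp_hdr *)(skb->data + offset);	/* fresh pointer */

	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
		return -EINVAL;
	dh = (struct dccp_hdr *)(skb->data + offset);	/* head may have moved */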
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 7c24927e9c2c..c3a378ef95aa 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -67,7 +67,7 @@ static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
- const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
+ const struct ipv6hdr *hdr;
const struct dccp_hdr *dh;
struct dccp_sock *dp;
struct ipv6_pinfo *np;
@@ -76,12 +76,12 @@ static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
__u64 seq;
struct net *net = dev_net(skb->dev);
- /* Only need dccph_dport & dccph_sport which are the first
- * 4 bytes in dccp header.
- * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
- */
- BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
- BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
+ if (!pskb_may_pull(skb, offset + sizeof(*dh)))
+ return -EINVAL;
+ dh = (struct dccp_hdr *)(skb->data + offset);
+ if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
+ return -EINVAL;
+ hdr = (const struct ipv6hdr *)skb->data;
dh = (struct dccp_hdr *)(skb->data + offset);
sk = __inet6_lookup_established(net, &dccp_hashinfo,
@@ -349,15 +349,15 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (dccp_parse_options(sk, dreq, skb))
goto drop_and_free;
- if (security_inet_conn_request(sk, skb, req))
- goto drop_and_free;
-
ireq = inet_rsk(req);
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
ireq->ireq_family = AF_INET6;
ireq->ir_mark = inet_request_mark(sk, skb);
+ if (security_inet_conn_request(sk, skb, req))
+ goto drop_and_free;
+
if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -541,11 +541,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
/* Clone pktoptions received with SYN, if we own the req */
if (*own_req && ireq->pktopts) {
- newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
+ newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
consume_skb(ireq->pktopts);
ireq->pktopts = NULL;
- if (newnp->pktoptions)
- skb_set_owner_r(newnp->pktoptions, newsk);
}
return newsk;
@@ -605,7 +603,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
--ANK (980728)
*/
if (np->rxopt.all)
- opt_skb = skb_clone(skb, GFP_ATOMIC);
+ opt_skb = skb_clone_and_charge_r(skb, sk);
if (sk->sk_state == DCCP_OPEN) { /* Fast path */
if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
@@ -669,7 +667,6 @@ ipv6_pktoptions:
np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
if (ipv6_opt_accepted(sk, opt_skb,
&DCCP_SKB_CB(opt_skb)->header.h6)) {
- skb_set_owner_r(opt_skb, sk);
memmove(IP6CB(opt_skb),
&DCCP_SKB_CB(opt_skb)->header.h6,
sizeof(struct inet6_skb_parm));
@@ -957,6 +954,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
late_failure:
dccp_set_state(sk, DCCP_CLOSED);
+ if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
+ inet_reset_saddr(sk);
__sk_dst_reset(sk);
failure:
inet->inet_dport = 0;
@@ -1001,6 +1000,12 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
#endif
};
+static void dccp_v6_sk_destruct(struct sock *sk)
+{
+ dccp_destruct_common(sk);
+ inet6_sock_destruct(sk);
+}
+
/* NOTE: A lot of things set to zero explicitly by call to
* sk_alloc() so need not be done here.
*/
@@ -1013,17 +1018,12 @@ static int dccp_v6_init_sock(struct sock *sk)
if (unlikely(!dccp_v6_ctl_sock_initialized))
dccp_v6_ctl_sock_initialized = 1;
inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
+ sk->sk_destruct = dccp_v6_sk_destruct;
}
return err;
}
-static void dccp_v6_destroy_sock(struct sock *sk)
-{
- dccp_destroy_sock(sk);
- inet6_destroy_sock(sk);
-}
-
static struct timewait_sock_ops dccp6_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct dccp6_timewait_sock),
};
@@ -1046,7 +1046,7 @@ static struct proto dccp_v6_prot = {
.accept = inet_csk_accept,
.get_port = inet_csk_get_port,
.shutdown = dccp_shutdown,
- .destroy = dccp_v6_destroy_sock,
+ .destroy = dccp_destroy_sock,
.orphan_count = &dccp_orphan_count,
.max_header = MAX_DCCP_HEADER,
.obj_size = sizeof(struct dccp6_sock),
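The ipv6.c changes move IPv6-specific teardown out of the protocol's .destroy hook and into sk_destruct: dccp_v6_sk_destruct() chains dccp_destruct_common() with inet6_sock_destruct(), so IPv6 option state is freed even on paths where .destroy never runs, and .destroy falls back to the shared dccp_destroy_sock(). The pktoptions handling switches to skb_clone_and_charge_r(), which clones and charges receive memory in one step; roughly, and ignoring the receive-buffer accounting check the real helper also performs:

	/* approximate behaviour of skb_clone_and_charge_r(); sketch only */
	static struct sk_buff *clone_and_charge(struct sk_buff *skb,
						struct sock *sk)
	{
		struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

		if (clone)
			skb_set_owner_r(clone, sk);  /* charge sk_rmem_alloc */
		return clone;
	}

Charging at clone time replaces the two deleted open-coded skb_set_owner_r() calls that previously ran after the fact.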
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 3b42f5c6a63d..9fed0ae21e63 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -56,7 +56,7 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
(dh->dccph_doff * 4);
struct dccp_options_received *opt_recv = &dp->dccps_options_received;
unsigned char opt, len;
- unsigned char *uninitialized_var(value);
+ unsigned char *value;
u32 elapsed_time;
__be32 opt_val;
int rc;
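The options.c change only drops uninitialized_var(), a macro that silenced may-be-used-uninitialized warnings by expanding to a self-assignment; it was removed tree-wide because it could hide real bugs. The before/after, as a sketch:

	/* before: expands to 'value = value', muting the warning */
	unsigned char *uninitialized_var(value);

	/* after: plain declaration, so the compiler's flow analysis applies */
	unsigned char *value;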
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 6433187a5cc4..7490b4f09bbb 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -185,7 +185,7 @@ unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
/* And store cached results */
icsk->icsk_pmtu_cookie = pmtu;
- dp->dccps_mss_cache = cur_mps;
+ WRITE_ONCE(dp->dccps_mss_cache, cur_mps);
return cur_mps;
}
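dccps_mss_cache is updated here under the socket lock but read locklessly by dccp_sendmsg() and do_dccp_getsockopt() in net/dccp/proto.c below, hence the WRITE_ONCE() store pairing with READ_ONCE() loads on the reader side:

	/* writer, socket lock held */
	WRITE_ONCE(dp->dccps_mss_cache, cur_mps);

	/* reader, no socket lock */
	if (len > READ_ONCE(dp->dccps_mss_cache))
		return -EMSGSIZE;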
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index cb61a9d281f6..491b148afa8f 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -171,12 +171,18 @@ const char *dccp_packet_name(const int type)
EXPORT_SYMBOL_GPL(dccp_packet_name);
-static void dccp_sk_destruct(struct sock *sk)
+void dccp_destruct_common(struct sock *sk)
{
struct dccp_sock *dp = dccp_sk(sk);
ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
dp->dccps_hc_tx_ccid = NULL;
+}
+EXPORT_SYMBOL_GPL(dccp_destruct_common);
+
+static void dccp_sk_destruct(struct sock *sk)
+{
+ dccp_destruct_common(sk);
inet_sock_destruct(sk);
}
@@ -318,11 +324,15 @@ EXPORT_SYMBOL_GPL(dccp_disconnect);
__poll_t dccp_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
- __poll_t mask;
struct sock *sk = sock->sk;
+ __poll_t mask;
+ u8 shutdown;
+ int state;
sock_poll_wait(file, sock, wait);
- if (sk->sk_state == DCCP_LISTEN)
+
+ state = inet_sk_state_load(sk);
+ if (state == DCCP_LISTEN)
return inet_csk_listen_poll(sk);
/* Socket is not locked. We are protected from async events
@@ -331,20 +341,21 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
*/
mask = 0;
- if (sk->sk_err)
+ if (READ_ONCE(sk->sk_err))
mask = EPOLLERR;
+ shutdown = READ_ONCE(sk->sk_shutdown);
- if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
+ if (shutdown == SHUTDOWN_MASK || state == DCCP_CLOSED)
mask |= EPOLLHUP;
- if (sk->sk_shutdown & RCV_SHUTDOWN)
+ if (shutdown & RCV_SHUTDOWN)
mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
/* Connected? */
- if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
+ if ((1 << state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
if (atomic_read(&sk->sk_rmem_alloc) > 0)
mask |= EPOLLIN | EPOLLRDNORM;
- if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
+ if (!(shutdown & SEND_SHUTDOWN)) {
if (sk_stream_is_writeable(sk)) {
mask |= EPOLLOUT | EPOLLWRNORM;
} else { /* send SIGIO later */
@@ -362,7 +373,6 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
}
return mask;
}
-
EXPORT_SYMBOL_GPL(dccp_poll);
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
@@ -638,7 +648,7 @@ static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
return dccp_getsockopt_service(sk, len,
(__be32 __user *)optval, optlen);
case DCCP_SOCKOPT_GET_CUR_MPS:
- val = dp->dccps_mss_cache;
+ val = READ_ONCE(dp->dccps_mss_cache);
break;
case DCCP_SOCKOPT_AVAILABLE_CCIDS:
return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
@@ -760,16 +770,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
trace_dccp_probe(sk, len);
- if (len > dp->dccps_mss_cache)
+ if (len > READ_ONCE(dp->dccps_mss_cache))
return -EMSGSIZE;
lock_sock(sk);
- if (dccp_qpolicy_full(sk)) {
- rc = -EAGAIN;
- goto out_release;
- }
-
timeo = sock_sndtimeo(sk, noblock);
/*
@@ -788,11 +793,22 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (skb == NULL)
goto out_release;
+ if (dccp_qpolicy_full(sk)) {
+ rc = -EAGAIN;
+ goto out_discard;
+ }
+
if (sk->sk_state == DCCP_CLOSED) {
rc = -ENOTCONN;
goto out_discard;
}
+ /* We need to re-check dccps_mss_cache once the socket is locked. */
+ if (len > dp->dccps_mss_cache) {
+ rc = -EMSGSIZE;
+ goto out_discard;
+ }
+
skb_reserve(skb, sk->sk_prot->max_header);
rc = memcpy_from_msg(skb_put(skb, len), msg, len);
if (rc != 0)
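The proto.c block has two themes. dccp_poll() runs without the socket lock, so it now snapshots the racy fields once, via inet_sk_state_load() and READ_ONCE(), and tests only the snapshots; that keeps each decision internally consistent even if the socket changes state mid-poll. And dccp_sendmsg() repeats both the queue-policy and dccps_mss_cache checks after lock_sock(), since the unlocked fast-path checks can go stale before the lock is taken; failing after the skb allocation also lets the error path free it through out_discard. The snapshot idiom, as a sketch:

	state    = inet_sk_state_load(sk);	/* paired with the store side */
	shutdown = READ_ONCE(sk->sk_shutdown);

	if (shutdown == SHUTDOWN_MASK || state == DCCP_CLOSED)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;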
diff --git a/net/decnet/Kconfig b/net/decnet/Kconfig
deleted file mode 100644
index 0935453ccfd5..000000000000
--- a/net/decnet/Kconfig
+++ /dev/null
@@ -1,43 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# DECnet configuration
-#
-config DECNET
- tristate "DECnet Support"
- ---help---
- The DECnet networking protocol was used in many products made by
- Digital (now Compaq). It provides reliable stream and sequenced
- packet communications over which run a variety of services similar
- to those which run over TCP/IP.
-
- To find some tools to use with the kernel layer support, please
- look at Patrick Caulfield's web site:
- <http://linux-decnet.sourceforge.net/>.
-
- More detailed documentation is available in
- <file:Documentation/networking/decnet.txt>.
-
- Be sure to say Y to "/proc file system support" and "Sysctl support"
- below when using DECnet, since you will need sysctl support to aid
- in configuration at run time.
-
- The DECnet code is also available as a module ( = code which can be
- inserted in and removed from the running kernel whenever you want).
- The module is called decnet.
-
-config DECNET_ROUTER
- bool "DECnet: router support"
- depends on DECNET
- select FIB_RULES
- ---help---
- Add support for turning your DECnet Endnode into a level 1 or 2
- router. This is an experimental, but functional option. If you
- do say Y here, then make sure that you also say Y to "Kernel/User
- network link driver", "Routing messages" and "Network packet
- filtering". The first two are required to allow configuration via
- rtnetlink (you will need Alexey Kuznetsov's iproute2 package
- from <ftp://ftp.tux.org/pub/net/ip-routing/>). The "Network packet
- filtering" option will be required for the forthcoming routing daemon
- to work.
-
- See <file:Documentation/networking/decnet.txt> for more information.
diff --git a/net/decnet/Makefile b/net/decnet/Makefile
deleted file mode 100644
index 07b38e441b2d..000000000000
--- a/net/decnet/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-$(CONFIG_DECNET) += decnet.o
-
-decnet-y := af_decnet.o dn_nsp_in.o dn_nsp_out.o \
- dn_route.o dn_dev.o dn_neigh.o dn_timer.o
-decnet-$(CONFIG_DECNET_ROUTER) += dn_fib.o dn_rules.o dn_table.o
-decnet-y += sysctl_net_decnet.o
-
-obj-$(CONFIG_NETFILTER) += netfilter/
diff --git a/net/decnet/README b/net/decnet/README
deleted file mode 100644
index 60e7ec88c81f..000000000000
--- a/net/decnet/README
+++ /dev/null
@@ -1,8 +0,0 @@
- Linux DECnet Project
- ======================
-
-The documentation for this kernel subsystem is available in the
-Documentation/networking subdirectory of this distribution and also
-on line at http://www.chygwyn.com/DECnet/
-
-Steve Whitehouse <SteveW@ACM.org>
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
deleted file mode 100644
index b9b847dc097c..000000000000
--- a/net/decnet/af_decnet.c
+++ /dev/null
@@ -1,2400 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-/*
- * DECnet An implementation of the DECnet protocol suite for the LINUX
- * operating system. DECnet is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * DECnet Socket Layer Interface
- *
- * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com>
- * Patrick Caulfield <patrick@pandh.demon.co.uk>
- *
- * Changes:
- * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's
- * version of the code. Original copyright preserved
- * below.
- * Steve Whitehouse: Some bug fixes, cleaning up some code to make it
- * compatible with my routing layer.
- * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick
- * Caulfield.
- * Steve Whitehouse: Further bug fixes, checking module code still works
- * with new routing layer.
- * Steve Whitehouse: Additional set/get_sockopt() calls.
- * Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new
- * code.
- * Steve Whitehouse: recvmsg() changed to try and behave in a POSIX like
- * way. Didn't manage it entirely, but its better.
- * Steve Whitehouse: ditto for sendmsg().
- * Steve Whitehouse: A selection of bug fixes to various things.
- * Steve Whitehouse: Added TIOCOUTQ ioctl.
- * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username.
- * Steve Whitehouse: Fixes to connect() error returns.
- * Patrick Caulfield: Fixes to delayed acceptance logic.
- * David S. Miller: New socket locking
- * Steve Whitehouse: Socket list hashing/locking
- * Arnaldo C. Melo: use capable, not suser
- * Steve Whitehouse: Removed unused code. Fix to use sk->allocation
- * when required.
- * Patrick Caulfield: /proc/net/decnet now has object name/number
- * Steve Whitehouse: Fixed local port allocation, hashed sk list
- * Matthew Wilcox: Fixes for dn_ioctl()
- * Steve Whitehouse: New connect/accept logic to allow timeouts and
- * prepare for sendpage etc.
- */
-
-
-/******************************************************************************
- (c) 1995-1998 E.M. Serrat emserrat@geocities.com
-
-
-HISTORY:
-
-Version Kernel Date Author/Comments
-------- ------ ---- ---------------
-Version 0.0.1 2.0.30 01-dic-97 Eduardo Marcelo Serrat
- (emserrat@geocities.com)
-
- First Development of DECnet Socket La-
- yer for Linux. Only supports outgoing
- connections.
-
-Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield
- (patrick@pandh.demon.co.uk)
-
- Port to new kernel development version.
-
-Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat
- (emserrat@geocities.com)
- _
- Added support for incoming connections
- so we can start developing server apps
- on Linux.
- -
- Module Support
-Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat
- (emserrat@geocities.com)
- _
- Added support for X11R6.4. Now we can
- use DECnet transport for X on Linux!!!
- -
-Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat
- (emserrat@geocities.com)
- Removed bugs on flow control
- Removed bugs on incoming accessdata
- order
- -
-Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
- dn_recvmsg fixes
-
- Patrick J. Caulfield
- dn_bind fixes
-*******************************************************************************/
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/kernel.h>
-#include <linux/sched/signal.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/sockios.h>
-#include <linux/net.h>
-#include <linux/netdevice.h>
-#include <linux/inet.h>
-#include <linux/route.h>
-#include <linux/netfilter.h>
-#include <linux/seq_file.h>
-#include <net/sock.h>
-#include <net/tcp_states.h>
-#include <net/flow.h>
-#include <asm/ioctls.h>
-#include <linux/capability.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/jiffies.h>
-#include <net/net_namespace.h>
-#include <net/neighbour.h>
-#include <net/dst.h>
-#include <net/fib_rules.h>
-#include <net/tcp.h>
-#include <net/dn.h>
-#include <net/dn_nsp.h>
-#include <net/dn_dev.h>
-#include <net/dn_route.h>
-#include <net/dn_fib.h>
-#include <net/dn_neigh.h>
-
-struct dn_sock {
- struct sock sk;
- struct dn_scp scp;
-};
-
-static void dn_keepalive(struct sock *sk);
-
-#define DN_SK_HASH_SHIFT 8
-#define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
-#define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)
-
-
-static const struct proto_ops dn_proto_ops;
-static DEFINE_RWLOCK(dn_hash_lock);
-static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
-static struct hlist_head dn_wild_sk;
-static atomic_long_t decnet_memory_allocated;
-
-static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
-static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
-
-static struct hlist_head *dn_find_list(struct sock *sk)
-{
- struct dn_scp *scp = DN_SK(sk);
-
- if (scp->addr.sdn_flags & SDF_WILD)
- return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL;
-
- return &dn_sk_hash[le16_to_cpu(scp->addrloc) & DN_SK_HASH_MASK];
-}
-
-/*
- * Valid ports are those greater than zero and not already in use.
- */
-static int check_port(__le16 port)
-{
- struct sock *sk;
-
- if (port == 0)
- return -1;
-
- sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
- struct dn_scp *scp = DN_SK(sk);
- if (scp->addrloc == port)
- return -1;
- }
- return 0;
-}
-
-static unsigned short port_alloc(struct sock *sk)
-{
- struct dn_scp *scp = DN_SK(sk);
- static unsigned short port = 0x2000;
- unsigned short i_port = port;
-
- while(check_port(cpu_to_le16(++port)) != 0) {
- if (port == i_port)
- return 0;
- }
-
- scp->addrloc = cpu_to_le16(port);
-
- return 1;
-}
-
-/*
- * Since this is only ever called from user
- * level, we don't need a write_lock() version
- * of this.
- */
-static int dn_hash_sock(struct sock *sk)
-{
- struct dn_scp *scp = DN_SK(sk);
- struct hlist_head *list;
- int rv = -EUSERS;
-
- BUG_ON(sk_hashed(sk));
-
- write_lock_bh(&dn_hash_lock);
-
- if (!scp->addrloc && !port_alloc(sk))
- goto out;
-
- rv = -EADDRINUSE;
- if ((list = dn_find_list(sk)) == NULL)
- goto out;
-
- sk_add_node(sk, list);
- rv = 0;
-out:
- write_unlock_bh(&dn_hash_lock);
- return rv;
-}
-
-static void dn_unhash_sock(struct sock *sk)
-{
- write_lock(&dn_hash_lock);
- sk_del_node_init(sk);
- write_unlock(&dn_hash_lock);
-}
-
-static void dn_unhash_sock_bh(struct sock *sk)
-{
- write_lock_bh(&dn_hash_lock);
- sk_del_node_init(sk);
- write_unlock_bh(&dn_hash_lock);
-}
-
-static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
-{
- int i;
- unsigned int hash = addr->sdn_objnum;
-
- if (hash == 0) {
- hash = addr->sdn_objnamel;
- for(i = 0; i < le16_to_cpu(addr->sdn_objnamel); i++) {
- hash ^= addr->sdn_objname[i];
- hash ^= (hash << 3);
- }
- }
-
- return &dn_sk_hash[hash & DN_SK_HASH_MASK];
-}
-
-/*
- * Called to transform a socket from bound (i.e. with a local address)
- * into a listening socket (doesn't need a local port number) and rehashes
- * based upon the object name/number.
- */
-static void dn_rehash_sock(struct sock *sk)
-{
- struct hlist_head *list;
- struct dn_scp *scp = DN_SK(sk);
-
- if (scp->addr.sdn_flags & SDF_WILD)
- return;
-
- write_lock_bh(&dn_hash_lock);
- sk_del_node_init(sk);
- DN_SK(sk)->addrloc = 0;
- list = listen_hash(&DN_SK(sk)->addr);
- sk_add_node(sk, list);
- write_unlock_bh(&dn_hash_lock);
-}
-
-int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type)
-{
- int len = 2;
-
- *buf++ = type;
-
- switch (type) {
- case 0:
- *buf++ = sdn->sdn_objnum;
- break;
- case 1:
- *buf++ = 0;
- *buf++ = le16_to_cpu(sdn->sdn_objnamel);
- memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
- len = 3 + le16_to_cpu(sdn->sdn_objnamel);
- break;
- case 2:
- memset(buf, 0, 5);
- buf += 5;
- *buf++ = le16_to_cpu(sdn->sdn_objnamel);
- memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
- len = 7 + le16_to_cpu(sdn->sdn_objnamel);
- break;
- }
-
- return len;
-}
-
-/*
- * On reception of usernames, we handle types 1 and 0 for destination
- * addresses only. Types 2 and 4 are used for source addresses, but the
- * UIC, GIC are ignored and they are both treated the same way. Type 3
- * is never used as I've no idea what its purpose might be or what its
- * format is.
- */
-int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, unsigned char *fmt)
-{
- unsigned char type;
- int size = len;
- int namel = 12;
-
- sdn->sdn_objnum = 0;
- sdn->sdn_objnamel = cpu_to_le16(0);
- memset(sdn->sdn_objname, 0, DN_MAXOBJL);
-
- if (len < 2)
- return -1;
-
- len -= 2;
- *fmt = *data++;
- type = *data++;
-
- switch (*fmt) {
- case 0:
- sdn->sdn_objnum = type;
- return 2;
- case 1:
- namel = 16;
- break;
- case 2:
- len -= 4;
- data += 4;
- break;
- case 4:
- len -= 8;
- data += 8;
- break;
- default:
- return -1;
- }
-
- len -= 1;
-
- if (len < 0)
- return -1;
-
- sdn->sdn_objnamel = cpu_to_le16(*data++);
- len -= le16_to_cpu(sdn->sdn_objnamel);
-
- if ((len < 0) || (le16_to_cpu(sdn->sdn_objnamel) > namel))
- return -1;
-
- memcpy(sdn->sdn_objname, data, le16_to_cpu(sdn->sdn_objnamel));
-
- return size - len;
-}
-
-struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
-{
- struct hlist_head *list = listen_hash(addr);
- struct sock *sk;
-
- read_lock(&dn_hash_lock);
- sk_for_each(sk, list) {
- struct dn_scp *scp = DN_SK(sk);
- if (sk->sk_state != TCP_LISTEN)
- continue;
- if (scp->addr.sdn_objnum) {
- if (scp->addr.sdn_objnum != addr->sdn_objnum)
- continue;
- } else {
- if (addr->sdn_objnum)
- continue;
- if (scp->addr.sdn_objnamel != addr->sdn_objnamel)
- continue;
- if (memcmp(scp->addr.sdn_objname, addr->sdn_objname, le16_to_cpu(addr->sdn_objnamel)) != 0)
- continue;
- }
- sock_hold(sk);
- read_unlock(&dn_hash_lock);
- return sk;
- }
-
- sk = sk_head(&dn_wild_sk);
- if (sk) {
- if (sk->sk_state == TCP_LISTEN)
- sock_hold(sk);
- else
- sk = NULL;
- }
-
- read_unlock(&dn_hash_lock);
- return sk;
-}
-
-struct sock *dn_find_by_skb(struct sk_buff *skb)
-{
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- struct sock *sk;
- struct dn_scp *scp;
-
- read_lock(&dn_hash_lock);
- sk_for_each(sk, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
- scp = DN_SK(sk);
- if (cb->src != dn_saddr2dn(&scp->peer))
- continue;
- if (cb->dst_port != scp->addrloc)
- continue;
- if (scp->addrrem && (cb->src_port != scp->addrrem))
- continue;
- sock_hold(sk);
- goto found;
- }
- sk = NULL;
-found:
- read_unlock(&dn_hash_lock);
- return sk;
-}
-
-
-
-static void dn_destruct(struct sock *sk)
-{
- struct dn_scp *scp = DN_SK(sk);
-
- skb_queue_purge(&scp->data_xmit_queue);
- skb_queue_purge(&scp->other_xmit_queue);
- skb_queue_purge(&scp->other_receive_queue);
-
- dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
-}
-
-static unsigned long dn_memory_pressure;
-
-static void dn_enter_memory_pressure(struct sock *sk)
-{
- if (!dn_memory_pressure) {
- dn_memory_pressure = 1;
- }
-}
-
-static struct proto dn_proto = {
- .name = "NSP",
- .owner = THIS_MODULE,
- .enter_memory_pressure = dn_enter_memory_pressure,
- .memory_pressure = &dn_memory_pressure,
- .memory_allocated = &decnet_memory_allocated,
- .sysctl_mem = sysctl_decnet_mem,
- .sysctl_wmem = sysctl_decnet_wmem,
- .sysctl_rmem = sysctl_decnet_rmem,
- .max_header = DN_MAX_NSP_DATA_HEADER + 64,
- .obj_size = sizeof(struct dn_sock),
-};
-
-static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp, int kern)
-{
- struct dn_scp *scp;
- struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, kern);
-
- if (!sk)
- goto out;
-
- if (sock)
- sock->ops = &dn_proto_ops;
- sock_init_data(sock, sk);
-
- sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
- sk->sk_destruct = dn_destruct;
- sk->sk_no_check_tx = 1;
- sk->sk_family = PF_DECnet;
- sk->sk_protocol = 0;
- sk->sk_allocation = gfp;
- sk->sk_sndbuf = sysctl_decnet_wmem[1];
- sk->sk_rcvbuf = sysctl_decnet_rmem[1];
-
- /* Initialization of DECnet Session Control Port */
- scp = DN_SK(sk);
- scp->state = DN_O; /* Open */
- scp->numdat = 1; /* Next data seg to tx */
- scp->numoth = 1; /* Next oth data to tx */
- scp->ackxmt_dat = 0; /* Last data seg ack'ed */
- scp->ackxmt_oth = 0; /* Last oth data ack'ed */
- scp->ackrcv_dat = 0; /* Highest data ack recv*/
- scp->ackrcv_oth = 0; /* Last oth data ack rec*/
- scp->flowrem_sw = DN_SEND;
- scp->flowloc_sw = DN_SEND;
- scp->flowrem_dat = 0;
- scp->flowrem_oth = 1;
- scp->flowloc_dat = 0;
- scp->flowloc_oth = 1;
- scp->services_rem = 0;
- scp->services_loc = 1 | NSP_FC_NONE;
- scp->info_rem = 0;
- scp->info_loc = 0x03; /* NSP version 4.1 */
- scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by remote segsize */
- scp->nonagle = 0;
- scp->multi_ireq = 1;
- scp->accept_mode = ACC_IMMED;
- scp->addr.sdn_family = AF_DECnet;
- scp->peer.sdn_family = AF_DECnet;
- scp->accessdata.acc_accl = 5;
- memcpy(scp->accessdata.acc_acc, "LINUX", 5);
-
- scp->max_window = NSP_MAX_WINDOW;
- scp->snd_window = NSP_MIN_WINDOW;
- scp->nsp_srtt = NSP_INITIAL_SRTT;
- scp->nsp_rttvar = NSP_INITIAL_RTTVAR;
- scp->nsp_rxtshift = 0;
-
- skb_queue_head_init(&scp->data_xmit_queue);
- skb_queue_head_init(&scp->other_xmit_queue);
- skb_queue_head_init(&scp->other_receive_queue);
-
- scp->persist = 0;
- scp->persist_fxn = NULL;
- scp->keepalive = 10 * HZ;
- scp->keepalive_fxn = dn_keepalive;
-
- dn_start_slow_timer(sk);
-out:
- return sk;
-}
-
-/*
- * Keepalive timer.
- * FIXME: Should respond to SO_KEEPALIVE etc.
- */
-static void dn_keepalive(struct sock *sk)
-{
- struct dn_scp *scp = DN_SK(sk);
-
- /*
- * By checking the other_data transmit queue is empty
- * we are double checking that we are not sending too
- * many of these keepalive frames.
- */
- if (skb_queue_empty(&scp->other_xmit_queue))
- dn_nsp_send_link(sk, DN_NOCHANGE, 0);
-}
-
-
-/*
- * Timer for shutdown/destroyed sockets.
- * When socket is dead & no packets have been sent for a
- * certain amount of time, they are removed by this
- * routine. Also takes care of sending out DI & DC
- * frames at correct times.
- */
-int dn_destroy_timer(struct sock *sk)
-{
- struct dn_scp *scp = DN_SK(sk);
-
- scp->persist = dn_nsp_persist(sk);
-
- switch (scp->state) {
- case DN_DI:
- dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
- if (scp->nsp_rxtshift >= decnet_di_count)
- scp->state = DN_CN;
- return 0;
-
- case DN_DR:
- dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
- if (scp->nsp_rxtshift >= decnet_dr_count)
- scp->state = DN_DRC;
- return 0;
-
- case DN_DN:
- if (scp->nsp_rxtshift < decnet_dn_count) {
- /* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
- dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
- GFP_ATOMIC);
- return 0;
- }
- }
-
- scp->persist = (HZ * decnet_time_wait);
-
- if (sk->sk_socket)
- return 0;
-
- if (time_after_eq(jiffies, scp->stamp + HZ * decnet_time_wait)) {
- dn_unhash_sock(sk);
- sock_put(sk);
- return 1;
- }
-
- return 0;
-}
-
-static void dn_destroy_sock(struct sock *sk)
-{
- struct dn_scp *scp = DN_SK(sk);
-
- scp->nsp_rxtshift = 0; /* reset back off */
-
- if (sk->sk_socket) {
- if (sk->sk_socket->state != SS_UNCONNECTED)
- sk->sk_socket->state = SS_DISCONNECTING;
- }
-
- sk->sk_state = TCP_CLOSE;
-
- switch (scp->state) {
- case DN_DN:
- dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
- sk->sk_allocation);
- scp->persist_fxn = dn_destroy_timer;
- scp->persist = dn_nsp_persist(sk);
- break;
- case DN_CR:
- scp->state = DN_DR;
- goto disc_reject;
- case DN_RUN:
- scp->state = DN_DI;
- /* fall through */
- case DN_DI:
- case DN_DR:
-disc_reject:
- dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
- /* fall through */
- case DN_NC:
- case DN_NR:
- case DN_RJ:
- case DN_DIC:
- case DN_CN:
- case DN_DRC:
- case DN_CI:
- case DN_CD:
- scp->persist_fxn = dn_destroy_timer;
- scp->persist = dn_nsp_persist(sk);
- break;
- default:
- printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
- /* fall through */
- case DN_O:
- dn_stop_slow_timer(sk);
-
- dn_unhash_sock_bh(sk);
- sock_put(sk);
-
- break;
- }
-}
-
-char *dn_addr2asc(__u16 addr, char *buf)
-{
- unsigned short node, area;
-
- node = addr & 0x03ff;
- area = addr >> 10;
- sprintf(buf, "%hd.%hd", area, node);
-
- return buf;
-}
-
-
-
-static int dn_create(struct net *net, struct socket *sock, int protocol,
- int kern)
-{
- struct sock *sk;
-
- if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
- return -EINVAL;
-
- if (!net_eq(net, &init_net))
- return -EAFNOSUPPORT;
-
- switch (sock->type) {
- case SOCK_SEQPACKET:
- if (protocol != DNPROTO_NSP)
- return -EPROTONOSUPPORT;
- break;
- case SOCK_STREAM:
- break;
- default:
- return -ESOCKTNOSUPPORT;
- }
-
-
- if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL, kern)) == NULL)
- return -ENOBUFS;
-
- sk->sk_protocol = protocol;
-
- return 0;
-}
-
-
-static int
-dn_release(struct socket *sock)
-{
- struct sock *sk = sock->sk;
-
- if (sk) {
- sock_orphan(sk);
- sock_hold(sk);
- lock_sock(sk);
- dn_destroy_sock(sk);
- release_sock(sk);
- sock_put(sk);
- }
-
- return 0;
-}
-
-static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
-{
- struct sock *sk = sock->sk;
- struct dn_scp *scp = DN_SK(sk);
- struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr;
- struct net_device *dev, *ldev;
- int rv;
-
- if (addr_len != sizeof(struct sockaddr_dn))
- return -EINVAL;
-
- if (saddr->sdn_family != AF_DECnet)
- return -EINVAL;
-
- if (le16_to_cpu(saddr->sdn_nodeaddrl) && (le16_to_cpu(saddr->sdn_nodeaddrl) != 2))
- return -EINVAL;
-
- if (le16_to_cpu(saddr->sdn_objnamel) > DN_MAXOBJL)
- return -EINVAL;
-
- if (saddr->sdn_flags & ~SDF_WILD)
- return -EINVAL;
-
- if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
- (saddr->sdn_flags & SDF_WILD)))
- return -EACCES;
-
- if (!(saddr->sdn_flags & SDF_WILD)) {
- if (le16_to_cpu(saddr->sdn_nodeaddrl)) {
- rcu_read_lock();
- ldev = NULL;
- for_each_netdev_rcu(&init_net, dev) {
- if (!dev->dn_ptr)
- continue;
- if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) {
- ldev = dev;
- break;
- }
- }
- rcu_read_unlock();
- if (ldev == NULL)
- return -EADDRNOTAVAIL;
- }
- }
-
- rv = -EINVAL;
- lock_sock(sk);
- if (sock_flag(sk, SOCK_ZAPPED)) {
- memcpy(&scp->addr, saddr, addr_len);
- sock_reset_flag(sk, SOCK_ZAPPED);
-
- rv = dn_hash_sock(sk);
- if (rv)
- sock_set_flag(sk, SOCK_ZAPPED);
- }
- release_sock(sk);
-
- return rv;
-}
-
-
-static int dn_auto_bind(struct socket *sock)
-{
- struct sock *sk = sock->sk;
- struct dn_scp *scp = DN_SK(sk);
- int rv;
-
- sock_reset_flag(sk, SOCK_ZAPPED);
-
- scp->addr.sdn_flags = 0;
- scp->addr.sdn_objnum = 0;
-
- /*
- * This stuff is to keep compatibility with Eduardo's
- * patch. I hope I can dispense with it shortly...
- */
- if ((scp->accessdata.acc_accl != 0) &&
- (scp->accessdata.acc_accl <= 12)) {
-
- scp->addr.sdn_objnamel = cpu_to_le16(scp->accessdata.acc_accl);
- memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, le16_to_cpu(scp->addr.sdn_objnamel));
-
- scp->accessdata.acc_accl = 0;
- memset(scp->accessdata.acc_acc, 0, 40);
- }
- /* End of compatibility stuff */
-
- scp->addr.sdn_add.a_len = cpu_to_le16(2);
- rv = dn_dev_bind_default((__le16 *)scp->addr.sdn_add.a_addr);
- if (rv == 0) {
- rv = dn_hash_sock(sk);
- if (rv)
- sock_set_flag(sk, SOCK_ZAPPED);
- }
-
- return rv;
-}
-
-static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
-{
- struct dn_scp *scp = DN_SK(sk);
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int err;
-
- if (scp->state != DN_CR)
- return -EINVAL;
-
- scp->state = DN_CC;
- scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk));
- dn_send_conn_conf(sk, allocation);
-
- add_wait_queue(sk_sleep(sk), &wait);
- for(;;) {
- release_sock(sk);
- if (scp->state == DN_CC)
- *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
- lock_sock(sk);
- err = 0;
- if (scp->state == DN_RUN)
- break;
- err = sock_error(sk);
- if (err)
- break;
- err = sock_intr_errno(*timeo);
- if (signal_pending(current))
- break;
- err = -EAGAIN;
- if (!*timeo)
- break;
- }
- remove_wait_queue(sk_sleep(sk), &wait);
- if (err == 0) {
- sk->sk_socket->state = SS_CONNECTED;
- } else if (scp->state != DN_CC) {
- sk->sk_socket->state = SS_UNCONNECTED;
- }
- return err;
-}
-
-static int dn_wait_run(struct sock *sk, long *timeo)
-{
- struct dn_scp *scp = DN_SK(sk);
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int err = 0;
-
- if (scp->state == DN_RUN)
- goto out;
-
- if (!*timeo)
- return -EALREADY;
-
- add_wait_queue(sk_sleep(sk), &wait);
- for(;;) {
- release_sock(sk);
- if (scp->state == DN_CI || scp->state == DN_CC)
- *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
- lock_sock(sk);
- err = 0;
- if (scp->state == DN_RUN)
- break;
- err = sock_error(sk);
- if (err)
- break;
- err = sock_intr_errno(*timeo);
- if (signal_pending(current))
- break;
- err = -ETIMEDOUT;
- if (!*timeo)
- break;
- }
- remove_wait_queue(sk_sleep(sk), &wait);
-out:
- if (err == 0) {
- sk->sk_socket->state = SS_CONNECTED;
- } else if (scp->state != DN_CI && scp->state != DN_CC) {
- sk->sk_socket->state = SS_UNCONNECTED;
- }
- return err;
-}
-
-static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
-{
- struct socket *sock = sk->sk_socket;
- struct dn_scp *scp = DN_SK(sk);
- int err = -EISCONN;
- struct flowidn fld;
- struct dst_entry *dst;
-
- if (sock->state == SS_CONNECTED)
- goto out;
-
- if (sock->state == SS_CONNECTING) {
- err = 0;
- if (scp->state == DN_RUN) {
- sock->state = SS_CONNECTED;
- goto out;
- }
- err = -ECONNREFUSED;
- if (scp->state != DN_CI && scp->state != DN_CC) {
- sock->state = SS_UNCONNECTED;
- goto out;
- }
- return dn_wait_run(sk, timeo);
- }
-
- err = -EINVAL;
- if (scp->state != DN_O)
- goto out;
-
- if (addr == NULL || addrlen != sizeof(struct sockaddr_dn))
- goto out;
- if (addr->sdn_family != AF_DECnet)
- goto out;
- if (addr->sdn_flags & SDF_WILD)
- goto out;
-
- if (sock_flag(sk, SOCK_ZAPPED)) {
- err = dn_auto_bind(sk->sk_socket);
- if (err)
- goto out;
- }
-
- memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));
-
- err = -EHOSTUNREACH;
- memset(&fld, 0, sizeof(fld));
- fld.flowidn_oif = sk->sk_bound_dev_if;
- fld.daddr = dn_saddr2dn(&scp->peer);
- fld.saddr = dn_saddr2dn(&scp->addr);
- dn_sk_ports_copy(&fld, scp);
- fld.flowidn_proto = DNPROTO_NSP;
- if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, flags) < 0)
- goto out;
- dst = __sk_dst_get(sk);
- sk->sk_route_caps = dst->dev->features;
- sock->state = SS_CONNECTING;
- scp->state = DN_CI;
- scp->segsize_loc = dst_metric_advmss(dst);
-
- dn_nsp_send_conninit(sk, NSP_CI);
- err = -EINPROGRESS;
- if (*timeo) {
- err = dn_wait_run(sk, timeo);
- }
-out:
- return err;
-}
-
-static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags)
-{
- struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr;
- struct sock *sk = sock->sk;
- int err;
- long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
-
- lock_sock(sk);
- err = __dn_connect(sk, addr, addrlen, &timeo, 0);
- release_sock(sk);
-
- return err;
-}
-
-static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
-{
- struct dn_scp *scp = DN_SK(sk);
-
- switch (scp->state) {
- case DN_RUN:
- return 0;
- case DN_CR:
- return dn_confirm_accept(sk, timeo, sk->sk_allocation);
- case DN_CI:
- case DN_CC:
- return dn_wait_run(sk, timeo);
- case DN_O:
- return __dn_connect(sk, addr, addrlen, timeo, flags);
- }
-
- return -EINVAL;
-}
-
-
-static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
-{
- unsigned char *ptr = skb->data;
-
- acc->acc_userl = *ptr++;
- memcpy(&acc->acc_user, ptr, acc->acc_userl);
- ptr += acc->acc_userl;
-
- acc->acc_passl = *ptr++;
- memcpy(&acc->acc_pass, ptr, acc->acc_passl);
- ptr += acc->acc_passl;
-
- acc->acc_accl = *ptr++;
- memcpy(&acc->acc_acc, ptr, acc->acc_accl);
-
- skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
-
-}
-
-static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
-{
- unsigned char *ptr = skb->data;
- u16 len = *ptr++; /* yes, it's 8bit on the wire */
-
- BUG_ON(len > 16); /* we've checked the contents earlier */
- opt->opt_optl = cpu_to_le16(len);
- opt->opt_status = 0;
- memcpy(opt->opt_data, ptr, len);
- skb_pull(skb, len + 1);
-}
-
-static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
-{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- struct sk_buff *skb = NULL;
- int err = 0;
-
- add_wait_queue(sk_sleep(sk), &wait);
- for(;;) {
- release_sock(sk);
- skb = skb_dequeue(&sk->sk_receive_queue);
- if (skb == NULL) {
- *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
- skb = skb_dequeue(&sk->sk_receive_queue);
- }
- lock_sock(sk);
- if (skb != NULL)
- break;
- err = -EINVAL;
- if (sk->sk_state != TCP_LISTEN)
- break;
- err = sock_intr_errno(*timeo);
- if (signal_pending(current))
- break;
- err = -EAGAIN;
- if (!*timeo)
- break;
- }
- remove_wait_queue(sk_sleep(sk), &wait);
-
- return skb == NULL ? ERR_PTR(err) : skb;
-}
-
-static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern)
-{
- struct sock *sk = sock->sk, *newsk;
- struct sk_buff *skb = NULL;
- struct dn_skb_cb *cb;
- unsigned char menuver;
- int err = 0;
- unsigned char type;
- long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
- struct dst_entry *dst;
-
- lock_sock(sk);
-
- if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
- release_sock(sk);
- return -EINVAL;
- }
-
- skb = skb_dequeue(&sk->sk_receive_queue);
- if (skb == NULL) {
- skb = dn_wait_for_connect(sk, &timeo);
- if (IS_ERR(skb)) {
- release_sock(sk);
- return PTR_ERR(skb);
- }
- }
-
- cb = DN_SKB_CB(skb);
- sk->sk_ack_backlog--;
- newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
- if (newsk == NULL) {
- release_sock(sk);
- kfree_skb(skb);
- return -ENOBUFS;
- }
- release_sock(sk);
-
- dst = skb_dst(skb);
- sk_dst_set(newsk, dst);
- skb_dst_set(skb, NULL);
-
- DN_SK(newsk)->state = DN_CR;
- DN_SK(newsk)->addrrem = cb->src_port;
- DN_SK(newsk)->services_rem = cb->services;
- DN_SK(newsk)->info_rem = cb->info;
- DN_SK(newsk)->segsize_rem = cb->segsize;
- DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode;
-
- if (DN_SK(newsk)->segsize_rem < 230)
- DN_SK(newsk)->segsize_rem = 230;
-
- if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
- DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd;
-
- newsk->sk_state = TCP_LISTEN;
- memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));
-
- /*
- * If we are listening on a wild socket, we don't want
- * the newly created socket on the wrong hash queue.
- */
- DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD;
-
- skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type));
- skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
- *(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src;
- *(__le16 *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst;
-
- menuver = *skb->data;
- skb_pull(skb, 1);
-
- if (menuver & DN_MENUVER_ACC)
- dn_access_copy(skb, &(DN_SK(newsk)->accessdata));
-
- if (menuver & DN_MENUVER_USR)
- dn_user_copy(skb, &(DN_SK(newsk)->conndata_in));
-
- if (menuver & DN_MENUVER_PRX)
- DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY;
-
- if (menuver & DN_MENUVER_UIC)
- DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY;
-
- kfree_skb(skb);
-
- memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out),
- sizeof(struct optdata_dn));
- memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out),
- sizeof(struct optdata_dn));
-
- lock_sock(newsk);
- err = dn_hash_sock(newsk);
- if (err == 0) {
- sock_reset_flag(newsk, SOCK_ZAPPED);
- dn_send_conn_ack(newsk);
-
- /*
- * Here we use sk->sk_allocation since although the conn conf is
- * for the newsk, the context is the old socket.
- */
- if (DN_SK(newsk)->accept_mode == ACC_IMMED)
- err = dn_confirm_accept(newsk, &timeo,
- sk->sk_allocation);
- }
- release_sock(newsk);
- return err;
-}
-
-
-static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int peer)
-{
- struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr;
- struct sock *sk = sock->sk;
- struct dn_scp *scp = DN_SK(sk);
-
- lock_sock(sk);
-
- if (peer) {
- if ((sock->state != SS_CONNECTED &&
- sock->state != SS_CONNECTING) &&
- scp->accept_mode == ACC_IMMED) {
- release_sock(sk);
- return -ENOTCONN;
- }
-
- memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
- } else {
- memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn));
- }
-
- release_sock(sk);
-
- return sizeof(struct sockaddr_dn);
-}
-
-
-static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wait)
-{
- struct sock *sk = sock->sk;
- struct dn_scp *scp = DN_SK(sk);
- __poll_t mask = datagram_poll(file, sock, wait);
-
- if (!skb_queue_empty_lockless(&scp->other_receive_queue))
- mask |= EPOLLRDBAND;
-
- return mask;
-}
-
-static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
-{
- struct sock *sk = sock->sk;
- struct dn_scp *scp = DN_SK(sk);
- int err = -EOPNOTSUPP;
- long amount = 0;
- struct sk_buff *skb;
- int val;
-
- switch(cmd)
- {
- case SIOCGIFADDR:
- case SIOCSIFADDR:
- return dn_dev_ioctl(cmd, (void __user *)arg);
-
- case SIOCATMARK:
- lock_sock(sk);
- val = !skb_queue_empty(&scp->other_receive_queue);
- if (scp->state != DN_RUN)
- val = -ENOTCONN;
- release_sock(sk);
- return val;
-
- case TIOCOUTQ:
- amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
- if (amount < 0)
- amount = 0;
- err = put_user(amount, (int __user *)arg);
- break;
-
- case TIOCINQ:
- lock_sock(sk);
- skb = skb_peek(&scp->other_receive_queue);
- if (skb) {
- amount = skb->len;
- } else {
- skb_queue_walk(&sk->sk_receive_queue, skb)
- amount += skb->len;
- }
- release_sock(sk);
- err = put_user(amount, (int __user *)arg);
- break;
-
- default:
- err = -ENOIOCTLCMD;
- break;
- }
-
- return err;
-}
-
-static int dn_listen(struct socket *sock, int backlog)
-{
- struct sock *sk = sock->sk;
- int err = -EINVAL;
-
- lock_sock(sk);
-
- if (sock_flag(sk, SOCK_ZAPPED))
- goto out;
-
- if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
- goto out;
-
- sk->sk_max_ack_backlog = backlog;
- sk->sk_ack_backlog = 0;
- sk->sk_state = TCP_LISTEN;
- err = 0;
- dn_rehash_sock(sk);
-
-out:
- release_sock(sk);
-
- return err;
-}
-
-
-static int dn_shutdown(struct socket *sock, int how)
-{
- struct sock *sk = sock->sk;
- struct dn_scp *scp = DN_SK(sk);
- int err = -ENOTCONN;
-
- lock_sock(sk);
-
- if (sock->state == SS_UNCONNECTED)
- goto out;
-
- err = 0;
- if (sock->state == SS_DISCONNECTING)
- goto out;
-
- err = -EINVAL;
- if (scp->state == DN_O)
- goto out;
-
- if (how != SHUT_RDWR)
- goto out;
-
- sk->sk_shutdown = SHUTDOWN_MASK;
- dn_destroy_sock(sk);
- err = 0;
-
-out:
- release_sock(sk);
-
- return err;
-}
-
-static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
-{
- struct sock *sk = sock->sk;
- int err;
-
- lock_sock(sk);
- err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
- release_sock(sk);
-#ifdef CONFIG_NETFILTER
- /* we need to exclude all possible ENOPROTOOPTs except default case */
- if (err == -ENOPROTOOPT && optname != DSO_LINKINFO &&
- optname != DSO_STREAM && optname != DSO_SEQPACKET)
- err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
-#endif
-
- return err;
-}
-
-static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, unsigned int optlen, int flags)
-{
- struct sock *sk = sock->sk;
- struct dn_scp *scp = DN_SK(sk);
- long timeo;
- union {
- struct optdata_dn opt;
- struct accessdata_dn acc;
- int mode;
- unsigned long win;
- int val;
- unsigned char services;
- unsigned char info;
- } u;
- int err;
-
- if (optlen && !optval)
- return -EINVAL;
-
- if (optlen > sizeof(u))
- return -EINVAL;
-
- if (copy_from_user(&u, optval, optlen))
- return -EFAULT;
-
- switch (optname) {
- case DSO_CONDATA:
- if (sock->state == SS_CONNECTED)
- return -EISCONN;
- if ((scp->state != DN_O) && (scp->state != DN_CR))
- return -EINVAL;
-
- if (optlen != sizeof(struct optdata_dn))
- return -EINVAL;
-
- if (le16_to_cpu(u.opt.opt_optl) > 16)
- return -EINVAL;
-
- memcpy(&scp->conndata_out, &u.opt, optlen);
- break;
-
- case DSO_DISDATA:
- if (sock->state != SS_CONNECTED &&
- scp->accept_mode == ACC_IMMED)
- return -ENOTCONN;
-
- if (optlen != sizeof(struct optdata_dn))
- return -EINVAL;
-
- if (le16_to_cpu(u.opt.opt_optl) > 16)
- return -EINVAL;
-
- memcpy(&scp->discdata_out, &u.opt, optlen);
- break;
-
- case DSO_CONACCESS:
- if (sock->state == SS_CONNECTED)
- return -EISCONN;
- if (scp->state != DN_O)
- return -EINVAL;
-
- if (optlen != sizeof(struct accessdata_dn))
- return -EINVAL;
-
- if ((u.acc.acc_accl > DN_MAXACCL) ||
- (u.acc.acc_passl > DN_MAXACCL) ||
- (u.acc.acc_userl > DN_MAXACCL))
- return -EINVAL;
-
- memcpy(&scp->accessdata, &u.acc, optlen);
- break;
-
- case DSO_ACCEPTMODE:
- if (sock->state == SS_CONNECTED)
- return -EISCONN;
- if (scp->state != DN_O)
- return -EINVAL;
-
- if (optlen != sizeof(int))
- return -EINVAL;
-
- if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
- return -EINVAL;
-
- scp->accept_mode = (unsigned char)u.mode;
- break;
-
- case DSO_CONACCEPT:
- if (scp->state != DN_CR)
- return -EINVAL;
- timeo = sock_rcvtimeo(sk, 0);
- err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
- return err;
-
- case DSO_CONREJECT:
- if (scp->state != DN_CR)
- return -EINVAL;
-
- scp->state = DN_DR;
- sk->sk_shutdown = SHUTDOWN_MASK;
- dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
- break;
-
- case DSO_MAXWINDOW:
- if (optlen != sizeof(unsigned long))
- return -EINVAL;
- if (u.win > NSP_MAX_WINDOW)
- u.win = NSP_MAX_WINDOW;
- if (u.win == 0)
- return -EINVAL;
- scp->max_window = u.win;
- if (scp->snd_window > u.win)
- scp->snd_window = u.win;
- break;
-
- case DSO_NODELAY:
- if (optlen != sizeof(int))
- return -EINVAL;
- if (scp->nonagle == TCP_NAGLE_CORK)
- return -EINVAL;
- scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_OFF;
- /* if (scp->nonagle == 1) { Push pending frames } */
- break;
-
- case DSO_CORK:
- if (optlen != sizeof(int))
- return -EINVAL;
- if (scp->nonagle == TCP_NAGLE_OFF)
- return -EINVAL;
- scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_CORK;
- /* if (scp->nonagle == 0) { Push pending frames } */
- break;
-
- case DSO_SERVICES:
- if (optlen != sizeof(unsigned char))
- return -EINVAL;
- if ((u.services & ~NSP_FC_MASK) != 0x01)
- return -EINVAL;
- if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
- return -EINVAL;
- scp->services_loc = u.services;
- break;
-
- case DSO_INFO:
- if (optlen != sizeof(unsigned char))
- return -EINVAL;
- if (u.info & 0xfc)
- return -EINVAL;
- scp->info_loc = u.info;
- break;
-
- case DSO_LINKINFO:
- case DSO_STREAM:
- case DSO_SEQPACKET:
- default:
- return -ENOPROTOOPT;
- }
-
- return 0;
-}
-
-static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
-{
- struct sock *sk = sock->sk;
- int err;
-
- lock_sock(sk);
- err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
- release_sock(sk);
-#ifdef CONFIG_NETFILTER
- if (err == -ENOPROTOOPT && optname != DSO_STREAM &&
- optname != DSO_SEQPACKET && optname != DSO_CONACCEPT &&
- optname != DSO_CONREJECT) {
- int len;
-
- if (get_user(len, optlen))
- return -EFAULT;
-
- err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
- if (err >= 0)
- err = put_user(len, optlen);
- }
-#endif
-
- return err;
-}
-
-static int __dn_getsockopt(struct socket *sock, int level,int optname, char __user *optval,int __user *optlen, int flags)
-{
- struct sock *sk = sock->sk;
- struct dn_scp *scp = DN_SK(sk);
- struct linkinfo_dn link;
- unsigned int r_len;
- void *r_data = NULL;
- unsigned int val;
-
- if(get_user(r_len , optlen))
- return -EFAULT;
-
- switch (optname) {
- case DSO_CONDATA:
- if (r_len > sizeof(struct optdata_dn))
- r_len = sizeof(struct optdata_dn);
- r_data = &scp->conndata_in;
- break;
-
- case DSO_DISDATA:
- if (r_len > sizeof(struct optdata_dn))
- r_len = sizeof(struct optdata_dn);
- r_data = &scp->discdata_in;
- break;
-
- case DSO_CONACCESS:
- if (r_len > sizeof(struct accessdata_dn))
- r_len = sizeof(struct accessdata_dn);
- r_data = &scp->accessdata;
- break;
-
- case DSO_ACCEPTMODE:
- if (r_len > sizeof(unsigned char))
- r_len = sizeof(unsigned char);
- r_data = &scp->accept_mode;
- break;
-
- case DSO_LINKINFO:
- if (r_len > sizeof(struct linkinfo_dn))
- r_len = sizeof(struct linkinfo_dn);
-
- memset(&link, 0, sizeof(link));
-
- switch (sock->state) {
- case SS_CONNECTING:
- link.idn_linkstate = LL_CONNECTING;
- break;
- case SS_DISCONNECTING:
- link.idn_linkstate = LL_DISCONNECTING;
- break;
- case SS_CONNECTED:
- link.idn_linkstate = LL_RUNNING;
- break;
- default:
- link.idn_linkstate = LL_INACTIVE;
- }
-
- link.idn_segsize = scp->segsize_rem;
- r_data = &link;
- break;
-
- case DSO_MAXWINDOW:
- if (r_len > sizeof(unsigned long))
- r_len = sizeof(unsigned long);
- r_data = &scp->max_window;
- break;
-
- case DSO_NODELAY:
- if (r_len > sizeof(int))
- r_len = sizeof(int);
- val = (scp->nonagle == TCP_NAGLE_OFF);
- r_data = &val;
- break;
-
- case DSO_CORK:
- if (r_len > sizeof(int))
- r_len = sizeof(int);
- val = (scp->nonagle == TCP_NAGLE_CORK);
- r_data = &val;
- break;
-
- case DSO_SERVICES:
- if (r_len > sizeof(unsigned char))
- r_len = sizeof(unsigned char);
- r_data = &scp->services_rem;
- break;
-
- case DSO_INFO:
- if (r_len > sizeof(unsigned char))
- r_len = sizeof(unsigned char);
- r_data = &scp->info_rem;
- break;
-
- case DSO_STREAM:
- case DSO_SEQPACKET:
- case DSO_CONACCEPT:
- case DSO_CONREJECT:
- default:
- return -ENOPROTOOPT;
- }
-
- if (r_data) {
- if (copy_to_user(optval, r_data, r_len))
- return -EFAULT;
- if (put_user(r_len, optlen))
- return -EFAULT;
- }
-
- return 0;
-}
-
-
-static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
-{
- struct sk_buff *skb;
- int len = 0;
-
- if (flags & MSG_OOB)
- return !skb_queue_empty(q) ? 1 : 0;
-
- skb_queue_walk(q, skb) {
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- len += skb->len;
-
- if (cb->nsp_flags & 0x40) {
- /* SOCK_SEQPACKET reads to EOM */
- if (sk->sk_type == SOCK_SEQPACKET)
- return 1;
- /* so does SOCK_STREAM unless WAITALL is specified */
- if (!(flags & MSG_WAITALL))
- return 1;
- }
-
- /* minimum data length for read exceeded */
- if (len >= target)
- return 1;
- }
-
- return 0;
-}
-
-
-static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
- int flags)
-{
- struct sock *sk = sock->sk;
- struct dn_scp *scp = DN_SK(sk);
- struct sk_buff_head *queue = &sk->sk_receive_queue;
- size_t target = size > 1 ? 1 : 0;
- size_t copied = 0;
- int rv = 0;
- struct sk_buff *skb, *n;
- struct dn_skb_cb *cb = NULL;
- unsigned char eor = 0;
- long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
-
- lock_sock(sk);
-
- if (sock_flag(sk, SOCK_ZAPPED)) {
- rv = -EADDRNOTAVAIL;
- goto out;
- }
-
- if (sk->sk_shutdown & RCV_SHUTDOWN) {
- rv = 0;
- goto out;
- }
-
- rv = dn_check_state(sk, NULL, 0, &timeo, flags);
- if (rv)
- goto out;
-
- if (flags & ~(MSG_CMSG_COMPAT|MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) {
- rv = -EOPNOTSUPP;
- goto out;
- }
-
- if (flags & MSG_OOB)
- queue = &scp->other_receive_queue;
-
- if (flags & MSG_WAITALL)
- target = size;
-
-
- /*
- * See if there is data ready to read, sleep if there isn't
- */
- for(;;) {
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
-
- if (sk->sk_err)
- goto out;
-
- if (!skb_queue_empty(&scp->other_receive_queue)) {
- if (!(flags & MSG_OOB)) {
- msg->msg_flags |= MSG_OOB;
- if (!scp->other_report) {
- scp->other_report = 1;
- goto out;
- }
- }
- }
-
- if (scp->state != DN_RUN)
- goto out;
-
- if (signal_pending(current)) {
- rv = sock_intr_errno(timeo);
- goto out;
- }
-
- if (dn_data_ready(sk, queue, flags, target))
- break;
-
- if (flags & MSG_DONTWAIT) {
- rv = -EWOULDBLOCK;
- goto out;
- }
-
- add_wait_queue(sk_sleep(sk), &wait);
- sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target), &wait);
- sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- remove_wait_queue(sk_sleep(sk), &wait);
- }
-
- skb_queue_walk_safe(queue, skb, n) {
- unsigned int chunk = skb->len;
- cb = DN_SKB_CB(skb);
-
- if ((chunk + copied) > size)
- chunk = size - copied;
-
- if (memcpy_to_msg(msg, skb->data, chunk)) {
- rv = -EFAULT;
- break;
- }
- copied += chunk;
-
- if (!(flags & MSG_PEEK))
- skb_pull(skb, chunk);
-
- eor = cb->nsp_flags & 0x40;
-
- if (skb->len == 0) {
- skb_unlink(skb, queue);
- kfree_skb(skb);
- /*
- * N.B. Don't refer to skb or cb after this point
- * in loop.
- */
- if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
- scp->flowloc_sw = DN_SEND;
- dn_nsp_send_link(sk, DN_SEND, 0);
- }
- }
-
- if (eor) {
- if (sk->sk_type == SOCK_SEQPACKET)
- break;
- if (!(flags & MSG_WAITALL))
- break;
- }
-
- if (flags & MSG_OOB)
- break;
-
- if (copied >= target)
- break;
- }
-
- rv = copied;
-
-
- if (eor && (sk->sk_type == SOCK_SEQPACKET))
- msg->msg_flags |= MSG_EOR;
-
-out:
- if (rv == 0)
- rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);
-
- if ((rv >= 0) && msg->msg_name) {
- __sockaddr_check_size(sizeof(struct sockaddr_dn));
- memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
- msg->msg_namelen = sizeof(struct sockaddr_dn);
- }
-
- release_sock(sk);
-
- return rv;
-}
-
-
-static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
-{
- unsigned char fctype = scp->services_rem & NSP_FC_MASK;
- if (skb_queue_len(queue) >= scp->snd_window)
- return 1;
- if (fctype != NSP_FC_NONE) {
- if (flags & MSG_OOB) {
- if (scp->flowrem_oth == 0)
- return 1;
- } else {
- if (scp->flowrem_dat == 0)
- return 1;
- }
- }
- return 0;
-}
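
/*
 * Editor's sketch, not part of the patch: the throttle test above in
 * isolation.  A sender stalls once the transmit queue fills the
 * advertised window, or once the remote's credit counter (flowrem_oth
 * for other-data, flowrem_dat for normal data) reaches zero under any
 * flow-control scheme other than NSP_FC_NONE.  The struct below is an
 * illustrative stand-in for the relevant dn_scp fields.
 */
#include <stdbool.h>

struct fc_view {
	unsigned int queue_len, snd_window;
	int flowrem_dat, flowrem_oth;
	bool fc_none;			/* services negotiated NSP_FC_NONE */
};

static bool sketch_queue_too_long(const struct fc_view *v, bool oob)
{
	if (v->queue_len >= v->snd_window)
		return true;
	if (!v->fc_none && (oob ? v->flowrem_oth : v->flowrem_dat) == 0)
		return true;
	return false;
}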
-
-/*
- * The DECnet spec requires that the "routing layer" accepts packets which
- * are at least 230 bytes in size. This excludes any headers which the NSP
- * layer might add, so we always assume that we'll be using the maximal
- * length header on data packets. The variation in length is due to the
- * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
- * make much practical difference.
- */
-unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu)
-{
- unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER;
- if (dev) {
- struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
- mtu -= LL_RESERVED_SPACE(dev);
- if (dn_db->use_long)
- mtu -= 21;
- else
- mtu -= 6;
- mtu -= DN_MAX_NSP_DATA_HEADER;
- } else {
- /*
- * 21 = long header, 16 = guess at MAC header length
- */
- mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16);
- }
- if (mtu > mss)
- mss = mtu;
- return mss;
-}
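
/*
 * Editor's sketch, not part of the patch: the device-less branch of
 * the MSS arithmetic above as a standalone function.  The value of
 * DN_MAX_NSP_DATA_HEADER is an assumption for illustration (1 flags +
 * 2 dst + 2 src + 2+2 acks + 2 segment number = 11 bytes, matching
 * the comment about the two 16-bit acknowledgement fields).
 */
#define SKETCH_NSP_DATA_HEADER	11

static unsigned int sketch_mss_from_pmtu(int mtu)
{
	unsigned int mss = 230 - SKETCH_NSP_DATA_HEADER;

	/* long routing header (21) + NSP header + MAC-header guess (16) */
	mtu -= 21 + SKETCH_NSP_DATA_HEADER + 16;
	if (mtu > (int)mss)
		mss = mtu;
	return mss;	/* e.g. mtu 1500 -> 1452; tiny mtu -> the 219 floor */
}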
-
-static inline unsigned int dn_current_mss(struct sock *sk, int flags)
-{
- struct dst_entry *dst = __sk_dst_get(sk);
- struct dn_scp *scp = DN_SK(sk);
- int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem);
-
- /* Other data messages are limited to 16 bytes per packet */
- if (flags & MSG_OOB)
- return 16;
-
- /* This works out the maximum size of segment we can send out */
- if (dst) {
- u32 mtu = dst_mtu(dst);
- mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
- }
-
- return mss_now;
-}
-
-/*
- * N.B. We get the timeout wrong here, but then we always did get it
- * wrong before and this is another step along the road to correcting
- * it. It ought to get updated each time we pass through the routine,
- * but in practice it probably doesn't matter too much for now.
- */
-static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
- unsigned long datalen, int noblock,
- int *errcode)
-{
- struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
- noblock, errcode);
- if (skb) {
- skb->protocol = htons(ETH_P_DNA_RT);
- skb->pkt_type = PACKET_OUTGOING;
- }
- return skb;
-}
-
-static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
-{
- struct sock *sk = sock->sk;
- struct dn_scp *scp = DN_SK(sk);
- size_t mss;
- struct sk_buff_head *queue = &scp->data_xmit_queue;
- int flags = msg->msg_flags;
- int err = 0;
- size_t sent = 0;
- int addr_len = msg->msg_namelen;
- DECLARE_SOCKADDR(struct sockaddr_dn *, addr, msg->msg_name);
- struct sk_buff *skb = NULL;
- struct dn_skb_cb *cb;
- size_t len;
- unsigned char fctype;
- long timeo;
-
- if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT))
- return -EOPNOTSUPP;
-
- if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
- return -EINVAL;
-
- lock_sock(sk);
- timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
- /*
- * The only difference between stream sockets and sequenced packet
- * sockets is that the stream sockets always behave as if MSG_EOR
- * has been set.
- */
- if (sock->type == SOCK_STREAM) {
- if (flags & MSG_EOR) {
- err = -EINVAL;
- goto out;
- }
- flags |= MSG_EOR;
- }
-
-
- err = dn_check_state(sk, addr, addr_len, &timeo, flags);
- if (err)
- goto out_err;
-
- if (sk->sk_shutdown & SEND_SHUTDOWN) {
- err = -EPIPE;
- if (!(flags & MSG_NOSIGNAL))
- send_sig(SIGPIPE, current, 0);
- goto out_err;
- }
-
- if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
- dst_negative_advice(sk);
-
- mss = scp->segsize_rem;
- fctype = scp->services_rem & NSP_FC_MASK;
-
- mss = dn_current_mss(sk, flags);
-
- if (flags & MSG_OOB) {
- queue = &scp->other_xmit_queue;
- if (size > mss) {
- err = -EMSGSIZE;
- goto out;
- }
- }
-
- scp->persist_fxn = dn_nsp_xmit_timeout;
-
- while(sent < size) {
- err = sock_error(sk);
- if (err)
- goto out;
-
- if (signal_pending(current)) {
- err = sock_intr_errno(timeo);
- goto out;
- }
-
- /*
- * Calculate size that we wish to send.
- */
- len = size - sent;
-
- if (len > mss)
- len = mss;
-
- /*
- * Wait for queue size to go down below the window
- * size.
- */
- if (dn_queue_too_long(scp, queue, flags)) {
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
-
- if (flags & MSG_DONTWAIT) {
- err = -EWOULDBLOCK;
- goto out;
- }
-
- add_wait_queue(sk_sleep(sk), &wait);
- sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- sk_wait_event(sk, &timeo,
- !dn_queue_too_long(scp, queue, flags), &wait);
- sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- remove_wait_queue(sk_sleep(sk), &wait);
- continue;
- }
-
- /*
- * Get a suitably sized skb.
- * 64 is a bit of a hack really, but it's larger than any
- * link-layer headers and has served us well as a good
- * guess as to their real length.
- */
- skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
- flags & MSG_DONTWAIT, &err);
-
- if (err)
- break;
-
- if (!skb)
- continue;
-
- cb = DN_SKB_CB(skb);
-
- skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);
-
- if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
- err = -EFAULT;
- goto out;
- }
-
- if (flags & MSG_OOB) {
- cb->nsp_flags = 0x30;
- if (fctype != NSP_FC_NONE)
- scp->flowrem_oth--;
- } else {
- cb->nsp_flags = 0x00;
- if (scp->seg_total == 0)
- cb->nsp_flags |= 0x20;
-
- scp->seg_total += len;
-
- if (((sent + len) == size) && (flags & MSG_EOR)) {
- cb->nsp_flags |= 0x40;
- scp->seg_total = 0;
- if (fctype == NSP_FC_SCMC)
- scp->flowrem_dat--;
- }
- if (fctype == NSP_FC_SRC)
- scp->flowrem_dat--;
- }
-
- sent += len;
- dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
- skb = NULL;
-
- scp->persist = dn_nsp_persist(sk);
-
- }
-out:
-
- kfree_skb(skb);
-
- release_sock(sk);
-
- return sent ? sent : err;
-
-out_err:
- err = sk_stream_error(sk, flags, err);
- release_sock(sk);
- return err;
-}
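
/*
 * Editor's sketch, not part of the patch: the stream/seqpacket MSG_EOR
 * rule in dn_sendmsg() above, seen from user space.  On SOCK_SEQPACKET
 * a sender marks end-of-message explicitly (the 0x40 flag lands on the
 * final segment); on SOCK_STREAM passing MSG_EOR yields -EINVAL
 * because the code behaves as if it were always set.  Assumes a
 * configured DECnet node and an already connected descriptor.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t sketch_send_record(int fd, void *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

	/* one complete message; EOM is set on its last segment */
	return sendmsg(fd, &msg, MSG_EOR);
}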
-
-static int dn_device_event(struct notifier_block *this, unsigned long event,
- void *ptr)
-{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-
- if (!net_eq(dev_net(dev), &init_net))
- return NOTIFY_DONE;
-
- switch (event) {
- case NETDEV_UP:
- dn_dev_up(dev);
- break;
- case NETDEV_DOWN:
- dn_dev_down(dev);
- break;
- default:
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block dn_dev_notifier = {
- .notifier_call = dn_device_event,
-};
-
-static struct packet_type dn_dix_packet_type __read_mostly = {
- .type = cpu_to_be16(ETH_P_DNA_RT),
- .func = dn_route_rcv,
-};
-
-#ifdef CONFIG_PROC_FS
-struct dn_iter_state {
- int bucket;
-};
-
-static struct sock *dn_socket_get_first(struct seq_file *seq)
-{
- struct dn_iter_state *state = seq->private;
- struct sock *n = NULL;
-
- for(state->bucket = 0;
- state->bucket < DN_SK_HASH_SIZE;
- ++state->bucket) {
- n = sk_head(&dn_sk_hash[state->bucket]);
- if (n)
- break;
- }
-
- return n;
-}
-
-static struct sock *dn_socket_get_next(struct seq_file *seq,
- struct sock *n)
-{
- struct dn_iter_state *state = seq->private;
-
- n = sk_next(n);
-try_again:
- if (n)
- goto out;
- if (++state->bucket >= DN_SK_HASH_SIZE)
- goto out;
- n = sk_head(&dn_sk_hash[state->bucket]);
- goto try_again;
-out:
- return n;
-}
-
-static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos)
-{
- struct sock *sk = dn_socket_get_first(seq);
-
- if (sk) {
- while(*pos && (sk = dn_socket_get_next(seq, sk)))
- --*pos;
- }
- return *pos ? NULL : sk;
-}
-
-static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos)
-{
- void *rc;
- read_lock_bh(&dn_hash_lock);
- rc = socket_get_idx(seq, &pos);
- if (!rc) {
- read_unlock_bh(&dn_hash_lock);
- }
- return rc;
-}
-
-static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos)
-{
- return *pos ? dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
-}
-
-static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- void *rc;
-
- if (v == SEQ_START_TOKEN) {
- rc = dn_socket_get_idx(seq, 0);
- goto out;
- }
-
- rc = dn_socket_get_next(seq, v);
- if (rc)
- goto out;
- read_unlock_bh(&dn_hash_lock);
-out:
- ++*pos;
- return rc;
-}
-
-static void dn_socket_seq_stop(struct seq_file *seq, void *v)
-{
- if (v && v != SEQ_START_TOKEN)
- read_unlock_bh(&dn_hash_lock);
-}
-
-#define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126)
-
-static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
-{
- int i;
-
- switch (le16_to_cpu(dn->sdn_objnamel)) {
- case 0:
- sprintf(buf, "%d", dn->sdn_objnum);
- break;
- default:
- for (i = 0; i < le16_to_cpu(dn->sdn_objnamel); i++) {
- buf[i] = dn->sdn_objname[i];
- if (IS_NOT_PRINTABLE(buf[i]))
- buf[i] = '.';
- }
- buf[i] = 0;
- }
-}
-
-static char *dn_state2asc(unsigned char state)
-{
- switch (state) {
- case DN_O:
- return "OPEN";
- case DN_CR:
- return " CR";
- case DN_DR:
- return " DR";
- case DN_DRC:
- return " DRC";
- case DN_CC:
- return " CC";
- case DN_CI:
- return " CI";
- case DN_NR:
- return " NR";
- case DN_NC:
- return " NC";
- case DN_CD:
- return " CD";
- case DN_RJ:
- return " RJ";
- case DN_RUN:
- return " RUN";
- case DN_DI:
- return " DI";
- case DN_DIC:
- return " DIC";
- case DN_DN:
- return " DN";
- case DN_CL:
- return " CL";
- case DN_CN:
- return " CN";
- }
-
- return "????";
-}
-
-static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk)
-{
- struct dn_scp *scp = DN_SK(sk);
- char buf1[DN_ASCBUF_LEN];
- char buf2[DN_ASCBUF_LEN];
- char local_object[DN_MAXOBJL+3];
- char remote_object[DN_MAXOBJL+3];
-
- dn_printable_object(&scp->addr, local_object);
- dn_printable_object(&scp->peer, remote_object);
-
- seq_printf(seq,
- "%6s/%04X %04d:%04d %04d:%04d %01d %-16s "
- "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n",
- dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->addr)), buf1),
- scp->addrloc,
- scp->numdat,
- scp->numoth,
- scp->ackxmt_dat,
- scp->ackxmt_oth,
- scp->flowloc_sw,
- local_object,
- dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->peer)), buf2),
- scp->addrrem,
- scp->numdat_rcv,
- scp->numoth_rcv,
- scp->ackrcv_dat,
- scp->ackrcv_oth,
- scp->flowrem_sw,
- remote_object,
- dn_state2asc(scp->state),
- ((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER"));
-}
-
-static int dn_socket_seq_show(struct seq_file *seq, void *v)
-{
- if (v == SEQ_START_TOKEN) {
- seq_puts(seq, "Local Remote\n");
- } else {
- dn_socket_format_entry(seq, v);
- }
- return 0;
-}
-
-static const struct seq_operations dn_socket_seq_ops = {
- .start = dn_socket_seq_start,
- .next = dn_socket_seq_next,
- .stop = dn_socket_seq_stop,
- .show = dn_socket_seq_show,
-};
-#endif
-
-static const struct net_proto_family dn_family_ops = {
- .family = AF_DECnet,
- .create = dn_create,
- .owner = THIS_MODULE,
-};
-
-static const struct proto_ops dn_proto_ops = {
- .family = AF_DECnet,
- .owner = THIS_MODULE,
- .release = dn_release,
- .bind = dn_bind,
- .connect = dn_connect,
- .socketpair = sock_no_socketpair,
- .accept = dn_accept,
- .getname = dn_getname,
- .poll = dn_poll,
- .ioctl = dn_ioctl,
- .listen = dn_listen,
- .shutdown = dn_shutdown,
- .setsockopt = dn_setsockopt,
- .getsockopt = dn_getsockopt,
- .sendmsg = dn_sendmsg,
- .recvmsg = dn_recvmsg,
- .mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
-};
-
-MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
-MODULE_AUTHOR("Linux DECnet Project Team");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_NETPROTO(PF_DECnet);
-
-static const char banner[] __initconst = KERN_INFO
-"NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
-
-static int __init decnet_init(void)
-{
- int rc;
-
- printk(banner);
-
- rc = proto_register(&dn_proto, 1);
- if (rc != 0)
- goto out;
-
- dn_neigh_init();
- dn_dev_init();
- dn_route_init();
- dn_fib_init();
-
- sock_register(&dn_family_ops);
- dev_add_pack(&dn_dix_packet_type);
- register_netdevice_notifier(&dn_dev_notifier);
-
- proc_create_seq_private("decnet", 0444, init_net.proc_net,
- &dn_socket_seq_ops, sizeof(struct dn_iter_state),
- NULL);
- dn_register_sysctl();
-out:
- return rc;
-
-}
-module_init(decnet_init);
-
-/*
- * Prevent DECnet module unloading until it's fixed properly.
- * Requires an audit of the code to check for memory leaks and
- * initialisation problems etc.
- */
-#if 0
-static void __exit decnet_exit(void)
-{
- sock_unregister(AF_DECnet);
- rtnl_unregister_all(PF_DECnet);
- dev_remove_pack(&dn_dix_packet_type);
-
- dn_unregister_sysctl();
-
- unregister_netdevice_notifier(&dn_dev_notifier);
-
- dn_route_cleanup();
- dn_dev_cleanup();
- dn_neigh_cleanup();
- dn_fib_cleanup();
-
- remove_proc_entry("decnet", init_net.proc_net);
-
- proto_unregister(&dn_proto);
-
- rcu_barrier(); /* Wait for completion of call_rcu()'s */
-}
-module_exit(decnet_exit);
-#endif
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
deleted file mode 100644
index cca7ae712995..000000000000
--- a/net/decnet/dn_dev.c
+++ /dev/null
@@ -1,1438 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DECnet An implementation of the DECnet protocol suite for the LINUX
- * operating system. DECnet is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * DECnet Device Layer
- *
- * Authors: Steve Whitehouse <SteveW@ACM.org>
- * Eduardo Marcelo Serrat <emserrat@geocities.com>
- *
- * Changes:
- * Steve Whitehouse : Devices now see incoming frames so they
- * can mark on who it came from.
- * Steve Whitehouse : Fixed bug in creating neighbours. Each neighbour
- * can now have a device specific setup func.
- * Steve Whitehouse : Added /proc/sys/net/decnet/conf/<dev>/
- * Steve Whitehouse : Fixed bug which sometimes killed timer
- * Steve Whitehouse : Multiple ifaddr support
- * Steve Whitehouse : SIOCGIFCONF is now a compile time option
- * Steve Whitehouse : /proc/sys/net/decnet/conf/<sys>/forwarding
- * Steve Whitehouse : Removed timer1 - it's a user space issue now
- * Patrick Caulfield : Fixed router hello message format
- * Steve Whitehouse : Got rid of constant sizes for blksize for
- * devices. All mtu based now.
- */
-
-#include <linux/capability.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/net.h>
-#include <linux/netdevice.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/if_addr.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/skbuff.h>
-#include <linux/sysctl.h>
-#include <linux/notifier.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/uaccess.h>
-#include <net/net_namespace.h>
-#include <net/neighbour.h>
-#include <net/dst.h>
-#include <net/flow.h>
-#include <net/fib_rules.h>
-#include <net/netlink.h>
-#include <net/dn.h>
-#include <net/dn_dev.h>
-#include <net/dn_route.h>
-#include <net/dn_neigh.h>
-#include <net/dn_fib.h>
-
-#define DN_IFREQ_SIZE (offsetof(struct ifreq, ifr_ifru) + sizeof(struct sockaddr_dn))
-
-static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00};
-static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00};
-static char dn_hiord[ETH_ALEN] = {0xAA,0x00,0x04,0x00,0x00,0x00};
-static unsigned char dn_eco_version[3] = {0x02,0x00,0x00};
-
-extern struct neigh_table dn_neigh_table;
-
-/*
- * decnet_address is kept in network order.
- */
-__le16 decnet_address = 0;
-
-static DEFINE_SPINLOCK(dndev_lock);
-static struct net_device *decnet_default_device;
-static BLOCKING_NOTIFIER_HEAD(dnaddr_chain);
-
-static struct dn_dev *dn_dev_create(struct net_device *dev, int *err);
-static void dn_dev_delete(struct net_device *dev);
-static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa);
-
-static int dn_eth_up(struct net_device *);
-static void dn_eth_down(struct net_device *);
-static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa);
-static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa);
-
-static struct dn_dev_parms dn_dev_list[] = {
-{
- .type = ARPHRD_ETHER, /* Ethernet */
- .mode = DN_DEV_BCAST,
- .state = DN_DEV_S_RU,
- .t2 = 1,
- .t3 = 10,
- .name = "ethernet",
- .up = dn_eth_up,
- .down = dn_eth_down,
- .timer3 = dn_send_brd_hello,
-},
-{
- .type = ARPHRD_IPGRE, /* DECnet tunneled over GRE in IP */
- .mode = DN_DEV_BCAST,
- .state = DN_DEV_S_RU,
- .t2 = 1,
- .t3 = 10,
- .name = "ipgre",
- .timer3 = dn_send_brd_hello,
-},
-#if 0
-{
- .type = ARPHRD_X25, /* Bog standard X.25 */
- .mode = DN_DEV_UCAST,
- .state = DN_DEV_S_DS,
- .t2 = 1,
- .t3 = 120,
- .name = "x25",
- .timer3 = dn_send_ptp_hello,
-},
-#endif
-#if 0
-{
- .type = ARPHRD_PPP, /* DECnet over PPP */
- .mode = DN_DEV_BCAST,
- .state = DN_DEV_S_RU,
- .t2 = 1,
- .t3 = 10,
- .name = "ppp",
- .timer3 = dn_send_brd_hello,
-},
-#endif
-{
- .type = ARPHRD_DDCMP, /* DECnet over DDCMP */
- .mode = DN_DEV_UCAST,
- .state = DN_DEV_S_DS,
- .t2 = 1,
- .t3 = 120,
- .name = "ddcmp",
- .timer3 = dn_send_ptp_hello,
-},
-{
- .type = ARPHRD_LOOPBACK, /* Loopback interface - always last */
- .mode = DN_DEV_BCAST,
- .state = DN_DEV_S_RU,
- .t2 = 1,
- .t3 = 10,
- .name = "loopback",
- .timer3 = dn_send_brd_hello,
-}
-};
-
-#define DN_DEV_LIST_SIZE ARRAY_SIZE(dn_dev_list)
-
-#define DN_DEV_PARMS_OFFSET(x) offsetof(struct dn_dev_parms, x)
-
-#ifdef CONFIG_SYSCTL
-
-static int min_t2[] = { 1 };
-static int max_t2[] = { 60 }; /* No max specified, but this seems sensible */
-static int min_t3[] = { 1 };
-static int max_t3[] = { 8191 }; /* Must fit in 16 bits when multiplied by BCT3MULT or T3MULT */
-
-static int min_priority[1];
-static int max_priority[] = { 127 }; /* From DECnet spec */
-
-static int dn_forwarding_proc(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-static struct dn_dev_sysctl_table {
- struct ctl_table_header *sysctl_header;
- struct ctl_table dn_dev_vars[5];
-} dn_dev_sysctl = {
- NULL,
- {
- {
- .procname = "forwarding",
- .data = (void *)DN_DEV_PARMS_OFFSET(forwarding),
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = dn_forwarding_proc,
- },
- {
- .procname = "priority",
- .data = (void *)DN_DEV_PARMS_OFFSET(priority),
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_priority,
- .extra2 = &max_priority
- },
- {
- .procname = "t2",
- .data = (void *)DN_DEV_PARMS_OFFSET(t2),
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_t2,
- .extra2 = &max_t2
- },
- {
- .procname = "t3",
- .data = (void *)DN_DEV_PARMS_OFFSET(t3),
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_t3,
- .extra2 = &max_t3
- },
- { }
- },
-};
-
-static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms)
-{
- struct dn_dev_sysctl_table *t;
- int i;
-
- char path[sizeof("net/decnet/conf/") + IFNAMSIZ];
-
- t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL);
- if (t == NULL)
- return;
-
- for(i = 0; i < ARRAY_SIZE(t->dn_dev_vars) - 1; i++) {
- long offset = (long)t->dn_dev_vars[i].data;
- t->dn_dev_vars[i].data = ((char *)parms) + offset;
- }
-
- snprintf(path, sizeof(path), "net/decnet/conf/%s",
- dev? dev->name : parms->name);
-
- t->dn_dev_vars[0].extra1 = (void *)dev;
-
- t->sysctl_header = register_net_sysctl(&init_net, path, t->dn_dev_vars);
- if (t->sysctl_header == NULL)
- kfree(t);
- else
- parms->sysctl = t;
-}
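
/*
 * Editor's sketch, not part of the patch: the offset-rebasing trick
 * above, reduced to plain C.  The template table stores offsetof()
 * values in its .data slots; each per-device copy turns those offsets
 * into real pointers by adding the base of that device's parms.
 */
#include <stddef.h>

struct sketch_parms { int forwarding, priority, t2, t3; };
struct sketch_var { void *data; };

static void sketch_rebase(struct sketch_var *vars, int n,
			  struct sketch_parms *base)
{
	int i;

	for (i = 0; i < n; i++)	/* offset in, pointer out */
		vars[i].data = (char *)base + (long)vars[i].data;
}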
-
-static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
-{
- if (parms->sysctl) {
- struct dn_dev_sysctl_table *t = parms->sysctl;
- parms->sysctl = NULL;
- unregister_net_sysctl_table(t->sysctl_header);
- kfree(t);
- }
-}
-
-static int dn_forwarding_proc(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
-{
-#ifdef CONFIG_DECNET_ROUTER
- struct net_device *dev = table->extra1;
- struct dn_dev *dn_db;
- int err;
- int tmp, old;
-
- if (table->extra1 == NULL)
- return -EINVAL;
-
- dn_db = rcu_dereference_raw(dev->dn_ptr);
- old = dn_db->parms.forwarding;
-
- err = proc_dointvec(table, write, buffer, lenp, ppos);
-
- if ((err >= 0) && write) {
- if (dn_db->parms.forwarding < 0)
- dn_db->parms.forwarding = 0;
- if (dn_db->parms.forwarding > 2)
- dn_db->parms.forwarding = 2;
- /*
- * What an ugly hack this is... it works, just. It
- * would be nice if sysctl/proc were just that little
- * bit more flexible so I don't have to write a special
- * routine, or suffer hacks like this - SJW
- */
- tmp = dn_db->parms.forwarding;
- dn_db->parms.forwarding = old;
- if (dn_db->parms.down)
- dn_db->parms.down(dev);
- dn_db->parms.forwarding = tmp;
- if (dn_db->parms.up)
- dn_db->parms.up(dev);
- }
-
- return err;
-#else
- return -EINVAL;
-#endif
-}
-
-#else /* CONFIG_SYSCTL */
-static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
-{
-}
-static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms)
-{
-}
-
-#endif /* CONFIG_SYSCTL */
-
-static inline __u16 mtu2blksize(struct net_device *dev)
-{
- u32 blksize = dev->mtu;
- if (blksize > 0xffff)
- blksize = 0xffff;
-
- if (dev->type == ARPHRD_ETHER ||
- dev->type == ARPHRD_PPP ||
- dev->type == ARPHRD_IPGRE ||
- dev->type == ARPHRD_LOOPBACK)
- blksize -= 2;
-
- return (__u16)blksize;
-}
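
/*
 * Editor's sketch, not part of the patch: mtu2blksize() worked
 * through.  The 2-byte reduction on Ethernet-style devices plausibly
 * leaves room for the little-endian length word prepended by the
 * framing code (the skb_push(skb, 2) in the hello builders below),
 * so an Ethernet mtu of 1500 advertises a block size of 1498.
 */
#include <stdint.h>

static uint16_t sketch_mtu2blksize(uint32_t mtu, int ether_like)
{
	if (mtu > 0xffff)
		mtu = 0xffff;		/* clamp to the 16-bit field */
	if (ether_like)
		mtu -= 2;		/* room for the length word */
	return (uint16_t)mtu;
}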
-
-static struct dn_ifaddr *dn_dev_alloc_ifa(void)
-{
- struct dn_ifaddr *ifa;
-
- ifa = kzalloc(sizeof(*ifa), GFP_KERNEL);
-
- return ifa;
-}
-
-static void dn_dev_free_ifa(struct dn_ifaddr *ifa)
-{
- kfree_rcu(ifa, rcu);
-}
-
-static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy)
-{
- struct dn_ifaddr *ifa1 = rtnl_dereference(*ifap);
- unsigned char mac_addr[6];
- struct net_device *dev = dn_db->dev;
-
- ASSERT_RTNL();
-
- *ifap = ifa1->ifa_next;
-
- if (dn_db->dev->type == ARPHRD_ETHER) {
- if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) {
- dn_dn2eth(mac_addr, ifa1->ifa_local);
- dev_mc_del(dev, mac_addr);
- }
- }
-
- dn_ifaddr_notify(RTM_DELADDR, ifa1);
- blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1);
- if (destroy) {
- dn_dev_free_ifa(ifa1);
-
- if (dn_db->ifa_list == NULL)
- dn_dev_delete(dn_db->dev);
- }
-}
-
-static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
-{
- struct net_device *dev = dn_db->dev;
- struct dn_ifaddr *ifa1;
- unsigned char mac_addr[6];
-
- ASSERT_RTNL();
-
- /* Check for duplicates */
- for (ifa1 = rtnl_dereference(dn_db->ifa_list);
- ifa1 != NULL;
- ifa1 = rtnl_dereference(ifa1->ifa_next)) {
- if (ifa1->ifa_local == ifa->ifa_local)
- return -EEXIST;
- }
-
- if (dev->type == ARPHRD_ETHER) {
- if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) {
- dn_dn2eth(mac_addr, ifa->ifa_local);
- dev_mc_add(dev, mac_addr);
- }
- }
-
- ifa->ifa_next = dn_db->ifa_list;
- rcu_assign_pointer(dn_db->ifa_list, ifa);
-
- dn_ifaddr_notify(RTM_NEWADDR, ifa);
- blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
-
- return 0;
-}
-
-static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa)
-{
- struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
- int rv;
-
- if (dn_db == NULL) {
- int err;
- dn_db = dn_dev_create(dev, &err);
- if (dn_db == NULL)
- return err;
- }
-
- ifa->ifa_dev = dn_db;
-
- if (dev->flags & IFF_LOOPBACK)
- ifa->ifa_scope = RT_SCOPE_HOST;
-
- rv = dn_dev_insert_ifa(dn_db, ifa);
- if (rv)
- dn_dev_free_ifa(ifa);
- return rv;
-}
-
-
-int dn_dev_ioctl(unsigned int cmd, void __user *arg)
-{
- char buffer[DN_IFREQ_SIZE];
- struct ifreq *ifr = (struct ifreq *)buffer;
- struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr;
- struct dn_dev *dn_db;
- struct net_device *dev;
- struct dn_ifaddr *ifa = NULL;
- struct dn_ifaddr __rcu **ifap = NULL;
- int ret = 0;
-
- if (copy_from_user(ifr, arg, DN_IFREQ_SIZE))
- return -EFAULT;
- ifr->ifr_name[IFNAMSIZ-1] = 0;
-
- dev_load(&init_net, ifr->ifr_name);
-
- switch (cmd) {
- case SIOCGIFADDR:
- break;
- case SIOCSIFADDR:
- if (!capable(CAP_NET_ADMIN))
- return -EACCES;
- if (sdn->sdn_family != AF_DECnet)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- rtnl_lock();
-
- if ((dev = __dev_get_by_name(&init_net, ifr->ifr_name)) == NULL) {
- ret = -ENODEV;
- goto done;
- }
-
- if ((dn_db = rtnl_dereference(dev->dn_ptr)) != NULL) {
- for (ifap = &dn_db->ifa_list;
- (ifa = rtnl_dereference(*ifap)) != NULL;
- ifap = &ifa->ifa_next)
- if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0)
- break;
- }
-
- if (ifa == NULL && cmd != SIOCSIFADDR) {
- ret = -EADDRNOTAVAIL;
- goto done;
- }
-
- switch (cmd) {
- case SIOCGIFADDR:
- *((__le16 *)sdn->sdn_nodeaddr) = ifa->ifa_local;
- goto rarok;
-
- case SIOCSIFADDR:
- if (!ifa) {
- if ((ifa = dn_dev_alloc_ifa()) == NULL) {
- ret = -ENOBUFS;
- break;
- }
- memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
- } else {
- if (ifa->ifa_local == dn_saddr2dn(sdn))
- break;
- dn_dev_del_ifa(dn_db, ifap, 0);
- }
-
- ifa->ifa_local = ifa->ifa_address = dn_saddr2dn(sdn);
-
- ret = dn_dev_set_ifa(dev, ifa);
- }
-done:
- rtnl_unlock();
-
- return ret;
-rarok:
- if (copy_to_user(arg, ifr, DN_IFREQ_SIZE))
- ret = -EFAULT;
- goto done;
-}
-
-struct net_device *dn_dev_get_default(void)
-{
- struct net_device *dev;
-
- spin_lock(&dndev_lock);
- dev = decnet_default_device;
- if (dev) {
- if (dev->dn_ptr)
- dev_hold(dev);
- else
- dev = NULL;
- }
- spin_unlock(&dndev_lock);
-
- return dev;
-}
-
-int dn_dev_set_default(struct net_device *dev, int force)
-{
- struct net_device *old = NULL;
- int rv = -EBUSY;
- if (!dev->dn_ptr)
- return -ENODEV;
-
- spin_lock(&dndev_lock);
- if (force || decnet_default_device == NULL) {
- old = decnet_default_device;
- decnet_default_device = dev;
- rv = 0;
- }
- spin_unlock(&dndev_lock);
-
- if (old)
- dev_put(old);
- return rv;
-}
-
-static void dn_dev_check_default(struct net_device *dev)
-{
- spin_lock(&dndev_lock);
- if (dev == decnet_default_device) {
- decnet_default_device = NULL;
- } else {
- dev = NULL;
- }
- spin_unlock(&dndev_lock);
-
- if (dev)
- dev_put(dev);
-}
-
-/*
- * Called with RTNL
- */
-static struct dn_dev *dn_dev_by_index(int ifindex)
-{
- struct net_device *dev;
- struct dn_dev *dn_dev = NULL;
-
- dev = __dev_get_by_index(&init_net, ifindex);
- if (dev)
- dn_dev = rtnl_dereference(dev->dn_ptr);
-
- return dn_dev;
-}
-
-static const struct nla_policy dn_ifa_policy[IFA_MAX+1] = {
- [IFA_ADDRESS] = { .type = NLA_U16 },
- [IFA_LOCAL] = { .type = NLA_U16 },
- [IFA_LABEL] = { .type = NLA_STRING,
- .len = IFNAMSIZ - 1 },
- [IFA_FLAGS] = { .type = NLA_U32 },
-};
-
-static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack)
-{
- struct net *net = sock_net(skb->sk);
- struct nlattr *tb[IFA_MAX+1];
- struct dn_dev *dn_db;
- struct ifaddrmsg *ifm;
- struct dn_ifaddr *ifa;
- struct dn_ifaddr __rcu **ifap;
- int err = -EINVAL;
-
- if (!netlink_capable(skb, CAP_NET_ADMIN))
- return -EPERM;
-
- if (!net_eq(net, &init_net))
- goto errout;
-
- err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
- dn_ifa_policy, extack);
- if (err < 0)
- goto errout;
-
- err = -ENODEV;
- ifm = nlmsg_data(nlh);
- if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL)
- goto errout;
-
- err = -EADDRNOTAVAIL;
- for (ifap = &dn_db->ifa_list;
- (ifa = rtnl_dereference(*ifap)) != NULL;
- ifap = &ifa->ifa_next) {
- if (tb[IFA_LOCAL] &&
- nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
- continue;
-
- if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
- continue;
-
- dn_dev_del_ifa(dn_db, ifap, 1);
- return 0;
- }
-
-errout:
- return err;
-}
-
-static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack)
-{
- struct net *net = sock_net(skb->sk);
- struct nlattr *tb[IFA_MAX+1];
- struct net_device *dev;
- struct dn_dev *dn_db;
- struct ifaddrmsg *ifm;
- struct dn_ifaddr *ifa;
- int err;
-
- if (!netlink_capable(skb, CAP_NET_ADMIN))
- return -EPERM;
-
- if (!net_eq(net, &init_net))
- return -EINVAL;
-
- err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
- dn_ifa_policy, extack);
- if (err < 0)
- return err;
-
- if (tb[IFA_LOCAL] == NULL)
- return -EINVAL;
-
- ifm = nlmsg_data(nlh);
- if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL)
- return -ENODEV;
-
- if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL) {
- dn_db = dn_dev_create(dev, &err);
- if (!dn_db)
- return err;
- }
-
- if ((ifa = dn_dev_alloc_ifa()) == NULL)
- return -ENOBUFS;
-
- if (tb[IFA_ADDRESS] == NULL)
- tb[IFA_ADDRESS] = tb[IFA_LOCAL];
-
- ifa->ifa_local = nla_get_le16(tb[IFA_LOCAL]);
- ifa->ifa_address = nla_get_le16(tb[IFA_ADDRESS]);
- ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
- ifm->ifa_flags;
- ifa->ifa_scope = ifm->ifa_scope;
- ifa->ifa_dev = dn_db;
-
- if (tb[IFA_LABEL])
- nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
- else
- memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
-
- err = dn_dev_insert_ifa(dn_db, ifa);
- if (err)
- dn_dev_free_ifa(ifa);
-
- return err;
-}
-
-static inline size_t dn_ifaddr_nlmsg_size(void)
-{
- return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
- + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
- + nla_total_size(2) /* IFA_ADDRESS */
- + nla_total_size(2) /* IFA_LOCAL */
- + nla_total_size(4); /* IFA_FLAGS */
-}
-
-static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
- u32 portid, u32 seq, int event, unsigned int flags)
-{
- struct ifaddrmsg *ifm;
- struct nlmsghdr *nlh;
- u32 ifa_flags = ifa->ifa_flags | IFA_F_PERMANENT;
-
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
- if (nlh == NULL)
- return -EMSGSIZE;
-
- ifm = nlmsg_data(nlh);
- ifm->ifa_family = AF_DECnet;
- ifm->ifa_prefixlen = 16;
- ifm->ifa_flags = ifa_flags;
- ifm->ifa_scope = ifa->ifa_scope;
- ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
-
- if ((ifa->ifa_address &&
- nla_put_le16(skb, IFA_ADDRESS, ifa->ifa_address)) ||
- (ifa->ifa_local &&
- nla_put_le16(skb, IFA_LOCAL, ifa->ifa_local)) ||
- (ifa->ifa_label[0] &&
- nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
- nla_put_u32(skb, IFA_FLAGS, ifa_flags))
- goto nla_put_failure;
- nlmsg_end(skb, nlh);
- return 0;
-
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
-}
-
-static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa)
-{
- struct sk_buff *skb;
- int err = -ENOBUFS;
-
- skb = alloc_skb(dn_ifaddr_nlmsg_size(), GFP_KERNEL);
- if (skb == NULL)
- goto errout;
-
- err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0);
- if (err < 0) {
- /* -EMSGSIZE implies BUG in dn_ifaddr_nlmsg_size() */
- WARN_ON(err == -EMSGSIZE);
- kfree_skb(skb);
- goto errout;
- }
- rtnl_notify(skb, &init_net, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL);
- return;
-errout:
- if (err < 0)
- rtnl_set_sk_err(&init_net, RTNLGRP_DECnet_IFADDR, err);
-}
-
-static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
-{
- struct net *net = sock_net(skb->sk);
- int idx, dn_idx = 0, skip_ndevs, skip_naddr;
- struct net_device *dev;
- struct dn_dev *dn_db;
- struct dn_ifaddr *ifa;
-
- if (!net_eq(net, &init_net))
- return 0;
-
- skip_ndevs = cb->args[0];
- skip_naddr = cb->args[1];
-
- idx = 0;
- rcu_read_lock();
- for_each_netdev_rcu(&init_net, dev) {
- if (idx < skip_ndevs)
- goto cont;
- else if (idx > skip_ndevs) {
- /* Only skip over addresses for the first dev dumped
- * in this iteration (idx == skip_ndevs) */
- skip_naddr = 0;
- }
-
- if ((dn_db = rcu_dereference(dev->dn_ptr)) == NULL)
- goto cont;
-
- for (ifa = rcu_dereference(dn_db->ifa_list), dn_idx = 0; ifa;
- ifa = rcu_dereference(ifa->ifa_next), dn_idx++) {
- if (dn_idx < skip_naddr)
- continue;
-
- if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, RTM_NEWADDR,
- NLM_F_MULTI) < 0)
- goto done;
- }
-cont:
- idx++;
- }
-done:
- rcu_read_unlock();
- cb->args[0] = idx;
- cb->args[1] = dn_idx;
-
- return skb->len;
-}
-
-static int dn_dev_get_first(struct net_device *dev, __le16 *addr)
-{
- struct dn_dev *dn_db;
- struct dn_ifaddr *ifa;
- int rv = -ENODEV;
-
- rcu_read_lock();
- dn_db = rcu_dereference(dev->dn_ptr);
- if (dn_db == NULL)
- goto out;
-
- ifa = rcu_dereference(dn_db->ifa_list);
- if (ifa != NULL) {
- *addr = ifa->ifa_local;
- rv = 0;
- }
-out:
- rcu_read_unlock();
- return rv;
-}
-
-/*
- * Find a default address to bind to.
- *
- * This is one of those areas where the initial VMS concepts don't really
- * map onto the Linux concepts, and since we introduced multiple addresses
- * per interface we have to cope with slightly odd ways of finding out what
- * "our address" really is. Mostly it's not a problem; for this we just guess
- * a sensible default. Eventually the routing code will take care of all the
- * nasties for us I hope.
- */
-int dn_dev_bind_default(__le16 *addr)
-{
- struct net_device *dev;
- int rv;
- dev = dn_dev_get_default();
-last_chance:
- if (dev) {
- rv = dn_dev_get_first(dev, addr);
- dev_put(dev);
- if (rv == 0 || dev == init_net.loopback_dev)
- return rv;
- }
- dev = init_net.loopback_dev;
- dev_hold(dev);
- goto last_chance;
-}
-
-static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa)
-{
- struct endnode_hello_message *msg;
- struct sk_buff *skb = NULL;
- __le16 *pktlen;
- struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
-
- if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL)
- return;
-
- skb->dev = dev;
-
- msg = skb_put(skb, sizeof(*msg));
-
- msg->msgflg = 0x0D;
- memcpy(msg->tiver, dn_eco_version, 3);
- dn_dn2eth(msg->id, ifa->ifa_local);
- msg->iinfo = DN_RT_INFO_ENDN;
- msg->blksize = cpu_to_le16(mtu2blksize(dev));
- msg->area = 0x00;
- memset(msg->seed, 0, 8);
- memcpy(msg->neighbor, dn_hiord, ETH_ALEN);
-
- if (dn_db->router) {
- struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
- dn_dn2eth(msg->neighbor, dn->addr);
- }
-
- msg->timer = cpu_to_le16((unsigned short)dn_db->parms.t3);
- msg->mpd = 0x00;
- msg->datalen = 0x02;
- memset(msg->data, 0xAA, 2);
-
- pktlen = skb_push(skb, 2);
- *pktlen = cpu_to_le16(skb->len - 2);
-
- skb_reset_network_header(skb);
-
- dn_rt_finish_output(skb, dn_rt_all_rt_mcast, msg->id);
-}
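
/*
 * Editor's sketch, not part of the patch: the framing step shared by
 * the hello builders.  A 16-bit little-endian length word is pushed
 * in front of the payload and counts everything after itself, which
 * is why *pktlen above is set to skb->len - 2.
 */
#include <stdint.h>
#include <string.h>

static size_t sketch_frame(uint8_t *out, const uint8_t *payload,
			   uint16_t len)
{
	out[0] = len & 0xff;		/* le16, length excludes itself */
	out[1] = len >> 8;
	memcpy(out + 2, payload, len);
	return (size_t)len + 2;
}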
-
-
-#define DRDELAY (5 * HZ)
-
-static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn_ifaddr *ifa)
-{
- /* First check time since device went up */
- if (time_before(jiffies, dn_db->uptime + DRDELAY))
- return 0;
-
- /* If there is no router, then yes... */
- if (!dn_db->router)
- return 1;
-
- /* otherwise only if we have a higher priority or.. */
- if (dn->priority < dn_db->parms.priority)
- return 1;
-
- /* if we have equal priority and a higher node number */
- if (dn->priority != dn_db->parms.priority)
- return 0;
-
- if (le16_to_cpu(dn->addr) < le16_to_cpu(ifa->ifa_local))
- return 1;
-
- return 0;
-}
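
/*
 * Editor's sketch, not part of the patch: the designated-router
 * election above as a pure predicate.  A node defers for DRDELAY
 * after the device comes up, then wins if no router is known, if its
 * priority is strictly higher, or on a priority tie if its node
 * address is higher.
 */
#include <stdbool.h>
#include <stdint.h>

static bool sketch_i_am_router(bool settled, bool router_known,
			       int rtr_prio, int my_prio,
			       uint16_t rtr_addr, uint16_t my_addr)
{
	if (!settled)			/* still inside DRDELAY */
		return false;
	if (!router_known)
		return true;
	if (rtr_prio < my_prio)
		return true;
	if (rtr_prio != my_prio)	/* strictly lower priority: lose */
		return false;
	return rtr_addr < my_addr;	/* tie-break on node address */
}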
-
-static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
-{
- int n;
- struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
- struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
- struct sk_buff *skb;
- size_t size;
- unsigned char *ptr;
- unsigned char *i1, *i2;
- __le16 *pktlen;
- char *src;
-
- if (mtu2blksize(dev) < (26 + 7))
- return;
-
- n = mtu2blksize(dev) - 26;
- n /= 7;
-
- if (n > 32)
- n = 32;
-
- size = 2 + 26 + 7 * n;
-
- if ((skb = dn_alloc_skb(NULL, size, GFP_ATOMIC)) == NULL)
- return;
-
- skb->dev = dev;
- ptr = skb_put(skb, size);
-
- *ptr++ = DN_RT_PKT_CNTL | DN_RT_PKT_ERTH;
- *ptr++ = 2; /* ECO */
- *ptr++ = 0;
- *ptr++ = 0;
- dn_dn2eth(ptr, ifa->ifa_local);
- src = ptr;
- ptr += ETH_ALEN;
- *ptr++ = dn_db->parms.forwarding == 1 ?
- DN_RT_INFO_L1RT : DN_RT_INFO_L2RT;
- *((__le16 *)ptr) = cpu_to_le16(mtu2blksize(dev));
- ptr += 2;
- *ptr++ = dn_db->parms.priority; /* Priority */
- *ptr++ = 0; /* Area: Reserved */
- *((__le16 *)ptr) = cpu_to_le16((unsigned short)dn_db->parms.t3);
- ptr += 2;
- *ptr++ = 0; /* MPD: Reserved */
- i1 = ptr++;
- memset(ptr, 0, 7); /* Name: Reserved */
- ptr += 7;
- i2 = ptr++;
-
- n = dn_neigh_elist(dev, ptr, n);
-
- *i2 = 7 * n;
- *i1 = 8 + *i2;
-
- skb_trim(skb, (27 + *i2));
-
- pktlen = skb_push(skb, 2);
- *pktlen = cpu_to_le16(skb->len - 2);
-
- skb_reset_network_header(skb);
-
- if (dn_am_i_a_router(dn, dn_db, ifa)) {
- struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
- if (skb2) {
- dn_rt_finish_output(skb2, dn_rt_all_end_mcast, src);
- }
- }
-
- dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src);
-}
-
-static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa)
-{
- struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
-
- if (dn_db->parms.forwarding == 0)
- dn_send_endnode_hello(dev, ifa);
- else
- dn_send_router_hello(dev, ifa);
-}
-
-static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa)
-{
- int tdlen = 16;
- int size = dev->hard_header_len + 2 + 4 + tdlen;
- struct sk_buff *skb = dn_alloc_skb(NULL, size, GFP_ATOMIC);
- int i;
- unsigned char *ptr;
- char src[ETH_ALEN];
-
- if (skb == NULL)
- return;
-
- skb->dev = dev;
- skb_push(skb, dev->hard_header_len);
- ptr = skb_put(skb, 2 + 4 + tdlen);
-
- *ptr++ = DN_RT_PKT_HELO;
- *((__le16 *)ptr) = ifa->ifa_local;
- ptr += 2;
- *ptr++ = tdlen;
-
- for(i = 0; i < tdlen; i++)
- *ptr++ = 0252;
-
- dn_dn2eth(src, ifa->ifa_local);
- dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src);
-}
-
-static int dn_eth_up(struct net_device *dev)
-{
- struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
-
- if (dn_db->parms.forwarding == 0)
- dev_mc_add(dev, dn_rt_all_end_mcast);
- else
- dev_mc_add(dev, dn_rt_all_rt_mcast);
-
- dn_db->use_long = 1;
-
- return 0;
-}
-
-static void dn_eth_down(struct net_device *dev)
-{
- struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
-
- if (dn_db->parms.forwarding == 0)
- dev_mc_del(dev, dn_rt_all_end_mcast);
- else
- dev_mc_del(dev, dn_rt_all_rt_mcast);
-}
-
-static void dn_dev_set_timer(struct net_device *dev);
-
-static void dn_dev_timer_func(struct timer_list *t)
-{
- struct dn_dev *dn_db = from_timer(dn_db, t, timer);
- struct net_device *dev;
- struct dn_ifaddr *ifa;
-
- rcu_read_lock();
- dev = dn_db->dev;
- if (dn_db->t3 <= dn_db->parms.t2) {
- if (dn_db->parms.timer3) {
- for (ifa = rcu_dereference(dn_db->ifa_list);
- ifa;
- ifa = rcu_dereference(ifa->ifa_next)) {
- if (!(ifa->ifa_flags & IFA_F_SECONDARY))
- dn_db->parms.timer3(dev, ifa);
- }
- }
- dn_db->t3 = dn_db->parms.t3;
- } else {
- dn_db->t3 -= dn_db->parms.t2;
- }
- rcu_read_unlock();
- dn_dev_set_timer(dev);
-}
-
-static void dn_dev_set_timer(struct net_device *dev)
-{
- struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
-
- if (dn_db->parms.t2 > dn_db->parms.t3)
- dn_db->parms.t2 = dn_db->parms.t3;
-
- dn_db->timer.expires = jiffies + (dn_db->parms.t2 * HZ);
-
- add_timer(&dn_db->timer);
-}
-
-static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
-{
- int i;
- struct dn_dev_parms *p = dn_dev_list;
- struct dn_dev *dn_db;
-
- for(i = 0; i < DN_DEV_LIST_SIZE; i++, p++) {
- if (p->type == dev->type)
- break;
- }
-
- *err = -ENODEV;
- if (i == DN_DEV_LIST_SIZE)
- return NULL;
-
- *err = -ENOBUFS;
- if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL)
- return NULL;
-
- memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
-
- rcu_assign_pointer(dev->dn_ptr, dn_db);
- dn_db->dev = dev;
- timer_setup(&dn_db->timer, dn_dev_timer_func, 0);
-
- dn_db->uptime = jiffies;
-
- dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
- if (!dn_db->neigh_parms) {
- RCU_INIT_POINTER(dev->dn_ptr, NULL);
- kfree(dn_db);
- return NULL;
- }
-
- if (dn_db->parms.up) {
- if (dn_db->parms.up(dev) < 0) {
- neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms);
- dev->dn_ptr = NULL;
- kfree(dn_db);
- return NULL;
- }
- }
-
- dn_dev_sysctl_register(dev, &dn_db->parms);
-
- dn_dev_set_timer(dev);
-
- *err = 0;
- return dn_db;
-}
-
-
-/*
- * This processes a device up event. We only start up
- * the loopback device & ethernet devices with correct
- * MAC addresses automatically. Others must be started
- * specifically.
- *
- * FIXME: How should we configure the loopback address? If we could dispense
- * with using decnet_address here and for autobind, it would be one less thing
- * for users to worry about setting up.
- */
-
-void dn_dev_up(struct net_device *dev)
-{
- struct dn_ifaddr *ifa;
- __le16 addr = decnet_address;
- int maybe_default = 0;
- struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
-
- if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
- return;
-
- /*
- * Need to ensure that loopback device has a dn_db attached to it
- * to allow creation of neighbours against it, even though it might
- * not have a local address of its own. Might as well do the same for
- * all autoconfigured interfaces.
- */
- if (dn_db == NULL) {
- int err;
- dn_db = dn_dev_create(dev, &err);
- if (dn_db == NULL)
- return;
- }
-
- if (dev->type == ARPHRD_ETHER) {
- if (memcmp(dev->dev_addr, dn_hiord, 4) != 0)
- return;
- addr = dn_eth2dn(dev->dev_addr);
- maybe_default = 1;
- }
-
- if (addr == 0)
- return;
-
- if ((ifa = dn_dev_alloc_ifa()) == NULL)
- return;
-
- ifa->ifa_local = ifa->ifa_address = addr;
- ifa->ifa_flags = 0;
- ifa->ifa_scope = RT_SCOPE_UNIVERSE;
- strcpy(ifa->ifa_label, dev->name);
-
- dn_dev_set_ifa(dev, ifa);
-
- /*
- * Automagically set the default device to the first automatically
- * configured ethernet card in the system.
- */
- if (maybe_default) {
- dev_hold(dev);
- if (dn_dev_set_default(dev, 0))
- dev_put(dev);
- }
-}
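
/*
 * Editor's sketch, not part of the patch: the autoconfiguration check
 * above.  An Ethernet device only self-configures when its MAC begins
 * with the DECnet HIORD prefix AA-00-04-00; the last two bytes then
 * are the 16-bit node address, little-endian.  These helpers mirror
 * what dn_eth2dn()/dn_dn2eth() are assumed to do.
 */
#include <stdint.h>
#include <string.h>

static uint16_t sketch_eth2dn(const uint8_t mac[6])
{
	return (uint16_t)(mac[4] | (mac[5] << 8));	/* le16 */
}

static void sketch_dn2eth(uint8_t mac[6], uint16_t addr)
{
	static const uint8_t hiord[4] = { 0xAA, 0x00, 0x04, 0x00 };

	memcpy(mac, hiord, 4);
	mac[4] = addr & 0xff;
	mac[5] = addr >> 8;
}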
-
-static void dn_dev_delete(struct net_device *dev)
-{
- struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
-
- if (dn_db == NULL)
- return;
-
- del_timer_sync(&dn_db->timer);
- dn_dev_sysctl_unregister(&dn_db->parms);
- dn_dev_check_default(dev);
- neigh_ifdown(&dn_neigh_table, dev);
-
- if (dn_db->parms.down)
- dn_db->parms.down(dev);
-
- dev->dn_ptr = NULL;
-
- neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms);
- neigh_ifdown(&dn_neigh_table, dev);
-
- if (dn_db->router)
- neigh_release(dn_db->router);
- if (dn_db->peer)
- neigh_release(dn_db->peer);
-
- kfree(dn_db);
-}
-
-void dn_dev_down(struct net_device *dev)
-{
- struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
- struct dn_ifaddr *ifa;
-
- if (dn_db == NULL)
- return;
-
- while ((ifa = rtnl_dereference(dn_db->ifa_list)) != NULL) {
- dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0);
- dn_dev_free_ifa(ifa);
- }
-
- dn_dev_delete(dev);
-}
-
-void dn_dev_init_pkt(struct sk_buff *skb)
-{
-}
-
-void dn_dev_veri_pkt(struct sk_buff *skb)
-{
-}
-
-void dn_dev_hello(struct sk_buff *skb)
-{
-}
-
-void dn_dev_devices_off(void)
-{
- struct net_device *dev;
-
- rtnl_lock();
- for_each_netdev(&init_net, dev)
- dn_dev_down(dev);
- rtnl_unlock();
-
-}
-
-void dn_dev_devices_on(void)
-{
- struct net_device *dev;
-
- rtnl_lock();
- for_each_netdev(&init_net, dev) {
- if (dev->flags & IFF_UP)
- dn_dev_up(dev);
- }
- rtnl_unlock();
-}
-
-int register_dnaddr_notifier(struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(&dnaddr_chain, nb);
-}
-
-int unregister_dnaddr_notifier(struct notifier_block *nb)
-{
- return blocking_notifier_chain_unregister(&dnaddr_chain, nb);
-}
-
-#ifdef CONFIG_PROC_FS
-static inline int is_dn_dev(struct net_device *dev)
-{
- return dev->dn_ptr != NULL;
-}
-
-static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(RCU)
-{
- int i;
- struct net_device *dev;
-
- rcu_read_lock();
-
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- i = 1;
- for_each_netdev_rcu(&init_net, dev) {
- if (!is_dn_dev(dev))
- continue;
-
- if (i++ == *pos)
- return dev;
- }
-
- return NULL;
-}
-
-static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- struct net_device *dev;
-
- ++*pos;
-
- dev = v;
- if (v == SEQ_START_TOKEN)
- dev = net_device_entry(&init_net.dev_base_head);
-
- for_each_netdev_continue_rcu(&init_net, dev) {
- if (!is_dn_dev(dev))
- continue;
-
- return dev;
- }
-
- return NULL;
-}
-
-static void dn_dev_seq_stop(struct seq_file *seq, void *v)
- __releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static char *dn_type2asc(char type)
-{
- switch (type) {
- case DN_DEV_BCAST:
- return "B";
- case DN_DEV_UCAST:
- return "U";
- case DN_DEV_MPOINT:
- return "M";
- }
-
- return "?";
-}
-
-static int dn_dev_seq_show(struct seq_file *seq, void *v)
-{
- if (v == SEQ_START_TOKEN)
- seq_puts(seq, "Name Flags T1 Timer1 T3 Timer3 BlkSize Pri State DevType Router Peer\n");
- else {
- struct net_device *dev = v;
- char peer_buf[DN_ASCBUF_LEN];
- char router_buf[DN_ASCBUF_LEN];
- struct dn_dev *dn_db = rcu_dereference(dev->dn_ptr);
-
- seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu"
- " %04hu %03d %02x %-10s %-7s %-7s\n",
- dev->name,
- dn_type2asc(dn_db->parms.mode),
- 0, 0,
- dn_db->t3, dn_db->parms.t3,
- mtu2blksize(dev),
- dn_db->parms.priority,
- dn_db->parms.state, dn_db->parms.name,
- dn_db->router ? dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->router->primary_key), router_buf) : "",
- dn_db->peer ? dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->peer->primary_key), peer_buf) : "");
- }
- return 0;
-}
-
-static const struct seq_operations dn_dev_seq_ops = {
- .start = dn_dev_seq_start,
- .next = dn_dev_seq_next,
- .stop = dn_dev_seq_stop,
- .show = dn_dev_seq_show,
-};
-#endif /* CONFIG_PROC_FS */
-
-static int addr[2];
-module_param_array(addr, int, NULL, 0444);
-MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node");
-
-void __init dn_dev_init(void)
-{
- if (addr[0] > 63 || addr[0] < 0) {
- printk(KERN_ERR "DECnet: Area must be between 0 and 63");
- return;
- }
-
- if (addr[1] > 1023 || addr[1] < 0) {
- printk(KERN_ERR "DECnet: Node must be between 0 and 1023");
- return;
- }
-
- decnet_address = cpu_to_le16((addr[0] << 10) | addr[1]);
-
- dn_dev_devices_on();
-
- rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_NEWADDR,
- dn_nl_newaddr, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_DELADDR,
- dn_nl_deladdr, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETADDR,
- NULL, dn_nl_dump_ifaddr, 0);
-
- proc_create_seq("decnet_dev", 0444, init_net.proc_net, &dn_dev_seq_ops);
-
-#ifdef CONFIG_SYSCTL
- {
- int i;
- for(i = 0; i < DN_DEV_LIST_SIZE; i++)
- dn_dev_sysctl_register(NULL, &dn_dev_list[i]);
- }
-#endif /* CONFIG_SYSCTL */
-}
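
/*
 * Editor's sketch, not part of the patch: the "area,node" module
 * parameter packed and unpacked.  A DECnet address is 16 bits, a
 * 6-bit area in the top bits over a 10-bit node, so address 1.2
 * packs to 0x0402 before the cpu_to_le16() above.
 */
#include <stdint.h>

static uint16_t sketch_pack(unsigned int area, unsigned int node)
{
	return (uint16_t)((area << 10) | node);	/* area 1-63, node 1-1023 */
}

static void sketch_unpack(uint16_t addr, unsigned int *area,
			  unsigned int *node)
{
	*area = addr >> 10;
	*node = addr & 0x03ff;
}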
-
-void __exit dn_dev_cleanup(void)
-{
-#ifdef CONFIG_SYSCTL
- {
- int i;
- for(i = 0; i < DN_DEV_LIST_SIZE; i++)
- dn_dev_sysctl_unregister(&dn_dev_list[i]);
- }
-#endif /* CONFIG_SYSCTL */
-
- remove_proc_entry("decnet_dev", init_net.proc_net);
-
- dn_dev_devices_off();
-}
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
deleted file mode 100644
index 77fbf8e9df4b..000000000000
--- a/net/decnet/dn_fib.c
+++ /dev/null
@@ -1,799 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DECnet An implementation of the DECnet protocol suite for the LINUX
- * operating system. DECnet is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * DECnet Routing Forwarding Information Base (Glue/Info List)
- *
- * Author: Steve Whitehouse <SteveW@ACM.org>
- *
- *
- * Changes:
- * Alexey Kuznetsov : SMP locking changes
- * Steve Whitehouse : Rewrote it... Well to be more correct, I
- * copied most of it from the ipv4 fib code.
- * Steve Whitehouse : Updated it in style and fixed a few bugs
- * which were fixed in the ipv4 code since
- * this code was copied from it.
- *
- */
-#include <linux/string.h>
-#include <linux/net.h>
-#include <linux/socket.h>
-#include <linux/slab.h>
-#include <linux/sockios.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/netlink.h>
-#include <linux/rtnetlink.h>
-#include <linux/proc_fs.h>
-#include <linux/netdevice.h>
-#include <linux/timer.h>
-#include <linux/spinlock.h>
-#include <linux/atomic.h>
-#include <linux/uaccess.h>
-#include <net/neighbour.h>
-#include <net/dst.h>
-#include <net/flow.h>
-#include <net/fib_rules.h>
-#include <net/dn.h>
-#include <net/dn_route.h>
-#include <net/dn_fib.h>
-#include <net/dn_neigh.h>
-#include <net/dn_dev.h>
-#include <net/rtnh.h>
-
-#define RT_MIN_TABLE 1
-
-#define for_fib_info() { struct dn_fib_info *fi;\
- for(fi = dn_fib_info_list; fi; fi = fi->fib_next)
-#define endfor_fib_info() }
-
-#define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
- for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
-
-#define change_nexthops(fi) { int nhsel; struct dn_fib_nh *nh;\
- for(nhsel = 0, nh = (struct dn_fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++)
-
-#define endfor_nexthops(fi) }
-
-static DEFINE_SPINLOCK(dn_fib_multipath_lock);
-static struct dn_fib_info *dn_fib_info_list;
-static DEFINE_SPINLOCK(dn_fib_info_lock);
-
-static struct
-{
- int error;
- u8 scope;
-} dn_fib_props[RTN_MAX+1] = {
- [RTN_UNSPEC] = { .error = 0, .scope = RT_SCOPE_NOWHERE },
- [RTN_UNICAST] = { .error = 0, .scope = RT_SCOPE_UNIVERSE },
- [RTN_LOCAL] = { .error = 0, .scope = RT_SCOPE_HOST },
- [RTN_BROADCAST] = { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE },
- [RTN_ANYCAST] = { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE },
- [RTN_MULTICAST] = { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE },
- [RTN_BLACKHOLE] = { .error = -EINVAL, .scope = RT_SCOPE_UNIVERSE },
- [RTN_UNREACHABLE] = { .error = -EHOSTUNREACH, .scope = RT_SCOPE_UNIVERSE },
- [RTN_PROHIBIT] = { .error = -EACCES, .scope = RT_SCOPE_UNIVERSE },
- [RTN_THROW] = { .error = -EAGAIN, .scope = RT_SCOPE_UNIVERSE },
- [RTN_NAT] = { .error = 0, .scope = RT_SCOPE_NOWHERE },
- [RTN_XRESOLVE] = { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE },
-};
-
-static int dn_fib_sync_down(__le16 local, struct net_device *dev, int force);
-static int dn_fib_sync_up(struct net_device *dev);
-
-void dn_fib_free_info(struct dn_fib_info *fi)
-{
- if (fi->fib_dead == 0) {
- printk(KERN_DEBUG "DECnet: BUG! Attempt to free alive dn_fib_info\n");
- return;
- }
-
- change_nexthops(fi) {
- if (nh->nh_dev)
- dev_put(nh->nh_dev);
- nh->nh_dev = NULL;
- } endfor_nexthops(fi);
- kfree(fi);
-}
-
-void dn_fib_release_info(struct dn_fib_info *fi)
-{
- spin_lock(&dn_fib_info_lock);
- if (fi && --fi->fib_treeref == 0) {
- if (fi->fib_next)
- fi->fib_next->fib_prev = fi->fib_prev;
- if (fi->fib_prev)
- fi->fib_prev->fib_next = fi->fib_next;
- if (fi == dn_fib_info_list)
- dn_fib_info_list = fi->fib_next;
- fi->fib_dead = 1;
- dn_fib_info_put(fi);
- }
- spin_unlock(&dn_fib_info_lock);
-}
-
-static inline int dn_fib_nh_comp(const struct dn_fib_info *fi, const struct dn_fib_info *ofi)
-{
- const struct dn_fib_nh *onh = ofi->fib_nh;
-
- for_nexthops(fi) {
- if (nh->nh_oif != onh->nh_oif ||
- nh->nh_gw != onh->nh_gw ||
- nh->nh_scope != onh->nh_scope ||
- nh->nh_weight != onh->nh_weight ||
- ((nh->nh_flags^onh->nh_flags)&~RTNH_F_DEAD))
- return -1;
- onh++;
- } endfor_nexthops(fi);
- return 0;
-}
-
-static inline struct dn_fib_info *dn_fib_find_info(const struct dn_fib_info *nfi)
-{
- for_fib_info() {
- if (fi->fib_nhs != nfi->fib_nhs)
- continue;
- if (nfi->fib_protocol == fi->fib_protocol &&
- nfi->fib_prefsrc == fi->fib_prefsrc &&
- nfi->fib_priority == fi->fib_priority &&
- memcmp(nfi->fib_metrics, fi->fib_metrics, sizeof(fi->fib_metrics)) == 0 &&
- ((nfi->fib_flags^fi->fib_flags)&~RTNH_F_DEAD) == 0 &&
- (nfi->fib_nhs == 0 || dn_fib_nh_comp(fi, nfi) == 0))
- return fi;
- } endfor_fib_info();
- return NULL;
-}
-
-static int dn_fib_count_nhs(const struct nlattr *attr)
-{
- struct rtnexthop *nhp = nla_data(attr);
- int nhs = 0, nhlen = nla_len(attr);
-
- while (rtnh_ok(nhp, nhlen)) {
- nhs++;
- nhp = rtnh_next(nhp, &nhlen);
- }
-
- /* leftover implies invalid nexthop configuration, discard it */
- return nhlen > 0 ? 0 : nhs;
-}
-
-static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
- const struct rtmsg *r)
-{
- struct rtnexthop *nhp = nla_data(attr);
- int nhlen = nla_len(attr);
-
- change_nexthops(fi) {
- int attrlen;
-
- if (!rtnh_ok(nhp, nhlen))
- return -EINVAL;
-
- nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags;
- nh->nh_oif = nhp->rtnh_ifindex;
- nh->nh_weight = nhp->rtnh_hops + 1;
-
- attrlen = rtnh_attrlen(nhp);
- if (attrlen > 0) {
- struct nlattr *gw_attr;
-
- gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY);
- nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0;
- }
-
- nhp = rtnh_next(nhp, &nhlen);
- } endfor_nexthops(fi);
-
- return 0;
-}
-
-
-static int dn_fib_check_nh(const struct rtmsg *r, struct dn_fib_info *fi, struct dn_fib_nh *nh)
-{
- int err;
-
- if (nh->nh_gw) {
- struct flowidn fld;
- struct dn_fib_res res;
-
- if (nh->nh_flags&RTNH_F_ONLINK) {
- struct net_device *dev;
-
- if (r->rtm_scope >= RT_SCOPE_LINK)
- return -EINVAL;
- if (dnet_addr_type(nh->nh_gw) != RTN_UNICAST)
- return -EINVAL;
- if ((dev = __dev_get_by_index(&init_net, nh->nh_oif)) == NULL)
- return -ENODEV;
- if (!(dev->flags&IFF_UP))
- return -ENETDOWN;
- nh->nh_dev = dev;
- dev_hold(dev);
- nh->nh_scope = RT_SCOPE_LINK;
- return 0;
- }
-
- memset(&fld, 0, sizeof(fld));
- fld.daddr = nh->nh_gw;
- fld.flowidn_oif = nh->nh_oif;
- fld.flowidn_scope = r->rtm_scope + 1;
-
- if (fld.flowidn_scope < RT_SCOPE_LINK)
- fld.flowidn_scope = RT_SCOPE_LINK;
-
- if ((err = dn_fib_lookup(&fld, &res)) != 0)
- return err;
-
- err = -EINVAL;
- if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
- goto out;
- nh->nh_scope = res.scope;
- nh->nh_oif = DN_FIB_RES_OIF(res);
- nh->nh_dev = DN_FIB_RES_DEV(res);
- if (nh->nh_dev == NULL)
- goto out;
- dev_hold(nh->nh_dev);
- err = -ENETDOWN;
- if (!(nh->nh_dev->flags & IFF_UP))
- goto out;
- err = 0;
-out:
- dn_fib_res_put(&res);
- return err;
- } else {
- struct net_device *dev;
-
- if (nh->nh_flags&(RTNH_F_PERVASIVE|RTNH_F_ONLINK))
- return -EINVAL;
-
- dev = __dev_get_by_index(&init_net, nh->nh_oif);
- if (dev == NULL || dev->dn_ptr == NULL)
- return -ENODEV;
- if (!(dev->flags&IFF_UP))
- return -ENETDOWN;
- nh->nh_dev = dev;
- dev_hold(nh->nh_dev);
- nh->nh_scope = RT_SCOPE_HOST;
- }
-
- return 0;
-}
-
-
-struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct nlattr *attrs[],
- const struct nlmsghdr *nlh, int *errp)
-{
- int err;
- struct dn_fib_info *fi = NULL;
- struct dn_fib_info *ofi;
- int nhs = 1;
-
- if (r->rtm_type > RTN_MAX)
- goto err_inval;
-
- if (dn_fib_props[r->rtm_type].scope > r->rtm_scope)
- goto err_inval;
-
- if (attrs[RTA_MULTIPATH] &&
- (nhs = dn_fib_count_nhs(attrs[RTA_MULTIPATH])) == 0)
- goto err_inval;
-
- fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL);
- err = -ENOBUFS;
- if (fi == NULL)
- goto failure;
-
- fi->fib_protocol = r->rtm_protocol;
- fi->fib_nhs = nhs;
- fi->fib_flags = r->rtm_flags;
-
- if (attrs[RTA_PRIORITY])
- fi->fib_priority = nla_get_u32(attrs[RTA_PRIORITY]);
-
- if (attrs[RTA_METRICS]) {
- struct nlattr *attr;
- int rem;
-
- nla_for_each_nested(attr, attrs[RTA_METRICS], rem) {
- int type = nla_type(attr);
-
- if (type) {
- if (type > RTAX_MAX || type == RTAX_CC_ALGO ||
- nla_len(attr) < 4)
- goto err_inval;
-
- fi->fib_metrics[type-1] = nla_get_u32(attr);
- }
- }
- }
-
- if (attrs[RTA_PREFSRC])
- fi->fib_prefsrc = nla_get_le16(attrs[RTA_PREFSRC]);
-
- if (attrs[RTA_MULTIPATH]) {
- if ((err = dn_fib_get_nhs(fi, attrs[RTA_MULTIPATH], r)) != 0)
- goto failure;
-
- if (attrs[RTA_OIF] &&
- fi->fib_nh->nh_oif != nla_get_u32(attrs[RTA_OIF]))
- goto err_inval;
-
- if (attrs[RTA_GATEWAY] &&
- fi->fib_nh->nh_gw != nla_get_le16(attrs[RTA_GATEWAY]))
- goto err_inval;
- } else {
- struct dn_fib_nh *nh = fi->fib_nh;
-
- if (attrs[RTA_OIF])
- nh->nh_oif = nla_get_u32(attrs[RTA_OIF]);
-
- if (attrs[RTA_GATEWAY])
- nh->nh_gw = nla_get_le16(attrs[RTA_GATEWAY]);
-
- nh->nh_flags = r->rtm_flags;
- nh->nh_weight = 1;
- }
-
- if (r->rtm_type == RTN_NAT) {
- if (!attrs[RTA_GATEWAY] || nhs != 1 || attrs[RTA_OIF])
- goto err_inval;
-
- fi->fib_nh->nh_gw = nla_get_le16(attrs[RTA_GATEWAY]);
- goto link_it;
- }
-
- if (dn_fib_props[r->rtm_type].error) {
- if (attrs[RTA_GATEWAY] || attrs[RTA_OIF] || attrs[RTA_MULTIPATH])
- goto err_inval;
-
- goto link_it;
- }
-
- if (r->rtm_scope > RT_SCOPE_HOST)
- goto err_inval;
-
- if (r->rtm_scope == RT_SCOPE_HOST) {
- struct dn_fib_nh *nh = fi->fib_nh;
-
- /* Local address is added */
- if (nhs != 1 || nh->nh_gw)
- goto err_inval;
- nh->nh_scope = RT_SCOPE_NOWHERE;
- nh->nh_dev = dev_get_by_index(&init_net, fi->fib_nh->nh_oif);
- err = -ENODEV;
- if (nh->nh_dev == NULL)
- goto failure;
- } else {
- change_nexthops(fi) {
- if ((err = dn_fib_check_nh(r, fi, nh)) != 0)
- goto failure;
- } endfor_nexthops(fi)
- }
-
- if (fi->fib_prefsrc) {
- if (r->rtm_type != RTN_LOCAL || !attrs[RTA_DST] ||
- fi->fib_prefsrc != nla_get_le16(attrs[RTA_DST]))
- if (dnet_addr_type(fi->fib_prefsrc) != RTN_LOCAL)
- goto err_inval;
- }
-
-link_it:
- if ((ofi = dn_fib_find_info(fi)) != NULL) {
- fi->fib_dead = 1;
- dn_fib_free_info(fi);
- ofi->fib_treeref++;
- return ofi;
- }
-
- fi->fib_treeref++;
- refcount_set(&fi->fib_clntref, 1);
- spin_lock(&dn_fib_info_lock);
- fi->fib_next = dn_fib_info_list;
- fi->fib_prev = NULL;
- if (dn_fib_info_list)
- dn_fib_info_list->fib_prev = fi;
- dn_fib_info_list = fi;
- spin_unlock(&dn_fib_info_lock);
- return fi;
-
-err_inval:
- err = -EINVAL;
-
-failure:
- *errp = err;
- if (fi) {
- fi->fib_dead = 1;
- dn_fib_free_info(fi);
- }
-
- return NULL;
-}
-
-int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowidn *fld, struct dn_fib_res *res)
-{
- int err = dn_fib_props[type].error;
-
- if (err == 0) {
- if (fi->fib_flags & RTNH_F_DEAD)
- return 1;
-
- res->fi = fi;
-
- switch (type) {
- case RTN_NAT:
- DN_FIB_RES_RESET(*res);
- refcount_inc(&fi->fib_clntref);
- return 0;
- case RTN_UNICAST:
- case RTN_LOCAL:
- for_nexthops(fi) {
- if (nh->nh_flags & RTNH_F_DEAD)
- continue;
- if (!fld->flowidn_oif ||
- fld->flowidn_oif == nh->nh_oif)
- break;
- }
- if (nhsel < fi->fib_nhs) {
- res->nh_sel = nhsel;
- refcount_inc(&fi->fib_clntref);
- return 0;
- }
- endfor_nexthops(fi);
- res->fi = NULL;
- return 1;
- default:
-			net_err_ratelimited("DECnet: impossible routing event: dn_fib_semantic_match type=%d\n",
- type);
- res->fi = NULL;
- return -EINVAL;
- }
- }
- return err;
-}
-
-void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res)
-{
- struct dn_fib_info *fi = res->fi;
- int w;
-
- spin_lock_bh(&dn_fib_multipath_lock);
- if (fi->fib_power <= 0) {
- int power = 0;
- change_nexthops(fi) {
- if (!(nh->nh_flags&RTNH_F_DEAD)) {
- power += nh->nh_weight;
- nh->nh_power = nh->nh_weight;
- }
- } endfor_nexthops(fi);
- fi->fib_power = power;
- if (power < 0) {
- spin_unlock_bh(&dn_fib_multipath_lock);
- res->nh_sel = 0;
- return;
- }
- }
-
- w = jiffies % fi->fib_power;
-
- change_nexthops(fi) {
- if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) {
- if ((w -= nh->nh_power) <= 0) {
- nh->nh_power--;
- fi->fib_power--;
- res->nh_sel = nhsel;
- spin_unlock_bh(&dn_fib_multipath_lock);
- return;
- }
- }
- } endfor_nexthops(fi);
- res->nh_sel = 0;
- spin_unlock_bh(&dn_fib_multipath_lock);
-}
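dn_fib_select_multipath() above is a weighted round-robin selector: whenever the shared budget fi->fib_power is exhausted, every live nexthop is recharged with nh_weight units of "power"; a position drawn from jiffies then walks the cumulative credits and the chosen hop pays one unit per selection. A standalone sketch of the same policy (hypothetical types and names, not the kernel API):

/*
 * Weighted round-robin pick. 'budget' mirrors fi->fib_power and must
 * persist across calls so credits are spent down before recharging.
 */
struct hop { int weight; int power; int dead; };

static int pick_hop(struct hop *hops, int n, int *budget, unsigned long seed)
{
	long w;
	int i;

	if (*budget <= 0) {			/* all credit spent: recharge */
		*budget = 0;
		for (i = 0; i < n; i++)
			if (!hops[i].dead)
				*budget += (hops[i].power = hops[i].weight);
		if (*budget <= 0)		/* no live nexthops */
			return 0;
	}

	w = seed % *budget;
	for (i = 0; i < n; i++) {
		if (hops[i].dead || hops[i].power <= 0)
			continue;
		w -= hops[i].power;
		if (w <= 0) {
			hops[i].power--;	/* one credit from the hop... */
			(*budget)--;		/* ...and from the shared pool */
			return i;
		}
	}
	return 0;				/* defensive fallback */
}

Over many calls each live hop is chosen roughly in proportion to its weight (nh_weight, which dn_fib_create_info() above defaults to 1 for single-path routes).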
-
-static inline u32 rtm_get_table(struct nlattr *attrs[], u8 table)
-{
- if (attrs[RTA_TABLE])
- table = nla_get_u32(attrs[RTA_TABLE]);
-
- return table;
-}
-
-static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack)
-{
- struct net *net = sock_net(skb->sk);
- struct dn_fib_table *tb;
- struct rtmsg *r = nlmsg_data(nlh);
- struct nlattr *attrs[RTA_MAX+1];
- int err;
-
- if (!netlink_capable(skb, CAP_NET_ADMIN))
- return -EPERM;
-
- if (!net_eq(net, &init_net))
- return -EINVAL;
-
- err = nlmsg_parse_deprecated(nlh, sizeof(*r), attrs, RTA_MAX,
- rtm_dn_policy, extack);
- if (err < 0)
- return err;
-
- tb = dn_fib_get_table(rtm_get_table(attrs, r->rtm_table), 0);
- if (!tb)
- return -ESRCH;
-
- return tb->delete(tb, r, attrs, nlh, &NETLINK_CB(skb));
-}
-
-static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack)
-{
- struct net *net = sock_net(skb->sk);
- struct dn_fib_table *tb;
- struct rtmsg *r = nlmsg_data(nlh);
- struct nlattr *attrs[RTA_MAX+1];
- int err;
-
- if (!netlink_capable(skb, CAP_NET_ADMIN))
- return -EPERM;
-
- if (!net_eq(net, &init_net))
- return -EINVAL;
-
- err = nlmsg_parse_deprecated(nlh, sizeof(*r), attrs, RTA_MAX,
- rtm_dn_policy, extack);
- if (err < 0)
- return err;
-
- tb = dn_fib_get_table(rtm_get_table(attrs, r->rtm_table), 1);
- if (!tb)
- return -ENOBUFS;
-
- return tb->insert(tb, r, attrs, nlh, &NETLINK_CB(skb));
-}
-
-static void fib_magic(int cmd, int type, __le16 dst, int dst_len, struct dn_ifaddr *ifa)
-{
- struct dn_fib_table *tb;
- struct {
- struct nlmsghdr nlh;
- struct rtmsg rtm;
- } req;
- struct {
- struct nlattr hdr;
- __le16 dst;
- } dst_attr = {
- .dst = dst,
- };
- struct {
- struct nlattr hdr;
- __le16 prefsrc;
- } prefsrc_attr = {
- .prefsrc = ifa->ifa_local,
- };
- struct {
- struct nlattr hdr;
- u32 oif;
- } oif_attr = {
- .oif = ifa->ifa_dev->dev->ifindex,
- };
- struct nlattr *attrs[RTA_MAX+1] = {
- [RTA_DST] = (struct nlattr *) &dst_attr,
- [RTA_PREFSRC] = (struct nlattr * ) &prefsrc_attr,
- [RTA_OIF] = (struct nlattr *) &oif_attr,
- };
-
- memset(&req.rtm, 0, sizeof(req.rtm));
-
- if (type == RTN_UNICAST)
- tb = dn_fib_get_table(RT_MIN_TABLE, 1);
- else
- tb = dn_fib_get_table(RT_TABLE_LOCAL, 1);
-
- if (tb == NULL)
- return;
-
- req.nlh.nlmsg_len = sizeof(req);
- req.nlh.nlmsg_type = cmd;
- req.nlh.nlmsg_flags = NLM_F_REQUEST|NLM_F_CREATE|NLM_F_APPEND;
- req.nlh.nlmsg_pid = 0;
- req.nlh.nlmsg_seq = 0;
-
- req.rtm.rtm_dst_len = dst_len;
- req.rtm.rtm_table = tb->n;
- req.rtm.rtm_protocol = RTPROT_KERNEL;
- req.rtm.rtm_scope = (type != RTN_LOCAL ? RT_SCOPE_LINK : RT_SCOPE_HOST);
- req.rtm.rtm_type = type;
-
- if (cmd == RTM_NEWROUTE)
- tb->insert(tb, &req.rtm, attrs, &req.nlh, NULL);
- else
- tb->delete(tb, &req.rtm, attrs, &req.nlh, NULL);
-}
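fib_magic() above fakes just enough of a netlink request on the stack to drive tb->insert()/tb->delete(): each pseudo-attribute is a struct laid out so that its payload sits exactly where the nla_* accessors expect it, immediately after the four-byte struct nlattr header. Illustratively (relying only on the standard netlink layout):

/*
 * For the on-stack pseudo-attribute
 *
 *	struct { struct nlattr hdr; __le16 dst; } dst_attr;
 *
 * the payload directly follows the 4-byte header, so
 *
 *	nla_data(&dst_attr.hdr) == (char *)&dst_attr + NLA_HDRLEN
 *	                        == (char *)&dst_attr.dst
 *
 * which is why nla_get_le16(attrs[RTA_DST]) reads back the address
 * stored in .dst even though hdr.nla_len and hdr.nla_type stay zero.
 */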
-
-static void dn_fib_add_ifaddr(struct dn_ifaddr *ifa)
-{
-
- fib_magic(RTM_NEWROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa);
-
-#if 0
- if (!(dev->flags&IFF_UP))
- return;
- /* In the future, we will want to add default routes here */
-
-#endif
-}
-
-static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa)
-{
- int found_it = 0;
- struct net_device *dev;
- struct dn_dev *dn_db;
- struct dn_ifaddr *ifa2;
-
- ASSERT_RTNL();
-
- /* Scan device list */
- rcu_read_lock();
- for_each_netdev_rcu(&init_net, dev) {
- dn_db = rcu_dereference(dev->dn_ptr);
- if (dn_db == NULL)
- continue;
- for (ifa2 = rcu_dereference(dn_db->ifa_list);
- ifa2 != NULL;
- ifa2 = rcu_dereference(ifa2->ifa_next)) {
- if (ifa2->ifa_local == ifa->ifa_local) {
- found_it = 1;
- break;
- }
- }
- }
- rcu_read_unlock();
-
- if (found_it == 0) {
- fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa);
-
- if (dnet_addr_type(ifa->ifa_local) != RTN_LOCAL) {
- if (dn_fib_sync_down(ifa->ifa_local, NULL, 0))
- dn_fib_flush();
- }
- }
-}
-
-static void dn_fib_disable_addr(struct net_device *dev, int force)
-{
- if (dn_fib_sync_down(0, dev, force))
- dn_fib_flush();
- dn_rt_cache_flush(0);
- neigh_ifdown(&dn_neigh_table, dev);
-}
-
-static int dn_fib_dnaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
-{
- struct dn_ifaddr *ifa = (struct dn_ifaddr *)ptr;
-
- switch (event) {
- case NETDEV_UP:
- dn_fib_add_ifaddr(ifa);
- dn_fib_sync_up(ifa->ifa_dev->dev);
- dn_rt_cache_flush(-1);
- break;
- case NETDEV_DOWN:
- dn_fib_del_ifaddr(ifa);
- if (ifa->ifa_dev && ifa->ifa_dev->ifa_list == NULL) {
- dn_fib_disable_addr(ifa->ifa_dev->dev, 1);
- } else {
- dn_rt_cache_flush(-1);
- }
- break;
- }
- return NOTIFY_DONE;
-}
-
-static int dn_fib_sync_down(__le16 local, struct net_device *dev, int force)
-{
- int ret = 0;
- int scope = RT_SCOPE_NOWHERE;
-
- if (force)
- scope = -1;
-
- for_fib_info() {
- /*
-		 * This makes no sense for DECnet... we will almost
-		 * certainly have the same local address on more than
-		 * one of our interfaces. It needs some more thought.
- */
- if (local && fi->fib_prefsrc == local) {
- fi->fib_flags |= RTNH_F_DEAD;
- ret++;
- } else if (dev && fi->fib_nhs) {
- int dead = 0;
-
- change_nexthops(fi) {
- if (nh->nh_flags&RTNH_F_DEAD)
- dead++;
- else if (nh->nh_dev == dev &&
- nh->nh_scope != scope) {
- spin_lock_bh(&dn_fib_multipath_lock);
- nh->nh_flags |= RTNH_F_DEAD;
- fi->fib_power -= nh->nh_power;
- nh->nh_power = 0;
- spin_unlock_bh(&dn_fib_multipath_lock);
- dead++;
- }
- } endfor_nexthops(fi)
- if (dead == fi->fib_nhs) {
- fi->fib_flags |= RTNH_F_DEAD;
- ret++;
- }
- }
- } endfor_fib_info();
- return ret;
-}
-
-
-static int dn_fib_sync_up(struct net_device *dev)
-{
- int ret = 0;
-
- if (!(dev->flags&IFF_UP))
- return 0;
-
- for_fib_info() {
- int alive = 0;
-
- change_nexthops(fi) {
- if (!(nh->nh_flags&RTNH_F_DEAD)) {
- alive++;
- continue;
- }
- if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP))
- continue;
- if (nh->nh_dev != dev || dev->dn_ptr == NULL)
- continue;
- alive++;
- spin_lock_bh(&dn_fib_multipath_lock);
- nh->nh_power = 0;
- nh->nh_flags &= ~RTNH_F_DEAD;
- spin_unlock_bh(&dn_fib_multipath_lock);
- } endfor_nexthops(fi);
-
- if (alive > 0) {
- fi->fib_flags &= ~RTNH_F_DEAD;
- ret++;
- }
- } endfor_fib_info();
- return ret;
-}
-
-static struct notifier_block dn_fib_dnaddr_notifier = {
- .notifier_call = dn_fib_dnaddr_event,
-};
-
-void __exit dn_fib_cleanup(void)
-{
- dn_fib_table_cleanup();
- dn_fib_rules_cleanup();
-
- unregister_dnaddr_notifier(&dn_fib_dnaddr_notifier);
-}
-
-
-void __init dn_fib_init(void)
-{
- dn_fib_table_init();
- dn_fib_rules_init();
-
- register_dnaddr_notifier(&dn_fib_dnaddr_notifier);
-
- rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_NEWROUTE,
- dn_fib_rtm_newroute, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_DELROUTE,
- dn_fib_rtm_delroute, NULL, 0);
-}
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
deleted file mode 100644
index 94b306f6d551..000000000000
--- a/net/decnet/dn_neigh.c
+++ /dev/null
@@ -1,605 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DECnet An implementation of the DECnet protocol suite for the LINUX
- * operating system. DECnet is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * DECnet Neighbour Functions (Adjacency Database and
- * On-Ethernet Cache)
- *
- * Author: Steve Whitehouse <SteveW@ACM.org>
- *
- *
- * Changes:
- * Steve Whitehouse : Fixed router listing routine
- * Steve Whitehouse : Added error_report functions
- * Steve Whitehouse : Added default router detection
- * Steve Whitehouse : Hop counts in outgoing messages
- * Steve Whitehouse : Fixed src/dst in outgoing messages so
- * forwarding now stands a good chance of
- * working.
- * Steve Whitehouse : Fixed neighbour states (for now anyway).
- * Steve Whitehouse : Made error_report functions dummies. This
- * is not the right place to return skbs.
- * Steve Whitehouse : Convert to seq_file
- *
- */
-
-#include <linux/net.h>
-#include <linux/module.h>
-#include <linux/socket.h>
-#include <linux/if_arp.h>
-#include <linux/slab.h>
-#include <linux/if_ether.h>
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/string.h>
-#include <linux/netfilter_decnet.h>
-#include <linux/spinlock.h>
-#include <linux/seq_file.h>
-#include <linux/rcupdate.h>
-#include <linux/jhash.h>
-#include <linux/atomic.h>
-#include <net/net_namespace.h>
-#include <net/neighbour.h>
-#include <net/dst.h>
-#include <net/flow.h>
-#include <net/dn.h>
-#include <net/dn_dev.h>
-#include <net/dn_neigh.h>
-#include <net/dn_route.h>
-
-static int dn_neigh_construct(struct neighbour *);
-static void dn_neigh_error_report(struct neighbour *, struct sk_buff *);
-static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb);
-
-/*
- * Operations for adding the link layer header.
- */
-static const struct neigh_ops dn_neigh_ops = {
- .family = AF_DECnet,
- .error_report = dn_neigh_error_report,
- .output = dn_neigh_output,
- .connected_output = dn_neigh_output,
-};
-
-static u32 dn_neigh_hash(const void *pkey,
- const struct net_device *dev,
- __u32 *hash_rnd)
-{
- return jhash_2words(*(__u16 *)pkey, 0, hash_rnd[0]);
-}
-
-static bool dn_key_eq(const struct neighbour *neigh, const void *pkey)
-{
- return neigh_key_eq16(neigh, pkey);
-}
-
-struct neigh_table dn_neigh_table = {
- .family = PF_DECnet,
- .entry_size = NEIGH_ENTRY_SIZE(sizeof(struct dn_neigh)),
- .key_len = sizeof(__le16),
- .protocol = cpu_to_be16(ETH_P_DNA_RT),
- .hash = dn_neigh_hash,
- .key_eq = dn_key_eq,
- .constructor = dn_neigh_construct,
- .id = "dn_neigh_cache",
-	.parms = {
- .tbl = &dn_neigh_table,
- .reachable_time = 30 * HZ,
- .data = {
- [NEIGH_VAR_MCAST_PROBES] = 0,
- [NEIGH_VAR_UCAST_PROBES] = 0,
- [NEIGH_VAR_APP_PROBES] = 0,
- [NEIGH_VAR_RETRANS_TIME] = 1 * HZ,
- [NEIGH_VAR_BASE_REACHABLE_TIME] = 30 * HZ,
- [NEIGH_VAR_DELAY_PROBE_TIME] = 5 * HZ,
- [NEIGH_VAR_GC_STALETIME] = 60 * HZ,
- [NEIGH_VAR_QUEUE_LEN_BYTES] = SK_WMEM_MAX,
- [NEIGH_VAR_PROXY_QLEN] = 0,
- [NEIGH_VAR_ANYCAST_DELAY] = 0,
- [NEIGH_VAR_PROXY_DELAY] = 0,
- [NEIGH_VAR_LOCKTIME] = 1 * HZ,
- },
- },
- .gc_interval = 30 * HZ,
- .gc_thresh1 = 128,
- .gc_thresh2 = 512,
- .gc_thresh3 = 1024,
-};
-
-static int dn_neigh_construct(struct neighbour *neigh)
-{
- struct net_device *dev = neigh->dev;
- struct dn_neigh *dn = container_of(neigh, struct dn_neigh, n);
- struct dn_dev *dn_db;
- struct neigh_parms *parms;
-
- rcu_read_lock();
- dn_db = rcu_dereference(dev->dn_ptr);
- if (dn_db == NULL) {
- rcu_read_unlock();
- return -EINVAL;
- }
-
- parms = dn_db->neigh_parms;
- if (!parms) {
- rcu_read_unlock();
- return -EINVAL;
- }
-
- __neigh_parms_put(neigh->parms);
- neigh->parms = neigh_parms_clone(parms);
- rcu_read_unlock();
-
- neigh->ops = &dn_neigh_ops;
- neigh->nud_state = NUD_NOARP;
- neigh->output = neigh->ops->connected_output;
-
- if ((dev->type == ARPHRD_IPGRE) || (dev->flags & IFF_POINTOPOINT))
- memcpy(neigh->ha, dev->broadcast, dev->addr_len);
- else if ((dev->type == ARPHRD_ETHER) || (dev->type == ARPHRD_LOOPBACK))
- dn_dn2eth(neigh->ha, dn->addr);
- else {
- net_dbg_ratelimited("Trying to create neigh for hw %d\n",
- dev->type);
- return -EINVAL;
- }
-
- /*
-	 * Make an estimate of the remote block size by assuming that it is
-	 * two less than the device MTU, which is true for Ethernet (and
-	 * other things which support long format headers) since there is
-	 * an extra length field (of 16 bits) which isn't part of the
-	 * Ethernet headers and which the DECnet specs won't admit is part
-	 * of the DECnet routing headers either.
-	 *
-	 * If we overestimate here it is no big deal, since the NSP
-	 * negotiations will prevent us from sending packets which are too
-	 * large for the remote node to handle. In any case, this figure is
-	 * normally updated by a hello message.
- */
- dn->blksize = dev->mtu - 2;
-
- return 0;
-}
-
-static void dn_neigh_error_report(struct neighbour *neigh, struct sk_buff *skb)
-{
- printk(KERN_DEBUG "dn_neigh_error_report: called\n");
- kfree_skb(skb);
-}
-
-static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb)
-{
- struct dst_entry *dst = skb_dst(skb);
- struct dn_route *rt = (struct dn_route *)dst;
- struct net_device *dev = neigh->dev;
- char mac_addr[ETH_ALEN];
- unsigned int seq;
- int err;
-
- dn_dn2eth(mac_addr, rt->rt_local_src);
- do {
- seq = read_seqbegin(&neigh->ha_lock);
- err = dev_hard_header(skb, dev, ntohs(skb->protocol),
- neigh->ha, mac_addr, skb->len);
- } while (read_seqretry(&neigh->ha_lock, seq));
-
- if (err >= 0)
- err = dev_queue_xmit(skb);
- else {
- kfree_skb(skb);
- err = -EINVAL;
- }
- return err;
-}
-
-static int dn_neigh_output_packet(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
- struct dst_entry *dst = skb_dst(skb);
- struct dn_route *rt = (struct dn_route *)dst;
- struct neighbour *neigh = rt->n;
-
- return neigh->output(neigh, skb);
-}
-
-/*
- * For talking to broadcast devices: Ethernet & PPP
- */
-static int dn_long_output(struct neighbour *neigh, struct sock *sk,
- struct sk_buff *skb)
-{
- struct net_device *dev = neigh->dev;
- int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3;
- unsigned char *data;
- struct dn_long_packet *lp;
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
-
-
- if (skb_headroom(skb) < headroom) {
- struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
- if (skb2 == NULL) {
- net_crit_ratelimited("dn_long_output: no memory\n");
- kfree_skb(skb);
- return -ENOBUFS;
- }
- consume_skb(skb);
- skb = skb2;
- net_info_ratelimited("dn_long_output: Increasing headroom\n");
- }
-
- data = skb_push(skb, sizeof(struct dn_long_packet) + 3);
- lp = (struct dn_long_packet *)(data+3);
-
- *((__le16 *)data) = cpu_to_le16(skb->len - 2);
- *(data + 2) = 1 | DN_RT_F_PF; /* Padding */
-
- lp->msgflg = DN_RT_PKT_LONG|(cb->rt_flags&(DN_RT_F_IE|DN_RT_F_RQR|DN_RT_F_RTS));
- lp->d_area = lp->d_subarea = 0;
- dn_dn2eth(lp->d_id, cb->dst);
- lp->s_area = lp->s_subarea = 0;
- dn_dn2eth(lp->s_id, cb->src);
- lp->nl2 = 0;
- lp->visit_ct = cb->hops & 0x3f;
- lp->s_class = 0;
- lp->pt = 0;
-
- skb_reset_network_header(skb);
-
- return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING,
- &init_net, sk, skb, NULL, neigh->dev,
- dn_neigh_output_packet);
-}
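For the long (Ethernet) data format, the routing header is assembled in front of the NSP payload as a little-endian length word, one padding byte and the fixed dn_long_packet structure; the length word is the extra 16-bit field mentioned in the block-size comment in dn_neigh_construct() above. The resulting framing, as a sketch:

/*
 * On-wire framing built by dn_long_output():
 *
 *	[0..1]	__le16 length = skb->len - 2 (everything after these bytes)
 *	[2]	padding byte: count of 1 with DN_RT_F_PF set
 *	[3..]	struct dn_long_packet (msgflg, 6-byte dst/src ids,
 *		visit count, ...)
 */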
-
-/*
- * For talking to pointopoint and multidrop devices: DDCMP and X.25
- */
-static int dn_short_output(struct neighbour *neigh, struct sock *sk,
- struct sk_buff *skb)
-{
- struct net_device *dev = neigh->dev;
- int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
- struct dn_short_packet *sp;
- unsigned char *data;
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
-
-
- if (skb_headroom(skb) < headroom) {
- struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
- if (skb2 == NULL) {
- net_crit_ratelimited("dn_short_output: no memory\n");
- kfree_skb(skb);
- return -ENOBUFS;
- }
- consume_skb(skb);
- skb = skb2;
- net_info_ratelimited("dn_short_output: Increasing headroom\n");
- }
-
- data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
- *((__le16 *)data) = cpu_to_le16(skb->len - 2);
- sp = (struct dn_short_packet *)(data+2);
-
- sp->msgflg = DN_RT_PKT_SHORT|(cb->rt_flags&(DN_RT_F_RQR|DN_RT_F_RTS));
- sp->dstnode = cb->dst;
- sp->srcnode = cb->src;
- sp->forward = cb->hops & 0x3f;
-
- skb_reset_network_header(skb);
-
- return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING,
- &init_net, sk, skb, NULL, neigh->dev,
- dn_neigh_output_packet);
-}
-
-/*
- * For talking to DECnet Phase III nodes.
- * Phase III output is the same as short output, except that
- * it clears the area bits before transmission.
- */
-static int dn_phase3_output(struct neighbour *neigh, struct sock *sk,
- struct sk_buff *skb)
-{
- struct net_device *dev = neigh->dev;
- int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
- struct dn_short_packet *sp;
- unsigned char *data;
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
-
- if (skb_headroom(skb) < headroom) {
- struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
- if (skb2 == NULL) {
- net_crit_ratelimited("dn_phase3_output: no memory\n");
- kfree_skb(skb);
- return -ENOBUFS;
- }
- consume_skb(skb);
- skb = skb2;
- net_info_ratelimited("dn_phase3_output: Increasing headroom\n");
- }
-
- data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
- *((__le16 *)data) = cpu_to_le16(skb->len - 2);
- sp = (struct dn_short_packet *)(data + 2);
-
- sp->msgflg = DN_RT_PKT_SHORT|(cb->rt_flags&(DN_RT_F_RQR|DN_RT_F_RTS));
- sp->dstnode = cb->dst & cpu_to_le16(0x03ff);
- sp->srcnode = cb->src & cpu_to_le16(0x03ff);
- sp->forward = cb->hops & 0x3f;
-
- skb_reset_network_header(skb);
-
- return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING,
- &init_net, sk, skb, NULL, neigh->dev,
- dn_neigh_output_packet);
-}
-
-int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
- struct dst_entry *dst = skb_dst(skb);
- struct dn_route *rt = (struct dn_route *) dst;
- struct neighbour *neigh = rt->n;
- struct dn_neigh *dn = container_of(neigh, struct dn_neigh, n);
- struct dn_dev *dn_db;
- bool use_long;
-
- rcu_read_lock();
- dn_db = rcu_dereference(neigh->dev->dn_ptr);
- if (dn_db == NULL) {
- rcu_read_unlock();
- return -EINVAL;
- }
- use_long = dn_db->use_long;
- rcu_read_unlock();
-
- if (dn->flags & DN_NDFLAG_P3)
- return dn_phase3_output(neigh, sk, skb);
- if (use_long)
- return dn_long_output(neigh, sk, skb);
- else
- return dn_short_output(neigh, sk, skb);
-}
-
-/*
- * Unfortunately, the neighbour code uses the device in its hash
- * function, so we don't get any advantage from it. This function
- * basically does a neigh_lookup(), but without comparing the device
- * field. This is required for the On-Ethernet cache.
- */
-
-/*
- * Pointopoint link receives a hello message
- */
-void dn_neigh_pointopoint_hello(struct sk_buff *skb)
-{
- kfree_skb(skb);
-}
-
-/*
- * Ethernet router hello message received
- */
-int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
- struct rtnode_hello_message *msg = (struct rtnode_hello_message *)skb->data;
-
- struct neighbour *neigh;
- struct dn_neigh *dn;
- struct dn_dev *dn_db;
- __le16 src;
-
- src = dn_eth2dn(msg->id);
-
- neigh = __neigh_lookup(&dn_neigh_table, &src, skb->dev, 1);
-
-	if (neigh) {
-		dn = container_of(neigh, struct dn_neigh, n);
-
-		write_lock(&neigh->lock);
-
- neigh->used = jiffies;
- dn_db = rcu_dereference(neigh->dev->dn_ptr);
-
- if (!(neigh->nud_state & NUD_PERMANENT)) {
- neigh->updated = jiffies;
-
- if (neigh->dev->type == ARPHRD_ETHER)
- memcpy(neigh->ha, &eth_hdr(skb)->h_source, ETH_ALEN);
-
- dn->blksize = le16_to_cpu(msg->blksize);
- dn->priority = msg->priority;
-
- dn->flags &= ~DN_NDFLAG_P3;
-
- switch (msg->iinfo & DN_RT_INFO_TYPE) {
- case DN_RT_INFO_L1RT:
- dn->flags &=~DN_NDFLAG_R2;
- dn->flags |= DN_NDFLAG_R1;
- break;
- case DN_RT_INFO_L2RT:
- dn->flags |= DN_NDFLAG_R2;
- }
- }
-
- /* Only use routers in our area */
-		if ((le16_to_cpu(src)>>10) == (le16_to_cpu(decnet_address)>>10)) {
- if (!dn_db->router) {
- dn_db->router = neigh_clone(neigh);
- } else {
- if (msg->priority > ((struct dn_neigh *)dn_db->router)->priority)
- neigh_release(xchg(&dn_db->router, neigh_clone(neigh)));
- }
- }
- write_unlock(&neigh->lock);
- neigh_release(neigh);
- }
-
- kfree_skb(skb);
- return 0;
-}
-
-/*
- * Endnode hello message received
- */
-int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
- struct endnode_hello_message *msg = (struct endnode_hello_message *)skb->data;
- struct neighbour *neigh;
- struct dn_neigh *dn;
- __le16 src;
-
- src = dn_eth2dn(msg->id);
-
- neigh = __neigh_lookup(&dn_neigh_table, &src, skb->dev, 1);
-
-	if (neigh) {
-		dn = container_of(neigh, struct dn_neigh, n);
-
-		write_lock(&neigh->lock);
-
- neigh->used = jiffies;
-
- if (!(neigh->nud_state & NUD_PERMANENT)) {
- neigh->updated = jiffies;
-
- if (neigh->dev->type == ARPHRD_ETHER)
- memcpy(neigh->ha, &eth_hdr(skb)->h_source, ETH_ALEN);
- dn->flags &= ~(DN_NDFLAG_R1 | DN_NDFLAG_R2);
- dn->blksize = le16_to_cpu(msg->blksize);
- dn->priority = 0;
- }
-
- write_unlock(&neigh->lock);
- neigh_release(neigh);
- }
-
- kfree_skb(skb);
- return 0;
-}
-
-static char *dn_find_slot(char *base, int max, int priority)
-{
- int i;
- unsigned char *min = NULL;
-
- base += 6; /* skip first id */
-
- for(i = 0; i < max; i++) {
- if (!min || (*base < *min))
- min = base;
- base += 7; /* find next priority */
- }
-
- if (!min)
- return NULL;
-
- return (*min < priority) ? (min - 6) : NULL;
-}
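dn_find_slot() above scans a packed table of 7-byte router records, each a 6-byte node id followed by one priority byte (the format neigh_elist_cb() below writes out), and returns the start of the lowest-priority record provided that priority is below the candidate's. The layout it walks, expressed as a hypothetical struct:

/*
 * One router-list entry as manipulated via raw pointer arithmetic here
 * (sketch only; the code never declares such a struct):
 */
struct dn_router_slot {
	unsigned char id[6];	/* Ethernet-style DECnet node address */
	unsigned char prio;	/* bit 7: NUD_CONNECTED flag,         */
				/* bits 0-6: router priority          */
} __attribute__((packed));	/* entries are byte-packed, stride 7  */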
-
-struct elist_cb_state {
- struct net_device *dev;
- unsigned char *ptr;
- unsigned char *rs;
- int t, n;
-};
-
-static void neigh_elist_cb(struct neighbour *neigh, void *_info)
-{
- struct elist_cb_state *s = _info;
- struct dn_neigh *dn;
-
- if (neigh->dev != s->dev)
- return;
-
- dn = container_of(neigh, struct dn_neigh, n);
- if (!(dn->flags & (DN_NDFLAG_R1|DN_NDFLAG_R2)))
- return;
-
- if (s->t == s->n)
- s->rs = dn_find_slot(s->ptr, s->n, dn->priority);
- else
- s->t++;
- if (s->rs == NULL)
- return;
-
- dn_dn2eth(s->rs, dn->addr);
- s->rs += 6;
- *(s->rs) = neigh->nud_state & NUD_CONNECTED ? 0x80 : 0x0;
- *(s->rs) |= dn->priority;
- s->rs++;
-}
-
-int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n)
-{
- struct elist_cb_state state;
-
- state.dev = dev;
- state.t = 0;
- state.n = n;
- state.ptr = ptr;
- state.rs = ptr;
-
- neigh_for_each(&dn_neigh_table, neigh_elist_cb, &state);
-
- return state.t;
-}
-
-
-#ifdef CONFIG_PROC_FS
-
-static inline void dn_neigh_format_entry(struct seq_file *seq,
- struct neighbour *n)
-{
- struct dn_neigh *dn = container_of(n, struct dn_neigh, n);
- char buf[DN_ASCBUF_LEN];
-
- read_lock(&n->lock);
- seq_printf(seq, "%-7s %s%s%s %02x %02d %07ld %-8s\n",
- dn_addr2asc(le16_to_cpu(dn->addr), buf),
- (dn->flags&DN_NDFLAG_R1) ? "1" : "-",
- (dn->flags&DN_NDFLAG_R2) ? "2" : "-",
- (dn->flags&DN_NDFLAG_P3) ? "3" : "-",
- dn->n.nud_state,
- refcount_read(&dn->n.refcnt),
- dn->blksize,
- (dn->n.dev) ? dn->n.dev->name : "?");
- read_unlock(&n->lock);
-}
-
-static int dn_neigh_seq_show(struct seq_file *seq, void *v)
-{
- if (v == SEQ_START_TOKEN) {
- seq_puts(seq, "Addr Flags State Use Blksize Dev\n");
- } else {
- dn_neigh_format_entry(seq, v);
- }
-
- return 0;
-}
-
-static void *dn_neigh_seq_start(struct seq_file *seq, loff_t *pos)
-{
- return neigh_seq_start(seq, pos, &dn_neigh_table,
- NEIGH_SEQ_NEIGH_ONLY);
-}
-
-static const struct seq_operations dn_neigh_seq_ops = {
- .start = dn_neigh_seq_start,
- .next = neigh_seq_next,
- .stop = neigh_seq_stop,
- .show = dn_neigh_seq_show,
-};
-#endif
-
-void __init dn_neigh_init(void)
-{
- neigh_table_init(NEIGH_DN_TABLE, &dn_neigh_table);
- proc_create_net("decnet_neigh", 0444, init_net.proc_net,
- &dn_neigh_seq_ops, sizeof(struct neigh_seq_state));
-}
-
-void __exit dn_neigh_cleanup(void)
-{
- remove_proc_entry("decnet_neigh", init_net.proc_net);
- neigh_table_clear(NEIGH_DN_TABLE, &dn_neigh_table);
-}
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
deleted file mode 100644
index e4161e0c86aa..000000000000
--- a/net/decnet/dn_nsp_in.c
+++ /dev/null
@@ -1,906 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * DECnet An implementation of the DECnet protocol suite for the LINUX
- * operating system. DECnet is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * DECnet Network Services Protocol (Input)
- *
- * Author: Eduardo Marcelo Serrat <emserrat@geocities.com>
- *
- * Changes:
- *
- * Steve Whitehouse: Split into dn_nsp_in.c and dn_nsp_out.c from
- * original dn_nsp.c.
- * Steve Whitehouse: Updated to work with my new routing architecture.
- * Steve Whitehouse: Add changes from Eduardo Serrat's patches.
- * Steve Whitehouse: Put all ack handling code in a common routine.
- * Steve Whitehouse: Put other common bits into dn_nsp_rx()
- * Steve Whitehouse: More checks on skb->len to catch bogus packets
- * Fixed various race conditions and possible nasties.
- * Steve Whitehouse: Now handles returned conninit frames.
- * David S. Miller: New socket locking
- * Steve Whitehouse: Fixed lockup when socket filtering was enabled.
- * Paul Koning: Fix to push CC sockets into RUN when acks are
- * received.
- * Steve Whitehouse:
- * Patrick Caulfield: Checking conninits for correctness & sending of error
- * responses.
- * Steve Whitehouse: Added backlog congestion level return codes.
- * Patrick Caulfield:
- * Steve Whitehouse: Added flow control support (outbound)
- * Steve Whitehouse: Prepare for nonlinear skbs
- */
-
-/******************************************************************************
- (c) 1995-1998 E.M. Serrat emserrat@geocities.com
-
-*******************************************************************************/
-
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/sockios.h>
-#include <linux/net.h>
-#include <linux/netdevice.h>
-#include <linux/inet.h>
-#include <linux/route.h>
-#include <linux/slab.h>
-#include <net/sock.h>
-#include <net/tcp_states.h>
-#include <linux/fcntl.h>
-#include <linux/mm.h>
-#include <linux/termios.h>
-#include <linux/interrupt.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/netfilter_decnet.h>
-#include <net/neighbour.h>
-#include <net/dst.h>
-#include <net/dn.h>
-#include <net/dn_nsp.h>
-#include <net/dn_dev.h>
-#include <net/dn_route.h>
-
-extern int decnet_log_martians;
-
-static void dn_log_martian(struct sk_buff *skb, const char *msg)
-{
- if (decnet_log_martians) {
- char *devname = skb->dev ? skb->dev->name : "???";
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- net_info_ratelimited("DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n",
- msg, devname,
- le16_to_cpu(cb->src),
- le16_to_cpu(cb->dst),
- le16_to_cpu(cb->src_port),
- le16_to_cpu(cb->dst_port));
- }
-}
-
-/*
- * For this function we've flipped the cross-subchannel bit
- * if the message is an otherdata or linkservice message. Thus
- * we can use it to work out what to update.
- */
-static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack)
-{
- struct dn_scp *scp = DN_SK(sk);
- unsigned short type = ((ack >> 12) & 0x0003);
- int wakeup = 0;
-
- switch (type) {
- case 0: /* ACK - Data */
- if (dn_after(ack, scp->ackrcv_dat)) {
- scp->ackrcv_dat = ack & 0x0fff;
- wakeup |= dn_nsp_check_xmit_queue(sk, skb,
- &scp->data_xmit_queue,
- ack);
- }
- break;
- case 1: /* NAK - Data */
- break;
- case 2: /* ACK - OtherData */
- if (dn_after(ack, scp->ackrcv_oth)) {
- scp->ackrcv_oth = ack & 0x0fff;
- wakeup |= dn_nsp_check_xmit_queue(sk, skb,
- &scp->other_xmit_queue,
- ack);
- }
- break;
- case 3: /* NAK - OtherData */
- break;
- }
-
- if (wakeup && !sock_flag(sk, SOCK_DEAD))
- sk->sk_state_change(sk);
-}
-
-/*
- * This function is a universal ack processor.
- */
-static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth)
-{
- __le16 *ptr = (__le16 *)skb->data;
- int len = 0;
- unsigned short ack;
-
- if (skb->len < 2)
- return len;
-
- if ((ack = le16_to_cpu(*ptr)) & 0x8000) {
- skb_pull(skb, 2);
- ptr++;
- len += 2;
- if ((ack & 0x4000) == 0) {
- if (oth)
- ack ^= 0x2000;
- dn_ack(sk, skb, ack);
- }
- }
-
- if (skb->len < 2)
- return len;
-
- if ((ack = le16_to_cpu(*ptr)) & 0x8000) {
- skb_pull(skb, 2);
- len += 2;
- if ((ack & 0x4000) == 0) {
- if (oth)
- ack ^= 0x2000;
- dn_ack(sk, skb, ack);
- }
- }
-
- return len;
-}
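Each 16-bit ack field handled above packs flags around a 12-bit sequence number: bit 15 marks the field as present, a set bit 14 makes dn_process_ack() leave it unprocessed, bit 13 is the cross-subchannel bit (pre-flipped for otherdata messages so that dn_ack() can dispatch data versus otherdata uniformly), and bit 12 distinguishes NAK from ACK. A small decoder using the same masks (a sketch, not a kernel API):

/* Decode one NSP acknowledgement field. */
struct nsp_ack_field {
	unsigned int present:1;	/* bit 15: field carries an ack/nak   */
	unsigned int skip:1;	/* bit 14: set => handler ignores it  */
	unsigned int other:1;	/* bit 13: cross-subchannel/otherdata */
	unsigned int nak:1;	/* bit 12: NAK rather than ACK        */
	unsigned short num;	/* bits 0-11: acknowledged seq number */
};

static struct nsp_ack_field nsp_decode_ack(unsigned short field)
{
	struct nsp_ack_field a = {
		.present = !!(field & 0x8000),
		.skip    = !!(field & 0x4000),
		.other   = !!(field & 0x2000),
		.nak     = !!(field & 0x1000),
		.num     = field & 0x0fff,
	};
	return a;
}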
-
-
-/**
- * dn_check_idf - Check an image data field format is correct.
- * @pptr: Pointer to pointer to image data
- * @len: Pointer to length of image data
- * @max: The maximum allowed length of the data in the image data field
- * @follow_on: Check that this many bytes exist beyond the end of the image data
- *
- * Returns: 0 if ok, -1 on error
- */
-static inline int dn_check_idf(unsigned char **pptr, int *len, unsigned char max, unsigned char follow_on)
-{
- unsigned char *ptr = *pptr;
- unsigned char flen = *ptr++;
-
- (*len)--;
- if (flen > max)
- return -1;
- if ((flen + follow_on) > *len)
- return -1;
-
- *len -= flen;
- *pptr = ptr + flen;
- return 0;
-}
-
-/*
- * Table of reason codes to pass back to node which sent us a badly
- * formed message, plus text messages for the log. A zero entry in
- * the reason field means "don't reply" otherwise a disc init is sent with
- * the specified reason code.
- */
-static struct {
- unsigned short reason;
- const char *text;
-} ci_err_table[] = {
- { 0, "CI: Truncated message" },
- { NSP_REASON_ID, "CI: Destination username error" },
- { NSP_REASON_ID, "CI: Destination username type" },
- { NSP_REASON_US, "CI: Source username error" },
- { 0, "CI: Truncated at menuver" },
- { 0, "CI: Truncated before access or user data" },
- { NSP_REASON_IO, "CI: Access data format error" },
- { NSP_REASON_IO, "CI: User data format error" }
-};
-
-/*
- * This function uses a slightly different lookup method
- * to find its sockets, since it searches on object name/number
- * rather than port numbers. Various tests are done to ensure that
- * the incoming data is in the correct format before it is queued to
- * a socket.
- */
-static struct sock *dn_find_listener(struct sk_buff *skb, unsigned short *reason)
-{
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
-	struct nsp_conn_init_msg *msg;
- struct sockaddr_dn dstaddr;
- struct sockaddr_dn srcaddr;
- unsigned char type = 0;
- int dstlen;
- int srclen;
- unsigned char *ptr;
- int len;
- int err = 0;
- unsigned char menuver;
-
- memset(&dstaddr, 0, sizeof(struct sockaddr_dn));
- memset(&srcaddr, 0, sizeof(struct sockaddr_dn));
-
- /*
- * 1. Decode & remove message header
- */
-	if (!pskb_may_pull(skb, sizeof(*msg)))
-		goto err_out;
-
-	msg = (struct nsp_conn_init_msg *)skb->data;
-	cb->src_port = msg->srcaddr;
-	cb->dst_port = msg->dstaddr;
-	cb->services = msg->services;
-	cb->info = msg->info;
-	cb->segsize = le16_to_cpu(msg->segsize);
-
-	skb_pull(skb, sizeof(*msg));
-
- len = skb->len;
- ptr = skb->data;
-
- /*
- * 2. Check destination end username format
- */
- dstlen = dn_username2sockaddr(ptr, len, &dstaddr, &type);
- err++;
- if (dstlen < 0)
- goto err_out;
-
- err++;
- if (type > 1)
- goto err_out;
-
- len -= dstlen;
- ptr += dstlen;
-
- /*
- * 3. Check source end username format
- */
- srclen = dn_username2sockaddr(ptr, len, &srcaddr, &type);
- err++;
- if (srclen < 0)
- goto err_out;
-
- len -= srclen;
- ptr += srclen;
- err++;
- if (len < 1)
- goto err_out;
-
- menuver = *ptr;
- ptr++;
- len--;
-
- /*
- * 4. Check that optional data actually exists if menuver says it does
- */
- err++;
- if ((menuver & (DN_MENUVER_ACC | DN_MENUVER_USR)) && (len < 1))
- goto err_out;
-
- /*
- * 5. Check optional access data format
- */
- err++;
- if (menuver & DN_MENUVER_ACC) {
- if (dn_check_idf(&ptr, &len, 39, 1))
- goto err_out;
- if (dn_check_idf(&ptr, &len, 39, 1))
- goto err_out;
- if (dn_check_idf(&ptr, &len, 39, (menuver & DN_MENUVER_USR) ? 1 : 0))
- goto err_out;
- }
-
- /*
- * 6. Check optional user data format
- */
- err++;
- if (menuver & DN_MENUVER_USR) {
- if (dn_check_idf(&ptr, &len, 16, 0))
- goto err_out;
- }
-
- /*
- * 7. Look up socket based on destination end username
- */
- return dn_sklist_find_listener(&dstaddr);
-err_out:
- dn_log_martian(skb, ci_err_table[err].text);
- *reason = ci_err_table[err].reason;
- return NULL;
-}
-
-
-static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb)
-{
- if (sk_acceptq_is_full(sk)) {
- kfree_skb(skb);
- return;
- }
-
- sk->sk_ack_backlog++;
- skb_queue_tail(&sk->sk_receive_queue, skb);
- sk->sk_state_change(sk);
-}
-
-static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb)
-{
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- struct dn_scp *scp = DN_SK(sk);
- unsigned char *ptr;
-
- if (skb->len < 4)
- goto out;
-
- ptr = skb->data;
- cb->services = *ptr++;
- cb->info = *ptr++;
- cb->segsize = le16_to_cpu(*(__le16 *)ptr);
-
- if ((scp->state == DN_CI) || (scp->state == DN_CD)) {
- scp->persist = 0;
- scp->addrrem = cb->src_port;
- sk->sk_state = TCP_ESTABLISHED;
- scp->state = DN_RUN;
- scp->services_rem = cb->services;
- scp->info_rem = cb->info;
- scp->segsize_rem = cb->segsize;
-
- if ((scp->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
- scp->max_window = decnet_no_fc_max_cwnd;
-
- if (skb->len > 0) {
- u16 dlen = *skb->data;
- if ((dlen <= 16) && (dlen <= skb->len)) {
- scp->conndata_in.opt_optl = cpu_to_le16(dlen);
- skb_copy_from_linear_data_offset(skb, 1,
- scp->conndata_in.opt_data, dlen);
- }
- }
- dn_nsp_send_link(sk, DN_NOCHANGE, 0);
- if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_state_change(sk);
- }
-
-out:
- kfree_skb(skb);
-}
-
-static void dn_nsp_conn_ack(struct sock *sk, struct sk_buff *skb)
-{
- struct dn_scp *scp = DN_SK(sk);
-
- if (scp->state == DN_CI) {
- scp->state = DN_CD;
- scp->persist = 0;
- }
-
- kfree_skb(skb);
-}
-
-static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb)
-{
- struct dn_scp *scp = DN_SK(sk);
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- unsigned short reason;
-
- if (skb->len < 2)
- goto out;
-
- reason = le16_to_cpu(*(__le16 *)skb->data);
- skb_pull(skb, 2);
-
- scp->discdata_in.opt_status = cpu_to_le16(reason);
- scp->discdata_in.opt_optl = 0;
- memset(scp->discdata_in.opt_data, 0, 16);
-
- if (skb->len > 0) {
- u16 dlen = *skb->data;
- if ((dlen <= 16) && (dlen <= skb->len)) {
- scp->discdata_in.opt_optl = cpu_to_le16(dlen);
- skb_copy_from_linear_data_offset(skb, 1, scp->discdata_in.opt_data, dlen);
- }
- }
-
- scp->addrrem = cb->src_port;
- sk->sk_state = TCP_CLOSE;
-
- switch (scp->state) {
- case DN_CI:
- case DN_CD:
- scp->state = DN_RJ;
- sk->sk_err = ECONNREFUSED;
- break;
- case DN_RUN:
- sk->sk_shutdown |= SHUTDOWN_MASK;
- scp->state = DN_DN;
- break;
- case DN_DI:
- scp->state = DN_DIC;
- break;
- }
-
- if (!sock_flag(sk, SOCK_DEAD)) {
- if (sk->sk_socket->state != SS_UNCONNECTED)
- sk->sk_socket->state = SS_DISCONNECTING;
- sk->sk_state_change(sk);
- }
-
- /*
-	 * It appears that it is possible for remote machines to send disc
- * init messages with no port identifier if we are in the CI and
- * possibly also the CD state. Obviously we shouldn't reply with
- * a message if we don't know what the end point is.
- */
- if (scp->addrrem) {
- dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);
- }
- scp->persist_fxn = dn_destroy_timer;
- scp->persist = dn_nsp_persist(sk);
-
-out:
- kfree_skb(skb);
-}
-
-/*
- * disc_conf messages are also called no_resources or no_link
- * messages depending upon the "reason" field.
- */
-static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb)
-{
- struct dn_scp *scp = DN_SK(sk);
- unsigned short reason;
-
- if (skb->len != 2)
- goto out;
-
- reason = le16_to_cpu(*(__le16 *)skb->data);
-
- sk->sk_state = TCP_CLOSE;
-
- switch (scp->state) {
- case DN_CI:
- scp->state = DN_NR;
- break;
- case DN_DR:
- if (reason == NSP_REASON_DC)
- scp->state = DN_DRC;
- if (reason == NSP_REASON_NL)
- scp->state = DN_CN;
- break;
- case DN_DI:
- scp->state = DN_DIC;
- break;
- case DN_RUN:
- sk->sk_shutdown |= SHUTDOWN_MASK;
- /* fall through */
- case DN_CC:
- scp->state = DN_CN;
- }
-
- if (!sock_flag(sk, SOCK_DEAD)) {
- if (sk->sk_socket->state != SS_UNCONNECTED)
- sk->sk_socket->state = SS_DISCONNECTING;
- sk->sk_state_change(sk);
- }
-
- scp->persist_fxn = dn_destroy_timer;
- scp->persist = dn_nsp_persist(sk);
-
-out:
- kfree_skb(skb);
-}
-
-static void dn_nsp_linkservice(struct sock *sk, struct sk_buff *skb)
-{
- struct dn_scp *scp = DN_SK(sk);
- unsigned short segnum;
- unsigned char lsflags;
- signed char fcval;
- int wake_up = 0;
- char *ptr = skb->data;
- unsigned char fctype = scp->services_rem & NSP_FC_MASK;
-
- if (skb->len != 4)
- goto out;
-
- segnum = le16_to_cpu(*(__le16 *)ptr);
- ptr += 2;
- lsflags = *(unsigned char *)ptr++;
- fcval = *ptr;
-
- /*
-	 * Here we ignore erroneous packets which really should
-	 * cause a connection abort. It is not critical
- * for now though.
- */
- if (lsflags & 0xf8)
- goto out;
-
- if (seq_next(scp->numoth_rcv, segnum)) {
- seq_add(&scp->numoth_rcv, 1);
- switch(lsflags & 0x04) { /* FCVAL INT */
- case 0x00: /* Normal Request */
- switch(lsflags & 0x03) { /* FCVAL MOD */
- case 0x00: /* Request count */
- if (fcval < 0) {
- unsigned char p_fcval = -fcval;
- if ((scp->flowrem_dat > p_fcval) &&
- (fctype == NSP_FC_SCMC)) {
- scp->flowrem_dat -= p_fcval;
- }
- } else if (fcval > 0) {
- scp->flowrem_dat += fcval;
- wake_up = 1;
- }
- break;
- case 0x01: /* Stop outgoing data */
- scp->flowrem_sw = DN_DONTSEND;
- break;
- case 0x02: /* Ok to start again */
- scp->flowrem_sw = DN_SEND;
- dn_nsp_output(sk);
- wake_up = 1;
- }
- break;
- case 0x04: /* Interrupt Request */
- if (fcval > 0) {
- scp->flowrem_oth += fcval;
- wake_up = 1;
- }
- break;
- }
- if (wake_up && !sock_flag(sk, SOCK_DEAD))
- sk->sk_state_change(sk);
- }
-
- dn_nsp_send_oth_ack(sk);
-
-out:
- kfree_skb(skb);
-}
-
-/*
- * Copy of sock_queue_rcv_skb (from sock.h) without
- * bh_lock_sock() (its already held when this is called) which
- * also allows data and other data to be queued to a socket.
- */
-static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
-{
- int err;
-
-	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
-	   the number of warnings when compiling with -W --ANK
- */
- if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned int)sk->sk_rcvbuf) {
- err = -ENOMEM;
- goto out;
- }
-
- err = sk_filter(sk, skb);
- if (err)
- goto out;
-
- skb_set_owner_r(skb, sk);
- skb_queue_tail(queue, skb);
-
- if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk);
-out:
- return err;
-}
-
-static void dn_nsp_otherdata(struct sock *sk, struct sk_buff *skb)
-{
- struct dn_scp *scp = DN_SK(sk);
- unsigned short segnum;
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- int queued = 0;
-
- if (skb->len < 2)
- goto out;
-
- cb->segnum = segnum = le16_to_cpu(*(__le16 *)skb->data);
- skb_pull(skb, 2);
-
- if (seq_next(scp->numoth_rcv, segnum)) {
-
- if (dn_queue_skb(sk, skb, SIGURG, &scp->other_receive_queue) == 0) {
- seq_add(&scp->numoth_rcv, 1);
- scp->other_report = 0;
- queued = 1;
- }
- }
-
- dn_nsp_send_oth_ack(sk);
-out:
- if (!queued)
- kfree_skb(skb);
-}
-
-static void dn_nsp_data(struct sock *sk, struct sk_buff *skb)
-{
- int queued = 0;
- unsigned short segnum;
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- struct dn_scp *scp = DN_SK(sk);
-
- if (skb->len < 2)
- goto out;
-
- cb->segnum = segnum = le16_to_cpu(*(__le16 *)skb->data);
- skb_pull(skb, 2);
-
- if (seq_next(scp->numdat_rcv, segnum)) {
- if (dn_queue_skb(sk, skb, SIGIO, &sk->sk_receive_queue) == 0) {
- seq_add(&scp->numdat_rcv, 1);
- queued = 1;
- }
-
- if ((scp->flowloc_sw == DN_SEND) && dn_congested(sk)) {
- scp->flowloc_sw = DN_DONTSEND;
- dn_nsp_send_link(sk, DN_DONTSEND, 0);
- }
- }
-
- dn_nsp_send_data_ack(sk);
-out:
- if (!queued)
- kfree_skb(skb);
-}
-
-/*
- * If one of our conninit messages is returned, this function
- * deals with it. It puts the socket into the NO_COMMUNICATION
- * state.
- */
-static void dn_returned_conn_init(struct sock *sk, struct sk_buff *skb)
-{
- struct dn_scp *scp = DN_SK(sk);
-
- if (scp->state == DN_CI) {
- scp->state = DN_NC;
- sk->sk_state = TCP_CLOSE;
- if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_state_change(sk);
- }
-
- kfree_skb(skb);
-}
-
-static int dn_nsp_no_socket(struct sk_buff *skb, unsigned short reason)
-{
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- int ret = NET_RX_DROP;
-
- /* Must not reply to returned packets */
- if (cb->rt_flags & DN_RT_F_RTS)
- goto out;
-
- if ((reason != NSP_REASON_OK) && ((cb->nsp_flags & 0x0c) == 0x08)) {
- switch (cb->nsp_flags & 0x70) {
- case 0x10:
- case 0x60: /* (Retransmitted) Connect Init */
- dn_nsp_return_disc(skb, NSP_DISCINIT, reason);
- ret = NET_RX_SUCCESS;
- break;
- case 0x20: /* Connect Confirm */
- dn_nsp_return_disc(skb, NSP_DISCCONF, reason);
- ret = NET_RX_SUCCESS;
- break;
- }
- }
-
-out:
- kfree_skb(skb);
- return ret;
-}
-
-static int dn_nsp_rx_packet(struct net *net, struct sock *sk2,
- struct sk_buff *skb)
-{
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- struct sock *sk = NULL;
- unsigned char *ptr = (unsigned char *)skb->data;
- unsigned short reason = NSP_REASON_NL;
-
- if (!pskb_may_pull(skb, 2))
- goto free_out;
-
- skb_reset_transport_header(skb);
- cb->nsp_flags = *ptr++;
-
- if (decnet_debug_level & 2)
- printk(KERN_DEBUG "dn_nsp_rx: Message type 0x%02x\n", (int)cb->nsp_flags);
-
- if (cb->nsp_flags & 0x83)
- goto free_out;
-
- /*
- * Filter out conninits and useless packet types
- */
- if ((cb->nsp_flags & 0x0c) == 0x08) {
- switch (cb->nsp_flags & 0x70) {
- case 0x00: /* NOP */
- case 0x70: /* Reserved */
- case 0x50: /* Reserved, Phase II node init */
- goto free_out;
- case 0x10:
- case 0x60:
- if (unlikely(cb->rt_flags & DN_RT_F_RTS))
- goto free_out;
- sk = dn_find_listener(skb, &reason);
- goto got_it;
- }
- }
-
- if (!pskb_may_pull(skb, 3))
- goto free_out;
-
- /*
- * Grab the destination address.
- */
- cb->dst_port = *(__le16 *)ptr;
- cb->src_port = 0;
- ptr += 2;
-
- /*
- * If not a connack, grab the source address too.
- */
- if (pskb_may_pull(skb, 5)) {
- cb->src_port = *(__le16 *)ptr;
- ptr += 2;
- skb_pull(skb, 5);
- }
-
- /*
- * Returned packets...
- * Swap src & dst and look up in the normal way.
- */
- if (unlikely(cb->rt_flags & DN_RT_F_RTS)) {
- swap(cb->dst_port, cb->src_port);
- swap(cb->dst, cb->src);
- }
-
- /*
- * Find the socket to which this skb is destined.
- */
- sk = dn_find_by_skb(skb);
-got_it:
- if (sk != NULL) {
- struct dn_scp *scp = DN_SK(sk);
-
- /* Reset backoff */
- scp->nsp_rxtshift = 0;
-
- /*
- * We linearize everything except data segments here.
- */
- if (cb->nsp_flags & ~0x60) {
- if (unlikely(skb_linearize(skb)))
- goto free_out;
- }
-
- return sk_receive_skb(sk, skb, 0);
- }
-
- return dn_nsp_no_socket(skb, reason);
-
-free_out:
- kfree_skb(skb);
- return NET_RX_DROP;
-}
-
-int dn_nsp_rx(struct sk_buff *skb)
-{
- return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN,
- &init_net, NULL, skb, skb->dev, NULL,
- dn_nsp_rx_packet);
-}
-
-/*
- * This is the main receive routine for sockets. It is called
- * from the above when the socket is not busy, and also from
- * sock_release() when there is a backlog queued up.
- */
-int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
-{
- struct dn_scp *scp = DN_SK(sk);
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
-
- if (cb->rt_flags & DN_RT_F_RTS) {
- if (cb->nsp_flags == 0x18 || cb->nsp_flags == 0x68)
- dn_returned_conn_init(sk, skb);
- else
- kfree_skb(skb);
- return NET_RX_SUCCESS;
- }
-
- /*
- * Control packet.
- */
- if ((cb->nsp_flags & 0x0c) == 0x08) {
- switch (cb->nsp_flags & 0x70) {
- case 0x10:
- case 0x60:
- dn_nsp_conn_init(sk, skb);
- break;
- case 0x20:
- dn_nsp_conn_conf(sk, skb);
- break;
- case 0x30:
- dn_nsp_disc_init(sk, skb);
- break;
- case 0x40:
- dn_nsp_disc_conf(sk, skb);
- break;
- }
-
- } else if (cb->nsp_flags == 0x24) {
- /*
- * Special for connacks, 'cos they don't have
- * ack data or ack otherdata info.
- */
- dn_nsp_conn_ack(sk, skb);
- } else {
- int other = 1;
-
- /* both data and ack frames can kick a CC socket into RUN */
- if ((scp->state == DN_CC) && !sock_flag(sk, SOCK_DEAD)) {
- scp->state = DN_RUN;
- sk->sk_state = TCP_ESTABLISHED;
- sk->sk_state_change(sk);
- }
-
- if ((cb->nsp_flags & 0x1c) == 0)
- other = 0;
- if (cb->nsp_flags == 0x04)
- other = 0;
-
- /*
-		 * Read out ack data here; this applies equally
-		 * to data, other data, link service and both
-		 * ack data and ack otherdata.
- */
- dn_process_ack(sk, skb, other);
-
- /*
- * If we've some sort of data here then call a
- * suitable routine for dealing with it, otherwise
- * the packet is an ack and can be discarded.
- */
- if ((cb->nsp_flags & 0x0c) == 0) {
-
- if (scp->state != DN_RUN)
- goto free_out;
-
- switch (cb->nsp_flags) {
- case 0x10: /* LS */
- dn_nsp_linkservice(sk, skb);
- break;
- case 0x30: /* OD */
- dn_nsp_otherdata(sk, skb);
- break;
- default:
- dn_nsp_data(sk, skb);
- }
-
- } else { /* Ack, chuck it out here */
-free_out:
- kfree_skb(skb);
- }
- }
-
- return NET_RX_SUCCESS;
-}
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
deleted file mode 100644
index 00f2ed721ec1..000000000000
--- a/net/decnet/dn_nsp_out.c
+++ /dev/null
@@ -1,695 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * DECnet An implementation of the DECnet protocol suite for the LINUX
- * operating system. DECnet is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * DECnet Network Services Protocol (Output)
- *
- * Author: Eduardo Marcelo Serrat <emserrat@geocities.com>
- *
- * Changes:
- *
- * Steve Whitehouse: Split into dn_nsp_in.c and dn_nsp_out.c from
- * original dn_nsp.c.
- * Steve Whitehouse: Updated to work with my new routing architecture.
- * Steve Whitehouse: Added changes from Eduardo Serrat's patches.
- * Steve Whitehouse: Now conninits have the "return" bit set.
- * Steve Whitehouse: Fixes to check alloc'd skbs are non NULL!
- * Moved output state machine into one function
- * Steve Whitehouse: New output state machine
- * Paul Koning: Connect Confirm message fix.
- * Eduardo Serrat: Fix to stop dn_nsp_do_disc() sending malformed packets.
- * Steve Whitehouse: dn_nsp_output() and friends needed a spring clean
- * Steve Whitehouse: Moved dn_nsp_send() in here from route.h
- */
-
-/******************************************************************************
- (c) 1995-1998 E.M. Serrat emserrat@geocities.com
-
-*******************************************************************************/
-
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/sockios.h>
-#include <linux/net.h>
-#include <linux/netdevice.h>
-#include <linux/inet.h>
-#include <linux/route.h>
-#include <linux/slab.h>
-#include <net/sock.h>
-#include <linux/fcntl.h>
-#include <linux/mm.h>
-#include <linux/termios.h>
-#include <linux/interrupt.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/if_packet.h>
-#include <net/neighbour.h>
-#include <net/dst.h>
-#include <net/flow.h>
-#include <net/dn.h>
-#include <net/dn_nsp.h>
-#include <net/dn_dev.h>
-#include <net/dn_route.h>
-
-
-static int nsp_backoff[NSP_MAXRXTSHIFT + 1] = { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
-
-static void dn_nsp_send(struct sk_buff *skb)
-{
- struct sock *sk = skb->sk;
- struct dn_scp *scp = DN_SK(sk);
- struct dst_entry *dst;
- struct flowidn fld;
-
- skb_reset_transport_header(skb);
- scp->stamp = jiffies;
-
- dst = sk_dst_check(sk, 0);
- if (dst) {
-try_again:
- skb_dst_set(skb, dst);
- dst_output(&init_net, skb->sk, skb);
- return;
- }
-
- memset(&fld, 0, sizeof(fld));
- fld.flowidn_oif = sk->sk_bound_dev_if;
- fld.saddr = dn_saddr2dn(&scp->addr);
- fld.daddr = dn_saddr2dn(&scp->peer);
- dn_sk_ports_copy(&fld, scp);
- fld.flowidn_proto = DNPROTO_NSP;
- if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, 0) == 0) {
- dst = sk_dst_get(sk);
- sk->sk_route_caps = dst->dev->features;
- goto try_again;
- }
-
- sk->sk_err = EHOSTUNREACH;
- if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_state_change(sk);
-}
-
-
-/*
- * If sk == NULL, then we assume that we are supposed to be making
- * a routing layer skb. If sk != NULL, then we are supposed to be
- * creating an skb for the NSP layer.
- *
- * The eventual aim is for each socket to have a cached header size
- * for its outgoing packets, and to set hdr from this when sk != NULL.
- */
-struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri)
-{
- struct sk_buff *skb;
- int hdr = 64;
-
- if ((skb = alloc_skb(size + hdr, pri)) == NULL)
- return NULL;
-
- skb->protocol = htons(ETH_P_DNA_RT);
- skb->pkt_type = PACKET_OUTGOING;
-
- if (sk)
- skb_set_owner_w(skb, sk);
-
- skb_reserve(skb, hdr);
-
- return skb;
-}
-
-/*
- * Calculate persist timer based upon the smoothed round
- * trip time and the variance. Backoff according to the
- * nsp_backoff[] array.
- */
-unsigned long dn_nsp_persist(struct sock *sk)
-{
- struct dn_scp *scp = DN_SK(sk);
-
- unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1;
-
- t *= nsp_backoff[scp->nsp_rxtshift];
-
-	if (t < HZ)
-		t = HZ;
-	if (t > (600 * HZ))
-		t = 600 * HZ;
-
- if (scp->nsp_rxtshift < NSP_MAXRXTSHIFT)
- scp->nsp_rxtshift++;
-
- /* printk(KERN_DEBUG "rxtshift %lu, t=%lu\n", scp->nsp_rxtshift, t); */
-
- return t;
-}
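Unwinding the fixed-point scaling (nsp_srtt is stored multiplied by 8 and nsp_rttvar by 4, see dn_nsp_rtt() below), the base value computed above has the classic retransmission-timer shape:

/*
 *	t = ((nsp_srtt >> 2) + nsp_rttvar) >> 1
 *	  = ((8 * RTT) / 4 + 4 * MDEV) / 2
 *	  = RTT + 2 * MDEV			(in jiffies)
 *
 * which is then scaled by nsp_backoff[] and clamped to [HZ, 600 * HZ].
 */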
-
-/*
- * This is called each time we get an estimate for the rtt
- * on the link.
- */
-static void dn_nsp_rtt(struct sock *sk, long rtt)
-{
- struct dn_scp *scp = DN_SK(sk);
- long srtt = (long)scp->nsp_srtt;
- long rttvar = (long)scp->nsp_rttvar;
- long delta;
-
- /*
- * If the jiffies clock flips over in the middle of timestamp
- * gathering this value might turn out negative, so we make sure
-	 * that it is always positive here.
- */
- if (rtt < 0)
- rtt = -rtt;
- /*
- * Add new rtt to smoothed average
- */
- delta = ((rtt << 3) - srtt);
- srtt += (delta >> 3);
- if (srtt >= 1)
- scp->nsp_srtt = (unsigned long)srtt;
- else
- scp->nsp_srtt = 1;
-
- /*
-	 * Add new rtt variance to smoothed variance
- */
- delta >>= 1;
- rttvar += ((((delta>0)?(delta):(-delta)) - rttvar) >> 2);
- if (rttvar >= 1)
- scp->nsp_rttvar = (unsigned long)rttvar;
- else
- scp->nsp_rttvar = 1;
-
- /* printk(KERN_DEBUG "srtt=%lu rttvar=%lu\n", scp->nsp_srtt, scp->nsp_rttvar); */
-}
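dn_nsp_rtt() is the standard fixed-point EWMA pair: the error between the new sample and the scaled mean is folded in with gain 1/8, the mean deviation with gain 1/4. The same update isolated from the socket structures (a sketch with a hypothetical helper; e.g. *srtt = 800, i.e. 100 ticks, plus a 120-tick sample gives delta = 160 and a new *srtt of 820, i.e. 102.5 ticks):

/* Fixed-point SRTT/RTTVAR update as in dn_nsp_rtt() above. */
static void rtt_sample(unsigned long *srtt, unsigned long *rttvar, long rtt)
{
	long s = (long)*srtt;		/* mean, scaled by 8          */
	long v = (long)*rttvar;		/* deviation, scaled by 4     */
	long delta;

	if (rtt < 0)			/* jiffies wrapped mid-sample */
		rtt = -rtt;

	delta = (rtt << 3) - s;		/* error vs. the scaled mean  */
	s += delta >> 3;		/* gain 1/8 on the mean       */
	*srtt = s >= 1 ? (unsigned long)s : 1;

	delta >>= 1;
	if (delta < 0)
		delta = -delta;
	v += (delta - v) >> 2;		/* gain 1/4 on the deviation  */
	*rttvar = v >= 1 ? (unsigned long)v : 1;
}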
-
-/**
- * dn_nsp_clone_and_send - Send a data packet by cloning it
- * @skb: The packet to clone and transmit
- * @gfp: memory allocation flag
- *
- * Clone a queued data or other data packet and transmit it.
- *
- * Returns: The number of times the packet has been sent previously
- */
-static inline unsigned int dn_nsp_clone_and_send(struct sk_buff *skb,
- gfp_t gfp)
-{
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- struct sk_buff *skb2;
- int ret = 0;
-
- if ((skb2 = skb_clone(skb, gfp)) != NULL) {
- ret = cb->xmit_count;
- cb->xmit_count++;
- cb->stamp = jiffies;
- skb2->sk = skb->sk;
- dn_nsp_send(skb2);
- }
-
- return ret;
-}
-
-/**
- * dn_nsp_output - Try and send something from socket queues
- * @sk: The socket whose queues are to be investigated
- *
- * Try and send the packet on the end of the data and other data queues.
- * Other data gets priority over data, and if we retransmit a packet we
- * reduce the window by dividing it in two.
- *
- */
-void dn_nsp_output(struct sock *sk)
-{
- struct dn_scp *scp = DN_SK(sk);
- struct sk_buff *skb;
- unsigned int reduce_win = 0;
-
- /*
- * First we check for otherdata/linkservice messages
- */
- if ((skb = skb_peek(&scp->other_xmit_queue)) != NULL)
- reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC);
-
- /*
- * If we may not send any data, we don't.
- * If we are still trying to get some other data down the
-	 * channel, we don't try to send any data.
- */
- if (reduce_win || (scp->flowrem_sw != DN_SEND))
- goto recalc_window;
-
- if ((skb = skb_peek(&scp->data_xmit_queue)) != NULL)
- reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC);
-
- /*
- * If we've sent any frame more than once, we cut the
- * send window size in half. There is always a minimum
- * window size of one available.
- */
-recalc_window:
- if (reduce_win) {
- scp->snd_window >>= 1;
- if (scp->snd_window < NSP_MIN_WINDOW)
- scp->snd_window = NSP_MIN_WINDOW;
- }
-}
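Together with dn_nsp_check_xmit_queue() below, which opens the window by one for each segment acknowledged after a single transmission, this halving forms a small additive-increase/multiplicative-decrease controller on the send window:

/*
 * Window policy in this file, summarised (sketch):
 *
 *	ack of a once-sent segment:	if (snd_window < max_window)
 *						snd_window++;
 *	any retransmission:		snd_window >>= 1;
 *					if (snd_window < NSP_MIN_WINDOW)
 *						snd_window = NSP_MIN_WINDOW;
 */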
-
-int dn_nsp_xmit_timeout(struct sock *sk)
-{
- struct dn_scp *scp = DN_SK(sk);
-
- dn_nsp_output(sk);
-
- if (!skb_queue_empty(&scp->data_xmit_queue) ||
- !skb_queue_empty(&scp->other_xmit_queue))
- scp->persist = dn_nsp_persist(sk);
-
- return 0;
-}
-
-static inline __le16 *dn_mk_common_header(struct dn_scp *scp, struct sk_buff *skb, unsigned char msgflag, int len)
-{
- unsigned char *ptr = skb_push(skb, len);
-
- BUG_ON(len < 5);
-
- *ptr++ = msgflag;
- *((__le16 *)ptr) = scp->addrrem;
- ptr += 2;
- *((__le16 *)ptr) = scp->addrloc;
- ptr += 2;
- return (__le16 __force *)ptr;
-}
-
-static __le16 *dn_mk_ack_header(struct sock *sk, struct sk_buff *skb, unsigned char msgflag, int hlen, int other)
-{
- struct dn_scp *scp = DN_SK(sk);
- unsigned short acknum = scp->numdat_rcv & 0x0FFF;
- unsigned short ackcrs = scp->numoth_rcv & 0x0FFF;
- __le16 *ptr;
-
- BUG_ON(hlen < 9);
-
- scp->ackxmt_dat = acknum;
- scp->ackxmt_oth = ackcrs;
- acknum |= 0x8000;
- ackcrs |= 0x8000;
-
- /* If this is an "other data/ack" message, swap acknum and ackcrs */
- if (other)
- swap(acknum, ackcrs);
-
- /* Set "cross subchannel" bit in ackcrs */
- ackcrs |= 0x2000;
-
- ptr = dn_mk_common_header(scp, skb, msgflag, hlen);
-
- *ptr++ = cpu_to_le16(acknum);
- *ptr++ = cpu_to_le16(ackcrs);
-
- return ptr;
-}
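The fixed part of every NSP data or acknowledgement message is built by the two helpers above; for a data segment, dn_nsp_mk_data_header() below appends the sequence number as well. The resulting layout for the data-subchannel case, as a sketch (for otherdata messages the two ack words are swapped before the cross-subchannel bit is set):

/*
 * Header bytes assembled by dn_mk_common_header()/dn_mk_ack_header():
 *
 *	[0]	msgflg
 *	[1..2]	__le16 dstaddr = scp->addrrem (remote port)
 *	[3..4]	__le16 srcaddr = scp->addrloc (local port)
 *	[5..6]	__le16 acknum  = 0x8000 | data ack number
 *	[7..8]	__le16 ackcrs  = 0x8000 | 0x2000 | otherdata ack number
 *	[9..10]	__le16 segnum  (data messages only, hlen = 11)
 */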
-
-static __le16 *dn_nsp_mk_data_header(struct sock *sk, struct sk_buff *skb, int oth)
-{
- struct dn_scp *scp = DN_SK(sk);
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- __le16 *ptr = dn_mk_ack_header(sk, skb, cb->nsp_flags, 11, oth);
-
- if (unlikely(oth)) {
- cb->segnum = scp->numoth;
- seq_add(&scp->numoth, 1);
- } else {
- cb->segnum = scp->numdat;
- seq_add(&scp->numdat, 1);
- }
- *(ptr++) = cpu_to_le16(cb->segnum);
-
- return ptr;
-}
-
-void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb,
- gfp_t gfp, int oth)
-{
- struct dn_scp *scp = DN_SK(sk);
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1;
-
- cb->xmit_count = 0;
- dn_nsp_mk_data_header(sk, skb, oth);
-
- /*
- * Slow start: If we have been idle for more than
- * one RTT, then reset window to min size.
- */
- if ((jiffies - scp->stamp) > t)
- scp->snd_window = NSP_MIN_WINDOW;
-
- if (oth)
- skb_queue_tail(&scp->other_xmit_queue, skb);
- else
- skb_queue_tail(&scp->data_xmit_queue, skb);
-
- if (scp->flowrem_sw != DN_SEND)
- return;
-
- dn_nsp_clone_and_send(skb, gfp);
-}
-
-
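The slow-start test in dn_nsp_queue_xmit() derives an idle threshold from the smoothed RTT state, t = ((nsp_srtt >> 2) + nsp_rttvar) >> 1, and resets the window to the minimum when the connection has been idle longer than that. A sketch of just that rule, with jiffies replaced by plain counters:

#include <stdio.h>

#define MIN_WINDOW 1	/* assumed stand-in for NSP_MIN_WINDOW */

/* Reset the window after an idle period longer than the RTT-derived threshold. */
static unsigned int window_after_idle(unsigned int window,
				      unsigned long idle,
				      unsigned long srtt,
				      unsigned long rttvar)
{
	unsigned long t = ((srtt >> 2) + rttvar) >> 1;

	return idle > t ? MIN_WINDOW : window;
}

int main(void)
{
	printf("%u\n", window_after_idle(8, 500, 400, 200)); /* t=150 -> 1 */
	printf("%u\n", window_after_idle(8, 100, 400, 200)); /* stays 8 */
	return 0;
}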
-int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum)
-{
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- struct dn_scp *scp = DN_SK(sk);
- struct sk_buff *skb2, *n, *ack = NULL;
- int wakeup = 0;
- int try_retrans = 0;
- unsigned long reftime = cb->stamp;
- unsigned long pkttime;
- unsigned short xmit_count;
- unsigned short segnum;
-
- skb_queue_walk_safe(q, skb2, n) {
- struct dn_skb_cb *cb2 = DN_SKB_CB(skb2);
-
- if (dn_before_or_equal(cb2->segnum, acknum))
- ack = skb2;
-
- /* printk(KERN_DEBUG "ack: %s %04x %04x\n", ack ? "ACK" : "SKIP", (int)cb2->segnum, (int)acknum); */
-
- if (ack == NULL)
- continue;
-
- /* printk(KERN_DEBUG "check_xmit_queue: %04x, %d\n", acknum, cb2->xmit_count); */
-
- /* Does the _last_ acked packet have xmit_count > 1? */
- try_retrans = 0;
- /* Remember to wake up the sending process */
- wakeup = 1;
- /* Keep various statistics */
- pkttime = cb2->stamp;
- xmit_count = cb2->xmit_count;
- segnum = cb2->segnum;
- /* Remove and drop ack'ed packet */
- skb_unlink(ack, q);
- kfree_skb(ack);
- ack = NULL;
-
- /*
- * We don't expect to see acknowledgements for packets we
- * haven't sent yet.
- */
- WARN_ON(xmit_count == 0);
-
- /*
- * If the packet has only been sent once, we can use it
- * to calculate the RTT and also open the window a little
- * further.
- */
- if (xmit_count == 1) {
- if (dn_equal(segnum, acknum))
- dn_nsp_rtt(sk, (long)(pkttime - reftime));
-
- if (scp->snd_window < scp->max_window)
- scp->snd_window++;
- }
-
- /*
- * Packet has been sent more than once. If this is the last
- * packet to be acknowledged then we want to send the next
- * packet in the send queue again (assumes the remote host does
- * go-back-N error control).
- */
- if (xmit_count > 1)
- try_retrans = 1;
- }
-
- if (try_retrans)
- dn_nsp_output(sk);
-
- return wakeup;
-}
-
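dn_nsp_check_xmit_queue() frees every queued packet whose sequence number the incoming ack covers and notes whether the last one freed had been transmitted more than once; if it had, the peer is assumed to use go-back-N error control and sending restarts from the head of the queue. A minimal array-based sketch of that walk (no sequence-number wraparound handling, unlike the kernel's dn_before_or_equal()):

#include <stdio.h>

struct pkt {
	unsigned short segnum;
	unsigned short xmit_count;
};

/*
 * Drop every packet with segnum <= acknum.  Returns 1 when the last
 * acked packet had been retransmitted, i.e. the caller should resend
 * from the head of the surviving queue.
 */
static int purge_acked(struct pkt *q, int *len, unsigned short acknum)
{
	int i = 0, try_retrans = 0;

	while (i < *len && q[i].segnum <= acknum) {
		try_retrans = q[i].xmit_count > 1;
		i++;
	}
	/* Shift the survivors to the front of the queue. */
	for (int j = i; j < *len; j++)
		q[j - i] = q[j];
	*len -= i;
	return try_retrans;
}

int main(void)
{
	struct pkt q[] = { {1, 1}, {2, 2}, {3, 1} };
	int len = 3;

	if (purge_acked(q, &len, 2))
		printf("retransmit from segnum %u\n", q[0].segnum); /* 3 */
	return 0;
}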
-void dn_nsp_send_data_ack(struct sock *sk)
-{
- struct sk_buff *skb = NULL;
-
- if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
- return;
-
- skb_reserve(skb, 9);
- dn_mk_ack_header(sk, skb, 0x04, 9, 0);
- dn_nsp_send(skb);
-}
-
-void dn_nsp_send_oth_ack(struct sock *sk)
-{
- struct sk_buff *skb = NULL;
-
- if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
- return;
-
- skb_reserve(skb, 9);
- dn_mk_ack_header(sk, skb, 0x14, 9, 1);
- dn_nsp_send(skb);
-}
-
-
-void dn_send_conn_ack(struct sock *sk)
-{
- struct dn_scp *scp = DN_SK(sk);
- struct sk_buff *skb = NULL;
- struct nsp_conn_ack_msg *msg;
-
- if ((skb = dn_alloc_skb(sk, 3, sk->sk_allocation)) == NULL)
- return;
-
- msg = skb_put(skb, 3);
- msg->msgflg = 0x24;
- msg->dstaddr = scp->addrrem;
-
- dn_nsp_send(skb);
-}
-
-static int dn_nsp_retrans_conn_conf(struct sock *sk)
-{
- struct dn_scp *scp = DN_SK(sk);
-
- if (scp->state == DN_CC)
- dn_send_conn_conf(sk, GFP_ATOMIC);
-
- return 0;
-}
-
-void dn_send_conn_conf(struct sock *sk, gfp_t gfp)
-{
- struct dn_scp *scp = DN_SK(sk);
- struct sk_buff *skb = NULL;
- struct nsp_conn_init_msg *msg;
- __u8 len = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);
-
- if ((skb = dn_alloc_skb(sk, 50 + len, gfp)) == NULL)
- return;
-
- msg = skb_put(skb, sizeof(*msg));
- msg->msgflg = 0x28;
- msg->dstaddr = scp->addrrem;
- msg->srcaddr = scp->addrloc;
- msg->services = scp->services_loc;
- msg->info = scp->info_loc;
- msg->segsize = cpu_to_le16(scp->segsize_loc);
-
- skb_put_u8(skb, len);
-
- if (len > 0)
- skb_put_data(skb, scp->conndata_out.opt_data, len);
-
-
- dn_nsp_send(skb);
-
- scp->persist = dn_nsp_persist(sk);
- scp->persist_fxn = dn_nsp_retrans_conn_conf;
-}
-
-
-static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
- unsigned short reason, gfp_t gfp,
- struct dst_entry *dst,
- int ddl, unsigned char *dd, __le16 rem, __le16 loc)
-{
- struct sk_buff *skb = NULL;
- int size = 7 + ddl + ((msgflg == NSP_DISCINIT) ? 1 : 0);
- unsigned char *msg;
-
- if ((dst == NULL) || (rem == 0)) {
- net_dbg_ratelimited("DECnet: dn_nsp_do_disc: BUG! Please report this to SteveW@ACM.org rem=%u dst=%p\n",
- le16_to_cpu(rem), dst);
- return;
- }
-
- if ((skb = dn_alloc_skb(sk, size, gfp)) == NULL)
- return;
-
- msg = skb_put(skb, size);
- *msg++ = msgflg;
- *(__le16 *)msg = rem;
- msg += 2;
- *(__le16 *)msg = loc;
- msg += 2;
- *(__le16 *)msg = cpu_to_le16(reason);
- msg += 2;
- if (msgflg == NSP_DISCINIT)
- *msg++ = ddl;
-
- if (ddl) {
- memcpy(msg, dd, ddl);
- }
-
- /*
- * This doesn't go via the dn_nsp_send() function since we need
- * to be able to send disc packets out which have no socket
- * associations.
- */
- skb_dst_set(skb, dst_clone(dst));
- dst_output(&init_net, skb->sk, skb);
-}
-
-
-void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg,
- unsigned short reason, gfp_t gfp)
-{
- struct dn_scp *scp = DN_SK(sk);
- int ddl = 0;
-
- if (msgflg == NSP_DISCINIT)
- ddl = le16_to_cpu(scp->discdata_out.opt_optl);
-
- if (reason == 0)
- reason = le16_to_cpu(scp->discdata_out.opt_status);
-
- dn_nsp_do_disc(sk, msgflg, reason, gfp, __sk_dst_get(sk), ddl,
- scp->discdata_out.opt_data, scp->addrrem, scp->addrloc);
-}
-
-
-void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg,
- unsigned short reason)
-{
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- int ddl = 0;
- gfp_t gfp = GFP_ATOMIC;
-
- dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb_dst(skb), ddl,
- NULL, cb->src_port, cb->dst_port);
-}
-
-
-void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval)
-{
- struct dn_scp *scp = DN_SK(sk);
- struct sk_buff *skb;
- unsigned char *ptr;
- gfp_t gfp = GFP_ATOMIC;
-
- if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL)
- return;
-
- skb_reserve(skb, DN_MAX_NSP_DATA_HEADER);
- ptr = skb_put(skb, 2);
- DN_SKB_CB(skb)->nsp_flags = 0x10;
- *ptr++ = lsflags;
- *ptr = fcval;
-
- dn_nsp_queue_xmit(sk, skb, gfp, 1);
-
- scp->persist = dn_nsp_persist(sk);
- scp->persist_fxn = dn_nsp_xmit_timeout;
-}
-
-static int dn_nsp_retrans_conninit(struct sock *sk)
-{
- struct dn_scp *scp = DN_SK(sk);
-
- if (scp->state == DN_CI)
- dn_nsp_send_conninit(sk, NSP_RCI);
-
- return 0;
-}
-
-void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
-{
- struct dn_scp *scp = DN_SK(sk);
- struct nsp_conn_init_msg *msg;
- unsigned char aux;
- unsigned char menuver;
- struct dn_skb_cb *cb;
- unsigned char type = 1;
- gfp_t allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC;
- struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation);
-
- if (!skb)
- return;
-
- cb = DN_SKB_CB(skb);
- msg = skb_put(skb, sizeof(*msg));
-
- msg->msgflg = msgflg;
- msg->dstaddr = 0x0000; /* Remote node will assign it */
-
- msg->srcaddr = scp->addrloc;
- msg->services = scp->services_loc; /* Requested flow control */
- msg->info = scp->info_loc; /* Version Number */
- msg->segsize = cpu_to_le16(scp->segsize_loc); /* Max segment size */
-
- if (scp->peer.sdn_objnum)
- type = 0;
-
- skb_put(skb, dn_sockaddr2username(&scp->peer,
- skb_tail_pointer(skb), type));
- skb_put(skb, dn_sockaddr2username(&scp->addr,
- skb_tail_pointer(skb), 2));
-
- menuver = DN_MENUVER_ACC | DN_MENUVER_USR;
- if (scp->peer.sdn_flags & SDF_PROXY)
- menuver |= DN_MENUVER_PRX;
- if (scp->peer.sdn_flags & SDF_UICPROXY)
- menuver |= DN_MENUVER_UIC;
-
- skb_put_u8(skb, menuver); /* Menu Version */
-
- aux = scp->accessdata.acc_userl;
- skb_put_u8(skb, aux);
- if (aux > 0)
- skb_put_data(skb, scp->accessdata.acc_user, aux);
-
- aux = scp->accessdata.acc_passl;
- skb_put_u8(skb, aux);
- if (aux > 0)
- skb_put_data(skb, scp->accessdata.acc_pass, aux);
-
- aux = scp->accessdata.acc_accl;
- skb_put_u8(skb, aux);
- if (aux > 0)
- skb_put_data(skb, scp->accessdata.acc_acc, aux);
-
- aux = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);
- skb_put_u8(skb, aux);
- if (aux > 0)
- skb_put_data(skb, scp->conndata_out.opt_data, aux);
-
- scp->persist = dn_nsp_persist(sk);
- scp->persist_fxn = dn_nsp_retrans_conninit;
-
- cb->rt_flags = DN_RT_F_RQR;
-
- dn_nsp_send(skb);
-}
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
deleted file mode 100644
index 08c3dc45f1a4..000000000000
--- a/net/decnet/dn_route.c
+++ /dev/null
@@ -1,1921 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * DECnet An implementation of the DECnet protocol suite for the LINUX
- * operating system. DECnet is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * DECnet Routing Functions (Endnode and Router)
- *
- * Authors: Steve Whitehouse <SteveW@ACM.org>
- * Eduardo Marcelo Serrat <emserrat@geocities.com>
- *
- * Changes:
- * Steve Whitehouse : Fixes to allow "intra-ethernet" and
- * "return-to-sender" bits on outgoing
- * packets.
- * Steve Whitehouse : Timeouts for cached routes.
- * Steve Whitehouse : Use dst cache for input routes too.
- * Steve Whitehouse : Fixed error values in dn_send_skb.
- * Steve Whitehouse : Rework routing functions to better fit
- * DECnet routing design
- * Alexey Kuznetsov : New SMP locking
- * Steve Whitehouse : More SMP locking changes & dn_cache_dump()
- * Steve Whitehouse : Prerouting NF hook, now really is prerouting.
- * Fixed possible skb leak in rtnetlink funcs.
- * Steve Whitehouse : Dave Miller's dynamic hash table sizing and
- * Alexey Kuznetsov's finer grained locking
- * from ipv4/route.c.
- * Steve Whitehouse : Routing is now starting to look like a
- * sensible set of code now, mainly due to
- * my copying the IPv4 routing code. The
- * hooks here are modified and will continue
- * to evolve for a while.
- * Steve Whitehouse : Real SMP at last :-) Also new netfilter
- * stuff. Look out raw sockets your days
- * are numbered!
- * Steve Whitehouse : Added return-to-sender functions. Added
- * backlog congestion level return codes.
- * Steve Whitehouse : Fixed bug where routes were set up with
- * no ref count on net devices.
- * Steve Whitehouse : RCU for the route cache
- * Steve Whitehouse : Preparations for the flow cache
- * Steve Whitehouse : Prepare for nonlinear skbs
- */
-
-/******************************************************************************
- (c) 1995-1998 E.M. Serrat emserrat@geocities.com
-
-*******************************************************************************/
-
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/kernel.h>
-#include <linux/sockios.h>
-#include <linux/net.h>
-#include <linux/netdevice.h>
-#include <linux/inet.h>
-#include <linux/route.h>
-#include <linux/in_route.h>
-#include <linux/slab.h>
-#include <net/sock.h>
-#include <linux/mm.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/rtnetlink.h>
-#include <linux/string.h>
-#include <linux/netfilter_decnet.h>
-#include <linux/rcupdate.h>
-#include <linux/times.h>
-#include <linux/export.h>
-#include <asm/errno.h>
-#include <net/net_namespace.h>
-#include <net/netlink.h>
-#include <net/neighbour.h>
-#include <net/dst.h>
-#include <net/flow.h>
-#include <net/fib_rules.h>
-#include <net/dn.h>
-#include <net/dn_dev.h>
-#include <net/dn_nsp.h>
-#include <net/dn_route.h>
-#include <net/dn_neigh.h>
-#include <net/dn_fib.h>
-
-struct dn_rt_hash_bucket
-{
- struct dn_route __rcu *chain;
- spinlock_t lock;
-};
-
-extern struct neigh_table dn_neigh_table;
-
-
-static unsigned char dn_hiord_addr[6] = {0xAA,0x00,0x04,0x00,0x00,0x00};
-
-static const int dn_rt_min_delay = 2 * HZ;
-static const int dn_rt_max_delay = 10 * HZ;
-static const int dn_rt_mtu_expires = 10 * 60 * HZ;
-
-static unsigned long dn_rt_deadline;
-
-static int dn_dst_gc(struct dst_ops *ops);
-static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
-static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
-static unsigned int dn_dst_mtu(const struct dst_entry *dst);
-static void dn_dst_destroy(struct dst_entry *);
-static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
-static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
-static void dn_dst_link_failure(struct sk_buff *);
-static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb , u32 mtu,
- bool confirm_neigh);
-static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb);
-static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
- struct sk_buff *skb,
- const void *daddr);
-static int dn_route_input(struct sk_buff *);
-static void dn_run_flush(struct timer_list *unused);
-
-static struct dn_rt_hash_bucket *dn_rt_hash_table;
-static unsigned int dn_rt_hash_mask;
-
-static struct timer_list dn_route_timer;
-static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush);
-int decnet_dst_gc_interval = 2;
-
-static struct dst_ops dn_dst_ops = {
- .family = PF_DECnet,
- .gc_thresh = 128,
- .gc = dn_dst_gc,
- .check = dn_dst_check,
- .default_advmss = dn_dst_default_advmss,
- .mtu = dn_dst_mtu,
- .cow_metrics = dst_cow_metrics_generic,
- .destroy = dn_dst_destroy,
- .ifdown = dn_dst_ifdown,
- .negative_advice = dn_dst_negative_advice,
- .link_failure = dn_dst_link_failure,
- .update_pmtu = dn_dst_update_pmtu,
- .redirect = dn_dst_redirect,
- .neigh_lookup = dn_dst_neigh_lookup,
-};
-
-static void dn_dst_destroy(struct dst_entry *dst)
-{
- struct dn_route *rt = (struct dn_route *) dst;
-
- if (rt->n)
- neigh_release(rt->n);
- dst_destroy_metrics_generic(dst);
-}
-
-static void dn_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how)
-{
- if (how) {
- struct dn_route *rt = (struct dn_route *) dst;
- struct neighbour *n = rt->n;
-
- if (n && n->dev == dev) {
- n->dev = dev_net(dev)->loopback_dev;
- dev_hold(n->dev);
- dev_put(dev);
- }
- }
-}
-
-static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
-{
- __u16 tmp = (__u16 __force)(src ^ dst);
- tmp ^= (tmp >> 3);
- tmp ^= (tmp >> 5);
- tmp ^= (tmp >> 10);
- return dn_rt_hash_mask & (unsigned int)tmp;
-}
-
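dn_hash() mixes the XOR of the two addresses with three shifted XORs before masking down to the bucket count. The same fold as a standalone function in host byte order (the kernel force-casts the __le16 values; byte order is ignored here), taking the table mask as a parameter:

#include <stdint.h>
#include <stdio.h>

/* Fold src^dst into a bucket index; mask is table_size - 1 (power of two). */
static unsigned int dn_hash_sketch(uint16_t src, uint16_t dst,
				   unsigned int mask)
{
	uint16_t tmp = src ^ dst;

	tmp ^= tmp >> 3;
	tmp ^= tmp >> 5;
	tmp ^= tmp >> 10;
	return mask & tmp;
}

int main(void)
{
	/* Two flows differing only in low bits still land in distinct buckets. */
	printf("%u\n", dn_hash_sketch(0x0401, 0x0402, 255));
	printf("%u\n", dn_hash_sketch(0x0401, 0x0403, 255));
	return 0;
}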
-static void dn_dst_check_expire(struct timer_list *unused)
-{
- int i;
- struct dn_route *rt;
- struct dn_route __rcu **rtp;
- unsigned long now = jiffies;
- unsigned long expire = 120 * HZ;
-
- for (i = 0; i <= dn_rt_hash_mask; i++) {
- rtp = &dn_rt_hash_table[i].chain;
-
- spin_lock(&dn_rt_hash_table[i].lock);
- while ((rt = rcu_dereference_protected(*rtp,
- lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
- if (atomic_read(&rt->dst.__refcnt) > 1 ||
- (now - rt->dst.lastuse) < expire) {
- rtp = &rt->dn_next;
- continue;
- }
- *rtp = rt->dn_next;
- rt->dn_next = NULL;
- dst_dev_put(&rt->dst);
- dst_release(&rt->dst);
- }
- spin_unlock(&dn_rt_hash_table[i].lock);
-
- if ((jiffies - now) > 0)
- break;
- }
-
- mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
-}
-
-static int dn_dst_gc(struct dst_ops *ops)
-{
- struct dn_route *rt;
- struct dn_route __rcu **rtp;
- int i;
- unsigned long now = jiffies;
- unsigned long expire = 10 * HZ;
-
- for (i = 0; i <= dn_rt_hash_mask; i++) {
-
- spin_lock_bh(&dn_rt_hash_table[i].lock);
- rtp = &dn_rt_hash_table[i].chain;
-
- while ((rt = rcu_dereference_protected(*rtp,
- lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
- if (atomic_read(&rt->dst.__refcnt) > 1 ||
- (now - rt->dst.lastuse) < expire) {
- rtp = &rt->dn_next;
- continue;
- }
- *rtp = rt->dn_next;
- rt->dn_next = NULL;
- dst_dev_put(&rt->dst);
- dst_release(&rt->dst);
- break;
- }
- spin_unlock_bh(&dn_rt_hash_table[i].lock);
- }
-
- return 0;
-}
-
-/*
- * The DECnet standards don't impose a particular minimum MTU; what they
- * do insist on is that the routing layer accepts a datagram at least
- * 230 bytes long. Here we have to subtract the routing header length from
- * 230 to get the minimum acceptable MTU. If there is no neighbour, then we
- * assume the worst and use the long header size.
- *
- * We update both the mtu and the advertised mss (i.e. the segment size we
- * advertise to the other end).
- */
-static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu,
- bool confirm_neigh)
-{
- struct dn_route *rt = (struct dn_route *) dst;
- struct neighbour *n = rt->n;
- u32 min_mtu = 230;
- struct dn_dev *dn;
-
- dn = n ? rcu_dereference_raw(n->dev->dn_ptr) : NULL;
-
- if (dn && dn->use_long == 0)
- min_mtu -= 6;
- else
- min_mtu -= 21;
-
- if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
- if (!(dst_metric_locked(dst, RTAX_MTU))) {
- dst_metric_set(dst, RTAX_MTU, mtu);
- dst_set_expires(dst, dn_rt_mtu_expires);
- }
- if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
- u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
- u32 existing_mss = dst_metric_raw(dst, RTAX_ADVMSS);
- if (!existing_mss || existing_mss > mss)
- dst_metric_set(dst, RTAX_ADVMSS, mss);
- }
- }
-}
-
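In other words: a reported MTU is honoured only if it still leaves room for the mandatory 230-byte datagram after the routing header (6 bytes for the short form, 21 for the long form), and the advertised segment size follows the MTU down. A sketch of that arithmetic, assuming the NSP data header is the 11 bytes built earlier:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NSP_DATA_HDR 11		/* assumed DN_MAX_NSP_DATA_HEADER value */

/* Accept a path MTU only if 230 bytes of routing payload still fit. */
static bool accept_pmtu(uint32_t mtu, bool long_header, uint32_t *mss_out)
{
	uint32_t min_mtu = 230 - (long_header ? 21 : 6);

	if (mtu < min_mtu)
		return false;
	*mss_out = mtu - NSP_DATA_HDR;	/* advertised segment size */
	return true;
}

int main(void)
{
	uint32_t mss;

	if (accept_pmtu(576, true, &mss))
		printf("mss = %u\n", mss);	/* 565 */
	if (!accept_pmtu(200, true, &mss))
		printf("200 rejected\n");
	return 0;
}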
-static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb)
-{
-}
-
-/*
- * Called when a route has been marked obsolete (e.g. by a routing cache flush).
- */
-static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
-{
- return NULL;
-}
-
-static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
-{
- dst_release(dst);
- return NULL;
-}
-
-static void dn_dst_link_failure(struct sk_buff *skb)
-{
-}
-
-static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2)
-{
- return ((fl1->daddr ^ fl2->daddr) |
- (fl1->saddr ^ fl2->saddr) |
- (fl1->flowidn_mark ^ fl2->flowidn_mark) |
- (fl1->flowidn_scope ^ fl2->flowidn_scope) |
- (fl1->flowidn_oif ^ fl2->flowidn_oif) |
- (fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
-}
-
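compare_keys() avoids short-circuit branches: it ORs the XOR of every field and tests the combined result once, so the flows are equal only if every individual XOR was zero. The same trick in miniature:

#include <stdint.h>
#include <stdio.h>

struct flow_key {
	uint16_t daddr, saddr;
	uint32_t mark;
	uint8_t  scope;
	int      oif, iif;
};

/* Branch-free equality: OR all the XORs together, then test once. */
static int keys_equal(const struct flow_key *a, const struct flow_key *b)
{
	return (((uint32_t)(a->daddr ^ b->daddr)) |
		((uint32_t)(a->saddr ^ b->saddr)) |
		(a->mark ^ b->mark) |
		((uint32_t)(a->scope ^ b->scope)) |
		((uint32_t)(a->oif ^ b->oif)) |
		((uint32_t)(a->iif ^ b->iif))) == 0;
}

int main(void)
{
	struct flow_key x = { 0x0401, 0x0402, 0, 0, 2, 0 };
	struct flow_key y = x;

	printf("%d\n", keys_equal(&x, &y));	/* 1 */
	y.oif = 3;
	printf("%d\n", keys_equal(&x, &y));	/* 0 */
	return 0;
}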
-static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp)
-{
- struct dn_route *rth;
- struct dn_route __rcu **rthp;
- unsigned long now = jiffies;
-
- rthp = &dn_rt_hash_table[hash].chain;
-
- spin_lock_bh(&dn_rt_hash_table[hash].lock);
- while ((rth = rcu_dereference_protected(*rthp,
- lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
- if (compare_keys(&rth->fld, &rt->fld)) {
- /* Put it first */
- *rthp = rth->dn_next;
- rcu_assign_pointer(rth->dn_next,
- dn_rt_hash_table[hash].chain);
- rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
-
- dst_hold_and_use(&rth->dst, now);
- spin_unlock_bh(&dn_rt_hash_table[hash].lock);
-
- dst_release_immediate(&rt->dst);
- *rp = rth;
- return 0;
- }
- rthp = &rth->dn_next;
- }
-
- rcu_assign_pointer(rt->dn_next, dn_rt_hash_table[hash].chain);
- rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
-
- dst_hold_and_use(&rt->dst, now);
- spin_unlock_bh(&dn_rt_hash_table[hash].lock);
- *rp = rt;
- return 0;
-}
-
-static void dn_run_flush(struct timer_list *unused)
-{
- int i;
- struct dn_route *rt, *next;
-
- for (i = 0; i < dn_rt_hash_mask; i++) {
- spin_lock_bh(&dn_rt_hash_table[i].lock);
-
- if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
- goto nothing_to_declare;
-
- for(; rt; rt = next) {
- next = rcu_dereference_raw(rt->dn_next);
- RCU_INIT_POINTER(rt->dn_next, NULL);
- dst_dev_put(&rt->dst);
- dst_release(&rt->dst);
- }
-
-nothing_to_declare:
- spin_unlock_bh(&dn_rt_hash_table[i].lock);
- }
-}
-
-static DEFINE_SPINLOCK(dn_rt_flush_lock);
-
-void dn_rt_cache_flush(int delay)
-{
- unsigned long now = jiffies;
- int user_mode = !in_interrupt();
-
- if (delay < 0)
- delay = dn_rt_min_delay;
-
- spin_lock_bh(&dn_rt_flush_lock);
-
- if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
- long tmo = (long)(dn_rt_deadline - now);
-
- if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
- tmo = 0;
-
- if (delay > tmo)
- delay = tmo;
- }
-
- if (delay <= 0) {
- spin_unlock_bh(&dn_rt_flush_lock);
- dn_run_flush(NULL);
- return;
- }
-
- if (dn_rt_deadline == 0)
- dn_rt_deadline = now + dn_rt_max_delay;
-
- dn_rt_flush_timer.expires = now + delay;
- add_timer(&dn_rt_flush_timer);
- spin_unlock_bh(&dn_rt_flush_lock);
-}
-
-/**
- * dn_return_short - Return a short packet to its sender
- * @skb: The packet to return
- *
- */
-static int dn_return_short(struct sk_buff *skb)
-{
- struct dn_skb_cb *cb;
- unsigned char *ptr;
- __le16 *src;
- __le16 *dst;
-
- /* Add back headers */
- skb_push(skb, skb->data - skb_network_header(skb));
-
- if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
- return NET_RX_DROP;
-
- cb = DN_SKB_CB(skb);
- /* Skip packet length and point to flags */
- ptr = skb->data + 2;
- *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
-
- dst = (__le16 *)ptr;
- ptr += 2;
- src = (__le16 *)ptr;
- ptr += 2;
- *ptr = 0; /* Zero hop count */
-
- swap(*src, *dst);
-
- skb->pkt_type = PACKET_OUTGOING;
- dn_rt_finish_output(skb, NULL, NULL);
- return NET_RX_SUCCESS;
-}
-
-/**
- * dn_return_long - Return a long packet to its sender
- * @skb: The long format packet to return
- *
- */
-static int dn_return_long(struct sk_buff *skb)
-{
- struct dn_skb_cb *cb;
- unsigned char *ptr;
- unsigned char *src_addr, *dst_addr;
- unsigned char tmp[ETH_ALEN];
-
- /* Add back all headers */
- skb_push(skb, skb->data - skb_network_header(skb));
-
- if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
- return NET_RX_DROP;
-
- cb = DN_SKB_CB(skb);
- /* Ignore packet length and point to flags */
- ptr = skb->data + 2;
-
- /* Skip padding */
- if (*ptr & DN_RT_F_PF) {
- char padlen = (*ptr & ~DN_RT_F_PF);
- ptr += padlen;
- }
-
- *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
- ptr += 2;
- dst_addr = ptr;
- ptr += 8;
- src_addr = ptr;
- ptr += 6;
- *ptr = 0; /* Zero hop count */
-
- /* Swap source and destination */
- memcpy(tmp, src_addr, ETH_ALEN);
- memcpy(src_addr, dst_addr, ETH_ALEN);
- memcpy(dst_addr, tmp, ETH_ALEN);
-
- skb->pkt_type = PACKET_OUTGOING;
- dn_rt_finish_output(skb, dst_addr, src_addr);
- return NET_RX_SUCCESS;
-}
-
-/**
- * dn_route_rx_packet - Try to find a route for an incoming packet
- * @skb: The packet to find a route for
- *
- * Returns: result of input function if route is found, error code otherwise
- */
-static int dn_route_rx_packet(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
- struct dn_skb_cb *cb;
- int err;
-
- if ((err = dn_route_input(skb)) == 0)
- return dst_input(skb);
-
- cb = DN_SKB_CB(skb);
- if (decnet_debug_level & 4) {
- char *devname = skb->dev ? skb->dev->name : "???";
-
- printk(KERN_DEBUG
- "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
- (int)cb->rt_flags, devname, skb->len,
- le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
- err, skb->pkt_type);
- }
-
- if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
- switch (cb->rt_flags & DN_RT_PKT_MSK) {
- case DN_RT_PKT_SHORT:
- return dn_return_short(skb);
- case DN_RT_PKT_LONG:
- return dn_return_long(skb);
- }
- }
-
- kfree_skb(skb);
- return NET_RX_DROP;
-}
-
-static int dn_route_rx_long(struct sk_buff *skb)
-{
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- unsigned char *ptr = skb->data;
-
- if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
- goto drop_it;
-
- skb_pull(skb, 20);
- skb_reset_transport_header(skb);
-
- /* Destination info */
- ptr += 2;
- cb->dst = dn_eth2dn(ptr);
- if (memcmp(ptr, dn_hiord_addr, 4) != 0)
- goto drop_it;
- ptr += 6;
-
-
- /* Source info */
- ptr += 2;
- cb->src = dn_eth2dn(ptr);
- if (memcmp(ptr, dn_hiord_addr, 4) != 0)
- goto drop_it;
- ptr += 6;
- /* Other junk */
- ptr++;
- cb->hops = *ptr++; /* Visit Count */
-
- return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING,
- &init_net, NULL, skb, skb->dev, NULL,
- dn_route_rx_packet);
-
-drop_it:
- kfree_skb(skb);
- return NET_RX_DROP;
-}
-
-
-
-static int dn_route_rx_short(struct sk_buff *skb)
-{
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- unsigned char *ptr = skb->data;
-
- if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
- goto drop_it;
-
- skb_pull(skb, 5);
- skb_reset_transport_header(skb);
-
- cb->dst = *(__le16 *)ptr;
- ptr += 2;
- cb->src = *(__le16 *)ptr;
- ptr += 2;
- cb->hops = *ptr & 0x3f;
-
- return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING,
- &init_net, NULL, skb, skb->dev, NULL,
- dn_route_rx_packet);
-
-drop_it:
- kfree_skb(skb);
- return NET_RX_DROP;
-}
-
-static int dn_route_discard(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
- /*
- * I know we drop the packet here, but that's considered success in
- * this case.
- */
- kfree_skb(skb);
- return NET_RX_SUCCESS;
-}
-
-static int dn_route_ptp_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
- dn_dev_hello(skb);
- dn_neigh_pointopoint_hello(skb);
- return NET_RX_SUCCESS;
-}
-
-int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
-{
- struct dn_skb_cb *cb;
- unsigned char flags = 0;
- __u16 len = le16_to_cpu(*(__le16 *)skb->data);
- struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
- unsigned char padlen = 0;
-
- if (!net_eq(dev_net(dev), &init_net))
- goto dump_it;
-
- if (dn == NULL)
- goto dump_it;
-
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
- goto out;
-
- if (!pskb_may_pull(skb, 3))
- goto dump_it;
-
- skb_pull(skb, 2);
-
- if (len > skb->len)
- goto dump_it;
-
- skb_trim(skb, len);
-
- flags = *skb->data;
-
- cb = DN_SKB_CB(skb);
- cb->stamp = jiffies;
- cb->iif = dev->ifindex;
-
- /*
- * If we have padding, remove it.
- */
- if (flags & DN_RT_F_PF) {
- padlen = flags & ~DN_RT_F_PF;
- if (!pskb_may_pull(skb, padlen + 1))
- goto dump_it;
- skb_pull(skb, padlen);
- flags = *skb->data;
- }
-
- skb_reset_network_header(skb);
-
- /*
- * Weed out future version DECnet
- */
- if (flags & DN_RT_F_VER)
- goto dump_it;
-
- cb->rt_flags = flags;
-
- if (decnet_debug_level & 1)
- printk(KERN_DEBUG
- "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
- (int)flags, (dev) ? dev->name : "???", len, skb->len,
- padlen);
-
- if (flags & DN_RT_PKT_CNTL) {
- if (unlikely(skb_linearize(skb)))
- goto dump_it;
-
- switch (flags & DN_RT_CNTL_MSK) {
- case DN_RT_PKT_INIT:
- dn_dev_init_pkt(skb);
- break;
- case DN_RT_PKT_VERI:
- dn_dev_veri_pkt(skb);
- break;
- }
-
- if (dn->parms.state != DN_DEV_S_RU)
- goto dump_it;
-
- switch (flags & DN_RT_CNTL_MSK) {
- case DN_RT_PKT_HELO:
- return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
- &init_net, NULL, skb, skb->dev, NULL,
- dn_route_ptp_hello);
-
- case DN_RT_PKT_L1RT:
- case DN_RT_PKT_L2RT:
- return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
- &init_net, NULL, skb, skb->dev, NULL,
- dn_route_discard);
- case DN_RT_PKT_ERTH:
- return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
- &init_net, NULL, skb, skb->dev, NULL,
- dn_neigh_router_hello);
-
- case DN_RT_PKT_EEDH:
- return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
- &init_net, NULL, skb, skb->dev, NULL,
- dn_neigh_endnode_hello);
- }
- } else {
- if (dn->parms.state != DN_DEV_S_RU)
- goto dump_it;
-
- skb_pull(skb, 1); /* Pull flags */
-
- switch (flags & DN_RT_PKT_MSK) {
- case DN_RT_PKT_LONG:
- return dn_route_rx_long(skb);
- case DN_RT_PKT_SHORT:
- return dn_route_rx_short(skb);
- }
- }
-
-dump_it:
- kfree_skb(skb);
-out:
- return NET_RX_DROP;
-}
-
-static int dn_output(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
- struct dst_entry *dst = skb_dst(skb);
- struct dn_route *rt = (struct dn_route *)dst;
- struct net_device *dev = dst->dev;
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
-
- int err = -EINVAL;
-
- if (rt->n == NULL)
- goto error;
-
- skb->dev = dev;
-
- cb->src = rt->rt_saddr;
- cb->dst = rt->rt_daddr;
-
- /*
- * Always set the Intra-Ethernet bit on all outgoing packets
- * originating on this node. The only valid flag from the upper
- * layers is return-to-sender-requested. Set the hop count to 0 too.
- */
- cb->rt_flags &= ~DN_RT_F_RQR;
- cb->rt_flags |= DN_RT_F_IE;
- cb->hops = 0;
-
- return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT,
- &init_net, sk, skb, NULL, dev,
- dn_to_neigh_output);
-
-error:
- net_dbg_ratelimited("dn_output: This should not happen\n");
-
- kfree_skb(skb);
-
- return err;
-}
-
-static int dn_forward(struct sk_buff *skb)
-{
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- struct dst_entry *dst = skb_dst(skb);
- struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
- struct dn_route *rt;
- int header_len;
- struct net_device *dev = skb->dev;
-
- if (skb->pkt_type != PACKET_HOST)
- goto drop;
-
- /* Ensure that we have enough space for headers */
- rt = (struct dn_route *)skb_dst(skb);
- header_len = dn_db->use_long ? 21 : 6;
- if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len))
- goto drop;
-
- /*
- * Hop count exceeded.
- */
- if (++cb->hops > 30)
- goto drop;
-
- skb->dev = rt->dst.dev;
-
- /*
- * If packet goes out same interface it came in on, then set
- * the Intra-Ethernet bit. This has no effect for short
- * packets, so we don't need to test for them here.
- */
- cb->rt_flags &= ~DN_RT_F_IE;
- if (rt->rt_flags & RTCF_DOREDIRECT)
- cb->rt_flags |= DN_RT_F_IE;
-
- return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD,
- &init_net, NULL, skb, dev, skb->dev,
- dn_to_neigh_output);
-
-drop:
- kfree_skb(skb);
- return NET_RX_DROP;
-}
-
-/*
- * Used to catch bugs. This should normally never be
- * called.
- */
-static int dn_rt_bug_out(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
-
- net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
- le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
-
- kfree_skb(skb);
-
- return NET_RX_DROP;
-}
-
-static int dn_rt_bug(struct sk_buff *skb)
-{
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
-
- net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
- le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
-
- kfree_skb(skb);
-
- return NET_RX_DROP;
-}
-
-static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
-{
- return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
-}
-
-static unsigned int dn_dst_mtu(const struct dst_entry *dst)
-{
- unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
-
- return mtu ? : dst->dev->mtu;
-}
-
-static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
- struct sk_buff *skb,
- const void *daddr)
-{
- return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev);
-}
-
-static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
-{
- struct dn_fib_info *fi = res->fi;
- struct net_device *dev = rt->dst.dev;
- unsigned int mss_metric;
- struct neighbour *n;
-
- if (fi) {
- if (DN_FIB_RES_GW(*res) &&
- DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
- rt->rt_gateway = DN_FIB_RES_GW(*res);
- dst_init_metrics(&rt->dst, fi->fib_metrics, true);
- }
- rt->rt_type = res->type;
-
- if (dev != NULL && rt->n == NULL) {
- n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
- if (IS_ERR(n))
- return PTR_ERR(n);
- rt->n = n;
- }
-
- if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
- dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
- mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
- if (mss_metric) {
- unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
- if (mss_metric > mss)
- dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
- }
- return 0;
-}
-
-static inline int dn_match_addr(__le16 addr1, __le16 addr2)
-{
- __u16 tmp = le16_to_cpu(addr1) ^ le16_to_cpu(addr2);
- int match = 16;
- while(tmp) {
- tmp >>= 1;
- match--;
- }
- return match;
-}
-
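dn_match_addr() scores how many leading bits of two addresses agree by shifting their XOR right until it empties. A standalone copy, plus an equivalent using a count-leading-zeros builtin (the builtin variant is my addition, not something the kernel code used):

#include <stdint.h>
#include <stdio.h>

/* Leading-bit agreement between two 16-bit addresses, 0..16. */
static int match_addr(uint16_t a, uint16_t b)
{
	uint16_t tmp = a ^ b;
	int match = 16;

	while (tmp) {
		tmp >>= 1;
		match--;
	}
	return match;
}

/* Same result via count-leading-zeros (GCC/Clang builtin, 32-bit operand). */
static int match_addr_clz(uint16_t a, uint16_t b)
{
	uint16_t tmp = a ^ b;

	return tmp ? __builtin_clz(tmp) - 16 : 16;
}

int main(void)
{
	printf("%d %d\n", match_addr(0x0400, 0x0401),
	       match_addr_clz(0x0400, 0x0401));	/* 15 15 */
	return 0;
}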
-static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
-{
- __le16 saddr = 0;
- struct dn_dev *dn_db;
- struct dn_ifaddr *ifa;
- int best_match = 0;
- int ret;
-
- rcu_read_lock();
- dn_db = rcu_dereference(dev->dn_ptr);
- for (ifa = rcu_dereference(dn_db->ifa_list);
- ifa != NULL;
- ifa = rcu_dereference(ifa->ifa_next)) {
- if (ifa->ifa_scope > scope)
- continue;
- if (!daddr) {
- saddr = ifa->ifa_local;
- break;
- }
- ret = dn_match_addr(daddr, ifa->ifa_local);
- if (ret > best_match)
- saddr = ifa->ifa_local;
- if (best_match == 0)
- saddr = ifa->ifa_local;
- }
- rcu_read_unlock();
-
- return saddr;
-}
-
-static inline __le16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
-{
- return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
-}
-
-static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_res *res)
-{
- __le16 mask = dnet_make_mask(res->prefixlen);
- return (daddr&~mask)|res->fi->fib_nh->nh_gw;
-}
-
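dn_fib_rules_map_destination() rewrites only the network part of the destination: bits inside the prefix mask come from the gateway, bits outside it survive from the original address. A sketch in host byte order, with dnet_make_mask() re-implemented under the assumption that it sets the top prefixlen bits; the kernel relied on the gateway's host bits being clear, while the sketch masks them explicitly:

#include <stdint.h>
#include <stdio.h>

/* Assumed semantics of dnet_make_mask(): top `len` bits set, host order. */
static uint16_t make_mask(int len)
{
	return len ? (uint16_t)~((1u << (16 - len)) - 1) : 0;
}

/* Keep the host part of daddr, take the network part from the gateway. */
static uint16_t map_destination(uint16_t daddr, uint16_t gw, int prefixlen)
{
	uint16_t mask = make_mask(prefixlen);

	return (daddr & ~mask) | (gw & mask);
}

int main(void)
{
	/* 10-bit prefix: network bits come from gw, node bits survive. */
	printf("0x%04x\n", map_destination(0x0435, 0x0800, 10)); /* 0x0835 */
	return 0;
}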
-static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *oldflp, int try_hard)
-{
- struct flowidn fld = {
- .daddr = oldflp->daddr,
- .saddr = oldflp->saddr,
- .flowidn_scope = RT_SCOPE_UNIVERSE,
- .flowidn_mark = oldflp->flowidn_mark,
- .flowidn_iif = LOOPBACK_IFINDEX,
- .flowidn_oif = oldflp->flowidn_oif,
- };
- struct dn_route *rt = NULL;
- struct net_device *dev_out = NULL, *dev;
- struct neighbour *neigh = NULL;
- unsigned int hash;
- unsigned int flags = 0;
- struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
- int err;
- int free_res = 0;
- __le16 gateway = 0;
-
- if (decnet_debug_level & 16)
- printk(KERN_DEBUG
- "dn_route_output_slow: dst=%04x src=%04x mark=%d"
- " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr),
- le16_to_cpu(oldflp->saddr),
- oldflp->flowidn_mark, LOOPBACK_IFINDEX,
- oldflp->flowidn_oif);
-
- /* If we have an output interface, verify it's a DECnet device */
- if (oldflp->flowidn_oif) {
- dev_out = dev_get_by_index(&init_net, oldflp->flowidn_oif);
- err = -ENODEV;
- if (dev_out && dev_out->dn_ptr == NULL) {
- dev_put(dev_out);
- dev_out = NULL;
- }
- if (dev_out == NULL)
- goto out;
- }
-
- /* If we have a source address, verify that it's a local address */
- if (oldflp->saddr) {
- err = -EADDRNOTAVAIL;
-
- if (dev_out) {
- if (dn_dev_islocal(dev_out, oldflp->saddr))
- goto source_ok;
- dev_put(dev_out);
- goto out;
- }
- rcu_read_lock();
- for_each_netdev_rcu(&init_net, dev) {
- if (!dev->dn_ptr)
- continue;
- if (!dn_dev_islocal(dev, oldflp->saddr))
- continue;
- if ((dev->flags & IFF_LOOPBACK) &&
- oldflp->daddr &&
- !dn_dev_islocal(dev, oldflp->daddr))
- continue;
-
- dev_out = dev;
- break;
- }
- rcu_read_unlock();
- if (dev_out == NULL)
- goto out;
- dev_hold(dev_out);
-source_ok:
- ;
- }
-
- /* No destination? Assume it's local */
- if (!fld.daddr) {
- fld.daddr = fld.saddr;
-
- if (dev_out)
- dev_put(dev_out);
- err = -EINVAL;
- dev_out = init_net.loopback_dev;
- if (!dev_out->dn_ptr)
- goto out;
- err = -EADDRNOTAVAIL;
- dev_hold(dev_out);
- if (!fld.daddr) {
- fld.daddr =
- fld.saddr = dnet_select_source(dev_out, 0,
- RT_SCOPE_HOST);
- if (!fld.daddr)
- goto out;
- }
- fld.flowidn_oif = LOOPBACK_IFINDEX;
- res.type = RTN_LOCAL;
- goto make_route;
- }
-
- if (decnet_debug_level & 16)
- printk(KERN_DEBUG
- "dn_route_output_slow: initial checks complete."
- " dst=%04x src=%04x oif=%d try_hard=%d\n",
- le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
- fld.flowidn_oif, try_hard);
-
- /*
- * N.B. If the kernel is compiled without router support then
- * dn_fib_lookup() will evaluate to non-zero, so this if () block
- * will always be executed.
- */
- err = -ESRCH;
- if (try_hard || (err = dn_fib_lookup(&fld, &res)) != 0) {
- struct dn_dev *dn_db;
- if (err != -ESRCH)
- goto out;
- /*
- * Here the fallback is basically the standard algorithm for
- * routing in endnodes, as described in the DECnet routing
- * docs.
- *
- * If we are not trying hard, look in the neighbour cache.
- * The result is tested to ensure that if a specific output
- * device/source address was requested, then we honour that
- * here.
- */
- if (!try_hard) {
- neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fld.daddr);
- if (neigh) {
- if ((oldflp->flowidn_oif &&
- (neigh->dev->ifindex != oldflp->flowidn_oif)) ||
- (oldflp->saddr &&
- (!dn_dev_islocal(neigh->dev,
- oldflp->saddr)))) {
- neigh_release(neigh);
- neigh = NULL;
- } else {
- if (dev_out)
- dev_put(dev_out);
- if (dn_dev_islocal(neigh->dev, fld.daddr)) {
- dev_out = init_net.loopback_dev;
- res.type = RTN_LOCAL;
- } else {
- dev_out = neigh->dev;
- }
- dev_hold(dev_out);
- goto select_source;
- }
- }
- }
-
- /* Not there? Perhaps it's a local address */
- if (dev_out == NULL)
- dev_out = dn_dev_get_default();
- err = -ENODEV;
- if (dev_out == NULL)
- goto out;
- dn_db = rcu_dereference_raw(dev_out->dn_ptr);
- if (!dn_db)
- goto e_inval;
- /* Possible improvement - check all devices for local addr */
- if (dn_dev_islocal(dev_out, fld.daddr)) {
- dev_put(dev_out);
- dev_out = init_net.loopback_dev;
- dev_hold(dev_out);
- res.type = RTN_LOCAL;
- goto select_source;
- }
- /* Not local either... try sending it to the default router */
- neigh = neigh_clone(dn_db->router);
- BUG_ON(neigh && neigh->dev != dev_out);
-
- /* OK then, we assume it's directly connected and move on */
-select_source:
- if (neigh)
- gateway = ((struct dn_neigh *)neigh)->addr;
- if (gateway == 0)
- gateway = fld.daddr;
- if (fld.saddr == 0) {
- fld.saddr = dnet_select_source(dev_out, gateway,
- res.type == RTN_LOCAL ?
- RT_SCOPE_HOST :
- RT_SCOPE_LINK);
- if (fld.saddr == 0 && res.type != RTN_LOCAL)
- goto e_addr;
- }
- fld.flowidn_oif = dev_out->ifindex;
- goto make_route;
- }
- free_res = 1;
-
- if (res.type == RTN_NAT)
- goto e_inval;
-
- if (res.type == RTN_LOCAL) {
- if (!fld.saddr)
- fld.saddr = fld.daddr;
- if (dev_out)
- dev_put(dev_out);
- dev_out = init_net.loopback_dev;
- dev_hold(dev_out);
- if (!dev_out->dn_ptr)
- goto e_inval;
- fld.flowidn_oif = dev_out->ifindex;
- if (res.fi)
- dn_fib_info_put(res.fi);
- res.fi = NULL;
- goto make_route;
- }
-
- if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
- dn_fib_select_multipath(&fld, &res);
-
- /*
- * We could add some logic to deal with default routes here and
- * get rid of some of the special casing above.
- */
-
- if (!fld.saddr)
- fld.saddr = DN_FIB_RES_PREFSRC(res);
-
- if (dev_out)
- dev_put(dev_out);
- dev_out = DN_FIB_RES_DEV(res);
- dev_hold(dev_out);
- fld.flowidn_oif = dev_out->ifindex;
- gateway = DN_FIB_RES_GW(res);
-
-make_route:
- if (dev_out->flags & IFF_LOOPBACK)
- flags |= RTCF_LOCAL;
-
- rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
- if (rt == NULL)
- goto e_nobufs;
-
- rt->dn_next = NULL;
- memset(&rt->fld, 0, sizeof(rt->fld));
- rt->fld.saddr = oldflp->saddr;
- rt->fld.daddr = oldflp->daddr;
- rt->fld.flowidn_oif = oldflp->flowidn_oif;
- rt->fld.flowidn_iif = 0;
- rt->fld.flowidn_mark = oldflp->flowidn_mark;
-
- rt->rt_saddr = fld.saddr;
- rt->rt_daddr = fld.daddr;
- rt->rt_gateway = gateway ? gateway : fld.daddr;
- rt->rt_local_src = fld.saddr;
-
- rt->rt_dst_map = fld.daddr;
- rt->rt_src_map = fld.saddr;
-
- rt->n = neigh;
- neigh = NULL;
-
- rt->dst.lastuse = jiffies;
- rt->dst.output = dn_output;
- rt->dst.input = dn_rt_bug;
- rt->rt_flags = flags;
- if (flags & RTCF_LOCAL)
- rt->dst.input = dn_nsp_rx;
-
- err = dn_rt_set_next_hop(rt, &res);
- if (err)
- goto e_neighbour;
-
- hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
- /* dn_insert_route() increments dst->__refcnt */
- dn_insert_route(rt, hash, (struct dn_route **)pprt);
-
-done:
- if (neigh)
- neigh_release(neigh);
- if (free_res)
- dn_fib_res_put(&res);
- if (dev_out)
- dev_put(dev_out);
-out:
- return err;
-
-e_addr:
- err = -EADDRNOTAVAIL;
- goto done;
-e_inval:
- err = -EINVAL;
- goto done;
-e_nobufs:
- err = -ENOBUFS;
- goto done;
-e_neighbour:
- dst_release_immediate(&rt->dst);
- goto e_nobufs;
-}
-
-
-/*
- * N.B. The flags may be moved into the flowi at some future stage.
- */
-static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags)
-{
- unsigned int hash = dn_hash(flp->saddr, flp->daddr);
- struct dn_route *rt = NULL;
-
- if (!(flags & MSG_TRYHARD)) {
- rcu_read_lock_bh();
- for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
- rt = rcu_dereference_bh(rt->dn_next)) {
- if ((flp->daddr == rt->fld.daddr) &&
- (flp->saddr == rt->fld.saddr) &&
- (flp->flowidn_mark == rt->fld.flowidn_mark) &&
- dn_is_output_route(rt) &&
- (rt->fld.flowidn_oif == flp->flowidn_oif)) {
- dst_hold_and_use(&rt->dst, jiffies);
- rcu_read_unlock_bh();
- *pprt = &rt->dst;
- return 0;
- }
- }
- rcu_read_unlock_bh();
- }
-
- return dn_route_output_slow(pprt, flp, flags);
-}
-
-static int dn_route_output_key(struct dst_entry **pprt, struct flowidn *flp, int flags)
-{
- int err;
-
- err = __dn_route_output_key(pprt, flp, flags);
- if (err == 0 && flp->flowidn_proto) {
- *pprt = xfrm_lookup(&init_net, *pprt,
- flowidn_to_flowi(flp), NULL, 0);
- if (IS_ERR(*pprt)) {
- err = PTR_ERR(*pprt);
- *pprt = NULL;
- }
- }
- return err;
-}
-
-int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *fl, struct sock *sk, int flags)
-{
- int err;
-
- err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
- if (err == 0 && fl->flowidn_proto) {
- *pprt = xfrm_lookup(&init_net, *pprt,
- flowidn_to_flowi(fl), sk, 0);
- if (IS_ERR(*pprt)) {
- err = PTR_ERR(*pprt);
- *pprt = NULL;
- }
- }
- return err;
-}
-
-static int dn_route_input_slow(struct sk_buff *skb)
-{
- struct dn_route *rt = NULL;
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- struct net_device *in_dev = skb->dev;
- struct net_device *out_dev = NULL;
- struct dn_dev *dn_db;
- struct neighbour *neigh = NULL;
- unsigned int hash;
- int flags = 0;
- __le16 gateway = 0;
- __le16 local_src = 0;
- struct flowidn fld = {
- .daddr = cb->dst,
- .saddr = cb->src,
- .flowidn_scope = RT_SCOPE_UNIVERSE,
- .flowidn_mark = skb->mark,
- .flowidn_iif = skb->dev->ifindex,
- };
- struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
- int err = -EINVAL;
- int free_res = 0;
-
- dev_hold(in_dev);
-
- if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
- goto out;
-
- /* Zero source addresses are not allowed */
- if (fld.saddr == 0)
- goto out;
-
- /*
- * In this case we've just received a packet from a source
- * outside ourselves pretending to come from us. We don't
- * allow it any further to prevent routing loops, spoofing and
- * other nasties. Loopback packets already have the dst attached
- * so this only affects packets which have originated elsewhere.
- */
- err = -ENOTUNIQ;
- if (dn_dev_islocal(in_dev, cb->src))
- goto out;
-
- err = dn_fib_lookup(&fld, &res);
- if (err) {
- if (err != -ESRCH)
- goto out;
- /*
- * Is the destination us ?
- */
- if (!dn_dev_islocal(in_dev, cb->dst))
- goto e_inval;
-
- res.type = RTN_LOCAL;
- } else {
- __le16 src_map = fld.saddr;
- free_res = 1;
-
- out_dev = DN_FIB_RES_DEV(res);
- if (out_dev == NULL) {
- net_crit_ratelimited("Bug in dn_route_input_slow() No output device\n");
- goto e_inval;
- }
- dev_hold(out_dev);
-
- if (res.r)
- src_map = fld.saddr; /* no NAT support for now */
-
- gateway = DN_FIB_RES_GW(res);
- if (res.type == RTN_NAT) {
- fld.daddr = dn_fib_rules_map_destination(fld.daddr, &res);
- dn_fib_res_put(&res);
- free_res = 0;
- if (dn_fib_lookup(&fld, &res))
- goto e_inval;
- free_res = 1;
- if (res.type != RTN_UNICAST)
- goto e_inval;
- flags |= RTCF_DNAT;
- gateway = fld.daddr;
- }
- fld.saddr = src_map;
- }
-
- switch(res.type) {
- case RTN_UNICAST:
- /*
- * Forwarding check here: we only check for forwarding
- * being turned off. If you want to forward only intra-area,
- * it's up to you to set the routing tables up
- * correctly.
- */
- if (dn_db->parms.forwarding == 0)
- goto e_inval;
-
- if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
- dn_fib_select_multipath(&fld, &res);
-
- /*
- * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
- * flag as a hint to set the intra-ethernet bit when
- * forwarding. If we've got NAT in operation, we don't do
- * this optimisation.
- */
- if (out_dev == in_dev && !(flags & RTCF_NAT))
- flags |= RTCF_DOREDIRECT;
-
- local_src = DN_FIB_RES_PREFSRC(res);
-
- case RTN_BLACKHOLE:
- case RTN_UNREACHABLE:
- break;
- case RTN_LOCAL:
- flags |= RTCF_LOCAL;
- fld.saddr = cb->dst;
- fld.daddr = cb->src;
-
- /* Routing tables gave us a gateway */
- if (gateway)
- goto make_route;
-
- /* Packet was intra-ethernet, so we know it's on-link */
- if (cb->rt_flags & DN_RT_F_IE) {
- gateway = cb->src;
- goto make_route;
- }
-
- /* Use the default router if there is one */
- neigh = neigh_clone(dn_db->router);
- if (neigh) {
- gateway = ((struct dn_neigh *)neigh)->addr;
- goto make_route;
- }
-
- /* Close eyes and pray */
- gateway = cb->src;
- goto make_route;
- default:
- goto e_inval;
- }
-
-make_route:
- rt = dst_alloc(&dn_dst_ops, out_dev, 1, DST_OBSOLETE_NONE, DST_HOST);
- if (rt == NULL)
- goto e_nobufs;
-
- rt->dn_next = NULL;
- memset(&rt->fld, 0, sizeof(rt->fld));
- rt->rt_saddr = fld.saddr;
- rt->rt_daddr = fld.daddr;
- rt->rt_gateway = fld.daddr;
- if (gateway)
- rt->rt_gateway = gateway;
- rt->rt_local_src = local_src ? local_src : rt->rt_saddr;
-
- rt->rt_dst_map = fld.daddr;
- rt->rt_src_map = fld.saddr;
-
- rt->fld.saddr = cb->src;
- rt->fld.daddr = cb->dst;
- rt->fld.flowidn_oif = 0;
- rt->fld.flowidn_iif = in_dev->ifindex;
- rt->fld.flowidn_mark = fld.flowidn_mark;
-
- rt->n = neigh;
- rt->dst.lastuse = jiffies;
- rt->dst.output = dn_rt_bug_out;
- switch (res.type) {
- case RTN_UNICAST:
- rt->dst.input = dn_forward;
- break;
- case RTN_LOCAL:
- rt->dst.output = dn_output;
- rt->dst.input = dn_nsp_rx;
- rt->dst.dev = in_dev;
- flags |= RTCF_LOCAL;
- break;
- default:
- case RTN_UNREACHABLE:
- case RTN_BLACKHOLE:
- rt->dst.input = dst_discard;
- }
- rt->rt_flags = flags;
-
- err = dn_rt_set_next_hop(rt, &res);
- if (err)
- goto e_neighbour;
-
- hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
- /* dn_insert_route() increments dst->__refcnt */
- dn_insert_route(rt, hash, &rt);
- skb_dst_set(skb, &rt->dst);
-
-done:
- if (neigh)
- neigh_release(neigh);
- if (free_res)
- dn_fib_res_put(&res);
- dev_put(in_dev);
- if (out_dev)
- dev_put(out_dev);
-out:
- return err;
-
-e_inval:
- err = -EINVAL;
- goto done;
-
-e_nobufs:
- err = -ENOBUFS;
- goto done;
-
-e_neighbour:
- dst_release_immediate(&rt->dst);
- goto done;
-}
-
-static int dn_route_input(struct sk_buff *skb)
-{
- struct dn_route *rt;
- struct dn_skb_cb *cb = DN_SKB_CB(skb);
- unsigned int hash = dn_hash(cb->src, cb->dst);
-
- if (skb_dst(skb))
- return 0;
-
- rcu_read_lock();
- for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
- rt = rcu_dereference(rt->dn_next)) {
- if ((rt->fld.saddr == cb->src) &&
- (rt->fld.daddr == cb->dst) &&
- (rt->fld.flowidn_oif == 0) &&
- (rt->fld.flowidn_mark == skb->mark) &&
- (rt->fld.flowidn_iif == cb->iif)) {
- dst_hold_and_use(&rt->dst, jiffies);
- rcu_read_unlock();
- skb_dst_set(skb, (struct dst_entry *)rt);
- return 0;
- }
- }
- rcu_read_unlock();
-
- return dn_route_input_slow(skb);
-}
-
-static int dn_rt_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
- int event, int nowait, unsigned int flags)
-{
- struct dn_route *rt = (struct dn_route *)skb_dst(skb);
- struct rtmsg *r;
- struct nlmsghdr *nlh;
- long expires;
-
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
- if (!nlh)
- return -EMSGSIZE;
-
- r = nlmsg_data(nlh);
- r->rtm_family = AF_DECnet;
- r->rtm_dst_len = 16;
- r->rtm_src_len = 0;
- r->rtm_tos = 0;
- r->rtm_table = RT_TABLE_MAIN;
- r->rtm_type = rt->rt_type;
- r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
- r->rtm_scope = RT_SCOPE_UNIVERSE;
- r->rtm_protocol = RTPROT_UNSPEC;
-
- if (rt->rt_flags & RTCF_NOTIFY)
- r->rtm_flags |= RTM_F_NOTIFY;
-
- if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN) < 0 ||
- nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0)
- goto errout;
-
- if (rt->fld.saddr) {
- r->rtm_src_len = 16;
- if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0)
- goto errout;
- }
- if (rt->dst.dev &&
- nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0)
- goto errout;
-
- /*
- * Note to self - change this if input routes reverse direction when
- * they deal only with inputs and not with replies like they do
- * currently.
- */
- if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0)
- goto errout;
-
- if (rt->rt_daddr != rt->rt_gateway &&
- nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0)
- goto errout;
-
- if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
- goto errout;
-
- expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
- if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires,
- rt->dst.error) < 0)
- goto errout;
-
- if (dn_is_input_route(rt) &&
- nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)
- goto errout;
-
- nlmsg_end(skb, nlh);
- return 0;
-
-errout:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
-}
-
-const struct nla_policy rtm_dn_policy[RTA_MAX + 1] = {
- [RTA_DST] = { .type = NLA_U16 },
- [RTA_SRC] = { .type = NLA_U16 },
- [RTA_IIF] = { .type = NLA_U32 },
- [RTA_OIF] = { .type = NLA_U32 },
- [RTA_GATEWAY] = { .type = NLA_U16 },
- [RTA_PRIORITY] = { .type = NLA_U32 },
- [RTA_PREFSRC] = { .type = NLA_U16 },
- [RTA_METRICS] = { .type = NLA_NESTED },
- [RTA_MULTIPATH] = { .type = NLA_NESTED },
- [RTA_TABLE] = { .type = NLA_U32 },
- [RTA_MARK] = { .type = NLA_U32 },
-};
-
-/*
- * This is called by both endnodes and routers now.
- */
-static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack)
-{
- struct net *net = sock_net(in_skb->sk);
- struct rtmsg *rtm = nlmsg_data(nlh);
- struct dn_route *rt = NULL;
- struct dn_skb_cb *cb;
- int err;
- struct sk_buff *skb;
- struct flowidn fld;
- struct nlattr *tb[RTA_MAX+1];
-
- if (!net_eq(net, &init_net))
- return -EINVAL;
-
- err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
- rtm_dn_policy, extack);
- if (err < 0)
- return err;
-
- memset(&fld, 0, sizeof(fld));
- fld.flowidn_proto = DNPROTO_NSP;
-
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (skb == NULL)
- return -ENOBUFS;
- skb_reset_mac_header(skb);
- cb = DN_SKB_CB(skb);
-
- if (tb[RTA_SRC])
- fld.saddr = nla_get_le16(tb[RTA_SRC]);
-
- if (tb[RTA_DST])
- fld.daddr = nla_get_le16(tb[RTA_DST]);
-
- if (tb[RTA_IIF])
- fld.flowidn_iif = nla_get_u32(tb[RTA_IIF]);
-
- if (fld.flowidn_iif) {
- struct net_device *dev;
- dev = __dev_get_by_index(&init_net, fld.flowidn_iif);
- if (!dev || !dev->dn_ptr) {
- kfree_skb(skb);
- return -ENODEV;
- }
- skb->protocol = htons(ETH_P_DNA_RT);
- skb->dev = dev;
- cb->src = fld.saddr;
- cb->dst = fld.daddr;
- local_bh_disable();
- err = dn_route_input(skb);
- local_bh_enable();
- memset(cb, 0, sizeof(struct dn_skb_cb));
- rt = (struct dn_route *)skb_dst(skb);
- if (!err && -rt->dst.error)
- err = rt->dst.error;
- } else {
- if (tb[RTA_OIF])
- fld.flowidn_oif = nla_get_u32(tb[RTA_OIF]);
-
- err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0);
- }
-
- skb->dev = NULL;
- if (err)
- goto out_free;
- skb_dst_set(skb, &rt->dst);
- if (rtm->rtm_flags & RTM_F_NOTIFY)
- rt->rt_flags |= RTCF_NOTIFY;
-
- err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
- if (err < 0) {
- err = -EMSGSIZE;
- goto out_free;
- }
-
- return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).portid);
-
-out_free:
- kfree_skb(skb);
- return err;
-}
-
-/*
- * For routers, this is called from dn_fib_dump, but for endnodes it's
- * called directly from the rtnetlink dispatch table.
- */
-int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
-{
- struct net *net = sock_net(skb->sk);
- struct dn_route *rt;
- int h, s_h;
- int idx, s_idx;
- struct rtmsg *rtm;
-
- if (!net_eq(net, &init_net))
- return 0;
-
- if (nlmsg_len(cb->nlh) < sizeof(struct rtmsg))
- return -EINVAL;
-
- rtm = nlmsg_data(cb->nlh);
- if (!(rtm->rtm_flags & RTM_F_CLONED))
- return 0;
-
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
- for(h = 0; h <= dn_rt_hash_mask; h++) {
- if (h < s_h)
- continue;
- if (h > s_h)
- s_idx = 0;
- rcu_read_lock_bh();
- for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
- rt;
- rt = rcu_dereference_bh(rt->dn_next), idx++) {
- if (idx < s_idx)
- continue;
- skb_dst_set(skb, dst_clone(&rt->dst));
- if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, RTM_NEWROUTE,
- 1, NLM_F_MULTI) < 0) {
- skb_dst_drop(skb);
- rcu_read_unlock_bh();
- goto done;
- }
- skb_dst_drop(skb);
- }
- rcu_read_unlock_bh();
- }
-
-done:
- cb->args[0] = h;
- cb->args[1] = idx;
- return skb->len;
-}
-
-#ifdef CONFIG_PROC_FS
-struct dn_rt_cache_iter_state {
- int bucket;
-};
-
-static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
-{
- struct dn_route *rt = NULL;
- struct dn_rt_cache_iter_state *s = seq->private;
-
- for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
- rcu_read_lock_bh();
- rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
- if (rt)
- break;
- rcu_read_unlock_bh();
- }
- return rt;
-}
-
-static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
-{
- struct dn_rt_cache_iter_state *s = seq->private;
-
- rt = rcu_dereference_bh(rt->dn_next);
- while (!rt) {
- rcu_read_unlock_bh();
- if (--s->bucket < 0)
- break;
- rcu_read_lock_bh();
- rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
- }
- return rt;
-}
-
-static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
-{
- struct dn_route *rt = dn_rt_cache_get_first(seq);
-
- if (rt) {
- while(*pos && (rt = dn_rt_cache_get_next(seq, rt)))
- --*pos;
- }
- return *pos ? NULL : rt;
-}
-
-static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- struct dn_route *rt = dn_rt_cache_get_next(seq, v);
- ++*pos;
- return rt;
-}
-
-static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
-{
- if (v)
- rcu_read_unlock_bh();
-}
-
-static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
-{
- struct dn_route *rt = v;
- char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];
-
- seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
- rt->dst.dev ? rt->dst.dev->name : "*",
- dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
- dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
- atomic_read(&rt->dst.__refcnt),
- rt->dst.__use, 0);
- return 0;
-}
-
-static const struct seq_operations dn_rt_cache_seq_ops = {
- .start = dn_rt_cache_seq_start,
- .next = dn_rt_cache_seq_next,
- .stop = dn_rt_cache_seq_stop,
- .show = dn_rt_cache_seq_show,
-};
-#endif /* CONFIG_PROC_FS */
-
-void __init dn_route_init(void)
-{
- int i, goal, order;
-
- dn_dst_ops.kmem_cachep =
- kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
- dst_entries_init(&dn_dst_ops);
- timer_setup(&dn_route_timer, dn_dst_check_expire, 0);
- dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
- add_timer(&dn_route_timer);
-
- goal = totalram_pages() >> (26 - PAGE_SHIFT);
-
- for(order = 0; (1UL << order) < goal; order++)
- /* NOTHING */;
-
- /*
- * Only want 1024 entries max, since the table is very, very unlikely
- * to be larger than that.
- */
- while(order && ((((1UL << order) * PAGE_SIZE) /
- sizeof(struct dn_rt_hash_bucket)) >= 2048))
- order--;
-
- do {
- dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
- sizeof(struct dn_rt_hash_bucket);
- while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
- dn_rt_hash_mask--;
- dn_rt_hash_table = (struct dn_rt_hash_bucket *)
- __get_free_pages(GFP_ATOMIC, order);
- } while (dn_rt_hash_table == NULL && --order > 0);
-
- if (!dn_rt_hash_table)
- panic("Failed to allocate DECnet route cache hash table\n");
-
- printk(KERN_INFO
- "DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
- dn_rt_hash_mask,
- (long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);
-
- dn_rt_hash_mask--;
- for(i = 0; i <= dn_rt_hash_mask; i++) {
- spin_lock_init(&dn_rt_hash_table[i].lock);
- dn_rt_hash_table[i].chain = NULL;
- }
-
- dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
-
- proc_create_seq_private("decnet_cache", 0444, init_net.proc_net,
- &dn_rt_cache_seq_ops,
- sizeof(struct dn_rt_cache_iter_state), NULL);
-
-#ifdef CONFIG_DECNET_ROUTER
- rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE,
- dn_cache_getroute, dn_fib_dump, 0);
-#else
- rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE,
- dn_cache_getroute, dn_cache_dump, 0);
-#endif
-}
-
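The sizing logic in dn_route_init() works in whole pages: aim for roughly one page of hash table per 64 MB of RAM, find the smallest power-of-two page count that reaches that goal, back off while the table would hold 2048 buckets or more, and finally trim the bucket count to a power of two so it can serve as a mask. The same arithmetic in user space, with the page and bucket sizes assumed:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4 KB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define BUCKET_SZ  16			/* assumed sizeof(dn_rt_hash_bucket) */

int main(void)
{
	unsigned long totalram_pages = 1UL << 18;	/* pretend 1 GB of RAM */
	unsigned long goal = totalram_pages >> (26 - PAGE_SHIFT);
	unsigned long order, buckets;

	/* Smallest power-of-two page count that reaches the goal. */
	for (order = 0; (1UL << order) < goal; order++)
		;

	/* Back off while the table would hold 2048 buckets or more. */
	while (order && ((1UL << order) * PAGE_SIZE / BUCKET_SZ) >= 2048)
		order--;

	/* Buckets per allocation, trimmed down to a power of two. */
	buckets = (1UL << order) * PAGE_SIZE / BUCKET_SZ;
	while (buckets & (buckets - 1))
		buckets--;

	printf("order=%lu buckets=%lu mask=0x%lx\n",
	       order, buckets, buckets - 1);	/* order=2 buckets=1024 */
	return 0;
}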
-void __exit dn_route_cleanup(void)
-{
- del_timer(&dn_route_timer);
- dn_run_flush(NULL);
-
- remove_proc_entry("decnet_cache", init_net.proc_net);
- dst_entries_destroy(&dn_dst_ops);
-}
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
deleted file mode 100644
index 4a4e3c17740c..000000000000
--- a/net/decnet/dn_rules.c
+++ /dev/null
@@ -1,258 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/*
- * DECnet An implementation of the DECnet protocol suite for the LINUX
- * operating system. DECnet is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * DECnet Routing Forwarding Information Base (Rules)
- *
- * Author: Steve Whitehouse <SteveW@ACM.org>
- * Mostly copied from Alexey Kuznetsov's ipv4/fib_rules.c
- *
- *
- * Changes:
- * Steve Whitehouse <steve@chygwyn.com>
- * Updated for Thomas Graf's generic rules
- *
- */
-#include <linux/net.h>
-#include <linux/init.h>
-#include <linux/netlink.h>
-#include <linux/rtnetlink.h>
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/rcupdate.h>
-#include <linux/export.h>
-#include <net/neighbour.h>
-#include <net/dst.h>
-#include <net/flow.h>
-#include <net/fib_rules.h>
-#include <net/dn.h>
-#include <net/dn_fib.h>
-#include <net/dn_neigh.h>
-#include <net/dn_dev.h>
-#include <net/dn_route.h>
-
-static struct fib_rules_ops *dn_fib_rules_ops;
-
-struct dn_fib_rule
-{
- struct fib_rule common;
- unsigned char dst_len;
- unsigned char src_len;
- __le16 src;
- __le16 srcmask;
- __le16 dst;
- __le16 dstmask;
- __le16 srcmap;
- u8 flags;
-};
-
-
-int dn_fib_lookup(struct flowidn *flp, struct dn_fib_res *res)
-{
- struct fib_lookup_arg arg = {
- .result = res,
- };
- int err;
-
- err = fib_rules_lookup(dn_fib_rules_ops,
- flowidn_to_flowi(flp), 0, &arg);
- res->r = arg.rule;
-
- return err;
-}
-
-static int dn_fib_rule_action(struct fib_rule *rule, struct flowi *flp,
- int flags, struct fib_lookup_arg *arg)
-{
- struct flowidn *fld = &flp->u.dn;
- int err = -EAGAIN;
- struct dn_fib_table *tbl;
-
- switch(rule->action) {
- case FR_ACT_TO_TBL:
- break;
-
- case FR_ACT_UNREACHABLE:
- err = -ENETUNREACH;
- goto errout;
-
- case FR_ACT_PROHIBIT:
- err = -EACCES;
- goto errout;
-
- case FR_ACT_BLACKHOLE:
- default:
- err = -EINVAL;
- goto errout;
- }
-
- tbl = dn_fib_get_table(rule->table, 0);
- if (tbl == NULL)
- goto errout;
-
- err = tbl->lookup(tbl, fld, (struct dn_fib_res *)arg->result);
- if (err > 0)
- err = -EAGAIN;
-errout:
- return err;
-}
-
-static const struct nla_policy dn_fib_rule_policy[FRA_MAX+1] = {
- FRA_GENERIC_POLICY,
-};
-
-static int dn_fib_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
-{
- struct dn_fib_rule *r = (struct dn_fib_rule *)rule;
- struct flowidn *fld = &fl->u.dn;
- __le16 daddr = fld->daddr;
- __le16 saddr = fld->saddr;
-
- if (((saddr ^ r->src) & r->srcmask) ||
- ((daddr ^ r->dst) & r->dstmask))
- return 0;
-
- return 1;
-}
-
-static int dn_fib_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
- struct fib_rule_hdr *frh,
- struct nlattr **tb,
- struct netlink_ext_ack *extack)
-{
- int err = -EINVAL;
- struct dn_fib_rule *r = (struct dn_fib_rule *)rule;
-
- if (frh->tos) {
- NL_SET_ERR_MSG(extack, "Invalid tos value");
- goto errout;
- }
-
- if (rule->table == RT_TABLE_UNSPEC) {
- if (rule->action == FR_ACT_TO_TBL) {
- struct dn_fib_table *table;
-
- table = dn_fib_empty_table();
- if (table == NULL) {
- err = -ENOBUFS;
- goto errout;
- }
-
- rule->table = table->n;
- }
- }
-
- if (frh->src_len)
- r->src = nla_get_le16(tb[FRA_SRC]);
-
- if (frh->dst_len)
- r->dst = nla_get_le16(tb[FRA_DST]);
-
- r->src_len = frh->src_len;
- r->srcmask = dnet_make_mask(r->src_len);
- r->dst_len = frh->dst_len;
- r->dstmask = dnet_make_mask(r->dst_len);
- err = 0;
-errout:
- return err;
-}
-
-static int dn_fib_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
- struct nlattr **tb)
-{
- struct dn_fib_rule *r = (struct dn_fib_rule *)rule;
-
- if (frh->src_len && (r->src_len != frh->src_len))
- return 0;
-
- if (frh->dst_len && (r->dst_len != frh->dst_len))
- return 0;
-
- if (frh->src_len && (r->src != nla_get_le16(tb[FRA_SRC])))
- return 0;
-
- if (frh->dst_len && (r->dst != nla_get_le16(tb[FRA_DST])))
- return 0;
-
- return 1;
-}
-
-unsigned int dnet_addr_type(__le16 addr)
-{
- struct flowidn fld = { .daddr = addr };
- struct dn_fib_res res;
- unsigned int ret = RTN_UNICAST;
- struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0);
-
- res.r = NULL;
-
- if (tb) {
- if (!tb->lookup(tb, &fld, &res)) {
- ret = res.type;
- dn_fib_res_put(&res);
- }
- }
- return ret;
-}
-
-static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
- struct fib_rule_hdr *frh)
-{
- struct dn_fib_rule *r = (struct dn_fib_rule *)rule;
-
- frh->dst_len = r->dst_len;
- frh->src_len = r->src_len;
- frh->tos = 0;
-
- if ((r->dst_len &&
- nla_put_le16(skb, FRA_DST, r->dst)) ||
- (r->src_len &&
- nla_put_le16(skb, FRA_SRC, r->src)))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -ENOBUFS;
-}
-
-static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops)
-{
- dn_rt_cache_flush(-1);
-}
-
-static const struct fib_rules_ops __net_initconst dn_fib_rules_ops_template = {
- .family = AF_DECnet,
- .rule_size = sizeof(struct dn_fib_rule),
- .addr_size = sizeof(u16),
- .action = dn_fib_rule_action,
- .match = dn_fib_rule_match,
- .configure = dn_fib_rule_configure,
- .compare = dn_fib_rule_compare,
- .fill = dn_fib_rule_fill,
- .flush_cache = dn_fib_rule_flush_cache,
- .nlgroup = RTNLGRP_DECnet_RULE,
- .policy = dn_fib_rule_policy,
- .owner = THIS_MODULE,
- .fro_net = &init_net,
-};
-
-void __init dn_fib_rules_init(void)
-{
- dn_fib_rules_ops =
- fib_rules_register(&dn_fib_rules_ops_template, &init_net);
- BUG_ON(IS_ERR(dn_fib_rules_ops));
- BUG_ON(fib_default_rule_add(dn_fib_rules_ops, 0x7fff,
- RT_TABLE_MAIN, 0));
-}
-
-void __exit dn_fib_rules_cleanup(void)
-{
- rtnl_lock();
- fib_rules_unregister(dn_fib_rules_ops);
- rtnl_unlock();
- rcu_barrier();
-}
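dn_fib_rule_match() above needs no prefix loop: (addr ^ rule_addr) & mask is zero exactly when every bit covered by the mask agrees, so one XOR-and-mask per address decides the match. A standalone illustration of the idiom (addresses and mask are arbitrary examples):

#include <stdio.h>
#include <stdint.h>

/* returns 1 when addr matches want on every bit set in mask */
static int masked_match(uint16_t addr, uint16_t want, uint16_t mask)
{
	return ((addr ^ want) & mask) == 0;
}

int main(void)
{
	uint16_t rule_dst = 0x0400, mask = 0xfc00;	/* match the area bits only */

	printf("%d\n", masked_match(0x0401, rule_dst, mask));	/* 1: same area */
	printf("%d\n", masked_match(0x0801, rule_dst, mask));	/* 0: different area */
	return 0;
}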
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
deleted file mode 100644
index 33fefb0aebca..000000000000
--- a/net/decnet/dn_table.c
+++ /dev/null
@@ -1,929 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DECnet An implementation of the DECnet protocol suite for the LINUX
- * operating system. DECnet is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * DECnet Routing Forwarding Information Base (Routing Tables)
- *
- * Author: Steve Whitehouse <SteveW@ACM.org>
- * Mostly copied from the IPv4 routing code
- *
- *
- * Changes:
- *
- */
-#include <linux/string.h>
-#include <linux/net.h>
-#include <linux/socket.h>
-#include <linux/slab.h>
-#include <linux/sockios.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
-#include <linux/proc_fs.h>
-#include <linux/netdevice.h>
-#include <linux/timer.h>
-#include <linux/spinlock.h>
-#include <linux/atomic.h>
-#include <linux/uaccess.h>
-#include <linux/route.h> /* RTF_xxx */
-#include <net/neighbour.h>
-#include <net/netlink.h>
-#include <net/tcp.h>
-#include <net/dst.h>
-#include <net/flow.h>
-#include <net/fib_rules.h>
-#include <net/dn.h>
-#include <net/dn_route.h>
-#include <net/dn_fib.h>
-#include <net/dn_neigh.h>
-#include <net/dn_dev.h>
-
-struct dn_zone
-{
- struct dn_zone *dz_next;
- struct dn_fib_node **dz_hash;
- int dz_nent;
- int dz_divisor;
- u32 dz_hashmask;
-#define DZ_HASHMASK(dz) ((dz)->dz_hashmask)
- int dz_order;
- __le16 dz_mask;
-#define DZ_MASK(dz) ((dz)->dz_mask)
-};
-
-struct dn_hash
-{
- struct dn_zone *dh_zones[17];
- struct dn_zone *dh_zone_list;
-};
-
-#define dz_key_0(key) ((key).datum = 0)
-
-#define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
- for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
-
-#define endfor_nexthops(fi) }
-
-#define DN_MAX_DIVISOR 1024
-#define DN_S_ZOMBIE 1
-#define DN_S_ACCESSED 2
-
-#define DN_FIB_SCAN(f, fp) \
-for( ; ((f) = *(fp)) != NULL; (fp) = &(f)->fn_next)
-
-#define DN_FIB_SCAN_KEY(f, fp, key) \
-for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_next)
-
-#define RT_TABLE_MIN 1
-#define DN_FIB_TABLE_HASHSZ 256
-static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ];
-static DEFINE_RWLOCK(dn_fib_tables_lock);
-
-static struct kmem_cache *dn_hash_kmem __read_mostly;
-static int dn_fib_hash_zombies;
-
-static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
-{
- u16 h = le16_to_cpu(key.datum)>>(16 - dz->dz_order);
- h ^= (h >> 10);
- h ^= (h >> 6);
- h &= DZ_HASHMASK(dz);
- return *(dn_fib_idx_t *)&h;
-}
-
-static inline dn_fib_key_t dz_key(__le16 dst, struct dn_zone *dz)
-{
- dn_fib_key_t k;
- k.datum = dst & DZ_MASK(dz);
- return k;
-}
-
-static inline struct dn_fib_node **dn_chain_p(dn_fib_key_t key, struct dn_zone *dz)
-{
- return &dz->dz_hash[dn_hash(key, dz).datum];
-}
-
-static inline struct dn_fib_node *dz_chain(dn_fib_key_t key, struct dn_zone *dz)
-{
- return dz->dz_hash[dn_hash(key, dz).datum];
-}
-
-static inline int dn_key_eq(dn_fib_key_t a, dn_fib_key_t b)
-{
- return a.datum == b.datum;
-}
-
-static inline int dn_key_leq(dn_fib_key_t a, dn_fib_key_t b)
-{
- return a.datum <= b.datum;
-}
-
-static inline void dn_rebuild_zone(struct dn_zone *dz,
- struct dn_fib_node **old_ht,
- int old_divisor)
-{
- struct dn_fib_node *f, **fp, *next;
- int i;
-
- for(i = 0; i < old_divisor; i++) {
- for(f = old_ht[i]; f; f = next) {
- next = f->fn_next;
- for(fp = dn_chain_p(f->fn_key, dz);
- *fp && dn_key_leq((*fp)->fn_key, f->fn_key);
- fp = &(*fp)->fn_next)
- /* NOTHING */;
- f->fn_next = *fp;
- *fp = f;
- }
- }
-}
-
-static void dn_rehash_zone(struct dn_zone *dz)
-{
- struct dn_fib_node **ht, **old_ht;
- int old_divisor, new_divisor;
- u32 new_hashmask;
-
- old_divisor = dz->dz_divisor;
-
- switch (old_divisor) {
- case 16:
- new_divisor = 256;
- new_hashmask = 0xFF;
- break;
- default:
- printk(KERN_DEBUG "DECnet: dn_rehash_zone: BUG! %d\n",
- old_divisor);
- /* fall through */
- case 256:
- new_divisor = 1024;
- new_hashmask = 0x3FF;
- break;
- }
-
- ht = kcalloc(new_divisor, sizeof(struct dn_fib_node*), GFP_KERNEL);
- if (ht == NULL)
- return;
-
- write_lock_bh(&dn_fib_tables_lock);
- old_ht = dz->dz_hash;
- dz->dz_hash = ht;
- dz->dz_hashmask = new_hashmask;
- dz->dz_divisor = new_divisor;
- dn_rebuild_zone(dz, old_ht, old_divisor);
- write_unlock_bh(&dn_fib_tables_lock);
- kfree(old_ht);
-}
-
-static void dn_free_node(struct dn_fib_node *f)
-{
- dn_fib_release_info(DN_FIB_INFO(f));
- kmem_cache_free(dn_hash_kmem, f);
-}
-
-
-static struct dn_zone *dn_new_zone(struct dn_hash *table, int z)
-{
- int i;
- struct dn_zone *dz = kzalloc(sizeof(struct dn_zone), GFP_KERNEL);
- if (!dz)
- return NULL;
-
- if (z) {
- dz->dz_divisor = 16;
- dz->dz_hashmask = 0x0F;
- } else {
- dz->dz_divisor = 1;
- dz->dz_hashmask = 0;
- }
-
- dz->dz_hash = kcalloc(dz->dz_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL);
- if (!dz->dz_hash) {
- kfree(dz);
- return NULL;
- }
-
- dz->dz_order = z;
- dz->dz_mask = dnet_make_mask(z);
-
- for(i = z + 1; i <= 16; i++)
- if (table->dh_zones[i])
- break;
-
- write_lock_bh(&dn_fib_tables_lock);
- if (i>16) {
- dz->dz_next = table->dh_zone_list;
- table->dh_zone_list = dz;
- } else {
- dz->dz_next = table->dh_zones[i]->dz_next;
- table->dh_zones[i]->dz_next = dz;
- }
- table->dh_zones[z] = dz;
- write_unlock_bh(&dn_fib_tables_lock);
- return dz;
-}
-
-
-static int dn_fib_nh_match(struct rtmsg *r, struct nlmsghdr *nlh, struct nlattr *attrs[], struct dn_fib_info *fi)
-{
- struct rtnexthop *nhp;
- int nhlen;
-
- if (attrs[RTA_PRIORITY] &&
- nla_get_u32(attrs[RTA_PRIORITY]) != fi->fib_priority)
- return 1;
-
- if (attrs[RTA_OIF] || attrs[RTA_GATEWAY]) {
- if ((!attrs[RTA_OIF] || nla_get_u32(attrs[RTA_OIF]) == fi->fib_nh->nh_oif) &&
- (!attrs[RTA_GATEWAY] || nla_get_le16(attrs[RTA_GATEWAY]) != fi->fib_nh->nh_gw))
- return 0;
- return 1;
- }
-
- if (!attrs[RTA_MULTIPATH])
- return 0;
-
- nhp = nla_data(attrs[RTA_MULTIPATH]);
- nhlen = nla_len(attrs[RTA_MULTIPATH]);
-
- for_nexthops(fi) {
- int attrlen = nhlen - sizeof(struct rtnexthop);
- __le16 gw;
-
- if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
- return -EINVAL;
- if (nhp->rtnh_ifindex && nhp->rtnh_ifindex != nh->nh_oif)
- return 1;
- if (attrlen) {
- struct nlattr *gw_attr;
-
- gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY);
- gw = gw_attr ? nla_get_le16(gw_attr) : 0;
-
- if (gw && gw != nh->nh_gw)
- return 1;
- }
- nhp = RTNH_NEXT(nhp);
- } endfor_nexthops(fi);
-
- return 0;
-}
-
-static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi)
-{
- size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
- + nla_total_size(4) /* RTA_TABLE */
- + nla_total_size(2) /* RTA_DST */
- + nla_total_size(4) /* RTA_PRIORITY */
- + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
-
- /* space for nested metrics */
- payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
-
- if (fi->fib_nhs) {
- /* Also handles the special case fib_nhs == 1 */
-
- /* each nexthop is packed in an attribute */
- size_t nhsize = nla_total_size(sizeof(struct rtnexthop));
-
- /* may contain a gateway attribute */
- nhsize += nla_total_size(4);
-
- /* all nexthops are packed in a nested attribute */
- payload += nla_total_size(fi->fib_nhs * nhsize);
- }
-
- return payload;
-}
-
-static int dn_fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
- u32 tb_id, u8 type, u8 scope, void *dst, int dst_len,
- struct dn_fib_info *fi, unsigned int flags)
-{
- struct rtmsg *rtm;
- struct nlmsghdr *nlh;
-
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
- if (!nlh)
- return -EMSGSIZE;
-
- rtm = nlmsg_data(nlh);
- rtm->rtm_family = AF_DECnet;
- rtm->rtm_dst_len = dst_len;
- rtm->rtm_src_len = 0;
- rtm->rtm_tos = 0;
- rtm->rtm_table = tb_id;
- rtm->rtm_flags = fi->fib_flags;
- rtm->rtm_scope = scope;
- rtm->rtm_type = type;
- rtm->rtm_protocol = fi->fib_protocol;
-
- if (nla_put_u32(skb, RTA_TABLE, tb_id) < 0)
- goto errout;
-
- if (rtm->rtm_dst_len &&
- nla_put(skb, RTA_DST, 2, dst) < 0)
- goto errout;
-
- if (fi->fib_priority &&
- nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority) < 0)
- goto errout;
-
- if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
- goto errout;
-
- if (fi->fib_nhs == 1) {
- if (fi->fib_nh->nh_gw &&
- nla_put_le16(skb, RTA_GATEWAY, fi->fib_nh->nh_gw) < 0)
- goto errout;
-
- if (fi->fib_nh->nh_oif &&
- nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif) < 0)
- goto errout;
- }
-
- if (fi->fib_nhs > 1) {
- struct rtnexthop *nhp;
- struct nlattr *mp_head;
-
- mp_head = nla_nest_start_noflag(skb, RTA_MULTIPATH);
- if (!mp_head)
- goto errout;
-
- for_nexthops(fi) {
- if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp))))
- goto errout;
-
- nhp->rtnh_flags = nh->nh_flags & 0xFF;
- nhp->rtnh_hops = nh->nh_weight - 1;
- nhp->rtnh_ifindex = nh->nh_oif;
-
- if (nh->nh_gw &&
- nla_put_le16(skb, RTA_GATEWAY, nh->nh_gw) < 0)
- goto errout;
-
- nhp->rtnh_len = skb_tail_pointer(skb) - (unsigned char *)nhp;
- } endfor_nexthops(fi);
-
- nla_nest_end(skb, mp_head);
- }
-
- nlmsg_end(skb, nlh);
- return 0;
-
-errout:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
-}
-
-
-static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
- struct nlmsghdr *nlh, struct netlink_skb_parms *req)
-{
- struct sk_buff *skb;
- u32 portid = req ? req->portid : 0;
- int err = -ENOBUFS;
-
- skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL);
- if (skb == NULL)
- goto errout;
-
- err = dn_fib_dump_info(skb, portid, nlh->nlmsg_seq, event, tb_id,
- f->fn_type, f->fn_scope, &f->fn_key, z,
- DN_FIB_INFO(f), 0);
- if (err < 0) {
- /* -EMSGSIZE implies BUG in dn_fib_nlmsg_size() */
- WARN_ON(err == -EMSGSIZE);
- kfree_skb(skb);
- goto errout;
- }
- rtnl_notify(skb, &init_net, portid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
- return;
-errout:
- if (err < 0)
- rtnl_set_sk_err(&init_net, RTNLGRP_DECnet_ROUTE, err);
-}
-
-static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct dn_fib_table *tb,
- struct dn_zone *dz,
- struct dn_fib_node *f)
-{
- int i, s_i;
-
- s_i = cb->args[4];
- for(i = 0; f; i++, f = f->fn_next) {
- if (i < s_i)
- continue;
- if (f->fn_state & DN_S_ZOMBIE)
- continue;
- if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWROUTE,
- tb->n,
- (f->fn_state & DN_S_ZOMBIE) ? 0 : f->fn_type,
- f->fn_scope, &f->fn_key, dz->dz_order,
- f->fn_info, NLM_F_MULTI) < 0) {
- cb->args[4] = i;
- return -1;
- }
- }
- cb->args[4] = i;
- return skb->len;
-}
-
-static __inline__ int dn_hash_dump_zone(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct dn_fib_table *tb,
- struct dn_zone *dz)
-{
- int h, s_h;
-
- s_h = cb->args[3];
- for(h = 0; h < dz->dz_divisor; h++) {
- if (h < s_h)
- continue;
- if (h > s_h)
- memset(&cb->args[4], 0, sizeof(cb->args) - 4*sizeof(cb->args[0]));
- if (dz->dz_hash == NULL || dz->dz_hash[h] == NULL)
- continue;
- if (dn_hash_dump_bucket(skb, cb, tb, dz, dz->dz_hash[h]) < 0) {
- cb->args[3] = h;
- return -1;
- }
- }
- cb->args[3] = h;
- return skb->len;
-}
-
-static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- int m, s_m;
- struct dn_zone *dz;
- struct dn_hash *table = (struct dn_hash *)tb->data;
-
- s_m = cb->args[2];
- read_lock(&dn_fib_tables_lock);
- for(dz = table->dh_zone_list, m = 0; dz; dz = dz->dz_next, m++) {
- if (m < s_m)
- continue;
- if (m > s_m)
- memset(&cb->args[3], 0, sizeof(cb->args) - 3*sizeof(cb->args[0]));
-
- if (dn_hash_dump_zone(skb, cb, tb, dz) < 0) {
- cb->args[2] = m;
- read_unlock(&dn_fib_tables_lock);
- return -1;
- }
- }
- read_unlock(&dn_fib_tables_lock);
- cb->args[2] = m;
-
- return skb->len;
-}
-
-int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
-{
- struct net *net = sock_net(skb->sk);
- unsigned int h, s_h;
- unsigned int e = 0, s_e;
- struct dn_fib_table *tb;
- int dumped = 0;
-
- if (!net_eq(net, &init_net))
- return 0;
-
- if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
- ((struct rtmsg *)nlmsg_data(cb->nlh))->rtm_flags&RTM_F_CLONED)
- return dn_cache_dump(skb, cb);
-
- s_h = cb->args[0];
- s_e = cb->args[1];
-
- for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) {
- e = 0;
- hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) {
- if (e < s_e)
- goto next;
- if (dumped)
- memset(&cb->args[2], 0, sizeof(cb->args) -
- 2 * sizeof(cb->args[0]));
- if (tb->dump(tb, skb, cb) < 0)
- goto out;
- dumped = 1;
-next:
- e++;
- }
- }
-out:
- cb->args[1] = e;
- cb->args[0] = h;
-
- return skb->len;
-}
-
-static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct nlattr *attrs[],
- struct nlmsghdr *n, struct netlink_skb_parms *req)
-{
- struct dn_hash *table = (struct dn_hash *)tb->data;
- struct dn_fib_node *new_f, *f, **fp, **del_fp;
- struct dn_zone *dz;
- struct dn_fib_info *fi;
- int z = r->rtm_dst_len;
- int type = r->rtm_type;
- dn_fib_key_t key;
- int err;
-
- if (z > 16)
- return -EINVAL;
-
- dz = table->dh_zones[z];
- if (!dz && !(dz = dn_new_zone(table, z)))
- return -ENOBUFS;
-
- dz_key_0(key);
- if (attrs[RTA_DST]) {
- __le16 dst = nla_get_le16(attrs[RTA_DST]);
- if (dst & ~DZ_MASK(dz))
- return -EINVAL;
- key = dz_key(dst, dz);
- }
-
- if ((fi = dn_fib_create_info(r, attrs, n, &err)) == NULL)
- return err;
-
- if (dz->dz_nent > (dz->dz_divisor << 2) &&
- dz->dz_divisor > DN_MAX_DIVISOR &&
- (z==16 || (1<<z) > dz->dz_divisor))
- dn_rehash_zone(dz);
-
- fp = dn_chain_p(key, dz);
-
- DN_FIB_SCAN(f, fp) {
- if (dn_key_leq(key, f->fn_key))
- break;
- }
-
- del_fp = NULL;
-
- if (f && (f->fn_state & DN_S_ZOMBIE) &&
- dn_key_eq(f->fn_key, key)) {
- del_fp = fp;
- fp = &f->fn_next;
- f = *fp;
- goto create;
- }
-
- DN_FIB_SCAN_KEY(f, fp, key) {
- if (fi->fib_priority <= DN_FIB_INFO(f)->fib_priority)
- break;
- }
-
- if (f && dn_key_eq(f->fn_key, key) &&
- fi->fib_priority == DN_FIB_INFO(f)->fib_priority) {
- struct dn_fib_node **ins_fp;
-
- err = -EEXIST;
- if (n->nlmsg_flags & NLM_F_EXCL)
- goto out;
-
- if (n->nlmsg_flags & NLM_F_REPLACE) {
- del_fp = fp;
- fp = &f->fn_next;
- f = *fp;
- goto replace;
- }
-
- ins_fp = fp;
- err = -EEXIST;
-
- DN_FIB_SCAN_KEY(f, fp, key) {
- if (fi->fib_priority != DN_FIB_INFO(f)->fib_priority)
- break;
- if (f->fn_type == type &&
- f->fn_scope == r->rtm_scope &&
- DN_FIB_INFO(f) == fi)
- goto out;
- }
-
- if (!(n->nlmsg_flags & NLM_F_APPEND)) {
- fp = ins_fp;
- f = *fp;
- }
- }
-
-create:
- err = -ENOENT;
- if (!(n->nlmsg_flags & NLM_F_CREATE))
- goto out;
-
-replace:
- err = -ENOBUFS;
- new_f = kmem_cache_zalloc(dn_hash_kmem, GFP_KERNEL);
- if (new_f == NULL)
- goto out;
-
- new_f->fn_key = key;
- new_f->fn_type = type;
- new_f->fn_scope = r->rtm_scope;
- DN_FIB_INFO(new_f) = fi;
-
- new_f->fn_next = f;
- write_lock_bh(&dn_fib_tables_lock);
- *fp = new_f;
- write_unlock_bh(&dn_fib_tables_lock);
- dz->dz_nent++;
-
- if (del_fp) {
- f = *del_fp;
- write_lock_bh(&dn_fib_tables_lock);
- *del_fp = f->fn_next;
- write_unlock_bh(&dn_fib_tables_lock);
-
- if (!(f->fn_state & DN_S_ZOMBIE))
- dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);
- if (f->fn_state & DN_S_ACCESSED)
- dn_rt_cache_flush(-1);
- dn_free_node(f);
- dz->dz_nent--;
- } else {
- dn_rt_cache_flush(-1);
- }
-
- dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req);
-
- return 0;
-out:
- dn_fib_release_info(fi);
- return err;
-}
-
-
-static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct nlattr *attrs[],
- struct nlmsghdr *n, struct netlink_skb_parms *req)
-{
- struct dn_hash *table = (struct dn_hash*)tb->data;
- struct dn_fib_node **fp, **del_fp, *f;
- int z = r->rtm_dst_len;
- struct dn_zone *dz;
- dn_fib_key_t key;
- int matched;
-
-
- if (z > 16)
- return -EINVAL;
-
- if ((dz = table->dh_zones[z]) == NULL)
- return -ESRCH;
-
- dz_key_0(key);
- if (attrs[RTA_DST]) {
- __le16 dst = nla_get_le16(attrs[RTA_DST]);
- if (dst & ~DZ_MASK(dz))
- return -EINVAL;
- key = dz_key(dst, dz);
- }
-
- fp = dn_chain_p(key, dz);
-
- DN_FIB_SCAN(f, fp) {
- if (dn_key_eq(f->fn_key, key))
- break;
- if (dn_key_leq(key, f->fn_key))
- return -ESRCH;
- }
-
- matched = 0;
- del_fp = NULL;
- DN_FIB_SCAN_KEY(f, fp, key) {
- struct dn_fib_info *fi = DN_FIB_INFO(f);
-
- if (f->fn_state & DN_S_ZOMBIE)
- return -ESRCH;
-
- matched++;
-
- if (del_fp == NULL &&
- (!r->rtm_type || f->fn_type == r->rtm_type) &&
- (r->rtm_scope == RT_SCOPE_NOWHERE || f->fn_scope == r->rtm_scope) &&
- (!r->rtm_protocol ||
- fi->fib_protocol == r->rtm_protocol) &&
- dn_fib_nh_match(r, n, attrs, fi) == 0)
- del_fp = fp;
- }
-
- if (del_fp) {
- f = *del_fp;
- dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);
-
- if (matched != 1) {
- write_lock_bh(&dn_fib_tables_lock);
- *del_fp = f->fn_next;
- write_unlock_bh(&dn_fib_tables_lock);
-
- if (f->fn_state & DN_S_ACCESSED)
- dn_rt_cache_flush(-1);
- dn_free_node(f);
- dz->dz_nent--;
- } else {
- f->fn_state |= DN_S_ZOMBIE;
- if (f->fn_state & DN_S_ACCESSED) {
- f->fn_state &= ~DN_S_ACCESSED;
- dn_rt_cache_flush(-1);
- }
- if (++dn_fib_hash_zombies > 128)
- dn_fib_flush();
- }
-
- return 0;
- }
-
- return -ESRCH;
-}
-
-static inline int dn_flush_list(struct dn_fib_node **fp, int z, struct dn_hash *table)
-{
- int found = 0;
- struct dn_fib_node *f;
-
- while((f = *fp) != NULL) {
- struct dn_fib_info *fi = DN_FIB_INFO(f);
-
- if (fi && ((f->fn_state & DN_S_ZOMBIE) || (fi->fib_flags & RTNH_F_DEAD))) {
- write_lock_bh(&dn_fib_tables_lock);
- *fp = f->fn_next;
- write_unlock_bh(&dn_fib_tables_lock);
-
- dn_free_node(f);
- found++;
- continue;
- }
- fp = &f->fn_next;
- }
-
- return found;
-}
-
-static int dn_fib_table_flush(struct dn_fib_table *tb)
-{
- struct dn_hash *table = (struct dn_hash *)tb->data;
- struct dn_zone *dz;
- int found = 0;
-
- dn_fib_hash_zombies = 0;
- for(dz = table->dh_zone_list; dz; dz = dz->dz_next) {
- int i;
- int tmp = 0;
- for(i = dz->dz_divisor-1; i >= 0; i--)
- tmp += dn_flush_list(&dz->dz_hash[i], dz->dz_order, table);
- dz->dz_nent -= tmp;
- found += tmp;
- }
-
- return found;
-}
-
-static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowidn *flp, struct dn_fib_res *res)
-{
- int err;
- struct dn_zone *dz;
- struct dn_hash *t = (struct dn_hash *)tb->data;
-
- read_lock(&dn_fib_tables_lock);
- for(dz = t->dh_zone_list; dz; dz = dz->dz_next) {
- struct dn_fib_node *f;
- dn_fib_key_t k = dz_key(flp->daddr, dz);
-
- for(f = dz_chain(k, dz); f; f = f->fn_next) {
- if (!dn_key_eq(k, f->fn_key)) {
- if (dn_key_leq(k, f->fn_key))
- break;
- else
- continue;
- }
-
- f->fn_state |= DN_S_ACCESSED;
-
- if (f->fn_state&DN_S_ZOMBIE)
- continue;
-
- if (f->fn_scope < flp->flowidn_scope)
- continue;
-
- err = dn_fib_semantic_match(f->fn_type, DN_FIB_INFO(f), flp, res);
-
- if (err == 0) {
- res->type = f->fn_type;
- res->scope = f->fn_scope;
- res->prefixlen = dz->dz_order;
- goto out;
- }
- if (err < 0)
- goto out;
- }
- }
- err = 1;
-out:
- read_unlock(&dn_fib_tables_lock);
- return err;
-}
-
-
-struct dn_fib_table *dn_fib_get_table(u32 n, int create)
-{
- struct dn_fib_table *t;
- unsigned int h;
-
- if (n < RT_TABLE_MIN)
- return NULL;
-
- if (n > RT_TABLE_MAX)
- return NULL;
-
- h = n & (DN_FIB_TABLE_HASHSZ - 1);
- rcu_read_lock();
- hlist_for_each_entry_rcu(t, &dn_fib_table_hash[h], hlist) {
- if (t->n == n) {
- rcu_read_unlock();
- return t;
- }
- }
- rcu_read_unlock();
-
- if (!create)
- return NULL;
-
- if (in_interrupt()) {
- net_dbg_ratelimited("DECnet: BUG! Attempt to create routing table from interrupt\n");
- return NULL;
- }
-
- t = kzalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash),
- GFP_KERNEL);
- if (t == NULL)
- return NULL;
-
- t->n = n;
- t->insert = dn_fib_table_insert;
- t->delete = dn_fib_table_delete;
- t->lookup = dn_fib_table_lookup;
- t->flush = dn_fib_table_flush;
- t->dump = dn_fib_table_dump;
- hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]);
-
- return t;
-}
-
-struct dn_fib_table *dn_fib_empty_table(void)
-{
- u32 id;
-
- for(id = RT_TABLE_MIN; id <= RT_TABLE_MAX; id++)
- if (dn_fib_get_table(id, 0) == NULL)
- return dn_fib_get_table(id, 1);
- return NULL;
-}
-
-void dn_fib_flush(void)
-{
- int flushed = 0;
- struct dn_fib_table *tb;
- unsigned int h;
-
- for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
- hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist)
- flushed += tb->flush(tb);
- }
-
- if (flushed)
- dn_rt_cache_flush(-1);
-}
-
-void __init dn_fib_table_init(void)
-{
- dn_hash_kmem = kmem_cache_create("dn_fib_info_cache",
- sizeof(struct dn_fib_info),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
-}
-
-void __exit dn_fib_table_cleanup(void)
-{
- struct dn_fib_table *t;
- struct hlist_node *next;
- unsigned int h;
-
- write_lock(&dn_fib_tables_lock);
- for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
- hlist_for_each_entry_safe(t, next, &dn_fib_table_hash[h],
- hlist) {
- hlist_del(&t->hlist);
- kfree(t);
- }
- }
- write_unlock(&dn_fib_tables_lock);
-}
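The zone hash in dn_table.c keeps only the top dz_order prefix bits of the key, folds them down with two shift-XORs, and masks the result to the bucket count. A userspace model (the shift amounts mirror the deleted dn_hash(); the 16-bucket table matches a fresh dn_new_zone() zone):

#include <stdio.h>
#include <stdint.h>

static unsigned int dn_zone_hash(uint16_t key, int order, uint32_t hashmask)
{
	uint16_t h = key >> (16 - order);	/* keep the prefix bits */

	h ^= h >> 10;				/* fold high bits down */
	h ^= h >> 6;
	return h & hashmask;
}

int main(void)
{
	/* a /16 zone hashed into 16 buckets, as in a fresh zone */
	printf("%u\n", dn_zone_hash(0x1234, 16, 0x0f));
	printf("%u\n", dn_zone_hash(0x1235, 16, 0x0f));
	return 0;
}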
diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c
deleted file mode 100644
index aa4155875ca8..000000000000
--- a/net/decnet/dn_timer.c
+++ /dev/null
@@ -1,104 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DECnet An implementation of the DECnet protocol suite for the LINUX
- * operating system. DECnet is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * DECnet Socket Timer Functions
- *
- * Author: Steve Whitehouse <SteveW@ACM.org>
- *
- *
- * Changes:
- * Steve Whitehouse : Made keepalive timer part of the same
- * timer idea.
- * Steve Whitehouse : Added checks for sk->sock_readers
- * David S. Miller : New socket locking
- * Steve Whitehouse : Timer grabs socket ref.
- */
-#include <linux/net.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/timer.h>
-#include <linux/spinlock.h>
-#include <net/sock.h>
-#include <linux/atomic.h>
-#include <linux/jiffies.h>
-#include <net/flow.h>
-#include <net/dn.h>
-
-/*
- * Slow timer is for everything else (n * 500ms)
- */
-
-#define SLOW_INTERVAL (HZ/2)
-
-static void dn_slow_timer(struct timer_list *t);
-
-void dn_start_slow_timer(struct sock *sk)
-{
- timer_setup(&sk->sk_timer, dn_slow_timer, 0);
- sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
-}
-
-void dn_stop_slow_timer(struct sock *sk)
-{
- sk_stop_timer(sk, &sk->sk_timer);
-}
-
-static void dn_slow_timer(struct timer_list *t)
-{
- struct sock *sk = from_timer(sk, t, sk_timer);
- struct dn_scp *scp = DN_SK(sk);
-
- bh_lock_sock(sk);
-
- if (sock_owned_by_user(sk)) {
- sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
- goto out;
- }
-
- /*
- * The persist timer is the standard slow timer used for retransmits
- * in both connection establishment and disconnection as well as
- * in the RUN state. The different states are catered for by changing
- * the function pointer in the socket. Setting the timer to a value
- * of zero turns it off. We allow the persist_fxn to turn the
-	 * timer off in a permanent way by returning non-zero, so that
-	 * timer-based routines may remove sockets. This is why we have a
- * sock_hold()/sock_put() around the timer to prevent the socket
- * going away in the middle.
- */
- if (scp->persist && scp->persist_fxn) {
- if (scp->persist <= SLOW_INTERVAL) {
- scp->persist = 0;
-
- if (scp->persist_fxn(sk))
- goto out;
- } else {
- scp->persist -= SLOW_INTERVAL;
- }
- }
-
- /*
-	 * Check for keepalive timeout. This runs after the other timer
-	 * because, if the previous timer caused a retransmit, we don't
-	 * need to do this. scp->stamp is the last time that we sent a packet.
- * The keepalive function sends a link service packet to the
- * other end. If it remains unacknowledged, the standard
- * socket timers will eventually shut the socket down. Each
- * time we do this, scp->stamp will be updated, thus
-	 * we won't try to send another until scp->keepalive has passed
- * since the last successful transmission.
- */
- if (scp->keepalive && scp->keepalive_fxn && (scp->state == DN_RUN)) {
- if (time_after_eq(jiffies, scp->stamp + scp->keepalive))
- scp->keepalive_fxn(sk);
- }
-
- sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
-out:
- bh_unlock_sock(sk);
- sock_put(sk);
-}
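The persist handling above is a countdown in units of the timer tick: each SLOW_INTERVAL pass either subtracts one interval or, once the remainder fits within a tick, zeroes the counter and fires the callback, whose return value decides whether the timer keeps running. A tiny model of that countdown (tick size and callback are illustrative):

#include <stdio.h>

#define SLOW_INTERVAL 50	/* one timer tick, in jiffies-like units */

static int fired;

static int persist_fxn(void)
{
	fired = 1;
	return 0;	/* non-zero would mean "stop the timer for good" */
}

int main(void)
{
	unsigned long persist = 120;	/* expires about 2.4 ticks from now */
	int tick;

	for (tick = 1; persist; tick++) {
		if (persist <= SLOW_INTERVAL) {
			persist = 0;
			persist_fxn();
		} else {
			persist -= SLOW_INTERVAL;
		}
		printf("tick %d: persist=%lu fired=%d\n", tick, persist, fired);
	}
	return 0;
}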
diff --git a/net/decnet/netfilter/Kconfig b/net/decnet/netfilter/Kconfig
deleted file mode 100644
index 14ec4ef95fab..000000000000
--- a/net/decnet/netfilter/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# DECnet netfilter configuration
-#
-
-menu "DECnet: Netfilter Configuration"
- depends on DECNET && NETFILTER
- depends on NETFILTER_ADVANCED
-
-config DECNET_NF_GRABULATOR
- tristate "Routing message grabulator (for userland routing daemon)"
- help
- Enable this module if you want to use the userland DECnet routing
- daemon. You will also need to enable routing support for DECnet
- unless you just want to monitor routing messages from other nodes.
-
-endmenu
diff --git a/net/decnet/netfilter/Makefile b/net/decnet/netfilter/Makefile
deleted file mode 100644
index 429c84289d0f..000000000000
--- a/net/decnet/netfilter/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for DECnet netfilter modules
-#
-
-obj-$(CONFIG_DECNET_NF_GRABULATOR) += dn_rtmsg.o
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
deleted file mode 100644
index dc705769acc9..000000000000
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ /dev/null
@@ -1,158 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * DECnet An implementation of the DECnet protocol suite for the LINUX
- * operating system. DECnet is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * DECnet Routing Message Grabulator
- *
- * (C) 2000 ChyGwyn Limited - http://www.chygwyn.com/
- *
- * Author: Steven Whitehouse <steve@chygwyn.com>
- */
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/netfilter.h>
-#include <linux/spinlock.h>
-#include <net/netlink.h>
-#include <linux/netfilter_decnet.h>
-
-#include <net/sock.h>
-#include <net/flow.h>
-#include <net/dn.h>
-#include <net/dn_route.h>
-
-static struct sock *dnrmg = NULL;
-
-
-static struct sk_buff *dnrmg_build_message(struct sk_buff *rt_skb, int *errp)
-{
- struct sk_buff *skb = NULL;
- size_t size;
- sk_buff_data_t old_tail;
- struct nlmsghdr *nlh;
- unsigned char *ptr;
- struct nf_dn_rtmsg *rtm;
-
- size = NLMSG_ALIGN(rt_skb->len) +
- NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg));
- skb = nlmsg_new(size, GFP_ATOMIC);
- if (!skb) {
- *errp = -ENOMEM;
- return NULL;
- }
- old_tail = skb->tail;
- nlh = nlmsg_put(skb, 0, 0, 0, size, 0);
- if (!nlh) {
- kfree_skb(skb);
- *errp = -ENOMEM;
- return NULL;
- }
- rtm = (struct nf_dn_rtmsg *)nlmsg_data(nlh);
- rtm->nfdn_ifindex = rt_skb->dev->ifindex;
- ptr = NFDN_RTMSG(rtm);
- skb_copy_from_linear_data(rt_skb, ptr, rt_skb->len);
- nlh->nlmsg_len = skb->tail - old_tail;
- return skb;
-}
-
-static void dnrmg_send_peer(struct sk_buff *skb)
-{
- struct sk_buff *skb2;
- int status = 0;
- int group = 0;
- unsigned char flags = *skb->data;
-
- switch (flags & DN_RT_CNTL_MSK) {
- case DN_RT_PKT_L1RT:
- group = DNRNG_NLGRP_L1;
- break;
- case DN_RT_PKT_L2RT:
- group = DNRNG_NLGRP_L2;
- break;
- default:
- return;
- }
-
- skb2 = dnrmg_build_message(skb, &status);
- if (skb2 == NULL)
- return;
- NETLINK_CB(skb2).dst_group = group;
- netlink_broadcast(dnrmg, skb2, 0, group, GFP_ATOMIC);
-}
-
-
-static unsigned int dnrmg_hook(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- dnrmg_send_peer(skb);
- return NF_ACCEPT;
-}
-
-
-#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err), NULL); return; } while (0)
-
-static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
-{
- struct nlmsghdr *nlh = nlmsg_hdr(skb);
-
- if (skb->len < sizeof(*nlh) ||
- nlh->nlmsg_len < sizeof(*nlh) ||
- skb->len < nlh->nlmsg_len)
- return;
-
- if (!netlink_capable(skb, CAP_NET_ADMIN))
- RCV_SKB_FAIL(-EPERM);
-
- /* Eventually we might send routing messages too */
-
- RCV_SKB_FAIL(-EINVAL);
-}
-
-static const struct nf_hook_ops dnrmg_ops = {
- .hook = dnrmg_hook,
- .pf = NFPROTO_DECNET,
- .hooknum = NF_DN_ROUTE,
- .priority = NF_DN_PRI_DNRTMSG,
-};
-
-static int __init dn_rtmsg_init(void)
-{
- int rv = 0;
- struct netlink_kernel_cfg cfg = {
- .groups = DNRNG_NLGRP_MAX,
- .input = dnrmg_receive_user_skb,
- };
-
- dnrmg = netlink_kernel_create(&init_net, NETLINK_DNRTMSG, &cfg);
- if (dnrmg == NULL) {
-		printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket\n");
- return -ENOMEM;
- }
-
- rv = nf_register_net_hook(&init_net, &dnrmg_ops);
- if (rv) {
- netlink_kernel_release(dnrmg);
- }
-
- return rv;
-}
-
-static void __exit dn_rtmsg_fini(void)
-{
- nf_unregister_net_hook(&init_net, &dnrmg_ops);
- netlink_kernel_release(dnrmg);
-}
-
-
-MODULE_DESCRIPTION("DECnet Routing Message Grabulator");
-MODULE_AUTHOR("Steven Whitehouse <steve@chygwyn.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_DNRTMSG);
-
-module_init(dn_rtmsg_init);
-module_exit(dn_rtmsg_fini);
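dnrmg_build_message() above sizes its allocation the standard netlink way: each payload component is NLMSG_ALIGN()ed independently before the message header is added on top. A runnable check of that arithmetic using the uapi macros (the fixed-header size is a stand-in for sizeof(struct nf_dn_rtmsg)):

#include <stdio.h>
#include <linux/netlink.h>

#define RTMSG_SIZE 4	/* stand-in for sizeof(struct nf_dn_rtmsg) */

int main(void)
{
	size_t pkt_len = 27;	/* length of the routing packet being wrapped */

	/* aligned copy of the packet plus the aligned fixed header,
	 * exactly how dnrmg_build_message() sized its allocation */
	size_t payload = NLMSG_ALIGN(pkt_len) + NLMSG_ALIGN(RTMSG_SIZE);

	printf("payload=%zu total with nlmsghdr=%zu\n",
	       payload, (size_t)NLMSG_SPACE(payload));
	return 0;
}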
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
deleted file mode 100644
index 55bf64a22b59..000000000000
--- a/net/decnet/sysctl_net_decnet.c
+++ /dev/null
@@ -1,373 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DECnet An implementation of the DECnet protocol suite for the LINUX
- * operating system. DECnet is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * DECnet sysctl support functions
- *
- * Author: Steve Whitehouse <SteveW@ACM.org>
- *
- *
- * Changes:
- * Steve Whitehouse - C99 changes and default device handling
- * Steve Whitehouse - Memory buffer settings, like the tcp ones
- *
- */
-#include <linux/mm.h>
-#include <linux/sysctl.h>
-#include <linux/fs.h>
-#include <linux/netdevice.h>
-#include <linux/string.h>
-#include <net/neighbour.h>
-#include <net/dst.h>
-#include <net/flow.h>
-
-#include <linux/uaccess.h>
-
-#include <net/dn.h>
-#include <net/dn_dev.h>
-#include <net/dn_route.h>
-
-
-int decnet_debug_level;
-int decnet_time_wait = 30;
-int decnet_dn_count = 1;
-int decnet_di_count = 3;
-int decnet_dr_count = 3;
-int decnet_log_martians = 1;
-int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW;
-
-/* Reasonable defaults, I hope, based on tcp's defaults */
-long sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
-int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
-int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
-
-#ifdef CONFIG_SYSCTL
-extern int decnet_dst_gc_interval;
-static int min_decnet_time_wait[] = { 5 };
-static int max_decnet_time_wait[] = { 600 };
-static int min_state_count[] = { 1 };
-static int max_state_count[] = { NSP_MAXRXTSHIFT };
-static int min_decnet_dst_gc_interval[] = { 1 };
-static int max_decnet_dst_gc_interval[] = { 60 };
-static int min_decnet_no_fc_max_cwnd[] = { NSP_MIN_WINDOW };
-static int max_decnet_no_fc_max_cwnd[] = { NSP_MAX_WINDOW };
-static char node_name[7] = "???";
-
-static struct ctl_table_header *dn_table_header = NULL;
-
-/*
- * ctype.h :-)
- */
-#define ISNUM(x) (((x) >= '0') && ((x) <= '9'))
-#define ISLOWER(x) (((x) >= 'a') && ((x) <= 'z'))
-#define ISUPPER(x) (((x) >= 'A') && ((x) <= 'Z'))
-#define ISALPHA(x) (ISLOWER(x) || ISUPPER(x))
-#define INVALID_END_CHAR(x) (ISNUM(x) || ISALPHA(x))
-
-static void strip_it(char *str)
-{
- for(;;) {
- switch (*str) {
- case ' ':
- case '\n':
- case '\r':
- case ':':
- *str = 0;
- /* Fallthrough */
- case 0:
- return;
- }
- str++;
- }
-}
-
-/*
- * Simple routine to parse an ascii DECnet address
- * into a network order address.
- */
-static int parse_addr(__le16 *addr, char *str)
-{
- __u16 area, node;
-
- while(*str && !ISNUM(*str)) str++;
-
- if (*str == 0)
- return -1;
-
- area = (*str++ - '0');
- if (ISNUM(*str)) {
- area *= 10;
- area += (*str++ - '0');
- }
-
- if (*str++ != '.')
- return -1;
-
- if (!ISNUM(*str))
- return -1;
-
- node = *str++ - '0';
- if (ISNUM(*str)) {
- node *= 10;
- node += (*str++ - '0');
- }
- if (ISNUM(*str)) {
- node *= 10;
- node += (*str++ - '0');
- }
- if (ISNUM(*str)) {
- node *= 10;
- node += (*str++ - '0');
- }
-
- if ((node > 1023) || (area > 63))
- return -1;
-
- if (INVALID_END_CHAR(*str))
- return -1;
-
- *addr = cpu_to_le16((area << 10) | node);
-
- return 0;
-}
-
-static int dn_node_address_handler(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
-{
- char addr[DN_ASCBUF_LEN];
- size_t len;
- __le16 dnaddr;
-
- if (!*lenp || (*ppos && !write)) {
- *lenp = 0;
- return 0;
- }
-
- if (write) {
- len = (*lenp < DN_ASCBUF_LEN) ? *lenp : (DN_ASCBUF_LEN-1);
-
- if (copy_from_user(addr, buffer, len))
- return -EFAULT;
-
- addr[len] = 0;
- strip_it(addr);
-
- if (parse_addr(&dnaddr, addr))
- return -EINVAL;
-
- dn_dev_devices_off();
-
- decnet_address = dnaddr;
-
- dn_dev_devices_on();
-
- *ppos += len;
-
- return 0;
- }
-
- dn_addr2asc(le16_to_cpu(decnet_address), addr);
- len = strlen(addr);
- addr[len++] = '\n';
-
- if (len > *lenp) len = *lenp;
-
- if (copy_to_user(buffer, addr, len))
- return -EFAULT;
-
- *lenp = len;
- *ppos += len;
-
- return 0;
-}
-
-static int dn_def_dev_handler(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
-{
- size_t len;
- struct net_device *dev;
- char devname[17];
-
- if (!*lenp || (*ppos && !write)) {
- *lenp = 0;
- return 0;
- }
-
- if (write) {
- if (*lenp > 16)
- return -E2BIG;
-
- if (copy_from_user(devname, buffer, *lenp))
- return -EFAULT;
-
- devname[*lenp] = 0;
- strip_it(devname);
-
- dev = dev_get_by_name(&init_net, devname);
- if (dev == NULL)
- return -ENODEV;
-
- if (dev->dn_ptr == NULL) {
- dev_put(dev);
- return -ENODEV;
- }
-
- if (dn_dev_set_default(dev, 1)) {
- dev_put(dev);
- return -ENODEV;
- }
- *ppos += *lenp;
-
- return 0;
- }
-
- dev = dn_dev_get_default();
- if (dev == NULL) {
- *lenp = 0;
- return 0;
- }
-
- strcpy(devname, dev->name);
- dev_put(dev);
- len = strlen(devname);
- devname[len++] = '\n';
-
- if (len > *lenp) len = *lenp;
-
- if (copy_to_user(buffer, devname, len))
- return -EFAULT;
-
- *lenp = len;
- *ppos += len;
-
- return 0;
-}
-
-static struct ctl_table dn_table[] = {
- {
- .procname = "node_address",
- .maxlen = 7,
- .mode = 0644,
- .proc_handler = dn_node_address_handler,
- },
- {
- .procname = "node_name",
- .data = node_name,
- .maxlen = 7,
- .mode = 0644,
- .proc_handler = proc_dostring,
- },
- {
- .procname = "default_device",
- .maxlen = 16,
- .mode = 0644,
- .proc_handler = dn_def_dev_handler,
- },
- {
- .procname = "time_wait",
- .data = &decnet_time_wait,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_decnet_time_wait,
- .extra2 = &max_decnet_time_wait
- },
- {
- .procname = "dn_count",
- .data = &decnet_dn_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_state_count,
- .extra2 = &max_state_count
- },
- {
- .procname = "di_count",
- .data = &decnet_di_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_state_count,
- .extra2 = &max_state_count
- },
- {
- .procname = "dr_count",
- .data = &decnet_dr_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_state_count,
- .extra2 = &max_state_count
- },
- {
- .procname = "dst_gc_interval",
- .data = &decnet_dst_gc_interval,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_decnet_dst_gc_interval,
- .extra2 = &max_decnet_dst_gc_interval
- },
- {
- .procname = "no_fc_max_cwnd",
- .data = &decnet_no_fc_max_cwnd,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_decnet_no_fc_max_cwnd,
- .extra2 = &max_decnet_no_fc_max_cwnd
- },
- {
- .procname = "decnet_mem",
- .data = &sysctl_decnet_mem,
- .maxlen = sizeof(sysctl_decnet_mem),
- .mode = 0644,
- .proc_handler = proc_doulongvec_minmax
- },
- {
- .procname = "decnet_rmem",
- .data = &sysctl_decnet_rmem,
- .maxlen = sizeof(sysctl_decnet_rmem),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "decnet_wmem",
- .data = &sysctl_decnet_wmem,
- .maxlen = sizeof(sysctl_decnet_wmem),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "debug",
- .data = &decnet_debug_level,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- { }
-};
-
-void dn_register_sysctl(void)
-{
- dn_table_header = register_net_sysctl(&init_net, "net/decnet", dn_table);
-}
-
-void dn_unregister_sysctl(void)
-{
- unregister_net_sysctl_table(dn_table_header);
-}
-
-#else /* CONFIG_SYSCTL */
-void dn_unregister_sysctl(void)
-{
-}
-void dn_register_sysctl(void)
-{
-}
-
-#endif
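parse_addr() above accepts the usual area.node notation and packs it into 16 bits, six area bits above ten node bits, which is where the 63 and 1023 limits come from. A standalone encoder showing the same layout:

#include <stdio.h>
#include <stdint.h>

static int dn_pack(unsigned int area, unsigned int node, uint16_t *out)
{
	if (area > 63 || node > 1023)	/* 6 and 10 bits respectively */
		return -1;
	*out = (uint16_t)((area << 10) | node);
	return 0;
}

int main(void)
{
	uint16_t addr;

	if (dn_pack(1, 100, &addr) == 0)
		printf("1.100 -> 0x%04x (area=%u node=%u)\n",
		       addr, addr >> 10, addr & 0x3ff);
	return 0;
}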
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index 73605bcbb385..7354c5db3a14 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -62,7 +62,8 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
if (!skb->dev)
return NULL;
- pskb_trim_rcsum(skb, skb->len - len);
+ if (pskb_trim_rcsum(skb, skb->len - len))
+ return NULL;
skb->offload_fwd_mark = true;
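The tag_ksz.c fix checks the pskb_trim_rcsum() result: trimming a nonlinear skb can fail when it has to reallocate, and continuing with a buffer of unknown length is worse than dropping the frame. The shape of the fix in miniature, with a stand-in trim helper:

#include <stdio.h>

/* stand-in for pskb_trim_rcsum(): 0 on success, negative on failure */
static int trim_buffer(unsigned int *len, unsigned int new_len)
{
	if (new_len > *len)
		return -1;	/* model a trim that cannot succeed */
	*len = new_len;
	return 0;
}

static int rcv(unsigned int *pkt_len, unsigned int tag_len)
{
	/* the fix: bail out instead of using a buffer of unknown length */
	if (trim_buffer(pkt_len, *pkt_len - tag_len))
		return -1;
	return 0;
}

int main(void)
{
	unsigned int len = 64;

	printf("rcv=%d len=%u\n", rcv(&len, 1), len);	/* rcv=0 len=63 */
	return 0;
}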
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 12f3ce52e62e..836a75030a52 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -48,8 +48,8 @@ static void sja1105_meta_unpack(const struct sk_buff *skb,
* a unified unpacking command for both device series.
*/
packing(buf, &meta->tstamp, 31, 0, 4, UNPACK, 0);
- packing(buf + 4, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
- packing(buf + 5, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
+ packing(buf + 4, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
+ packing(buf + 5, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
packing(buf + 6, &meta->source_port, 7, 0, 1, UNPACK, 0);
packing(buf + 7, &meta->switch_id, 7, 0, 1, UNPACK, 0);
}
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index bf3ecf792688..7073724fdfa6 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -198,17 +198,18 @@ static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
struct hsr_node *node_src)
{
bool was_multicast_frame;
- int res;
+ int res, recv_len;
was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
hsr_addr_subst_source(node_src, skb);
skb_pull(skb, ETH_HLEN);
+ recv_len = skb->len;
res = netif_rx(skb);
if (res == NET_RX_DROP) {
dev->stats.rx_dropped++;
} else {
dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_bytes += recv_len;
if (was_multicast_frame)
dev->stats.multicast++;
}
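The hsr_forward.c change snapshots skb->len before netif_rx(): the call transfers ownership, so the skb may already be freed when it returns, and reading skb->len afterwards is a potential use-after-free. The defensive pattern modeled in plain C (ownership transfer simulated by freeing):

#include <stdio.h>
#include <stdlib.h>

struct pkt { unsigned int len; };

/* consumes the packet, like netif_rx() taking ownership of the skb */
static int deliver(struct pkt *p)
{
	free(p);
	return 0;
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));
	unsigned int recv_len;

	if (!p)
		return 1;
	p->len = 1500;

	recv_len = p->len;	/* snapshot before ownership transfer */
	if (deliver(p) == 0)
		printf("rx_bytes += %u\n", recv_len);	/* p must not be touched */
	return 0;
}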
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 4a9200729a32..1a0f447113f2 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -198,6 +198,10 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
if (ethhdr->h_proto == htons(ETH_P_PRP) ||
ethhdr->h_proto == htons(ETH_P_HSR)) {
+ /* Check if skb contains hsr_ethhdr */
+ if (skb->mac_len < sizeof(struct hsr_ethhdr))
+ return NULL;
+
/* Use the existing sequence_nr from the tag as starting point
* for filtering duplicate frames.
*/
@@ -269,9 +273,12 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
node_real->addr_B_port = port_rcv->type;
spin_lock_bh(&hsr->list_lock);
- list_del_rcu(&node_curr->mac_list);
+ if (!node_curr->removed) {
+ list_del_rcu(&node_curr->mac_list);
+ node_curr->removed = true;
+ kfree_rcu(node_curr, rcu_head);
+ }
spin_unlock_bh(&hsr->list_lock);
- kfree_rcu(node_curr, rcu_head);
done:
skb_push(skb, sizeof(struct hsrv1_ethhdr_sp));
@@ -436,9 +443,12 @@ void hsr_prune_nodes(struct timer_list *t)
if (time_is_before_jiffies(timestamp +
msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
hsr_nl_nodedown(hsr, node->macaddress_A);
- list_del_rcu(&node->mac_list);
- /* Note that we need to free this entry later: */
- kfree_rcu(node, rcu_head);
+ if (!node->removed) {
+ list_del_rcu(&node->mac_list);
+ node->removed = true;
+ /* Note that we need to free this entry later: */
+ kfree_rcu(node, rcu_head);
+ }
}
}
spin_unlock_bh(&hsr->list_lock);
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 0f0fa12b4329..01f4ef4ae494 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -56,6 +56,7 @@ struct hsr_node {
unsigned long time_in[HSR_PT_PORTS];
bool time_in_stale[HSR_PT_PORTS];
u16 seq_out[HSR_PT_PORTS];
+ bool removed;
struct rcu_head rcu_head;
};
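Both hsr_framereg.c hunks guard list_del_rcu()/kfree_rcu() with the new removed flag, since two paths (the supervision-frame merge and the prune timer) could otherwise unlink and queue the same node for freeing twice. A minimal model of the flag-under-lock guard (the lock and node layout are illustrative, not the kernel types):

#include <stdio.h>
#include <pthread.h>

struct node {
	int removed;	/* set once, under the lock */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* safe to call from several paths; only the first caller unlinks */
static void remove_node(struct node *n)
{
	pthread_mutex_lock(&list_lock);
	if (!n->removed) {
		n->removed = 1;
		/* here the kernel code does list_del_rcu() + kfree_rcu() */
		printf("unlinked and queued for freeing\n");
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct node n = { 0 };

	remove_node(&n);	/* prune-timer path */
	remove_node(&n);	/* supervision-frame path: now a no-op */
	return 0;
}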
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index 9e389accbfc7..ea627e532aab 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -113,14 +113,21 @@ static struct notifier_block hsr_nb = {
static int __init hsr_init(void)
{
- int res;
+ int err;
BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_HLEN);
- register_netdevice_notifier(&hsr_nb);
- res = hsr_netlink_init();
+ err = register_netdevice_notifier(&hsr_nb);
+ if (err)
+ return err;
+
+ err = hsr_netlink_init();
+ if (err) {
+ unregister_netdevice_notifier(&hsr_nb);
+ return err;
+ }
- return res;
+ return 0;
}
static void __exit hsr_exit(void)
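hsr_init() now checks the notifier registration and unwinds it if the later step fails, the usual register-then-roll-back init pattern. Its skeleton:

#include <stdio.h>

static int register_a(void) { return 0; }
static void unregister_a(void) { }
static int register_b(void) { return -1; }	/* simulate a failure */

static int init_all(void)
{
	int err;

	err = register_a();
	if (err)
		return err;

	err = register_b();
	if (err) {
		unregister_a();	/* roll back the earlier step */
		return err;
	}
	return 0;
}

int main(void)
{
	printf("init_all=%d\n", init_all());
	return 0;
}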
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index 9a675ba0bf0a..ce5f25c89dfa 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -201,8 +201,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len)
int err = 0;
struct net_device *dev = NULL;
- if (len < sizeof(*uaddr))
- return -EINVAL;
+ err = ieee802154_sockaddr_check_size(uaddr, len);
+ if (err < 0)
+ return err;
uaddr = (struct sockaddr_ieee802154 *)_uaddr;
if (uaddr->family != AF_IEEE802154)
@@ -272,6 +273,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
err = -EMSGSIZE;
goto out_dev;
}
+ if (!size) {
+ err = 0;
+ goto out_dev;
+ }
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
@@ -498,11 +503,14 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
ro->bound = 0;
- if (len < sizeof(*addr))
+ err = ieee802154_sockaddr_check_size(addr, len);
+ if (err < 0)
goto out;
- if (addr->family != AF_IEEE802154)
+ if (addr->family != AF_IEEE802154) {
+ err = -EINVAL;
goto out;
+ }
ieee802154_addr_from_sa(&haddr, &addr->addr);
dev = ieee802154_get_dev(sock_net(sk), &haddr);
@@ -569,8 +577,9 @@ static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
struct dgram_sock *ro = dgram_sk(sk);
int err = 0;
- if (len < sizeof(*addr))
- return -EINVAL;
+ err = ieee802154_sockaddr_check_size(addr, len);
+ if (err < 0)
+ return err;
if (addr->family != AF_IEEE802154)
return -EINVAL;
@@ -609,6 +618,7 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
struct ieee802154_mac_cb *cb;
struct dgram_sock *ro = dgram_sk(sk);
struct ieee802154_addr dst_addr;
+ DECLARE_SOCKADDR(struct sockaddr_ieee802154*, daddr, msg->msg_name);
int hlen, tlen;
int err;
@@ -617,10 +627,20 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
return -EOPNOTSUPP;
}
- if (!ro->connected && !msg->msg_name)
- return -EDESTADDRREQ;
- else if (ro->connected && msg->msg_name)
- return -EISCONN;
+ if (msg->msg_name) {
+ if (ro->connected)
+ return -EISCONN;
+ if (msg->msg_namelen < IEEE802154_MIN_NAMELEN)
+ return -EINVAL;
+ err = ieee802154_sockaddr_check_size(daddr, msg->msg_namelen);
+ if (err < 0)
+ return err;
+ ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
+ } else {
+ if (!ro->connected)
+ return -EDESTADDRREQ;
+ dst_addr = ro->dst_addr;
+ }
if (!ro->bound)
dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
@@ -656,16 +676,6 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
cb = mac_cb_init(skb);
cb->type = IEEE802154_FC_TYPE_DATA;
cb->ackreq = ro->want_ack;
-
- if (msg->msg_name) {
- DECLARE_SOCKADDR(struct sockaddr_ieee802154*,
- daddr, msg->msg_name);
-
- ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
- } else {
- dst_addr = ro->dst_addr;
- }
-
cb->secen = ro->secen;
cb->secen_override = ro->secen_override;
cb->seclevel = ro->seclevel;
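The ieee802154 changes funnel every address-length check through ieee802154_sockaddr_check_size(), which validates len against the addressing mode the sockaddr actually carries rather than against a single fixed sizeof. A simplified model of that idea (the struct layout and mode constants below are stand-ins, not the kernel definitions):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

enum { ADDR_NONE, ADDR_SHORT, ADDR_LONG };

struct sa_model {
	uint16_t family;
	uint8_t  addr_type;
	union {
		uint16_t short_addr;
		uint64_t long_addr;
	} u;
};

/* reject lengths too small for the fields the address mode implies */
static int check_size(const struct sa_model *sa, size_t len)
{
	if (len < offsetof(struct sa_model, u))
		return -1;	/* cannot even read addr_type safely */

	switch (sa->addr_type) {
	case ADDR_NONE:
		return 0;
	case ADDR_SHORT:
		return len >= offsetof(struct sa_model, u) + sizeof(uint16_t) ? 0 : -1;
	case ADDR_LONG:
		return len >= offsetof(struct sa_model, u) + sizeof(uint64_t) ? 0 : -1;
	default:
		return -1;
	}
}

int main(void)
{
	struct sa_model sa = { .family = 36 /* AF_IEEE802154 */, .addr_type = ADDR_LONG };

	printf("%d\n", check_size(&sa, sizeof(sa)));			   /* 0: big enough */
	printf("%d\n", check_size(&sa, offsetof(struct sa_model, u) + 2)); /* -1: too short */
	return 0;
}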
diff --git a/net/ife/ife.c b/net/ife/ife.c
index 13bbf8cb6a39..be05b690b9ef 100644
--- a/net/ife/ife.c
+++ b/net/ife/ife.c
@@ -82,6 +82,7 @@ void *ife_decode(struct sk_buff *skb, u16 *metalen)
if (unlikely(!pskb_may_pull(skb, total_pull)))
return NULL;
+ ifehdr = (struct ifeheadr *)(skb->data + skb->dev->hard_header_len);
skb_set_mac_header(skb, total_pull);
__skb_pull(skb, total_pull);
*metalen = ifehdrln - IFE_METAHDRLEN;
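The ife.c fix recomputes ifehdr after pskb_may_pull(): pulling may linearize and therefore reallocate the skb data area, so any pointer derived from the old skb->data is stale. The rule in miniature, modeled with realloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { unsigned char *data; size_t len; };

/* stand-in for pskb_may_pull(): may move the backing storage */
static int may_pull(struct buf *b, size_t need)
{
	unsigned char *p;

	if (need > b->len)
		return 0;
	p = realloc(b->data, b->len);	/* may return a different pointer */
	if (!p)
		return 0;
	b->data = p;
	return 1;
}

int main(void)
{
	struct buf b = { .len = 64 };
	unsigned char *hdr;

	b.data = calloc(1, b.len);
	if (!b.data)
		return 1;

	hdr = b.data + 14;		/* computed before the pull: may go stale */
	if (!may_pull(&b, 32))
		return 1;
	hdr = b.data + 14;		/* the fix: recompute after the pull */

	memset(hdr, 0, 4);
	free(b.data);
	return 0;
}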
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index a926de2e42b5..f5af8c6b2f87 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -389,6 +389,16 @@ config INET_IPCOMP
If unsure, say Y.
+config INET_TABLE_PERTURB_ORDER
+ int "INET: Source port perturbation table size (as power of 2)" if EXPERT
+ default 16
+ help
+ Source port perturbation table size (as power of 2) for
+ RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm.
+
+ The default is almost always what you want.
+ Only change this if you know what you are doing.
+
config INET_XFRM_TUNNEL
tristate
select INET_TUNNEL
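INET_TABLE_PERTURB_ORDER sizes the table used by RFC 6056 section 3.3.4, Algorithm 4: one keyed hash of the connection tuple picks a starting offset in the ephemeral range, a second indexes a table of counters that advances per allocation, so distinct destinations walk independent port sequences. A simplified illustration with toy hashes (the kernel uses siphash-keyed functions, and the port-in-use probing is omitted here):

#include <stdio.h>
#include <stdint.h>

#define TABLE_BITS 8			/* the kernel default order is 16 */
#define TABLE_SIZE (1u << TABLE_BITS)

static uint32_t table[TABLE_SIZE];	/* per-boot perturbation counters */

/* toy stand-ins for the two keyed hashes F and G of RFC 6056 alg. 4 */
static uint32_t hash_f(uint32_t daddr, uint16_t dport)
{
	return (daddr * 2654435761u) ^ dport;
}

static uint32_t hash_g(uint32_t daddr, uint16_t dport)
{
	return (daddr ^ 0x9e3779b9u) * 40503u + dport;
}

static uint16_t pick_port(uint32_t daddr, uint16_t dport,
			  uint16_t lo, uint16_t hi)
{
	uint32_t range = (uint32_t)(hi - lo) + 1;
	uint32_t idx = hash_g(daddr, dport) & (TABLE_SIZE - 1);
	uint32_t off = (hash_f(daddr, dport) + table[idx]++) % range;

	return (uint16_t)(lo + off);
}

int main(void)
{
	/* consecutive connects to the same destination get different ports */
	printf("%u\n", pick_port(0x0a000001, 443, 32768, 60999));
	printf("%u\n", pick_port(0x0a000001, 443, 32768, 60999));
	return 0;
}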
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index d61ca7be6eda..d7ebee3c048d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -157,7 +157,7 @@ void inet_sock_destruct(struct sock *sk)
kfree(rcu_dereference_protected(inet->inet_opt, 1));
dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
- dst_release(sk->sk_rx_dst);
+ dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
sk_refcnt_debug_dec(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);
@@ -578,6 +578,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
add_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending += writebias;
+ sk->sk_wait_pending++;
/* Basic assumption: if someone sets sk->sk_err, he _must_
* change state of the socket from TCP_SYN_*.
@@ -593,6 +594,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
}
remove_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending -= writebias;
+ sk->sk_wait_pending--;
return timeo;
}
@@ -874,7 +876,7 @@ int inet_shutdown(struct socket *sock, int how)
EPOLLHUP, even on eg. unconnected UDP sockets -- RR */
/* fall through */
default:
- sk->sk_shutdown |= how;
+ WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | how);
if (sk->sk_prot->shutdown)
sk->sk_prot->shutdown(sk, how);
break;
@@ -1570,10 +1572,12 @@ EXPORT_SYMBOL(inet_current_timestamp);
int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
- if (sk->sk_family == AF_INET)
+ unsigned int family = READ_ONCE(sk->sk_family);
+
+ if (family == AF_INET)
return ip_recv_error(sk, msg, len, addr_len);
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == AF_INET6)
+ if (family == AF_INET6)
return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
#endif
return -EINVAL;
@@ -1702,12 +1706,7 @@ static const struct net_protocol igmp_protocol = {
};
#endif
-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct net_protocol tcp_protocol = {
- .early_demux = tcp_v4_early_demux,
- .early_demux_handler = tcp_v4_early_demux,
+static const struct net_protocol tcp_protocol = {
.handler = tcp_v4_rcv,
.err_handler = tcp_v4_err,
.no_policy = 1,
@@ -1715,12 +1714,7 @@ static struct net_protocol tcp_protocol = {
.icmp_strict_tag_validation = 1,
};
-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct net_protocol udp_protocol = {
- .early_demux = udp_v4_early_demux,
- .early_demux_handler = udp_v4_early_demux,
+static const struct net_protocol udp_protocol = {
.handler = udp_rcv,
.err_handler = udp_err,
.no_policy = 1,
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 4a8ad46397c0..ed00b233cee2 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -351,14 +351,14 @@ static void __inet_del_ifa(struct in_device *in_dev,
{
struct in_ifaddr *promote = NULL;
struct in_ifaddr *ifa, *ifa1;
- struct in_ifaddr *last_prim;
+ struct in_ifaddr __rcu **last_prim;
struct in_ifaddr *prev_prom = NULL;
int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
ASSERT_RTNL();
ifa1 = rtnl_dereference(*ifap);
- last_prim = rtnl_dereference(in_dev->ifa_list);
+ last_prim = ifap;
if (in_dev->dead)
goto no_promotions;
@@ -372,7 +372,7 @@ static void __inet_del_ifa(struct in_device *in_dev,
while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
ifa1->ifa_scope <= ifa->ifa_scope)
- last_prim = ifa;
+ last_prim = &ifa->ifa_next;
if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
ifa1->ifa_mask != ifa->ifa_mask ||
@@ -436,9 +436,9 @@ no_promotions:
rcu_assign_pointer(prev_prom->ifa_next, next_sec);
- last_sec = rtnl_dereference(last_prim->ifa_next);
+ last_sec = rtnl_dereference(*last_prim);
rcu_assign_pointer(promote->ifa_next, last_sec);
- rcu_assign_pointer(last_prim->ifa_next, promote);
+ rcu_assign_pointer(*last_prim, promote);
}
promote->ifa_flags &= ~IFA_F_SECONDARY;
@@ -1798,6 +1798,21 @@ done:
return err;
}
+/* Combine dev_addr_genid and dev_base_seq to detect changes.
+ */
+static u32 inet_base_seq(const struct net *net)
+{
+ u32 res = atomic_read(&net->ipv4.dev_addr_genid) +
+ net->dev_base_seq;
+
+ /* Must not return 0 (see nl_dump_check_consistent()).
+	 * Choose a value far away from 0.
+ */
+ if (!res)
+ res = 0x80000000;
+ return res;
+}
+
static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
@@ -1849,8 +1864,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
idx = 0;
head = &tgt_net->dev_index_head[h];
rcu_read_lock();
- cb->seq = atomic_read(&tgt_net->ipv4.dev_addr_genid) ^
- tgt_net->dev_base_seq;
+ cb->seq = inet_base_seq(tgt_net);
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
@@ -2249,8 +2263,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
idx = 0;
head = &net->dev_index_head[h];
rcu_read_lock();
- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
- net->dev_base_seq;
+ cb->seq = inet_base_seq(net);
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index f555dd4bac65..9a8f0e36bbf9 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -567,7 +567,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
skb->csum = csum_block_sub(skb->csum, csumdiff,
skb->len - trimlen);
}
- pskb_trim(skb, skb->len - trimlen);
+ ret = pskb_trim(skb, skb->len - trimlen);
+ if (unlikely(ret))
+ return ret;
ret = nexthdr[1];
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index 8c0af30fb067..5cd219d7e746 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -283,6 +283,9 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
secpath_reset(skb);
+ if (skb_needs_linearize(skb, skb->dev->features) &&
+ __skb_linearize(skb))
+ return -ENOMEM;
return 0;
}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index ef3e7a3e3a29..c31003d8c22f 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -399,7 +399,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
dev_match = dev_match || (res.type == RTN_LOCAL &&
dev == net->loopback_dev);
if (dev_match) {
- ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
+ ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK;
return ret;
}
if (no_addr)
@@ -411,7 +411,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
ret = 0;
if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) {
if (res.type == RTN_UNICAST)
- ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
+ ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK;
}
return ret;
@@ -583,6 +583,9 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
cfg->fc_scope = RT_SCOPE_UNIVERSE;
}
+ if (!cfg->fc_table)
+ cfg->fc_table = RT_TABLE_MAIN;
+
if (cmd == SIOCDELRT)
return 0;
@@ -840,6 +843,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
return -EINVAL;
}
+ if (!cfg->fc_table)
+ cfg->fc_table = RT_TABLE_MAIN;
+
return 0;
errout:
return err;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 28da0443f3e9..2890dbe08d17 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/netlink.h>
#include <linux/hash.h>
+#include <linux/nospec.h>
#include <net/arp.h>
#include <net/ip.h>
@@ -274,7 +275,8 @@ void fib_release_info(struct fib_info *fi)
hlist_del(&nexthop_nh->nh_hash);
} endfor_nexthops(fi)
}
- fi->fib_dead = 1;
+ /* Paired with READ_ONCE() from fib_table_lookup() */
+ WRITE_ONCE(fi->fib_dead, 1);
fib_info_put(fi);
}
spin_unlock_bh(&fib_info_lock);
@@ -420,6 +422,7 @@ static struct fib_info *fib_find_info(struct fib_info *nfi)
nfi->fib_prefsrc == fi->fib_prefsrc &&
nfi->fib_priority == fi->fib_priority &&
nfi->fib_type == fi->fib_type &&
+ nfi->fib_tb_id == fi->fib_tb_id &&
memcmp(nfi->fib_metrics, fi->fib_metrics,
sizeof(u32) * RTAX_MAX) == 0 &&
!((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
@@ -875,13 +878,15 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
return 1;
}
+ if (fi->nh) {
+ if (cfg->fc_oif || cfg->fc_gw_family || cfg->fc_mp)
+ return 1;
+ return 0;
+ }
+
if (cfg->fc_oif || cfg->fc_gw_family) {
struct fib_nh *nh;
- /* cannot match on nexthop object attributes */
- if (fi->nh)
- return 1;
-
nh = fib_info_nh(fi, 0);
if (cfg->fc_encap) {
if (fib_encap_match(cfg->fc_encap_type, cfg->fc_encap,
@@ -1006,6 +1011,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
if (type > RTAX_MAX)
return false;
+ type = array_index_nospec(type, RTAX_MAX + 1);
if (type == RTAX_CC_ALGO) {
char tmp[TCP_CA_NAME_MAX];
bool ecn_ca = false;
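
array_index_nospec() clamps the just-validated attribute type before it is used as an array index, so a mispredicted `type > RTAX_MAX` branch cannot speculatively read out of bounds. A rough userspace rendering of the branch-free mask this relies on, mirroring the generic fallback (many architectures override it with dedicated instructions); it assumes two's complement and an arithmetic right shift on signed values:

#include <stdint.h>
#include <stdio.h>

/* ~0 when 0 <= index < size, 0 otherwise; deliberately branch-free so
 * there is no branch for the CPU to mispredict.
 */
static uint64_t index_mask_nospec(uint64_t index, uint64_t size)
{
	return ~(uint64_t)((int64_t)(index | (size - 1 - index)) >> 63);
}

static uint64_t index_nospec(uint64_t index, uint64_t size)
{
	return index & index_mask_nospec(index, size);
}

int main(void)
{
	printf("%llu %llu\n",
	       (unsigned long long)index_nospec(3, 16),   /* 3 */
	       (unsigned long long)index_nospec(20, 16)); /* 0: clamped */
	return 0;
}
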
@@ -1327,15 +1333,18 @@ __be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc,
unsigned char scope)
{
struct fib_nh *nh;
+ __be32 saddr;
if (nhc->nhc_family != AF_INET)
return inet_select_addr(nhc->nhc_dev, 0, scope);
nh = container_of(nhc, struct fib_nh, nh_common);
- nh->nh_saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
+ saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
+
+ WRITE_ONCE(nh->nh_saddr, saddr);
+ WRITE_ONCE(nh->nh_saddr_genid, atomic_read(&net->ipv4.dev_addr_genid));
- return nh->nh_saddr;
+ return saddr;
}
__be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
@@ -1349,8 +1358,9 @@ __be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
struct fib_nh *nh;
nh = container_of(nhc, struct fib_nh, nh_common);
- if (nh->nh_saddr_genid == atomic_read(&net->ipv4.dev_addr_genid))
- return nh->nh_saddr;
+ if (READ_ONCE(nh->nh_saddr_genid) ==
+ atomic_read(&net->ipv4.dev_addr_genid))
+ return READ_ONCE(nh->nh_saddr);
}
return fib_info_update_nhc_saddr(net, nhc, res->fi->fib_scope);
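
These two hunks form a lockless writer/reader pair: the updater publishes the recomputed source address and its generation stamp with WRITE_ONCE(), and fib_result_prefsrc() trusts the cached value only while the stamp still matches dev_addr_genid. A rough userspace model of such a generation-stamped cache, using C11 relaxed atomics in place of READ_ONCE()/WRITE_ONCE(); all names are stand-ins:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t dev_addr_genid = 1;	/* bumped on address changes */

/* Stand-in for the nh_saddr/nh_saddr_genid pair. */
struct saddr_cache {
	_Atomic uint32_t saddr;
	_Atomic uint32_t genid;
};

static uint32_t recompute_saddr(void)
{
	return 0x0a000001;	/* pretend we selected 10.0.0.1 */
}

/* Writer: refresh the cached value, then stamp it. */
static uint32_t cache_update(struct saddr_cache *c)
{
	uint32_t s = recompute_saddr();

	atomic_store_explicit(&c->saddr, s, memory_order_relaxed);
	atomic_store_explicit(&c->genid,
			      atomic_load_explicit(&dev_addr_genid,
						   memory_order_relaxed),
			      memory_order_relaxed);
	return s;
}

/* Reader: trust the cache only while the stamp matches. */
static uint32_t cache_get(struct saddr_cache *c)
{
	if (atomic_load_explicit(&c->genid, memory_order_relaxed) ==
	    atomic_load_explicit(&dev_addr_genid, memory_order_relaxed))
		return atomic_load_explicit(&c->saddr, memory_order_relaxed);
	return cache_update(c);
}

int main(void)
{
	struct saddr_cache c = { 0 };

	printf("%#x\n", (unsigned)cache_get(&c));	/* stale stamp: recomputes */
	atomic_fetch_add(&dev_addr_genid, 1);		/* address change */
	printf("%#x\n", (unsigned)cache_get(&c));	/* recomputes again */
	return 0;
}

Note the kernel version tolerates a racing update: a reader either sees a matching stamp with its cached value, or falls back to recomputing; it never uses a value from one generation with the stamp of another as a correctness requirement.
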
@@ -1581,6 +1591,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
link_it:
ofi = fib_find_info(fi);
if (ofi) {
+ /* fib_table_lookup() should not see @fi yet. */
fi->fib_dead = 1;
free_fib_info(fi);
ofi->fib_treeref++;
@@ -1618,6 +1629,7 @@ err_inval:
failure:
if (fi) {
+ /* fib_table_lookup() should not see @fi yet. */
fi->fib_dead = 1;
free_fib_info(fi);
}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index a1f830da4ad3..7f933ead3bf4 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1448,7 +1448,8 @@ found:
}
if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
continue;
- if (fi->fib_dead)
+ /* Paired with WRITE_ONCE() in fib_release_info() */
+ if (READ_ONCE(fi->fib_dead))
continue;
if (fa->fa_info->fib_scope < flp->flowi4_scope)
continue;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index b44f51e404ae..ac82a4158b86 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -754,6 +754,11 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
room = 576;
room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
room -= sizeof(struct icmphdr);
+ /* Guard against tiny mtu. We need to include at least one
+ * IP network header for this message to make any sense.
+ */
+ if (room <= (int)sizeof(struct iphdr))
+ goto ende;
icmp_param.data_len = skb_in->len - icmp_param.offset;
if (icmp_param.data_len > room)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 1023f881091e..715f99e76826 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -218,8 +218,10 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
int tv = prandom_u32() % max_delay;
im->tm_running = 1;
- if (!mod_timer(&im->timer, jiffies+tv+2))
- refcount_inc(&im->refcnt);
+ if (refcount_inc_not_zero(&im->refcnt)) {
+ if (mod_timer(&im->timer, jiffies + tv + 2))
+ ip_ma_put(im);
+ }
}
static void igmp_gq_start_timer(struct in_device *in_dev)
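
The old order armed the timer first and took a reference only if mod_timer() reported the timer as not already pending, which could resurrect an ip_mc_list whose refcount had already dropped to zero. The fix pins the object first and drops the extra reference if the timer turns out to have been pending. A compact sketch of that ordering, with CAS-based stand-ins for refcount_inc_not_zero() and mod_timer():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	_Atomic int refcnt;
	bool timer_pending;
};

/* Mimics refcount_inc_not_zero(): only pin a still-live object. */
static bool get_not_zero(struct obj *o)
{
	int c = atomic_load(&o->refcnt);

	while (c) {
		if (atomic_compare_exchange_weak(&o->refcnt, &c, c + 1))
			return true;
	}
	return false;
}

static void put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		printf("freed\n");
}

/* Like mod_timer(): returns true if the timer was already pending. */
static bool arm_timer(struct obj *o)
{
	bool was_pending = o->timer_pending;

	o->timer_pending = true;
	return was_pending;
}

/* Fixed ordering: pin first, undo the pin if no new reference is
 * needed because the timer was already armed.
 */
static void start_timer(struct obj *o)
{
	if (get_not_zero(o)) {
		if (arm_timer(o))
			put(o);
	}
}

int main(void)
{
	struct obj o = { .refcnt = 1 };

	start_timer(&o);	/* takes the timer's reference */
	start_timer(&o);	/* already pending: reference dropped again */
	printf("%d\n", atomic_load(&o.refcnt)); /* 2: caller + timer */
	return 0;
}
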
@@ -355,8 +357,9 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
struct flowi4 fl4;
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
- unsigned int size = mtu;
+ unsigned int size;
+ size = min(mtu, IP_MAX_MTU);
while (1) {
skb = alloc_skb(size + hlen + tlen,
GFP_ATOMIC | __GFP_NOWARN);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 6cbf0db57ad0..374a0c3f39cc 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -824,6 +824,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
if (newsk) {
struct inet_connection_sock *newicsk = inet_csk(newsk);
+ newsk->sk_wait_pending = 0;
inet_sk_set_state(newsk, TCP_SYN_RECV);
newicsk->icsk_bind_hash = NULL;
@@ -902,11 +903,25 @@ void inet_csk_prepare_forced_close(struct sock *sk)
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);
+static int inet_ulp_can_listen(const struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (icsk->icsk_ulp_ops)
+ return -EINVAL;
+
+ return 0;
+}
+
int inet_csk_listen_start(struct sock *sk, int backlog)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_sock *inet = inet_sk(sk);
- int err = -EADDRINUSE;
+ int err;
+
+ err = inet_ulp_can_listen(sk);
+ if (unlikely(err))
+ return err;
reqsk_queue_alloc(&icsk->icsk_accept_queue);
@@ -918,6 +933,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
* It is OK, because this socket enters to hash table only
* after validation is complete.
*/
+ err = -EADDRINUSE;
inet_sk_state_store(sk, TCP_LISTEN);
if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
inet->inet_sport = htons(inet->inet_num);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index d9bee15e36a5..e4f2790fd641 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -675,13 +675,13 @@ EXPORT_SYMBOL_GPL(inet_unhash);
* Note that we use 32bit integers (vs RFC 'short integers')
* because 2^16 is not a multiple of num_ephemeral and this
* property might be used by clever attacker.
+ *
* RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though
- * attacks were since demonstrated, thus we use 65536 instead to really
- * give more isolation and privacy, at the expense of 256kB of kernel
- * memory.
+ * attacks have since been demonstrated, thus we use 65536 by default
+ * instead to really give more isolation and privacy, at the expense
+ * of 256kB of kernel memory.
*/
-#define INET_TABLE_PERTURB_SHIFT 16
-#define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT)
+#define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER)
static u32 *table_perturb;
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
@@ -701,17 +701,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
u32 index;
if (port) {
- head = &hinfo->bhash[inet_bhashfn(net, port,
- hinfo->bhash_size)];
- tb = inet_csk(sk)->icsk_bind_hash;
- spin_lock_bh(&head->lock);
- if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
- inet_ehash_nolisten(sk, NULL, NULL);
- spin_unlock_bh(&head->lock);
- return 0;
- }
- spin_unlock(&head->lock);
- /* No definite answer... Walk to established hash table */
+ local_bh_disable();
ret = check_established(death_row, sk, port, NULL);
local_bh_enable();
return ret;
@@ -725,8 +715,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
if (likely(remaining > 1))
remaining &= ~1U;
- net_get_random_once(table_perturb,
- INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
+ get_random_slow_once(table_perturb,
+ INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 52dbffb7bc2f..db48dec61f30 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -437,7 +437,7 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
/* Push GRE header. */
gre_build_header(skb, tunnel->tun_hlen,
flags, proto, tunnel->parms.o_key,
- (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
+ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
@@ -475,7 +475,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
gre_build_header(skb, tunnel_hlen, flags, proto,
tunnel_id_to_key32(tun_info->key.tun_id),
- (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
+ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
@@ -525,7 +525,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
truncate = true;
}
- nhoff = skb_network_header(skb) - skb_mac_header(skb);
+ nhoff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_IP) &&
(ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
truncate = true;
@@ -534,7 +534,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
int thoff;
if (skb_transport_header_was_set(skb))
- thoff = skb_transport_header(skb) - skb_mac_header(skb);
+ thoff = skb_transport_offset(skb);
else
thoff = nhoff + sizeof(struct ipv6hdr);
if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
@@ -557,7 +557,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
}
gre_build_header(skb, 8, TUNNEL_SEQ,
- proto, 0, htonl(tunnel->o_seqno++));
+ proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));
ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
@@ -607,15 +607,18 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
}
if (dev->header_ops) {
+ int pull_len = tunnel->hlen + sizeof(struct iphdr);
+
if (skb_cow_head(skb, 0))
goto free_skb;
tnl_params = (const struct iphdr *)skb->data;
- /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
- * to gre header.
- */
- skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
+ if (!pskb_network_may_pull(skb, pull_len))
+ goto free_skb;
+
+ /* ip_tunnel_xmit() needs skb->data pointing to gre header. */
+ skb_pull(skb, pull_len);
skb_reset_mac_header(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL &&
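
tunnel->o_seqno++ was a plain increment on state shared by every TX path shown above, so concurrent transmits could hand out duplicate sequence numbers or lose increments; atomic_fetch_inc() makes each packet's value unique. A self-contained pthread sketch of the same pattern (thread count and iteration count are illustrative; build with cc -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* A shared per-tunnel TX sequence number. */
static _Atomic uint32_t o_seqno;

static void *xmit(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		(void)atomic_fetch_add_explicit(&o_seqno, 1,
						memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, xmit, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	/* Always 400000 with atomics; a plain "o_seqno++" on a non-atomic
	 * counter is a data race and could lose updates.
	 */
	printf("%u\n", (unsigned)atomic_load(&o_seqno));
	return 0;
}
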
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index c59a78a267c3..1464e2738211 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -302,31 +302,38 @@ drop:
return true;
}
-INDIRECT_CALLABLE_DECLARE(int udp_v4_early_demux(struct sk_buff *));
-INDIRECT_CALLABLE_DECLARE(int tcp_v4_early_demux(struct sk_buff *));
+int udp_v4_early_demux(struct sk_buff *);
+int tcp_v4_early_demux(struct sk_buff *);
static int ip_rcv_finish_core(struct net *net, struct sock *sk,
struct sk_buff *skb, struct net_device *dev)
{
const struct iphdr *iph = ip_hdr(skb);
- int (*edemux)(struct sk_buff *skb);
struct rtable *rt;
int err;
- if (net->ipv4.sysctl_ip_early_demux &&
+ if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
!skb_dst(skb) &&
!skb->sk &&
!ip_is_fragment(iph)) {
- const struct net_protocol *ipprot;
- int protocol = iph->protocol;
-
- ipprot = rcu_dereference(inet_protos[protocol]);
- if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
- err = INDIRECT_CALL_2(edemux, tcp_v4_early_demux,
- udp_v4_early_demux, skb);
- if (unlikely(err))
- goto drop_error;
- /* must reload iph, skb->head might have changed */
- iph = ip_hdr(skb);
+ switch (iph->protocol) {
+ case IPPROTO_TCP:
+ if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) {
+ tcp_v4_early_demux(skb);
+
+ /* must reload iph, skb->head might have changed */
+ iph = ip_hdr(skb);
+ }
+ break;
+ case IPPROTO_UDP:
+ if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) {
+ err = udp_v4_early_demux(skb);
+ if (unlikely(err))
+ goto drop_error;
+
+ /* must reload iph, skb->head might have changed */
+ iph = ip_hdr(skb);
+ }
+ break;
}
}
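
The rework drops the per-protocol early_demux function pointer (and its INDIRECT_CALL_2() wrapper) in favor of a direct switch on iph->protocol gated by per-protocol sysctls read via READ_ONCE(); the proc handlers that used to flip those shared pointers are deleted later in this patch. A toy sketch of why a switch can be preferable to an indirect call (handler names and signatures are simplified stand-ins); one common motivation is that direct calls stay cheap even when indirect branches are retpolined:

#include <stdio.h>

enum { PROTO_TCP = 6, PROTO_UDP = 17 };

static int tcp_demux(void) { return 0; }
static int udp_demux(void) { return 0; }

/* Before: an indirect call through a table of function pointers. */
static int (*const handlers[256])(void) = {
	[PROTO_TCP] = tcp_demux,
	[PROTO_UDP] = udp_demux,
};

static int demux_indirect(int proto)
{
	return handlers[proto] ? handlers[proto]() : -1;
}

/* After: a direct switch; the compiler emits plain calls and no
 * shared mutable pointer has to be kept in sync with a sysctl.
 */
static int demux_switch(int proto)
{
	switch (proto) {
	case PROTO_TCP: return tcp_demux();
	case PROTO_UDP: return udp_demux();
	default:        return -1;
	}
}

int main(void)
{
	printf("%d %d\n", demux_indirect(PROTO_TCP), demux_switch(PROTO_UDP));
	return 0;
}
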
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 418e93987800..d57d484a929f 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -222,7 +222,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
if (lwtunnel_xmit_redirect(dst->lwtstate)) {
int res = lwtunnel_xmit(skb);
- if (res < 0 || res == LWTUNNEL_XMIT_DONE)
+ if (res != LWTUNNEL_XMIT_CONTINUE)
return res;
}
@@ -993,7 +993,7 @@ static int __ip_append_data(struct sock *sk,
mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
paged = !!cork->gso_size;
- if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
+ if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
tskey = sk->sk_tskey++;
@@ -1255,6 +1255,12 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
if (unlikely(!rt))
return -EFAULT;
+ cork->fragsize = ip_sk_use_pmtu(sk) ?
+ dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
+
+ if (!inetdev_valid_mtu(cork->fragsize))
+ return -ENETUNREACH;
+
/*
* setup for corking.
*/
@@ -1271,12 +1277,6 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
cork->addr = ipc->addr;
}
- cork->fragsize = ip_sk_use_pmtu(sk) ?
- dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
-
- if (!inetdev_valid_mtu(cork->fragsize))
- return -ENETUNREACH;
-
cork->gso_size = ipc->gso_size;
cork->dst = &rt->dst;
@@ -1559,9 +1559,19 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
cork->dst = NULL;
skb_dst_set(skb, &rt->dst);
- if (iph->protocol == IPPROTO_ICMP)
- icmp_out_count(net, ((struct icmphdr *)
- skb_transport_header(skb))->type);
+ if (iph->protocol == IPPROTO_ICMP) {
+ u8 icmp_type;
+
+ /* For such sockets, transhdrlen is zero when ip_append_data() runs,
+ * so the icmphdr is not in the skb linear region and the icmp_type
+ * cannot be read via icmp_hdr(skb)->type.
+ */
+ if (sk->sk_type == SOCK_RAW && !inet_sk(sk)->hdrincl)
+ icmp_type = fl4->fl4_icmp_type;
+ else
+ icmp_type = icmp_hdr(skb)->type;
+ icmp_out_count(net, icmp_type);
+ }
ip_cork_release(cork);
out:
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index aa3fd61818c4..0c528f642eb8 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -316,7 +316,14 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
ipc->tos = val;
ipc->priority = rt_tos2priority(ipc->tos);
break;
-
+ case IP_PROTOCOL:
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
+ return -EINVAL;
+ val = *(int *)CMSG_DATA(cmsg);
+ if (val < 1 || val > 255)
+ return -EINVAL;
+ ipc->protocol = val;
+ break;
default:
return -EINVAL;
}
@@ -1524,6 +1531,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_MINTTL:
val = inet->min_ttl;
break;
+ case IP_PROTOCOL:
+ val = inet_sk(sk)->inet_num;
+ break;
default:
release_sock(sk);
return -ENOPROTOOPT;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 38d3095ef979..906c37c7f80d 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -365,7 +365,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
{
struct pcpu_sw_netstats *tstats;
const struct iphdr *iph = ip_hdr(skb);
- int err;
+ int nh, err;
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(iph->daddr)) {
@@ -391,8 +391,21 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
tunnel->i_seqno = ntohl(tpi->seq) + 1;
}
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(tunnel->dev, rx_length_errors);
+ DEV_STATS_INC(tunnel->dev, rx_errors);
+ goto drop;
+ }
+ iph = (struct iphdr *)(skb->head + nh);
+
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
if (log_ecn_error)
@@ -547,6 +560,20 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
return 0;
}
+static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom)
+{
+ /* we must cap headroom to some upper limit, else pskb_expand_head
+ * will overflow header offsets in skb_headers_offset_update().
+ */
+ static const unsigned int max_allowed = 512;
+
+ if (headroom > max_allowed)
+ headroom = max_allowed;
+
+ if (headroom > READ_ONCE(dev->needed_headroom))
+ WRITE_ONCE(dev->needed_headroom, headroom);
+}
+
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
u8 proto, int tunnel_hlen)
{
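
The helper both clamps the requested headroom and only ever raises dev->needed_headroom, through a READ_ONCE()/WRITE_ONCE() pair since xmit paths may race. A minimal userspace sketch of that clamp-then-monotonic-publish shape (the 512-byte cap is taken from the hunk above; the atomics stand in for READ_ONCE/WRITE_ONCE):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for dev->needed_headroom. */
static _Atomic unsigned int needed_headroom;

static void adj_headroom(unsigned int headroom)
{
	const unsigned int max_allowed = 512;

	if (headroom > max_allowed)
		headroom = max_allowed;

	/* Racy check-then-update is tolerable here: concurrent writers
	 * only ever raise the value, and a lost update is simply redone
	 * on the next packet.
	 */
	if (headroom > atomic_load_explicit(&needed_headroom,
					    memory_order_relaxed))
		atomic_store_explicit(&needed_headroom, headroom,
				      memory_order_relaxed);
}

int main(void)
{
	adj_headroom(128);
	adj_headroom(9000);	/* clamped to 512 */
	adj_headroom(64);	/* never shrinks */
	printf("%u\n", (unsigned)atomic_load(&needed_headroom)); /* 512 */
	return 0;
}
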
@@ -620,13 +647,13 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
- if (headroom > dev->needed_headroom)
- dev->needed_headroom = headroom;
-
- if (skb_cow_head(skb, dev->needed_headroom)) {
+ if (skb_cow_head(skb, headroom)) {
ip_rt_put(rt);
goto tx_dropped;
}
+
+ ip_tunnel_adj_headroom(dev, headroom);
+
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
df, !net_eq(tunnel->net, dev_net(dev)));
return;
@@ -804,16 +831,16 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
- if (max_headroom > dev->needed_headroom)
- dev->needed_headroom = max_headroom;
- if (skb_cow_head(skb, dev->needed_headroom)) {
+ if (skb_cow_head(skb, max_headroom)) {
ip_rt_put(rt);
dev->stats.tx_dropped++;
kfree_skb(skb);
return;
}
+ ip_tunnel_adj_headroom(dev, max_headroom);
+
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
df, !net_eq(tunnel->net, dev_net(dev)));
return;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index bd41354ed8c1..275f2ecf0ba6 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -314,12 +314,12 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
switch (skb->protocol) {
case htons(ETH_P_IP):
- xfrm_decode_session(skb, &fl, AF_INET);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ xfrm_decode_session(skb, &fl, AF_INET);
break;
case htons(ETH_P_IPV6):
- xfrm_decode_session(skb, &fl, AF_INET6);
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ xfrm_decode_session(skb, &fl, AF_INET6);
break;
default:
goto tx_err;
diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
index 3205d5f7c8c9..4966ac2aaf87 100644
--- a/net/ipv4/metrics.c
+++ b/net/ipv4/metrics.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/netlink.h>
+#include <linux/nospec.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
#include <net/ip.h>
@@ -28,6 +29,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
return -EINVAL;
}
+ type = array_index_nospec(type, RTAX_MAX + 1);
if (type == RTAX_CC_ALGO) {
char tmp[TCP_CA_NAME_MAX];
diff --git a/net/ipv4/netfilter/nf_socket_ipv4.c b/net/ipv4/netfilter/nf_socket_ipv4.c
index 36a28d46149c..1b669e9a3ca7 100644
--- a/net/ipv4/netfilter/nf_socket_ipv4.c
+++ b/net/ipv4/netfilter/nf_socket_ipv4.c
@@ -92,11 +92,11 @@ nf_socket_get_sock_v4(struct net *net, struct sk_buff *skb, const int doff,
struct sock *nf_sk_lookup_slow_v4(struct net *net, const struct sk_buff *skb,
const struct net_device *indev)
{
- __be32 uninitialized_var(daddr), uninitialized_var(saddr);
- __be16 uninitialized_var(dport), uninitialized_var(sport);
+ __be32 daddr, saddr;
+ __be16 dport, sport;
const struct iphdr *iph = ip_hdr(skb);
struct sk_buff *data_skb = NULL;
- u8 uninitialized_var(protocol);
+ u8 protocol;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
enum ip_conntrack_info ctinfo;
struct nf_conn const *ct;
diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
index b2bae0b0e42a..61cb2341f50f 100644
--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
+++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
@@ -38,7 +38,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
hp->source, lport ? lport : hp->dest,
skb->dev, NF_TPROXY_LOOKUP_LISTENER);
if (sk2) {
- inet_twsk_deschedule_put(inet_twsk(sk));
+ nf_tproxy_twsk_deschedule_put(inet_twsk(sk));
sk = sk2;
}
}
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
index abf89b972094..330349b5d6a4 100644
--- a/net/ipv4/netfilter/nft_dup_ipv4.c
+++ b/net/ipv4/netfilter/nft_dup_ipv4.c
@@ -13,8 +13,8 @@
#include <net/netfilter/ipv4/nf_dup_ipv4.h>
struct nft_dup_ipv4 {
- enum nft_registers sreg_addr:8;
- enum nft_registers sreg_dev:8;
+ u8 sreg_addr;
+ u8 sreg_dev;
};
static void nft_dup_ipv4_eval(const struct nft_expr *expr,
@@ -40,16 +40,16 @@ static int nft_dup_ipv4_init(const struct nft_ctx *ctx,
if (tb[NFTA_DUP_SREG_ADDR] == NULL)
return -EINVAL;
- priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
- err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in_addr));
+ err = nft_parse_register_load(tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
+ sizeof(struct in_addr));
if (err < 0)
return err;
- if (tb[NFTA_DUP_SREG_DEV] != NULL) {
- priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
- return nft_validate_register_load(priv->sreg_dev, sizeof(int));
- }
- return 0;
+ if (tb[NFTA_DUP_SREG_DEV])
+ err = nft_parse_register_load(tb[NFTA_DUP_SREG_DEV],
+ &priv->sreg_dev, sizeof(int));
+
+ return err;
}
static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
index ce294113dbcd..85eac5aa5204 100644
--- a/net/ipv4/netfilter/nft_fib_ipv4.c
+++ b/net/ipv4/netfilter/nft_fib_ipv4.c
@@ -83,6 +83,9 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
else
oif = NULL;
+ if (priv->flags & NFTA_FIB_F_IIF)
+ fl4.flowi4_oif = l3mdev_master_ifindex_rcu(oif);
+
if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
nft_fib_store_result(dest, priv, nft_in(pkt));
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 4d69b3de980a..0137854a7faa 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -1222,7 +1222,7 @@ static int nh_create_ipv4(struct net *net, struct nexthop *nh,
if (!err) {
nh->nh_flags = fib_nh->fib_nh_flags;
fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
- fib_nh->fib_nh_scope);
+ !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);
} else {
fib_nh_release(net, fib_nh);
}
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index ddc24e57dc55..1044f4985922 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -558,6 +558,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
ipcm_init_sk(&ipc, inet);
+ /* Keep backward compat */
+ if (hdrincl)
+ ipc.protocol = IPPROTO_RAW;
if (msg->msg_controllen) {
err = ip_cmsg_send(sk, msg, &ipc, false);
@@ -625,7 +628,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos,
RT_SCOPE_UNIVERSE,
- hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+ hdrincl ? ipc.protocol : sk->sk_protocol,
inet_sk_flowi_flags(sk) |
(hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
daddr, saddr, 0, 0, sk->sk_uid);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 7004e379c325..902296ef3e5a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -799,7 +799,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
goto reject_redirect;
}
- n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
+ n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
if (!n)
n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
if (!IS_ERR(n)) {
@@ -1221,6 +1221,7 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
+ struct net_device *dev;
struct ip_options opt;
int res;
@@ -1238,7 +1239,8 @@ static void ipv4_send_dest_unreach(struct sk_buff *skb)
opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
rcu_read_lock();
- res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+ dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
+ res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
rcu_read_unlock();
if (res)
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 3f6c9514c7a9..3cc22f776a31 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -43,7 +43,6 @@ static siphash_key_t syncookie_secret[2] __read_mostly;
* requested/supported by the syn/synack exchange.
*/
#define TSBITS 6
-#define TSMASK (((__u32)1 << TSBITS) - 1)
static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
u32 count, int c)
@@ -62,29 +61,24 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
* Since subsequent timestamps use the normal tcp_time_stamp value, we
* must make sure that the resulting initial timestamp is <= tcp_time_stamp.
*/
-u64 cookie_init_timestamp(struct request_sock *req)
+u64 cookie_init_timestamp(struct request_sock *req, u64 now)
{
- struct inet_request_sock *ireq;
- u32 ts, ts_now = tcp_time_stamp_raw();
+ const struct inet_request_sock *ireq = inet_rsk(req);
+ u64 ts, ts_now = tcp_ns_to_ts(now);
u32 options = 0;
- ireq = inet_rsk(req);
-
options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK;
if (ireq->sack_ok)
options |= TS_OPT_SACK;
if (ireq->ecn_ok)
options |= TS_OPT_ECN;
- ts = ts_now & ~TSMASK;
+ ts = (ts_now >> TSBITS) << TSBITS;
ts |= options;
- if (ts > ts_now) {
- ts >>= TSBITS;
- ts--;
- ts <<= TSBITS;
- ts |= options;
- }
- return (u64)ts * (NSEC_PER_SEC / TCP_TS_HZ);
+ if (ts > ts_now)
+ ts -= (1UL << TSBITS);
+
+ return ts * (NSEC_PER_SEC / TCP_TS_HZ);
}
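
The rewrite replaces the mask-and-fixup dance with plain arithmetic: round the timestamp down to a TSBITS boundary, store the option bits in the low TSBITS, and step back one full granule if that overshot ts_now, so the result never exceeds the current timestamp. A worked userspace example of the encoding (values chosen only to exercise the overshoot path):

#include <stdint.h>
#include <stdio.h>

#define TSBITS 6

/* Encode up to TSBITS option bits into the low bits of a timestamp
 * while never exceeding the current time.
 */
static uint64_t encode_ts(uint64_t ts_now, uint32_t options)
{
	uint64_t ts = (ts_now >> TSBITS) << TSBITS;	/* round down */

	ts |= options;
	if (ts > ts_now)		/* options pushed us into the future */
		ts -= 1ULL << TSBITS;	/* step back one granule */
	return ts;
}

int main(void)
{
	/* low bits of "now" are 0b000001, options are 0b111111 */
	uint64_t now = (100ULL << TSBITS) | 1;	/* 6401 */
	uint64_t ts = encode_ts(now, 0x3f);

	/* ts = (99 << 6) | 0x3f = 6399 <= now = 6401 */
	printf("now=%llu ts=%llu\n",
	       (unsigned long long)now, (unsigned long long)ts);
	return 0;
}
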
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index c83a5d05aeaa..4d4dba1d42ae 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -363,61 +363,6 @@ bad_key:
return ret;
}
-static void proc_configure_early_demux(int enabled, int protocol)
-{
- struct net_protocol *ipprot;
-#if IS_ENABLED(CONFIG_IPV6)
- struct inet6_protocol *ip6prot;
-#endif
-
- rcu_read_lock();
-
- ipprot = rcu_dereference(inet_protos[protocol]);
- if (ipprot)
- ipprot->early_demux = enabled ? ipprot->early_demux_handler :
- NULL;
-
-#if IS_ENABLED(CONFIG_IPV6)
- ip6prot = rcu_dereference(inet6_protos[protocol]);
- if (ip6prot)
- ip6prot->early_demux = enabled ? ip6prot->early_demux_handler :
- NULL;
-#endif
- rcu_read_unlock();
-}
-
-static int proc_tcp_early_demux(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int ret = 0;
-
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
-
- if (write && !ret) {
- int enabled = init_net.ipv4.sysctl_tcp_early_demux;
-
- proc_configure_early_demux(enabled, IPPROTO_TCP);
- }
-
- return ret;
-}
-
-static int proc_udp_early_demux(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int ret = 0;
-
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
-
- if (write && !ret) {
- int enabled = init_net.ipv4.sysctl_udp_early_demux;
-
- proc_configure_early_demux(enabled, IPPROTO_UDP);
- }
-
- return ret;
-}
-
static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
int write,
void __user *buffer,
@@ -701,14 +646,14 @@ static struct ctl_table ipv4_net_table[] = {
.data = &init_net.ipv4.sysctl_udp_early_demux,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_udp_early_demux
+ .proc_handler = proc_douintvec_minmax,
},
{
.procname = "tcp_early_demux",
.data = &init_net.ipv4.sysctl_tcp_early_demux,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_tcp_early_demux
+ .proc_handler = proc_douintvec_minmax,
},
{
.procname = "ip_default_ttl",
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0a570d5d0b38..8ebcff40bc5a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -429,6 +429,7 @@ void tcp_init_sock(struct sock *sk)
/* There's a bubble in the pipe until at least the first ACK. */
tp->app_limited = ~0U;
+ tp->rate_app_limited = 1;
/* See draft-stevens-tcpca-spec-01 for discussion of the
* initialization of these values.
@@ -504,6 +505,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
__poll_t mask;
struct sock *sk = sock->sk;
const struct tcp_sock *tp = tcp_sk(sk);
+ u8 shutdown;
int state;
sock_poll_wait(file, sock, wait);
@@ -546,9 +548,10 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
* NOTE. Check for TCP_CLOSE is added. The goal is to prevent
* blocking on fresh not-connected or disconnected socket. --ANK
*/
- if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
+ shutdown = READ_ONCE(sk->sk_shutdown);
+ if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
mask |= EPOLLHUP;
- if (sk->sk_shutdown & RCV_SHUTDOWN)
+ if (shutdown & RCV_SHUTDOWN)
mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
/* Connected or passive Fast Open socket? */
@@ -564,8 +567,8 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
if (tcp_stream_is_readable(tp, target, sk))
mask |= EPOLLIN | EPOLLRDNORM;
- if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
- if (sk_stream_is_writeable(sk)) {
+ if (!(shutdown & SEND_SHUTDOWN)) {
+ if (__sk_stream_is_writeable(sk, 1)) {
mask |= EPOLLOUT | EPOLLWRNORM;
} else { /* send SIGIO later */
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -577,7 +580,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
* pairs with the input side.
*/
smp_mb__after_atomic();
- if (sk_stream_is_writeable(sk))
+ if (__sk_stream_is_writeable(sk, 1))
mask |= EPOLLOUT | EPOLLWRNORM;
}
} else
@@ -722,6 +725,7 @@ static void tcp_push(struct sock *sk, int flags, int mss_now,
if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
+ smp_mb__after_atomic();
}
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED.
@@ -2349,14 +2353,13 @@ bool tcp_check_oom(struct sock *sk, int shift)
return too_many_orphans || out_of_socket_memory;
}
-void tcp_close(struct sock *sk, long timeout)
+void __tcp_close(struct sock *sk, long timeout)
{
struct sk_buff *skb;
int data_was_unread = 0;
int state;
- lock_sock(sk);
- sk->sk_shutdown = SHUTDOWN_MASK;
+ WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
if (sk->sk_state == TCP_LISTEN) {
tcp_set_state(sk, TCP_CLOSE);
@@ -2519,6 +2522,12 @@ adjudge_to_death:
out:
bh_unlock_sock(sk);
local_bh_enable();
+}
+
+void tcp_close(struct sock *sk, long timeout)
+{
+ lock_sock(sk);
+ __tcp_close(sk, timeout);
release_sock(sk);
sock_put(sk);
}
@@ -2580,6 +2589,12 @@ int tcp_disconnect(struct sock *sk, int flags)
int old_state = sk->sk_state;
u32 seq;
+ /* Deny disconnect if other threads are blocked in sk_wait_event()
+ * or inet_wait_for_connect().
+ */
+ if (sk->sk_wait_pending)
+ return -EBUSY;
+
if (old_state != TCP_CLOSE)
tcp_set_state(sk, TCP_CLOSE);
@@ -2616,7 +2631,7 @@ int tcp_disconnect(struct sock *sk, int flags)
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
- sk->sk_shutdown = 0;
+ WRITE_ONCE(sk->sk_shutdown, 0);
sock_reset_flag(sk, SOCK_DONE);
tp->srtt_us = 0;
tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
@@ -2635,6 +2650,8 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd = TCP_INIT_CWND;
tp->snd_cwnd_cnt = 0;
+ tp->is_cwnd_limited = 0;
+ tp->max_packets_out = 0;
tp->window_clamp = 0;
tp->delivered = 0;
tp->delivered_ce = 0;
@@ -2652,8 +2669,7 @@ int tcp_disconnect(struct sock *sk, int flags)
icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
- dst_release(sk->sk_rx_dst);
- sk->sk_rx_dst = NULL;
+ dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL));
tcp_saved_syn_free(tp);
tp->compressed_ack = 0;
tp->segs_in = 0;
@@ -2674,6 +2690,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->last_oow_ack_time = 0;
/* There's a bubble in the pipe until at least the first ACK. */
tp->app_limited = ~0U;
+ tp->rate_app_limited = 1;
tp->rack.mstamp = 0;
tp->rack.advanced = 0;
tp->rack.reo_wnd_steps = 1;
@@ -3057,18 +3074,18 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
case TCP_LINGER2:
if (val < 0)
- tp->linger2 = -1;
- else if (val > net->ipv4.sysctl_tcp_fin_timeout / HZ)
- tp->linger2 = 0;
+ WRITE_ONCE(tp->linger2, -1);
+ else if (val > TCP_FIN_TIMEOUT_MAX / HZ)
+ WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX);
else
- tp->linger2 = val * HZ;
+ WRITE_ONCE(tp->linger2, val * HZ);
break;
case TCP_DEFER_ACCEPT:
/* Translate value in seconds to number of retransmits */
- icsk->icsk_accept_queue.rskq_defer_accept =
- secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
- TCP_RTO_MAX / HZ);
+ WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept,
+ secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
+ TCP_RTO_MAX / HZ));
break;
case TCP_WINDOW_CLAMP:
@@ -3156,7 +3173,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
err = tcp_repair_set_window(tp, optval, optlen);
break;
case TCP_NOTSENT_LOWAT:
- tp->notsent_lowat = val;
+ WRITE_ONCE(tp->notsent_lowat, val);
sk->sk_write_space(sk);
break;
case TCP_INQ:
@@ -3168,7 +3185,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
case TCP_TX_DELAY:
if (val)
tcp_enable_tx_delay();
- tp->tcp_tx_delay = val;
+ WRITE_ONCE(tp->tcp_tx_delay, val);
break;
default:
err = -ENOPROTOOPT;
@@ -3434,15 +3451,16 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
-
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
switch (optname) {
case TCP_MAXSEG:
val = tp->mss_cache;
- if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
+ if (tp->rx_opt.user_mss &&
+ ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
val = tp->rx_opt.user_mss;
if (tp->repair)
val = tp->rx_opt.mss_clamp;
@@ -3466,13 +3484,14 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
break;
case TCP_LINGER2:
- val = tp->linger2;
+ val = READ_ONCE(tp->linger2);
if (val >= 0)
val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ;
break;
case TCP_DEFER_ACCEPT:
- val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
- TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
+ val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept);
+ val = retrans_to_secs(val, TCP_TIMEOUT_INIT / HZ,
+ TCP_RTO_MAX / HZ);
break;
case TCP_WINDOW_CLAMP:
val = tp->window_clamp;
@@ -3612,7 +3631,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
break;
case TCP_FASTOPEN:
- val = icsk->icsk_accept_queue.fastopenq.max_qlen;
+ val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen);
break;
case TCP_FASTOPEN_CONNECT:
@@ -3624,14 +3643,14 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
break;
case TCP_TX_DELAY:
- val = tp->tcp_tx_delay;
+ val = READ_ONCE(tp->tcp_tx_delay);
break;
case TCP_TIMESTAMP:
val = tcp_time_stamp_raw() + tp->tsoffset;
break;
case TCP_NOTSENT_LOWAT:
- val = tp->notsent_lowat;
+ val = READ_ONCE(tp->notsent_lowat);
break;
case TCP_INQ:
val = tp->recvmsg_inq;
@@ -3768,12 +3787,16 @@ static void __tcp_alloc_md5sig_pool(void)
* to memory. See smp_rmb() in tcp_get_md5sig_pool()
*/
smp_wmb();
- tcp_md5sig_pool_populated = true;
+ /* Paired with READ_ONCE() from tcp_alloc_md5sig_pool()
+ * and tcp_get_md5sig_pool().
+ */
+ WRITE_ONCE(tcp_md5sig_pool_populated, true);
}
bool tcp_alloc_md5sig_pool(void)
{
- if (unlikely(!tcp_md5sig_pool_populated)) {
+ /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */
+ if (unlikely(!READ_ONCE(tcp_md5sig_pool_populated))) {
mutex_lock(&tcp_md5sig_mutex);
if (!tcp_md5sig_pool_populated) {
@@ -3784,7 +3807,8 @@ bool tcp_alloc_md5sig_pool(void)
mutex_unlock(&tcp_md5sig_mutex);
}
- return tcp_md5sig_pool_populated;
+ /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */
+ return READ_ONCE(tcp_md5sig_pool_populated);
}
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
@@ -3800,7 +3824,8 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
local_bh_disable();
- if (tcp_md5sig_pool_populated) {
+ /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */
+ if (READ_ONCE(tcp_md5sig_pool_populated)) {
/* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
smp_rmb();
return this_cpu_ptr(&tcp_md5sig_pool);
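
The populated flag is now read and written through READ_ONCE()/WRITE_ONCE() so the compiler can neither tear nor cache it; the existing smp_wmb()/smp_rmb() pair still orders the flag against the pool contents. In C11 terms the same publish/observe shape is a release store paired with acquire loads plus double-checked locking; a minimal userspace sketch with stand-in names (build with cc -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static int pool[16];				/* lazily built state */
static atomic_bool populated;			/* publication flag */
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

static void build_pool(void)
{
	for (int i = 0; i < 16; i++)
		pool[i] = i;
	/* Release pairs with the acquire loads below: everything written
	 * above is visible once the flag reads true.
	 */
	atomic_store_explicit(&populated, true, memory_order_release);
}

static bool pool_alloc(void)
{
	if (!atomic_load_explicit(&populated, memory_order_acquire)) {
		pthread_mutex_lock(&init_lock);
		/* Re-check under the lock, like the kernel code does. */
		if (!atomic_load_explicit(&populated, memory_order_relaxed))
			build_pool();
		pthread_mutex_unlock(&init_lock);
	}
	return atomic_load_explicit(&populated, memory_order_acquire);
}

static int *pool_get(void)
{
	if (atomic_load_explicit(&populated, memory_order_acquire))
		return pool;
	return NULL;
}

int main(void)
{
	if (pool_alloc())
		printf("%d\n", pool_get()[15]);	/* 15 */
	return 0;
}
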
@@ -3882,7 +3907,7 @@ void tcp_done(struct sock *sk)
if (req)
reqsk_fastopen_remove(sk, req, false);
- sk->sk_shutdown = SHUTDOWN_MASK;
+ WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_state_change(sk);
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index bcc13368c836..ca49d68a0e04 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -203,8 +203,11 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
tmp->sg.end = i;
if (apply) {
apply_bytes -= size;
- if (!apply_bytes)
+ if (!apply_bytes) {
+ if (sge->length)
+ sk_msg_iter_var_prev(i);
break;
+ }
}
} while (i != msg->sg.end);
@@ -311,8 +314,8 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
{
bool cork = false, enospc = sk_msg_full(msg);
struct sock *sk_redir;
- u32 tosend, delta = 0;
- u32 eval = __SK_NONE;
+ u32 tosend, origsize, sent, delta = 0;
+ u32 eval;
int ret;
more_data:
@@ -343,6 +346,7 @@ more_data:
tosend = msg->sg.size;
if (psock->apply_bytes && psock->apply_bytes < tosend)
tosend = psock->apply_bytes;
+ eval = __SK_NONE;
switch (psock->eval) {
case __SK_PASS:
@@ -366,10 +370,12 @@ more_data:
cork = true;
psock->cork = NULL;
}
- sk_msg_return(sk, msg, msg->sg.size);
+ sk_msg_return(sk, msg, tosend);
release_sock(sk);
+ origsize = msg->sg.size;
ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
+ sent = origsize - msg->sg.size;
if (eval == __SK_REDIRECT)
sock_put(sk_redir);
@@ -408,7 +414,7 @@ more_data:
msg->sg.data[msg->sg.start].page_link &&
msg->sg.data[msg->sg.start].length) {
if (eval == __SK_REDIRECT)
- sk_mem_charge(sk, msg->sg.size);
+ sk_mem_charge(sk, tosend - sent);
goto more_data;
}
}
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 709d23801823..56dede4b59d9 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -375,6 +375,7 @@ static void tcp_cdg_init(struct sock *sk)
struct cdg *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
+ ca->gradients = NULL;
/* We silently fall back to window = 1 if allocation fails. */
if (window > 1)
ca->gradients = kcalloc(window, sizeof(ca->gradients[0]),
@@ -388,6 +389,7 @@ static void tcp_cdg_release(struct sock *sk)
struct cdg *ca = inet_csk_ca(sk);
kfree(ca->gradients);
+ ca->gradients = NULL;
}
static struct tcp_congestion_ops tcp_cdg __read_mostly = {
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 21705b2ddaff..35088cd30840 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -312,6 +312,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
static bool tcp_fastopen_queue_check(struct sock *sk)
{
struct fastopen_queue *fastopenq;
+ int max_qlen;
/* Make sure the listener has enabled fastopen, and we don't
* exceed the max # of pending TFO requests allowed before trying
@@ -324,10 +325,11 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
* temporarily vs a server not supporting Fast Open at all.
*/
fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
- if (fastopenq->max_qlen == 0)
+ max_qlen = READ_ONCE(fastopenq->max_qlen);
+ if (max_qlen == 0)
return false;
- if (fastopenq->qlen >= fastopenq->max_qlen) {
+ if (fastopenq->qlen >= max_qlen) {
struct request_sock *req1;
spin_lock(&fastopenq->lock);
req1 = fastopenq->rskq_rst_head;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f4e00ff909da..61243531a7f4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -178,6 +178,19 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
if (unlikely(len > icsk->icsk_ack.rcv_mss +
MAX_TCP_OPTION_SPACE))
tcp_gro_dev_warn(sk, skb, len);
+ /* If the skb has a len of exactly 1*MSS and has the PSH bit
+ * set then it is likely the end of an application write. So
+ * more data may not be arriving soon, and yet the data sender
+ * may be waiting for an ACK if cwnd-bound or using TX zero
+ * copy. So we set ICSK_ACK_PUSHED here so that
+ * tcp_cleanup_rbuf() will send an ACK immediately if the app
+ * reads all of the data and is not ping-pong. If len > MSS
+ * then this logic does not matter (and does not hurt) because
+ * tcp_cleanup_rbuf() will always ACK immediately if the app
+ * reads data and there is more than an MSS of unACKed data.
+ */
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
+ icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
} else {
/* Otherwise, we make more careful check taking into account,
* that SACKs block is variable.
@@ -222,7 +235,7 @@ static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
icsk->icsk_ack.quick = quickacks;
}
-void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
+static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -230,7 +243,6 @@ void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
inet_csk_exit_pingpong_mode(sk);
icsk->icsk_ack.ato = TCP_ATO_MIN;
}
-EXPORT_SYMBOL(tcp_enter_quickack_mode);
/* Send ACKs quickly, if "quick" count is not exhausted
* and the session is not interactive.
@@ -2045,15 +2057,17 @@ void tcp_enter_loss(struct sock *sk)
* restore sanity to the SACK scoreboard. If the apparent reneging
* persists until this RTO then we'll clear the SACK scoreboard.
*/
-static bool tcp_check_sack_reneging(struct sock *sk, int flag)
+static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
{
- if (flag & FLAG_SACK_RENEGING) {
+ if (*ack_flag & FLAG_SACK_RENEGING &&
+ *ack_flag & FLAG_SND_UNA_ADVANCED) {
struct tcp_sock *tp = tcp_sk(sk);
unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
msecs_to_jiffies(10));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
delay, TCP_RTO_MAX);
+ *ack_flag &= ~FLAG_SET_XMIT_TIMER;
return true;
}
return false;
@@ -2384,6 +2398,21 @@ static inline bool tcp_may_undo(const struct tcp_sock *tp)
return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
}
+static bool tcp_is_non_sack_preventing_reopen(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
+ /* Hold old state until something *above* high_seq
+ * is ACKed. For Reno it is MUST to prevent false
+ * fast retransmits (RFC2582). SACK TCP is safe. */
+ if (!tcp_any_retrans_done(sk))
+ tp->retrans_stamp = 0;
+ return true;
+ }
+ return false;
+}
+
/* People celebrate: "We love our President!" */
static bool tcp_try_undo_recovery(struct sock *sk)
{
@@ -2406,14 +2435,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
} else if (tp->rack.reo_wnd_persist) {
tp->rack.reo_wnd_persist--;
}
- if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
- /* Hold old state until something *above* high_seq
- * is ACKed. For Reno it is MUST to prevent false
- * fast retransmits (RFC2582). SACK TCP is safe. */
- if (!tcp_any_retrans_done(sk))
- tp->retrans_stamp = 0;
+ if (tcp_is_non_sack_preventing_reopen(sk))
return true;
- }
tcp_set_ca_state(sk, TCP_CA_Open);
tp->is_sack_reneg = 0;
return false;
@@ -2449,6 +2472,8 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPSPURIOUSRTOS);
inet_csk(sk)->icsk_retransmits = 0;
+ if (tcp_is_non_sack_preventing_reopen(sk))
+ return true;
if (frto_undo || tcp_is_sack(tp)) {
tcp_set_ca_state(sk, TCP_CA_Open);
tp->is_sack_reneg = 0;
@@ -2816,7 +2841,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
tp->prior_ssthresh = 0;
/* B. In all the states check for reneging SACKs. */
- if (tcp_check_sack_reneging(sk, flag))
+ if (tcp_check_sack_reneging(sk, ack_flag))
return;
/* C. Check consistency of the current state. */
@@ -3433,8 +3458,11 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
u32 *last_oow_ack_time)
{
- if (*last_oow_ack_time) {
- s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
+ /* Paired with the WRITE_ONCE() in this function. */
+ u32 val = READ_ONCE(*last_oow_ack_time);
+
+ if (val) {
+ s32 elapsed = (s32)(tcp_jiffies32 - val);
if (0 <= elapsed &&
elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
@@ -3443,7 +3471,10 @@ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
}
}
- *last_oow_ack_time = tcp_jiffies32;
+ /* Paired with the prior READ_ONCE() and with itself,
+ * as we might be lockless.
+ */
+ WRITE_ONCE(*last_oow_ack_time, tcp_jiffies32);
return false; /* not rate-limited: go ahead, send dupack now! */
}
@@ -3484,11 +3515,11 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
/* Then check host-wide RFC 5961 rate limit. */
now = jiffies / HZ;
- if (now != challenge_timestamp) {
+ if (now != READ_ONCE(challenge_timestamp)) {
u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
u32 half = (ack_limit + 1) >> 1;
- challenge_timestamp = now;
+ WRITE_ONCE(challenge_timestamp, now);
WRITE_ONCE(challenge_count, half + prandom_u32_max(ack_limit));
}
count = READ_ONCE(challenge_count);
@@ -3626,8 +3657,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
* then we can probably ignore it.
*/
if (before(ack, prior_snd_una)) {
+ u32 max_window;
+
+ /* do not accept ACK for bytes we never sent. */
+ max_window = min_t(u64, tp->max_window, tp->bytes_acked);
/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
- if (before(ack, prior_snd_una - tp->max_window)) {
+ if (before(ack, prior_snd_una - max_window)) {
if (!(flag & FLAG_NO_CHALLENGE_ACK))
tcp_send_challenge_ack(sk, skb);
return -1;
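
The clamp means a challenge ACK is only generated for ACK values that could plausibly cover bytes we actually sent, instead of the full max_window span; the surrounding comparisons use TCP's wraparound-safe serial arithmetic. A tiny sketch of before(), the comparison the hunk relies on (two's-complement cast, as in the kernel; valid while the values are less than 2^31 apart):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* TCP sequence-number comparison: works across 32-bit wraparound. */
static bool before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	printf("%d\n", before(1, 2));			/* 1 */
	printf("%d\n", before(0xfffffff0u, 0x10));	/* 1: wrapped */
	printf("%d\n", before(0x10, 0xfffffff0u));	/* 0 */
	return 0;
}
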
@@ -4181,7 +4216,7 @@ void tcp_fin(struct sock *sk)
inet_csk_schedule_ack(sk);
- sk->sk_shutdown |= RCV_SHUTDOWN;
+ WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
sock_set_flag(sk, SOCK_DONE);
switch (sk->sk_state) {
@@ -5605,7 +5640,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
trace_tcp_probe(sk, skb);
tcp_mstamp_refresh(tp);
- if (unlikely(!sk->sk_rx_dst))
+ if (unlikely(!rcu_access_pointer(sk->sk_rx_dst)))
inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
/*
* Header prediction.
@@ -6145,22 +6180,23 @@ reset_and_undo:
static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
{
+ struct tcp_sock *tp = tcp_sk(sk);
struct request_sock *req;
/* If we are still handling the SYNACK RTO, see if timestamp ECR allows
* undo. If peer SACKs triggered fast recovery, we can't undo here.
*/
- if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
- tcp_try_undo_loss(sk, false);
+ if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out)
+ tcp_try_undo_recovery(sk);
/* Reset rtx states to prevent spurious retransmits_timed_out() */
- tcp_sk(sk)->retrans_stamp = 0;
+ tp->retrans_stamp = 0;
inet_csk(sk)->icsk_retransmits = 0;
/* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
* we no longer need req so release it.
*/
- req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
+ req = rcu_dereference_protected(tp->fastopen_rsk,
lockdep_sock_is_held(sk));
reqsk_fastopen_remove(sk, req, false);
@@ -6318,7 +6354,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
break;
tcp_set_state(sk, TCP_FIN_WAIT2);
- sk->sk_shutdown |= SEND_SHUTDOWN;
+ WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | SEND_SHUTDOWN);
sk_dst_confirm(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b95e1a3487c8..a54505c29a5c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -323,6 +323,8 @@ failure:
* if necessary.
*/
tcp_set_state(sk, TCP_CLOSE);
+ if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
+ inet_reset_saddr(sk);
ip_rt_put(rt);
sk->sk_route_caps = 0;
inet->inet_dport = 0;
@@ -1570,15 +1572,18 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
struct sock *rsk;
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
- struct dst_entry *dst = sk->sk_rx_dst;
+ struct dst_entry *dst;
+
+ dst = rcu_dereference_protected(sk->sk_rx_dst,
+ lockdep_sock_is_held(sk));
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
if (dst) {
if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
!dst->ops->check(dst, 0)) {
+ RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
dst_release(dst);
- sk->sk_rx_dst = NULL;
}
}
tcp_rcv_established(sk, skb);
@@ -1653,7 +1658,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk_fullsock(sk)) {
- struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+ struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
@@ -2059,7 +2064,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
struct dst_entry *dst = skb_dst(skb);
if (dst && dst_hold_safe(dst)) {
- sk->sk_rx_dst = dst;
+ rcu_assign_pointer(sk->sk_rx_dst, dst);
inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
}
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 0af6249a993a..627e3be0f754 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -40,7 +40,7 @@ struct tcp_fastopen_metrics {
struct tcp_metrics_block {
struct tcp_metrics_block __rcu *tcpm_next;
- possible_net_t tcpm_net;
+ struct net *tcpm_net;
struct inetpeer_addr tcpm_saddr;
struct inetpeer_addr tcpm_daddr;
unsigned long tcpm_stamp;
@@ -51,34 +51,38 @@ struct tcp_metrics_block {
struct rcu_head rcu_head;
};
-static inline struct net *tm_net(struct tcp_metrics_block *tm)
+static inline struct net *tm_net(const struct tcp_metrics_block *tm)
{
- return read_pnet(&tm->tcpm_net);
+ /* Paired with the WRITE_ONCE() in tcpm_new() */
+ return READ_ONCE(tm->tcpm_net);
}
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
enum tcp_metric_index idx)
{
- return tm->tcpm_lock & (1 << idx);
+ /* Paired with WRITE_ONCE() in tcpm_suck_dst() */
+ return READ_ONCE(tm->tcpm_lock) & (1 << idx);
}
-static u32 tcp_metric_get(struct tcp_metrics_block *tm,
+static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
enum tcp_metric_index idx)
{
- return tm->tcpm_vals[idx];
+ /* Paired with WRITE_ONCE() in tcp_metric_set() */
+ return READ_ONCE(tm->tcpm_vals[idx]);
}
static void tcp_metric_set(struct tcp_metrics_block *tm,
enum tcp_metric_index idx,
u32 val)
{
- tm->tcpm_vals[idx] = val;
+ /* Paired with READ_ONCE() in tcp_metric_get() */
+ WRITE_ONCE(tm->tcpm_vals[idx], val);
}
static bool addr_same(const struct inetpeer_addr *a,
const struct inetpeer_addr *b)
{
- return inetpeer_addr_cmp(a, b) == 0;
+ return (a->family == b->family) && !inetpeer_addr_cmp(a, b);
}
struct tcpm_hash_bucket {
@@ -89,6 +93,7 @@ static struct tcpm_hash_bucket *tcp_metrics_hash __read_mostly;
static unsigned int tcp_metrics_hash_log __read_mostly;
static DEFINE_SPINLOCK(tcp_metrics_lock);
+static DEFINE_SEQLOCK(fastopen_seqlock);
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
const struct dst_entry *dst,
@@ -97,7 +102,7 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
u32 msval;
u32 val;
- tm->tcpm_stamp = jiffies;
+ WRITE_ONCE(tm->tcpm_stamp, jiffies);
val = 0;
if (dst_metric_locked(dst, RTAX_RTT))
@@ -110,30 +115,42 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
val |= 1 << TCP_METRIC_CWND;
if (dst_metric_locked(dst, RTAX_REORDERING))
val |= 1 << TCP_METRIC_REORDERING;
- tm->tcpm_lock = val;
+ /* Paired with READ_ONCE() in tcp_metric_locked() */
+ WRITE_ONCE(tm->tcpm_lock, val);
msval = dst_metric_raw(dst, RTAX_RTT);
- tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;
+ tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);
msval = dst_metric_raw(dst, RTAX_RTTVAR);
- tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
- tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
- tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
- tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
+ tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
+ tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
+ dst_metric_raw(dst, RTAX_SSTHRESH));
+ tcp_metric_set(tm, TCP_METRIC_CWND,
+ dst_metric_raw(dst, RTAX_CWND));
+ tcp_metric_set(tm, TCP_METRIC_REORDERING,
+ dst_metric_raw(dst, RTAX_REORDERING));
if (fastopen_clear) {
+ write_seqlock(&fastopen_seqlock);
tm->tcpm_fastopen.mss = 0;
tm->tcpm_fastopen.syn_loss = 0;
tm->tcpm_fastopen.try_exp = 0;
tm->tcpm_fastopen.cookie.exp = false;
tm->tcpm_fastopen.cookie.len = 0;
+ write_sequnlock(&fastopen_seqlock);
}
}
#define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
-static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
+static void tcpm_check_stamp(struct tcp_metrics_block *tm,
+ const struct dst_entry *dst)
{
- if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
+ unsigned long limit;
+
+ if (!tm)
+ return;
+ limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT;
+ if (unlikely(time_after(jiffies, limit)))
tcpm_suck_dst(tm, dst, false);
}
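
Moving fastopen_seqlock up lets tcpm_suck_dst() clear the TFO fields inside a write section, so readers never observe a half-reset cookie spread across several words. A toy seqlock showing the odd/even protocol; everything is seq_cst to keep the sketch obviously correct, whereas the kernel seqlock uses cheaper, carefully placed barriers:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic unsigned int seq;
static _Atomic uint32_t cookie_lo, cookie_hi;	/* multi-word data */

/* Writer: odd seq marks the section open, even closes it. */
static void write_cookie(uint32_t lo, uint32_t hi)
{
	atomic_fetch_add(&seq, 1);	/* -> odd */
	atomic_store(&cookie_lo, lo);
	atomic_store(&cookie_hi, hi);
	atomic_fetch_add(&seq, 1);	/* -> even */
}

/* Reader: retry while a write was in flight or completed underneath. */
static uint64_t read_cookie(void)
{
	unsigned int start;
	uint32_t lo, hi;

	do {
		start = atomic_load(&seq);
		lo = atomic_load(&cookie_lo);
		hi = atomic_load(&cookie_hi);
	} while ((start & 1) || start != atomic_load(&seq));

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	write_cookie(0x11111111, 0x22222222);
	printf("%#llx\n", (unsigned long long)read_cookie());
	return 0;
}
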
@@ -174,20 +191,23 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
oldest = deref_locked(tcp_metrics_hash[hash].chain);
for (tm = deref_locked(oldest->tcpm_next); tm;
tm = deref_locked(tm->tcpm_next)) {
- if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
+ if (time_before(READ_ONCE(tm->tcpm_stamp),
+ READ_ONCE(oldest->tcpm_stamp)))
oldest = tm;
}
tm = oldest;
} else {
- tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
+ tm = kzalloc(sizeof(*tm), GFP_ATOMIC);
if (!tm)
goto out_unlock;
}
- write_pnet(&tm->tcpm_net, net);
+ /* Paired with the READ_ONCE() in tm_net() */
+ WRITE_ONCE(tm->tcpm_net, net);
+
tm->tcpm_saddr = *saddr;
tm->tcpm_daddr = *daddr;
- tcpm_suck_dst(tm, dst, true);
+ tcpm_suck_dst(tm, dst, reclaim);
if (likely(!reclaim)) {
tm->tcpm_next = tcp_metrics_hash[hash].chain;
@@ -431,7 +451,7 @@ void tcp_update_metrics(struct sock *sk)
tp->reordering);
}
}
- tm->tcpm_stamp = jiffies;
+ WRITE_ONCE(tm->tcpm_stamp, jiffies);
out_unlock:
rcu_read_unlock();
}
@@ -446,11 +466,15 @@ void tcp_init_metrics(struct sock *sk)
u32 val, crtt = 0; /* cached RTT scaled by 8 */
sk_dst_confirm(sk);
+ /* ssthresh may have been reduced unnecessarily during
+ * 3WHS. Restore it to its initial default.
+ */
+ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
if (!dst)
goto reset;
rcu_read_lock();
- tm = tcp_get_metrics(sk, dst, true);
+ tm = tcp_get_metrics(sk, dst, false);
if (!tm) {
rcu_read_unlock();
goto reset;
@@ -464,11 +488,6 @@ void tcp_init_metrics(struct sock *sk)
tp->snd_ssthresh = val;
if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
tp->snd_ssthresh = tp->snd_cwnd_clamp;
- } else {
- /* ssthresh may have been reduced unnecessarily during.
- * 3WHS. Restore it back to its initial default.
- */
- tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
}
val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
if (val && tp->reordering != val)
@@ -534,8 +553,6 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
return ret;
}
-static DEFINE_SEQLOCK(fastopen_seqlock);
-
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
struct tcp_fastopen_cookie *cookie)
{
@@ -642,7 +659,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
}
if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
- jiffies - tm->tcpm_stamp,
+ jiffies - READ_ONCE(tm->tcpm_stamp),
TCP_METRICS_ATTR_PAD) < 0)
goto nla_put_failure;
@@ -653,7 +670,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
if (!nest)
goto nla_put_failure;
for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
- u32 val = tm->tcpm_vals[i];
+ u32 val = tcp_metric_get(tm, i);
if (!val)
continue;
@@ -885,7 +902,7 @@ static void tcp_metrics_flush_all(struct net *net)
match = net ? net_eq(tm_net(tm), net) :
!refcount_read(&tm_net(tm)->count);
if (match) {
- *pp = tm->tcpm_next;
+ rcu_assign_pointer(*pp, tm->tcpm_next);
kfree_rcu(tm, rcu_head);
} else {
pp = &tm->tcpm_next;
@@ -926,7 +943,7 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
if (addr_same(&tm->tcpm_daddr, &daddr) &&
(!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
net_eq(tm_net(tm), net)) {
- *pp = tm->tcpm_next;
+ rcu_assign_pointer(*pp, tm->tcpm_next);
kfree_rcu(tm, rcu_head);
found = true;
} else {
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 324f43fadb37..e5dc08579cf5 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -563,6 +563,9 @@ EXPORT_SYMBOL(tcp_create_openreq_child);
* validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
*
* We don't need to initialize tmp_opt.sack_ok as we don't use the results
+ *
+ * Note: If @fastopen is true, this can be called from process context.
+ * Otherwise, this is from BH context.
*/
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
@@ -715,7 +718,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
&tcp_rsk(req)->last_oow_ack_time))
req->rsk_ops->send_ack(sk, skb, req);
if (paws_reject)
- __NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
return NULL;
}
@@ -734,7 +737,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
* "fourth, check the SYN bit"
*/
if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
- __TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
+ TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
goto embryonic_reset;
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ef749a47768a..6d7f441c7dd7 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -179,8 +179,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
}
/* Account for an ACK we sent. */
-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
- u32 rcv_nxt)
+static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -194,7 +193,7 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
if (unlikely(rcv_nxt != tp->rcv_nxt))
return; /* Special ACK sent by DCTCP to reflect ECN */
- tcp_dec_quickack_mode(sk, pkts);
+ tcp_dec_quickack_mode(sk);
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
@@ -1104,7 +1103,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
skb_set_hash_from_sk(skb, sk);
refcount_add(skb->truesize, &sk->sk_wmem_alloc);
- skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
+ skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm));
/* Build TCP header and checksum it. */
th = (struct tcphdr *)skb->data;
@@ -1152,7 +1151,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
icsk->icsk_af_ops->send_check(sk, skb);
if (likely(tcb->tcp_flags & TCPHDR_ACK))
- tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
+ tcp_event_ack_sent(sk, rcv_nxt);
if (skb->len != tcp_header_size) {
tcp_event_data_sent(tp, sk);
@@ -1653,15 +1652,20 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
struct tcp_sock *tp = tcp_sk(sk);
- /* Track the maximum number of outstanding packets in each
- * window, and remember whether we were cwnd-limited then.
+ /* Track the strongest available signal of the degree to which the cwnd
+ * is fully utilized. If cwnd-limited then remember that fact for the
+ * current window. If not cwnd-limited then track the maximum number of
+ * outstanding packets in the current window. (If cwnd-limited then we
+ * chose to not update tp->max_packets_out to avoid an extra else
+ * clause with no functional impact.)
*/
- if (!before(tp->snd_una, tp->max_packets_seq) ||
- tp->packets_out > tp->max_packets_out ||
- is_cwnd_limited) {
- tp->max_packets_out = tp->packets_out;
- tp->max_packets_seq = tp->snd_nxt;
+ if (!before(tp->snd_una, tp->cwnd_usage_seq) ||
+ is_cwnd_limited ||
+ (!tp->is_cwnd_limited &&
+ tp->packets_out > tp->max_packets_out)) {
tp->is_cwnd_limited = is_cwnd_limited;
+ tp->max_packets_out = tp->packets_out;
+ tp->cwnd_usage_seq = tp->snd_nxt;
}
if (tcp_is_cwnd_limited(sk)) {
@@ -2255,6 +2259,18 @@ static bool tcp_pacing_check(struct sock *sk)
return true;
}
+static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk)
+{
+ const struct rb_node *node = sk->tcp_rtx_queue.rb_node;
+
+ /* No skb in the rtx queue. */
+ if (!node)
+ return true;
+
+ /* Only one skb in rtx queue. */
+ return !node->rb_left && !node->rb_right;
+}
+
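
/*
 * Aside: the shape of the new helper above, sketched against a toy rb_node.
 * An empty tree has a NULL root, and a single-entry tree is a root with no
 * children, so neither case needs a traversal. The struct is a stand-in,
 * not the kernel's struct rb_node.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct rb_node {
	struct rb_node *rb_left;
	struct rb_node *rb_right;
};

static bool rtx_queue_empty_or_single(const struct rb_node *root)
{
	if (!root)
		return true;		/* no skb in the queue */
	return !root->rb_left && !root->rb_right;	/* exactly one */
}

int main(void)
{
	struct rb_node leaf = { NULL, NULL };
	struct rb_node root = { &leaf, NULL };

	printf("%d %d %d\n",
	       rtx_queue_empty_or_single(NULL),	/* 1: empty */
	       rtx_queue_empty_or_single(&leaf),	/* 1: single node */
	       rtx_queue_empty_or_single(&root));	/* 0: two nodes */
	return 0;
}
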
/* TCP Small Queues :
* Control number of packets in qdisc/devices to two packets / or ~1 ms.
* (These limits are doubled for retransmits)
@@ -2292,12 +2308,12 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
limit += extra_bytes;
}
if (refcount_read(&sk->sk_wmem_alloc) > limit) {
- /* Always send skb if rtx queue is empty.
+ /* Always send skb if rtx queue is empty or has one skb.
* No need to wait for TX completion to call us back,
* after softirq/tasklet schedule.
* This helps when TX completions are delayed too much.
*/
- if (tcp_rtx_queue_empty(sk))
+ if (tcp_rtx_queue_empty_or_single_skb(sk))
return false;
set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
@@ -2500,7 +2516,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- u32 timeout, rto_delta_us;
+ u32 timeout, timeout_us, rto_delta_us;
int early_retrans;
/* Don't do any loss probe on a Fast Open connection before 3WHS
@@ -2524,11 +2540,12 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
* sample is available then probe after TCP_TIMEOUT_INIT.
*/
if (tp->srtt_us) {
- timeout = usecs_to_jiffies(tp->srtt_us >> 2);
+ timeout_us = tp->srtt_us >> 2;
if (tp->packets_out == 1)
- timeout += TCP_RTO_MIN;
+ timeout_us += tcp_rto_min_us(sk);
else
- timeout += TCP_TIMEOUT_MIN;
+ timeout_us += TCP_TIMEOUT_MIN_US;
+ timeout = usecs_to_jiffies(timeout_us);
} else {
timeout = TCP_TIMEOUT_INIT;
}
@@ -2911,7 +2928,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
struct tcp_sock *tp = tcp_sk(sk);
unsigned int cur_mss;
int diff, len, err;
-
+ int avail_wnd;
/* Inconclusive MTU probe */
if (icsk->icsk_mtup.probe_size)
@@ -2928,7 +2945,13 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
if (skb_still_in_host_queue(sk, skb))
return -EBUSY;
+start:
if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
+ if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
+ TCP_SKB_CB(skb)->seq++;
+ goto start;
+ }
if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
WARN_ON_ONCE(1);
return -EINVAL;
@@ -2941,17 +2964,25 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
return -EHOSTUNREACH; /* Routing failure or similar. */
cur_mss = tcp_current_mss(sk);
+ avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
/* If receiver has shrunk his window, and skb is out of
* new window, do not retransmit it. The exception is the
* case, when window is shrunk to zero. In this case
- * our retransmit serves as a zero window probe.
+ * our retransmit of one segment serves as a zero window probe.
*/
- if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
- TCP_SKB_CB(skb)->seq != tp->snd_una)
- return -EAGAIN;
+ if (avail_wnd <= 0) {
+ if (TCP_SKB_CB(skb)->seq != tp->snd_una)
+ return -EAGAIN;
+ avail_wnd = cur_mss;
+ }
len = cur_mss * segs;
+ if (len > avail_wnd) {
+ len = rounddown(avail_wnd, cur_mss);
+ if (!len)
+ len = avail_wnd;
+ }
if (skb->len > len) {
if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
cur_mss, GFP_ATOMIC))
@@ -2965,8 +2996,9 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
diff -= tcp_skb_pcount(skb);
if (diff)
tcp_adjust_pcount(sk, skb, diff);
- if (skb->len < cur_mss)
- tcp_retrans_try_collapse(sk, skb, cur_mss);
+ avail_wnd = min_t(int, avail_wnd, cur_mss);
+ if (skb->len < avail_wnd)
+ tcp_retrans_try_collapse(sk, skb, avail_wnd);
}
/* RFC3168, section 6.1.1.1. ECN fallback */
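
/*
 * Aside: the retransmit-length clamp above, as a standalone sketch. The
 * idea is to never build a retransmit burst larger than the receiver's
 * remaining window, keeping it MSS-aligned when at least one full MSS
 * fits. rounddown() is re-derived here; the numbers are illustrative.
 */
#include <stdio.h>

static int rounddown(int x, int m) { return x - (x % m); }

static int clamp_retrans_len(int cur_mss, int segs, int avail_wnd)
{
	int len = cur_mss * segs;

	if (len > avail_wnd) {
		len = rounddown(avail_wnd, cur_mss);
		if (!len)		/* window below one MSS: send what fits */
			len = avail_wnd;
	}
	return len;
}

int main(void)
{
	printf("%d\n", clamp_retrans_len(1000, 4, 2500)); /* 2000: two full MSS */
	printf("%d\n", clamp_retrans_len(1000, 4, 600));  /* 600: sub-MSS window */
	printf("%d\n", clamp_retrans_len(1000, 2, 5000)); /* 2000: window not limiting */
	return 0;
}
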
@@ -3134,11 +3166,12 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
*/
void sk_forced_mem_schedule(struct sock *sk, int size)
{
- int amt;
+ int delta, amt;
- if (size <= sk->sk_forward_alloc)
+ delta = size - sk->sk_forward_alloc;
+ if (delta <= 0)
return;
- amt = sk_mem_pages(size);
+ amt = sk_mem_pages(delta);
sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
sk_memory_allocated_add(sk, amt);
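
/*
 * Aside: the accounting change in sk_forced_mem_schedule(), sketched with
 * illustrative numbers. Charging pages for the full size double-counts
 * whatever forward_alloc already covers; charging only the shortfall does
 * not. QUANTUM stands in for SK_MEM_QUANTUM.
 */
#include <stdio.h>

#define QUANTUM 4096

/* Round a byte count up to whole accounting quanta, like sk_mem_pages(). */
static int mem_pages(int bytes)
{
	return (bytes + QUANTUM - 1) / QUANTUM;
}

int main(void)
{
	int forward_alloc = 3000;	/* bytes already reserved */
	int size = 5000;		/* bytes needed now */
	int delta = size - forward_alloc;

	/* Old: charge for the full size; new: only for the shortfall. */
	printf("old=%d pages, new=%d page(s)\n",
	       mem_pages(size), mem_pages(delta));
	return 0;
}
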
@@ -3322,7 +3355,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
now = tcp_clock_ns();
#ifdef CONFIG_SYN_COOKIES
if (unlikely(req->cookie_ts))
- skb->skb_mstamp_ns = cookie_init_timestamp(req);
+ skb->skb_mstamp_ns = cookie_init_timestamp(req, now);
else
#endif
{
@@ -3359,7 +3392,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
th->window = htons(min(req->rsk_rcv_wnd, 65535U));
tcp_options_write((__be32 *)(th + 1), NULL, &opts);
th->doff = (tcp_header_size >> 2);
- __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
+ TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
#ifdef CONFIG_TCP_MD5SIG
/* Okay, we have all we need - do the md5 hash if needed */
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index 22ec8dcc1428..db3469c95c49 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -123,7 +123,7 @@ bool tcp_rack_mark_lost(struct sock *sk)
tp->rack.advanced = 0;
tcp_rack_detect_loss(sk, &timeout);
if (timeout) {
- timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
+ timeout = usecs_to_jiffies(timeout + TCP_TIMEOUT_MIN_US);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
timeout, inet_csk(sk)->icsk_rto);
}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index a0107eb02ae4..551c4a78f68d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -573,7 +573,9 @@ out_reset_timer:
tcp_stream_is_thin(tp) &&
icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
icsk->icsk_backoff = 0;
- icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
+ icsk->icsk_rto = clamp(__tcp_set_rto(tp),
+ tcp_rto_min(sk),
+ TCP_RTO_MAX);
} else {
/* Use normal (exponential) backoff */
icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
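
/*
 * Aside: why clamp() rather than min() in the thin-stream path above. With
 * a collapsed SRTT, the computed RTO can fall below the configured minimum,
 * and min() alone only bounds it from above. A sketch with illustrative
 * millisecond values; clamp() is re-derived, not the kernel macro.
 */
#include <stdio.h>

#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

int main(void)
{
	int rto = 40, rto_min = 200, rto_max = 120000;

	printf("min-only=%d clamp=%d\n",
	       rto < rto_max ? rto : rto_max,	/* old: may undershoot rto_min */
	       clamp(rto, rto_min, rto_max));	/* new: bounded on both sides */
	return 0;
}
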
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 83fd4fa40d5e..3b3f94479885 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1528,7 +1528,7 @@ drop:
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
-void udp_destruct_sock(struct sock *sk)
+void udp_destruct_common(struct sock *sk)
{
/* reclaim completely the forward allocated memory */
struct udp_sock *up = udp_sk(sk);
@@ -1541,10 +1541,14 @@ void udp_destruct_sock(struct sock *sk)
kfree_skb(skb);
}
udp_rmem_release(sk, total, 0, true);
+}
+EXPORT_SYMBOL_GPL(udp_destruct_common);
+static void udp_destruct_sock(struct sock *sk)
+{
+ udp_destruct_common(sk);
inet_sock_destruct(sk);
}
-EXPORT_SYMBOL_GPL(udp_destruct_sock);
int udp_init_sock(struct sock *sk)
{
@@ -1552,7 +1556,6 @@ int udp_init_sock(struct sock *sk)
sk->sk_destruct = udp_destruct_sock;
return 0;
}
-EXPORT_SYMBOL_GPL(udp_init_sock);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
@@ -2137,7 +2140,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
struct dst_entry *old;
if (dst_hold_safe(dst)) {
- old = xchg(&sk->sk_rx_dst, dst);
+ old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst);
dst_release(old);
return old != dst;
}
@@ -2326,7 +2329,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
struct dst_entry *dst = skb_dst(skb);
int ret;
- if (unlikely(sk->sk_rx_dst != dst))
+ if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
udp_sk_rx_dst_set(sk, dst);
ret = udp_unicast_rcv_skb(sk, skb, uh);
@@ -2484,7 +2487,7 @@ int udp_v4_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_efree;
- dst = READ_ONCE(sk->sk_rx_dst);
+ dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
@@ -2676,11 +2679,11 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
-
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
switch (optname) {
case UDP_CORK:
val = READ_ONCE(up->corkflag);
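
/*
 * Aside: why the sign check has to precede the min_t() truncation in
 * udp_lib_getsockopt(). min_t() casts both sides to the given type, so a
 * negative optlen became a huge unsigned value, was truncated to
 * sizeof(int), and the later "len < 0" test could never fire. A sketch of
 * both orderings; -22 stands in for -EINVAL.
 */
#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

static int old_order(int len)
{
	len = min_t(unsigned int, len, sizeof(int));
	if (len < 0)
		return -22;	/* unreachable: len is already truncated */
	return len;
}

static int new_order(int len)
{
	if (len < 0)
		return -22;	/* negative length rejected up front */
	len = min_t(unsigned int, len, sizeof(int));
	return len;
}

int main(void)
{
	printf("old(-1)=%d new(-1)=%d\n", old_order(-1), new_order(-1));
	return 0;
}
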
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 150e6f0fdbf5..bbe4eca42d36 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -196,6 +196,7 @@ EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
void udp_tunnel_sock_release(struct socket *sock)
{
rcu_assign_sk_user_data(sock->sk, NULL);
+ synchronize_rcu();
kernel_sock_shutdown(sock, SHUT_RDWR);
sock_release(sock);
}
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 5936d66d1ce2..3a74a3675ca1 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -17,6 +17,14 @@
struct udp_table udplite_table __read_mostly;
EXPORT_SYMBOL(udplite_table);
+/* Designate sk as a UDP-Lite socket */
+static int udplite_sk_init(struct sock *sk)
+{
+ udp_init_sock(sk);
+ udp_sk(sk)->pcflag = UDPLITE_BIT;
+ return 0;
+}
+
static int udplite_rcv(struct sk_buff *skb)
{
return __udp4_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE);
@@ -54,6 +62,8 @@ struct proto udplite_prot = {
.get_port = udp_v4_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
+ .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
+ .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
.obj_size = sizeof(struct udp_sock),
.h.udp_table = &udplite_table,
#ifdef CONFIG_COMPAT
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a0123760fb2c..974e650e749e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -313,9 +313,8 @@ static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
static void addrconf_mod_rs_timer(struct inet6_dev *idev,
unsigned long when)
{
- if (!timer_pending(&idev->rs_timer))
+ if (!mod_timer(&idev->rs_timer, jiffies + when))
in6_dev_hold(idev);
- mod_timer(&idev->rs_timer, jiffies + when);
}
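
/*
 * Aside: the refcounting idea in the addrconf_mod_rs_timer() change above.
 * mod_timer() returns 0 when the timer was inactive and 1 when it was
 * already pending, so keying the reference off that return value takes
 * exactly one reference per pending timer, where the old test-then-arm
 * pattern could race. A toy single-threaded sketch; the structs are
 * stand-ins, not kernel types.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_timer { bool pending; };
struct toy_idev { int refcnt; struct toy_timer rs_timer; };

/* Returns 1 if the timer was already pending, like mod_timer(). */
static int toy_mod_timer(struct toy_timer *t)
{
	int was_pending = t->pending;

	t->pending = true;
	return was_pending;
}

static void arm_rs_timer(struct toy_idev *idev)
{
	if (!toy_mod_timer(&idev->rs_timer))
		idev->refcnt++;	/* idle -> pending: take the reference */
}

int main(void)
{
	struct toy_idev idev = { .refcnt = 0 };

	arm_rs_timer(&idev);	/* arms: takes the reference */
	arm_rs_timer(&idev);	/* re-arms: no extra reference */
	printf("refcnt=%d pending=%d\n", idev.refcnt, idev.rs_timer.pending);
	return 0;
}
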
static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
@@ -697,6 +696,22 @@ errout:
return err;
}
+/* Combine dev_addr_genid and dev_base_seq to detect changes.
+ */
+static u32 inet6_base_seq(const struct net *net)
+{
+ u32 res = atomic_read(&net->ipv6.dev_addr_genid) +
+ net->dev_base_seq;
+
+ /* Must not return 0 (see nl_dump_check_consistent()).
+ * Choose a value far away from 0.
+ */
+ if (!res)
+ res = 0x80000000;
+ return res;
+}
+
+
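
/*
 * Aside: the non-zero-sequence trick in inet6_base_seq(), as a standalone
 * sketch. Netlink dump consistency checking treats a sequence of 0 as "no
 * sequence recorded", so a generation counter whose sum can wrap to 0 is
 * nudged to a sentinel far from the common small values. Inputs here are
 * illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t base_seq(uint32_t addr_genid, uint32_t dev_base_seq)
{
	uint32_t res = addr_genid + dev_base_seq;

	if (!res)
		res = 0x80000000u;	/* never hand back 0 */
	return res;
}

int main(void)
{
	printf("0x%x\n", base_seq(0, 0));		/* sentinel, never 0 */
	printf("0x%x\n", base_seq(0xffffffffu, 1));	/* wraps to 0 -> sentinel */
	printf("0x%x\n", base_seq(3, 4));		/* ordinary case: 7 */
	return 0;
}
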
static int inet6_netconf_dump_devconf(struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -730,8 +745,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
idx = 0;
head = &net->dev_index_head[h];
rcu_read_lock();
- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
- net->dev_base_seq;
+ cb->seq = inet6_base_seq(net);
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
@@ -1368,7 +1382,7 @@ retry:
* idev->desync_factor if it's larger
*/
cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
- max_desync_factor = min_t(__u32,
+ max_desync_factor = min_t(long,
idev->cnf.max_desync_factor,
cnf_temp_preferred_lft - regen_advance);
@@ -2542,12 +2556,18 @@ static void manage_tempaddrs(struct inet6_dev *idev,
ipv6_ifa_notify(0, ift);
}
- if ((create || list_empty(&idev->tempaddr_list)) &&
- idev->cnf.use_tempaddr > 0) {
+ /* Also create a temporary address if it's enabled but no temporary
+ * address currently exists.
+ * However, we get called with valid_lft == 0, prefered_lft == 0, create == false
+ * as part of cleanup (i.e. deleting the mngtmpaddr).
+ * We don't want that to result in creating a new temporary IP address.
+ */
+ if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft))
+ create = true;
+
+ if (create && idev->cnf.use_tempaddr > 0) {
/* When a new public address is created as described
* in [ADDRCONF], also create a new temporary address.
- * Also create a temporary address if it's enabled but
- * no temporary address currently exists.
*/
read_unlock_bh(&idev->lock);
ipv6_create_tempaddr(ifp, NULL, false);
@@ -5227,7 +5247,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
}
rcu_read_lock();
- cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
+ cb->seq = inet6_base_seq(tgt_net);
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &tgt_net->dev_index_head[h];
@@ -5360,9 +5380,10 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
}
addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
- if (!addr)
- return -EINVAL;
-
+ if (!addr) {
+ err = -EINVAL;
+ goto errout;
+ }
ifm = nlmsg_data(nlh);
if (ifm->ifa_index)
dev = dev_get_by_index(tgt_net, ifm->ifa_index);
@@ -5972,11 +5993,7 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
pmsg->prefix_len = pinfo->prefix_len;
pmsg->prefix_type = pinfo->type;
pmsg->prefix_pad3 = 0;
- pmsg->prefix_flags = 0;
- if (pinfo->onlink)
- pmsg->prefix_flags |= IF_PREFIX_ONLINK;
- if (pinfo->autoconf)
- pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
+ pmsg->prefix_flags = pinfo->flags;
if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
goto nla_put_failure;
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index ea00ce3d4117..8494ee9679b4 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -204,19 +204,26 @@ const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
EXPORT_SYMBOL_GPL(ipv6_stub);
/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
-const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
+const struct in6_addr in6addr_loopback __aligned(BITS_PER_LONG/8)
+ = IN6ADDR_LOOPBACK_INIT;
EXPORT_SYMBOL(in6addr_loopback);
-const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
+const struct in6_addr in6addr_any __aligned(BITS_PER_LONG/8)
+ = IN6ADDR_ANY_INIT;
EXPORT_SYMBOL(in6addr_any);
-const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
+const struct in6_addr in6addr_linklocal_allnodes __aligned(BITS_PER_LONG/8)
+ = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
EXPORT_SYMBOL(in6addr_linklocal_allnodes);
-const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
+const struct in6_addr in6addr_linklocal_allrouters __aligned(BITS_PER_LONG/8)
+ = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
EXPORT_SYMBOL(in6addr_linklocal_allrouters);
-const struct in6_addr in6addr_interfacelocal_allnodes = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
+const struct in6_addr in6addr_interfacelocal_allnodes __aligned(BITS_PER_LONG/8)
+ = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
EXPORT_SYMBOL(in6addr_interfacelocal_allnodes);
-const struct in6_addr in6addr_interfacelocal_allrouters = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
+const struct in6_addr in6addr_interfacelocal_allrouters __aligned(BITS_PER_LONG/8)
+ = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
EXPORT_SYMBOL(in6addr_interfacelocal_allrouters);
-const struct in6_addr in6addr_sitelocal_allrouters = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
+const struct in6_addr in6addr_sitelocal_allrouters __aligned(BITS_PER_LONG/8)
+ = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
EXPORT_SYMBOL(in6addr_sitelocal_allrouters);
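
/*
 * Aside: what the __aligned(BITS_PER_LONG/8) annotation above buys. A
 * 16-byte address constant is only guaranteed byte alignment by the ABI,
 * but word-at-a-time helpers want to load it as unsigned longs; the
 * attribute makes that alignment a guarantee rather than an accident.
 * A userspace sketch with a stand-in struct, not the kernel type.
 */
#include <stdio.h>

struct addr16 { unsigned char b[16]; };	/* stand-in for struct in6_addr */

static const struct addr16 plain = { { 0 } };
static const struct addr16 forced
	__attribute__((aligned(sizeof(long)))) = { { 0 } };

int main(void)
{
	/* "plain" may happen to land aligned; only "forced" is guaranteed. */
	printf("plain mod %zu = %lu\n", sizeof(long),
	       (unsigned long)&plain % sizeof(long));
	printf("forced mod %zu = %lu\n", sizeof(long),
	       (unsigned long)&forced % sizeof(long));
	return 0;
}
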
static void snmp6_free_dev(struct inet6_dev *idev)
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 8a22486cf270..17ac45aa7194 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -437,6 +437,7 @@ static void ip6addrlbl_putmsg(struct nlmsghdr *nlh,
{
struct ifaddrlblmsg *ifal = nlmsg_data(nlh);
ifal->ifal_family = AF_INET6;
+ ifal->__ifal_reserved = 0;
ifal->ifal_prefixlen = prefixlen;
ifal->ifal_flags = 0;
ifal->ifal_index = ifindex;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 56f396ecc26b..a5fc9444c728 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -104,6 +104,13 @@ static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
+void inet6_sock_destruct(struct sock *sk)
+{
+ inet6_cleanup_sock(sk);
+ inet_sock_destruct(sk);
+}
+EXPORT_SYMBOL_GPL(inet6_sock_destruct);
+
static int inet6_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
@@ -196,7 +203,7 @@ lookup_protocol:
inet->hdrincl = 1;
}
- sk->sk_destruct = inet_sock_destruct;
+ sk->sk_destruct = inet6_sock_destruct;
sk->sk_family = PF_INET6;
sk->sk_protocol = protocol;
@@ -498,6 +505,12 @@ void inet6_destroy_sock(struct sock *sk)
}
EXPORT_SYMBOL_GPL(inet6_destroy_sock);
+void inet6_cleanup_sock(struct sock *sk)
+{
+ inet6_destroy_sock(sk);
+}
+EXPORT_SYMBOL_GPL(inet6_cleanup_sock);
+
/*
* This does both peername and sockname.
*/
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 390bedde21a5..06261603e083 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -50,7 +50,7 @@ static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
fl6->flowi6_mark = sk->sk_mark;
fl6->fl6_dport = inet->inet_dport;
fl6->fl6_sport = inet->inet_sport;
- fl6->flowlabel = np->flow_label;
+ fl6->flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
fl6->flowi6_uid = sk->sk_uid;
if (!fl6->flowi6_oif)
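
/*
 * Aside: the bit layout behind ip6_make_flowinfo(), sketched in host byte
 * order. The 32-bit flowinfo word of the IPv6 header packs version(4) |
 * traffic class(8) | flow label(20), so the fix ORs the socket's tclass
 * into the label instead of sending the label alone. Macro names here are
 * illustrative, not the kernel's.
 */
#include <stdint.h>
#include <stdio.h>

#define TCLASS_SHIFT   20
#define FLOWLABEL_MASK 0x000fffffu

static uint32_t make_flowinfo(uint32_t tclass, uint32_t flowlabel)
{
	return (tclass << TCLASS_SHIFT) | (flowlabel & FLOWLABEL_MASK);
}

int main(void)
{
	/* tclass 0x2e, flow label 0x12345 -> 0x02e12345 */
	printf("flowinfo=0x%08x\n", make_flowinfo(0x2e, 0x12345));
	return 0;
}
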
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index b64791d3b0f8..a1cdb43e7216 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -506,7 +506,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
skb->csum = csum_block_sub(skb->csum, csumdiff,
skb->len - trimlen);
}
- pskb_trim(skb, skb->len - trimlen);
+ ret = pskb_trim(skb, skb->len - trimlen);
+ if (unlikely(ret))
+ return ret;
ret = nexthdr[1];
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 1c532638b2ad..e19c1844276f 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -314,6 +314,9 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
secpath_reset(skb);
+ if (skb_needs_linearize(skb, skb->dev->features) &&
+ __skb_linearize(skb))
+ return -ENOMEM;
return 0;
}
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index da46c4284676..49e31e4ae7b7 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -143,6 +143,8 @@ int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type)
optlen = 1;
break;
default:
+ if (len < 2)
+ goto bad;
optlen = nh[offset + 1] + 2;
if (optlen > len)
goto bad;
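
/*
 * Aside: the bounds check added to the TLV walk above, in a standalone
 * sketch. Every non-Pad1 option is at least two bytes (type, length), so
 * with fewer than two bytes left, reading nh[offset + 1] would run past
 * the buffer. The parse order is simplified relative to ipv6_find_tlv().
 */
#include <stdio.h>

static int find_tlv(const unsigned char *nh, int len, int type)
{
	int offset = 0;

	while (len > 0) {
		int optlen;

		if (nh[offset] == 0) {		/* Pad1: a single byte */
			optlen = 1;
		} else {
			if (len < 2)		/* the fix: no room for a header */
				return -1;
			optlen = nh[offset + 1] + 2;
			if (optlen > len)
				return -1;
			if (nh[offset] == type)
				return offset;
		}
		offset += optlen;
		len -= optlen;
	}
	return -1;
}

int main(void)
{
	const unsigned char opts[] = { 0x00, 0x05, 0x02, 0xaa, 0xbb };

	printf("%d\n", find_tlv(opts, (int)sizeof(opts), 0x05));	/* 1 */
	printf("%d\n", find_tlv(opts, 2, 0x05));	/* -1: truncated header */
	return 0;
}
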
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 172726939652..cdc8a49d7fc3 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -437,6 +437,11 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
+ nla_total_size(16); /* src */
}
+static void fib6_rule_flush_cache(struct fib_rules_ops *ops)
+{
+ rt_genid_bump_ipv6(ops->fro_net);
+}
+
static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.family = AF_INET6,
.rule_size = sizeof(struct fib6_rule),
@@ -449,6 +454,7 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.compare = fib6_rule_compare,
.fill = fib6_rule_fill,
.nlmsg_payload = fib6_rule_nlmsg_payload,
+ .flush_cache = fib6_rule_flush_cache,
.nlgroup = RTNLGRP_IPV6_RULE,
.policy = fib6_rule_policy,
.owner = THIS_MODULE,
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 3db10cae7b17..169467b5c98a 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -410,7 +410,10 @@ static struct net_device *icmp6_dev(const struct sk_buff *skb)
if (unlikely(dev->ifindex == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
const struct rt6_info *rt6 = skb_rt6_info(skb);
- if (rt6)
+ /* The destination could be an external IP in Ext Hdr (SRv6, RPL, etc.),
+ * and ip6_null_entry could be attached to the skb if no route is found.
+ */
+ if (rt6 && rt6->rt6i_idev)
dev = rt6->rt6i_idev->dev;
}
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 5fc1f4e0c0cf..10f1367eb4ca 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -477,6 +477,7 @@ int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
rcu_read_lock();
+ ret = -ESRCH;
ila = ila_lookup_by_params(&xp, ilan);
if (ila) {
ret = ila_dump_info(ila,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index ef55489651f8..d74a825c50f0 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1433,13 +1433,9 @@ out:
if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
pn_leaf = fib6_find_prefix(info->nl_net, table,
pn);
-#if RT6_DEBUG >= 2
- if (!pn_leaf) {
- WARN_ON(!pn_leaf);
+ if (!pn_leaf)
pn_leaf =
info->nl_net->ipv6.fib6_null_entry;
- }
-#endif
fib6_info_hold(pn_leaf);
rcu_assign_pointer(pn->leaf, pn_leaf);
}
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index d64b83e85642..e6e1190a6ed9 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -535,7 +535,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
- int uninitialized_var(err);
+ int err;
struct net *net = sock_net(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_flowlabel_req freq;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4a6396d574a0..2d34bd98fcce 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -711,6 +711,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
{
struct ip6_tnl *tunnel = netdev_priv(dev);
__be16 protocol;
+ __be16 flags;
if (dev->type == ARPHRD_ETHER)
IPCB(skb)->flags = 0;
@@ -720,16 +721,12 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
else
fl6->daddr = tunnel->parms.raddr;
- if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
- return -ENOMEM;
-
/* Push GRE header. */
protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
if (tunnel->parms.collect_md) {
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
- __be16 flags;
int tun_hlen;
tun_info = skb_tunnel_info(skb);
@@ -751,19 +748,25 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
tun_hlen = gre_calc_hlen(flags);
+ if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen))
+ return -ENOMEM;
+
gre_build_header(skb, tun_hlen,
flags, protocol,
tunnel_id_to_key32(tun_info->key.tun_id),
- (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
+ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
: 0);
} else {
- if (tunnel->parms.o_flags & TUNNEL_SEQ)
- tunnel->o_seqno++;
+ if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
+ return -ENOMEM;
+
+ flags = tunnel->parms.o_flags;
- gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+ gre_build_header(skb, tunnel->tun_hlen, flags,
protocol, tunnel->parms.o_key,
- htonl(tunnel->o_seqno));
+ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
+ : 0);
}
return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
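
/*
 * Aside: the o_seqno change above, sketched with C11 atomics. Bumping the
 * tunnel sequence number with an atomic fetch-and-increment means two
 * concurrent transmit paths can never hand out the same number, which a
 * plain "o_seqno++" read-modify-write allowed. Single-threaded demo; the
 * counter name mirrors the kernel field for readability only.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint o_seqno;

static unsigned int next_seqno(void)
{
	/* Returns the pre-increment value, like atomic_fetch_inc(). */
	return atomic_fetch_add_explicit(&o_seqno, 1, memory_order_relaxed);
}

int main(void)
{
	printf("%u %u %u\n", next_seqno(), next_seqno(), next_seqno()); /* 0 1 2 */
	return 0;
}
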
@@ -938,11 +941,12 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
goto tx_err;
if (skb->len > dev->mtu + dev->hard_header_len) {
- pskb_trim(skb, dev->mtu + dev->hard_header_len);
+ if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
+ goto tx_err;
truncate = true;
}
- nhoff = skb_network_header(skb) - skb_mac_header(skb);
+ nhoff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_IP) &&
(ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
truncate = true;
@@ -951,7 +955,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
int thoff;
if (skb_transport_header_was_set(skb))
- thoff = skb_transport_header(skb) - skb_mac_header(skb);
+ thoff = skb_transport_offset(skb);
else
thoff = nhoff + sizeof(struct ipv6hdr);
if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
@@ -1000,12 +1004,14 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
ntohl(tun_id),
ntohl(md->u.index), truncate,
false);
+ proto = htons(ETH_P_ERSPAN);
} else if (md->version == 2) {
erspan_build_header_v2(skb,
ntohl(tun_id),
md->u.md2.dir,
get_hwid(&md->u.md2),
truncate, false);
+ proto = htons(ETH_P_ERSPAN2);
} else {
goto tx_err;
}
@@ -1028,25 +1034,26 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
break;
}
- if (t->parms.erspan_ver == 1)
+ if (t->parms.erspan_ver == 1) {
erspan_build_header(skb, ntohl(t->parms.o_key),
t->parms.index,
truncate, false);
- else if (t->parms.erspan_ver == 2)
+ proto = htons(ETH_P_ERSPAN);
+ } else if (t->parms.erspan_ver == 2) {
erspan_build_header_v2(skb, ntohl(t->parms.o_key),
t->parms.dir,
t->parms.hwid,
truncate, false);
- else
+ proto = htons(ETH_P_ERSPAN2);
+ } else {
goto tx_err;
+ }
fl6.daddr = t->parms.raddr;
}
/* Push GRE header. */
- proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
- : htons(ETH_P_ERSPAN2);
- gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
+ gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno)));
/* TooBig packet may have updated dst->dev's mtu */
if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
@@ -1137,14 +1144,16 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
dev->needed_headroom = dst_len;
if (set_mtu) {
- dev->mtu = rt->dst.dev->mtu - t_hlen;
+ int mtu = rt->dst.dev->mtu - t_hlen;
+
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
- dev->mtu -= 8;
+ mtu -= 8;
if (dev->type == ARPHRD_ETHER)
- dev->mtu -= ETH_HLEN;
+ mtu -= ETH_HLEN;
- if (dev->mtu < IPV6_MIN_MTU)
- dev->mtu = IPV6_MIN_MTU;
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+ WRITE_ONCE(dev->mtu, mtu);
}
}
ip6_rt_put(rt);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index e6c4966aa956..ebf90bce063a 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -44,21 +44,25 @@
#include <net/inet_ecn.h>
#include <net/dst_metadata.h>
-INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *));
-INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *));
+void udp_v6_early_demux(struct sk_buff *);
+void tcp_v6_early_demux(struct sk_buff *);
static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
- void (*edemux)(struct sk_buff *skb);
-
- if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
- const struct inet6_protocol *ipprot;
-
- ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
- if (ipprot && (edemux = READ_ONCE(ipprot->early_demux)))
- INDIRECT_CALL_2(edemux, tcp_v6_early_demux,
- udp_v6_early_demux, skb);
+ if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
+ !skb_dst(skb) && !skb->sk) {
+ switch (ipv6_hdr(skb)->nexthdr) {
+ case IPPROTO_TCP:
+ if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
+ tcp_v6_early_demux(skb);
+ break;
+ case IPPROTO_UDP:
+ if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
+ udp_v6_early_demux(skb);
+ break;
+ }
}
+
if (!skb_valid_dst(skb))
ip6_route_input(skb);
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5585e3a94f3c..c67d634dccd4 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -130,7 +130,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
if (lwtunnel_xmit_redirect(dst->lwtstate)) {
int res = lwtunnel_xmit(skb);
- if (res < 0 || res == LWTUNNEL_XMIT_DONE)
+ if (res != LWTUNNEL_XMIT_CONTINUE)
return res;
}
@@ -177,7 +177,13 @@ ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
int err;
skb_mark_not_on_list(segs);
- err = ip6_fragment(net, sk, segs, ip6_finish_output2);
+ /* Last GSO segment can be smaller than gso_size (and MTU).
+ * Adding a fragment header would produce an "atomic fragment",
+ * which is considered harmful (RFC-8021). Avoid that.
+ */
+ err = segs->len > mtu ?
+ ip6_fragment(net, sk, segs, ip6_finish_output2) :
+ ip6_finish_output2(net, sk, segs);
if (err && ret == 0)
ret = err;
}
@@ -919,6 +925,9 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
if (err < 0)
goto fail;
+ /* We prevent @rt from being freed. */
+ rcu_read_lock();
+
for (;;) {
/* Prepare header of the next frame,
* before previous one went down. */
@@ -942,6 +951,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
if (err == 0) {
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGOKS);
+ rcu_read_unlock();
return 0;
}
@@ -949,6 +959,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGFAILS);
+ rcu_read_unlock();
return err;
slow_path_clean:
@@ -1414,7 +1425,7 @@ static int __ip6_append_data(struct sock *sk,
mtu = cork->gso_size ? IP6_MAX_MTU : cork->fragsize;
orig_mtu = mtu;
- if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
+ if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
tskey = sk->sk_tskey++;
@@ -1850,8 +1861,13 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
if (proto == IPPROTO_ICMPV6) {
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+ u8 icmp6_type;
- ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
+ if (sk->sk_socket->type == SOCK_RAW && !inet_sk(sk)->hdrincl)
+ icmp6_type = fl6->fl6_icmp_type;
+ else
+ icmp6_type = icmp6_hdr(skb)->icmp6_type;
+ ICMP6MSGOUT_INC_STATS(net, idev, icmp6_type);
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 878a08c40fff..5319093d9aa6 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -399,7 +399,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
unsigned int nhoff = raw - skb->data;
unsigned int off = nhoff + sizeof(*ipv6h);
- u8 next, nexthdr = ipv6h->nexthdr;
+ u8 nexthdr = ipv6h->nexthdr;
while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
struct ipv6_opt_hdr *hdr;
@@ -410,25 +410,25 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
hdr = (struct ipv6_opt_hdr *)(skb->data + off);
if (nexthdr == NEXTHDR_FRAGMENT) {
- struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
- if (frag_hdr->frag_off)
- break;
optlen = 8;
} else if (nexthdr == NEXTHDR_AUTH) {
optlen = ipv6_authlen(hdr);
} else {
optlen = ipv6_optlen(hdr);
}
- /* cache hdr->nexthdr, since pskb_may_pull() might
- * invalidate hdr
- */
- next = hdr->nexthdr;
- if (nexthdr == NEXTHDR_DEST) {
- u16 i = 2;
- /* Remember : hdr is no longer valid at this point. */
- if (!pskb_may_pull(skb, off + optlen))
+ if (!pskb_may_pull(skb, off + optlen))
+ break;
+
+ hdr = (struct ipv6_opt_hdr *)(skb->data + off);
+ if (nexthdr == NEXTHDR_FRAGMENT) {
+ struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;
+
+ if (frag_hdr->frag_off)
break;
+ }
+ if (nexthdr == NEXTHDR_DEST) {
+ u16 i = 2;
while (1) {
struct ipv6_tlv_tnl_enc_lim *tel;
@@ -449,7 +449,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
i++;
}
}
- nexthdr = next;
+ nexthdr = hdr->nexthdr;
off += optlen;
}
return 0;
@@ -1201,8 +1201,8 @@ route_lookup:
*/
max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
+ dst->header_len + t->hlen;
- if (max_headroom > dev->needed_headroom)
- dev->needed_headroom = max_headroom;
+ if (max_headroom > READ_ONCE(dev->needed_headroom))
+ WRITE_ONCE(dev->needed_headroom, max_headroom);
err = ip6_tnl_encap(skb, t, &proto, fl6);
if (err)
@@ -1430,6 +1430,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
struct __ip6_tnl_parm *p = &t->parms;
struct flowi6 *fl6 = &t->fl.u.ip6;
int t_hlen;
+ int mtu;
memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
@@ -1472,12 +1473,13 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
dev->hard_header_len = rt->dst.dev->hard_header_len +
t_hlen;
- dev->mtu = rt->dst.dev->mtu - t_hlen;
+ mtu = rt->dst.dev->mtu - t_hlen;
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
- dev->mtu -= 8;
+ mtu -= 8;
- if (dev->mtu < IPV6_MIN_MTU)
- dev->mtu = IPV6_MIN_MTU;
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+ WRITE_ONCE(dev->mtu, mtu);
}
ip6_rt_put(rt);
}
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 8b44d3b53844..e4cd6909e9bb 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -558,12 +558,12 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
vti6_addr_conflict(t, ipv6_hdr(skb)))
goto tx_err;
- xfrm_decode_session(skb, &fl, AF_INET6);
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ xfrm_decode_session(skb, &fl, AF_INET6);
break;
case htons(ETH_P_IP):
- xfrm_decode_session(skb, &fl, AF_INET);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ xfrm_decode_session(skb, &fl, AF_INET);
break;
default:
goto tx_err;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 6248e00c2bf7..6642bc7b9870 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1065,7 +1065,7 @@ static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
And all this only to mangle msg->im6_msgtype and
to set msg->im6_mbz to "mbz" :-)
*/
- skb_push(skb, -skb_network_offset(pkt));
+ __skb_pull(skb, skb_network_offset(pkt));
skb_push(skb, sizeof(*msg));
skb_reset_transport_header(skb);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 5352c7e68c42..86c550d63543 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -164,15 +164,18 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
rtnl_lock();
lock_sock(sk);
+ /* Another thread has converted the socket into IPv4 with
+ * IPV6_ADDRFORM concurrently.
+ */
+ if (unlikely(sk->sk_family != AF_INET6))
+ goto unlock;
+
switch (optname) {
case IPV6_ADDRFORM:
if (optlen < sizeof(int))
goto e_inval;
if (val == PF_INET) {
- struct ipv6_txoptions *opt;
- struct sk_buff *pktopt;
-
if (sk->sk_type == SOCK_RAW)
break;
@@ -203,7 +206,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
break;
}
- fl6_free_socklist(sk);
__ipv6_sock_mc_close(sk);
__ipv6_sock_ac_close(sk);
@@ -238,14 +240,14 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
sk->sk_socket->ops = &inet_dgram_ops;
sk->sk_family = PF_INET;
}
- opt = xchg((__force struct ipv6_txoptions **)&np->opt,
- NULL);
- if (opt) {
- atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
- txopt_put(opt);
- }
- pktopt = xchg(&np->pktoptions, NULL);
- kfree_skb(pktopt);
+
+ /* Disable all options so that no more memory is allocated,
+ * but there is still a race. See the lockless path
+ * in udpv6_sendmsg() and ipv6_local_rxpmtu().
+ */
+ np->rxopt.all = 0;
+
+ inet6_cleanup_sock(sk);
/*
* ... and add it to the refcnt debug socks count
@@ -924,6 +926,7 @@ pref_skip_coa:
break;
}
+unlock:
release_sock(sk);
if (needs_rtnl)
rtnl_unlock();
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 118e19cabb72..74977ec77c57 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -196,7 +196,8 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
static inline int ndisc_is_useropt(const struct net_device *dev,
struct nd_opt_hdr *opt)
{
- return opt->nd_opt_type == ND_OPT_RDNSS ||
+ return opt->nd_opt_type == ND_OPT_PREFIX_INFO ||
+ opt->nd_opt_type == ND_OPT_RDNSS ||
opt->nd_opt_type == ND_OPT_DNSSL ||
opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL ||
ndisc_ops_is_useropt(dev, opt->nd_opt_type);
diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c
index 69c021704abd..aa5bb8789ba0 100644
--- a/net/ipv6/netfilter/nf_socket_ipv6.c
+++ b/net/ipv6/netfilter/nf_socket_ipv6.c
@@ -97,7 +97,7 @@ nf_socket_get_sock_v6(struct net *net, struct sk_buff *skb, int doff,
struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
const struct net_device *indev)
{
- __be16 uninitialized_var(dport), uninitialized_var(sport);
+ __be16 dport, sport;
const struct in6_addr *daddr = NULL, *saddr = NULL;
struct ipv6hdr *iph = ipv6_hdr(skb), ipv6_var;
struct sk_buff *data_skb = NULL;
diff --git a/net/ipv6/netfilter/nf_tproxy_ipv6.c b/net/ipv6/netfilter/nf_tproxy_ipv6.c
index 34d51cd426b0..00761924f276 100644
--- a/net/ipv6/netfilter/nf_tproxy_ipv6.c
+++ b/net/ipv6/netfilter/nf_tproxy_ipv6.c
@@ -63,7 +63,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
lport ? lport : hp->dest,
skb->dev, NF_TPROXY_LOOKUP_LISTENER);
if (sk2) {
- inet_twsk_deschedule_put(inet_twsk(sk));
+ nf_tproxy_twsk_deschedule_put(inet_twsk(sk));
sk = sk2;
}
}
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
index 2af32200507d..c4aa8d27e040 100644
--- a/net/ipv6/netfilter/nft_dup_ipv6.c
+++ b/net/ipv6/netfilter/nft_dup_ipv6.c
@@ -13,8 +13,8 @@
#include <net/netfilter/ipv6/nf_dup_ipv6.h>
struct nft_dup_ipv6 {
- enum nft_registers sreg_addr:8;
- enum nft_registers sreg_dev:8;
+ u8 sreg_addr;
+ u8 sreg_dev;
};
static void nft_dup_ipv6_eval(const struct nft_expr *expr,
@@ -38,16 +38,16 @@ static int nft_dup_ipv6_init(const struct nft_ctx *ctx,
if (tb[NFTA_DUP_SREG_ADDR] == NULL)
return -EINVAL;
- priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
- err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in6_addr));
+ err = nft_parse_register_load(tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
+ sizeof(struct in6_addr));
if (err < 0)
return err;
- if (tb[NFTA_DUP_SREG_DEV] != NULL) {
- priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
- return nft_validate_register_load(priv->sreg_dev, sizeof(int));
- }
- return 0;
+ if (tb[NFTA_DUP_SREG_DEV])
+ err = nft_parse_register_load(tb[NFTA_DUP_SREG_DEV],
+ &priv->sreg_dev, sizeof(int));
+
+ return err;
}
static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index 7ece86afd079..03dbd16f9ad5 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -37,6 +37,9 @@ static int nft_fib6_flowi_init(struct flowi6 *fl6, const struct nft_fib *priv,
if (ipv6_addr_type(&fl6->daddr) & IPV6_ADDR_LINKLOCAL) {
lookup_flags |= RT6_LOOKUP_F_IFACE;
fl6->flowi6_oif = get_ifindex(dev ? dev : pkt->skb->dev);
+ } else if ((priv->flags & NFTA_FIB_F_IIF) &&
+ (netif_is_l3_master(dev) || netif_is_l3_slave(dev))) {
+ fl6->flowi6_oif = dev->ifindex;
}
if (ipv6_addr_type(&fl6->saddr) & IPV6_ADDR_UNICAST)
@@ -179,7 +182,8 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL))
goto put_rt_err;
- if (oif && oif != rt->rt6i_idev->dev)
+ if (oif && oif != rt->rt6i_idev->dev &&
+ l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) != oif->ifindex)
goto put_rt_err;
nft_fib_store_result(dest, priv, rt->rt6i_idev->dev);
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 051bbd0726df..91f352427dca 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -22,11 +22,6 @@
#include <linux/proc_fs.h>
#include <net/ping.h>
-static void ping_v6_destroy(struct sock *sk)
-{
- inet6_destroy_sock(sk);
-}
-
/* Compatibility glue so we can support IPv6 when it's compiled as a module */
static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
int *addr_len)
@@ -101,7 +96,8 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
addr_type = ipv6_addr_type(daddr);
if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) ||
(addr_type & IPV6_ADDR_MAPPED) ||
- (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if))
+ (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if &&
+ l3mdev_master_ifindex_by_index(sock_net(sk), oif) != sk->sk_bound_dev_if))
return -EINVAL;
/* TODO: use ip6_datagram_send_ctl to get options from cmsg */
@@ -170,7 +166,6 @@ struct proto pingv6_prot = {
.owner = THIS_MODULE,
.init = ping_init_sock,
.close = ping_close,
- .destroy = ping_v6_destroy,
.connect = ip6_datagram_connect_v6_only,
.disconnect = __udp_disconnect,
.setsockopt = ipv6_setsockopt,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 828dd95840b4..731485b18de3 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -539,6 +539,7 @@ csum_copy_err:
static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
struct raw6_sock *rp)
{
+ struct ipv6_txoptions *opt;
struct sk_buff *skb;
int err = 0;
int offset;
@@ -556,6 +557,9 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
offset = rp->offset;
total_len = inet_sk(sk)->cork.base.length;
+ opt = inet6_sk(sk)->cork.opt;
+ total_len -= opt ? opt->opt_flen : 0;
+
if (offset >= total_len - 1) {
err = -EINVAL;
ip6_flush_pending_frames(sk);
@@ -824,7 +828,8 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (!proto)
proto = inet->inet_num;
- else if (proto != inet->inet_num)
+ else if (proto != inet->inet_num &&
+ inet->inet_num != IPPROTO_RAW)
return -EINVAL;
if (proto > 255)
@@ -1252,8 +1257,6 @@ static void raw6_destroy(struct sock *sk)
lock_sock(sk);
ip6_flush_pending_frames(sk);
release_sock(sk);
-
- inet6_destroy_sock(sk);
}
static int rawv6_init_sk(struct sock *sk)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 00732ee6bbd8..8eac2c890449 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -88,7 +88,7 @@ static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
struct net_device *dev, int how);
-static int ip6_dst_gc(struct dst_ops *ops);
+static void ip6_dst_gc(struct dst_ops *ops);
static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
@@ -3207,29 +3207,30 @@ out:
return dst;
}
-static int ip6_dst_gc(struct dst_ops *ops)
+static void ip6_dst_gc(struct dst_ops *ops)
{
struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
- int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
+ unsigned int val;
int entries;
entries = dst_entries_get_fast(ops);
- if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
- entries <= rt_max_size)
+ if (entries > ops->gc_thresh)
+ entries = dst_entries_get_slow(ops);
+
+ if (time_after(rt_last_gc + rt_min_interval, jiffies))
goto out;
- net->ipv6.ip6_rt_gc_expire++;
- fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
+ fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
entries = dst_entries_get_slow(ops);
if (entries < ops->gc_thresh)
- net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
+ atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
out:
- net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
- return entries > rt_max_size;
+ val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
+ atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
}
static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
@@ -5217,19 +5218,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
err_nh = NULL;
list_for_each_entry(nh, &rt6_nh_list, next) {
err = __ip6_ins_rt(nh->fib6_info, info, extack);
- fib6_info_release(nh->fib6_info);
- if (!err) {
- /* save reference to last route successfully inserted */
- rt_last = nh->fib6_info;
-
- /* save reference to first route for notification */
- if (!rt_notif)
- rt_notif = nh->fib6_info;
- }
-
- /* nh->fib6_info is used or freed at this point, reset to NULL*/
- nh->fib6_info = NULL;
if (err) {
if (replace && nhn)
NL_SET_ERR_MSG_MOD(extack,
@@ -5237,6 +5226,12 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
err_nh = nh;
goto add_errout;
}
+ /* save reference to last route successfully inserted */
+ rt_last = nh->fib6_info;
+
+ /* save reference to first route for notification */
+ if (!rt_notif)
+ rt_notif = nh->fib6_info;
/* Because each route is added like a single route we remove
* these flags after the first nexthop: if there is a collision,
@@ -5283,8 +5278,7 @@ add_errout:
cleanup:
list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
- if (nh->fib6_info)
- fib6_info_release(nh->fib6_info);
+ fib6_info_release(nh->fib6_info);
list_del(&nh->next);
kfree(nh);
}
@@ -5408,16 +5402,17 @@ static size_t rt6_nlmsg_size(struct fib6_info *f6i)
nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
&nexthop_len);
} else {
+ struct fib6_info *sibling, *next_sibling;
struct fib6_nh *nh = f6i->fib6_nh;
nexthop_len = 0;
if (f6i->fib6_nsiblings) {
- nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
- + NLA_ALIGN(sizeof(struct rtnexthop))
- + nla_total_size(16) /* RTA_GATEWAY */
- + lwtunnel_get_encap_size(nh->fib_nh_lws);
+ rt6_nh_nlmsg_size(nh, &nexthop_len);
- nexthop_len *= f6i->fib6_nsiblings;
+ list_for_each_entry_safe(sibling, next_sibling,
+ &f6i->fib6_siblings, fib6_siblings) {
+ rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
+ }
}
nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
}
@@ -6316,7 +6311,7 @@ static int __net_init ip6_route_net_init(struct net *net)
#endif
net->ipv6.sysctl.flush_delay = 0;
- net->ipv6.sysctl.ip6_rt_max_size = 4096;
+ net->ipv6.sysctl.ip6_rt_max_size = INT_MAX;
net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
@@ -6325,7 +6320,7 @@ static int __net_init ip6_route_net_init(struct net *net)
net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
net->ipv6.sysctl.skip_notify_on_dev_down = 0;
- net->ipv6.ip6_rt_gc_expire = 30*HZ;
+ atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);
ret = 0;
out:
@@ -6359,10 +6354,16 @@ static void __net_exit ip6_route_net_exit(struct net *net)
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
- proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
- sizeof(struct ipv6_route_iter));
- proc_create_net_single("rt6_stats", 0444, net->proc_net,
- rt6_stats_seq_show, NULL);
+ if (!proc_create_net("ipv6_route", 0, net->proc_net,
+ &ipv6_route_seq_ops,
+ sizeof(struct ipv6_route_iter)))
+ return -ENOMEM;
+
+ if (!proc_create_net_single("rt6_stats", 0444, net->proc_net,
+ rt6_stats_seq_show, NULL)) {
+ remove_proc_entry("ipv6_route", net->proc_net);
+ return -ENOMEM;
+ }
#endif
return 0;
}
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 75421a472d25..7094f8691ac6 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -125,6 +125,11 @@ static int seg6_genl_sethmac(struct sk_buff *skb, struct genl_info *info)
goto out_unlock;
}
+ if (slen > nla_len(info->attrs[SEG6_ATTR_SECRET])) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
if (hinfo) {
err = seg6_hmac_info_del(net, hmackeyid);
if (err)
@@ -436,22 +441,24 @@ int __init seg6_init(void)
{
int err = -ENOMEM;
- err = genl_register_family(&seg6_genl_family);
+ err = register_pernet_subsys(&ip6_segments_ops);
if (err)
goto out;
- err = register_pernet_subsys(&ip6_segments_ops);
+ err = genl_register_family(&seg6_genl_family);
if (err)
- goto out_unregister_genl;
+ goto out_unregister_pernet;
#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
err = seg6_iptunnel_init();
if (err)
- goto out_unregister_pernet;
+ goto out_unregister_genl;
err = seg6_local_init();
- if (err)
- goto out_unregister_pernet;
+ if (err) {
+ seg6_iptunnel_exit();
+ goto out_unregister_genl;
+ }
#endif
#ifdef CONFIG_IPV6_SEG6_HMAC
@@ -472,11 +479,11 @@ out_unregister_iptun:
#endif
#endif
#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
-out_unregister_pernet:
- unregister_pernet_subsys(&ip6_segments_ops);
-#endif
out_unregister_genl:
genl_unregister_family(&seg6_genl_family);
+#endif
+out_unregister_pernet:
+ unregister_pernet_subsys(&ip6_segments_ops);
goto out;
}
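
/*
 * Aside: the shape of the reordered unwind in seg6_init() above.
 * Registration now happens pernet -> genl, and each error label undoes
 * exactly what succeeded before it, in reverse order. A userspace sketch
 * with stub registrations; the failure in the genl step is simulated.
 */
#include <stdio.h>

static int register_pernet(void) { puts("pernet registered"); return 0; }
static void unregister_pernet(void) { puts("pernet unregistered"); }
static int register_genl(void) { puts("genl: simulated failure"); return -1; }

static int init_sketch(void)
{
	int err;

	err = register_pernet();
	if (err)
		goto out;
	err = register_genl();
	if (err)
		goto out_unregister_pernet;
	return 0;

out_unregister_pernet:
	unregister_pernet();	/* only what was registered gets undone */
out:
	return err;
}

int main(void)
{
	printf("init_sketch() = %d\n", init_sketch());
	return 0;
}
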
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 117d374695fe..8d704ea94693 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1054,12 +1054,13 @@ tx_err:
static void ipip6_tunnel_bind_dev(struct net_device *dev)
{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ int t_hlen = tunnel->hlen + sizeof(struct iphdr);
struct net_device *tdev = NULL;
- struct ip_tunnel *tunnel;
+ int hlen = LL_MAX_HEADER;
const struct iphdr *iph;
struct flowi4 fl4;
- tunnel = netdev_priv(dev);
iph = &tunnel->parms.iph;
if (iph->daddr) {
@@ -1082,12 +1083,15 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
if (tdev && !netif_is_l3_master(tdev)) {
- int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+ int mtu;
- dev->mtu = tdev->mtu - t_hlen;
- if (dev->mtu < IPV6_MIN_MTU)
- dev->mtu = IPV6_MIN_MTU;
+ mtu = tdev->mtu - t_hlen;
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+ WRITE_ONCE(dev->mtu, mtu);
+ hlen = tdev->hard_header_len + tdev->needed_headroom;
}
+ dev->needed_headroom = t_hlen + hlen;
}
static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 7e5550546594..8c3beffbaf06 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -180,14 +180,15 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
treq->af_specific = &tcp_request_sock_ipv6_ops;
treq->tfo_listener = false;
- if (security_inet_conn_request(sk, skb, req))
- goto out_free;
-
req->mss = mss;
ireq->ir_rmt_port = th->source;
ireq->ir_num = ntohs(th->dest);
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+
+ if (security_inet_conn_request(sk, skb, req))
+ goto out_free;
+
if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 063898cae3e5..7cb622d300aa 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -106,7 +106,7 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
if (dst && dst_hold_safe(dst)) {
const struct rt6_info *rt = (const struct rt6_info *)dst;
- sk->sk_rx_dst = dst;
+ rcu_assign_pointer(sk->sk_rx_dst, dst);
inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
}
@@ -264,6 +264,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
fl6.flowi6_proto = IPPROTO_TCP;
fl6.daddr = sk->sk_v6_daddr;
fl6.saddr = saddr ? *saddr : np->saddr;
+ fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = usin->sin6_port;
@@ -334,6 +335,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
late_failure:
tcp_set_state(sk, TCP_CLOSE);
+ if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
+ inet_reset_saddr(sk);
failure:
inet->inet_dport = 0;
sk->sk_route_caps = 0;
@@ -1316,14 +1319,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
/* Clone pktoptions received with SYN, if we own the req */
if (ireq->pktopts) {
- newnp->pktoptions = skb_clone(ireq->pktopts,
- sk_gfp_mask(sk, GFP_ATOMIC));
+ newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
consume_skb(ireq->pktopts);
ireq->pktopts = NULL;
- if (newnp->pktoptions) {
+ if (newnp->pktoptions)
tcp_v6_restore_cb(newnp->pktoptions);
- skb_set_owner_r(newnp->pktoptions, newsk);
- }
}
} else {
if (!req_unhash && found_dup_sk) {
@@ -1391,18 +1391,21 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
--ANK (980728)
*/
if (np->rxopt.all)
- opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
+ opt_skb = skb_clone_and_charge_r(skb, sk);
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
- struct dst_entry *dst = sk->sk_rx_dst;
+ struct dst_entry *dst;
+
+ dst = rcu_dereference_protected(sk->sk_rx_dst,
+ lockdep_sock_is_held(sk));
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
if (dst) {
if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
+ RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
dst_release(dst);
- sk->sk_rx_dst = NULL;
}
}
@@ -1470,7 +1473,6 @@ ipv6_pktoptions:
if (np->repflow)
np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
- skb_set_owner_r(opt_skb, sk);
tcp_v6_restore_cb(opt_skb);
opt_skb = xchg(&np->pktoptions, opt_skb);
} else {
@@ -1726,7 +1728,7 @@ do_time_wait:
goto discard_it;
}
-INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
+void tcp_v6_early_demux(struct sk_buff *skb)
{
const struct ipv6hdr *hdr;
const struct tcphdr *th;
@@ -1753,7 +1755,7 @@ INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk_fullsock(sk)) {
- struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+ struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie);
@@ -1846,12 +1848,6 @@ static int tcp_v6_init_sock(struct sock *sk)
return 0;
}
-static void tcp_v6_destroy_sock(struct sock *sk)
-{
- tcp_v4_destroy_sock(sk);
- inet6_destroy_sock(sk);
-}
-
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
@@ -2044,7 +2040,7 @@ struct proto tcpv6_prot = {
.accept = inet_csk_accept,
.ioctl = tcp_ioctl,
.init = tcp_v6_init_sock,
- .destroy = tcp_v6_destroy_sock,
+ .destroy = tcp_v4_destroy_sock,
.shutdown = tcp_shutdown,
.setsockopt = tcp_setsockopt,
.getsockopt = tcp_getsockopt,
@@ -2081,12 +2077,7 @@ struct proto tcpv6_prot = {
.diag_destroy = tcp_abort,
};
-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct inet6_protocol tcpv6_protocol = {
- .early_demux = tcp_v6_early_demux,
- .early_demux_handler = tcp_v6_early_demux,
+static const struct inet6_protocol tcpv6_protocol = {
.handler = tcp_v6_rcv,
.err_handler = tcp_v6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
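
The tcp_ipv6.c hunks above are part of a tree-wide conversion of sk->sk_rx_dst to an RCU-managed pointer: publishers use rcu_assign_pointer(), the owner clears it under the socket lock with RCU_INIT_POINTER() before dst_release(), and lockless readers such as the early-demux path switch from READ_ONCE() to rcu_dereference(). A minimal sketch of the pattern, with hypothetical names:

struct conn {
	struct dst_entry __rcu *rx_dst;	/* __rcu annotation keeps sparse honest */
};

/* publisher: pairs with the readers' rcu_dereference() */
static void conn_publish_dst(struct conn *c, struct dst_entry *dst)
{
	rcu_assign_pointer(c->rx_dst, dst);
}

/* lockless reader: must run under rcu_read_lock() */
static struct dst_entry *conn_peek_dst(struct conn *c)
{
	return rcu_dereference(c->rx_dst);
}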
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 040869f45682..93eb62221975 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -54,6 +54,19 @@
#include <trace/events/skb.h>
#include "udp_impl.h"
+static void udpv6_destruct_sock(struct sock *sk)
+{
+ udp_destruct_common(sk);
+ inet6_sock_destruct(sk);
+}
+
+int udpv6_init_sock(struct sock *sk)
+{
+ skb_queue_head_init(&udp_sk(sk)->reader_queue);
+ sk->sk_destruct = udpv6_destruct_sock;
+ return 0;
+}
+
static u32 udp6_ehashfn(const struct net *net,
const struct in6_addr *laddr,
const u16 lport,
@@ -74,7 +87,7 @@ static u32 udp6_ehashfn(const struct net *net,
fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
return __inet6_ehashfn(lhash, lport, fhash, fport,
- udp_ipv6_hash_secret + net_hash_mix(net));
+ udp6_ehash_secret + net_hash_mix(net));
}
int udp_v6_get_port(struct sock *sk, unsigned short snum)
@@ -889,7 +902,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
struct dst_entry *dst = skb_dst(skb);
int ret;
- if (unlikely(sk->sk_rx_dst != dst))
+ if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
udp6_sk_rx_dst_set(sk, dst);
if (!uh->check && !udp_sk(sk)->no_check6_rx) {
@@ -973,7 +986,7 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
return NULL;
}
-INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
+void udp_v6_early_demux(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
const struct udphdr *uh;
@@ -1001,7 +1014,7 @@ INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_efree;
- dst = READ_ONCE(sk->sk_rx_dst);
+ dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
@@ -1283,9 +1296,11 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
msg->msg_name = &sin;
msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
- if (__ipv6_only_sock(sk))
- return -ENETUNREACH;
- return udp_sendmsg(sk, msg, len);
+ err = __ipv6_only_sock(sk) ?
+ -ENETUNREACH : udp_sendmsg(sk, msg, len);
+ msg->msg_name = sin6;
+ msg->msg_namelen = addr_len;
+ return err;
}
}
@@ -1558,8 +1573,6 @@ void udpv6_destroy_sock(struct sock *sk)
udp_encap_disable();
}
}
-
- inet6_destroy_sock(sk);
}
/*
@@ -1603,12 +1616,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
}
#endif
-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct inet6_protocol udpv6_protocol = {
- .early_demux = udp_v6_early_demux,
- .early_demux_handler = udp_v6_early_demux,
+static const struct inet6_protocol udpv6_protocol = {
.handler = udpv6_rcv,
.err_handler = udpv6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
@@ -1668,7 +1676,7 @@ struct proto udpv6_prot = {
.connect = ip6_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
- .init = udp_init_sock,
+ .init = udpv6_init_sock,
.destroy = udpv6_destroy_sock,
.setsockopt = udpv6_setsockopt,
.getsockopt = udpv6_getsockopt,
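
In the udpv6_sendmsg() hunk above, a send to a v4-mapped address temporarily points msg->msg_name at an on-stack struct sockaddr_in before delegating to udp_sendmsg(); the fix restores the caller's msg_name and msg_namelen afterwards, since the msghdr is caller-owned and may be inspected or reused (sendmmsg() iterates over the same structure). The save/rewrite/restore shape, as a sketch:

void *saved_name = msg->msg_name;
int saved_namelen = msg->msg_namelen;

msg->msg_name = &sin;			/* temporary IPv4 view of the destination */
msg->msg_namelen = sizeof(sin);
err = udp_sendmsg(sk, msg, len);
msg->msg_name = saved_name;		/* hand the msghdr back intact */
msg->msg_namelen = saved_namelen;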
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 20e324b6f358..16516bd69250 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -12,6 +12,7 @@ int __udp6_lib_rcv(struct sk_buff *, struct udp_table *, int);
int __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
__be32, struct udp_table *);
+int udpv6_init_sock(struct sock *sk);
int udp_v6_get_port(struct sock *sk, unsigned short snum);
void udp_v6_rehash(struct sock *sk);
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index bf7a7acd39b1..6f6215f4d81d 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -12,6 +12,13 @@
#include <linux/proc_fs.h>
#include "udp_impl.h"
+static int udplitev6_sk_init(struct sock *sk)
+{
+ udpv6_init_sock(sk);
+ udp_sk(sk)->pcflag = UDPLITE_BIT;
+ return 0;
+}
+
static int udplitev6_rcv(struct sk_buff *skb)
{
return __udp6_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE);
@@ -38,7 +45,7 @@ struct proto udplitev6_prot = {
.connect = ip6_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
- .init = udplite_sk_init,
+ .init = udplitev6_sk_init,
.destroy = udpv6_destroy_sock,
.setsockopt = udpv6_setsockopt,
.getsockopt = udpv6_getsockopt,
@@ -50,6 +57,8 @@ struct proto udplitev6_prot = {
.get_port = udp_v6_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
+ .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
+ .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
.obj_size = sizeof(struct udp6_sock),
.h.udp_table = &udplite_table,
#ifdef CONFIG_COMPAT
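
The udplite.c hunk also wires up .sysctl_wmem_offset/.sysctl_rmem_offset so UDP-Lite v6 sockets honor the per-netns udp_wmem_min/udp_rmem_min knobs rather than a global default. The core resolves such offsets roughly as sk_get_rmem0() in include/net/sock.h does; a sketch:

static int proto_rmem_min(const struct sock *sk, const struct proto *p)
{
	if (p->sysctl_rmem_offset)	/* per-netns knob registered? */
		return *(int *)((void *)sock_net(sk) + p->sysctl_rmem_offset);
	return *p->sysctl_rmem;		/* otherwise the global sysctl */
}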
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index af7a4b8b1e9c..4c3aa97f23fa 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -120,11 +120,11 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
- if (likely(xdst->u.rt6.rt6i_idev))
- in6_dev_put(xdst->u.rt6.rt6i_idev);
dst_destroy_metrics_generic(dst);
if (xdst->u.rt6.rt6i_uncached_list)
rt6_uncached_list_del(&xdst->u.rt6);
+ if (likely(xdst->u.rt6.rt6i_idev))
+ in6_dev_put(xdst->u.rt6.rt6i_idev);
xfrm_dst_destroy(xdst);
}
@@ -289,9 +289,13 @@ int __init xfrm6_init(void)
if (ret)
goto out_state;
- register_pernet_subsys(&xfrm6_net_ops);
+ ret = register_pernet_subsys(&xfrm6_net_ops);
+ if (ret)
+ goto out_protocol;
out:
return ret;
+out_protocol:
+ xfrm6_protocol_fini();
out_state:
xfrm6_state_fini();
out_policy:
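
The xfrm6_init() change stops ignoring a register_pernet_subsys() failure and grows the existing goto ladder by one rung, so each stage that initialized successfully is unwound in reverse order. The shape, with hypothetical stage names:

static int __init subsys_init(void)
{
	int ret;

	ret = stage_a_init();
	if (ret)
		goto out;
	ret = stage_b_init();
	if (ret)
		goto out_a;
	ret = stage_c_init();		/* e.g. register_pernet_subsys() */
	if (ret)
		goto out_b;
	return 0;

out_b:
	stage_b_fini();
out_a:
	stage_a_fini();
out:
	return ret;
}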
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index bbc1924d64e5..652285191da1 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -2455,7 +2455,7 @@ static int __init afiucv_init(void)
{
int err;
- if (MACHINE_IS_VM) {
+ if (MACHINE_IS_VM && IS_ENABLED(CONFIG_IUCV)) {
cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
if (unlikely(err)) {
WARN_ON(err);
@@ -2463,11 +2463,7 @@ static int __init afiucv_init(void)
goto out;
}
- pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
- if (!pr_iucv) {
- printk(KERN_WARNING "iucv_if lookup failed\n");
- memset(&iucv_userid, 0, sizeof(iucv_userid));
- }
+ pr_iucv = &iucv_if;
} else {
memset(&iucv_userid, 0, sizeof(iucv_userid));
pr_iucv = NULL;
@@ -2501,17 +2497,13 @@ out_sock:
out_proto:
proto_unregister(&iucv_proto);
out:
- if (pr_iucv)
- symbol_put(iucv_if);
return err;
}
static void __exit afiucv_exit(void)
{
- if (pr_iucv) {
+ if (pr_iucv)
afiucv_iucv_exit();
- symbol_put(iucv_if);
- }
unregister_netdevice_notifier(&afiucv_netdev_notifier);
dev_remove_pack(&iucv_packet_type);
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 9a2d023842fe..8b5b8cc93ff8 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -106,7 +106,7 @@ struct iucv_irq_data {
u16 ippathid;
u8 ipflags1;
u8 iptype;
- u32 res2[8];
+ u32 res2[9];
};
struct iucv_irq_list {
@@ -128,7 +128,7 @@ static LIST_HEAD(iucv_task_queue);
* The tasklet for fast delivery of iucv interrupts.
*/
static void iucv_tasklet_fn(unsigned long);
-static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0);
+static DECLARE_TASKLET_OLD(iucv_tasklet, iucv_tasklet_fn);
/*
* Queue of interrupt buffers for delivery via a work queue
@@ -179,7 +179,7 @@ static char iucv_error_pathid[16] = "INVALID PATHID";
static LIST_HEAD(iucv_handler_list);
/*
- * iucv_path_table: an array of iucv_path structures.
+ * iucv_path_table: array of pointers to iucv_path structures.
*/
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;
@@ -590,7 +590,7 @@ static int iucv_enable(void)
get_online_cpus();
rc = -ENOMEM;
- alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
+ alloc_size = iucv_max_pathid * sizeof(*iucv_path_table);
iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
if (!iucv_path_table)
goto out;
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index ea9e73428ed9..920b0ebf1cb8 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -161,7 +161,8 @@ static void kcm_rcv_ready(struct kcm_sock *kcm)
/* Buffer limit is okay now, add to ready list */
list_add_tail(&kcm->wait_rx_list,
&kcm->mux->kcm_rx_waiters);
- kcm->rx_wait = true;
+ /* paired with lockless reads in kcm_rfree() */
+ WRITE_ONCE(kcm->rx_wait, true);
}
static void kcm_rfree(struct sk_buff *skb)
@@ -177,7 +178,7 @@ static void kcm_rfree(struct sk_buff *skb)
/* For reading rx_wait and rx_psock without holding lock */
smp_mb__after_atomic();
- if (!kcm->rx_wait && !kcm->rx_psock &&
+ if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
spin_lock_bh(&mux->rx_lock);
kcm_rcv_ready(kcm);
@@ -220,7 +221,7 @@ static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
struct sk_buff *skb;
struct kcm_sock *kcm;
- while ((skb = __skb_dequeue(head))) {
+ while ((skb = skb_dequeue(head))) {
/* Reset destructor to avoid calling kcm_rcv_ready */
skb->destructor = sock_rfree;
skb_orphan(skb);
@@ -236,7 +237,8 @@ try_again:
if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
/* Should mean socket buffer full */
list_del(&kcm->wait_rx_list);
- kcm->rx_wait = false;
+ /* paired with lockless reads in kcm_rfree() */
+ WRITE_ONCE(kcm->rx_wait, false);
/* Commit rx_wait to read in kcm_free */
smp_wmb();
@@ -279,10 +281,12 @@ static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
kcm = list_first_entry(&mux->kcm_rx_waiters,
struct kcm_sock, wait_rx_list);
list_del(&kcm->wait_rx_list);
- kcm->rx_wait = false;
+ /* paired with lockless reads in kcm_rfree() */
+ WRITE_ONCE(kcm->rx_wait, false);
psock->rx_kcm = kcm;
- kcm->rx_psock = psock;
+ /* paired with lockless reads in kcm_rfree() */
+ WRITE_ONCE(kcm->rx_psock, psock);
spin_unlock_bh(&mux->rx_lock);
@@ -309,7 +313,8 @@ static void unreserve_rx_kcm(struct kcm_psock *psock,
spin_lock_bh(&mux->rx_lock);
psock->rx_kcm = NULL;
- kcm->rx_psock = NULL;
+ /* paired with lockless reads in kcm_rfree() */
+ WRITE_ONCE(kcm->rx_psock, NULL);
/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
* kcm_rfree
@@ -1061,15 +1066,18 @@ partial_message:
out_error:
kcm_push(kcm);
- if (copied && sock->type == SOCK_SEQPACKET) {
+ if (sock->type == SOCK_SEQPACKET) {
/* Wrote some bytes before encountering an
* error, return partial success.
*/
- goto partial_message;
- }
-
- if (head != kcm->seq_skb)
+ if (copied)
+ goto partial_message;
+ if (head != kcm->seq_skb)
+ kfree_skb(head);
+ } else {
kfree_skb(head);
+ kcm->seq_skb = NULL;
+ }
err = sk_stream_error(sk, msg->msg_flags, err);
@@ -1081,53 +1089,18 @@ out_error:
return err;
}
-static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
- long timeo, int *err)
-{
- struct sk_buff *skb;
-
- while (!(skb = skb_peek(&sk->sk_receive_queue))) {
- if (sk->sk_err) {
- *err = sock_error(sk);
- return NULL;
- }
-
- if (sock_flag(sk, SOCK_DONE))
- return NULL;
-
- if ((flags & MSG_DONTWAIT) || !timeo) {
- *err = -EAGAIN;
- return NULL;
- }
-
- sk_wait_data(sk, &timeo, NULL);
-
- /* Handle signals */
- if (signal_pending(current)) {
- *err = sock_intr_errno(timeo);
- return NULL;
- }
- }
-
- return skb;
-}
-
static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
+ int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct kcm_sock *kcm = kcm_sk(sk);
int err = 0;
- long timeo;
struct strp_msg *stm;
int copied = 0;
struct sk_buff *skb;
- timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
-
- lock_sock(sk);
-
- skb = kcm_wait_data(sk, flags, timeo, &err);
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
@@ -1158,14 +1131,11 @@ msg_finished:
/* Finished with message */
msg->msg_flags |= MSG_EOR;
KCM_STATS_INCR(kcm->stats.rx_msgs);
- skb_unlink(skb, &sk->sk_receive_queue);
- kfree_skb(skb);
}
}
out:
- release_sock(sk);
-
+ skb_free_datagram(sk, skb);
return copied ? : err;
}
@@ -1173,9 +1143,9 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
+ int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct kcm_sock *kcm = kcm_sk(sk);
- long timeo;
struct strp_msg *stm;
int err = 0;
ssize_t copied;
@@ -1183,11 +1153,7 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
/* Only support splice for SOCK_SEQPACKET */
- timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
-
- lock_sock(sk);
-
- skb = kcm_wait_data(sk, flags, timeo, &err);
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto err_out;
@@ -1215,13 +1181,11 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
* finish reading the message.
*/
- release_sock(sk);
-
+ skb_free_datagram(sk, skb);
return copied;
err_out:
- release_sock(sk);
-
+ skb_free_datagram(sk, skb);
return err;
}
@@ -1241,7 +1205,8 @@ static void kcm_recv_disable(struct kcm_sock *kcm)
if (!kcm->rx_psock) {
if (kcm->rx_wait) {
list_del(&kcm->wait_rx_list);
- kcm->rx_wait = false;
+ /* paired with lockless reads in kcm_rfree() */
+ WRITE_ONCE(kcm->rx_wait, false);
}
requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
@@ -1312,10 +1277,11 @@ static int kcm_getsockopt(struct socket *sock, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
switch (optname) {
case KCM_RECV_DISABLE:
val = kcm->rx_disabled;
@@ -1413,12 +1379,6 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
psock->sk = csk;
psock->bpf_prog = prog;
- err = strp_init(&psock->strp, csk, &cb);
- if (err) {
- kmem_cache_free(kcm_psockp, psock);
- goto out;
- }
-
write_lock_bh(&csk->sk_callback_lock);
/* Check if sk_user_data is already used by KCM or someone else.
@@ -1426,13 +1386,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
*/
if (csk->sk_user_data) {
write_unlock_bh(&csk->sk_callback_lock);
- strp_stop(&psock->strp);
- strp_done(&psock->strp);
kmem_cache_free(kcm_psockp, psock);
err = -EALREADY;
goto out;
}
+ err = strp_init(&psock->strp, csk, &cb);
+ if (err) {
+ write_unlock_bh(&csk->sk_callback_lock);
+ kmem_cache_free(kcm_psockp, psock);
+ goto out;
+ }
+
psock->save_data_ready = csk->sk_data_ready;
psock->save_write_space = csk->sk_write_space;
psock->save_state_change = csk->sk_state_change;
@@ -1795,7 +1760,8 @@ static void kcm_done(struct kcm_sock *kcm)
if (kcm->rx_wait) {
list_del(&kcm->wait_rx_list);
- kcm->rx_wait = false;
+ /* paired with lockless reads in kcm_rfree() */
+ WRITE_ONCE(kcm->rx_wait, false);
}
/* Move any pending receive messages to other kcm sockets */
requeue_rx_msgs(mux, &sk->sk_receive_queue);
@@ -1840,10 +1806,10 @@ static int kcm_release(struct socket *sock)
kcm = kcm_sk(sk);
mux = kcm->mux;
+ lock_sock(sk);
sock_orphan(sk);
kfree_skb(kcm->seq_skb);
- lock_sock(sk);
/* Purge queue under lock to avoid race condition with tx_work trying
* to act when queue is nonempty. If tx_work runs after this point
* it will just return.
@@ -2022,6 +1988,8 @@ static __net_exit void kcm_exit_net(struct net *net)
* that all multiplexors and psocks have been destroyed.
*/
WARN_ON(!list_empty(&knet->mux_list));
+
+ mutex_destroy(&knet->mutex);
}
static struct pernet_operations kcm_net_ops = {
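
The recurring WRITE_ONCE()/READ_ONCE() annotations in kcmsock.c exist because kcm->rx_wait and kcm->rx_psock are written under mux->rx_lock but read locklessly from kcm_rfree(), an skb destructor that can fire in any context. Marking both sides prevents the compiler from tearing, fusing, or re-reading the accesses; the smp_mb__after_atomic() already present provides the ordering. The paired pattern in miniature:

/* writer side, under the lock */
spin_lock_bh(&mux->rx_lock);
WRITE_ONCE(kcm->rx_wait, false);
spin_unlock_bh(&mux->rx_lock);

/* lockless reader (destructor path), pairs with the writes above */
if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock))
	maybe_resume_rx(kcm);		/* hypothetical follow-up work */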
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 32fe99cd01fc..ce844919b2eb 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1701,9 +1701,12 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
pfk->registered |= (1<<hdr->sadb_msg_satype);
}
+ mutex_lock(&pfkey_mutex);
xfrm_probe_algs();
supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO);
+ mutex_unlock(&pfkey_mutex);
+
if (!supp_skb) {
if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC)
pfk->registered &= ~(1<<hdr->sadb_msg_satype);
@@ -1849,9 +1852,9 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
- if ((xfilter->sadb_x_filter_splen >=
+ if ((xfilter->sadb_x_filter_splen >
(sizeof(xfrm_address_t) << 3)) ||
- (xfilter->sadb_x_filter_dplen >=
+ (xfilter->sadb_x_filter_dplen >
(sizeof(xfrm_address_t) << 3))) {
mutex_unlock(&pfk->dump_lock);
return -EINVAL;
@@ -1941,7 +1944,8 @@ static u32 gen_reqid(struct net *net)
}
static int
-parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
+parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_policy *pol,
+ struct sadb_x_ipsecrequest *rq)
{
struct net *net = xp_net(xp);
struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr;
@@ -1959,9 +1963,12 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
return -EINVAL;
t->mode = mode;
- if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE)
+ if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE) {
+ if ((mode == XFRM_MODE_TUNNEL || mode == XFRM_MODE_BEET) &&
+ pol->sadb_x_policy_dir == IPSEC_DIR_OUTBOUND)
+ return -EINVAL;
t->optional = 1;
- else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
+ } else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
t->reqid = rq->sadb_x_ipsecrequest_reqid;
if (t->reqid > IPSEC_MANUAL_REQID_MAX)
t->reqid = 0;
@@ -2003,7 +2010,7 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
rq->sadb_x_ipsecrequest_len < sizeof(*rq))
return -EINVAL;
- if ((err = parse_ipsecrequest(xp, rq)) < 0)
+ if ((err = parse_ipsecrequest(xp, pol, rq)) < 0)
return err;
len -= rq->sadb_x_ipsecrequest_len;
rq = (void*)((u8*)rq + rq->sadb_x_ipsecrequest_len);
@@ -2906,7 +2913,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
break;
if (!aalg->pfkey_supported)
continue;
- if (aalg_tmpl_set(t, aalg) && aalg->available)
+ if (aalg_tmpl_set(t, aalg))
sz += sizeof(struct sadb_comb);
}
return sz + sizeof(struct sadb_prop);
@@ -2924,7 +2931,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
if (!ealg->pfkey_supported)
continue;
- if (!(ealg_tmpl_set(t, ealg) && ealg->available))
+ if (!(ealg_tmpl_set(t, ealg)))
continue;
for (k = 1; ; k++) {
@@ -2935,16 +2942,17 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
if (!aalg->pfkey_supported)
continue;
- if (aalg_tmpl_set(t, aalg) && aalg->available)
+ if (aalg_tmpl_set(t, aalg))
sz += sizeof(struct sadb_comb);
}
}
return sz + sizeof(struct sadb_prop);
}
-static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
+static int dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
{
struct sadb_prop *p;
+ int sz = 0;
int i;
p = skb_put(skb, sizeof(struct sadb_prop));
@@ -2972,13 +2980,17 @@ static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
c->sadb_comb_soft_addtime = 20*60*60;
c->sadb_comb_hard_usetime = 8*60*60;
c->sadb_comb_soft_usetime = 7*60*60;
+ sz += sizeof(*c);
}
}
+
+ return sz + sizeof(*p);
}
-static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
+static int dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
{
struct sadb_prop *p;
+ int sz = 0;
int i, k;
p = skb_put(skb, sizeof(struct sadb_prop));
@@ -3020,8 +3032,11 @@ static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
c->sadb_comb_soft_addtime = 20*60*60;
c->sadb_comb_hard_usetime = 8*60*60;
c->sadb_comb_soft_usetime = 7*60*60;
+ sz += sizeof(*c);
}
}
+
+ return sz + sizeof(*p);
}
static int key_notify_policy_expire(struct xfrm_policy *xp, const struct km_event *c)
@@ -3151,6 +3166,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
struct sadb_x_sec_ctx *sec_ctx;
struct xfrm_sec_ctx *xfrm_ctx;
int ctx_size = 0;
+ int alg_size = 0;
sockaddr_size = pfkey_sockaddr_size(x->props.family);
if (!sockaddr_size)
@@ -3162,16 +3178,16 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
sizeof(struct sadb_x_policy);
if (x->id.proto == IPPROTO_AH)
- size += count_ah_combs(t);
+ alg_size = count_ah_combs(t);
else if (x->id.proto == IPPROTO_ESP)
- size += count_esp_combs(t);
+ alg_size = count_esp_combs(t);
if ((xfrm_ctx = x->security)) {
ctx_size = PFKEY_ALIGN8(xfrm_ctx->ctx_len);
size += sizeof(struct sadb_x_sec_ctx) + ctx_size;
}
- skb = alloc_skb(size + 16, GFP_ATOMIC);
+ skb = alloc_skb(size + alg_size + 16, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
@@ -3225,10 +3241,13 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
pol->sadb_x_policy_priority = xp->priority;
/* Set sadb_comb's. */
+ alg_size = 0;
if (x->id.proto == IPPROTO_AH)
- dump_ah_combs(skb, t);
+ alg_size = dump_ah_combs(skb, t);
else if (x->id.proto == IPPROTO_ESP)
- dump_esp_combs(skb, t);
+ alg_size = dump_esp_combs(skb, t);
+
+ hdr->sadb_msg_len += alg_size / 8;
/* security context */
if (xfrm_ctx) {
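
Context for the alg_size arithmetic in pfkey_send_acquire(): sadb_msg_len counts 64-bit words, per RFC 2367. count_{ah,esp}_combs() size the proposal payload, dump_{ah,esp}_combs() now return the bytes they actually appended, and alg_size / 8 converts that into message-length units. Previously those bytes were reserved in the skb but never added to sadb_msg_len, so userspace saw a truncated SADB_ACQUIRE. As a worked example, one 8-byte struct sadb_prop plus one 72-byte struct sadb_comb is alg_size = 80, i.e. 10 units added to the header.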
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 421b2c89ce12..d001e254bada 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1171,8 +1171,10 @@ static void l2tp_tunnel_destruct(struct sock *sk)
}
/* Remove hooks into tunnel socket */
+ write_lock_bh(&sk->sk_callback_lock);
sk->sk_destruct = tunnel->old_sk_destruct;
sk->sk_user_data = NULL;
+ write_unlock_bh(&sk->sk_callback_lock);
/* Call the original destructor */
if (sk->sk_destruct)
@@ -1491,20 +1493,27 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
sock = sockfd_lookup(tunnel->fd, &ret);
if (!sock)
goto err;
-
- ret = l2tp_validate_socket(sock->sk, net, tunnel->encap);
- if (ret < 0)
- goto err_sock;
}
+ sk = sock->sk;
+ write_lock_bh(&sk->sk_callback_lock);
+ ret = l2tp_validate_socket(sk, net, tunnel->encap);
+ if (ret < 0)
+ goto err_inval_sock;
+ rcu_assign_sk_user_data(sk, tunnel);
+ write_unlock_bh(&sk->sk_callback_lock);
+
tunnel->l2tp_net = net;
pn = l2tp_pernet(net);
+ sock_hold(sk);
+ tunnel->sock = sk;
+
spin_lock_bh(&pn->l2tp_tunnel_list_lock);
list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-
+ sock_put(sk);
ret = -EEXIST;
goto err_sock;
}
@@ -1512,10 +1521,6 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
- sk = sock->sk;
- sock_hold(sk);
- tunnel->sock = sk;
-
if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
struct udp_tunnel_sock_cfg udp_cfg = {
.sk_user_data = tunnel,
@@ -1525,8 +1530,6 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
};
setup_udp_tunnel_sock(net, sock, &udp_cfg);
- } else {
- sk->sk_user_data = tunnel;
}
tunnel->old_sk_destruct = sk->sk_destruct;
@@ -1541,6 +1544,11 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
return 0;
err_sock:
+ write_lock_bh(&sk->sk_callback_lock);
+ rcu_assign_sk_user_data(sk, NULL);
+err_inval_sock:
+ write_unlock_bh(&sk->sk_callback_lock);
+
if (tunnel->fd < 0)
sock_release(sock);
else
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 23e9ce985ed6..3a24614f37be 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -268,8 +268,6 @@ static void l2tp_ip6_destroy_sock(struct sock *sk)
if (tunnel)
l2tp_tunnel_delete(tunnel);
-
- inet6_destroy_sock(sk);
}
static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@@ -523,7 +521,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
*/
if (len > INT_MAX - transhdrlen)
return -EMSGSIZE;
- ulen = len + transhdrlen;
/* Mirror BSD error message compatibility */
if (msg->msg_flags & MSG_OOB)
@@ -647,6 +644,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
back_from_confirm:
lock_sock(sk);
+ ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
err = ip6_append_data(sk, ip_generic_getfrag, msg,
ulen, transhdrlen, &ipc6,
&fl6, (struct rt6_info *)dst,
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index c54cb59593ef..7d3c782e5ab1 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1375,11 +1375,11 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
-
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
err = -ENOTCONN;
if (sk->sk_user_data == NULL)
goto end;
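
This pppol2tp hunk and the kcm_getsockopt() one above fix the same ordering bug: len is a signed int, and min_t(unsigned int, len, sizeof(int)) converts a negative value to a huge unsigned one before comparing, so it clamps to sizeof(int) and the subsequent len < 0 test can never fire. Checking the sign before clamping restores the intended -EINVAL. The failure, assuming a 32-bit int:

int len = -1;

/* buggy order: (unsigned int)-1 == 0xffffffff, so min_t() yields 4 */
len = min_t(unsigned int, len, sizeof(int));	/* len is now 4 */
if (len < 0)					/* dead code */
	return -EINVAL;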
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 3b1ea89a340e..d57bfce94d60 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -227,6 +227,8 @@ static int llc_ui_release(struct socket *sock)
if (llc->dev)
dev_put(llc->dev);
sock_put(sk);
+ sock_orphan(sk);
+ sock->sk = NULL;
llc_sk_free(sk);
out:
return 0;
@@ -925,14 +927,15 @@ copy_uaddr:
*/
static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
+ DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
- DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
int flags = msg->msg_flags;
int noblock = flags & MSG_DONTWAIT;
+ int rc = -EINVAL, copied = 0, hdrlen, hh_len;
struct sk_buff *skb = NULL;
+ struct net_device *dev;
size_t size = 0;
- int rc = -EINVAL, copied = 0, hdrlen;
dprintk("%s: sending from %02X to %02X\n", __func__,
llc->laddr.lsap, llc->daddr.lsap);
@@ -952,22 +955,29 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (rc)
goto out;
}
- hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
+ dev = llc->dev;
+ hh_len = LL_RESERVED_SPACE(dev);
+ hdrlen = llc_ui_header_len(sk, addr);
size = hdrlen + len;
- if (size > llc->dev->mtu)
- size = llc->dev->mtu;
+ size = min_t(size_t, size, READ_ONCE(dev->mtu));
copied = size - hdrlen;
rc = -EINVAL;
if (copied < 0)
goto out;
release_sock(sk);
- skb = sock_alloc_send_skb(sk, size, noblock, &rc);
+ skb = sock_alloc_send_skb(sk, hh_len + size, noblock, &rc);
lock_sock(sk);
if (!skb)
goto out;
- skb->dev = llc->dev;
+ if (sock_flag(sk, SOCK_ZAPPED) ||
+ llc->dev != dev ||
+ hdrlen != llc_ui_header_len(sk, addr) ||
+ hh_len != LL_RESERVED_SPACE(dev) ||
+ size > READ_ONCE(dev->mtu))
+ goto out;
+ skb->dev = dev;
skb->protocol = llc_proto_type(addr->sllc_arphrd);
- skb_reserve(skb, hdrlen);
+ skb_reserve(skb, hh_len + hdrlen);
rc = memcpy_from_msg(skb_put(skb, copied), msg, copied);
if (rc)
goto out;
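
Two things are going on in llc_ui_sendmsg(): headroom is now sized with LL_RESERVED_SPACE(dev), which covers the device's hard header plus any extra needed_headroom, so dev_hard_header() never has to reallocate; and because sock_alloc_send_skb() can sleep, the socket lock is dropped around it and everything sampled from llc->dev beforehand (device, header lengths, MTU) is re-checked once the lock is re-taken. A sketch of the headroom half:

int hh_len = LL_RESERVED_SPACE(dev);	/* hard header + needed_headroom */

skb = sock_alloc_send_skb(sk, hh_len + size, noblock, &rc);
if (skb)
	skb_reserve(skb, hh_len);	/* payload starts after the headroom */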
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 64d4bef04e73..4900a27b5176 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -135,22 +135,15 @@ static struct packet_type llc_packet_type __read_mostly = {
.func = llc_rcv,
};
-static struct packet_type llc_tr_packet_type __read_mostly = {
- .type = cpu_to_be16(ETH_P_TR_802_2),
- .func = llc_rcv,
-};
-
static int __init llc_init(void)
{
dev_add_pack(&llc_packet_type);
- dev_add_pack(&llc_tr_packet_type);
return 0;
}
static void __exit llc_exit(void)
{
dev_remove_pack(&llc_packet_type);
- dev_remove_pack(&llc_tr_packet_type);
}
module_init(llc_init);
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 82cb93f66b9b..f4fb309185ce 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -127,8 +127,14 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
skb->transport_header += llc_len;
skb_pull(skb, llc_len);
if (skb->protocol == htons(ETH_P_802_2)) {
- __be16 pdulen = eth_hdr(skb)->h_proto;
- s32 data_size = ntohs(pdulen) - llc_len;
+ __be16 pdulen;
+ s32 data_size;
+
+ if (skb->mac_len < ETH_HLEN)
+ return 0;
+
+ pdulen = eth_hdr(skb)->h_proto;
+ data_size = ntohs(pdulen) - llc_len;
if (data_size < 0 ||
!pskb_may_pull(skb, data_size))
@@ -162,9 +168,6 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
void (*sta_handler)(struct sk_buff *skb);
void (*sap_handler)(struct llc_sap *sap, struct sk_buff *skb);
- if (!net_eq(dev_net(dev), &init_net))
- goto drop;
-
/*
* When the interface is in promisc. mode, drop all the crap that it
* receives, do not try to analyse it.
diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
index 9fa3342c7a82..df26557a0244 100644
--- a/net/llc/llc_s_ac.c
+++ b/net/llc/llc_s_ac.c
@@ -153,6 +153,9 @@ int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
int rc = 1;
u32 data_size;
+ if (skb->mac_len < ETH_HLEN)
+ return 1;
+
llc_pdu_decode_sa(skb, mac_da);
llc_pdu_decode_da(skb, mac_sa);
llc_pdu_decode_ssap(skb, &dsap);
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index c29170e767a8..64e2c67e16ba 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -77,6 +77,9 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
u32 data_size;
struct sk_buff *nskb;
+ if (skb->mac_len < ETH_HLEN)
+ goto out;
+
/* The test request command is type U (llc_len = 3) */
data_size = ntohs(eth_hdr(skb)->h_proto) - 3;
nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 16f37fd0ac0e..6428c0d37145 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2581,6 +2581,10 @@ static int ieee80211_get_tx_power(struct wiphy *wiphy,
else
*dbm = sdata->vif.bss_conf.txpower;
+ /* INT_MIN indicates no power level was set yet */
+ if (*dbm == INT_MIN)
+ return -EINVAL;
+
return 0;
}
@@ -3280,9 +3284,6 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_MESH_POINT: {
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- if (params->chandef.width != sdata->vif.bss_conf.chandef.width)
- return -EINVAL;
-
/* changes into another band are not supported */
if (sdata->vif.bss_conf.chandef.chan->band !=
params->chandef.chan->band)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 0e26c83b6b41..d5b8568591d4 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -542,6 +542,10 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
sdata_assert_lock(sdata);
+ /* When not connected/joined, sending CSA doesn't make sense. */
+ if (ifibss->state != IEEE80211_IBSS_MLME_JOINED)
+ return -ENOLINK;
+
/* update cfg80211 bss information with the new channel */
if (!is_zero_ether_addr(ifibss->bssid)) {
cbss = cfg80211_get_bss(sdata->local->hw.wiphy,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 7747a6f46d29..3b7151501b3e 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -632,6 +632,26 @@ struct mesh_csa_settings {
struct cfg80211_csa_settings settings;
};
+/**
+ * struct mesh_table
+ *
+ * @known_gates: list of known mesh gates and their mpaths by the station. The
+ * gate's mpath may or may not be resolved and active.
+ * @gates_lock: protects updates to known_gates
+ * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @walk_head: linked list containing all mesh_path objects
+ * @walk_lock: lock protecting walk_head
+ * @entries: number of entries in the table
+ */
+struct mesh_table {
+ struct hlist_head known_gates;
+ spinlock_t gates_lock;
+ struct rhashtable rhead;
+ struct hlist_head walk_head;
+ spinlock_t walk_lock;
+ atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
+};
+
struct ieee80211_if_mesh {
struct timer_list housekeeping_timer;
struct timer_list mesh_path_timer;
@@ -706,8 +726,8 @@ struct ieee80211_if_mesh {
/* offset from skb->data while building IE */
int meshconf_offset;
- struct mesh_table *mesh_paths;
- struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
+ struct mesh_table mesh_paths;
+ struct mesh_table mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation;
int mpp_paths_generation;
};
@@ -1460,7 +1480,6 @@ struct ieee802_11_elems {
const u8 *supp_rates;
const u8 *ds_params;
const struct ieee80211_tim_ie *tim;
- const u8 *challenge;
const u8 *rsn;
const u8 *erp_info;
const u8 *ext_supp_rates;
@@ -1507,7 +1526,6 @@ struct ieee802_11_elems {
u8 ssid_len;
u8 supp_rates_len;
u8 tim_len;
- u8 challenge_len;
u8 rsn_len;
u8 ext_supp_rates_len;
u8 wmm_info_len;
@@ -1521,6 +1539,8 @@ struct ieee802_11_elems {
u8 country_elem_len;
u8 bssid_index_len;
+ void *nontx_profile;
+
/* whether a parse error occurred while retrieving these elements */
bool parse_error;
};
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index f215218a88c9..fa2ac02063cf 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1315,8 +1315,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
ieee80211_led_exit(local);
destroy_workqueue(local->workqueue);
fail_workqueue:
- if (local->wiphy_ciphers_allocated)
+ if (local->wiphy_ciphers_allocated) {
kfree(local->hw.wiphy->cipher_suites);
+ local->wiphy_ciphers_allocated = false;
+ }
kfree(local->int_scan_req);
return result;
}
@@ -1386,8 +1388,10 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
mutex_destroy(&local->iflist_mtx);
mutex_destroy(&local->mtx);
- if (local->wiphy_ciphers_allocated)
+ if (local->wiphy_ciphers_allocated) {
kfree(local->hw.wiphy->cipher_suites);
+ local->wiphy_ciphers_allocated = false;
+ }
idr_for_each(&local->ack_status_frames,
ieee80211_free_ack_frame, NULL);
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 953f720754e8..3a610ade2c04 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -127,26 +127,6 @@ struct mesh_path {
u32 path_change_count;
};
-/**
- * struct mesh_table
- *
- * @known_gates: list of known mesh gates and their mpaths by the station. The
- * gate's mpath may or may not be resolved and active.
- * @gates_lock: protects updates to known_gates
- * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
- * @walk_head: linked list containging all mesh_path objects
- * @walk_lock: lock protecting walk_head
- * @entries: number of entries in the table
- */
-struct mesh_table {
- struct hlist_head known_gates;
- spinlock_t gates_lock;
- struct rhashtable rhead;
- struct hlist_head walk_head;
- spinlock_t walk_lock;
- atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
-};
-
/* Recent multicast cache */
/* RMC_BUCKETS must be a power of 2, maximum 256 */
#define RMC_BUCKETS 256
@@ -306,7 +286,7 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
void mesh_path_flush_pending(struct mesh_path *mpath);
void mesh_path_tx_pending(struct mesh_path *mpath);
-int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
+void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata);
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
void mesh_path_timer(struct timer_list *t);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index d7ae7415d54d..7e27e5201c54 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -47,32 +47,24 @@ static void mesh_path_rht_free(void *ptr, void *tblptr)
mesh_path_free_rcu(tbl, mpath);
}
-static struct mesh_table *mesh_table_alloc(void)
+static void mesh_table_init(struct mesh_table *tbl)
{
- struct mesh_table *newtbl;
+ INIT_HLIST_HEAD(&tbl->known_gates);
+ INIT_HLIST_HEAD(&tbl->walk_head);
+ atomic_set(&tbl->entries, 0);
+ spin_lock_init(&tbl->gates_lock);
+ spin_lock_init(&tbl->walk_lock);
- newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
- if (!newtbl)
- return NULL;
-
- INIT_HLIST_HEAD(&newtbl->known_gates);
- INIT_HLIST_HEAD(&newtbl->walk_head);
- atomic_set(&newtbl->entries, 0);
- spin_lock_init(&newtbl->gates_lock);
- spin_lock_init(&newtbl->walk_lock);
- if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
- kfree(newtbl);
- return NULL;
- }
-
- return newtbl;
+ /* rhashtable_init() may fail only in case of wrong
+ * mesh_rht_params
+ */
+ WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
}
static void mesh_table_free(struct mesh_table *tbl)
{
rhashtable_free_and_destroy(&tbl->rhead,
mesh_path_rht_free, tbl);
- kfree(tbl);
}
/**
@@ -240,13 +232,13 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
- return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
+ return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
}
struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
- return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
+ return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
}
static struct mesh_path *
@@ -283,7 +275,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
- return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
+ return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
}
/**
@@ -298,7 +290,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
- return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
+ return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
}
/**
@@ -311,7 +303,7 @@ int mesh_path_add_gate(struct mesh_path *mpath)
int err;
rcu_read_lock();
- tbl = mpath->sdata->u.mesh.mesh_paths;
+ tbl = &mpath->sdata->u.mesh.mesh_paths;
spin_lock_bh(&mpath->state_lock);
if (mpath->is_gate) {
@@ -420,7 +412,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
if (!new_mpath)
return ERR_PTR(-ENOMEM);
- tbl = sdata->u.mesh.mesh_paths;
+ tbl = &sdata->u.mesh.mesh_paths;
spin_lock_bh(&tbl->walk_lock);
mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
&new_mpath->rhash,
@@ -462,7 +454,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
return -ENOMEM;
memcpy(new_mpath->mpp, mpp, ETH_ALEN);
- tbl = sdata->u.mesh.mpp_paths;
+ tbl = &sdata->u.mesh.mpp_paths;
spin_lock_bh(&tbl->walk_lock);
ret = rhashtable_lookup_insert_fast(&tbl->rhead,
@@ -491,7 +483,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
void mesh_plink_broken(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
+ struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath;
@@ -550,7 +542,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
+ struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
struct mesh_path *mpath;
struct hlist_node *n;
@@ -565,7 +557,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
const u8 *proxy)
{
- struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
+ struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
struct mesh_path *mpath;
struct hlist_node *n;
@@ -599,8 +591,8 @@ static void table_flush_by_iface(struct mesh_table *tbl)
*/
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
- table_flush_by_iface(sdata->u.mesh.mesh_paths);
- table_flush_by_iface(sdata->u.mesh.mpp_paths);
+ table_flush_by_iface(&sdata->u.mesh.mesh_paths);
+ table_flush_by_iface(&sdata->u.mesh.mpp_paths);
}
/**
@@ -646,7 +638,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
/* flush relevant mpp entries first */
mpp_flush_by_proxy(sdata, addr);
- err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
+ err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
sdata->u.mesh.mesh_paths_generation++;
return err;
}
@@ -684,7 +676,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
struct mesh_path *gate;
bool copy = false;
- tbl = sdata->u.mesh.mesh_paths;
+ tbl = &sdata->u.mesh.mesh_paths;
rcu_read_lock();
hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
@@ -720,7 +712,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
- kfree_skb(skb);
+ ieee80211_free_txskb(&sdata->local->hw, skb);
sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
@@ -764,29 +756,10 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
mesh_path_tx_pending(mpath);
}
-int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
+void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
- struct mesh_table *tbl_path, *tbl_mpp;
- int ret;
-
- tbl_path = mesh_table_alloc();
- if (!tbl_path)
- return -ENOMEM;
-
- tbl_mpp = mesh_table_alloc();
- if (!tbl_mpp) {
- ret = -ENOMEM;
- goto free_path;
- }
-
- sdata->u.mesh.mesh_paths = tbl_path;
- sdata->u.mesh.mpp_paths = tbl_mpp;
-
- return 0;
-
-free_path:
- mesh_table_free(tbl_path);
- return ret;
+ mesh_table_init(&sdata->u.mesh.mesh_paths);
+ mesh_table_init(&sdata->u.mesh.mpp_paths);
}
static
@@ -808,12 +781,12 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
- mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
- mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
+ mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
+ mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
}
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
- mesh_table_free(sdata->u.mesh.mesh_paths);
- mesh_table_free(sdata->u.mesh.mpp_paths);
+ mesh_table_free(&sdata->u.mesh.mesh_paths);
+ mesh_table_free(&sdata->u.mesh.mpp_paths);
}
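
The mesh_pathtbl.c rework embeds the two small mesh_table structs directly in ieee80211_if_mesh (see the struct definition moved into ieee80211_i.h earlier) instead of kmalloc'ing them. With no allocation left, initialization cannot fail, so mesh_pathtbl_init() becomes void; the one remaining fallible call, rhashtable_init(), can only fail on invalid compile-time parameters, hence the WARN_ON(). The ownership change in miniature:

struct parent {
	struct mesh_table paths;	/* embedded: no kmalloc, no NULL path */
};

static void parent_init(struct parent *p)
{
	mesh_table_init(&p->paths);	/* infallible by construction */
}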
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 737c5f4dbf52..def34c843f29 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -1044,8 +1044,8 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
case WLAN_SP_MESH_PEERING_OPEN:
if (!matches_local)
event = OPN_RJCT;
- if (!mesh_plink_free_count(sdata) ||
- (sta->mesh->plid && sta->mesh->plid != plid))
+ else if (!mesh_plink_free_count(sdata) ||
+ (sta->mesh->plid && sta->mesh->plid != plid))
event = OPN_IGNR;
else
event = OPN_ACPT;
@@ -1053,9 +1053,9 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
case WLAN_SP_MESH_PEERING_CONFIRM:
if (!matches_local)
event = CNF_RJCT;
- if (!mesh_plink_free_count(sdata) ||
- sta->mesh->llid != llid ||
- (sta->mesh->plid && sta->mesh->plid != plid))
+ else if (!mesh_plink_free_count(sdata) ||
+ sta->mesh->llid != llid ||
+ (sta->mesh->plid && sta->mesh->plid != plid))
event = CNF_IGNR;
else
event = CNF_ACPT;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 5415e566e09d..b48a09043663 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2829,14 +2829,14 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
+ const struct element *challenge;
u8 *pos;
- struct ieee802_11_elems elems;
u32 tx_flags = 0;
pos = mgmt->u.auth.variable;
- ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems,
- mgmt->bssid, auth_data->bss->bssid);
- if (!elems.challenge)
+ challenge = cfg80211_find_elem(WLAN_EID_CHALLENGE, pos,
+ len - (pos - (u8 *)mgmt));
+ if (!challenge)
return;
auth_data->expected_transaction = 4;
drv_mgd_prepare_tx(sdata->local, sdata, 0);
@@ -2844,7 +2844,8 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_INTFL_MLME_CONN_TX;
ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0,
- elems.challenge - 2, elems.challenge_len + 2,
+ (void *)challenge,
+ challenge->datalen + sizeof(*challenge),
auth_data->bss->bssid, auth_data->bss->bssid,
auth_data->key, auth_data->key_len,
auth_data->key_idx, tx_flags);
@@ -3223,7 +3224,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
pos = mgmt->u.assoc_resp.variable;
ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems,
- mgmt->bssid, assoc_data->bss->bssid);
+ mgmt->bssid, NULL);
if (!elems.supp_rates) {
sdata_info(sdata, "no SuppRates element in AssocResp\n");
@@ -3298,6 +3299,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
sdata_info(sdata,
"AP bug: VHT operation missing from AssocResp\n");
}
+ kfree(bss_elems.nontx_profile);
}
/*
@@ -3575,7 +3577,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
pos = mgmt->u.assoc_resp.variable;
ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems,
- mgmt->bssid, assoc_data->bss->bssid);
+ mgmt->bssid, NULL);
if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
elems.timeout_int &&
@@ -3882,6 +3884,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ifmgd->assoc_data->timeout = jiffies;
ifmgd->assoc_data->timeout_started = true;
run_again(sdata, ifmgd->assoc_data->timeout);
+ kfree(elems.nontx_profile);
return;
}
@@ -4049,7 +4052,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ieee80211_report_disconnect(sdata, deauth_buf,
sizeof(deauth_buf), true,
WLAN_REASON_DEAUTH_LEAVING);
- return;
+ goto free;
}
if (sta && elems.opmode_notif)
@@ -4064,6 +4067,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
elems.cisco_dtpc_elem);
ieee80211_bss_info_change_notify(sdata, changed);
+free:
+ kfree(elems.nontx_profile);
}
void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 344b2c22e75b..ee65f1f50a0a 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -216,6 +216,8 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
rx_status, beacon);
}
+ kfree(elems.nontx_profile);
+
return bss;
}
@@ -431,10 +433,6 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
scan_req = rcu_dereference_protected(local->scan_req,
lockdep_is_held(&local->mtx));
- if (scan_req != local->int_scan_req) {
- local->scan_info.aborted = aborted;
- cfg80211_scan_done(scan_req, &local->scan_info);
- }
RCU_INIT_POINTER(local->scan_req, NULL);
scan_sdata = rcu_dereference_protected(local->scan_sdata,
@@ -444,6 +442,13 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
local->scanning = 0;
local->scan_chandef.chan = NULL;
+ synchronize_rcu();
+
+ if (scan_req != local->int_scan_req) {
+ local->scan_info.aborted = aborted;
+ cfg80211_scan_done(scan_req, &local->scan_info);
+ }
+
/* Set power back to normal operating levels. */
ieee80211_hw_config(local, 0);
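
The reordering in __ieee80211_scan_completed() is a publish/retire fix: local->scan_req is unpublished and local->scanning cleared first, then synchronize_rcu() waits out any readers still traversing the old scan state, and only then does cfg80211_scan_done() release the request. Completing the scan before the grace period could let a concurrent RCU reader dereference a freed cfg80211_scan_request. The generic shape, with placeholder names:

RCU_INIT_POINTER(state->req, NULL);	/* unpublish */
synchronize_rcu();			/* wait for pre-existing readers */
finish_and_free(req);			/* now safe to release */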
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7b2e8c890381..e330036e02ea 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -683,6 +683,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
if (ieee80211_vif_is_mesh(&sdata->vif))
mesh_accept_plinks_update(sdata);
+ ieee80211_check_fast_xmit(sta);
+
return 0;
out_remove:
sta_info_hash_del(local, sta);
@@ -1023,7 +1025,8 @@ static int __must_check __sta_info_destroy_part1(struct sta_info *sta)
list_del_rcu(&sta->list);
sta->removed = true;
- drv_sta_pre_rcu_remove(local, sta->sdata, sta);
+ if (sta->uploaded)
+ drv_sta_pre_rcu_remove(local, sta->sdata, sta);
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
rcu_access_pointer(sdata->u.vlan.sta) == sta)
@@ -2121,7 +2124,7 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
{
- u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);
+ u32 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);
if (rate == STA_STATS_RATE_INVALID)
return -EINVAL;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index d82d22b6a2a9..5fd9a6f752a1 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -651,7 +651,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
}
if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
- !ieee80211_is_deauth(hdr->frame_control)))
+ !ieee80211_is_deauth(hdr->frame_control)) &&
+ tx->skb->protocol != tx->sdata->control_port_protocol)
return TX_DROP;
if (!skip_hw && tx->key &&
@@ -2918,7 +2919,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
sdata->vif.type == NL80211_IFTYPE_STATION)
goto out;
- if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->uploaded)
goto out;
if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index c1c117fdf318..6223af1c3457 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1006,10 +1006,6 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
} else
elem_parse_failed = true;
break;
- case WLAN_EID_CHALLENGE:
- elems->challenge = pos;
- elems->challenge_len = elen;
- break;
case WLAN_EID_VENDOR_SPECIFIC:
if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
pos[2] == 0xf2) {
@@ -1289,6 +1285,8 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) {
if (elem->datalen < 2)
continue;
+ if (elem->data[0] < 1 || elem->data[0] > 8)
+ continue;
for_each_element(sub, elem->data + 1, elem->datalen - 1) {
u8 new_bssid[ETH_ALEN];
@@ -1365,6 +1363,11 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
nontransmitted_profile,
nontransmitted_profile_len);
+ if (!nontransmitted_profile_len) {
+ nontransmitted_profile_len = 0;
+ kfree(nontransmitted_profile);
+ nontransmitted_profile = NULL;
+ }
}
crc = _ieee802_11_parse_elems_crc(start, len, action, elems, filter,
@@ -1394,7 +1397,7 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
offsetofend(struct ieee80211_bssid_index, dtim_count))
elems->dtim_count = elems->bssid_index->dtim_count;
- kfree(nontransmitted_profile);
+ elems->nontx_profile = nontransmitted_profile;
return crc;
}
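
Note the ownership change at the end of this util.c hunk: ieee802_11_parse_elems_crc() used to kfree() the reconstructed nontransmitted-BSSID profile before returning, but it now hands the buffer to the caller through elems->nontx_profile. That is why the mlme.c and scan.c hunks above add kfree(elems.nontx_profile) on every exit path; any caller of the parser now carries that obligation, along the lines of:

struct ieee802_11_elems elems;

ieee802_11_parse_elems(pos, len, false, &elems, bssid, NULL);
/* ... consume elems ... */
kfree(elems.nontx_profile);	/* caller owns the profile buffer */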
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index ace44ff96635..d6e58506136b 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -141,12 +141,14 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, struct sk_buff *skb)
{
+ const struct ethhdr *eth = (void *)skb->data;
struct mac80211_qos_map *qos_map;
bool qos;
/* all mesh/ocb stations are required to support WME */
- if (sta && (sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
- sdata->vif.type == NL80211_IFTYPE_OCB))
+ if ((sdata->vif.type == NL80211_IFTYPE_MESH_POINT &&
+ !is_multicast_ether_addr(eth->h_dest)) ||
+ (sdata->vif.type == NL80211_IFTYPE_OCB && sta))
qos = true;
else if (sta)
qos = sta->sta.wme;
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 1cf5ac09edcb..a08240fe68a7 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -661,6 +661,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
sdata->dev = ndev;
sdata->wpan_dev.wpan_phy = local->hw.phy;
sdata->local = local;
+ INIT_LIST_HEAD(&sdata->wpan_dev.list);
/* setup type-dependent data */
ret = ieee802154_setup_sdata(sdata, type);
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index b8ce84618a55..726b47a4611b 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -44,7 +44,7 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
switch (mac_cb(skb)->dest.mode) {
case IEEE802154_ADDR_NONE:
- if (mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE)
+ if (hdr->source.mode != IEEE802154_ADDR_NONE)
/* FIXME: check if we are PAN coordinator */
skb->pkt_type = PACKET_OTHERHOST;
else
@@ -132,7 +132,7 @@ static int
ieee802154_parse_frame_start(struct sk_buff *skb, struct ieee802154_hdr *hdr)
{
int hlen;
- struct ieee802154_mac_cb *cb = mac_cb_init(skb);
+ struct ieee802154_mac_cb *cb = mac_cb(skb);
skb_reset_mac_header(skb);
@@ -294,8 +294,9 @@ void
ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi)
{
struct ieee802154_local *local = hw_to_local(hw);
+ struct ieee802154_mac_cb *cb = mac_cb_init(skb);
- mac_cb(skb)->lqi = lqi;
+ cb->lqi = lqi;
skb->pkt_type = IEEE802154_RX_MSG;
skb_queue_tail(&local->skb_queue, skb);
tasklet_schedule(&local->tasklet);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index d5e3656fc67c..3a55a392e021 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1428,6 +1428,7 @@ static int mpls_dev_sysctl_register(struct net_device *dev,
free:
kfree(table);
out:
+ mdev->sysctl = NULL;
return -ENOBUFS;
}
@@ -1437,6 +1438,9 @@ static void mpls_dev_sysctl_unregister(struct net_device *dev,
struct net *net = dev_net(dev);
struct ctl_table *table;
+ if (!mdev->sysctl)
+ return;
+
table = mdev->sysctl->ctl_table_arg;
unregister_net_sysctl_table(mdev->sysctl);
kfree(table);
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index ad3fd7f1da75..1dde6dc841b8 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -81,9 +81,12 @@ enum {
struct ncsi_channel_version {
- u32 version; /* Supported BCD encoded NCSI version */
- u32 alpha2; /* Supported BCD encoded NCSI version */
- u8 fw_name[12]; /* Firware name string */
+ u8 major; /* NCSI version major */
+ u8 minor; /* NCSI version minor */
+ u8 update; /* NCSI version update */
+ char alpha1; /* NCSI version alpha1 */
+ char alpha2; /* NCSI version alpha2 */
+ u8 fw_name[12]; /* Firmware name string */
u32 fw_version; /* Firmware version */
u16 pci_ids[4]; /* PCI identification */
u32 mf_id; /* Manufacture ID */
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index b635c194f0a8..62fb1031763d 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -165,6 +165,7 @@ static int ncsi_aen_handler_cr(struct ncsi_dev_priv *ndp,
nc->state = NCSI_CHANNEL_INACTIVE;
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
+ nc->modes[NCSI_MODE_TX_ENABLE].enable = 0;
return ncsi_process_next_channel(ndp);
}
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 9bd12f7517ed..6710f6b8764b 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -770,9 +770,6 @@ static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
return -1;
}
- /* Set the flag for GMA command which should only be called once */
- nca->ndp->gma_flag = 1;
-
/* Get Mac address from NCSI device */
return nch->handler(nca);
}
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 27700887c321..feb0b422d193 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -71,8 +71,8 @@ static int ncsi_write_channel_info(struct sk_buff *skb,
if (nc == nc->package->preferred_channel)
nla_put_flag(skb, NCSI_CHANNEL_ATTR_FORCED);
- nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.version);
- nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MINOR, nc->version.alpha2);
+ nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.major);
+ nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MINOR, nc->version.minor);
nla_put_string(skb, NCSI_CHANNEL_ATTR_VERSION_STR, nc->version.fw_name);
vid_nest = nla_nest_start_noflag(skb, NCSI_CHANNEL_ATTR_VLAN_LIST);
diff --git a/net/ncsi/ncsi-pkt.h b/net/ncsi/ncsi-pkt.h
index 80938b338fee..3fbea7e74fb1 100644
--- a/net/ncsi/ncsi-pkt.h
+++ b/net/ncsi/ncsi-pkt.h
@@ -191,9 +191,12 @@ struct ncsi_rsp_gls_pkt {
/* Get Version ID */
struct ncsi_rsp_gvi_pkt {
struct ncsi_rsp_pkt_hdr rsp; /* Response header */
- __be32 ncsi_version; /* NCSI version */
+ unsigned char major; /* NCSI version major */
+ unsigned char minor; /* NCSI version minor */
+ unsigned char update; /* NCSI version update */
+ unsigned char alpha1; /* NCSI version alpha1 */
unsigned char reserved[3]; /* Reserved */
- unsigned char alpha2; /* NCSI version */
+ unsigned char alpha2; /* NCSI version alpha2 */
unsigned char fw_name[12]; /* f/w name string */
__be32 fw_version; /* f/w version */
__be16 pci_ids[4]; /* PCI IDs */
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 7c893c379920..876622e9a5b2 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -19,6 +19,19 @@
#include "ncsi-pkt.h"
#include "ncsi-netlink.h"
+/* Nibbles within [0xA, 0xF] add zero "0" to the returned value.
+ * Optional fields (encoded as 0xFF) will default to zero.
+ */
+static u8 decode_bcd_u8(u8 x)
+{
+ int lo = x & 0xF;
+ int hi = x >> 4;
+
+ lo = lo < 0xA ? lo : 0;
+ hi = hi < 0xA ? hi : 0;
+ return lo + hi * 10;
+}
+
static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
unsigned short payload)
{
@@ -627,6 +640,9 @@ static int ncsi_rsp_handler_oem_mlx_gma(struct ncsi_request *nr)
saddr.sa_family = ndev->type;
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
memcpy(saddr.sa_data, &rsp->data[MLX_MAC_ADDR_OFFSET], ETH_ALEN);
+ /* Set the flag for GMA command which should only be called once */
+ ndp->gma_flag = 1;
+
ret = ops->ndo_set_mac_address(ndev, &saddr);
if (ret < 0)
netdev_warn(ndev, "NCSI: Writing mac address to device failed\n");
@@ -671,6 +687,9 @@ static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr)
if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
return -ENXIO;
+ /* Set the flag for GMA command which should only be called once */
+ ndp->gma_flag = 1;
+
ret = ops->ndo_set_mac_address(ndev, &saddr);
if (ret < 0)
netdev_warn(ndev, "NCSI: Writing mac address to device failed\n");
@@ -749,9 +768,18 @@ static int ncsi_rsp_handler_gvi(struct ncsi_request *nr)
if (!nc)
return -ENODEV;
- /* Update to channel's version info */
+ /* Update channel's version info
+ *
+ * Major, minor, and update fields are supposed to be
+ * unsigned integers encoded as packed BCD.
+ *
+ * Alpha1 and alpha2 are ISO/IEC 8859-1 characters.
+ */
ncv = &nc->version;
- ncv->version = ntohl(rsp->ncsi_version);
+ ncv->major = decode_bcd_u8(rsp->major);
+ ncv->minor = decode_bcd_u8(rsp->minor);
+ ncv->update = decode_bcd_u8(rsp->update);
+ ncv->alpha1 = rsp->alpha1;
ncv->alpha2 = rsp->alpha2;
memcpy(ncv->fw_name, rsp->fw_name, 12);
ncv->fw_version = ntohl(rsp->fw_version);
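For reference, the packed-BCD decoding introduced above can be exercised in a few lines of userspace C. This is a minimal sketch assuming nothing beyond libc: decode_bcd_u8() is copied verbatim from the hunk, while the main() harness and its expected values are illustration only, not part of the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Each nibble is one decimal digit; nibbles in [0xA, 0xF] (e.g. the
 * 0xFF "field not present" encoding) contribute zero. */
static uint8_t decode_bcd_u8(uint8_t x)
{
	int lo = x & 0xF;
	int hi = x >> 4;

	lo = lo < 0xA ? lo : 0;
	hi = hi < 0xA ? hi : 0;
	return lo + hi * 10;
}

int main(void)
{
	assert(decode_bcd_u8(0x12) == 12);	/* plain BCD */
	assert(decode_bcd_u8(0x09) == 9);
	assert(decode_bcd_u8(0xFF) == 0);	/* optional field absent */
	assert(decode_bcd_u8(0x1F) == 10);	/* invalid low nibble dropped */
	printf("BCD decode checks passed\n");
	return 0;
}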
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index ef72819d9d31..d569915da003 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -118,7 +118,6 @@ config NF_CONNTRACK_ZONES
config NF_CONNTRACK_PROCFS
bool "Supply CT list in procfs (OBSOLETE)"
- default y
depends on PROC_FS
---help---
This option enables for the list of known conntrack entries
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 451b2df998ea..7c38b8fdc7e0 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -290,12 +290,6 @@ nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum))
return NULL;
return net->nf.hooks_ipv6 + hooknum;
-#if IS_ENABLED(CONFIG_DECNET)
- case NFPROTO_DECNET:
- if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_decnet) <= hooknum))
- return NULL;
- return net->nf.hooks_decnet + hooknum;
-#endif
default:
WARN_ON_ONCE(1);
return NULL;
@@ -577,9 +571,11 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
rcu_read_lock();
ct_hook = rcu_dereference(nf_ct_hook);
- BUG_ON(ct_hook == NULL);
- ct_hook->destroy(nfct);
+ if (ct_hook)
+ ct_hook->destroy(nfct);
rcu_read_unlock();
+
+ WARN_ON(!ct_hook);
}
EXPORT_SYMBOL(nf_conntrack_destroy);
@@ -625,10 +621,6 @@ static int __net_init netfilter_net_init(struct net *net)
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
__netfilter_net_init(net->nf.hooks_bridge, ARRAY_SIZE(net->nf.hooks_bridge));
#endif
-#if IS_ENABLED(CONFIG_DECNET)
- __netfilter_net_init(net->nf.hooks_decnet, ARRAY_SIZE(net->nf.hooks_decnet));
-#endif
-
#ifdef CONFIG_PROC_FS
net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
net->proc_net);
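The nf_conntrack_destroy() change above swaps a BUG_ON() for a tolerant lookup: the hook is read under RCU, skipped if already gone, and the anomaly is reported with WARN_ON() once the read-side critical section ends. A hedged kernel-style sketch of that shape follows; my_hook and my_hook_ptr are illustrative names, not kernel symbols, and this is not a drop-in function.

struct my_hook {
	void (*destroy)(void *obj);
};

static struct my_hook __rcu *my_hook_ptr;

static void destroy_via_hook(void *obj)
{
	const struct my_hook *hook;

	rcu_read_lock();
	hook = rcu_dereference(my_hook_ptr);
	if (hook)
		hook->destroy(obj);	/* hook may legitimately be unregistered */
	rcu_read_unlock();

	WARN_ON(!hook);			/* diagnose the race, never BUG() */
}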
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index bfd4b42ba305..288675ce22d0 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -28,6 +28,7 @@
#define mtype_del IPSET_TOKEN(MTYPE, _del)
#define mtype_list IPSET_TOKEN(MTYPE, _list)
#define mtype_gc IPSET_TOKEN(MTYPE, _gc)
+#define mtype_cancel_gc IPSET_TOKEN(MTYPE, _cancel_gc)
#define mtype MTYPE
#define get_ext(set, map, id) ((map)->extensions + ((set)->dsize * (id)))
@@ -57,9 +58,6 @@ mtype_destroy(struct ip_set *set)
{
struct mtype *map = set->data;
- if (SET_WITH_TIMEOUT(set))
- del_timer_sync(&map->gc);
-
if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
mtype_ext_cleanup(set);
ip_set_free(map->members);
@@ -288,6 +286,15 @@ mtype_gc(struct timer_list *t)
add_timer(&map->gc);
}
+static void
+mtype_cancel_gc(struct ip_set *set)
+{
+ struct mtype *map = set->data;
+
+ if (SET_WITH_TIMEOUT(set))
+ del_timer_sync(&map->gc);
+}
+
static const struct ip_set_type_variant mtype = {
.kadt = mtype_kadt,
.uadt = mtype_uadt,
@@ -301,6 +308,7 @@ static const struct ip_set_type_variant mtype = {
.head = mtype_head,
.list = mtype_list,
.same_set = mtype_same_set,
+ .cancel_gc = mtype_cancel_gc,
};
#endif /* __IP_SET_BITMAP_IP_GEN_H */
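The new ->cancel_gc callback splits garbage-collector teardown out of ->destroy, so a caller can stop the timer while the set is still fully valid and only then release memory. An illustrative ordering under that split, reusing identifiers added elsewhere in this series (a sketch, not a drop-in function):

static void set_teardown(struct ip_set *set)
{
	/* Stop the GC timer/work before any memory is released ... */
	set->variant->cancel_gc(set);

	/* ... then defer the actual free until RCU readers drain. */
	call_rcu(&set->rcu, ip_set_destroy_set_rcu);
}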
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index 6e3cf4d19ce8..194d775ce4cc 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -296,8 +296,8 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
return -IPSET_ERR_BITMAP_RANGE;
pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask);
- hosts = 2 << (32 - netmask - 1);
- elements = 2 << (netmask - mask_bits - 1);
+ hosts = 2U << (32 - netmask - 1);
+ elements = 2UL << (netmask - mask_bits - 1);
}
if (elements > IPSET_BITMAP_MAX_RANGE + 1)
return -IPSET_ERR_BITMAP_RANGE_SIZE;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 16ae770f049d..544106475d4f 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -61,6 +61,8 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
ip_set_dereference((inst)->ip_set_list)[id]
#define ip_set_ref_netlink(inst,id) \
rcu_dereference_raw((inst)->ip_set_list)[id]
+#define ip_set_dereference_nfnl(p) \
+ rcu_dereference_check(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
/* The set types are implemented in modules and registered set types
* can be found in ip_set_type_list. Adding/deleting types is
@@ -530,6 +532,14 @@ __ip_set_put(struct ip_set *set)
/* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need
* a separate reference counter
*/
+static void
+__ip_set_get_netlink(struct ip_set *set)
+{
+ write_lock_bh(&ip_set_ref_lock);
+ set->ref_netlink++;
+ write_unlock_bh(&ip_set_ref_lock);
+}
+
static inline void
__ip_set_put_netlink(struct ip_set *set)
{
@@ -548,15 +558,10 @@ __ip_set_put_netlink(struct ip_set *set)
static inline struct ip_set *
ip_set_rcu_get(struct net *net, ip_set_id_t index)
{
- struct ip_set *set;
struct ip_set_net *inst = ip_set_pernet(net);
- rcu_read_lock();
- /* ip_set_list itself needs to be protected */
- set = rcu_dereference(inst->ip_set_list)[index];
- rcu_read_unlock();
-
- return set;
+ /* ip_set_list and the set pointer need to be protected */
+ return ip_set_dereference_nfnl(inst->ip_set_list)[index];
}
static inline void
@@ -811,20 +816,9 @@ static struct nlmsghdr *
start_msg(struct sk_buff *skb, u32 portid, u32 seq, unsigned int flags,
enum ipset_cmd cmd)
{
- struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
-
- nlh = nlmsg_put(skb, portid, seq, nfnl_msg_type(NFNL_SUBSYS_IPSET, cmd),
- sizeof(*nfmsg), flags);
- if (!nlh)
- return NULL;
-
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = NFPROTO_IPV4;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
-
- return nlh;
+ return nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_IPSET, cmd), flags,
+ NFPROTO_IPV4, NFNETLINK_V0, 0);
}
/* Create a set */
@@ -1012,6 +1006,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
return ret;
cleanup:
+ set->variant->cancel_gc(set);
set->variant->destroy(set);
put_out:
module_put(set->type->me);
@@ -1040,6 +1035,14 @@ ip_set_destroy_set(struct ip_set *set)
kfree(set);
}
+static void
+ip_set_destroy_set_rcu(struct rcu_head *head)
+{
+ struct ip_set *set = container_of(head, struct ip_set, rcu);
+
+ ip_set_destroy_set(set);
+}
+
static int ip_set_destroy(struct net *net, struct sock *ctnl,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const attr[],
@@ -1053,8 +1056,6 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl,
if (unlikely(protocol_min_failed(attr)))
return -IPSET_ERR_PROTOCOL;
- /* Must wait for flush to be really finished in list:set */
- rcu_barrier();
/* Commands are serialized and references are
* protected by the ip_set_ref_lock.
@@ -1066,8 +1067,10 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl,
* counter, so if it's already zero, we can proceed
* without holding the lock.
*/
- read_lock_bh(&ip_set_ref_lock);
if (!attr[IPSET_ATTR_SETNAME]) {
+ /* Must wait for flush to be really finished in list:set */
+ rcu_barrier();
+ read_lock_bh(&ip_set_ref_lock);
for (i = 0; i < inst->ip_set_max; i++) {
s = ip_set(inst, i);
if (s && (s->ref || s->ref_netlink)) {
@@ -1081,12 +1084,17 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl,
s = ip_set(inst, i);
if (s) {
ip_set(inst, i) = NULL;
+ /* Must cancel garbage collectors */
+ s->variant->cancel_gc(s);
ip_set_destroy_set(s);
}
}
/* Modified by ip_set_destroy() only, which is serialized */
inst->is_destroyed = false;
} else {
+ u16 features = 0;
+
+ read_lock_bh(&ip_set_ref_lock);
s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
&i);
if (!s) {
@@ -1096,10 +1104,16 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl,
ret = -IPSET_ERR_BUSY;
goto out;
}
+ features = s->type->features;
ip_set(inst, i) = NULL;
read_unlock_bh(&ip_set_ref_lock);
-
- ip_set_destroy_set(s);
+ if (features & IPSET_TYPE_NAME) {
+ /* Must wait for flush to be really finished */
+ rcu_barrier();
+ }
+ /* Must cancel garbage collectors */
+ s->variant->cancel_gc(s);
+ call_rcu(&s->rcu, ip_set_destroy_set_rcu);
}
return 0;
out:
@@ -1539,6 +1553,14 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
do {
+ if (retried) {
+ __ip_set_get_netlink(set);
+ nfnl_unlock(NFNL_SUBSYS_IPSET);
+ cond_resched();
+ nfnl_lock(NFNL_SUBSYS_IPSET);
+ __ip_set_put_netlink(set);
+ }
+
ip_set_lock(set);
ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
ip_set_unlock(set);
@@ -2215,6 +2237,7 @@ ip_set_net_exit(struct net *net)
set = ip_set(inst, i);
if (set) {
ip_set(inst, i) = NULL;
+ set->variant->cancel_gc(set);
ip_set_destroy_set(set);
}
}
@@ -2262,8 +2285,11 @@ ip_set_fini(void)
{
nf_unregister_sockopt(&so_set);
nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
-
unregister_pernet_subsys(&ip_set_net_ops);
+
+ /* Wait for call_rcu() in destroy */
+ rcu_barrier();
+
pr_debug("these are the famous last words\n");
}
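The rcu_barrier() added to ip_set_fini() follows a general rule: a module whose teardown paths queue call_rcu() callbacks must wait for them before its text is unloaded, or a late callback jumps into freed module memory. A minimal sketch of the idiom, with my_net_ops standing in for the real registration:

static void __exit my_module_exit(void)
{
	unregister_pernet_subsys(&my_net_ops);

	/* Wait for every call_rcu() queued by the destroy paths;
	 * one could otherwise fire after the module text is gone. */
	rcu_barrier();
}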
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 1b44dfa7ba85..4346cae25a4a 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -235,6 +235,7 @@ htable_size(u8 hbits)
#undef mtype_gc_do
#undef mtype_gc
#undef mtype_gc_init
+#undef mtype_cancel_gc
#undef mtype_variant
#undef mtype_data_match
@@ -279,6 +280,7 @@ htable_size(u8 hbits)
#define mtype_gc_do IPSET_TOKEN(MTYPE, _gc_do)
#define mtype_gc IPSET_TOKEN(MTYPE, _gc)
#define mtype_gc_init IPSET_TOKEN(MTYPE, _gc_init)
+#define mtype_cancel_gc IPSET_TOKEN(MTYPE, _cancel_gc)
#define mtype_variant IPSET_TOKEN(MTYPE, _variant)
#define mtype_data_match IPSET_TOKEN(MTYPE, _data_match)
@@ -444,7 +446,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
u32 i;
for (i = 0; i < jhash_size(t->htable_bits); i++) {
- n = __ipset_dereference(hbucket(t, i));
+ n = (__force struct hbucket *)hbucket(t, i);
if (!n)
continue;
if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
@@ -464,10 +466,7 @@ mtype_destroy(struct ip_set *set)
struct htype *h = set->data;
struct list_head *l, *lt;
- if (SET_WITH_TIMEOUT(set))
- cancel_delayed_work_sync(&h->gc.dwork);
-
- mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true);
+ mtype_ahash_destroy(set, (__force struct htable *)h->table, true);
list_for_each_safe(l, lt, &h->ad) {
list_del(l);
kfree(l);
@@ -613,6 +612,15 @@ mtype_gc_init(struct htable_gc *gc)
queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ);
}
+static void
+mtype_cancel_gc(struct ip_set *set)
+{
+ struct htype *h = set->data;
+
+ if (SET_WITH_TIMEOUT(set))
+ cancel_delayed_work_sync(&h->gc.dwork);
+}
+
static int
mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags);
@@ -1433,6 +1441,7 @@ static const struct ip_set_type_variant mtype_variant = {
.uref = mtype_uref,
.resize = mtype_resize,
.same_set = mtype_same_set,
+ .cancel_gc = mtype_cancel_gc,
.region_lock = true,
};
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index a82b70e8b9a6..aed319815358 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -35,6 +35,7 @@ MODULE_ALIAS("ip_set_hash:net,port,net");
#define IP_SET_HASH_WITH_PROTO
#define IP_SET_HASH_WITH_NETS
#define IPSET_NET_COUNT 2
+#define IP_SET_HASH_WITH_NET0
/* IPv4 variant */
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 63908123f7ba..64cc3e2131f3 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -426,9 +426,6 @@ list_set_destroy(struct ip_set *set)
struct list_set *map = set->data;
struct set_elem *e, *n;
- if (SET_WITH_TIMEOUT(set))
- del_timer_sync(&map->gc);
-
list_for_each_entry_safe(e, n, &map->members, list) {
list_del(&e->list);
ip_set_put_byindex(map->net, e->id);
@@ -545,6 +542,15 @@ list_set_same_set(const struct ip_set *a, const struct ip_set *b)
a->extensions == b->extensions;
}
+static void
+list_set_cancel_gc(struct ip_set *set)
+{
+ struct list_set *map = set->data;
+
+ if (SET_WITH_TIMEOUT(set))
+ del_timer_sync(&map->gc);
+}
+
static const struct ip_set_type_variant set_variant = {
.kadt = list_set_kadt,
.uadt = list_set_uadt,
@@ -558,6 +564,7 @@ static const struct ip_set_type_variant set_variant = {
.head = list_set_head,
.list = list_set_list,
.same_set = list_set_same_set,
+ .cancel_gc = list_set_cancel_gc,
};
static void
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index f9b16f2b2219..fdacbc3c15be 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -599,13 +599,19 @@ static const struct seq_operations ip_vs_app_seq_ops = {
int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs)
{
INIT_LIST_HEAD(&ipvs->app_list);
- proc_create_net("ip_vs_app", 0, ipvs->net->proc_net, &ip_vs_app_seq_ops,
- sizeof(struct seq_net_private));
+#ifdef CONFIG_PROC_FS
+ if (!proc_create_net("ip_vs_app", 0, ipvs->net->proc_net,
+ &ip_vs_app_seq_ops,
+ sizeof(struct seq_net_private)))
+ return -ENOMEM;
+#endif
return 0;
}
void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs)
{
unregister_ip_vs_app(ipvs, NULL /* all */);
+#ifdef CONFIG_PROC_FS
remove_proc_entry("ip_vs_app", ipvs->net->proc_net);
+#endif
}
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index a189079a6ea5..d66548d2e5de 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1225,8 +1225,8 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
* The drop rate array needs tuning for real environments.
* Called from timer bh only => no locking
*/
- static const char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
- static char todrop_counter[9] = {0};
+ static const signed char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+ static signed char todrop_counter[9] = {0};
int i;
/* if the conn entry hasn't lasted for 60 seconds, don't drop it.
@@ -1373,20 +1373,36 @@ int __net_init ip_vs_conn_net_init(struct netns_ipvs *ipvs)
{
atomic_set(&ipvs->conn_count, 0);
- proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net,
- &ip_vs_conn_seq_ops, sizeof(struct ip_vs_iter_state));
- proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net,
- &ip_vs_conn_sync_seq_ops,
- sizeof(struct ip_vs_iter_state));
+#ifdef CONFIG_PROC_FS
+ if (!proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net,
+ &ip_vs_conn_seq_ops,
+ sizeof(struct ip_vs_iter_state)))
+ goto err_conn;
+
+ if (!proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net,
+ &ip_vs_conn_sync_seq_ops,
+ sizeof(struct ip_vs_iter_state)))
+ goto err_conn_sync;
+#endif
+
return 0;
+
+#ifdef CONFIG_PROC_FS
+err_conn_sync:
+ remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
+err_conn:
+ return -ENOMEM;
+#endif
}
void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs)
{
/* flush all the connection entries first */
ip_vs_conn_flush(ipvs);
+#ifdef CONFIG_PROC_FS
remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
remove_proc_entry("ip_vs_conn_sync", ipvs->net->proc_net);
+#endif
}
int __init ip_vs_conn_init(void)
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 07242503d74d..2bc82dabfe3b 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1759,6 +1759,7 @@ static int
proc_do_sync_threshold(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
+ struct netns_ipvs *ipvs = table->extra2;
int *valp = table->data;
int val[2];
int rc;
@@ -1768,6 +1769,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write,
.mode = table->mode,
};
+ mutex_lock(&ipvs->sync_mutex);
memcpy(val, valp, sizeof(val));
rc = proc_dointvec(&tmp, write, buffer, lenp, ppos);
if (write) {
@@ -1777,6 +1779,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write,
else
memcpy(valp, val, sizeof(val));
}
+ mutex_unlock(&ipvs->sync_mutex);
return rc;
}
@@ -4034,6 +4037,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD;
ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
tbl[idx].data = &ipvs->sysctl_sync_threshold;
+ tbl[idx].extra2 = ipvs;
tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD;
tbl[idx++].data = &ipvs->sysctl_sync_refresh_period;
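The sync_threshold fix works by stashing the per-netns context in ctl_table->extra2 when the table is built, so the handler can take ipvs->sync_mutex around the read-modify-write. A hedged sketch of the shape (my_ns and my_handler are illustrative; the real handler additionally snapshots both array values into a temporary table so a rejected write never lands partially):

static int my_handler(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct my_ns *ns = table->extra2;	/* stored at registration */
	int ret;

	mutex_lock(&ns->lock);			/* serialize with the updater */
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	mutex_unlock(&ns->lock);
	return ret;
}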
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 0c1bc654245c..fb1dc205e3b5 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1444,7 +1444,7 @@ static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
sin.sin_addr.s_addr = addr;
sin.sin_port = 0;
- return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
+ return kernel_bind(sock, (struct sockaddr *)&sin, sizeof(sin));
}
static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
@@ -1510,8 +1510,8 @@ static int make_send_sock(struct netns_ipvs *ipvs, int id,
}
get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->mcfg, id);
- result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
- salen, 0);
+ result = kernel_connect(sock, (struct sockaddr *)&mcast_addr,
+ salen, 0);
if (result < 0) {
pr_err("Error connecting to the multicast addr\n");
goto error;
@@ -1551,7 +1551,7 @@ static int make_receive_sock(struct netns_ipvs *ipvs, int id,
get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
sock->sk->sk_bound_dev_if = dev->ifindex;
- result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
+ result = kernel_bind(sock, (struct sockaddr *)&mcast_addr, salen);
if (result < 0) {
pr_err("Error binding to the multicast addr\n");
goto error;
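Switching from direct sock->ops->bind()/connect() calls to kernel_bind()/kernel_connect() routes in-kernel socket setup through the exported wrappers, keeping callers like the sync daemon insulated from proto_ops details. A minimal usage fragment, assuming a kernel context that already owns struct socket *sock and a filled-in struct sockaddr_in peer (the loopback address here is illustrative):

struct sockaddr_in sin = {
	.sin_family      = AF_INET,
	.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	.sin_port        = 0,		/* any local port */
};
int err;

err = kernel_bind(sock, (struct sockaddr *)&sin, sizeof(sin));
if (err < 0)
	return err;

err = kernel_connect(sock, (struct sockaddr *)&peer, sizeof(peer), 0);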
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index cefc39878b1a..5c81772158d8 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -271,7 +271,7 @@ static inline bool decrement_ttl(struct netns_ipvs *ipvs,
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_TIME_EXCEED,
ICMPV6_EXC_HOPLIMIT, 0);
- __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
+ IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
return false;
}
@@ -286,7 +286,7 @@ static inline bool decrement_ttl(struct netns_ipvs *ipvs,
{
if (ip_hdr(skb)->ttl <= 1) {
/* Tell the sender its packet died... */
- __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
+ IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
return false;
}
@@ -1231,6 +1231,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
skb->transport_header = skb->network_header;
skb_set_inner_ipproto(skb, next_protocol);
+ skb_set_inner_mac_header(skb, skb_inner_network_offset(skb));
if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
bool check = false;
@@ -1379,6 +1380,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
skb->transport_header = skb->network_header;
skb_set_inner_ipproto(skb, next_protocol);
+ skb_set_inner_mac_header(skb, skb_inner_network_offset(skb));
if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
bool check = false;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index d9b6f2001d00..fb1f12bb0ff2 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1943,6 +1943,9 @@ static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
return 0;
helper = rcu_dereference(help->helper);
+ if (!helper)
+ return 0;
+
if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
return 0;
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 9eca90414bb7..b22801f97bce 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -382,7 +382,7 @@ static int help(struct sk_buff *skb,
int ret;
u32 seq;
int dir = CTINFO2DIR(ctinfo);
- unsigned int uninitialized_var(matchlen), uninitialized_var(matchoff);
+ unsigned int matchlen, matchoff;
struct nf_ct_ftp_master *ct_ftp_info = nfct_help_data(ct);
struct nf_conntrack_expect *exp;
union nf_inet_addr *daddr;
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index 573cb4481481..814857ae3b81 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -533,6 +533,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
/* Get fields bitmap */
if (nf_h323_error_boundary(bs, 0, f->sz))
return H323_ERROR_BOUND;
+ if (f->sz > 32)
+ return H323_ERROR_RANGE;
bmp = get_bitmap(bs, f->sz);
if (base)
*(unsigned int *)base = bmp;
@@ -589,6 +591,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
bmp2_len = get_bits(bs, 7) + 1;
if (nf_h323_error_boundary(bs, 0, bmp2_len))
return H323_ERROR_BOUND;
+ if (bmp2_len > 32)
+ return H323_ERROR_RANGE;
bmp2 = get_bitmap(bs, bmp2_len);
bmp |= bmp2 >> f->sz;
if (base)
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 118f415928ae..32cc91f5ba99 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -404,6 +404,9 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);
+ if (!nf_ct_helper_hash)
+ return -ENOENT;
+
if (me->expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
return -EINVAL;
@@ -587,4 +590,5 @@ void nf_conntrack_helper_fini(void)
{
nf_ct_extend_unregister(&helper_extend);
kvfree(nf_ct_helper_hash);
+ nf_ct_helper_hash = NULL;
}
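Nulling nf_ct_helper_hash after kvfree() and checking it on the register path turns a would-be use-after-free into a clean -ENOENT. A sketch of the defensive idiom, with illustrative names:

static struct hlist_head *my_hash;

int my_register(struct my_item *item)
{
	if (!my_hash)		/* subsystem already torn down */
		return -ENOENT;

	/* ... insert item into my_hash ... */
	return 0;
}

void my_fini(void)
{
	kvfree(my_hash);
	my_hash = NULL;		/* make late registration fail safely */
}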
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index e40988a2f22f..65b5b05fe38d 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -148,15 +148,37 @@ static int help(struct sk_buff *skb, unsigned int protoff,
data = ib_ptr;
data_limit = ib_ptr + skb->len - dataoff;
- /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
- * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
- while (data < data_limit - (19 + MINMATCHLEN)) {
- if (memcmp(data, "\1DCC ", 5)) {
+ /* Skip any whitespace */
+ while (data < data_limit - 10) {
+ if (*data == ' ' || *data == '\r' || *data == '\n')
+ data++;
+ else
+ break;
+ }
+
+ /* strlen("PRIVMSG x ")=10 */
+ if (data < data_limit - 10) {
+ if (strncasecmp("PRIVMSG ", data, 8))
+ goto out;
+ data += 8;
+ }
+
+ /* strlen(" :\1DCC SENT t AAAAAAAA P\1\n")=26
+ * 7+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=26
+ */
+ while (data < data_limit - (21 + MINMATCHLEN)) {
+ /* Find first " :", the start of message */
+ if (memcmp(data, " :", 2)) {
data++;
continue;
}
+ data += 2;
+
+ /* then check that place only for the DCC command */
+ if (memcmp(data, "\1DCC ", 5))
+ goto out;
data += 5;
- /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */
+ /* we have at least (21+MINMATCHLEN)-(2+5) bytes valid data left */
iph = ip_hdr(skb);
pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
@@ -172,7 +194,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
pr_debug("DCC %s detected\n", dccprotos[i]);
/* we have at least
- * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
+ * (21+MINMATCHLEN)-7-dccprotos[i].matchlen bytes valid
* data left (== 14/13 bytes) */
if (parse_dcc(data, data_limit, &dcc_ip,
&dcc_port, &addr_beg_p, &addr_end_p)) {
@@ -185,8 +207,9 @@ static int help(struct sk_buff *skb, unsigned int protoff,
/* dcc_ip can be the internal OR external (NAT'ed) IP */
tuple = &ct->tuplehash[dir].tuple;
- if (tuple->src.u3.ip != dcc_ip &&
- tuple->dst.u3.ip != dcc_ip) {
+ if ((tuple->src.u3.ip != dcc_ip &&
+ ct->tuplehash[!dir].tuple.dst.u3.ip != dcc_ip) ||
+ dcc_port == 0) {
net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
&tuple->src.u3.ip,
&dcc_ip, dcc_port);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index bc6f0c8874f8..45d02185f4b9 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -515,20 +515,15 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
{
const struct nf_conntrack_zone *zone;
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
struct nlattr *nest_parms;
unsigned int flags = portid ? NLM_F_MULTI : 0, event;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, nf_ct_l3num(ct),
+ NFNETLINK_V0, 0);
+ if (!nlh)
goto nlmsg_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = nf_ct_l3num(ct);
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
-
zone = nf_ct_zone(ct);
nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
@@ -685,7 +680,6 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
const struct nf_conntrack_zone *zone;
struct net *net;
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
struct nlattr *nest_parms;
struct nf_conn *ct = item->ct;
struct sk_buff *skb;
@@ -715,15 +709,11 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
goto errout;
type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, type);
- nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, item->portid, 0, type, flags, nf_ct_l3num(ct),
+ NFNETLINK_V0, 0);
+ if (!nlh)
goto nlmsg_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = nf_ct_l3num(ct);
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
-
zone = nf_ct_zone(ct);
nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
@@ -1229,9 +1219,6 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
{
- if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
- return 0;
-
return ctnetlink_filter_match(ct, data);
}
@@ -1294,11 +1281,6 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
ct = nf_ct_tuplehash_to_ctrack(h);
- if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) {
- nf_ct_put(ct);
- return -EBUSY;
- }
-
if (cda[CTA_ID]) {
__be32 id = nla_get_be32(cda[CTA_ID]);
@@ -2086,12 +2068,15 @@ ctnetlink_create_conntrack(struct net *net,
err = nf_conntrack_hash_check_insert(ct);
if (err < 0)
- goto err2;
+ goto err3;
rcu_read_unlock();
return ct;
+err3:
+ if (ct->master)
+ nf_ct_put(ct->master);
err2:
rcu_read_unlock();
err1:
@@ -2205,20 +2190,15 @@ ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
__u16 cpu, const struct ip_conntrack_stat *st)
{
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
unsigned int flags = portid ? NLM_F_MULTI : 0, event;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
IPCTNL_MSG_CT_GET_STATS_CPU);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
+ NFNETLINK_V0, htons(cpu));
+ if (!nlh)
goto nlmsg_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = AF_UNSPEC;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = htons(cpu);
-
if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
@@ -2289,20 +2269,15 @@ ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
struct net *net)
{
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
unsigned int flags = portid ? NLM_F_MULTI : 0, event;
unsigned int nr_conntracks = atomic_read(&net->ct.count);
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
+ NFNETLINK_V0, 0);
+ if (!nlh)
goto nlmsg_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = AF_UNSPEC;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
-
if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
goto nla_put_failure;
@@ -2709,7 +2684,9 @@ nla_put_failure:
return -1;
}
+#if IS_ENABLED(CONFIG_NF_NAT)
static const union nf_inet_addr any_addr;
+#endif
static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
{
@@ -2806,19 +2783,14 @@ ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
int event, const struct nf_conntrack_expect *exp)
{
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
unsigned int flags = portid ? NLM_F_MULTI : 0;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, event);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags,
+ exp->tuple.src.l3num, NFNETLINK_V0, 0);
+ if (!nlh)
goto nlmsg_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = exp->tuple.src.l3num;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
-
if (ctnetlink_exp_dump_expect(skb, exp) < 0)
goto nla_put_failure;
@@ -2838,7 +2810,6 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
struct nf_conntrack_expect *exp = item->exp;
struct net *net = nf_ct_exp_net(exp);
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
struct sk_buff *skb;
unsigned int type, group;
int flags = 0;
@@ -2861,15 +2832,11 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
goto errout;
type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, type);
- nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, item->portid, 0, type, flags,
+ exp->tuple.src.l3num, NFNETLINK_V0, 0);
+ if (!nlh)
goto nlmsg_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = exp->tuple.src.l3num;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
-
if (ctnetlink_exp_dump_expect(skb, exp) < 0)
goto nla_put_failure;
@@ -3209,10 +3176,12 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
return 0;
}
+#if IS_ENABLED(CONFIG_NF_NAT)
static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
[CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
[CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
};
+#endif
static int
ctnetlink_parse_expect_nat(const struct nlattr *attr,
@@ -3437,20 +3406,15 @@ ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
const struct ip_conntrack_stat *st)
{
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
unsigned int flags = portid ? NLM_F_MULTI : 0, event;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
IPCTNL_MSG_EXP_GET_STATS_CPU);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
+ NFNETLINK_V0, htons(cpu));
+ if (!nlh)
goto nlmsg_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = AF_UNSPEC;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = htons(cpu);
-
if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
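Every hunk in this file collapses the same open-coded sequence, an nlmsg_put() followed by filling struct nfgenmsg by hand, into a single nfnl_msg_put() call. The sketch below mirrors the pattern being removed and should match what the helper consolidates; the authoritative definition lives in the nfnetlink headers:

static inline struct nlmsghdr *
nfnl_msg_put_sketch(struct sk_buff *skb, u32 portid, u32 seq, int type,
		    int flags, u8 family, u8 version, __be16 res_id)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*nfmsg), flags);
	if (!nlh)
		return NULL;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = family;
	nfmsg->version = version;
	nfmsg->res_id = res_id;
	return nlh;
}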
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index b3f4a334f9d7..67b8dedef293 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -430,9 +430,19 @@ static bool dccp_error(const struct dccp_hdr *dh,
struct sk_buff *skb, unsigned int dataoff,
const struct nf_hook_state *state)
{
+ static const unsigned long require_seq48 = 1 << DCCP_PKT_REQUEST |
+ 1 << DCCP_PKT_RESPONSE |
+ 1 << DCCP_PKT_CLOSEREQ |
+ 1 << DCCP_PKT_CLOSE |
+ 1 << DCCP_PKT_RESET |
+ 1 << DCCP_PKT_SYNC |
+ 1 << DCCP_PKT_SYNCACK;
unsigned int dccp_len = skb->len - dataoff;
unsigned int cscov;
const char *msg;
+ u8 type;
+
+ BUILD_BUG_ON(DCCP_PKT_INVALID >= BITS_PER_LONG);
if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
dh->dccph_doff * 4 > dccp_len) {
@@ -457,10 +467,17 @@ static bool dccp_error(const struct dccp_hdr *dh,
goto out_invalid;
}
- if (dh->dccph_type >= DCCP_PKT_INVALID) {
+ type = dh->dccph_type;
+ if (type >= DCCP_PKT_INVALID) {
msg = "nf_ct_dccp: reserved packet type ";
goto out_invalid;
}
+
+ if (test_bit(type, &require_seq48) && !dh->dccph_x) {
+ msg = "nf_ct_dccp: type lacks 48bit sequence numbers";
+ goto out_invalid;
+ }
+
return false;
out_invalid:
nf_l4proto_log_invalid(skb, state->net, state->pf,
@@ -468,24 +485,53 @@ out_invalid:
return true;
}
+struct nf_conntrack_dccp_buf {
+ struct dccp_hdr dh; /* generic header part */
+ struct dccp_hdr_ext ext; /* optional depending dh->dccph_x */
+ union { /* depends on header type */
+ struct dccp_hdr_ack_bits ack;
+ struct dccp_hdr_request req;
+ struct dccp_hdr_response response;
+ struct dccp_hdr_reset rst;
+ } u;
+};
+
+static struct dccp_hdr *
+dccp_header_pointer(const struct sk_buff *skb, int offset, const struct dccp_hdr *dh,
+ struct nf_conntrack_dccp_buf *buf)
+{
+ unsigned int hdrlen = __dccp_hdr_len(dh);
+
+ if (hdrlen > sizeof(*buf))
+ return NULL;
+
+ return skb_header_pointer(skb, offset, hdrlen, buf);
+}
+
int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state)
{
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
- struct dccp_hdr _dh, *dh;
+ struct nf_conntrack_dccp_buf _dh;
u_int8_t type, old_state, new_state;
enum ct_dccp_roles role;
unsigned int *timeouts;
+ struct dccp_hdr *dh;
- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(*dh), &_dh.dh);
if (!dh)
return NF_DROP;
if (dccp_error(dh, skb, dataoff, state))
return -NF_ACCEPT;
+ /* pull again, including possible 48 bit sequences and subtype header */
+ dh = dccp_header_pointer(skb, dataoff, dh, &_dh);
+ if (!dh)
+ return NF_DROP;
+
type = dh->dccph_type;
if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh))
return -NF_ACCEPT;
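The require_seq48 constant packs one bit per DCCP packet type that mandates 48-bit sequence numbers, so the check reduces to a single test_bit(). The userspace demo below reproduces the membership test; the enum follows the ordering of the kernel's DCCP_PKT_* values but is illustrative, not taken from a kernel header.

#include <stdio.h>

enum { PKT_REQUEST, PKT_RESPONSE, PKT_DATA, PKT_ACK, PKT_DATAACK,
       PKT_CLOSEREQ, PKT_CLOSE, PKT_RESET, PKT_SYNC, PKT_SYNCACK,
       PKT_INVALID };

int main(void)
{
	const unsigned long require_seq48 =
		1UL << PKT_REQUEST | 1UL << PKT_RESPONSE |
		1UL << PKT_CLOSEREQ | 1UL << PKT_CLOSE |
		1UL << PKT_RESET | 1UL << PKT_SYNC | 1UL << PKT_SYNCACK;
	int type;

	for (type = 0; type < PKT_INVALID; type++)
		printf("type %d %s 48-bit seqnos\n", type,
		       (require_seq48 >> type) & 1 ? "requires"
						   : "may omit");
	return 0;
}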
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
index 6f9144e1f1c1..ee45dbf1b035 100644
--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
@@ -128,6 +128,56 @@ static void icmpv6_error_log(const struct sk_buff *skb,
IPPROTO_ICMPV6, "%s", msg);
}
+static noinline_for_stack int
+nf_conntrack_icmpv6_redirect(struct nf_conn *tmpl, struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state)
+{
+ u8 hl = ipv6_hdr(skb)->hop_limit;
+ union nf_inet_addr outer_daddr;
+ union {
+ struct nd_opt_hdr nd_opt;
+ struct rd_msg rd_msg;
+ } tmp;
+ const struct nd_opt_hdr *nd_opt;
+ const struct rd_msg *rd_msg;
+
+ rd_msg = skb_header_pointer(skb, dataoff, sizeof(*rd_msg), &tmp.rd_msg);
+ if (!rd_msg) {
+ icmpv6_error_log(skb, state, "short redirect");
+ return -NF_ACCEPT;
+ }
+
+ if (rd_msg->icmph.icmp6_code != 0)
+ return NF_ACCEPT;
+
+ if (hl != 255 || !(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
+ icmpv6_error_log(skb, state, "invalid saddr or hoplimit for redirect");
+ return -NF_ACCEPT;
+ }
+
+ dataoff += sizeof(*rd_msg);
+
+ /* warning: rd_msg no longer usable after this call */
+ nd_opt = skb_header_pointer(skb, dataoff, sizeof(*nd_opt), &tmp.nd_opt);
+ if (!nd_opt || nd_opt->nd_opt_len == 0) {
+ icmpv6_error_log(skb, state, "redirect without options");
+ return -NF_ACCEPT;
+ }
+
+ /* We could call ndisc_parse_options(), but it would need
+ * skb_linearize() and a bit more work.
+ */
+ if (nd_opt->nd_opt_type != ND_OPT_REDIRECT_HDR)
+ return NF_ACCEPT;
+
+ memcpy(&outer_daddr.ip6, &ipv6_hdr(skb)->daddr,
+ sizeof(outer_daddr.ip6));
+ dataoff += 8;
+ return nf_conntrack_inet_error(tmpl, skb, dataoff, state,
+ IPPROTO_ICMPV6, &outer_daddr);
+}
+
int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
@@ -158,6 +208,9 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
return NF_ACCEPT;
}
+ if (icmp6h->icmp6_type == NDISC_REDIRECT)
+ return nf_conntrack_icmpv6_redirect(tmpl, skb, dataoff, state);
+
/* is not error message ? */
if (icmp6h->icmp6_type >= 128)
return NF_ACCEPT;
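The "rd_msg no longer usable" warning above exists because both skb_header_pointer() calls share one on-stack union as their copy buffer: when the requested bytes are not linear in the skb, they are copied into that buffer and the returned pointer aims at it, so the second lookup can overwrite what the first pointer refers to. Illustrative shape only, not the full handler:

union {
	struct nd_opt_hdr nd_opt;
	struct rd_msg rd_msg;
} tmp;

rd_msg = skb_header_pointer(skb, dataoff, sizeof(*rd_msg), &tmp.rd_msg);
/* ... validate the redirect header ... */
nd_opt = skb_header_pointer(skb, dataoff + sizeof(*rd_msg),
			    sizeof(*nd_opt), &tmp.nd_opt);
/* tmp.rd_msg, and rd_msg if it pointed into tmp, is now stale */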
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 7626f3e1c70a..6b2a215b2786 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -27,22 +27,16 @@
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_timeout.h>
-/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
- closely. They're more complex. --RR
-
- And so for me for SCTP :D -Kiran */
-
static const char *const sctp_conntrack_names[] = {
- "NONE",
- "CLOSED",
- "COOKIE_WAIT",
- "COOKIE_ECHOED",
- "ESTABLISHED",
- "SHUTDOWN_SENT",
- "SHUTDOWN_RECD",
- "SHUTDOWN_ACK_SENT",
- "HEARTBEAT_SENT",
- "HEARTBEAT_ACKED",
+ [SCTP_CONNTRACK_NONE] = "NONE",
+ [SCTP_CONNTRACK_CLOSED] = "CLOSED",
+ [SCTP_CONNTRACK_COOKIE_WAIT] = "COOKIE_WAIT",
+ [SCTP_CONNTRACK_COOKIE_ECHOED] = "COOKIE_ECHOED",
+ [SCTP_CONNTRACK_ESTABLISHED] = "ESTABLISHED",
+ [SCTP_CONNTRACK_SHUTDOWN_SENT] = "SHUTDOWN_SENT",
+ [SCTP_CONNTRACK_SHUTDOWN_RECD] = "SHUTDOWN_RECD",
+ [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = "SHUTDOWN_ACK_SENT",
+ [SCTP_CONNTRACK_HEARTBEAT_SENT] = "HEARTBEAT_SENT",
};
#define SECS * HZ
@@ -54,12 +48,11 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
[SCTP_CONNTRACK_CLOSED] = 10 SECS,
[SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS,
[SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS,
- [SCTP_CONNTRACK_ESTABLISHED] = 5 DAYS,
- [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000,
- [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000,
+ [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS,
+ [SCTP_CONNTRACK_SHUTDOWN_SENT] = 3 SECS,
+ [SCTP_CONNTRACK_SHUTDOWN_RECD] = 3 SECS,
[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS,
[SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS,
- [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS,
};
#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
@@ -73,7 +66,6 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
#define sSR SCTP_CONNTRACK_SHUTDOWN_RECD
#define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
#define sHS SCTP_CONNTRACK_HEARTBEAT_SENT
-#define sHA SCTP_CONNTRACK_HEARTBEAT_ACKED
#define sIV SCTP_CONNTRACK_MAX
/*
@@ -96,9 +88,6 @@ SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
the SHUTDOWN chunk. Connection is closed.
HEARTBEAT_SENT - We have seen a HEARTBEAT in a new flow.
-HEARTBEAT_ACKED - We have seen a HEARTBEAT-ACK in the direction opposite to
- that of the HEARTBEAT chunk. Secondary connection is
- established.
*/
/* TODO
@@ -115,33 +104,33 @@ cookie echoed to closed.
static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
{
/* ORIGINAL */
-/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
-/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
-/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
-/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
-/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA},
-/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't have Stale cookie*/
-/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* 5.2.4 - Big TODO */
-/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't come in orig dir */
-/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA},
-/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
-/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA}
+/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
+/* init */ {sCL, sCL, sCW, sCE, sES, sCL, sCL, sSA, sCW},
+/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},
+/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL},
+/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA},
+/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/
+/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */
+/* cookie_ack */ {sCL, sCL, sCW, sES, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
+/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL},
+/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
},
{
/* REPLY */
-/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
-/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */
-/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
-/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL},
-/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR},
-/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA},
-/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA},
-/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* Can't come in reply dir */
-/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA},
-/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA},
-/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
-/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA}
+/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
+/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* INIT in sCL Big TODO */
+/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV},
+/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV},
+/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV},
+/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV},
+/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV},
+/* cookie_echo */ {sIV, sCL, sCE, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
+/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV},
+/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV},
+/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sES},
}
};
@@ -310,7 +299,7 @@ sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
pr_debug("Setting vtag %x for secondary conntrack\n",
sh->vtag);
ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
- } else {
+ } else if (sch->type == SCTP_CID_SHUTDOWN_ACK) {
/* If it is a shutdown ack OOTB packet, we expect a return
shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
pr_debug("Setting vtag %x for new conn OOTB\n",
@@ -412,24 +401,34 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
/* Special cases of Verification tag check (Sec 8.5.1) */
if (sch->type == SCTP_CID_INIT) {
- /* Sec 8.5.1 (A) */
+ /* (A) vtag MUST be zero */
if (sh->vtag != 0)
goto out_unlock;
} else if (sch->type == SCTP_CID_ABORT) {
- /* Sec 8.5.1 (B) */
- if (sh->vtag != ct->proto.sctp.vtag[dir] &&
- sh->vtag != ct->proto.sctp.vtag[!dir])
+ /* (B) vtag MUST match own vtag if T flag is unset OR
+ * MUST match peer's vtag if T flag is set
+ */
+ if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
+ sh->vtag != ct->proto.sctp.vtag[dir]) ||
+ ((sch->flags & SCTP_CHUNK_FLAG_T) &&
+ sh->vtag != ct->proto.sctp.vtag[!dir]))
goto out_unlock;
} else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
- /* Sec 8.5.1 (C) */
- if (sh->vtag != ct->proto.sctp.vtag[dir] &&
- sh->vtag != ct->proto.sctp.vtag[!dir] &&
- sch->flags & SCTP_CHUNK_FLAG_T)
+ /* (C) vtag MUST match own vtag if T flag is unset OR
+ * MUST match peer's vtag if T flag is set
+ */
+ if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
+ sh->vtag != ct->proto.sctp.vtag[dir]) ||
+ ((sch->flags & SCTP_CHUNK_FLAG_T) &&
+ sh->vtag != ct->proto.sctp.vtag[!dir]))
goto out_unlock;
} else if (sch->type == SCTP_CID_COOKIE_ECHO) {
- /* Sec 8.5.1 (D) */
+ /* (D) vtag must be same as init_vtag as found in INIT_ACK */
if (sh->vtag != ct->proto.sctp.vtag[dir])
goto out_unlock;
+ } else if (sch->type == SCTP_CID_COOKIE_ACK) {
+ ct->proto.sctp.init[dir] = 0;
+ ct->proto.sctp.init[!dir] = 0;
} else if (sch->type == SCTP_CID_HEARTBEAT) {
if (ct->proto.sctp.vtag[dir] == 0) {
pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir);
@@ -478,16 +477,18 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
}
/* If it is an INIT or an INIT ACK note down the vtag */
- if (sch->type == SCTP_CID_INIT ||
- sch->type == SCTP_CID_INIT_ACK) {
- struct sctp_inithdr _inithdr, *ih;
+ if (sch->type == SCTP_CID_INIT) {
+ struct sctp_inithdr _ih, *ih;
- ih = skb_header_pointer(skb, offset + sizeof(_sch),
- sizeof(_inithdr), &_inithdr);
- if (ih == NULL)
+ ih = skb_header_pointer(skb, offset + sizeof(_sch), sizeof(*ih), &_ih);
+ if (!ih)
goto out_unlock;
- pr_debug("Setting vtag %x for dir %d\n",
- ih->init_tag, !dir);
+
+ if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir])
+ ct->proto.sctp.init[!dir] = 0;
+ ct->proto.sctp.init[dir] = 1;
+
+ pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir);
ct->proto.sctp.vtag[!dir] = ih->init_tag;
/* don't renew timeout on init retransmit so
@@ -498,11 +499,33 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
old_state == SCTP_CONNTRACK_CLOSED &&
nf_ct_is_confirmed(ct))
ignore = true;
+ } else if (sch->type == SCTP_CID_INIT_ACK) {
+ struct sctp_inithdr _ih, *ih;
+ __be32 vtag;
+
+ ih = skb_header_pointer(skb, offset + sizeof(_sch), sizeof(*ih), &_ih);
+ if (!ih)
+ goto out_unlock;
+
+ vtag = ct->proto.sctp.vtag[!dir];
+ if (!ct->proto.sctp.init[!dir] && vtag && vtag != ih->init_tag)
+ goto out_unlock;
+ /* collision */
+ if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir] &&
+ vtag != ih->init_tag)
+ goto out_unlock;
+
+ pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir);
+ ct->proto.sctp.vtag[!dir] = ih->init_tag;
}
ct->proto.sctp.state = new_state;
- if (old_state != new_state)
+ if (old_state != new_state) {
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
+ if (new_state == SCTP_CONNTRACK_ESTABLISHED &&
+ !test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
+ nf_conntrack_event_cache(IPCT_ASSURED, ct);
+ }
}
spin_unlock_bh(&ct->lock);
@@ -516,14 +539,6 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
- if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
- dir == IP_CT_DIR_REPLY &&
- new_state == SCTP_CONNTRACK_ESTABLISHED) {
- pr_debug("Setting assured bit\n");
- set_bit(IPS_ASSURED_BIT, &ct->status);
- nf_conntrack_event_cache(IPCT_ASSURED, ct);
- }
-
return NF_ACCEPT;
out_unlock:
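The reworked ABORT/SHUTDOWN_COMPLETE checks encode RFC 4960 sec. 8.5.1 (B) and (C): with the T flag unset the verification tag must match our tag for this direction, with T set it must match the peer's. A runnable userspace restatement of the predicate (CHUNK_FLAG_T mirrors SCTP_CHUNK_FLAG_T; the tag values in main() are arbitrary):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define CHUNK_FLAG_T 0x1

static bool vtag_ok(uint8_t flags, uint32_t vtag,
		    uint32_t own_vtag, uint32_t peer_vtag)
{
	if (flags & CHUNK_FLAG_T)
		return vtag == peer_vtag;	/* reflected tag */
	return vtag == own_vtag;
}

int main(void)
{
	assert(vtag_ok(0, 0xAAAA, 0xAAAA, 0xBBBB));		/* T unset: own tag */
	assert(!vtag_ok(0, 0xBBBB, 0xAAAA, 0xBBBB));		/* peer tag rejected */
	assert(vtag_ok(CHUNK_FLAG_T, 0xBBBB, 0xAAAA, 0xBBBB));	/* T set: peer tag */
	return 0;
}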
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index b8cc3339a249..aed967e2f30f 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1158,6 +1158,16 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
nf_ct_kill_acct(ct, ctinfo, skb);
return NF_ACCEPT;
}
+
+ if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) {
+ /* do not renew timeout on SYN retransmit.
+ *
+ * Else port reuse by client or NAT middlebox can keep
+ * entry alive indefinitely (including nat info).
+ */
+ return NF_ACCEPT;
+ }
+
/* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
* pickup with loose=1. Avoid large ESTABLISHED timeout.
*/
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index b83dc9bf0a5d..751df19fe0f8 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -477,7 +477,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
return ret;
if (ret == 0)
break;
- dataoff += *matchoff;
+ dataoff = *matchoff;
}
*in_header = 0;
}
@@ -489,7 +489,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
break;
if (ret == 0)
return ret;
- dataoff += *matchoff;
+ dataoff = *matchoff;
}
if (in_header)
@@ -611,7 +611,7 @@ int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
start += strlen(name);
*val = simple_strtoul(start, &end, 0);
if (start == end)
- return 0;
+ return -1;
if (matchoff && matchlen) {
*matchoff = start - dptr;
*matchlen = end - start;
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index a3faeacaa1cb..1e3dbed9d784 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -581,7 +581,6 @@ enum nf_ct_sysctl_index {
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD,
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT,
- NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED,
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST,
@@ -851,12 +850,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED] = {
- .procname = "nf_conntrack_sctp_timeout_heartbeat_acked",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = {
@@ -985,7 +978,6 @@ static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net,
XASSIGN(SHUTDOWN_RECD, sn);
XASSIGN(SHUTDOWN_ACK_SENT, sn);
XASSIGN(HEARTBEAT_SENT, sn);
- XASSIGN(HEARTBEAT_ACKED, sn);
#undef XASSIGN
#endif
}
@@ -1188,11 +1180,12 @@ static int __init nf_conntrack_standalone_init(void)
nf_conntrack_htable_size_user = nf_conntrack_htable_size;
#endif
+ nf_conntrack_init_end();
+
ret = register_pernet_subsys(&nf_conntrack_net_ops);
if (ret < 0)
goto out_pernet;
- nf_conntrack_init_end();
return 0;
out_pernet:
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index bb25d4c794c7..e25dbeb220b4 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -203,11 +203,12 @@ void nf_logger_put(int pf, enum nf_log_type type)
return;
}
- BUG_ON(loggers[pf][type] == NULL);
-
rcu_read_lock();
logger = rcu_dereference(loggers[pf][type]);
- module_put(logger->me);
+ if (!logger)
+ WARN_ON_ONCE(1);
+ else
+ module_put(logger->me);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_logger_put);
diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
index f91579c821e9..5b37487d9d11 100644
--- a/net/netfilter/nf_nat_redirect.c
+++ b/net/netfilter/nf_nat_redirect.c
@@ -10,6 +10,7 @@
#include <linux/if.h>
#include <linux/inetdevice.h>
+#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -24,81 +25,104 @@
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_redirect.h>
+static unsigned int
+nf_nat_redirect(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ const union nf_inet_addr *newdst)
+{
+ struct nf_nat_range2 newrange;
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+
+ ct = nf_ct_get(skb, &ctinfo);
+
+ memset(&newrange, 0, sizeof(newrange));
+
+ newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
+ newrange.min_addr = *newdst;
+ newrange.max_addr = *newdst;
+ newrange.min_proto = range->min_proto;
+ newrange.max_proto = range->max_proto;
+
+ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+}
+
unsigned int
-nf_nat_redirect_ipv4(struct sk_buff *skb,
- const struct nf_nat_ipv4_multi_range_compat *mr,
+nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_range2 *range,
unsigned int hooknum)
{
- struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- __be32 newdst;
- struct nf_nat_range2 newrange;
+ union nf_inet_addr newdst = {};
WARN_ON(hooknum != NF_INET_PRE_ROUTING &&
hooknum != NF_INET_LOCAL_OUT);
- ct = nf_ct_get(skb, &ctinfo);
- WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)));
-
/* Local packets: make them go to loopback */
if (hooknum == NF_INET_LOCAL_OUT) {
- newdst = htonl(0x7F000001);
+ newdst.ip = htonl(INADDR_LOOPBACK);
} else {
const struct in_device *indev;
- newdst = 0;
-
indev = __in_dev_get_rcu(skb->dev);
if (indev) {
const struct in_ifaddr *ifa;
ifa = rcu_dereference(indev->ifa_list);
if (ifa)
- newdst = ifa->ifa_local;
+ newdst.ip = ifa->ifa_local;
}
- if (!newdst)
+ if (!newdst.ip)
return NF_DROP;
}
- /* Transfer from original range. */
- memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
- memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
- newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
- newrange.min_addr.ip = newdst;
- newrange.max_addr.ip = newdst;
- newrange.min_proto = mr->range[0].min;
- newrange.max_proto = mr->range[0].max;
-
- /* Hand modified range to generic setup. */
- return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+ return nf_nat_redirect(skb, range, &newdst);
}
EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4);
static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
+static bool nf_nat_redirect_ipv6_usable(const struct inet6_ifaddr *ifa, unsigned int scope)
+{
+ unsigned int ifa_addr_type = ipv6_addr_type(&ifa->addr);
+
+ if (ifa_addr_type & IPV6_ADDR_MAPPED)
+ return false;
+
+ if ((ifa->flags & IFA_F_TENTATIVE) && (!(ifa->flags & IFA_F_OPTIMISTIC)))
+ return false;
+
+ if (scope) {
+ unsigned int ifa_scope = ifa_addr_type & IPV6_ADDR_SCOPE_MASK;
+
+ if (!(scope & ifa_scope))
+ return false;
+ }
+
+ return true;
+}
+
unsigned int
nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
unsigned int hooknum)
{
- struct nf_nat_range2 newrange;
- struct in6_addr newdst;
- enum ip_conntrack_info ctinfo;
- struct nf_conn *ct;
+ union nf_inet_addr newdst = {};
- ct = nf_ct_get(skb, &ctinfo);
if (hooknum == NF_INET_LOCAL_OUT) {
- newdst = loopback_addr;
+ newdst.in6 = loopback_addr;
} else {
+ unsigned int scope = ipv6_addr_scope(&ipv6_hdr(skb)->daddr);
struct inet6_dev *idev;
- struct inet6_ifaddr *ifa;
bool addr = false;
idev = __in6_dev_get(skb->dev);
if (idev != NULL) {
+ const struct inet6_ifaddr *ifa;
+
read_lock_bh(&idev->lock);
list_for_each_entry(ifa, &idev->addr_list, if_list) {
- newdst = ifa->addr;
+ if (!nf_nat_redirect_ipv6_usable(ifa, scope))
+ continue;
+
+ newdst.in6 = ifa->addr;
addr = true;
break;
}
@@ -109,12 +133,6 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
return NF_DROP;
}
- newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
- newrange.min_addr.in6 = newdst;
- newrange.max_addr.in6 = newdst;
- newrange.min_proto = range->min_proto;
- newrange.max_proto = range->max_proto;
-
- return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+ return nf_nat_redirect(skb, range, &newdst);
}
EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv6);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 58a7d89719b1..2d372d5fcbfa 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -20,15 +20,22 @@
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <net/sock.h>
#define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-"))
+#define NFT_SET_MAX_ANONLEN 16
+
+unsigned int nf_tables_net_id __read_mostly;
+EXPORT_SYMBOL_GPL(nf_tables_net_id);
static LIST_HEAD(nf_tables_expressions);
static LIST_HEAD(nf_tables_objects);
static LIST_HEAD(nf_tables_flowtables);
static LIST_HEAD(nf_tables_destroy_list);
+static LIST_HEAD(nf_tables_gc_list);
static DEFINE_SPINLOCK(nf_tables_destroy_list_lock);
+static DEFINE_SPINLOCK(nf_tables_gc_list_lock);
static u64 table_handle;
enum {
@@ -67,7 +74,9 @@ static const struct rhashtable_params nft_objname_ht_params = {
static void nft_validate_state_update(struct net *net, u8 new_validate_state)
{
- switch (net->nft.validate_state) {
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
+
+ switch (nft_net->validate_state) {
case NFT_VALIDATE_SKIP:
WARN_ON_ONCE(new_validate_state == NFT_VALIDATE_DO);
break;
@@ -78,11 +87,14 @@ static void nft_validate_state_update(struct net *net, u8 new_validate_state)
return;
}
- net->nft.validate_state = new_validate_state;
+ nft_net->validate_state = new_validate_state;
}
static void nf_tables_trans_destroy_work(struct work_struct *w);
static DECLARE_WORK(trans_destroy_work, nf_tables_trans_destroy_work);
+static void nft_trans_gc_work(struct work_struct *work);
+static DECLARE_WORK(trans_gc_work, nft_trans_gc_work);
+
static void nft_ctx_init(struct nft_ctx *ctx,
struct net *net,
const struct sk_buff *skb,
@@ -113,6 +125,8 @@ static struct nft_trans *nft_trans_alloc_gfp(const struct nft_ctx *ctx,
if (trans == NULL)
return NULL;
+ INIT_LIST_HEAD(&trans->list);
+ INIT_LIST_HEAD(&trans->binding_list);
trans->msg_type = msg_type;
trans->ctx = *ctx;
@@ -125,34 +139,67 @@ static struct nft_trans *nft_trans_alloc(const struct nft_ctx *ctx,
return nft_trans_alloc_gfp(ctx, msg_type, size, GFP_KERNEL);
}
-static void nft_trans_destroy(struct nft_trans *trans)
+static void nft_trans_list_del(struct nft_trans *trans)
{
list_del(&trans->list);
+ list_del(&trans->binding_list);
+}
+
+static void nft_trans_destroy(struct nft_trans *trans)
+{
+ nft_trans_list_del(trans);
kfree(trans);
}
-static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
+static void __nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set,
+ bool bind)
{
+ struct nftables_pernet *nft_net;
struct net *net = ctx->net;
struct nft_trans *trans;
if (!nft_set_is_anonymous(set))
return;
- list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
+ nft_net = net_generic(net, nf_tables_net_id);
+ list_for_each_entry_reverse(trans, &nft_net->commit_list, list) {
switch (trans->msg_type) {
case NFT_MSG_NEWSET:
if (nft_trans_set(trans) == set)
- nft_trans_set_bound(trans) = true;
+ nft_trans_set_bound(trans) = bind;
break;
case NFT_MSG_NEWSETELEM:
if (nft_trans_elem_set(trans) == set)
- nft_trans_elem_set_bound(trans) = true;
+ nft_trans_elem_set_bound(trans) = bind;
break;
}
}
}
+static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
+{
+ return __nft_set_trans_bind(ctx, set, true);
+}
+
+static void nft_set_trans_unbind(const struct nft_ctx *ctx, struct nft_set *set)
+{
+ return __nft_set_trans_bind(ctx, set, false);
+}
+
+static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *trans)
+{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
+
+ switch (trans->msg_type) {
+ case NFT_MSG_NEWSET:
+ if (nft_set_is_anonymous(nft_trans_set(trans)))
+ list_add_tail(&trans->binding_list, &nft_net->binding_list);
+ break;
+ }
+
+ list_add_tail(&trans->list, &nft_net->commit_list);
+}
+
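Anonymous-set NEWSET transactions are additionally threaded onto the new per-netns binding_list here. The consumer of that list is outside these hunks; the apparent intent is to let the commit path cheaply reject a batch that creates an anonymous set but never binds it to a rule, along these lines (a sketch under that assumption, function name hypothetical):

static int nft_check_unbound_anon_sets(const struct nftables_pernet *nft_net)
{
	struct nft_trans *trans;

	list_for_each_entry(trans, &nft_net->binding_list, binding_list) {
		if (trans->msg_type == NFT_MSG_NEWSET &&
		    nft_set_is_anonymous(nft_trans_set(trans)) &&
		    !nft_trans_set_bound(trans))
			return -EINVAL;	/* unbound anonymous set in batch */
	}
	return 0;
}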
static int nf_tables_register_hook(struct net *net,
const struct nft_table *table,
struct nft_chain *chain)
@@ -173,9 +220,10 @@ static int nf_tables_register_hook(struct net *net,
return nf_register_net_hook(net, ops);
}
-static void nf_tables_unregister_hook(struct net *net,
- const struct nft_table *table,
- struct nft_chain *chain)
+static void __nf_tables_unregister_hook(struct net *net,
+ const struct nft_table *table,
+ struct nft_chain *chain,
+ bool release_netdev)
{
const struct nft_base_chain *basechain;
const struct nf_hook_ops *ops;
@@ -190,6 +238,16 @@ static void nf_tables_unregister_hook(struct net *net,
return basechain->type->ops_unregister(net, ops);
nf_unregister_net_hook(net, ops);
+ if (release_netdev &&
+ table->family == NFPROTO_NETDEV)
+ nft_base_chain(chain)->ops.dev = NULL;
+}
+
+static void nf_tables_unregister_hook(struct net *net,
+ const struct nft_table *table,
+ struct nft_chain *chain)
+{
+ __nf_tables_unregister_hook(net, table, chain, false);
}
static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
@@ -203,7 +261,7 @@ static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
if (msg_type == NFT_MSG_NEWTABLE)
nft_activate_next(ctx->net, ctx->table);
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
}
@@ -230,7 +288,7 @@ static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
if (msg_type == NFT_MSG_NEWCHAIN)
nft_activate_next(ctx->net, ctx->chain);
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ nft_trans_commit_list_add_tail(ctx->net, trans);
return trans;
}
@@ -242,7 +300,7 @@ static int nft_delchain(struct nft_ctx *ctx)
if (IS_ERR(trans))
return PTR_ERR(trans);
- ctx->table->use--;
+ nft_use_dec(&ctx->table->use);
nft_deactivate_next(ctx->net, ctx->chain);
return 0;
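This and the many similar hunks below replace raw use++/use-- bookkeeping (plus the scattered "use == UINT_MAX" pre-checks that are deleted further down) with central helpers defined outside this file. Judging from the -EMFILE handling at the new call sites, they plausibly look like:

static inline bool nft_use_inc(u32 *use)
{
	/* Report the increment that would wrap the counter. */
	return ++(*use) != 0;
}

static inline void nft_use_dec(u32 *use)
{
	WARN_ON_ONCE((*use)-- == 0);	/* underflow means a refcount bug */
}

/* Error/abort paths restore a count that was valid moments before. */
static inline void nft_use_inc_restore(u32 *use)
{
	WARN_ON_ONCE(!nft_use_inc(use));
}

#define nft_use_dec_restore	nft_use_dec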
@@ -283,7 +341,7 @@ nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule)
/* You cannot delete the same rule twice */
if (nft_is_active_next(ctx->net, rule)) {
nft_deactivate_next(ctx->net, rule);
- ctx->chain->use--;
+ nft_use_dec(&ctx->chain->use);
return 0;
}
return -ENOENT;
@@ -303,7 +361,7 @@ static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type,
ntohl(nla_get_be32(ctx->nla[NFTA_RULE_ID]));
}
nft_trans_rule(trans) = rule;
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ nft_trans_commit_list_add_tail(ctx->net, trans);
return trans;
}
@@ -358,11 +416,32 @@ static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
nft_activate_next(ctx->net, set);
}
nft_trans_set(trans) = set;
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
}
+static int nft_mapelem_deactivate(const struct nft_ctx *ctx,
+ struct nft_set *set,
+ const struct nft_set_iter *iter,
+ struct nft_set_elem *elem)
+{
+ nft_setelem_data_deactivate(ctx->net, set, elem);
+
+ return 0;
+}
+
+static void nft_map_deactivate(const struct nft_ctx *ctx, struct nft_set *set)
+{
+ struct nft_set_iter iter = {
+ .genmask = nft_genmask_next(ctx->net),
+ .fn = nft_mapelem_deactivate,
+ };
+
+ set->ops->walk(ctx, set, &iter);
+ WARN_ON_ONCE(iter.err);
+}
+
static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
{
int err;
@@ -371,8 +450,11 @@ static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
if (err < 0)
return err;
+ if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
+ nft_map_deactivate(ctx, set);
+
nft_deactivate_next(ctx->net, set);
- ctx->table->use--;
+ nft_use_dec(&ctx->table->use);
return err;
}
@@ -390,7 +472,7 @@ static int nft_trans_obj_add(struct nft_ctx *ctx, int msg_type,
nft_activate_next(ctx->net, obj);
nft_trans_obj(trans) = obj;
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
}
@@ -404,7 +486,7 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj)
return err;
nft_deactivate_next(ctx->net, obj);
- ctx->table->use--;
+ nft_use_dec(&ctx->table->use);
return err;
}
@@ -423,7 +505,7 @@ static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
nft_activate_next(ctx->net, flowtable);
nft_trans_flowtable(trans) = flowtable;
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
}
@@ -438,7 +520,7 @@ static int nft_delflowtable(struct nft_ctx *ctx,
return err;
nft_deactivate_next(ctx->net, flowtable);
- ctx->table->use--;
+ nft_use_dec(&ctx->table->use);
return err;
}
@@ -451,13 +533,15 @@ static struct nft_table *nft_table_lookup(const struct net *net,
const struct nlattr *nla,
u8 family, u8 genmask)
{
+ struct nftables_pernet *nft_net;
struct nft_table *table;
if (nla == NULL)
return ERR_PTR(-EINVAL);
- list_for_each_entry_rcu(table, &net->nft.tables, list,
- lockdep_is_held(&net->nft.commit_mutex)) {
+ nft_net = net_generic(net, nf_tables_net_id);
+ list_for_each_entry_rcu(table, &nft_net->tables, list,
+ lockdep_is_held(&nft_net->commit_mutex)) {
if (!nla_strcmp(nla, table->name) &&
table->family == family &&
nft_active_genmask(table, genmask))
@@ -471,9 +555,11 @@ static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
const struct nlattr *nla,
u8 genmask)
{
+ struct nftables_pernet *nft_net;
struct nft_table *table;
- list_for_each_entry(table, &net->nft.tables, list) {
+ nft_net = net_generic(net, nf_tables_net_id);
+ list_for_each_entry(table, &nft_net->tables, list) {
if (be64_to_cpu(nla_get_be64(nla)) == table->handle &&
nft_active_genmask(table, genmask))
return table;
@@ -525,6 +611,7 @@ struct nft_module_request {
static int nft_request_module(struct net *net, const char *fmt, ...)
{
char module_name[MODULE_NAME_LEN];
+ struct nftables_pernet *nft_net;
struct nft_module_request *req;
va_list args;
int ret;
@@ -535,7 +622,8 @@ static int nft_request_module(struct net *net, const char *fmt, ...)
if (ret >= MODULE_NAME_LEN)
return 0;
- list_for_each_entry(req, &net->nft.module_list, list) {
+ nft_net = net_generic(net, nf_tables_net_id);
+ list_for_each_entry(req, &nft_net->module_list, list) {
if (!strcmp(req->module, module_name)) {
if (req->done)
return 0;
@@ -551,7 +639,7 @@ static int nft_request_module(struct net *net, const char *fmt, ...)
req->done = false;
strlcpy(req->module, module_name, MODULE_NAME_LEN);
- list_add_tail(&req->list, &net->nft.module_list);
+ list_add_tail(&req->list, &nft_net->module_list);
return -EAGAIN;
}
@@ -587,6 +675,13 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla,
return ERR_PTR(-ENOENT);
}
+static __be16 nft_base_seq(const struct net *net)
+{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
+
+ return htons(nft_net->base_seq & 0xffff);
+}
+
static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
[NFTA_TABLE_NAME] = { .type = NLA_STRING,
.len = NFT_TABLE_MAXNAMELEN - 1 },
@@ -599,20 +694,16 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
int family, const struct nft_table *table)
{
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+ NFNETLINK_V0, nft_base_seq(net));
+ if (!nlh)
goto nla_put_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = family;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
-
if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
- nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) ||
+ nla_put_be32(skb, NFTA_TABLE_FLAGS,
+ htonl(table->flags & NFT_TABLE_F_MASK)) ||
nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)) ||
nla_put_be64(skb, NFTA_TABLE_HANDLE, cpu_to_be64(table->handle),
NFTA_TABLE_PAD))
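Here, and in every fill helper converted below, the open-coded nfgenmsg construction moves into nfnl_msg_put(). From the removed lines, the shared helper is equivalent to:

static inline struct nlmsghdr *
nfnl_msg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int flags,
	     u8 family, u8 version, __be16 res_id)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*nfmsg), flags);
	if (!nlh)
		return NULL;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = family;
	nfmsg->version = version;
	nfmsg->res_id = res_id;		/* nft_base_seq(net) at these sites */

	return nlh;
}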
@@ -657,15 +748,17 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+ struct nftables_pernet *nft_net;
const struct nft_table *table;
unsigned int idx = 0, s_idx = cb->args[0];
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
rcu_read_lock();
- cb->seq = net->nft.base_seq;
+ nft_net = net_generic(net, nf_tables_net_id);
+ cb->seq = nft_net->base_seq;
- list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
@@ -769,7 +862,7 @@ static void nft_table_disable(struct net *net, struct nft_table *table, u32 cnt)
if (cnt && i++ == cnt)
break;
- nf_unregister_net_hook(net, &nft_base_chain(chain)->ops);
+ nf_tables_unregister_hook(net, table, chain);
}
}
@@ -784,7 +877,7 @@ static int nf_tables_table_enable(struct net *net, struct nft_table *table)
if (!nft_is_base_chain(chain))
continue;
- err = nf_register_net_hook(net, &nft_base_chain(chain)->ops);
+ err = nf_tables_register_hook(net, table, chain);
if (err < 0)
goto err;
@@ -799,14 +892,22 @@ err:
static void nf_tables_table_disable(struct net *net, struct nft_table *table)
{
+ table->flags &= ~NFT_TABLE_F_DORMANT;
nft_table_disable(net, table, 0);
+ table->flags |= NFT_TABLE_F_DORMANT;
}
+#define __NFT_TABLE_F_INTERNAL (NFT_TABLE_F_MASK + 1)
+#define __NFT_TABLE_F_WAS_DORMANT (__NFT_TABLE_F_INTERNAL << 0)
+#define __NFT_TABLE_F_WAS_AWAKEN (__NFT_TABLE_F_INTERNAL << 1)
+#define __NFT_TABLE_F_UPDATE (__NFT_TABLE_F_WAS_DORMANT | \
+ __NFT_TABLE_F_WAS_AWAKEN)
+
static int nf_tables_updtable(struct nft_ctx *ctx)
{
struct nft_trans *trans;
u32 flags;
- int ret = 0;
+ int ret;
if (!ctx->nla[NFTA_TABLE_FLAGS])
return 0;
@@ -815,9 +916,13 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
if (flags & ~NFT_TABLE_F_DORMANT)
return -EINVAL;
- if (flags == ctx->table->flags)
+ if (flags == (ctx->table->flags & NFT_TABLE_F_MASK))
return 0;
+ /* No dormant off/on/off/on games in single transaction */
+ if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
+ return -EINVAL;
+
trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
sizeof(struct nft_trans_table));
if (trans == NULL)
@@ -825,22 +930,28 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
if ((flags & NFT_TABLE_F_DORMANT) &&
!(ctx->table->flags & NFT_TABLE_F_DORMANT)) {
- nft_trans_table_enable(trans) = false;
+ ctx->table->flags |= NFT_TABLE_F_DORMANT;
+ if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE))
+ ctx->table->flags |= __NFT_TABLE_F_WAS_AWAKEN;
} else if (!(flags & NFT_TABLE_F_DORMANT) &&
ctx->table->flags & NFT_TABLE_F_DORMANT) {
- ret = nf_tables_table_enable(ctx->net, ctx->table);
- if (ret >= 0) {
- ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
- nft_trans_table_enable(trans) = true;
+ ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
+ if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE)) {
+ ret = nf_tables_table_enable(ctx->net, ctx->table);
+ if (ret < 0)
+ goto err_register_hooks;
+
+ ctx->table->flags |= __NFT_TABLE_F_WAS_DORMANT;
}
}
- if (ret < 0)
- goto err;
nft_trans_table_update(trans) = true;
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ nft_trans_commit_list_add_tail(ctx->net, trans);
+
return 0;
-err:
+
+err_register_hooks:
+ ctx->table->flags |= NFT_TABLE_F_DORMANT;
nft_trans_destroy(trans);
return ret;
}
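The two __NFT_TABLE_F_WAS_* bits live just above the UAPI flag mask and record which direction the dormant bit was flipped inside the open transaction, so commit and abort can register or unregister hooks exactly once. A compilable toy model of the rule enforced here (userspace C, illustrative only):

#include <assert.h>
#include <errno.h>

#define F_DORMANT	0x1u
#define F_MASK		F_DORMANT
#define F_INTERNAL	(F_MASK + 1)
#define F_WAS_DORMANT	(F_INTERNAL << 0)
#define F_WAS_AWAKEN	(F_INTERNAL << 1)
#define F_UPDATE	(F_WAS_DORMANT | F_WAS_AWAKEN)

static int updtable(unsigned int *tflags, unsigned int new_flags)
{
	if (new_flags == (*tflags & F_MASK))
		return 0;			/* no change requested */
	if (*tflags & F_UPDATE)
		return -EINVAL;			/* second flip, same batch */
	if (new_flags & F_DORMANT)
		*tflags |= F_DORMANT | F_WAS_AWAKEN;
	else
		*tflags = (*tflags & ~F_DORMANT) | F_WAS_DORMANT;
	return 0;
}

int main(void)
{
	unsigned int t = 0;			/* active table */

	assert(updtable(&t, F_DORMANT) == 0);	/* going dormant: ok */
	assert(updtable(&t, 0) == -EINVAL);	/* waking it again: rejected */
	return 0;
}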
@@ -896,11 +1007,36 @@ static int nft_objname_hash_cmp(struct rhashtable_compare_arg *arg,
return strcmp(obj->key.name, k->name);
}
+static bool nft_supported_family(u8 family)
+{
+ return false
+#ifdef CONFIG_NF_TABLES_INET
+ || family == NFPROTO_INET
+#endif
+#ifdef CONFIG_NF_TABLES_IPV4
+ || family == NFPROTO_IPV4
+#endif
+#ifdef CONFIG_NF_TABLES_ARP
+ || family == NFPROTO_ARP
+#endif
+#ifdef CONFIG_NF_TABLES_NETDEV
+ || family == NFPROTO_NETDEV
+#endif
+#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE)
+ || family == NFPROTO_BRIDGE
+#endif
+#ifdef CONFIG_NF_TABLES_IPV6
+ || family == NFPROTO_IPV6
+#endif
+ ;
+}
+
static int nf_tables_newtable(struct net *net, struct sock *nlsk,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[],
struct netlink_ext_ack *extack)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u8 genmask = nft_genmask_next(net);
int family = nfmsg->nfgen_family;
@@ -910,7 +1046,10 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
struct nft_ctx ctx;
int err;
- lockdep_assert_held(&net->nft.commit_mutex);
+ if (!nft_supported_family(family))
+ return -EOPNOTSUPP;
+
+ lockdep_assert_held(&nft_net->commit_mutex);
attr = nla[NFTA_TABLE_NAME];
table = nft_table_lookup(net, attr, family, genmask);
if (IS_ERR(table)) {
@@ -960,7 +1099,7 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
if (err < 0)
goto err_trans;
- list_add_tail_rcu(&table->list, &net->nft.tables);
+ list_add_tail_rcu(&table->list, &nft_net->tables);
return 0;
err_trans:
rhltable_destroy(&table->chains_ht);
@@ -995,8 +1134,7 @@ static int nft_flush_table(struct nft_ctx *ctx)
if (!nft_is_active_next(ctx->net, set))
continue;
- if (nft_set_is_anonymous(set) &&
- !list_empty(&set->bindings))
+ if (nft_set_is_anonymous(set))
continue;
err = nft_delset(ctx, set);
@@ -1040,11 +1178,12 @@ out:
static int nft_flush(struct nft_ctx *ctx, int family)
{
+ struct nftables_pernet *nft_net = net_generic(ctx->net, nf_tables_net_id);
struct nft_table *table, *nt;
const struct nlattr * const *nla = ctx->nla;
int err = 0;
- list_for_each_entry_safe(table, nt, &ctx->net->nft.tables, list) {
+ list_for_each_entry_safe(table, nt, &nft_net->tables, list) {
if (family != AF_UNSPEC && table->family != family)
continue;
@@ -1158,7 +1297,9 @@ nft_chain_lookup_byhandle(const struct nft_table *table, u64 handle, u8 genmask)
static bool lockdep_commit_lock_is_held(const struct net *net)
{
#ifdef CONFIG_PROVE_LOCKING
- return lockdep_is_held(&net->nft.commit_mutex);
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
+
+ return lockdep_is_held(&nft_net->commit_mutex);
#else
return true;
#endif
@@ -1262,18 +1403,13 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
const struct nft_chain *chain)
{
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+ NFNETLINK_V0, nft_base_seq(net));
+ if (!nlh)
goto nla_put_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = family;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
-
if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name))
goto nla_put_failure;
if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle),
@@ -1366,11 +1502,13 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
unsigned int idx = 0, s_idx = cb->args[0];
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
+ struct nftables_pernet *nft_net;
rcu_read_lock();
- cb->seq = net->nft.base_seq;
+ nft_net = net_generic(net, nf_tables_net_id);
+ cb->seq = nft_net->base_seq;
- list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
@@ -1556,12 +1694,13 @@ static int nft_chain_parse_hook(struct net *net,
struct nft_chain_hook *hook, u8 family,
bool autoload)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
struct nlattr *ha[NFTA_HOOK_MAX + 1];
const struct nft_chain_type *type;
struct net_device *dev;
int err;
- lockdep_assert_held(&net->nft.commit_mutex);
+ lockdep_assert_held(&nft_net->commit_mutex);
lockdep_nfnl_nft_mutex_not_held();
err = nla_parse_nested_deprecated(ha, NFTA_HOOK_MAX,
@@ -1662,9 +1801,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
struct nft_rule **rules;
int err;
- if (table->use == UINT_MAX)
- return -EOVERFLOW;
-
if (nla[NFTA_CHAIN_HOOK]) {
struct nft_chain_hook hook;
struct nf_hook_ops *ops;
@@ -1741,6 +1877,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
if (err < 0)
goto err1;
+ if (!nft_use_inc(&table->use)) {
+ err = -EMFILE;
+ goto err_use;
+ }
+
err = rhltable_insert_key(&table->chains_ht, chain->name,
&chain->rhlhead, nft_chain_ht_params);
if (err)
@@ -1758,11 +1899,12 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
if (nft_is_base_chain(chain))
nft_trans_chain_policy(trans) = policy;
- table->use++;
list_add_tail_rcu(&chain->list, &table->chains);
return 0;
err2:
+ nft_use_dec_restore(&table->use);
+err_use:
nf_tables_unregister_hook(net, table, chain);
err1:
nf_tables_chain_destroy(ctx);
@@ -1846,6 +1988,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
if (nla[NFTA_CHAIN_HANDLE] &&
nla[NFTA_CHAIN_NAME]) {
+ struct nftables_pernet *nft_net = net_generic(ctx->net, nf_tables_net_id);
struct nft_trans *tmp;
char *name;
@@ -1855,7 +1998,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
goto err;
err = -EEXIST;
- list_for_each_entry(tmp, &ctx->net->nft.commit_list, list) {
+ list_for_each_entry(tmp, &nft_net->commit_list, list) {
if (tmp->msg_type == NFT_MSG_NEWCHAIN &&
tmp->ctx.table == table &&
nft_trans_chain_update(tmp) &&
@@ -1868,7 +2011,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
nft_trans_chain_name(trans) = name;
}
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
err:
@@ -1882,6 +2025,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
const struct nlattr * const nla[],
struct netlink_ext_ack *extack)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u8 genmask = nft_genmask_next(net);
int family = nfmsg->nfgen_family;
@@ -1893,7 +2037,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
u64 handle = 0;
u32 flags = 0;
- lockdep_assert_held(&net->nft.commit_mutex);
+ lockdep_assert_held(&nft_net->commit_mutex);
table = nft_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, genmask);
if (IS_ERR(table)) {
@@ -2352,20 +2496,15 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
const struct nft_rule *prule)
{
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
const struct nft_expr *expr, *next;
struct nlattr *list;
u16 type = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
- nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, type, flags, family, NFNETLINK_V0,
+ nft_base_seq(net));
+ if (!nlh)
goto nla_put_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = family;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
-
if (nla_put_string(skb, NFTA_RULE_TABLE, table->name))
goto nla_put_failure;
if (nla_put_string(skb, NFTA_RULE_CHAIN, chain->name))
@@ -2486,11 +2625,13 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
unsigned int idx = 0;
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
+ struct nftables_pernet *nft_net;
rcu_read_lock();
- cb->seq = net->nft.base_seq;
+ nft_net = net_generic(net, nf_tables_net_id);
+ cb->seq = nft_net->base_seq;
- list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
@@ -2707,12 +2848,15 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
err = nft_chain_validate(&ctx, chain);
if (err < 0)
return err;
+
+ cond_resched();
}
return 0;
}
static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
+ const struct nft_chain *chain,
const struct nlattr *nla);
#define NFT_RULE_MAXEXPRS 128
@@ -2722,6 +2866,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
const struct nlattr * const nla[],
struct netlink_ext_ack *extack)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u8 genmask = nft_genmask_next(net);
struct nft_expr_info *info = NULL;
@@ -2739,7 +2884,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
int err, rem;
u64 handle, pos_handle;
- lockdep_assert_held(&net->nft.commit_mutex);
+ lockdep_assert_held(&nft_net->commit_mutex);
table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask);
if (IS_ERR(table)) {
@@ -2775,9 +2920,6 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
return -EINVAL;
handle = nf_tables_alloc_handle(table);
- if (chain->use == UINT_MAX)
- return -EOVERFLOW;
-
if (nla[NFTA_RULE_POSITION]) {
pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
old_rule = __nft_rule_lookup(chain, pos_handle);
@@ -2786,7 +2928,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
return PTR_ERR(old_rule);
}
} else if (nla[NFTA_RULE_POSITION_ID]) {
- old_rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_POSITION_ID]);
+ old_rule = nft_rule_lookup_byid(net, chain, nla[NFTA_RULE_POSITION_ID]);
if (IS_ERR(old_rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION_ID]);
return PTR_ERR(old_rule);
@@ -2859,16 +3001,21 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
expr = nft_expr_next(expr);
}
+ if (!nft_use_inc(&chain->use)) {
+ err = -EMFILE;
+ goto err2;
+ }
+
if (nlh->nlmsg_flags & NLM_F_REPLACE) {
trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
if (trans == NULL) {
err = -ENOMEM;
- goto err2;
+ goto err_destroy_flow_rule;
}
err = nft_delrule(&ctx, old_rule);
if (err < 0) {
nft_trans_destroy(trans);
- goto err2;
+ goto err_destroy_flow_rule;
}
list_add_tail_rcu(&rule->list, &old_rule->list);
@@ -2876,7 +3023,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
if (!trans) {
err = -ENOMEM;
- goto err2;
+ goto err_destroy_flow_rule;
}
if (nlh->nlmsg_flags & NLM_F_APPEND) {
@@ -2892,9 +3039,8 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
}
}
kvfree(info);
- chain->use++;
- if (net->nft.validate_state == NFT_VALIDATE_DO)
+ if (nft_net->validate_state == NFT_VALIDATE_DO)
return nft_table_validate(net, table);
if (chain->flags & NFT_CHAIN_HW_OFFLOAD) {
@@ -2906,8 +3052,12 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
}
return 0;
+
+err_destroy_flow_rule:
+ nft_use_dec_restore(&chain->use);
err2:
- nf_tables_rule_release(&ctx, rule);
+ nft_rule_expr_deactivate(&ctx, rule, NFT_TRANS_PREPARE_ERROR);
+ nf_tables_rule_destroy(&ctx, rule);
err1:
for (i = 0; i < n; i++) {
if (info[i].ops) {
@@ -2921,15 +3071,18 @@ err1:
}
static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
+ const struct nft_chain *chain,
const struct nlattr *nla)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
u32 id = ntohl(nla_get_be32(nla));
struct nft_trans *trans;
- list_for_each_entry(trans, &net->nft.commit_list, list) {
+ list_for_each_entry(trans, &nft_net->commit_list, list) {
struct nft_rule *rule = nft_trans_rule(trans);
if (trans->msg_type == NFT_MSG_NEWRULE &&
+ trans->ctx.chain == chain &&
id == nft_trans_rule_id(trans))
return rule;
}
@@ -2976,7 +3129,7 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk,
err = nft_delrule(&ctx, rule);
} else if (nla[NFTA_RULE_ID]) {
- rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_ID]);
+ rule = nft_rule_lookup_byid(net, chain, nla[NFTA_RULE_ID]);
if (IS_ERR(rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_ID]);
return PTR_ERR(rule);
@@ -3044,12 +3197,13 @@ nft_select_set_ops(const struct nft_ctx *ctx,
const struct nft_set_desc *desc,
enum nft_set_policies policy)
{
+ struct nftables_pernet *nft_net = net_generic(ctx->net, nf_tables_net_id);
const struct nft_set_ops *ops, *bops;
struct nft_set_estimate est, best;
const struct nft_set_type *type;
u32 flags = 0;
- lockdep_assert_held(&ctx->net->nft.commit_mutex);
+ lockdep_assert_held(&nft_net->commit_mutex);
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (list_empty(&nf_tables_set_types)) {
@@ -3191,16 +3345,19 @@ static struct nft_set *nft_set_lookup_byhandle(const struct nft_table *table,
}
static struct nft_set *nft_set_lookup_byid(const struct net *net,
+ const struct nft_table *table,
const struct nlattr *nla, u8 genmask)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
struct nft_trans *trans;
u32 id = ntohl(nla_get_be32(nla));
- list_for_each_entry(trans, &net->nft.commit_list, list) {
+ list_for_each_entry(trans, &nft_net->commit_list, list) {
if (trans->msg_type == NFT_MSG_NEWSET) {
struct nft_set *set = nft_trans_set(trans);
if (id == nft_trans_set_id(trans) &&
+ set->table == table &&
nft_active_genmask(set, genmask))
return set;
}
@@ -3221,7 +3378,7 @@ struct nft_set *nft_set_lookup_global(const struct net *net,
if (!nla_set_id)
return set;
- set = nft_set_lookup_byid(net, nla_set_id, genmask);
+ set = nft_set_lookup_byid(net, table, nla_set_id, genmask);
}
return set;
}
@@ -3240,6 +3397,9 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
if (p[1] != 'd' || strchr(p + 2, '%'))
return -EINVAL;
+ if (strnlen(name, NFT_SET_MAX_ANONLEN) >= NFT_SET_MAX_ANONLEN)
+ return -EINVAL;
+
inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (inuse == NULL)
return -ENOMEM;
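NFT_SET_MAX_ANONLEN (defined near the top of this file) caps the user-supplied template for anonymous set names ("__set%d" style) before it is handed to the sscanf()-based uniqueness scan below. The guard in isolation, for reference:

#include <errno.h>
#include <string.h>

#define NFT_SET_MAX_ANONLEN	16

/* -EINVAL for templates of 16 bytes or more; strnlen() never scans
 * past the cap, so an unterminated name cannot run away either. */
static int nft_anon_template_ok(const char *name)
{
	if (strnlen(name, NFT_SET_MAX_ANONLEN) >= NFT_SET_MAX_ANONLEN)
		return -EINVAL;
	return 0;
}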
@@ -3247,7 +3407,7 @@ cont:
list_for_each_entry(i, &ctx->table->sets, list) {
int tmp;
- if (!nft_is_active_next(ctx->net, set))
+ if (!nft_is_active_next(ctx->net, i))
continue;
if (!sscanf(i->name, name, &tmp))
continue;
@@ -3303,23 +3463,17 @@ __be64 nf_jiffies64_to_msecs(u64 input)
static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
const struct nft_set *set, u16 event, u16 flags)
{
- struct nfgenmsg *nfmsg;
struct nlmsghdr *nlh;
struct nlattr *desc;
u32 portid = ctx->portid;
u32 seq = ctx->seq;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
- flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, ctx->family,
+ NFNETLINK_V0, nft_base_seq(ctx->net));
+ if (!nlh)
goto nla_put_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = ctx->family;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = htons(ctx->net->nft.base_seq & 0xffff);
-
if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
goto nla_put_failure;
if (nla_put_string(skb, NFTA_SET_NAME, set->name))
@@ -3415,14 +3569,16 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
struct net *net = sock_net(skb->sk);
struct nft_ctx *ctx = cb->data, ctx_set;
+ struct nftables_pernet *nft_net;
if (cb->args[1])
return skb->len;
rcu_read_lock();
- cb->seq = net->nft.base_seq;
+ nft_net = net_generic(net, nf_tables_net_id);
+ cb->seq = nft_net->base_seq;
- list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (ctx->family != NFPROTO_UNSPEC &&
ctx->family != table->family)
continue;
@@ -3715,10 +3871,15 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
if (ops->privsize != NULL)
size = ops->privsize(nla, &desc);
+ if (!nft_use_inc(&table->use)) {
+ err = -EMFILE;
+ goto err1;
+ }
+
set = kvzalloc(sizeof(*set) + size + udlen, GFP_KERNEL);
if (!set) {
err = -ENOMEM;
- goto err1;
+ goto err_alloc;
}
name = nla_strdup(nla[NFTA_SET_NAME], GFP_KERNEL);
@@ -3739,6 +3900,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
}
INIT_LIST_HEAD(&set->bindings);
+ refcount_set(&set->refs, 1);
set->table = table;
write_pnet(&set->net, net);
set->ops = ops;
@@ -3765,29 +3927,38 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
goto err4;
list_add_tail_rcu(&set->list, &table->sets);
- table->use++;
+
return 0;
err4:
- ops->destroy(set);
+ ops->destroy(&ctx, set);
err3:
kfree(set->name);
err2:
kvfree(set);
+err_alloc:
+ nft_use_dec_restore(&table->use);
err1:
module_put(to_set_type(ops)->owner);
return err;
}
-static void nft_set_destroy(struct nft_set *set)
+static void nft_set_put(struct nft_set *set)
+{
+ if (refcount_dec_and_test(&set->refs)) {
+ kfree(set->name);
+ kvfree(set);
+ }
+}
+
+static void nft_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
{
if (WARN_ON(set->use > 0))
return;
- set->ops->destroy(set);
+ set->ops->destroy(ctx, set);
module_put(to_set_type(set->ops)->owner);
- kfree(set->name);
- kvfree(set);
+ nft_set_put(set);
}
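nft_set_destroy() now takes the context (matching the ops->destroy signature change visible in the error path above), and the final freeing of the name and the set moves behind the refs counter initialised to 1 in the NEWSET hunk. The counterpart reference holder is not in these hunks; presumably the new asynchronous GC takes a reference before touching a set, so the backing memory survives a concurrent teardown, in the spirit of this hypothetical helper:

static struct nft_set *nft_set_get_ref(struct nft_set *set)
{
	/* Pairs with nft_set_put() above; the caller must know the set
	 * is still reachable when taking the reference. */
	refcount_inc(&set->refs);
	return set;
}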
static int nf_tables_delset(struct net *net, struct sock *nlsk,
@@ -3833,6 +4004,12 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
return nft_delset(&ctx, set);
}
+static int nft_validate_register_store(const struct nft_ctx *ctx,
+ enum nft_registers reg,
+ const struct nft_data *data,
+ enum nft_data_types type,
+ unsigned int len);
+
static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
struct nft_set *set,
const struct nft_set_iter *iter,
@@ -3854,9 +4031,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *i;
struct nft_set_iter iter;
- if (set->use == UINT_MAX)
- return -EOVERFLOW;
-
if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
return -EBUSY;
@@ -3881,10 +4055,12 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
return iter.err;
}
bind:
+ if (!nft_use_inc(&set->use))
+ return -EMFILE;
+
binding->chain = ctx->chain;
list_add_tail_rcu(&binding->list, &set->bindings);
nft_set_trans_bind(ctx, set);
- set->use++;
return 0;
}
@@ -3903,17 +4079,74 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
}
}
+static void nft_setelem_data_activate(const struct net *net,
+ const struct nft_set *set,
+ struct nft_set_elem *elem);
+
+static int nft_mapelem_activate(const struct nft_ctx *ctx,
+ struct nft_set *set,
+ const struct nft_set_iter *iter,
+ struct nft_set_elem *elem)
+{
+ nft_setelem_data_activate(ctx->net, set, elem);
+
+ return 0;
+}
+
+static void nft_map_activate(const struct nft_ctx *ctx, struct nft_set *set)
+{
+ struct nft_set_iter iter = {
+ .genmask = nft_genmask_next(ctx->net),
+ .fn = nft_mapelem_activate,
+ };
+
+ set->ops->walk(ctx, set, &iter);
+ WARN_ON_ONCE(iter.err);
+}
+
+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
+{
+ if (nft_set_is_anonymous(set)) {
+ if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
+ nft_map_activate(ctx, set);
+
+ nft_clear(ctx->net, set);
+ }
+
+ nft_use_inc_restore(&set->use);
+}
+EXPORT_SYMBOL_GPL(nf_tables_activate_set);
+
void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding,
enum nft_trans_phase phase)
{
switch (phase) {
+ case NFT_TRANS_PREPARE_ERROR:
+ nft_set_trans_unbind(ctx, set);
+ if (nft_set_is_anonymous(set))
+ nft_deactivate_next(ctx->net, set);
+ else
+ list_del_rcu(&binding->list);
+
+ nft_use_dec(&set->use);
+ break;
case NFT_TRANS_PREPARE:
- set->use--;
+ if (nft_set_is_anonymous(set)) {
+ if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
+ nft_map_deactivate(ctx, set);
+
+ nft_deactivate_next(ctx->net, set);
+ }
+ nft_use_dec(&set->use);
return;
case NFT_TRANS_ABORT:
case NFT_TRANS_RELEASE:
- set->use--;
+ if (nft_set_is_anonymous(set) &&
+ set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
+ nft_map_deactivate(ctx, set);
+
+ nft_use_dec(&set->use);
/* fall through */
default:
nf_tables_unbind_set(ctx, set, binding,
@@ -3925,7 +4158,7 @@ EXPORT_SYMBOL_GPL(nf_tables_deactivate_set);
void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
{
if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
- nft_set_destroy(set);
+ nft_set_destroy(ctx, set);
}
EXPORT_SYMBOL_GPL(nf_tables_destroy_set);
@@ -4095,8 +4328,12 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
const struct nft_set_iter *iter,
struct nft_set_elem *elem)
{
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
struct nft_set_dump_args *args;
+ if (nft_set_elem_expired(ext) || nft_set_elem_is_dead(ext))
+ return 0;
+
args = container_of(iter, struct nft_set_dump_args, iter);
return nf_tables_fill_setelem(args->skb, set, elem);
}
@@ -4110,18 +4347,19 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nft_set_dump_ctx *dump_ctx = cb->data;
struct net *net = sock_net(skb->sk);
+ struct nftables_pernet *nft_net;
struct nft_table *table;
struct nft_set *set;
struct nft_set_dump_args args;
bool set_found = false;
- struct nfgenmsg *nfmsg;
struct nlmsghdr *nlh;
struct nlattr *nest;
u32 portid, seq;
int event;
rcu_read_lock();
- list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ nft_net = net_generic(net, nf_tables_net_id);
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (dump_ctx->ctx.family != NFPROTO_UNSPEC &&
dump_ctx->ctx.family != table->family)
continue;
@@ -4147,16 +4385,11 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
portid = NETLINK_CB(cb->skb).portid;
seq = cb->nlh->nlmsg_seq;
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
- NLM_F_MULTI);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, NLM_F_MULTI,
+ table->family, NFNETLINK_V0, nft_base_seq(net));
+ if (!nlh)
goto nla_put_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = table->family;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
-
if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, table->name))
goto nla_put_failure;
if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name))
@@ -4213,22 +4446,16 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
- struct nfgenmsg *nfmsg;
struct nlmsghdr *nlh;
struct nlattr *nest;
int err;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
- flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, ctx->family,
+ NFNETLINK_V0, nft_base_seq(ctx->net));
+ if (!nlh)
goto nla_put_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = ctx->family;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = htons(ctx->net->nft.base_seq & 0xffff);
-
if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
goto nla_put_failure;
if (nla_put_string(skb, NFTA_SET_NAME, set->name))
@@ -4268,11 +4495,54 @@ static int nft_setelem_parse_flags(const struct nft_set *set,
return 0;
}
+static int nft_setelem_parse_key(struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_data *key, struct nlattr *attr)
+{
+ struct nft_data_desc desc;
+ int err;
+
+ err = nft_data_init(ctx, key, NFT_DATA_VALUE_MAXLEN, &desc, attr);
+ if (err < 0)
+ return err;
+
+ if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) {
+ nft_data_release(key, desc.type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nft_setelem_parse_data(struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_data_desc *desc,
+ struct nft_data *data,
+ struct nlattr *attr)
+{
+ u32 dtype;
+ int err;
+
+ err = nft_data_init(ctx, data, NFT_DATA_VALUE_MAXLEN, desc, attr);
+ if (err < 0)
+ return err;
+
+ if (set->dtype == NFT_DATA_VERDICT)
+ dtype = NFT_DATA_VERDICT;
+ else
+ dtype = NFT_DATA_VALUE;
+
+ if (dtype != desc->type ||
+ set->dlen != desc->len) {
+ nft_data_release(data, desc->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr)
{
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
- struct nft_data_desc desc;
struct nft_set_elem elem;
struct sk_buff *skb;
uint32_t flags = 0;
@@ -4291,17 +4561,11 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
if (err < 0)
return err;
- err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &desc,
- nla[NFTA_SET_ELEM_KEY]);
+ err = nft_setelem_parse_key(ctx, set, &elem.key.val,
+ nla[NFTA_SET_ELEM_KEY]);
if (err < 0)
return err;
- err = -EINVAL;
- if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) {
- nft_data_release(&elem.key.val, desc.type);
- return err;
- }
-
priv = set->ops->get(ctx->net, set, &elem, flags);
if (IS_ERR(priv))
return PTR_ERR(priv);
@@ -4448,6 +4712,7 @@ void *nft_set_elem_init(const struct nft_set *set,
return elem;
}
+/* Drop references and destroy. Called from gc, dynset and abort path. */
void nft_set_elem_destroy(const struct nft_set *set, void *elem,
bool destroy_expr)
{
@@ -4471,16 +4736,16 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
}
}
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
- (*nft_set_ext_obj(ext))->use--;
+ nft_use_dec(&(*nft_set_ext_obj(ext))->use);
kfree(elem);
}
EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
-/* Only called from commit path, nft_set_elem_deactivate() already deals with
- * the refcounting from the preparation phase.
+/* Destroy element. References have been already dropped in the preparation
+ * path via nft_setelem_data_deactivate().
*/
-static void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
- const struct nft_set *set, void *elem)
+void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
+ const struct nft_set *set, void *elem)
{
struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
@@ -4488,20 +4753,20 @@ static void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
nf_tables_expr_destroy(ctx, nft_set_ext_expr(ext));
kfree(elem);
}
+EXPORT_SYMBOL_GPL(nf_tables_set_elem_destroy);
static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr, u32 nlmsg_flags)
{
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
u8 genmask = nft_genmask_next(ctx->net);
- struct nft_data_desc d1, d2;
struct nft_set_ext_tmpl tmpl;
struct nft_set_ext *ext, *ext2;
struct nft_set_elem elem;
struct nft_set_binding *binding;
struct nft_object *obj = NULL;
struct nft_userdata *udata;
- struct nft_data data;
+ struct nft_data_desc desc;
enum nft_registers dreg;
struct nft_trans *trans;
u32 flags = 0;
@@ -4535,6 +4800,15 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
return -EINVAL;
}
+ if (set->flags & NFT_SET_OBJECT) {
+ if (!nla[NFTA_SET_ELEM_OBJREF] &&
+ !(flags & NFT_SET_ELEM_INTERVAL_END))
+ return -EINVAL;
+ } else {
+ if (nla[NFTA_SET_ELEM_OBJREF])
+ return -EINVAL;
+ }
+
if ((flags & NFT_SET_ELEM_INTERVAL_END) &&
(nla[NFTA_SET_ELEM_DATA] ||
nla[NFTA_SET_ELEM_OBJREF] ||
@@ -4566,15 +4840,12 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
return err;
}
- err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &d1,
- nla[NFTA_SET_ELEM_KEY]);
+ err = nft_setelem_parse_key(ctx, set, &elem.key.val,
+ nla[NFTA_SET_ELEM_KEY]);
if (err < 0)
goto err1;
- err = -EINVAL;
- if (d1.type != NFT_DATA_VALUE || d1.len != set->klen)
- goto err2;
- nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, d1.len);
+ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
if (timeout > 0) {
nft_set_ext_add(&tmpl, NFT_SET_EXT_EXPIRATION);
if (timeout != set->timeout)
@@ -4582,30 +4853,30 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
}
if (nla[NFTA_SET_ELEM_OBJREF] != NULL) {
- if (!(set->flags & NFT_SET_OBJECT)) {
- err = -EINVAL;
- goto err2;
- }
obj = nft_obj_lookup(ctx->net, ctx->table,
nla[NFTA_SET_ELEM_OBJREF],
set->objtype, genmask);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
+ obj = NULL;
+ goto err2;
+ }
+
+ if (!nft_use_inc(&obj->use)) {
+ err = -EMFILE;
+ obj = NULL;
goto err2;
}
+
nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF);
}
if (nla[NFTA_SET_ELEM_DATA] != NULL) {
- err = nft_data_init(ctx, &data, sizeof(data), &d2,
- nla[NFTA_SET_ELEM_DATA]);
+ err = nft_setelem_parse_data(ctx, set, &desc, &elem.data.val,
+ nla[NFTA_SET_ELEM_DATA]);
if (err < 0)
goto err2;
- err = -EINVAL;
- if (set->dtype != NFT_DATA_VERDICT && d2.len != set->dlen)
- goto err3;
-
dreg = nft_type_to_reg(set->dtype);
list_for_each_entry(binding, &set->bindings, list) {
struct nft_ctx bind_ctx = {
@@ -4619,19 +4890,19 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
continue;
err = nft_validate_register_store(&bind_ctx, dreg,
- &data,
- d2.type, d2.len);
+ &elem.data.val,
+ desc.type, desc.len);
if (err < 0)
goto err3;
- if (d2.type == NFT_DATA_VERDICT &&
- (data.verdict.code == NFT_GOTO ||
- data.verdict.code == NFT_JUMP))
+ if (desc.type == NFT_DATA_VERDICT &&
+ (elem.data.val.verdict.code == NFT_GOTO ||
+ elem.data.val.verdict.code == NFT_JUMP))
nft_validate_state_update(ctx->net,
NFT_VALIDATE_NEED);
}
- nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, d2.len);
+ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, desc.len);
}
/* The full maximum length of userdata can exceed the maximum
@@ -4647,8 +4918,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
}
err = -ENOMEM;
- elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data, data.data,
- timeout, expiration, GFP_KERNEL);
+ elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data,
+ elem.data.val.data, timeout, expiration,
+ GFP_KERNEL);
if (elem.priv == NULL)
goto err3;
@@ -4660,16 +4932,15 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
udata->len = ulen - 1;
nla_memcpy(&udata->data, nla[NFTA_SET_ELEM_USERDATA], ulen);
}
- if (obj) {
+ if (obj)
*nft_set_ext_obj(ext) = obj;
- obj->use++;
- }
trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
if (trans == NULL)
goto err4;
- ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK;
+ ext->genmask = nft_genmask_cur(ctx->net);
+
err = set->ops->insert(ctx->net, set, &elem, &ext2);
if (err) {
if (err == -EEXIST) {
@@ -4701,7 +4972,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
}
nft_trans_elem(trans) = elem;
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
err6:
@@ -4709,14 +4980,15 @@ err6:
err5:
kfree(trans);
err4:
- if (obj)
- obj->use--;
kfree(elem.priv);
err3:
if (nla[NFTA_SET_ELEM_DATA] != NULL)
- nft_data_release(&data, d2.type);
+ nft_data_release(&elem.data.val, desc.type);
err2:
- nft_data_release(&elem.key.val, d1.type);
+ if (obj)
+ nft_use_dec_restore(&obj->use);
+
+ nft_data_release(&elem.key.val, NFT_DATA_VALUE);
err1:
return err;
}
@@ -4726,6 +4998,7 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
const struct nlattr * const nla[],
struct netlink_ext_ack *extack)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
u8 genmask = nft_genmask_next(net);
const struct nlattr *attr;
struct nft_set *set;
@@ -4745,7 +5018,8 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
if (IS_ERR(set))
return PTR_ERR(set);
- if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
+ if (!list_empty(&set->bindings) &&
+ (set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS)))
return -EBUSY;
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
@@ -4754,7 +5028,7 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
return err;
}
- if (net->nft.validate_state == NFT_VALIDATE_DO)
+ if (nft_net->validate_state == NFT_VALIDATE_DO)
return nft_table_validate(net, ctx.table);
return 0;
@@ -4773,46 +5047,49 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
*/
void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
{
+ struct nft_chain *chain;
+
if (type == NFT_DATA_VERDICT) {
switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
- data->verdict.chain->use++;
+ chain = data->verdict.chain;
+ nft_use_inc_restore(&chain->use);
break;
}
}
}
-static void nft_set_elem_activate(const struct net *net,
- const struct nft_set *set,
- struct nft_set_elem *elem)
+static void nft_setelem_data_activate(const struct net *net,
+ const struct nft_set *set,
+ struct nft_set_elem *elem)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
nft_data_hold(nft_set_ext_data(ext), set->dtype);
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
- (*nft_set_ext_obj(ext))->use++;
+ nft_use_inc_restore(&(*nft_set_ext_obj(ext))->use);
}
-static void nft_set_elem_deactivate(const struct net *net,
- const struct nft_set *set,
- struct nft_set_elem *elem)
+void nft_setelem_data_deactivate(const struct net *net,
+ const struct nft_set *set,
+ struct nft_set_elem *elem)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
nft_data_release(nft_set_ext_data(ext), set->dtype);
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
- (*nft_set_ext_obj(ext))->use--;
+ nft_use_dec(&(*nft_set_ext_obj(ext))->use);
}
+EXPORT_SYMBOL_GPL(nft_setelem_data_deactivate);
static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr)
{
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
struct nft_set_ext_tmpl tmpl;
- struct nft_data_desc desc;
struct nft_set_elem elem;
struct nft_set_ext *ext;
struct nft_trans *trans;
@@ -4823,11 +5100,10 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
err = nla_parse_nested_deprecated(nla, NFTA_SET_ELEM_MAX, attr,
nft_set_elem_policy, NULL);
if (err < 0)
- goto err1;
+ return err;
- err = -EINVAL;
if (nla[NFTA_SET_ELEM_KEY] == NULL)
- goto err1;
+ return -EINVAL;
nft_set_ext_prepare(&tmpl);
@@ -4837,54 +5113,47 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
if (flags != 0)
nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
- err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &desc,
- nla[NFTA_SET_ELEM_KEY]);
+ err = nft_setelem_parse_key(ctx, set, &elem.key.val,
+ nla[NFTA_SET_ELEM_KEY]);
if (err < 0)
- goto err1;
-
- err = -EINVAL;
- if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
- goto err2;
+ return err;
- nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, desc.len);
+ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
err = -ENOMEM;
elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data, NULL, 0,
0, GFP_KERNEL);
if (elem.priv == NULL)
- goto err2;
+ goto fail_elem;
ext = nft_set_elem_ext(set, elem.priv);
if (flags)
*nft_set_ext_flags(ext) = flags;
trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
- if (trans == NULL) {
- err = -ENOMEM;
- goto err3;
- }
+ if (trans == NULL)
+ goto fail_trans;
priv = set->ops->deactivate(ctx->net, set, &elem);
if (priv == NULL) {
err = -ENOENT;
- goto err4;
+ goto fail_ops;
}
kfree(elem.priv);
elem.priv = priv;
- nft_set_elem_deactivate(ctx->net, set, &elem);
+ nft_setelem_data_deactivate(ctx->net, set, &elem);
nft_trans_elem(trans) = elem;
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
-err4:
+fail_ops:
kfree(trans);
-err3:
+fail_trans:
kfree(elem.priv);
-err2:
- nft_data_release(&elem.key.val, desc.type);
-err1:
+fail_elem:
+ nft_data_release(&elem.key.val, NFT_DATA_VALUE);
return err;
}
@@ -4907,10 +5176,10 @@ static int nft_flush_set(const struct nft_ctx *ctx,
}
set->ndeact++;
- nft_set_elem_deactivate(ctx->net, set, elem);
+ nft_setelem_data_deactivate(ctx->net, set, elem);
nft_trans_elem_set(trans) = set;
nft_trans_elem(trans) = *elem;
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
err1:
@@ -4937,7 +5206,11 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
set = nft_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
if (IS_ERR(set))
return PTR_ERR(set);
- if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
+
+ if (nft_set_is_anonymous(set))
+ return -EOPNOTSUPP;
+
+ if (!list_empty(&set->bindings) && (set->flags & NFT_SET_CONSTANT))
return -EBUSY;
if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) {
@@ -4960,31 +5233,6 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
return err;
}
-void nft_set_gc_batch_release(struct rcu_head *rcu)
-{
- struct nft_set_gc_batch *gcb;
- unsigned int i;
-
- gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu);
- for (i = 0; i < gcb->head.cnt; i++)
- nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true);
- kfree(gcb);
-}
-EXPORT_SYMBOL_GPL(nft_set_gc_batch_release);
-
-struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
- gfp_t gfp)
-{
- struct nft_set_gc_batch *gcb;
-
- gcb = kzalloc(sizeof(*gcb), gfp);
- if (gcb == NULL)
- return gcb;
- gcb->head.set = set;
- return gcb;
-}
-EXPORT_SYMBOL_GPL(nft_set_gc_batch_alloc);
-
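With the exported gc-batch API removed, set backends queue expired elements through the file-scope nf_tables_gc_list/nf_tables_gc_list_lock pair declared at the top of this patch, drained by the trans_gc_work worker. The worker body is outside these hunks; its general shape is the usual splice-under-spinlock drain (entry type and free step assumed, simplified):

static void nft_trans_gc_work(struct work_struct *work)
{
	struct nft_trans_gc *trans, *next;
	LIST_HEAD(trans_gc_list);

	spin_lock(&nf_tables_gc_list_lock);
	list_splice_init(&nf_tables_gc_list, &trans_gc_list);
	spin_unlock(&nf_tables_gc_list_lock);

	list_for_each_entry_safe(trans, next, &trans_gc_list, list) {
		list_del(&trans->list);
		/* destroy the dead elements carried by this entry, then
		 * drop the set reference taken when it was queued */
	}
}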
/*
* Stateful objects
*/
@@ -5207,7 +5455,7 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
nft_trans_obj(trans) = obj;
nft_trans_obj_update(trans) = true;
nft_trans_obj_newobj(trans) = newobj;
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
@@ -5268,9 +5516,14 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
+ if (!nft_use_inc(&table->use))
+ return -EMFILE;
+
type = nft_obj_type_get(net, objtype);
- if (IS_ERR(type))
- return PTR_ERR(type);
+ if (IS_ERR(type)) {
+ err = PTR_ERR(type);
+ goto err_type;
+ }
obj = nft_obj_init(&ctx, type, nla[NFTA_OBJ_DATA]);
if (IS_ERR(obj)) {
@@ -5296,7 +5549,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
goto err4;
list_add_tail_rcu(&obj->list, &table->objects);
- table->use++;
+
return 0;
err4:
/* queued in transaction log */
@@ -5310,6 +5563,9 @@ err2:
kfree(obj);
err1:
module_put(type->owner);
+err_type:
+ nft_use_dec_restore(&table->use);
+
return err;
}
@@ -5318,19 +5574,14 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
int family, const struct nft_table *table,
struct nft_object *obj, bool reset)
{
- struct nfgenmsg *nfmsg;
struct nlmsghdr *nlh;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+ NFNETLINK_V0, nft_base_seq(net));
+ if (!nlh)
goto nla_put_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = family;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
-
if (nla_put_string(skb, NFTA_OBJ_TABLE, table->name) ||
nla_put_string(skb, NFTA_OBJ_NAME, obj->key.name) ||
nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
@@ -5361,6 +5612,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
struct nft_obj_filter *filter = cb->data;
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
+ struct nftables_pernet *nft_net;
struct nft_object *obj;
bool reset = false;
@@ -5368,9 +5620,10 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
reset = true;
rcu_read_lock();
- cb->seq = net->nft.base_seq;
+ nft_net = net_generic(net, nf_tables_net_id);
+ cb->seq = nft_net->base_seq;
- list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
@@ -5653,10 +5906,11 @@ void nf_tables_deactivate_flowtable(const struct nft_ctx *ctx,
enum nft_trans_phase phase)
{
switch (phase) {
+ case NFT_TRANS_PREPARE_ERROR:
case NFT_TRANS_PREPARE:
case NFT_TRANS_ABORT:
case NFT_TRANS_RELEASE:
- flowtable->use--;
+ nft_use_dec(&flowtable->use);
/* fall through */
default:
return;
@@ -5803,8 +6057,9 @@ nft_flowtable_type_get(struct net *net, u8 family)
return ERR_PTR(-ENOENT);
}
-static void nft_unregister_flowtable_net_hooks(struct net *net,
- struct nft_flowtable *flowtable)
+static void __nft_unregister_flowtable_net_hooks(struct net *net,
+ struct nft_flowtable *flowtable,
+ bool release_netdev)
{
int i;
@@ -5813,9 +6068,17 @@ static void nft_unregister_flowtable_net_hooks(struct net *net,
continue;
nf_unregister_net_hook(net, &flowtable->ops[i]);
+ if (release_netdev)
+ flowtable->ops[i].dev = NULL;
}
}
+static void nft_unregister_flowtable_net_hooks(struct net *net,
+ struct nft_flowtable *flowtable)
+{
+ __nft_unregister_flowtable_net_hooks(net, flowtable, false);
+}
+
static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
struct sk_buff *skb,
const struct nlmsghdr *nlh,
@@ -5862,9 +6125,14 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
+ if (!nft_use_inc(&table->use))
+ return -EMFILE;
+
flowtable = kzalloc(sizeof(*flowtable), GFP_KERNEL);
- if (!flowtable)
- return -ENOMEM;
+ if (!flowtable) {
+ err = -ENOMEM;
+ goto flowtable_alloc;
+ }
flowtable->table = table;
flowtable->handle = nf_tables_alloc_handle(table);
@@ -5896,6 +6164,9 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
continue;
list_for_each_entry(ft, &table->flowtables, list) {
+ if (!nft_is_active_next(net, ft))
+ continue;
+
for (k = 0; k < ft->ops_len; k++) {
if (!ft->ops[k].dev)
continue;
@@ -5918,7 +6189,6 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
goto err6;
list_add_tail_rcu(&flowtable->list, &table->flowtables);
- table->use++;
return 0;
err6:
@@ -5936,6 +6206,9 @@ err2:
kfree(flowtable->name);
err1:
kfree(flowtable);
+flowtable_alloc:
+ nft_use_dec_restore(&table->use);
+
return err;
}
@@ -5993,20 +6266,15 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
struct nft_flowtable *flowtable)
{
struct nlattr *nest, *nest_devs;
- struct nfgenmsg *nfmsg;
struct nlmsghdr *nlh;
int i;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+ NFNETLINK_V0, nft_base_seq(net));
+ if (!nlh)
goto nla_put_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = family;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
-
if (nla_put_string(skb, NFTA_FLOWTABLE_TABLE, flowtable->table->name) ||
nla_put_string(skb, NFTA_FLOWTABLE_NAME, flowtable->name) ||
nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) ||
@@ -6055,13 +6323,15 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb,
unsigned int idx = 0, s_idx = cb->args[0];
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
+ struct nftables_pernet *nft_net;
struct nft_flowtable *flowtable;
const struct nft_table *table;
rcu_read_lock();
- cb->seq = net->nft.base_seq;
+ nft_net = net_generic(net, nf_tables_net_id);
+ cb->seq = nft_net->base_seq;
- list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
@@ -6231,21 +6501,17 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
char buf[TASK_COMM_LEN];
int event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWGEN);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), 0);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, 0, AF_UNSPEC,
+ NFNETLINK_V0, nft_base_seq(net));
+ if (!nlh)
goto nla_put_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = AF_UNSPEC;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
-
- if (nla_put_be32(skb, NFTA_GEN_ID, htonl(net->nft.base_seq)) ||
+ if (nla_put_be32(skb, NFTA_GEN_ID, htonl(nft_net->base_seq)) ||
nla_put_be32(skb, NFTA_GEN_PROC_PID, htonl(task_pid_nr(current))) ||
nla_put_string(skb, NFTA_GEN_PROC_NAME, get_task_comm(buf, current)))
goto nla_put_failure;
@@ -6278,6 +6544,7 @@ static int nf_tables_flowtable_event(struct notifier_block *this,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct nft_flowtable *flowtable;
+ struct nftables_pernet *nft_net;
struct nft_table *table;
struct net *net;
@@ -6285,13 +6552,14 @@ static int nf_tables_flowtable_event(struct notifier_block *this,
return 0;
net = dev_net(dev);
- mutex_lock(&net->nft.commit_mutex);
- list_for_each_entry(table, &net->nft.tables, list) {
+ nft_net = net_generic(net, nf_tables_net_id);
+ mutex_lock(&nft_net->commit_mutex);
+ list_for_each_entry(table, &nft_net->tables, list) {
list_for_each_entry(flowtable, &table->flowtables, list) {
nft_flowtable_event(event, dev, flowtable);
}
}
- mutex_unlock(&net->nft.commit_mutex);
+ mutex_unlock(&nft_net->commit_mutex);
return NOTIFY_DONE;
}
@@ -6472,19 +6740,22 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
static int nf_tables_validate(struct net *net)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
struct nft_table *table;
- switch (net->nft.validate_state) {
+ switch (nft_net->validate_state) {
case NFT_VALIDATE_SKIP:
break;
case NFT_VALIDATE_NEED:
nft_validate_state_update(net, NFT_VALIDATE_DO);
/* fall through */
case NFT_VALIDATE_DO:
- list_for_each_entry(table, &net->nft.tables, list) {
+ list_for_each_entry(table, &nft_net->tables, list) {
if (nft_table_validate(net, table) < 0)
return -EAGAIN;
}
+
+ nft_validate_state_update(net, NFT_VALIDATE_SKIP);
break;
}
@@ -6570,13 +6841,10 @@ static void nft_commit_release(struct nft_trans *trans)
nf_tables_chain_destroy(&trans->ctx);
break;
case NFT_MSG_DELRULE:
- if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
- nft_flow_rule_destroy(nft_trans_flow_rule(trans));
-
nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
break;
case NFT_MSG_DELSET:
- nft_set_destroy(nft_trans_set(trans));
+ nft_set_destroy(&trans->ctx, nft_trans_set(trans));
break;
case NFT_MSG_DELSETELEM:
nf_tables_set_elem_destroy(&trans->ctx,
@@ -6612,7 +6880,7 @@ static void nf_tables_trans_destroy_work(struct work_struct *w)
synchronize_rcu();
list_for_each_entry_safe(trans, next, &head, list) {
- list_del(&trans->list);
+ nft_trans_list_del(trans);
nft_commit_release(trans);
}
}
@@ -6656,9 +6924,10 @@ static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *cha
static void nf_tables_commit_chain_prepare_cancel(struct net *net)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
struct nft_trans *trans, *next;
- list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+ list_for_each_entry_safe(trans, next, &nft_net->commit_list, list) {
struct nft_chain *chain = trans->ctx.chain;
if (trans->msg_type == NFT_MSG_NEWRULE ||
@@ -6754,12 +7023,204 @@ static void nft_chain_del(struct nft_chain *chain)
list_del_rcu(&chain->list);
}
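+/*
+ * Transaction-aware garbage collection: expired set elements are
+ * gathered into nft_trans_gc batches which are released either
+ * directly under the commit mutex or asynchronously from the
+ * trans_gc_work worker, synchronised with ruleset transactions
+ * through the per-netns gc sequence counter.
+ */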
+static void nft_trans_gc_setelem_remove(struct nft_ctx *ctx,
+ struct nft_trans_gc *trans)
+{
+ void **priv = trans->priv;
+ unsigned int i;
+
+ for (i = 0; i < trans->count; i++) {
+ struct nft_set_elem elem = {
+ .priv = priv[i],
+ };
+
+ nft_setelem_data_deactivate(ctx->net, trans->set, &elem);
+ trans->set->ops->remove(trans->net, trans->set, &elem);
+ }
+}
+
+void nft_trans_gc_destroy(struct nft_trans_gc *trans)
+{
+ nft_set_put(trans->set);
+ put_net(trans->net);
+ kfree(trans);
+}
+EXPORT_SYMBOL_GPL(nft_trans_gc_destroy);
+
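+/* RCU callback: drop each collected element and then the batch itself. */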
+static void nft_trans_gc_trans_free(struct rcu_head *rcu)
+{
+ struct nft_set_elem elem = {};
+ struct nft_trans_gc *trans;
+ struct nft_ctx ctx = {};
+ unsigned int i;
+
+ trans = container_of(rcu, struct nft_trans_gc, rcu);
+ ctx.net = read_pnet(&trans->set->net);
+
+ for (i = 0; i < trans->count; i++) {
+ elem.priv = trans->priv[i];
+ atomic_dec(&trans->set->nelems);
+
+ nf_tables_set_elem_destroy(&ctx, trans->set, elem.priv);
+ }
+
+ nft_trans_gc_destroy(trans);
+}
+
+static bool nft_trans_gc_work_done(struct nft_trans_gc *trans)
+{
+ struct nftables_pernet *nft_net;
+ struct nft_ctx ctx = {};
+
+ nft_net = net_generic(trans->net, nf_tables_net_id);
+
+ mutex_lock(&nft_net->commit_mutex);
+
+ /* Check for a race with a concurrent transaction, otherwise this
+ * batch may refer to stale objects that are no longer there. Also
+ * skip the batch if the set has been destroyed by a control-plane
+ * transaction, i.e. the gc worker lost the race.
+ */
+ if (READ_ONCE(nft_net->gc_seq) != trans->seq || trans->set->dead) {
+ mutex_unlock(&nft_net->commit_mutex);
+ return false;
+ }
+
+ ctx.net = trans->net;
+ ctx.table = trans->set->table;
+
+ nft_trans_gc_setelem_remove(&ctx, trans);
+ mutex_unlock(&nft_net->commit_mutex);
+
+ return true;
+}
+
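+/* Worker: drain the global gc list, discarding batches that lost the
+ * race against a concurrent transaction (see nft_trans_gc_work_done).
+ */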
+static void nft_trans_gc_work(struct work_struct *work)
+{
+ struct nft_trans_gc *trans, *next;
+ LIST_HEAD(trans_gc_list);
+
+ spin_lock(&nf_tables_gc_list_lock);
+ list_splice_init(&nf_tables_gc_list, &trans_gc_list);
+ spin_unlock(&nf_tables_gc_list_lock);
+
+ list_for_each_entry_safe(trans, next, &trans_gc_list, list) {
+ list_del(&trans->list);
+ if (!nft_trans_gc_work_done(trans)) {
+ nft_trans_gc_destroy(trans);
+ continue;
+ }
+ call_rcu(&trans->rcu, nft_trans_gc_trans_free);
+ }
+}
+
+struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
+ unsigned int gc_seq, gfp_t gfp)
+{
+ struct net *net = read_pnet(&set->net);
+ struct nft_trans_gc *trans;
+
+ trans = kzalloc(sizeof(*trans), gfp);
+ if (!trans)
+ return NULL;
+
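+ /* async gc can race with netns teardown; bail out if the netns
+ * refcount has already dropped to zero.
+ */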
+ trans->net = maybe_get_net(net);
+ if (!trans->net) {
+ kfree(trans);
+ return NULL;
+ }
+
+ refcount_inc(&set->refs);
+ trans->set = set;
+ trans->seq = gc_seq;
+
+ return trans;
+}
+EXPORT_SYMBOL_GPL(nft_trans_gc_alloc);
+
+void nft_trans_gc_elem_add(struct nft_trans_gc *trans, void *priv)
+{
+ trans->priv[trans->count++] = priv;
+}
+EXPORT_SYMBOL_GPL(nft_trans_gc_elem_add);
+
+static void nft_trans_gc_queue_work(struct nft_trans_gc *trans)
+{
+ spin_lock(&nf_tables_gc_list_lock);
+ list_add_tail(&trans->list, &nf_tables_gc_list);
+ spin_unlock(&nf_tables_gc_list_lock);
+
+ schedule_work(&trans_gc_work);
+}
+
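+/* Room left in this batch; zero means it must be queued and a fresh
+ * batch allocated.
+ */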
+static int nft_trans_gc_space(struct nft_trans_gc *trans)
+{
+ return NFT_TRANS_GC_BATCHCOUNT - trans->count;
+}
+
+struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc,
+ unsigned int gc_seq, gfp_t gfp)
+{
+ struct nft_set *set;
+
+ if (nft_trans_gc_space(gc))
+ return gc;
+
+ set = gc->set;
+ nft_trans_gc_queue_work(gc);
+
+ return nft_trans_gc_alloc(set, gc_seq, gfp);
+}
+EXPORT_SYMBOL_GPL(nft_trans_gc_queue_async);
+
+void nft_trans_gc_queue_async_done(struct nft_trans_gc *trans)
+{
+ if (trans->count == 0) {
+ nft_trans_gc_destroy(trans);
+ return;
+ }
+
+ nft_trans_gc_queue_work(trans);
+}
+EXPORT_SYMBOL_GPL(nft_trans_gc_queue_async_done);
+
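+/* Synchronous gc runs with the commit mutex held, so full batches can
+ * bypass the worker and be freed directly via call_rcu().
+ */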
+struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp)
+{
+ struct nft_set *set;
+
+ if (WARN_ON_ONCE(!lockdep_commit_lock_is_held(gc->net)))
+ return NULL;
+
+ if (nft_trans_gc_space(gc))
+ return gc;
+
+ set = gc->set;
+ call_rcu(&gc->rcu, nft_trans_gc_trans_free);
+
+ return nft_trans_gc_alloc(set, 0, gfp);
+}
+EXPORT_SYMBOL_GPL(nft_trans_gc_queue_sync);
+
+void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans)
+{
+ WARN_ON_ONCE(!lockdep_commit_lock_is_held(trans->net));
+
+ if (trans->count == 0) {
+ nft_trans_gc_destroy(trans);
+ return;
+ }
+
+ call_rcu(&trans->rcu, nft_trans_gc_trans_free);
+}
+EXPORT_SYMBOL_GPL(nft_trans_gc_queue_sync_done);
+
static void nf_tables_module_autoload_cleanup(struct net *net)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
struct nft_module_request *req, *next;
- WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
- list_for_each_entry_safe(req, next, &net->nft.module_list, list) {
+ WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
+ list_for_each_entry_safe(req, next, &nft_net->module_list, list) {
WARN_ON_ONCE(!req->done);
list_del(&req->list);
kfree(req);
@@ -6768,6 +7229,7 @@ static void nf_tables_module_autoload_cleanup(struct net *net)
static void nf_tables_commit_release(struct net *net)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
struct nft_trans *trans;
/* all side effects have to be made visible.
@@ -6777,41 +7239,71 @@ static void nf_tables_commit_release(struct net *net)
* Memory reclaim happens asynchronously from a work queue
* to avoid an expensive synchronize_rcu() in the commit phase.
*/
- if (list_empty(&net->nft.commit_list)) {
+ if (list_empty(&nft_net->commit_list)) {
nf_tables_module_autoload_cleanup(net);
- mutex_unlock(&net->nft.commit_mutex);
+ mutex_unlock(&nft_net->commit_mutex);
return;
}
- trans = list_last_entry(&net->nft.commit_list,
+ trans = list_last_entry(&nft_net->commit_list,
struct nft_trans, list);
get_net(trans->ctx.net);
WARN_ON_ONCE(trans->put_net);
trans->put_net = true;
spin_lock(&nf_tables_destroy_list_lock);
- list_splice_tail_init(&net->nft.commit_list, &nf_tables_destroy_list);
+ list_splice_tail_init(&nft_net->commit_list, &nf_tables_destroy_list);
spin_unlock(&nf_tables_destroy_list_lock);
nf_tables_module_autoload_cleanup(net);
schedule_work(&trans_destroy_work);
- mutex_unlock(&net->nft.commit_mutex);
+ mutex_unlock(&nft_net->commit_mutex);
+}
+
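+/* The gc sequence is even while idle and odd while a commit or abort
+ * is in flight; async batches tagged with a stale sequence are
+ * discarded by nft_trans_gc_work_done().
+ */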
+static unsigned int nft_gc_seq_begin(struct nftables_pernet *nft_net)
+{
+ unsigned int gc_seq;
+
+ /* Bump the gc counter: an odd value marks gc as busy. */
+ gc_seq = READ_ONCE(nft_net->gc_seq);
+ WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
+
+ return gc_seq;
+}
+
+static void nft_gc_seq_end(struct nftables_pernet *nft_net, unsigned int gc_seq)
+{
+ WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
}
static int nf_tables_commit(struct net *net, struct sk_buff *skb)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
struct nft_trans *trans, *next;
struct nft_trans_elem *te;
struct nft_chain *chain;
struct nft_table *table;
+ unsigned int gc_seq;
int err;
- if (list_empty(&net->nft.commit_list)) {
- mutex_unlock(&net->nft.commit_mutex);
+ if (list_empty(&nft_net->commit_list)) {
+ mutex_unlock(&nft_net->commit_mutex);
return 0;
}
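+ /* An anonymous set that no rule ever bound must not be committed. */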
+ list_for_each_entry(trans, &nft_net->binding_list, binding_list) {
+ switch (trans->msg_type) {
+ case NFT_MSG_NEWSET:
+ if (nft_set_is_anonymous(nft_trans_set(trans)) &&
+ !nft_trans_set_bound(trans)) {
+ pr_warn_once("nftables ruleset with unbound set\n");
+ return -EINVAL;
+ }
+ break;
+ }
+ }
+
/* 0. Validate ruleset, otherwise roll back for error reporting. */
if (nf_tables_validate(net) < 0)
return -EAGAIN;
@@ -6821,7 +7313,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
return err;
/* 1. Allocate space for next generation rules_gen_X[] */
- list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+ list_for_each_entry_safe(trans, next, &nft_net->commit_list, list) {
int ret;
if (trans->msg_type == NFT_MSG_NEWRULE ||
@@ -6837,7 +7329,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
}
/* step 2. Make rules_gen_X visible to packet path */
- list_for_each_entry(table, &net->nft.tables, list) {
+ list_for_each_entry(table, &nft_net->tables, list) {
list_for_each_entry(chain, &table->chains, list)
nf_tables_commit_chain(net, chain);
}
@@ -6846,20 +7338,26 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
* Bump generation counter, invalidate any dump in progress.
* Cannot fail after this point.
*/
- while (++net->nft.base_seq == 0);
+ while (++nft_net->base_seq == 0)
+ ;
+
+ gc_seq = nft_gc_seq_begin(nft_net);
/* step 3. Start new generation, rules_gen_X now in use. */
net->nft.gencursor = nft_gencursor_next(net);
- list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+ list_for_each_entry_safe(trans, next, &nft_net->commit_list, list) {
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
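+ /* Dormant state changes are staged via the __NFT_TABLE_F_UPDATE
+ * bits and only applied once the transaction outcome is known.
+ */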
if (nft_trans_table_update(trans)) {
- if (!nft_trans_table_enable(trans)) {
- nf_tables_table_disable(net,
- trans->ctx.table);
- trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
+ if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+ nft_trans_destroy(trans);
+ break;
}
+ if (trans->ctx.table->flags & NFT_TABLE_F_DORMANT)
+ nf_tables_table_disable(net, trans->ctx.table);
+
+ trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
} else {
nft_clear(net, trans->ctx.table);
}
@@ -6907,6 +7405,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nft_rule_expr_deactivate(&trans->ctx,
nft_trans_rule(trans),
NFT_TRANS_COMMIT);
+
+ if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ nft_flow_rule_destroy(nft_trans_flow_rule(trans));
break;
case NFT_MSG_NEWSET:
nft_clear(net, nft_trans_set(trans));
@@ -6915,13 +7416,14 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
*/
if (nft_set_is_anonymous(nft_trans_set(trans)) &&
!list_empty(&nft_trans_set(trans)->bindings))
- trans->ctx.table->use--;
+ nft_use_dec(&trans->ctx.table->use);
nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
NFT_MSG_NEWSET, GFP_KERNEL);
nft_trans_destroy(trans);
break;
case NFT_MSG_DELSET:
+ nft_trans_set(trans)->dead = 1;
list_del_rcu(&nft_trans_set(trans)->list);
nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
NFT_MSG_DELSET, GFP_KERNEL);
@@ -6983,6 +7485,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
}
nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
+
+ nft_gc_seq_end(nft_net, gc_seq);
nf_tables_commit_release(net);
return 0;
@@ -6990,17 +7494,18 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
static void nf_tables_module_autoload(struct net *net)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
struct nft_module_request *req, *next;
LIST_HEAD(module_list);
- list_splice_init(&net->nft.module_list, &module_list);
- mutex_unlock(&net->nft.commit_mutex);
+ list_splice_init(&nft_net->module_list, &module_list);
+ mutex_unlock(&nft_net->commit_mutex);
list_for_each_entry_safe(req, next, &module_list, list) {
request_module("%s", req->module);
req->done = true;
}
- mutex_lock(&net->nft.commit_mutex);
- list_splice(&module_list, &net->nft.module_list);
+ mutex_lock(&nft_net->commit_mutex);
+ list_splice(&module_list, &nft_net->module_list);
}
static void nf_tables_abort_release(struct nft_trans *trans)
@@ -7016,7 +7521,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
break;
case NFT_MSG_NEWSET:
- nft_set_destroy(nft_trans_set(trans));
+ nft_set_destroy(&trans->ctx, nft_trans_set(trans));
break;
case NFT_MSG_NEWSETELEM:
nft_set_elem_destroy(nft_trans_elem_set(trans),
@@ -7034,6 +7539,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
struct nft_trans *trans, *next;
struct nft_trans_elem *te;
@@ -7041,16 +7547,22 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nf_tables_validate(net) < 0)
return -EAGAIN;
- list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list,
+ list_for_each_entry_safe_reverse(trans, next, &nft_net->commit_list,
list) {
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
if (nft_trans_table_update(trans)) {
- if (nft_trans_table_enable(trans)) {
- nf_tables_table_disable(net,
- trans->ctx.table);
+ if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+ nft_trans_destroy(trans);
+ break;
+ }
+ if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_DORMANT) {
+ nf_tables_table_disable(net, trans->ctx.table);
trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
+ } else if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
+ trans->ctx.table->flags &= ~NFT_TABLE_F_DORMANT;
}
+ trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
nft_trans_destroy(trans);
} else {
list_del_rcu(&trans->ctx.table->list);
@@ -7066,7 +7578,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
kfree(nft_trans_chain_name(trans));
nft_trans_destroy(trans);
} else {
- trans->ctx.table->use--;
+ nft_use_dec_restore(&trans->ctx.table->use);
nft_chain_del(trans->ctx.chain);
nf_tables_unregister_hook(trans->ctx.net,
trans->ctx.table,
@@ -7074,34 +7586,37 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
}
break;
case NFT_MSG_DELCHAIN:
- trans->ctx.table->use++;
+ nft_use_inc_restore(&trans->ctx.table->use);
nft_clear(trans->ctx.net, trans->ctx.chain);
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWRULE:
- trans->ctx.chain->use--;
+ nft_use_dec_restore(&trans->ctx.chain->use);
list_del_rcu(&nft_trans_rule(trans)->list);
nft_rule_expr_deactivate(&trans->ctx,
nft_trans_rule(trans),
NFT_TRANS_ABORT);
break;
case NFT_MSG_DELRULE:
- trans->ctx.chain->use++;
+ nft_use_inc_restore(&trans->ctx.chain->use);
nft_clear(trans->ctx.net, nft_trans_rule(trans));
nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWSET:
- trans->ctx.table->use--;
+ nft_use_dec_restore(&trans->ctx.table->use);
if (nft_trans_set_bound(trans)) {
nft_trans_destroy(trans);
break;
}
+ nft_trans_set(trans)->dead = 1;
list_del_rcu(&nft_trans_set(trans)->list);
break;
case NFT_MSG_DELSET:
- trans->ctx.table->use++;
+ nft_use_inc_restore(&trans->ctx.table->use);
nft_clear(trans->ctx.net, nft_trans_set(trans));
+ if (nft_trans_set(trans)->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
+ nft_map_activate(&trans->ctx, nft_trans_set(trans));
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWSETELEM:
@@ -7116,7 +7631,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
case NFT_MSG_DELSETELEM:
te = (struct nft_trans_elem *)trans->data;
- nft_set_elem_activate(net, te->set, &te->elem);
+ nft_setelem_data_activate(net, te->set, &te->elem);
te->set->ops->activate(net, te->set, &te->elem);
te->set->ndeact--;
@@ -7127,23 +7642,23 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans));
nft_trans_destroy(trans);
} else {
- trans->ctx.table->use--;
+ nft_use_dec_restore(&trans->ctx.table->use);
nft_obj_del(nft_trans_obj(trans));
}
break;
case NFT_MSG_DELOBJ:
- trans->ctx.table->use++;
+ nft_use_inc_restore(&trans->ctx.table->use);
nft_clear(trans->ctx.net, nft_trans_obj(trans));
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWFLOWTABLE:
- trans->ctx.table->use--;
+ nft_use_dec_restore(&trans->ctx.table->use);
list_del_rcu(&nft_trans_flowtable(trans)->list);
nft_unregister_flowtable_net_hooks(net,
nft_trans_flowtable(trans));
break;
case NFT_MSG_DELFLOWTABLE:
- trans->ctx.table->use++;
+ nft_use_inc_restore(&trans->ctx.table->use);
nft_clear(trans->ctx.net, nft_trans_flowtable(trans));
nft_trans_destroy(trans);
break;
@@ -7153,8 +7668,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
synchronize_rcu();
list_for_each_entry_safe_reverse(trans, next,
- &net->nft.commit_list, list) {
- list_del(&trans->list);
+ &nft_net->commit_list, list) {
+ nft_trans_list_del(trans);
nf_tables_abort_release(trans);
}
@@ -7166,30 +7681,32 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
return 0;
}
-static void nf_tables_cleanup(struct net *net)
-{
- nft_validate_state_update(net, NFT_VALIDATE_SKIP);
-}
-
static int nf_tables_abort(struct net *net, struct sk_buff *skb,
enum nfnl_abort_action action)
{
- int ret = __nf_tables_abort(net, action);
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
+ unsigned int gc_seq;
+ int ret;
- mutex_unlock(&net->nft.commit_mutex);
+ gc_seq = nft_gc_seq_begin(nft_net);
+ ret = __nf_tables_abort(net, action);
+ nft_gc_seq_end(nft_net, gc_seq);
+
+ mutex_unlock(&nft_net->commit_mutex);
return ret;
}
static bool nf_tables_valid_genid(struct net *net, u32 genid)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
bool genid_ok;
- mutex_lock(&net->nft.commit_mutex);
+ mutex_lock(&nft_net->commit_mutex);
- genid_ok = genid == 0 || net->nft.base_seq == genid;
+ genid_ok = genid == 0 || nft_net->base_seq == genid;
if (!genid_ok)
- mutex_unlock(&net->nft.commit_mutex);
+ mutex_unlock(&nft_net->commit_mutex);
/* else, commit mutex has to be released by commit or abort function */
return genid_ok;
@@ -7202,7 +7719,6 @@ static const struct nfnetlink_subsystem nf_tables_subsys = {
.cb = nf_tables_cb,
.commit = nf_tables_commit,
.abort = nf_tables_abort,
- .cleanup = nf_tables_cleanup,
.valid_genid = nf_tables_valid_genid,
.owner = THIS_MODULE,
};
@@ -7364,28 +7880,24 @@ int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
}
EXPORT_SYMBOL_GPL(nft_parse_u32_check);
-/**
- * nft_parse_register - parse a register value from a netlink attribute
- *
- * @attr: netlink attribute
- *
- * Parse and translate a register value from a netlink attribute.
- * Registers used to be 128 bit wide, these register numbers will be
- * mapped to the corresponding 32 bit register numbers.
- */
-unsigned int nft_parse_register(const struct nlattr *attr)
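+/*
+ * Translate a netlink register attribute into a 32-bit register number.
+ * Legacy 128-bit registers map onto four consecutive 32-bit ones;
+ * anything outside NFT_REG_VERDICT..NFT_REG32_15 is now rejected with
+ * -ERANGE instead of being remapped blindly.
+ */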
+static int nft_parse_register(const struct nlattr *attr, u32 *preg)
{
unsigned int reg;
reg = ntohl(nla_get_be32(attr));
switch (reg) {
case NFT_REG_VERDICT...NFT_REG_4:
- return reg * NFT_REG_SIZE / NFT_REG32_SIZE;
+ *preg = reg * NFT_REG_SIZE / NFT_REG32_SIZE;
+ break;
+ case NFT_REG32_00...NFT_REG32_15:
+ *preg = reg + NFT_REG_SIZE / NFT_REG32_SIZE - NFT_REG32_00;
+ break;
default:
- return reg + NFT_REG_SIZE / NFT_REG32_SIZE - NFT_REG32_00;
+ return -ERANGE;
}
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(nft_parse_register);
/**
* nft_dump_register - dump a register value to a netlink attribute
@@ -7418,7 +7930,7 @@ EXPORT_SYMBOL_GPL(nft_dump_register);
* Validate that the input register is one of the general purpose
* registers and that the length of the load is within the bounds.
*/
-int nft_validate_register_load(enum nft_registers reg, unsigned int len)
+static int nft_validate_register_load(enum nft_registers reg, unsigned int len)
{
if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE)
return -EINVAL;
@@ -7429,7 +7941,24 @@ int nft_validate_register_load(enum nft_registers reg, unsigned int len)
return 0;
}
-EXPORT_SYMBOL_GPL(nft_validate_register_load);
+
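+/* Combined parse + load validation for source registers; the range
+ * check guarantees the result fits the callers' u8 storage.
+ */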
+int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len)
+{
+ u32 reg;
+ int err;
+
+ err = nft_parse_register(attr, &reg);
+ if (err < 0)
+ return err;
+
+ err = nft_validate_register_load(reg, len);
+ if (err < 0)
+ return err;
+
+ *sreg = reg;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nft_parse_register_load);
/**
* nft_validate_register_store - validate an expression's register store
@@ -7445,10 +7974,11 @@ EXPORT_SYMBOL_GPL(nft_validate_register_load);
* A value of NULL for the data means that it is runtime-gathered data.
*/
-int nft_validate_register_store(const struct nft_ctx *ctx,
- enum nft_registers reg,
- const struct nft_data *data,
- enum nft_data_types type, unsigned int len)
+static int nft_validate_register_store(const struct nft_ctx *ctx,
+ enum nft_registers reg,
+ const struct nft_data *data,
+ enum nft_data_types type,
+ unsigned int len)
{
int err;
@@ -7480,7 +8010,27 @@ int nft_validate_register_store(const struct nft_ctx *ctx,
return 0;
}
}
-EXPORT_SYMBOL_GPL(nft_validate_register_store);
+
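+/* Combined parse + store validation for destination registers. */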
+int nft_parse_register_store(const struct nft_ctx *ctx,
+ const struct nlattr *attr, u8 *dreg,
+ const struct nft_data *data,
+ enum nft_data_types type, unsigned int len)
+{
+ int err;
+ u32 reg;
+
+ err = nft_parse_register(attr, &reg);
+ if (err < 0)
+ return err;
+
+ err = nft_validate_register_store(ctx, reg, data, type, len);
+ if (err < 0)
+ return err;
+
+ *dreg = reg;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nft_parse_register_store);
static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = {
[NFTA_VERDICT_CODE] = { .type = NLA_U32 },
@@ -7503,19 +8053,16 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
if (!tb[NFTA_VERDICT_CODE])
return -EINVAL;
+
+ /* zero padding hole for memcmp */
+ memset(data, 0, sizeof(*data));
data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
switch (data->verdict.code) {
- default:
- switch (data->verdict.code & NF_VERDICT_MASK) {
- case NF_ACCEPT:
- case NF_DROP:
- case NF_QUEUE:
- break;
- default:
- return -EINVAL;
- }
- /* fall through */
+ case NF_ACCEPT:
+ case NF_DROP:
+ case NF_QUEUE:
+ break;
case NFT_CONTINUE:
case NFT_BREAK:
case NFT_RETURN:
@@ -7530,10 +8077,13 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
return PTR_ERR(chain);
if (nft_is_base_chain(chain))
return -EOPNOTSUPP;
+ if (!nft_use_inc(&chain->use))
+ return -EMFILE;
- chain->use++;
data->verdict.chain = chain;
break;
+ default:
+ return -EINVAL;
}
desc->len = sizeof(data->verdict);
@@ -7543,10 +8093,13 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
static void nft_verdict_uninit(const struct nft_data *data)
{
+ struct nft_chain *chain;
+
switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
- data->verdict.chain->use--;
+ chain = data->verdict.chain;
+ nft_use_dec(&chain->use);
break;
}
}
@@ -7700,32 +8253,40 @@ int __nft_release_basechain(struct nft_ctx *ctx)
nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain);
list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
list_del(&rule->list);
- ctx->chain->use--;
+ nft_use_dec(&ctx->chain->use);
nf_tables_rule_release(ctx, rule);
}
nft_chain_del(ctx->chain);
- ctx->table->use--;
+ nft_use_dec(&ctx->table->use);
nf_tables_chain_destroy(ctx);
return 0;
}
EXPORT_SYMBOL_GPL(__nft_release_basechain);
+static void __nft_release_hook(struct net *net, struct nft_table *table)
+{
+ struct nft_flowtable *flowtable;
+ struct nft_chain *chain;
+
+ list_for_each_entry(chain, &table->chains, list)
+ __nf_tables_unregister_hook(net, table, chain, true);
+ list_for_each_entry(flowtable, &table->flowtables, list)
+ __nft_unregister_flowtable_net_hooks(net, flowtable, true);
+}
+
static void __nft_release_hooks(struct net *net)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
struct nft_table *table;
- struct nft_chain *chain;
- list_for_each_entry(table, &net->nft.tables, list) {
- list_for_each_entry(chain, &table->chains, list)
- nf_tables_unregister_hook(net, table, chain);
- }
+ list_for_each_entry(table, &nft_net->tables, list)
+ __nft_release_hook(net, table);
}
-static void __nft_release_tables(struct net *net)
+static void __nft_release_table(struct net *net, struct nft_table *table)
{
struct nft_flowtable *flowtable, *nf;
- struct nft_table *table, *nt;
struct nft_chain *chain, *nc;
struct nft_object *obj, *ne;
struct nft_rule *rule, *nr;
@@ -7735,75 +8296,111 @@ static void __nft_release_tables(struct net *net)
.family = NFPROTO_NETDEV,
};
- list_for_each_entry_safe(table, nt, &net->nft.tables, list) {
- ctx.family = table->family;
- ctx.table = table;
- list_for_each_entry(chain, &table->chains, list) {
- ctx.chain = chain;
- list_for_each_entry_safe(rule, nr, &chain->rules, list) {
- list_del(&rule->list);
- chain->use--;
- nf_tables_rule_release(&ctx, rule);
- }
- }
- list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) {
- list_del(&flowtable->list);
- table->use--;
- nf_tables_flowtable_destroy(flowtable);
- }
- list_for_each_entry_safe(set, ns, &table->sets, list) {
- list_del(&set->list);
- table->use--;
- nft_set_destroy(set);
- }
- list_for_each_entry_safe(obj, ne, &table->objects, list) {
- nft_obj_del(obj);
- table->use--;
- nft_obj_destroy(&ctx, obj);
- }
- list_for_each_entry_safe(chain, nc, &table->chains, list) {
- ctx.chain = chain;
- nft_chain_del(chain);
- table->use--;
- nf_tables_chain_destroy(&ctx);
+ ctx.family = table->family;
+ ctx.table = table;
+ list_for_each_entry(chain, &table->chains, list) {
+ ctx.chain = chain;
+ list_for_each_entry_safe(rule, nr, &chain->rules, list) {
+ list_del(&rule->list);
+ nft_use_dec(&chain->use);
+ nf_tables_rule_release(&ctx, rule);
}
- list_del(&table->list);
- nf_tables_table_destroy(&ctx);
}
+ list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) {
+ list_del(&flowtable->list);
+ nft_use_dec(&table->use);
+ nf_tables_flowtable_destroy(flowtable);
+ }
+ list_for_each_entry_safe(set, ns, &table->sets, list) {
+ list_del(&set->list);
+ nft_use_dec(&table->use);
+ if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
+ nft_map_deactivate(&ctx, set);
+
+ nft_set_destroy(&ctx, set);
+ }
+ list_for_each_entry_safe(obj, ne, &table->objects, list) {
+ nft_obj_del(obj);
+ nft_use_dec(&table->use);
+ nft_obj_destroy(&ctx, obj);
+ }
+ list_for_each_entry_safe(chain, nc, &table->chains, list) {
+ ctx.chain = chain;
+ nft_chain_del(chain);
+ nft_use_dec(&table->use);
+ nf_tables_chain_destroy(&ctx);
+ }
+ list_del(&table->list);
+ nf_tables_table_destroy(&ctx);
+}
+
+static void __nft_release_tables(struct net *net)
+{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
+ struct nft_table *table, *nt;
+
+ list_for_each_entry_safe(table, nt, &nft_net->tables, list)
+ __nft_release_table(net, table);
}
static int __net_init nf_tables_init_net(struct net *net)
{
- INIT_LIST_HEAD(&net->nft.tables);
- INIT_LIST_HEAD(&net->nft.commit_list);
- INIT_LIST_HEAD(&net->nft.module_list);
- mutex_init(&net->nft.commit_mutex);
- net->nft.base_seq = 1;
- net->nft.validate_state = NFT_VALIDATE_SKIP;
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
+
+ INIT_LIST_HEAD(&nft_net->tables);
+ INIT_LIST_HEAD(&nft_net->commit_list);
+ INIT_LIST_HEAD(&nft_net->binding_list);
+ INIT_LIST_HEAD(&nft_net->module_list);
+ INIT_LIST_HEAD(&nft_net->notify_list);
+ mutex_init(&nft_net->commit_mutex);
+ nft_net->base_seq = 1;
+ nft_net->validate_state = NFT_VALIDATE_SKIP;
+ nft_net->gc_seq = 0;
return 0;
}
static void __net_exit nf_tables_pre_exit_net(struct net *net)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
+
+ mutex_lock(&nft_net->commit_mutex);
__nft_release_hooks(net);
+ mutex_unlock(&nft_net->commit_mutex);
}
static void __net_exit nf_tables_exit_net(struct net *net)
{
- mutex_lock(&net->nft.commit_mutex);
- if (!list_empty(&net->nft.commit_list))
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
+ unsigned int gc_seq;
+
+ mutex_lock(&nft_net->commit_mutex);
+
+ gc_seq = nft_gc_seq_begin(nft_net);
+
+ if (!list_empty(&nft_net->commit_list))
__nf_tables_abort(net, NFNL_ABORT_NONE);
__nft_release_tables(net);
- mutex_unlock(&net->nft.commit_mutex);
- WARN_ON_ONCE(!list_empty(&net->nft.tables));
- WARN_ON_ONCE(!list_empty(&net->nft.module_list));
+
+ nft_gc_seq_end(nft_net, gc_seq);
+
+ mutex_unlock(&nft_net->commit_mutex);
+ WARN_ON_ONCE(!list_empty(&nft_net->tables));
+ WARN_ON_ONCE(!list_empty(&nft_net->module_list));
+}
+
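+/* Keep the gc worker from touching a netns after its last pernet exit. */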
+static void nf_tables_exit_batch(struct list_head *net_exit_list)
+{
+ flush_work(&trans_gc_work);
}
static struct pernet_operations nf_tables_net_ops = {
.init = nf_tables_init_net,
.pre_exit = nf_tables_pre_exit_net,
.exit = nf_tables_exit_net,
+ .exit_batch = nf_tables_exit_batch,
+ .id = &nf_tables_net_id,
+ .size = sizeof(struct nftables_pernet),
};
static int __init nf_tables_module_init(void)
@@ -7866,6 +8463,7 @@ static void __exit nf_tables_module_exit(void)
nft_chain_filter_fini();
nft_chain_route_fini();
unregister_pernet_subsys(&nf_tables_net_ops);
+ cancel_work_sync(&trans_gc_work);
cancel_work_sync(&trans_destroy_work);
rcu_barrier();
rhltable_destroy(&nft_objname_ht);
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 2d3bc22c855c..1e691eff1c40 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -7,6 +7,8 @@
#include <net/netfilter/nf_tables_offload.h>
#include <net/pkt_cls.h>
+extern unsigned int nf_tables_net_id;
+
static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
{
struct nft_flow_rule *flow;
@@ -345,11 +347,12 @@ static int nft_flow_offload_chain(struct nft_chain *chain,
int nft_flow_rule_offload_commit(struct net *net)
{
+ struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
struct nft_trans *trans;
int err = 0;
u8 policy;
- list_for_each_entry(trans, &net->nft.commit_list, list) {
+ list_for_each_entry(trans, &nft_net->commit_list, list) {
if (trans->ctx.family != NFPROTO_NETDEV)
continue;
@@ -400,7 +403,7 @@ int nft_flow_rule_offload_commit(struct net *net)
break;
}
- list_for_each_entry(trans, &net->nft.commit_list, list) {
+ list_for_each_entry(trans, &nft_net->commit_list, list) {
if (trans->ctx.family != NFPROTO_NETDEV)
continue;
@@ -419,14 +422,14 @@ int nft_flow_rule_offload_commit(struct net *net)
return err;
}
-static struct nft_chain *__nft_offload_get_chain(struct net_device *dev)
+static struct nft_chain *__nft_offload_get_chain(const struct nftables_pernet *nft_net,
+ struct net_device *dev)
{
struct nft_base_chain *basechain;
- struct net *net = dev_net(dev);
const struct nft_table *table;
struct nft_chain *chain;
- list_for_each_entry(table, &net->nft.tables, list) {
+ list_for_each_entry(table, &nft_net->tables, list) {
if (table->family != NFPROTO_NETDEV)
continue;
@@ -450,18 +453,20 @@ static void nft_indr_block_cb(struct net_device *dev,
flow_indr_block_bind_cb_t *cb, void *cb_priv,
enum flow_block_command cmd)
{
+ struct nftables_pernet *nft_net;
struct net *net = dev_net(dev);
struct nft_chain *chain;
- mutex_lock(&net->nft.commit_mutex);
- chain = __nft_offload_get_chain(dev);
+ nft_net = net_generic(net, nf_tables_net_id);
+ mutex_lock(&nft_net->commit_mutex);
+ chain = __nft_offload_get_chain(nft_net, dev);
if (chain && chain->flags & NFT_CHAIN_HW_OFFLOAD) {
struct nft_base_chain *basechain;
basechain = nft_base_chain(chain);
nft_indr_block_ing_cmd(dev, basechain, cb, cb_priv, cmd);
}
- mutex_unlock(&net->nft.commit_mutex);
+ mutex_unlock(&nft_net->commit_mutex);
}
static void nft_offload_chain_clean(struct nft_chain *chain)
@@ -480,17 +485,19 @@ static int nft_offload_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct nftables_pernet *nft_net;
struct net *net = dev_net(dev);
struct nft_chain *chain;
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
- mutex_lock(&net->nft.commit_mutex);
- chain = __nft_offload_get_chain(dev);
+ nft_net = net_generic(net, nf_tables_net_id);
+ mutex_lock(&nft_net->commit_mutex);
+ chain = __nft_offload_get_chain(nft_net, dev);
if (chain)
nft_offload_chain_clean(chain);
- mutex_unlock(&net->nft.commit_mutex);
+ mutex_unlock(&nft_net->commit_mutex);
return NOTIFY_DONE;
}
diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c
index 87b36da5cd98..0cf3278007ba 100644
--- a/net/netfilter/nf_tables_trace.c
+++ b/net/netfilter/nf_tables_trace.c
@@ -183,7 +183,6 @@ static bool nft_trace_have_verdict_chain(struct nft_traceinfo *info)
void nft_trace_notify(struct nft_traceinfo *info)
{
const struct nft_pktinfo *pkt = info->pkt;
- struct nfgenmsg *nfmsg;
struct nlmsghdr *nlh;
struct sk_buff *skb;
unsigned int size;
@@ -219,15 +218,11 @@ void nft_trace_notify(struct nft_traceinfo *info)
return;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_TRACE);
- nlh = nlmsg_put(skb, 0, 0, event, sizeof(struct nfgenmsg), 0);
+ nlh = nfnl_msg_put(skb, 0, 0, event, 0, info->basechain->type->family,
+ NFNETLINK_V0, 0);
if (!nlh)
goto nla_put_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = info->basechain->type->family;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
-
if (nla_put_be32(skb, NFTA_TRACE_NFPROTO, htonl(nft_pf(pkt))))
goto nla_put_failure;
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 81c86a156c6c..3cff23c814c3 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -455,7 +455,8 @@ ack:
* processed, this avoids that the same error is
* reported several times when replaying the batch.
*/
- if (nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
+ if (err == -ENOMEM ||
+ nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
/* We failed to enqueue an error, reset the
* list of errors and send OOM to userspace
* pointing to the batch header.
@@ -512,8 +513,6 @@ done:
goto replay_abort;
}
}
- if (ss->cleanup)
- ss->cleanup(net);
nfnl_err_deliver(&err_list, oskb);
kfree_skb(skb);
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 2481470dec36..4b46421c5e17 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -132,21 +132,16 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
int event, struct nf_acct *acct)
{
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
unsigned int flags = portid ? NLM_F_MULTI : 0;
u64 pkts, bytes;
u32 old_flags;
event = nfnl_msg_type(NFNL_SUBSYS_ACCT, event);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
+ NFNETLINK_V0, 0);
+ if (!nlh)
goto nlmsg_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = AF_UNSPEC;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
-
if (nla_put_string(skb, NFACCT_NAME, acct->name))
goto nla_put_failure;
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 3d5fc07b2530..cb9c2e955996 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -530,20 +530,15 @@ nfnl_cthelper_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
int event, struct nf_conntrack_helper *helper)
{
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
unsigned int flags = portid ? NLM_F_MULTI : 0;
int status;
event = nfnl_msg_type(NFNL_SUBSYS_CTHELPER, event);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
+ NFNETLINK_V0, 0);
+ if (!nlh)
goto nlmsg_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = AF_UNSPEC;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
-
if (nla_put_string(skb, NFCTH_NAME, helper->name))
goto nla_put_failure;
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index da915c224a82..a854e1560cc1 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -160,22 +160,17 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
int event, struct ctnl_timeout *timeout)
{
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
unsigned int flags = portid ? NLM_F_MULTI : 0;
const struct nf_conntrack_l4proto *l4proto = timeout->timeout.l4proto;
struct nlattr *nest_parms;
int ret;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
+ NFNETLINK_V0, 0);
+ if (!nlh)
goto nlmsg_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = AF_UNSPEC;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
-
if (nla_put_string(skb, CTA_TIMEOUT_NAME, timeout->name) ||
nla_put_be16(skb, CTA_TIMEOUT_L3PROTO,
htons(timeout->timeout.l3num)) ||
@@ -382,21 +377,16 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
const unsigned int *timeouts)
{
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
unsigned int flags = portid ? NLM_F_MULTI : 0;
struct nlattr *nest_parms;
int ret;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC,
+ NFNETLINK_V0, 0);
+ if (!nlh)
goto nlmsg_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = AF_UNSPEC;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
-
if (nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(l3num)) ||
nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, l4proto->l4proto))
goto nla_put_failure;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index b36af4741ad3..80c09070ea9f 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -452,20 +452,15 @@ __build_packet_message(struct nfnl_log_net *log,
{
struct nfulnl_msg_packet_hdr pmsg;
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
sk_buff_data_t old_tail = inst->skb->tail;
struct sock *sk;
const unsigned char *hwhdrp;
- nlh = nlmsg_put(inst->skb, 0, 0,
- nfnl_msg_type(NFNL_SUBSYS_ULOG, NFULNL_MSG_PACKET),
- sizeof(struct nfgenmsg), 0);
+ nlh = nfnl_msg_put(inst->skb, 0, 0,
+ nfnl_msg_type(NFNL_SUBSYS_ULOG, NFULNL_MSG_PACKET),
+ 0, pf, NFNETLINK_V0, htons(inst->group_num));
if (!nlh)
return -1;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = pf;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = htons(inst->group_num);
memset(&pmsg, 0, sizeof(pmsg));
pmsg.hw_protocol = skb->protocol;
@@ -688,8 +683,8 @@ nfulnl_log_packet(struct net *net,
unsigned int plen = 0;
struct nfnl_log_net *log = nfnl_log_pernet(net);
const struct nfnl_ct_hook *nfnl_ct = NULL;
+ enum ip_conntrack_info ctinfo = 0;
struct nf_conn *ct = NULL;
- enum ip_conntrack_info uninitialized_var(ctinfo);
if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
li = li_user;
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
index 79fbf37291f3..573a372e760f 100644
--- a/net/netfilter/nfnetlink_osf.c
+++ b/net/netfilter/nfnetlink_osf.c
@@ -269,6 +269,7 @@ bool nf_osf_find(const struct sk_buff *skb,
struct nf_osf_hdr_ctx ctx;
const struct tcphdr *tcp;
struct tcphdr _tcph;
+ bool found = false;
memset(&ctx, 0, sizeof(ctx));
@@ -283,10 +284,11 @@ bool nf_osf_find(const struct sk_buff *skb,
data->genre = f->genre;
data->version = f->version;
+ found = true;
break;
}
- return true;
+ return found;
}
EXPORT_SYMBOL_GPL(nf_osf_find);
@@ -314,6 +316,14 @@ static int nfnl_osf_add_callback(struct net *net, struct sock *ctnl,
f = nla_data(osf_attrs[OSF_ATTR_FINGER]);
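+ /* Reject fingerprints with an out-of-range option count or
+ * unterminated genre/subtype/version strings.
+ */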
+ if (f->opt_num > ARRAY_SIZE(f->opt))
+ return -EINVAL;
+
+ if (!memchr(f->genre, 0, MAXGENRELEN) ||
+ !memchr(f->subtype, 0, MAXGENRELEN) ||
+ !memchr(f->version, 0, MAXGENRELEN))
+ return -EINVAL;
+
kf = kmalloc(sizeof(struct nf_osf_finger), GFP_KERNEL);
if (!kf)
return -ENOMEM;
@@ -438,3 +448,4 @@ module_init(nfnl_osf_init);
module_exit(nfnl_osf_fini);
MODULE_LICENSE("GPL");
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_OSF);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 581bd1353a44..772f8c69818c 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -383,12 +383,11 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
struct nlattr *nla;
struct nfqnl_msg_packet_hdr *pmsg;
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
struct sk_buff *entskb = entry->skb;
struct net_device *indev;
struct net_device *outdev;
struct nf_conn *ct = NULL;
- enum ip_conntrack_info uninitialized_var(ctinfo);
+ enum ip_conntrack_info ctinfo;
struct nfnl_ct_hook *nfnl_ct;
bool csum_verify;
char *secdata = NULL;
@@ -469,18 +468,15 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
goto nlmsg_failure;
}
- nlh = nlmsg_put(skb, 0, 0,
- nfnl_msg_type(NFNL_SUBSYS_QUEUE, NFQNL_MSG_PACKET),
- sizeof(struct nfgenmsg), 0);
+ nlh = nfnl_msg_put(skb, 0, 0,
+ nfnl_msg_type(NFNL_SUBSYS_QUEUE, NFQNL_MSG_PACKET),
+ 0, entry->state.pf, NFNETLINK_V0,
+ htons(queue->queue_num));
if (!nlh) {
skb_tx_error(entskb);
kfree_skb(skb);
goto nlmsg_failure;
}
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = entry->state.pf;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = htons(queue->queue_num);
nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
pmsg = nla_data(nla);
@@ -1188,7 +1184,7 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
struct nfqnl_instance *queue;
unsigned int verdict;
struct nf_queue_entry *entry;
- enum ip_conntrack_info uninitialized_var(ctinfo);
+ enum ip_conntrack_info ctinfo;
struct nfnl_ct_hook *nfnl_ct;
struct nf_conn *ct = NULL;
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index 10e9d50e4e19..ccab2e66d754 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -16,8 +16,8 @@
#include <net/netfilter/nf_tables_offload.h>
struct nft_bitwise {
- enum nft_registers sreg:8;
- enum nft_registers dreg:8;
+ u8 sreg;
+ u8 dreg;
u8 len;
struct nft_data mask;
struct nft_data xor;
@@ -65,14 +65,14 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
priv->len = len;
- priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]);
- err = nft_validate_register_load(priv->sreg, priv->len);
+ err = nft_parse_register_load(tb[NFTA_BITWISE_SREG], &priv->sreg,
+ priv->len);
if (err < 0)
return err;
- priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]);
- err = nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, priv->len);
+ err = nft_parse_register_store(ctx, tb[NFTA_BITWISE_DREG],
+ &priv->dreg, NULL, NFT_DATA_VALUE,
+ priv->len);
if (err < 0)
return err;
diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
index 12bed3f7bbc6..9d250bd60bb8 100644
--- a/net/netfilter/nft_byteorder.c
+++ b/net/netfilter/nft_byteorder.c
@@ -16,8 +16,8 @@
#include <net/netfilter/nf_tables.h>
struct nft_byteorder {
- enum nft_registers sreg:8;
- enum nft_registers dreg:8;
+ u8 sreg;
+ u8 dreg;
enum nft_byteorder_ops op:8;
u8 len;
u8 size;
@@ -30,28 +30,29 @@ void nft_byteorder_eval(const struct nft_expr *expr,
const struct nft_byteorder *priv = nft_expr_priv(expr);
u32 *src = &regs->data[priv->sreg];
u32 *dst = &regs->data[priv->dreg];
- union { u32 u32; u16 u16; } *s, *d;
+ u16 *s16, *d16;
unsigned int i;
- s = (void *)src;
- d = (void *)dst;
+ s16 = (void *)src;
+ d16 = (void *)dst;
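+ /* 64-bit and 16-bit accesses go through typed views of the u32
+ * register array so each iteration advances by the element size
+ * rather than by 32-bit slots.
+ */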
switch (priv->size) {
case 8: {
+ u64 *dst64 = (void *)dst;
u64 src64;
switch (priv->op) {
case NFT_BYTEORDER_NTOH:
for (i = 0; i < priv->len / 8; i++) {
src64 = nft_reg_load64(&src[i]);
- nft_reg_store64(&dst[i], be64_to_cpu(src64));
+ nft_reg_store64(&dst64[i], be64_to_cpu(src64));
}
break;
case NFT_BYTEORDER_HTON:
for (i = 0; i < priv->len / 8; i++) {
src64 = (__force __u64)
cpu_to_be64(nft_reg_load64(&src[i]));
- nft_reg_store64(&dst[i], src64);
+ nft_reg_store64(&dst64[i], src64);
}
break;
}
@@ -61,11 +62,11 @@ void nft_byteorder_eval(const struct nft_expr *expr,
switch (priv->op) {
case NFT_BYTEORDER_NTOH:
for (i = 0; i < priv->len / 4; i++)
- d[i].u32 = ntohl((__force __be32)s[i].u32);
+ dst[i] = ntohl((__force __be32)src[i]);
break;
case NFT_BYTEORDER_HTON:
for (i = 0; i < priv->len / 4; i++)
- d[i].u32 = (__force __u32)htonl(s[i].u32);
+ dst[i] = (__force __u32)htonl(src[i]);
break;
}
break;
@@ -73,11 +74,11 @@ void nft_byteorder_eval(const struct nft_expr *expr,
switch (priv->op) {
case NFT_BYTEORDER_NTOH:
for (i = 0; i < priv->len / 2; i++)
- d[i].u16 = ntohs((__force __be16)s[i].u16);
+ d16[i] = ntohs((__force __be16)s16[i]);
break;
case NFT_BYTEORDER_HTON:
for (i = 0; i < priv->len / 2; i++)
- d[i].u16 = (__force __u16)htons(s[i].u16);
+ d16[i] = (__force __u16)htons(s16[i]);
break;
}
break;
@@ -131,20 +132,20 @@ static int nft_byteorder_init(const struct nft_ctx *ctx,
return -EINVAL;
}
- priv->sreg = nft_parse_register(tb[NFTA_BYTEORDER_SREG]);
err = nft_parse_u32_check(tb[NFTA_BYTEORDER_LEN], U8_MAX, &len);
if (err < 0)
return err;
priv->len = len;
- err = nft_validate_register_load(priv->sreg, priv->len);
+ err = nft_parse_register_load(tb[NFTA_BYTEORDER_SREG], &priv->sreg,
+ priv->len);
if (err < 0)
return err;
- priv->dreg = nft_parse_register(tb[NFTA_BYTEORDER_DREG]);
- return nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, priv->len);
+ return nft_parse_register_store(ctx, tb[NFTA_BYTEORDER_DREG],
+ &priv->dreg, NULL, NFT_DATA_VALUE,
+ priv->len);
}
static int nft_byteorder_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
index b5d5d071d765..916195dba6f7 100644
--- a/net/netfilter/nft_chain_filter.c
+++ b/net/netfilter/nft_chain_filter.c
@@ -2,6 +2,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <net/netfilter/nf_tables.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
@@ -10,6 +11,8 @@
#include <net/netfilter/nf_tables_ipv4.h>
#include <net/netfilter/nf_tables_ipv6.h>
+extern unsigned int nf_tables_net_id;
+
#ifdef CONFIG_NF_TABLES_IPV4
static unsigned int nft_do_chain_ipv4(void *priv,
struct sk_buff *skb,
@@ -293,6 +296,9 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
if (strcmp(basechain->dev_name, dev->name) != 0)
return;
+ if (!basechain->ops.dev)
+ return;
+
/* UNREGISTER events also happen on netns exit.
*
* Although nf_tables core releases all tables/chains, only
@@ -315,6 +321,7 @@ static int nf_tables_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct nftables_pernet *nft_net;
struct nft_table *table;
struct nft_chain *chain, *nr;
struct nft_ctx ctx = {
@@ -325,8 +332,9 @@ static int nf_tables_netdev_event(struct notifier_block *this,
event != NETDEV_CHANGENAME)
return NOTIFY_DONE;
- mutex_lock(&ctx.net->nft.commit_mutex);
- list_for_each_entry(table, &ctx.net->nft.tables, list) {
+ nft_net = net_generic(ctx.net, nf_tables_net_id);
+ mutex_lock(&nft_net->commit_mutex);
+ list_for_each_entry(table, &nft_net->tables, list) {
if (table->family != NFPROTO_NETDEV)
continue;
@@ -340,7 +348,7 @@ static int nf_tables_netdev_event(struct notifier_block *this,
nft_netdev_event(event, dev, &ctx);
}
}
- mutex_unlock(&ctx.net->nft.commit_mutex);
+ mutex_unlock(&nft_net->commit_mutex);
return NOTIFY_DONE;
}
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index ae730dba60c8..a7c1e7c4381a 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -17,7 +17,7 @@
struct nft_cmp_expr {
struct nft_data data;
- enum nft_registers sreg:8;
+ u8 sreg;
u8 len;
enum nft_cmp_ops op:8;
};
@@ -86,8 +86,7 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
return err;
}
- priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
- err = nft_validate_register_load(priv->sreg, desc.len);
+ err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
if (err < 0)
return err;
@@ -169,8 +168,7 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx,
if (err < 0)
return err;
- priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
- err = nft_validate_register_load(priv->sreg, desc.len);
+ err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
if (err < 0)
return err;
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index bbe03b9a03b1..fdce5012a4f3 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -192,6 +192,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1]
static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
{
struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
+ u32 l4proto;
u32 flags;
int err;
@@ -204,12 +205,18 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
return -EINVAL;
flags = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_FLAGS]));
- if (flags & ~NFT_RULE_COMPAT_F_MASK)
+ if (flags & NFT_RULE_COMPAT_F_UNUSED ||
+ flags & ~NFT_RULE_COMPAT_F_MASK)
return -EINVAL;
if (flags & NFT_RULE_COMPAT_F_INV)
*inv = true;
- *proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
+ l4proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
+ if (l4proto > U16_MAX)
+ return -EINVAL;
+
+ *proto = l4proto;
+
return 0;
}
@@ -327,6 +334,22 @@ static int nft_target_validate(const struct nft_ctx *ctx,
unsigned int hook_mask = 0;
int ret;
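+ /* x_tables extensions are only valid in the ip, ip6, inet,
+ * bridge and arp families, and only from hooks in the standard
+ * inet hook range.
+ */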
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET &&
+ ctx->family != NFPROTO_BRIDGE &&
+ ctx->family != NFPROTO_ARP)
+ return -EOPNOTSUPP;
+
+ ret = nft_chain_validate_hooks(ctx->chain,
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_POST_ROUTING));
+ if (ret)
+ return ret;
+
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
@@ -569,6 +592,22 @@ static int nft_match_validate(const struct nft_ctx *ctx,
unsigned int hook_mask = 0;
int ret;
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET &&
+ ctx->family != NFPROTO_BRIDGE &&
+ ctx->family != NFPROTO_ARP)
+ return -EOPNOTSUPP;
+
+ ret = nft_chain_validate_hooks(ctx->chain,
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_POST_ROUTING));
+ if (ret)
+ return ret;
+
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
@@ -591,19 +630,14 @@ nfnl_compat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
int rev, int target)
{
struct nlmsghdr *nlh;
- struct nfgenmsg *nfmsg;
unsigned int flags = portid ? NLM_F_MULTI : 0;
event = nfnl_msg_type(NFNL_SUBSYS_NFT_COMPAT, event);
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
- if (nlh == NULL)
+ nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
+ NFNETLINK_V0, 0);
+ if (!nlh)
goto nlmsg_failure;
- nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = family;
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
-
if (nla_put_string(skb, NFTA_COMPAT_NAME, name) ||
nla_put_be32(skb, NFTA_COMPAT_REV, htonl(rev)) ||
nla_put_be32(skb, NFTA_COMPAT_TYPE, htonl(target)))
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 28991730728b..0c7f091d7d54 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -27,8 +27,8 @@ struct nft_ct {
enum nft_ct_keys key:8;
enum ip_conntrack_dir dir:8;
union {
- enum nft_registers dreg:8;
- enum nft_registers sreg:8;
+ u8 dreg;
+ u8 sreg;
};
};
@@ -481,6 +481,9 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
break;
#endif
case NFT_CT_ID:
+ if (tb[NFTA_CT_DIRECTION])
+ return -EINVAL;
+
len = sizeof(u32);
break;
default:
@@ -498,9 +501,8 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
}
}
- priv->dreg = nft_parse_register(tb[NFTA_CT_DREG]);
- err = nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, len);
+ err = nft_parse_register_store(ctx, tb[NFTA_CT_DREG], &priv->dreg, NULL,
+ NFT_DATA_VALUE, len);
if (err < 0)
return err;
@@ -600,8 +602,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
}
}
- priv->sreg = nft_parse_register(tb[NFTA_CT_SREG]);
- err = nft_validate_register_load(priv->sreg, len);
+ err = nft_parse_register_load(tb[NFTA_CT_SREG], &priv->sreg, len);
if (err < 0)
goto err1;
@@ -1176,7 +1177,30 @@ static int nft_ct_expect_obj_init(const struct nft_ctx *ctx,
if (tb[NFTA_CT_EXPECT_L3PROTO])
priv->l3num = ntohs(nla_get_be16(tb[NFTA_CT_EXPECT_L3PROTO]));
+ switch (priv->l3num) {
+ case NFPROTO_IPV4:
+ case NFPROTO_IPV6:
+ if (priv->l3num == ctx->family || ctx->family == NFPROTO_INET)
+ break;
+
+ return -EINVAL;
+ case NFPROTO_INET: /* tuple.src.l3num supports NFPROTO_IPV4/6 only */
+ default:
+ return -EAFNOSUPPORT;
+ }
+
priv->l4proto = nla_get_u8(tb[NFTA_CT_EXPECT_L4PROTO]);
+ switch (priv->l4proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
+ case IPPROTO_DCCP:
+ case IPPROTO_SCTP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
priv->dport = nla_get_be16(tb[NFTA_CT_EXPECT_DPORT]);
priv->timeout = nla_get_u32(tb[NFTA_CT_EXPECT_TIMEOUT]);
priv->size = nla_get_u8(tb[NFTA_CT_EXPECT_SIZE]);
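[note] nft_ct_expect_obj_init() now whitelists the expectation tuple: l3num must be IPv4 or IPv6 and agree with the table family, and l4proto must be one of the conntrack-supported transports. A compact sketch of that double switch, with illustrative stand-ins for the NFPROTO_* constants:

    #include <errno.h>
    #include <netinet/in.h>                  /* IPPROTO_* */
    #include <stdint.h>

    enum { FAM_INET = 1, FAM_IPV4 = 2, FAM_IPV6 = 10 };  /* stand-ins */

    static int check_expect(uint8_t l3num, uint8_t family, uint8_t l4proto)
    {
            switch (l3num) {
            case FAM_IPV4:
            case FAM_IPV6:
                    if (l3num == family || family == FAM_INET)
                            break;
                    return -EINVAL;          /* family mismatch */
            default:
                    return -EAFNOSUPPORT;    /* tuple is v4/v6 only */
            }

            switch (l4proto) {               /* supported transports only */
            case IPPROTO_TCP:
            case IPPROTO_UDP:
            case IPPROTO_SCTP:
                    return 0;
            default:
                    return -EOPNOTSUPP;
            }
    }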
diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c
index 6007089e1c2f..a5b560ee0337 100644
--- a/net/netfilter/nft_dup_netdev.c
+++ b/net/netfilter/nft_dup_netdev.c
@@ -14,7 +14,7 @@
#include <net/netfilter/nf_dup_netdev.h>
struct nft_dup_netdev {
- enum nft_registers sreg_dev:8;
+ u8 sreg_dev;
};
static void nft_dup_netdev_eval(const struct nft_expr *expr,
@@ -40,8 +40,8 @@ static int nft_dup_netdev_init(const struct nft_ctx *ctx,
if (tb[NFTA_DUP_SREG_DEV] == NULL)
return -EINVAL;
- priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
- return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+ return nft_parse_register_load(tb[NFTA_DUP_SREG_DEV], &priv->sreg_dev,
+ sizeof(int));
}
static int nft_dup_netdev_dump(struct sk_buff *skb, const struct nft_expr *expr)
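[note] This file shows the smallest instance of a conversion that repeats through the rest of the series: the two-step nft_parse_register() plus nft_validate_register_load() pair becomes a single nft_parse_register_load() call, and the register number shrinks from an enum nft_registers bitfield to a plain u8. The point of the combined helper is ordering: the value is validated before it is stored, so a bogus attribute can never leave a partially-initialized register number behind. A userspace model of that shape (the register-file geometry below is assumed, not the kernel's):

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>

    #define REG_MAX  20u         /* assumed register count */
    #define REG_SIZE 4u          /* assumed bytes per register */

    static int parse_register_load(uint32_t attr, uint8_t *sreg, size_t len)
    {
            if (attr > REG_MAX)                      /* validate first */
                    return -ERANGE;
            if (len > (REG_MAX - attr + 1) * REG_SIZE)
                    return -EINVAL;                  /* load must fit */
            *sreg = (uint8_t)attr;                   /* then commit */
            return 0;
    }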
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 6bcc18124e5b..e0c17217817d 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -11,13 +11,16 @@
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
+#include <net/netns/generic.h>
+
+extern unsigned int nf_tables_net_id;
struct nft_dynset {
struct nft_set *set;
struct nft_set_ext_tmpl tmpl;
enum nft_dynset_ops op:8;
- enum nft_registers sreg_key:8;
- enum nft_registers sreg_data:8;
+ u8 sreg_key;
+ u8 sreg_data;
bool invert;
u64 timeout;
struct nft_expr *expr;
@@ -129,13 +132,14 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
+ struct nftables_pernet *nft_net = net_generic(ctx->net, nf_tables_net_id);
struct nft_dynset *priv = nft_expr_priv(expr);
u8 genmask = nft_genmask_next(ctx->net);
struct nft_set *set;
u64 timeout;
int err;
- lockdep_assert_held(&ctx->net->nft.commit_mutex);
+ lockdep_assert_held(&nft_net->commit_mutex);
if (tb[NFTA_DYNSET_SET_NAME] == NULL ||
tb[NFTA_DYNSET_OP] == NULL ||
@@ -157,6 +161,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
if (IS_ERR(set))
return PTR_ERR(set);
+ if (set->flags & NFT_SET_OBJECT)
+ return -EOPNOTSUPP;
+
if (set->ops->update == NULL)
return -EOPNOTSUPP;
@@ -177,8 +184,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
return err;
}
- priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
- err = nft_validate_register_load(priv->sreg_key, set->klen);
+ err = nft_parse_register_load(tb[NFTA_DYNSET_SREG_KEY], &priv->sreg_key,
+ set->klen);
if (err < 0)
return err;
@@ -188,8 +195,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
if (set->dtype == NFT_DATA_VERDICT)
return -EOPNOTSUPP;
- priv->sreg_data = nft_parse_register(tb[NFTA_DYNSET_SREG_DATA]);
- err = nft_validate_register_load(priv->sreg_data, set->dlen);
+ err = nft_parse_register_load(tb[NFTA_DYNSET_SREG_DATA],
+ &priv->sreg_data, set->dlen);
if (err < 0)
return err;
} else if (set->flags & NFT_SET_MAP)
@@ -259,7 +266,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx,
{
struct nft_dynset *priv = nft_expr_priv(expr);
- priv->set->use++;
+ nf_tables_activate_set(ctx, priv->set);
}
static void nft_dynset_destroy(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index faa0844c01fb..670dd146fb2b 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -19,8 +19,8 @@ struct nft_exthdr {
u8 offset;
u8 len;
u8 op;
- enum nft_registers dreg:8;
- enum nft_registers sreg:8;
+ u8 dreg;
+ u8 sreg;
u8 flags;
};
@@ -353,12 +353,12 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
priv->offset = offset;
priv->len = len;
- priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]);
priv->flags = flags;
priv->op = op;
- return nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, priv->len);
+ return nft_parse_register_store(ctx, tb[NFTA_EXTHDR_DREG],
+ &priv->dreg, NULL, NFT_DATA_VALUE,
+ priv->len);
}
static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx,
@@ -403,11 +403,11 @@ static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx,
priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
priv->offset = offset;
priv->len = len;
- priv->sreg = nft_parse_register(tb[NFTA_EXTHDR_SREG]);
priv->flags = flags;
priv->op = op;
- return nft_validate_register_load(priv->sreg, priv->len);
+ return nft_parse_register_load(tb[NFTA_EXTHDR_SREG], &priv->sreg,
+ priv->len);
}
static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
index cfac0964f48d..d2777aff5943 100644
--- a/net/netfilter/nft_fib.c
+++ b/net/netfilter/nft_fib.c
@@ -86,7 +86,6 @@ int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
return -EINVAL;
priv->result = ntohl(nla_get_be32(tb[NFTA_FIB_RESULT]));
- priv->dreg = nft_parse_register(tb[NFTA_FIB_DREG]);
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
@@ -106,8 +105,8 @@ int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
return -EINVAL;
}
- err = nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, len);
+ err = nft_parse_register_store(ctx, tb[NFTA_FIB_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE, len);
if (err < 0)
return err;
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index ff5ac173e897..850d4e92702e 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -147,6 +147,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
{
unsigned int hook_mask = (1 << NF_INET_FORWARD);
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
return nft_chain_validate_hooks(ctx->chain, hook_mask);
}
@@ -171,8 +176,10 @@ static int nft_flow_offload_init(const struct nft_ctx *ctx,
if (IS_ERR(flowtable))
return PTR_ERR(flowtable);
+ if (!nft_use_inc(&flowtable->use))
+ return -EMFILE;
+
priv->flowtable = flowtable;
- flowtable->use++;
return nf_ct_netns_get(ctx->net, ctx->family);
}
@@ -191,7 +198,7 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx,
{
struct nft_flow_offload *priv = nft_expr_priv(expr);
- priv->flowtable->use++;
+ nft_use_inc_restore(&priv->flowtable->use);
}
static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
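[note] nft_use_inc() and nft_use_dec() replace the bare use++/use-- on flowtables (and, later in this series, on objects), turning an overflowable counter into a saturating one: once the limit is reached the new reference is refused and init fails with -EMFILE. A minimal model, assuming the cap is INT_MAX:

    #include <limits.h>
    #include <stdbool.h>

    static bool use_inc(unsigned int *use)
    {
            if (*use == INT_MAX)         /* would overflow: refuse */
                    return false;
            (*use)++;
            return true;
    }

    static void use_dec(unsigned int *use)
    {
            /* callers own a reference, so *use is non-zero here */
            (*use)--;
    }

A caller maps the refusal to an errno, as nft_flow_offload_init() does above with return -EMFILE.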
diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
index 3b0dcd170551..7730409f6f09 100644
--- a/net/netfilter/nft_fwd_netdev.c
+++ b/net/netfilter/nft_fwd_netdev.c
@@ -18,7 +18,7 @@
#include <net/ip.h>
struct nft_fwd_netdev {
- enum nft_registers sreg_dev:8;
+ u8 sreg_dev;
};
static void nft_fwd_netdev_eval(const struct nft_expr *expr,
@@ -50,8 +50,8 @@ static int nft_fwd_netdev_init(const struct nft_ctx *ctx,
if (tb[NFTA_FWD_SREG_DEV] == NULL)
return -EINVAL;
- priv->sreg_dev = nft_parse_register(tb[NFTA_FWD_SREG_DEV]);
- return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+ return nft_parse_register_load(tb[NFTA_FWD_SREG_DEV], &priv->sreg_dev,
+ sizeof(int));
}
static int nft_fwd_netdev_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -83,8 +83,8 @@ static bool nft_fwd_netdev_offload_action(const struct nft_expr *expr)
}
struct nft_fwd_neigh {
- enum nft_registers sreg_dev:8;
- enum nft_registers sreg_addr:8;
+ u8 sreg_dev;
+ u8 sreg_addr;
u8 nfproto;
};
@@ -162,8 +162,6 @@ static int nft_fwd_neigh_init(const struct nft_ctx *ctx,
!tb[NFTA_FWD_NFPROTO])
return -EINVAL;
- priv->sreg_dev = nft_parse_register(tb[NFTA_FWD_SREG_DEV]);
- priv->sreg_addr = nft_parse_register(tb[NFTA_FWD_SREG_ADDR]);
priv->nfproto = ntohl(nla_get_be32(tb[NFTA_FWD_NFPROTO]));
switch (priv->nfproto) {
@@ -177,11 +175,13 @@ static int nft_fwd_neigh_init(const struct nft_ctx *ctx,
return -EOPNOTSUPP;
}
- err = nft_validate_register_load(priv->sreg_dev, sizeof(int));
+ err = nft_parse_register_load(tb[NFTA_FWD_SREG_DEV], &priv->sreg_dev,
+ sizeof(int));
if (err < 0)
return err;
- return nft_validate_register_load(priv->sreg_addr, addr_len);
+ return nft_parse_register_load(tb[NFTA_FWD_SREG_ADDR], &priv->sreg_addr,
+ addr_len);
}
static int nft_fwd_neigh_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index b836d550b919..2ff6c7759494 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -14,8 +14,8 @@
#include <linux/jhash.h>
struct nft_jhash {
- enum nft_registers sreg:8;
- enum nft_registers dreg:8;
+ u8 sreg;
+ u8 dreg;
u8 len;
bool autogen_seed:1;
u32 modulus;
@@ -38,7 +38,7 @@ static void nft_jhash_eval(const struct nft_expr *expr,
}
struct nft_symhash {
- enum nft_registers dreg:8;
+ u8 dreg;
u32 modulus;
u32 offset;
};
@@ -83,9 +83,6 @@ static int nft_jhash_init(const struct nft_ctx *ctx,
if (tb[NFTA_HASH_OFFSET])
priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));
- priv->sreg = nft_parse_register(tb[NFTA_HASH_SREG]);
- priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
-
err = nft_parse_u32_check(tb[NFTA_HASH_LEN], U8_MAX, &len);
if (err < 0)
return err;
@@ -94,6 +91,10 @@ static int nft_jhash_init(const struct nft_ctx *ctx,
priv->len = len;
+ err = nft_parse_register_load(tb[NFTA_HASH_SREG], &priv->sreg, len);
+ if (err < 0)
+ return err;
+
priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
if (priv->modulus < 1)
return -ERANGE;
@@ -108,9 +109,8 @@ static int nft_jhash_init(const struct nft_ctx *ctx,
get_random_bytes(&priv->seed, sizeof(priv->seed));
}
- return nft_validate_register_load(priv->sreg, len) &&
- nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, sizeof(u32));
+ return nft_parse_register_store(ctx, tb[NFTA_HASH_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE, sizeof(u32));
}
static int nft_symhash_init(const struct nft_ctx *ctx,
@@ -126,8 +126,6 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
if (tb[NFTA_HASH_OFFSET])
priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET]));
- priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
-
priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
if (priv->modulus < 1)
return -ERANGE;
@@ -135,8 +133,9 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
if (priv->offset + priv->modulus - 1 < priv->offset)
return -EOVERFLOW;
- return nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, sizeof(u32));
+ return nft_parse_register_store(ctx, tb[NFTA_HASH_DREG],
+ &priv->dreg, NULL, NFT_DATA_VALUE,
+ sizeof(u32));
}
static int nft_jhash_dump(struct sk_buff *skb,
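[note] Worth noting in the removed lines of nft_jhash_init(): the old return statement combined two negative errno returns with a logical AND, collapsing them into 0 or 1 and losing the real error code. The rewrite checks the load first and returns the store result as-is. In miniature:

    #include <errno.h>

    static int step_load(void)  { return 0; }        /* stand-ins */
    static int step_store(void) { return -EINVAL; }

    static int init(void)
    {
            int err = step_load();
            if (err < 0)
                    return err;          /* propagate the real errno */
            return step_store();         /* not step_load() && step_store() */
    }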
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 98a8149be094..6a95d532eaec 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -48,9 +48,9 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
priv->dlen = desc.len;
- priv->dreg = nft_parse_register(tb[NFTA_IMMEDIATE_DREG]);
- err = nft_validate_register_store(ctx, priv->dreg, &priv->data,
- desc.type, desc.len);
+ err = nft_parse_register_store(ctx, tb[NFTA_IMMEDIATE_DREG],
+ &priv->dreg, &priv->data, desc.type,
+ desc.len);
if (err < 0)
goto err1;
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 660bad688e2b..e0ffd463a132 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -17,8 +17,8 @@
struct nft_lookup {
struct nft_set *set;
- enum nft_registers sreg:8;
- enum nft_registers dreg:8;
+ u8 sreg;
+ u8 dreg;
bool invert;
struct nft_set_binding binding;
};
@@ -73,8 +73,8 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
if (IS_ERR(set))
return PTR_ERR(set);
- priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]);
- err = nft_validate_register_load(priv->sreg, set->klen);
+ err = nft_parse_register_load(tb[NFTA_LOOKUP_SREG], &priv->sreg,
+ set->klen);
if (err < 0)
return err;
@@ -97,9 +97,9 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
if (!(set->flags & NFT_SET_MAP))
return -EINVAL;
- priv->dreg = nft_parse_register(tb[NFTA_LOOKUP_DREG]);
- err = nft_validate_register_store(ctx, priv->dreg, NULL,
- set->dtype, set->dlen);
+ err = nft_parse_register_store(ctx, tb[NFTA_LOOKUP_DREG],
+ &priv->dreg, NULL, set->dtype,
+ set->dlen);
if (err < 0)
return err;
} else if (set->flags & NFT_SET_MAP)
@@ -129,7 +129,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx,
{
struct nft_lookup *priv = nft_expr_priv(expr);
- priv->set->use++;
+ nf_tables_activate_set(ctx, priv->set);
}
static void nft_lookup_destroy(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
index 39dc94f2491e..c2f04885347e 100644
--- a/net/netfilter/nft_masq.c
+++ b/net/netfilter/nft_masq.c
@@ -15,8 +15,8 @@
struct nft_masq {
u32 flags;
- enum nft_registers sreg_proto_min:8;
- enum nft_registers sreg_proto_max:8;
+ u8 sreg_proto_min;
+ u8 sreg_proto_max;
};
static const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
@@ -54,19 +54,15 @@ static int nft_masq_init(const struct nft_ctx *ctx,
}
if (tb[NFTA_MASQ_REG_PROTO_MIN]) {
- priv->sreg_proto_min =
- nft_parse_register(tb[NFTA_MASQ_REG_PROTO_MIN]);
-
- err = nft_validate_register_load(priv->sreg_proto_min, plen);
+ err = nft_parse_register_load(tb[NFTA_MASQ_REG_PROTO_MIN],
+ &priv->sreg_proto_min, plen);
if (err < 0)
return err;
if (tb[NFTA_MASQ_REG_PROTO_MAX]) {
- priv->sreg_proto_max =
- nft_parse_register(tb[NFTA_MASQ_REG_PROTO_MAX]);
-
- err = nft_validate_register_load(priv->sreg_proto_max,
- plen);
+ err = nft_parse_register_load(tb[NFTA_MASQ_REG_PROTO_MAX],
+ &priv->sreg_proto_max,
+ plen);
if (err < 0)
return err;
} else {
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index dda1e55d5801..ac7d3c78501b 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -247,7 +247,7 @@ void nft_meta_get_eval(const struct nft_expr *expr,
strncpy((char *)dest, out->rtnl_link_ops->kind, IFNAMSIZ);
break;
case NFT_META_TIME_NS:
- nft_reg_store64(dest, ktime_get_real_ns());
+ nft_reg_store64((u64 *)dest, ktime_get_real_ns());
break;
case NFT_META_TIME_DAY:
nft_reg_store8(dest, nft_meta_weekday(ktime_get_real_seconds()));
@@ -380,9 +380,8 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
return -EOPNOTSUPP;
}
- priv->dreg = nft_parse_register(tb[NFTA_META_DREG]);
- return nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, len);
+ return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE, len);
}
EXPORT_SYMBOL_GPL(nft_meta_get_init);
@@ -475,8 +474,7 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
return -EOPNOTSUPP;
}
- priv->sreg = nft_parse_register(tb[NFTA_META_SREG]);
- err = nft_validate_register_load(priv->sreg, len);
+ err = nft_parse_register_load(tb[NFTA_META_SREG], &priv->sreg, len);
if (err < 0)
return err;
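[note] The (u64 *) cast in nft_meta_get_eval() adapts the call site to a changed nft_reg_store64() prototype: the register file is an array of u32, so a 64-bit store has to be phrased as a byte copy into the slot rather than a typed assignment. A sketch of what such a helper plausibly looks like (an assumption about its shape, not the kernel source):

    #include <stdint.h>
    #include <string.h>

    /* store a 64-bit value into a register slot made of 32-bit words */
    static void reg_store64(uint64_t *dreg, uint64_t val)
    {
            /* memcpy makes no alignment assumptions about the slot */
            memcpy(dreg, &val, sizeof(val));
    }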
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index 0c5bc3c37ecf..69fcea16218b 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -21,10 +21,10 @@
#include <net/ip.h>
struct nft_nat {
- enum nft_registers sreg_addr_min:8;
- enum nft_registers sreg_addr_max:8;
- enum nft_registers sreg_proto_min:8;
- enum nft_registers sreg_proto_max:8;
+ u8 sreg_addr_min;
+ u8 sreg_addr_max;
+ u8 sreg_proto_min;
+ u8 sreg_proto_max;
enum nf_nat_manip_type type:8;
u8 family;
u16 flags;
@@ -88,6 +88,11 @@ static int nft_nat_validate(const struct nft_ctx *ctx,
struct nft_nat *priv = nft_expr_priv(expr);
int err;
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
if (err < 0)
return err;
@@ -154,18 +159,15 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
priv->family = family;
if (tb[NFTA_NAT_REG_ADDR_MIN]) {
- priv->sreg_addr_min =
- nft_parse_register(tb[NFTA_NAT_REG_ADDR_MIN]);
- err = nft_validate_register_load(priv->sreg_addr_min, alen);
+ err = nft_parse_register_load(tb[NFTA_NAT_REG_ADDR_MIN],
+ &priv->sreg_addr_min, alen);
if (err < 0)
return err;
if (tb[NFTA_NAT_REG_ADDR_MAX]) {
- priv->sreg_addr_max =
- nft_parse_register(tb[NFTA_NAT_REG_ADDR_MAX]);
-
- err = nft_validate_register_load(priv->sreg_addr_max,
- alen);
+ err = nft_parse_register_load(tb[NFTA_NAT_REG_ADDR_MAX],
+ &priv->sreg_addr_max,
+ alen);
if (err < 0)
return err;
} else {
@@ -175,19 +177,15 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all);
if (tb[NFTA_NAT_REG_PROTO_MIN]) {
- priv->sreg_proto_min =
- nft_parse_register(tb[NFTA_NAT_REG_PROTO_MIN]);
-
- err = nft_validate_register_load(priv->sreg_proto_min, plen);
+ err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MIN],
+ &priv->sreg_proto_min, plen);
if (err < 0)
return err;
if (tb[NFTA_NAT_REG_PROTO_MAX]) {
- priv->sreg_proto_max =
- nft_parse_register(tb[NFTA_NAT_REG_PROTO_MAX]);
-
- err = nft_validate_register_load(priv->sreg_proto_max,
- plen);
+ err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MAX],
+ &priv->sreg_proto_max,
+ plen);
if (err < 0)
return err;
} else {
diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
index 48edb9d5f012..7bbca252e7fc 100644
--- a/net/netfilter/nft_numgen.c
+++ b/net/netfilter/nft_numgen.c
@@ -16,7 +16,7 @@
static DEFINE_PER_CPU(struct rnd_state, nft_numgen_prandom_state);
struct nft_ng_inc {
- enum nft_registers dreg:8;
+ u8 dreg;
u32 modulus;
atomic_t counter;
u32 offset;
@@ -66,11 +66,10 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
if (priv->offset + priv->modulus - 1 < priv->offset)
return -EOVERFLOW;
- priv->dreg = nft_parse_register(tb[NFTA_NG_DREG]);
atomic_set(&priv->counter, priv->modulus - 1);
- return nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, sizeof(u32));
+ return nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE, sizeof(u32));
}
static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
@@ -100,7 +99,7 @@ static int nft_ng_inc_dump(struct sk_buff *skb, const struct nft_expr *expr)
}
struct nft_ng_random {
- enum nft_registers dreg:8;
+ u8 dreg;
u32 modulus;
u32 offset;
};
@@ -140,10 +139,8 @@ static int nft_ng_random_init(const struct nft_ctx *ctx,
prandom_init_once(&nft_numgen_prandom_state);
- priv->dreg = nft_parse_register(tb[NFTA_NG_DREG]);
-
- return nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, sizeof(u32));
+ return nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE, sizeof(u32));
}
static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index bfd18d2b65a2..f5028479c028 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -41,8 +41,10 @@ static int nft_objref_init(const struct nft_ctx *ctx,
if (IS_ERR(obj))
return -ENOENT;
+ if (!nft_use_inc(&obj->use))
+ return -EMFILE;
+
nft_objref_priv(expr) = obj;
- obj->use++;
return 0;
}
@@ -71,7 +73,7 @@ static void nft_objref_deactivate(const struct nft_ctx *ctx,
if (phase == NFT_TRANS_COMMIT)
return;
- obj->use--;
+ nft_use_dec(&obj->use);
}
static void nft_objref_activate(const struct nft_ctx *ctx,
@@ -79,7 +81,7 @@ static void nft_objref_activate(const struct nft_ctx *ctx,
{
struct nft_object *obj = nft_objref_priv(expr);
- obj->use++;
+ nft_use_inc_restore(&obj->use);
}
static struct nft_expr_type nft_objref_type;
@@ -95,7 +97,7 @@ static const struct nft_expr_ops nft_objref_ops = {
struct nft_objref_map {
struct nft_set *set;
- enum nft_registers sreg:8;
+ u8 sreg;
struct nft_set_binding binding;
};
@@ -137,8 +139,8 @@ static int nft_objref_map_init(const struct nft_ctx *ctx,
if (!(set->flags & NFT_SET_OBJECT))
return -EINVAL;
- priv->sreg = nft_parse_register(tb[NFTA_OBJREF_SET_SREG]);
- err = nft_validate_register_load(priv->sreg, set->klen);
+ err = nft_parse_register_load(tb[NFTA_OBJREF_SET_SREG], &priv->sreg,
+ set->klen);
if (err < 0)
return err;
@@ -180,7 +182,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx,
{
struct nft_objref_map *priv = nft_expr_priv(expr);
- priv->set->use++;
+ nf_tables_activate_set(ctx, priv->set);
}
static void nft_objref_map_destroy(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index 4911f8eb394f..b7c2bc01f8a2 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -6,7 +6,7 @@
#include <linux/netfilter/nfnetlink_osf.h>
struct nft_osf {
- enum nft_registers dreg:8;
+ u8 dreg;
u8 ttl;
u32 flags;
};
@@ -83,9 +83,9 @@ static int nft_osf_init(const struct nft_ctx *ctx,
priv->flags = flags;
}
- priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
- err = nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN);
+ err = nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE,
+ NFT_OSF_MAXGENRELEN);
if (err < 0)
return err;
@@ -115,9 +115,21 @@ static int nft_osf_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
- return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
- (1 << NF_INET_PRE_ROUTING) |
- (1 << NF_INET_FORWARD));
+ unsigned int hooks;
+
+ switch (ctx->family) {
+ case NFPROTO_IPV4:
+ case NFPROTO_IPV6:
+ case NFPROTO_INET:
+ hooks = (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_FORWARD);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return nft_chain_validate_hooks(ctx->chain, hooks);
}
static struct nft_expr_type nft_osf_type;
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index cf0512fc648e..a4f9a150812a 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -84,7 +84,7 @@ void nft_payload_eval(const struct nft_expr *expr,
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
- if (!skb_mac_header_was_set(skb))
+ if (!skb_mac_header_was_set(skb) || skb_mac_header_len(skb) == 0)
goto err;
if (skb_vlan_tag_present(skb)) {
@@ -135,10 +135,10 @@ static int nft_payload_init(const struct nft_ctx *ctx,
priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
- priv->dreg = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);
- return nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, priv->len);
+ return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
+ &priv->dreg, NULL, NFT_DATA_VALUE,
+ priv->len);
}
static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -558,18 +558,23 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_payload_set *priv = nft_expr_priv(expr);
+ u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
+ int err;
priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
- priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);
if (tb[NFTA_PAYLOAD_CSUM_TYPE])
- priv->csum_type =
- ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
- if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
- priv->csum_offset =
- ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
+ csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
+ if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
+ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
+ &csum_offset);
+ if (err < 0)
+ return err;
+
+ priv->csum_offset = csum_offset;
+ }
if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
u32 flags;
@@ -580,15 +585,17 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
priv->csum_flags = flags;
}
- switch (priv->csum_type) {
+ switch (csum_type) {
case NFT_PAYLOAD_CSUM_NONE:
case NFT_PAYLOAD_CSUM_INET:
break;
default:
return -EOPNOTSUPP;
}
+ priv->csum_type = csum_type;
- return nft_validate_register_load(priv->sreg, priv->len);
+ return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
+ priv->len);
}
static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -624,6 +631,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
{
enum nft_payload_bases base;
unsigned int offset, len;
+ int err;
if (tb[NFTA_PAYLOAD_BASE] == NULL ||
tb[NFTA_PAYLOAD_OFFSET] == NULL ||
@@ -649,8 +657,13 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
if (tb[NFTA_PAYLOAD_DREG] == NULL)
return ERR_PTR(-EINVAL);
- offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
- len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
+ if (err < 0)
+ return ERR_PTR(err);
if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
base != NFT_PAYLOAD_LL_HEADER)
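[note] nft_payload_set_init() now follows a parse-validate-commit discipline: csum_type and csum_offset are read into locals, range- and whitelist-checked, and only then written to priv, so a failing init leaves no half-written expression state. The same applies to nft_payload_select_ops(), where offset and len go through nft_parse_u32_check() against U8_MAX. The pattern in miniature, with illustrative names:

    #include <errno.h>
    #include <stdint.h>

    enum { CSUM_NONE = 0, CSUM_INET = 1 };

    struct payload_set {
            uint8_t csum_type;
            uint8_t csum_offset;
    };

    static int payload_set_init(struct payload_set *priv,
                                uint32_t type, uint32_t offset)
    {
            if (offset > UINT8_MAX)          /* range-check before narrowing */
                    return -ERANGE;
            switch (type) {
            case CSUM_NONE:
            case CSUM_INET:
                    break;
            default:
                    return -EOPNOTSUPP;      /* unknown checksum method */
            }
            priv->csum_type = (uint8_t)type; /* inputs valid: now commit */
            priv->csum_offset = (uint8_t)offset;
            return 0;
    }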
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index 5ece0a6aa8c3..94a4f0a5a28e 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -19,10 +19,10 @@
static u32 jhash_initval __read_mostly;
struct nft_queue {
- enum nft_registers sreg_qnum:8;
- u16 queuenum;
- u16 queues_total;
- u16 flags;
+ u8 sreg_qnum;
+ u16 queuenum;
+ u16 queues_total;
+ u16 flags;
};
static void nft_queue_eval(const struct nft_expr *expr,
@@ -111,8 +111,8 @@ static int nft_queue_sreg_init(const struct nft_ctx *ctx,
struct nft_queue *priv = nft_expr_priv(expr);
int err;
- priv->sreg_qnum = nft_parse_register(tb[NFTA_QUEUE_SREG_QNUM]);
- err = nft_validate_register_load(priv->sreg_qnum, sizeof(u32));
+ err = nft_parse_register_load(tb[NFTA_QUEUE_SREG_QNUM],
+ &priv->sreg_qnum, sizeof(u32));
if (err < 0)
return err;
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index 89efcc5a533d..e4a1c44d7f51 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -15,7 +15,7 @@
struct nft_range_expr {
struct nft_data data_from;
struct nft_data data_to;
- enum nft_registers sreg:8;
+ u8 sreg;
u8 len;
enum nft_range_ops op:8;
};
@@ -86,8 +86,8 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
goto err2;
}
- priv->sreg = nft_parse_register(tb[NFTA_RANGE_SREG]);
- err = nft_validate_register_load(priv->sreg, desc_from.len);
+ err = nft_parse_register_load(tb[NFTA_RANGE_SREG], &priv->sreg,
+ desc_from.len);
if (err < 0)
goto err2;
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index 43eeb1f609f1..7179dada5757 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -14,8 +14,8 @@
#include <net/netfilter/nf_tables.h>
struct nft_redir {
- enum nft_registers sreg_proto_min:8;
- enum nft_registers sreg_proto_max:8;
+ u8 sreg_proto_min;
+ u8 sreg_proto_max;
u16 flags;
};
@@ -50,24 +50,22 @@ static int nft_redir_init(const struct nft_ctx *ctx,
plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all);
if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
- priv->sreg_proto_min =
- nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MIN]);
-
- err = nft_validate_register_load(priv->sreg_proto_min, plen);
+ err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MIN],
+ &priv->sreg_proto_min, plen);
if (err < 0)
return err;
if (tb[NFTA_REDIR_REG_PROTO_MAX]) {
- priv->sreg_proto_max =
- nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MAX]);
-
- err = nft_validate_register_load(priv->sreg_proto_max,
- plen);
+ err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MAX],
+ &priv->sreg_proto_max,
+ plen);
if (err < 0)
return err;
} else {
priv->sreg_proto_max = priv->sreg_proto_min;
}
+
+ priv->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
if (tb[NFTA_REDIR_FLAGS]) {
@@ -102,25 +100,37 @@ nla_put_failure:
return -1;
}
-static void nft_redir_ipv4_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+static void nft_redir_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
- struct nft_redir *priv = nft_expr_priv(expr);
- struct nf_nat_ipv4_multi_range_compat mr;
+ const struct nft_redir *priv = nft_expr_priv(expr);
+ struct nf_nat_range2 range;
- memset(&mr, 0, sizeof(mr));
+ memset(&range, 0, sizeof(range));
+ range.flags = priv->flags;
if (priv->sreg_proto_min) {
- mr.range[0].min.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_min]);
- mr.range[0].max.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_max]);
- mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+ range.min_proto.all = (__force __be16)
+ nft_reg_load16(&regs->data[priv->sreg_proto_min]);
+ range.max_proto.all = (__force __be16)
+ nft_reg_load16(&regs->data[priv->sreg_proto_max]);
}
- mr.range[0].flags |= priv->flags;
-
- regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &mr, nft_hook(pkt));
+ switch (nft_pf(pkt)) {
+ case NFPROTO_IPV4:
+ regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &range,
+ nft_hook(pkt));
+ break;
+#ifdef CONFIG_NF_TABLES_IPV6
+ case NFPROTO_IPV6:
+ regs->verdict.code = nf_nat_redirect_ipv6(pkt->skb, &range,
+ nft_hook(pkt));
+ break;
+#endif
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
}
static void
@@ -133,7 +143,7 @@ static struct nft_expr_type nft_redir_ipv4_type;
static const struct nft_expr_ops nft_redir_ipv4_ops = {
.type = &nft_redir_ipv4_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
- .eval = nft_redir_ipv4_eval,
+ .eval = nft_redir_eval,
.init = nft_redir_init,
.destroy = nft_redir_ipv4_destroy,
.dump = nft_redir_dump,
@@ -150,28 +160,6 @@ static struct nft_expr_type nft_redir_ipv4_type __read_mostly = {
};
#ifdef CONFIG_NF_TABLES_IPV6
-static void nft_redir_ipv6_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_redir *priv = nft_expr_priv(expr);
- struct nf_nat_range2 range;
-
- memset(&range, 0, sizeof(range));
- if (priv->sreg_proto_min) {
- range.min_proto.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_min]);
- range.max_proto.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_max]);
- range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
- }
-
- range.flags |= priv->flags;
-
- regs->verdict.code =
- nf_nat_redirect_ipv6(pkt->skb, &range, nft_hook(pkt));
-}
-
static void
nft_redir_ipv6_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
@@ -182,7 +170,7 @@ static struct nft_expr_type nft_redir_ipv6_type;
static const struct nft_expr_ops nft_redir_ipv6_ops = {
.type = &nft_redir_ipv6_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
- .eval = nft_redir_ipv6_eval,
+ .eval = nft_redir_eval,
.init = nft_redir_init,
.destroy = nft_redir_ipv6_destroy,
.dump = nft_redir_dump,
@@ -200,20 +188,6 @@ static struct nft_expr_type nft_redir_ipv6_type __read_mostly = {
#endif
#ifdef CONFIG_NF_TABLES_INET
-static void nft_redir_inet_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- switch (nft_pf(pkt)) {
- case NFPROTO_IPV4:
- return nft_redir_ipv4_eval(expr, regs, pkt);
- case NFPROTO_IPV6:
- return nft_redir_ipv6_eval(expr, regs, pkt);
- }
-
- WARN_ON_ONCE(1);
-}
-
static void
nft_redir_inet_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
@@ -224,7 +198,7 @@ static struct nft_expr_type nft_redir_inet_type;
static const struct nft_expr_ops nft_redir_inet_ops = {
.type = &nft_redir_inet_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
- .eval = nft_redir_inet_eval,
+ .eval = nft_redir_eval,
.init = nft_redir_init,
.destroy = nft_redir_inet_destroy,
.dump = nft_redir_dump,
@@ -236,7 +210,7 @@ static struct nft_expr_type nft_redir_inet_type __read_mostly = {
.name = "redir",
.ops = &nft_redir_inet_ops,
.policy = nft_redir_policy,
- .maxattr = NFTA_MASQ_MAX,
+ .maxattr = NFTA_REDIR_MAX,
.owner = THIS_MODULE,
};
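[note] The redirect rework deletes the per-family eval callbacks in favour of one nft_redir_eval() that fills an nf_nat_range2 and switches on nft_pf(pkt); the IPv4 path thereby drops the legacy nf_nat_ipv4_multi_range_compat layout, and NF_NAT_RANGE_PROTO_SPECIFIED moves to init time where the proto registers are parsed. It also fixes the inet type's maxattr, which wrongly used NFTA_MASQ_MAX. The dispatch shape, modelled standalone:

    #include <stdio.h>

    enum family { FAM_V4, FAM_V6 };

    static int redirect_v4(void) { return 4; }   /* stand-ins for the */
    static int redirect_v6(void) { return 6; }   /* per-family NAT calls */

    static int redir_eval(enum family pf)
    {
            switch (pf) {
            case FAM_V4: return redirect_v4();
            case FAM_V6: return redirect_v6();
            }
            return -1;    /* unreachable for valid input, kept defensive */
    }

    int main(void)
    {
            printf("%d %d\n", redir_eval(FAM_V4), redir_eval(FAM_V6));
            return 0;
    }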
diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
index 7cfcb0e2f7ee..f4a96164a5a1 100644
--- a/net/netfilter/nft_rt.c
+++ b/net/netfilter/nft_rt.c
@@ -15,7 +15,7 @@
struct nft_rt {
enum nft_rt_keys key:8;
- enum nft_registers dreg:8;
+ u8 dreg;
};
static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skbdst)
@@ -141,9 +141,8 @@ static int nft_rt_get_init(const struct nft_ctx *ctx,
return -EOPNOTSUPP;
}
- priv->dreg = nft_parse_register(tb[NFTA_RT_DREG]);
- return nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, len);
+ return nft_parse_register_store(ctx, tb[NFTA_RT_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE, len);
}
static int nft_rt_get_dump(struct sk_buff *skb,
@@ -167,6 +166,11 @@ static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *exp
const struct nft_rt *priv = nft_expr_priv(expr);
unsigned int hooks;
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
switch (priv->key) {
case NFT_RT_NEXTHOP4:
case NFT_RT_NEXTHOP6:
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index 087a056e34d1..b0f6b1490e1a 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -270,13 +270,14 @@ static int nft_bitmap_init(const struct nft_set *set,
return 0;
}
-static void nft_bitmap_destroy(const struct nft_set *set)
+static void nft_bitmap_destroy(const struct nft_ctx *ctx,
+ const struct nft_set *set)
{
struct nft_bitmap *priv = nft_set_priv(set);
struct nft_bitmap_elem *be, *n;
list_for_each_entry_safe(be, n, &priv->list, head)
- nft_set_elem_destroy(set, be, true);
+ nf_tables_set_elem_destroy(ctx, set, be);
}
static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index e7eb56b4b89e..0581d5499c5a 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -17,6 +17,9 @@
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
+#include <net/netns/generic.h>
+
+extern unsigned int nf_tables_net_id;
/* We target a hash table size of 4, element hint is 75% of final size */
#define NFT_RHASH_ELEMENT_HINT 3
@@ -59,6 +62,8 @@ static inline int nft_rhash_cmp(struct rhashtable_compare_arg *arg,
if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
return 1;
+ if (nft_set_elem_is_dead(&he->ext))
+ return 1;
if (nft_set_elem_expired(&he->ext))
return 1;
if (!nft_set_elem_active(&he->ext, x->genmask))
@@ -187,7 +192,6 @@ static void nft_rhash_activate(const struct net *net, const struct nft_set *set,
struct nft_rhash_elem *he = elem->priv;
nft_set_elem_change_active(net, set, &he->ext);
- nft_set_elem_clear_busy(&he->ext);
}
static bool nft_rhash_flush(const struct net *net,
@@ -195,12 +199,9 @@ static bool nft_rhash_flush(const struct net *net,
{
struct nft_rhash_elem *he = priv;
- if (!nft_set_elem_mark_busy(&he->ext) ||
- !nft_is_active(net, &he->ext)) {
- nft_set_elem_change_active(net, set, &he->ext);
- return true;
- }
- return false;
+ nft_set_elem_change_active(net, set, &he->ext);
+
+ return true;
}
static void *nft_rhash_deactivate(const struct net *net,
@@ -217,9 +218,8 @@ static void *nft_rhash_deactivate(const struct net *net,
rcu_read_lock();
he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
- if (he != NULL &&
- !nft_rhash_flush(net, set, he))
- he = NULL;
+ if (he)
+ nft_set_elem_change_active(net, set, &he->ext);
rcu_read_unlock();
@@ -251,7 +251,9 @@ static bool nft_rhash_delete(const struct nft_set *set,
if (he == NULL)
return false;
- return rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params) == 0;
+ nft_set_elem_dead(&he->ext);
+
+ return true;
}
static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
@@ -277,8 +279,6 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
if (iter->count < iter->skip)
goto cont;
- if (nft_set_elem_expired(&he->ext))
- goto cont;
if (!nft_set_elem_active(&he->ext, iter->genmask))
goto cont;
@@ -297,49 +297,77 @@ cont:
static void nft_rhash_gc(struct work_struct *work)
{
+ struct nftables_pernet *nft_net;
struct nft_set *set;
struct nft_rhash_elem *he;
struct nft_rhash *priv;
- struct nft_set_gc_batch *gcb = NULL;
struct rhashtable_iter hti;
+ struct nft_trans_gc *gc;
+ struct net *net;
+ u32 gc_seq;
priv = container_of(work, struct nft_rhash, gc_work.work);
set = nft_set_container_of(priv);
+ net = read_pnet(&set->net);
+ nft_net = net_generic(net, nf_tables_net_id);
+ gc_seq = READ_ONCE(nft_net->gc_seq);
+
+ if (nft_set_gc_is_pending(set))
+ goto done;
+
+ gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
+ if (!gc)
+ goto done;
rhashtable_walk_enter(&priv->ht, &hti);
rhashtable_walk_start(&hti);
while ((he = rhashtable_walk_next(&hti))) {
if (IS_ERR(he)) {
- if (PTR_ERR(he) != -EAGAIN)
- break;
- continue;
+ nft_trans_gc_destroy(gc);
+ gc = NULL;
+ goto try_later;
+ }
+
+ /* Ruleset has been updated, try later. */
+ if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
+ nft_trans_gc_destroy(gc);
+ gc = NULL;
+ goto try_later;
}
+ if (nft_set_elem_is_dead(&he->ext))
+ goto dead_elem;
+
if (nft_set_ext_exists(&he->ext, NFT_SET_EXT_EXPR)) {
struct nft_expr *expr = nft_set_ext_expr(&he->ext);
if (expr->ops->gc &&
expr->ops->gc(read_pnet(&set->net), expr))
- goto gc;
+ goto needs_gc_run;
}
if (!nft_set_elem_expired(&he->ext))
continue;
-gc:
- if (nft_set_elem_mark_busy(&he->ext))
- continue;
- gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
- if (gcb == NULL)
- break;
- rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params);
- atomic_dec(&set->nelems);
- nft_set_gc_batch_add(gcb, he);
+needs_gc_run:
+ nft_set_elem_dead(&he->ext);
+dead_elem:
+ gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+ if (!gc)
+ goto try_later;
+
+ nft_trans_gc_elem_add(gc, he);
}
+
+try_later:
+ /* catchall list iteration requires rcu read side lock. */
rhashtable_walk_stop(&hti);
rhashtable_walk_exit(&hti);
- nft_set_gc_batch_complete(gcb);
+ if (gc)
+ nft_trans_gc_queue_async_done(gc);
+
+done:
queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
nft_set_gc_interval(set));
}
@@ -374,25 +402,36 @@ static int nft_rhash_init(const struct nft_set *set,
return err;
INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rhash_gc);
- if (set->flags & NFT_SET_TIMEOUT)
+ if (set->flags & (NFT_SET_TIMEOUT | NFT_SET_EVAL))
nft_rhash_gc_init(set);
return 0;
}
+struct nft_rhash_ctx {
+ const struct nft_ctx ctx;
+ const struct nft_set *set;
+};
+
static void nft_rhash_elem_destroy(void *ptr, void *arg)
{
- nft_set_elem_destroy(arg, ptr, true);
+ struct nft_rhash_ctx *rhash_ctx = arg;
+
+ nf_tables_set_elem_destroy(&rhash_ctx->ctx, rhash_ctx->set, ptr);
}
-static void nft_rhash_destroy(const struct nft_set *set)
+static void nft_rhash_destroy(const struct nft_ctx *ctx,
+ const struct nft_set *set)
{
struct nft_rhash *priv = nft_set_priv(set);
+ struct nft_rhash_ctx rhash_ctx = {
+ .ctx = *ctx,
+ .set = set,
+ };
cancel_delayed_work_sync(&priv->gc_work);
- rcu_barrier();
rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
- (void *)set);
+ (void *)&rhash_ctx);
}
/* Number of buckets is stored in u32, so cap our result to 1U<<31 */
@@ -621,7 +660,8 @@ static int nft_hash_init(const struct nft_set *set,
return 0;
}
-static void nft_hash_destroy(const struct nft_set *set)
+static void nft_hash_destroy(const struct nft_ctx *ctx,
+ const struct nft_set *set)
{
struct nft_hash *priv = nft_set_priv(set);
struct nft_hash_elem *he;
@@ -631,7 +671,7 @@ static void nft_hash_destroy(const struct nft_set *set)
for (i = 0; i < priv->buckets; i++) {
hlist_for_each_entry_safe(he, next, &priv->table[i], node) {
hlist_del_rcu(&he->node);
- nft_set_elem_destroy(set, he, true);
+ nf_tables_set_elem_destroy(ctx, set, he);
}
}
}
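[note] The rhash GC rewrite pins every collection run to the nftables generation counter: the worker snapshots gc_seq, walks the table marking expired elements dead, and throws the whole batch away if the sequence moved, since a concurrent transaction may still reference those elements. Set destruction also gains a ctx argument so element destroy runs with full transaction context. The abort check, modelled with a plain volatile read standing in for READ_ONCE():

    #include <stdbool.h>
    #include <stdint.h>

    struct gc_batch {
            uint32_t seq;        /* sequence the walk started under */
            /* ... queued dead elements ... */
    };

    static bool gc_batch_still_valid(const struct gc_batch *gc,
                                     const uint32_t *global_seq)
    {
            /* a mismatch means a ruleset transaction ran in between,
             * so the batch must be dropped and the walk retried later */
            return *(volatile const uint32_t *)global_seq == gc->seq;
    }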
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index ee7c29e0a9d7..2b5f65a424b7 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -14,6 +14,9 @@
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
+#include <net/netns/generic.h>
+
+extern unsigned int nf_tables_net_id;
struct nft_rbtree {
struct rb_root root;
@@ -38,10 +41,18 @@ static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
return !nft_rbtree_interval_end(rbe);
}
-static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
- const struct nft_rbtree_elem *interval)
+static int nft_rbtree_cmp(const struct nft_set *set,
+ const struct nft_rbtree_elem *e1,
+ const struct nft_rbtree_elem *e2)
+{
+ return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
+ set->klen);
+}
+
+static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
{
- return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
+ return nft_set_elem_expired(&rbe->ext) ||
+ nft_set_elem_is_dead(&rbe->ext);
}
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
@@ -52,7 +63,6 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
const struct nft_rbtree_elem *rbe, *interval = NULL;
u8 genmask = nft_genmask_cur(net);
const struct rb_node *parent;
- const void *this;
int d;
parent = rcu_dereference_raw(priv->root.rb_node);
@@ -62,12 +72,11 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
- this = nft_set_ext_key(&rbe->ext);
- d = memcmp(this, key, set->klen);
+ d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
if (d < 0) {
parent = rcu_dereference_raw(parent->rb_left);
if (interval &&
- nft_rbtree_equal(set, this, interval) &&
+ !nft_rbtree_cmp(set, rbe, interval) &&
nft_rbtree_interval_end(rbe) &&
nft_rbtree_interval_start(interval))
continue;
@@ -80,7 +89,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
continue;
}
- if (nft_set_elem_expired(&rbe->ext))
+ if (nft_rbtree_elem_expired(rbe))
return false;
if (nft_rbtree_interval_end(rbe)) {
@@ -98,7 +107,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
nft_set_elem_active(&interval->ext, genmask) &&
- !nft_set_elem_expired(&interval->ext) &&
+ !nft_rbtree_elem_expired(interval) &&
nft_rbtree_interval_start(interval)) {
*ext = &interval->ext;
return true;
@@ -214,43 +223,257 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
return rbe;
}
+static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
+ struct nft_rbtree *priv,
+ struct nft_rbtree_elem *rbe)
+{
+ struct nft_set_elem elem = {
+ .priv = rbe,
+ };
+
+ nft_setelem_data_deactivate(net, set, &elem);
+ rb_erase(&rbe->node, &priv->root);
+}
+
+static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ struct nft_rbtree *priv,
+ struct nft_rbtree_elem *rbe)
+{
+ struct nft_set *set = (struct nft_set *)__set;
+ struct rb_node *prev = rb_prev(&rbe->node);
+ struct net *net = read_pnet(&set->net);
+ struct nft_rbtree_elem *rbe_prev;
+ struct nft_trans_gc *gc;
+
+ gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
+ if (!gc)
+ return -ENOMEM;
+
+ /* search for end interval coming before this element.
+ * end intervals don't carry a timeout extension, they
+ * are coupled with the interval start element.
+ */
+ while (prev) {
+ rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+ if (nft_rbtree_interval_end(rbe_prev) &&
+ nft_set_elem_active(&rbe_prev->ext, NFT_GENMASK_ANY))
+ break;
+
+ prev = rb_prev(prev);
+ }
+
+ if (prev) {
+ rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+ nft_rbtree_gc_remove(net, set, priv, rbe_prev);
+
+ /* There is always room in this trans gc for this element,
+ * memory allocation never actually happens, hence, the warning
+ * splat in such case. No need to set NFT_SET_ELEM_DEAD_BIT,
+ * this is synchronous gc which never fails.
+ */
+ gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+ if (WARN_ON_ONCE(!gc))
+ return -ENOMEM;
+
+ nft_trans_gc_elem_add(gc, rbe_prev);
+ }
+
+ nft_rbtree_gc_remove(net, set, priv, rbe);
+ gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+ if (WARN_ON_ONCE(!gc))
+ return -ENOMEM;
+
+ nft_trans_gc_elem_add(gc, rbe);
+
+ nft_trans_gc_queue_sync_done(gc);
+
+ return 0;
+}
+
+static bool nft_rbtree_update_first(const struct nft_set *set,
+ struct nft_rbtree_elem *rbe,
+ struct rb_node *first)
+{
+ struct nft_rbtree_elem *first_elem;
+
+ first_elem = rb_entry(first, struct nft_rbtree_elem, node);
+ /* this element is closest to where the new element is to be inserted:
+ * update the first element for the node list path.
+ */
+ if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
+ return true;
+
+ return false;
+}
+
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
struct nft_rbtree_elem *new,
struct nft_set_ext **ext)
{
+ struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
+ struct rb_node *node, *next, *parent, **p, *first = NULL;
struct nft_rbtree *priv = nft_set_priv(set);
+ u8 cur_genmask = nft_genmask_cur(net);
u8 genmask = nft_genmask_next(net);
- struct nft_rbtree_elem *rbe;
- struct rb_node *parent, **p;
- int d;
+ int d, err;
+ /* Descend the tree to search for an existing element greater than the
+ * key value to insert that is greater than the new element. This is the
+ * first element to walk the ordered elements to find possible overlap.
+ */
parent = NULL;
p = &priv->root.rb_node;
while (*p != NULL) {
parent = *p;
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
- d = memcmp(nft_set_ext_key(&rbe->ext),
- nft_set_ext_key(&new->ext),
- set->klen);
- if (d < 0)
+ d = nft_rbtree_cmp(set, rbe, new);
+
+ if (d < 0) {
p = &parent->rb_left;
- else if (d > 0)
+ } else if (d > 0) {
+ if (!first ||
+ nft_rbtree_update_first(set, rbe, first))
+ first = &rbe->node;
+
p = &parent->rb_right;
- else {
- if (nft_rbtree_interval_end(rbe) &&
- nft_rbtree_interval_start(new)) {
+ } else {
+ if (nft_rbtree_interval_end(rbe))
p = &parent->rb_left;
- } else if (nft_rbtree_interval_start(rbe) &&
- nft_rbtree_interval_end(new)) {
+ else
p = &parent->rb_right;
- } else if (nft_set_elem_active(&rbe->ext, genmask)) {
- *ext = &rbe->ext;
- return -EEXIST;
- } else {
- p = &parent->rb_left;
+ }
+ }
+
+ if (!first)
+ first = rb_first(&priv->root);
+
+ /* Detect overlap by going through the list of valid tree nodes.
+ * Values stored in the tree are in reversed order, starting from
+ * highest to lowest value.
+ */
+ for (node = first; node != NULL; node = next) {
+ next = rb_next(node);
+
+ rbe = rb_entry(node, struct nft_rbtree_elem, node);
+
+ if (!nft_set_elem_active(&rbe->ext, genmask))
+ continue;
+
+ /* perform garbage collection to avoid bogus overlap reports
+ * but skip new elements in this transaction.
+ */
+ if (nft_set_elem_expired(&rbe->ext) &&
+ nft_set_elem_active(&rbe->ext, cur_genmask)) {
+ err = nft_rbtree_gc_elem(set, priv, rbe);
+ if (err < 0)
+ return err;
+
+ continue;
+ }
+
+ d = nft_rbtree_cmp(set, rbe, new);
+ if (d == 0) {
+ /* Matching end element: no need to look for an
+ * overlapping greater or equal element.
+ */
+ if (nft_rbtree_interval_end(rbe)) {
+ rbe_le = rbe;
+ break;
+ }
+
+ /* first element that is greater or equal to key value. */
+ if (!rbe_ge) {
+ rbe_ge = rbe;
+ continue;
+ }
+
+ /* this is a closer more or equal element, update it. */
+ if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
+ rbe_ge = rbe;
+ continue;
+ }
+
+ /* element is equal to key value, make sure flags are
+ * the same, an existing more or equal start element
+ * must not be replaced by more or equal end element.
+ */
+ if ((nft_rbtree_interval_start(new) &&
+ nft_rbtree_interval_start(rbe_ge)) ||
+ (nft_rbtree_interval_end(new) &&
+ nft_rbtree_interval_end(rbe_ge))) {
+ rbe_ge = rbe;
+ continue;
}
+ } else if (d > 0) {
+ /* annotate element greater than the new element. */
+ rbe_ge = rbe;
+ continue;
+ } else if (d < 0) {
+ /* annotate element less than the new element. */
+ rbe_le = rbe;
+ break;
}
}
+
+ /* - new start element matching existing start element: full overlap
+ * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
+ */
+ if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
+ nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
+ *ext = &rbe_ge->ext;
+ return -EEXIST;
+ }
+
+ /* - new end element matching existing end element: full overlap
+ * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
+ */
+ if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
+ nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
+ *ext = &rbe_le->ext;
+ return -EEXIST;
+ }
+
+ /* - new start element with existing closest, less or equal key value
+ * being a start element: partial overlap, reported as -ENOTEMPTY.
+ * Anonymous sets allow for two consecutive start element since they
+ * are constant, skip them to avoid bogus overlap reports.
+ */
+ if (!nft_set_is_anonymous(set) && rbe_le &&
+ nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
+ return -ENOTEMPTY;
+
+ /* - new end element with existing closest, less or equal key value
+ * being a end element: partial overlap, reported as -ENOTEMPTY.
+ */
+ if (rbe_le &&
+ nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
+ return -ENOTEMPTY;
+
+ /* - new end element with existing closest, greater or equal key value
+ * being an end element: partial overlap, reported as -ENOTEMPTY
+ */
+ if (rbe_ge &&
+ nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
+ return -ENOTEMPTY;
+
+ /* Accepted element: pick insertion point depending on key value */
+ parent = NULL;
+ p = &priv->root.rb_node;
+ while (*p != NULL) {
+ parent = *p;
+ rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+ d = nft_rbtree_cmp(set, rbe, new);
+
+ if (d < 0)
+ p = &parent->rb_left;
+ else if (d > 0)
+ p = &parent->rb_right;
+ else if (nft_rbtree_interval_end(rbe))
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+
rb_link_node_rcu(&new->node, parent, p);
rb_insert_color(&new->node, &priv->root);
return 0;
@@ -294,7 +517,6 @@ static void nft_rbtree_activate(const struct net *net,
struct nft_rbtree_elem *rbe = elem->priv;
nft_set_elem_change_active(net, set, &rbe->ext);
- nft_set_elem_clear_busy(&rbe->ext);
}
static bool nft_rbtree_flush(const struct net *net,
@@ -302,12 +524,9 @@ static bool nft_rbtree_flush(const struct net *net,
{
struct nft_rbtree_elem *rbe = priv;
- if (!nft_set_elem_mark_busy(&rbe->ext) ||
- !nft_is_active(net, &rbe->ext)) {
- nft_set_elem_change_active(net, set, &rbe->ext);
- return true;
- }
- return false;
+ nft_set_elem_change_active(net, set, &rbe->ext);
+
+ return true;
}
static void *nft_rbtree_deactivate(const struct net *net,
@@ -338,6 +557,8 @@ static void *nft_rbtree_deactivate(const struct net *net,
nft_rbtree_interval_end(this)) {
parent = parent->rb_right;
continue;
+ } else if (nft_set_elem_expired(&rbe->ext)) {
+ break;
} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
parent = parent->rb_left;
continue;
@@ -364,8 +585,6 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
if (iter->count < iter->skip)
goto cont;
- if (nft_set_elem_expired(&rbe->ext))
- goto cont;
if (!nft_set_elem_active(&rbe->ext, iter->genmask))
goto cont;
@@ -384,58 +603,82 @@ cont:
static void nft_rbtree_gc(struct work_struct *work)
{
- struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
- struct nft_set_gc_batch *gcb = NULL;
+ struct nft_rbtree_elem *rbe, *rbe_end = NULL;
+ struct nftables_pernet *nft_net;
struct nft_rbtree *priv;
+ struct nft_trans_gc *gc;
struct rb_node *node;
struct nft_set *set;
+ unsigned int gc_seq;
+ struct net *net;
priv = container_of(work, struct nft_rbtree, gc_work.work);
set = nft_set_container_of(priv);
+ net = read_pnet(&set->net);
+ nft_net = net_generic(net, nf_tables_net_id);
+ gc_seq = READ_ONCE(nft_net->gc_seq);
- write_lock_bh(&priv->lock);
- write_seqcount_begin(&priv->count);
+ if (nft_set_gc_is_pending(set))
+ goto done;
+
+ gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
+ if (!gc)
+ goto done;
+
+ read_lock_bh(&priv->lock);
for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
+
+ /* Ruleset has been updated, try later. */
+ if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
+ nft_trans_gc_destroy(gc);
+ gc = NULL;
+ goto try_later;
+ }
+
rbe = rb_entry(node, struct nft_rbtree_elem, node);
+ if (nft_set_elem_is_dead(&rbe->ext))
+ goto dead_elem;
+
+ /* elements are reversed in the rbtree for historical reasons,
+ * from highest to lowest value, that is why end element is
+ * always visited before the start element.
+ */
if (nft_rbtree_interval_end(rbe)) {
rbe_end = rbe;
continue;
}
+
if (!nft_set_elem_expired(&rbe->ext))
continue;
- if (nft_set_elem_mark_busy(&rbe->ext))
+
+ nft_set_elem_dead(&rbe->ext);
+
+ if (!rbe_end)
continue;
- if (rbe_prev) {
- rb_erase(&rbe_prev->node, &priv->root);
- rbe_prev = NULL;
- }
- gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
- if (!gcb)
- break;
+ nft_set_elem_dead(&rbe_end->ext);
- atomic_dec(&set->nelems);
- nft_set_gc_batch_add(gcb, rbe);
- rbe_prev = rbe;
+ gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+ if (!gc)
+ goto try_later;
- if (rbe_end) {
- atomic_dec(&set->nelems);
- nft_set_gc_batch_add(gcb, rbe_end);
- rb_erase(&rbe_end->node, &priv->root);
- rbe_end = NULL;
- }
- node = rb_next(node);
- if (!node)
- break;
+ nft_trans_gc_elem_add(gc, rbe_end);
+ rbe_end = NULL;
+dead_elem:
+ gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+ if (!gc)
+ goto try_later;
+
+ nft_trans_gc_elem_add(gc, rbe);
}
- if (rbe_prev)
- rb_erase(&rbe_prev->node, &priv->root);
- write_seqcount_end(&priv->count);
- write_unlock_bh(&priv->lock);
- nft_set_gc_batch_complete(gcb);
+try_later:
+ read_unlock_bh(&priv->lock);
+ if (gc)
+ nft_trans_gc_queue_async_done(gc);
+done:
queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
nft_set_gc_interval(set));
}
@@ -464,7 +707,8 @@ static int nft_rbtree_init(const struct nft_set *set,
return 0;
}
-static void nft_rbtree_destroy(const struct nft_set *set)
+static void nft_rbtree_destroy(const struct nft_ctx *ctx,
+ const struct nft_set *set)
{
struct nft_rbtree *priv = nft_set_priv(set);
struct nft_rbtree_elem *rbe;
@@ -475,7 +719,7 @@ static void nft_rbtree_destroy(const struct nft_set *set)
while ((node = priv->root.rb_node) != NULL) {
rb_erase(node, &priv->root);
rbe = rb_entry(node, struct nft_rbtree_elem, node);
- nft_set_elem_destroy(set, rbe, true);
+ nf_tables_set_elem_destroy(ctx, set, rbe);
}
}
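[note] The rbtree insert rewrite replaces the old in-descent overlap test with an explicit classification of the neighbours around the insertion point: rbe_le is the closest element less than or equal to the new key, rbe_ge the closest greater or equal one, and their interval start/end roles decide between acceptance, -EEXIST (exact duplicate, which the caller may clear under NLM_F_EXCL semantics) and -ENOTEMPTY (partial overlap). The decision table, reduced to a sketch; the full kernel logic also performs expiry GC and handles anonymous-set start runs during the walk:

    #include <errno.h>
    #include <stdbool.h>

    struct elem { bool end; };   /* true: interval end, false: start */

    /* le/ge: closest neighbours; eq_le/eq_ge: key equality with new */
    static int classify_overlap(const struct elem *new,
                                const struct elem *le, bool eq_le,
                                const struct elem *ge, bool eq_ge,
                                bool anonymous_set)
    {
            if (ge && eq_ge && ge->end == new->end)
                    return -EEXIST;        /* duplicate start-or-end */
            if (le && eq_le && le->end == new->end)
                    return -EEXIST;
            if (!anonymous_set && le && !le->end && !new->end)
                    return -ENOTEMPTY;     /* start after start */
            if (le && le->end && new->end)
                    return -ENOTEMPTY;     /* end after end */
            if (ge && ge->end && new->end)
                    return -ENOTEMPTY;
            return 0;                      /* insertion accepted */
    }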
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
index 4026ec38526f..c7b78e4ef459 100644
--- a/net/netfilter/nft_socket.c
+++ b/net/netfilter/nft_socket.c
@@ -10,7 +10,7 @@
struct nft_socket {
enum nft_socket_keys key:8;
union {
- enum nft_registers dreg:8;
+ u8 dreg;
};
};
@@ -119,9 +119,8 @@ static int nft_socket_init(const struct nft_ctx *ctx,
return -EOPNOTSUPP;
}
- priv->dreg = nft_parse_register(tb[NFTA_SOCKET_DREG]);
- return nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, len);
+ return nft_parse_register_store(ctx, tb[NFTA_SOCKET_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE, len);
}
static int nft_socket_dump(struct sk_buff *skb,
@@ -140,6 +139,11 @@ static int nft_socket_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
return nft_chain_validate_hooks(ctx->chain,
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN) |
diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
index 15abb0e49603..2e026f903a4c 100644
--- a/net/netfilter/nft_synproxy.c
+++ b/net/netfilter/nft_synproxy.c
@@ -186,7 +186,6 @@ static int nft_synproxy_do_init(const struct nft_ctx *ctx,
break;
#endif
case NFPROTO_INET:
- case NFPROTO_BRIDGE:
err = nf_synproxy_ipv4_init(snet, ctx->net);
if (err)
goto nf_ct_failure;
@@ -219,7 +218,6 @@ static void nft_synproxy_do_destroy(const struct nft_ctx *ctx)
break;
#endif
case NFPROTO_INET:
- case NFPROTO_BRIDGE:
nf_synproxy_ipv4_fini(snet, ctx->net);
nf_synproxy_ipv6_fini(snet, ctx->net);
break;
@@ -253,6 +251,11 @@ static int nft_synproxy_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_FORWARD));
}
diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
index b97ab1198b03..d9604a316600 100644
--- a/net/netfilter/nft_tproxy.c
+++ b/net/netfilter/nft_tproxy.c
@@ -13,9 +13,9 @@
#endif
struct nft_tproxy {
- enum nft_registers sreg_addr:8;
- enum nft_registers sreg_port:8;
- u8 family;
+ u8 sreg_addr;
+ u8 sreg_port;
+ u8 family;
};
static void nft_tproxy_eval_v4(const struct nft_expr *expr,
@@ -254,15 +254,15 @@ static int nft_tproxy_init(const struct nft_ctx *ctx,
}
if (tb[NFTA_TPROXY_REG_ADDR]) {
- priv->sreg_addr = nft_parse_register(tb[NFTA_TPROXY_REG_ADDR]);
- err = nft_validate_register_load(priv->sreg_addr, alen);
+ err = nft_parse_register_load(tb[NFTA_TPROXY_REG_ADDR],
+ &priv->sreg_addr, alen);
if (err < 0)
return err;
}
if (tb[NFTA_TPROXY_REG_PORT]) {
- priv->sreg_port = nft_parse_register(tb[NFTA_TPROXY_REG_PORT]);
- err = nft_validate_register_load(priv->sreg_port, sizeof(u16));
+ err = nft_parse_register_load(tb[NFTA_TPROXY_REG_PORT],
+ &priv->sreg_port, sizeof(u16));
if (err < 0)
return err;
}
@@ -289,6 +289,18 @@ static int nft_tproxy_dump(struct sk_buff *skb,
return 0;
}
+static int nft_tproxy_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
+ return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING);
+}
+
static struct nft_expr_type nft_tproxy_type;
static const struct nft_expr_ops nft_tproxy_ops = {
.type = &nft_tproxy_type,
@@ -296,6 +308,7 @@ static const struct nft_expr_ops nft_tproxy_ops = {
.eval = nft_tproxy_eval,
.init = nft_tproxy_init,
.dump = nft_tproxy_dump,
+ .validate = nft_tproxy_validate,
};
static struct nft_expr_type nft_tproxy_type __read_mostly = {
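The register-type cleanup here and in nft_socket.c above and nft_tunnel.c/nft_xfrm.c below replaces enum nft_registers dreg:8 bit-fields with a plain u8. The main point of the series is folding parse and validate into nft_parse_register_load()/nft_parse_register_store(), but one plausible side benefit of dropping the bit-field, sketched below, is that the signedness of an enum bit-field is implementation-defined, so a register number above 127 could in principle read back negative:

    #include <stdio.h>

    enum regs { REG_FIRST = 0, REG_LAST = 255 };

    struct old_layout { enum regs dreg : 8; };  /* impl.-defined signedness */
    struct new_layout { unsigned char dreg; };  /* always 0..255 */

    int main(void)
    {
            struct old_layout o = { .dreg = 200 };
            struct new_layout n = { .dreg = 200 };

            /* On a compiler that makes the bit-field signed, the first
             * value prints negative; the plain u8 field cannot. */
            printf("%d %u\n", (int)o.dreg, (unsigned int)n.dreg);
            return 0;
    }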
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
index 1effd4878619..b2070f9f98ff 100644
--- a/net/netfilter/nft_tunnel.c
+++ b/net/netfilter/nft_tunnel.c
@@ -14,7 +14,7 @@
struct nft_tunnel {
enum nft_tunnel_keys key:8;
- enum nft_registers dreg:8;
+ u8 dreg;
enum nft_tunnel_mode mode:8;
};
@@ -92,8 +92,6 @@ static int nft_tunnel_get_init(const struct nft_ctx *ctx,
return -EOPNOTSUPP;
}
- priv->dreg = nft_parse_register(tb[NFTA_TUNNEL_DREG]);
-
if (tb[NFTA_TUNNEL_MODE]) {
priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
if (priv->mode > NFT_TUNNEL_MODE_MAX)
@@ -102,8 +100,8 @@ static int nft_tunnel_get_init(const struct nft_ctx *ctx,
priv->mode = NFT_TUNNEL_MODE_NONE;
}
- return nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, len);
+ return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE, len);
}
static int nft_tunnel_get_dump(struct sk_buff *skb,
@@ -134,6 +132,7 @@ static const struct nft_expr_ops nft_tunnel_get_ops = {
static struct nft_expr_type nft_tunnel_type __read_mostly = {
.name = "tunnel",
+ .family = NFPROTO_NETDEV,
.ops = &nft_tunnel_get_ops,
.policy = nft_tunnel_policy,
.maxattr = NFTA_TUNNEL_MAX,
diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c
index 06d5cabf1d7c..7f762fc42891 100644
--- a/net/netfilter/nft_xfrm.c
+++ b/net/netfilter/nft_xfrm.c
@@ -24,7 +24,7 @@ static const struct nla_policy nft_xfrm_policy[NFTA_XFRM_MAX + 1] = {
struct nft_xfrm {
enum nft_xfrm_keys key:8;
- enum nft_registers dreg:8;
+ u8 dreg;
u8 dir;
u8 spnum;
};
@@ -86,9 +86,8 @@ static int nft_xfrm_get_init(const struct nft_ctx *ctx,
priv->spnum = spnum;
- priv->dreg = nft_parse_register(tb[NFTA_XFRM_DREG]);
- return nft_validate_register_store(ctx, priv->dreg, NULL,
- NFT_DATA_VALUE, len);
+ return nft_parse_register_store(ctx, tb[NFTA_XFRM_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE, len);
}
/* Return true if key asks for daddr/saddr and current
@@ -234,6 +233,11 @@ static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *e
const struct nft_xfrm *priv = nft_expr_priv(expr);
unsigned int hooks;
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
switch (priv->dir) {
case XFRM_POLICY_IN:
hooks = (1 << NF_INET_FORWARD) |
diff --git a/net/netfilter/xt_REDIRECT.c b/net/netfilter/xt_REDIRECT.c
index 353ca7801251..ff66b56a3f97 100644
--- a/net/netfilter/xt_REDIRECT.c
+++ b/net/netfilter/xt_REDIRECT.c
@@ -46,7 +46,6 @@ static void redirect_tg_destroy(const struct xt_tgdtor_param *par)
nf_ct_netns_put(par->net, par->family);
}
-/* FIXME: Take multiple ranges --RR */
static int redirect_tg4_check(const struct xt_tgchk_param *par)
{
const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
@@ -65,7 +64,14 @@ static int redirect_tg4_check(const struct xt_tgchk_param *par)
static unsigned int
redirect_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
- return nf_nat_redirect_ipv4(skb, par->targinfo, xt_hooknum(par));
+ const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+ struct nf_nat_range2 range = {
+ .flags = mr->range[0].flags,
+ .min_proto = mr->range[0].min,
+ .max_proto = mr->range[0].max,
+ };
+
+ return nf_nat_redirect_ipv4(skb, &range, xt_hooknum(par));
}
static struct xt_target redirect_tg_reg[] __read_mostly = {
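redirect_tg4() now builds an nf_nat_range2 on the stack from the legacy one-element compat structure instead of passing the compat type down to nf_nat_redirect_ipv4(). With stand-in types (the real ones live in the NAT uapi headers), the conversion amounts to:

    struct old_range  { unsigned int flags, min, max; };
    struct old_compat { struct old_range range[1]; }; /* only [0] is used */
    struct new_range2 { unsigned int flags, min_proto, max_proto; };

    static struct new_range2 compat_to_range2(const struct old_compat *mr)
    {
            struct new_range2 r = {
                    .flags     = mr->range[0].flags,
                    .min_proto = mr->range[0].min,
                    .max_proto = mr->range[0].max,
            };
            return r;
    }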
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index e1990baf3a3b..dc9485854002 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -71,4 +71,3 @@ MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Passive OS fingerprint matching.");
MODULE_ALIAS("ipt_osf");
MODULE_ALIAS("ip6t_osf");
-MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_OSF);
diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c
index e85ce69924ae..50332888c8d2 100644
--- a/net/netfilter/xt_owner.c
+++ b/net/netfilter/xt_owner.c
@@ -76,18 +76,23 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
*/
return false;
- filp = sk->sk_socket->file;
- if (filp == NULL)
+ read_lock_bh(&sk->sk_callback_lock);
+ filp = sk->sk_socket ? sk->sk_socket->file : NULL;
+ if (filp == NULL) {
+ read_unlock_bh(&sk->sk_callback_lock);
return ((info->match ^ info->invert) &
(XT_OWNER_UID | XT_OWNER_GID)) == 0;
+ }
if (info->match & XT_OWNER_UID) {
kuid_t uid_min = make_kuid(net->user_ns, info->uid_min);
kuid_t uid_max = make_kuid(net->user_ns, info->uid_max);
if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
uid_lte(filp->f_cred->fsuid, uid_max)) ^
- !(info->invert & XT_OWNER_UID))
+ !(info->invert & XT_OWNER_UID)) {
+ read_unlock_bh(&sk->sk_callback_lock);
return false;
+ }
}
if (info->match & XT_OWNER_GID) {
@@ -112,10 +117,13 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
}
}
- if (match ^ !(info->invert & XT_OWNER_GID))
+ if (match ^ !(info->invert & XT_OWNER_GID)) {
+ read_unlock_bh(&sk->sk_callback_lock);
return false;
+ }
}
+ read_unlock_bh(&sk->sk_callback_lock);
return true;
}
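The owner_mt() change above fixes a race: sk->sk_socket->file can be torn down concurrently, so the file pointer is fetched and dereferenced only under the read side of sk_callback_lock, and every early return now drops the lock first. A userspace analogue of that pattern, with stand-in types rather than kernel ones:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct file_like { unsigned int uid; };

    struct sock_like {
            pthread_rwlock_t callback_lock;
            struct file_like *file;     /* may become NULL at any moment */
    };

    static bool match_uid(struct sock_like *sk,
                          unsigned int uid_min, unsigned int uid_max)
    {
            bool ret = false;

            pthread_rwlock_rdlock(&sk->callback_lock);
            if (sk->file)
                    ret = sk->file->uid >= uid_min &&
                          sk->file->uid <= uid_max;
            pthread_rwlock_unlock(&sk->callback_lock); /* on every path */
            return ret;
    }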
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 3469b6073610..6fc0deb11aff 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -561,7 +561,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
{
struct recent_table *t = PDE_DATA(file_inode(file));
struct recent_entry *e;
- char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
+ char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:255.255.255.255")];
const char *c = buf;
union nf_inet_addr addr = {};
u_int16_t family;
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index 680015ba7cb6..d4bf089c9e3f 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -150,6 +150,8 @@ static int sctp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_sctp_info *info = par->matchinfo;
+ if (info->flag_count > ARRAY_SIZE(info->flag_info))
+ return -EINVAL;
if (info->flags & ~XT_SCTP_VALID_FLAGS)
return -EINVAL;
if (info->invflags & ~XT_SCTP_VALID_FLAGS)
diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c
index 177b40d08098..117d4615d668 100644
--- a/net/netfilter/xt_u32.c
+++ b/net/netfilter/xt_u32.c
@@ -96,11 +96,32 @@ static bool u32_mt(const struct sk_buff *skb, struct xt_action_param *par)
return ret ^ data->invert;
}
+static int u32_mt_checkentry(const struct xt_mtchk_param *par)
+{
+ const struct xt_u32 *data = par->matchinfo;
+ const struct xt_u32_test *ct;
+ unsigned int i;
+
+ if (data->ntests > ARRAY_SIZE(data->tests))
+ return -EINVAL;
+
+ for (i = 0; i < data->ntests; ++i) {
+ ct = &data->tests[i];
+
+ if (ct->nnums > ARRAY_SIZE(ct->location) ||
+ ct->nvalues > ARRAY_SIZE(ct->value))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static struct xt_match xt_u32_mt_reg __read_mostly = {
.name = "u32",
.revision = 0,
.family = NFPROTO_UNSPEC,
.match = u32_mt,
+ .checkentry = u32_mt_checkentry,
.matchsize = sizeof(struct xt_u32),
.me = THIS_MODULE,
};
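u32_mt_checkentry() (and the sctp_mt_check hunk above) validates user-supplied element counts against the fixed array sizes once, at rule-insertion time, so the packet-path loops never index out of bounds. The same shape in a self-contained sketch; the array size 11 is illustrative, the real bounds come from the xt_u32 uapi header:

    #include <errno.h>
    #include <stddef.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct test_like {
            unsigned int location[11], value[11];
            unsigned int nnums, nvalues;
    };

    struct info_like {
            struct test_like tests[11];
            unsigned int ntests;
    };

    /* Reject user-supplied counts before any loop uses them as indices. */
    static int check_counts(const struct info_like *info)
    {
            size_t i;

            if (info->ntests > ARRAY_SIZE(info->tests))
                    return -EINVAL;
            for (i = 0; i < info->ntests; i++) {
                    const struct test_like *t = &info->tests[i];

                    if (t->nnums > ARRAY_SIZE(t->location) ||
                        t->nvalues > ARRAY_SIZE(t->value))
                            return -EINVAL;
            }
            return 0;
    }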
diff --git a/net/netlabel/netlabel_calipso.c b/net/netlabel/netlabel_calipso.c
index 249da67d50a2..1bb2a7404dc4 100644
--- a/net/netlabel/netlabel_calipso.c
+++ b/net/netlabel/netlabel_calipso.c
@@ -54,6 +54,28 @@ static const struct nla_policy calipso_genl_policy[NLBL_CALIPSO_A_MAX + 1] = {
[NLBL_CALIPSO_A_MTYPE] = { .type = NLA_U32 },
};
+static const struct netlbl_calipso_ops *calipso_ops;
+
+/**
+ * netlbl_calipso_ops_register - Register the CALIPSO operations
+ * @ops: ops to register
+ *
+ * Description:
+ * Register the CALIPSO packet engine operations.
+ *
+ */
+const struct netlbl_calipso_ops *
+netlbl_calipso_ops_register(const struct netlbl_calipso_ops *ops)
+{
+ return xchg(&calipso_ops, ops);
+}
+EXPORT_SYMBOL(netlbl_calipso_ops_register);
+
+static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
+{
+ return READ_ONCE(calipso_ops);
+}
+
/* NetLabel Command Handlers
*/
/**
@@ -96,16 +118,19 @@ static int netlbl_calipso_add_pass(struct genl_info *info,
*
*/
static int netlbl_calipso_add(struct sk_buff *skb, struct genl_info *info)
-
{
int ret_val = -EINVAL;
struct netlbl_audit audit_info;
+ const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get();
if (!info->attrs[NLBL_CALIPSO_A_DOI] ||
!info->attrs[NLBL_CALIPSO_A_MTYPE])
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ if (!ops)
+ return -EOPNOTSUPP;
+
+ netlbl_netlink_auditinfo(&audit_info);
switch (nla_get_u32(info->attrs[NLBL_CALIPSO_A_MTYPE])) {
case CALIPSO_MAP_PASS:
ret_val = netlbl_calipso_add_pass(info, &audit_info);
@@ -287,7 +312,7 @@ static int netlbl_calipso_remove(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[NLBL_CALIPSO_A_DOI])
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
cb_arg.doi = nla_get_u32(info->attrs[NLBL_CALIPSO_A_DOI]);
cb_arg.audit_info = &audit_info;
ret_val = netlbl_domhsh_walk(&skip_bkt, &skip_chain,
@@ -362,27 +387,6 @@ int __init netlbl_calipso_genl_init(void)
return genl_register_family(&netlbl_calipso_gnl_family);
}
-static const struct netlbl_calipso_ops *calipso_ops;
-
-/**
- * netlbl_calipso_ops_register - Register the CALIPSO operations
- *
- * Description:
- * Register the CALIPSO packet engine operations.
- *
- */
-const struct netlbl_calipso_ops *
-netlbl_calipso_ops_register(const struct netlbl_calipso_ops *ops)
-{
- return xchg(&calipso_ops, ops);
-}
-EXPORT_SYMBOL(netlbl_calipso_ops_register);
-
-static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
-{
- return READ_ONCE(calipso_ops);
-}
-
/**
* calipso_doi_add - Add a new DOI to the CALIPSO protocol engine
* @doi_def: the DOI structure
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index 1778e4e8ce24..4197a9bcaa96 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -410,7 +410,7 @@ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info)
!info->attrs[NLBL_CIPSOV4_A_MTYPE])
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
switch (nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE])) {
case CIPSO_V4_MAP_TRANS:
ret_val = netlbl_cipsov4_add_std(info, &audit_info);
@@ -709,7 +709,7 @@ static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[NLBL_CIPSOV4_A_DOI])
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
cb_arg.doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]);
cb_arg.audit_info = &audit_info;
ret_val = netlbl_domhsh_walk(&skip_bkt, &skip_chain,
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 91b35b7c80d8..96059c99b915 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -857,7 +857,8 @@ int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap,
offset -= iter->startbit;
idx = offset / NETLBL_CATMAP_MAPSIZE;
- iter->bitmap[idx] |= bitmap << (offset % NETLBL_CATMAP_MAPSIZE);
+ iter->bitmap[idx] |= (NETLBL_CATMAP_MAPTYPE)bitmap
+ << (offset % NETLBL_CATMAP_MAPSIZE);
return 0;
}
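The netlbl_catmap_setlong() change is a classic shift-width bug: the source operand can be narrower than the NETLBL_CATMAP_MAPTYPE map word, so the shift must be widened first or the high bits are discarded before they ever reach the 64-bit word. A runnable illustration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t bitmap = 0xffffffffu;

            /* Without the cast the shift is done in 32 bits, so the
             * high half is lost before the 64-bit assignment: */
            uint64_t wrong = bitmap << 16;
            uint64_t right = (uint64_t)bitmap << 16; /* widen first */

            printf("%016llx vs %016llx\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }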
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index a92ed37d0922..e2801210467f 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -435,7 +435,7 @@ static int netlbl_mgmt_add(struct sk_buff *skb, struct genl_info *info)
(info->attrs[NLBL_MGMT_A_IPV6MASK] != NULL)))
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
return netlbl_mgmt_add_common(info, &audit_info);
}
@@ -458,7 +458,7 @@ static int netlbl_mgmt_remove(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[NLBL_MGMT_A_DOMAIN])
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
domain = nla_data(info->attrs[NLBL_MGMT_A_DOMAIN]);
return netlbl_domhsh_remove(domain, AF_UNSPEC, &audit_info);
@@ -558,7 +558,7 @@ static int netlbl_mgmt_adddef(struct sk_buff *skb, struct genl_info *info)
(info->attrs[NLBL_MGMT_A_IPV6MASK] != NULL)))
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
return netlbl_mgmt_add_common(info, &audit_info);
}
@@ -577,7 +577,7 @@ static int netlbl_mgmt_removedef(struct sk_buff *skb, struct genl_info *info)
{
struct netlbl_audit audit_info;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
return netlbl_domhsh_remove_default(AF_UNSPEC, &audit_info);
}
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 7b62cdea6163..f4d9a5c796f8 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -813,7 +813,7 @@ static int netlbl_unlabel_accept(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NLBL_UNLABEL_A_ACPTFLG]) {
value = nla_get_u8(info->attrs[NLBL_UNLABEL_A_ACPTFLG]);
if (value == 1 || value == 0) {
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
netlbl_unlabel_acceptflg_set(value, &audit_info);
return 0;
}
@@ -896,7 +896,7 @@ static int netlbl_unlabel_staticadd(struct sk_buff *skb,
!info->attrs[NLBL_UNLABEL_A_IPV6MASK])))
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len);
if (ret_val != 0)
@@ -946,7 +946,7 @@ static int netlbl_unlabel_staticadddef(struct sk_buff *skb,
!info->attrs[NLBL_UNLABEL_A_IPV6MASK])))
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len);
if (ret_val != 0)
@@ -993,7 +993,7 @@ static int netlbl_unlabel_staticremove(struct sk_buff *skb,
!info->attrs[NLBL_UNLABEL_A_IPV6MASK])))
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len);
if (ret_val != 0)
@@ -1033,7 +1033,7 @@ static int netlbl_unlabel_staticremovedef(struct sk_buff *skb,
!info->attrs[NLBL_UNLABEL_A_IPV6MASK])))
return -EINVAL;
- netlbl_netlink_auditinfo(skb, &audit_info);
+ netlbl_netlink_auditinfo(&audit_info);
ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len);
if (ret_val != 0)
diff --git a/net/netlabel/netlabel_user.h b/net/netlabel/netlabel_user.h
index 3c67afce64f1..32d8f92c9a20 100644
--- a/net/netlabel/netlabel_user.h
+++ b/net/netlabel/netlabel_user.h
@@ -28,11 +28,9 @@
/**
* netlbl_netlink_auditinfo - Fetch the audit information from a NETLINK msg
- * @skb: the packet
* @audit_info: NetLabel audit information
*/
-static inline void netlbl_netlink_auditinfo(struct sk_buff *skb,
- struct netlbl_audit *audit_info)
+static inline void netlbl_netlink_auditinfo(struct netlbl_audit *audit_info)
{
security_task_getsecid(current, &audit_info->secid);
audit_info->loginuid = audit_get_loginuid(current);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 86b70385dce3..3808b12da7f6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -156,7 +156,7 @@ static inline u32 netlink_group_mask(u32 group)
static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
gfp_t gfp_mask)
{
- unsigned int len = skb_end_offset(skb);
+ unsigned int len = skb->len;
struct sk_buff *new;
new = alloc_skb(len, gfp_mask);
@@ -365,7 +365,7 @@ static void netlink_skb_destructor(struct sk_buff *skb)
if (is_vmalloc_addr(skb->head)) {
if (!skb->cloned ||
!atomic_dec_return(&(skb_shinfo(skb)->dataref)))
- vfree(skb->head);
+ vfree_atomic(skb->head);
skb->head = NULL;
}
@@ -569,7 +569,9 @@ static int netlink_insert(struct sock *sk, u32 portid)
if (nlk_sk(sk)->bound)
goto err;
- nlk_sk(sk)->portid = portid;
+ /* portid can be read locklessly from netlink_getname(). */
+ WRITE_ONCE(nlk_sk(sk)->portid, portid);
+
sock_hold(sk);
err = __netlink_insert(table, sk);
@@ -1018,7 +1020,6 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
return -EINVAL;
}
- netlink_lock_table();
if (nlk->netlink_bind && groups) {
int group;
@@ -1030,13 +1031,14 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
if (!err)
continue;
netlink_undo_bind(group, groups, sk);
- goto unlock;
+ return err;
}
}
/* No need for barriers here as we return to user-space without
* using any of the bound attributes.
*/
+ netlink_lock_table();
if (!bound) {
err = nladdr->nl_pid ?
netlink_insert(sk, nladdr->nl_pid) :
@@ -1078,9 +1080,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
return -EINVAL;
if (addr->sa_family == AF_UNSPEC) {
- sk->sk_state = NETLINK_UNCONNECTED;
- nlk->dst_portid = 0;
- nlk->dst_group = 0;
+ /* paired with READ_ONCE() in netlink_getsockbyportid() */
+ WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED);
+ /* dst_portid and dst_group can be read locklessly */
+ WRITE_ONCE(nlk->dst_portid, 0);
+ WRITE_ONCE(nlk->dst_group, 0);
return 0;
}
if (addr->sa_family != AF_NETLINK)
@@ -1101,9 +1105,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
err = netlink_autobind(sock);
if (err == 0) {
- sk->sk_state = NETLINK_CONNECTED;
- nlk->dst_portid = nladdr->nl_pid;
- nlk->dst_group = ffs(nladdr->nl_groups);
+ /* paired with READ_ONCE() in netlink_getsockbyportid() */
+ WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED);
+ /* dst_portid and dst_group can be read locklessly */
+ WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
+ WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
}
return err;
@@ -1120,10 +1126,12 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
nladdr->nl_pad = 0;
if (peer) {
- nladdr->nl_pid = nlk->dst_portid;
- nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
+ /* Paired with WRITE_ONCE() in netlink_connect() */
+ nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
+ nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
} else {
- nladdr->nl_pid = nlk->portid;
+ /* Paired with WRITE_ONCE() in netlink_insert() */
+ nladdr->nl_pid = READ_ONCE(nlk->portid);
netlink_lock_table();
nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
netlink_unlock_table();
@@ -1150,8 +1158,9 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
/* Don't bother queuing skb if kernel socket has no input function */
nlk = nlk_sk(sock);
- if (sock->sk_state == NETLINK_CONNECTED &&
- nlk->dst_portid != nlk_sk(ssk)->portid) {
+ /* dst_portid and sk_state can be changed in netlink_connect() */
+ if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
+ READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
sock_put(sock);
return ERR_PTR(-ECONNREFUSED);
}
@@ -1592,6 +1601,7 @@ out:
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
struct netlink_set_err_data info;
+ unsigned long flags;
struct sock *sk;
int ret = 0;
@@ -1601,12 +1611,12 @@ int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
/* sk->sk_err wants a positive error value */
info.code = -code;
- read_lock(&nl_table_lock);
+ read_lock_irqsave(&nl_table_lock, flags);
sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
ret += do_one_set_err(sk, &info);
- read_unlock(&nl_table_lock);
+ read_unlock_irqrestore(&nl_table_lock, flags);
return ret;
}
EXPORT_SYMBOL(netlink_set_err);
@@ -1734,7 +1744,8 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
- int len, val, err;
+ unsigned int flag;
+ int len, val;
if (level != SOL_NETLINK)
return -ENOPROTOOPT;
@@ -1746,39 +1757,17 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
switch (optname) {
case NETLINK_PKTINFO:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
- if (put_user(len, optlen) ||
- put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_RECV_PKTINFO;
break;
case NETLINK_BROADCAST_ERROR:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
- if (put_user(len, optlen) ||
- put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_BROADCAST_SEND_ERROR;
break;
case NETLINK_NO_ENOBUFS:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
- if (put_user(len, optlen) ||
- put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_RECV_NO_ENOBUFS;
break;
case NETLINK_LIST_MEMBERSHIPS: {
- int pos, idx, shift;
+ int pos, idx, shift, err = 0;
- err = 0;
netlink_lock_table();
for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
if (len - pos < sizeof(u32))
@@ -1792,43 +1781,35 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
break;
}
}
- if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
+ if (put_user(ALIGN(BITS_TO_BYTES(nlk->ngroups), sizeof(u32)), optlen))
err = -EFAULT;
netlink_unlock_table();
- break;
+ return err;
}
case NETLINK_CAP_ACK:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
- if (put_user(len, optlen) ||
- put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_CAP_ACK;
break;
case NETLINK_EXT_ACK:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_EXT_ACK ? 1 : 0;
- if (put_user(len, optlen) || put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_EXT_ACK;
break;
case NETLINK_GET_STRICT_CHK:
- if (len < sizeof(int))
- return -EINVAL;
- len = sizeof(int);
- val = nlk->flags & NETLINK_F_STRICT_CHK ? 1 : 0;
- if (put_user(len, optlen) || put_user(val, optval))
- return -EFAULT;
- err = 0;
+ flag = NETLINK_F_STRICT_CHK;
break;
default:
- err = -ENOPROTOOPT;
+ return -ENOPROTOOPT;
}
- return err;
+
+ if (len < sizeof(int))
+ return -EINVAL;
+
+ len = sizeof(int);
+ val = nlk->flags & flag ? 1 : 0;
+
+ if (put_user(len, optlen) ||
+ copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
}
static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
@@ -1887,8 +1868,9 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
goto out;
netlink_skb_flags |= NETLINK_SKB_DST;
} else {
- dst_portid = nlk->dst_portid;
- dst_group = nlk->dst_group;
+ /* Paired with WRITE_ONCE() in netlink_connect() */
+ dst_portid = READ_ONCE(nlk->dst_portid);
+ dst_group = READ_ONCE(nlk->dst_group);
}
/* Paired with WRITE_ONCE() in netlink_insert() */
@@ -2010,7 +1992,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
skb_free_datagram(sk, skb);
- if (nlk->cb_running &&
+ if (READ_ONCE(nlk->cb_running) &&
atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
ret = netlink_dump(sk);
if (ret) {
@@ -2303,7 +2285,7 @@ static int netlink_dump(struct sock *sk)
if (cb->done)
cb->done(cb);
- nlk->cb_running = false;
+ WRITE_ONCE(nlk->cb_running, false);
module = cb->module;
skb = cb->skb;
mutex_unlock(nlk->cb_mutex);
@@ -2366,7 +2348,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
goto error_put;
}
- nlk->cb_running = true;
+ WRITE_ONCE(nlk->cb_running, true);
nlk->dump_done_errno = INT_MAX;
mutex_unlock(nlk->cb_mutex);
@@ -2655,7 +2637,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
nlk->groups ? (u32)nlk->groups[0] : 0,
sk_rmem_alloc_get(s),
sk_wmem_alloc_get(s),
- nlk->cb_running,
+ READ_ONCE(nlk->cb_running),
refcount_read(&s->sk_refcnt),
atomic_read(&s->sk_drops),
sock_i_ino(s)
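The af_netlink.c hunks annotate each lockless reader/writer pair (portid, dst_portid, dst_group, sk_state, cb_running) with matched READ_ONCE()/WRITE_ONCE(). A rough userspace analogue of that contract is a relaxed C11 atomic, which likewise rules out torn or duplicated loads and stores without imposing any ordering:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t dst_portid;  /* stand-in for nlk->dst_portid */

    /* connect path: a plain-looking store, but tear-free */
    static void set_peer(uint32_t portid)
    {
            atomic_store_explicit(&dst_portid, portid,
                                  memory_order_relaxed);
    }

    /* getname/sendmsg path: may run with no lock held */
    static uint32_t get_peer(void)
    {
            return atomic_load_explicit(&dst_portid,
                                        memory_order_relaxed);
    }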
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index c6255eac305c..e4f21b1067bc 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -94,6 +94,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net *net = sock_net(skb->sk);
struct netlink_diag_req *req;
struct netlink_sock *nlsk;
+ unsigned long flags;
struct sock *sk;
int num = 2;
int ret = 0;
@@ -152,7 +153,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
num++;
mc_list:
- read_lock(&nl_table_lock);
+ read_lock_irqsave(&nl_table_lock, flags);
sk_for_each_bound(sk, &tbl->mc_list) {
if (sk_hashed(sk))
continue;
@@ -167,13 +168,13 @@ mc_list:
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI,
- sock_i_ino(sk)) < 0) {
+ __sock_i_ino(sk)) < 0) {
ret = 1;
break;
}
num++;
}
- read_unlock(&nl_table_lock);
+ read_unlock_irqrestore(&nl_table_lock, flags);
done:
cb->args[0] = num;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 102b8d6b5612..a03e16e06e29 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -989,11 +989,46 @@ static struct genl_family genl_ctrl __ro_after_init = {
.netnsok = true,
};
+static int genl_bind(struct net *net, int group)
+{
+ const struct genl_family *family;
+ unsigned int id;
+ int ret = 0;
+
+ genl_lock_all();
+
+ idr_for_each_entry(&genl_fam_idr, family, id) {
+ const struct genl_multicast_group *grp;
+ int i;
+
+ if (family->n_mcgrps == 0)
+ continue;
+
+ i = group - family->mcgrp_offset;
+ if (i < 0 || i >= family->n_mcgrps)
+ continue;
+
+ grp = &family->mcgrps[i];
+ if ((grp->flags & GENL_UNS_ADMIN_PERM) &&
+ !ns_capable(net->user_ns, CAP_NET_ADMIN))
+ ret = -EPERM;
+ if (grp->cap_sys_admin &&
+ !ns_capable(net->user_ns, CAP_SYS_ADMIN))
+ ret = -EPERM;
+
+ break;
+ }
+
+ genl_unlock_all();
+ return ret;
+}
+
static int __net_init genl_pernet_init(struct net *net)
{
struct netlink_kernel_cfg cfg = {
.input = genl_rcv,
.flags = NL_CFG_F_NONROOT_RECV,
+ .bind = genl_bind,
};
/* we'll bump the group number right afterwards */
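genl_bind() above walks the family IDR, converts the global multicast group number into a family-relative index, and enforces that group's permission flags before the bind succeeds. Reduced to its core, with stand-in types and a caller-supplied capability callback:

    #include <stdbool.h>

    #define FLAG_ADMIN_PERM 0x1     /* stand-in for GENL_UNS_ADMIN_PERM */

    struct family_like {
            int mcgrp_offset, n_mcgrps;
            const unsigned int *grp_flags;  /* one flags word per group */
    };

    static int bind_check(const struct family_like *f, int group,
                          bool (*capable)(void))
    {
            int i = group - f->mcgrp_offset;

            if (i < 0 || i >= f->n_mcgrps)
                    return 0;       /* group belongs to another family */
            if ((f->grp_flags[i] & FLAG_ADMIN_PERM) && !capable())
                    return -1;      /* -EPERM in the kernel version */
            return 0;
    }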
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 58d5373c513c..abb69c149644 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -378,6 +378,11 @@ static int nr_listen(struct socket *sock, int backlog)
struct sock *sk = sock->sk;
lock_sock(sk);
+ if (sock->state != SS_UNCONNECTED) {
+ release_sock(sk);
+ return -EINVAL;
+ }
+
if (sk->sk_state != TCP_LISTEN) {
memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN);
sk->sk_max_ack_backlog = backlog;
@@ -426,16 +431,16 @@ static int nr_create(struct net *net, struct socket *sock, int protocol,
nr_init_timers(sk);
nr->t1 =
- msecs_to_jiffies(sysctl_netrom_transport_timeout);
+ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_timeout));
nr->t2 =
- msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay);
+ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_acknowledge_delay));
nr->n2 =
- msecs_to_jiffies(sysctl_netrom_transport_maximum_tries);
+ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_maximum_tries));
nr->t4 =
- msecs_to_jiffies(sysctl_netrom_transport_busy_delay);
+ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_busy_delay));
nr->idle =
- msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout);
- nr->window = sysctl_netrom_transport_requested_window_size;
+ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_no_activity_timeout));
+ nr->window = READ_ONCE(sysctl_netrom_transport_requested_window_size);
nr->bpqext = 1;
nr->state = NR_STATE_0;
@@ -633,6 +638,11 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
goto out_release;
}
+ if (sock->state == SS_CONNECTING) {
+ err = -EALREADY;
+ goto out_release;
+ }
+
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
@@ -922,7 +932,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
* G8PZT's Xrouter which is sending packets with command type 7
* as an extension of the protocol.
*/
- if (sysctl_netrom_reset_circuit &&
+ if (READ_ONCE(sysctl_netrom_reset_circuit) &&
(frametype != NR_RESET || flags != 0))
nr_transmit_reset(skb, 1);
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 29e418c8c6c3..4caee8754b79 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -81,7 +81,7 @@ static int nr_header(struct sk_buff *skb, struct net_device *dev,
buff[6] |= AX25_SSSID_SPARE;
buff += AX25_ADDR_LEN;
- *buff++ = sysctl_netrom_network_ttl_initialiser;
+ *buff++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
*buff++ = NR_PROTO_IP;
*buff++ = NR_PROTO_IP;
diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c
index 2bef3779f893..8cbb57678a9e 100644
--- a/net/netrom/nr_in.c
+++ b/net/netrom/nr_in.c
@@ -97,7 +97,7 @@ static int nr_state1_machine(struct sock *sk, struct sk_buff *skb,
break;
case NR_RESET:
- if (sysctl_netrom_reset_circuit)
+ if (READ_ONCE(sysctl_netrom_reset_circuit))
nr_disconnect(sk, ECONNRESET);
break;
@@ -128,7 +128,7 @@ static int nr_state2_machine(struct sock *sk, struct sk_buff *skb,
break;
case NR_RESET:
- if (sysctl_netrom_reset_circuit)
+ if (READ_ONCE(sysctl_netrom_reset_circuit))
nr_disconnect(sk, ECONNRESET);
break;
@@ -263,7 +263,7 @@ static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype
break;
case NR_RESET:
- if (sysctl_netrom_reset_circuit)
+ if (READ_ONCE(sysctl_netrom_reset_circuit))
nr_disconnect(sk, ECONNRESET);
break;
diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c
index 44929657f5b7..5e531394a724 100644
--- a/net/netrom/nr_out.c
+++ b/net/netrom/nr_out.c
@@ -204,7 +204,7 @@ void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
dptr[6] |= AX25_SSSID_SPARE;
dptr += AX25_ADDR_LEN;
- *dptr++ = sysctl_netrom_network_ttl_initialiser;
+ *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
if (!nr_route_frame(skb, NULL)) {
kfree_skb(skb);
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 89cd9de21594..37cfa880c2d0 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -153,7 +153,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
nr_neigh->digipeat = NULL;
nr_neigh->ax25 = NULL;
nr_neigh->dev = dev;
- nr_neigh->quality = sysctl_netrom_default_path_quality;
+ nr_neigh->quality = READ_ONCE(sysctl_netrom_default_path_quality);
nr_neigh->locked = 0;
nr_neigh->count = 0;
nr_neigh->number = nr_neigh_no++;
@@ -725,7 +725,7 @@ void nr_link_failed(ax25_cb *ax25, int reason)
nr_neigh->ax25 = NULL;
ax25_cb_put(ax25);
- if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
+ if (++nr_neigh->failed < READ_ONCE(sysctl_netrom_link_fails_count)) {
nr_neigh_put(nr_neigh);
return;
}
@@ -763,7 +763,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
if (ax25 != NULL) {
ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
ax25->ax25_dev->dev, 0,
- sysctl_netrom_obsolescence_count_initialiser);
+ READ_ONCE(sysctl_netrom_obsolescence_count_initialiser));
if (ret)
return ret;
}
@@ -777,7 +777,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
return ret;
}
- if (!sysctl_netrom_routing_control && ax25 != NULL)
+ if (!READ_ONCE(sysctl_netrom_routing_control) && ax25 != NULL)
return 0;
/* Its Time-To-Live has expired */
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
index 3f99b432ea70..c3bbd5880850 100644
--- a/net/netrom/nr_subr.c
+++ b/net/netrom/nr_subr.c
@@ -123,7 +123,7 @@ void nr_write_internal(struct sock *sk, int frametype)
unsigned char *dptr;
int len, timeout;
- len = NR_NETWORK_LEN + NR_TRANSPORT_LEN;
+ len = NR_TRANSPORT_LEN;
switch (frametype & 0x0F) {
case NR_CONNREQ:
@@ -141,7 +141,8 @@ void nr_write_internal(struct sock *sk, int frametype)
return;
}
- if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
+ skb = alloc_skb(NR_NETWORK_LEN + len, GFP_ATOMIC);
+ if (!skb)
return;
/*
@@ -149,7 +150,7 @@ void nr_write_internal(struct sock *sk, int frametype)
*/
skb_reserve(skb, NR_NETWORK_LEN);
- dptr = skb_put(skb, skb_tailroom(skb));
+ dptr = skb_put(skb, len);
switch (frametype & 0x0F) {
case NR_CONNREQ:
@@ -181,7 +182,8 @@ void nr_write_internal(struct sock *sk, int frametype)
*dptr++ = nr->my_id;
*dptr++ = frametype;
*dptr++ = nr->window;
- if (nr->bpqext) *dptr++ = sysctl_netrom_network_ttl_initialiser;
+ if (nr->bpqext)
+ *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
break;
case NR_DISCREQ:
@@ -235,7 +237,7 @@ void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags)
dptr[6] |= AX25_SSSID_SPARE;
dptr += AX25_ADDR_LEN;
- *dptr++ = sysctl_netrom_network_ttl_initialiser;
+ *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
if (mine) {
*dptr++ = 0;
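The nr_write_internal() hunks fix a sizing mismatch: the buffer is now allocated as network headroom plus the transport length, the headroom is reserved, and exactly len bytes are put. The old skb_put(skb, skb_tailroom(skb)) could put more than the intended frame, since the allocator may round the buffer up and leave extra tailroom. A small model of reserve/put with stand-in helpers:

    #include <stddef.h>

    struct buf {
            unsigned char data[64];
            size_t head, len;       /* reserved headroom, payload bytes */
    };

    static void buf_reserve(struct buf *b, size_t n)
    {
            b->head += n;           /* like skb_reserve() */
    }

    static unsigned char *buf_put(struct buf *b, size_t n)
    {
            unsigned char *p = b->data + b->head + b->len;

            b->len += n;            /* put exactly n, never the tailroom,
                                     * which may exceed the frame size */
            return p;
    }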
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index a8da88db7893..4e7c968cde2d 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -121,6 +121,7 @@ static void nr_heartbeat_expiry(struct timer_list *t)
is accepted() it isn't 'dead' so doesn't get removed. */
if (sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
+ sock_hold(sk);
bh_unlock_sock(sk);
nr_destroy_socket(sk);
goto out;
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 2d4729d1f0eb..fef112fb4993 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -634,7 +634,7 @@ error:
return rc;
}
-int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
+int nfc_set_remote_general_bytes(struct nfc_dev *dev, const u8 *gb, u8 gb_len)
{
pr_debug("dev_name=%s gb_len=%d\n", dev_name(&dev->dev), gb_len);
@@ -663,7 +663,7 @@ int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb)
EXPORT_SYMBOL(nfc_tm_data_received);
int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode,
- u8 *gb, size_t gb_len)
+ const u8 *gb, size_t gb_len)
{
int rc;
diff --git a/net/nfc/hci/llc_shdlc.c b/net/nfc/hci/llc_shdlc.c
index 0eb4ddc056e7..02909e3e91ef 100644
--- a/net/nfc/hci/llc_shdlc.c
+++ b/net/nfc/hci/llc_shdlc.c
@@ -123,7 +123,7 @@ static bool llc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
return ((y >= x) || (y < z)) ? true : false;
}
-static struct sk_buff *llc_shdlc_alloc_skb(struct llc_shdlc *shdlc,
+static struct sk_buff *llc_shdlc_alloc_skb(const struct llc_shdlc *shdlc,
int payload_len)
{
struct sk_buff *skb;
@@ -137,7 +137,7 @@ static struct sk_buff *llc_shdlc_alloc_skb(struct llc_shdlc *shdlc,
}
/* immediately sends an S frame. */
-static int llc_shdlc_send_s_frame(struct llc_shdlc *shdlc,
+static int llc_shdlc_send_s_frame(const struct llc_shdlc *shdlc,
enum sframe_type sframe_type, int nr)
{
int r;
@@ -159,7 +159,7 @@ static int llc_shdlc_send_s_frame(struct llc_shdlc *shdlc,
}
/* immediately sends an U frame. skb may contain optional payload */
-static int llc_shdlc_send_u_frame(struct llc_shdlc *shdlc,
+static int llc_shdlc_send_u_frame(const struct llc_shdlc *shdlc,
struct sk_buff *skb,
enum uframe_modifier uframe_modifier)
{
@@ -361,7 +361,7 @@ static void llc_shdlc_connect_complete(struct llc_shdlc *shdlc, int r)
wake_up(shdlc->connect_wq);
}
-static int llc_shdlc_connect_initiate(struct llc_shdlc *shdlc)
+static int llc_shdlc_connect_initiate(const struct llc_shdlc *shdlc)
{
struct sk_buff *skb;
@@ -377,7 +377,7 @@ static int llc_shdlc_connect_initiate(struct llc_shdlc *shdlc)
return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
}
-static int llc_shdlc_connect_send_ua(struct llc_shdlc *shdlc)
+static int llc_shdlc_connect_send_ua(const struct llc_shdlc *shdlc)
{
struct sk_buff *skb;
diff --git a/net/nfc/llcp.h b/net/nfc/llcp.h
index 97853c9cefc7..a81893bc06ce 100644
--- a/net/nfc/llcp.h
+++ b/net/nfc/llcp.h
@@ -202,7 +202,6 @@ void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *s);
void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *s);
void nfc_llcp_socket_remote_param_init(struct nfc_llcp_sock *sock);
struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
-struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local);
int nfc_llcp_local_put(struct nfc_llcp_local *local);
u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
struct nfc_llcp_sock *sock);
@@ -221,15 +220,15 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock);
/* TLV API */
int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local,
- u8 *tlv_array, u16 tlv_array_len);
+ const u8 *tlv_array, u16 tlv_array_len);
int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
- u8 *tlv_array, u16 tlv_array_len);
+ const u8 *tlv_array, u16 tlv_array_len);
/* Commands API */
void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
-u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length);
+u8 *nfc_llcp_build_tlv(u8 type, const u8 *value, u8 value_length, u8 *tlv_length);
struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdres_tlv(u8 tid, u8 sap);
-struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
+struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, const char *uri,
size_t uri_len);
void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp);
void nfc_llcp_free_sdp_tlv_list(struct hlist_head *sdp_head);
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index 475061c79c44..5b8754ae7d3a 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -15,7 +15,7 @@
#include "nfc.h"
#include "llcp.h"
-static u8 llcp_tlv_length[LLCP_TLV_MAX] = {
+static const u8 llcp_tlv_length[LLCP_TLV_MAX] = {
0,
1, /* VERSION */
2, /* MIUX */
@@ -29,7 +29,7 @@ static u8 llcp_tlv_length[LLCP_TLV_MAX] = {
};
-static u8 llcp_tlv8(u8 *tlv, u8 type)
+static u8 llcp_tlv8(const u8 *tlv, u8 type)
{
if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]])
return 0;
@@ -37,7 +37,7 @@ static u8 llcp_tlv8(u8 *tlv, u8 type)
return tlv[2];
}
-static u16 llcp_tlv16(u8 *tlv, u8 type)
+static u16 llcp_tlv16(const u8 *tlv, u8 type)
{
if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]])
return 0;
@@ -46,37 +46,37 @@ static u16 llcp_tlv16(u8 *tlv, u8 type)
}
-static u8 llcp_tlv_version(u8 *tlv)
+static u8 llcp_tlv_version(const u8 *tlv)
{
return llcp_tlv8(tlv, LLCP_TLV_VERSION);
}
-static u16 llcp_tlv_miux(u8 *tlv)
+static u16 llcp_tlv_miux(const u8 *tlv)
{
return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7ff;
}
-static u16 llcp_tlv_wks(u8 *tlv)
+static u16 llcp_tlv_wks(const u8 *tlv)
{
return llcp_tlv16(tlv, LLCP_TLV_WKS);
}
-static u16 llcp_tlv_lto(u8 *tlv)
+static u16 llcp_tlv_lto(const u8 *tlv)
{
return llcp_tlv8(tlv, LLCP_TLV_LTO);
}
-static u8 llcp_tlv_opt(u8 *tlv)
+static u8 llcp_tlv_opt(const u8 *tlv)
{
return llcp_tlv8(tlv, LLCP_TLV_OPT);
}
-static u8 llcp_tlv_rw(u8 *tlv)
+static u8 llcp_tlv_rw(const u8 *tlv)
{
return llcp_tlv8(tlv, LLCP_TLV_RW) & 0xf;
}
-u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
+u8 *nfc_llcp_build_tlv(u8 type, const u8 *value, u8 value_length, u8 *tlv_length)
{
u8 *tlv, length;
@@ -130,7 +130,7 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdres_tlv(u8 tid, u8 sap)
return sdres;
}
-struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
+struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, const char *uri,
size_t uri_len)
{
struct nfc_llcp_sdp_tlv *sdreq;
@@ -190,9 +190,10 @@ void nfc_llcp_free_sdp_tlv_list(struct hlist_head *head)
}
int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local,
- u8 *tlv_array, u16 tlv_array_len)
+ const u8 *tlv_array, u16 tlv_array_len)
{
- u8 *tlv = tlv_array, type, length, offset = 0;
+ const u8 *tlv = tlv_array;
+ u8 type, length, offset = 0;
pr_debug("TLV array length %d\n", tlv_array_len);
@@ -239,9 +240,10 @@ int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local,
}
int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
- u8 *tlv_array, u16 tlv_array_len)
+ const u8 *tlv_array, u16 tlv_array_len)
{
- u8 *tlv = tlv_array, type, length, offset = 0;
+ const u8 *tlv = tlv_array;
+ u8 type, length, offset = 0;
pr_debug("TLV array length %d\n", tlv_array_len);
@@ -295,7 +297,7 @@ static struct sk_buff *llcp_add_header(struct sk_buff *pdu,
return pdu;
}
-static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, u8 *tlv,
+static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, const u8 *tlv,
u8 tlv_length)
{
/* XXX Add an skb length check */
@@ -359,6 +361,7 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
struct sk_buff *skb;
struct nfc_llcp_local *local;
u16 size = 0;
+ int err;
pr_debug("Sending SYMM\n");
@@ -370,8 +373,10 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
skb = alloc_skb(size, GFP_KERNEL);
- if (skb == NULL)
- return -ENOMEM;
+ if (skb == NULL) {
+ err = -ENOMEM;
+ goto out;
+ }
skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
@@ -381,17 +386,22 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX);
- return nfc_data_exchange(dev, local->target_idx, skb,
+ err = nfc_data_exchange(dev, local->target_idx, skb,
nfc_llcp_recv, local);
+out:
+ nfc_llcp_local_put(local);
+ return err;
}
int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
{
struct nfc_llcp_local *local;
struct sk_buff *skb;
- u8 *service_name_tlv = NULL, service_name_tlv_length;
- u8 *miux_tlv = NULL, miux_tlv_length;
- u8 *rw_tlv = NULL, rw_tlv_length, rw;
+ const u8 *service_name_tlv = NULL;
+ const u8 *miux_tlv = NULL;
+ const u8 *rw_tlv = NULL;
+ u8 service_name_tlv_length = 0;
+ u8 miux_tlv_length, rw_tlv_length, rw;
int err;
u16 size = 0;
__be16 miux;
@@ -465,8 +475,9 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
{
struct nfc_llcp_local *local;
struct sk_buff *skb;
- u8 *miux_tlv = NULL, miux_tlv_length;
- u8 *rw_tlv = NULL, rw_tlv_length, rw;
+ const u8 *miux_tlv = NULL;
+ const u8 *rw_tlv = NULL;
+ u8 miux_tlv_length, rw_tlv_length, rw;
int err;
u16 size = 0;
__be16 miux;
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index cc997518f79d..da3cb0d29b97 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -17,6 +17,8 @@
static u8 llcp_magic[3] = {0x46, 0x66, 0x6d};
static LIST_HEAD(llcp_devices);
+/* Protects llcp_devices list */
+static DEFINE_SPINLOCK(llcp_devices_lock);
static void nfc_llcp_rx_skb(struct nfc_llcp_local *local, struct sk_buff *skb);
@@ -143,8 +145,15 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool device,
write_unlock(&local->raw_sockets.lock);
}
-struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
+static struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
{
+ /* Since using nfc_llcp_local may result in usage of nfc_dev, whenever
+ * we hold a reference to local, we also need to hold a reference to
+ * the device to avoid UAF.
+ */
+ if (!nfc_get_device(local->dev->idx))
+ return NULL;
+
kref_get(&local->ref);
return local;
@@ -159,6 +168,7 @@ static void local_cleanup(struct nfc_llcp_local *local)
cancel_work_sync(&local->rx_work);
cancel_work_sync(&local->timeout_work);
kfree_skb(local->rx_pending);
+ local->rx_pending = NULL;
del_timer_sync(&local->sdreq_timer);
cancel_work_sync(&local->sdreq_timeout_work);
nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs);
@@ -170,17 +180,24 @@ static void local_release(struct kref *ref)
local = container_of(ref, struct nfc_llcp_local, ref);
- list_del(&local->list);
local_cleanup(local);
kfree(local);
}
int nfc_llcp_local_put(struct nfc_llcp_local *local)
{
+ struct nfc_dev *dev;
+ int ret;
+
if (local == NULL)
return 0;
- return kref_put(&local->ref, local_release);
+ dev = local->dev;
+
+ ret = kref_put(&local->ref, local_release);
+ nfc_put_device(dev);
+
+ return ret;
}
static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
@@ -203,17 +220,13 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) {
llcp_sock = tmp_sock;
+ sock_hold(&llcp_sock->sk);
break;
}
}
read_unlock(&local->sockets.lock);
- if (llcp_sock == NULL)
- return NULL;
-
- sock_hold(&llcp_sock->sk);
-
return llcp_sock;
}
@@ -283,12 +296,33 @@ static void nfc_llcp_sdreq_timer(struct timer_list *t)
struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
{
struct nfc_llcp_local *local;
+ struct nfc_llcp_local *res = NULL;
+ spin_lock(&llcp_devices_lock);
list_for_each_entry(local, &llcp_devices, list)
- if (local->dev == dev)
+ if (local->dev == dev) {
+ res = nfc_llcp_local_get(local);
+ break;
+ }
+ spin_unlock(&llcp_devices_lock);
+
+ return res;
+}
+
+static struct nfc_llcp_local *nfc_llcp_remove_local(struct nfc_dev *dev)
+{
+ struct nfc_llcp_local *local, *tmp;
+
+ spin_lock(&llcp_devices_lock);
+ list_for_each_entry_safe(local, tmp, &llcp_devices, list)
+ if (local->dev == dev) {
+ list_del(&local->list);
+ spin_unlock(&llcp_devices_lock);
return local;
+ }
+ spin_unlock(&llcp_devices_lock);
- pr_debug("No device found\n");
+ pr_warn("Shutting down device not found\n");
return NULL;
}
@@ -301,7 +335,7 @@ static char *wks[] = {
"urn:nfc:sn:snep",
};
-static int nfc_llcp_wks_sap(char *service_name, size_t service_name_len)
+static int nfc_llcp_wks_sap(const char *service_name, size_t service_name_len)
{
int sap, num_wks;
@@ -325,7 +359,8 @@ static int nfc_llcp_wks_sap(char *service_name, size_t service_name_len)
static
struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
- u8 *sn, size_t sn_len)
+ const u8 *sn, size_t sn_len,
+ bool needref)
{
struct sock *sk;
struct nfc_llcp_sock *llcp_sock, *tmp_sock;
@@ -361,6 +396,8 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
if (memcmp(sn, tmp_sock->service_name, sn_len) == 0) {
llcp_sock = tmp_sock;
+ if (needref)
+ sock_hold(&llcp_sock->sk);
break;
}
}
@@ -402,7 +439,8 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
* to this service name.
*/
if (nfc_llcp_sock_from_sn(local, sock->service_name,
- sock->service_name_len) != NULL) {
+ sock->service_name_len,
+ false) != NULL) {
mutex_unlock(&local->sdp_lock);
return LLCP_SAP_MAX;
@@ -522,7 +560,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
{
u8 *gb_cur, version, version_length;
u8 lto_length, wks_length, miux_length;
- u8 *version_tlv = NULL, *lto_tlv = NULL,
+ const u8 *version_tlv = NULL, *lto_tlv = NULL,
*wks_tlv = NULL, *miux_tlv = NULL;
__be16 wks = cpu_to_be16(local->local_wks);
u8 gb_len = 0;
@@ -609,12 +647,15 @@ u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
*general_bytes_len = local->gb_len;
+ nfc_llcp_local_put(local);
+
return local->gb;
}
-int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
+int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len)
{
struct nfc_llcp_local *local;
+ int err;
if (gb_len < 3 || gb_len > NFC_MAX_GT_LEN)
return -EINVAL;
@@ -631,35 +672,39 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
if (memcmp(local->remote_gb, llcp_magic, 3)) {
pr_err("MAC does not support LLCP\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
- return nfc_llcp_parse_gb_tlv(local,
+ err = nfc_llcp_parse_gb_tlv(local,
&local->remote_gb[3],
local->remote_gb_len - 3);
+out:
+ nfc_llcp_local_put(local);
+ return err;
}
-static u8 nfc_llcp_dsap(struct sk_buff *pdu)
+static u8 nfc_llcp_dsap(const struct sk_buff *pdu)
{
return (pdu->data[0] & 0xfc) >> 2;
}
-static u8 nfc_llcp_ptype(struct sk_buff *pdu)
+static u8 nfc_llcp_ptype(const struct sk_buff *pdu)
{
return ((pdu->data[0] & 0x03) << 2) | ((pdu->data[1] & 0xc0) >> 6);
}
-static u8 nfc_llcp_ssap(struct sk_buff *pdu)
+static u8 nfc_llcp_ssap(const struct sk_buff *pdu)
{
return pdu->data[1] & 0x3f;
}
-static u8 nfc_llcp_ns(struct sk_buff *pdu)
+static u8 nfc_llcp_ns(const struct sk_buff *pdu)
{
return pdu->data[2] >> 4;
}
-static u8 nfc_llcp_nr(struct sk_buff *pdu)
+static u8 nfc_llcp_nr(const struct sk_buff *pdu)
{
return pdu->data[2] & 0xf;
}
@@ -801,23 +846,15 @@ out:
}
static struct nfc_llcp_sock *nfc_llcp_sock_get_sn(struct nfc_llcp_local *local,
- u8 *sn, size_t sn_len)
+ const u8 *sn, size_t sn_len)
{
- struct nfc_llcp_sock *llcp_sock;
-
- llcp_sock = nfc_llcp_sock_from_sn(local, sn, sn_len);
-
- if (llcp_sock == NULL)
- return NULL;
-
- sock_hold(&llcp_sock->sk);
-
- return llcp_sock;
+ return nfc_llcp_sock_from_sn(local, sn, sn_len, true);
}
-static u8 *nfc_llcp_connect_sn(struct sk_buff *skb, size_t *sn_len)
+static const u8 *nfc_llcp_connect_sn(const struct sk_buff *skb, size_t *sn_len)
{
- u8 *tlv = &skb->data[2], type, length;
+ u8 type, length;
+ const u8 *tlv = &skb->data[2];
size_t tlv_array_len = skb->len - LLCP_HEADER_SIZE, offset = 0;
while (offset < tlv_array_len) {
@@ -875,7 +912,7 @@ static void nfc_llcp_recv_ui(struct nfc_llcp_local *local,
}
static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
- struct sk_buff *skb)
+ const struct sk_buff *skb)
{
struct sock *new_sk, *parent;
struct nfc_llcp_sock *sock, *new_sock;
@@ -893,7 +930,7 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
goto fail;
}
} else {
- u8 *sn;
+ const u8 *sn;
size_t sn_len;
sn = nfc_llcp_connect_sn(skb, &sn_len);
@@ -946,8 +983,17 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
}
new_sock = nfc_llcp_sock(new_sk);
- new_sock->dev = local->dev;
+
new_sock->local = nfc_llcp_local_get(local);
+ if (!new_sock->local) {
+ reason = LLCP_DM_REJ;
+ sock_put(&new_sock->sk);
+ release_sock(&sock->sk);
+ sock_put(&sock->sk);
+ goto fail;
+ }
+
+ new_sock->dev = local->dev;
new_sock->rw = sock->rw;
new_sock->miux = sock->miux;
new_sock->nfc_protocol = sock->nfc_protocol;
@@ -1112,7 +1158,7 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
}
static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
- struct sk_buff *skb)
+ const struct sk_buff *skb)
{
struct nfc_llcp_sock *llcp_sock;
struct sock *sk;
@@ -1155,7 +1201,8 @@ static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
nfc_llcp_sock_put(llcp_sock);
}
-static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
+static void nfc_llcp_recv_cc(struct nfc_llcp_local *local,
+ const struct sk_buff *skb)
{
struct nfc_llcp_sock *llcp_sock;
struct sock *sk;
@@ -1188,7 +1235,8 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
nfc_llcp_sock_put(llcp_sock);
}
-static void nfc_llcp_recv_dm(struct nfc_llcp_local *local, struct sk_buff *skb)
+static void nfc_llcp_recv_dm(struct nfc_llcp_local *local,
+ const struct sk_buff *skb)
{
struct nfc_llcp_sock *llcp_sock;
struct sock *sk;
@@ -1226,12 +1274,13 @@ static void nfc_llcp_recv_dm(struct nfc_llcp_local *local, struct sk_buff *skb)
}
static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
- struct sk_buff *skb)
+ const struct sk_buff *skb)
{
struct nfc_llcp_sock *llcp_sock;
- u8 dsap, ssap, *tlv, type, length, tid, sap;
+ u8 dsap, ssap, type, length, tid, sap;
+ const u8 *tlv;
u16 tlv_len, offset;
- char *service_name;
+ const char *service_name;
size_t service_name_len;
struct nfc_llcp_sdp_tlv *sdp;
HLIST_HEAD(llc_sdres_list);
@@ -1273,7 +1322,8 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
}
llcp_sock = nfc_llcp_sock_from_sn(local, service_name,
- service_name_len);
+ service_name_len,
+ true);
if (!llcp_sock) {
sap = 0;
goto add_snl;
@@ -1293,6 +1343,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
if (sap == LLCP_SAP_MAX) {
sap = 0;
+ nfc_llcp_sock_put(llcp_sock);
goto add_snl;
}
@@ -1310,6 +1361,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
pr_debug("%p %d\n", llcp_sock, sap);
+ nfc_llcp_sock_put(llcp_sock);
add_snl:
sdp = nfc_llcp_build_sdres_tlv(tid, sap);
if (sdp == NULL)
@@ -1522,6 +1574,8 @@ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
__nfc_llcp_recv(local, skb);
+ nfc_llcp_local_put(local);
+
return 0;
}
@@ -1538,6 +1592,8 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)
/* Close and purge all existing sockets */
nfc_llcp_socket_release(local, true, 0);
+
+ nfc_llcp_local_put(local);
}
void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
@@ -1563,6 +1619,8 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
mod_timer(&local->link_timer,
jiffies + msecs_to_jiffies(local->remote_lto));
}
+
+ nfc_llcp_local_put(local);
}
int nfc_llcp_register_device(struct nfc_dev *ndev)
@@ -1573,7 +1631,16 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
if (local == NULL)
return -ENOMEM;
- local->dev = ndev;
+ /* As we are going to initialize local's refcount, we need to get the
+ * nfc_dev to avoid UAF; otherwise there is no point in continuing.
+ * See nfc_llcp_local_get().
+ */
+ local->dev = nfc_get_device(ndev->idx);
+ if (!local->dev) {
+ kfree(local);
+ return -ENODEV;
+ }
+
INIT_LIST_HEAD(&local->list);
kref_init(&local->ref);
mutex_init(&local->sdp_lock);
@@ -1606,14 +1673,16 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
timer_setup(&local->sdreq_timer, nfc_llcp_sdreq_timer, 0);
INIT_WORK(&local->sdreq_timeout_work, nfc_llcp_sdreq_timeout_work);
+ spin_lock(&llcp_devices_lock);
list_add(&local->list, &llcp_devices);
+ spin_unlock(&llcp_devices_lock);
return 0;
}
void nfc_llcp_unregister_device(struct nfc_dev *dev)
{
- struct nfc_llcp_local *local = nfc_llcp_find_local(dev);
+ struct nfc_llcp_local *local = nfc_llcp_remove_local(dev);
if (local == NULL) {
pr_debug("No such device\n");
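A recurring theme in the llcp_core.c changes above is that nfc_llcp_find_local() now returns a referenced object: the get happens while the list spinlock is still held, and every caller gained a matching nfc_llcp_local_put(). Taking the reference after dropping the lock would leave a window in which the entry can be freed. The essential ordering, in a self-contained pthread sketch with invented types:

    #include <pthread.h>
    #include <stddef.h>

    struct local_like { int refs; int id; struct local_like *next; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct local_like *devices;

    static struct local_like *find_local(int id)
    {
            struct local_like *l, *res = NULL;

            pthread_mutex_lock(&list_lock);
            for (l = devices; l; l = l->next)
                    if (l->id == id) {
                            l->refs++;      /* get while still locked */
                            res = l;
                            break;
                    }
            pthread_mutex_unlock(&list_lock);
            return res;             /* caller owns one reference, or NULL */
    }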
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index bd2174699af9..aea337d81702 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -99,7 +99,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
}
llcp_sock->dev = dev;
- llcp_sock->local = nfc_llcp_local_get(local);
+ llcp_sock->local = local;
llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
llcp_sock->service_name_len = min_t(unsigned int,
llcp_addr.service_name_len,
@@ -181,7 +181,7 @@ static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr,
}
llcp_sock->dev = dev;
- llcp_sock->local = nfc_llcp_local_get(local);
+ llcp_sock->local = local;
llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
nfc_llcp_sock_link(&local->raw_sockets, sk);
@@ -698,24 +698,22 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
if (dev->dep_link_up == false) {
ret = -ENOLINK;
device_unlock(&dev->dev);
- goto put_dev;
+ goto sock_llcp_put_local;
}
device_unlock(&dev->dev);
if (local->rf_mode == NFC_RF_INITIATOR &&
addr->target_idx != local->target_idx) {
ret = -ENOLINK;
- goto put_dev;
+ goto sock_llcp_put_local;
}
llcp_sock->dev = dev;
- llcp_sock->local = nfc_llcp_local_get(local);
+ llcp_sock->local = local;
llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
if (llcp_sock->ssap == LLCP_SAP_MAX) {
- nfc_llcp_local_put(llcp_sock->local);
- llcp_sock->local = NULL;
ret = -ENOMEM;
- goto put_dev;
+ goto sock_llcp_nullify;
}
llcp_sock->reserved_ssap = llcp_sock->ssap;
@@ -760,8 +758,13 @@ sock_unlink:
sock_llcp_release:
nfc_llcp_put_ssap(local, llcp_sock->ssap);
- nfc_llcp_local_put(llcp_sock->local);
+
+sock_llcp_nullify:
llcp_sock->local = NULL;
+ llcp_sock->dev = NULL;
+
+sock_llcp_put_local:
+ nfc_llcp_local_put(local);
put_dev:
nfc_put_device(dev);
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index b2e922fcc70d..cb2193dec712 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -530,7 +530,7 @@ static int nci_open_device(struct nci_dev *ndev)
skb_queue_purge(&ndev->tx_q);
ndev->ops->close(ndev);
- ndev->flags = 0;
+ ndev->flags &= BIT(NCI_UNREG);
}
done:
@@ -894,6 +894,11 @@ static int nci_activate_target(struct nfc_dev *nfc_dev,
return -EINVAL;
}
+ if (protocol >= NFC_PROTO_MAX) {
+ pr_err("the requested nfc protocol is invalid\n");
+ return -EINVAL;
+ }
+
if (!(nci_target->supported_protocols & (1 << protocol))) {
pr_err("target does not support the requested protocol 0x%x\n",
protocol);
@@ -1192,6 +1197,10 @@ void nci_free_device(struct nci_dev *ndev)
{
nfc_free_device(ndev->nfc_dev);
nci_hci_deallocate(ndev);
+
+ /* drop partial rx data packet if present */
+ if (ndev->rx_data_reassembly)
+ kfree_skb(ndev->rx_data_reassembly);
kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);
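The flags change in nci_open_device() is subtle: on a failed open, zeroing
the whole word would also clear the bit that marks a device in teardown.
A tiny sketch of why masking is used instead of assignment (bit numbers
are illustrative):

    #include <stdio.h>

    #define BIT(n) (1UL << (n))
    enum { NCI_UP, NCI_DATA_EXCHANGE, NCI_UNREG };

    int main(void)
    {
            unsigned long flags = BIT(NCI_UP) | BIT(NCI_UNREG);

            flags &= BIT(NCI_UNREG);        /* was: flags = 0, losing NCI_UNREG */
            printf("unreg still set: %d\n", !!(flags & BIT(NCI_UNREG)));
            return 0;
    }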
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index b002e18f38c8..b4548d887489 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -279,8 +279,10 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
nci_plen(skb->data));
conn_info = nci_get_conn_info_by_conn_id(ndev, nci_conn_id(skb->data));
- if (!conn_info)
+ if (!conn_info) {
+ kfree_skb(skb);
return;
+ }
/* strip the nci data header */
skb_pull(skb, NCI_DATA_HDR_SIZE);
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 33e1170817f0..f8b20cddd5c9 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -218,6 +218,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
target->sens_res = nfca_poll->sens_res;
target->sel_res = nfca_poll->sel_res;
target->nfcid1_len = nfca_poll->nfcid1_len;
+ if (target->nfcid1_len > ARRAY_SIZE(target->nfcid1))
+ return -EPROTO;
if (target->nfcid1_len > 0) {
memcpy(target->nfcid1, nfca_poll->nfcid1,
target->nfcid1_len);
@@ -226,6 +228,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
nfcb_poll = (struct rf_tech_specific_params_nfcb_poll *)params;
target->sensb_res_len = nfcb_poll->sensb_res_len;
+ if (target->sensb_res_len > ARRAY_SIZE(target->sensb_res))
+ return -EPROTO;
if (target->sensb_res_len > 0) {
memcpy(target->sensb_res, nfcb_poll->sensb_res,
target->sensb_res_len);
@@ -234,6 +238,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
nfcf_poll = (struct rf_tech_specific_params_nfcf_poll *)params;
target->sensf_res_len = nfcf_poll->sensf_res_len;
+ if (target->sensf_res_len > ARRAY_SIZE(target->sensf_res))
+ return -EPROTO;
if (target->sensf_res_len > 0) {
memcpy(target->sensf_res, nfcf_poll->sensf_res,
target->sensf_res_len);
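All three ntf.c hunks follow the same pattern: the peer-supplied length is
checked against the destination array before the memcpy() that uses it.
A self-contained sketch of that check (the struct layout here is
illustrative):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct target {
            unsigned char nfcid1[10];
            size_t nfcid1_len;
    };

    static int set_nfcid1(struct target *t, const unsigned char *src, size_t len)
    {
            if (len > ARRAY_SIZE(t->nfcid1))
                    return -EPROTO;         /* reject before touching t->nfcid1 */
            t->nfcid1_len = len;
            memcpy(t->nfcid1, src, len);
            return 0;
    }

    int main(void)
    {
            struct target t;
            unsigned char buf[32] = { 0 };

            printf("len 32 -> %d (rejected)\n", set_nfcid1(&t, buf, 32));
            printf("len 4  -> %d (copied)\n", set_nfcid1(&t, buf, 4));
            return 0;
    }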
diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c
index 9dd8a1096916..96f071792a04 100644
--- a/net/nfc/nci/spi.c
+++ b/net/nfc/nci/spi.c
@@ -150,6 +150,8 @@ static int send_acknowledge(struct nci_spi *nspi, u8 acknowledge)
int ret;
skb = nci_skb_alloc(nspi->ndev, 0, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
/* add the NCI SPI header to the start of the buffer */
hdr = skb_push(skb, NCI_SPI_HDR_LEN);
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 9e94f732e717..5b55466fe315 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -1047,11 +1047,14 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
rc = -ENOMEM;
- goto exit;
+ goto put_local;
}
rc = nfc_genl_send_params(msg, local, info->snd_portid, info->snd_seq);
+put_local:
+ nfc_llcp_local_put(local);
+
exit:
device_unlock(&dev->dev);
@@ -1113,7 +1116,7 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NFC_ATTR_LLC_PARAM_LTO]) {
if (dev->dep_link_up) {
rc = -EINPROGRESS;
- goto exit;
+ goto put_local;
}
local->lto = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_LTO]);
@@ -1125,6 +1128,9 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX])
local->miux = cpu_to_be16(miux);
+put_local:
+ nfc_llcp_local_put(local);
+
exit:
device_unlock(&dev->dev);
@@ -1180,7 +1186,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
if (rc != 0) {
rc = -EINVAL;
- goto exit;
+ goto put_local;
}
if (!sdp_attrs[NFC_SDP_ATTR_URI])
@@ -1199,7 +1205,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
sdreq = nfc_llcp_build_sdreq_tlv(tid, uri, uri_len);
if (sdreq == NULL) {
rc = -ENOMEM;
- goto exit;
+ goto put_local;
}
tlvs_len += sdreq->tlv_len;
@@ -1209,10 +1215,14 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
if (hlist_empty(&sdreq_list)) {
rc = -EINVAL;
- goto exit;
+ goto put_local;
}
rc = nfc_llcp_send_snl_sdreq(local, &sdreq_list, tlvs_len);
+
+put_local:
+ nfc_llcp_local_put(local);
+
exit:
device_unlock(&dev->dev);
@@ -1450,8 +1460,12 @@ static int nfc_se_io(struct nfc_dev *dev, u32 se_idx,
rc = dev->ops->se_io(dev, se_idx, apdu,
apdu_length, cb, cb_context);
+ device_unlock(&dev->dev);
+ return rc;
+
error:
device_unlock(&dev->dev);
+ kfree(cb_context);
return rc;
}
@@ -1505,6 +1519,7 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
u32 dev_idx, se_idx;
u8 *apdu;
size_t apdu_len;
+ int rc;
if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
!info->attrs[NFC_ATTR_SE_INDEX] ||
@@ -1518,25 +1533,37 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
if (!dev)
return -ENODEV;
- if (!dev->ops || !dev->ops->se_io)
- return -ENOTSUPP;
+ if (!dev->ops || !dev->ops->se_io) {
+ rc = -EOPNOTSUPP;
+ goto put_dev;
+ }
apdu_len = nla_len(info->attrs[NFC_ATTR_SE_APDU]);
- if (apdu_len == 0)
- return -EINVAL;
+ if (apdu_len == 0) {
+ rc = -EINVAL;
+ goto put_dev;
+ }
apdu = nla_data(info->attrs[NFC_ATTR_SE_APDU]);
- if (!apdu)
- return -EINVAL;
+ if (!apdu) {
+ rc = -EINVAL;
+ goto put_dev;
+ }
ctx = kzalloc(sizeof(struct se_io_ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto put_dev;
+ }
ctx->dev_idx = dev_idx;
ctx->se_idx = se_idx;
- return nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
+ rc = nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
+
+put_dev:
+ nfc_put_device(dev);
+ return rc;
}
static int nfc_genl_vendor_cmd(struct sk_buff *skb,
@@ -1559,14 +1586,21 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
subcmd = nla_get_u32(info->attrs[NFC_ATTR_VENDOR_SUBCMD]);
dev = nfc_get_device(dev_idx);
- if (!dev || !dev->vendor_cmds || !dev->n_vendor_cmds)
+ if (!dev)
return -ENODEV;
+ if (!dev->vendor_cmds || !dev->n_vendor_cmds) {
+ err = -ENODEV;
+ goto put_dev;
+ }
+
if (info->attrs[NFC_ATTR_VENDOR_DATA]) {
data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]);
data_len = nla_len(info->attrs[NFC_ATTR_VENDOR_DATA]);
- if (data_len == 0)
- return -EINVAL;
+ if (data_len == 0) {
+ err = -EINVAL;
+ goto put_dev;
+ }
} else {
data = NULL;
data_len = 0;
@@ -1581,10 +1615,14 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
dev->cur_cmd_info = info;
err = cmd->doit(dev, data, data_len);
dev->cur_cmd_info = NULL;
- return err;
+ goto put_dev;
}
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+
+put_dev:
+ nfc_put_device(dev);
+ return err;
}
/* message building helper */
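The netlink.c rework replaces scattered early returns with a goto ladder
so every exit path drops the device reference exactly once, in reverse
order of acquisition. A minimal sketch of the idiom with stand-in helpers
(nfc_get()/nfc_put() are illustrative stubs, not the kernel functions):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dev { int refs; };

    static struct dev the_dev = { .refs = 1 };

    static struct dev *nfc_get(void)          { the_dev.refs++; return &the_dev; }
    static void        nfc_put(struct dev *d) { d->refs--; }

    static int do_io(struct dev *d, void *ctx)
    {
            free(ctx);                      /* do_io() consumes ctx */
            return d->refs > 0 ? 0 : -EIO;
    }

    static int se_io(void)
    {
            struct dev *d = nfc_get();
            void *ctx;
            int rc;

            ctx = calloc(1, 16);
            if (!ctx) {
                    rc = -ENOMEM;
                    goto put_dev;           /* drop the ref taken above */
            }
            rc = do_io(d, ctx);
    put_dev:
            nfc_put(d);                     /* every path drops exactly one ref */
            return rc;
    }

    int main(void)
    {
            printf("se_io() = %d, refs = %d\n", se_io(), the_dev.refs);
            return 0;
    }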
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 889fefd64e56..0b1e6466f4fb 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -48,10 +48,11 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
u8 comm_mode, u8 rf_mode);
int nfc_llcp_register_device(struct nfc_dev *dev);
void nfc_llcp_unregister_device(struct nfc_dev *dev);
-int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len);
+int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len);
u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len);
int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb);
struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
+int nfc_llcp_local_put(struct nfc_llcp_local *local);
int __init nfc_llcp_init(void);
void nfc_llcp_exit(void);
void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp);
diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
index e9ca007718b7..0f23e5e8e03e 100644
--- a/net/nsh/nsh.c
+++ b/net/nsh/nsh.c
@@ -77,13 +77,12 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
+ u16 mac_offset = skb->mac_header;
unsigned int nsh_len, mac_len;
__be16 proto;
- int nhoff;
skb_reset_network_header(skb);
- nhoff = skb->network_header - skb->mac_header;
mac_len = skb->mac_len;
if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
@@ -108,15 +107,14 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
segs = skb_mac_gso_segment(skb, features);
if (IS_ERR_OR_NULL(segs)) {
skb_gso_error_unwind(skb, htons(ETH_P_NSH), nsh_len,
- skb->network_header - nhoff,
- mac_len);
+ mac_offset, mac_len);
goto out;
}
for (skb = segs; skb; skb = skb->next) {
skb->protocol = htons(ETH_P_NSH);
__skb_push(skb, nsh_len);
- skb_set_mac_header(skb, -nhoff);
+ skb->mac_header = mac_offset;
skb->network_header = skb->mac_header + mac_len;
skb->mac_len = mac_len;
}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 4f097bd3339e..4c537e74b18c 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -236,10 +236,17 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
upcall.portid = ovs_vport_find_upcall_portid(p, skb);
upcall.mru = OVS_CB(skb)->mru;
error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
- if (unlikely(error))
- kfree_skb(skb);
- else
+ switch (error) {
+ case 0:
+ case -EAGAIN:
+ case -ERESTARTSYS:
+ case -EINTR:
consume_skb(skb);
+ break;
+ default:
+ kfree_skb(skb);
+ break;
+ }
stats_counter = &stats->n_missed;
goto out;
}
@@ -525,8 +532,9 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
out:
if (err)
skb_tx_error(skb);
- kfree_skb(user_skb);
- kfree_skb(nskb);
+ consume_skb(user_skb);
+ consume_skb(nskb);
+
return err;
}
@@ -902,6 +910,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
struct sw_flow_mask mask;
struct sk_buff *reply;
struct datapath *dp;
+ struct sw_flow_key *key;
struct sw_flow_actions *acts;
struct sw_flow_match match;
u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
@@ -929,30 +938,32 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
}
/* Extract key. */
- ovs_match_init(&match, &new_flow->key, false, &mask);
+ key = kzalloc(sizeof(*key), GFP_KERNEL);
+ if (!key) {
+ error = -ENOMEM;
+ goto err_kfree_flow;
+ }
+
+ ovs_match_init(&match, key, false, &mask);
error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
a[OVS_FLOW_ATTR_MASK], log);
if (error)
- goto err_kfree_flow;
+ goto err_kfree_key;
+
+ ovs_flow_mask_key(&new_flow->key, key, true, &mask);
/* Extract flow identifier. */
error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
- &new_flow->key, log);
+ key, log);
if (error)
- goto err_kfree_flow;
-
- /* unmasked key is needed to match when ufid is not used. */
- if (ovs_identifier_is_key(&new_flow->id))
- match.key = new_flow->id.unmasked_key;
-
- ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);
+ goto err_kfree_key;
/* Validate actions. */
error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
&new_flow->key, &acts, log);
if (error) {
OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
- goto err_kfree_flow;
+ goto err_kfree_key;
}
reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
@@ -973,7 +984,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
if (ovs_identifier_is_ufid(&new_flow->id))
flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
if (!flow)
- flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
+ flow = ovs_flow_tbl_lookup(&dp->table, key);
if (likely(!flow)) {
rcu_assign_pointer(new_flow->sf_acts, acts);
@@ -1043,6 +1054,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
if (reply)
ovs_notify(&dp_flow_genl_family, reply, info);
+
+ kfree(key);
return 0;
err_unlock_ovs:
@@ -1050,6 +1063,8 @@ err_unlock_ovs:
kfree_skb(reply);
err_kfree_acts:
ovs_nla_free_flow_actions(acts);
+err_kfree_key:
+ kfree(key);
err_kfree_flow:
ovs_flow_free(new_flow, false);
error:
@@ -1552,7 +1567,8 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *in
if (IS_ERR(dp))
return;
- WARN(dp->user_features, "Dropping previously announced user features\n");
+ pr_warn("%s: Dropping previously announced user features\n",
+ ovs_dp_name(dp));
dp->user_features = 0;
}
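Two related points in the datapath.c changes: consume_skb() frees a packet
without it counting as a drop (and without firing the drop tracepoint),
while kfree_skb() records a genuine loss; transient upcall errors are
deliberately classed as the former. A sketch of that classification:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the upcall-result switch in ovs_dp_process_packet():
     * -EAGAIN, -ERESTARTSYS and -EINTR are not real drops.
     */
    static bool is_drop(int error)
    {
            switch (error) {
            case 0:
            case -EAGAIN:
            case -ERESTARTSYS:
            case -EINTR:
                    return false;   /* consume_skb(): no drop statistic */
            default:
                    return true;    /* kfree_skb(): genuine packet drop */
            }
    }

    int main(void)
    {
            printf("-EAGAIN drop? %d\n", is_drop(-EAGAIN));
            printf("-ENOMEM drop? %d\n", is_drop(-ENOMEM));
            return 0;
    }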
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a2696acbcd9d..9fc020fd7ecc 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -266,7 +266,8 @@ static void packet_cached_dev_reset(struct packet_sock *po)
static bool packet_use_direct_xmit(const struct packet_sock *po)
{
- return po->xmit == packet_direct_xmit;
+ /* Paired with WRITE_ONCE() in packet_setsockopt() */
+ return READ_ONCE(po->xmit) == packet_direct_xmit;
}
static u16 packet_pick_tx_queue(struct sk_buff *skb)
@@ -362,18 +363,20 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
union tpacket_uhdr h;
+ /* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */
+
h.raw = frame;
switch (po->tp_version) {
case TPACKET_V1:
- h.h1->tp_status = status;
+ WRITE_ONCE(h.h1->tp_status, status);
flush_dcache_page(pgv_to_page(&h.h1->tp_status));
break;
case TPACKET_V2:
- h.h2->tp_status = status;
+ WRITE_ONCE(h.h2->tp_status, status);
flush_dcache_page(pgv_to_page(&h.h2->tp_status));
break;
case TPACKET_V3:
- h.h3->tp_status = status;
+ WRITE_ONCE(h.h3->tp_status, status);
flush_dcache_page(pgv_to_page(&h.h3->tp_status));
break;
default:
@@ -390,17 +393,19 @@ static int __packet_get_status(const struct packet_sock *po, void *frame)
smp_rmb();
+ /* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */
+
h.raw = frame;
switch (po->tp_version) {
case TPACKET_V1:
flush_dcache_page(pgv_to_page(&h.h1->tp_status));
- return h.h1->tp_status;
+ return READ_ONCE(h.h1->tp_status);
case TPACKET_V2:
flush_dcache_page(pgv_to_page(&h.h2->tp_status));
- return h.h2->tp_status;
+ return READ_ONCE(h.h2->tp_status);
case TPACKET_V3:
flush_dcache_page(pgv_to_page(&h.h3->tp_status));
- return h.h3->tp_status;
+ return READ_ONCE(h.h3->tp_status);
default:
WARN(1, "TPACKET version not supported.\n");
BUG();
@@ -1845,7 +1850,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
*/
spkt->spkt_family = dev->type;
- strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
+ strscpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
spkt->spkt_protocol = skb->protocol;
/*
@@ -1864,12 +1869,20 @@ oom:
static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
{
+ int depth;
+
if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
sock->type == SOCK_RAW) {
skb_reset_mac_header(skb);
skb->protocol = dev_parse_header_protocol(skb);
}
+ /* Move network header to the right position for VLAN tagged packets */
+ if (likely(skb->dev->type == ARPHRD_ETHER) &&
+ eth_type_vlan(skb->protocol) &&
+ vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
+ skb_set_network_header(skb, depth);
+
skb_probe_transport_header(skb);
}
@@ -1964,7 +1977,7 @@ retry:
goto retry;
}
- if (!dev_validate_header(dev, skb->data, len)) {
+ if (!dev_validate_header(dev, skb->data, len) || !skb->len) {
err = -EINVAL;
goto out_unlock;
}
@@ -2114,7 +2127,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
sll = &PACKET_SKB_CB(skb)->sa.ll;
sll->sll_hatype = dev->type;
sll->sll_pkttype = skb->pkt_type;
- if (unlikely(po->origdev))
+ if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
sll->sll_ifindex = orig_dev->ifindex;
else
sll->sll_ifindex = dev->ifindex;
@@ -2222,8 +2235,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
if (skb->ip_summed == CHECKSUM_PARTIAL)
status |= TP_STATUS_CSUMNOTREADY;
else if (skb->pkt_type != PACKET_OUTGOING &&
- (skb->ip_summed == CHECKSUM_COMPLETE ||
- skb_csum_unnecessary(skb)))
+ skb_csum_unnecessary(skb))
status |= TP_STATUS_CSUM_VALID;
if (snaplen > res)
@@ -2383,7 +2395,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
sll->sll_hatype = dev->type;
sll->sll_protocol = skb->protocol;
sll->sll_pkttype = skb->pkt_type;
- if (unlikely(po->origdev))
+ if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
sll->sll_ifindex = orig_dev->ifindex;
else
sll->sll_ifindex = dev->ifindex;
@@ -2790,7 +2802,8 @@ tpacket_error:
packet_inc_pending(&po->tx_ring);
status = TP_STATUS_SEND_REQUEST;
- err = po->xmit(skb);
+ /* Paired with WRITE_ONCE() in packet_setsockopt() */
+ err = READ_ONCE(po->xmit)(skb);
if (unlikely(err != 0)) {
if (err > 0)
err = net_xmit_errno(err);
@@ -2960,8 +2973,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (err)
goto out_free;
- if (sock->type == SOCK_RAW &&
- !dev_validate_header(dev, skb->data, len)) {
+ if ((sock->type == SOCK_RAW &&
+ !dev_validate_header(dev, skb->data, len)) || !skb->len) {
err = -EINVAL;
goto out_free;
}
@@ -2980,6 +2993,11 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
skb->mark = sockc.mark;
skb->tstamp = sockc.transmit_time;
+ if (unlikely(extra_len == 4))
+ skb->no_fcs = 1;
+
+ packet_parse_headers(skb, sock);
+
if (has_vnet_hdr) {
err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
if (err)
@@ -2988,12 +3006,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
virtio_net_hdr_set_proto(skb, &vnet_hdr);
}
- packet_parse_headers(skb, sock);
-
- if (unlikely(extra_len == 4))
- skb->no_fcs = 1;
-
- err = po->xmit(skb);
+ /* Paired with WRITE_ONCE() in packet_setsockopt() */
+ err = READ_ONCE(po->xmit)(skb);
if (unlikely(err != 0)) {
if (err > 0)
err = net_xmit_errno(err);
@@ -3118,6 +3132,9 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
lock_sock(sk);
spin_lock(&po->bind_lock);
+ if (!proto)
+ proto = po->num;
+
rcu_read_lock();
if (po->fanout) {
@@ -3220,7 +3237,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
name[sizeof(uaddr->sa_data)] = 0;
- return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
+ return packet_do_bind(sk, name, 0, 0);
}
static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
@@ -3237,8 +3254,7 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len
if (sll->sll_family != AF_PACKET)
return -EINVAL;
- return packet_do_bind(sk, NULL, sll->sll_ifindex,
- sll->sll_protocol ? : pkt_sk(sk)->num);
+ return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol);
}
static struct proto packet_proto = {
@@ -3444,15 +3460,14 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
}
- if (pkt_sk(sk)->auxdata) {
+ if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) {
struct tpacket_auxdata aux;
aux.tp_status = TP_STATUS_USER;
if (skb->ip_summed == CHECKSUM_PARTIAL)
aux.tp_status |= TP_STATUS_CSUMNOTREADY;
else if (skb->pkt_type != PACKET_OUTGOING &&
- (skb->ip_summed == CHECKSUM_COMPLETE ||
- skb_csum_unnecessary(skb)))
+ skb_csum_unnecessary(skb))
aux.tp_status |= TP_STATUS_CSUM_VALID;
aux.tp_len = origlen;
@@ -3496,7 +3511,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
if (dev)
- strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
+ strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
rcu_read_unlock();
return sizeof(*uaddr);
@@ -3828,9 +3843,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- lock_sock(sk);
- po->auxdata = !!val;
- release_sock(sk);
+ packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
return 0;
}
case PACKET_ORIGDEV:
@@ -3842,9 +3855,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- lock_sock(sk);
- po->origdev = !!val;
- release_sock(sk);
+ packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
return 0;
}
case PACKET_VNET_HDR:
@@ -3910,7 +3921,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (val < 0 || val > 1)
return -EINVAL;
- po->prot_hook.ignore_outgoing = !!val;
+ WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val);
return 0;
}
case PACKET_TX_HAS_OFF:
@@ -3941,7 +3952,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
+ /* Paired with all lockless reads of po->xmit */
+ WRITE_ONCE(po->xmit, val ? packet_direct_xmit : dev_queue_xmit);
return 0;
}
default:
@@ -3992,10 +4004,10 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
break;
case PACKET_AUXDATA:
- val = po->auxdata;
+ val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
break;
case PACKET_ORIGDEV:
- val = po->origdev;
+ val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
break;
case PACKET_VNET_HDR:
val = po->has_vnet_hdr;
@@ -4041,7 +4053,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
0);
break;
case PACKET_IGNORE_OUTGOING:
- val = po->prot_hook.ignore_outgoing;
+ val = READ_ONCE(po->prot_hook.ignore_outgoing);
break;
case PACKET_ROLLOVER_STATS:
if (!po->rollover)
@@ -4228,7 +4240,7 @@ static void packet_mm_open(struct vm_area_struct *vma)
struct sock *sk = sock->sk;
if (sk)
- atomic_inc(&pkt_sk(sk)->mapped);
+ atomic_long_inc(&pkt_sk(sk)->mapped);
}
static void packet_mm_close(struct vm_area_struct *vma)
@@ -4238,7 +4250,7 @@ static void packet_mm_close(struct vm_area_struct *vma)
struct sock *sk = sock->sk;
if (sk)
- atomic_dec(&pkt_sk(sk)->mapped);
+ atomic_long_dec(&pkt_sk(sk)->mapped);
}
static const struct vm_operations_struct packet_mmap_ops = {
@@ -4333,7 +4345,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
err = -EBUSY;
if (!closing) {
- if (atomic_read(&po->mapped))
+ if (atomic_long_read(&po->mapped))
goto out;
if (packet_read_pending(rb))
goto out;
@@ -4436,7 +4448,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
err = -EBUSY;
mutex_lock(&po->pg_vec_lock);
- if (closing || atomic_read(&po->mapped) == 0) {
+ if (closing || atomic_long_read(&po->mapped) == 0) {
err = 0;
spin_lock_bh(&rb_queue->lock);
swap(rb->pg_vec, pg_vec);
@@ -4454,9 +4466,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
po->prot_hook.func = (po->rx_ring.pg_vec) ?
tpacket_rcv : packet_rcv;
skb_queue_purge(rb_queue);
- if (atomic_read(&po->mapped))
- pr_err("packet_mmap: vma is busy: %d\n",
- atomic_read(&po->mapped));
+ if (atomic_long_read(&po->mapped))
+ pr_err("packet_mmap: vma is busy: %ld\n",
+ atomic_long_read(&po->mapped));
}
mutex_unlock(&po->pg_vec_lock);
@@ -4534,7 +4546,7 @@ static int packet_mmap(struct file *file, struct socket *sock,
}
}
- atomic_inc(&po->mapped);
+ atomic_long_inc(&po->mapped);
vma->vm_ops = &packet_mmap_ops;
err = 0;
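po->xmit is now read on lockless fast paths, so the setter and every
reader are converted to a WRITE_ONCE()/READ_ONCE() pair, preventing the
compiler from tearing or re-loading the pointer. A userspace model using
relaxed C11 atomics in place of the kernel macros (function names are
illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static int direct_xmit(void) { return 1; }
    static int queue_xmit(void)  { return 0; }

    static _Atomic(int (*)(void)) xmit = queue_xmit;

    static void set_qdisc_bypass(int val)
    {
            /* WRITE_ONCE() equivalent: one atomic store */
            atomic_store_explicit(&xmit, val ? direct_xmit : queue_xmit,
                                  memory_order_relaxed);
    }

    static int send_one(void)
    {
            /* READ_ONCE() equivalent: load the pointer exactly once */
            int (*fn)(void) = atomic_load_explicit(&xmit,
                                                   memory_order_relaxed);
            return fn();
    }

    int main(void)
    {
            set_qdisc_bypass(1);
            printf("xmit -> %d\n", send_one());
            return 0;
    }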
diff --git a/net/packet/diag.c b/net/packet/diag.c
index 07812ae5ca07..a68a84574c73 100644
--- a/net/packet/diag.c
+++ b/net/packet/diag.c
@@ -23,9 +23,9 @@ static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
pinfo.pdi_flags = 0;
if (po->running)
pinfo.pdi_flags |= PDI_RUNNING;
- if (po->auxdata)
+ if (packet_sock_flag(po, PACKET_SOCK_AUXDATA))
pinfo.pdi_flags |= PDI_AUXDATA;
- if (po->origdev)
+ if (packet_sock_flag(po, PACKET_SOCK_ORIGDEV))
pinfo.pdi_flags |= PDI_ORIGDEV;
if (po->has_vnet_hdr)
pinfo.pdi_flags |= PDI_VNETHDR;
@@ -143,7 +143,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
rp = nlmsg_data(nlh);
rp->pdiag_family = AF_PACKET;
rp->pdiag_type = sk->sk_type;
- rp->pdiag_num = ntohs(po->num);
+ rp->pdiag_num = ntohs(READ_ONCE(po->num));
rp->pdiag_ino = sk_ino;
sock_diag_save_cookie(sk, rp->pdiag_cookie);
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 907f4cd2a718..878fcdca8c25 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -115,10 +115,9 @@ struct packet_sock {
int copy_thresh;
spinlock_t bind_lock;
struct mutex pg_vec_lock;
+ unsigned long flags;
unsigned int running; /* bind_lock must be held */
- unsigned int auxdata:1, /* writer must hold sock lock */
- origdev:1,
- has_vnet_hdr:1,
+ unsigned int has_vnet_hdr:1, /* writer must hold sock lock */
tp_loss:1,
tp_tx_has_off:1;
int pressure;
@@ -126,7 +125,7 @@ struct packet_sock {
__be16 num;
struct packet_rollover *rollover;
struct packet_mclist *mclist;
- atomic_t mapped;
+ atomic_long_t mapped;
enum tpacket_versions tp_version;
unsigned int tp_hdrlen;
unsigned int tp_reserve;
@@ -143,4 +142,25 @@ static struct packet_sock *pkt_sk(struct sock *sk)
return (struct packet_sock *)sk;
}
+enum packet_sock_flags {
+ PACKET_SOCK_ORIGDEV,
+ PACKET_SOCK_AUXDATA,
+};
+
+static inline void packet_sock_flag_set(struct packet_sock *po,
+ enum packet_sock_flags flag,
+ bool val)
+{
+ if (val)
+ set_bit(flag, &po->flags);
+ else
+ clear_bit(flag, &po->flags);
+}
+
+static inline bool packet_sock_flag(const struct packet_sock *po,
+ enum packet_sock_flags flag)
+{
+ return test_bit(flag, &po->flags);
+}
+
#endif
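The internal.h change is what makes the auxdata/origdev conversions above
safe: two bitfields sharing a word cannot be flipped from different
contexts without the sock lock, but independent bits in a flags word
updated with atomic bitops can. A C11 model of
packet_sock_flag_set()/packet_sock_flag():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum { SOCK_ORIGDEV, SOCK_AUXDATA };

    static atomic_ulong flags;

    static void flag_set(int bit, bool val)
    {
            if (val)
                    atomic_fetch_or(&flags, 1UL << bit);     /* set_bit()   */
            else
                    atomic_fetch_and(&flags, ~(1UL << bit)); /* clear_bit() */
    }

    static bool flag_test(int bit)
    {
            return atomic_load(&flags) & (1UL << bit);       /* test_bit()  */
    }

    int main(void)
    {
            flag_set(SOCK_AUXDATA, true);   /* safe even if another thread */
            flag_set(SOCK_ORIGDEV, true);   /* flips the neighbouring flag */
            printf("auxdata=%d origdev=%d\n",
                   flag_test(SOCK_AUXDATA), flag_test(SOCK_ORIGDEV));
            return 0;
    }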
diff --git a/net/psample/psample.c b/net/psample/psample.c
index 6f2fbc6b9eb2..6d29983cbd07 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -28,7 +28,8 @@ enum psample_nl_multicast_groups {
static const struct genl_multicast_group psample_nl_mcgrps[] = {
[PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
- [PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
+ [PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME,
+ .flags = GENL_UNS_ADMIN_PERM },
};
static struct genl_family psample_nl_family __ro_after_init;
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 1a5bf3fa4578..af22f47c8612 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -421,7 +421,7 @@ static int rds_recv_track_latency(struct rds_sock *rs, char __user *optval,
rs->rs_rx_traces = trace.rx_traces;
for (i = 0; i < rs->rs_rx_traces; i++) {
- if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) {
+ if (trace.rx_trace_pos[i] >= RDS_MSG_RX_DGRAM_TRACE_MAX) {
rs->rs_rx_traces = 0;
return -EFAULT;
}
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index a0f99bbf362c..6dd3f45df9ca 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -363,6 +363,7 @@ static int acquire_refill(struct rds_connection *conn)
static void release_refill(struct rds_connection *conn)
{
clear_bit(RDS_RECV_REFILL, &conn->c_flags);
+ smp_mb__after_atomic();
/* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
* hot path and finding waiters is very rare. We don't want to walk
diff --git a/net/rds/message.c b/net/rds/message.c
index 92b6b22884d4..75043742d243 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -104,9 +104,9 @@ static void rds_rm_zerocopy_callback(struct rds_sock *rs,
spin_lock_irqsave(&q->lock, flags);
head = &q->zcookie_head;
if (!list_empty(head)) {
- info = list_entry(head, struct rds_msg_zcopy_info,
- rs_zcookie_next);
- if (info && rds_zcookie_add(info, cookie)) {
+ info = list_first_entry(head, struct rds_msg_zcopy_info,
+ rs_zcookie_next);
+ if (rds_zcookie_add(info, cookie)) {
spin_unlock_irqrestore(&q->lock, flags);
kfree(rds_info_from_znotifier(znotif));
/* caller invokes rds_wake_sk_sleep() */
@@ -118,7 +118,7 @@ static void rds_rm_zerocopy_callback(struct rds_sock *rs,
ck = &info->zcookies;
memset(ck, 0, sizeof(*ck));
WARN_ON(!rds_zcookie_add(info, cookie));
- list_add_tail(&q->zcookie_head, &info->rs_zcookie_next);
+ list_add_tail(&info->rs_zcookie_next, &q->zcookie_head);
spin_unlock_irqrestore(&q->lock, flags);
/* caller invokes rds_wake_sk_sleep() */
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 1c42a600fe7f..dc0f7cf1917f 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -279,6 +279,9 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
put_page(sg_page(&sg[i]));
kfree(sg);
ret = PTR_ERR(trans_private);
+ /* Trigger connection so that it's ready for the next retry */

+ if (ret == -ENODEV)
+ rds_conn_connect_if_down(cp->cp_conn);
goto out;
}
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 5f741e51b4ba..bb38124a5d3d 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -86,10 +86,12 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
break;
case RDMA_CM_EVENT_ADDR_RESOLVED:
- rdma_set_service_type(cm_id, conn->c_tos);
- /* XXX do we need to clean up if this fails? */
- ret = rdma_resolve_route(cm_id,
+ if (conn) {
+ rdma_set_service_type(cm_id, conn->c_tos);
+ /* XXX do we need to clean up if this fails? */
+ ret = rdma_resolve_route(cm_id,
RDS_RDMA_RESOLVE_TIMEOUT_MS);
+ }
break;
case RDMA_CM_EVENT_ROUTE_RESOLVED:
diff --git a/net/rds/send.c b/net/rds/send.c
index 68e2bdb08fd0..c0cebf4b4fe5 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -103,13 +103,12 @@ EXPORT_SYMBOL_GPL(rds_send_path_reset);
static int acquire_in_xmit(struct rds_conn_path *cp)
{
- return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
+ return test_and_set_bit_lock(RDS_IN_XMIT, &cp->cp_flags) == 0;
}
static void release_in_xmit(struct rds_conn_path *cp)
{
- clear_bit(RDS_IN_XMIT, &cp->cp_flags);
- smp_mb__after_atomic();
+ clear_bit_unlock(RDS_IN_XMIT, &cp->cp_flags);
/*
* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
* hot path and finding waiters is very rare. We don't want to walk
@@ -1314,12 +1313,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
/* Parse any control messages the user may have included. */
ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
- if (ret) {
- /* Trigger connection so that its ready for the next retry */
- if (ret == -EAGAIN)
- rds_conn_connect_if_down(conn);
+ if (ret)
goto out;
- }
if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
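The send.c hunk swaps an open-coded bit-plus-barrier for
test_and_set_bit_lock()/clear_bit_unlock(), which carry acquire and
release semantics themselves, so the explicit smp_mb__after_atomic()
goes away. A userspace model with atomic_flag (equivalent ordering, not
the kernel primitives):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag in_xmit = ATOMIC_FLAG_INIT;

    static int acquire_in_xmit(void)
    {
            /* test_and_set_bit_lock(): acquire ordering on success */
            return !atomic_flag_test_and_set_explicit(&in_xmit,
                                                      memory_order_acquire);
    }

    static void release_in_xmit(void)
    {
            /* clear_bit_unlock(): release ordering, no extra barrier */
            atomic_flag_clear_explicit(&in_xmit, memory_order_release);
    }

    int main(void)
    {
            printf("first acquire:  %d\n", acquire_in_xmit()); /* 1 */
            printf("second acquire: %d\n", acquire_in_xmit()); /* 0: busy */
            release_in_xmit();
            printf("after release:  %d\n", acquire_in_xmit()); /* 1 */
            return 0;
    }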
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index d55d81b01d37..bfb8d4d6a994 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -176,10 +176,10 @@ void rds_tcp_reset_callbacks(struct socket *sock,
*/
atomic_set(&cp->cp_state, RDS_CONN_RESETTING);
wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
- lock_sock(osock->sk);
/* reset receive side state for rds_tcp_data_recv() for osock */
cancel_delayed_work_sync(&cp->cp_send_w);
cancel_delayed_work_sync(&cp->cp_recv_w);
+ lock_sock(osock->sk);
if (tc->t_tinc) {
rds_inc_put(&tc->t_tinc->ti_inc);
tc->t_tinc = NULL;
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 008f50fb25dd..63efe60fda1f 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -141,7 +141,7 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
addrlen = sizeof(sin);
}
- ret = sock->ops->bind(sock, addr, addrlen);
+ ret = kernel_bind(sock, addr, addrlen);
if (ret) {
rdsdebug("bind failed with %d at address %pI6c\n",
ret, &conn->c_laddr);
@@ -169,7 +169,7 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
* own the socket
*/
rds_tcp_set_callbacks(sock, cp);
- ret = sock->ops->connect(sock, addr, addrlen, O_NONBLOCK);
+ ret = kernel_connect(sock, addr, addrlen, O_NONBLOCK);
rdsdebug("connect to address %pI6c returned %d\n", &conn->c_faddr, ret);
if (ret == -EINPROGRESS)
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 26a3e18e460d..dfeceed3b533 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -332,7 +332,7 @@ struct socket *rds_tcp_listen_init(struct net *net, bool isv6)
addr_len = sizeof(*sin);
}
- ret = sock->ops->bind(sock, (struct sockaddr *)&ss, addr_len);
+ ret = kernel_bind(sock, (struct sockaddr *)&ss, addr_len);
if (ret < 0) {
rdsdebug("could not bind %s listener socket: %d\n",
isv6 ? "IPv6" : "IPv4", ret);
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index f5afc9bcdee6..f74baefd855d 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -98,13 +98,13 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
rfkill->clk = devm_clk_get(&pdev->dev, NULL);
- gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_ASIS);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
rfkill->reset_gpio = gpio;
- gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_OUT_LOW);
+ gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_ASIS);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
@@ -116,6 +116,14 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
+ ret = gpiod_direction_output(rfkill->reset_gpio, true);
+ if (ret)
+ return ret;
+
+ ret = gpiod_direction_output(rfkill->shutdown_gpio, true);
+ if (ret)
+ return ret;
+
rfkill->rfkill_dev = rfkill_alloc(rfkill->name, &pdev->dev,
rfkill->type, &rfkill_gpio_ops,
rfkill);
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 6a0df7c8a939..9b36fb6aa3e1 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -159,20 +159,47 @@ void rose_kill_by_neigh(struct rose_neigh *neigh)
*/
static void rose_kill_by_device(struct net_device *dev)
{
- struct sock *s;
+ struct sock *sk, *array[16];
+ struct rose_sock *rose;
+ bool rescan;
+ int i, cnt;
+start:
+ rescan = false;
+ cnt = 0;
spin_lock_bh(&rose_list_lock);
- sk_for_each(s, &rose_list) {
- struct rose_sock *rose = rose_sk(s);
+ sk_for_each(sk, &rose_list) {
+ rose = rose_sk(sk);
+ if (rose->device == dev) {
+ if (cnt == ARRAY_SIZE(array)) {
+ rescan = true;
+ break;
+ }
+ sock_hold(sk);
+ array[cnt++] = sk;
+ }
+ }
+ spin_unlock_bh(&rose_list_lock);
+ for (i = 0; i < cnt; i++) {
		sk = array[i];
+ rose = rose_sk(sk);
+ lock_sock(sk);
+ spin_lock_bh(&rose_list_lock);
if (rose->device == dev) {
- rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
+ rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
if (rose->neighbour)
rose->neighbour->use--;
+ dev_put(rose->device);
rose->device = NULL;
}
+ spin_unlock_bh(&rose_list_lock);
+ release_sock(sk);
+ sock_put(sk);
+ cond_resched();
}
- spin_unlock_bh(&rose_list_lock);
+ if (rescan)
+ goto start;
}
/*
@@ -464,6 +491,12 @@ static int rose_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
+ lock_sock(sk);
+ if (sock->state != SS_UNCONNECTED) {
+ release_sock(sk);
+ return -EINVAL;
+ }
+
if (sk->sk_state != TCP_LISTEN) {
struct rose_sock *rose = rose_sk(sk);
@@ -473,8 +506,10 @@ static int rose_listen(struct socket *sock, int backlog)
memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
+ release_sock(sk);
return 0;
}
+ release_sock(sk);
return -EOPNOTSUPP;
}
@@ -569,6 +604,8 @@ static struct sock *rose_make_new(struct sock *osk)
rose->idle = orose->idle;
rose->defer = orose->defer;
rose->device = orose->device;
+ if (rose->device)
+ dev_hold(rose->device);
rose->qbitincl = orose->qbitincl;
return sk;
@@ -622,6 +659,10 @@ static int rose_release(struct socket *sock)
break;
}
+ spin_lock_bh(&rose_list_lock);
+ dev_put(rose->device);
+ rose->device = NULL;
+ spin_unlock_bh(&rose_list_lock);
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
@@ -698,7 +739,6 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
unsigned char cause, diagnostic;
- struct net_device *dev;
ax25_uid_assoc *user;
int n, err = 0;
@@ -755,9 +795,12 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
}
if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */
+ struct net_device *dev;
+
sock_reset_flag(sk, SOCK_ZAPPED);
- if ((dev = rose_dev_first()) == NULL) {
+ dev = rose_dev_first();
+ if (!dev) {
err = -ENETUNREACH;
goto out_release;
}
@@ -765,6 +808,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
user = ax25_findbyuid(current_euid());
if (!user) {
err = -EINVAL;
+ dev_put(dev);
goto out_release;
}
@@ -1270,9 +1314,11 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case TIOCINQ: {
struct sk_buff *skb;
long amount = 0L;
- /* These two are safe on a single CPU system as only user tasks fiddle here */
+
+ spin_lock_irq(&sk->sk_receive_queue.lock);
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
+ spin_unlock_irq(&sk->sk_receive_queue.lock);
return put_user(amount, (unsigned int __user *) argp);
}
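rose_kill_by_device() now snapshots matching sockets under the spinlock
into a bounded array (holding a reference to each), drops the lock, and
only then takes lock_sock(), which may sleep; an overfull batch triggers
a rescan from the start. A self-contained model of that
collect-then-process loop:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            int list[40], array[16], total = 0;

            for (int i = 0; i < 40; i++)
                    list[i] = 1;            /* 40 sockets match the device */

            for (;;) {
                    int cnt = 0, rescan = 0;

                    /* spin_lock_bh(&rose_list_lock) */
                    for (int i = 0; i < 40; i++) {
                            if (!list[i])
                                    continue;
                            if (cnt == ARRAY_SIZE(array)) {
                                    rescan = 1;     /* batch full: come back */
                                    break;
                            }
                            array[cnt++] = i;       /* sock_hold() */
                    }
                    /* spin_unlock_bh(&rose_list_lock) */

                    for (int i = 0; i < cnt; i++)
                            list[array[i]] = 0;     /* lock_sock(), disconnect */
                    total += cnt;

                    if (!rescan)
                            break;
            }
            printf("disconnected %d sockets\n", total);
            return 0;
    }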
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index f6102e6f5161..730d2205f197 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -236,6 +236,9 @@ void rose_transmit_clear_request(struct rose_neigh *neigh, unsigned int lci, uns
unsigned char *dptr;
int len;
+ if (!neigh->dev)
+ return;
+
len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 3;
if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 11c45c8c6c16..036d92c0ad79 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -96,7 +96,8 @@ static void rose_loopback_timer(struct timer_list *unused)
}
if (frametype == ROSE_CALL_REQUEST) {
- if (!rose_loopback_neigh->dev) {
+ if (!rose_loopback_neigh->dev &&
+ !rose_loopback_neigh->loopback) {
kfree_skb(skb);
continue;
}
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 5f32113c9bbd..64d441d3b653 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -613,6 +613,8 @@ struct net_device *rose_dev_first(void)
if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
first = dev;
}
+ if (first)
+ dev_hold(first);
rcu_read_unlock();
return first;
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 8574e7066d94..b5f173960725 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -166,7 +166,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
now = ktime_get_real();
- max_age = ktime_sub(now, jiffies_to_usecs(call->peer->rto_j));
+ max_age = ktime_sub_us(now, jiffies_to_usecs(call->peer->rto_j));
spin_lock_bh(&call->lock);
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index b5864683f200..49669055ee9d 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -41,6 +41,14 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
_enter("%d", conn->debug_id);
+ if (sp && sp->hdr.type == RXRPC_PACKET_TYPE_ACK) {
+ if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+ &pkt.ack, sizeof(pkt.ack)) < 0)
+ return;
+ if (pkt.ack.reason == RXRPC_ACK_PING_RESPONSE)
+ return;
+ }
+
chan = &conn->channels[channel];
/* If the last call got moved on whilst we were waiting to run, just
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index 21da48e3d2e5..7ad4b4e9341e 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -25,7 +25,7 @@ struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
struct rxrpc_conn_proto k;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rb_node *p;
- unsigned int seq = 0;
+ unsigned int seq = 1;
k.epoch = sp->hdr.epoch;
k.cid = sp->hdr.cid & RXRPC_CIDMASK;
@@ -35,6 +35,7 @@ struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
* under just the RCU read lock, so we have to check for
* changes.
*/
+ seq++; /* 2 on the 1st/lockless path, otherwise odd */
read_seqbegin_or_lock(&peer->service_conn_lock, &seq);
p = rcu_dereference_raw(peer->service_conns.rb_node);
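The conn_service.c fix is about the read_seqbegin_or_lock() calling
convention: the control variable must be even for the lockless attempt
and odd for the retry that takes the lock, so it now starts at 1 and is
incremented before each pass. A toy model of that convention (the raced
flag simulates a concurrent writer):

    #include <stdbool.h>
    #include <stdio.h>

    static bool raced = true;       /* pretend the lockless pass raced once */

    static bool read_retry_needed(int seq)
    {
            if (seq & 1)            /* locked pass: always consistent */
                    return false;
            bool r = raced;         /* lockless pass: may have raced */
            raced = false;
            return r;
    }

    int main(void)
    {
            int seq = 1;            /* was 0 before the fix: both passes even */

            do {
                    seq++;          /* 2 (even, lockless) then 3 (odd, locked) */
                    printf("pass with seq=%d: %s\n", seq,
                           (seq & 1) ? "holding lock" : "lockless");
            } while (read_retry_needed(seq));
            return 0;
    }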
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 01135e54d95d..fc784fcc3a94 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -448,6 +448,9 @@ static void rxrpc_local_processor(struct work_struct *work)
container_of(work, struct rxrpc_local, processor);
bool again;
+ if (local->dead)
+ return;
+
trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
atomic_read(&local->usage), NULL);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 6202d2e32914..09fcc54245c7 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -93,7 +93,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
*_hard_ack = hard_ack;
*_top = top;
- pkt->ack.bufferSpace = htons(8);
+ pkt->ack.bufferSpace = htons(0);
pkt->ack.maxSkew = htons(0);
pkt->ack.firstPacket = htonl(hard_ack + 1);
pkt->ack.previousPacket = htonl(call->ackr_highest_seq);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 52a24d4ef5d8..2ba61971f623 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -451,7 +451,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
* directly into the target buffer.
*/
sg = _sg;
- nsg = skb_shinfo(skb)->nr_frags;
+ nsg = skb_shinfo(skb)->nr_frags + 1;
if (nsg <= 4) {
nsg = 4;
} else {
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 22f020099214..3439d14168e8 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -718,7 +718,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
if (call->tx_total_len != -1 ||
call->tx_pending ||
call->tx_top != 0)
- goto error_put;
+ goto out_put_unlock;
call->tx_total_len = p.call.tx_total_len;
}
}
@@ -738,7 +738,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
/* Fall through */
case 1:
if (p.call.timeouts.hard > 0) {
- j = msecs_to_jiffies(p.call.timeouts.hard);
+ j = p.call.timeouts.hard * HZ;
now = jiffies;
j += now;
WRITE_ONCE(call->expect_term_by, j);
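The sendmsg.c change fixes a unit mismatch: the hard timeout supplied by
userspace is in seconds, so scaling by HZ is correct, while
msecs_to_jiffies() treated the value as milliseconds and cut a 30 s
request down to 30 ms. A small sketch of the difference (the HZ value is
illustrative):

    #include <stdio.h>

    #define HZ 250                  /* illustrative CONFIG_HZ */

    static unsigned long msecs_to_jiffies(unsigned long ms)
    {
            return ms * HZ / 1000;
    }

    int main(void)
    {
            unsigned long hard = 30;        /* userspace asks for 30 seconds */

            /* buggy: seconds fed into a milliseconds conversion */
            printf("msecs_to_jiffies(30) = %lu jiffies (~30ms)\n",
                   msecs_to_jiffies(hard));
            /* fixed: seconds * HZ */
            printf("30 * HZ             = %lu jiffies (30s)\n", hard * HZ);
            return 0;
    }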
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 2985509147a2..49521aa33ab9 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -45,23 +45,6 @@ if NET_SCHED
comment "Queueing/Scheduling"
-config NET_SCH_CBQ
- tristate "Class Based Queueing (CBQ)"
- ---help---
- Say Y here if you want to use the Class-Based Queueing (CBQ) packet
- scheduling algorithm. This algorithm classifies the waiting packets
- into a tree-like hierarchy of classes; the leaves of this tree are
- in turn scheduled by separate algorithms.
-
- See the top of <file:net/sched/sch_cbq.c> for more details.
-
- CBQ is a commonly used scheduler, so if you're unsure, you should
- say Y here. Then say Y to all the queueing algorithms below that you
- want to use as leaf disciplines.
-
- To compile this code as a module, choose M here: the
- module will be called sch_cbq.
-
config NET_SCH_HTB
tristate "Hierarchical Token Bucket (HTB)"
---help---
@@ -85,20 +68,6 @@ config NET_SCH_HFSC
To compile this code as a module, choose M here: the
module will be called sch_hfsc.
-config NET_SCH_ATM
- tristate "ATM Virtual Circuits (ATM)"
- depends on ATM
- ---help---
- Say Y here if you want to use the ATM pseudo-scheduler. This
- provides a framework for invoking classifiers, which in turn
- select classes of this queuing discipline. Each class maps
- the flow(s) it is handling to a given virtual circuit.
-
- See the top of <file:net/sched/sch_atm.c> for more details.
-
- To compile this code as a module, choose M here: the
- module will be called sch_atm.
-
config NET_SCH_PRIO
tristate "Multi Band Priority Queueing (PRIO)"
---help---
@@ -217,17 +186,6 @@ config NET_SCH_GRED
To compile this code as a module, choose M here: the
module will be called sch_gred.
-config NET_SCH_DSMARK
- tristate "Differentiated Services marker (DSMARK)"
- ---help---
- Say Y if you want to schedule packets according to the
- Differentiated Services architecture proposed in RFC 2475.
- Technical information on this method, with pointers to associated
- RFCs, is available at <http://www.gta.ufrj.br/diffserv/>.
-
- To compile this code as a module, choose M here: the
- module will be called sch_dsmark.
-
config NET_SCH_NETEM
tristate "Network emulator (NETEM)"
---help---
@@ -469,17 +427,6 @@ config NET_CLS_BASIC
To compile this code as a module, choose M here: the
module will be called cls_basic.
-config NET_CLS_TCINDEX
- tristate "Traffic-Control Index (TCINDEX)"
- select NET_CLS
- ---help---
- Say Y here if you want to be able to classify packets based on
- traffic control indices. You will want this feature if you want
- to implement Differentiated Services together with DSMARK.
-
- To compile this code as a module, choose M here: the
- module will be called cls_tcindex.
-
config NET_CLS_ROUTE4
tristate "Routing decision (ROUTE)"
depends on INET
@@ -525,34 +472,6 @@ config CLS_U32_MARK
---help---
Say Y here to be able to use netfilter marks as u32 key.
-config NET_CLS_RSVP
- tristate "IPv4 Resource Reservation Protocol (RSVP)"
- select NET_CLS
- ---help---
- The Resource Reservation Protocol (RSVP) permits end systems to
- request a minimum and maximum data flow rate for a connection; this
- is important for real time data such as streaming sound or video.
-
- Say Y here if you want to be able to classify outgoing packets based
- on their RSVP requests.
-
- To compile this code as a module, choose M here: the
- module will be called cls_rsvp.
-
-config NET_CLS_RSVP6
- tristate "IPv6 Resource Reservation Protocol (RSVP6)"
- select NET_CLS
- ---help---
- The Resource Reservation Protocol (RSVP) permits end systems to
- request a minimum and maximum data flow rate for a connection; this
- is important for real time data such as streaming sound or video.
-
- Say Y here if you want to be able to classify outgoing packets based
- on their RSVP requests and you are using the IPv6 protocol.
-
- To compile this code as a module, choose M here: the
- module will be called cls_rsvp6.
-
config NET_CLS_FLOW
tristate "Flow classifier"
select NET_CLS
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 415d1e1f237e..e5ea44ec13c5 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -31,20 +31,17 @@ obj-$(CONFIG_NET_IFE_SKBTCINDEX) += act_meta_skbtcindex.o
obj-$(CONFIG_NET_ACT_TUNNEL_KEY)+= act_tunnel_key.o
obj-$(CONFIG_NET_ACT_CT) += act_ct.o
obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
-obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
obj-$(CONFIG_NET_SCH_HFSC) += sch_hfsc.o
obj-$(CONFIG_NET_SCH_RED) += sch_red.o
obj-$(CONFIG_NET_SCH_GRED) += sch_gred.o
obj-$(CONFIG_NET_SCH_INGRESS) += sch_ingress.o
-obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o
obj-$(CONFIG_NET_SCH_SFB) += sch_sfb.o
obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
obj-$(CONFIG_NET_SCH_MULTIQ) += sch_multiq.o
-obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o
obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o
@@ -65,9 +62,6 @@ obj-$(CONFIG_NET_SCH_TAPRIO) += sch_taprio.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
obj-$(CONFIG_NET_CLS_FW) += cls_fw.o
-obj-$(CONFIG_NET_CLS_RSVP) += cls_rsvp.o
-obj-$(CONFIG_NET_CLS_TCINDEX) += cls_tcindex.o
-obj-$(CONFIG_NET_CLS_RSVP6) += cls_rsvp6.o
obj-$(CONFIG_NET_CLS_BASIC) += cls_basic.o
obj-$(CONFIG_NET_CLS_FLOW) += cls_flow.o
obj-$(CONFIG_NET_CLS_CGROUP) += cls_cgroup.o
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index b87d2a1ee0b1..e3f28cb03f7e 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -244,7 +244,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
goto out;
}
- if (unlikely(!(dev->flags & IFF_UP))) {
+ if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
dev->name);
goto out;
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index 0fccae356dc1..197915332b42 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -116,6 +116,11 @@ static int valid_label(const struct nlattr *attr,
{
const u32 *label = nla_data(attr);
+ if (nla_len(attr) != sizeof(*label)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid MPLS label length");
+ return -EINVAL;
+ }
+
if (*label & ~MPLS_LABEL_MASK || *label == MPLS_LABEL_IMPLNULL) {
NL_SET_ERR_MSG_MOD(extack, "MPLS label out of range");
return -EINVAL;
@@ -128,7 +133,8 @@ static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = {
[TCA_MPLS_UNSPEC] = { .strict_start_type = TCA_MPLS_UNSPEC + 1 },
[TCA_MPLS_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_mpls)),
[TCA_MPLS_PROTO] = { .type = NLA_U16 },
- [TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_U32, valid_label),
+ [TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
+ valid_label),
[TCA_MPLS_TC] = NLA_POLICY_RANGE(NLA_U8, 0, 7),
[TCA_MPLS_TTL] = NLA_POLICY_MIN(NLA_U8, 1),
[TCA_MPLS_BOS] = NLA_POLICY_RANGE(NLA_U8, 0, 1),
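Switching TCA_MPLS_LABEL from NLA_U32 to NLA_BINARY removes netlink's
implicit 4-byte length guarantee, which is why valid_label() now checks
nla_len() itself before dereferencing the payload. A sketch of the check:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MPLS_LABEL_MASK 0xfffff

    static int valid_label(const void *payload, int len)
    {
            const uint32_t *label = payload;

            if (len != sizeof(*label))
                    return -EINVAL; /* short attr: would read past it */
            if (*label & ~MPLS_LABEL_MASK)
                    return -EINVAL; /* label out of range */
            return 0;
    }

    int main(void)
    {
            uint32_t ok = 100, bad = 1 << 24;
            uint8_t  shrt = 1;

            printf("%d %d %d\n", valid_label(&ok, sizeof(ok)),
                   valid_label(&bad, sizeof(bad)),
                   valid_label(&shrt, sizeof(shrt)));
            return 0;
    }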
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index f095a0fb75c6..bf74f3f4c752 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -26,6 +26,7 @@ static struct tc_action_ops act_pedit_ops;
static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
[TCA_PEDIT_PARMS] = { .len = sizeof(struct tc_pedit) },
+ [TCA_PEDIT_PARMS_EX] = { .len = sizeof(struct tc_pedit) },
[TCA_PEDIT_KEYS_EX] = { .type = NLA_NESTED },
};
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 214f4efdd992..909a685b9162 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -54,8 +54,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
sample_policy, NULL);
if (ret < 0)
return ret;
- if (!tb[TCA_SAMPLE_PARMS] || !tb[TCA_SAMPLE_RATE] ||
- !tb[TCA_SAMPLE_PSAMPLE_GROUP])
+
+ if (!tb[TCA_SAMPLE_PARMS])
return -EINVAL;
parm = nla_data(tb[TCA_SAMPLE_PARMS]);
@@ -79,6 +79,13 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
tcf_idr_release(*a, bind);
return -EEXIST;
}
+
+ if (!tb[TCA_SAMPLE_RATE] || !tb[TCA_SAMPLE_PSAMPLE_GROUP]) {
+ NL_SET_ERR_MSG(extack, "sample rate and group are required");
+ err = -EINVAL;
+ goto release_idr;
+ }
+
err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
if (err < 0)
goto release_idr;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 919c7fa5f02d..30e30b5a9baf 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -40,8 +40,6 @@
#include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>
-extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
-
/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);
@@ -520,8 +518,8 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
{
struct tcf_block *block = chain->block;
const struct tcf_proto_ops *tmplt_ops;
+ unsigned int refcnt, non_act_refcnt;
bool free_block = false;
- unsigned int refcnt;
void *tmplt_priv;
mutex_lock(&block->lock);
@@ -541,13 +539,15 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
* save these to temporary variables.
*/
refcnt = --chain->refcnt;
+ non_act_refcnt = refcnt - chain->action_refcnt;
tmplt_ops = chain->tmplt_ops;
tmplt_priv = chain->tmplt_priv;
- /* The last dropped non-action reference will trigger notification. */
- if (refcnt - chain->action_refcnt == 0 && !by_act) {
- tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
- block, NULL, 0, 0, false);
+ if (non_act_refcnt == chain->explicitly_created && !by_act) {
+ if (non_act_refcnt == 0)
+ tc_chain_notify_delete(tmplt_ops, tmplt_priv,
+ chain->index, block, NULL, 0, 0,
+ false);
/* Last reference to chain, no need to lock. */
chain->flushing = false;
}
@@ -1079,7 +1079,7 @@ static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
/* Find qdisc */
if (!*parent) {
- *q = dev->qdisc;
+ *q = rcu_dereference(dev->qdisc);
*parent = (*q)->handle;
} else {
*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
@@ -1500,6 +1500,7 @@ static int tcf_block_bind(struct tcf_block *block,
err_unroll:
list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
+ list_del(&block_cb->driver_list);
if (i-- > 0) {
list_del(&block_cb->list);
tcf_block_playback_offloads(block, block_cb->cb,
@@ -2098,6 +2099,7 @@ replay:
}
if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
+ tfilter_put(tp, fh);
NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
err = -EINVAL;
goto errout;
@@ -2551,7 +2553,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
parent = tcm->tcm_parent;
if (!parent)
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
else
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
if (!q)
@@ -2732,6 +2734,7 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
return PTR_ERR(ops);
if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
+ module_put(ops->owner);
return -EOPNOTSUPP;
}
@@ -2937,7 +2940,7 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
parent = tcm->tcm_parent;
if (!parent) {
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
parent = q->handle;
} else {
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
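The dev->qdisc accesses gain rcu_dereference()/rtnl_dereference() because
the pointer is published concurrently; the reader must load it once, with
ordering, rather than plain-dereference it. A userspace model where
acquire/release stands in for the RCU machinery:

    #include <stdatomic.h>
    #include <stdio.h>

    struct qdisc { unsigned int handle; };

    static struct qdisc noop = { .handle = 0xffff0000 };
    static _Atomic(struct qdisc *) dev_qdisc = &noop;

    static unsigned int qdisc_handle(void)
    {
            /* rcu_dereference(dev->qdisc): one ordered load, then use */
            struct qdisc *q = atomic_load_explicit(&dev_qdisc,
                                                   memory_order_acquire);
            return q->handle;
    }

    int main(void)
    {
            struct qdisc pfifo = { .handle = 0x80010000 };

            /* publisher side: rcu_assign_pointer() analogue */
            atomic_store_explicit(&dev_qdisc, &pfifo, memory_order_release);
            printf("handle %#x\n", qdisc_handle());
            return 0;
    }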
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index ab53a93b2f2b..87398af2715a 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -225,7 +225,7 @@ static u32 flow_get_skgid(const struct sk_buff *skb)
static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
- u16 uninitialized_var(tag);
+ u16 tag;
if (vlan_get_tag(skb, &tag) < 0)
return 0;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 007fbc199352..c92318f68f92 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -270,14 +270,16 @@ static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
return NULL;
}
-static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
- struct fl_flow_key *mkey,
- struct fl_flow_key *key)
+static noinline_for_stack
+struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
{
+ struct fl_flow_key mkey;
+
+ fl_set_masked_key(&mkey, key, mask);
if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
- return fl_lookup_range(mask, mkey, key);
+ return fl_lookup_range(mask, &mkey, key);
- return __fl_lookup(mask, mkey);
+ return __fl_lookup(mask, &mkey);
}
static u16 fl_ct_info_to_flower_map[] = {
@@ -297,7 +299,6 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
struct cls_fl_head *head = rcu_dereference_bh(tp->root);
- struct fl_flow_key skb_mkey;
struct fl_flow_key skb_key;
struct fl_flow_mask *mask;
struct cls_fl_filter *f;
@@ -317,9 +318,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
ARRAY_SIZE(fl_ct_info_to_flower_map));
skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
- fl_set_masked_key(&skb_mkey, &skb_key, mask);
-
- f = fl_lookup(mask, &skb_mkey, &skb_key);
+ f = fl_mask_lookup(mask, &skb_key);
if (f && !tc_skip_sw(f->flags)) {
*res = f->res;
return tcf_exts_exec(skb, &f->exts, res);
@@ -720,7 +719,8 @@ static void fl_set_key_val(struct nlattr **tb,
}
static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
- struct fl_flow_key *mask)
+ struct fl_flow_key *mask,
+ struct netlink_ext_ack *extack)
{
fl_set_key_val(tb, &key->tp_range.tp_min.dst,
TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
@@ -735,13 +735,32 @@ static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
- if ((mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
- htons(key->tp_range.tp_max.dst) <=
- htons(key->tp_range.tp_min.dst)) ||
- (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
- htons(key->tp_range.tp_max.src) <=
- htons(key->tp_range.tp_min.src)))
+ if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
+ NL_SET_ERR_MSG(extack,
+ "Both min and max destination ports must be specified");
return -EINVAL;
+ }
+ if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
+ NL_SET_ERR_MSG(extack,
+ "Both min and max source ports must be specified");
+ return -EINVAL;
+ }
+ if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
+ htons(key->tp_range.tp_max.dst) <=
+ htons(key->tp_range.tp_min.dst)) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ tb[TCA_FLOWER_KEY_PORT_DST_MIN],
+ "Invalid destination port range (min must be strictly smaller than max)");
+ return -EINVAL;
+ }
+ if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
+ htons(key->tp_range.tp_max.src) <=
+ htons(key->tp_range.tp_min.src)) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
+ "Invalid source port range (min must be strictly smaller than max)");
+ return -EINVAL;
+ }
return 0;
}
@@ -870,6 +889,9 @@ static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
if (option_len > sizeof(struct geneve_opt))
data_len = option_len - sizeof(struct geneve_opt);
+ if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
+ return -ERANGE;
+
opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
memset(opt, 0xff, option_len);
opt->length = data_len / 4;
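
The new -ERANGE check guards the write just below it: opt points at &data[len] and the geneve_opt header is 4 bytes. An equivalent, additive form of the same bound, shown only to make the arithmetic explicit:

	if (key->enc_opts.len + sizeof(struct geneve_opt) >
	    FLOW_DIS_TUN_OPTS_MAX)
		return -ERANGE;	/* same bound as len > FLOW_DIS_TUN_OPTS_MAX - 4 */
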
@@ -1209,7 +1231,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
if (key->basic.ip_proto == IPPROTO_TCP ||
key->basic.ip_proto == IPPROTO_UDP ||
key->basic.ip_proto == IPPROTO_SCTP) {
- ret = fl_set_key_port_range(tb, key, mask);
+ ret = fl_set_key_port_range(tb, key, mask, extack);
if (ret)
return ret;
}
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index ec945294626a..08c41f1976c4 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -210,11 +210,6 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp,
if (err < 0)
return err;
- if (tb[TCA_FW_CLASSID]) {
- f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
- tcf_bind_filter(tp, &f->res, base);
- }
-
if (tb[TCA_FW_INDEV]) {
int ret;
ret = tcf_change_indev(net, tb[TCA_FW_INDEV], extack);
@@ -231,6 +226,11 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp,
} else if (head->mask != 0xFFFFFFFF)
return err;
+ if (tb[TCA_FW_CLASSID]) {
+ f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
+ tcf_bind_filter(tp, &f->res, base);
+ }
+
return 0;
}
@@ -266,7 +266,6 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
return -ENOBUFS;
fnew->id = f->id;
- fnew->res = f->res;
fnew->ifindex = f->ifindex;
fnew->tp = f->tp;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 5efa3e7ace15..1ad4b3e60eb3 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -424,6 +424,11 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
return -EINVAL;
}
+ if (!nhandle) {
+ NL_SET_ERR_MSG(extack, "Replacing with handle of 0 is invalid");
+ return -EINVAL;
+ }
+
h1 = to_hash(nhandle);
b = rtnl_dereference(head->table[h1]);
if (!b) {
@@ -477,6 +482,11 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
int err;
bool new = true;
+ if (!handle) {
+ NL_SET_ERR_MSG(extack, "Creating with handle of 0 is invalid");
+ return -EINVAL;
+ }
+
if (opt == NULL)
return handle ? -EINVAL : 0;
@@ -501,7 +511,6 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
if (fold) {
f->id = fold->id;
f->iif = fold->iif;
- f->res = fold->res;
f->handle = fold->handle;
f->tp = fold->tp;
@@ -526,7 +535,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
rcu_assign_pointer(f->next, f1);
rcu_assign_pointer(*fp, f);
- if (fold && fold->handle && f->handle != fold->handle) {
+ if (fold) {
th = to_hash(fold->handle);
h = from_hash(fold->handle >> 16);
b = rtnl_dereference(head->table[th]);
diff --git a/net/sched/cls_rsvp.c b/net/sched/cls_rsvp.c
deleted file mode 100644
index de1c1d4da597..000000000000
--- a/net/sched/cls_rsvp.c
+++ /dev/null
@@ -1,24 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * net/sched/cls_rsvp.c Special RSVP packet classifier for IPv4.
- *
- * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-#include <net/ip.h>
-#include <net/netlink.h>
-#include <net/act_api.h>
-#include <net/pkt_cls.h>
-
-#define RSVP_DST_LEN 1
-#define RSVP_ID "rsvp"
-#define RSVP_OPS cls_rsvp_ops
-
-#include "cls_rsvp.h"
-MODULE_LICENSE("GPL");
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
deleted file mode 100644
index d36949d9382c..000000000000
--- a/net/sched/cls_rsvp.h
+++ /dev/null
@@ -1,777 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * net/sched/cls_rsvp.h Template file for RSVPv[46] classifiers.
- *
- * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- */
-
-/*
- Compared to the general packet classification problem,
- RSVP needs only several relatively simple rules:
-
- * (dst, protocol) are always specified,
- so that we are able to hash them.
- * src may be exact, or may be wildcard, so that
- we can keep a hash table plus one wildcard entry.
- * source port (or flow label) is important only if src is given.
-
- IMPLEMENTATION.
-
- We use a two-level hash table: the top level is keyed by
- destination address and protocol ID, every bucket contains a list
- of "rsvp sessions", identified by destination address, protocol and
- DPI(="Destination Port ID"): triple (key, mask, offset).
-
- Every bucket has a smaller hash table keyed by source address
- (cf. RSVP flowspec) and one wildcard entry for wildcard reservations.
- Every bucket is again a list of "RSVP flows", selected by
- source address and SPI(="Source Port ID" here rather than
- "security parameter index"): triple (key, mask, offset).
-
-
- NOTE 1. All packets with IPv6 extension headers (except AH and ESP)
- and all fragmented packets go to the best-effort traffic class.
-
-
- NOTE 2. Two "port id"s seem to be redundant; rfc2207 requires
- only one "Generalized Port Identifier". So for classic
- ah, esp (and udp,tcp) both *pi should coincide, or one of them
- should be a wildcard.
-
- At first sight, this redundancy is just a waste of CPU
- resources. But DPI and SPI add the possibility to assign different
- priorities to GPIs. Look also at note 4 about tunnels below.
-
-
- NOTE 3. One complication is the case of tunneled packets.
- We implement it as follows: if the first lookup
- matches a special session with a non-zero "tunnelhdr" value,
- the flowid doesn't contain the true flow ID, but the tunnel ID (1...255).
- In this case, we pull tunnelhdr bytes and restart the lookup
- with tunnel ID added to the list of keys. Simple and stupid 8)8)
- It's enough for PIMREG and IPIP.
-
-
- NOTE 4. Two GPIs make it possible to parse even GRE packets.
- F.e. DPI can select ETH_P_IP (and necessary flags to make
- tunnelhdr correct) in GRE protocol field and SPI matches
- GRE key. Is it not nice? 8)8)
-
-
- Well, as a result, despite its simplicity, we get a pretty
- powerful classification engine. */
-
-
-struct rsvp_head {
- u32 tmap[256/32];
- u32 hgenerator;
- u8 tgenerator;
- struct rsvp_session __rcu *ht[256];
- struct rcu_head rcu;
-};
-
-struct rsvp_session {
- struct rsvp_session __rcu *next;
- __be32 dst[RSVP_DST_LEN];
- struct tc_rsvp_gpi dpi;
- u8 protocol;
- u8 tunnelid;
- /* 16 (src,sport) hash slots, and one wildcard source slot */
- struct rsvp_filter __rcu *ht[16 + 1];
- struct rcu_head rcu;
-};
-
-
-struct rsvp_filter {
- struct rsvp_filter __rcu *next;
- __be32 src[RSVP_DST_LEN];
- struct tc_rsvp_gpi spi;
- u8 tunnelhdr;
-
- struct tcf_result res;
- struct tcf_exts exts;
-
- u32 handle;
- struct rsvp_session *sess;
- struct rcu_work rwork;
-};
-
-static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
-{
- unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
-
- h ^= h>>16;
- h ^= h>>8;
- return (h ^ protocol ^ tunnelid) & 0xFF;
-}
-
-static inline unsigned int hash_src(__be32 *src)
-{
- unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
-
- h ^= h>>16;
- h ^= h>>8;
- h ^= h>>4;
- return h & 0xF;
-}
-
-#define RSVP_APPLY_RESULT() \
-{ \
- int r = tcf_exts_exec(skb, &f->exts, res); \
- if (r < 0) \
- continue; \
- else if (r > 0) \
- return r; \
-}
-
-static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res)
-{
- struct rsvp_head *head = rcu_dereference_bh(tp->root);
- struct rsvp_session *s;
- struct rsvp_filter *f;
- unsigned int h1, h2;
- __be32 *dst, *src;
- u8 protocol;
- u8 tunnelid = 0;
- u8 *xprt;
-#if RSVP_DST_LEN == 4
- struct ipv6hdr *nhptr;
-
- if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
- return -1;
- nhptr = ipv6_hdr(skb);
-#else
- struct iphdr *nhptr;
-
- if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
- return -1;
- nhptr = ip_hdr(skb);
-#endif
-restart:
-
-#if RSVP_DST_LEN == 4
- src = &nhptr->saddr.s6_addr32[0];
- dst = &nhptr->daddr.s6_addr32[0];
- protocol = nhptr->nexthdr;
- xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
-#else
- src = &nhptr->saddr;
- dst = &nhptr->daddr;
- protocol = nhptr->protocol;
- xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
- if (ip_is_fragment(nhptr))
- return -1;
-#endif
-
- h1 = hash_dst(dst, protocol, tunnelid);
- h2 = hash_src(src);
-
- for (s = rcu_dereference_bh(head->ht[h1]); s;
- s = rcu_dereference_bh(s->next)) {
- if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
- protocol == s->protocol &&
- !(s->dpi.mask &
- (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
-#if RSVP_DST_LEN == 4
- dst[0] == s->dst[0] &&
- dst[1] == s->dst[1] &&
- dst[2] == s->dst[2] &&
-#endif
- tunnelid == s->tunnelid) {
-
- for (f = rcu_dereference_bh(s->ht[h2]); f;
- f = rcu_dereference_bh(f->next)) {
- if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
- !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
-#if RSVP_DST_LEN == 4
- &&
- src[0] == f->src[0] &&
- src[1] == f->src[1] &&
- src[2] == f->src[2]
-#endif
- ) {
- *res = f->res;
- RSVP_APPLY_RESULT();
-
-matched:
- if (f->tunnelhdr == 0)
- return 0;
-
- tunnelid = f->res.classid;
- nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
- goto restart;
- }
- }
-
- /* And wildcard bucket... */
- for (f = rcu_dereference_bh(s->ht[16]); f;
- f = rcu_dereference_bh(f->next)) {
- *res = f->res;
- RSVP_APPLY_RESULT();
- goto matched;
- }
- return -1;
- }
- }
- return -1;
-}
-
-static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h)
-{
- struct rsvp_head *head = rtnl_dereference(tp->root);
- struct rsvp_session *s;
- struct rsvp_filter __rcu **ins;
- struct rsvp_filter *pins;
- unsigned int h1 = h & 0xFF;
- unsigned int h2 = (h >> 8) & 0xFF;
-
- for (s = rtnl_dereference(head->ht[h1]); s;
- s = rtnl_dereference(s->next)) {
- for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ;
- ins = &pins->next, pins = rtnl_dereference(*ins)) {
- if (pins->handle == h) {
- RCU_INIT_POINTER(n->next, pins->next);
- rcu_assign_pointer(*ins, n);
- return;
- }
- }
- }
-
- /* Something went wrong if we are trying to replace a non-existent
- * node. Might as well halt instead of silently failing.
- */
- BUG_ON(1);
-}
-
-static void *rsvp_get(struct tcf_proto *tp, u32 handle)
-{
- struct rsvp_head *head = rtnl_dereference(tp->root);
- struct rsvp_session *s;
- struct rsvp_filter *f;
- unsigned int h1 = handle & 0xFF;
- unsigned int h2 = (handle >> 8) & 0xFF;
-
- if (h2 > 16)
- return NULL;
-
- for (s = rtnl_dereference(head->ht[h1]); s;
- s = rtnl_dereference(s->next)) {
- for (f = rtnl_dereference(s->ht[h2]); f;
- f = rtnl_dereference(f->next)) {
- if (f->handle == handle)
- return f;
- }
- }
- return NULL;
-}
-
-static int rsvp_init(struct tcf_proto *tp)
-{
- struct rsvp_head *data;
-
- data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
- if (data) {
- rcu_assign_pointer(tp->root, data);
- return 0;
- }
- return -ENOBUFS;
-}
-
-static void __rsvp_delete_filter(struct rsvp_filter *f)
-{
- tcf_exts_destroy(&f->exts);
- tcf_exts_put_net(&f->exts);
- kfree(f);
-}
-
-static void rsvp_delete_filter_work(struct work_struct *work)
-{
- struct rsvp_filter *f = container_of(to_rcu_work(work),
- struct rsvp_filter,
- rwork);
- rtnl_lock();
- __rsvp_delete_filter(f);
- rtnl_unlock();
-}
-
-static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
-{
- tcf_unbind_filter(tp, &f->res);
- /* all classifiers are required to call tcf_exts_destroy() after an rcu
- * grace period, since converted-to-rcu actions rely on that
- * in the cleanup() callback
- */
- if (tcf_exts_get_net(&f->exts))
- tcf_queue_work(&f->rwork, rsvp_delete_filter_work);
- else
- __rsvp_delete_filter(f);
-}
-
-static void rsvp_destroy(struct tcf_proto *tp, bool rtnl_held,
- struct netlink_ext_ack *extack)
-{
- struct rsvp_head *data = rtnl_dereference(tp->root);
- int h1, h2;
-
- if (data == NULL)
- return;
-
- for (h1 = 0; h1 < 256; h1++) {
- struct rsvp_session *s;
-
- while ((s = rtnl_dereference(data->ht[h1])) != NULL) {
- RCU_INIT_POINTER(data->ht[h1], s->next);
-
- for (h2 = 0; h2 <= 16; h2++) {
- struct rsvp_filter *f;
-
- while ((f = rtnl_dereference(s->ht[h2])) != NULL) {
- rcu_assign_pointer(s->ht[h2], f->next);
- rsvp_delete_filter(tp, f);
- }
- }
- kfree_rcu(s, rcu);
- }
- }
- kfree_rcu(data, rcu);
-}
-
-static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last,
- bool rtnl_held, struct netlink_ext_ack *extack)
-{
- struct rsvp_head *head = rtnl_dereference(tp->root);
- struct rsvp_filter *nfp, *f = arg;
- struct rsvp_filter __rcu **fp;
- unsigned int h = f->handle;
- struct rsvp_session __rcu **sp;
- struct rsvp_session *nsp, *s = f->sess;
- int i, h1;
-
- fp = &s->ht[(h >> 8) & 0xFF];
- for (nfp = rtnl_dereference(*fp); nfp;
- fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
- if (nfp == f) {
- RCU_INIT_POINTER(*fp, f->next);
- rsvp_delete_filter(tp, f);
-
- /* Strip tree */
-
- for (i = 0; i <= 16; i++)
- if (s->ht[i])
- goto out;
-
- /* OK, session has no flows */
- sp = &head->ht[h & 0xFF];
- for (nsp = rtnl_dereference(*sp); nsp;
- sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
- if (nsp == s) {
- RCU_INIT_POINTER(*sp, s->next);
- kfree_rcu(s, rcu);
- goto out;
- }
- }
-
- break;
- }
- }
-
-out:
- *last = true;
- for (h1 = 0; h1 < 256; h1++) {
- if (rcu_access_pointer(head->ht[h1])) {
- *last = false;
- break;
- }
- }
-
- return 0;
-}
-
-static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
-{
- struct rsvp_head *data = rtnl_dereference(tp->root);
- int i = 0xFFFF;
-
- while (i-- > 0) {
- u32 h;
-
- if ((data->hgenerator += 0x10000) == 0)
- data->hgenerator = 0x10000;
- h = data->hgenerator|salt;
- if (!rsvp_get(tp, h))
- return h;
- }
- return 0;
-}
-
-static int tunnel_bts(struct rsvp_head *data)
-{
- int n = data->tgenerator >> 5;
- u32 b = 1 << (data->tgenerator & 0x1F);
-
- if (data->tmap[n] & b)
- return 0;
- data->tmap[n] |= b;
- return 1;
-}
-
-static void tunnel_recycle(struct rsvp_head *data)
-{
- struct rsvp_session __rcu **sht = data->ht;
- u32 tmap[256/32];
- int h1, h2;
-
- memset(tmap, 0, sizeof(tmap));
-
- for (h1 = 0; h1 < 256; h1++) {
- struct rsvp_session *s;
- for (s = rtnl_dereference(sht[h1]); s;
- s = rtnl_dereference(s->next)) {
- for (h2 = 0; h2 <= 16; h2++) {
- struct rsvp_filter *f;
-
- for (f = rtnl_dereference(s->ht[h2]); f;
- f = rtnl_dereference(f->next)) {
- if (f->tunnelhdr == 0)
- continue;
- data->tgenerator = f->res.classid;
- tunnel_bts(data);
- }
- }
- }
- }
-
- memcpy(data->tmap, tmap, sizeof(tmap));
-}
-
-static u32 gen_tunnel(struct rsvp_head *data)
-{
- int i, k;
-
- for (k = 0; k < 2; k++) {
- for (i = 255; i > 0; i--) {
- if (++data->tgenerator == 0)
- data->tgenerator = 1;
- if (tunnel_bts(data))
- return data->tgenerator;
- }
- tunnel_recycle(data);
- }
- return 0;
-}
-
-static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
- [TCA_RSVP_CLASSID] = { .type = NLA_U32 },
- [TCA_RSVP_DST] = { .len = RSVP_DST_LEN * sizeof(u32) },
- [TCA_RSVP_SRC] = { .len = RSVP_DST_LEN * sizeof(u32) },
- [TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) },
-};
-
-static int rsvp_change(struct net *net, struct sk_buff *in_skb,
- struct tcf_proto *tp, unsigned long base,
- u32 handle,
- struct nlattr **tca,
- void **arg, bool ovr, bool rtnl_held,
- struct netlink_ext_ack *extack)
-{
- struct rsvp_head *data = rtnl_dereference(tp->root);
- struct rsvp_filter *f, *nfp;
- struct rsvp_filter __rcu **fp;
- struct rsvp_session *nsp, *s;
- struct rsvp_session __rcu **sp;
- struct tc_rsvp_pinfo *pinfo = NULL;
- struct nlattr *opt = tca[TCA_OPTIONS];
- struct nlattr *tb[TCA_RSVP_MAX + 1];
- struct tcf_exts e;
- unsigned int h1, h2;
- __be32 *dst;
- int err;
-
- if (opt == NULL)
- return handle ? -EINVAL : 0;
-
- err = nla_parse_nested_deprecated(tb, TCA_RSVP_MAX, opt, rsvp_policy,
- NULL);
- if (err < 0)
- return err;
-
- err = tcf_exts_init(&e, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
- if (err < 0)
- return err;
- err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr, true,
- extack);
- if (err < 0)
- goto errout2;
-
- f = *arg;
- if (f) {
- /* Node exists: adjust only classid */
- struct rsvp_filter *n;
-
- if (f->handle != handle && handle)
- goto errout2;
-
- n = kmemdup(f, sizeof(*f), GFP_KERNEL);
- if (!n) {
- err = -ENOMEM;
- goto errout2;
- }
-
- err = tcf_exts_init(&n->exts, net, TCA_RSVP_ACT,
- TCA_RSVP_POLICE);
- if (err < 0) {
- kfree(n);
- goto errout2;
- }
-
- if (tb[TCA_RSVP_CLASSID]) {
- n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
- tcf_bind_filter(tp, &n->res, base);
- }
-
- tcf_exts_change(&n->exts, &e);
- rsvp_replace(tp, n, handle);
- return 0;
- }
-
- /* Now more serious part... */
- err = -EINVAL;
- if (handle)
- goto errout2;
- if (tb[TCA_RSVP_DST] == NULL)
- goto errout2;
-
- err = -ENOBUFS;
- f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
- if (f == NULL)
- goto errout2;
-
- err = tcf_exts_init(&f->exts, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
- if (err < 0)
- goto errout;
- h2 = 16;
- if (tb[TCA_RSVP_SRC]) {
- memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
- h2 = hash_src(f->src);
- }
- if (tb[TCA_RSVP_PINFO]) {
- pinfo = nla_data(tb[TCA_RSVP_PINFO]);
- f->spi = pinfo->spi;
- f->tunnelhdr = pinfo->tunnelhdr;
- }
- if (tb[TCA_RSVP_CLASSID])
- f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
-
- dst = nla_data(tb[TCA_RSVP_DST]);
- h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
-
- err = -ENOMEM;
- if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0)
- goto errout;
-
- if (f->tunnelhdr) {
- err = -EINVAL;
- if (f->res.classid > 255)
- goto errout;
-
- err = -ENOMEM;
- if (f->res.classid == 0 &&
- (f->res.classid = gen_tunnel(data)) == 0)
- goto errout;
- }
-
- for (sp = &data->ht[h1];
- (s = rtnl_dereference(*sp)) != NULL;
- sp = &s->next) {
- if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
- pinfo && pinfo->protocol == s->protocol &&
- memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
-#if RSVP_DST_LEN == 4
- dst[0] == s->dst[0] &&
- dst[1] == s->dst[1] &&
- dst[2] == s->dst[2] &&
-#endif
- pinfo->tunnelid == s->tunnelid) {
-
-insert:
- /* OK, we found an appropriate session */
-
- fp = &s->ht[h2];
-
- f->sess = s;
- if (f->tunnelhdr == 0)
- tcf_bind_filter(tp, &f->res, base);
-
- tcf_exts_change(&f->exts, &e);
-
- fp = &s->ht[h2];
- for (nfp = rtnl_dereference(*fp); nfp;
- fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
- __u32 mask = nfp->spi.mask & f->spi.mask;
-
- if (mask != f->spi.mask)
- break;
- }
- RCU_INIT_POINTER(f->next, nfp);
- rcu_assign_pointer(*fp, f);
-
- *arg = f;
- return 0;
- }
- }
-
- /* No session found. Create new one. */
-
- err = -ENOBUFS;
- s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
- if (s == NULL)
- goto errout;
- memcpy(s->dst, dst, sizeof(s->dst));
-
- if (pinfo) {
- s->dpi = pinfo->dpi;
- s->protocol = pinfo->protocol;
- s->tunnelid = pinfo->tunnelid;
- }
- sp = &data->ht[h1];
- for (nsp = rtnl_dereference(*sp); nsp;
- sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
- if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask)
- break;
- }
- RCU_INIT_POINTER(s->next, nsp);
- rcu_assign_pointer(*sp, s);
-
- goto insert;
-
-errout:
- tcf_exts_destroy(&f->exts);
- kfree(f);
-errout2:
- tcf_exts_destroy(&e);
- return err;
-}
-
-static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg,
- bool rtnl_held)
-{
- struct rsvp_head *head = rtnl_dereference(tp->root);
- unsigned int h, h1;
-
- if (arg->stop)
- return;
-
- for (h = 0; h < 256; h++) {
- struct rsvp_session *s;
-
- for (s = rtnl_dereference(head->ht[h]); s;
- s = rtnl_dereference(s->next)) {
- for (h1 = 0; h1 <= 16; h1++) {
- struct rsvp_filter *f;
-
- for (f = rtnl_dereference(s->ht[h1]); f;
- f = rtnl_dereference(f->next)) {
- if (arg->count < arg->skip) {
- arg->count++;
- continue;
- }
- if (arg->fn(tp, f, arg) < 0) {
- arg->stop = 1;
- return;
- }
- arg->count++;
- }
- }
- }
- }
-}
-
-static int rsvp_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
-{
- struct rsvp_filter *f = fh;
- struct rsvp_session *s;
- struct nlattr *nest;
- struct tc_rsvp_pinfo pinfo;
-
- if (f == NULL)
- return skb->len;
- s = f->sess;
-
- t->tcm_handle = f->handle;
-
- nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
-
- if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
- goto nla_put_failure;
- pinfo.dpi = s->dpi;
- pinfo.spi = f->spi;
- pinfo.protocol = s->protocol;
- pinfo.tunnelid = s->tunnelid;
- pinfo.tunnelhdr = f->tunnelhdr;
- pinfo.pad = 0;
- if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
- goto nla_put_failure;
- if (f->res.classid &&
- nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
- goto nla_put_failure;
- if (((f->handle >> 8) & 0xFF) != 16 &&
- nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
- goto nla_put_failure;
-
- if (tcf_exts_dump(skb, &f->exts) < 0)
- goto nla_put_failure;
-
- nla_nest_end(skb, nest);
-
- if (tcf_exts_dump_stats(skb, &f->exts) < 0)
- goto nla_put_failure;
- return skb->len;
-
-nla_put_failure:
- nla_nest_cancel(skb, nest);
- return -1;
-}
-
-static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
- unsigned long base)
-{
- struct rsvp_filter *f = fh;
-
- if (f && f->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &f->res, base);
- else
- __tcf_unbind_filter(q, &f->res);
- }
-}
-
-static struct tcf_proto_ops RSVP_OPS __read_mostly = {
- .kind = RSVP_ID,
- .classify = rsvp_classify,
- .init = rsvp_init,
- .destroy = rsvp_destroy,
- .get = rsvp_get,
- .change = rsvp_change,
- .delete = rsvp_delete,
- .walk = rsvp_walk,
- .dump = rsvp_dump,
- .bind_class = rsvp_bind_class,
- .owner = THIS_MODULE,
-};
-
-static int __init init_rsvp(void)
-{
- return register_tcf_proto_ops(&RSVP_OPS);
-}
-
-static void __exit exit_rsvp(void)
-{
- unregister_tcf_proto_ops(&RSVP_OPS);
-}
-
-module_init(init_rsvp)
-module_exit(exit_rsvp)
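
For readers tracking what is retired here: the deleted template implements the two-level scheme its header comment describes. A compact sketch of the two hash functions, transcribed from the removed code for the IPv4 case (RSVP_DST_LEN == 1):

	static unsigned int sketch_hash_dst(u32 dst, u8 protocol, u8 tunnelid)
	{
		unsigned int h = dst;

		h ^= h >> 16;
		h ^= h >> 8;
		return (h ^ protocol ^ tunnelid) & 0xFF;	/* 256 session buckets */
	}

	static unsigned int sketch_hash_src(u32 src)
	{
		unsigned int h = src;

		h ^= h >> 16;
		h ^= h >> 8;
		h ^= h >> 4;
		return h & 0xF;		/* 16 source slots, plus wildcard slot 16 */
	}
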
diff --git a/net/sched/cls_rsvp6.c b/net/sched/cls_rsvp6.c
deleted file mode 100644
index 64078846000e..000000000000
--- a/net/sched/cls_rsvp6.c
+++ /dev/null
@@ -1,24 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * net/sched/cls_rsvp6.c Special RSVP packet classifier for IPv6.
- *
- * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ipv6.h>
-#include <linux/skbuff.h>
-#include <net/act_api.h>
-#include <net/pkt_cls.h>
-#include <net/netlink.h>
-
-#define RSVP_DST_LEN 4
-#define RSVP_ID "rsvp6"
-#define RSVP_OPS cls_rsvp6_ops
-
-#include "cls_rsvp.h"
-MODULE_LICENSE("GPL");
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
deleted file mode 100644
index 684187a1fdb9..000000000000
--- a/net/sched/cls_tcindex.c
+++ /dev/null
@@ -1,738 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * net/sched/cls_tcindex.c Packet classifier for skb->tc_index
- *
- * Written 1998,1999 by Werner Almesberger, EPFL ICA
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/refcount.h>
-#include <net/act_api.h>
-#include <net/netlink.h>
-#include <net/pkt_cls.h>
-#include <net/sch_generic.h>
-
-/*
- * Passing parameters to the root seems to be done more awkwardly than really
- * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
- * verified. FIXME.
- */
-
-#define PERFECT_HASH_THRESHOLD 64 /* use perfect hash if not bigger */
-#define DEFAULT_HASH_SIZE 64 /* optimized for diffserv */
-
-
-struct tcindex_data;
-
-struct tcindex_filter_result {
- struct tcf_exts exts;
- struct tcf_result res;
- struct tcindex_data *p;
- struct rcu_work rwork;
-};
-
-struct tcindex_filter {
- u16 key;
- struct tcindex_filter_result result;
- struct tcindex_filter __rcu *next;
- struct rcu_work rwork;
-};
-
-
-struct tcindex_data {
- struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
- struct tcindex_filter __rcu **h; /* imperfect hash; */
- struct tcf_proto *tp;
- u16 mask; /* AND key with mask */
- u32 shift; /* shift ANDed key to the right */
- u32 hash; /* hash table size; 0 if undefined */
- u32 alloc_hash; /* allocated size */
- u32 fall_through; /* 0: only classify if explicit match */
- refcount_t refcnt; /* a temporary refcnt for perfect hash */
- struct rcu_work rwork;
-};
-
-static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
-{
- return tcf_exts_has_actions(&r->exts) || r->res.classid;
-}
-
-static void tcindex_data_get(struct tcindex_data *p)
-{
- refcount_inc(&p->refcnt);
-}
-
-static void tcindex_data_put(struct tcindex_data *p)
-{
- if (refcount_dec_and_test(&p->refcnt)) {
- kfree(p->perfect);
- kfree(p->h);
- kfree(p);
- }
-}
-
-static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
- u16 key)
-{
- if (p->perfect) {
- struct tcindex_filter_result *f = p->perfect + key;
-
- return tcindex_filter_is_set(f) ? f : NULL;
- } else if (p->h) {
- struct tcindex_filter __rcu **fp;
- struct tcindex_filter *f;
-
- fp = &p->h[key % p->hash];
- for (f = rcu_dereference_bh_rtnl(*fp);
- f;
- fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
- if (f->key == key)
- return &f->result;
- }
-
- return NULL;
-}
-
-
-static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res)
-{
- struct tcindex_data *p = rcu_dereference_bh(tp->root);
- struct tcindex_filter_result *f;
- int key = (skb->tc_index & p->mask) >> p->shift;
-
- pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
- skb, tp, res, p);
-
- f = tcindex_lookup(p, key);
- if (!f) {
- struct Qdisc *q = tcf_block_q(tp->chain->block);
-
- if (!p->fall_through)
- return -1;
- res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
- res->class = 0;
- pr_debug("alg 0x%x\n", res->classid);
- return 0;
- }
- *res = f->res;
- pr_debug("map 0x%x\n", res->classid);
-
- return tcf_exts_exec(skb, &f->exts, res);
-}
-
-
-static void *tcindex_get(struct tcf_proto *tp, u32 handle)
-{
- struct tcindex_data *p = rtnl_dereference(tp->root);
- struct tcindex_filter_result *r;
-
- pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
- if (p->perfect && handle >= p->alloc_hash)
- return NULL;
- r = tcindex_lookup(p, handle);
- return r && tcindex_filter_is_set(r) ? r : NULL;
-}
-
-static int tcindex_init(struct tcf_proto *tp)
-{
- struct tcindex_data *p;
-
- pr_debug("tcindex_init(tp %p)\n", tp);
- p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- p->mask = 0xffff;
- p->hash = DEFAULT_HASH_SIZE;
- p->fall_through = 1;
- refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */
-
- rcu_assign_pointer(tp->root, p);
- return 0;
-}
-
-static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
-{
- tcf_exts_destroy(&r->exts);
- tcf_exts_put_net(&r->exts);
- tcindex_data_put(r->p);
-}
-
-static void tcindex_destroy_rexts_work(struct work_struct *work)
-{
- struct tcindex_filter_result *r;
-
- r = container_of(to_rcu_work(work),
- struct tcindex_filter_result,
- rwork);
- rtnl_lock();
- __tcindex_destroy_rexts(r);
- rtnl_unlock();
-}
-
-static void __tcindex_destroy_fexts(struct tcindex_filter *f)
-{
- tcf_exts_destroy(&f->result.exts);
- tcf_exts_put_net(&f->result.exts);
- kfree(f);
-}
-
-static void tcindex_destroy_fexts_work(struct work_struct *work)
-{
- struct tcindex_filter *f = container_of(to_rcu_work(work),
- struct tcindex_filter,
- rwork);
-
- rtnl_lock();
- __tcindex_destroy_fexts(f);
- rtnl_unlock();
-}
-
-static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
- bool rtnl_held, struct netlink_ext_ack *extack)
-{
- struct tcindex_data *p = rtnl_dereference(tp->root);
- struct tcindex_filter_result *r = arg;
- struct tcindex_filter __rcu **walk;
- struct tcindex_filter *f = NULL;
-
- pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
- if (p->perfect) {
- if (!r->res.class)
- return -ENOENT;
- } else {
- int i;
-
- for (i = 0; i < p->hash; i++) {
- walk = p->h + i;
- for (f = rtnl_dereference(*walk); f;
- walk = &f->next, f = rtnl_dereference(*walk)) {
- if (&f->result == r)
- goto found;
- }
- }
- return -ENOENT;
-
-found:
- rcu_assign_pointer(*walk, rtnl_dereference(f->next));
- }
- tcf_unbind_filter(tp, &r->res);
- /* all classifiers are required to call tcf_exts_destroy() after an rcu
- * grace period, since converted-to-rcu actions rely on that
- * in the cleanup() callback
- */
- if (f) {
- if (tcf_exts_get_net(&f->result.exts))
- tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
- else
- __tcindex_destroy_fexts(f);
- } else {
- tcindex_data_get(p);
-
- if (tcf_exts_get_net(&r->exts))
- tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
- else
- __tcindex_destroy_rexts(r);
- }
-
- *last = false;
- return 0;
-}
-
-static void tcindex_destroy_work(struct work_struct *work)
-{
- struct tcindex_data *p = container_of(to_rcu_work(work),
- struct tcindex_data,
- rwork);
-
- tcindex_data_put(p);
-}
-
-static inline int
-valid_perfect_hash(struct tcindex_data *p)
-{
- return p->hash > (p->mask >> p->shift);
-}
-
-static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
- [TCA_TCINDEX_HASH] = { .type = NLA_U32 },
- [TCA_TCINDEX_MASK] = { .type = NLA_U16 },
- [TCA_TCINDEX_SHIFT] = { .type = NLA_U32 },
- [TCA_TCINDEX_FALL_THROUGH] = { .type = NLA_U32 },
- [TCA_TCINDEX_CLASSID] = { .type = NLA_U32 },
-};
-
-static int tcindex_filter_result_init(struct tcindex_filter_result *r,
- struct tcindex_data *p,
- struct net *net)
-{
- memset(r, 0, sizeof(*r));
- r->p = p;
- return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
- TCA_TCINDEX_POLICE);
-}
-
-static void tcindex_free_perfect_hash(struct tcindex_data *cp);
-
-static void tcindex_partial_destroy_work(struct work_struct *work)
-{
- struct tcindex_data *p = container_of(to_rcu_work(work),
- struct tcindex_data,
- rwork);
-
- rtnl_lock();
- if (p->perfect)
- tcindex_free_perfect_hash(p);
- kfree(p);
- rtnl_unlock();
-}
-
-static void tcindex_free_perfect_hash(struct tcindex_data *cp)
-{
- int i;
-
- for (i = 0; i < cp->hash; i++)
- tcf_exts_destroy(&cp->perfect[i].exts);
- kfree(cp->perfect);
-}
-
-static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
-{
- int i, err = 0;
-
- cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
- GFP_KERNEL | __GFP_NOWARN);
- if (!cp->perfect)
- return -ENOMEM;
-
- for (i = 0; i < cp->hash; i++) {
- err = tcf_exts_init(&cp->perfect[i].exts, net,
- TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
- if (err < 0)
- goto errout;
- cp->perfect[i].p = cp;
- }
-
- return 0;
-
-errout:
- tcindex_free_perfect_hash(cp);
- return err;
-}
-
-static int
-tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
- u32 handle, struct tcindex_data *p,
- struct tcindex_filter_result *r, struct nlattr **tb,
- struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
-{
- struct tcindex_filter_result new_filter_result, *old_r = r;
- struct tcindex_data *cp = NULL, *oldp;
- struct tcindex_filter *f = NULL; /* make gcc behave */
- struct tcf_result cr = {};
- int err, balloc = 0;
- struct tcf_exts e;
-
- err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
- if (err < 0)
- return err;
- err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
- if (err < 0)
- goto errout;
-
- err = -ENOMEM;
- /* tcindex_data attributes must look atomic to classifier/lookup, so
- * allocate new tcindex data and RCU-assign it onto the root, keeping
- * the perfect hash and hash pointers from the old data.
- */
- cp = kzalloc(sizeof(*cp), GFP_KERNEL);
- if (!cp)
- goto errout;
-
- cp->mask = p->mask;
- cp->shift = p->shift;
- cp->hash = p->hash;
- cp->alloc_hash = p->alloc_hash;
- cp->fall_through = p->fall_through;
- cp->tp = tp;
- refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */
-
- if (tb[TCA_TCINDEX_HASH])
- cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
-
- if (tb[TCA_TCINDEX_MASK])
- cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
-
- if (tb[TCA_TCINDEX_SHIFT]) {
- cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
- if (cp->shift > 16) {
- err = -EINVAL;
- goto errout;
- }
- }
- if (!cp->hash) {
- /* Hash not specified, use perfect hash if the upper limit
- * of the hashing index is below the threshold.
- */
- if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
- cp->hash = (cp->mask >> cp->shift) + 1;
- else
- cp->hash = DEFAULT_HASH_SIZE;
- }
-
- if (p->perfect) {
- int i;
-
- if (tcindex_alloc_perfect_hash(net, cp) < 0)
- goto errout;
- cp->alloc_hash = cp->hash;
- for (i = 0; i < min(cp->hash, p->hash); i++)
- cp->perfect[i].res = p->perfect[i].res;
- balloc = 1;
- }
- cp->h = p->h;
-
- err = tcindex_filter_result_init(&new_filter_result, cp, net);
- if (err < 0)
- goto errout_alloc;
- if (old_r)
- cr = r->res;
-
- err = -EBUSY;
-
- /* Hash already allocated, make sure that we still meet the
- * requirements for the allocated hash.
- */
- if (cp->perfect) {
- if (!valid_perfect_hash(cp) ||
- cp->hash > cp->alloc_hash)
- goto errout_alloc;
- } else if (cp->h && cp->hash != cp->alloc_hash) {
- goto errout_alloc;
- }
-
- err = -EINVAL;
- if (tb[TCA_TCINDEX_FALL_THROUGH])
- cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
-
- if (!cp->perfect && !cp->h)
- cp->alloc_hash = cp->hash;
-
- /* Note: this could be as restrictive as if (handle & ~(mask >> shift))
- * but then, we'd fail handles that may become valid after some future
- * mask change. While this is extremely unlikely to ever matter,
- * the check below is safer (and also more backwards-compatible).
- */
- if (cp->perfect || valid_perfect_hash(cp))
- if (handle >= cp->alloc_hash)
- goto errout_alloc;
-
-
- err = -ENOMEM;
- if (!cp->perfect && !cp->h) {
- if (valid_perfect_hash(cp)) {
- if (tcindex_alloc_perfect_hash(net, cp) < 0)
- goto errout_alloc;
- balloc = 1;
- } else {
- struct tcindex_filter __rcu **hash;
-
- hash = kcalloc(cp->hash,
- sizeof(struct tcindex_filter *),
- GFP_KERNEL);
-
- if (!hash)
- goto errout_alloc;
-
- cp->h = hash;
- balloc = 2;
- }
- }
-
- if (cp->perfect)
- r = cp->perfect + handle;
- else
- r = tcindex_lookup(cp, handle) ? : &new_filter_result;
-
- if (r == &new_filter_result) {
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- goto errout_alloc;
- f->key = handle;
- f->next = NULL;
- err = tcindex_filter_result_init(&f->result, cp, net);
- if (err < 0) {
- kfree(f);
- goto errout_alloc;
- }
- }
-
- if (tb[TCA_TCINDEX_CLASSID]) {
- cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
- tcf_bind_filter(tp, &cr, base);
- }
-
- if (old_r && old_r != r) {
- err = tcindex_filter_result_init(old_r, cp, net);
- if (err < 0) {
- kfree(f);
- goto errout_alloc;
- }
- }
-
- oldp = p;
- r->res = cr;
- tcf_exts_change(&r->exts, &e);
-
- rcu_assign_pointer(tp->root, cp);
-
- if (r == &new_filter_result) {
- struct tcindex_filter *nfp;
- struct tcindex_filter __rcu **fp;
-
- f->result.res = r->res;
- tcf_exts_change(&f->result.exts, &r->exts);
-
- fp = cp->h + (handle % cp->hash);
- for (nfp = rtnl_dereference(*fp);
- nfp;
- fp = &nfp->next, nfp = rtnl_dereference(*fp))
- ; /* nothing */
-
- rcu_assign_pointer(*fp, f);
- } else {
- tcf_exts_destroy(&new_filter_result.exts);
- }
-
- if (oldp)
- tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
- return 0;
-
-errout_alloc:
- if (balloc == 1)
- tcindex_free_perfect_hash(cp);
- else if (balloc == 2)
- kfree(cp->h);
- tcf_exts_destroy(&new_filter_result.exts);
-errout:
- kfree(cp);
- tcf_exts_destroy(&e);
- return err;
-}
-
-static int
-tcindex_change(struct net *net, struct sk_buff *in_skb,
- struct tcf_proto *tp, unsigned long base, u32 handle,
- struct nlattr **tca, void **arg, bool ovr,
- bool rtnl_held, struct netlink_ext_ack *extack)
-{
- struct nlattr *opt = tca[TCA_OPTIONS];
- struct nlattr *tb[TCA_TCINDEX_MAX + 1];
- struct tcindex_data *p = rtnl_dereference(tp->root);
- struct tcindex_filter_result *r = *arg;
- int err;
-
- pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
- "p %p,r %p,*arg %p\n",
- tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL);
-
- if (!opt)
- return 0;
-
- err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
- tcindex_policy, NULL);
- if (err < 0)
- return err;
-
- return tcindex_set_parms(net, tp, base, handle, p, r, tb,
- tca[TCA_RATE], ovr, extack);
-}
-
-static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
- bool rtnl_held)
-{
- struct tcindex_data *p = rtnl_dereference(tp->root);
- struct tcindex_filter *f, *next;
- int i;
-
- pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
- if (p->perfect) {
- for (i = 0; i < p->hash; i++) {
- if (!p->perfect[i].res.class)
- continue;
- if (walker->count >= walker->skip) {
- if (walker->fn(tp, p->perfect + i, walker) < 0) {
- walker->stop = 1;
- return;
- }
- }
- walker->count++;
- }
- }
- if (!p->h)
- return;
- for (i = 0; i < p->hash; i++) {
- for (f = rtnl_dereference(p->h[i]); f; f = next) {
- next = rtnl_dereference(f->next);
- if (walker->count >= walker->skip) {
- if (walker->fn(tp, &f->result, walker) < 0) {
- walker->stop = 1;
- return;
- }
- }
- walker->count++;
- }
- }
-}
-
-static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
- struct netlink_ext_ack *extack)
-{
- struct tcindex_data *p = rtnl_dereference(tp->root);
- int i;
-
- pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
-
- if (p->perfect) {
- for (i = 0; i < p->hash; i++) {
- struct tcindex_filter_result *r = p->perfect + i;
-
- /* tcf_queue_work() does not guarantee the ordering we
- * want, so we have to take this refcnt temporarily to
- * ensure 'p' is freed after all tcindex_filter_result
- * instances here. The imperfect hash does not need this,
- * because it uses linked lists rather than an array.
- */
- tcindex_data_get(p);
-
- tcf_unbind_filter(tp, &r->res);
- if (tcf_exts_get_net(&r->exts))
- tcf_queue_work(&r->rwork,
- tcindex_destroy_rexts_work);
- else
- __tcindex_destroy_rexts(r);
- }
- }
-
- for (i = 0; p->h && i < p->hash; i++) {
- struct tcindex_filter *f, *next;
- bool last;
-
- for (f = rtnl_dereference(p->h[i]); f; f = next) {
- next = rtnl_dereference(f->next);
- tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
- }
- }
-
- tcf_queue_work(&p->rwork, tcindex_destroy_work);
-}
-
-
-static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
-{
- struct tcindex_data *p = rtnl_dereference(tp->root);
- struct tcindex_filter_result *r = fh;
- struct nlattr *nest;
-
- pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
- tp, fh, skb, t, p, r);
- pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);
-
- nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
-
- if (!fh) {
- t->tcm_handle = ~0; /* whatever ... */
- if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
- nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
- nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
- nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
- goto nla_put_failure;
- nla_nest_end(skb, nest);
- } else {
- if (p->perfect) {
- t->tcm_handle = r - p->perfect;
- } else {
- struct tcindex_filter *f;
- struct tcindex_filter __rcu **fp;
- int i;
-
- t->tcm_handle = 0;
- for (i = 0; !t->tcm_handle && i < p->hash; i++) {
- fp = &p->h[i];
- for (f = rtnl_dereference(*fp);
- !t->tcm_handle && f;
- fp = &f->next, f = rtnl_dereference(*fp)) {
- if (&f->result == r)
- t->tcm_handle = f->key;
- }
- }
- }
- pr_debug("handle = %d\n", t->tcm_handle);
- if (r->res.class &&
- nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
- goto nla_put_failure;
-
- if (tcf_exts_dump(skb, &r->exts) < 0)
- goto nla_put_failure;
- nla_nest_end(skb, nest);
-
- if (tcf_exts_dump_stats(skb, &r->exts) < 0)
- goto nla_put_failure;
- }
-
- return skb->len;
-
-nla_put_failure:
- nla_nest_cancel(skb, nest);
- return -1;
-}
-
-static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
- void *q, unsigned long base)
-{
- struct tcindex_filter_result *r = fh;
-
- if (r && r->res.classid == classid) {
- if (cl)
- __tcf_bind_filter(q, &r->res, base);
- else
- __tcf_unbind_filter(q, &r->res);
- }
-}
-
-static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
- .kind = "tcindex",
- .classify = tcindex_classify,
- .init = tcindex_init,
- .destroy = tcindex_destroy,
- .get = tcindex_get,
- .change = tcindex_change,
- .delete = tcindex_delete,
- .walk = tcindex_walk,
- .dump = tcindex_dump,
- .bind_class = tcindex_bind_class,
- .owner = THIS_MODULE,
-};
-
-static int __init init_tcindex(void)
-{
- return register_tcf_proto_ops(&cls_tcindex_ops);
-}
-
-static void __exit exit_tcindex(void)
-{
- unregister_tcf_proto_ops(&cls_tcindex_ops);
-}
-
-module_init(init_tcindex)
-module_exit(exit_tcindex)
-MODULE_LICENSE("GPL");
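
For reference, the core computation of the removed classifier, transcribed as a sketch from the deleted code: the lookup key is derived from skb->tc_index, and the perfect (array-indexed) hash is chosen when the largest possible key stays under PERFECT_HASH_THRESHOLD (64):

	static u16 sketch_tcindex_key(u16 tc_index, u16 mask, u32 shift)
	{
		return (tc_index & mask) >> shift;	/* AND first, then shift */
	}
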
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index ed8d26e6468c..65598207a2fc 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -716,12 +716,18 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
struct nlattr *est, bool ovr,
struct netlink_ext_ack *extack)
{
- int err;
+ int err, ifindex = -1;
err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, true, extack);
if (err < 0)
return err;
+ if (tb[TCA_U32_INDEV]) {
+ ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
+ if (ifindex < 0)
+ return -EINVAL;
+ }
+
if (tb[TCA_U32_LINK]) {
u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
struct tc_u_hnode *ht_down = NULL, *ht_old;
@@ -756,13 +762,9 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
tcf_bind_filter(tp, &n->res, base);
}
- if (tb[TCA_U32_INDEV]) {
- int ret;
- ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
- if (ret < 0)
- return -EINVAL;
- n->ifindex = ret;
- }
+ if (ifindex >= 0)
+ n->ifindex = ifindex;
+
return 0;
}
@@ -812,7 +814,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
new->ifindex = n->ifindex;
new->fshift = n->fshift;
- new->res = n->res;
new->flags = n->flags;
RCU_INIT_POINTER(new->ht_down, ht);
@@ -1002,18 +1003,62 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
return -EINVAL;
}
+ /* At this point, we need to derive the new handle that will be used to
+ * uniquely map the identity of this table match entry. The
+ * identity of the entry that we need to construct is 32 bits made of:
+ * htid(12b):bucketid(8b):node/entryid(12b)
+ *
+ * At this point _we have the table(ht)_ in which we will insert this
+ * entry. We carry the table's id in variable "htid".
+ * Note that earlier code picked the ht selection either by a) the user
+ * providing the htid via the TCA_U32_HASH attribute, or b) when no
+ * such attribute is passed, defaulting to the root ht at ID
+ * 0x[800][00][000]. Rule: the root table has a single bucket with ID 0.
+ * If OTOH the user passed us the htid, they may also pass a bucketid of
+ * choice; 0 is fine. For example, a user htid of 0x[600][01][000]
+ * indicates hash bucketid 1. Rule: the entry/node ID _cannot_ be
+ * passed via the htid, so even if it is non-zero it will be ignored.
+ *
+ * We may also have a handle, if the user passed one. The handle also
+ * carries the same addressing of htid(12b):bucketid(8b):node/entryid(12b).
+ * Rule: the bucketid on the handle is ignored even if one was passed;
+ * rather the value on "htid" is always assumed to be the bucketid.
+ */
if (handle) {
+ /* Rule: The htid from handle and tableid from htid must match */
if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
return -EINVAL;
}
- handle = htid | TC_U32_NODE(handle);
- err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
- GFP_KERNEL);
- if (err)
- return err;
- } else
+ /* OK, so far we have a valid htid(12b):bucketid(8b) but we
+ * need to finalize the table entry identification with the last
+ * part - the node/entryid(12b). Rule: the nodeid _cannot_ be 0 for
+ * entries. Rule: a nodeid of 0 is reserved only for tables (see
+ * the earlier code which processes the TC_U32_DIVISOR attribute).
+ * Rule: the nodeid can only be derived from the handle (and not
+ * the htid).
+ * Rule: if the handle specifies zero for the node id (for example
+ * 0x60000000), then pick a new nodeid from the pool of IDs
+ * this hash table has been allocating from.
+ * If OTOH it is specified (for example the user passed a
+ * handle such as 0x60000123), then we use it to generate our final
+ * handle, which is used to uniquely identify the match entry.
+ */
+ if (!TC_U32_NODE(handle)) {
+ handle = gen_new_kid(ht, htid);
+ } else {
+ handle = htid | TC_U32_NODE(handle);
+ err = idr_alloc_u32(&ht->handle_idr, NULL, &handle,
+ handle, GFP_KERNEL);
+ if (err)
+ return err;
+ }
+ } else {
+ /* The user did not give us a handle; let's just generate one
+ * from the table's pool of nodeids.
+ */
handle = gen_new_kid(ht, htid);
+ }
if (tb[TCA_U32_SEL] == NULL) {
NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
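
A hedged sketch of the handle layout the new comments describe, using the TC_U32_* helpers from include/uapi/linux/pkt_cls.h; sketch_make_handle() is illustrative only:

	/* htid(12b):bucketid(8b):node/entryid(12b) packed into one u32 */
	static u32 sketch_make_handle(u32 htid, u32 bucket, u32 node)
	{
		return (htid & 0xFFF00000) | ((bucket & 0xFF) << 12) |
		       (node & 0xFFF);
	}

	/* e.g. for 0x60001123: TC_U32_HTID() is 0x60000000,
	 * TC_U32_HASH() is 0x01 and TC_U32_NODE() is 0x123.
	 */
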
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 6f3c1fb2fb44..f176afb70559 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -97,8 +97,10 @@ retry:
static void em_text_destroy(struct tcf_ematch *m)
{
- if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config)
+ if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config) {
textsearch_destroy(EM_TEXT_PRIV(m)->config);
+ kfree(EM_TEXT_PRIV(m));
+ }
}
static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index dd3b8c11a2e0..43bfb33629e9 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -255,6 +255,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
* the value carried.
*/
if (em_hdr->flags & TCF_EM_SIMPLE) {
+ if (em->ops->datalen > 0)
+ goto errout;
if (data_len < sizeof(u32))
goto errout;
em->data = *(u32 *) data;
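
One hedged reading of why the new check is needed, based on how tcf_em_validate() stores match data elsewhere in this function: TCF_EM_SIMPLE keeps the 32-bit value inline in em->data, while ops with a nonzero ->datalen expect em->data to hold a pointer to an allocated struct; mixing the two would later make a ->destroy() callback dereference a plain integer:

	if (em_hdr->flags & TCF_EM_SIMPLE)
		em->data = *(u32 *)data;	/* value stored inline */
	else
		em->data = (unsigned long)kmemdup(data, data_len,
						  GFP_KERNEL);	/* pointer */
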
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 6f36df85d23d..d07146a2d0bb 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -298,7 +298,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
if (!handle)
return NULL;
- q = qdisc_match_from_root(dev->qdisc, handle);
+ q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
if (q)
goto out;
@@ -317,7 +317,7 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
if (!handle)
return NULL;
- q = qdisc_match_from_root(dev->qdisc, handle);
+ q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
if (q)
goto out;
@@ -1071,11 +1071,12 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
skip:
if (!ingress) {
- notify_and_destroy(net, skb, n, classid,
- dev->qdisc, new);
+ old = rtnl_dereference(dev->qdisc);
if (new && !new->ops->attach)
qdisc_refcount_inc(new);
- dev->qdisc = new ? : &noop_qdisc;
+ rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
+
+ notify_and_destroy(net, skb, n, classid, old, new);
if (new && new->ops->attach)
new->ops->attach(new);
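
The reordering above follows the usual RCU publish/retire discipline; a sketch of the invariant, with names taken from this hunk:

	/* Writer side (RTNL held): publish the new pointer with a
	 * release barrier before the old qdisc is notified and
	 * destroyed, so RCU readers never observe a freed qdisc.
	 */
	old = rtnl_dereference(dev->qdisc);
	rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
	notify_and_destroy(net, skb, n, classid, old, new);
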
@@ -1104,6 +1105,11 @@ skip:
return -ENOENT;
}
+ if (new && new->ops == &noqueue_qdisc_ops) {
+ NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
+ return -EINVAL;
+ }
+
err = cops->graft(parent, cl, new, &old, extack);
if (err)
return err;
@@ -1208,7 +1214,12 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
sch->parent = parent;
if (handle == TC_H_INGRESS) {
- sch->flags |= TCQ_F_INGRESS;
+ if (!(sch->flags & TCQ_F_INGRESS)) {
+ NL_SET_ERR_MSG(extack,
+ "Specified parent ID is reserved for ingress and clsact Qdiscs");
+ err = -EINVAL;
+ goto err_out3;
+ }
handle = TC_H_MAKE(TC_H_INGRESS, 0);
} else {
if (handle == 0) {
@@ -1450,7 +1461,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
}
if (!q) {
NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
@@ -1492,10 +1503,28 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
return 0;
}
+static bool req_create_or_replace(struct nlmsghdr *n)
+{
+ return (n->nlmsg_flags & NLM_F_CREATE &&
+ n->nlmsg_flags & NLM_F_REPLACE);
+}
+
+static bool req_create_exclusive(struct nlmsghdr *n)
+{
+ return (n->nlmsg_flags & NLM_F_CREATE &&
+ n->nlmsg_flags & NLM_F_EXCL);
+}
+
+static bool req_change(struct nlmsghdr *n)
+{
+ return (!(n->nlmsg_flags & NLM_F_CREATE) &&
+ !(n->nlmsg_flags & NLM_F_REPLACE) &&
+ !(n->nlmsg_flags & NLM_F_EXCL));
+}
+
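
A hedged mapping of the three helpers above onto the iproute2 commands that typically generate each flag combination (the tc command mappings are an assumption about current iproute2 behaviour, not part of this patch):

	/*   tc qdisc add     -> NLM_F_CREATE | NLM_F_EXCL    -> req_create_exclusive()
	 *   tc qdisc replace -> NLM_F_CREATE | NLM_F_REPLACE -> req_create_or_replace()
	 *   tc qdisc change  -> none of the three flags      -> req_change()
	 */
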
/*
* Create/change qdisc.
*/
-
static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
struct netlink_ext_ack *extack)
{
@@ -1539,7 +1568,7 @@ replay:
q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
}
/* It may be default qdisc, ignore it */
@@ -1568,11 +1597,20 @@ replay:
NL_SET_ERR_MSG(extack, "Invalid qdisc name");
return -EINVAL;
}
+ if (q->flags & TCQ_F_INGRESS) {
+ NL_SET_ERR_MSG(extack,
+ "Cannot regraft ingress or clsact Qdiscs");
+ return -EINVAL;
+ }
if (q == p ||
(p && check_loop(q, p, 0))) {
NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
return -ELOOP;
}
+ if (clid == TC_H_INGRESS) {
+ NL_SET_ERR_MSG(extack, "Ingress cannot graft directly");
+ return -EINVAL;
+ }
qdisc_refcount_inc(q);
goto graft;
} else {
@@ -1583,27 +1621,35 @@ replay:
*
* We know, that some child q is already
* attached to this parent and have choice:
- * either to change it or to create/graft new one.
+ * 1) change it, or 2) create/graft a new one.
+ * If the requested qdisc kind is different
+ * from the existing one, then we choose graft.
+ * If they are the same then this is a "change"
+ * operation - just let it fall through.
*
* 1. We are allowed to create/graft only
- * if CREATE and REPLACE flags are set.
+ * if the request is explicitly stating
+ * "please create if it doesn't exist".
*
- * 2. If EXCL is set, requestor wanted to say,
- * that qdisc tcm_handle is not expected
+ * 2. If the request is an exclusive create,
+ * then the qdisc tcm_handle is not expected
* to exist, so that we choose create/graft too.
*
* 3. The last case is when no flags are set.
+ * This will happen when, for example, the tc
+ * utility issues a "change" command.
* Alas, it is sort of a hole in the API; we
* cannot decide what to do unambiguously.
- * For now we select create/graft, if
- * user gave KIND, which does not match existing.
+ * For now we select create/graft.
*/
- if ((n->nlmsg_flags & NLM_F_CREATE) &&
- (n->nlmsg_flags & NLM_F_REPLACE) &&
- ((n->nlmsg_flags & NLM_F_EXCL) ||
- (tca[TCA_KIND] &&
- nla_strcmp(tca[TCA_KIND], q->ops->id))))
- goto create_n_graft;
+ if (tca[TCA_KIND] &&
+ nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+ if (req_create_or_replace(n) ||
+ req_create_exclusive(n))
+ goto create_n_graft;
+ else if (req_change(n))
+ goto create_n_graft2;
+ }
}
}
} else {
@@ -1637,6 +1683,7 @@ create_n_graft:
NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
return -ENOENT;
}
+create_n_graft2:
if (clid == TC_H_INGRESS) {
if (dev_ingress_queue(dev)) {
q = qdisc_create(dev, dev_ingress_queue(dev), p,
@@ -1761,7 +1808,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
s_q_idx = 0;
q_idx = 0;
- if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
+ if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
+ skb, cb, &q_idx, s_q_idx,
true, tca[TCA_DUMP_INVISIBLE]) < 0)
goto done;
@@ -2037,7 +2085,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
} else if (qid1) {
qid = qid1;
} else if (qid == 0)
- qid = dev->qdisc->handle;
+ qid = rtnl_dereference(dev->qdisc)->handle;
/* Now qid is genuine qdisc handle consistent
* both with parent and child.
@@ -2048,7 +2096,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
portid = TC_H_MAKE(qid, portid);
} else {
if (qid == 0)
- qid = dev->qdisc->handle;
+ qid = rtnl_dereference(dev->qdisc)->handle;
}
/* OK. Locate qdisc */
@@ -2209,7 +2257,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0];
t = 0;
- if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0)
+ if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
+ skb, tcm, cb, &t, s_t, true) < 0)
goto done;
dev_queue = dev_ingress_queue(dev);
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
deleted file mode 100644
index 6385995dc700..000000000000
--- a/net/sched/sch_atm.c
+++ /dev/null
@@ -1,707 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */
-
-/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-#include <linux/atmdev.h>
-#include <linux/atmclip.h>
-#include <linux/rtnetlink.h>
-#include <linux/file.h> /* for fput */
-#include <net/netlink.h>
-#include <net/pkt_sched.h>
-#include <net/pkt_cls.h>
-
-/*
- * The ATM queuing discipline provides a framework for invoking classifiers
- * (aka "filters"), which in turn select classes of this queuing discipline.
- * Each class maps the flow(s) it is handling to a given VC. Multiple classes
- * may share the same VC.
- *
- * When creating a class, VCs are specified by passing the number of the open
- * socket descriptor by which the calling process references the VC. The kernel
- * keeps the VC open at least until all classes using it are removed.
- *
- * In this file, most functions are named atm_tc_* to avoid confusion with all
- * the atm_* in net/atm. This naming convention differs from what's used in the
- * rest of net/sched.
- *
- * Known bugs:
- * - sometimes messes up the IP stack
- * - any manipulations besides the few operations described in the README are
- * untested and likely to crash the system
- * - should lock the flow while there is data in the queue (?)
- */
-
-#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
-
-struct atm_flow_data {
- struct Qdisc_class_common common;
- struct Qdisc *q; /* FIFO, TBF, etc. */
- struct tcf_proto __rcu *filter_list;
- struct tcf_block *block;
- struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */
- void (*old_pop)(struct atm_vcc *vcc,
- struct sk_buff *skb); /* chaining */
- struct atm_qdisc_data *parent; /* parent qdisc */
- struct socket *sock; /* for closing */
- int ref; /* reference count */
- struct gnet_stats_basic_packed bstats;
- struct gnet_stats_queue qstats;
- struct list_head list;
- struct atm_flow_data *excess; /* flow for excess traffic;
- NULL to set CLP instead */
- int hdr_len;
- unsigned char hdr[0]; /* header data; MUST BE LAST */
-};
-
-struct atm_qdisc_data {
- struct atm_flow_data link; /* unclassified skbs go here */
- struct list_head flows; /* NB: "link" is also on this
- list */
- struct tasklet_struct task; /* dequeue tasklet */
-};
-
-/* ------------------------- Class/flow operations ------------------------- */
-
-static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow;
-
- list_for_each_entry(flow, &p->flows, list) {
- if (flow->common.classid == classid)
- return flow;
- }
- return NULL;
-}
-
-static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
- struct Qdisc *new, struct Qdisc **old,
- struct netlink_ext_ack *extack)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)arg;
-
- pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
- sch, p, flow, new, old);
- if (list_empty(&flow->list))
- return -EINVAL;
- if (!new)
- new = &noop_qdisc;
- *old = flow->q;
- flow->q = new;
- if (*old)
- qdisc_reset(*old);
- return 0;
-}
-
-static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
-{
- struct atm_flow_data *flow = (struct atm_flow_data *)cl;
-
- pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
- return flow ? flow->q : NULL;
-}
-
-static unsigned long atm_tc_find(struct Qdisc *sch, u32 classid)
-{
- struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
- struct atm_flow_data *flow;
-
- pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
- flow = lookup_flow(sch, classid);
- pr_debug("%s: flow %p\n", __func__, flow);
- return (unsigned long)flow;
-}
-
-static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
- unsigned long parent, u32 classid)
-{
- struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
- struct atm_flow_data *flow;
-
- pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
- flow = lookup_flow(sch, classid);
- if (flow)
- flow->ref++;
- pr_debug("%s: flow %p\n", __func__, flow);
- return (unsigned long)flow;
-}
-
-/*
- * atm_tc_put handles all destructions, including the ones that are explicitly
- * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
- * anything that still seems to be in use.
- */
-static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)cl;
-
- pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
- if (--flow->ref)
- return;
- pr_debug("atm_tc_put: destroying\n");
- list_del_init(&flow->list);
- pr_debug("atm_tc_put: qdisc %p\n", flow->q);
- qdisc_put(flow->q);
- tcf_block_put(flow->block);
- if (flow->sock) {
- pr_debug("atm_tc_put: f_count %ld\n",
- file_count(flow->sock->file));
- flow->vcc->pop = flow->old_pop;
- sockfd_put(flow->sock);
- }
- if (flow->excess)
- atm_tc_put(sch, (unsigned long)flow->excess);
- if (flow != &p->link)
- kfree(flow);
- /*
- * If flow == &p->link, the qdisc no longer works at this point and
- * needs to be removed. (By the caller of atm_tc_put.)
- */
-}
-
-static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
-{
- struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;
-
- pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
- VCC2FLOW(vcc)->old_pop(vcc, skb);
- tasklet_schedule(&p->task);
-}
-
-static const u8 llc_oui_ip[] = {
- 0xaa, /* DSAP: non-ISO */
- 0xaa, /* SSAP: non-ISO */
- 0x03, /* Ctrl: Unnumbered Information Command PDU */
- 0x00, /* OUI: EtherType */
- 0x00, 0x00,
- 0x08, 0x00
-}; /* Ethertype IP (0800) */
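
[Editor's note] The 8-byte array above is the RFC 2684 (formerly RFC 1483) LLC/SNAP prefix for routed IPv4: SNAP DSAP/SSAP, an Unnumbered Information control byte, the all-zero OUI meaning "an EtherType follows", then EtherType 0x0800. A hypothetical helper (not in the original source) that generalizes the prefix to any EtherType and reproduces llc_oui_ip byte for byte when passed 0x0800:

    #include <stdint.h>

    static void build_llc_snap(uint8_t out[8], uint16_t ethertype)
    {
            out[0] = 0xaa;                  /* DSAP: SNAP */
            out[1] = 0xaa;                  /* SSAP: SNAP */
            out[2] = 0x03;                  /* Ctrl: UI PDU */
            out[3] = 0x00;                  /* OUI 00-00-00: EtherType follows */
            out[4] = 0x00;
            out[5] = 0x00;
            out[6] = ethertype >> 8;        /* 0x08 0x00 for IPv4 */
            out[7] = ethertype & 0xff;
    }

    int main(void)
    {
            uint8_t hdr[8];

            build_llc_snap(hdr, 0x0800);    /* matches llc_oui_ip above */
            return hdr[6] == 0x08 && hdr[7] == 0x00 ? 0 : 1;
    }
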
-
-static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
- [TCA_ATM_FD] = { .type = NLA_U32 },
- [TCA_ATM_EXCESS] = { .type = NLA_U32 },
-};
-
-static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
- struct nlattr **tca, unsigned long *arg,
- struct netlink_ext_ack *extack)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
- struct atm_flow_data *excess = NULL;
- struct nlattr *opt = tca[TCA_OPTIONS];
- struct nlattr *tb[TCA_ATM_MAX + 1];
- struct socket *sock;
- int fd, error, hdr_len;
- void *hdr;
-
- pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
- "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
- /*
- * The concept of parents doesn't apply to this qdisc.
- */
- if (parent && parent != TC_H_ROOT && parent != sch->handle)
- return -EINVAL;
- /*
- * ATM classes cannot be changed. In order to change properties of the
- * ATM connection, that socket needs to be modified directly (via the
- * native ATM API). In order to send a flow to a different VC, the old
- * class needs to be removed and a new one added. (This may be changed
- * later.)
- */
- if (flow)
- return -EBUSY;
- if (opt == NULL)
- return -EINVAL;
-
- error = nla_parse_nested_deprecated(tb, TCA_ATM_MAX, opt, atm_policy,
- NULL);
- if (error < 0)
- return error;
-
- if (!tb[TCA_ATM_FD])
- return -EINVAL;
- fd = nla_get_u32(tb[TCA_ATM_FD]);
- pr_debug("atm_tc_change: fd %d\n", fd);
- if (tb[TCA_ATM_HDR]) {
- hdr_len = nla_len(tb[TCA_ATM_HDR]);
- hdr = nla_data(tb[TCA_ATM_HDR]);
- } else {
- hdr_len = RFC1483LLC_LEN;
- hdr = NULL; /* default LLC/SNAP for IP */
- }
- if (!tb[TCA_ATM_EXCESS])
- excess = NULL;
- else {
- excess = (struct atm_flow_data *)
- atm_tc_find(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
- if (!excess)
- return -ENOENT;
- }
- pr_debug("atm_tc_change: type %d, payload %d, hdr_len %d\n",
- opt->nla_type, nla_len(opt), hdr_len);
- sock = sockfd_lookup(fd, &error);
- if (!sock)
- return error; /* f_count++ */
- pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
- if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
- error = -EPROTOTYPE;
- goto err_out;
- }
- /* @@@ should check if the socket is really operational or we'll crash
- on vcc->send */
- if (classid) {
- if (TC_H_MAJ(classid ^ sch->handle)) {
- pr_debug("atm_tc_change: classid mismatch\n");
- error = -EINVAL;
- goto err_out;
- }
- } else {
- int i;
- unsigned long cl;
-
- for (i = 1; i < 0x8000; i++) {
- classid = TC_H_MAKE(sch->handle, 0x8000 | i);
- cl = atm_tc_find(sch, classid);
- if (!cl)
- break;
- }
- }
- pr_debug("atm_tc_change: new id %x\n", classid);
- flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
- pr_debug("atm_tc_change: flow %p\n", flow);
- if (!flow) {
- error = -ENOBUFS;
- goto err_out;
- }
-
- error = tcf_block_get(&flow->block, &flow->filter_list, sch,
- extack);
- if (error) {
- kfree(flow);
- goto err_out;
- }
-
- flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
- extack);
- if (!flow->q)
- flow->q = &noop_qdisc;
- pr_debug("atm_tc_change: qdisc %p\n", flow->q);
- flow->sock = sock;
- flow->vcc = ATM_SD(sock); /* speedup */
- flow->vcc->user_back = flow;
- pr_debug("atm_tc_change: vcc %p\n", flow->vcc);
- flow->old_pop = flow->vcc->pop;
- flow->parent = p;
- flow->vcc->pop = sch_atm_pop;
- flow->common.classid = classid;
- flow->ref = 1;
- flow->excess = excess;
- list_add(&flow->list, &p->link.list);
- flow->hdr_len = hdr_len;
- if (hdr)
- memcpy(flow->hdr, hdr, hdr_len);
- else
- memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
- *arg = (unsigned long)flow;
- return 0;
-err_out:
- sockfd_put(sock);
- return error;
-}
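
[Editor's note] When no classid is supplied, the loop in atm_tc_change() above probes minors 0x8001..0xFFFF under the qdisc's major handle and stops at the first unused one (and quietly proceeds even if the search exhausts). A standalone sketch of that search, with in_use() standing in for atm_tc_find() (hypothetical helper, illustration only; TC_H_MAKE matches the kernel's definition):

    #include <stdbool.h>
    #include <stdint.h>

    #define TC_H_MAKE(maj, min) (((maj) & 0xFFFF0000U) | ((min) & 0x0000FFFFU))

    static uint32_t alloc_classid(uint32_t handle, bool (*in_use)(uint32_t))
    {
            for (uint32_t i = 1; i < 0x8000; i++) {
                    uint32_t classid = TC_H_MAKE(handle, 0x8000 | i);

                    if (!in_use(classid))
                            return classid;
            }
            return 0;       /* all minors taken; caller must handle this */
    }

    static bool always_free(uint32_t classid) { (void)classid; return false; }

    int main(void)
    {
            return alloc_classid(0x00010000, always_free) == 0x00018001 ? 0 : 1;
    }
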
-
-static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)arg;
-
- pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
- if (list_empty(&flow->list))
- return -EINVAL;
- if (rcu_access_pointer(flow->filter_list) || flow == &p->link)
- return -EBUSY;
- /*
- * Reference count must be 2: one for "keepalive" (set at class
- * creation), and one for the reference held when calling delete.
- */
- if (flow->ref < 2) {
- pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
- return -EINVAL;
- }
- if (flow->ref > 2)
- return -EBUSY; /* catch references via excess, etc. */
- atm_tc_put(sch, arg);
- return 0;
-}
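
[Editor's note] The ref checks above encode the class's reference convention: one "keepalive" reference taken at creation plus the one held by the caller of delete, so exactly ref == 2 means the class is otherwise unused; anything more means it is still bound elsewhere (e.g. as another flow's excess target). A toy standalone model of that invariant (illustrative only; toy_class and toy_delete are hypothetical names):

    #include <assert.h>

    struct toy_class { int ref; };

    static int toy_delete(struct toy_class *c)
    {
            if (c->ref < 2)
                    return -1;      /* corrupted: the keepalive ref is gone */
            if (c->ref > 2)
                    return -2;      /* still referenced elsewhere: busy */
            c->ref = 0;             /* both remaining refs dropped; free it */
            return 0;
    }

    int main(void)
    {
            struct toy_class c = { .ref = 1 };      /* keepalive from creation */

            c.ref++;                /* caller's ref, as taken by a lookup */
            assert(toy_delete(&c) == 0);
            return 0;
    }
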
-
-static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow;
-
- pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
- if (walker->stop)
- return;
- list_for_each_entry(flow, &p->flows, list) {
- if (walker->count >= walker->skip &&
- walker->fn(sch, (unsigned long)flow, walker) < 0) {
- walker->stop = 1;
- break;
- }
- walker->count++;
- }
-}
-
-static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
- struct netlink_ext_ack *extack)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)cl;
-
- pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
- return flow ? flow->block : p->link.block;
-}
-
-/* --------------------------- Qdisc operations ---------------------------- */
-
-static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
- struct sk_buff **to_free)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow;
- struct tcf_result res;
- int result;
- int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-
- pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
- result = TC_ACT_OK; /* be nice to gcc */
- flow = NULL;
- if (TC_H_MAJ(skb->priority) != sch->handle ||
- !(flow = (struct atm_flow_data *)atm_tc_find(sch, skb->priority))) {
- struct tcf_proto *fl;
-
- list_for_each_entry(flow, &p->flows, list) {
- fl = rcu_dereference_bh(flow->filter_list);
- if (fl) {
- result = tcf_classify(skb, fl, &res, true);
- if (result < 0)
- continue;
- flow = (struct atm_flow_data *)res.class;
- if (!flow)
- flow = lookup_flow(sch, res.classid);
- goto done;
- }
- }
- flow = NULL;
-done:
- ;
- }
- if (!flow) {
- flow = &p->link;
- } else {
- if (flow->vcc)
- ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
- /*@@@ looks good ... but it's not supposed to work :-) */
-#ifdef CONFIG_NET_CLS_ACT
- switch (result) {
- case TC_ACT_QUEUED:
- case TC_ACT_STOLEN:
- case TC_ACT_TRAP:
- __qdisc_drop(skb, to_free);
- return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
- case TC_ACT_SHOT:
- __qdisc_drop(skb, to_free);
- goto drop;
- case TC_ACT_RECLASSIFY:
- if (flow->excess)
- flow = flow->excess;
- else
- ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
- break;
- }
-#endif
- }
-
- ret = qdisc_enqueue(skb, flow->q, to_free);
- if (ret != NET_XMIT_SUCCESS) {
-drop: __maybe_unused
- if (net_xmit_drop_count(ret)) {
- qdisc_qstats_drop(sch);
- if (flow)
- flow->qstats.drops++;
- }
- return ret;
- }
- /*
- * Okay, this may seem weird. We pretend we've dropped the packet if
- * it goes via ATM. The reason for this is that the outer qdisc
- * expects to be able to q->dequeue the packet later on if we return
- * success at this place. Also, sch->q.qdisc needs to reflect whether
- * there is a packet eligible for dequeuing or not. Note that the
- * statistics of the outer qdisc are necessarily wrong because of all
- * this. There's currently no correct solution for this.
- */
- if (flow == &p->link) {
- sch->q.qlen++;
- return NET_XMIT_SUCCESS;
- }
- tasklet_schedule(&p->task);
- return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-}
-
-/*
- * Dequeue packets and send them over ATM. Note that we quite deliberately
- * avoid checking net_device's flow control here, simply because sch_atm
- * uses its own channels, which have nothing to do with any CLIP, LANE, or
- * non-ATM interfaces.
- */
-
-static void sch_atm_dequeue(unsigned long data)
-{
- struct Qdisc *sch = (struct Qdisc *)data;
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow;
- struct sk_buff *skb;
-
- pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
- list_for_each_entry(flow, &p->flows, list) {
- if (flow == &p->link)
- continue;
- /*
- * If traffic is properly shaped, this won't generate nasty
- * little bursts. Otherwise, it may ... (but that's okay)
- */
- while ((skb = flow->q->ops->peek(flow->q))) {
- if (!atm_may_send(flow->vcc, skb->truesize))
- break;
-
- skb = qdisc_dequeue_peeked(flow->q);
- if (unlikely(!skb))
- break;
-
- qdisc_bstats_update(sch, skb);
- bstats_update(&flow->bstats, skb);
- pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
- /* remove any LL header somebody else has attached */
- skb_pull(skb, skb_network_offset(skb));
- if (skb_headroom(skb) < flow->hdr_len) {
- struct sk_buff *new;
-
- new = skb_realloc_headroom(skb, flow->hdr_len);
- dev_kfree_skb(skb);
- if (!new)
- continue;
- skb = new;
- }
- pr_debug("sch_atm_dequeue: ip %p, data %p\n",
- skb_network_header(skb), skb->data);
- ATM_SKB(skb)->vcc = flow->vcc;
- memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
- flow->hdr_len);
- refcount_add(skb->truesize,
- &sk_atm(flow->vcc)->sk_wmem_alloc);
- /* atm.atm_options are already set by atm_tc_enqueue */
- flow->vcc->send(flow->vcc, skb);
- }
- }
-}
-
-static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct sk_buff *skb;
-
- pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
- tasklet_schedule(&p->task);
- skb = qdisc_dequeue_peeked(p->link.q);
- if (skb)
- sch->q.qlen--;
- return skb;
-}
-
-static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
-
- pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);
-
- return p->link.q->ops->peek(p->link.q);
-}
-
-static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
- struct netlink_ext_ack *extack)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- int err;
-
- pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
- INIT_LIST_HEAD(&p->flows);
- INIT_LIST_HEAD(&p->link.list);
- list_add(&p->link.list, &p->flows);
- p->link.q = qdisc_create_dflt(sch->dev_queue,
- &pfifo_qdisc_ops, sch->handle, extack);
- if (!p->link.q)
- p->link.q = &noop_qdisc;
- pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
- p->link.vcc = NULL;
- p->link.sock = NULL;
- p->link.common.classid = sch->handle;
- p->link.ref = 1;
-
- err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
- extack);
- if (err)
- return err;
-
- tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
- return 0;
-}
-
-static void atm_tc_reset(struct Qdisc *sch)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow;
-
- pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
- list_for_each_entry(flow, &p->flows, list)
- qdisc_reset(flow->q);
- sch->q.qlen = 0;
-}
-
-static void atm_tc_destroy(struct Qdisc *sch)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow, *tmp;
-
- pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
- list_for_each_entry(flow, &p->flows, list) {
- tcf_block_put(flow->block);
- flow->block = NULL;
- }
-
- list_for_each_entry_safe(flow, tmp, &p->flows, list) {
- if (flow->ref > 1)
- pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
- atm_tc_put(sch, (unsigned long)flow);
- }
- tasklet_kill(&p->task);
-}
-
-static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
- struct sk_buff *skb, struct tcmsg *tcm)
-{
- struct atm_qdisc_data *p = qdisc_priv(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *)cl;
- struct nlattr *nest;
-
- pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
- sch, p, flow, skb, tcm);
- if (list_empty(&flow->list))
- return -EINVAL;
- tcm->tcm_handle = flow->common.classid;
- tcm->tcm_info = flow->q->handle;
-
- nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
-
- if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
- goto nla_put_failure;
- if (flow->vcc) {
- struct sockaddr_atmpvc pvc;
- int state;
-
- memset(&pvc, 0, sizeof(pvc));
- pvc.sap_family = AF_ATMPVC;
- pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
- pvc.sap_addr.vpi = flow->vcc->vpi;
- pvc.sap_addr.vci = flow->vcc->vci;
- if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
- goto nla_put_failure;
- state = ATM_VF2VS(flow->vcc->flags);
- if (nla_put_u32(skb, TCA_ATM_STATE, state))
- goto nla_put_failure;
- }
- if (flow->excess) {
- if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->common.classid))
- goto nla_put_failure;
- } else {
- if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
- goto nla_put_failure;
- }
- return nla_nest_end(skb, nest);
-
-nla_put_failure:
- nla_nest_cancel(skb, nest);
- return -1;
-}
-static int
-atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
- struct gnet_dump *d)
-{
- struct atm_flow_data *flow = (struct atm_flow_data *)arg;
-
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, NULL, &flow->bstats) < 0 ||
- gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
- return -1;
-
- return 0;
-}
-
-static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
-{
- return 0;
-}
-
-static const struct Qdisc_class_ops atm_class_ops = {
- .graft = atm_tc_graft,
- .leaf = atm_tc_leaf,
- .find = atm_tc_find,
- .change = atm_tc_change,
- .delete = atm_tc_delete,
- .walk = atm_tc_walk,
- .tcf_block = atm_tc_tcf_block,
- .bind_tcf = atm_tc_bind_filter,
- .unbind_tcf = atm_tc_put,
- .dump = atm_tc_dump_class,
- .dump_stats = atm_tc_dump_class_stats,
-};
-
-static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
- .cl_ops = &atm_class_ops,
- .id = "atm",
- .priv_size = sizeof(struct atm_qdisc_data),
- .enqueue = atm_tc_enqueue,
- .dequeue = atm_tc_dequeue,
- .peek = atm_tc_peek,
- .init = atm_tc_init,
- .reset = atm_tc_reset,
- .destroy = atm_tc_destroy,
- .dump = atm_tc_dump,
- .owner = THIS_MODULE,
-};
-
-static int __init atm_init(void)
-{
- return register_qdisc(&atm_qdisc_ops);
-}
-
-static void __exit atm_exit(void)
-{
- unregister_qdisc(&atm_qdisc_ops);
-}
-
-module_init(atm_init)
-module_exit(atm_exit)
-MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 0eb4d4a568f7..7ee0b57b3ed4 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1660,7 +1660,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
{
struct cake_sched_data *q = qdisc_priv(sch);
int len = qdisc_pkt_len(skb);
- int uninitialized_var(ret);
+ int ret;
struct sk_buff *ack = NULL;
ktime_t now = ktime_get();
struct cake_tin_data *b;
@@ -2190,8 +2190,12 @@ retry:
static void cake_reset(struct Qdisc *sch)
{
+ struct cake_sched_data *q = qdisc_priv(sch);
u32 c;
+ if (!q->tins)
+ return;
+
for (c = 0; c < CAKE_MAX_TINS; c++)
cake_clear_tin(sch, c);
}
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
deleted file mode 100644
index e5972889cd81..000000000000
--- a/net/sched/sch_cbq.c
+++ /dev/null
@@ -1,1817 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * net/sched/sch_cbq.c Class-Based Queueing discipline.
- *
- * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-#include <net/netlink.h>
-#include <net/pkt_sched.h>
-#include <net/pkt_cls.h>
-
-
-/* Class-Based Queueing (CBQ) algorithm.
- =======================================
-
- Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
- Management Models for Packet Networks",
- IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995
-
- [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995
-
- [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
- Parameters", 1996
-
- [4] Sally Floyd and Michael Speer, "Experimental Results
- for Class-Based Queueing", 1998, not published.
-
- -----------------------------------------------------------------------
-
- Algorithm skeleton was taken from NS simulator cbq.cc.
- If someone wants to check this code against the LBL version,
- he should take into account that ONLY the skeleton was borrowed,
- the implementation is different. Particularly:
-
- --- The WRR algorithm is different. Our version looks more
- reasonable (I hope) and works when quanta are allowed to be
- less than MTU, which is always the case when real-time classes
- have small rates. Note that the statement of [3] is
- incomplete: delay may actually be estimated even if the class
- per-round allotment is less than MTU. Namely, if per-round
- allotment is W*r_i, and r_1+...+r_k = r < 1
-
- delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B
-
- In the worst case we have IntServ estimate with D = W*r+k*MTU
- and C = MTU*r. The proof (if correct at all) is trivial.
-
-
- --- It seems that cbq-2.0 is not very accurate. At least, I cannot
- interpret some places, which look like wrong translations
- from NS. Anyone is advised to find these differences
- and explain to me why I am wrong 8).
-
- --- Linux has no EOI event, so we cannot estimate true class
- idle time. The workaround is to treat the next dequeue event
- as a sign that the previous packet has finished. This is wrong because of
- internal device queueing, but on a permanently loaded link it holds.
- Moreover, combined with clock integrator, this scheme looks
- very close to an ideal solution. */
-
-struct cbq_sched_data;
-
-
-struct cbq_class {
- struct Qdisc_class_common common;
- struct cbq_class *next_alive; /* next class with backlog in this priority band */
-
-/* Parameters */
- unsigned char priority; /* class priority */
- unsigned char priority2; /* priority to be used after overlimit */
- unsigned char ewma_log; /* time constant for idle time calculation */
-
- u32 defmap;
-
- /* Link-sharing scheduler parameters */
- long maxidle; /* Class parameters: see below. */
- long offtime;
- long minidle;
- u32 avpkt;
- struct qdisc_rate_table *R_tab;
-
- /* General scheduler (WRR) parameters */
- long allot;
- long quantum; /* Allotment per WRR round */
- long weight; /* Relative allotment: see below */
-
- struct Qdisc *qdisc; /* Ptr to CBQ discipline */
- struct cbq_class *split; /* Ptr to split node */
- struct cbq_class *share; /* Ptr to LS parent in the class tree */
- struct cbq_class *tparent; /* Ptr to tree parent in the class tree */
- struct cbq_class *borrow; /* NULL if class is bandwidth limited;
- parent otherwise */
- struct cbq_class *sibling; /* Sibling chain */
- struct cbq_class *children; /* Pointer to children chain */
-
- struct Qdisc *q; /* Elementary queueing discipline */
-
-
-/* Variables */
- unsigned char cpriority; /* Effective priority */
- unsigned char delayed;
- unsigned char level; /* level of the class in hierarchy:
- 0 for leaf classes, and maximal
- level of children + 1 for nodes.
- */
-
- psched_time_t last; /* Last end of service */
- psched_time_t undertime;
- long avgidle;
- long deficit; /* Saved deficit for WRR */
- psched_time_t penalized;
- struct gnet_stats_basic_packed bstats;
- struct gnet_stats_queue qstats;
- struct net_rate_estimator __rcu *rate_est;
- struct tc_cbq_xstats xstats;
-
- struct tcf_proto __rcu *filter_list;
- struct tcf_block *block;
-
- int filters;
-
- struct cbq_class *defaults[TC_PRIO_MAX + 1];
-};
-
-struct cbq_sched_data {
- struct Qdisc_class_hash clhash; /* Hash table of all classes */
- int nclasses[TC_CBQ_MAXPRIO + 1];
- unsigned int quanta[TC_CBQ_MAXPRIO + 1];
-
- struct cbq_class link;
-
- unsigned int activemask;
- struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes
- with backlog */
-
-#ifdef CONFIG_NET_CLS_ACT
- struct cbq_class *rx_class;
-#endif
- struct cbq_class *tx_class;
- struct cbq_class *tx_borrowed;
- int tx_len;
- psched_time_t now; /* Cached timestamp */
- unsigned int pmask;
-
- struct hrtimer delay_timer;
- struct qdisc_watchdog watchdog; /* Watchdog timer,
- started when CBQ has
- backlog, but cannot
- transmit just now */
- psched_tdiff_t wd_expires;
- int toplevel;
- u32 hgenerator;
-};
-
-
-#define L2T(cl, len) qdisc_l2t((cl)->R_tab, len)
-
-static inline struct cbq_class *
-cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
-{
- struct Qdisc_class_common *clc;
-
- clc = qdisc_class_find(&q->clhash, classid);
- if (clc == NULL)
- return NULL;
- return container_of(clc, struct cbq_class, common);
-}
-
-#ifdef CONFIG_NET_CLS_ACT
-
-static struct cbq_class *
-cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
-{
- struct cbq_class *cl;
-
- for (cl = this->tparent; cl; cl = cl->tparent) {
- struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
-
- if (new != NULL && new != this)
- return new;
- }
- return NULL;
-}
-
-#endif
-
-/* Classify packet. The procedure is pretty complicated, but
- * it allows us to combine link sharing and priority scheduling
- * transparently.
- *
- * Namely, you can put link-sharing rules (e.g. route-based) at the root of
- * CBQ, so that they resolve to split nodes. Then packets are classified
- * by logical priority, or a more specific classifier may be attached
- * to the split node.
- */
-
-static struct cbq_class *
-cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *head = &q->link;
- struct cbq_class **defmap;
- struct cbq_class *cl = NULL;
- u32 prio = skb->priority;
- struct tcf_proto *fl;
- struct tcf_result res;
-
- /*
- * Step 1. If skb->priority points to one of our classes, use it.
- */
- if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
- (cl = cbq_class_lookup(q, prio)) != NULL)
- return cl;
-
- *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- for (;;) {
- int result = 0;
- defmap = head->defaults;
-
- fl = rcu_dereference_bh(head->filter_list);
- /*
- * Step 2+n. Apply classifier.
- */
- result = tcf_classify(skb, fl, &res, true);
- if (!fl || result < 0)
- goto fallback;
-
- cl = (void *)res.class;
- if (!cl) {
- if (TC_H_MAJ(res.classid))
- cl = cbq_class_lookup(q, res.classid);
- else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
- cl = defmap[TC_PRIO_BESTEFFORT];
-
- if (cl == NULL)
- goto fallback;
- }
- if (cl->level >= head->level)
- goto fallback;
-#ifdef CONFIG_NET_CLS_ACT
- switch (result) {
- case TC_ACT_QUEUED:
- case TC_ACT_STOLEN:
- case TC_ACT_TRAP:
- *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
- /* fall through */
- case TC_ACT_SHOT:
- return NULL;
- case TC_ACT_RECLASSIFY:
- return cbq_reclassify(skb, cl);
- }
-#endif
- if (cl->level == 0)
- return cl;
-
- /*
- * Step 3+n. If the classifier selected a link-sharing class,
- * apply the agency-specific classifier.
- * Repeat this procedure until we hit a leaf node.
- */
- head = cl;
- }
-
-fallback:
- cl = head;
-
- /*
- * Step 4. No success...
- */
- if (TC_H_MAJ(prio) == 0 &&
- !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
- !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
- return head;
-
- return cl;
-}
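
[Editor's note] Steps 2 and 4 above fall back through the split node's defmap: the low bits of the classid select a per-priority default class, and if that slot is empty the best-effort slot is tried. A minimal sketch of that lookup (illustration only; the TC_PRIO_* values match linux/pkt_sched.h, pick_default is a hypothetical name):

    #include <stddef.h>
    #include <stdio.h>

    #define TC_PRIO_MAX        15
    #define TC_PRIO_BESTEFFORT  0

    struct cls { const char *name; };

    static const struct cls *pick_default(const struct cls *defaults[],
                                          unsigned int classid)
    {
            const struct cls *cl = defaults[classid & TC_PRIO_MAX];

            /* empty slot: fall back to best effort, as cbq_classify() does */
            return cl ? cl : defaults[TC_PRIO_BESTEFFORT];
    }

    int main(void)
    {
            static const struct cls be = { "best-effort" }, bulk = { "bulk" };
            const struct cls *defaults[TC_PRIO_MAX + 1] = {
                    [TC_PRIO_BESTEFFORT] = &be, [2] = &bulk,
            };

            printf("prio 2 -> %s\n", pick_default(defaults, 2)->name);
            printf("prio 5 -> %s\n", pick_default(defaults, 5)->name);
            return 0;
    }
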
-
-/*
- * A packet has just been enqueued to an empty class.
- * cbq_activate_class adds the class to the tail of the active class list
- * of its priority band.
- */
-
-static inline void cbq_activate_class(struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
- int prio = cl->cpriority;
- struct cbq_class *cl_tail;
-
- cl_tail = q->active[prio];
- q->active[prio] = cl;
-
- if (cl_tail != NULL) {
- cl->next_alive = cl_tail->next_alive;
- cl_tail->next_alive = cl;
- } else {
- cl->next_alive = cl;
- q->activemask |= (1<<prio);
- }
-}
-
-/*
- * Unlink class from active chain.
- * Note that the same unlinking is also done directly in cbq_dequeue*
- * during the round-robin pass.
- */
-
-static void cbq_deactivate_class(struct cbq_class *this)
-{
- struct cbq_sched_data *q = qdisc_priv(this->qdisc);
- int prio = this->cpriority;
- struct cbq_class *cl;
- struct cbq_class *cl_prev = q->active[prio];
-
- do {
- cl = cl_prev->next_alive;
- if (cl == this) {
- cl_prev->next_alive = cl->next_alive;
- cl->next_alive = NULL;
-
- if (cl == q->active[prio]) {
- q->active[prio] = cl_prev;
- if (cl == q->active[prio]) {
- q->active[prio] = NULL;
- q->activemask &= ~(1<<prio);
- return;
- }
- }
- return;
- }
- } while ((cl_prev = cl) != q->active[prio]);
-}
-
-static void
-cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
-{
- int toplevel = q->toplevel;
-
- if (toplevel > cl->level) {
- psched_time_t now = psched_get_time();
-
- do {
- if (cl->undertime < now) {
- q->toplevel = cl->level;
- return;
- }
- } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
- }
-}
-
-static int
-cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
- struct sk_buff **to_free)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- int uninitialized_var(ret);
- struct cbq_class *cl = cbq_classify(skb, sch, &ret);
-
-#ifdef CONFIG_NET_CLS_ACT
- q->rx_class = cl;
-#endif
- if (cl == NULL) {
- if (ret & __NET_XMIT_BYPASS)
- qdisc_qstats_drop(sch);
- __qdisc_drop(skb, to_free);
- return ret;
- }
-
- ret = qdisc_enqueue(skb, cl->q, to_free);
- if (ret == NET_XMIT_SUCCESS) {
- sch->q.qlen++;
- cbq_mark_toplevel(q, cl);
- if (!cl->next_alive)
- cbq_activate_class(cl);
- return ret;
- }
-
- if (net_xmit_drop_count(ret)) {
- qdisc_qstats_drop(sch);
- cbq_mark_toplevel(q, cl);
- cl->qstats.drops++;
- }
- return ret;
-}
-
-/* Overlimit action: penalize leaf class by adding offtime */
-static void cbq_overlimit(struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
- psched_tdiff_t delay = cl->undertime - q->now;
-
- if (!cl->delayed) {
- delay += cl->offtime;
-
- /*
- * Class goes to sleep, so it will have no
- * chance to accumulate avgidle. Let's forgive it 8)
- *
- * BTW cbq-2.0 has a bug in this
- * place: apparently they forgot to shift by cl->ewma_log.
- */
- if (cl->avgidle < 0)
- delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
- if (cl->avgidle < cl->minidle)
- cl->avgidle = cl->minidle;
- if (delay <= 0)
- delay = 1;
- cl->undertime = q->now + delay;
-
- cl->xstats.overactions++;
- cl->delayed = 1;
- }
- if (q->wd_expires == 0 || q->wd_expires > delay)
- q->wd_expires = delay;
-
- /* Dirty work! We must schedule wakeups based on
- * real available rate, rather than leaf rate,
- * which may be tiny (even zero).
- */
- if (q->toplevel == TC_CBQ_MAXLEVEL) {
- struct cbq_class *b;
- psched_tdiff_t base_delay = q->wd_expires;
-
- for (b = cl->borrow; b; b = b->borrow) {
- delay = b->undertime - q->now;
- if (delay < base_delay) {
- if (delay <= 0)
- delay = 1;
- base_delay = delay;
- }
- }
-
- q->wd_expires = base_delay;
- }
-}
-
-static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
- psched_time_t now)
-{
- struct cbq_class *cl;
- struct cbq_class *cl_prev = q->active[prio];
- psched_time_t sched = now;
-
- if (cl_prev == NULL)
- return 0;
-
- do {
- cl = cl_prev->next_alive;
- if (now - cl->penalized > 0) {
- cl_prev->next_alive = cl->next_alive;
- cl->next_alive = NULL;
- cl->cpriority = cl->priority;
- cl->delayed = 0;
- cbq_activate_class(cl);
-
- if (cl == q->active[prio]) {
- q->active[prio] = cl_prev;
- if (cl == q->active[prio]) {
- q->active[prio] = NULL;
- return 0;
- }
- }
-
- cl = cl_prev->next_alive;
- } else if (sched - cl->penalized > 0)
- sched = cl->penalized;
- } while ((cl_prev = cl) != q->active[prio]);
-
- return sched - now;
-}
-
-static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
-{
- struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
- delay_timer);
- struct Qdisc *sch = q->watchdog.qdisc;
- psched_time_t now;
- psched_tdiff_t delay = 0;
- unsigned int pmask;
-
- now = psched_get_time();
-
- pmask = q->pmask;
- q->pmask = 0;
-
- while (pmask) {
- int prio = ffz(~pmask);
- psched_tdiff_t tmp;
-
- pmask &= ~(1<<prio);
-
- tmp = cbq_undelay_prio(q, prio, now);
- if (tmp > 0) {
- q->pmask |= 1<<prio;
- if (tmp < delay || delay == 0)
- delay = tmp;
- }
- }
-
- if (delay) {
- ktime_t time;
-
- time = 0;
- time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
- hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
- }
-
- __netif_schedule(qdisc_root(sch));
- return HRTIMER_NORESTART;
-}
-
-/*
- * This is a mission-critical procedure.
- *
- * We "regenerate" the toplevel cutoff if the transmitting class
- * has backlog and is not regulated. This is not part of the
- * original CBQ description, but it looks more reasonable.
- * It may well be wrong; the question needs further investigation.
- */
-
-static inline void
-cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
- struct cbq_class *borrowed)
-{
- if (cl && q->toplevel >= borrowed->level) {
- if (cl->q->q.qlen > 1) {
- do {
- if (borrowed->undertime == PSCHED_PASTPERFECT) {
- q->toplevel = borrowed->level;
- return;
- }
- } while ((borrowed = borrowed->borrow) != NULL);
- }
-#if 0
- /* It is not necessary now. Uncommenting it
- would save CPU cycles but decrease fairness.
- */
- q->toplevel = TC_CBQ_MAXLEVEL;
-#endif
- }
-}
-
-static void
-cbq_update(struct cbq_sched_data *q)
-{
- struct cbq_class *this = q->tx_class;
- struct cbq_class *cl = this;
- int len = q->tx_len;
- psched_time_t now;
-
- q->tx_class = NULL;
- /* Time integrator. We calculate EOS time
- * by adding expected packet transmission time.
- */
- now = q->now + L2T(&q->link, len);
-
- for ( ; cl; cl = cl->share) {
- long avgidle = cl->avgidle;
- long idle;
-
- cl->bstats.packets++;
- cl->bstats.bytes += len;
-
- /*
- * (now - last) is total time between packet right edges.
- * (last_pktlen/rate) is "virtual" busy time, so that
- *
- * idle = (now - last) - last_pktlen/rate
- */
-
- idle = now - cl->last;
- if ((unsigned long)idle > 128*1024*1024) {
- avgidle = cl->maxidle;
- } else {
- idle -= L2T(cl, len);
-
- /* true_avgidle := (1-W)*true_avgidle + W*idle,
- * where W=2^{-ewma_log}. But cl->avgidle is scaled:
- * cl->avgidle == true_avgidle/W,
- * hence:
- */
- avgidle += idle - (avgidle>>cl->ewma_log);
- }
-
- if (avgidle <= 0) {
- /* Overlimit or at-limit */
-
- if (avgidle < cl->minidle)
- avgidle = cl->minidle;
-
- cl->avgidle = avgidle;
-
- /* Calculate the expected time when this class
- * will be allowed to send.
- * That will occur when:
- * (1-W)*true_avgidle + W*delay = 0, i.e.
- * idle = (1/W - 1)*(-true_avgidle)
- * or
- * idle = (1 - W)*(-cl->avgidle);
- */
- idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
-
- /*
- * That is not all.
- * To maintain the rate allocated to the class,
- * we add to undertime the virtual clock time
- * needed to complete the transmitted packet.
- * (len/phys_bandwidth has already elapsed
- * by the time cbq_update runs.)
- */
-
- idle -= L2T(&q->link, len);
- idle += L2T(cl, len);
-
- cl->undertime = now + idle;
- } else {
- /* Underlimit */
-
- cl->undertime = PSCHED_PASTPERFECT;
- if (avgidle > cl->maxidle)
- cl->avgidle = cl->maxidle;
- else
- cl->avgidle = avgidle;
- }
- if ((s64)(now - cl->last) > 0)
- cl->last = now;
- }
-
- cbq_update_toplevel(q, this, q->tx_borrowed);
-}
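
[Editor's note] The scaled EWMA in cbq_update() above avoids any multiply or divide: since avgidle stores true_avgidle / W with W = 2^-ewma_log, the update true_avgidle := (1-W)*true_avgidle + W*idle collapses to one shift and one add per sample. A standalone demonstration with made-up sample values (illustrative only):

    #include <stdio.h>

    int main(void)
    {
            const int ewma_log = 3;         /* W = 2^-3 = 1/8 */
            long avg_scaled = 0;            /* == true_avg / W */
            long samples[] = { 800, 400, 1200, 600 };

            for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                    /* true_avg := (1-W)*true_avg + W*sample, in scaled form */
                    avg_scaled += samples[i] - (avg_scaled >> ewma_log);
                    printf("sample %ld -> true_avg %ld\n",
                           samples[i], avg_scaled >> ewma_log);
            }
            return 0;
    }
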
-
-static inline struct cbq_class *
-cbq_under_limit(struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
- struct cbq_class *this_cl = cl;
-
- if (cl->tparent == NULL)
- return cl;
-
- if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
- cl->delayed = 0;
- return cl;
- }
-
- do {
- /* This is a very suspicious place. Currently, the overlimit
- * action is generated for non-bounded classes
- * only if the link is completely congested.
- * Though this agrees with the ancestor-only paradigm,
- * it looks very stupid. In particular,
- * it means that this chunk of code will either
- * never be called or will result in strong amplification
- * of burstiness. Dangerous and silly, but
- * no other solution exists.
- */
- cl = cl->borrow;
- if (!cl) {
- this_cl->qstats.overlimits++;
- cbq_overlimit(this_cl);
- return NULL;
- }
- if (cl->level > q->toplevel)
- return NULL;
- } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);
-
- cl->delayed = 0;
- return cl;
-}
-
-static inline struct sk_buff *
-cbq_dequeue_prio(struct Qdisc *sch, int prio)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl_tail, *cl_prev, *cl;
- struct sk_buff *skb;
- int deficit;
-
- cl_tail = cl_prev = q->active[prio];
- cl = cl_prev->next_alive;
-
- do {
- deficit = 0;
-
- /* Start round */
- do {
- struct cbq_class *borrow = cl;
-
- if (cl->q->q.qlen &&
- (borrow = cbq_under_limit(cl)) == NULL)
- goto skip_class;
-
- if (cl->deficit <= 0) {
- /* Class exhausted its allotment per
- * this round. Switch to the next one.
- */
- deficit = 1;
- cl->deficit += cl->quantum;
- goto next_class;
- }
-
- skb = cl->q->dequeue(cl->q);
-
- /* Class did not give us any skb :-(
- * This can occur even if cl->q->q.qlen != 0,
- * e.g. if cl->q == "tbf"
- */
- if (skb == NULL)
- goto skip_class;
-
- cl->deficit -= qdisc_pkt_len(skb);
- q->tx_class = cl;
- q->tx_borrowed = borrow;
- if (borrow != cl) {
-#ifndef CBQ_XSTATS_BORROWS_BYTES
- borrow->xstats.borrows++;
- cl->xstats.borrows++;
-#else
- borrow->xstats.borrows += qdisc_pkt_len(skb);
- cl->xstats.borrows += qdisc_pkt_len(skb);
-#endif
- }
- q->tx_len = qdisc_pkt_len(skb);
-
- if (cl->deficit <= 0) {
- q->active[prio] = cl;
- cl = cl->next_alive;
- cl->deficit += cl->quantum;
- }
- return skb;
-
-skip_class:
- if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
- /* Class is empty or penalized.
- * Unlink it from active chain.
- */
- cl_prev->next_alive = cl->next_alive;
- cl->next_alive = NULL;
-
- /* Did cl_tail point to it? */
- if (cl == cl_tail) {
- /* Repair it! */
- cl_tail = cl_prev;
-
- /* Was it the last class in this band? */
- if (cl == cl_tail) {
- /* Kill the band! */
- q->active[prio] = NULL;
- q->activemask &= ~(1<<prio);
- if (cl->q->q.qlen)
- cbq_activate_class(cl);
- return NULL;
- }
-
- q->active[prio] = cl_tail;
- }
- if (cl->q->q.qlen)
- cbq_activate_class(cl);
-
- cl = cl_prev;
- }
-
-next_class:
- cl_prev = cl;
- cl = cl->next_alive;
- } while (cl_prev != cl_tail);
- } while (deficit);
-
- q->active[prio] = cl_prev;
-
- return NULL;
-}
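
[Editor's note] Stripped of the borrowing and unlinking machinery, the loop above is deficit round-robin: a class may send while its deficit is positive, and a class that exhausts its allotment is recharged by its quantum and skipped until the next round. A toy standalone version (illustration only, not the kernel code; assumes every quantum > 0 so the loop terminates):

    #include <stdio.h>

    struct cls { const char *name; long deficit, quantum, pkt_len; int pkts; };

    int main(void)
    {
            struct cls band[] = {
                    { "bulk",  0, 1500, 1500, 3 },
                    { "small", 0, 1500,  200, 3 },
            };
            int remaining = 6;

            while (remaining) {
                    for (unsigned int i = 0; i < 2; i++) {
                            struct cls *cl = &band[i];

                            if (!cl->pkts)
                                    continue;
                            if (cl->deficit <= 0) { /* allotment used up */
                                    cl->deficit += cl->quantum;
                                    continue;       /* next class this round */
                            }
                            cl->deficit -= cl->pkt_len;     /* "send" one packet */
                            cl->pkts--;
                            remaining--;
                            printf("sent %4ld bytes from %s\n",
                                   cl->pkt_len, cl->name);
                    }
            }
            return 0;
    }
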
-
-static inline struct sk_buff *
-cbq_dequeue_1(struct Qdisc *sch)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct sk_buff *skb;
- unsigned int activemask;
-
- activemask = q->activemask & 0xFF;
- while (activemask) {
- int prio = ffz(~activemask);
- activemask &= ~(1<<prio);
- skb = cbq_dequeue_prio(sch, prio);
- if (skb)
- return skb;
- }
- return NULL;
-}
-
-static struct sk_buff *
-cbq_dequeue(struct Qdisc *sch)
-{
- struct sk_buff *skb;
- struct cbq_sched_data *q = qdisc_priv(sch);
- psched_time_t now;
-
- now = psched_get_time();
-
- if (q->tx_class)
- cbq_update(q);
-
- q->now = now;
-
- for (;;) {
- q->wd_expires = 0;
-
- skb = cbq_dequeue_1(sch);
- if (skb) {
- qdisc_bstats_update(sch, skb);
- sch->q.qlen--;
- return skb;
- }
-
- /* All the classes are overlimit.
- *
- * This can happen if:
- *
- * 1. The scheduler is empty.
- * 2. The toplevel cutoff inhibited borrowing.
- * 3. The root class is overlimit.
- *
- * Reset the 2nd and 3rd conditions and retry.
- *
- * Note that NS and cbq-2.0 are buggy: peeking
- * an arbitrary class is appropriate for ancestor-only
- * sharing, but not for the toplevel algorithm.
- *
- * Our version is better but slower, because it requires
- * two passes; that is unavoidable with top-level sharing.
- */
-
- if (q->toplevel == TC_CBQ_MAXLEVEL &&
- q->link.undertime == PSCHED_PASTPERFECT)
- break;
-
- q->toplevel = TC_CBQ_MAXLEVEL;
- q->link.undertime = PSCHED_PASTPERFECT;
- }
-
- /* No packets in the scheduler, or nobody wants to give them to us :-(
- * Sigh... start the watchdog timer in the latter case.
- */
-
- if (sch->q.qlen) {
- qdisc_qstats_overlimit(sch);
- if (q->wd_expires)
- qdisc_watchdog_schedule(&q->watchdog,
- now + q->wd_expires);
- }
- return NULL;
-}
-
-/* CBQ class maintenance routines */
-
-static void cbq_adjust_levels(struct cbq_class *this)
-{
- if (this == NULL)
- return;
-
- do {
- int level = 0;
- struct cbq_class *cl;
-
- cl = this->children;
- if (cl) {
- do {
- if (cl->level > level)
- level = cl->level;
- } while ((cl = cl->sibling) != this->children);
- }
- this->level = level + 1;
- } while ((this = this->tparent) != NULL);
-}
-
-static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
-{
- struct cbq_class *cl;
- unsigned int h;
-
- if (q->quanta[prio] == 0)
- return;
-
- for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
- /* BUGGGG... Beware! This expression suffers from
- * arithmetic overflows!
- */
- if (cl->priority == prio) {
- cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
- q->quanta[prio];
- }
- if (cl->quantum <= 0 ||
- cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
- pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
- cl->common.classid, cl->quantum);
- cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
- }
- }
- }
-}
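
[Editor's note] The overflow the comment above warns about is in weight*allot*nclasses, computed in native long arithmetic. One conventional mitigation, shown as a sketch rather than the kernel's actual fix (safe_quantum is a hypothetical name), is to widen the intermediate product to 64 bits before dividing:

    #include <stdint.h>

    static long safe_quantum(long weight, long allot, int nclasses,
                             unsigned int quanta_sum)
    {
            /* 64-bit intermediate keeps weight*allot*nclasses from wrapping */
            int64_t prod = (int64_t)weight * allot * nclasses;

            return (long)(prod / quanta_sum);
    }

    int main(void)
    {
            /* operands large enough to overflow 32-bit math */
            return safe_quantum(100000, 65536, 32, 1000000) > 0 ? 0 : 1;
    }
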
-
-static void cbq_sync_defmap(struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
- struct cbq_class *split = cl->split;
- unsigned int h;
- int i;
-
- if (split == NULL)
- return;
-
- for (i = 0; i <= TC_PRIO_MAX; i++) {
- if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
- split->defaults[i] = NULL;
- }
-
- for (i = 0; i <= TC_PRIO_MAX; i++) {
- int level = split->level;
-
- if (split->defaults[i])
- continue;
-
- for (h = 0; h < q->clhash.hashsize; h++) {
- struct cbq_class *c;
-
- hlist_for_each_entry(c, &q->clhash.hash[h],
- common.hnode) {
- if (c->split == split && c->level < level &&
- c->defmap & (1<<i)) {
- split->defaults[i] = c;
- level = c->level;
- }
- }
- }
- }
-}
-
-static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
-{
- struct cbq_class *split = NULL;
-
- if (splitid == 0) {
- split = cl->split;
- if (!split)
- return;
- splitid = split->common.classid;
- }
-
- if (split == NULL || split->common.classid != splitid) {
- for (split = cl->tparent; split; split = split->tparent)
- if (split->common.classid == splitid)
- break;
- }
-
- if (split == NULL)
- return;
-
- if (cl->split != split) {
- cl->defmap = 0;
- cbq_sync_defmap(cl);
- cl->split = split;
- cl->defmap = def & mask;
- } else
- cl->defmap = (cl->defmap & ~mask) | (def & mask);
-
- cbq_sync_defmap(cl);
-}
-
-static void cbq_unlink_class(struct cbq_class *this)
-{
- struct cbq_class *cl, **clp;
- struct cbq_sched_data *q = qdisc_priv(this->qdisc);
-
- qdisc_class_hash_remove(&q->clhash, &this->common);
-
- if (this->tparent) {
- clp = &this->sibling;
- cl = *clp;
- do {
- if (cl == this) {
- *clp = cl->sibling;
- break;
- }
- clp = &cl->sibling;
- } while ((cl = *clp) != this->sibling);
-
- if (this->tparent->children == this) {
- this->tparent->children = this->sibling;
- if (this->sibling == this)
- this->tparent->children = NULL;
- }
- } else {
- WARN_ON(this->sibling != this);
- }
-}
-
-static void cbq_link_class(struct cbq_class *this)
-{
- struct cbq_sched_data *q = qdisc_priv(this->qdisc);
- struct cbq_class *parent = this->tparent;
-
- this->sibling = this;
- qdisc_class_hash_insert(&q->clhash, &this->common);
-
- if (parent == NULL)
- return;
-
- if (parent->children == NULL) {
- parent->children = this;
- } else {
- this->sibling = parent->children->sibling;
- parent->children->sibling = this;
- }
-}
-
-static void
-cbq_reset(struct Qdisc *sch)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl;
- int prio;
- unsigned int h;
-
- q->activemask = 0;
- q->pmask = 0;
- q->tx_class = NULL;
- q->tx_borrowed = NULL;
- qdisc_watchdog_cancel(&q->watchdog);
- hrtimer_cancel(&q->delay_timer);
- q->toplevel = TC_CBQ_MAXLEVEL;
- q->now = psched_get_time();
-
- for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
- q->active[prio] = NULL;
-
- for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
- qdisc_reset(cl->q);
-
- cl->next_alive = NULL;
- cl->undertime = PSCHED_PASTPERFECT;
- cl->avgidle = cl->maxidle;
- cl->deficit = cl->quantum;
- cl->cpriority = cl->priority;
- }
- }
- sch->q.qlen = 0;
-}
-
-
-static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
-{
- if (lss->change & TCF_CBQ_LSS_FLAGS) {
- cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
- cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
- }
- if (lss->change & TCF_CBQ_LSS_EWMA)
- cl->ewma_log = lss->ewma_log;
- if (lss->change & TCF_CBQ_LSS_AVPKT)
- cl->avpkt = lss->avpkt;
- if (lss->change & TCF_CBQ_LSS_MINIDLE)
- cl->minidle = -(long)lss->minidle;
- if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
- cl->maxidle = lss->maxidle;
- cl->avgidle = lss->maxidle;
- }
- if (lss->change & TCF_CBQ_LSS_OFFTIME)
- cl->offtime = lss->offtime;
- return 0;
-}
-
-static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
-{
- q->nclasses[cl->priority]--;
- q->quanta[cl->priority] -= cl->weight;
- cbq_normalize_quanta(q, cl->priority);
-}
-
-static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
-{
- q->nclasses[cl->priority]++;
- q->quanta[cl->priority] += cl->weight;
- cbq_normalize_quanta(q, cl->priority);
-}
-
-static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
-{
- struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-
- if (wrr->allot)
- cl->allot = wrr->allot;
- if (wrr->weight)
- cl->weight = wrr->weight;
- if (wrr->priority) {
- cl->priority = wrr->priority - 1;
- cl->cpriority = cl->priority;
- if (cl->priority >= cl->priority2)
- cl->priority2 = TC_CBQ_MAXPRIO - 1;
- }
-
- cbq_addprio(q, cl);
- return 0;
-}
-
-static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
-{
- cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
- return 0;
-}
-
-static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
- [TCA_CBQ_LSSOPT] = { .len = sizeof(struct tc_cbq_lssopt) },
- [TCA_CBQ_WRROPT] = { .len = sizeof(struct tc_cbq_wrropt) },
- [TCA_CBQ_FOPT] = { .len = sizeof(struct tc_cbq_fopt) },
- [TCA_CBQ_OVL_STRATEGY] = { .len = sizeof(struct tc_cbq_ovl) },
- [TCA_CBQ_RATE] = { .len = sizeof(struct tc_ratespec) },
- [TCA_CBQ_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
- [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
-};
-
-static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
- struct nlattr *opt,
- struct netlink_ext_ack *extack)
-{
- int err;
-
- if (!opt) {
- NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
- return -EINVAL;
- }
-
- err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt,
- cbq_policy, extack);
- if (err < 0)
- return err;
-
- if (tb[TCA_CBQ_WRROPT]) {
- const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
-
- if (wrr->priority > TC_CBQ_MAXPRIO) {
- NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
- err = -EINVAL;
- }
- }
- return err;
-}
-
-static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
- struct netlink_ext_ack *extack)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct nlattr *tb[TCA_CBQ_MAX + 1];
- struct tc_ratespec *r;
- int err;
-
- qdisc_watchdog_init(&q->watchdog, sch);
- hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
- q->delay_timer.function = cbq_undelay;
-
- err = cbq_opt_parse(tb, opt, extack);
- if (err < 0)
- return err;
-
- if (!tb[TCA_CBQ_RTAB] || !tb[TCA_CBQ_RATE]) {
- NL_SET_ERR_MSG(extack, "Rate specification missing or incomplete");
- return -EINVAL;
- }
-
- r = nla_data(tb[TCA_CBQ_RATE]);
-
- q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
- if (!q->link.R_tab)
- return -EINVAL;
-
- err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
- if (err)
- goto put_rtab;
-
- err = qdisc_class_hash_init(&q->clhash);
- if (err < 0)
- goto put_block;
-
- q->link.sibling = &q->link;
- q->link.common.classid = sch->handle;
- q->link.qdisc = sch;
- q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
- sch->handle, NULL);
- if (!q->link.q)
- q->link.q = &noop_qdisc;
- else
- qdisc_hash_add(q->link.q, true);
-
- q->link.priority = TC_CBQ_MAXPRIO - 1;
- q->link.priority2 = TC_CBQ_MAXPRIO - 1;
- q->link.cpriority = TC_CBQ_MAXPRIO - 1;
- q->link.allot = psched_mtu(qdisc_dev(sch));
- q->link.quantum = q->link.allot;
- q->link.weight = q->link.R_tab->rate.rate;
-
- q->link.ewma_log = TC_CBQ_DEF_EWMA;
- q->link.avpkt = q->link.allot/2;
- q->link.minidle = -0x7FFFFFFF;
-
- q->toplevel = TC_CBQ_MAXLEVEL;
- q->now = psched_get_time();
-
- cbq_link_class(&q->link);
-
- if (tb[TCA_CBQ_LSSOPT])
- cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));
-
- cbq_addprio(q, &q->link);
- return 0;
-
-put_block:
- tcf_block_put(q->link.block);
-
-put_rtab:
- qdisc_put_rtab(q->link.R_tab);
- return err;
-}
-
-static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
-{
- unsigned char *b = skb_tail_pointer(skb);
-
- if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
- goto nla_put_failure;
- return skb->len;
-
-nla_put_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-
-static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
-{
- unsigned char *b = skb_tail_pointer(skb);
- struct tc_cbq_lssopt opt;
-
- opt.flags = 0;
- if (cl->borrow == NULL)
- opt.flags |= TCF_CBQ_LSS_BOUNDED;
- if (cl->share == NULL)
- opt.flags |= TCF_CBQ_LSS_ISOLATED;
- opt.ewma_log = cl->ewma_log;
- opt.level = cl->level;
- opt.avpkt = cl->avpkt;
- opt.maxidle = cl->maxidle;
- opt.minidle = (u32)(-cl->minidle);
- opt.offtime = cl->offtime;
- opt.change = ~0;
- if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
- goto nla_put_failure;
- return skb->len;
-
-nla_put_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-
-static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
-{
- unsigned char *b = skb_tail_pointer(skb);
- struct tc_cbq_wrropt opt;
-
- memset(&opt, 0, sizeof(opt));
- opt.flags = 0;
- opt.allot = cl->allot;
- opt.priority = cl->priority + 1;
- opt.cpriority = cl->cpriority + 1;
- opt.weight = cl->weight;
- if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
- goto nla_put_failure;
- return skb->len;
-
-nla_put_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-
-static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
-{
- unsigned char *b = skb_tail_pointer(skb);
- struct tc_cbq_fopt opt;
-
- if (cl->split || cl->defmap) {
- opt.split = cl->split ? cl->split->common.classid : 0;
- opt.defmap = cl->defmap;
- opt.defchange = ~0;
- if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
- goto nla_put_failure;
- }
- return skb->len;
-
-nla_put_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-
-static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
-{
- if (cbq_dump_lss(skb, cl) < 0 ||
- cbq_dump_rate(skb, cl) < 0 ||
- cbq_dump_wrr(skb, cl) < 0 ||
- cbq_dump_fopt(skb, cl) < 0)
- return -1;
- return 0;
-}
-
-static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct nlattr *nest;
-
- nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
- if (cbq_dump_attr(skb, &q->link) < 0)
- goto nla_put_failure;
- return nla_nest_end(skb, nest);
-
-nla_put_failure:
- nla_nest_cancel(skb, nest);
- return -1;
-}
-
-static int
-cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
-
- q->link.xstats.avgidle = q->link.avgidle;
- return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
-}
-
-static int
-cbq_dump_class(struct Qdisc *sch, unsigned long arg,
- struct sk_buff *skb, struct tcmsg *tcm)
-{
- struct cbq_class *cl = (struct cbq_class *)arg;
- struct nlattr *nest;
-
- if (cl->tparent)
- tcm->tcm_parent = cl->tparent->common.classid;
- else
- tcm->tcm_parent = TC_H_ROOT;
- tcm->tcm_handle = cl->common.classid;
- tcm->tcm_info = cl->q->handle;
-
- nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
- if (cbq_dump_attr(skb, cl) < 0)
- goto nla_put_failure;
- return nla_nest_end(skb, nest);
-
-nla_put_failure:
- nla_nest_cancel(skb, nest);
- return -1;
-}
-
-static int
-cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
- struct gnet_dump *d)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class *)arg;
- __u32 qlen;
-
- cl->xstats.avgidle = cl->avgidle;
- cl->xstats.undertime = 0;
- qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
-
- if (cl->undertime != PSCHED_PASTPERFECT)
- cl->xstats.undertime = cl->undertime - q->now;
-
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, NULL, &cl->bstats) < 0 ||
- gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
- return -1;
-
- return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
-}
-
-static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- struct Qdisc **old, struct netlink_ext_ack *extack)
-{
- struct cbq_class *cl = (struct cbq_class *)arg;
-
- if (new == NULL) {
- new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
- cl->common.classid, extack);
- if (new == NULL)
- return -ENOBUFS;
- }
-
- *old = qdisc_replace(sch, new, &cl->q);
- return 0;
-}
-
-static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
-{
- struct cbq_class *cl = (struct cbq_class *)arg;
-
- return cl->q;
-}
-
-static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
-{
- struct cbq_class *cl = (struct cbq_class *)arg;
-
- cbq_deactivate_class(cl);
-}
-
-static unsigned long cbq_find(struct Qdisc *sch, u32 classid)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
-
- return (unsigned long)cbq_class_lookup(q, classid);
-}
-
-static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
-
- WARN_ON(cl->filters);
-
- tcf_block_put(cl->block);
- qdisc_put(cl->q);
- qdisc_put_rtab(cl->R_tab);
- gen_kill_estimator(&cl->rate_est);
- if (cl != &q->link)
- kfree(cl);
-}
-
-static void cbq_destroy(struct Qdisc *sch)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct hlist_node *next;
- struct cbq_class *cl;
- unsigned int h;
-
-#ifdef CONFIG_NET_CLS_ACT
- q->rx_class = NULL;
-#endif
- /*
- * Filters must be destroyed first because we don't destroy the
- * classes from root to leaves, which means that filters can still
- * be bound to classes which have already been destroyed. --TGR '04
- */
- for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
- tcf_block_put(cl->block);
- cl->block = NULL;
- }
- }
- for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
- common.hnode)
- cbq_destroy_class(sch, cl);
- }
- qdisc_class_hash_destroy(&q->clhash);
-}
-
-static int
-cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
- unsigned long *arg, struct netlink_ext_ack *extack)
-{
- int err;
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class *)*arg;
- struct nlattr *opt = tca[TCA_OPTIONS];
- struct nlattr *tb[TCA_CBQ_MAX + 1];
- struct cbq_class *parent;
- struct qdisc_rate_table *rtab = NULL;
-
- err = cbq_opt_parse(tb, opt, extack);
- if (err < 0)
- return err;
-
- if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE]) {
- NL_SET_ERR_MSG(extack, "Neither overlimit strategy nor policing attributes can be used for changing class params");
- return -EOPNOTSUPP;
- }
-
- if (cl) {
- /* Check parent */
- if (parentid) {
- if (cl->tparent &&
- cl->tparent->common.classid != parentid) {
- NL_SET_ERR_MSG(extack, "Invalid parent id");
- return -EINVAL;
- }
- if (!cl->tparent && parentid != TC_H_ROOT) {
- NL_SET_ERR_MSG(extack, "Parent must be root");
- return -EINVAL;
- }
- }
-
- if (tb[TCA_CBQ_RATE]) {
- rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
- tb[TCA_CBQ_RTAB], extack);
- if (rtab == NULL)
- return -EINVAL;
- }
-
- if (tca[TCA_RATE]) {
- err = gen_replace_estimator(&cl->bstats, NULL,
- &cl->rate_est,
- NULL,
- qdisc_root_sleeping_running(sch),
- tca[TCA_RATE]);
- if (err) {
- NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
- qdisc_put_rtab(rtab);
- return err;
- }
- }
-
- /* Change class parameters */
- sch_tree_lock(sch);
-
- if (cl->next_alive != NULL)
- cbq_deactivate_class(cl);
-
- if (rtab) {
- qdisc_put_rtab(cl->R_tab);
- cl->R_tab = rtab;
- }
-
- if (tb[TCA_CBQ_LSSOPT])
- cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
-
- if (tb[TCA_CBQ_WRROPT]) {
- cbq_rmprio(q, cl);
- cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
- }
-
- if (tb[TCA_CBQ_FOPT])
- cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
-
- if (cl->q->q.qlen)
- cbq_activate_class(cl);
-
- sch_tree_unlock(sch);
-
- return 0;
- }
-
- if (parentid == TC_H_ROOT)
- return -EINVAL;
-
- if (!tb[TCA_CBQ_WRROPT] || !tb[TCA_CBQ_RATE] || !tb[TCA_CBQ_LSSOPT]) {
- NL_SET_ERR_MSG(extack, "One of the following attributes MUST be specified: WRR, rate or link sharing");
- return -EINVAL;
- }
-
- rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB],
- extack);
- if (rtab == NULL)
- return -EINVAL;
-
- if (classid) {
- err = -EINVAL;
- if (TC_H_MAJ(classid ^ sch->handle) ||
- cbq_class_lookup(q, classid)) {
- NL_SET_ERR_MSG(extack, "Specified class not found");
- goto failure;
- }
- } else {
- int i;
- classid = TC_H_MAKE(sch->handle, 0x8000);
-
- for (i = 0; i < 0x8000; i++) {
- if (++q->hgenerator >= 0x8000)
- q->hgenerator = 1;
- if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
- break;
- }
- err = -ENOSR;
- if (i >= 0x8000) {
- NL_SET_ERR_MSG(extack, "Unable to generate classid");
- goto failure;
- }
- classid = classid|q->hgenerator;
- }
-
- parent = &q->link;
- if (parentid) {
- parent = cbq_class_lookup(q, parentid);
- err = -EINVAL;
- if (!parent) {
- NL_SET_ERR_MSG(extack, "Failed to find parentid");
- goto failure;
- }
- }
-
- err = -ENOBUFS;
- cl = kzalloc(sizeof(*cl), GFP_KERNEL);
- if (cl == NULL)
- goto failure;
-
- err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
- if (err) {
- kfree(cl);
- goto failure;
- }
-
- if (tca[TCA_RATE]) {
- err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
- NULL,
- qdisc_root_sleeping_running(sch),
- tca[TCA_RATE]);
- if (err) {
- NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
- tcf_block_put(cl->block);
- kfree(cl);
- goto failure;
- }
- }
-
- cl->R_tab = rtab;
- rtab = NULL;
- cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
- NULL);
- if (!cl->q)
- cl->q = &noop_qdisc;
- else
- qdisc_hash_add(cl->q, true);
-
- cl->common.classid = classid;
- cl->tparent = parent;
- cl->qdisc = sch;
- cl->allot = parent->allot;
- cl->quantum = cl->allot;
- cl->weight = cl->R_tab->rate.rate;
-
- sch_tree_lock(sch);
- cbq_link_class(cl);
- cl->borrow = cl->tparent;
- if (cl->tparent != &q->link)
- cl->share = cl->tparent;
- cbq_adjust_levels(parent);
- cl->minidle = -0x7FFFFFFF;
- cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
- cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
- if (cl->ewma_log == 0)
- cl->ewma_log = q->link.ewma_log;
- if (cl->maxidle == 0)
- cl->maxidle = q->link.maxidle;
- if (cl->avpkt == 0)
- cl->avpkt = q->link.avpkt;
- if (tb[TCA_CBQ_FOPT])
- cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
- sch_tree_unlock(sch);
-
- qdisc_class_hash_grow(sch, &q->clhash);
-
- *arg = (unsigned long)cl;
- return 0;
-
-failure:
- qdisc_put_rtab(rtab);
- return err;
-}
-
-static int cbq_delete(struct Qdisc *sch, unsigned long arg)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class *)arg;
-
- if (cl->filters || cl->children || cl == &q->link)
- return -EBUSY;
-
- sch_tree_lock(sch);
-
- qdisc_purge_queue(cl->q);
-
- if (cl->next_alive)
- cbq_deactivate_class(cl);
-
- if (q->tx_borrowed == cl)
- q->tx_borrowed = q->tx_class;
- if (q->tx_class == cl) {
- q->tx_class = NULL;
- q->tx_borrowed = NULL;
- }
-#ifdef CONFIG_NET_CLS_ACT
- if (q->rx_class == cl)
- q->rx_class = NULL;
-#endif
-
- cbq_unlink_class(cl);
- cbq_adjust_levels(cl->tparent);
- cl->defmap = 0;
- cbq_sync_defmap(cl);
-
- cbq_rmprio(q, cl);
- sch_tree_unlock(sch);
-
- cbq_destroy_class(sch, cl);
- return 0;
-}
-
-static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg,
- struct netlink_ext_ack *extack)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class *)arg;
-
- if (cl == NULL)
- cl = &q->link;
-
- return cl->block;
-}
-
-static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
- u32 classid)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *p = (struct cbq_class *)parent;
- struct cbq_class *cl = cbq_class_lookup(q, classid);
-
- if (cl) {
- if (p && p->level <= cl->level)
- return 0;
- cl->filters++;
- return (unsigned long)cl;
- }
- return 0;
-}
-
-static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
-{
- struct cbq_class *cl = (struct cbq_class *)arg;
-
- cl->filters--;
-}
-
-static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
-{
- struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl;
- unsigned int h;
-
- if (arg->stop)
- return;
-
- for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
- if (arg->count < arg->skip) {
- arg->count++;
- continue;
- }
- if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
- arg->stop = 1;
- return;
- }
- arg->count++;
- }
- }
-}
-
-static const struct Qdisc_class_ops cbq_class_ops = {
- .graft = cbq_graft,
- .leaf = cbq_leaf,
- .qlen_notify = cbq_qlen_notify,
- .find = cbq_find,
- .change = cbq_change_class,
- .delete = cbq_delete,
- .walk = cbq_walk,
- .tcf_block = cbq_tcf_block,
- .bind_tcf = cbq_bind_filter,
- .unbind_tcf = cbq_unbind_filter,
- .dump = cbq_dump_class,
- .dump_stats = cbq_dump_class_stats,
-};
-
-static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
- .next = NULL,
- .cl_ops = &cbq_class_ops,
- .id = "cbq",
- .priv_size = sizeof(struct cbq_sched_data),
- .enqueue = cbq_enqueue,
- .dequeue = cbq_dequeue,
- .peek = qdisc_peek_dequeued,
- .init = cbq_init,
- .reset = cbq_reset,
- .destroy = cbq_destroy,
- .change = NULL,
- .dump = cbq_dump,
- .dump_stats = cbq_dump_stats,
- .owner = THIS_MODULE,
-};
-
-static int __init cbq_module_init(void)
-{
- return register_qdisc(&cbq_qdisc_ops);
-}
-static void __exit cbq_module_exit(void)
-{
- unregister_qdisc(&cbq_qdisc_ops);
-}
-module_init(cbq_module_init)
-module_exit(cbq_module_exit)
-MODULE_LICENSE("GPL");
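Note: the deleted file above ends with the standard qdisc module boilerplate, and sch_dsmark.c below follows the same shape. As a reference point for the removals, a minimal sketch of that pattern; the "example" names and callbacks are placeholders, not a real qdisc:

    static struct Qdisc_ops example_qdisc_ops __read_mostly = {
            .id        = "example",
            .priv_size = 0,
            .enqueue   = example_enqueue,   /* hypothetical callbacks */
            .dequeue   = example_dequeue,
            .peek      = qdisc_peek_dequeued,
            .owner     = THIS_MODULE,
    };

    static int __init example_module_init(void)
    {
            return register_qdisc(&example_qdisc_ops);
    }

    static void __exit example_module_exit(void)
    {
            unregister_qdisc(&example_qdisc_ops);
    }

    module_init(example_module_init)
    module_exit(example_module_exit)
    MODULE_LICENSE("GPL");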
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
deleted file mode 100644
index 76ed1a05ded2..000000000000
--- a/net/sched/sch_dsmark.c
+++ /dev/null
@@ -1,523 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* net/sched/sch_dsmark.c - Differentiated Services field marker */
-
-/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
-
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
-#include <linux/bitops.h>
-#include <net/pkt_sched.h>
-#include <net/pkt_cls.h>
-#include <net/dsfield.h>
-#include <net/inet_ecn.h>
-#include <asm/byteorder.h>
-
-/*
- * classid       class      marking
- * -------       -----      -------
- *   n/a           0        n/a
- *   x:0           1        use entry [0]
- *   ...          ...       ...
- *   x:y y>0      y+1       use entry [y]
- *   ...          ...       ...
- * x:indices-1  indices     use entry [indices-1]
- *   ...          ...       ...
- *   x:y          y+1       use entry [y & (indices-1)]
- *   ...          ...       ...
- *  0xffff      0x10000     use entry [indices-1]
- */
-
-
-#define NO_DEFAULT_INDEX (1 << 16)
-
-struct mask_value {
- u8 mask;
- u8 value;
-};
-
-struct dsmark_qdisc_data {
- struct Qdisc *q;
- struct tcf_proto __rcu *filter_list;
- struct tcf_block *block;
- struct mask_value *mv;
- u16 indices;
- u8 set_tc_index;
- u32 default_index; /* index range is 0...0xffff */
-#define DSMARK_EMBEDDED_SZ 16
- struct mask_value embedded[DSMARK_EMBEDDED_SZ];
-};
-
-static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
-{
- return index <= p->indices && index > 0;
-}
-
-/* ------------------------- Class/flow operations ------------------------- */
-
-static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
- struct Qdisc *new, struct Qdisc **old,
- struct netlink_ext_ack *extack)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
- pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
- __func__, sch, p, new, old);
-
- if (new == NULL) {
- new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
- sch->handle, NULL);
- if (new == NULL)
- new = &noop_qdisc;
- }
-
- *old = qdisc_replace(sch, new, &p->q);
- return 0;
-}
-
-static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- return p->q;
-}
-
-static unsigned long dsmark_find(struct Qdisc *sch, u32 classid)
-{
- return TC_H_MIN(classid) + 1;
-}
-
-static unsigned long dsmark_bind_filter(struct Qdisc *sch,
- unsigned long parent, u32 classid)
-{
- pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
- __func__, sch, qdisc_priv(sch), classid);
-
- return dsmark_find(sch, classid);
-}
-
-static void dsmark_unbind_filter(struct Qdisc *sch, unsigned long cl)
-{
-}
-
-static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
- [TCA_DSMARK_INDICES] = { .type = NLA_U16 },
- [TCA_DSMARK_DEFAULT_INDEX] = { .type = NLA_U16 },
- [TCA_DSMARK_SET_TC_INDEX] = { .type = NLA_FLAG },
- [TCA_DSMARK_MASK] = { .type = NLA_U8 },
- [TCA_DSMARK_VALUE] = { .type = NLA_U8 },
-};
-
-static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
- struct nlattr **tca, unsigned long *arg,
- struct netlink_ext_ack *extack)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- struct nlattr *opt = tca[TCA_OPTIONS];
- struct nlattr *tb[TCA_DSMARK_MAX + 1];
- int err = -EINVAL;
-
- pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
- __func__, sch, p, classid, parent, *arg);
-
- if (!dsmark_valid_index(p, *arg)) {
- err = -ENOENT;
- goto errout;
- }
-
- if (!opt)
- goto errout;
-
- err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
- dsmark_policy, NULL);
- if (err < 0)
- goto errout;
-
- if (tb[TCA_DSMARK_VALUE])
- p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);
-
- if (tb[TCA_DSMARK_MASK])
- p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
-
- err = 0;
-
-errout:
- return err;
-}
-
-static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
- if (!dsmark_valid_index(p, arg))
- return -EINVAL;
-
- p->mv[arg - 1].mask = 0xff;
- p->mv[arg - 1].value = 0;
-
- return 0;
-}
-
-static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- int i;
-
- pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
- __func__, sch, p, walker);
-
- if (walker->stop)
- return;
-
- for (i = 0; i < p->indices; i++) {
- if (p->mv[i].mask == 0xff && !p->mv[i].value)
- goto ignore;
- if (walker->count >= walker->skip) {
- if (walker->fn(sch, i + 1, walker) < 0) {
- walker->stop = 1;
- break;
- }
- }
-ignore:
- walker->count++;
- }
-}
-
-static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
- struct netlink_ext_ack *extack)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
- return p->block;
-}
-
-/* --------------------------- Qdisc operations ---------------------------- */
-
-static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
- struct sk_buff **to_free)
-{
- unsigned int len = qdisc_pkt_len(skb);
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- int err;
-
- pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
-
- if (p->set_tc_index) {
- int wlen = skb_network_offset(skb);
-
- switch (skb_protocol(skb, true)) {
- case htons(ETH_P_IP):
- wlen += sizeof(struct iphdr);
- if (!pskb_may_pull(skb, wlen) ||
- skb_try_make_writable(skb, wlen))
- goto drop;
-
- skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
- & ~INET_ECN_MASK;
- break;
-
- case htons(ETH_P_IPV6):
- wlen += sizeof(struct ipv6hdr);
- if (!pskb_may_pull(skb, wlen) ||
- skb_try_make_writable(skb, wlen))
- goto drop;
-
- skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
- & ~INET_ECN_MASK;
- break;
- default:
- skb->tc_index = 0;
- break;
- }
- }
-
- if (TC_H_MAJ(skb->priority) == sch->handle)
- skb->tc_index = TC_H_MIN(skb->priority);
- else {
- struct tcf_result res;
- struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
- int result = tcf_classify(skb, fl, &res, false);
-
- pr_debug("result %d class 0x%04x\n", result, res.classid);
-
- switch (result) {
-#ifdef CONFIG_NET_CLS_ACT
- case TC_ACT_QUEUED:
- case TC_ACT_STOLEN:
- case TC_ACT_TRAP:
- __qdisc_drop(skb, to_free);
- return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-
- case TC_ACT_SHOT:
- goto drop;
-#endif
- case TC_ACT_OK:
- skb->tc_index = TC_H_MIN(res.classid);
- break;
-
- default:
- if (p->default_index != NO_DEFAULT_INDEX)
- skb->tc_index = p->default_index;
- break;
- }
- }
-
- err = qdisc_enqueue(skb, p->q, to_free);
- if (err != NET_XMIT_SUCCESS) {
- if (net_xmit_drop_count(err))
- qdisc_qstats_drop(sch);
- return err;
- }
-
- sch->qstats.backlog += len;
- sch->q.qlen++;
-
- return NET_XMIT_SUCCESS;
-
-drop:
- qdisc_drop(skb, sch, to_free);
- return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-}
-
-static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- struct sk_buff *skb;
- u32 index;
-
- pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
-
- skb = qdisc_dequeue_peeked(p->q);
- if (skb == NULL)
- return NULL;
-
- qdisc_bstats_update(sch, skb);
- qdisc_qstats_backlog_dec(sch, skb);
- sch->q.qlen--;
-
- index = skb->tc_index & (p->indices - 1);
- pr_debug("index %d->%d\n", skb->tc_index, index);
-
- switch (skb_protocol(skb, true)) {
- case htons(ETH_P_IP):
- ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
- p->mv[index].value);
- break;
- case htons(ETH_P_IPV6):
- ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
- p->mv[index].value);
- break;
- default:
- /*
- * Only complain if a change was actually attempted.
- * This way, we can send non-IP traffic through dsmark
- * and don't need yet another qdisc as a bypass.
- */
- if (p->mv[index].mask != 0xff || p->mv[index].value)
- pr_warn("%s: unsupported protocol %d\n",
- __func__, ntohs(skb_protocol(skb, true)));
- break;
- }
-
- return skb;
-}
-
-static struct sk_buff *dsmark_peek(struct Qdisc *sch)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
- pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
-
- return p->q->ops->peek(p->q);
-}
-
-static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
- struct netlink_ext_ack *extack)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- struct nlattr *tb[TCA_DSMARK_MAX + 1];
- int err = -EINVAL;
- u32 default_index = NO_DEFAULT_INDEX;
- u16 indices;
- int i;
-
- pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);
-
- if (!opt)
- goto errout;
-
- err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
- if (err)
- return err;
-
- err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
- dsmark_policy, NULL);
- if (err < 0)
- goto errout;
-
- err = -EINVAL;
- if (!tb[TCA_DSMARK_INDICES])
- goto errout;
- indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
-
- if (hweight32(indices) != 1)
- goto errout;
-
- if (tb[TCA_DSMARK_DEFAULT_INDEX])
- default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);
-
- if (indices <= DSMARK_EMBEDDED_SZ)
- p->mv = p->embedded;
- else
- p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
- if (!p->mv) {
- err = -ENOMEM;
- goto errout;
- }
- for (i = 0; i < indices; i++) {
- p->mv[i].mask = 0xff;
- p->mv[i].value = 0;
- }
- p->indices = indices;
- p->default_index = default_index;
- p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
-
- p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
- NULL);
- if (p->q == NULL)
- p->q = &noop_qdisc;
- else
- qdisc_hash_add(p->q, true);
-
- pr_debug("%s: qdisc %p\n", __func__, p->q);
-
- err = 0;
-errout:
- return err;
-}
-
-static void dsmark_reset(struct Qdisc *sch)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
- pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
- if (p->q)
- qdisc_reset(p->q);
- sch->qstats.backlog = 0;
- sch->q.qlen = 0;
-}
-
-static void dsmark_destroy(struct Qdisc *sch)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
-
- pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
-
- tcf_block_put(p->block);
- qdisc_put(p->q);
- if (p->mv != p->embedded)
- kfree(p->mv);
-}
-
-static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
- struct sk_buff *skb, struct tcmsg *tcm)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- struct nlattr *opts = NULL;
-
- pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl);
-
- if (!dsmark_valid_index(p, cl))
- return -EINVAL;
-
- tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
- tcm->tcm_info = p->q->handle;
-
- opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (opts == NULL)
- goto nla_put_failure;
- if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
- nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
- goto nla_put_failure;
-
- return nla_nest_end(skb, opts);
-
-nla_put_failure:
- nla_nest_cancel(skb, opts);
- return -EMSGSIZE;
-}
-
-static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
-{
- struct dsmark_qdisc_data *p = qdisc_priv(sch);
- struct nlattr *opts = NULL;
-
- opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (opts == NULL)
- goto nla_put_failure;
- if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
- goto nla_put_failure;
-
- if (p->default_index != NO_DEFAULT_INDEX &&
- nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
- goto nla_put_failure;
-
- if (p->set_tc_index &&
- nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
- goto nla_put_failure;
-
- return nla_nest_end(skb, opts);
-
-nla_put_failure:
- nla_nest_cancel(skb, opts);
- return -EMSGSIZE;
-}
-
-static const struct Qdisc_class_ops dsmark_class_ops = {
- .graft = dsmark_graft,
- .leaf = dsmark_leaf,
- .find = dsmark_find,
- .change = dsmark_change,
- .delete = dsmark_delete,
- .walk = dsmark_walk,
- .tcf_block = dsmark_tcf_block,
- .bind_tcf = dsmark_bind_filter,
- .unbind_tcf = dsmark_unbind_filter,
- .dump = dsmark_dump_class,
-};
-
-static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
- .next = NULL,
- .cl_ops = &dsmark_class_ops,
- .id = "dsmark",
- .priv_size = sizeof(struct dsmark_qdisc_data),
- .enqueue = dsmark_enqueue,
- .dequeue = dsmark_dequeue,
- .peek = dsmark_peek,
- .init = dsmark_init,
- .reset = dsmark_reset,
- .destroy = dsmark_destroy,
- .change = NULL,
- .dump = dsmark_dump,
- .owner = THIS_MODULE,
-};
-
-static int __init dsmark_module_init(void)
-{
- return register_qdisc(&dsmark_qdisc_ops);
-}
-
-static void __exit dsmark_module_exit(void)
-{
- unregister_qdisc(&dsmark_qdisc_ops);
-}
-
-module_init(dsmark_module_init)
-module_exit(dsmark_module_exit)
-
-MODULE_LICENSE("GPL");
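Note: the marking rule dsmark applied on dequeue (via ipv4_change_dsfield()/ipv6_change_dsfield() above) rewrites the DS field to (old & mask) | value. A one-function userspace sketch of that rule, assuming those helpers' documented semantics; with the default pair mask=0xff, value=0 the field is untouched, which is why dsmark_delete() resets entries to exactly that pair and dsmark_walk() skips them:

    /* (dsfield & mask) | value: mask 0xff with value 0 is a no-op */
    static unsigned char dsmark_rewrite(unsigned char dsfield,
                                        unsigned char mask,
                                        unsigned char value)
    {
            return (unsigned char)((dsfield & mask) | value);
    }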
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 86fb2f953bd5..30796bb54972 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -188,7 +188,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct fq_codel_sched_data *q = qdisc_priv(sch);
unsigned int idx, prev_backlog, prev_qlen;
struct fq_codel_flow *flow;
- int uninitialized_var(ret);
+ int ret;
unsigned int pkt_len;
bool memory_limited;
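Note: uninitialized_var() was a warning-suppression macro (roughly "#define uninitialized_var(x) x = x") that existed only to silence false-positive "may be used uninitialized" compiler warnings; it was removed tree-wide because it can just as easily hide genuinely uninitialized uses. The same mechanical substitution appears below in sch_hfsc.c, sch_htb.c and sch_sfq.c.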
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ae5847de94c8..4250f3cf30e7 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -403,7 +403,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets)
void __qdisc_run(struct Qdisc *q)
{
- int quota = dev_tx_weight;
+ int quota = READ_ONCE(dev_tx_weight);
int packets;
while (qdisc_restart(q, &packets)) {
@@ -1106,18 +1106,21 @@ static void attach_default_qdiscs(struct net_device *dev)
if (!netif_is_multiqueue(dev) ||
dev->priv_flags & IFF_NO_QUEUE) {
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
- dev->qdisc = txq->qdisc_sleeping;
- qdisc_refcount_inc(dev->qdisc);
+ qdisc = txq->qdisc_sleeping;
+ rcu_assign_pointer(dev->qdisc, qdisc);
+ qdisc_refcount_inc(qdisc);
} else {
qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
if (qdisc) {
- dev->qdisc = qdisc;
+ rcu_assign_pointer(dev->qdisc, qdisc);
qdisc->ops->attach(qdisc);
}
}
+ qdisc = rtnl_dereference(dev->qdisc);
+
#ifdef CONFIG_NET_SCHED
- if (dev->qdisc != &noop_qdisc)
- qdisc_hash_add(dev->qdisc, false);
+ if (qdisc != &noop_qdisc)
+ qdisc_hash_add(qdisc, false);
#endif
}
@@ -1147,7 +1150,7 @@ void dev_activate(struct net_device *dev)
* and noqueue_qdisc for virtual interfaces
*/
- if (dev->qdisc == &noop_qdisc)
+ if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
attach_default_qdiscs(dev);
if (!netif_carrier_ok(dev))
@@ -1316,7 +1319,7 @@ static int qdisc_change_tx_queue_len(struct net_device *dev,
void dev_qdisc_change_real_num_tx(struct net_device *dev,
unsigned int new_real_tx)
{
- struct Qdisc *qdisc = dev->qdisc;
+ struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);
if (qdisc->ops->change_real_num_tx)
qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
@@ -1356,7 +1359,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
void dev_init_scheduler(struct net_device *dev)
{
- dev->qdisc = &noop_qdisc;
+ rcu_assign_pointer(dev->qdisc, &noop_qdisc);
netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
if (dev_ingress_queue(dev))
dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
@@ -1384,8 +1387,8 @@ void dev_shutdown(struct net_device *dev)
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
if (dev_ingress_queue(dev))
shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
- qdisc_put(dev->qdisc);
- dev->qdisc = &noop_qdisc;
+ qdisc_put(rtnl_dereference(dev->qdisc));
+ rcu_assign_pointer(dev->qdisc, &noop_qdisc);
WARN_ON(timer_pending(&dev->watchdog_timer));
}
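Note: the sch_generic.c hunks above convert dev->qdisc to RCU-annotated accesses. A minimal sketch of the write/read pairing they adopt, assuming a pointer written under RTNL and read back by RTNL-side code (the helper names here are illustrative):

    /* writer: publish the new qdisc with the required memory ordering */
    static void publish_root_qdisc(struct net_device *dev, struct Qdisc *qdisc)
    {
            rcu_assign_pointer(dev->qdisc, qdisc);
    }

    /* reader: caller holds RTNL, so rtnl_dereference() is sufficient */
    static struct Qdisc *root_qdisc(struct net_device *dev)
    {
            return rtnl_dereference(dev->qdisc);
    }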
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 433f2190960f..9ebae0d07a9c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -903,6 +903,14 @@ hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
cl->cl_flags |= HFSC_USC;
}
+static void
+hfsc_upgrade_rt(struct hfsc_class *cl)
+{
+ cl->cl_fsc = cl->cl_rsc;
+ rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
+ cl->cl_flags |= HFSC_FSC;
+}
+
static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
[TCA_HFSC_RSC] = { .len = sizeof(struct tc_service_curve) },
[TCA_HFSC_FSC] = { .len = sizeof(struct tc_service_curve) },
@@ -1064,6 +1072,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->cf_tree = RB_ROOT;
sch_tree_lock(sch);
+ /* Check if the inner class is a misconfigured 'rt' */
+ if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
+ NL_SET_ERR_MSG(extack,
+ "Forced curve change on parent 'rt' to 'sc'");
+ hfsc_upgrade_rt(parent);
+ }
qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
list_add_tail(&cl->siblings, &parent->children);
if (parent->level == 0)
@@ -1533,7 +1547,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
unsigned int len = qdisc_pkt_len(skb);
struct hfsc_class *cl;
- int uninitialized_var(err);
+ int err;
bool first;
cl = hfsc_classify(skb, sch, &err);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 8184c87da8be..4cdf2dd04701 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -405,7 +405,10 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
while (cl->cmode == HTB_MAY_BORROW && p && mask) {
m = mask;
while (m) {
- int prio = ffz(~m);
+ unsigned int prio = ffz(~m);
+
+ if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
+ break;
m &= ~(1 << prio);
if (p->inner.clprio[prio].feed.rb_node)
@@ -579,7 +582,7 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
- int uninitialized_var(ret);
+ int ret;
unsigned int len = qdisc_pkt_len(skb);
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = htb_classify(skb, sch, &ret);
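Note: in htb_activate_prios() above, ffz(~m) yields the index of the lowest set bit of m (inverting the mask turns "lowest set bit" into "lowest zero bit"); a corrupted mask could yield an index past the clprio[] array, which the new WARN_ON_ONCE() bounds check catches. A runnable userspace sketch of the idiom, using the equivalent GCC builtin:

    #include <stdio.h>

    int main(void)
    {
            unsigned long m = 0x28; /* bits 3 and 5 set */

            while (m) {
                    /* __builtin_ctzl(m) == ffz(~m) for m != 0 */
                    unsigned int prio = __builtin_ctzl(m);

                    printf("prio %u\n", prio); /* prints 3, then 5 */
                    m &= ~(1UL << prio);
            }
            return 0;
    }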
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index bf56aa519797..28168799ca1b 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -79,6 +79,9 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
struct ingress_sched_data *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
+ if (sch->parent != TC_H_INGRESS)
+ return -EOPNOTSUPP;
+
net_inc_ingress_queue();
mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
@@ -94,6 +97,9 @@ static void ingress_destroy(struct Qdisc *sch)
{
struct ingress_sched_data *q = qdisc_priv(sch);
+ if (sch->parent != TC_H_INGRESS)
+ return;
+
tcf_block_put_ext(q->block, sch, &q->block_info);
net_dec_ingress_queue();
}
@@ -127,7 +133,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
.cl_ops = &ingress_class_ops,
.id = "ingress",
.priv_size = sizeof(struct ingress_sched_data),
- .static_flags = TCQ_F_CPUSTATS,
+ .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS,
.init = ingress_init,
.destroy = ingress_destroy,
.dump = ingress_dump,
@@ -212,6 +218,9 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
struct net_device *dev = qdisc_dev(sch);
int err;
+ if (sch->parent != TC_H_CLSACT)
+ return -EOPNOTSUPP;
+
net_inc_ingress_queue();
net_inc_egress_queue();
@@ -239,6 +248,9 @@ static void clsact_destroy(struct Qdisc *sch)
{
struct clsact_sched_data *q = qdisc_priv(sch);
+ if (sch->parent != TC_H_CLSACT)
+ return;
+
tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
@@ -260,7 +272,7 @@ static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
.cl_ops = &clsact_class_ops,
.id = "clsact",
.priv_size = sizeof(struct clsact_sched_data),
- .static_flags = TCQ_F_CPUSTATS,
+ .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS,
.init = clsact_init,
.destroy = clsact_destroy,
.dump = ingress_dump,
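Note: ingress and clsact only make sense at their dedicated attach points, so the new init/destroy guards reject any other parent handle (TC_H_INGRESS/TC_H_CLSACT), and setting TCQ_F_INGRESS in static_flags lets the core identify these qdiscs without string comparisons. Together this closes off crashes from grafting them where a regular egress qdisc is expected.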
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 50e15add6068..56d3dc5e95c7 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -130,6 +130,97 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
return 0;
}
+static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
+ struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ struct nlattr *tb[TCA_MQPRIO_MAX + 1];
+ struct nlattr *attr;
+ int i, rem, err;
+
+ err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
+ sizeof(*qopt));
+ if (err < 0)
+ return err;
+
+ if (!qopt->hw) {
+ NL_SET_ERR_MSG(extack,
+ "mqprio TCA_OPTIONS can only contain netlink attributes in hardware mode");
+ return -EINVAL;
+ }
+
+ if (tb[TCA_MQPRIO_MODE]) {
+ priv->flags |= TC_MQPRIO_F_MODE;
+ priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
+ }
+
+ if (tb[TCA_MQPRIO_SHAPER]) {
+ priv->flags |= TC_MQPRIO_F_SHAPER;
+ priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
+ }
+
+ if (tb[TCA_MQPRIO_MIN_RATE64]) {
+ if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MIN_RATE64],
+ "min_rate accepted only when shaper is in bw_rlimit mode");
+ return -EINVAL;
+ }
+ i = 0;
+ nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
+ rem) {
+ if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64) {
+ NL_SET_ERR_MSG_ATTR(extack, attr,
+ "Attribute type expected to be TCA_MQPRIO_MIN_RATE64");
+ return -EINVAL;
+ }
+
+ if (nla_len(attr) != sizeof(u64)) {
+ NL_SET_ERR_MSG_ATTR(extack, attr,
+ "Attribute TCA_MQPRIO_MIN_RATE64 expected to have 8 bytes length");
+ return -EINVAL;
+ }
+
+ if (i >= qopt->num_tc)
+ break;
+ priv->min_rate[i] = *(u64 *)nla_data(attr);
+ i++;
+ }
+ priv->flags |= TC_MQPRIO_F_MIN_RATE;
+ }
+
+ if (tb[TCA_MQPRIO_MAX_RATE64]) {
+ if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MAX_RATE64],
+ "max_rate accepted only when shaper is in bw_rlimit mode");
+ return -EINVAL;
+ }
+ i = 0;
+ nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
+ rem) {
+ if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64) {
+ NL_SET_ERR_MSG_ATTR(extack, attr,
+ "Attribute type expected to be TCA_MQPRIO_MAX_RATE64");
+ return -EINVAL;
+ }
+
+ if (nla_len(attr) != sizeof(u64)) {
+ NL_SET_ERR_MSG_ATTR(extack, attr,
+ "Attribute TCA_MQPRIO_MAX_RATE64 expected to have 8 bytes length");
+ return -EINVAL;
+ }
+
+ if (i >= qopt->num_tc)
+ break;
+ priv->max_rate[i] = *(u64 *)nla_data(attr);
+ i++;
+ }
+ priv->flags |= TC_MQPRIO_F_MAX_RATE;
+ }
+
+ return 0;
+}
+
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
@@ -139,9 +230,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
struct Qdisc *qdisc;
int i, err = -EOPNOTSUPP;
struct tc_mqprio_qopt *qopt = NULL;
- struct nlattr *tb[TCA_MQPRIO_MAX + 1];
- struct nlattr *attr;
- int rem;
int len;
BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
@@ -166,55 +254,9 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
if (len > 0) {
- err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
- sizeof(*qopt));
- if (err < 0)
+ err = mqprio_parse_nlattr(sch, qopt, opt, extack);
+ if (err)
return err;
-
- if (!qopt->hw)
- return -EINVAL;
-
- if (tb[TCA_MQPRIO_MODE]) {
- priv->flags |= TC_MQPRIO_F_MODE;
- priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
- }
-
- if (tb[TCA_MQPRIO_SHAPER]) {
- priv->flags |= TC_MQPRIO_F_SHAPER;
- priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
- }
-
- if (tb[TCA_MQPRIO_MIN_RATE64]) {
- if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
- return -EINVAL;
- i = 0;
- nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
- rem) {
- if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
- return -EINVAL;
- if (i >= qopt->num_tc)
- break;
- priv->min_rate[i] = *(u64 *)nla_data(attr);
- i++;
- }
- priv->flags |= TC_MQPRIO_F_MIN_RATE;
- }
-
- if (tb[TCA_MQPRIO_MAX_RATE64]) {
- if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
- return -EINVAL;
- i = 0;
- nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
- rem) {
- if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
- return -EINVAL;
- if (i >= qopt->num_tc)
- break;
- priv->max_rate[i] = *(u64 *)nla_data(attr);
- i++;
- }
- priv->flags |= TC_MQPRIO_F_MAX_RATE;
- }
}
/* pre-allocate qdisc, attachment can't fail */
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 1802f134aa40..265a02b6ad09 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -773,12 +773,10 @@ static void dist_free(struct disttable *d)
* signed 16 bit values.
*/
-static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
- const struct nlattr *attr)
+static int get_dist_table(struct disttable **tbl, const struct nlattr *attr)
{
size_t n = nla_len(attr)/sizeof(__s16);
const __s16 *data = nla_data(attr);
- spinlock_t *root_lock;
struct disttable *d;
int i;
@@ -793,13 +791,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
for (i = 0; i < n; i++)
d->table[i] = data[i];
- root_lock = qdisc_root_sleeping_lock(sch);
-
- spin_lock_bh(root_lock);
- swap(*tbl, d);
- spin_unlock_bh(root_lock);
-
- dist_free(d);
+ *tbl = d;
return 0;
}
@@ -956,6 +948,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
{
struct netem_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_NETEM_MAX + 1];
+ struct disttable *delay_dist = NULL;
+ struct disttable *slot_dist = NULL;
struct tc_netem_qopt *qopt;
struct clgstate old_clg;
int old_loss_model = CLG_RANDOM;
@@ -969,6 +963,19 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
if (ret < 0)
return ret;
+ if (tb[TCA_NETEM_DELAY_DIST]) {
+ ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]);
+ if (ret)
+ goto table_free;
+ }
+
+ if (tb[TCA_NETEM_SLOT_DIST]) {
+ ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]);
+ if (ret)
+ goto table_free;
+ }
+
+ sch_tree_lock(sch);
/* backup q->clg and q->loss_model */
old_clg = q->clg;
old_loss_model = q->loss_model;
@@ -977,26 +984,17 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
if (ret) {
q->loss_model = old_loss_model;
- return ret;
+ q->clg = old_clg;
+ goto unlock;
}
} else {
q->loss_model = CLG_RANDOM;
}
- if (tb[TCA_NETEM_DELAY_DIST]) {
- ret = get_dist_table(sch, &q->delay_dist,
- tb[TCA_NETEM_DELAY_DIST]);
- if (ret)
- goto get_table_failure;
- }
-
- if (tb[TCA_NETEM_SLOT_DIST]) {
- ret = get_dist_table(sch, &q->slot_dist,
- tb[TCA_NETEM_SLOT_DIST]);
- if (ret)
- goto get_table_failure;
- }
-
+ if (delay_dist)
+ swap(q->delay_dist, delay_dist);
+ if (slot_dist)
+ swap(q->slot_dist, slot_dist);
sch->limit = qopt->limit;
q->latency = PSCHED_TICKS2NS(qopt->latency);
@@ -1044,15 +1042,12 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
/* capping jitter to the range acceptable by tabledist() */
q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
- return ret;
+unlock:
+ sch_tree_unlock(sch);
-get_table_failure:
- /* recover clg and loss_model, in case of
- * q->clg and q->loss_model were modified
- * in get_loss_clg()
- */
- q->clg = old_clg;
- q->loss_model = old_loss_model;
+table_free:
+ dist_free(delay_dist);
+ dist_free(slot_dist);
return ret;
}
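Note: the netem change moves the GFP_KERNEL table allocation out of the locked section: get_dist_table() no longer takes any lock itself, the tables are built before sch_tree_lock(), only the pointer swap happens under the lock, and the displaced tables are freed after unlock. A condensed sketch of the pattern, reusing the functions visible above:

    /* a sketch, assuming 'slot' is only dereferenced under the tree lock */
    static int replace_dist_table(struct Qdisc *sch, struct disttable **slot,
                                  const struct nlattr *attr)
    {
            struct disttable *d = NULL;
            int err = get_dist_table(&d, attr); /* may sleep; no lock held */

            if (err)
                    return err;

            sch_tree_lock(sch);
            swap(*slot, d);         /* publish under the lock */
            sch_tree_unlock(sch);

            dist_free(d);           /* 'd' now holds the old table */
            return 0;
    }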
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index cbc2ebca4548..339990bb5981 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -210,7 +210,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
.priv_size = sizeof(struct plug_sched_data),
.enqueue = plug_enqueue,
.dequeue = plug_dequeue,
- .peek = qdisc_peek_head,
+ .peek = qdisc_peek_dequeued,
.init = plug_init,
.change = plug_change,
.reset = qdisc_reset_queue,
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 1eb339d224ae..6e9e3405f26b 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -375,8 +375,13 @@ static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
u32 lmax)
{
struct qfq_sched *q = qdisc_priv(sch);
- struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight);
+ struct qfq_aggregate *new_agg;
+ /* 'lmax' can range from [QFQ_MIN_LMAX, pktlen + stab overhead] */
+ if (lmax > (1UL << QFQ_MTU_SHIFT))
+ return -EINVAL;
+
+ new_agg = qfq_find_agg(q, lmax, weight);
if (new_agg == NULL) { /* create new aggregate */
new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
if (new_agg == NULL)
@@ -421,15 +426,16 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
} else
weight = 1;
- if (tb[TCA_QFQ_LMAX]) {
+ if (tb[TCA_QFQ_LMAX])
lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
- if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
- pr_notice("qfq: invalid max length %u\n", lmax);
- return -EINVAL;
- }
- } else
+ else
lmax = psched_mtu(qdisc_dev(sch));
+ if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
+ pr_notice("qfq: invalid max length %u\n", lmax);
+ return -EINVAL;
+ }
+
inv_w = ONE_FP / weight;
weight = ONE_FP / inv_w;
@@ -969,10 +975,13 @@ static void qfq_update_eligible(struct qfq_sched *q)
}
/* Dequeue head packet of the head class in the DRR queue of the aggregate. */
-static void agg_dequeue(struct qfq_aggregate *agg,
- struct qfq_class *cl, unsigned int len)
+static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
+ struct qfq_class *cl, unsigned int len)
{
- qdisc_dequeue_peeked(cl->qdisc);
+ struct sk_buff *skb = qdisc_dequeue_peeked(cl->qdisc);
+
+ if (!skb)
+ return NULL;
cl->deficit -= (int) len;
@@ -982,6 +991,8 @@ static void agg_dequeue(struct qfq_aggregate *agg,
cl->deficit += agg->lmax;
list_move_tail(&cl->alist, &agg->active);
}
+
+ return skb;
}
static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
@@ -1127,11 +1138,18 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
if (!skb)
return NULL;
- qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
+
+ skb = agg_dequeue(in_serv_agg, cl, len);
+
+ if (!skb) {
+ sch->q.qlen++;
+ return NULL;
+ }
+
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb);
- agg_dequeue(in_serv_agg, cl, len);
/* If lmax is lowered, through qfq_change_class, for a class
* owning pending packets with larger size than the new value
* of lmax, then the following condition may hold.
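Note: qfq_dequeue() decrements sch->q.qlen before the per-class dequeue. By making agg_dequeue() return the skb it removed, the caller can detect an unexpectedly empty class queue, undo the qlen decrement and bail out, instead of updating backlog and byte/packet statistics against a packet that was never dequeued. The companion hunk also rejects lmax above 1 << QFQ_MTU_SHIFT on the qfq_change_agg() path, which previously reached qfq_find_agg() with no bounds check at all.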
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 7741f102be4a..476853ff6989 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -59,6 +59,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
{
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
+ unsigned int len;
int ret;
q->vars.qavg = red_calc_qavg(&q->parms,
@@ -94,9 +95,10 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
break;
}
+ len = qdisc_pkt_len(skb);
ret = qdisc_enqueue(skb, child, to_free);
if (likely(ret == NET_XMIT_SUCCESS)) {
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++;
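Note: once qdisc_enqueue() returns, the child qdisc owns the skb and may already have freed or modified it, so anything the parent needs for accounting has to be captured first. That is why red_enqueue() now snapshots the packet length up front; sch_sfb.c below applies the same fix and additionally copies the skb control block before handing the packet down. A condensed sketch of the pattern:

    /* capture before handoff: 'skb' must not be touched after success */
    static int account_and_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                   struct Qdisc *child,
                                   struct sk_buff **to_free)
    {
            unsigned int len = qdisc_pkt_len(skb);
            int ret = qdisc_enqueue(skb, child, to_free);

            if (ret == NET_XMIT_SUCCESS) {
                    sch->qstats.backlog += len;
                    sch->q.qlen++;
            }
            return ret;
    }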
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 4074c50ac3d7..3aa6b4dcb1c8 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -135,15 +135,15 @@ static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
}
}
-static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
+static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
{
u32 sfbhash;
- sfbhash = sfb_hash(skb, 0);
+ sfbhash = cb->hashes[0];
if (sfbhash)
increment_one_qlen(sfbhash, 0, q);
- sfbhash = sfb_hash(skb, 1);
+ sfbhash = cb->hashes[1];
if (sfbhash)
increment_one_qlen(sfbhash, 1, q);
}
@@ -281,8 +281,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
{
struct sfb_sched_data *q = qdisc_priv(sch);
+ unsigned int len = qdisc_pkt_len(skb);
struct Qdisc *child = q->qdisc;
struct tcf_proto *fl;
+ struct sfb_skb_cb cb;
int i;
u32 p_min = ~0;
u32 minqlen = ~0;
@@ -399,11 +401,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
enqueue:
+ memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
ret = qdisc_enqueue(skb, child, to_free);
if (likely(ret == NET_XMIT_SUCCESS)) {
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
- increment_qlen(skb, q);
+ increment_qlen(&cb, q);
} else if (net_xmit_drop_count(ret)) {
q->stats.childdrop++;
qdisc_qstats_drop(sch);
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index b92bafaf83f3..d7f910610de9 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -349,7 +349,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
unsigned int hash, dropped;
sfq_index x, qlen;
struct sfq_slot *slot;
- int uninitialized_var(ret);
+ int ret;
struct sk_buff *head;
int delta;
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 4c26f7fb32b3..e4c4d23a1b53 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -65,6 +65,7 @@ struct taprio_sched {
u32 flags;
enum tk_offsets tk_offset;
int clockid;
+ bool offloaded;
atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
* speeds it's sub-nanoseconds per byte
*/
@@ -1268,6 +1269,8 @@ static int taprio_enable_offload(struct net_device *dev,
goto done;
}
+ q->offloaded = true;
+
done:
taprio_offload_free(offload);
@@ -1282,12 +1285,9 @@ static int taprio_disable_offload(struct net_device *dev,
struct tc_taprio_qopt_offload *offload;
int err;
- if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
+ if (!q->offloaded)
return 0;
- if (!ops->ndo_setup_tc)
- return -EOPNOTSUPP;
-
offload = taprio_offload_alloc(0);
if (!offload) {
NL_SET_ERR_MSG(extack,
@@ -1303,6 +1303,8 @@ static int taprio_disable_offload(struct net_device *dev,
goto out;
}
+ q->offloaded = false;
+
out:
taprio_offload_free(offload);
@@ -1620,6 +1622,7 @@ static void taprio_reset(struct Qdisc *sch)
int i;
hrtimer_cancel(&q->advance_timer);
+
if (q->qdiscs) {
for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
qdisc_reset(q->qdiscs[i]);
@@ -1642,6 +1645,7 @@ static void taprio_destroy(struct Qdisc *sch)
* happens in qdisc_create(), after taprio_init() has been called.
*/
hrtimer_cancel(&q->advance_timer);
+ qdisc_synchronize(sch);
taprio_disable_offload(dev, q, NULL);
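Note: taprio previously inferred "offload is active" from the user-supplied flags, so a failed enable could leave the disable path confused. The new q->offloaded flag is set only after ndo_setup_tc() succeeds, cleared only on successful disable, and taprio_disable_offload() now keys off it. The qdisc_synchronize() added to taprio_destroy() waits for concurrent softirq users of the qdisc to finish before teardown proceeds.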
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 5f72f3f916a5..a7f60bb2dd51 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -297,6 +297,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
struct nlattr *tb[TCA_TBF_MAX + 1];
struct tc_tbf_qopt *qopt;
struct Qdisc *child = NULL;
+ struct Qdisc *old = NULL;
struct psched_ratecfg rate;
struct psched_ratecfg peak;
u64 max_size;
@@ -388,7 +389,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
sch_tree_lock(sch);
if (child) {
qdisc_tree_flush_backlog(q->qdisc);
- qdisc_put(q->qdisc);
+ old = q->qdisc;
q->qdisc = child;
}
q->limit = qopt->limit;
@@ -408,6 +409,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
sch_tree_unlock(sch);
+ qdisc_put(old);
err = 0;
done:
return err;
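Note: same locking discipline as the netem change above. sch_tree_lock() is a spinlock-protected section, and qdisc_put() can descend into the old child's destroy path, which is not safe to run there; tbf_change() therefore only unlinks the old child under the lock and drops the final reference after sch_tree_unlock().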
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index f960b0e1e552..2cdcb72c8826 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1156,8 +1156,7 @@ int sctp_assoc_update(struct sctp_association *asoc,
/* Add any peer addresses from the new association. */
list_for_each_entry(trans, &new->peer.transport_addr_list,
transports)
- if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
- !sctp_assoc_add_peer(asoc, &trans->ipaddr,
+ if (!sctp_assoc_add_peer(asoc, &trans->ipaddr,
GFP_ATOMIC, trans->state))
return -ENOMEM;
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 3b2d0bd616dd..6b97b734a16f 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -869,12 +869,17 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
}
list_del_init(&shkey->key_list);
- sctp_auth_shkey_release(shkey);
list_add(&cur_key->key_list, sh_keys);
- if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
- sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+ if (asoc && asoc->active_key_id == auth_key->sca_keynumber &&
+ sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL)) {
+ list_del_init(&cur_key->key_list);
+ sctp_auth_shkey_release(cur_key);
+ list_add(&shkey->key_list, sh_keys);
+ return -ENOMEM;
+ }
+ sctp_auth_shkey_release(shkey);
return 0;
}
@@ -908,8 +913,13 @@ int sctp_auth_set_active_key(struct sctp_endpoint *ep,
return -EINVAL;
if (asoc) {
+ __u16 active_key_id = asoc->active_key_id;
+
asoc->active_key_id = key_id;
- sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
+ if (sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL)) {
+ asoc->active_key_id = active_key_id;
+ return -ENOMEM;
+ }
} else
ep->active_key_id = key_id;
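Note: both sctp/auth.c hunks turn a fire-and-forget key activation into a transaction. sctp_auth_asoc_init_active_key() can fail with -ENOMEM, so the old shared key (or the old active_key_id) is kept around and restored on failure, leaving the endpoint's key state exactly as it was instead of half-updated.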
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index a825e74d01fc..614bc081ca50 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -73,6 +73,12 @@ int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
}
}
+ /* If somehow no addresses were found that can be used with this
+ * scope, it's an error.
+ */
+ if (list_empty(&dest->address_list))
+ error = -ENETUNREACH;
+
out:
if (error)
sctp_bind_addr_clean(dest);
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 5a918e74bb82..2d0318a7352c 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -349,11 +349,9 @@ static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp
struct sctp_comm_param *commp = p;
struct sock *sk = ep->base.sk;
const struct inet_diag_req_v2 *r = commp->r;
- struct sctp_association *assoc =
- list_entry(ep->asocs.next, struct sctp_association, asocs);
/* find the ep only once through the transports by this condition */
- if (tsp->asoc != assoc)
+ if (!list_is_first(&tsp->asoc->asocs, &ep->asocs))
return 0;
if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 982a87b3e11f..963b94517ec2 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -284,7 +284,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
assoc->init_retries, assoc->shutdown_retries,
assoc->rtx_data_chunks,
refcount_read(&sk->sk_wmem_alloc),
- sk->sk_wmem_queued,
+ READ_ONCE(sk->sk_wmem_queued),
sk->sk_sndbuf,
sk->sk_rcvbuf);
seq_printf(seq, "\n");
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 8d32229199b9..c964e7ca6f7e 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1240,7 +1240,10 @@ static int sctp_side_effects(enum sctp_event_type event_type,
default:
pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
status, state, event_type, subtype.chunk);
- BUG();
+ error = status;
+ if (error >= 0)
+ error = -EINVAL;
+ WARN_ON_ONCE(1);
break;
}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 1d2f633c6c7c..67df4022853b 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -4379,7 +4379,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net,
SCTP_AUTH_NEW_KEY, GFP_ATOMIC);
if (!ev)
- return -ENOMEM;
+ return SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ev));
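Note: SCTP state functions return enum sctp_disposition, so the deleted "return -ENOMEM" was a type confusion that reached sctp_side_effects() as an impossible disposition value, which the sm_sideeffect.c hunk above now survives with WARN_ON_ONCE() and an error return instead of BUG(). The fix here returns the proper SCTP_DISPOSITION_NOMEM.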
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c76b40322ac7..cbcbc92748ba 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -68,7 +68,7 @@
#include <net/sctp/stream_sched.h>
/* Forward declarations for internal helper functions. */
-static bool sctp_writeable(struct sock *sk);
+static bool sctp_writeable(const struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len);
@@ -97,7 +97,7 @@ struct percpu_counter sctp_sockets_allocated;
static void sctp_enter_memory_pressure(struct sock *sk)
{
- sctp_memory_pressure = 1;
+ WRITE_ONCE(sctp_memory_pressure, 1);
}
@@ -138,7 +138,7 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk);
- sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk);
+ sk_wmem_queued_add(sk, chunk->skb->truesize + sizeof(struct sctp_chunk));
sk_mem_charge(sk, chunk->skb->truesize);
}
@@ -362,9 +362,9 @@ static void sctp_auto_asconf_init(struct sctp_sock *sp)
struct net *net = sock_net(&sp->inet.sk);
if (net->sctp.default_auto_asconf) {
- spin_lock(&net->sctp.addr_wq_lock);
+ spin_lock_bh(&net->sctp.addr_wq_lock);
list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
- spin_unlock(&net->sctp.addr_wq_lock);
+ spin_unlock_bh(&net->sctp.addr_wq_lock);
sp->do_auto_asconf = 1;
}
}
@@ -1850,6 +1850,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
if (err)
goto err;
+ if (unlikely(sinfo->sinfo_stream >= asoc->stream.outcnt)) {
+ err = -EINVAL;
+ goto err;
+ }
}
if (sctp_state(asoc, CLOSED)) {
@@ -2482,6 +2486,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
if (trans) {
trans->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);
+ sctp_transport_reset_hb_timer(trans);
} else if (asoc) {
asoc->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);
@@ -5163,13 +5168,17 @@ static void sctp_destroy_sock(struct sock *sk)
}
/* Triggered when there are no references on the socket anymore */
-static void sctp_destruct_sock(struct sock *sk)
+static void sctp_destruct_common(struct sock *sk)
{
struct sctp_sock *sp = sctp_sk(sk);
/* Free up the HMAC transform. */
crypto_free_shash(sp->hmac);
+}
+static void sctp_destruct_sock(struct sock *sk)
+{
+ sctp_destruct_common(sk);
inet_sock_destruct(sk);
}
@@ -8989,7 +8998,7 @@ static void sctp_wfree(struct sk_buff *skb)
struct sock *sk = asoc->base.sk;
sk_mem_uncharge(sk, skb->truesize);
- sk->sk_wmem_queued -= skb->truesize + sizeof(struct sctp_chunk);
+ sk_wmem_queued_add(sk, -(skb->truesize + sizeof(struct sctp_chunk)));
asoc->sndbuf_used -= skb->truesize + sizeof(struct sctp_chunk);
WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk),
&sk->sk_wmem_alloc));
@@ -9144,9 +9153,9 @@ void sctp_write_space(struct sock *sk)
* UDP-style sockets or TCP-style sockets, this code should work.
* - Daisy
*/
-static bool sctp_writeable(struct sock *sk)
+static bool sctp_writeable(const struct sock *sk)
{
- return sk->sk_sndbuf > sk->sk_wmem_queued;
+ return READ_ONCE(sk->sk_sndbuf) > READ_ONCE(sk->sk_wmem_queued);
}
/* Wait for an association to go into ESTABLISHED state. If timeout is 0,
@@ -9304,7 +9313,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
sctp_sk(newsk)->reuse = sp->reuse;
newsk->sk_shutdown = sk->sk_shutdown;
- newsk->sk_destruct = sctp_destruct_sock;
+ newsk->sk_destruct = sk->sk_destruct;
newsk->sk_family = sk->sk_family;
newsk->sk_protocol = IPPROTO_SCTP;
newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
@@ -9535,11 +9544,20 @@ struct proto sctp_prot = {
#if IS_ENABLED(CONFIG_IPV6)
-#include <net/transp_v6.h>
-static void sctp_v6_destroy_sock(struct sock *sk)
+static void sctp_v6_destruct_sock(struct sock *sk)
{
- sctp_destroy_sock(sk);
- inet6_destroy_sock(sk);
+ sctp_destruct_common(sk);
+ inet6_sock_destruct(sk);
+}
+
+static int sctp_v6_init_sock(struct sock *sk)
+{
+ int ret = sctp_init_sock(sk);
+
+ if (!ret)
+ sk->sk_destruct = sctp_v6_destruct_sock;
+
+ return ret;
}
struct proto sctpv6_prot = {
@@ -9549,8 +9567,8 @@ struct proto sctpv6_prot = {
.disconnect = sctp_disconnect,
.accept = sctp_accept,
.ioctl = sctp_ioctl,
- .init = sctp_init_sock,
- .destroy = sctp_v6_destroy_sock,
+ .init = sctp_v6_init_sock,
+ .destroy = sctp_destroy_sock,
.shutdown = sctp_shutdown,
.setsockopt = sctp_setsockopt,
.getsockopt = sctp_getsockopt,
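Note: the destructor rework splits the common teardown (freeing the HMAC transform) out of sctp_destruct_sock() so an IPv6 variant can pair it with inet6_sock_destruct(). The v6 socket now installs sctp_v6_destruct_sock at init time, IPv6-specific cleanup runs when the last socket reference is dropped rather than from the protocol .destroy hook, and sctp_copy_sock() propagates the parent's sk_destruct instead of unconditionally forcing the IPv4 destructor onto accepted v6 sockets.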
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 56762745d6e4..ca8fdc9abca5 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -52,6 +52,19 @@ static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt)
}
}
+static void sctp_stream_free_ext(struct sctp_stream *stream, __u16 sid)
+{
+ struct sctp_sched_ops *sched;
+
+ if (!SCTP_SO(stream, sid)->ext)
+ return;
+
+ sched = sctp_sched_ops_from_stream(stream);
+ sched->free_sid(stream, sid);
+ kfree(SCTP_SO(stream, sid)->ext);
+ SCTP_SO(stream, sid)->ext = NULL;
+}
+
/* Migrates chunks from stream queues to new stream queues if needed,
* but not across associations. Also, removes those chunks to streams
* higher than the new max.
@@ -70,16 +83,14 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
* sctp_stream_update will swap ->out pointers.
*/
for (i = 0; i < outcnt; i++) {
- kfree(SCTP_SO(new, i)->ext);
+ sctp_stream_free_ext(new, i);
SCTP_SO(new, i)->ext = SCTP_SO(stream, i)->ext;
SCTP_SO(stream, i)->ext = NULL;
}
}
- for (i = outcnt; i < stream->outcnt; i++) {
- kfree(SCTP_SO(stream, i)->ext);
- SCTP_SO(stream, i)->ext = NULL;
- }
+ for (i = outcnt; i < stream->outcnt; i++)
+ sctp_stream_free_ext(stream, i);
}
static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
@@ -174,9 +185,9 @@ void sctp_stream_free(struct sctp_stream *stream)
struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
int i;
- sched->free(stream);
+ sched->unsched_all(stream);
for (i = 0; i < stream->outcnt; i++)
- kfree(SCTP_SO(stream, i)->ext);
+ sctp_stream_free_ext(stream, i);
genradix_free(&stream->out);
genradix_free(&stream->in);
}
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index 40c40be23fcb..c982f99099de 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -1165,7 +1165,8 @@ static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
#define _sctp_walk_ifwdtsn(pos, chunk, end) \
for (pos = chunk->subh.ifwdtsn_hdr->skip; \
- (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)
+ (void *)pos <= (void *)chunk->subh.ifwdtsn_hdr->skip + (end) - \
+ sizeof(struct sctp_ifwdtsn_skip); pos++)
#define sctp_walk_ifwdtsn(pos, ch) \
_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
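Note: the old _sctp_walk_ifwdtsn() bound only required the start of an entry to lie inside the chunk, so a truncated final entry could be read past the buffer; the new bound requires a whole struct sctp_ifwdtsn_skip to remain. Worked example, assuming a 4-byte skip entry: with end = 10 bytes, the old condition visited offsets 0, 4 and 8, and the entry at offset 8 read bytes 8..11, two bytes beyond the chunk; the new bound (end - 4 = 6) stops the walk after offset 4.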
diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c
index a2e1d34f52c5..33c2630c2496 100644
--- a/net/sctp/stream_sched.c
+++ b/net/sctp/stream_sched.c
@@ -46,6 +46,10 @@ static int sctp_sched_fcfs_init_sid(struct sctp_stream *stream, __u16 sid,
return 0;
}
+static void sctp_sched_fcfs_free_sid(struct sctp_stream *stream, __u16 sid)
+{
+}
+
static void sctp_sched_fcfs_free(struct sctp_stream *stream)
{
}
@@ -96,6 +100,7 @@ static struct sctp_sched_ops sctp_sched_fcfs = {
.get = sctp_sched_fcfs_get,
.init = sctp_sched_fcfs_init,
.init_sid = sctp_sched_fcfs_init_sid,
+ .free_sid = sctp_sched_fcfs_free_sid,
.free = sctp_sched_fcfs_free,
.enqueue = sctp_sched_fcfs_enqueue,
.dequeue = sctp_sched_fcfs_dequeue,
diff --git a/net/sctp/stream_sched_prio.c b/net/sctp/stream_sched_prio.c
index 80b5a2c4cbc7..7dd9f8b387cc 100644
--- a/net/sctp/stream_sched_prio.c
+++ b/net/sctp/stream_sched_prio.c
@@ -25,6 +25,18 @@
static void sctp_sched_prio_unsched_all(struct sctp_stream *stream);
+static struct sctp_stream_priorities *sctp_sched_prio_head_get(struct sctp_stream_priorities *p)
+{
+ p->users++;
+ return p;
+}
+
+static void sctp_sched_prio_head_put(struct sctp_stream_priorities *p)
+{
+ if (p && --p->users == 0)
+ kfree(p);
+}
+
static struct sctp_stream_priorities *sctp_sched_prio_new_head(
struct sctp_stream *stream, int prio, gfp_t gfp)
{
@@ -38,6 +50,7 @@ static struct sctp_stream_priorities *sctp_sched_prio_new_head(
INIT_LIST_HEAD(&p->active);
p->next = NULL;
p->prio = prio;
+ p->users = 1;
return p;
}
@@ -53,7 +66,7 @@ static struct sctp_stream_priorities *sctp_sched_prio_get_head(
*/
list_for_each_entry(p, &stream->prio_list, prio_sched) {
if (p->prio == prio)
- return p;
+ return sctp_sched_prio_head_get(p);
if (p->prio > prio)
break;
}
@@ -70,7 +83,7 @@ static struct sctp_stream_priorities *sctp_sched_prio_get_head(
*/
break;
if (p->prio == prio)
- return p;
+ return sctp_sched_prio_head_get(p);
}
/* If not even there, allocate a new one. */
@@ -154,32 +167,21 @@ static int sctp_sched_prio_set(struct sctp_stream *stream, __u16 sid,
struct sctp_stream_out_ext *soute = sout->ext;
struct sctp_stream_priorities *prio_head, *old;
bool reschedule = false;
- int i;
+
+ old = soute->prio_head;
+ if (old && old->prio == prio)
+ return 0;
prio_head = sctp_sched_prio_get_head(stream, prio, gfp);
if (!prio_head)
return -ENOMEM;
reschedule = sctp_sched_prio_unsched(soute);
- old = soute->prio_head;
soute->prio_head = prio_head;
if (reschedule)
sctp_sched_prio_sched(stream, soute);
- if (!old)
- /* Happens when we set the priority for the first time */
- return 0;
-
- for (i = 0; i < stream->outcnt; i++) {
- soute = SCTP_SO(stream, i)->ext;
- if (soute && soute->prio_head == old)
- /* It's still in use, nothing else to do here. */
- return 0;
- }
-
- /* No hits, we are good to free it. */
- kfree(old);
-
+ sctp_sched_prio_head_put(old);
return 0;
}
@@ -204,6 +206,12 @@ static int sctp_sched_prio_init_sid(struct sctp_stream *stream, __u16 sid,
return sctp_sched_prio_set(stream, sid, 0, gfp);
}
+static void sctp_sched_prio_free_sid(struct sctp_stream *stream, __u16 sid)
+{
+ sctp_sched_prio_head_put(SCTP_SO(stream, sid)->ext->prio_head);
+ SCTP_SO(stream, sid)->ext->prio_head = NULL;
+}
+
static void sctp_sched_prio_free(struct sctp_stream *stream)
{
struct sctp_stream_priorities *prio, *n;
@@ -323,6 +331,7 @@ static struct sctp_sched_ops sctp_sched_prio = {
.get = sctp_sched_prio_get,
.init = sctp_sched_prio_init,
.init_sid = sctp_sched_prio_init_sid,
+ .free_sid = sctp_sched_prio_free_sid,
.free = sctp_sched_prio_free,
.enqueue = sctp_sched_prio_enqueue,
.dequeue = sctp_sched_prio_dequeue,
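Note: the deleted code decided whether a priority head was still in use by scanning every stream on each priority change, and nothing released the head when a stream's ext was freed; the users counter plus the new free_sid callback (wired up for all three schedulers in this series) make the ownership explicit. A condensed sketch of the counting scheme, with updates assumed to be serialized by the socket lock:

    /* lookup paths return an extra reference via _get() */
    static struct sctp_stream_priorities *
    prio_head_get(struct sctp_stream_priorities *p)
    {
            p->users++;
            return p;
    }

    /* the head is freed when its last user drops the reference */
    static void prio_head_put(struct sctp_stream_priorities *p)
    {
            if (p && --p->users == 0)
                    kfree(p);
    }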
diff --git a/net/sctp/stream_sched_rr.c b/net/sctp/stream_sched_rr.c
index ff425aed62c7..cc444fe0d67c 100644
--- a/net/sctp/stream_sched_rr.c
+++ b/net/sctp/stream_sched_rr.c
@@ -90,6 +90,10 @@ static int sctp_sched_rr_init_sid(struct sctp_stream *stream, __u16 sid,
return 0;
}
+static void sctp_sched_rr_free_sid(struct sctp_stream *stream, __u16 sid)
+{
+}
+
static void sctp_sched_rr_free(struct sctp_stream *stream)
{
sctp_sched_rr_unsched_all(stream);
@@ -177,6 +181,7 @@ static struct sctp_sched_ops sctp_sched_rr = {
.get = sctp_sched_rr_get,
.init = sctp_sched_rr_init,
.init_sid = sctp_sched_rr_init_sid,
+ .free_sid = sctp_sched_rr_free_sid,
.free = sctp_sched_rr_free,
.enqueue = sctp_sched_rr_enqueue,
.dequeue = sctp_sched_rr_dequeue,
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 394491692a07..e070b0e2a30c 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -136,7 +136,7 @@ static int __smc_release(struct smc_sock *smc)
if (!smc->use_fallback) {
rc = smc_close_active(smc);
- sock_set_flag(sk, SOCK_DEAD);
+ smc_sock_set_flag(sk, SOCK_DEAD);
sk->sk_shutdown |= SHUTDOWN_MASK;
} else {
if (sk->sk_state != SMC_CLOSED) {
@@ -928,7 +928,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
if (new_clcsock)
sock_release(new_clcsock);
new_sk->sk_state = SMC_CLOSED;
- sock_set_flag(new_sk, SOCK_DEAD);
+ smc_sock_set_flag(new_sk, SOCK_DEAD);
sock_put(new_sk); /* final */
*new_smc = NULL;
goto out;
@@ -1093,7 +1093,6 @@ static void smc_listen_out_connected(struct smc_sock *new_smc)
{
struct sock *newsmcsk = &new_smc->sk;
- sk_refcnt_debug_inc(newsmcsk);
if (newsmcsk->sk_state == SMC_INIT)
newsmcsk->sk_state = SMC_ACTIVE;
@@ -1543,16 +1542,14 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
- int rc = -EPIPE;
+ int rc;
smc = smc_sk(sk);
lock_sock(sk);
- if ((sk->sk_state != SMC_ACTIVE) &&
- (sk->sk_state != SMC_APPCLOSEWAIT1) &&
- (sk->sk_state != SMC_INIT))
- goto out;
+ /* SMC does not support connect with fastopen */
if (msg->msg_flags & MSG_FASTOPEN) {
+ /* not connected yet, fallback */
if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
smc_switch_to_fallback(smc);
smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
@@ -1560,6 +1557,11 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
rc = -EINVAL;
goto out;
}
+ } else if ((sk->sk_state != SMC_ACTIVE) &&
+ (sk->sk_state != SMC_APPCLOSEWAIT1) &&
+ (sk->sk_state != SMC_INIT)) {
+ rc = -EPIPE;
+ goto out;
}
if (smc->use_fallback)
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 878313f8d6c1..8c46eaf014a7 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -265,4 +265,9 @@ static inline bool using_ipsec(struct smc_sock *smc)
struct sock *smc_accept_dequeue(struct sock *parent, struct socket *new_sock);
void smc_close_non_accepted(struct sock *sk);
+static inline void smc_sock_set_flag(struct sock *sk, enum sock_flags flag)
+{
+ set_bit(flag, &sk->sk_flags);
+}
+
#endif /* __SMC_H */
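Note: in current kernels sock_set_flag() is a plain, non-atomic __set_bit() on sk->sk_flags, which is fine under the socket lock. The SMC call sites converted in this series can race with other writers of sk_flags, so the new smc_sock_set_flag() helper uses the atomic set_bit() to avoid losing concurrent flag updates.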
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index d0b0f4c865b4..68e1155126cf 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -298,7 +298,7 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
smc->sk.sk_shutdown |= RCV_SHUTDOWN;
if (smc->clcsock && smc->clcsock->sk)
smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
- sock_set_flag(&smc->sk, SOCK_DONE);
+ smc_sock_set_flag(&smc->sk, SOCK_DONE);
sock_hold(&smc->sk); /* sock_put in close_work */
if (!schedule_work(&conn->close_work))
sock_put(&smc->sk);
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 543948d970c5..a704234c1a2b 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -164,7 +164,7 @@ static void smc_close_active_abort(struct smc_sock *smc)
break;
}
- sock_set_flag(sk, SOCK_DEAD);
+ smc_sock_set_flag(sk, SOCK_DEAD);
sk->sk_state_change(sk);
}
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index da9ba6d1679b..3d8f551cec30 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -168,7 +168,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
}
if (smc->conn.lgr && smc->conn.lgr->is_smcd &&
(req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
- !list_empty(&smc->conn.lgr->list)) {
+ !list_empty(&smc->conn.lgr->list) && smc->conn.rmb_desc) {
struct smc_connection *conn = &smc->conn;
struct smcd_diag_dmbinfo dinfo;
diff --git a/net/socket.c b/net/socket.c
index 94358566c9d1..e3a50f107d64 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -641,6 +641,14 @@ static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
return ret;
}
+static int __sock_sendmsg(struct socket *sock, struct msghdr *msg)
+{
+ int err = security_socket_sendmsg(sock, msg,
+ msg_data_left(msg));
+
+ return err ?: sock_sendmsg_nosec(sock, msg);
+}
+
/**
* sock_sendmsg - send a message through @sock
* @sock: socket
@@ -651,10 +659,21 @@ static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
*/
int sock_sendmsg(struct socket *sock, struct msghdr *msg)
{
- int err = security_socket_sendmsg(sock, msg,
- msg_data_left(msg));
+ struct sockaddr_storage *save_addr = (struct sockaddr_storage *)msg->msg_name;
+ struct sockaddr_storage address;
+ int save_len = msg->msg_namelen;
+ int ret;
- return err ?: sock_sendmsg_nosec(sock, msg);
+ if (msg->msg_name) {
+ memcpy(&address, msg->msg_name, msg->msg_namelen);
+ msg->msg_name = &address;
+ }
+
+ ret = __sock_sendmsg(sock, msg);
+ msg->msg_name = save_addr;
+ msg->msg_namelen = save_len;
+
+ return ret;
}
EXPORT_SYMBOL(sock_sendmsg);
@@ -986,7 +1005,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (sock->type == SOCK_SEQPACKET)
msg.msg_flags |= MSG_EOR;
- res = sock_sendmsg(sock, &msg);
+ res = __sock_sendmsg(sock, &msg);
*from = msg.msg_iter;
return res;
}
@@ -1661,7 +1680,7 @@ int __sys_listen(int fd, int backlog)
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock) {
- somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
+ somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
if ((unsigned int)backlog > somaxconn)
backlog = somaxconn;
@@ -1938,7 +1957,7 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
msg.msg_flags = flags;
- err = sock_sendmsg(sock, &msg);
+ err = __sock_sendmsg(sock, &msg);
out_put:
fput_light(sock->file, fput_needed);
@@ -2283,7 +2302,7 @@ static int ____sys_sendmsg(struct socket *sock, struct msghdr *msg_sys,
err = sock_sendmsg_nosec(sock, msg_sys);
goto out_freectl;
}
- err = sock_sendmsg(sock, msg_sys);
+ err = __sock_sendmsg(sock, msg_sys);
/*
* If this is sendmmsg() and sending to current destination address was
* successful, remember it.
@@ -2723,7 +2742,7 @@ static int do_recvmmsg(int fd, struct mmsghdr __user *mmsg,
* error to return on the next call or if the
* app asks about it using getsockopt(SO_ERROR).
*/
- sock->sk->sk_err = -err;
+ WRITE_ONCE(sock->sk->sk_err, -err);
}
out_put:
fput_light(sock->file, fput_needed);
@@ -3567,7 +3586,11 @@ static long compat_sock_ioctl(struct file *file, unsigned int cmd,
int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
{
- return sock->ops->bind(sock, addr, addrlen);
+ struct sockaddr_storage address;
+
+ memcpy(&address, addr, addrlen);
+
+ return sock->ops->bind(sock, (struct sockaddr *)&address, addrlen);
}
EXPORT_SYMBOL(kernel_bind);
@@ -3637,7 +3660,11 @@ EXPORT_SYMBOL(kernel_accept);
int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
int flags)
{
- return sock->ops->connect(sock, addr, addrlen, flags);
+ struct sockaddr_storage address;
+
+ memcpy(&address, addr, addrlen);
+
+ return sock->ops->connect(sock, (struct sockaddr *)&address, addrlen, flags);
}
EXPORT_SYMBOL(kernel_connect);
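
kernel_bind() and kernel_connect() now hand the protocol a private copy of the address, and sock_sendmsg() saves and restores msg_name around the send, so a lower layer that rewrites the address (or a concurrent writer to the caller's buffer) can no longer be observed by, or corrupt, the caller. A runnable toy version of the defensive copy; proto_connect() is a made-up stand-in for sock->ops->connect():

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Illustrative lower layer that scribbles on the address it is given. */
static int proto_connect(struct sockaddr *addr, int addrlen)
{
        memset(addr, 0, addrlen);
        return 0;
}

static int safe_connect(const struct sockaddr *addr, int addrlen)
{
        struct sockaddr_storage address;

        /* Work on a private copy: the callee can neither see concurrent
         * caller-side stores nor corrupt the caller's buffer. */
        memcpy(&address, addr, addrlen);
        return proto_connect((struct sockaddr *)&address, addrlen);
}

int main(void)
{
        struct sockaddr_in sin = { .sin_family = AF_INET };

        safe_connect((struct sockaddr *)&sin, sizeof(sin));
        printf("caller still sees family %d\n", sin.sin_family);
        return 0;
}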
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 0d4a2bb09589..be2fc88f5bde 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -288,10 +288,10 @@ char *rpc_sockaddr2uaddr(const struct sockaddr *sap, gfp_t gfp_flags)
}
if (snprintf(portbuf, sizeof(portbuf),
- ".%u.%u", port >> 8, port & 0xff) > (int)sizeof(portbuf))
+ ".%u.%u", port >> 8, port & 0xff) >= (int)sizeof(portbuf))
return NULL;
- if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf))
+ if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) >= sizeof(addrbuf))
return NULL;
return kstrdup(addrbuf, gfp_flags);
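
Both checks above were off by one: snprintf() returns the length the full string would have needed, so a return value equal to the buffer size already means the terminating NUL displaced a data byte, and strlcat() signals truncation the same way. The boundary case in isolation:

#include <stdio.h>

int main(void)
{
        char portbuf[6];
        int n = snprintf(portbuf, sizeof(portbuf), ".%u.%u", 255u, 255u);

        /* n == 8: ".255.255" needed 8 characters but only 5 plus the
         * terminating NUL fit.  'n > sizeof(portbuf)' would also catch
         * this case, but it wrongly accepts n == 6, which is equally
         * truncated; 'n >= sizeof(portbuf)' is the correct test. */
        printf("n=%d truncated=%d portbuf=\"%s\"\n",
               n, n >= (int)sizeof(portbuf), portbuf);
        return 0;
}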
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index cdb05b48de44..e8fa21ad06a0 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -494,7 +494,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
* Enforce a 60 second garbage collection moratorium
* Note that the cred_unused list must be time-ordered.
*/
- if (!time_in_range(cred->cr_expire, expired, jiffies))
+ if (time_in_range(cred->cr_expire, expired, jiffies))
continue;
if (!rpcauth_unhash_cred(cred))
continue;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index b7a71578bd98..4d3cf146f50a 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -301,7 +301,7 @@ __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth
list_for_each_entry(pos, &pipe->in_downcall, list) {
if (!uid_eq(pos->uid, uid))
continue;
- if (auth && pos->auth->service != auth->service)
+ if (pos->auth->service != auth->service)
continue;
refcount_inc(&pos->count);
return pos;
@@ -683,6 +683,21 @@ out:
return err;
}
+static struct gss_upcall_msg *
+gss_find_downcall(struct rpc_pipe *pipe, kuid_t uid)
+{
+ struct gss_upcall_msg *pos;
+ list_for_each_entry(pos, &pipe->in_downcall, list) {
+ if (!uid_eq(pos->uid, uid))
+ continue;
+ if (!rpc_msg_is_inflight(&pos->msg))
+ continue;
+ refcount_inc(&pos->count);
+ return pos;
+ }
+ return NULL;
+}
+
#define MSG_BUF_MAXSIZE 1024
static ssize_t
@@ -729,7 +744,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
err = -ENOENT;
/* Find a matching upcall */
spin_lock(&pipe->lock);
- gss_msg = __gss_find_upcall(pipe, uid, NULL);
+ gss_msg = gss_find_downcall(pipe, uid);
if (gss_msg == NULL) {
spin_unlock(&pipe->lock);
goto err_put_ctx;
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 2ff7b7083eba..e265b8d38aa1 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -250,8 +250,8 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
if (!creds) {
- kfree(oa->data);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto free_oa;
}
oa->data[0].option.data = CREDS_VALUE;
@@ -265,29 +265,40 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
/* option buffer */
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- return -ENOSPC;
+ if (unlikely(p == NULL)) {
+ err = -ENOSPC;
+ goto free_creds;
+ }
length = be32_to_cpup(p);
p = xdr_inline_decode(xdr, length);
- if (unlikely(p == NULL))
- return -ENOSPC;
+ if (unlikely(p == NULL)) {
+ err = -ENOSPC;
+ goto free_creds;
+ }
if (length == sizeof(CREDS_VALUE) &&
memcmp(p, CREDS_VALUE, sizeof(CREDS_VALUE)) == 0) {
/* We have creds here. parse them */
err = gssx_dec_linux_creds(xdr, creds);
if (err)
- return err;
+ goto free_creds;
oa->data[0].value.len = 1; /* presence */
} else {
/* consume uninteresting buffer */
err = gssx_dec_buffer(xdr, &dummy);
if (err)
- return err;
+ goto free_creds;
}
}
return 0;
+
+free_creds:
+ kfree(creds);
+free_oa:
+ kfree(oa->data);
+ oa->data = NULL;
+ return err;
}
static int gssx_dec_status(struct xdr_stream *xdr,
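
Before this change, gssx_dec_option_array() leaked creds on every decode failure after the allocation (and oa->data on the first one); the rewrite routes all failure paths through the free_creds/free_oa labels so each buffer is freed exactly once, in reverse order of acquisition. A self-contained sketch of that unwind idiom (the names and the fail_at trigger are illustrative):

#include <stdlib.h>

struct parsed { void *data; void *creds; };

static int decode(struct parsed *p, int fail_at)
{
        int err;

        p->data = malloc(64);
        if (!p->data)
                return -1;

        p->creds = malloc(64);
        if (!p->creds) {
                err = -1;
                goto free_data;
        }

        if (fail_at) {                  /* stand-in for a decode error */
                err = -2;
                goto free_creds;
        }
        return 0;

free_creds:
        free(p->creds);
        p->creds = NULL;
free_data:
        free(p->data);
        p->data = NULL;
        return err;
}

int main(void)
{
        struct parsed p;

        return decode(&p, 1) == -2 ? 0 : 1;     /* both buffers freed */
}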
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index c0016473a255..1a869a203d52 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1104,18 +1104,23 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
return res;
inlen = svc_getnl(argv);
- if (inlen > (argv->iov_len + rqstp->rq_arg.page_len))
+ if (inlen > (argv->iov_len + rqstp->rq_arg.page_len)) {
+ kfree(in_handle->data);
return SVC_DENIED;
+ }
pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
- if (!in_token->pages)
+ if (!in_token->pages) {
+ kfree(in_handle->data);
return SVC_DENIED;
+ }
in_token->page_base = 0;
in_token->page_len = inlen;
for (i = 0; i < pages; i++) {
in_token->pages[i] = alloc_page(GFP_KERNEL);
if (!in_token->pages[i]) {
+ kfree(in_handle->data);
gss_free_in_token_pages(in_token);
return SVC_DENIED;
}
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 195b40c5dae4..266f3e5f55de 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -64,6 +64,17 @@ static void xprt_free_allocation(struct rpc_rqst *req)
kfree(req);
}
+static void xprt_bc_reinit_xdr_buf(struct xdr_buf *buf)
+{
+ buf->head[0].iov_len = PAGE_SIZE;
+ buf->tail[0].iov_len = 0;
+ buf->pages = NULL;
+ buf->page_len = 0;
+ buf->flags = 0;
+ buf->len = 0;
+ buf->buflen = PAGE_SIZE;
+}
+
static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
struct page *page;
@@ -292,6 +303,9 @@ void xprt_free_bc_rqst(struct rpc_rqst *req)
*/
spin_lock_bh(&xprt->bc_pa_lock);
if (xprt_need_to_requeue(xprt)) {
+ xprt_bc_reinit_xdr_buf(&req->rq_snd_buf);
+ xprt_bc_reinit_xdr_buf(&req->rq_rcv_buf);
+ req->rq_rcv_buf.len = PAGE_SIZE;
list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
xprt->bc_alloc_count++;
atomic_inc(&xprt->bc_slot_count);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 08e1ccc01e98..9071dc6928ac 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1354,7 +1354,7 @@ static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
break;
default:
err = -EAFNOSUPPORT;
- goto out;
+ goto out_release;
}
if (err < 0) {
dprintk("RPC: can't bind UDP socket (%d)\n", err);
@@ -1896,7 +1896,7 @@ call_encode(struct rpc_task *task)
break;
case -EKEYEXPIRED:
if (!task->tk_cred_retry) {
- rpc_exit(task, task->tk_status);
+ rpc_call_rpcerror(task, task->tk_status);
} else {
task->tk_action = call_refresh;
task->tk_cred_retry--;
@@ -2003,9 +2003,6 @@ call_bind_status(struct rpc_task *task)
status = -EOPNOTSUPP;
break;
}
- if (task->tk_rebind_retry == 0)
- break;
- task->tk_rebind_retry--;
rpc_delay(task, 3*HZ);
goto retry_timeout;
case -ENOBUFS:
@@ -2687,6 +2684,7 @@ out_msg_denied:
case rpc_autherr_rejectedverf:
case rpcsec_gsserr_credproblem:
case rpcsec_gsserr_ctxproblem:
+ rpcauth_invalcred(task);
if (!task->tk_cred_retry)
break;
task->tk_cred_retry--;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 32ffa801a5b9..a5c6a3d05741 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -827,7 +827,6 @@ rpc_init_task_statistics(struct rpc_task *task)
/* Initialize retry counters */
task->tk_garb_retry = 2;
task->tk_cred_retry = 2;
- task->tk_rebind_retry = 2;
/* starting timestamp */
task->tk_start = ktime_get();
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 5c04ba7d456b..5b47e3363239 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -428,14 +428,23 @@ static int unix_gid_hash(kuid_t uid)
return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
}
-static void unix_gid_put(struct kref *kref)
+static void unix_gid_free(struct rcu_head *rcu)
{
- struct cache_head *item = container_of(kref, struct cache_head, ref);
- struct unix_gid *ug = container_of(item, struct unix_gid, h);
+ struct unix_gid *ug = container_of(rcu, struct unix_gid, rcu);
+ struct cache_head *item = &ug->h;
+
if (test_bit(CACHE_VALID, &item->flags) &&
!test_bit(CACHE_NEGATIVE, &item->flags))
put_group_info(ug->gi);
- kfree_rcu(ug, rcu);
+ kfree(ug);
+}
+
+static void unix_gid_put(struct kref *kref)
+{
+ struct cache_head *item = container_of(kref, struct cache_head, ref);
+ struct unix_gid *ug = container_of(item, struct unix_gid, h);
+
+ call_rcu(&ug->rcu, unix_gid_free);
}
static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
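
kfree_rcu() can only defer the kfree() itself; here the group_info reference must also be dropped after the grace period, which needs a real callback. The general pattern, reduced to a sketch with a hypothetical refcounted object (struct obj and put_resource() are placeholders; the CACHE_VALID/CACHE_NEGATIVE guard from the hunk above is omitted for brevity):

/* Sketch: when RCU-deferred teardown must do more than kfree(),
 * switch from kfree_rcu() to call_rcu() with a callback that does
 * the extra release work before freeing the object. */
struct obj {
        struct resource *res;   /* reference that must outlive readers */
        struct rcu_head rcu;
};

static void obj_free_rcu(struct rcu_head *rcu)
{
        struct obj *o = container_of(rcu, struct obj, rcu);

        put_resource(o->res);   /* extra release, after the grace period */
        kfree(o);
}

/* release path: was kfree_rcu(o, rcu), which cannot drop o->res */
static void obj_release(struct obj *o)
{
        call_rcu(&o->rcu, obj_free_rcu);
}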
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index d52abde51f1b..aeb0b3e48ad5 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -728,12 +728,6 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
dprintk("svc: socket %p TCP (listen) state change %d\n",
sk, sk->sk_state);
- if (svsk) {
- /* Refer to svc_setup_socket() for details. */
- rmb();
- svsk->sk_odata(sk);
- }
-
/*
* This callback may be called twice when a new connection
* is established as a child socket inherits everything
@@ -742,15 +736,20 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
* when one of child sockets become ESTABLISHED.
* 2) data_ready method of the child socket may be called
* when it receives data before the socket is accepted.
- * In case of 2, we should ignore it silently.
+ * In case of 2, we should ignore it silently and DO NOT
+ * dereference svsk.
*/
- if (sk->sk_state == TCP_LISTEN) {
- if (svsk) {
- set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
- svc_xprt_enqueue(&svsk->sk_xprt);
- } else
- printk("svc: socket %p: no user data\n", sk);
- }
+ if (sk->sk_state != TCP_LISTEN)
+ return;
+
+ if (svsk) {
+ /* Refer to svc_setup_socket() for details. */
+ rmb();
+ svsk->sk_odata(sk);
+ set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
+ svc_xprt_enqueue(&svsk->sk_xprt);
+ } else
+ printk("svc: socket %p: no user data\n", sk);
}
/*
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index 78c075a68c04..a11e80d17830 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -253,8 +253,9 @@ struct rpc_xprt *xprt_iter_current_entry(struct rpc_xprt_iter *xpi)
return xprt_switch_find_current_entry(head, xpi->xpi_cursor);
}
-bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
- const struct sockaddr *sap)
+static
+bool __rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
+ const struct sockaddr *sap)
{
struct list_head *head;
struct rpc_xprt *pos;
@@ -273,6 +274,18 @@ bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
return false;
}
+bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
+ const struct sockaddr *sap)
+{
+ bool res;
+
+ rcu_read_lock();
+ res = __rpc_xprt_switch_has_addr(xps, sap);
+ rcu_read_unlock();
+
+ return res;
+}
+
static
struct rpc_xprt *xprt_switch_find_next_entry(struct list_head *head,
const struct rpc_xprt *cur)
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 0f4d39fdb48f..cfae1a871578 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1034,9 +1034,9 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
return req;
out4:
- kfree(req->rl_sendbuf);
+ rpcrdma_regbuf_free(req->rl_sendbuf);
out3:
- kfree(req->rl_rdmabuf);
+ rpcrdma_regbuf_free(req->rl_rdmabuf);
out2:
kfree(req);
out1:
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 81f0e03b71b6..3095442b0382 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -496,8 +496,8 @@ xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
int flags, struct rpc_rqst *req)
{
struct xdr_buf *buf = &req->rq_private_buf;
- size_t want, uninitialized_var(read);
- ssize_t uninitialized_var(ret);
+ size_t want, read;
+ ssize_t ret;
xs_read_header(transport, buf);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 577f71dd63fb..a0bc919e4e47 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -1025,6 +1025,12 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
#ifdef CONFIG_TIPC_MEDIA_UDP
if (attrs[TIPC_NLA_BEARER_UDP_OPTS]) {
+ if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
+ rtnl_unlock();
+ NL_SET_ERR_MSG(info->extack, "UDP option is unsupported");
+ return -EINVAL;
+ }
+
err = tipc_udp_nl_bearer_add(b,
attrs[TIPC_NLA_BEARER_UDP_OPTS]);
if (err) {
@@ -1195,7 +1201,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
struct tipc_nl_msg msg;
struct tipc_media *media;
struct sk_buff *rep;
- struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+ struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1];
if (!info->attrs[TIPC_NLA_MEDIA])
return -EINVAL;
@@ -1244,7 +1250,7 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
int err;
char *name;
struct tipc_media *m;
- struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+ struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1];
if (!info->attrs[TIPC_NLA_MEDIA])
return -EINVAL;
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 90cf7e0bbaf0..58ee5ee70781 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -112,6 +112,15 @@ static void __net_exit tipc_exit_net(struct net *net)
cond_resched();
}
+static void __net_exit tipc_pernet_pre_exit(struct net *net)
+{
+ tipc_node_pre_cleanup_net(net);
+}
+
+static struct pernet_operations tipc_pernet_pre_exit_ops = {
+ .pre_exit = tipc_pernet_pre_exit,
+};
+
static struct pernet_operations tipc_net_ops = {
.init = tipc_init_net,
.exit = tipc_exit_net,
@@ -150,6 +159,10 @@ static int __init tipc_init(void)
if (err)
goto out_pernet_topsrv;
+ err = register_pernet_subsys(&tipc_pernet_pre_exit_ops);
+ if (err)
+ goto out_register_pernet_subsys;
+
err = tipc_bearer_setup();
if (err)
goto out_bearer;
@@ -170,6 +183,8 @@ out_netlink_compat:
out_netlink:
tipc_bearer_cleanup();
out_bearer:
+ unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
+out_register_pernet_subsys:
unregister_pernet_device(&tipc_topsrv_net_ops);
out_pernet_topsrv:
tipc_socket_stop();
@@ -187,6 +202,7 @@ static void __exit tipc_exit(void)
tipc_netlink_compat_stop();
tipc_netlink_stop();
tipc_bearer_cleanup();
+ unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
unregister_pernet_device(&tipc_topsrv_net_ops);
tipc_socket_stop();
unregister_pernet_device(&tipc_net_ops);
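
The new pre_exit hook runs for a dying namespace before any pernet .exit handler, while every namespace involved is still fully alive, which is what lets tipc_node_pre_cleanup_net() strip cross-namespace peer_net pointers safely; note that tipc_init() unwinds in exact reverse order of registration. A minimal sketch of a .pre_exit-only registration (the demo_* names are placeholders):

static void __net_exit demo_pre_exit(struct net *net)
{
        /* drop references other namespaces hold into 'net' here;
         * all namespaces are still live at this point */
}

static struct pernet_operations demo_pre_exit_ops = {
        .pre_exit = demo_pre_exit,
};

static int __init demo_init(void)
{
        return register_pernet_subsys(&demo_pre_exit_ops);
}

static void __exit demo_exit(void)
{
        unregister_pernet_subsys(&demo_pre_exit_ops);
}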
diff --git a/net/tipc/core.h b/net/tipc/core.h
index c6bda91f8581..59f97ef12e60 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -59,6 +59,7 @@
#include <net/netns/generic.h>
#include <linux/rhashtable.h>
#include <net/genetlink.h>
+#include <net/netns/hash.h>
#ifdef pr_fmt
#undef pr_fmt
@@ -202,6 +203,11 @@ static inline int in_range(u16 val, u16 min, u16 max)
return !less(val, min) && !more(val, max);
}
+static inline u32 tipc_net_hash_mixes(struct net *net, int tn_rand)
+{
+ return net_hash_mix(&init_net) ^ net_hash_mix(net) ^ tn_rand;
+}
+
#ifdef CONFIG_SYSCTL
int tipc_register_sysctl(void);
void tipc_unregister_sysctl(void);
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index c138d68e8a69..9c64567f8a74 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -94,6 +94,7 @@ static void tipc_disc_init_msg(struct net *net, struct sk_buff *skb,
msg_set_dest_domain(hdr, dest_domain);
msg_set_bc_netid(hdr, tn->net_id);
b->media->addr2msg(msg_media_addr(hdr), &b->addr);
+ msg_set_peer_net_hash(hdr, tipc_net_hash_mixes(net, tn->random));
msg_set_node_id(hdr, tipc_own_id(net));
}
@@ -146,8 +147,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
{
struct net *net = d->net;
struct tipc_net *tn = tipc_net(net);
- bool trial = time_before(jiffies, tn->addr_trial_end);
u32 self = tipc_own_addr(net);
+ bool trial = time_before(jiffies, tn->addr_trial_end) && !self;
if (mtyp == DSC_TRIAL_FAIL_MSG) {
if (!trial)
@@ -193,6 +194,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
{
struct tipc_net *tn = tipc_net(net);
struct tipc_msg *hdr = buf_msg(skb);
+ u32 pnet_hash = msg_peer_net_hash(hdr);
u16 caps = msg_node_capabilities(hdr);
bool legacy = tn->legacy_addr_format;
u32 sugg = msg_sugg_node_addr(hdr);
@@ -208,7 +210,10 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
u32 self;
int err;
- skb_linearize(skb);
+ if (skb_linearize(skb)) {
+ kfree_skb(skb);
+ return;
+ }
hdr = buf_msg(skb);
if (caps & TIPC_NODE_ID128)
@@ -241,7 +246,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
return;
if (!tipc_in_scope(legacy, b->domain, src))
return;
- tipc_node_check_dest(net, src, peer_id, b, caps, signature,
+ tipc_node_check_dest(net, src, peer_id, b, caps, signature, pnet_hash,
&maddr, &respond, &dupl_addr);
if (dupl_addr)
disc_dupl_alert(b, src, &maddr);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 8f2ee71c63c6..b653d16ab21f 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1971,7 +1971,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
if (tipc_own_addr(l->net) > msg_prevnode(hdr))
l->net_plane = msg_net_plane(hdr);
- skb_linearize(skb);
+ if (skb_linearize(skb))
+ goto exit;
+
hdr = buf_msg(skb);
data = msg_data(hdr);
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index e7155a774300..0b9ad3b5ff18 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -130,7 +130,7 @@ static void map_set(u64 *up_map, int i, unsigned int v)
static int map_get(u64 up_map, int i)
{
- return (up_map & (1 << i)) >> i;
+ return (up_map & (1ULL << i)) >> i;
}
static struct tipc_peer *peer_prev(struct tipc_peer *peer)
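
The old map_get() computed 1 << i in plain int, so any peer index of 32 or more shifted into undefined behaviour and the test read the wrong bit of the 64-bit map; 1ULL keeps the whole expression 64 bits wide. The difference is easy to see in isolation:

#include <stdio.h>
#include <stdint.h>

static int map_get_bad(uint64_t up_map, int i)
{
        return (up_map & (1 << i)) >> i;    /* int shift: UB for i >= 32 */
}

static int map_get_good(uint64_t up_map, int i)
{
        return (up_map & (1ULL << i)) >> i; /* 64-bit shift: well defined */
}

int main(void)
{
        uint64_t map = 1ULL << 40;          /* only bit 40 is set */

        /* On common compilers the bad version masks with 1 << (40 & 31)
         * and prints 0; strictly it is undefined behaviour. */
        printf("bad=%d good=%d\n",
               map_get_bad(map, 40), map_get_good(map, 40));
        return 0;
}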
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 0daa6f04ca81..6d692546acdc 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -358,6 +358,11 @@ static inline u32 msg_connected(struct tipc_msg *m)
return msg_type(m) == TIPC_CONN_MSG;
}
+static inline u32 msg_direct(struct tipc_msg *m)
+{
+ return msg_type(m) == TIPC_DIRECT_MSG;
+}
+
static inline u32 msg_errcode(struct tipc_msg *m)
{
return msg_bits(m, 1, 25, 0xf);
@@ -1026,6 +1031,20 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
}
+/* Word 13
+ */
+static inline void msg_set_peer_net_hash(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 13, n);
+}
+
+static inline u32 msg_peer_net_hash(struct tipc_msg *m)
+{
+ return msg_word(m, 13);
+}
+
+/* Word 14
+ */
static inline u32 msg_sugg_node_addr(struct tipc_msg *m)
{
return msg_word(m, 14);
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 661bc2551a0a..6ac84e7c8b63 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -146,7 +146,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
struct publication *publ;
struct sk_buff *skb = NULL;
struct distr_item *item = NULL;
- u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
+ u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
ITEM_SIZE) * ITEM_SIZE;
u32 msg_rem = msg_dsz;
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index e9bbf4a00881..5c58aff4d4fa 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -87,7 +87,7 @@ const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
[TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_LINK_NAME] = { .type = NLA_STRING,
+ [TIPC_NLA_LINK_NAME] = { .type = NLA_NUL_STRING,
.len = TIPC_MAX_LINK_NAME },
[TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
[TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
@@ -118,7 +118,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
const struct nla_policy tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = {
[TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_BEARER_NAME] = { .type = NLA_STRING,
+ [TIPC_NLA_BEARER_NAME] = { .type = NLA_NUL_STRING,
.len = TIPC_MAX_BEARER_NAME },
[TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED },
[TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 }
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 561ea834f732..5c61b8ee7fc0 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -101,6 +101,7 @@ static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
return -EMSGSIZE;
skb_put(skb, TLV_SPACE(len));
+ memset(tlv, 0, TLV_SPACE(len));
tlv->tlv_type = htons(type);
tlv->tlv_len = htons(TLV_LENGTH(len));
if (len && data)
@@ -857,7 +858,7 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
};
ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
- if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
+ if (TLV_GET_DATA_LEN(msg->req) < (int)sizeof(struct tipc_name_table_query))
return -EINVAL;
depth = ntohl(ntq->depth);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index c8f6177dd5a2..535a4a778640 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -126,6 +126,8 @@ struct tipc_node {
struct timer_list timer;
struct rcu_head rcu;
unsigned long delete_at;
+ struct net *peer_net;
+ u32 peer_hash_mix;
};
/* Node FSM states and events:
@@ -184,7 +186,7 @@ static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
return n->links[bearer_id].link;
}
-int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
+int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
{
struct tipc_node *n;
int bearer_id;
@@ -194,6 +196,14 @@ int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
if (unlikely(!n))
return mtu;
+ /* Allow MAX_MSG_SIZE when building connection oriented message
+ * if they are in the same core network
+ */
+ if (n->peer_net && connected) {
+ tipc_node_put(n);
+ return mtu;
+ }
+
bearer_id = n->active_links[sel & 1];
if (likely(bearer_id != INVALID_BEARER_ID))
mtu = n->links[bearer_id].mtu;
@@ -360,8 +370,37 @@ static void tipc_node_write_unlock(struct tipc_node *n)
}
}
+static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
+{
+ int net_id = tipc_netid(n->net);
+ struct tipc_net *tn_peer;
+ struct net *tmp;
+ u32 hash_chk;
+
+ if (n->peer_net)
+ return;
+
+ for_each_net_rcu(tmp) {
+ tn_peer = tipc_net(tmp);
+ if (!tn_peer)
+ continue;
+ /* Integrity checking whether node exists in namespace or not */
+ if (tn_peer->net_id != net_id)
+ continue;
+ if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
+ continue;
+ hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
+ if (hash_mixes ^ hash_chk)
+ continue;
+ n->peer_net = tmp;
+ n->peer_hash_mix = hash_mixes;
+ break;
+ }
+}
+
static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
- u8 *peer_id, u16 capabilities)
+ u8 *peer_id, u16 capabilities,
+ u32 signature, u32 hash_mixes)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *n, *temp_node;
@@ -372,6 +411,8 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
spin_lock_bh(&tn->node_list_lock);
n = tipc_node_find(net, addr);
if (n) {
+ if (n->peer_hash_mix ^ hash_mixes)
+ tipc_node_assign_peer_net(n, hash_mixes);
if (n->capabilities == capabilities)
goto exit;
/* Same node may come back with new capabilities */
@@ -389,6 +430,7 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
tn->capabilities &= temp_node->capabilities;
}
+
goto exit;
}
n = kzalloc(sizeof(*n), GFP_ATOMIC);
@@ -399,6 +441,10 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
n->addr = addr;
memcpy(&n->peer_id, peer_id, 16);
n->net = net;
+ n->peer_net = NULL;
+ n->peer_hash_mix = 0;
+ /* Assign kernel local namespace if exists */
+ tipc_node_assign_peer_net(n, hash_mixes);
n->capabilities = capabilities;
kref_init(&n->kref);
rwlock_init(&n->lock);
@@ -979,7 +1025,7 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
void tipc_node_check_dest(struct net *net, u32 addr,
u8 *peer_id, struct tipc_bearer *b,
- u16 capabilities, u32 signature,
+ u16 capabilities, u32 signature, u32 hash_mixes,
struct tipc_media_addr *maddr,
bool *respond, bool *dupl_addr)
{
@@ -989,8 +1035,9 @@ void tipc_node_check_dest(struct net *net, u32 addr,
bool addr_match = false;
bool sign_match = false;
bool link_up = false;
+ bool link_is_reset = false;
bool accept_addr = false;
- bool reset = true;
+ bool reset = false;
char *if_name;
unsigned long intv;
u16 session;
@@ -998,7 +1045,8 @@ void tipc_node_check_dest(struct net *net, u32 addr,
*dupl_addr = false;
*respond = false;
- n = tipc_node_create(net, addr, peer_id, capabilities);
+ n = tipc_node_create(net, addr, peer_id, capabilities, signature,
+ hash_mixes);
if (!n)
return;
@@ -1009,14 +1057,17 @@ void tipc_node_check_dest(struct net *net, u32 addr,
/* Prepare to validate requesting node's signature and media address */
l = le->link;
link_up = l && tipc_link_is_up(l);
+ link_is_reset = l && tipc_link_is_reset(l);
addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
sign_match = (signature == n->signature);
/* These three flags give us eight permutations: */
if (sign_match && addr_match && link_up) {
- /* All is fine. Do nothing. */
- reset = false;
+ /* All is fine. Ignore requests. */
+ /* Peer node is not a container/local namespace */
+ if (!n->peer_hash_mix)
+ n->peer_hash_mix = hash_mixes;
} else if (sign_match && addr_match && !link_up) {
/* Respond. The link will come up in due time */
*respond = true;
@@ -1038,6 +1089,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
*/
accept_addr = true;
*respond = true;
+ reset = true;
} else if (!sign_match && addr_match && link_up) {
/* Peer node rebooted. Two possibilities:
* - Delayed re-discovery; this link endpoint has already
@@ -1069,6 +1121,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
n->signature = signature;
accept_addr = true;
*respond = true;
+ reset = true;
}
if (!accept_addr)
@@ -1097,6 +1150,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
tipc_link_fsm_evt(l, LINK_RESET_EVT);
if (n->state == NODE_FAILINGOVER)
tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+ link_is_reset = tipc_link_is_reset(l);
le->link = l;
n->link_cnt++;
tipc_node_calculate_timer(n, l);
@@ -1109,7 +1163,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
tipc_node_write_unlock(n);
- if (reset && l && !tipc_link_is_reset(l))
+ if (reset && !link_is_reset)
tipc_node_link_down(n, b->identity, false);
tipc_node_put(n);
}
@@ -1342,7 +1396,8 @@ static void node_lost_contact(struct tipc_node *n,
/* Notify publications from this node */
n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
-
+ n->peer_net = NULL;
+ n->peer_hash_mix = 0;
/* Notify sockets connected to node */
list_for_each_entry_safe(conn, safe, conns, list) {
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
@@ -1424,6 +1479,57 @@ msg_full:
return -EMSGSIZE;
}
+static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
+{
+ struct tipc_msg *hdr = buf_msg(skb_peek(list));
+ struct sk_buff_head inputq;
+
+ switch (msg_user(hdr)) {
+ case TIPC_LOW_IMPORTANCE:
+ case TIPC_MEDIUM_IMPORTANCE:
+ case TIPC_HIGH_IMPORTANCE:
+ case TIPC_CRITICAL_IMPORTANCE:
+ if (msg_connected(hdr) || msg_named(hdr) ||
+ msg_direct(hdr)) {
+ tipc_loopback_trace(peer_net, list);
+ spin_lock_init(&list->lock);
+ tipc_sk_rcv(peer_net, list);
+ return;
+ }
+ if (msg_mcast(hdr)) {
+ tipc_loopback_trace(peer_net, list);
+ skb_queue_head_init(&inputq);
+ tipc_sk_mcast_rcv(peer_net, list, &inputq);
+ __skb_queue_purge(list);
+ skb_queue_purge(&inputq);
+ return;
+ }
+ return;
+ case MSG_FRAGMENTER:
+ if (tipc_msg_assemble(list)) {
+ tipc_loopback_trace(peer_net, list);
+ skb_queue_head_init(&inputq);
+ tipc_sk_mcast_rcv(peer_net, list, &inputq);
+ __skb_queue_purge(list);
+ skb_queue_purge(&inputq);
+ }
+ return;
+ case GROUP_PROTOCOL:
+ case CONN_MANAGER:
+ tipc_loopback_trace(peer_net, list);
+ spin_lock_init(&list->lock);
+ tipc_sk_rcv(peer_net, list);
+ return;
+ case LINK_PROTOCOL:
+ case NAME_DISTRIBUTOR:
+ case TUNNEL_PROTOCOL:
+ case BCAST_PROTOCOL:
+ return;
+ default:
+ return;
+ };
+}
+
/**
* tipc_node_xmit() is the general link level function for message sending
* @net: the applicable net namespace
@@ -1439,6 +1545,8 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
struct tipc_link_entry *le = NULL;
struct tipc_node *n;
struct sk_buff_head xmitq;
+ bool node_up = false;
+ struct net *peer_net;
int bearer_id;
int rc;
@@ -1455,6 +1563,22 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
return -EHOSTUNREACH;
}
+ rcu_read_lock();
+ tipc_node_read_lock(n);
+ node_up = node_is_up(n);
+ peer_net = n->peer_net;
+ tipc_node_read_unlock(n);
+ if (node_up && peer_net && check_net(peer_net)) {
+ /* xmit inner linux container */
+ tipc_lxc_xmit(peer_net, list);
+ if (likely(skb_queue_empty(list))) {
+ rcu_read_unlock();
+ tipc_node_put(n);
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+
tipc_node_read_lock(n);
bearer_id = n->active_links[selector & 1];
if (unlikely(bearer_id == INVALID_BEARER_ID)) {
@@ -2591,3 +2715,33 @@ int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
return i;
}
+
+void tipc_node_pre_cleanup_net(struct net *exit_net)
+{
+ struct tipc_node *n;
+ struct tipc_net *tn;
+ struct net *tmp;
+
+ rcu_read_lock();
+ for_each_net_rcu(tmp) {
+ if (tmp == exit_net)
+ continue;
+ tn = tipc_net(tmp);
+ if (!tn)
+ continue;
+ spin_lock_bh(&tn->node_list_lock);
+ list_for_each_entry_rcu(n, &tn->node_list, list) {
+ if (!n->peer_net)
+ continue;
+ if (n->peer_net != exit_net)
+ continue;
+ tipc_node_write_lock(n);
+ n->peer_net = NULL;
+ n->peer_hash_mix = 0;
+ tipc_node_write_unlock_fast(n);
+ break;
+ }
+ spin_unlock_bh(&tn->node_list_lock);
+ }
+ rcu_read_unlock();
+}
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 291d0ecd4101..30563c4f35d5 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -75,7 +75,7 @@ u32 tipc_node_get_addr(struct tipc_node *node);
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr);
void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128,
struct tipc_bearer *bearer,
- u16 capabilities, u32 signature,
+ u16 capabilities, u32 signature, u32 hash_mixes,
struct tipc_media_addr *maddr,
bool *respond, bool *dupl_addr);
void tipc_node_delete_links(struct net *net, int bearer_id);
@@ -92,7 +92,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr);
void tipc_node_broadcast(struct net *net, struct sk_buff *skb);
int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
-int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel);
+int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected);
bool tipc_node_is_up(struct net *net, u32 addr);
u16 tipc_node_get_capabilities(struct net *net, u32 addr);
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
@@ -107,4 +107,5 @@ int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
struct netlink_callback *cb);
+void tipc_node_pre_cleanup_net(struct net *exit_net);
#endif
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 58c4d61d603f..cf4f90de566e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -866,7 +866,7 @@ static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
/* Build message as chain of buffers */
__skb_queue_head_init(&pkts);
- mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
+ mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
return rc;
@@ -1407,7 +1407,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
}
__skb_queue_head_init(&pkts);
- mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
+ mtu = tipc_node_get_mtu(net, dnode, tsk->portid, true);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
return rc;
@@ -1547,7 +1547,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
tipc_set_sk_state(sk, TIPC_ESTABLISHED);
tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
- tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
+ tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
__skb_queue_purge(&sk->sk_write_queue);
if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index 444e1792d02c..88e8e8d69b60 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -177,7 +177,7 @@ static void tipc_conn_close(struct tipc_conn *con)
conn_put(con);
}
-static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s)
+static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s, struct socket *sock)
{
struct tipc_conn *con;
int ret;
@@ -203,10 +203,12 @@ static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s)
}
con->conid = ret;
s->idr_in_use++;
- spin_unlock_bh(&s->idr_lock);
set_bit(CF_CONNECTED, &con->flags);
con->server = s;
+ con->sock = sock;
+ conn_get(con);
+ spin_unlock_bh(&s->idr_lock);
return con;
}
@@ -450,17 +452,24 @@ static void tipc_conn_data_ready(struct sock *sk)
static void tipc_topsrv_accept(struct work_struct *work)
{
struct tipc_topsrv *srv = container_of(work, struct tipc_topsrv, awork);
- struct socket *lsock = srv->listener;
- struct socket *newsock;
+ struct socket *newsock, *lsock;
struct tipc_conn *con;
struct sock *newsk;
int ret;
+ spin_lock_bh(&srv->idr_lock);
+ if (!srv->listener) {
+ spin_unlock_bh(&srv->idr_lock);
+ return;
+ }
+ lsock = srv->listener;
+ spin_unlock_bh(&srv->idr_lock);
+
while (1) {
ret = kernel_accept(lsock, &newsock, O_NONBLOCK);
if (ret < 0)
return;
- con = tipc_conn_alloc(srv);
+ con = tipc_conn_alloc(srv, newsock);
if (IS_ERR(con)) {
ret = PTR_ERR(con);
sock_release(newsock);
@@ -472,11 +481,11 @@ static void tipc_topsrv_accept(struct work_struct *work)
newsk->sk_data_ready = tipc_conn_data_ready;
newsk->sk_write_space = tipc_conn_write_space;
newsk->sk_user_data = con;
- con->sock = newsock;
write_unlock_bh(&newsk->sk_callback_lock);
/* Wake up receive process in case of 'SYN+' message */
newsk->sk_data_ready(newsk);
+ conn_put(con);
}
}
@@ -489,7 +498,7 @@ static void tipc_topsrv_listener_data_ready(struct sock *sk)
read_lock_bh(&sk->sk_callback_lock);
srv = sk->sk_user_data;
- if (srv->listener)
+ if (srv)
queue_work(srv->rcv_wq, &srv->awork);
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -568,19 +577,19 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
sub.seq.upper = upper;
sub.timeout = TIPC_WAIT_FOREVER;
sub.filter = filter;
- *(u32 *)&sub.usr_handle = port;
+ *(u64 *)&sub.usr_handle = (u64)port;
- con = tipc_conn_alloc(tipc_topsrv(net));
+ con = tipc_conn_alloc(tipc_topsrv(net), NULL);
if (IS_ERR(con))
return false;
*conid = con->conid;
- con->sock = NULL;
rc = tipc_conn_rcv_sub(tipc_topsrv(net), con, &sub);
- if (rc >= 0)
- return true;
+ if (rc)
+ conn_put(con);
+
conn_put(con);
- return false;
+ return !rc;
}
void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
@@ -699,8 +708,9 @@ static void tipc_topsrv_stop(struct net *net)
__module_get(lsock->sk->sk_prot_creator->owner);
srv->listener = NULL;
spin_unlock_bh(&srv->idr_lock);
- sock_release(lsock);
+
tipc_topsrv_work_stop(srv);
+ sock_release(lsock);
idr_destroy(&srv->conn_idr);
kfree(srv);
}
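
The restructured tipc_conn_alloc() completes the connection (including con->sock) and takes the caller's extra reference before dropping idr_lock, so tipc_conn_data_ready() can never look up a half-initialized con; tipc_topsrv_stop() likewise stops the workqueues before releasing the listener socket. The allocation-side pattern, reduced to a sketch (struct conn and the surrounding idr are illustrative, not the TIPC types):

struct conn {
        refcount_t ref;
        struct socket *sock;
};

static struct conn *conn_alloc(spinlock_t *idr_lock, struct socket *sock)
{
        struct conn *con = kzalloc(sizeof(*con), GFP_ATOMIC);

        if (!con)
                return ERR_PTR(-ENOMEM);
        refcount_set(&con->ref, 1);

        spin_lock_bh(idr_lock);
        /* ... idr_alloc() publishes 'con' to lookups here ... */
        con->sock = sock;               /* finish initialization ... */
        refcount_inc(&con->ref);        /* ... and pin it for the caller */
        spin_unlock_bh(idr_lock);
        return con;
}

/* the caller wires up the sk callbacks, then drops its pin with conn_put() */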
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 7aba4ee77aba..cb51a2f46b11 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -371,13 +371,11 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
rc = -EINVAL;
goto out;
}
- lock_sock(sk);
memcpy(crypto_info_aes_gcm_128->iv,
ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
TLS_CIPHER_AES_GCM_128_IV_SIZE);
memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
- release_sock(sk);
if (copy_to_user(optval,
crypto_info_aes_gcm_128,
sizeof(*crypto_info_aes_gcm_128)))
@@ -395,13 +393,11 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
rc = -EINVAL;
goto out;
}
- lock_sock(sk);
memcpy(crypto_info_aes_gcm_256->iv,
ctx->tx.iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
TLS_CIPHER_AES_GCM_256_IV_SIZE);
memcpy(crypto_info_aes_gcm_256->rec_seq, ctx->tx.rec_seq,
TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
- release_sock(sk);
if (copy_to_user(optval,
crypto_info_aes_gcm_256,
sizeof(*crypto_info_aes_gcm_256)))
@@ -421,6 +417,8 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
{
int rc = 0;
+ lock_sock(sk);
+
switch (optname) {
case TLS_TX:
rc = do_tls_getsockopt_tx(sk, optval, optlen);
@@ -429,6 +427,9 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
rc = -ENOPROTOOPT;
break;
}
+
+ release_sock(sk);
+
return rc;
}
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index af3be9a29d6d..910da98d6bfb 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -677,7 +677,7 @@ static int tls_push_record(struct sock *sk, int flags,
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
- u32 i, split_point, uninitialized_var(orig_end);
+ u32 i, split_point, orig_end;
struct sk_msg *msg_pl, *msg_en;
struct aead_request *req;
bool split;
@@ -807,7 +807,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
psock = sk_psock_get(sk);
if (!psock || !policy) {
err = tls_push_record(sk, flags, record_type);
- if (err && sk->sk_err == EBADMSG) {
+ if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
err = -sk->sk_err;
@@ -836,7 +836,7 @@ more_data:
switch (psock->eval) {
case __SK_PASS:
err = tls_push_record(sk, flags, record_type);
- if (err && sk->sk_err == EBADMSG) {
+ if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
err = -sk->sk_err;
@@ -945,7 +945,9 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
return -EOPNOTSUPP;
- mutex_lock(&tls_ctx->tx_lock);
+ ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
+ if (ret)
+ return ret;
lock_sock(sk);
if (unlikely(msg->msg_controllen)) {
@@ -1209,6 +1211,8 @@ alloc_payload:
}
sk_msg_page_add(msg_pl, page, copy, offset);
+ msg_pl->sg.copybreak = 0;
+ msg_pl->sg.curr = msg_pl->sg.end;
sk_mem_charge(sk, copy);
offset += copy;
@@ -1279,7 +1283,9 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
return -EOPNOTSUPP;
- mutex_lock(&tls_ctx->tx_lock);
+ ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
+ if (ret)
+ return ret;
lock_sock(sk);
ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
release_sock(sk);
@@ -1742,6 +1748,7 @@ int tls_sw_recvmsg(struct sock *sk,
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct sk_psock *psock;
+ int num_async, pending;
unsigned char control = 0;
ssize_t decrypted = 0;
struct strp_msg *rxm;
@@ -1754,8 +1761,6 @@ int tls_sw_recvmsg(struct sock *sk,
bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
bool is_peek = flags & MSG_PEEK;
bool bpf_strp_enabled;
- int num_async = 0;
- int pending;
flags |= nonblock;
@@ -1772,17 +1777,18 @@ int tls_sw_recvmsg(struct sock *sk,
if (err < 0) {
tls_err_abort(sk, err);
goto end;
- } else {
- copied = err;
}
- if (len <= copied)
- goto recv_end;
+ copied = err;
+ if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA))
+ goto end;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
len = len - copied;
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ decrypted = 0;
+ num_async = 0;
while (len && (decrypted + copied < target || ctx->recv_pkt)) {
bool retain_skb = false;
bool zc = false;
@@ -2263,11 +2269,19 @@ static void tx_work_handler(struct work_struct *work)
if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
return;
- mutex_lock(&tls_ctx->tx_lock);
- lock_sock(sk);
- tls_tx_records(sk, -1);
- release_sock(sk);
- mutex_unlock(&tls_ctx->tx_lock);
+
+ if (mutex_trylock(&tls_ctx->tx_lock)) {
+ lock_sock(sk);
+ tls_tx_records(sk, -1);
+ release_sock(sk);
+ mutex_unlock(&tls_ctx->tx_lock);
+ } else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
+ /* Someone is holding the tx_lock, they will likely run Tx
+ * and cancel the work on their way out of the lock section.
+ * Schedule a long delay just in case.
+ */
+ schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
+ }
}
void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
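
Taking tx_lock unconditionally here could deadlock against a path that holds tx_lock while cancelling this very work; mutex_trylock() lets the handler yield to the lock holder, and re-arming the delayed work is the safety net for a holder that leaves without transmitting. A skeleton of that shape (struct tx_ctx, do_tx() and BIT_TX_SCHEDULED are placeholders for the tls_sw types):

#define BIT_TX_SCHEDULED        0

struct tx_ctx {
        struct mutex tx_lock;
        unsigned long tx_bitmask;
        struct delayed_work work;
};

static void do_tx(struct tx_ctx *ctx); /* stands in for tls_tx_records() */

static void tx_work_handler(struct work_struct *work)
{
        struct tx_ctx *ctx = container_of(work, struct tx_ctx, work.work);

        if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
                return;

        if (mutex_trylock(&ctx->tx_lock)) {
                do_tx(ctx);
                mutex_unlock(&ctx->tx_lock);
        } else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
                /* the lock holder is expected to transmit and cancel us;
                 * requeue with a long delay just in case it does not */
                schedule_delayed_work(&ctx->work, msecs_to_jiffies(10));
        }
}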
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f33e90bd0683..9b1dd845bca1 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -531,7 +531,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
/* Clear state */
unix_state_lock(sk);
sock_orphan(sk);
- sk->sk_shutdown = SHUTDOWN_MASK;
+ WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
path = u->path;
u->path.dentry = NULL;
u->path.mnt = NULL;
@@ -549,7 +549,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
unix_state_lock(skpair);
/* No more writes */
- skpair->sk_shutdown = SHUTDOWN_MASK;
+ WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
skpair->sk_err = ECONNRESET;
unix_state_unlock(skpair);
@@ -589,7 +589,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
* What the above comment does talk about? --ANK(980817)
*/
- if (unix_tot_inflight)
+ if (READ_ONCE(unix_tot_inflight))
unix_gc(); /* Garbage collect fds */
}
@@ -701,7 +701,7 @@ static int unix_set_peek_off(struct sock *sk, int val)
if (mutex_lock_interruptible(&u->iolock))
return -EINTR;
- sk->sk_peek_off = val;
+ WRITE_ONCE(sk->sk_peek_off, val);
mutex_unlock(&u->iolock);
return 0;
@@ -1118,13 +1118,11 @@ static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
unix_state_lock(sk1);
return;
}
- if (sk1 < sk2) {
- unix_state_lock(sk1);
- unix_state_lock_nested(sk2);
- } else {
- unix_state_lock(sk2);
- unix_state_lock_nested(sk1);
- }
+ if (sk1 > sk2)
+ swap(sk1, sk2);
+
+ unix_state_lock(sk1);
+ unix_state_lock_nested(sk2, U_LOCK_SECOND);
}
static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
@@ -1227,7 +1225,7 @@ static long unix_wait_for_peer(struct sock *other, long timeo)
sched = !sock_flag(other, SOCK_DEAD) &&
!(other->sk_shutdown & RCV_SHUTDOWN) &&
- unix_recvq_full(other);
+ unix_recvq_full_lockless(other);
unix_state_unlock(other);
@@ -1343,7 +1341,7 @@ restart:
goto out_unlock;
}
- unix_state_lock_nested(sk);
+ unix_state_lock_nested(sk, U_LOCK_SECOND);
if (sk->sk_state != st) {
unix_state_unlock(sk);
@@ -1979,6 +1977,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
if (false) {
alloc_skb:
+ spin_unlock(&other->sk_receive_queue.lock);
unix_state_unlock(other);
mutex_unlock(&unix_sk(other)->iolock);
newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
@@ -2018,6 +2017,7 @@ alloc_skb:
init_scm = false;
}
+ spin_lock(&other->sk_receive_queue.lock);
skb = skb_peek_tail(&other->sk_receive_queue);
if (tail && tail == skb) {
skb = newskb;
@@ -2048,14 +2048,11 @@ alloc_skb:
refcount_add(size, &sk->sk_wmem_alloc);
if (newskb) {
- err = unix_scm_to_skb(&scm, skb, false);
- if (err)
- goto err_state_unlock;
- spin_lock(&other->sk_receive_queue.lock);
+ unix_scm_to_skb(&scm, skb, false);
__skb_queue_tail(&other->sk_receive_queue, newskb);
- spin_unlock(&other->sk_receive_queue.lock);
}
+ spin_unlock(&other->sk_receive_queue.lock);
unix_state_unlock(other);
mutex_unlock(&unix_sk(other)->iolock);
@@ -2546,7 +2543,7 @@ static int unix_shutdown(struct socket *sock, int mode)
++mode;
unix_state_lock(sk);
- sk->sk_shutdown |= mode;
+ WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
other = unix_peer(sk);
if (other)
sock_hold(other);
@@ -2563,7 +2560,7 @@ static int unix_shutdown(struct socket *sock, int mode)
if (mode&SEND_SHUTDOWN)
peer_mode |= RCV_SHUTDOWN;
unix_state_lock(other);
- other->sk_shutdown |= peer_mode;
+ WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
unix_state_unlock(other);
other->sk_state_change(other);
if (peer_mode == SHUTDOWN_MASK)
@@ -2682,16 +2679,18 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
{
struct sock *sk = sock->sk;
__poll_t mask;
+ u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
+ shutdown = READ_ONCE(sk->sk_shutdown);
/* exceptional events? */
if (sk->sk_err)
mask |= EPOLLERR;
- if (sk->sk_shutdown == SHUTDOWN_MASK)
+ if (shutdown == SHUTDOWN_MASK)
mask |= EPOLLHUP;
- if (sk->sk_shutdown & RCV_SHUTDOWN)
+ if (shutdown & RCV_SHUTDOWN)
mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
/* readable? */
@@ -2719,18 +2718,20 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk, *other;
unsigned int writable;
__poll_t mask;
+ u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
+ shutdown = READ_ONCE(sk->sk_shutdown);
/* exceptional events? */
if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
mask |= EPOLLERR |
(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
- if (sk->sk_shutdown & RCV_SHUTDOWN)
+ if (shutdown & RCV_SHUTDOWN)
mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
- if (sk->sk_shutdown == SHUTDOWN_MASK)
+ if (shutdown == SHUTDOWN_MASK)
mask |= EPOLLHUP;
/* readable? */
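
With sk_shutdown now stored outside the socket lock (hence the WRITE_ONCE() writes above), the poll handlers must read it locklessly too; loading it once into a local with READ_ONCE() both marks the race for KCSAN and guarantees the EPOLLHUP and EPOLLRDHUP tests judge the same snapshot. The pattern in miniature:

/* Sketch: one marked load, then every test sees the same value. */
static __poll_t demo_poll_shutdown(struct sock *sk)
{
        __poll_t mask = 0;
        u8 shutdown = READ_ONCE(sk->sk_shutdown);

        if (shutdown == SHUTDOWN_MASK)          /* both directions closed */
                mask |= EPOLLHUP;
        if (shutdown & RCV_SHUTDOWN)
                mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
        return mask;
}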
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 9ff64f9df1f3..2975e7a061d0 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -83,7 +83,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
* queue lock. With the other's queue locked it's
* OK to lock the state.
*/
- unix_state_lock_nested(req);
+ unix_state_lock_nested(req, U_LOCK_DIAG);
peer = unix_sk(req)->peer;
buf[i++] = (peer ? sock_i_ino(peer) : 0);
unix_state_unlock(req);
@@ -113,14 +113,16 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}
-static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb)
+static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb,
+ struct user_namespace *user_ns)
{
- uid_t uid = from_kuid_munged(sk_user_ns(nlskb->sk), sock_i_uid(sk));
+ uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk));
return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
- u32 portid, u32 seq, u32 flags, int sk_ino)
+ struct user_namespace *user_ns,
+ u32 portid, u32 seq, u32 flags, int sk_ino)
{
struct nlmsghdr *nlh;
struct unix_diag_msg *rep;
@@ -166,7 +168,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_UID) &&
- sk_diag_dump_uid(sk, skb))
+ sk_diag_dump_uid(sk, skb, user_ns))
goto out_nlmsg_trim;
nlmsg_end(skb, nlh);
@@ -178,7 +180,8 @@ out_nlmsg_trim:
}
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
- u32 portid, u32 seq, u32 flags)
+ struct user_namespace *user_ns,
+ u32 portid, u32 seq, u32 flags)
{
int sk_ino;
@@ -189,7 +192,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
if (!sk_ino)
return 0;
- return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
+ return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino);
}
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
@@ -217,7 +220,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
goto next;
if (!(req->udiag_states & (1 << sk->sk_state)))
goto next;
- if (sk_diag_dump(sk, skb, req,
+ if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI) < 0)
@@ -285,7 +288,8 @@ again:
if (!rep)
goto out;
- err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
+ err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk),
+ NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, 0, req->udiag_ino);
if (err < 0) {
nlmsg_free(rep);
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index d45d5366115a..9121a4d5436d 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -198,12 +198,13 @@ void wait_for_unix_gc(void)
if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
!READ_ONCE(gc_in_progress))
unix_gc();
- wait_event(unix_gc_wait, gc_in_progress == false);
+ wait_event(unix_gc_wait, !READ_ONCE(gc_in_progress));
}
/* The external entry point: unix_gc() */
void unix_gc(void)
{
+ struct sk_buff *next_skb, *skb;
struct unix_sock *u;
struct unix_sock *next;
struct sk_buff_head hitlist;
@@ -297,11 +298,30 @@ void unix_gc(void)
spin_unlock(&unix_gc_lock);
+ /* We need io_uring to clean its registered files, ignore all io_uring
+ * originated skbs. It's fine as io_uring doesn't keep references to
+ * other io_uring instances and so killing all other files in the cycle
+ * will put all io_uring references forcing it to go through normal
+ * release path eventually putting registered files.
+ */
+ skb_queue_walk_safe(&hitlist, skb, next_skb) {
+ if (skb->scm_io_uring) {
+ __skb_unlink(skb, &hitlist);
+ skb_queue_tail(&skb->sk->sk_receive_queue, skb);
+ }
+ }
+
/* Here we are. Hitlist is filled. Die. */
__skb_queue_purge(&hitlist);
spin_lock(&unix_gc_lock);
+ /* There could be io_uring registered files, just push them back to
+ * the inflight list
+ */
+ list_for_each_entry_safe(u, next, &gc_candidates, link)
+ list_move_tail(&u->link, &gc_inflight_list);
+
/* All candidates should have been detached by now. */
BUG_ON(!list_empty(&gc_candidates));
diff --git a/net/unix/scm.c b/net/unix/scm.c
index ce700b22ecce..51b623de3be5 100644
--- a/net/unix/scm.c
+++ b/net/unix/scm.c
@@ -33,10 +33,8 @@ struct sock *unix_get_socket(struct file *filp)
/* PF_UNIX ? */
if (s && sock->ops && sock->ops->family == PF_UNIX)
u_sock = s;
- } else {
- /* Could be an io_uring instance */
- u_sock = io_uring_get_socket(filp);
}
+
return u_sock;
}
EXPORT_SYMBOL(unix_get_socket);
@@ -62,7 +60,7 @@ void unix_inflight(struct user_struct *user, struct file *fp)
/* Paired with READ_ONCE() in wait_for_unix_gc() */
WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
}
- user->unix_inflight++;
+ WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
spin_unlock(&unix_gc_lock);
}
@@ -83,7 +81,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
/* Paired with READ_ONCE() in wait_for_unix_gc() */
WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
}
- user->unix_inflight--;
+ WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
spin_unlock(&unix_gc_lock);
}
@@ -97,7 +95,7 @@ static inline bool too_many_unix_fds(struct task_struct *p)
{
struct user_struct *user = current_user();
- if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
+ if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
return false;
}
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index d60d7caacbf5..4cd65a1a07f9 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1110,6 +1110,7 @@ static void vsock_connect_timeout(struct work_struct *work)
if (sk->sk_state == TCP_SYN_SENT &&
(sk->sk_shutdown != SHUTDOWN_MASK)) {
sk->sk_state = TCP_CLOSE;
+ sk->sk_socket->state = SS_UNCONNECTED;
sk->sk_err = ETIMEDOUT;
sk->sk_error_report(sk);
vsock_transport_cancel_pkt(vsk);
@@ -1207,7 +1208,14 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
* timeout fires.
*/
sock_hold(sk);
- schedule_delayed_work(&vsk->connect_work, timeout);
+
+ /* If the timeout function is already scheduled,
+ * reschedule it, then ungrab the socket refcount to
+ * keep it balanced.
+ */
+ if (mod_delayed_work(system_wq, &vsk->connect_work,
+ timeout))
+ sock_put(sk);
/* Skip ahead to preserve error code set above. */
goto out_wait;
@@ -1224,7 +1232,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
vsock_transport_cancel_pkt(vsk);
vsock_remove_connected(vsk);
goto out_wait;
- } else if (timeout == 0) {
+ } else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) {
err = -ETIMEDOUT;
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index dde16a033a09..434c5608a75d 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -276,6 +276,8 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
struct virtio_vsock_pkt *pkt;
size_t bytes, total = 0;
u32 free_space;
+ u32 fwd_cnt_delta;
+ bool low_rx_bytes;
int err = -EFAULT;
spin_lock_bh(&vvs->rx_lock);
@@ -307,7 +309,10 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
}
}
- free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);
+ fwd_cnt_delta = vvs->fwd_cnt - vvs->last_fwd_cnt;
+ free_space = vvs->buf_alloc - fwd_cnt_delta;
+ low_rx_bytes = (vvs->rx_bytes <
+ sock_rcvlowat(sk_vsock(vsk), 0, INT_MAX));
spin_unlock_bh(&vvs->rx_lock);
@@ -317,9 +322,11 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
* too high causes extra messages. Too low causes transmitter
* stalls. As stalls are in theory more expensive than extra
* messages, we set the limit to a high value. TODO: experiment
- * with different values.
+ * with different values. Also send credit update message when
+ * number of bytes in rx queue is not enough to wake up reader.
*/
- if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
+ if (fwd_cnt_delta &&
+ (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE || low_rx_bytes)) {
virtio_transport_send_credit_update(vsk,
VIRTIO_VSOCK_TYPE_STREAM,
NULL);
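
For reference, the receive-side credit accounting this hunk tightens, condensed into one illustrative helper (the field names come from struct virtio_vsock_sock; the helper itself is not in the tree): fwd_cnt counts bytes consumed by the application, last_fwd_cnt is the value advertised in the last credit update, and the fwd_cnt_delta guard avoids sending an update when nothing has been consumed since then.

static bool need_credit_update(const struct virtio_vsock_sock *vvs, u32 lowat)
{
	u32 fwd_cnt_delta = vvs->fwd_cnt - vvs->last_fwd_cnt;
	u32 free_space = vvs->buf_alloc - fwd_cnt_delta;

	/* Update only if we consumed something, and either the peer's
	 * view of free space is getting small or the reader would stall
	 * below its SO_RCVLOWAT threshold.
	 */
	return fwd_cnt_delta &&
	       (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
		vvs->rx_bytes < lowat);
}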
@@ -372,7 +379,7 @@ static s64 virtio_transport_has_space(struct vsock_sock *vsk)
struct virtio_vsock_sock *vvs = vsk->trans;
s64 bytes;
- bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
+ bytes = (s64)vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
if (bytes < 0)
bytes = 0;
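
The (s64) cast matters because with all-u32 operands the subtraction wraps instead of going negative, which makes the clamp below dead code. Schematically:

	u32 a = 0, b = 1;
	s64 wrong = a - b;	/* computed in u32: 0xffffffff, huge positive */
	s64 right = (s64)a - b;	/* computed in s64: -1, correctly negative   */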
@@ -1146,7 +1153,7 @@ EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
{
- kfree(pkt->buf);
+ kvfree(pkt->buf);
kfree(pkt);
}
EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
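
kvfree() is needed here because the packet buffer may have been allocated with kvmalloc(), which transparently falls back from kmalloc() to vmalloc() for large sizes; kfree() on vmalloc-backed memory is invalid. The ownership rule, sketched with a hypothetical struct:

struct pkt {
	void *buf;
};

static struct pkt *pkt_alloc(size_t len)
{
	struct pkt *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;
	p->buf = kvmalloc(len, GFP_KERNEL);	/* kmalloc- or vmalloc-backed */
	if (!p->buf) {
		kfree(p);
		return NULL;
	}
	return p;
}

static void pkt_free(struct pkt *p)
{
	kvfree(p->buf);	/* handles both backings; kfree() would not */
	kfree(p);
}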
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index aaabcd84268a..85488e19dffc 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1725,7 +1725,11 @@ static int vmci_transport_dgram_enqueue(
if (!dg)
return -ENOMEM;
- memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
+ err = memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
+ if (err) {
+ kfree(dg);
+ return err;
+ }
dg->dst = vmci_make_handle(remote_addr->svm_cid,
remote_addr->svm_port);
diff --git a/net/wireless/certs/wens.hex b/net/wireless/certs/wens.hex
new file mode 100644
index 000000000000..0d50369bede9
--- /dev/null
+++ b/net/wireless/certs/wens.hex
@@ -0,0 +1,87 @@
+/* Chen-Yu Tsai's regdb certificate */
+0x30, 0x82, 0x02, 0xa7, 0x30, 0x82, 0x01, 0x8f,
+0x02, 0x14, 0x61, 0xc0, 0x38, 0x65, 0x1a, 0xab,
+0xdc, 0xf9, 0x4b, 0xd0, 0xac, 0x7f, 0xf0, 0x6c,
+0x72, 0x48, 0xdb, 0x18, 0xc6, 0x00, 0x30, 0x0d,
+0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
+0x01, 0x01, 0x0b, 0x05, 0x00, 0x30, 0x0f, 0x31,
+0x0d, 0x30, 0x0b, 0x06, 0x03, 0x55, 0x04, 0x03,
+0x0c, 0x04, 0x77, 0x65, 0x6e, 0x73, 0x30, 0x20,
+0x17, 0x0d, 0x32, 0x33, 0x31, 0x32, 0x30, 0x31,
+0x30, 0x37, 0x34, 0x31, 0x31, 0x34, 0x5a, 0x18,
+0x0f, 0x32, 0x31, 0x32, 0x33, 0x31, 0x31, 0x30,
+0x37, 0x30, 0x37, 0x34, 0x31, 0x31, 0x34, 0x5a,
+0x30, 0x0f, 0x31, 0x0d, 0x30, 0x0b, 0x06, 0x03,
+0x55, 0x04, 0x03, 0x0c, 0x04, 0x77, 0x65, 0x6e,
+0x73, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06,
+0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01,
+0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f,
+0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01,
+0x01, 0x00, 0xa9, 0x7a, 0x2c, 0x78, 0x4d, 0xa7,
+0x19, 0x2d, 0x32, 0x52, 0xa0, 0x2e, 0x6c, 0xef,
+0x88, 0x7f, 0x15, 0xc5, 0xb6, 0x69, 0x54, 0x16,
+0x43, 0x14, 0x79, 0x53, 0xb7, 0xae, 0x88, 0xfe,
+0xc0, 0xb7, 0x5d, 0x47, 0x8e, 0x1a, 0xe1, 0xef,
+0xb3, 0x90, 0x86, 0xda, 0xd3, 0x64, 0x81, 0x1f,
+0xce, 0x5d, 0x9e, 0x4b, 0x6e, 0x58, 0x02, 0x3e,
+0xb2, 0x6f, 0x5e, 0x42, 0x47, 0x41, 0xf4, 0x2c,
+0xb8, 0xa8, 0xd4, 0xaa, 0xc0, 0x0e, 0xe6, 0x48,
+0xf0, 0xa8, 0xce, 0xcb, 0x08, 0xae, 0x37, 0xaf,
+0xf6, 0x40, 0x39, 0xcb, 0x55, 0x6f, 0x5b, 0x4f,
+0x85, 0x34, 0xe6, 0x69, 0x10, 0x50, 0x72, 0x5e,
+0x4e, 0x9d, 0x4c, 0xba, 0x38, 0x36, 0x0d, 0xce,
+0x73, 0x38, 0xd7, 0x27, 0x02, 0x2a, 0x79, 0x03,
+0xe1, 0xac, 0xcf, 0xb0, 0x27, 0x85, 0x86, 0x93,
+0x17, 0xab, 0xec, 0x42, 0x77, 0x37, 0x65, 0x8a,
+0x44, 0xcb, 0xd6, 0x42, 0x93, 0x92, 0x13, 0xe3,
+0x39, 0x45, 0xc5, 0x6e, 0x00, 0x4a, 0x7f, 0xcb,
+0x42, 0x17, 0x2b, 0x25, 0x8c, 0xb8, 0x17, 0x3b,
+0x15, 0x36, 0x59, 0xde, 0x42, 0xce, 0x21, 0xe6,
+0xb6, 0xc7, 0x6e, 0x5e, 0x26, 0x1f, 0xf7, 0x8a,
+0x57, 0x9e, 0xa5, 0x96, 0x72, 0xb7, 0x02, 0x32,
+0xeb, 0x07, 0x2b, 0x73, 0xe2, 0x4f, 0x66, 0x58,
+0x9a, 0xeb, 0x0f, 0x07, 0xb6, 0xab, 0x50, 0x8b,
+0xc3, 0x8f, 0x17, 0xfa, 0x0a, 0x99, 0xc2, 0x16,
+0x25, 0xbf, 0x2d, 0x6b, 0x1a, 0xaa, 0xe6, 0x3e,
+0x5f, 0xeb, 0x6d, 0x9b, 0x5d, 0x4d, 0x42, 0x83,
+0x2d, 0x39, 0xb8, 0xc9, 0xac, 0xdb, 0x3a, 0x91,
+0x50, 0xdf, 0xbb, 0xb1, 0x76, 0x6d, 0x15, 0x73,
+0xfd, 0xc6, 0xe6, 0x6b, 0x71, 0x9e, 0x67, 0x36,
+0x22, 0x83, 0x79, 0xb1, 0xd6, 0xb8, 0x84, 0x52,
+0xaf, 0x96, 0x5b, 0xc3, 0x63, 0x02, 0x4e, 0x78,
+0x70, 0x57, 0x02, 0x03, 0x01, 0x00, 0x01, 0x30,
+0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7,
+0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x03, 0x82,
+0x01, 0x01, 0x00, 0x24, 0x28, 0xee, 0x22, 0x74,
+0x7f, 0x7c, 0xfa, 0x6c, 0x1f, 0xb3, 0x18, 0xd1,
+0xc2, 0x3d, 0x7d, 0x29, 0x42, 0x88, 0xad, 0x82,
+0xa5, 0xb1, 0x8a, 0x05, 0xd0, 0xec, 0x5c, 0x91,
+0x20, 0xf6, 0x82, 0xfd, 0xd5, 0x67, 0x60, 0x5f,
+0x31, 0xf5, 0xbd, 0x88, 0x91, 0x70, 0xbd, 0xb8,
+0xb9, 0x8c, 0x88, 0xfe, 0x53, 0xc9, 0x54, 0x9b,
+0x43, 0xc4, 0x7a, 0x43, 0x74, 0x6b, 0xdd, 0xb0,
+0xb1, 0x3b, 0x33, 0x45, 0x46, 0x78, 0xa3, 0x1c,
+0xef, 0x54, 0x68, 0xf7, 0x85, 0x9c, 0xe4, 0x51,
+0x6f, 0x06, 0xaf, 0x81, 0xdb, 0x2a, 0x7b, 0x7b,
+0x6f, 0xa8, 0x9c, 0x67, 0xd8, 0xcb, 0xc9, 0x91,
+0x40, 0x00, 0xae, 0xd9, 0xa1, 0x9f, 0xdd, 0xa6,
+0x43, 0x0e, 0x28, 0x7b, 0xaa, 0x1b, 0xe9, 0x84,
+0xdb, 0x76, 0x64, 0x42, 0x70, 0xc9, 0xc0, 0xeb,
+0xae, 0x84, 0x11, 0x16, 0x68, 0x4e, 0x84, 0x9e,
+0x7e, 0x92, 0x36, 0xee, 0x1c, 0x3b, 0x08, 0x63,
+0xeb, 0x79, 0x84, 0x15, 0x08, 0x9d, 0xaf, 0xc8,
+0x9a, 0xc7, 0x34, 0xd3, 0x94, 0x4b, 0xd1, 0x28,
+0x97, 0xbe, 0xd1, 0x45, 0x75, 0xdc, 0x35, 0x62,
+0xac, 0x1d, 0x1f, 0xb7, 0xb7, 0x15, 0x87, 0xc8,
+0x98, 0xc0, 0x24, 0x31, 0x56, 0x8d, 0xed, 0xdb,
+0x06, 0xc6, 0x46, 0xbf, 0x4b, 0x6d, 0xa6, 0xd5,
+0xab, 0xcc, 0x60, 0xfc, 0xe5, 0x37, 0xb6, 0x53,
+0x7d, 0x58, 0x95, 0xa9, 0x56, 0xc7, 0xf7, 0xee,
+0xc3, 0xa0, 0x76, 0xf7, 0x65, 0x4d, 0x53, 0xfa,
+0xff, 0x5f, 0x76, 0x33, 0x5a, 0x08, 0xfa, 0x86,
+0x92, 0x5a, 0x13, 0xfa, 0x1a, 0xfc, 0xf2, 0x1b,
+0x8c, 0x7f, 0x42, 0x6d, 0xb7, 0x7e, 0xb7, 0xb4,
+0xf0, 0xc7, 0x83, 0xbb, 0xa2, 0x81, 0x03, 0x2d,
+0xd4, 0x2a, 0x63, 0x3f, 0xf7, 0x31, 0x2e, 0x40,
+0x33, 0x5c, 0x46, 0xbc, 0x9b, 0xc1, 0x05, 0xa5,
+0x45, 0x4e, 0xc3,
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
index 76b845f68ac8..d80b06d66959 100644
--- a/net/wireless/debugfs.c
+++ b/net/wireless/debugfs.c
@@ -65,9 +65,10 @@ static ssize_t ht40allow_map_read(struct file *file,
{
struct wiphy *wiphy = file->private_data;
char *buf;
- unsigned int offset = 0, buf_size = PAGE_SIZE, i, r;
+ unsigned int offset = 0, buf_size = PAGE_SIZE, i;
enum nl80211_band band;
struct ieee80211_supported_band *sband;
+ ssize_t r;
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 8459f5b6002e..c698fc458f5f 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3350,6 +3350,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
if_idx++;
}
+ if_start = 0;
wp_idx++;
}
out:
@@ -3526,6 +3527,8 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
if (ntype != NL80211_IFTYPE_MESH_POINT)
return -EINVAL;
+ if (otype != NL80211_IFTYPE_MESH_POINT)
+ return -EINVAL;
if (netif_running(dev))
return -EBUSY;
@@ -6914,7 +6917,7 @@ static int nl80211_update_mesh_config(struct sk_buff *skb,
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct mesh_config cfg;
+ struct mesh_config cfg = {};
u32 mask;
int err;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 74caece77963..1f5ea82b58bf 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1060,6 +1060,8 @@ static void regdb_fw_cb(const struct firmware *fw, void *context)
static int query_regdb_file(const char *alpha2)
{
+ int err;
+
ASSERT_RTNL();
if (regdb)
@@ -1069,9 +1071,13 @@ static int query_regdb_file(const char *alpha2)
if (!alpha2)
return -ENOMEM;
- return request_firmware_nowait(THIS_MODULE, true, "regulatory.db",
- &reg_pdev->dev, GFP_KERNEL,
- (void *)alpha2, regdb_fw_cb);
+ err = request_firmware_nowait(THIS_MODULE, true, "regulatory.db",
+ &reg_pdev->dev, GFP_KERNEL,
+ (void *)alpha2, regdb_fw_cb);
+ if (err)
+ kfree(alpha2);
+
+ return err;
}
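
The ownership rule behind this fix: request_firmware_nowait() only hands the context over to the completion callback when it returns 0; on error the callback never runs, so the caller still owns the context and must free it. The same shape with a hypothetical my_ctx/my_done_cb pair:

static int submit_async(struct device *dev, const char *name)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	int err;

	if (!ctx)
		return -ENOMEM;

	err = request_firmware_nowait(THIS_MODULE, true, name, dev,
				      GFP_KERNEL, ctx, my_done_cb);
	if (err)
		kfree(ctx);	/* my_done_cb() never runs on error */
	return err;
}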
int reg_reload_regdb(void)
@@ -3964,8 +3970,10 @@ static int __init regulatory_init_db(void)
return -EINVAL;
err = load_builtin_regdb_keys();
- if (err)
+ if (err) {
+ platform_device_unregister(reg_pdev);
return err;
+ }
/* We always try to get an update for the static regdomain */
err = regulatory_hint_core(cfg80211_world_regdom->alpha2);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 6bb9437af28b..a1c53d4b6711 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -104,18 +104,12 @@ static inline void bss_ref_get(struct cfg80211_registered_device *rdev,
lockdep_assert_held(&rdev->bss_lock);
bss->refcount++;
- if (bss->pub.hidden_beacon_bss) {
- bss = container_of(bss->pub.hidden_beacon_bss,
- struct cfg80211_internal_bss,
- pub);
- bss->refcount++;
- }
- if (bss->pub.transmitted_bss) {
- bss = container_of(bss->pub.transmitted_bss,
- struct cfg80211_internal_bss,
- pub);
- bss->refcount++;
- }
+
+ if (bss->pub.hidden_beacon_bss)
+ bss_from_pub(bss->pub.hidden_beacon_bss)->refcount++;
+
+ if (bss->pub.transmitted_bss)
+ bss_from_pub(bss->pub.transmitted_bss)->refcount++;
}
static inline void bss_ref_put(struct cfg80211_registered_device *rdev,
@@ -229,114 +223,152 @@ bool cfg80211_is_element_inherited(const struct element *elem,
}
EXPORT_SYMBOL(cfg80211_is_element_inherited);
-static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
- const u8 *subelement, size_t subie_len,
- u8 *new_ie, gfp_t gfp)
+static size_t cfg80211_copy_elem_with_frags(const struct element *elem,
+ const u8 *ie, size_t ie_len,
+ u8 **pos, u8 *buf, size_t buf_len)
{
- u8 *pos, *tmp;
- const u8 *tmp_old, *tmp_new;
- const struct element *non_inherit_elem;
- u8 *sub_copy;
+ if (WARN_ON((u8 *)elem < ie || elem->data > ie + ie_len ||
+ elem->data + elem->datalen > ie + ie_len))
+ return 0;
- /* copy subelement as we need to change its content to
- * mark an ie after it is processed.
- */
- sub_copy = kmemdup(subelement, subie_len, gfp);
- if (!sub_copy)
+ if (elem->datalen + 2 > buf + buf_len - *pos)
return 0;
- pos = &new_ie[0];
+ memcpy(*pos, elem, elem->datalen + 2);
+ *pos += elem->datalen + 2;
- /* set new ssid */
- tmp_new = cfg80211_find_ie(WLAN_EID_SSID, sub_copy, subie_len);
- if (tmp_new) {
- memcpy(pos, tmp_new, tmp_new[1] + 2);
- pos += (tmp_new[1] + 2);
+ /* Finish if it is not fragmented */
+ if (elem->datalen != 255)
+ return *pos - buf;
+
+ ie_len = ie + ie_len - elem->data - elem->datalen;
+ ie = (const u8 *)elem->data + elem->datalen;
+
+ for_each_element(elem, ie, ie_len) {
+ if (elem->id != WLAN_EID_FRAGMENT)
+ break;
+
+ if (elem->datalen + 2 > buf + buf_len - *pos)
+ return 0;
+
+ memcpy(*pos, elem, elem->datalen + 2);
+ *pos += elem->datalen + 2;
+
+ if (elem->datalen != 255)
+ break;
}
- /* get non inheritance list if exists */
- non_inherit_elem =
- cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
- sub_copy, subie_len);
+ return *pos - buf;
+}
- /* go through IEs in ie (skip SSID) and subelement,
- * merge them into new_ie
+static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
+ const u8 *subie, size_t subie_len,
+ u8 *new_ie, size_t new_ie_len)
+{
+ const struct element *non_inherit_elem, *parent, *sub;
+ u8 *pos = new_ie;
+ u8 id, ext_id;
+ unsigned int match_len;
+
+ non_inherit_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ subie, subie_len);
+
+ /* We copy the elements one by one from the parent to the generated
+ * elements.
+ * If they are not inherited (included in subie or in the non
+ * inheritance element), then we copy all occurrences the first time
+ * we see this element type.
*/
- tmp_old = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen);
- tmp_old = (tmp_old) ? tmp_old + tmp_old[1] + 2 : ie;
-
- while (tmp_old + tmp_old[1] + 2 - ie <= ielen) {
- if (tmp_old[0] == 0) {
- tmp_old++;
+ for_each_element(parent, ie, ielen) {
+ if (parent->id == WLAN_EID_FRAGMENT)
continue;
+
+ if (parent->id == WLAN_EID_EXTENSION) {
+ if (parent->datalen < 1)
+ continue;
+
+ id = WLAN_EID_EXTENSION;
+ ext_id = parent->data[0];
+ match_len = 1;
+ } else {
+ id = parent->id;
+ match_len = 0;
}
- if (tmp_old[0] == WLAN_EID_EXTENSION)
- tmp = (u8 *)cfg80211_find_ext_ie(tmp_old[2], sub_copy,
- subie_len);
- else
- tmp = (u8 *)cfg80211_find_ie(tmp_old[0], sub_copy,
- subie_len);
+ /* Find first occurrence in subie */
+ sub = cfg80211_find_elem_match(id, subie, subie_len,
+ &ext_id, match_len, 0);
- if (!tmp) {
- const struct element *old_elem = (void *)tmp_old;
+ /* Copy from parent if not in subie and inherited */
+ if (!sub &&
+ cfg80211_is_element_inherited(parent, non_inherit_elem)) {
+ if (!cfg80211_copy_elem_with_frags(parent,
+ ie, ielen,
+ &pos, new_ie,
+ new_ie_len))
+ return 0;
- /* ie in old ie but not in subelement */
- if (cfg80211_is_element_inherited(old_elem,
- non_inherit_elem)) {
- memcpy(pos, tmp_old, tmp_old[1] + 2);
- pos += tmp_old[1] + 2;
- }
- } else {
- /* ie in transmitting ie also in subelement,
- * copy from subelement and flag the ie in subelement
- * as copied (by setting eid field to WLAN_EID_SSID,
- * which is skipped anyway).
- * For vendor ie, compare OUI + type + subType to
- * determine if they are the same ie.
- */
- if (tmp_old[0] == WLAN_EID_VENDOR_SPECIFIC) {
- if (!memcmp(tmp_old + 2, tmp + 2, 5)) {
- /* same vendor ie, copy from
- * subelement
- */
- memcpy(pos, tmp, tmp[1] + 2);
- pos += tmp[1] + 2;
- tmp[0] = WLAN_EID_SSID;
- } else {
- memcpy(pos, tmp_old, tmp_old[1] + 2);
- pos += tmp_old[1] + 2;
- }
- } else {
- /* copy ie from subelement into new ie */
- memcpy(pos, tmp, tmp[1] + 2);
- pos += tmp[1] + 2;
- tmp[0] = WLAN_EID_SSID;
- }
+ continue;
}
- if (tmp_old + tmp_old[1] + 2 - ie == ielen)
- break;
+ /* Already copied if an earlier element had the same type */
+ if (cfg80211_find_elem_match(id, ie, (u8 *)parent - ie,
+ &ext_id, match_len, 0))
+ continue;
- tmp_old += tmp_old[1] + 2;
+ /* Not inheriting, copy all similar elements from subie */
+ while (sub) {
+ if (!cfg80211_copy_elem_with_frags(sub,
+ subie, subie_len,
+ &pos, new_ie,
+ new_ie_len))
+ return 0;
+
+ sub = cfg80211_find_elem_match(id,
+ sub->data + sub->datalen,
+ subie_len + subie -
+ (sub->data +
+ sub->datalen),
+ &ext_id, match_len, 0);
+ }
}
- /* go through subelement again to check if there is any ie not
- * copied to new ie, skip ssid, capability, bssid-index ie
+ /* The above misses elements that are included in subie but not in the
+ * parent, so do a pass over subie and append those.
+ * Skip the non-tx BSSID caps and non-inheritance element.
*/
- tmp_new = sub_copy;
- while (tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) {
- if (!(tmp_new[0] == WLAN_EID_NON_TX_BSSID_CAP ||
- tmp_new[0] == WLAN_EID_SSID)) {
- memcpy(pos, tmp_new, tmp_new[1] + 2);
- pos += tmp_new[1] + 2;
+ for_each_element(sub, subie, subie_len) {
+ if (sub->id == WLAN_EID_NON_TX_BSSID_CAP)
+ continue;
+
+ if (sub->id == WLAN_EID_FRAGMENT)
+ continue;
+
+ if (sub->id == WLAN_EID_EXTENSION) {
+ if (sub->datalen < 1)
+ continue;
+
+ id = WLAN_EID_EXTENSION;
+ ext_id = sub->data[0];
+ match_len = 1;
+
+ if (ext_id == WLAN_EID_EXT_NON_INHERITANCE)
+ continue;
+ } else {
+ id = sub->id;
+ match_len = 0;
}
- if (tmp_new + tmp_new[1] + 2 - sub_copy == subie_len)
- break;
- tmp_new += tmp_new[1] + 2;
+
+ /* Processed if one was included in the parent */
+ if (cfg80211_find_elem_match(id, ie, ielen,
+ &ext_id, match_len, 0))
+ continue;
+
+ if (!cfg80211_copy_elem_with_frags(sub, subie, subie_len,
+ &pos, new_ie, new_ie_len))
+ return 0;
}
- kfree(sub_copy);
return pos - new_ie;
}
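
cfg80211_copy_elem_with_frags() exists because an 802.11 element body is limited to 255 bytes; a longer element is encoded as a full 255-byte element continued by one or more Fragment elements (WLAN_EID_FRAGMENT). The bounds-checked append it performs for each piece looks like this in isolation (append_elem is an illustrative name, not in the tree):

static bool append_elem(const struct element *elem,
			u8 **pos, u8 *buf, size_t buf_len)
{
	size_t elen = elem->datalen + 2;	/* 2-byte header + body */

	if (elen > (size_t)(buf + buf_len - *pos))
		return false;			/* would overflow dest */
	memcpy(*pos, elem, elen);
	*pos += elen;
	return true;
}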
@@ -390,6 +422,15 @@ cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss,
rcu_read_unlock();
+ /*
+ * This is a bit weird - it's not on the list, but already on another
+ * one! The only way that could happen is if there's some BSSID/SSID
+ * shared by multiple APs in their multi-BSSID profiles, potentially
+ * with hidden SSID mixed in ... ignore it.
+ */
+ if (!list_empty(&nontrans_bss->nontrans_list))
+ return -EINVAL;
+
/* add to the list */
list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list);
return 0;
@@ -1094,6 +1135,23 @@ struct cfg80211_non_tx_bss {
u8 bssid_index;
};
+static void cfg80211_update_hidden_bsses(struct cfg80211_internal_bss *known,
+ const struct cfg80211_bss_ies *new_ies,
+ const struct cfg80211_bss_ies *old_ies)
+{
+ struct cfg80211_internal_bss *bss;
+
+ /* Assign beacon IEs to all sub entries */
+ list_for_each_entry(bss, &known->hidden_list, hidden_list) {
+ const struct cfg80211_bss_ies *ies;
+
+ ies = rcu_access_pointer(bss->pub.beacon_ies);
+ WARN_ON(ies != old_ies);
+
+ rcu_assign_pointer(bss->pub.beacon_ies, new_ies);
+ }
+}
+
static bool
cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
struct cfg80211_internal_bss *known,
@@ -1117,7 +1175,6 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
} else if (rcu_access_pointer(new->pub.beacon_ies)) {
const struct cfg80211_bss_ies *old;
- struct cfg80211_internal_bss *bss;
if (known->pub.hidden_beacon_bss &&
!list_empty(&known->hidden_list)) {
@@ -1145,16 +1202,9 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
if (old == rcu_access_pointer(known->pub.ies))
rcu_assign_pointer(known->pub.ies, new->pub.beacon_ies);
- /* Assign beacon IEs to all sub entries */
- list_for_each_entry(bss, &known->hidden_list, hidden_list) {
- const struct cfg80211_bss_ies *ies;
-
- ies = rcu_access_pointer(bss->pub.beacon_ies);
- WARN_ON(ies != old);
-
- rcu_assign_pointer(bss->pub.beacon_ies,
- new->pub.beacon_ies);
- }
+ cfg80211_update_hidden_bsses(known,
+ rcu_access_pointer(new->pub.beacon_ies),
+ old);
if (old)
kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
@@ -1231,6 +1281,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
new->refcount = 1;
INIT_LIST_HEAD(&new->hidden_list);
INIT_LIST_HEAD(&new->pub.nontrans_list);
+ /* we'll set this later if it was non-NULL */
+ new->pub.transmitted_bss = NULL;
if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_ZLEN);
@@ -1242,8 +1294,12 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
list_add(&new->hidden_list,
&hidden->hidden_list);
hidden->refcount++;
+
+ ies = (void *)rcu_access_pointer(new->pub.beacon_ies);
rcu_assign_pointer(new->pub.beacon_ies,
hidden->pub.beacon_ies);
+ if (ies)
+ kfree_rcu(ies, rcu_head);
}
} else {
/*
@@ -1460,10 +1516,15 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
spin_lock_bh(&rdev->bss_lock);
if (cfg80211_add_nontrans_list(non_tx_data->tx_bss,
&res->pub)) {
- if (__cfg80211_unlink_bss(rdev, res))
+ if (__cfg80211_unlink_bss(rdev, res)) {
rdev->bss_generation++;
+ res = NULL;
+ }
}
spin_unlock_bh(&rdev->bss_lock);
+
+ if (!res)
+ return NULL;
}
trace_cfg80211_return_bss(&res->pub);
@@ -1582,6 +1643,8 @@ static void cfg80211_parse_mbssid_data(struct wiphy *wiphy,
for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, ie, ielen) {
if (elem->datalen < 4)
continue;
+ if (elem->data[0] < 1 || (int)elem->data[0] > 8)
+ continue;
for_each_element(sub, elem->data + 1, elem->datalen - 1) {
u8 profile_len;
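
The new check enforces the Multiple BSSID element format: its first data byte is the MaxBSSID indicator n, meaning the transmitting AP advertises up to 2^n BSSIDs, and the patch accepts only n in 1..8. As a standalone predicate (illustrative helper, not in the tree):

static bool mbssid_indicator_valid(const struct element *elem)
{
	/* data[0] is the MaxBSSID indicator n: up to 2^n BSSIDs, 1 <= n <= 8 */
	return elem->datalen >= 4 &&
	       elem->data[0] >= 1 && elem->data[0] <= 8;
}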
@@ -1635,7 +1698,7 @@ static void cfg80211_parse_mbssid_data(struct wiphy *wiphy,
new_ie_len = cfg80211_gen_new_ie(ie, ielen,
profile,
profile_len, new_ie,
- gfp);
+ IEEE80211_MAX_DATA_LEN);
if (!new_ie_len)
continue;
@@ -1717,7 +1780,7 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
size_t new_ie_len;
struct cfg80211_bss_ies *new_ies;
const struct cfg80211_bss_ies *old;
- u8 cpy_len;
+ size_t cpy_len;
lockdep_assert_held(&wiphy_to_rdev(wiphy)->bss_lock);
@@ -1784,6 +1847,8 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
} else {
old = rcu_access_pointer(nontrans_bss->beacon_ies);
rcu_assign_pointer(nontrans_bss->beacon_ies, new_ies);
+ cfg80211_update_hidden_bsses(bss_from_pub(nontrans_bss),
+ new_ies, old);
rcu_assign_pointer(nontrans_bss->ies, new_ies);
if (old)
kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 63f89687a018..a260cd60a7b9 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -269,6 +269,15 @@ void cfg80211_conn_work(struct work_struct *work)
rtnl_unlock();
}
+static void cfg80211_step_auth_next(struct cfg80211_conn *conn,
+ struct cfg80211_bss *bss)
+{
+ memcpy(conn->bssid, bss->bssid, ETH_ALEN);
+ conn->params.bssid = conn->bssid;
+ conn->params.channel = bss->channel;
+ conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
+}
+
/* Returned bss is reference counted and must be cleaned up appropriately. */
static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
{
@@ -286,10 +295,7 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
if (!bss)
return NULL;
- memcpy(wdev->conn->bssid, bss->bssid, ETH_ALEN);
- wdev->conn->params.bssid = wdev->conn->bssid;
- wdev->conn->params.channel = bss->channel;
- wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
+ cfg80211_step_auth_next(wdev->conn, bss);
schedule_work(&rdev->conn_work);
return bss;
@@ -568,7 +574,12 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
wdev->conn->params.ssid_len = wdev->ssid_len;
/* see if we have the bss already */
- bss = cfg80211_get_conn_bss(wdev);
+ bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel,
+ wdev->conn->params.bssid,
+ wdev->conn->params.ssid,
+ wdev->conn->params.ssid_len,
+ wdev->conn_bss_type,
+ IEEE80211_PRIVACY(wdev->conn->params.privacy));
if (prev_bssid) {
memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN);
@@ -579,6 +590,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
if (bss) {
enum nl80211_timeout_reason treason;
+ cfg80211_step_auth_next(wdev->conn, bss);
err = cfg80211_conn_do_work(wdev, &treason);
cfg80211_put_bss(wdev->wiphy, bss);
} else {
@@ -1233,6 +1245,13 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
} else {
if (WARN_ON(connkeys))
return -EINVAL;
+
+ /* connect can point to wdev->wext.connect which
+ * can hold key data from a previous connection
+ */
+ connect->key = NULL;
+ connect->key_len = 0;
+ connect->key_idx = 0;
}
wdev->connect_keys = connkeys;
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 76a80a41615b..a57f54bc0e1a 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -796,6 +796,12 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
}
}
+ /* Sanity-check to ensure we never end up _allocating_ zero
+ * bytes of data for extra.
+ */
+ if (extra_size <= 0)
+ return -EFAULT;
+
/* kzalloc() ensures NULL-termination for essid_compat. */
extra = kzalloc(extra_size, GFP_KERNEL);
if (!extra)
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index c94aa587e0c9..851096110b4d 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -470,12 +470,12 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
if (get_user(len, optlen))
goto out;
- len = min_t(unsigned int, len, sizeof(int));
-
rc = -EINVAL;
if (len < 0)
goto out;
+ len = min_t(unsigned int, len, sizeof(int));
+
rc = -EFAULT;
if (put_user(len, optlen))
goto out;
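
The reordering matters because min_t(unsigned int, len, sizeof(int)) converts len to unsigned before comparing: a negative len becomes a huge value, gets clamped down to sizeof(int), and the len < 0 check afterwards can never fire. Schematically:

	int len = -1;
	unsigned int clamped = min_t(unsigned int, len, sizeof(int));
	/* clamped == sizeof(int): the negative value was silently
	 * accepted. Checking len < 0 before the min_t() conversion
	 * rejects it instead.
	 */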
@@ -492,6 +492,12 @@ static int x25_listen(struct socket *sock, int backlog)
int rc = -EOPNOTSUPP;
lock_sock(sk);
+ if (sock->state != SS_UNCONNECTED) {
+ rc = -EINVAL;
+ release_sock(sk);
+ return rc;
+ }
+
if (sk->sk_state != TCP_LISTEN) {
memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
sk->sk_max_ack_backlog = backlog;
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 25bf72ee6cad..226397add422 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -117,7 +117,7 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
if (!pskb_may_pull(skb, 1)) {
x25_neigh_put(nb);
- return 0;
+ goto drop;
}
switch (skb->data[0]) {
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 2bc0d6e3e124..d04a2345bc3f 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -613,6 +613,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
struct sock *sk = sock->sk;
struct xdp_sock *xs = xdp_sk(sk);
struct net_device *dev;
+ int bound_dev_if;
u32 flags, qid;
int err = 0;
@@ -626,6 +627,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
XDP_USE_NEED_WAKEUP))
return -EINVAL;
+ bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
+ if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
+ return -EINVAL;
+
rtnl_lock();
mutex_lock(&xs->mutex);
if (xs->state != XSK_READY) {
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
index fbc4552d17b8..6e5e307f985e 100644
--- a/net/xfrm/Makefile
+++ b/net/xfrm/Makefile
@@ -3,6 +3,8 @@
# Makefile for the XFRM subsystem.
#
+xfrm_interface-$(CONFIG_XFRM_INTERFACE) += xfrm_interface_core.o
+
obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
xfrm_input.o xfrm_output.o \
xfrm_sysctl.o xfrm_replay.o xfrm_device.o
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index e120df0a6da1..4d8d7cf3d199 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -274,8 +274,7 @@ static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
goto out;
if (x->props.flags & XFRM_STATE_DECAP_DSCP)
- ipv6_copy_dscp(ipv6_get_dsfield(ipv6_hdr(skb)),
- ipipv6_hdr(skb));
+ ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipipv6_hdr(skb));
if (!(x->props.flags & XFRM_STATE_NOECN))
ipip6_ecn_decapsulate(skb);
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface_core.c
index 4cfa79e04e3d..82c0c0575074 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface_core.c
@@ -219,8 +219,8 @@ static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
skb->dev = dev;
if (err) {
- dev->stats.rx_errors++;
- dev->stats.rx_dropped++;
+ DEV_STATS_INC(dev, rx_errors);
+ DEV_STATS_INC(dev, rx_dropped);
return 0;
}
@@ -260,7 +260,6 @@ static int
xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
{
struct xfrm_if *xi = netdev_priv(dev);
- struct net_device_stats *stats = &xi->dev->stats;
struct dst_entry *dst = skb_dst(skb);
unsigned int length = skb->len;
struct net_device *tdev;
@@ -286,7 +285,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
tdev = dst->dev;
if (tdev == dev) {
- stats->collisions++;
+ DEV_STATS_INC(dev, collisions);
net_warn_ratelimited("%s: Local routing loop detected!\n",
dev->name);
goto tx_err_dst_release;
@@ -329,13 +328,13 @@ xmit:
tstats->tx_packets++;
u64_stats_update_end(&tstats->syncp);
} else {
- stats->tx_errors++;
- stats->tx_aborted_errors++;
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_aborted_errors);
}
return 0;
tx_err_link_failure:
- stats->tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
dst_link_failure(skb);
tx_err_dst_release:
dst_release(dst);
@@ -345,7 +344,6 @@ tx_err_dst_release:
static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
- struct net_device_stats *stats = &xi->dev->stats;
struct dst_entry *dst = skb_dst(skb);
struct flowi fl;
int ret;
@@ -354,23 +352,23 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
switch (skb->protocol) {
case htons(ETH_P_IPV6):
- xfrm_decode_session(skb, &fl, AF_INET6);
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ xfrm_decode_session(skb, &fl, AF_INET6);
if (!dst) {
fl.u.ip6.flowi6_oif = dev->ifindex;
fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
if (dst->error) {
dst_release(dst);
- stats->tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_err;
}
skb_dst_set(skb, dst);
}
break;
case htons(ETH_P_IP):
- xfrm_decode_session(skb, &fl, AF_INET);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ xfrm_decode_session(skb, &fl, AF_INET);
if (!dst) {
struct rtable *rt;
@@ -378,7 +376,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
if (IS_ERR(rt)) {
- stats->tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_err;
}
skb_dst_set(skb, &rt->dst);
@@ -397,8 +395,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
tx_err:
- stats->tx_errors++;
- stats->tx_dropped++;
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_dropped);
kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index 4d422447aadc..4fca4b6cec8b 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -212,6 +212,7 @@ static void ipcomp_free_scratches(void)
vfree(*per_cpu_ptr(scratches, i));
free_percpu(scratches);
+ ipcomp_scratches = NULL;
}
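
Resetting the global after the free is what prevents a later allocation-failure path from freeing stale pointers through it again. The general pattern, in isolation:

static void *cache;

static void cache_free(void)
{
	kfree(cache);
	cache = NULL;	/* don't leave a dangling pointer behind */
}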
static void * __percpu *ipcomp_alloc_scratches(void)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 28a8cdef8e51..9484f27e905a 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1369,8 +1369,6 @@ EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
- static u32 idx_generator;
-
for (;;) {
struct hlist_head *list;
struct xfrm_policy *p;
@@ -1378,8 +1376,8 @@ static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
int found;
if (!index) {
- idx = (idx_generator | dir);
- idx_generator += 8;
+ idx = (net->xfrm.idx_generator | dir);
+ net->xfrm.idx_generator += 8;
} else {
idx = index;
index = 0;
@@ -3223,7 +3221,7 @@ xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
static inline int
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
- unsigned short family)
+ unsigned short family, u32 if_id)
{
if (xfrm_state_kern(x))
return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
@@ -3234,7 +3232,8 @@ xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
!(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
!(x->props.mode != XFRM_MODE_TRANSPORT &&
- xfrm_state_addr_cmp(tmpl, x, family));
+ xfrm_state_addr_cmp(tmpl, x, family)) &&
+ (if_id == 0 || if_id == x->if_id);
}
/*
@@ -3246,7 +3245,7 @@ xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
*/
static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
- unsigned short family)
+ unsigned short family, u32 if_id)
{
int idx = start;
@@ -3256,7 +3255,7 @@ xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int star
} else
start = -1;
for (; idx < sp->len; idx++) {
- if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
+ if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
return ++idx;
if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
if (start == -1)
@@ -3619,6 +3618,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
if (pols[1]) {
if (IS_ERR(pols[1])) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
+ xfrm_pol_put(pols[0]);
return 0;
}
pols[1]->curlft.use_time = ktime_get_real_seconds();
@@ -3665,7 +3665,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
* are implied between each two transformations.
*/
for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
- k = xfrm_policy_ok(tpp[i], sp, k, family);
+ k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
if (k < 0) {
if (k < -1)
/* "-2 - errored_index" returned */
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index bee1a8143d75..e8be18bff096 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2511,9 +2511,6 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
if (inner_mode == NULL)
goto error;
- if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL))
- goto error;
-
x->inner_mode = *inner_mode;
if (x->props.family == AF_INET)
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index bd44a800e7db..3589c2ee3d6f 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -522,7 +522,7 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
- if (re) {
+ if (re && x->replay_esn && x->preplay_esn) {
struct xfrm_replay_state_esn *replay_esn;
replay_esn = nla_data(re);
memcpy(x->replay_esn, replay_esn,
@@ -1037,6 +1037,15 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
sizeof(*filter), GFP_KERNEL);
if (filter == NULL)
return -ENOMEM;
+
+ /* see addr_match(), (prefix length >> 5) << 2
+ * will be used to compare xfrm_address_t
+ */
+ if (filter->splen > (sizeof(xfrm_address_t) << 3) ||
+ filter->dplen > (sizeof(xfrm_address_t) << 3)) {
+ kfree(filter);
+ return -EINVAL;
+ }
}
if (attrs[XFRMA_PROTO])
@@ -2574,7 +2583,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
[XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
[XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
- [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
+ [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) },
[XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
[XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
[XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
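
In an nla_policy table, .len is the minimum payload length netlink validation accepts for that attribute, so it must be sized against the userspace ABI struct (xfrm_user_sec_ctx) rather than the kernel-internal xfrm_sec_ctx. Shape of such an entry, with hypothetical attribute names:

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
	/* .len is the minimum acceptable payload: size it by the UAPI struct */
	[EXAMPLE_ATTR_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) },
};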
diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c
index e0fbab9bec83..6d6d4e4ea843 100644
--- a/samples/bpf/hbm.c
+++ b/samples/bpf/hbm.c
@@ -307,6 +307,7 @@ static int run_bpf_prog(char *prog, int cg_id)
fout = fopen(fname, "w");
fprintf(fout, "id:%d\n", cg_id);
fprintf(fout, "ERROR: Could not lookup queue_stats\n");
+ fclose(fout);
} else if (stats_flag && qstats.lastPacketTime >
qstats.firstPacketTime) {
long long delta_us = (qstats.lastPacketTime -
diff --git a/samples/bpf/tcp_basertt_kern.c b/samples/bpf/tcp_basertt_kern.c
index 9dba48c2b920..66dd58f78d52 100644
--- a/samples/bpf/tcp_basertt_kern.c
+++ b/samples/bpf/tcp_basertt_kern.c
@@ -47,7 +47,7 @@ int bpf_basertt(struct bpf_sock_ops *skops)
case BPF_SOCK_OPS_BASE_RTT:
n = bpf_getsockopt(skops, SOL_TCP, TCP_CONGESTION,
cong, sizeof(cong));
- if (!n && !__builtin_memcmp(cong, nv, sizeof(nv)+1)) {
+ if (!n && !__builtin_memcmp(cong, nv, sizeof(nv))) {
/* Set base_rtt to 80us */
rv = 80;
} else if (n) {
diff --git a/samples/v4l/v4l2-pci-skeleton.c b/samples/v4l/v4l2-pci-skeleton.c
index f6a551bd57ef..3fa6582b4a68 100644
--- a/samples/v4l/v4l2-pci-skeleton.c
+++ b/samples/v4l/v4l2-pci-skeleton.c
@@ -879,7 +879,7 @@ static int skeleton_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
vdev->tvnorms = SKEL_TVNORMS;
video_set_drvdata(vdev, skel);
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret)
goto free_hdl;
diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c
index a760e130bd0d..8ad1aa13ddd9 100644
--- a/samples/vfio-mdev/mdpy-fb.c
+++ b/samples/vfio-mdev/mdpy-fb.c
@@ -109,7 +109,7 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
ret = pci_request_regions(pdev, "mdpy-fb");
if (ret < 0)
- return ret;
+ goto err_disable_dev;
pci_read_config_dword(pdev, MDPY_FORMAT_OFFSET, &format);
pci_read_config_dword(pdev, MDPY_WIDTH_OFFSET, &width);
@@ -191,6 +191,9 @@ err_release_fb:
err_release_regions:
pci_release_regions(pdev);
+err_disable_dev:
+ pci_disable_device(pdev);
+
return ret;
}
@@ -199,7 +202,10 @@ static void mdpy_fb_remove(struct pci_dev *pdev)
struct fb_info *info = pci_get_drvdata(pdev);
unregister_framebuffer(info);
+ iounmap(info->screen_base);
framebuffer_release(info);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
}
static struct pci_device_id mdpy_fb_pci_table[] = {
diff --git a/samples/xilinx_apm/Makefile b/samples/xilinx_apm/Makefile
new file mode 100644
index 000000000000..6182750c4479
--- /dev/null
+++ b/samples/xilinx_apm/Makefile
@@ -0,0 +1,71 @@
+#
+# 'make depend' uses makedepend to automatically generate dependencies
+# (dependencies are added to end of Makefile)
+# 'make' builds the executable file 'main'
+# 'make clean' removes all .o and executable files
+#
+
+# define the C compiler to use
+CC = $(CROSS_COMPILE)gcc
+
+# define any compile-time flags
+CFLAGS = -Wall -g
+
+# define any directories containing header files other than /usr/include
+#
+INCLUDES =
+
+# define library paths in addition to /usr/lib
+# if I wanted to include libraries not in /usr/lib I'd specify
+# their path using -Lpath, something like:
+LFLAGS =
+
+# define any libraries to link into executable:
+# if I want to link in libraries (libx.so or libx.a) I use the -llibname
+# option, something like this (which will link in libmylib.so and libm.so):
+LIBS = -lm
+
+# define the C source files
+SRCS = main.c xaxipmon.c
+
+# define the C object files
+#
+# This uses Suffix Replacement within a macro:
+# $(name:string1=string2)
+# For each word in 'name' replace 'string1' with 'string2'
+# Below we are replacing the suffix .c of all words in the macro SRCS
+# with the .o suffix
+#
+OBJS = $(SRCS:.c=.o)
+
+# define the executable file
+MAIN = main
+
+#
+# The following part of the makefile is generic; it can be used to
+# build any executable just by changing the definitions above and by
+# deleting dependencies appended to the file from 'make depend'
+#
+
+.PHONY: depend clean
+
+all: $(MAIN)
+ @echo Xilinx AXI Performance Monitor application compiled
+
+$(MAIN): $(OBJS)
+ $(CC) $(CFLAGS) $(INCLUDES) -o $(MAIN) $(OBJS) $(LFLAGS) $(LIBS)
+
+# this is a suffix replacement rule for building .o's from .c's
+# it uses automatic variables $<: the name of the prerequisite of
+# the rule(a .c file) and $@: the name of the target of the rule (a .o file)
+# (see the gnu make manual section about automatic variables)
+.c.o:
+ $(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
+
+clean:
+ $(RM) *.o *~ $(MAIN)
+
+depend: $(SRCS)
+ makedepend $(INCLUDES) $^
+
+# DO NOT DELETE THIS LINE -- make depend needs it
diff --git a/samples/xilinx_apm/main.c b/samples/xilinx_apm/main.c
new file mode 100644
index 000000000000..2a7eda4ab256
--- /dev/null
+++ b/samples/xilinx_apm/main.c
@@ -0,0 +1,134 @@
+/*
+ * Xilinx AXI Performance Monitor Example
+ *
+ * Copyright (c) 2013 Xilinx Inc.
+ *
+ * The code may be used by anyone for any purpose and can serve as a
+ * starting point for developing applications using Xilinx AXI
+ * Performance Monitor.
+ *
+ * This example, based on the Xilinx AXI Performance Monitor UIO driver,
+ * shows the sequence for reading metrics from the Xilinx AXI Performance
+ * Monitor IP. The user must provide the UIO device file with option -d,
+ * e.g. "main -d /dev/uio0" if /dev/uio0 is the device file for the AXI
+ * Performance Monitor driver. The user need not clear the Interrupt
+ * Status Register after waiting for an interrupt on read, since the
+ * driver clears it.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <sys/msg.h>
+#include <sys/ipc.h>
+#include <stdint.h>
+#include "xaxipmon.h"
+
+#define MAP_SIZE 4096
+
+void usage(void)
+{
+ printf("*argv[0] -d <UIO_DEV_FILE> -i|-o <VALUE>\n");
+ printf(" -d UIO device file. e.g. /dev/uio0\n");
+ return;
+}
+
+static void start(int fd)
+{
+ u8 slot = 2;
+ int tmp;
+ u32 isr;
+
+ setmetrics(slot, XAPM_METRIC_SET_4, XAPM_METRIC_COUNTER_0);
+ setsampleinterval(0x3FFFFFF);
+
+ loadsic();
+
+ intrenable(XAPM_IXR_SIC_OVERFLOW_MASK);
+
+ intrglobalenable();
+
+ enablemetricscounter();
+
+ enablesic();
+
+ isr = intrgetstatus();
+ /* Wait for SIC overflow interrupt */
+ if (read(fd, &tmp, 4) < 0)
+ perror("Read\n");
+ /* Driver clears the interrupt and occured interrupt status is
+ stored in param->isr */
+ isr = intrgetstatus();
+ if (isr & XAPM_IXR_SIC_OVERFLOW_MASK)
+ disablesic();
+
+ disablemetricscounter();
+
+ intrdisable(XAPM_IXR_SIC_OVERFLOW_MASK);
+
+ intrglobaldisable();
+
+ printf("Required metrics: %u\n",
+ getsampledmetriccounter(XAPM_METRIC_COUNTER_0) *
+ params->scalefactor);
+}
+
+int main(int argc, char *argv[])
+{
+ int c;
+	char *uiod = NULL;
+ int fd;
+
+ while ((c = getopt(argc, argv, "d:h")) != -1) {
+ switch (c) {
+ case 'd':
+ uiod = optarg;
+ break;
+ case 'h':
+ usage();
+ return 0;
+ default:
+ printf("invalid option: %c\n", (char)c);
+ usage();
+ return -1;
+ }
+ }
+
+	if (!uiod) {
+		usage();
+		return -1;
+	}
+
+	/* Open the UIO device file */
+	fd = open(uiod, O_RDWR);
+	if (fd < 0) {
+ perror(argv[0]);
+ printf("Invalid UIO device file:%s.\n", uiod);
+ usage();
+ return -1;
+ }
+
+	baseaddr = (ulong)mmap(0, MAP_SIZE, PROT_READ|PROT_WRITE,
+			MAP_SHARED, fd, 0);
+	if ((u32 *)baseaddr == MAP_FAILED) {
+		perror("mmap");
+		close(fd);
+		return -1;
+	}
+
+	/* mmap the UIO device */
+	params = (struct xapm_param *)mmap(0, MAP_SIZE, PROT_READ|PROT_WRITE,
+			MAP_SHARED, fd, getpagesize());
+	if (params == MAP_FAILED) {
+		perror("mmap");
+		munmap((u32 *)baseaddr, MAP_SIZE);
+		close(fd);
+		return -1;
+	}
+
+ if (params->mode == 1)
+ printf("AXI PMON is in Advanced Mode\n");
+ else if (params->mode == 2)
+ printf("AXI PMON is in Profile Mode\n");
+ else
+ printf("AXI PMON is in trace Mode\n");
+
+ start(fd);
+
+ close(fd);
+ munmap((u32 *)baseaddr, MAP_SIZE);
+ munmap(params, MAP_SIZE);
+
+ return 0;
+}
diff --git a/samples/xilinx_apm/xaxipmon.c b/samples/xilinx_apm/xaxipmon.c
new file mode 100644
index 000000000000..94a4e7511057
--- /dev/null
+++ b/samples/xilinx_apm/xaxipmon.c
@@ -0,0 +1,1269 @@
+#include "xaxipmon.h"
+/*****************************************************************************/
+/**
+*
+* This function resets all Metric Counters and Sampled Metric Counters of
+* AXI Performance Monitor.
+*
+* @return XST_SUCCESS
+*
+*
+* @note None.
+*
+******************************************************************************/
+int resetmetriccounter(void)
+{
+ u32 regval;
+
+ /*
+ * Write the reset value to the Control register to reset
+ * Metric counters
+ */
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ (regval | XAPM_CR_MCNTR_RESET_MASK));
+ /*
+ * Release from Reset
+ */
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ (regval & ~(XAPM_CR_MCNTR_RESET_MASK)));
+ return XST_SUCCESS;
+
+}
+
+/*****************************************************************************/
+/**
+*
+* This function resets Global Clock Counter of AXI Performance Monitor
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+void resetglobalclkcounter(void)
+{
+
+ u32 regval;
+
+ /*
+ * Write the reset value to the Control register to reset
+ * Global Clock Counter
+ */
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ (regval | XAPM_CR_GCC_RESET_MASK));
+
+ /*
+ * Release from Reset
+ */
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ (regval & ~(XAPM_CR_GCC_RESET_MASK)));
+
+}
+
+/*****************************************************************************/
+/**
+*
+* This function resets Streaming FIFO of AXI Performance Monitor
+*
+* @return XST_SUCCESS
+*
+* @note None.
+*
+******************************************************************************/
+int resetfifo(void)
+{
+ u32 regval;
+
+ /* Check Event Logging is enabled in Hardware */
+ if (params->eventlog == 0)
+		/* Event Logging not enabled in Hardware */
+ return XST_SUCCESS;
+
+ /*
+ * Write the reset value to the Control register to reset
+ * FIFO
+ */
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ (regval | XAPM_CR_FIFO_RESET_MASK));
+ /*
+ * Release from Reset
+ */
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ (regval & ~(XAPM_CR_FIFO_RESET_MASK)));
+
+ return XST_SUCCESS;
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets Ranges for Incrementers depending on parameters passed.
+*
+* @param incrementer specifies the Incrementer for which Ranges
+* need to be set
+* @param rangehigh specifies the Upper limit in 32 bit Register
+* @param rangelow specifies the Lower limit in 32 bit Register
+*
+* @return None.
+*
+* @note None
+*
+*****************************************************************************/
+void setincrementerrange(u8 incrementer, u16 rangehigh, u16 rangelow)
+{
+ u32 regval;
+
+ /*
+ * Write to the specified Range register
+ */
+ regval = rangehigh << 16;
+ regval |= rangelow;
+ writereg(baseaddr,
+ (XAPM_RANGE0_OFFSET + (incrementer * 16)), regval);
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns the Ranges of Incrementers Registers.
+*
+* @param incrementer specifies the Incrementer for which Ranges
+* need to be returned.
+* @param rangehigh specifies the user reference variable which returns
+* the Upper Range Value of the specified Incrementer.
+* @param rangelow specifies the user reference variable which returns
+* the Lower Range Value of the specified Incrementer.
+*
+* @return None.
+*
+* @note None
+*
+*****************************************************************************/
+void getincrementerrange(u8 incrementer, u16 *rangehigh, u16 *rangelow)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, (XAPM_RANGE0_OFFSET +
+ (incrementer * 16)));
+
+ *rangelow = regval & 0xFFFF;
+ *rangehigh = (regval >> 16) & 0xFFFF;
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets the Sample Interval Register
+*
+* @param sampleinterval is the Sample Interval
+*
+* @return None
+*
+* @note None.
+*
+*****************************************************************************/
+void setsampleinterval(u32 sampleinterval)
+{
+ /*
+ * Set Sample Interval
+ */
+ writereg(baseaddr, XAPM_SI_LOW_OFFSET, sampleinterval);
+
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns the contents of Sample Interval Register
+*
+* @param sampleinterval is a pointer where Sample Interval register
+* contents are returned.
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+void getsampleinterval(u32 *sampleinterval)
+{
+ /*
+	 * Read the Sample Interval Register
+ */
+ *sampleinterval = readreg(baseaddr, XAPM_SI_LOW_OFFSET);
+
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets metrics for specified Counter in the corresponding
+* Metric Selector Register.
+*
+* @param slot is the slot ID for which specified counter has to
+* be connected.
+* @param metrics is one of the Metric Sets. User has to use
+* XAPM_METRIC_SET_* macros in xaxipmon.h for this parameter
+* @param counter is the Counter Number.
+* The valid values are 0 to 9.
+*
+* @return XST_SUCCESS if Success
+* XST_FAILURE if Failure
+*
+* @note None.
+*
+*****************************************************************************/
+int setmetrics(u8 slot, u8 metrics, u8 counter)
+{
+ u32 regval;
+ u32 mask;
+
+ /* Find mask value to force zero in counternum byte range */
+ if (counter == 0 || counter == 4 || counter == 8)
+ mask = 0xFFFFFF00;
+ else if (counter == 1 || counter == 5 || counter == 9)
+ mask = 0xFFFF00FF;
+ else if (counter == 2 || counter == 6)
+ mask = 0xFF00FFFF;
+ else
+ mask = 0x00FFFFFF;
+
+ if (counter <= 3) {
+ regval = readreg(baseaddr, XAPM_MSR0_OFFSET);
+ regval = regval & mask;
+ regval = regval | (metrics << (counter * 8));
+ regval = regval | (slot << (counter * 8 + 5));
+ writereg(baseaddr, XAPM_MSR0_OFFSET, regval);
+ } else if ((counter >= 4) && (counter <= 7)) {
+ counter = counter - 4;
+ regval = readreg(baseaddr, XAPM_MSR1_OFFSET);
+ regval = regval & mask;
+ regval = regval | (metrics << (counter * 8));
+ regval = regval | (slot << (counter * 8 + 5));
+ writereg(baseaddr, XAPM_MSR1_OFFSET, regval);
+ } else {
+ counter = counter - 8;
+ regval = readreg(baseaddr, XAPM_MSR2_OFFSET);
+
+ regval = regval & mask;
+ regval = regval | (metrics << (counter * 8));
+ regval = regval | (slot << (counter * 8 + 5));
+ writereg(baseaddr, XAPM_MSR2_OFFSET, regval);
+ }
+ return XST_SUCCESS;
+}
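+
+/* Aside: the if/else ladder above just selects which byte lane of the
+ * selector register belongs to the counter. Since each MSR register
+ * holds four 8-bit lanes, an equivalent computation would be the
+ * following (illustrative helper, not used by the sample):
+ *
+ *	static u32 msr_lane_mask(u8 counter)
+ *	{
+ *		return ~(0xFFu << ((counter % 4) * 8));
+ *	}
+ */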
+
+/*****************************************************************************/
+/**
+*
+* This function returns metrics in the specified Counter from the corresponding
+* Metric Selector Register.
+*
+* @param counter is the Counter Number.
+* The valid values are 0 to 9.
+* @param metrics is a reference parameter in which the metrics of the
+* specified counter are returned.
+* @param slot is a reference parameter in which the slot ID of the
+* specified counter is returned.
+* @return XST_SUCCESS if Success
+* XST_FAILURE if Failure
+*
+* @note None.
+*
+*****************************************************************************/
+int getmetrics(u8 counter, u8 *metrics, u8 *slot)
+{
+ u32 regval;
+
+ if (counter <= 3) {
+ regval = readreg(baseaddr, XAPM_MSR0_OFFSET);
+ *metrics = (regval >> (counter * 8)) & 0x1F;
+ *slot = (regval >> (counter * 8 + 5)) & 0x7;
+ } else if ((counter >= 4) && (counter <= 7)) {
+ counter = counter - 4;
+ regval = readreg(baseaddr, XAPM_MSR1_OFFSET);
+ *metrics = (regval >> (counter * 8)) & 0x1F;
+ *slot = (regval >> (counter * 8 + 5)) & 0x7;
+ } else {
+ counter = counter - 8;
+ regval = readreg(baseaddr, XAPM_MSR2_OFFSET);
+ *metrics = (regval >> (counter * 8)) & 0x1F;
+ *slot = (regval >> (counter * 8 + 5)) & 0x7;
+ }
+ return XST_SUCCESS;
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns the contents of the Global Clock Counter Register.
+*
+* @param cnthigh is the user space pointer with which upper 32 bits
+* of Global Clock Counter has to be filled
+* @param cntlow is the user space pointer with which lower 32 bits
+* of Global Clock Counter has to be filled
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+void getglobalclkcounter(u32 *cnthigh, u32 *cntlow)
+{
+ *cnthigh = 0x0;
+ *cntlow = 0x0;
+
+ /*
+ * If Counter width is 64 bit then Counter Value has to be
+ * filled at CntHighValue address also.
+ */
+ if (params->globalcntwidth == 64) {
+ /* Bits[63:32] exists at XAPM_GCC_HIGH_OFFSET */
+ *cnthigh = readreg(baseaddr, XAPM_GCC_HIGH_OFFSET);
+ }
+ /* Bits[31:0] exists at XAPM_GCC_LOW_OFFSET */
+ *cntlow = readreg(baseaddr, XAPM_GCC_LOW_OFFSET);
+}
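+
+/* Aside: with the 64-bit global clock counter configuration, the two
+ * halves read above combine in the obvious way (illustrative helper;
+ * the sample returns the halves separately):
+ *
+ *	static u64 gcc_value(u32 cnthigh, u32 cntlow)
+ *	{
+ *		return ((u64)cnthigh << 32) | cntlow;
+ *	}
+ */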
+
+/****************************************************************************/
+/**
+*
+* This function returns the contents of the Metric Counter Register.
+*
+* @param counter is the number of the Metric Counter to be read.
+* Use the XAPM_METRIC_COUNTER* defines for the counter number in
+* xaxipmon.h. The valid values are 0 (XAPM_METRIC_COUNTER_0) to
+* 9 (XAPM_METRIC_COUNTER_9).
+* @return regval is the content of specified Metric Counter.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getmetriccounter(u32 counter)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr,
+ (XAPM_MC0_OFFSET + (counter * 16)));
+ return regval;
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns the contents of the Sampled Metric Counter Register.
+*
+* @param counter is the number of the Sampled Metric Counter to read.
+* Use the XAPM_METRIC_COUNTER* defines for the counter number in
+* xaxipmon.h. The valid values are 0 (XAPM_METRIC_COUNTER_0) to
+* 9 (XAPM_METRIC_COUNTER_9).
+*
+* @return regval is the content of specified Sampled Metric Counter.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getsampledmetriccounter(u32 counter)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, (XAPM_SMC0_OFFSET +
+ (counter * 16)));
+ return regval;
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns the contents of the Incrementer Register.
+*
+* @param incrementer is the number of the Incrementer register to
+* read. Use the XAPM_INCREMENTER_* defines for the Incrementer
+* number. The valid values are 0 (XAPM_INCREMENTER_0) to
+* 9 (XAPM_INCREMENTER_9).
+* @return regval is the content of the specified Incrementer register.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getincrementer(u32 incrementer)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, (XAPM_INC0_OFFSET +
+ (incrementer * 16)));
+ return regval;
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns the contents of the Sampled Incrementer Register.
+*
+* @param incrementer is the number of the Sampled Incrementer
+* register to read. Use the XAPM_INCREMENTER_* defines for the
+* Incrementer number. The valid values are 0 (XAPM_INCREMENTER_0)
+* to 9 (XAPM_INCREMENTER_9).
+* @return regval is the content of the specified Sampled Incrementer
+* register.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getsampledincrementer(u32 incrementer)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, (XAPM_SINC0_OFFSET +
+ (incrementer * 16)));
+ return regval;
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets Software-written Data Register.
+*
+* @param swdata is the Software written Data.
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+void setswdatareg(u32 swdata)
+{
+ /*
+ * Set Software-written Data Register
+ */
+ writereg(baseaddr, XAPM_SWD_OFFSET, swdata);
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns contents of Software-written Data Register.
+*
+* @return swdata.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getswdatareg(void)
+{
+ u32 swdata;
+
+ /*
+	 * Read the Software-written Data Register
+ */
+ swdata = (u32)readreg(baseaddr, XAPM_SWD_OFFSET);
+ return swdata;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function enables the following in the AXI Performance Monitor:
+* - Event logging
+*
+* @param flagenables is a value to write to the flag enables
+* register defined by XAPM_FEC_OFFSET. It is recommended
+* to use the XAPM_FEC_*_MASK mask bits to generate.
+* A value of 0x0 will disable all events to the event
+* log streaming FIFO.
+*
+* @return XST_SUCCESS
+*
+* @note None
+*
+******************************************************************************/
+int starteventlog(u32 flagenables)
+{
+ u32 regval;
+
+ /* Read current register value */
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ /* Now write to flag enables register */
+ writereg(baseaddr, XAPM_FEC_OFFSET, flagenables);
+ /* Write the new value to the Control register to
+ * enable event logging */
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval | XAPM_CR_EVENTLOG_ENABLE_MASK);
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function disables the following in the AXI Performance Monitor:
+* - Event logging
+*
+* @return XST_SUCCESS
+*
+* @note None
+*
+******************************************************************************/
+int stopeventlog(void)
+{
+ u32 regval;
+
+ /* Read current register value */
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+ /* Write the new value to the Control register to disable
+ * event logging */
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval & ~XAPM_CR_EVENTLOG_ENABLE_MASK);
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function enables the following in the AXI Performance Monitor:
+* - Global clock counter
+* - All metric counters
+* - All sampled metric counters
+*
+* @param sampleinterval is the sample interval
+* @return XST_SUCCESS
+*
+* @note None
+******************************************************************************/
+int startcounters(u32 sampleinterval)
+{
+ u32 regval;
+
+ /* Read current register value */
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+ /* Global Clock Counter is present in Advanced Mode only */
+ if (params->mode == 1)
+ regval = regval | XAPM_CR_GCC_ENABLE_MASK;
+ /*
+ * Write the new value to the Control register to enable
+ * global clock counter and metric counters
+ */
+ writereg(baseaddr, XAPM_CTL_OFFSET, regval | XAPM_CR_MCNTR_ENABLE_MASK);
+
+ /* Set, enable, and load sampled counters */
+ setsampleinterval(sampleinterval);
+ loadsic();
+ enablesic();
+
+ return XST_SUCCESS;
+}
+
+/****************************************************************************/
+/**
+*
+* This function disables the following in the AXI Performance Monitor:
+* - Global clock counter
+* - All metric counters
+*
+* @return XST_SUCCESS
+*
+* @note None
+*
+******************************************************************************/
+int stopcounters(void)
+{
+ u32 regval;
+
+ /* Read current register value */
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+ /* Global Clock Counter is present in Advanced Mode only */
+ if (params->mode == XAPM_MODE_ADVANCED)
+ regval = regval & ~XAPM_CR_GCC_ENABLE_MASK;
+
+ /*
+ * Write the new value to the Control register to disable
+ * global clock counter and metric counters
+ */
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval & ~XAPM_CR_MCNTR_ENABLE_MASK);
+
+ return XST_SUCCESS;
+}
+
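+/*
+ * Illustrative counter session (a sketch; the interval value is an
+ * arbitrary number of clock cycles):
+ *
+ *	startcounters(0x1000000);
+ *	... run the workload to be measured ...
+ *	stopcounters();
+ */
+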
+/*****************************************************************************/
+/**
+*
+* This function enables Metric Counters.
+*
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void enablemetricscounter(void)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval | XAPM_CR_MCNTR_ENABLE_MASK);
+}
+
+/****************************************************************************/
+/**
+*
+* This function disables the Metric Counters.
+*
+* @return None
+*
+* @note None
+*
+*****************************************************************************/
+void disablemetricscounter(void)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval & ~(XAPM_CR_MCNTR_ENABLE_MASK));
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets the Upper and Lower Ranges for the specified Metric
+* Counter Log Enable Register. Event Logging starts when the corresponding
+* Metric Counter value falls between these ranges.
+*
+* @param counter is the Metric Counter number for which
+* Ranges are to be assigned. Use the XAPM_METRIC_COUNTER*
+* defines for the counter number in xaxipmon.h.
+* The valid values are 0 (XAPM_METRIC_COUNTER_0) to
+* 9 (XAPM_METRIC_COUNTER_9).
+* @param rangehigh specifies the Upper limit (upper 16 bits of the register)
+* @param rangelow specifies the Lower limit (lower 16 bits of the register)
+* @return None
+*
+* @note None.
+*
+*****************************************************************************/
+void setlogenableranges(u32 counter, u16 rangehigh, u16 rangelow)
+{
+ u32 regval;
+
+ /*
+ * Write the specified Ranges to corresponding Metric Counter Log
+ * Enable Register
+ */
+ regval = rangehigh << 16;
+ regval |= rangelow;
+ writereg(baseaddr, (XAPM_MC0LOGEN_OFFSET +
+ (counter * 16)), regval);
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns the Ranges of the specified Metric Counter Log
+* Enable Register.
+*
+* @param counter is the Metric Counter number for which
+* Ranges are to be returned. Use the XAPM_METRIC_COUNTER*
+* defines for the counter number in xaxipmon.h.
+* The valid values are 0 (XAPM_METRIC_COUNTER_0) to
+* 9 (XAPM_METRIC_COUNTER_9).
+*
+* @param rangehigh specifies the user reference variable which returns
+* the Upper Range Value of the specified Metric Counter
+* Log Enable Register.
+* @param rangelow specifies the user reference variable which returns
+* the Lower Range Value of the specified Metric Counter
+* Log Enable Register.
+*
+* @note None.
+*
+*****************************************************************************/
+void getlogenableranges(u32 counter, u16 *rangehigh, u16 *rangelow)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr,
+ (XAPM_MC0LOGEN_OFFSET + (counter * 16)));
+
+ *rangelow = regval & 0xFFFF;
+ *rangehigh = (regval >> 16) & 0xFFFF;
+}
+
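+/*
+ * For example (an illustrative call, values chosen arbitrarily), the two
+ * 16-bit bounds are packed into one 32-bit register, high half first:
+ *
+ *	setlogenableranges(XAPM_METRIC_COUNTER_0, 0x0FFF, 0x0010);
+ *
+ * writes 0x0FFF0010 to the Metric Counter 0 Log Enable Register.
+ */
+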
+/*****************************************************************************/
+/**
+*
+* This function enables Event Logging.
+*
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void enableeventlog(void)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval | XAPM_CR_EVENTLOG_ENABLE_MASK);
+}
+
+/*****************************************************************************/
+/**
+*
+* This function enables External trigger pulse so that Metric Counters can be
+* started on external trigger pulse for a slot.
+*
+*
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void enablemctrigger(void)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval | XAPM_CR_MCNTR_EXTTRIGGER_MASK);
+}
+
+/****************************************************************************/
+/**
+*
+* This function disables the External trigger pulse used to start Metric
+* Counters on external trigger pulse for a slot.
+*
+* @return None
+*
+* @note None
+*
+*****************************************************************************/
+void disablemctrigger(void)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval & ~(XAPM_CR_MCNTR_EXTTRIGGER_MASK));
+}
+
+/*****************************************************************************/
+/**
+*
+* This function enables External trigger pulse for Event Log
+* so that Event Logging can be started on external trigger pulse for a slot.
+*
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void enableeventlogtrigger(void)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval | XAPM_CR_EVTLOG_EXTTRIGGER_MASK);
+}
+
+/****************************************************************************/
+/**
+*
+* This function disables the External trigger pulse used to start Event
+* Log on external trigger pulse for a slot.
+*
+* @return None
+*
+* @note None
+*
+*****************************************************************************/
+void disableeventlogtrigger(void)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval & ~(XAPM_CR_EVTLOG_EXTTRIGGER_MASK));
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns a name for a given Metric.
+*
+* @param metrics is one of the Metric Sets. Use the
+* XAPM_METRIC_SET_* macros defined in xaxipmon.h for this parameter.
+*
+* @return a constant string holding the name of the Metric.
+*
+* @note None
+*
+*****************************************************************************/
+const char *getmetricname(u8 metrics)
+{
+ if (metrics == XAPM_METRIC_SET_0)
+ return "Write Transaction Count";
+ if (metrics == XAPM_METRIC_SET_1)
+ return "Read Transaction Count";
+ if (metrics == XAPM_METRIC_SET_2)
+ return "Write Byte Count";
+ if (metrics == XAPM_METRIC_SET_3)
+ return "Read Byte Count";
+ if (metrics == XAPM_METRIC_SET_4)
+ return "Write Beat Count";
+ if (metrics == XAPM_METRIC_SET_5)
+ return "Total Read Latency";
+ if (metrics == XAPM_METRIC_SET_6)
+ return "Total Write Latency";
+ if (metrics == XAPM_METRIC_SET_7)
+ return "Slv_Wr_Idle_Cnt";
+ if (metrics == XAPM_METRIC_SET_8)
+ return "Mst_Rd_Idle_Cnt";
+ if (metrics == XAPM_METRIC_SET_9)
+ return "Num_BValids";
+ if (metrics == XAPM_METRIC_SET_10)
+ return "Num_WLasts";
+ if (metrics == XAPM_METRIC_SET_11)
+ return "Num_RLasts";
+ if (metrics == XAPM_METRIC_SET_12)
+ return "Minimum Write Latency";
+ if (metrics == XAPM_METRIC_SET_13)
+ return "Maximum Write Latency";
+ if (metrics == XAPM_METRIC_SET_14)
+ return "Minimum Read Latency";
+ if (metrics == XAPM_METRIC_SET_15)
+ return "Maximum Read Latency";
+ if (metrics == XAPM_METRIC_SET_16)
+ return "Transfer Cycle Count";
+ if (metrics == XAPM_METRIC_SET_17)
+ return "Packet Count";
+ if (metrics == XAPM_METRIC_SET_18)
+ return "Data Byte Count";
+ if (metrics == XAPM_METRIC_SET_19)
+ return "Position Byte Count";
+ if (metrics == XAPM_METRIC_SET_20)
+ return "Null Byte Count";
+ if (metrics == XAPM_METRIC_SET_21)
+ return "Slv_Idle_Cnt";
+ if (metrics == XAPM_METRIC_SET_22)
+ return "Mst_Idle_Cnt";
+ if (metrics == XAPM_METRIC_SET_30)
+ return "External event count";
+ return "Unsupported";
+}
+
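+/*
+ * A small sketch of how getmetricname() can be used, e.g. when printing a
+ * counter's configuration (the metric obtained earlier via getmetrics()):
+ *
+ *	u8 metrics, slot;
+ *
+ *	if (getmetrics(XAPM_METRIC_COUNTER_0, &metrics, &slot) == XST_SUCCESS)
+ *		printf("counter 0 (slot %u): %s\n", slot,
+ *		       getmetricname(metrics));
+ */
+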
+/****************************************************************************/
+/**
+*
+* This function sets Write ID in Latency ID register to capture Write
+* Latency metrics.
+*
+* @param writeid is the Write ID to be written in Latency ID register.
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+void setwriteid(u32 writeid)
+{
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_ID_OFFSET);
+ regval = regval & ~(XAPM_ID_WID_MASK);
+ regval = regval | writeid;
+ writereg(baseaddr, XAPM_ID_OFFSET, regval);
+ } else {
+ writereg(baseaddr, XAPM_ID_OFFSET, writeid);
+ }
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets Read ID in Latency ID register to capture
+* Read Latency metrics.
+*
+* @param readid is the Read ID to be written in Latency ID register.
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+void setreadid(u32 readid)
+{
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_ID_OFFSET);
+ regval = regval & ~(XAPM_ID_RID_MASK);
+ regval = regval | (readid << 16);
+ writereg(baseaddr, XAPM_ID_OFFSET, regval);
+ } else {
+ writereg(baseaddr, XAPM_RID_OFFSET, readid);
+ }
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns Write ID in Latency ID register.
+*
+* @return writeid is the required Write ID in Latency ID register.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getwriteid(void)
+{
+
+ u32 writeid;
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_ID_OFFSET);
+ writeid = regval & XAPM_ID_WID_MASK;
+ } else {
+ writeid = readreg(baseaddr, XAPM_ID_OFFSET);
+ }
+
+ return writeid;
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns Read ID in Latency ID register.
+*
+* @return readid is the required Read ID in Latency ID register.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getreadid(void)
+{
+
+ u32 readid;
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_ID_OFFSET);
+ regval = regval & XAPM_ID_RID_MASK;
+ readid = regval >> 16;
+ } else {
+ readid = readreg(baseaddr, XAPM_RID_OFFSET);
+ }
+
+ return readid;
+}
+
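+/*
+ * In the non-32-bit-filter layout the two IDs share one register, Read ID
+ * in the upper half and Write ID in the lower half, so for example
+ * (illustrative values):
+ *
+ *	setwriteid(0x2A);
+ *	setreadid(0x15);
+ *
+ * leaves the Latency ID register holding 0x0015002A, and getwriteid() /
+ * getreadid() return 0x2A and 0x15 again.
+ */
+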
+/*****************************************************************************/
+/**
+*
+* This function sets Latency Start point to calculate write latency.
+*
+* @param param can be 0 - XAPM_LATENCY_ADDR_ISSUE
+* or 1 - XAPM_LATENCY_ADDR_ACCEPT
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void setwrlatencystart(u8 param)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ if (param == XAPM_LATENCY_ADDR_ACCEPT)
+ writereg(baseaddr, XAPM_CTL_OFFSET, regval |
+ XAPM_CR_WRLATENCY_START_MASK);
+ else
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr,
+ XAPM_CTL_OFFSET) & ~(XAPM_CR_WRLATENCY_START_MASK));
+}
+
+/*****************************************************************************/
+/**
+*
+* This function sets Latency End point to calculate write latency.
+*
+* @param param can be 0 - XAPM_LATENCY_LASTWR
+* or 1 - XAPM_LATENCY_FIRSTWR
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void setwrlatencyend(u8 param)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ if (param == XAPM_LATENCY_FIRSTWR)
+ writereg(baseaddr, XAPM_CTL_OFFSET, regval |
+ XAPM_CR_WRLATENCY_END_MASK);
+ else
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr,
+ XAPM_CTL_OFFSET) & ~(XAPM_CR_WRLATENCY_END_MASK));
+}
+
+/*****************************************************************************/
+/**
+*
+* This function sets Latency Start point to calculate read latency.
+*
+* @param param can be 0 - XAPM_LATENCY_ADDR_ISSUE
+* or 1 - XAPM_LATENCY_ADDR_ACCEPT
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void setrdlatencystart(u8 param)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ if (param == XAPM_LATENCY_ADDR_ACCEPT)
+ writereg(baseaddr, XAPM_CTL_OFFSET, regval |
+ XAPM_CR_RDLATENCY_START_MASK);
+ else
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr,
+ XAPM_CTL_OFFSET) & ~(XAPM_CR_RDLATENCY_START_MASK));
+}
+
+/*****************************************************************************/
+/**
+*
+* This function sets Latency End point to calculate read latency.
+*
+* @param param can be 0 - XAPM_LATENCY_LASTRD
+* or 1 - XAPM_LATENCY_FIRSTRD
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void setrdlatencyend(u8 param)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ if (param == XAPM_LATENCY_FIRSTRD)
+ writereg(baseaddr, XAPM_CTL_OFFSET, regval |
+ XAPM_CR_RDLATENCY_END_MASK);
+ else
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr,
+ XAPM_CTL_OFFSET) & ~(XAPM_CR_RDLATENCY_END_MASK));
+}
+
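+/*
+ * Illustrative latency-window setup (a sketch): measure write latency from
+ * address acceptance to the first write beat, and read latency from address
+ * issue to the last read beat:
+ *
+ *	setwrlatencystart(XAPM_LATENCY_ADDR_ACCEPT);
+ *	setwrlatencyend(XAPM_LATENCY_FIRSTWR);
+ *	setrdlatencystart(XAPM_LATENCY_ADDR_ISSUE);
+ *	setrdlatencyend(XAPM_LATENCY_LASTRD);
+ */
+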
+/*****************************************************************************/
+/**
+*
+* This function returns Write Latency Start point.
+*
+* @return Returns 0 - XAPM_LATENCY_ADDR_ISSUE or
+* 1 - XAPM_LATENCY_ADDR_ACCEPT
+*
+* @note None
+*
+******************************************************************************/
+u8 getwrlatencystart(void)
+{
+ u8 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ regval = regval & XAPM_CR_WRLATENCY_START_MASK;
+ if (regval != XAPM_LATENCY_ADDR_ISSUE)
+ return XAPM_LATENCY_ADDR_ACCEPT;
+ else
+ return XAPM_LATENCY_ADDR_ISSUE;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function returns Write Latency End point.
+*
+* @return Returns 0 - XAPM_LATENCY_LASTWR or
+* 1 - XAPM_LATENCY_FIRSTWR.
+*
+* @note None
+*
+******************************************************************************/
+u8 getwrlatencyend(void)
+{
+ u8 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ regval = regval & XAPM_CR_WRLATENCY_END_MASK;
+ if (regval != XAPM_LATENCY_LASTWR)
+ return XAPM_LATENCY_FIRSTWR;
+ else
+ return XAPM_LATENCY_LASTWR;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function returns read Latency Start point.
+*
+* @return Returns 0 - XAPM_LATENCY_ADDR_ISSUE or
+* 1 - XAPM_LATENCY_ADDR_ACCEPT
+*
+* @note None
+*
+******************************************************************************/
+u8 getrdlatencystart(void)
+{
+ u8 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ regval = regval & XAPM_CR_RDLATENCY_START_MASK;
+
+ if (regval != XAPM_LATENCY_ADDR_ISSUE)
+ return XAPM_LATENCY_ADDR_ACCEPT;
+ else
+ return XAPM_LATENCY_ADDR_ISSUE;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function returns Read Latency End point.
+*
+* @return Returns 0 - XAPM_LATENCY_LASTRD or
+* 1 - XAPM_LATENCY_FIRSTRD.
+*
+* @note None
+*
+******************************************************************************/
+u8 getrdlatencyend(void)
+{
+ u8 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ regval = regval & XAPM_CR_RDLATENCY_END_MASK;
+ if (regval != XAPM_LATENCY_LASTRD)
+ return XAPM_LATENCY_FIRSTRD;
+ else
+ return XAPM_LATENCY_LASTRD;
+
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets Write ID Mask in ID Mask register.
+*
+* @param wrmask is the Write ID mask to be written in ID register.
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+void setwriteidmask(u32 wrmask)
+{
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_IDMASK_OFFSET);
+ regval = regval & ~(XAPM_MASKID_WID_MASK);
+ regval = regval | wrmask;
+ writereg(baseaddr, XAPM_IDMASK_OFFSET, regval);
+ } else {
+ writereg(baseaddr, XAPM_IDMASK_OFFSET, wrmask);
+ }
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets Read ID Mask in ID Mask register.
+*
+* @param rdmask is the Read ID mask to be written in ID Mask register.
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+void setreadidmask(u32 rdmask)
+{
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_IDMASK_OFFSET);
+ regval = regval & ~(XAPM_MASKID_RID_MASK);
+ regval = regval | (rdmask << 16);
+ writereg(baseaddr, XAPM_IDMASK_OFFSET, regval);
+ } else {
+ writereg(baseaddr, XAPM_RIDMASK_OFFSET, rdmask);
+ }
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns Write ID Mask in ID Mask register.
+*
+* @return wrmask is the required Write ID Mask in ID Mask register.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getwriteidmask(void)
+{
+
+ u32 wrmask;
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_IDMASK_OFFSET);
+ wrmask = regval & XAPM_MASKID_WID_MASK;
+ } else {
+ wrmask = readreg(baseaddr, XAPM_IDMASK_OFFSET);
+ }
+ return wrmask;
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns Read ID Mask in ID Mask register.
+*
+* @return rdmask is the required Read ID Mask in ID Mask register.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getreadidmask(void)
+{
+
+ u32 rdmask;
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_IDMASK_OFFSET);
+ regval = regval & XAPM_MASKID_RID_MASK;
+ rdmask = regval >> 16;
+ } else {
+ rdmask = readreg(baseaddr, XAPM_RIDMASK_OFFSET);
+ }
+ return rdmask;
+}
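+
+/*
+ * Putting the ID filter pieces together (an illustrative sketch): restrict
+ * the monitored transactions to AXI ID 0x5 on both channels, then turn the
+ * filter on with the enableidfilter() macro from xaxipmon.h:
+ *
+ *	setwriteid(0x5);
+ *	setreadid(0x5);
+ *	setwriteidmask(0xFFFF);
+ *	setreadidmask(0xFFFF);
+ *	enableidfilter();
+ */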
diff --git a/samples/xilinx_apm/xaxipmon.h b/samples/xilinx_apm/xaxipmon.h
new file mode 100644
index 000000000000..85e0e902a1c5
--- /dev/null
+++ b/samples/xilinx_apm/xaxipmon.h
@@ -0,0 +1,943 @@
+#ifndef XAXIPMON_H /* Prevent circular inclusions */
+#define XAXIPMON_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stdbool.h>
+
+
+#define XST_SUCCESS 0
+#define XST_FAILURE 1
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#define XAPM_GCC_HIGH_OFFSET 0x0000 /* Global Clock Counter
+ 32 to 63 bits */
+#define XAPM_GCC_LOW_OFFSET 0x0004 /* Global Clock Counter Lower
+ 0-31 bits */
+#define XAPM_SI_HIGH_OFFSET 0x0020 /* Sample Interval MSB */
+#define XAPM_SI_LOW_OFFSET 0x0024 /* Sample Interval LSB */
+#define XAPM_SICR_OFFSET 0x0028 /* Sample Interval Control
+ Register */
+#define XAPM_SR_OFFSET 0x002C /* Sample Register */
+#define XAPM_GIE_OFFSET 0x0030 /* Global Interrupt Enable
+ Register */
+#define XAPM_IE_OFFSET 0x0034 /* Interrupt Enable Register */
+#define XAPM_IS_OFFSET 0x0038 /* Interrupt Status Register */
+
+#define XAPM_MSR0_OFFSET 0x0044 /* Metric Selector 0 Register*/
+#define XAPM_MSR1_OFFSET 0x0048 /* Metric Selector 1 Register*/
+#define XAPM_MSR2_OFFSET 0x004C /* Metric Selector 2 Register*/
+
+#define XAPM_MC0_OFFSET 0x0100 /* Metric Counter 0 Register */
+#define XAPM_INC0_OFFSET 0x0104 /* Incrementer 0 Register */
+#define XAPM_RANGE0_OFFSET 0x0108 /* Range 0 Register */
+#define XAPM_MC0LOGEN_OFFSET 0x010C /* Metric Counter 0
+ Log Enable Register */
+#define XAPM_MC1_OFFSET 0x0110 /* Metric Counter 1 Register */
+#define XAPM_INC1_OFFSET 0x0114 /* Incrementer 1 Register */
+#define XAPM_RANGE1_OFFSET 0x0118 /* Range 1 Register */
+#define XAPM_MC1LOGEN_OFFSET 0x011C /* Metric Counter 1
+ Log Enable Register */
+#define XAPM_MC2_OFFSET 0x0120 /* Metric Counter 2 Register */
+#define XAPM_INC2_OFFSET 0x0124 /* Incrementer 2 Register */
+#define XAPM_RANGE2_OFFSET 0x0128 /* Range 2 Register */
+#define XAPM_MC2LOGEN_OFFSET 0x012C /* Metric Counter 2
+ Log Enable Register */
+#define XAPM_MC3_OFFSET 0x0130 /* Metric Counter 3 Register */
+#define XAPM_INC3_OFFSET 0x0134 /* Incrementer 3 Register */
+#define XAPM_RANGE3_OFFSET 0x0138 /* Range 3 Register */
+#define XAPM_MC3LOGEN_OFFSET 0x013C /* Metric Counter 3
+ Log Enable Register */
+#define XAPM_MC4_OFFSET 0x0140 /* Metric Counter 4 Register */
+#define XAPM_INC4_OFFSET 0x0144 /* Incrementer 4 Register */
+#define XAPM_RANGE4_OFFSET 0x0148 /* Range 4 Register */
+#define XAPM_MC4LOGEN_OFFSET 0x014C /* Metric Counter 4
+ Log Enable Register */
+#define XAPM_MC5_OFFSET 0x0150 /* Metric Counter 5
+ Register */
+#define XAPM_INC5_OFFSET 0x0154 /* Incrementer 5 Register */
+#define XAPM_RANGE5_OFFSET 0x0158 /* Range 5 Register */
+#define XAPM_MC5LOGEN_OFFSET 0x015C /* Metric Counter 5
+ Log Enable Register */
+#define XAPM_MC6_OFFSET 0x0160 /* Metric Counter 6
+ Register */
+#define XAPM_INC6_OFFSET 0x0164 /* Incrementer 6 Register */
+#define XAPM_RANGE6_OFFSET 0x0168 /* Range 6 Register */
+#define XAPM_MC6LOGEN_OFFSET 0x016C /* Metric Counter 6
+ Log Enable Register */
+#define XAPM_MC7_OFFSET 0x0170 /* Metric Counter 7
+ Register */
+#define XAPM_INC7_OFFSET 0x0174 /* Incrementer 7 Register */
+#define XAPM_RANGE7_OFFSET 0x0178 /* Range 7 Register */
+#define XAPM_MC7LOGEN_OFFSET 0x017C /* Metric Counter 7
+ Log Enable Register */
+#define XAPM_MC8_OFFSET 0x0180 /* Metric Counter 8
+ Register */
+#define XAPM_INC8_OFFSET 0x0184 /* Incrementer 8 Register */
+#define XAPM_RANGE8_OFFSET 0x0188 /* Range 8 Register */
+#define XAPM_MC8LOGEN_OFFSET 0x018C /* Metric Counter 8
+ Log Enable Register */
+#define XAPM_MC9_OFFSET 0x0190 /* Metric Counter 9
+ Register */
+#define XAPM_INC9_OFFSET 0x0194 /* Incrementer 9 Register */
+#define XAPM_RANGE9_OFFSET 0x0198 /* Range 9 Register */
+#define XAPM_MC9LOGEN_OFFSET 0x019C /* Metric Counter 9
+ Log Enable Register */
+
+#define XAPM_MC10_OFFSET 0x01A0 /* Metric Counter 10
+ Register */
+#define XAPM_MC11_OFFSET 0x01B0 /* Metric Counter 11
+ Register */
+#define XAPM_MC12_OFFSET 0x0500 /* Metric Counter 12
+ Register */
+#define XAPM_MC13_OFFSET 0x0510 /* Metric Counter 13
+ Register */
+#define XAPM_MC14_OFFSET 0x0520 /* Metric Counter 14
+ Register */
+#define XAPM_MC15_OFFSET 0x0530 /* Metric Counter 15
+ Register */
+#define XAPM_MC16_OFFSET 0x0540 /* Metric Counter 16
+ Register */
+#define XAPM_MC17_OFFSET 0x0550 /* Metric Counter 17
+ Register */
+#define XAPM_MC18_OFFSET 0x0560 /* Metric Counter 18
+ Register */
+#define XAPM_MC19_OFFSET 0x0570 /* Metric Counter 19
+ Register */
+#define XAPM_MC20_OFFSET 0x0580 /* Metric Counter 20
+ Register */
+#define XAPM_MC21_OFFSET 0x0590 /* Metric Counter 21
+ Register */
+#define XAPM_MC22_OFFSET 0x05A0 /* Metric Counter 22
+ Register */
+#define XAPM_MC23_OFFSET 0x05B0 /* Metric Counter 23
+ Register */
+#define XAPM_MC24_OFFSET 0x0700 /* Metric Counter 24
+ Register */
+#define XAPM_MC25_OFFSET 0x0710 /* Metric Counter 25
+ Register */
+#define XAPM_MC26_OFFSET 0x0720 /* Metric Counter 26
+ Register */
+#define XAPM_MC27_OFFSET 0x0730 /* Metric Counter 27
+ Register */
+#define XAPM_MC28_OFFSET 0x0740 /* Metric Counter 28
+ Register */
+#define XAPM_MC29_OFFSET 0x0750 /* Metric Counter 29
+ Register */
+#define XAPM_MC30_OFFSET 0x0760 /* Metric Counter 30
+ Register */
+#define XAPM_MC31_OFFSET 0x0770 /* Metric Counter 31
+ Register */
+#define XAPM_MC32_OFFSET 0x0780 /* Metric Counter 32
+ Register */
+#define XAPM_MC33_OFFSET 0x0790 /* Metric Counter 33
+ Register */
+#define XAPM_MC34_OFFSET 0x07A0 /* Metric Counter 34
+ Register */
+#define XAPM_MC35_OFFSET 0x07B0 /* Metric Counter 35
+ Register */
+#define XAPM_MC36_OFFSET 0x0900 /* Metric Counter 36
+ Register */
+#define XAPM_MC37_OFFSET 0x0910 /* Metric Counter 37
+ Register */
+#define XAPM_MC38_OFFSET 0x0920 /* Metric Counter 38
+ Register */
+#define XAPM_MC39_OFFSET 0x0930 /* Metric Counter 39
+ Register */
+#define XAPM_MC40_OFFSET 0x0940 /* Metric Counter 40
+ Register */
+#define XAPM_MC41_OFFSET 0x0950 /* Metric Counter 41
+ Register */
+#define XAPM_MC42_OFFSET 0x0960 /* Metric Counter 42
+ Register */
+#define XAPM_MC43_OFFSET 0x0970 /* Metric Counter 43
+ Register */
+#define XAPM_MC44_OFFSET 0x0980 /* Metric Counter 44
+ Register */
+#define XAPM_MC45_OFFSET 0x0990 /* Metric Counter 45
+ Register */
+#define XAPM_MC46_OFFSET 0x09A0 /* Metric Counter 46
+ Register */
+#define XAPM_MC47_OFFSET 0x09B0 /* Metric Counter 47
+ Register */
+
+#define XAPM_SMC0_OFFSET 0x0200 /* Sampled Metric Counter
+ 0 Register */
+#define XAPM_SINC0_OFFSET 0x0204 /* Sampled Incrementer
+ 0 Register */
+#define XAPM_SMC1_OFFSET 0x0210 /* Sampled Metric Counter
+ 1 Register */
+#define XAPM_SINC1_OFFSET 0x0214 /* Sampled Incrementer
+ 1 Register */
+#define XAPM_SMC2_OFFSET 0x0220 /* Sampled Metric Counter
+ 2 Register */
+#define XAPM_SINC2_OFFSET 0x0224 /* Sampled Incrementer
+ 2 Register */
+#define XAPM_SMC3_OFFSET 0x0230 /* Sampled Metric Counter
+ 3 Register */
+#define XAPM_SINC3_OFFSET 0x0234 /* Sampled Incrementer
+ 3 Register */
+#define XAPM_SMC4_OFFSET 0x0240 /* Sampled Metric Counter
+ 4 Register */
+#define XAPM_SINC4_OFFSET 0x0244 /* Sampled Incrementer
+ 4 Register */
+#define XAPM_SMC5_OFFSET 0x0250 /* Sampled Metric Counter
+ 5 Register */
+#define XAPM_SINC5_OFFSET 0x0254 /* Sampled Incrementer
+ 5 Register */
+#define XAPM_SMC6_OFFSET 0x0260 /* Sampled Metric Counter
+ 6 Register */
+#define XAPM_SINC6_OFFSET 0x0264 /* Sampled Incrementer
+ 6 Register */
+#define XAPM_SMC7_OFFSET 0x0270 /* Sampled Metric Counter
+ 7 Register */
+#define XAPM_SINC7_OFFSET 0x0274 /* Sampled Incrementer
+ 7 Register */
+#define XAPM_SMC8_OFFSET 0x0280 /* Sampled Metric Counter
+ 8 Register */
+#define XAPM_SINC8_OFFSET 0x0284 /* Sampled Incrementer
+ 8 Register */
+#define XAPM_SMC9_OFFSET 0x0290 /* Sampled Metric Counter
+ 9 Register */
+#define XAPM_SINC9_OFFSET 0x0294 /* Sampled Incrementer
+ 9 Register */
+#define XAPM_SMC10_OFFSET 0x02A0 /* Sampled Metric Counter
+ 10 Register */
+#define XAPM_SMC11_OFFSET 0x02B0 /* Sampled Metric Counter
+ 11 Register */
+#define XAPM_SMC12_OFFSET 0x0600 /* Sampled Metric Counter
+ 12 Register */
+#define XAPM_SMC13_OFFSET 0x0610 /* Sampled Metric Counter
+ 13 Register */
+#define XAPM_SMC14_OFFSET 0x0620 /* Sampled Metric Counter
+ 14 Register */
+#define XAPM_SMC15_OFFSET 0x0630 /* Sampled Metric Counter
+ 15 Register */
+#define XAPM_SMC16_OFFSET 0x0640 /* Sampled Metric Counter
+ 16 Register */
+#define XAPM_SMC17_OFFSET 0x0650 /* Sampled Metric Counter
+ 17 Register */
+#define XAPM_SMC18_OFFSET 0x0660 /* Sampled Metric Counter
+ 18 Register */
+#define XAPM_SMC19_OFFSET 0x0670 /* Sampled Metric Counter
+ 19 Register */
+#define XAPM_SMC20_OFFSET 0x0680 /* Sampled Metric Counter
+ 20 Register */
+#define XAPM_SMC21_OFFSET 0x0690 /* Sampled Metric Counter
+ 21 Register */
+#define XAPM_SMC22_OFFSET 0x06A0 /* Sampled Metric Counter
+ 22 Register */
+#define XAPM_SMC23_OFFSET 0x06B0 /* Sampled Metric Counter
+ 23 Register */
+#define XAPM_SMC24_OFFSET 0x0800 /* Sampled Metric Counter
+ 24 Register */
+#define XAPM_SMC25_OFFSET 0x0810 /* Sampled Metric Counter
+ 25 Register */
+#define XAPM_SMC26_OFFSET 0x0820 /* Sampled Metric Counter
+ 26 Register */
+#define XAPM_SMC27_OFFSET 0x0830 /* Sampled Metric Counter
+ 27 Register */
+#define XAPM_SMC28_OFFSET 0x0840 /* Sampled Metric Counter
+ 28 Register */
+#define XAPM_SMC29_OFFSET 0x0850 /* Sampled Metric Counter
+ 29 Register */
+#define XAPM_SMC30_OFFSET 0x0860 /* Sampled Metric Counter
+ 30 Register */
+#define XAPM_SMC31_OFFSET 0x0870 /* Sampled Metric Counter
+ 31 Register */
+#define XAPM_SMC32_OFFSET 0x0880 /* Sampled Metric Counter
+ 32 Register */
+#define XAPM_SMC33_OFFSET 0x0890 /* Sampled Metric Counter
+ 33 Register */
+#define XAPM_SMC34_OFFSET 0x08A0 /* Sampled Metric Counter
+ 34 Register */
+#define XAPM_SMC35_OFFSET 0x08B0 /* Sampled Metric Counter
+ 35 Register */
+#define XAPM_SMC36_OFFSET 0x0A00 /* Sampled Metric Counter
+ 36 Register */
+#define XAPM_SMC37_OFFSET 0x0A10 /* Sampled Metric Counter
+ 37 Register */
+#define XAPM_SMC38_OFFSET 0x0A20 /* Sampled Metric Counter
+ 38 Register */
+#define XAPM_SMC39_OFFSET 0x0A30 /* Sampled Metric Counter
+ 39 Register */
+#define XAPM_SMC40_OFFSET 0x0A40 /* Sampled Metric Counter
+ 40 Register */
+#define XAPM_SMC41_OFFSET 0x0A50 /* Sampled Metric Counter
+ 41 Register */
+#define XAPM_SMC42_OFFSET 0x0A60 /* Sampled Metric Counter
+ 42 Register */
+#define XAPM_SMC43_OFFSET 0x0A70 /* Sampled Metric Counter
+ 43 Register */
+#define XAPM_SMC44_OFFSET 0x0A80 /* Sampled Metric Counter
+ 44 Register */
+#define XAPM_SMC45_OFFSET 0x0A90 /* Sampled Metric Counter
+ 45 Register */
+#define XAPM_SMC46_OFFSET 0x0AA0 /* Sampled Metric Counter
+ 46 Register */
+#define XAPM_SMC47_OFFSET 0x0AB0 /* Sampled Metric Counter
+ 47 Register */
+
+#define XAPM_CTL_OFFSET 0x0300 /* Control Register */
+
+#define XAPM_ID_OFFSET 0x0304 /* Latency ID Register */
+
+#define XAPM_IDMASK_OFFSET 0x0308 /* ID Mask Register */
+
+#define XAPM_RID_OFFSET 0x030C /* Latency Read ID Register */
+
+#define XAPM_RIDMASK_OFFSET 0x0310 /* Read ID mask register */
+
+#define XAPM_FEC_OFFSET 0x0400 /* Flag Enable
+ Control Register */
+
+#define XAPM_SWD_OFFSET 0x0404 /* Software-written
+ Data Register */
+
+#define XAPM_SICR_MCNTR_RST_MASK 0x00000100 /* Enable the Metric
+ Counter Reset */
+#define XAPM_SICR_LOAD_MASK 0x00000002 /* Load the Sample Interval
+ Register Value into
+ the counter */
+#define XAPM_SICR_ENABLE_MASK 0x00000001 /* Enable the downcounter */
+
+#define XAPM_IXR_MC9_OVERFLOW_MASK 0x00001000 /**< Metric Counter 9
+ * Overflow */
+#define XAPM_IXR_MC8_OVERFLOW_MASK 0x00000800 /**< Metric Counter 8
+ * Overflow */
+#define XAPM_IXR_MC7_OVERFLOW_MASK 0x00000400 /**< Metric Counter 7
+ * Overflow */
+#define XAPM_IXR_MC6_OVERFLOW_MASK 0x00000200 /**< Metric Counter 6
+ * Overflow */
+#define XAPM_IXR_MC5_OVERFLOW_MASK 0x00000100 /**< Metric Counter 5
+ * Overflow */
+#define XAPM_IXR_MC4_OVERFLOW_MASK 0x00000080 /**< Metric Counter 4
+ * Overflow */
+#define XAPM_IXR_MC3_OVERFLOW_MASK 0x00000040 /**< Metric Counter 3
+ * Overflow */
+#define XAPM_IXR_MC2_OVERFLOW_MASK 0x00000020 /**< Metric Counter 2
+ * Overflow */
+#define XAPM_IXR_MC1_OVERFLOW_MASK 0x00000010 /**< Metric Counter 1
+ * Overflow */
+#define XAPM_IXR_MC0_OVERFLOW_MASK 0x00000008 /**< Metric Counter 0
+ * Overflow */
+#define XAPM_IXR_FIFO_FULL_MASK 0x00000004 /**< Event Log FIFO
+ * full */
+#define XAPM_IXR_SIC_OVERFLOW_MASK 0x00000002 /**< Sample Interval
+ * Counter Overflow */
+#define XAPM_IXR_GCC_OVERFLOW_MASK 0x00000001 /**< Global Clock
+ * Counter Overflow */
+#define XAPM_IXR_ALL_MASK (XAPM_IXR_SIC_OVERFLOW_MASK | \
+ XAPM_IXR_GCC_OVERFLOW_MASK | \
+ XAPM_IXR_FIFO_FULL_MASK | \
+ XAPM_IXR_MC0_OVERFLOW_MASK | \
+ XAPM_IXR_MC1_OVERFLOW_MASK | \
+ XAPM_IXR_MC2_OVERFLOW_MASK | \
+ XAPM_IXR_MC3_OVERFLOW_MASK | \
+ XAPM_IXR_MC4_OVERFLOW_MASK | \
+ XAPM_IXR_MC5_OVERFLOW_MASK | \
+ XAPM_IXR_MC6_OVERFLOW_MASK | \
+ XAPM_IXR_MC7_OVERFLOW_MASK | \
+ XAPM_IXR_MC8_OVERFLOW_MASK | \
+ XAPM_IXR_MC9_OVERFLOW_MASK)
+
+#define XAPM_CR_FIFO_RESET_MASK 0x02000000
+ /**< FIFO Reset */
+#define XAPM_CR_MUXSEL_MASK 0x01000000
+ /**< Mux Selector mask */
+#define XAPM_CR_GCC_RESET_MASK 0x00020000
+ /**< Global Clk
+ Counter Reset */
+#define XAPM_CR_GCC_ENABLE_MASK 0x00010000
+ /**< Global Clk
+ Counter Enable */
+#define XAPM_CR_EVTLOG_EXTTRIGGER_MASK 0x00000200
+ /**< Enable External trigger
+ to start event Log */
+#define XAPM_CR_EVENTLOG_ENABLE_MASK 0x00000100
+ /**< Event Log Enable */
+#define XAPM_CR_RDLATENCY_END_MASK 0x00000080
+ /**< Read Latency
+ End point */
+#define XAPM_CR_RDLATENCY_START_MASK 0x00000040
+ /**< Read Latency
+ Start point */
+#define XAPM_CR_WRLATENCY_END_MASK 0x00000020
+ /**< Write Latency
+ End point */
+#define XAPM_CR_WRLATENCY_START_MASK 0x00000010
+ /**< Write Latency
+ Start point */
+#define XAPM_CR_IDFILTER_ENABLE_MASK 0x00000008
+ /**< ID Filter Enable */
+#define XAPM_CR_MCNTR_EXTTRIGGER_MASK 0x00000004
+ /**< Enable External
+ trigger to start
+ Metric Counters */
+#define XAPM_CR_MCNTR_RESET_MASK 0x00000002
+ /**< Metrics Counter
+ Reset */
+#define XAPM_CR_MCNTR_ENABLE_MASK 0x00000001
+ /**< Metrics Counter
+ Enable */
+
+#define XAPM_ID_RID_MASK 0xFFFF0000 /**< Read ID */
+
+#define XAPM_ID_WID_MASK 0x0000FFFF /**< Write ID */
+
+#define XAPM_MASKID_RID_MASK 0xFFFF0000 /**< Read ID Mask */
+
+#define XAPM_MASKID_WID_MASK 0x0000FFFF /**< Write ID Mask*/
+
+
+#define XAPM_MAX_COUNTERS 10 /**< Maximum number of Counters */
+#define XAPM_MAX_COUNTERS_PROFILE 48 /**< Maximum number of Counters in
+ profile mode */
+
+#define XAPM_METRIC_COUNTER_0 0 /**< Metric Counter 0 Register Index */
+#define XAPM_METRIC_COUNTER_1 1 /**< Metric Counter 1 Register Index */
+#define XAPM_METRIC_COUNTER_2 2 /**< Metric Counter 2 Register Index */
+#define XAPM_METRIC_COUNTER_3 3 /**< Metric Counter 3 Register Index */
+#define XAPM_METRIC_COUNTER_4 4 /**< Metric Counter 4 Register Index */
+#define XAPM_METRIC_COUNTER_5 5 /**< Metric Counter 5 Register Index */
+#define XAPM_METRIC_COUNTER_6 6 /**< Metric Counter 6 Register Index */
+#define XAPM_METRIC_COUNTER_7 7 /**< Metric Counter 7 Register Index */
+#define XAPM_METRIC_COUNTER_8 8 /**< Metric Counter 8 Register Index */
+#define XAPM_METRIC_COUNTER_9 9 /**< Metric Counter 9 Register Index */
+
+#define XAPM_INCREMENTER_0 0 /**< Incrementer 0 Register Index */
+#define XAPM_INCREMENTER_1 1 /**< Incrementer 1 Register Index */
+#define XAPM_INCREMENTER_2 2 /**< Incrementer 2 Register Index */
+#define XAPM_INCREMENTER_3 3 /**< Incrementer 3 Register Index */
+#define XAPM_INCREMENTER_4 4 /**< Incrementer 4 Register Index */
+#define XAPM_INCREMENTER_5 5 /**< Incrementer 5 Register Index */
+#define XAPM_INCREMENTER_6 6 /**< Incrementer 6 Register Index */
+#define XAPM_INCREMENTER_7 7 /**< Incrementer 7 Register Index */
+#define XAPM_INCREMENTER_8 8 /**< Incrementer 8 Register Index */
+#define XAPM_INCREMENTER_9 9 /**< Incrementer 9 Register Index */
+
+#define XAPM_METRIC_SET_0 0 /**< Write Transaction Count */
+#define XAPM_METRIC_SET_1 1 /**< Read Transaction Count */
+#define XAPM_METRIC_SET_2 2 /**< Write Byte Count */
+#define XAPM_METRIC_SET_3 3 /**< Read Byte Count */
+#define XAPM_METRIC_SET_4 4 /**< Write Beat Count */
+#define XAPM_METRIC_SET_5 5 /**< Total Read Latency */
+#define XAPM_METRIC_SET_6 6 /**< Total Write Latency */
+#define XAPM_METRIC_SET_7 7 /**< Slv_Wr_Idle_Cnt */
+#define XAPM_METRIC_SET_8 8 /**< Mst_Rd_Idle_Cnt */
+#define XAPM_METRIC_SET_9 9 /**< Num_BValids */
+#define XAPM_METRIC_SET_10 10 /**< Num_WLasts */
+#define XAPM_METRIC_SET_11 11 /**< Num_RLasts */
+#define XAPM_METRIC_SET_12 12 /**< Minimum Write Latency */
+#define XAPM_METRIC_SET_13 13 /**< Maximum Write Latency */
+#define XAPM_METRIC_SET_14 14 /**< Minimum Read Latency */
+#define XAPM_METRIC_SET_15 15 /**< Maximum Read Latency */
+#define XAPM_METRIC_SET_16 16 /**< Transfer Cycle Count */
+#define XAPM_METRIC_SET_17 17 /**< Packet Count */
+#define XAPM_METRIC_SET_18 18 /**< Data Byte Count */
+#define XAPM_METRIC_SET_19 19 /**< Position Byte Count */
+#define XAPM_METRIC_SET_20 20 /**< Null Byte Count */
+#define XAPM_METRIC_SET_21 21 /**< Slv_Idle_Cnt */
+#define XAPM_METRIC_SET_22 22 /**< Mst_Idle_Cnt */
+#define XAPM_METRIC_SET_30 30 /**< External event count */
+
+#define XAPM_MAX_AGENTS 8 /**< Maximum number of Agents */
+
+#define XAPM_FLAG_WRADDR 0x00000001 /**< Write Address flag */
+#define XAPM_FLAG_FIRSTWR 0x00000002 /**< First Write flag */
+#define XAPM_FLAG_LASTWR 0x00000004 /**< Last Write flag */
+#define XAPM_FLAG_RESPONSE 0x00000008 /**< Response flag */
+#define XAPM_FLAG_RDADDR 0x00000010 /**< Read Address flag */
+#define XAPM_FLAG_FIRSTRD 0x00000020 /**< First Read flag */
+#define XAPM_FLAG_LASTRD 0x00000040 /**< Last Read flag */
+#define XAPM_FLAG_SWDATA 0x00010000 /**< Software-written Data flag */
+#define XAPM_FLAG_EVENT 0x00020000 /**< External Event flag */
+#define XAPM_FLAG_EVNTSTOP 0x00040000 /**< Event Stop flag */
+#define XAPM_FLAG_EVNTSTART 0x00080000 /**< Event Start flag */
+#define XAPM_FLAG_GCCOVF 0x00100000 /**< Global Clock Counter Overflow
+ * flag */
+#define XAPM_FLAG_SCLAPSE 0x00200000 /**< Sample Counter Lapse flag */
+#define XAPM_FLAG_MC0 0x00400000 /**< Metric Counter 0 flag */
+#define XAPM_FLAG_MC1 0x00800000 /**< Metric Counter 1 flag */
+#define XAPM_FLAG_MC2 0x01000000 /**< Metric Counter 2 flag */
+#define XAPM_FLAG_MC3 0x02000000 /**< Metric Counter 3 flag */
+#define XAPM_FLAG_MC4 0x04000000 /**< Metric Counter 4 flag */
+#define XAPM_FLAG_MC5 0x08000000 /**< Metric Counter 5 flag */
+#define XAPM_FLAG_MC6 0x10000000 /**< Metric Counter 6 flag */
+#define XAPM_FLAG_MC7 0x20000000 /**< Metric Counter 7 flag */
+#define XAPM_FLAG_MC8 0x40000000 /**< Metric Counter 8 flag */
+#define XAPM_FLAG_MC9 0x80000000 /**< Metric Counter 9 flag */
+
+#define XAPM_LATENCY_ADDR_ISSUE 0 /**< Address Issue as start
+ point for Latency calculation*/
+#define XAPM_LATENCY_ADDR_ACCEPT 1 /**< Address Acceptance as start
+ point for Latency calculation*/
+#define XAPM_LATENCY_LASTRD 0 /**< Last Read as end point for
+ Latency calculation */
+#define XAPM_LATENCY_LASTWR 0 /**< Last Write as end point for
+ Latency calculation */
+#define XAPM_LATENCY_FIRSTRD 1 /**< First Read as end point for
+ Latency calculation */
+#define XAPM_LATENCY_FIRSTWR 1 /**< First Write as end point for
+ Latency calculation */
+
+#define XAPM_MODE_TRACE 2 /**< APM in Trace mode */
+
+#define XAPM_MODE_PROFILE 1 /**< APM in Profile mode */
+
+#define XAPM_MODE_ADVANCED 0 /**< APM in Advanced mode */
+
+typedef unsigned char u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef unsigned long ulong;
+
+ulong baseaddr;
+
+struct xapm_param {
+ u32 mode;
+ u32 maxslots;
+ u32 eventcnt;
+ u32 eventlog;
+ u32 sampledcnt;
+ u32 numcounters;
+ u32 metricwidth;
+ u32 sampledwidth;
+ u32 globalcntwidth;
+ u32 scalefactor;
+ u32 isr;
+ bool is_32bit_filter;
+};
+
+static struct xapm_param *params;
+
+/*****************************************************************************/
+/**
+*
+* Read a register of the AXI Performance Monitor device. This macro provides
+* register access to all registers using the register offsets defined above.
+*
+* @param baseaddr contains the base address of the device.
+* @param regoffset is the offset of the register to read.
+*
+* @return The contents of the register.
+*
+* @note C-style Signature:
+* u32 readreg(u32 baseaddr, u32 regoffset);
+*
+******************************************************************************/
+#define readreg(baseaddr, regoffset) \
+ (*(volatile u32 *)((baseaddr) + (regoffset)))
+
+/*****************************************************************************/
+/**
+*
+* Write a register of the AXI Performance Monitor device. This macro provides
+* register access to all registers using the register offsets defined above.
+*
+* @param baseaddr contains the base address of the device.
+* @param regoffset is the offset of the register to write.
+* @param data is the value to write to the register.
+*
+* @return None.
+*
+* @note C-style Signature:
+* void writereg(u32 baseaddr,
+* u32 regoffset,u32 Data)
+*
+******************************************************************************/
+#define writereg(baseaddr, regoffset, data) \
+ (*(volatile u32 *)((baseaddr) + (regoffset)) = (data))
+
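+/*
+ * Note: baseaddr is expected to hold a CPU-visible virtual address for the
+ * APM register block. In a userspace sample it would typically come from
+ * something like (an assumption about the setup code, which is not part of
+ * this header; uio_fd and map_size are hypothetical names):
+ *
+ *	baseaddr = (ulong)mmap(NULL, map_size, PROT_READ | PROT_WRITE,
+ *			       MAP_SHARED, uio_fd, 0);
+ */
+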
+/****************************************************************************/
+/**
+*
+* This routine enables the Global Interrupt.
+*
+* @note C-Style signature:
+* void intrglobalenable()
+*
+*****************************************************************************/
+#define intrglobalenable() \
+ writereg(baseaddr, XAPM_GIE_OFFSET, 1)
+
+
+/****************************************************************************/
+/**
+*
+* This routine disables the Global Interrupt.
+*
+* @note C-Style signature:
+* void intrglobaldisable(void)
+*
+*****************************************************************************/
+#define intrglobaldisable() \
+ writereg(baseaddr, XAPM_GIE_OFFSET, 0)
+
+/****************************************************************************/
+/**
+*
+* This routine enables interrupt(s). Use the XAPM_IXR_* constants defined in
+* xaxipmon.h to create the bit-mask to enable interrupts.
+*
+* @param mask is the mask to enable. Bit positions of 1 will be enabled.
+* Bit positions of 0 will keep the previous setting. This mask is
+* formed by OR'ing XAPM_IXR_* bits defined in xaxipmon.h.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void intrenable(u32 mask)
+*
+*****************************************************************************/
+#define intrenable(mask) \
+ writereg(baseaddr, XAPM_IE_OFFSET, readreg(baseaddr, \
+ XAPM_IE_OFFSET) | mask);
+
+
+/****************************************************************************/
+/**
+*
+* This routine disables interrupt(s). Use the XAPM_IXR_* constants defined in
+* xaxipmon.h to create the bit-mask to disable interrupts.
+*
+* @param mask is the mask to disable. Bit positions of 1 will be
+* disabled. Bit positions of 0 will keep the previous setting.
+* This mask is formed by OR'ing XAPM_IXR_* bits defined in
+* xaxipmon.h.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void intrdisable(u32 mask)
+*
+*****************************************************************************/
+#define intrdisable(mask) \
+ writereg(baseaddr, XAPM_IE_OFFSET, readreg(baseaddr, \
+ XAPM_IE_OFFSET) & ~(mask));
+
+/****************************************************************************/
+/**
+*
+* This routine clears the specified interrupt(s).
+*
+* @param mask is the mask to clear. Bit positions of 1 will be cleared.
+* This mask is formed by OR'ing XAPM_IXR_* bits defined in
+* xaxipmon.h.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void intrclear(u32 mask)
+*
+*****************************************************************************/
+#define intrclear(mask) \
+ writereg(baseaddr, XAPM_IS_OFFSET, readreg(baseaddr, \
+ XAPM_IS_OFFSET) | mask);
+
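+/*
+ * Illustrative interrupt setup (a sketch): clear any stale status, enable
+ * the FIFO-full and sample-interval-overflow interrupts, then unmask
+ * globally:
+ *
+ *	intrclear(XAPM_IXR_ALL_MASK);
+ *	intrenable(XAPM_IXR_FIFO_FULL_MASK | XAPM_IXR_SIC_OVERFLOW_MASK);
+ *	intrglobalenable();
+ */
+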
+/****************************************************************************/
+/**
+*
+* This routine returns the Interrupt Status Register.
+*
+* @return isr value updated by kernel driver
+*
+* @note This macro returns isr value updated by kernel driver.
+* C-Style signature:
+* u32 intrgetstatus(void)
+*
+*****************************************************************************/
+#define intrgetstatus() (params->isr)
+
+/****************************************************************************/
+/**
+*
+* This routine returns the Interrupt Status Register.
+*
+* @return Interrupt Status Register contents
+*
+* @note C-Style signature:
+* u32 intrhwgetstatus(void)
+*
+*****************************************************************************/
+#define intrhwgetstatus() (params->isr)
+
+/****************************************************************************/
+/**
+*
+* This function enables the Global Clock Counter.
+*
+* @note C-Style signature:
+* void enablegcc(void);
+*
+*****************************************************************************/
+#define enablegcc() \
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr, \
+ XAPM_CTL_OFFSET) | XAPM_CR_GCC_ENABLE_MASK);
+
+/****************************************************************************/
+/**
+*
+* This function disables the Global Clock Counter.
+*
+* @note C-Style signature:
+* void disablegcc(void);
+*
+*****************************************************************************/
+#define disablegcc() \
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr, \
+ XAPM_CTL_OFFSET) & ~(XAPM_CR_GCC_ENABLE_MASK));
+
+/****************************************************************************/
+/**
+*
+* This function enables the specified flag in the Flag Enable Control
+* Register.
+*
+* @param flag is one of the XAPM_FLAG_* masks defined in xaxipmon.h
+*
+* @return None
+*
+* @note C-Style signature:
+* void enableflag(u32 flag);
+*
+*****************************************************************************/
+#define enableflag(flag) \
+ writereg(baseaddr, XAPM_FEC_OFFSET, \
+ readreg(baseaddr, XAPM_FEC_OFFSET) | flag);
+
+/****************************************************************************/
+/**
+*
+* This function disables the specified flag in the Flag Enable Control
+* Register.
+*
+* @param flag is one of the XAPM_FLAG_* masks defined in xaxipmon.h
+*
+* @return None
+*
+* @note C-Style signature:
+* void disableflag(u32 flag);
+*
+*****************************************************************************/
+#define disableflag(flag) \
+ writereg(baseaddr, XAPM_FEC_OFFSET, \
+ readreg(baseaddr, XAPM_FEC_OFFSET) & ~(flag));
+
+/****************************************************************************/
+/**
+*
+* This function loads the sample interval register value into the sample
+* interval counter.
+*
+* @note C-Style signature:
+* void loadsic(void);
+*
+*****************************************************************************/
+#define loadsic() \
+ writereg(baseaddr, XAPM_SICR_OFFSET, XAPM_SICR_LOAD_MASK)
+
+
+/****************************************************************************/
+/**
+*
+* This enables the down count of the sample interval counter.
+*
+* @note C-Style signature:
+* void enablesic(void);
+*
+*****************************************************************************/
+#define enablesic() \
+ writereg(baseaddr, XAPM_SICR_OFFSET, XAPM_SICR_ENABLE_MASK)
+
+/****************************************************************************/
+/**
+*
+* This disables the down count of the sample interval counter.
+*
+* @note C-Style signature:
+* void disablesic(void);
+*
+*****************************************************************************/
+#define disablesic() \
+ writereg(baseaddr, XAPM_SICR_OFFSET, \
+ readreg(baseaddr, XAPM_SICR_OFFSET) & ~(XAPM_SICR_ENABLE_MASK));
+
+/****************************************************************************/
+/**
+*
+* This enables Reset of Metric Counters when Sample Interval Counter lapses.
+*
+* @note C-Style signature:
+* void enablemcreset(void);
+*
+*****************************************************************************/
+#define enablemcreset() \
+ writereg(baseaddr, XAPM_SICR_OFFSET, XAPM_SICR_MCNTR_RST_MASK);
+
+/****************************************************************************/
+/**
+*
+* This disables Reset of Metric Counters when Sample Interval Counter lapses.
+*
+* @note C-Style signature:
+* void disablemcreset(void);
+*
+*****************************************************************************/
+#define disablemcreset() \
+ writereg(baseaddr, XAPM_SICR_OFFSET, \
+ readreg(baseaddr, XAPM_SICR_OFFSET) & ~(XAPM_SICR_MCNTR_RST_MASK));
+
+/****************************************************************************/
+/**
+*
+* This function enables the ID Filter Masking.
+*
+* @note C-Style signature:
+* void enableidfilter(void);
+*
+*****************************************************************************/
+#define enableidfilter() \
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr, \
+ XAPM_CTL_OFFSET) | XAPM_CR_IDFILTER_ENABLE_MASK);
+
+/****************************************************************************/
+/**
+*
+* This function disables the ID Filter masking.
+*
+* @note C-Style signature:
+* void disableidfilter(void);
+*
+*****************************************************************************/
+#define disableidfilter() \
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr, \
+ XAPM_CTL_OFFSET) & ~(XAPM_CR_IDFILTER_ENABLE_MASK));
+
+/****************************************************************************/
+/**
+*
+* This function samples the Metric Counters into the Sampled Metric Counters
+* by reading the Sample Register, and returns the interval, i.e. the number
+* of clocks between the previous and the current read of the Sample Register.
+*
+* @return Interval, i.e. the number of clocks between the previous
+* and the current read of the Sample Register.
+*
+* @note C-Style signature:
+* u32 samplemetrics(void);
+*
+*****************************************************************************/
+#define samplemetrics() readreg(baseaddr, XAPM_SR_OFFSET);
+
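+/*
+ * Illustrative use (a sketch): latch the live counters into the sampled
+ * registers, then read back a sampled copy:
+ *
+ *	u32 interval, val;
+ *
+ *	interval = samplemetrics();
+ *	val = getsampledmetriccounter(XAPM_METRIC_COUNTER_0);
+ */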
+
+/************************** Function Prototypes *****************************/
+
+int resetmetriccounter(void);
+
+void resetglobalclkcounter(void);
+
+int resetfifo(void);
+
+void setincrementerrange(u8 incrementer, u16 rangehigh, u16 rangelow);
+
+void getincrementerrange(u8 incrementer, u16 *rangehigh, u16 *rangelow);
+
+void setsampleinterval(u32 sampleinterval);
+
+void getsampleinterval(u32 *sampleinterval);
+
+int setmetrics(u8 slot, u8 metrics, u8 counter);
+
+int getmetrics(u8 counter, u8 *metrics, u8 *slot);
+
+void getglobalclkcounter(u32 *cnthigh, u32 *cntlow);
+
+u32 getmetriccounter(u32 counter);
+
+u32 getsampledmetriccounter(u32 counter);
+
+u32 getincrementer(u32 incrementer);
+
+u32 getsampledincrementer(u32 incrementer);
+
+void setswdatareg(u32 swdata);
+
+u32 getswdatareg(void);
+
+int starteventlog(u32 flagenables);
+
+int stopeventlog(void);
+
+int startcounters(u32 sampleinterval);
+
+int stopcounters(void);
+
+void enablemetricscounter(void);
+
+void disablemetricscounter(void);
+
+void setlogenableranges(u32 counter, u16 rangehigh, u16 rangelow);
+
+void getlogenableranges(u32 counter, u16 *rangehigh, u16 *rangelow);
+
+void enableeventlog(void);
+
+void enablemctrigger(void);
+
+void disablemctrigger(void);
+
+void enableeventlogtrigger(void);
+
+void disableeventlogtrigger(void);
+
+const char *getmetricname(u8 metrics);
+
+void setwriteid(u32 writeid);
+
+void setreadid(u32 readid);
+
+u32 getwriteid(void);
+
+u32 getreadid(void);
+
+void setwrlatencystart(u8 param);
+
+void setwrlatencyend(u8 param);
+
+void setrdlatencystart(u8 param);
+
+void setrdlatencyend(u8 param);
+
+u8 getwrlatencystart(void);
+
+u8 getwrlatencyend(void);
+
+u8 getrdlatencystart(void);
+
+u8 getrdlatencyend(void);
+
+void setwriteidmask(u32 wrmask);
+
+void setreadidmask(u32 rdmask);
+
+u32 getwriteidmask(void);
+
+u32 getreadidmask(void);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* End of protection macro. */
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index b14a7d4a2f05..5d247d8f1e04 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -187,8 +187,29 @@ echo-cmd = $(if $($(quiet)cmd_$(1)),\
quiet_redirect :=
silent_redirect := exec >/dev/null;
+# Delete the target on interruption
+#
+# GNU Make automatically deletes the target if it has already been changed by
+# the interrupted recipe. So, you can safely stop the build by Ctrl-C (Make
+# will delete incomplete targets), and resume it later.
+#
+# However, this does not work when the stderr is piped to another program, like
+# $ make >&2 | tee log
+# Make dies with SIGPIPE before cleaning the targets.
+#
+# To address it, we clean the target in signal traps.
+#
+# Make deletes the target when it catches SIGHUP, SIGINT, SIGQUIT, SIGTERM.
+# So, we cover them, and also SIGPIPE just in case.
+#
+# Of course, this is unneeded for phony targets.
+delete-on-interrupt = \
+ $(if $(filter-out $(PHONY), $@), \
+ $(foreach sig, HUP INT QUIT TERM PIPE, \
+ trap 'rm -f $@; trap - $(sig); kill -s $(sig) $$$$' $(sig);))
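+#
+# For illustration, for a target like foo.o each iteration of the loop above
+# produces a shell prefix of the form
+#
+#   trap 'rm -f foo.o; trap - INT; kill -s INT $$' INT;
+#
+# (and likewise for HUP, QUIT, TERM and PIPE): remove the half-written
+# target, reset the trap, and re-raise the signal so the command still dies
+# with the original signal. foo.o is only an illustrative target name.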
+
# printing commands
-cmd = @set -e; $(echo-cmd) $($(quiet)redirect) $(cmd_$(1))
+cmd = @set -e; $(echo-cmd) $($(quiet)redirect) $(delete-on-interrupt) $(cmd_$(1))
###
# if_changed - execute command if any prerequisite is newer than
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index 854e2ba9daa2..6a78afc6f13b 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -50,6 +50,7 @@ KBUILD_CFLAGS += -Wno-sign-compare
KBUILD_CFLAGS += -Wno-format-zero-length
KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
+KBUILD_CFLAGS += $(call cc-disable-warning, cast-function-type-strict)
endif
endif
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index 63d21489216b..ba4e271702c6 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -6,7 +6,7 @@ gcc-plugin-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) += latent_entropy_plugin.so
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) \
+= -DLATENT_ENTROPY_PLUGIN
ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
- DISABLE_LATENT_ENTROPY_PLUGIN += -fplugin-arg-latent_entropy_plugin-disable
+ DISABLE_LATENT_ENTROPY_PLUGIN += -fplugin-arg-latent_entropy_plugin-disable -ULATENT_ENTROPY_PLUGIN
endif
export DISABLE_LATENT_ENTROPY_PLUGIN
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 952fff485546..2dde6e5e9e69 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -75,8 +75,7 @@ obj := $(KBUILD_EXTMOD)
src := $(obj)
# Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
-include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
- $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
+include $(if $(wildcard $(src)/Kbuild), $(src)/Kbuild, $(src)/Makefile)
endif
MODPOST += $(subst -i,-n,$(filter -i,$(MAKEFLAGS))) -s -T - $(wildcard vmlinux)
diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
index adabd4145264..985fb81cae79 100644
--- a/scripts/asn1_compiler.c
+++ b/scripts/asn1_compiler.c
@@ -625,7 +625,7 @@ int main(int argc, char **argv)
p = strrchr(argv[1], '/');
p = p ? p + 1 : argv[1];
grammar_name = strdup(p);
- if (!p) {
+ if (!grammar_name) {
perror(NULL);
exit(1);
}
diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py
index 894cc58c1a03..030a843a4794 100755
--- a/scripts/bpf_helpers_doc.py
+++ b/scripts/bpf_helpers_doc.py
@@ -286,7 +286,7 @@ eBPF programs can have an associated license, passed along with the bytecode
instructions to the kernel when the programs are loaded. The format for that
string is identical to the one in use for kernel modules (Dual licenses, such
as "Dual BSD/GPL", may be used). Some helper functions are only accessible to
-programs that are compatible with the GNU Privacy License (GPL).
+programs that are compatible with the GNU General Public License (GNU GPL).
In order to use such helpers, the eBPF program must be loaded with the correct
license string passed (via **attr**) to the **bpf**\ () system call, and this
@@ -391,6 +391,154 @@ SEE ALSO
print('')
+class PrinterHelpers(Printer):
+ """
+ A printer for dumping collected information about helpers as a C header
+ to be included from a BPF program.
+ @helpers: array of Helper objects to print to standard output
+ """
+
+ type_fwds = [
+ 'struct bpf_fib_lookup',
+ 'struct bpf_perf_event_data',
+ 'struct bpf_perf_event_value',
+ 'struct bpf_sock',
+ 'struct bpf_sock_addr',
+ 'struct bpf_sock_ops',
+ 'struct bpf_sock_tuple',
+ 'struct bpf_spin_lock',
+ 'struct bpf_sysctl',
+ 'struct bpf_tcp_sock',
+ 'struct bpf_tunnel_key',
+ 'struct bpf_xfrm_state',
+ 'struct pt_regs',
+ 'struct sk_reuseport_md',
+ 'struct sockaddr',
+ 'struct tcphdr',
+
+ 'struct __sk_buff',
+ 'struct sk_msg_md',
+ 'struct xdp_md',
+ ]
+ known_types = {
+ '...',
+ 'void',
+ 'const void',
+ 'char',
+ 'const char',
+ 'int',
+ 'long',
+ 'unsigned long',
+
+ '__be16',
+ '__be32',
+ '__wsum',
+
+ 'struct bpf_fib_lookup',
+ 'struct bpf_perf_event_data',
+ 'struct bpf_perf_event_value',
+ 'struct bpf_sock',
+ 'struct bpf_sock_addr',
+ 'struct bpf_sock_ops',
+ 'struct bpf_sock_tuple',
+ 'struct bpf_spin_lock',
+ 'struct bpf_sysctl',
+ 'struct bpf_tcp_sock',
+ 'struct bpf_tunnel_key',
+ 'struct bpf_xfrm_state',
+ 'struct pt_regs',
+ 'struct sk_reuseport_md',
+ 'struct sockaddr',
+ 'struct tcphdr',
+ }
+ mapped_types = {
+ 'u8': '__u8',
+ 'u16': '__u16',
+ 'u32': '__u32',
+ 'u64': '__u64',
+ 's8': '__s8',
+ 's16': '__s16',
+ 's32': '__s32',
+ 's64': '__s64',
+ 'size_t': 'unsigned long',
+ 'struct bpf_map': 'void',
+ 'struct sk_buff': 'struct __sk_buff',
+ 'const struct sk_buff': 'const struct __sk_buff',
+ 'struct sk_msg_buff': 'struct sk_msg_md',
+ 'struct xdp_buff': 'struct xdp_md',
+ }
+
+ def print_header(self):
+ header = '''\
+/* This is an auto-generated file. See bpf_helpers_doc.py for details. */
+
+/* Forward declarations of BPF structs */'''
+
+ print(header)
+ for fwd in self.type_fwds:
+ print('%s;' % fwd)
+ print('')
+
+ def print_footer(self):
+ footer = ''
+ print(footer)
+
+ def map_type(self, t):
+ if t in self.known_types:
+ return t
+ if t in self.mapped_types:
+ return self.mapped_types[t]
+ print("")
+ print("Unrecognized type '%s', please add it to known types!" % t)
+ sys.exit(1)
+
+ seen_helpers = set()
+
+ def print_one(self, helper):
+ proto = helper.proto_break_down()
+
+ if proto['name'] in self.seen_helpers:
+ return
+ self.seen_helpers.add(proto['name'])
+
+ print('/*')
+ print(" * %s" % proto['name'])
+ print(" *")
+ if (helper.desc):
+ # Do not strip all newline characters: formatted code at the end of
+ # a section must be followed by a blank line.
+ for line in re.sub('\n$', '', helper.desc, count=1).split('\n'):
+ print(' *{}{}'.format(' \t' if line else '', line))
+
+ if (helper.ret):
+ print(' *')
+ print(' * Returns')
+ for line in helper.ret.rstrip().split('\n'):
+ print(' *{}{}'.format(' \t' if line else '', line))
+
+ print(' */')
+ print('static %s %s(*%s)(' % (self.map_type(proto['ret_type']),
+ proto['ret_star'], proto['name']), end='')
+ comma = ''
+ for i, a in enumerate(proto['args']):
+ t = a['type']
+ n = a['name']
+ if proto['name'] == 'bpf_get_socket_cookie' and i == 0:
+ t = 'void'
+ n = 'ctx'
+ one_arg = '{}{}'.format(comma, self.map_type(t))
+ if n:
+ if a['star']:
+ one_arg += ' {}'.format(a['star'])
+ else:
+ one_arg += ' '
+ one_arg += '{}'.format(n)
+ comma = ', '
+ print(one_arg, end='')
+
+ print(') = (void *) %d;' % len(self.seen_helpers))
+ print('')
+
###############################################################################
# If script is launched from scripts/ from kernel tree and can access
@@ -405,6 +553,8 @@ Parse eBPF header file and generate documentation for eBPF helper functions.
The RST-formatted output produced can be turned into a manual page with the
rst2man utility.
""")
+argParser.add_argument('--header', action='store_true',
+ help='generate C header file')
if (os.path.isfile(bpfh)):
argParser.add_argument('--filename', help='path to include/uapi/linux/bpf.h',
default=bpfh)
@@ -417,5 +567,8 @@ headerParser = HeaderParser(args.filename)
headerParser.run()
# Print formatted output to standard output.
-printer = PrinterRST(headerParser.helpers)
+if args.header:
+ printer = PrinterHelpers(headerParser.helpers)
+else:
+ printer = PrinterRST(headerParser.helpers)
printer.print_all()
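For reference, the new --header mode emits one C declaration per helper,
numbered by the helper's position in the UAPI header. A representative
line of the generated output, reconstructed from print_one() above
(illustrative, not verbatim):

    /* helper number 1 in bpf.h */
    static void *(*bpf_map_lookup_elem)(void *map, const void *key) = (void *) 1;

Each declaration binds the helper name to its ordinal as a function
pointer, so calls through it compile down to BPF helper calls by number.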
diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c
index b071bf476fea..dd1a4bd706a2 100644
--- a/scripts/extract-cert.c
+++ b/scripts/extract-cert.c
@@ -23,6 +23,13 @@
#include <openssl/err.h>
#include <openssl/engine.h>
+/*
+ * OpenSSL 3.0 deprecates OpenSSL's ENGINE API.
+ *
+ * Remove this if/when that API is no longer used.
+ */
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+
#define PKEY_ID_PKCS7 2
static __attribute__((noreturn))
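If the override ever needs to be narrower, the push/pop form scopes it to
just the deprecated calls rather than the whole file; a sketch:

    /* Narrower alternative (sketch): scope the override to the
     * ENGINE_* calls instead of disabling the warning file-wide. */
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
    /* ... ENGINE_* calls ... */
    #pragma GCC diagnostic pop

The file-wide form is the simpler choice here, since the comment already
marks the pragma for removal once the ENGINE API is gone.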
diff --git a/scripts/faddr2line b/scripts/faddr2line
index 94ed98dd899f..9e730b805e87 100755
--- a/scripts/faddr2line
+++ b/scripts/faddr2line
@@ -73,7 +73,8 @@ command -v ${ADDR2LINE} >/dev/null 2>&1 || die "${ADDR2LINE} isn't installed"
find_dir_prefix() {
local objfile=$1
- local start_kernel_addr=$(${READELF} --symbols --wide $objfile | ${AWK} '$8 == "start_kernel" {printf "0x%s", $2}')
+ local start_kernel_addr=$(${READELF} --symbols --wide $objfile | sed 's/\[.*\]//' |
+ ${AWK} '$8 == "start_kernel" {printf "0x%s", $2}')
[[ -z $start_kernel_addr ]] && return
local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr)
@@ -112,7 +113,9 @@ __faddr2line() {
# section offsets.
local file_type=$(${READELF} --file-header $objfile |
${AWK} '$1 == "Type:" { print $2; exit }')
- [[ $file_type = "EXEC" ]] && is_vmlinux=1
+ if [[ $file_type = "EXEC" ]] || [[ $file_type == "DYN" ]]; then
+ is_vmlinux=1
+ fi
# Go through each of the object's symbols which match the func name.
# In rare cases there might be duplicates, in which case we print all
@@ -175,7 +178,7 @@ __faddr2line() {
found=2
break
fi
- done < <(${READELF} --symbols --wide $objfile | ${AWK} -v sec=$sym_sec '$7 == sec' | sort --key=2)
+ done < <(${READELF} --symbols --wide $objfile | sed 's/\[.*\]//' | ${AWK} -v sec=$sym_sec '$7 == sec' | sort --key=2)
if [[ $found = 0 ]]; then
warn "can't find symbol: sym_name: $sym_name sym_sec: $sym_sec sym_addr: $sym_addr sym_elf_size: $sym_elf_size"
@@ -256,7 +259,7 @@ __faddr2line() {
DONE=1
- done < <(${READELF} --symbols --wide $objfile | ${AWK} -v fn=$sym_name '$4 == "FUNC" && $8 == fn')
+ done < <(${READELF} --symbols --wide $objfile | sed 's/\[.*\]//' | ${AWK} -v fn=$sym_name '$4 == "FUNC" && $8 == fn')
}
[[ $# -lt 2 ]] && usage
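Context for the repeated sed 's/\[.*\]//' filter: on some targets
(ppc64le, for instance) readelf --symbols prints an extra bracketed
annotation that shifts awk's field numbering, e.g. (illustrative):

    229: c000000000d16fd8   200 FUNC  GLOBAL DEFAULT [<localentry>: 8]    31 start_kernel

Stripping the bracketed part before the awk match restores $8 as the
symbol name and $7 as the section index.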
diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
index bd29e4e7a524..c7ff92b4189c 100644
--- a/scripts/gcc-plugins/randomize_layout_plugin.c
+++ b/scripts/gcc-plugins/randomize_layout_plugin.c
@@ -209,12 +209,14 @@ static void partition_struct(tree *fields, unsigned long length, struct partitio
static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
{
- unsigned long i, x;
+ unsigned long i, x, index;
struct partition_group size_group[length];
unsigned long num_groups = 0;
unsigned long randnum;
partition_struct(newtree, length, (struct partition_group *)&size_group, &num_groups);
+
+ /* FIXME: this group shuffle is currently a no-op. */
for (i = num_groups - 1; i > 0; i--) {
struct partition_group tmp;
randnum = ranval(prng_state) % (i + 1);
@@ -224,11 +226,14 @@ static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prn
}
for (x = 0; x < num_groups; x++) {
- for (i = size_group[x].start + size_group[x].length - 1; i > size_group[x].start; i--) {
+ for (index = size_group[x].length - 1; index > 0; index--) {
tree tmp;
+
+ i = size_group[x].start + index;
if (DECL_BIT_FIELD_TYPE(newtree[i]))
continue;
- randnum = ranval(prng_state) % (i + 1);
+ randnum = ranval(prng_state) % (index + 1);
+ randnum += size_group[x].start;
// we could handle this case differently if desired
if (DECL_BIT_FIELD_TYPE(newtree[randnum]))
continue;
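The second hunk is the substantive fix: the swap partner must be drawn
relative to the current group and then offset by the group's start,
rather than modulo the absolute position i. A minimal standalone sketch
of the corrected in-group Fisher-Yates step, with rand() standing in for
ranval() and int elements standing in for tree nodes:

    #include <stdlib.h>

    /* Shuffle arr[start .. start+len-1] in place without ever picking
     * a partner outside the group (bit-field skipping omitted). */
    static void shuffle_group(int *arr, unsigned long start,
                              unsigned long len)
    {
        unsigned long index;

        if (!len)
            return;
        for (index = len - 1; index > 0; index--) {
            unsigned long i = start + index;                /* absolute slot */
            unsigned long j = start + rand() % (index + 1); /* stays in group */
            int tmp = arr[i];

            arr[i] = arr[j];
            arr[j] = tmp;
        }
    }

Before the fix, randnum was taken modulo the absolute index i, so a
field could be swapped into a slot belonging to an earlier group.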
diff --git a/scripts/gdb/linux/clk.py b/scripts/gdb/linux/clk.py
index 061aecfa294e..7a01fdc3e844 100644
--- a/scripts/gdb/linux/clk.py
+++ b/scripts/gdb/linux/clk.py
@@ -41,6 +41,8 @@ are cached and potentially out of date"""
self.show_subtree(child, level + 1)
def invoke(self, arg, from_tty):
+ if utils.gdb_eval_or_none("clk_root_list") is None:
+ raise gdb.GdbError("No clocks registered")
gdb.write(" enable prepare protect \n")
gdb.write(" clock count count count rate \n")
gdb.write("------------------------------------------------------------------------\n")
diff --git a/scripts/gdb/linux/genpd.py b/scripts/gdb/linux/genpd.py
index 6ca93bd2949e..b53649c0a77a 100644
--- a/scripts/gdb/linux/genpd.py
+++ b/scripts/gdb/linux/genpd.py
@@ -5,7 +5,7 @@
import gdb
import sys
-from linux.utils import CachedType
+from linux.utils import CachedType, gdb_eval_or_none
from linux.lists import list_for_each_entry
generic_pm_domain_type = CachedType('struct generic_pm_domain')
@@ -49,17 +49,17 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
else:
status_string = 'off-{}'.format(genpd['state_idx'])
- slave_names = []
+ child_names = []
for link in list_for_each_entry(
- genpd['master_links'],
+ genpd['parent_links'],
device_link_type.get_type().pointer(),
- 'master_node'):
- slave_names.apend(link['slave']['name'])
+ 'parent_node'):
+ child_names.append(link['child']['name'])
gdb.write('%-30s %-15s %s\n' % (
genpd['name'].string(),
status_string,
- ', '.join(slave_names)))
+ ', '.join(child_names)))
# Print devices in domain
for pm_data in list_for_each_entry(genpd['dev_list'],
@@ -70,7 +70,9 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
gdb.write(' %-50s %s\n' % (kobj_path, rtpm_status_str(dev)))
def invoke(self, arg, from_tty):
- gdb.write('domain status slaves\n');
+ if gdb_eval_or_none("&gpd_list") is None:
+ raise gdb.GdbError("No power domain(s) registered")
+ gdb.write('domain status children\n');
gdb.write(' /device runtime status\n');
gdb.write('----------------------------------------------------------------------\n');
for genpd in list_for_each_entry(
diff --git a/scripts/gdb/linux/timerlist.py b/scripts/gdb/linux/timerlist.py
index 071d0dd5a634..51def847f1ef 100644
--- a/scripts/gdb/linux/timerlist.py
+++ b/scripts/gdb/linux/timerlist.py
@@ -73,7 +73,7 @@ def print_cpu(hrtimer_bases, cpu, max_clock_bases):
ts = cpus.per_cpu(tick_sched_ptr, cpu)
text = "cpu: {}\n".format(cpu)
- for i in xrange(max_clock_bases):
+ for i in range(max_clock_bases):
text += " clock {}:\n".format(i)
text += print_base(cpu_base['clock_base'][i])
@@ -158,6 +158,8 @@ def pr_cpumask(mask):
num_bytes = (nr_cpu_ids + 7) / 8
buf = utils.read_memoryview(inf, bits, num_bytes).tobytes()
buf = binascii.b2a_hex(buf)
+ if type(buf) is not str:
+ buf = buf.decode()
chunks = []
i = num_bytes
diff --git a/scripts/gdb/linux/utils.py b/scripts/gdb/linux/utils.py
index ea94221dbd39..8c33be7985b6 100644
--- a/scripts/gdb/linux/utils.py
+++ b/scripts/gdb/linux/utils.py
@@ -89,7 +89,10 @@ def get_target_endianness():
def read_memoryview(inf, start, length):
- return memoryview(inf.read_memory(start, length))
+ m = inf.read_memory(start, length)
+ if type(m) is memoryview:
+ return m
+ return memoryview(m)
def read_u16(buffer, offset):
diff --git a/scripts/kconfig/lexer.l b/scripts/kconfig/lexer.l
index 6354c905b006..3bf90d01c39f 100644
--- a/scripts/kconfig/lexer.l
+++ b/scripts/kconfig/lexer.l
@@ -305,8 +305,11 @@ static char *expand_token(const char *in, size_t n)
new_string();
append_string(in, n);
- /* get the whole line because we do not know the end of token. */
- while ((c = input()) != EOF) {
+ /*
+ * get the whole line because we do not know the end of token.
+ * input() returns 0 (not EOF!) when it reaches the end of file.
+ */
+ while ((c = input()) != 0) {
if (c == '\n') {
unput(c);
break;
diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
index 748da578b418..d1f5bcff4b62 100644
--- a/scripts/kconfig/preprocess.c
+++ b/scripts/kconfig/preprocess.c
@@ -396,6 +396,9 @@ static char *eval_clause(const char *str, size_t len, int argc, char *argv[])
p++;
}
+
+ if (new_argc >= FUNCTION_MAX_ARGS)
+ pperror("too many function arguments");
new_argv[new_argc++] = prev;
/*
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index f56eec5ea4c7..342fefe5dba4 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -117,9 +117,9 @@ static long long sym_get_range_val(struct symbol *sym, int base)
static void sym_validate_range(struct symbol *sym)
{
struct property *prop;
+ struct symbol *range_sym;
int base;
long long val, val2;
- char str[64];
switch (sym->type) {
case S_INT:
@@ -135,17 +135,15 @@ static void sym_validate_range(struct symbol *sym)
if (!prop)
return;
val = strtoll(sym->curr.val, NULL, base);
- val2 = sym_get_range_val(prop->expr->left.sym, base);
+ range_sym = prop->expr->left.sym;
+ val2 = sym_get_range_val(range_sym, base);
if (val >= val2) {
- val2 = sym_get_range_val(prop->expr->right.sym, base);
+ range_sym = prop->expr->right.sym;
+ val2 = sym_get_range_val(range_sym, base);
if (val <= val2)
return;
}
- if (sym->type == S_INT)
- sprintf(str, "%lld", val2);
- else
- sprintf(str, "0x%llx", val2);
- sym->curr.val = xstrdup(str);
+ sym->curr.val = range_sym->curr.val;
}
static void sym_set_changed(struct symbol *sym)
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 8b6325c2dfc5..f25f102abbc7 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -138,8 +138,13 @@ gen_btf()
${OBJCOPY} --only-section=.BTF --set-section-flags .BTF=alloc,readonly \
--strip-all ${1} ${2} 2>/dev/null
# Change e_type to ET_REL so that it can be used to link final vmlinux.
- # Unlike GNU ld, lld does not allow an ET_EXEC input.
- printf '\1' | dd of=${2} conv=notrunc bs=1 seek=16 status=none
+ # GNU ld 2.35+ and lld do not allow an ET_EXEC input.
+ if [ -n "${CONFIG_CPU_BIG_ENDIAN}" ]; then
+ et_rel='\0\1'
+ else
+ et_rel='\1\0'
+ fi
+ printf "${et_rel}" | dd of=${2} conv=notrunc bs=1 seek=16 status=none
}
# Create ${2} .o file with all symbols from the ${1} object file
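Background for the two-byte write: offset 16 of the ELF header is the
16-bit e_type field, and ET_REL is 1, so the byte order of the written
'\1' matters once big-endian targets are handled. A quick self-contained
check of both constants:

    /* Confirms the offsets the dd invocation above relies on. */
    #include <elf.h>
    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        printf("offsetof(Elf64_Ehdr, e_type) = %zu\n",
               offsetof(Elf64_Ehdr, e_type)); /* 16: right after e_ident */
        printf("ET_REL = %d\n", ET_REL);      /* 1 */
        return 0;
    }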
diff --git a/scripts/mksysmap b/scripts/mksysmap
index 9aa23d15862a..ad8bbc52267d 100755
--- a/scripts/mksysmap
+++ b/scripts/mksysmap
@@ -41,4 +41,4 @@
# so we just ignore them to let readprofile continue to work.
# (At least sparc64 has __crc_ in the middle).
-$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)' > $2
+$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)\|\( L0\)' > $2
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index c91eba751804..7b845483717b 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -1302,13 +1302,13 @@ static int do_typec_entry(const char *filename, void *symval, char *alias)
/* Looks like: tee:uuid */
static int do_tee_entry(const char *filename, void *symval, char *alias)
{
- DEF_FIELD(symval, tee_client_device_id, uuid);
+ DEF_FIELD_ADDR(symval, tee_client_device_id, uuid);
sprintf(alias, "tee:%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
- uuid.b[0], uuid.b[1], uuid.b[2], uuid.b[3], uuid.b[4],
- uuid.b[5], uuid.b[6], uuid.b[7], uuid.b[8], uuid.b[9],
- uuid.b[10], uuid.b[11], uuid.b[12], uuid.b[13], uuid.b[14],
- uuid.b[15]);
+ uuid->b[0], uuid->b[1], uuid->b[2], uuid->b[3], uuid->b[4],
+ uuid->b[5], uuid->b[6], uuid->b[7], uuid->b[8], uuid->b[9],
+ uuid->b[10], uuid->b[11], uuid->b[12], uuid->b[13], uuid->b[14],
+ uuid->b[15]);
add_wildcard(alias);
return 1;
@@ -1455,7 +1455,7 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
/* First handle the "special" cases */
if (sym_is(name, namelen, "usb"))
do_usb_table(symval, sym->st_size, mod);
- if (sym_is(name, namelen, "of"))
+ else if (sym_is(name, namelen, "of"))
do_of_table(symval, sym->st_size, mod);
else if (sym_is(name, namelen, "pnp"))
do_pnp_device_entry(symval, sym->st_size, mod);
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 79e239b816e0..468985f0968a 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1329,6 +1329,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
if (relsym->st_name != 0)
return relsym;
+ /*
+ * Strive to find a better symbol name, but the resulting name may not
+ * match the symbol referenced in the original code.
+ */
relsym_secindex = get_secindex(elf, relsym);
for (sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
if (get_secindex(elf, sym) != relsym_secindex)
@@ -1633,7 +1637,7 @@ static void default_mismatch_handler(const char *modname, struct elf_info *elf,
static int is_executable_section(struct elf_info* elf, unsigned int section_index)
{
- if (section_index > elf->num_sections)
+ if (section_index >= elf->num_sections)
fatal("section_index is outside elf->num_sections!\n");
return ((elf->sechdrs[section_index].sh_flags & SHF_EXECINSTR) == SHF_EXECINSTR);
@@ -1812,19 +1816,33 @@ static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
#define R_ARM_THM_JUMP19 51
#endif
+static int32_t sign_extend32(int32_t value, int index)
+{
+ uint8_t shift = 31 - index;
+
+ return (int32_t)(value << shift) >> shift;
+}
+
static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
{
unsigned int r_typ = ELF_R_TYPE(r->r_info);
+ Elf_Sym *sym = elf->symtab_start + ELF_R_SYM(r->r_info);
+ void *loc = reloc_location(elf, sechdr, r);
+ uint32_t inst;
+ int32_t offset;
switch (r_typ) {
case R_ARM_ABS32:
- /* From ARM ABI: (S + A) | T */
- r->r_addend = (int)(long)
- (elf->symtab_start + ELF_R_SYM(r->r_info));
+ inst = TO_NATIVE(*(uint32_t *)loc);
+ r->r_addend = inst + sym->st_value;
break;
case R_ARM_PC24:
case R_ARM_CALL:
case R_ARM_JUMP24:
+ inst = TO_NATIVE(*(uint32_t *)loc);
+ offset = sign_extend32((inst & 0x00ffffff) << 2, 25);
+ r->r_addend = offset + sym->st_value + 8;
+ break;
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
case R_ARM_THM_JUMP19:
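A worked example of the new R_ARM_CALL/R_ARM_JUMP24 addend extraction as
a standalone program; 0xebfffffe encodes "bl ." (a branch to itself):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the sign_extend32() helper added above. */
    static int32_t sign_extend32(int32_t value, int index)
    {
        uint8_t shift = 31 - index;

        return (int32_t)(value << shift) >> shift;
    }

    int main(void)
    {
        uint32_t inst = 0xebfffffe; /* bl . : imm24 = 0xfffffe */
        /* imm24 is a word offset: << 2 converts it to bytes, and bit 25
         * is the sign bit after the shift. */
        int32_t offset = sign_extend32((inst & 0x00ffffff) << 2, 25);

        printf("offset = %d\n", offset); /* -8: target = PC + 8 - 8 */
        return 0;
    }

Adding back the symbol value and the ARM pipeline offset of 8 yields the
r_addend the hunk computes.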
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index cce12e1971d8..ec692af8ce9e 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -102,6 +102,7 @@ static ssize_t uwrite(void const *const buf, size_t const count)
{
size_t cnt = count;
off_t idx = 0;
+ void *p = NULL;
file_updated = 1;
@@ -109,7 +110,10 @@ static ssize_t uwrite(void const *const buf, size_t const count)
off_t aoffset = (file_ptr + count) - file_end;
if (aoffset > file_append_size) {
- file_append = realloc(file_append, aoffset);
+ p = realloc(file_append, aoffset);
+ if (!p)
+ free(file_append);
+ file_append = p;
file_append_size = aoffset;
}
if (!file_append) {
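The hunk applies the standard safe-realloc idiom: never overwrite the
only pointer to a buffer with realloc's result, because realloc leaves
the old block allocated on failure. The same pattern in isolation
(sketch; grow() is a made-up name):

    #include <stdlib.h>

    /* Grow *bufp to newsz bytes; on failure free the old block and
     * leave *bufp NULL so the caller can detect the error. */
    static int grow(char **bufp, size_t newsz)
    {
        char *p = realloc(*bufp, newsz);

        if (!p) {
            free(*bufp);
            *bufp = NULL;
            return -1;
        }
        *bufp = p;
        return 0;
    }

In uwrite() the pre-existing "if (!file_append)" test then plays the
caller's role.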
diff --git a/scripts/selinux/install_policy.sh b/scripts/selinux/install_policy.sh
index 2dccf141241d..20af56ce245c 100755
--- a/scripts/selinux/install_policy.sh
+++ b/scripts/selinux/install_policy.sh
@@ -78,7 +78,7 @@ cd /etc/selinux/dummy/contexts/files
$SF -F file_contexts /
mounts=`cat /proc/$$/mounts | \
- egrep "ext[234]|jfs|xfs|reiserfs|jffs2|gfs2|btrfs|f2fs|ocfs2" | \
+ grep -E "ext[234]|jfs|xfs|reiserfs|jffs2|gfs2|btrfs|f2fs|ocfs2" | \
awk '{ print $2 '}`
$SF -F file_contexts $mounts
diff --git a/scripts/sign-file.c b/scripts/sign-file.c
index fbd34b8e8f57..12acc70e5a7a 100644
--- a/scripts/sign-file.c
+++ b/scripts/sign-file.c
@@ -30,6 +30,13 @@
#include <openssl/engine.h>
/*
+ * OpenSSL 3.0 deprecates OpenSSL's ENGINE API.
+ *
+ * Remove this if/when that API is no longer used.
+ */
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+
+/*
* Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to
* assume that it's not available and its header file is missing and that we
* should use PKCS#7 instead. Switching to the older PKCS#7 format restricts
@@ -315,7 +322,7 @@ int main(int argc, char **argv)
CMS_NOSMIMECAP | use_keyid |
use_signed_attrs),
"CMS_add1_signer");
- ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0,
+ ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) != 1,
"CMS_final");
#else
@@ -334,10 +341,10 @@ int main(int argc, char **argv)
b = BIO_new_file(sig_file_name, "wb");
ERR(!b, "%s", sig_file_name);
#ifndef USE_PKCS7
- ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0,
+ ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) != 1,
"%s", sig_file_name);
#else
- ERR(i2d_PKCS7_bio(b, pkcs7) < 0,
+ ERR(i2d_PKCS7_bio(b, pkcs7) != 1,
"%s", sig_file_name);
#endif
BIO_free(b);
@@ -367,9 +374,9 @@ int main(int argc, char **argv)
if (!raw_sig) {
#ifndef USE_PKCS7
- ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name);
+ ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) != 1, "%s", dest_name);
#else
- ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name);
+ ERR(i2d_PKCS7_bio(bd, pkcs7) != 1, "%s", dest_name);
#endif
} else {
BIO *b;
@@ -389,7 +396,7 @@ int main(int argc, char **argv)
ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name);
ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name);
- ERR(BIO_free(bd) < 0, "%s", dest_name);
+ ERR(BIO_free(bd) != 1, "%s", dest_name);
/* Finally, if we're signing in place, replace the original. */
if (replace_orig)
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 4e18ae5282a6..f667527ddff5 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -28,6 +28,13 @@ fi
# ignore userspace tools
ignore="$ignore ( -path ${tree}tools ) -prune -o"
+# gtags(1) refuses to index any file outside of its current working dir.
+# If gtags indexing is requested and the build output directory is not
+# the kernel source tree, index all files in absolute-path form.
+if [[ "$1" == "gtags" && -n "${tree}" ]]; then
+ tree=$(realpath "$tree")/
+fi
+
# Detect if ALLSOURCE_ARCHS is set. If not, we assume SRCARCH
if [ "${ALLSOURCE_ARCHS}" = "" ]; then
ALLSOURCE_ARCHS=${SRCARCH}
@@ -134,7 +141,7 @@ docscope()
dogtags()
{
- all_target_sources | gtags -i -f -
+ all_target_sources | gtags -i -C "${tree:-.}" -f - "$PWD"
}
# Basic regular expressions with an optional /kind-spec/ for ctags and
diff --git a/scripts/tracing/ftrace-bisect.sh b/scripts/tracing/ftrace-bisect.sh
index 926701162bc8..bb4f59262bbe 100755
--- a/scripts/tracing/ftrace-bisect.sh
+++ b/scripts/tracing/ftrace-bisect.sh
@@ -12,7 +12,7 @@
# (note, if this is a problem with function_graph tracing, then simply
# replace "function" with "function_graph" in the following steps).
#
-# # cd /sys/kernel/debug/tracing
+# # cd /sys/kernel/tracing
# # echo schedule > set_ftrace_filter
# # echo function > current_tracer
#
@@ -20,22 +20,40 @@
#
# # echo nop > current_tracer
#
-# # cat available_filter_functions > ~/full-file
+# Starting with v5.1 this can be done with numbers, making it much faster:
+#
+# The old (slow) way, for kernels before v5.1.
+#
+# [old-way] # cat available_filter_functions > ~/full-file
+#
+# [old-way] *** Note *** this process will take several minutes to update the
+# [old-way] filters. Setting multiple functions is an O(n^2) operation, and we
+# [old-way] are dealing with thousands of functions. So go have coffee, talk
+# [old-way] with your coworkers, read facebook. And eventually, this operation
+# [old-way] will end.
+#
+# The new way (using numbers) is an O(n) operation, and usually takes less than a second.
+#
+# seq `wc -l available_filter_functions | cut -d' ' -f1` > ~/full-file
+#
+# This will create a sequence of numbers that match the functions in
+# available_filter_functions. When a number is echoed into the
+# set_ftrace_filter file, it enables the corresponding function in
+# O(1) time, making enabling all functions O(n), where n is the number
+# of functions to enable.
+#
+# For either the new or old way, the rest of the operations remain the same.
+#
# # ftrace-bisect ~/full-file ~/test-file ~/non-test-file
# # cat ~/test-file > set_ftrace_filter
#
-# *** Note *** this will take several minutes. Setting multiple functions is
-# an O(n^2) operation, and we are dealing with thousands of functions. So go
-# have coffee, talk with your coworkers, read facebook. And eventually, this
-# operation will end.
-#
# # echo function > current_tracer
#
# If it crashes, we know that ~/test-file has a bad function.
#
# Reboot back to test kernel.
#
-# # cd /sys/kernel/debug/tracing
+# # cd /sys/kernel/tracing
# # mv ~/test-file ~/full-file
#
# If it didn't crash.
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 7a8813677950..62736465ac82 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -403,7 +403,7 @@ static struct aa_loaddata *aa_simple_write_to_buffer(const char __user *userbuf,
data->size = copy_size;
if (copy_from_user(data->data, userbuf, copy_size)) {
- kvfree(data);
+ aa_put_loaddata(data);
return ERR_PTR(-EFAULT);
}
@@ -869,8 +869,10 @@ static struct multi_transaction *multi_transaction_new(struct file *file,
if (!t)
return ERR_PTR(-ENOMEM);
kref_init(&t->count);
- if (copy_from_user(t->data, buf, size))
+ if (copy_from_user(t->data, buf, size)) {
+ put_multi_transaction(t);
return ERR_PTR(-EFAULT);
+ }
return t;
}
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
index 597732503815..68e06d87908e 100644
--- a/security/apparmor/audit.c
+++ b/security/apparmor/audit.c
@@ -139,7 +139,7 @@ int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
}
if (AUDIT_MODE(profile) == AUDIT_QUIET ||
(type == AUDIT_APPARMOR_DENIED &&
- AUDIT_MODE(profile) == AUDIT_QUIET))
+ AUDIT_MODE(profile) == AUDIT_QUIET_DENIED))
return aad(sa)->error;
if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 1a33f490e667..2f61b4cc2fc0 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -460,7 +460,7 @@ restart:
* xattrs, or a longer match
*/
candidate = profile;
- candidate_len = profile->xmatch_len;
+ candidate_len = max(count, profile->xmatch_len);
candidate_xattrs = ret;
conflict = false;
}
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
index 7d27db740bc2..ac5054899f6f 100644
--- a/security/apparmor/include/lib.h
+++ b/security/apparmor/include/lib.h
@@ -22,6 +22,11 @@
*/
#define DEBUG_ON (aa_g_debug)
+/*
+ * Split individual debug cases out in preparation for finer-grained
+ * debug controls in the future.
+ */
+#define AA_DEBUG_LABEL DEBUG_ON
#define dbg_printk(__fmt, __args...) pr_debug(__fmt, ##__args)
#define AA_DEBUG(fmt, args...) \
do { \
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index b5b4b8190e65..b5aa4231af68 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -135,7 +135,7 @@ struct aa_profile {
const char *attach;
struct aa_dfa *xmatch;
- int xmatch_len;
+ unsigned int xmatch_len;
enum audit_mode audit;
long mode;
u32 path_flags;
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index 747a734a0824..e2674e425ca7 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -1637,9 +1637,9 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
AA_BUG(!str && size != 0);
AA_BUG(!label);
- if (flags & FLAG_ABS_ROOT) {
+ if (AA_DEBUG_LABEL && (flags & FLAG_ABS_ROOT)) {
ns = root_ns;
- len = snprintf(str, size, "=");
+ len = snprintf(str, size, "_");
update_for_len(total, len, size, str);
} else if (!ns) {
ns = labels_ns(label);
@@ -1750,7 +1750,7 @@ void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns,
if (!use_label_hname(ns, label, flags) ||
display_mode(ns, label, flags)) {
len = aa_label_asxprint(&name, ns, label, flags, gfp);
- if (len == -1) {
+ if (len < 0) {
AA_DEBUG("label print error");
return;
}
@@ -1778,7 +1778,7 @@ void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns,
int len;
len = aa_label_asxprint(&str, ns, label, flags, gfp);
- if (len == -1) {
+ if (len < 0) {
AA_DEBUG("label print error");
return;
}
@@ -1801,7 +1801,7 @@ void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags,
int len;
len = aa_label_asxprint(&str, ns, label, flags, gfp);
- if (len == -1) {
+ if (len < 0) {
AA_DEBUG("label print error");
return;
}
@@ -1901,7 +1901,8 @@ struct aa_label *aa_label_strn_parse(struct aa_label *base, const char *str,
AA_BUG(!str);
str = skipn_spaces(str, n);
- if (str == NULL || (*str == '=' && base != &root_ns->unconfined->label))
+ if (str == NULL || (AA_DEBUG_LABEL && *str == '_' &&
+ base != &root_ns->unconfined->label))
return ERR_PTR(-EINVAL);
len = label_count_strn_entries(str, end - str);
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index e31965dc6dd1..21e03380dd86 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -1148,10 +1148,10 @@ static int apparmor_inet_conn_request(struct sock *sk, struct sk_buff *skb,
#endif
/*
- * The cred blob is a pointer to, not an instance of, an aa_task_ctx.
+ * The cred blob is a pointer to, not an instance of, an aa_label.
*/
struct lsm_blob_sizes apparmor_blob_sizes __lsm_ro_after_init = {
- .lbs_cred = sizeof(struct aa_task_ctx *),
+ .lbs_cred = sizeof(struct aa_label *),
.lbs_file = sizeof(struct aa_file_ctx),
.lbs_task = sizeof(struct aa_task_ctx),
};
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index 17081c8dbefa..4b433cf1526f 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -229,7 +229,8 @@ static const char * const mnt_info_table[] = {
"failed srcname match",
"failed type match",
"failed flags match",
- "failed data match"
+ "failed data match",
+ "failed perms check"
};
/*
@@ -284,8 +285,8 @@ static int do_match_mnt(struct aa_dfa *dfa, unsigned int start,
return 0;
}
- /* failed at end of flags match */
- return 4;
+ /* failed at perms check, don't confuse with flags match */
+ return 6;
}
@@ -682,6 +683,7 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path,
aa_put_label(target);
goto out;
}
+ aa_put_label(target);
} else
/* already audited error */
error = PTR_ERR(target);
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 06355717ee84..e38ceba39200 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -1123,7 +1123,7 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
if (!name) {
/* remove namespace - can only happen if fqname[0] == ':' */
- mutex_lock_nested(&ns->parent->lock, ns->level);
+ mutex_lock_nested(&ns->parent->lock, ns->parent->level);
__aa_bump_ns_revision(ns);
__aa_remove_ns(ns);
mutex_unlock(&ns->parent->lock);
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 8cfc9493eefc..8bd79aad37fd 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -693,6 +693,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
if (tmpns) {
+ if (!tmpname) {
+ info = "empty profile name";
+ goto fail;
+ }
*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
if (!*ns_name) {
info = "out of memory";
@@ -955,7 +959,7 @@ static int verify_header(struct aa_ext *e, int required, const char **ns)
* if not specified use previous version
* Mask off everything that is not kernel abi version
*/
- if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) {
+ if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v8)) {
audit_iface(NULL, NULL, NULL, "unsupported interface version",
e, error);
return error;
diff --git a/security/commoncap.c b/security/commoncap.c
index 1c70d1149186..d1890a6e6475 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -391,8 +391,10 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
&tmpbuf, size, GFP_NOFS);
dput(dentry);
- if (ret < 0 || !tmpbuf)
- return ret;
+ if (ret < 0 || !tmpbuf) {
+ size = ret;
+ goto out_free;
+ }
fs_ns = inode->i_sb->s_user_ns;
cap = (struct vfs_cap_data *) tmpbuf;
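The fix reroutes the early return through the function's cleanup label
so tmpbuf is freed on every path. The goto-cleanup idiom in isolation
(sketch; fill() is a made-up helper):

    #include <stdlib.h>

    static int fill(char *buf) { buf[0] = 0; return 0; } /* stand-in */

    static int get_thing(char **out)
    {
        char *buf = malloc(64);
        int ret;

        if (!buf)
            return -1;
        ret = fill(buf);
        if (ret < 0)
            goto out_free; /* every error path funnels through one exit */
        *out = buf;
        return 0;
    out_free:
        free(buf);
        return ret;
    }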
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 89b9c277f0c0..ad070cf71c36 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -79,6 +79,17 @@ free_and_exit:
return -ENOMEM;
}
+static void dev_exceptions_move(struct list_head *dest, struct list_head *orig)
+{
+ struct dev_exception_item *ex, *tmp;
+
+ lockdep_assert_held(&devcgroup_mutex);
+
+ list_for_each_entry_safe(ex, tmp, orig, list) {
+ list_move_tail(&ex->list, dest);
+ }
+}
+
/*
* called under devcgroup_mutex
*/
@@ -601,11 +612,13 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
int count, rc = 0;
struct dev_exception_item ex;
struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent);
+ struct dev_cgroup tmp_devcgrp;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
memset(&ex, 0, sizeof(ex));
+ memset(&tmp_devcgrp, 0, sizeof(tmp_devcgrp));
b = buffer;
switch (*b) {
@@ -617,15 +630,27 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
if (!may_allow_all(parent))
return -EPERM;
- dev_exception_clean(devcgroup);
- devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
- if (!parent)
+ if (!parent) {
+ devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
+ dev_exception_clean(devcgroup);
break;
+ }
+ INIT_LIST_HEAD(&tmp_devcgrp.exceptions);
+ rc = dev_exceptions_copy(&tmp_devcgrp.exceptions,
+ &devcgroup->exceptions);
+ if (rc)
+ return rc;
+ dev_exception_clean(devcgroup);
rc = dev_exceptions_copy(&devcgroup->exceptions,
&parent->exceptions);
- if (rc)
+ if (rc) {
+ dev_exceptions_move(&devcgroup->exceptions,
+ &tmp_devcgrp.exceptions);
return rc;
+ }
+ devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
+ dev_exception_clean(&tmp_devcgrp);
break;
case DEVCG_DENY:
if (css_has_online_children(&devcgroup->css))
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index ea1aae3d07b3..12bae4714211 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -121,6 +121,7 @@ int __init integrity_init_keyring(const unsigned int id)
{
struct key_restriction *restriction;
key_perm_t perm;
+ int ret;
perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW
| KEY_USR_READ | KEY_USR_SEARCH;
@@ -141,7 +142,10 @@ int __init integrity_init_keyring(const unsigned int id)
perm |= KEY_USR_WRITE;
out:
- return __integrity_init_keyring(id, perm, restriction);
+ ret = __integrity_init_keyring(id, perm, restriction);
+ if (ret)
+ kfree(restriction);
+ return ret;
}
int __init integrity_add_key(const unsigned int id, const void *data,
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index b82291d10e73..cc7e4e4439b0 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -471,7 +471,9 @@ void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
/**
* evm_inode_setattr - prevent updating an invalid EVM extended attribute
+ * @idmap: idmap of the mount
* @dentry: pointer to the affected dentry
+ * @attr: iattr structure containing the new file attributes
*
* Permit update of file attributes when files have a valid EVM signature,
* except in the case of them having an immutable portable signature.
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index 0b9cb639a0ed..ffdc3ca1e9c1 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -43,12 +43,10 @@ static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode)
else if (inode > iint->inode)
n = n->rb_right;
else
- break;
+ return iint;
}
- if (!n)
- return NULL;
- return iint;
+ return NULL;
}
/*
@@ -68,9 +66,32 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
return iint;
}
-static void iint_free(struct integrity_iint_cache *iint)
+#define IMA_MAX_NESTING (FILESYSTEM_MAX_STACK_DEPTH+1)
+
+/*
+ * It is not clear that IMA should be nested at all, but as long as it measures
+ * files both on overlayfs and on the underlying fs, we need to annotate the iint
+ * mutex to avoid lockdep false positives related to IMA + overlayfs.
+ * See ovl_lockdep_annotate_inode_mutex_key() for more details.
+ */
+static inline void iint_lockdep_annotate(struct integrity_iint_cache *iint,
+ struct inode *inode)
+{
+#ifdef CONFIG_LOCKDEP
+ static struct lock_class_key iint_mutex_key[IMA_MAX_NESTING];
+
+ int depth = inode->i_sb->s_stack_depth;
+
+ if (WARN_ON_ONCE(depth < 0 || depth >= IMA_MAX_NESTING))
+ depth = 0;
+
+ lockdep_set_class(&iint->mutex, &iint_mutex_key[depth]);
+#endif
+}
+
+static void iint_init_always(struct integrity_iint_cache *iint,
+ struct inode *inode)
{
- kfree(iint->ima_hash);
iint->ima_hash = NULL;
iint->version = 0;
iint->flags = 0UL;
@@ -82,6 +103,14 @@ static void iint_free(struct integrity_iint_cache *iint)
iint->ima_creds_status = INTEGRITY_UNKNOWN;
iint->evm_status = INTEGRITY_UNKNOWN;
iint->measured_pcrs = 0;
+ mutex_init(&iint->mutex);
+ iint_lockdep_annotate(iint, inode);
+}
+
+static void iint_free(struct integrity_iint_cache *iint)
+{
+ kfree(iint->ima_hash);
+ mutex_destroy(&iint->mutex);
kmem_cache_free(iint_cache, iint);
}
@@ -114,6 +143,8 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
if (!iint)
return NULL;
+ iint_init_always(iint, inode);
+
write_lock(&integrity_iint_lock);
p = &integrity_iint_tree.rb_node;
@@ -121,10 +152,15 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
parent = *p;
test_iint = rb_entry(parent, struct integrity_iint_cache,
rb_node);
- if (inode < test_iint->inode)
+ if (inode < test_iint->inode) {
p = &(*p)->rb_left;
- else
+ } else if (inode > test_iint->inode) {
p = &(*p)->rb_right;
+ } else {
+ write_unlock(&integrity_iint_lock);
+ kmem_cache_free(iint_cache, iint);
+ return test_iint;
+ }
}
iint->inode = inode;
@@ -158,25 +194,18 @@ void integrity_inode_free(struct inode *inode)
iint_free(iint);
}
-static void init_once(void *foo)
+static void iint_init_once(void *foo)
{
struct integrity_iint_cache *iint = foo;
memset(iint, 0, sizeof(*iint));
- iint->ima_file_status = INTEGRITY_UNKNOWN;
- iint->ima_mmap_status = INTEGRITY_UNKNOWN;
- iint->ima_bprm_status = INTEGRITY_UNKNOWN;
- iint->ima_read_status = INTEGRITY_UNKNOWN;
- iint->ima_creds_status = INTEGRITY_UNKNOWN;
- iint->evm_status = INTEGRITY_UNKNOWN;
- mutex_init(&iint->mutex);
}
static int __init integrity_iintcache_init(void)
{
iint_cache =
kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
- 0, SLAB_PANIC, init_once);
+ 0, SLAB_PANIC, iint_init_once);
return 0;
}
DEFINE_LSM(integrity) = {
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 44b3315f3235..425c0d5e0a75 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -8,7 +8,7 @@ config IMA
select CRYPTO_HMAC
select CRYPTO_SHA1
select CRYPTO_HASH_INFO
- select TCG_TPM if HAS_IOMEM && !UML
+ select TCG_TPM if HAS_IOMEM
select TCG_TIS if TCG_TPM && X86
select TCG_CRB if TCG_TPM && ACPI
select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
@@ -29,9 +29,11 @@ config IMA
to learn more about IMA.
If unsure, say N.
+if IMA
+
config IMA_KEXEC
bool "Enable carrying the IMA measurement list across a soft boot"
- depends on IMA && TCG_TPM && HAVE_IMA_KEXEC
+ depends on TCG_TPM && HAVE_IMA_KEXEC
default n
help
TPM PCRs are only reset on a hard reboot. In order to validate
@@ -43,7 +45,6 @@ config IMA_KEXEC
config IMA_MEASURE_PCR_IDX
int
- depends on IMA
range 8 14
default 10
help
@@ -53,7 +54,7 @@ config IMA_MEASURE_PCR_IDX
config IMA_LSM_RULES
bool
- depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK)
+ depends on AUDIT && (SECURITY_SELINUX || SECURITY_SMACK)
default y
help
Disabling this option will disregard LSM based policy rules.
@@ -61,7 +62,6 @@ config IMA_LSM_RULES
choice
prompt "Default template"
default IMA_NG_TEMPLATE
- depends on IMA
help
Select the default IMA measurement template.
@@ -80,14 +80,12 @@ endchoice
config IMA_DEFAULT_TEMPLATE
string
- depends on IMA
default "ima-ng" if IMA_NG_TEMPLATE
default "ima-sig" if IMA_SIG_TEMPLATE
choice
prompt "Default integrity hash algorithm"
default IMA_DEFAULT_HASH_SHA1
- depends on IMA
help
Select the default hash algorithm used for the measurement
list, integrity appraisal and audit log. The compiled default
@@ -113,7 +111,6 @@ endchoice
config IMA_DEFAULT_HASH
string
- depends on IMA
default "sha1" if IMA_DEFAULT_HASH_SHA1
default "sha256" if IMA_DEFAULT_HASH_SHA256
default "sha512" if IMA_DEFAULT_HASH_SHA512
@@ -121,7 +118,6 @@ config IMA_DEFAULT_HASH
config IMA_WRITE_POLICY
bool "Enable multiple writes to the IMA policy"
- depends on IMA
default n
help
IMA policy can now be updated multiple times. The new rules get
@@ -132,7 +128,6 @@ config IMA_WRITE_POLICY
config IMA_READ_POLICY
bool "Enable reading back the current IMA policy"
- depends on IMA
default y if IMA_WRITE_POLICY
default n if !IMA_WRITE_POLICY
help
@@ -142,7 +137,6 @@ config IMA_READ_POLICY
config IMA_APPRAISE
bool "Appraise integrity measurements"
- depends on IMA
default n
help
This option enables local measurement integrity appraisal.
@@ -243,18 +237,6 @@ config IMA_APPRAISE_MODSIG
The modsig keyword can be used in the IMA policy to allow a hook
to accept such signatures.
-config IMA_TRUSTED_KEYRING
- bool "Require all keys on the .ima keyring be signed (deprecated)"
- depends on IMA_APPRAISE && SYSTEM_TRUSTED_KEYRING
- depends on INTEGRITY_ASYMMETRIC_KEYS
- select INTEGRITY_TRUSTED_KEYRING
- default y
- help
- This option requires that all keys added to the .ima
- keyring be signed by a key on the system trusted keyring.
-
- This option is deprecated in favor of INTEGRITY_TRUSTED_KEYRING
-
config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
bool "Permit keys validly signed by a built-in or secondary CA cert (EXPERIMENTAL)"
depends on SYSTEM_TRUSTED_KEYRING
@@ -275,7 +257,7 @@ config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
config IMA_BLACKLIST_KEYRING
bool "Create IMA machine owner blacklist keyrings (EXPERIMENTAL)"
depends on SYSTEM_TRUSTED_KEYRING
- depends on IMA_TRUSTED_KEYRING
+ depends on INTEGRITY_TRUSTED_KEYRING
default n
help
This option creates an IMA blacklist keyring, which contains all
@@ -285,7 +267,7 @@ config IMA_BLACKLIST_KEYRING
config IMA_LOAD_X509
bool "Load X509 certificate onto the '.ima' trusted keyring"
- depends on IMA_TRUSTED_KEYRING
+ depends on INTEGRITY_TRUSTED_KEYRING
default n
help
File signature verification is based on the public keys
@@ -307,3 +289,5 @@ config IMA_APPRAISE_SIGNED_INIT
default n
help
This option requires user-space init to be signed.
+
+endif
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 5fae6cfe8d91..146154e333e6 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -361,24 +361,24 @@ static inline void ima_free_modsig(struct modsig *modsig)
/* LSM based policy rules require audit */
#ifdef CONFIG_IMA_LSM_RULES
-#define security_filter_rule_init security_audit_rule_init
-#define security_filter_rule_free security_audit_rule_free
-#define security_filter_rule_match security_audit_rule_match
+#define ima_filter_rule_init security_audit_rule_init
+#define ima_filter_rule_free security_audit_rule_free
+#define ima_filter_rule_match security_audit_rule_match
#else
-static inline int security_filter_rule_init(u32 field, u32 op, char *rulestr,
- void **lsmrule)
+static inline int ima_filter_rule_init(u32 field, u32 op, char *rulestr,
+ void **lsmrule)
{
return -EINVAL;
}
-static inline void security_filter_rule_free(void *lsmrule)
+static inline void ima_filter_rule_free(void *lsmrule)
{
}
-static inline int security_filter_rule_match(u32 secid, u32 field, u32 op,
- void *lsmrule)
+static inline int ima_filter_rule_match(u32 secid, u32 field, u32 op,
+ void *lsmrule)
{
return -EINVAL;
}
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 610759fe63b8..364979233a17 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -209,6 +209,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
{
const char *audit_cause = "failed";
struct inode *inode = file_inode(file);
+ struct inode *real_inode = d_real_inode(file_dentry(file));
const char *filename = file->f_path.dentry->d_name.name;
int result = 0;
int length;
@@ -259,6 +260,10 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
iint->ima_hash = tmpbuf;
memcpy(iint->ima_hash, &hash, length);
iint->version = i_version;
+ if (real_inode != inode) {
+ iint->real_ino = real_inode->i_ino;
+ iint->real_dev = real_inode->i_sb->s_dev;
+ }
/* Possibly temporary failure due to type of read (eg. O_DIRECT) */
if (!result)
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index a768f37a0a4d..09b9c2b25294 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -27,6 +27,7 @@
#include <linux/ima.h>
#include <linux/iversion.h>
#include <linux/fs.h>
+#include <linux/iversion.h>
#include "ima.h"
@@ -193,7 +194,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
u32 secid, char *buf, loff_t size, int mask,
enum ima_hooks func)
{
- struct inode *inode = file_inode(file);
+ struct inode *backing_inode, *inode = file_inode(file);
struct integrity_iint_cache *iint = NULL;
struct ima_template_desc *template_desc = NULL;
char *pathbuf = NULL;
@@ -267,6 +268,19 @@ static int process_measurement(struct file *file, const struct cred *cred,
iint->measured_pcrs = 0;
}
+ /* Detect and re-evaluate changes made to the backing file. */
+ backing_inode = d_real_inode(file_dentry(file));
+ if (backing_inode != inode &&
+ (action & IMA_DO_MASK) && (iint->flags & IMA_DONE_MASK)) {
+ if (!IS_I_VERSION(backing_inode) ||
+ backing_inode->i_sb->s_dev != iint->real_dev ||
+ backing_inode->i_ino != iint->real_ino ||
+ !inode_eq_iversion(backing_inode, iint->version)) {
+ iint->flags &= ~IMA_DONE_MASK;
+ iint->measured_pcrs = 0;
+ }
+ }
+
/* Determine if already appraised/measured based on bitmask
* (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
* IMA_AUDIT, IMA_AUDITED)
@@ -370,7 +384,9 @@ out:
/**
* ima_file_mmap - based on policy, collect/store measurement.
* @file: pointer to the file to be measured (May be NULL)
- * @prot: contains the protection that will be applied by the kernel.
+ * @reqprot: protection requested by the application
+ * @prot: protection that will be applied by the kernel
+ * @flags: operational flags
*
* Measure files being mmapped executable based on the ima_must_measure()
* policy decision.
@@ -378,7 +394,8 @@ out:
* On success return 0. On integrity appraisal error, assuming the file
* is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
*/
-int ima_file_mmap(struct file *file, unsigned long prot)
+int ima_file_mmap(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
{
u32 secid;
@@ -615,6 +632,7 @@ int ima_load_data(enum kernel_load_data_id id)
pr_err("impossible to appraise a module without a file descriptor. sig_enforce kernel parameter might help\n");
return -EACCES; /* INTEGRITY_UNKNOWN */
}
+ break;
default:
break;
}
diff --git a/security/integrity/ima/ima_modsig.c b/security/integrity/ima/ima_modsig.c
index d106885cc495..5fb971efc6e1 100644
--- a/security/integrity/ima/ima_modsig.c
+++ b/security/integrity/ima/ima_modsig.c
@@ -109,6 +109,9 @@ int ima_read_modsig(enum ima_hooks func, const void *buf, loff_t buf_len,
/**
* ima_collect_modsig - Calculate the file hash without the appended signature.
+ * @modsig: parsed module signature
+ * @buf: data to verify the signature on
+ * @size: data size
*
* Since the modsig is part of the file contents, the hash used in its signature
* isn't the same one ordinarily calculated by IMA. Therefore PKCS7 code
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 14aef74d3588..e749403f07a8 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -254,7 +254,7 @@ static void ima_lsm_free_rule(struct ima_rule_entry *entry)
int i;
for (i = 0; i < MAX_LSM_RULES; i++) {
- security_filter_rule_free(entry->lsm[i].rule);
+ ima_filter_rule_free(entry->lsm[i].rule);
kfree(entry->lsm[i].args_p);
}
kfree(entry);
@@ -286,10 +286,9 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
if (!nentry->lsm[i].args_p)
goto out_err;
- security_filter_rule_init(nentry->lsm[i].type,
- Audit_equal,
- nentry->lsm[i].args_p,
- &nentry->lsm[i].rule);
+ ima_filter_rule_init(nentry->lsm[i].type, Audit_equal,
+ nentry->lsm[i].args_p,
+ &nentry->lsm[i].rule);
if (!nentry->lsm[i].rule)
pr_warn("rule for LSM \'%s\' is undefined\n",
(char *)entry->lsm[i].args_p);
@@ -371,6 +370,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
enum ima_hooks func, int mask)
{
int i;
+ bool result = false;
+ struct ima_rule_entry *lsm_rule = rule;
+ bool rule_reinitialized = false;
if (func == KEXEC_CMDLINE) {
if ((rule->flags & IMA_FUNC) && (rule->func == func))
@@ -414,36 +416,55 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
int rc = 0;
u32 osid;
- if (!rule->lsm[i].rule) {
- if (!rule->lsm[i].args_p)
+ if (!lsm_rule->lsm[i].rule) {
+ if (!lsm_rule->lsm[i].args_p)
continue;
else
return false;
}
+
+retry:
switch (i) {
case LSM_OBJ_USER:
case LSM_OBJ_ROLE:
case LSM_OBJ_TYPE:
security_inode_getsecid(inode, &osid);
- rc = security_filter_rule_match(osid,
- rule->lsm[i].type,
- Audit_equal,
- rule->lsm[i].rule);
+ rc = ima_filter_rule_match(osid, lsm_rule->lsm[i].type,
+ Audit_equal,
+ lsm_rule->lsm[i].rule);
break;
case LSM_SUBJ_USER:
case LSM_SUBJ_ROLE:
case LSM_SUBJ_TYPE:
- rc = security_filter_rule_match(secid,
- rule->lsm[i].type,
- Audit_equal,
- rule->lsm[i].rule);
+ rc = ima_filter_rule_match(secid, lsm_rule->lsm[i].type,
+ Audit_equal,
+ lsm_rule->lsm[i].rule);
+ break;
default:
break;
}
- if (!rc)
- return false;
+
+ if (rc == -ESTALE && !rule_reinitialized) {
+ lsm_rule = ima_lsm_copy_rule(rule);
+ if (lsm_rule) {
+ rule_reinitialized = true;
+ goto retry;
+ }
+ }
+ if (!rc) {
+ result = false;
+ goto out;
+ }
+ }
+ result = true;
+
+out:
+ if (rule_reinitialized) {
+ for (i = 0; i < MAX_LSM_RULES; i++)
+ ima_filter_rule_free(lsm_rule->lsm[i].rule);
+ kfree(lsm_rule);
}
- return true;
+ return result;
}
/*
@@ -479,6 +500,7 @@ static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func)
* @secid: LSM secid of the task to be validated
* @func: IMA hook identifier
* @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
+ * @flags: IMA actions to consider (e.g. IMA_MEASURE | IMA_APPRAISE)
* @pcr: set the pcr to extend
* @template_desc: the template that should be used for this rule
*
@@ -669,6 +691,7 @@ void __init ima_init_policy(void)
add_rules(default_measurement_rules,
ARRAY_SIZE(default_measurement_rules),
IMA_DEFAULT_POLICY);
+ break;
default:
break;
}
@@ -821,10 +844,9 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry,
return -ENOMEM;
entry->lsm[lsm_rule].type = audit_type;
- result = security_filter_rule_init(entry->lsm[lsm_rule].type,
- Audit_equal,
- entry->lsm[lsm_rule].args_p,
- &entry->lsm[lsm_rule].rule);
+ result = ima_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal,
+ entry->lsm[lsm_rule].args_p,
+ &entry->lsm[lsm_rule].rule);
if (!entry->lsm[lsm_rule].rule) {
pr_warn("rule for LSM \'%s\' is undefined\n",
(char *)entry->lsm[lsm_rule].args_p);
@@ -1245,7 +1267,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
/**
* ima_parse_add_rule - add a rule to ima_policy_rules
- * @rule - ima measurement policy rule
+ * @rule: ima measurement policy rule
*
* Avoid locking by allowing just one writer at a time in ima_write_policy()
* Returns the length of the rule parsed, an error code on failure
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index 2283051d063b..7b8be19dbb89 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -222,11 +222,11 @@ int template_desc_init_fields(const char *template_fmt,
}
if (fields && num_fields) {
- *fields = kmalloc_array(i, sizeof(*fields), GFP_KERNEL);
+ *fields = kmalloc_array(i, sizeof(**fields), GFP_KERNEL);
if (*fields == NULL)
return -ENOMEM;
- memcpy(*fields, found_fields, i * sizeof(*fields));
+ memcpy(*fields, found_fields, i * sizeof(**fields));
*num_fields = i;
}
@@ -292,8 +292,11 @@ static struct ima_template_desc *restore_template_fmt(char *template_name)
template_desc->name = "";
template_desc->fmt = kstrdup(template_name, GFP_KERNEL);
- if (!template_desc->fmt)
+ if (!template_desc->fmt) {
+ kfree(template_desc);
+ template_desc = NULL;
goto out;
+ }
spin_lock(&template_list);
list_add_tail_rcu(&template_desc->list, &defined_templates);
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index d9323d31a3a8..f63516ebec5d 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -124,6 +124,8 @@ struct integrity_iint_cache {
unsigned long flags;
unsigned long measured_pcrs;
unsigned long atomic_flags;
+ unsigned long real_ino;
+ dev_t real_dev;
enum integrity_status ima_file_status:4;
enum integrity_status ima_mmap_status:4;
enum integrity_status ima_bprm_status:4;
diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
index 452011428d11..0fabdea04514 100644
--- a/security/integrity/platform_certs/load_uefi.c
+++ b/security/integrity/platform_certs/load_uefi.c
@@ -30,10 +30,11 @@ static const struct dmi_system_id uefi_skip_cert[] = {
{ UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir8,1") },
{ UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir8,2") },
{ UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir9,1") },
- { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacMini8,1") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "Macmini8,1") },
{ UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacPro7,1") },
{ UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,1") },
{ UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,2") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMacPro1,1") },
{ }
};
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index edde63a63007..f42968f34958 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -977,14 +977,19 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
ret = -EACCES;
down_write(&key->sem);
- if (!capable(CAP_SYS_ADMIN)) {
+ {
+ bool is_privileged_op = false;
+
/* only the sysadmin can chown a key to some other UID */
if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
- goto error_put;
+ is_privileged_op = true;
/* only the sysadmin can set the key's GID to a group other
* than one of those that the current process subscribes to */
if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
+ is_privileged_op = true;
+
+ if (is_privileged_op && !capable(CAP_SYS_ADMIN))
goto error_put;
}
@@ -1084,7 +1089,7 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
down_write(&key->sem);
/* if we're not the sysadmin, we can only change a key that we own */
- if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) {
+ if (uid_eq(key->uid, current_fsuid()) || capable(CAP_SYS_ADMIN)) {
key->perm = perm;
ret = 0;
}
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 957b9e3e1492..964e2456f34d 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -38,9 +38,12 @@ static void cache_requested_key(struct key *key)
#ifdef CONFIG_KEYS_REQUEST_CACHE
struct task_struct *t = current;
- key_put(t->cached_requested_key);
- t->cached_requested_key = key_get(key);
- set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+ /* Do not cache the key if current is a kernel thread */
+ if (!(t->flags & PF_KTHREAD)) {
+ key_put(t->cached_requested_key);
+ t->cached_requested_key = key_get(key);
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+ }
#endif
}
@@ -398,17 +401,21 @@ static int construct_alloc_key(struct keyring_search_context *ctx,
set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
if (dest_keyring) {
- ret = __key_link_lock(dest_keyring, &ctx->index_key);
+ ret = __key_link_lock(dest_keyring, &key->index_key);
if (ret < 0)
goto link_lock_failed;
- ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit);
- if (ret < 0)
- goto link_prealloc_failed;
}
- /* attach the key to the destination keyring under lock, but we do need
+ /*
+ * Attach the key to the destination keyring under lock, but we do need
* to do another check just in case someone beat us to it whilst we
- * waited for locks */
+ * waited for locks.
+ *
+ * The caller might specify a comparison function which looks for keys
+ * that do not exactly match but are still equivalent from the caller's
+ * perspective. The __key_link_begin() operation must be done only after
+ * an actual key is determined.
+ */
mutex_lock(&key_construction_mutex);
rcu_read_lock();
@@ -417,12 +424,16 @@ static int construct_alloc_key(struct keyring_search_context *ctx,
if (!IS_ERR(key_ref))
goto key_already_present;
- if (dest_keyring)
+ if (dest_keyring) {
+ ret = __key_link_begin(dest_keyring, &key->index_key, &edit);
+ if (ret < 0)
+ goto link_alloc_failed;
__key_link(key, &edit);
+ }
mutex_unlock(&key_construction_mutex);
if (dest_keyring)
- __key_link_end(dest_keyring, &ctx->index_key, edit);
+ __key_link_end(dest_keyring, &key->index_key, edit);
mutex_unlock(&user->cons_lock);
*_key = key;
kleave(" = 0 [%d]", key_serial(key));
@@ -435,10 +446,13 @@ key_already_present:
mutex_unlock(&key_construction_mutex);
key = key_ref_to_ptr(key_ref);
if (dest_keyring) {
+ ret = __key_link_begin(dest_keyring, &key->index_key, &edit);
+ if (ret < 0)
+ goto link_alloc_failed_unlocked;
ret = __key_link_check_live_key(dest_keyring, key);
if (ret == 0)
__key_link(key, &edit);
- __key_link_end(dest_keyring, &ctx->index_key, edit);
+ __key_link_end(dest_keyring, &key->index_key, edit);
if (ret < 0)
goto link_check_failed;
}
@@ -453,8 +467,10 @@ link_check_failed:
kleave(" = %d [linkcheck]", ret);
return ret;
-link_prealloc_failed:
- __key_link_end(dest_keyring, &ctx->index_key, edit);
+link_alloc_failed:
+ mutex_unlock(&key_construction_mutex);
+link_alloc_failed_unlocked:
+ __key_link_end(dest_keyring, &key->index_key, edit);
link_lock_failed:
mutex_unlock(&user->cons_lock);
key_put(key);
diff --git a/security/security.c b/security/security.c
index f0a287896100..c0a0b7edecea 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1430,6 +1430,23 @@ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return call_int_hook(file_ioctl, 0, file, cmd, arg);
}
+/**
+ * security_file_ioctl_compat() - Check if an ioctl is allowed in compat mode
+ * @file: associated file
+ * @cmd: ioctl cmd
+ * @arg: ioctl arguments
+ *
+ * Compat version of security_file_ioctl() that correctly handles 32-bit
+ * processes running on 64-bit kernels.
+ *
+ * Return: Returns 0 if permission is granted.
+ */
+int security_file_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return call_int_hook(file_ioctl_compat, 0, file, cmd, arg);
+}
+
static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
{
/*
@@ -1466,12 +1483,13 @@ static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags)
{
+ unsigned long prot_adj = mmap_prot(file, prot);
int ret;
- ret = call_int_hook(mmap_file, 0, file, prot,
- mmap_prot(file, prot), flags);
+
+ ret = call_int_hook(mmap_file, 0, file, prot, prot_adj, flags);
if (ret)
return ret;
- return ima_file_mmap(file, prot);
+ return ima_file_mmap(file, prot, prot_adj, flags);
}
int security_mmap_addr(unsigned long addr)
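For context, a minimal sketch of where the new hook is meant to be called — assuming the usual compat ioctl entry point in fs/ioctl.c, which is not part of this hunk; the caller shown here is illustrative only:

/* Sketch only: hypothetical caller, mirroring the native ioctl path,
 * which already calls security_file_ioctl(). */
COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg32)
{
	struct fd f = fdget(fd);
	int error;

	if (!f.file)
		return -EBADF;

	/* the compat-aware LSM check replaces security_file_ioctl() here */
	error = security_file_ioctl_compat(f.file, cmd, arg32);
	if (error)
		goto out;
	/* ... dispatch to the compat ioctl handlers ... */
out:
	fdput(f);
	return error;
}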
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index ccf950409384..b8798cd58aee 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -19,8 +19,12 @@ ccflags-y := -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
$(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h
- cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h
+ cmd_flask = $< $(obj)/flask.h $(obj)/av_permissions.h
targets += flask.h av_permissions.h
-$(obj)/flask.h: $(src)/include/classmap.h FORCE
+# once make >= 4.3 is required, we can use grouped targets in the rule below,
+# which basically involves adding both headers and a '&' before the colon, see
+# the example below:
+# $(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/...
+$(obj)/flask.h: scripts/selinux/genheaders/genheaders FORCE
$(call if_changed,flask)
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index d9f15c84aab7..6fec9fba41a8 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3668,6 +3668,33 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
return error;
}
+static int selinux_file_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ /*
+ * If we are in a 64-bit kernel running 32-bit userspace, we need to
+ * make sure we don't compare 32-bit flags to 64-bit flags.
+ */
+ switch (cmd) {
+ case FS_IOC32_GETFLAGS:
+ cmd = FS_IOC_GETFLAGS;
+ break;
+ case FS_IOC32_SETFLAGS:
+ cmd = FS_IOC_SETFLAGS;
+ break;
+ case FS_IOC32_GETVERSION:
+ cmd = FS_IOC_GETVERSION;
+ break;
+ case FS_IOC32_SETVERSION:
+ cmd = FS_IOC_SETVERSION;
+ break;
+ default:
+ break;
+ }
+
+ return selinux_file_ioctl(file, cmd, arg);
+}
+
static int default_noexec;
static int file_map_prot_check(struct file *file, unsigned long prot, int shared)
@@ -4625,6 +4652,13 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
return -EINVAL;
addr4 = (struct sockaddr_in *)address;
if (family_sa == AF_UNSPEC) {
+ if (family == PF_INET6) {
+ /* Length check from inet6_bind_sk() */
+ if (addrlen < SIN6_LEN_RFC2133)
+ return -EINVAL;
+ /* Family check from __inet6_bind() */
+ goto err_af;
+ }
/* see __inet_bind(), we only want to allow
* AF_UNSPEC if the address is INADDR_ANY
*/
@@ -6926,6 +6960,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(file_permission, selinux_file_permission),
LSM_HOOK_INIT(file_alloc_security, selinux_file_alloc_security),
LSM_HOOK_INIT(file_ioctl, selinux_file_ioctl),
+ LSM_HOOK_INIT(file_ioctl_compat, selinux_file_ioctl_compat),
LSM_HOOK_INIT(mmap_file, selinux_mmap_file),
LSM_HOOK_INIT(mmap_addr, selinux_mmap_addr),
LSM_HOOK_INIT(file_mprotect, selinux_file_mprotect),
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index 162d0e79b85b..b18bc405f820 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -356,6 +356,8 @@ static inline int put_entry(const void *buf, size_t bytes, int num, struct polic
{
size_t len = bytes * num;
+ if (len > fp->len)
+ return -EINVAL;
memcpy(fp->data, buf, len);
fp->data += len;
fp->len -= len;
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 335d2411abe4..a567b3808184 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -117,6 +117,7 @@ struct inode_smack {
struct task_smack {
struct smack_known *smk_task; /* label for access control */
struct smack_known *smk_forked; /* label when forked */
+ struct smack_known *smk_transmuted;/* label when transmuted */
struct list_head smk_rules; /* per task access rules */
struct mutex smk_rules_lock; /* lock for the rules */
struct list_head smk_relabel; /* transit allowed labels */
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 0253cd2e2358..6f2613f874fa 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -982,8 +982,9 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr, const char **name,
void **value, size_t *len)
{
+ struct task_smack *tsp = smack_cred(current_cred());
struct inode_smack *issp = smack_inode(inode);
- struct smack_known *skp = smk_of_current();
+ struct smack_known *skp = smk_of_task(tsp);
struct smack_known *isp = smk_of_inode(inode);
struct smack_known *dsp = smk_of_inode(dir);
int may;
@@ -992,20 +993,34 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
*name = XATTR_SMACK_SUFFIX;
if (value && len) {
- rcu_read_lock();
- may = smk_access_entry(skp->smk_known, dsp->smk_known,
- &skp->smk_rules);
- rcu_read_unlock();
+ /*
+ * If equal, transmuting already occurred in
+ * smack_dentry_create_files_as(). No need to check again.
+ */
+ if (tsp->smk_task != tsp->smk_transmuted) {
+ rcu_read_lock();
+ may = smk_access_entry(skp->smk_known, dsp->smk_known,
+ &skp->smk_rules);
+ rcu_read_unlock();
+ }
/*
- * If the access rule allows transmutation and
- * the directory requests transmutation then
- * by all means transmute.
+ * In addition to having smk_task equal to smk_transmuted,
+ * if the access rule allows transmutation and the directory
+ * requests transmutation then by all means transmute.
* Mark the inode as changed.
*/
- if (may > 0 && ((may & MAY_TRANSMUTE) != 0) &&
- smk_inode_transmutable(dir)) {
- isp = dsp;
+ if ((tsp->smk_task == tsp->smk_transmuted) ||
+ (may > 0 && ((may & MAY_TRANSMUTE) != 0) &&
+ smk_inode_transmutable(dir))) {
+ /*
+ * The caller of smack_dentry_create_files_as()
+ * should have overridden the current cred, so the
+ * inode label was already set correctly in
+ * smack_inode_alloc_security().
+ */
+ if (tsp->smk_task != tsp->smk_transmuted)
+ isp = dsp;
issp->smk_flags |= SMK_INODE_CHANGED;
}
@@ -1439,10 +1454,19 @@ static int smack_inode_getsecurity(struct inode *inode,
struct super_block *sbp;
struct inode *ip = (struct inode *)inode;
struct smack_known *isp;
+ struct inode_smack *ispp;
+ size_t label_len;
+ char *label = NULL;
- if (strcmp(name, XATTR_SMACK_SUFFIX) == 0)
+ if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
isp = smk_of_inode(inode);
- else {
+ } else if (strcmp(name, XATTR_SMACK_TRANSMUTE) == 0) {
+ ispp = smack_inode(inode);
+ if (ispp->smk_flags & SMK_INODE_TRANSMUTE)
+ label = TRANS_TRUE;
+ else
+ label = "";
+ } else {
/*
* The rest of the Smack xattrs are only on sockets.
*/
@@ -1464,13 +1488,18 @@ static int smack_inode_getsecurity(struct inode *inode,
return -EOPNOTSUPP;
}
+ if (!label)
+ label = isp->smk_known;
+
+ label_len = strlen(label);
+
if (alloc) {
- *buffer = kstrdup(isp->smk_known, GFP_KERNEL);
+ *buffer = kstrdup(label, GFP_KERNEL);
if (*buffer == NULL)
return -ENOMEM;
}
- return strlen(isp->smk_known);
+ return label_len;
}
@@ -4515,7 +4544,7 @@ static int smack_inode_copy_up(struct dentry *dentry, struct cred **new)
/*
* Get label from overlay inode and set it in create_sid
*/
- isp = smack_inode(d_inode(dentry->d_parent));
+ isp = smack_inode(d_inode(dentry));
skp = isp->smk_inode;
tsp->smk_task = skp;
*new = new_creds;
@@ -4566,8 +4595,10 @@ static int smack_dentry_create_files_as(struct dentry *dentry, int mode,
* providing access is transmuting use the containing
* directory label instead of the process label.
*/
- if (may > 0 && (may & MAY_TRANSMUTE))
+ if (may > 0 && (may & MAY_TRANSMUTE)) {
ntsp->smk_task = isp->smk_inode;
+ ntsp->smk_transmuted = ntsp->smk_task;
+ }
}
return 0;
}
@@ -4617,6 +4648,7 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(file_alloc_security, smack_file_alloc_security),
LSM_HOOK_INIT(file_ioctl, smack_file_ioctl),
+ LSM_HOOK_INIT(file_ioctl_compat, smack_file_ioctl),
LSM_HOOK_INIT(file_lock, smack_file_lock),
LSM_HOOK_INIT(file_fcntl, smack_file_fcntl),
LSM_HOOK_INIT(mmap_file, smack_mmap_file),
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 6b6fec04c412..a71975ea88a9 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -895,7 +895,7 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
}
ret = sscanf(rule, "%d", &catlen);
- if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM)
+ if (ret != 1 || catlen < 0 || catlen > SMACK_CIPSO_MAXCATNUM)
goto out;
if (format == SMK_FIXED24_FMT &&
diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
index cca5a3012fee..221eaadffb09 100644
--- a/security/tomoyo/Makefile
+++ b/security/tomoyo/Makefile
@@ -10,7 +10,7 @@ endef
quiet_cmd_policy = POLICY $@
cmd_policy = ($(call do_policy,profile); $(call do_policy,exception_policy); $(call do_policy,domain_policy); $(call do_policy,manager); $(call do_policy,stat)) >$@
-$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(src)/policy/*.conf.default) FORCE
+$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(srctree)/$(src)/policy/*.conf.default) FORCE
$(call if_changed,policy)
$(obj)/common.o: $(obj)/builtin-policy.h
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 716c92ec941a..0176612bac96 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -554,6 +554,7 @@ static struct security_hook_list tomoyo_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(path_rename, tomoyo_path_rename),
LSM_HOOK_INIT(inode_getattr, tomoyo_inode_getattr),
LSM_HOOK_INIT(file_ioctl, tomoyo_file_ioctl),
+ LSM_HOOK_INIT(file_ioctl_compat, tomoyo_file_ioctl),
LSM_HOOK_INIT(path_chmod, tomoyo_path_chmod),
LSM_HOOK_INIT(path_chown, tomoyo_path_chown),
LSM_HOOK_INIT(path_chroot, tomoyo_path_chroot),
diff --git a/sound/Kconfig b/sound/Kconfig
index 36785410fbe1..aaf2022ffc57 100644
--- a/sound/Kconfig
+++ b/sound/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig SOUND
tristate "Sound card support"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM || UML
help
If you have a sound card in your computer, i.e. if it can say more
than an occasional beep, say Y.
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index 17df288fbe98..3f5ee9f96ef1 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -147,6 +147,7 @@ static int i2sbus_get_and_fixup_rsrc(struct device_node *np, int index,
return rc;
}
+/* Returns 1 if added, 0 otherwise; don't return a negative value! */
/* FIXME: look at device node refcounting */
static int i2sbus_add_dev(struct macio_dev *macio,
struct i2sbus_control *control,
@@ -213,7 +214,7 @@ static int i2sbus_add_dev(struct macio_dev *macio,
* either as the second one in that case is just a modem. */
if (!ok) {
kfree(dev);
- return -ENODEV;
+ return 0;
}
mutex_init(&dev->lock);
@@ -302,6 +303,10 @@ static int i2sbus_add_dev(struct macio_dev *macio,
if (soundbus_add_one(&dev->sound)) {
printk(KERN_DEBUG "i2sbus: device registration error!\n");
+ if (dev->sound.ofdev.dev.kobj.state_initialized) {
+ soundbus_dev_put(&dev->sound);
+ return 0;
+ }
goto err;
}
diff --git a/sound/core/Makefile b/sound/core/Makefile
index d123587c0fd8..bc04acf4a45c 100644
--- a/sound/core/Makefile
+++ b/sound/core/Makefile
@@ -32,7 +32,6 @@ snd-pcm-dmaengine-objs := pcm_dmaengine.o
snd-rawmidi-objs := rawmidi.o
snd-timer-objs := timer.o
snd-hrtimer-objs := hrtimer.o
-snd-rtctimer-objs := rtctimer.o
snd-hwdep-objs := hwdep.o
snd-seq-device-objs := seq_device.o
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index cca3ed9b0629..766679037a70 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -223,7 +223,7 @@ static int copy_ctl_value_from_user(struct snd_card *card,
{
struct snd_ctl_elem_value32 __user *data32 = userdata;
int i, type, size;
- int uninitialized_var(count);
+ int count;
unsigned int indirect;
if (copy_from_user(&data->id, &data32->id, sizeof(data->id)))
@@ -306,7 +306,9 @@ static int ctl_elem_read_user(struct snd_card *card,
err = snd_power_wait(card, SNDRV_CTL_POWER_D0);
if (err < 0)
goto error;
+ down_read(&card->controls_rwsem);
err = snd_ctl_elem_read(card, data);
+ up_read(&card->controls_rwsem);
if (err < 0)
goto error;
err = copy_ctl_value_to_user(userdata, valuep, data, type, count);
@@ -334,7 +336,9 @@ static int ctl_elem_write_user(struct snd_ctl_file *file,
err = snd_power_wait(card, SNDRV_CTL_POWER_D0);
if (err < 0)
goto error;
+ down_write(&card->controls_rwsem);
err = snd_ctl_elem_write(card, file, data);
+ up_write(&card->controls_rwsem);
if (err < 0)
goto error;
err = copy_ctl_value_to_user(userdata, valuep, data, type, count);
diff --git a/sound/core/info.c b/sound/core/info.c
index f18f4ef6661e..a68a9689ac06 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -57,7 +57,7 @@ struct snd_info_private_data {
};
static int snd_info_version_init(void);
-static void snd_info_disconnect(struct snd_info_entry *entry);
+static void snd_info_clear_entries(struct snd_info_entry *entry);
/*
@@ -112,9 +112,9 @@ static loff_t snd_info_entry_llseek(struct file *file, loff_t offset, int orig)
entry = data->entry;
mutex_lock(&entry->access);
if (entry->c.ops->llseek) {
- offset = entry->c.ops->llseek(entry,
- data->file_private_data,
- file, offset, orig);
+ ret = entry->c.ops->llseek(entry,
+ data->file_private_data,
+ file, offset, orig);
goto out;
}
@@ -572,11 +572,16 @@ void snd_info_card_disconnect(struct snd_card *card)
{
if (!card)
return;
- mutex_lock(&info_mutex);
+
proc_remove(card->proc_root_link);
- card->proc_root_link = NULL;
if (card->proc_root)
- snd_info_disconnect(card->proc_root);
+ proc_remove(card->proc_root->p);
+
+ mutex_lock(&info_mutex);
+ if (card->proc_root)
+ snd_info_clear_entries(card->proc_root);
+ card->proc_root_link = NULL;
+ card->proc_root = NULL;
mutex_unlock(&info_mutex);
}
@@ -748,15 +753,14 @@ struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card,
}
EXPORT_SYMBOL(snd_info_create_card_entry);
-static void snd_info_disconnect(struct snd_info_entry *entry)
+static void snd_info_clear_entries(struct snd_info_entry *entry)
{
struct snd_info_entry *p;
if (!entry->p)
return;
list_for_each_entry(p, &entry->children, list)
- snd_info_disconnect(p);
- proc_remove(entry->p);
+ snd_info_clear_entries(p);
entry->p = NULL;
}
@@ -773,8 +777,9 @@ void snd_info_free_entry(struct snd_info_entry * entry)
if (!entry)
return;
if (entry->p) {
+ proc_remove(entry->p);
mutex_lock(&info_mutex);
- snd_info_disconnect(entry);
+ snd_info_clear_entries(entry);
mutex_unlock(&info_mutex);
}
diff --git a/sound/core/init.c b/sound/core/init.c
index 45bbc4884ef0..a127763ae5fb 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -211,6 +211,7 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
INIT_LIST_HEAD(&card->ctl_files);
spin_lock_init(&card->files_lock);
INIT_LIST_HEAD(&card->files_list);
+ mutex_init(&card->memory_mutex);
#ifdef CONFIG_PM
init_waitqueue_head(&card->power_sleep);
#endif
diff --git a/sound/core/jack.c b/sound/core/jack.c
index e7ac82d46821..c2022b13fddc 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -364,6 +364,7 @@ void snd_jack_report(struct snd_jack *jack, int status)
{
struct snd_jack_kctl *jack_kctl;
#ifdef CONFIG_SND_JACK_INPUT_DEV
+ struct input_dev *idev;
int i;
#endif
@@ -375,30 +376,28 @@ void snd_jack_report(struct snd_jack *jack, int status)
status & jack_kctl->mask_bits);
#ifdef CONFIG_SND_JACK_INPUT_DEV
- mutex_lock(&jack->input_dev_lock);
- if (!jack->input_dev) {
- mutex_unlock(&jack->input_dev_lock);
+ idev = input_get_device(jack->input_dev);
+ if (!idev)
return;
- }
for (i = 0; i < ARRAY_SIZE(jack->key); i++) {
int testbit = SND_JACK_BTN_0 >> i;
if (jack->type & testbit)
- input_report_key(jack->input_dev, jack->key[i],
+ input_report_key(idev, jack->key[i],
status & testbit);
}
for (i = 0; i < ARRAY_SIZE(jack_switch_types); i++) {
int testbit = 1 << i;
if (jack->type & testbit)
- input_report_switch(jack->input_dev,
+ input_report_switch(idev,
jack_switch_types[i],
status & testbit);
}
- input_sync(jack->input_dev);
- mutex_unlock(&jack->input_dev_lock);
+ input_sync(idev);
+ input_put_device(idev);
#endif /* CONFIG_SND_JACK_INPUT_DEV */
}
EXPORT_SYMBOL(snd_jack_report);
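The conversion above swaps the input_dev_lock mutex for a get/put reference, so an unregistration racing with snd_jack_report() cannot free the device mid-report. A standalone userspace model of that pattern (the names here are made up; this is not the kernel input API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dev { atomic_int refs; };

static struct dev *dev_get(struct dev *d)
{
	if (!d)
		return NULL;
	atomic_fetch_add(&d->refs, 1);
	return d;
}

static void dev_put(struct dev *d)
{
	if (d && atomic_fetch_sub(&d->refs, 1) == 1)
		free(d);
}

int main(void)
{
	struct dev *registered = malloc(sizeof(*registered));
	atomic_init(&registered->refs, 1);

	/* reporting path: pin the device instead of holding a mutex */
	struct dev *d = dev_get(registered);
	if (d) {
		puts("report event");	/* safe even if unregistration races */
		dev_put(d);
	}
	dev_put(registered);		/* drop the original reference */
	return 0;
}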
diff --git a/sound/core/misc.c b/sound/core/misc.c
index 3579dd7a161f..c3f3d94b5197 100644
--- a/sound/core/misc.c
+++ b/sound/core/misc.c
@@ -10,6 +10,7 @@
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/ioport.h>
+#include <linux/fs.h>
#include <sound/core.h>
#ifdef CONFIG_SND_DEBUG
@@ -145,3 +146,96 @@ snd_pci_quirk_lookup(struct pci_dev *pci, const struct snd_pci_quirk *list)
}
EXPORT_SYMBOL(snd_pci_quirk_lookup);
#endif
+
+/*
+ * Deferred async signal helpers
+ *
+ * Below are a few helper functions to wrap the async signal handling
+ * in the deferred work. The main purpose is to avoid the messy deadlock
+ * around tasklist_lock and co at the kill_fasync() invocation.
+ * fasync_helper() and kill_fasync() are replaced with snd_fasync_helper()
+ * and snd_kill_fasync(), respectively. In addition, snd_fasync_free() has
+ * to be called when the relevant file object is released.
+ */
+struct snd_fasync {
+ struct fasync_struct *fasync;
+ int signal;
+ int poll;
+ int on;
+ struct list_head list;
+};
+
+static DEFINE_SPINLOCK(snd_fasync_lock);
+static LIST_HEAD(snd_fasync_list);
+
+static void snd_fasync_work_fn(struct work_struct *work)
+{
+ struct snd_fasync *fasync;
+
+ spin_lock_irq(&snd_fasync_lock);
+ while (!list_empty(&snd_fasync_list)) {
+ fasync = list_first_entry(&snd_fasync_list, struct snd_fasync, list);
+ list_del_init(&fasync->list);
+ spin_unlock_irq(&snd_fasync_lock);
+ if (fasync->on)
+ kill_fasync(&fasync->fasync, fasync->signal, fasync->poll);
+ spin_lock_irq(&snd_fasync_lock);
+ }
+ spin_unlock_irq(&snd_fasync_lock);
+}
+
+static DECLARE_WORK(snd_fasync_work, snd_fasync_work_fn);
+
+int snd_fasync_helper(int fd, struct file *file, int on,
+ struct snd_fasync **fasyncp)
+{
+ struct snd_fasync *fasync = NULL;
+
+ if (on) {
+ fasync = kzalloc(sizeof(*fasync), GFP_KERNEL);
+ if (!fasync)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&fasync->list);
+ }
+
+ spin_lock_irq(&snd_fasync_lock);
+ if (*fasyncp) {
+ kfree(fasync);
+ fasync = *fasyncp;
+ } else {
+ if (!fasync) {
+ spin_unlock_irq(&snd_fasync_lock);
+ return 0;
+ }
+ *fasyncp = fasync;
+ }
+ fasync->on = on;
+ spin_unlock_irq(&snd_fasync_lock);
+ return fasync_helper(fd, file, on, &fasync->fasync);
+}
+EXPORT_SYMBOL_GPL(snd_fasync_helper);
+
+void snd_kill_fasync(struct snd_fasync *fasync, int signal, int poll)
+{
+ unsigned long flags;
+
+ if (!fasync || !fasync->on)
+ return;
+ spin_lock_irqsave(&snd_fasync_lock, flags);
+ fasync->signal = signal;
+ fasync->poll = poll;
+ list_move(&fasync->list, &snd_fasync_list);
+ schedule_work(&snd_fasync_work);
+ spin_unlock_irqrestore(&snd_fasync_lock, flags);
+}
+EXPORT_SYMBOL_GPL(snd_kill_fasync);
+
+void snd_fasync_free(struct snd_fasync *fasync)
+{
+ if (!fasync)
+ return;
+ fasync->on = 0;
+ flush_work(&snd_fasync_work);
+ kfree(fasync);
+}
+EXPORT_SYMBOL_GPL(snd_fasync_free);
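How a driver adopts these helpers, as a hypothetical sketch (the timer.c hunks later in this series make exactly this conversion):

/* Sketch only: 'my_dev' and the fops names are illustrative. */
struct my_dev {
	struct snd_fasync *fasync;	/* was: struct fasync_struct * */
};

static int my_fasync(int fd, struct file *file, int on)
{
	struct my_dev *dev = file->private_data;

	return snd_fasync_helper(fd, file, on, &dev->fasync);
}

static void my_event(struct my_dev *dev)
{
	/* safe from any context; the signal is sent from a workqueue */
	snd_kill_fasync(dev->fasync, SIGIO, POLL_IN);
}

static int my_release(struct inode *inode, struct file *file)
{
	struct my_dev *dev = file->private_data;

	snd_fasync_free(dev->fasync);	/* mandatory at release time */
	return 0;
}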
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index ad4e0af2d0d0..51d2911366e9 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1661,13 +1661,14 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
runtime = substream->runtime;
if (atomic_read(&substream->mmap_count))
goto __direct;
- if ((err = snd_pcm_oss_make_ready(substream)) < 0)
- return err;
atomic_inc(&runtime->oss.rw_ref);
if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
atomic_dec(&runtime->oss.rw_ref);
return -ERESTARTSYS;
}
+ err = snd_pcm_oss_make_ready_locked(substream);
+ if (err < 0)
+ goto unlock;
format = snd_pcm_oss_format_from(runtime->oss.format);
width = snd_pcm_format_physical_width(format);
if (runtime->oss.buffer_used > 0) {
diff --git a/sound/core/oss/pcm_plugin.h b/sound/core/oss/pcm_plugin.h
index 8d2f7a4e3ab6..0e1c8cae6c5b 100644
--- a/sound/core/oss/pcm_plugin.h
+++ b/sound/core/oss/pcm_plugin.h
@@ -141,6 +141,14 @@ int snd_pcm_area_copy(const struct snd_pcm_channel_area *src_channel,
void *snd_pcm_plug_buf_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t size);
void snd_pcm_plug_buf_unlock(struct snd_pcm_substream *plug, void *ptr);
+#else
+
+static inline snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t drv_size) { return drv_size; }
+static inline snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t clt_size) { return clt_size; }
+static inline int snd_pcm_plug_slave_format(int format, const struct snd_mask *format_mask) { return format; }
+
+#endif
+
snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream,
const char *ptr, snd_pcm_uframes_t size,
int in_kernel);
@@ -151,14 +159,6 @@ snd_pcm_sframes_t snd_pcm_oss_writev3(struct snd_pcm_substream *substream,
snd_pcm_sframes_t snd_pcm_oss_readv3(struct snd_pcm_substream *substream,
void **bufs, snd_pcm_uframes_t frames);
-#else
-
-static inline snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t drv_size) { return drv_size; }
-static inline snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t clt_size) { return clt_size; }
-static inline int snd_pcm_plug_slave_format(int format, const struct snd_mask *format_mask) { return format; }
-
-#endif
-
#ifdef PLUGIN_DEBUG
#define pdprintf(fmt, args...) printk(KERN_DEBUG "plugin: " fmt, ##args)
#else
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 3561cdceaadc..9722cf378d31 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -251,6 +251,7 @@ static char *snd_pcm_state_names[] = {
STATE(DRAINING),
STATE(PAUSED),
STATE(SUSPENDED),
+ STATE(DISCONNECTED),
};
static char *snd_pcm_access_names[] = {
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 6f9003b1869a..b30b18349e71 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -315,10 +315,14 @@ static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
goto error;
}
- if (refine)
+ if (refine) {
err = snd_pcm_hw_refine(substream, data);
- else
+ if (err < 0)
+ goto error;
+ err = fixup_unreferenced_params(substream, data);
+ } else {
err = snd_pcm_hw_params(substream, data);
+ }
if (err < 0)
goto error;
if (copy_to_user(data32, data, sizeof(*data32)) ||
diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
index 89a05926ac73..5d9a24ca6f3e 100644
--- a/sound/core/pcm_dmaengine.c
+++ b/sound/core/pcm_dmaengine.c
@@ -130,12 +130,14 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data);
static void dmaengine_pcm_dma_complete(void *arg)
{
+ unsigned int new_pos;
struct snd_pcm_substream *substream = arg;
struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
- prtd->pos += snd_pcm_lib_period_bytes(substream);
- if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream))
- prtd->pos = 0;
+ new_pos = prtd->pos + snd_pcm_lib_period_bytes(substream);
+ if (new_pos >= snd_pcm_lib_buffer_bytes(substream))
+ new_pos = 0;
+ prtd->pos = new_pos;
snd_pcm_period_elapsed(substream);
}
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index 9aea1d6fb054..b961a30c2a22 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -26,6 +26,67 @@ MODULE_PARM_DESC(maximum_substreams, "Maximum substreams with preallocated DMA m
static const size_t snd_minimum_buffer = 16384;
+static unsigned long max_alloc_per_card = 32UL * 1024UL * 1024UL;
+module_param(max_alloc_per_card, ulong, 0644);
+MODULE_PARM_DESC(max_alloc_per_card, "Max total allocation bytes per card.");
+
+static void __update_allocated_size(struct snd_card *card, ssize_t bytes)
+{
+ card->total_pcm_alloc_bytes += bytes;
+}
+
+static void update_allocated_size(struct snd_card *card, ssize_t bytes)
+{
+ mutex_lock(&card->memory_mutex);
+ __update_allocated_size(card, bytes);
+ mutex_unlock(&card->memory_mutex);
+}
+
+static void decrease_allocated_size(struct snd_card *card, size_t bytes)
+{
+ mutex_lock(&card->memory_mutex);
+ WARN_ON(card->total_pcm_alloc_bytes < bytes);
+ __update_allocated_size(card, -(ssize_t)bytes);
+ mutex_unlock(&card->memory_mutex);
+}
+
+static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
+ size_t size, struct snd_dma_buffer *dmab)
+{
+ int err;
+
+ /* check and reserve the requested size */
+ mutex_lock(&card->memory_mutex);
+ if (max_alloc_per_card &&
+ card->total_pcm_alloc_bytes + size > max_alloc_per_card) {
+ mutex_unlock(&card->memory_mutex);
+ return -ENOMEM;
+ }
+ __update_allocated_size(card, size);
+ mutex_unlock(&card->memory_mutex);
+
+ err = snd_dma_alloc_pages(type, dev, size, dmab);
+ if (!err) {
+ /* the actual allocation size might be bigger than requested,
+ * and we need to correct the accounting
+ */
+ if (dmab->bytes != size)
+ update_allocated_size(card, dmab->bytes - size);
+ } else {
+ /* take back on allocation failure */
+ decrease_allocated_size(card, size);
+ }
+ return err;
+}
+
+static void do_free_pages(struct snd_card *card, struct snd_dma_buffer *dmab)
+{
+ if (!dmab->area)
+ return;
+ decrease_allocated_size(card, dmab->bytes);
+ snd_dma_free_pages(dmab);
+ dmab->area = NULL;
+}
/*
* try to allocate pages as large as possible.
@@ -36,16 +97,15 @@ static const size_t snd_minimum_buffer = 16384;
static int preallocate_pcm_pages(struct snd_pcm_substream *substream, size_t size)
{
struct snd_dma_buffer *dmab = &substream->dma_buffer;
+ struct snd_card *card = substream->pcm->card;
size_t orig_size = size;
int err;
do {
- if ((err = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev,
- size, dmab)) < 0) {
- if (err != -ENOMEM)
- return err; /* fatal error */
- } else
- return 0;
+ err = do_alloc_pages(card, dmab->dev.type, dmab->dev.dev,
+ size, dmab);
+ if (err != -ENOMEM)
+ return err;
size >>= 1;
} while (size >= snd_minimum_buffer);
dmab->bytes = 0; /* tell error */
@@ -61,10 +121,7 @@ static int preallocate_pcm_pages(struct snd_pcm_substream *substream, size_t siz
*/
static void snd_pcm_lib_preallocate_dma_free(struct snd_pcm_substream *substream)
{
- if (substream->dma_buffer.area == NULL)
- return;
- snd_dma_free_pages(&substream->dma_buffer);
- substream->dma_buffer.area = NULL;
+ do_free_pages(substream->pcm->card, &substream->dma_buffer);
}
/**
@@ -129,6 +186,7 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_pcm_substream *substream = entry->private_data;
+ struct snd_card *card = substream->pcm->card;
char line[64], str[64];
size_t size;
struct snd_dma_buffer new_dmab;
@@ -150,9 +208,10 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
memset(&new_dmab, 0, sizeof(new_dmab));
new_dmab.dev = substream->dma_buffer.dev;
if (size > 0) {
- if (snd_dma_alloc_pages(substream->dma_buffer.dev.type,
- substream->dma_buffer.dev.dev,
- size, &new_dmab) < 0) {
+ if (do_alloc_pages(card,
+ substream->dma_buffer.dev.type,
+ substream->dma_buffer.dev.dev,
+ size, &new_dmab) < 0) {
buffer->error = -ENOMEM;
goto unlock;
}
@@ -161,7 +220,7 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
substream->buffer_bytes_max = UINT_MAX;
}
if (substream->dma_buffer.area)
- snd_dma_free_pages(&substream->dma_buffer);
+ do_free_pages(card, &substream->dma_buffer);
substream->dma_buffer = new_dmab;
} else {
buffer->error = -EINVAL;
@@ -289,6 +348,7 @@ EXPORT_SYMBOL(snd_pcm_sgbuf_ops_page);
*/
int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size)
{
+ struct snd_card *card = substream->pcm->card;
struct snd_pcm_runtime *runtime;
struct snd_dma_buffer *dmab = NULL;
@@ -317,9 +377,10 @@ int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size)
if (! dmab)
return -ENOMEM;
dmab->dev = substream->dma_buffer.dev;
- if (snd_dma_alloc_pages(substream->dma_buffer.dev.type,
- substream->dma_buffer.dev.dev,
- size, dmab) < 0) {
+ if (do_alloc_pages(card,
+ substream->dma_buffer.dev.type,
+ substream->dma_buffer.dev.dev,
+ size, dmab) < 0) {
kfree(dmab);
return -ENOMEM;
}
@@ -348,8 +409,10 @@ int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream)
if (runtime->dma_area == NULL)
return 0;
if (runtime->dma_buffer_p != &substream->dma_buffer) {
+ struct snd_card *card = substream->pcm->card;
+
/* it's a newly allocated buffer. release it now. */
- snd_dma_free_pages(runtime->dma_buffer_p);
+ do_free_pages(card, runtime->dma_buffer_p);
kfree(runtime->dma_buffer_p);
}
snd_pcm_set_runtime_buffer(substream, NULL);
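The ordering in do_alloc_pages() above matters: the requested size is reserved against the per-card cap before allocating, then corrected once the real (possibly larger) size is known, or rolled back on failure. A standalone model of that reserve-then-correct pattern, with all names made up for illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t memory_mutex = PTHREAD_MUTEX_INITIALIZER;
static long total_alloc, max_alloc = 32L * 1024 * 1024;

static int reserve(long size)
{
	int ok;

	pthread_mutex_lock(&memory_mutex);
	ok = total_alloc + size <= max_alloc;
	if (ok)
		total_alloc += size;	/* reserve before allocating */
	pthread_mutex_unlock(&memory_mutex);
	return ok;
}

static void adjust(long delta)
{
	pthread_mutex_lock(&memory_mutex);
	total_alloc += delta;		/* correction, or rollback if negative */
	pthread_mutex_unlock(&memory_mutex);
}

static int alloc_accounted(long size, long actual)
{
	if (!reserve(size))
		return -1;
	/* ...allocation happens here; 'actual' may exceed 'size'... */
	adjust(actual - size);		/* on failure we would adjust(-size) */
	return 0;
}

int main(void)
{
	printf("ret=%d\n", alloc_accounted(4096, 8192));
	printf("total=%ld\n", total_alloc);
	return 0;
}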
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 6a3543b8455f..6b50c3dccc82 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1662,10 +1662,8 @@ static int snd_rawmidi_free(struct snd_rawmidi *rmidi)
snd_info_free_entry(rmidi->proc_entry);
rmidi->proc_entry = NULL;
- mutex_lock(&register_mutex);
if (rmidi->ops && rmidi->ops->dev_unregister)
rmidi->ops->dev_unregister(rmidi);
- mutex_unlock(&register_mutex);
snd_rawmidi_free_substreams(&rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT]);
snd_rawmidi_free_substreams(&rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT]);
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index 2ddfe2226651..be80ce72e0c7 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -37,6 +37,7 @@ struct seq_oss_midi {
struct snd_midi_event *coder; /* MIDI event coder */
struct seq_oss_devinfo *devinfo; /* assigned OSSseq device */
snd_use_lock_t use_lock;
+ struct mutex open_mutex;
};
@@ -171,6 +172,7 @@ snd_seq_oss_midi_check_new_port(struct snd_seq_port_info *pinfo)
mdev->flags = pinfo->capability;
mdev->opened = 0;
snd_use_lock_init(&mdev->use_lock);
+ mutex_init(&mdev->open_mutex);
/* copy and truncate the name of synth device */
strlcpy(mdev->name, pinfo->name, sizeof(mdev->name));
@@ -267,7 +269,9 @@ snd_seq_oss_midi_clear_all(void)
void
snd_seq_oss_midi_setup(struct seq_oss_devinfo *dp)
{
+ spin_lock_irq(&register_lock);
dp->max_mididev = max_midi_devs;
+ spin_unlock_irq(&register_lock);
}
/*
@@ -317,14 +321,16 @@ snd_seq_oss_midi_open(struct seq_oss_devinfo *dp, int dev, int fmode)
int perm;
struct seq_oss_midi *mdev;
struct snd_seq_port_subscribe subs;
+ int err;
if ((mdev = get_mididev(dp, dev)) == NULL)
return -ENODEV;
+ mutex_lock(&mdev->open_mutex);
/* already used? */
if (mdev->opened && mdev->devinfo != dp) {
- snd_use_lock_free(&mdev->use_lock);
- return -EBUSY;
+ err = -EBUSY;
+ goto unlock;
}
perm = 0;
@@ -334,14 +340,14 @@ snd_seq_oss_midi_open(struct seq_oss_devinfo *dp, int dev, int fmode)
perm |= PERM_READ;
perm &= mdev->flags;
if (perm == 0) {
- snd_use_lock_free(&mdev->use_lock);
- return -ENXIO;
+ err = -ENXIO;
+ goto unlock;
}
/* already opened? */
if ((mdev->opened & perm) == perm) {
- snd_use_lock_free(&mdev->use_lock);
- return 0;
+ err = 0;
+ goto unlock;
}
perm &= ~mdev->opened;
@@ -366,13 +372,17 @@ snd_seq_oss_midi_open(struct seq_oss_devinfo *dp, int dev, int fmode)
}
if (! mdev->opened) {
- snd_use_lock_free(&mdev->use_lock);
- return -ENXIO;
+ err = -ENXIO;
+ goto unlock;
}
mdev->devinfo = dp;
+ err = 0;
+
+ unlock:
+ mutex_unlock(&mdev->open_mutex);
snd_use_lock_free(&mdev->use_lock);
- return 0;
+ return err;
}
/*
@@ -386,10 +396,9 @@ snd_seq_oss_midi_close(struct seq_oss_devinfo *dp, int dev)
if ((mdev = get_mididev(dp, dev)) == NULL)
return -ENODEV;
- if (! mdev->opened || mdev->devinfo != dp) {
- snd_use_lock_free(&mdev->use_lock);
- return 0;
- }
+ mutex_lock(&mdev->open_mutex);
+ if (!mdev->opened || mdev->devinfo != dp)
+ goto unlock;
memset(&subs, 0, sizeof(subs));
if (mdev->opened & PERM_WRITE) {
@@ -408,6 +417,8 @@ snd_seq_oss_midi_close(struct seq_oss_devinfo *dp, int dev)
mdev->opened = 0;
mdev->devinfo = NULL;
+ unlock:
+ mutex_unlock(&mdev->open_mutex);
snd_use_lock_free(&mdev->use_lock);
return 0;
}
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index cc93157fa950..0363670a56e7 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -121,13 +121,13 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
spin_unlock_irqrestore(&clients_lock, flags);
#ifdef CONFIG_MODULES
if (!in_interrupt()) {
- static char client_requested[SNDRV_SEQ_GLOBAL_CLIENTS];
- static char card_requested[SNDRV_CARDS];
+ static DECLARE_BITMAP(client_requested, SNDRV_SEQ_GLOBAL_CLIENTS);
+ static DECLARE_BITMAP(card_requested, SNDRV_CARDS);
+
if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) {
int idx;
- if (!client_requested[clientid]) {
- client_requested[clientid] = 1;
+ if (!test_and_set_bit(clientid, client_requested)) {
for (idx = 0; idx < 15; idx++) {
if (seq_client_load[idx] < 0)
break;
@@ -142,10 +142,8 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) /
SNDRV_SEQ_CLIENTS_PER_CARD;
if (card < snd_ecards_limit) {
- if (! card_requested[card]) {
- card_requested[card] = 1;
+ if (!test_and_set_bit(card, card_requested))
snd_request_card(card);
- }
snd_seq_device_load_drivers();
}
}
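The char-array flags replaced above were racy: two tasks could both read 0 and both request the same module. DECLARE_BITMAP plus test_and_set_bit() makes the check-and-mark a single atomic step. A userspace model of the same idea (atomic_exchange plays the role of test_and_set_bit here):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NCARDS 32
static atomic_bool card_requested[NCARDS];

/* Returns true only for the first caller, like !test_and_set_bit(). */
static bool request_once(int card)
{
	return !atomic_exchange(&card_requested[card], true);
}

int main(void)
{
	printf("%d\n", request_once(3));	/* 1: first request triggers load */
	printf("%d\n", request_once(3));	/* 0: duplicate request is skipped */
	return 0;
}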
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 65db1a7c77b7..bb76a2dd0a2f 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -112,15 +112,19 @@ EXPORT_SYMBOL(snd_seq_dump_var_event);
* expand the variable length event to linear buffer space.
*/
-static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
+static int seq_copy_in_kernel(void *ptr, void *src, int size)
{
+ char **bufptr = ptr;
+
memcpy(*bufptr, src, size);
*bufptr += size;
return 0;
}
-static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
+static int seq_copy_in_user(void *ptr, void *src, int size)
{
+ char __user **bufptr = ptr;
+
if (copy_to_user(*bufptr, src, size))
return -EFAULT;
*bufptr += size;
@@ -149,8 +153,7 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char
return newlen;
}
err = snd_seq_dump_var_event(event,
- in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
- (snd_seq_dump_func_t)seq_copy_in_user,
+ in_kernel ? seq_copy_in_kernel : seq_copy_in_user,
&buf);
return err < 0 ? err : newlen;
}
diff --git a/sound/core/seq/seq_midi.c b/sound/core/seq/seq_midi.c
index 6825940ea2cf..a741d1ae6639 100644
--- a/sound/core/seq/seq_midi.c
+++ b/sound/core/seq/seq_midi.c
@@ -111,6 +111,12 @@ static int dump_midi(struct snd_rawmidi_substream *substream, const char *buf, i
return 0;
}
+/* callback for snd_seq_dump_var_event(), bridging to dump_midi() */
+static int __dump_midi(void *ptr, void *buf, int count)
+{
+ return dump_midi(ptr, buf, count);
+}
+
static int event_process_midi(struct snd_seq_event *ev, int direct,
void *private_data, int atomic, int hop)
{
@@ -130,7 +136,7 @@ static int event_process_midi(struct snd_seq_event *ev, int direct,
pr_debug("ALSA: seq_midi: invalid sysex event flags = 0x%x\n", ev->flags);
return 0;
}
- snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)dump_midi, substream);
+ snd_seq_dump_var_event(ev, __dump_midi, substream);
snd_midi_event_reset_decode(msynth->parser);
} else {
if (msynth->parser == NULL)
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 77d7037d1476..82396b8c885a 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -62,6 +62,13 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
/*
* decode input event and put to read buffer of each opened file
*/
+
+/* callback for snd_seq_dump_var_event(), bridging to snd_rawmidi_receive() */
+static int dump_to_rawmidi(void *ptr, void *buf, int count)
+{
+ return snd_rawmidi_receive(ptr, buf, count);
+}
+
static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
struct snd_seq_event *ev,
bool atomic)
@@ -80,7 +87,7 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
continue;
- snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)snd_rawmidi_receive, vmidi->substream);
+ snd_seq_dump_var_event(ev, dump_to_rawmidi, vmidi->substream);
snd_midi_event_reset_decode(vmidi->parser);
} else {
len = snd_midi_event_decode(vmidi->parser, msg, sizeof(msg), ev);
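Casting dump_midi() or snd_rawmidi_receive() to snd_seq_dump_func_t, as the old code did, makes an indirect call through a mismatched function type, which the C standard leaves undefined and control-flow-integrity checking rejects at runtime; the one-line bridges give the callback the exact expected type. A standalone illustration of the wrapper pattern, with invented names:

#include <stdio.h>
#include <string.h>

typedef int (*dump_func_t)(void *ptr, void *buf, int count);

/* the "real" consumer, with a more specific first argument */
static int dump_midi(char *name, void *buf, int count)
{
	printf("%s: %.*s\n", name, count, (char *)buf);
	return 0;
}

/* thin bridge with the exact callback type; no function-pointer cast */
static int __dump_midi(void *ptr, void *buf, int count)
{
	return dump_midi(ptr, buf, count);
}

static int dump_var_event(dump_func_t func, void *arg)
{
	char msg[] = "f0 7e 7f 09 01 f7";

	return func(arg, msg, (int)strlen(msg));
}

int main(void)
{
	return dump_var_event(__dump_midi, "port0");
}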
diff --git a/sound/core/sound_oss.c b/sound/core/sound_oss.c
index 610f317bea9d..99874e80b682 100644
--- a/sound/core/sound_oss.c
+++ b/sound/core/sound_oss.c
@@ -162,7 +162,6 @@ int snd_unregister_oss_device(int type, struct snd_card *card, int dev)
mutex_unlock(&sound_oss_mutex);
return -ENOENT;
}
- unregister_sound_special(minor);
switch (SNDRV_MINOR_OSS_DEVICE(minor)) {
case SNDRV_MINOR_OSS_PCM:
track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_AUDIO);
@@ -174,12 +173,18 @@ int snd_unregister_oss_device(int type, struct snd_card *card, int dev)
track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_DMMIDI1);
break;
}
- if (track2 >= 0) {
- unregister_sound_special(track2);
+ if (track2 >= 0)
snd_oss_minors[track2] = NULL;
- }
snd_oss_minors[minor] = NULL;
mutex_unlock(&sound_oss_mutex);
+
+ /* call unregister_sound_special() outside sound_oss_mutex;
+ * otherwise it may deadlock, as it can trigger the release of a card
+ */
+ unregister_sound_special(minor);
+ if (track2 >= 0)
+ unregister_sound_special(track2);
+
kfree(mptr);
return 0;
}
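The pattern used here is two-phase teardown: detach the entries from the table while holding the mutex, then perform the heavyweight unregistration with no locks held so it can safely take other locks or release the card. A minimal standalone sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *minors[8];

/* may take other locks or release the card, so must run unlocked */
static void unregister_minor(int minor)
{
	printf("unregister %d\n", minor);
}

static void remove_entry(int minor)
{
	/* phase 1: detach from the table under the lock */
	pthread_mutex_lock(&table_lock);
	minors[minor] = NULL;
	pthread_mutex_unlock(&table_lock);

	/* phase 2: heavyweight teardown with no locks held */
	unregister_minor(minor);
}

int main(void)
{
	remove_entry(3);
	return 0;
}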
diff --git a/sound/core/timer.c b/sound/core/timer.c
index d684aa4150aa..420cc07a7f88 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -61,7 +61,7 @@ struct snd_timer_user {
unsigned int filter;
struct timespec tstamp; /* trigger tstamp */
wait_queue_head_t qchange_sleep;
- struct fasync_struct *fasync;
+ struct snd_fasync *fasync;
struct mutex ioctl_lock;
};
@@ -1317,7 +1317,7 @@ static void snd_timer_user_interrupt(struct snd_timer_instance *timeri,
}
__wake:
spin_unlock(&tu->qlock);
- kill_fasync(&tu->fasync, SIGIO, POLL_IN);
+ snd_kill_fasync(tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
@@ -1354,7 +1354,7 @@ static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
spin_lock_irqsave(&tu->qlock, flags);
snd_timer_user_append_to_tqueue(tu, &r1);
spin_unlock_irqrestore(&tu->qlock, flags);
- kill_fasync(&tu->fasync, SIGIO, POLL_IN);
+ snd_kill_fasync(tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
@@ -1421,7 +1421,7 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
spin_unlock(&tu->qlock);
if (append == 0)
return;
- kill_fasync(&tu->fasync, SIGIO, POLL_IN);
+ snd_kill_fasync(tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
@@ -1487,6 +1487,7 @@ static int snd_timer_user_release(struct inode *inode, struct file *file)
if (tu->timeri)
snd_timer_close(tu->timeri);
mutex_unlock(&tu->ioctl_lock);
+ snd_fasync_free(tu->fasync);
kfree(tu->queue);
kfree(tu->tqueue);
kfree(tu);
@@ -2050,7 +2051,7 @@ static int snd_timer_user_fasync(int fd, struct file * file, int on)
struct snd_timer_user *tu;
tu = file->private_data;
- return fasync_helper(fd, file, on, &tu->fasync);
+ return snd_fasync_helper(fd, file, on, &tu->fasync);
}
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 452b9eaca815..474347aba50e 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -463,17 +463,18 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable)
cable->streams[SNDRV_PCM_STREAM_PLAYBACK];
struct loopback_pcm *dpcm_capt =
cable->streams[SNDRV_PCM_STREAM_CAPTURE];
- unsigned long delta_play = 0, delta_capt = 0;
+ unsigned long delta_play = 0, delta_capt = 0, cur_jiffies;
unsigned int running, count1, count2;
+ cur_jiffies = jiffies;
running = cable->running ^ cable->pause;
if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) {
- delta_play = jiffies - dpcm_play->last_jiffies;
+ delta_play = cur_jiffies - dpcm_play->last_jiffies;
dpcm_play->last_jiffies += delta_play;
}
if (running & (1 << SNDRV_PCM_STREAM_CAPTURE)) {
- delta_capt = jiffies - dpcm_capt->last_jiffies;
+ delta_capt = cur_jiffies - dpcm_capt->last_jiffies;
dpcm_capt->last_jiffies += delta_capt;
}
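Reading jiffies twice could yield inconsistent deltas if a tick lands between the two reads; snapshotting it once into cur_jiffies keeps the playback and capture positions referring to the same instant. A userspace analogue of the snapshot-once pattern:

#include <stdio.h>
#include <time.h>

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

int main(void)
{
	static long last_play, last_capt;
	/* read the clock once so both deltas share the same reference */
	long cur = now_ms();
	long delta_play = cur - last_play;
	long delta_capt = cur - last_capt;

	last_play += delta_play;
	last_capt += delta_capt;
	printf("play=%ld capt=%ld\n", delta_play, delta_capt);
	return 0;
}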
diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
index 44776e1463cb..71d0ab1c99b3 100644
--- a/sound/drivers/mts64.c
+++ b/sound/drivers/mts64.c
@@ -816,6 +816,9 @@ static void snd_mts64_interrupt(void *private)
u8 status, data;
struct snd_rawmidi_substream *substream;
+ if (!mts)
+ return;
+
spin_lock(&mts->lock);
ret = mts64_read(mts->pardev->port);
data = ret & 0x00ff;
diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c
index 8f0f05bbc081..ce5bab7425d5 100644
--- a/sound/drivers/pcsp/pcsp_lib.c
+++ b/sound/drivers/pcsp/pcsp_lib.c
@@ -36,7 +36,7 @@ static void pcsp_call_pcm_elapsed(unsigned long priv)
}
}
-static DECLARE_TASKLET(pcsp_pcm_tasklet, pcsp_call_pcm_elapsed, 0);
+static DECLARE_TASKLET_OLD(pcsp_pcm_tasklet, pcsp_call_pcm_elapsed);
/* write the port and returns the next expire time in ns;
* called at the trigger-start and in hrtimer callback
diff --git a/sound/firewire/digi00x/digi00x-stream.c b/sound/firewire/digi00x/digi00x-stream.c
index d6a92460060f..1a841c858e06 100644
--- a/sound/firewire/digi00x/digi00x-stream.c
+++ b/sound/firewire/digi00x/digi00x-stream.c
@@ -259,8 +259,10 @@ int snd_dg00x_stream_init_duplex(struct snd_dg00x *dg00x)
return err;
err = init_stream(dg00x, &dg00x->tx_stream);
- if (err < 0)
+ if (err < 0) {
destroy_stream(dg00x, &dg00x->rx_stream);
+ return err;
+ }
err = amdtp_domain_init(&dg00x->domain);
if (err < 0) {
diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c
index adf69a520b80..89918c6003e6 100644
--- a/sound/firewire/tascam/tascam-stream.c
+++ b/sound/firewire/tascam/tascam-stream.c
@@ -465,7 +465,7 @@ int snd_tscm_stream_start_duplex(struct snd_tscm *tscm, unsigned int rate)
err = amdtp_domain_start(&tscm->domain);
if (err < 0)
- return err;
+ goto error;
if (!amdtp_stream_wait_callback(&tscm->rx_stream,
CALLBACK_TIMEOUT) ||
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index 04f4070fbf36..17b34bb9fecd 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -475,23 +475,6 @@ int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_bus *bus,
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_get_spbmaxfifo);
-
-/**
- * snd_hdac_ext_stop_streams - stop all stream if running
- * @bus: HD-audio core bus
- */
-void snd_hdac_ext_stop_streams(struct hdac_bus *bus)
-{
- struct hdac_stream *stream;
-
- if (bus->chip_init) {
- list_for_each_entry(stream, &bus->stream_list, list)
- snd_hdac_stream_stop(stream);
- snd_hdac_bus_stop_chip(bus);
- }
-}
-EXPORT_SYMBOL_GPL(snd_hdac_ext_stop_streams);
-
/**
* snd_hdac_ext_stream_drsm_enable - enable DMA resume for a stream
* @bus: HD-audio core bus
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index 489f996d86bc..9df0158e89f4 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -607,7 +607,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
int snd_hdac_keep_power_up(struct hdac_device *codec)
{
if (!atomic_inc_not_zero(&codec->in_pm)) {
- int ret = pm_runtime_get_if_in_use(&codec->dev);
+ int ret = pm_runtime_get_if_active(&codec->dev, true);
if (!ret)
return -1;
if (ret < 0)
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index 49780399c284..a035a7d74ce0 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -596,10 +596,9 @@ EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw_once);
*/
void snd_hdac_regmap_sync(struct hdac_device *codec)
{
- if (codec->regmap) {
- mutex_lock(&codec->regmap_lock);
+ mutex_lock(&codec->regmap_lock);
+ if (codec->regmap)
regcache_sync(codec->regmap);
- mutex_unlock(&codec->regmap_lock);
- }
+ mutex_unlock(&codec->regmap_lock);
}
EXPORT_SYMBOL_GPL(snd_hdac_regmap_sync);
diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
index b299b8b7f871..f810f401c1de 100644
--- a/sound/hda/hdac_stream.c
+++ b/sound/hda/hdac_stream.c
@@ -143,6 +143,33 @@ void snd_hdac_stream_stop(struct hdac_stream *azx_dev)
EXPORT_SYMBOL_GPL(snd_hdac_stream_stop);
/**
+ * snd_hdac_stop_streams - stop all streams
+ * @bus: HD-audio core bus
+ */
+void snd_hdac_stop_streams(struct hdac_bus *bus)
+{
+ struct hdac_stream *stream;
+
+ list_for_each_entry(stream, &bus->stream_list, list)
+ snd_hdac_stream_stop(stream);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_stop_streams);
+
+/**
+ * snd_hdac_stop_streams_and_chip - stop all streams and chip if running
+ * @bus: HD-audio core bus
+ */
+void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus)
+{
+ if (bus->chip_init) {
+ snd_hdac_stop_streams(bus);
+ snd_hdac_bus_stop_chip(bus);
+ }
+}
+EXPORT_SYMBOL_GPL(snd_hdac_stop_streams_and_chip);
+
+/**
* snd_hdac_stream_reset - reset a stream
* @azx_dev: HD-audio core stream to reset
*/
@@ -286,8 +313,10 @@ struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
struct hdac_stream *res = NULL;
/* make a non-zero unique key for the substream */
- int key = (substream->pcm->device << 16) | (substream->number << 2) |
- (substream->stream + 1);
+ int key = (substream->number << 2) | (substream->stream + 1);
+
+ if (substream->pcm)
+ key |= (substream->pcm->device << 16);
spin_lock_irq(&bus->reg_lock);
list_for_each_entry(azx_dev, &bus->stream_list, list) {
diff --git a/sound/hda/hdac_sysfs.c b/sound/hda/hdac_sysfs.c
index e56e83325903..bcf302f5115a 100644
--- a/sound/hda/hdac_sysfs.c
+++ b/sound/hda/hdac_sysfs.c
@@ -346,8 +346,10 @@ static int add_widget_node(struct kobject *parent, hda_nid_t nid,
return -ENOMEM;
kobject_init(kobj, &widget_ktype);
err = kobject_add(kobj, parent, "%02x", nid);
- if (err < 0)
+ if (err < 0) {
+ kobject_put(kobj);
return err;
+ }
err = sysfs_create_group(kobj, group);
if (err < 0) {
kobject_put(kobj);
diff --git a/sound/i2c/cs8427.c b/sound/i2c/cs8427.c
index bac4f0036cd6..e8947a07725c 100644
--- a/sound/i2c/cs8427.c
+++ b/sound/i2c/cs8427.c
@@ -553,10 +553,13 @@ int snd_cs8427_iec958_active(struct snd_i2c_device *cs8427, int active)
if (snd_BUG_ON(!cs8427))
return -ENXIO;
chip = cs8427->private_data;
- if (active)
+ if (active) {
memcpy(chip->playback.pcm_status,
chip->playback.def_status, 24);
- chip->playback.pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+ chip->playback.pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+ } else {
+ chip->playback.pcm_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+ }
snd_ctl_notify(cs8427->bus->card,
SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
&chip->playback.pcm_ctl->id);
diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
index 30021ab5e0e9..6a4051bce3a3 100644
--- a/sound/isa/sb/sb16_csp.c
+++ b/sound/isa/sb/sb16_csp.c
@@ -102,7 +102,7 @@ static void info_read(struct snd_info_entry *entry, struct snd_info_buffer *buff
int snd_sb_csp_new(struct snd_sb *chip, int device, struct snd_hwdep ** rhwdep)
{
struct snd_sb_csp *p;
- int uninitialized_var(version);
+ int version;
int err;
struct snd_hwdep *hw;
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 6fb192a94762..418a7a666cf4 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -1945,6 +1945,7 @@ static int snd_ac97_dev_register(struct snd_device *device)
snd_ac97_get_short_name(ac97));
if ((err = device_register(&ac97->dev)) < 0) {
ac97_err(ac97, "Can't register ac97 bus\n");
+ put_device(&ac97->dev);
ac97->dev.bus = NULL;
return err;
}
@@ -2005,10 +2006,9 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
.dev_disconnect = snd_ac97_dev_disconnect,
};
- if (rac97)
- *rac97 = NULL;
- if (snd_BUG_ON(!bus || !template))
+ if (snd_BUG_ON(!bus || !template || !rac97))
return -EINVAL;
+ *rac97 = NULL;
if (snd_BUG_ON(template->num >= 4))
return -EINVAL;
if (bus->codec[template->num])
diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
index 3d6914c64c4a..4cdaeefeb688 100644
--- a/sound/pci/asihpi/hpi6205.c
+++ b/sound/pci/asihpi/hpi6205.c
@@ -430,7 +430,7 @@ void HPI_6205(struct hpi_message *phm, struct hpi_response *phr)
pao = hpi_find_adapter(phm->adapter_index);
} else {
/* subsys messages don't address an adapter */
- _HPI_6205(NULL, phm, phr);
+ phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
return;
}
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 9790f5108a16..5cab049413fc 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -352,7 +352,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
pci_dev->device, pci_dev->subsystem_vendor,
pci_dev->subsystem_device, pci_dev->devfn);
- if (pci_enable_device(pci_dev) < 0) {
+ if (pcim_enable_device(pci_dev) < 0) {
dev_err(&pci_dev->dev,
"pci_enable_device failed, disabling device\n");
return -EIO;
diff --git a/sound/pci/au88x0/au88x0.h b/sound/pci/au88x0/au88x0.h
index 0aa7af049b1b..6cbb2bc4a048 100644
--- a/sound/pci/au88x0/au88x0.h
+++ b/sound/pci/au88x0/au88x0.h
@@ -141,7 +141,7 @@ struct snd_vortex {
#ifndef CHIP_AU8810
stream_t dma_wt[NR_WT];
wt_voice_t wt_voice[NR_WT]; /* WT register cache. */
- char mixwt[(NR_WT / NR_WTPB) * 6]; /* WT mixin objects */
+ s8 mixwt[(NR_WT / NR_WTPB) * 6]; /* WT mixin objects */
#endif
/* Global resources */
@@ -235,8 +235,8 @@ static int vortex_alsafmt_aspfmt(snd_pcm_format_t alsafmt, vortex_t *v);
static void vortex_connect_default(vortex_t * vortex, int en);
static int vortex_adb_allocroute(vortex_t * vortex, int dma, int nr_ch,
int dir, int type, int subdev);
-static char vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out,
- int restype);
+static int vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out,
+ int restype);
#ifndef CHIP_AU8810
static int vortex_wt_allocroute(vortex_t * vortex, int dma, int nr_ch);
static void vortex_wt_connect(vortex_t * vortex, int en);
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index ce0564c5392a..73a7ef8334c1 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -1998,7 +1998,7 @@ static int resnum[VORTEX_RESOURCE_LAST] =
out: checkout the resource if != 0; otherwise check it in.
restype: Indicates type of resource to be checked in or out.
*/
-static char
+static int
vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, int restype)
{
int i, qty = resnum[restype], resinuse = 0;
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index e053f0d58bdd..2f3cfcfcdb9a 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -1536,14 +1536,8 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
gpr += 2;
/* Master volume (will be renamed later) */
- A_OP(icode, &ptr, iMAC0, A_GPR(playback+0+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+0+SND_EMU10K1_PLAYBACK_CHANNELS));
- A_OP(icode, &ptr, iMAC0, A_GPR(playback+1+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+1+SND_EMU10K1_PLAYBACK_CHANNELS));
- A_OP(icode, &ptr, iMAC0, A_GPR(playback+2+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+2+SND_EMU10K1_PLAYBACK_CHANNELS));
- A_OP(icode, &ptr, iMAC0, A_GPR(playback+3+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+3+SND_EMU10K1_PLAYBACK_CHANNELS));
- A_OP(icode, &ptr, iMAC0, A_GPR(playback+4+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+4+SND_EMU10K1_PLAYBACK_CHANNELS));
- A_OP(icode, &ptr, iMAC0, A_GPR(playback+5+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+5+SND_EMU10K1_PLAYBACK_CHANNELS));
- A_OP(icode, &ptr, iMAC0, A_GPR(playback+6+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+6+SND_EMU10K1_PLAYBACK_CHANNELS));
- A_OP(icode, &ptr, iMAC0, A_GPR(playback+7+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+7+SND_EMU10K1_PLAYBACK_CHANNELS));
+ for (z = 0; z < 8; z++)
+ A_OP(icode, &ptr, iMAC0, A_GPR(playback+z+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+z+SND_EMU10K1_PLAYBACK_CHANNELS));
snd_emu10k1_init_mono_control(&controls[nctl++], "Wave Master Playback Volume", gpr, 0);
gpr += 2;
@@ -1627,102 +1621,14 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
dev_dbg(emu->card->dev, "emufx.c: gpr=0x%x, tmp=0x%x\n",
gpr, tmp);
*/
- /* For the EMU1010: How to get 32bit values from the DSP. High 16bits into L, low 16bits into R. */
- /* A_P16VIN(0) is delayed by one sample,
- * so all other A_P16VIN channels will need to also be delayed
- */
- /* Left ADC in. 1 of 2 */
snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_P16VIN(0x0), A_FXBUS2(0) );
- /* Right ADC in 1 of 2 */
- gpr_map[gpr++] = 0x00000000;
- /* Delaying by one sample: instead of copying the input
- * value A_P16VIN to output A_FXBUS2 as in the first channel,
- * we use an auxiliary register, delaying the value by one
- * sample
- */
- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(2) );
- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x1), A_C_00000000, A_C_00000000);
- gpr_map[gpr++] = 0x00000000;
- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(4) );
- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x2), A_C_00000000, A_C_00000000);
- gpr_map[gpr++] = 0x00000000;
- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(6) );
- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x3), A_C_00000000, A_C_00000000);
- /* For 96kHz mode */
- /* Left ADC in. 2 of 2 */
- gpr_map[gpr++] = 0x00000000;
- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0x8) );
- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x4), A_C_00000000, A_C_00000000);
- /* Right ADC in 2 of 2 */
- gpr_map[gpr++] = 0x00000000;
- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xa) );
- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x5), A_C_00000000, A_C_00000000);
- gpr_map[gpr++] = 0x00000000;
- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xc) );
- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x6), A_C_00000000, A_C_00000000);
- gpr_map[gpr++] = 0x00000000;
- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xe) );
- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x7), A_C_00000000, A_C_00000000);
- /* Pavel Hofman - we still have voices, A_FXBUS2s, and
- * A_P16VINs available -
- * let's add 8 more capture channels - total of 16
- */
- gpr_map[gpr++] = 0x00000000;
- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
- bit_shifter16,
- A_GPR(gpr - 1),
- A_FXBUS2(0x10));
- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x8),
- A_C_00000000, A_C_00000000);
- gpr_map[gpr++] = 0x00000000;
- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
- bit_shifter16,
- A_GPR(gpr - 1),
- A_FXBUS2(0x12));
- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x9),
- A_C_00000000, A_C_00000000);
- gpr_map[gpr++] = 0x00000000;
- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
- bit_shifter16,
- A_GPR(gpr - 1),
- A_FXBUS2(0x14));
- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xa),
- A_C_00000000, A_C_00000000);
- gpr_map[gpr++] = 0x00000000;
- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
- bit_shifter16,
- A_GPR(gpr - 1),
- A_FXBUS2(0x16));
- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xb),
- A_C_00000000, A_C_00000000);
- gpr_map[gpr++] = 0x00000000;
- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
- bit_shifter16,
- A_GPR(gpr - 1),
- A_FXBUS2(0x18));
- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xc),
- A_C_00000000, A_C_00000000);
- gpr_map[gpr++] = 0x00000000;
- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
- bit_shifter16,
- A_GPR(gpr - 1),
- A_FXBUS2(0x1a));
- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xd),
- A_C_00000000, A_C_00000000);
- gpr_map[gpr++] = 0x00000000;
- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
- bit_shifter16,
- A_GPR(gpr - 1),
- A_FXBUS2(0x1c));
- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xe),
- A_C_00000000, A_C_00000000);
- gpr_map[gpr++] = 0x00000000;
- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
- bit_shifter16,
- A_GPR(gpr - 1),
- A_FXBUS2(0x1e));
- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xf),
- A_C_00000000, A_C_00000000);
+ /* A_P16VIN(0) is delayed by one sample, so all other A_P16VIN channels
+ * need to be delayed as well; we use an auxiliary register for that. */
+ for (z = 1; z < 0x10; z++) {
+ snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr), A_FXBUS2(z * 2) );
+ A_OP(icode, &ptr, iACC3, A_GPR(gpr), A_P16VIN(z), A_C_00000000, A_C_00000000);
+ gpr_map[gpr++] = 0x00000000;
+ }
}
#if 0
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 6530a55fb878..81590de8d78b 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -123,7 +123,7 @@ static int snd_emu10k1_pcm_channel_alloc(struct snd_emu10k1_pcm * epcm, int voic
epcm->voices[0]->epcm = epcm;
if (voices > 1) {
for (i = 1; i < voices; i++) {
- epcm->voices[i] = &epcm->emu->voices[epcm->voices[0]->number + i];
+ epcm->voices[i] = &epcm->emu->voices[(epcm->voices[0]->number + i) % NUM_G];
epcm->voices[i]->epcm = epcm;
}
}
@@ -1244,7 +1244,7 @@ static int snd_emu10k1_capture_mic_close(struct snd_pcm_substream *substream)
{
struct snd_emu10k1 *emu = snd_pcm_substream_chip(substream);
- emu->capture_interrupt = NULL;
+ emu->capture_mic_interrupt = NULL;
emu->pcm_capture_mic_substream = NULL;
return 0;
}
@@ -1352,7 +1352,7 @@ static int snd_emu10k1_capture_efx_close(struct snd_pcm_substream *substream)
{
struct snd_emu10k1 *emu = snd_pcm_substream_chip(substream);
- emu->capture_interrupt = NULL;
+ emu->capture_efx_interrupt = NULL;
emu->pcm_capture_efx_substream = NULL;
return 0;
}
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index c6e1e03a5e4d..5a50b6c1d604 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -118,6 +118,12 @@ static int snd_hda_beep_event(struct input_dev *dev, unsigned int type,
return 0;
}
+static void turn_on_beep(struct hda_beep *beep)
+{
+ if (beep->keep_power_at_enable)
+ snd_hda_power_up_pm(beep->codec);
+}
+
static void turn_off_beep(struct hda_beep *beep)
{
cancel_work_sync(&beep->beep_work);
@@ -125,6 +131,8 @@ static void turn_off_beep(struct hda_beep *beep)
/* turn off beep */
generate_tone(beep, 0);
}
+ if (beep->keep_power_at_enable)
+ snd_hda_power_down_pm(beep->codec);
}
/**
@@ -140,7 +148,9 @@ int snd_hda_enable_beep_device(struct hda_codec *codec, int enable)
enable = !!enable;
if (beep->enabled != enable) {
beep->enabled = enable;
- if (!enable)
+ if (enable)
+ turn_on_beep(beep);
+ else
turn_off_beep(beep);
return 1;
}
@@ -167,7 +177,8 @@ static int beep_dev_disconnect(struct snd_device *device)
input_unregister_device(beep->dev);
else
input_free_device(beep->dev);
- turn_off_beep(beep);
+ if (beep->enabled)
+ turn_off_beep(beep);
return 0;
}
diff --git a/sound/pci/hda/hda_beep.h b/sound/pci/hda/hda_beep.h
index a25358a4807a..db76e3ddba65 100644
--- a/sound/pci/hda/hda_beep.h
+++ b/sound/pci/hda/hda_beep.h
@@ -25,6 +25,7 @@ struct hda_beep {
unsigned int enabled:1;
unsigned int linear_tone:1; /* linear tone for IDT/STAC codec */
unsigned int playing:1;
+ unsigned int keep_power_at_enable:1; /* set by driver */
struct work_struct beep_work; /* scheduled task for beep event */
struct mutex mutex;
void (*power_hook)(struct hda_beep *beep, bool on);
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 6a159c6c2f54..6dff68691dff 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1093,10 +1093,8 @@ EXPORT_SYMBOL_GPL(azx_init_chip);
void azx_stop_all_streams(struct azx *chip)
{
struct hdac_bus *bus = azx_bus(chip);
- struct hdac_stream *s;
- list_for_each_entry(s, &bus->stream_list, list)
- snd_hdac_stream_stop(s);
+ snd_hdac_stop_streams(bus);
}
EXPORT_SYMBOL_GPL(azx_stop_all_streams);
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index e92fcb150e57..77c0abd252eb 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -1147,8 +1147,8 @@ static bool path_has_mixer(struct hda_codec *codec, int path_idx, int ctl_type)
return path && path->ctls[ctl_type];
}
-static const char * const channel_name[4] = {
- "Front", "Surround", "CLFE", "Side"
+static const char * const channel_name[] = {
+ "Front", "Surround", "CLFE", "Side", "Back",
};
/* give some appropriate ctl name prefix for the given line out channel */
@@ -1174,7 +1174,7 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
/* multi-io channels */
if (ch >= cfg->line_outs)
- return channel_name[ch];
+ goto fixed_name;
switch (cfg->line_out_type) {
case AUTO_PIN_SPEAKER_OUT:
@@ -1226,6 +1226,7 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
if (cfg->line_outs == 1 && !spec->multi_ios)
return "Line Out";
+ fixed_name:
if (ch >= ARRAY_SIZE(channel_name)) {
snd_BUG();
return "PCM";
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b8fe0ec5d624..cc9b8b6595c8 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -363,10 +363,15 @@ enum {
#define needs_eld_notify_link(chip) false
#endif
-#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
+#define CONTROLLER_IN_GPU(pci) (((pci)->vendor == 0x8086) && \
+ (((pci)->device == 0x0a0c) || \
((pci)->device == 0x0c0c) || \
((pci)->device == 0x0d0c) || \
- ((pci)->device == 0x160c))
+ ((pci)->device == 0x160c) || \
+ ((pci)->device == 0x490d) || \
+ ((pci)->device == 0x4f90) || \
+ ((pci)->device == 0x4f91) || \
+ ((pci)->device == 0x4f92)))
#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
@@ -2232,12 +2237,15 @@ static struct snd_pci_quirk power_save_blacklist[] = {
SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
/* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
+ SND_PCI_QUIRK(0x17aa, 0x316e, "Lenovo ThinkCentre M70q", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1689623 */
SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
/* https://bugs.launchpad.net/bugs/1821663 */
SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
+ /* KONTRON SinglePC may cause a stall at runtime resume */
+ SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0),
{}
};
#endif /* CONFIG_PM */
@@ -2501,11 +2509,26 @@ static const struct pci_device_id azx_ids[] = {
/* Tigerlake-H */
{ PCI_DEVICE(0x8086, 0x43c8),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* DG1 */
+ { PCI_DEVICE(0x8086, 0x490d),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* DG2 */
+ { PCI_DEVICE(0x8086, 0x4f90),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ { PCI_DEVICE(0x8086, 0x4f91),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ { PCI_DEVICE(0x8086, 0x4f92),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* Alderlake-S */
+ { PCI_DEVICE(0x8086, 0x7ad0),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Elkhart Lake */
{ PCI_DEVICE(0x8086, 0x4b55),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
{ PCI_DEVICE(0x8086, 0x4b58),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* Arrow Lake */
+ { PCI_DEVICE_DATA(INTEL, HDA_ARL, AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE) },
/* Broxton-P(Apollolake) */
{ PCI_DEVICE(0x8086, 0x5a98),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON },
@@ -2528,9 +2551,12 @@ static const struct pci_device_id azx_ids[] = {
/* 5 Series/3400 */
{ PCI_DEVICE(0x8086, 0x3b56),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
+ { PCI_DEVICE(0x8086, 0x3b57),
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
/* Poulsbo */
{ PCI_DEVICE(0x8086, 0x811b),
- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE },
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE |
+ AZX_DCAPS_POSFIX_LPIB },
/* Oaktrail */
{ PCI_DEVICE(0x8086, 0x080a),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE },
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 2971b34c87c1..e235c3ec634d 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -428,7 +428,8 @@ MODULE_DEVICE_TABLE(of, hda_tegra_match);
static int hda_tegra_probe(struct platform_device *pdev)
{
const unsigned int driver_flags = AZX_DCAPS_CORBRP_SELF_CLEAR |
- AZX_DCAPS_PM_RUNTIME;
+ AZX_DCAPS_PM_RUNTIME |
+ AZX_DCAPS_4K_BDLE_BOUNDARY;
struct snd_card *card;
struct azx *chip;
struct hda_tegra *hda;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 9412bdda85c8..40f50571ad63 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1182,6 +1182,8 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
SND_PCI_QUIRK(0x3842, 0x1038, "EVGA X99 Classified", QUIRK_R3DI),
+ SND_PCI_QUIRK(0x3842, 0x104b, "EVGA X299 Dark", QUIRK_R3DI),
+ SND_PCI_QUIRK(0x3842, 0x1055, "EVGA Z390 DARK", QUIRK_R3DI),
SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5),
@@ -2046,7 +2048,7 @@ static int dspio_set_uint_param_no_source(struct hda_codec *codec, int mod_id,
static int dspio_alloc_dma_chan(struct hda_codec *codec, unsigned int *dma_chan)
{
int status = 0;
- unsigned int size = sizeof(dma_chan);
+ unsigned int size = sizeof(*dma_chan);
codec_dbg(codec, " dspio_alloc_dma_chan() -- begin\n");
status = dspio_scp(codec, MASTERCONTROL, 0x20,
@@ -3842,8 +3844,10 @@ static int tuning_ctl_set(struct hda_codec *codec, hda_nid_t nid,
for (i = 0; i < TUNING_CTLS_COUNT; i++)
if (nid == ca0132_tuning_ctls[i].nid)
- break;
+ goto found;
+ return -EINVAL;
+found:
snd_hda_power_up(codec);
dspio_set_param(codec, ca0132_tuning_ctls[i].mid, 0x20,
ca0132_tuning_ctls[i].req,
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index f46204ab0b90..c10a264e9567 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -396,6 +396,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
/* codec SSID */
SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122),
+ SND_PCI_QUIRK(0x106b, 0x0900, "iMac 12,1", CS420X_IMAC27_122),
SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index d4bfb1149327..ba70e053c3a2 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -197,6 +197,7 @@ enum {
CXT_PINCFG_LEMOTE_A1205,
CXT_PINCFG_COMPAQ_CQ60,
CXT_FIXUP_STEREO_DMIC,
+ CXT_PINCFG_LENOVO_NOTEBOOK,
CXT_FIXUP_INC_MIC_BOOST,
CXT_FIXUP_HEADPHONE_MIC_PIN,
CXT_FIXUP_HEADPHONE_MIC,
@@ -215,6 +216,7 @@ enum {
CXT_FIXUP_MUTE_LED_GPIO,
CXT_FIXUP_HEADSET_MIC,
CXT_FIXUP_HP_MIC_NO_PRESENCE,
+ CXT_PINCFG_SWS_JS201D,
};
/* for hda_fixup_thinkpad_acpi() */
@@ -703,6 +705,17 @@ static const struct hda_pintbl cxt_pincfg_lemote[] = {
{}
};
+/* SuoWoSi/South-holding JS201D with sn6140 */
+static const struct hda_pintbl cxt_pincfg_sws_js201d[] = {
+ { 0x16, 0x03211040 }, /* hp out */
+ { 0x17, 0x91170110 }, /* SPK/Class_D */
+ { 0x18, 0x95a70130 }, /* Internal mic */
+ { 0x19, 0x03a11020 }, /* Headset Mic */
+ { 0x1a, 0x40f001f0 }, /* Not used */
+ { 0x21, 0x40f001f0 }, /* Not used */
+ {}
+};
+
static const struct hda_fixup cxt_fixups[] = {
[CXT_PINCFG_LENOVO_X200] = {
.type = HDA_FIXUP_PINS,
@@ -737,6 +750,14 @@ static const struct hda_fixup cxt_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = cxt_fixup_stereo_dmic,
},
+ [CXT_PINCFG_LENOVO_NOTEBOOK] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1a, 0x05d71030 },
+ { }
+ },
+ .chain_id = CXT_FIXUP_STEREO_DMIC,
+ },
[CXT_FIXUP_INC_MIC_BOOST] = {
.type = HDA_FIXUP_FUNC,
.v.func = cxt5066_increase_mic_boost,
@@ -846,6 +867,10 @@ static const struct hda_fixup cxt_fixups[] = {
.chained = true,
.chain_id = CXT_FIXUP_HEADSET_MIC,
},
+ [CXT_PINCFG_SWS_JS201D] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = cxt_pincfg_sws_js201d,
+ },
};
static const struct snd_pci_quirk cxt5045_fixups[] = {
@@ -917,6 +942,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
+ SND_PCI_QUIRK(0x14f1, 0x0265, "SWS JS201D", CXT_PINCFG_SWS_JS201D),
SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
@@ -930,6 +956,9 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x17aa, 0x3905, "Lenovo G50-30", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
+ /* NOTE: we'd need to extend the quirk for 17aa:3977 as the same
+ * PCI SSID is used on multiple Lenovo models
+ */
SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
@@ -952,6 +981,8 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
{ .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" },
{ .id = CXT_FIXUP_MUTE_LED_GPIO, .name = "mute-led-gpio" },
{ .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" },
+ { .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" },
+ { .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" },
{}
};
@@ -1084,6 +1115,7 @@ static const struct hda_device_id snd_hda_id_conexant[] = {
HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto),
+ HDA_CODEC_ENTRY(0x14f120d1, "SN6180", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 5128a5df16fd..ff81e6051773 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1820,7 +1820,13 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
static const struct snd_pci_quirk force_connect_list[] = {
SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
+ SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
+ SND_PCI_QUIRK(0x103c, 0x8715, "HP", 1),
+ SND_PCI_QUIRK(0x1043, 0x86ae, "ASUS", 1), /* Z170 PRO */
+ SND_PCI_QUIRK(0x1043, 0x86c7, "ASUS", 1), /* Z170M PLUS */
SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
+ SND_PCI_QUIRK(0x8086, 0x2060, "Intel NUC5CPYB", 1),
+ SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1),
{}
};
@@ -2570,9 +2576,6 @@ static void generic_acomp_pin_eld_notify(void *audio_ptr, int port, int dev_id)
*/
if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
return;
- /* ditto during suspend/resume process itself */
- if (snd_hdac_is_in_pm(&codec->core))
- return;
check_presence_and_report(codec, pin_nid, dev_id);
}
@@ -2775,9 +2778,6 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
*/
if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
return;
- /* ditto during suspend/resume process itself */
- if (snd_hdac_is_in_pm(&codec->core))
- return;
snd_hdac_i915_set_bclk(&codec->bus->core);
check_presence_and_report(codec, pin_nid, dev_id);
@@ -3703,6 +3703,7 @@ static int patch_tegra_hdmi(struct hda_codec *codec)
if (err)
return err;
+ codec->depop_delay = 10;
codec->patch_ops.build_pcms = tegra_hdmi_build_pcms;
spec = codec->spec;
spec->chmap.ops.chmap_cea_alloc_validate_get_type =
@@ -4200,6 +4201,11 @@ HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a3, "GPU a3 HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a4, "GPU a4 HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a5, "GPU a5 HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a6, "GPU a6 HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a7, "GPU a7 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch),
HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi),
@@ -4223,7 +4229,10 @@ HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi),
HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI", patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 145e494f776a..893b9fde59ac 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -770,7 +770,7 @@ do_sku:
alc_setup_gpio(codec, 0x02);
break;
case 7:
- alc_setup_gpio(codec, 0x03);
+ alc_setup_gpio(codec, 0x04);
break;
case 5:
default:
@@ -1935,6 +1935,7 @@ enum {
ALC887_FIXUP_ASUS_AUDIO,
ALC887_FIXUP_ASUS_HMIC,
ALCS1200A_FIXUP_MIC_VREF,
+ ALC888VD_FIXUP_MIC_100VREF,
};
static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2488,6 +2489,13 @@ static const struct hda_fixup alc882_fixups[] = {
{}
}
},
+ [ALC888VD_FIXUP_MIC_100VREF] = {
+ .type = HDA_FIXUP_PINCTLS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, PIN_VREF100 }, /* headset mic */
+ {}
+ }
+ },
};
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2557,6 +2565,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x10ec, 0x12d8, "iBase Elo Touch", ALC888VD_FIXUP_MIC_100VREF),
SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
@@ -2574,6 +2583,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
+ SND_PCI_QUIRK(0x1558, 0x3702, "Clevo X370SN[VW]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
@@ -3197,6 +3207,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
case 0x10ec0230:
case 0x10ec0236:
case 0x10ec0256:
+ case 0x10ec0257:
case 0x19e58326:
alc_write_coef_idx(codec, 0x48, 0x0);
alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
@@ -3226,6 +3237,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
case 0x10ec0230:
case 0x10ec0236:
case 0x10ec0256:
+ case 0x10ec0257:
case 0x19e58326:
alc_write_coef_idx(codec, 0x48, 0xd011);
alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
@@ -3503,6 +3515,15 @@ static void alc256_init(struct hda_codec *codec)
hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
+ if (spec->ultra_low_power) {
+ alc_update_coef_idx(codec, 0x03, 1<<1, 1<<1);
+ alc_update_coef_idx(codec, 0x08, 3<<2, 3<<2);
+ alc_update_coef_idx(codec, 0x08, 7<<4, 0);
+ alc_update_coef_idx(codec, 0x3b, 1<<15, 0);
+ alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
+ msleep(30);
+ }
+
if (!hp_pin)
hp_pin = 0x21;
@@ -3514,14 +3535,6 @@ static void alc256_init(struct hda_codec *codec)
msleep(2);
alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
- if (spec->ultra_low_power) {
- alc_update_coef_idx(codec, 0x03, 1<<1, 1<<1);
- alc_update_coef_idx(codec, 0x08, 3<<2, 3<<2);
- alc_update_coef_idx(codec, 0x08, 7<<4, 0);
- alc_update_coef_idx(codec, 0x3b, 1<<15, 0);
- alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
- msleep(30);
- }
snd_hda_codec_write(codec, hp_pin, 0,
AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
@@ -3603,6 +3616,13 @@ static void alc225_init(struct hda_codec *codec)
hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp1_pin_sense, hp2_pin_sense;
+ if (spec->ultra_low_power) {
+ alc_update_coef_idx(codec, 0x08, 0x0f << 2, 3<<2);
+ alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
+ alc_update_coef_idx(codec, 0x33, 1<<11, 0);
+ msleep(30);
+ }
+
if (!hp_pin)
hp_pin = 0x21;
msleep(30);
@@ -3614,12 +3634,6 @@ static void alc225_init(struct hda_codec *codec)
msleep(2);
alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
- if (spec->ultra_low_power) {
- alc_update_coef_idx(codec, 0x08, 0x0f << 2, 3<<2);
- alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
- alc_update_coef_idx(codec, 0x33, 1<<11, 0);
- msleep(30);
- }
if (hp1_pin_sense || spec->ultra_low_power)
snd_hda_codec_write(codec, hp_pin, 0,
@@ -4312,7 +4326,11 @@ static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
static void alc285_fixup_hp_gpio_led(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
- alc_fixup_hp_gpio_led(codec, action, 0x04, 0x00);
+ struct alc_spec *spec = codec->spec;
+
+ spec->micmute_led_polarity = 1;
+
+ alc_fixup_hp_gpio_led(codec, action, 0x04, 0x01);
}
static void alc286_fixup_hp_gpio_led(struct hda_codec *codec,
@@ -6173,6 +6191,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
+ case 0x10ec0257:
case 0x19e58326:
alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
@@ -6370,6 +6389,7 @@ enum {
ALC269_FIXUP_LIMIT_INT_MIC_BOOST,
ALC269VB_FIXUP_ASUS_ZENBOOK,
ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A,
+ ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE,
ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED,
ALC269VB_FIXUP_ORDISSIMO_EVE2,
ALC283_FIXUP_CHROME_BOOK,
@@ -6382,6 +6402,7 @@ enum {
ALC290_FIXUP_SUBWOOFER_HSJACK,
ALC269_FIXUP_THINKPAD_ACPI,
ALC269_FIXUP_DMIC_THINKPAD_ACPI,
+ ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO,
ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
ALC255_FIXUP_ASUS_MIC_NO_PRESENCE,
ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -6483,6 +6504,8 @@ enum {
ALC294_FIXUP_ASUS_GU502_HP,
ALC294_FIXUP_ASUS_GU502_PINS,
ALC294_FIXUP_ASUS_GU502_VERBS,
+ ALC294_FIXUP_ASUS_G513_PINS,
+ ALC285_FIXUP_ASUS_G533Z_PINS,
ALC285_FIXUP_HP_GPIO_LED,
ALC285_FIXUP_HP_MUTE_LED,
ALC236_FIXUP_HP_GPIO_LED,
@@ -6627,6 +6650,14 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc269_fixup_pincfg_U7x7_headset_mic,
},
+ [ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, 0x03a19020 }, /* headset mic */
+ { 0x1b, 0x90170150 }, /* speaker */
+ { }
+ },
+ },
[ALC269_FIXUP_AMIC] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -6901,6 +6932,15 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269VB_FIXUP_ASUS_ZENBOOK,
},
+ [ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, 0x01a110f0 }, /* use as headset mic */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MIC
+ },
[ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc269_fixup_limit_int_mic_boost,
@@ -7751,6 +7791,26 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc294_fixup_gu502_hp,
},
+ [ALC294_FIXUP_ASUS_G513_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x03a11050 }, /* front HP mic */
+ { 0x1a, 0x03a11c30 }, /* rear external mic */
+ { 0x21, 0x03211420 }, /* front HP out */
+ { }
+ },
+ },
+ [ALC285_FIXUP_ASUS_G533Z_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x14, 0x90170152 }, /* Speaker Surround Playback Switch */
+ { 0x19, 0x03a19020 }, /* Mic Boost Volume */
+ { 0x1a, 0x03a11c30 }, /* Mic Boost Volume */
+ { 0x1e, 0x90170151 }, /* Rear jack, IN OUT EAPD Detect */
+ { 0x21, 0x03211420 },
+ { }
+ },
+ },
[ALC294_FIXUP_ASUS_COEF_1B] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -8207,6 +8267,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK),
SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
@@ -8215,13 +8276,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+ SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
- SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -8236,14 +8299,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+ SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
- SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
- SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
@@ -8265,6 +8332,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
@@ -8414,6 +8482,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
+ SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
@@ -8425,6 +8494,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
+ SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
@@ -9738,6 +9809,17 @@ static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
}
}
+static void alc897_fixup_lenovo_headset_mode(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
+ spec->gen.hp_automute_hook = alc897_hp_automute_hook;
+ }
+}
+
static const struct coef_fw alc668_coefs[] = {
WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0),
WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80),
@@ -9821,6 +9903,9 @@ enum {
ALC897_FIXUP_LENOVO_HEADSET_MIC,
ALC897_FIXUP_HEADSET_MIC_PIN,
ALC897_FIXUP_HP_HSMIC_VERB,
+ ALC897_FIXUP_LENOVO_HEADSET_MODE,
+ ALC897_FIXUP_HEADSET_MIC_PIN2,
+ ALC897_FIXUP_UNIS_H3C_X500S,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -10247,6 +10332,26 @@ static const struct hda_fixup alc662_fixups[] = {
{ }
},
},
+ [ALC897_FIXUP_LENOVO_HEADSET_MODE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc897_fixup_lenovo_headset_mode,
+ },
+ [ALC897_FIXUP_HEADSET_MIC_PIN2] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1a, 0x01a11140 }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE
+ },
+ [ALC897_FIXUP_UNIS_H3C_X500S] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ { 0x14, AC_VERB_SET_EAPD_BTLENABLE, 0 },
+ {}
+ },
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -10272,8 +10377,11 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+ SND_PCI_QUIRK(0x103c, 0x870c, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
+ SND_PCI_QUIRK(0x103c, 0x872b, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
+ SND_PCI_QUIRK(0x103c, 0x8768, "HP Slim Desktop S01", ALC671_FIXUP_HP_HEADSET_MIC2),
SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2),
SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2),
SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
@@ -10295,10 +10403,15 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x1064, "Lenovo P3 Tower", ALC897_FIXUP_HEADSET_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x3321, "Lenovo ThinkCentre M70 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x331b, "Lenovo ThinkCentre M90 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x3364, "Lenovo ThinkCentre M90 Gen5", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
@@ -10306,6 +10419,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
SND_PCI_QUIRK(0x1b35, 0x1234, "CZC ET26", ALC662_FIXUP_CZC_ET26),
SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
+ SND_PCI_QUIRK(0x1c6c, 0x1239, "Compaq N14JP6-V2", ALC897_FIXUP_HP_HSMIC_VERB),
#if 0
/* Below is a quirk table taken from the old code.
@@ -10400,6 +10514,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
{.id = ALC662_FIXUP_USI_HEADSET_MODE, .name = "usi-headset"},
{.id = ALC662_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
{.id = ALC669_FIXUP_ACER_ASPIRE_ETHOS, .name = "aspire-ethos"},
+ {.id = ALC897_FIXUP_UNIS_H3C_X500S, .name = "unis-h3c-x500s"},
{}
};
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index bfd3fe5eff31..7fa238e9a782 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -209,6 +209,7 @@ struct sigmatel_spec {
/* beep widgets */
hda_nid_t anabeep_nid;
+ bool beep_power_on;
/* SPDIF-out mux */
const char * const *spdif_labels;
@@ -1700,6 +1701,7 @@ static const struct snd_pci_quirk stac925x_fixup_tbl[] = {
};
static const struct hda_pintbl ref92hd73xx_pin_configs[] = {
+ // Port A-H
{ 0x0a, 0x02214030 },
{ 0x0b, 0x02a19040 },
{ 0x0c, 0x01a19020 },
@@ -1708,9 +1710,12 @@ static const struct hda_pintbl ref92hd73xx_pin_configs[] = {
{ 0x0f, 0x01014010 },
{ 0x10, 0x01014020 },
{ 0x11, 0x01014030 },
+ // CD in
{ 0x12, 0x02319040 },
+ // Digital Mic ins
{ 0x13, 0x90a000f0 },
{ 0x14, 0x90a000f0 },
+ // Digital outs
{ 0x22, 0x01452050 },
{ 0x23, 0x01452050 },
{}
@@ -1751,6 +1756,7 @@ static const struct hda_pintbl alienware_m17x_pin_configs[] = {
};
static const struct hda_pintbl intel_dg45id_pin_configs[] = {
+ // Analog outputs
{ 0x0a, 0x02214230 },
{ 0x0b, 0x02A19240 },
{ 0x0c, 0x01013214 },
@@ -1758,6 +1764,9 @@ static const struct hda_pintbl intel_dg45id_pin_configs[] = {
{ 0x0e, 0x01A19250 },
{ 0x0f, 0x01011212 },
{ 0x10, 0x01016211 },
+ // Digital output
+ { 0x22, 0x01451380 },
+ { 0x23, 0x40f000f0 },
{}
};
@@ -1948,6 +1957,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
"DFI LanParty", STAC_92HD73XX_REF),
SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
"DFI LanParty", STAC_92HD73XX_REF),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5001,
+ "Intel DP45SG", STAC_92HD73XX_INTEL),
SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5002,
"Intel DG45ID", STAC_92HD73XX_INTEL),
SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5003,
@@ -4301,6 +4312,8 @@ static int stac_parse_auto_config(struct hda_codec *codec)
if (codec->beep) {
/* IDT/STAC codecs have linear beep tone parameter */
codec->beep->linear_tone = spec->linear_tone_beep;
+ /* keep power up while beep is enabled */
+ codec->beep->keep_power_at_enable = 1;
/* if no beep switch is available, make its own one */
caps = query_amp_caps(codec, nid, HDA_OUTPUT);
if (!(caps & AC_AMPCAP_MUTE)) {
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 3edb4e25797d..4a74ccf7cf3e 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -821,6 +821,9 @@ static int add_secret_dac_path(struct hda_codec *codec)
return 0;
nums = snd_hda_get_connections(codec, spec->gen.mixer_nid, conn,
ARRAY_SIZE(conn) - 1);
+ if (nums < 0)
+ return nums;
+
for (i = 0; i < nums; i++) {
if (get_wcaps_type(get_wcaps(codec, conn[i])) == AC_WID_AUD_OUT)
return 0;
diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c
index 4556ba76b791..71752618a7af 100644
--- a/sound/pci/ice1712/aureon.c
+++ b/sound/pci/ice1712/aureon.c
@@ -1892,6 +1892,7 @@ static int aureon_add_controls(struct snd_ice1712 *ice)
unsigned char id;
snd_ice1712_save_gpio_status(ice);
id = aureon_cs8415_get(ice, CS8415_ID);
+ snd_ice1712_restore_gpio_status(ice);
if (id != 0x41)
dev_info(ice->card->dev,
"No CS8415 chip. Skipping CS8415 controls.\n");
@@ -1909,7 +1910,6 @@ static int aureon_add_controls(struct snd_ice1712 *ice)
kctl->id.device = ice->pcm->device;
}
}
- snd_ice1712_restore_gpio_status(ice);
}
return 0;
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index dd3a873777eb..00975e86473c 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -493,12 +493,11 @@ int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
dev_dbg(chip->card->dev,
"CMD_08_ASK_BUFFERS: needed %d, freed %d\n",
*r_needed, *r_freed);
- for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
- for (i = 0; i != chip->rmh.stat_len; ++i)
- dev_dbg(chip->card->dev,
- " stat[%d]: %x, %x\n", i,
- chip->rmh.stat[i],
- chip->rmh.stat[i] & MASK_DATA_SIZE);
+ for (i = 0; i < MAX_STREAM_BUFFER && i < chip->rmh.stat_len;
+ ++i) {
+ dev_dbg(chip->card->dev, " stat[%d]: %x, %x\n", i,
+ chip->rmh.stat[i],
+ chip->rmh.stat[i] & MASK_DATA_SIZE);
}
}
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 46705ec77b48..eb3aca16359c 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -718,7 +718,7 @@ static int ac97_fp_rec_volume_put(struct snd_kcontrol *ctl,
oldreg = oxygen_read_ac97(chip, 1, AC97_REC_GAIN);
newreg = oldreg & ~0x0707;
newreg = newreg | (value->value.integer.value[0] & 7);
- newreg = newreg | ((value->value.integer.value[0] & 7) << 8);
+ newreg = newreg | ((value->value.integer.value[1] & 7) << 8);
change = newreg != oldreg;
if (change)
oxygen_write_ac97(chip, 1, AC97_REC_GAIN, newreg);
diff --git a/sound/soc/atmel/atmel-i2s.c b/sound/soc/atmel/atmel-i2s.c
index d870f56c44cf..0341b3119767 100644
--- a/sound/soc/atmel/atmel-i2s.c
+++ b/sound/soc/atmel/atmel-i2s.c
@@ -163,11 +163,14 @@ struct atmel_i2s_gck_param {
#define I2S_MCK_12M288 12288000UL
#define I2S_MCK_11M2896 11289600UL
+#define I2S_MCK_6M144 6144000UL
/* mck = (32 * (imckfs+1) / (imckdiv+1)) * fs */
static const struct atmel_i2s_gck_param gck_params[] = {
+ /* mck = 6.144MHz */
+ { 8000, I2S_MCK_6M144, 1, 47}, /* mck = 768 fs */
+
/* mck = 12.288MHz */
- { 8000, I2S_MCK_12M288, 0, 47}, /* mck = 1536 fs */
{ 16000, I2S_MCK_12M288, 1, 47}, /* mck = 768 fs */
{ 24000, I2S_MCK_12M288, 3, 63}, /* mck = 512 fs */
{ 32000, I2S_MCK_12M288, 3, 47}, /* mck = 384 fs */
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index d1579896f3a1..05277a88e20d 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -46,6 +46,35 @@
*/
#undef ENABLE_MIC_INPUT
+static struct clk *mclk;
+
+static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level)
+{
+ static int mclk_on;
+ int ret = 0;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ if (!mclk_on)
+ ret = clk_enable(mclk);
+ if (ret == 0)
+ mclk_on = 1;
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ case SND_SOC_BIAS_STANDBY:
+ if (mclk_on)
+ clk_disable(mclk);
+ mclk_on = 0;
+ break;
+ }
+
+ return ret;
+}
+
static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = {
SND_SOC_DAPM_MIC("Int Mic", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
@@ -106,6 +135,7 @@ static struct snd_soc_card snd_soc_at91sam9g20ek = {
.owner = THIS_MODULE,
.dai_link = &at91sam9g20ek_dai,
.num_links = 1,
+ .set_bias_level = at91sam9g20ek_set_bias_level,
.dapm_widgets = at91sam9g20ek_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(at91sam9g20ek_dapm_widgets),
@@ -118,6 +148,7 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *codec_np, *cpu_np;
+ struct clk *pllb;
struct snd_soc_card *card = &snd_soc_at91sam9g20ek;
int ret;
@@ -131,6 +162,31 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
return -EINVAL;
}
+ /*
+ * Codec MCLK is supplied by PCK0 - set it up.
+ */
+ mclk = clk_get(NULL, "pck0");
+ if (IS_ERR(mclk)) {
+ dev_err(&pdev->dev, "Failed to get MCLK\n");
+ ret = PTR_ERR(mclk);
+ goto err;
+ }
+
+ pllb = clk_get(NULL, "pllb");
+ if (IS_ERR(pllb)) {
+ dev_err(&pdev->dev, "Failed to get PLLB\n");
+ ret = PTR_ERR(pllb);
+ goto err_mclk;
+ }
+ ret = clk_set_parent(mclk, pllb);
+ clk_put(pllb);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to set MCLK parent\n");
+ goto err_mclk;
+ }
+
+ clk_set_rate(mclk, MCLK_RATE);
+
card->dev = &pdev->dev;
/* Parse device node info */
@@ -174,6 +230,9 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
return ret;
+err_mclk:
+ clk_put(mclk);
+ mclk = NULL;
err:
atmel_ssc_put_audio(0);
return ret;
@@ -183,6 +242,8 @@ static int at91sam9g20ek_audio_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
+ clk_disable(mclk);
+ mclk = NULL;
snd_soc_unregister_card(card);
atmel_ssc_put_audio(0);
diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c
index 8894369e329a..87b299d24bd8 100644
--- a/sound/soc/codecs/cs35l33.c
+++ b/sound/soc/codecs/cs35l33.c
@@ -22,13 +22,11 @@
#include <sound/soc-dapm.h>
#include <sound/initval.h>
#include <sound/tlv.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <sound/cs35l33.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/machine.h>
-#include <linux/of_gpio.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -1168,7 +1166,7 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client,
/* We could issue !RST or skip it based on AMP topology */
cs35l33->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
- "reset-gpios", GPIOD_OUT_HIGH);
+ "reset", GPIOD_OUT_HIGH);
if (IS_ERR(cs35l33->reset_gpio)) {
dev_err(&i2c_client->dev, "%s ERROR: Can't get reset GPIO\n",
__func__);
diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c
index b792c006e530..d9f975b52b21 100644
--- a/sound/soc/codecs/cs35l34.c
+++ b/sound/soc/codecs/cs35l34.c
@@ -20,14 +20,12 @@
#include <linux/regulator/machine.h>
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <sound/initval.h>
#include <sound/tlv.h>
@@ -1058,7 +1056,7 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
dev_err(&i2c_client->dev, "Failed to request IRQ: %d\n", ret);
cs35l34->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
- "reset-gpios", GPIOD_OUT_LOW);
+ "reset", GPIOD_OUT_LOW);
if (IS_ERR(cs35l34->reset_gpio))
return PTR_ERR(cs35l34->reset_gpio);
diff --git a/sound/soc/codecs/cs42l51-i2c.c b/sound/soc/codecs/cs42l51-i2c.c
index 70260e0a8f09..3ff73367897d 100644
--- a/sound/soc/codecs/cs42l51-i2c.c
+++ b/sound/soc/codecs/cs42l51-i2c.c
@@ -19,6 +19,12 @@ static struct i2c_device_id cs42l51_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, cs42l51_i2c_id);
+const struct of_device_id cs42l51_of_match[] = {
+ { .compatible = "cirrus,cs42l51", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cs42l51_of_match);
+
static int cs42l51_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index cdd7ae90c2b5..07371e32167c 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -811,13 +811,6 @@ int __maybe_unused cs42l51_resume(struct device *dev)
}
EXPORT_SYMBOL_GPL(cs42l51_resume);
-const struct of_device_id cs42l51_of_match[] = {
- { .compatible = "cirrus,cs42l51", },
- { }
-};
-MODULE_DEVICE_TABLE(of, cs42l51_of_match);
-EXPORT_SYMBOL_GPL(cs42l51_of_match);
-
MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
MODULE_DESCRIPTION("Cirrus Logic CS42L51 ALSA SoC Codec Driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs42l51.h b/sound/soc/codecs/cs42l51.h
index 9d06cf7f8876..4f13c38484b7 100644
--- a/sound/soc/codecs/cs42l51.h
+++ b/sound/soc/codecs/cs42l51.h
@@ -16,7 +16,6 @@ int cs42l51_probe(struct device *dev, struct regmap *regmap);
int cs42l51_remove(struct device *dev);
int __maybe_unused cs42l51_suspend(struct device *dev);
int __maybe_unused cs42l51_resume(struct device *dev);
-extern const struct of_device_id cs42l51_of_match[];
#define CS42L51_CHIP_ID 0x1B
#define CS42L51_CHIP_REV_A 0x00
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index 8be7d83f0ce9..732405587c5a 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -1192,18 +1192,12 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
if (pdata) {
cs42l56->pdata = *pdata;
} else {
- pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
- GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
if (i2c_client->dev.of_node) {
ret = cs42l56_handle_of_data(i2c_client,
&cs42l56->pdata);
if (ret != 0)
return ret;
}
- cs42l56->pdata = *pdata;
}
if (cs42l56->pdata.gpio_nreset) {
diff --git a/sound/soc/codecs/cs43130.c b/sound/soc/codecs/cs43130.c
index 8f70dee95878..02fb9317b697 100644
--- a/sound/soc/codecs/cs43130.c
+++ b/sound/soc/codecs/cs43130.c
@@ -578,7 +578,7 @@ static int cs43130_set_sp_fmt(int dai_id, unsigned int bitwidth_sclk,
break;
case SND_SOC_DAIFMT_LEFT_J:
hi_size = bitwidth_sclk;
- frm_delay = 2;
+ frm_delay = 0;
frm_phase = 1;
break;
case SND_SOC_DAIFMT_DSP_A:
@@ -1683,7 +1683,7 @@ static ssize_t cs43130_show_dc_r(struct device *dev,
return cs43130_show_dc(dev, buf, HP_RIGHT);
}
-static u16 const cs43130_ac_freq[CS43130_AC_FREQ] = {
+static const u16 cs43130_ac_freq[CS43130_AC_FREQ] = {
24,
43,
93,
@@ -2364,7 +2364,7 @@ static const struct regmap_config cs43130_regmap = {
.use_single_write = true,
};
-static u16 const cs43130_dc_threshold[CS43130_DC_THRESHOLD] = {
+static const u16 cs43130_dc_threshold[CS43130_DC_THRESHOLD] = {
50,
120,
};
diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c
index e172913d04a4..efc5049c0796 100644
--- a/sound/soc/codecs/da7210.c
+++ b/sound/soc/codecs/da7210.c
@@ -1333,6 +1333,8 @@ static int __init da7210_modinit(void)
int ret = 0;
#if IS_ENABLED(CONFIG_I2C)
ret = i2c_add_driver(&da7210_i2c_driver);
+ if (ret)
+ return ret;
#endif
#if defined(CONFIG_SPI_MASTER)
ret = spi_register_driver(&da7210_spi_driver);
diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
index 4f2a96e9fd45..e4e314604c0a 100644
--- a/sound/soc/codecs/da7219-aad.c
+++ b/sound/soc/codecs/da7219-aad.c
@@ -347,11 +347,15 @@ static irqreturn_t da7219_aad_irq_thread(int irq, void *data)
struct da7219_priv *da7219 = snd_soc_component_get_drvdata(component);
u8 events[DA7219_AAD_IRQ_REG_MAX];
u8 statusa;
- int i, report = 0, mask = 0;
+ int i, ret, report = 0, mask = 0;
/* Read current IRQ events */
- regmap_bulk_read(da7219->regmap, DA7219_ACCDET_IRQ_EVENT_A,
- events, DA7219_AAD_IRQ_REG_MAX);
+ ret = regmap_bulk_read(da7219->regmap, DA7219_ACCDET_IRQ_EVENT_A,
+ events, DA7219_AAD_IRQ_REG_MAX);
+ if (ret) {
+ dev_warn_ratelimited(component->dev, "Failed to read IRQ events: %d\n", ret);
+ return IRQ_NONE;
+ }
if (!events[DA7219_AAD_IRQ_REG_A] && !events[DA7219_AAD_IRQ_REG_B])
return IRQ_NONE;
@@ -651,7 +655,7 @@ static struct da7219_aad_pdata *da7219_aad_fw_to_pdata(struct snd_soc_component
aad_pdata->mic_det_thr =
da7219_aad_fw_mic_det_thr(component, fw_val32);
else
- aad_pdata->mic_det_thr = DA7219_AAD_MIC_DET_THR_500_OHMS;
+ aad_pdata->mic_det_thr = DA7219_AAD_MIC_DET_THR_200_OHMS;
if (fwnode_property_read_u32(aad_np, "dlg,jack-ins-deb", &fw_val32) >= 0)
aad_pdata->jack_ins_deb =
@@ -855,6 +859,8 @@ void da7219_aad_suspend(struct snd_soc_component *component)
}
}
}
+
+ synchronize_irq(da7219_aad->irq);
}
void da7219_aad_resume(struct snd_soc_component *component)
diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
index b781b28de012..dd2df9a903e0 100644
--- a/sound/soc/codecs/es8316.c
+++ b/sound/soc/codecs/es8316.c
@@ -52,7 +52,12 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(dac_vol_tlv, -9600, 50, 1);
static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(adc_vol_tlv, -9600, 50, 1);
static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_max_gain_tlv, -650, 150, 0);
static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_min_gain_tlv, -1200, 150, 0);
-static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_target_tlv, -1650, 150, 0);
+
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(alc_target_tlv,
+ 0, 10, TLV_DB_SCALE_ITEM(-1650, 150, 0),
+ 11, 11, TLV_DB_SCALE_ITEM(-150, 0, 0),
+);
+
static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpmixer_gain_tlv,
0, 4, TLV_DB_SCALE_ITEM(-1200, 150, 0),
8, 11, TLV_DB_SCALE_ITEM(-450, 150, 0),
@@ -115,7 +120,7 @@ static const struct snd_kcontrol_new es8316_snd_controls[] = {
alc_max_gain_tlv),
SOC_SINGLE_TLV("ALC Capture Min Volume", ES8316_ADC_ALC2, 0, 28, 0,
alc_min_gain_tlv),
- SOC_SINGLE_TLV("ALC Capture Target Volume", ES8316_ADC_ALC3, 4, 10, 0,
+ SOC_SINGLE_TLV("ALC Capture Target Volume", ES8316_ADC_ALC3, 4, 11, 0,
alc_target_tlv),
SOC_SINGLE("ALC Capture Hold Time", ES8316_ADC_ALC3, 0, 10, 0),
SOC_SINGLE("ALC Capture Decay Time", ES8316_ADC_ALC4, 4, 10, 0),
@@ -148,7 +153,7 @@ static const char * const es8316_dmic_txt[] = {
"dmic data at high level",
"dmic data at low level",
};
-static const unsigned int es8316_dmic_values[] = { 0, 1, 2 };
+static const unsigned int es8316_dmic_values[] = { 0, 2, 3 };
static const struct soc_enum es8316_dmic_src_enum =
SOC_VALUE_ENUM_SINGLE(ES8316_ADC_DMIC, 0, 3,
ARRAY_SIZE(es8316_dmic_txt),
@@ -364,13 +369,11 @@ static int es8316_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int count = 0;
es8316->sysclk = freq;
+ es8316->sysclk_constraints.list = NULL;
+ es8316->sysclk_constraints.count = 0;
- if (freq == 0) {
- es8316->sysclk_constraints.list = NULL;
- es8316->sysclk_constraints.count = 0;
-
+ if (freq == 0)
return 0;
- }
ret = clk_set_rate(es8316->mclk, freq);
if (ret)
@@ -386,8 +389,10 @@ static int es8316_set_dai_sysclk(struct snd_soc_dai *codec_dai,
es8316->allowed_rates[count++] = freq / ratio;
}
- es8316->sysclk_constraints.list = es8316->allowed_rates;
- es8316->sysclk_constraints.count = count;
+ if (count) {
+ es8316->sysclk_constraints.list = es8316->allowed_rates;
+ es8316->sysclk_constraints.count = count;
+ }
return 0;
}
@@ -806,15 +811,14 @@ static int es8316_i2c_probe(struct i2c_client *i2c_client,
es8316->irq = i2c_client->irq;
mutex_init(&es8316->lock);
- ret = devm_request_threaded_irq(dev, es8316->irq, NULL, es8316_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "es8316", es8316);
- if (ret == 0) {
- /* Gets re-enabled by es8316_set_jack() */
- disable_irq(es8316->irq);
- } else {
- dev_warn(dev, "Failed to get IRQ %d: %d\n", es8316->irq, ret);
- es8316->irq = -ENXIO;
+ if (es8316->irq > 0) {
+ ret = devm_request_threaded_irq(dev, es8316->irq, NULL, es8316_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ "es8316", es8316);
+ if (ret) {
+ dev_warn(dev, "Failed to get IRQ %d: %d\n", es8316->irq, ret);
+ es8316->irq = -ENXIO;
+ }
}
return devm_snd_soc_register_component(&i2c_client->dev,
diff --git a/sound/soc/codecs/jz4725b.c b/sound/soc/codecs/jz4725b.c
index 2567a5d15b55..48c40a44b763 100644
--- a/sound/soc/codecs/jz4725b.c
+++ b/sound/soc/codecs/jz4725b.c
@@ -136,14 +136,17 @@ enum {
#define REG_CGR3_GO1L_OFFSET 0
#define REG_CGR3_GO1L_MASK (0x1f << REG_CGR3_GO1L_OFFSET)
+#define REG_CGR10_GIL_OFFSET 0
+#define REG_CGR10_GIR_OFFSET 4
+
struct jz_icdc {
struct regmap *regmap;
void __iomem *base;
struct clk *clk;
};
-static const SNDRV_CTL_TLVD_DECLARE_DB_LINEAR(jz4725b_dac_tlv, -2250, 0);
-static const SNDRV_CTL_TLVD_DECLARE_DB_LINEAR(jz4725b_line_tlv, -1500, 600);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(jz4725b_adc_tlv, 0, 150, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(jz4725b_dac_tlv, -2250, 150, 0);
static const struct snd_kcontrol_new jz4725b_codec_controls[] = {
SOC_DOUBLE_TLV("Master Playback Volume",
@@ -151,11 +154,11 @@ static const struct snd_kcontrol_new jz4725b_codec_controls[] = {
REG_CGR1_GODL_OFFSET,
REG_CGR1_GODR_OFFSET,
0xf, 1, jz4725b_dac_tlv),
- SOC_DOUBLE_R_TLV("Master Capture Volume",
- JZ4725B_CODEC_REG_CGR3,
- JZ4725B_CODEC_REG_CGR2,
- REG_CGR2_GO1R_OFFSET,
- 0x1f, 1, jz4725b_line_tlv),
+ SOC_DOUBLE_TLV("Master Capture Volume",
+ JZ4725B_CODEC_REG_CGR10,
+ REG_CGR10_GIL_OFFSET,
+ REG_CGR10_GIR_OFFSET,
+ 0xf, 0, jz4725b_adc_tlv),
SOC_SINGLE("Master Playback Switch", JZ4725B_CODEC_REG_CR1,
REG_CR1_DAC_MUTE_OFFSET, 1, 1),
@@ -180,7 +183,7 @@ static SOC_VALUE_ENUM_SINGLE_DECL(jz4725b_codec_adc_src_enum,
jz4725b_codec_adc_src_texts,
jz4725b_codec_adc_src_values);
static const struct snd_kcontrol_new jz4725b_codec_adc_src_ctrl =
- SOC_DAPM_ENUM("Route", jz4725b_codec_adc_src_enum);
+ SOC_DAPM_ENUM("ADC Source Capture Route", jz4725b_codec_adc_src_enum);
static const struct snd_kcontrol_new jz4725b_codec_mixer_controls[] = {
SOC_DAPM_SINGLE("Line In Bypass", JZ4725B_CODEC_REG_CR1,
@@ -225,7 +228,7 @@ static const struct snd_soc_dapm_widget jz4725b_codec_dapm_widgets[] = {
SND_SOC_DAPM_ADC("ADC", "Capture",
JZ4725B_CODEC_REG_PMR1, REG_PMR1_SB_ADC_OFFSET, 1),
- SND_SOC_DAPM_MUX("ADC Source", SND_SOC_NOPM, 0, 0,
+ SND_SOC_DAPM_MUX("ADC Source Capture Route", SND_SOC_NOPM, 0, 0,
&jz4725b_codec_adc_src_ctrl),
/* Mixer */
@@ -236,7 +239,8 @@ static const struct snd_soc_dapm_widget jz4725b_codec_dapm_widgets[] = {
SND_SOC_DAPM_MIXER("DAC to Mixer", JZ4725B_CODEC_REG_CR1,
REG_CR1_DACSEL_OFFSET, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("Line In", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("Line In", JZ4725B_CODEC_REG_PMR1,
+ REG_PMR1_SB_LIN_OFFSET, 1, NULL, 0),
SND_SOC_DAPM_MIXER("HP Out", JZ4725B_CODEC_REG_CR1,
REG_CR1_HP_DIS_OFFSET, 1, NULL, 0),
@@ -283,11 +287,11 @@ static const struct snd_soc_dapm_route jz4725b_codec_dapm_routes[] = {
{"Mixer", NULL, "DAC to Mixer"},
{"Mixer to ADC", NULL, "Mixer"},
- {"ADC Source", "Mixer", "Mixer to ADC"},
- {"ADC Source", "Line In", "Line In"},
- {"ADC Source", "Mic 1", "Mic 1"},
- {"ADC Source", "Mic 2", "Mic 2"},
- {"ADC", NULL, "ADC Source"},
+ {"ADC Source Capture Route", "Mixer", "Mixer to ADC"},
+ {"ADC Source Capture Route", "Line In", "Line In"},
+ {"ADC Source Capture Route", "Mic 1", "Mic 1"},
+ {"ADC Source Capture Route", "Mic 2", "Mic 2"},
+ {"ADC", NULL, "ADC Source Capture Route"},
{"Out Stage", NULL, "Mixer"},
{"HP Out", NULL, "Out Stage"},
diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
index e4cde214b7b2..6e5bce4f5eb2 100644
--- a/sound/soc/codecs/msm8916-wcd-digital.c
+++ b/sound/soc/codecs/msm8916-wcd-digital.c
@@ -328,8 +328,8 @@ static const struct snd_kcontrol_new rx1_mix2_inp1_mux = SOC_DAPM_ENUM(
static const struct snd_kcontrol_new rx2_mix2_inp1_mux = SOC_DAPM_ENUM(
"RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
-/* Digital Gain control -38.4 dB to +38.4 dB in 0.3 dB steps */
-static const DECLARE_TLV_DB_SCALE(digital_gain, -3840, 30, 0);
+/* Digital Gain control -84 dB to +40 dB in 1 dB steps */
+static const DECLARE_TLV_DB_SCALE(digital_gain, -8400, 100, -8400);
/* Cutoff Freq for High Pass Filter at -3dB */
static const char * const hpf_cutoff_text[] = {
@@ -510,15 +510,15 @@ static int wcd_iir_filter_info(struct snd_kcontrol *kcontrol,
static const struct snd_kcontrol_new msm8916_wcd_digital_snd_controls[] = {
SOC_SINGLE_S8_TLV("RX1 Digital Volume", LPASS_CDC_RX1_VOL_CTL_B2_CTL,
- -128, 127, digital_gain),
+ -84, 40, digital_gain),
SOC_SINGLE_S8_TLV("RX2 Digital Volume", LPASS_CDC_RX2_VOL_CTL_B2_CTL,
- -128, 127, digital_gain),
+ -84, 40, digital_gain),
SOC_SINGLE_S8_TLV("RX3 Digital Volume", LPASS_CDC_RX3_VOL_CTL_B2_CTL,
- -128, 127, digital_gain),
+ -84, 40, digital_gain),
SOC_SINGLE_S8_TLV("TX1 Digital Volume", LPASS_CDC_TX1_VOL_CTL_GAIN,
- -128, 127, digital_gain),
+ -84, 40, digital_gain),
SOC_SINGLE_S8_TLV("TX2 Digital Volume", LPASS_CDC_TX2_VOL_CTL_GAIN,
- -128, 127, digital_gain),
+ -84, 40, digital_gain),
SOC_ENUM("TX1 HPF Cutoff", tx1_hpf_cutoff_enum),
SOC_ENUM("TX2 HPF Cutoff", tx2_hpf_cutoff_enum),
SOC_SINGLE("TX1 HPF Switch", LPASS_CDC_TX1_MUX_CTL, 3, 1, 0),
@@ -553,22 +553,22 @@ static const struct snd_kcontrol_new msm8916_wcd_digital_snd_controls[] = {
WCD_IIR_FILTER_CTL("IIR2 Band3", IIR2, BAND3),
WCD_IIR_FILTER_CTL("IIR2 Band4", IIR2, BAND4),
WCD_IIR_FILTER_CTL("IIR2 Band5", IIR2, BAND5),
- SOC_SINGLE_SX_TLV("IIR1 INP1 Volume", LPASS_CDC_IIR1_GAIN_B1_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR1 INP2 Volume", LPASS_CDC_IIR1_GAIN_B2_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR1 INP3 Volume", LPASS_CDC_IIR1_GAIN_B3_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR1 INP4 Volume", LPASS_CDC_IIR1_GAIN_B4_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR2 INP1 Volume", LPASS_CDC_IIR2_GAIN_B1_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR2 INP2 Volume", LPASS_CDC_IIR2_GAIN_B2_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR2 INP3 Volume", LPASS_CDC_IIR2_GAIN_B3_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR2 INP4 Volume", LPASS_CDC_IIR2_GAIN_B4_CTL,
- 0, -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR1 INP1 Volume", LPASS_CDC_IIR1_GAIN_B1_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR1 INP2 Volume", LPASS_CDC_IIR1_GAIN_B2_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR1 INP3 Volume", LPASS_CDC_IIR1_GAIN_B3_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR1 INP4 Volume", LPASS_CDC_IIR1_GAIN_B4_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR2 INP1 Volume", LPASS_CDC_IIR2_GAIN_B1_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR2 INP2 Volume", LPASS_CDC_IIR2_GAIN_B2_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR2 INP3 Volume", LPASS_CDC_IIR2_GAIN_B3_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR2 INP4 Volume", LPASS_CDC_IIR2_GAIN_B4_CTL,
+ -84, 40, digital_gain),
};
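
For reference, DECLARE_TLV_DB_SCALE() takes its min and step in 0.01 dB units, so a -8400/100 declaration advertises a -84.00 dB floor in 1.00 dB steps: the lowest control value maps to -84 dB and the top of the -84..40 range lands at -84 + 124 = +40 dB. A self-contained sketch of an equivalent declaration (the demo_* name and the mute flag value are illustrative, not the driver's):

    #include <sound/tlv.h>

    /* min = -8400 (-84.00 dB), step = 100 (1.00 dB), no mute entry */
    static const DECLARE_TLV_DB_SCALE(demo_digital_gain, -8400, 100, 0);

    /*
     * With SOC_SINGLE_S8_TLV(..., -84, 40, demo_digital_gain) the signed
     * 8-bit register value doubles as the dB value: -84 -> -84 dB,
     * 0 -> 0 dB, 40 -> +40 dB.
     */
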
diff --git a/sound/soc/codecs/nau8822.c b/sound/soc/codecs/nau8822.c
index cd163978792e..1450a84df4e8 100644
--- a/sound/soc/codecs/nau8822.c
+++ b/sound/soc/codecs/nau8822.c
@@ -184,6 +184,7 @@ static int nau8822_eq_get(struct snd_kcontrol *kcontrol,
struct soc_bytes_ext *params = (void *)kcontrol->private_value;
int i, reg;
u16 reg_val, *val;
+ __be16 tmp;
val = (u16 *)ucontrol->value.bytes.data;
reg = NAU8822_REG_EQ1;
@@ -192,8 +193,8 @@ static int nau8822_eq_get(struct snd_kcontrol *kcontrol,
/* conversion of 16-bit integers between native CPU format
* and big endian format
*/
- reg_val = cpu_to_be16(reg_val);
- memcpy(val + i, &reg_val, sizeof(reg_val));
+ tmp = cpu_to_be16(reg_val);
+ memcpy(val + i, &tmp, sizeof(tmp));
}
return 0;
@@ -216,6 +217,7 @@ static int nau8822_eq_put(struct snd_kcontrol *kcontrol,
void *data;
u16 *val, value;
int i, reg, ret;
+ __be16 *tmp;
data = kmemdup(ucontrol->value.bytes.data,
params->max, GFP_KERNEL | GFP_DMA);
@@ -228,7 +230,8 @@ static int nau8822_eq_put(struct snd_kcontrol *kcontrol,
/* conversion of 16-bit integers between native CPU format
* and big endian format
*/
- value = be16_to_cpu(*(val + i));
+ tmp = (__be16 *)(val + i);
+ value = be16_to_cpup(tmp);
ret = snd_soc_component_write(component, reg + i, value);
if (ret) {
dev_err(component->dev,
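
The nau8822 change stores the cpu_to_be16() result in a properly typed __be16 temporary instead of round-tripping it through a native u16, which is what sparse was warning about. A minimal sketch of the typed conversion in both directions (demo_* names are hypothetical):

    #include <linux/string.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Pack a native-endian u16 into a big-endian byte stream. */
    static void demo_pack_be16(u16 native, void *dst)
    {
            __be16 tmp = cpu_to_be16(native);       /* typed: sparse-clean */

            memcpy(dst, &tmp, sizeof(tmp));
    }

    /* Unpack a big-endian value back to native endianness. */
    static u16 demo_unpack_be16(const __be16 *src)
    {
            return be16_to_cpup(src);
    }
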
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index c8ccfa2fff84..9b22219a7693 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -1072,6 +1072,7 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_component *component = dai->component;
struct nau8824 *nau8824 = snd_soc_component_get_drvdata(component);
unsigned int val_len = 0, osr, ctrl_val, bclk_fs, bclk_div;
+ int err = -EINVAL;
nau8824_sema_acquire(nau8824, HZ);
@@ -1088,7 +1089,7 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
osr &= NAU8824_DAC_OVERSAMPLE_MASK;
if (nau8824_clock_check(nau8824, substream->stream,
nau8824->fs, osr))
- return -EINVAL;
+ goto error;
regmap_update_bits(nau8824->regmap, NAU8824_REG_CLK_DIVIDER,
NAU8824_CLK_DAC_SRC_MASK,
osr_dac_sel[osr].clk_src << NAU8824_CLK_DAC_SRC_SFT);
@@ -1098,7 +1099,7 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
osr &= NAU8824_ADC_SYNC_DOWN_MASK;
if (nau8824_clock_check(nau8824, substream->stream,
nau8824->fs, osr))
- return -EINVAL;
+ goto error;
regmap_update_bits(nau8824->regmap, NAU8824_REG_CLK_DIVIDER,
NAU8824_CLK_ADC_SRC_MASK,
osr_adc_sel[osr].clk_src << NAU8824_CLK_ADC_SRC_SFT);
@@ -1119,7 +1120,7 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
else if (bclk_fs <= 256)
bclk_div = 0;
else
- return -EINVAL;
+ goto error;
regmap_update_bits(nau8824->regmap,
NAU8824_REG_PORT0_I2S_PCM_CTRL_2,
NAU8824_I2S_LRC_DIV_MASK | NAU8824_I2S_BLK_DIV_MASK,
@@ -1140,15 +1141,17 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
val_len |= NAU8824_I2S_DL_32;
break;
default:
- return -EINVAL;
+ goto error;
}
regmap_update_bits(nau8824->regmap, NAU8824_REG_PORT0_I2S_PCM_CTRL_1,
NAU8824_I2S_DL_MASK, val_len);
+ err = 0;
+ error:
nau8824_sema_release(nau8824);
- return 0;
+ return err;
}
static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
@@ -1157,8 +1160,6 @@ static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
struct nau8824 *nau8824 = snd_soc_component_get_drvdata(component);
unsigned int ctrl1_val = 0, ctrl2_val = 0;
- nau8824_sema_acquire(nau8824, HZ);
-
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
ctrl2_val |= NAU8824_I2S_MS_MASTER;
@@ -1200,6 +1201,8 @@ static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}
+ nau8824_sema_acquire(nau8824, HZ);
+
regmap_update_bits(nau8824->regmap, NAU8824_REG_PORT0_I2S_PCM_CTRL_1,
NAU8824_I2S_DF_MASK | NAU8824_I2S_BP_MASK |
NAU8824_I2S_PCMB_EN, ctrl1_val);
@@ -1893,6 +1896,30 @@ static const struct dmi_system_id nau8824_quirk_table[] = {
},
.driver_data = (void *)(NAU8824_JD_ACTIVE_HIGH),
},
+ {
+ /* Positivo CW14Q01P */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
+ DMI_MATCH(DMI_BOARD_NAME, "CW14Q01P"),
+ },
+ .driver_data = (void *)(NAU8824_JD_ACTIVE_HIGH),
+ },
+ {
+ /* Positivo K1424G */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
+ DMI_MATCH(DMI_BOARD_NAME, "K1424G"),
+ },
+ .driver_data = (void *)(NAU8824_JD_ACTIVE_HIGH),
+ },
+ {
+ /* Positivo N14ZP74G */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
+ DMI_MATCH(DMI_BOARD_NAME, "N14ZP74G"),
+ },
+ .driver_data = (void *)(NAU8824_JD_ACTIVE_HIGH),
+ },
{}
};
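
The hw_params rework above funnels every early return through one error label so the semaphore taken at entry is always released. The shape of that pattern, reduced to a sketch built on hypothetical demo_* helpers:

    #include <linux/errno.h>
    #include <linux/types.h>

    struct demo_ctx;                                /* hypothetical context */

    int demo_sema_acquire(struct demo_ctx *ctx);    /* hypothetical helpers */
    void demo_sema_release(struct demo_ctx *ctx);
    bool demo_clock_ok(struct demo_ctx *ctx);
    void demo_apply_config(struct demo_ctx *ctx);

    static int demo_hw_params(struct demo_ctx *ctx)
    {
            int err = -EINVAL;

            demo_sema_acquire(ctx);

            if (!demo_clock_ok(ctx))
                    goto error;     /* release below instead of leaking */

            demo_apply_config(ctx);
            err = 0;
    error:
            demo_sema_release(ctx);
            return err;
    }
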
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index 4cbef9affffd..feb590a20544 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -1598,7 +1598,7 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
if (val > 6) {
dev_err(dev, "Invalid pll-in\n");
ret = -EINVAL;
- goto err_clk;
+ goto err_pm;
}
pcm512x->pll_in = val;
}
@@ -1607,7 +1607,7 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
if (val > 6) {
dev_err(dev, "Invalid pll-out\n");
ret = -EINVAL;
- goto err_clk;
+ goto err_pm;
}
pcm512x->pll_out = val;
}
@@ -1616,12 +1616,12 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
dev_err(dev,
"Error: both pll-in and pll-out, or none\n");
ret = -EINVAL;
- goto err_clk;
+ goto err_pm;
}
if (pcm512x->pll_in && pcm512x->pll_in == pcm512x->pll_out) {
dev_err(dev, "Error: pll-in == pll-out\n");
ret = -EINVAL;
- goto err_clk;
+ goto err_pm;
}
}
#endif
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index f8c0f977206c..cc7eb34a641d 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -1166,6 +1166,13 @@ static const struct dmi_system_id force_combo_jack_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Geminilake")
}
},
+ {
+ .ident = "Intel Kabylake R RVP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
+ }
+ },
{ }
};
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index a66e93a3af74..c78d5833c9cd 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -421,6 +421,7 @@ struct rt5645_priv {
struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)];
struct rt5645_eq_param_s *eq_param;
struct timer_list btn_check_timer;
+ struct mutex jd_mutex;
int codec_type;
int sysclk;
@@ -3179,6 +3180,8 @@ static int rt5645_jack_detect(struct snd_soc_component *component, int jack_inse
rt5645_enable_push_button_irq(component, true);
}
} else {
+ if (rt5645->en_button_func)
+ rt5645_enable_push_button_irq(component, false);
snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
snd_soc_dapm_sync(dapm);
rt5645->jack_type = SND_JACK_HEADPHONE;
@@ -3241,6 +3244,8 @@ int rt5645_set_jack_detect(struct snd_soc_component *component,
RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ);
regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL1,
RT5645_DIG_GATE_CTRL, RT5645_DIG_GATE_CTRL);
+ regmap_update_bits(rt5645->regmap, RT5645_DEPOP_M1,
+ RT5645_HP_CB_MASK, RT5645_HP_CB_PU);
}
rt5645_irq(0, rt5645);
@@ -3257,6 +3262,8 @@ static void rt5645_jack_detect_work(struct work_struct *work)
if (!rt5645->component)
return;
+ mutex_lock(&rt5645->jd_mutex);
+
switch (rt5645->pdata.jd_mode) {
case 0: /* Not using rt5645 JD */
if (rt5645->gpiod_hp_det) {
@@ -3269,6 +3276,7 @@ static void rt5645_jack_detect_work(struct work_struct *work)
report, SND_JACK_HEADPHONE);
snd_soc_jack_report(rt5645->mic_jack,
report, SND_JACK_MICROPHONE);
+ mutex_unlock(&rt5645->jd_mutex);
return;
case 4:
val = snd_soc_component_read32(rt5645->component, RT5645_A_JD_CTRL1) & 0x0020;
@@ -3281,7 +3289,7 @@ static void rt5645_jack_detect_work(struct work_struct *work)
if (!val && (rt5645->jack_type == 0)) { /* jack in */
report = rt5645_jack_detect(rt5645->component, 1);
- } else if (!val && rt5645->jack_type != 0) {
+ } else if (!val && rt5645->jack_type == SND_JACK_HEADSET) {
/* for push button and jack out */
btn_type = 0;
if (snd_soc_component_read32(rt5645->component, RT5645_INT_IRQ_ST) & 0x4) {
@@ -3337,6 +3345,8 @@ static void rt5645_jack_detect_work(struct work_struct *work)
rt5645_jack_detect(rt5645->component, 0);
}
+ mutex_unlock(&rt5645->jd_mutex);
+
snd_soc_jack_report(rt5645->hp_jack, report, SND_JACK_HEADPHONE);
snd_soc_jack_report(rt5645->mic_jack, report, SND_JACK_MICROPHONE);
if (rt5645->en_button_func)
@@ -3756,6 +3766,16 @@ static const struct dmi_system_id dmi_platform_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
DMI_EXACT_MATCH(DMI_BOARD_VERSION, "Default string"),
+ /*
+	 * The strings above are too generic; the LattePanda BIOS
+	 * versions for all 4 hw revisions are:
+ * DF-BI-7-S70CR100-*
+ * DF-BI-7-S70CR110-*
+ * DF-BI-7-S70CR200-*
+ * LP-BS-7-S70CR700-*
+ * Do a partial match for S70CR to avoid false positive matches.
+ */
+ DMI_MATCH(DMI_BIOS_VERSION, "S70CR"),
},
.driver_data = (void *)&lattepanda_board_platform_data,
},
@@ -4039,6 +4059,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
}
timer_setup(&rt5645->btn_check_timer, rt5645_btn_check_callback, 0);
+ mutex_init(&rt5645->jd_mutex);
INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
INIT_DELAYED_WORK(&rt5645->rcclock_work, rt5645_rcclock_work);
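
The new jd_mutex serializes the jack-detect worker against concurrent invocations (IRQ, timer, and the manual rt5645_irq() kick from set_jack_detect), so the jack_type checks and reports cannot interleave. A reduced sketch of the locking shape (demo_* names are hypothetical):

    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    struct demo_priv {
            struct mutex jd_mutex;
            struct delayed_work jack_detect_work;
    };

    static void demo_jack_detect_work(struct work_struct *work)
    {
            struct demo_priv *priv = container_of(work, struct demo_priv,
                                                  jack_detect_work.work);

            mutex_lock(&priv->jd_mutex);
            /* ... read jack state and decide what to report ... */
            mutex_unlock(&priv->jd_mutex); /* every exit path must unlock */
    }

Note the early-return case in the hunk above (jd_mode 0 with a GPIO) needs its own mutex_unlock() before returning, exactly as the patch adds.
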
diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
index 68299ce26d3e..648e0708007e 100644
--- a/sound/soc/codecs/rt5665.c
+++ b/sound/soc/codecs/rt5665.c
@@ -4472,6 +4472,8 @@ static void rt5665_remove(struct snd_soc_component *component)
struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component);
regmap_write(rt5665->regmap, RT5665_RESET, 0);
+
+ regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies);
}
#ifdef CONFIG_PM
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index f21181734170..fefdd8cbd8f5 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -3185,8 +3185,6 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
if (ret < 0)
goto err;
- pm_runtime_put(&i2c->dev);
-
return 0;
err:
pm_runtime_disable(&i2c->dev);
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 76d3c0681f37..d2dfc53e30ff 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1788,6 +1788,7 @@ static int sgtl5000_i2c_remove(struct i2c_client *client)
{
struct sgtl5000_priv *sgtl5000 = i2c_get_clientdata(client);
+ regmap_write(sgtl5000->regmap, SGTL5000_CHIP_CLK_CTRL, SGTL5000_CHIP_CLK_CTRL_DEFAULT);
regmap_write(sgtl5000->regmap, SGTL5000_CHIP_DIG_POWER, SGTL5000_DIG_POWER_DEFAULT);
regmap_write(sgtl5000->regmap, SGTL5000_CHIP_ANA_POWER, SGTL5000_ANA_POWER_DEFAULT);
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 464a4d7873bb..b797f620e352 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -53,6 +53,18 @@ static const struct reg_default ssm2602_reg[SSM2602_CACHEREGNUM] = {
{ .reg = 0x09, .def = 0x0000 }
};
+/*
+ * ssm2602 register patch
+ * Workaround for playback distortions after power up: activate the
+ * digital core first, then power on the output, DAC, and the whole
+ * chip at the same time.
+ */
+
+static const struct reg_sequence ssm2602_patch[] = {
+ { SSM2602_ACTIVE, 0x01 },
+ { SSM2602_PWR, 0x07 },
+ { SSM2602_RESET, 0x00 },
+};
+
/*Appending several "None"s just for OSS mixer use*/
static const char *ssm2602_input_select[] = {
@@ -588,6 +600,9 @@ static int ssm260x_component_probe(struct snd_soc_component *component)
return ret;
}
+ regmap_register_patch(ssm2602->regmap, ssm2602_patch,
+ ARRAY_SIZE(ssm2602_patch));
+
/* set the update bits */
regmap_update_bits(ssm2602->regmap, SSM2602_LINVOL,
LINVOL_LRIN_BOTH, LINVOL_LRIN_BOTH);
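
regmap_register_patch() writes the sequence immediately and also re-applies it after a register-cache sync, which makes it a good fit for a one-shot power-up workaround like the one above. A usage sketch with illustrative register addresses and values (not ssm2602's real map):

    #include <linux/kernel.h>
    #include <linux/regmap.h>

    /* Illustrative addresses/values only */
    static const struct reg_sequence demo_patch[] = {
            { 0x09, 0x01 },         /* activate digital core first */
            { 0x06, 0x07 },         /* then power on output, DAC, chip */
    };

    static int demo_apply_patch(struct regmap *map)
    {
            /* written now and re-applied after regcache_sync() */
            return regmap_register_patch(map, demo_patch,
                                         ARRAY_SIZE(demo_patch));
    }
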
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 016aff97e2fb..a952b9454513 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -1971,8 +1971,8 @@ static int wcd9335_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- slim_stream_unprepare(dai_data->sruntime);
slim_stream_disable(dai_data->sruntime);
+ slim_stream_unprepare(dai_data->sruntime);
break;
default:
break;
@@ -2252,51 +2252,42 @@ static int wcd9335_rx_hph_mode_put(struct snd_kcontrol *kc,
static const struct snd_kcontrol_new wcd9335_snd_controls[] = {
/* -84dB min - 40dB max */
- SOC_SINGLE_SX_TLV("RX0 Digital Volume", WCD9335_CDC_RX0_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX1 Digital Volume", WCD9335_CDC_RX1_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX2 Digital Volume", WCD9335_CDC_RX2_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX3 Digital Volume", WCD9335_CDC_RX3_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX4 Digital Volume", WCD9335_CDC_RX4_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX5 Digital Volume", WCD9335_CDC_RX5_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX6 Digital Volume", WCD9335_CDC_RX6_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX7 Digital Volume", WCD9335_CDC_RX7_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX8 Digital Volume", WCD9335_CDC_RX8_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX0 Mix Digital Volume",
- WCD9335_CDC_RX0_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX1 Mix Digital Volume",
- WCD9335_CDC_RX1_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX2 Mix Digital Volume",
- WCD9335_CDC_RX2_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX3 Mix Digital Volume",
- WCD9335_CDC_RX3_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX4 Mix Digital Volume",
- WCD9335_CDC_RX4_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX5 Mix Digital Volume",
- WCD9335_CDC_RX5_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX6 Mix Digital Volume",
- WCD9335_CDC_RX6_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX7 Mix Digital Volume",
- WCD9335_CDC_RX7_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX8 Mix Digital Volume",
- WCD9335_CDC_RX8_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX0 Digital Volume", WCD9335_CDC_RX0_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX1 Digital Volume", WCD9335_CDC_RX1_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX2 Digital Volume", WCD9335_CDC_RX2_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX3 Digital Volume", WCD9335_CDC_RX3_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX4 Digital Volume", WCD9335_CDC_RX4_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX5 Digital Volume", WCD9335_CDC_RX5_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX6 Digital Volume", WCD9335_CDC_RX6_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX7 Digital Volume", WCD9335_CDC_RX7_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX8 Digital Volume", WCD9335_CDC_RX8_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX0 Mix Digital Volume", WCD9335_CDC_RX0_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX1 Mix Digital Volume", WCD9335_CDC_RX1_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX2 Mix Digital Volume", WCD9335_CDC_RX2_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX3 Mix Digital Volume", WCD9335_CDC_RX3_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX4 Mix Digital Volume", WCD9335_CDC_RX4_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX5 Mix Digital Volume", WCD9335_CDC_RX5_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX6 Mix Digital Volume", WCD9335_CDC_RX6_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX7 Mix Digital Volume", WCD9335_CDC_RX7_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX8 Mix Digital Volume", WCD9335_CDC_RX8_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
SOC_ENUM("RX INT0_1 HPF cut off", cf_int0_1_enum),
SOC_ENUM("RX INT0_2 HPF cut off", cf_int0_2_enum),
SOC_ENUM("RX INT1_1 HPF cut off", cf_int1_1_enum),
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 9e8c564f6e9c..9787257b69a9 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -2276,6 +2276,9 @@ static int wm8904_i2c_probe(struct i2c_client *i2c,
regmap_update_bits(wm8904->regmap, WM8904_BIAS_CONTROL_0,
WM8904_POBCTRL, 0);
+ /* Fill the cache for the ADC test register */
+ regmap_read(wm8904->regmap, WM8904_ADC_TEST_0, &val);
+
/* Can leave the device powered off until we need it */
regcache_cache_only(wm8904->regmap, true);
regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies), wm8904->supplies);
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index ebaee468057b..15828ae62f9d 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1840,6 +1840,49 @@ SOC_SINGLE_TLV("SPKOUTR Mixer DACR Volume", WM8962_SPEAKER_MIXER_5,
4, 1, 0, inmix_tlv),
};
+static int tp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ int ret, reg, val, mask;
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+
+ ret = pm_runtime_resume_and_get(component->dev);
+ if (ret < 0) {
+ dev_err(component->dev, "Failed to resume device: %d\n", ret);
+ return ret;
+ }
+
+ reg = WM8962_ADDITIONAL_CONTROL_4;
+
+ if (!strcmp(w->name, "TEMP_HP")) {
+ mask = WM8962_TEMP_ENA_HP_MASK;
+ val = WM8962_TEMP_ENA_HP;
+ } else if (!strcmp(w->name, "TEMP_SPK")) {
+ mask = WM8962_TEMP_ENA_SPK_MASK;
+ val = WM8962_TEMP_ENA_SPK;
+ } else {
+ pm_runtime_put(component->dev);
+ return -EINVAL;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMD:
+ val = 0;
+ fallthrough;
+ case SND_SOC_DAPM_POST_PMU:
+ ret = snd_soc_component_update_bits(component, reg, mask, val);
+ break;
+ default:
+ WARN(1, "Invalid event %d\n", event);
+ pm_runtime_put(component->dev);
+ return -EINVAL;
+ }
+
+ pm_runtime_put(component->dev);
+
+ return 0;
+}
+
static int cp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -2132,8 +2175,10 @@ SND_SOC_DAPM_SUPPLY("TOCLK", WM8962_ADDITIONAL_CONTROL_1, 0, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("DSP2", 1, WM8962_DSP2_POWER_MANAGEMENT,
WM8962_DSP2_ENA_SHIFT, 0, dsp2_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
-SND_SOC_DAPM_SUPPLY("TEMP_HP", WM8962_ADDITIONAL_CONTROL_4, 2, 0, NULL, 0),
-SND_SOC_DAPM_SUPPLY("TEMP_SPK", WM8962_ADDITIONAL_CONTROL_4, 1, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("TEMP_HP", SND_SOC_NOPM, 0, 0, tp_event,
+ SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_SUPPLY("TEMP_SPK", SND_SOC_NOPM, 0, 0, tp_event,
+ SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MIXER("INPGAL", WM8962_LEFT_INPUT_PGA_CONTROL, 4, 0,
inpgal, ARRAY_SIZE(inpgal)),
@@ -2173,6 +2218,9 @@ SND_SOC_DAPM_PGA_E("HPOUT", SND_SOC_NOPM, 0, 0, NULL, 0, hp_event,
SND_SOC_DAPM_OUTPUT("HPOUTL"),
SND_SOC_DAPM_OUTPUT("HPOUTR"),
+
+SND_SOC_DAPM_PGA("SPKOUTL Output", WM8962_CLASS_D_CONTROL_1, 6, 0, NULL, 0),
+SND_SOC_DAPM_PGA("SPKOUTR Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0),
};
static const struct snd_soc_dapm_widget wm8962_dapm_spk_mono_widgets[] = {
@@ -2180,7 +2228,6 @@ SND_SOC_DAPM_MIXER("Speaker Mixer", WM8962_MIXER_ENABLES, 1, 0,
spkmixl, ARRAY_SIZE(spkmixl)),
SND_SOC_DAPM_MUX_E("Speaker PGA", WM8962_PWR_MGMT_2, 4, 0, &spkoutl_mux,
out_pga_event, SND_SOC_DAPM_POST_PMU),
-SND_SOC_DAPM_PGA("Speaker Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0),
SND_SOC_DAPM_OUTPUT("SPKOUT"),
};
@@ -2195,9 +2242,6 @@ SND_SOC_DAPM_MUX_E("SPKOUTL PGA", WM8962_PWR_MGMT_2, 4, 0, &spkoutl_mux,
SND_SOC_DAPM_MUX_E("SPKOUTR PGA", WM8962_PWR_MGMT_2, 3, 0, &spkoutr_mux,
out_pga_event, SND_SOC_DAPM_POST_PMU),
-SND_SOC_DAPM_PGA("SPKOUTR Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0),
-SND_SOC_DAPM_PGA("SPKOUTL Output", WM8962_CLASS_D_CONTROL_1, 6, 0, NULL, 0),
-
SND_SOC_DAPM_OUTPUT("SPKOUTL"),
SND_SOC_DAPM_OUTPUT("SPKOUTR"),
};
@@ -2307,12 +2351,18 @@ static const struct snd_soc_dapm_route wm8962_spk_mono_intercon[] = {
{ "Speaker PGA", "Mixer", "Speaker Mixer" },
{ "Speaker PGA", "DAC", "DACL" },
- { "Speaker Output", NULL, "Speaker PGA" },
- { "Speaker Output", NULL, "SYSCLK" },
- { "Speaker Output", NULL, "TOCLK" },
- { "Speaker Output", NULL, "TEMP_SPK" },
+ { "SPKOUTL Output", NULL, "Speaker PGA" },
+ { "SPKOUTL Output", NULL, "SYSCLK" },
+ { "SPKOUTL Output", NULL, "TOCLK" },
+ { "SPKOUTL Output", NULL, "TEMP_SPK" },
- { "SPKOUT", NULL, "Speaker Output" },
+ { "SPKOUTR Output", NULL, "Speaker PGA" },
+ { "SPKOUTR Output", NULL, "SYSCLK" },
+ { "SPKOUTR Output", NULL, "TOCLK" },
+ { "SPKOUTR Output", NULL, "TEMP_SPK" },
+
+ { "SPKOUT", NULL, "SPKOUTL Output" },
+ { "SPKOUT", NULL, "SPKOUTR Output" },
};
static const struct snd_soc_dapm_route wm8962_spk_stereo_intercon[] = {
@@ -2844,8 +2894,12 @@ static int wm8962_set_fll(struct snd_soc_component *component, int fll_id, int s
switch (fll_id) {
case WM8962_FLL_MCLK:
case WM8962_FLL_BCLK:
+ fll1 |= (fll_id - 1) << WM8962_FLL_REFCLK_SRC_SHIFT;
+ break;
case WM8962_FLL_OSC:
fll1 |= (fll_id - 1) << WM8962_FLL_REFCLK_SRC_SHIFT;
+ snd_soc_component_update_bits(component, WM8962_PLL2,
+ WM8962_OSC_ENA, WM8962_OSC_ENA);
break;
case WM8962_FLL_INT:
snd_soc_component_update_bits(component, WM8962_FLL_CONTROL_1,
@@ -2854,7 +2908,7 @@ static int wm8962_set_fll(struct snd_soc_component *component, int fll_id, int s
WM8962_FLL_FRC_NCO, WM8962_FLL_FRC_NCO);
break;
default:
- dev_err(component->dev, "Unknown FLL source %d\n", ret);
+ dev_err(component->dev, "Unknown FLL source %d\n", source);
return -EINVAL;
}
@@ -3750,6 +3804,11 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
if (ret < 0)
goto err_pm_runtime;
+ regmap_update_bits(wm8962->regmap, WM8962_ADDITIONAL_CONTROL_4,
+ WM8962_TEMP_ENA_HP_MASK, 0);
+ regmap_update_bits(wm8962->regmap, WM8962_ADDITIONAL_CONTROL_4,
+ WM8962_TEMP_ENA_SPK_MASK, 0);
+
regcache_cache_only(wm8962->regmap, true);
/* The drivers should power up as needed */
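
The new tp_event() callback has to touch ADDITIONAL_CONTROL_4 while the device may be runtime-suspended, hence the pm_runtime_resume_and_get()/pm_runtime_put() bracket around the register update. The bare pattern, as a sketch (demo_* is hypothetical):

    #include <linux/pm_runtime.h>

    static int demo_touch_hw(struct device *dev)
    {
            int ret = pm_runtime_resume_and_get(dev);

            if (ret < 0)
                    return ret;     /* failed to power up: leave hw alone */

            /* ... register access while the device is held awake ... */

            pm_runtime_put(dev);
            return 0;
    }

Every error path between get and put must drop the reference, which is why tp_event() calls pm_runtime_put() before each of its early returns.
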
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 6dbab3fc6537..4ae7ec8d71cd 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3711,7 +3711,12 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
} else {
dev_dbg(component->dev, "Jack not detected\n");
+ /* Release wm8994->accdet_lock to avoid deadlock:
+ * cancel_delayed_work_sync() takes wm8994->mic_work internal
+ * lock and wm1811_mic_work takes wm8994->accdet_lock */
+ mutex_unlock(&wm8994->accdet_lock);
cancel_delayed_work_sync(&wm8994->mic_work);
+ mutex_lock(&wm8994->accdet_lock);
snd_soc_component_update_bits(component, WM8958_MICBIAS2,
WM8958_MICB2_DISCH, WM8958_MICB2_DISCH);
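
The wm1811 fix drops accdet_lock around cancel_delayed_work_sync() because the work item itself takes that lock; waiting for the work to finish while holding the lock is a classic ABBA deadlock. The unlock/cancel/relock shape, sketched with hypothetical demo_* names (the caller is assumed to hold the lock):

    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    struct demo_priv {
            struct mutex accdet_lock;
            struct delayed_work mic_work;
    };

    /* Called with accdet_lock held; mic_work also takes accdet_lock. */
    static void demo_stop_mic_work(struct demo_priv *priv)
    {
            mutex_unlock(&priv->accdet_lock);       /* let the work finish */
            cancel_delayed_work_sync(&priv->mic_work);
            mutex_lock(&priv->accdet_lock);         /* restore invariant */
    }
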
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index aedfa6b2895b..8df5f3bc6e97 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -3649,12 +3649,12 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
ret = wm_adsp_buffer_read(buf, caps->region_defs[i].base_offset,
&region->base_addr);
if (ret < 0)
- return ret;
+ goto err;
ret = wm_adsp_buffer_read(buf, caps->region_defs[i].size_offset,
&offset);
if (ret < 0)
- return ret;
+ goto err;
region->cumulative_size = offset;
@@ -3665,6 +3665,10 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
}
return 0;
+
+err:
+ kfree(buf->regions);
+ return ret;
}
static void wm_adsp_buffer_clear(struct wm_adsp_compr_buf *buf)
diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c
index 65112b9d8588..90b8814d7506 100644
--- a/sound/soc/dwc/dwc-i2s.c
+++ b/sound/soc/dwc/dwc-i2s.c
@@ -132,13 +132,13 @@ static irqreturn_t i2s_irq_handler(int irq, void *dev_id)
/* Error Handling: TX */
if (isr[i] & ISR_TXFO) {
- dev_err(dev->dev, "TX overrun (ch_id=%d)\n", i);
+ dev_err_ratelimited(dev->dev, "TX overrun (ch_id=%d)\n", i);
irq_valid = true;
}
/* Error Handling: TX */
if (isr[i] & ISR_RXFO) {
- dev_err(dev->dev, "RX overrun (ch_id=%d)\n", i);
+ dev_err_ratelimited(dev->dev, "RX overrun (ch_id=%d)\n", i);
irq_valid = true;
}
}
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index 6f3b768489f6..bf3d3f0aa858 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -86,7 +86,7 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
int ret;
int int_port = 0, ext_port;
struct device_node *np = pdev->dev.of_node;
- struct device_node *ssi_np = NULL, *codec_np = NULL;
+ struct device_node *ssi_np = NULL, *codec_np = NULL, *tmp_np = NULL;
eukrea_tlv320.dev = &pdev->dev;
if (np) {
@@ -143,7 +143,7 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
}
if (machine_is_eukrea_cpuimx27() ||
- of_find_compatible_node(NULL, NULL, "fsl,imx21-audmux")) {
+ (tmp_np = of_find_compatible_node(NULL, NULL, "fsl,imx21-audmux"))) {
imx_audmux_v1_configure_port(MX27_AUDMUX_HPCR1_SSI0,
IMX_AUDMUX_V1_PCR_SYN |
IMX_AUDMUX_V1_PCR_TFSDIR |
@@ -158,10 +158,11 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
IMX_AUDMUX_V1_PCR_SYN |
IMX_AUDMUX_V1_PCR_RXDSEL(MX27_AUDMUX_HPCR1_SSI0)
);
+ of_node_put(tmp_np);
} else if (machine_is_eukrea_cpuimx25sd() ||
machine_is_eukrea_cpuimx35sd() ||
machine_is_eukrea_cpuimx51sd() ||
- of_find_compatible_node(NULL, NULL, "fsl,imx31-audmux")) {
+ (tmp_np = of_find_compatible_node(NULL, NULL, "fsl,imx31-audmux"))) {
if (!np)
ext_port = machine_is_eukrea_cpuimx25sd() ?
4 : 3;
@@ -178,6 +179,7 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
IMX_AUDMUX_V2_PTCR_SYN,
IMX_AUDMUX_V2_PDCR_RXDSEL(int_port)
);
+ of_node_put(tmp_np);
} else {
if (np) {
/* The eukrea,asoc-tlv320 driver was explicitly
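
of_find_compatible_node() returns its node with a reference held, which the old code simply leaked; the fix captures the node in tmp_np so it can be dropped once the audmux is configured. The balanced lookup in isolation (the compatible string is the one from the hunk; demo_* is hypothetical):

    #include <linux/of.h>

    static bool demo_has_audmux_v1(void)
    {
            struct device_node *np;

            np = of_find_compatible_node(NULL, NULL, "fsl,imx21-audmux");
            if (!np)
                    return false;

            /* ... configure the audmux ports ... */

            of_node_put(np);        /* balance the reference from the find */
            return true;
    }
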
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 39ea9bda1394..db663e7d17a4 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -112,11 +112,11 @@ static const struct snd_soc_dapm_route audio_map[] = {
static const struct snd_soc_dapm_route audio_map_ac97[] = {
/* 1st half -- Normal DAPM routes */
- {"Playback", NULL, "AC97 Playback"},
- {"AC97 Capture", NULL, "Capture"},
+ {"AC97 Playback", NULL, "CPU AC97 Playback"},
+ {"CPU AC97 Capture", NULL, "AC97 Capture"},
/* 2nd half -- ASRC DAPM routes */
- {"AC97 Playback", NULL, "ASRC-Playback"},
- {"ASRC-Capture", NULL, "AC97 Capture"},
+ {"CPU AC97 Playback", NULL, "ASRC-Playback"},
+ {"ASRC-Capture", NULL, "CPU AC97 Capture"},
};
/* Add all possible widgets into here without being redundant */
diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
index f7f2d29f1bfe..6285ee8f829e 100644
--- a/sound/soc/fsl/fsl_micfil.c
+++ b/sound/soc/fsl/fsl_micfil.c
@@ -87,21 +87,21 @@ static DECLARE_TLV_DB_SCALE(gain_tlv, 0, 100, 0);
static const struct snd_kcontrol_new fsl_micfil_snd_controls[] = {
SOC_SINGLE_SX_TLV("CH0 Volume", REG_MICFIL_OUT_CTRL,
- MICFIL_OUTGAIN_CHX_SHIFT(0), 0xF, 0x7, gain_tlv),
+ MICFIL_OUTGAIN_CHX_SHIFT(0), 0x8, 0xF, gain_tlv),
SOC_SINGLE_SX_TLV("CH1 Volume", REG_MICFIL_OUT_CTRL,
- MICFIL_OUTGAIN_CHX_SHIFT(1), 0xF, 0x7, gain_tlv),
+ MICFIL_OUTGAIN_CHX_SHIFT(1), 0x8, 0xF, gain_tlv),
SOC_SINGLE_SX_TLV("CH2 Volume", REG_MICFIL_OUT_CTRL,
- MICFIL_OUTGAIN_CHX_SHIFT(2), 0xF, 0x7, gain_tlv),
+ MICFIL_OUTGAIN_CHX_SHIFT(2), 0x8, 0xF, gain_tlv),
SOC_SINGLE_SX_TLV("CH3 Volume", REG_MICFIL_OUT_CTRL,
- MICFIL_OUTGAIN_CHX_SHIFT(3), 0xF, 0x7, gain_tlv),
+ MICFIL_OUTGAIN_CHX_SHIFT(3), 0x8, 0xF, gain_tlv),
SOC_SINGLE_SX_TLV("CH4 Volume", REG_MICFIL_OUT_CTRL,
- MICFIL_OUTGAIN_CHX_SHIFT(4), 0xF, 0x7, gain_tlv),
+ MICFIL_OUTGAIN_CHX_SHIFT(4), 0x8, 0xF, gain_tlv),
SOC_SINGLE_SX_TLV("CH5 Volume", REG_MICFIL_OUT_CTRL,
- MICFIL_OUTGAIN_CHX_SHIFT(5), 0xF, 0x7, gain_tlv),
+ MICFIL_OUTGAIN_CHX_SHIFT(5), 0x8, 0xF, gain_tlv),
SOC_SINGLE_SX_TLV("CH6 Volume", REG_MICFIL_OUT_CTRL,
- MICFIL_OUTGAIN_CHX_SHIFT(6), 0xF, 0x7, gain_tlv),
+ MICFIL_OUTGAIN_CHX_SHIFT(6), 0x8, 0xF, gain_tlv),
SOC_SINGLE_SX_TLV("CH7 Volume", REG_MICFIL_OUT_CTRL,
- MICFIL_OUTGAIN_CHX_SHIFT(7), 0xF, 0x7, gain_tlv),
+ MICFIL_OUTGAIN_CHX_SHIFT(7), 0x8, 0xF, gain_tlv),
SOC_ENUM_EXT("MICFIL Quality Select",
fsl_micfil_quality_enum,
snd_soc_get_enum_double, snd_soc_put_enum_double),
@@ -740,18 +740,23 @@ static int fsl_micfil_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
+ /*
+	 * Register the platform component before registering the CPU DAI,
+	 * because there is no deferred probe for the platform component in
+	 * snd_soc_add_pcm_runtime().
+ */
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to pcm register\n");
+ return ret;
+ }
+
ret = devm_snd_soc_register_component(&pdev->dev, &fsl_micfil_component,
&fsl_micfil_dai, 1);
if (ret) {
dev_err(&pdev->dev, "failed to register component %s\n",
fsl_micfil_component.name);
- return ret;
}
- ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
- if (ret)
- dev_err(&pdev->dev, "failed to pcm register\n");
-
return ret;
}
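
The reorder matters because, as the new comment says, snd_soc_add_pcm_runtime() has no deferred-probe fallback for a missing platform component, so the PCM must be registered before the component that exposes the CPU DAI. The resulting probe tail, reduced to a sketch (demo_component and demo_dai are placeholders):

    #include <linux/platform_device.h>
    #include <sound/dmaengine_pcm.h>
    #include <sound/soc.h>

    static const struct snd_soc_component_driver demo_component;
    static struct snd_soc_dai_driver demo_dai;

    static int demo_probe_tail(struct platform_device *pdev)
    {
            int ret;

            /* 1. platform (PCM) first: no defer-probe fallback for it */
            ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
            if (ret)
                    return ret;

            /* 2. CPU DAI second, now that its PCM already exists */
            return devm_snd_soc_register_component(&pdev->dev,
                                                   &demo_component,
                                                   &demo_dai, 1);
    }
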
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 027259695551..fdbfaedda4ce 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -37,6 +37,24 @@ static const struct snd_pcm_hw_constraint_list fsl_sai_rate_constraints = {
.list = fsl_sai_rates,
};
+/**
+ * fsl_sai_dir_is_synced - Check if stream is synced by the opposite stream
+ *
+ * SAI supports synchronous mode using the bit/frame clocks of either the
+ * Transmitter or the Receiver for both streams. This function checks
+ * whether a stream's clocks are synced by the opposite stream.
+ *
+ * @sai: SAI context
+ * @dir: stream direction
+ */
+static inline bool fsl_sai_dir_is_synced(struct fsl_sai *sai, int dir)
+{
+ int adir = (dir == TX) ? RX : TX;
+
+ /* current dir in async mode while opposite dir in sync mode */
+ return !sai->synchronous[dir] && sai->synchronous[adir];
+}
+
static irqreturn_t fsl_sai_isr(int irq, void *devid)
{
struct fsl_sai *sai = (struct fsl_sai *)devid;
@@ -212,6 +230,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
if (!sai->is_lsb_first)
val_cr4 |= FSL_SAI_CR4_MF;
+ sai->is_dsp_mode = false;
/* DAI mode */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
@@ -522,6 +541,38 @@ static int fsl_sai_hw_free(struct snd_pcm_substream *substream,
return 0;
}
+static void fsl_sai_config_disable(struct fsl_sai *sai, int dir)
+{
+ unsigned int ofs = sai->soc_data->reg_offset;
+ bool tx = dir == TX;
+ u32 xcsr, count = 100;
+
+ regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
+ FSL_SAI_CSR_TERE | FSL_SAI_CSR_BCE, 0);
+
+	/* TERE will remain set till the end of the current frame */
+ do {
+ udelay(10);
+ regmap_read(sai->regmap, FSL_SAI_xCSR(tx, ofs), &xcsr);
+ } while (--count && xcsr & FSL_SAI_CSR_TERE);
+
+ regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
+ FSL_SAI_CSR_FR, FSL_SAI_CSR_FR);
+
+ /*
+	 * For SAI master mode, after several open/close cycles of the
+	 * SAI, there will be no frame clock, and it can't recover
+	 * anymore. Add a software reset to fix this issue. This is a
+	 * hardware bug, and it will be fixed in the next SAI version.
+ */
+ if (!sai->is_slave_mode) {
+ /* Software Reset */
+ regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR);
+ /* Clear SR bit to finish the reset */
+ regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0);
+ }
+}
static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *cpu_dai)
@@ -530,7 +581,9 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
unsigned int ofs = sai->soc_data->reg_offset;
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
- u32 xcsr, count = 100;
+ int adir = tx ? RX : TX;
+ int dir = tx ? TX : RX;
+ u32 xcsr;
/*
* Asynchronous mode: Clear SYNC for both Tx and Rx.
@@ -553,10 +606,22 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
FSL_SAI_CSR_FRDE, FSL_SAI_CSR_FRDE);
- regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs),
- FSL_SAI_CSR_TERE, FSL_SAI_CSR_TERE);
- regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs),
+ regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
FSL_SAI_CSR_TERE, FSL_SAI_CSR_TERE);
+ /*
+ * Enable the opposite direction for synchronous mode
+ * 1. Tx sync with Rx: only set RE for Rx; set TE & RE for Tx
+ * 2. Rx sync with Tx: only set TE for Tx; set RE & TE for Rx
+ *
+	 * The RM recommends enabling RE after TE for case 1 and enabling
+	 * TE after RE for case 2, but we may not always be able to
+	 * guarantee that here: "arecord 1.wav; aplay 2.wav" in case 1
+	 * enables TE after RE, which is against what the RM recommends
+	 * but should be safe to do, judging by years of testing results.
+ */
+ if (fsl_sai_dir_is_synced(sai, adir))
+ regmap_update_bits(sai->regmap, FSL_SAI_xCSR((!tx), ofs),
+ FSL_SAI_CSR_TERE, FSL_SAI_CSR_TERE);
regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
FSL_SAI_CSR_xIE_MASK, FSL_SAI_FLAGS);
@@ -571,43 +636,23 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
/* Check if the opposite FRDE is also disabled */
regmap_read(sai->regmap, FSL_SAI_xCSR(!tx, ofs), &xcsr);
- if (!(xcsr & FSL_SAI_CSR_FRDE)) {
- /* Disable both directions and reset their FIFOs */
- regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs),
- FSL_SAI_CSR_TERE, 0);
- regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs),
- FSL_SAI_CSR_TERE, 0);
-
- /* TERE will remain set till the end of current frame */
- do {
- udelay(10);
- regmap_read(sai->regmap,
- FSL_SAI_xCSR(tx, ofs), &xcsr);
- } while (--count && xcsr & FSL_SAI_CSR_TERE);
-
- regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs),
- FSL_SAI_CSR_FR, FSL_SAI_CSR_FR);
- regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs),
- FSL_SAI_CSR_FR, FSL_SAI_CSR_FR);
-
- /*
- * For sai master mode, after several open/close sai,
- * there will be no frame clock, and can't recover
- * anymore. Add software reset to fix this issue.
- * This is a hardware bug, and will be fix in the
- * next sai version.
- */
- if (!sai->is_slave_mode) {
- /* Software Reset for both Tx and Rx */
- regmap_write(sai->regmap, FSL_SAI_TCSR(ofs),
- FSL_SAI_CSR_SR);
- regmap_write(sai->regmap, FSL_SAI_RCSR(ofs),
- FSL_SAI_CSR_SR);
- /* Clear SR bit to finish the reset */
- regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), 0);
- regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0);
- }
- }
+
+ /*
+	 * If the opposite stream provides the clocks for synchronous mode
+	 * and it is inactive, disable it before disabling the current one
+ */
+ if (fsl_sai_dir_is_synced(sai, adir) && !(xcsr & FSL_SAI_CSR_FRDE))
+ fsl_sai_config_disable(sai, adir);
+
+ /*
+ * Disable current stream if either of:
+ * 1. current stream doesn't provide clocks for synchronous mode
+	 * 2. current stream provides clocks for synchronous mode but the
+	 *    other stream is no longer active.
+ */
+ if (!fsl_sai_dir_is_synced(sai, dir) || !(xcsr & FSL_SAI_CSR_FRDE))
+ fsl_sai_config_disable(sai, dir);
+
break;
default:
return -EINVAL;
@@ -765,6 +810,8 @@ static struct reg_default fsl_sai_reg_defaults_ofs8[] = {
{FSL_SAI_RCR4(8), 0},
{FSL_SAI_RCR5(8), 0},
{FSL_SAI_RMR, 0},
+ {FSL_SAI_MCTL, 0},
+ {FSL_SAI_MDIV, 0},
};
static bool fsl_sai_readable_reg(struct device *dev, unsigned int reg)
@@ -805,6 +852,18 @@ static bool fsl_sai_readable_reg(struct device *dev, unsigned int reg)
case FSL_SAI_RFR6:
case FSL_SAI_RFR7:
case FSL_SAI_RMR:
+ case FSL_SAI_MCTL:
+ case FSL_SAI_MDIV:
+ case FSL_SAI_VERID:
+ case FSL_SAI_PARAM:
+ case FSL_SAI_TTCTN:
+ case FSL_SAI_RTCTN:
+ case FSL_SAI_TTCTL:
+ case FSL_SAI_TBCTN:
+ case FSL_SAI_TTCAP:
+ case FSL_SAI_RTCTL:
+ case FSL_SAI_RBCTN:
+ case FSL_SAI_RTCAP:
return true;
default:
return false;
@@ -819,6 +878,10 @@ static bool fsl_sai_volatile_reg(struct device *dev, unsigned int reg)
if (reg == FSL_SAI_TCSR(ofs) || reg == FSL_SAI_RCSR(ofs))
return true;
+	/* Mark VERID and PARAM volatile so their values can be read in probe */
+ if (ofs == 8 && (reg == FSL_SAI_VERID || reg == FSL_SAI_PARAM))
+ return true;
+
switch (reg) {
case FSL_SAI_TFR0:
case FSL_SAI_TFR1:
@@ -872,6 +935,10 @@ static bool fsl_sai_writeable_reg(struct device *dev, unsigned int reg)
case FSL_SAI_TDR7:
case FSL_SAI_TMR:
case FSL_SAI_RMR:
+ case FSL_SAI_MCTL:
+ case FSL_SAI_MDIV:
+ case FSL_SAI_TTCTL:
+ case FSL_SAI_RTCTL:
return true;
default:
return false;
@@ -920,6 +987,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
if (sai->soc_data->reg_offset == 8) {
fsl_sai_regmap_config.reg_defaults = fsl_sai_reg_defaults_ofs8;
+ fsl_sai_regmap_config.max_register = FSL_SAI_MDIV;
fsl_sai_regmap_config.num_reg_defaults =
ARRAY_SIZE(fsl_sai_reg_defaults_ofs8);
}
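
Taken together, fsl_sai_dir_is_synced() and fsl_sai_config_disable() let the STOP path express one rule: a direction that supplies clocks to its synced peer is torn down last, and only once nothing depends on it. A condensed restatement of the hunk's logic, not extra driver code (peer_frde_active stands in for the FRDE test on the opposite direction's CSR):

    static void demo_sai_stop(struct fsl_sai *sai, int dir, int adir,
                              bool peer_frde_active)
    {
            /* the peer clocks us and is itself idle: disable it first */
            if (fsl_sai_dir_is_synced(sai, adir) && !peer_frde_active)
                    fsl_sai_config_disable(sai, adir);

            /* disable us unless we clock a peer that is still active */
            if (!fsl_sai_dir_is_synced(sai, dir) || !peer_frde_active)
                    fsl_sai_config_disable(sai, dir);
    }
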
diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
index afaef2027234..771990396804 100644
--- a/sound/soc/fsl/fsl_sai.h
+++ b/sound/soc/fsl/fsl_sai.h
@@ -14,6 +14,8 @@
SNDRV_PCM_FMTBIT_S32_LE)
/* SAI Register Map Register */
+#define FSL_SAI_VERID 0x00 /* SAI Version ID Register */
+#define FSL_SAI_PARAM 0x04 /* SAI Parameter Register */
#define FSL_SAI_TCSR(ofs) (0x00 + ofs) /* SAI Transmit Control */
#define FSL_SAI_TCR1(ofs) (0x04 + ofs) /* SAI Transmit Configuration 1 */
#define FSL_SAI_TCR2(ofs) (0x08 + ofs) /* SAI Transmit Configuration 2 */
@@ -37,6 +39,10 @@
#define FSL_SAI_TFR6 0x58 /* SAI Transmit FIFO 6 */
#define FSL_SAI_TFR7 0x5C /* SAI Transmit FIFO 7 */
#define FSL_SAI_TMR 0x60 /* SAI Transmit Mask */
+#define FSL_SAI_TTCTL 0x70 /* SAI Transmit Timestamp Control Register */
+#define FSL_SAI_TTCTN 0x74 /* SAI Transmit Timestamp Counter Register */
+#define FSL_SAI_TBCTN 0x78 /* SAI Transmit Bit Counter Register */
+#define FSL_SAI_TTCAP 0x7C /* SAI Transmit Timestamp Capture */
#define FSL_SAI_RCSR(ofs) (0x80 + ofs) /* SAI Receive Control */
#define FSL_SAI_RCR1(ofs) (0x84 + ofs)/* SAI Receive Configuration 1 */
#define FSL_SAI_RCR2(ofs) (0x88 + ofs) /* SAI Receive Configuration 2 */
@@ -60,6 +66,13 @@
#define FSL_SAI_RFR6 0xd8 /* SAI Receive FIFO 6 */
#define FSL_SAI_RFR7 0xdc /* SAI Receive FIFO 7 */
#define FSL_SAI_RMR 0xe0 /* SAI Receive Mask */
+#define FSL_SAI_RTCTL 0xf0 /* SAI Receive Timestamp Control Register */
+#define FSL_SAI_RTCTN 0xf4 /* SAI Receive Timestamp Counter Register */
+#define FSL_SAI_RBCTN 0xf8 /* SAI Receive Bit Counter Register */
+#define FSL_SAI_RTCAP 0xfc /* SAI Receive Timestamp Capture */
+
+#define FSL_SAI_MCTL 0x100 /* SAI MCLK Control Register */
+#define FSL_SAI_MDIV 0x104 /* SAI MCLK Divide Register */
#define FSL_SAI_xCSR(tx, ofs) (tx ? FSL_SAI_TCSR(ofs) : FSL_SAI_RCSR(ofs))
#define FSL_SAI_xCR1(tx, ofs) (tx ? FSL_SAI_TCR1(ofs) : FSL_SAI_RCR1(ofs))
@@ -73,6 +86,8 @@
/* SAI Transmit/Receive Control Register */
#define FSL_SAI_CSR_TERE BIT(31)
+#define FSL_SAI_CSR_SE BIT(30)
+#define FSL_SAI_CSR_BCE BIT(28)
#define FSL_SAI_CSR_FR BIT(25)
#define FSL_SAI_CSR_SR BIT(24)
#define FSL_SAI_CSR_xF_SHIFT 16
@@ -106,6 +121,7 @@
#define FSL_SAI_CR2_MSEL(ID) ((ID) << 26)
#define FSL_SAI_CR2_BCP BIT(25)
#define FSL_SAI_CR2_BCD_MSTR BIT(24)
+#define FSL_SAI_CR2_BYP BIT(23) /* BCLK bypass */
#define FSL_SAI_CR2_DIV_MASK 0xff
/* SAI Transmit and Receive Configuration 3 Register */
@@ -115,6 +131,13 @@
#define FSL_SAI_CR3_WDFL_MASK 0x1f
/* SAI Transmit and Receive Configuration 4 Register */
+
+#define FSL_SAI_CR4_FCONT BIT(28)
+#define FSL_SAI_CR4_FCOMB_SHIFT BIT(26)
+#define FSL_SAI_CR4_FCOMB_SOFT BIT(27)
+#define FSL_SAI_CR4_FCOMB_MASK (0x3 << 26)
+#define FSL_SAI_CR4_FPACK_8 (0x2 << 24)
+#define FSL_SAI_CR4_FPACK_16 (0x3 << 24)
#define FSL_SAI_CR4_FRSZ(x) (((x) - 1) << 16)
#define FSL_SAI_CR4_FRSZ_MASK (0x1f << 16)
#define FSL_SAI_CR4_SYWD(x) (((x) - 1) << 8)
@@ -132,6 +155,43 @@
#define FSL_SAI_CR5_FBT(x) ((x) << 8)
#define FSL_SAI_CR5_FBT_MASK (0x1f << 8)
+/* SAI MCLK Control Register */
+#define FSL_SAI_MCTL_MCLK_EN BIT(30) /* MCLK Enable */
+#define FSL_SAI_MCTL_MSEL_MASK (0x3 << 24)
+#define FSL_SAI_MCTL_MSEL(ID) ((ID) << 24)
+#define FSL_SAI_MCTL_MSEL_BUS 0
+#define FSL_SAI_MCTL_MSEL_MCLK1 BIT(24)
+#define FSL_SAI_MCTL_MSEL_MCLK2 BIT(25)
+#define FSL_SAI_MCTL_MSEL_MCLK3 (BIT(24) | BIT(25))
+#define FSL_SAI_MCTL_DIV_EN BIT(23)
+#define FSL_SAI_MCTL_DIV_MASK 0xFF
+
+/* SAI VERID Register */
+#define FSL_SAI_VERID_MAJOR_SHIFT 24
+#define FSL_SAI_VERID_MAJOR_MASK GENMASK(31, 24)
+#define FSL_SAI_VERID_MINOR_SHIFT 16
+#define FSL_SAI_VERID_MINOR_MASK GENMASK(23, 16)
+#define FSL_SAI_VERID_FEATURE_SHIFT 0
+#define FSL_SAI_VERID_FEATURE_MASK GENMASK(15, 0)
+#define FSL_SAI_VERID_EFIFO_EN BIT(0)
+#define FSL_SAI_VERID_TSTMP_EN BIT(1)
+
+/* SAI PARAM Register */
+#define FSL_SAI_PARAM_SPF_SHIFT 16
+#define FSL_SAI_PARAM_SPF_MASK GENMASK(19, 16)
+#define FSL_SAI_PARAM_WPF_SHIFT 8
+#define FSL_SAI_PARAM_WPF_MASK GENMASK(11, 8)
+#define FSL_SAI_PARAM_DLN_MASK GENMASK(3, 0)
+
+/* SAI MCLK Divide Register */
+#define FSL_SAI_MDIV_MASK 0xFFFFF
+
+/* SAI timestamp and bitcounter */
+#define FSL_SAI_xTCTL_TSEN BIT(0)
+#define FSL_SAI_xTCTL_TSINC BIT(1)
+#define FSL_SAI_xTCTL_RTSC BIT(8)
+#define FSL_SAI_xTCTL_RBC BIT(9)
+
/* SAI type */
#define FSL_SAI_DMA BIT(0)
#define FSL_SAI_USE_AC97 BIT(1)
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 7858a5499ac5..4fd4ba9972af 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -615,6 +615,8 @@ static int fsl_spdif_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
regmap_update_bits(regmap, REG_SPDIF_SCR, dmaen, 0);
regmap_update_bits(regmap, REG_SPDIF_SIE, intr, 0);
+ regmap_write(regmap, REG_SPDIF_STL, 0x0);
+ regmap_write(regmap, REG_SPDIF_STR, 0x0);
break;
default:
return -EINVAL;
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index ed18bc69e095..0ab35c3dc7d2 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -1147,14 +1147,14 @@ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
.symmetric_channels = 1,
.probe = fsl_ssi_dai_probe,
.playback = {
- .stream_name = "AC97 Playback",
+ .stream_name = "CPU AC97 Playback",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20,
},
.capture = {
- .stream_name = "AC97 Capture",
+ .stream_name = "CPU AC97 Capture",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_48000,
diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
index 71590ca6394b..119a3a9684f5 100644
--- a/sound/soc/fsl/imx-audmix.c
+++ b/sound/soc/fsl/imx-audmix.c
@@ -230,6 +230,8 @@ static int imx_audmix_probe(struct platform_device *pdev)
dai_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s%s",
fe_name_pref, args.np->full_name + 1);
+ if (!dai_name)
+ return -ENOMEM;
dev_info(pdev->dev.parent, "DAI FE name:%s\n", dai_name);
@@ -238,6 +240,8 @@ static int imx_audmix_probe(struct platform_device *pdev)
capture_dai_name =
devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s %s",
dai_name, "CPU-Capture");
+ if (!capture_dai_name)
+ return -ENOMEM;
}
priv->dai[i].cpus = &dlc[0];
@@ -268,6 +272,8 @@ static int imx_audmix_probe(struct platform_device *pdev)
"AUDMIX-Playback-%d", i);
be_cp = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"AUDMIX-Capture-%d", i);
+ if (!be_name || !be_pb || !be_cp)
+ return -ENOMEM;
priv->dai[num_dai + i].cpus = &dlc[3];
priv->dai[num_dai + i].codecs = &dlc[4];
@@ -295,6 +301,9 @@ static int imx_audmix_probe(struct platform_device *pdev)
priv->dapm_routes[i].source =
devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s %s",
dai_name, "CPU-Playback");
+ if (!priv->dapm_routes[i].source)
+ return -ENOMEM;
+
priv->dapm_routes[i].sink = be_pb;
priv->dapm_routes[num_dai + i].source = be_pb;
priv->dapm_routes[num_dai + i].sink = be_cp;
@@ -313,7 +322,7 @@ static int imx_audmix_probe(struct platform_device *pdev)
if (IS_ERR(priv->cpu_mclk)) {
ret = PTR_ERR(priv->cpu_mclk);
dev_err(&cpu_pdev->dev, "failed to get DAI mclk1: %d\n", ret);
- return -EINVAL;
+ return ret;
}
priv->audmix_pdev = audmix_pdev;
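
devm_kasprintf() returns NULL on allocation failure, and every one of these names is dereferenced later (dev_info(), DAI link fields, DAPM routes), so each call now gets an explicit check. The guard in isolation (demo_* is hypothetical):

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    static int demo_name_dai(struct device *dev, const char **out,
                             const char *prefix, const char *suffix)
    {
            *out = devm_kasprintf(dev, GFP_KERNEL, "%s %s", prefix, suffix);
            if (!*out)
                    return -ENOMEM; /* stop probing instead of oopsing */

            return 0;
    }
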
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 1bc498124689..a96a7cd8af6e 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -149,8 +149,10 @@ static int asoc_simple_parse_dai(struct device_node *ep,
* if he unbinded CPU or Codec.
*/
ret = snd_soc_get_dai_name(&args, &dlc->dai_name);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(node);
return ret;
+ }
dlc->of_node = node;
@@ -464,8 +466,10 @@ static int graph_for_each_link(struct asoc_simple_priv *priv,
of_node_put(codec_ep);
of_node_put(codec_port);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(cpu_ep);
return ret;
+ }
codec_port_old = codec_port;
}
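
Both audio-graph hunks plug the same leak class: a node reference obtained earlier (the DAI node, the CPU endpoint) must be dropped on the early-return path, not only on success. The sketched shape (demo_resolve is a hypothetical stand-in for snd_soc_get_dai_name()):

    #include <linux/of.h>

    int demo_resolve(struct device_node *node);     /* hypothetical */

    static int demo_parse(struct device_node *node,
                          struct device_node **out)
    {
            int ret = demo_resolve(node);

            if (ret < 0) {
                    of_node_put(node);      /* drop the ref on failure too */
                    return ret;
            }

            *out = node;    /* success: ownership moves to the caller */
            return 0;
    }
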
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index ed332177b0f9..57d6d0b48068 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -446,6 +446,13 @@ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = {
| BYT_CHT_ES8316_INTMIC_IN2_MAP
| BYT_CHT_ES8316_JD_INVERTED),
},
+ { /* Nanote UMPC-01 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UMPC-01"),
+ },
+ .driver_data = (void *)BYT_CHT_ES8316_INTMIC_IN1_MAP,
+ },
{ /* Teclast X98 Plus II */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 7830d014d924..c740dec00f83 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -391,6 +391,18 @@ static int byt_rt5640_aif1_hw_params(struct snd_pcm_substream *substream,
/* Please keep this list alphabetically sorted */
static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ { /* Acer Iconia One 7 B1-750 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
+ },
+ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+ BYT_RT5640_JD_SRC_JD1_IN4P |
+ BYT_RT5640_OVCD_TH_1500UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* Acer Iconia Tab 8 W1-810 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -429,6 +441,21 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_MCLK_EN),
},
{
+ /* Advantech MICA-071 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Advantech"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MICA-071"),
+ },
+	/* OVCD Th = 1500uA to reliably detect headphones vs headsets */
+ .driver_data = (void *)(BYT_RT5640_IN3_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_1500UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_MCLK_EN),
+ },
+ {
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ARCHOS 80 Cesium"),
@@ -499,6 +526,18 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF1 |
BYT_RT5640_MCLK_EN),
},
+ { /* Chuwi Vi8 dual-boot (CWI506) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "i86"),
+ /* The above are too generic, also match BIOS info */
+ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI2.D86JHBNR02"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{
/* Chuwi Vi10 (CWI505) */
.matches = {
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 921c09cdb480..0c1c8628b991 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -919,7 +919,6 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
if (adev) {
snprintf(byt_rt5651_codec_name, sizeof(byt_rt5651_codec_name),
"i2c-%s", acpi_dev_name(adev));
- put_device(&adev->dev);
byt_rt5651_dais[dai_index].codecs->name = byt_rt5651_codec_name;
} else {
dev_err(&pdev->dev, "Error cannot find '%s' dev\n", mach->id);
@@ -928,6 +927,7 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
codec_dev = bus_find_device_by_name(&i2c_bus_type, NULL,
byt_rt5651_codec_name);
+ acpi_dev_put(adev);
if (!codec_dev)
return -EPROBE_DEFER;
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 439dd4ba690c..3256f7c4eb74 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -260,8 +260,10 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
snd_pcm_set_sync(substream);
mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
- if (!mconfig)
+ if (!mconfig) {
+ kfree(dma_params);
return -EINVAL;
+ }
skl_tplg_d0i3_get(skl, mconfig->d0i3_caps);
@@ -1490,6 +1492,7 @@ int skl_platform_register(struct device *dev)
dais = krealloc(skl->dais, sizeof(skl_fe_dai) +
sizeof(skl_platform_dai), GFP_KERNEL);
if (!dais) {
+ kfree(skl->dais);
ret = -ENOMEM;
goto err;
}
@@ -1502,8 +1505,10 @@ int skl_platform_register(struct device *dev)
ret = devm_snd_soc_register_component(dev, &skl_component,
skl->dais, num_dais);
- if (ret)
+ if (ret) {
+ kfree(skl->dais);
dev_err(dev, "soc component registration failed %d\n", ret);
+ }
err:
return ret;
}
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
index 667cdddc289f..7286cbd0c46f 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.c
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -1003,8 +1003,10 @@ int skl_ipc_get_large_config(struct sst_generic_ipc *ipc,
reply.size = (reply.header >> 32) & IPC_DATA_OFFSET_SZ_MASK;
buf = krealloc(reply.data, reply.size, GFP_KERNEL);
- if (!buf)
+ if (!buf) {
+ kfree(reply.data);
return -ENOMEM;
+ }
*payload = buf;
*bytes = reply.size;
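
krealloc() leaves the original allocation untouched when it fails, so overwriting the only pointer with its NULL return leaks the old buffer; the skylake fixes above keep the old pointer reachable and free it on failure. The pattern in isolation (demo_grow is hypothetical):

    #include <linux/errno.h>
    #include <linux/slab.h>

    static int demo_grow(void **bufp, size_t new_size)
    {
            void *tmp = krealloc(*bufp, new_size, GFP_KERNEL);

            if (!tmp) {
                    kfree(*bufp);   /* old buffer is still valid: free it */
                    *bufp = NULL;
                    return -ENOMEM;
            }

            *bufp = tmp;
            return 0;
    }
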
diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
index d43cbf4a71ef..d4db64d72b2c 100644
--- a/sound/soc/intel/skylake/skl-sst-utils.c
+++ b/sound/soc/intel/skylake/skl-sst-utils.c
@@ -299,6 +299,7 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
if (!module->instance_id) {
ret = -ENOMEM;
+ kfree(module);
goto free_uuid_list;
}
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 2e5fbd220923..80cff74c23af 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -438,7 +438,7 @@ static int skl_free(struct hdac_bus *bus)
skl->init_done = 0; /* to be sure */
- snd_hdac_ext_stop_streams(bus);
+ snd_hdac_stop_streams_and_chip(bus);
if (bus->irq >= 0)
free_irq(bus->irq, (void *)bus);
@@ -1116,7 +1116,10 @@ static void skl_shutdown(struct pci_dev *pci)
if (!skl->init_done)
return;
- snd_hdac_ext_stop_streams(bus);
+ snd_hdac_stop_streams(bus);
+ snd_hdac_ext_bus_link_power_down_all(bus);
+ skl_dsp_sleep(skl->dsp);
+
list_for_each_entry(s, &bus->stream_list, list) {
stream = stream_to_hdac_ext_stream(s);
snd_hdac_ext_stream_decouple(bus, stream, false);
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index d2d5c25bf550..0215d187bdff 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -86,7 +86,7 @@ kirkwood_dma_conf_mbus_windows(void __iomem *base, int win,
/* try to find matching cs for current dma address */
for (i = 0; i < dram->num_cs; i++) {
- const struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = &dram->cs[i];
if ((cs->base & 0xffff0000) < (dma & 0xffff0000)) {
writel(cs->base & 0xffff0000,
base + KIRKWOOD_AUDIO_WIN_BASE_REG(win));
diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c
index b66f7dee1e14..f6ec6937a71b 100644
--- a/sound/soc/mediatek/common/mtk-btcvsd.c
+++ b/sound/soc/mediatek/common/mtk-btcvsd.c
@@ -1054,11 +1054,9 @@ static int mtk_pcm_btcvsd_copy(struct snd_pcm_substream *substream,
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- mtk_btcvsd_snd_write(bt, buf, count);
+ return mtk_btcvsd_snd_write(bt, buf, count);
else
- mtk_btcvsd_snd_read(bt, buf, count);
-
- return 0;
+ return mtk_btcvsd_snd_read(bt, buf, count);
}
static struct snd_pcm_ops mtk_btcvsd_ops = {
diff --git a/sound/soc/mediatek/mt6797/mt6797-mt6351.c b/sound/soc/mediatek/mt6797/mt6797-mt6351.c
index 496f32bcfb5e..d2f6213a6bfc 100644
--- a/sound/soc/mediatek/mt6797/mt6797-mt6351.c
+++ b/sound/soc/mediatek/mt6797/mt6797-mt6351.c
@@ -217,7 +217,8 @@ static int mt6797_mt6351_dev_probe(struct platform_device *pdev)
if (!codec_node) {
dev_err(&pdev->dev,
"Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_platform_node;
}
for_each_card_prelinks(card, i, dai_link) {
if (dai_link->codecs->name)
@@ -230,6 +231,9 @@ static int mt6797_mt6351_dev_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
__func__, ret);
+ of_node_put(codec_node);
+put_platform_node:
+ of_node_put(platform_node);
return ret;
}
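
All of the MediaTek machine-driver fixes that follow share one rule: every successful of_parse_phandle() takes a reference that must be dropped on every exit path, so early returns become gotos into an unwind ladder. A hedged sketch of the shape (function and property names are illustrative):

	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct device_node *platform_node, *codec_node;
		int ret;

		platform_node = of_parse_phandle(pdev->dev.of_node,
						 "mediatek,platform", 0);
		if (!platform_node)
			return -EINVAL;		/* nothing taken yet */

		codec_node = of_parse_phandle(pdev->dev.of_node,
					      "mediatek,audio-codec", 0);
		if (!codec_node) {
			ret = -EINVAL;
			goto put_platform_node;	/* drop the first reference */
		}

		ret = 0;			/* ... register the card ... */

		of_node_put(codec_node);
	put_platform_node:
		of_node_put(platform_node);
		return ret;
	}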
diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
index 0ee29255e731..f3dbd8164b86 100644
--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
@@ -1073,16 +1073,6 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
afe->dev = &pdev->dev;
- irq_id = platform_get_irq(pdev, 0);
- if (irq_id <= 0)
- return irq_id < 0 ? irq_id : -ENXIO;
- ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
- 0, "Afe_ISR_Handle", (void *)afe);
- if (ret) {
- dev_err(afe->dev, "could not request_irq\n");
- return ret;
- }
-
afe->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(afe->base_addr))
return PTR_ERR(afe->base_addr);
@@ -1158,6 +1148,16 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
if (ret)
goto err_pm_disable;
+ irq_id = platform_get_irq(pdev, 0);
+	if (irq_id <= 0) {
+		ret = irq_id < 0 ? irq_id : -ENXIO;
+		goto err_pm_disable;
+	}
+ ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
+ 0, "Afe_ISR_Handle", (void *)afe);
+ if (ret) {
+ dev_err(afe->dev, "could not request_irq\n");
+ goto err_pm_disable;
+ }
+
dev_info(&pdev->dev, "MT8173 AFE driver initialized.\n");
return 0;
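
The mt8173-afe-pcm reordering follows a general probe rule: request the interrupt only once everything the handler touches is initialized, otherwise a spurious IRQ races a half-constructed device. A minimal sketch under that assumption (all names illustrative):

	#include <linux/err.h>
	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	struct example_dev {
		void __iomem *base;
	};

	static irqreturn_t example_irq_handler(int irq, void *dev_id)
	{
		/* may dereference state set up anywhere in probe */
		return IRQ_HANDLED;
	}

	static int example_probe(struct platform_device *pdev)
	{
		struct example_dev *d;
		int irq;

		d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
		if (!d)
			return -ENOMEM;

		d->base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(d->base))
			return PTR_ERR(d->base);

		/* ... clocks, regmap, component registration ... */

		/* last step: from here on the handler may fire safely */
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;
		return devm_request_irq(&pdev->dev, irq, example_irq_handler,
					0, dev_name(&pdev->dev), d);
	}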
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
index 6f8542329bab..a21aefe1a4d1 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
@@ -200,14 +200,16 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
if (!mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node) {
dev_err(&pdev->dev,
"Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node =
of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1);
if (!mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node) {
dev_err(&pdev->dev,
"Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
mt8173_rt5650_rt5514_codec_conf[0].of_node =
mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node;
@@ -219,6 +221,7 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
__func__, ret);
+out:
of_node_put(platform_node);
return ret;
}
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
index 727ff0f7f20b..8e1e60a9b45c 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
@@ -256,14 +256,16 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
if (!mt8173_rt5650_rt5676_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node) {
dev_err(&pdev->dev,
"Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_node;
}
mt8173_rt5650_rt5676_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node =
of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1);
if (!mt8173_rt5650_rt5676_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node) {
dev_err(&pdev->dev,
"Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_node;
}
mt8173_rt5650_rt5676_codec_conf[0].of_node =
mt8173_rt5650_rt5676_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node;
@@ -276,7 +278,8 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
if (!mt8173_rt5650_rt5676_dais[DAI_LINK_HDMI_I2S].codecs->of_node) {
dev_err(&pdev->dev,
"Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_node;
}
card->dev = &pdev->dev;
@@ -286,6 +289,7 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
__func__, ret);
+put_node:
of_node_put(platform_node);
return ret;
}
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650.c b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
index 21e7d4d3ded5..cdfc697ad94e 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
@@ -266,7 +266,8 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
if (!mt8173_rt5650_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node) {
dev_err(&pdev->dev,
"Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_platform_node;
}
mt8173_rt5650_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node =
mt8173_rt5650_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node;
@@ -279,7 +280,7 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"%s codec_capture_dai name fail %d\n",
__func__, ret);
- return ret;
+ goto put_platform_node;
}
mt8173_rt5650_dais[DAI_LINK_CODEC_I2S].codecs[1].dai_name =
codec_capture_dai;
@@ -301,7 +302,8 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
if (!mt8173_rt5650_dais[DAI_LINK_HDMI_I2S].codecs->of_node) {
dev_err(&pdev->dev,
"Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_platform_node;
}
card->dev = &pdev->dev;
@@ -310,6 +312,7 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
__func__, ret);
+put_platform_node:
of_node_put(platform_node);
return ret;
}
diff --git a/sound/soc/meson/Kconfig b/sound/soc/meson/Kconfig
index e0d24592ebd7..f9188274f6b0 100644
--- a/sound/soc/meson/Kconfig
+++ b/sound/soc/meson/Kconfig
@@ -85,9 +85,13 @@ config SND_MESON_AXG_PDM
Select Y or M to add support for PDM input embedded
in the Amlogic AXG SoC family
+config SND_MESON_CODEC_GLUE
+ tristate
+
config SND_MESON_G12A_TOHDMITX
tristate "Amlogic G12A To HDMI TX Control Support"
select REGMAP_MMIO
+ select SND_MESON_CODEC_GLUE
imply SND_SOC_HDMI_CODEC
help
Select Y or M to add support for HDMI audio on the g12a SoC
diff --git a/sound/soc/meson/Makefile b/sound/soc/meson/Makefile
index 1a8b1470ed84..529a807b3f37 100644
--- a/sound/soc/meson/Makefile
+++ b/sound/soc/meson/Makefile
@@ -11,6 +11,7 @@ snd-soc-meson-axg-sound-card-objs := axg-card.o
snd-soc-meson-axg-spdifin-objs := axg-spdifin.o
snd-soc-meson-axg-spdifout-objs := axg-spdifout.o
snd-soc-meson-axg-pdm-objs := axg-pdm.o
+snd-soc-meson-codec-glue-objs := meson-codec-glue.o
snd-soc-meson-g12a-tohdmitx-objs := g12a-tohdmitx.o
obj-$(CONFIG_SND_MESON_AXG_FIFO) += snd-soc-meson-axg-fifo.o
@@ -24,4 +25,5 @@ obj-$(CONFIG_SND_MESON_AXG_SOUND_CARD) += snd-soc-meson-axg-sound-card.o
obj-$(CONFIG_SND_MESON_AXG_SPDIFIN) += snd-soc-meson-axg-spdifin.o
obj-$(CONFIG_SND_MESON_AXG_SPDIFOUT) += snd-soc-meson-axg-spdifout.o
obj-$(CONFIG_SND_MESON_AXG_PDM) += snd-soc-meson-axg-pdm.o
+obj-$(CONFIG_SND_MESON_CODEC_GLUE) += snd-soc-meson-codec-glue.o
obj-$(CONFIG_SND_MESON_G12A_TOHDMITX) += snd-soc-meson-g12a-tohdmitx.o
diff --git a/sound/soc/meson/axg-spdifin.c b/sound/soc/meson/axg-spdifin.c
index d0d09f945b48..7aaded1fc376 100644
--- a/sound/soc/meson/axg-spdifin.c
+++ b/sound/soc/meson/axg-spdifin.c
@@ -112,34 +112,6 @@ static int axg_spdifin_prepare(struct snd_pcm_substream *substream,
return 0;
}
-static int axg_spdifin_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
- int ret;
-
- ret = clk_prepare_enable(priv->refclk);
- if (ret) {
- dev_err(dai->dev,
- "failed to enable spdifin reference clock\n");
- return ret;
- }
-
- regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN,
- SPDIFIN_CTRL0_EN);
-
- return 0;
-}
-
-static void axg_spdifin_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
-
- regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, 0);
- clk_disable_unprepare(priv->refclk);
-}
-
static void axg_spdifin_write_mode_param(struct regmap *map, int mode,
unsigned int val,
unsigned int num_per_reg,
@@ -251,25 +223,38 @@ static int axg_spdifin_dai_probe(struct snd_soc_dai *dai)
ret = axg_spdifin_sample_mode_config(dai, priv);
if (ret) {
dev_err(dai->dev, "mode configuration failed\n");
- clk_disable_unprepare(priv->pclk);
- return ret;
+ goto pclk_err;
}
+ ret = clk_prepare_enable(priv->refclk);
+ if (ret) {
+ dev_err(dai->dev,
+ "failed to enable spdifin reference clock\n");
+ goto pclk_err;
+ }
+
+ regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN,
+ SPDIFIN_CTRL0_EN);
+
return 0;
+
+pclk_err:
+ clk_disable_unprepare(priv->pclk);
+ return ret;
}
static int axg_spdifin_dai_remove(struct snd_soc_dai *dai)
{
struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+ regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, 0);
+ clk_disable_unprepare(priv->refclk);
clk_disable_unprepare(priv->pclk);
return 0;
}
static const struct snd_soc_dai_ops axg_spdifin_ops = {
.prepare = axg_spdifin_prepare,
- .startup = axg_spdifin_startup,
- .shutdown = axg_spdifin_shutdown,
};
static int axg_spdifin_iec958_info(struct snd_kcontrol *kcontrol,
diff --git a/sound/soc/meson/axg-tdm-formatter.c b/sound/soc/meson/axg-tdm-formatter.c
index f7e8e9da68a0..981dbaaa6f3b 100644
--- a/sound/soc/meson/axg-tdm-formatter.c
+++ b/sound/soc/meson/axg-tdm-formatter.c
@@ -30,27 +30,32 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map,
struct axg_tdm_stream *ts,
unsigned int offset)
{
- unsigned int val, ch = ts->channels;
- unsigned long mask;
- int i, j;
+ unsigned int ch = ts->channels;
+ u32 val[AXG_TDM_NUM_LANES];
+ int i, j, k;
+
+ /*
+	 * We need to mimic the slot distribution used by the HW to keep the
+	 * channel placement consistent regardless of the number of channels
+ * in the stream. This is why the odd algorithm below is used.
+ */
+ memset(val, 0, sizeof(*val) * AXG_TDM_NUM_LANES);
/*
* Distribute the channels of the stream over the available slots
- * of each TDM lane
+ * of each TDM lane. We need to go over the 32 slots ...
*/
- for (i = 0; i < AXG_TDM_NUM_LANES; i++) {
- val = 0;
- mask = ts->mask[i];
-
- for (j = find_first_bit(&mask, 32);
- (j < 32) && ch;
- j = find_next_bit(&mask, 32, j + 1)) {
- val |= 1 << j;
- ch -= 1;
+ for (i = 0; (i < 32) && ch; i += 2) {
+ /* ... of all the lanes ... */
+ for (j = 0; j < AXG_TDM_NUM_LANES; j++) {
+ /* ... then distribute the channels in pairs */
+ for (k = 0; k < 2; k++) {
+ if ((BIT(i + k) & ts->mask[j]) && ch) {
+ val[j] |= BIT(i + k);
+ ch -= 1;
+ }
+ }
}
-
- regmap_write(map, offset, val);
- offset += regmap_get_reg_stride(map);
}
/*
@@ -63,6 +68,11 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map,
return -EINVAL;
}
+ for (i = 0; i < AXG_TDM_NUM_LANES; i++) {
+ regmap_write(map, offset, val[i]);
+ offset += regmap_get_reg_stride(map);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks);
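
The rewritten loop is easier to reason about outside the driver. A standalone model of the new distribution, assuming four lanes as in the AXG hardware: channels are handed out two slots at a time, cycling across lanes, so a channel's slot does not move when the stream's channel count changes.

	#include <stdint.h>

	#define NUM_LANES 4	/* stands in for AXG_TDM_NUM_LANES */

	static void distribute(uint32_t val[NUM_LANES],
			       const uint32_t mask[NUM_LANES],
			       unsigned int ch)
	{
		int i, j, k;

		for (j = 0; j < NUM_LANES; j++)
			val[j] = 0;

		for (i = 0; i < 32 && ch; i += 2)	/* slot pairs */
			for (j = 0; j < NUM_LANES; j++)	/* across lanes */
				for (k = 0; k < 2; k++)
					if ((mask[j] & (1u << (i + k))) && ch) {
						val[j] |= 1u << (i + k);
						ch--;
					}
	}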
diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
index f5a431b8de6c..34aff050caf2 100644
--- a/sound/soc/meson/axg-tdm-interface.c
+++ b/sound/soc/meson/axg-tdm-interface.c
@@ -266,8 +266,8 @@ static int axg_tdm_iface_set_sclk(struct snd_soc_dai *dai,
srate = iface->slots * iface->slot_width * params_rate(params);
if (!iface->mclk_rate) {
- /* If no specific mclk is requested, default to bit clock * 4 */
- clk_set_rate(iface->mclk, 4 * srate);
+ /* If no specific mclk is requested, default to bit clock * 2 */
+ clk_set_rate(iface->mclk, 2 * srate);
} else {
/* Check if we can actually get the bit clock from mclk */
if (iface->mclk_rate % srate) {
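
For scale, with the common 2 slots of 32 bits at 48 kHz (illustrative figures):

	srate = 2 * 32 * 48000 = 3.072 MHz	(bit clock)
	mclk  = 2 * srate      = 6.144 MHz	(128 * fs, a standard audio master clock)

The previous 4 * srate default gave 12.288 MHz (256 * fs); both are valid mclk multiples, the hunk simply lowers the default multiplier from 4 to 2.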
diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c
index cbe47e0cae42..c875c350be07 100644
--- a/sound/soc/meson/g12a-tohdmitx.c
+++ b/sound/soc/meson/g12a-tohdmitx.c
@@ -12,112 +12,54 @@
#include <sound/soc-dai.h>
#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
+#include "meson-codec-glue.h"
#define G12A_TOHDMITX_DRV_NAME "g12a-tohdmitx"
#define TOHDMITX_CTRL0 0x0
#define CTRL0_ENABLE_SHIFT 31
-#define CTRL0_I2S_DAT_SEL GENMASK(13, 12)
+#define CTRL0_I2S_DAT_SEL_SHIFT 12
+#define CTRL0_I2S_DAT_SEL (0x3 << CTRL0_I2S_DAT_SEL_SHIFT)
#define CTRL0_I2S_LRCLK_SEL GENMASK(9, 8)
#define CTRL0_I2S_BLK_CAP_INV BIT(7)
#define CTRL0_I2S_BCLK_O_INV BIT(6)
#define CTRL0_I2S_BCLK_SEL GENMASK(5, 4)
#define CTRL0_SPDIF_CLK_CAP_INV BIT(3)
#define CTRL0_SPDIF_CLK_O_INV BIT(2)
-#define CTRL0_SPDIF_SEL BIT(1)
+#define CTRL0_SPDIF_SEL_SHIFT 1
+#define CTRL0_SPDIF_SEL (0x1 << CTRL0_SPDIF_SEL_SHIFT)
#define CTRL0_SPDIF_CLK_SEL BIT(0)
-struct g12a_tohdmitx_input {
- struct snd_soc_pcm_stream params;
- unsigned int fmt;
-};
-
-static struct snd_soc_dapm_widget *
-g12a_tohdmitx_get_input(struct snd_soc_dapm_widget *w)
-{
- struct snd_soc_dapm_path *p = NULL;
- struct snd_soc_dapm_widget *in;
-
- snd_soc_dapm_widget_for_each_source_path(w, p) {
- if (!p->connect)
- continue;
-
- /* Check that we still are in the same component */
- if (snd_soc_dapm_to_component(w->dapm) !=
- snd_soc_dapm_to_component(p->source->dapm))
- continue;
-
- if (p->source->id == snd_soc_dapm_dai_in)
- return p->source;
-
- in = g12a_tohdmitx_get_input(p->source);
- if (in)
- return in;
- }
-
- return NULL;
-}
-
-static struct g12a_tohdmitx_input *
-g12a_tohdmitx_get_input_data(struct snd_soc_dapm_widget *w)
-{
- struct snd_soc_dapm_widget *in =
- g12a_tohdmitx_get_input(w);
- struct snd_soc_dai *dai;
-
- if (WARN_ON(!in))
- return NULL;
-
- dai = in->priv;
-
- return dai->playback_dma_data;
-}
-
static const char * const g12a_tohdmitx_i2s_mux_texts[] = {
"I2S A", "I2S B", "I2S C",
};
-static SOC_ENUM_SINGLE_EXT_DECL(g12a_tohdmitx_i2s_mux_enum,
- g12a_tohdmitx_i2s_mux_texts);
-
-static int g12a_tohdmitx_get_input_val(struct snd_soc_component *component,
- unsigned int mask)
-{
- unsigned int val;
-
- snd_soc_component_read(component, TOHDMITX_CTRL0, &val);
- return (val & mask) >> __ffs(mask);
-}
-
-static int g12a_tohdmitx_i2s_mux_get_enum(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component =
- snd_soc_dapm_kcontrol_component(kcontrol);
-
- ucontrol->value.enumerated.item[0] =
- g12a_tohdmitx_get_input_val(component, CTRL0_I2S_DAT_SEL);
-
- return 0;
-}
-
static int g12a_tohdmitx_i2s_mux_put_enum(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component =
snd_soc_dapm_kcontrol_component(kcontrol);
struct snd_soc_dapm_context *dapm =
snd_soc_dapm_kcontrol_dapm(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned int mux = ucontrol->value.enumerated.item[0];
- unsigned int val = g12a_tohdmitx_get_input_val(component,
- CTRL0_I2S_DAT_SEL);
+ unsigned int mux, changed;
+
+ if (ucontrol->value.enumerated.item[0] >= e->items)
+ return -EINVAL;
+
+ mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
+ changed = snd_soc_component_test_bits(component, e->reg,
+ CTRL0_I2S_DAT_SEL,
+ FIELD_PREP(CTRL0_I2S_DAT_SEL,
+ mux));
+
+ if (!changed)
+ return 0;
/* Force disconnect of the mux while updating */
- if (val != mux)
- snd_soc_dapm_mux_update_power(dapm, kcontrol, 0, NULL, NULL);
+ snd_soc_dapm_mux_update_power(dapm, kcontrol, 0, NULL, NULL);
- snd_soc_component_update_bits(component, TOHDMITX_CTRL0,
+ snd_soc_component_update_bits(component, e->reg,
CTRL0_I2S_DAT_SEL |
CTRL0_I2S_LRCLK_SEL |
CTRL0_I2S_BCLK_SEL,
@@ -130,30 +72,19 @@ static int g12a_tohdmitx_i2s_mux_put_enum(struct snd_kcontrol *kcontrol,
return 1;
}
+static SOC_ENUM_SINGLE_DECL(g12a_tohdmitx_i2s_mux_enum, TOHDMITX_CTRL0,
+ CTRL0_I2S_DAT_SEL_SHIFT,
+ g12a_tohdmitx_i2s_mux_texts);
+
static const struct snd_kcontrol_new g12a_tohdmitx_i2s_mux =
SOC_DAPM_ENUM_EXT("I2S Source", g12a_tohdmitx_i2s_mux_enum,
- g12a_tohdmitx_i2s_mux_get_enum,
+ snd_soc_dapm_get_enum_double,
g12a_tohdmitx_i2s_mux_put_enum);
static const char * const g12a_tohdmitx_spdif_mux_texts[] = {
"SPDIF A", "SPDIF B",
};
-static SOC_ENUM_SINGLE_EXT_DECL(g12a_tohdmitx_spdif_mux_enum,
- g12a_tohdmitx_spdif_mux_texts);
-
-static int g12a_tohdmitx_spdif_mux_get_enum(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component =
- snd_soc_dapm_kcontrol_component(kcontrol);
-
- ucontrol->value.enumerated.item[0] =
- g12a_tohdmitx_get_input_val(component, CTRL0_SPDIF_SEL);
-
- return 0;
-}
-
static int g12a_tohdmitx_spdif_mux_put_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -162,13 +93,21 @@ static int g12a_tohdmitx_spdif_mux_put_enum(struct snd_kcontrol *kcontrol,
struct snd_soc_dapm_context *dapm =
snd_soc_dapm_kcontrol_dapm(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned int mux = ucontrol->value.enumerated.item[0];
- unsigned int val = g12a_tohdmitx_get_input_val(component,
- CTRL0_SPDIF_SEL);
+ unsigned int mux, changed;
+
+ if (ucontrol->value.enumerated.item[0] >= e->items)
+ return -EINVAL;
+
+ mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
+ changed = snd_soc_component_test_bits(component, TOHDMITX_CTRL0,
+ CTRL0_SPDIF_SEL,
+ FIELD_PREP(CTRL0_SPDIF_SEL, mux));
+
+ if (!changed)
+ return 0;
/* Force disconnect of the mux while updating */
- if (val != mux)
- snd_soc_dapm_mux_update_power(dapm, kcontrol, 0, NULL, NULL);
+ snd_soc_dapm_mux_update_power(dapm, kcontrol, 0, NULL, NULL);
snd_soc_component_update_bits(component, TOHDMITX_CTRL0,
CTRL0_SPDIF_SEL |
@@ -178,12 +117,16 @@ static int g12a_tohdmitx_spdif_mux_put_enum(struct snd_kcontrol *kcontrol,
snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
- return 0;
+ return 1;
}
+static SOC_ENUM_SINGLE_DECL(g12a_tohdmitx_spdif_mux_enum, TOHDMITX_CTRL0,
+ CTRL0_SPDIF_SEL_SHIFT,
+ g12a_tohdmitx_spdif_mux_texts);
+
static const struct snd_kcontrol_new g12a_tohdmitx_spdif_mux =
SOC_DAPM_ENUM_EXT("SPDIF Source", g12a_tohdmitx_spdif_mux_enum,
- g12a_tohdmitx_spdif_mux_get_enum,
+ snd_soc_dapm_get_enum_double,
g12a_tohdmitx_spdif_mux_put_enum);
static const struct snd_kcontrol_new g12a_tohdmitx_out_enable =
@@ -201,83 +144,13 @@ static const struct snd_soc_dapm_widget g12a_tohdmitx_widgets[] = {
&g12a_tohdmitx_out_enable),
};
-static int g12a_tohdmitx_input_probe(struct snd_soc_dai *dai)
-{
- struct g12a_tohdmitx_input *data;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- dai->playback_dma_data = data;
- return 0;
-}
-
-static int g12a_tohdmitx_input_remove(struct snd_soc_dai *dai)
-{
- kfree(dai->playback_dma_data);
- return 0;
-}
-
-static int g12a_tohdmitx_input_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- struct g12a_tohdmitx_input *data = dai->playback_dma_data;
-
- data->params.rates = snd_pcm_rate_to_rate_bit(params_rate(params));
- data->params.rate_min = params_rate(params);
- data->params.rate_max = params_rate(params);
- data->params.formats = 1 << params_format(params);
- data->params.channels_min = params_channels(params);
- data->params.channels_max = params_channels(params);
- data->params.sig_bits = dai->driver->playback.sig_bits;
-
- return 0;
-}
-
-
-static int g12a_tohdmitx_input_set_fmt(struct snd_soc_dai *dai,
- unsigned int fmt)
-{
- struct g12a_tohdmitx_input *data = dai->playback_dma_data;
-
- /* Save the source stream format for the downstream link */
- data->fmt = fmt;
- return 0;
-}
-
-static int g12a_tohdmitx_output_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct g12a_tohdmitx_input *in_data =
- g12a_tohdmitx_get_input_data(dai->capture_widget);
-
- if (!in_data)
- return -ENODEV;
-
- if (WARN_ON(!rtd->dai_link->params)) {
- dev_warn(dai->dev, "codec2codec link expected\n");
- return -EINVAL;
- }
-
- /* Replace link params with the input params */
- rtd->dai_link->params = &in_data->params;
-
- if (!in_data->fmt)
- return 0;
-
- return snd_soc_runtime_set_dai_fmt(rtd, in_data->fmt);
-}
-
static const struct snd_soc_dai_ops g12a_tohdmitx_input_ops = {
- .hw_params = g12a_tohdmitx_input_hw_params,
- .set_fmt = g12a_tohdmitx_input_set_fmt,
+ .hw_params = meson_codec_glue_input_hw_params,
+ .set_fmt = meson_codec_glue_input_set_fmt,
};
static const struct snd_soc_dai_ops g12a_tohdmitx_output_ops = {
- .startup = g12a_tohdmitx_output_startup,
+ .startup = meson_codec_glue_output_startup,
};
#define TOHDMITX_SPDIF_FORMATS \
@@ -304,8 +177,8 @@ static const struct snd_soc_dai_ops g12a_tohdmitx_output_ops = {
.id = (xid), \
.playback = TOHDMITX_STREAM(xname, "Playback", xfmt, xchmax), \
.ops = &g12a_tohdmitx_input_ops, \
- .probe = g12a_tohdmitx_input_probe, \
- .remove = g12a_tohdmitx_input_remove, \
+ .probe = meson_codec_glue_input_dai_probe, \
+ .remove = meson_codec_glue_input_dai_remove, \
}
#define TOHDMITX_OUT(xname, xid, xfmt, xchmax) { \
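
Both reworked mux handlers above now follow the standard ALSA put-callback contract: -EINVAL for out-of-range input, 0 when nothing changed, 1 when the register was updated, so control-change notifications fire only on real changes. A hedged sketch of that contract, with the DAPM power updates elided:

	#include <sound/soc.h>
	#include <sound/soc-dapm.h>

	static int example_mux_put(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
	{
		struct snd_soc_component *c =
			snd_soc_dapm_kcontrol_component(kcontrol);
		struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
		unsigned int item = ucontrol->value.enumerated.item[0];
		unsigned int val;

		if (item >= e->items)
			return -EINVAL;			/* reject bad input */

		val = snd_soc_enum_item_to_val(e, item) << e->shift_l;
		if (!snd_soc_component_test_bits(c, e->reg,
						 e->mask << e->shift_l, val))
			return 0;			/* nothing to do */

		snd_soc_component_update_bits(c, e->reg,
					      e->mask << e->shift_l, val);
		return 1;				/* value changed */
	}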
diff --git a/sound/soc/meson/meson-codec-glue.c b/sound/soc/meson/meson-codec-glue.c
new file mode 100644
index 000000000000..524a33472337
--- /dev/null
+++ b/sound/soc/meson/meson-codec-glue.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/module.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "meson-codec-glue.h"
+
+static struct snd_soc_dapm_widget *
+meson_codec_glue_get_input(struct snd_soc_dapm_widget *w)
+{
+ struct snd_soc_dapm_path *p = NULL;
+ struct snd_soc_dapm_widget *in;
+
+ snd_soc_dapm_widget_for_each_source_path(w, p) {
+ if (!p->connect)
+ continue;
+
+ /* Check that we still are in the same component */
+ if (snd_soc_dapm_to_component(w->dapm) !=
+ snd_soc_dapm_to_component(p->source->dapm))
+ continue;
+
+ if (p->source->id == snd_soc_dapm_dai_in)
+ return p->source;
+
+ in = meson_codec_glue_get_input(p->source);
+ if (in)
+ return in;
+ }
+
+ return NULL;
+}
+
+static void meson_codec_glue_input_set_data(struct snd_soc_dai *dai,
+ struct meson_codec_glue_input *data)
+{
+ dai->playback_dma_data = data;
+}
+
+struct meson_codec_glue_input *
+meson_codec_glue_input_get_data(struct snd_soc_dai *dai)
+{
+ return dai->playback_dma_data;
+}
+EXPORT_SYMBOL_GPL(meson_codec_glue_input_get_data);
+
+static struct meson_codec_glue_input *
+meson_codec_glue_output_get_input_data(struct snd_soc_dapm_widget *w)
+{
+ struct snd_soc_dapm_widget *in =
+ meson_codec_glue_get_input(w);
+ struct snd_soc_dai *dai;
+
+ if (WARN_ON(!in))
+ return NULL;
+
+ dai = in->priv;
+
+ return meson_codec_glue_input_get_data(dai);
+}
+
+int meson_codec_glue_input_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct meson_codec_glue_input *data =
+ meson_codec_glue_input_get_data(dai);
+
+ data->params.rates = snd_pcm_rate_to_rate_bit(params_rate(params));
+ data->params.rate_min = params_rate(params);
+ data->params.rate_max = params_rate(params);
+ data->params.formats = 1ULL << (__force int) params_format(params);
+ data->params.channels_min = params_channels(params);
+ data->params.channels_max = params_channels(params);
+ data->params.sig_bits = dai->driver->playback.sig_bits;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(meson_codec_glue_input_hw_params);
+
+int meson_codec_glue_input_set_fmt(struct snd_soc_dai *dai,
+ unsigned int fmt)
+{
+ struct meson_codec_glue_input *data =
+ meson_codec_glue_input_get_data(dai);
+
+ /* Save the source stream format for the downstream link */
+ data->fmt = fmt;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(meson_codec_glue_input_set_fmt);
+
+int meson_codec_glue_output_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct meson_codec_glue_input *in_data =
+ meson_codec_glue_output_get_input_data(dai->capture_widget);
+
+ if (!in_data)
+ return -ENODEV;
+
+ if (WARN_ON(!rtd->dai_link->params)) {
+ dev_warn(dai->dev, "codec2codec link expected\n");
+ return -EINVAL;
+ }
+
+ /* Replace link params with the input params */
+ rtd->dai_link->params = &in_data->params;
+
+ if (!in_data->fmt)
+ return 0;
+
+ return snd_soc_runtime_set_dai_fmt(rtd, in_data->fmt);
+}
+EXPORT_SYMBOL_GPL(meson_codec_glue_output_startup);
+
+int meson_codec_glue_input_dai_probe(struct snd_soc_dai *dai)
+{
+ struct meson_codec_glue_input *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ meson_codec_glue_input_set_data(dai, data);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(meson_codec_glue_input_dai_probe);
+
+int meson_codec_glue_input_dai_remove(struct snd_soc_dai *dai)
+{
+ struct meson_codec_glue_input *data =
+ meson_codec_glue_input_get_data(dai);
+
+ kfree(data);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(meson_codec_glue_input_dai_remove);
+
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_DESCRIPTION("Amlogic Codec Glue Helpers");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/meson/meson-codec-glue.h b/sound/soc/meson/meson-codec-glue.h
new file mode 100644
index 000000000000..07f99446c0c6
--- /dev/null
+++ b/sound/soc/meson/meson-codec-glue.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2018 Baylibre SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef _MESON_CODEC_GLUE_H
+#define _MESON_CODEC_GLUE_H
+
+#include <sound/soc.h>
+
+struct meson_codec_glue_input {
+ struct snd_soc_pcm_stream params;
+ unsigned int fmt;
+};
+
+/* Input helpers */
+struct meson_codec_glue_input *
+meson_codec_glue_input_get_data(struct snd_soc_dai *dai);
+int meson_codec_glue_input_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai);
+int meson_codec_glue_input_set_fmt(struct snd_soc_dai *dai,
+ unsigned int fmt);
+int meson_codec_glue_input_dai_probe(struct snd_soc_dai *dai);
+int meson_codec_glue_input_dai_remove(struct snd_soc_dai *dai);
+
+/* Output helpers */
+int meson_codec_glue_output_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+
+#endif /* _MESON_CODEC_GLUE_H */
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 7096b5263e25..e9f9642e988f 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -85,7 +85,7 @@ static bool filter(struct dma_chan *chan, void *param)
devname = kasprintf(GFP_KERNEL, "%s.%d", dma_data->dma_res->name,
dma_data->ssp_id);
- if ((strcmp(dev_name(chan->device->dev), devname) == 0) &&
+ if (devname && (strcmp(dev_name(chan->device->dev), devname) == 0) &&
(chan->chan_id == dma_data->dma_res->start)) {
found = true;
}
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 5fdd1a24c232..ff3db623c476 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -797,7 +797,7 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
if (IS_ERR(priv->extclk)) {
ret = PTR_ERR(priv->extclk);
if (ret == -EPROBE_DEFER)
- return ret;
+ goto err_priv;
priv->extclk = NULL;
}
diff --git a/sound/soc/qcom/qdsp6/q6adm.c b/sound/soc/qcom/qdsp6/q6adm.c
index da242515e146..8e3539941fad 100644
--- a/sound/soc/qcom/qdsp6/q6adm.c
+++ b/sound/soc/qcom/qdsp6/q6adm.c
@@ -217,7 +217,7 @@ static struct q6copp *q6adm_alloc_copp(struct q6adm *adm, int port_idx)
idx = find_first_zero_bit(&adm->copp_bitmap[port_idx],
MAX_COPPS_PER_PORT);
- if (idx > MAX_COPPS_PER_PORT)
+ if (idx >= MAX_COPPS_PER_PORT)
return ERR_PTR(-EBUSY);
c = kzalloc(sizeof(*c), GFP_ATOMIC);
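
The q6adm fix is an off-by-one on the bitmap API: find_first_zero_bit() returns exactly the size argument when no zero bit exists, so only '>=' catches the bitmap-full case; '>' let an out-of-bounds index through. A standalone model of the return convention:

	/* Illustrative single-word model, not the kernel implementation. */
	static unsigned long model_find_first_zero_bit(unsigned long word,
						       unsigned long size)
	{
		unsigned long i;

		for (i = 0; i < size; i++)
			if (!(word & (1UL << i)))
				return i;
		return size;	/* full: returns size, never size + 1 */
	}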
diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c
index 1707414cfa92..9f6cbaf3c0e9 100644
--- a/sound/soc/rockchip/rockchip_pdm.c
+++ b/sound/soc/rockchip/rockchip_pdm.c
@@ -368,6 +368,7 @@ static int rockchip_pdm_runtime_resume(struct device *dev)
ret = clk_prepare_enable(pdm->hclk);
if (ret) {
+ clk_disable_unprepare(pdm->clk);
dev_err(pdm->dev, "hclock enable failed %d\n", ret);
return ret;
}
diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
index 6635145a26c4..b2b4e5b7739a 100644
--- a/sound/soc/rockchip/rockchip_spdif.c
+++ b/sound/soc/rockchip/rockchip_spdif.c
@@ -86,6 +86,7 @@ static int __maybe_unused rk_spdif_runtime_resume(struct device *dev)
ret = clk_prepare_enable(spdif->hclk);
if (ret) {
+ clk_disable_unprepare(spdif->mclk);
dev_err(spdif->dev, "hclk clock enable failed %d\n", ret);
return ret;
}
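
Both Rockchip resume fixes are the same two-clock unwind: if the second clk_prepare_enable() fails, the first clock must be disabled before returning, or each failed resume leaks an enable count. A minimal sketch with illustrative names:

	#include <linux/clk.h>
	#include <linux/device.h>

	struct example_priv {
		struct clk *clk_a;
		struct clk *clk_b;
	};

	static int example_runtime_resume(struct device *dev)
	{
		struct example_priv *p = dev_get_drvdata(dev);
		int ret;

		ret = clk_prepare_enable(p->clk_a);
		if (ret)
			return ret;

		ret = clk_prepare_enable(p->clk_b);
		if (ret) {
			clk_disable_unprepare(p->clk_a);	/* the added unwind */
			return ret;
		}
		return 0;
	}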
diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c
index 7647b3d4c0ba..25a8cfc27433 100644
--- a/sound/soc/sh/rcar/ctu.c
+++ b/sound/soc/sh/rcar/ctu.c
@@ -171,7 +171,11 @@ static int rsnd_ctu_init(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
{
- rsnd_mod_power_on(mod);
+ int ret;
+
+ ret = rsnd_mod_power_on(mod);
+ if (ret < 0)
+ return ret;
rsnd_ctu_activation(mod);
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index 8d91c0eb0880..53b2ad01222b 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -186,7 +186,11 @@ static int rsnd_dvc_init(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
{
- rsnd_mod_power_on(mod);
+ int ret;
+
+ ret = rsnd_mod_power_on(mod);
+ if (ret < 0)
+ return ret;
rsnd_dvc_activation(mod);
diff --git a/sound/soc/sh/rcar/mix.c b/sound/soc/sh/rcar/mix.c
index a3e0370f5704..c6fe2595c373 100644
--- a/sound/soc/sh/rcar/mix.c
+++ b/sound/soc/sh/rcar/mix.c
@@ -146,7 +146,11 @@ static int rsnd_mix_init(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
{
- rsnd_mod_power_on(mod);
+ int ret;
+
+ ret = rsnd_mod_power_on(mod);
+ if (ret < 0)
+ return ret;
rsnd_mix_activation(mod);
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 585ffba0244b..fd52e26a3808 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -454,11 +454,14 @@ static int rsnd_src_init(struct rsnd_mod *mod,
struct rsnd_priv *priv)
{
struct rsnd_src *src = rsnd_mod_to_src(mod);
+ int ret;
/* reset sync convert_rate */
src->sync.val = 0;
- rsnd_mod_power_on(mod);
+ ret = rsnd_mod_power_on(mod);
+ if (ret < 0)
+ return ret;
rsnd_src_activation(mod);
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 09af402ca31f..f8960bad2bd1 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -518,7 +518,9 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
ssi->usrcnt++;
- rsnd_mod_power_on(mod);
+ ret = rsnd_mod_power_on(mod);
+ if (ret < 0)
+ return ret;
rsnd_ssi_config_init(mod, io);
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index da6e40aef7b6..1e37bb7436ec 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -927,7 +927,7 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
rtd->fe_compr = 1;
if (rtd->dai_link->dpcm_playback)
be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
- else if (rtd->dai_link->dpcm_capture)
+ if (rtd->dai_link->dpcm_capture)
be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
} else {
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 093ab32ea2c3..2115fd412c78 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3512,10 +3512,23 @@ EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_link_codecs);
static int __init snd_soc_init(void)
{
+ int ret;
+
snd_soc_debugfs_init();
- snd_soc_util_init();
+ ret = snd_soc_util_init();
+ if (ret)
+ goto err_util_init;
- return platform_driver_register(&soc_driver);
+ ret = platform_driver_register(&soc_driver);
+ if (ret)
+ goto err_register;
+ return 0;
+
+err_register:
+ snd_soc_util_exit();
+err_util_init:
+ snd_soc_debugfs_exit();
+ return ret;
}
module_init(snd_soc_init);
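
snd_soc_init() now unwinds in strict reverse order of construction, the usual shape for module init paths. A hedged sketch with stand-in helpers (none of these names are the real API):

	static void debugfs_setup(void) { }		/* step 1 */
	static void debugfs_teardown(void) { }
	static int util_setup(void) { return 0; }	/* step 2 */
	static void util_teardown(void) { }
	static int driver_setup(void) { return 0; }	/* step 3 */

	static int example_init(void)
	{
		int ret;

		debugfs_setup();

		ret = util_setup();
		if (ret)
			goto err_util;

		ret = driver_setup();
		if (ret)
			goto err_driver;
		return 0;

	err_driver:
		util_teardown();	/* undo step 2 */
	err_util:
		debugfs_teardown();	/* undo step 1 */
		return ret;
	}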
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 453b61b42dd9..e01f3bf3ef17 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -445,7 +445,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
val = ucontrol->value.integer.value[0];
if (mc->platform_max && val > mc->platform_max)
return -EINVAL;
- if (val > max - min)
+ if (val > max)
return -EINVAL;
if (val < 0)
return -EINVAL;
@@ -458,8 +458,15 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
return err;
if (snd_soc_volsw_is_stereo(mc)) {
+ val2 = ucontrol->value.integer.value[1];
+
+ if (mc->platform_max && val2 > mc->platform_max)
+ return -EINVAL;
+ if (val2 > max)
+ return -EINVAL;
+
val_mask = mask << rshift;
- val2 = (ucontrol->value.integer.value[1] + min) & mask;
+ val2 = (val2 + min) & mask;
val2 = val2 << rshift;
err = snd_soc_component_update_bits(component, reg2, val_mask,
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 1196167364d4..2f1ab70a68fc 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1201,6 +1201,8 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
return;
be_substream = snd_soc_dpcm_get_substream(be, stream);
+ if (!be_substream)
+ return;
for_each_dpcm_fe(be, stream, dpcm) {
if (dpcm->fe == fe)
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 54dcece52b0c..abcc5d97b134 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -174,7 +174,7 @@ int __init snd_soc_util_init(void)
return ret;
}
-void __exit snd_soc_util_exit(void)
+void snd_soc_util_exit(void)
{
platform_driver_unregister(&soc_dummy_driver);
platform_device_unregister(soc_dummy_dev);
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index b3cdd10c83ae..c30e450fa970 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -211,6 +211,10 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream,
int stream_tag;
int ret;
+ link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
+ if (!link)
+ return -EINVAL;
+
/* get stored dma data if resuming from system suspend */
link_dev = snd_soc_dai_get_dma_data(dai, substream);
if (!link_dev) {
@@ -231,10 +235,6 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream,
if (ret < 0)
return ret;
- link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
- if (!link)
- return -EINVAL;
-
/* set the stream tag in the codec dai dma params */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
snd_soc_dai_set_tdm_slot(codec_dai, stream_tag, 0, 0, 0);
diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
index cbe598b0fb10..680d64e0d69f 100644
--- a/sound/soc/sunxi/sun4i-spdif.c
+++ b/sound/soc/sunxi/sun4i-spdif.c
@@ -464,6 +464,11 @@ static const struct of_device_id sun4i_spdif_of_match[] = {
.compatible = "allwinner,sun50i-h6-spdif",
.data = &sun50i_h6_spdif_quirks,
},
+ {
+ .compatible = "allwinner,sun50i-h616-spdif",
+ /* Essentially the same as the H6, but without RX */
+ .data = &sun50i_h6_spdif_quirks,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun4i_spdif_of_match);
diff --git a/sound/soc/ti/ams-delta.c b/sound/soc/ti/ams-delta.c
index 8e2fb81ad05c..84cbd7446b1e 100644
--- a/sound/soc/ti/ams-delta.c
+++ b/sound/soc/ti/ams-delta.c
@@ -303,7 +303,7 @@ static int cx81801_open(struct tty_struct *tty)
static void cx81801_close(struct tty_struct *tty)
{
struct snd_soc_component *component = tty->disc_data;
- struct snd_soc_dapm_context *dapm = &component->card->dapm;
+ struct snd_soc_dapm_context *dapm;
del_timer_sync(&cx81801_timer);
@@ -315,6 +315,8 @@ static void cx81801_close(struct tty_struct *tty)
v253_ops.close(tty);
+ dapm = &component->card->dapm;
+
/* Revert back to default audio input/output constellation */
snd_soc_dapm_mutex_lock(dapm);
diff --git a/sound/soc/ti/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c
index 3273b317fa3b..3e8ed05f3ebd 100644
--- a/sound/soc/ti/omap-mcbsp.c
+++ b/sound/soc/ti/omap-mcbsp.c
@@ -74,7 +74,8 @@ static int omap2_mcbsp_set_clks_src(struct omap_mcbsp *mcbsp, u8 fck_src_id)
return -EINVAL;
}
- pm_runtime_put_sync(mcbsp->dev);
+ if (mcbsp->active)
+ pm_runtime_put_sync(mcbsp->dev);
r = clk_set_parent(mcbsp->fclk, fck_src);
if (r) {
@@ -84,7 +85,8 @@ static int omap2_mcbsp_set_clks_src(struct omap_mcbsp *mcbsp, u8 fck_src_id)
return r;
}
- pm_runtime_get_sync(mcbsp->dev);
+ if (mcbsp->active)
+ pm_runtime_get_sync(mcbsp->dev);
clk_put(fck_src);
diff --git a/sound/soc/xilinx/Kconfig b/sound/soc/xilinx/Kconfig
index 69973179ef15..3660a42f7673 100644
--- a/sound/soc/xilinx/Kconfig
+++ b/sound/soc/xilinx/Kconfig
@@ -1,4 +1,19 @@
# SPDX-License-Identifier: GPL-2.0-only
+config SND_SOC_XILINX_DP
+	tristate "Audio support for the Xilinx DisplayPort"
+ select SND_DMAENGINE_PCM
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ help
+	  Audio support for the Xilinx DisplayPort.
+
+config SND_SOC_XILINX_SDI
+	tristate "Audio support for the Xilinx SDI"
+ depends on DRM_XLNX_SDI
+ depends on VIDEO_XILINX_SDIRXSS
+ help
+	  Select this option to enable Xilinx SDI Audio. This enables
+	  SDI audio playback and capture using Xilinx soft IP.
+
config SND_SOC_XILINX_I2S
tristate "Audio support for the Xilinx I2S"
help
@@ -7,6 +22,26 @@ config SND_SOC_XILINX_I2S
mode, IP receives audio in AES format, extracts PCM and sends
PCM data. In receiver mode, IP receives PCM audio and
encapsulates PCM in AES format and sends AES data.
+	  I2S playback and capture using Xilinx soft IP.
+
+config SND_SOC_XILINX_SPDIF
+	tristate "Audio support for the Xilinx SPDIF"
+ help
+ Select this option to enable Xilinx SPDIF Audio.
+	  Enabling this provides one of the components required in an
+	  ASoC audio pipeline.
+	  This supports playback and capture use cases.
+
+config SND_SOC_XILINX_PL_SND_CARD
+	tristate "Audio support for the Xilinx PL sound card"
+ depends on SND_SOC_XILINX_AUDIO_FORMATTER
+ depends on SND_SOC_XILINX_I2S
+ depends on SND_SOC_XILINX_SDI
+ select SND_SOC_HDMI_CODEC
+ help
+ Select this option to enable Xilinx PL sound card
+	  support. This enables a sound card using Xilinx soft IPs
+	  in the audio pipeline.
config SND_SOC_XILINX_AUDIO_FORMATTER
	tristate "Audio support for the Xilinx audio formatter"
diff --git a/sound/soc/xilinx/Makefile b/sound/soc/xilinx/Makefile
index be7652ce7c13..aea78bae7e86 100644
--- a/sound/soc/xilinx/Makefile
+++ b/sound/soc/xilinx/Makefile
@@ -1,6 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SND_SOC_XILINX_DP) += xilinx-dp-pcm.o
+obj-$(CONFIG_SND_SOC_XILINX_DP) += xilinx-dp-codec.o
+obj-$(CONFIG_SND_SOC_XILINX_DP) += xilinx-dp-card.o
+obj-$(CONFIG_SND_SOC_XILINX_SDI) += xlnx_sdi_audio.o
snd-soc-xlnx-i2s-objs := xlnx_i2s.o
obj-$(CONFIG_SND_SOC_XILINX_I2S) += snd-soc-xlnx-i2s.o
+obj-$(CONFIG_SND_SOC_XILINX_SPDIF) += xlnx_spdif.o
+obj-$(CONFIG_SND_SOC_XILINX_PL_SND_CARD) += xlnx_pl_snd_card.o
snd-soc-xlnx-formatter-pcm-objs := xlnx_formatter_pcm.o
obj-$(CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER) += snd-soc-xlnx-formatter-pcm.o
snd-soc-xlnx-spdif-objs := xlnx_spdif.o
diff --git a/sound/soc/xilinx/xilinx-dp-card.c b/sound/soc/xilinx/xilinx-dp-card.c
new file mode 100644
index 000000000000..f216b40c647d
--- /dev/null
+++ b/sound/soc/xilinx/xilinx-dp-card.c
@@ -0,0 +1,118 @@
+/*
+ * Xilinx DisplayPort SoC Sound Card support
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <sound/soc.h>
+
+static int xilinx_dp_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 256);
+ return 0;
+}
+
+static const struct snd_soc_ops xilinx_dp_ops = {
+ .startup = xilinx_dp_startup,
+};
+
+SND_SOC_DAILINK_DEFS(pcm,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "xilinx-dp-snd-codec-dai")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+static struct snd_soc_dai_link xilinx_dp_dai_links[] = {
+ {
+ .name = "xilinx-dp0",
+ .stream_name = "xilinx-dp0",
+ .ops = &xilinx_dp_ops,
+ SND_SOC_DAILINK_REG(pcm),
+ },
+ {
+ .name = "xilinx-dp1",
+ .stream_name = "xilinx-dp1",
+ .ops = &xilinx_dp_ops,
+ SND_SOC_DAILINK_REG(pcm),
+ },
+
+};
+
+static struct snd_soc_card xilinx_dp_card = {
+ .name = "DisplayPort monitor",
+ .owner = THIS_MODULE,
+ .dai_link = xilinx_dp_dai_links,
+ .num_links = 2,
+};
+
+static int xilinx_dp_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &xilinx_dp_card;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *codec, *pcm;
+ int ret;
+
+ card->dev = &pdev->dev;
+
+ codec = of_parse_phandle(node, "xlnx,dp-snd-codec", 0);
+ if (!codec)
+ return -ENODEV;
+
+ pcm = of_parse_phandle(node, "xlnx,dp-snd-pcm", 0);
+ if (!pcm)
+ return -ENODEV;
+ xilinx_dp_dai_links[0].platforms->of_node = pcm;
+ xilinx_dp_dai_links[0].cpus->of_node = codec;
+ xilinx_dp_dai_links[0].codecs->of_node = codec;
+
+ pcm = of_parse_phandle(node, "xlnx,dp-snd-pcm", 1);
+ if (!pcm)
+ return -ENODEV;
+ xilinx_dp_dai_links[1].platforms->of_node = pcm;
+ xilinx_dp_dai_links[1].cpus->of_node = codec;
+ xilinx_dp_dai_links[1].codecs->of_node = codec;
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "Xilinx DisplayPort Sound Card probed\n");
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_dp_of_match[] = {
+ { .compatible = "xlnx,dp-snd-card", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xilinx_dp_of_match);
+
+static struct platform_driver xilinx_dp_aud_driver = {
+ .driver = {
+ .name = "xilinx-dp-snd-card",
+ .of_match_table = xilinx_dp_of_match,
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = xilinx_dp_probe,
+};
+module_platform_driver(xilinx_dp_aud_driver);
+
+MODULE_DESCRIPTION("Xilinx DisplayPort Sound Card module");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/xilinx/xilinx-dp-codec.c b/sound/soc/xilinx/xilinx-dp-codec.c
new file mode 100644
index 000000000000..af6e6b08c415
--- /dev/null
+++ b/sound/soc/xilinx/xilinx-dp-codec.c
@@ -0,0 +1,178 @@
+/*
+ * Xilinx DisplayPort Sound Codec support
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <sound/soc.h>
+
+/**
+ * struct xilinx_dp_codec - DisplayPort codec
+ * @aud_clk: audio clock
+ */
+struct xilinx_dp_codec {
+ struct clk *aud_clk;
+};
+
+struct xilinx_dp_codec_fmt {
+ unsigned long rate;
+ unsigned int snd_rate;
+};
+
+static struct snd_soc_dai_driver xilinx_dp_codec_dai = {
+ .name = "xilinx-dp-snd-codec-dai",
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_44100,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+};
+
+static const struct xilinx_dp_codec_fmt rates[] = {
+ {
+ .rate = 48000 * 512,
+ .snd_rate = SNDRV_PCM_RATE_48000
+ },
+ {
+ .rate = 44100 * 512,
+ .snd_rate = SNDRV_PCM_RATE_44100
+ }
+};
+
+static const struct snd_soc_component_driver xilinx_dp_component_driver = {
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static int xilinx_dp_codec_probe(struct platform_device *pdev)
+{
+ struct xilinx_dp_codec *codec;
+ unsigned int i;
+ unsigned long rate;
+ int ret;
+
+ codec = devm_kzalloc(&pdev->dev, sizeof(*codec), GFP_KERNEL);
+ if (!codec)
+ return -ENOMEM;
+
+ codec->aud_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(codec->aud_clk))
+ return PTR_ERR(codec->aud_clk);
+
+ ret = clk_prepare_enable(codec->aud_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable the aud_clk\n");
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rates); i++) {
+ clk_disable_unprepare(codec->aud_clk);
+ ret = clk_set_rate(codec->aud_clk, rates[i].rate);
+ clk_prepare_enable(codec->aud_clk);
+ if (ret)
+ continue;
+
+ rate = clk_get_rate(codec->aud_clk);
+		/* Accept the rate if it is within +/- 10 Hz of the request */
+ if (abs(rates[i].rate - rate) < 10) {
+ xilinx_dp_codec_dai.playback.rates = rates[i].snd_rate;
+ break;
+ }
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get required clock freq\n");
+ goto error_clk;
+ }
+
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &xilinx_dp_component_driver,
+ &xilinx_dp_codec_dai, 1);
+ if (ret)
+ goto error_clk;
+
+ platform_set_drvdata(pdev, codec);
+
+ dev_info(&pdev->dev, "Xilinx DisplayPort Sound Codec probed\n");
+
+ return 0;
+
+error_clk:
+ clk_disable_unprepare(codec->aud_clk);
+ return ret;
+}
+
+static int xilinx_dp_codec_dev_remove(struct platform_device *pdev)
+{
+ struct xilinx_dp_codec *codec = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(codec->aud_clk);
+
+ return 0;
+}
+
+static int __maybe_unused xilinx_dp_codec_pm_suspend(struct device *dev)
+{
+ struct xilinx_dp_codec *codec = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(codec->aud_clk);
+
+ return 0;
+}
+
+static int __maybe_unused xilinx_dp_codec_pm_resume(struct device *dev)
+{
+ struct xilinx_dp_codec *codec = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(codec->aud_clk);
+ if (ret)
+ dev_err(dev, "failed to enable the aud_clk\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops xilinx_dp_codec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xilinx_dp_codec_pm_suspend,
+ xilinx_dp_codec_pm_resume)
+};
+
+static const struct of_device_id xilinx_dp_codec_of_match[] = {
+ { .compatible = "xlnx,dp-snd-codec", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_dp_codec_of_match);
+
+static struct platform_driver xilinx_dp_codec_driver = {
+ .driver = {
+ .name = "xilinx-dp-snd-codec",
+ .of_match_table = xilinx_dp_codec_of_match,
+ .pm = &xilinx_dp_codec_pm_ops,
+ },
+ .probe = xilinx_dp_codec_probe,
+ .remove = xilinx_dp_codec_dev_remove,
+};
+module_platform_driver(xilinx_dp_codec_driver);
+
+MODULE_DESCRIPTION("Xilinx DisplayPort Sound Codec module");
+MODULE_LICENSE("GPL v2");
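
The probe loop above walks a table of candidate clock rates and accepts the first one the clock tree can realize within +/- 10 Hz. A standalone model of that selection (the callback stands in for the clk_set_rate()/clk_get_rate() pair):

	/* Returns the first candidate achieved within 10 Hz, or -1. */
	static long pick_rate(const unsigned long *candidates, int n,
			      unsigned long (*try_rate)(unsigned long))
	{
		int i;

		for (i = 0; i < n; i++) {
			long diff = (long)try_rate(candidates[i]) -
				    (long)candidates[i];

			if (diff > -10 && diff < 10)
				return (long)candidates[i];
		}
		return -1;
	}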
diff --git a/sound/soc/xilinx/xilinx-dp-pcm.c b/sound/soc/xilinx/xilinx-dp-pcm.c
new file mode 100644
index 000000000000..fa8abe788cf7
--- /dev/null
+++ b/sound/soc/xilinx/xilinx-dp-pcm.c
@@ -0,0 +1,76 @@
+/*
+ * Xilinx DisplayPort Sound PCM support
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+static const struct snd_pcm_hardware xilinx_pcm_hw = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+ .buffer_bytes_max = 128 * 1024,
+ .period_bytes_min = 256,
+ .period_bytes_max = 1024 * 1024,
+ .periods_min = 2,
+ .periods_max = 256,
+};
+
+static const struct snd_dmaengine_pcm_config xilinx_dmaengine_pcm_config = {
+ .pcm_hardware = &xilinx_pcm_hw,
+ .prealloc_buffer_size = 64 * 1024,
+};
+
+static int xilinx_dp_pcm_probe(struct platform_device *pdev)
+{
+ int ret;
+
+	dev_set_name(&pdev->dev, "%s", pdev->dev.of_node->name);
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev,
+ &xilinx_dmaengine_pcm_config, 0);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "Xilinx DisplayPort Sound PCM probed\n");
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_dp_pcm_of_match[] = {
+ { .compatible = "xlnx,dp-snd-pcm", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_dp_pcm_of_match);
+
+static struct platform_driver xilinx_dp_pcm_driver = {
+ .driver = {
+ .name = "xilinx-dp-snd-pcm",
+ .of_match_table = xilinx_dp_pcm_of_match,
+ },
+ .probe = xilinx_dp_pcm_probe,
+};
+module_platform_driver(xilinx_dp_pcm_driver);
+
+MODULE_DESCRIPTION("Xilinx DisplayPort Sound PCM module");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c
index 4caee57114b8..303331243f34 100644
--- a/sound/soc/xilinx/xlnx_formatter_pcm.c
+++ b/sound/soc/xilinx/xlnx_formatter_pcm.c
@@ -17,6 +17,8 @@
#include <sound/soc.h>
#include <sound/pcm_params.h>
+#include "xlnx_snd_common.h"
+
#define DRV_NAME "xlnx_formatter_pcm"
#define XLNX_S2MM_OFFSET 0
@@ -57,7 +59,9 @@
#define CFG_S2MM_XFER_SHIFT 29
#define CFG_S2MM_PKG_MASK BIT(28)
+#define AUD_CTRL_DATA_WIDTH_MASK GENMASK(18, 16)
#define AUD_CTRL_DATA_WIDTH_SHIFT 16
+#define AUD_CTRL_ACTIVE_CH_MASK GENMASK(22, 19)
#define AUD_CTRL_ACTIVE_CH_SHIFT 19
#define PERIOD_CFG_PERIODS_SHIFT 16
@@ -83,7 +87,12 @@ struct xlnx_pcm_drv_data {
int mm2s_irq;
struct snd_pcm_substream *play_stream;
struct snd_pcm_substream *capture_stream;
+ struct platform_device *pdev;
+ struct device_node *nodes[XLNX_MAX_PATHS];
struct clk *axi_clk;
+ struct clk *mm2s_axis_clk;
+ struct clk *s2mm_axis_clk;
+ struct clk *aud_mclk;
};
/*
@@ -451,11 +460,13 @@ static int xlnx_formatter_pcm_hw_params(struct snd_pcm_substream *substream,
u32 aes_reg1_val, aes_reg2_val;
int status;
u64 size;
+ struct pl_card_data *prv;
struct snd_soc_pcm_runtime *prtd = substream->private_data;
struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
DRV_NAME);
struct snd_pcm_runtime *runtime = substream->runtime;
struct xlnx_pcm_stream_param *stream_data = runtime->private_data;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
active_ch = params_channels(params);
if (active_ch > stream_data->ch_limit)
@@ -488,6 +499,7 @@ static int xlnx_formatter_pcm_hw_params(struct snd_pcm_substream *substream,
writel(high, stream_data->mmio + XLNX_AUD_BUFF_ADDR_MSB);
val = readl(stream_data->mmio + XLNX_AUD_CTRL);
+ val &= ~AUD_CTRL_DATA_WIDTH_MASK;
bits_per_sample = params_width(params);
switch (bits_per_sample) {
case 8:
@@ -509,6 +521,7 @@ static int xlnx_formatter_pcm_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
+ val &= ~AUD_CTRL_ACTIVE_CH_MASK;
val |= active_ch << AUD_CTRL_ACTIVE_CH_SHIFT;
writel(val, stream_data->mmio + XLNX_AUD_CTRL);
@@ -518,6 +531,12 @@ static int xlnx_formatter_pcm_hw_params(struct snd_pcm_substream *substream,
bytes_per_ch = DIV_ROUND_UP(params_period_bytes(params), active_ch);
writel(bytes_per_ch, stream_data->mmio + XLNX_BYTES_PER_CH);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ prv = snd_soc_card_get_drvdata(rtd->card);
+ writel(prv->mclk_ratio,
+ stream_data->mmio + XLNX_AUD_FS_MULTIPLIER);
+ }
+
return 0;
}
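
The two new '&= ~MASK' lines fix a classic read-modify-write bug: OR-ing a fresh field value into a live register without clearing the old field merges old and new bits. The safe update, as a tiny illustrative helper:

	#include <stdint.h>

	static uint32_t set_field(uint32_t reg, uint32_t mask,
				  unsigned int shift, uint32_t value)
	{
		reg &= ~mask;			/* clear the stale field */
		return reg | (value << shift);	/* then install the new one */
	}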
@@ -580,10 +599,146 @@ static const struct snd_soc_component_driver xlnx_asoc_component = {
.pcm_new = xlnx_formatter_pcm_new,
};
+static int configure_mm2s(struct xlnx_pcm_drv_data *aud_drv_data,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+
+ aud_drv_data->mm2s_axis_clk = devm_clk_get(dev, "m_axis_mm2s_aclk");
+ if (IS_ERR(aud_drv_data->mm2s_axis_clk)) {
+ ret = PTR_ERR(aud_drv_data->mm2s_axis_clk);
+ dev_err(dev, "failed to get m_axis_mm2s_aclk(%d)\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(aud_drv_data->mm2s_axis_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable m_axis_mm2s_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ aud_drv_data->aud_mclk = devm_clk_get(dev, "aud_mclk");
+ if (IS_ERR(aud_drv_data->aud_mclk)) {
+ ret = PTR_ERR(aud_drv_data->aud_mclk);
+ dev_err(dev, "failed to get aud_mclk(%d)\n", ret);
+ goto axis_clk_err;
+ }
+ ret = clk_prepare_enable(aud_drv_data->aud_mclk);
+ if (ret) {
+ dev_err(dev, "failed to enable aud_mclk(%d)\n", ret);
+ goto axis_clk_err;
+ }
+
+ aud_drv_data->mm2s_irq = platform_get_irq_byname(pdev,
+ "irq_mm2s");
+ if (aud_drv_data->mm2s_irq < 0) {
+ dev_err(dev, "xlnx audio mm2s irq resource failed\n");
+ ret = aud_drv_data->mm2s_irq;
+ goto mm2s_err;
+ }
+ ret = devm_request_irq(dev, aud_drv_data->mm2s_irq,
+ xlnx_mm2s_irq_handler, 0,
+ "xlnx_formatter_pcm_mm2s_irq",
+ dev);
+ if (ret) {
+ dev_err(dev, "xlnx audio mm2s irq request failed\n");
+ goto mm2s_err;
+ }
+ ret = xlnx_formatter_pcm_reset(aud_drv_data->mmio +
+ XLNX_MM2S_OFFSET);
+ if (ret) {
+ dev_err(dev, "audio formatter reset failed\n");
+ goto mm2s_err;
+ }
+ xlnx_formatter_disable_irqs(aud_drv_data->mmio +
+ XLNX_MM2S_OFFSET,
+ SNDRV_PCM_STREAM_PLAYBACK);
+
+ aud_drv_data->nodes[XLNX_PLAYBACK] =
+ of_parse_phandle(dev->of_node, "xlnx,tx", 0);
+ if (!aud_drv_data->nodes[XLNX_PLAYBACK])
+ dev_err(dev, "tx node not found\n");
+ else
+ dev_info(dev,
+ "sound card device will use DAI link: %s\n",
+ (aud_drv_data->nodes[XLNX_PLAYBACK])->name);
+ of_node_put(aud_drv_data->nodes[XLNX_PLAYBACK]);
+
+ aud_drv_data->mm2s_presence = true;
+ return 0;
+
+mm2s_err:
+ clk_disable_unprepare(aud_drv_data->aud_mclk);
+axis_clk_err:
+ clk_disable_unprepare(aud_drv_data->mm2s_axis_clk);
+
+ return ret;
+}
+
+static int configure_s2mm(struct xlnx_pcm_drv_data *aud_drv_data,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+
+ aud_drv_data->s2mm_axis_clk = devm_clk_get(dev, "s_axis_s2mm_aclk");
+ if (IS_ERR(aud_drv_data->s2mm_axis_clk)) {
+ ret = PTR_ERR(aud_drv_data->s2mm_axis_clk);
+ dev_err(dev, "failed to get s_axis_s2mm_aclk(%d)\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(aud_drv_data->s2mm_axis_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable s_axis_s2mm_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ aud_drv_data->s2mm_irq = platform_get_irq_byname(pdev, "irq_s2mm");
+ if (aud_drv_data->s2mm_irq < 0) {
+ dev_err(dev, "xlnx audio s2mm irq resource failed\n");
+ ret = aud_drv_data->s2mm_irq;
+ goto s2mm_err;
+ }
+ ret = devm_request_irq(dev, aud_drv_data->s2mm_irq,
+ xlnx_s2mm_irq_handler, 0,
+ "xlnx_formatter_pcm_s2mm_irq",
+ dev);
+ if (ret) {
+ dev_err(dev, "xlnx audio s2mm irq request failed\n");
+ goto s2mm_err;
+ }
+ ret = xlnx_formatter_pcm_reset(aud_drv_data->mmio +
+ XLNX_S2MM_OFFSET);
+ if (ret) {
+ dev_err(dev, "audio formatter reset failed\n");
+ goto s2mm_err;
+ }
+ xlnx_formatter_disable_irqs(aud_drv_data->mmio +
+ XLNX_S2MM_OFFSET,
+ SNDRV_PCM_STREAM_CAPTURE);
+
+ aud_drv_data->nodes[XLNX_CAPTURE] =
+ of_parse_phandle(dev->of_node, "xlnx,rx", 0);
+ if (!aud_drv_data->nodes[XLNX_CAPTURE])
+ dev_err(dev, "rx node not found\n");
+ else
+ dev_info(dev, "sound card device will use DAI link: %s\n",
+ (aud_drv_data->nodes[XLNX_CAPTURE])->name);
+ of_node_put(aud_drv_data->nodes[XLNX_CAPTURE]);
+
+ aud_drv_data->s2mm_presence = true;
+ return 0;
+
+s2mm_err:
+ clk_disable_unprepare(aud_drv_data->s2mm_axis_clk);
+ return ret;
+}
+
static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
{
int ret;
u32 val;
+ size_t pdata_size;
struct xlnx_pcm_drv_data *aud_drv_data;
struct resource *res;
struct device *dev = &pdev->dev;
@@ -620,57 +775,15 @@ static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
val = readl(aud_drv_data->mmio + XLNX_AUD_CORE_CONFIG);
if (val & AUD_CFG_MM2S_MASK) {
- aud_drv_data->mm2s_presence = true;
- ret = xlnx_formatter_pcm_reset(aud_drv_data->mmio +
- XLNX_MM2S_OFFSET);
- if (ret) {
- dev_err(dev, "audio formatter reset failed\n");
- goto clk_err;
- }
- xlnx_formatter_disable_irqs(aud_drv_data->mmio +
- XLNX_MM2S_OFFSET,
- SNDRV_PCM_STREAM_PLAYBACK);
-
- aud_drv_data->mm2s_irq = platform_get_irq_byname(pdev,
- "irq_mm2s");
- if (aud_drv_data->mm2s_irq < 0) {
- ret = aud_drv_data->mm2s_irq;
+ ret = configure_mm2s(aud_drv_data, pdev);
+ if (ret)
goto clk_err;
- }
- ret = devm_request_irq(dev, aud_drv_data->mm2s_irq,
- xlnx_mm2s_irq_handler, 0,
- "xlnx_formatter_pcm_mm2s_irq", dev);
- if (ret) {
- dev_err(dev, "xlnx audio mm2s irq request failed\n");
- goto clk_err;
- }
}
+
if (val & AUD_CFG_S2MM_MASK) {
- aud_drv_data->s2mm_presence = true;
- ret = xlnx_formatter_pcm_reset(aud_drv_data->mmio +
- XLNX_S2MM_OFFSET);
- if (ret) {
- dev_err(dev, "audio formatter reset failed\n");
+ ret = configure_s2mm(aud_drv_data, pdev);
+ if (ret)
goto clk_err;
- }
- xlnx_formatter_disable_irqs(aud_drv_data->mmio +
- XLNX_S2MM_OFFSET,
- SNDRV_PCM_STREAM_CAPTURE);
-
- aud_drv_data->s2mm_irq = platform_get_irq_byname(pdev,
- "irq_s2mm");
- if (aud_drv_data->s2mm_irq < 0) {
- ret = aud_drv_data->s2mm_irq;
- goto clk_err;
- }
- ret = devm_request_irq(dev, aud_drv_data->s2mm_irq,
- xlnx_s2mm_irq_handler, 0,
- "xlnx_formatter_pcm_s2mm_irq",
- dev);
- if (ret) {
- dev_err(dev, "xlnx audio s2mm irq request failed\n");
- goto clk_err;
- }
}
dev_set_drvdata(dev, aud_drv_data);
@@ -682,6 +795,21 @@ static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
goto clk_err;
}
+ pdata_size = sizeof(aud_drv_data->nodes);
+ if (aud_drv_data->nodes[XLNX_PLAYBACK] ||
+ aud_drv_data->nodes[XLNX_CAPTURE]) {
+ aud_drv_data->pdev =
+ platform_device_register_resndata(dev,
+ "xlnx_snd_card",
+ PLATFORM_DEVID_AUTO,
+ NULL, 0,
+ &aud_drv_data->nodes,
+ pdata_size);
+ if (IS_ERR(aud_drv_data->pdev)) {
+ dev_err(dev, "sound card device creation failed\n");
+ ret = PTR_ERR(aud_drv_data->pdev);
+ goto clk_err;
+ }
+ dev_info(dev, "pcm platform device registered\n");
+ }
return 0;
clk_err:
@@ -694,6 +822,8 @@ static int xlnx_formatter_pcm_remove(struct platform_device *pdev)
int ret = 0;
struct xlnx_pcm_drv_data *adata = dev_get_drvdata(&pdev->dev);
+ platform_device_unregister(adata->pdev);
+
if (adata->s2mm_presence)
ret = xlnx_formatter_pcm_reset(adata->mmio + XLNX_S2MM_OFFSET);
diff --git a/sound/soc/xilinx/xlnx_i2s.c b/sound/soc/xilinx/xlnx_i2s.c
index cc641e582c82..1ae53d1cc2ec 100644
--- a/sound/soc/xilinx/xlnx_i2s.c
+++ b/sound/soc/xilinx/xlnx_i2s.c
@@ -7,6 +7,7 @@
// Author: Praveen Vuppala <praveenv@xilinx.com>
// Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -22,15 +23,22 @@
#define I2S_CH0_OFFSET 0x30
#define I2S_I2STIM_VALID_MASK GENMASK(7, 0)
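+/**
+ * struct xlnx_i2s_dev_data - Xilinx I2S DAI private data
+ * @base: I2S core register base address
+ * @axi_clk: AXI-Lite control interface clock
+ * @axis_clk: AXI-Stream audio data clock
+ * @aud_mclk: audio master clock
+ */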
+struct xlnx_i2s_dev_data {
+ void __iomem *base;
+ struct clk *axi_clk;
+ struct clk *axis_clk;
+ struct clk *aud_mclk;
+};
+
static int xlnx_i2s_set_sclkout_div(struct snd_soc_dai *cpu_dai,
int div_id, int div)
{
- void __iomem *base = snd_soc_dai_get_drvdata(cpu_dai);
+ struct xlnx_i2s_dev_data *dev_data = snd_soc_dai_get_drvdata(cpu_dai);
if (!div || (div & ~I2S_I2STIM_VALID_MASK))
return -EINVAL;
- writel(div, base + I2S_I2STIM_OFFSET);
+ writel(div, dev_data->base + I2S_I2STIM_OFFSET);
return 0;
}
@@ -40,13 +48,13 @@ static int xlnx_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *i2s_dai)
{
u32 reg_off, chan_id;
- void __iomem *base = snd_soc_dai_get_drvdata(i2s_dai);
+ struct xlnx_i2s_dev_data *dev_data = snd_soc_dai_get_drvdata(i2s_dai);
chan_id = params_channels(params) / 2;
while (chan_id > 0) {
reg_off = I2S_CH0_OFFSET + ((chan_id - 1) * 4);
- writel(chan_id, base + reg_off);
+ writel(chan_id, dev_data->base + reg_off);
chan_id--;
}
@@ -56,18 +64,18 @@ static int xlnx_i2s_hw_params(struct snd_pcm_substream *substream,
static int xlnx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *i2s_dai)
{
- void __iomem *base = snd_soc_dai_get_drvdata(i2s_dai);
+ struct xlnx_i2s_dev_data *dev_data = snd_soc_dai_get_drvdata(i2s_dai);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- writel(1, base + I2S_CORE_CTRL_OFFSET);
+ writel(1, dev_data->base + I2S_CORE_CTRL_OFFSET);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- writel(0, base + I2S_CORE_CTRL_OFFSET);
+ writel(0, dev_data->base + I2S_CORE_CTRL_OFFSET);
break;
default:
return -EINVAL;
@@ -95,8 +103,8 @@ MODULE_DEVICE_TABLE(of, xlnx_i2s_of_match);
static int xlnx_i2s_probe(struct platform_device *pdev)
{
- void __iomem *base;
struct snd_soc_dai_driver *dai_drv;
+ struct xlnx_i2s_dev_data *dev_data;
int ret;
u32 ch, format, data_width;
struct device *dev = &pdev->dev;
@@ -106,9 +114,16 @@ static int xlnx_i2s_probe(struct platform_device *pdev)
if (!dai_drv)
return -ENOMEM;
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ dev_data = devm_kzalloc(&pdev->dev, sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+
+ dev_data->axi_clk = devm_clk_get(&pdev->dev, "s_axi_ctrl_aclk");
+ if (IS_ERR(dev_data->axi_clk)) {
+ ret = PTR_ERR(dev_data->axi_clk);
+ dev_err(&pdev->dev, "failed to get s_axi_ctrl_aclk(%d)\n", ret);
+ return ret;
+ }
ret = of_property_read_u32(node, "xlnx,num-channels", &ch);
if (ret < 0) {
@@ -141,6 +156,15 @@ static int xlnx_i2s_probe(struct platform_device *pdev)
dai_drv->playback.channels_max = ch;
dai_drv->playback.rates = SNDRV_PCM_RATE_8000_192000;
dai_drv->ops = &xlnx_i2s_dai_ops;
+
+ dev_data->axis_clk = devm_clk_get(&pdev->dev,
+ "s_axis_aud_aclk");
+ if (IS_ERR(dev_data->axis_clk)) {
+ ret = PTR_ERR(dev_data->axis_clk);
+ dev_err(&pdev->dev,
+ "failed to get s_axis_aud_aclk(%d)\n", ret);
+ return ret;
+ }
} else if (of_device_is_compatible(node, "xlnx,i2s-receiver-1.0")) {
dai_drv->name = "xlnx_i2s_capture";
dai_drv->capture.stream_name = "Capture";
@@ -149,30 +173,93 @@ static int xlnx_i2s_probe(struct platform_device *pdev)
dai_drv->capture.channels_max = ch;
dai_drv->capture.rates = SNDRV_PCM_RATE_8000_192000;
dai_drv->ops = &xlnx_i2s_dai_ops;
+
+ dev_data->axis_clk = devm_clk_get(&pdev->dev,
+ "m_axis_aud_aclk");
+ if (IS_ERR(dev_data->axis_clk)) {
+ ret = PTR_ERR(dev_data->axis_clk);
+ dev_err(&pdev->dev,
+ "failed to get m_axis_aud_aclk(%d)\n", ret);
+ return ret;
+ }
} else {
return -ENODEV;
}
- dev_set_drvdata(&pdev->dev, base);
+ dev_data->aud_mclk = devm_clk_get(&pdev->dev, "aud_mclk");
+ if (IS_ERR(dev_data->aud_mclk)) {
+ ret = PTR_ERR(dev_data->aud_mclk);
+ dev_err(&pdev->dev, "failed to get aud_mclk(%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dev_data->axi_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable s_axi_ctrl_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dev_data->axis_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable axis_aud_aclk(%d)\n", ret);
+ goto err_axis_clk;
+ }
+
+ ret = clk_prepare_enable(dev_data->aud_mclk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable aud_mclk(%d)\n", ret);
+ goto err_aud_mclk;
+ }
+
+ dev_data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dev_data->base)) {
+ ret = PTR_ERR(dev_data->base);
+ goto clk_err;
+ }
+
+ dev_set_drvdata(&pdev->dev, dev_data);
ret = devm_snd_soc_register_component(&pdev->dev, &xlnx_i2s_component,
dai_drv, 1);
if (ret) {
dev_err(&pdev->dev, "i2s component registration failed\n");
- return ret;
+ goto clk_err;
}
dev_info(&pdev->dev, "%s DAI registered\n", dai_drv->name);
+ return 0;
+clk_err:
+ clk_disable_unprepare(dev_data->aud_mclk);
+err_aud_mclk:
+ clk_disable_unprepare(dev_data->axis_clk);
+err_axis_clk:
+ clk_disable_unprepare(dev_data->axi_clk);
+
return ret;
}
+static int xlnx_i2s_remove(struct platform_device *pdev)
+{
+ struct xlnx_i2s_dev_data *dev_data = dev_get_drvdata(&pdev->dev);
+
+ clk_disable_unprepare(dev_data->aud_mclk);
+ clk_disable_unprepare(dev_data->axis_clk);
+ clk_disable_unprepare(dev_data->axi_clk);
+
+ return 0;
+}
+
static struct platform_driver xlnx_i2s_aud_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = xlnx_i2s_of_match,
},
.probe = xlnx_i2s_probe,
+ .remove = xlnx_i2s_remove,
};
module_platform_driver(xlnx_i2s_aud_driver);
diff --git a/sound/soc/xilinx/xlnx_pl_snd_card.c b/sound/soc/xilinx/xlnx_pl_snd_card.c
new file mode 100644
index 000000000000..33eb93fe513f
--- /dev/null
+++ b/sound/soc/xilinx/xlnx_pl_snd_card.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ASoC sound card support
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "xlnx_snd_common.h"
+
+#define I2S_CLOCK_RATIO 384
+#define XLNX_MAX_PL_SND_DEV 5
+
+static DEFINE_IDA(xlnx_snd_card_dev);
+
+enum {
+ I2S_AUDIO = 0,
+ HDMI_AUDIO,
+ SDI_AUDIO,
+ SPDIF_AUDIO,
+ XLNX_MAX_IFACE,
+};
+
+static const char *xlnx_snd_card_name[XLNX_MAX_IFACE] = {
+ [I2S_AUDIO] = "xlnx-i2s-snd-card",
+ [HDMI_AUDIO] = "xlnx-hdmi-snd-card",
+ [SDI_AUDIO] = "xlnx-sdi-snd-card",
+ [SPDIF_AUDIO] = "xlnx-spdif-snd-card",
+};
+
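+/* DT compatible strings of the supported audio front ends, per direction */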
+static const char *dev_compat[][XLNX_MAX_IFACE] = {
+ [XLNX_PLAYBACK] = {
+ "xlnx,i2s-transmitter-1.0",
+ "xlnx,v-hdmi-tx-ss-3.1",
+ "xlnx,v-uhdsdi-audio-2.0",
+ "xlnx,spdif-2.0",
+ },
+
+ [XLNX_CAPTURE] = {
+ "xlnx,i2s-receiver-1.0",
+ "xlnx,v-hdmi-rx-ss-3.1",
+ "xlnx,v-uhdsdi-audio-2.0",
+ "xlnx,spdif-2.0",
+ },
+};
+
+static int xlnx_spdif_card_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct pl_card_data *prv = snd_soc_card_get_drvdata(rtd->card);
+ u32 sample_rate = params_rate(params);
+
+ /* mclk must be >= 1024 * sampling rate */
+ prv->mclk_val = 1024 * sample_rate;
+ prv->mclk_ratio = 1024;
+ return clk_set_rate(prv->mclk, prv->mclk_val);
+}
+
+static int xlnx_sdi_card_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct pl_card_data *prv = snd_soc_card_get_drvdata(rtd->card);
+ u32 sample_rate = params_rate(params);
+
+ prv->mclk_val = prv->mclk_ratio * sample_rate;
+ return clk_set_rate(prv->mclk, prv->mclk_val);
+}
+
+static int xlnx_hdmi_card_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct pl_card_data *prv = snd_soc_card_get_drvdata(rtd->card);
+ u32 sample_rate = params_rate(params);
+
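+ /* HDMI audio here uses a fixed 512 * fs master clock for all rates */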
+ switch (sample_rate) {
+ case 32000:
+ case 44100:
+ case 48000:
+ case 88200:
+ case 96000:
+ case 176400:
+ case 192000:
+ prv->mclk_ratio = 512;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ prv->mclk_val = prv->mclk_ratio * sample_rate;
+ return clk_set_rate(prv->mclk, prv->mclk_val);
+}
+
+static int xlnx_i2s_card_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ int ret, clk_div;
+ u32 ch, data_width, sample_rate;
+ struct pl_card_data *prv;
+
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+
+ ch = params_channels(params);
+ data_width = params_width(params);
+ sample_rate = params_rate(params);
+
+ /* only 2 channels supported */
+ if (ch != 2)
+ return -EINVAL;
+
+ prv = snd_soc_card_get_drvdata(rtd->card);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ switch (sample_rate) {
+ case 5512:
+ case 8000:
+ case 11025:
+ case 16000:
+ case 22050:
+ case 32000:
+ case 44100:
+ case 48000:
+ case 64000:
+ case 88200:
+ case 96000:
+ prv->mclk_ratio = 384;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (sample_rate) {
+ case 32000:
+ case 44100:
+ case 48000:
+ case 88200:
+ case 96000:
+ prv->mclk_ratio = 384;
+ break;
+ case 64000:
+ case 176400:
+ case 192000:
+ prv->mclk_ratio = 192;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ prv->mclk_val = prv->mclk_ratio * sample_rate;
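+ /*
+ * sclk carries 2 * ch * data_width bits per frame; dividing the
+ * mclk/fs ratio by that bit count gives the sclk output divider.
+ */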
+ clk_div = DIV_ROUND_UP(prv->mclk_ratio, 2 * ch * data_width);
+ ret = snd_soc_dai_set_clkdiv(cpu_dai, 0, clk_div);
+ if (ret)
+ return ret;
+
+ return clk_set_rate(prv->mclk, prv->mclk_val);
+}
+
+static const struct snd_soc_ops xlnx_sdi_card_ops = {
+ .hw_params = xlnx_sdi_card_hw_params,
+};
+
+static const struct snd_soc_ops xlnx_i2s_card_ops = {
+ .hw_params = xlnx_i2s_card_hw_params,
+};
+
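+/*
+ * DAI link component templates; the COMP_EMPTY() slots are filled in
+ * from the device tree at probe time.
+ */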
+SND_SOC_DAILINK_DEFS(dummy0,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC("snd-soc-dummy", "snd-soc-dummy-dai")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(dummy1,
+ DAILINK_COMP_ARRAY(COMP_CPU("snd-soc-dummy-dai")),
+ DAILINK_COMP_ARRAY(COMP_CODEC("hdmi-audio-codec.0", "i2s-hifi")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(dummy2,
+ DAILINK_COMP_ARRAY(COMP_CPU("snd-soc-dummy-dai")),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "xlnx_hdmi_rx")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(dummy3,
+ DAILINK_COMP_ARRAY(COMP_CPU("snd-soc-dummy-dai")),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "xlnx_sdi_tx")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(dummy4,
+ DAILINK_COMP_ARRAY(COMP_CPU("snd-soc-dummy-dai")),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "xlnx_sdi_rx")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+static const struct snd_soc_ops xlnx_hdmi_card_ops = {
+ .hw_params = xlnx_hdmi_card_hw_params,
+};
+
+static const struct snd_soc_ops xlnx_spdif_card_ops = {
+ .hw_params = xlnx_spdif_card_hw_params,
+};
+
+static struct snd_soc_dai_link xlnx_snd_dai[][XLNX_MAX_PATHS] = {
+ [I2S_AUDIO] = {
+ {
+ .name = "xilinx-i2s_playback",
+ .ops = &xlnx_i2s_card_ops,
+ SND_SOC_DAILINK_REG(dummy0),
+ },
+ {
+ .name = "xilinx-i2s_capture",
+ .ops = &xlnx_i2s_card_ops,
+ SND_SOC_DAILINK_REG(dummy0),
+ },
+ },
+ [HDMI_AUDIO] = {
+ {
+ .name = "xilinx-hdmi-playback",
+ SND_SOC_DAILINK_REG(dummy1),
+ .ops = &xlnx_hdmi_card_ops,
+ },
+ {
+ .name = "xilinx-hdmi-capture",
+ SND_SOC_DAILINK_REG(dummy2),
+ },
+ },
+ [SDI_AUDIO] = {
+ {
+ .name = "xlnx-sdi-playback",
+ SND_SOC_DAILINK_REG(dummy3),
+ .ops = &xlnx_sdi_card_ops,
+ },
+ {
+ .name = "xlnx-sdi-capture",
+ SND_SOC_DAILINK_REG(dummy4),
+ },
+
+ },
+ [SPDIF_AUDIO] = {
+ {
+ .name = "xilinx-spdif_playback",
+ SND_SOC_DAILINK_REG(dummy0),
+ .ops = &xlnx_spdif_card_ops,
+ },
+ {
+ .name = "xilinx-spdif_capture",
+ SND_SOC_DAILINK_REG(dummy0),
+ .ops = &xlnx_spdif_card_ops,
+ },
+ },
+
+};
+
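+/*
+ * Match the node against the compatibles supported for the given stream
+ * direction; returns an interface index into xlnx_snd_dai[] or -ENODEV.
+ */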
+static int find_link(struct device_node *node, int direction)
+{
+ int ret;
+ u32 i, size;
+ const char **link_names = dev_compat[direction];
+
+ size = ARRAY_SIZE(dev_compat[direction]);
+
+ for (i = 0; i < size; i++) {
+ ret = of_device_is_compatible(node, link_names[i]);
+ if (ret)
+ return i;
+ }
+ return -ENODEV;
+}
+
+static int xlnx_snd_probe(struct platform_device *pdev)
+{
+ u32 i;
+ size_t sz;
+ char *buf;
+ int ret, audio_interface;
+ struct snd_soc_dai_link *dai;
+ struct pl_card_data *prv;
+ struct platform_device *iface_pdev;
+
+ struct snd_soc_card *card;
+ struct device_node **node = pdev->dev.platform_data;
+
+ if (!node)
+ return -ENODEV;
+
+ card = devm_kzalloc(&pdev->dev, sizeof(struct snd_soc_card),
+ GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->dev = &pdev->dev;
+
+ card->dai_link = devm_kzalloc(card->dev,
+ sizeof(*dai) * XLNX_MAX_PATHS,
+ GFP_KERNEL);
+ if (!card->dai_link)
+ return -ENOMEM;
+
+ prv = devm_kzalloc(card->dev,
+ sizeof(struct pl_card_data),
+ GFP_KERNEL);
+ if (!prv)
+ return -ENOMEM;
+
+ card->num_links = 0;
+ for (i = XLNX_PLAYBACK; i < XLNX_MAX_PATHS; i++) {
+ struct device_node *pnode = of_parse_phandle(node[i],
+ "xlnx,snd-pcm", 0);
+ if (!pnode) {
+ dev_err(card->dev, "platform node not found\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Checking either playback or capture is enough, as the
+ * same clock is used for both.
+ */
+ if (i == XLNX_PLAYBACK) {
+ iface_pdev = of_find_device_by_node(pnode);
+ if (!iface_pdev) {
+ of_node_put(pnode);
+ return -ENODEV;
+ }
+
+ prv->mclk = devm_clk_get(&iface_pdev->dev, "aud_mclk");
+ if (IS_ERR(prv->mclk))
+ return PTR_ERR(prv->mclk);
+
+ }
+ of_node_put(pnode);
+
+ dai = &card->dai_link[i];
+ audio_interface = find_link(node[i], i);
+ switch (audio_interface) {
+ case I2S_AUDIO:
+ *dai = xlnx_snd_dai[I2S_AUDIO][i];
+ dai->platforms->of_node = pnode;
+ dai->cpus->of_node = node[i];
+ card->num_links++;
+ snd_soc_card_set_drvdata(card, prv);
+ dev_dbg(card->dev, "%s registered\n",
+ card->dai_link[i].name);
+ break;
+ case HDMI_AUDIO:
+ *dai = xlnx_snd_dai[HDMI_AUDIO][i];
+ dai->platforms->of_node = pnode;
+ if (i == XLNX_CAPTURE)
+ dai->codecs->of_node = node[i];
+ card->num_links++;
+ /* TODO: support multiple sampling rates */
+ prv->mclk_ratio = 384;
+ snd_soc_card_set_drvdata(card, prv);
+ dev_dbg(card->dev, "%s registered\n",
+ card->dai_link[i].name);
+ break;
+ case SDI_AUDIO:
+ *dai = xlnx_snd_dai[SDI_AUDIO][i];
+ dai->platforms->of_node = pnode;
+ dai->codecs->of_node = node[i];
+ card->num_links++;
+ /* TODO: support multiple sampling rates */
+ prv->mclk_ratio = 384;
+ snd_soc_card_set_drvdata(card, prv);
+ dev_dbg(card->dev, "%s registered\n",
+ card->dai_link[i].name);
+ break;
+ case SPDIF_AUDIO:
+ *dai = xlnx_snd_dai[SPDIF_AUDIO][i];
+ dai->platforms->of_node = pnode;
+ dai->cpus->of_node = node[i];
+ card->num_links++;
+ prv->mclk_ratio = 384;
+ snd_soc_card_set_drvdata(card, prv);
+ dev_dbg(card->dev, "%s registered\n",
+ card->dai_link[i].name);
+ break;
+ default:
+ dev_err(card->dev, "Invalid audio interface\n");
+ return -ENODEV;
+ }
+ }
+
+ if (card->num_links) {
+ /*
+ * Example : i2s card name = xlnx-i2s-snd-card-0
+ * length = number of chars in "xlnx-i2s-snd-card"
+ * + 1 ('-'), + 1 (card instance num)
+ * + 1 ('\0')
+ */
+ sz = strlen(xlnx_snd_card_name[audio_interface]) + 3;
+ buf = devm_kzalloc(card->dev, sz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ prv->xlnx_snd_dev_id = ida_simple_get(&xlnx_snd_card_dev, 0,
+ XLNX_MAX_PL_SND_DEV,
+ GFP_KERNEL);
+ if (prv->xlnx_snd_dev_id < 0)
+ return prv->xlnx_snd_dev_id;
+
+ snprintf(buf, sz, "%s-%d", xlnx_snd_card_name[audio_interface],
+ prv->xlnx_snd_dev_id);
+ card->name = buf;
+
+ ret = devm_snd_soc_register_card(card->dev, card);
+ if (ret) {
+ dev_err(card->dev, "%s registration failed\n",
+ card->name);
+ ida_simple_remove(&xlnx_snd_card_dev,
+ prv->xlnx_snd_dev_id);
+ return ret;
+ }
+
+ dev_set_drvdata(card->dev, prv);
+ dev_info(card->dev, "%s registered\n", card->name);
+ }
+
+ return 0;
+}
+
+static int xlnx_snd_remove(struct platform_device *pdev)
+{
+ struct pl_card_data *pdata = dev_get_drvdata(&pdev->dev);
+
+ ida_simple_remove(&xlnx_snd_card_dev, pdata->xlnx_snd_dev_id);
+ return 0;
+}
+
+static struct platform_driver xlnx_snd_driver = {
+ .driver = {
+ .name = "xlnx_snd_card",
+ },
+ .probe = xlnx_snd_probe,
+ .remove = xlnx_snd_remove,
+};
+
+module_platform_driver(xlnx_snd_driver);
+
+MODULE_DESCRIPTION("Xilinx FPGA sound card driver");
+MODULE_AUTHOR("Maruthi Srinivas Bayyavarapu");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/xilinx/xlnx_sdi_audio.c b/sound/soc/xilinx/xlnx_sdi_audio.c
new file mode 100644
index 000000000000..75b0b3150b6f
--- /dev/null
+++ b/sound/soc/xilinx/xlnx_sdi_audio.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx SDI embed and extract audio support
+ *
+ * Copyright (c) 2018 Xilinx Pvt., Ltd
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <drm/drm_modes.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#define DRIVER_NAME "xlnx-sdi-audio"
+
+#define XSDIAUD_CNTRL_REG_OFFSET 0x00
+#define XSDIAUD_SOFT_RST_REG_OFFSET 0x04
+#define XSDIAUD_VER_REG_OFFSET 0x08
+#define XSDIAUD_INT_EN_REG_OFFSET 0x0C
+#define XSDIAUD_INT_STS_REG_OFFSET 0x10
+#define XSDIAUD_EMB_VID_CNTRL_REG_OFFSET 0x14
+#define XSDIAUD_AUD_CNTRL_REG_OFFSET 0x18
+#define XSDIAUD_CH_VALID_REG_OFFSET 0x20
+#define XSDIAUD_CH_MUTE_REG_OFFSET 0x30
+#define XSDIAUD_ACTIVE_GRP_REG_OFFSET 0x40
+#define XSDIAUD_EXT_CH_STAT0_REG_OFFSET 0x48
+#define XSDIAUD_EXT_SRATE_STS_REG_OFFSET 0x70
+#define XSDIAUD_GUI_PARAM_REG_OFFSET 0xFC
+
+#define XSDIAUD_CNTRL_EN_MASK BIT(0)
+#define XSDIAUD_SOFT_RST_CONFIG_MASK BIT(0)
+#define XSDIAUD_SOFT_RST_CORE_MASK BIT(1)
+#define XSDIAUD_VER_MAJOR_MASK GENMASK(31, 24)
+#define XSDIAUD_VER_MINOR_MASK GENMASK(23, 16)
+
+#define XSDIAUD_EXT_GROUP_1_STS_MASK BIT(0)
+#define XSDIAUD_EXT_AUDSTS_UPDATE_MASK BIT(8)
+#define XSDIAUD_EMB_VID_CNT_ELE_SHIFT (16)
+#define XSDIAUD_EMB_VID_CNT_ELE_MASK BIT(16)
+#define XSDIAUD_EMB_VID_CNT_TSCAN_MASK BIT(8)
+#define XSDIAUD_EMB_VID_CNT_TSCAN_SHIFT (8)
+#define XSDIAUD_EMB_VID_CNT_TRATE_SHIFT (4)
+#define XSDIAUD_EMB_AUD_CNT_SS_MASK BIT(3)
+#define XSDIAUD_EMB_AUD_CNT_ASYNC_AUDIO BIT(4)
+
+#define CH_STATUS_UPDATE_TIMEOUT 40
+
+enum IP_MODE {
+ EMBED,
+ EXTRACT,
+};
+
+enum channel_id {
+ CHAN_ID_0 = 1,
+ CHAN_ID_1,
+};
+
+enum sdi_transport_family {
+ SDI_TRANSPORT_FAMILY_1920,
+ SDI_TRANSPORT_FAMILY_1280,
+ SDI_TRANSPORT_FAMILY_2048,
+ SDI_TRANSPORT_FAMILY_NTSC = 8,
+ SDI_TRANSPORT_FAMILY_PAL = 9,
+};
+
+/**
+ * enum sdi_audio_samplerate - audio sampling rate
+ * @XSDIAUD_SAMPRATE0: 48 kHz
+ * @XSDIAUD_SAMPRATE1: 44.1 kHz
+ * @XSDIAUD_SAMPRATE2: 32 kHz
+ */
+enum sdi_audio_samplerate {
+ XSDIAUD_SAMPRATE0,
+ XSDIAUD_SAMPRATE1,
+ XSDIAUD_SAMPRATE2
+};
+
+/**
+ * enum sdi_audio_samplesize - bits per sample
+ * @XSDIAUD_SAMPSIZE0: 20-bit audio sample
+ * @XSDIAUD_SAMPSIZE1: 24-bit audio sample
+ */
+enum sdi_audio_samplesize {
+ XSDIAUD_SAMPSIZE0,
+ XSDIAUD_SAMPSIZE1
+};
+
+struct dev_ctx {
+ enum IP_MODE mode;
+ void __iomem *base;
+ struct device *dev;
+ struct drm_display_mode *video_mode;
+ struct snd_pcm_substream *stream;
+ struct clk *axi_clk;
+ struct clk *axis_clk;
+ struct clk *aud_clk;
+ bool rx_srate_updated;
+ wait_queue_head_t srate_q;
+};
+
+static irqreturn_t xtract_irq_handler(int irq, void *dev_id)
+{
+ u32 irq_sts, irq_en, active_grps;
+ struct dev_ctx *ctx = dev_id;
+
+ irq_sts = readl(ctx->base + XSDIAUD_INT_STS_REG_OFFSET);
+ active_grps = readl(ctx->base + XSDIAUD_ACTIVE_GRP_REG_OFFSET);
+ if ((irq_sts & XSDIAUD_EXT_AUDSTS_UPDATE_MASK) &&
+ (active_grps & XSDIAUD_EXT_GROUP_1_STS_MASK)) {
+ writel(XSDIAUD_EXT_AUDSTS_UPDATE_MASK,
+ ctx->base + XSDIAUD_INT_STS_REG_OFFSET);
+ irq_en = readl(ctx->base + XSDIAUD_INT_EN_REG_OFFSET);
+ /* Disable further interrupts; the sample rate status was updated */
+ writel(irq_en & ~XSDIAUD_EXT_AUDSTS_UPDATE_MASK,
+ ctx->base + XSDIAUD_INT_EN_REG_OFFSET);
+
+ ctx->rx_srate_updated = true;
+ wake_up_interruptible(&ctx->srate_q);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void audio_enable(void __iomem *aud_base)
+{
+ u32 val;
+
+ val = readl(aud_base + XSDIAUD_CNTRL_REG_OFFSET);
+ val |= XSDIAUD_CNTRL_EN_MASK;
+ writel(val, aud_base + XSDIAUD_CNTRL_REG_OFFSET);
+}
+
+static void audio_disable(void __iomem *aud_base)
+{
+ u32 val;
+
+ val = readl(aud_base + XSDIAUD_CNTRL_REG_OFFSET);
+ val &= ~XSDIAUD_CNTRL_EN_MASK;
+ writel(val, aud_base + XSDIAUD_CNTRL_REG_OFFSET);
+}
+
+static void audio_reset_core(void __iomem *aud_base, bool reset)
+{
+ u32 val;
+
+ if (reset) {
+ /* reset the core */
+ val = readl(aud_base + XSDIAUD_SOFT_RST_REG_OFFSET);
+ val |= XSDIAUD_SOFT_RST_CORE_MASK;
+ writel(val, aud_base + XSDIAUD_SOFT_RST_REG_OFFSET);
+ } else {
+ /* bring the core out of reset */
+ val = readl(aud_base + XSDIAUD_SOFT_RST_REG_OFFSET);
+ val &= ~XSDIAUD_SOFT_RST_CORE_MASK;
+ writel(val, aud_base + XSDIAUD_SOFT_RST_REG_OFFSET);
+ }
+}
+
+static int xlnx_sdi_rx_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ int err;
+ u32 val, sample_rate;
+
+ struct dev_ctx *ctx = dev_get_drvdata(dai->dev);
+ void __iomem *base = ctx->base;
+ unsigned long jiffies = msecs_to_jiffies(CH_STATUS_UPDATE_TIMEOUT);
+
+ audio_enable(base);
+ writel(XSDIAUD_EXT_AUDSTS_UPDATE_MASK,
+ ctx->base + XSDIAUD_INT_EN_REG_OFFSET);
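+ /* xtract_irq_handler sets rx_srate_updated and wakes srate_q */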
+ err = wait_event_interruptible_timeout(ctx->srate_q,
+ ctx->rx_srate_updated,
+ jiffies);
+
+ if (!err) {
+ dev_err(ctx->dev, "Didn't get valid audio property update\n");
+ return -EINVAL;
+ }
+ ctx->rx_srate_updated = false;
+
+ val = readl(base + XSDIAUD_EXT_SRATE_STS_REG_OFFSET);
+ /* both channels carry the same sample rate, so read either one */
+ switch (val & CHAN_ID_0) {
+ case 0:
+ sample_rate = 48000;
+ break;
+ case 1:
+ sample_rate = 44100;
+ break;
+ case 2:
+ sample_rate = 32000;
+ break;
+ default:
+ /* unknown encoding */
+ sample_rate = 0;
+ break;
+ }
+
+ dev_dbg(ctx->dev,
+ "sdi rx audio enabled : sample rate = %d\n", sample_rate);
+ return 0;
+}
+
+static void xlnx_sdi_rx_pcm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct dev_ctx *ctx = dev_get_drvdata(dai->dev);
+
+ audio_disable(ctx->base);
+
+ dev_info(dai->dev, "sdi rx audio disabled\n");
+}
+
+static int xlnx_sdi_tx_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct dev_ctx *ctx = dev_get_drvdata(dai->dev);
+
+ audio_enable(ctx->base);
+ ctx->stream = substream;
+
+ dev_info(ctx->dev, "sdi tx audio enabled\n");
+ return 0;
+}
+
+static int xlnx_sdi_tx_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ u32 val = 0;
+ u32 num_channels, sample_rate, sig_bits;
+
+ struct dev_ctx *ctx = dev_get_drvdata(dai->dev);
+ void __iomem *base = ctx->base;
+
+ /*
+ * Video mode properties needed by the audio driver are shared
+ * through a pointer in the video device's platform data. This
+ * mechanism may need to be extended to avoid possible error
+ * scenarios.
+ */
+ if (!ctx->video_mode || !ctx->video_mode->vdisplay ||
+ !ctx->video_mode->vrefresh) {
+ dev_err(ctx->dev, "couldn't find video display properties\n");
+ return -EINVAL;
+ }
+
+ /*
+ * map video properties.
+ * Note: 1920x1080 and 2048x1080 are the resolutions of sub images for
+ * 3840x2160 and 4096x2160 resolutions respectively.
+ */
+ switch (ctx->video_mode->hdisplay) {
+ case 1920:
+ case 3840:
+ val = SDI_TRANSPORT_FAMILY_1920;
+ break;
+ case 1280:
+ val |= SDI_TRANSPORT_FAMILY_1280;
+ break;
+ case 2048:
+ case 4096:
+ val |= SDI_TRANSPORT_FAMILY_2048;
+ break;
+ case 720:
+ if (ctx->video_mode->vdisplay == 486)
+ val |= SDI_TRANSPORT_FAMILY_NTSC;
+ else if (ctx->video_mode->vdisplay == 576)
+ val |= SDI_TRANSPORT_FAMILY_PAL;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
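+ /* encode the frame rate into the transport rate field */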
+ switch (ctx->video_mode->vrefresh) {
+ case 24:
+ val |= (3 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ case 25:
+ val |= (5 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ case 30:
+ val |= (7 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ case 48:
+ val |= (8 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ case 50:
+ val |= (9 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ case 60:
+ val |= (11 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!(ctx->video_mode->flags & DRM_MODE_FLAG_INTERLACE))
+ val |= XSDIAUD_EMB_VID_CNT_TSCAN_MASK;
+
+ val |= XSDIAUD_EMB_VID_CNT_ELE_MASK;
+
+ writel(val, base + XSDIAUD_EMB_VID_CNTRL_REG_OFFSET);
+
+ /* map audio properties */
+ num_channels = params_channels(params);
+ sample_rate = params_rate(params);
+ sig_bits = snd_pcm_format_width(params_format(params));
+
+ dev_info(ctx->dev,
+ "stream params: channels = %d sample_rate = %d bits = %d\n",
+ num_channels, sample_rate, sig_bits);
+
+ val = 0;
+ val |= XSDIAUD_EMB_AUD_CNT_ASYNC_AUDIO;
+
+ switch (sample_rate) {
+ case 48000:
+ val |= XSDIAUD_SAMPRATE0;
+ break;
+ case 44100:
+ val |= XSDIAUD_SAMPRATE1;
+ break;
+ case 32000:
+ val |= XSDIAUD_SAMPRATE2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (sig_bits == 24)
+ val |= XSDIAUD_EMB_AUD_CNT_SS_MASK;
+
+ writel(val, base + XSDIAUD_AUD_CNTRL_REG_OFFSET);
+
+ /* TODO: support more channels, currently only 2. */
+ writel(CHAN_ID_1 | CHAN_ID_0, base + XSDIAUD_CH_VALID_REG_OFFSET);
+
+ return 0;
+}
+
+static void xlnx_sdi_tx_pcm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct dev_ctx *ctx = dev_get_drvdata(dai->dev);
+ void __iomem *base = ctx->base;
+
+ audio_disable(base);
+ ctx->stream = NULL;
+
+ dev_info(ctx->dev, "sdi tx audio disabled\n");
+}
+
+static const struct snd_soc_component_driver xlnx_sdi_component = {
+ .name = "xlnx-sdi-dai-component",
+};
+
+static const struct snd_soc_dai_ops xlnx_sdi_rx_dai_ops = {
+ .startup = xlnx_sdi_rx_pcm_startup,
+ .shutdown = xlnx_sdi_rx_pcm_shutdown,
+};
+
+static struct snd_soc_dai_driver xlnx_sdi_rx_dai = {
+ .name = "xlnx_sdi_rx",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &xlnx_sdi_rx_dai_ops,
+};
+
+static const struct snd_soc_dai_ops xlnx_sdi_tx_dai_ops = {
+ .startup = xlnx_sdi_tx_pcm_startup,
+ .hw_params = xlnx_sdi_tx_hw_params,
+ .shutdown = xlnx_sdi_tx_pcm_shutdown,
+};
+
+static struct snd_soc_dai_driver xlnx_sdi_tx_dai = {
+ .name = "xlnx_sdi_tx",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &xlnx_sdi_tx_dai_ops,
+};
+
+static int xlnx_sdi_audio_probe(struct platform_device *pdev)
+{
+ u32 val;
+ int ret;
+ struct dev_ctx *ctx;
+ struct resource *res;
+ struct device *video_dev;
+ struct device_node *video_node;
+ struct platform_device *video_pdev;
+ struct snd_soc_dai_driver *snd_dai;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+
+ /* TODO - remove before upstreaming */
+ if (of_device_is_compatible(node, "xlnx,v-uhdsdi-audio-1.0")) {
+ dev_err(&pdev->dev, "driver doesn't support sdi audio v1.0\n");
+ return -ENODEV;
+ }
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(struct dev_ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(ctx->axi_clk)) {
+ ret = PTR_ERR(ctx->axi_clk);
+ dev_err(&pdev->dev, "failed to get s_axi_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ctx->axi_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable s_axi_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No IO MEM resource found\n");
+ ret = -ENODEV;
+ goto err_axis;
+ }
+
+ ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->base)) {
+ ret = PTR_ERR(ctx->base);
+ goto err_axis;
+ }
+
+ ctx->dev = &pdev->dev;
+
+ val = readl(ctx->base + XSDIAUD_GUI_PARAM_REG_OFFSET);
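+ /* BIT(6) of the GUI parameter register marks an extract (capture) core */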
+ if (val & BIT(6)) {
+ ctx->mode = EXTRACT;
+
+ ctx->axis_clk = devm_clk_get(&pdev->dev, "m_axis_clk");
+ if (IS_ERR(ctx->axis_clk)) {
+ ret = PTR_ERR(ctx->axis_clk);
+ dev_err(&pdev->dev, "failed to get m_axis_clk(%d)\n",
+ ret);
+ goto err_axis;
+ }
+
+ ctx->aud_clk = devm_clk_get(&pdev->dev, "sdi_extract_clk");
+ if (IS_ERR(ctx->aud_clk)) {
+ ret = PTR_ERR(ctx->aud_clk);
+ dev_err(&pdev->dev, "failed to get sdi_extract_clk(%d)\n",
+ ret);
+ goto err_axis;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No IRQ resource found\n");
+ ret = -ENODEV;
+ goto err_axis;
+ }
+ ret = devm_request_irq(&pdev->dev, res->start,
+ xtract_irq_handler,
+ 0, "XLNX_SDI_AUDIO_XTRACT", ctx);
+ if (ret) {
+ dev_err(&pdev->dev, "extract irq request failed\n");
+ ret = -ENODEV;
+ goto err_axis;
+ }
+
+ init_waitqueue_head(&ctx->srate_q);
+
+ snd_dai = &xlnx_sdi_rx_dai;
+ } else {
+ ctx->mode = EMBED;
+ ctx->axis_clk = devm_clk_get(&pdev->dev, "s_axis_clk");
+ if (IS_ERR(ctx->axis_clk)) {
+ ret = PTR_ERR(ctx->axis_clk);
+ dev_err(&pdev->dev, "failed to get s_axis_clk(%d)\n",
+ ret);
+ goto err_axis;
+ }
+
+ ctx->aud_clk = devm_clk_get(&pdev->dev, "sdi_embed_clk");
+ if (IS_ERR(ctx->aud_clk)) {
+ ret = PTR_ERR(ctx->aud_clk);
+ dev_err(&pdev->dev, "failed to get aud_clk(%d)\n",
+ ret);
+ goto err_axis;
+ }
+
+ video_node = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
+ if (!video_node) {
+ dev_err(ctx->dev, "video_node not found\n");
+ of_node_put(video_node);
+ ret = -ENODEV;
+ goto err_axis;
+ }
+
+ video_pdev = of_find_device_by_node(video_node);
+ if (!video_pdev) {
+ of_node_put(video_node);
+ ret = -ENODEV;
+ goto err_axis;
+ }
+
+ video_dev = &video_pdev->dev;
+ ctx->video_mode =
+ (struct drm_display_mode *)video_dev->platform_data;
+ /* invalid 'platform_data' implies video driver is not loaded */
+ if (!ctx->video_mode) {
+ of_node_put(video_node);
+ ret = -EPROBE_DEFER;
+ goto err_axis;
+ }
+
+ snd_dai = &xlnx_sdi_tx_dai;
+ of_node_put(video_node);
+ }
+
+ ret = clk_prepare_enable(ctx->axis_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable axis_clk(%d)\n", ret);
+ goto err_axis;
+ }
+
+ ret = clk_prepare_enable(ctx->aud_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable aud_clk(%d)\n", ret);
+ goto err_aud_clk;
+ }
+
+ ret = devm_snd_soc_register_component(&pdev->dev, &xlnx_sdi_component,
+ snd_dai, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't register codec DAI\n");
+ goto err_clk;
+ }
+
+ dev_set_drvdata(&pdev->dev, ctx);
+
+ audio_reset_core(ctx->base, true);
+ audio_reset_core(ctx->base, false);
+
+ dev_info(&pdev->dev, "xlnx sdi codec dai component registered\n");
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(ctx->aud_clk);
+err_aud_clk:
+ clk_disable_unprepare(ctx->axis_clk);
+err_axis:
+ clk_disable_unprepare(ctx->axi_clk);
+ return ret;
+}
+
+static int xlnx_sdi_audio_remove(struct platform_device *pdev)
+{
+ struct dev_ctx *ctx = dev_get_drvdata(&pdev->dev);
+
+ audio_disable(ctx->base);
+ audio_reset_core(ctx->base, true);
+
+ clk_disable_unprepare(ctx->aud_clk);
+ clk_disable_unprepare(ctx->axis_clk);
+ clk_disable_unprepare(ctx->axi_clk);
+ return 0;
+}
+
+static const struct of_device_id xlnx_sdi_audio_of_match[] = {
+ { .compatible = "xlnx,v-uhdsdi-audio-1.0"},
+ { .compatible = "xlnx,v-uhdsdi-audio-2.0"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xlnx_sdi_audio_of_match);
+
+static struct platform_driver xlnx_sdi_audio_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xlnx_sdi_audio_of_match,
+ },
+ .probe = xlnx_sdi_audio_probe,
+ .remove = xlnx_sdi_audio_remove,
+};
+
+module_platform_driver(xlnx_sdi_audio_driver);
+
+MODULE_DESCRIPTION("Xilinx SDI audio codec driver");
+MODULE_AUTHOR("Maruthi Srinivas Bayyavarapu");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/xilinx/xlnx_snd_common.h b/sound/soc/xilinx/xlnx_snd_common.h
new file mode 100644
index 000000000000..39461fac0d96
--- /dev/null
+++ b/sound/soc/xilinx/xlnx_snd_common.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx ASoC sound card support
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ */
+
+#ifndef _XLNX_SND_COMMON_H
+#define _XLNX_SND_COMMON_H
+
+enum {
+ XLNX_PLAYBACK,
+ XLNX_CAPTURE,
+ XLNX_MAX_PATHS
+};
+
+struct pl_card_data {
+ u32 mclk_val;
+ u32 mclk_ratio;
+ int xlnx_snd_dev_id;
+ struct clk *mclk;
+};
+#endif /* _XLNX_SND_COMMON_H */
diff --git a/sound/synth/emux/emux.c b/sound/synth/emux/emux.c
index 6695530bba9b..c60ff81390a4 100644
--- a/sound/synth/emux/emux.c
+++ b/sound/synth/emux/emux.c
@@ -125,15 +125,10 @@ EXPORT_SYMBOL(snd_emux_register);
*/
int snd_emux_free(struct snd_emux *emu)
{
- unsigned long flags;
-
if (! emu)
return -EINVAL;
- spin_lock_irqsave(&emu->voice_lock, flags);
- if (emu->timer_active)
- del_timer(&emu->tlist);
- spin_unlock_irqrestore(&emu->voice_lock, flags);
+ del_timer_sync(&emu->tlist);
snd_emux_proc_free(emu);
snd_emux_delete_virmidi(emu);
diff --git a/sound/synth/emux/emux_nrpn.c b/sound/synth/emux/emux_nrpn.c
index 1ac22676d464..641dc02ff709 100644
--- a/sound/synth/emux/emux_nrpn.c
+++ b/sound/synth/emux/emux_nrpn.c
@@ -349,6 +349,9 @@ int
snd_emux_xg_control(struct snd_emux_port *port, struct snd_midi_channel *chan,
int param)
{
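+ /* reject controller numbers that would index past control[] */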
+ if (param >= ARRAY_SIZE(chan->control))
+ return -EINVAL;
+
return send_converted_effect(xg_effects, ARRAY_SIZE(xg_effects),
port, chan, param,
chan->control[param],
diff --git a/sound/usb/bcd2000/bcd2000.c b/sound/usb/bcd2000/bcd2000.c
index 6e3175826faf..97afb3780970 100644
--- a/sound/usb/bcd2000/bcd2000.c
+++ b/sound/usb/bcd2000/bcd2000.c
@@ -348,7 +348,8 @@ static int bcd2000_init_midi(struct bcd2000 *bcd2k)
static void bcd2000_free_usb_related_resources(struct bcd2000 *bcd2k,
struct usb_interface *interface)
{
- /* usb_kill_urb not necessary, urb is aborted automatically */
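+ /* kill in-flight URBs before their buffers are freed below */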
+ usb_kill_urb(bcd2k->midi_out_urb);
+ usb_kill_urb(bcd2k->midi_in_urb);
usb_free_urb(bcd2k->midi_out_urb);
usb_free_urb(bcd2k->midi_in_urb);
diff --git a/sound/usb/caiaq/input.c b/sound/usb/caiaq/input.c
index 533eb69fe4e6..cd5a8584dbe3 100644
--- a/sound/usb/caiaq/input.c
+++ b/sound/usb/caiaq/input.c
@@ -804,6 +804,7 @@ int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *cdev)
default:
/* no input methods supported on this device */
+ ret = -EINVAL;
goto exit_free_idev;
}
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 87cc249a31b9..469da45826d2 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -73,12 +73,13 @@ static inline unsigned get_usb_high_speed_rate(unsigned int rate)
*/
static void release_urb_ctx(struct snd_urb_ctx *u)
{
- if (u->buffer_size)
+ if (u->urb && u->buffer_size)
usb_free_coherent(u->ep->chip->dev, u->buffer_size,
u->urb->transfer_buffer,
u->urb->transfer_dma);
usb_free_urb(u->urb);
u->urb = NULL;
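+ /* clear the size so a repeated release cannot double-free the buffer */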
+ u->buffer_size = 0;
}
static const char *usb_error_string(int err)
@@ -310,7 +311,7 @@ static void queue_pending_output_urbs(struct snd_usb_endpoint *ep)
while (test_bit(EP_FLAG_RUNNING, &ep->flags)) {
unsigned long flags;
- struct snd_usb_packet_info *uninitialized_var(packet);
+ struct snd_usb_packet_info *packet;
struct snd_urb_ctx *ctx = NULL;
int err, i;
@@ -977,6 +978,7 @@ static int sync_ep_set_params(struct snd_usb_endpoint *ep)
if (!ep->syncbuf)
return -ENOMEM;
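+ /* set nurbs before allocating so the error path can free them all */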
+ ep->nurbs = SYNC_URBS;
for (i = 0; i < SYNC_URBS; i++) {
struct snd_urb_ctx *u = &ep->urb[i];
u->index = i;
@@ -996,8 +998,6 @@ static int sync_ep_set_params(struct snd_usb_endpoint *ep)
u->urb->complete = snd_complete_urb;
}
- ep->nurbs = SYNC_URBS;
-
return 0;
out_of_memory:
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 84b66f7c627c..11a4454c6f64 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -40,8 +40,12 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
case UAC_VERSION_1:
default: {
struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
- if (format >= 64)
- return 0; /* invalid format */
+ if (format >= 64) {
+ usb_audio_info(chip,
+ "%u:%d: invalid format type 0x%llx detected, processing as PCM\n",
+ fp->iface, fp->altsetting, format);
+ format = UAC_FORMAT_TYPE_I_PCM;
+ }
sample_width = fmt->bBitResolution;
sample_bytes = fmt->bSubframeSize;
format = 1ULL << format;
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 8ca56ba600cf..22104cb84b1c 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -303,7 +303,8 @@ static void line6_data_received(struct urb *urb)
for (;;) {
done =
line6_midibuf_read(mb, line6->buffer_message,
- LINE6_MIDI_MESSAGE_MAXLEN);
+ LINE6_MIDI_MESSAGE_MAXLEN,
+ LINE6_MIDIBUF_READ_RX);
if (done <= 0)
break;
diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
index ba0e2b7e8fe1..0838632c788e 100644
--- a/sound/usb/line6/midi.c
+++ b/sound/usb/line6/midi.c
@@ -44,7 +44,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
int req, done;
for (;;) {
- req = min(line6_midibuf_bytes_free(mb), line6->max_packet_size);
+ req = min3(line6_midibuf_bytes_free(mb), line6->max_packet_size,
+ LINE6_FALLBACK_MAXPACKETSIZE);
done = snd_rawmidi_transmit_peek(substream, chunk, req);
if (done == 0)
@@ -56,7 +57,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
for (;;) {
done = line6_midibuf_read(mb, chunk,
- LINE6_FALLBACK_MAXPACKETSIZE);
+ LINE6_FALLBACK_MAXPACKETSIZE,
+ LINE6_MIDIBUF_READ_TX);
if (done == 0)
break;
diff --git a/sound/usb/line6/midibuf.c b/sound/usb/line6/midibuf.c
index 6a70463f82c4..e7f830f7526c 100644
--- a/sound/usb/line6/midibuf.c
+++ b/sound/usb/line6/midibuf.c
@@ -9,6 +9,7 @@
#include "midibuf.h"
+
static int midibuf_message_length(unsigned char code)
{
int message_length;
@@ -20,12 +21,7 @@ static int midibuf_message_length(unsigned char code)
message_length = length[(code >> 4) - 8];
} else {
- /*
- Note that according to the MIDI specification 0xf2 is
- the "Song Position Pointer", but this is used by Line 6
- to send sysex messages to the host.
- */
- static const int length[] = { -1, 2, -1, 2, -1, -1, 1, 1, 1, 1,
+ static const int length[] = { -1, 2, 2, 2, -1, -1, 1, 1, 1, -1,
1, 1, 1, -1, 1, 1
};
message_length = length[code & 0x0f];
@@ -125,7 +121,7 @@ int line6_midibuf_write(struct midi_buffer *this, unsigned char *data,
}
int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
- int length)
+ int length, int read_type)
{
int bytes_used;
int length1, length2;
@@ -148,9 +144,22 @@ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
length1 = this->size - this->pos_read;
- /* check MIDI command length */
command = this->buf[this->pos_read];
+ /*
+ * PODxt always has the status byte lower nibble set to 0010
+ * when it means to send 0000, so we correct it here so that
+ * control/program changes come on channel 1 and the sysex
+ * message status byte is correct.
+ */
+ if (read_type == LINE6_MIDIBUF_READ_RX) {
+ if (command == 0xb2 || command == 0xc2 || command == 0xf2) {
+ unsigned char fixed = command & 0xf0;
+ this->buf[this->pos_read] = fixed;
+ command = fixed;
+ }
+ }
+ /* check MIDI command length */
if (command & 0x80) {
midi_length = midibuf_message_length(command);
this->command_prev = command;
diff --git a/sound/usb/line6/midibuf.h b/sound/usb/line6/midibuf.h
index 124a8f9f7e96..542e8d836f87 100644
--- a/sound/usb/line6/midibuf.h
+++ b/sound/usb/line6/midibuf.h
@@ -8,6 +8,9 @@
#ifndef MIDIBUF_H
#define MIDIBUF_H
+#define LINE6_MIDIBUF_READ_TX 0
+#define LINE6_MIDIBUF_READ_RX 1
+
struct midi_buffer {
unsigned char *buf;
int size;
@@ -23,7 +26,7 @@ extern void line6_midibuf_destroy(struct midi_buffer *mb);
extern int line6_midibuf_ignore(struct midi_buffer *mb, int length);
extern int line6_midibuf_init(struct midi_buffer *mb, int size, int split);
extern int line6_midibuf_read(struct midi_buffer *mb, unsigned char *data,
- int length);
+ int length, int read_type);
extern void line6_midibuf_reset(struct midi_buffer *mb);
extern int line6_midibuf_write(struct midi_buffer *mb, unsigned char *data,
int length);
diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
index b667bb8ad938..e765f98bb0b1 100644
--- a/sound/usb/line6/pod.c
+++ b/sound/usb/line6/pod.c
@@ -159,8 +159,9 @@ static struct line6_pcm_properties pod_pcm_properties = {
.bytes_per_channel = 3 /* SNDRV_PCM_FMTBIT_S24_3LE */
};
+
static const char pod_version_header[] = {
- 0xf2, 0x7e, 0x7f, 0x06, 0x02
+ 0xf0, 0x7e, 0x7f, 0x06, 0x02
};
static char *pod_alloc_sysex_buffer(struct usb_line6_pod *pod, int code,
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index ce501200e592..f175c537353a 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1149,10 +1149,8 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
port = &umidi->endpoints[i].out->ports[j];
break;
}
- if (!port) {
- snd_BUG();
+ if (!port)
return -ENXIO;
- }
substream->runtime->private_data = port;
port->state = STATE_UNKNOWN;
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index c29ccdf9e8bc..96d32766e93c 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2148,6 +2148,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
{
+ /* M-Audio Micro */
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x201a),
+},
+{
USB_DEVICE_VENDOR_SPEC(0x0763, 0x2030),
.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
/* .vendor_name = "M-Audio", */
@@ -3760,6 +3764,64 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
},
/*
+ * MacroSilicon MS2100/MS2106 based AV capture cards
+ *
+ * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch.
+ * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if
+ * they pretend to be 96kHz mono as a workaround for stereo being broken
+ * by that...
+ *
+ * They also have an issue with initial stream alignment that causes the
+ * channels to be swapped and out of phase, which is dealt with in quirks.c.
+ */
+{
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .idVendor = 0x534d,
+ .idProduct = 0x0021,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "MacroSilicon",
+ .product_name = "MS210x",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = &(const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ {
+ .ifnum = 3,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .iface = 3,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = 0,
+ .endpoint = 0x82,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC |
+ USB_ENDPOINT_SYNC_ASYNC,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+
+/*
* MacroSilicon MS2109 based HDMI capture cards
*
* These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch.
@@ -3848,5 +3910,34 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
}
}
},
+{
+ /* Advanced modes of the Mythware XA001AU.
+ * For the standard mode, Mythware XA001AU has ID ffad:a001
+ */
+ USB_DEVICE_VENDOR_SPEC(0xffad, 0xa001),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Mythware",
+ .product_name = "XA001AU",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_IGNORE_INTERFACE,
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE,
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE,
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
#undef USB_DEVICE_VENDOR_SPEC
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 72223545abfd..28489aab6821 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1440,6 +1440,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
set_format_emu_quirk(subs, fmt);
break;
+ case USB_ID(0x534d, 0x0021): /* MacroSilicon MS2100/MS2106 */
case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
subs->stream_offset_adj = 2;
break;
@@ -1675,6 +1676,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
/* XMOS based USB DACs */
switch (chip->usb_id) {
case USB_ID(0x1511, 0x0037): /* AURALiC VEGA */
+ case USB_ID(0x21ed, 0xd75a): /* Accuphase DAC-60 option card */
case USB_ID(0x2522, 0x0012): /* LH Labs VI DAC Infinity */
case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
if (fp->altsetting == 2)
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index eff1ac1dc9ba..967f05d7e205 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -301,9 +301,12 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
c = 0;
if (bits) {
- for (; bits && *maps; maps++, bits >>= 1)
+ for (; bits && *maps; maps++, bits >>= 1) {
if (bits & 1)
chmap->map[c++] = *maps;
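+ /* stop once every channel is mapped to avoid writing past map[] */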
+ if (c == chmap->channels)
+ break;
+ }
} else {
/* If we're missing wChannelConfig, then guess something
to make sure the channel map is not skipped entirely */
@@ -1103,7 +1106,7 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
* Dallas DS4201 workaround: It presents 5 altsettings, but the last
* one misses syncpipe, and does not produce any sound.
*/
- if (chip->usb_id == USB_ID(0x04fa, 0x4201))
+ if (chip->usb_id == USB_ID(0x04fa, 0x4201) && num >= 4)
num = 4;
for (i = 0; i < num; i++) {
diff --git a/tools/arch/parisc/include/uapi/asm/mman.h b/tools/arch/parisc/include/uapi/asm/mman.h
index f9fd1325f5bd..7d685a15723f 100644
--- a/tools/arch/parisc/include/uapi/asm/mman.h
+++ b/tools/arch/parisc/include/uapi/asm/mman.h
@@ -1,20 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef TOOLS_ARCH_PARISC_UAPI_ASM_MMAN_FIX_H
#define TOOLS_ARCH_PARISC_UAPI_ASM_MMAN_FIX_H
-#define MADV_DODUMP 70
+#define MADV_DODUMP 17
#define MADV_DOFORK 11
-#define MADV_DONTDUMP 69
+#define MADV_DONTDUMP 16
#define MADV_DONTFORK 10
#define MADV_DONTNEED 4
#define MADV_FREE 8
-#define MADV_HUGEPAGE 67
-#define MADV_MERGEABLE 65
-#define MADV_NOHUGEPAGE 68
+#define MADV_HUGEPAGE 14
+#define MADV_MERGEABLE 12
+#define MADV_NOHUGEPAGE 15
#define MADV_NORMAL 0
#define MADV_RANDOM 1
#define MADV_REMOVE 9
#define MADV_SEQUENTIAL 2
-#define MADV_UNMERGEABLE 66
+#define MADV_UNMERGEABLE 13
#define MADV_WILLNEED 3
#define MAP_ANONYMOUS 0x10
#define MAP_DENYWRITE 0x0800
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 4133c721af6e..eea52cb0e6ab 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -13,8 +13,8 @@
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS 19 /* N 32-bit words worth of info */
-#define NBUGINTS 1 /* N 32-bit bug flags */
+#define NCAPINTS 20 /* N 32-bit words worth of info */
+#define NBUGINTS 2 /* N 32-bit bug flags */
/*
* Note: If the comment begins with a quoted string, that string is used
@@ -96,6 +96,7 @@
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
+/* FREE! ( 3*32+17) */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
@@ -199,7 +200,7 @@
#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
+/* FREE! ( 7*32+10) */
#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCEs for Spectre variant 2 */
@@ -209,7 +210,7 @@
#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
-#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
+/* FREE! ( 7*32+20) */
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
@@ -284,8 +285,10 @@
#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
+#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM-Exit when EIBRS is enabled */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
+#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
@@ -327,6 +330,7 @@
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
+#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* "" SVME addr check */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
@@ -366,6 +370,13 @@
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+/* AMD-defined memory encryption features, CPUID level 0x8000001f (EAX), word 19 */
+#define X86_FEATURE_SME (19*32+ 0) /* AMD Secure Memory Encryption */
+#define X86_FEATURE_SEV (19*32+ 1) /* AMD Secure Encrypted Virtualization */
+#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* "" VM Page Flush MSR is supported */
+#define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
+#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
+
/*
* BUG word(s)
*/
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index a5ea841cc6d2..f0f935f8d917 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -84,6 +84,7 @@
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
#define DISABLED_MASK17 0
#define DISABLED_MASK18 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
+#define DISABLED_MASK19 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h
index 6847d85400a8..fa5700097f64 100644
--- a/tools/arch/x86/include/asm/required-features.h
+++ b/tools/arch/x86/include/asm/required-features.h
@@ -101,6 +101,7 @@
#define REQUIRED_MASK16 0
#define REQUIRED_MASK17 0
#define REQUIRED_MASK18 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
+#define REQUIRED_MASK19 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
index 3eb8411ab60e..e95b72ec19bc 100644
--- a/tools/arch/x86/include/uapi/asm/vmx.h
+++ b/tools/arch/x86/include/uapi/asm/vmx.h
@@ -33,7 +33,7 @@
#define EXIT_REASON_TRIPLE_FAULT 2
#define EXIT_REASON_INIT_SIGNAL 3
-#define EXIT_REASON_PENDING_INTERRUPT 7
+#define EXIT_REASON_INTERRUPT_WINDOW 7
#define EXIT_REASON_NMI_WINDOW 8
#define EXIT_REASON_TASK_SWITCH 9
#define EXIT_REASON_CPUID 10
@@ -94,7 +94,7 @@
{ EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
{ EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
{ EXIT_REASON_INIT_SIGNAL, "INIT_SIGNAL" }, \
- { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \
+ { EXIT_REASON_INTERRUPT_WINDOW, "INTERRUPT_WINDOW" }, \
{ EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
{ EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
{ EXIT_REASON_CPUID, "CPUID" }, \
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 397e5716ab6d..ebb73e447310 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -251,7 +251,7 @@ static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
*(char *)data);
break;
case BTF_INT_BOOL:
- jsonw_bool(jw, *(int *)data);
+ jsonw_bool(jw, *(bool *)data);
break;
default:
/* shouldn't happen */
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
index 86501cd3c763..f2be9ef2c2d5 100644
--- a/tools/bpf/bpftool/json_writer.c
+++ b/tools/bpf/bpftool/json_writer.c
@@ -80,9 +80,6 @@ static void jsonw_puts(json_writer_t *self, const char *str)
case '"':
fputs("\\\"", self->out);
break;
- case '\'':
- fputs("\\\'", self->out);
- break;
default:
putc(*str, self->out);
}
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 4b03983acbef..35984bd354cb 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -364,6 +364,16 @@ int main(int argc, char **argv)
setlinebuf(stdout);
+#ifdef USE_LIBCAP
+ /* Libcap < 2.63 hooks before main() to compute the number of
+ * capabilities of the running kernel, and in doing so calls
+ * prctl(), which may fail and set errno to a non-zero value.
+ * Reset errno so that it does not interfere with batch mode.
+ */
+ errno = 0;
+#endif
+
last_do_help = do_help;
pretty_output = false;
json_output = false;
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
index 5b91ee65a080..762ca450d198 100644
--- a/tools/bpf/bpftool/xlated_dumper.c
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -363,8 +363,15 @@ void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
struct bpf_insn *insn_start = buf_start;
struct bpf_insn *insn_end = buf_end;
struct bpf_insn *cur = insn_start;
+ bool double_insn = false;
for (; cur <= insn_end; cur++) {
+ if (double_insn) {
+ double_insn = false;
+ continue;
+ }
+ double_insn = cur->code == (BPF_LD | BPF_IMM | BPF_DW);
+
printf("% 4d: ", (int)(cur - insn_start + start_idx));
print_bpf_insn(&cbs, cur, true);
if (cur != insn_end)
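
BPF_LD | BPF_IMM | BPF_DW is the one eBPF instruction that spans two consecutive struct bpf_insn slots, with the 64-bit immediate split across the imm fields of both halves, which is why the dumper skips the pseudo second slot. A self-contained sketch of the reconstruction (struct layout simplified from the kernel's struct bpf_insn):

#include <stdint.h>

struct insn { uint8_t code; int32_t imm; };

static uint64_t ld_imm64(const struct insn *insn)
{
	/* low 32 bits in insn[0].imm, high 32 bits in insn[1].imm */
	return (uint32_t)insn[0].imm | ((uint64_t)insn[1].imm << 32);
}

int main(void)
{
	/* 0x18 == BPF_LD | BPF_IMM | BPF_DW */
	struct insn pair[2] = { { 0x18, 0x44332211 }, { 0, 0x08877665 } };

	return ld_imm64(pair) == 0x0887766544332211ULL ? 0 : 1;
}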
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 1ea26bb8c579..6714c886940f 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -42,6 +42,7 @@ FEATURE_TESTS_BASIC := \
gtk2-infobar \
libaudit \
libbfd \
+ libbfd-buildid \
libcap \
libelf \
libelf-getphdrnum \
@@ -110,6 +111,7 @@ FEATURE_DISPLAY ?= \
gtk2 \
libaudit \
libbfd \
+ libbfd-buildid \
libcap \
libelf \
libnuma \
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 88392219d425..8104e505efde 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -15,6 +15,7 @@ FILES= \
test-hello.bin \
test-libaudit.bin \
test-libbfd.bin \
+ test-libbfd-buildid.bin \
test-disassembler-four-args.bin \
test-reallocarray.bin \
test-libbfd-liberty.bin \
@@ -223,6 +224,9 @@ $(OUTPUT)test-libpython.bin:
$(OUTPUT)test-libbfd.bin:
$(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
+$(OUTPUT)test-libbfd-buildid.bin:
+ $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
+
$(OUTPUT)test-disassembler-four-args.bin:
$(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 6eaeaf2da36e..039bd2fbe7d9 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -90,6 +90,10 @@
# include "test-libbfd.c"
#undef main
+#define main main_test_libbfd_buildid
+# include "test-libbfd-buildid.c"
+#undef main
+
#define main main_test_backtrace
# include "test-backtrace.c"
#undef main
@@ -208,6 +212,7 @@ int main(int argc, char *argv[])
main_test_gtk2(argc, argv);
main_test_gtk2_infobar(argc, argv);
main_test_libbfd();
+ main_test_libbfd_buildid();
main_test_backtrace();
main_test_libnuma();
main_test_numa_num_possible_cpus();
diff --git a/tools/build/feature/test-libbfd-buildid.c b/tools/build/feature/test-libbfd-buildid.c
new file mode 100644
index 000000000000..157644b04c05
--- /dev/null
+++ b/tools/build/feature/test-libbfd-buildid.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bfd.h>
+
+int main(void)
+{
+ bfd *abfd = bfd_openr("Pedro", 0);
+ return abfd && (!abfd->build_id || abfd->build_id->size > 0x506564726f);
+}
diff --git a/tools/build/feature/test-libcrypto.c b/tools/build/feature/test-libcrypto.c
index a98174e0569c..bc34a5bbb504 100644
--- a/tools/build/feature/test-libcrypto.c
+++ b/tools/build/feature/test-libcrypto.c
@@ -1,16 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
+#include <openssl/evp.h>
#include <openssl/sha.h>
#include <openssl/md5.h>
int main(void)
{
- MD5_CTX context;
+ EVP_MD_CTX *mdctx;
unsigned char md[MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH];
unsigned char dat[] = "12345";
+ unsigned int digest_len;
- MD5_Init(&context);
- MD5_Update(&context, &dat[0], sizeof(dat));
- MD5_Final(&md[0], &context);
+ mdctx = EVP_MD_CTX_new();
+ if (!mdctx)
+ return 0;
+
+ EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
+ EVP_DigestUpdate(mdctx, &dat[0], sizeof(dat));
+ EVP_DigestFinal_ex(mdctx, &md[0], &digest_len);
+ EVP_MD_CTX_free(mdctx);
SHA1(&dat[0], sizeof(dat), &md[0]);
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 34d63bcebcd2..2fd10eab75b5 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -49,12 +49,15 @@ enum autochan {
* Has the side effect of filling the channels[i].location values used
* in processing the buffer output.
**/
-int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
+static unsigned int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
{
- int bytes = 0;
- int i = 0;
+ unsigned int bytes = 0;
+ int i = 0, max = 0;
+ unsigned int misalignment;
while (i < num_channels) {
+ if (channels[i].bytes > max)
+ max = channels[i].bytes;
if (bytes % channels[i].bytes == 0)
channels[i].location = bytes;
else
@@ -64,11 +67,19 @@ int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
bytes = channels[i].location + channels[i].bytes;
i++;
}
+ /*
+ * We want the data in the next sample to also be properly aligned,
+ * so we'll add padding at the end if needed. Adding padding only
+ * works for channel data whose size is 2^n bytes.
+ */
+ misalignment = bytes % max;
+ if (misalignment)
+ bytes += max - misalignment;
return bytes;
}
-void print1byte(uint8_t input, struct iio_channel_info *info)
+static void print1byte(uint8_t input, struct iio_channel_info *info)
{
/*
* Shift before conversion to avoid sign extension
@@ -85,7 +96,7 @@ void print1byte(uint8_t input, struct iio_channel_info *info)
}
}
-void print2byte(uint16_t input, struct iio_channel_info *info)
+static void print2byte(uint16_t input, struct iio_channel_info *info)
{
/* First swap if incorrect endian */
if (info->be)
@@ -108,7 +119,7 @@ void print2byte(uint16_t input, struct iio_channel_info *info)
}
}
-void print4byte(uint32_t input, struct iio_channel_info *info)
+static void print4byte(uint32_t input, struct iio_channel_info *info)
{
/* First swap if incorrect endian */
if (info->be)
@@ -131,7 +142,7 @@ void print4byte(uint32_t input, struct iio_channel_info *info)
}
}
-void print8byte(uint64_t input, struct iio_channel_info *info)
+static void print8byte(uint64_t input, struct iio_channel_info *info)
{
/* First swap if incorrect endian */
if (info->be)
@@ -167,9 +178,8 @@ void print8byte(uint64_t input, struct iio_channel_info *info)
* to fill the location offsets.
* @num_channels: number of channels
**/
-void process_scan(char *data,
- struct iio_channel_info *channels,
- int num_channels)
+static void process_scan(char *data, struct iio_channel_info *channels,
+ int num_channels)
{
int k;
@@ -238,7 +248,7 @@ static int enable_disable_all_channels(char *dev_dir_name, int enable)
return 0;
}
-void print_usage(void)
+static void print_usage(void)
{
fprintf(stderr, "Usage: generic_buffer [options]...\n"
"Capture, convert and output data from IIO device buffer\n"
@@ -257,12 +267,12 @@ void print_usage(void)
" -w <n> Set delay between reads in us (event-less mode)\n");
}
-enum autochan autochannels = AUTOCHANNELS_DISABLED;
-char *dev_dir_name = NULL;
-char *buf_dir_name = NULL;
-bool current_trigger_set = false;
+static enum autochan autochannels = AUTOCHANNELS_DISABLED;
+static char *dev_dir_name = NULL;
+static char *buf_dir_name = NULL;
+static bool current_trigger_set = false;
-void cleanup(void)
+static void cleanup(void)
{
int ret;
@@ -294,14 +304,14 @@ void cleanup(void)
}
}
-void sig_handler(int signum)
+static void sig_handler(int signum)
{
fprintf(stderr, "Caught signal %d\n", signum);
cleanup();
exit(-signum);
}
-void register_cleanup(void)
+static void register_cleanup(void)
{
struct sigaction sa = { .sa_handler = sig_handler };
const int signums[] = { SIGINT, SIGTERM, SIGABRT };
@@ -343,7 +353,7 @@ int main(int argc, char **argv)
ssize_t read_size;
int dev_num = -1, trig_num = -1;
char *buffer_access = NULL;
- int scan_size;
+ unsigned int scan_size;
int noevents = 0;
int notrigger = 0;
char *dummy;
@@ -613,7 +623,16 @@ int main(int argc, char **argv)
}
scan_size = size_from_channelarray(channels, num_channels);
- data = malloc(scan_size * buf_len);
+
+ size_t total_buf_len = scan_size * buf_len;
+
+ if (scan_size > 0 && total_buf_len / scan_size != buf_len) {
+ ret = -EFAULT;
+ perror("Integer overflow happened when calculate scan_size * buf_len");
+ goto error;
+ }
+
+ data = malloc(total_buf_len);
if (!data) {
ret = -ENOMEM;
goto error;
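
The guard added above uses the standard division trick: after r = a * b, the multiplication overflowed iff a != 0 and r / a != b. A minimal sketch of the same check as a reusable helper (the helper name is illustrative, not part of the tool):

#include <stddef.h>

static int mul_overflows(size_t a, size_t b, size_t *res)
{
	size_t r = a * b;

	if (a != 0 && r / a != b)
		return 1;	/* a * b wrapped past SIZE_MAX */
	*res = r;
	return 0;
}

int main(void)
{
	size_t r;

	return mul_overflows((size_t)-1, 2, &r) ? 0 : 1;	/* detects the wrap */
}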
diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
index 7399eb7f1378..48360994c2a1 100644
--- a/tools/iio/iio_utils.c
+++ b/tools/iio/iio_utils.c
@@ -262,6 +262,7 @@ int iioutils_get_param_float(float *output, const char *param_name,
if (fscanf(sysfsfp, "%f", output) != 1)
ret = errno ? -errno : -ENODATA;
+ fclose(sysfsfp);
break;
}
error_free_filename:
@@ -342,9 +343,9 @@ int build_channel_array(const char *device_dir,
}
sysfsfp = fopen(filename, "r");
+ free(filename);
if (!sysfsfp) {
ret = -errno;
- free(filename);
goto error_close_dir;
}
@@ -354,7 +355,6 @@ int build_channel_array(const char *device_dir,
if (fclose(sysfsfp))
perror("build_channel_array(): Failed to close file");
- free(filename);
goto error_close_dir;
}
if (ret == 1)
@@ -362,11 +362,9 @@ int build_channel_array(const char *device_dir,
if (fclose(sysfsfp)) {
ret = -errno;
- free(filename);
goto error_close_dir;
}
- free(filename);
}
*ci_array = malloc(sizeof(**ci_array) * (*counter));
@@ -392,9 +390,9 @@ int build_channel_array(const char *device_dir,
}
sysfsfp = fopen(filename, "r");
+ free(filename);
if (!sysfsfp) {
ret = -errno;
- free(filename);
count--;
goto error_cleanup_array;
}
@@ -402,20 +400,17 @@ int build_channel_array(const char *device_dir,
errno = 0;
if (fscanf(sysfsfp, "%i", &current_enabled) != 1) {
ret = errno ? -errno : -ENODATA;
- free(filename);
count--;
goto error_cleanup_array;
}
if (fclose(sysfsfp)) {
ret = -errno;
- free(filename);
count--;
goto error_cleanup_array;
}
if (!current_enabled) {
- free(filename);
count--;
continue;
}
@@ -426,7 +421,6 @@ int build_channel_array(const char *device_dir,
strlen(ent->d_name) -
strlen("_en"));
if (!current->name) {
- free(filename);
ret = -ENOMEM;
count--;
goto error_cleanup_array;
@@ -436,7 +430,6 @@ int build_channel_array(const char *device_dir,
ret = iioutils_break_up_name(current->name,
&current->generic_name);
if (ret) {
- free(filename);
free(current->name);
count--;
goto error_cleanup_array;
@@ -447,17 +440,16 @@ int build_channel_array(const char *device_dir,
scan_el_dir,
current->name);
if (ret < 0) {
- free(filename);
ret = -ENOMEM;
goto error_cleanup_array;
}
sysfsfp = fopen(filename, "r");
+ free(filename);
if (!sysfsfp) {
ret = -errno;
- fprintf(stderr, "failed to open %s\n",
- filename);
- free(filename);
+ fprintf(stderr, "failed to open %s/%s_index\n",
+ scan_el_dir, current->name);
goto error_cleanup_array;
}
@@ -467,17 +459,14 @@ int build_channel_array(const char *device_dir,
if (fclose(sysfsfp))
perror("build_channel_array(): Failed to close file");
- free(filename);
goto error_cleanup_array;
}
if (fclose(sysfsfp)) {
ret = -errno;
- free(filename);
goto error_cleanup_array;
}
- free(filename);
/* Find the scale */
ret = iioutils_get_param_float(&current->scale,
"scale",
@@ -543,6 +532,10 @@ static int calc_digits(int num)
{
int count = 0;
+ /* It takes a digit to represent zero */
+ if (!num)
+ return 1;
+
while (num != 0) {
num /= 10;
count++;
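
Before the early return, calc_digits(0) fell straight through the loop and reported zero digits, undercounting by one whenever the result is used to size a string for channel index 0. A self-contained check of the fixed behaviour:

#include <assert.h>

/* same algorithm as the fixed calc_digits() above */
static int calc_digits(int num)
{
	int count = 0;

	if (!num)
		return 1;	/* zero still takes one digit to print */

	while (num != 0) {
		num /= 10;
		count++;
	}
	return count;
}

int main(void)
{
	assert(calc_digits(0) == 1);	/* previously returned 0 */
	assert(calc_digits(100) == 3);
	return 0;
}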
diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h
index b8cecb66d28b..c20d2fe7ceba 100644
--- a/tools/include/nolibc/nolibc.h
+++ b/tools/include/nolibc/nolibc.h
@@ -2318,9 +2318,9 @@ static __attribute__((unused))
int memcmp(const void *s1, const void *s2, size_t n)
{
size_t ofs = 0;
- char c1 = 0;
+ int c1 = 0;
- while (ofs < n && !(c1 = ((char *)s1)[ofs] - ((char *)s2)[ofs])) {
+ while (ofs < n && !(c1 = ((unsigned char *)s1)[ofs] - ((unsigned char *)s2)[ofs])) {
ofs++;
}
return c1;
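
Two signedness bugs are fixed here: memcmp() must compare bytes as unsigned char (C11 7.24.4), and the accumulator must be an int, since squeezing a difference like -144 back into a plain char wraps it to +112 and flips the sign of the result. A short demonstration of why the casts matter (values are illustrative):

#include <stdio.h>

int main(void)
{
	signed char a = (signed char)0x80, b = 0x10;	/* bytes 0x80, 0x10 */
	unsigned char ua = 0x80, ub = 0x10;

	/* prints "-144 112": treating the bytes as signed orders them
	 * wrongly, and -144 stored into a char would wrap to +112 */
	printf("%d %d\n", a - b, ua - ub);
	return 0;
}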
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 0bfad86ec960..e5250a9b813d 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -780,7 +780,9 @@ union bpf_attr {
* performed again, if the helper is used in combination with
* direct packet access.
* Return
- * 0 on success, or a negative error in case of failure.
+ * 0 on success, or a negative error in case of failure. A positive
+ * error indicates a potential drop or congestion in the target
+ * device. The particular positive error codes are not defined.
*
* u64 bpf_get_current_pid_tgid(void)
* Return
@@ -3068,7 +3070,8 @@ struct bpf_sock {
__u32 src_ip4;
__u32 src_ip6[4];
__u32 src_port; /* host byte order */
- __u32 dst_port; /* network byte order */
+ __be16 dst_port; /* network byte order */
+ __u16 :16; /* zero padding */
__u32 dst_ip4;
__u32 dst_ip6[4];
__u32 state;
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index fabe5aeaa351..6f1bbf2ada4c 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -273,6 +273,7 @@ enum {
* { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 id; } && PERF_FORMAT_ID
+ * { u64 lost; } && PERF_FORMAT_LOST
* } && !PERF_FORMAT_GROUP
*
* { u64 nr;
@@ -280,6 +281,7 @@ enum {
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 value;
* { u64 id; } && PERF_FORMAT_ID
+ * { u64 lost; } && PERF_FORMAT_LOST
* } cntr[nr];
* } && PERF_FORMAT_GROUP
* };
@@ -289,8 +291,9 @@ enum perf_event_read_format {
PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
PERF_FORMAT_ID = 1U << 2,
PERF_FORMAT_GROUP = 1U << 3,
+ PERF_FORMAT_LOST = 1U << 4,
- PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
+ PERF_FORMAT_MAX = 1U << 5, /* non-ABI */
};
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
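
With every format bit requested, the non-group layout documented above maps onto a flat record; a sketch of what a read(2) on such an event returns (this mirrors the comment, it is not a uapi type, and fields are present only when their bits are set):

#include <stdint.h>

struct read_format_single {
	uint64_t value;
	uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	uint64_t time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	uint64_t id;		/* PERF_FORMAT_ID */
	uint64_t lost;		/* new with PERF_FORMAT_LOST */
};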
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 2a1dbf52fc9a..b8849812449c 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1884,7 +1884,7 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
struct bpf_map_info info = {};
- __u32 len = sizeof(info);
+ __u32 len = sizeof(info), name_len;
int new_fd, err;
char *new_name;
@@ -1892,7 +1892,12 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
if (err)
return err;
- new_name = strdup(info.name);
+ name_len = strlen(info.name);
+ if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
+ new_name = strdup(map->name);
+ else
+ new_name = strdup(info.name);
+
if (!new_name)
return -errno;
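
bpf_obj_get_info_by_fd() can only return the BPF_OBJ_NAME_LEN - 1 name bytes that the kernel stored, so a longer map name comes back truncated; the added check keeps the caller's full name whenever the kernel copy is exactly that truncated prefix. A sketch of the predicate in isolation (the helper name is illustrative):

#include <string.h>

#define BPF_OBJ_NAME_LEN 16U	/* from include/uapi/linux/bpf.h */

static int looks_truncated(const char *full_name, const char *info_name)
{
	size_t n = strlen(info_name);

	return n == BPF_OBJ_NAME_LEN - 1 && !strncmp(full_name, info_name, n);
}

int main(void)
{
	return looks_truncated("nested_map_array_map", "nested_map_arra") ? 0 : 1;
}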
diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
index 1e69c0c8d413..e610becd03e8 100644
--- a/tools/lib/bpf/nlattr.c
+++ b/tools/lib/bpf/nlattr.c
@@ -177,7 +177,7 @@ int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh)
hlen += nlmsg_len(&err->msg);
attr = (struct nlattr *) ((void *) err + hlen);
- alen = nlh->nlmsg_len - hlen;
+ alen = (void *)nlh + nlh->nlmsg_len - (void *)attr;
if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen,
extack_policy) != 0) {
diff --git a/tools/lib/subcmd/help.c b/tools/lib/subcmd/help.c
index 2859f107abc8..4260c8b4257b 100644
--- a/tools/lib/subcmd/help.c
+++ b/tools/lib/subcmd/help.c
@@ -50,11 +50,21 @@ void uniq(struct cmdnames *cmds)
if (!cmds->cnt)
return;
- for (i = j = 1; i < cmds->cnt; i++)
- if (strcmp(cmds->names[i]->name, cmds->names[i-1]->name))
- cmds->names[j++] = cmds->names[i];
-
+ for (i = 1; i < cmds->cnt; i++) {
+ if (!strcmp(cmds->names[i]->name, cmds->names[i-1]->name))
+ zfree(&cmds->names[i - 1]);
+ }
+ for (i = 0, j = 0; i < cmds->cnt; i++) {
+ if (cmds->names[i]) {
+ if (i == j)
+ j++;
+ else
+ cmds->names[j++] = cmds->names[i];
+ }
+ }
cmds->cnt = j;
+ while (j < i)
+ cmds->names[j++] = NULL;
}
void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index bae6b261481d..dfd67243faac 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -136,6 +136,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
"panic",
"do_exit",
"do_task_dead",
+ "make_task_dead",
"__module_put_and_exit",
"complete_and_exit",
"__reiserfs_panic",
@@ -143,7 +144,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
"fortify_panic",
"usercopy_abort",
"machine_real_restart",
- "rewind_stack_do_exit",
+ "rewind_stack_and_make_dead",
"cpu_bringup_and_idle",
};
@@ -162,7 +163,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
return false;
insn = find_insn(file, func->sec, func->offset);
- if (!insn->func)
+ if (!insn || !insn->func)
return false;
func_for_each_insn_all(file, func, insn) {
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 9932dcb502e9..5cf88cfb6bdc 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -755,27 +755,36 @@ else
endif
endif
-ifeq ($(feature-libbfd), 1)
- EXTLIBS += -lbfd -lopcodes
-else
- # we are on a system that requires -liberty and (maybe) -lz
- # to link against -lbfd; test each case individually here
-
- # call all detections now so we get correct
- # status in VF output
- $(call feature_check,libbfd-liberty)
- $(call feature_check,libbfd-liberty-z)
- ifeq ($(feature-libbfd-liberty), 1)
- EXTLIBS += -lbfd -lopcodes -liberty
- FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
+ifndef NO_LIBBFD
+ ifeq ($(feature-libbfd), 1)
+ EXTLIBS += -lbfd -lopcodes
else
- ifeq ($(feature-libbfd-liberty-z), 1)
- EXTLIBS += -lbfd -lopcodes -liberty -lz
- FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
+ # we are on a system that requires -liberty and (maybe) -lz
+ # to link against -lbfd; test each case individually here
+
+ # call all detections now so we get correct
+ # status in VF output
+ $(call feature_check,libbfd-liberty)
+ $(call feature_check,libbfd-liberty-z)
+
+ ifeq ($(feature-libbfd-liberty), 1)
+ EXTLIBS += -lbfd -lopcodes -liberty
+ FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
+ else
+ ifeq ($(feature-libbfd-liberty-z), 1)
+ EXTLIBS += -lbfd -lopcodes -liberty -lz
+ FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
+ endif
endif
+ $(call feature_check,disassembler-four-args)
+ endif
+
+ ifeq ($(feature-libbfd-buildid), 1)
+ CFLAGS += -DHAVE_LIBBFD_BUILDID_SUPPORT
+ else
+ msg := $(warning Old version of libbfd/binutils: things like PE executable profiling will not be available);
endif
- $(call feature_check,disassembler-four-args)
endif
ifdef NO_DEMANGLE
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index 4aa6de1aa67d..d9329ae84e17 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -10,25 +10,13 @@ extern struct timeval bench__start, bench__end, bench__runtime;
* The madvise transparent hugepage constants were added in glibc
* 2.13. For compatibility with older versions of glibc, define these
* tokens if they are not already defined.
- *
- * PA-RISC uses different madvise values from other architectures and
- * needs to be special-cased.
*/
-#ifdef __hppa__
-# ifndef MADV_HUGEPAGE
-# define MADV_HUGEPAGE 67
-# endif
-# ifndef MADV_NOHUGEPAGE
-# define MADV_NOHUGEPAGE 68
-# endif
-#else
# ifndef MADV_HUGEPAGE
# define MADV_HUGEPAGE 14
# endif
# ifndef MADV_NOHUGEPAGE
# define MADV_NOHUGEPAGE 15
# endif
-#endif
int bench_numa(int argc, const char **argv);
int bench_sched_messaging(int argc, const char **argv);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 454e275cd5df..9c03f67398cb 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -80,6 +80,7 @@ struct record {
struct auxtrace_record *itr;
struct evlist *evlist;
struct perf_session *session;
+ struct evlist *sb_evlist;
int realtime_prio;
bool no_buildid;
bool no_buildid_set;
@@ -1109,8 +1110,8 @@ static int
record__switch_output(struct record *rec, bool at_exit)
{
struct perf_data *data = &rec->data;
+ char *new_filename = NULL;
int fd, err;
- char *new_filename;
/* Same Size: "2015122520103046"*/
char timestamp[] = "InvalidTimestamp";
@@ -1343,7 +1344,6 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
struct perf_data *data = &rec->data;
struct perf_session *session;
bool disabled = false, draining = false;
- struct evlist *sb_evlist = NULL;
int fd;
float ratio = 0;
@@ -1446,18 +1446,29 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
goto out_child;
}
+ err = -1;
if (!rec->no_buildid
&& !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
pr_err("Couldn't generate buildids. "
"Use --no-buildid to profile anyway.\n");
- err = -1;
goto out_child;
}
- if (!opts->no_bpf_event)
- bpf_event__add_sb_event(&sb_evlist, &session->header.env);
+ if (!opts->no_bpf_event) {
+ rec->sb_evlist = evlist__new();
+
+ if (rec->sb_evlist == NULL) {
+ pr_err("Couldn't create side band evlist.\n.");
+ goto out_child;
+ }
+
+ if (evlist__add_bpf_sb_event(rec->sb_evlist, &session->header.env)) {
+ pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
+ goto out_child;
+ }
+ }
- if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
+ if (perf_evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
opts->no_bpf_event = true;
}
@@ -1731,7 +1742,7 @@ out_delete_session:
perf_session__delete(session);
if (!opts->no_bpf_event)
- perf_evlist__stop_sb_thread(sb_evlist);
+ perf_evlist__stop_sb_thread(rec->sb_evlist);
return status;
}
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 5cacc4f84c8d..0826893098ce 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -666,7 +666,7 @@ static void create_tasks(struct perf_sched *sched)
err = pthread_attr_init(&attr);
BUG_ON(err);
err = pthread_attr_setstacksize(&attr,
- (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
+ (size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
BUG_ON(err);
err = pthread_mutex_lock(&sched->start_work_mutex);
BUG_ON(err);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index a30d62186f5e..b8fab267e855 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1542,7 +1542,6 @@ int cmd_top(int argc, const char **argv)
OPTS_EVSWITCH(&top.evswitch),
OPT_END()
};
- struct evlist *sb_evlist = NULL;
const char * const top_usage[] = {
"perf top [<options>]",
NULL
@@ -1679,13 +1678,27 @@ int cmd_top(int argc, const char **argv)
top.session = perf_session__new(NULL, false, NULL);
if (IS_ERR(top.session)) {
status = PTR_ERR(top.session);
+ top.session = NULL;
goto out_delete_evlist;
}
- if (!top.record_opts.no_bpf_event)
- bpf_event__add_sb_event(&sb_evlist, &perf_env);
+#ifdef HAVE_LIBBPF_SUPPORT
+ if (!top.record_opts.no_bpf_event) {
+ top.sb_evlist = evlist__new();
- if (perf_evlist__start_sb_thread(sb_evlist, target)) {
+ if (top.sb_evlist == NULL) {
+ pr_err("Couldn't create side band evlist.\n.");
+ goto out_delete_evlist;
+ }
+
+ if (evlist__add_bpf_sb_event(top.sb_evlist, &perf_env)) {
+ pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
+ goto out_delete_evlist;
+ }
+ }
+#endif
+
+ if (perf_evlist__start_sb_thread(top.sb_evlist, target)) {
pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
opts->no_bpf_event = true;
}
@@ -1693,7 +1706,7 @@ int cmd_top(int argc, const char **argv)
status = __cmd_top(&top);
if (!opts->no_bpf_event)
- perf_evlist__stop_sb_thread(sb_evlist);
+ perf_evlist__stop_sb_thread(top.sb_evlist);
out_delete_evlist:
evlist__delete(top.evlist);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index a5201de1a191..6052eb057821 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -86,6 +86,34 @@
# define F_LINUX_SPECIFIC_BASE 1024
#endif
+#define RAW_SYSCALL_ARGS_NUM 6
+
+/*
+ * strtoul: Go from a string to a value, e.g. for msr: MSR_FS_BASE to 0xc0000100
+ */
+struct syscall_arg_fmt {
+ size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
+ bool (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
+ unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
+ void *parm;
+ const char *name;
+ bool show_zero;
+};
+
+struct syscall_fmt {
+ const char *name;
+ const char *alias;
+ struct {
+ const char *sys_enter,
+ *sys_exit;
+ } bpf_prog_name;
+ struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
+ u8 nr_args;
+ bool errpid;
+ bool timeout;
+ bool hexret;
+};
+
struct trace {
struct perf_tool tool;
struct syscalltbl *sctbl;
@@ -694,27 +722,7 @@ static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"
-struct syscall_arg_fmt {
- size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
- unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
- void *parm;
- const char *name;
- bool show_zero;
-};
-
-static struct syscall_fmt {
- const char *name;
- const char *alias;
- struct {
- const char *sys_enter,
- *sys_exit;
- } bpf_prog_name;
- struct syscall_arg_fmt arg[6];
- u8 nr_args;
- bool errpid;
- bool timeout;
- bool hexret;
-} syscall_fmts[] = {
+static struct syscall_fmt syscall_fmts[] = {
{ .name = "access",
.arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
{ .name = "arch_prctl",
@@ -1012,7 +1020,7 @@ struct syscall {
*/
struct bpf_map_syscall_entry {
bool enabled;
- u16 string_args_len[6];
+ u16 string_args_len[RAW_SYSCALL_ARGS_NUM];
};
/*
@@ -1437,7 +1445,7 @@ static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
{
int idx;
- if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
+ if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
nr_args = sc->fmt->nr_args;
sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
@@ -1453,15 +1461,37 @@ static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
return 0;
}
-static int syscall__set_arg_fmts(struct syscall *sc)
+static struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
+};
+
+static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
+{
+ const struct syscall_arg_fmt *fmt = fmtp;
+ return strcmp(name, fmt->name);
+}
+
+static struct syscall_arg_fmt *
+__syscall_arg_fmt__find_by_name(struct syscall_arg_fmt *fmts, const int nmemb, const char *name)
{
- struct tep_format_field *field, *last_field = NULL;
- int idx = 0, len;
+ return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
+}
+
+static struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
+{
+ const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
+ return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
+}
- for (field = sc->args; field; field = field->next, ++idx) {
+static struct tep_format_field *
+syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
+{
+ struct tep_format_field *last_field = NULL;
+ int len;
+
+ for (; field; field = field->next, ++arg) {
last_field = field;
- if (sc->fmt && sc->fmt->arg[idx].scnprintf)
+ if (arg->scnprintf)
continue;
len = strlen(field->name);
@@ -1469,13 +1499,13 @@ static int syscall__set_arg_fmts(struct syscall *sc)
if (strcmp(field->type, "const char *") == 0 &&
((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
strstr(field->name, "path") != NULL))
- sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
+ arg->scnprintf = SCA_FILENAME;
else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
- sc->arg_fmt[idx].scnprintf = SCA_PTR;
+ arg->scnprintf = SCA_PTR;
else if (strcmp(field->type, "pid_t") == 0)
- sc->arg_fmt[idx].scnprintf = SCA_PID;
+ arg->scnprintf = SCA_PID;
else if (strcmp(field->type, "umode_t") == 0)
- sc->arg_fmt[idx].scnprintf = SCA_MODE_T;
+ arg->scnprintf = SCA_MODE_T;
else if ((strcmp(field->type, "int") == 0 ||
strcmp(field->type, "unsigned int") == 0 ||
strcmp(field->type, "long") == 0) &&
@@ -1487,10 +1517,24 @@ static int syscall__set_arg_fmts(struct syscall *sc)
* 23 unsigned int
* 7 unsigned long
*/
- sc->arg_fmt[idx].scnprintf = SCA_FD;
+ arg->scnprintf = SCA_FD;
+ } else {
+ struct syscall_arg_fmt *fmt = syscall_arg_fmt__find_by_name(field->name);
+
+ if (fmt) {
+ arg->scnprintf = fmt->scnprintf;
+ arg->strtoul = fmt->strtoul;
+ }
}
}
+ return last_field;
+}
+
+static int syscall__set_arg_fmts(struct syscall *sc)
+{
+ struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);
+
if (last_field)
sc->args_size = last_field->offset + last_field->size;
@@ -1511,11 +1555,11 @@ static int trace__read_syscall_info(struct trace *trace, int id)
sc = trace->syscalls.table + id;
if (sc->nonexistent)
- return 0;
+ return -EEXIST;
if (name == NULL) {
sc->nonexistent = true;
- return 0;
+ return -EEXIST;
}
sc->name = name;
@@ -1529,11 +1573,18 @@ static int trace__read_syscall_info(struct trace *trace, int id)
sc->tp_format = trace_event__tp_format("syscalls", tp_name);
}
- if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
- return -ENOMEM;
-
- if (IS_ERR(sc->tp_format))
+ /*
+ * If reading the trace point format via the sysfs node fails, the
+ * trace point doesn't exist. Set the 'nonexistent' flag to true.
+ */
+ if (IS_ERR(sc->tp_format)) {
+ sc->nonexistent = true;
return PTR_ERR(sc->tp_format);
+ }
+
+ if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
+ RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
+ return -ENOMEM;
sc->args = sc->tp_format->format.fields;
/*
@@ -1736,6 +1787,7 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
if (arg.mask & bit)
continue;
+ arg.fmt = &sc->arg_fmt[arg.idx];
val = syscall_arg__val(&arg, arg.idx);
/*
* Some syscall args need some mask, most don't and
@@ -1825,11 +1877,8 @@ static struct syscall *trace__syscall_info(struct trace *trace,
(err = trace__read_syscall_info(trace, id)) != 0)
goto out_cant_read;
- if (trace->syscalls.table[id].name == NULL) {
- if (trace->syscalls.table[id].nonexistent)
- return NULL;
+ if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
goto out_cant_read;
- }
return &trace->syscalls.table[id];
diff --git a/tools/perf/perf-completion.sh b/tools/perf/perf-completion.sh
index fdf75d45efff..978249d7868c 100644
--- a/tools/perf/perf-completion.sh
+++ b/tools/perf/perf-completion.sh
@@ -165,7 +165,12 @@ __perf_main ()
local cur1=${COMP_WORDS[COMP_CWORD]}
local raw_evts=$($cmd list --raw-dump)
- local arr s tmp result
+ local arr s tmp result cpu_evts
+
+ # aarch64 doesn't have /sys/bus/event_source/devices/cpu/events
+ if [[ `uname -m` != aarch64 ]]; then
+ cpu_evts=$(ls /sys/bus/event_source/devices/cpu/events)
+ fi
if [[ "$cur1" == */* && ${cur1#*/} =~ ^[A-Z] ]]; then
OLD_IFS="$IFS"
@@ -183,9 +188,9 @@ __perf_main ()
fi
done
- evts=${result}" "$(ls /sys/bus/event_source/devices/cpu/events)
+ evts=${result}" "${cpu_evts}
else
- evts=${raw_evts}" "$(ls /sys/bus/event_source/devices/cpu/events)
+ evts=${raw_evts}" "${cpu_evts}
fi
if [[ "$cur1" == , ]]; then
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
index 215ba30b8534..a055dee6a46a 100644
--- a/tools/perf/pmu-events/Build
+++ b/tools/perf/pmu-events/Build
@@ -6,10 +6,13 @@ pmu-events-y += pmu-events.o
JDIR = pmu-events/arch/$(SRCARCH)
JSON = $(shell [ -d $(JDIR) ] && \
find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
+JDIR_TEST = pmu-events/arch/test
+JSON_TEST = $(shell [ -d $(JDIR_TEST) ] && \
+ find $(JDIR_TEST) -name '*.json')
#
# Locate/process JSON files in pmu-events/arch/
# directory and create tables in pmu-events.c.
#
-$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS)
+$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JSON_TEST) $(JEVENTS)
$(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/other.json b/tools/perf/pmu-events/arch/powerpc/power9/other.json
index 62b864269623..ce75652e9051 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/other.json
@@ -1417,7 +1417,7 @@
{,
"EventCode": "0x45054",
"EventName": "PM_FMA_CMPL",
- "BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only. "
+ "BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only."
},
{,
"EventCode": "0x201E8",
@@ -2017,7 +2017,7 @@
{,
"EventCode": "0xC0BC",
"EventName": "PM_LSU_FLUSH_OTHER",
- "BriefDescription": "Other LSU flushes including: Sync (sync ack from L2 caused search of LRQ for oldest snooped load, This will either signal a Precise Flush of the oldest snooped loa or a Flush Next PPC); Data Valid Flush Next (several cases of this, one example is store and reload are lined up such that a store-hit-reload scenario exists and the CDF has already launched and has gotten bad/stale data); Bad Data Valid Flush Next (might be a few cases of this, one example is a larxa (D$ hit) return data and dval but can't allocate to LMQ (LMQ full or other reason). Already gave dval but can't watch it for snoop_hit_larx. Need to take the “bad dval” back and flush all younger ops)"
+ "BriefDescription": "Other LSU flushes including: Sync (sync ack from L2 caused search of LRQ for oldest snooped load, This will either signal a Precise Flush of the oldest snooped loa or a Flush Next PPC); Data Valid Flush Next (several cases of this, one example is store and reload are lined up such that a store-hit-reload scenario exists and the CDF has already launched and has gotten bad/stale data); Bad Data Valid Flush Next (might be a few cases of this, one example is a larxa (D$ hit) return data and dval but can't allocate to LMQ (LMQ full or other reason). Already gave dval but can't watch it for snoop_hit_larx. Need to take the 'bad dval' back and flush all younger ops)"
},
{,
"EventCode": "0x5094",
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
index b4772f54a271..e2f2ed0a3549 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
@@ -442,7 +442,7 @@
{,
"EventCode": "0x4D052",
"EventName": "PM_2FLOP_CMPL",
- "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg "
+ "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg"
},
{,
"EventCode": "0x1F142",
diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
new file mode 100644
index 000000000000..319f36ebb9a4
--- /dev/null
+++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+# test perf probe of function from different CU
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# skip if there's no gcc
+if ! [ -x "$(command -v gcc)" ]; then
+ echo "failed: no gcc compiler"
+ exit 2
+fi
+
+temp_dir=$(mktemp -d /tmp/perf-uprobe-different-cu-sh.XXXXXXXXXX)
+
+cleanup()
+{
+ trap - EXIT TERM INT
+ if [[ "${temp_dir}" =~ ^/tmp/perf-uprobe-different-cu-sh.*$ ]]; then
+ echo "--- Cleaning up ---"
+ perf probe -x ${temp_dir}/testfile -d foo || true
+ rm -f "${temp_dir}/"*
+ rmdir "${temp_dir}"
+ fi
+}
+
+trap_cleanup()
+{
+ cleanup
+ exit 1
+}
+
+trap trap_cleanup EXIT TERM INT
+
+cat > ${temp_dir}/testfile-foo.h << EOF
+struct t
+{
+ int *p;
+ int c;
+};
+
+extern int foo (int i, struct t *t);
+EOF
+
+cat > ${temp_dir}/testfile-foo.c << EOF
+#include "testfile-foo.h"
+
+int
+foo (int i, struct t *t)
+{
+ int j, res = 0;
+ for (j = 0; j < i && j < t->c; j++)
+ res += t->p[j];
+
+ return res;
+}
+EOF
+
+cat > ${temp_dir}/testfile-main.c << EOF
+#include "testfile-foo.h"
+
+static struct t g;
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int j[argc];
+ g.c = argc;
+ g.p = j;
+ for (i = 0; i < argc; i++)
+ j[i] = (int) argv[i][0];
+ return foo (3, &g);
+}
+EOF
+
+gcc -g -Og -flto -c ${temp_dir}/testfile-foo.c -o ${temp_dir}/testfile-foo.o
+gcc -g -Og -c ${temp_dir}/testfile-main.c -o ${temp_dir}/testfile-main.o
+gcc -g -Og -o ${temp_dir}/testfile ${temp_dir}/testfile-foo.o ${temp_dir}/testfile-main.o
+
+perf probe -x ${temp_dir}/testfile --funcs foo
+perf probe -x ${temp_dir}/testfile foo
+
+cleanup
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index 7e06605f7c76..4cc4f6b3d4a1 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -78,6 +78,8 @@ struct augmented_arg {
u64 value[];
};
+struct syscall_arg_fmt;
+
/**
* @val: value of syscall argument being formatted
* @args: All the args, use syscall_args__val(arg, nth) to access one
@@ -94,6 +96,7 @@ struct augmented_arg {
struct syscall_arg {
unsigned long val;
unsigned char *args;
+ struct syscall_arg_fmt *fmt;
struct {
struct augmented_arg *args;
int size;
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 514cef3a17b4..3461fa8cf440 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1718,7 +1718,7 @@ static void hists_browser__hierarchy_headers(struct hist_browser *browser)
hists_browser__scnprintf_hierarchy_headers(browser, headers,
sizeof(headers));
- ui_browser__gotorc(&browser->b, 0, 0);
+ ui_browser__gotorc_title(&browser->b, 0, 0);
ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
}
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 0c5e4ce390ef..e4f8e8903a0f 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1741,8 +1741,11 @@ static int symbol__disassemble_bpf(struct symbol *sym,
perf_exe(tpath, sizeof(tpath));
bfdf = bfd_openr(tpath, NULL);
- assert(bfdf);
- assert(bfd_check_format(bfdf, bfd_object));
+ if (bfdf == NULL)
+ abort();
+
+ if (!bfd_check_format(bfdf, bfd_object))
+ abort();
s = open_memstream(&buf, &buf_size);
if (!s) {
@@ -1790,7 +1793,8 @@ static int symbol__disassemble_bpf(struct symbol *sym,
#else
disassemble = disassembler(bfdf);
#endif
- assert(disassemble);
+ if (disassemble == NULL)
+ abort();
fflush(s);
do {
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index ae5b97427192..3d5cd16ca4de 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1710,11 +1710,19 @@ struct sym_args {
bool near;
};
+static bool kern_sym_name_match(const char *kname, const char *name)
+{
+ size_t n = strlen(name);
+
+ return !strcmp(kname, name) ||
+ (!strncmp(kname, name, n) && kname[n] == '\t');
+}
+
static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
/* A function with the same name, and global or the n'th found or any */
return kallsyms__is_function(type) &&
- !strcmp(name, args->name) &&
+ kern_sym_name_match(name, args->name) &&
((args->global && isupper(type)) ||
(args->selected && ++(args->cnt) == args->idx) ||
(!args->global && !args->selected));
@@ -1817,6 +1825,7 @@ static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
char type, u64 start)
{
struct sym_args *args = arg;
+ u64 size;
if (!kallsyms__is_function(type))
return 0;
@@ -1826,7 +1835,9 @@ static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
args->start = start;
}
/* Don't know exactly where the kernel ends, so we add a page */
- args->size = round_up(start, page_size) + page_size - args->start;
+ size = round_up(start, page_size) + page_size - args->start;
+ if (size > args->size)
+ args->size = size;
return 0;
}
@@ -1987,7 +1998,7 @@ static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
*size = sym->start - *start;
if (idx > 0) {
if (*size)
- return 1;
+ return 0;
} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
print_duplicate_syms(dso, sym_name);
return -EINVAL;
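
kern_sym_name_match() exists because kallsyms can report a module symbol as "name\t[module]", which an exact strcmp() never matched. A self-contained check of both spellings (the symbol and module names are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <string.h>

static bool kern_sym_name_match(const char *kname, const char *name)
{
	size_t n = strlen(name);

	return !strcmp(kname, name) ||
	       (!strncmp(kname, name, n) && kname[n] == '\t');
}

int main(void)
{
	assert(kern_sym_name_match("fuse_dev_read", "fuse_dev_read"));
	assert(kern_sym_name_match("fuse_dev_read\t[fuse]", "fuse_dev_read"));
	assert(!kern_sym_name_match("fuse_dev_read2", "fuse_dev_read"));
	return 0;
}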
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 782c0c8a9a83..ae30e20af246 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -422,8 +422,7 @@ static int bpf_event__sb_cb(union perf_event *event, void *data)
return 0;
}
-int bpf_event__add_sb_event(struct evlist **evlist,
- struct perf_env *env)
+int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_SOFTWARE,
@@ -443,9 +442,9 @@ int bpf_event__add_sb_event(struct evlist **evlist,
return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
}
-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
- struct perf_env *env,
- FILE *fp)
+void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ struct perf_env *env,
+ FILE *fp)
{
__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
@@ -461,7 +460,7 @@ void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
if (info->btf_id) {
struct btf_node *node;
- node = perf_env__find_btf(env, info->btf_id);
+ node = __perf_env__find_btf(env, info->btf_id);
if (node)
btf = btf__new((__u8 *)(node->data),
node->data_size);
diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
index 81fdc88e6c1a..50f7412464df 100644
--- a/tools/perf/util/bpf-event.h
+++ b/tools/perf/util/bpf-event.h
@@ -33,11 +33,10 @@ struct btf_node {
#ifdef HAVE_LIBBPF_SUPPORT
int machine__process_bpf(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
-int bpf_event__add_sb_event(struct evlist **evlist,
- struct perf_env *env);
-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
- struct perf_env *env,
- FILE *fp);
+int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env);
+void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ struct perf_env *env,
+ FILE *fp);
#else
static inline int machine__process_bpf(struct machine *machine __maybe_unused,
union perf_event *event __maybe_unused,
@@ -46,15 +45,15 @@ static inline int machine__process_bpf(struct machine *machine __maybe_unused,
return 0;
}
-static inline int bpf_event__add_sb_event(struct evlist **evlist __maybe_unused,
- struct perf_env *env __maybe_unused)
+static inline int evlist__add_bpf_sb_event(struct evlist *evlist __maybe_unused,
+ struct perf_env *env __maybe_unused)
{
return 0;
}
-static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
- struct perf_env *env __maybe_unused,
- FILE *fp __maybe_unused)
+static inline void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+ struct perf_env *env __maybe_unused,
+ FILE *fp __maybe_unused)
{
}
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 3c874f52f1a2..4da900bdb2f1 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -120,6 +120,7 @@ int perf_data__open_dir(struct perf_data *data)
file->size = st.st_size;
}
+ closedir(dir);
if (!files)
return -EINVAL;
@@ -128,6 +129,7 @@ int perf_data__open_dir(struct perf_data *data)
return 0;
out_err:
+ closedir(dir);
close_dir(files, nr);
return ret;
}
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index ab34ef2c661f..1d51aa88f4cb 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -254,26 +254,13 @@ static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name,
{
Dwarf_Attribute attr;
- if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
+ if (dwarf_attr_integrate(tp_die, attr_name, &attr) == NULL ||
dwarf_formudata(&attr, result) != 0)
return -ENOENT;
return 0;
}
-/* Get attribute and translate it as a sdata */
-static int die_get_attr_sdata(Dwarf_Die *tp_die, unsigned int attr_name,
- Dwarf_Sword *result)
-{
- Dwarf_Attribute attr;
-
- if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
- dwarf_formsdata(&attr, result) != 0)
- return -ENOENT;
-
- return 0;
-}
-
/**
* die_is_signed_type - Check whether a type DIE is signed or not
* @tp_die: a DIE of a type
@@ -397,9 +384,9 @@ int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs)
/* Get the call file index number in CU DIE */
static int die_get_call_fileno(Dwarf_Die *in_die)
{
- Dwarf_Sword idx;
+ Dwarf_Word idx;
- if (die_get_attr_sdata(in_die, DW_AT_call_file, &idx) == 0)
+ if (die_get_attr_udata(in_die, DW_AT_call_file, &idx) == 0)
return (int)idx;
else
return -ENOENT;
@@ -408,9 +395,9 @@ static int die_get_call_fileno(Dwarf_Die *in_die)
/* Get the declared file index number in CU DIE */
static int die_get_decl_fileno(Dwarf_Die *pdie)
{
- Dwarf_Sword idx;
+ Dwarf_Word idx;
- if (die_get_attr_sdata(pdie, DW_AT_decl_file, &idx) == 0)
+ if (die_get_attr_udata(pdie, DW_AT_decl_file, &idx) == 0)
return (int)idx;
else
return -ENOENT;
@@ -1020,7 +1007,7 @@ int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf)
ret = die_get_typename(vr_die, buf);
if (ret < 0) {
pr_debug("Failed to get type, make it unknown.\n");
- ret = strbuf_add(buf, " (unknown_type)", 14);
+ ret = strbuf_add(buf, "(unknown_type)", 14);
}
return ret < 0 ? ret : strbuf_addf(buf, "\t%s", dwarf_diename(vr_die));
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index ef64e197bc8d..953db9dd25eb 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -16,12 +16,18 @@ struct perf_env perf_env;
void perf_env__insert_bpf_prog_info(struct perf_env *env,
struct bpf_prog_info_node *info_node)
{
+ down_write(&env->bpf_progs.lock);
+ __perf_env__insert_bpf_prog_info(env, info_node);
+ up_write(&env->bpf_progs.lock);
+}
+
+void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
+{
__u32 prog_id = info_node->info_linear->info.id;
struct bpf_prog_info_node *node;
struct rb_node *parent = NULL;
struct rb_node **p;
- down_write(&env->bpf_progs.lock);
p = &env->bpf_progs.infos.rb_node;
while (*p != NULL) {
@@ -33,15 +39,13 @@ void perf_env__insert_bpf_prog_info(struct perf_env *env,
p = &(*p)->rb_right;
} else {
pr_debug("duplicated bpf prog info %u\n", prog_id);
- goto out;
+ return;
}
}
rb_link_node(&info_node->rb_node, parent, p);
rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
env->bpf_progs.infos_cnt++;
-out:
- up_write(&env->bpf_progs.lock);
}
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
@@ -71,13 +75,21 @@ out:
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
+ bool ret;
+
+ down_write(&env->bpf_progs.lock);
+ ret = __perf_env__insert_btf(env, btf_node);
+ up_write(&env->bpf_progs.lock);
+ return ret;
+}
+
+bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+{
struct rb_node *parent = NULL;
__u32 btf_id = btf_node->id;
struct btf_node *node;
struct rb_node **p;
- bool ret = true;
- down_write(&env->bpf_progs.lock);
p = &env->bpf_progs.btfs.rb_node;
while (*p != NULL) {
@@ -89,25 +101,31 @@ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
p = &(*p)->rb_right;
} else {
pr_debug("duplicated btf %u\n", btf_id);
- ret = false;
- goto out;
+ return false;
}
}
rb_link_node(&btf_node->rb_node, parent, p);
rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
env->bpf_progs.btfs_cnt++;
-out:
- up_write(&env->bpf_progs.lock);
- return ret;
+ return true;
}
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
+ struct btf_node *res;
+
+ down_read(&env->bpf_progs.lock);
+ res = __perf_env__find_btf(env, btf_id);
+ up_read(&env->bpf_progs.lock);
+ return res;
+}
+
+struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+{
struct btf_node *node = NULL;
struct rb_node *n;
- down_read(&env->bpf_progs.lock);
n = env->bpf_progs.btfs.rb_node;
while (n) {
@@ -117,13 +135,9 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
else if (btf_id > node->id)
n = n->rb_right;
else
- goto out;
+ return node;
}
- node = NULL;
-
-out:
- up_read(&env->bpf_progs.lock);
- return node;
+ return NULL;
}
/* purge data in bpf_progs.infos tree */
@@ -183,6 +197,7 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->sibling_threads);
zfree(&env->pmu_mappings);
zfree(&env->cpu);
+ zfree(&env->numa_map);
for (i = 0; i < env->nr_numa_nodes; i++)
perf_cpu_map__put(env->numa_nodes[i].map);
@@ -342,3 +357,42 @@ const char *perf_env__arch(struct perf_env *env)
return normalize_arch(arch_name);
}
+
+
+int perf_env__numa_node(struct perf_env *env, int cpu)
+{
+ if (!env->nr_numa_map) {
+ struct numa_node *nn;
+ int i, nr = 0;
+
+ for (i = 0; i < env->nr_numa_nodes; i++) {
+ nn = &env->numa_nodes[i];
+ nr = max(nr, perf_cpu_map__max(nn->map));
+ }
+
+ nr++;
+
+ /*
+ * We initialize the numa_map array to prepare
+ * it for missing cpus, which return node -1
+ */
+ env->numa_map = malloc(nr * sizeof(int));
+ if (!env->numa_map)
+ return -1;
+
+ for (i = 0; i < nr; i++)
+ env->numa_map[i] = -1;
+
+ env->nr_numa_map = nr;
+
+ for (i = 0; i < env->nr_numa_nodes; i++) {
+ int tmp, j;
+
+ nn = &env->numa_nodes[i];
+ perf_cpu_map__for_each_cpu(j, tmp, nn->map)
+ env->numa_map[j] = i;
+ }
+ }
+
+ return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
+}
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index 37028215d4a5..b0778483fa04 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -87,6 +87,10 @@ struct perf_env {
struct rb_root btfs;
u32 btfs_cnt;
} bpf_progs;
+
+ /* For fast cpu to numa node lookup via perf_env__numa_node */
+ int *numa_map;
+ int nr_numa_map;
};
enum perf_compress_type {
@@ -113,10 +117,16 @@ const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);
void perf_env__init(struct perf_env *env);
+void __perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node);
void perf_env__insert_bpf_prog_info(struct perf_env *env,
struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
__u32 prog_id);
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+
+int perf_env__numa_node(struct perf_env *env, int cpu);
#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 505b890ac85c..b110deb88425 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1672,39 +1672,26 @@ struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
return leader;
}
-int perf_evlist__add_sb_event(struct evlist **evlist,
+int perf_evlist__add_sb_event(struct evlist *evlist,
struct perf_event_attr *attr,
perf_evsel__sb_cb_t cb,
void *data)
{
struct evsel *evsel;
- bool new_evlist = (*evlist) == NULL;
-
- if (*evlist == NULL)
- *evlist = evlist__new();
- if (*evlist == NULL)
- return -1;
if (!attr->sample_id_all) {
pr_warning("enabling sample_id_all for all side band events\n");
attr->sample_id_all = 1;
}
- evsel = perf_evsel__new_idx(attr, (*evlist)->core.nr_entries);
+ evsel = perf_evsel__new_idx(attr, evlist->core.nr_entries);
if (!evsel)
- goto out_err;
+ return -1;
evsel->side_band.cb = cb;
evsel->side_band.data = data;
- evlist__add(*evlist, evsel);
+ evlist__add(evlist, evsel);
return 0;
-
-out_err:
- if (new_evlist) {
- evlist__delete(*evlist);
- *evlist = NULL;
- }
- return -1;
}
static void *perf_evlist__poll_thread(void *arg)
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 7cfe75522ba5..6f920ca91a69 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -107,7 +107,7 @@ int __perf_evlist__add_default_attrs(struct evlist *evlist,
int perf_evlist__add_dummy(struct evlist *evlist);
-int perf_evlist__add_sb_event(struct evlist **evlist,
+int perf_evlist__add_sb_event(struct evlist *evlist,
struct perf_event_attr *attr,
perf_evsel__sb_cb_t cb,
void *data);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 9dd9e3f4ef59..cd1eddf0ab37 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1955,7 +1955,6 @@ int perf_evsel__parse_sample(struct evsel *evsel, union perf_event *event,
data->period = evsel->core.attr.sample_period;
data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
data->misc = event->header.misc;
- data->id = -1ULL;
data->data_src = PERF_MEM_DATA_SRC_NONE;
if (event->header.type != PERF_RECORD_SAMPLE) {
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
index f9f18b8b1df9..04509144ff84 100644
--- a/tools/perf/util/genelf.c
+++ b/tools/perf/util/genelf.c
@@ -35,7 +35,11 @@
#define BUILD_ID_URANDOM /* different uuid for each run */
-#ifdef HAVE_LIBCRYPTO
+// FIXME: remove this and fix the deprecation warnings before it's removed
+// and we break for good here...
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+
+#ifdef HAVE_LIBCRYPTO_SUPPORT
#define BUILD_ID_MD5
#undef BUILD_ID_SHA /* does not seem to work well when linked with Java */
@@ -252,6 +256,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
Elf_Data *d;
Elf_Scn *scn;
Elf_Ehdr *ehdr;
+ Elf_Phdr *phdr;
Elf_Shdr *shdr;
uint64_t eh_frame_base_offset;
char *strsym = NULL;
@@ -287,6 +292,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
ehdr->e_shstrndx= unwinding ? 4 : 2; /* shdr index for section name */
/*
+ * setup program header
+ */
+ phdr = elf_newphdr(e, 1);
+ phdr[0].p_type = PT_LOAD;
+ phdr[0].p_offset = GEN_ELF_TEXT_OFFSET;
+ phdr[0].p_vaddr = GEN_ELF_TEXT_OFFSET;
+ phdr[0].p_paddr = GEN_ELF_TEXT_OFFSET;
+ phdr[0].p_filesz = csize;
+ phdr[0].p_memsz = csize;
+ phdr[0].p_flags = PF_X | PF_R;
+ phdr[0].p_align = 8;
+
+ /*
* setup text section
*/
scn = elf_newscn(e);
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
index d4137559be05..ac638945b4cb 100644
--- a/tools/perf/util/genelf.h
+++ b/tools/perf/util/genelf.h
@@ -50,8 +50,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
#if GEN_ELF_CLASS == ELFCLASS64
#define elf_newehdr elf64_newehdr
+#define elf_newphdr elf64_newphdr
#define elf_getshdr elf64_getshdr
#define Elf_Ehdr Elf64_Ehdr
+#define Elf_Phdr Elf64_Phdr
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define ELF_ST_TYPE(a) ELF64_ST_TYPE(a)
@@ -59,8 +61,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
#define ELF_ST_VIS(a) ELF64_ST_VISIBILITY(a)
#else
#define elf_newehdr elf32_newehdr
+#define elf_newphdr elf32_newphdr
#define elf_getshdr elf32_getshdr
#define Elf_Ehdr Elf32_Ehdr
+#define Elf_Phdr Elf32_Phdr
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define ELF_ST_TYPE(a) ELF32_ST_TYPE(a)
diff --git a/tools/perf/util/get_current_dir_name.c b/tools/perf/util/get_current_dir_name.c
index b205d929245f..e68935e9ac8c 100644
--- a/tools/perf/util/get_current_dir_name.c
+++ b/tools/perf/util/get_current_dir_name.c
@@ -3,8 +3,9 @@
//
#ifndef HAVE_GET_CURRENT_DIR_NAME
#include "get_current_dir_name.h"
+#include <limits.h>
+#include <string.h>
#include <unistd.h>
-#include <stdlib.h>
/* Android's 'bionic' library, for one, doesn't have this */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index d3412f2c0d18..a68feeb3eb00 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1546,8 +1546,8 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
node = rb_entry(next, struct bpf_prog_info_node, rb_node);
next = rb_next(&node->rb_node);
- bpf_event__print_bpf_prog_info(&node->info_linear->info,
- env, fp);
+ __bpf_event__print_bpf_prog_info(&node->info_linear->info,
+ env, fp);
}
up_read(&env->bpf_progs.lock);
@@ -2724,7 +2724,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
/* after reading from file, translate offset to address */
bpf_program__bpil_offs_to_addr(info_linear);
info_node->info_linear = info_linear;
- perf_env__insert_bpf_prog_info(env, info_node);
+ __perf_env__insert_bpf_prog_info(env, info_node);
}
up_write(&env->bpf_progs.lock);
@@ -2777,7 +2777,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
if (__do_read(ff, node->data, data_size))
goto out;
- perf_env__insert_btf(env, node);
+ __perf_env__insert_btf(env, node);
node = NULL;
}
@@ -3720,7 +3720,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist)
{
- u32 i, ids, n_ids;
+ u32 i, n_ids;
+ u64 *ids;
struct evsel *evsel;
struct evlist *evlist = *pevlist;
@@ -3736,9 +3737,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
evlist__add(evlist, evsel);
- ids = event->header.size;
- ids -= (void *)&event->attr.id - (void *)event;
- n_ids = ids / sizeof(u64);
+ n_ids = event->header.size - sizeof(event->header) - event->attr.attr.size;
+ n_ids = n_ids / sizeof(u64);
/*
* We don't have the cpu and thread maps on the header, so
* for allocating the perf_sample_id table we fake 1 cpu and
@@ -3747,8 +3747,9 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
return -ENOMEM;
+ ids = (void *)&event->attr.attr + event->attr.attr.size;
for (i = 0; i < n_ids; i++) {
- perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
+ perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, ids[i]);
}
return 0;
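
The fix above stops assuming the id array sits at a fixed offset inside the
attr event and instead derives both the pointer and the count from
attr.attr.size, so perf.data files written with a differently sized
perf_event_attr still parse. A hedged stand-alone sketch of that layout
arithmetic, using simplified stand-in types (fake_header/fake_attr are
illustrative, not perf's structs):

    #include <stdint.h>
    #include <stddef.h>

    struct fake_header { uint32_t type; uint16_t misc; uint16_t size; };
    struct fake_attr   { uint32_t type; uint32_t size; /* more fields */ };

    /* The trailing id array starts attr->size bytes after the attr, and
     * the ids occupy whatever space remains in the record. */
    static uint64_t *attr_ids(void *event, uint32_t *n_ids)
    {
            struct fake_header *hdr  = event;
            struct fake_attr   *attr = (struct fake_attr *)(hdr + 1);

            *n_ids = (hdr->size - sizeof(*hdr) - attr->size) / sizeof(uint64_t);
            return (uint64_t *)((char *)attr + attr->size);
    }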
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index eab7e8ef6789..52474e44fb28 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1592,6 +1592,8 @@ static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
decoder->cbr = cbr;
decoder->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
+ decoder->cyc_ref_timestamp = decoder->timestamp;
+ decoder->cycle_cnt = 0;
intel_pt_mtc_cyc_cnt_cbr(decoder);
}
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 9b7cc5f909b0..b40832419a27 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -3038,6 +3038,7 @@ static const char * const intel_pt_info_fmts[] = {
[INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n",
[INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n",
[INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n",
+ [INTEL_PT_MTC_FREQ_BITS] = " MTC freq bits %#"PRIx64"\n",
[INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n",
[INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n",
[INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n",
@@ -3052,8 +3053,12 @@ static void intel_pt_print_info(__u64 *arr, int start, int finish)
if (!dump_trace)
return;
- for (i = start; i <= finish; i++)
- fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
+ for (i = start; i <= finish; i++) {
+ const char *fmt = intel_pt_info_fmts[i];
+
+ if (fmt)
+ fprintf(stdout, fmt, arr[i]);
+ }
}
static void intel_pt_print_info_str(const char *name, const char *str)
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index e7c7e3232fc5..b275a1b297c3 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -523,14 +523,37 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
pr_debug("llvm compiling command template: %s\n", template);
+ /*
+ * Below, substitute control characters for values that can cause the
+ * echo to misbehave, then substitute the values back.
+ */
err = -ENOMEM;
- if (asprintf(&command_echo, "echo -n \"%s\"", template) < 0)
+ if (asprintf(&command_echo, "echo -n \a%s\a", template) < 0)
goto errout;
+#define SWAP_CHAR(a, b) do { if (*p == a) *p = b; } while (0)
+ for (char *p = command_echo; *p; p++) {
+ SWAP_CHAR('<', '\001');
+ SWAP_CHAR('>', '\002');
+ SWAP_CHAR('"', '\003');
+ SWAP_CHAR('\'', '\004');
+ SWAP_CHAR('|', '\005');
+ SWAP_CHAR('&', '\006');
+ SWAP_CHAR('\a', '"');
+ }
err = read_from_pipe(command_echo, (void **) &command_out, NULL);
if (err)
goto errout;
+ for (char *p = command_out; *p; p++) {
+ SWAP_CHAR('\001', '<');
+ SWAP_CHAR('\002', '>');
+ SWAP_CHAR('\003', '"');
+ SWAP_CHAR('\004', '\'');
+ SWAP_CHAR('\005', '|');
+ SWAP_CHAR('\006', '&');
+ }
+#undef SWAP_CHAR
pr_debug("llvm compiling command : %s\n", command_out);
err = read_from_pipe(template, &obj_buf, &obj_buf_sz);
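
A self-contained sketch of the round trip used above: shell metacharacters
are mapped to control characters that survive "echo -n" unmangled, then
mapped back after the pipe. This is a demo of the technique only, not the
perf code:

    #include <stdio.h>

    static void swap_chars(char *s, const char *from, const char *to)
    {
            for (; *s; s++)
                    for (int i = 0; from[i]; i++)
                            if (*s == from[i])
                                    *s = to[i];
    }

    int main(void)
    {
            char cmd[] = "clang -O2 <input >output";

            swap_chars(cmd, "<>\"'|&", "\001\002\003\004\005\006"); /* protect */
            /* ... pass through echo / a pipe here ... */
            swap_chars(cmd, "\001\002\003\004\005\006", "<>\"'|&"); /* restore */
            puts(cmd);
            return 0;
    }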
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 4027906fd3e3..baf73ca66a2b 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -830,8 +830,7 @@ static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
- struct addr_map_symbol *from_l = &left->branch_info->from;
- struct addr_map_symbol *from_r = &right->branch_info->from;
+ struct addr_map_symbol *from_l, *from_r;
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 93147cc40162..70612ec583f3 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -193,7 +193,7 @@ static void new_line_csv(struct perf_stat_config *config, void *ctx)
fputc('\n', os->fh);
if (os->prefix)
- fprintf(os->fh, "%s%s", os->prefix, config->csv_sep);
+ fprintf(os->fh, "%s", os->prefix);
aggr_printout(config, os->evsel, os->id, os->nr);
for (i = 0; i < os->nfields; i++)
fputs(config->csv_sep, os->fh);
@@ -263,7 +263,7 @@ static void print_metric_only(struct perf_stat_config *config,
if (color)
mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1;
- color_snprintf(str, sizeof(str), color ?: "", fmt, val);
+ color_snprintf(str, sizeof(str), color ?: "", fmt ?: "", val);
fprintf(out, "%*s ", mlen, str);
}
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 0b185b1090ff..73f890664be5 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -546,7 +546,7 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size)
size_t sz = min(size, descsz);
memcpy(bf, ptr, sz);
memset(bf + sz, 0, size - sz);
- err = descsz;
+ err = sz;
break;
}
}
@@ -1157,18 +1157,31 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
(!used_opd && syms_ss->adjust_symbols)) {
GElf_Phdr phdr;
- if (elf_read_program_header(syms_ss->elf,
+ if (elf_read_program_header(runtime_ss->elf,
(u64)sym.st_value, &phdr)) {
- pr_warning("%s: failed to find program header for "
+ pr_debug4("%s: failed to find program header for "
"symbol: %s st_value: %#" PRIx64 "\n",
__func__, elf_name, (u64)sym.st_value);
- continue;
+ pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
+ "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n",
+ __func__, (u64)sym.st_value, (u64)shdr.sh_addr,
+ (u64)shdr.sh_offset);
+ /*
+				 * Failed to find a program header, so fall
+				 * back to shdr.sh_addr and shdr.sh_offset to
+				 * calibrate the symbol's file address. Though
+				 * this is not necessary for a normal C ELF
+				 * file, we still need to handle java JIT
+				 * symbols in this case.
+ */
+ sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+ } else {
+ pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
+ "p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n",
+ __func__, (u64)sym.st_value, (u64)phdr.p_vaddr,
+ (u64)phdr.p_offset);
+ sym.st_value -= phdr.p_vaddr - phdr.p_offset;
}
- pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
- "p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n",
- __func__, (u64)sym.st_value, (u64)phdr.p_vaddr,
- (u64)phdr.p_offset);
- sym.st_value -= phdr.p_vaddr - phdr.p_offset;
}
demangled = demangle_sym(dso, kmodule, elf_name);
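
A hedged sketch of the two calibrations above: prefer the program header
that contains st_value, and only if none is found (as with java JIT ELFs)
fall back to the section header. Simplified stand-in types:

    #include <stdint.h>

    struct seg  { uint64_t p_vaddr, p_offset; };   /* from a GElf_Phdr */
    struct sect { uint64_t sh_addr, sh_offset; };  /* from a GElf_Shdr */

    static uint64_t sym_file_addr(uint64_t st_value, const struct seg *ph,
                                  const struct sect *sh)
    {
            if (ph)  /* normal path: program header found */
                    return st_value - (ph->p_vaddr - ph->p_offset);
            /* fallback: calibrate via the section header */
            return st_value - (sh->sh_addr - sh->sh_offset);
    }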
@@ -1899,8 +1912,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
* unusual. One significant peculiarity is that the mapping (start -> pgoff)
* is not the same for the kernel map and the modules map. That happens because
* the data is copied adjacently whereas the original kcore has gaps. Finally,
- * kallsyms and modules files are compared with their copies to check that
- * modules have not been loaded or unloaded while the copies were taking place.
+ * the kallsyms file is compared with its copy to check that modules have not
+ * been loaded or unloaded while the copies were taking place.
*
* Return: %0 on success, %-1 on failure.
*/
@@ -1963,9 +1976,6 @@ int kcore_copy(const char *from_dir, const char *to_dir)
goto out_extract_close;
}
- if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
- goto out_extract_close;
-
if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
goto out_extract_close;
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index c9bfe4696943..cee7fc3b5bb0 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -279,13 +279,13 @@ struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str)
threads->nr = ntasks;
}
out:
+ strlist__delete(slist);
if (threads)
refcount_set(&threads->refcnt, 1);
return threads;
out_free_threads:
zfree(&threads);
- strlist__delete(slist);
goto out;
}
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index f117d4f4821e..7bea36a61645 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -18,7 +18,7 @@ struct perf_session;
struct perf_top {
struct perf_tool tool;
- struct evlist *evlist;
+ struct evlist *evlist, *sb_evlist;
struct record_opts record_opts;
struct annotation_options annotation_opts;
struct evswitch evswitch;
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index c8622497ef23..73146c23eb2c 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -270,14 +270,14 @@ clean:
$(MAKE) -C bench O=$(OUTPUT) clean
-install-lib:
+install-lib: libcpupower
$(INSTALL) -d $(DESTDIR)${libdir}
$(CP) $(OUTPUT)libcpupower.so* $(DESTDIR)${libdir}/
$(INSTALL) -d $(DESTDIR)${includedir}
$(INSTALL_DATA) lib/cpufreq.h $(DESTDIR)${includedir}/cpufreq.h
$(INSTALL_DATA) lib/cpuidle.h $(DESTDIR)${includedir}/cpuidle.h
-install-tools:
+install-tools: $(OUTPUT)cpupower
$(INSTALL) -d $(DESTDIR)${bindir}
$(INSTALL_PROGRAM) $(OUTPUT)cpupower $(DESTDIR)${bindir}
$(INSTALL) -d $(DESTDIR)${bash_completion_dir}
@@ -293,14 +293,14 @@ install-man:
$(INSTALL_DATA) -D man/cpupower-info.1 $(DESTDIR)${mandir}/man1/cpupower-info.1
$(INSTALL_DATA) -D man/cpupower-monitor.1 $(DESTDIR)${mandir}/man1/cpupower-monitor.1
-install-gmo:
+install-gmo: create-gmo
$(INSTALL) -d $(DESTDIR)${localedir}
for HLANG in $(LANGUAGES); do \
echo '$(INSTALL_DATA) -D $(OUTPUT)po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo'; \
$(INSTALL_DATA) -D $(OUTPUT)po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo; \
done;
-install-bench:
+install-bench: compile-bench
@#DESTDIR must be set from outside to survive
@sbindir=$(sbindir) bindir=$(bindir) docdir=$(docdir) confdir=$(confdir) $(MAKE) -C bench O=$(OUTPUT) install
diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile
index f68b4bc55273..d9d9923af85c 100644
--- a/tools/power/cpupower/bench/Makefile
+++ b/tools/power/cpupower/bench/Makefile
@@ -27,7 +27,7 @@ $(OUTPUT)cpufreq-bench: $(OBJS)
all: $(OUTPUT)cpufreq-bench
-install:
+install: $(OUTPUT)cpufreq-bench
mkdir -p $(DESTDIR)/$(sbindir)
mkdir -p $(DESTDIR)/$(bindir)
mkdir -p $(DESTDIR)/$(docdir)
diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
index 44806a6dae11..7a76d6300374 100644
--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
@@ -66,8 +66,8 @@ static int max_freq_mode;
*/
static unsigned long max_frequency;
-static unsigned long long tsc_at_measure_start;
-static unsigned long long tsc_at_measure_end;
+static unsigned long long *tsc_at_measure_start;
+static unsigned long long *tsc_at_measure_end;
static unsigned long long *mperf_previous_count;
static unsigned long long *aperf_previous_count;
static unsigned long long *mperf_current_count;
@@ -130,7 +130,7 @@ static int mperf_get_count_percent(unsigned int id, double *percent,
aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];
if (max_freq_mode == MAX_FREQ_TSC_REF) {
- tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
+ tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
*percent = 100.0 * mperf_diff / tsc_diff;
dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n",
mperf_cstates[id].name, mperf_diff, tsc_diff);
@@ -167,7 +167,7 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
if (max_freq_mode == MAX_FREQ_TSC_REF) {
/* Calculate max_freq from TSC count */
- tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
+ tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
time_diff = timespec_diff_us(time_start, time_end);
max_frequency = tsc_diff / time_diff;
}
@@ -186,33 +186,27 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
static int mperf_start(void)
{
int cpu;
- unsigned long long dbg;
clock_gettime(CLOCK_REALTIME, &time_start);
- mperf_get_tsc(&tsc_at_measure_start);
- for (cpu = 0; cpu < cpu_count; cpu++)
+ for (cpu = 0; cpu < cpu_count; cpu++) {
+ mperf_get_tsc(&tsc_at_measure_start[cpu]);
mperf_init_stats(cpu);
+ }
- mperf_get_tsc(&dbg);
- dprint("TSC diff: %llu\n", dbg - tsc_at_measure_start);
return 0;
}
static int mperf_stop(void)
{
- unsigned long long dbg;
int cpu;
- for (cpu = 0; cpu < cpu_count; cpu++)
+ for (cpu = 0; cpu < cpu_count; cpu++) {
mperf_measure_stats(cpu);
+ mperf_get_tsc(&tsc_at_measure_end[cpu]);
+ }
- mperf_get_tsc(&tsc_at_measure_end);
clock_gettime(CLOCK_REALTIME, &time_end);
-
- mperf_get_tsc(&dbg);
- dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end);
-
return 0;
}
@@ -311,7 +305,8 @@ struct cpuidle_monitor *mperf_register(void)
aperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
mperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
aperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
-
+ tsc_at_measure_start = calloc(cpu_count, sizeof(unsigned long long));
+ tsc_at_measure_end = calloc(cpu_count, sizeof(unsigned long long));
mperf_monitor.name_len = strlen(mperf_monitor.name);
return &mperf_monitor;
}
@@ -322,6 +317,8 @@ void mperf_unregister(void)
free(aperf_previous_count);
free(mperf_current_count);
free(aperf_current_count);
+ free(tsc_at_measure_start);
+ free(tsc_at_measure_end);
free(is_valid);
}
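
A hedged sketch of the accounting the per-CPU arrays above enable: each
CPU's mperf delta is now divided by a TSC delta snapshotted around that
same CPU's measurement, so the C0 percentage no longer skews with the
time the start/stop loops spend walking other CPUs. Illustrative helper,
not the cpupower API:

    static double c0_percent(int cpu,
                             unsigned long long *mperf_start,
                             unsigned long long *mperf_end,
                             unsigned long long *tsc_start,
                             unsigned long long *tsc_end)
    {
            unsigned long long mperf_diff = mperf_end[cpu] - mperf_start[cpu];
            unsigned long long tsc_diff   = tsc_end[cpu] - tsc_start[cpu];

            return 100.0 * mperf_diff / tsc_diff;
    }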
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 1fc2b97d30bc..7a1cc0043f98 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1881,7 +1881,7 @@ retry:
if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
return -7;
- } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
+ } else if (do_knl_cstates && soft_c1_residency_display(BIC_CPU_c6)) {
if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
return -7;
}
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index fe587a959463..698b99e25f98 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -174,6 +174,7 @@ my $store_failures;
my $store_successes;
my $test_name;
my $timeout;
+my $run_timeout;
my $connect_timeout;
my $config_bisect_exec;
my $booted_timeout;
@@ -333,6 +334,7 @@ my %option_map = (
"STORE_SUCCESSES" => \$store_successes,
"TEST_NAME" => \$test_name,
"TIMEOUT" => \$timeout,
+ "RUN_TIMEOUT" => \$run_timeout,
"CONNECT_TIMEOUT" => \$connect_timeout,
"CONFIG_BISECT_EXEC" => \$config_bisect_exec,
"BOOTED_TIMEOUT" => \$booted_timeout,
@@ -1422,7 +1424,8 @@ sub reboot {
# Still need to wait for the reboot to finish
wait_for_monitor($time, $reboot_success_line);
-
+ }
+ if ($powercycle || $time) {
end_monitor;
}
}
@@ -1765,6 +1768,14 @@ sub run_command {
$command =~ s/\$SSH_USER/$ssh_user/g;
$command =~ s/\$MACHINE/$machine/g;
+ if (!defined($timeout)) {
+ $timeout = $run_timeout;
+ }
+
+ if (!defined($timeout)) {
+ $timeout = -1; # tell wait_for_input to wait indefinitely
+ }
+
doprint("$command ... ");
$start_time = time;
@@ -1793,13 +1804,10 @@ sub run_command {
while (1) {
my $fp = \*CMD;
- if (defined($timeout)) {
- doprint "timeout = $timeout\n";
- }
my $line = wait_for_input($fp, $timeout);
if (!defined($line)) {
my $now = time;
- if (defined($timeout) && (($now - $start_time) >= $timeout)) {
+ if ($timeout >= 0 && (($now - $start_time) >= $timeout)) {
doprint "Hit timeout of $timeout, killing process\n";
$hit_timeout = 1;
kill 9, $pid;
@@ -1881,7 +1889,7 @@ sub run_scp_mod {
sub _get_grub_index {
- my ($command, $target, $skip) = @_;
+ my ($command, $target, $skip, $submenu) = @_;
return if (defined($grub_number) && defined($last_grub_menu) &&
$last_grub_menu eq $grub_menu && defined($last_machine) &&
@@ -1898,11 +1906,16 @@ sub _get_grub_index {
my $found = 0;
+ my $submenu_number = 0;
+
while (<IN>) {
if (/$target/) {
$grub_number++;
$found = 1;
last;
+ } elsif (defined($submenu) && /$submenu/) {
+ $submenu_number++;
+ $grub_number = -1;
} elsif (/$skip/) {
$grub_number++;
}
@@ -1911,6 +1924,9 @@ sub _get_grub_index {
dodie "Could not find '$grub_menu' through $command on $machine"
if (!$found);
+ if ($submenu_number > 0) {
+ $grub_number = "$submenu_number>$grub_number";
+ }
doprint "$grub_number\n";
$last_grub_menu = $grub_menu;
$last_machine = $machine;
@@ -1921,6 +1937,7 @@ sub get_grub_index {
my $command;
my $target;
my $skip;
+ my $submenu;
my $grub_menu_qt;
if ($reboot_type !~ /^grub/) {
@@ -1935,8 +1952,9 @@ sub get_grub_index {
$skip = '^\s*title\s';
} elsif ($reboot_type eq "grub2") {
$command = "cat $grub_file";
- $target = '^menuentry.*' . $grub_menu_qt;
- $skip = '^menuentry\s|^submenu\s';
+ $target = '^\s*menuentry.*' . $grub_menu_qt;
+ $skip = '^\s*menuentry';
+ $submenu = '^\s*submenu\s';
} elsif ($reboot_type eq "grub2bls") {
$command = $grub_bls_get;
$target = '^title=.*' . $grub_menu_qt;
@@ -1945,7 +1963,7 @@ sub get_grub_index {
return;
}
- _get_grub_index($command, $target, $skip);
+ _get_grub_index($command, $target, $skip, $submenu);
}
sub wait_for_input
@@ -1963,6 +1981,11 @@ sub wait_for_input
$time = $timeout;
}
+ if ($time < 0) {
+ # Negative number means wait indefinitely
+ undef $time;
+ }
+
$rin = '';
vec($rin, fileno($fp), 1) = 1;
vec($rin, fileno(\*STDIN), 1) = 1;
@@ -2009,7 +2032,7 @@ sub reboot_to {
if ($reboot_type eq "grub") {
run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'";
} elsif (($reboot_type eq "grub2") or ($reboot_type eq "grub2bls")) {
- run_ssh "$grub_reboot $grub_number";
+ run_ssh "$grub_reboot \"'$grub_number'\"";
} elsif ($reboot_type eq "syslinux") {
run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path";
} elsif (defined $reboot_script) {
@@ -3726,9 +3749,10 @@ sub test_this_config {
# .config to make sure it is missing the config that
# we had before
my %configs = %min_configs;
- delete $configs{$config};
+ $configs{$config} = "# $config is not set";
make_new_config ((values %configs), (values %keep_configs));
make_oldconfig;
+ delete $configs{$config};
undef %configs;
assign_configs \%configs, $output_config;
@@ -4217,6 +4241,9 @@ sub send_email {
}
sub cancel_test {
+ if ($monitor_cnt) {
+ end_monitor;
+ }
if ($email_when_canceled) {
my $name = get_test_name;
send_email("KTEST: Your [$name] test was cancelled",
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index c3bc933d437b..a7b24fda22c1 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -791,6 +791,11 @@
# is issued instead of a reboot.
# CONNECT_TIMEOUT = 25
+# The timeout in seconds for how long to wait for any running command
+# to complete. If not defined, commands may run indefinitely.
+# (default undefined)
+#RUN_TIMEOUT = 600
+
# In between tests, a reboot of the box may occur, and this
# is the time to wait for the console after it stops producing
# output. Some machines may not produce a large lag on reboot
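
The RUN_TIMEOUT plumbing above maps "undefined" to -1, and wait_for_input
treats a negative timeout as "block forever". A small C analogue of that
convention, passing a NULL timeout to select() when no limit is wanted
(sketch only; ktest itself is Perl):

    #include <sys/select.h>

    static int wait_for_fd(int fd, int timeout_s)
    {
            fd_set rfds;
            struct timeval tv = { .tv_sec = timeout_s };

            FD_ZERO(&rfds);
            FD_SET(fd, &rfds);
            /* timeout_s < 0 means wait indefinitely */
            return select(fd + 1, &rfds, NULL, NULL,
                          timeout_s < 0 ? NULL : &tv);
    }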
diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c
index a61c7bcbc72d..63f468bf8245 100644
--- a/tools/testing/radix-tree/regression1.c
+++ b/tools/testing/radix-tree/regression1.c
@@ -177,7 +177,7 @@ void regression1_test(void)
nr_threads = 2;
pthread_barrier_init(&worker_barrier, NULL, nr_threads);
- threads = malloc(nr_threads * sizeof(pthread_t *));
+ threads = malloc(nr_threads * sizeof(*threads));
for (i = 0; i < nr_threads; i++) {
arg = i;
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 612f6757015d..40ee6f57af78 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -78,26 +78,34 @@ override LDFLAGS =
override MAKEFLAGS =
endif
-# Append kselftest to KBUILD_OUTPUT to avoid cluttering
+# Append kselftest to KBUILD_OUTPUT and O to avoid cluttering
# KBUILD_OUTPUT with selftest objects and headers installed
# by selftests Makefile or lib.mk.
ifdef building_out_of_srctree
override LDFLAGS =
endif
-ifneq ($(O),)
- BUILD := $(O)
+top_srcdir ?= ../../..
+
+ifeq ("$(origin O)", "command line")
+ KBUILD_OUTPUT := $(O)
+endif
+
+ifneq ($(KBUILD_OUTPUT),)
+ # Make's built-in functions such as $(abspath ...), $(realpath ...) cannot
+ # expand a shell special character '~'. We use a somewhat tedious way here.
+ abs_objtree := $(shell cd $(top_srcdir) && mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) && pwd)
+ $(if $(abs_objtree),, \
+ $(error failed to create output directory "$(KBUILD_OUTPUT)"))
+ # $(realpath ...) resolves symlinks
+ abs_objtree := $(realpath $(abs_objtree))
+ BUILD := $(abs_objtree)/kselftest
else
- ifneq ($(KBUILD_OUTPUT),)
- BUILD := $(KBUILD_OUTPUT)/kselftest
- else
- BUILD := $(shell pwd)
- DEFAULT_INSTALL_HDR_PATH := 1
- endif
+ BUILD := $(CURDIR)
+ DEFAULT_INSTALL_HDR_PATH := 1
endif
# Prepare for headers install
-top_srcdir ?= ../../..
include $(top_srcdir)/scripts/subarch.include
ARCH ?= $(SUBARCH)
export KSFT_KHDR_INSTALL_DONE := 1
diff --git a/tools/testing/selftests/bpf/progs/pyperf180.c b/tools/testing/selftests/bpf/progs/pyperf180.c
index c39f559d3100..42c4a8b62e36 100644
--- a/tools/testing/selftests/bpf/progs/pyperf180.c
+++ b/tools/testing/selftests/bpf/progs/pyperf180.c
@@ -1,4 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 180
+
+/* llvm upstream commit at clang18
+ * https://github.com/llvm/llvm-project/commit/1a2e77cf9e11dbf56b5720c607313a566eebb16e
+ * changed inlining behavior and caused a compilation failure as some branch
+ * target distances exceeded the 16-bit representation, which is the maximum
+ * for cpu v1/v2/v3. The macro __BPF_CPU_VERSION__ was later implemented in
+ * clang18 to specify which cpu version is used for compilation. So a smaller
+ * unroll_count can be set if __BPF_CPU_VERSION__ is less than 4, which
+ * reduces some branch target distances and resolves the compilation failure.
+ *
+ * To capture the case where a developer/ci uses clang18 but the corresponding
+ * repo checkpoint does not have __BPF_CPU_VERSION__, a smaller unroll_count
+ * will be set as well to prevent potential compilation failures.
+ */
+#ifdef __BPF_CPU_VERSION__
+#if __BPF_CPU_VERSION__ < 4
+#define UNROLL_COUNT 90
+#endif
+#elif __clang_major__ == 18
+#define UNROLL_COUNT 90
+#endif
+
#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
index 0262f7b374f9..6cc29b58d670 100644
--- a/tools/testing/selftests/bpf/test_align.c
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -359,15 +359,15 @@ static struct bpf_align_test tests[] = {
* is still (4n), fixed offset is not changed.
* Also, we create a new reg->id.
*/
- {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
+ {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (18)
* which is 20. Then the variable offset is (4n), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
- {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
+ {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
+ {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
},
},
{
@@ -410,15 +410,15 @@ static struct bpf_align_test tests[] = {
/* Adding 14 makes R6 be (4n+2) */
{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
/* Packet pointer has (4n+2) offset */
- {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
- {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+ {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
/* Newly read value in R6 was shifted left by 2, so has
* known alignment of 4.
*/
@@ -426,15 +426,15 @@ static struct bpf_align_test tests[] = {
/* Added (4n) to packet pointer's (4n+2) var_off, giving
* another (4n+2).
*/
- {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
- {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
+ {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+ {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
+ {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
},
},
{
@@ -469,11 +469,11 @@ static struct bpf_align_test tests[] = {
.matches = {
{4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
/* (ptr - ptr) << 2 == unknown, (4n) */
- {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
+ {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
/* (4n) + 14 == (4n+2). We blow our bounds, because
* the add could overflow.
*/
- {7, "R5_w=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
+ {7, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>=0 */
{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
/* packet pointer + nonnegative (4n+2) */
@@ -528,7 +528,7 @@ static struct bpf_align_test tests[] = {
/* New unknown value in R7 is (4n) */
{11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
/* Subtracting it from R6 blows our unsigned bounds */
- {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
+ {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>= 0 */
{14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
/* At the time the word size load is performed from R5,
@@ -537,7 +537,8 @@ static struct bpf_align_test tests[] = {
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},
+
},
},
{
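
The expected strings above drop their trailing ')' so they match as
substrings: newer verifiers can append extra min/max fields before the
closing parenthesis without breaking the test. A minimal sketch of that
matching assumption (test_align is believed to search the log with
something along these lines):

    #include <string.h>
    #include <stdbool.h>

    /* Succeeds as long as the expected text appears in the line,
     * whatever the verifier appends after it. */
    static bool line_matches(const char *log_line, const char *expected)
    {
            return strstr(log_line, expected) != NULL;
    }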
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 3d617e806054..a821ff121e03 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -920,6 +920,34 @@ static struct btf_raw_test raw_tests[] = {
.btf_load_err = true,
.err_str = "Invalid elem",
},
+{
+ .descr = "var after datasec, ptr followed by modifier",
+ .raw_types = {
+ /* .bss section */ /* [1] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2),
+ sizeof(void*)+4),
+ BTF_VAR_SECINFO_ENC(4, 0, sizeof(void*)),
+ BTF_VAR_SECINFO_ENC(6, sizeof(void*), 4),
+ /* int */ /* [2] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
+ /* int* */ /* [3] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
+ BTF_VAR_ENC(NAME_TBD, 3, 0), /* [4] */
+ /* const int */ /* [5] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 2),
+ BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0a\0b\0c\0",
+ .str_sec_size = sizeof("\0a\0b\0c\0"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = sizeof(void*)+4,
+ .key_type_id = 0,
+ .value_type_id = 1,
+ .max_entries = 1,
+},
/* Test member exceeds the size of struct.
*
* struct A {
@@ -4635,6 +4663,7 @@ static size_t get_pprint_mapv_size(enum pprint_mapv_kind_t mapv_kind)
#endif
assert(0);
+ return 0;
}
static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind,
@@ -4808,7 +4837,7 @@ static int do_test_pprint(int test_num)
ret = snprintf(pin_path, sizeof(pin_path), "%s/%s",
"/sys/fs/bpf", test->map_name);
- if (CHECK(ret == sizeof(pin_path), "pin_path %s/%s is too long",
+ if (CHECK(ret >= sizeof(pin_path), "pin_path %s/%s is too long",
"/sys/fs/bpf", test->map_name)) {
err = -1;
goto done;
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 43224c5ec1e9..1bd285dc55e9 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1091,6 +1091,19 @@ static void get_unpriv_disabled()
static bool test_as_unpriv(struct bpf_test *test)
{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ /* Some architectures have strict alignment requirements. In
+ * that case, the BPF verifier detects if a program has
+ * unaligned accesses and rejects them. A user can pass
+ * BPF_F_ANY_ALIGNMENT to a program to override this
+ * check. That, however, will only work when a privileged user
+ * loads a program. An unprivileged user loading a program
+ * with this flag will be rejected prior to entering the
+ * verifier.
+ */
+ if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
+ return false;
+#endif
return !test->prog_type ||
test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
index 92c02e4a1b62..313b345eddcc 100644
--- a/tools/testing/selftests/bpf/verifier/bounds.c
+++ b/tools/testing/selftests/bpf/verifier/bounds.c
@@ -411,16 +411,14 @@
BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
/* r1 = 0xffff'fffe (NOT 0!) */
BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
- /* computes OOB pointer */
+ /* error on computing OOB pointer */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
- .errstr = "R0 invalid mem access",
+ .errstr = "math between map_value pointer and 4294967294 is not allowed",
.result = REJECT,
},
{
diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c
index 7e50cb80873a..7e36078f8f48 100644
--- a/tools/testing/selftests/bpf/verifier/search_pruning.c
+++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
@@ -154,3 +154,39 @@
.result_unpriv = ACCEPT,
.insn_processed = 15,
},
+/* The test performs a conditional 64-bit write to a stack location
+ * fp[-8]; this is followed by an unconditional 8-bit write to fp[-8],
+ * then data is read from fp[-8]. This sequence is unsafe.
+ *
+ * The test would be mistakenly marked as safe w/o dst register parent
+ * preservation in the verifier.c:copy_register_state() function.
+ *
+ * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the
+ * checkpoint state after conditional 64-bit assignment.
+ */
+{
+ "write tracking and register parent chain bug",
+ .insns = {
+ /* r6 = ktime_get_ns() */
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ /* r0 = ktime_get_ns() */
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ /* if r0 > r6 goto +1 */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_6, 1),
+ /* *(u64 *)(r10 - 8) = 0xdeadbeef */
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdeadbeef),
+ /* r1 = 42 */
+ BPF_MOV64_IMM(BPF_REG_1, 42),
+ /* *(u8 *)(r10 - 8) = r1 */
+ BPF_STX_MEM(BPF_B, BPF_REG_FP, BPF_REG_1, -8),
+ /* r2 = *(u64 *)(r10 - 8) */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8),
+ /* exit(0) */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .flags = BPF_F_TEST_STATE_FREQ,
+ .errstr = "invalid read from stack off -8+1 size 8",
+ .result = REJECT,
+},
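
A hedged C analogue of the unsafe stack pattern the new test rejects:
after the unconditional 8-bit store, only one byte of the slot is
certainly initialized, so the full 64-bit read may observe junk on the
path that skipped the wide store:

    #include <stdint.h>

    uint64_t analogue(int cond)
    {
            uint64_t slot;                  /* plays the role of fp[-8] */

            if (cond)
                    slot = 0xdeadbeef;      /* conditional 64-bit write */
            *(uint8_t *)&slot = 42;         /* unconditional 8-bit write */
            return slot;                    /* partly uninitialized read */
    }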
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
index 9ed192e14f5f..b2ce50bb935b 100644
--- a/tools/testing/selftests/bpf/verifier/sock.c
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -121,7 +121,25 @@
.result = ACCEPT,
},
{
- "sk_fullsock(skb->sk): sk->dst_port [narrow load]",
+ "sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_port [half load]",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
@@ -139,7 +157,64 @@
.result = ACCEPT,
},
{
- "sk_fullsock(skb->sk): sk->dst_port [load 2nd byte]",
+ "sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock access",
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_port [byte load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
+ BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock access",
+},
+{
+ "sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
@@ -149,7 +224,7 @@
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, dst_port)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
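
Taken together, the tests above pin down the access rule for the 2-byte
dst_port field: loads must stay inside the field, except that a 4-byte
load at its offset remains allowed for backward compatibility. A sketch
of that rule under a simplified stand-in layout (bpf_sock's real layout
differs):

    #include <stdbool.h>
    #include <stddef.h>

    struct sock_stub { unsigned short dst_port; unsigned short pad; };

    static bool dst_port_access_ok(size_t off, size_t size)
    {
            size_t start = offsetof(struct sock_stub, dst_port);
            size_t end   = start + sizeof(unsigned short);

            if (off == start && size == 4)  /* legacy word load */
                    return true;
            return off >= start && off + size <= end;
    }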
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
index fb850e0ec837..7bf56ea161e3 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
@@ -10,7 +10,8 @@ lib_dir=$(dirname $0)/../../../../net/forwarding
ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
multiple_masks_test ctcam_edge_cases_test delta_simple_test \
delta_two_masks_one_key_test delta_simple_rehash_test \
- bloom_simple_test bloom_complex_test bloom_delta_test"
+ bloom_simple_test bloom_complex_test bloom_delta_test \
+ max_erp_entries_test"
NUM_NETIFS=2
source $lib_dir/lib.sh
source $lib_dir/tc_common.sh
@@ -983,6 +984,55 @@ bloom_delta_test()
log_test "bloom delta test ($tcflags)"
}
+max_erp_entries_test()
+{
+ # The number of eRP entries is limited. Once the maximum number of eRPs
+ # has been reached, filters cannot be added. This test verifies that
+	# when this limit is reached, insertion fails without crashing.
+
+ RET=0
+
+ local num_masks=32
+ local num_regions=15
+ local chain_failed
+ local mask_failed
+ local ret
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ for ((i=1; i < $num_regions; i++)); do
+ for ((j=$num_masks; j >= 0; j--)); do
+ tc filter add dev $h2 ingress chain $i protocol ip \
+ pref $i handle $j flower $tcflags \
+ dst_ip 192.1.0.0/$j &> /dev/null
+ ret=$?
+
+ if [ $ret -ne 0 ]; then
+ chain_failed=$i
+ mask_failed=$j
+ break 2
+ fi
+ done
+ done
+
+ # We expect to exceed the maximum number of eRP entries, so that
+ # insertion eventually fails. Otherwise, the test should be adjusted to
+ # add more filters.
+ check_fail $ret "expected to exceed number of eRP entries"
+
+ for ((; i >= 1; i--)); do
+ for ((j=0; j <= $num_masks; j++)); do
+ tc filter del dev $h2 ingress chain $i protocol ip \
+ pref $i handle $j flower &> /dev/null
+ done
+ done
+
+ log_test "max eRP entries test ($tcflags). " \
+ "max chain $chain_failed, mask $mask_failed"
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
diff --git a/tools/testing/selftests/efivarfs/create-read.c b/tools/testing/selftests/efivarfs/create-read.c
index 9674a19396a3..7bc7af4eb2c1 100644
--- a/tools/testing/selftests/efivarfs/create-read.c
+++ b/tools/testing/selftests/efivarfs/create-read.c
@@ -32,8 +32,10 @@ int main(int argc, char **argv)
rc = read(fd, buf, sizeof(buf));
if (rc != 0) {
fprintf(stderr, "Reading a new var should return EOF\n");
+ close(fd);
return EXIT_FAILURE;
}
+ close(fd);
return EXIT_SUCCESS;
}
diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
index a90f394f9aa9..d374878cc0ba 100755
--- a/tools/testing/selftests/efivarfs/efivarfs.sh
+++ b/tools/testing/selftests/efivarfs/efivarfs.sh
@@ -87,6 +87,11 @@ test_create_read()
{
local file=$efivarfs_mount/$FUNCNAME-$test_guid
./create-read $file
+ if [ $? -ne 0 ]; then
+ echo "create and read $file failed"
+ file_cleanup $file
+ exit 1
+ fi
file_cleanup $file
}
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
index 19e9236dec5e..f2e1b2bfcf0b 100755
--- a/tools/testing/selftests/ftrace/ftracetest
+++ b/tools/testing/selftests/ftrace/ftracetest
@@ -30,6 +30,9 @@ err_ret=1
# kselftest skip code is 4
err_skip=4
+# umount required
+UMOUNT_DIR=""
+
# cgroup RT scheduling prevents chrt commands from succeeding, which
# induces failures in test wakeup tests. Disable for the duration of
# the tests.
@@ -44,6 +47,9 @@ setup() {
cleanup() {
echo $sched_rt_runtime_orig > $sched_rt_runtime
+ if [ -n "${UMOUNT_DIR}" ]; then
+ umount ${UMOUNT_DIR} ||:
+ fi
}
errexit() { # message
@@ -155,11 +161,13 @@ if [ -z "$TRACING_DIR" ]; then
mount -t tracefs nodev /sys/kernel/tracing ||
errexit "Failed to mount /sys/kernel/tracing"
TRACING_DIR="/sys/kernel/tracing"
+ UMOUNT_DIR=${TRACING_DIR}
# If debugfs exists, then so does /sys/kernel/debug
elif [ -d "/sys/kernel/debug" ]; then
mount -t debugfs nodev /sys/kernel/debug ||
errexit "Failed to mount /sys/kernel/debug"
TRACING_DIR="/sys/kernel/debug/tracing"
+ UMOUNT_DIR=${TRACING_DIR}
else
err_ret=$err_skip
errexit "debugfs and tracefs are not configured in this kernel"
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
index ca2ffd7957f9..020fbdaa3fba 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
@@ -42,11 +42,18 @@ cnt_trace() {
test_event_enabled() {
val=$1
+ check_times=10 # wait for 10 * SLEEP_TIME at most
- e=`cat $EVENT_ENABLE`
- if [ "$e" != $val ]; then
- fail "Expected $val but found $e"
- fi
+ while [ $check_times -ne 0 ]; do
+ e=`cat $EVENT_ENABLE`
+ if [ "$e" = $val ]; then
+ return 0
+ fi
+ sleep $SLEEP_TIME
+ check_times=$((check_times - 1))
+ done
+
+ fail "Expected $val but found $e"
}
run_enable_disable() {
diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
index 4fa0f79144f4..9473934a573a 100644
--- a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
+++ b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
@@ -43,7 +43,7 @@ instance_read() {
instance_set() {
while :; do
- echo 1 > foo/events/sched/sched_switch
+ echo 1 > foo/events/sched/sched_switch/enable
done 2> /dev/null
}
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc
new file mode 100644
index 000000000000..bc9514428dba
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc
@@ -0,0 +1,13 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test failure of registering kprobe on non unique symbol
+# requires: kprobe_events
+
+SYMBOL='name_show'
+
+# We skip this test on kernels where SYMBOL is unique or does not exist.
+if [ "$(grep -c -E "[[:alnum:]]+ t ${SYMBOL}" /proc/kallsyms)" -le '1' ]; then
+ exit_unsupported
+fi
+
+! echo "p:test_non_unique ${SYMBOL}" > kprobe_events
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
index ef1e9bafb098..728c2762ee58 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
@@ -24,7 +24,6 @@ check_error 'p:^/bar vfs_read' # NO_GROUP_NAME
check_error 'p:^12345678901234567890123456789012345678901234567890123456789012345/bar vfs_read' # GROUP_TOO_LONG
check_error 'p:^foo.1/bar vfs_read' # BAD_GROUP_NAME
-check_error 'p:foo/^ vfs_read' # NO_EVENT_NAME
check_error 'p:foo/^12345678901234567890123456789012345678901234567890123456789012345 vfs_read' # EVENT_TOO_LONG
check_error 'p:foo/^bar.1 vfs_read' # BAD_EVENT_NAME
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index 30996306cabc..479531f5865d 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -3,11 +3,11 @@ INCLUDES := -I../include -I../../
CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES)
LDFLAGS := $(LDFLAGS) -pthread -lrt
-HEADERS := \
+LOCAL_HDRS := \
../include/futextest.h \
../include/atomic.h \
../include/logging.h
-TEST_GEN_FILES := \
+TEST_GEN_PROGS := \
futex_wait_timeout \
futex_wait_wouldblock \
futex_requeue_pi \
@@ -21,5 +21,3 @@ TEST_PROGS := run.sh
top_srcdir = ../../../../..
KSFT_KHDR_INSTALL := 1
include ../../lib.mk
-
-$(TEST_GEN_FILES): $(HEADERS)
diff --git a/tools/testing/selftests/intel_pstate/Makefile b/tools/testing/selftests/intel_pstate/Makefile
index 7340fd6a9a9f..9fc1a40b0127 100644
--- a/tools/testing/selftests/intel_pstate/Makefile
+++ b/tools/testing/selftests/intel_pstate/Makefile
@@ -2,10 +2,10 @@
CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE
LDLIBS := $(LDLIBS) -lm
-uname_M := $(shell uname -m 2>/dev/null || echo not)
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+ARCH ?= $(shell uname -m 2>/dev/null || echo not)
+ARCH_PROCESSED := $(shell echo $(ARCH) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
-ifeq (x86,$(ARCH))
+ifeq (x86,$(ARCH_PROCESSED))
TEST_GEN_FILES := msr aperf
endif
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index aead07c24afc..d60f584e8d0f 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -635,6 +635,11 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
#define MSR_AMD64_OSVW_STATUS 0xc0010141
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
+
+#define MSR_AMD64_DE_CFG 0xc0011029
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
+
#define MSR_AMD64_BU_CFG2 0xc001102a
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
@@ -685,9 +690,6 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
#define MSR_FAM10H_NODE_ID 0xc001100c
-#define MSR_F10H_DECFG 0xc0011029
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index f52e0ba84fed..3d27069b9ed9 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -18,8 +18,8 @@
/*
* Definitions of Primary Processor-Based VM-Execution Controls.
*/
-#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004
-#define CPU_BASED_USE_TSC_OFFSETING 0x00000008
+#define CPU_BASED_INTR_WINDOW_EXITING 0x00000004
+#define CPU_BASED_USE_TSC_OFFSETTING 0x00000008
#define CPU_BASED_HLT_EXITING 0x00000080
#define CPU_BASED_INVLPG_EXITING 0x00000200
#define CPU_BASED_MWAIT_EXITING 0x00000400
@@ -30,7 +30,7 @@
#define CPU_BASED_CR8_LOAD_EXITING 0x00080000
#define CPU_BASED_CR8_STORE_EXITING 0x00100000
#define CPU_BASED_TPR_SHADOW 0x00200000
-#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
+#define CPU_BASED_NMI_WINDOW_EXITING 0x00400000
#define CPU_BASED_MOV_DR_EXITING 0x00800000
#define CPU_BASED_UNCOND_IO_EXITING 0x01000000
#define CPU_BASED_USE_IO_BITMAPS 0x02000000
@@ -103,7 +103,7 @@
#define EXIT_REASON_EXCEPTION_NMI 0
#define EXIT_REASON_EXTERNAL_INTERRUPT 1
#define EXIT_REASON_TRIPLE_FAULT 2
-#define EXIT_REASON_PENDING_INTERRUPT 7
+#define EXIT_REASON_INTERRUPT_WINDOW 7
#define EXIT_REASON_NMI_WINDOW 8
#define EXIT_REASON_TASK_SWITCH 9
#define EXIT_REASON_CPUID 10
diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
index 6cd91970fbad..3b2a426070c4 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
@@ -73,20 +73,19 @@ void ucall_uninit(struct kvm_vm *vm)
void ucall(uint64_t cmd, int nargs, ...)
{
- struct ucall uc = {
- .cmd = cmd,
- };
+ struct ucall uc = {};
va_list va;
int i;
+ WRITE_ONCE(uc.cmd, cmd);
nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
va_start(va, nargs);
for (i = 0; i < nargs; ++i)
- uc.args[i] = va_arg(va, uint64_t);
+ WRITE_ONCE(uc.args[i], va_arg(va, uint64_t));
va_end(va);
- *ucall_exit_mmio_addr = (vm_vaddr_t)&uc;
+ WRITE_ONCE(*ucall_exit_mmio_addr, (vm_vaddr_t)&uc);
}
uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index 5590fd2bcf87..69e482a95c47 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
@@ -98,7 +98,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
prepare_vmcs(vmx_pages, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
- control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
+ control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 8794ce382bf5..59479442faa6 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -130,6 +130,11 @@ endef
clean:
$(CLEAN)
+# Allows CFLAGS and LDFLAGS to be extended from the command line, e.g.
+# make USERCFLAGS=-Werror USERLDFLAGS=-static
+CFLAGS += $(USERCFLAGS)
+LDFLAGS += $(USERLDFLAGS)
+
# When make O= with kselftest target from main level
# the following aren't defined.
#
diff --git a/tools/testing/selftests/memfd/fuse_test.c b/tools/testing/selftests/memfd/fuse_test.c
index b018e835737d..cda63164d9d3 100644
--- a/tools/testing/selftests/memfd/fuse_test.c
+++ b/tools/testing/selftests/memfd/fuse_test.c
@@ -22,6 +22,7 @@
#include <linux/falloc.h>
#include <linux/fcntl.h>
#include <linux/memfd.h>
+#include <linux/types.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
diff --git a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
index 51df5e305855..b52d59547fc5 100755
--- a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
+++ b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
@@ -209,12 +209,12 @@ validate_v6_exception()
echo "Route get"
ip -netns h0 -6 ro get ${dst}
echo "Searching for:"
- echo " ${dst} from :: via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
+ echo " ${dst}.* via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
echo
fi
ip -netns h0 -6 ro get ${dst} | \
- grep -q "${dst} from :: via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
+ grep -q "${dst}.* via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
rc=$?
log_test $rc 0 "IPv6: host 0 to host ${i}, mtu ${mtu}"
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index 09830b88ec8c..0bdca3a2e673 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -583,6 +583,36 @@ ipv4_fcnal()
set +e
check_nexthop "dev veth1" ""
log_test $? 0 "Nexthops removed on admin down"
+
+ # nexthop route delete warning: route add with nhid and delete
+ # using device
+ run_cmd "$IP li set dev veth1 up"
+ run_cmd "$IP nexthop add id 12 via 172.16.1.3 dev veth1"
+ out1=`dmesg | grep "WARNING:.*fib_nh_match.*" | wc -l`
+ run_cmd "$IP route add 172.16.101.1/32 nhid 12"
+ run_cmd "$IP route delete 172.16.101.1/32 dev veth1"
+ out2=`dmesg | grep "WARNING:.*fib_nh_match.*" | wc -l`
+ [ $out1 -eq $out2 ]
+ rc=$?
+ log_test $rc 0 "Delete nexthop route warning"
+ run_cmd "$IP route delete 172.16.101.1/32 nhid 12"
+ run_cmd "$IP nexthop del id 12"
+
+ run_cmd "$IP nexthop add id 21 via 172.16.1.6 dev veth1"
+ run_cmd "$IP ro add 172.16.101.0/24 nhid 21"
+ run_cmd "$IP ro del 172.16.101.0/24 nexthop via 172.16.1.7 dev veth1 nexthop via 172.16.1.8 dev veth1"
+ log_test $? 2 "Delete multipath route with only nh id based entry"
+
+ run_cmd "$IP nexthop add id 22 via 172.16.1.6 dev veth1"
+ run_cmd "$IP ro add 172.16.102.0/24 nhid 22"
+ run_cmd "$IP ro del 172.16.102.0/24 dev veth1"
+ log_test $? 2 "Delete route when specifying only nexthop device"
+
+ run_cmd "$IP ro del 172.16.102.0/24 via 172.16.1.6"
+ log_test $? 2 "Delete route when specifying only gateway"
+
+ run_cmd "$IP ro del 172.16.102.0/24"
+ log_test $? 0 "Delete route when not specifying nexthop attributes"
}
ipv4_grp_fcnal()
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 6986086035d6..782db6ccef32 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -68,7 +68,7 @@ setup()
cleanup()
{
$IP link del dev dummy0 &> /dev/null
- ip netns del ns1
+ ip netns del ns1 &> /dev/null
ip netns del ns2 &> /dev/null
}
@@ -1662,6 +1662,8 @@ EOF
################################################################################
# main
+trap cleanup EXIT
+
while getopts :t:pPhv o
do
case $o in
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index f190ad58e00d..4d8845a68cf2 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -562,14 +562,14 @@ sysctl_set()
local value=$1; shift
SYSCTL_ORIG[$key]=$(sysctl -n $key)
- sysctl -qw $key=$value
+ sysctl -qw $key="$value"
}
sysctl_restore()
{
local key=$1; shift
- sysctl -qw $key=${SYSCTL_ORIG["$key"]}
+ sysctl -qw $key="${SYSCTL_ORIG[$key]}"
}
forwarding_enable()
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
index 472bd023e2a5..b501b366367f 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
@@ -72,7 +72,8 @@ test_span_gre_ttl()
RET=0
- mirror_install $swp1 ingress $tundev "matchall $tcflags"
+ mirror_install $swp1 ingress $tundev \
+ "prot ip flower $tcflags ip_prot icmp"
tc filter add dev $h3 ingress pref 77 prot $prot \
flower ip_ttl 50 action pass
diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh
index b11d8e6b5bc1..b7cdf75efb5f 100755
--- a/tools/testing/selftests/net/forwarding/tc_flower.sh
+++ b/tools/testing/selftests/net/forwarding/tc_flower.sh
@@ -49,8 +49,8 @@ match_dst_mac_test()
tc_check_packets "dev $h2 ingress" 101 1
check_fail $? "Matched on a wrong filter"
- tc_check_packets "dev $h2 ingress" 102 1
- check_err $? "Did not match on correct filter"
+ tc_check_packets "dev $h2 ingress" 102 0
+ check_fail $? "Did not match on correct filter"
tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
@@ -75,8 +75,8 @@ match_src_mac_test()
tc_check_packets "dev $h2 ingress" 101 1
check_fail $? "Matched on a wrong filter"
- tc_check_packets "dev $h2 ingress" 102 1
- check_err $? "Did not match on correct filter"
+ tc_check_packets "dev $h2 ingress" 102 0
+ check_fail $? "Did not match on correct filter"
tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index 88be9083b923..a4dc32729749 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -1129,6 +1129,13 @@ check_command() {
return 0
}
+check_running() {
+ pid=${1}
+ cmd=${2}
+
+ [ "$(cat /proc/${pid}/cmdline 2>/dev/null | tr -d '\0')" = "{cmd}" ]
+}
+
test_cleanup_vxlanX_exception() {
outer="${1}"
encap="vxlan"
@@ -1159,11 +1166,12 @@ test_cleanup_vxlanX_exception() {
${ns_a} ip link del dev veth_A-R1 &
iplink_pid=$!
- sleep 1
- if [ "$(cat /proc/${iplink_pid}/cmdline 2>/dev/null | tr -d '\0')" = "iplinkdeldevveth_A-R1" ]; then
- err " can't delete veth device in a timely manner, PMTU dst likely leaked"
- return 1
- fi
+ for i in $(seq 1 20); do
+ check_running ${iplink_pid} "iplinkdeldevveth_A-R1" || return 0
+ sleep 0.1
+ done
+ err " can't delete veth device in a timely manner, PMTU dst likely leaked"
+ return 1
}
test_cleanup_ipv6_exception() {
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index b5277106df1f..b0cc082fbb84 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -330,7 +330,7 @@ static void test_extra_filter(const struct test_params p)
if (bind(fd1, addr, sockaddr_size()))
error(1, errno, "failed to bind recv socket 1");
- if (!bind(fd2, addr, sockaddr_size()) && errno != EADDRINUSE)
+ if (!bind(fd2, addr, sockaddr_size()) || errno != EADDRINUSE)
error(1, errno, "bind socket 2 should fail with EADDRINUSE");
free(addr);
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 28ea3753da20..3b929e031f59 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -780,7 +780,7 @@ kci_test_ipsec_offload()
tmpl proto esp src $srcip dst $dstip spi 9 \
mode transport reqid 42
check_err $?
- ip x p add dir out src $dstip/24 dst $srcip/24 \
+ ip x p add dir in src $dstip/24 dst $srcip/24 \
tmpl proto esp src $dstip dst $srcip spi 9 \
mode transport reqid 42
check_err $?
@@ -833,6 +833,7 @@ EOF
fi
# clean up any leftovers
+ echo 0 > /sys/bus/netdevsim/del_device
$probed && rmmod netdevsim
if [ $ret -ne 0 ]; then
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 0ea44d975b6c..81bb3cc6704f 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -311,11 +311,12 @@ TEST_F(tls, sendmsg_large)
msg.msg_iov = &vec;
msg.msg_iovlen = 1;
- EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len);
+ EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
}
- while (recvs++ < sends)
- EXPECT_NE(recv(self->fd, mem, send_len, 0), -1);
+ while (recvs++ < sends) {
+ EXPECT_NE(recv(self->cfd, mem, send_len, 0), -1);
+ }
free(mem);
}
@@ -343,9 +344,9 @@ TEST_F(tls, sendmsg_multiple)
msg.msg_iov = vec;
msg.msg_iovlen = iov_len;
- EXPECT_EQ(sendmsg(self->cfd, &msg, 0), total_len);
+ EXPECT_EQ(sendmsg(self->fd, &msg, 0), total_len);
buf = malloc(total_len);
- EXPECT_NE(recv(self->fd, buf, total_len, 0), -1);
+ EXPECT_NE(recv(self->cfd, buf, total_len, 0), -1);
for (i = 0; i < iov_len; i++) {
EXPECT_EQ(memcmp(test_strs[i], buf + len_cmp,
strlen(test_strs[i])),
@@ -579,12 +580,12 @@ TEST_F(tls, recv_partial)
memset(recv_mem, 0, sizeof(recv_mem));
EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first),
- MSG_WAITALL), -1);
+ EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_first),
+ MSG_WAITALL), strlen(test_str_first));
EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0);
memset(recv_mem, 0, sizeof(recv_mem));
- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second),
- MSG_WAITALL), -1);
+ EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_second),
+ MSG_WAITALL), strlen(test_str_second));
EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)),
0);
}
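The recv_partial change is about strictness: EXPECT_NE(..., -1) also passes on a short read, while MSG_WAITALL normally blocks until the full count arrives and may return less only on error, signal, or peer shutdown. Comparing against the requested length catches truncated records. A hedged sketch of the stricter pattern, assuming a connected stream socket:

/* Sketch: read exactly `len` bytes or report failure. */
#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>

static int recv_exact(int fd, void *buf, size_t len)
{
	ssize_t n = recv(fd, buf, len, MSG_WAITALL);

	if (n < 0) {
		perror("recv");
		return -1;
	}
	if ((size_t)n != len) {
		/* peer closed early or the record was shorter */
		fprintf(stderr, "short read: %zd of %zu bytes\n", n, len);
		return -1;
	}
	return 0;
}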
diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh
index dc932fd65363..640bc43452fa 100755
--- a/tools/testing/selftests/net/udpgso_bench.sh
+++ b/tools/testing/selftests/net/udpgso_bench.sh
@@ -7,6 +7,7 @@ readonly GREEN='\033[0;92m'
readonly YELLOW='\033[0;33m'
readonly RED='\033[0;31m'
readonly NC='\033[0m' # No Color
+readonly TESTPORT=8000
readonly KSFT_PASS=0
readonly KSFT_FAIL=1
@@ -56,11 +57,26 @@ trap wake_children EXIT
run_one() {
local -r args=$@
+ local nr_socks=0
+ local i=0
+ local -r timeout=10
+
+ ./udpgso_bench_rx -p "$TESTPORT" &
+ ./udpgso_bench_rx -p "$TESTPORT" -t &
+
+ # Wait for the above test program to get ready to receive connections.
+ while [ "$i" -lt "$timeout" ]; do
+ nr_socks="$(ss -lnHi | grep -c "\*:${TESTPORT}")"
+ [ "$nr_socks" -eq 2 ] && break
+ i=$((i + 1))
+ sleep 1
+ done
+ if [ "$nr_socks" -ne 2 ]; then
+ echo "timed out while waiting for udpgso_bench_rx"
+ exit 1
+ fi
- ./udpgso_bench_rx &
- ./udpgso_bench_rx -t &
-
- ./udpgso_bench_tx ${args}
+ ./udpgso_bench_tx -p "$TESTPORT" ${args}
}
run_in_netns() {
diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
index 6a193425c367..f35a924d4a30 100644
--- a/tools/testing/selftests/net/udpgso_bench_rx.c
+++ b/tools/testing/selftests/net/udpgso_bench_rx.c
@@ -214,11 +214,10 @@ static void do_verify_udp(const char *data, int len)
static int recv_msg(int fd, char *buf, int len, int *gso_size)
{
- char control[CMSG_SPACE(sizeof(uint16_t))] = {0};
+ char control[CMSG_SPACE(sizeof(int))] = {0};
struct msghdr msg = {0};
struct iovec iov = {0};
struct cmsghdr *cmsg;
- uint16_t *gsosizeptr;
int ret;
iov.iov_base = buf;
@@ -237,8 +236,7 @@ static int recv_msg(int fd, char *buf, int len, int *gso_size)
cmsg = CMSG_NXTHDR(&msg, cmsg)) {
if (cmsg->cmsg_level == SOL_UDP
&& cmsg->cmsg_type == UDP_GRO) {
- gsosizeptr = (uint16_t *) CMSG_DATA(cmsg);
- *gso_size = *gsosizeptr;
+ *gso_size = *(int *)CMSG_DATA(cmsg);
break;
}
}
@@ -250,7 +248,7 @@ static int recv_msg(int fd, char *buf, int len, int *gso_size)
static void do_flush_udp(int fd)
{
static char rbuf[ETH_MAX_MTU];
- int ret, len, gso_size, budget = 256;
+ int ret, len, gso_size = 0, budget = 256;
len = cfg_read_all ? sizeof(rbuf) : 0;
while (budget--) {
@@ -336,6 +334,8 @@ static void parse_opts(int argc, char **argv)
cfg_verify = true;
cfg_read_all = true;
break;
+ default:
+ exit(1);
}
}
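The control-message fix matters because the kernel writes the UDP_GRO segment size as an int: reading it through a uint16_t pointer yields the wrong value on big-endian systems, and sizing the control buffer for the smaller type is fragile. A minimal sketch of receiving an int-valued cmsg, assuming a UDP socket with UDP_GRO enabled (the fallback defines mirror what the selftests do):

/* Sketch: pull an int-sized value out of ancillary data. */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/udp.h>

#ifndef SOL_UDP
#define SOL_UDP 17
#endif
#ifndef UDP_GRO
#define UDP_GRO 104
#endif

static ssize_t recv_gro_size(int fd, char *buf, size_t len, int *gso_size)
{
	char control[CMSG_SPACE(sizeof(int))] = {0};
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = control, .msg_controllen = sizeof(control),
	};
	struct cmsghdr *cmsg;
	ssize_t ret = recvmsg(fd, &msg, 0);

	if (ret < 0)
		return ret;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_UDP && cmsg->cmsg_type == UDP_GRO)
			memcpy(gso_size, CMSG_DATA(cmsg), sizeof(*gso_size));
	return ret;
}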
diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c
index f1fdaa270291..477392715a9a 100644
--- a/tools/testing/selftests/net/udpgso_bench_tx.c
+++ b/tools/testing/selftests/net/udpgso_bench_tx.c
@@ -62,6 +62,7 @@ static int cfg_payload_len = (1472 * 42);
static int cfg_port = 8000;
static int cfg_runtime_ms = -1;
static bool cfg_poll;
+static int cfg_poll_loop_timeout_ms = 2000;
static bool cfg_segment;
static bool cfg_sendmmsg;
static bool cfg_tcp;
@@ -235,16 +236,17 @@ static void flush_errqueue_recv(int fd)
}
}
-static void flush_errqueue(int fd, const bool do_poll)
+static void flush_errqueue(int fd, const bool do_poll,
+ unsigned long poll_timeout, const bool poll_err)
{
if (do_poll) {
struct pollfd fds = {0};
int ret;
fds.fd = fd;
- ret = poll(&fds, 1, 500);
+ ret = poll(&fds, 1, poll_timeout);
if (ret == 0) {
- if (cfg_verbose)
+ if ((cfg_verbose) && (poll_err))
fprintf(stderr, "poll timeout\n");
} else if (ret < 0) {
error(1, errno, "poll");
@@ -254,6 +256,20 @@ static void flush_errqueue(int fd, const bool do_poll)
flush_errqueue_recv(fd);
}
+static void flush_errqueue_retry(int fd, unsigned long num_sends)
+{
+ unsigned long tnow, tstop;
+ bool first_try = true;
+
+ tnow = gettimeofday_ms();
+ tstop = tnow + cfg_poll_loop_timeout_ms;
+ do {
+ flush_errqueue(fd, true, tstop - tnow, first_try);
+ first_try = false;
+ tnow = gettimeofday_ms();
+ } while ((stat_zcopies != num_sends) && (tnow < tstop));
+}
+
static int send_tcp(int fd, char *data)
{
int ret, done = 0, count = 0;
@@ -413,7 +429,8 @@ static int send_udp_segment(int fd, char *data)
static void usage(const char *filepath)
{
- error(1, 0, "Usage: %s [-46acmHPtTuvz] [-C cpu] [-D dst ip] [-l secs] [-M messagenr] [-p port] [-s sendsize] [-S gsosize]",
+ error(1, 0, "Usage: %s [-46acmHPtTuvz] [-C cpu] [-D dst ip] [-l secs] "
+ "[-L secs] [-M messagenr] [-p port] [-s sendsize] [-S gsosize]",
filepath);
}
@@ -423,7 +440,7 @@ static void parse_opts(int argc, char **argv)
int max_len, hdrlen;
int c;
- while ((c = getopt(argc, argv, "46acC:D:Hl:mM:p:s:PS:tTuvz")) != -1) {
+ while ((c = getopt(argc, argv, "46acC:D:Hl:L:mM:p:s:PS:tTuvz")) != -1) {
switch (c) {
case '4':
if (cfg_family != PF_UNSPEC)
@@ -452,6 +469,9 @@ static void parse_opts(int argc, char **argv)
case 'l':
cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000;
break;
+ case 'L':
+ cfg_poll_loop_timeout_ms = strtoul(optarg, NULL, 10) * 1000;
+ break;
case 'm':
cfg_sendmmsg = true;
break;
@@ -490,6 +510,8 @@ static void parse_opts(int argc, char **argv)
case 'z':
cfg_zerocopy = true;
break;
+ default:
+ exit(1);
}
}
@@ -677,7 +699,7 @@ int main(int argc, char **argv)
num_sends += send_udp(fd, buf[i]);
num_msgs++;
if ((cfg_zerocopy && ((num_msgs & 0xF) == 0)) || cfg_tx_tstamp)
- flush_errqueue(fd, cfg_poll);
+ flush_errqueue(fd, cfg_poll, 500, true);
if (cfg_msg_nr && num_msgs >= cfg_msg_nr)
break;
@@ -696,7 +718,7 @@ int main(int argc, char **argv)
} while (!interrupted && (cfg_runtime_ms == -1 || tnow < tstop));
if (cfg_zerocopy || cfg_tx_tstamp)
- flush_errqueue(fd, true);
+ flush_errqueue_retry(fd, num_sends);
if (close(fd))
error(1, errno, "close");
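flush_errqueue_retry() bounds the wait for zerocopy completions with a wall-clock deadline instead of a single fixed 500 ms poll. The same deadline pattern, sketched with clock_gettime (the helper names are illustrative, not from the benchmark):

/* Sketch: retry until done or a millisecond deadline passes.
 * cond() stands in for "all zerocopy completions arrived". */
#include <stdbool.h>
#include <time.h>

static unsigned long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000UL + ts.tv_nsec / 1000000UL;
}

static bool wait_until(bool (*cond)(void), unsigned long timeout_ms)
{
	unsigned long tstop = now_ms() + timeout_ms;

	do {
		if (cond())
			return true;
	} while (now_ms() < tstop);
	return false;
}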
diff --git a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
index b48e1833bc89..76645aaf2b58 100755
--- a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
+++ b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
@@ -35,6 +35,8 @@ cleanup() {
for i in 1 2;do ip netns del nsrouter$i;done
}
+trap cleanup EXIT
+
ipv4() {
echo -n 192.168.$1.2
}
@@ -146,11 +148,17 @@ ip netns exec nsclient1 nft -f - <<EOF
table inet filter {
counter unknown { }
counter related { }
+ counter redir4 { }
+ counter redir6 { }
chain input {
type filter hook input priority 0; policy accept;
- meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+ icmp type "redirect" ct state "related" counter name "redir4" accept
+ icmpv6 type "nd-redirect" ct state "related" counter name "redir6" accept
+
+ meta l4proto { icmp, icmpv6 } ct state established,untracked accept
meta l4proto { icmp, icmpv6 } ct state "related" counter name "related" accept
+
counter name "unknown" drop
}
}
@@ -279,5 +287,29 @@ else
echo "ERROR: icmp error RELATED state test has failed"
fi
-cleanup
+# add 'bad' route, expect icmp REDIRECT to be generated
+ip netns exec nsclient1 ip route add 192.168.1.42 via 192.168.1.1
+ip netns exec nsclient1 ip route add dead:1::42 via dead:1::1
+
+ip netns exec "nsclient1" ping -q -c 2 192.168.1.42 > /dev/null
+
+expect="packets 1 bytes 112"
+check_counter nsclient1 "redir4" "$expect"
+if [ $? -ne 0 ];then
+ ret=1
+fi
+
+ip netns exec "nsclient1" ping -c 1 dead:1::42 > /dev/null
+expect="packets 1 bytes 192"
+check_counter nsclient1 "redir6" "$expect"
+if [ $? -ne 0 ];then
+ ret=1
+fi
+
+if [ $ret -eq 0 ];then
+ echo "PASS: icmp redirects had RELATED state"
+else
+ echo "ERROR: icmp redirect RELATED state test has failed"
+fi
+
exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
index 4e15e8167310..67697d8ea59a 100755
--- a/tools/testing/selftests/netfilter/nft_nat.sh
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -404,6 +404,8 @@ EOF
echo SERVER-$family | ip netns exec "$ns1" timeout 5 socat -u STDIN TCP-LISTEN:2000 &
sc_s=$!
+ sleep 1
+
result=$(ip netns exec "$ns0" timeout 1 socat TCP:$daddr:2000 STDOUT)
if [ "$result" = "SERVER-inet" ];then
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
index 02f6b4efde14..e54d7a4089ea 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
@@ -24,6 +24,7 @@ static int check_cpu_dscr_default(char *file, unsigned long val)
rc = read(fd, buf, sizeof(buf));
if (rc == -1) {
perror("read() failed");
+ close(fd);
return 1;
}
close(fd);
@@ -65,8 +66,10 @@ static int check_all_cpu_dscr_defaults(unsigned long val)
if (access(file, F_OK))
continue;
- if (check_cpu_dscr_default(file, val))
+ if (check_cpu_dscr_default(file, val)) {
+ closedir(sysfs);
return 1;
+ }
}
closedir(sysfs);
return 0;
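Both hunks close descriptors that the early error returns previously leaked. The usual way to avoid such leaks in kernel-adjacent C is a single exit path reached via goto; a small sketch of that idiom (paths and names are illustrative):

/* Sketch of the single-exit cleanup idiom. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int read_one(const char *path, char *buf, size_t len)
{
	int rc = 1;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 1;
	if (read(fd, buf, len) < 0) {
		perror("read() failed");
		goto out;	/* fd still gets closed */
	}
	rc = 0;
out:
	close(fd);
	return rc;
}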
diff --git a/tools/testing/selftests/powerpc/math/fpu_preempt.c b/tools/testing/selftests/powerpc/math/fpu_preempt.c
index 5235bdc8c0b1..3e5b5663d244 100644
--- a/tools/testing/selftests/powerpc/math/fpu_preempt.c
+++ b/tools/testing/selftests/powerpc/math/fpu_preempt.c
@@ -37,19 +37,20 @@ __thread double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
int threads_starting;
int running;
-extern void preempt_fpu(double *darray, int *threads_starting, int *running);
+extern int preempt_fpu(double *darray, int *threads_starting, int *running);
void *preempt_fpu_c(void *p)
{
+ long rc;
int i;
+
srand(pthread_self());
for (i = 0; i < 21; i++)
darray[i] = rand();
- /* Test failed if it ever returns */
- preempt_fpu(darray, &threads_starting, &running);
+ rc = preempt_fpu(darray, &threads_starting, &running);
- return p;
+ return (void *)rc;
}
int test_preempt_fpu(void)
diff --git a/tools/testing/selftests/powerpc/math/vmx_preempt.c b/tools/testing/selftests/powerpc/math/vmx_preempt.c
index 2e059f154e77..397f9da8f1c3 100644
--- a/tools/testing/selftests/powerpc/math/vmx_preempt.c
+++ b/tools/testing/selftests/powerpc/math/vmx_preempt.c
@@ -37,19 +37,21 @@ __thread vector int varray[] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10,11,12},
int threads_starting;
int running;
-extern void preempt_vmx(vector int *varray, int *threads_starting, int *running);
+extern int preempt_vmx(vector int *varray, int *threads_starting, int *running);
void *preempt_vmx_c(void *p)
{
int i, j;
+ long rc;
+
srand(pthread_self());
for (i = 0; i < 12; i++)
for (j = 0; j < 4; j++)
varray[i][j] = rand();
- /* Test fails if it ever returns */
- preempt_vmx(varray, &threads_starting, &running);
- return p;
+ rc = preempt_vmx(varray, &threads_starting, &running);
+
+ return (void *)rc;
}
int test_preempt_vmx(void)
diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c
index e7ceabed7f51..7d0aa22bdc12 100644
--- a/tools/testing/selftests/proc/proc-uptime-002.c
+++ b/tools/testing/selftests/proc/proc-uptime-002.c
@@ -17,6 +17,7 @@
// while shifting across CPUs.
#undef NDEBUG
#include <assert.h>
+#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <stdlib.h>
@@ -54,7 +55,7 @@ int main(void)
len += sizeof(unsigned long);
free(m);
m = malloc(len);
- } while (sys_sched_getaffinity(0, len, m) == -EINVAL);
+ } while (sys_sched_getaffinity(0, len, m) == -1 && errno == EINVAL);
fd = open("/proc/uptime", O_RDONLY);
assert(fd >= 0);
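The loop-condition change reflects how libc's syscall(2) wrapper reports errors: it returns -1 and sets errno, rather than returning a negative errno value the way the raw kernel interface does, so comparing against -EINVAL never matches. A minimal sketch of the wrapper convention (the grow-the-mask behavior is an assumption about large systems):

/* Sketch: libc syscall() returns -1/errno, not -EINVAL. */
#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static long sys_sched_getaffinity(pid_t pid, size_t len, void *mask)
{
	return syscall(SYS_sched_getaffinity, pid, len, mask);
}

int main(void)
{
	unsigned long mask;

	/* a too-small len may yield EINVAL on machines with many CPUs */
	if (sys_sched_getaffinity(0, sizeof(mask), &mask) == -1 &&
	    errno == EINVAL)
		printf("kernel wants a larger mask\n");
	return 0;
}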
diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
index c0dd10257df5..49e6eddcb361 100644
--- a/tools/testing/selftests/ptp/testptp.c
+++ b/tools/testing/selftests/ptp/testptp.c
@@ -455,11 +455,11 @@ int main(int argc, char *argv[])
interval = t2 - t1;
offset = (t2 + t1) / 2 - tp;
- printf("system time: %lld.%u\n",
+ printf("system time: %lld.%09u\n",
(pct+2*i)->sec, (pct+2*i)->nsec);
- printf("phc time: %lld.%u\n",
+ printf("phc time: %lld.%09u\n",
(pct+2*i+1)->sec, (pct+2*i+1)->nsec);
- printf("system time: %lld.%u\n",
+ printf("system time: %lld.%09u\n",
(pct+2*i+2)->sec, (pct+2*i+2)->nsec);
printf("system/phc clock time offset is %" PRId64 " ns\n"
"system clock time delay is %" PRId64 " ns\n",
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk
index e05182d3e47d..6798ab45032d 100644
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk
@@ -1,4 +1,4 @@
-#!/bin/awk -f
+#!/usr/bin/awk -f
# SPDX-License-Identifier: GPL-2.0
# Modify SRCU for formal verification. The first argument should be srcu.h and
diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
index 215e1067f037..82ceca6aab96 100644
--- a/tools/testing/selftests/rseq/Makefile
+++ b/tools/testing/selftests/rseq/Makefile
@@ -4,8 +4,10 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
CLANG_FLAGS += -no-integrated-as
endif
+top_srcdir = ../../../..
+
CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L$(OUTPUT) -Wl,-rpath=./ \
- $(CLANG_FLAGS)
+ $(CLANG_FLAGS) -I$(top_srcdir)/tools/include
LDLIBS += -lpthread -ldl
# Own dependencies because we only want to build against 1st prerequisite, but
diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
index 986b9458efb2..e20191fb40d4 100644
--- a/tools/testing/selftests/rseq/rseq.c
+++ b/tools/testing/selftests/rseq/rseq.c
@@ -29,12 +29,22 @@
#include <dlfcn.h>
#include <stddef.h>
+#include <linux/compiler.h>
+
#include "../kselftest.h"
#include "rseq.h"
-static const ptrdiff_t *libc_rseq_offset_p;
-static const unsigned int *libc_rseq_size_p;
-static const unsigned int *libc_rseq_flags_p;
+/*
+ * Define weak versions to play nice with binaries that are statically linked
+ * against a libc that doesn't support registering its own rseq.
+ */
+__weak ptrdiff_t __rseq_offset;
+__weak unsigned int __rseq_size;
+__weak unsigned int __rseq_flags;
+
+static const ptrdiff_t *libc_rseq_offset_p = &__rseq_offset;
+static const unsigned int *libc_rseq_size_p = &__rseq_size;
+static const unsigned int *libc_rseq_flags_p = &__rseq_flags;
/* Offset from the thread pointer to the rseq area. */
ptrdiff_t rseq_offset;
@@ -108,10 +118,19 @@ int rseq_unregister_current_thread(void)
static __attribute__((constructor))
void rseq_init(void)
{
- libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
- libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
- libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");
- if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p) {
+ /*
+ * If the libc's registered rseq size isn't already valid, it may be
+ * because the binary is dynamically linked and not necessarily due to
+ * libc not having registered a restartable sequence. Try to find the
+ * symbols if that's the case.
+ */
+ if (!*libc_rseq_size_p) {
+ libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
+ libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
+ libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");
+ }
+ if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p &&
+ *libc_rseq_size_p != 0) {
/* rseq registration owned by glibc */
rseq_offset = *libc_rseq_offset_p;
rseq_size = *libc_rseq_size_p;
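The rseq change relies on weak definitions: when the binary is statically linked against a libc that exports __rseq_offset and friends, the strong libc definitions win; otherwise the weak zero-initialized stand-ins are used, with the dlsym() probe kept as a fallback for dynamic links. A hedged sketch of the pattern, using an invented symbol name (lib_feature_level is not a real export):

/* Sketch of the weak-symbol fallback pattern. */
#include <stdio.h>

/* Weak stand-in: used only if no strong definition is linked in. */
__attribute__((weak)) unsigned int lib_feature_level;

int main(void)
{
	if (lib_feature_level)
		printf("library registered support: %u\n", lib_feature_level);
	else
		printf("no strong definition, using fallback path\n");
	return 0;
}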
diff --git a/tools/testing/selftests/sigaltstack/current_stack_pointer.h b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
new file mode 100644
index 000000000000..ea9bdf3a90b1
--- /dev/null
+++ b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if __alpha__
+register unsigned long sp asm("$30");
+#elif __arm__ || __aarch64__ || __csky__ || __m68k__ || __mips__ || __riscv
+register unsigned long sp asm("sp");
+#elif __i386__
+register unsigned long sp asm("esp");
+#elif __loongarch64
+register unsigned long sp asm("$sp");
+#elif __ppc__
+register unsigned long sp asm("r1");
+#elif __s390x__
+register unsigned long sp asm("%15");
+#elif __sh__
+register unsigned long sp asm("r15");
+#elif __x86_64__
+register unsigned long sp asm("rsp");
+#elif __XTENSA__
+register unsigned long sp asm("a1");
+#else
+#error "implement current_stack_pointer equivalent"
+#endif
diff --git a/tools/testing/selftests/sigaltstack/sas.c b/tools/testing/selftests/sigaltstack/sas.c
index ad0f8df2ca0a..6e6054599491 100644
--- a/tools/testing/selftests/sigaltstack/sas.c
+++ b/tools/testing/selftests/sigaltstack/sas.c
@@ -19,6 +19,7 @@
#include <errno.h>
#include "../kselftest.h"
+#include "current_stack_pointer.h"
#ifndef SS_AUTODISARM
#define SS_AUTODISARM (1U << 31)
@@ -40,12 +41,6 @@ void my_usr1(int sig, siginfo_t *si, void *u)
stack_t stk;
struct stk_data *p;
-#if __s390x__
- register unsigned long sp asm("%15");
-#else
- register unsigned long sp asm("sp");
-#endif
-
if (sp < (unsigned long)sstack ||
sp >= (unsigned long)sstack + SIGSTKSZ) {
ksft_exit_fail_msg("SP is not on sigaltstack\n");
diff --git a/tools/testing/selftests/tc-testing/settings b/tools/testing/selftests/tc-testing/settings
new file mode 100644
index 000000000000..e2206265f67c
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/settings
@@ -0,0 +1 @@
+timeout=900
diff --git a/tools/testing/selftests/timers/clocksource-switch.c b/tools/testing/selftests/timers/clocksource-switch.c
index bfc974b4572d..c18313a5f357 100644
--- a/tools/testing/selftests/timers/clocksource-switch.c
+++ b/tools/testing/selftests/timers/clocksource-switch.c
@@ -110,10 +110,10 @@ int run_tests(int secs)
sprintf(buf, "./inconsistency-check -t %i", secs);
ret = system(buf);
- if (ret)
- return ret;
+ if (WIFEXITED(ret) && WEXITSTATUS(ret))
+ return WEXITSTATUS(ret);
ret = system("./nanosleep");
- return ret;
+ return WIFEXITED(ret) ? WEXITSTATUS(ret) : 0;
}
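system() returns a wait status, not an exit code, so comparing it directly with an exit code conflates normal exits, signals, and shell failures; the wait macros decode it. A short sketch:

/* Sketch: decoding the wait status returned by system(). */
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

int main(void)
{
	int ret = system("exit 3");

	if (ret == -1)
		perror("system");
	else if (WIFEXITED(ret))
		printf("child exited with %d\n", WEXITSTATUS(ret)); /* 3 */
	else if (WIFSIGNALED(ret))
		printf("child killed by signal %d\n", WTERMSIG(ret));
	return 0;
}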
diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c
index 5397de708d3c..48b9a803235a 100644
--- a/tools/testing/selftests/timers/valid-adjtimex.c
+++ b/tools/testing/selftests/timers/valid-adjtimex.c
@@ -40,7 +40,7 @@
#define ADJ_SETOFFSET 0x0100
#include <sys/syscall.h>
-static int clock_adjtime(clockid_t id, struct timex *tx)
+int clock_adjtime(clockid_t id, struct timex *tx)
{
return syscall(__NR_clock_adjtime, id, tx);
}
diff --git a/tools/testing/selftests/vm/map_hugetlb.c b/tools/testing/selftests/vm/map_hugetlb.c
index 312889edb84a..c65c55b7a789 100644
--- a/tools/testing/selftests/vm/map_hugetlb.c
+++ b/tools/testing/selftests/vm/map_hugetlb.c
@@ -15,6 +15,7 @@
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
+#include "vm_util.h"
#define LENGTH (256UL*1024*1024)
#define PROTECTION (PROT_READ | PROT_WRITE)
@@ -70,10 +71,16 @@ int main(int argc, char **argv)
{
void *addr;
int ret;
+ size_t hugepage_size;
size_t length = LENGTH;
int flags = FLAGS;
int shift = 0;
+ hugepage_size = default_huge_page_size();
+ /* munmap will fail if the length is not page aligned */
+ if (hugepage_size > length)
+ length = hugepage_size;
+
if (argc > 1)
length = atol(argv[1]) << 20;
if (argc > 2) {
diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c
index b00b1bfd9d8e..cb1108bc9249 100644
--- a/tools/thermal/tmon/sysfs.c
+++ b/tools/thermal/tmon/sysfs.c
@@ -13,6 +13,7 @@
#include <stdint.h>
#include <dirent.h>
#include <libintl.h>
+#include <limits.h>
#include <ctype.h>
#include <time.h>
#include <syslog.h>
@@ -33,9 +34,9 @@ int sysfs_set_ulong(char *path, char *filename, unsigned long val)
{
FILE *fd;
int ret = -1;
- char filepath[256];
+ char filepath[PATH_MAX + 2]; /* NUL and '/' */
- snprintf(filepath, 256, "%s/%s", path, filename);
+ snprintf(filepath, sizeof(filepath), "%s/%s", path, filename);
fd = fopen(filepath, "w");
if (!fd) {
@@ -57,9 +58,9 @@ static int sysfs_get_ulong(char *path, char *filename, unsigned long *p_ulong)
{
FILE *fd;
int ret = -1;
- char filepath[256];
+ char filepath[PATH_MAX + 2]; /* NUL and '/' */
- snprintf(filepath, 256, "%s/%s", path, filename);
+ snprintf(filepath, sizeof(filepath), "%s/%s", path, filename);
fd = fopen(filepath, "r");
if (!fd) {
@@ -76,9 +77,9 @@ static int sysfs_get_string(char *path, char *filename, char *str)
{
FILE *fd;
int ret = -1;
- char filepath[256];
+ char filepath[PATH_MAX + 2]; /* NUL and '/' */
- snprintf(filepath, 256, "%s/%s", path, filename);
+ snprintf(filepath, sizeof(filepath), "%s/%s", path, filename);
fd = fopen(filepath, "r");
if (!fd) {
@@ -199,8 +200,8 @@ static int find_tzone_cdev(struct dirent *nl, char *tz_name,
{
unsigned long trip_instance = 0;
char cdev_name_linked[256];
- char cdev_name[256];
- char cdev_trip_name[256];
+ char cdev_name[PATH_MAX];
+ char cdev_trip_name[PATH_MAX];
int cdev_id;
if (nl->d_type == DT_LNK) {
@@ -213,7 +214,8 @@ static int find_tzone_cdev(struct dirent *nl, char *tz_name,
return -EINVAL;
}
/* find the link to real cooling device record binding */
- snprintf(cdev_name, 256, "%s/%s", tz_name, nl->d_name);
+ snprintf(cdev_name, sizeof(cdev_name) - 2, "%s/%s",
+ tz_name, nl->d_name);
memset(cdev_name_linked, 0, sizeof(cdev_name_linked));
if (readlink(cdev_name, cdev_name_linked,
sizeof(cdev_name_linked) - 1) != -1) {
@@ -226,8 +228,8 @@ static int find_tzone_cdev(struct dirent *nl, char *tz_name,
/* find the trip point in which the cdev is binded to
* in this tzone
*/
- snprintf(cdev_trip_name, 256, "%s%s", nl->d_name,
- "_trip_point");
+ snprintf(cdev_trip_name, sizeof(cdev_trip_name) - 1,
+ "%s%s", nl->d_name, "_trip_point");
sysfs_get_ulong(tz_name, cdev_trip_name,
&trip_instance);
/* validate trip point range, e.g. trip could return -1
diff --git a/tools/thermal/tmon/tmon.h b/tools/thermal/tmon/tmon.h
index c9066ec104dd..44d16d778f04 100644
--- a/tools/thermal/tmon/tmon.h
+++ b/tools/thermal/tmon/tmon.h
@@ -27,6 +27,9 @@
#define NR_LINES_TZDATA 1
#define TMON_LOG_FILE "/var/tmp/tmon.log"
+#include <sys/time.h>
+#include <pthread.h>
+
extern unsigned long ticktime;
extern double time_elapsed;
extern unsigned long target_temp_user;
diff --git a/tools/virtio/linux/bug.h b/tools/virtio/linux/bug.h
index b14c2c3b6b85..74aef964f509 100644
--- a/tools/virtio/linux/bug.h
+++ b/tools/virtio/linux/bug.h
@@ -1,11 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef BUG_H
-#define BUG_H
+#ifndef _LINUX_BUG_H
+#define _LINUX_BUG_H
#define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond))
-#define BUILD_BUG_ON(x)
-
#define BUG() abort()
-#endif /* BUG_H */
+#endif /* _LINUX_BUG_H */
diff --git a/tools/virtio/linux/build_bug.h b/tools/virtio/linux/build_bug.h
new file mode 100644
index 000000000000..cdbb75e28a60
--- /dev/null
+++ b/tools/virtio/linux/build_bug.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BUILD_BUG_H
+#define _LINUX_BUILD_BUG_H
+
+#define BUILD_BUG_ON(x)
+
+#endif /* _LINUX_BUILD_BUG_H */
diff --git a/tools/virtio/linux/cpumask.h b/tools/virtio/linux/cpumask.h
new file mode 100644
index 000000000000..307da69d6b26
--- /dev/null
+++ b/tools/virtio/linux/cpumask.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CPUMASK_H
+#define _LINUX_CPUMASK_H
+
+#include <linux/kernel.h>
+
+#endif /* _LINUX_CPUMASK_H */
diff --git a/tools/virtio/linux/gfp.h b/tools/virtio/linux/gfp.h
new file mode 100644
index 000000000000..43d146f236f1
--- /dev/null
+++ b/tools/virtio/linux/gfp.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GFP_H
+#define __LINUX_GFP_H
+
+#include <linux/topology.h>
+
+#endif
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 6683b4a70b05..3325cdf22941 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -10,6 +10,7 @@
#include <stdarg.h>
#include <linux/compiler.h>
+#include <linux/log2.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bug.h>
diff --git a/tools/virtio/linux/kmsan.h b/tools/virtio/linux/kmsan.h
new file mode 100644
index 000000000000..272b5aa285d5
--- /dev/null
+++ b/tools/virtio/linux/kmsan.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KMSAN_H
+#define _LINUX_KMSAN_H
+
+#include <linux/gfp.h>
+
+inline void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+#endif /* _LINUX_KMSAN_H */
diff --git a/tools/virtio/linux/scatterlist.h b/tools/virtio/linux/scatterlist.h
index 369ee308b668..74d9e1825748 100644
--- a/tools/virtio/linux/scatterlist.h
+++ b/tools/virtio/linux/scatterlist.h
@@ -2,6 +2,7 @@
#ifndef SCATTERLIST_H
#define SCATTERLIST_H
#include <linux/kernel.h>
+#include <linux/bug.h>
struct scatterlist {
unsigned long page_link;
diff --git a/tools/virtio/linux/topology.h b/tools/virtio/linux/topology.h
new file mode 100644
index 000000000000..910794afb993
--- /dev/null
+++ b/tools/virtio/linux/topology.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TOPOLOGY_H
+#define _LINUX_TOPOLOGY_H
+
+#include <linux/cpumask.h>
+
+#endif /* _LINUX_TOPOLOGY_H */
diff --git a/tools/vm/slabinfo-gnuplot.sh b/tools/vm/slabinfo-gnuplot.sh
index 26e193ffd2a2..873a892147e5 100644
--- a/tools/vm/slabinfo-gnuplot.sh
+++ b/tools/vm/slabinfo-gnuplot.sh
@@ -150,7 +150,7 @@ do_preprocess()
let lines=3
out=`basename "$in"`"-slabs-by-loss"
`cat "$in" | grep -A "$lines" 'Slabs sorted by loss' |\
- egrep -iv '\-\-|Name|Slabs'\
+ grep -E -iv '\-\-|Name|Slabs'\
| awk '{print $1" "$4+$2*$3" "$4}' > "$out"`
if [ $? -eq 0 ]; then
do_slabs_plotting "$out"
@@ -159,7 +159,7 @@ do_preprocess()
let lines=3
out=`basename "$in"`"-slabs-by-size"
`cat "$in" | grep -A "$lines" 'Slabs sorted by size' |\
- egrep -iv '\-\-|Name|Slabs'\
+ grep -E -iv '\-\-|Name|Slabs'\
| awk '{print $1" "$4" "$4-$2*$3}' > "$out"`
if [ $? -eq 0 ]; then
do_slabs_plotting "$out"
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index 68092d15e12b..dda035c358c2 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -125,7 +125,7 @@ static void usage(void)
"-n|--numa Show NUMA information\n"
"-N|--lines=K Show the first K slabs\n"
"-o|--ops Show kmem_cache_ops\n"
- "-P|--partial Sort by number of partial slabs\n"
+ "-P|--partial Sort by number of partial slabs\n"
"-r|--report Detailed report on single slabs\n"
"-s|--shrink Shrink slabs\n"
"-S|--Size Sort by size\n"
@@ -1045,15 +1045,27 @@ static void sort_slabs(void)
for (s2 = s1 + 1; s2 < slabinfo + slabs; s2++) {
int result;
- if (sort_size)
- result = slab_size(s1) < slab_size(s2);
- else if (sort_active)
- result = slab_activity(s1) < slab_activity(s2);
- else if (sort_loss)
- result = slab_waste(s1) < slab_waste(s2);
- else if (sort_partial)
- result = s1->partial < s2->partial;
- else
+ if (sort_size) {
+ if (slab_size(s1) == slab_size(s2))
+ result = strcasecmp(s1->name, s2->name);
+ else
+ result = slab_size(s1) < slab_size(s2);
+ } else if (sort_active) {
+ if (slab_activity(s1) == slab_activity(s2))
+ result = strcasecmp(s1->name, s2->name);
+ else
+ result = slab_activity(s1) < slab_activity(s2);
+ } else if (sort_loss) {
+ if (slab_waste(s1) == slab_waste(s2))
+ result = strcasecmp(s1->name, s2->name);
+ else
+ result = slab_waste(s1) < slab_waste(s2);
+ } else if (sort_partial) {
+ if (s1->partial == s2->partial)
+ result = strcasecmp(s1->name, s2->name);
+ else
+ result = s1->partial < s2->partial;
+ } else
result = strcasecmp(s1->name, s2->name);
if (show_inverted)
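The sort change adds a deterministic tie-break: when the primary key compares equal, fall back to the slab name so repeated runs order entries stably. The same idea expressed as a standard qsort-style comparator, as a sketch (slabinfo itself uses result as a swap flag rather than a three-way compare):

/* Sketch: primary key with a name tie-break. */
#include <strings.h>

struct slab {
	const char *name;
	unsigned long size;
};

static int cmp_by_size(const void *a, const void *b)
{
	const struct slab *s1 = a, *s2 = b;

	if (s1->size != s2->size)
		return s1->size > s2->size ? -1 : 1; /* biggest first */
	return strcasecmp(s1->name, s2->name);       /* stable tie-break */
}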
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 35be0e2a4639..f4ce08c0d0be 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -459,6 +459,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
}
irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
+ if (!irq)
+ continue;
+
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = pendmask & (1U << bit_nr);
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
@@ -581,7 +584,11 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
unsigned long flags;
raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+
irq = __vgic_its_check_cache(dist, db, devid, eventid);
+ if (irq)
+ vgic_get_irq_kref(irq);
+
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
return irq;
@@ -761,6 +768,7 @@ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = true;
vgic_queue_irq_unlock(kvm, irq, flags);
+ vgic_put_irq(kvm, irq);
return 0;
}
@@ -1368,6 +1376,8 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
for (i = 0; i < irq_count; i++) {
irq = vgic_get_irq(kvm, NULL, intids[i]);
+ if (!irq)
+ continue;
update_affinity(irq, vcpu2);
@@ -2095,7 +2105,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
memset(entry, 0, esz);
- while (len > 0) {
+ while (true) {
int next_offset;
size_t byte_offset;
@@ -2108,6 +2118,9 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
return next_offset;
byte_offset = next_offset * esz;
+ if (byte_offset >= len)
+ break;
+
id += next_offset;
gpa += byte_offset;
len -= byte_offset;
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 8d606997c299..c94e97b351ee 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -191,15 +191,17 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
r = kvm_io_bus_unregister_dev(kvm,
zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
+ kvm_iodevice_destructor(&dev->dev);
+
/*
* On failure, unregister destroys all devices on the
* bus _except_ the target device, i.e. coalesced_zones
- * has been modified. No need to restart the walk as
- * there aren't any zones left.
+ * has been modified. Bail after destroying the target
+ * device, there's no need to restart the walk as there
+ * aren't any zones left.
*/
if (r)
break;
- kvm_iodevice_destructor(&dev->dev);
}
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 287444e52ccf..0008fc49528a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2937,7 +2937,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
struct kvm_fpu *fpu = NULL;
struct kvm_sregs *kvm_sregs = NULL;
- if (vcpu->kvm->mm != current->mm)
+ if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged)
return -EIO;
if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
@@ -3144,7 +3144,7 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
void __user *argp = compat_ptr(arg);
int r;
- if (vcpu->kvm->mm != current->mm)
+ if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged)
return -EIO;
switch (ioctl) {
@@ -3209,7 +3209,7 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
{
struct kvm_device *dev = filp->private_data;
- if (dev->kvm->mm != current->mm)
+ if (dev->kvm->mm != current->mm || dev->kvm->vm_bugged)
return -EIO;
switch (ioctl) {
@@ -3329,8 +3329,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
kvm_put_kvm(kvm);
mutex_lock(&kvm->lock);
list_del(&dev->vm_node);
+ if (ops->release)
+ ops->release(dev);
mutex_unlock(&kvm->lock);
- ops->destroy(dev);
+ if (ops->destroy)
+ ops->destroy(dev);
return ret;
}
@@ -3410,7 +3413,7 @@ static long kvm_vm_ioctl(struct file *filp,
void __user *argp = (void __user *)arg;
int r;
- if (kvm->mm != current->mm)
+ if (kvm->mm != current->mm || kvm->vm_bugged)
return -EIO;
switch (ioctl) {
case KVM_CREATE_VCPU:
@@ -3618,7 +3621,7 @@ static long kvm_vm_compat_ioctl(struct file *filp,
struct kvm *kvm = filp->private_data;
int r;
- if (kvm->mm != current->mm)
+ if (kvm->mm != current->mm || kvm->vm_bugged)
return -EIO;
switch (ioctl) {
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT